Search in sources:

Example 11 with OmKeyArgs

Use of org.apache.hadoop.ozone.om.helpers.OmKeyArgs in project ozone by apache.

From the class TestStorageContainerManagerHA, method testPutKey.

public void testPutKey() throws Exception {
    String volumeName = UUID.randomUUID().toString();
    String bucketName = UUID.randomUUID().toString();
    Instant testStartTime = Instant.now();
    ObjectStore store = OzoneClientFactory.getRpcClient(cluster.getConf()).getObjectStore();
    String value = "sample value";
    store.createVolume(volumeName);
    OzoneVolume volume = store.getVolume(volumeName);
    volume.createBucket(bucketName);
    OzoneBucket bucket = volume.getBucket(bucketName);
    String keyName = UUID.randomUUID().toString();
    OzoneOutputStream out = bucket.createKey(keyName, value.getBytes(UTF_8).length, RATIS, ONE, new HashMap<>());
    out.write(value.getBytes(UTF_8));
    out.close();
    OzoneKey key = bucket.getKey(keyName);
    Assert.assertEquals(keyName, key.getName());
    OzoneInputStream is = bucket.readKey(keyName);
    byte[] fileContent = new byte[value.getBytes(UTF_8).length];
    Assert.assertEquals(fileContent.length, is.read(fileContent));
    Assert.assertEquals(value, new String(fileContent, UTF_8));
    Assert.assertFalse(key.getCreationTime().isBefore(testStartTime));
    Assert.assertFalse(key.getModificationTime().isBefore(testStartTime));
    is.close();
    final OmKeyArgs keyArgs = new OmKeyArgs.Builder()
        .setVolumeName(volumeName)
        .setBucketName(bucketName)
        .setReplicationConfig(RatisReplicationConfig.getInstance(HddsProtos.ReplicationFactor.ONE))
        .setKeyName(keyName)
        .setRefreshPipeline(true)
        .build();
    final OmKeyInfo keyInfo = cluster.getOzoneManager().lookupKey(keyArgs);
    final List<OmKeyLocationInfo> keyLocationInfos = keyInfo.getKeyLocationVersions().get(0).getBlocksLatestVersionOnly();
    long index = -1;
    for (StorageContainerManager scm : cluster.getStorageContainerManagers()) {
        if (scm.checkLeader()) {
            index = getLastAppliedIndex(scm);
        }
    }
    Assert.assertNotEquals(-1, index);
    long finalIndex = index;
    // Ensure all follower SCMs have caught up with the leader
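    // (waitFor polls the condition every 100 ms and times out after 10 s)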
    GenericTestUtils.waitFor(() -> areAllScmInSync(finalIndex), 100, 10000);
    final long containerID = keyLocationInfos.get(0).getContainerID();
    for (int k = 0; k < numOfSCMs; k++) {
        StorageContainerManager scm = cluster.getStorageContainerManagers().get(k);
        // flush to DB on each SCM
        ((SCMRatisServerImpl) scm.getScmHAManager().getRatisServer()).getStateMachine().takeSnapshot();
        Assert.assertTrue(scm.getContainerManager().containerExist(ContainerID.valueOf(containerID)));
        Assert.assertNotNull(scm.getScmMetadataStore().getContainerTable().get(ContainerID.valueOf(containerID)));
    }
}
Also used: OzoneInputStream(org.apache.hadoop.ozone.client.io.OzoneInputStream) ObjectStore(org.apache.hadoop.ozone.client.ObjectStore) StorageContainerManager(org.apache.hadoop.hdds.scm.server.StorageContainerManager) Instant(java.time.Instant) OzoneOutputStream(org.apache.hadoop.ozone.client.io.OzoneOutputStream) OmKeyArgs(org.apache.hadoop.ozone.om.helpers.OmKeyArgs) OmKeyLocationInfo(org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo) OzoneVolume(org.apache.hadoop.ozone.client.OzoneVolume) OzoneBucket(org.apache.hadoop.ozone.client.OzoneBucket) OzoneKey(org.apache.hadoop.ozone.client.OzoneKey) OmKeyInfo(org.apache.hadoop.ozone.om.helpers.OmKeyInfo)
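
Distilled from the test above, a minimal sketch of the OmKeyArgs lookup pattern as a test helper. The method name firstContainerIdOf is hypothetical; a MiniOzoneCluster and an already-written key are assumed.

private long firstContainerIdOf(MiniOzoneCluster cluster, String volumeName,
        String bucketName, String keyName) throws IOException {
    // refreshPipeline asks the OM to fetch fresh pipeline info from SCM
    OmKeyArgs args = new OmKeyArgs.Builder()
        .setVolumeName(volumeName)
        .setBucketName(bucketName)
        .setKeyName(keyName)
        .setRefreshPipeline(true)
        .build();
    OmKeyInfo info = cluster.getOzoneManager().lookupKey(args);
    // the first block of the latest key version identifies the backing container
    return info.getLatestVersionLocations().getBlocksLatestVersionOnly().get(0).getContainerID();
}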

Example 12 with OmKeyArgs

Use of org.apache.hadoop.ozone.om.helpers.OmKeyArgs in project ozone by apache.

From the class ChunkKeyHandler, method execute.

@Override
protected void execute(OzoneClient client, OzoneAddress address) throws IOException, OzoneClientException {
    containerOperationClient = new ContainerOperationClient(createOzoneConfiguration());
    xceiverClientManager = containerOperationClient.getXceiverClientManager();
    ozoneManagerClient = client.getObjectStore().getClientProxy().getOzoneManagerClient();
    address.ensureKeyAddress();
    JsonElement element;
    JsonObject result = new JsonObject();
    String volumeName = address.getVolumeName();
    String bucketName = address.getBucketName();
    String keyName = address.getKeyName();
    List<ContainerProtos.ChunkInfo> tempchunks = null;
    List<ChunkDetails> chunkDetailsList = new ArrayList<>();
    HashSet<String> chunkPaths = new HashSet<>();
    OmKeyArgs keyArgs = new OmKeyArgs.Builder()
        .setVolumeName(volumeName)
        .setBucketName(bucketName)
        .setKeyName(keyName)
        .setRefreshPipeline(true)
        .build();
    OmKeyInfo keyInfo = ozoneManagerClient.lookupKey(keyArgs);
    // Querying the key locations: the OM is queried to get the containerID
    // and localID pertaining to a given key.
    List<OmKeyLocationInfo> locationInfos = keyInfo.getLatestVersionLocations().getBlocksLatestVersionOnly();
    // for zero-sized key
    if (locationInfos.isEmpty()) {
        System.out.println("No Key Locations Found");
        return;
    }
    ContainerLayoutVersion containerLayoutVersion = ContainerLayoutVersion.getConfiguredVersion(getConf());
    JsonArray responseArrayList = new JsonArray();
    for (OmKeyLocationInfo keyLocation : locationInfos) {
        ContainerChunkInfo containerChunkInfoVerbose = new ContainerChunkInfo();
        ContainerChunkInfo containerChunkInfo = new ContainerChunkInfo();
        long containerId = keyLocation.getContainerID();
        chunkPaths.clear();
        Pipeline pipeline = keyLocation.getPipeline();
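        // Block reads go straight to the datanodes, so any Ratis pipeline is
        // rewritten as STANDALONE/ONE before a read client is acquired.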
        if (pipeline.getType() != HddsProtos.ReplicationType.STAND_ALONE) {
            pipeline = Pipeline.newBuilder(pipeline).setReplicationConfig(StandaloneReplicationConfig.getInstance(ONE)).build();
        }
        xceiverClient = xceiverClientManager.acquireClientForReadData(pipeline);
        // The datanode is queried to get chunk information. Querying the
        // OM, SCM and datanode together yields the chunk location information.
        ContainerProtos.DatanodeBlockID datanodeBlockID = keyLocation.getBlockID().getDatanodeBlockIDProtobuf();
        // doing a getBlock on all nodes
        HashMap<DatanodeDetails, ContainerProtos.GetBlockResponseProto> responses = null;
        try {
            responses = ContainerProtocolCalls.getBlockFromAllNodes(xceiverClient, datanodeBlockID, keyLocation.getToken());
        } catch (InterruptedException e) {
            LOG.error("Execution interrupted", e);
            Thread.currentThread().interrupt();
            // responses is still null here; bail out instead of hitting an
            // NPE in the loop below
            return;
        }
        JsonArray responseFromAllNodes = new JsonArray();
        for (Map.Entry<DatanodeDetails, ContainerProtos.GetBlockResponseProto> entry : responses.entrySet()) {
            JsonObject jsonObj = new JsonObject();
            if (entry.getValue() == null) {
                LOG.error("Cant execute getBlock on this node");
                continue;
            }
            tempchunks = entry.getValue().getBlockData().getChunksList();
            ContainerProtos.ContainerDataProto containerData = containerOperationClient.readContainer(keyLocation.getContainerID(), keyLocation.getPipeline());
            for (ContainerProtos.ChunkInfo chunkInfo : tempchunks) {
                String fileName = containerLayoutVersion.getChunkFile(
                    new File(getChunkLocationPath(containerData.getContainerPath())),
                    keyLocation.getBlockID(), ChunkInfo.getFromProtoBuf(chunkInfo)).toString();
                chunkPaths.add(fileName);
                ChunkDetails chunkDetails = new ChunkDetails();
                chunkDetails.setChunkName(fileName);
                chunkDetails.setChunkOffset(chunkInfo.getOffset());
                chunkDetailsList.add(chunkDetails);
            }
            containerChunkInfoVerbose.setContainerPath(containerData.getContainerPath());
            containerChunkInfoVerbose.setPipeline(keyLocation.getPipeline());
            containerChunkInfoVerbose.setChunkInfos(chunkDetailsList);
            containerChunkInfo.setFiles(chunkPaths);
            containerChunkInfo.setPipelineID(keyLocation.getPipeline().getId().getId());
            Gson gson = new GsonBuilder().create();
            if (isVerbose()) {
                element = gson.toJsonTree(containerChunkInfoVerbose);
            } else {
                element = gson.toJsonTree(containerChunkInfo);
            }
            jsonObj.addProperty("Datanode-HostName", entry.getKey().getHostName());
            jsonObj.addProperty("Datanode-IP", entry.getKey().getIpAddress());
            jsonObj.addProperty("Container-ID", containerId);
            jsonObj.addProperty("Block-ID", keyLocation.getLocalID());
            jsonObj.add("Locations", element);
            responseFromAllNodes.add(jsonObj);
        }
        // release the read client once per acquired pipeline, not per datanode
        xceiverClientManager.releaseClientForReadData(xceiverClient, false);
        responseArrayList.add(responseFromAllNodes);
    }
    result.add("KeyLocations", responseArrayList);
    Gson gson2 = new GsonBuilder().setPrettyPrinting().create();
    String prettyJson = gson2.toJson(result);
    System.out.println(prettyJson);
}
Also used: ChunkInfo(org.apache.hadoop.ozone.container.common.helpers.ChunkInfo) ContainerLayoutVersion(org.apache.hadoop.ozone.container.common.impl.ContainerLayoutVersion) ArrayList(java.util.ArrayList) JsonObject(com.google.gson.JsonObject) Gson(com.google.gson.Gson) DatanodeDetails(org.apache.hadoop.hdds.protocol.DatanodeDetails) OmKeyInfo(org.apache.hadoop.ozone.om.helpers.OmKeyInfo) HashSet(java.util.HashSet) ContainerProtos(org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos) GsonBuilder(com.google.gson.GsonBuilder) OmKeyArgs(org.apache.hadoop.ozone.om.helpers.OmKeyArgs) ContainerOperationClient(org.apache.hadoop.hdds.scm.cli.ContainerOperationClient) OmKeyLocationInfo(org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo) Pipeline(org.apache.hadoop.hdds.scm.pipeline.Pipeline) JsonArray(com.google.gson.JsonArray) JsonElement(com.google.gson.JsonElement) HashMap(java.util.HashMap) Map(java.util.Map) File(java.io.File)
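
For reference, a condensed sketch of the fan-out read the handler performs: ask every datanode in the pipeline for its copy of a block and skip replicas that fail. xceiverClient and keyLocation are assumed to be set up as in the handler above; throwing IOException on interrupt is one reasonable policy for a method that already declares it, not necessarily the project's.

Map<DatanodeDetails, ContainerProtos.GetBlockResponseProto> replicas;
try {
    // query all datanodes in the pipeline for the same block
    replicas = ContainerProtocolCalls.getBlockFromAllNodes(xceiverClient,
        keyLocation.getBlockID().getDatanodeBlockIDProtobuf(),
        keyLocation.getToken());
} catch (InterruptedException e) {
    Thread.currentThread().interrupt(); // restore the interrupt flag
    throw new IOException("Interrupted while fetching block replicas", e);
}
for (Map.Entry<DatanodeDetails, ContainerProtos.GetBlockResponseProto> entry : replicas.entrySet()) {
    if (entry.getValue() == null) {
        continue; // this replica could not be read; skip it
    }
    List<ContainerProtos.ChunkInfo> chunks = entry.getValue().getBlockData().getChunksList();
    // ... inspect chunk names and offsets per datanode, as the handler does
}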

Example 13 with OmKeyArgs

Use of org.apache.hadoop.ozone.om.helpers.OmKeyArgs in project ozone by apache.

From the class TestKeyManagerImpl, method testCreateDirectory.

@Test
public void testCreateDirectory() throws IOException {
    // Create directory where the parent directory does not exist
    StringBuilder keyNameBuf = new StringBuilder();
    keyNameBuf.append(RandomStringUtils.randomAlphabetic(5));
    OmKeyArgs keyArgs = createBuilder().setKeyName(keyNameBuf.toString()).build();
    for (int i = 0; i < 5; i++) {
        keyNameBuf.append("/").append(RandomStringUtils.randomAlphabetic(5));
    }
    String keyName = keyNameBuf.toString();
    writeClient.createDirectory(keyArgs);
    Path path = Paths.get(keyName);
    while (path != null) {
        // verify parent directories are created
        Assert.assertTrue(keyManager.getFileStatus(keyArgs).isDirectory());
        path = path.getParent();
    }
    // make sure create directory fails where parent is a file
    keyName = RandomStringUtils.randomAlphabetic(5);
    keyArgs = createBuilder().setKeyName(keyName).build();
    OpenKeySession keySession = writeClient.openKey(keyArgs);
    keyArgs.setLocationInfoList(keySession.getKeyInfo().getLatestVersionLocations().getLocationList());
    writeClient.commitKey(keyArgs, keySession.getId());
    try {
        writeClient.createDirectory(keyArgs);
        Assert.fail("Creation should fail for directory.");
    } catch (OMException e) {
        Assert.assertEquals(e.getResult(), OMException.ResultCodes.FILE_ALREADY_EXISTS);
    }
    // create directory where parent is root
    keyName = RandomStringUtils.randomAlphabetic(5);
    keyArgs = createBuilder().setKeyName(keyName).build();
    writeClient.createDirectory(keyArgs);
    OzoneFileStatus fileStatus = keyManager.getFileStatus(keyArgs);
    Assert.assertTrue(fileStatus.isDirectory());
    Assert.assertTrue(fileStatus.getKeyInfo().getKeyLocationVersions().get(0).getLocationList().isEmpty());
}
Also used: Path(java.nio.file.Path) OpenKeySession(org.apache.hadoop.ozone.om.helpers.OpenKeySession) OmKeyArgs(org.apache.hadoop.ozone.om.helpers.OmKeyArgs) OMException(org.apache.hadoop.ozone.om.exceptions.OMException) OzoneFileStatus(org.apache.hadoop.ozone.om.helpers.OzoneFileStatus) Test(org.junit.Test)
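
The test leans on a createBuilder() helper that the snippet does not show. A plausible reconstruction, assuming the VOLUME_NAME/BUCKET_NAME constants used in the later examples and minimal single-replica defaults; the exact defaults in the real test class may differ.

private OmKeyArgs.Builder createBuilder() {
    return new OmKeyArgs.Builder()
        .setVolumeName(VOLUME_NAME)
        .setBucketName(BUCKET_NAME)
        .setDataSize(0)
        // a single standalone replica keeps the test cluster minimal
        .setReplicationConfig(StandaloneReplicationConfig.getInstance(HddsProtos.ReplicationFactor.ONE))
        .setAcls(Collections.emptyList());
}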

Example 14 with OmKeyArgs

Use of org.apache.hadoop.ozone.om.helpers.OmKeyArgs in project ozone by apache.

From the class TestKeyManagerImpl, method testCheckAccessForFileKey.

@Test
public void testCheckAccessForFileKey() throws Exception {
    // GIVEN
    OmKeyArgs keyArgs = createBuilder().setKeyName("testdir/deep/NOTICE.txt").build();
    OpenKeySession keySession = writeClient.createFile(keyArgs, false, true);
    keyArgs.setLocationInfoList(keySession.getKeyInfo().getLatestVersionLocations().getLocationList());
    writeClient.commitKey(keyArgs, keySession.getId());
    reset(mockScmContainerClient);
    OzoneObj fileKey = OzoneObjInfo.Builder.fromKeyArgs(keyArgs).setStoreType(OzoneObj.StoreType.OZONE).build();
    RequestContext context = currentUserReads();
    // WHEN
    boolean access = keyManager.checkAccess(fileKey, context);
    // THEN
    Assert.assertTrue(access);
    verify(mockScmContainerClient, never()).getContainerWithPipelineBatch(any());
}
Also used: OzoneObj(org.apache.hadoop.ozone.security.acl.OzoneObj) OpenKeySession(org.apache.hadoop.ozone.om.helpers.OpenKeySession) RequestContext(org.apache.hadoop.ozone.security.acl.RequestContext) OmKeyArgs(org.apache.hadoop.ozone.om.helpers.OmKeyArgs) Test(org.junit.Test)
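
currentUserReads() is likewise not shown in the snippet. A hedged sketch of what such a helper could look like; the builder entry point and the exact ACL constants are assumptions about the Ozone ACL API, not taken from the source above.

private RequestContext currentUserReads() throws IOException {
    return RequestContext.newBuilder()
        .setClientUgi(UserGroupInformation.getCurrentUser())
        .setAclType(IAccessAuthorizer.ACLIdentityType.USER)
        .setAclRights(IAccessAuthorizer.ACLType.READ)
        .build();
}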

Example 15 with OmKeyArgs

Use of org.apache.hadoop.ozone.om.helpers.OmKeyArgs in project ozone by apache.

From the class TestKeyManagerImpl, method testListStatusWithDeletedEntriesInCache.

@Test
public void testListStatusWithDeletedEntriesInCache() throws Exception {
    String prefixKey = "key-";
    TreeSet<String> existKeySet = new TreeSet<>();
    TreeSet<String> deletedKeySet = new TreeSet<>();
    for (int i = 1; i <= 100; i++) {
        if (i % 2 == 0) {
            OMRequestTestUtils.addKeyToTable(false, VOLUME_NAME, BUCKET_NAME, prefixKey + i, 1000L, HddsProtos.ReplicationType.RATIS, ONE, metadataManager);
            existKeySet.add(prefixKey + i);
        } else {
            OMRequestTestUtils.addKeyToTableCache(VOLUME_NAME, BUCKET_NAME, prefixKey + i, HddsProtos.ReplicationType.RATIS, ONE, metadataManager);
            String key = metadataManager.getOzoneKey(VOLUME_NAME, BUCKET_NAME, prefixKey + i);
            // Mark as deleted in cache.
            metadataManager.getKeyTable(getDefaultBucketLayout()).addCacheEntry(new CacheKey<>(key), new CacheValue<>(Optional.absent(), 2L));
            deletedKeySet.add(key);
        }
    }
    OmKeyArgs rootDirArgs = createKeyArgs("");
    List<OzoneFileStatus> fileStatuses = keyManager.listStatus(rootDirArgs, true, "", 1000);
    // Should only get entries that are not marked as deleted.
    Assert.assertEquals(50, fileStatuses.size());
    // Test startKey
    fileStatuses = keyManager.listStatus(rootDirArgs, true, prefixKey, 1000);
    // Should only get entries that are not marked as deleted.
    Assert.assertEquals(50, fileStatuses.size());
    // Verify result
    TreeSet<String> expectedKeys = new TreeSet<>();
    for (OzoneFileStatus fileStatus : fileStatuses) {
        String keyName = fileStatus.getKeyInfo().getKeyName();
        expectedKeys.add(keyName);
        Assert.assertTrue(keyName.startsWith(prefixKey));
    }
    Assert.assertEquals(expectedKeys, existKeySet);
    // Sanity check, existKeySet should not intersect with deletedKeySet.
    Assert.assertEquals(0, Sets.intersection(existKeySet, deletedKeySet).size());
    // Next, mark half of the entries left as deleted
    boolean doDelete = false;
    for (String key : existKeySet) {
        if (doDelete) {
            String ozoneKey = metadataManager.getOzoneKey(VOLUME_NAME, BUCKET_NAME, key);
            metadataManager.getKeyTable(getDefaultBucketLayout()).addCacheEntry(new CacheKey<>(ozoneKey), new CacheValue<>(Optional.absent(), 2L));
            deletedKeySet.add(key);
        }
        doDelete = !doDelete;
    }
    // Update existKeySet
    existKeySet.removeAll(deletedKeySet);
    fileStatuses = keyManager.listStatus(rootDirArgs, true, "", 1000);
    // Should only get entries that are not marked as deleted.
    Assert.assertEquals(50 / 2, fileStatuses.size());
    // Verify result
    expectedKeys.clear();
    for (OzoneFileStatus fileStatus : fileStatuses) {
        String keyName = fileStatus.getKeyInfo().getKeyName();
        expectedKeys.add(keyName);
        Assert.assertTrue(keyName.startsWith(prefixKey));
    }
    Assert.assertEquals(expectedKeys, existKeySet);
    // Test pagination
    final int batchSize = 5;
    String startKey = "";
    expectedKeys.clear();
    do {
        fileStatuses = keyManager.listStatus(rootDirArgs, true, startKey, batchSize);
        // This is fine as we are using a set to store results.
        for (OzoneFileStatus fileStatus : fileStatuses) {
            startKey = fileStatus.getKeyInfo().getKeyName();
            expectedKeys.add(startKey);
            Assert.assertTrue(startKey.startsWith(prefixKey));
        }
    // fileStatuses.size() == batchSize indicates there might be another batch
    // fileStatuses.size() < batchSize indicates it is the last batch
    } while (fileStatuses.size() == batchSize);
    Assert.assertEquals(expectedKeys, existKeySet);
    // Clean up by marking remaining entries as deleted
    for (String key : existKeySet) {
        String ozoneKey = metadataManager.getOzoneKey(VOLUME_NAME, BUCKET_NAME, key);
        metadataManager.getKeyTable(getDefaultBucketLayout()).addCacheEntry(new CacheKey<>(ozoneKey), new CacheValue<>(Optional.absent(), 2L));
        deletedKeySet.add(key);
    }
    // Update existKeySet
    existKeySet.removeAll(deletedKeySet);
    Assert.assertTrue(existKeySet.isEmpty());
}
Also used: TreeSet(java.util.TreeSet) OmKeyArgs(org.apache.hadoop.ozone.om.helpers.OmKeyArgs) OzoneFileStatus(org.apache.hadoop.ozone.om.helpers.OzoneFileStatus) Test(org.junit.Test)
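
The pagination idiom at the end of the test generalizes into a small helper. A sketch: listAllKeyNames is a hypothetical name, and keyManager/rootDirArgs are as in the test. As in the original, a Set absorbs the duplicate that startKey-based resumption can re-return.

private Set<String> listAllKeyNames(KeyManager keyManager, OmKeyArgs rootDirArgs,
        int batchSize) throws IOException {
    Set<String> names = new TreeSet<>();
    String startKey = "";
    List<OzoneFileStatus> batch;
    do {
        batch = keyManager.listStatus(rootDirArgs, true, startKey, batchSize);
        for (OzoneFileStatus status : batch) {
            startKey = status.getKeyInfo().getKeyName(); // resume point for the next page
            names.add(startKey);
        }
        // a full page may be followed by more results; a short page is the last
    } while (batch.size() == batchSize);
    return names;
}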

Aggregations

OmKeyArgs (org.apache.hadoop.ozone.om.helpers.OmKeyArgs): 106 usages
Test (org.junit.Test): 46 usages
OmKeyInfo (org.apache.hadoop.ozone.om.helpers.OmKeyInfo): 45 usages
OmKeyLocationInfo (org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo): 33 usages
OzoneOutputStream (org.apache.hadoop.ozone.client.io.OzoneOutputStream): 28 usages
OpenKeySession (org.apache.hadoop.ozone.om.helpers.OpenKeySession): 23 usages
ContainerInfo (org.apache.hadoop.hdds.scm.container.ContainerInfo): 20 usages
KeyArgs (org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyArgs): 19 usages
ArrayList (java.util.ArrayList): 18 usages
DatanodeDetails (org.apache.hadoop.hdds.protocol.DatanodeDetails): 18 usages
Pipeline (org.apache.hadoop.hdds.scm.pipeline.Pipeline): 18 usages
KeyOutputStream (org.apache.hadoop.ozone.client.io.KeyOutputStream): 17 usages
OzoneFileStatus (org.apache.hadoop.ozone.om.helpers.OzoneFileStatus): 17 usages
DeleteKeyArgs (org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.DeleteKeyArgs): 15 usages
OMRequest (org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest): 15 usages
RepeatedOmKeyInfo (org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo): 13 usages
IOException (java.io.IOException): 10 usages
OMException (org.apache.hadoop.ozone.om.exceptions.OMException): 9 usages
BlockID (org.apache.hadoop.hdds.client.BlockID): 8 usages
OzoneBucket (org.apache.hadoop.ozone.client.OzoneBucket): 8 usages