
Example 1 with OzoneKeyLocation

Use of org.apache.hadoop.ozone.client.OzoneKeyLocation in project ozone by apache.

The class RpcClient, method getKeyDetails:

@Override
public OzoneKeyDetails getKeyDetails(String volumeName, String bucketName, String keyName) throws IOException {
    Preconditions.checkNotNull(volumeName);
    Preconditions.checkNotNull(bucketName);
    Preconditions.checkNotNull(keyName);
    OmKeyArgs keyArgs = new OmKeyArgs.Builder()
        .setVolumeName(volumeName)
        .setBucketName(bucketName)
        .setKeyName(keyName)
        .setRefreshPipeline(true)
        .setSortDatanodesInPipeline(topologyAwareReadEnabled)
        .setLatestVersionLocation(getLatestVersionLocation)
        .build();
    OmKeyInfo keyInfo = ozoneManagerClient.lookupKey(keyArgs);
    List<OzoneKeyLocation> ozoneKeyLocations = new ArrayList<>();
    long lastKeyOffset = 0L;
    List<OmKeyLocationInfo> omKeyLocationInfos = keyInfo.getLatestVersionLocations().getBlocksLatestVersionOnly();
    for (OmKeyLocationInfo info : omKeyLocationInfos) {
        ozoneKeyLocations.add(new OzoneKeyLocation(info.getContainerID(), info.getLocalID(), info.getLength(), info.getOffset(), lastKeyOffset));
        lastKeyOffset += info.getLength();
    }
    return new OzoneKeyDetails(keyInfo.getVolumeName(), keyInfo.getBucketName(),
        keyInfo.getKeyName(), keyInfo.getDataSize(), keyInfo.getCreationTime(),
        keyInfo.getModificationTime(), ozoneKeyLocations,
        keyInfo.getReplicationConfig(), keyInfo.getMetadata(),
        keyInfo.getFileEncryptionInfo());
}
Also used : OzoneKeyDetails(org.apache.hadoop.ozone.client.OzoneKeyDetails) OzoneKeyLocation(org.apache.hadoop.ozone.client.OzoneKeyLocation) OmKeyInfo(org.apache.hadoop.ozone.om.helpers.OmKeyInfo) RepeatedOmKeyInfo(org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo) ArrayList(java.util.ArrayList) OmKeyArgs(org.apache.hadoop.ozone.om.helpers.OmKeyArgs) OmKeyLocationInfo(org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo)
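
Supplementary note (not part of the Ozone sources above): a minimal caller-side sketch of how the OzoneKeyLocation list populated by getKeyDetails might be consumed. The volume, bucket and key names and the client bootstrap via OzoneClientFactory with a default OzoneConfiguration are illustrative assumptions; on older client versions bucket.getKey may return OzoneKey and require a cast to OzoneKeyDetails.

import java.io.IOException;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.ozone.client.OzoneBucket;
import org.apache.hadoop.ozone.client.OzoneClient;
import org.apache.hadoop.ozone.client.OzoneClientFactory;
import org.apache.hadoop.ozone.client.OzoneKeyDetails;
import org.apache.hadoop.ozone.client.OzoneKeyLocation;

public class KeyLocationPrinter {
    public static void main(String[] args) throws IOException {
        // Hypothetical names; replace with an existing volume, bucket and key.
        try (OzoneClient client = OzoneClientFactory.getRpcClient(new OzoneConfiguration())) {
            OzoneBucket bucket = client.getObjectStore()
                .getVolume("vol1").getBucket("bucket1");
            OzoneKeyDetails details = bucket.getKey("key1");
            // Each OzoneKeyLocation maps a slice of the key to one block in a container.
            for (OzoneKeyLocation location : details.getOzoneKeyLocations()) {
                System.out.printf("container=%d localId=%d length=%d%n",
                    location.getContainerID(), location.getLocalID(), location.getLength());
            }
        }
    }
}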

Example 2 with OzoneKeyLocation

Use of org.apache.hadoop.ozone.client.OzoneKeyLocation in project ozone by apache.

The class TestOzoneRpcClientAbstract, method testGetKeyDetails:

@Test
public void testGetKeyDetails() throws IOException {
    String volumeName = UUID.randomUUID().toString();
    String bucketName = UUID.randomUUID().toString();
    store.createVolume(volumeName);
    OzoneVolume volume = store.getVolume(volumeName);
    volume.createBucket(bucketName);
    OzoneBucket bucket = volume.getBucket(bucketName);
    String keyName = UUID.randomUUID().toString();
    String keyValue = RandomStringUtils.random(128);
    // String keyValue = "this is a test value.glx";
    // create the initial key with size 0, write will allocate the first block.
    OzoneOutputStream out = bucket.createKey(keyName, keyValue.getBytes(UTF_8).length, RATIS, ONE, new HashMap<>());
    out.write(keyValue.getBytes(UTF_8));
    out.close();
    OzoneInputStream is = bucket.readKey(keyName);
    byte[] fileContent = new byte[32];
    is.read(fileContent);
    // First, confirm the key info from the client matches the info in OM.
    OmKeyArgs.Builder builder = new OmKeyArgs.Builder();
    builder.setVolumeName(volumeName).setBucketName(bucketName)
        .setKeyName(keyName).setRefreshPipeline(true);
    OmKeyLocationInfo keyInfo = ozoneManager.lookupKey(builder.build())
        .getKeyLocationVersions().get(0).getBlocksLatestVersionOnly().get(0);
    long containerID = keyInfo.getContainerID();
    long localID = keyInfo.getLocalID();
    OzoneKeyDetails keyDetails = (OzoneKeyDetails) bucket.getKey(keyName);
    Assert.assertEquals(keyName, keyDetails.getName());
    List<OzoneKeyLocation> keyLocations = keyDetails.getOzoneKeyLocations();
    Assert.assertEquals(1, keyLocations.size());
    Assert.assertEquals(containerID, keyLocations.get(0).getContainerID());
    Assert.assertEquals(localID, keyLocations.get(0).getLocalID());
    // Make sure that the data size matched.
    Assert.assertEquals(keyValue.getBytes(UTF_8).length, keyLocations.get(0).getLength());
    // Second, sum the data size from the chunks in the container via containerID
    // and localID, and make sure the size equals the size from keyDetails.
    ContainerInfo container = cluster.getStorageContainerManager()
        .getContainerManager().getContainer(ContainerID.valueOf(containerID));
    Pipeline pipeline = cluster.getStorageContainerManager()
        .getPipelineManager().getPipeline(container.getPipelineID());
    List<DatanodeDetails> datanodes = pipeline.getNodes();
    Assert.assertEquals(1, datanodes.size());
    DatanodeDetails datanodeDetails = datanodes.get(0);
    Assert.assertNotNull(datanodeDetails);
    HddsDatanodeService datanodeService = null;
    for (HddsDatanodeService datanodeServiceItr : cluster.getHddsDatanodes()) {
        if (datanodeDetails.equals(datanodeServiceItr.getDatanodeDetails())) {
            datanodeService = datanodeServiceItr;
            break;
        }
    }
    KeyValueContainerData containerData = (KeyValueContainerData) datanodeService
        .getDatanodeStateMachine().getContainer().getContainerSet()
        .getContainer(containerID).getContainerData();
    try (ReferenceCountedDB db = BlockUtils.getDB(containerData, cluster.getConf());
        BlockIterator<BlockData> keyValueBlockIterator = db.getStore().getBlockIterator()) {
        while (keyValueBlockIterator.hasNext()) {
            BlockData blockData = keyValueBlockIterator.nextBlock();
            if (blockData.getBlockID().getLocalID() == localID) {
                long length = 0;
                List<ContainerProtos.ChunkInfo> chunks = blockData.getChunks();
                for (ContainerProtos.ChunkInfo chunk : chunks) {
                    length += chunk.getLen();
                }
                Assert.assertEquals(keyValue.getBytes(UTF_8).length, length);
                break;
            }
        }
    }
}
Also used : HddsDatanodeService(org.apache.hadoop.ozone.HddsDatanodeService) KeyValueContainerData(org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData) OzoneVolume(org.apache.hadoop.ozone.client.OzoneVolume) OzoneBucket(org.apache.hadoop.ozone.client.OzoneBucket) DatanodeDetails(org.apache.hadoop.hdds.protocol.DatanodeDetails) BlockData(org.apache.hadoop.ozone.container.common.helpers.BlockData) OzoneInputStream(org.apache.hadoop.ozone.client.io.OzoneInputStream) ContainerProtos(org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos) OzoneKeyLocation(org.apache.hadoop.ozone.client.OzoneKeyLocation) OzoneOutputStream(org.apache.hadoop.ozone.client.io.OzoneOutputStream) OmKeyArgs(org.apache.hadoop.ozone.om.helpers.OmKeyArgs) OmKeyLocationInfo(org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo) ReferenceCountedDB(org.apache.hadoop.ozone.container.common.utils.ReferenceCountedDB) Pipeline(org.apache.hadoop.hdds.scm.pipeline.Pipeline) OzoneKeyDetails(org.apache.hadoop.ozone.client.OzoneKeyDetails) ContainerInfo(org.apache.hadoop.hdds.scm.container.ContainerInfo) Test(org.junit.Test)
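
Supplementary note: a hedged client-side counterpart to the size check above, using only the accessors this example already relies on (getOzoneKeyLocations, getLength, getDataSize). The helper class and method names are hypothetical.

import org.apache.hadoop.ozone.client.OzoneKeyDetails;
import org.apache.hadoop.ozone.client.OzoneKeyLocation;

final class KeyLocationChecks {
    private KeyLocationChecks() {
    }

    // Returns true if the block lengths reported by the client add up to the
    // key's data size (expected for a simple, single-version key).
    static boolean locationsMatchDataSize(OzoneKeyDetails details) {
        long total = 0L;
        for (OzoneKeyLocation location : details.getOzoneKeyLocations()) {
            total += location.getLength();
        }
        return total == details.getDataSize();
    }
}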

Example 3 with OzoneKeyLocation

Use of org.apache.hadoop.ozone.client.OzoneKeyLocation in project ozone by apache.

The class TestOzoneRpcClientAbstract, method testReadKeyWithCorruptedDataWithMutiNodes:

/**
 * Tests that reading a corrupted chunk file throws a checksum exception.
 * @throws IOException
 */
@Test
public void testReadKeyWithCorruptedDataWithMutiNodes() throws IOException {
    String volumeName = UUID.randomUUID().toString();
    String bucketName = UUID.randomUUID().toString();
    String value = "sample value";
    byte[] data = value.getBytes(UTF_8);
    store.createVolume(volumeName);
    OzoneVolume volume = store.getVolume(volumeName);
    volume.createBucket(bucketName);
    OzoneBucket bucket = volume.getBucket(bucketName);
    String keyName = UUID.randomUUID().toString();
    // Write data into a key
    OzoneOutputStream out = bucket.createKey(keyName, value.getBytes(UTF_8).length,
        ReplicationType.RATIS, THREE, new HashMap<>());
    out.write(value.getBytes(UTF_8));
    out.close();
    // We need to find the location of the chunk file corresponding to the
    // data we just wrote.
    OzoneKey key = bucket.getKey(keyName);
    List<OzoneKeyLocation> keyLocation = ((OzoneKeyDetails) key).getOzoneKeyLocations();
    Assert.assertTrue("Key location not found in OM", !keyLocation.isEmpty());
    long containerID = ((OzoneKeyDetails) key).getOzoneKeyLocations().get(0).getContainerID();
    // Get the container by traversing the datanodes.
    List<Container> containerList = new ArrayList<>();
    Container container;
    for (HddsDatanodeService hddsDatanode : cluster.getHddsDatanodes()) {
        container = hddsDatanode.getDatanodeStateMachine().getContainer().getContainerSet().getContainer(containerID);
        if (container != null) {
            containerList.add(container);
            if (containerList.size() == 3) {
                break;
            }
        }
    }
    Assert.assertTrue("Container not found", !containerList.isEmpty());
    corruptData(containerList.get(0), key);
    // failover to next replica
    try {
        OzoneInputStream is = bucket.readKey(keyName);
        byte[] b = new byte[data.length];
        is.read(b);
        Assert.assertTrue(Arrays.equals(b, data));
    } catch (OzoneChecksumException e) {
        fail("Reading corrupted data should not fail.");
    }
    corruptData(containerList.get(1), key);
    // failover to next replica
    try {
        OzoneInputStream is = bucket.readKey(keyName);
        byte[] b = new byte[data.length];
        is.read(b);
        Assert.assertTrue(Arrays.equals(b, data));
    } catch (OzoneChecksumException e) {
        fail("Reading corrupted data should not fail.");
    }
    corruptData(containerList.get(2), key);
    // Try reading the key. The read should fail here as all the replicas are corrupt.
    try {
        OzoneInputStream is = bucket.readKey(keyName);
        byte[] b = new byte[data.length];
        is.read(b);
        fail("Reading corrupted data should fail.");
    } catch (IOException e) {
        GenericTestUtils.assertExceptionContains("Checksum mismatch", e);
    }
}
Also used : OzoneInputStream(org.apache.hadoop.ozone.client.io.OzoneInputStream) OzoneKeyLocation(org.apache.hadoop.ozone.client.OzoneKeyLocation) ArrayList(java.util.ArrayList) OzoneOutputStream(org.apache.hadoop.ozone.client.io.OzoneOutputStream) HddsDatanodeService(org.apache.hadoop.ozone.HddsDatanodeService) IOException(java.io.IOException) OzoneVolume(org.apache.hadoop.ozone.client.OzoneVolume) OzoneBucket(org.apache.hadoop.ozone.client.OzoneBucket) Container(org.apache.hadoop.ozone.container.common.interfaces.Container) OzoneKeyDetails(org.apache.hadoop.ozone.client.OzoneKeyDetails) OzoneChecksumException(org.apache.hadoop.ozone.common.OzoneChecksumException) OzoneKey(org.apache.hadoop.ozone.client.OzoneKey) Test(org.junit.Test)
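
Supplementary note: the failover behaviour exercised above depends on the client switching to another replica when a checksum mismatch is detected. The sketch below is only a plain read helper that drains a key through OzoneInputStream and lets any remaining IOException (for example the final checksum mismatch once every replica is corrupt) propagate to the caller. The class and method names are hypothetical.

import java.io.IOException;
import org.apache.hadoop.ozone.client.OzoneBucket;
import org.apache.hadoop.ozone.client.io.OzoneInputStream;

final class KeyReader {
    private KeyReader() {
    }

    // Reads exactly 'size' bytes of the key, looping because a single read()
    // call may return fewer bytes than requested.
    static byte[] readFully(OzoneBucket bucket, String keyName, int size)
            throws IOException {
        byte[] buffer = new byte[size];
        try (OzoneInputStream in = bucket.readKey(keyName)) {
            int offset = 0;
            while (offset < size) {
                int read = in.read(buffer, offset, size - offset);
                if (read < 0) {
                    break; // end of stream before the expected size
                }
                offset += read;
            }
        }
        return buffer;
    }
}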

Example 4 with OzoneKeyLocation

Use of org.apache.hadoop.ozone.client.OzoneKeyLocation in project ozone by apache.

The class TestReadRetries, method testPutKeyAndGetKeyThreeNodes:

@Test
public void testPutKeyAndGetKeyThreeNodes() throws Exception {
    String volumeName = UUID.randomUUID().toString();
    String bucketName = UUID.randomUUID().toString();
    String value = "sample value";
    store.createVolume(volumeName);
    OzoneVolume volume = store.getVolume(volumeName);
    volume.createBucket(bucketName);
    OzoneBucket bucket = volume.getBucket(bucketName);
    String keyName = "a/b/c/" + UUID.randomUUID().toString();
    OzoneOutputStream out = bucket.createKey(keyName, value.getBytes(UTF_8).length,
        ReplicationType.RATIS, ReplicationFactor.THREE, new HashMap<>());
    KeyOutputStream groupOutputStream = (KeyOutputStream) out.getOutputStream();
    XceiverClientFactory factory = groupOutputStream.getXceiverClientFactory();
    out.write(value.getBytes(UTF_8));
    out.close();
    // First, confirm the key info from the client matches the info in OM.
    OmKeyArgs.Builder builder = new OmKeyArgs.Builder();
    builder.setVolumeName(volumeName).setBucketName(bucketName)
        .setKeyName(keyName).setRefreshPipeline(true);
    OmKeyLocationInfo keyInfo = ozoneManager.lookupKey(builder.build())
        .getKeyLocationVersions().get(0).getBlocksLatestVersionOnly().get(0);
    long containerID = keyInfo.getContainerID();
    long localID = keyInfo.getLocalID();
    OzoneKeyDetails keyDetails = bucket.getKey(keyName);
    Assert.assertEquals(keyName, keyDetails.getName());
    List<OzoneKeyLocation> keyLocations = keyDetails.getOzoneKeyLocations();
    Assert.assertEquals(1, keyLocations.size());
    Assert.assertEquals(containerID, keyLocations.get(0).getContainerID());
    Assert.assertEquals(localID, keyLocations.get(0).getLocalID());
    // Make sure that the data size matched.
    Assert.assertEquals(value.getBytes(UTF_8).length, keyLocations.get(0).getLength());
    ContainerInfo container = cluster.getStorageContainerManager()
        .getContainerManager().getContainer(ContainerID.valueOf(containerID));
    Pipeline pipeline = cluster.getStorageContainerManager()
        .getPipelineManager().getPipeline(container.getPipelineID());
    List<DatanodeDetails> datanodes = pipeline.getNodes();
    DatanodeDetails datanodeDetails = datanodes.get(0);
    Assert.assertNotNull(datanodeDetails);
    XceiverClientSpi clientSpi = factory.acquireClient(pipeline);
    Assert.assertTrue(clientSpi instanceof XceiverClientRatis);
    XceiverClientRatis ratisClient = (XceiverClientRatis) clientSpi;
    ratisClient.watchForCommit(keyInfo.getBlockCommitSequenceId());
    // shutdown the datanode
    cluster.shutdownHddsDatanode(datanodeDetails);
    // try to read, this should be successful
    readKey(bucket, keyName, value);
    // read intermediate directory
    verifyIntermediateDir(bucket, "a/b/c");
    // shutdown the second datanode
    datanodeDetails = datanodes.get(1);
    cluster.shutdownHddsDatanode(datanodeDetails);
    // we still should be able to read via Standalone protocol
    // try to read
    readKey(bucket, keyName, value);
    // shutdown the 3rd datanode
    datanodeDetails = datanodes.get(2);
    cluster.shutdownHddsDatanode(datanodeDetails);
    try {
        // try to read
        readKey(bucket, keyName, value);
        fail("Expected exception not thrown");
    } catch (IOException e) {
        // expected: an IOException is thrown because none of the servers
        // are available
    }
    factory.releaseClient(clientSpi, false);
}
Also used : OzoneKeyLocation(org.apache.hadoop.ozone.client.OzoneKeyLocation) XceiverClientRatis(org.apache.hadoop.hdds.scm.XceiverClientRatis) OzoneOutputStream(org.apache.hadoop.ozone.client.io.OzoneOutputStream) IOException(java.io.IOException) XceiverClientFactory(org.apache.hadoop.hdds.scm.XceiverClientFactory) XceiverClientSpi(org.apache.hadoop.hdds.scm.XceiverClientSpi) OmKeyArgs(org.apache.hadoop.ozone.om.helpers.OmKeyArgs) OmKeyLocationInfo(org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo) Pipeline(org.apache.hadoop.hdds.scm.pipeline.Pipeline) OzoneVolume(org.apache.hadoop.ozone.client.OzoneVolume) OzoneBucket(org.apache.hadoop.ozone.client.OzoneBucket) OzoneKeyDetails(org.apache.hadoop.ozone.client.OzoneKeyDetails) DatanodeDetails(org.apache.hadoop.hdds.protocol.DatanodeDetails) ContainerInfo(org.apache.hadoop.hdds.scm.container.ContainerInfo) KeyOutputStream(org.apache.hadoop.ozone.client.io.KeyOutputStream) Test(org.junit.Test)
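
Supplementary note: a compact, hypothetical round-trip helper that condenses the write-and-read setup the test performs before it begins shutting datanodes down, using the same createKey signature with RATIS/THREE replication against an already created OzoneBucket. The class and method names are made up for illustration.

import java.io.IOException;
import java.util.Arrays;
import java.util.HashMap;
import org.apache.hadoop.hdds.client.ReplicationFactor;
import org.apache.hadoop.hdds.client.ReplicationType;
import org.apache.hadoop.ozone.client.OzoneBucket;
import org.apache.hadoop.ozone.client.io.OzoneInputStream;
import org.apache.hadoop.ozone.client.io.OzoneOutputStream;

final class RoundTrip {
    private RoundTrip() {
    }

    // Writes 'data' as a new key with Ratis three-way replication, reads it
    // back and reports whether the bytes match.
    static boolean writeAndReadBack(OzoneBucket bucket, String keyName, byte[] data)
            throws IOException {
        try (OzoneOutputStream out = bucket.createKey(keyName, data.length,
                ReplicationType.RATIS, ReplicationFactor.THREE, new HashMap<>())) {
            out.write(data);
        }
        byte[] readBack = new byte[data.length];
        try (OzoneInputStream in = bucket.readKey(keyName)) {
            int offset = 0;
            while (offset < readBack.length) {
                int n = in.read(readBack, offset, readBack.length - offset);
                if (n < 0) {
                    break; // end of stream before the expected size
                }
                offset += n;
            }
        }
        return Arrays.equals(readBack, data);
    }
}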

Aggregations

OzoneKeyDetails (org.apache.hadoop.ozone.client.OzoneKeyDetails): 4
OzoneKeyLocation (org.apache.hadoop.ozone.client.OzoneKeyLocation): 4
OzoneBucket (org.apache.hadoop.ozone.client.OzoneBucket): 3
OzoneVolume (org.apache.hadoop.ozone.client.OzoneVolume): 3
OzoneOutputStream (org.apache.hadoop.ozone.client.io.OzoneOutputStream): 3
OmKeyArgs (org.apache.hadoop.ozone.om.helpers.OmKeyArgs): 3
OmKeyLocationInfo (org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo): 3
Test (org.junit.Test): 3
IOException (java.io.IOException): 2
ArrayList (java.util.ArrayList): 2
DatanodeDetails (org.apache.hadoop.hdds.protocol.DatanodeDetails): 2
ContainerInfo (org.apache.hadoop.hdds.scm.container.ContainerInfo): 2
Pipeline (org.apache.hadoop.hdds.scm.pipeline.Pipeline): 2
HddsDatanodeService (org.apache.hadoop.ozone.HddsDatanodeService): 2
OzoneInputStream (org.apache.hadoop.ozone.client.io.OzoneInputStream): 2
ContainerProtos (org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos): 1
XceiverClientFactory (org.apache.hadoop.hdds.scm.XceiverClientFactory): 1
XceiverClientRatis (org.apache.hadoop.hdds.scm.XceiverClientRatis): 1
XceiverClientSpi (org.apache.hadoop.hdds.scm.XceiverClientSpi): 1
OzoneKey (org.apache.hadoop.ozone.client.OzoneKey): 1