Use of org.apache.hadoop.ozone.client.OzoneKeyLocation in project ozone by apache.
The class RpcClient, method getKeyDetails.
@Override
public OzoneKeyDetails getKeyDetails(String volumeName, String bucketName,
    String keyName) throws IOException {
  Preconditions.checkNotNull(volumeName);
  Preconditions.checkNotNull(bucketName);
  Preconditions.checkNotNull(keyName);
  OmKeyArgs keyArgs = new OmKeyArgs.Builder()
      .setVolumeName(volumeName)
      .setBucketName(bucketName)
      .setKeyName(keyName)
      .setRefreshPipeline(true)
      .setSortDatanodesInPipeline(topologyAwareReadEnabled)
      .setLatestVersionLocation(getLatestVersionLocation)
      .build();
  OmKeyInfo keyInfo = ozoneManagerClient.lookupKey(keyArgs);

  // Map each OM block location to a client-side OzoneKeyLocation, tracking
  // the cumulative offset of each block within the key.
  List<OzoneKeyLocation> ozoneKeyLocations = new ArrayList<>();
  long lastKeyOffset = 0L;
  List<OmKeyLocationInfo> omKeyLocationInfos =
      keyInfo.getLatestVersionLocations().getBlocksLatestVersionOnly();
  for (OmKeyLocationInfo info : omKeyLocationInfos) {
    ozoneKeyLocations.add(new OzoneKeyLocation(info.getContainerID(),
        info.getLocalID(), info.getLength(), info.getOffset(),
        lastKeyOffset));
    lastKeyOffset += info.getLength();
  }
  return new OzoneKeyDetails(keyInfo.getVolumeName(), keyInfo.getBucketName(),
      keyInfo.getKeyName(), keyInfo.getDataSize(), keyInfo.getCreationTime(),
      keyInfo.getModificationTime(), ozoneKeyLocations,
      keyInfo.getReplicationConfig(), keyInfo.getMetadata(),
      keyInfo.getFileEncryptionInfo());
}
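The method above is the RPC-client-side path; an application reaches the same information through the public client API. Below is a minimal standalone sketch of listing a key's block locations. The volume, bucket, and key names are placeholders, the cast mirrors the tests below (which assume getKey returns an OzoneKey), and getOffset() is inferred from the constructor argument used above.

import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.ozone.client.ObjectStore;
import org.apache.hadoop.ozone.client.OzoneClient;
import org.apache.hadoop.ozone.client.OzoneClientFactory;
import org.apache.hadoop.ozone.client.OzoneKeyDetails;
import org.apache.hadoop.ozone.client.OzoneKeyLocation;

public final class KeyLocationLister {
  public static void main(String[] args) throws Exception {
    OzoneConfiguration conf = new OzoneConfiguration();
    try (OzoneClient client = OzoneClientFactory.getRpcClient(conf)) {
      ObjectStore store = client.getObjectStore();
      // "vol1", "bucket1" and "key1" are placeholders for existing objects.
      OzoneKeyDetails details = (OzoneKeyDetails) store.getVolume("vol1")
          .getBucket("bucket1").getKey("key1");
      // Print one line per block backing the key.
      for (OzoneKeyLocation loc : details.getOzoneKeyLocations()) {
        System.out.printf("container=%d localId=%d offset=%d length=%d%n",
            loc.getContainerID(), loc.getLocalID(), loc.getOffset(),
            loc.getLength());
      }
    }
  }
}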
Use of org.apache.hadoop.ozone.client.OzoneKeyLocation in project ozone by apache.
The class TestOzoneRpcClientAbstract, method testGetKeyDetails.
@Test
public void testGetKeyDetails() throws IOException {
  String volumeName = UUID.randomUUID().toString();
  String bucketName = UUID.randomUUID().toString();
  store.createVolume(volumeName);
  OzoneVolume volume = store.getVolume(volumeName);
  volume.createBucket(bucketName);
  OzoneBucket bucket = volume.getBucket(bucketName);
  String keyName = UUID.randomUUID().toString();
  String keyValue = RandomStringUtils.random(128);

  // Create the initial key with size 0; the write will allocate the first
  // block.
  OzoneOutputStream out = bucket.createKey(keyName,
      keyValue.getBytes(UTF_8).length, RATIS, ONE, new HashMap<>());
  out.write(keyValue.getBytes(UTF_8));
  out.close();
  OzoneInputStream is = bucket.readKey(keyName);
  byte[] fileContent = new byte[32];
  is.read(fileContent);

  // First, confirm the key info from the client matches the info in OM.
  OmKeyArgs.Builder builder = new OmKeyArgs.Builder();
  builder.setVolumeName(volumeName).setBucketName(bucketName)
      .setKeyName(keyName).setRefreshPipeline(true);
  OmKeyLocationInfo keyInfo = ozoneManager.lookupKey(builder.build())
      .getKeyLocationVersions().get(0).getBlocksLatestVersionOnly().get(0);
  long containerID = keyInfo.getContainerID();
  long localID = keyInfo.getLocalID();
  OzoneKeyDetails keyDetails = (OzoneKeyDetails) bucket.getKey(keyName);
  Assert.assertEquals(keyName, keyDetails.getName());

  List<OzoneKeyLocation> keyLocations = keyDetails.getOzoneKeyLocations();
  Assert.assertEquals(1, keyLocations.size());
  Assert.assertEquals(containerID, keyLocations.get(0).getContainerID());
  Assert.assertEquals(localID, keyLocations.get(0).getLocalID());
  // Make sure that the data size matches.
  Assert.assertEquals(keyValue.getBytes(UTF_8).length,
      keyLocations.get(0).getLength());

  // Second, sum the data size from the chunks in the container (located via
  // containerID and localID) and make sure it equals the size from
  // keyDetails.
  ContainerInfo container = cluster.getStorageContainerManager()
      .getContainerManager().getContainer(ContainerID.valueOf(containerID));
  Pipeline pipeline = cluster.getStorageContainerManager()
      .getPipelineManager().getPipeline(container.getPipelineID());
  List<DatanodeDetails> datanodes = pipeline.getNodes();
  Assert.assertEquals(1, datanodes.size());
  DatanodeDetails datanodeDetails = datanodes.get(0);
  Assert.assertNotNull(datanodeDetails);

  HddsDatanodeService datanodeService = null;
  for (HddsDatanodeService datanodeServiceItr : cluster.getHddsDatanodes()) {
    if (datanodeDetails.equals(datanodeServiceItr.getDatanodeDetails())) {
      datanodeService = datanodeServiceItr;
      break;
    }
  }
  KeyValueContainerData containerData =
      (KeyValueContainerData) (datanodeService.getDatanodeStateMachine()
          .getContainer().getContainerSet().getContainer(containerID)
          .getContainerData());
  try (ReferenceCountedDB db = BlockUtils.getDB(containerData,
          cluster.getConf());
      BlockIterator<BlockData> keyValueBlockIterator =
          db.getStore().getBlockIterator()) {
    while (keyValueBlockIterator.hasNext()) {
      BlockData blockData = keyValueBlockIterator.nextBlock();
      if (blockData.getBlockID().getLocalID() == localID) {
        long length = 0;
        List<ContainerProtos.ChunkInfo> chunks = blockData.getChunks();
        for (ContainerProtos.ChunkInfo chunk : chunks) {
          length += chunk.getLen();
        }
        Assert.assertEquals(keyValue.getBytes(UTF_8).length, length);
        break;
      }
    }
  }
}
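The test above covers a single-block key. For a key large enough to span several blocks, the offset bookkeeping done by RpcClient.getKeyDetails can be verified in a loop. A hedged sketch of such a check follows; the helper name is ours (not part of the test class), and getKeyOffset() is inferred from the keyOffset argument of the OzoneKeyLocation constructor shown earlier.

// Hypothetical helper: each location's key offset should equal the running
// total of the preceding block lengths, and the lengths should sum to the
// key's data size.
static void assertLocationsCoverKey(OzoneKeyDetails keyDetails) {
  long runningOffset = 0;
  for (OzoneKeyLocation location : keyDetails.getOzoneKeyLocations()) {
    Assert.assertEquals(runningOffset, location.getKeyOffset());
    runningOffset += location.getLength();
  }
  Assert.assertEquals(keyDetails.getDataSize(), runningOffset);
}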
Use of org.apache.hadoop.ozone.client.OzoneKeyLocation in project ozone by apache.
The class TestOzoneRpcClientAbstract, method testReadKeyWithCorruptedDataWithMutiNodes.
/**
 * Tests that reading a corrupted chunk file throws a checksum exception.
 * @throws IOException
 */
@Test
public void testReadKeyWithCorruptedDataWithMutiNodes() throws IOException {
  String volumeName = UUID.randomUUID().toString();
  String bucketName = UUID.randomUUID().toString();
  String value = "sample value";
  byte[] data = value.getBytes(UTF_8);
  store.createVolume(volumeName);
  OzoneVolume volume = store.getVolume(volumeName);
  volume.createBucket(bucketName);
  OzoneBucket bucket = volume.getBucket(bucketName);
  String keyName = UUID.randomUUID().toString();

  // Write data into a key.
  OzoneOutputStream out = bucket.createKey(keyName,
      value.getBytes(UTF_8).length, ReplicationType.RATIS, THREE,
      new HashMap<>());
  out.write(value.getBytes(UTF_8));
  out.close();

  // We need to find the location of the chunk file corresponding to the
  // data we just wrote.
  OzoneKey key = bucket.getKey(keyName);
  List<OzoneKeyLocation> keyLocation =
      ((OzoneKeyDetails) key).getOzoneKeyLocations();
  Assert.assertFalse("Key location not found in OM", keyLocation.isEmpty());
  long containerID = ((OzoneKeyDetails) key).getOzoneKeyLocations().get(0)
      .getContainerID();

  // Get the container by traversing the datanodes.
  List<Container> containerList = new ArrayList<>();
  Container container;
  for (HddsDatanodeService hddsDatanode : cluster.getHddsDatanodes()) {
    container = hddsDatanode.getDatanodeStateMachine().getContainer()
        .getContainerSet().getContainer(containerID);
    if (container != null) {
      containerList.add(container);
      if (containerList.size() == 3) {
        break;
      }
    }
  }
  Assert.assertFalse("Container not found", containerList.isEmpty());

  // Corrupt the first replica; the read should fail over to the next one.
  corruptData(containerList.get(0), key);
  try {
    OzoneInputStream is = bucket.readKey(keyName);
    byte[] b = new byte[data.length];
    is.read(b);
    Assert.assertTrue(Arrays.equals(b, data));
  } catch (OzoneChecksumException e) {
    fail("Reading corrupted data should not fail.");
  }

  // Corrupt the second replica; the read should fail over to the last one.
  corruptData(containerList.get(1), key);
  try {
    OzoneInputStream is = bucket.readKey(keyName);
    byte[] b = new byte[data.length];
    is.read(b);
    Assert.assertTrue(Arrays.equals(b, data));
  } catch (OzoneChecksumException e) {
    fail("Reading corrupted data should not fail.");
  }

  // Corrupt the last replica. Now the read must fail, as all the replicas
  // are corrupt.
  corruptData(containerList.get(2), key);
  try {
    OzoneInputStream is = bucket.readKey(keyName);
    byte[] b = new byte[data.length];
    is.read(b);
    fail("Reading corrupted data should fail.");
  } catch (IOException e) {
    GenericTestUtils.assertExceptionContains("Checksum mismatch", e);
  }
}
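The corruptData helper invoked above is defined elsewhere in TestOzoneRpcClientAbstract and is not shown here. A simplified sketch of the idea: locate a replica's chunk file on disk and overwrite it so the next read of that block trips checksum verification. Everything below is an illustrative assumption, not the actual helper; in particular, the real helper resolves the chunk file for the specific key, while this sketch reduces the lookup to "first file in the container's chunks directory" and assumes getChunksPath() on KeyValueContainerData.

// Illustrative sketch only -- not the actual corruptData helper.
static void corruptFirstChunkFile(KeyValueContainerData containerData)
    throws IOException {
  java.nio.file.Path chunksDir =
      java.nio.file.Paths.get(containerData.getChunksPath());
  try (java.util.stream.Stream<java.nio.file.Path> files =
      java.nio.file.Files.list(chunksDir)) {
    java.nio.file.Path chunkFile = files.findFirst()
        .orElseThrow(() -> new IOException("no chunk file found"));
    // Overwrite the leading bytes so the stored checksum no longer matches.
    byte[] garbage = "corrupted chunk data".getBytes(UTF_8);
    java.nio.file.Files.write(chunkFile, garbage,
        java.nio.file.StandardOpenOption.WRITE);
  }
}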
Use of org.apache.hadoop.ozone.client.OzoneKeyLocation in project ozone by apache.
The class TestReadRetries, method testPutKeyAndGetKeyThreeNodes.
@Test
public void testPutKeyAndGetKeyThreeNodes() throws Exception {
  String volumeName = UUID.randomUUID().toString();
  String bucketName = UUID.randomUUID().toString();
  String value = "sample value";
  store.createVolume(volumeName);
  OzoneVolume volume = store.getVolume(volumeName);
  volume.createBucket(bucketName);
  OzoneBucket bucket = volume.getBucket(bucketName);
  String keyName = "a/b/c/" + UUID.randomUUID().toString();

  OzoneOutputStream out = bucket.createKey(keyName,
      value.getBytes(UTF_8).length, ReplicationType.RATIS,
      ReplicationFactor.THREE, new HashMap<>());
  KeyOutputStream groupOutputStream = (KeyOutputStream) out.getOutputStream();
  XceiverClientFactory factory = groupOutputStream.getXceiverClientFactory();
  out.write(value.getBytes(UTF_8));
  out.close();

  // First, confirm the key info from the client matches the info in OM.
  OmKeyArgs.Builder builder = new OmKeyArgs.Builder();
  builder.setVolumeName(volumeName).setBucketName(bucketName)
      .setKeyName(keyName).setRefreshPipeline(true);
  OmKeyLocationInfo keyInfo = ozoneManager.lookupKey(builder.build())
      .getKeyLocationVersions().get(0).getBlocksLatestVersionOnly().get(0);
  long containerID = keyInfo.getContainerID();
  long localID = keyInfo.getLocalID();
  OzoneKeyDetails keyDetails = bucket.getKey(keyName);
  Assert.assertEquals(keyName, keyDetails.getName());

  List<OzoneKeyLocation> keyLocations = keyDetails.getOzoneKeyLocations();
  Assert.assertEquals(1, keyLocations.size());
  Assert.assertEquals(containerID, keyLocations.get(0).getContainerID());
  Assert.assertEquals(localID, keyLocations.get(0).getLocalID());
  // Make sure that the data size matches.
  Assert.assertEquals(value.getBytes(UTF_8).length,
      keyLocations.get(0).getLength());

  ContainerInfo container = cluster.getStorageContainerManager()
      .getContainerManager().getContainer(ContainerID.valueOf(containerID));
  Pipeline pipeline = cluster.getStorageContainerManager()
      .getPipelineManager().getPipeline(container.getPipelineID());
  List<DatanodeDetails> datanodes = pipeline.getNodes();
  DatanodeDetails datanodeDetails = datanodes.get(0);
  Assert.assertNotNull(datanodeDetails);

  XceiverClientSpi clientSpi = factory.acquireClient(pipeline);
  Assert.assertTrue(clientSpi instanceof XceiverClientRatis);
  XceiverClientRatis ratisClient = (XceiverClientRatis) clientSpi;
  ratisClient.watchForCommit(keyInfo.getBlockCommitSequenceId());

  // Shut down the first datanode; the read should still succeed.
  cluster.shutdownHddsDatanode(datanodeDetails);
  readKey(bucket, keyName, value);
  // Read the intermediate directory.
  verifyIntermediateDir(bucket, "a/b/c");

  // Shut down the second datanode; we should still be able to read via the
  // standalone protocol.
  datanodeDetails = datanodes.get(1);
  cluster.shutdownHddsDatanode(datanodeDetails);
  readKey(bucket, keyName, value);

  // Shut down the third datanode.
  datanodeDetails = datanodes.get(2);
  cluster.shutdownHddsDatanode(datanodeDetails);
  try {
    readKey(bucket, keyName, value);
    fail("Expected exception not thrown");
  } catch (IOException e) {
    // Expected: an IOException is thrown, as none of the replicas
    // are available.
  }
  factory.releaseClient(clientSpi, false);
}
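The readKey helper used throughout this test is likewise defined elsewhere in TestReadRetries. A plausible sketch, using only APIs that appear in the snippets above (the project's exact implementation may differ):

// Hypothetical reconstruction of the readKey helper: read the key back and
// verify its contents match what was written.
private void readKey(OzoneBucket bucket, String keyName, String data)
    throws IOException {
  try (OzoneInputStream is = bucket.readKey(keyName)) {
    byte[] fileContent = new byte[data.getBytes(UTF_8).length];
    is.read(fileContent);
    Assert.assertEquals(data, new String(fileContent, UTF_8));
  }
}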