Use of org.apache.hadoop.ozone.client.io.OzoneInputStream in project ozone by apache.
The class TestOzoneRpcClientAbstract, method testGetKeyDetails.
@Test
public void testGetKeyDetails() throws IOException {
String volumeName = UUID.randomUUID().toString();
String bucketName = UUID.randomUUID().toString();
store.createVolume(volumeName);
OzoneVolume volume = store.getVolume(volumeName);
volume.createBucket(bucketName);
OzoneBucket bucket = volume.getBucket(bucketName);
String keyName = UUID.randomUUID().toString();
String keyValue = RandomStringUtils.random(128);
// Create the initial key; the first write allocates the first block.
OzoneOutputStream out = bucket.createKey(keyName, keyValue.getBytes(UTF_8).length, RATIS, ONE, new HashMap<>());
out.write(keyValue.getBytes(UTF_8));
out.close();
// Read back the first 32 bytes to exercise the read path; close the stream when done.
try (OzoneInputStream is = bucket.readKey(keyName)) {
  byte[] fileContent = new byte[32];
  is.read(fileContent);
}
// First, confirm the key info from the client matches the info in OM.
OmKeyArgs.Builder builder = new OmKeyArgs.Builder();
builder.setVolumeName(volumeName).setBucketName(bucketName).setKeyName(keyName).setRefreshPipeline(true);
OmKeyLocationInfo keyInfo = ozoneManager.lookupKey(builder.build()).getKeyLocationVersions().get(0).getBlocksLatestVersionOnly().get(0);
long containerID = keyInfo.getContainerID();
long localID = keyInfo.getLocalID();
OzoneKeyDetails keyDetails = (OzoneKeyDetails) bucket.getKey(keyName);
Assert.assertEquals(keyName, keyDetails.getName());
List<OzoneKeyLocation> keyLocations = keyDetails.getOzoneKeyLocations();
Assert.assertEquals(1, keyLocations.size());
Assert.assertEquals(containerID, keyLocations.get(0).getContainerID());
Assert.assertEquals(localID, keyLocations.get(0).getLocalID());
// Make sure that the data size matches.
Assert.assertEquals(keyValue.getBytes(UTF_8).length, keyLocations.get(0).getLength());
// Second, sum the chunk sizes in the container (located via containerID
// and localID) and make sure the total equals the size from keyDetails.
ContainerInfo container = cluster.getStorageContainerManager().getContainerManager().getContainer(ContainerID.valueOf(containerID));
Pipeline pipeline = cluster.getStorageContainerManager().getPipelineManager().getPipeline(container.getPipelineID());
List<DatanodeDetails> datanodes = pipeline.getNodes();
Assert.assertEquals(1, datanodes.size());
DatanodeDetails datanodeDetails = datanodes.get(0);
Assert.assertNotNull(datanodeDetails);
HddsDatanodeService datanodeService = null;
for (HddsDatanodeService datanodeServiceItr : cluster.getHddsDatanodes()) {
  if (datanodeDetails.equals(datanodeServiceItr.getDatanodeDetails())) {
    datanodeService = datanodeServiceItr;
    break;
  }
}
Assert.assertNotNull("No datanode service found for " + datanodeDetails, datanodeService);
KeyValueContainerData containerData = (KeyValueContainerData) (datanodeService.getDatanodeStateMachine().getContainer().getContainerSet().getContainer(containerID).getContainerData());
try (ReferenceCountedDB db = BlockUtils.getDB(containerData, cluster.getConf());
    BlockIterator<BlockData> keyValueBlockIterator = db.getStore().getBlockIterator()) {
  while (keyValueBlockIterator.hasNext()) {
    BlockData blockData = keyValueBlockIterator.nextBlock();
    if (blockData.getBlockID().getLocalID() == localID) {
      long length = 0;
      List<ContainerProtos.ChunkInfo> chunks = blockData.getChunks();
      for (ContainerProtos.ChunkInfo chunk : chunks) {
        length += chunk.getLen();
      }
      Assert.assertEquals(keyValue.getBytes(UTF_8).length, length);
      break;
    }
  }
}
}
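A note on the read above: a single InputStream.read(byte[]) call may return fewer bytes than the buffer holds, so tests that compare whole keys are safer with a read loop. Below is a minimal sketch of such a helper, assuming the same imports as the test; the name readFully is ours, and only OzoneBucket.readKey and OzoneInputStream are taken from the snippet.
private static byte[] readFully(OzoneBucket bucket, String keyName, int expectedLength) throws IOException {
  // Hypothetical helper, not part of TestOzoneRpcClientAbstract.
  byte[] buffer = new byte[expectedLength];
  try (OzoneInputStream is = bucket.readKey(keyName)) {
    int offset = 0;
    while (offset < buffer.length) {
      int n = is.read(buffer, offset, buffer.length - offset);
      if (n < 0) {
        break; // EOF before expectedLength bytes; callers can assert on offset.
      }
      offset += n;
    }
  }
  return buffer;
}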
Use of org.apache.hadoop.ozone.client.io.OzoneInputStream in project ozone by apache.
The class TestOzoneRpcClientAbstract, method testReadKeyWithCorruptedDataWithMutiNodes.
/**
* Tests that reading a corrupted chunk file throws a checksum exception.
* @throws IOException
*/
@Test
public void testReadKeyWithCorruptedDataWithMutiNodes() throws IOException {
String volumeName = UUID.randomUUID().toString();
String bucketName = UUID.randomUUID().toString();
String value = "sample value";
byte[] data = value.getBytes(UTF_8);
store.createVolume(volumeName);
OzoneVolume volume = store.getVolume(volumeName);
volume.createBucket(bucketName);
OzoneBucket bucket = volume.getBucket(bucketName);
String keyName = UUID.randomUUID().toString();
// Write data into a key
OzoneOutputStream out = bucket.createKey(keyName, value.getBytes(UTF_8).length, ReplicationType.RATIS, THREE, new HashMap<>());
out.write(value.getBytes(UTF_8));
out.close();
// We need to find the location of the chunk file corresponding to the
// data we just wrote.
OzoneKey key = bucket.getKey(keyName);
List<OzoneKeyLocation> keyLocation = ((OzoneKeyDetails) key).getOzoneKeyLocations();
Assert.assertTrue("Key location not found in OM", !keyLocation.isEmpty());
long containerID = ((OzoneKeyDetails) key).getOzoneKeyLocations().get(0).getContainerID();
// Get the container by traversing the datanodes.
List<Container> containerList = new ArrayList<>();
Container container;
for (HddsDatanodeService hddsDatanode : cluster.getHddsDatanodes()) {
  container = hddsDatanode.getDatanodeStateMachine().getContainer().getContainerSet().getContainer(containerID);
  if (container != null) {
    containerList.add(container);
    if (containerList.size() == 3) {
      break;
    }
  }
}
Assert.assertTrue("Container not found", !containerList.isEmpty());
corruptData(containerList.get(0), key);
// Fail over to the next replica.
try (OzoneInputStream is = bucket.readKey(keyName)) {
  byte[] b = new byte[data.length];
  is.read(b);
  Assert.assertArrayEquals(data, b);
} catch (OzoneChecksumException e) {
  fail("Reading corrupted data should not fail.");
}
corruptData(containerList.get(1), key);
// Fail over to the next replica.
try (OzoneInputStream is = bucket.readKey(keyName)) {
  byte[] b = new byte[data.length];
  is.read(b);
  Assert.assertArrayEquals(data, b);
} catch (OzoneChecksumException e) {
  fail("Reading corrupted data should not fail.");
}
corruptData(containerList.get(2), key);
// Try reading the key. The read fails here because all the replicas are corrupt.
try (OzoneInputStream is = bucket.readKey(keyName)) {
  byte[] b = new byte[data.length];
  is.read(b);
  fail("Reading corrupted data should fail.");
} catch (IOException e) {
  GenericTestUtils.assertExceptionContains("Checksum mismatch", e);
}
}
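The three read-and-verify rounds above repeat one pattern; a hedged sketch of how they could be factored into a helper follows. assertKeyContent is our own name; bucket.readKey, OzoneInputStream, and the Assert calls are the ones the test already uses.
private static void assertKeyContent(OzoneBucket bucket, String keyName, byte[] expected) throws IOException {
  // Hypothetical helper: read the key fully, then compare against the expected bytes.
  byte[] actual = new byte[expected.length];
  int offset = 0;
  try (OzoneInputStream is = bucket.readKey(keyName)) {
    int n;
    while (offset < actual.length && (n = is.read(actual, offset, actual.length - offset)) > 0) {
      offset += n;
    }
  }
  Assert.assertEquals("Short read for key " + keyName, expected.length, offset);
  Assert.assertArrayEquals(expected, actual);
}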
Use of org.apache.hadoop.ozone.client.io.OzoneInputStream in project ozone by apache.
The class TestOzoneRpcClientAbstract, method testCommitPartAfterCompleteUpload.
@Test
public void testCommitPartAfterCompleteUpload() throws Exception {
String volumeName = UUID.randomUUID().toString();
String bucketName = UUID.randomUUID().toString();
String keyName = UUID.randomUUID().toString();
store.createVolume(volumeName);
OzoneVolume volume = store.getVolume(volumeName);
volume.createBucket(bucketName);
OzoneBucket bucket = volume.getBucket(bucketName);
OmMultipartInfo omMultipartInfo = bucket.initiateMultipartUpload(keyName, RATIS, ONE);
Assert.assertNotNull(omMultipartInfo.getUploadID());
String uploadID = omMultipartInfo.getUploadID();
// upload part 1.
byte[] data = generateData(5 * 1024 * 1024, (byte) RandomUtils.nextLong());
OzoneOutputStream ozoneOutputStream = bucket.createMultipartKey(keyName, data.length, 1, uploadID);
ozoneOutputStream.write(data, 0, data.length);
ozoneOutputStream.close();
OmMultipartCommitUploadPartInfo omMultipartCommitUploadPartInfo = ozoneOutputStream.getCommitUploadPartInfo();
// Do not close output stream for part 2.
ozoneOutputStream = bucket.createMultipartKey(keyName, data.length, 2, omMultipartInfo.getUploadID());
ozoneOutputStream.write(data, 0, data.length);
Map<Integer, String> partsMap = new LinkedHashMap<>();
partsMap.put(1, omMultipartCommitUploadPartInfo.getPartName());
OmMultipartUploadCompleteInfo omMultipartUploadCompleteInfo = bucket.completeMultipartUpload(keyName, uploadID, partsMap);
Assert.assertNotNull(omMultipartUploadCompleteInfo);
byte[] fileContent = new byte[data.length];
try (OzoneInputStream inputStream = bucket.readKey(keyName)) {
  inputStream.read(fileContent);
}
StringBuilder sb = new StringBuilder(data.length);
// Combine all parts' data and check that it matches the key data read back.
String part1 = new String(data, UTF_8);
sb.append(part1);
Assert.assertEquals(sb.toString(), new String(fileContent, UTF_8));
try {
  ozoneOutputStream.close();
  fail("Closing the uncommitted part after complete-upload should fail.");
} catch (IOException ex) {
  assertTrue(ex instanceof OMException);
  assertEquals(NO_SUCH_MULTIPART_UPLOAD_ERROR, ((OMException) ex).getResult());
}
}
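For contrast with the failure path above, here is a minimal sketch of the normal flow for a single part, using only the OzoneBucket calls already shown; variable names are illustrative, and the setup (bucket, keyName, data) is assumed from the test.
// Upload one part, then complete the upload with it.
OmMultipartInfo info = bucket.initiateMultipartUpload(keyName, RATIS, ONE);
String uploadId = info.getUploadID();
OzoneOutputStream partStream = bucket.createMultipartKey(keyName, data.length, 1, uploadId);
partStream.write(data, 0, data.length);
partStream.close(); // the part is committed to the Ozone Manager on close
Map<Integer, String> parts = new LinkedHashMap<>();
parts.put(1, partStream.getCommitUploadPartInfo().getPartName());
bucket.completeMultipartUpload(keyName, uploadId, parts);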
Use of org.apache.hadoop.ozone.client.io.OzoneInputStream in project ozone by apache.
The class TestOzoneRpcClientAbstract, method doMultipartUpload.
private void doMultipartUpload(OzoneBucket bucket, String keyName, byte val) throws Exception {
// Initiate Multipart upload request
String uploadID = initiateMultipartUpload(bucket, keyName, ReplicationType.RATIS, THREE);
// Upload parts
Map<Integer, String> partsMap = new TreeMap<>();
// Use 5 MB of data per part: every part except the last must be at least 5 MB.
int length = 0;
byte[] data = generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, val);
String partName = uploadPart(bucket, keyName, uploadID, 1, data);
partsMap.put(1, partName);
length += data.length;
partName = uploadPart(bucket, keyName, uploadID, 2, data);
partsMap.put(2, partName);
length += data.length;
String part3 = UUID.randomUUID().toString();
partName = uploadPart(bucket, keyName, uploadID, 3, part3.getBytes(UTF_8));
partsMap.put(3, partName);
length += part3.getBytes(UTF_8).length;
// Complete multipart upload request
completeMultipartUpload(bucket, keyName, uploadID, partsMap);
// Now read back the key whose multipart upload has completed.
byte[] fileContent = new byte[data.length + data.length + part3.getBytes(UTF_8).length];
try (OzoneInputStream inputStream = bucket.readKey(keyName)) {
  inputStream.read(fileContent);
}
Assert.assertTrue(verifyRatisReplication(bucket.getVolumeName(), bucket.getName(), keyName, ReplicationType.RATIS, THREE));
StringBuilder sb = new StringBuilder(length);
// Combine all parts' data and check that it matches the key data read back.
String part1 = new String(data, UTF_8);
String part2 = new String(data, UTF_8);
sb.append(part1);
sb.append(part2);
sb.append(part3);
Assert.assertEquals(sb.toString(), new String(fileContent, UTF_8));
String ozoneKey = ozoneManager.getMetadataManager().getOzoneKey(bucket.getVolumeName(), bucket.getName(), keyName);
OmKeyInfo omKeyInfo = ozoneManager.getMetadataManager().getKeyTable(getBucketLayout()).get(ozoneKey);
OmKeyLocationInfoGroup latestVersionLocations = omKeyInfo.getLatestVersionLocations();
Assert.assertTrue(latestVersionLocations.isMultipartKey());
latestVersionLocations.getBlocksLatestVersionOnly().forEach(omKeyLocationInfo -> Assert.assertTrue(omKeyLocationInfo.getPartNumber() != -1));
}
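doMultipartUpload relies on three private helpers of the test class whose bodies are not shown here. Given the call sites above, plausible sketches look like the following; the assertion details are our guess, not the project's exact code.
private String initiateMultipartUpload(OzoneBucket bucket, String keyName, ReplicationType type, ReplicationFactor factor) throws Exception {
  OmMultipartInfo multipartInfo = bucket.initiateMultipartUpload(keyName, type, factor);
  Assert.assertNotNull(multipartInfo.getUploadID());
  return multipartInfo.getUploadID();
}
private String uploadPart(OzoneBucket bucket, String keyName, String uploadID, int partNumber, byte[] data) throws Exception {
  OzoneOutputStream out = bucket.createMultipartKey(keyName, data.length, partNumber, uploadID);
  out.write(data, 0, data.length);
  out.close(); // commits the part
  return out.getCommitUploadPartInfo().getPartName();
}
private void completeMultipartUpload(OzoneBucket bucket, String keyName, String uploadID, Map<Integer, String> partsMap) throws Exception {
  OmMultipartUploadCompleteInfo completeInfo = bucket.completeMultipartUpload(keyName, uploadID, partsMap);
  Assert.assertNotNull(completeInfo);
}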
Use of org.apache.hadoop.ozone.client.io.OzoneInputStream in project ozone by apache.
The class TestOzoneAtRestEncryption, method createAndVerifyKeyData.
private void createAndVerifyKeyData(OzoneBucket bucket) throws Exception {
Instant testStartTime = Instant.now();
String keyName = UUID.randomUUID().toString();
String value = "sample value";
try (OzoneOutputStream out = bucket.createKey(keyName, value.getBytes(StandardCharsets.UTF_8).length, ReplicationType.RATIS, ReplicationFactor.ONE, new HashMap<>())) {
  out.write(value.getBytes(StandardCharsets.UTF_8));
}
// Verify content.
OzoneKeyDetails key = bucket.getKey(keyName);
Assert.assertEquals(keyName, key.getName());
// Check that the file encryption info is set; keys in an encrypted bucket
// are encrypted with it.
Assert.assertNotNull(key.getFileEncryptionInfo());
byte[] fileContent;
int len = 0;
try (OzoneInputStream is = bucket.readKey(keyName)) {
  fileContent = new byte[value.getBytes(StandardCharsets.UTF_8).length];
  len = is.read(fileContent);
}
Assert.assertEquals(value.length(), len);
Assert.assertTrue(verifyRatisReplication(bucket.getVolumeName(), bucket.getName(), keyName, ReplicationType.RATIS, ReplicationFactor.ONE));
Assert.assertEquals(value, new String(fileContent, StandardCharsets.UTF_8));
Assert.assertFalse(key.getCreationTime().isBefore(testStartTime));
Assert.assertFalse(key.getModificationTime().isBefore(testStartTime));
}
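For context, createAndVerifyKeyData expects a bucket created with a bucket encryption key (transparent data encryption). A hedged sketch of such a setup follows; "key1" is a placeholder KMS key name that must already exist in the key provider configured for the cluster.
// Illustrative only: create a TDE-enabled bucket, then run the verification above.
BucketArgs bucketArgs = BucketArgs.newBuilder()
    .setBucketEncryptionKey("key1") // placeholder KMS key name
    .build();
volume.createBucket(bucketName, bucketArgs);
createAndVerifyKeyData(volume.getBucket(bucketName));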