use of org.apache.hadoop.ozone.client.OzoneKey in project ozone by apache.
the class TestOzoneRpcClientAbstract method testZReadKeyWithUnhealthyContainerReplica.
// Run this test last, as it has side effects on other UTs
@Test
public void testZReadKeyWithUnhealthyContainerReplica() throws Exception {
  String volumeName = UUID.randomUUID().toString();
  String bucketName = UUID.randomUUID().toString();
  String value = "sample value";
  store.createVolume(volumeName);
  OzoneVolume volume = store.getVolume(volumeName);
  volume.createBucket(bucketName);
  OzoneBucket bucket = volume.getBucket(bucketName);
  String keyName1 = UUID.randomUUID().toString();

  // Write first key
  OzoneOutputStream out = bucket.createKey(keyName1,
      value.getBytes(UTF_8).length, ReplicationType.RATIS, THREE,
      new HashMap<>());
  out.write(value.getBytes(UTF_8));
  out.close();

  // Write second key
  String keyName2 = UUID.randomUUID().toString();
  value = "unhealthy container replica";
  out = bucket.createKey(keyName2, value.getBytes(UTF_8).length,
      ReplicationType.RATIS, THREE, new HashMap<>());
  out.write(value.getBytes(UTF_8));
  out.close();

  // Find container ID
  OzoneKey key = bucket.getKey(keyName2);
  long containerID = ((OzoneKeyDetails) key).getOzoneKeyLocations().get(0)
      .getContainerID();

  // Set container replica to UNHEALTHY
  Container container;
  int index = 1;
  List<HddsDatanodeService> involvedDNs = new ArrayList<>();
  for (HddsDatanodeService hddsDatanode : cluster.getHddsDatanodes()) {
    container = hddsDatanode.getDatanodeStateMachine().getContainer()
        .getContainerSet().getContainer(containerID);
    if (container == null) {
      continue;
    }
    container.markContainerUnhealthy();
    // Lower the block commit sequence id on the first and second replicas
    if (index < 3) {
      long newBCSID = container.getBlockCommitSequenceId() - 1;
      try (ReferenceCountedDB db = BlockUtils.getDB(
          (KeyValueContainerData) container.getContainerData(),
          cluster.getConf())) {
        db.getStore().getMetadataTable()
            .put(OzoneConsts.BLOCK_COMMIT_SEQUENCE_ID, newBCSID);
      }
      container.updateBlockCommitSequenceId(newBCSID);
      index++;
    }
    involvedDNs.add(hddsDatanode);
  }

  // Restart the datanodes that hold a replica
  int dnCount = involvedDNs.size();
  for (index = 0; index < dnCount; index++) {
    if (index == dnCount - 1) {
      cluster.restartHddsDatanode(involvedDNs.get(index).getDatanodeDetails(), true);
    } else {
      cluster.restartHddsDatanode(involvedDNs.get(index).getDatanodeDetails(), false);
    }
  }
  StorageContainerManager scm = cluster.getStorageContainerManager();
  GenericTestUtils.waitFor(() -> {
    try {
      ContainerInfo containerInfo = scm.getContainerInfo(containerID);
      System.out.println("state " + containerInfo.getState());
      return containerInfo.getState() == HddsProtos.LifeCycleState.CLOSING;
    } catch (IOException e) {
      fail("Failed to get container info: " + e.getMessage());
      return false;
    }
  }, 1000, 10000);

  // Reading keyName2 should still succeed despite the unhealthy replicas
  try {
    GenericTestUtils.setLogLevel(XceiverClientGrpc.getLogger(), DEBUG);
    OzoneInputStream is = bucket.readKey(keyName2);
    byte[] content = new byte[100];
    is.read(content);
    is.close();
    String retValue = new String(content, UTF_8);
    Assert.assertTrue(value.equals(retValue.trim()));
  } catch (IOException e) {
    fail("Reading unhealthy replica should succeed.");
  }
}
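The block commit sequence id tweak above matters because the client compares replica BCSIDs while reading. For context, a key's block locations can be inspected directly through OzoneKeyDetails; a minimal sketch follows, reusing bucket and keyName2 from the test above. Only getContainerID() appears in the test itself; getLocalID() and getLength() are assumed accessors of org.apache.hadoop.ozone.client.OzoneKeyLocation.

// Sketch (not part of the test): list the block locations backing a key.
OzoneKeyDetails details = (OzoneKeyDetails) bucket.getKey(keyName2);
for (OzoneKeyLocation location : details.getOzoneKeyLocations()) {
  System.out.println("containerID=" + location.getContainerID()
      + " localID=" + location.getLocalID()
      + " length=" + location.getLength());
}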
use of org.apache.hadoop.ozone.client.OzoneKey in project ozone by apache.
the class TestOzoneRpcClientAbstract method testPutKeyRatisOneNode.
@Test
public void testPutKeyRatisOneNode() throws IOException {
  String volumeName = UUID.randomUUID().toString();
  String bucketName = UUID.randomUUID().toString();
  Instant testStartTime = Instant.now();
  String value = "sample value";
  store.createVolume(volumeName);
  OzoneVolume volume = store.getVolume(volumeName);
  volume.createBucket(bucketName);
  OzoneBucket bucket = volume.getBucket(bucketName);
  for (int i = 0; i < 10; i++) {
    String keyName = UUID.randomUUID().toString();
    OzoneOutputStream out = bucket.createKey(keyName,
        value.getBytes(UTF_8).length, ReplicationType.RATIS, ONE,
        new HashMap<>());
    out.write(value.getBytes(UTF_8));
    out.close();
    OzoneKey key = bucket.getKey(keyName);
    Assert.assertEquals(keyName, key.getName());
    OzoneInputStream is = bucket.readKey(keyName);
    byte[] fileContent = new byte[value.getBytes(UTF_8).length];
    is.read(fileContent);
    is.close();
    Assert.assertTrue(verifyRatisReplication(volumeName, bucketName, keyName,
        ReplicationType.RATIS, ONE));
    Assert.assertEquals(value, new String(fileContent, UTF_8));
    Assert.assertFalse(key.getCreationTime().isBefore(testStartTime));
    Assert.assertFalse(key.getModificationTime().isBefore(testStartTime));
  }
}
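Outside of the MiniOzoneCluster test harness, the same put/read flow is driven through an OzoneClient obtained from OzoneClientFactory. A minimal sketch, assuming a reachable Ozone cluster whose address comes from the loaded OzoneConfiguration and reusing the tests' static imports (UTF_8, ReplicationType, ONE); the volume, bucket, and key names are placeholders.

// Sketch: standalone client performing the same put/get as the test above.
OzoneConfiguration conf = new OzoneConfiguration();
try (OzoneClient client = OzoneClientFactory.getRpcClient(conf)) {
  ObjectStore objectStore = client.getObjectStore();
  objectStore.createVolume("vol1");                 // placeholder names
  OzoneVolume vol = objectStore.getVolume("vol1");
  vol.createBucket("bucket1");
  OzoneBucket bkt = vol.getBucket("bucket1");

  byte[] data = "sample value".getBytes(UTF_8);
  try (OzoneOutputStream out = bkt.createKey("key1", data.length,
      ReplicationType.RATIS, ONE, new HashMap<>())) {
    out.write(data);
  }
  try (OzoneInputStream in = bkt.readKey("key1")) {
    byte[] buf = new byte[data.length];
    in.read(buf);
  }
}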
use of org.apache.hadoop.ozone.client.OzoneKey in project ozone by apache.
the class TestOzoneRpcClientAbstract method testPutKey.
@Test
public void testPutKey() throws IOException {
  String volumeName = UUID.randomUUID().toString();
  String bucketName = UUID.randomUUID().toString();
  Instant testStartTime = Instant.now();
  String value = "sample value";
  store.createVolume(volumeName);
  OzoneVolume volume = store.getVolume(volumeName);
  volume.createBucket(bucketName);
  OzoneBucket bucket = volume.getBucket(bucketName);
  for (int i = 0; i < 10; i++) {
    String keyName = UUID.randomUUID().toString();
    OzoneOutputStream out = bucket.createKey(keyName,
        value.getBytes(UTF_8).length, RATIS, ONE, new HashMap<>());
    out.write(value.getBytes(UTF_8));
    out.close();
    OzoneKey key = bucket.getKey(keyName);
    Assert.assertEquals(keyName, key.getName());
    OzoneInputStream is = bucket.readKey(keyName);
    byte[] fileContent = new byte[value.getBytes(UTF_8).length];
    is.read(fileContent);
    is.close();
    Assert.assertTrue(verifyRatisReplication(volumeName, bucketName, keyName,
        RATIS, ONE));
    Assert.assertEquals(value, new String(fileContent, UTF_8));
    Assert.assertFalse(key.getCreationTime().isBefore(testStartTime));
    Assert.assertFalse(key.getModificationTime().isBefore(testStartTime));
  }
}
use of org.apache.hadoop.ozone.client.OzoneKey in project ozone by apache.
the class TestOzoneRpcClientAbstract method readKey.
private void readKey(OzoneBucket bucket, String keyName, String data)
    throws IOException {
  OzoneKey key = bucket.getKey(keyName);
  Assert.assertEquals(keyName, key.getName());
  OzoneInputStream is = bucket.readKey(keyName);
  byte[] fileContent = new byte[data.getBytes(UTF_8).length];
  is.read(fileContent);
  is.close();
}
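The helper above only sizes a buffer from data and discards what it reads. A small variant that also verifies the returned bytes is sketched below; it is an illustration (readAndVerifyKey is a hypothetical name), not part of the original class.

// Sketch: variant of readKey that also checks the key's content.
private void readAndVerifyKey(OzoneBucket bucket, String keyName, String data)
    throws IOException {
  Assert.assertEquals(keyName, bucket.getKey(keyName).getName());
  try (OzoneInputStream is = bucket.readKey(keyName)) {
    byte[] fileContent = new byte[data.getBytes(UTF_8).length];
    int read = is.read(fileContent);
    Assert.assertEquals(fileContent.length, read);
    Assert.assertEquals(data, new String(fileContent, UTF_8));
  }
}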
use of org.apache.hadoop.ozone.client.OzoneKey in project ozone by apache.
the class TestOzoneRpcClientAbstract method testReadKeyWithCorruptedData.
/**
 * Tests that reading a corrupted chunk file throws a checksum exception.
 * @throws IOException
 */
@Test
public void testReadKeyWithCorruptedData() throws IOException {
  String volumeName = UUID.randomUUID().toString();
  String bucketName = UUID.randomUUID().toString();
  String value = "sample value";
  store.createVolume(volumeName);
  OzoneVolume volume = store.getVolume(volumeName);
  volume.createBucket(bucketName);
  OzoneBucket bucket = volume.getBucket(bucketName);
  String keyName = UUID.randomUUID().toString();

  // Write data into a key
  OzoneOutputStream out = bucket.createKey(keyName,
      value.getBytes(UTF_8).length, ReplicationType.RATIS, ONE,
      new HashMap<>());
  out.write(value.getBytes(UTF_8));
  out.close();

  // We need to find the location of the chunk file corresponding to the
  // data we just wrote.
  OzoneKey key = bucket.getKey(keyName);
  long containerID = ((OzoneKeyDetails) key).getOzoneKeyLocations().get(0)
      .getContainerID();

  // Get the container by traversing the datanodes. At least one of the
  // datanodes must have this container.
  Container container = null;
  for (HddsDatanodeService hddsDatanode : cluster.getHddsDatanodes()) {
    container = hddsDatanode.getDatanodeStateMachine().getContainer()
        .getContainerSet().getContainer(containerID);
    if (container != null) {
      break;
    }
  }
  Assert.assertNotNull("Container not found", container);
  corruptData(container, key);

  // Reading the corrupted key should throw a checksum mismatch exception.
  try {
    OzoneInputStream is = bucket.readKey(keyName);
    is.read(new byte[100]);
    fail("Reading corrupted data should fail.");
  } catch (IOException e) {
    GenericTestUtils.assertExceptionContains("Checksum mismatch", e);
  }
}
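A caller that wants to handle this failure mode rather than assert on it can catch the checksum error explicitly. A sketch follows, assuming the mismatch surfaces as org.apache.hadoop.ozone.common.OzoneChecksumException (an IOException subclass); the test above only checks the IOException message, so the concrete type is an assumption.

// Sketch: distinguish checksum failures from other read errors.
try (OzoneInputStream is = bucket.readKey(keyName)) {
  is.read(new byte[100]);
} catch (OzoneChecksumException e) {
  // Assumption: the message contains "Checksum mismatch" as asserted above.
  System.err.println("Corrupted replica detected: " + e.getMessage());
}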