Use of org.apache.hadoop.ozone.HddsDatanodeService in project ozone by apache.
The class TestHDDSUpgrade, method testAllPossibleDataNodeFailuresAndSCMFailures.
/*
* Two-node (SCM and a targeted DataNode) combination failure case:
* Thread contexts:
* DataNode failure in its own DataNode-Upgrade-Context.
* SCM failure in its own SCM-Upgrade-Context.
*
* Fail the same DataNode that is going through its own upgrade processing
* at a specific code execution point, and also fail the SCM while the SCM
* is going through upgrade finalization. This test covers all combinations
* of SCM-Upgrade-execution points and DataNode-Upgrade-execution points.
*/
@Test
public void testAllPossibleDataNodeFailuresAndSCMFailures() throws Exception {
// Iterate over all injection points in the SCM's upgrade-finalization execution.
for (UpgradeTestInjectionPoints scmInjectionPoint : UpgradeTestInjectionPoints.values()) {
InjectedUpgradeFinalizationExecutor scmFinalizationExecutor = new InjectedUpgradeFinalizationExecutor();
scmFinalizationExecutor.configureTestInjectionFunction(scmInjectionPoint, () -> {
return this.injectSCMFailureDuringSCMUpgrade();
});
((BasicUpgradeFinalizer) scm.getUpgradeFinalizer()).setFinalizationExecutor(scmFinalizationExecutor);
for (UpgradeTestInjectionPoints datanodeInjectionPoint : UpgradeTestInjectionPoints.values()) {
HddsDatanodeService ds = cluster.getHddsDatanodes().get(1);
testPassed.set(true);
Thread dataNodefailureInjectionThread = injectDataNodeFailureDuringDataNodeUpgrade(ds.getDatanodeDetails());
InjectedUpgradeFinalizationExecutor dataNodeFinalizationExecutor = new InjectedUpgradeFinalizationExecutor();
dataNodeFinalizationExecutor.configureTestInjectionFunction(datanodeInjectionPoint, () -> {
dataNodefailureInjectionThread.start();
return true;
});
((BasicUpgradeFinalizer) ds.getDatanodeStateMachine().getUpgradeFinalizer()).setFinalizationExecutor(dataNodeFinalizationExecutor);
testFinalizationWithFailureInjectionHelper(dataNodefailureInjectionThread);
Assert.assertTrue(testPassed.get());
synchronized (cluster) {
shutdown();
init();
}
LOG.info("testAllPossibleDataNodeFailuresAndSCMFailures: " + "DataNode-Failure-Injection-Point={} with " + "Scm-FailureInjection-Point={} passed.", datanodeInjectionPoint.name(), scmInjectionPoint.name());
}
}
}
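Reduced to its core, the injection pattern used above is: build an InjectedUpgradeFinalizationExecutor, register a failure callback for one injection point, and install the executor on the SCM's upgrade finalizer. The sketch below is illustrative only; it reuses the names from the test (scm, UpgradeTestInjectionPoints, injectSCMFailureDuringSCMUpgrade) and simply picks the first available injection point rather than a specific one.
// Pick any one of the available injection points (an arbitrary, illustrative choice).
UpgradeTestInjectionPoints injectionPoint = UpgradeTestInjectionPoints.values()[0];
InjectedUpgradeFinalizationExecutor scmExecutor = new InjectedUpgradeFinalizationExecutor();
// Run the failure-injection callback when SCM finalization reaches the chosen point.
scmExecutor.configureTestInjectionFunction(injectionPoint, () -> {
return this.injectSCMFailureDuringSCMUpgrade();
});
// Install the executor so the next finalization run goes through it.
((BasicUpgradeFinalizer) scm.getUpgradeFinalizer()).setFinalizationExecutor(scmExecutor);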
Use of org.apache.hadoop.ozone.HddsDatanodeService in project ozone by apache.
The class TestHDDSUpgrade, method testDataNodeAndSCMFailuresTogetherDuringDataNodeUpgrade.
/*
* Two-node (SCM and a targeted DataNode failing together at the same time)
* combination failure case:
* Thread context:
* DataNode-Upgrade-Finalizer-Context.
*
* Fail the DataNode and the SCM together while the DataNode is going
* through upgrade. This test covers all the DataNode-Upgrade-execution
* points.
*/
@Test
public void testDataNodeAndSCMFailuresTogetherDuringDataNodeUpgrade() throws Exception {
for (UpgradeTestInjectionPoints injectionPoint : UpgradeTestInjectionPoints.values()) {
testPassed.set(true);
Thread helpingFailureInjectionThread = injectSCMAndDataNodeFailureTogetherAtTheSameTime();
HddsDatanodeService ds = cluster.getHddsDatanodes().get(1);
InjectedUpgradeFinalizationExecutor dataNodeFinalizationExecutor = new InjectedUpgradeFinalizationExecutor();
dataNodeFinalizationExecutor.configureTestInjectionFunction(injectionPoint, () -> {
helpingFailureInjectionThread.start();
return true;
});
((BasicUpgradeFinalizer) ds.getDatanodeStateMachine().getUpgradeFinalizer()).setFinalizationExecutor(dataNodeFinalizationExecutor);
testFinalizationWithFailureInjectionHelper(helpingFailureInjectionThread);
Assert.assertTrue(testPassed.get());
synchronized (cluster) {
shutdown();
init();
}
LOG.info("testDataNodeAndSCMFailuresTogetherDuringDataNodeUpgrade: " + "Failure Injection Point {} passed.", injectionPoint.name());
}
}
Use of org.apache.hadoop.ozone.HddsDatanodeService in project ozone by apache.
The class TestOzoneRpcClientAbstract, method testGetKeyDetails.
@Test
public void testGetKeyDetails() throws IOException {
String volumeName = UUID.randomUUID().toString();
String bucketName = UUID.randomUUID().toString();
store.createVolume(volumeName);
OzoneVolume volume = store.getVolume(volumeName);
volume.createBucket(bucketName);
OzoneBucket bucket = volume.getBucket(bucketName);
String keyName = UUID.randomUUID().toString();
String keyValue = RandomStringUtils.random(128);
// String keyValue = "this is a test value.glx";
// Create the key; the write below will allocate the first block.
OzoneOutputStream out = bucket.createKey(keyName, keyValue.getBytes(UTF_8).length, RATIS, ONE, new HashMap<>());
out.write(keyValue.getBytes(UTF_8));
out.close();
OzoneInputStream is = bucket.readKey(keyName);
byte[] fileContent = new byte[32];
is.read(fileContent);
// First, confirm the key info from the client matches the info in OM.
OmKeyArgs.Builder builder = new OmKeyArgs.Builder();
builder.setVolumeName(volumeName).setBucketName(bucketName).setKeyName(keyName).setRefreshPipeline(true);
OmKeyLocationInfo keyInfo = ozoneManager.lookupKey(builder.build()).getKeyLocationVersions().get(0).getBlocksLatestVersionOnly().get(0);
long containerID = keyInfo.getContainerID();
long localID = keyInfo.getLocalID();
OzoneKeyDetails keyDetails = (OzoneKeyDetails) bucket.getKey(keyName);
Assert.assertEquals(keyName, keyDetails.getName());
List<OzoneKeyLocation> keyLocations = keyDetails.getOzoneKeyLocations();
Assert.assertEquals(1, keyLocations.size());
Assert.assertEquals(containerID, keyLocations.get(0).getContainerID());
Assert.assertEquals(localID, keyLocations.get(0).getLocalID());
// Make sure that the data size matched.
Assert.assertEquals(keyValue.getBytes(UTF_8).length, keyLocations.get(0).getLength());
// Second, sum the data size from the chunks in the container (looked up via
// containerID and localID) and make sure it equals the size from keyDetails.
ContainerInfo container = cluster.getStorageContainerManager().getContainerManager().getContainer(ContainerID.valueOf(containerID));
Pipeline pipeline = cluster.getStorageContainerManager().getPipelineManager().getPipeline(container.getPipelineID());
List<DatanodeDetails> datanodes = pipeline.getNodes();
Assert.assertEquals(datanodes.size(), 1);
DatanodeDetails datanodeDetails = datanodes.get(0);
Assert.assertNotNull(datanodeDetails);
HddsDatanodeService datanodeService = null;
for (HddsDatanodeService datanodeServiceItr : cluster.getHddsDatanodes()) {
if (datanodeDetails.equals(datanodeServiceItr.getDatanodeDetails())) {
datanodeService = datanodeServiceItr;
break;
}
}
KeyValueContainerData containerData = (KeyValueContainerData) (datanodeService.getDatanodeStateMachine().getContainer().getContainerSet().getContainer(containerID).getContainerData());
try (DBHandle db = BlockUtils.getDB(containerData, cluster.getConf());
BlockIterator<BlockData> keyValueBlockIterator = db.getStore().getBlockIterator(containerID)) {
while (keyValueBlockIterator.hasNext()) {
BlockData blockData = keyValueBlockIterator.nextBlock();
if (blockData.getBlockID().getLocalID() == localID) {
long length = 0;
List<ContainerProtos.ChunkInfo> chunks = blockData.getChunks();
for (ContainerProtos.ChunkInfo chunk : chunks) {
length += chunk.getLen();
}
Assert.assertEquals(length, keyValue.getBytes(UTF_8).length);
break;
}
}
}
}
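For comparison with the 32-byte read above, a full read-back check might look like the sketch below. It is an illustrative addition, not part of the original test: it reuses bucket, keyName and keyValue from the test, and loops because InputStream.read may return fewer bytes than requested.
byte[] written = keyValue.getBytes(UTF_8);
byte[] readBack = new byte[written.length];
try (OzoneInputStream in = bucket.readKey(keyName)) {
int offset = 0;
// Keep reading until the buffer is full or the stream ends.
while (offset < readBack.length) {
int n = in.read(readBack, offset, readBack.length - offset);
if (n < 0) {
break;
}
offset += n;
}
}
Assert.assertArrayEquals(written, readBack);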
Use of org.apache.hadoop.ozone.HddsDatanodeService in project ozone by apache.
The class TestOzoneRpcClientAbstract, method testReadKeyWithCorruptedDataWithMutiNodes.
/**
* Tests that reading a corrupted chunk file throws a checksum exception.
* @throws IOException
*/
@Test
public void testReadKeyWithCorruptedDataWithMutiNodes() throws IOException {
String volumeName = UUID.randomUUID().toString();
String bucketName = UUID.randomUUID().toString();
String value = "sample value";
byte[] data = value.getBytes(UTF_8);
store.createVolume(volumeName);
OzoneVolume volume = store.getVolume(volumeName);
volume.createBucket(bucketName);
OzoneBucket bucket = volume.getBucket(bucketName);
String keyName = UUID.randomUUID().toString();
// Write data into a key
OzoneOutputStream out = bucket.createKey(keyName, value.getBytes(UTF_8).length, ReplicationType.RATIS, THREE, new HashMap<>());
out.write(value.getBytes(UTF_8));
out.close();
// We need to find the location of the chunk file corresponding to the
// data we just wrote.
OzoneKey key = bucket.getKey(keyName);
List<OzoneKeyLocation> keyLocation = ((OzoneKeyDetails) key).getOzoneKeyLocations();
Assert.assertTrue("Key location not found in OM", !keyLocation.isEmpty());
long containerID = ((OzoneKeyDetails) key).getOzoneKeyLocations().get(0).getContainerID();
// Get the container by traversing the datanodes.
List<Container> containerList = new ArrayList<>();
Container container;
for (HddsDatanodeService hddsDatanode : cluster.getHddsDatanodes()) {
container = hddsDatanode.getDatanodeStateMachine().getContainer().getContainerSet().getContainer(containerID);
if (container != null) {
containerList.add(container);
if (containerList.size() == 3) {
break;
}
}
}
Assert.assertTrue("Container not found", !containerList.isEmpty());
corruptData(containerList.get(0), key);
// failover to next replica
try {
OzoneInputStream is = bucket.readKey(keyName);
byte[] b = new byte[data.length];
is.read(b);
Assert.assertTrue(Arrays.equals(b, data));
} catch (OzoneChecksumException e) {
fail("Reading corrupted data should not fail.");
}
corruptData(containerList.get(1), key);
// failover to next replica
try {
OzoneInputStream is = bucket.readKey(keyName);
byte[] b = new byte[data.length];
is.read(b);
Assert.assertTrue(Arrays.equals(b, data));
} catch (OzoneChecksumException e) {
fail("Reading corrupted data should not fail.");
}
corruptData(containerList.get(2), key);
// Try reading the key. The read will fail here as all the replicas are corrupt.
try {
OzoneInputStream is = bucket.readKey(keyName);
byte[] b = new byte[data.length];
is.read(b);
fail("Reading corrupted data should fail.");
} catch (IOException e) {
GenericTestUtils.assertExceptionContains("Checksum mismatch", e);
}
}
Use of org.apache.hadoop.ozone.HddsDatanodeService in project ozone by apache.
The class TestOzoneRpcClientAbstract, method createAndCorruptKey.
private void createAndCorruptKey(String volumeName, String bucketName, String keyName) throws IOException {
String value = "sample value";
store.createVolume(volumeName);
OzoneVolume volume = store.getVolume(volumeName);
volume.createBucket(bucketName);
OzoneBucket bucket = volume.getBucket(bucketName);
// Write data into a key
OzoneOutputStream out = bucket.createKey(keyName, value.getBytes(UTF_8).length, ReplicationType.RATIS, ONE, new HashMap<>());
out.write(value.getBytes(UTF_8));
out.close();
// We need to find the location of the chunk file corresponding to the
// data we just wrote.
OzoneKey key = bucket.getKey(keyName);
long containerID = ((OzoneKeyDetails) key).getOzoneKeyLocations().get(0).getContainerID();
// Get the container by traversing the datanodes. At least one of the
// datanodes must have this container.
Container container = null;
for (HddsDatanodeService hddsDatanode : cluster.getHddsDatanodes()) {
container = hddsDatanode.getDatanodeStateMachine().getContainer().getContainerSet().getContainer(containerID);
if (container != null) {
break;
}
}
Assert.assertNotNull("Container not found", container);
corruptData(container, key);
}
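A hypothetical usage sketch for the helper above: because the key is written with replication factor ONE, there is no healthy replica to fail over to, so a read after corruption should surface the checksum mismatch. The names and assertion style mirror the tests earlier in this section; the actual test that calls this helper is not shown here.
String volumeName = UUID.randomUUID().toString();
String bucketName = UUID.randomUUID().toString();
String keyName = UUID.randomUUID().toString();
createAndCorruptKey(volumeName, bucketName, keyName);
OzoneBucket bucket = store.getVolume(volumeName).getBucket(bucketName);
try (OzoneInputStream is = bucket.readKey(keyName)) {
// With the single replica corrupted, the read is expected to fail.
is.read(new byte[1024]);
fail("Reading corrupted data should fail.");
} catch (IOException e) {
GenericTestUtils.assertExceptionContains("Checksum mismatch", e);
}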