Search in sources :

Example 6 with HddsDatanodeService

use of org.apache.hadoop.ozone.HddsDatanodeService in project ozone by apache.

the class TestHDDSUpgrade method testAllPossibleDataNodeFailuresAndSCMFailures.

/*
   * Two-node (SCM and a targeted DataNode) combination failure case:
   * Thread-Contexts:
   *          DataNode failure in its own DataNode-Upgrade-Context.
   *          SCM failure in its own SCM-Upgrade-Context.
   *
   * Fail the same DataNode that is going through its own upgrade processing
   * at a specific code execution point. Also fail the SCM while the SCM is
   * going through upgrade finalization. This test covers all combinations
   * of SCM-Upgrade-execution points and DataNode-Upgrade-execution points.
   */
@Test
public void testAllPossibleDataNodeFailuresAndSCMFailures() throws Exception {
    // Exercise every SCM injection point against every DataNode injection
    // point; the cluster is restarted between combinations.
    for (UpgradeTestInjectionPoints scmInjectionPoint : UpgradeTestInjectionPoints.values()) {
        InjectedUpgradeFinalizationExecutor scmFinalizationExecutor = new InjectedUpgradeFinalizationExecutor();
        scmFinalizationExecutor.configureTestInjectionFunction(scmInjectionPoint, this::injectSCMFailureDuringSCMUpgrade);
        ((BasicUpgradeFinalizer) scm.getUpgradeFinalizer()).setFinalizationExecutor(scmFinalizationExecutor);
        for (UpgradeTestInjectionPoints datanodeInjectionPoint : UpgradeTestInjectionPoints.values()) {
            HddsDatanodeService ds = cluster.getHddsDatanodes().get(1);
            testPassed.set(true);
            Thread dataNodefailureInjectionThread = injectDataNodeFailureDuringDataNodeUpgrade(ds.getDatanodeDetails());
            InjectedUpgradeFinalizationExecutor dataNodeFinalizationExecutor = new InjectedUpgradeFinalizationExecutor();
            dataNodeFinalizationExecutor.configureTestInjectionFunction(datanodeInjectionPoint, () -> {
                dataNodefailureInjectionThread.start();
                return true;
            });
            ((BasicUpgradeFinalizer) ds.getDatanodeStateMachine().getUpgradeFinalizer()).setFinalizationExecutor(dataNodeFinalizationExecutor);
            testFinalizationWithFailureInjectionHelper(dataNodefailureInjectionThread);
            Assert.assertTrue(testPassed.get());
            synchronized (cluster) {
                shutdown();
                init();
            }
            LOG.info("testAllPossibleDataNodeFailuresAndSCMFailures: " + "DataNode-Failure-Injection-Point={} with " + "Scm-FailureInjection-Point={} passed.", datanodeInjectionPoint.name(), scmInjectionPoint.name());
        }
    }
}
Also used : InjectedUpgradeFinalizationExecutor(org.apache.hadoop.ozone.upgrade.InjectedUpgradeFinalizationExecutor) BasicUpgradeFinalizer(org.apache.hadoop.ozone.upgrade.BasicUpgradeFinalizer) HddsDatanodeService(org.apache.hadoop.ozone.HddsDatanodeService) UpgradeTestInjectionPoints(org.apache.hadoop.ozone.upgrade.InjectedUpgradeFinalizationExecutor.UpgradeTestInjectionPoints) Test(org.junit.jupiter.api.Test)
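
The executor pattern these upgrade tests rely on is worth spelling out. Below is a minimal, self-contained sketch of an injection-point executor; every name in it (PausePointExecutor, InjectionPoint, the step runnables) is hypothetical and only illustrates the idea behind InjectedUpgradeFinalizationExecutor: finalization runs phase by phase, and when execution reaches the configured point, a caller-supplied function fires (in the tests above, one that fails the SCM or starts a DataNode failure-injection thread).

import java.util.concurrent.Callable;

public class PausePointExecutor {

    // Hypothetical stand-in for UpgradeTestInjectionPoints.
    public enum InjectionPoint {
        BEFORE_PRE_FINALIZE, AFTER_PRE_FINALIZE, AFTER_POST_FINALIZE
    }

    private InjectionPoint configuredPoint;
    private Callable<Boolean> injection;

    // Mirrors configureTestInjectionFunction: remember where to fire and
    // what to run once execution reaches that point.
    public void configure(InjectionPoint point, Callable<Boolean> fn) {
        this.configuredPoint = point;
        this.injection = fn;
    }

    // The tests return true from the injected function to signal that the
    // injection itself succeeded.
    private void maybeInject(InjectionPoint current) throws Exception {
        if (current == configuredPoint && injection != null && !injection.call()) {
            throw new IllegalStateException("Injection failed at " + current);
        }
    }

    // Runs the finalization phases, firing the injected function when the
    // configured point is reached.
    public void execute(Runnable preFinalize, Runnable postFinalize) throws Exception {
        maybeInject(InjectionPoint.BEFORE_PRE_FINALIZE);
        preFinalize.run();
        maybeInject(InjectionPoint.AFTER_PRE_FINALIZE);
        postFinalize.run();
        maybeInject(InjectionPoint.AFTER_POST_FINALIZE);
    }
}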

Example 7 with HddsDatanodeService

use of org.apache.hadoop.ozone.HddsDatanodeService in project ozone by apache.

the class TestHDDSUpgrade method testDataNodeAndSCMFailuresTogetherDuringDataNodeUpgrade.

/*
   * Two-node (SCM and a targeted DataNode failing together at the same time)
   * combination failure case:
   * Thread-Contexts:
   *          DataNode-Upgrade-Finalizer-Context.
   *
   * Fail the DataNode and the SCM together while the DataNode is going
   * through upgrade. This test covers all combinations of
   * DataNode-Upgrade-execution points.
   */
@Test
public void testDataNodeAndSCMFailuresTogetherDuringDataNodeUpgrade() throws Exception {
    for (UpgradeTestInjectionPoints injectionPoint : UpgradeTestInjectionPoints.values()) {
        testPassed.set(true);
        Thread helpingFailureInjectionThread = injectSCMAndDataNodeFailureTogetherAtTheSameTime();
        HddsDatanodeService ds = cluster.getHddsDatanodes().get(1);
        InjectedUpgradeFinalizationExecutor dataNodeFinalizationExecutor = new InjectedUpgradeFinalizationExecutor();
        dataNodeFinalizationExecutor.configureTestInjectionFunction(injectionPoint, () -> {
            helpingFailureInjectionThread.start();
            return true;
        });
        ((BasicUpgradeFinalizer) ds.getDatanodeStateMachine().getUpgradeFinalizer()).setFinalizationExecutor(dataNodeFinalizationExecutor);
        testFinalizationWithFailureInjectionHelper(helpingFailureInjectionThread);
        Assert.assertTrue(testPassed.get());
        synchronized (cluster) {
            shutdown();
            init();
        }
        LOG.info("testDataNodeAndSCMFailuresTogetherDuringDataNodeUpgrade: " + "Failure Injection Point {} passed.", injectionPoint.name());
    }
}
Also used : InjectedUpgradeFinalizationExecutor(org.apache.hadoop.ozone.upgrade.InjectedUpgradeFinalizationExecutor) HddsDatanodeService(org.apache.hadoop.ozone.HddsDatanodeService) BasicUpgradeFinalizer(org.apache.hadoop.ozone.upgrade.BasicUpgradeFinalizer) UpgradeTestInjectionPoints(org.apache.hadoop.ozone.upgrade.InjectedUpgradeFinalizationExecutor.UpgradeTestInjectionPoints) Test(org.junit.jupiter.api.Test)
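
The combined-failure helper injectSCMAndDataNodeFailureTogetherAtTheSameTime() is not shown in this listing. A hedged sketch of what such a thread could look like follows; the restartStorageContainerManager and restartHddsDatanode calls are assumptions about the MiniOzoneCluster test API, not confirmed by the snippets above.

// Hypothetical sketch only. Both restarts are issued from one thread so
// that they land while the DataNode is mid-finalization; any failure in
// the injection itself flags the test as failed.
private Thread sketchCombinedFailureThread() {
    return new Thread(() -> {
        try {
            // Assumed MiniOzoneCluster helpers; names are illustrative.
            cluster.restartStorageContainerManager(false);
            cluster.restartHddsDatanode(1, false);
        } catch (Exception e) {
            testPassed.set(false);
        }
    });
}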

Example 8 with HddsDatanodeService

use of org.apache.hadoop.ozone.HddsDatanodeService in project ozone by apache.

the class TestOzoneRpcClientAbstract method testGetKeyDetails.

@Test
public void testGetKeyDetails() throws IOException {
    String volumeName = UUID.randomUUID().toString();
    String bucketName = UUID.randomUUID().toString();
    store.createVolume(volumeName);
    OzoneVolume volume = store.getVolume(volumeName);
    volume.createBucket(bucketName);
    OzoneBucket bucket = volume.getBucket(bucketName);
    String keyName = UUID.randomUUID().toString();
    String keyValue = RandomStringUtils.random(128);
    // Create the initial key with size 0; the write will allocate the first block.
    OzoneOutputStream out = bucket.createKey(keyName, keyValue.getBytes(UTF_8).length, RATIS, ONE, new HashMap<>());
    out.write(keyValue.getBytes(UTF_8));
    out.close();
    // Read back a prefix of the key just to exercise the read path; the
    // content itself is not verified here.
    try (OzoneInputStream is = bucket.readKey(keyName)) {
        byte[] fileContent = new byte[32];
        is.read(fileContent);
    }
    // First, confirm the key info from the client matches the info in OM.
    OmKeyArgs.Builder builder = new OmKeyArgs.Builder();
    builder.setVolumeName(volumeName).setBucketName(bucketName).setKeyName(keyName).setRefreshPipeline(true);
    OmKeyLocationInfo keyInfo = ozoneManager.lookupKey(builder.build()).getKeyLocationVersions().get(0).getBlocksLatestVersionOnly().get(0);
    long containerID = keyInfo.getContainerID();
    long localID = keyInfo.getLocalID();
    OzoneKeyDetails keyDetails = (OzoneKeyDetails) bucket.getKey(keyName);
    Assert.assertEquals(keyName, keyDetails.getName());
    List<OzoneKeyLocation> keyLocations = keyDetails.getOzoneKeyLocations();
    Assert.assertEquals(1, keyLocations.size());
    Assert.assertEquals(containerID, keyLocations.get(0).getContainerID());
    Assert.assertEquals(localID, keyLocations.get(0).getLocalID());
    // Make sure that the data size matched.
    Assert.assertEquals(keyValue.getBytes(UTF_8).length, keyLocations.get(0).getLength());
    // Second, sum the data size from chunks in Container via containerID
    // and localID, make sure the size equals to the size from keyDetails.
    ContainerInfo container = cluster.getStorageContainerManager().getContainerManager().getContainer(ContainerID.valueOf(containerID));
    Pipeline pipeline = cluster.getStorageContainerManager().getPipelineManager().getPipeline(container.getPipelineID());
    List<DatanodeDetails> datanodes = pipeline.getNodes();
    Assert.assertEquals(1, datanodes.size());
    DatanodeDetails datanodeDetails = datanodes.get(0);
    Assert.assertNotNull(datanodeDetails);
    HddsDatanodeService datanodeService = null;
    for (HddsDatanodeService datanodeServiceItr : cluster.getHddsDatanodes()) {
        if (datanodeDetails.equals(datanodeServiceItr.getDatanodeDetails())) {
            datanodeService = datanodeServiceItr;
            break;
        }
    }
    Assert.assertNotNull("Datanode hosting the container not found", datanodeService);
    KeyValueContainerData containerData = (KeyValueContainerData) (datanodeService.getDatanodeStateMachine().getContainer().getContainerSet().getContainer(containerID).getContainerData());
    try (DBHandle db = BlockUtils.getDB(containerData, cluster.getConf());
        BlockIterator<BlockData> keyValueBlockIterator = db.getStore().getBlockIterator(containerID)) {
        while (keyValueBlockIterator.hasNext()) {
            BlockData blockData = keyValueBlockIterator.nextBlock();
            if (blockData.getBlockID().getLocalID() == localID) {
                long length = 0;
                List<ContainerProtos.ChunkInfo> chunks = blockData.getChunks();
                for (ContainerProtos.ChunkInfo chunk : chunks) {
                    length += chunk.getLen();
                }
                Assert.assertEquals(keyValue.getBytes(UTF_8).length, length);
                break;
            }
        }
    }
}
Also used : HddsDatanodeService(org.apache.hadoop.ozone.HddsDatanodeService) KeyValueContainerData(org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData) OzoneVolume(org.apache.hadoop.ozone.client.OzoneVolume) OzoneBucket(org.apache.hadoop.ozone.client.OzoneBucket) DBHandle(org.apache.hadoop.ozone.container.common.interfaces.DBHandle) DatanodeDetails(org.apache.hadoop.hdds.protocol.DatanodeDetails) BlockData(org.apache.hadoop.ozone.container.common.helpers.BlockData) OzoneInputStream(org.apache.hadoop.ozone.client.io.OzoneInputStream) ContainerProtos(org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos) OzoneKeyLocation(org.apache.hadoop.ozone.client.OzoneKeyLocation) OzoneOutputStream(org.apache.hadoop.ozone.client.io.OzoneOutputStream) OmKeyArgs(org.apache.hadoop.ozone.om.helpers.OmKeyArgs) OmKeyLocationInfo(org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo) Pipeline(org.apache.hadoop.hdds.scm.pipeline.Pipeline) OzoneKeyDetails(org.apache.hadoop.ozone.client.OzoneKeyDetails) ContainerInfo(org.apache.hadoop.hdds.scm.container.ContainerInfo) ParameterizedTest(org.junit.jupiter.params.ParameterizedTest) Test(org.junit.jupiter.api.Test)
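
The chunk-length accumulation at the end of testGetKeyDetails can be read as a small helper. The sketch below uses the same BlockData and ContainerProtos.ChunkInfo calls that appear in the test; only the helper name is made up.

// Sum the lengths of all chunks backing a block. The total should equal
// the number of bytes written for the key, as the test asserts.
private static long totalChunkLength(BlockData blockData) {
    long length = 0;
    for (ContainerProtos.ChunkInfo chunk : blockData.getChunks()) {
        length += chunk.getLen();
    }
    return length;
}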

Example 9 with HddsDatanodeService

use of org.apache.hadoop.ozone.HddsDatanodeService in project ozone by apache.

the class TestOzoneRpcClientAbstract method testReadKeyWithCorruptedDataWithMutiNodes.

/**
 * Tests that reading a corrupted chunk file throws a checksum exception.
 * @throws IOException
 */
@Test
public void testReadKeyWithCorruptedDataWithMutiNodes() throws IOException {
    String volumeName = UUID.randomUUID().toString();
    String bucketName = UUID.randomUUID().toString();
    String value = "sample value";
    byte[] data = value.getBytes(UTF_8);
    store.createVolume(volumeName);
    OzoneVolume volume = store.getVolume(volumeName);
    volume.createBucket(bucketName);
    OzoneBucket bucket = volume.getBucket(bucketName);
    String keyName = UUID.randomUUID().toString();
    // Write data into a key
    OzoneOutputStream out = bucket.createKey(keyName, value.getBytes(UTF_8).length, ReplicationType.RATIS, THREE, new HashMap<>());
    out.write(value.getBytes(UTF_8));
    out.close();
    // We need to find the location of the chunk file corresponding to the
    // data we just wrote.
    OzoneKey key = bucket.getKey(keyName);
    List<OzoneKeyLocation> keyLocation = ((OzoneKeyDetails) key).getOzoneKeyLocations();
    Assert.assertTrue("Key location not found in OM", !keyLocation.isEmpty());
    long containerID = ((OzoneKeyDetails) key).getOzoneKeyLocations().get(0).getContainerID();
    // Get the container by traversing the datanodes.
    List<Container> containerList = new ArrayList<>();
    Container container;
    for (HddsDatanodeService hddsDatanode : cluster.getHddsDatanodes()) {
        container = hddsDatanode.getDatanodeStateMachine().getContainer().getContainerSet().getContainer(containerID);
        if (container != null) {
            containerList.add(container);
            if (containerList.size() == 3) {
                break;
            }
        }
    }
    Assert.assertTrue("Container not found", !containerList.isEmpty());
    corruptData(containerList.get(0), key);
    // failover to next replica
    try (OzoneInputStream is = bucket.readKey(keyName)) {
        byte[] b = new byte[data.length];
        is.read(b);
        Assert.assertArrayEquals(data, b);
    } catch (OzoneChecksumException e) {
        fail("Reading corrupted data should not fail.");
    }
    corruptData(containerList.get(1), key);
    // failover to next replica
    try (OzoneInputStream is = bucket.readKey(keyName)) {
        byte[] b = new byte[data.length];
        is.read(b);
        Assert.assertArrayEquals(data, b);
    } catch (OzoneChecksumException e) {
        fail("Reading corrupted data should not fail.");
    }
    corruptData(containerList.get(2), key);
    // Try reading the key. The read will fail here as all the replicas are corrupt.
    try (OzoneInputStream is = bucket.readKey(keyName)) {
        byte[] b = new byte[data.length];
        is.read(b);
        fail("Reading corrupted data should fail.");
    } catch (IOException e) {
        GenericTestUtils.assertExceptionContains("Checksum mismatch", e);
    }
}
Also used : OzoneInputStream(org.apache.hadoop.ozone.client.io.OzoneInputStream) OzoneKeyLocation(org.apache.hadoop.ozone.client.OzoneKeyLocation) ArrayList(java.util.ArrayList) OzoneOutputStream(org.apache.hadoop.ozone.client.io.OzoneOutputStream) HddsDatanodeService(org.apache.hadoop.ozone.HddsDatanodeService) IOException(java.io.IOException) OzoneVolume(org.apache.hadoop.ozone.client.OzoneVolume) OzoneBucket(org.apache.hadoop.ozone.client.OzoneBucket) Container(org.apache.hadoop.ozone.container.common.interfaces.Container) OzoneKeyDetails(org.apache.hadoop.ozone.client.OzoneKeyDetails) OzoneChecksumException(org.apache.hadoop.ozone.common.OzoneChecksumException) OzoneKey(org.apache.hadoop.ozone.client.OzoneKey) ParameterizedTest(org.junit.jupiter.params.ParameterizedTest) Test(org.junit.jupiter.api.Test)
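
One caveat in the reads above: a single InputStream.read(byte[]) call is not guaranteed to fill the buffer. That is harmless for the 12-byte value used here, but it can bite with larger keys. A small read-fully helper (plain java.io, nothing Ozone-specific) makes the comparison robust:

import java.io.EOFException;
import java.io.IOException;
import java.io.InputStream;

// Read exactly len bytes, looping until the buffer is full or the
// stream ends early.
static byte[] readFully(InputStream in, int len) throws IOException {
    byte[] buf = new byte[len];
    int off = 0;
    while (off < len) {
        int n = in.read(buf, off, len - off);
        if (n < 0) {
            throw new EOFException("stream ended after " + off + " bytes");
        }
        off += n;
    }
    return buf;
}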

Example 10 with HddsDatanodeService

use of org.apache.hadoop.ozone.HddsDatanodeService in project ozone by apache.

the class TestOzoneRpcClientAbstract method createAndCorruptKey.

private void createAndCorruptKey(String volumeName, String bucketName, String keyName) throws IOException {
    String value = "sample value";
    store.createVolume(volumeName);
    OzoneVolume volume = store.getVolume(volumeName);
    volume.createBucket(bucketName);
    OzoneBucket bucket = volume.getBucket(bucketName);
    // Write data into a key
    OzoneOutputStream out = bucket.createKey(keyName, value.getBytes(UTF_8).length, ReplicationType.RATIS, ONE, new HashMap<>());
    out.write(value.getBytes(UTF_8));
    out.close();
    // We need to find the location of the chunk file corresponding to the
    // data we just wrote.
    OzoneKey key = bucket.getKey(keyName);
    long containerID = ((OzoneKeyDetails) key).getOzoneKeyLocations().get(0).getContainerID();
    // Get the container by traversing the datanodes. At least one of the
    // datanodes must have this container.
    Container container = null;
    for (HddsDatanodeService hddsDatanode : cluster.getHddsDatanodes()) {
        container = hddsDatanode.getDatanodeStateMachine().getContainer().getContainerSet().getContainer(containerID);
        if (container != null) {
            break;
        }
    }
    Assert.assertNotNull("Container not found", container);
    corruptData(container, key);
}
Also used : OzoneVolume(org.apache.hadoop.ozone.client.OzoneVolume) OzoneBucket(org.apache.hadoop.ozone.client.OzoneBucket) Container(org.apache.hadoop.ozone.container.common.interfaces.Container) OzoneKeyDetails(org.apache.hadoop.ozone.client.OzoneKeyDetails) OzoneKey(org.apache.hadoop.ozone.client.OzoneKey) OzoneOutputStream(org.apache.hadoop.ozone.client.io.OzoneOutputStream) HddsDatanodeService(org.apache.hadoop.ozone.HddsDatanodeService)
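
The datanode scan in createAndCorruptKey is the same loop used in testReadKeyWithCorruptedDataWithMutiNodes above. Factored out, it could look like the sketch below; the helper name is made up, but the calls are exactly the ones used in these examples.

// Scan every datanode in the cluster and return the first replica of
// the container, or null when no datanode holds it.
private Container findContainerReplica(long containerID) {
    for (HddsDatanodeService dn : cluster.getHddsDatanodes()) {
        Container replica = dn.getDatanodeStateMachine().getContainer()
            .getContainerSet().getContainer(containerID);
        if (replica != null) {
            return replica;
        }
    }
    return null;
}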

Aggregations

HddsDatanodeService (org.apache.hadoop.ozone.HddsDatanodeService): 41 usages
OzoneOutputStream (org.apache.hadoop.ozone.client.io.OzoneOutputStream): 20 usages
Test (org.junit.jupiter.api.Test): 20 usages
IOException (java.io.IOException): 17 usages
Pipeline (org.apache.hadoop.hdds.scm.pipeline.Pipeline): 14 usages
File (java.io.File): 12 usages
KeyValueContainerData (org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData): 12 usages
OmKeyLocationInfo (org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo): 12 usages
Test (org.junit.Test): 12 usages
Container (org.apache.hadoop.ozone.container.common.interfaces.Container): 11 usages
KeyOutputStream (org.apache.hadoop.ozone.client.io.KeyOutputStream): 10 usages
DatanodeDetails (org.apache.hadoop.hdds.protocol.DatanodeDetails): 9 usages
OzoneContainer (org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer): 9 usages
GenericTestUtils (org.apache.ozone.test.GenericTestUtils): 9 usages
OzoneConfiguration (org.apache.hadoop.hdds.conf.OzoneConfiguration): 8 usages
XceiverClientSpi (org.apache.hadoop.hdds.scm.XceiverClientSpi): 8 usages
ArrayList (java.util.ArrayList): 7 usages
OzoneKey (org.apache.hadoop.ozone.client.OzoneKey): 7 usages
OzoneInputStream (org.apache.hadoop.ozone.client.io.OzoneInputStream): 7 usages
ContainerData (org.apache.hadoop.ozone.container.common.impl.ContainerData): 7 usages