
Example 1 with ContainerData

Use of org.apache.hadoop.ozone.container.common.impl.ContainerData in project ozone by apache.

From class ChunkManagerDummyImpl, method writeChunk.

@Override
public void writeChunk(Container container, BlockID blockID, ChunkInfo info, ChunkBuffer data, DispatcherContext dispatcherContext) throws StorageContainerException {
    Preconditions.checkNotNull(dispatcherContext);
    DispatcherContext.WriteChunkStage stage = dispatcherContext.getStage();
    ContainerData containerData = container.getContainerData();
    if (stage == DispatcherContext.WriteChunkStage.WRITE_DATA || stage == DispatcherContext.WriteChunkStage.COMBINED) {
        ChunkUtils.validateBufferSize(info.getLen(), data.remaining());
        HddsVolume volume = containerData.getVolume();
        VolumeIOStats volumeIOStats = volume.getVolumeIOStats();
        volumeIOStats.incWriteOpCount();
        volumeIOStats.incWriteBytes(info.getLen());
    }
    if (stage == DispatcherContext.WriteChunkStage.COMMIT_DATA || stage == DispatcherContext.WriteChunkStage.COMBINED) {
        containerData.updateWriteStats(info.getLen(), false);
    }
}
Also used : HddsVolume(org.apache.hadoop.ozone.container.common.volume.HddsVolume) VolumeIOStats(org.apache.hadoop.ozone.container.common.volume.VolumeIOStats) DispatcherContext(org.apache.hadoop.ozone.container.common.transport.server.ratis.DispatcherContext) ContainerData(org.apache.hadoop.ozone.container.common.impl.ContainerData)
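
ChunkManagerDummyImpl only accounts for the write; the chunk payload itself is never persisted. Below is a minimal sketch of the stage handling visible above, assuming only the WriteChunkStage semantics shown in the method (WRITE_DATA and COMBINED carry the payload and update volume IO statistics, COMMIT_DATA and COMBINED update the container write statistics); the helper class and method names are illustrative, not part of Ozone:

import org.apache.hadoop.ozone.container.common.transport.server.ratis.DispatcherContext;

final class WriteChunkStageHelper {

    // True when the stage carries the chunk payload, so volume IO statistics apply.
    static boolean updatesVolumeStats(DispatcherContext.WriteChunkStage stage) {
        return stage == DispatcherContext.WriteChunkStage.WRITE_DATA
            || stage == DispatcherContext.WriteChunkStage.COMBINED;
    }

    // True when the stage commits the chunk, so container-level write statistics apply.
    static boolean updatesContainerStats(DispatcherContext.WriteChunkStage stage) {
        return stage == DispatcherContext.WriteChunkStage.COMMIT_DATA
            || stage == DispatcherContext.WriteChunkStage.COMBINED;
    }

    private WriteChunkStageHelper() {
    }
}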

Example 2 with ContainerData

Use of org.apache.hadoop.ozone.container.common.impl.ContainerData in project ozone by apache.

From class TestContainerStateMachineFailures, method testApplyTransactionIdempotencyWithClosedContainer.

@Test
public void testApplyTransactionIdempotencyWithClosedContainer() throws Exception {
    OzoneOutputStream key = objectStore.getVolume(volumeName).getBucket(bucketName).createKey("ratis", 1024, ReplicationType.RATIS, ReplicationFactor.ONE, new HashMap<>());
    // First write and flush creates a container in the datanode
    key.write("ratis".getBytes(UTF_8));
    key.flush();
    key.write("ratis".getBytes(UTF_8));
    KeyOutputStream groupOutputStream = (KeyOutputStream) key.getOutputStream();
    List<OmKeyLocationInfo> locationInfoList = groupOutputStream.getLocationInfoList();
    Assert.assertEquals(1, locationInfoList.size());
    OmKeyLocationInfo omKeyLocationInfo = locationInfoList.get(0);
    HddsDatanodeService dn = TestHelper.getDatanodeService(omKeyLocationInfo, cluster);
    ContainerData containerData = dn.getDatanodeStateMachine().getContainer().getContainerSet().getContainer(omKeyLocationInfo.getContainerID()).getContainerData();
    Assert.assertTrue(containerData instanceof KeyValueContainerData);
    key.close();
    ContainerStateMachine stateMachine = (ContainerStateMachine) TestHelper.getStateMachine(dn, omKeyLocationInfo.getPipeline());
    SimpleStateMachineStorage storage = (SimpleStateMachineStorage) stateMachine.getStateMachineStorage();
    Path parentPath = storage.findLatestSnapshot().getFile().getPath();
    stateMachine.takeSnapshot();
    Assert.assertTrue(parentPath.getParent().toFile().listFiles().length > 0);
    FileInfo snapshot = storage.findLatestSnapshot().getFile();
    Assert.assertNotNull(snapshot);
    long containerID = omKeyLocationInfo.getContainerID();
    Pipeline pipeline = cluster.getStorageContainerLocationClient().getContainerWithPipeline(containerID).getPipeline();
    XceiverClientSpi xceiverClient = xceiverClientManager.acquireClient(pipeline);
    ContainerProtos.ContainerCommandRequestProto.Builder request = ContainerProtos.ContainerCommandRequestProto.newBuilder();
    request.setDatanodeUuid(pipeline.getFirstNode().getUuidString());
    request.setCmdType(ContainerProtos.Type.CloseContainer);
    request.setContainerID(containerID);
    request.setCloseContainer(ContainerProtos.CloseContainerRequestProto.getDefaultInstance());
    try {
        xceiverClient.sendCommand(request.build());
    } catch (IOException e) {
        Assert.fail("Exception should not be thrown");
    }
    Assert.assertTrue(TestHelper.getDatanodeService(omKeyLocationInfo, cluster).getDatanodeStateMachine().getContainer().getContainerSet().getContainer(containerID).getContainerState() == ContainerProtos.ContainerDataProto.State.CLOSED);
    Assert.assertTrue(stateMachine.isStateMachineHealthy());
    try {
        stateMachine.takeSnapshot();
    } catch (IOException ioe) {
        Assert.fail("Exception should not be thrown");
    }
    FileInfo latestSnapshot = storage.findLatestSnapshot().getFile();
    Assert.assertFalse(snapshot.getPath().equals(latestSnapshot.getPath()));
}
Also used : Path(java.nio.file.Path) OzoneOutputStream(org.apache.hadoop.ozone.client.io.OzoneOutputStream) HddsDatanodeService(org.apache.hadoop.ozone.HddsDatanodeService) IOException(java.io.IOException) XceiverClientSpi(org.apache.hadoop.hdds.scm.XceiverClientSpi) OmKeyLocationInfo(org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo) KeyValueContainerData(org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData) Pipeline(org.apache.hadoop.hdds.scm.pipeline.Pipeline) ContainerStateMachine(org.apache.hadoop.ozone.container.common.transport.server.ratis.ContainerStateMachine) FileInfo(org.apache.ratis.server.storage.FileInfo) SimpleStateMachineStorage(org.apache.ratis.statemachine.impl.SimpleStateMachineStorage) KeyOutputStream(org.apache.hadoop.ozone.client.io.KeyOutputStream) ContainerData(org.apache.hadoop.ozone.container.common.impl.ContainerData) Test(org.junit.Test)
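
Examples 2 through 4 repeat the same call chain to reach the ContainerData that backs a key's block location. A minimal sketch that factors that lookup into a helper, assuming the MiniOzoneCluster-based test setup used above; the class name, method name, and import paths for the test-scope helpers are assumptions, not code from the Ozone tree:

import org.apache.hadoop.ozone.HddsDatanodeService;
import org.apache.hadoop.ozone.MiniOzoneCluster;
import org.apache.hadoop.ozone.container.TestHelper;
import org.apache.hadoop.ozone.container.common.impl.ContainerData;
import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;

final class ContainerDataLookup {

    // Resolves the datanode that holds the block location, then walks
    // DatanodeStateMachine -> OzoneContainer -> ContainerSet -> Container -> ContainerData.
    static ContainerData findContainerData(MiniOzoneCluster cluster, OmKeyLocationInfo location) throws Exception {
        HddsDatanodeService dn = TestHelper.getDatanodeService(location, cluster);
        return dn.getDatanodeStateMachine()
            .getContainer()
            .getContainerSet()
            .getContainer(location.getContainerID())
            .getContainerData();
    }

    private ContainerDataLookup() {
    }
}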

Example 3 with ContainerData

Use of org.apache.hadoop.ozone.container.common.impl.ContainerData in project ozone by apache.

From class TestContainerStateMachineFailures, method testApplyTransactionFailure.

@Test
public void testApplyTransactionFailure() throws Exception {
    OzoneOutputStream key = objectStore.getVolume(volumeName).getBucket(bucketName).createKey("ratis", 1024, ReplicationType.RATIS, ReplicationFactor.ONE, new HashMap<>());
    // First write and flush creates a container in the datanode
    key.write("ratis".getBytes(UTF_8));
    key.flush();
    key.write("ratis".getBytes(UTF_8));
    KeyOutputStream groupOutputStream = (KeyOutputStream) key.getOutputStream();
    List<OmKeyLocationInfo> locationInfoList = groupOutputStream.getLocationInfoList();
    Assert.assertEquals(1, locationInfoList.size());
    OmKeyLocationInfo omKeyLocationInfo = locationInfoList.get(0);
    HddsDatanodeService dn = TestHelper.getDatanodeService(omKeyLocationInfo, cluster);
    int index = cluster.getHddsDatanodeIndex(dn.getDatanodeDetails());
    ContainerData containerData = dn.getDatanodeStateMachine().getContainer().getContainerSet().getContainer(omKeyLocationInfo.getContainerID()).getContainerData();
    Assert.assertTrue(containerData instanceof KeyValueContainerData);
    KeyValueContainerData keyValueContainerData = (KeyValueContainerData) containerData;
    key.close();
    ContainerStateMachine stateMachine = (ContainerStateMachine) TestHelper.getStateMachine(cluster.getHddsDatanodes().get(index), omKeyLocationInfo.getPipeline());
    SimpleStateMachineStorage storage = (SimpleStateMachineStorage) stateMachine.getStateMachineStorage();
    stateMachine.takeSnapshot();
    Path parentPath = storage.findLatestSnapshot().getFile().getPath();
    // Since the snapshot threshold is set to 1 and applyTransaction calls have
    // already happened, we should see snapshots
    Assert.assertTrue(parentPath.getParent().toFile().listFiles().length > 0);
    FileInfo snapshot = storage.findLatestSnapshot().getFile();
    Assert.assertNotNull(snapshot);
    long containerID = omKeyLocationInfo.getContainerID();
    // delete the container directory (and with it the container DB)
    FileUtil.fullyDelete(new File(keyValueContainerData.getContainerPath()));
    Pipeline pipeline = cluster.getStorageContainerLocationClient().getContainerWithPipeline(containerID).getPipeline();
    XceiverClientSpi xceiverClient = xceiverClientManager.acquireClient(pipeline);
    ContainerProtos.ContainerCommandRequestProto.Builder request = ContainerProtos.ContainerCommandRequestProto.newBuilder();
    request.setDatanodeUuid(pipeline.getFirstNode().getUuidString());
    request.setCmdType(ContainerProtos.Type.CloseContainer);
    request.setContainerID(containerID);
    request.setCloseContainer(ContainerProtos.CloseContainerRequestProto.getDefaultInstance());
    try {
        xceiverClient.sendCommand(request.build());
        Assert.fail("Expected exception not thrown");
    } catch (IOException e) {
    // Exception should be thrown
    }
    // Make sure the container is marked unhealthy
    Assert.assertTrue(dn.getDatanodeStateMachine().getContainer().getContainerSet().getContainer(containerID).getContainerState() == ContainerProtos.ContainerDataProto.State.UNHEALTHY);
    try {
        // try to take a new snapshot, ideally it should just fail
        stateMachine.takeSnapshot();
    } catch (IOException ioe) {
        Assert.assertTrue(ioe instanceof StateMachineException);
    }
    if (snapshot.getPath().toFile().exists()) {
        // Make sure the latest snapshot is the same as the previous one
        try {
            FileInfo latestSnapshot = storage.findLatestSnapshot().getFile();
            Assert.assertTrue(snapshot.getPath().equals(latestSnapshot.getPath()));
        } catch (Throwable e) {
            Assert.assertFalse(snapshot.getPath().toFile().exists());
        }
    }
    // when the pipeline is removed, the Ratis group dir (including the snapshot) is deleted
    LambdaTestUtils.await(5000, 500, () -> (!snapshot.getPath().toFile().exists()));
}
Also used : Path(java.nio.file.Path) StateMachineException(org.apache.ratis.protocol.exceptions.StateMachineException) OzoneOutputStream(org.apache.hadoop.ozone.client.io.OzoneOutputStream) HddsDatanodeService(org.apache.hadoop.ozone.HddsDatanodeService) IOException(java.io.IOException) XceiverClientSpi(org.apache.hadoop.hdds.scm.XceiverClientSpi) OmKeyLocationInfo(org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo) KeyValueContainerData(org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData) Pipeline(org.apache.hadoop.hdds.scm.pipeline.Pipeline) ContainerStateMachine(org.apache.hadoop.ozone.container.common.transport.server.ratis.ContainerStateMachine) FileInfo(org.apache.ratis.server.storage.FileInfo) SimpleStateMachineStorage(org.apache.ratis.statemachine.impl.SimpleStateMachineStorage) KeyOutputStream(org.apache.hadoop.ozone.client.io.KeyOutputStream) File(java.io.File) ContainerData(org.apache.hadoop.ozone.container.common.impl.ContainerData) Test(org.junit.Test)
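
Examples 2 and 3 assemble the same CloseContainer command by hand before sending it over the XceiverClientSpi. A minimal sketch of that request construction, using only the builder calls already shown above; the helper class name and import paths are assumptions:

import java.io.IOException;
import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
import org.apache.hadoop.hdds.scm.pipeline.Pipeline;

final class CloseContainerRequests {

    // Builds a CloseContainer command addressed to the first datanode of the pipeline.
    static ContainerProtos.ContainerCommandRequestProto buildCloseContainerRequest(
        Pipeline pipeline, long containerID) throws IOException {
        return ContainerProtos.ContainerCommandRequestProto.newBuilder()
            .setDatanodeUuid(pipeline.getFirstNode().getUuidString())
            .setCmdType(ContainerProtos.Type.CloseContainer)
            .setContainerID(containerID)
            .setCloseContainer(ContainerProtos.CloseContainerRequestProto.getDefaultInstance())
            .build();
    }

    private CloseContainerRequests() {
    }
}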

Example 4 with ContainerData

Use of org.apache.hadoop.ozone.container.common.impl.ContainerData in project ozone by apache.

From class TestValidateBCSIDOnRestart, method testValidateBCSIDOnDnRestart.

@Test
public void testValidateBCSIDOnDnRestart() throws Exception {
    OzoneOutputStream key = objectStore.getVolume(volumeName).getBucket(bucketName).createKey("ratis", 1024, ReplicationType.RATIS, ReplicationFactor.ONE, new HashMap<>());
    // First write and flush creates a container in the datanode
    key.write("ratis".getBytes(UTF_8));
    key.flush();
    key.write("ratis".getBytes(UTF_8));
    KeyOutputStream groupOutputStream = (KeyOutputStream) key.getOutputStream();
    List<OmKeyLocationInfo> locationInfoList = groupOutputStream.getLocationInfoList();
    Assert.assertEquals(1, locationInfoList.size());
    OmKeyLocationInfo omKeyLocationInfo = locationInfoList.get(0);
    HddsDatanodeService dn = TestHelper.getDatanodeService(omKeyLocationInfo, cluster);
    ContainerData containerData = TestHelper.getDatanodeService(omKeyLocationInfo, cluster).getDatanodeStateMachine().getContainer().getContainerSet().getContainer(omKeyLocationInfo.getContainerID()).getContainerData();
    Assert.assertTrue(containerData instanceof KeyValueContainerData);
    KeyValueContainerData keyValueContainerData = (KeyValueContainerData) containerData;
    key.close();
    long containerID = omKeyLocationInfo.getContainerID();
    int index = cluster.getHddsDatanodeIndex(dn.getDatanodeDetails());
    // delete the container directory (and with it the container DB)
    FileUtil.fullyDelete(new File(keyValueContainerData.getContainerPath()));
    HddsDatanodeService dnService = cluster.getHddsDatanodes().get(index);
    OzoneContainer ozoneContainer = dnService.getDatanodeStateMachine().getContainer();
    ozoneContainer.getContainerSet().removeContainer(containerID);
    ContainerStateMachine stateMachine = (ContainerStateMachine) TestHelper.getStateMachine(cluster.getHddsDatanodes().get(index), omKeyLocationInfo.getPipeline());
    SimpleStateMachineStorage storage = (SimpleStateMachineStorage) stateMachine.getStateMachineStorage();
    stateMachine.takeSnapshot();
    Path parentPath = storage.findLatestSnapshot().getFile().getPath();
    stateMachine.buildMissingContainerSet(parentPath.toFile());
    // Since the snapshot threshold is set to 1 and applyTransaction calls have
    // already happened, we should see snapshots
    Assert.assertTrue(parentPath.getParent().toFile().listFiles().length > 0);
    // make sure the missing containerSet is not empty
    HddsDispatcher dispatcher = (HddsDispatcher) ozoneContainer.getDispatcher();
    Assert.assertTrue(!dispatcher.getMissingContainerSet().isEmpty());
    Assert.assertTrue(dispatcher.getMissingContainerSet().contains(containerID));
    // write a new key
    key = objectStore.getVolume(volumeName).getBucket(bucketName).createKey("ratis", 1024, ReplicationType.RATIS, ReplicationFactor.ONE, new HashMap<>());
    // First write and flush creates a container in the datanode
    key.write("ratis1".getBytes(UTF_8));
    key.flush();
    groupOutputStream = (KeyOutputStream) key.getOutputStream();
    locationInfoList = groupOutputStream.getLocationInfoList();
    Assert.assertEquals(1, locationInfoList.size());
    omKeyLocationInfo = locationInfoList.get(0);
    key.close();
    containerID = omKeyLocationInfo.getContainerID();
    dn = TestHelper.getDatanodeService(omKeyLocationInfo, cluster);
    containerData = dn.getDatanodeStateMachine().getContainer().getContainerSet().getContainer(omKeyLocationInfo.getContainerID()).getContainerData();
    Assert.assertTrue(containerData instanceof KeyValueContainerData);
    keyValueContainerData = (KeyValueContainerData) containerData;
    ReferenceCountedDB db = BlockUtils.getDB(keyValueContainerData, conf);
    // modify the BCSID for the container in RocksDB, thereby inducing
    // corruption
    db.getStore().getMetadataTable().put(OzoneConsts.BLOCK_COMMIT_SEQUENCE_ID, 0L);
    db.decrementReference();
    // after the restart, there will be a mismatch between the BCSID recorded
    // in the container and what is stored in RocksDB, and hence the container
    // will be marked unhealthy
    index = cluster.getHddsDatanodeIndex(dn.getDatanodeDetails());
    cluster.restartHddsDatanode(dn.getDatanodeDetails(), false);
    // Make sure the container is marked unhealthy
    Assert.assertTrue(cluster.getHddsDatanodes().get(index).getDatanodeStateMachine().getContainer().getContainerSet().getContainer(containerID).getContainerState() == ContainerProtos.ContainerDataProto.State.UNHEALTHY);
}
Also used : Path(java.nio.file.Path) HashMap(java.util.HashMap) OzoneOutputStream(org.apache.hadoop.ozone.client.io.OzoneOutputStream) HddsDatanodeService(org.apache.hadoop.ozone.HddsDatanodeService) OmKeyLocationInfo(org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo) KeyValueContainerData(org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData) HddsDispatcher(org.apache.hadoop.ozone.container.common.impl.HddsDispatcher) ReferenceCountedDB(org.apache.hadoop.ozone.container.common.utils.ReferenceCountedDB) ContainerStateMachine(org.apache.hadoop.ozone.container.common.transport.server.ratis.ContainerStateMachine) SimpleStateMachineStorage(org.apache.ratis.statemachine.impl.SimpleStateMachineStorage) KeyOutputStream(org.apache.hadoop.ozone.client.io.KeyOutputStream) OzoneContainer(org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer) File(java.io.File) ContainerData(org.apache.hadoop.ozone.container.common.impl.ContainerData) Test(org.junit.Test)
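
The BCSID corruption step above can be isolated into a small helper so the DB handle is always released. A minimal sketch, assuming the same BlockUtils/ReferenceCountedDB API used in the test and an OzoneConfiguration for the datanode; the class name and import paths are assumptions:

import java.io.IOException;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.ozone.OzoneConsts;
import org.apache.hadoop.ozone.container.common.utils.ReferenceCountedDB;
import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;
import org.apache.hadoop.ozone.container.keyvalue.helpers.BlockUtils;

final class BcsidCorruption {

    // Overwrites the persisted block commit sequence id with 0, so that the value
    // recorded in the container no longer matches RocksDB after a restart.
    static void corruptBcsid(KeyValueContainerData containerData, OzoneConfiguration conf) throws IOException {
        ReferenceCountedDB db = BlockUtils.getDB(containerData, conf);
        try {
            db.getStore().getMetadataTable().put(OzoneConsts.BLOCK_COMMIT_SEQUENCE_ID, 0L);
        } finally {
            db.decrementReference();
        }
    }

    private BcsidCorruption() {
    }
}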

Example 5 with ContainerData

Use of org.apache.hadoop.ozone.container.common.impl.ContainerData in project ozone by apache.

From class TestDeleteContainerHandler, method isContainerClosed.

/**
 * Checks whether the given container is closed on a datanode.
 * @param hddsDatanodeService the datanode service to inspect
 * @param containerID the ID of the container to check
 * @return true if the container is closed, false otherwise
 */
private Boolean isContainerClosed(HddsDatanodeService hddsDatanodeService, long containerID) {
    ContainerData containerData;
    containerData = hddsDatanodeService.getDatanodeStateMachine().getContainer().getContainerSet().getContainer(containerID).getContainerData();
    return !containerData.isOpen();
}
Also used : ContainerData(org.apache.hadoop.ozone.container.common.impl.ContainerData)
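
Since closing a container is asynchronous, callers usually poll this predicate rather than assert it once. A hedged usage sketch, reusing the LambdaTestUtils.await pattern from Example 3; the timeout and interval values are illustrative:

// Wait up to 5 seconds, checking every 500 ms, until the datanode reports the
// container as closed.
LambdaTestUtils.await(5000, 500, () -> isContainerClosed(hddsDatanodeService, containerID));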

Aggregations

ContainerData (org.apache.hadoop.ozone.container.common.impl.ContainerData): 19 usages
KeyValueContainerData (org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData): 11 usages
Test (org.junit.Test): 8 usages
IOException (java.io.IOException): 7 usages
HddsDatanodeService (org.apache.hadoop.ozone.HddsDatanodeService): 7 usages
KeyOutputStream (org.apache.hadoop.ozone.client.io.KeyOutputStream): 5 usages
OzoneOutputStream (org.apache.hadoop.ozone.client.io.OzoneOutputStream): 5 usages
ContainerSet (org.apache.hadoop.ozone.container.common.impl.ContainerSet): 5 usages
OmKeyLocationInfo (org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo): 5 usages
File (java.io.File): 4 usages
Path (java.nio.file.Path): 4 usages
Container (org.apache.hadoop.ozone.container.common.interfaces.Container): 4 usages
ContainerStateMachine (org.apache.hadoop.ozone.container.common.transport.server.ratis.ContainerStateMachine): 4 usages
OzoneContainer (org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer): 4 usages
SimpleStateMachineStorage (org.apache.ratis.statemachine.impl.SimpleStateMachineStorage): 4 usages
XceiverClientSpi (org.apache.hadoop.hdds.scm.XceiverClientSpi): 3 usages
Pipeline (org.apache.hadoop.hdds.scm.pipeline.Pipeline): 3 usages
ContainerMetrics (org.apache.hadoop.ozone.container.common.helpers.ContainerMetrics): 3 usages
DatanodeConfiguration (org.apache.hadoop.ozone.container.common.statemachine.DatanodeConfiguration): 3 usages
KeyValueHandler (org.apache.hadoop.ozone.container.keyvalue.KeyValueHandler): 3 usages