
Example 1 with HddsDispatcher

Use of org.apache.hadoop.ozone.container.common.impl.HddsDispatcher in project ozone by apache.

From class TestValidateBCSIDOnRestart, method testValidateBCSIDOnDnRestart.

@Test
public void testValidateBCSIDOnDnRestart() throws Exception {
    OzoneOutputStream key = objectStore.getVolume(volumeName).getBucket(bucketName).createKey("ratis", 1024, ReplicationType.RATIS, ReplicationFactor.ONE, new HashMap<>());
    // First write and flush creates a container in the datanode
    key.write("ratis".getBytes(UTF_8));
    key.flush();
    key.write("ratis".getBytes(UTF_8));
    KeyOutputStream groupOutputStream = (KeyOutputStream) key.getOutputStream();
    List<OmKeyLocationInfo> locationInfoList = groupOutputStream.getLocationInfoList();
    Assert.assertEquals(1, locationInfoList.size());
    OmKeyLocationInfo omKeyLocationInfo = locationInfoList.get(0);
    HddsDatanodeService dn = TestHelper.getDatanodeService(omKeyLocationInfo, cluster);
    ContainerData containerData = dn.getDatanodeStateMachine().getContainer().getContainerSet().getContainer(omKeyLocationInfo.getContainerID()).getContainerData();
    Assert.assertTrue(containerData instanceof KeyValueContainerData);
    KeyValueContainerData keyValueContainerData = (KeyValueContainerData) containerData;
    key.close();
    long containerID = omKeyLocationInfo.getContainerID();
    int index = cluster.getHddsDatanodeIndex(dn.getDatanodeDetails());
    // delete the container's on-disk directory (including its container DB)
    FileUtil.fullyDelete(new File(keyValueContainerData.getContainerPath()));
    HddsDatanodeService dnService = cluster.getHddsDatanodes().get(index);
    OzoneContainer ozoneContainer = dnService.getDatanodeStateMachine().getContainer();
    ozoneContainer.getContainerSet().removeContainer(containerID);
    ContainerStateMachine stateMachine = (ContainerStateMachine) TestHelper.getStateMachine(cluster.getHddsDatanodes().get(index), omKeyLocationInfo.getPipeline());
    SimpleStateMachineStorage storage = (SimpleStateMachineStorage) stateMachine.getStateMachineStorage();
    stateMachine.takeSnapshot();
    Path parentPath = storage.findLatestSnapshot().getFile().getPath();
    stateMachine.buildMissingContainerSet(parentPath.toFile());
    // Since the snapshot threshold is set to 1 and transactions have been
    // applied, at least one snapshot file should be present.
    Assert.assertTrue(parentPath.getParent().toFile().listFiles().length > 0);
    // make sure the missing containerSet is not empty
    HddsDispatcher dispatcher = (HddsDispatcher) ozoneContainer.getDispatcher();
    Assert.assertFalse(dispatcher.getMissingContainerSet().isEmpty());
    Assert.assertTrue(dispatcher.getMissingContainerSet().contains(containerID));
    // write a new key
    key = objectStore.getVolume(volumeName).getBucket(bucketName).createKey("ratis", 1024, ReplicationType.RATIS, ReplicationFactor.ONE, new HashMap<>());
    // First write and flush creates a container in the datanode
    key.write("ratis1".getBytes(UTF_8));
    key.flush();
    groupOutputStream = (KeyOutputStream) key.getOutputStream();
    locationInfoList = groupOutputStream.getLocationInfoList();
    Assert.assertEquals(1, locationInfoList.size());
    omKeyLocationInfo = locationInfoList.get(0);
    key.close();
    containerID = omKeyLocationInfo.getContainerID();
    dn = TestHelper.getDatanodeService(omKeyLocationInfo, cluster);
    containerData = dn.getDatanodeStateMachine().getContainer().getContainerSet().getContainer(omKeyLocationInfo.getContainerID()).getContainerData();
    Assert.assertTrue(containerData instanceof KeyValueContainerData);
    keyValueContainerData = (KeyValueContainerData) containerData;
    ReferenceCountedDB db = BlockUtils.getDB(keyValueContainerData, conf);
    // modify the BCSID for the container in RocksDB, thereby inducing
    // corruption
    db.getStore().getMetadataTable().put(OzoneConsts.BLOCK_COMMIT_SEQUENCE_ID, 0L);
    db.decrementReference();
    // After the restart there will be a mismatch between the BCSID expected
    // for the container and the value stored in RocksDB, so the container
    // will be marked unhealthy.
    index = cluster.getHddsDatanodeIndex(dn.getDatanodeDetails());
    cluster.restartHddsDatanode(dn.getDatanodeDetails(), false);
    // Make sure the container is marked unhealthy
    Assert.assertEquals(ContainerProtos.ContainerDataProto.State.UNHEALTHY, cluster.getHddsDatanodes().get(index).getDatanodeStateMachine().getContainer().getContainerSet().getContainer(containerID).getContainerState());
}
Also used : Path(java.nio.file.Path) HashMap(java.util.HashMap) OzoneOutputStream(org.apache.hadoop.ozone.client.io.OzoneOutputStream) HddsDatanodeService(org.apache.hadoop.ozone.HddsDatanodeService) OmKeyLocationInfo(org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo) KeyValueContainerData(org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData) HddsDispatcher(org.apache.hadoop.ozone.container.common.impl.HddsDispatcher) ReferenceCountedDB(org.apache.hadoop.ozone.container.common.utils.ReferenceCountedDB) ContainerStateMachine(org.apache.hadoop.ozone.container.common.transport.server.ratis.ContainerStateMachine) SimpleStateMachineStorage(org.apache.ratis.statemachine.impl.SimpleStateMachineStorage) KeyOutputStream(org.apache.hadoop.ozone.client.io.KeyOutputStream) OzoneContainer(org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer) File(java.io.File) ContainerData(org.apache.hadoop.ozone.container.common.impl.ContainerData) Test(org.junit.Test)
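
As a follow-up to the corruption step in this test, the value written into RocksDB can be read back before the datanode restart to make the induced mismatch explicit. The helper below is only a minimal sketch meant to sit in the same test class; readBcsId is a hypothetical name, and it assumes the metadata table exposes a get() counterpart to the put() used above.

private long readBcsId(KeyValueContainerData data) throws IOException {
    // Hypothetical helper, not part of the original test: re-open the
    // container's RocksDB and read BLOCK_COMMIT_SEQUENCE_ID back. Assumes
    // the key is present, which holds right after the put() above.
    ReferenceCountedDB containerDb = BlockUtils.getDB(data, conf);
    try {
        return containerDb.getStore().getMetadataTable()
            .get(OzoneConsts.BLOCK_COMMIT_SEQUENCE_ID);
    } finally {
        containerDb.decrementReference();
    }
}

Asserting readBcsId(keyValueContainerData) == 0L right after the put() would confirm the corruption took effect before cluster.restartHddsDatanode(...) runs.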

Example 2 with HddsDispatcher

Use of org.apache.hadoop.ozone.container.common.impl.HddsDispatcher in project ozone by apache.

From class TestHandler, method setup.

@Before
public void setup() throws Exception {
    this.conf = new OzoneConfiguration();
    this.containerSet = Mockito.mock(ContainerSet.class);
    this.volumeSet = Mockito.mock(MutableVolumeSet.class);
    DatanodeDetails datanodeDetails = Mockito.mock(DatanodeDetails.class);
    DatanodeStateMachine stateMachine = Mockito.mock(DatanodeStateMachine.class);
    StateContext context = Mockito.mock(StateContext.class);
    Mockito.when(stateMachine.getDatanodeDetails()).thenReturn(datanodeDetails);
    Mockito.when(context.getParent()).thenReturn(stateMachine);
    ContainerMetrics metrics = ContainerMetrics.create(conf);
    Map<ContainerProtos.ContainerType, Handler> handlers = Maps.newHashMap();
    for (ContainerProtos.ContainerType containerType : ContainerProtos.ContainerType.values()) {
        handlers.put(containerType, Handler.getHandlerForContainerType(containerType, conf, context.getParent().getDatanodeDetails().getUuidString(), containerSet, volumeSet, metrics, TestHddsDispatcher.NO_OP_ICR_SENDER));
    }
    this.dispatcher = new HddsDispatcher(conf, containerSet, volumeSet, handlers, null, metrics, null);
}
Also used : ContainerProtos(org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos) StateContext(org.apache.hadoop.ozone.container.common.statemachine.StateContext) OzoneConfiguration(org.apache.hadoop.hdds.conf.OzoneConfiguration) KeyValueHandler(org.apache.hadoop.ozone.container.keyvalue.KeyValueHandler) HddsDispatcher(org.apache.hadoop.ozone.container.common.impl.HddsDispatcher) TestHddsDispatcher(org.apache.hadoop.ozone.container.common.impl.TestHddsDispatcher) ContainerSet(org.apache.hadoop.ozone.container.common.impl.ContainerSet) DatanodeDetails(org.apache.hadoop.hdds.protocol.DatanodeDetails) DatanodeStateMachine(org.apache.hadoop.ozone.container.common.statemachine.DatanodeStateMachine) MutableVolumeSet(org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet) ContainerMetrics(org.apache.hadoop.ozone.container.common.helpers.ContainerMetrics) Before(org.junit.Before)
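
Given this setup, the natural assertion is that the dispatcher hands back the KeyValueHandler registered for the KeyValueContainer type. The sketch below assumes HddsDispatcher exposes a per-type accessor named getHandler(ContainerType); treat that method name as an assumption.

@Test
public void testGetKeyValueHandler() {
    // Hedged sketch: getHandler(...) is assumed to be the per-type lookup.
    Handler kvHandler = dispatcher.getHandler(ContainerProtos.ContainerType.KeyValueContainer);
    Assert.assertTrue("Expected a KeyValueHandler for the KeyValueContainer type", kvHandler instanceof KeyValueHandler);
}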

Example 3 with HddsDispatcher

Use of org.apache.hadoop.ozone.container.common.impl.HddsDispatcher in project ozone by apache.

From class TestKeyValueHandler, method setup.

@Before
public void setup() throws StorageContainerException {
    // Create mock HddsDispatcher and KeyValueHandler.
    handler = Mockito.mock(KeyValueHandler.class);
    HashMap<ContainerType, Handler> handlers = new HashMap<>();
    handlers.put(ContainerType.KeyValueContainer, handler);
    dispatcher = new HddsDispatcher(new OzoneConfiguration(), Mockito.mock(ContainerSet.class), Mockito.mock(VolumeSet.class), handlers, Mockito.mock(StateContext.class), Mockito.mock(ContainerMetrics.class), Mockito.mock(TokenVerifier.class));
}
Also used : HashMap(java.util.HashMap) ContainerType(org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerType) Handler(org.apache.hadoop.ozone.container.common.interfaces.Handler) OzoneConfiguration(org.apache.hadoop.hdds.conf.OzoneConfiguration) HddsDispatcher(org.apache.hadoop.ozone.container.common.impl.HddsDispatcher) Before(org.junit.Before)
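
With the handler mocked, a test still needs a request to drive through the dispatcher or handler under test. The snippet below sketches how a CreateContainer request can be built directly with the protobuf builders rather than through the ContainerTestHelper factory methods; the container ID and datanode UUID are arbitrary placeholder values for illustration.

// Sketch: hand-built CreateContainer request; 1L and the random UUID are
// placeholder values, not taken from the test above.
ContainerProtos.ContainerCommandRequestProto createContainerRequest =
    ContainerProtos.ContainerCommandRequestProto.newBuilder()
        .setCmdType(ContainerProtos.Type.CreateContainer)
        .setContainerID(1L)
        .setDatanodeUuid(UUID.randomUUID().toString())
        .setCreateContainer(ContainerProtos.CreateContainerRequestProto.getDefaultInstance())
        .build();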

Example 4 with HddsDispatcher

Use of org.apache.hadoop.ozone.container.common.impl.HddsDispatcher in project ozone by apache.

From class TestContainerMetrics, method testContainerMetrics.

@Test
public void testContainerMetrics() throws Exception {
    XceiverServerGrpc server = null;
    XceiverClientGrpc client = null;
    long containerID = ContainerTestHelper.getTestContainerID();
    String path = GenericTestUtils.getRandomizedTempPath();
    try {
        final int interval = 1;
        Pipeline pipeline = MockPipeline.createSingleNodePipeline();
        OzoneConfiguration conf = new OzoneConfiguration();
        conf.setInt(OzoneConfigKeys.DFS_CONTAINER_IPC_PORT, pipeline.getFirstNode().getPort(DatanodeDetails.Port.Name.STANDALONE).getValue());
        conf.setInt(DFSConfigKeysLegacy.DFS_METRICS_PERCENTILES_INTERVALS_KEY, interval);
        DatanodeDetails datanodeDetails = randomDatanodeDetails();
        conf.set(ScmConfigKeys.HDDS_DATANODE_DIR_KEY, path);
        conf.set(OzoneConfigKeys.OZONE_METADATA_DIRS, path);
        VolumeSet volumeSet = new MutableVolumeSet(datanodeDetails.getUuidString(), conf, null, StorageVolume.VolumeType.DATA_VOLUME, null);
        ContainerSet containerSet = new ContainerSet();
        DatanodeStateMachine stateMachine = Mockito.mock(DatanodeStateMachine.class);
        StateContext context = Mockito.mock(StateContext.class);
        Mockito.when(stateMachine.getDatanodeDetails()).thenReturn(datanodeDetails);
        Mockito.when(context.getParent()).thenReturn(stateMachine);
        ContainerMetrics metrics = ContainerMetrics.create(conf);
        Map<ContainerProtos.ContainerType, Handler> handlers = Maps.newHashMap();
        for (ContainerProtos.ContainerType containerType : ContainerProtos.ContainerType.values()) {
            handlers.put(containerType, Handler.getHandlerForContainerType(containerType, conf, context.getParent().getDatanodeDetails().getUuidString(), containerSet, volumeSet, metrics, c -> {
            }));
        }
        HddsDispatcher dispatcher = new HddsDispatcher(conf, containerSet, volumeSet, handlers, context, metrics, null);
        dispatcher.setClusterId(UUID.randomUUID().toString());
        server = new XceiverServerGrpc(datanodeDetails, conf, dispatcher, null);
        client = new XceiverClientGrpc(pipeline, conf);
        server.start();
        client.connect();
        // Create container
        ContainerCommandRequestProto request = ContainerTestHelper.getCreateContainerRequest(containerID, pipeline);
        ContainerCommandResponseProto response = client.sendCommand(request);
        Assert.assertEquals(ContainerProtos.Result.SUCCESS, response.getResult());
        // Write Chunk
        BlockID blockID = ContainerTestHelper.getTestBlockID(containerID);
        ContainerProtos.ContainerCommandRequestProto writeChunkRequest = ContainerTestHelper.getWriteChunkRequest(pipeline, blockID, 1024, null);
        response = client.sendCommand(writeChunkRequest);
        Assert.assertEquals(ContainerProtos.Result.SUCCESS, response.getResult());
        // Read Chunk
        ContainerProtos.ContainerCommandRequestProto readChunkRequest = ContainerTestHelper.getReadChunkRequest(pipeline, writeChunkRequest.getWriteChunk());
        response = client.sendCommand(readChunkRequest);
        Assert.assertEquals(ContainerProtos.Result.SUCCESS, response.getResult());
        MetricsRecordBuilder containerMetrics = getMetrics("StorageContainerMetrics");
        assertCounter("NumOps", 3L, containerMetrics);
        assertCounter("numCreateContainer", 1L, containerMetrics);
        assertCounter("numWriteChunk", 1L, containerMetrics);
        assertCounter("numReadChunk", 1L, containerMetrics);
        assertCounter("bytesWriteChunk", 1024L, containerMetrics);
        assertCounter("bytesReadChunk", 1024L, containerMetrics);
        String sec = interval + "s";
        // Sleep past the configured percentile interval so that the quantile
        // gauges have been published before they are asserted on.
        Thread.sleep((interval + 1) * 1000);
        assertQuantileGauges("WriteChunkNanos" + sec, containerMetrics);
        // Check VolumeIOStats metrics
        List<HddsVolume> volumes = StorageVolumeUtil.getHddsVolumesList(volumeSet.getVolumesList());
        HddsVolume hddsVolume = volumes.get(0);
        MetricsRecordBuilder volumeIOMetrics = getMetrics(hddsVolume.getVolumeIOStats().getMetricsSourceName());
        assertCounter("ReadBytes", 1024L, volumeIOMetrics);
        assertCounter("ReadOpCount", 1L, volumeIOMetrics);
        assertCounter("WriteBytes", 1024L, volumeIOMetrics);
        assertCounter("WriteOpCount", 1L, volumeIOMetrics);
    } finally {
        if (client != null) {
            client.close();
        }
        if (server != null) {
            server.stop();
        }
        // clean up volume dir
        File file = new File(path);
        if (file.exists()) {
            FileUtil.fullyDelete(file);
        }
    }
}
Also used : ScmConfigKeys(org.apache.hadoop.hdds.scm.ScmConfigKeys) HddsDispatcher(org.apache.hadoop.ozone.container.common.impl.HddsDispatcher) OzoneConfiguration(org.apache.hadoop.hdds.conf.OzoneConfiguration) MockPipeline(org.apache.hadoop.hdds.scm.pipeline.MockPipeline) MockDatanodeDetails.randomDatanodeDetails(org.apache.hadoop.hdds.protocol.MockDatanodeDetails.randomDatanodeDetails) BlockID(org.apache.hadoop.hdds.client.BlockID) StateContext(org.apache.hadoop.ozone.container.common.statemachine.StateContext) ContainerCommandResponseProto(org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandResponseProto) ContainerSet(org.apache.hadoop.ozone.container.common.impl.ContainerSet) ContainerProtos(org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos) MutableVolumeSet(org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet) MetricsAsserts.getMetrics(org.apache.hadoop.test.MetricsAsserts.getMetrics) HddsVolume(org.apache.hadoop.ozone.container.common.volume.HddsVolume) MetricsAsserts.assertCounter(org.apache.hadoop.test.MetricsAsserts.assertCounter) Handler(org.apache.hadoop.ozone.container.common.interfaces.Handler) MetricsRecordBuilder(org.apache.hadoop.metrics2.MetricsRecordBuilder) Map(java.util.Map) Timeout(org.junit.rules.Timeout) DatanodeStateMachine(org.apache.hadoop.ozone.container.common.statemachine.DatanodeStateMachine) StorageVolumeUtil(org.apache.hadoop.ozone.container.common.utils.StorageVolumeUtil) FileUtil(org.apache.hadoop.fs.FileUtil) ContainerTestHelper(org.apache.hadoop.ozone.container.ContainerTestHelper) DatanodeDetails(org.apache.hadoop.hdds.protocol.DatanodeDetails) ContainerMetrics(org.apache.hadoop.ozone.container.common.helpers.ContainerMetrics) Pipeline(org.apache.hadoop.hdds.scm.pipeline.Pipeline) Test(org.junit.Test) UUID(java.util.UUID) XceiverServerGrpc(org.apache.hadoop.ozone.container.common.transport.server.XceiverServerGrpc) Maps(com.google.common.collect.Maps) File(java.io.File) XceiverClientGrpc(org.apache.hadoop.hdds.scm.XceiverClientGrpc) Mockito(org.mockito.Mockito) List(java.util.List) StorageVolume(org.apache.hadoop.ozone.container.common.volume.StorageVolume) VolumeSet(org.apache.hadoop.ozone.container.common.volume.VolumeSet) Rule(org.junit.Rule) MetricsAsserts.assertQuantileGauges(org.apache.hadoop.test.MetricsAsserts.assertQuantileGauges) OzoneConfigKeys(org.apache.hadoop.ozone.OzoneConfigKeys) ContainerCommandRequestProto(org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandRequestProto) Assert(org.junit.Assert) GenericTestUtils(org.apache.ozone.test.GenericTestUtils) DFSConfigKeysLegacy(org.apache.hadoop.hdds.DFSConfigKeysLegacy)
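
Because the dispatcher-level counters are cumulative, the metric assertions above extend naturally. The fragment below is a sketch that would slot in right after the existing read-chunk assertions; it assumes the counters are cumulative and that re-reading the same chunk is allowed.

// Sketch: issue the same read request a second time and re-check the metrics.
response = client.sendCommand(readChunkRequest);
Assert.assertEquals(ContainerProtos.Result.SUCCESS, response.getResult());
containerMetrics = getMetrics("StorageContainerMetrics");
assertCounter("NumOps", 4L, containerMetrics);
assertCounter("numReadChunk", 2L, containerMetrics);
assertCounter("bytesReadChunk", 2048L, containerMetrics);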

Example 5 with HddsDispatcher

Use of org.apache.hadoop.ozone.container.common.impl.HddsDispatcher in project ozone by apache.

From class TestSecureContainerServer, method createDispatcher.

private static HddsDispatcher createDispatcher(DatanodeDetails dd, UUID scmId, OzoneConfiguration conf) throws IOException {
    ContainerSet containerSet = new ContainerSet();
    conf.set(HDDS_DATANODE_DIR_KEY, Paths.get(TEST_DIR, "dfs", "data", "hdds", RandomStringUtils.randomAlphabetic(4)).toString());
    conf.set(OZONE_METADATA_DIRS, TEST_DIR);
    VolumeSet volumeSet = new MutableVolumeSet(dd.getUuidString(), conf, null, StorageVolume.VolumeType.DATA_VOLUME, null);
    DatanodeStateMachine stateMachine = Mockito.mock(DatanodeStateMachine.class);
    StateContext context = Mockito.mock(StateContext.class);
    Mockito.when(stateMachine.getDatanodeDetails()).thenReturn(dd);
    Mockito.when(context.getParent()).thenReturn(stateMachine);
    ContainerMetrics metrics = ContainerMetrics.create(conf);
    Map<ContainerProtos.ContainerType, Handler> handlers = Maps.newHashMap();
    for (ContainerProtos.ContainerType containerType : ContainerProtos.ContainerType.values()) {
        handlers.put(containerType, Handler.getHandlerForContainerType(containerType, conf, dd.getUuid().toString(), containerSet, volumeSet, metrics, c -> {
        }));
    }
    HddsDispatcher hddsDispatcher = new HddsDispatcher(conf, containerSet, volumeSet, handlers, context, metrics, TokenVerifier.create(new SecurityConfig(conf), caClient));
    hddsDispatcher.setClusterId(scmId.toString());
    return hddsDispatcher;
}
Also used : MockPipeline(org.apache.hadoop.hdds.scm.pipeline.MockPipeline) XceiverServerSpi(org.apache.hadoop.ozone.container.common.transport.server.XceiverServerSpi) ContainerTestHelper.newGetBlockRequestBuilder(org.apache.hadoop.ozone.container.ContainerTestHelper.newGetBlockRequestBuilder) ContainerCommandResponseProto(org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandResponseProto) ContainerProtocolCalls(org.apache.hadoop.hdds.scm.storage.ContainerProtocolCalls) ContainerSet(org.apache.hadoop.ozone.container.common.impl.ContainerSet) DefaultMetricsSystem(org.apache.hadoop.metrics2.lib.DefaultMetricsSystem) MutableVolumeSet(org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet) Map(java.util.Map) After(org.junit.After) MockDatanodeDetails(org.apache.hadoop.hdds.protocol.MockDatanodeDetails) HddsConfigKeys(org.apache.hadoop.hdds.HddsConfigKeys) CheckedBiConsumer(org.apache.ratis.util.function.CheckedBiConsumer) XceiverServerRatis(org.apache.hadoop.ozone.container.common.transport.server.ratis.XceiverServerRatis) EnumSet(java.util.EnumSet) OZONE_METADATA_DIRS(org.apache.hadoop.hdds.HddsConfigKeys.OZONE_METADATA_DIRS) CertificateClientTestImpl(org.apache.hadoop.ozone.client.CertificateClientTestImpl) AfterClass(org.junit.AfterClass) SecurityConfig(org.apache.hadoop.hdds.security.x509.SecurityConfig) TokenVerifier(org.apache.hadoop.hdds.security.token.TokenVerifier) ContainerTestHelper.getCreateContainerRequest(org.apache.hadoop.ozone.container.ContainerTestHelper.getCreateContainerRequest) OZONE_SECURITY_ENABLED_KEY(org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_SECURITY_ENABLED_KEY) UUID(java.util.UUID) XceiverServerGrpc(org.apache.hadoop.ozone.container.common.transport.server.XceiverServerGrpc) List(java.util.List) OzoneBlockTokenSecretManager(org.apache.hadoop.ozone.security.OzoneBlockTokenSecretManager) XceiverClientSpi(org.apache.hadoop.hdds.scm.XceiverClientSpi) HDDS_BLOCK_TOKEN_ENABLED(org.apache.hadoop.hdds.HddsConfigKeys.HDDS_BLOCK_TOKEN_ENABLED) OzoneConfigKeys(org.apache.hadoop.ozone.OzoneConfigKeys) RandomStringUtils(org.apache.commons.lang3.RandomStringUtils) ContainerCommandRequestProto(org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandRequestProto) GenericTestUtils(org.apache.ozone.test.GenericTestUtils) ContainerTestHelper.getTestBlockID(org.apache.hadoop.ozone.container.ContainerTestHelper.getTestBlockID) ExceptionUtils(org.apache.commons.lang3.exception.ExceptionUtils) HddsDispatcher(org.apache.hadoop.ozone.container.common.impl.HddsDispatcher) RandomUtils(org.apache.commons.lang3.RandomUtils) OzoneConfiguration(org.apache.hadoop.hdds.conf.OzoneConfiguration) RatisTestHelper(org.apache.hadoop.ozone.RatisTestHelper) BlockID(org.apache.hadoop.hdds.client.BlockID) ContainerController(org.apache.hadoop.ozone.container.ozoneimpl.ContainerController) BeforeClass(org.junit.BeforeClass) StateContext(org.apache.hadoop.ozone.container.common.statemachine.StateContext) ContainerTestHelper.newReadChunkRequestBuilder(org.apache.hadoop.ozone.container.ContainerTestHelper.newReadChunkRequestBuilder) ContainerProtos(org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos) ArrayList(java.util.ArrayList) XceiverClientRatis(org.apache.hadoop.hdds.scm.XceiverClientRatis) Handler(org.apache.hadoop.ozone.container.common.interfaces.Handler) ContainerTestHelper.getTestContainerID(org.apache.hadoop.ozone.container.ContainerTestHelper.getTestContainerID) ContainerDispatcher(org.apache.hadoop.ozone.container.common.interfaces.ContainerDispatcher) DatanodeStateMachine(org.apache.hadoop.ozone.container.common.statemachine.DatanodeStateMachine) ContainerTestHelper.newGetCommittedBlockLengthBuilder(org.apache.hadoop.ozone.container.ContainerTestHelper.newGetCommittedBlockLengthBuilder) HddsUtils.isReadOnly(org.apache.hadoop.hdds.HddsUtils.isReadOnly) AccessModeProto(org.apache.hadoop.hdds.protocol.proto.HddsProtos.BlockTokenSecretProto.AccessModeProto) Assert.assertNotNull(org.junit.Assert.assertNotNull) DatanodeDetails(org.apache.hadoop.hdds.protocol.DatanodeDetails) ContainerMetrics(org.apache.hadoop.ozone.container.common.helpers.ContainerMetrics) Assert.assertTrue(org.junit.Assert.assertTrue) IOException(java.io.IOException) ContainerID(org.apache.hadoop.hdds.scm.container.ContainerID) Pipeline(org.apache.hadoop.hdds.scm.pipeline.Pipeline) FileUtils(org.apache.commons.io.FileUtils) Test(org.junit.Test) Token(org.apache.hadoop.security.token.Token) Maps(com.google.common.collect.Maps) File(java.io.File) Assert.assertNotEquals(org.junit.Assert.assertNotEquals) XceiverClientGrpc(org.apache.hadoop.hdds.scm.XceiverClientGrpc) ContainerTokenSecretManager(org.apache.hadoop.hdds.security.token.ContainerTokenSecretManager) ContainerTestHelper.newPutBlockRequestBuilder(org.apache.hadoop.ozone.container.ContainerTestHelper.newPutBlockRequestBuilder) TimeUnit(java.util.concurrent.TimeUnit) Consumer(java.util.function.Consumer) Mockito(org.mockito.Mockito) OzoneBlockTokenIdentifier(org.apache.hadoop.hdds.security.token.OzoneBlockTokenIdentifier) StorageVolume(org.apache.hadoop.ozone.container.common.volume.StorageVolume) VolumeSet(org.apache.hadoop.ozone.container.common.volume.VolumeSet) Paths(java.nio.file.Paths) HDDS_DATANODE_DIR_KEY(org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_DATANODE_DIR_KEY) ContainerTestHelper.newWriteChunkRequestBuilder(org.apache.hadoop.ozone.container.ContainerTestHelper.newWriteChunkRequestBuilder) RpcType(org.apache.ratis.rpc.RpcType) ExitUtils(org.apache.ratis.util.ExitUtils) GRPC(org.apache.ratis.rpc.SupportedRpcType.GRPC) Assert(org.junit.Assert) SUCCESS(org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result.SUCCESS) Assert.assertEquals(org.junit.Assert.assertEquals)
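
createDispatcher only builds the dispatcher; to serve requests it still has to be wired into a transport, exactly as Example 4 does with XceiverServerGrpc. A minimal sketch of that wiring is shown below; dd, scmId and conf stand for values the surrounding test already has, and the fragment is meant to live inside a test method.

// Sketch: plug the freshly built dispatcher into a standalone gRPC server,
// mirroring the XceiverServerGrpc usage from Example 4.
HddsDispatcher dispatcher = createDispatcher(dd, scmId, conf);
XceiverServerGrpc server = new XceiverServerGrpc(dd, conf, dispatcher, null);
server.start();
try {
    // send ContainerCommandRequestProtos through an XceiverClientGrpc here
} finally {
    server.stop();
}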

Aggregations

HddsDispatcher (org.apache.hadoop.ozone.container.common.impl.HddsDispatcher): 8 usages
OzoneConfiguration (org.apache.hadoop.hdds.conf.OzoneConfiguration): 6 usages
Test (org.junit.Test): 6 usages
File (java.io.File): 5 usages
DatanodeDetails (org.apache.hadoop.hdds.protocol.DatanodeDetails): 5 usages
ContainerProtos (org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos): 4 usages
XceiverClientGrpc (org.apache.hadoop.hdds.scm.XceiverClientGrpc): 4 usages
ContainerMetrics (org.apache.hadoop.ozone.container.common.helpers.ContainerMetrics): 4 usages
ContainerSet (org.apache.hadoop.ozone.container.common.impl.ContainerSet): 4 usages
Handler (org.apache.hadoop.ozone.container.common.interfaces.Handler): 4 usages
DatanodeStateMachine (org.apache.hadoop.ozone.container.common.statemachine.DatanodeStateMachine): 4 usages
StateContext (org.apache.hadoop.ozone.container.common.statemachine.StateContext): 4 usages
XceiverServerGrpc (org.apache.hadoop.ozone.container.common.transport.server.XceiverServerGrpc): 4 usages
MutableVolumeSet (org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet): 4 usages
Maps (com.google.common.collect.Maps): 3 usages
IOException (java.io.IOException): 3 usages
List (java.util.List): 3 usages
Map (java.util.Map): 3 usages
UUID (java.util.UUID): 3 usages
ContainerCommandRequestProto (org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandRequestProto): 3 usages