
Example 1 with ContainerNotOpenException

Use of org.apache.hadoop.hdds.scm.container.common.helpers.ContainerNotOpenException in project ozone by apache.

Class ContainerStateMachine, method startTransaction.

@Override
public TransactionContext startTransaction(RaftClientRequest request) throws IOException {
    long startTime = Time.monotonicNowNanos();
    final ContainerCommandRequestProto proto = message2ContainerCommandRequestProto(request.getMessage());
    Preconditions.checkArgument(request.getRaftGroupId().equals(gid));
    try {
        dispatcher.validateContainerCommand(proto);
    } catch (IOException ioe) {
        if (ioe instanceof ContainerNotOpenException) {
            metrics.incNumContainerNotOpenVerifyFailures();
        } else {
            metrics.incNumStartTransactionVerifyFailures();
            LOG.error("startTransaction validation failed on leader", ioe);
        }
        TransactionContext ctxt = TransactionContext.newBuilder()
            .setClientRequest(request)
            .setStateMachine(this)
            .setServerRole(RaftPeerRole.LEADER)
            .build();
        ctxt.setException(ioe);
        return ctxt;
    }
    if (proto.getCmdType() == Type.WriteChunk) {
        final WriteChunkRequestProto write = proto.getWriteChunk();
        // create the log entry proto
        final WriteChunkRequestProto commitWriteChunkProto = WriteChunkRequestProto.newBuilder()
            .setBlockID(write.getBlockID())
            .setChunkData(write.getChunkData())
            .build();
        ContainerCommandRequestProto commitContainerCommandProto =
            ContainerCommandRequestProto.newBuilder(proto)
                .setWriteChunk(commitWriteChunkProto)
                .setTraceID(proto.getTraceID())
                .build();
        Preconditions.checkArgument(write.hasData());
        Preconditions.checkArgument(!write.getData().isEmpty());
        return TransactionContext.newBuilder()
            .setClientRequest(request)
            .setStateMachine(this)
            .setServerRole(RaftPeerRole.LEADER)
            .setStateMachineContext(startTime)
            .setStateMachineData(write.getData())
            .setLogData(commitContainerCommandProto.toByteString())
            .build();
    } else {
        return TransactionContext.newBuilder()
            .setClientRequest(request)
            .setStateMachine(this)
            .setServerRole(RaftPeerRole.LEADER)
            .setStateMachineContext(startTime)
            .setLogData(proto.toByteString())
            .build();
    }
}
Also used: WriteChunkRequestProto (org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.WriteChunkRequestProto), TransactionContext (org.apache.ratis.statemachine.TransactionContext), ContainerCommandRequestProto (org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandRequestProto), IOException (java.io.IOException), ContainerNotOpenException (org.apache.hadoop.hdds.scm.container.common.helpers.ContainerNotOpenException)
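
One detail worth calling out: a failed validation is not thrown out of startTransaction. The exception is attached to the returned TransactionContext, so the leader replies to the client with the error instead of failing the state-machine path. Below is a minimal, self-contained sketch of that validate-then-attach pattern; FakeContext and Validator are hypothetical stand-ins, not the Ratis or Ozone APIs.

import java.io.IOException;

final class ValidationSketch {

    // Stand-in for TransactionContext: carries either log data or an exception.
    static final class FakeContext {
        final byte[] logData;
        final IOException exception;

        FakeContext(byte[] logData, IOException exception) {
            this.logData = logData;
            this.exception = exception;
        }
    }

    interface Validator {
        void validate(byte[] request) throws IOException;
    }

    // Mirrors the control flow above: a validation failure is recorded on the
    // returned context, not thrown to the caller.
    static FakeContext startTransaction(byte[] request, Validator validator) {
        try {
            validator.validate(request);
        } catch (IOException ioe) {
            return new FakeContext(null, ioe);
        }
        return new FakeContext(request, null);
    }

    public static void main(String[] args) {
        Validator rejectEmpty = req -> {
            if (req.length == 0) {
                throw new IOException("container not open");
            }
        };
        System.out.println("valid request carries exception: "
            + (startTransaction(new byte[] {1}, rejectEmpty).exception != null));
        System.out.println("invalid request carries exception: "
            + (startTransaction(new byte[0], rejectEmpty).exception != null));
    }
}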

Example 2 with ContainerNotOpenException

Use of org.apache.hadoop.hdds.scm.container.common.helpers.ContainerNotOpenException in project ozone by apache.

Class TestBlockOutputStreamWithFailuresFlushDelay, method testWatchForCommitWithSingleNodeRatis.

private void testWatchForCommitWithSingleNodeRatis() throws Exception {
    String keyName = getKeyName();
    OzoneOutputStream key = createKey(keyName, ReplicationType.RATIS, 0, ReplicationFactor.ONE);
    int dataLength = maxFlushSize + chunkSize;
    // write more than one chunk's worth of data
    byte[] data1 = ContainerTestHelper.getFixedLengthString(keyString, dataLength).getBytes(UTF_8);
    key.write(data1);
    Assert.assertTrue(key.getOutputStream() instanceof KeyOutputStream);
    KeyOutputStream keyOutputStream = (KeyOutputStream) key.getOutputStream();
    Assert.assertEquals(1, keyOutputStream.getStreamEntries().size());
    OutputStream stream = keyOutputStream.getStreamEntries().get(0).getOutputStream();
    Assert.assertTrue(stream instanceof BlockOutputStream);
    RatisBlockOutputStream blockOutputStream = (RatisBlockOutputStream) stream;
    // We have just written more than flushSize (2 chunks) worth of data; at
    // this point the buffer pool has 4 chunk-sized buffers allocated.
    Assert.assertEquals(4, blockOutputStream.getBufferPool().getSize());
    // Both writtenDataLength and flushedDataLength are updated here.
    Assert.assertEquals(dataLength, blockOutputStream.getWrittenDataLength());
    Assert.assertEquals(maxFlushSize, blockOutputStream.getTotalDataFlushedLength());
    // Since data equal to maxBufferSize has been written, this is a blocking
    // call and waits right here for at least flushSize worth of data to be
    // acked by all servers.
    Assert.assertTrue(blockOutputStream.getTotalAckDataLength() >= flushSize);
    // watchForCommit will clean up at least one entry from the map, where
    // each entry corresponds to flushSize worth of data.
    Assert.assertTrue(blockOutputStream.getCommitIndex2flushedDataMap().size() <= 1);
    // Now do a flush. This will flush the data and update the flush length and
    // the map.
    key.flush();
    // Since the data in the buffer is already flushed, flush here will have
    // no impact on the counters and data structures
    Assert.assertEquals(4, blockOutputStream.getBufferPool().getSize());
    Assert.assertEquals(dataLength, blockOutputStream.getWrittenDataLength());
    Assert.assertEquals(dataLength, blockOutputStream.getTotalDataFlushedLength());
    // flush will make sure one more entry gets updated in the map
    Assert.assertTrue(blockOutputStream.getCommitIndex2flushedDataMap().size() <= 2);
    XceiverClientRatis raftClient = (XceiverClientRatis) blockOutputStream.getXceiverClient();
    Assert.assertEquals(1, raftClient.getCommitInfoMap().size());
    // Close the containers on the Datanode and write more data
    TestHelper.waitForContainerClose(key, cluster);
    // 4 writeChunks (= maxFlushSize) and 2 putBlocks will be discarded here
    // once the exception is hit.
    key.write(data1);
    // As part of handling the exception, the 4 failed writeChunks are
    // rewritten, plus one partial chunk, two putBlocks for flushSize, and one
    // flush for the partial chunk.
    key.flush();
    Assert.assertTrue(HddsClientUtils.checkForException(
        blockOutputStream.getIoException()) instanceof ContainerNotOpenException);
    // Make sure the retryCount is reset after the exception is handled
    Assert.assertEquals(0, keyOutputStream.getRetryCount());
    // commitInfoMap will remain intact as there is no server failure
    Assert.assertEquals(1, raftClient.getCommitInfoMap().size());
    Assert.assertEquals(2, keyOutputStream.getStreamEntries().size());
    // Now close the stream; it will update the ack length after watchForCommit.
    key.close();
    // make sure the bufferPool is empty
    Assert.assertEquals(0, blockOutputStream.getBufferPool().computeBufferData());
    Assert.assertEquals(dataLength, blockOutputStream.getTotalAckDataLength());
    Assert.assertNull(blockOutputStream.getCommitIndex2flushedDataMap());
    Assert.assertEquals(0, keyOutputStream.getLocationInfoList().size());
    // The same data has been written twice.
    String dataString = new String(data1, UTF_8);
    validateData(keyName, dataString.concat(dataString).getBytes(UTF_8));
}
Also used: RatisBlockOutputStream (org.apache.hadoop.hdds.scm.storage.RatisBlockOutputStream), OutputStream (java.io.OutputStream), BlockOutputStream (org.apache.hadoop.hdds.scm.storage.BlockOutputStream), KeyOutputStream (org.apache.hadoop.ozone.client.io.KeyOutputStream), OzoneOutputStream (org.apache.hadoop.ozone.client.io.OzoneOutputStream), XceiverClientRatis (org.apache.hadoop.hdds.scm.XceiverClientRatis), ContainerNotOpenException (org.apache.hadoop.hdds.scm.container.common.helpers.ContainerNotOpenException)
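
The assertion above depends on HddsClientUtils.checkForException digging the root ContainerNotOpenException out of whatever IOException wrappers the client stack added. Below is a minimal sketch of that cause-chain unwrapping pattern, under the assumption that it walks getCause(); NotOpen and findCause here are hypothetical stand-ins, not the real Ozone helper.

import java.io.IOException;

final class UnwrapSketch {

    // Hypothetical stand-in for ContainerNotOpenException.
    static final class NotOpen extends IOException {
        NotOpen(String msg) {
            super(msg);
        }
    }

    // Walk the cause chain and return the first throwable of the given type,
    // or null when none is found.
    static <T extends Throwable> T findCause(Throwable t, Class<T> type) {
        for (Throwable cur = t; cur != null; cur = cur.getCause()) {
            if (type.isInstance(cur)) {
                return type.cast(cur);
            }
        }
        return null;
    }

    public static void main(String[] args) {
        IOException wrapped = new IOException("write failed",
            new IOException("retries exhausted", new NotOpen("container 7 is closed")));
        NotOpen found = findCause(wrapped, NotOpen.class);
        System.out.println(found != null ? found.getMessage() : "not found");
    }
}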

Example 3 with ContainerNotOpenException

Use of org.apache.hadoop.hdds.scm.container.common.helpers.ContainerNotOpenException in project ozone by apache.

Class TestBlockOutputStreamWithFailuresFlushDelay, method test2DatanodesFailure.

@Test
public void test2DatanodesFailure() throws Exception {
    String keyName = getKeyName();
    OzoneOutputStream key = createKey(keyName, ReplicationType.RATIS, 0);
    int dataLength = maxFlushSize + chunkSize;
    // write more than one chunk's worth of data
    byte[] data1 = ContainerTestHelper.getFixedLengthString(keyString, dataLength).getBytes(UTF_8);
    key.write(data1);
    // Since this hits the full-buffer condition, it calls watchForCommit and
    // completes at least a putBlock for the first flushSize worth of data.
    Assert.assertTrue(key.getOutputStream() instanceof KeyOutputStream);
    KeyOutputStream keyOutputStream = (KeyOutputStream) key.getOutputStream();
    OutputStream stream = keyOutputStream.getStreamEntries().get(0).getOutputStream();
    Assert.assertTrue(stream instanceof BlockOutputStream);
    RatisBlockOutputStream blockOutputStream = (RatisBlockOutputStream) stream;
    // We have just written more than flushSize (2 chunks) worth of data; at
    // this point the buffer pool has 4 chunk-sized buffers allocated.
    Assert.assertEquals(4, blockOutputStream.getBufferPool().getSize());
    // Both writtenDataLength and flushedDataLength are updated here.
    Assert.assertEquals(dataLength, blockOutputStream.getWrittenDataLength());
    Assert.assertEquals(maxFlushSize, blockOutputStream.getTotalDataFlushedLength());
    // Since data equal to maxBufferSize has been written, this is a blocking
    // call and waits right here for at least flushSize worth of data to be
    // acked by all servers.
    Assert.assertTrue(blockOutputStream.getTotalAckDataLength() >= flushSize);
    // watchForCommit will clean up at least one entry from the map, where
    // each entry corresponds to flushSize worth of data.
    Assert.assertTrue(blockOutputStream.getCommitIndex2flushedDataMap().size() <= 1);
    // Now do a flush. This will flush the data and update the flush length and
    // the map.
    key.flush();
    // Since the data in the buffer is already flushed, flush here will have
    // no impact on the counters and data structures
    Assert.assertEquals(4, blockOutputStream.getBufferPool().getSize());
    Assert.assertEquals(dataLength, blockOutputStream.getWrittenDataLength());
    Assert.assertEquals(dataLength, blockOutputStream.getTotalDataFlushedLength());
    // flush will make sure one more entry gets updated in the map
    Assert.assertTrue(blockOutputStream.getCommitIndex2flushedDataMap().size() <= 2);
    XceiverClientRatis raftClient = (XceiverClientRatis) blockOutputStream.getXceiverClient();
    Assert.assertEquals(3, raftClient.getCommitInfoMap().size());
    Pipeline pipeline = raftClient.getPipeline();
    cluster.shutdownHddsDatanode(pipeline.getNodes().get(0));
    cluster.shutdownHddsDatanode(pipeline.getNodes().get(1));
    // Write more than the max buffer limit again. This calls watchForCommit
    // again. Since the commit now happens two-way, the commitInfoMap is
    // updated only for the servers that are still alive.
    // 4 writeChunks (= maxFlushSize) and 2 putBlocks will be discarded here
    // once the exception is hit.
    key.write(data1);
    // As part of handling the exception, the 4 failed writeChunks are
    // rewritten, plus one partial chunk, two putBlocks for flushSize, and one
    // flush for the partial chunk.
    key.flush();
    Throwable ioException = HddsClientUtils.checkForException(blockOutputStream.getIoException());
    // Since 2 datanodes went down:
    // a) if the pipeline gets destroyed quickly, we hit
    //    GroupMismatchException;
    // b) if the container is closed but the pipeline is not yet destroyed, we
    //    hit the close-container exception (ContainerNotOpenException);
    // c) if leader election does not finish before the request retry count is
    //    exhausted, we fail with RaftRetryFailureException.
    Assert.assertTrue(ioException instanceof RaftRetryFailureException
        || ioException instanceof GroupMismatchException
        || ioException instanceof ContainerNotOpenException);
    // Make sure the retryCount is reset after the exception is handled
    Assert.assertEquals(0, keyOutputStream.getRetryCount());
    // now close the stream, It will update the ack length after watchForCommit
    key.close();
    // make sure the bufferPool is empty
    Assert.assertEquals(0, blockOutputStream.getBufferPool().computeBufferData());
    Assert.assertEquals(dataLength, blockOutputStream.getTotalAckDataLength());
    Assert.assertNull(blockOutputStream.getCommitIndex2flushedDataMap());
    Assert.assertEquals(0, keyOutputStream.getLocationInfoList().size());
    validateData(keyName, data1);
}
Also used: RatisBlockOutputStream (org.apache.hadoop.hdds.scm.storage.RatisBlockOutputStream), OutputStream (java.io.OutputStream), BlockOutputStream (org.apache.hadoop.hdds.scm.storage.BlockOutputStream), KeyOutputStream (org.apache.hadoop.ozone.client.io.KeyOutputStream), OzoneOutputStream (org.apache.hadoop.ozone.client.io.OzoneOutputStream), GroupMismatchException (org.apache.ratis.protocol.exceptions.GroupMismatchException), XceiverClientRatis (org.apache.hadoop.hdds.scm.XceiverClientRatis), ContainerNotOpenException (org.apache.hadoop.hdds.scm.container.common.helpers.ContainerNotOpenException), Pipeline (org.apache.hadoop.hdds.scm.pipeline.Pipeline), RaftRetryFailureException (org.apache.ratis.protocol.exceptions.RaftRetryFailureException), Test (org.junit.Test)
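
The a)/b)/c) cases above amount to "any one of three failure types is acceptable". Below is a small JUnit 4 sketch of that assertion style (assuming JUnit 4 on the classpath, as in these tests); the three exception classes are hypothetical stand-ins for the Ratis/Ozone ones.

import static org.junit.Assert.assertTrue;

import java.util.Arrays;

final class OneOfSketch {

    // Hypothetical stand-ins for the three acceptable failure types.
    static final class RaftRetryFailure extends Exception { }
    static final class GroupMismatch extends Exception { }
    static final class NotOpen extends Exception { }

    // True when the throwable is an instance of any accepted type.
    static boolean isOneOf(Throwable t, Class<?>... types) {
        return Arrays.stream(types).anyMatch(c -> c.isInstance(t));
    }

    public static void main(String[] args) {
        Throwable observed = new NotOpen();
        assertTrue("unexpected failure type: " + observed,
            isOneOf(observed, RaftRetryFailure.class, GroupMismatch.class, NotOpen.class));
        System.out.println("assertion passed");
    }
}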

Example 4 with ContainerNotOpenException

Use of org.apache.hadoop.hdds.scm.container.common.helpers.ContainerNotOpenException in project ozone by apache.

Class TestBlockOutputStreamWithFailuresFlushDelay, method testWatchForCommitWithCloseContainerException.

private void testWatchForCommitWithCloseContainerException() throws Exception {
    String keyName = getKeyName();
    OzoneOutputStream key = createKey(keyName, ReplicationType.RATIS, 0);
    int dataLength = maxFlushSize + chunkSize;
    // write more than one chunk's worth of data
    byte[] data1 = ContainerTestHelper.getFixedLengthString(keyString, dataLength).getBytes(UTF_8);
    key.write(data1);
    Assert.assertTrue(key.getOutputStream() instanceof KeyOutputStream);
    KeyOutputStream keyOutputStream = (KeyOutputStream) key.getOutputStream();
    Assert.assertEquals(1, keyOutputStream.getStreamEntries().size());
    OutputStream stream = keyOutputStream.getStreamEntries().get(0).getOutputStream();
    Assert.assertTrue(stream instanceof BlockOutputStream);
    RatisBlockOutputStream blockOutputStream = (RatisBlockOutputStream) stream;
    // We have just written more than flushSize (2 chunks) worth of data; at
    // this point the buffer pool has 4 chunk-sized buffers allocated.
    Assert.assertEquals(4, blockOutputStream.getBufferPool().getSize());
    // Both writtenDataLength and flushedDataLength are updated here.
    Assert.assertEquals(dataLength, blockOutputStream.getWrittenDataLength());
    Assert.assertEquals(maxFlushSize, blockOutputStream.getTotalDataFlushedLength());
    // Since data equal to maxBufferSize has been written, this is a blocking
    // call and waits right here for at least flushSize worth of data to be
    // acked by all servers.
    Assert.assertTrue(blockOutputStream.getTotalAckDataLength() >= flushSize);
    // watchForCommit will clean up at least one entry from the map, where
    // each entry corresponds to flushSize worth of data.
    Assert.assertTrue(blockOutputStream.getCommitIndex2flushedDataMap().size() <= 1);
    // Now do a flush. This will flush the data and update the flush length and
    // the map.
    key.flush();
    // flush is a synchronous call; all pending operations will complete.
    // Since the data in the buffer is already flushed, flush here will have
    // no impact on the counters and data structures
    Assert.assertEquals(4, blockOutputStream.getBufferPool().getSize());
    Assert.assertEquals(dataLength, blockOutputStream.getWrittenDataLength());
    Assert.assertEquals(dataLength, blockOutputStream.getTotalDataFlushedLength());
    // flush will make sure one more entry gets updated in the map
    Assert.assertTrue(blockOutputStream.getCommitIndex2flushedDataMap().size() <= 2);
    XceiverClientRatis raftClient = (XceiverClientRatis) blockOutputStream.getXceiverClient();
    Assert.assertEquals(3, raftClient.getCommitInfoMap().size());
    // Close the containers on the Datanode and write more data
    TestHelper.waitForContainerClose(key, cluster);
    key.write(data1);
    // As part of handling the exception, the 4 failed writeChunks are
    // rewritten, plus one partial chunk, two putBlocks for flushSize, and one
    // flush for the partial chunk.
    key.flush();
    Assert.assertEquals(2, keyOutputStream.getStreamEntries().size());
    Assert.assertTrue(HddsClientUtils.checkForException(
        blockOutputStream.getIoException()) instanceof ContainerNotOpenException);
    // Make sure the retryCount is reset after the exception is handled
    Assert.assertEquals(0, keyOutputStream.getRetryCount());
    // commitInfoMap will remain intact as there is no server failure
    Assert.assertEquals(3, raftClient.getCommitInfoMap().size());
    // Now close the stream; it will update the ack length after watchForCommit.
    key.close();
    // make sure the bufferPool is empty
    Assert.assertEquals(0, blockOutputStream.getBufferPool().computeBufferData());
    Assert.assertEquals(dataLength, blockOutputStream.getTotalAckDataLength());
    Assert.assertNull(blockOutputStream.getCommitIndex2flushedDataMap());
    Assert.assertEquals(0, keyOutputStream.getStreamEntries().size());
    // The same data has been written twice.
    String dataString = new String(data1, UTF_8);
    validateData(keyName, dataString.concat(dataString).getBytes(UTF_8));
}
Also used: RatisBlockOutputStream (org.apache.hadoop.hdds.scm.storage.RatisBlockOutputStream), OutputStream (java.io.OutputStream), BlockOutputStream (org.apache.hadoop.hdds.scm.storage.BlockOutputStream), KeyOutputStream (org.apache.hadoop.ozone.client.io.KeyOutputStream), OzoneOutputStream (org.apache.hadoop.ozone.client.io.OzoneOutputStream), XceiverClientRatis (org.apache.hadoop.hdds.scm.XceiverClientRatis), ContainerNotOpenException (org.apache.hadoop.hdds.scm.container.common.helpers.ContainerNotOpenException)
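
What the comments in this test describe -- buffered writeChunks being replayed to a fresh block entry once ContainerNotOpenException surfaces -- reduces to a buffer-and-replay loop. The sketch below is a deliberate simplification of KeyOutputStream's retry path, with a hypothetical BlockSink interface; it is not the real implementation.

import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

final class ReplaySketch {

    // Hypothetical destination for chunk writes (a stand-in for one block's
    // output stream).
    interface BlockSink {
        void write(byte[] chunk) throws IOException;
    }

    // Keep chunks buffered until acknowledged so they can be replayed to a
    // fresh block when the current container is closed underneath us.
    static void writeWithReplay(List<byte[]> chunks, BlockSink first, BlockSink fallback)
            throws IOException {
        List<byte[]> unacked = new ArrayList<>();
        BlockSink current = first;
        for (byte[] chunk : chunks) {
            unacked.add(chunk);
            try {
                current.write(chunk);
                unacked.remove(chunk); // acked: drop from the replay buffer
            } catch (IOException containerClosed) {
                current = fallback;    // allocate a new block (simplified)
                for (byte[] pending : unacked) {
                    current.write(pending); // rewrite the failed chunks
                }
                unacked.clear();
            }
        }
    }

    public static void main(String[] args) throws IOException {
        BlockSink closing = chunk -> {
            throw new IOException("container not open");
        };
        BlockSink healthy = chunk -> System.out.println("wrote " + chunk.length + " bytes");
        writeWithReplay(Arrays.asList(new byte[100], new byte[100]), closing, healthy);
    }
}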

Example 5 with ContainerNotOpenException

Use of org.apache.hadoop.hdds.scm.container.common.helpers.ContainerNotOpenException in project ozone by apache.

Class TestBlockOutputStreamWithFailures, method testFailureWithPrimeSizedData.

private void testFailureWithPrimeSizedData() throws Exception {
    String keyName = getKeyName();
    OzoneOutputStream key = createKey(keyName, ReplicationType.RATIS, 0);
    int dataLength = maxFlushSize + 69;
    // write more than one chunk's worth of data
    byte[] data1 = ContainerTestHelper.getFixedLengthString(keyString, dataLength).getBytes(UTF_8);
    key.write(data1);
    Assert.assertTrue(key.getOutputStream() instanceof KeyOutputStream);
    KeyOutputStream keyOutputStream = (KeyOutputStream) key.getOutputStream();
    Assert.assertEquals(1, keyOutputStream.getStreamEntries().size());
    OutputStream stream = keyOutputStream.getStreamEntries().get(0).getOutputStream();
    Assert.assertTrue(stream instanceof BlockOutputStream);
    RatisBlockOutputStream blockOutputStream = (RatisBlockOutputStream) stream;
    Assert.assertEquals(4, blockOutputStream.getBufferPool().getSize());
    Assert.assertEquals(dataLength, blockOutputStream.getWrittenDataLength());
    Assert.assertEquals(400, blockOutputStream.getTotalDataFlushedLength());
    // Now do a flush. This will flush the data and update the flush length and
    // the map.
    key.flush();
    Assert.assertEquals(dataLength, blockOutputStream.getTotalDataFlushedLength());
    XceiverClientRatis raftClient = (XceiverClientRatis) blockOutputStream.getXceiverClient();
    Assert.assertEquals(3, raftClient.getCommitInfoMap().size());
    // Close the containers on the Datanode and write more data
    TestHelper.waitForContainerClose(key, cluster);
    key.write(data1);
    // As part of handling the exception, the 2 failed writeChunks are
    // rewritten, plus 1 putBlock for the flush and one flush for the partial
    // chunk.
    key.flush();
    Assert.assertTrue(HddsClientUtils.checkForException(
        blockOutputStream.getIoException()) instanceof ContainerNotOpenException);
    // Make sure the retryCount is reset after the exception is handled
    Assert.assertEquals(0, keyOutputStream.getRetryCount());
    // commitInfoMap will remain intact as there is no server failure
    Assert.assertEquals(3, raftClient.getCommitInfoMap().size());
    // Now close the stream; it will update the ack length after watchForCommit.
    key.close();
    // make sure the bufferPool is empty
    Assert.assertEquals(0, blockOutputStream.getBufferPool().computeBufferData());
    Assert.assertEquals(dataLength, blockOutputStream.getTotalAckDataLength());
    Assert.assertNull(blockOutputStream.getCommitIndex2flushedDataMap());
    Assert.assertEquals(0, keyOutputStream.getLocationInfoList().size());
    // The same data has been written twice.
    String dataString = new String(data1, UTF_8);
    validateData(keyName, dataString.concat(dataString).getBytes(UTF_8));
}
Also used: RatisBlockOutputStream (org.apache.hadoop.hdds.scm.storage.RatisBlockOutputStream), OutputStream (java.io.OutputStream), BlockOutputStream (org.apache.hadoop.hdds.scm.storage.BlockOutputStream), KeyOutputStream (org.apache.hadoop.ozone.client.io.KeyOutputStream), OzoneOutputStream (org.apache.hadoop.ozone.client.io.OzoneOutputStream), XceiverClientRatis (org.apache.hadoop.hdds.scm.XceiverClientRatis), ContainerNotOpenException (org.apache.hadoop.hdds.scm.container.common.helpers.ContainerNotOpenException)
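
The constants asserted in this example (4 buffers, a flushed length of 400 before key.flush()) line up with the configuration these BlockOutputStream tests typically run with. The concrete sizes below (chunkSize = 100, flushSize = 2 * chunkSize, maxFlushSize = 2 * flushSize) are an assumption for illustration, not values shown in the excerpt above.

// A worked check of the asserted constants under the assumed configuration.
final class BufferMathSketch {
    public static void main(String[] args) {
        int chunkSize = 100;                 // assumed, not taken from the test
        int flushSize = 2 * chunkSize;       // 200
        int maxFlushSize = 2 * flushSize;    // 400
        int dataLength = maxFlushSize + 69;  // 469, the "prime sized" write

        // The pool allocates chunk-sized buffers: 400 / 100 == 4, matching
        // assertEquals(4, blockOutputStream.getBufferPool().getSize()).
        System.out.println("buffers = " + (maxFlushSize / chunkSize));

        // Before the explicit flush, only whole maxFlushSize batches have been
        // pushed out: (469 / 400) * 400 == 400, matching
        // assertEquals(400, blockOutputStream.getTotalDataFlushedLength()).
        System.out.println("auto-flushed = " + (dataLength / maxFlushSize) * maxFlushSize);
    }
}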

Aggregations

ContainerNotOpenException (org.apache.hadoop.hdds.scm.container.common.helpers.ContainerNotOpenException) — 14
KeyOutputStream (org.apache.hadoop.ozone.client.io.KeyOutputStream) — 12
OzoneOutputStream (org.apache.hadoop.ozone.client.io.OzoneOutputStream) — 12
OutputStream (java.io.OutputStream) — 11
BlockOutputStream (org.apache.hadoop.hdds.scm.storage.BlockOutputStream) — 11
XceiverClientRatis (org.apache.hadoop.hdds.scm.XceiverClientRatis) — 10
RatisBlockOutputStream (org.apache.hadoop.hdds.scm.storage.RatisBlockOutputStream) — 10
IOException (java.io.IOException) — 4
Pipeline (org.apache.hadoop.hdds.scm.pipeline.Pipeline) — 4
Test (org.junit.Test) — 4
ArrayList (java.util.ArrayList) — 2
XceiverClientSpi (org.apache.hadoop.hdds.scm.XceiverClientSpi) — 2
GroupMismatchException (org.apache.ratis.protocol.exceptions.GroupMismatchException) — 2
RaftRetryFailureException (org.apache.ratis.protocol.exceptions.RaftRetryFailureException) — 2
Path (java.nio.file.Path) — 1
CountDownLatch (java.util.concurrent.CountDownLatch) — 1
AtomicInteger (java.util.concurrent.atomic.AtomicInteger) — 1
ContainerProtos (org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos) — 1
ContainerCommandRequestProto (org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandRequestProto) — 1
State (org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerDataProto.State) — 1