Use of org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException in project Ozone by Apache.
The class TestDeleteWithSlowFollower, method testDeleteKeyWithSlowFollower.
/**
 * The test simulates a slow follower by first writing a key, thereby creating
 * a container on 3 dns of the cluster. Then, a dn is shut down and a close
 * container cmd gets issued so that the container gets closed on the leader
 * and the alive follower. The key is then deleted, and the stopped node is
 * started up again so that it rejoins the ring and resumes applying
 * transactions from where it left off by fetching the entries from the
 * leader. Until this follower catches up and its replica gets closed, the
 * data is not deleted from any of the nodes which have the closed replica.
 */
@Test
public void testDeleteKeyWithSlowFollower() throws Exception {
  String keyName = "ratis";
  OzoneOutputStream key = objectStore.getVolume(volumeName)
      .getBucket(bucketName)
      .createKey(keyName, 0, ReplicationType.RATIS, ReplicationFactor.THREE,
          new HashMap<>());
  byte[] testData = "ratis".getBytes(UTF_8);
  // First write and flush creates a container in the datanode
  key.write(testData);
  key.flush();
  KeyOutputStream groupOutputStream = (KeyOutputStream) key.getOutputStream();
  List<OmKeyLocationInfo> locationInfoList =
      groupOutputStream.getLocationInfoList();
  Assume.assumeTrue("Expected exactly a single location, but got: "
      + locationInfoList.size(), 1 == locationInfoList.size());
  OmKeyLocationInfo omKeyLocationInfo = locationInfoList.get(0);
  long containerID = omKeyLocationInfo.getContainerID();
  // A container is created on the datanode. Now figure out a follower node
  // to kill/slow down.
  HddsDatanodeService follower = null;
  HddsDatanodeService leader = null;
  List<Pipeline> pipelineList = cluster.getStorageContainerManager()
      .getPipelineManager()
      .getPipelines(RatisReplicationConfig.getInstance(THREE));
  Assume.assumeTrue(pipelineList.size() >= FACTOR_THREE_PIPELINE_COUNT);
  Pipeline pipeline = pipelineList.get(0);
  for (HddsDatanodeService dn : cluster.getHddsDatanodes()) {
    if (RatisTestHelper.isRatisFollower(dn, pipeline)) {
      follower = dn;
    } else if (RatisTestHelper.isRatisLeader(dn, pipeline)) {
      leader = dn;
    }
  }
  Assume.assumeNotNull(follower, leader);
  // ensure that the chosen follower is still a follower
  Assume.assumeTrue(RatisTestHelper.isRatisFollower(follower, pipeline));
  // shutdown the follower node
  cluster.shutdownHddsDatanode(follower.getDatanodeDetails());
  key.write(testData);
  key.close();
  // now move the container to the closed state on the datanodes
  XceiverClientSpi xceiverClient =
      xceiverClientManager.acquireClient(pipeline);
  ContainerProtos.ContainerCommandRequestProto.Builder request =
      ContainerProtos.ContainerCommandRequestProto.newBuilder();
  request.setDatanodeUuid(pipeline.getFirstNode().getUuidString());
  request.setCmdType(ContainerProtos.Type.CloseContainer);
  request.setContainerID(containerID);
  request.setCloseContainer(
      ContainerProtos.CloseContainerRequestProto.getDefaultInstance());
  xceiverClient.sendCommand(request.build());
  ContainerStateMachine stateMachine = (ContainerStateMachine)
      RatisTestHelper.getStateMachine(leader, pipeline);
  OmKeyArgs keyArgs = new OmKeyArgs.Builder()
      .setVolumeName(volumeName)
      .setBucketName(bucketName)
      .setReplicationConfig(RatisReplicationConfig.getInstance(THREE))
      .setKeyName(keyName)
      .build();
  OmKeyInfo info = cluster.getOzoneManager().lookupKey(keyArgs);
  BlockID blockID = info.getKeyLocationVersions().get(0)
      .getLocationList().get(0).getBlockID();
  OzoneContainer ozoneContainer;
  final DatanodeStateMachine dnStateMachine =
      leader.getDatanodeStateMachine();
  ozoneContainer = dnStateMachine.getContainer();
  KeyValueHandler keyValueHandler = (KeyValueHandler) ozoneContainer
      .getDispatcher()
      .getHandler(ContainerProtos.ContainerType.KeyValueContainer);
  Container container = ozoneContainer.getContainerSet()
      .getContainer(blockID.getContainerID());
  KeyValueContainerData containerData =
      ((KeyValueContainerData) container.getContainerData());
  long delTrxId = containerData.getDeleteTransactionId();
  long numPendingDeletionBlocks = containerData.getNumPendingDeletionBlocks();
  BlockData blockData =
      keyValueHandler.getBlockManager().getBlock(container, blockID);
  // cluster.getOzoneManager().deleteKey(keyArgs);
  client.getObjectStore().getVolume(volumeName)
      .getBucket(bucketName).deleteKey("ratis");
  GenericTestUtils.waitFor(() -> {
    try {
      if (SCMHAUtils.isSCMHAEnabled(cluster.getConf())) {
        cluster.getStorageContainerManager().getScmHAManager()
            .asSCMHADBTransactionBuffer().flush();
      }
      return dnStateMachine.getCommandDispatcher()
          .getDeleteBlocksCommandHandler().getInvocationCount() >= 1;
    } catch (IOException e) {
      return false;
    }
  }, 500, 100000);
  Assert.assertTrue(containerData.getDeleteTransactionId() > delTrxId);
  Assert.assertTrue(containerData.getNumPendingDeletionBlocks()
      > numPendingDeletionBlocks);
  // the deleteBlock handler has been invoked, but the chunk must still be
  // readable because the stopped follower has not caught up yet
  try {
    for (ContainerProtos.ChunkInfo chunkInfo : blockData.getChunks()) {
      keyValueHandler.getChunkManager().readChunk(container, blockID,
          ChunkInfo.getFromProtoBuf(chunkInfo), null);
    }
  } catch (IOException ioe) {
    Assert.fail("Exception should not be thrown.");
  }
  long numReadStateMachineOps =
      stateMachine.getMetrics().getNumReadStateMachineOps();
  Assert.assertTrue(
      stateMachine.getMetrics().getNumReadStateMachineFails() == 0);
  stateMachine.evictStateMachineCache();
  cluster.restartHddsDatanode(follower.getDatanodeDetails(), false);
  // wait for the raft server to come up and join the ratis ring
  Thread.sleep(10000);
  // Make sure the readStateMachine call got triggered after the follower
  // caught up
  Assert.assertTrue(stateMachine.getMetrics().getNumReadStateMachineOps()
      > numReadStateMachineOps);
  Assert.assertTrue(
      stateMachine.getMetrics().getNumReadStateMachineFails() == 0);
  // wait for the chunk to get deleted now
  Thread.sleep(10000);
  for (HddsDatanodeService dn : cluster.getHddsDatanodes()) {
    keyValueHandler = (KeyValueHandler) dn.getDatanodeStateMachine()
        .getContainer().getDispatcher()
        .getHandler(ContainerProtos.ContainerType.KeyValueContainer);
    // make sure the chunk is now deleted on all the dns
    try {
      for (ContainerProtos.ChunkInfo chunkInfo : blockData.getChunks()) {
        keyValueHandler.getChunkManager().readChunk(container, blockID,
            ChunkInfo.getFromProtoBuf(chunkInfo), null);
      }
      Assert.fail("Expected exception is not thrown");
    } catch (IOException ioe) {
      Assert.assertTrue(ioe instanceof StorageContainerException);
      Assert.assertTrue(((StorageContainerException) ioe).getResult()
          == ContainerProtos.Result.UNABLE_TO_FIND_CHUNK);
    }
  }
}
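The closing loop shows the usual way callers distinguish a StorageContainerException from a generic IOException: catch it and inspect the result code. A minimal sketch of that check in isolation, assuming only the getResult() accessor used above; assertChunkDeleted and readOneChunk are hypothetical names introduced for illustration:

// Minimal sketch of the result-code check from the final loop above.
// readOneChunk is a hypothetical Callable wrapping the same readChunk call.
static void assertChunkDeleted(Callable<Void> readOneChunk) throws Exception {
  try {
    readOneChunk.call();
    Assert.fail("Expected StorageContainerException");
  } catch (StorageContainerException sce) {
    // The result code pinpoints the server-side failure.
    Assert.assertEquals(ContainerProtos.Result.UNABLE_TO_FIND_CHUNK,
        sce.getResult());
  }
}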
Use of org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException in project Ozone by Apache.
The class GeneratorDatanode, method createContainer.
private KeyValueContainer createContainer(long containerId) throws IOException {
  ContainerLayoutVersion layoutVersion =
      ContainerLayoutVersion.getConfiguredVersion(config);
  KeyValueContainerData keyValueContainerData =
      new KeyValueContainerData(containerId, layoutVersion,
          getContainerSize(config), getPrefix(), datanodeId);
  KeyValueContainer keyValueContainer =
      new KeyValueContainer(keyValueContainerData, config);
  try {
    keyValueContainer.create(volumeSet, volumeChoosingPolicy, scmId);
  } catch (StorageContainerException ex) {
    throw new RuntimeException(ex);
  }
  return keyValueContainer;
}
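A hypothetical driver for the method above (createContainers, firstContainerId, and count are made-up names, not part of GeneratorDatanode): note that only the StorageContainerException raised by create() is wrapped as an unchecked RuntimeException, while the method still declares IOException for other failures.

// Hypothetical bulk-generation loop. A StorageContainerException during
// container creation surfaces as a RuntimeException and aborts the run;
// any other IOException propagates as a checked exception.
private void createContainers(long firstContainerId, long count)
    throws IOException {
  for (long id = firstContainerId; id < firstContainerId + count; id++) {
    KeyValueContainer container = createContainer(id);
    // ... populate the container with generated blocks and chunks ...
  }
}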
Use of org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException in project Ozone by Apache.
The class ChunkInputStream, method readChunk.
/**
 * Sends an RPC call to get the chunk from the container.
 */
@VisibleForTesting
protected ByteBuffer[] readChunk(ChunkInfo readChunkInfo) throws IOException {
  ReadChunkResponseProto readChunkResponse;
  try {
    List<CheckedBiFunction> validators =
        ContainerProtocolCalls.getValidatorList();
    validators.add(validator);
    readChunkResponse = ContainerProtocolCalls.readChunk(xceiverClient,
        readChunkInfo, blockID, validators, token);
  } catch (IOException e) {
    if (e instanceof StorageContainerException) {
      throw e;
    }
    throw new IOException("Unexpected OzoneException: " + e.toString(), e);
  }
  if (readChunkResponse.hasData()) {
    return readChunkResponse.getData().asReadOnlyByteBufferList()
        .toArray(new ByteBuffer[0]);
  } else if (readChunkResponse.hasDataBuffers()) {
    List<ByteString> buffersList =
        readChunkResponse.getDataBuffers().getBuffersList();
    return BufferUtils.getReadOnlyByteBuffersArray(buffersList);
  } else {
    throw new IOException("Unexpected error while reading chunk data "
        + "from container. No data returned.");
  }
}
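The catch block deliberately rethrows StorageContainerException untouched while wrapping everything else, so a caller can branch on the failure type. A sketch of such a caller; readWithDiagnostics and handleContainerError are hypothetical names, not part of ChunkInputStream:

// Hypothetical caller of readChunk(): container errors reported by the
// datanode keep their result code, while any other IOException arrives
// pre-wrapped with an "Unexpected OzoneException" message.
private ByteBuffer[] readWithDiagnostics(ChunkInfo chunkInfo)
    throws IOException {
  try {
    return readChunk(chunkInfo);
  } catch (StorageContainerException sce) {
    // e.g. CONTAINER_NOT_FOUND or UNABLE_TO_FIND_CHUNK from the server
    handleContainerError(sce.getResult());
    throw sce;
  }
}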
Use of org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException in project Ozone by Apache.
The class TestBlockInputStream, method testRefreshOnReadFailureAfterUnbuffer.
@Test
public void testRefreshOnReadFailureAfterUnbuffer() throws Exception {
  // GIVEN
  BlockID blockID = new BlockID(new ContainerBlockID(1, 1));
  Pipeline pipeline = MockPipeline.createSingleNodePipeline();
  Pipeline newPipeline = MockPipeline.createSingleNodePipeline();
  XceiverClientFactory clientFactory = mock(XceiverClientFactory.class);
  XceiverClientSpi client = mock(XceiverClientSpi.class);
  when(clientFactory.acquireClientForReadData(pipeline)).thenReturn(client);
  final int len = 200;
  final ChunkInputStream stream = mock(ChunkInputStream.class);
  when(stream.read(any(), anyInt(), anyInt()))
      .thenThrow(new StorageContainerException("test", CONTAINER_NOT_FOUND))
      .thenReturn(len);
  when(stream.getRemaining()).thenReturn((long) len);
  when(refreshPipeline.apply(blockID)).thenReturn(newPipeline);
  BlockInputStream subject = new BlockInputStream(blockID, blockSize,
      pipeline, null, false, clientFactory, refreshPipeline) {
    @Override
    protected List<ChunkInfo> getChunkInfos() throws IOException {
      acquireClient();
      return chunks;
    }

    @Override
    protected ChunkInputStream createChunkInputStream(ChunkInfo chunkInfo) {
      return stream;
    }
  };
  try {
    subject.initialize();
    subject.unbuffer();
    // WHEN
    byte[] b = new byte[len];
    int bytesRead = subject.read(b, 0, len);
    // THEN
    Assert.assertEquals(len, bytesRead);
    verify(refreshPipeline).apply(blockID);
    verify(clientFactory).acquireClientForReadData(pipeline);
    verify(clientFactory).releaseClient(client, false);
  } finally {
    subject.close();
  }
}
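The core of this test is the consecutive stubbing on the mocked stream: the first read fails, every later read succeeds, which is what forces exactly one pipeline refresh. A minimal sketch of the same idiom; it is legal to thenThrow a checked exception here only because ChunkInputStream.read declares throws IOException and StorageContainerException extends IOException.

// Minimal sketch of fail-once-then-recover stubbing with Mockito: call one
// throws a StorageContainerException, every subsequent call returns len.
ChunkInputStream stream = mock(ChunkInputStream.class);
when(stream.read(any(), anyInt(), anyInt()))
    .thenThrow(new StorageContainerException("test", CONTAINER_NOT_FOUND))
    .thenReturn(len);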
Use of org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException in project Ozone by Apache.
The class TestBlockInputStream, method testRefreshExitsIfPipelineHasSameNodes.
@Test
public void testRefreshExitsIfPipelineHasSameNodes() throws Exception {
  // GIVEN
  BlockID blockID = new BlockID(new ContainerBlockID(1, 1));
  Pipeline pipeline = MockPipeline.createSingleNodePipeline();
  final int len = 200;
  final ChunkInputStream stream = mock(ChunkInputStream.class);
  when(stream.read(any(), anyInt(), anyInt()))
      .thenThrow(new StorageContainerException("test", CONTAINER_UNHEALTHY));
  when(stream.getRemaining()).thenReturn((long) len);
  when(refreshPipeline.apply(blockID))
      .thenAnswer(invocation -> samePipelineWithNewId(pipeline));
  BlockInputStream subject = new DummyBlockInputStream(blockID, blockSize,
      pipeline, null, false, null, refreshPipeline, chunks, null) {
    @Override
    protected ChunkInputStream createChunkInputStream(ChunkInfo chunkInfo) {
      return stream;
    }
  };
  try {
    subject.initialize();
    // WHEN
    byte[] b = new byte[len];
    LambdaTestUtils.intercept(StorageContainerException.class,
        () -> subject.read(b, 0, len));
    // THEN
    verify(refreshPipeline).apply(blockID);
  } finally {
    subject.close();
  }
}
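Here the refresh function hands back a pipeline with a fresh id but the same datanodes, so the stream gives up instead of retrying against identical nodes, and intercept() sees the StorageContainerException. A hypothetical sketch of what the samePipelineWithNewId helper referenced above could look like; the real helper lives elsewhere in the test class, and the copy-builder on Pipeline is an assumption:

// Hypothetical sketch: same node set, fresh pipeline id, so a refresh
// yields no new datanodes to retry against.
private Pipeline samePipelineWithNewId(Pipeline pipeline) {
  return Pipeline.newBuilder(pipeline)
      .setId(PipelineID.randomId())
      .build();
}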