use of org.apache.hadoop.hdds.scm.XceiverClientRatis in project ozone by apache.
the class TestBlockOutputStreamWithFailures method testExceptionDuringClose.
private void testExceptionDuringClose() throws Exception {
String keyName = getKeyName();
OzoneOutputStream key = createKey(keyName, ReplicationType.RATIS, 0);
int dataLength = 167;
// write data spanning more than one chunk
byte[] data1 = ContainerTestHelper.getFixedLengthString(keyString, dataLength).getBytes(UTF_8);
key.write(data1);
Assert.assertTrue(key.getOutputStream() instanceof KeyOutputStream);
KeyOutputStream keyOutputStream = (KeyOutputStream) key.getOutputStream();
Assert.assertEquals(1, keyOutputStream.getStreamEntries().size());
OutputStream stream = keyOutputStream.getStreamEntries().get(0).getOutputStream();
Assert.assertTrue(stream instanceof BlockOutputStream);
RatisBlockOutputStream blockOutputStream = (RatisBlockOutputStream) stream;
Assert.assertEquals(2, blockOutputStream.getBufferPool().getSize());
Assert.assertEquals(dataLength, blockOutputStream.getWrittenDataLength());
Assert.assertEquals(0, blockOutputStream.getTotalDataFlushedLength());
Assert.assertEquals(0, blockOutputStream.getTotalAckDataLength());
Assert.assertEquals(0, blockOutputStream.getCommitIndex2flushedDataMap().size());
// Now do a flush. This flushes the buffered data and updates the flush
// length.
key.flush();
// The buffer pool size and the written length are unchanged by the flush,
// while the total flushed length catches up to the written length
Assert.assertEquals(2, blockOutputStream.getBufferPool().getSize());
Assert.assertEquals(dataLength, blockOutputStream.getWrittenDataLength());
Assert.assertEquals(dataLength, blockOutputStream.getTotalDataFlushedLength());
// the flush waits for the commit ack, so the commit-index map is empty again
Assert.assertEquals(0, blockOutputStream.getCommitIndex2flushedDataMap().size());
XceiverClientRatis raftClient = (XceiverClientRatis) blockOutputStream.getXceiverClient();
Assert.assertEquals(3, raftClient.getCommitInfoMap().size());
// Close the containers on the Datanode and write more data
TestHelper.waitForContainerClose(key, cluster);
key.write(data1);
// commitInfoMap will remain intact as there is no server failure
Assert.assertEquals(3, raftClient.getCommitInfoMap().size());
// now close the stream; it will hit an exception since the container is closed
key.close();
Assert.assertTrue(HddsClientUtils.checkForException(blockOutputStream.getIoException()) instanceof ContainerNotOpenException);
// Make sure the retryCount is reset after the exception is handled
Assert.assertEquals(0, keyOutputStream.getRetryCount());
// make sure the bufferPool is empty
Assert.assertEquals(0, blockOutputStream.getBufferPool().computeBufferData());
Assert.assertEquals(dataLength, blockOutputStream.getTotalAckDataLength());
Assert.assertNull(blockOutputStream.getCommitIndex2flushedDataMap());
Assert.assertEquals(0, keyOutputStream.getStreamEntries().size());
// the same data was written twice, once before and once after the container close
String dataString = new String(data1, UTF_8);
validateData(keyName, dataString.concat(dataString).getBytes(UTF_8));
}
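The close-time assertion above depends on unwrapping the cause chain of the captured IOException. A simplified sketch of such an unwrap helper follows; it walks getCause() until it finds a recognized exception type. The class name and the KNOWN list here are illustrative assumptions, not the actual HddsClientUtils implementation.

import java.util.Arrays;
import java.util.List;

// Illustrative sketch of a cause-chain unwrap helper in the spirit of
// HddsClientUtils.checkForException; the KNOWN list is an assumption, not
// the real exception list used by Ozone.
final class CheckForExceptionSketch {
  private static final List<Class<? extends Throwable>> KNOWN = Arrays.asList(
      IllegalStateException.class /* stand-in for e.g. ContainerNotOpenException */);

  static Throwable checkForException(Exception e) {
    for (Throwable t = e; t != null; t = t.getCause()) {
      for (Class<? extends Throwable> cls : KNOWN) {
        if (cls.isInstance(t)) {
          return t; // first recognized cause wins
        }
      }
    }
    return null; // nothing recognized in the chain
  }
}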
use of org.apache.hadoop.hdds.scm.XceiverClientRatis in project ozone by apache.
the class TestReadRetries method testPutKeyAndGetKeyThreeNodes.
@Test
public void testPutKeyAndGetKeyThreeNodes() throws Exception {
String volumeName = UUID.randomUUID().toString();
String bucketName = UUID.randomUUID().toString();
String value = "sample value";
store.createVolume(volumeName);
OzoneVolume volume = store.getVolume(volumeName);
volume.createBucket(bucketName);
OzoneBucket bucket = volume.getBucket(bucketName);
String keyName = "a/b/c/" + UUID.randomUUID().toString();
OzoneOutputStream out = bucket.createKey(keyName, value.getBytes(UTF_8).length, ReplicationType.RATIS, ReplicationFactor.THREE, new HashMap<>());
KeyOutputStream groupOutputStream = (KeyOutputStream) out.getOutputStream();
XceiverClientFactory factory = groupOutputStream.getXceiverClientFactory();
out.write(value.getBytes(UTF_8));
out.close();
// First, confirm the key info from the client matches the info in OM.
OmKeyArgs.Builder builder = new OmKeyArgs.Builder();
builder.setVolumeName(volumeName).setBucketName(bucketName).setKeyName(keyName).setRefreshPipeline(true);
OmKeyLocationInfo keyInfo = ozoneManager.lookupKey(builder.build()).getKeyLocationVersions().get(0).getBlocksLatestVersionOnly().get(0);
long containerID = keyInfo.getContainerID();
long localID = keyInfo.getLocalID();
OzoneKeyDetails keyDetails = bucket.getKey(keyName);
Assert.assertEquals(keyName, keyDetails.getName());
List<OzoneKeyLocation> keyLocations = keyDetails.getOzoneKeyLocations();
Assert.assertEquals(1, keyLocations.size());
Assert.assertEquals(containerID, keyLocations.get(0).getContainerID());
Assert.assertEquals(localID, keyLocations.get(0).getLocalID());
// Make sure that the data size matched.
Assert.assertEquals(value.getBytes(UTF_8).length, keyLocations.get(0).getLength());
ContainerInfo container = cluster.getStorageContainerManager().getContainerManager().getContainer(ContainerID.valueOf(containerID));
Pipeline pipeline = cluster.getStorageContainerManager().getPipelineManager().getPipeline(container.getPipelineID());
List<DatanodeDetails> datanodes = pipeline.getNodes();
DatanodeDetails datanodeDetails = datanodes.get(0);
Assert.assertNotNull(datanodeDetails);
XceiverClientSpi clientSpi = factory.acquireClient(pipeline);
Assert.assertTrue(clientSpi instanceof XceiverClientRatis);
XceiverClientRatis ratisClient = (XceiverClientRatis) clientSpi;
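// watchForCommit blocks until the given log index (here the block's commit
// sequence id) is committed on the Raft servers, so the data has been
// replicated before datanodes are shut down below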
ratisClient.watchForCommit(keyInfo.getBlockCommitSequenceId());
// shutdown the datanode
cluster.shutdownHddsDatanode(datanodeDetails);
// try to read; this should succeed
readKey(bucket, keyName, value);
// read intermediate directory
verifyIntermediateDir(bucket, "a/b/c");
// shutdown the second datanode
datanodeDetails = datanodes.get(1);
cluster.shutdownHddsDatanode(datanodeDetails);
// we should still be able to read via the Standalone protocol
// try to read
readKey(bucket, keyName, value);
// shutdown the 3rd datanode
datanodeDetails = datanodes.get(2);
cluster.shutdownHddsDatanode(datanodeDetails);
try {
// try to read
readKey(bucket, keyName, value);
fail("Expected exception not thrown");
} catch (IOException e) {
// an IOException is expected here since none of the replicas
// are available
}
factory.releaseClient(clientSpi, false);
}
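Note the manual acquire/release pairing around the client: acquireClient bumps a reference count (asserted as 1 in other tests on this page) and releaseClient drops it. A minimal sketch of the safer try/finally form of that pattern, reusing the factory and pipeline objects from this test:

// acquire/release discipline for pipeline clients; the boolean passed to
// releaseClient controls whether the cached client entry is invalidated
XceiverClientSpi client = factory.acquireClient(pipeline);
try {
  // use the client, e.g. watchForCommit(...) or sendCommandAsync(...)
} finally {
  factory.releaseClient(client, false); // false = keep the cached entry valid
}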
use of org.apache.hadoop.hdds.scm.XceiverClientRatis in project ozone by apache.
the class TestWatchForCommit method testWatchForCommitWithKeyWrite.
@Test
public void testWatchForCommitWithKeyWrite() throws Exception {
String keyName = getKeyName();
OzoneOutputStream key = createKey(keyName, ReplicationType.RATIS, 0);
int dataLength = maxFlushSize + 50;
// write data spanning more than one chunk
byte[] data1 = ContainerTestHelper.getFixedLengthString(keyString, dataLength).getBytes(UTF_8);
key.write(data1);
Assert.assertTrue(key.getOutputStream() instanceof KeyOutputStream);
KeyOutputStream keyOutputStream = (KeyOutputStream) key.getOutputStream();
OutputStream stream = keyOutputStream.getStreamEntries().get(0).getOutputStream();
Assert.assertTrue(stream instanceof BlockOutputStream);
RatisBlockOutputStream blockOutputStream = (RatisBlockOutputStream) stream;
// we have just written more data than the max flush size (4 chunks); at this
// point the buffer pool has all 4 chunk-sized buffers allocated
Assert.assertEquals(4, blockOutputStream.getBufferPool().getSize());
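// hedged arithmetic (the concrete sizes are assumptions, not read from the
// config here): with chunkSize = 100, flushSize = 2 * chunkSize = 200 and
// maxFlushSize = 2 * flushSize = 400, dataLength = 450 spans five chunk-sized
// slices, but the pool is capped at maxFlushSize / chunkSize = 4 buffers,
// which is what the assertion above checks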
// writtenDataLength as well as flushedDataLength will be updated here
Assert.assertEquals(dataLength, blockOutputStream.getWrittenDataLength());
Assert.assertEquals(maxFlushSize, blockOutputStream.getTotalDataFlushedLength());
// since data equal to the max buffer size has been written, the write above
// is a blocking call and waits for at least flushSize worth of data to get
// acked by all servers right here
Assert.assertTrue(blockOutputStream.getTotalAckDataLength() >= flushSize);
// watchForCommit will clean up at least one entry from the map, where each
// entry corresponds to flushSize worth of data
Assert.assertTrue(blockOutputStream.getCommitIndex2flushedDataMap().size() <= 1);
// Now do a flush. This flushes the remaining partial chunk and updates the
// flush length and the map.
key.flush();
// The buffer pool size and the written length are unchanged by the flush,
// while the total flushed length catches up to the written length
Assert.assertEquals(4, blockOutputStream.getBufferPool().getSize());
Assert.assertEquals(dataLength, blockOutputStream.getWrittenDataLength());
Assert.assertEquals(dataLength, blockOutputStream.getTotalDataFlushedLength());
// flush will make sure one more entry gets updated in the map
Assert.assertTrue(blockOutputStream.getCommitIndex2flushedDataMap().size() <= 2);
XceiverClientRatis raftClient = (XceiverClientRatis) blockOutputStream.getXceiverClient();
Assert.assertEquals(3, raftClient.getCommitInfoMap().size());
Pipeline pipeline = raftClient.getPipeline();
cluster.shutdownHddsDatanode(pipeline.getNodes().get(0));
cluster.shutdownHddsDatanode(pipeline.getNodes().get(1));
// again write more data than the max buffer limit. This calls
// watchForCommit again, and with two of the three servers down the call
// fails: the 4 writeChunks (maxFlushSize worth of data) and 2 putBlocks
// are discarded here once the exception is hit
key.write(data1);
// As part of handling the exception, the 4 failed writeChunks are
// rewritten, plus one partial chunk, two putBlocks for the flushSize
// data, and one flush for the partial chunk
key.flush();
// Make sure the retryCount is reset after the exception is handled
Assert.assertEquals(0, keyOutputStream.getRetryCount());
// now close the stream; it will update the ack length after watchForCommit
key.close();
// make sure the bufferPool is empty and everything written has been acked
Assert.assertEquals(0, blockOutputStream.getBufferPool().computeBufferData());
Assert.assertEquals(dataLength, blockOutputStream.getTotalAckDataLength());
Assert.assertNull(blockOutputStream.getCommitIndex2flushedDataMap());
validateData(keyName, data1);
}
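Taken together, the counters asserted throughout this test obey a simple ordering invariant. A hedged summary follows; the getters match the ones used above, but the invariant itself is inferred from the assertions rather than quoted from Ozone documentation.

// at any point during a healthy write:
//   totalAckDataLength <= totalDataFlushedLength <= writtenDataLength
// flush() moves data from "written" to "flushed"; watchForCommit moves it
// from "flushed" to "acked" and prunes commitIndex2flushedDataMap
long written = blockOutputStream.getWrittenDataLength();
long flushed = blockOutputStream.getTotalDataFlushedLength();
long acked = blockOutputStream.getTotalAckDataLength();
Assert.assertTrue(acked <= flushed && flushed <= written);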
use of org.apache.hadoop.hdds.scm.XceiverClientRatis in project ozone by apache.
the class TestWatchForCommit method test2WayCommitForTimeoutException.
@Test
public void test2WayCommitForTimeoutException() throws Exception {
GenericTestUtils.LogCapturer logCapturer = GenericTestUtils.LogCapturer.captureLogs(XceiverClientRatis.LOG);
XceiverClientManager clientManager = new XceiverClientManager(conf);
ContainerWithPipeline container1 = storageContainerLocationClient.allocateContainer(HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.THREE, OzoneConsts.OZONE);
XceiverClientSpi xceiverClient = clientManager.acquireClient(container1.getPipeline());
Assert.assertEquals(1, xceiverClient.getRefcount());
Assert.assertEquals(container1.getPipeline(), xceiverClient.getPipeline());
Pipeline pipeline = xceiverClient.getPipeline();
TestHelper.createPipelineOnDatanode(pipeline, cluster);
XceiverClientRatis ratisClient = (XceiverClientRatis) xceiverClient;
XceiverClientReply reply = xceiverClient.sendCommandAsync(ContainerTestHelper.getCreateContainerRequest(container1.getContainerInfo().getContainerID(), xceiverClient.getPipeline()));
reply.getResponse().get();
Assert.assertEquals(3, ratisClient.getCommitInfoMap().size());
List<DatanodeDetails> nodesInPipeline = pipeline.getNodes();
for (HddsDatanodeService dn : cluster.getHddsDatanodes()) {
// shut down a Ratis follower in the pipeline
if (nodesInPipeline.contains(dn.getDatanodeDetails()) && RatisTestHelper.isRatisFollower(dn, pipeline)) {
cluster.shutdownHddsDatanode(dn.getDatanodeDetails());
break;
}
}
reply = xceiverClient.sendCommandAsync(ContainerTestHelper.getCloseContainer(pipeline, container1.getContainerInfo().getContainerID()));
reply.getResponse().get();
xceiverClient.watchForCommit(reply.getLogIndex());
// the commitInfoMap will be reduced to 2 entries here
Assert.assertEquals(2, ratisClient.getCommitInfoMap().size());
clientManager.releaseClient(xceiverClient, false);
Assert.assertTrue(logCapturer.getOutput().contains("3 way commit failed"));
Assert.assertTrue(logCapturer.getOutput().contains("TimeoutException"));
Assert.assertTrue(logCapturer.getOutput().contains("Committed by majority"));
logCapturer.stopCapturing();
}
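The three log assertions capture the fallback path inside watchForCommit. A rough sketch of that path follows; it is an assumed shape, not the actual XceiverClientRatis code, and it presumes Ratis' async watch API (RaftClient, RaftClientReply, RaftProtos, and java.util.concurrent imports).

// Illustrative fallback sketch: try a full (3-way) watch; when the downed
// follower makes it time out, fall back to a majority watch and let the
// caller evict lagging servers from commitInfoMap. This is what produces the
// "3 way commit failed" and "Committed by majority" log lines asserted above.
RaftClientReply watchWithFallback(RaftClient client, long index, long timeoutMs)
    throws Exception {
  try {
    return client.async()
        .watch(index, RaftProtos.ReplicationLevel.ALL_COMMITTED)
        .get(timeoutMs, TimeUnit.MILLISECONDS);
  } catch (TimeoutException | ExecutionException e) {
    // majority fallback; commit info for servers that have not reached
    // the index is then dropped
    return client.async()
        .watch(index, RaftProtos.ReplicationLevel.MAJORITY_COMMITTED)
        .get();
  }
}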
use of org.apache.hadoop.hdds.scm.XceiverClientRatis in project ozone by apache.
the class TestWatchForCommit method testWatchForCommitForGroupMismatchException.
@Test
public void testWatchForCommitForGroupMismatchException() throws Exception {
XceiverClientManager clientManager = new XceiverClientManager(conf);
ContainerWithPipeline container1 = storageContainerLocationClient.allocateContainer(HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.THREE, OzoneConsts.OZONE);
XceiverClientSpi xceiverClient = clientManager.acquireClient(container1.getPipeline());
Assert.assertEquals(1, xceiverClient.getRefcount());
Assert.assertEquals(container1.getPipeline(), xceiverClient.getPipeline());
Pipeline pipeline = xceiverClient.getPipeline();
XceiverClientRatis ratisClient = (XceiverClientRatis) xceiverClient;
long containerId = container1.getContainerInfo().getContainerID();
XceiverClientReply reply = xceiverClient.sendCommandAsync(ContainerTestHelper.getCreateContainerRequest(containerId, xceiverClient.getPipeline()));
reply.getResponse().get();
Assert.assertEquals(3, ratisClient.getCommitInfoMap().size());
List<Pipeline> pipelineList = new ArrayList<>();
pipelineList.add(pipeline);
TestHelper.waitForPipelineClose(pipelineList, cluster);
try {
// watch for a log index that is not tracked in the commitInfo map and was
// never generated by Ratis.
// The basic idea is simply to verify that it throws an exception.
xceiverClient.watchForCommit(reply.getLogIndex() + new Random().nextInt(100) + 10);
Assert.fail("Expected exception not thrown");
} catch (Exception e) {
Assert.assertTrue(HddsClientUtils.checkForException(e) instanceof GroupMismatchException);
}
clientManager.releaseClient(xceiverClient, false);
}
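Both failure tests on this page share the same catch-and-unwrap assertion shape. A hypothetical helper could condense it; the name assertCauseIs, the ThrowingAction interface, and the signature are assumptions, not part of the Ozone test code.

// hypothetical helper condensing the try/catch/unwrap pattern above
@FunctionalInterface
interface ThrowingAction {
  void run() throws Exception;
}

static void assertCauseIs(Class<? extends Throwable> expected, ThrowingAction action) {
  Exception thrown = null;
  try {
    action.run();
  } catch (Exception e) {
    thrown = e;
  }
  Assert.assertNotNull("Expected " + expected.getSimpleName() + " not thrown", thrown);
  Assert.assertTrue(expected.isInstance(HddsClientUtils.checkForException(thrown)));
}

// usage:
// assertCauseIs(GroupMismatchException.class,
//     () -> xceiverClient.watchForCommit(reply.getLogIndex() + 110));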