Use of org.apache.hadoop.hdds.scm.XceiverClientReply in project ozone by apache.
From the class TestWatchForCommit, method testWatchForCommitForRetryfailure.
@Test
public void testWatchForCommitForRetryfailure() throws Exception {
  XceiverClientManager clientManager = new XceiverClientManager(conf);
  ContainerWithPipeline container1 = storageContainerLocationClient.allocateContainer(
      HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.THREE, OzoneConsts.OZONE);
  XceiverClientSpi xceiverClient = clientManager.acquireClient(container1.getPipeline());
  Assert.assertEquals(1, xceiverClient.getRefcount());
  Assert.assertEquals(container1.getPipeline(), xceiverClient.getPipeline());
  Pipeline pipeline = xceiverClient.getPipeline();
  TestHelper.createPipelineOnDatanode(pipeline, cluster);
  XceiverClientReply reply = xceiverClient.sendCommandAsync(
      ContainerTestHelper.getCreateContainerRequest(
          container1.getContainerInfo().getContainerID(), xceiverClient.getPipeline()));
  reply.getResponse().get();
  long index = reply.getLogIndex();
  cluster.shutdownHddsDatanode(pipeline.getNodes().get(0));
  cluster.shutdownHddsDatanode(pipeline.getNodes().get(1));
  // again write data with more than max buffer limit. This wi
  try {
    // just watch for a log index which is not updated in the commitInfo map
    // and for which no log index has been generated in Ratis.
    // The basic idea here is just to test whether it throws an exception.
    xceiverClient.watchForCommit(index + new Random().nextInt(100) + 10);
    Assert.fail("expected exception not thrown");
  } catch (Exception e) {
    Assert.assertTrue(e instanceof ExecutionException);
    // since the timeout value is quite long, the watch request will either
    // fail with NotReplicatedException, RetryFailureException or
    // RuntimeException
    Assert.assertFalse(HddsClientUtils.checkForException(e) instanceof TimeoutException);
  }
  clientManager.releaseClient(xceiverClient, false);
}
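For readers new to this API, the happy-path version of the flow this test perturbs (without shutting datanodes down) can be sketched as below. This is an illustrative sketch, not a verbatim Ozone excerpt; "conf" and "storageContainerLocationClient" are assumed to come from the same test fixture.

// Hedged sketch: normal sendCommandAsync / watchForCommit flow.
XceiverClientManager clientManager = new XceiverClientManager(conf);
ContainerWithPipeline container = storageContainerLocationClient.allocateContainer(
    HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.THREE, OzoneConsts.OZONE);
XceiverClientSpi client = clientManager.acquireClient(container.getPipeline());
try {
  // The reply bundles the async response future with the Ratis log index
  // assigned to the request.
  XceiverClientReply reply = client.sendCommandAsync(
      ContainerTestHelper.getCreateContainerRequest(
          container.getContainerInfo().getContainerID(), client.getPipeline()));
  reply.getResponse().get();        // wait for the datanode response
  long index = reply.getLogIndex(); // index to wait on
  // With all replicas healthy this returns once a quorum has applied the log
  // entry; in the failure test above it throws instead.
  client.watchForCommit(index);
} finally {
  clientManager.releaseClient(client, false);
}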
Use of org.apache.hadoop.hdds.scm.XceiverClientReply in project ozone by apache.
From the class TestWatchForCommit, method testWatchForCommitForGroupMismatchException.
@Test
public void testWatchForCommitForGroupMismatchException() throws Exception {
  XceiverClientManager clientManager = new XceiverClientManager(conf);
  ContainerWithPipeline container1 = storageContainerLocationClient.allocateContainer(
      HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.THREE, OzoneConsts.OZONE);
  XceiverClientSpi xceiverClient = clientManager.acquireClient(container1.getPipeline());
  Assert.assertEquals(1, xceiverClient.getRefcount());
  Assert.assertEquals(container1.getPipeline(), xceiverClient.getPipeline());
  Pipeline pipeline = xceiverClient.getPipeline();
  XceiverClientRatis ratisClient = (XceiverClientRatis) xceiverClient;
  long containerId = container1.getContainerInfo().getContainerID();
  XceiverClientReply reply = xceiverClient.sendCommandAsync(
      ContainerTestHelper.getCreateContainerRequest(containerId, xceiverClient.getPipeline()));
  reply.getResponse().get();
  Assert.assertEquals(3, ratisClient.getCommitInfoMap().size());
  List<Pipeline> pipelineList = new ArrayList<>();
  pipelineList.add(pipeline);
  TestHelper.waitForPipelineClose(pipelineList, cluster);
  try {
    // just watch for a log index which is not updated in the commitInfo map
    // and for which no log index has been generated in Ratis.
    // The basic idea here is just to test whether it throws an exception.
    xceiverClient.watchForCommit(reply.getLogIndex() + new Random().nextInt(100) + 10);
    Assert.fail("Expected exception not thrown");
  } catch (Exception e) {
    Assert.assertTrue(HddsClientUtils.checkForException(e) instanceof GroupMismatchException);
  }
  clientManager.releaseClient(xceiverClient, false);
}
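In client code the same check is usually applied the other way around: a GroupMismatchException means the pipeline backing the client has been closed, so the stale client should be invalidated before retrying on a fresh pipeline. A hedged sketch of such handling (the retry policy is illustrative, not Ozone's actual behavior):

try {
  xceiverClient.watchForCommit(index);
} catch (Exception e) {
  Throwable cause = HddsClientUtils.checkForException(e);
  if (cause instanceof GroupMismatchException) {
    // the Ratis group (pipeline) is gone; invalidate the cached client so the
    // caller can acquire one for a new pipeline and retry the write
    clientManager.releaseClient(xceiverClient, true);
  } else {
    throw e;
  }
}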
Use of org.apache.hadoop.hdds.scm.XceiverClientReply in project ozone by apache.
From the class BlockOutputStream, method executePutBlock.
/**
* @param close whether putBlock is happening as part of closing the stream
* @param force true if no data was written since most recent putBlock and
* stream is being closed
*/
CompletableFuture<ContainerProtos.ContainerCommandResponseProto> executePutBlock(boolean close, boolean force) throws IOException {
  checkOpen();
  long flushPos = totalDataFlushedLength;
  final List<ChunkBuffer> byteBufferList;
  if (!force) {
    Preconditions.checkNotNull(bufferList);
    byteBufferList = bufferList;
    bufferList = null;
    Preconditions.checkNotNull(byteBufferList);
  } else {
    byteBufferList = null;
  }
  CompletableFuture<ContainerProtos.ContainerCommandResponseProto> flushFuture = null;
  try {
    BlockData blockData = containerBlockData.build();
    XceiverClientReply asyncReply = putBlockAsync(xceiverClient, blockData, close, token);
    CompletableFuture<ContainerProtos.ContainerCommandResponseProto> future = asyncReply.getResponse();
    flushFuture = future.thenApplyAsync(e -> {
      try {
        validateResponse(e);
      } catch (IOException sce) {
        throw new CompletionException(sce);
      }
      // if the ioException is not set, putBlock is successful
      if (getIoException() == null && !force) {
        BlockID responseBlockID = BlockID.getFromProtobuf(
            e.getPutBlock().getCommittedBlockLength().getBlockID());
        Preconditions.checkState(blockID.get().getContainerBlockID()
            .equals(responseBlockID.getContainerBlockID()));
        // updates the bcsId of the block
        blockID.set(responseBlockID);
        if (LOG.isDebugEnabled()) {
          LOG.debug("Adding index " + asyncReply.getLogIndex() + " flushLength " + flushPos
              + " numBuffers " + byteBufferList.size() + " blockID " + blockID
              + " bufferPool size" + bufferPool.getSize()
              + " currentBufferIndex " + bufferPool.getCurrentBufferIndex());
        }
        // for standalone protocol, logIndex will always be 0.
        updateCommitInfo(asyncReply, byteBufferList);
      }
      return e;
    }, responseExecutor).exceptionally(e -> {
      if (LOG.isDebugEnabled()) {
        LOG.debug("putBlock failed for blockID {} with exception {}", blockID, e.getLocalizedMessage());
      }
      CompletionException ce = new CompletionException(e);
      setIoException(ce);
      throw ce;
    });
  } catch (IOException | ExecutionException e) {
    throw new IOException(EXCEPTION_MSG + e.toString(), e);
  } catch (InterruptedException ex) {
    Thread.currentThread().interrupt();
    handleInterruptedException(ex, false);
  }
  putFlushFuture(flushPos, flushFuture);
  return flushFuture;
}
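A hedged sketch of how a caller might consume the returned future, for example on close(); the error handling shown is illustrative and does not reproduce BlockOutputStream's actual flush bookkeeping:

CompletableFuture<ContainerProtos.ContainerCommandResponseProto> putBlockFuture =
    executePutBlock(true, false);   // close = true, force = false
try {
  putBlockFuture.get();             // wait until the response has been validated
} catch (InterruptedException e) {
  Thread.currentThread().interrupt();
  throw new IOException("Interrupted while waiting for putBlock", e);
} catch (ExecutionException e) {
  // the async validation stage wraps the real cause in a CompletionException
  throw new IOException("putBlock failed", e.getCause());
}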
Use of org.apache.hadoop.hdds.scm.XceiverClientReply in project ozone by apache.
From the class BlockOutputStream, method writeChunkToContainer.
/**
* Writes buffered data as a new chunk to the container and saves chunk
* information to be used later in putKey call.
*
* @throws IOException if there is an I/O error while performing the call
* @throws OzoneChecksumException if there is an error while computing
* checksum
* @return a future that completes with the datanode's response once the chunk write has been validated
*/
CompletableFuture<ContainerCommandResponseProto> writeChunkToContainer(ChunkBuffer chunk) throws IOException {
  int effectiveChunkSize = chunk.remaining();
  final long offset = chunkOffset.getAndAdd(effectiveChunkSize);
  final ByteString data = chunk.toByteString(bufferPool.byteStringConversion());
  ChecksumData checksumData = checksum.computeChecksum(chunk);
  ChunkInfo chunkInfo = ChunkInfo.newBuilder()
      .setChunkName(blockID.get().getLocalID() + "_chunk_" + ++chunkIndex)
      .setOffset(offset)
      .setLen(effectiveChunkSize)
      .setChecksumData(checksumData.getProtoBufMessage())
      .build();
  if (LOG.isDebugEnabled()) {
    LOG.debug("Writing chunk {} length {} at offset {}", chunkInfo.getChunkName(), effectiveChunkSize, offset);
  }
  try {
    XceiverClientReply asyncReply = writeChunkAsync(xceiverClient, chunkInfo, blockID.get(), data, token, replicationIndex);
    CompletableFuture<ContainerProtos.ContainerCommandResponseProto> respFuture = asyncReply.getResponse();
    CompletableFuture<ContainerProtos.ContainerCommandResponseProto> validateFuture = respFuture.thenApplyAsync(e -> {
      try {
        validateResponse(e);
      } catch (IOException sce) {
        respFuture.completeExceptionally(sce);
      }
      return e;
    }, responseExecutor).exceptionally(e -> {
      String msg = "Failed to write chunk " + chunkInfo.getChunkName() + " into block " + blockID;
      LOG.debug("{}, exception: {}", msg, e.getLocalizedMessage());
      CompletionException ce = new CompletionException(msg, e);
      setIoException(ce);
      throw ce;
    });
    containerBlockData.addChunks(chunkInfo);
    return validateFuture;
  } catch (IOException | ExecutionException e) {
    throw new IOException(EXCEPTION_MSG + e.toString(), e);
  } catch (InterruptedException ex) {
    Thread.currentThread().interrupt();
    handleInterruptedException(ex, false);
  }
  return null;
}
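Taken together with executePutBlock above, the usual ordering when a buffer fills up can be sketched as follows; "currentBuffer" is a hypothetical name used only to illustrate the call sequence, not an actual BlockOutputStream field.

// Hedged sketch of the write path when one buffer is full.
if (currentBuffer.remaining() == 0) {
  // ship the full buffer as a chunk; the chunk info is also recorded in
  // containerBlockData inside writeChunkToContainer
  writeChunkToContainer(currentBuffer);
  // commit the chunks written so far with a putBlock; the XceiverClientReply
  // inside carries the Ratis log index later passed to watchForCommit
  executePutBlock(false, false);
}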
Use of org.apache.hadoop.hdds.scm.XceiverClientReply in project ozone by apache.
From the class MockXceiverClientSpi, method result.
private XceiverClientReply result(ContainerCommandRequestProto request,
    Function<ContainerCommandResponseProto.Builder, ContainerCommandResponseProto.Builder> function) {
  Builder builder = ContainerCommandResponseProto.newBuilder()
      .setResult(Result.SUCCESS)
      .setCmdType(request.getCmdType());
  builder = function.apply(builder);
  XceiverClientReply reply = new XceiverClientReply(CompletableFuture.completedFuture(builder.build()));
  return reply;
}
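Because the mock wraps an already-completed future, a test going through this helper can read the reply synchronously. A hedged usage sketch, assuming the mock's sendCommandAsync builds its reply via result(...):

XceiverClientReply reply = mockClient.sendCommandAsync(request);
// the future is already complete, so get() returns immediately
ContainerCommandResponseProto response = reply.getResponse().get();
Assert.assertEquals(Result.SUCCESS, response.getResult());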