use of org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.BlockData in project ozone by apache.
the class TestContainerCommandRequestMessage method newPutSmallFile.
static ContainerCommandRequestProto newPutSmallFile(BlockID blockID, ByteString data) {
  final BlockData.Builder blockData = BlockData.newBuilder()
      .setBlockID(blockID.getDatanodeBlockIDProtobuf());
  final PutBlockRequestProto.Builder putBlockRequest = PutBlockRequestProto.newBuilder()
      .setBlockData(blockData);
  final KeyValue keyValue = KeyValue.newBuilder()
      .setKey("OverWriteRequested").setValue("true").build();
  final ChunkInfo chunk = ChunkInfo.newBuilder()
      .setChunkName(blockID.getLocalID() + "_chunk")
      .setOffset(0)
      .setLen(data.size())
      .addMetadata(keyValue)
      .setChecksumData(checksum(data).getProtoBufMessage())
      .build();
  final PutSmallFileRequestProto putSmallFileRequest = PutSmallFileRequestProto.newBuilder()
      .setChunkInfo(chunk).setBlock(putBlockRequest).setData(data).build();
  return ContainerCommandRequestProto.newBuilder()
      .setCmdType(Type.PutSmallFile)
      .setContainerID(blockID.getContainerID())
      .setDatanodeUuid(UUID.randomUUID().toString())
      .setPutSmallFile(putSmallFileRequest)
      .build();
}
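For context, TestContainerCommandRequestMessage round-trips requests like this one through ContainerCommandRequestMessage. A minimal sketch of such a round-trip, assuming illustrative IDs and payload and a test method declared throws Exception (the exact test harness is not shown above):

// Sketch (assumed, not the exact test body): round-trip the request through
// ContainerCommandRequestMessage and check nothing is lost.
BlockID blockID = new BlockID(1L, 1L);                    // assumed container/local IDs
ByteString data = ByteString.copyFromUtf8("small file");  // assumed payload
ContainerCommandRequestProto original = newPutSmallFile(blockID, data);
ContainerCommandRequestMessage message =
    ContainerCommandRequestMessage.toMessage(original, null);
ContainerCommandRequestProto decoded =
    ContainerCommandRequestMessage.toProto(message.getContent(), null);
Assert.assertEquals(original, decoded);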
use of org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.BlockData in project ozone by apache.
the class ContainerProtocolCalls method writeSmallFile.
/**
 * Allows writing a small file using a single RPC. This takes the container
 * name, block name, and data to write, and sends all of that data to the
 * container in a single RPC. This API is designed for files smaller than
 * 1 MB.
 *
 * @param client - client that communicates with the container.
 * @param blockID - ID of the block.
 * @param data - data to be written into the container.
 * @param token - a token for this block (may be null).
 * @return container protocol writeSmallFile response
 * @throws IOException if the write fails
 */
public static PutSmallFileResponseProto writeSmallFile(XceiverClientSpi client, BlockID blockID, byte[] data, Token<OzoneBlockTokenIdentifier> token) throws IOException {
  BlockData containerBlockData = BlockData.newBuilder()
      .setBlockID(blockID.getDatanodeBlockIDProtobuf())
      .build();
  PutBlockRequestProto.Builder createBlockRequest = PutBlockRequestProto.newBuilder()
      .setBlockData(containerBlockData);
  KeyValue keyValue = KeyValue.newBuilder()
      .setKey("OverWriteRequested").setValue("true").build();
  Checksum checksum = new Checksum(ChecksumType.CRC32, 256);
  final ChecksumData checksumData = checksum.computeChecksum(data);
  ChunkInfo chunk = ChunkInfo.newBuilder()
      .setChunkName(blockID.getLocalID() + "_chunk")
      .setOffset(0)
      .setLen(data.length)
      .addMetadata(keyValue)
      .setChecksumData(checksumData.getProtoBufMessage())
      .build();
  PutSmallFileRequestProto putSmallFileRequest = PutSmallFileRequestProto.newBuilder()
      .setChunkInfo(chunk)
      .setBlock(createBlockRequest)
      .setData(ByteString.copyFrom(data))
      .build();
  String id = client.getPipeline().getFirstNode().getUuidString();
  ContainerCommandRequestProto.Builder builder = ContainerCommandRequestProto.newBuilder()
      .setCmdType(Type.PutSmallFile)
      .setContainerID(blockID.getContainerID())
      .setDatanodeUuid(id)
      .setPutSmallFile(putSmallFileRequest);
  if (token != null) {
    builder.setEncodedToken(token.encodeToUrlString());
  }
  ContainerCommandRequestProto request = builder.build();
  ContainerCommandResponseProto response = client.sendCommand(request, getValidatorList());
  return response.getPutSmallFile();
}
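A hedged usage sketch for this call. The client-manager and pipeline setup are assumptions for illustration; only the writeSmallFile invocation itself mirrors the method above:

// Sketch, assuming an XceiverClientManager and the block's pipeline are at hand.
XceiverClientSpi client = xceiverClientManager.acquireClient(pipeline);
try {
  byte[] payload = "small payload".getBytes(StandardCharsets.UTF_8);
  PutSmallFileResponseProto response =
      ContainerProtocolCalls.writeSmallFile(client, blockID, payload, null); // null token: unsecured cluster
} finally {
  xceiverClientManager.releaseClient(client, false);
}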
use of org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.BlockData in project ozone by apache.
the class DatanodeBlockPutter method putBlock.
private void putBlock(long stepNo) throws Exception {
  ContainerProtos.DatanodeBlockID blockId = ContainerProtos.DatanodeBlockID.newBuilder()
      .setContainerID(1L)
      .setLocalID(stepNo)
      .setBlockCommitSequenceId(stepNo)
      .build();
  BlockData.Builder blockData = BlockData.newBuilder().setBlockID(blockId);
  for (long l = 0; l < chunksPerBlock; l++) {
    ChunkInfo.Builder chunkInfo = ChunkInfo.newBuilder()
        .setChunkName(getPrefix() + "_chunk_" + stepNo)
        .setOffset(l * chunkSize)
        .setLen(chunkSize)
        .setChecksumData(checksumProtobuf);
    blockData.addChunks(chunkInfo);
  }
  PutBlockRequestProto.Builder putBlockRequest = PutBlockRequestProto.newBuilder()
      .setBlockData(blockData);
  String id = client.getPipeline().getFirstNode().getUuidString();
  ContainerCommandRequestProto.Builder builder = ContainerCommandRequestProto.newBuilder()
      .setCmdType(Type.PutBlock)
      .setContainerID(blockId.getContainerID())
      .setDatanodeUuid(id)
      .setPutBlock(putBlockRequest);
  ContainerCommandRequestProto request = builder.build();
  timer.time(() -> {
    client.sendCommand(request);
    return null;
  });
}
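This benchmark method depends on fields set up elsewhere in DatanodeBlockPutter (client, timer, chunk geometry, and a precomputed checksumProtobuf). A minimal sketch of plausible initialization; every value below is an assumption for illustration, not the class's actual setup:

// Illustrative only: a trivial ChecksumData proto for chunks this load
// generator does not verify, plus assumed chunk geometry.
ContainerProtos.ChecksumData checksumProtobuf = ContainerProtos.ChecksumData.newBuilder()
    .setType(ContainerProtos.ChecksumType.NONE)  // assumed: no checksum verification
    .setBytesPerChecksum(4)                      // assumed value
    .build();
long chunkSize = 1024;       // assumed bytes per chunk
long chunksPerBlock = 4;     // assumed chunks per block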
use of org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.BlockData in project ozone by apache.
the class BlockOutputStream method executePutBlock.
/**
 * @param close whether putBlock is happening as part of closing the stream
 * @param force true if no data has been written since the most recent
 *              putBlock and the stream is being closed
 */
private CompletableFuture<ContainerProtos.ContainerCommandResponseProto> executePutBlock(boolean close, boolean force) throws IOException {
  checkOpen();
  long flushPos = totalDataFlushedLength;
  final List<ChunkBuffer> byteBufferList;
  if (!force) {
    Preconditions.checkNotNull(bufferList);
    byteBufferList = bufferList;
    bufferList = null;
    Preconditions.checkNotNull(byteBufferList);
  } else {
    byteBufferList = null;
  }
  CompletableFuture<ContainerProtos.ContainerCommandResponseProto> flushFuture = null;
  try {
    BlockData blockData = containerBlockData.build();
    XceiverClientReply asyncReply = putBlockAsync(xceiverClient, blockData, close, token);
    CompletableFuture<ContainerProtos.ContainerCommandResponseProto> future = asyncReply.getResponse();
    flushFuture = future.thenApplyAsync(e -> {
      try {
        validateResponse(e);
      } catch (IOException sce) {
        throw new CompletionException(sce);
      }
      // if the ioException is not set, putBlock is successful
      if (getIoException() == null && !force) {
        BlockID responseBlockID = BlockID.getFromProtobuf(
            e.getPutBlock().getCommittedBlockLength().getBlockID());
        Preconditions.checkState(blockID.get().getContainerBlockID()
            .equals(responseBlockID.getContainerBlockID()));
        // updates the bcsId of the block
        blockID.set(responseBlockID);
        if (LOG.isDebugEnabled()) {
          LOG.debug("Adding index " + asyncReply.getLogIndex()
              + " flushLength " + flushPos
              + " numBuffers " + byteBufferList.size()
              + " blockID " + blockID
              + " bufferPool size" + bufferPool.getSize()
              + " currentBufferIndex " + bufferPool.getCurrentBufferIndex());
        }
        // for standalone protocol, logIndex will always be 0.
        updateCommitInfo(asyncReply, byteBufferList);
      }
      return e;
    }, responseExecutor).exceptionally(e -> {
      if (LOG.isDebugEnabled()) {
        LOG.debug("putBlock failed for blockID {} with exception {}",
            blockID, e.getLocalizedMessage());
      }
      CompletionException ce = new CompletionException(e);
      setIoException(ce);
      throw ce;
    });
  } catch (IOException | ExecutionException e) {
    throw new IOException(EXCEPTION_MSG + e.toString(), e);
  } catch (InterruptedException ex) {
    Thread.currentThread().interrupt();
    handleInterruptedException(ex, false);
  }
  putFlushFuture(flushPos, flushFuture);
  return flushFuture;
}
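Note that putBlockAsync (statically imported from ContainerProtocolCalls) declares InterruptedException and ExecutionException alongside IOException, which is why executePutBlock carries the two extra catch blocks. A hedged sketch of the call in isolation, assuming the surrounding stream fields (xceiverClient, containerBlockData, token, LOG):

// Sketch: the async putBlock path used above, with checked exceptions surfaced.
try {
  XceiverClientReply reply = ContainerProtocolCalls.putBlockAsync(
      xceiverClient, containerBlockData.build(), false /* eof */, token);
  reply.getResponse().thenAccept(r ->
      LOG.debug("putBlock acked at log index {}", reply.getLogIndex()));
} catch (InterruptedException ie) {
  Thread.currentThread().interrupt();  // restore interrupt status, as the stream does
} catch (ExecutionException ee) {
  throw new IOException("putBlock failed", ee);
}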