Use of org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.PutSmallFileRequestProto in the Apache Ozone project.
Example from the class TestContainerCommandRequestMessage, method newPutSmallFile.
/**
 * Builds a PutSmallFile container command for the given block carrying the
 * supplied payload as a single chunk. The chunk is flagged with the
 * "OverWriteRequested" metadata entry and checksummed via the sibling
 * {@code checksum} helper; the request is stamped with a random datanode UUID.
 */
static ContainerCommandRequestProto newPutSmallFile(BlockID blockID, ByteString data) {
  final BlockData.Builder blockBuilder = BlockData.newBuilder()
      .setBlockID(blockID.getDatanodeBlockIDProtobuf());
  final PutBlockRequestProto.Builder putBlock = PutBlockRequestProto.newBuilder()
      .setBlockData(blockBuilder);
  // Metadata flag allowing the datanode to overwrite an existing chunk.
  final KeyValue overwriteFlag = KeyValue.newBuilder()
      .setKey("OverWriteRequested")
      .setValue("true")
      .build();
  final ChunkInfo chunkInfo = ChunkInfo.newBuilder()
      .setChunkName(blockID.getLocalID() + "_chunk")
      .setOffset(0)
      .setLen(data.size())
      .addMetadata(overwriteFlag)
      .setChecksumData(checksum(data).getProtoBufMessage())
      .build();
  final PutSmallFileRequestProto smallFileRequest = PutSmallFileRequestProto.newBuilder()
      .setChunkInfo(chunkInfo)
      .setBlock(putBlock)
      .setData(data)
      .build();
  return ContainerCommandRequestProto.newBuilder()
      .setCmdType(Type.PutSmallFile)
      .setContainerID(blockID.getContainerID())
      .setDatanodeUuid(UUID.randomUUID().toString())
      .setPutSmallFile(smallFileRequest)
      .build();
}
Use of org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.PutSmallFileRequestProto in the Apache Ozone project.
Example from the class ContainerProtocolCalls, method writeSmallFile.
/**
 * Writes a small file (intended for payloads under 1 MB) to a container in a
 * single RPC: the chunk data and the block metadata travel together in one
 * PutSmallFile command.
 *
 * @param client client used to talk to the container pipeline
 * @param blockID identity of the block being written
 * @param data payload bytes to store
 * @param token security token for this block; may be {@code null}
 * @return the PutSmallFile response returned by the datanode
 * @throws IOException if the RPC fails or checksum computation fails
 */
public static PutSmallFileResponseProto writeSmallFile(XceiverClientSpi client, BlockID blockID, byte[] data, Token<OzoneBlockTokenIdentifier> token) throws IOException {
  BlockData blockProto = BlockData.newBuilder()
      .setBlockID(blockID.getDatanodeBlockIDProtobuf())
      .build();
  PutBlockRequestProto.Builder putBlock = PutBlockRequestProto.newBuilder()
      .setBlockData(blockProto);
  // Allow the datanode to overwrite a previously written chunk of this name.
  KeyValue overwrite = KeyValue.newBuilder()
      .setKey("OverWriteRequested")
      .setValue("true")
      .build();
  // CRC32 with 256 bytes-per-checksum, matching the small-file write path.
  Checksum checksumCalc = new Checksum(ChecksumType.CRC32, 256);
  final ChecksumData checksumProto = checksumCalc.computeChecksum(data);
  ChunkInfo chunkProto = ChunkInfo.newBuilder()
      .setChunkName(blockID.getLocalID() + "_chunk")
      .setOffset(0)
      .setLen(data.length)
      .addMetadata(overwrite)
      .setChecksumData(checksumProto.getProtoBufMessage())
      .build();
  PutSmallFileRequestProto smallFileProto = PutSmallFileRequestProto.newBuilder()
      .setChunkInfo(chunkProto)
      .setBlock(putBlock)
      .setData(ByteString.copyFrom(data))
      .build();
  String datanodeId = client.getPipeline().getFirstNode().getUuidString();
  ContainerCommandRequestProto.Builder requestBuilder = ContainerCommandRequestProto.newBuilder()
      .setCmdType(Type.PutSmallFile)
      .setContainerID(blockID.getContainerID())
      .setDatanodeUuid(datanodeId)
      .setPutSmallFile(smallFileProto);
  if (token != null) {
    requestBuilder.setEncodedToken(token.encodeToUrlString());
  }
  ContainerCommandResponseProto reply =
      client.sendCommand(requestBuilder.build(), getValidatorList());
  return reply.getPutSmallFile();
}
Use of org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.PutSmallFileRequestProto in the Apache Ozone project.
Example from the class ContainerCommandRequestMessage, method toMessage.
/**
 * Wraps a container command into a {@link ContainerCommandRequestMessage},
 * detaching the bulk data payload (WriteChunk or PutSmallFile) from the proto
 * so the two can travel separately. Optionally stamps a trace id.
 */
public static ContainerCommandRequestMessage toMessage(ContainerCommandRequestProto request, String traceId) {
  final ContainerCommandRequestProto.Builder builder = ContainerCommandRequestProto.newBuilder(request);
  if (traceId != null) {
    builder.setTraceID(traceId);
  }
  ByteString payload = ByteString.EMPTY;
  switch (request.getCmdType()) {
    case WriteChunk: {
      final WriteChunkRequestProto writeChunk = request.getWriteChunk();
      payload = writeChunk.getData();
      // Strip the data field; it is carried alongside the message instead.
      builder.setWriteChunk(writeChunk.toBuilder().clearData());
      break;
    }
    case PutSmallFile: {
      final PutSmallFileRequestProto smallFile = request.getPutSmallFile();
      payload = smallFile.getData();
      builder.setPutSmallFile(smallFile.toBuilder().setData(ByteString.EMPTY));
      break;
    }
    default:
      break;
  }
  return new ContainerCommandRequestMessage(builder.build(), payload);
}
Use of org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.PutSmallFileRequestProto in the Apache Ozone project.
Example from the class KeyValueHandler, method handlePutSmallFile.
/**
 * Handles the Put Small File operation: writes the single chunk and commits
 * the associated block metadata in one shot, delegating to ChunkManager and
 * BlockManager respectively.
 *
 * @param request the incoming PutSmallFile command
 * @param kvContainer target container; must be open
 * @param dispatcherContext Ratis dispatcher context; a default is built when
 *     {@code null} (e.g. standalone pipeline)
 * @return a success response carrying the committed block data, or an error
 *     response on failure
 */
ContainerCommandResponseProto handlePutSmallFile(ContainerCommandRequestProto request, KeyValueContainer kvContainer, DispatcherContext dispatcherContext) {
  if (!request.hasPutSmallFile()) {
    if (LOG.isDebugEnabled()) {
      LOG.debug("Malformed Put Small File request. trace ID: {}", request.getTraceID());
    }
    return malformedRequest(request);
  }
  PutSmallFileRequestProto putSmallFileReq = request.getPutSmallFile();
  final ContainerProtos.BlockData blockDataProto;
  try {
    checkContainerOpen(kvContainer);
    BlockData blockData = BlockData.getFromProtoBuf(putSmallFileReq.getBlock().getBlockData());
    Preconditions.checkNotNull(blockData);
    ContainerProtos.ChunkInfo chunkInfoProto = putSmallFileReq.getChunkInfo();
    ChunkInfo chunkInfo = ChunkInfo.getFromProtoBuf(chunkInfoProto);
    Preconditions.checkNotNull(chunkInfo);
    ChunkBuffer data = ChunkBuffer.wrap(putSmallFileReq.getData().asReadOnlyByteBufferList());
    if (dispatcherContext == null) {
      dispatcherContext = new DispatcherContext.Builder().build();
    }
    BlockID blockID = blockData.getBlockID();
    // Chunks are committed as part of handling putSmallFile here, so there is
    // no need to track this chunk in openContainerBlockMap.
    chunkManager.writeChunk(kvContainer, blockID, chunkInfo, data, dispatcherContext);
    // Validate after the write so the data buffer has been fully consumed.
    validateChunkChecksumData(data, chunkInfo);
    chunkManager.finishWriteChunks(kvContainer, blockData);
    // A small file is exactly one chunk; record it on the block metadata.
    List<ContainerProtos.ChunkInfo> chunks = new LinkedList<>();
    chunks.add(chunkInfoProto);
    blockData.setChunks(chunks);
    blockData.setBlockCommitSequenceId(dispatcherContext.getLogIndex());
    blockManager.putBlock(kvContainer, blockData);
    blockDataProto = blockData.getProtoBufMessage();
    metrics.incContainerBytesStats(Type.PutSmallFile, chunkInfo.getLen());
  } catch (StorageContainerException ex) {
    return ContainerUtils.logAndReturnError(LOG, ex, request);
  } catch (IOException ex) {
    // BUGFIX: the message previously said "Read Chunk failed" — a copy-paste
    // from the read path — which misled log readers; this is the put path.
    return ContainerUtils.logAndReturnError(LOG, new StorageContainerException("Put Small File failed", ex, PUT_SMALL_FILE_ERROR), request);
  }
  return getPutFileResponseSuccess(request, blockDataProto);
}
Aggregations