Search in sources :

Example 1 with BlockData

use of org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.BlockData in project ozone by apache.

From the class TestContainerCommandRequestMessage, the method newPutSmallFile:

static ContainerCommandRequestProto newPutSmallFile(BlockID blockID, ByteString data) {
    // Identify the target block on the datanode.
    final BlockData.Builder blockBuilder =
        BlockData.newBuilder().setBlockID(blockID.getDatanodeBlockIDProtobuf());
    final PutBlockRequestProto.Builder putBlock =
        PutBlockRequestProto.newBuilder().setBlockData(blockBuilder);
    // Metadata flag asking the datanode to overwrite any existing data.
    final KeyValue overwriteFlag = KeyValue.newBuilder()
        .setKey("OverWriteRequested")
        .setValue("true")
        .build();
    // A single chunk spans the entire payload; its checksum covers all of it.
    final ChunkInfo chunkInfo = ChunkInfo.newBuilder()
        .setChunkName(blockID.getLocalID() + "_chunk")
        .setOffset(0)
        .setLen(data.size())
        .addMetadata(overwriteFlag)
        .setChecksumData(checksum(data).getProtoBufMessage())
        .build();
    final PutSmallFileRequestProto smallFileRequest = PutSmallFileRequestProto.newBuilder()
        .setChunkInfo(chunkInfo)
        .setBlock(putBlock)
        .setData(data)
        .build();
    // Wrap everything in a PutSmallFile container command; a random UUID stands
    // in for the datanode id since this is a test-only request.
    return ContainerCommandRequestProto.newBuilder()
        .setCmdType(Type.PutSmallFile)
        .setContainerID(blockID.getContainerID())
        .setDatanodeUuid(UUID.randomUUID().toString())
        .setPutSmallFile(smallFileRequest)
        .build();
}
Also used : PutSmallFileRequestProto(org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.PutSmallFileRequestProto) KeyValue(org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.KeyValue) ChunkInfo(org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ChunkInfo) PutBlockRequestProto(org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.PutBlockRequestProto) BlockData(org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.BlockData)

Example 2 with BlockData

use of org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.BlockData in project ozone by apache.

From the class ContainerProtocolCalls, the method writeSmallFile:

/**
 * Writes a small file to a container in a single RPC. Builds one PutSmallFile
 * command carrying the block metadata, a single checksummed chunk descriptor,
 * and the payload itself, then sends it to the first node of the client's
 * pipeline. Intended for payloads smaller than 1 MB.
 *
 * @param client - client that communicates with the container.
 * @param blockID - ID of the block
 * @param data - Data to be written into the container.
 * @param token a token for this block (may be null)
 * @return container protocol writeSmallFile response
 * @throws IOException
 */
public static PutSmallFileResponseProto writeSmallFile(XceiverClientSpi client, BlockID blockID, byte[] data, Token<OzoneBlockTokenIdentifier> token) throws IOException {
    // Block metadata identifying the target datanode block.
    final BlockData blockProto = BlockData.newBuilder()
        .setBlockID(blockID.getDatanodeBlockIDProtobuf())
        .build();
    final PutBlockRequestProto.Builder putBlock =
        PutBlockRequestProto.newBuilder().setBlockData(blockProto);
    // Metadata flag asking the datanode to overwrite any existing data.
    final KeyValue overwriteFlag = KeyValue.newBuilder()
        .setKey("OverWriteRequested")
        .setValue("true")
        .build();
    // CRC32 checksum computed over the payload before it leaves the client.
    final ChecksumData checksumData =
        new Checksum(ChecksumType.CRC32, 256).computeChecksum(data);
    // A single chunk descriptor covers the whole payload.
    final ChunkInfo chunkProto = ChunkInfo.newBuilder()
        .setChunkName(blockID.getLocalID() + "_chunk")
        .setOffset(0)
        .setLen(data.length)
        .addMetadata(overwriteFlag)
        .setChecksumData(checksumData.getProtoBufMessage())
        .build();
    final PutSmallFileRequestProto smallFileRequest = PutSmallFileRequestProto.newBuilder()
        .setChunkInfo(chunkProto)
        .setBlock(putBlock)
        .setData(ByteString.copyFrom(data))
        .build();
    // Address the command to the first node of the client's pipeline.
    final String datanodeId = client.getPipeline().getFirstNode().getUuidString();
    final ContainerCommandRequestProto.Builder builder = ContainerCommandRequestProto.newBuilder()
        .setCmdType(Type.PutSmallFile)
        .setContainerID(blockID.getContainerID())
        .setDatanodeUuid(datanodeId)
        .setPutSmallFile(smallFileRequest);
    if (token != null) {
        builder.setEncodedToken(token.encodeToUrlString());
    }
    final ContainerCommandRequestProto request = builder.build();
    final ContainerCommandResponseProto response =
        client.sendCommand(request, getValidatorList());
    return response.getPutSmallFile();
}
Also used : PutSmallFileRequestProto(org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.PutSmallFileRequestProto) KeyValue(org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.KeyValue) ChunkInfo(org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ChunkInfo) ChecksumData(org.apache.hadoop.ozone.common.ChecksumData) PutBlockRequestProto(org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.PutBlockRequestProto) Checksum(org.apache.hadoop.ozone.common.Checksum) ContainerCommandRequestProto(org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandRequestProto) ByteString(org.apache.ratis.thirdparty.com.google.protobuf.ByteString) BlockData(org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.BlockData) ContainerCommandResponseProto(org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandResponseProto)

Example 3 with BlockData

use of org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.BlockData in project ozone by apache.

From the class DatanodeBlockPutter, the method putBlock:

/**
 * Issues one timed PutBlock command for the given step of the benchmark.
 * The block lives in container 1; both its local id and its block-commit
 * sequence id are derived from {@code stepNo}, and it registers
 * {@code chunksPerBlock} chunks laid out back-to-back at {@code chunkSize}
 * intervals, each carrying the precomputed checksum proto.
 *
 * @param stepNo benchmark step number, used as the block's local id and BCSID
 * @throws Exception if sending the command fails
 */
private void putBlock(long stepNo) throws Exception {
    final ContainerProtos.DatanodeBlockID blockId = ContainerProtos.DatanodeBlockID.newBuilder()
        .setContainerID(1L)
        .setLocalID(stepNo)
        .setBlockCommitSequenceId(stepNo)
        .build();
    final BlockData.Builder blockData = BlockData.newBuilder().setBlockID(blockId);
    // NOTE(review): every chunk of a block shares the same name and differs
    // only by offset — presumably acceptable for this load generator.
    for (long chunkIndex = 0; chunkIndex < chunksPerBlock; chunkIndex++) {
        blockData.addChunks(ChunkInfo.newBuilder()
            .setChunkName(getPrefix() + "_chunk_" + stepNo)
            .setOffset(chunkIndex * chunkSize)
            .setLen(chunkSize)
            .setChecksumData(checksumProtobuf));
    }
    final PutBlockRequestProto.Builder putBlockRequest =
        PutBlockRequestProto.newBuilder().setBlockData(blockData);
    // Address the command to the first node of the client's pipeline.
    final String datanodeId = client.getPipeline().getFirstNode().getUuidString();
    final ContainerCommandRequestProto request = ContainerCommandRequestProto.newBuilder()
        .setCmdType(Type.PutBlock)
        .setContainerID(blockId.getContainerID())
        .setDatanodeUuid(datanodeId)
        .setPutBlock(putBlockRequest)
        .build();
    // Time only the command round trip itself, not the request construction.
    timer.time(() -> {
        client.sendCommand(request);
        return null;
    });
}
Also used : ContainerProtos(org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos) ChunkInfo(org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ChunkInfo) PutBlockRequestProto(org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.PutBlockRequestProto) ContainerCommandRequestProto(org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandRequestProto) BlockData(org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.BlockData)

Example 4 with BlockData

use of org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.BlockData in project ozone by apache.

From the class BlockOutputStream, the method executePutBlock:

/**
 * Asynchronously sends a PutBlock for the data flushed so far and registers
 * the resulting future under the current flush position. On success the
 * block's bcsId is refreshed from the response and the flushed buffers are
 * released via {@link #updateCommitInfo}; on failure the exception is
 * recorded on the stream via {@code setIoException} and rethrown as a
 * {@link CompletionException}.
 *
 * @param close whether putBlock is happening as part of closing the stream
 * @param force true if no data was written since most recent putBlock and
 *            stream is being closed
 * @return the future tracking the putBlock response (also stored via
 *         putFlushFuture); may be null if an InterruptedException was
 *         handled before the request was issued
 * @throws IOException if the stream is closed, or if issuing the request
 *         fails with an IOException/ExecutionException (wrapped with
 *         EXCEPTION_MSG)
 */
private CompletableFuture<ContainerProtos.ContainerCommandResponseProto> executePutBlock(boolean close, boolean force) throws IOException {
    checkOpen();
    // Snapshot the flush position now; the async callback below logs it and
    // it keys the future registered at the end of this method.
    long flushPos = totalDataFlushedLength;
    final List<ChunkBuffer> byteBufferList;
    if (!force) {
        // Take ownership of the pending buffers: null out the field so a
        // concurrent/next flush cannot hand the same buffers to two putBlocks.
        Preconditions.checkNotNull(bufferList);
        byteBufferList = bufferList;
        bufferList = null;
        Preconditions.checkNotNull(byteBufferList);
    } else {
        // force == true means nothing was written since the last putBlock,
        // so there are no buffers to commit.
        byteBufferList = null;
    }
    CompletableFuture<ContainerProtos.ContainerCommandResponseProto> flushFuture = null;
    try {
        BlockData blockData = containerBlockData.build();
        XceiverClientReply asyncReply = putBlockAsync(xceiverClient, blockData, close, token);
        CompletableFuture<ContainerProtos.ContainerCommandResponseProto> future = asyncReply.getResponse();
        // Completion handling runs on responseExecutor, off the caller thread.
        flushFuture = future.thenApplyAsync(e -> {
            try {
                validateResponse(e);
            } catch (IOException sce) {
                // CompletableFuture callbacks cannot throw checked exceptions;
                // wrap so it surfaces through the future's exceptional path.
                throw new CompletionException(sce);
            }
            // if the ioException is not set, putBlock is successful
            if (getIoException() == null && !force) {
                BlockID responseBlockID = BlockID.getFromProtobuf(e.getPutBlock().getCommittedBlockLength().getBlockID());
                // Sanity check: the response must refer to the same container
                // block we asked to commit.
                Preconditions.checkState(blockID.get().getContainerBlockID().equals(responseBlockID.getContainerBlockID()));
                // updates the bcsId of the block
                blockID.set(responseBlockID);
                if (LOG.isDebugEnabled()) {
                    LOG.debug("Adding index " + asyncReply.getLogIndex() + " flushLength " + flushPos + " numBuffers " + byteBufferList.size() + " blockID " + blockID + " bufferPool size" + bufferPool.getSize() + " currentBufferIndex " + bufferPool.getCurrentBufferIndex());
                }
                // for standalone protocol, logIndex will always be 0.
                updateCommitInfo(asyncReply, byteBufferList);
            }
            return e;
        }, responseExecutor).exceptionally(e -> {
            if (LOG.isDebugEnabled()) {
                LOG.debug("putBlock failed for blockID {} with exception {}", blockID, e.getLocalizedMessage());
            }
            // Record the failure on the stream so later operations observe it,
            // then rethrow to keep the future completed exceptionally.
            CompletionException ce = new CompletionException(e);
            setIoException(ce);
            throw ce;
        });
    } catch (IOException | ExecutionException e) {
        throw new IOException(EXCEPTION_MSG + e.toString(), e);
    } catch (InterruptedException ex) {
        // Restore the interrupt flag before delegating to the shared handler.
        Thread.currentThread().interrupt();
        handleInterruptedException(ex, false);
    }
    // Register the (possibly null) future under this flush position so
    // waitForFlush/close can await it later.
    putFlushFuture(flushPos, flushFuture);
    return flushFuture;
}
Also used : BlockID(org.apache.hadoop.hdds.client.BlockID) OzoneChecksumException(org.apache.hadoop.ozone.common.OzoneChecksumException) ContainerCommandResponseProto(org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandResponseProto) ContainerProtocolCalls.writeChunkAsync(org.apache.hadoop.hdds.scm.storage.ContainerProtocolCalls.writeChunkAsync) LoggerFactory(org.slf4j.LoggerFactory) CompletableFuture(java.util.concurrent.CompletableFuture) KeyValue(org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.KeyValue) ChecksumData(org.apache.hadoop.ozone.common.ChecksumData) ContainerProtos(org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos) AtomicReference(java.util.concurrent.atomic.AtomicReference) OzoneClientConfig(org.apache.hadoop.hdds.scm.OzoneClientConfig) ArrayList(java.util.ArrayList) TokenIdentifier(org.apache.hadoop.security.token.TokenIdentifier) ByteString(org.apache.ratis.thirdparty.com.google.protobuf.ByteString) Checksum(org.apache.hadoop.ozone.common.Checksum) ExecutorService(java.util.concurrent.ExecutorService) OutputStream(java.io.OutputStream) XceiverClientFactory(org.apache.hadoop.hdds.scm.XceiverClientFactory) BlockData(org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.BlockData) Logger(org.slf4j.Logger) DatanodeDetails(org.apache.hadoop.hdds.protocol.DatanodeDetails) IOException(java.io.IOException) CompletionException(java.util.concurrent.CompletionException) ChunkInfo(org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ChunkInfo) Pipeline(org.apache.hadoop.hdds.scm.pipeline.Pipeline) Token(org.apache.hadoop.security.token.Token) ContainerProtocolCalls.putBlockAsync(org.apache.hadoop.hdds.scm.storage.ContainerProtocolCalls.putBlockAsync) Executors(java.util.concurrent.Executors) XceiverClientReply(org.apache.hadoop.hdds.scm.XceiverClientReply) ExecutionException(java.util.concurrent.ExecutionException) AtomicLong(java.util.concurrent.atomic.AtomicLong) List(java.util.List) 
XceiverClientSpi(org.apache.hadoop.hdds.scm.XceiverClientSpi) StorageContainerException(org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException) ChunkBuffer(org.apache.hadoop.ozone.common.ChunkBuffer) Preconditions(com.google.common.base.Preconditions) VisibleForTesting(com.google.common.annotations.VisibleForTesting) IOException(java.io.IOException) ContainerCommandResponseProto(org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandResponseProto) XceiverClientReply(org.apache.hadoop.hdds.scm.XceiverClientReply) CompletionException(java.util.concurrent.CompletionException) ChunkBuffer(org.apache.hadoop.ozone.common.ChunkBuffer) BlockID(org.apache.hadoop.hdds.client.BlockID) BlockData(org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.BlockData) ExecutionException(java.util.concurrent.ExecutionException)

Aggregations

BlockData (org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.BlockData)4 ChunkInfo (org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ChunkInfo)4 KeyValue (org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.KeyValue)3 PutBlockRequestProto (org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.PutBlockRequestProto)3 ContainerProtos (org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos)2 ContainerCommandRequestProto (org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandRequestProto)2 ContainerCommandResponseProto (org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandResponseProto)2 PutSmallFileRequestProto (org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.PutSmallFileRequestProto)2 Checksum (org.apache.hadoop.ozone.common.Checksum)2 ChecksumData (org.apache.hadoop.ozone.common.ChecksumData)2 ByteString (org.apache.ratis.thirdparty.com.google.protobuf.ByteString)2 VisibleForTesting (com.google.common.annotations.VisibleForTesting)1 Preconditions (com.google.common.base.Preconditions)1 IOException (java.io.IOException)1 OutputStream (java.io.OutputStream)1 ArrayList (java.util.ArrayList)1 List (java.util.List)1 CompletableFuture (java.util.concurrent.CompletableFuture)1 CompletionException (java.util.concurrent.CompletionException)1 ExecutionException (java.util.concurrent.ExecutionException)1