
Example 1 with DatanodeBlockID

Use of org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.DatanodeBlockID in project ozone by apache.

From class DatanodeChunkGenerator, method writeChunk:

private void writeChunk(long stepNo) throws Exception {
    // Always use a fake block ID, cycling through 20 container-local IDs.
    DatanodeBlockID blockId = DatanodeBlockID.newBuilder()
        .setContainerID(1L)
        .setLocalID(stepNo % 20)
        .build();
    ChunkInfo chunkInfo = ChunkInfo.newBuilder()
        .setChunkName(getPrefix() + "_testdata_chunk_" + stepNo)
        .setOffset((stepNo / 20) * chunkSize)
        .setLen(chunkSize)
        .setChecksumData(checksumProtobuf)
        .build();
    WriteChunkRequestProto.Builder writeChunkRequest = WriteChunkRequestProto.newBuilder()
        .setBlockID(blockId)
        .setChunkData(chunkInfo)
        .setData(dataToWrite);
    // Round-robin the write across the available clients.
    XceiverClientSpi clientSpi =
        xceiverClients.get((int) (stepNo % xceiverClients.size()));
    sendWriteChunkRequest(blockId, writeChunkRequest, clientSpi);
}
Also used: WriteChunkRequestProto (org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.WriteChunkRequestProto), ChunkInfo (org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ChunkInfo), DatanodeBlockID (org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.DatanodeBlockID), XceiverClientSpi (org.apache.hadoop.hdds.scm.XceiverClientSpi)
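
For orientation, here is a minimal standalone sketch of the builder pattern every example on this page relies on; the field values are arbitrary illustrations, not values from the Ozone source:

import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.DatanodeBlockID;

public class DatanodeBlockIDSketch {
    public static void main(String[] args) {
        // A DatanodeBlockID names a block by its container ID plus an ID
        // local to that container; both values here are made up.
        DatanodeBlockID id = DatanodeBlockID.newBuilder()
            .setContainerID(42L)
            .setLocalID(7L)
            .build();
        System.out.println("container=" + id.getContainerID()
            + " local=" + id.getLocalID());
    }
}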

Example 2 with DatanodeBlockID

Use of org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.DatanodeBlockID in project ozone by apache.

From class ContainerTestHelper, method getDummyCommandRequestProto:

/**
 * Construct fake protobuf messages for various types of requests.
 * This is tedious but necessary for testing: protobuf classes are final
 * and cannot be mocked by Mockito.
 *
 * @param cmdType type of the container command.
 * @return a dummy ContainerCommandRequestProto of the given type.
 */
public static ContainerCommandRequestProto getDummyCommandRequestProto(
    ContainerProtos.Type cmdType) {
    final Builder builder = ContainerCommandRequestProto.newBuilder()
        .setCmdType(cmdType)
        .setContainerID(DUMMY_CONTAINER_ID)
        .setDatanodeUuid(DATANODE_UUID);
    final DatanodeBlockID fakeBlockId = DatanodeBlockID.newBuilder()
        .setContainerID(DUMMY_CONTAINER_ID)
        .setLocalID(1)
        .setBlockCommitSequenceId(101)
        .build();
    final ContainerProtos.ChunkInfo fakeChunkInfo = ContainerProtos.ChunkInfo.newBuilder()
        .setChunkName("dummy")
        .setOffset(0)
        .setLen(100)
        .setChecksumData(ContainerProtos.ChecksumData.newBuilder()
            .setBytesPerChecksum(1)
            .setType(ChecksumType.CRC32)
            .build())
        .build();
    switch (cmdType) {
        case ReadContainer:
            builder.setReadContainer(
                ContainerProtos.ReadContainerRequestProto.newBuilder().build());
            break;
        case GetBlock:
            builder.setGetBlock(ContainerProtos.GetBlockRequestProto.newBuilder()
                .setBlockID(fakeBlockId)
                .build());
            break;
        case GetCommittedBlockLength:
            builder.setGetCommittedBlockLength(
                ContainerProtos.GetCommittedBlockLengthRequestProto.newBuilder()
                    .setBlockID(fakeBlockId)
                    .build());
            break;
        case ReadChunk:
            builder.setReadChunk(ContainerProtos.ReadChunkRequestProto.newBuilder()
                .setBlockID(fakeBlockId)
                .setChunkData(fakeChunkInfo)
                .setReadChunkVersion(ContainerProtos.ReadChunkVersion.V1)
                .build());
            break;
        case GetSmallFile:
            builder.setGetSmallFile(ContainerProtos.GetSmallFileRequestProto.newBuilder()
                .setBlock(ContainerProtos.GetBlockRequestProto.newBuilder()
                    .setBlockID(fakeBlockId)
                    .build())
                .build());
            break;
        default:
            Assert.fail("Unhandled request type " + cmdType + " in unit test");
    }
    return builder.build();
}
Also used: ContainerProtos (org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos), Builder (org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandRequestProto.Builder), DatanodeBlockID (org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.DatanodeBlockID)
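
A hedged sketch of how such a helper might be exercised in a unit test; the test class and assertions below are hypothetical (not taken from the Ozone test suite), and ContainerTestHelper is assumed to be in scope:

import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandRequestProto;
import org.junit.Assert;
import org.junit.Test;

public class DummyRequestSketch {
    @Test
    public void buildsGetBlockRequest() {
        // Hypothetical assertions: the helper should echo the command type
        // and populate the matching GetBlock payload.
        ContainerCommandRequestProto req =
            ContainerTestHelper.getDummyCommandRequestProto(ContainerProtos.Type.GetBlock);
        Assert.assertEquals(ContainerProtos.Type.GetBlock, req.getCmdType());
        Assert.assertTrue(req.hasGetBlock());
    }
}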

Example 3 with DatanodeBlockID

Use of org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.DatanodeBlockID in project ozone by apache.

From class ContainerTestHelper, method newGetBlockRequestBuilder:

public static Builder newGetBlockRequestBuilder(Pipeline pipeline,
    ContainerProtos.PutBlockRequestProtoOrBuilder putBlock) throws IOException {
    DatanodeBlockID blockID = putBlock.getBlockData().getBlockID();
    ContainerProtos.GetBlockRequestProto.Builder getRequest =
        ContainerProtos.GetBlockRequestProto.newBuilder();
    getRequest.setBlockID(blockID);
    Builder request = ContainerCommandRequestProto.newBuilder();
    request.setCmdType(ContainerProtos.Type.GetBlock);
    request.setContainerID(blockID.getContainerID());
    request.setGetBlock(getRequest);
    request.setDatanodeUuid(pipeline.getFirstNode().getUuidString());
    return request;
}
Also used: Builder (org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandRequestProto.Builder), DatanodeBlockID (org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.DatanodeBlockID)
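
A hedged sketch of the intended flow: reuse the block ID from a preceding PutBlock when issuing the follow-up GetBlock. The PutBlockRequestProto and BlockData field names mirror the getters used above; the hand-built values and the assumption that ContainerTestHelper is in scope are illustrative only:

import java.io.IOException;
import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandRequestProto;
import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.DatanodeBlockID;
import org.apache.hadoop.hdds.scm.pipeline.Pipeline;

public class GetAfterPutSketch {
    static ContainerCommandRequestProto buildGetBlock(Pipeline pipeline) throws IOException {
        // In real code this proto comes from the actual PutBlock request;
        // here it is hand-built purely for illustration.
        ContainerProtos.PutBlockRequestProto putBlock =
            ContainerProtos.PutBlockRequestProto.newBuilder()
                .setBlockData(ContainerProtos.BlockData.newBuilder()
                    .setBlockID(DatanodeBlockID.newBuilder()
                        .setContainerID(1L)
                        .setLocalID(1L)
                        .build())
                    .build())
                .build();
        return ContainerTestHelper.newGetBlockRequestBuilder(pipeline, putBlock).build();
    }
}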

Example 4 with DatanodeBlockID

Use of org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.DatanodeBlockID in project ozone by apache.

From class XceiverClientGrpc, method sendCommandWithRetry:

private XceiverClientReply sendCommandWithRetry(
    ContainerCommandRequestProto request,
    List<CheckedBiFunction> validators) throws IOException {
    ContainerCommandResponseProto responseProto = null;
    IOException ioException = null;
    // In case of an exception or an error, we will try to read from the
    // datanodes in the pipeline in a round robin fashion.
    // TODO: cache the correct leader info here, so that subsequent calls
    // go to the leader first.
    XceiverClientReply reply = new XceiverClientReply(null);
    List<DatanodeDetails> datanodeList = null;
    DatanodeBlockID blockID = null;
    if (request.getCmdType() == ContainerProtos.Type.GetBlock) {
        blockID = request.getGetBlock().getBlockID();
    } else if (request.getCmdType() == ContainerProtos.Type.ReadChunk) {
        blockID = request.getReadChunk().getBlockID();
    } else if (request.getCmdType() == ContainerProtos.Type.GetSmallFile) {
        blockID = request.getGetSmallFile().getBlock().getBlockID();
    }
    if (blockID != null) {
        // Check whether the datanode that last served this block is cached;
        // if so, it is moved to the front of the read order below.
        DatanodeDetails cachedDN = getBlockDNcache.get(blockID);
        if (cachedDN != null) {
            datanodeList = pipeline.getNodes();
            int getBlockDNCacheIndex = datanodeList.indexOf(cachedDN);
            if (getBlockDNCacheIndex > 0) {
                // Pull the Cached DN to the top of the DN list
                Collections.swap(datanodeList, 0, getBlockDNCacheIndex);
            }
        }
    }
    if (datanodeList == null) {
        if (topologyAwareRead) {
            datanodeList = pipeline.getNodesInOrder();
        } else {
            datanodeList = pipeline.getNodes();
            // Shuffle datanode list so that clients do not read in the same order
            // every time.
            Collections.shuffle(datanodeList);
        }
    }
    for (DatanodeDetails dn : datanodeList) {
        try {
            if (LOG.isDebugEnabled()) {
                LOG.debug("Executing command {} on datanode {}", processForDebug(request), dn);
            }
            // In case the command gets retried on a 2nd datanode,
            // sendCommandAsyncCall will create a new channel and async stub
            // in case these don't exist for the specific datanode.
            reply.addDatanode(dn);
            responseProto = sendCommandAsync(request, dn).getResponse().get();
            if (validators != null && !validators.isEmpty()) {
                for (CheckedBiFunction validator : validators) {
                    validator.apply(request, responseProto);
                }
            }
            if (request.getCmdType() == ContainerProtos.Type.GetBlock) {
                DatanodeBlockID getBlockID = request.getGetBlock().getBlockID();
                getBlockDNcache.put(getBlockID, dn);
            }
            break;
        } catch (IOException e) {
            ioException = e;
            responseProto = null;
            if (LOG.isDebugEnabled()) {
                LOG.debug("Failed to execute command {} on datanode {}", processForDebug(request), dn, e);
            }
        } catch (ExecutionException e) {
            if (LOG.isDebugEnabled()) {
                LOG.debug("Failed to execute command {} on datanode {}", processForDebug(request), dn, e);
            }
            if (Status.fromThrowable(e.getCause()).getCode() == Status.UNAUTHENTICATED.getCode()) {
                throw new SCMSecurityException("Failed to authenticate with " + "GRPC XceiverServer with Ozone block token.");
            }
            ioException = new IOException(e);
            responseProto = null;
        } catch (InterruptedException e) {
            LOG.error("Command execution was interrupted ", e);
            Thread.currentThread().interrupt();
            responseProto = null;
        }
    }
    if (responseProto != null) {
        reply.setResponse(CompletableFuture.completedFuture(responseProto));
        return reply;
    } else {
        Preconditions.checkNotNull(ioException);
        LOG.error("Failed to execute command {} on the pipeline {}.", processForDebug(request), pipeline);
        throw ioException;
    }
}
Also used: CheckedBiFunction (org.apache.hadoop.hdds.scm.storage.CheckedBiFunction), SCMSecurityException (org.apache.hadoop.hdds.security.exception.SCMSecurityException), DatanodeDetails (org.apache.hadoop.hdds.protocol.DatanodeDetails), InterruptedIOException (java.io.InterruptedIOException), IOException (java.io.IOException), SupplierWithIOException (org.apache.hadoop.hdds.function.SupplierWithIOException), DatanodeBlockID (org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.DatanodeBlockID), ExecutionException (java.util.concurrent.ExecutionException), ContainerCommandResponseProto (org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandResponseProto)
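
The getBlockDNcache consulted and updated above is declared elsewhere in XceiverClientGrpc; a plausible shape (an assumption, not the verified declaration) is a concurrent map from block ID to the datanode that last served it. Protobuf-generated messages implement value-based equals and hashCode, which is what makes DatanodeBlockID usable as a map key:

import java.util.concurrent.ConcurrentHashMap;

import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.DatanodeBlockID;

// Assumed declaration: maps a block to the datanode that successfully
// answered the last GetBlock for it, so later reads try that node first.
private final ConcurrentHashMap<DatanodeBlockID, DatanodeDetails> getBlockDNcache =
    new ConcurrentHashMap<>();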

Example 5 with DatanodeBlockID

Use of org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.DatanodeBlockID in project ozone by apache.

From class ContainerTestHelper, method newGetCommittedBlockLengthBuilder:

public static Builder newGetCommittedBlockLengthBuilder(Pipeline pipeline,
    ContainerProtos.PutBlockRequestProtoOrBuilder putBlock) throws IOException {
    DatanodeBlockID blockID = putBlock.getBlockData().getBlockID();
    ContainerProtos.GetCommittedBlockLengthRequestProto.Builder req =
        ContainerProtos.GetCommittedBlockLengthRequestProto.newBuilder()
            .setBlockID(blockID);
    return ContainerCommandRequestProto.newBuilder()
        .setCmdType(ContainerProtos.Type.GetCommittedBlockLength)
        .setContainerID(blockID.getContainerID())
        .setDatanodeUuid(pipeline.getFirstNode().getUuidString())
        .setGetCommittedBlockLength(req);
}
Also used: DatanodeBlockID (org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.DatanodeBlockID)
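
To close the loop, a hedged sketch of sending the built request through a client and reading back the committed length; sendCommand and the response getters are assumed from their use elsewhere in the Ozone client code, and ContainerTestHelper is assumed in scope:

import java.io.IOException;
import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandRequestProto;
import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandResponseProto;
import org.apache.hadoop.hdds.scm.XceiverClientSpi;
import org.apache.hadoop.hdds.scm.pipeline.Pipeline;

public class CommittedLengthSketch {
    static long committedLength(XceiverClientSpi client, Pipeline pipeline,
        ContainerProtos.PutBlockRequestProtoOrBuilder putBlock) throws IOException {
        ContainerCommandRequestProto request =
            ContainerTestHelper.newGetCommittedBlockLengthBuilder(pipeline, putBlock).build();
        // Assumed to block until the datanode replies or an error surfaces.
        ContainerCommandResponseProto response = client.sendCommand(request);
        return response.getGetCommittedBlockLength().getBlockLength();
    }
}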

Aggregations

DatanodeBlockID (org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.DatanodeBlockID): 6 usages
ChunkInfo (org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ChunkInfo): 2 usages
Builder (org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandRequestProto.Builder): 2 usages
IOException (java.io.IOException): 1 usage
InterruptedIOException (java.io.InterruptedIOException): 1 usage
ExecutionException (java.util.concurrent.ExecutionException): 1 usage
SupplierWithIOException (org.apache.hadoop.hdds.function.SupplierWithIOException): 1 usage
DatanodeDetails (org.apache.hadoop.hdds.protocol.DatanodeDetails): 1 usage
ContainerProtos (org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos): 1 usage
ContainerCommandResponseProto (org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandResponseProto): 1 usage
GetBlockResponseProto (org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.GetBlockResponseProto): 1 usage
WriteChunkRequestProto (org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.WriteChunkRequestProto): 1 usage
XceiverClientSpi (org.apache.hadoop.hdds.scm.XceiverClientSpi): 1 usage
CheckedBiFunction (org.apache.hadoop.hdds.scm.storage.CheckedBiFunction): 1 usage
SCMSecurityException (org.apache.hadoop.hdds.security.exception.SCMSecurityException): 1 usage