
Example 1 with XceiverClientReply

Use of org.apache.hadoop.hdds.scm.XceiverClientReply in the Apache Ozone project.

From the class TestReplicatedFileChecksumHelper, method buildValidResponse().

private XceiverClientReply buildValidResponse() {
    // return a GetBlockResponse message of a block and its chunk checksums.
    ContainerProtos.DatanodeBlockID blockID = ContainerProtos.DatanodeBlockID.newBuilder()
        .setContainerID(1).setLocalID(1).setBlockCommitSequenceId(1).build();
    byte[] byteArray = new byte[10];
    ByteString byteString = ByteString.copyFrom(byteArray);
    ContainerProtos.ChecksumData checksumData = ContainerProtos.ChecksumData.newBuilder()
        .setType(CRC32).setBytesPerChecksum(1024).addChecksums(byteString).build();
    ContainerProtos.ChunkInfo chunkInfo = ContainerProtos.ChunkInfo.newBuilder()
        .setChunkName("dummy_chunk").setOffset(1).setLen(10).setChecksumData(checksumData).build();
    ContainerProtos.BlockData blockData = ContainerProtos.BlockData.newBuilder()
        .setBlockID(blockID).addChunks(chunkInfo).build();
    ContainerProtos.GetBlockResponseProto getBlockResponseProto =
        ContainerProtos.GetBlockResponseProto.newBuilder().setBlockData(blockData).build();
    ContainerProtos.ContainerCommandResponseProto resp =
        ContainerProtos.ContainerCommandResponseProto.newBuilder()
            .setCmdType(ContainerProtos.Type.GetBlock)
            .setResult(ContainerProtos.Result.SUCCESS)
            .setGetBlock(getBlockResponseProto).build();
    final CompletableFuture<ContainerProtos.ContainerCommandResponseProto> replyFuture =
        new CompletableFuture<>();
    replyFuture.complete(resp);
    return new XceiverClientReply(replyFuture);
}
Also used : ContainerProtos(org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos) ByteString(org.apache.ratis.thirdparty.com.google.protobuf.ByteString) XceiverClientReply(org.apache.hadoop.hdds.scm.XceiverClientReply) CompletableFuture(java.util.concurrent.CompletableFuture)
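
A minimal follow-up sketch (not part of the original test class; the method name is invented and JUnit 4 is assumed, as used elsewhere on this page) showing how a test could consume the stubbed reply built above. The assertions only restate values that buildValidResponse() sets.

@Test
public void consumesStubbedGetBlockReply() throws Exception {
    // Unwrap the pre-completed future and inspect the GetBlock payload.
    XceiverClientReply reply = buildValidResponse();
    ContainerProtos.ContainerCommandResponseProto resp = reply.getResponse().get();
    Assert.assertEquals(ContainerProtos.Result.SUCCESS, resp.getResult());
    ContainerProtos.BlockData blockData = resp.getGetBlock().getBlockData();
    Assert.assertEquals(1, blockData.getChunksCount());
    Assert.assertEquals(1024, blockData.getChunks(0).getChecksumData().getBytesPerChecksum());
}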

Example 2 with XceiverClientReply

Use of org.apache.hadoop.hdds.scm.XceiverClientReply in the Apache Ozone project.

From the class LeaderAppendLogEntryGenerator, method call().

@Override
public Void call() throws Exception {
    inFlightMessages = new LinkedBlockingQueue<>(inflightLimit);
    OzoneConfiguration conf = createOzoneConfiguration();
    byte[] data = RandomStringUtils.randomAscii(chunkSize).getBytes(StandardCharsets.UTF_8);
    dataToWrite = ByteString.copyFrom(data);
    setServerIdFromFile(conf);
    requestor = RaftPeerProto.newBuilder()
        .setId(RaftPeerId.valueOf(FAKE_FOLLOWER_ID1).toByteString())
        .setAddress(FAKE_LEADER_ADDDRESS1).build();
    NettyChannelBuilder channelBuilder = NettyChannelBuilder.forTarget(serverAddress);
    channelBuilder.negotiationType(NegotiationType.PLAINTEXT);
    ManagedChannel build = channelBuilder.build();
    stub = RaftServerProtocolServiceGrpc.newStub(build);
    init();
    if (nextIndex == 0) {
        configureGroup();
    }
    Thread.sleep(3000L);
    XceiverClientRatis client = createXceiverClient(conf);
    client.connect();
    long containerId = 1L;
    // Create the target container synchronously before starting the benchmark.
    System.out.println(client.sendCommand(createContainerRequest(containerId)));
    timer = getMetrics().timer("append-entry");
    // Time each chunk write, keeping at most inflightLimit requests in flight:
    // take a queue slot before sending and release it when the async reply completes.
    runTests(step -> timer.time(() -> {
        inFlightMessages.put(step);
        XceiverClientReply xceiverClientReply =
            client.sendCommandAsync(createChunkWriteRequest(containerId, step));
        xceiverClientReply.getResponse().thenApply(response -> inFlightMessages.remove(step));
        return null;
    }));
    return null;
}
Also used : XceiverClientReply(org.apache.hadoop.hdds.scm.XceiverClientReply) Name(org.apache.hadoop.hdds.protocol.DatanodeDetails.Port.Name) RaftGroup(org.apache.ratis.protocol.RaftGroup) CreateContainerRequestProto(org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.CreateContainerRequestProto) LoggerFactory(org.slf4j.LoggerFactory) Random(java.util.Random) ByteString(org.apache.ratis.thirdparty.com.google.protobuf.ByteString) HddsVersionProvider(org.apache.hadoop.hdds.cli.HddsVersionProvider) RatisReplicationConfig(org.apache.hadoop.hdds.client.RatisReplicationConfig) RaftPeer(org.apache.ratis.protocol.RaftPeer) ReplicationFactor(org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor) ChunkInfo(org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ChunkInfo) NegotiationType(org.apache.ratis.thirdparty.io.grpc.netty.NegotiationType) UUID(java.util.UUID) LinkedBlockingQueue(java.util.concurrent.LinkedBlockingQueue) StandardCharsets(java.nio.charset.StandardCharsets) RaftServerProtocolServiceGrpc(org.apache.ratis.proto.grpc.RaftServerProtocolServiceGrpc) ContainerType(org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerType) List(java.util.List) Option(picocli.CommandLine.Option) ClientId(org.apache.ratis.protocol.ClientId) RaftClientReply(org.apache.ratis.protocol.RaftClientReply) DatanodeBlockID(org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.DatanodeBlockID) RaftProperties(org.apache.ratis.conf.RaftProperties) ManagedChannel(org.apache.ratis.thirdparty.io.grpc.ManagedChannel) Timer(com.codahale.metrics.Timer) RaftServerProtocolServiceStub(org.apache.ratis.proto.grpc.RaftServerProtocolServiceGrpc.RaftServerProtocolServiceStub) RandomStringUtils(org.apache.commons.lang3.RandomStringUtils) ContainerCommandRequestProto(org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandRequestProto) RaftPeerProto(org.apache.ratis.proto.RaftProtos.RaftPeerProto) OzoneConfiguration(org.apache.hadoop.hdds.conf.OzoneConfiguration) PipelineState(org.apache.hadoop.hdds.scm.pipeline.Pipeline.PipelineState) Callable(java.util.concurrent.Callable) ArrayList(java.util.ArrayList) RaftGroupId(org.apache.ratis.protocol.RaftGroupId) XceiverClientRatis(org.apache.hadoop.hdds.scm.XceiverClientRatis) Type(org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Type) ChecksumType(org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ChecksumType) WriteChunkRequestProto(org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.WriteChunkRequestProto) NettyChannelBuilder(org.apache.ratis.thirdparty.io.grpc.netty.NettyChannelBuilder) Command(picocli.CommandLine.Command) ChecksumData(org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ChecksumData) Logger(org.slf4j.Logger) RaftPeerId(org.apache.ratis.protocol.RaftPeerId) DatanodeDetails(org.apache.hadoop.hdds.protocol.DatanodeDetails) IOException(java.io.IOException) Pipeline(org.apache.hadoop.hdds.scm.pipeline.Pipeline) PipelineID(org.apache.hadoop.hdds.scm.pipeline.PipelineID) RaftClient(org.apache.ratis.client.RaftClient)
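
Distilled from the call() method above, the sketch below isolates the back-pressure idiom it relies on. The field and method names, the capacity of 100, and the requestFor() helper are illustrative placeholders, not part of the generator.

private final BlockingQueue<Long> inFlight = new LinkedBlockingQueue<>(100);

private void sendBounded(XceiverClientRatis client, long step) throws Exception {
    // Take a slot first; put() blocks once the in-flight limit is reached.
    inFlight.put(step);
    XceiverClientReply reply = client.sendCommandAsync(requestFor(step));
    // Release the slot as soon as the datanode's async response arrives.
    reply.getResponse().thenApply(response -> inFlight.remove(step));
}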

Example 3 with XceiverClientReply

Use of org.apache.hadoop.hdds.scm.XceiverClientReply in the Apache Ozone project.

From the class Test2WayCommitInRatis, method test2WayCommitForRetryfailure().

@Test
public void test2WayCommitForRetryfailure() throws Exception {
    OzoneConfiguration conf = new OzoneConfiguration();
    startCluster(conf);
    GenericTestUtils.LogCapturer logCapturer = GenericTestUtils.LogCapturer.captureLogs(XceiverClientRatis.LOG);
    XceiverClientManager clientManager = new XceiverClientManager(conf);
    ContainerWithPipeline container1 = storageContainerLocationClient.allocateContainer(
        HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.THREE, OzoneConsts.OZONE);
    XceiverClientSpi xceiverClient = clientManager.acquireClient(container1.getPipeline());
    Assert.assertEquals(1, xceiverClient.getRefcount());
    Assert.assertEquals(container1.getPipeline(), xceiverClient.getPipeline());
    Pipeline pipeline = xceiverClient.getPipeline();
    XceiverClientRatis ratisClient = (XceiverClientRatis) xceiverClient;
    XceiverClientReply reply = xceiverClient.sendCommandAsync(
        ContainerTestHelper.getCreateContainerRequest(
            container1.getContainerInfo().getContainerID(), xceiverClient.getPipeline()));
    reply.getResponse().get();
    Assert.assertEquals(3, ratisClient.getCommitInfoMap().size());
    // wait for the container to be created on all the nodes
    xceiverClient.watchForCommit(reply.getLogIndex());
    for (HddsDatanodeService dn : cluster.getHddsDatanodes()) {
        // shutdown the ratis follower
        if (RatisTestHelper.isRatisFollower(dn, pipeline)) {
            cluster.shutdownHddsDatanode(dn.getDatanodeDetails());
            break;
        }
    }
    reply = xceiverClient.sendCommandAsync(ContainerTestHelper.getCloseContainer(
        pipeline, container1.getContainerInfo().getContainerID()));
    reply.getResponse().get();
    xceiverClient.watchForCommit(reply.getLogIndex());
    // commitInfo Map will be reduced to 2 here
    Assert.assertEquals(2, ratisClient.getCommitInfoMap().size());
    clientManager.releaseClient(xceiverClient, false);
    Assert.assertTrue(logCapturer.getOutput().contains("3 way commit failed"));
    Assert.assertTrue(logCapturer.getOutput().contains("Committed by majority"));
    logCapturer.stopCapturing();
    shutdown();
}
Also used : XceiverClientReply(org.apache.hadoop.hdds.scm.XceiverClientReply) OzoneConfiguration(org.apache.hadoop.hdds.conf.OzoneConfiguration) GenericTestUtils(org.apache.ozone.test.GenericTestUtils) XceiverClientRatis(org.apache.hadoop.hdds.scm.XceiverClientRatis) HddsDatanodeService(org.apache.hadoop.ozone.HddsDatanodeService) XceiverClientManager(org.apache.hadoop.hdds.scm.XceiverClientManager) XceiverClientSpi(org.apache.hadoop.hdds.scm.XceiverClientSpi) ContainerWithPipeline(org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline) Pipeline(org.apache.hadoop.hdds.scm.pipeline.Pipeline) Test(org.junit.Test)
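
The core idiom the test exercises, shown as a standalone sketch (the helper name is illustrative): send a command asynchronously, wait for the leader's reply, then watch the returned log index until it is replicated.

private void sendAndWatch(XceiverClientSpi client,
        ContainerProtos.ContainerCommandRequestProto request) throws Exception {
    XceiverClientReply reply = client.sendCommandAsync(request);
    // Wait for the leader to apply the command...
    reply.getResponse().get();
    // ...then block until the same log index is committed on the required replicas.
    client.watchForCommit(reply.getLogIndex());
}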

Example 4 with XceiverClientReply

Use of org.apache.hadoop.hdds.scm.XceiverClientReply in the Apache Ozone project.

From the class TestCommitWatcher, method testReleaseBuffers().

@Test
public void testReleaseBuffers() throws Exception {
    int capacity = 2;
    BufferPool bufferPool = new BufferPool(chunkSize, capacity);
    XceiverClientManager clientManager = new XceiverClientManager(conf);
    ContainerWithPipeline container = storageContainerLocationClient.allocateContainer(
        HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.THREE, OzoneConsts.OZONE);
    Pipeline pipeline = container.getPipeline();
    long containerId = container.getContainerInfo().getContainerID();
    XceiverClientSpi xceiverClient = clientManager.acquireClient(pipeline);
    Assert.assertEquals(1, xceiverClient.getRefcount());
    Assert.assertTrue(xceiverClient instanceof XceiverClientRatis);
    XceiverClientRatis ratisClient = (XceiverClientRatis) xceiverClient;
    CommitWatcher watcher = new CommitWatcher(bufferPool, ratisClient);
    BlockID blockID = ContainerTestHelper.getTestBlockID(containerId);
    List<XceiverClientReply> replies = new ArrayList<>();
    long length = 0;
    List<CompletableFuture<ContainerProtos.ContainerCommandResponseProto>> futures = new ArrayList<>();
    for (int i = 0; i < capacity; i++) {
        ContainerProtos.ContainerCommandRequestProto writeChunkRequest =
            ContainerTestHelper.getWriteChunkRequest(pipeline, blockID, chunkSize, null);
        // add the data to the buffer pool
        final ChunkBuffer byteBuffer = bufferPool.allocateBuffer(0);
        byteBuffer.put(writeChunkRequest.getWriteChunk().getData());
        ratisClient.sendCommandAsync(writeChunkRequest);
        ContainerProtos.ContainerCommandRequestProto putBlockRequest =
            ContainerTestHelper.getPutBlockRequest(pipeline, writeChunkRequest.getWriteChunk());
        XceiverClientReply reply = ratisClient.sendCommandAsync(putBlockRequest);
        final List<ChunkBuffer> bufferList = singletonList(byteBuffer);
        length += byteBuffer.position();
        CompletableFuture<ContainerProtos.ContainerCommandResponseProto> future = reply.getResponse().thenApply(v -> {
            watcher.updateCommitInfoMap(reply.getLogIndex(), bufferList);
            return v;
        });
        futures.add(future);
        watcher.getFutureMap().put(length, future);
        replies.add(reply);
    }
    Assert.assertTrue(replies.size() == 2);
    // wait on the 1st putBlock to complete
    CompletableFuture<ContainerProtos.ContainerCommandResponseProto> future1 = futures.get(0);
    CompletableFuture<ContainerProtos.ContainerCommandResponseProto> future2 = futures.get(1);
    future1.get();
    Assert.assertNotNull(watcher.getFutureMap().get((long) chunkSize));
    Assert.assertTrue(watcher.getFutureMap().get((long) chunkSize).equals(future1));
    // wait on 2nd putBlock to complete
    future2.get();
    Assert.assertNotNull(watcher.getFutureMap().get((long) 2 * chunkSize));
    Assert.assertTrue(watcher.getFutureMap().get((long) 2 * chunkSize).equals(future2));
    Assert.assertTrue(watcher.getCommitIndex2flushedDataMap().size() == 2);
    watcher.watchOnFirstIndex();
    Assert.assertFalse(watcher.getCommitIndex2flushedDataMap().containsKey(replies.get(0).getLogIndex()));
    Assert.assertFalse(watcher.getFutureMap().containsKey(chunkSize));
    Assert.assertTrue(watcher.getTotalAckDataLength() >= chunkSize);
    watcher.watchOnLastIndex();
    Assert.assertFalse(watcher.getCommitIndex2flushedDataMap().containsKey(replies.get(1).getLogIndex()));
    Assert.assertFalse(watcher.getFutureMap().containsKey(2 * chunkSize));
    Assert.assertTrue(watcher.getTotalAckDataLength() == 2 * chunkSize);
    Assert.assertTrue(watcher.getFutureMap().isEmpty());
    Assert.assertTrue(watcher.getCommitIndex2flushedDataMap().isEmpty());
}
Also used : CommitWatcher(org.apache.hadoop.hdds.scm.storage.CommitWatcher) ContainerProtos(org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos) ArrayList(java.util.ArrayList) XceiverClientRatis(org.apache.hadoop.hdds.scm.XceiverClientRatis) XceiverClientManager(org.apache.hadoop.hdds.scm.XceiverClientManager) XceiverClientSpi(org.apache.hadoop.hdds.scm.XceiverClientSpi) ContainerWithPipeline(org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline) Pipeline(org.apache.hadoop.hdds.scm.pipeline.Pipeline) XceiverClientReply(org.apache.hadoop.hdds.scm.XceiverClientReply) CompletableFuture(java.util.concurrent.CompletableFuture) BufferPool(org.apache.hadoop.hdds.scm.storage.BufferPool) BlockID(org.apache.hadoop.hdds.client.BlockID) ChunkBuffer(org.apache.hadoop.ozone.common.ChunkBuffer) Test(org.junit.Test)
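
As a sketch of the registration step under test (the method name is invented): when a putBlock reply completes, the buffers backing that write are recorded against the reply's Ratis log index, so that watchOnFirstIndex() or watchOnLastIndex() can release them once the index is committed.

private CompletableFuture<ContainerProtos.ContainerCommandResponseProto> trackFlushedBuffers(
        CommitWatcher watcher, XceiverClientReply reply, List<ChunkBuffer> buffers) {
    return reply.getResponse().thenApply(response -> {
        // Remember which buffers correspond to this commit index.
        watcher.updateCommitInfoMap(reply.getLogIndex(), buffers);
        return response;
    });
}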

Example 5 with XceiverClientReply

Use of org.apache.hadoop.hdds.scm.XceiverClientReply in the Apache Ozone project.

From the class CommitWatcher, method watchForCommit().

/**
 * Calls the watchForCommit API of the Ratis client; for a standalone client
 * it is a no-op.
 * @param commitIndex log index to watch for
 * @return minimum commit index replicated to all nodes
 * @throws IOException if the watch times out
 */
public XceiverClientReply watchForCommit(long commitIndex) throws IOException {
    long index;
    try {
        XceiverClientReply reply = xceiverClient.watchForCommit(commitIndex);
        if (reply == null) {
            index = 0;
        } else {
            index = reply.getLogIndex();
        }
        adjustBuffers(index);
        return reply;
    } catch (InterruptedException e) {
        // Re-interrupt the thread while catching InterruptedException
        Thread.currentThread().interrupt();
        throw getIOExceptionForWatchForCommit(commitIndex, e);
    } catch (TimeoutException | ExecutionException e) {
        throw getIOExceptionForWatchForCommit(commitIndex, e);
    }
}
Also used : XceiverClientReply(org.apache.hadoop.hdds.scm.XceiverClientReply) ExecutionException(java.util.concurrent.ExecutionException) TimeoutException(java.util.concurrent.TimeoutException)
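
A hypothetical caller of the method above might use it as in the sketch below, treating a null reply as "nothing acknowledged yet"; the method name is illustrative.

private long waitForReplication(CommitWatcher watcher, long writtenIndex) throws IOException {
    XceiverClientReply reply = watcher.watchForCommit(writtenIndex);
    // Mirror the null handling inside watchForCommit itself: no reply means index 0.
    return reply == null ? 0 : reply.getLogIndex();
}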

Aggregations

XceiverClientReply (org.apache.hadoop.hdds.scm.XceiverClientReply): 14 uses
Pipeline (org.apache.hadoop.hdds.scm.pipeline.Pipeline): 10 uses
XceiverClientSpi (org.apache.hadoop.hdds.scm.XceiverClientSpi): 8 uses
IOException (java.io.IOException): 7 uses
ArrayList (java.util.ArrayList): 6 uses
DatanodeDetails (org.apache.hadoop.hdds.protocol.DatanodeDetails): 6 uses
XceiverClientManager (org.apache.hadoop.hdds.scm.XceiverClientManager): 6 uses
XceiverClientRatis (org.apache.hadoop.hdds.scm.XceiverClientRatis): 6 uses
ContainerWithPipeline (org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline): 6 uses
Test (org.junit.Test): 6 uses
CompletableFuture (java.util.concurrent.CompletableFuture): 5 uses
ExecutionException (java.util.concurrent.ExecutionException): 5 uses
ContainerProtos (org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos): 5 uses
ByteString (org.apache.ratis.thirdparty.com.google.protobuf.ByteString): 5 uses
BlockID (org.apache.hadoop.hdds.client.BlockID): 4 uses
List (java.util.List): 3 uses
Random (java.util.Random): 3 uses
TimeoutException (java.util.concurrent.TimeoutException): 3 uses
ChunkInfo (org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ChunkInfo): 3 uses
ChunkBuffer (org.apache.hadoop.ozone.common.ChunkBuffer): 3 uses