Search in sources :

Example 1 with XceiverClientSpi

Use of org.apache.hadoop.hdds.scm.XceiverClientSpi in the Apache Ozone project.

From the class ReplicatedFileChecksumHelper, the method getChunkInfos:

// copied from BlockInputStream
/**
 * Send RPC call to get the block info from the container.
 * <p>
 * Reads always go over the Standalone protocol, so if the key's pipeline is
 * not STAND_ALONE it is rebuilt with a Standalone replication config derived
 * from the original replication factor.
 *
 * @param keyLocationInfo location (pipeline, block ID, token) of the block
 * @return List of chunks in this block.
 * @throws IOException if the client cannot be acquired or the RPC fails
 */
protected List<ContainerProtos.ChunkInfo> getChunkInfos(OmKeyLocationInfo keyLocationInfo) throws IOException {
    // irrespective of the container state, we will always read via Standalone
    // protocol.
    Token<OzoneBlockTokenIdentifier> token = keyLocationInfo.getToken();
    Pipeline pipeline = keyLocationInfo.getPipeline();
    BlockID blockID = keyLocationInfo.getBlockID();
    if (pipeline.getType() != HddsProtos.ReplicationType.STAND_ALONE) {
        pipeline = Pipeline.newBuilder(pipeline).setReplicationConfig(StandaloneReplicationConfig.getInstance(ReplicationConfig.getLegacyFactor(pipeline.getReplicationConfig()))).build();
    }
    List<ContainerProtos.ChunkInfo> chunks;
    XceiverClientSpi xceiverClientSpi = null;
    try {
        if (LOG.isDebugEnabled()) {
            LOG.debug("Initializing BlockInputStream for get key to access {}", blockID.getContainerID());
        }
        xceiverClientSpi = getXceiverClientFactory().acquireClientForReadData(pipeline);
        ContainerProtos.DatanodeBlockID datanodeBlockID = blockID.getDatanodeBlockIDProtobuf();
        ContainerProtos.GetBlockResponseProto response = ContainerProtocolCalls.getBlock(xceiverClientSpi, datanodeBlockID, token);
        chunks = response.getBlockData().getChunksList();
    } finally {
        // Release the acquired client on every path; the original code only
        // released on failure, leaking the client refcount on success.
        if (xceiverClientSpi != null) {
            getXceiverClientFactory().releaseClientForReadData(xceiverClientSpi, false);
        }
    }
    return chunks;
}
Also used : ContainerProtos(org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos) BlockID(org.apache.hadoop.hdds.client.BlockID) XceiverClientSpi(org.apache.hadoop.hdds.scm.XceiverClientSpi) OzoneBlockTokenIdentifier(org.apache.hadoop.hdds.security.token.OzoneBlockTokenIdentifier) Pipeline(org.apache.hadoop.hdds.scm.pipeline.Pipeline)

Example 2 with XceiverClientSpi

use of org.apache.hadoop.hdds.scm.XceiverClientSpi in project ozone by apache.

From the class TestXceiverClientMetrics, the method testMetrics:

@Test
public void testMetrics() throws Exception {
    OzoneConfiguration conf = new OzoneConfiguration();
    String metaDir = GenericTestUtils.getTempPath(TestXceiverClientManager.class.getName() + UUID.randomUUID());
    conf.set(HDDS_METADATA_DIR_NAME, metaDir);
    XceiverClientManager clientManager = new XceiverClientManager(conf);
    try {
        ContainerWithPipeline container = storageContainerLocationClient.allocateContainer(SCMTestUtils.getReplicationType(conf), SCMTestUtils.getReplicationFactor(conf), OzoneConsts.OZONE);
        XceiverClientSpi client = clientManager.acquireClient(container.getPipeline());
        ContainerCommandRequestProto request = ContainerTestHelper.getCreateContainerRequest(container.getContainerInfo().getContainerID(), container.getPipeline());
        client.sendCommand(request);
        MetricsRecordBuilder containerMetrics = getMetrics(XceiverClientMetrics.SOURCE_NAME);
        // Above request command is in a synchronous way, so there will be no
        // pending requests.
        assertCounter("PendingOps", 0L, containerMetrics);
        assertCounter("numPendingCreateContainer", 0L, containerMetrics);
        // the counter value of average latency metric should be increased
        assertCounter("CreateContainerLatencyNumOps", 1L, containerMetrics);
        breakFlag = false;
        latch = new CountDownLatch(1);
        int numRequest = 10;
        List<CompletableFuture<ContainerCommandResponseProto>> computeResults = new ArrayList<>();
        // start new thread to send async requests
        Thread sendThread = new Thread(() -> {
            while (!breakFlag) {
                try {
                    // use async interface for testing pending metrics
                    for (int i = 0; i < numRequest; i++) {
                        BlockID blockID = ContainerTestHelper.getTestBlockID(container.getContainerInfo().getContainerID());
                        ContainerProtos.ContainerCommandRequestProto smallFileRequest;
                        smallFileRequest = ContainerTestHelper.getWriteSmallFileRequest(client.getPipeline(), blockID, 1024);
                        CompletableFuture<ContainerProtos.ContainerCommandResponseProto> response = client.sendCommandAsync(smallFileRequest).getResponse();
                        computeResults.add(response);
                    }
                    Thread.sleep(1000);
                } catch (InterruptedException ie) {
                    // restore the interrupt status so the loop can observe it
                    Thread.currentThread().interrupt();
                } catch (Exception ignored) {
                    // best-effort sender; failures only affect metric counts
                }
            }
            latch.countDown();
        });
        sendThread.start();
        GenericTestUtils.waitFor(() -> {
            // check if pending metric count is increased
            MetricsRecordBuilder metric = getMetrics(XceiverClientMetrics.SOURCE_NAME);
            long pendingOps = getLongCounter("PendingOps", metric);
            long pendingPutSmallFileOps = getLongCounter("numPendingPutSmallFile", metric);
            if (pendingOps > 0 && pendingPutSmallFileOps > 0) {
                // reset break flag
                breakFlag = true;
                return true;
            } else {
                return false;
            }
        }, 100, 60000);
        // blocking until we stop sending async requests
        latch.await();
        // Wait for all futures being done.
        GenericTestUtils.waitFor(() -> {
            // parameterized (non-raw) future type; we only poll completion here
            for (CompletableFuture<ContainerCommandResponseProto> future : computeResults) {
                if (!future.isDone()) {
                    return false;
                }
            }
            return true;
        }, 100, 60000);
        // the counter value of pending metrics should be decreased to 0
        containerMetrics = getMetrics(XceiverClientMetrics.SOURCE_NAME);
        assertCounter("PendingOps", 0L, containerMetrics);
        assertCounter("numPendingPutSmallFile", 0L, containerMetrics);
        // release the client acquired above; original code leaked it
        clientManager.releaseClient(client, false);
    } finally {
        // close the manager even if an assertion above fails
        clientManager.close();
    }
}
Also used : ContainerProtos(org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos) ContainerCommandRequestProto(org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandRequestProto) ArrayList(java.util.ArrayList) OzoneConfiguration(org.apache.hadoop.hdds.conf.OzoneConfiguration) XceiverClientManager(org.apache.hadoop.hdds.scm.XceiverClientManager) XceiverClientSpi(org.apache.hadoop.hdds.scm.XceiverClientSpi) CountDownLatch(java.util.concurrent.CountDownLatch) ContainerWithPipeline(org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline) ContainerCommandResponseProto(org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandResponseProto) CompletableFuture(java.util.concurrent.CompletableFuture) ContainerCommandRequestProto(org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandRequestProto) BlockID(org.apache.hadoop.hdds.client.BlockID) MetricsRecordBuilder(org.apache.hadoop.metrics2.MetricsRecordBuilder) Test(org.junit.Test)

Example 3 with XceiverClientSpi

use of org.apache.hadoop.hdds.scm.XceiverClientSpi in project ozone by apache.

From the class TestXceiverClientManager, the method testFreeByReference:

@Test
public void testFreeByReference() throws IOException {
    OzoneConfiguration conf = new OzoneConfiguration();
    ScmClientConfig clientConfig = conf.getObject(ScmClientConfig.class);
    // cache holds at most one client so the second acquire evicts the first
    clientConfig.setMaxSize(1);
    String metaDir = GenericTestUtils.getTempPath(TestXceiverClientManager.class.getName() + UUID.randomUUID());
    conf.set(HDDS_METADATA_DIR_NAME, metaDir);
    XceiverClientManager clientManager = new XceiverClientManager(conf, clientConfig, null);
    Cache<String, XceiverClientSpi> cache = clientManager.getClientCache();
    ContainerWithPipeline container1 = storageContainerLocationClient.allocateContainer(SCMTestUtils.getReplicationType(conf), HddsProtos.ReplicationFactor.ONE, OzoneConsts.OZONE);
    XceiverClientSpi client1 = clientManager.acquireClient(container1.getPipeline());
    Assert.assertEquals(1, client1.getRefcount());
    Assert.assertEquals(container1.getPipeline(), client1.getPipeline());
    ContainerWithPipeline container2 = storageContainerLocationClient.allocateContainer(SCMTestUtils.getReplicationType(conf), HddsProtos.ReplicationFactor.THREE, OzoneConsts.OZONE);
    XceiverClientSpi client2 = clientManager.acquireClient(container2.getPipeline());
    try {
        Assert.assertEquals(1, client2.getRefcount());
        Assert.assertNotEquals(client1, client2);
        // least recent container (i.e containerName1) is evicted
        XceiverClientSpi nonExistent1 = cache.getIfPresent(container1.getContainerInfo().getPipelineID().getId().toString() + container1.getContainerInfo().getReplicationType());
        Assert.assertEquals(null, nonExistent1);
        // However container call should succeed because of refcount on the client.
        ContainerProtocolCalls.createContainer(client1, container1.getContainerInfo().getContainerID(), null);
        // After releasing the client, this connection should be closed
        // and any container operations should fail
        clientManager.releaseClient(client1, false);
        String expectedMessage = "This channel is not connected.";
        try {
            ContainerProtocolCalls.createContainer(client1, container1.getContainerInfo().getContainerID(), null);
            // fixed message: "closed" + "client" concatenated to "closedclient"
            Assert.fail("Create container should throw exception on closed client");
        } catch (Exception e) {
            // JUnit convention: expected value first, actual second
            Assert.assertEquals(IOException.class, e.getClass());
            Assert.assertTrue(e.getMessage().contains(expectedMessage));
        }
    } finally {
        // release client2 even if an assertion above fails
        clientManager.releaseClient(client2, false);
    }
}
Also used : OzoneConfiguration(org.apache.hadoop.hdds.conf.OzoneConfiguration) ScmClientConfig(org.apache.hadoop.hdds.scm.XceiverClientManager.ScmClientConfig) XceiverClientManager(org.apache.hadoop.hdds.scm.XceiverClientManager) IOException(java.io.IOException) XceiverClientSpi(org.apache.hadoop.hdds.scm.XceiverClientSpi) ContainerWithPipeline(org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline) IOException(java.io.IOException) ExpectedException(org.junit.rules.ExpectedException) Test(org.junit.Test)

Example 4 with XceiverClientSpi

use of org.apache.hadoop.hdds.scm.XceiverClientSpi in project ozone by apache.

From the class TestContainerSmallFile, the method testAllocateWrite:

@Test
public void testAllocateWrite() throws Exception {
    ContainerWithPipeline container = storageContainerLocationClient.allocateContainer(SCMTestUtils.getReplicationType(ozoneConfig), HddsProtos.ReplicationFactor.ONE, OzoneConsts.OZONE);
    XceiverClientSpi client = xceiverClientManager.acquireClient(container.getPipeline());
    try {
        ContainerProtocolCalls.createContainer(client, container.getContainerInfo().getContainerID(), null);
        BlockID blockID = ContainerTestHelper.getTestBlockID(container.getContainerInfo().getContainerID());
        // write a small file, read it back, and verify the round trip
        ContainerProtocolCalls.writeSmallFile(client, blockID, "data123".getBytes(UTF_8), null);
        ContainerProtos.GetSmallFileResponseProto response = ContainerProtocolCalls.readSmallFile(client, blockID, null);
        String readData = response.getData().getDataBuffers().getBuffersList().get(0).toStringUtf8();
        Assert.assertEquals("data123", readData);
    } finally {
        // release the client even if an assertion or call above fails
        xceiverClientManager.releaseClient(client, false);
    }
}
Also used : ContainerProtos(org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos) BlockID(org.apache.hadoop.hdds.client.BlockID) XceiverClientSpi(org.apache.hadoop.hdds.scm.XceiverClientSpi) ContainerWithPipeline(org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline) Test(org.junit.Test)

Example 5 with XceiverClientSpi

use of org.apache.hadoop.hdds.scm.XceiverClientSpi in project ozone by apache.

From the class TestContainerSmallFile, the method testReadWriteWithBCSId:

@Test
public void testReadWriteWithBCSId() throws Exception {
    ContainerWithPipeline container = storageContainerLocationClient.allocateContainer(HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.ONE, OzoneConsts.OZONE);
    XceiverClientSpi client = xceiverClientManager.acquireClient(container.getPipeline());
    try {
        ContainerProtocolCalls.createContainer(client, container.getContainerInfo().getContainerID(), null);
        BlockID blockID1 = ContainerTestHelper.getTestBlockID(container.getContainerInfo().getContainerID());
        ContainerProtos.PutSmallFileResponseProto responseProto = ContainerProtocolCalls.writeSmallFile(client, blockID1, "data123".getBytes(UTF_8), null);
        long bcsId = responseProto.getCommittedBlockLength().getBlockID().getBlockCommitSequenceId();
        try {
            blockID1.setBlockCommitSequenceId(bcsId + 1);
            // read a file with higher bcsId than the container bcsId
            ContainerProtocolCalls.readSmallFile(client, blockID1, null);
            Assert.fail("Expected exception not thrown");
        } catch (StorageContainerException sce) {
            // assertEquals gives a useful diff on failure, unlike assertTrue(==)
            Assert.assertEquals(ContainerProtos.Result.UNKNOWN_BCSID, sce.getResult());
        }
        // write a new block again to bump up the container bcsId
        BlockID blockID2 = ContainerTestHelper.getTestBlockID(container.getContainerInfo().getContainerID());
        ContainerProtocolCalls.writeSmallFile(client, blockID2, "data123".getBytes(UTF_8), null);
        try {
            blockID1.setBlockCommitSequenceId(bcsId + 1);
            // read a file with higher bcsId than the committed bcsId for the block
            ContainerProtocolCalls.readSmallFile(client, blockID1, null);
            Assert.fail("Expected exception not thrown");
        } catch (StorageContainerException sce) {
            Assert.assertEquals(ContainerProtos.Result.BCSID_MISMATCH, sce.getResult());
        }
        // with the correct bcsId the read must succeed and return the data
        blockID1.setBlockCommitSequenceId(bcsId);
        ContainerProtos.GetSmallFileResponseProto response = ContainerProtocolCalls.readSmallFile(client, blockID1, null);
        String readData = response.getData().getDataBuffers().getBuffersList().get(0).toStringUtf8();
        Assert.assertEquals("data123", readData);
    } finally {
        // release the client even if an assertion or call above fails
        xceiverClientManager.releaseClient(client, false);
    }
}
Also used : ContainerProtos(org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos) BlockID(org.apache.hadoop.hdds.client.BlockID) StorageContainerException(org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException) XceiverClientSpi(org.apache.hadoop.hdds.scm.XceiverClientSpi) ContainerWithPipeline(org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline) Test(org.junit.Test)

Aggregations

XceiverClientSpi (org.apache.hadoop.hdds.scm.XceiverClientSpi)41 Test (org.junit.Test)30 Pipeline (org.apache.hadoop.hdds.scm.pipeline.Pipeline)28 ContainerWithPipeline (org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline)21 XceiverClientManager (org.apache.hadoop.hdds.scm.XceiverClientManager)18 BlockID (org.apache.hadoop.hdds.client.BlockID)16 ContainerProtos (org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos)15 IOException (java.io.IOException)14 ArrayList (java.util.ArrayList)10 OzoneConfiguration (org.apache.hadoop.hdds.conf.OzoneConfiguration)9 KeyOutputStream (org.apache.hadoop.ozone.client.io.KeyOutputStream)9 OzoneOutputStream (org.apache.hadoop.ozone.client.io.OzoneOutputStream)9 DatanodeDetails (org.apache.hadoop.hdds.protocol.DatanodeDetails)7 XceiverClientReply (org.apache.hadoop.hdds.scm.XceiverClientReply)7 HddsDatanodeService (org.apache.hadoop.ozone.HddsDatanodeService)7 XceiverClientRatis (org.apache.hadoop.hdds.scm.XceiverClientRatis)6 OmKeyLocationInfo (org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo)6 ContainerCommandRequestProto (org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandRequestProto)5 ContainerInfo (org.apache.hadoop.hdds.scm.container.ContainerInfo)5 MockPipeline (org.apache.hadoop.hdds.scm.pipeline.MockPipeline)5