
Example 21 with XceiverClientSpi

Use of org.apache.hadoop.hdds.scm.XceiverClientSpi in the Apache Ozone project: class TestOzoneClientRetriesOnExceptions, method testMaxRetriesByOzoneClient.

@Test
public void testMaxRetriesByOzoneClient() throws Exception {
    String keyName = getKeyName();
    OzoneOutputStream key = createKey(keyName, ReplicationType.RATIS, (MAX_RETRIES + 1) * blockSize);
    Assert.assertTrue(key.getOutputStream() instanceof KeyOutputStream);
    KeyOutputStream keyOutputStream = (KeyOutputStream) key.getOutputStream();
    List<BlockOutputStreamEntry> entries = keyOutputStream.getStreamEntries();
    Assert.assertEquals(MAX_RETRIES + 1, entries.size());
    int dataLength = maxFlushSize + 50;
    // write more data than one chunk
    byte[] data1 = ContainerTestHelper.getFixedLengthString(keyString, dataLength).getBytes(UTF_8);
    long containerID;
    List<Long> containerList = new ArrayList<>();
    for (BlockOutputStreamEntry entry : entries) {
        containerID = entry.getBlockID().getContainerID();
        ContainerInfo container = cluster.getStorageContainerManager().getContainerManager().getContainer(ContainerID.valueOf(containerID));
        Pipeline pipeline = cluster.getStorageContainerManager().getPipelineManager().getPipeline(container.getPipelineID());
        XceiverClientSpi xceiverClient = xceiverClientManager.acquireClient(pipeline);
        Assume.assumeFalse(containerList.contains(containerID));
        containerList.add(containerID);
        xceiverClient.sendCommand(ContainerTestHelper.getCreateContainerRequest(containerID, pipeline));
        xceiverClientManager.releaseClient(xceiverClient, false);
    }
    key.write(data1);
    OutputStream stream = entries.get(0).getOutputStream();
    Assert.assertTrue(stream instanceof BlockOutputStream);
    BlockOutputStream blockOutputStream = (BlockOutputStream) stream;
    TestHelper.waitForContainerClose(key, cluster);
    // Ensure that blocks for the key have been allocated to at least N+1
    // containers so that write request will be tried on N+1 different blocks
    // of N+1 different containers and it will finally fail as it will hit
    // the max retry count of N.
    Assume.assumeTrue(containerList.size() + " <= " + MAX_RETRIES, containerList.size() > MAX_RETRIES);
    try {
        key.write(data1);
        // ensure that write is flushed to dn
        key.flush();
        Assert.fail("Expected exception not thrown");
    } catch (IOException ioe) {
        Assert.assertTrue(HddsClientUtils.checkForException(blockOutputStream.getIoException()) instanceof ContainerNotOpenException);
        Assert.assertTrue(ioe.getMessage().contains("Retry request failed. " + "retries get failed due to exceeded maximum " + "allowed retries number: " + MAX_RETRIES));
    }
    try {
        key.flush();
        Assert.fail("Expected exception not thrown");
    } catch (IOException ioe) {
        Assert.assertTrue(ioe.getMessage().contains("Stream is closed"));
    }
    try {
        key.close();
    } catch (IOException ioe) {
        Assert.fail("Expected should not be thrown");
    }
}
Also used: OutputStream (java.io.OutputStream), BlockOutputStream (org.apache.hadoop.hdds.scm.storage.BlockOutputStream), KeyOutputStream (org.apache.hadoop.ozone.client.io.KeyOutputStream), OzoneOutputStream (org.apache.hadoop.ozone.client.io.OzoneOutputStream), ArrayList (java.util.ArrayList), IOException (java.io.IOException), XceiverClientSpi (org.apache.hadoop.hdds.scm.XceiverClientSpi), ContainerNotOpenException (org.apache.hadoop.hdds.scm.container.common.helpers.ContainerNotOpenException), Pipeline (org.apache.hadoop.hdds.scm.pipeline.Pipeline), BlockOutputStreamEntry (org.apache.hadoop.ozone.client.io.BlockOutputStreamEntry), ContainerInfo (org.apache.hadoop.hdds.scm.container.ContainerInfo), Test (org.junit.Test)
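
The MAX_RETRIES, blockSize, and maxFlushSize values come from the test class setup, which is not shown on this page. As a rough orientation, the retry limit is usually injected through the client configuration; a minimal sketch, assuming OzoneClientConfig and its setMaxRetryCount setter (the actual setup in TestOzoneClientRetriesOnExceptions may differ):

// Hedged sketch: wiring a retry limit into the client configuration.
// OzoneClientConfig.setMaxRetryCount is the assumed knob here; the real
// test setup may also configure stream buffer and flush sizes.
OzoneConfiguration conf = new OzoneConfiguration();
OzoneClientConfig clientConfig = conf.getObject(OzoneClientConfig.class);
clientConfig.setMaxRetryCount(MAX_RETRIES);
conf.setFromObject(clientConfig);
OzoneClient client = OzoneClientFactory.getRpcClient(conf);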

Example 22 with XceiverClientSpi

Use of org.apache.hadoop.hdds.scm.XceiverClientSpi in the Apache Ozone project: class TestOzoneClientRetriesOnExceptions, method testGroupMismatchExceptionHandling.

@Test
public void testGroupMismatchExceptionHandling() throws Exception {
    String keyName = getKeyName();
    int dataLength = maxFlushSize + 50;
    OzoneOutputStream key = createKey(keyName, ReplicationType.RATIS, dataLength);
    // write more data than one chunk
    byte[] data1 = ContainerTestHelper.getFixedLengthString(keyString, dataLength).getBytes(UTF_8);
    Assert.assertTrue(key.getOutputStream() instanceof KeyOutputStream);
    KeyOutputStream keyOutputStream = (KeyOutputStream) key.getOutputStream();
    long containerID = keyOutputStream.getStreamEntries().get(0).getBlockID().getContainerID();
    Assert.assertEquals(1, keyOutputStream.getStreamEntries().size());
    ContainerInfo container = cluster.getStorageContainerManager().getContainerManager().getContainer(ContainerID.valueOf(containerID));
    Pipeline pipeline = cluster.getStorageContainerManager().getPipelineManager().getPipeline(container.getPipelineID());
    XceiverClientSpi xceiverClient = xceiverClientManager.acquireClient(pipeline);
    xceiverClient.sendCommand(ContainerTestHelper.getCreateContainerRequest(containerID, pipeline));
    xceiverClientManager.releaseClient(xceiverClient, false);
    key.write(data1);
    OutputStream stream = keyOutputStream.getStreamEntries().get(0).getOutputStream();
    Assert.assertTrue(stream instanceof BlockOutputStream);
    BlockOutputStream blockOutputStream = (BlockOutputStream) stream;
    TestHelper.waitForPipelineClose(key, cluster, false);
    key.flush();
    Assert.assertTrue(HddsClientUtils.checkForException(blockOutputStream.getIoException()) instanceof GroupMismatchException);
    Assert.assertTrue(keyOutputStream.getExcludeList().getPipelineIds().contains(pipeline.getId()));
    Assert.assertEquals(2, keyOutputStream.getStreamEntries().size());
    key.close();
    Assert.assertEquals(0, keyOutputStream.getStreamEntries().size());
    validateData(keyName, data1);
}
Also used: OutputStream (java.io.OutputStream), BlockOutputStream (org.apache.hadoop.hdds.scm.storage.BlockOutputStream), KeyOutputStream (org.apache.hadoop.ozone.client.io.KeyOutputStream), OzoneOutputStream (org.apache.hadoop.ozone.client.io.OzoneOutputStream), ContainerInfo (org.apache.hadoop.hdds.scm.container.ContainerInfo), GroupMismatchException (org.apache.ratis.protocol.exceptions.GroupMismatchException), XceiverClientSpi (org.apache.hadoop.hdds.scm.XceiverClientSpi), Pipeline (org.apache.hadoop.hdds.scm.pipeline.Pipeline), Test (org.junit.Test)
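
HddsClientUtils.checkForException walks the cause chain of the stored exception and returns the first cause matching one of the client's known exception classes, which is why the GroupMismatchException raised by the closed Ratis pipeline is visible here. An illustrative-only equivalent of the unwrapping (not the actual helper, which checks against a fixed list of exception classes):

// Illustrative cause-chain walk; not the real HddsClientUtils code.
static Throwable findCause(Throwable t, Class<? extends Throwable> target) {
    for (Throwable cur = t; cur != null; cur = cur.getCause()) {
        if (target.isInstance(cur)) {
            return cur;
        }
    }
    return null;
}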

Example 23 with XceiverClientSpi

Use of org.apache.hadoop.hdds.scm.XceiverClientSpi in the Apache Ozone project: class TestReadRetries, method testPutKeyAndGetKeyThreeNodes.

@Test
public void testPutKeyAndGetKeyThreeNodes() throws Exception {
    String volumeName = UUID.randomUUID().toString();
    String bucketName = UUID.randomUUID().toString();
    String value = "sample value";
    store.createVolume(volumeName);
    OzoneVolume volume = store.getVolume(volumeName);
    volume.createBucket(bucketName);
    OzoneBucket bucket = volume.getBucket(bucketName);
    String keyName = "a/b/c/" + UUID.randomUUID().toString();
    OzoneOutputStream out = bucket.createKey(keyName, value.getBytes(UTF_8).length, ReplicationType.RATIS, ReplicationFactor.THREE, new HashMap<>());
    KeyOutputStream groupOutputStream = (KeyOutputStream) out.getOutputStream();
    XceiverClientFactory factory = groupOutputStream.getXceiverClientFactory();
    out.write(value.getBytes(UTF_8));
    out.close();
    // First, confirm the key info from the client matches the info in OM.
    OmKeyArgs.Builder builder = new OmKeyArgs.Builder();
    builder.setVolumeName(volumeName).setBucketName(bucketName).setKeyName(keyName).setRefreshPipeline(true);
    OmKeyLocationInfo keyInfo = ozoneManager.lookupKey(builder.build()).getKeyLocationVersions().get(0).getBlocksLatestVersionOnly().get(0);
    long containerID = keyInfo.getContainerID();
    long localID = keyInfo.getLocalID();
    OzoneKeyDetails keyDetails = bucket.getKey(keyName);
    Assert.assertEquals(keyName, keyDetails.getName());
    List<OzoneKeyLocation> keyLocations = keyDetails.getOzoneKeyLocations();
    Assert.assertEquals(1, keyLocations.size());
    Assert.assertEquals(containerID, keyLocations.get(0).getContainerID());
    Assert.assertEquals(localID, keyLocations.get(0).getLocalID());
    // Make sure that the data size matched.
    Assert.assertEquals(value.getBytes(UTF_8).length, keyLocations.get(0).getLength());
    ContainerInfo container = cluster.getStorageContainerManager().getContainerManager().getContainer(ContainerID.valueOf(containerID));
    Pipeline pipeline = cluster.getStorageContainerManager().getPipelineManager().getPipeline(container.getPipelineID());
    List<DatanodeDetails> datanodes = pipeline.getNodes();
    DatanodeDetails datanodeDetails = datanodes.get(0);
    Assert.assertNotNull(datanodeDetails);
    XceiverClientSpi clientSpi = factory.acquireClient(pipeline);
    Assert.assertTrue(clientSpi instanceof XceiverClientRatis);
    XceiverClientRatis ratisClient = (XceiverClientRatis) clientSpi;
    ratisClient.watchForCommit(keyInfo.getBlockCommitSequenceId());
    // shutdown the datanode
    cluster.shutdownHddsDatanode(datanodeDetails);
    // try to read, this should be successful
    readKey(bucket, keyName, value);
    // read intermediate directory
    verifyIntermediateDir(bucket, "a/b/c");
    // shutdown the second datanode
    datanodeDetails = datanodes.get(1);
    cluster.shutdownHddsDatanode(datanodeDetails);
    // we should still be able to read via the Standalone protocol
    // try to read
    readKey(bucket, keyName, value);
    // shutdown the 3rd datanode
    datanodeDetails = datanodes.get(2);
    cluster.shutdownHddsDatanode(datanodeDetails);
    try {
        // try to read
        readKey(bucket, keyName, value);
        fail("Expected exception not thrown");
    } catch (IOException e) {
        // it should throw an IOException as none of the servers
        // are available
    }
    factory.releaseClient(clientSpi, false);
}
Also used: OzoneKeyLocation (org.apache.hadoop.ozone.client.OzoneKeyLocation), XceiverClientRatis (org.apache.hadoop.hdds.scm.XceiverClientRatis), OzoneOutputStream (org.apache.hadoop.ozone.client.io.OzoneOutputStream), IOException (java.io.IOException), XceiverClientFactory (org.apache.hadoop.hdds.scm.XceiverClientFactory), XceiverClientSpi (org.apache.hadoop.hdds.scm.XceiverClientSpi), OmKeyArgs (org.apache.hadoop.ozone.om.helpers.OmKeyArgs), OmKeyLocationInfo (org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo), Pipeline (org.apache.hadoop.hdds.scm.pipeline.Pipeline), OzoneVolume (org.apache.hadoop.ozone.client.OzoneVolume), OzoneBucket (org.apache.hadoop.ozone.client.OzoneBucket), OzoneKeyDetails (org.apache.hadoop.ozone.client.OzoneKeyDetails), DatanodeDetails (org.apache.hadoop.hdds.protocol.DatanodeDetails), ContainerInfo (org.apache.hadoop.hdds.scm.container.ContainerInfo), KeyOutputStream (org.apache.hadoop.ozone.client.io.KeyOutputStream), Test (org.junit.Test)
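
The readKey and verifyIntermediateDir helpers are private to TestReadRetries and are not shown on this page. A hypothetical reconstruction of readKey, assuming it simply reads the key back through OzoneBucket.readKey and compares the bytes (the real method may differ):

// Hypothetical reconstruction of the readKey helper used above.
private void readKey(OzoneBucket bucket, String keyName, String data)
    throws IOException {
    byte[] buffer = new byte[data.getBytes(UTF_8).length];
    try (OzoneInputStream in = bucket.readKey(keyName)) {
        // read until the buffer is full or the stream ends
        int off = 0;
        while (off < buffer.length) {
            int n = in.read(buffer, off, buffer.length - off);
            if (n < 0) {
                break;
            }
            off += n;
        }
    }
    Assert.assertEquals(data, new String(buffer, UTF_8));
}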

Example 24 with XceiverClientSpi

Use of org.apache.hadoop.hdds.scm.XceiverClientSpi in the Apache Ozone project: class TestContainerServer, method runTestClientServer.

static void runTestClientServer(int numDatanodes, CheckedBiConsumer<Pipeline, OzoneConfiguration, IOException> initConf, CheckedBiFunction<Pipeline, OzoneConfiguration, XceiverClientSpi, IOException> createClient, CheckedBiFunction<DatanodeDetails, OzoneConfiguration, XceiverServerSpi, IOException> createServer, CheckedBiConsumer<DatanodeDetails, Pipeline, IOException> initServer) throws Exception {
    final List<XceiverServerSpi> servers = new ArrayList<>();
    XceiverClientSpi client = null;
    try {
        final Pipeline pipeline = MockPipeline.createPipeline(numDatanodes);
        initConf.accept(pipeline, CONF);
        for (DatanodeDetails dn : pipeline.getNodes()) {
            final XceiverServerSpi s = createServer.apply(dn, CONF);
            servers.add(s);
            s.start();
            initServer.accept(dn, pipeline);
        }
        client = createClient.apply(pipeline, CONF);
        client.connect();
        final ContainerCommandRequestProto request = ContainerTestHelper.getCreateContainerRequest(ContainerTestHelper.getTestContainerID(), pipeline);
        Assert.assertNotNull(request.getTraceID());
        client.sendCommand(request);
    } finally {
        if (client != null) {
            client.close();
        }
        servers.forEach(XceiverServerSpi::stop);
    }
}
Also used: MockDatanodeDetails.randomDatanodeDetails (org.apache.hadoop.hdds.protocol.MockDatanodeDetails.randomDatanodeDetails), DatanodeDetails (org.apache.hadoop.hdds.protocol.DatanodeDetails), ArrayList (java.util.ArrayList), ContainerCommandRequestProto (org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandRequestProto), XceiverClientSpi (org.apache.hadoop.hdds.scm.XceiverClientSpi), XceiverServerSpi (org.apache.hadoop.ozone.container.common.transport.server.XceiverServerSpi), MockPipeline (org.apache.hadoop.hdds.scm.pipeline.MockPipeline), Pipeline (org.apache.hadoop.hdds.scm.pipeline.Pipeline)
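
runTestClientServer is parameterized over client and server factories so the same scenario can be driven over different transports. A hypothetical invocation over the standalone GRPC transport; the server factory here is a placeholder name, not the real test's lambda:

// Hypothetical invocation shape; newGrpcServer is a placeholder for
// whatever server factory the real test supplies.
runTestClientServer(
    1,                                  // single-datanode pipeline
    (pipeline, conf) -> conf.setInt(OzoneConfigKeys.DFS_CONTAINER_IPC_PORT,
        pipeline.getFirstNode()
            .getPort(DatanodeDetails.Port.Name.STANDALONE).getValue()),
    XceiverClientGrpc::new,             // (pipeline, conf) -> client
    (dn, conf) -> newGrpcServer(dn, conf),  // placeholder server factory
    (dn, pipeline) -> { /* no extra per-datanode init */ });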

Example 25 with XceiverClientSpi

Use of org.apache.hadoop.hdds.scm.XceiverClientSpi in the Apache Ozone project: class TestCSMMetrics, method runContainerStateMachineMetrics.

static void runContainerStateMachineMetrics(int numDatanodes, BiConsumer<Pipeline, OzoneConfiguration> initConf, TestCSMMetrics.CheckedBiFunction<Pipeline, OzoneConfiguration, XceiverClientSpi, IOException> createClient, TestCSMMetrics.CheckedBiFunction<DatanodeDetails, OzoneConfiguration, XceiverServerSpi, IOException> createServer, CheckedBiConsumer<DatanodeDetails, Pipeline, IOException> initServer) throws Exception {
    final List<XceiverServerSpi> servers = new ArrayList<>();
    XceiverClientSpi client = null;
    try {
        final Pipeline pipeline = MockPipeline.createPipeline(numDatanodes);
        final OzoneConfiguration conf = new OzoneConfiguration();
        initConf.accept(pipeline, conf);
        for (DatanodeDetails dn : pipeline.getNodes()) {
            final XceiverServerSpi s = createServer.apply(dn, conf);
            servers.add(s);
            s.start();
            initServer.accept(dn, pipeline);
        }
        client = createClient.apply(pipeline, conf);
        client.connect();
        // Before Read Chunk/Write Chunk
        MetricsRecordBuilder metric = getMetrics(CSMMetrics.SOURCE_NAME + RaftGroupId.valueOf(pipeline.getId().getId()).toString());
        assertCounter("NumWriteStateMachineOps", 0L, metric);
        assertCounter("NumReadStateMachineOps", 0L, metric);
        assertCounter("NumApplyTransactionOps", 0L, metric);
        assertCounter("NumBytesWrittenCount", 0L, metric);
        assertCounter("NumBytesCommittedCount", 0L, metric);
        assertCounter("NumStartTransactionVerifyFailures", 0L, metric);
        assertCounter("NumContainerNotOpenVerifyFailures", 0L, metric);
        assertCounter("WriteChunkNumOps", 0L, metric);
        double applyTransactionLatency = getDoubleGauge("ApplyTransactionAvgTime", metric);
        assertTrue(applyTransactionLatency == 0.0);
        double writeStateMachineLatency = getDoubleGauge("WriteStateMachineDataAvgTime", metric);
        assertTrue(writeStateMachineLatency == 0.0);
        // Write Chunk
        BlockID blockID = ContainerTestHelper.getTestBlockID(ContainerTestHelper.getTestContainerID());
        ContainerProtos.ContainerCommandRequestProto writeChunkRequest = ContainerTestHelper.getWriteChunkRequest(pipeline, blockID, 1024, null);
        ContainerCommandResponseProto response = client.sendCommand(writeChunkRequest);
        Assert.assertEquals(ContainerProtos.Result.SUCCESS, response.getResult());
        metric = getMetrics(CSMMetrics.SOURCE_NAME + RaftGroupId.valueOf(pipeline.getId().getId()).toString());
        assertCounter("NumWriteStateMachineOps", 1L, metric);
        assertCounter("NumBytesWrittenCount", 1024L, metric);
        assertCounter("NumApplyTransactionOps", 1L, metric);
        assertCounter("NumBytesCommittedCount", 1024L, metric);
        assertCounter("NumStartTransactionVerifyFailures", 0L, metric);
        assertCounter("NumContainerNotOpenVerifyFailures", 0L, metric);
        assertCounter("WriteChunkNumOps", 1L, metric);
        // Read Chunk
        ContainerProtos.ContainerCommandRequestProto readChunkRequest = ContainerTestHelper.getReadChunkRequest(pipeline, writeChunkRequest.getWriteChunk());
        response = client.sendCommand(readChunkRequest);
        Assert.assertEquals(ContainerProtos.Result.SUCCESS, response.getResult());
        metric = getMetrics(CSMMetrics.SOURCE_NAME + RaftGroupId.valueOf(pipeline.getId().getId()).toString());
        assertCounter("NumQueryStateMachineOps", 1L, metric);
        assertCounter("NumApplyTransactionOps", 1L, metric);
        applyTransactionLatency = getDoubleGauge("ApplyTransactionAvgTime", metric);
        assertTrue(applyTransactionLatency > 0.0);
        writeStateMachineLatency = getDoubleGauge("WriteStateMachineDataAvgTime", metric);
        assertTrue(writeStateMachineLatency > 0.0);
    } finally {
        if (client != null) {
            client.close();
        }
        servers.forEach(XceiverServerSpi::stop);
    }
}
Also used: ContainerProtos (org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos), ContainerCommandRequestProto (org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandRequestProto), ArrayList (java.util.ArrayList), OzoneConfiguration (org.apache.hadoop.hdds.conf.OzoneConfiguration), XceiverClientSpi (org.apache.hadoop.hdds.scm.XceiverClientSpi), XceiverServerSpi (org.apache.hadoop.ozone.container.common.transport.server.XceiverServerSpi), MockPipeline (org.apache.hadoop.hdds.scm.pipeline.MockPipeline), Pipeline (org.apache.hadoop.hdds.scm.pipeline.Pipeline), ContainerCommandResponseProto (org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandResponseProto), DatanodeDetails (org.apache.hadoop.hdds.protocol.DatanodeDetails), BlockID (org.apache.hadoop.hdds.client.BlockID), MetricsRecordBuilder (org.apache.hadoop.metrics2.MetricsRecordBuilder)
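
The getMetrics, assertCounter, getDoubleGauge, and assertTrue calls above rely on static imports from Hadoop's metrics test utilities and JUnit, along these lines:

import static org.apache.hadoop.test.MetricsAsserts.assertCounter;
import static org.apache.hadoop.test.MetricsAsserts.getDoubleGauge;
import static org.apache.hadoop.test.MetricsAsserts.getMetrics;
import static org.junit.Assert.assertTrue;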

Aggregations

XceiverClientSpi (org.apache.hadoop.hdds.scm.XceiverClientSpi): 41 usages
Test (org.junit.Test): 30 usages
Pipeline (org.apache.hadoop.hdds.scm.pipeline.Pipeline): 28 usages
ContainerWithPipeline (org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline): 21 usages
XceiverClientManager (org.apache.hadoop.hdds.scm.XceiverClientManager): 18 usages
BlockID (org.apache.hadoop.hdds.client.BlockID): 16 usages
ContainerProtos (org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos): 15 usages
IOException (java.io.IOException): 14 usages
ArrayList (java.util.ArrayList): 10 usages
OzoneConfiguration (org.apache.hadoop.hdds.conf.OzoneConfiguration): 9 usages
KeyOutputStream (org.apache.hadoop.ozone.client.io.KeyOutputStream): 9 usages
OzoneOutputStream (org.apache.hadoop.ozone.client.io.OzoneOutputStream): 9 usages
DatanodeDetails (org.apache.hadoop.hdds.protocol.DatanodeDetails): 7 usages
XceiverClientReply (org.apache.hadoop.hdds.scm.XceiverClientReply): 7 usages
HddsDatanodeService (org.apache.hadoop.ozone.HddsDatanodeService): 7 usages
XceiverClientRatis (org.apache.hadoop.hdds.scm.XceiverClientRatis): 6 usages
OmKeyLocationInfo (org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo): 6 usages
ContainerCommandRequestProto (org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandRequestProto): 5 usages
ContainerInfo (org.apache.hadoop.hdds.scm.container.ContainerInfo): 5 usages
MockPipeline (org.apache.hadoop.hdds.scm.pipeline.MockPipeline): 5 usages
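
Across these usages the recurring pattern is the acquire/send/release lifecycle against a pipeline; a minimal sketch:

// Minimal sketch of the shared lifecycle; 'request' stands for any
// ContainerCommandRequestProto built by the test helpers.
XceiverClientSpi client = xceiverClientManager.acquireClient(pipeline);
try {
    client.sendCommand(request);
} finally {
    // 'false' leaves the cached client valid for later reuse
    xceiverClientManager.releaseClient(client, false);
}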