Search in sources :

Example 41 with XceiverClientSpi

Use of org.apache.hadoop.hdds.scm.XceiverClientSpi in the Apache Ozone project.

From the class TestContainerServer, method runTestClientServer:

/**
 * Spins up one {@link XceiverServerSpi} per datanode of a mock pipeline,
 * connects a client to the pipeline, and verifies that a CreateContainer
 * command round-trips successfully.
 *
 * @param numDatanodes number of datanodes (and servers) in the mock pipeline
 * @param initConf     hook to adjust {@code CONF} for the created pipeline
 * @param createClient factory for the client implementation under test
 * @param createServer factory for the per-datanode server implementation
 * @param initServer   per-datanode initialization run after server start
 * @throws Exception on any setup, connect, or command failure
 */
static void runTestClientServer(int numDatanodes, CheckedBiConsumer<Pipeline, OzoneConfiguration, IOException> initConf, CheckedBiFunction<Pipeline, OzoneConfiguration, XceiverClientSpi, IOException> createClient, CheckedBiFunction<DatanodeDetails, OzoneConfiguration, XceiverServerSpi, IOException> createServer, CheckedBiConsumer<DatanodeDetails, Pipeline, IOException> initServer) throws Exception {
    final List<XceiverServerSpi> servers = new ArrayList<>();
    XceiverClientSpi client = null;
    try {
        final Pipeline pipeline = MockPipeline.createPipeline(numDatanodes);
        initConf.accept(pipeline, CONF);
        for (DatanodeDetails dn : pipeline.getNodes()) {
            final XceiverServerSpi s = createServer.apply(dn, CONF);
            // Track the server before start() so the finally block stops it
            // even if start() or initServer fails midway through the loop.
            servers.add(s);
            s.start();
            initServer.accept(dn, pipeline);
        }
        client = createClient.apply(pipeline, CONF);
        client.connect();
        final ContainerCommandRequestProto request = ContainerTestHelper.getCreateContainerRequest(ContainerTestHelper.getTestContainerID(), pipeline);
        Assert.assertNotNull(request.getTraceID());
        client.sendCommand(request);
    } finally {
        if (client != null) {
            client.close();
        }
        // Collection.forEach is the idiomatic form for a side-effecting
        // iteration; stream().forEach adds no value here.
        servers.forEach(XceiverServerSpi::stop);
    }
}
Also used : MockDatanodeDetails.randomDatanodeDetails(org.apache.hadoop.hdds.protocol.MockDatanodeDetails.randomDatanodeDetails) DatanodeDetails(org.apache.hadoop.hdds.protocol.DatanodeDetails) ArrayList(java.util.ArrayList) ContainerCommandRequestProto(org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandRequestProto) XceiverClientSpi(org.apache.hadoop.hdds.scm.XceiverClientSpi) XceiverServerSpi(org.apache.hadoop.ozone.container.common.transport.server.XceiverServerSpi) MockPipeline(org.apache.hadoop.hdds.scm.pipeline.MockPipeline) Pipeline(org.apache.hadoop.hdds.scm.pipeline.Pipeline)

Example 42 with XceiverClientSpi

Use of org.apache.hadoop.hdds.scm.XceiverClientSpi in the Apache Ozone project.

From the class TestContainerSmallFile, method testInvalidContainerRead:

@Test
public void testInvalidContainerRead() throws Exception {
    // Reading a small file from a container ID that was never allocated
    // must fail with a StorageContainerException naming that container.
    long nonExistContainerID = 8888L;
    ContainerWithPipeline container = storageContainerLocationClient.allocateContainer(SCMTestUtils.getReplicationType(ozoneConfig), HddsProtos.ReplicationFactor.ONE, OzoneConsts.OZONE);
    XceiverClientSpi client = xceiverClientManager.acquireClient(container.getPipeline());
    try {
        ContainerProtocolCalls.createContainer(client, container.getContainerInfo().getContainerID(), null);
        BlockID blockID = ContainerTestHelper.getTestBlockID(container.getContainerInfo().getContainerID());
        ContainerProtocolCalls.writeSmallFile(client, blockID, "data123".getBytes(UTF_8), null);
        thrown.expect(StorageContainerException.class);
        thrown.expectMessage("ContainerID 8888 does not exist");
        // Try to read from a container that was never created; the return
        // value is intentionally discarded since this call must throw.
        ContainerProtocolCalls.readSmallFile(client, ContainerTestHelper.getTestBlockID(nonExistContainerID), null);
    } finally {
        // readSmallFile is expected to throw, so releaseClient must run in
        // a finally block; the original code leaked the client on the
        // expected-exception path.
        xceiverClientManager.releaseClient(client, false);
    }
}
Also used : ContainerProtos(org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos) BlockID(org.apache.hadoop.hdds.client.BlockID) XceiverClientSpi(org.apache.hadoop.hdds.scm.XceiverClientSpi) ContainerWithPipeline(org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline) Test(org.junit.Test)

Example 43 with XceiverClientSpi

Use of org.apache.hadoop.hdds.scm.XceiverClientSpi in the Apache Ozone project.

From the class TestGetCommittedBlockLengthAndPutKey, method tesPutKeyResposne:

// NOTE(review): method name has typos ("tesPutKeyResposne" should be
// "testPutKeyResponse"); left unchanged to preserve the external interface.
@Test
public void tesPutKeyResposne() throws Exception {
    // Writes a chunk, issues an explicit PutBlock, and verifies the
    // committed block length and block ID reported in the response.
    ContainerProtos.PutBlockResponseProto response;
    ContainerWithPipeline container = storageContainerLocationClient.allocateContainer(HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.ONE, OzoneConsts.OZONE);
    long containerID = container.getContainerInfo().getContainerID();
    Pipeline pipeline = container.getPipeline();
    XceiverClientSpi client = xceiverClientManager.acquireClient(pipeline);
    try {
        // create the container
        ContainerProtocolCalls.createContainer(client, containerID, null);
        BlockID blockID = ContainerTestHelper.getTestBlockID(containerID);
        byte[] data = RandomStringUtils.random(RandomUtils.nextInt(0, 1024)).getBytes(UTF_8);
        ContainerProtos.ContainerCommandRequestProto writeChunkRequest = ContainerTestHelper.getWriteChunkRequest(container.getPipeline(), blockID, data.length, null);
        client.sendCommand(writeChunkRequest);
        // Now, explicitly make a putKey request for the block.
        ContainerProtos.ContainerCommandRequestProto putKeyRequest = ContainerTestHelper.getPutBlockRequest(pipeline, writeChunkRequest.getWriteChunk());
        response = client.sendCommand(putKeyRequest).getPutBlock();
        // assertEquals takes (expected, actual); the original had them
        // reversed, which produces misleading failure messages.
        Assert.assertEquals(data.length, response.getCommittedBlockLength().getBlockLength());
        Assert.assertTrue(response.getCommittedBlockLength().getBlockID().getBlockCommitSequenceId() > 0);
        BlockID responseBlockID = BlockID.getFromProtobuf(response.getCommittedBlockLength().getBlockID());
        blockID.setBlockCommitSequenceId(responseBlockID.getBlockCommitSequenceId());
        // make sure the block ids in the request and response are same.
        // This will also ensure that closing the container committed the block
        // on the Datanodes.
        Assert.assertEquals(responseBlockID, blockID);
    } finally {
        // Release in finally so a failed assertion does not leak the client.
        xceiverClientManager.releaseClient(client, false);
    }
}
Also used : ContainerProtos(org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos) BlockID(org.apache.hadoop.hdds.client.BlockID) XceiverClientSpi(org.apache.hadoop.hdds.scm.XceiverClientSpi) ContainerWithPipeline(org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline) ContainerWithPipeline(org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline) Pipeline(org.apache.hadoop.hdds.scm.pipeline.Pipeline) Test(org.junit.Test)

Example 44 with XceiverClientSpi

Use of org.apache.hadoop.hdds.scm.XceiverClientSpi in the Apache Ozone project.

From the class TestECBlockOutputStreamEntry, method testAcquireDifferentClientForECBlocksOnTheSameHostButDifferentPort:

@Test
public void testAcquireDifferentClientForECBlocksOnTheSameHostButDifferentPort() throws IOException {
    // Five datanodes share one IP but use distinct ports; acquiring a
    // client per single-EC-block pipeline must yield five distinct clients.
    PipelineID randomId = PipelineID.randomId();
    ReplicationConfig ecReplicationConfig = new ECReplicationConfig("RS-3-2-1024k");
    DatanodeDetails node1 = aNode("127.0.0.1", "localhost", 2001);
    DatanodeDetails node2 = aNode("127.0.0.1", "localhost", 2002);
    DatanodeDetails node3 = aNode("127.0.0.1", "localhost", 2003);
    DatanodeDetails node4 = aNode("127.0.0.1", "localhost", 2004);
    DatanodeDetails node5 = aNode("127.0.0.1", "localhost", 2005);
    List<DatanodeDetails> nodes = Arrays.asList(node1, node2, node3, node4, node5);
    Pipeline anECPipeline = Pipeline.newBuilder().setId(randomId).setReplicationConfig(ecReplicationConfig).setState(Pipeline.PipelineState.OPEN).setNodes(nodes).build();
    // try-with-resources: the original never closed the manager, leaking
    // its cached connections for the remainder of the test run.
    try (XceiverClientManager manager = new XceiverClientManager(new OzoneConfiguration())) {
        HashSet<XceiverClientSpi> clients = new HashSet<>();
        ECBlockOutputStreamEntry entry = new ECBlockOutputStreamEntry.Builder().setXceiverClientManager(manager).setPipeline(anECPipeline).build();
        for (int i = 0; i < nodes.size(); i++) {
            clients.add(manager.acquireClient(entry.createSingleECBlockPipeline(anECPipeline, nodes.get(i), i)));
        }
        assertEquals(5, clients.size());
        // Release every acquired client; the original leaked all five.
        for (XceiverClientSpi c : clients) {
            manager.releaseClient(c, false);
        }
    }
}
Also used : ReplicationConfig(org.apache.hadoop.hdds.client.ReplicationConfig) ECReplicationConfig(org.apache.hadoop.hdds.client.ECReplicationConfig) OzoneConfiguration(org.apache.hadoop.hdds.conf.OzoneConfiguration) XceiverClientManager(org.apache.hadoop.hdds.scm.XceiverClientManager) XceiverClientSpi(org.apache.hadoop.hdds.scm.XceiverClientSpi) Pipeline(org.apache.hadoop.hdds.scm.pipeline.Pipeline) ECReplicationConfig(org.apache.hadoop.hdds.client.ECReplicationConfig) DatanodeDetails(org.apache.hadoop.hdds.protocol.DatanodeDetails) PipelineID(org.apache.hadoop.hdds.scm.pipeline.PipelineID) HashSet(java.util.HashSet) Test(org.junit.Test)

Aggregations

XceiverClientSpi (org.apache.hadoop.hdds.scm.XceiverClientSpi)44 Pipeline (org.apache.hadoop.hdds.scm.pipeline.Pipeline)31 Test (org.junit.Test)26 XceiverClientManager (org.apache.hadoop.hdds.scm.XceiverClientManager)21 ContainerWithPipeline (org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline)21 BlockID (org.apache.hadoop.hdds.client.BlockID)17 ContainerProtos (org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos)16 IOException (java.io.IOException)14 OzoneConfiguration (org.apache.hadoop.hdds.conf.OzoneConfiguration)11 ArrayList (java.util.ArrayList)10 DatanodeDetails (org.apache.hadoop.hdds.protocol.DatanodeDetails)9 KeyOutputStream (org.apache.hadoop.ozone.client.io.KeyOutputStream)9 OzoneOutputStream (org.apache.hadoop.ozone.client.io.OzoneOutputStream)9 XceiverClientReply (org.apache.hadoop.hdds.scm.XceiverClientReply)7 HddsDatanodeService (org.apache.hadoop.ozone.HddsDatanodeService)7 Test (org.junit.jupiter.api.Test)7 XceiverClientRatis (org.apache.hadoop.hdds.scm.XceiverClientRatis)6 ContainerInfo (org.apache.hadoop.hdds.scm.container.ContainerInfo)6 OmKeyLocationInfo (org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo)6 ContainerCommandRequestProto (org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandRequestProto)5