Example usage of org.apache.hadoop.hdds.scm.XceiverClientSpi in the Apache Ozone project: the runTestClientServer method of the TestContainerServer class.
/**
 * Starts one {@link XceiverServerSpi} per datanode of a freshly mocked
 * pipeline, connects a client to that pipeline, and sends a single
 * CreateContainer command, asserting the request carries a trace id.
 *
 * @param numDatanodes number of datanodes in the mock pipeline
 * @param initConf     hook to adjust {@code CONF} for the pipeline
 * @param createClient factory for the client under test
 * @param createServer factory for each datanode's server under test
 * @param initServer   hook run after each server is started
 * @throws Exception on any setup, send, or teardown failure
 */
static void runTestClientServer(int numDatanodes, CheckedBiConsumer<Pipeline, OzoneConfiguration, IOException> initConf, CheckedBiFunction<Pipeline, OzoneConfiguration, XceiverClientSpi, IOException> createClient, CheckedBiFunction<DatanodeDetails, OzoneConfiguration, XceiverServerSpi, IOException> createServer, CheckedBiConsumer<DatanodeDetails, Pipeline, IOException> initServer) throws Exception {
  final List<XceiverServerSpi> servers = new ArrayList<>();
  XceiverClientSpi client = null;
  try {
    final Pipeline pipeline = MockPipeline.createPipeline(numDatanodes);
    initConf.accept(pipeline, CONF);
    for (DatanodeDetails dn : pipeline.getNodes()) {
      final XceiverServerSpi s = createServer.apply(dn, CONF);
      // Track the server before starting it so it is stopped in the
      // finally block even if start() or initServer fails part-way.
      servers.add(s);
      s.start();
      initServer.accept(dn, pipeline);
    }
    client = createClient.apply(pipeline, CONF);
    client.connect();
    final ContainerCommandRequestProto request = ContainerTestHelper.getCreateContainerRequest(ContainerTestHelper.getTestContainerID(), pipeline);
    Assert.assertNotNull(request.getTraceID());
    client.sendCommand(request);
  } finally {
    // Close the client first, then stop every server. The nested
    // try/finally guarantees the servers are stopped even when
    // client.close() itself throws.
    try {
      if (client != null) {
        client.close();
      }
    } finally {
      servers.forEach(XceiverServerSpi::stop);
    }
  }
}
Example usage of org.apache.hadoop.hdds.scm.XceiverClientSpi in the Apache Ozone project: the testInvalidContainerRead method of the TestContainerSmallFile class.
/**
 * Verifies that reading a small file from a container id that was never
 * allocated fails with a {@link StorageContainerException} whose message
 * names the missing container.
 */
@Test
public void testInvalidContainerRead() throws Exception {
  long nonExistContainerID = 8888L;
  ContainerWithPipeline container = storageContainerLocationClient.allocateContainer(SCMTestUtils.getReplicationType(ozoneConfig), HddsProtos.ReplicationFactor.ONE, OzoneConsts.OZONE);
  XceiverClientSpi client = xceiverClientManager.acquireClient(container.getPipeline());
  try {
    ContainerProtocolCalls.createContainer(client, container.getContainerInfo().getContainerID(), null);
    BlockID blockID = ContainerTestHelper.getTestBlockID(container.getContainerInfo().getContainerID());
    ContainerProtocolCalls.writeSmallFile(client, blockID, "data123".getBytes(UTF_8), null);
    thrown.expect(StorageContainerException.class);
    thrown.expectMessage("ContainerID 8888 does not exist");
    // Try to read from an invalid (never-allocated) container. This call
    // is expected to throw, so the client must be released in finally —
    // a release statement after this line would never run.
    ContainerProtocolCalls.readSmallFile(client, ContainerTestHelper.getTestBlockID(nonExistContainerID), null);
  } finally {
    xceiverClientManager.releaseClient(client, false);
  }
}
Example usage of org.apache.hadoop.hdds.scm.XceiverClientSpi in the Apache Ozone project: the tesPutKeyResposne method of the TestGetCommittedBlockLengthAndPutKey class.
/**
 * Writes a chunk to a new RATIS container and then issues an explicit
 * PutBlock, verifying the response reports the committed block length,
 * a positive block-commit sequence id, and a BlockID matching the request.
 *
 * <p>NOTE(review): the method name carries a historical typo
 * ("tesPutKeyResposne"); it is kept because the name is the test's
 * public identifier.
 */
@Test
public void tesPutKeyResposne() throws Exception {
  ContainerWithPipeline container = storageContainerLocationClient.allocateContainer(HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.ONE, OzoneConsts.OZONE);
  long containerID = container.getContainerInfo().getContainerID();
  Pipeline pipeline = container.getPipeline();
  XceiverClientSpi client = xceiverClientManager.acquireClient(pipeline);
  try {
    // create the container
    ContainerProtocolCalls.createContainer(client, containerID, null);
    BlockID blockID = ContainerTestHelper.getTestBlockID(containerID);
    byte[] data = RandomStringUtils.random(RandomUtils.nextInt(0, 1024)).getBytes(UTF_8);
    ContainerProtos.ContainerCommandRequestProto writeChunkRequest = ContainerTestHelper.getWriteChunkRequest(container.getPipeline(), blockID, data.length, null);
    client.sendCommand(writeChunkRequest);
    // Now, explicitly make a putKey request for the block.
    ContainerProtos.ContainerCommandRequestProto putKeyRequest = ContainerTestHelper.getPutBlockRequest(pipeline, writeChunkRequest.getWriteChunk());
    ContainerProtos.PutBlockResponseProto response = client.sendCommand(putKeyRequest).getPutBlock();
    // JUnit convention: expected value first, actual second.
    Assert.assertEquals(data.length, response.getCommittedBlockLength().getBlockLength());
    Assert.assertTrue(response.getCommittedBlockLength().getBlockID().getBlockCommitSequenceId() > 0);
    BlockID responseBlockID = BlockID.getFromProtobuf(response.getCommittedBlockLength().getBlockID());
    blockID.setBlockCommitSequenceId(responseBlockID.getBlockCommitSequenceId());
    // make sure the block ids in the request and response are same.
    // This will also ensure that closing the container committed the block
    // on the Datanodes.
    Assert.assertEquals(responseBlockID, blockID);
  } finally {
    // Release the client even when an RPC or assertion above fails.
    xceiverClientManager.releaseClient(client, false);
  }
}
Example usage of org.apache.hadoop.hdds.scm.XceiverClientSpi in the Apache Ozone project: the testAcquireDifferentClientForECBlocksOnTheSameHostButDifferentPort method of the TestECBlockOutputStreamEntry class.
/**
 * Five datanodes sharing one IP but listening on distinct ports must each
 * receive their own client: the manager may not coalesce clients by host
 * address alone when building single-node EC block pipelines.
 */
@Test
public void testAcquireDifferentClientForECBlocksOnTheSameHostButDifferentPort() throws IOException {
  PipelineID randomId = PipelineID.randomId();
  ReplicationConfig ecReplicationConfig = new ECReplicationConfig("RS-3-2-1024k");
  DatanodeDetails node1 = aNode("127.0.0.1", "localhost", 2001);
  DatanodeDetails node2 = aNode("127.0.0.1", "localhost", 2002);
  DatanodeDetails node3 = aNode("127.0.0.1", "localhost", 2003);
  DatanodeDetails node4 = aNode("127.0.0.1", "localhost", 2004);
  DatanodeDetails node5 = aNode("127.0.0.1", "localhost", 2005);
  List<DatanodeDetails> nodes = Arrays.asList(node1, node2, node3, node4, node5);
  Pipeline anECPipeline = Pipeline.newBuilder().setId(randomId).setReplicationConfig(ecReplicationConfig).setState(Pipeline.PipelineState.OPEN).setNodes(nodes).build();
  XceiverClientManager manager = new XceiverClientManager(new OzoneConfiguration());
  HashSet<XceiverClientSpi> clients = new HashSet<>();
  try {
    ECBlockOutputStreamEntry entry = new ECBlockOutputStreamEntry.Builder().setXceiverClientManager(manager).setPipeline(anECPipeline).build();
    for (int i = 0; i < nodes.size(); i++) {
      clients.add(manager.acquireClient(entry.createSingleECBlockPipeline(anECPipeline, nodes.get(i), i)));
    }
    // A HashSet of size 5 proves every per-node pipeline got a distinct client.
    assertEquals(5, clients.size());
  } finally {
    // Release every acquired client so the manager's cache is not leaked
    // even when the assertion above fails.
    for (XceiverClientSpi c : clients) {
      manager.releaseClient(c, false);
    }
  }
}
Aggregations