Use of org.apache.hadoop.hdds.scm.XceiverClientSpi in project ozone by apache.
The class TestOzoneClientRetriesOnExceptions, method testMaxRetriesByOzoneClient.
@Test
public void testMaxRetriesByOzoneClient() throws Exception {
  String keyName = getKeyName();
  OzoneOutputStream key = createKey(
      keyName, ReplicationType.RATIS, (MAX_RETRIES + 1) * blockSize);
  Assert.assertTrue(key.getOutputStream() instanceof KeyOutputStream);
  KeyOutputStream keyOutputStream = (KeyOutputStream) key.getOutputStream();
  List<BlockOutputStreamEntry> entries = keyOutputStream.getStreamEntries();
  Assert.assertEquals((MAX_RETRIES + 1),
      keyOutputStream.getStreamEntries().size());
  int dataLength = maxFlushSize + 50;
  // write data more than 1 chunk
  byte[] data1 = ContainerTestHelper
      .getFixedLengthString(keyString, dataLength).getBytes(UTF_8);
  long containerID;
  List<Long> containerList = new ArrayList<>();
  for (BlockOutputStreamEntry entry : entries) {
    containerID = entry.getBlockID().getContainerID();
    ContainerInfo container = cluster.getStorageContainerManager()
        .getContainerManager()
        .getContainer(ContainerID.valueOf(containerID));
    Pipeline pipeline = cluster.getStorageContainerManager()
        .getPipelineManager().getPipeline(container.getPipelineID());
    XceiverClientSpi xceiverClient =
        xceiverClientManager.acquireClient(pipeline);
    Assume.assumeFalse(containerList.contains(containerID));
    containerList.add(containerID);
    xceiverClient.sendCommand(
        ContainerTestHelper.getCreateContainerRequest(containerID, pipeline));
    xceiverClientManager.releaseClient(xceiverClient, false);
  }
  key.write(data1);
  OutputStream stream = entries.get(0).getOutputStream();
  Assert.assertTrue(stream instanceof BlockOutputStream);
  BlockOutputStream blockOutputStream = (BlockOutputStream) stream;
  TestHelper.waitForContainerClose(key, cluster);
  // Ensure that blocks for the key have been allocated to at least N+1
  // containers, so that the write request is retried on N+1 different
  // blocks of N+1 different containers and finally fails once it hits
  // the max retry count of N.
  Assume.assumeTrue(containerList.size() + " <= " + MAX_RETRIES,
      containerList.size() > MAX_RETRIES);
  try {
    key.write(data1);
    // ensure that the write is flushed to the datanode
    key.flush();
    Assert.fail("Expected exception not thrown");
  } catch (IOException ioe) {
    Assert.assertTrue(HddsClientUtils.checkForException(
        blockOutputStream.getIoException())
        instanceof ContainerNotOpenException);
    Assert.assertTrue(ioe.getMessage().contains(
        "Retry request failed. retries get failed due to exceeded maximum "
            + "allowed retries number: " + MAX_RETRIES));
  }
  try {
    key.flush();
    Assert.fail("Expected exception not thrown");
  } catch (IOException ioe) {
    Assert.assertTrue(ioe.getMessage().contains("Stream is closed"));
  }
  try {
    key.close();
  } catch (IOException ioe) {
    Assert.fail("Exception should not be thrown");
  }
}
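The loop in this test shows the core XceiverClientSpi lifecycle: acquire a client for a pipeline, send a container command, release the client. A minimal sketch of that pattern, assuming a client manager, pipeline, and container ID already set up elsewhere (e.g. by a MiniOzoneCluster in a test):

// Minimal sketch of the acquire/send/release pattern used in the loop above.
// `clientManager`, `pipeline`, and `containerID` are assumed to be
// initialized elsewhere.
XceiverClientSpi client = clientManager.acquireClient(pipeline);
try {
  ContainerProtos.ContainerCommandRequestProto request =
      ContainerTestHelper.getCreateContainerRequest(containerID, pipeline);
  client.sendCommand(request);
} finally {
  // second argument false: keep the cached client entry valid
  clientManager.releaseClient(client, false);
}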
Use of org.apache.hadoop.hdds.scm.XceiverClientSpi in project ozone by apache.
The class TestOzoneClientRetriesOnExceptions, method testGroupMismatchExceptionHandling.
@Test
public void testGroupMismatchExceptionHandling() throws Exception {
  String keyName = getKeyName();
  int dataLength = maxFlushSize + 50;
  OzoneOutputStream key =
      createKey(keyName, ReplicationType.RATIS, dataLength);
  // write data more than 1 chunk
  byte[] data1 = ContainerTestHelper
      .getFixedLengthString(keyString, dataLength).getBytes(UTF_8);
  Assert.assertTrue(key.getOutputStream() instanceof KeyOutputStream);
  KeyOutputStream keyOutputStream = (KeyOutputStream) key.getOutputStream();
  long containerID = keyOutputStream.getStreamEntries().get(0)
      .getBlockID().getContainerID();
  Assert.assertTrue(keyOutputStream.getStreamEntries().size() == 1);
  ContainerInfo container = cluster.getStorageContainerManager()
      .getContainerManager().getContainer(ContainerID.valueOf(containerID));
  Pipeline pipeline = cluster.getStorageContainerManager()
      .getPipelineManager().getPipeline(container.getPipelineID());
  XceiverClientSpi xceiverClient =
      xceiverClientManager.acquireClient(pipeline);
  xceiverClient.sendCommand(
      ContainerTestHelper.getCreateContainerRequest(containerID, pipeline));
  xceiverClientManager.releaseClient(xceiverClient, false);
  key.write(data1);
  OutputStream stream =
      keyOutputStream.getStreamEntries().get(0).getOutputStream();
  Assert.assertTrue(stream instanceof BlockOutputStream);
  BlockOutputStream blockOutputStream = (BlockOutputStream) stream;
  TestHelper.waitForPipelineClose(key, cluster, false);
  key.flush();
  Assert.assertTrue(HddsClientUtils.checkForException(
      blockOutputStream.getIoException()) instanceof GroupMismatchException);
  Assert.assertTrue(keyOutputStream.getExcludeList().getPipelineIds()
      .contains(pipeline.getId()));
  Assert.assertTrue(keyOutputStream.getStreamEntries().size() == 2);
  key.close();
  Assert.assertTrue(keyOutputStream.getStreamEntries().size() == 0);
  validateData(keyName, data1);
}
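The GroupMismatchException assertion relies on HddsClientUtils.checkForException to unwrap the failure captured by the stream. A small sketch of that check, assuming a BlockOutputStream whose last write or flush has already failed:

// Sketch: unwrap the stream's captured failure and branch on its type.
// `blockOutputStream` is assumed to be a BlockOutputStream whose last
// write or flush failed.
Throwable cause = HddsClientUtils.checkForException(
    blockOutputStream.getIoException());
if (cause instanceof GroupMismatchException) {
  // The pipeline's Raft group changed; the client is expected to add the
  // pipeline to its exclude list and allocate a new block elsewhere.
}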
Use of org.apache.hadoop.hdds.scm.XceiverClientSpi in project ozone by apache.
The class TestReadRetries, method testPutKeyAndGetKeyThreeNodes.
@Test
public void testPutKeyAndGetKeyThreeNodes() throws Exception {
  String volumeName = UUID.randomUUID().toString();
  String bucketName = UUID.randomUUID().toString();
  String value = "sample value";
  store.createVolume(volumeName);
  OzoneVolume volume = store.getVolume(volumeName);
  volume.createBucket(bucketName);
  OzoneBucket bucket = volume.getBucket(bucketName);
  String keyName = "a/b/c/" + UUID.randomUUID().toString();
  OzoneOutputStream out = bucket.createKey(keyName,
      value.getBytes(UTF_8).length, ReplicationType.RATIS,
      ReplicationFactor.THREE, new HashMap<>());
  KeyOutputStream groupOutputStream = (KeyOutputStream) out.getOutputStream();
  XceiverClientFactory factory = groupOutputStream.getXceiverClientFactory();
  out.write(value.getBytes(UTF_8));
  out.close();
  // First, confirm the key info from the client matches the info in OM.
  OmKeyArgs.Builder builder = new OmKeyArgs.Builder();
  builder.setVolumeName(volumeName).setBucketName(bucketName)
      .setKeyName(keyName).setRefreshPipeline(true);
  OmKeyLocationInfo keyInfo = ozoneManager.lookupKey(builder.build())
      .getKeyLocationVersions().get(0)
      .getBlocksLatestVersionOnly().get(0);
  long containerID = keyInfo.getContainerID();
  long localID = keyInfo.getLocalID();
  OzoneKeyDetails keyDetails = bucket.getKey(keyName);
  Assert.assertEquals(keyName, keyDetails.getName());
  List<OzoneKeyLocation> keyLocations = keyDetails.getOzoneKeyLocations();
  Assert.assertEquals(1, keyLocations.size());
  Assert.assertEquals(containerID, keyLocations.get(0).getContainerID());
  Assert.assertEquals(localID, keyLocations.get(0).getLocalID());
  // Make sure that the data size matches.
  Assert.assertEquals(value.getBytes(UTF_8).length,
      keyLocations.get(0).getLength());
  ContainerInfo container = cluster.getStorageContainerManager()
      .getContainerManager().getContainer(ContainerID.valueOf(containerID));
  Pipeline pipeline = cluster.getStorageContainerManager()
      .getPipelineManager().getPipeline(container.getPipelineID());
  List<DatanodeDetails> datanodes = pipeline.getNodes();
  DatanodeDetails datanodeDetails = datanodes.get(0);
  Assert.assertNotNull(datanodeDetails);
  XceiverClientSpi clientSpi = factory.acquireClient(pipeline);
  Assert.assertTrue(clientSpi instanceof XceiverClientRatis);
  XceiverClientRatis ratisClient = (XceiverClientRatis) clientSpi;
  ratisClient.watchForCommit(keyInfo.getBlockCommitSequenceId());
  // shut down the first datanode
  cluster.shutdownHddsDatanode(datanodeDetails);
  // try to read; this should succeed
  readKey(bucket, keyName, value);
  // read the intermediate directory
  verifyIntermediateDir(bucket, "a/b/c");
  // shut down the second datanode
  datanodeDetails = datanodes.get(1);
  cluster.shutdownHddsDatanode(datanodeDetails);
  // we should still be able to read via the Standalone protocol
  readKey(bucket, keyName, value);
  // shut down the third datanode
  datanodeDetails = datanodes.get(2);
  cluster.shutdownHddsDatanode(datanodeDetails);
  try {
    readKey(bucket, keyName, value);
    fail("Expected exception not thrown");
  } catch (IOException e) {
    // an IOException is expected since none of the servers are available
  }
  factory.releaseClient(clientSpi, false);
}
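The Ratis-specific part of this test, acquiring the client from an XceiverClientFactory and waiting on a commit index, distills to the sketch below; `factory`, `pipeline`, and `commitIndex` stand in for values obtained as in the test above.

// Sketch: acquire a client for the pipeline, wait for a commit index on
// the Ratis ring, then release the client. `commitIndex` stands in for a
// block commit sequence id obtained from OM key info, as above.
XceiverClientSpi spi = factory.acquireClient(pipeline);
try {
  Assert.assertTrue(spi instanceof XceiverClientRatis);
  ((XceiverClientRatis) spi).watchForCommit(commitIndex);
} finally {
  factory.releaseClient(spi, false);
}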
Use of org.apache.hadoop.hdds.scm.XceiverClientSpi in project ozone by apache.
The class TestContainerServer, method runTestClientServer.
static void runTestClientServer(
    int numDatanodes,
    CheckedBiConsumer<Pipeline, OzoneConfiguration, IOException> initConf,
    CheckedBiFunction<Pipeline, OzoneConfiguration, XceiverClientSpi,
        IOException> createClient,
    CheckedBiFunction<DatanodeDetails, OzoneConfiguration, XceiverServerSpi,
        IOException> createServer,
    CheckedBiConsumer<DatanodeDetails, Pipeline, IOException> initServer)
    throws Exception {
  final List<XceiverServerSpi> servers = new ArrayList<>();
  XceiverClientSpi client = null;
  try {
    final Pipeline pipeline = MockPipeline.createPipeline(numDatanodes);
    initConf.accept(pipeline, CONF);
    for (DatanodeDetails dn : pipeline.getNodes()) {
      final XceiverServerSpi s = createServer.apply(dn, CONF);
      servers.add(s);
      s.start();
      initServer.accept(dn, pipeline);
    }
    client = createClient.apply(pipeline, CONF);
    client.connect();
    final ContainerCommandRequestProto request =
        ContainerTestHelper.getCreateContainerRequest(
            ContainerTestHelper.getTestContainerID(), pipeline);
    Assert.assertNotNull(request.getTraceID());
    client.sendCommand(request);
  } finally {
    if (client != null) {
      client.close();
    }
    servers.stream().forEach(XceiverServerSpi::stop);
  }
}
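Unlike the manager-based examples above, a client created directly by a factory function must be connected and closed by the caller, as this helper does. The bare lifecycle, with `createClient`, `pipeline`, `conf`, and `request` standing in for the caller-supplied values above:

// Sketch of the bare client lifecycle used by runTestClientServer:
// a directly created client is connected and closed by the caller.
XceiverClientSpi client = createClient.apply(pipeline, conf);
try {
  client.connect();
  client.sendCommand(request);
} finally {
  client.close();
}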
Use of org.apache.hadoop.hdds.scm.XceiverClientSpi in project ozone by apache.
The class TestCSMMetrics, method runContainerStateMachineMetrics.
static void runContainerStateMachineMetrics(
    int numDatanodes,
    BiConsumer<Pipeline, OzoneConfiguration> initConf,
    TestCSMMetrics.CheckedBiFunction<Pipeline, OzoneConfiguration,
        XceiverClientSpi, IOException> createClient,
    TestCSMMetrics.CheckedBiFunction<DatanodeDetails, OzoneConfiguration,
        XceiverServerSpi, IOException> createServer,
    CheckedBiConsumer<DatanodeDetails, Pipeline, IOException> initServer)
    throws Exception {
  final List<XceiverServerSpi> servers = new ArrayList<>();
  XceiverClientSpi client = null;
  try {
    final Pipeline pipeline = MockPipeline.createPipeline(numDatanodes);
    final OzoneConfiguration conf = new OzoneConfiguration();
    initConf.accept(pipeline, conf);
    for (DatanodeDetails dn : pipeline.getNodes()) {
      final XceiverServerSpi s = createServer.apply(dn, conf);
      servers.add(s);
      s.start();
      initServer.accept(dn, pipeline);
    }
    client = createClient.apply(pipeline, conf);
    client.connect();
    // Before Read Chunk/Write Chunk
    MetricsRecordBuilder metric = getMetrics(CSMMetrics.SOURCE_NAME
        + RaftGroupId.valueOf(pipeline.getId().getId()).toString());
    assertCounter("NumWriteStateMachineOps", 0L, metric);
    assertCounter("NumReadStateMachineOps", 0L, metric);
    assertCounter("NumApplyTransactionOps", 0L, metric);
    assertCounter("NumBytesWrittenCount", 0L, metric);
    assertCounter("NumBytesCommittedCount", 0L, metric);
    assertCounter("NumStartTransactionVerifyFailures", 0L, metric);
    assertCounter("NumContainerNotOpenVerifyFailures", 0L, metric);
    assertCounter("WriteChunkNumOps", 0L, metric);
    double applyTransactionLatency =
        getDoubleGauge("ApplyTransactionAvgTime", metric);
    assertTrue(applyTransactionLatency == 0.0);
    double writeStateMachineLatency =
        getDoubleGauge("WriteStateMachineDataAvgTime", metric);
    assertTrue(writeStateMachineLatency == 0.0);
    // Write Chunk
    BlockID blockID = ContainerTestHelper.getTestBlockID(
        ContainerTestHelper.getTestContainerID());
    ContainerProtos.ContainerCommandRequestProto writeChunkRequest =
        ContainerTestHelper.getWriteChunkRequest(pipeline, blockID, 1024, null);
    ContainerCommandResponseProto response =
        client.sendCommand(writeChunkRequest);
    Assert.assertEquals(ContainerProtos.Result.SUCCESS, response.getResult());
    metric = getMetrics(CSMMetrics.SOURCE_NAME
        + RaftGroupId.valueOf(pipeline.getId().getId()).toString());
    assertCounter("NumWriteStateMachineOps", 1L, metric);
    assertCounter("NumBytesWrittenCount", 1024L, metric);
    assertCounter("NumApplyTransactionOps", 1L, metric);
    assertCounter("NumBytesCommittedCount", 1024L, metric);
    assertCounter("NumStartTransactionVerifyFailures", 0L, metric);
    assertCounter("NumContainerNotOpenVerifyFailures", 0L, metric);
    assertCounter("WriteChunkNumOps", 1L, metric);
    // Read Chunk
    ContainerProtos.ContainerCommandRequestProto readChunkRequest =
        ContainerTestHelper.getReadChunkRequest(
            pipeline, writeChunkRequest.getWriteChunk());
    response = client.sendCommand(readChunkRequest);
    Assert.assertEquals(ContainerProtos.Result.SUCCESS, response.getResult());
    metric = getMetrics(CSMMetrics.SOURCE_NAME
        + RaftGroupId.valueOf(pipeline.getId().getId()).toString());
    assertCounter("NumQueryStateMachineOps", 1L, metric);
    assertCounter("NumApplyTransactionOps", 1L, metric);
    applyTransactionLatency =
        getDoubleGauge("ApplyTransactionAvgTime", metric);
    assertTrue(applyTransactionLatency > 0.0);
    writeStateMachineLatency =
        getDoubleGauge("WriteStateMachineDataAvgTime", metric);
    assertTrue(writeStateMachineLatency > 0.0);
  } finally {
    if (client != null) {
      client.close();
    }
    servers.stream().forEach(XceiverServerSpi::stop);
  }
}
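Every metrics lookup in this helper builds the source name the same way: CSMMetrics.SOURCE_NAME plus the Raft group id derived from the pipeline id. A condensed sketch of that lookup and a counter assertion, using the same helpers as the test above:

// Sketch: build the CSM metrics source name from the pipeline id and
// assert on a counter, as done repeatedly above.
String source = CSMMetrics.SOURCE_NAME
    + RaftGroupId.valueOf(pipeline.getId().getId()).toString();
MetricsRecordBuilder metric = getMetrics(source);
assertCounter("NumWriteStateMachineOps", 1L, metric);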