use of org.apache.hadoop.hdds.scm.XceiverClientManager in project ozone by apache.
the class TestXceiverClientManager method testFreeByReference.
@Test
public void testFreeByReference() throws IOException {
  OzoneConfiguration conf = new OzoneConfiguration();
  ScmClientConfig clientConfig = conf.getObject(ScmClientConfig.class);
  clientConfig.setMaxSize(1);
  String metaDir = GenericTestUtils.getTempPath(
      TestXceiverClientManager.class.getName() + UUID.randomUUID());
  conf.set(HDDS_METADATA_DIR_NAME, metaDir);
  XceiverClientManager clientManager =
      new XceiverClientManager(conf, clientConfig, null);
  Cache<String, XceiverClientSpi> cache = clientManager.getClientCache();
  ContainerWithPipeline container1 = storageContainerLocationClient
      .allocateContainer(SCMTestUtils.getReplicationType(conf),
          HddsProtos.ReplicationFactor.ONE, OzoneConsts.OZONE);
  XceiverClientSpi client1 = clientManager.acquireClient(container1.getPipeline());
  Assert.assertEquals(1, client1.getRefcount());
  Assert.assertEquals(container1.getPipeline(), client1.getPipeline());
  ContainerWithPipeline container2 = storageContainerLocationClient
      .allocateContainer(SCMTestUtils.getReplicationType(conf),
          HddsProtos.ReplicationFactor.THREE, OzoneConsts.OZONE);
  XceiverClientSpi client2 = clientManager.acquireClient(container2.getPipeline());
  Assert.assertEquals(1, client2.getRefcount());
  Assert.assertNotEquals(client1, client2);
  // The least recently used entry (i.e. the client for container1) is
  // evicted from the cache, since the cache size is capped at 1.
  XceiverClientSpi nonExistent1 = cache.getIfPresent(
      container1.getContainerInfo().getPipelineID().getId().toString()
          + container1.getContainerInfo().getReplicationType());
  Assert.assertNull(nonExistent1);
  // However, the container call should still succeed because client1 holds
  // a reference count on the underlying connection.
  ContainerProtocolCalls.createContainer(client1,
      container1.getContainerInfo().getContainerID(), null);
  // After releasing the client, the connection is closed and any further
  // container operations should fail.
  clientManager.releaseClient(client1, false);
  String expectedMessage = "This channel is not connected.";
  try {
    ContainerProtocolCalls.createContainer(client1,
        container1.getContainerInfo().getContainerID(), null);
    Assert.fail("Create container should throw an exception on a closed client");
  } catch (Exception e) {
    Assert.assertEquals(IOException.class, e.getClass());
    Assert.assertTrue(e.getMessage().contains(expectedMessage));
  }
  clientManager.releaseClient(client2, false);
}
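The refcount-based lifecycle exercised above generalizes to any code that borrows a client from the manager. The following is a minimal hedged sketch of that acquire/use/release pattern, assuming only the XceiverClientManager and XceiverClientSpi APIs that appear in the test; the boolean passed to releaseClient mirrors the test's usage (false on the happy path).

  import java.io.IOException;
  import org.apache.hadoop.hdds.scm.XceiverClientManager;
  import org.apache.hadoop.hdds.scm.XceiverClientSpi;
  import org.apache.hadoop.hdds.scm.pipeline.Pipeline;

  public final class XceiverClientLifecycle {
    private XceiverClientLifecycle() {
    }

    /**
     * Acquire a client for a pipeline, use it, and always release it so the
     * reference count drops even if the body throws.
     */
    static void withClient(XceiverClientManager manager, Pipeline pipeline)
        throws IOException {
      XceiverClientSpi client = manager.acquireClient(pipeline);
      try {
        // ... issue container commands through 'client' here ...
      } finally {
        // false = client is still healthy; the test above passes false
        // when releasing after normal use.
        manager.releaseClient(client, false);
      }
    }
  }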
use of org.apache.hadoop.hdds.scm.XceiverClientManager in project ozone by apache.
the class TestXceiverClientMetrics method testMetrics.
@Test
public void testMetrics() throws Exception {
  OzoneConfiguration conf = new OzoneConfiguration();
  String metaDir = GenericTestUtils.getTempPath(
      TestXceiverClientManager.class.getName() + UUID.randomUUID());
  conf.set(HDDS_METADATA_DIR_NAME, metaDir);
  XceiverClientManager clientManager = new XceiverClientManager(conf);
  ContainerWithPipeline container = storageContainerLocationClient
      .allocateContainer(SCMTestUtils.getReplicationType(conf),
          SCMTestUtils.getReplicationFactor(conf), OzoneConsts.OZONE);
  XceiverClientSpi client = clientManager.acquireClient(container.getPipeline());
  ContainerCommandRequestProto request = ContainerTestHelper
      .getCreateContainerRequest(
          container.getContainerInfo().getContainerID(),
          container.getPipeline());
  client.sendCommand(request);
  MetricsRecordBuilder containerMetrics = getMetrics(XceiverClientMetrics.SOURCE_NAME);
  // The request above is sent synchronously, so there should be no
  // pending requests once it returns.
  assertCounter("PendingOps", 0L, containerMetrics);
  assertCounter("numPendingCreateContainer", 0L, containerMetrics);
  // The op count of the average-latency metric should have been incremented.
  assertCounter("CreateContainerLatencyNumOps", 1L, containerMetrics);
  breakFlag = false;
  latch = new CountDownLatch(1);
  int numRequest = 10;
  List<CompletableFuture<ContainerCommandResponseProto>> computeResults = new ArrayList<>();
  // Start a new thread to send async requests.
  Thread sendThread = new Thread(() -> {
    while (!breakFlag) {
      try {
        // Use the async interface to exercise the pending metrics.
        for (int i = 0; i < numRequest; i++) {
          BlockID blockID = ContainerTestHelper.getTestBlockID(
              container.getContainerInfo().getContainerID());
          ContainerProtos.ContainerCommandRequestProto smallFileRequest =
              ContainerTestHelper.getWriteSmallFileRequest(
                  client.getPipeline(), blockID, 1024);
          CompletableFuture<ContainerProtos.ContainerCommandResponseProto> response =
              client.sendCommandAsync(smallFileRequest).getResponse();
          computeResults.add(response);
        }
        Thread.sleep(1000);
      } catch (Exception ignored) {
      }
    }
    latch.countDown();
  });
  sendThread.start();
  GenericTestUtils.waitFor(() -> {
    // Check whether the pending metric counts have increased.
    MetricsRecordBuilder metric = getMetrics(XceiverClientMetrics.SOURCE_NAME);
    long pendingOps = getLongCounter("PendingOps", metric);
    long pendingPutSmallFileOps = getLongCounter("numPendingPutSmallFile", metric);
    if (pendingOps > 0 && pendingPutSmallFileOps > 0) {
      // Signal the sender thread to stop.
      breakFlag = true;
      return true;
    } else {
      return false;
    }
  }, 100, 60000);
  // Block until we have stopped sending async requests.
  latch.await();
  // Wait for all futures to complete.
  GenericTestUtils.waitFor(() -> {
    for (CompletableFuture<ContainerCommandResponseProto> future : computeResults) {
      if (!future.isDone()) {
        return false;
      }
    }
    return true;
  }, 100, 60000);
  // The pending metrics should have dropped back to 0.
  containerMetrics = getMetrics(XceiverClientMetrics.SOURCE_NAME);
  assertCounter("PendingOps", 0L, containerMetrics);
  assertCounter("numPendingPutSmallFile", 0L, containerMetrics);
  clientManager.close();
}
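A note on the second waitFor loop: since the test already holds every CompletableFuture in computeResults, the manual isDone() polling could instead use CompletableFuture.allOf, which completes once all of its inputs do. A hedged sketch of that alternative, as a fragment in the same test context (computeResults as above); java.util.concurrent.TimeUnit is assumed to be imported:

  // Wait for every async response collected in computeResults to finish.
  // allOf completes (possibly exceptionally) once all inputs are done.
  CompletableFuture<Void> all = CompletableFuture.allOf(
      computeResults.toArray(new CompletableFuture[0]));
  // Bounded wait, analogous to waitFor's 60-second timeout above.
  all.get(60, TimeUnit.SECONDS);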
use of org.apache.hadoop.hdds.scm.XceiverClientManager in project ozone by apache.
the class DatanodeChunkGenerator method call.
@Override
public Void call() throws Exception {
  OzoneConfiguration ozoneConf = createOzoneConfiguration();
  if (OzoneSecurityUtil.isSecurityEnabled(ozoneConf)) {
    throw new IllegalArgumentException(
        "Datanode chunk generator is not supported in secure environment");
  }
  List<String> pipelinesFromCmd = Arrays.asList(pipelineIds.split(","));
  List<String> datanodeHosts = Arrays.asList(this.datanodes.split(","));
  Set<Pipeline> pipelines;
  try (StorageContainerLocationProtocol scmLocationClient =
           createStorageContainerLocationClient(ozoneConf);
       XceiverClientManager xceiverClientManager =
           new XceiverClientManager(ozoneConf)) {
    List<Pipeline> pipelinesFromSCM = scmLocationClient.listPipelines();
    Pipeline firstPipeline;
    init();
    if (!arePipelinesOrDatanodesProvided()) {
      // Default behaviour if no arguments are provided: pick the first
      // pipeline that requires three nodes (factor=THREE).
      firstPipeline = pipelinesFromSCM.stream()
          .filter(p -> p.getReplicationConfig().getRequiredNodes() == 3)
          .findFirst()
          .orElseThrow(() -> new IllegalArgumentException(
              "Pipeline ID is NOT defined, and no pipeline "
                  + "has been found with factor=THREE"));
      XceiverClientSpi xceiverClientSpi =
          xceiverClientManager.acquireClient(firstPipeline);
      xceiverClients = new ArrayList<>();
      xceiverClients.add(xceiverClientSpi);
    } else {
      xceiverClients = new ArrayList<>();
      pipelines = new HashSet<>();
      for (String pipelineId : pipelinesFromCmd) {
        List<Pipeline> selectedPipelines = pipelinesFromSCM.stream()
            .filter(p -> p.getId().toString().equals("PipelineID=" + pipelineId)
                || pipelineContainsDatanode(p, datanodeHosts))
            .collect(Collectors.toList());
        pipelines.addAll(selectedPipelines);
      }
      for (Pipeline p : pipelines) {
        LOG.info("Writing to pipeline: {}", p.getId());
        xceiverClients.add(xceiverClientManager.acquireClient(p));
      }
      if (pipelines.isEmpty()) {
        throw new IllegalArgumentException(
            "Couldn't find any of the selected pipelines");
      }
    }
    runTest();
  } finally {
    for (XceiverClientSpi xceiverClientSpi : xceiverClients) {
      if (xceiverClientSpi != null) {
        xceiverClientSpi.close();
      }
    }
  }
  return null;
}
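The pipelineContainsDatanode(...) helper used in the filter is not shown in this snippet. A hypothetical sketch of what such a helper could look like, matching the given hosts against the pipeline's member nodes (the real Ozone implementation may differ); Pipeline.getNodes() and DatanodeDetails.getHostName() are part of the HDDS client API:

  import java.util.List;
  import org.apache.hadoop.hdds.protocol.DatanodeDetails;
  import org.apache.hadoop.hdds.scm.pipeline.Pipeline;

  // Hypothetical helper: true if any node in the pipeline runs on one of
  // the datanode hosts supplied on the command line.
  private static boolean pipelineContainsDatanode(Pipeline p,
      List<String> datanodeHosts) {
    for (DatanodeDetails dn : p.getNodes()) {
      if (datanodeHosts.contains(dn.getHostName())) {
        return true;
      }
    }
    return false;
  }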
use of org.apache.hadoop.hdds.scm.XceiverClientManager in project ozone by apache.
the class DatanodeChunkValidator method call.
@Override
public Void call() throws Exception {
  init();
  OzoneConfiguration ozoneConf = createOzoneConfiguration();
  if (OzoneSecurityUtil.isSecurityEnabled(ozoneConf)) {
    throw new IllegalArgumentException(
        "Datanode chunk validator is not supported in secure environment");
  }
  try (StorageContainerLocationProtocol scmClient =
           createStorageContainerLocationClient(ozoneConf)) {
    Pipeline pipeline = findPipelineForTest(pipelineId, scmClient, LOG);
    try (XceiverClientManager xceiverClientManager =
             new XceiverClientManager(ozoneConf)) {
      xceiverClient = xceiverClientManager.acquireClientForReadData(pipeline);
      checksumProtobuf = ContainerProtos.ChecksumData.newBuilder()
          .setBytesPerChecksum(4)
          .setType(ContainerProtos.ChecksumType.CRC32)
          .build();
      readReference();
      timer = getMetrics().timer("chunk-validate");
      runTests(this::validateChunk);
      xceiverClientManager.releaseClientForReadData(xceiverClient, true);
    }
  } finally {
    if (xceiverClient != null) {
      xceiverClient.close();
    }
  }
  return null;
}
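The timer obtained from getMetrics().timer("chunk-validate") is used by runTests to record per-chunk validation latency. As a hedged illustration only, assuming the Dropwizard (com.codahale.metrics) Timer API that Ozone's freon tools build on, a single step could be timed like this; 'stepNo' is a hypothetical step counter, not a name from the snippet above:

  import com.codahale.metrics.Timer;

  // Time one validation step; the Context records the elapsed time when
  // stopped, even if validateChunk throws.
  Timer.Context context = timer.time();
  try {
    validateChunk(stepNo);
  } finally {
    context.stop();
  }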
use of org.apache.hadoop.hdds.scm.XceiverClientManager in project ozone by apache.
the class TestOzoneClientRetriesOnExceptions method init.
/**
 * Create a MiniOzoneCluster for testing.
 * <p>
 * Ozone is made active by setting OZONE_ENABLED = true.
 *
 * @throws Exception
 */
@Before
public void init() throws Exception {
  chunkSize = 100;
  flushSize = 2 * chunkSize;
  maxFlushSize = 2 * flushSize;
  blockSize = 2 * maxFlushSize;
  OzoneClientConfig clientConfig = conf.getObject(OzoneClientConfig.class);
  clientConfig.setMaxRetryCount(MAX_RETRIES);
  clientConfig.setChecksumType(ChecksumType.NONE);
  clientConfig.setStreamBufferFlushDelay(false);
  conf.setFromObject(clientConfig);
  conf.setTimeDuration(HDDS_SCM_WATCHER_TIMEOUT, 1000, TimeUnit.MILLISECONDS);
  conf.setInt(ScmConfigKeys.OZONE_SCM_PIPELINE_OWNER_CONTAINER_COUNT, 3);
  conf.setQuietMode(false);
  cluster = MiniOzoneCluster.newBuilder(conf)
      .setNumDatanodes(7)
      .setTotalPipelineNumLimit(10)
      .setBlockSize(blockSize)
      .setChunkSize(chunkSize)
      .setStreamBufferFlushSize(flushSize)
      .setStreamBufferMaxSize(maxFlushSize)
      .setStreamBufferSizeUnit(StorageUnit.BYTES)
      .build();
  cluster.waitForClusterToBeReady();
  // The easiest way to create an open container is to create a key.
  client = OzoneClientFactory.getRpcClient(conf);
  objectStore = client.getObjectStore();
  xceiverClientManager = new XceiverClientManager(conf);
  keyString = UUID.randomUUID().toString();
  volumeName = "testblockoutputstreamwithretries";
  bucketName = volumeName;
  objectStore.createVolume(volumeName);
  objectStore.getVolume(volumeName).createBucket(bucketName);
}
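The comment in init() notes that creating a key is the easiest way to get an open container. A hedged sketch of how a test method could do that with the objectStore set up above; the createKey signature follows the OzoneBucket API of this Ozone version, and the write size is an arbitrary choice for illustration:

  import java.util.HashMap;
  import org.apache.hadoop.hdds.client.ReplicationFactor;
  import org.apache.hadoop.hdds.client.ReplicationType;
  import org.apache.hadoop.ozone.client.OzoneBucket;
  import org.apache.hadoop.ozone.client.io.OzoneOutputStream;

  // Write a small key so that an open container (and its pipeline) exists.
  OzoneBucket bucket = objectStore.getVolume(volumeName).getBucket(bucketName);
  byte[] data = new byte[chunkSize];  // one chunk's worth of data
  try (OzoneOutputStream out = bucket.createKey(keyString, data.length,
      ReplicationType.RATIS, ReplicationFactor.THREE, new HashMap<>())) {
    out.write(data);
  }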