Example usage of org.apache.hadoop.fs.StorageStatistics.LongStatistic in the Apache Hadoop project,
taken from the class TestDFSOpsCountStatistics, method testIsTracked.
@Test
public void testIsTracked() {
  // Neither null nor an unknown symbol may be reported as tracked.
  assertFalse(statistics.isTracked(null));
  assertFalse(statistics.isTracked(NO_SUCH_OP));

  // Conversely, every statistic the iterator exposes must be tracked.
  for (Iterator<LongStatistic> it = statistics.getLongStatistics(); it.hasNext();) {
    assertTrue(statistics.isTracked(it.next().getName()));
  }
}
Example usage of org.apache.hadoop.fs.StorageStatistics.LongStatistic in the Apache Hadoop project,
taken from the class TestDistributedFileSystem, method testConcurrentStatistics.
// Verifies that the MKDIRS counter in DFSOpsCountStatistics is updated
// correctly when several threads call mkdirs concurrently, i.e. that the
// per-operation counters tolerate a concurrent race.
@SuppressWarnings("ThrowableResultOfMethodCallIgnored")
@Test(timeout = 180000)
public void testConcurrentStatistics() throws IOException, InterruptedException {
// Reset the global per-scheme statistics so the deltas asserted below are exact.
FileSystem.getStatistics(HdfsConstants.HDFS_URI_SCHEME, DistributedFileSystem.class).reset();
final MiniDFSCluster cluster = new MiniDFSCluster.Builder(new Configuration()).build();
cluster.waitActive();
final FileSystem fs = cluster.getFileSystem();
final int numThreads = 5;
final ExecutorService threadPool = HadoopExecutors.newFixedThreadPool(numThreads);
try {
// Latches coordinate a simultaneous start so the mkdirs calls race:
// each worker counts down allExecutorThreadsReady, blocks on startBlocker,
// and signals completion via allDone.
final CountDownLatch allExecutorThreadsReady = new CountDownLatch(numThreads);
final CountDownLatch startBlocker = new CountDownLatch(1);
final CountDownLatch allDone = new CountDownLatch(numThreads);
// Records only the FIRST failure observed by any worker thread.
final AtomicReference<Throwable> childError = new AtomicReference<>();
for (int i = 0; i < numThreads; i++) {
threadPool.submit(new Runnable() {
@Override
public void run() {
allExecutorThreadsReady.countDown();
try {
startBlocker.await();
// NOTE(review): this local fs shadows the enclosing method's fs;
// presumably intentional so each worker fetches its own handle — confirm.
final FileSystem fs = cluster.getFileSystem();
// All workers create the same path; the assertion below expects each
// call to bump the MKDIRS counter by one regardless.
fs.mkdirs(new Path("/testStatisticsParallelChild"));
} catch (Throwable t) {
LOG.error("Child failed when calling mkdir", t);
// Keep the first error only; later failures are just logged above.
childError.compareAndSet(null, t);
} finally {
allDone.countDown();
}
}
});
}
// Snapshot the MKDIRS count before releasing the workers (they are still
// parked on startBlocker at this point).
final long oldMkdirOpCount = getOpStatistics(OpType.MKDIRS);
// wait until all threads are ready
allExecutorThreadsReady.await();
// all threads start making directories
startBlocker.countDown();
// wait until all threads are done
allDone.await();
assertNull("Child failed with exception " + childError.get(), childError.get());
checkStatistics(fs, 0, numThreads, 0);
// check the single operation count stat
checkOpStatistics(OpType.MKDIRS, numThreads + oldMkdirOpCount);
// iterate all the operation counts
for (Iterator<LongStatistic> opCountIter = FileSystem.getGlobalStorageStatistics().get(DFSOpsCountStatistics.NAME).getLongStatistics(); opCountIter.hasNext(); ) {
final LongStatistic opCount = opCountIter.next();
if (OpType.MKDIRS.getSymbol().equals(opCount.getName())) {
assertEquals("Unexpected op count from iterator!", numThreads + oldMkdirOpCount, opCount.getValue());
}
LOG.info(opCount.getName() + "\t" + opCount.getValue());
}
} finally {
// Always stop worker threads and tear down the mini cluster.
threadPool.shutdownNow();
cluster.shutdown();
}
}
Example usage of org.apache.hadoop.fs.StorageStatistics.LongStatistic in the Apache Hadoop project,
taken from the class TestFileSystemStorageStatistics, method testGetLongStatistics.
@Test
public void testGetLongStatistics() {
  // Each statistic reported by the iterator must match the value held by
  // the underlying FileSystem.Statistics for the same key.
  for (Iterator<LongStatistic> it = storageStatistics.getLongStatistics(); it.hasNext();) {
    final LongStatistic stat = it.next();
    assertNotNull(stat);
    final long expected = getStatisticsValue(stat.getName());
    LOG.info("{}: FileSystem.Statistics={}, FileSystemStorageStatistics={}", stat.getName(), expected, stat.getValue());
    assertEquals(expected, stat.getValue());
  }
}
Example usage of org.apache.hadoop.fs.StorageStatistics.LongStatistic in the Apache Hadoop project,
taken from the class TestDFSOpsCountStatistics, method testGetLongStatistics.
@Test
public void testGetLongStatistics() {
  // Number of entries returned by the iterator.
  int seen = 0;
  for (Iterator<LongStatistic> it = statistics.getLongStatistics(); it.hasNext(); seen++) {
    final LongStatistic stat = it.next();
    assertNotNull(stat);
    // Every reported name must map back to a known OpType with the
    // expected counter value.
    final OpType op = OpType.fromSymbol(stat.getName());
    assertNotNull(op);
    assertTrue(expectedOpsCountMap.containsKey(op));
    assertEquals(expectedOpsCountMap.get(op).longValue(), stat.getValue());
  }
  // The iterator must visit every OpType enum entry exactly once.
  assertEquals(OpType.values().length, seen);
}
Example usage of org.apache.hadoop.fs.StorageStatistics.LongStatistic in the Apache Hadoop project,
taken from the class TestDFSOpsCountStatistics, method testReset.
@Test
public void testReset() {
  statistics.reset();
  // Mirror the reset in the expected map so later verification lines up.
  for (OpType op : OpType.values()) {
    expectedOpsCountMap.get(op).set(0);
  }
  // After a reset, every counter exposed by the iterator must read zero.
  for (Iterator<LongStatistic> it = statistics.getLongStatistics(); it.hasNext();) {
    assertEquals(0, it.next().getValue());
  }
  // Counters incremented after a reset must still verify correctly.
  incrementOpsCountByRandomNumbers();
  verifyStatistics();
}
Aggregations