Use of java.util.Random in project Hadoop by Apache.
The class LoadGenerator, method generateLoadOnNN.
/**
 * This is the main function: it runs threads to generate load on the NN.
 * It starts the number of DFSClient threads specified by the user and
 * stops all the threads once the specified elapsed time has passed.
 */
protected int generateLoadOnNN() throws InterruptedException {
  int hostHashCode = hostname.hashCode();
  if (seed == 0) {
    r = new Random(System.currentTimeMillis() + hostHashCode);
  } else {
    r = new Random(seed + hostHashCode);
  }
  try {
    fc = FileContext.getFileContext(getConf());
  } catch (IOException ioe) {
    System.err.println("Can not initialize the file system: "
        + ioe.getLocalizedMessage());
    return -1;
  }
  int status = initFileDirTables();
  if (status != 0) {
    return status;
  }
  barrier();
  DFSClientThread[] threads = new DFSClientThread[numOfThreads];
  for (int i = 0; i < numOfThreads; i++) {
    threads[i] = new DFSClientThread(i);
    threads[i].start();
  }
  if (durations[0] > 0) {
    if (durations.length == 1) {
      // There is a fixed run time
      while (shouldRun) {
        Thread.sleep(2000);
        totalTime += 2;
        if (totalTime >= durations[0] || stopFileCreated()) {
          shouldRun = false;
        }
      }
    } else {
      while (shouldRun) {
        Thread.sleep(durations[currentIndex] * 1000);
        totalTime += durations[currentIndex];
        // Are we on the final line of the script?
        if ((currentIndex + 1) == durations.length || stopFileCreated()) {
          shouldRun = false;
        } else {
          if (LOG.isDebugEnabled()) {
            LOG.debug("Moving to index " + currentIndex + ": r = "
                + readProbs[currentIndex] + ", w = " + writeProbs[currentIndex]
                + " for duration " + durations[currentIndex]);
          }
          currentIndex++;
        }
      }
    }
  }
  if (LOG.isDebugEnabled()) {
    LOG.debug("Done with testing. Waiting for threads to finish.");
  }
  boolean failed = false;
  for (DFSClientThread thread : threads) {
    thread.join();
    for (int i = 0; i < TOTAL_OP_TYPES; i++) {
      executionTime[i] += thread.executionTime[i];
      numOfOps[i] += thread.totalNumOfOps[i];
    }
    failed = failed || thread.failed;
  }
  int exitCode = 0;
  if (failed) {
    exitCode = -ERR_TEST_FAILED;
  }
  totalOps = 0;
  for (int i = 0; i < TOTAL_OP_TYPES; i++) {
    totalOps += numOfOps[i];
  }
  return exitCode;
}
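The seeding logic at the top of this method is worth noting: a fixed seed plus the hostname's hash keeps each host's workload reproducible across runs while still making the streams differ between hosts, so the generators don't all issue identical operation sequences against the NameNode. A minimal standalone sketch of that pattern (the class name and hostnames below are hypothetical, not part of LoadGenerator):

import java.util.Random;

public class PerHostSeedDemo {
  // Derive a per-host Random: repeatable for a fixed seed, but
  // distinct on each host because of the hostname hash offset.
  static Random forHost(long seed, String hostname) {
    int hostHashCode = hostname.hashCode();
    return (seed == 0)
        ? new Random(System.currentTimeMillis() + hostHashCode)
        : new Random(seed + hostHashCode);
  }

  public static void main(String[] args) {
    Random a = forHost(42L, "worker-1");
    Random b = forHost(42L, "worker-2");
    // Same seed, different hosts: the streams diverge,
    // yet each host's stream is the same on every run.
    System.out.println(a.nextInt(100) + " vs " + b.nextInt(100));
  }
}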
Use of java.util.Random in project Hadoop by Apache.
The class StructureGenerator, method init.
/** Parse the command line arguments and initialize the data */
private int init(String[] args) {
  try {
    for (int i = 0; i < args.length; i++) {
      // parse command line
      if (args[i].equals("-maxDepth")) {
        maxDepth = Integer.parseInt(args[++i]);
        if (maxDepth < 1) {
          System.err.println("maxDepth must be positive: " + maxDepth);
          return -1;
        }
      } else if (args[i].equals("-minWidth")) {
        minWidth = Integer.parseInt(args[++i]);
        if (minWidth < 0) {
          System.err.println("minWidth must be non-negative: " + minWidth);
          return -1;
        }
      } else if (args[i].equals("-maxWidth")) {
        maxWidth = Integer.parseInt(args[++i]);
      } else if (args[i].equals("-numOfFiles")) {
        numOfFiles = Integer.parseInt(args[++i]);
        if (numOfFiles < 1) {
          System.err.println("numOfFiles must be positive: " + numOfFiles);
          return -1;
        }
      } else if (args[i].equals("-avgFileSize")) {
        avgFileSize = Double.parseDouble(args[++i]);
        if (avgFileSize <= 0) {
          System.err.println("avgFileSize must be positive: " + avgFileSize);
          return -1;
        }
      } else if (args[i].equals("-outDir")) {
        outDir = new File(args[++i]);
      } else if (args[i].equals("-seed")) {
        r = new Random(Long.parseLong(args[++i]));
      } else {
        System.err.println(USAGE);
        ToolRunner.printGenericCommandUsage(System.err);
        return -1;
      }
    }
  } catch (NumberFormatException e) {
    System.err.println("Illegal parameter: " + e.getLocalizedMessage());
    System.err.println(USAGE);
    return -1;
  }
  if (maxWidth < minWidth) {
    System.err.println("maxWidth must be at least minWidth: " + maxWidth);
    return -1;
  }
  if (r == null) {
    r = new Random();
  }
  return 0;
}
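The -seed handling here is the common "reproducible on request" idiom: construct the Random from the parsed seed when one is given, and only fall back to an unseeded Random after all arguments are parsed. A minimal sketch of the idiom in isolation (the class name and flag handling are illustrative, not StructureGenerator's full parser):

import java.util.Random;

public class OptionalSeedDemo {
  public static void main(String[] args) {
    Random r = null;
    for (int i = 0; i < args.length; i++) {
      if (args[i].equals("-seed")) {
        // Seeded: the generated structure is identical across runs.
        r = new Random(Long.parseLong(args[++i]));
      }
    }
    if (r == null) {
      // No seed supplied: fall back to a time-seeded Random.
      r = new Random();
    }
    System.out.println("first draw: " + r.nextInt(1000));
  }
}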
Use of java.util.Random in project Hadoop by Apache.
The class TestMutableMetrics, method testMutableRatesWithAggregationManyThreads.
@Test
public void testMutableRatesWithAggregationManyThreads() throws InterruptedException {
  final MutableRatesWithAggregation rates = new MutableRatesWithAggregation();
  final int n = 10;
  long[] opCount = new long[n];
  double[] opTotalTime = new double[n];
  for (int i = 0; i < n; i++) {
    opCount[i] = 0;
    opTotalTime[i] = 0;
    // Initialize so that the getLongCounter() method doesn't complain
    rates.add("metric" + i, 0);
  }
  Thread[] threads = new Thread[n];
  final CountDownLatch firstAddsFinished = new CountDownLatch(threads.length);
  final CountDownLatch firstSnapshotsFinished = new CountDownLatch(1);
  final CountDownLatch secondAddsFinished = new CountDownLatch(threads.length);
  final CountDownLatch secondSnapshotsFinished = new CountDownLatch(1);
  long seed = new Random().nextLong();
  LOG.info("Random seed = " + seed);
  final Random sleepRandom = new Random(seed);
  for (int tIdx = 0; tIdx < threads.length; tIdx++) {
    final int threadIdx = tIdx;
    threads[threadIdx] = new Thread() {
      @Override
      public void run() {
        try {
          for (int i = 0; i < 1000; i++) {
            rates.add("metric" + (i % n), (i / n) % 2 == 0 ? 1 : 2);
            // Sleep so additions can be interleaved with snapshots
            Thread.sleep(sleepRandom.nextInt(5));
          }
          firstAddsFinished.countDown();
          // Make sure all threads stay alive long enough for the first
          // snapshot to complete; else their metrics may be lost to GC
          firstSnapshotsFinished.await();
          // Let half the threads continue with more metrics and let half die
          if (threadIdx % 2 == 0) {
            for (int i = 0; i < 1000; i++) {
              rates.add("metric" + (i % n), (i / n) % 2 == 0 ? 1 : 2);
            }
            secondAddsFinished.countDown();
            secondSnapshotsFinished.await();
          } else {
            secondAddsFinished.countDown();
          }
        } catch (InterruptedException e) {
          // Ignore
        }
      }
    };
  }
  for (Thread t : threads) {
    t.start();
  }
  // Snapshot concurrently with the additions, accumulating the totals
  // into opCount / opTotalTime
  for (int i = 0; i < 100; i++) {
    snapshotMutableRatesWithAggregation(rates, opCount, opTotalTime);
    Thread.sleep(sleepRandom.nextInt(20));
  }
  firstAddsFinished.await();
  // Final snapshot to grab any remaining metrics and then verify that
  // the totals are as expected
  snapshotMutableRatesWithAggregation(rates, opCount, opTotalTime);
  for (int i = 0; i < n; i++) {
    assertEquals("metric" + i + " count", 1001, opCount[i]);
    assertEquals("metric" + i + " total", 1500, opTotalTime[i], 1.0);
  }
  firstSnapshotsFinished.countDown();
  // After half of the threads die, ensure that the remaining ones still
  // add metrics correctly and that snapshots occur correctly
  secondAddsFinished.await();
  snapshotMutableRatesWithAggregation(rates, opCount, opTotalTime);
  for (int i = 0; i < n; i++) {
    assertEquals("metric" + i + " count", 1501, opCount[i]);
    assertEquals("metric" + i + " total", 2250, opTotalTime[i], 1.0);
  }
  secondSnapshotsFinished.countDown();
}
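Note the pattern at the top of this test: draw a seed from an unseeded Random, log it, then drive all randomness from a Random seeded with that value, so a failing run can be replayed exactly by hard-coding the logged seed. A minimal sketch of just that pattern, outside any test harness (class name and output are illustrative):

import java.util.Random;

public class LoggedSeedDemo {
  public static void main(String[] args) {
    // Draw a fresh seed, but record it so a failure can be reproduced
    // later by substituting the printed value here.
    long seed = new Random().nextLong();
    System.out.println("Random seed = " + seed);
    Random sleepRandom = new Random(seed);
    // All subsequent randomness derives from the logged seed.
    System.out.println("first sleep: " + sleepRandom.nextInt(5) + " ms");
  }
}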
Use of java.util.Random in project Hadoop by Apache.
The class TestSampleQuantiles, method testQuantileError.
/**
 * Correctness test that checks that the absolute error of the estimate is
 * within the specified error bounds for some randomly permuted streams of
 * items.
 */
@Test
public void testQuantileError() throws IOException {
  final int count = 100000;
  Random r = new Random(0xDEADDEAD);
  Long[] values = new Long[count];
  for (int i = 0; i < count; i++) {
    values[i] = (long) (i + 1);
  }
  // Do 10 shuffle/insert/check cycles
  for (int i = 0; i < 10; i++) {
    System.out.println("Starting run " + i);
    Collections.shuffle(Arrays.asList(values), r);
    estimator.clear();
    for (int j = 0; j < count; j++) {
      estimator.insert(values[j]);
    }
    Map<Quantile, Long> snapshot = estimator.snapshot();
    for (Quantile q : quantiles) {
      long actual = (long) (q.quantile * count);
      long error = (long) (q.error * count);
      long estimate = snapshot.get(q);
      System.out.println(String.format(
          "Expected %d with error %d, estimated %d", actual, error, estimate));
      assertTrue(estimate <= actual + error);
      assertTrue(estimate >= actual - error);
    }
  }
}
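One detail worth calling out: Collections.shuffle(Arrays.asList(values), r) works here because Arrays.asList returns a fixed-size view backed by the array, so shuffling the list permutes values itself, and the seeded Random makes the permutation identical on every run. A small sketch of that behavior (class name and data are illustrative):

import java.util.Arrays;
import java.util.Collections;
import java.util.Random;

public class BackedShuffleDemo {
  public static void main(String[] args) {
    Long[] values = { 1L, 2L, 3L, 4L, 5L };
    // Arrays.asList returns a view backed by the array, so shuffling
    // the list reorders the array in place; the fixed seed makes the
    // resulting order the same on every run.
    Collections.shuffle(Arrays.asList(values), new Random(0xDEADDEAD));
    System.out.println(Arrays.toString(values));
  }
}

This is also why the test uses Long[] rather than long[]: Arrays.asList over a primitive array would produce a one-element list containing the array itself, not a shuffleable list of its elements.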
Use of java.util.Random in project Hadoop by Apache.
The class TestAppendDifferentChecksum, method testAlgoSwitchRandomized.
/**
 * Test which randomly alternates between appending with CRC32 and with
 * CRC32C, crossing several block boundaries. Then checks that all of the
 * data can be read back correctly.
 */
@Test(timeout = RANDOM_TEST_RUNTIME * 2)
public void testAlgoSwitchRandomized() throws IOException {
  FileSystem fsWithCrc32 = createFsWithChecksum("CRC32", 512);
  FileSystem fsWithCrc32C = createFsWithChecksum("CRC32C", 512);
  Path p = new Path("/testAlgoSwitchRandomized");
  long seed = Time.now();
  System.out.println("seed: " + seed);
  Random r = new Random(seed);
  // Create an empty file to start
  IOUtils.closeStream(fsWithCrc32.create(p));
  long st = Time.now();
  int len = 0;
  while (Time.now() - st < RANDOM_TEST_RUNTIME) {
    int thisLen = r.nextInt(500);
    FileSystem fs = (r.nextBoolean() ? fsWithCrc32 : fsWithCrc32C);
    FSDataOutputStream stm = fs.append(p);
    try {
      AppendTestUtil.write(stm, len, thisLen);
    } finally {
      stm.close();
    }
    len += thisLen;
  }
  AppendTestUtil.check(fsWithCrc32, p, len);
  AppendTestUtil.check(fsWithCrc32C, p, len);
}