Use of org.apache.hadoop.hbase.PerformanceEvaluation.TestOptions in the Apache HBase project: class TestPerformanceEvaluation, method testWriteInputFile.
/**
 * Exercise the mr spec writing. Simple assertions to make sure it is basically working.
 * Writes the MapReduce job input file for a multi-client run and checks that it contains
 * exactly one line per configured client thread.
 * @throws IOException if writing or reading back the job input file fails
 */
@Ignore
@Test
public void testWriteInputFile() throws IOException {
  TestOptions opts = new PerformanceEvaluation.TestOptions();
  final int clients = 10;
  opts.setNumClientThreads(clients);
  opts.setPerClientRunRows(10);
  Path dir = PerformanceEvaluation.writeInputFile(HTU.getConfiguration(), opts, HTU.getDataTestDir());
  FileSystem fs = FileSystem.get(HTU.getConfiguration());
  Path p = new Path(dir, PerformanceEvaluation.JOB_INPUT_FILENAME);
  long len = fs.getFileStatus(p).getLen();
  assertTrue(len > 0);
  byte[] content = new byte[(int) len];
  // try-with-resources closes the input stream and the reader even when an assertion fails.
  // The original closed the stream by hand and leaked the BufferedReader entirely.
  try (FSDataInputStream dis = fs.open(p)) {
    dis.readFully(content);
    try (BufferedReader br =
        new BufferedReader(new InputStreamReader(new ByteArrayInputStream(content)))) {
      // One spec line is written per client; confirm we see exactly that many lines.
      int count = 0;
      while (br.readLine() != null) {
        count++;
      }
      assertEquals(clients, count);
    }
  }
}
Use of org.apache.hadoop.hbase.PerformanceEvaluation.TestOptions in the Apache HBase project: class TestPerformanceEvaluation, method testZipfian.
/**
 * Verify that enabling a zipfian value-size distribution yields varied value lengths rather
 * than a constant size: the sampled lengths must show real spread (non-degenerate stddev)
 * and a median that is neither degenerate nor pinned at the configured maximum.
 */
@Test
public void testZipfian() throws NoSuchMethodException, SecurityException, InstantiationException, IllegalAccessException, IllegalArgumentException, InvocationTargetException {
  TestOptions opts = new PerformanceEvaluation.TestOptions();
  opts.setValueZipf(true);
  final int valueSize = 1024;
  opts.setValueSize(valueSize);
  RandomReadTest rrt = new RandomReadTest(null, opts, null);
  // Histogram's Reservoir-taking constructor is not public; reach it via reflection so we can
  // back the histogram with a uniform reservoir large enough to retain every sample.
  Constructor<?> ctor = Histogram.class.getDeclaredConstructor(com.codahale.metrics.Reservoir.class);
  ctor.setAccessible(true);
  Histogram histogram = (Histogram) ctor.newInstance(new UniformReservoir(1024 * 500));
  for (int i = 0; i < 100; i++) {
    histogram.update(rrt.getValueLength(null));
  }
  Snapshot snapshot = histogram.getSnapshot();
  // Non-zero, non-unit spread means the generated lengths actually vary.
  // (A second assertTrue(snapshot.getStdDev() != 0) in the original was redundant with this.)
  double stddev = snapshot.getStdDev();
  assertTrue(stddev != 0 && stddev != 1.0);
  // Median should be a realistic sampled length: not 0, not 1, and not the configured maximum.
  double median = snapshot.getMedian();
  assertTrue(median != 0 && median != 1 && median != valueSize);
}
Use of org.apache.hadoop.hbase.PerformanceEvaluation.TestOptions in the Apache HBase project: class TestPerformanceEvaluation, method testSizeCalculation.
/**
 * Check that calculateRowsAndSize derives the per-client row count correctly from the
 * configured data size (in GB) and the number of client threads.
 */
@Test
public void testSizeCalculation() {
  TestOptions opts = new PerformanceEvaluation.TestOptions();
  // Default per-client row count when nothing else is configured.
  final int defaultRows = 1024 * 1024;
  opts = PerformanceEvaluation.calculateRowsAndSize(opts);
  assertEquals(defaultRows, opts.getPerClientRunRows());
  // A 2G size doubles the row count.
  opts.setSize(2.0f);
  opts = PerformanceEvaluation.calculateRowsAndSize(opts);
  assertEquals(defaultRows * 2, opts.getPerClientRunRows());
  // Two client threads split the rows evenly between them.
  opts.setNumClientThreads(2);
  opts = PerformanceEvaluation.calculateRowsAndSize(opts);
  assertEquals(defaultRows, opts.getPerClientRunRows());
  // Random value sizes average half of valueSize, so twice as many rows fit in the same size.
  opts.valueRandom = true;
  opts = PerformanceEvaluation.calculateRowsAndSize(opts);
  assertEquals(defaultRows * 2, opts.getPerClientRunRows());
}
Aggregations