use of io.cdap.cdap.api.dataset.lib.KeyValueTable in project cdap by cdapio.
the class MapReduceProgramRunnerTest method testMapreduceWithFile.
private void testMapreduceWithFile(String inputDatasetName, String inputPaths,
                                   String outputDatasetName, String outputPath,
                                   Class appClass, Class mrClass,
                                   Map<String, String> extraRuntimeArgs,
                                   @Nullable final String counterTableName,
                                   @Nullable final String outputSeparator) throws Exception {
  final ApplicationWithPrograms app =
    deployApp(appClass, new AppWithMapReduceUsingFileSet.AppConfig(inputDatasetName, outputDatasetName));
  Map<String, String> runtimeArguments = Maps.newHashMap();
  Map<String, String> inputArgs = Maps.newHashMap();
  Map<String, String> outputArgs = Maps.newHashMap();
  FileSetArguments.setInputPaths(inputArgs, inputPaths);
  FileSetArguments.setOutputPath(outputArgs, outputPath);
  if (outputSeparator != null) {
    // configure the separator TextOutputFormat puts between key and value; it must match the split below
    outputArgs.put(FileSetProperties.OUTPUT_PROPERTIES_PREFIX + TextOutputFormat.SEPERATOR, outputSeparator);
  }
  runtimeArguments.putAll(RuntimeArguments.addScope(Scope.DATASET, inputDatasetName, inputArgs));
  runtimeArguments.putAll(RuntimeArguments.addScope(Scope.DATASET, outputDatasetName, outputArgs));
  if (extraRuntimeArgs != null) {
    runtimeArguments.putAll(extraRuntimeArgs);
  }
  // clear the counters in case a previous test case left behind some values
  if (counterTableName != null) {
    Transactions.execute(datasetCache.newTransactionContext(), "countersVerify", () -> {
      KeyValueTable counters = datasetCache.getDataset(counterTableName);
      counters.delete(AppWithMapReduceUsingRuntimeDatasets.INPUT_RECORDS);
      counters.delete(AppWithMapReduceUsingRuntimeDatasets.REDUCE_KEYS);
    });
  }
  // write a handful of numbers to a file; compute their sum, too
  final long[] values = { 15L, 17L, 7L, 3L };
  final FileSet input = datasetCache.getDataset(inputDatasetName, inputArgs);
  long sum = 0L, count = 1;
  long inputRecords = 0;
  for (Location inputLocation : input.getInputLocations()) {
    final PrintWriter writer = new PrintWriter(inputLocation.getOutputStream());
    for (long value : values) {
      value *= count;
      writer.println(value);
      sum += value;
      inputRecords++;
    }
    writer.close();
    count++;
  }
  runProgram(app, mrClass, new BasicArguments(runtimeArguments));
  // output location in file system is a directory that contains a part file, a _SUCCESS file, and checksums
  // (.<filename>.crc) for these files. Find the actual part file. Its name begins with "part". In this case,
  // there should be only one part file (with this small data, we have a single reducer).
  final FileSet results = datasetCache.getDataset(outputDatasetName, outputArgs);
  Location resultLocation = results.getOutputLocation();
  if (resultLocation.isDirectory()) {
    for (Location child : resultLocation.list()) {
      if (!child.isDirectory() && child.getName().startsWith("part")) {
        resultLocation = child;
        break;
      }
    }
  }
  Assert.assertFalse(resultLocation.isDirectory());
  // read output and verify result
  String line = CharStreams.readFirstLine(
    CharStreams.newReaderSupplier(Locations.newInputSupplier(resultLocation), Charsets.UTF_8));
  Assert.assertNotNull(line);
  String[] fields = line.split(outputSeparator == null ? ":" : outputSeparator);
  Assert.assertEquals(2, fields.length);
  Assert.assertEquals(AppWithMapReduceUsingFileSet.FileMapper.ONLY_KEY, fields[0]);
  Assert.assertEquals(sum, Long.parseLong(fields[1]));
  if (counterTableName != null) {
    final long totalInputRecords = inputRecords;
    Transactions.execute(datasetCache.newTransactionContext(), "countersVerify", () -> {
      KeyValueTable counters = datasetCache.getDataset(counterTableName);
      Assert.assertEquals(totalInputRecords,
                          counters.incrementAndGet(AppWithMapReduceUsingRuntimeDatasets.INPUT_RECORDS, 0L));
      Assert.assertEquals(1L,
                          counters.incrementAndGet(AppWithMapReduceUsingRuntimeDatasets.REDUCE_KEYS, 0L));
    });
  }
}
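For context, a caller drives this helper with concrete names and paths. A minimal sketch of such an invocation; the dataset names and paths are illustrative, and the app and MapReduce classes are assumed to match what the test suite deploys:

  // hypothetical invocation: run the file-based MapReduce with a "#" output separator
  testMapreduceWithFile("numbers", "inputFile", "sums", "output",
                        AppWithMapReduceUsingFileSet.class,
                        AppWithMapReduceUsingFileSet.ComputeSum.class,
                        null,   // no extra runtime arguments
                        null,   // no counter table to verify
                        "#");   // separator between key and value in the output file

When outputSeparator is null, the helper splits the output line on ":", presumably because the deployed app configures that as its default separator.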
use of io.cdap.cdap.api.dataset.lib.KeyValueTable in project cdap by cdapio.
the class MapReduceProgramRunnerTest method testMapReduceWithLocalFiles.
@Test
public void testMapReduceWithLocalFiles() throws Exception {
  ApplicationWithPrograms appWithPrograms = deployApp(AppWithLocalFiles.class);
  URI stopWordsFile = createStopWordsFile();
  final KeyValueTable kvTable = datasetCache.getDataset(AppWithLocalFiles.MR_INPUT_DATASET);
  Transactions.createTransactionExecutor(txExecutorFactory, kvTable).execute(new TransactionExecutor.Subroutine() {
    @Override
    public void apply() {
      kvTable.write("2324", "a test record");
      kvTable.write("43353", "the test table");
      kvTable.write("34335", "an end record");
    }
  });
  runProgram(appWithPrograms, AppWithLocalFiles.MapReduceWithLocalFiles.class,
             new BasicArguments(ImmutableMap.of(
               AppWithLocalFiles.MR_INPUT_DATASET, "input",
               AppWithLocalFiles.MR_OUTPUT_DATASET, "output",
               AppWithLocalFiles.STOPWORDS_FILE_ARG, stopWordsFile.toString())));
  final KeyValueTable outputKvTable = datasetCache.getDataset(AppWithLocalFiles.MR_OUTPUT_DATASET);
  Transactions.createTransactionExecutor(txExecutorFactory, outputKvTable).execute(new TransactionExecutor.Subroutine() {
    @Override
    public void apply() {
      Assert.assertNull(outputKvTable.read("a"));
      Assert.assertNull(outputKvTable.read("the"));
      Assert.assertNull(outputKvTable.read("an"));
      Assert.assertEquals(2, Bytes.toInt(outputKvTable.read("test")));
      Assert.assertEquals(2, Bytes.toInt(outputKvTable.read("record")));
      Assert.assertEquals(1, Bytes.toInt(outputKvTable.read("table")));
      Assert.assertEquals(1, Bytes.toInt(outputKvTable.read("end")));
    }
  });
}
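The anonymous TransactionExecutor.Subroutine classes above predate Java 8. Since Subroutine has a single apply() method, the same transactional write can be supplied as a lambda, as the Transactions.execute(...) calls in the first example do. A minimal sketch, assuming the same kvTable and txExecutorFactory as above:

  // same three writes, with the Subroutine supplied as a lambda
  Transactions.createTransactionExecutor(txExecutorFactory, kvTable).execute(() -> {
    kvTable.write("2324", "a test record");
    kvTable.write("43353", "the test table");
    kvTable.write("34335", "an end record");
  });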
use of io.cdap.cdap.api.dataset.lib.KeyValueTable in project cdap by cdapio.
the class MapReduceProgramRunnerTest method testFailure.
// TODO: this tests failure in Map tasks. We also need to test: failure in Reduce task, kill of a job by user.
private void testFailure(boolean frequentFlushing) throws Exception {
  // We want to verify that when a mapreduce job fails:
  // * things written in initialize() remain and are visible to others
  // * things written in tasks are not visible to others (TODO AAA: do invalidate)
  // * things written in onFinish() remain and are visible to others
  // NOTE: the code of this test is similar to the testTimeSeriesRecordsCount() test. We intentionally put
  // some "bad data" here that the map tasks recognize as a signal to emulate failure.
  final ApplicationWithPrograms app = deployApp(AppWithMapReduce.class);
  // we need to start a tx context and do a "get" on all datasets so that they are in datasetCache
  datasetCache.newTransactionContext();
  final TimeseriesTable table = datasetCache.getDataset("timeSeries");
  final KeyValueTable beforeSubmitTable = datasetCache.getDataset("beforeSubmit");
  final KeyValueTable onFinishTable = datasetCache.getDataset("onFinish");
  final Table counters = datasetCache.getDataset("counters");
  final Table countersFromContext = datasetCache.getDataset("countersFromContext");
  // 1) fill test data
  fillTestInputData(txExecutorFactory, table, true);
  // 2) run job
  final long start = System.currentTimeMillis();
  runProgram(app, AppWithMapReduce.AggregateTimeseriesByTag.class, frequentFlushing, false);
  final long stop = System.currentTimeMillis();
  // 3) verify results
  Transactions.createTransactionExecutor(txExecutorFactory, datasetCache.getTransactionAwares())
    .execute(new TransactionExecutor.Subroutine() {
      @Override
      public void apply() {
        // data should be rolled back (todo: test that partially written data is rolled back too)
        Assert.assertFalse(table.read(AggregateMetricsByTag.BY_TAGS, start, stop).hasNext());
        // but data written in beforeSubmit and onFinish is available to others
        Assert.assertArrayEquals(Bytes.toBytes("beforeSubmit:done"),
                                 beforeSubmitTable.read(Bytes.toBytes("beforeSubmit")));
        Assert.assertArrayEquals(Bytes.toBytes("onFinish:done"),
                                 onFinishTable.read(Bytes.toBytes("onFinish")));
        Assert.assertEquals(0, counters.get(new Get("mapper")).getLong("records", 0));
        Assert.assertEquals(0, counters.get(new Get("reducer")).getLong("records", 0));
        Assert.assertEquals(0, countersFromContext.get(new Get("mapper")).getLong("records", 0));
        Assert.assertEquals(0, countersFromContext.get(new Get("reducer")).getLong("records", 0));
      }
    });
  datasetCache.dismissTransactionContext();
}
use of io.cdap.cdap.api.dataset.lib.KeyValueTable in project cdap by cdapio.
the class MapReduceProgramRunnerTest method testWordCount.
@Test
public void testWordCount() throws Exception {
  // deploy to namespace default by default
  final ApplicationWithPrograms app = deployApp(AppWithMapReduce.class);
  final String inputPath = createInput();
  final java.io.File outputDir = new java.io.File(TEMP_FOLDER.newFolder(), "output");
  try {
    datasetCache.getDataset("someOtherNameSpace", "jobConfig");
    Assert.fail("getDataset() should throw an exception when accessing a non-existing dataset.");
  } catch (DatasetInstantiationException e) {
    // expected
  }
  // Should work if we explicitly specify the default namespace
  final KeyValueTable jobConfigTable = datasetCache.getDataset(NamespaceId.DEFAULT.getNamespace(), "jobConfig");
  // write config into dataset
  Transactions.createTransactionExecutor(txExecutorFactory, jobConfigTable).execute(new TransactionExecutor.Subroutine() {
    @Override
    public void apply() {
      jobConfigTable.write(Bytes.toBytes("inputPath"), Bytes.toBytes(inputPath));
      jobConfigTable.write(Bytes.toBytes("outputPath"), Bytes.toBytes(outputDir.getPath()));
    }
  });
  runProgram(app, AppWithMapReduce.ClassicWordCount.class, false, true);
  Assert.assertEquals("true", System.getProperty("partitioner.initialize"));
  Assert.assertEquals("true", System.getProperty("partitioner.destroy"));
  Assert.assertEquals("true", System.getProperty("partitioner.set.conf"));
  Assert.assertEquals("true", System.getProperty("comparator.initialize"));
  Assert.assertEquals("true", System.getProperty("comparator.destroy"));
  Assert.assertEquals("true", System.getProperty("comparator.set.conf"));
  File[] outputFiles = outputDir.listFiles(new FilenameFilter() {
    @Override
    public boolean accept(File dir, String name) {
      return name.startsWith("part-r-") && !name.endsWith(".crc");
    }
  });
  Assert.assertNotNull("no output files found", outputFiles);
  int lines = 0;
  for (File file : outputFiles) {
    lines += Files.readLines(file, Charsets.UTF_8).size();
  }
  // dummy check that output file is not empty
  Assert.assertTrue(lines > 0);
}
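FilenameFilter is also a single-method interface, so on Java 8+ the part-file filter can be written as a lambda. A minimal equivalent of the anonymous class above, using the same outputDir:

  // keep reducer output files, skip their .crc checksum companions
  File[] outputFiles = outputDir.listFiles((dir, name) -> name.startsWith("part-r-") && !name.endsWith(".crc"));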
use of io.cdap.cdap.api.dataset.lib.KeyValueTable in project cdap by cdapio.
the class MapReduceProgramRunnerTest method testFailureInOutputCommitter.
@Test
public void testFailureInOutputCommitter() throws Exception {
  final ApplicationWithPrograms app = deployApp(AppWithMapReduce.class);
  // We want to verify that when a mapreduce fails while committing its dataset outputs,
  // the destroy() method is still called and its writes are committed.
  // (1) setup the datasets we use
  datasetCache.newTransactionContext();
  final KeyValueTable kvTable = datasetCache.getDataset("recorder");
  Transactions.createTransactionExecutor(txExecutorFactory, datasetCache.getTransactionAwares()).execute(new TransactionExecutor.Subroutine() {
    @Override
    public void apply() {
      // the table should not have initialized=true
      kvTable.write("initialized", "false");
    }
  });
  // (2) run job
  runProgram(app, AppWithMapReduce.MapReduceWithFailingOutputCommitter.class, new HashMap<String, String>(), false);
  // (3) verify results
  Transactions.createTransactionExecutor(txExecutorFactory, datasetCache.getTransactionAwares()).execute(new TransactionExecutor.Subroutine() {
    @Override
    public void apply() {
      // the destroy() method should have recorded FAILED status in the kv table
      Assert.assertEquals(ProgramStatus.FAILED.name(), Bytes.toString(kvTable.read("status")));
    }
  });
  datasetCache.dismissTransactionContext();
}