Use of org.apache.hadoop.mapred.JobConf in project hadoop by apache.
In class TestPipes, method runNonPipedProgram:
/**
 * Run a map/reduce word count that does all of the map input and reduce
 * output directly rather than sending it back up to Java.
 * @param mr the mini MR cluster
 * @param dfs the DFS cluster
 * @param program the program to run
 * @param conf the base configuration to clone, or null to use the
 *             mini-cluster's default
 * @throws IOException
 */
static void runNonPipedProgram(MiniMRCluster mr, MiniDFSCluster dfs,
                               Path program, JobConf conf) throws IOException {
  // Clone the caller's configuration if one was given, otherwise start
  // from the mini-cluster's defaults.
  JobConf job;
  if (conf == null) {
    job = mr.createJobConf();
  } else {
    job = new JobConf(conf);
  }
  job.setInputFormat(WordCountInputFormat.class);
  FileSystem local = FileSystem.getLocal(job);
  Path testDir = new Path("file:" + System.getProperty("test.build.data"),
                          "pipes");
  Path inDir = new Path(testDir, "input");
  nonPipedOutDir = new Path(testDir, "output");
  Path wordExec = new Path("testing/bin/application");
  Path jobXml = new Path(testDir, "job.xml");
  // Stage the executable in DFS.
  {
    FileSystem fs = dfs.getFileSystem();
    fs.delete(wordExec.getParent(), true);
    fs.copyFromLocalFile(program, wordExec);
  }
  // Write two small input files on the local file system.
  DataOutputStream out = local.create(new Path(inDir, "part0"));
  out.writeBytes("i am a silly test\n");
  out.writeBytes("you are silly\n");
  out.writeBytes("i am a cat test\n");
  out.writeBytes("you is silly\n");
  out.writeBytes("i am a billy test\n");
  out.writeBytes("hello are silly\n");
  out.close();
  out = local.create(new Path(inDir, "part1"));
  out.writeBytes("mall world things drink java\n");
  out.writeBytes("hall silly cats drink java\n");
  out.writeBytes("all dogs bow wow\n");
  out.writeBytes("hello drink java\n");
  out.close();
  local.delete(nonPipedOutDir, true);
  local.mkdirs(nonPipedOutDir,
               new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.ALL));
  // Serialize the job configuration so Submitter can load it via -conf.
  out = local.create(jobXml);
  job.writeXml(out);
  out.close();
  System.err.println("About to run: Submitter -conf " + jobXml +
                     " -input " + inDir + " -output " + nonPipedOutDir +
                     " -program " + dfs.getFileSystem().makeQualified(wordExec));
  try {
    int ret = ToolRunner.run(new Submitter(), new String[] {
        "-conf", jobXml.toString(),
        "-input", inDir.toString(),
        "-output", nonPipedOutDir.toString(),
        "-program", dfs.getFileSystem().makeQualified(wordExec).toString(),
        "-reduces", "2" });
    assertEquals(0, ret);
  } catch (Exception e) {
    assertTrue("got exception: " + StringUtils.stringifyException(e), false);
  }
}
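The round trip that matters here is JobConf to job.xml to Submitter: writeXml serializes the configuration, and the -conf flag loads it back into the submitted job. A minimal standalone sketch of that serialization; the output path and reduce count are placeholders, not part of the test:

import java.io.FileOutputStream;
import java.io.OutputStream;
import org.apache.hadoop.mapred.JobConf;

public class JobXmlRoundTrip {
  public static void main(String[] args) throws Exception {
    JobConf job = new JobConf();
    job.setNumReduceTasks(2); // mirrors the "-reduces 2" flag above
    // Configuration.writeXml emits every key/value pair as XML, which
    // "Submitter -conf /tmp/job.xml" can read back.
    try (OutputStream xml = new FileOutputStream("/tmp/job.xml")) {
      job.writeXml(xml);
    }
  }
}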
Use of org.apache.hadoop.mapred.JobConf in project hadoop by apache.
In class TestPipesNonJavaInputFormat, method testFormat:
/**
 * Test PipesNonJavaInputFormat.
 */
@Test
public void testFormat() throws IOException {
  PipesNonJavaInputFormat inputFormat = new PipesNonJavaInputFormat();
  JobConf conf = new JobConf();
  Reporter reporter = mock(Reporter.class);
  RecordReader<FloatWritable, NullWritable> reader =
      inputFormat.getRecordReader(new FakeSplit(), conf, reporter);
  assertEquals(0.0f, reader.getProgress(), 0.001);
  // Create the input files.
  File input1 = new File(workSpace + File.separator + "input1");
  if (!input1.getParentFile().exists()) {
    Assert.assertTrue(input1.getParentFile().mkdirs());
  }
  if (!input1.exists()) {
    Assert.assertTrue(input1.createNewFile());
  }
  File input2 = new File(workSpace + File.separator + "input2");
  if (!input2.exists()) {
    Assert.assertTrue(input2.createNewFile());
  }
  // Point the input format at both files; one split per file is expected.
  conf.set(org.apache.hadoop.mapreduce.lib.input.FileInputFormat.INPUT_DIR,
           StringUtils.escapeString(input1.getAbsolutePath()) + ","
           + StringUtils.escapeString(input2.getAbsolutePath()));
  InputSplit[] splits = inputFormat.getSplits(conf, 2);
  assertEquals(2, splits.length);
  PipesNonJavaInputFormat.PipesDummyRecordReader dummyRecordReader =
      new PipesNonJavaInputFormat.PipesDummyRecordReader(conf, splits[0]);
  // A fresh dummy reader has no keys, no values, and zero progress.
  assertNull(dummyRecordReader.createKey());
  assertNull(dummyRecordReader.createValue());
  assertEquals(0, dummyRecordReader.getPos());
  assertEquals(0.0, dummyRecordReader.getProgress(), 0.001);
  // next() records the float key as the reader's progress.
  assertTrue(dummyRecordReader.next(new FloatWritable(2.0f), NullWritable.get()));
  assertEquals(2.0, dummyRecordReader.getProgress(), 0.001);
  dummyRecordReader.close();
}
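Setting FileInputFormat.INPUT_DIR by hand means escaping each path so that commas inside a path survive the comma-separated list. The old-API FileInputFormat.setInputPaths helper performs the same escape-and-join; a minimal sketch, with the two paths as placeholders:

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapred.FileInputFormat;
import org.apache.hadoop.mapred.JobConf;

public class InputPathSetup {
  public static void main(String[] args) {
    JobConf conf = new JobConf();
    // Escapes and joins the paths just as the test does manually with
    // StringUtils.escapeString.
    FileInputFormat.setInputPaths(conf,
        new Path("/tmp/input1"), new Path("/tmp/input2"));
    System.out.println(conf.get(
        org.apache.hadoop.mapreduce.lib.input.FileInputFormat.INPUT_DIR));
  }
}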
Use of org.apache.hadoop.mapred.JobConf in project hadoop by apache.
In class TestOutputFormat, method testJobSubmission:
@Test
public void testJobSubmission() throws Exception {
  JobConf conf = new JobConf();
  Job job = new Job(conf);
  job.setInputFormatClass(TestInputFormat.class);
  job.setMapperClass(TestMapper.class);
  job.setOutputFormatClass(TestOutputFormat.class);
  job.setOutputKeyClass(IntWritable.class);
  job.setOutputValueClass(IntWritable.class);
  job.waitForCompletion(true);
  assertTrue(job.isSuccessful());
}
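The snippet seeds a new-API Job from an old-API JobConf, which works because JobConf extends Configuration. The Job(Configuration) constructor it uses is deprecated; a sketch of the same setup with the non-deprecated factory method (the job name is illustrative):

import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapreduce.Job;

public class JobFromJobConf {
  public static void main(String[] args) throws Exception {
    JobConf conf = new JobConf();
    // Job.getInstance copies the JobConf's settings into a new-API job
    // without going through the deprecated Job(Configuration) constructor.
    Job job = Job.getInstance(conf);
    job.setJobName("example"); // placeholder name
  }
}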
Use of org.apache.hadoop.mapred.JobConf in project hadoop by apache.
In class TestConstructQuery, method testSetOutput:
@Test
public void testSetOutput() throws IOException {
  JobConf job = new JobConf();
  DBOutputFormat.setOutput(job, "hadoop_output", fieldNames);
  DBConfiguration dbConf = new DBConfiguration(job);
  String actual = format.constructQuery(dbConf.getOutputTableName(),
                                        dbConf.getOutputFieldNames());
  assertEquals(expected, actual);
  // With only a field count, no field names are recorded in the
  // configuration.
  job = new JobConf();
  dbConf = new DBConfiguration(job);
  DBOutputFormat.setOutput(job, "hadoop_output", nullFieldNames.length);
  assertNull(dbConf.getOutputFieldNames());
  assertEquals(nullFieldNames.length, dbConf.getOutputFieldCount());
  actual = format.constructQuery(dbConf.getOutputTableName(),
                                 new String[dbConf.getOutputFieldCount()]);
  assertEquals(nullExpected, actual);
}
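For context, constructQuery builds a parameterized INSERT statement: given field names it emits a column list, and given a null-filled array it emits placeholders only. A sketch of both shapes, using illustrative field names rather than the test's fixture values:

import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.mapreduce.lib.db.DBOutputFormat;
import org.apache.hadoop.mapreduce.lib.db.DBWritable;

public class ConstructQueryDemo {
  public static void main(String[] args) {
    DBOutputFormat<DBWritable, NullWritable> format = new DBOutputFormat<>();
    // With explicit field names the column list is included, e.g.
    // INSERT INTO hadoop_output (id,name,value) VALUES (?,?,?);
    System.out.println(format.constructQuery("hadoop_output",
        new String[] { "id", "name", "value" })); // illustrative names
    // With a null-filled array only the placeholder count matters, e.g.
    // INSERT INTO hadoop_output VALUES (?,?,?);
    System.out.println(format.constructQuery("hadoop_output", new String[3]));
  }
}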
Use of org.apache.hadoop.mapred.JobConf in project hadoop by apache.
In class TestPipeApplication, method testPipesPartitioner:
/**
 * Test PipesPartitioner: set a partition into its cache and read it
 * back through getPartition.
 */
@Test
public void testPipesPartitioner() {
  PipesPartitioner<IntWritable, Text> partitioner =
      new PipesPartitioner<IntWritable, Text>();
  JobConf configuration = new JobConf();
  Submitter.getJavaPartitioner(configuration);
  partitioner.configure(new JobConf());
  IntWritable iw = new IntWritable(4);
  // With an empty cache the configured Java partitioner decides.
  assertEquals(0, partitioner.getPartition(iw, new Text("test"), 2));
  // Set a partition into the cache ...
  PipesPartitioner.setNextPartition(3);
  // ... and the next call returns it.
  assertEquals(3, partitioner.getPartition(iw, new Text("test"), 2));
}
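What the test exercises is a thread-local override: setNextPartition records the partition chosen on the non-Java side, and getPartition returns it instead of consulting the configured Java partitioner. A simplified model of that behavior, with invented names; this is not the actual Hadoop source:

import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.Partitioner;

// Simplified model of PipesPartitioner's caching behavior.
class CachingPartitioner<K, V> implements Partitioner<K, V> {
  private static final ThreadLocal<Integer> CACHE = new ThreadLocal<Integer>();
  private Partitioner<K, V> javaPartitioner; // fallback, set in configure()

  static void setNextPartition(int part) {
    CACHE.set(part); // the partition pushed from the non-Java side
  }

  @Override
  public int getPartition(K key, V value, int numPartitions) {
    Integer forced = CACHE.get();
    return forced != null
        ? forced
        : javaPartitioner.getPartition(key, value, numPartitions);
  }

  @Override
  public void configure(JobConf conf) {
    // A real implementation would instantiate the partitioner class named
    // in the job configuration here; omitted in this sketch.
  }
}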