Use of org.apache.hadoop.mapred.JobConf in project hadoop by apache.
The class TestHSAdminServer, method init:
@Before
public void init() throws HadoopIllegalArgumentException, IOException {
  conf = new JobConf();
  conf.set(JHAdminConfig.JHS_ADMIN_ADDRESS, "0.0.0.0:0");
  conf.setClass("hadoop.security.group.mapping", MockUnixGroupsMapping.class,
      GroupMappingServiceProvider.class);
  conf.setLong("hadoop.security.groups.cache.secs", groupRefreshTimeoutSec);
  conf.setBoolean(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHORIZATION,
      securityEnabled);
  Groups.getUserToGroupsMappingService(conf);
  jobHistoryService = mock(JobHistory.class);
  alds = mock(AggregatedLogDeletionService.class);
  hsAdminServer = new HSAdminServer(alds, jobHistoryService) {

    @Override
    protected Configuration createConf() {
      return conf;
    }
  };
  hsAdminServer.init(conf);
  hsAdminServer.start();
  conf.setSocketAddr(JHAdminConfig.JHS_ADMIN_ADDRESS,
      hsAdminServer.clientRpcServer.getListenerAddress());
  hsAdminClient = new HSAdmin(conf);
}
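The fields set here (conf, hsAdminServer, hsAdminClient) are members of the test class. A minimal sketch of how a test might then exercise the wired-up client through the Tool interface; the -refreshUserToGroupsMappings command and the expectation of a zero exit code are stated as assumptions for illustration, not taken from this init() method:

// Hedged sketch (not part of init()): issue an admin command via the client.
// HSAdmin implements Tool, so commands are passed as command-line style args.
String[] args = new String[] { "-refreshUserToGroupsMappings" };
int exitCode = hsAdminClient.run(args);
assertEquals(0, exitCode);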
Use of org.apache.hadoop.mapred.JobConf in project hadoop by apache.
The class TestHsWebServicesAcls, method setup:
@Before
public void setup() throws IOException {
  this.conf = new JobConf();
  this.conf.set(CommonConfigurationKeys.HADOOP_SECURITY_GROUP_MAPPING,
      NullGroupsProvider.class.getName());
  this.conf.setBoolean(MRConfig.MR_ACLS_ENABLED, true);
  Groups.getUserToGroupsMappingService(conf);
  this.ctx = buildHistoryContext(this.conf);
  WebApp webApp = mock(HsWebApp.class);
  when(webApp.name()).thenReturn("hsmockwebapp");
  this.hsWebServices = new HsWebServices(ctx, conf, webApp);
  this.hsWebServices.setResponse(mock(HttpServletResponse.class));
  Job job = ctx.getAllJobs().values().iterator().next();
  this.jobIdStr = job.getID().toString();
  Task task = job.getTasks().values().iterator().next();
  this.taskIdStr = task.getID().toString();
  this.taskAttemptIdStr = task.getAttempts().keySet().iterator().next().toString();
}
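This setup wires HsWebServices to a mocked history context with MR ACLs enabled. A minimal sketch of how an ACL check might then be exercised; the getJob call, the "enemyUser" name, and the expected rejection are assumptions for illustration rather than code taken from this class:

// Hedged sketch (not part of setup()): call an endpoint as a user who is not
// in the job's ACLs and expect the web layer to reject the request.
HttpServletRequest hsr = mock(HttpServletRequest.class);
when(hsr.getRemoteUser()).thenReturn("enemyUser");
try {
  hsWebServices.getJob(hsr, jobIdStr);
  fail("a user outside the ACLs should have been rejected");
} catch (WebApplicationException expected) {
  // the exact HTTP status (401 vs. 403) depends on the ACL check implementation
}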
Use of org.apache.hadoop.mapred.JobConf in project hadoop by apache.
The class TestChainMapReduce, method testChain:
@Test
public void testChain() throws Exception {
  Path inDir = new Path("testing/chain/input");
  Path outDir = new Path("testing/chain/output");
  // Hack for local FS that does not have the concept of a 'mounting point'
  if (isLocalFS()) {
    String localPathRoot = System.getProperty("test.build.data", "/tmp")
        .replace(' ', '+');
    inDir = new Path(localPathRoot, inDir);
    outDir = new Path(localPathRoot, outDir);
  }
  JobConf conf = createJobConf();
  conf.setBoolean("localFS", isLocalFS());
  conf.setInt("mapreduce.job.maps", 1);
  cleanFlags(conf);
  FileSystem fs = FileSystem.get(conf);
  fs.delete(outDir, true);
  if (!fs.mkdirs(inDir)) {
    throw new IOException("Mkdirs failed to create " + inDir.toString());
  }
  DataOutputStream file = fs.create(new Path(inDir, "part-0"));
  file.writeBytes("1\n2\n");
  file.close();
  conf.setJobName("chain");
  conf.setInputFormat(TextInputFormat.class);
  conf.setOutputFormat(TextOutputFormat.class);
  conf.set("a", "X");
  JobConf mapAConf = new JobConf(false);
  mapAConf.set("a", "A");
  ChainMapper.addMapper(conf, AMap.class, LongWritable.class, Text.class,
      LongWritable.class, Text.class, true, mapAConf);
  ChainMapper.addMapper(conf, BMap.class, LongWritable.class, Text.class,
      LongWritable.class, Text.class, false, null);
  JobConf reduceConf = new JobConf(false);
  reduceConf.set("a", "C");
  ChainReducer.setReducer(conf, CReduce.class, LongWritable.class, Text.class,
      LongWritable.class, Text.class, true, reduceConf);
  ChainReducer.addMapper(conf, DMap.class, LongWritable.class, Text.class,
      LongWritable.class, Text.class, false, null);
  JobConf mapEConf = new JobConf(false);
  mapEConf.set("a", "E");
  ChainReducer.addMapper(conf, EMap.class, LongWritable.class, Text.class,
      LongWritable.class, Text.class, true, mapEConf);
  FileInputFormat.setInputPaths(conf, inDir);
  FileOutputFormat.setOutputPath(conf, outDir);
  JobClient jc = new JobClient(conf);
  RunningJob job = jc.submitJob(conf);
  while (!job.isComplete()) {
    Thread.sleep(100);
  }
  assertTrue(getFlag(conf, "configure.A"));
  assertTrue(getFlag(conf, "configure.B"));
  assertTrue(getFlag(conf, "configure.C"));
  assertTrue(getFlag(conf, "configure.D"));
  assertTrue(getFlag(conf, "configure.E"));
  assertTrue(getFlag(conf, "map.A.value.1"));
  assertTrue(getFlag(conf, "map.A.value.2"));
  assertTrue(getFlag(conf, "map.B.value.1"));
  assertTrue(getFlag(conf, "map.B.value.2"));
  assertTrue(getFlag(conf, "reduce.C.value.2"));
  assertTrue(getFlag(conf, "reduce.C.value.1"));
  assertTrue(getFlag(conf, "map.D.value.1"));
  assertTrue(getFlag(conf, "map.D.value.2"));
  assertTrue(getFlag(conf, "map.E.value.1"));
  assertTrue(getFlag(conf, "map.E.value.2"));
  assertTrue(getFlag(conf, "close.A"));
  assertTrue(getFlag(conf, "close.B"));
  assertTrue(getFlag(conf, "close.C"));
  assertTrue(getFlag(conf, "close.D"));
  assertTrue(getFlag(conf, "close.E"));
}
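The per-element JobConf(false) objects are the central pattern here: settings placed in them (the "a" key) are visible only to the chain element they are passed to, while the outer conf is shared by the whole job. A minimal driver sketch of the same wiring with the test harness stripped away; the paths and the AMap/CReduce classes are reused from the test, the flag bookkeeping is deliberately omitted, so treat this as an illustrative reduction rather than the test itself:

JobConf job = new JobConf();
job.setJobName("chain");
job.setInputFormat(TextInputFormat.class);
job.setOutputFormat(TextOutputFormat.class);

// Private configuration for the first mapper in the chain only.
JobConf mapAConf = new JobConf(false);
mapAConf.set("a", "A");
ChainMapper.addMapper(job, AMap.class, LongWritable.class, Text.class,
    LongWritable.class, Text.class, true, mapAConf);

// The reducer (and any mappers added after it) run in the reduce task.
ChainReducer.setReducer(job, CReduce.class, LongWritable.class, Text.class,
    LongWritable.class, Text.class, false, null);

FileInputFormat.setInputPaths(job, new Path("testing/chain/input"));
FileOutputFormat.setOutputPath(job, new Path("testing/chain/output"));
// runJob blocks until completion, replacing the submitJob/isComplete polling loop.
JobClient.runJob(job);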
Use of org.apache.hadoop.mapred.JobConf in project hadoop by apache.
The class TestDelegatingInputFormat, method testSplitting:
@Test
public void testSplitting() throws Exception {
  JobConf conf = new JobConf();
  MiniDFSCluster dfs = null;
  try {
    dfs = new MiniDFSCluster.Builder(conf).numDataNodes(4)
        .racks(new String[] { "/rack0", "/rack0", "/rack1", "/rack1" })
        .hosts(new String[] { "host0", "host1", "host2", "host3" })
        .build();
    FileSystem fs = dfs.getFileSystem();
    Path path = getPath("/foo/bar", fs);
    Path path2 = getPath("/foo/baz", fs);
    Path path3 = getPath("/bar/bar", fs);
    Path path4 = getPath("/bar/baz", fs);
    final int numSplits = 100;
    MultipleInputs.addInputPath(conf, path, TextInputFormat.class, MapClass.class);
    MultipleInputs.addInputPath(conf, path2, TextInputFormat.class, MapClass2.class);
    MultipleInputs.addInputPath(conf, path3, KeyValueTextInputFormat.class, MapClass.class);
    MultipleInputs.addInputPath(conf, path4, TextInputFormat.class, MapClass2.class);
    DelegatingInputFormat inFormat = new DelegatingInputFormat();
    InputSplit[] splits = inFormat.getSplits(conf, numSplits);
    int[] bins = new int[3];
    for (InputSplit split : splits) {
      assertTrue(split instanceof TaggedInputSplit);
      final TaggedInputSplit tis = (TaggedInputSplit) split;
      int index = -1;
      if (tis.getInputFormatClass().equals(KeyValueTextInputFormat.class)) {
        // path3
        index = 0;
      } else if (tis.getMapperClass().equals(MapClass.class)) {
        // path
        index = 1;
      } else {
        // path2 and path4
        index = 2;
      }
      bins[index]++;
    }
    // Each bin is a unique (InputFormat, Mapper) combination and should have
    // received numSplits splits, regardless of the number of paths that use
    // that Mapper/InputFormat
    for (int count : bins) {
      assertEquals(numSplits, count);
    }
    assertTrue(true);
  } finally {
    if (dfs != null) {
      dfs.shutdown();
    }
  }
}
Use of org.apache.hadoop.mapred.JobConf in project hadoop by apache.
The class TestMultipleInputs, method testAddInputPathWithMapper:
@Test
public void testAddInputPathWithMapper() {
  final JobConf conf = new JobConf();
  MultipleInputs.addInputPath(conf, new Path("/foo"), TextInputFormat.class,
      MapClass.class);
  MultipleInputs.addInputPath(conf, new Path("/bar"), KeyValueTextInputFormat.class,
      MapClass2.class);
  final Map<Path, InputFormat> inputs = MultipleInputs.getInputFormatMap(conf);
  final Map<Path, Class<? extends Mapper>> maps = MultipleInputs.getMapperTypeMap(conf);
  assertEquals(TextInputFormat.class, inputs.get(new Path("/foo")).getClass());
  assertEquals(KeyValueTextInputFormat.class, inputs.get(new Path("/bar")).getClass());
  assertEquals(MapClass.class, maps.get(new Path("/foo")));
  assertEquals(MapClass2.class, maps.get(new Path("/bar")));
}
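The two getter maps verified above are populated by addInputPath; in a real job those same addInputPath calls are typically all the per-input wiring needed, since MultipleInputs records the path-to-format and path-to-mapper associations in the JobConf and routes each path accordingly at runtime. A minimal driver sketch; MyReducer and the output path are placeholders for illustration, not part of the test:

JobConf conf = new JobConf();
conf.setJobName("multiple-inputs");
// One InputFormat/Mapper pair per input path.
MultipleInputs.addInputPath(conf, new Path("/foo"), TextInputFormat.class, MapClass.class);
MultipleInputs.addInputPath(conf, new Path("/bar"), KeyValueTextInputFormat.class, MapClass2.class);
// Placeholder reducer and output path, not from the original test.
conf.setReducerClass(MyReducer.class);
FileOutputFormat.setOutputPath(conf, new Path("/out"));
JobClient.runJob(conf);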