Use of org.apache.hadoop.mapred.InvalidJobConfException in the project hadoop by apache, from the class TestMRSequenceFileAsBinaryOutputFormat, method testcheckOutputSpecsForbidRecordCompression.
/**
 * Verifies that SequenceFileAsBinaryOutputFormat accepts BLOCK output
 * compression but rejects RECORD compression: checkOutputSpecs() must
 * succeed for BLOCK and throw InvalidJobConfException for RECORD.
 *
 * @throws IOException if local-filesystem setup fails
 */
@Test
public void testcheckOutputSpecsForbidRecordCompression() throws IOException {
  Job job = Job.getInstance();
  FileSystem fs = FileSystem.getLocal(job.getConfiguration());
  Path outputdir = new Path(System.getProperty("test.build.data", "/tmp") + "/output");
  fs.delete(outputdir, true);
  // Without an output path, FileOutputFormat.checkOutputSpecs would throw
  // InvalidJobConfException for the wrong reason, so set it first.
  FileOutputFormat.setOutputPath(job, outputdir);
  // BLOCK compression is supported and must pass the spec check.
  SequenceFileAsBinaryOutputFormat.setCompressOutput(job, true);
  SequenceFileAsBinaryOutputFormat.setOutputCompressionType(job, CompressionType.BLOCK);
  try {
    new SequenceFileAsBinaryOutputFormat().checkOutputSpecs(job);
  } catch (Exception e) {
    // FIX: failure message previously read "...OutputFormat:Caught<name>"
    // with no separating space; message spacing corrected.
    fail("Block compression should be allowed for "
        + "SequenceFileAsBinaryOutputFormat: caught " + e.getClass().getName());
  }
  // RECORD compression is unsupported and must be rejected by the check.
  SequenceFileAsBinaryOutputFormat.setOutputCompressionType(job, CompressionType.RECORD);
  try {
    new SequenceFileAsBinaryOutputFormat().checkOutputSpecs(job);
    fail("Record compression should not be allowed for "
        + "SequenceFileAsBinaryOutputFormat");
  } catch (InvalidJobConfException ie) {
    // expected
  } catch (Exception e) {
    // FIX: failure message previously read "Expected <name>but caught";
    // missing space before "but caught" corrected.
    fail("Expected " + InvalidJobConfException.class.getName()
        + " but caught " + e.getClass().getName());
  }
}
Use of org.apache.hadoop.mapred.InvalidJobConfException in the project goldenorb by jzachr, from the class JobManager, method launchJob.
/**
 * Creates the per-job ZooKeeper bookkeeping nodes, asks the resource
 * allocator for OrbTracker capacity, and — when capacity is available —
 * sends a PartitionRequest to every tracker and starts job monitoring.
 * When the allocator returns no assignments the job is completed
 * immediately as "not enough capacity".
 *
 * @param job the job to launch; its job number keys the ZooKeeper paths
 */
private void launchJob(OrbJob job) {
try {
// Per-job ZooKeeper nodes: job root, partition-leader group, message
// area, and a heartbeat counter initialised to 0.
ZookeeperUtils.notExistCreateNode(zk, jobsInProgressPath + "/" + job.getJobNumber());
ZookeeperUtils.notExistCreateNode(zk, jobsInProgressPath + "/" + job.getJobNumber() + "/OrbPartitionLeaderGroup");
ZookeeperUtils.notExistCreateNode(zk, jobsInProgressPath + "/" + job.getJobNumber() + "/messages");
ZookeeperUtils.tryToCreateNode(zk, jobsInProgressPath + "/" + job.getJobNumber() + "/messages/heartbeat", new LongWritable(0), CreateMode.PERSISTENT);
// allocate resources and if enough, start the job
logger.info("checking for available OrbTracker resources");
Map<M, Integer[]> assignments = null;
try {
assignments = resourceAllocator.assignResources(job.getOrbConf());
} catch (InvalidJobConfException e) {
// An invalid job configuration is only logged; assignments stays null
// and the job falls through to the "not enough capacity" branch below.
logger.error(e.getMessage());
}
logger.info("Starting Job");
logger.info("********** Job {} started: {}", job.getJobNumber(), new Date().getTime());
if (assignments != null) {
logger.info("Allocating partitions");
// Partition IDs are handed out contiguously across trackers, each
// tracker's base offset by the previous trackers' available counts.
int basePartitionID = 0;
for (M tracker : orbTrackerMembers) {
logger.debug("OrbTracker - " + tracker.getHostname() + ":" + tracker.getPort());
Integer[] assignment = assignments.get(tracker);
tracker.initProxy(getOrbConf());
try {
logger.debug("jobConf().getHDFSdistributedFiles(): {}", job.getOrbConf().getHDFSdistributedFiles());
tracker.getRequiredFiles(job.getOrbConf());
} catch (OrbZKFailure e) {
// A tracker that cannot stage its files aborts the whole launch;
// rethrown and handled by the outer OrbZKFailure catch.
logger.error("EXCEPTION : An OrbTrackerMember failed to copy files from HDFS to local machine");
logger.error(e.getMessage());
throw e;
}
PartitionRequest request = new PartitionRequest();
request.setActivePartitions(assignment[ResourceAllocator.TRACKER_AVAILABLE]);
request.setReservedPartitions(assignment[ResourceAllocator.TRACKER_RESERVED]);
request.setJobID(job.getJobNumber());
request.setBasePartitionID(basePartitionID);
request.setJobConf(job.getOrbConf());
basePartitionID += assignment[ResourceAllocator.TRACKER_AVAILABLE];
logger.debug("requesting partitions");
tracker.requestPartitions(request);
logger.info(request.toString());
// NOTE(review): the job-level actions below (active-check thread,
// activeJobs registration, death-check, heartbeat) sit inside the
// per-tracker loop and therefore run once per tracker — confirm they
// were not meant to run once per job, after the loop.
JobStillActiveCheck jobStillActiveCheck = new JobStillActiveCheck(job);
job.setJobStillActiveInterface(jobStillActiveCheck);
new Thread(jobStillActiveCheck).start();
activeJobs.add(job.getJobNumber());
checkForDeathComplete(job);
heartbeat(job);
}
} else {
// Null assignments (allocator refusal or invalid conf) end the job.
logger.error("not enough capacity for this job");
jobComplete(job);
}
} catch (OrbZKFailure e) {
// ZooKeeper failures are surfaced to listeners as an exception event.
e.printStackTrace();
logger.error(e.getMessage());
fireEvent(new OrbExceptionEvent(e));
}
// NOTE(review): dead IOException handler left commented out by a
// previous revision; consider deleting it.
//catch (IOException e) {
// e.printStackTrace();
// logger.error(e.getMessage());
// }
}
Use of org.apache.hadoop.mapred.InvalidJobConfException in the project goldenorb by jzachr, from the class ResourceAllocatorTest, method testEnoughCapacity.
/**
 * Verifies balanced resource assignment: 6 requested and 2 reserved
 * partitions spread over 4 trackers (3 available / 1 reserved each,
 * no per-machine limit) should yield a 2/2/1/1 available split and a
 * 1/1/0/0 reserved split.
 */
@Test
public void testEnoughCapacity() {
  List<OrbTracker> orbTrackers = new ArrayList<OrbTracker>();
  OrbConfiguration conf = new OrbConfiguration(true);
  conf.setOrbRequestedPartitions(6);
  conf.setOrbReservedPartitions(2);
  conf.setNumberOfPartitionsPerMachine(0);
  for (int i = 0; i < 4; i++) {
    OrbTracker ot = new OrbTracker(conf);
    ot.setAvailablePartitions(3);
    ot.setReservedPartitions(1);
    orbTrackers.add(ot);
  }
  ResourceAllocator<OrbTracker> ra = new ResourceAllocator<OrbTracker>(conf, orbTrackers);
  Map<OrbTracker, Integer[]> ret;
  try {
    ret = ra.assignResources(conf);
  } catch (InvalidJobConfException e) {
    // FIX: previously swallowed with printStackTrace(), leaving ret null
    // and failing later with an uninformative NPE; fail here with cause.
    throw new AssertionError("assignResources rejected a valid configuration", e);
  }
  // check each assignment (FIX: assertEquals args in expected, actual order)
  assertEquals(2, ret.get(orbTrackers.get(0))[ResourceAllocator.TRACKER_AVAILABLE].intValue());
  assertEquals(1, ret.get(orbTrackers.get(0))[ResourceAllocator.TRACKER_RESERVED].intValue());
  assertEquals(2, ret.get(orbTrackers.get(1))[ResourceAllocator.TRACKER_AVAILABLE].intValue());
  assertEquals(1, ret.get(orbTrackers.get(1))[ResourceAllocator.TRACKER_RESERVED].intValue());
  assertEquals(1, ret.get(orbTrackers.get(2))[ResourceAllocator.TRACKER_AVAILABLE].intValue());
  assertEquals(0, ret.get(orbTrackers.get(2))[ResourceAllocator.TRACKER_RESERVED].intValue());
  assertEquals(1, ret.get(orbTrackers.get(3))[ResourceAllocator.TRACKER_AVAILABLE].intValue());
  assertEquals(0, ret.get(orbTrackers.get(3))[ResourceAllocator.TRACKER_RESERVED].intValue());
}
Use of org.apache.hadoop.mapred.InvalidJobConfException in the project goldenorb by jzachr, from the class ResourceAllocatorTest, method testUnbalancedAssignment.
/**
 * Verifies assignment when trackers have uneven capacity: trackers 1
 * and 2 are capped at 1 available partition, so the allocator must
 * shift their share to trackers 0 and 3 (available split 2/1/1/2,
 * reserved split 1/1/0/0).
 */
@Test
public void testUnbalancedAssignment() {
  List<OrbTracker> orbTrackers = new ArrayList<OrbTracker>();
  OrbConfiguration conf = new OrbConfiguration(true);
  conf.setOrbRequestedPartitions(6);
  conf.setOrbReservedPartitions(2);
  conf.setNumberOfPartitionsPerMachine(0);
  for (int i = 0; i < 4; i++) {
    OrbTracker ot = new OrbTracker(conf);
    ot.setAvailablePartitions(3);
    ot.setReservedPartitions(1);
    orbTrackers.add(ot);
  }
  // Starve the two middle trackers to force an unbalanced allocation.
  orbTrackers.get(1).setAvailablePartitions(1);
  orbTrackers.get(2).setAvailablePartitions(1);
  ResourceAllocator<OrbTracker> ra = new ResourceAllocator<OrbTracker>(conf, orbTrackers);
  Map<OrbTracker, Integer[]> ret;
  try {
    ret = ra.assignResources(conf);
  } catch (InvalidJobConfException e) {
    // FIX: previously swallowed with printStackTrace(), leaving ret null
    // and failing later with an uninformative NPE; fail here with cause.
    throw new AssertionError("assignResources rejected a valid configuration", e);
  }
  // check each assignment (FIX: assertEquals args in expected, actual order)
  assertEquals(2, ret.get(orbTrackers.get(0))[ResourceAllocator.TRACKER_AVAILABLE].intValue());
  assertEquals(1, ret.get(orbTrackers.get(0))[ResourceAllocator.TRACKER_RESERVED].intValue());
  assertEquals(1, ret.get(orbTrackers.get(1))[ResourceAllocator.TRACKER_AVAILABLE].intValue());
  assertEquals(1, ret.get(orbTrackers.get(1))[ResourceAllocator.TRACKER_RESERVED].intValue());
  assertEquals(1, ret.get(orbTrackers.get(2))[ResourceAllocator.TRACKER_AVAILABLE].intValue());
  assertEquals(0, ret.get(orbTrackers.get(2))[ResourceAllocator.TRACKER_RESERVED].intValue());
  assertEquals(2, ret.get(orbTrackers.get(3))[ResourceAllocator.TRACKER_AVAILABLE].intValue());
  assertEquals(0, ret.get(orbTrackers.get(3))[ResourceAllocator.TRACKER_RESERVED].intValue());
}
Use of org.apache.hadoop.mapred.InvalidJobConfException in the project goldenorb by jzachr, from the class ResourceAllocatorTest, method testEnoughCapacityWithPPM.
/**
 * Verifies assignment with a partitions-per-machine (PPM) limit of 2:
 * the 6 requested partitions must be packed 2/2/2/0 across the four
 * trackers, with the 2 reserved partitions going to the first two
 * (reserved split 1/1/0/0).
 */
@Test
public void testEnoughCapacityWithPPM() {
  List<OrbTracker> orbTrackers = new ArrayList<OrbTracker>();
  OrbConfiguration conf = new OrbConfiguration(true);
  conf.setOrbRequestedPartitions(6);
  conf.setOrbReservedPartitions(2);
  // the "PPM" part: cap each machine at 2 partitions
  conf.setNumberOfPartitionsPerMachine(2);
  for (int i = 0; i < 4; i++) {
    OrbTracker ot = new OrbTracker(conf);
    ot.setAvailablePartitions(3);
    ot.setReservedPartitions(1);
    orbTrackers.add(ot);
  }
  ResourceAllocator<OrbTracker> ra = new ResourceAllocator<OrbTracker>(conf, orbTrackers);
  Map<OrbTracker, Integer[]> ret;
  try {
    ret = ra.assignResources(conf);
  } catch (InvalidJobConfException e) {
    // FIX: previously swallowed with printStackTrace(), leaving ret null
    // and failing later with an uninformative NPE; fail here with cause.
    throw new AssertionError("assignResources rejected a valid configuration", e);
  }
  // check each assignment (FIX: assertEquals args in expected, actual order)
  assertEquals(2, ret.get(orbTrackers.get(0))[ResourceAllocator.TRACKER_AVAILABLE].intValue());
  assertEquals(1, ret.get(orbTrackers.get(0))[ResourceAllocator.TRACKER_RESERVED].intValue());
  assertEquals(2, ret.get(orbTrackers.get(1))[ResourceAllocator.TRACKER_AVAILABLE].intValue());
  assertEquals(1, ret.get(orbTrackers.get(1))[ResourceAllocator.TRACKER_RESERVED].intValue());
  assertEquals(2, ret.get(orbTrackers.get(2))[ResourceAllocator.TRACKER_AVAILABLE].intValue());
  assertEquals(0, ret.get(orbTrackers.get(2))[ResourceAllocator.TRACKER_RESERVED].intValue());
  assertEquals(0, ret.get(orbTrackers.get(3))[ResourceAllocator.TRACKER_AVAILABLE].intValue());
  assertEquals(0, ret.get(orbTrackers.get(3))[ResourceAllocator.TRACKER_RESERVED].intValue());
}
Aggregations