
Example 1 with InvalidJobConfException

Use of org.apache.hadoop.mapred.InvalidJobConfException in project hadoop by apache.

The class TestMRSequenceFileAsBinaryOutputFormat, method testcheckOutputSpecsForbidRecordCompression:

@Test
public void testcheckOutputSpecsForbidRecordCompression() throws IOException {
    Job job = Job.getInstance();
    FileSystem fs = FileSystem.getLocal(job.getConfiguration());
    Path outputdir = new Path(System.getProperty("test.build.data", "/tmp") + "/output");
    fs.delete(outputdir, true);
    // Without an output path, FileOutputFormat.checkOutputSpecs will throw
    // InvalidJobConfException
    FileOutputFormat.setOutputPath(job, outputdir);
    // SequenceFileAsBinaryOutputFormat doesn't support record compression
    // It should throw an exception when checked by checkOutputSpecs
    SequenceFileAsBinaryOutputFormat.setCompressOutput(job, true);
    SequenceFileAsBinaryOutputFormat.setOutputCompressionType(job, CompressionType.BLOCK);
    try {
        new SequenceFileAsBinaryOutputFormat().checkOutputSpecs(job);
    } catch (Exception e) {
        fail("Block compression should be allowed for " + "SequenceFileAsBinaryOutputFormat:Caught " + e.getClass().getName());
    }
    SequenceFileAsBinaryOutputFormat.setOutputCompressionType(job, CompressionType.RECORD);
    try {
        new SequenceFileAsBinaryOutputFormat().checkOutputSpecs(job);
        fail("Record compression should not be allowed for " + "SequenceFileAsBinaryOutputFormat");
    } catch (InvalidJobConfException ie) {
    // expected
    } catch (Exception e) {
        fail("Expected " + InvalidJobConfException.class.getName() + "but caught " + e.getClass().getName());
    }
}
Also used : Path(org.apache.hadoop.fs.Path) FileSystem(org.apache.hadoop.fs.FileSystem) InvalidJobConfException(org.apache.hadoop.mapred.InvalidJobConfException) Job(org.apache.hadoop.mapreduce.Job) IOException(java.io.IOException) Test(org.junit.Test)
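The comments in the test above note that FileOutputFormat.checkOutputSpecs throws InvalidJobConfException when no output path has been set. Below is a minimal sketch of that case, using the same Hadoop and JUnit dependencies as the test above; the class and method names here are hypothetical and not part of the Hadoop sources.

// Hypothetical companion test: without FileOutputFormat.setOutputPath(...),
// checkOutputSpecs is expected to throw InvalidJobConfException.
import java.io.IOException;

import org.apache.hadoop.mapred.InvalidJobConfException;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.output.SequenceFileAsBinaryOutputFormat;
import org.junit.Test;

import static org.junit.Assert.fail;

public class MissingOutputPathSketch {

    @Test
    public void testCheckOutputSpecsWithoutOutputPath() throws IOException {
        Job job = Job.getInstance();
        try {
            // Output path deliberately not set.
            new SequenceFileAsBinaryOutputFormat().checkOutputSpecs(job);
            fail("Expected InvalidJobConfException when no output path is configured");
        } catch (InvalidJobConfException expected) {
            // expected: output directory not set
        }
    }
}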

Example 2 with InvalidJobConfException

Use of org.apache.hadoop.mapred.InvalidJobConfException in project goldenorb by jzachr.

The class JobManager, method launchJob:

/**
 * Launches the given job: creates its ZooKeeper coordination nodes, allocates
 * OrbTracker resources, and requests partitions from each tracker.
 * 
 * @param job the OrbJob to launch
 */
private void launchJob(OrbJob job) {
    try {
        ZookeeperUtils.notExistCreateNode(zk, jobsInProgressPath + "/" + job.getJobNumber());
        ZookeeperUtils.notExistCreateNode(zk, jobsInProgressPath + "/" + job.getJobNumber() + "/OrbPartitionLeaderGroup");
        ZookeeperUtils.notExistCreateNode(zk, jobsInProgressPath + "/" + job.getJobNumber() + "/messages");
        ZookeeperUtils.tryToCreateNode(zk, jobsInProgressPath + "/" + job.getJobNumber() + "/messages/heartbeat", new LongWritable(0), CreateMode.PERSISTENT);
        // allocate resources and if enough, start the job
        logger.info("checking for available OrbTracker resources");
        Map<M, Integer[]> assignments = null;
        try {
            assignments = resourceAllocator.assignResources(job.getOrbConf());
        } catch (InvalidJobConfException e) {
            logger.error(e.getMessage());
        }
        logger.info("Starting Job");
        logger.info("********** Job {} started: {}", job.getJobNumber(), new Date().getTime());
        if (assignments != null) {
            logger.info("Allocating partitions");
            int basePartitionID = 0;
            for (M tracker : orbTrackerMembers) {
                logger.debug("OrbTracker - " + tracker.getHostname() + ":" + tracker.getPort());
                Integer[] assignment = assignments.get(tracker);
                tracker.initProxy(getOrbConf());
                try {
                    logger.debug("jobConf().getHDFSdistributedFiles(): {}", job.getOrbConf().getHDFSdistributedFiles());
                    tracker.getRequiredFiles(job.getOrbConf());
                } catch (OrbZKFailure e) {
                    logger.error("EXCEPTION : An OrbTrackerMember failed to copy files from HDFS to local machine");
                    logger.error(e.getMessage());
                    throw e;
                }
                PartitionRequest request = new PartitionRequest();
                request.setActivePartitions(assignment[ResourceAllocator.TRACKER_AVAILABLE]);
                request.setReservedPartitions(assignment[ResourceAllocator.TRACKER_RESERVED]);
                request.setJobID(job.getJobNumber());
                request.setBasePartitionID(basePartitionID);
                request.setJobConf(job.getOrbConf());
                basePartitionID += assignment[ResourceAllocator.TRACKER_AVAILABLE];
                logger.debug("requesting partitions");
                tracker.requestPartitions(request);
                logger.info(request.toString());
                JobStillActiveCheck jobStillActiveCheck = new JobStillActiveCheck(job);
                job.setJobStillActiveInterface(jobStillActiveCheck);
                new Thread(jobStillActiveCheck).start();
                activeJobs.add(job.getJobNumber());
                checkForDeathComplete(job);
                heartbeat(job);
            }
        } else {
            logger.error("not enough capacity for this job");
            jobComplete(job);
        }
    } catch (OrbZKFailure e) {
        e.printStackTrace();
        logger.error(e.getMessage());
        fireEvent(new OrbExceptionEvent(e));
    }
    // catch (IOException e) {
    //   e.printStackTrace();
    //   logger.error(e.getMessage());
    // }
}
Also used : OrbExceptionEvent(org.goldenorb.event.OrbExceptionEvent) PartitionRequest(org.goldenorb.jet.PartitionRequest) InvalidJobConfException(org.apache.hadoop.mapred.InvalidJobConfException) Date(java.util.Date) OrbZKFailure(org.goldenorb.zookeeper.OrbZKFailure) LongWritable(org.apache.hadoop.io.LongWritable)

Example 3 with InvalidJobConfException

Use of org.apache.hadoop.mapred.InvalidJobConfException in project goldenorb by jzachr.

The class ResourceAllocatorTest, method testEnoughCapacity:

@Test
public void testEnoughCapacity() {
    List<OrbTracker> orbTrackers = new ArrayList<OrbTracker>();
    OrbConfiguration conf = new OrbConfiguration(true);
    conf.setOrbRequestedPartitions(6);
    conf.setOrbReservedPartitions(2);
    conf.setNumberOfPartitionsPerMachine(0);
    for (int i = 0; i < 4; i++) {
        OrbTracker ot = new OrbTracker(conf);
        ot.setAvailablePartitions(3);
        ot.setReservedPartitions(1);
        orbTrackers.add(ot);
    }
    ResourceAllocator<OrbTracker> ra = new ResourceAllocator<OrbTracker>(conf, orbTrackers);
    Map<OrbTracker, Integer[]> ret = null;
    try {
        ret = ra.assignResources(conf);
    } catch (InvalidJobConfException e) {
        e.printStackTrace();
    }
    // check each assignment
    assertEquals(ret.get(orbTrackers.get(0))[ResourceAllocator.TRACKER_AVAILABLE].intValue(), 2);
    assertEquals(ret.get(orbTrackers.get(0))[ResourceAllocator.TRACKER_RESERVED].intValue(), 1);
    assertEquals(ret.get(orbTrackers.get(1))[ResourceAllocator.TRACKER_AVAILABLE].intValue(), 2);
    assertEquals(ret.get(orbTrackers.get(1))[ResourceAllocator.TRACKER_RESERVED].intValue(), 1);
    assertEquals(ret.get(orbTrackers.get(2))[ResourceAllocator.TRACKER_AVAILABLE].intValue(), 1);
    assertEquals(ret.get(orbTrackers.get(2))[ResourceAllocator.TRACKER_RESERVED].intValue(), 0);
    assertEquals(ret.get(orbTrackers.get(3))[ResourceAllocator.TRACKER_AVAILABLE].intValue(), 1);
    assertEquals(ret.get(orbTrackers.get(3))[ResourceAllocator.TRACKER_RESERVED].intValue(), 0);
}
Also used : OrbConfiguration(org.goldenorb.conf.OrbConfiguration) OrbTracker(org.goldenorb.OrbTracker) ArrayList(java.util.ArrayList) InvalidJobConfException(org.apache.hadoop.mapred.InvalidJobConfException) Test(org.junit.Test)

Example 4 with InvalidJobConfException

Use of org.apache.hadoop.mapred.InvalidJobConfException in project goldenorb by jzachr.

The class ResourceAllocatorTest, method testUnbalancedAssignment:

@Test
public void testUnbalancedAssignment() {
    List<OrbTracker> orbTrackers = new ArrayList<OrbTracker>();
    OrbConfiguration conf = new OrbConfiguration(true);
    conf.setOrbRequestedPartitions(6);
    conf.setOrbReservedPartitions(2);
    conf.setNumberOfPartitionsPerMachine(0);
    for (int i = 0; i < 4; i++) {
        OrbTracker ot = new OrbTracker(conf);
        ot.setAvailablePartitions(3);
        ot.setReservedPartitions(1);
        orbTrackers.add(ot);
    }
    orbTrackers.get(1).setAvailablePartitions(1);
    orbTrackers.get(2).setAvailablePartitions(1);
    ResourceAllocator<OrbTracker> ra = new ResourceAllocator<OrbTracker>(conf, orbTrackers);
    Map<OrbTracker, Integer[]> ret = null;
    try {
        ret = ra.assignResources(conf);
    } catch (InvalidJobConfException e) {
        e.printStackTrace();
    }
    // check each assignment
    assertEquals(ret.get(orbTrackers.get(0))[ResourceAllocator.TRACKER_AVAILABLE].intValue(), 2);
    assertEquals(ret.get(orbTrackers.get(0))[ResourceAllocator.TRACKER_RESERVED].intValue(), 1);
    assertEquals(ret.get(orbTrackers.get(1))[ResourceAllocator.TRACKER_AVAILABLE].intValue(), 1);
    assertEquals(ret.get(orbTrackers.get(1))[ResourceAllocator.TRACKER_RESERVED].intValue(), 1);
    assertEquals(ret.get(orbTrackers.get(2))[ResourceAllocator.TRACKER_AVAILABLE].intValue(), 1);
    assertEquals(ret.get(orbTrackers.get(2))[ResourceAllocator.TRACKER_RESERVED].intValue(), 0);
    assertEquals(ret.get(orbTrackers.get(3))[ResourceAllocator.TRACKER_AVAILABLE].intValue(), 2);
    assertEquals(ret.get(orbTrackers.get(3))[ResourceAllocator.TRACKER_RESERVED].intValue(), 0);
}
Also used : OrbConfiguration(org.goldenorb.conf.OrbConfiguration) OrbTracker(org.goldenorb.OrbTracker) ArrayList(java.util.ArrayList) InvalidJobConfException(org.apache.hadoop.mapred.InvalidJobConfException) Test(org.junit.Test)

Example 5 with InvalidJobConfException

Use of org.apache.hadoop.mapred.InvalidJobConfException in project goldenorb by jzachr.

The class ResourceAllocatorTest, method testEnoughCapacityWithPPM:

@Test
public void testEnoughCapacityWithPPM() {
    List<OrbTracker> orbTrackers = new ArrayList<OrbTracker>();
    OrbConfiguration conf = new OrbConfiguration(true);
    conf.setOrbRequestedPartitions(6);
    conf.setOrbReservedPartitions(2);
    // the "PPM" part
    conf.setNumberOfPartitionsPerMachine(2);
    for (int i = 0; i < 4; i++) {
        OrbTracker ot = new OrbTracker(conf);
        ot.setAvailablePartitions(3);
        ot.setReservedPartitions(1);
        orbTrackers.add(ot);
    }
    ResourceAllocator<OrbTracker> ra = new ResourceAllocator<OrbTracker>(conf, orbTrackers);
    Map<OrbTracker, Integer[]> ret = null;
    try {
        ret = ra.assignResources(conf);
    } catch (InvalidJobConfException e) {
        e.printStackTrace();
    }
    // check each assignment
    assertEquals(ret.get(orbTrackers.get(0))[ResourceAllocator.TRACKER_AVAILABLE].intValue(), 2);
    assertEquals(ret.get(orbTrackers.get(0))[ResourceAllocator.TRACKER_RESERVED].intValue(), 1);
    assertEquals(ret.get(orbTrackers.get(1))[ResourceAllocator.TRACKER_AVAILABLE].intValue(), 2);
    assertEquals(ret.get(orbTrackers.get(1))[ResourceAllocator.TRACKER_RESERVED].intValue(), 1);
    assertEquals(ret.get(orbTrackers.get(2))[ResourceAllocator.TRACKER_AVAILABLE].intValue(), 2);
    assertEquals(ret.get(orbTrackers.get(2))[ResourceAllocator.TRACKER_RESERVED].intValue(), 0);
    assertEquals(ret.get(orbTrackers.get(3))[ResourceAllocator.TRACKER_AVAILABLE].intValue(), 0);
    assertEquals(ret.get(orbTrackers.get(3))[ResourceAllocator.TRACKER_RESERVED].intValue(), 0);
}
Also used : OrbConfiguration(org.goldenorb.conf.OrbConfiguration) OrbTracker(org.goldenorb.OrbTracker) ArrayList(java.util.ArrayList) InvalidJobConfException(org.apache.hadoop.mapred.InvalidJobConfException) Test(org.junit.Test)
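The three allocator tests above only cover requests the trackers can satisfy. The sketch below exercises an unsatisfiable request; it assumes, based on how the JobManager example handles assignResources, that such a request either raises InvalidJobConfException or produces no assignment map. The class and method names are hypothetical and not part of the GoldenOrb sources.

// Hypothetical companion test (not in the quoted GoldenOrb sources). It assumes
// an unsatisfiable request either makes assignResources throw
// InvalidJobConfException or yields no assignment map; the JobManager example
// above guards against both outcomes.
import java.util.ArrayList;
import java.util.List;
import java.util.Map;

import org.apache.hadoop.mapred.InvalidJobConfException;
import org.goldenorb.OrbTracker;
import org.goldenorb.conf.OrbConfiguration;
import org.junit.Test;

import static org.junit.Assert.assertNull;

// ResourceAllocator is assumed to live in the same package as the tests above.
public class ResourceAllocatorFailureSketch {

    @Test
    public void testNotEnoughCapacity() {
        List<OrbTracker> orbTrackers = new ArrayList<OrbTracker>();
        OrbConfiguration conf = new OrbConfiguration(true);
        // 20 requested partitions against 4 trackers x 3 available = 12 total
        conf.setOrbRequestedPartitions(20);
        conf.setOrbReservedPartitions(2);
        conf.setNumberOfPartitionsPerMachine(0);
        for (int i = 0; i < 4; i++) {
            OrbTracker ot = new OrbTracker(conf);
            ot.setAvailablePartitions(3);
            ot.setReservedPartitions(1);
            orbTrackers.add(ot);
        }
        ResourceAllocator<OrbTracker> ra = new ResourceAllocator<OrbTracker>(conf, orbTrackers);
        Map<OrbTracker, Integer[]> ret = null;
        try {
            ret = ra.assignResources(conf);
        } catch (InvalidJobConfException e) {
            // acceptable outcome: the allocator rejects the request outright
        }
        // acceptable outcome: no assignment could be produced
        assertNull(ret);
    }
}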

Aggregations

InvalidJobConfException (org.apache.hadoop.mapred.InvalidJobConfException): 12 usages
Path (org.apache.hadoop.fs.Path): 5 usages
Test (org.junit.Test): 5 usages
ArrayList (java.util.ArrayList): 4 usages
OrbTracker (org.goldenorb.OrbTracker): 4 usages
OrbConfiguration (org.goldenorb.conf.OrbConfiguration): 4 usages
IOException (java.io.IOException): 3 usages
FileSystem (org.apache.hadoop.fs.FileSystem): 3 usages
FileNotFoundException (java.io.FileNotFoundException): 2 usages
FileAlreadyExistsException (org.apache.hadoop.mapred.FileAlreadyExistsException): 2 usages
File (java.io.File): 1 usage
URI (java.net.URI): 1 usage
Date (java.util.Date): 1 usage
Configuration (org.apache.hadoop.conf.Configuration): 1 usage
FileAlreadyExistsException (org.apache.hadoop.fs.FileAlreadyExistsException): 1 usage
FileStatus (org.apache.hadoop.fs.FileStatus): 1 usage
LongWritable (org.apache.hadoop.io.LongWritable): 1 usage
JobClient (org.apache.hadoop.mapred.JobClient): 1 usage
TaskAttemptID (org.apache.hadoop.mapred.TaskAttemptID): 1 usage
INativeSerializer (org.apache.hadoop.mapred.nativetask.serde.INativeSerializer): 1 usage