
Example 6 with InvalidJobConfException

use of org.apache.hadoop.mapred.InvalidJobConfException in project goldenorb by jzachr.

the class ResourceAllocatorTest method insufficientCapacity.

/**
 * Verifies that no partition assignment is produced when the trackers
 * cannot cover the requested and reserved partition counts.
 */
@Test
public void insufficientCapacity() {
    List<OrbTracker> orbTrackers = new ArrayList<OrbTracker>();
    OrbConfiguration conf = new OrbConfiguration(true);
    conf.setOrbRequestedPartitions(6);
    conf.setOrbReservedPartitions(2);
    conf.setNumberOfPartitionsPerMachine(0);
    for (int i = 0; i < 4; i++) {
        OrbTracker ot = new OrbTracker(conf);
        ot.setAvailablePartitions(1);
        ot.setReservedPartitions(1);
        orbTrackers.add(ot);
    }
    ResourceAllocator<OrbTracker> ra = new ResourceAllocator<OrbTracker>(conf, orbTrackers);
    Map<OrbTracker, Integer[]> ret = null;
    try {
        ret = ra.assignResources(conf);
    } catch (InvalidJobConfException e) {
        // an unsatisfiable request either throws here or returns null,
        // so ret stays null in both cases
        e.printStackTrace();
    }
    assertNull(ret);
}
Also used : OrbConfiguration(org.goldenorb.conf.OrbConfiguration) OrbTracker(org.goldenorb.OrbTracker) ArrayList(java.util.ArrayList) InvalidJobConfException(org.apache.hadoop.mapred.InvalidJobConfException) Test(org.junit.Test)
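For contrast, a sketch (not taken from the GoldenOrb sources) of the complementary happy-path test. It reuses only the setters visible above and assumes assignResources returns a non-null assignment map once the trackers' available partitions cover the requested and reserved counts:

@Test
public void sufficientCapacity() throws InvalidJobConfException {
    List<OrbTracker> orbTrackers = new ArrayList<OrbTracker>();
    OrbConfiguration conf = new OrbConfiguration(true);
    conf.setOrbRequestedPartitions(6);
    conf.setOrbReservedPartitions(2);
    conf.setNumberOfPartitionsPerMachine(0);
    for (int i = 0; i < 4; i++) {
        OrbTracker ot = new OrbTracker(conf);
        // 4 trackers x 2 available = 8 >= 6 requested; 4 reserved >= 2 reserved
        ot.setAvailablePartitions(2);
        ot.setReservedPartitions(1);
        orbTrackers.add(ot);
    }
    ResourceAllocator<OrbTracker> ra = new ResourceAllocator<OrbTracker>(conf, orbTrackers);
    Map<OrbTracker, Integer[]> ret = ra.assignResources(conf);
    assertNotNull(ret);
}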

Example 7 with InvalidJobConfException

use of org.apache.hadoop.mapred.InvalidJobConfException in project hadoop by apache.

the class MRApps method parseDistributedCacheArtifacts.

// TODO - Move this to MR!
// Use TaskDistributedCacheManager.CacheFiles.makeCacheFiles(URI[], 
// long[], boolean[], Path[], FileType)
private static void parseDistributedCacheArtifacts(Configuration conf, Map<String, LocalResource> localResources, LocalResourceType type, URI[] uris, long[] timestamps, long[] sizes, boolean[] visibilities) throws IOException {
    if (uris != null) {
        // Sanity check
        if ((uris.length != timestamps.length) || (uris.length != sizes.length) || (uris.length != visibilities.length)) {
            throw new IllegalArgumentException("Invalid specification for " + "distributed-cache artifacts of type " + type + " :" + " #uris=" + uris.length + " #timestamps=" + timestamps.length + " #visibilities=" + visibilities.length);
        }
        for (int i = 0; i < uris.length; ++i) {
            URI u = uris[i];
            Path p = new Path(u);
            FileSystem remoteFS = p.getFileSystem(conf);
            String linkName = null;
            if (p.getName().equals(DistributedCache.WILDCARD)) {
                p = p.getParent();
                linkName = p.getName() + Path.SEPARATOR + DistributedCache.WILDCARD;
            }
            p = remoteFS.resolvePath(p.makeQualified(remoteFS.getUri(), remoteFS.getWorkingDirectory()));
            // If there's no wildcard, try using the fragment for the link
            if (linkName == null) {
                linkName = u.getFragment();
                // Because we don't know what's in the fragment, we have to
                // handle it with care.
                if (linkName != null) {
                    Path linkPath = new Path(linkName);
                    if (linkPath.isAbsolute()) {
                        throw new IllegalArgumentException("Resource name must be " + "relative");
                    }
                    linkName = linkPath.toUri().getPath();
                }
            } else if (u.getFragment() != null) {
                throw new IllegalArgumentException("Invalid path URI: " + p + " - cannot contain both a URI fragment and a wildcard");
            }
            // If there's no wildcard or fragment, just link to the file name
            if (linkName == null) {
                linkName = p.getName();
            }
            LocalResource orig = localResources.get(linkName);
            if (orig != null && !orig.getResource().equals(URL.fromURI(p.toUri()))) {
                throw new InvalidJobConfException(getResourceDescription(orig.getType()) + orig.getResource() + " conflicts with " + getResourceDescription(type) + u);
            }
            localResources.put(linkName, LocalResource.newInstance(URL.fromURI(p.toUri()), type, visibilities[i] ? LocalResourceVisibility.PUBLIC : LocalResourceVisibility.PRIVATE, sizes[i], timestamps[i]));
        }
    }
}
Also used : Path(org.apache.hadoop.fs.Path) FileSystem(org.apache.hadoop.fs.FileSystem) InvalidJobConfException(org.apache.hadoop.mapred.InvalidJobConfException) URI(java.net.URI) LocalResource(org.apache.hadoop.yarn.api.records.LocalResource)
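For context, the URIs this method consumes are typically registered on the client via Job.addCacheFile. A minimal driver-side sketch (host name and paths are illustrative) of the two URI shapes handled above, a renaming fragment and a trailing wildcard:

private static void addCacheArtifacts(Job job) throws URISyntaxException {
    // Fragment: the localized file is linked into the task working dir as "dict.txt".
    job.addCacheFile(new URI("hdfs://nn:8020/data/dictionary-v2.txt#dict.txt"));
    // Trailing wildcard: every file directly under /data/models is localized;
    // pairing a wildcard with a fragment is rejected by the parser above.
    job.addCacheFile(new URI("hdfs://nn:8020/data/models/" + DistributedCache.WILDCARD));
}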

Example 8 with InvalidJobConfException

use of org.apache.hadoop.mapred.InvalidJobConfException in project hadoop by apache.

the class FileOutputFormat method checkOutputSpecs.

public void checkOutputSpecs(JobContext job) throws FileAlreadyExistsException, IOException {
    // Ensure that the output directory is set and not already there
    Path outDir = getOutputPath(job);
    if (outDir == null) {
        throw new InvalidJobConfException("Output directory not set.");
    }
    // get delegation token for outDir's file system
    TokenCache.obtainTokensForNamenodes(job.getCredentials(), new Path[] { outDir }, job.getConfiguration());
    if (outDir.getFileSystem(job.getConfiguration()).exists(outDir)) {
        throw new FileAlreadyExistsException("Output directory " + outDir + " already exists");
    }
}
Also used : Path(org.apache.hadoop.fs.Path) FileAlreadyExistsException(org.apache.hadoop.mapred.FileAlreadyExistsException) InvalidJobConfException(org.apache.hadoop.mapred.InvalidJobConfException)
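For completeness, a minimal driver sketch (job name and output path are illustrative) of what this check expects: an output path that is set and does not yet exist.

Job job = Job.getInstance(new Configuration(), "word-count");
// Without this call, checkOutputSpecs fails with
// InvalidJobConfException("Output directory not set.").
FileOutputFormat.setOutputPath(job, new Path("/user/alice/out"));
// If /user/alice/out already exists, submission fails with
// FileAlreadyExistsException instead.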

Example 9 with InvalidJobConfException

use of org.apache.hadoop.mapred.InvalidJobConfException in project hadoop by apache.

the class NativeMapOutputCollectorDelegator method init.

@SuppressWarnings("unchecked")
@Override
public void init(Context context) throws IOException, ClassNotFoundException {
    this.context = context;
    this.job = context.getJobConf();
    Platforms.init(job);
    if (job.getNumReduceTasks() == 0) {
        String message = "There is no reducer, no need to use native output collector";
        LOG.error(message);
        throw new InvalidJobConfException(message);
    }
    Class<?> comparatorClass = job.getClass(MRJobConfig.KEY_COMPARATOR, null, RawComparator.class);
    if (comparatorClass != null && !Platforms.define(comparatorClass)) {
        String message = "Native output collector doesn't support customized java comparator " + job.get(MRJobConfig.KEY_COMPARATOR);
        LOG.error(message);
        throw new InvalidJobConfException(message);
    }
    if (!QuickSort.class.getName().equals(job.get(Constants.MAP_SORT_CLASS))) {
        String message = "Native-Task doesn't support sort class " + job.get(Constants.MAP_SORT_CLASS);
        LOG.error(message);
        throw new InvalidJobConfException(message);
    }
    if (job.getBoolean(MRConfig.SHUFFLE_SSL_ENABLED_KEY, false)) {
        String message = "Native-Task doesn't support secure shuffle";
        LOG.error(message);
        throw new InvalidJobConfException(message);
    }
    final Class<?> keyCls = job.getMapOutputKeyClass();
    try {
        @SuppressWarnings("rawtypes") final INativeSerializer serializer = NativeSerialization.getInstance().getSerializer(keyCls);
        if (null == serializer) {
            String message = "Key type not supported. Cannot find serializer for " + keyCls.getName();
            LOG.error(message);
            throw new InvalidJobConfException(message);
        } else if (!Platforms.support(keyCls.getName(), serializer, job)) {
            String message = "Native output collector doesn't support this key, " + "this key is not comparable in native: " + keyCls.getName();
            LOG.error(message);
            throw new InvalidJobConfException(message);
        }
    } catch (final IOException e) {
        String message = "Cannot find serializer for " + keyCls.getName();
        LOG.error(message);
        throw new IOException(message, e);
    }
    final boolean ret = NativeRuntime.isNativeLibraryLoaded();
    if (ret) {
        if (job.getBoolean(MRJobConfig.MAP_OUTPUT_COMPRESS, false)) {
            String codec = job.get(MRJobConfig.MAP_OUTPUT_COMPRESS_CODEC);
            if (!NativeRuntime.supportsCompressionCodec(codec.getBytes(Charsets.UTF_8))) {
                String message = "Native output collector doesn't support compression codec " + codec;
                LOG.error(message);
                throw new InvalidJobConfException(message);
            }
        }
        NativeRuntime.configure(job);
        final long updateInterval = job.getLong(Constants.NATIVE_STATUS_UPDATE_INTERVAL, Constants.NATIVE_STATUS_UPDATE_INTERVAL_DEFVAL);
        updater = new StatusReportChecker(context.getReporter(), updateInterval);
        updater.start();
    } else {
        String message = "NativeRuntime cannot be loaded, please check that " + "libnativetask.so is in hadoop library dir";
        LOG.error(message);
        throw new InvalidJobConfException(message);
    }
    this.handler = null;
    try {
        final Class<K> oKClass = (Class<K>) job.getMapOutputKeyClass();
        final Class<K> oVClass = (Class<K>) job.getMapOutputValueClass();
        final TaskAttemptID id = context.getMapTask().getTaskID();
        final TaskContext taskContext = new TaskContext(job, null, null, oKClass, oVClass, context.getReporter(), id);
        handler = NativeCollectorOnlyHandler.create(taskContext);
    } catch (final IOException e) {
        String message = "Native output collector cannot be loaded;";
        LOG.error(message);
        throw new IOException(message, e);
    }
    LOG.info("Native output collector can be successfully enabled!");
}
Also used : TaskAttemptID(org.apache.hadoop.mapred.TaskAttemptID) InvalidJobConfException(org.apache.hadoop.mapred.InvalidJobConfException) IOException(java.io.IOException) INativeSerializer(org.apache.hadoop.mapred.nativetask.serde.INativeSerializer)
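A hedged configuration sketch that satisfies the checks in init() above; the delegator's fully qualified class name is inferred from the nativetask package seen here and should be verified against the target Hadoop release.

JobConf job = new JobConf();
job.setNumReduceTasks(4); // the native collector requires at least one reducer
// Keep the default QuickSort map sort class, no custom key comparator,
// and SSL shuffle disabled, as the checks above demand.
job.setBoolean(MRConfig.SHUFFLE_SSL_ENABLED_KEY, false);
job.set(MRJobConfig.MAP_OUTPUT_COLLECTOR_CLASS_ATTR,
    "org.apache.hadoop.mapred.nativetask.NativeMapOutputCollectorDelegator");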

Example 10 with InvalidJobConfException

use of org.apache.hadoop.mapred.InvalidJobConfException in project hadoop by apache.

the class TeraOutputFormat method checkOutputSpecs.

@Override
public void checkOutputSpecs(JobContext job) throws InvalidJobConfException, IOException {
    // Ensure that the output directory is set
    Path outDir = getOutputPath(job);
    if (outDir == null) {
        throw new InvalidJobConfException("Output directory not set in JobConf.");
    }
    final Configuration jobConf = job.getConfiguration();
    // get delegation token for outDir's file system
    TokenCache.obtainTokensForNamenodes(job.getCredentials(), new Path[] { outDir }, jobConf);
    final FileSystem fs = outDir.getFileSystem(jobConf);
    try {
        // existing output dir is considered empty iff its only content is the
        // partition file.
        //
        final FileStatus[] outDirKids = fs.listStatus(outDir);
        boolean empty = false;
        if (outDirKids != null && outDirKids.length == 1) {
            final FileStatus st = outDirKids[0];
            final String fname = st.getPath().getName();
            empty = !st.isDirectory() && TeraInputFormat.PARTITION_FILENAME.equals(fname);
        }
        // The simple partitioner needs no partition file, so with it any
        // existing content at all counts as pre-existing output.
        if (TeraSort.getUseSimplePartitioner(job) || !empty) {
            throw new FileAlreadyExistsException("Output directory " + outDir + " already exists");
        }
    } catch (FileNotFoundException ignored) {
        // the output directory does not exist yet, so there is nothing to check
    }
}
Also used : Path(org.apache.hadoop.fs.Path) FileAlreadyExistsException(org.apache.hadoop.mapred.FileAlreadyExistsException) FileStatus(org.apache.hadoop.fs.FileStatus) Configuration(org.apache.hadoop.conf.Configuration) FileSystem(org.apache.hadoop.fs.FileSystem) FileNotFoundException(java.io.FileNotFoundException) InvalidJobConfException(org.apache.hadoop.mapred.InvalidJobConfException)
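The partition-file exemption reflects job ordering: the TeraSort driver writes TeraInputFormat.PARTITION_FILENAME into the output directory before submitting the job. A sketch of that ordering (paths illustrative):

Job job = Job.getInstance(new Configuration(), "terasort");
Path outputDir = new Path("/terasort/out");
FileOutputFormat.setOutputPath(job, outputDir);
Path partitionFile = new Path(outputDir, TeraInputFormat.PARTITION_FILENAME);
try {
    // Sampling writes _partition.lst into the still-unpopulated output dir...
    TeraInputFormat.writePartitionFile(job, partitionFile);
} catch (Throwable t) {
    throw new IOException("partition sampling failed", t);
}
// ...which is why checkOutputSpecs above treats "only the partition file" as empty.
job.submit();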

Aggregations

InvalidJobConfException (org.apache.hadoop.mapred.InvalidJobConfException)12 Path (org.apache.hadoop.fs.Path)5 Test (org.junit.Test)5 ArrayList (java.util.ArrayList)4 OrbTracker (org.goldenorb.OrbTracker)4 OrbConfiguration (org.goldenorb.conf.OrbConfiguration)4 IOException (java.io.IOException)3 FileSystem (org.apache.hadoop.fs.FileSystem)3 FileNotFoundException (java.io.FileNotFoundException)2 FileAlreadyExistsException (org.apache.hadoop.mapred.FileAlreadyExistsException)2 File (java.io.File)1 URI (java.net.URI)1 Date (java.util.Date)1 Configuration (org.apache.hadoop.conf.Configuration)1 FileAlreadyExistsException (org.apache.hadoop.fs.FileAlreadyExistsException)1 FileStatus (org.apache.hadoop.fs.FileStatus)1 LongWritable (org.apache.hadoop.io.LongWritable)1 JobClient (org.apache.hadoop.mapred.JobClient)1 TaskAttemptID (org.apache.hadoop.mapred.TaskAttemptID)1 INativeSerializer (org.apache.hadoop.mapred.nativetask.serde.INativeSerializer)1