
Example 6 with HadoopIllegalArgumentException

Use of org.apache.hadoop.HadoopIllegalArgumentException in the Apache Hadoop project.

From the class DfsClientConf, method createChecksum.

/** Create a DataChecksum with the given option. */
public DataChecksum createChecksum(ChecksumOpt userOpt) {
    // Fill in any missing field with the default.
    ChecksumOpt opt = ChecksumOpt.processChecksumOpt(defaultChecksumOpt, userOpt);
    DataChecksum dataChecksum = DataChecksum.newDataChecksum(opt.getChecksumType(), opt.getBytesPerChecksum());
    if (dataChecksum == null) {
        throw new HadoopIllegalArgumentException("Invalid checksum type: userOpt=" + userOpt + ", default=" + defaultChecksumOpt + ", effective=null");
    }
    return dataChecksum;
}
Also used: ChecksumOpt (org.apache.hadoop.fs.Options.ChecksumOpt), HadoopIllegalArgumentException (org.apache.hadoop.HadoopIllegalArgumentException), DataChecksum (org.apache.hadoop.util.DataChecksum)
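
To make the merge semantics concrete, here is a minimal standalone sketch of how ChecksumOpt.processChecksumOpt combines a user option with a default; the default values chosen below (CRC32C, 512 bytes per checksum) are illustrative assumptions, not values taken from DfsClientConf.

import org.apache.hadoop.fs.Options.ChecksumOpt;
import org.apache.hadoop.util.DataChecksum;

public class ChecksumOptSketch {
    public static void main(String[] args) {
        // Illustrative defaults: CRC32C with 512 bytes per checksum.
        ChecksumOpt defaults = new ChecksumOpt(DataChecksum.Type.CRC32C, 512);
        // The user overrides only bytesPerChecksum; the DEFAULT type marker
        // should be filled in from the default option's checksum type.
        ChecksumOpt userOpt = new ChecksumOpt(DataChecksum.Type.DEFAULT, 1024);
        ChecksumOpt effective = ChecksumOpt.processChecksumOpt(defaults, userOpt);
        System.out.println("type=" + effective.getChecksumType()
            + ", bytesPerChecksum=" + effective.getBytesPerChecksum());
    }
}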

Example 7 with HadoopIllegalArgumentException

Use of org.apache.hadoop.HadoopIllegalArgumentException in the Apache Hadoop project.

From the class BlockManager, method getBlocksWithLocations.

/** Get all blocks with location information from a datanode. */
public BlocksWithLocations getBlocksWithLocations(final DatanodeID datanode, final long size) throws UnregisteredNodeException {
    final DatanodeDescriptor node = getDatanodeManager().getDatanode(datanode);
    if (node == null) {
        blockLog.warn("BLOCK* getBlocks: Asking for blocks from an" + " unrecorded node {}", datanode);
        throw new HadoopIllegalArgumentException("Datanode " + datanode + " not found.");
    }
    int numBlocks = node.numBlocks();
    if (numBlocks == 0) {
        return new BlocksWithLocations(new BlockWithLocations[0]);
    }
    Iterator<BlockInfo> iter = node.getBlockIterator();
    // starting from a random block
    int startBlock = ThreadLocalRandom.current().nextInt(numBlocks);
    // skip blocks up to the random starting point
    for (int i = 0; i < startBlock; i++) {
        iter.next();
    }
    List<BlockWithLocations> results = new ArrayList<BlockWithLocations>();
    long totalSize = 0;
    BlockInfo curBlock;
    while (totalSize < size && iter.hasNext()) {
        curBlock = iter.next();
        if (!curBlock.isComplete())
            continue;
        if (curBlock.getNumBytes() < getBlocksMinBlockSize) {
            continue;
        }
        totalSize += addBlock(curBlock, results);
    }
    if (totalSize < size) {
        // start from the beginning
        iter = node.getBlockIterator();
        for (int i = 0; i < startBlock && totalSize < size; i++) {
            curBlock = iter.next();
            if (!curBlock.isComplete())
                continue;
            if (curBlock.getNumBytes() < getBlocksMinBlockSize) {
                continue;
            }
            totalSize += addBlock(curBlock, results);
        }
    }
    return new BlocksWithLocations(results.toArray(new BlockWithLocations[results.size()]));
}
Also used: HadoopIllegalArgumentException (org.apache.hadoop.HadoopIllegalArgumentException), BlocksWithLocations (org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations), ReportedBlockInfo (org.apache.hadoop.hdfs.server.blockmanagement.PendingDataNodeMessages.ReportedBlockInfo), ReceivedDeletedBlockInfo (org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo), ArrayList (java.util.ArrayList), StripedBlockWithLocations (org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations.StripedBlockWithLocations), BlockWithLocations (org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations.BlockWithLocations)
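
The random-start, wrap-around scan above is the interesting part: pick a random offset, collect blocks until the size budget is met, then wrap to the blocks that were skipped. Below is a self-contained sketch of the same pattern over plain Java collections; all names here are illustrative, not Hadoop API.

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.ThreadLocalRandom;

public class RandomStartScan {
    // Collect sizes starting at a random offset, wrapping around so every
    // item is considered at most once, until the budget is reached.
    public static List<Long> pick(List<Long> blockSizes, long budget) {
        List<Long> picked = new ArrayList<>();
        int n = blockSizes.size();
        if (n == 0) {
            return picked;
        }
        int start = ThreadLocalRandom.current().nextInt(n);
        long total = 0;
        for (int i = 0; i < n && total < budget; i++) {
            long size = blockSizes.get((start + i) % n);
            picked.add(size);
            total += size;
        }
        return picked;
    }

    public static void main(String[] args) {
        System.out.println(pick(List.of(10L, 20L, 30L, 40L), 50));
    }
}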

Example 8 with HadoopIllegalArgumentException

Use of org.apache.hadoop.HadoopIllegalArgumentException in the Apache Hadoop project.

From the class NNBench, method parseInputs.

/**
   * Parse the input arguments.
   *
   * @param args array of command-line arguments to be parsed
   */
private void parseInputs(final String[] args) {
    // If there are no command line arguments, exit
    if (args.length == 0) {
        displayUsage();
        throw new HadoopIllegalArgumentException("Give valid inputs");
    }
    // Parse command line args
    for (int i = 0; i < args.length; i++) {
        if (args[i].equals("-operation")) {
            operation = args[++i];
        } else if (args[i].equals("-maps")) {
            checkArgs(i + 1, args.length);
            numberOfMaps = Long.parseLong(args[++i]);
        } else if (args[i].equals("-reduces")) {
            checkArgs(i + 1, args.length);
            numberOfReduces = Long.parseLong(args[++i]);
        } else if (args[i].equals("-startTime")) {
            checkArgs(i + 1, args.length);
            startTime = Long.parseLong(args[++i]) * 1000;
        } else if (args[i].equals("-blockSize")) {
            checkArgs(i + 1, args.length);
            blockSize = Long.parseLong(args[++i]);
        } else if (args[i].equals("-bytesToWrite")) {
            checkArgs(i + 1, args.length);
            bytesToWrite = Integer.parseInt(args[++i]);
        } else if (args[i].equals("-bytesPerChecksum")) {
            checkArgs(i + 1, args.length);
            bytesPerChecksum = Long.parseLong(args[++i]);
        } else if (args[i].equals("-numberOfFiles")) {
            checkArgs(i + 1, args.length);
            numberOfFiles = Long.parseLong(args[++i]);
        } else if (args[i].equals("-replicationFactorPerFile")) {
            checkArgs(i + 1, args.length);
            replicationFactorPerFile = Short.parseShort(args[++i]);
        } else if (args[i].equals("-baseDir")) {
            checkArgs(i + 1, args.length);
            baseDir = args[++i];
        } else if (args[i].equals("-readFileAfterOpen")) {
            checkArgs(i + 1, args.length);
            readFileAfterOpen = Boolean.parseBoolean(args[++i]);
        } else if (args[i].equals("-help")) {
            displayUsage();
            isHelpMessage = true;
        }
    }
    LOG.info("Test Inputs: ");
    LOG.info("           Test Operation: " + operation);
    LOG.info("               Start time: " + sdf.format(new Date(startTime)));
    LOG.info("           Number of maps: " + numberOfMaps);
    LOG.info("        Number of reduces: " + numberOfReduces);
    LOG.info("               Block Size: " + blockSize);
    LOG.info("           Bytes to write: " + bytesToWrite);
    LOG.info("       Bytes per checksum: " + bytesPerChecksum);
    LOG.info("          Number of files: " + numberOfFiles);
    LOG.info("       Replication factor: " + replicationFactorPerFile);
    LOG.info("                 Base dir: " + baseDir);
    LOG.info("     Read file after open: " + readFileAfterOpen);
    // Set user-defined parameters, so the map method can access the values
    getConf().set("test.nnbench.operation", operation);
    getConf().setLong("test.nnbench.maps", numberOfMaps);
    getConf().setLong("test.nnbench.reduces", numberOfReduces);
    getConf().setLong("test.nnbench.starttime", startTime);
    getConf().setLong("test.nnbench.blocksize", blockSize);
    getConf().setInt("test.nnbench.bytestowrite", bytesToWrite);
    getConf().setLong("test.nnbench.bytesperchecksum", bytesPerChecksum);
    getConf().setLong("test.nnbench.numberoffiles", numberOfFiles);
    getConf().setInt("test.nnbench.replicationfactor", (int) replicationFactorPerFile);
    getConf().set("test.nnbench.basedir", baseDir);
    getConf().setBoolean("test.nnbench.readFileAfterOpen", readFileAfterOpen);
    getConf().set("test.nnbench.datadir.name", DATA_DIR_NAME);
    getConf().set("test.nnbench.outputdir.name", OUTPUT_DIR_NAME);
    getConf().set("test.nnbench.controldir.name", CONTROL_DIR_NAME);
}
Also used: HadoopIllegalArgumentException (org.apache.hadoop.HadoopIllegalArgumentException), Date (java.util.Date)
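
The checkArgs helper is not shown in this excerpt; a plausible implementation, sketched below, just verifies that a value actually follows the flag before args[++i] is read. The message and any usage printing in the real NNBench may differ.

import org.apache.hadoop.HadoopIllegalArgumentException;

public class ArgCheckSketch {
    // Assumed behavior: fail fast when a flag appears without a value.
    static void checkArgs(int nextIndex, int length) {
        if (nextIndex == length) {
            throw new HadoopIllegalArgumentException(
                "Not enough arguments: a flag is missing its value");
        }
    }

    public static void main(String[] args) {
        String[] cli = { "-maps", "12", "-reduces" };  // -reduces lacks a value
        for (int i = 0; i < cli.length; i++) {
            if (cli[i].equals("-maps") || cli[i].equals("-reduces")) {
                checkArgs(i + 1, cli.length);  // throws on the trailing flag
                System.out.println(cli[i] + " = " + cli[++i]);
            }
        }
    }
}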

Example 9 with HadoopIllegalArgumentException

Use of org.apache.hadoop.HadoopIllegalArgumentException in the Apache Hadoop project.

From the class DistCpSync, method preSyncCheck.

/**
   * Check that three conditions are met before syncing:
   * 1. There is only one source directory.
   * 2. Both the source and target file systems are DFS.
   * 3. There is no change between the from-snapshot and the current state
   *    of the target file system.
   * Throws an exception if either of the first two conditions is not met;
   * returns false, falling back to default distcp, if the third is not met.
   */
private boolean preSyncCheck() throws IOException {
    List<Path> sourcePaths = inputOptions.getSourcePaths();
    if (sourcePaths.size() != 1) {
        // we only support one source dir which must be a snapshottable directory
        throw new IllegalArgumentException(sourcePaths.size() + " source paths are provided");
    }
    final Path sourceDir = sourcePaths.get(0);
    final Path targetDir = inputOptions.getTargetPath();
    final FileSystem srcFs = sourceDir.getFileSystem(conf);
    final FileSystem tgtFs = targetDir.getFileSystem(conf);
    final FileSystem snapshotDiffFs = isRdiff() ? tgtFs : srcFs;
    final Path snapshotDiffDir = isRdiff() ? targetDir : sourceDir;
    // Both the source and target file systems must be DistributedFileSystem.
    if (!(srcFs instanceof DistributedFileSystem) || !(tgtFs instanceof DistributedFileSystem)) {
        throw new IllegalArgumentException("The FileSystems needs to" + " be DistributedFileSystem for using snapshot-diff-based distcp");
    }
    final DistributedFileSystem targetFs = (DistributedFileSystem) tgtFs;
    // make sure the target FS has no changes between the from-snapshot and its current state
    if (!checkNoChange(targetFs, targetDir)) {
        // set the source path using the snapshot path
        inputOptions.setSourcePaths(Arrays.asList(getSnapshotPath(sourceDir, inputOptions.getToSnapshot())));
        return false;
    }
    final String from = getSnapshotName(inputOptions.getFromSnapshot());
    final String to = getSnapshotName(inputOptions.getToSnapshot());
    try {
        final FileStatus fromSnapshotStat = snapshotDiffFs.getFileStatus(getSnapshotPath(snapshotDiffDir, from));
        final FileStatus toSnapshotStat = snapshotDiffFs.getFileStatus(getSnapshotPath(snapshotDiffDir, to));
        if (isRdiff()) {
            // If fromSnapshot isn't current dir then do a time check
            if (!from.equals("") && fromSnapshotStat.getModificationTime() < toSnapshotStat.getModificationTime()) {
                throw new HadoopIllegalArgumentException("Snapshot " + from + " should be newer than " + to);
            }
        } else {
            // If toSnapshot isn't current dir then do a time check
            if (!to.equals("") && fromSnapshotStat.getModificationTime() > toSnapshotStat.getModificationTime()) {
                throw new HadoopIllegalArgumentException("Snapshot " + to + " should be newer than " + from);
            }
        }
    } catch (FileNotFoundException nfe) {
        throw new InvalidInputException("Input snapshot is not found", nfe);
    }
    return true;
}
Also used: Path (org.apache.hadoop.fs.Path), InvalidInputException (org.apache.hadoop.tools.CopyListing.InvalidInputException), FileStatus (org.apache.hadoop.fs.FileStatus), HadoopIllegalArgumentException (org.apache.hadoop.HadoopIllegalArgumentException), DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem), FileSystem (org.apache.hadoop.fs.FileSystem), FileNotFoundException (java.io.FileNotFoundException)
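
getSnapshotPath and getSnapshotName are not shown above. For a snapshottable HDFS directory, snapshot contents live under the standard .snapshot directory, so the path construction presumably looks like the sketch below; this is an assumption based on that convention, not the actual DistCpSync helper.

import org.apache.hadoop.fs.Path;

public class SnapshotPathSketch {
    // Assumed convention: <dir>/.snapshot/<name>; an empty name stands
    // for the current state of the directory itself.
    static Path getSnapshotPath(Path dir, String snapshotName) {
        if (snapshotName.isEmpty()) {
            return dir;
        }
        return new Path(dir, ".snapshot/" + snapshotName);
    }

    public static void main(String[] args) {
        // Prints /data/.snapshot/s1
        System.out.println(getSnapshotPath(new Path("/data"), "s1"));
    }
}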

Example 10 with HadoopIllegalArgumentException

Use of org.apache.hadoop.HadoopIllegalArgumentException in the Apache Hadoop project.

From the class AbstractFileSystem, method create.

/**
   * The specification of this method matches that of
   * {@link FileContext#create(Path, EnumSet, Options.CreateOpts...)} except
   * that the Path f must be fully qualified and the permission is absolute
   * (i.e. umask has been applied).
   */
public final FSDataOutputStream create(final Path f, final EnumSet<CreateFlag> createFlag, Options.CreateOpts... opts) throws AccessControlException, FileAlreadyExistsException, FileNotFoundException, ParentNotDirectoryException, UnsupportedFileSystemException, UnresolvedLinkException, IOException {
    checkPath(f);
    int bufferSize = -1;
    short replication = -1;
    long blockSize = -1;
    int bytesPerChecksum = -1;
    ChecksumOpt checksumOpt = null;
    FsPermission permission = null;
    Progressable progress = null;
    Boolean createParent = null;
    for (CreateOpts iOpt : opts) {
        if (CreateOpts.BlockSize.class.isInstance(iOpt)) {
            if (blockSize != -1) {
                throw new HadoopIllegalArgumentException("BlockSize option is set multiple times");
            }
            blockSize = ((CreateOpts.BlockSize) iOpt).getValue();
        } else if (CreateOpts.BufferSize.class.isInstance(iOpt)) {
            if (bufferSize != -1) {
                throw new HadoopIllegalArgumentException("BufferSize option is set multiple times");
            }
            bufferSize = ((CreateOpts.BufferSize) iOpt).getValue();
        } else if (CreateOpts.ReplicationFactor.class.isInstance(iOpt)) {
            if (replication != -1) {
                throw new HadoopIllegalArgumentException("ReplicationFactor option is set multiple times");
            }
            replication = ((CreateOpts.ReplicationFactor) iOpt).getValue();
        } else if (CreateOpts.BytesPerChecksum.class.isInstance(iOpt)) {
            if (bytesPerChecksum != -1) {
                throw new HadoopIllegalArgumentException("BytesPerChecksum option is set multiple times");
            }
            bytesPerChecksum = ((CreateOpts.BytesPerChecksum) iOpt).getValue();
        } else if (CreateOpts.ChecksumParam.class.isInstance(iOpt)) {
            if (checksumOpt != null) {
                throw new HadoopIllegalArgumentException("CreateChecksumType option is set multiple times");
            }
            checksumOpt = ((CreateOpts.ChecksumParam) iOpt).getValue();
        } else if (CreateOpts.Perms.class.isInstance(iOpt)) {
            if (permission != null) {
                throw new HadoopIllegalArgumentException("Perms option is set multiple times");
            }
            permission = ((CreateOpts.Perms) iOpt).getValue();
        } else if (CreateOpts.Progress.class.isInstance(iOpt)) {
            if (progress != null) {
                throw new HadoopIllegalArgumentException("Progress option is set multiple times");
            }
            progress = ((CreateOpts.Progress) iOpt).getValue();
        } else if (CreateOpts.CreateParent.class.isInstance(iOpt)) {
            if (createParent != null) {
                throw new HadoopIllegalArgumentException("CreateParent option is set multiple times");
            }
            createParent = ((CreateOpts.CreateParent) iOpt).getValue();
        } else {
            throw new HadoopIllegalArgumentException("Unkown CreateOpts of type " + iOpt.getClass().getName());
        }
    }
    if (permission == null) {
        throw new HadoopIllegalArgumentException("no permission supplied");
    }
    FsServerDefaults ssDef = getServerDefaults();
    if (ssDef.getBlockSize() % ssDef.getBytesPerChecksum() != 0) {
        throw new IOException("Internal error: default blockSize is" + " not a multiple of default bytesPerChecksum ");
    }
    if (blockSize == -1) {
        blockSize = ssDef.getBlockSize();
    }
    // Create a checksum option honoring user input as much as possible.
    // If bytesPerChecksum is specified, it will override the one set in
    // checksumOpt. Any missing value will be filled in using the default.
    ChecksumOpt defaultOpt = new ChecksumOpt(ssDef.getChecksumType(), ssDef.getBytesPerChecksum());
    checksumOpt = ChecksumOpt.processChecksumOpt(defaultOpt, checksumOpt, bytesPerChecksum);
    if (bufferSize == -1) {
        bufferSize = ssDef.getFileBufferSize();
    }
    if (replication == -1) {
        replication = ssDef.getReplication();
    }
    if (createParent == null) {
        createParent = false;
    }
    if (blockSize % bytesPerChecksum != 0) {
        throw new HadoopIllegalArgumentException("blockSize should be a multiple of checksumsize");
    }
    return this.createInternal(f, createFlag, permission, bufferSize, replication, blockSize, progress, checksumOpt, createParent);
}
Also used: HadoopIllegalArgumentException (org.apache.hadoop.HadoopIllegalArgumentException), IOException (java.io.IOException), ChecksumOpt (org.apache.hadoop.fs.Options.ChecksumOpt), CreateOpts (org.apache.hadoop.fs.Options.CreateOpts), Progressable (org.apache.hadoop.util.Progressable), FsPermission (org.apache.hadoop.fs.permission.FsPermission)
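
From the caller's side, these options arrive through FileContext.create, whose varargs are exactly the CreateOpts consumed above. Below is a minimal caller-side sketch; the path and sizes are illustrative. Passing the same option twice, e.g. two blockSize opts, would trip one of the "set multiple times" checks.

import java.io.IOException;
import java.util.EnumSet;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CreateFlag;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.Options.CreateOpts;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;

public class CreateOptsSketch {
    public static void main(String[] args) throws IOException {
        FileContext fc = FileContext.getFileContext(new Configuration());
        // Each option may appear at most once; repeating, say,
        // CreateOpts.blockSize(...) raises HadoopIllegalArgumentException.
        FSDataOutputStream out = fc.create(
            new Path("/tmp/example.txt"),              // illustrative path
            EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE),
            CreateOpts.perms(FsPermission.getFileDefault()),
            CreateOpts.blockSize(128L * 1024 * 1024),  // 128 MB
            CreateOpts.repFac((short) 3));
        try {
            out.write("hello".getBytes("UTF-8"));
        } finally {
            out.close();
        }
    }
}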

Aggregations

HadoopIllegalArgumentException (org.apache.hadoop.HadoopIllegalArgumentException): 20 usages
IOException (java.io.IOException): 4 usages
FileNotFoundException (java.io.FileNotFoundException): 3 usages
InetSocketAddress (java.net.InetSocketAddress): 3 usages
FileSystem (org.apache.hadoop.fs.FileSystem): 3 usages
Path (org.apache.hadoop.fs.Path): 3 usages
BlockStoragePolicy (org.apache.hadoop.hdfs.protocol.BlockStoragePolicy): 3 usages
ArrayList (java.util.ArrayList): 2 usages
Configuration (org.apache.hadoop.conf.Configuration): 2 usages
ChecksumOpt (org.apache.hadoop.fs.Options.ChecksumOpt): 2 usages
XAttr (org.apache.hadoop.fs.XAttr): 2 usages
FsPermission (org.apache.hadoop.fs.permission.FsPermission): 2 usages
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 2 usages
AccessControlException (org.apache.hadoop.security.AccessControlException): 2 usages
UserGroupInformation (org.apache.hadoop.security.UserGroupInformation): 2 usages
Test (org.junit.Test): 2 usages
URISyntaxException (java.net.URISyntaxException): 1 usage
ByteBuffer (java.nio.ByteBuffer): 1 usage
Date (java.util.Date): 1 usage
ParseException (org.apache.commons.cli.ParseException): 1 usage