use of org.apache.hadoop.HadoopIllegalArgumentException in project hadoop by apache.
In class NNBench, method parseInputs.
/**
 * Parse input arguments.
 *
 * @param args array of command line parameters to be parsed
 */
private void parseInputs(final String[] args) {
  // If there are no command line arguments, exit
  if (args.length == 0) {
    displayUsage();
    throw new HadoopIllegalArgumentException("Give valid inputs");
  }
  // Parse command line args
  for (int i = 0; i < args.length; i++) {
    if (args[i].equals("-operation")) {
      operation = args[++i];
    } else if (args[i].equals("-maps")) {
      checkArgs(i + 1, args.length);
      numberOfMaps = Long.parseLong(args[++i]);
    } else if (args[i].equals("-reduces")) {
      checkArgs(i + 1, args.length);
      numberOfReduces = Long.parseLong(args[++i]);
    } else if (args[i].equals("-startTime")) {
      checkArgs(i + 1, args.length);
      startTime = Long.parseLong(args[++i]) * 1000;
    } else if (args[i].equals("-blockSize")) {
      checkArgs(i + 1, args.length);
      blockSize = Long.parseLong(args[++i]);
    } else if (args[i].equals("-bytesToWrite")) {
      checkArgs(i + 1, args.length);
      bytesToWrite = Integer.parseInt(args[++i]);
    } else if (args[i].equals("-bytesPerChecksum")) {
      checkArgs(i + 1, args.length);
      bytesPerChecksum = Long.parseLong(args[++i]);
    } else if (args[i].equals("-numberOfFiles")) {
      checkArgs(i + 1, args.length);
      numberOfFiles = Long.parseLong(args[++i]);
    } else if (args[i].equals("-replicationFactorPerFile")) {
      checkArgs(i + 1, args.length);
      replicationFactorPerFile = Short.parseShort(args[++i]);
    } else if (args[i].equals("-baseDir")) {
      checkArgs(i + 1, args.length);
      baseDir = args[++i];
    } else if (args[i].equals("-readFileAfterOpen")) {
      checkArgs(i + 1, args.length);
      readFileAfterOpen = Boolean.parseBoolean(args[++i]);
    } else if (args[i].equals("-help")) {
      displayUsage();
      isHelpMessage = true;
    }
  }
  LOG.info("Test Inputs: ");
  LOG.info(" Test Operation: " + operation);
  LOG.info(" Start time: " + sdf.format(new Date(startTime)));
  LOG.info(" Number of maps: " + numberOfMaps);
  LOG.info(" Number of reduces: " + numberOfReduces);
  LOG.info(" Block Size: " + blockSize);
  LOG.info(" Bytes to write: " + bytesToWrite);
  LOG.info(" Bytes per checksum: " + bytesPerChecksum);
  LOG.info(" Number of files: " + numberOfFiles);
  LOG.info(" Replication factor: " + replicationFactorPerFile);
  LOG.info(" Base dir: " + baseDir);
  LOG.info(" Read file after open: " + readFileAfterOpen);
  // Set user-defined parameters, so the map method can access the values
  getConf().set("test.nnbench.operation", operation);
  getConf().setLong("test.nnbench.maps", numberOfMaps);
  getConf().setLong("test.nnbench.reduces", numberOfReduces);
  getConf().setLong("test.nnbench.starttime", startTime);
  getConf().setLong("test.nnbench.blocksize", blockSize);
  getConf().setInt("test.nnbench.bytestowrite", bytesToWrite);
  getConf().setLong("test.nnbench.bytesperchecksum", bytesPerChecksum);
  getConf().setLong("test.nnbench.numberoffiles", numberOfFiles);
  getConf().setInt("test.nnbench.replicationfactor", (int) replicationFactorPerFile);
  getConf().set("test.nnbench.basedir", baseDir);
  getConf().setBoolean("test.nnbench.readFileAfterOpen", readFileAfterOpen);
  getConf().set("test.nnbench.datadir.name", DATA_DIR_NAME);
  getConf().set("test.nnbench.outputdir.name", OUTPUT_DIR_NAME);
  getConf().set("test.nnbench.controldir.name", CONTROL_DIR_NAME);
}
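The checkArgs helper called above is not part of this snippet; the following is a minimal sketch of what it plausibly does, assuming it only verifies that a value follows the current option and otherwise prints usage and aborts (the exception message is an assumption):

// Hypothetical sketch, not the verbatim Hadoop helper: ensure the option at
// the previous position is followed by a value argument.
private static void checkArgs(final int index, final int length) {
  if (index == length) {
    displayUsage();
    throw new HadoopIllegalArgumentException("Not enough arguments");
  }
}

Note that the -operation branch above skips this check, so a trailing -operation with no value fails with an ArrayIndexOutOfBoundsException rather than the usage message.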
use of org.apache.hadoop.HadoopIllegalArgumentException in project hadoop by apache.
In class DistCpSync, method preSyncCheck.
/**
 * Check that three conditions are met before sync:
 * 1. There is only one source directory.
 * 2. Both the source and the target file system are DFS.
 * 3. There is no change between the 'from' snapshot and the current state
 *    of the target file system.
 * Throw an exception if either of the first two isn't met; return false,
 * falling back to default distcp, if the third isn't met.
 */
private boolean preSyncCheck() throws IOException {
  List<Path> sourcePaths = inputOptions.getSourcePaths();
  if (sourcePaths.size() != 1) {
    // we only support one source dir which must be a snapshottable directory
    throw new IllegalArgumentException(sourcePaths.size() + " source paths are provided");
  }
  final Path sourceDir = sourcePaths.get(0);
  final Path targetDir = inputOptions.getTargetPath();
  final FileSystem srcFs = sourceDir.getFileSystem(conf);
  final FileSystem tgtFs = targetDir.getFileSystem(conf);
  final FileSystem snapshotDiffFs = isRdiff() ? tgtFs : srcFs;
  final Path snapshotDiffDir = isRdiff() ? targetDir : sourceDir;
  // both the source and the target file system must be DistributedFileSystem
  if (!(srcFs instanceof DistributedFileSystem) || !(tgtFs instanceof DistributedFileSystem)) {
    throw new IllegalArgumentException("The FileSystems needs to" + " be DistributedFileSystem for using snapshot-diff-based distcp");
  }
  final DistributedFileSystem targetFs = (DistributedFileSystem) tgtFs;
  // make sure targetFS has no change between from and the current states
  if (!checkNoChange(targetFs, targetDir)) {
    // set the source path using the snapshot path
    inputOptions.setSourcePaths(Arrays.asList(getSnapshotPath(sourceDir, inputOptions.getToSnapshot())));
    return false;
  }
  final String from = getSnapshotName(inputOptions.getFromSnapshot());
  final String to = getSnapshotName(inputOptions.getToSnapshot());
  try {
    final FileStatus fromSnapshotStat = snapshotDiffFs.getFileStatus(getSnapshotPath(snapshotDiffDir, from));
    final FileStatus toSnapshotStat = snapshotDiffFs.getFileStatus(getSnapshotPath(snapshotDiffDir, to));
    if (isRdiff()) {
      // If fromSnapshot isn't current dir then do a time check
      if (!from.equals("") && fromSnapshotStat.getModificationTime() < toSnapshotStat.getModificationTime()) {
        throw new HadoopIllegalArgumentException("Snapshot " + from + " should be newer than " + to);
      }
    } else {
      // If toSnapshot isn't current dir then do a time check
      if (!to.equals("") && fromSnapshotStat.getModificationTime() > toSnapshotStat.getModificationTime()) {
        throw new HadoopIllegalArgumentException("Snapshot " + to + " should be newer than " + from);
      }
    }
  } catch (FileNotFoundException nfe) {
    throw new InvalidInputException("Input snapshot is not found", nfe);
  }
  return true;
}
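The getSnapshotPath and getSnapshotName helpers referenced above live elsewhere in DistCpSync; below is a plausible sketch of getSnapshotPath, assuming an empty snapshot name means the current directory state and any other name maps into the directory's .snapshot subtree:

// Illustrative sketch only, not the verbatim helper: resolve a snapshot name
// to either the live directory or its <dir>/.snapshot/<name> path.
private Path getSnapshotPath(Path inputDir, String snapshotName) {
  if (snapshotName == null || snapshotName.isEmpty()) {
    return inputDir;
  }
  return new Path(inputDir,
      HdfsConstants.DOT_SNAPSHOT_DIR + Path.SEPARATOR + snapshotName);
}

Under this assumption, the modification-time comparison is skipped when from or to is the empty string because the empty name denotes the current directory state rather than a named snapshot, so the ordering check does not apply.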
use of org.apache.hadoop.HadoopIllegalArgumentException in project hadoop by apache.
In class XAttrHelper, method buildXAttr.
/**
 * Build <code>XAttr</code> from a name with prefix and a value.
 * The name cannot be null; the value can be null. The name and prefix
 * are validated.
 * Both name and namespace are case sensitive.
 */
public static XAttr buildXAttr(String name, byte[] value) {
  Preconditions.checkNotNull(name, "XAttr name cannot be null.");
  final int prefixIndex = name.indexOf(".");
  if (prefixIndex < 3) {
    // Prefix length is at least 3.
    throw new HadoopIllegalArgumentException("An XAttr name must be " + "prefixed with user/trusted/security/system/raw, followed by a '.'");
  } else if (prefixIndex == name.length() - 1) {
    throw new HadoopIllegalArgumentException("XAttr name cannot be empty.");
  }
  NameSpace ns;
  final String prefix = name.substring(0, prefixIndex);
  if (StringUtils.equalsIgnoreCase(prefix, NameSpace.USER.toString())) {
    ns = NameSpace.USER;
  } else if (StringUtils.equalsIgnoreCase(prefix, NameSpace.TRUSTED.toString())) {
    ns = NameSpace.TRUSTED;
  } else if (StringUtils.equalsIgnoreCase(prefix, NameSpace.SYSTEM.toString())) {
    ns = NameSpace.SYSTEM;
  } else if (StringUtils.equalsIgnoreCase(prefix, NameSpace.SECURITY.toString())) {
    ns = NameSpace.SECURITY;
  } else if (StringUtils.equalsIgnoreCase(prefix, NameSpace.RAW.toString())) {
    ns = NameSpace.RAW;
  } else {
    throw new HadoopIllegalArgumentException("An XAttr name must be " + "prefixed with user/trusted/security/system/raw, followed by a '.'");
  }
  return (new XAttr.Builder()).setNameSpace(ns)
      .setName(name.substring(prefixIndex + 1))
      .setValue(value).build();
}
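A brief illustrative use; the attribute name and value here are invented for the example:

// "user.checksum" resolves to NameSpace.USER and the stored XAttr name
// becomes "checksum" (prefix and '.' stripped).
XAttr attr = XAttrHelper.buildXAttr("user.checksum",
    "abc123".getBytes(StandardCharsets.UTF_8));
// A name with no recognized prefix ("color") or an empty name after the
// prefix ("user.") throws HadoopIllegalArgumentException instead.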
use of org.apache.hadoop.HadoopIllegalArgumentException in project hadoop by apache.
In class DfsClientConf, method createChecksum.
/** create a DataChecksum with the given option. */
public DataChecksum createChecksum(ChecksumOpt userOpt) {
  // Fill in any missing field with the default.
  ChecksumOpt opt = ChecksumOpt.processChecksumOpt(defaultChecksumOpt, userOpt);
  DataChecksum dataChecksum = DataChecksum.newDataChecksum(opt.getChecksumType(), opt.getBytesPerChecksum());
  if (dataChecksum == null) {
    throw new HadoopIllegalArgumentException("Invalid checksum type: userOpt=" + userOpt + ", default=" + defaultChecksumOpt + ", effective=null");
  }
  return dataChecksum;
}
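An illustrative call, assuming dfsClientConf is an already constructed DfsClientConf instance:

// Ask for CRC32C with 512 bytes per checksum; any field left unspecified is
// filled in from defaultChecksumOpt by processChecksumOpt.
ChecksumOpt opt = new ChecksumOpt(DataChecksum.Type.CRC32C, 512);
DataChecksum checksum = dfsClientConf.createChecksum(opt);

// Passing null falls back to the configured defaults entirely.
DataChecksum defaultChecksum = dfsClientConf.createChecksum(null);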
use of org.apache.hadoop.HadoopIllegalArgumentException in project hadoop by apache.
In class BlockManager, method getBlocksWithLocations.
/** Get all blocks with location information from a datanode. */
public BlocksWithLocations getBlocksWithLocations(final DatanodeID datanode, final long size) throws UnregisteredNodeException {
  final DatanodeDescriptor node = getDatanodeManager().getDatanode(datanode);
  if (node == null) {
    blockLog.warn("BLOCK* getBlocks: Asking for blocks from an" + " unrecorded node {}", datanode);
    throw new HadoopIllegalArgumentException("Datanode " + datanode + " not found.");
  }
  int numBlocks = node.numBlocks();
  if (numBlocks == 0) {
    return new BlocksWithLocations(new BlockWithLocations[0]);
  }
  Iterator<BlockInfo> iter = node.getBlockIterator();
  // starting from a random block
  int startBlock = ThreadLocalRandom.current().nextInt(numBlocks);
  // skip blocks
  for (int i = 0; i < startBlock; i++) {
    iter.next();
  }
  List<BlockWithLocations> results = new ArrayList<BlockWithLocations>();
  long totalSize = 0;
  BlockInfo curBlock;
  while (totalSize < size && iter.hasNext()) {
    curBlock = iter.next();
    if (!curBlock.isComplete()) {
      continue;
    }
    if (curBlock.getNumBytes() < getBlocksMinBlockSize) {
      continue;
    }
    totalSize += addBlock(curBlock, results);
  }
  if (totalSize < size) {
    // start from the beginning
    iter = node.getBlockIterator();
    for (int i = 0; i < startBlock && totalSize < size; i++) {
      curBlock = iter.next();
      if (!curBlock.isComplete()) {
        continue;
      }
      if (curBlock.getNumBytes() < getBlocksMinBlockSize) {
        continue;
      }
      totalSize += addBlock(curBlock, results);
    }
  }
  return new BlocksWithLocations(results.toArray(new BlockWithLocations[results.size()]));
}
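For context, this method serves NamenodeProtocol#getBlocks, which the Balancer uses to request a batch of blocks (up to roughly size bytes) stored on a particular datanode. A caller-side sketch, where namenode, datanodeInfo, and process are assumed placeholders:

// Ask the NameNode for about 2 GB worth of blocks residing on one datanode.
BlocksWithLocations blocks =
    namenode.getBlocks(datanodeInfo, 2L * 1024 * 1024 * 1024);
for (BlockWithLocations blk : blocks.getBlocks()) {
  // each entry pairs a block with the datanodes/storages holding its replicas
  process(blk);
}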