Use of java.util.Date in project hadoop by apache: class NNBench, method parseInputs.
/**
 * Parse input arguments.
 *
 * @param args array of command line parameters to be parsed
 */
private void parseInputs(final String[] args) {
  // If there are no command line arguments, exit
  if (args.length == 0) {
    displayUsage();
    throw new HadoopIllegalArgumentException("Give valid inputs");
  }
  // Parse command line args
  for (int i = 0; i < args.length; i++) {
    if (args[i].equals("-operation")) {
      checkArgs(i + 1, args.length);
      operation = args[++i];
    } else if (args[i].equals("-maps")) {
      checkArgs(i + 1, args.length);
      numberOfMaps = Long.parseLong(args[++i]);
    } else if (args[i].equals("-reduces")) {
      checkArgs(i + 1, args.length);
      numberOfReduces = Long.parseLong(args[++i]);
    } else if (args[i].equals("-startTime")) {
      checkArgs(i + 1, args.length);
      // -startTime is given in seconds; Date works in milliseconds
      startTime = Long.parseLong(args[++i]) * 1000;
    } else if (args[i].equals("-blockSize")) {
      checkArgs(i + 1, args.length);
      blockSize = Long.parseLong(args[++i]);
    } else if (args[i].equals("-bytesToWrite")) {
      checkArgs(i + 1, args.length);
      bytesToWrite = Integer.parseInt(args[++i]);
    } else if (args[i].equals("-bytesPerChecksum")) {
      checkArgs(i + 1, args.length);
      bytesPerChecksum = Long.parseLong(args[++i]);
    } else if (args[i].equals("-numberOfFiles")) {
      checkArgs(i + 1, args.length);
      numberOfFiles = Long.parseLong(args[++i]);
    } else if (args[i].equals("-replicationFactorPerFile")) {
      checkArgs(i + 1, args.length);
      replicationFactorPerFile = Short.parseShort(args[++i]);
    } else if (args[i].equals("-baseDir")) {
      checkArgs(i + 1, args.length);
      baseDir = args[++i];
    } else if (args[i].equals("-readFileAfterOpen")) {
      checkArgs(i + 1, args.length);
      readFileAfterOpen = Boolean.parseBoolean(args[++i]);
    } else if (args[i].equals("-help")) {
      displayUsage();
      isHelpMessage = true;
    }
  }
  LOG.info("Test Inputs: ");
  LOG.info(" Test Operation: " + operation);
  LOG.info(" Start time: " + sdf.format(new Date(startTime)));
  LOG.info(" Number of maps: " + numberOfMaps);
  LOG.info(" Number of reduces: " + numberOfReduces);
  LOG.info(" Block Size: " + blockSize);
  LOG.info(" Bytes to write: " + bytesToWrite);
  LOG.info(" Bytes per checksum: " + bytesPerChecksum);
  LOG.info(" Number of files: " + numberOfFiles);
  LOG.info(" Replication factor: " + replicationFactorPerFile);
  LOG.info(" Base dir: " + baseDir);
  LOG.info(" Read file after open: " + readFileAfterOpen);
  // Set user-defined parameters, so the map method can access the values
  getConf().set("test.nnbench.operation", operation);
  getConf().setLong("test.nnbench.maps", numberOfMaps);
  getConf().setLong("test.nnbench.reduces", numberOfReduces);
  getConf().setLong("test.nnbench.starttime", startTime);
  getConf().setLong("test.nnbench.blocksize", blockSize);
  getConf().setInt("test.nnbench.bytestowrite", bytesToWrite);
  getConf().setLong("test.nnbench.bytesperchecksum", bytesPerChecksum);
  getConf().setLong("test.nnbench.numberoffiles", numberOfFiles);
  getConf().setInt("test.nnbench.replicationfactor", (int) replicationFactorPerFile);
  getConf().set("test.nnbench.basedir", baseDir);
  getConf().setBoolean("test.nnbench.readFileAfterOpen", readFileAfterOpen);
  getConf().set("test.nnbench.datadir.name", DATA_DIR_NAME);
  getConf().set("test.nnbench.outputdir.name", OUTPUT_DIR_NAME);
  getConf().set("test.nnbench.controldir.name", CONTROL_DIR_NAME);
}
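The method above converts the -startTime argument from seconds since the epoch to the milliseconds that java.util.Date expects, then formats it for the log via sdf. A standalone sketch of that same conversion and formatting step (not part of NNBench; the date pattern passed to SimpleDateFormat here is an assumption, since the snippet does not show how sdf is constructed):

import java.text.SimpleDateFormat;
import java.util.Date;

public class StartTimeFormatDemo {
  public static void main(String[] args) {
    // -startTime style input: seconds since the epoch
    long startTimeSeconds = args.length > 0 ? Long.parseLong(args[0]) : 0L;
    long startTime = startTimeSeconds * 1000; // Date wants milliseconds
    // assumed pattern; NNBench's actual sdf is defined elsewhere in the class
    SimpleDateFormat sdf = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss");
    System.out.println(" Start time: " + sdf.format(new Date(startTime)));
  }
}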
Use of java.util.Date in project hadoop by apache: class NNBenchWithoutMR, method main.
/**
 * This launches a given namenode operation (<code>-operation</code>),
 * starting at a given time (<code>-startTime</code>). The files used
 * by the openRead, rename, and delete operations are the same files
 * created by the createWrite operation. Typically, the program
 * would be run four times, once for each operation in this order:
 * createWrite, openRead, rename, delete.
 *
 * <pre>
 * Usage: nnbench
 *   -operation <one of createWrite, openRead, rename, or delete>
 *   -baseDir <base output/input DFS path>
 *   -startTime <time to start, given in seconds from the epoch>
 *   -numFiles <number of files to create, read, rename, or delete>
 *   -replicationFactorPerFile <replication factor for the files, default is 1>
 *   -blocksPerFile <number of blocks to create per file>
 *   [-bytesPerBlock <number of bytes to write to each block, default is 1>]
 *   [-bytesPerChecksum <value for io.bytes.per.checksum>]
 * </pre>
 *
 * @param args array of the program's command line arguments
 * @throws IOException indicates a problem with test startup
 */
public static void main(String[] args) throws IOException {
  String version = "NameNodeBenchmark.0.3";
  System.out.println(version);
  int bytesPerChecksum = -1;
  String usage =
      "Usage: nnbench "
          + " -operation <one of createWrite, openRead, rename, or delete>\n "
          + " -baseDir <base output/input DFS path>\n "
          + " -startTime <time to start, given in seconds from the epoch>\n"
          + " -numFiles <number of files to create>\n "
          + " -replicationFactorPerFile <Replication factor for the files, default is 1>\n"
          + " -blocksPerFile <number of blocks to create per file>\n"
          + " [-bytesPerBlock <number of bytes to write to each block, default is 1>]\n"
          + " [-bytesPerChecksum <value for io.bytes.per.checksum>]\n"
          + "Note: bytesPerBlock MUST be a multiple of bytesPerChecksum\n";
  String operation = null;
  // parse command line
  for (int i = 0; i < args.length; i++) {
    if (args[i].equals("-baseDir")) {
      baseDir = new Path(args[++i]);
    } else if (args[i].equals("-numFiles")) {
      numFiles = Integer.parseInt(args[++i]);
    } else if (args[i].equals("-blocksPerFile")) {
      blocksPerFile = Integer.parseInt(args[++i]);
    } else if (args[i].equals("-bytesPerBlock")) {
      bytesPerBlock = Long.parseLong(args[++i]);
    } else if (args[i].equals("-bytesPerChecksum")) {
      bytesPerChecksum = Integer.parseInt(args[++i]);
    } else if (args[i].equals("-replicationFactorPerFile")) {
      replicationFactorPerFile = Short.parseShort(args[++i]);
    } else if (args[i].equals("-startTime")) {
      // seconds from the epoch on the command line, milliseconds internally
      startTime = Long.parseLong(args[++i]) * 1000;
    } else if (args[i].equals("-operation")) {
      operation = args[++i];
    } else {
      System.out.println(usage);
      System.exit(-1);
    }
  }
  bytesPerFile = bytesPerBlock * blocksPerFile;
  JobConf jobConf = new JobConf(new Configuration(), NNBench.class);
  if (bytesPerChecksum < 0) {
    // if it is not set in cmdline
    bytesPerChecksum = jobConf.getInt("io.bytes.per.checksum", 512);
  }
  jobConf.set("io.bytes.per.checksum", Integer.toString(bytesPerChecksum));
  System.out.println("Inputs: ");
  System.out.println(" operation: " + operation);
  System.out.println(" baseDir: " + baseDir);
  System.out.println(" startTime: " + startTime);
  System.out.println(" numFiles: " + numFiles);
  System.out.println(" replicationFactorPerFile: " + replicationFactorPerFile);
  System.out.println(" blocksPerFile: " + blocksPerFile);
  System.out.println(" bytesPerBlock: " + bytesPerBlock);
  System.out.println(" bytesPerChecksum: " + bytesPerChecksum);
  // verify args
  if (operation == null || baseDir == null || numFiles < 1 || blocksPerFile < 1
      || bytesPerBlock < 0 || bytesPerBlock % bytesPerChecksum != 0) {
    System.err.println(usage);
    System.exit(-1);
  }
  fileSys = FileSystem.get(jobConf);
  String uniqueId = java.net.InetAddress.getLocalHost().getHostName();
  taskDir = new Path(baseDir, uniqueId);
  // initialize buffer used for writing/reading file
  buffer = new byte[(int) Math.min(bytesPerFile, 32768L)];
  Date execTime;
  Date endTime;
  long duration;
  int exceptions = 0;
  // wait for coordinated start time
  barrier();
  execTime = new Date();
  System.out.println("Job started: " + startTime);
  if (operation.equals("createWrite")) {
    if (!fileSys.mkdirs(taskDir)) {
      throw new IOException("Mkdirs failed to create " + taskDir.toString());
    }
    exceptions = createWrite();
  } else if (operation.equals("openRead")) {
    exceptions = openRead();
  } else if (operation.equals("rename")) {
    exceptions = rename();
  } else if (operation.equals("delete")) {
    exceptions = delete();
  } else {
    System.err.println(usage);
    System.exit(-1);
  }
  endTime = new Date();
  System.out.println("Job ended: " + endTime);
  // elapsed wall-clock time in whole seconds
  duration = (endTime.getTime() - execTime.getTime()) / 1000;
  System.out.println("The " + operation + " job took " + duration + " seconds.");
  System.out.println("The job recorded " + exceptions + " exceptions.");
}
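The barrier() call that implements the coordinated start is not shown in this excerpt. A minimal sketch of that pattern using java.util.Date, assuming a start time in milliseconds as parsed above; the class and method names here are illustrative, not the actual NNBenchWithoutMR implementation:

import java.util.Date;

public final class StartBarrier {
  // Sleep until the wall clock reaches startTimeMs (milliseconds since the
  // epoch, i.e. the parsed -startTime value multiplied by 1000).
  public static void await(long startTimeMs) {
    long now = new Date().getTime();
    while (now < startTimeMs) {
      try {
        // sleep in short slices so a corrected system clock is noticed
        Thread.sleep(Math.min(1000L, startTimeMs - now));
      } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
        return;
      }
      now = new Date().getTime();
    }
  }
}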
Use of java.util.Date in project hadoop by apache: class FileBench, method readBench.
// InputFormat instantiation
@SuppressWarnings("unchecked")
static long readBench(JobConf conf) throws IOException {
  InputFormat inf = conf.getInputFormat();
  final String fn = conf.get("test.filebench.name", "");
  Path pin = new Path(FileInputFormat.getInputPaths(conf)[0], fn);
  FileStatus in = pin.getFileSystem(conf).getFileStatus(pin);
  RecordReader rr = inf.getRecordReader(
      new FileSplit(pin, 0, in.getLen(), (String[]) null), conf, Reporter.NULL);
  try {
    Object key = rr.createKey();
    Object val = rr.createValue();
    Date start = new Date();
    // drain every record; only the elapsed time matters here
    while (rr.next(key, val)) ;
    Date end = new Date();
    return end.getTime() - start.getTime(); // milliseconds
  } finally {
    rr.close();
  }
}
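readBench measures elapsed time by subtracting two java.util.Date timestamps, which reflects the wall clock and so can be skewed by clock adjustments mid-run. For comparison, a hypothetical helper (not part of FileBench) that takes the same measurement with the monotonic System.nanoTime clock:

public final class Stopwatch {
  // Run the work and return elapsed milliseconds measured with the
  // monotonic nanoTime clock, which is immune to wall-clock adjustments.
  public static long timeMillis(Runnable work) {
    long start = System.nanoTime();
    work.run();
    return (System.nanoTime() - start) / 1_000_000L;
  }
}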
Use of java.util.Date in project hadoop by apache: class FileBench, method writeBench.
// OutputFormat instantiation
@SuppressWarnings("unchecked")
static long writeBench(JobConf conf) throws IOException {
  // default file length is 5 GB; the long literal avoids silent int overflow
  long filelen = conf.getLong("filebench.file.bytes", 5L * 1024 * 1024 * 1024);
  Text key = new Text();
  Text val = new Text();
  final String fn = conf.get("test.filebench.name", "");
  final Path outd = FileOutputFormat.getOutputPath(conf);
  conf.set("mapred.work.output.dir", outd.toString());
  OutputFormat outf = conf.getOutputFormat();
  RecordWriter<Text, Text> rw =
      outf.getRecordWriter(outd.getFileSystem(conf), conf, fn, Reporter.NULL);
  try {
    long acc = 0L;
    Date start = new Date();
    for (int i = 0; acc < filelen; ++i) {
      // cycle through the sample keys/values until filelen bytes are written
      i %= keys.length;
      key.set(keys[i]);
      val.set(values[i]);
      rw.write(key, val);
      acc += keys[i].length();
      acc += values[i].length();
    }
    Date end = new Date();
    return end.getTime() - start.getTime(); // milliseconds
  } finally {
    rw.close(Reporter.NULL);
  }
}
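A hedged usage sketch for writeBench: the configuration keys (filebench.file.bytes, test.filebench.name) and the output-path requirement come from the snippet above, while the driver class, path, and size below are illustrative. Since writeBench is package-private, a real caller would live in the same package as FileBench.

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapred.FileOutputFormat;
import org.apache.hadoop.mapred.JobConf;

public class WriteBenchDriver {
  public static void main(String[] args) throws Exception {
    JobConf conf = new JobConf(WriteBenchDriver.class);
    conf.setLong("filebench.file.bytes", 64L * 1024 * 1024); // ~64 MB instead of the 5 GB default
    conf.set("test.filebench.name", "bench.out");            // file name under the output dir
    FileOutputFormat.setOutputPath(conf, new Path("/tmp/filebench")); // example path
    long elapsedMs = FileBench.writeBench(conf);
    System.out.println("writeBench took " + elapsedMs + " ms");
  }
}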
Use of java.util.Date in project hadoop by apache: class BigMapOutput, method run.
public int run(String[] args) throws Exception {
  if (args.length < 4) {
    // input-dir should contain a huge file (> 2GB)
    usage();
  }
  Path bigMapInput = null;
  Path outputPath = null;
  boolean createInput = false;
  // default of 3GB (>2GB)
  long fileSizeInMB = 3 * 1024;
  for (int i = 0; i < args.length; ++i) {
    if ("-input".equals(args[i])) {
      bigMapInput = new Path(args[++i]);
    } else if ("-output".equals(args[i])) {
      outputPath = new Path(args[++i]);
    } else if ("-create".equals(args[i])) {
      createInput = true;
      fileSizeInMB = Long.parseLong(args[++i]);
    } else {
      usage();
    }
  }
  FileSystem fs = FileSystem.get(getConf());
  JobConf jobConf = new JobConf(getConf(), BigMapOutput.class);
  jobConf.setJobName("BigMapOutput");
  jobConf.setInputFormat(NonSplitableSequenceFileInputFormat.class);
  jobConf.setOutputFormat(SequenceFileOutputFormat.class);
  FileInputFormat.setInputPaths(jobConf, bigMapInput);
  if (fs.exists(outputPath)) {
    fs.delete(outputPath, true);
  }
  FileOutputFormat.setOutputPath(jobConf, outputPath);
  jobConf.setMapperClass(IdentityMapper.class);
  jobConf.setReducerClass(IdentityReducer.class);
  jobConf.setOutputKeyClass(BytesWritable.class);
  jobConf.setOutputValueClass(BytesWritable.class);
  if (createInput) {
    createBigMapInputFile(jobConf, fs, bigMapInput, fileSizeInMB);
  }
  Date startTime = new Date();
  System.out.println("Job started: " + startTime);
  JobClient.runJob(jobConf);
  Date endTime = new Date();
  System.out.println("Job ended: " + endTime);
  return 0;
}
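Since run(String[]) is a Tool entry point, the job would normally be launched through ToolRunner. A hedged invocation sketch; the launcher class, paths, and the 3072 MB create size are example values, not taken from the Hadoop sources:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.util.ToolRunner;

public class BigMapOutputLauncher {
  public static void main(String[] args) throws Exception {
    // -create generates the input file first; size is given in MB
    int rc = ToolRunner.run(new Configuration(), new BigMapOutput(),
        new String[] {"-create", "3072", "-input", "/bench/in", "-output", "/bench/out"});
    System.exit(rc);
  }
}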