Use of org.apache.hadoop.fs.ChecksumFileSystem in project hadoop by apache.
From the class BenchmarkThroughput, method run:
@Override
public int run(String[] args) throws IOException {
  // silence the minidfs cluster
  Log hadoopLog = LogFactory.getLog("org");
  if (hadoopLog instanceof Log4JLogger) {
    GenericTestUtils.setLogLevel(hadoopLog, Level.WARN);
  }
  int reps = 1;
  if (args.length == 1) {
    try {
      reps = Integer.parseInt(args[0]);
    } catch (NumberFormatException e) {
      printUsage();
      return -1;
    }
  } else if (args.length > 1) {
    printUsage();
    return -1;
  }
  Configuration conf = getConf();
  // the size of the file to write
  long SIZE = conf.getLong("dfsthroughput.file.size", 10L * 1024 * 1024 * 1024);
  BUFFER_SIZE = conf.getInt("dfsthroughput.buffer.size", 4 * 1024);
  String localDir = conf.get("mapred.temp.dir");
  if (localDir == null) {
    localDir = conf.get("hadoop.tmp.dir");
    conf.set("mapred.temp.dir", localDir);
  }
  dir = new LocalDirAllocator("mapred.temp.dir");
  System.setProperty("test.build.data", localDir);
  System.out.println("Local = " + localDir);
  ChecksumFileSystem checkedLocal = FileSystem.getLocal(conf);
  FileSystem rawLocal = checkedLocal.getRawFileSystem();
  for (int i = 0; i < reps; ++i) {
    writeAndReadLocalFile("local", conf, SIZE);
    writeAndReadFile(rawLocal, "raw", conf, SIZE);
    writeAndReadFile(checkedLocal, "checked", conf, SIZE);
  }
  MiniDFSCluster cluster = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).racks(new String[] { "/foo" }).build();
    cluster.waitActive();
    FileSystem dfs = cluster.getFileSystem();
    for (int i = 0; i < reps; ++i) {
      writeAndReadFile(dfs, "dfs", conf, SIZE);
    }
  } finally {
    if (cluster != null) {
      cluster.shutdown();
      // clean up minidfs junk
      rawLocal.delete(new Path(localDir, "dfs"), true);
    }
  }
  return 0;
}
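BenchmarkThroughput implements Hadoop's Tool interface (hence the getConf() call and the run(String[]) signature above), so it is normally launched through ToolRunner. The driver below is a hypothetical sketch, not part of the Hadoop source: it assumes BenchmarkThroughput is on the classpath, and it shows how the dfsthroughput.file.size and dfsthroughput.buffer.size knobs read in run() could be overridden before launching.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.util.ToolRunner;

public class BenchmarkDriver {  // hypothetical driver class
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Shrink the file from the 10 GB default to 64 MB (illustrative values only).
    conf.setLong("dfsthroughput.file.size", 64L * 1024 * 1024);
    conf.setInt("dfsthroughput.buffer.size", 64 * 1024);
    // ToolRunner strips generic options, injects conf, and calls run(remainingArgs);
    // per the parsing above, a single remaining argument is the repetition count.
    int exitCode = ToolRunner.run(conf, new BenchmarkThroughput(), args);
    System.exit(exitCode);
  }
}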
Use of org.apache.hadoop.fs.ChecksumFileSystem in project hadoop by apache.
From the class TestSeekBug, method smallReadSeek:
/*
* Read some data, skip a few bytes and read more. HADOOP-922.
*/
private void smallReadSeek(FileSystem fileSys, Path name) throws IOException {
  if (fileSys instanceof ChecksumFileSystem) {
    fileSys = ((ChecksumFileSystem) fileSys).getRawFileSystem();
  }
  // Make the buffer size small to trigger code for HADOOP-922
  FSDataInputStream stmRaw = fileSys.open(name, 1);
  byte[] expected = new byte[ONEMB];
  Random rand = new Random(seed);
  rand.nextBytes(expected);
  // Issue a simple read first.
  byte[] actual = new byte[128];
  stmRaw.seek(100000);
  stmRaw.read(actual, 0, actual.length);
  checkAndEraseData(actual, 100000, expected, "First Small Read Test");
  // now do a small seek of 4 bytes, within the same block.
  int newpos1 = 100000 + 128 + 4;
  stmRaw.seek(newpos1);
  stmRaw.read(actual, 0, actual.length);
  checkAndEraseData(actual, newpos1, expected, "Small Seek Bug 1");
  // seek another 256 bytes this time
  int newpos2 = newpos1 + 256;
  stmRaw.seek(newpos2);
  stmRaw.read(actual, 0, actual.length);
  checkAndEraseData(actual, newpos2, expected, "Small Seek Bug 2");
  // all done
  stmRaw.close();
}