Use of org.apache.hadoop.io.file.tfile.TFile.Writer in project hadoop by apache.
The class TestTFileUnsortedByteArrays, method setUp.
@Before
public void setUp() throws IOException {
  conf = new Configuration();
  path = new Path(ROOT, outputFile);
  fs = path.getFileSystem(conf);
  out = fs.create(path);
  // A null comparator produces an unsorted TFile, so keys may be appended in any order.
  writer = new Writer(out, BLOCK_SIZE, compression, null, conf);
  writer.append("keyZ".getBytes(), "valueZ".getBytes());
  writer.append("keyM".getBytes(), "valueM".getBytes());
  writer.append("keyN".getBytes(), "valueN".getBytes());
  writer.append("keyA".getBytes(), "valueA".getBytes());
  closeOutput();
}
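The closeOutput() helper is defined elsewhere in the test and is not shown above; a minimal sketch of what it presumably does, assuming it only releases the writer and the underlying output stream:
private void closeOutput() throws IOException {
  if (writer != null) {
    writer.close();
    writer = null;
  }
  if (out != null) {
    out.close();
    out = null;
  }
}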
Use of org.apache.hadoop.io.file.tfile.TFile.Writer in project hadoop by apache.
The class TestTFileComparators, method testFailureBadJClasses.
// The named class exists but does not implement RawComparator, so the Writer must reject it.
@Test
public void testFailureBadJClasses() throws IOException {
  try {
    writer = new Writer(out, BLOCK_SIZE, compression, "jclass:org.apache.hadoop.io.file.tfile.Chunk", conf);
    Assert.fail("Failed to catch unsupported comparator names");
  } catch (Exception e) {
    // noop, expecting exceptions
    e.printStackTrace();
  }
}
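For contrast, a comparator name the Writer does accept is the built-in "memcmp" comparator (or a "jclass:" name that resolves to a RawComparator implementation). A brief sketch, assuming it reuses the test's out, BLOCK_SIZE, compression, and conf fixture fields; keys must then be appended in ascending byte order:
writer = new Writer(out, BLOCK_SIZE, compression, "memcmp", conf);
writer.append("keyA".getBytes(), "valueA".getBytes());
writer.append("keyB".getBytes(), "valueB".getBytes());
writer.close();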
Use of org.apache.hadoop.io.file.tfile.TFile.Writer in project hadoop by apache.
The class TestTFileSeek, method createTFile.
private void createTFile() throws IOException {
  long totalBytes = 0;
  FSDataOutputStream fout = createFSOutput(path, fs);
  try {
    Writer writer = new Writer(fout, options.minBlockSize, options.compress, "memcmp", conf);
    try {
      BytesWritable key = new BytesWritable();
      BytesWritable val = new BytesWritable();
      timer.start();
      for (long i = 0; true; ++i) {
        // Check the on-disk size every 1000 rows and stop once the target file size is reached.
        if (i % 1000 == 0) {
          if (fs.getFileStatus(path).getLen() >= options.fileSize) {
            break;
          }
        }
        kvGen.next(key, val, false);
        writer.append(key.getBytes(), 0, key.getLength(), val.getBytes(), 0, val.getLength());
        totalBytes += key.getLength();
        totalBytes += val.getLength();
      }
      timer.stop();
    } finally {
      writer.close();
    }
  } finally {
    fout.close();
  }
  // Elapsed time in microseconds.
  double duration = (double) timer.read() / 1000;
  long fsize = fs.getFileStatus(path).getLen();
  System.out.printf("time: %s...uncompressed: %.2fMB...raw thrpt: %.2fMB/s\n",
      timer.toString(), (double) totalBytes / 1024 / 1024, totalBytes / duration);
  System.out.printf("time: %s...file size: %.2fMB...disk thrpt: %.2fMB/s\n",
      timer.toString(), (double) fsize / 1024 / 1024, fsize / duration);
}
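createFSOutput and kvGen are helpers of the surrounding benchmark and are not shown here; a plausible sketch of createFSOutput, under the assumption that it only (re)creates the output file:
private static FSDataOutputStream createFSOutput(Path name, FileSystem fs) throws IOException {
  // Remove any leftover file from a previous run, then create a fresh output stream.
  if (fs.exists(name)) {
    fs.delete(name, true);
  }
  return fs.create(name);
}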
Use of org.apache.hadoop.io.file.tfile.TFile.Writer in project hadoop by apache.
The class TestTFileSplit, method createFile.
void createFile(int count, String compress) throws IOException {
  conf = new Configuration();
  path = new Path(ROOT, outputFile + "." + compress);
  fs = path.getFileSystem(conf);
  FSDataOutputStream out = fs.create(path);
  Writer writer = new Writer(out, BLOCK_SIZE, compress, comparator, conf);
  // Append `count` key/value pairs; composeSortedKey keeps the keys in sorted order.
  for (int nx = 0; nx < count; nx++) {
    byte[] key = composeSortedKey(KEY, count, nx).getBytes();
    byte[] value = (VALUE + nx).getBytes();
    writer.append(key, value);
  }
  writer.close();
  out.close();
}
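A hypothetical check (not part of TestTFileSplit) that reads the file back and counts entries, using only documented TFile.Reader and Scanner calls; calling createFile(count, compress) followed by this helper should return count:
int countRecords() throws IOException {
  Reader reader = new Reader(fs.open(path), fs.getFileStatus(path).getLen(), conf);
  Scanner scanner = reader.createScanner();
  int n = 0;
  while (!scanner.atEnd()) {
    n++;
    scanner.advance();
  }
  scanner.close();
  reader.close();
  return n;
}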
Use of org.apache.hadoop.io.file.tfile.TFile.Writer in project hadoop by apache.
The class TestTFile, method unsortedWithSomeCodec.
// Write and read back an unsorted TFile with the given compression codec.
void unsortedWithSomeCodec(String codec) throws IOException {
  Path uTfile = new Path(ROOT, "unsorted.tfile");
  FSDataOutputStream fout = createFSOutput(uTfile);
  Writer writer = new Writer(fout, minBlockSize, codec, null, conf);
  writeRecords(writer);
  writer.close();
  fout.close();
  // Reuse the single opened stream for the Reader instead of opening the file a second time.
  FSDataInputStream fin = fs.open(uTfile);
  Reader reader = new Reader(fin, fs.getFileStatus(uTfile).getLen(), conf);
  Scanner scanner = reader.createScanner();
  readAllRecords(scanner);
  scanner.close();
  reader.close();
  fin.close();
  fs.delete(uTfile, true);
}
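The writeRecords and readAllRecords helpers are defined elsewhere in TestTFile; an assumed, minimal sketch of the shape they likely take (names and record count here are illustrative, not the actual helpers):
void writeRecords(Writer writer) throws IOException {
  // The writer above was created with a null comparator, so insertion order need not be sorted.
  for (int i = 0; i < 100; i++) {
    writer.append(("key" + i).getBytes(), ("value" + i).getBytes());
  }
}

void readAllRecords(Scanner scanner) throws IOException {
  while (!scanner.atEnd()) {
    Scanner.Entry entry = scanner.entry();
    byte[] key = new byte[entry.getKeyLength()];
    byte[] value = new byte[entry.getValueLength()];
    entry.getKey(key);
    entry.getValue(value);
    scanner.advance();
  }
}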