Use of org.apache.hadoop.io.file.tfile.TFile.Writer in project hadoop by apache.
Class TestTFile, method testMetaBlocks.
// test meta blocks for tfiles
@Test
public void testMetaBlocks() throws IOException {
  Path mFile = new Path(ROOT, "meta.tfile");
  FSDataOutputStream fout = createFSOutput(mFile);
  Writer writer = new Writer(fout, minBlockSize, "none", null, conf);
  someTestingWithMetaBlock(writer, "none");
  writer.close();
  fout.close();
  FSDataInputStream fin = fs.open(mFile);
  Reader reader = new Reader(fin, fs.getFileStatus(mFile).getLen(), conf);
  someReadingWithMetaBlock(reader);
  fs.delete(mFile, true);
  reader.close();
  fin.close();
}
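The helpers someTestingWithMetaBlock and someReadingWithMetaBlock are defined elsewhere in TestTFile. A minimal sketch of the round trip they exercise, using TFile's prepareMetaBlock/getMetaBlock API with hypothetical helper names writeMetaBlock and readMetaBlock (and assuming the test class already imports java.io.DataInputStream and java.io.DataOutputStream), could look like this:

// Sketch only (not the project's actual helpers): write one named meta block, then read it back.
static void writeMetaBlock(Writer writer, String name, byte[] payload) throws IOException {
  DataOutputStream dos = writer.prepareMetaBlock(name);  // one stream per named meta block
  try {
    dos.write(payload);
  } finally {
    dos.close();                                          // closing finalizes the block
  }
}

static byte[] readMetaBlock(Reader reader, String name, int length) throws IOException {
  DataInputStream dis = reader.getMetaBlock(name);        // throws if the block does not exist
  try {
    byte[] buf = new byte[length];
    dis.readFully(buf);
    return buf;
  } finally {
    dis.close();
  }
}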
Use of org.apache.hadoop.io.file.tfile.TFile.Writer in project hadoop by apache.
Class TestTFileByteArrays, method testFailureBadCompressionCodec.
@Test
public void testFailureBadCompressionCodec() throws IOException {
  if (skip)
    return;
  closeOutput();
  out = fs.create(path);
  try {
    writer = new Writer(out, BLOCK_SIZE, "BAD", comparator, conf);
    Assert.fail("Error on handling invalid compression codecs.");
  } catch (Exception e) {
    // noop, expecting exceptions
    // e.printStackTrace();
  }
}
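The Writer constructor only accepts compression names that TFile knows about, which is why "BAD" is expected to throw. A hedged illustration using the TFile constants instead of raw strings:

// Built-in codec names are exposed as constants on TFile; "BAD" matches none of them.
writer = new Writer(out, BLOCK_SIZE, TFile.COMPRESSION_NONE, comparator, conf); // "none"
// other built-in choices: TFile.COMPRESSION_GZ ("gz") and TFile.COMPRESSION_LZO ("lzo")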
Use of org.apache.hadoop.io.file.tfile.TFile.Writer in project hadoop by apache.
Class TestTFile, method basicWithSomeCodec.
/**
 * Basic write/read/seek test with the given compression codec.
 */
void basicWithSomeCodec(String codec) throws IOException {
  Path ncTFile = new Path(ROOT, "basic.tfile");
  FSDataOutputStream fout = createFSOutput(ncTFile);
  Writer writer = new Writer(fout, minBlockSize, codec, "memcmp", conf);
  writeRecords(writer);
  fout.close();
  FSDataInputStream fin = fs.open(ncTFile);
  Reader reader = new Reader(fs.open(ncTFile), fs.getFileStatus(ncTFile).getLen(), conf);
  Scanner scanner = reader.createScanner();
  readAllRecords(scanner);
  scanner.seekTo(getSomeKey(50));
  assertTrue("location lookup failed", scanner.seekTo(getSomeKey(50)));
  // read the key and see if it matches
  byte[] readKey = readKey(scanner);
  assertTrue("seeked key does not match", Arrays.equals(getSomeKey(50), readKey));
  scanner.seekTo(new byte[0]);
  byte[] val1 = readValue(scanner);
  scanner.seekTo(new byte[0]);
  byte[] val2 = readValue(scanner);
  assertTrue(Arrays.equals(val1, val2));
  // check for lowerBound
  scanner.lowerBound(getSomeKey(50));
  assertTrue("location lookup failed", scanner.currentLocation.compareTo(reader.end()) < 0);
  readKey = readKey(scanner);
  assertTrue("seeked key does not match", Arrays.equals(readKey, getSomeKey(50)));
  // check for upper bound
  scanner.upperBound(getSomeKey(50));
  assertTrue("location lookup failed", scanner.currentLocation.compareTo(reader.end()) < 0);
  readKey = readKey(scanner);
  assertTrue("seeked key does not match", Arrays.equals(readKey, getSomeKey(51)));
  scanner.close();
  // test a range scanner
  scanner = reader.createScannerByKey(getSomeKey(10), getSomeKey(60));
  readAndCheckbytes(scanner, 10, 50);
  assertFalse(scanner.advance());
  scanner.close();
  reader.close();
  fin.close();
  fs.delete(ncTFile, true);
}
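readKey and readValue are small helpers defined elsewhere in TestTFile. A plausible sketch of them, built on Scanner.entry() and assuming keys and values are small enough to buffer in memory (so the value length is already known), is:

// Sketch only (not the project's actual helpers): copy the current entry's key or value.
static byte[] readKey(Scanner scanner) throws IOException {
  byte[] key = new byte[scanner.entry().getKeyLength()];
  scanner.entry().getKey(key);       // fills the buffer with the current key bytes
  return key;
}

static byte[] readValue(Scanner scanner) throws IOException {
  // getValueLength() assumes the value was small enough to be read in full by the scanner
  byte[] value = new byte[scanner.entry().getValueLength()];
  scanner.entry().getValue(value);
  return value;
}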
Use of org.apache.hadoop.io.file.tfile.TFile.Writer in project hadoop by apache.
Class TestTFileByteArrays, method testFailureFileWriteNotAt0Position.
@Test
public void testFailureFileWriteNotAt0Position() throws IOException {
  if (skip)
    return;
  closeOutput();
  out = fs.create(path);
  out.write(123);
  try {
    writer = new Writer(out, BLOCK_SIZE, compression, comparator, conf);
    Assert.fail("Failed to catch file write not at position 0.");
  } catch (Exception e) {
    // noop, expecting exceptions
  }
  closeOutput();
}
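The constructor rejects a stream that has already been written to because the TFile layout starts at offset 0. A hedged sketch of the same precondition, checked from the caller's side via FSDataOutputStream.getPos(), could be:

// Sketch: fail fast (outside the Writer) if the stream is not at the beginning.
if (out.getPos() != 0) {
  throw new IOException(
      "TFile.Writer expects the output stream at position 0, got " + out.getPos());
}
writer = new Writer(out, BLOCK_SIZE, compression, comparator, conf);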
Use of org.apache.hadoop.io.file.tfile.TFile.Writer in project hadoop by apache.
Class TestTFileByteArrays, method setUp.
@Before
public void setUp() throws IOException {
  path = new Path(ROOT, outputFile);
  fs = path.getFileSystem(conf);
  out = fs.create(path);
  writer = new Writer(out, BLOCK_SIZE, compression, comparator, conf);
}
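A matching @After method (a sketch under the assumption that the class imports org.junit.After, not necessarily the project's exact tearDown) would close the writer and stream opened here and remove the file:

// Sketch of a complementary tearDown for the fields created in setUp.
@After
public void tearDown() throws IOException {
  if (writer != null) {
    writer.close();        // flushes data blocks, meta blocks, and the TFile index
    writer = null;
  }
  if (out != null) {
    out.close();
    out = null;
  }
  fs.delete(path, true);   // remove the test file so each case starts clean
}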