
Example 6 with Writer

use of org.apache.hadoop.io.file.tfile.TFile.Writer in project hadoop by apache.

the class TestTFile method testMetaBlocks.

// test meta blocks for tfiles
@Test
public void testMetaBlocks() throws IOException {
    Path mFile = new Path(ROOT, "meta.tfile");
    FSDataOutputStream fout = createFSOutput(mFile);
    Writer writer = new Writer(fout, minBlockSize, "none", null, conf);
    someTestingWithMetaBlock(writer, "none");
    writer.close();
    fout.close();
    FSDataInputStream fin = fs.open(mFile);
    Reader reader = new Reader(fin, fs.getFileStatus(mFile).getLen(), conf);
    someReadingWithMetaBlock(reader);
    fs.delete(mFile, true);
    reader.close();
    fin.close();
}
Also used : Path(org.apache.hadoop.fs.Path) FSDataInputStream(org.apache.hadoop.fs.FSDataInputStream) Reader(org.apache.hadoop.io.file.tfile.TFile.Reader) FSDataOutputStream(org.apache.hadoop.fs.FSDataOutputStream) Writer(org.apache.hadoop.io.file.tfile.TFile.Writer) Test(org.junit.Test)
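
The helper methods someTestingWithMetaBlock and someReadingWithMetaBlock are defined elsewhere in TestTFile and are not shown in this excerpt. As a rough, illustrative sketch of the meta-block API they exercise (not the actual helper bodies), a named meta block can be written through Writer.prepareMetaBlock and read back through Reader.getMetaBlock; the block name and payload below are placeholders, and the snippet additionally uses java.io.DataOutputStream and java.io.DataInputStream.

// Illustrative only: write a named meta block, then read it back.
// "exampleMeta" and the payload are placeholder values, not from the test.
DataOutputStream dout = writer.prepareMetaBlock("exampleMeta", "none");
dout.write("meta block payload".getBytes());
dout.close();
// ... after the writer is closed and a Reader is opened on the same file:
DataInputStream din = reader.getMetaBlock("exampleMeta");
byte[] buf = new byte[64];
int read = din.read(buf);
din.close();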

Example 7 with Writer

use of org.apache.hadoop.io.file.tfile.TFile.Writer in project hadoop by apache.

the class TestTFileByteArrays method testFailureBadCompressionCodec.

@Test
public void testFailureBadCompressionCodec() throws IOException {
    if (skip)
        return;
    closeOutput();
    out = fs.create(path);
    try {
        writer = new Writer(out, BLOCK_SIZE, "BAD", comparator, conf);
        Assert.fail("Error on handling invalid compression codecs.");
    } catch (Exception e) {
    // noop, expecting exceptions
    // e.printStackTrace();
    }
}
Also used : Writer(org.apache.hadoop.io.file.tfile.TFile.Writer) IOException(java.io.IOException) EOFException(java.io.EOFException) Test(org.junit.Test)
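
The Writer constructor rejects compression names it does not recognize, which is what this test relies on. As a quick illustration (assuming TFile's static getSupportedCompressionAlgorithms() helper), the accepted names can be listed at runtime; typical values are "none" and "gz", with "lzo" available when the native codec is present.

// Illustrative: print the compression names the TFile Writer constructor accepts.
for (String algo : TFile.getSupportedCompressionAlgorithms()) {
    System.out.println("supported compression: " + algo);
}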

Example 8 with Writer

use of org.apache.hadoop.io.file.tfile.TFile.Writer in project hadoop by apache.

the class TestTFile method basicWithSomeCodec.

/**
   * Basic write/read round trip with the given compression codec.
   */
void basicWithSomeCodec(String codec) throws IOException {
    Path ncTFile = new Path(ROOT, "basic.tfile");
    FSDataOutputStream fout = createFSOutput(ncTFile);
    Writer writer = new Writer(fout, minBlockSize, codec, "memcmp", conf);
    writeRecords(writer);
    fout.close();
    FSDataInputStream fin = fs.open(ncTFile);
    Reader reader = new Reader(fs.open(ncTFile), fs.getFileStatus(ncTFile).getLen(), conf);
    Scanner scanner = reader.createScanner();
    readAllRecords(scanner);
    scanner.seekTo(getSomeKey(50));
    assertTrue("location lookup failed", scanner.seekTo(getSomeKey(50)));
    // read the key and see if it matches
    byte[] readKey = readKey(scanner);
    assertTrue("seeked key does not match", Arrays.equals(getSomeKey(50), readKey));
    scanner.seekTo(new byte[0]);
    byte[] val1 = readValue(scanner);
    scanner.seekTo(new byte[0]);
    byte[] val2 = readValue(scanner);
    assertTrue(Arrays.equals(val1, val2));
    // check for lowerBound
    scanner.lowerBound(getSomeKey(50));
    assertTrue("location lookup failed", scanner.currentLocation.compareTo(reader.end()) < 0);
    readKey = readKey(scanner);
    assertTrue("seeked key does not match", Arrays.equals(readKey, getSomeKey(50)));
    // check for upper bound
    scanner.upperBound(getSomeKey(50));
    assertTrue("location lookup failed", scanner.currentLocation.compareTo(reader.end()) < 0);
    readKey = readKey(scanner);
    assertTrue("seeked key does not match", Arrays.equals(readKey, getSomeKey(51)));
    scanner.close();
    // test for a range of scanner
    scanner = reader.createScannerByKey(getSomeKey(10), getSomeKey(60));
    readAndCheckbytes(scanner, 10, 50);
    assertFalse(scanner.advance());
    scanner.close();
    reader.close();
    fin.close();
    fs.delete(ncTFile, true);
}
Also used : Path(org.apache.hadoop.fs.Path) Scanner(org.apache.hadoop.io.file.tfile.TFile.Reader.Scanner) FSDataInputStream(org.apache.hadoop.fs.FSDataInputStream) Reader(org.apache.hadoop.io.file.tfile.TFile.Reader) FSDataOutputStream(org.apache.hadoop.fs.FSDataOutputStream) Writer(org.apache.hadoop.io.file.tfile.TFile.Writer)
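
The helpers writeRecords, readAllRecords, getSomeKey, readKey and readValue are defined elsewhere in TestTFile and are not shown here. The sketch below is an illustrative, self-contained version of the basic append-then-scan pattern they implement, not the project's helper methods; the key and value formats are placeholders, chosen so that keys stay in byte-wise (memcmp) order as the Writer requires.

// Illustrative append/scan loop, not the project's helper methods.
for (int i = 0; i < 100; i++) {
    // zero-padded keys keep the entries in memcmp order
    writer.append(String.format("key%03d", i).getBytes(), ("value" + i).getBytes());
}
// ... after closing the writer and opening a Reader on the same file:
Scanner scanner = reader.createScanner();
while (!scanner.atEnd()) {
    byte[] key = new byte[scanner.entry().getKeyLength()];
    byte[] value = new byte[scanner.entry().getValueLength()];
    scanner.entry().getKey(key);
    scanner.entry().getValue(value);
    scanner.advance();
}
scanner.close();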

Example 9 with Writer

use of org.apache.hadoop.io.file.tfile.TFile.Writer in project hadoop by apache.

the class TestTFileByteArrays method testFailureFileWriteNotAt0Position.

@Test
public void testFailureFileWriteNotAt0Position() throws IOException {
    if (skip)
        return;
    closeOutput();
    out = fs.create(path);
    out.write(123);
    try {
        writer = new Writer(out, BLOCK_SIZE, compression, comparator, conf);
        Assert.fail("Failed to catch file write not at position 0.");
    } catch (Exception e) {
    // noop, expecting exceptions
    }
    closeOutput();
}
Also used : Writer(org.apache.hadoop.io.file.tfile.TFile.Writer) IOException(java.io.IOException) EOFException(java.io.EOFException) Test(org.junit.Test)
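
The Writer constructor requires that the supplied FSDataOutputStream has not been written to yet; writing a single byte first, as above, is enough to make construction fail. A caller that wants a clearer failure message could check the stream position before constructing the Writer; the guard below is a hypothetical addition, not part of the test.

// Hypothetical pre-check, not in the original test:
if (out.getPos() != 0) {
    throw new IOException("TFile.Writer needs a stream at position 0, but got " + out.getPos());
}
writer = new Writer(out, BLOCK_SIZE, compression, comparator, conf);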

Example 10 with Writer

use of org.apache.hadoop.io.file.tfile.TFile.Writer in project hadoop by apache.

the class TestTFileByteArrays method setUp.

@Before
public void setUp() throws IOException {
    path = new Path(ROOT, outputFile);
    fs = path.getFileSystem(conf);
    out = fs.create(path);
    writer = new Writer(out, BLOCK_SIZE, compression, comparator, conf);
}
Also used : Path(org.apache.hadoop.fs.Path) Writer(org.apache.hadoop.io.file.tfile.TFile.Writer) Before(org.junit.Before)
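
setUp leaves an open Writer and FSDataOutputStream behind for each test. The corresponding @After cleanup is not included in this excerpt; the method below is a minimal sketch of what such a tearDown could look like (an assumption, not the project's actual method), using org.junit.After.

// Sketch of a matching cleanup; the real tearDown in TestTFileByteArrays is not shown here.
@After
public void tearDown() throws IOException {
    if (writer != null) {
        writer.close();
    }
    if (out != null) {
        out.close();
    }
    fs.delete(path, true);
}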

Aggregations

Writer (org.apache.hadoop.io.file.tfile.TFile.Writer): 14 usages
Path (org.apache.hadoop.fs.Path): 8 usages
Test (org.junit.Test): 7 usages
FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream): 6 usages
IOException (java.io.IOException): 5 usages
Configuration (org.apache.hadoop.conf.Configuration): 4 usages
FSDataInputStream (org.apache.hadoop.fs.FSDataInputStream): 4 usages
Reader (org.apache.hadoop.io.file.tfile.TFile.Reader): 3 usages
Before (org.junit.Before): 3 usages
EOFException (java.io.EOFException): 2 usages
BytesWritable (org.apache.hadoop.io.BytesWritable): 2 usages
Scanner (org.apache.hadoop.io.file.tfile.TFile.Reader.Scanner): 2 usages
DataOutputStream (java.io.DataOutputStream): 1 usage
FileSystem (org.apache.hadoop.fs.FileSystem): 1 usage
LongWritable (org.apache.hadoop.io.LongWritable): 1 usage