
Example 1 with Scanner

Use of org.apache.hadoop.io.file.tfile.TFile.Reader.Scanner in project hadoop by apache.

From class TestTFileUnsortedByteArrays, method testScanRange.

// we can still scan records in an unsorted TFile
@Test
public void testScanRange() throws IOException {
    Reader reader = new Reader(fs.open(path), fs.getFileStatus(path).getLen(), conf);
    Assert.assertFalse(reader.isSorted());
    Assert.assertEquals((int) reader.getEntryCount(), 4);
    Scanner scanner = reader.createScanner();
    try {
        // read key and value
        byte[] kbuf = new byte[BUF_SIZE];
        int klen = scanner.entry().getKeyLength();
        scanner.entry().getKey(kbuf);
        Assert.assertEquals(new String(kbuf, 0, klen), "keyZ");
        byte[] vbuf = new byte[BUF_SIZE];
        int vlen = scanner.entry().getValueLength();
        scanner.entry().getValue(vbuf);
        Assert.assertEquals(new String(vbuf, 0, vlen), "valueZ");
        scanner.advance();
        // now read the value before the key
        vbuf = new byte[BUF_SIZE];
        vlen = scanner.entry().getValueLength();
        scanner.entry().getValue(vbuf);
        Assert.assertEquals(new String(vbuf, 0, vlen), "valueM");
        kbuf = new byte[BUF_SIZE];
        klen = scanner.entry().getKeyLength();
        scanner.entry().getKey(kbuf);
        Assert.assertEquals(new String(kbuf, 0, klen), "keyM");
    } finally {
        scanner.close();
        reader.close();
    }
}
Also used: Scanner (org.apache.hadoop.io.file.tfile.TFile.Reader.Scanner), Reader (org.apache.hadoop.io.file.tfile.TFile.Reader), Test (org.junit.Test)
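
A sketch, for context, of how the unsorted fixture behind this test could be written: passing a null comparator to TFile.Writer produces an unsorted file, so keys may be appended in any order. The writer parameters and the remaining records are assumptions; only keyZ/valueZ and keyM/valueM are confirmed by the assertions above.

FSDataOutputStream out = fs.create(path);
// a null comparator makes the TFile unsorted; appends need not be in key order
Writer writer = new Writer(out, 64 * 1024, TFile.COMPRESSION_NONE, null, conf);
writer.append("keyZ".getBytes(), "valueZ".getBytes());
writer.append("keyM".getBytes(), "valueM".getBytes());
// ... two more records, since the test asserts an entry count of 4
writer.close();
out.close();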

Example 2 with Scanner

Use of org.apache.hadoop.io.file.tfile.TFile.Reader.Scanner in project hadoop by apache.

From class TestTFileSeek, method seekTFile.

public void seekTFile() throws IOException {
    int miss = 0;
    long totalBytes = 0;
    FSDataInputStream fsdis = fs.open(path);
    Reader reader = new Reader(fsdis, fs.getFileStatus(path).getLen(), conf);
    KeySampler kSampler = new KeySampler(rng, reader.getFirstKey(), reader.getLastKey(), keyLenGen);
    Scanner scanner = reader.createScanner();
    BytesWritable key = new BytesWritable();
    BytesWritable val = new BytesWritable();
    timer.reset();
    timer.start();
    for (int i = 0; i < options.seekCount; ++i) {
        kSampler.next(key);
        scanner.lowerBound(key.getBytes(), 0, key.getLength());
        if (!scanner.atEnd()) {
            scanner.entry().get(key, val);
            totalBytes += key.getLength();
            totalBytes += val.getLength();
        } else {
            ++miss;
        }
    }
    timer.stop();
    // elapsed time in microseconds (the timer reads nanoseconds)
    double duration = (double) timer.read() / 1000;
    System.out.printf("time: %s...avg seek: %s...%d hit...%d miss...avg I/O size: %.2fKB\n",
        timer.toString(), NanoTimer.nanoTimeToString(timer.read() / options.seekCount),
        options.seekCount - miss, miss, (double) totalBytes / 1024 / (options.seekCount - miss));
    scanner.close();
    reader.close();
    fsdis.close();
}
Also used: Scanner (org.apache.hadoop.io.file.tfile.TFile.Reader.Scanner), FSDataInputStream (org.apache.hadoop.fs.FSDataInputStream), Reader (org.apache.hadoop.io.file.tfile.TFile.Reader), BytesWritable (org.apache.hadoop.io.BytesWritable)
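
Note the use of lowerBound() rather than seekTo(): both position the scanner at the first entry whose key is greater than or equal to the probe key, but seekTo() also reports whether the probe key was matched exactly. A minimal sketch of the distinction (the key literal is illustrative):

// lowerBound only positions the cursor; the probe registers as a miss
// via atEnd() solely when the key is greater than every key in the file
scanner.lowerBound("key0042".getBytes());
boolean pastEnd = scanner.atEnd();

// seekTo performs the same positioning but returns true only on an exact match
boolean exactMatch = scanner.seekTo("key0042".getBytes());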

Example 3 with Scanner

Use of org.apache.hadoop.io.file.tfile.TFile.Reader.Scanner in project hadoop by apache.

From class TestTFile, method unsortedWithSomeCodec.

// unsorted with some codec
void unsortedWithSomeCodec(String codec) throws IOException {
    Path uTfile = new Path(ROOT, "unsorted.tfile");
    FSDataOutputStream fout = createFSOutput(uTfile);
    Writer writer = new Writer(fout, minBlockSize, codec, null, conf);
    writeRecords(writer);
    writer.close();
    fout.close();
    FSDataInputStream fin = fs.open(uTfile);
    // reuse fin rather than opening the file a second time and leaking the stream
    Reader reader = new Reader(fin, fs.getFileStatus(uTfile).getLen(), conf);
    Scanner scanner = reader.createScanner();
    readAllRecords(scanner);
    scanner.close();
    reader.close();
    fin.close();
    fs.delete(uTfile, true);
}
Also used: Path (org.apache.hadoop.fs.Path), Scanner (org.apache.hadoop.io.file.tfile.TFile.Reader.Scanner), FSDataInputStream (org.apache.hadoop.fs.FSDataInputStream), Reader (org.apache.hadoop.io.file.tfile.TFile.Reader), FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream), Writer (org.apache.hadoop.io.file.tfile.TFile.Writer)
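
The writeRecords() and readAllRecords() helpers are defined elsewhere in TestTFile. A plausible minimal sketch of their shape (record count and formatting are assumptions; BUF_SIZE is the buffer constant used in the other examples):

void writeRecords(Writer writer) throws IOException {
    // illustrative record format; the real helper may write different data
    for (int i = 0; i < 4; i++) {
        writer.append(("key" + i).getBytes(), ("value" + i).getBytes());
    }
}

void readAllRecords(Scanner scanner) throws IOException {
    byte[] kbuf = new byte[BUF_SIZE];
    byte[] vbuf = new byte[BUF_SIZE];
    while (!scanner.atEnd()) {
        int klen = scanner.entry().getKeyLength();
        scanner.entry().getKey(kbuf);
        int vlen = scanner.entry().getValueLength();
        scanner.entry().getValue(vbuf);
        // new String(kbuf, 0, klen) and new String(vbuf, 0, vlen) would be
        // checked against the expected records here
        scanner.advance();
    }
}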

Example 4 with Scanner

Use of org.apache.hadoop.io.file.tfile.TFile.Reader.Scanner in project hadoop by apache.

From class TestTFileByteArrays, method readValueWithoutKey.

private void readValueWithoutKey(int recordIndex) throws IOException {
    Reader reader = new Reader(fs.open(path), fs.getFileStatus(path).getLen(), conf);
    Scanner scanner = reader.createScannerByKey(composeSortedKey(KEY, recordIndex).getBytes(), null);
    byte[] vbuf1 = new byte[BUF_SIZE];
    int vlen1 = scanner.entry().getValueLength();
    scanner.entry().getValue(vbuf1);
    Assert.assertEquals(new String(vbuf1, 0, vlen1), VALUE + recordIndex);
    // if a following record exists, its value should also be readable without touching the key
    if (scanner.advance() && !scanner.atEnd()) {
        byte[] vbuf2 = new byte[BUF_SIZE];
        int vlen2 = scanner.entry().getValueLength();
        scanner.entry().getValue(vbuf2);
        Assert.assertEquals(new String(vbuf2, 0, vlen2), VALUE + (recordIndex + 1));
    }
    scanner.close();
    reader.close();
}
Also used: Scanner (org.apache.hadoop.io.file.tfile.TFile.Reader.Scanner), Reader (org.apache.hadoop.io.file.tfile.TFile.Reader)
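
Here createScannerByKey(beginKey, null) starts the scan at the first entry whose key is at or after beginKey and leaves the range open-ended. The composeSortedKey() helper is not shown; a plausible sketch, assuming the record index is zero-padded so that byte-wise key order matches numeric order (as a sorted TFile requires):

// hypothetical helper: fixed-width padding keeps lexicographic order
// of the composed keys aligned with numeric order of the indices
static String composeSortedKey(String prefix, int recordIndex) {
    return String.format("%s%010d", prefix, recordIndex);
}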

Example 5 with Scanner

Use of org.apache.hadoop.io.file.tfile.TFile.Reader.Scanner in project hadoop by apache.

From class TestTFileByteArrays, method testFailureNegativeLength_3.

@Test
public void testFailureNegativeLength_3() throws IOException {
    if (skip)
        return;
    writeRecords(3);
    Reader reader = new Reader(fs.open(path), fs.getFileStatus(path).getLen(), conf);
    Scanner scanner = reader.createScanner();
    try {
        // test negative array offset
        try {
            scanner.seekTo("keyY".getBytes(), -1, 4);
            Assert.fail("Failed to handle negative offset.");
        } catch (Exception e) {
            // no-op: exception expected for the negative offset
        }
        // test negative array length
        try {
            scanner.seekTo("keyY".getBytes(), 0, -2);
            Assert.fail("Failed to handle negative key length.");
        } catch (Exception e) {
            // no-op: exception expected for the negative key length
        }
    } finally {
        scanner.close();
        reader.close();
    }
}
Also used: Scanner (org.apache.hadoop.io.file.tfile.TFile.Reader.Scanner), Reader (org.apache.hadoop.io.file.tfile.TFile.Reader), IOException (java.io.IOException), EOFException (java.io.EOFException), Test (org.junit.Test)
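
Since TFile.Reader and its Scanner both implement java.io.Closeable, the manual finally blocks in these examples could also be written with try-with-resources, which closes resources in reverse declaration order (scanner first, then reader):

try (Reader reader = new Reader(fs.open(path), fs.getFileStatus(path).getLen(), conf);
     Scanner scanner = reader.createScanner()) {
    // scanner and reader are closed automatically, even if an assertion throws
}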

Aggregations

Reader (org.apache.hadoop.io.file.tfile.TFile.Reader): 22 usages
Scanner (org.apache.hadoop.io.file.tfile.TFile.Reader.Scanner): 22 usages
Test (org.junit.Test): 11 usages
IOException (java.io.IOException): 6 usages
EOFException (java.io.EOFException): 5 usages
FSDataInputStream (org.apache.hadoop.fs.FSDataInputStream): 3 usages
BytesWritable (org.apache.hadoop.io.BytesWritable): 3 usages
FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream): 2 usages
Path (org.apache.hadoop.fs.Path): 2 usages
Writer (org.apache.hadoop.io.file.tfile.TFile.Writer): 2 usages
Location (org.apache.hadoop.io.file.tfile.TFile.Reader.Location): 1 usage