Example 6 with Scanner

Use of org.apache.hadoop.io.file.tfile.TFile.Reader.Scanner in project hadoop by apache.

From class TestTFileByteArrays, method readValueBeforeKey.

private void readValueBeforeKey(int recordIndex) throws IOException {
    Reader reader = new Reader(fs.open(path), fs.getFileStatus(path).getLen(), conf);
    Scanner scanner = reader.createScannerByKey(composeSortedKey(KEY, recordIndex).getBytes(), null);
    try {
        // read the value of the located entry first ...
        byte[] vbuf = new byte[BUF_SIZE];
        int vlen = scanner.entry().getValueLength();
        scanner.entry().getValue(vbuf);
        Assert.assertEquals(new String(vbuf, 0, vlen), VALUE + recordIndex);
        // ... then read its key, verifying that value-before-key access works
        byte[] kbuf = new byte[BUF_SIZE];
        int klen = scanner.entry().getKeyLength();
        scanner.entry().getKey(kbuf);
        Assert.assertEquals(new String(kbuf, 0, klen), composeSortedKey(KEY, recordIndex));
    } finally {
        scanner.close();
        reader.close();
    }
}
Also used: Scanner (org.apache.hadoop.io.file.tfile.TFile.Reader.Scanner), Reader (org.apache.hadoop.io.file.tfile.TFile.Reader)
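
These read-side examples assume a TFile already exists at path. For context, here is a minimal, hypothetical sketch of producing one with TFile.Writer; the block size, "gz" compression, and "memcmp" comparator are illustrative choices, not taken from the tests above.

FSDataOutputStream out = fs.create(path);
// minBlockSize, compression codec, and raw-byte comparator are assumptions for this sketch
TFile.Writer writer = new TFile.Writer(out, 64 * 1024, "gz", "memcmp", conf);
try {
    // with a comparator set, keys must be appended in sorted order
    for (int i = 0; i < 5; i++) {
        writer.append(("key" + i).getBytes(), ("value" + i).getBytes());
    }
} finally {
    writer.close();
    out.close();
}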

Example 7 with Scanner

Use of org.apache.hadoop.io.file.tfile.TFile.Reader.Scanner in project hadoop by apache.

From class TestTFileByteArrays, method readKeyWithoutValue.

private void readKeyWithoutValue(int recordIndex) throws IOException {
    Reader reader = new Reader(fs.open(path), fs.getFileStatus(path).getLen(), conf);
    Scanner scanner = reader.createScannerByKey(composeSortedKey(KEY, recordIndex).getBytes(), null);
    try {
        // read the indexed key
        byte[] kbuf1 = new byte[BUF_SIZE];
        int klen1 = scanner.entry().getKeyLength();
        scanner.entry().getKey(kbuf1);
        Assert.assertEquals(new String(kbuf1, 0, klen1), composeSortedKey(KEY, recordIndex));
        if (scanner.advance() && !scanner.atEnd()) {
            // read the next key following the indexed
            byte[] kbuf2 = new byte[BUF_SIZE];
            int klen2 = scanner.entry().getKeyLength();
            scanner.entry().getKey(kbuf2);
            Assert.assertEquals(new String(kbuf2, 0, klen2), composeSortedKey(KEY, recordIndex + 1));
        }
    } finally {
        scanner.close();
        reader.close();
    }
}
Also used: Scanner (org.apache.hadoop.io.file.tfile.TFile.Reader.Scanner), Reader (org.apache.hadoop.io.file.tfile.TFile.Reader)
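
readKeyWithoutValue advances the scanner by a single entry; the same atEnd/advance pair generalizes to a full scan of the file. A minimal sketch, assuming the same fs, path, conf, and BUF_SIZE as the tests (getValueLength assumes the value length is known, which holds for the small values these tests write):

Reader reader = new Reader(fs.open(path), fs.getFileStatus(path).getLen(), conf);
Scanner scanner = reader.createScanner();
try {
    while (!scanner.atEnd()) {
        byte[] kbuf = new byte[BUF_SIZE];
        int klen = scanner.entry().getKeyLength();
        scanner.entry().getKey(kbuf);
        byte[] vbuf = new byte[BUF_SIZE];
        int vlen = scanner.entry().getValueLength();
        scanner.entry().getValue(vbuf);
        // process new String(kbuf, 0, klen) and new String(vbuf, 0, vlen) here
        scanner.advance();
    }
} finally {
    scanner.close();
    reader.close();
}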

Example 8 with Scanner

Use of org.apache.hadoop.io.file.tfile.TFile.Reader.Scanner in project hadoop by apache.

From class TestTFileByteArrays, method testFailureReadValueManyTimes.

@Test
public void testFailureReadValueManyTimes() throws IOException {
    if (skip)
        return;
    writeRecords(5);
    Reader reader = new Reader(fs.open(path), fs.getFileStatus(path).getLen(), conf);
    Scanner scanner = reader.createScanner();
    byte[] vbuf = new byte[BUF_SIZE];
    int vlen = scanner.entry().getValueLength();
    scanner.entry().getValue(vbuf);
    Assert.assertEquals(new String(vbuf, 0, vlen), VALUE + 0);
    try {
        // a second read of the same entry's value must fail
        scanner.entry().getValue(vbuf);
        Assert.fail("Cannot get the value multiple times.");
    } catch (Exception e) {
        // noop, expecting exceptions
    }
    scanner.close();
    reader.close();
}
Also used: Scanner (org.apache.hadoop.io.file.tfile.TFile.Reader.Scanner), Reader (org.apache.hadoop.io.file.tfile.TFile.Reader), IOException (java.io.IOException), EOFException (java.io.EOFException), Test (org.junit.Test)
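
The test above pins down a contract: an entry's value can be fetched only once, presumably because large values are streamed from the underlying block. The safe pattern, sketched here with the same names as the test, is to copy the value into a buffer once and reuse the copy:

byte[] vbuf = new byte[BUF_SIZE];
int vlen = scanner.entry().getValueLength();
scanner.entry().getValue(vbuf);
String value = new String(vbuf, 0, vlen);
// reuse value (or vbuf) freely; never call getValue() twice on the same entry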

Example 9 with Scanner

Use of org.apache.hadoop.io.file.tfile.TFile.Reader.Scanner in project hadoop by apache.

From class TestTFileByteArrays, method testFailureNegativeLength_2.

@Test
public void testFailureNegativeLength_2() throws IOException {
    if (skip)
        return;
    closeOutput();
    Reader reader = new Reader(fs.open(path), fs.getFileStatus(path).getLen(), conf);
    Scanner scanner = reader.createScanner();
    try {
        // a negative key length must be rejected
        scanner.lowerBound("keyX".getBytes(), 0, -1);
        Assert.fail("Error on handling negative length.");
    } catch (Exception e) {
        // noop, expecting exceptions
    } finally {
        scanner.close();
        reader.close();
    }
    closeOutput();
}
Also used: Scanner (org.apache.hadoop.io.file.tfile.TFile.Reader.Scanner), Reader (org.apache.hadoop.io.file.tfile.TFile.Reader), IOException (java.io.IOException), EOFException (java.io.EOFException), Test (org.junit.Test)
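
For contrast with the failure case, a sketch of a valid three-argument lowerBound call, reusing the reader/scanner setup and helpers from the test above; it positions the scanner at the first entry whose key is greater than or equal to the given key:

byte[] key = composeSortedKey(KEY, 2).getBytes();
// arguments are (buffer, offset, length); here the whole array is the key
scanner.lowerBound(key, 0, key.length);
if (!scanner.atEnd()) {
    byte[] kbuf = new byte[BUF_SIZE];
    int klen = scanner.entry().getKeyLength();
    scanner.entry().getKey(kbuf);
    // kbuf now holds the first key >= the search key
}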

Example 10 with Scanner

Use of org.apache.hadoop.io.file.tfile.TFile.Reader.Scanner in project hadoop by apache.

From class TestTFileByteArrays, method testFailureNegativeOffset_2.

@Test
public void testFailureNegativeOffset_2() throws IOException {
    if (skip)
        return;
    closeOutput();
    Reader reader = new Reader(fs.open(path), fs.getFileStatus(path).getLen(), conf);
    Scanner scanner = reader.createScanner();
    try {
        // a negative key offset must be rejected
        scanner.lowerBound("keyX".getBytes(), -1, 4);
        Assert.fail("Error on handling negative offset.");
    } catch (Exception e) {
        // noop, expecting exceptions
    } finally {
        // close the scanner before the reader that created it
        scanner.close();
        reader.close();
    }
    closeOutput();
}
Also used: Scanner (org.apache.hadoop.io.file.tfile.TFile.Reader.Scanner), Reader (org.apache.hadoop.io.file.tfile.TFile.Reader), IOException (java.io.IOException), EOFException (java.io.EOFException), Test (org.junit.Test)
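
The finally blocks in these examples close the scanner before the reader that created it. Both Reader and Scanner expose close(), so on Java 7+ a try-with-resources block is a sketch of an alternative that enforces that ordering automatically (resources close in reverse declaration order), assuming both types implement java.io.Closeable:

try (Reader reader = new Reader(fs.open(path), fs.getFileStatus(path).getLen(), conf);
     Scanner scanner = reader.createScanner()) {
    scanner.lowerBound("keyX".getBytes());
    // ... any other scanner work ...
}
// scanner closes first, then reader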

Aggregations

Reader (org.apache.hadoop.io.file.tfile.TFile.Reader): 22 uses
Scanner (org.apache.hadoop.io.file.tfile.TFile.Reader.Scanner): 22 uses
Test (org.junit.Test): 11 uses
IOException (java.io.IOException): 6 uses
EOFException (java.io.EOFException): 5 uses
FSDataInputStream (org.apache.hadoop.fs.FSDataInputStream): 3 uses
BytesWritable (org.apache.hadoop.io.BytesWritable): 3 uses
FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream): 2 uses
Path (org.apache.hadoop.fs.Path): 2 uses
Writer (org.apache.hadoop.io.file.tfile.TFile.Writer): 2 uses
Location (org.apache.hadoop.io.file.tfile.TFile.Reader.Location): 1 use