Example 1 with ClientContext

Use of org.apache.hadoop.hdfs.ClientContext in the Apache Hadoop project.

From class TestScrLazyPersistFiles, method testLegacyScrAfterEviction.

@Test
public void testLegacyScrAfterEviction() throws IOException, InterruptedException, TimeoutException {
    getClusterBuilder().setUseScr(true).setUseLegacyBlockReaderLocal(true).build();
    doShortCircuitReadAfterEvictionTest();
    // In the implementation of legacy short-circuit reads, any failure is
    // trapped silently, reverts back to a remote read, and also disables all
    // subsequent legacy short-circuit reads in the ClientContext.
    // Assert that it didn't get disabled.
    ClientContext clientContext = client.getClientContext();
    Assert.assertFalse(clientContext.getDisableLegacyBlockReaderLocal());
}
Also used: ClientContext (org.apache.hadoop.hdfs.ClientContext), Test (org.junit.Test)
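
The assertion above relies on the behaviour described in the comment: a legacy short-circuit read failure does not throw, it silently falls back to a remote read and flips a flag on the shared ClientContext. Outside of the test harness, that flag can be inspected the same way after performing a read. The following is a minimal sketch, not taken from the Hadoop sources: the class name LegacyScrFlagCheck, the NameNode URI, and the file path are placeholders, while the configuration keys and the ClientContext calls are the ones used in the examples on this page.

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.ClientContext;

public class LegacyScrFlagCheck {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Enable short-circuit reads and the legacy (pre-domain-socket) local reader.
        conf.setBoolean("dfs.client.read.shortcircuit", true);
        conf.setBoolean("dfs.client.use.legacy.blockreader.local", true);
        // Placeholder NameNode URI and file path.
        FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:8020"), conf);
        byte[] buf = new byte[1024];
        try (FSDataInputStream in = fs.open(new Path("/tmp/some-file"))) {
            // A legacy short-circuit failure here is trapped silently and the
            // read is served remotely instead of throwing.
            in.readFully(0, buf, 0, buf.length);
        }
        // The ClientContext shared by clients built from this configuration
        // records whether legacy short-circuit reads have been disabled.
        ClientContext clientContext = ClientContext.getFromConf(conf);
        System.out.println("legacy short-circuit reads disabled: "
            + clientContext.getDisableLegacyBlockReaderLocal());
    }
}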

Example 2 with ClientContext

Use of org.apache.hadoop.hdfs.ClientContext in the Apache Hadoop project.

From class TestShortCircuitLocalRead, method checkFileContent.

/** Check file content, reading as user {@code readingUser} */
static void checkFileContent(URI uri, Path name, byte[] expected, int readOffset, String readingUser, Configuration conf, boolean legacyShortCircuitFails) throws IOException, InterruptedException {
    // Ensure short circuit is enabled
    DistributedFileSystem fs = getFileSystem(readingUser, uri, conf);
    ClientContext getClientContext = ClientContext.getFromConf(conf);
    if (legacyShortCircuitFails) {
        assertFalse(getClientContext.getDisableLegacyBlockReaderLocal());
    }
    FSDataInputStream stm = fs.open(name);
    byte[] actual = new byte[expected.length - readOffset];
    stm.readFully(readOffset, actual);
    checkData(actual, readOffset, expected, "Read 2");
    stm.close();
    // Now read using a different API.
    actual = new byte[expected.length - readOffset];
    stm = fs.open(name);
    IOUtils.skipFully(stm, readOffset);
    //Read a small number of bytes first.
    int nread = stm.read(actual, 0, 3);
    nread += stm.read(actual, nread, 2);
    int len = 517;
    if (actual.length - nread >= len) {
        //Read across chunk boundary
        nread += stm.read(actual, nread, len);
    }
    checkData(actual, readOffset, expected, nread, "A few bytes");
    //Now read rest of it
    while (nread < actual.length) {
        int nbytes = stm.read(actual, nread, actual.length - nread);
        if (nbytes < 0) {
            throw new EOFException("End of file reached before reading fully.");
        }
        nread += nbytes;
    }
    checkData(actual, readOffset, expected, "Read 3");
    if (legacyShortCircuitFails) {
        assertTrue(getClientContext.getDisableLegacyBlockReaderLocal());
    }
    stm.close();
}
Also used: ClientContext (org.apache.hadoop.hdfs.ClientContext), EOFException (java.io.EOFException), FSDataInputStream (org.apache.hadoop.fs.FSDataInputStream), DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem)
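
The getFileSystem and checkData helpers referenced above are defined elsewhere in TestShortCircuitLocalRead and are not shown on this page. As a rough sketch of what the comparison helper is assumed to do, based only on the call sites visible here (compare the bytes read against the expected array starting at the given offset, optionally limited to the first nread bytes), an equivalent routine could look like the following; the class name CheckDataSketch is hypothetical.

import static org.junit.Assert.assertEquals;

final class CheckDataSketch {
    // Compare the first len bytes of actual against expected, where actual
    // was read starting at offset from within the original data.
    static void checkData(byte[] actual, int from, byte[] expected, int len,
            String message) {
        for (int i = 0; i < len; i++) {
            assertEquals(message + ": byte " + (from + i) + " differs",
                expected[from + i], actual[i]);
        }
    }

    // Convenience overload that checks every byte that was read.
    static void checkData(byte[] actual, int from, byte[] expected,
            String message) {
        checkData(actual, from, expected, actual.length, message);
    }
}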

Example 3 with ClientContext

Use of org.apache.hadoop.hdfs.ClientContext in the Apache Hadoop project.

From class TestShortCircuitLocalRead, method checkFileContentDirect.

/** Check the file content, reading as user {@code readingUser} */
static void checkFileContentDirect(URI uri, Path name, byte[] expected, int readOffset, String readingUser, Configuration conf, boolean legacyShortCircuitFails) throws IOException, InterruptedException {
    // Ensure short circuit is enabled
    DistributedFileSystem fs = getFileSystem(readingUser, uri, conf);
    ClientContext clientContext = ClientContext.getFromConf(conf);
    if (legacyShortCircuitFails) {
        assertTrue(clientContext.getDisableLegacyBlockReaderLocal());
    }
    HdfsDataInputStream stm = (HdfsDataInputStream) fs.open(name);
    ByteBuffer actual = ByteBuffer.allocateDirect(expected.length - readOffset);
    IOUtils.skipFully(stm, readOffset);
    actual.limit(3);
    //Read a small number of bytes first.
    int nread = stm.read(actual);
    actual.limit(nread + 2);
    nread += stm.read(actual);
    // Read across chunk boundary
    actual.limit(Math.min(actual.capacity(), nread + 517));
    nread += stm.read(actual);
    checkData(arrayFromByteBuffer(actual), readOffset, expected, nread, "A few bytes");
    //Now read rest of it
    actual.limit(actual.capacity());
    while (actual.hasRemaining()) {
        int nbytes = stm.read(actual);
        if (nbytes < 0) {
            throw new EOFException("End of file reached before reading fully.");
        }
        nread += nbytes;
    }
    checkData(arrayFromByteBuffer(actual), readOffset, expected, "Read 3");
    if (legacyShortCircuitFails) {
        assertTrue(clientContext.getDisableLegacyBlockReaderLocal());
    }
    stm.close();
}
Also used: ClientContext (org.apache.hadoop.hdfs.ClientContext), EOFException (java.io.EOFException), DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem), ByteBuffer (java.nio.ByteBuffer), HdfsDataInputStream (org.apache.hadoop.hdfs.client.HdfsDataInputStream)
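
The arrayFromByteBuffer helper used above is likewise not shown on this page. Assuming it only needs to copy the bytes written into the direct ByteBuffer into a plain byte[] so that checkData can inspect them, a minimal stand-in could look like this; the class name ByteBufferHelperSketch is hypothetical.

import java.nio.ByteBuffer;

final class ByteBufferHelperSketch {
    // Copy everything written into the buffer so far (position 0 up to the
    // current position) into a byte[] without disturbing the caller's
    // position and limit.
    static byte[] arrayFromByteBuffer(ByteBuffer buf) {
        ByteBuffer dup = buf.duplicate();
        // flip() exposes the written bytes for reading from the duplicate.
        dup.flip();
        byte[] out = new byte[dup.remaining()];
        dup.get(out);
        return out;
    }
}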

Aggregations

ClientContext (org.apache.hadoop.hdfs.ClientContext): 3 usages
EOFException (java.io.EOFException): 2 usages
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 2 usages
ByteBuffer (java.nio.ByteBuffer): 1 usage
FSDataInputStream (org.apache.hadoop.fs.FSDataInputStream): 1 usage
HdfsDataInputStream (org.apache.hadoop.hdfs.client.HdfsDataInputStream): 1 usage
Test (org.junit.Test): 1 usage