Use of org.apache.hadoop.hdfs.server.datanode.CachingStrategy in project hadoop by apache.
Class DFSInputStream, method getBlockReader:
protected BlockReader getBlockReader(LocatedBlock targetBlock, long offsetInBlock,
    long length, InetSocketAddress targetAddr, StorageType storageType,
    DatanodeInfo datanode) throws IOException {
  ExtendedBlock blk = targetBlock.getBlock();
  Token<BlockTokenIdentifier> accessToken = targetBlock.getBlockToken();
  CachingStrategy curCachingStrategy;
  boolean shortCircuitForbidden;
  // Snapshot the caching strategy and the short-circuit flag atomically
  // under infoLock so the new reader sees a consistent view of both.
  synchronized (infoLock) {
    curCachingStrategy = cachingStrategy;
    shortCircuitForbidden = shortCircuitForbidden();
  }
  return new BlockReaderFactory(dfsClient.getConf())
      .setInetSocketAddress(targetAddr)
      .setRemotePeerFactory(dfsClient)
      .setDatanodeInfo(datanode)
      .setStorageType(storageType)
      .setFileName(src)
      .setBlock(blk)
      .setBlockToken(accessToken)
      .setStartOffset(offsetInBlock)
      .setVerifyChecksum(verifyChecksum)
      .setClientName(dfsClient.clientName)
      .setLength(length)
      .setCachingStrategy(curCachingStrategy)
      .setAllowShortCircuitLocalReads(!shortCircuitForbidden)
      .setClientCacheContext(dfsClient.getClientContext())
      .setUserGroupInformation(dfsClient.ugi)
      .setConfiguration(dfsClient.getConfiguration())
      .setTracer(dfsClient.getTracer())
      .build();
}
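CachingStrategy carries the drop-behind and readahead hints that this method hands to the BlockReaderFactory. From application code, those hints are normally adjusted through FSDataInputStream, which implements the CanSetReadahead and CanSetDropBehind interfaces. A minimal client-side sketch, assuming fs.defaultFS points at an HDFS cluster; the path /tmp/example.dat is hypothetical:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ReadaheadHintExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    try (FileSystem fs = FileSystem.get(conf);
         FSDataInputStream in = fs.open(new Path("/tmp/example.dat"))) {
      in.setReadahead(4L * 1024 * 1024); // hint: read 4 MB ahead of the cursor
      in.setDropBehind(true);            // hint: drop pages from the OS cache after use
      byte[] buf = new byte[8192];
      in.readFully(0, buf);              // positioned read of the first 8 KB
    }
  }
}

Both calls are hints; a stream that cannot honor them may throw UnsupportedOperationException.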
Use of org.apache.hadoop.hdfs.server.datanode.CachingStrategy in project hadoop by apache.
Class DFSOutputStream, method setDropBehind:
@Override
public void setDropBehind(Boolean dropBehind) throws IOException {
  CachingStrategy prevStrategy, nextStrategy;
  // CachingStrategy is immutable. So build a new CachingStrategy with the
  // modifications we want, and compare-and-swap it in.
  do {
    prevStrategy = this.cachingStrategy.get();
    nextStrategy = new CachingStrategy.Builder(prevStrategy)
        .setDropBehind(dropBehind).build();
  } while (!this.cachingStrategy.compareAndSet(prevStrategy, nextStrategy));
}
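The do/while loop is the standard lock-free update pattern for an immutable value held in a java.util.concurrent.atomic.AtomicReference: snapshot the current value, build a modified copy, and retry if another thread swapped in a different value in the meantime. A self-contained sketch of the same idiom, where the Settings class is hypothetical and stands in for CachingStrategy:

import java.util.concurrent.atomic.AtomicReference;

public class CasUpdateExample {
  // Immutable value object, playing the role of CachingStrategy.
  static final class Settings {
    final boolean dropBehind;
    Settings(boolean dropBehind) { this.dropBehind = dropBehind; }
  }

  private final AtomicReference<Settings> ref =
      new AtomicReference<>(new Settings(false));

  void setDropBehind(boolean dropBehind) {
    Settings prev, next;
    do {
      prev = ref.get();                // snapshot the current value
      next = new Settings(dropBehind); // build the modified copy
    } while (!ref.compareAndSet(prev, next)); // retry if another thread raced us
  }
}

Because Settings is never mutated in place, readers holding the old reference still see a consistent value, and no lock is ever taken.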
Use of org.apache.hadoop.hdfs.server.datanode.CachingStrategy in project hadoop by apache.
Class TestBlockReaderLocal, method runBlockReaderLocalTest:
public void runBlockReaderLocalTest(BlockReaderLocalTest test,
    boolean checksum, long readahead) throws IOException {
  Assume.assumeThat(DomainSocket.getLoadingFailureReason(), equalTo(null));
  MiniDFSCluster cluster = null;
  HdfsConfiguration conf = new HdfsConfiguration();
  conf.setBoolean(HdfsClientConfigKeys.Read.ShortCircuit.SKIP_CHECKSUM_KEY,
      !checksum);
  conf.setLong(HdfsClientConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY,
      BlockReaderLocalTest.BYTES_PER_CHECKSUM);
  conf.set(DFSConfigKeys.DFS_CHECKSUM_TYPE_KEY, "CRC32C");
  conf.setLong(HdfsClientConfigKeys.DFS_CLIENT_CACHE_READAHEAD, readahead);
  test.setConfiguration(conf);
  FileInputStream dataIn = null, metaIn = null;
  final Path TEST_PATH = new Path("/a");
  final long RANDOM_SEED = 4567L;
  BlockReaderLocal blockReaderLocal = null;
  FSDataInputStream fsIn = null;
  byte[] original = new byte[BlockReaderLocalTest.TEST_LENGTH];
  FileSystem fs = null;
  ShortCircuitShm shm = null;
  RandomAccessFile raf = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    cluster.waitActive();
    fs = cluster.getFileSystem();
    DFSTestUtil.createFile(fs, TEST_PATH, BlockReaderLocalTest.TEST_LENGTH,
        (short) 1, RANDOM_SEED);
    try {
      DFSTestUtil.waitReplication(fs, TEST_PATH, (short) 1);
    } catch (InterruptedException e) {
      Assert.fail("unexpected InterruptedException during " +
          "waitReplication: " + e);
    } catch (TimeoutException e) {
      Assert.fail("unexpected TimeoutException during " +
          "waitReplication: " + e);
    }
    // Read the whole file once so we have a reference copy to compare against.
    fsIn = fs.open(TEST_PATH);
    IOUtils.readFully(fsIn, original, 0, BlockReaderLocalTest.TEST_LENGTH);
    fsIn.close();
    fsIn = null;
    ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, TEST_PATH);
    File dataFile = cluster.getBlockFile(0, block);
    File metaFile = cluster.getBlockMetadataFile(0, block);
    ShortCircuitCache shortCircuitCache =
        ClientContext.getFromConf(conf).getShortCircuitCache();
    // Shut the cluster down; the block and metadata files remain on disk
    // and are opened directly below for the short-circuit read.
    cluster.shutdown();
    cluster = null;
    test.setup(dataFile, checksum);
    FileInputStream[] streams = {
        new FileInputStream(dataFile),
        new FileInputStream(metaFile)
    };
    dataIn = streams[0];
    metaIn = streams[1];
    ExtendedBlockId key =
        new ExtendedBlockId(block.getBlockId(), block.getBlockPoolId());
    raf = new RandomAccessFile(
        new File(sockDir.getDir().getAbsolutePath(),
            UUID.randomUUID().toString()), "rw");
    raf.setLength(8192);
    FileInputStream shmStream = new FileInputStream(raf.getFD());
    shm = new ShortCircuitShm(ShmId.createRandom(), shmStream);
    ShortCircuitReplica replica = new ShortCircuitReplica(
        key, dataIn, metaIn, shortCircuitCache, Time.now(),
        shm.allocAndRegisterSlot(ExtendedBlockId.fromExtendedBlock(block)));
    blockReaderLocal = new BlockReaderLocal.Builder(
            new DfsClientConf.ShortCircuitConf(conf))
        .setFilename(TEST_PATH.getName())
        .setBlock(block)
        .setShortCircuitReplica(replica)
        .setCachingStrategy(new CachingStrategy(false, readahead))
        .setVerifyChecksum(checksum)
        .setTracer(FsTracer.get(conf))
        .build();
    dataIn = null;
    metaIn = null;
    test.doTest(blockReaderLocal, original);
    // BlockReaderLocal should not alter the file position.
    Assert.assertEquals(0, streams[0].getChannel().position());
    Assert.assertEquals(0, streams[1].getChannel().position());
  } finally {
    if (fsIn != null)
      fsIn.close();
    if (fs != null)
      fs.close();
    if (cluster != null)
      cluster.shutdown();
    if (dataIn != null)
      dataIn.close();
    if (metaIn != null)
      metaIn.close();
    if (blockReaderLocal != null)
      blockReaderLocal.close();
    if (shm != null)
      shm.free();
    if (raf != null)
      raf.close();
  }
}
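The test constructs its strategy directly with new CachingStrategy(false, readahead), whereas setDropBehind above copies and modifies one through CachingStrategy.Builder. A small sketch showing both construction paths from this section side by side; the 4096-byte readahead is an arbitrary example value:

import org.apache.hadoop.hdfs.server.datanode.CachingStrategy;

public class CachingStrategyConstruction {
  public static void main(String[] args) {
    // Direct construction, as in runBlockReaderLocalTest:
    // drop-behind disabled, readahead given explicitly in bytes.
    CachingStrategy direct = new CachingStrategy(false, 4096L);

    // Copy-and-modify through the Builder, as in DFSOutputStream.setDropBehind:
    // start from an existing strategy and override a single field.
    CachingStrategy modified =
        new CachingStrategy.Builder(direct).setDropBehind(true).build();

    System.out.println(direct);
    System.out.println(modified);
  }
}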