Use of org.apache.hadoop.fs.FSDataInputStream in project hadoop by apache.
The class DFSClientCache, method getDfsInputStream.
FSDataInputStream getDfsInputStream(String userName, String inodePath) {
  // Look up (or load) the cached DFSInputStream keyed by (user, inode path).
  DFSInputStreamCaheKey k = new DFSInputStreamCaheKey(userName, inodePath);
  FSDataInputStream s = null;
  try {
    s = inputstreamCache.get(k);
  } catch (ExecutionException e) {
    LOG.warn("Failed to create DFSInputStream for user:" + userName
        + " Cause:" + e);
  }
  return s;
}
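For context, a minimal sketch of how a caller might use the cached stream for a positioned read. The names clientCache, userName, path, and readOffset are illustrative assumptions, not taken from the gateway code:

// Illustrative only: clientCache, userName, path and readOffset are assumed names.
FSDataInputStream fis = clientCache.getDfsInputStream(userName, path);
if (fis != null) {
  byte[] data = new byte[4096];
  // Positioned read: does not move the stream's current offset,
  // and may return fewer bytes than requested.
  int bytesRead = fis.read(readOffset, data, 0, data.length);
}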
Use of org.apache.hadoop.fs.FSDataInputStream in project hadoop by apache.
The class StripedFileTestUtil, method verifyStatefulRead.
static void verifyStatefulRead(FileSystem fs, Path srcPath, int fileLength, byte[] expected, byte[] buf) throws IOException {
  try (FSDataInputStream in = fs.open(srcPath)) {
    final byte[] result = new byte[fileLength];
    int readLen = 0;
    int ret;
    // Stateful read: consume the stream sequentially in buf-sized chunks.
    while ((ret = in.read(buf, 0, buf.length)) >= 0) {
      System.arraycopy(buf, 0, result, readLen, ret);
      readLen += ret;
    }
    assertEquals("The length of file should be the same to write size", fileLength, readLen);
    Assert.assertArrayEquals(expected, result);
  }
}
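A minimal sketch of how a test might drive this helper, assuming fs, srcPath, and fileLength are set up by the test and the file is written with the expected bytes beforehand (all names are illustrative):

// Illustrative only: fs, srcPath and fileLength are assumed test fixtures.
final byte[] expected = new byte[fileLength];
for (int i = 0; i < fileLength; i++) {
  expected[i] = (byte) i;
}
try (FSDataOutputStream out = fs.create(srcPath)) {
  out.write(expected);
}
byte[] buf = new byte[1024];
StripedFileTestUtil.verifyStatefulRead(fs, srcPath, fileLength, expected, buf);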
Use of org.apache.hadoop.fs.FSDataInputStream in project hadoop by apache.
The class StripedFileTestUtil, method verifyPread.
static void verifyPread(FileSystem fs, Path srcPath, int fileLength, byte[] expected, byte[] buf) throws IOException {
  final ErasureCodingPolicy ecPolicy = ((DistributedFileSystem) fs).getErasureCodingPolicy(srcPath);
  try (FSDataInputStream in = fs.open(srcPath)) {
    // Start offsets chosen to land on and around cell and stripe boundaries of the EC layout.
    int[] startOffsets = { 0, 1,
        ecPolicy.getCellSize() - 102,
        ecPolicy.getCellSize(),
        ecPolicy.getCellSize() + 102,
        ecPolicy.getCellSize() * (ecPolicy.getNumDataUnits() - 1),
        ecPolicy.getCellSize() * (ecPolicy.getNumDataUnits() - 1) + 102,
        ecPolicy.getCellSize() * ecPolicy.getNumDataUnits(),
        fileLength - 102, fileLength - 1 };
    for (int startOffset : startOffsets) {
      // Clamp the offset into the valid range [0, fileLength - 1].
      startOffset = Math.max(0, Math.min(startOffset, fileLength - 1));
      int remaining = fileLength - startOffset;
      int offset = startOffset;
      final byte[] result = new byte[remaining];
      while (remaining > 0) {
        // Positioned read: readFully(position, ...) does not change the stream's current position.
        int target = Math.min(remaining, buf.length);
        in.readFully(offset, buf, 0, target);
        System.arraycopy(buf, 0, result, offset - startOffset, target);
        remaining -= target;
        offset += target;
      }
      for (int i = 0; i < fileLength - startOffset; i++) {
        assertEquals("Byte at " + (startOffset + i) + " is different, "
            + "the startOffset is " + startOffset,
            expected[startOffset + i], result[i]);
      }
    }
  }
}
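Note that verifyPread casts the FileSystem to DistributedFileSystem to read the erasure coding policy, so it only applies to erasure-coded files on HDFS. A minimal usage sketch under that assumption (dfs, ecFilePath, fileLength, and expected are illustrative names):

// Illustrative only: dfs must be a DistributedFileSystem and ecFilePath an erasure-coded file.
byte[] smallBuf = new byte[1024];
StripedFileTestUtil.verifyPread(dfs, ecFilePath, fileLength, expected, smallBuf);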
Use of org.apache.hadoop.fs.FSDataInputStream in project hadoop by apache.
The class StripedFileTestUtil, method verifyStatefulRead.
static void verifyStatefulRead(FileSystem fs, Path srcPath, int fileLength, byte[] expected, ByteBuffer buf) throws IOException {
  try (FSDataInputStream in = fs.open(srcPath)) {
    ByteBuffer result = ByteBuffer.allocate(fileLength);
    int readLen = 0;
    int ret;
    while ((ret = in.read(buf)) >= 0) {
      readLen += ret;
      buf.flip();
      result.put(buf);
      buf.clear();
    }
    assertEquals("The length of file should be the same to write size", fileLength, readLen);
    Assert.assertArrayEquals(expected, result.array());
  }
}
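A minimal sketch of calling the ByteBuffer variant; a heap buffer suffices here, and FSDataInputStream.read(ByteBuffer) requires the wrapped stream to support ByteBufferReadable, which DFSInputStream does (names are illustrative):

// Illustrative only: reuses the same expected bytes and file as the byte[] variant above.
ByteBuffer byteBuf = ByteBuffer.allocate(1024);
StripedFileTestUtil.verifyStatefulRead(fs, srcPath, fileLength, expected, byteBuf);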
Use of org.apache.hadoop.fs.FSDataInputStream in project hadoop by apache.
The class StripedFileTestUtil, method verifySeek.
static void verifySeek(FileSystem fs, Path srcPath, int fileLength, ErasureCodingPolicy ecPolicy, int blkGroupSize) throws IOException {
  try (FSDataInputStream in = fs.open(srcPath)) {
    // seek to 1/2 of content
    int pos = fileLength / 2;
    assertSeekAndRead(in, pos, fileLength);
    // seek to 1/3 of content
    pos = fileLength / 3;
    assertSeekAndRead(in, pos, fileLength);
    // seek to 0 pos
    pos = 0;
    assertSeekAndRead(in, pos, fileLength);
    if (fileLength > ecPolicy.getCellSize()) {
      // seek to cellSize boundary
      pos = ecPolicy.getCellSize() - 1;
      assertSeekAndRead(in, pos, fileLength);
    }
    if (fileLength > ecPolicy.getCellSize() * ecPolicy.getNumDataUnits()) {
      // seek to striped cell group boundary
      pos = ecPolicy.getCellSize() * ecPolicy.getNumDataUnits() - 1;
      assertSeekAndRead(in, pos, fileLength);
    }
    if (fileLength > blkGroupSize) {
      // seek to striped block group boundary
      pos = blkGroupSize - 1;
      assertSeekAndRead(in, pos, fileLength);
    }
    if (!(in.getWrappedStream() instanceof WebHdfsInputStream)) {
      try {
        in.seek(-1);
        Assert.fail("Should be failed if seek to negative offset");
      } catch (EOFException e) {
        // expected
      }
      try {
        in.seek(fileLength + 1);
        Assert.fail("Should be failed if seek after EOF");
      } catch (EOFException e) {
        // expected
      }
    }
  }
}
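A minimal sketch of driving verifySeek, assuming the logical block group size is the default block size multiplied by the number of data units of the policy (the usual striped layout); dfs, srcPath, and fileLength are illustrative names:

// Illustrative only: dfs and srcPath are assumed test fixtures on an erasure-coded directory.
ErasureCodingPolicy policy = dfs.getErasureCodingPolicy(srcPath);
int groupSize = (int) dfs.getDefaultBlockSize(srcPath) * policy.getNumDataUnits();
StripedFileTestUtil.verifySeek(dfs, srcPath, fileLength, policy, groupSize);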