Use of org.apache.hadoop.fs.FSDataInputStream in project hadoop by apache.
The class TestFileAppend2, method testAppendLessThanChecksumChunk.
/**
* Make sure when the block length after appending is less than 512 bytes, the
* checksum re-calculation and overwrite are performed correctly.
*/
@Test
public void testAppendLessThanChecksumChunk() throws Exception {
  final byte[] buf = new byte[1024];
  final MiniDFSCluster cluster = new MiniDFSCluster.Builder(new HdfsConfiguration())
      .numDataNodes(1).build();
  cluster.waitActive();
  try (DistributedFileSystem fs = cluster.getFileSystem()) {
    final int len1 = 200;
    final int len2 = 300;
    final Path p = new Path("/foo");
    FSDataOutputStream out = fs.create(p);
    out.write(buf, 0, len1);
    out.close();
    out = fs.append(p);
    out.write(buf, 0, len2);
    // flush but leave open
    out.hflush();
    // read data to verify the replica's content and checksum are correct
    FSDataInputStream in = fs.open(p);
    final int length = in.read(0, buf, 0, len1 + len2);
    assertTrue(length > 0);
    in.close();
    out.close();
  } finally {
    cluster.shutdown();
  }
}
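The verification step above uses FSDataInputStream's positioned-read overload, in.read(position, buffer, offset, length), which reads from an absolute file offset without moving the stream's current position and returns the number of bytes read by that single call. A minimal, self-contained sketch of the same call against a generic FileSystem (the path and string contents here are made up for illustration, not taken from the test):

// Sketch only: positioned read with FSDataInputStream; path and data are hypothetical.
import java.nio.charset.StandardCharsets;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class PReadSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);
    Path p = new Path("/demo/pread.txt");
    try (FSDataOutputStream out = fs.create(p, true)) {
      out.write("hello, positioned read".getBytes(StandardCharsets.UTF_8));
    }
    byte[] buf = new byte[16];
    try (FSDataInputStream in = fs.open(p)) {
      // read(position, buffer, offset, length) reads from an absolute offset
      // and does not move the stream's current position
      int n = in.read(7, buf, 0, buf.length);
      System.out.println(new String(buf, 0, n, StandardCharsets.UTF_8));
    }
  }
}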
Use of org.apache.hadoop.fs.FSDataInputStream in project hadoop by apache.
The class TestFileConcurrentReader, method tailFile.
private long tailFile(Path file, long startPos) throws IOException {
  long numRead = 0;
  FSDataInputStream inputStream = fileSystem.open(file);
  inputStream.seek(startPos);
  int len = 4 * 1024;
  byte[] buf = new byte[len];
  int read;
  while ((read = inputStream.read(buf)) > -1) {
    LOG.info(String.format("read %d bytes", read));
    if (!validateSequentialBytes(buf, (int) (startPos + numRead), read)) {
      LOG.error(String.format("invalid bytes: [%s]\n", Arrays.toString(buf)));
      throw new ChecksumException(String.format("unable to validate bytes"), startPos);
    }
    numRead += read;
  }
  inputStream.close();
  return numRead + startPos - 1;
}
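This method combines the Seekable and plain InputStream sides of FSDataInputStream: seek(startPos) positions the stream, and read(buf) then advances sequentially until it returns -1 at end of stream. A stripped-down sketch of the same pattern, without the test's validation helpers (the class, method, and buffer size below are illustrative, not part of the test):

// Sketch only: seek to an offset, then read sequentially to end of file.
import java.io.IOException;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public final class TailSketch {
  /** Returns the number of bytes read from startPos to the current end of file. */
  static long readFrom(FileSystem fs, Path file, long startPos) throws IOException {
    long numRead = 0;
    byte[] buf = new byte[4 * 1024];
    try (FSDataInputStream in = fs.open(file)) {
      in.seek(startPos);                    // position the stream at the starting offset
      int read;
      while ((read = in.read(buf)) > -1) {  // -1 signals end of stream
        numRead += read;
      }
    }
    return numRead;
  }
}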
Use of org.apache.hadoop.fs.FSDataInputStream in project hadoop by apache.
The class TestFileCreation, method testFileCreationSyncOnClose.
/**
* Test creating a file whose data gets synced when closed
*/
@Test
public void testFileCreationSyncOnClose() throws IOException {
  Configuration conf = new HdfsConfiguration();
  conf.setBoolean(DFS_DATANODE_SYNCONCLOSE_KEY, true);
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
  try {
    FileSystem fs = cluster.getFileSystem();
    Path[] p = { new Path("/foo"), new Path("/bar") };
    // write 2 files at the same time
    FSDataOutputStream[] out = { fs.create(p[0]), fs.create(p[1]) };
    int i = 0;
    for (; i < 100; i++) {
      out[0].write(i);
      out[1].write(i);
    }
    out[0].close();
    for (; i < 200; i++) {
      out[1].write(i);
    }
    out[1].close();
    // verify
    FSDataInputStream[] in = { fs.open(p[0]), fs.open(p[1]) };
    for (i = 0; i < 100; i++) {
      assertEquals(i, in[0].read());
    }
    for (i = 0; i < 200; i++) {
      assertEquals(i, in[1].read());
    }
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
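The test enables DFS_DATANODE_SYNCONCLOSE_KEY (a DataNode-side setting that fsyncs block files when a block is finalized) and then verifies the data byte by byte with FSDataInputStream.read(), which returns one unsigned byte per call. A minimal sketch of the same write-then-verify shape outside a MiniDFSCluster; it assumes a client Configuration that already points at an HDFS cluster, and the path is made up:

// Sketch only: enable sync-on-close and verify single-byte reads; path is hypothetical.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.HdfsConfiguration;

public class SyncOnCloseSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new HdfsConfiguration();
    // DataNode-side setting: fsync block files to disk when a block is closed
    conf.setBoolean(DFSConfigKeys.DFS_DATANODE_SYNCONCLOSE_KEY, true);
    FileSystem fs = FileSystem.get(conf);
    Path p = new Path("/demo/sync-on-close");
    try (FSDataOutputStream out = fs.create(p, true)) {
      for (int i = 0; i < 100; i++) {
        out.write(i);                     // single-byte writes, as in the test
      }
    }
    try (FSDataInputStream in = fs.open(p)) {
      for (int i = 0; i < 100; i++) {
        int b = in.read();                // read() returns one byte (0-255), or -1 at EOF
        if (b != i) {
          throw new AssertionError("expected " + i + " but read " + b);
        }
      }
    }
  }
}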
Use of org.apache.hadoop.fs.FSDataInputStream in project hadoop by apache.
The class TestHDFSConcat, method testConcatNotCompleteBlock.
// test case when the final block is not of full length
@Test
public void testConcatNotCompleteBlock() throws IOException {
  long trgFileLen = blockSize * 3;
  // block at the end - not full
  long srcFileLen = blockSize * 3 + 20;
  // create first file
  String name1 = "/trg", name2 = "/src";
  Path filePath1 = new Path(name1);
  DFSTestUtil.createFile(dfs, filePath1, trgFileLen, REPL_FACTOR, 1);
  HdfsFileStatus fStatus = nn.getFileInfo(name1);
  long fileLen = fStatus.getLen();
  assertEquals(fileLen, trgFileLen);
  // read the file
  FSDataInputStream stm = dfs.open(filePath1);
  byte[] byteFile1 = new byte[(int) trgFileLen];
  stm.readFully(0, byteFile1);
  stm.close();
  LocatedBlocks lb1 = nn.getBlockLocations(name1, 0, trgFileLen);
  Path filePath2 = new Path(name2);
  DFSTestUtil.createFile(dfs, filePath2, srcFileLen, REPL_FACTOR, 1);
  fStatus = nn.getFileInfo(name2);
  fileLen = fStatus.getLen();
  assertEquals(srcFileLen, fileLen);
  // read the file
  stm = dfs.open(filePath2);
  byte[] byteFile2 = new byte[(int) srcFileLen];
  stm.readFully(0, byteFile2);
  stm.close();
  LocatedBlocks lb2 = nn.getBlockLocations(name2, 0, srcFileLen);
  System.out.println("trg len=" + trgFileLen + "; src len=" + srcFileLen);
  // move the blocks
  dfs.concat(filePath1, new Path[] { filePath2 });
  long totalLen = trgFileLen + srcFileLen;
  fStatus = nn.getFileInfo(name1);
  fileLen = fStatus.getLen();
  // read the resulting file
  stm = dfs.open(filePath1);
  byte[] byteFileConcat = new byte[(int) fileLen];
  stm.readFully(0, byteFileConcat);
  stm.close();
  LocatedBlocks lbConcat = nn.getBlockLocations(name1, 0, fileLen);
  // verifications
  // 1. number of blocks
  assertEquals(lbConcat.locatedBlockCount(),
      lb1.locatedBlockCount() + lb2.locatedBlockCount());
  // 2. file lengths
  System.out.println("file1 len=" + fileLen + "; total len=" + totalLen);
  assertEquals(fileLen, totalLen);
  // 3. removal of the src file
  fStatus = nn.getFileInfo(name2);
  // file shouldn't exist
  assertNull("File " + name2 + " still exists", fStatus);
  // 4. content
  checkFileContent(byteFileConcat, new byte[][] { byteFile1, byteFile2 });
}
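The test reads each whole file with FSDataInputStream.readFully(position, buffer), which reads exactly buffer.length bytes starting at the given offset (or throws an EOFException), and then merges the files with concat(). A compact sketch of those two calls against the public FileSystem API; the class and helper method names below are illustrative, not part of the test:

// Sketch only: readFully() to capture a file's bytes, then FileSystem.concat().
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public final class ConcatSketch {
  static byte[] readWhole(FileSystem fs, Path p) throws Exception {
    long len = fs.getFileStatus(p).getLen();
    byte[] bytes = new byte[(int) len];
    try (FSDataInputStream in = fs.open(p)) {
      // readFully(position, buffer) fills the entire buffer from the given
      // absolute offset, or throws EOFException if the file is too short
      in.readFully(0, bytes);
    }
    return bytes;
  }

  static void concatInto(FileSystem fs, Path target, Path source) throws Exception {
    // concat() moves the source's blocks onto the end of the target and removes
    // the source; it is supported by HDFS (DistributedFileSystem), not by all FileSystems
    fs.concat(target, new Path[] { source });
  }
}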
Use of org.apache.hadoop.fs.FSDataInputStream in project hadoop by apache.
The class TestVLong, method testVLongRandom.
@Test
public void testVLongRandom() throws IOException {
  int count = 1024 * 1024;
  long[] data = new long[count];
  Random rng = new Random();
  for (int i = 0; i < data.length; ++i) {
    int shift = rng.nextInt(Long.SIZE) + 1;
    long mask = (1L << shift) - 1;
    long a = ((long) rng.nextInt()) << 32;
    long b = ((long) rng.nextInt()) & 0xffffffffL;
    data[i] = (a + b) & mask;
  }
  FSDataOutputStream out = fs.create(path);
  for (int i = 0; i < data.length; ++i) {
    Utils.writeVLong(out, data[i]);
  }
  out.close();
  FSDataInputStream in = fs.open(path);
  for (int i = 0; i < data.length; ++i) {
    Assert.assertEquals(Utils.readVLong(in), data[i]);
  }
  in.close();
  fs.delete(path, false);
}
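This works because FSDataOutputStream and FSDataInputStream implement DataOutput and DataInput, so variable-length integer codecs can write to and read from them directly; here the Utils class is the TFile-specific org.apache.hadoop.io.file.tfile.Utils. A sketch of the same round-trip idea using the general-purpose WritableUtils VLong codec instead (a different encoding than the test's Utils class; the path and values below are made up):

// Sketch only: VLong round trip through FSDataOutputStream/FSDataInputStream
// using org.apache.hadoop.io.WritableUtils; path is hypothetical.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.WritableUtils;

public class VLongSketch {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration());
    Path p = new Path("/demo/vlong");
    long[] values = { 0, 1, -1, 127, 128, Long.MAX_VALUE };
    // FSDataOutputStream implements DataOutput, so it can be passed directly
    try (FSDataOutputStream out = fs.create(p, true)) {
      for (long v : values) {
        WritableUtils.writeVLong(out, v);
      }
    }
    // FSDataInputStream implements DataInput, so readVLong can decode from it
    try (FSDataInputStream in = fs.open(p)) {
      for (long v : values) {
        if (WritableUtils.readVLong(in) != v) {
          throw new AssertionError("round trip failed for " + v);
        }
      }
    }
    fs.delete(p, false);
  }
}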