use of org.apache.hadoop.fs.FSDataOutputStream in project flink by apache.
the class ContinuousFileProcessingCheckpointITCase method fillWithData.
/**
 * Fill the file with content and put the content in the {@code hdPathContents} list.
 */
private Tuple2<Path, String> fillWithData(String base, String fileName, int fileIdx, String sampleLine) throws IOException, InterruptedException {
    assert (localFs != null);
    org.apache.hadoop.fs.Path tmp = new org.apache.hadoop.fs.Path(base + "/." + fileName + fileIdx);
    FSDataOutputStream stream = localFs.create(tmp);
    StringBuilder str = new StringBuilder();
    for (int i = 0; i < LINES_PER_FILE; i++) {
        String line = fileIdx + ": " + sampleLine + " " + i + "\n";
        str.append(line);
        stream.write(line.getBytes(ConfigConstants.DEFAULT_CHARSET));
    }
    stream.close();
    return new Tuple2<>(tmp, str.toString());
}
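For context, here is a minimal standalone sketch of the same write pattern, assuming a local Hadoop FileSystem; the /tmp paths, file name, and line count are hypothetical, not taken from the test. The dot-prefixed name mirrors the test's convention of writing to a hidden file, and the final rename is an assumption about how such a file would later be published to a continuous file source that ignores hidden in-progress files.

import java.nio.charset.StandardCharsets;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class FillWithDataSketch {
    public static void main(String[] args) throws Exception {
        FileSystem localFs = FileSystem.getLocal(new Configuration());
        // Write to a dot-prefixed (hidden) file while the content is in progress.
        Path tmp = new Path("/tmp/.example-file-0");
        try (FSDataOutputStream stream = localFs.create(tmp)) {
            for (int i = 0; i < 10; i++) {
                String line = "0: sample line " + i + "\n";
                stream.write(line.getBytes(StandardCharsets.UTF_8));
            }
        }
        // Rename to the visible name once the file is complete (hypothetical step).
        localFs.rename(tmp, new Path("/tmp/example-file-0"));
    }
}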
use of org.apache.hadoop.fs.FSDataOutputStream in project hadoop by apache.
the class TestChecksumFileSystem method testVerifyChecksum.
@Test
public void testVerifyChecksum() throws Exception {
    Path testPath = new Path(TEST_ROOT_DIR, "testPath");
    Path testPath11 = new Path(TEST_ROOT_DIR, "testPath11");
    FSDataOutputStream fout = localFs.create(testPath);
    fout.write("testing".getBytes());
    fout.close();
    fout = localFs.create(testPath11);
    fout.write("testing you".getBytes());
    fout.close();
    // Exercise some boundary cases: a divisor of the chunk size,
    // the chunk size itself, 2x the chunk size, and +/-1 around these.
    readFile(localFs, testPath, 128);
    readFile(localFs, testPath, 511);
    readFile(localFs, testPath, 512);
    readFile(localFs, testPath, 513);
    readFile(localFs, testPath, 1023);
    readFile(localFs, testPath, 1024);
    readFile(localFs, testPath, 1025);
    localFs.delete(localFs.getChecksumFile(testPath), true);
    assertTrue("checksum deleted", !localFs.exists(localFs.getChecksumFile(testPath)));
    // Copy the wrong checksum file into place for testPath.
    FileUtil.copy(localFs, localFs.getChecksumFile(testPath11), localFs, localFs.getChecksumFile(testPath), false, true, localFs.getConf());
    assertTrue("checksum exists", localFs.exists(localFs.getChecksumFile(testPath)));
    boolean errorRead = false;
    try {
        readFile(localFs, testPath, 1024);
    } catch (ChecksumException ie) {
        errorRead = true;
    }
    assertTrue("error reading", errorRead);
    // With verification set to false, the read should now succeed.
    localFs.setVerifyChecksum(false);
    String str = readFile(localFs, testPath, 1024).toString();
    assertTrue("read", "testing".equals(str));
}
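To isolate the setVerifyChecksum toggle, here is a minimal sketch assuming a LocalFileSystem (which is a ChecksumFileSystem); the /tmp path is hypothetical. With verification off, reads skip CRC validation entirely, which is why the read over the mismatched checksum file above succeeds.

import java.nio.charset.StandardCharsets;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.LocalFileSystem;
import org.apache.hadoop.fs.Path;

public class VerifyChecksumSketch {
    public static void main(String[] args) throws Exception {
        LocalFileSystem localFs = FileSystem.getLocal(new Configuration());
        Path p = new Path("/tmp/checksum-demo");
        try (FSDataOutputStream out = localFs.create(p)) {
            out.write("testing".getBytes(StandardCharsets.UTF_8));
        }
        // The sidecar checksum file lives next to the data file.
        System.out.println("checksum file: " + localFs.getChecksumFile(p));
        // Disable verification: subsequent reads perform no CRC checks.
        localFs.setVerifyChecksum(false);
        try (FSDataInputStream in = localFs.open(p)) {
            byte[] buf = new byte[7];
            in.readFully(buf);
            System.out.println(new String(buf, StandardCharsets.UTF_8));
        }
    }
}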
use of org.apache.hadoop.fs.FSDataOutputStream in project hadoop by apache.
the class TestChecksumFileSystem method testMultiChunkFile.
@Test
public void testMultiChunkFile() throws Exception {
    Path testPath = new Path(TEST_ROOT_DIR, "testMultiChunk");
    FSDataOutputStream fout = localFs.create(testPath);
    for (int i = 0; i < 1000; i++) {
        fout.write(("testing" + i).getBytes());
    }
    fout.close();
    // Exercise some boundary cases: a divisor of the chunk size,
    // the chunk size itself, 2x the chunk size, and +/-1 around these.
    readFile(localFs, testPath, 128);
    readFile(localFs, testPath, 511);
    readFile(localFs, testPath, 512);
    readFile(localFs, testPath, 513);
    readFile(localFs, testPath, 1023);
    readFile(localFs, testPath, 1024);
    readFile(localFs, testPath, 1025);
}
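Both checksum tests depend on a readFile helper whose source is not shown here; the sketch below is an inferred approximation, not the actual test utility. The assumption is that it reads the whole file with a caller-chosen buffer size, so sizes straddling the 512-byte checksum chunk force reads that start and end mid-chunk.

import java.io.ByteArrayOutputStream;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ReadFileSketch {
    // Hypothetical stand-in for the test's readFile(fs, path, bufferSize) helper.
    static String readFile(FileSystem fs, Path path, int bufferSize) throws Exception {
        ByteArrayOutputStream baos = new ByteArrayOutputStream();
        try (FSDataInputStream in = fs.open(path)) {
            byte[] buf = new byte[bufferSize];
            int n;
            // Each read is at most bufferSize bytes, so buffer sizes around the
            // chunk size exercise the checksum code on partial-chunk boundaries.
            while ((n = in.read(buf, 0, buf.length)) > 0) {
                baos.write(buf, 0, n);
            }
        }
        return baos.toString("UTF-8");
    }

    public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.getLocal(new Configuration());
        System.out.println(readFile(fs, new Path("/tmp/checksum-demo"), 511));
    }
}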
use of org.apache.hadoop.fs.FSDataOutputStream in project hadoop by apache.
the class DataGenerator method genFile.
/**
 * Create a file with the name <code>file</code> and a length of
 * <code>fileSize</code>. The file is filled with the character 'a'.
 */
private void genFile(Path file, long fileSize) throws IOException {
    FSDataOutputStream out = fc.create(file, EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE), CreateOpts.createParent(), CreateOpts.bufferSize(4096), CreateOpts.repFac((short) 3));
    for (long i = 0; i < fileSize; i++) {
        out.writeByte('a');
    }
    out.close();
}
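A standalone sketch of the same FileContext create call with explicit CreateOpts; the path and byte count below are hypothetical. The replication factor only matters on replicated stores such as HDFS and is ignored by a local file system.

import java.util.EnumSet;
import org.apache.hadoop.fs.CreateFlag;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.Options.CreateOpts;
import org.apache.hadoop.fs.Path;

public class GenFileSketch {
    public static void main(String[] args) throws Exception {
        FileContext fc = FileContext.getFileContext();
        Path file = new Path("/tmp/genfile-demo");
        try (FSDataOutputStream out = fc.create(file,
                EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE),
                CreateOpts.createParent(),     // create missing parent directories
                CreateOpts.bufferSize(4096),   // I/O buffer size in bytes
                CreateOpts.repFac((short) 3))) {
            for (long i = 0; i < 1024; i++) {
                out.writeByte('a'); // one byte per iteration, as in genFile
            }
        }
    }
}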
use of org.apache.hadoop.fs.FSDataOutputStream in project hadoop by apache.
the class TestCopy method testCopyStreamTargetExists.
@Test
public void testCopyStreamTargetExists() throws Exception {
    FSDataOutputStream out = mock(FSDataOutputStream.class);
    whenFsCreate().thenReturn(out);
    when(mockFs.getFileStatus(eq(path))).thenReturn(fileStat);
    // Refresh so the target is seen as existing.
    target.refreshStatus();
    cmd.setOverwrite(true);
    when(mockFs.getFileStatus(eq(tmpPath))).thenReturn(fileStat);
    when(mockFs.delete(eq(path), eq(false))).thenReturn(true);
    when(mockFs.rename(eq(tmpPath), eq(path))).thenReturn(true);
    FSInputStream in = mock(FSInputStream.class);
    when(in.read(any(byte[].class), anyInt(), anyInt())).thenReturn(-1);
    tryCopyStream(in, true);
    verify(mockFs).delete(eq(path), anyBoolean());
    verify(mockFs).rename(eq(tmpPath), eq(path));
    verify(mockFs, never()).delete(eq(tmpPath), anyBoolean());
    verify(mockFs, never()).close();
}
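The mock verifications pin down a write-to-temp-then-rename protocol: the stream lands in a temporary file, the stale target is deleted, and the temp file is renamed into place, with the temp file never deleted on the success path. Below is a hedged sketch of that protocol; copyStreamToTarget and the ._COPYING_ suffix are illustrative, not the actual CommandWithDestination internals.

import java.io.InputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IOUtils;

public class CopyStreamSketch {
    // Hypothetical helper mirroring the behavior the mocks verify.
    static void copyStreamToTarget(FileSystem fs, InputStream in, Path target,
                                   boolean overwrite) throws Exception {
        Path tmp = new Path(target.getParent(), "." + target.getName() + "._COPYING_");
        try (FSDataOutputStream out = fs.create(tmp)) {
            IOUtils.copyBytes(in, out, 4096, false); // stream into the temp file
        }
        if (overwrite && fs.exists(target)) {
            fs.delete(target, false); // remove the stale target, as the mock expects
        }
        fs.rename(tmp, target);       // tmp itself is never deleted on success
    }
}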