Use of org.apache.flink.runtime.fs.hdfs.HadoopFileSystem in project flink by apache.
Example from the class HDFSTest, method testHDFS.
@Test
public void testHDFS() {
    Path file = new Path(hdfsURI + hdPath);
    org.apache.hadoop.fs.Path result = new org.apache.hadoop.fs.Path(hdfsURI + "/result");
    try {
        // Resolving an hdfs:// path through Flink's FileSystem abstraction
        // must yield the Hadoop-backed implementation.
        FileSystem fs = file.getFileSystem();
        assertTrue("Must be HadoopFileSystem", fs instanceof HadoopFileSystem);

        DopOneTestEnvironment.setAsContext();
        try {
            // Run the WordCount example against the HDFS input and output paths.
            WordCount.main(new String[] { "--input", file.toString(), "--output", result.toString() });
        } catch (Throwable t) {
            t.printStackTrace();
            Assert.fail("Test failed with " + t.getMessage());
        } finally {
            DopOneTestEnvironment.unsetAsContext();
        }

        assertTrue("No result file present", hdfs.exists(result));

        // validate output:
        org.apache.hadoop.fs.FSDataInputStream inStream = hdfs.open(result);
        StringWriter writer = new StringWriter();
        IOUtils.copy(inStream, writer);
        String resultString = writer.toString();

        Assert.assertEquals("hdfs 10\n" + "hello 10\n", resultString);
        inStream.close();
    } catch (IOException e) {
        e.printStackTrace();
        Assert.fail("Error in test: " + e.getMessage());
    }
}
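For context, here is a minimal standalone sketch of the same check: resolving an hdfs:// path through Flink's Path.getFileSystem() is expected to return a HadoopFileSystem when Hadoop file system support is on the classpath. The class name HadoopFsSketch and the URI hdfs://localhost:9000/tmp/words are placeholders for illustration, not part of the Flink test.

import org.apache.flink.core.fs.FileSystem;
import org.apache.flink.core.fs.Path;
import org.apache.flink.runtime.fs.hdfs.HadoopFileSystem;

public class HadoopFsSketch {
    public static void main(String[] args) throws Exception {
        // Placeholder URI; assumes a namenode is reachable at localhost:9000.
        Path path = new Path("hdfs://localhost:9000/tmp/words");

        // Same call used in the test above: the URI scheme decides which
        // FileSystem implementation Flink hands back.
        FileSystem fs = path.getFileSystem();
        System.out.println("Implementation: " + fs.getClass().getName());
        System.out.println("Is HadoopFileSystem: " + (fs instanceof HadoopFileSystem));
        System.out.println("Path exists: " + fs.exists(path));
    }
}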