Use of org.apache.hadoop.fs.LocalFileSystem in project hazelcast-jet by hazelcast.
Class ReadHdfsPTest, method writeToFile:
private void writeToFile() throws IOException {
    Configuration conf = new Configuration();
    LocalFileSystem local = FileSystem.getLocal(conf);
    IntStream.range(0, 4).mapToObj(i -> createPath()).forEach(path -> uncheckRun(() -> {
        paths.add(path);
        if (SequenceFileInputFormat.class.equals(inputFormatClass)) {
            writeToSequenceFile(conf, path);
        } else {
            writeToTextFile(local, path);
        }
    }));
}
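The writeToTextFile helper called above is not included in the excerpt. A minimal sketch of what such a helper could look like, assuming the test only needs a handful of text lines per file (the body, line count, and content below are illustrative, not the project's actual code):

import java.io.IOException;
import java.io.OutputStreamWriter;
import java.io.PrintWriter;
import java.nio.charset.StandardCharsets;
import org.apache.hadoop.fs.LocalFileSystem;
import org.apache.hadoop.fs.Path;

// Hypothetical helper: writes a few text lines to the given path via the
// LocalFileSystem handle obtained from FileSystem.getLocal(conf).
private static void writeToTextFile(LocalFileSystem local, Path path) throws IOException {
    try (PrintWriter writer = new PrintWriter(
            new OutputStreamWriter(local.create(path), StandardCharsets.UTF_8))) {
        for (int i = 0; i < 10; i++) {
            writer.println("line-" + i);
        }
    }
}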
Use of org.apache.hadoop.fs.LocalFileSystem in project vespa by vespa-engine.
Class MapReduceTest, method tearDown:
@AfterClass
public static void tearDown() throws IOException {
    Path testDir = new Path(hdfsBaseDir.getParent());
    hdfs.delete(testDir, true);
    cluster.shutdown();
    LocalFileSystem localFileSystem = FileSystem.getLocal(conf);
    localFileSystem.delete(testDir, true);
}
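The second argument to delete controls recursion: true removes the directory and everything beneath it, and the call returns false if the path does not exist. A minimal standalone sketch of the same local cleanup outside a test harness (the class name and path literal are illustrative):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.LocalFileSystem;
import org.apache.hadoop.fs.Path;

public class LocalCleanup {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        LocalFileSystem local = FileSystem.getLocal(conf);
        Path testDir = new Path("/tmp/mapreduce-test"); // illustrative path
        // delete(path, true) removes the directory recursively;
        // it returns false when the path does not exist.
        boolean deleted = local.delete(testDir, true);
        System.out.println("deleted: " + deleted);
    }
}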
Use of org.apache.hadoop.fs.LocalFileSystem in project hbase by apache.
Class TestCellCounter, method beforeClass:
@BeforeClass
public static void beforeClass() throws Exception {
    UTIL.startMiniCluster();
    FQ_OUTPUT_DIR = new Path(OUTPUT_DIR).makeQualified(new LocalFileSystem());
    FileUtil.fullyDelete(new File(OUTPUT_DIR));
}
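Two details worth noting here: new LocalFileSystem() creates an uninitialized instance (contrast the druid example at the end of this page, which calls initialize explicitly), and Path.makeQualified(FileSystem) is the older overload, deprecated in current Hadoop in favor of makeQualified(URI, Path), which the next example uses. A minimal sketch of the same qualification against a properly initialized local filesystem (the directory literal is illustrative):

Configuration conf = new Configuration();
// FileSystem.getLocal(conf) returns an initialized LocalFileSystem,
// unlike a bare new LocalFileSystem().
LocalFileSystem local = FileSystem.getLocal(conf);
Path fqOutputDir = new Path("target/test-output") // illustrative directory
        .makeQualified(local.getUri(), local.getWorkingDirectory());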
Use of org.apache.hadoop.fs.LocalFileSystem in project hbase by apache.
Class TestCellCounter, method testCellCounterForCompleteTable:
/**
 * Test CellCounter for a complete table; all data should print to the output.
 */
@Test
public void testCellCounterForCompleteTable() throws Exception {
    final TableName sourceTable = TableName.valueOf(name.getMethodName());
    String outputPath = OUTPUT_DIR + sourceTable;
    LocalFileSystem localFileSystem = new LocalFileSystem();
    Path outputDir = new Path(outputPath)
            .makeQualified(localFileSystem.getUri(), localFileSystem.getWorkingDirectory());
    byte[][] families = { FAMILY_A, FAMILY_B };
    Table t = UTIL.createTable(sourceTable, families);
    try {
        Put p = new Put(ROW1);
        p.addColumn(FAMILY_A, QUALIFIER, now, Bytes.toBytes("Data11"));
        p.addColumn(FAMILY_B, QUALIFIER, now + 1, Bytes.toBytes("Data12"));
        p.addColumn(FAMILY_A, QUALIFIER, now + 2, Bytes.toBytes("Data13"));
        t.put(p);
        p = new Put(ROW2);
        p.addColumn(FAMILY_B, QUALIFIER, now, Bytes.toBytes("Dat21"));
        p.addColumn(FAMILY_A, QUALIFIER, now + 1, Bytes.toBytes("Data22"));
        p.addColumn(FAMILY_B, QUALIFIER, now + 2, Bytes.toBytes("Data23"));
        t.put(p);
        String[] args = { sourceTable.getNameAsString(), outputDir.toString(), ";" };
        runCount(args);
        FileInputStream inputStream = new FileInputStream(outputPath + File.separator + "part-r-00000");
        String data = IOUtils.toString(inputStream);
        inputStream.close();
        assertTrue(data.contains("Total Families Across all Rows" + "\t" + "2"));
        assertTrue(data.contains("Total Qualifiers across all Rows" + "\t" + "4"));
        assertTrue(data.contains("Total ROWS" + "\t" + "2"));
        assertTrue(data.contains("b;q" + "\t" + "2"));
        assertTrue(data.contains("a;q" + "\t" + "2"));
        assertTrue(data.contains("row1;a;q_Versions" + "\t" + "1"));
        assertTrue(data.contains("row1;b;q_Versions" + "\t" + "1"));
        assertTrue(data.contains("row2;a;q_Versions" + "\t" + "1"));
        assertTrue(data.contains("row2;b;q_Versions" + "\t" + "1"));
        FileUtil.fullyDelete(new File(outputPath));
        args = new String[] { "-D " + TableInputFormat.SCAN_COLUMN_FAMILY + "=a, b",
            sourceTable.getNameAsString(), outputDir.toString(), ";" };
        runCount(args);
        inputStream = new FileInputStream(outputPath + File.separator + "part-r-00000");
        String data2 = IOUtils.toString(inputStream);
        inputStream.close();
        assertEquals(data, data2);
    } finally {
        t.close();
        localFileSystem.close();
        FileUtil.fullyDelete(new File(outputPath));
    }
}
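The two reads of part-r-00000 above open and close the stream by hand; a try-with-resources form is equivalent and closes the stream even when an assertion throws. A minimal sketch of that pattern (the helper name is hypothetical; IOUtils is the commons-io class already used above):

import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import org.apache.commons.io.IOUtils;

// Hypothetical helper: reads a reducer output file into a String,
// closing the stream even if an exception is thrown.
private static String readPartFile(String outputPath) throws IOException {
    try (FileInputStream in = new FileInputStream(outputPath + File.separator + "part-r-00000")) {
        return IOUtils.toString(in, StandardCharsets.UTF_8);
    }
}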
Use of org.apache.hadoop.fs.LocalFileSystem in project druid by druid-io.
Class HdfsClasspathSetupTest, method setupStatic:
@BeforeClass
public static void setupStatic() throws IOException {
    hdfsTmpDir = File.createTempFile("hdfsClasspathSetupTest", "dir");
    if (!hdfsTmpDir.delete()) {
        throw new IOE("Unable to delete hdfsTmpDir [%s]", hdfsTmpDir.getAbsolutePath());
    }
    conf = new Configuration(true);
    localFS = new LocalFileSystem();
    localFS.initialize(hdfsTmpDir.toURI(), conf);
    localFS.setWorkingDirectory(new Path(hdfsTmpDir.toURI()));
}
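A setup like this typically wants a matching teardown that closes the filesystem and removes the temp directory. A minimal sketch, assuming the localFS and hdfsTmpDir fields from the snippet above (the method name is hypothetical; FileUtil.fullyDelete is the Hadoop utility already used in the hbase example):

@AfterClass
public static void tearDownStatic() throws IOException {
    if (localFS != null) {
        localFS.close(); // releases resources acquired by initialize()
    }
    FileUtil.fullyDelete(hdfsTmpDir); // recursive delete of the temp dir
}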