Search in sources :

Example 41 with LocalFileSystem

use of org.apache.hadoop.fs.LocalFileSystem in project hazelcast-jet by hazelcast.

From the class ReadHdfsPTest, method writeToFile.

/**
 * Writes the four test input files consumed by the read tests, recording each
 * generated path in {@code paths}. The file format is chosen from the
 * configured {@code inputFormatClass}: sequence files for
 * {@link SequenceFileInputFormat}, plain text otherwise.
 *
 * @throws IOException if obtaining the local file system fails; write failures
 *                     inside the lambda are rethrown unchecked via {@code uncheckRun}
 */
private void writeToFile() throws IOException {
    Configuration conf = new Configuration();
    LocalFileSystem local = FileSystem.getLocal(conf);
    for (int fileIndex = 0; fileIndex < 4; fileIndex++) {
        Path path = createPath();
        uncheckRun(() -> {
            paths.add(path);
            if (SequenceFileInputFormat.class.equals(inputFormatClass)) {
                writeToSequenceFile(conf, path);
            } else {
                writeToTextFile(local, path);
            }
        });
    }
}
Also used : Arrays(java.util.Arrays) HdfsProcessors.readHdfsP(com.hazelcast.jet.hadoop.HdfsProcessors.readHdfsP) Util.uncheckRun(com.hazelcast.jet.impl.util.Util.uncheckRun) IntStream.range(java.util.stream.IntStream.range) FileSystem(org.apache.hadoop.fs.FileSystem) Text(org.apache.hadoop.io.Text) SequenceFile(org.apache.hadoop.io.SequenceFile) DistributedBiFunction(com.hazelcast.jet.function.DistributedBiFunction) Future(java.util.concurrent.Future) SequenceFileInputFormat(org.apache.hadoop.mapred.SequenceFileInputFormat) Configuration(org.apache.hadoop.conf.Configuration) InputFormat(org.apache.hadoop.mapred.InputFormat) Path(org.apache.hadoop.fs.Path) DAG(com.hazelcast.jet.core.DAG) IntWritable(org.apache.hadoop.io.IntWritable) Parameterized(org.junit.runners.Parameterized) ExceptionUtil(com.hazelcast.jet.impl.util.ExceptionUtil) FileInputFormat(org.apache.hadoop.mapred.FileInputFormat) Collection(java.util.Collection) Set(java.util.Set) Category(org.junit.experimental.categories.Category) DistributedStream(com.hazelcast.jet.stream.DistributedStream) IntStream(java.util.stream.IntStream) TextInputFormat(org.apache.hadoop.mapred.TextInputFormat) JetInstance(com.hazelcast.jet.JetInstance) RunWith(org.junit.runner.RunWith) HashSet(java.util.HashSet) OutputStreamWriter(java.io.OutputStreamWriter) IList(com.hazelcast.core.IList) Before(org.junit.Before) Option(org.apache.hadoop.io.SequenceFile.Writer.Option) Files(java.nio.file.Files) BufferedWriter(java.io.BufferedWriter) Assert.assertTrue(org.junit.Assert.assertTrue) IOException(java.io.IOException) Test(org.junit.Test) IListJet(com.hazelcast.jet.IListJet) ParallelTest(com.hazelcast.test.annotation.ParallelTest) Integer.parseInt(java.lang.Integer.parseInt) JobConf(org.apache.hadoop.mapred.JobConf) Vertex(com.hazelcast.jet.core.Vertex) HdfsSources(com.hazelcast.jet.hadoop.HdfsSources) DistributedCollectors(com.hazelcast.jet.stream.DistributedCollectors) Writer(org.apache.hadoop.io.SequenceFile.Writer) 
HazelcastParametersRunnerFactory(com.hazelcast.test.HazelcastParametersRunnerFactory) LocalFileSystem(org.apache.hadoop.fs.LocalFileSystem) Util(com.hazelcast.jet.Util) Assert.assertEquals(org.junit.Assert.assertEquals) Edge.between(com.hazelcast.jet.core.Edge.between) SinkProcessors.writeListP(com.hazelcast.jet.core.processor.SinkProcessors.writeListP) Configuration(org.apache.hadoop.conf.Configuration) LocalFileSystem(org.apache.hadoop.fs.LocalFileSystem) SequenceFileInputFormat(org.apache.hadoop.mapred.SequenceFileInputFormat)

Example 42 with LocalFileSystem

use of org.apache.hadoop.fs.LocalFileSystem in project vespa by vespa-engine.

From the class MapReduceTest, method tearDown.

@AfterClass
/**
 * Class-level cleanup: removes the test directory from HDFS, shuts down the
 * mini cluster, and removes the matching directory from the local file system.
 * <p>
 * Each step runs in a {@code finally} block so that a failure in an earlier
 * step (e.g. the HDFS delete throwing) cannot leak the mini cluster or leave
 * local test files behind — the original ran the steps sequentially and
 * skipped the rest on the first exception.
 *
 * @throws IOException if deleting the test directory fails
 */
@AfterClass
public static void tearDown() throws IOException {
    Path testDir = new Path(hdfsBaseDir.getParent());
    try {
        // Best-effort removal of the test data from HDFS while the cluster is up.
        hdfs.delete(testDir, true);
    } finally {
        try {
            cluster.shutdown();
        } finally {
            // Always attempt local cleanup, even if the shutdown itself failed.
            LocalFileSystem localFileSystem = FileSystem.getLocal(conf);
            localFileSystem.delete(testDir, true);
        }
    }
}
Also used : Path(org.apache.hadoop.fs.Path) LocalFileSystem(org.apache.hadoop.fs.LocalFileSystem) AfterClass(org.junit.AfterClass)

Example 43 with LocalFileSystem

use of org.apache.hadoop.fs.LocalFileSystem in project hbase by apache.

From the class TestCellCounter, method beforeClass.

@BeforeClass
/**
 * Class-level setup: starts the mini cluster, qualifies the shared output
 * directory against the local file system, and clears any leftovers from a
 * previous run.
 * <p>
 * Uses {@code Path.makeQualified(URI, Path)} rather than the deprecated
 * {@code makeQualified(FileSystem)} overload, matching the pattern already
 * used by the test methods in this class.
 *
 * @throws Exception if the mini cluster fails to start
 */
@BeforeClass
public static void beforeClass() throws Exception {
    UTIL.startMiniCluster();
    LocalFileSystem localFileSystem = new LocalFileSystem();
    FQ_OUTPUT_DIR =
        new Path(OUTPUT_DIR).makeQualified(localFileSystem.getUri(), localFileSystem.getWorkingDirectory());
    // Remove any stale output left behind by a previous run.
    FileUtil.fullyDelete(new File(OUTPUT_DIR));
}
Also used : Path(org.apache.hadoop.fs.Path) LocalFileSystem(org.apache.hadoop.fs.LocalFileSystem) File(java.io.File) BeforeClass(org.junit.BeforeClass)

Example 44 with LocalFileSystem

use of org.apache.hadoop.fs.LocalFileSystem in project hbase by apache.

From the class TestCellCounter, method testCellCounterForCompleteTable.

/**
 * Test CellCounter for complete table all data should print to output.
 * <p>
 * Writes two rows with cells in two families, runs CellCounter over the whole
 * table, and verifies the aggregate counts in the mapreduce output file. A
 * second run with an explicit {@code SCAN_COLUMN_FAMILY} covering both
 * families must produce identical output.
 */
@Test
public void testCellCounterForCompleteTable() throws Exception {
    final TableName sourceTable = TableName.valueOf(name.getMethodName());
    String outputPath = OUTPUT_DIR + sourceTable;
    LocalFileSystem localFileSystem = new LocalFileSystem();
    Path outputDir = new Path(outputPath).makeQualified(localFileSystem.getUri(), localFileSystem.getWorkingDirectory());
    byte[][] families = { FAMILY_A, FAMILY_B };
    Table t = UTIL.createTable(sourceTable, families);
    try {
        // Three cells per row across both families, with distinct timestamps
        // so the per-qualifier version counts are deterministic.
        Put p = new Put(ROW1);
        p.addColumn(FAMILY_A, QUALIFIER, now, Bytes.toBytes("Data11"));
        p.addColumn(FAMILY_B, QUALIFIER, now + 1, Bytes.toBytes("Data12"));
        p.addColumn(FAMILY_A, QUALIFIER, now + 2, Bytes.toBytes("Data13"));
        t.put(p);
        p = new Put(ROW2);
        p.addColumn(FAMILY_B, QUALIFIER, now, Bytes.toBytes("Dat21"));
        p.addColumn(FAMILY_A, QUALIFIER, now + 1, Bytes.toBytes("Data22"));
        p.addColumn(FAMILY_B, QUALIFIER, now + 2, Bytes.toBytes("Data23"));
        t.put(p);
        String[] args = { sourceTable.getNameAsString(), outputDir.toString(), ";" };
        runCount(args);
        // try-with-resources: the original leaked the stream when any
        // assertion below threw before the explicit close().
        String data;
        try (FileInputStream inputStream = new FileInputStream(outputPath + File.separator + "part-r-00000")) {
            data = IOUtils.toString(inputStream);
        }
        assertTrue(data.contains("Total Families Across all Rows" + "\t" + "2"));
        assertTrue(data.contains("Total Qualifiers across all Rows" + "\t" + "4"));
        assertTrue(data.contains("Total ROWS" + "\t" + "2"));
        assertTrue(data.contains("b;q" + "\t" + "2"));
        assertTrue(data.contains("a;q" + "\t" + "2"));
        assertTrue(data.contains("row1;a;q_Versions" + "\t" + "1"));
        assertTrue(data.contains("row1;b;q_Versions" + "\t" + "1"));
        assertTrue(data.contains("row2;a;q_Versions" + "\t" + "1"));
        assertTrue(data.contains("row2;b;q_Versions" + "\t" + "1"));
        FileUtil.fullyDelete(new File(outputPath));
        // Re-run restricted to both families explicitly; the result must match
        // the unrestricted run byte-for-byte.
        args = new String[] { "-D " + TableInputFormat.SCAN_COLUMN_FAMILY + "=a, b", sourceTable.getNameAsString(), outputDir.toString(), ";" };
        runCount(args);
        String data2;
        try (FileInputStream inputStream = new FileInputStream(outputPath + File.separator + "part-r-00000")) {
            data2 = IOUtils.toString(inputStream);
        }
        assertEquals(data, data2);
    } finally {
        t.close();
        localFileSystem.close();
        FileUtil.fullyDelete(new File(outputPath));
    }
}
Also used : Path(org.apache.hadoop.fs.Path) TableName(org.apache.hadoop.hbase.TableName) Table(org.apache.hadoop.hbase.client.Table) LocalFileSystem(org.apache.hadoop.fs.LocalFileSystem) File(java.io.File) Put(org.apache.hadoop.hbase.client.Put) FileInputStream(java.io.FileInputStream) Test(org.junit.Test)

Example 45 with LocalFileSystem

use of org.apache.hadoop.fs.LocalFileSystem in project druid by druid-io.

From the class HdfsClasspathSetupTest, method setupStatic.

/**
 * One-time setup: reserves a unique temporary directory path and initializes
 * a {@link LocalFileSystem} whose working directory points at it.
 *
 * @throws IOException if the temp file cannot be created or the file system
 *                     fails to initialize
 */
@BeforeClass
public static void setupStatic() throws IOException {
    // createTempFile + delete reserves a unique path to be used as a directory.
    hdfsTmpDir = File.createTempFile("hdfsClasspathSetupTest", "dir");
    final boolean removed = hdfsTmpDir.delete();
    if (!removed) {
        throw new IOE("Unable to delete hdfsTmpDir [%s]", hdfsTmpDir.getAbsolutePath());
    }
    conf = new Configuration(true);
    localFS = new LocalFileSystem();
    localFS.initialize(hdfsTmpDir.toURI(), conf);
    localFS.setWorkingDirectory(new Path(hdfsTmpDir.toURI()));
}
Also used : Path(org.apache.hadoop.fs.Path) Configuration(org.apache.hadoop.conf.Configuration) LocalFileSystem(org.apache.hadoop.fs.LocalFileSystem) IOE(org.apache.druid.java.util.common.IOE) BeforeClass(org.junit.BeforeClass)

Aggregations

LocalFileSystem (org.apache.hadoop.fs.LocalFileSystem)121 Path (org.apache.hadoop.fs.Path)77 Test (org.junit.Test)64 Configuration (org.apache.hadoop.conf.Configuration)57 FileSystem (org.apache.hadoop.fs.FileSystem)35 IOException (java.io.IOException)33 File (java.io.File)23 NewTableConfiguration (org.apache.accumulo.core.client.admin.NewTableConfiguration)23 SamplerConfiguration (org.apache.accumulo.core.client.sample.SamplerConfiguration)23 SummarizerConfiguration (org.apache.accumulo.core.client.summary.SummarizerConfiguration)23 DefaultConfiguration (org.apache.accumulo.core.conf.DefaultConfiguration)23 Key (org.apache.accumulo.core.data.Key)22 Value (org.apache.accumulo.core.data.Value)22 ArrayList (java.util.ArrayList)19 ExecutorService (java.util.concurrent.ExecutorService)15 Future (java.util.concurrent.Future)15 Scanner (org.apache.accumulo.core.client.Scanner)14 DataSegment (org.apache.druid.timeline.DataSegment)13 DataSegmentPusher (org.apache.druid.segment.loading.DataSegmentPusher)8 HdfsDataSegmentPusher (org.apache.druid.storage.hdfs.HdfsDataSegmentPusher)8