Search in sources:

Example 1 with LineByLineFileInputOperator

Use of org.apache.apex.malhar.lib.fs.LineByLineFileInputOperator in project apex-malhar by apache.

From class AbstractFileInputOperatorTest, method testPartitioningStateTransferFailure.

/**
 * Test for dynamic partitioning interrupting an ongoing read.
 * - Create 4 files with 3 records each.
 * - Create a single partition and read some records, populating pending files in the operator.
 * - Split it into two operators.
 * - Try to emit the remaining records.
 */
@Test
public void testPartitioningStateTransferFailure() throws Exception {
    LineByLineFileInputOperator oper = new LineByLineFileInputOperator();
    oper.getScanner().setFilePatternRegexp(".*partition([\\d]*)");
    oper.setDirectory(new File(testMeta.dir).getAbsolutePath());
    oper.setScanIntervalMillis(0);
    oper.setEmitBatchSize(2);
    LineByLineFileInputOperator initialState = new Kryo().copy(oper);
    // Create 4 files with 3 records each.
    Path path = new Path(new File(testMeta.dir).getAbsolutePath());
    FileContext.getLocalFSFileContext().delete(path, true);
    int file;
    for (file = 0; file < 4; file++) {
        FileUtils.write(new File(testMeta.dir, "partition00" + file), "a\nb\nc\n");
    }
    CollectorTestSink<String> queryResults = new CollectorTestSink<String>();
    @SuppressWarnings({ "unchecked", "rawtypes" }) CollectorTestSink<Object> sink = (CollectorTestSink) queryResults;
    oper.output.setSink(sink);
    int wid = 0;
    // Read some records
    oper.setup(testMeta.context);
    for (int i = 0; i < 5; i++) {
        oper.beginWindow(wid);
        oper.emitTuples();
        oper.endWindow();
        wid++;
    }
    Assert.assertEquals("Partial tuples read ", 6, sink.collectedTuples.size());
    Assert.assertEquals(1, initialState.getCurrentPartitions());
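    // Request two partitions; AbstractFileInputOperator is its own StatsListener,
    // so processStats() below reports that a repartition is required.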
    initialState.setPartitionCount(2);
    StatsListener.Response rsp = initialState.processStats(null);
    Assert.assertEquals(true, rsp.repartitionRequired);
    // Create partitions of the operator.
    List<Partition<AbstractFileInputOperator<String>>> partitions = Lists.newArrayList();
    partitions.add(new DefaultPartition<AbstractFileInputOperator<String>>(oper));
    // Incremental capacity is controlled by the partitionCount property.
    Collection<Partition<AbstractFileInputOperator<String>>> newPartitions = initialState.definePartitions(partitions, new PartitioningContextImpl(null, 0));
    Assert.assertEquals(2, newPartitions.size());
    Assert.assertEquals(1, initialState.getCurrentPartitions());
    Map<Integer, Partition<AbstractFileInputOperator<String>>> m = Maps.newHashMap();
    for (Partition<AbstractFileInputOperator<String>> p : newPartitions) {
        m.put(m.size(), p);
    }
    initialState.partitioned(m);
    Assert.assertEquals(2, initialState.getCurrentPartitions());
    /* Collect all operators in a list */
    List<AbstractFileInputOperator<String>> opers = Lists.newArrayList();
    for (Partition<AbstractFileInputOperator<String>> p : newPartitions) {
        LineByLineFileInputOperator oi = (LineByLineFileInputOperator) p.getPartitionedInstance();
        oi.setup(testMeta.context);
        oi.output.setSink(sink);
        opers.add(oi);
    }
    sink.clear();
    for (int i = 0; i < 10; i++) {
        for (AbstractFileInputOperator<String> o : opers) {
            o.beginWindow(wid);
            o.emitTuples();
            o.endWindow();
        }
        wid++;
    }
    // The remaining 6 records should be read by the new partitions.
    Assert.assertEquals("Remaining tuples read ", 6, sink.collectedTuples.size());
}
Also used: Path(org.apache.hadoop.fs.Path) Partition(com.datatorrent.api.Partitioner.Partition) DefaultPartition(com.datatorrent.api.DefaultPartition) StatsListener(com.datatorrent.api.StatsListener) PartitioningContextImpl(org.apache.apex.malhar.lib.partitioner.StatelessPartitionerTest.PartitioningContextImpl) LineByLineFileInputOperator(org.apache.apex.malhar.lib.fs.LineByLineFileInputOperator) File(java.io.File) Kryo(com.esotericsoftware.kryo.Kryo) CollectorTestSink(org.apache.apex.malhar.lib.testbench.CollectorTestSink) Test(org.junit.Test)
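
Outside the test harness, the dynamic partitioning exercised above is driven by a single property: AbstractFileInputOperator acts as its own Partitioner and StatsListener, so setting partitionCount is enough to trigger a repartition. A minimal configuration sketch, with a hypothetical directory and file pattern:

import org.apache.apex.malhar.lib.fs.LineByLineFileInputOperator;

LineByLineFileInputOperator reader = new LineByLineFileInputOperator();
reader.setDirectory("/user/apex/input");              // hypothetical HDFS folder
reader.getScanner().setFilePatternRegexp(".*\\.log"); // hypothetical file pattern
reader.setPartitionCount(2); // processStats() then requests two partitions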

Example 2 with LineByLineFileInputOperator

Use of org.apache.apex.malhar.lib.fs.LineByLineFileInputOperator in project apex-malhar by apache.

From class AbstractFileInputOperatorTest, method testIdempotencyWhenFileContinued.

@Test
public void testIdempotencyWhenFileContinued() throws Exception {
    FileContext.getLocalFSFileContext().delete(new Path(new File(testMeta.dir).getAbsolutePath()), true);
    List<String> lines = Lists.newArrayList();
    for (int line = 0; line < 10; line++) {
        lines.add("l" + line);
    }
    FileUtils.write(new File(testMeta.dir, "file0"), StringUtils.join(lines, '\n'));
    LineByLineFileInputOperator oper = new LineByLineFileInputOperator();
    FSWindowDataManager manager = new FSWindowDataManager();
    manager.setStatePath(testMeta.dir + "/recovery");
    oper.setEmitBatchSize(5);
    oper.setWindowDataManager(manager);
    CollectorTestSink<String> queryResults = new CollectorTestSink<String>();
    @SuppressWarnings({ "unchecked", "rawtypes" }) CollectorTestSink<Object> sink = (CollectorTestSink) queryResults;
    oper.output.setSink(sink);
    oper.setDirectory(testMeta.dir);
    oper.getScanner().setFilePatternRegexp(".*file[\\d]");
    oper.setup(testMeta.context);
    int offset = 0;
    for (long wid = 0; wid < 3; wid++) {
        oper.beginWindow(wid);
        oper.emitTuples();
        oper.endWindow();
        if (wid > 0) {
            Assert.assertEquals("number tuples", 5, queryResults.collectedTuples.size());
            Assert.assertEquals("lines", lines.subList(offset, offset + 5), queryResults.collectedTuples);
            offset += 5;
        }
        sink.clear();
    }
    oper.teardown();
    sink.clear();
    // Idempotency check: replay the same windows without calling emitTuples();
    // the WindowDataManager re-emits the recorded tuples.
    offset = 0;
    oper.setup(testMeta.context);
    for (long wid = 0; wid < 3; wid++) {
        oper.beginWindow(wid);
        oper.endWindow();
        if (wid > 0) {
            Assert.assertEquals("number tuples", 5, queryResults.collectedTuples.size());
            Assert.assertEquals("lines", lines.subList(offset, offset + 5), queryResults.collectedTuples);
            offset += 5;
        }
        sink.clear();
    }
    oper.teardown();
}
Also used: Path(org.apache.hadoop.fs.Path) FSWindowDataManager(org.apache.apex.malhar.lib.wal.FSWindowDataManager) LineByLineFileInputOperator(org.apache.apex.malhar.lib.fs.LineByLineFileInputOperator) File(java.io.File) CollectorTestSink(org.apache.apex.malhar.lib.testbench.CollectorTestSink) Test(org.junit.Test)
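
The idempotent replay above generalizes to application code: attach an FSWindowDataManager to the operator before launch, and recovered windows re-emit exactly the tuples they emitted the first time. A minimal wiring sketch, with hypothetical paths:

import org.apache.apex.malhar.lib.fs.LineByLineFileInputOperator;
import org.apache.apex.malhar.lib.wal.FSWindowDataManager;

LineByLineFileInputOperator reader = new LineByLineFileInputOperator();
reader.setDirectory("/user/apex/input");   // hypothetical input folder
FSWindowDataManager manager = new FSWindowDataManager();
manager.setStatePath("/user/apex/wal");    // hypothetical state location
reader.setWindowDataManager(manager);      // enables window-level replay on recovery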

Example 3 with LineByLineFileInputOperator

Use of org.apache.apex.malhar.lib.fs.LineByLineFileInputOperator in project apex-malhar by apache.

From class AbstractFileInputOperatorTest, method testPartitioningStateTransfer.

/**
 * Test for dynamic partitioning.
 * - Create 4 files with 3 records each.
 * - Create a single partition and read all records, populating pending files in the operator.
 * - Split it into two operators.
 * - Try to emit records again; no record should be emitted, as all files are
 *   already processed.
 * - Create another 4 files with 3 records each.
 * - Try to emit records again; the expected total is 4 * 3 = 12 emitted records.
 */
@Test
public void testPartitioningStateTransfer() throws Exception {
    LineByLineFileInputOperator oper = new LineByLineFileInputOperator();
    oper.getScanner().setFilePatternRegexp(".*partition([\\d]*)");
    oper.setDirectory(new File(testMeta.dir).getAbsolutePath());
    oper.setScanIntervalMillis(0);
    LineByLineFileInputOperator initialState = new Kryo().copy(oper);
    // Create 4 files with 3 records each.
    Path path = new Path(new File(testMeta.dir).getAbsolutePath());
    FileContext.getLocalFSFileContext().delete(path, true);
    int file;
    for (file = 0; file < 4; file++) {
        FileUtils.write(new File(testMeta.dir, "partition00" + file), "a\nb\nc\n");
    }
    CollectorTestSink<String> queryResults = new CollectorTestSink<String>();
    @SuppressWarnings({ "unchecked", "rawtypes" }) CollectorTestSink<Object> sink = (CollectorTestSink) queryResults;
    oper.output.setSink(sink);
    int wid = 0;
    // Read all records to populate processedList in operator.
    oper.setup(testMeta.context);
    for (int i = 0; i < 10; i++) {
        oper.beginWindow(wid);
        oper.emitTuples();
        oper.endWindow();
        wid++;
    }
    Assert.assertEquals("All tuples read ", 12, sink.collectedTuples.size());
    Assert.assertEquals(1, initialState.getCurrentPartitions());
    initialState.setPartitionCount(2);
    StatsListener.Response rsp = initialState.processStats(null);
    Assert.assertEquals(true, rsp.repartitionRequired);
    // Create partitions of the operator.
    List<Partition<AbstractFileInputOperator<String>>> partitions = Lists.newArrayList();
    partitions.add(new DefaultPartition<AbstractFileInputOperator<String>>(oper));
    // Incremental capacity is controlled by the partitionCount property.
    Collection<Partition<AbstractFileInputOperator<String>>> newPartitions = initialState.definePartitions(partitions, new PartitioningContextImpl(null, 0));
    Assert.assertEquals(2, newPartitions.size());
    Assert.assertEquals(1, initialState.getCurrentPartitions());
    Map<Integer, Partition<AbstractFileInputOperator<String>>> m = Maps.newHashMap();
    for (Partition<AbstractFileInputOperator<String>> p : newPartitions) {
        m.put(m.size(), p);
    }
    initialState.partitioned(m);
    Assert.assertEquals(2, initialState.getCurrentPartitions());
    /* Collect all operators in a list */
    List<AbstractFileInputOperator<String>> opers = Lists.newArrayList();
    for (Partition<AbstractFileInputOperator<String>> p : newPartitions) {
        LineByLineFileInputOperator oi = (LineByLineFileInputOperator) p.getPartitionedInstance();
        oi.setup(testMeta.context);
        oi.output.setSink(sink);
        opers.add(oi);
    }
    sink.clear();
    for (int i = 0; i < 10; i++) {
        for (AbstractFileInputOperator<String> o : opers) {
            o.beginWindow(wid);
            o.emitTuples();
            o.endWindow();
        }
        wid++;
    }
    // No record should be read.
    Assert.assertEquals("No new tuples read ", 0, sink.collectedTuples.size());
    // Add four new files with 3 records each.
    for (; file < 8; file++) {
        FileUtils.write(new File(testMeta.dir, "partition00" + file), "a\nb\nc\n");
    }
    for (int i = 0; i < 10; i++) {
        for (AbstractFileInputOperator<String> o : opers) {
            o.beginWindow(wid);
            o.emitTuples();
            o.endWindow();
        }
        wid++;
    }
    // If each file is processed exactly once, 12 records should be emitted.
    Assert.assertEquals("All tuples read ", 12, sink.collectedTuples.size());
}
Also used: Path(org.apache.hadoop.fs.Path) Partition(com.datatorrent.api.Partitioner.Partition) DefaultPartition(com.datatorrent.api.DefaultPartition) StatsListener(com.datatorrent.api.StatsListener) PartitioningContextImpl(org.apache.apex.malhar.lib.partitioner.StatelessPartitionerTest.PartitioningContextImpl) LineByLineFileInputOperator(org.apache.apex.malhar.lib.fs.LineByLineFileInputOperator) File(java.io.File) Kryo(com.esotericsoftware.kryo.Kryo) CollectorTestSink(org.apache.apex.malhar.lib.testbench.CollectorTestSink) Test(org.junit.Test)

Example 4 with LineByLineFileInputOperator

Use of org.apache.apex.malhar.lib.fs.LineByLineFileInputOperator in project apex-malhar by apache.

From class AbstractFileInputOperatorTest, method testRecoveryWithUnfinishedFile.

@Test
public void testRecoveryWithUnfinishedFile() throws Exception {
    FileContext.getLocalFSFileContext().delete(new Path(new File(testMeta.dir).getAbsolutePath()), true);
    List<String> allLines = Lists.newArrayList();
    HashSet<String> lines = Sets.newHashSet();
    for (int line = 0; line < 5; line++) {
        lines.add("f0" + "l" + line);
    }
    allLines.addAll(lines);
    File testFile = new File(testMeta.dir, "file0");
    FileUtils.write(testFile, StringUtils.join(lines, '\n'));
    LineByLineFileInputOperator oper = new LineByLineFileInputOperator();
    oper.scanner = null;
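    // Simulate recovery: resume "file0" at line offset 2, so only the remaining 3 of 5 lines are emitted.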
    oper.unfinishedFiles.add(new AbstractFileInputOperator.FailedFile(testFile.getAbsolutePath(), 2));
    CollectorTestSink<String> queryResults = new CollectorTestSink<String>();
    @SuppressWarnings({ "unchecked", "rawtypes" }) CollectorTestSink<Object> sink = (CollectorTestSink) queryResults;
    oper.output.setSink(sink);
    oper.setDirectory(testMeta.dir);
    oper.setup(testMeta.context);
    oper.beginWindow(0);
    oper.emitTuples();
    oper.endWindow();
    oper.teardown();
    Assert.assertEquals("number tuples", 3, queryResults.collectedTuples.size());
    Assert.assertEquals("lines", allLines.subList(2, allLines.size()), new ArrayList<String>(queryResults.collectedTuples));
}
Also used: Path(org.apache.hadoop.fs.Path) LineByLineFileInputOperator(org.apache.apex.malhar.lib.fs.LineByLineFileInputOperator) File(java.io.File) CollectorTestSink(org.apache.apex.malhar.lib.testbench.CollectorTestSink) Test(org.junit.Test)

Example 5 with LineByLineFileInputOperator

Use of org.apache.apex.malhar.lib.fs.LineByLineFileInputOperator in project apex-malhar by apache.

From class StreamFactory, method fromFolder.

/**
 * Create a stream of String tuples by reading the files in an HDFS folder line by line.
 * @param folderName the folder to scan for input files
 * @param opts options applied to the operator in the stream
 * @return a stream of the lines read from the files
 */
public static ApexStream<String> fromFolder(String folderName, Option... opts) {
    LineByLineFileInputOperator fileLineInputOperator = new LineByLineFileInputOperator();
    fileLineInputOperator.setDirectory(folderName);
    ApexStreamImpl<String> newStream = new ApexStreamImpl<>();
    return newStream.addOperator(fileLineInputOperator, null, fileLineInputOperator.output, opts);
}
Also used: LineByLineFileInputOperator(org.apache.apex.malhar.lib.fs.LineByLineFileInputOperator)
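
A usage sketch for fromFolder(), following the pattern in the Malhar high-level stream API demos; the folder path and the 30-second run duration are illustrative:

import java.util.concurrent.Callable;
import org.apache.apex.malhar.stream.api.impl.StreamFactory;

// Read every file under the folder line by line, print each line,
// and run embedded for 30 seconds.
StreamFactory.fromFolder("/user/apex/input")
    .print()
    .runEmbedded(false, 30000, new Callable<Boolean>()
    {
      @Override
      public Boolean call() throws Exception
      {
        return false; // no early exit; stop when the duration elapses
      }
    });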

Aggregations

LineByLineFileInputOperator (org.apache.apex.malhar.lib.fs.LineByLineFileInputOperator): 17
File (java.io.File): 15
Path (org.apache.hadoop.fs.Path): 15
Test (org.junit.Test): 14
CollectorTestSink (org.apache.apex.malhar.lib.testbench.CollectorTestSink): 12
DefaultPartition (com.datatorrent.api.DefaultPartition): 6
Partition (com.datatorrent.api.Partitioner.Partition): 6
PartitioningContextImpl (org.apache.apex.malhar.lib.partitioner.StatelessPartitionerTest.PartitioningContextImpl): 6
FSWindowDataManager (org.apache.apex.malhar.lib.wal.FSWindowDataManager): 5
StatsListener (com.datatorrent.api.StatsListener): 3
Kryo (com.esotericsoftware.kryo.Kryo): 3
Configuration (org.apache.hadoop.conf.Configuration): 2
Random (java.util.Random): 1