
Example 6 with PartitioningContextImpl

Use of org.apache.apex.malhar.lib.partitioner.StatelessPartitionerTest.PartitioningContextImpl in project apex-malhar by Apache.

From the class AbstractFileInputOperatorTest, method testWindowDataManagerPartitioning:

@Test
public void testWindowDataManagerPartitioning() throws Exception {
    LineByLineFileInputOperator oper = new LineByLineFileInputOperator();
    oper.getScanner().setFilePatternRegexp(".*partition([\\d]*)");
    oper.setDirectory(new File(testMeta.dir).getAbsolutePath());
    oper.setWindowDataManager(new FSWindowDataManager());
    oper.operatorId = 7;
    Path path = new Path(new File(testMeta.dir).getAbsolutePath());
    FileContext.getLocalFSFileContext().delete(path, true);
    for (int file = 0; file < 4; file++) {
        FileUtils.write(new File(testMeta.dir, "partition00" + file), "");
    }
    List<Partition<AbstractFileInputOperator<String>>> partitions = Lists.newArrayList();
    partitions.add(new DefaultPartition<AbstractFileInputOperator<String>>(oper));
    // Request two partitions via the partitioning context; the original single partition is replaced.
    Collection<Partition<AbstractFileInputOperator<String>>> newPartitions = oper.definePartitions(partitions, new PartitioningContextImpl(null, 2));
    Assert.assertEquals(2, newPartitions.size());
    Assert.assertEquals(1, oper.getCurrentPartitions());
    List<FSWindowDataManager> storageManagers = Lists.newLinkedList();
    for (Partition<AbstractFileInputOperator<String>> p : newPartitions) {
        storageManagers.add((FSWindowDataManager) p.getPartitionedInstance().getWindowDataManager());
    }
    Assert.assertEquals("count of storage managers", 2, storageManagers.size());
    // Exactly one of the new WindowDataManagers should have recorded the replaced operator (id 7) as deleted.
    int countOfDeleteManagers = 0;
    FSWindowDataManager deleteManager = null;
    for (FSWindowDataManager storageManager : storageManagers) {
        if (storageManager.getDeletedOperators() != null) {
            countOfDeleteManagers++;
            deleteManager = storageManager;
        }
    }
    Assert.assertEquals("count of delete managers", 1, countOfDeleteManagers);
    Assert.assertNotNull("deleted operators manager", deleteManager);
    Assert.assertEquals("deleted operators", Sets.newHashSet(7), deleteManager.getDeletedOperators());
}
Also used: Path (org.apache.hadoop.fs.Path), Partition (com.datatorrent.api.Partitioner.Partition), DefaultPartition (com.datatorrent.api.DefaultPartition), FSWindowDataManager (org.apache.apex.malhar.lib.wal.FSWindowDataManager), PartitioningContextImpl (org.apache.apex.malhar.lib.partitioner.StatelessPartitionerTest.PartitioningContextImpl), LineByLineFileInputOperator (org.apache.apex.malhar.lib.fs.LineByLineFileInputOperator), File (java.io.File), Test (org.junit.Test)
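
For reference, PartitioningContextImpl is a small test helper nested in StatelessPartitionerTest that implements com.datatorrent.api.Partitioner.PartitioningContext; the second constructor argument (2 above) appears to be the parallel partition count that definePartitions reads back via getParallelPartitionCount(). Below is a minimal sketch of an equivalent context, assuming only the two methods of the PartitioningContext interface; the class name SimplePartitioningContext is illustrative and not part of Malhar.

import java.util.List;

import com.datatorrent.api.Operator;
import com.datatorrent.api.Partitioner;

// Illustrative stand-in for StatelessPartitionerTest.PartitioningContextImpl:
// it simply reports the input ports and the requested parallel partition count.
public class SimplePartitioningContext implements Partitioner.PartitioningContext {

    private final List<Operator.InputPort<?>> ports;
    private final int parallelPartitionCount;

    public SimplePartitioningContext(List<Operator.InputPort<?>> ports, int parallelPartitionCount) {
        this.ports = ports;
        this.parallelPartitionCount = parallelPartitionCount;
    }

    // Number of partitions requested by the caller of definePartitions.
    @Override
    public int getParallelPartitionCount() {
        return parallelPartitionCount;
    }

    // Input ports of the operator being partitioned (passed as null in the tests above).
    @Override
    public List<Operator.InputPort<?>> getInputPorts() {
        return ports;
    }
}

With such a context, new SimplePartitioningContext(null, 2) in place of new PartitioningContextImpl(null, 2) would drive definePartitions the same way.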

Example 7 with PartitioningContextImpl

Use of org.apache.apex.malhar.lib.partitioner.StatelessPartitionerTest.PartitioningContextImpl in project apex-malhar by Apache.

From the class CouchBaseInputOperatorTest, method TestCouchBaseInputOperator:

@Test
public void TestCouchBaseInputOperator() throws Exception {
    BucketConfiguration bucketConfiguration = new BucketConfiguration();
    CouchbaseConnectionFactoryBuilder cfb = new CouchbaseConnectionFactoryBuilder();
    CouchbaseMock mockCouchbase1 = createMock("default", "", bucketConfiguration);
    CouchbaseMock mockCouchbase2 = createMock("default", "", bucketConfiguration);
    mockCouchbase1.start();
    mockCouchbase1.waitForStartup();
    List<URI> uriList = new ArrayList<URI>();
    int port1 = mockCouchbase1.getHttpPort();
    logger.debug("port is {}", port1);
    mockCouchbase2.start();
    mockCouchbase2.waitForStartup();
    int port2 = mockCouchbase2.getHttpPort();
    logger.debug("port is {}", port2);
    uriList.add(new URI("http", null, "localhost", port1, "/pools", "", ""));
    connectionFactory = cfb.buildCouchbaseConnection(uriList, bucketConfiguration.name, bucketConfiguration.password);
    client = new CouchbaseClient(connectionFactory);
    CouchBaseStore store = new CouchBaseStore();
    keyList = new ArrayList<String>();
    store.setBucket(bucketConfiguration.name);
    store.setPasswordConfig(password);
    store.setPassword(bucketConfiguration.password);
    store.setUriString("localhost:" + port1 + "," + "localhost:" + port1);
    // couchbaseBucket.getCouchServers();
    AttributeMap.DefaultAttributeMap attributeMap = new AttributeMap.DefaultAttributeMap();
    attributeMap.put(DAG.APPLICATION_ID, APP_ID);
    TestInputOperator inputOperator = new TestInputOperator();
    inputOperator.setStore(store);
    inputOperator.insertEventsInTable(10);
    CollectorTestSink<Object> sink = new CollectorTestSink<Object>();
    inputOperator.outputPort.setSink(sink);
    List<Partition<AbstractCouchBaseInputOperator<String>>> partitions = Lists.newArrayList();
    // Define partitions with a parallel partition count of 0; the operator ends up with two partitions.
    Collection<Partition<AbstractCouchBaseInputOperator<String>>> newPartitions = inputOperator.definePartitions(partitions, new PartitioningContextImpl(null, 0));
    Assert.assertEquals(2, newPartitions.size());
    for (Partition<AbstractCouchBaseInputOperator<String>> p : newPartitions) {
        Assert.assertNotSame(inputOperator, p.getPartitionedInstance());
    }
    // Collect all operators in a list
    List<AbstractCouchBaseInputOperator<String>> opers = Lists.newArrayList();
    for (Partition<AbstractCouchBaseInputOperator<String>> p : newPartitions) {
        TestInputOperator oi = (TestInputOperator) p.getPartitionedInstance();
        oi.setServerURIString("localhost:" + port1);
        oi.setStore(store);
        oi.setup(null);
        oi.outputPort.setSink(sink);
        opers.add(oi);
        // Point the next partition at the second mock server.
        port1 = port2;
    }
    sink.clear();
    int wid = 0;
    for (int i = 0; i < 10; i++) {
        for (AbstractCouchBaseInputOperator<String> o : opers) {
            o.beginWindow(wid);
            o.emitTuples();
            o.endWindow();
        }
        wid++;
    }
    Assert.assertEquals("Tuples read should be same ", 10, sink.collectedTuples.size());
    for (AbstractCouchBaseInputOperator<String> o : opers) {
        o.teardown();
    }
    mockCouchbase1.stop();
    mockCouchbase2.stop();
}
Also used: CouchbaseConnectionFactoryBuilder (com.couchbase.client.CouchbaseConnectionFactoryBuilder), ArrayList (java.util.ArrayList), URI (java.net.URI), CouchbaseClient (com.couchbase.client.CouchbaseClient), Partition (com.datatorrent.api.Partitioner.Partition), CouchbaseMock (org.couchbase.mock.CouchbaseMock), BucketConfiguration (org.couchbase.mock.BucketConfiguration), AttributeMap (com.datatorrent.api.Attribute.AttributeMap), PartitioningContextImpl (org.apache.apex.malhar.lib.partitioner.StatelessPartitionerTest.PartitioningContextImpl), CollectorTestSink (org.apache.apex.malhar.lib.testbench.CollectorTestSink), Test (org.junit.Test)
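
Note that here the partitioning context is built with a parallel partition count of 0, and the two partitions asserted afterwards seem to come from the operator's own partitioning of the configured server entries rather than from the context itself; that reading is an inference from the assertions, not documented behavior. The window-driving loop near the end of the test can be summarized by the sketch below, where runWindows is an illustrative helper (not Malhar API) and the sink is assumed to be already attached to the operator's output port, as in the test above.

import com.datatorrent.api.InputOperator;
import org.apache.apex.malhar.lib.testbench.CollectorTestSink;

// Sketch: drive an already-wired input operator for a number of streaming windows
// and report how many tuples its CollectorTestSink captured.
static int runWindows(InputOperator oper, CollectorTestSink<Object> sink, int windows) {
    sink.clear();                // start from an empty sink
    for (int wid = 0; wid < windows; wid++) {
        oper.beginWindow(wid);   // start of streaming window wid
        oper.emitTuples();       // tuples emitted here are delivered to the attached sink
        oper.endWindow();        // end of streaming window wid
    }
    return sink.collectedTuples.size();
}

In the test itself the same loop runs over every partitioned operator copy, all wired to one shared sink, which is why a single assertion on sink.collectedTuples.size() covers the combined output.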

Aggregations

Partition (com.datatorrent.api.Partitioner.Partition): 7
PartitioningContextImpl (org.apache.apex.malhar.lib.partitioner.StatelessPartitionerTest.PartitioningContextImpl): 7
Test (org.junit.Test): 7
DefaultPartition (com.datatorrent.api.DefaultPartition): 6
File (java.io.File): 6
LineByLineFileInputOperator (org.apache.apex.malhar.lib.fs.LineByLineFileInputOperator): 6
Path (org.apache.hadoop.fs.Path): 6
CollectorTestSink (org.apache.apex.malhar.lib.testbench.CollectorTestSink): 4
StatsListener (com.datatorrent.api.StatsListener): 3
Kryo (com.esotericsoftware.kryo.Kryo): 3
Configuration (org.apache.hadoop.conf.Configuration): 2
CouchbaseClient (com.couchbase.client.CouchbaseClient): 1
CouchbaseConnectionFactoryBuilder (com.couchbase.client.CouchbaseConnectionFactoryBuilder): 1
AttributeMap (com.datatorrent.api.Attribute.AttributeMap): 1
URI (java.net.URI): 1
ArrayList (java.util.ArrayList): 1
Random (java.util.Random): 1
FSWindowDataManager (org.apache.apex.malhar.lib.wal.FSWindowDataManager): 1
BucketConfiguration (org.couchbase.mock.BucketConfiguration): 1
CouchbaseMock (org.couchbase.mock.CouchbaseMock): 1