
Example 11 with TransactionSystemClient

Use of org.apache.tephra.TransactionSystemClient in project cdap by caskdata.

Class HBaseTableTest, method testColumnFamily:

@Test
public void testColumnFamily() throws Exception {
    DatasetProperties props = TableProperties.builder().setColumnFamily("t").build();
    String tableName = "testcf";
    DatasetAdmin admin = getTableAdmin(CONTEXT1, tableName, props);
    admin.create();
    final BufferingTable table = getTable(CONTEXT1, tableName, props);
    // DetachedTxSystemClient issues transactions locally, so no transaction manager is needed
    TransactionSystemClient txClient = new DetachedTxSystemClient();
    TransactionExecutor executor = new DefaultTransactionExecutor(txClient, table);
    executor.execute(new TransactionExecutor.Subroutine() {

        @Override
        public void apply() throws Exception {
            table.put(new Put("row", "column", "testValue"));
        }
    });
    // read back through a second table instance to confirm the write was persisted
    final BufferingTable table2 = getTable(CONTEXT1, tableName, props);
    executor = new DefaultTransactionExecutor(txClient, table2);
    executor.execute(new TransactionExecutor.Subroutine() {

        @Override
        public void apply() throws Exception {
            Assert.assertEquals("testValue", table2.get(new Get("row", "column")).getString("column"));
        }
    });
    // Verify the column family name
    TableId hTableId = hBaseTableUtil.createHTableId(new NamespaceId(CONTEXT1.getNamespaceId()), tableName);
    HTableDescriptor htd = hBaseTableUtil.getHTableDescriptor(TEST_HBASE.getHBaseAdmin(), hTableId);
    HColumnDescriptor hcd = htd.getFamily(Bytes.toBytes("t"));
    Assert.assertNotNull(hcd);
    Assert.assertEquals("t", hcd.getNameAsString());
}
Also used: TableId (co.cask.cdap.data2.util.TableId), HColumnDescriptor (org.apache.hadoop.hbase.HColumnDescriptor), DatasetProperties (co.cask.cdap.api.dataset.DatasetProperties), DatasetAdmin (co.cask.cdap.api.dataset.DatasetAdmin), TransactionExecutor (org.apache.tephra.TransactionExecutor), DefaultTransactionExecutor (org.apache.tephra.DefaultTransactionExecutor), ScannerTimeoutException (org.apache.hadoop.hbase.client.ScannerTimeoutException), RetriesExhaustedWithDetailsException (org.apache.hadoop.hbase.client.RetriesExhaustedWithDetailsException), InterruptedIOException (java.io.InterruptedIOException), IOException (java.io.IOException), Put (co.cask.cdap.api.dataset.table.Put), BufferingTable (co.cask.cdap.data2.dataset2.lib.table.BufferingTable), HTableDescriptor (org.apache.hadoop.hbase.HTableDescriptor), TransactionSystemClient (org.apache.tephra.TransactionSystemClient), Get (co.cask.cdap.api.dataset.table.Get), DetachedTxSystemClient (org.apache.tephra.inmemory.DetachedTxSystemClient), NamespaceId (co.cask.cdap.proto.id.NamespaceId), BufferingTableTest (co.cask.cdap.data2.dataset2.lib.table.BufferingTableTest), Test (org.junit.Test)
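
The pattern in this example, wrapping a TransactionAware in a DefaultTransactionExecutor backed by a DetachedTxSystemClient, also works outside the test harness. Below is a minimal sketch of just that wiring; the store parameter stands in for any transactional store (such as the BufferingTable in the test), and the runTransactionally name is hypothetical:

import org.apache.tephra.DefaultTransactionExecutor;
import org.apache.tephra.TransactionAware;
import org.apache.tephra.TransactionExecutor;
import org.apache.tephra.TransactionSystemClient;
import org.apache.tephra.inmemory.DetachedTxSystemClient;

public class ExecutorSketch {

    // Runs the given work against any TransactionAware store in a single transaction.
    static void runTransactionally(final TransactionAware store, final Runnable work) throws Exception {
        // DetachedTxSystemClient hands out transactions without a central transaction
        // manager, which is why the test above can use it standalone.
        TransactionSystemClient txClient = new DetachedTxSystemClient();
        TransactionExecutor executor = new DefaultTransactionExecutor(txClient, store);
        executor.execute(new TransactionExecutor.Subroutine() {
            @Override
            public void apply() throws Exception {
                // everything inside apply() commits or rolls back atomically
                work.run();
            }
        });
    }
}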

Example 12 with TransactionSystemClient

Use of org.apache.tephra.TransactionSystemClient in project cdap by caskdata.

Class TransactionServiceClientTest, method resetState:

@Before
public void resetState() throws Exception {
    // wipe all in-progress and invalid transactions so each test starts from a clean state
    TransactionSystemClient txClient = getClient();
    txClient.resetState();
}
Also used: TransactionSystemClient (org.apache.tephra.TransactionSystemClient), Before (org.junit.Before)
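
For context on what resetState() clears: the transaction service tracks every transaction a client has started until it is committed or aborted. A minimal sketch of that lifecycle, assuming the classic Tephra 0.x client API (startShort, canCommit, commit, abort); the change key is illustrative only:

import java.nio.charset.StandardCharsets;
import java.util.Collections;
import org.apache.tephra.Transaction;
import org.apache.tephra.TransactionSystemClient;
import org.apache.tephra.inmemory.DetachedTxSystemClient;

public class LifecycleSketch {
    public static void main(String[] args) throws Exception {
        TransactionSystemClient txClient = new DetachedTxSystemClient();
        // start a short (timeout-bounded) transaction
        Transaction tx = txClient.startShort();
        // declare the change set, then ask whether the transaction may commit
        byte[] changeKey = "row1".getBytes(StandardCharsets.UTF_8);
        if (txClient.canCommit(tx, Collections.singleton(changeKey)) && txClient.commit(tx)) {
            System.out.println("committed");
        } else {
            // a conflict was detected; roll back so the change set is discarded
            txClient.abort(tx);
        }
    }
}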

Example 13 with TransactionSystemClient

Use of org.apache.tephra.TransactionSystemClient in project cdap by caskdata.

Class BaseHiveExploreServiceTest, method initialize:

protected static void initialize(CConfiguration cConf, TemporaryFolder tmpFolder, boolean useStandalone, boolean enableAuthorization) throws Exception {
    if (!runBefore) {
        return;
    }
    Configuration hConf = new Configuration();
    if (enableAuthorization) {
        LocationFactory locationFactory = new LocalLocationFactory(tmpFolder.newFolder());
        Location authExtensionJar = AppJarHelper.createDeploymentJar(locationFactory, InMemoryAuthorizer.class);
        cConf.setBoolean(Constants.Security.ENABLED, true);
        cConf.setBoolean(Constants.Security.Authorization.ENABLED, true);
        cConf.set(Constants.Security.Authorization.EXTENSION_JAR_PATH, authExtensionJar.toURI().getPath());
        cConf.setBoolean(Constants.Security.KERBEROS_ENABLED, false);
        cConf.setInt(Constants.Security.Authorization.CACHE_MAX_ENTRIES, 0);
    }
    List<Module> modules = useStandalone ? createStandaloneModules(cConf, hConf, tmpFolder) : createInMemoryModules(cConf, hConf, tmpFolder);
    injector = Guice.createInjector(modules);
    // the transaction manager must be running before any service that uses transactions
    transactionManager = injector.getInstance(TransactionManager.class);
    transactionManager.startAndWait();
    transactionSystemClient = injector.getInstance(TransactionSystemClient.class);
    dsOpService = injector.getInstance(DatasetOpExecutor.class);
    dsOpService.startAndWait();
    datasetService = injector.getInstance(DatasetService.class);
    datasetService.startAndWait();
    exploreExecutorService = injector.getInstance(ExploreExecutorService.class);
    exploreExecutorService.startAndWait();
    datasetFramework = injector.getInstance(DatasetFramework.class);
    exploreClient = injector.getInstance(DiscoveryExploreClient.class);
    exploreService = injector.getInstance(ExploreService.class);
    exploreClient.ping();
    notificationService = injector.getInstance(NotificationService.class);
    notificationService.startAndWait();
    streamService = injector.getInstance(StreamService.class);
    streamService.startAndWait();
    streamHttpService = injector.getInstance(StreamHttpService.class);
    streamHttpService.startAndWait();
    exploreTableManager = injector.getInstance(ExploreTableManager.class);
    streamAdmin = injector.getInstance(StreamAdmin.class);
    streamMetaStore = injector.getInstance(StreamMetaStore.class);
    namespaceAdmin = injector.getInstance(NamespaceAdmin.class);
    namespacedLocationFactory = injector.getInstance(NamespacedLocationFactory.class);
    // create namespaces
    // Normally this happens when a namespace is created via the REST APIs. Since these Explore
    // tests do not start AppFabricServer, we simulate that scenario by calling the DatasetFramework APIs directly.
    createNamespace(NamespaceId.DEFAULT);
    createNamespace(NAMESPACE_ID);
    createNamespace(OTHER_NAMESPACE_ID);
}
Also used: StreamService (co.cask.cdap.data.stream.service.StreamService), CConfiguration (co.cask.cdap.common.conf.CConfiguration), Configuration (org.apache.hadoop.conf.Configuration), NamespaceAdmin (co.cask.cdap.common.namespace.NamespaceAdmin), DatasetService (co.cask.cdap.data2.datafabric.dataset.service.DatasetService), NotificationService (co.cask.cdap.notifications.service.NotificationService), DatasetOpExecutor (co.cask.cdap.data2.datafabric.dataset.service.executor.DatasetOpExecutor), StreamHttpService (co.cask.cdap.data.stream.service.StreamHttpService), NamespacedLocationFactory (co.cask.cdap.common.namespace.NamespacedLocationFactory), LocalLocationFactory (org.apache.twill.filesystem.LocalLocationFactory), LocationFactory (org.apache.twill.filesystem.LocationFactory), DatasetFramework (co.cask.cdap.data2.dataset2.DatasetFramework), TransactionSystemClient (org.apache.tephra.TransactionSystemClient), DiscoveryExploreClient (co.cask.cdap.explore.client.DiscoveryExploreClient), StreamAdmin (co.cask.cdap.data2.transaction.stream.StreamAdmin), TransactionManager (org.apache.tephra.TransactionManager), ExploreExecutorService (co.cask.cdap.explore.executor.ExploreExecutorService), StreamMetaStore (co.cask.cdap.data.stream.service.StreamMetaStore), AuthorizationEnforcementModule (co.cask.cdap.security.authorization.AuthorizationEnforcementModule), NonCustomLocationUnitTestModule (co.cask.cdap.common.guice.NonCustomLocationUnitTestModule), ExploreRuntimeModule (co.cask.cdap.explore.guice.ExploreRuntimeModule), ExploreClientModule (co.cask.cdap.explore.guice.ExploreClientModule), MetricsClientRuntimeModule (co.cask.cdap.metrics.guice.MetricsClientRuntimeModule), NamespaceClientRuntimeModule (co.cask.cdap.common.namespace.guice.NamespaceClientRuntimeModule), ConfigModule (co.cask.cdap.common.guice.ConfigModule), AbstractModule (com.google.inject.AbstractModule), Module (com.google.inject.Module), NotificationServiceRuntimeModule (co.cask.cdap.notifications.guice.NotificationServiceRuntimeModule), DiscoveryRuntimeModule (co.cask.cdap.common.guice.DiscoveryRuntimeModule), NamespaceClientUnitTestModule (co.cask.cdap.common.guice.NamespaceClientUnitTestModule), StreamServiceRuntimeModule (co.cask.cdap.data.stream.service.StreamServiceRuntimeModule), AuthorizationTestModule (co.cask.cdap.security.authorization.AuthorizationTestModule), IOModule (co.cask.cdap.common.guice.IOModule), Location (org.apache.twill.filesystem.Location)
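
Most of that setup wires optional services; the part that concerns TransactionSystemClient is just three steps: build an injector, start the TransactionManager, fetch the client. A trimmed sketch of that core, assuming CDAP's in-memory data-fabric modules suffice (depending on the CDAP version, additional modules such as location, metrics, or authorization bindings may be required, as the full example shows):

import co.cask.cdap.common.conf.CConfiguration;
import co.cask.cdap.common.guice.ConfigModule;
import co.cask.cdap.common.guice.DiscoveryRuntimeModule;
import co.cask.cdap.data.runtime.DataFabricModules;
import com.google.inject.Guice;
import com.google.inject.Injector;
import org.apache.tephra.TransactionManager;
import org.apache.tephra.TransactionSystemClient;

public class TxBootstrapSketch {

    public static TransactionSystemClient bootstrap() {
        CConfiguration cConf = CConfiguration.create();
        Injector injector = Guice.createInjector(
            new ConfigModule(cConf),
            new DiscoveryRuntimeModule().getInMemoryModules(),
            // more modules (location, metrics, authorization) may be needed here
            new DataFabricModules().getInMemoryModules());
        // the transaction manager must be started before the client can open transactions
        injector.getInstance(TransactionManager.class).startAndWait();
        return injector.getInstance(TransactionSystemClient.class);
    }
}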

Example 14 with TransactionSystemClient

Use of org.apache.tephra.TransactionSystemClient in project cdap by caskdata.

Class TransactionServiceTest, method testHA:

@Test(timeout = 30000)
public void testHA() throws Exception {
    // NOTE: we switch between blocking and non-blocking starts a lot below, because until we
    // integrate with generic leader election, the service blocks on start if it is not the leader
    // TODO: fix this by integrating with generic leader election
    CConfiguration cConf = CConfiguration.create();
    // tests should use the current user for HDFS
    cConf.set(Constants.CFG_HDFS_USER, System.getProperty("user.name"));
    cConf.set(Constants.Zookeeper.QUORUM, zkServer.getConnectionStr());
    cConf.set(Constants.CFG_LOCAL_DATA_DIR, tmpFolder.newFolder().getAbsolutePath());
    Injector injector = Guice.createInjector(new ConfigModule(cConf), new ZKClientModule(), new NonCustomLocationUnitTestModule().getModule(), new DiscoveryRuntimeModule().getDistributedModules(), new TransactionMetricsModule(), new AbstractModule() {

        @Override
        protected void configure() {
            bind(NamespaceQueryAdmin.class).to(SimpleNamespaceQueryAdmin.class);
            bind(UGIProvider.class).to(UnsupportedUGIProvider.class);
            bind(OwnerAdmin.class).to(DefaultOwnerAdmin.class);
        }
    }, new DataFabricModules().getDistributedModules(), Modules.override(new DataSetsModules().getDistributedModules()).with(new AbstractModule() {

        @Override
        protected void configure() {
            bind(MetadataStore.class).to(NoOpMetadataStore.class);
        }
    }), new AuthorizationTestModule(), new AuthorizationEnforcementModule().getInMemoryModules(), new AuthenticationContextModules().getNoOpModule());
    ZKClientService zkClient = injector.getInstance(ZKClientService.class);
    zkClient.startAndWait();
    try {
        final Table table = createTable("myTable");
        // tx service client
        // NOTE: we can create it before the services start; it picks them up once they become available
        TransactionSystemClient txClient = injector.getInstance(TransactionSystemClient.class);
        TransactionExecutor txExecutor = new DefaultTransactionExecutor(txClient, ImmutableList.of((TransactionAware) table));
        // starting tx service, tx client can pick it up
        TransactionService first = createTxService(zkServer.getConnectionStr(), Networks.getRandomPort(), hConf, tmpFolder.newFolder());
        first.startAndWait();
        Assert.assertNotNull(txClient.startShort());
        verifyGetAndPut(table, txExecutor, null, "val1");
        // starting another tx service should not hurt
        TransactionService second = createTxService(zkServer.getConnectionStr(), Networks.getRandomPort(), hConf, tmpFolder.newFolder());
        // NOTE: we don't have to wait for the start, as the client should pick it up anyway,
        // but we do wait to ensure the case of two active services is handled well
        second.startAndWait();
        // wait a bit for the change to take effect
        TimeUnit.SECONDS.sleep(1);
        Assert.assertNotNull(txClient.startShort());
        verifyGetAndPut(table, txExecutor, "val1", "val2");
        // shutting down the first one is fine: we have another one to pick up the leader role
        first.stopAndWait();
        Assert.assertNotNull(txClient.startShort());
        verifyGetAndPut(table, txExecutor, "val2", "val3");
        // doing same trick again to failover to the third one
        TransactionService third = createTxService(zkServer.getConnectionStr(), Networks.getRandomPort(), hConf, tmpFolder.newFolder());
        // NOTE: we don't have to wait for the start, as the client should pick it up anyway
        third.start();
        // stopping second one
        second.stopAndWait();
        Assert.assertNotNull(txClient.startShort());
        verifyGetAndPut(table, txExecutor, "val3", "val4");
        // releasing resources
        third.stop();
    } finally {
        try {
            dropTable("myTable");
        } finally {
            zkClient.stopAndWait();
        }
    }
}
Also used: ConfigModule (co.cask.cdap.common.guice.ConfigModule), TransactionMetricsModule (co.cask.cdap.data.runtime.TransactionMetricsModule), ZKClientModule (co.cask.cdap.common.guice.ZKClientModule), TransactionSystemClient (org.apache.tephra.TransactionSystemClient), Injector (com.google.inject.Injector), SimpleNamespaceQueryAdmin (co.cask.cdap.common.namespace.SimpleNamespaceQueryAdmin), DiscoveryRuntimeModule (co.cask.cdap.common.guice.DiscoveryRuntimeModule), UnsupportedUGIProvider (co.cask.cdap.security.impersonation.UnsupportedUGIProvider), InMemoryTable (co.cask.cdap.data2.dataset2.lib.table.inmemory.InMemoryTable), Table (co.cask.cdap.api.dataset.table.Table), TransactionService (org.apache.tephra.distributed.TransactionService), AuthenticationContextModules (co.cask.cdap.security.auth.context.AuthenticationContextModules), DataSetsModules (co.cask.cdap.data.runtime.DataSetsModules), TransactionExecutor (org.apache.tephra.TransactionExecutor), DefaultTransactionExecutor (org.apache.tephra.DefaultTransactionExecutor), NonCustomLocationUnitTestModule (co.cask.cdap.common.guice.NonCustomLocationUnitTestModule), DefaultOwnerAdmin (co.cask.cdap.security.impersonation.DefaultOwnerAdmin), CConfiguration (co.cask.cdap.common.conf.CConfiguration), AuthorizationTestModule (co.cask.cdap.security.authorization.AuthorizationTestModule), AbstractModule (com.google.inject.AbstractModule), MetadataStore (co.cask.cdap.data2.metadata.store.MetadataStore), NoOpMetadataStore (co.cask.cdap.data2.metadata.store.NoOpMetadataStore), ZKClientService (org.apache.twill.zookeeper.ZKClientService), TransactionAware (org.apache.tephra.TransactionAware), DataFabricModules (co.cask.cdap.data.runtime.DataFabricModules), AuthorizationEnforcementModule (co.cask.cdap.security.authorization.AuthorizationEnforcementModule), Test (org.junit.Test)
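
The test relies on a verifyGetAndPut helper whose body is not part of this listing. A plausible sketch of it, assuming a single-row, single-column layout; the row and column names are hypothetical:

import co.cask.cdap.api.common.Bytes;
import co.cask.cdap.api.dataset.table.Table;
import org.apache.tephra.TransactionExecutor;
import org.apache.tephra.TransactionFailureException;
import org.junit.Assert;

// ...inside the test class:
private void verifyGetAndPut(final Table table, TransactionExecutor txExecutor,
                             final String verifyValue, final String putValue)
    throws TransactionFailureException, InterruptedException {
    txExecutor.execute(new TransactionExecutor.Subroutine() {
        @Override
        public void apply() throws Exception {
            // read the current value (null on the first call), check it, then write the next one
            byte[] existing = table.get(Bytes.toBytes("row"), Bytes.toBytes("col"));
            Assert.assertEquals(verifyValue, existing == null ? null : Bytes.toString(existing));
            table.put(Bytes.toBytes("row"), Bytes.toBytes("col"), Bytes.toBytes(putValue));
        }
    });
}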

Example 15 with TransactionSystemClient

Use of org.apache.tephra.TransactionSystemClient in project cdap by caskdata.

Class ProgramScheduleStoreDatasetTest, method testFindSchedulesByEventAndUpdateSchedule:

@Test
public void testFindSchedulesByEventAndUpdateSchedule() throws Exception {
    DatasetFramework dsFramework = getInjector().getInstance(DatasetFramework.class);
    TransactionSystemClient txClient = getInjector().getInstance(TransactionSystemClient.class);
    TransactionExecutorFactory txExecutorFactory = new DynamicTransactionExecutorFactory(txClient);
    dsFramework.truncateInstance(Schedulers.STORE_DATASET_ID);
    final ProgramScheduleStoreDataset store = dsFramework.getDataset(Schedulers.STORE_DATASET_ID, new HashMap<String, String>(), null);
    Assert.assertNotNull(store);
    TransactionExecutor txExecutor = txExecutorFactory.createExecutor(Collections.singleton((TransactionAware) store));
    final ProgramSchedule sched11 = new ProgramSchedule("sched11", "one partition schedule", PROG1_ID, ImmutableMap.of("prop3", "abc"), new PartitionTrigger(DS1_ID, 1), ImmutableList.<Constraint>of());
    final ProgramSchedule sched12 = new ProgramSchedule("sched12", "two partition schedule", PROG1_ID, ImmutableMap.of("propper", "popper"), new PartitionTrigger(DS2_ID, 2), ImmutableList.<Constraint>of());
    final ProgramSchedule sched22 = new ProgramSchedule("sched22", "twentytwo partition schedule", PROG2_ID, ImmutableMap.of("nn", "4"), new PartitionTrigger(DS2_ID, 22), ImmutableList.<Constraint>of());
    final ProgramSchedule sched31 = new ProgramSchedule("sched31", "a program status trigger", PROG3_ID, ImmutableMap.of("propper", "popper"), new ProgramStatusTrigger(PROG1_ID, ProgramStatus.COMPLETED, ProgramStatus.FAILED, ProgramStatus.KILLED), ImmutableList.<Constraint>of());
    txExecutor.execute(new TransactionExecutor.Subroutine() {

        @Override
        public void apply() throws Exception {
            // an event for DS1 or DS2 should trigger nothing; validate that an empty collection is returned
            Assert.assertTrue(store.findSchedules(Schedulers.triggerKeyForPartition(DS1_ID)).isEmpty());
            Assert.assertTrue(store.findSchedules(Schedulers.triggerKeyForPartition(DS2_ID)).isEmpty());
            // an event for PROG1 should trigger nothing; it should also return an empty collection
            Assert.assertTrue(store.findSchedules(Schedulers.triggerKeyForProgramStatus(PROG1_ID, ProgramStatus.COMPLETED)).isEmpty());
            Assert.assertTrue(store.findSchedules(Schedulers.triggerKeyForProgramStatus(PROG1_ID, ProgramStatus.FAILED)).isEmpty());
            Assert.assertTrue(store.findSchedules(Schedulers.triggerKeyForProgramStatus(PROG1_ID, ProgramStatus.KILLED)).isEmpty());
        }
    });
    txExecutor.execute(new TransactionExecutor.Subroutine() {

        @Override
        public void apply() throws Exception {
            store.addSchedules(ImmutableList.of(sched11, sched12, sched22, sched31));
        }
    });
    txExecutor.execute(new TransactionExecutor.Subroutine() {

        @Override
        public void apply() throws Exception {
            // event for ProgramStatus should trigger only sched31
            Assert.assertEquals(ImmutableSet.of(sched31), toScheduleSet(store.findSchedules(Schedulers.triggerKeyForProgramStatus(PROG1_ID, ProgramStatus.COMPLETED))));
            Assert.assertEquals(ImmutableSet.of(sched31), toScheduleSet(store.findSchedules(Schedulers.triggerKeyForProgramStatus(PROG1_ID, ProgramStatus.FAILED))));
            Assert.assertEquals(ImmutableSet.of(sched31), toScheduleSet(store.findSchedules(Schedulers.triggerKeyForProgramStatus(PROG1_ID, ProgramStatus.KILLED))));
            // event for DS1 should trigger only sched11
            Assert.assertEquals(ImmutableSet.of(sched11), toScheduleSet(store.findSchedules(Schedulers.triggerKeyForPartition(DS1_ID))));
            // event for DS2 triggers only sched12 and sched22
            Assert.assertEquals(ImmutableSet.of(sched12, sched22), toScheduleSet(store.findSchedules(Schedulers.triggerKeyForPartition(DS2_ID))));
        }
    });
    final ProgramSchedule sched11New = new ProgramSchedule(sched11.getName(), "time schedule", PROG1_ID, ImmutableMap.of("timeprop", "time"), new TimeTrigger("* * * * *"), ImmutableList.<Constraint>of());
    final ProgramSchedule sched12New = new ProgramSchedule(sched12.getName(), "one partition schedule", PROG1_ID, ImmutableMap.of("pp", "p"), new PartitionTrigger(DS1_ID, 2), ImmutableList.<Constraint>of());
    final ProgramSchedule sched22New = new ProgramSchedule(sched22.getName(), "program3 failed schedule", PROG2_ID, ImmutableMap.of("ss", "s"), new ProgramStatusTrigger(PROG3_ID, ProgramStatus.FAILED), ImmutableList.<Constraint>of());
    final ProgramSchedule sched31New = new ProgramSchedule(sched31.getName(), "program1 failed schedule", PROG3_ID, ImmutableMap.of("abcd", "efgh"), new ProgramStatusTrigger(PROG1_ID, ProgramStatus.FAILED), ImmutableList.<Constraint>of());
    txExecutor.execute(new TransactionExecutor.Subroutine() {

        @Override
        public void apply() throws Exception {
            store.updateSchedule(sched11New);
            store.updateSchedule(sched12New);
            store.updateSchedule(sched22New);
            store.updateSchedule(sched31New);
        }
    });
    txExecutor.execute(new TransactionExecutor.Subroutine() {

        @Override
        public void apply() throws Exception {
            // event for DS1 should trigger only sched12New after update
            Assert.assertEquals(ImmutableSet.of(sched12New), toScheduleSet(store.findSchedules(Schedulers.triggerKeyForPartition(DS1_ID))));
            // event for DS2 triggers no schedule after update
            Assert.assertEquals(ImmutableSet.<ProgramSchedule>of(), toScheduleSet(store.findSchedules(Schedulers.triggerKeyForPartition(DS2_ID))));
            // a program-status event for PROG1 now triggers only on FAILED, not on COMPLETED or KILLED
            Assert.assertEquals(ImmutableSet.of(sched31New), toScheduleSet(store.findSchedules(Schedulers.triggerKeyForProgramStatus(PROG1_ID, ProgramStatus.FAILED))));
            Assert.assertEquals(ImmutableSet.of(), toScheduleSet(store.findSchedules(Schedulers.triggerKeyForProgramStatus(PROG1_ID, ProgramStatus.COMPLETED))));
            Assert.assertEquals(ImmutableSet.of(), toScheduleSet(store.findSchedules(Schedulers.triggerKeyForProgramStatus(PROG1_ID, ProgramStatus.KILLED))));
        }
    });
}
Also used: TimeTrigger (co.cask.cdap.internal.app.runtime.schedule.trigger.TimeTrigger), DynamicTransactionExecutorFactory (co.cask.cdap.data.runtime.DynamicTransactionExecutorFactory), TransactionExecutor (org.apache.tephra.TransactionExecutor), DatasetManagementException (co.cask.cdap.api.dataset.DatasetManagementException), TransactionExecutorFactory (co.cask.cdap.data2.transaction.TransactionExecutorFactory), DatasetFramework (co.cask.cdap.data2.dataset2.DatasetFramework), TransactionSystemClient (org.apache.tephra.TransactionSystemClient), ProgramSchedule (co.cask.cdap.internal.app.runtime.schedule.ProgramSchedule), TransactionAware (org.apache.tephra.TransactionAware), ProgramStatusTrigger (co.cask.cdap.internal.app.runtime.schedule.trigger.ProgramStatusTrigger), PartitionTrigger (co.cask.cdap.internal.app.runtime.schedule.trigger.PartitionTrigger), Test (org.junit.Test)
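
One readability note on the repeated anonymous classes above: TransactionExecutor.Subroutine declares a single apply() method, so under Java 8 each block collapses to a lambda. A sketch, reusing the names from the test:

// equivalent to the second txExecutor.execute(...) call above, assuming Java 8+;
// the block body may throw checked exceptions because apply() declares throws Exception
txExecutor.execute(() -> {
    store.addSchedules(ImmutableList.of(sched11, sched12, sched22, sched31));
});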

Aggregations

TransactionSystemClient (org.apache.tephra.TransactionSystemClient): 18 uses
Test (org.junit.Test): 12 uses
Transaction (org.apache.tephra.Transaction): 10 uses
TransactionAware (org.apache.tephra.TransactionAware): 6 uses
CConfiguration (co.cask.cdap.common.conf.CConfiguration): 5 uses
DatasetFramework (co.cask.cdap.data2.dataset2.DatasetFramework): 5 uses
HttpResponse (org.apache.http.HttpResponse): 5 uses
DynamicTransactionExecutorFactory (co.cask.cdap.data.runtime.DynamicTransactionExecutorFactory): 4 uses
DiscoveryServiceClient (org.apache.twill.discovery.DiscoveryServiceClient): 4 uses
NamespaceAdmin (co.cask.cdap.common.namespace.NamespaceAdmin): 3 uses
TransactionExecutorFactory (co.cask.cdap.data2.transaction.TransactionExecutorFactory): 3 uses
BasicArguments (co.cask.cdap.internal.app.runtime.BasicArguments): 3 uses
TransactionExecutor (org.apache.tephra.TransactionExecutor): 3 uses
DatasetManagementException (co.cask.cdap.api.dataset.DatasetManagementException): 2 uses
StreamEvent (co.cask.cdap.api.flow.flowlet.StreamEvent): 2 uses
MetricsCollectionService (co.cask.cdap.api.metrics.MetricsCollectionService): 2 uses
ProgramDescriptor (co.cask.cdap.app.program.ProgramDescriptor): 2 uses
ProgramController (co.cask.cdap.app.runtime.ProgramController): 2 uses
RandomEndpointStrategy (co.cask.cdap.common.discovery.RandomEndpointStrategy): 2 uses
ConfigModule (co.cask.cdap.common.guice.ConfigModule): 2 uses