Use of co.cask.cdap.data2.transaction.TransactionExecutorFactory in project cdap by caskdata.
From the class RemoteDatasetFrameworkTest, method before().
@Before
public void before() throws Exception {
  cConf.set(Constants.Service.MASTER_SERVICES_BIND_ADDRESS, "localhost");
  cConf.setBoolean(Constants.Dangerous.UNRECOVERABLE_RESET, true);
  Configuration txConf = HBaseConfiguration.create();
  CConfigurationUtil.copyTxProperties(cConf, txConf);

  // OK to pass null: the impersonator is never actually invoked when Kerberos security is disabled
  Impersonator impersonator = new DefaultImpersonator(cConf, null);

  // TODO: Refactor to use the injector for everything
  Injector injector = Guice.createInjector(
    new ConfigModule(cConf, txConf),
    new DiscoveryRuntimeModule().getInMemoryModules(),
    new AuthorizationTestModule(),
    new AuthorizationEnforcementModule().getInMemoryModules(),
    new AuthenticationContextModules().getMasterModule(),
    new TransactionInMemoryModule(),
    new AbstractModule() {
      @Override
      protected void configure() {
        bind(MetricsCollectionService.class).to(NoOpMetricsCollectionService.class).in(Singleton.class);
        install(new FactoryModuleBuilder()
                  .implement(DatasetDefinitionRegistry.class, DefaultDatasetDefinitionRegistry.class)
                  .build(DatasetDefinitionRegistryFactory.class));
        // Of everything in the injector, we only need RemoteDatasetFramework in these tests
        bind(RemoteDatasetFramework.class);
      }
    });

  // Tx manager to support working with datasets
  txManager = new TransactionManager(txConf);
  txManager.startAndWait();
  InMemoryTxSystemClient txSystemClient = new InMemoryTxSystemClient(txManager);
  TransactionSystemClientService txSystemClientService =
    new DelegatingTransactionSystemClientService(txSystemClient);

  DiscoveryService discoveryService = injector.getInstance(DiscoveryService.class);
  DiscoveryServiceClient discoveryServiceClient = injector.getInstance(DiscoveryServiceClient.class);
  MetricsCollectionService metricsCollectionService = injector.getInstance(MetricsCollectionService.class);
  AuthenticationContext authenticationContext = injector.getInstance(AuthenticationContext.class);

  framework = new RemoteDatasetFramework(cConf, discoveryServiceClient, registryFactory, authenticationContext);

  SystemDatasetInstantiatorFactory datasetInstantiatorFactory =
    new SystemDatasetInstantiatorFactory(locationFactory, framework, cConf);
  DatasetAdminService datasetAdminService =
    new DatasetAdminService(framework, cConf, locationFactory, datasetInstantiatorFactory,
                            new NoOpMetadataStore(), impersonator);
  ImmutableSet<HttpHandler> handlers =
    ImmutableSet.<HttpHandler>of(new DatasetAdminOpHTTPHandler(datasetAdminService));
  opExecutorService = new DatasetOpExecutorService(cConf, discoveryService, metricsCollectionService, handlers);
  opExecutorService.startAndWait();

  ImmutableMap<String, DatasetModule> modules = ImmutableMap.<String, DatasetModule>builder()
    .put("memoryTable", new InMemoryTableModule())
    .put("core", new CoreDatasetsModule())
    .putAll(DatasetMetaTableUtil.getModules())
    .build();
  InMemoryDatasetFramework mdsFramework = new InMemoryDatasetFramework(registryFactory, modules);

  DiscoveryExploreClient exploreClient = new DiscoveryExploreClient(discoveryServiceClient, authenticationContext);
  ExploreFacade exploreFacade = new ExploreFacade(exploreClient, cConf);
  TransactionExecutorFactory txExecutorFactory = new DynamicTransactionExecutorFactory(txSystemClient);
  AuthorizationEnforcer authorizationEnforcer = injector.getInstance(AuthorizationEnforcer.class);

  DatasetTypeManager typeManager =
    new DatasetTypeManager(cConf, locationFactory, txSystemClientService, txExecutorFactory,
                           mdsFramework, impersonator);
  DatasetInstanceManager instanceManager =
    new DatasetInstanceManager(txSystemClientService, txExecutorFactory, mdsFramework);
  PrivilegesManager privilegesManager = injector.getInstance(PrivilegesManager.class);
  DatasetTypeService typeService =
    new DatasetTypeService(typeManager, namespaceQueryAdmin, namespacedLocationFactory, authorizationEnforcer,
                           privilegesManager, authenticationContext, cConf, impersonator, txSystemClientService,
                           mdsFramework, txExecutorFactory, DEFAULT_MODULES);
  DatasetOpExecutor opExecutor =
    new LocalDatasetOpExecutor(cConf, discoveryServiceClient, opExecutorService, authenticationContext);
  DatasetInstanceService instanceService =
    new DatasetInstanceService(typeService, instanceManager, opExecutor, exploreFacade, namespaceQueryAdmin,
                               ownerAdmin, authorizationEnforcer, privilegesManager, authenticationContext);
  instanceService.setAuditPublisher(inMemoryAuditPublisher);

  service = new DatasetService(cConf, discoveryService, discoveryServiceClient, metricsCollectionService,
                               new InMemoryDatasetOpExecutor(framework), new HashSet<DatasetMetricsReporter>(),
                               typeService, instanceService);

  // Start the dataset service and wait for it to become discoverable
  service.startAndWait();
  EndpointStrategy endpointStrategy =
    new RandomEndpointStrategy(discoveryServiceClient.discover(Constants.Service.DATASET_MANAGER));
  Preconditions.checkNotNull(endpointStrategy.pick(5, TimeUnit.SECONDS),
                             "%s service is not up after 5 seconds", service);

  createNamespace(NamespaceId.SYSTEM);
  createNamespace(NAMESPACE_ID);
}
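The DynamicTransactionExecutorFactory built above is the same factory handed to the dataset managers further down. As a minimal sketch of the pattern in isolation, assuming `table` is some TransactionAware dataset obtained elsewhere (a hypothetical variable):

// Sketch: run a transactional operation through the same factory wiring as above.
// Assumes `table` is a final TransactionAware dataset obtained elsewhere (hypothetical).
TransactionManager txManager = new TransactionManager(new Configuration());
txManager.startAndWait();
TransactionSystemClient txClient = new InMemoryTxSystemClient(txManager);
TransactionExecutorFactory factory = new DynamicTransactionExecutorFactory(txClient);

TransactionExecutor executor = factory.createExecutor(ImmutableList.of(table));
executor.execute(new TransactionExecutor.Subroutine() {
  @Override
  public void apply() throws Exception {
    // all operations on `table` here execute inside a single transaction
  }
});

An empty Hadoop Configuration should suffice for the in-memory transaction manager in a test setting; production code would pass the real transaction configuration, as before() does with txConf.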
Use of co.cask.cdap.data2.transaction.TransactionExecutorFactory in project cdap by caskdata.
From the class UpgradeTool, method createInjector().
@VisibleForTesting
Injector createInjector() throws Exception {
  return Guice.createInjector(
    new ConfigModule(cConf, hConf),
    new LocationRuntimeModule().getDistributedModules(),
    new ZKClientModule(),
    new DiscoveryRuntimeModule().getDistributedModules(),
    new MessagingClientModule(),
    Modules.override(new DataSetsModules().getDistributedModules()).with(new AbstractModule() {
      @Override
      protected void configure() {
        bind(DatasetFramework.class).to(InMemoryDatasetFramework.class).in(Scopes.SINGLETON);
        // DataSetsModules().getDistributedModules() binds to RemoteDatasetFramework, so override
        // that binding with the same InMemoryDatasetFramework
        bind(DatasetFramework.class)
          .annotatedWith(Names.named(DataSetsModules.BASE_DATASET_FRAMEWORK))
          .to(DatasetFramework.class);
        install(new FactoryModuleBuilder()
                  .implement(DatasetDefinitionRegistry.class, DefaultDatasetDefinitionRegistry.class)
                  .build(DatasetDefinitionRegistryFactory.class));
        // CDAP-5954: for now, the upgrade tool does not need to record lineage and metadata changes
        bind(LineageWriter.class).to(NoOpLineageWriter.class);
      }
    }),
    new ViewAdminModules().getDistributedModules(),
    new StreamAdminModules().getDistributedModules(),
    new NotificationFeedClientModule(),
    new TwillModule(),
    new ExploreClientModule(),
    new ProgramRunnerRuntimeModule().getDistributedModules(),
    new ServiceStoreModules().getDistributedModules(),
    new SystemDatasetRuntimeModule().getDistributedModules(),
    // the upgrade tool doesn't need real notifications, so use the in-memory implementations
    new NotificationServiceRuntimeModule().getInMemoryModules(),
    new KafkaClientModule(),
    new NamespaceStoreModule().getDistributedModules(),
    new AuthenticationContextModules().getMasterModule(),
    new AuthorizationModule(),
    new AuthorizationEnforcementModule().getMasterModule(),
    new SecureStoreModules().getDistributedModules(),
    new DataFabricModules(UpgradeTool.class.getName()).getDistributedModules(),
    new AppFabricServiceRuntimeModule().getDistributedModules(),
    new AbstractModule() {
      @Override
      protected void configure() {
        // DataFabricDistributedModule needs a MetricsCollectionService binding; since the upgrade
        // tool does nothing with metrics, bind it to NoOpMetricsCollectionService
        bind(MetricsCollectionService.class).to(NoOpMetricsCollectionService.class).in(Scopes.SINGLETON);
        bind(MetricDatasetFactory.class).to(DefaultMetricDatasetFactory.class).in(Scopes.SINGLETON);
        bind(MetricStore.class).to(DefaultMetricStore.class);
      }

      @Provides
      @Singleton
      @Named("datasetInstanceManager")
      @SuppressWarnings("unused")
      public DatasetInstanceManager getDatasetInstanceManager(TransactionSystemClientService txClient,
                                                              TransactionExecutorFactory txExecutorFactory,
                                                              @Named("datasetMDS") DatasetFramework framework) {
        return new DatasetInstanceManager(txClient, txExecutorFactory, framework);
      }

      // Needed because the LocalApplicationManager expects a DatasetFramework injection named "datasetMDS"
      @Provides
      @Singleton
      @Named("datasetMDS")
      @SuppressWarnings("unused")
      public DatasetFramework getInDsFramework(DatasetFramework dsFramework) {
        return dsFramework;
      }
    });
}
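The two @Provides methods expose named bindings, which callers would fetch through Guice keys rather than plain class literals. A minimal sketch, assuming `upgradeTool` is an UpgradeTool instance (hypothetical variable name):

// Sketch: fetch the named bindings declared by the @Provides methods above.
Injector injector = upgradeTool.createInjector();
DatasetInstanceManager instanceManager =
  injector.getInstance(Key.get(DatasetInstanceManager.class, Names.named("datasetInstanceManager")));
DatasetFramework mdsFramework =
  injector.getInstance(Key.get(DatasetFramework.class, Names.named("datasetMDS")));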
Use of co.cask.cdap.data2.transaction.TransactionExecutorFactory in project cdap by caskdata.
From the class MultiConsumerTest, method testMulti().
@Test
public void testMulti() throws Exception {
  // TODO: Fix this test case to really test with numGroups settings.
  final ApplicationWithPrograms app =
    AppFabricTestHelper.deployApplicationWithManager(MultiApp.class, TEMP_FOLDER_SUPPLIER);
  List<ProgramController> controllers = Lists.newArrayList();
  for (ProgramDescriptor programDescriptor : app.getPrograms()) {
    controllers.add(AppFabricTestHelper.submit(app, programDescriptor.getSpecification().getClassName(),
                                               new BasicArguments(), TEMP_FOLDER_SUPPLIER));
  }

  DatasetFramework datasetFramework = AppFabricTestHelper.getInjector().getInstance(DatasetFramework.class);
  DynamicDatasetCache datasetCache = new SingleThreadDatasetCache(
    new SystemDatasetInstantiator(datasetFramework, getClass().getClassLoader(), null),
    AppFabricTestHelper.getInjector().getInstance(TransactionSystemClient.class),
    NamespaceId.DEFAULT, DatasetDefinition.NO_ARGUMENTS, null, null);
  final KeyValueTable accumulated = datasetCache.getDataset("accumulated");
  TransactionExecutorFactory txExecutorFactory =
    AppFabricTestHelper.getInjector().getInstance(TransactionExecutorFactory.class);

  // Try to read the accumulated result and verify it. Expect the result to appear within 60 seconds.
  int trial = 0;
  while (trial < 60) {
    try {
      Transactions.createTransactionExecutor(txExecutorFactory, accumulated)
        .execute(new TransactionExecutor.Subroutine() {
          @Override
          public void apply() throws Exception {
            byte[] value = accumulated.read(MultiApp.KEY);
            // Sum(1..99) * 3
            Assert.assertEquals(((1 + 99) * 99 / 2) * 3, Longs.fromByteArray(value));
          }
        });
      break;
    } catch (TransactionFailureException e) {
      // Result not yet accumulated; retry after a second
      trial++;
      TimeUnit.SECONDS.sleep(1);
    }
  }
  Assert.assertTrue(trial < 60);

  for (ProgramController controller : controllers) {
    controller.stop().get();
  }
}
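Tephra's TransactionExecutor also accepts value-returning work via TransactionExecutor.Function, which would let the read escape the transaction and be asserted outside it. A sketch reusing the `accumulated` and `txExecutorFactory` variables from the test above:

// Sketch: return the accumulated value from the transaction instead of asserting inside it.
long sum = Transactions.createTransactionExecutor(txExecutorFactory, accumulated)
  .execute(new TransactionExecutor.Function<byte[], Long>() {
    @Override
    public Long apply(byte[] key) throws Exception {
      return Longs.fromByteArray(accumulated.read(key));
    }
  }, MultiApp.KEY);
Assert.assertEquals(((1 + 99) * 99 / 2) * 3, sum);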
Use of co.cask.cdap.data2.transaction.TransactionExecutorFactory in project cdap by caskdata.
From the class ProgramScheduleStoreDatasetTest, method testFindSchedulesByEventAndUpdateSchedule().
@Test
public void testFindSchedulesByEventAndUpdateSchedule() throws Exception {
  DatasetFramework dsFramework = getInjector().getInstance(DatasetFramework.class);
  TransactionSystemClient txClient = getInjector().getInstance(TransactionSystemClient.class);
  TransactionExecutorFactory txExecutorFactory = new DynamicTransactionExecutorFactory(txClient);
  final ProgramScheduleStoreDataset store =
    dsFramework.getDataset(Schedulers.STORE_DATASET_ID, new HashMap<String, String>(), null);
  Assert.assertNotNull(store);
  TransactionExecutor txExecutor =
    txExecutorFactory.createExecutor(Collections.singleton((TransactionAware) store));

  final ProgramSchedule sched11 = new ProgramSchedule("sched11", "one partition schedule", PROG1_ID,
                                                      ImmutableMap.of("prop3", "abc"),
                                                      new PartitionTrigger(DS1_ID, 1),
                                                      ImmutableList.<Constraint>of());
  final ProgramSchedule sched12 = new ProgramSchedule("sched12", "two partition schedule", PROG1_ID,
                                                      ImmutableMap.of("propper", "popper"),
                                                      new PartitionTrigger(DS2_ID, 2),
                                                      ImmutableList.<Constraint>of());
  final ProgramSchedule sched22 = new ProgramSchedule("sched22", "twentytwo partition schedule", PROG2_ID,
                                                      ImmutableMap.of("nn", "4"),
                                                      new PartitionTrigger(DS2_ID, 22),
                                                      ImmutableList.<Constraint>of());

  txExecutor.execute(new TransactionExecutor.Subroutine() {
    @Override
    public void apply() throws Exception {
      // An event for DS1 or DS2 should trigger nothing; validate that an empty collection is returned
      Assert.assertTrue(store.findSchedules(Schedulers.triggerKeyForPartition(DS1_ID)).isEmpty());
      Assert.assertTrue(store.findSchedules(Schedulers.triggerKeyForPartition(DS2_ID)).isEmpty());
    }
  });

  txExecutor.execute(new TransactionExecutor.Subroutine() {
    @Override
    public void apply() throws Exception {
      store.addSchedules(ImmutableList.of(sched11, sched12, sched22));
    }
  });

  txExecutor.execute(new TransactionExecutor.Subroutine() {
    @Override
    public void apply() throws Exception {
      // An event for DS1 should trigger only sched11
      Assert.assertEquals(ImmutableSet.of(sched11),
                          toScheduleSet(store.findSchedules(Schedulers.triggerKeyForPartition(DS1_ID))));
      // An event for DS2 triggers only sched12 and sched22
      Assert.assertEquals(ImmutableSet.of(sched12, sched22),
                          toScheduleSet(store.findSchedules(Schedulers.triggerKeyForPartition(DS2_ID))));
    }
  });

  final ProgramSchedule sched11New = new ProgramSchedule(sched11.getName(), "time schedule", PROG1_ID,
                                                         ImmutableMap.of("timeprop", "time"),
                                                         new TimeTrigger("* * * * *"),
                                                         ImmutableList.<Constraint>of());
  final ProgramSchedule sched12New = new ProgramSchedule(sched12.getName(), "one partition schedule", PROG1_ID,
                                                         ImmutableMap.of("pp", "p"),
                                                         new PartitionTrigger(DS1_ID, 2),
                                                         ImmutableList.<Constraint>of());
  final ProgramSchedule sched22New = new ProgramSchedule(sched22.getName(), "one streamsize schedule", PROG2_ID,
                                                         ImmutableMap.of("ss", "s"),
                                                         new StreamSizeTrigger(NS_ID.stream("stream"), 1),
                                                         ImmutableList.<Constraint>of());

  txExecutor.execute(new TransactionExecutor.Subroutine() {
    @Override
    public void apply() throws Exception {
      store.updateSchedule(sched11New);
      store.updateSchedule(sched12New);
      store.updateSchedule(sched22New);
    }
  });

  txExecutor.execute(new TransactionExecutor.Subroutine() {
    @Override
    public void apply() throws Exception {
      // After the update, an event for DS1 should trigger only sched12New
      Assert.assertEquals(ImmutableSet.of(sched12New),
                          toScheduleSet(store.findSchedules(Schedulers.triggerKeyForPartition(DS1_ID))));
      // After the update, an event for DS2 triggers no schedule
      Assert.assertEquals(ImmutableSet.<ProgramSchedule>of(),
                          toScheduleSet(store.findSchedules(Schedulers.triggerKeyForPartition(DS2_ID))));
    }
  });
}
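The five anonymous Subroutine blocks repeat the same boilerplate, so a small helper could collapse it. A sketch, with `run` as a hypothetical helper name; Tephra's executeUnchecked wraps TransactionFailureException in a runtime exception, which keeps the helper signature clean:

// Sketch of a hypothetical helper to trim the repeated Subroutine boilerplate above.
private static void run(TransactionExecutor txExecutor, final Runnable body) {
  txExecutor.executeUnchecked(new TransactionExecutor.Subroutine() {
    @Override
    public void apply() throws Exception {
      body.run();
    }
  });
}

On Java 8 a call site then shrinks to something like run(txExecutor, () -> store.updateSchedule(sched11New)); on Java 7 an anonymous Runnable is needed, which saves little over the original.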
Use of co.cask.cdap.data2.transaction.TransactionExecutorFactory in project cdap by caskdata.
From the class FlowUtils, method configureQueue().
/**
 * Configures all queues being used in a flow.
 *
 * @return a Multimap from flowletId to the QueueNames that the flowlet consumes from
 */
public static Multimap<String, QueueName> configureQueue(Program program, FlowSpecification flowSpec,
                                                         final StreamAdmin streamAdmin, QueueAdmin queueAdmin,
                                                         TransactionExecutorFactory txExecutorFactory) {
  // Generate all queue specifications
  ApplicationId appId = new ApplicationId(program.getNamespaceId(), program.getApplicationId());
  Table<QueueSpecificationGenerator.Node, String, Set<QueueSpecification>> queueSpecs =
    new SimpleQueueSpecificationGenerator(appId).create(flowSpec);

  // For each queue in the flow, gather all consumer group information
  Multimap<QueueName, ConsumerGroupConfig> queueConfigs = HashMultimap.create();

  // Loop through each flowlet and generate the map from consumer flowlet id to queue
  ImmutableSetMultimap.Builder<String, QueueName> resultBuilder = ImmutableSetMultimap.builder();
  for (Map.Entry<String, FlowletDefinition> entry : flowSpec.getFlowlets().entrySet()) {
    String flowletId = entry.getKey();
    for (QueueSpecification queueSpec : Iterables.concat(queueSpecs.column(flowletId).values())) {
      resultBuilder.put(flowletId, queueSpec.getQueueName());
    }
  }

  // For each queue, gather all consumer groups
  for (QueueSpecification queueSpec : Iterables.concat(queueSpecs.values())) {
    QueueName queueName = queueSpec.getQueueName();
    queueConfigs.putAll(queueName, getAllConsumerGroups(program, flowSpec, queueName, queueSpecs));
  }

  try {
    // Configure each stream consumer in the flow. Also collect all queue configurers.
    final List<ConsumerGroupConfigurer> groupConfigurers = Lists.newArrayList();
    final Map<StreamId, Map<Long, Integer>> streamConfigurers = Maps.newHashMap();
    for (Map.Entry<QueueName, Collection<ConsumerGroupConfig>> entry : queueConfigs.asMap().entrySet()) {
      LOG.info("Queue config for {} : {}", entry.getKey(), entry.getValue());
      if (entry.getKey().isStream()) {
        Map<Long, Integer> configs = Maps.newHashMap();
        for (ConsumerGroupConfig config : entry.getValue()) {
          configs.put(config.getGroupId(), config.getGroupSize());
        }
        streamConfigurers.put(entry.getKey().toStreamId(), configs);
      } else {
        groupConfigurers.add(new ConsumerGroupConfigurer(queueAdmin.getQueueConfigurer(entry.getKey()),
                                                         entry.getValue()));
      }
    }

    // Configure the queues transactionally
    try {
      Transactions.createTransactionExecutor(txExecutorFactory, groupConfigurers)
        .execute(new TransactionExecutor.Subroutine() {
          @Override
          public void apply() throws Exception {
            for (ConsumerGroupConfigurer configurer : groupConfigurers) {
              configurer.configure();
            }
            for (Map.Entry<StreamId, Map<Long, Integer>> entry : streamConfigurers.entrySet()) {
              streamAdmin.configureGroups(entry.getKey(), entry.getValue());
            }
          }
        });
    } finally {
      for (ConsumerGroupConfigurer configurer : groupConfigurers) {
        Closeables.closeQuietly(configurer);
      }
    }
    return resultBuilder.build();
  } catch (Exception e) {
    LOG.error("Failed to configure queues", e);
    throw Throwables.propagate(e);
  }
}
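Note that the factory is used here to run all consumer-group changes in one transaction across every queue configurer, so a partially applied configuration cannot be observed. A sketch of a call site for the method above; all variable names are hypothetical:

// Sketch: configure the flow's queues and log which queues each flowlet consumes from.
Multimap<String, QueueName> consumedQueues =
  FlowUtils.configureQueue(program, flowSpec, streamAdmin, queueAdmin, txExecutorFactory);
for (Map.Entry<String, QueueName> entry : consumedQueues.entries()) {
  LOG.info("Flowlet {} consumes from queue {}", entry.getKey(), entry.getValue());
}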