Use of co.cask.cdap.data2.transaction.stream.StreamAdmin in project cdap by caskdata.
The class AbstractStreamFileConsumerFactory, method create.
@Override
public final StreamConsumer create(StreamId streamId, String namespace,
                                   ConsumerConfig consumerConfig) throws IOException {
  StreamConfig streamConfig = StreamUtils.ensureExists(streamAdmin, streamId);
  TableId tableId = getTableId(streamId, namespace);
  StreamConsumerStateStore stateStore = stateStoreFactory.create(streamConfig);
  StreamConsumerState consumerState = stateStore.get(consumerConfig.getGroupId(),
                                                     consumerConfig.getInstanceId());
  return create(tableId, streamConfig, consumerConfig, stateStore, consumerState,
                createReader(streamConfig, consumerState),
                new TTLReadFilter(streamConfig.getTTL()));
}
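For orientation, here is a minimal sketch of how calling code might use such a factory. Everything concrete below is an illustrative assumption, not taken from the snippet: the consumerFactory variable, the "purchases" stream, the "queue.namespace" string, and the ConsumerConfig constructor arguments (groupId, instanceId, groupSize, strategy, hashKey).

// Sketch of a hypothetical caller; all concrete values are assumptions.
void consumeOnce(AbstractStreamFileConsumerFactory consumerFactory) throws Exception {
  StreamId streamId = NamespaceId.DEFAULT.stream("purchases");    // assumed stream name
  ConsumerConfig consumerConfig =
      new ConsumerConfig(0L, 0, 1, DequeueStrategy.FIFO, null);   // assumed ctor arguments
  StreamConsumer consumer = consumerFactory.create(streamId, "queue.namespace", consumerConfig);
  try {
    // ... poll the consumer for StreamEvents inside a transaction ...
  } finally {
    consumer.close();
  }
}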
Use of co.cask.cdap.data2.transaction.stream.StreamAdmin in project cdap by caskdata.
The class SparkProgramRunner, method run.
@Override
public ProgramController run(Program program, ProgramOptions options) {
  // Get the RunId first. It is used for the creation of the ClassLoader closing thread.
  Arguments arguments = options.getArguments();
  RunId runId = ProgramRunners.getRunId(options);
  Deque<Closeable> closeables = new LinkedList<>();
  try {
    // Extract and verify parameters
    ApplicationSpecification appSpec = program.getApplicationSpecification();
    Preconditions.checkNotNull(appSpec, "Missing application specification.");
    ProgramType processorType = program.getType();
    Preconditions.checkNotNull(processorType, "Missing processor type.");
    Preconditions.checkArgument(processorType == ProgramType.SPARK,
                                "Only Spark process type is supported.");
    SparkSpecification spec = appSpec.getSpark().get(program.getName());
    Preconditions.checkNotNull(spec, "Missing SparkSpecification for %s", program.getName());
    String host = options.getArguments().getOption(ProgramOptionConstants.HOST);
    Preconditions.checkArgument(host != null, "No hostname is provided");
    // Get the WorkflowProgramInfo if the program was started by a Workflow
    WorkflowProgramInfo workflowInfo = WorkflowProgramInfo.create(arguments);
    DatasetFramework programDatasetFramework = workflowInfo == null
      ? datasetFramework
      : NameMappedDatasetFramework.createFromWorkflowProgramInfo(datasetFramework,
                                                                 workflowInfo, appSpec);
    // Set up the dataset framework context, if required
    if (programDatasetFramework instanceof ProgramContextAware) {
      ProgramId programId = program.getId();
      ((ProgramContextAware) programDatasetFramework)
        .setContext(new BasicProgramContext(programId.run(runId)));
    }
    PluginInstantiator pluginInstantiator = createPluginInstantiator(options, program.getClassLoader());
    if (pluginInstantiator != null) {
      closeables.addFirst(pluginInstantiator);
    }
    SparkRuntimeContext runtimeContext = new SparkRuntimeContext(
      new Configuration(hConf), program, options, cConf, host, txClient,
      programDatasetFramework, discoveryServiceClient, metricsCollectionService,
      streamAdmin, workflowInfo, pluginInstantiator, secureStore, secureStoreManager,
      authorizationEnforcer, authenticationContext, messagingService, serviceAnnouncer,
      pluginFinder, locationFactory);
    closeables.addFirst(runtimeContext);
    Spark spark;
    try {
      spark = new InstantiatorFactory(false)
        .get(TypeToken.of(program.<Spark>getMainClass())).create();
    } catch (Exception e) {
      LOG.error("Failed to instantiate Spark class for {}", spec.getClassName(), e);
      throw Throwables.propagate(e);
    }
    SparkSubmitter submitter = SparkRuntimeContextConfig.isLocal(hConf)
      ? new LocalSparkSubmitter()
      : new DistributedSparkSubmitter(hConf, locationFactory, host, runtimeContext,
          options.getArguments().getOption(Constants.AppFabric.APP_SCHEDULER_QUEUE));
    Service sparkRuntimeService = new SparkRuntimeService(cConf, spark, getPluginArchive(options),
                                                          runtimeContext, submitter, locationFactory);
    sparkRuntimeService.addListener(createRuntimeServiceListener(closeables),
                                    Threads.SAME_THREAD_EXECUTOR);
    ProgramController controller = new SparkProgramController(sparkRuntimeService, runtimeContext);
    LOG.debug("Starting Spark Job. Context: {}", runtimeContext);
    if (SparkRuntimeContextConfig.isLocal(hConf) || UserGroupInformation.isSecurityEnabled()) {
      sparkRuntimeService.start();
    } else {
      ProgramRunners.startAsUser(cConf.get(Constants.CFG_HDFS_USER), sparkRuntimeService);
    }
    return controller;
  } catch (Throwable t) {
    closeAllQuietly(closeables);
    throw Throwables.propagate(t);
  }
}
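The catch block relies on a closeAllQuietly helper that is not shown in this snippet. A plausible shape for it, as a sketch rather than the actual CDAP implementation:

// Close every resource in order, swallowing failures so that one bad
// Closeable cannot prevent the rest from being released. Sketch only;
// the real helper in CDAP may differ.
private static void closeAllQuietly(Iterable<? extends Closeable> closeables) {
  for (Closeable closeable : closeables) {
    try {
      closeable.close();
    } catch (Exception e) {
      LOG.warn("Exception when closing {}", closeable, e);
    }
  }
}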
Use of co.cask.cdap.data2.transaction.stream.StreamAdmin in project cdap by caskdata.
The class InMemoryStreamCoordinatorClientTest, method init.
@BeforeClass
public static void init() throws Exception {
  cConf.set(Constants.CFG_LOCAL_DATA_DIR, tmpFolder.newFolder().getAbsolutePath());
  Injector injector = Guice.createInjector(
    new ConfigModule(cConf),
    new DiscoveryRuntimeModule().getInMemoryModules(),
    new SystemDatasetRuntimeModule().getInMemoryModules(),
    Modules.override(new DataSetsModules().getInMemoryModules()).with(new AbstractModule() {
      @Override
      protected void configure() {
        // Bind to an in-memory implementation for this test, since DefaultOwnerStore
        // uses transactions and this test does not start a transaction service.
        bind(OwnerStore.class).to(InMemoryOwnerStore.class).in(Scopes.SINGLETON);
      }
    }),
    new DataFabricModules().getInMemoryModules(),
    new NonCustomLocationUnitTestModule().getModule(),
    new TransactionMetricsModule(),
    new NotificationFeedServiceRuntimeModule().getInMemoryModules(),
    new ExploreClientModule(),
    new ViewAdminModules().getInMemoryModules(),
    new AuthorizationTestModule(),
    new AuthenticationContextModules().getNoOpModule(),
    new AuthorizationEnforcementModule().getInMemoryModules(),
    Modules.override(new StreamAdminModules().getInMemoryModules()).with(new AbstractModule() {
      @Override
      protected void configure() {
        bind(StreamMetaStore.class).to(InMemoryStreamMetaStore.class);
        bind(UGIProvider.class).to(UnsupportedUGIProvider.class);
        bind(OwnerAdmin.class).to(DefaultOwnerAdmin.class);
        bind(NamespaceQueryAdmin.class).to(SimpleNamespaceQueryAdmin.class);
      }
    }));
  setupNamespaces(injector.getInstance(NamespacedLocationFactory.class));
  streamAdmin = injector.getInstance(StreamAdmin.class);
  coordinatorClient = injector.getInstance(StreamCoordinatorClient.class);
  coordinatorClient.startAndWait();
}
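The Modules.override(...).with(...) idiom above does the heavy lifting: the test reuses the production modules wholesale while swapping individual bindings. A self-contained sketch of the pattern, with hypothetical Store types standing in for the CDAP bindings:

import com.google.inject.AbstractModule;
import com.google.inject.Guice;
import com.google.inject.Injector;
import com.google.inject.Module;
import com.google.inject.Scopes;
import com.google.inject.util.Modules;

public class OverrideDemo {
  interface Store {}
  static class DefaultStore implements Store {}
  static class InMemoryStore implements Store {}

  public static void main(String[] args) {
    Module production = new AbstractModule() {
      @Override
      protected void configure() {
        bind(Store.class).to(DefaultStore.class);
      }
    };
    // Wherever both modules bind the same key, the override module wins.
    Injector injector = Guice.createInjector(
      Modules.override(production).with(new AbstractModule() {
        @Override
        protected void configure() {
          bind(Store.class).to(InMemoryStore.class).in(Scopes.SINGLETON);
        }
      }));
    // Prints "InMemoryStore": the test binding replaced the production one.
    System.out.println(injector.getInstance(Store.class).getClass().getSimpleName());
  }
}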
Use of co.cask.cdap.data2.transaction.stream.StreamAdmin in project cdap by caskdata.
The class LocalStreamFileJanitorTest, method init.
@BeforeClass
public static void init() throws IOException {
  cConf.set(Constants.CFG_LOCAL_DATA_DIR, tmpFolder.newFolder().getAbsolutePath());
  setupAuthzConfig();
  Injector injector = Guice.createInjector(
    new ConfigModule(cConf),
    new NonCustomLocationUnitTestModule().getModule(),
    new SystemDatasetRuntimeModule().getInMemoryModules(),
    Modules.override(new DataSetsModules().getInMemoryModules()).with(new AbstractModule() {
      @Override
      protected void configure() {
        // Bind to an in-memory implementation for this test, since DefaultOwnerStore
        // uses transactions and this test does not start a transaction service.
        bind(OwnerStore.class).to(InMemoryOwnerStore.class).in(Scopes.SINGLETON);
      }
    }),
    new TransactionMetricsModule(),
    new DataFabricLevelDBModule(),
    new DiscoveryRuntimeModule().getInMemoryModules(),
    new NamespaceClientRuntimeModule().getInMemoryModules(),
    new ExploreClientModule(),
    new ViewAdminModules().getInMemoryModules(),
    new AuthorizationTestModule(),
    new AuthorizationEnforcementModule().getInMemoryModules(),
    new AuthenticationContextModules().getMasterModule(),
    Modules.override(new StreamAdminModules().getStandaloneModules()).with(new AbstractModule() {
      @Override
      protected void configure() {
        bind(StreamMetaStore.class).to(InMemoryStreamMetaStore.class);
        bind(UGIProvider.class).to(UnsupportedUGIProvider.class);
      }
    }),
    new AbstractModule() {
      @Override
      protected void configure() {
        // Notifications are not needed in this test, so inject a no-op implementation.
        bind(NotificationFeedManager.class).to(NoOpNotificationFeedManager.class);
        bind(NamespaceStore.class).to(InMemoryNamespaceStore.class);
        bind(OwnerAdmin.class).to(DefaultOwnerAdmin.class);
      }
    });
  locationFactory = injector.getInstance(LocationFactory.class);
  namespaceAdmin = injector.getInstance(NamespaceAdmin.class);
  namespacedLocationFactory = injector.getInstance(NamespacedLocationFactory.class);
  namespaceStore = injector.getInstance(NamespaceStore.class);
  streamAdmin = injector.getInstance(StreamAdmin.class);
  janitor = injector.getInstance(StreamFileJanitor.class);
  fileWriterFactory = injector.getInstance(StreamFileWriterFactory.class);
  streamCoordinatorClient = injector.getInstance(StreamCoordinatorClient.class);
  authorizer = injector.getInstance(AuthorizerInstantiator.class).get();
  streamCoordinatorClient.startAndWait();
}
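A matching teardown for this setup would stop the service started at the end of init(). A minimal sketch, assuming nothing else in the test requires explicit shutdown:

// Stop the coordinator client started in init(); a sketch, not the actual test code.
@AfterClass
public static void finish() {
  streamCoordinatorClient.stopAndWait();
}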
Use of co.cask.cdap.data2.transaction.stream.StreamAdmin in project cdap by caskdata.
The class StreamCoordinatorTestBase, method testGeneration.
@Test
public void testGeneration() throws Exception {
  final StreamAdmin streamAdmin = getStreamAdmin();
  final String streamName = "testGen";
  final StreamId streamId = NamespaceId.DEFAULT.stream(streamName);
  streamAdmin.create(streamId);
  StreamCoordinatorClient coordinator = getStreamCoordinator();
  final CountDownLatch genIdChanged = new CountDownLatch(1);
  coordinator.addListener(streamId, new StreamPropertyListener() {
    @Override
    public void generationChanged(StreamId streamId, int generation) {
      if (generation == 10) {
        genIdChanged.countDown();
      }
    }
  });
  // Make concurrent calls to nextGeneration from two threads. Each truncate
  // advances the generation by one, so two threads doing five truncates each
  // should end at generation 10, releasing the latch above.
  final CyclicBarrier barrier = new CyclicBarrier(2);
  for (int i = 0; i < 2; i++) {
    Thread t = new Thread() {
      @Override
      public void run() {
        try {
          barrier.await();
          for (int i = 0; i < 5; i++) {
            streamAdmin.truncate(streamId);
          }
        } catch (Exception e) {
          throw Throwables.propagate(e);
        }
      }
    };
    t.start();
  }
  Assert.assertTrue(genIdChanged.await(10, TimeUnit.SECONDS));
}