use of co.cask.cdap.data2.transaction.stream.StreamAdmin in project cdap by caskdata.
the class StreamFileJanitorTestBase method testCleanupGeneration.
@Test
public void testCleanupGeneration() throws Exception {
  // Create a stream and perform a couple of truncates
  String streamName = "testCleanupGeneration";
  StreamId streamId = NamespaceId.DEFAULT.stream(streamName);
  StreamAdmin streamAdmin = getStreamAdmin();
  streamAdmin.create(streamId);
  StreamConfig streamConfig = streamAdmin.getConfig(streamId);
  StreamFileJanitor janitor = new StreamFileJanitor(getCConfiguration(), getStreamAdmin(),
                                                    getNamespacedLocationFactory(), getNamespaceAdmin(), impersonator);
  for (int i = 0; i < 5; i++) {
    FileWriter<StreamEvent> writer = createWriter(streamId);
    writer.append(StreamFileTestUtils.createEvent(System.currentTimeMillis(), "Testing"));
    writer.close();
    // Call cleanup before truncate. The current generation should remain intact.
    janitor.clean(streamConfig.getLocation(), streamConfig.getTTL(), System.currentTimeMillis());
    verifyGeneration(streamConfig, i);
    streamAdmin.truncate(streamId);
  }
  int generation = StreamUtils.getGeneration(streamConfig);
  Assert.assertEquals(5, generation);
  janitor.clean(streamConfig.getLocation(), streamConfig.getTTL(), System.currentTimeMillis());
  // Verify that the stream directory only contains the current generation directory
  for (Location location : streamConfig.getLocation().list()) {
    if (location.isDirectory()) {
      Assert.assertEquals(generation, Integer.parseInt(location.getName()));
    }
  }
}
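The verifyGeneration helper called inside the loop is not part of this excerpt; a minimal sketch of what such a check could look like, assuming it only compares the stream's on-disk generation with the expected value via the same StreamUtils call used at the end of the test:
// Hypothetical helper, not shown in the excerpt above: asserts that the stream's
// current generation matches the expected value.
private void verifyGeneration(StreamConfig streamConfig, int expectedGeneration) throws Exception {
  Assert.assertEquals(expectedGeneration, StreamUtils.getGeneration(streamConfig));
}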
use of co.cask.cdap.data2.transaction.stream.StreamAdmin in project cdap by caskdata.
the class AbstractStreamFileConsumerFactory method create.
@Override
public final StreamConsumer create(StreamId streamId, String namespace,
                                    ConsumerConfig consumerConfig) throws IOException {
  StreamConfig streamConfig = StreamUtils.ensureExists(streamAdmin, streamId);
  TableId tableId = getTableId(streamId, namespace);
  StreamConsumerStateStore stateStore = stateStoreFactory.create(streamConfig);
  StreamConsumerState consumerState = stateStore.get(consumerConfig.getGroupId(), consumerConfig.getInstanceId());
  return create(tableId, streamConfig, consumerConfig, stateStore, consumerState,
                createReader(streamConfig, consumerState), new TTLReadFilter(streamConfig.getTTL()));
}
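For context, a hedged sketch of how calling code might use this factory method; consumerFactory, queueNamespace and consumerConfig are placeholders assumed to be supplied by the surrounding code, and the returned StreamConsumer is assumed to be Closeable:
// Sketch only: the factory resolves the stream config, table id and persisted consumer
// state internally, so the caller just supplies the stream id, namespace and consumer config.
StreamId streamId = NamespaceId.DEFAULT.stream("events");
StreamConsumer consumer = consumerFactory.create(streamId, queueNamespace, consumerConfig);
try {
  // consume events transactionally here
} finally {
  consumer.close();
}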
use of co.cask.cdap.data2.transaction.stream.StreamAdmin in project cdap by caskdata.
the class ContextManager method createContext.
// This method is called by the mappers/reducers of jobs launched by Hive.
private static Context createContext(Configuration conf) throws IOException {
  // Creating the context is only needed when running as a MapReduce job.
  // In other cases, ContextManager will be initialized using the saveContext method.
  CConfiguration cConf = ConfigurationUtil.get(conf, Constants.Explore.CCONF_KEY, CConfCodec.INSTANCE);
  Configuration hConf = ConfigurationUtil.get(conf, Constants.Explore.HCONF_KEY, HConfCodec.INSTANCE);
  Injector injector = createInjector(cConf, hConf);
  ZKClientService zkClientService = injector.getInstance(ZKClientService.class);
  zkClientService.startAndWait();
  DatasetFramework datasetFramework = injector.getInstance(DatasetFramework.class);
  StreamAdmin streamAdmin = injector.getInstance(StreamAdmin.class);
  SystemDatasetInstantiatorFactory datasetInstantiatorFactory = injector.getInstance(SystemDatasetInstantiatorFactory.class);
  AuthenticationContext authenticationContext = injector.getInstance(AuthenticationContext.class);
  AuthorizationEnforcer authorizationEnforcer = injector.getInstance(AuthorizationEnforcer.class);
  return new Context(datasetFramework, streamAdmin, zkClientService, datasetInstantiatorFactory,
                     authenticationContext, authorizationEnforcer);
}
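For the other direction, a hedged sketch of how the launching side could place the two configurations into the job Configuration under the keys read above; it assumes ConfigurationUtil exposes a set(...) counterpart to the get(...) calls shown, which is not confirmed by this excerpt, and jobConf stands for the job's Hadoop Configuration:
// Sketch only, assuming a symmetric ConfigurationUtil.set(...) exists: serialize the CDAP
// and Hadoop configurations into the job conf so createContext can decode them in the tasks.
ConfigurationUtil.set(jobConf, Constants.Explore.CCONF_KEY, CConfCodec.INSTANCE, cConf);
ConfigurationUtil.set(jobConf, Constants.Explore.HCONF_KEY, HConfCodec.INSTANCE, hConf);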
use of co.cask.cdap.data2.transaction.stream.StreamAdmin in project cdap by caskdata.
the class DistributedStreamCoordinatorClientTest method init.
@BeforeClass
public static void init() throws Exception {
  zkServer = InMemoryZKServer.builder().setDataDir(tmpFolder.newFolder()).build();
  zkServer.startAndWait();
  Configuration hConf = new Configuration();
  hConf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, tmpFolder.newFolder().getAbsolutePath());
  dfsCluster = new MiniDFSCluster.Builder(hConf).numDataNodes(1).build();
  dfsCluster.waitClusterUp();
  final LocationFactory lf = new FileContextLocationFactory(dfsCluster.getFileSystem().getConf());
  final NamespacedLocationFactory nlf = new NamespacedLocationFactoryTestClient(cConf, lf);
  cConf.set(Constants.Zookeeper.QUORUM, zkServer.getConnectionStr());
  Injector injector = Guice.createInjector(
    new ConfigModule(cConf),
    new ZKClientModule(),
    new DiscoveryRuntimeModule().getDistributedModules(),
    new DataFabricModules().getDistributedModules(),
    Modules.override(new DataSetsModules().getDistributedModules()).with(new AbstractModule() {
      @Override
      protected void configure() {
        bind(MetadataStore.class).to(NoOpMetadataStore.class);
        // Bind to an in-memory implementation for this test, since DefaultOwnerStore uses
        // transactions and this test does not start a transaction service.
        bind(OwnerStore.class).to(InMemoryOwnerStore.class).in(Scopes.SINGLETON);
      }
    }),
    new TransactionMetricsModule(),
    new NotificationFeedServiceRuntimeModule().getInMemoryModules(),
    new AbstractModule() {
      @Override
      protected void configure() {
        bind(LocationFactory.class).toInstance(lf);
        bind(NamespacedLocationFactory.class).toInstance(nlf);
        bind(NamespaceQueryAdmin.class).to(SimpleNamespaceQueryAdmin.class);
        bind(UGIProvider.class).to(UnsupportedUGIProvider.class);
        bind(OwnerAdmin.class).to(DefaultOwnerAdmin.class);
      }
    },
    new ExploreClientModule(),
    new ViewAdminModules().getInMemoryModules(),
    Modules.override(new StreamAdminModules().getDistributedModules()).with(new AbstractModule() {
      @Override
      protected void configure() {
        bind(StreamMetaStore.class).to(InMemoryStreamMetaStore.class);
      }
    }),
    new AuthorizationTestModule(),
    new AuthorizationEnforcementModule().getInMemoryModules(),
    new AuthenticationContextModules().getMasterModule());
  zkClient = injector.getInstance(ZKClientService.class);
  zkClient.startAndWait();
  setupNamespaces(injector.getInstance(NamespacedLocationFactory.class));
  streamAdmin = injector.getInstance(StreamAdmin.class);
  coordinatorClient = injector.getInstance(StreamCoordinatorClient.class);
  coordinatorClient.startAndWait();
}
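Both test classes call a setupNamespaces helper that is not included in this excerpt; a plausible minimal sketch, assuming it only needs to pre-create the default namespace directory so stream locations can be resolved:
// Hypothetical helper, not shown above: ensure the default namespace directory exists.
private static void setupNamespaces(NamespacedLocationFactory namespacedLocationFactory) throws IOException {
  namespacedLocationFactory.get(NamespaceId.DEFAULT).mkdirs();
}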
use of co.cask.cdap.data2.transaction.stream.StreamAdmin in project cdap by caskdata.
the class InMemoryStreamCoordinatorClientTest method init.
@BeforeClass
public static void init() throws Exception {
  cConf.set(Constants.CFG_LOCAL_DATA_DIR, tmpFolder.newFolder().getAbsolutePath());
  Injector injector = Guice.createInjector(
    new ConfigModule(cConf),
    new DiscoveryRuntimeModule().getInMemoryModules(),
    new SystemDatasetRuntimeModule().getInMemoryModules(),
    Modules.override(new DataSetsModules().getInMemoryModules()).with(new AbstractModule() {
      @Override
      protected void configure() {
        // Bind to an in-memory implementation for this test, since DefaultOwnerStore uses
        // transactions and this test does not start a transaction service.
        bind(OwnerStore.class).to(InMemoryOwnerStore.class).in(Scopes.SINGLETON);
      }
    }),
    new DataFabricModules().getInMemoryModules(),
    new NonCustomLocationUnitTestModule().getModule(),
    new TransactionMetricsModule(),
    new NotificationFeedServiceRuntimeModule().getInMemoryModules(),
    new ExploreClientModule(),
    new ViewAdminModules().getInMemoryModules(),
    new AuthorizationTestModule(),
    new AuthenticationContextModules().getNoOpModule(),
    new AuthorizationEnforcementModule().getInMemoryModules(),
    Modules.override(new StreamAdminModules().getInMemoryModules()).with(new AbstractModule() {
      @Override
      protected void configure() {
        bind(StreamMetaStore.class).to(InMemoryStreamMetaStore.class);
        bind(UGIProvider.class).to(UnsupportedUGIProvider.class);
        bind(OwnerAdmin.class).to(DefaultOwnerAdmin.class);
        bind(NamespaceQueryAdmin.class).to(SimpleNamespaceQueryAdmin.class);
      }
    }));
  setupNamespaces(injector.getInstance(NamespacedLocationFactory.class));
  streamAdmin = injector.getInstance(StreamAdmin.class);
  coordinatorClient = injector.getInstance(StreamCoordinatorClient.class);
  coordinatorClient.startAndWait();
}
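The matching @AfterClass teardown is not included in this excerpt; a minimal hedged sketch for the in-memory variant, assuming only the coordinator client started in init() needs to be stopped:
@AfterClass
public static void finish() {
  // Sketch only: stop the service started in init(); the distributed variant would also
  // stop the ZooKeeper client, the mini DFS cluster and the in-memory ZooKeeper server.
  coordinatorClient.stopAndWait();
}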