Usage example of co.cask.cdap.data2.transaction.stream.StreamAdmin in project cdap (caskdata): class StreamCoordinatorTestBase, method testDeleteStream.
@Test
public void testDeleteStream() throws Exception {
// Creates a stream, registers a deletion listener, drops the stream, and
// verifies the listener observes the deletion within 5 seconds.
final StreamId streamId = NamespaceId.DEFAULT.stream("test");
StreamAdmin streamAdmin = getStreamAdmin();
streamAdmin.create(streamId);
Assert.assertTrue(streamAdmin.exists(streamId));
final CountDownLatch deletionLatch = new CountDownLatch(1);
StreamCoordinatorClient coordinatorClient = getStreamCoordinator();
coordinatorClient.addListener(streamId, new StreamPropertyListener() {
@Override
public void deleted(StreamId deletedId) {
// Only react to the stream under test.
if (streamId.equals(deletedId)) {
deletionLatch.countDown();
}
}
});
streamAdmin.drop(streamId);
Assert.assertTrue(deletionLatch.await(5, TimeUnit.SECONDS));
}
Usage example of co.cask.cdap.data2.transaction.stream.StreamAdmin in project cdap (caskdata): class StreamCoordinatorTestBase, method testConfig.
@Test
public void testConfig() throws Exception {
// Verifies that concurrent updates to a stream's TTL and notification threshold are
// delivered to a StreamPropertyListener and that the final persisted config is correct.
final StreamAdmin streamAdmin = getStreamAdmin();
final String streamName = "testConfig";
final StreamId streamId = NamespaceId.DEFAULT.stream(streamName);
streamAdmin.create(streamId);
StreamCoordinatorClient coordinator = getStreamCoordinator();
// Deques collect every property change the listener observes; only the tail is asserted.
final BlockingDeque<Integer> thresholds = new LinkedBlockingDeque<>();
final BlockingDeque<Long> ttls = new LinkedBlockingDeque<>();
coordinator.addListener(streamId, new StreamPropertyListener() {
@Override
public void thresholdChanged(StreamId streamId, int threshold) {
thresholds.add(threshold);
}
@Override
public void ttlChanged(StreamId streamId, long ttl) {
ttls.add(ttl);
}
});
// Have two threads, one update the threshold, one update the ttl
// The barrier makes both threads begin updating at the same moment.
final CyclicBarrier barrier = new CyclicBarrier(2);
final CountDownLatch completeLatch = new CountDownLatch(2);
for (int i = 0; i < 2; i++) {
final int threadId = i;
Thread t = new Thread() {
@Override
public void run() {
try {
barrier.await();
// Thread 0 updates only the TTL, thread 1 updates only the threshold;
// a null field in StreamProperties leaves that property unchanged.
for (int i = 0; i < 100; i++) {
Long ttl = (threadId == 0) ? (long) (i * 1000) : null;
Integer threshold = (threadId == 1) ? i : null;
streamAdmin.updateConfig(streamId, new StreamProperties(ttl, null, threshold));
}
completeLatch.countDown();
} catch (Exception e) {
// A failure here kills the thread; completeLatch then times out and the test fails.
throw Throwables.propagate(e);
}
}
};
t.start();
}
Assert.assertTrue(completeLatch.await(60, TimeUnit.SECONDS));
// Check the last threshold and ttl are correct. We don't check if the listener gets every update as it's
// possible that it doesn't see every updates, but only the latest value (that's what ZK watch guarantees).
Assert.assertTrue(validateLastElement(thresholds, 99));
Assert.assertTrue(validateLastElement(ttls, 99000L));
// Verify the config is right
StreamConfig config = streamAdmin.getConfig(streamId);
Assert.assertEquals(99, config.getNotificationThresholdMB());
Assert.assertEquals(99000L, config.getTTL());
}
Usage example of co.cask.cdap.data2.transaction.stream.StreamAdmin in project cdap (caskdata): class StreamFileJanitorTestBase, method testCleanupDeletedStream.
@Test
public void testCleanupDeletedStream() throws Exception {
// Ensures the janitor runs cleanly over a stream that has already been dropped.
StreamAdmin streamAdmin = getStreamAdmin();
StreamId streamId = NamespaceId.DEFAULT.stream("cleanupDelete");
StreamFileJanitor janitor = new StreamFileJanitor(getCConfiguration(), streamAdmin, getNamespacedLocationFactory(), getNamespaceAdmin(), impersonator);
streamAdmin.create(streamId);
// Populate the stream with a handful of events so there are files on disk.
try (FileWriter<StreamEvent> eventWriter = createWriter(streamId)) {
int eventCount = 0;
while (eventCount < 10) {
eventWriter.append(StreamFileTestUtils.createEvent(eventCount * 1000, "Testing " + eventCount));
eventCount++;
}
}
// Drop the stream, then verify the janitor tolerates the deleted stream without throwing.
streamAdmin.drop(streamId);
janitor.cleanAll();
}
Usage example of co.cask.cdap.data2.transaction.stream.StreamAdmin in project cdap (caskdata): class StreamFileJanitorTestBase, method testCleanupTTL.
@Test
public void testCleanupTTL() throws Exception {
// Verifies that the janitor removes stream partitions whose data has passed the TTL.
// Create a stream with 5 seconds TTL, partition duration of 2 seconds
String streamName = "testCleanupTTL";
StreamId streamId = NamespaceId.DEFAULT.stream(streamName);
StreamAdmin streamAdmin = getStreamAdmin();
// Reuse the streamAdmin local instead of calling getStreamAdmin() a second time
// (consistent with testCleanupDeletedStream).
StreamFileJanitor janitor = new StreamFileJanitor(getCConfiguration(), streamAdmin, getNamespacedLocationFactory(), getNamespaceAdmin(), impersonator);
Properties properties = new Properties();
properties.setProperty(Constants.Stream.PARTITION_DURATION, "2000");
properties.setProperty(Constants.Stream.TTL, "5000");
streamAdmin.create(streamId, properties);
// Truncate to increment generation to 1. This makes the verification condition easier (won't affect correctness).
streamAdmin.truncate(streamId);
StreamConfig config = streamAdmin.getConfig(streamId);
// Write data with different timestamps that spans across 5 partitions.
// try-with-resources guarantees the writer is closed even if append() throws.
try (FileWriter<StreamEvent> writer = createWriter(streamId)) {
for (int i = 0; i < 10; i++) {
writer.append(StreamFileTestUtils.createEvent(i * 1000, "Testing " + i));
}
}
// Should see 5 partitions
Location generationLocation = StreamUtils.createGenerationLocation(config.getLocation(), 1);
Assert.assertEquals(5, generationLocation.list().size());
// Perform clean with current time = 10000 (10 seconds since epoch).
// Since TTL = 5 seconds, 2 partitions will be removed (the ones ending at 2000 and at 4000).
janitor.clean(config.getLocation(), config.getTTL(), 10000);
Assert.assertEquals(3, generationLocation.list().size());
// Cleanup again with current time = 16000, all partitions should be deleted.
janitor.clean(config.getLocation(), config.getTTL(), 16000);
Assert.assertTrue(generationLocation.list().isEmpty());
}
Usage example of co.cask.cdap.data2.transaction.stream.StreamAdmin in project cdap (caskdata): class BaseHiveExploreServiceTest, method initialize.
protected static void initialize(CConfiguration cConf, TemporaryFolder tmpFolder, boolean useStandalone, boolean enableAuthorization) throws Exception {
// Boots the shared Guice injector and the services the Explore tests rely on
// (transactions, datasets, explore, notifications, streams), then creates the test namespaces.
// NOTE(review): assigns many static fields; presumably paired with a teardown elsewhere — confirm.
if (!runBefore) {
return;
}
Configuration hConf = new Configuration();
if (enableAuthorization) {
// Package the in-memory authorizer into a jar and register it as the authorization extension.
LocationFactory locationFactory = new LocalLocationFactory(tmpFolder.newFolder());
Location authExtensionJar = AppJarHelper.createDeploymentJar(locationFactory, InMemoryAuthorizer.class);
cConf.setBoolean(Constants.Security.ENABLED, true);
cConf.setBoolean(Constants.Security.Authorization.ENABLED, true);
cConf.set(Constants.Security.Authorization.EXTENSION_JAR_PATH, authExtensionJar.toURI().getPath());
cConf.setBoolean(Constants.Security.KERBEROS_ENABLED, false);
// A zero-entry cache means authorization decisions are never cached in tests.
cConf.setInt(Constants.Security.Authorization.CACHE_MAX_ENTRIES, 0);
}
List<Module> modules = useStandalone ? createStandaloneModules(cConf, hConf, tmpFolder) : createInMemoryModules(cConf, hConf, tmpFolder);
injector = Guice.createInjector(modules);
if (enableAuthorization) {
injector.getInstance(AuthorizationBootstrapper.class).run();
}
// Start services in order: transactions, then dataset services, then explore,
// then notifications and streams.
transactionManager = injector.getInstance(TransactionManager.class);
transactionManager.startAndWait();
transactionSystemClient = injector.getInstance(TransactionSystemClient.class);
dsOpService = injector.getInstance(DatasetOpExecutor.class);
dsOpService.startAndWait();
datasetService = injector.getInstance(DatasetService.class);
datasetService.startAndWait();
exploreExecutorService = injector.getInstance(ExploreExecutorService.class);
exploreExecutorService.startAndWait();
datasetFramework = injector.getInstance(DatasetFramework.class);
exploreClient = injector.getInstance(DiscoveryExploreClient.class);
exploreService = injector.getInstance(ExploreService.class);
// Ping verifies the explore service is reachable before tests run.
exploreClient.ping();
notificationService = injector.getInstance(NotificationService.class);
notificationService.startAndWait();
streamService = injector.getInstance(StreamService.class);
streamService.startAndWait();
streamHttpService = injector.getInstance(StreamHttpService.class);
streamHttpService.startAndWait();
exploreTableManager = injector.getInstance(ExploreTableManager.class);
streamAdmin = injector.getInstance(StreamAdmin.class);
streamMetaStore = injector.getInstance(StreamMetaStore.class);
namespaceAdmin = injector.getInstance(NamespaceAdmin.class);
namespacedLocationFactory = injector.getInstance(NamespacedLocationFactory.class);
// create namespaces
// This happens when you create a namespace via REST APIs. However, since we do not start AppFabricServer in
// Explore tests, simulating that scenario by explicitly calling DatasetFramework APIs.
createNamespace(NamespaceId.DEFAULT);
createNamespace(NAMESPACE_ID);
createNamespace(OTHER_NAMESPACE_ID);
}
End of aggregated StreamAdmin usage examples.