Use of co.cask.cdap.common.guice.DiscoveryRuntimeModule in project cdap by caskdata.
The class ResourceCoordinatorTest, method testAssignment:
@Test
public void testAssignment() throws InterruptedException, ExecutionException {
  CConfiguration cConf = CConfiguration.create();
  cConf.set(Constants.Zookeeper.QUORUM, zkServer.getConnectionStr());
  String serviceName = "test-assignment";
  Injector injector = Guice.createInjector(new ConfigModule(cConf), new ZKClientModule(),
                                           new DiscoveryRuntimeModule().getDistributedModules());
  ZKClientService zkClient = injector.getInstance(ZKClientService.class);
  zkClient.startAndWait();
  DiscoveryService discoveryService = injector.getInstance(DiscoveryService.class);
  try {
    ResourceCoordinator coordinator = new ResourceCoordinator(zkClient,
                                                              injector.getInstance(DiscoveryServiceClient.class),
                                                              new BalancedAssignmentStrategy());
    coordinator.startAndWait();
    try {
      ResourceCoordinatorClient client = new ResourceCoordinatorClient(zkClient);
      client.startAndWait();
      try {
        // Create a requirement
        ResourceRequirement requirement = ResourceRequirement.builder(serviceName).addPartitions("p", 5, 1).build();
        client.submitRequirement(requirement).get();
        // Fetch the requirement, just to verify it's the same as the one that was submitted.
        Assert.assertEquals(requirement, client.fetchRequirement(requirement.getName()).get());
        // Register a discovery endpoint
        final Discoverable discoverable1 = createDiscoverable(serviceName, 10000);
        Cancellable cancelDiscoverable1 = discoveryService.register(ResolvingDiscoverable.of(discoverable1));
        // Add a change handler for this discoverable.
        final BlockingQueue<Collection<PartitionReplica>> assignmentQueue = new SynchronousQueue<>();
        final Semaphore finishSemaphore = new Semaphore(0);
        Cancellable cancelSubscribe1 = subscribe(client, discoverable1, assignmentQueue, finishSemaphore);
        // Assert that it received the changes.
        Collection<PartitionReplica> assigned = assignmentQueue.poll(30, TimeUnit.SECONDS);
        Assert.assertNotNull(assigned);
        Assert.assertEquals(5, assigned.size());
        // Unregister from discovery; the handler should receive a change with an empty collection.
        cancelDiscoverable1.cancel();
        Assert.assertTrue(assignmentQueue.poll(30, TimeUnit.SECONDS).isEmpty());
        // Register with discovery again; the handler should receive changes.
        cancelDiscoverable1 = discoveryService.register(ResolvingDiscoverable.of(discoverable1));
        assigned = assignmentQueue.poll(30, TimeUnit.SECONDS);
        Assert.assertNotNull(assigned);
        Assert.assertEquals(5, assigned.size());
        // Register another discoverable.
        final Discoverable discoverable2 = createDiscoverable(serviceName, 10001);
        Cancellable cancelDiscoverable2 = discoveryService.register(ResolvingDiscoverable.of(discoverable2));
        // Changes should be received by the handler, with only 3 resources,
        // as 2 out of 5 should get moved to the new discoverable.
        assigned = assignmentQueue.poll(30, TimeUnit.SECONDS);
        Assert.assertNotNull(assigned);
        Assert.assertEquals(3, assigned.size());
        // Cancel the first discoverable again; expect an empty result.
        // This also makes sure the latest assignment gets cached in the ResourceCoordinatorClient,
        // which is needed for the next test step.
        cancelDiscoverable1.cancel();
        Assert.assertTrue(assignmentQueue.poll(30, TimeUnit.SECONDS).isEmpty());
        // Cancel the handler.
        cancelSubscribe1.cancel();
        Assert.assertTrue(finishSemaphore.tryAcquire(2, TimeUnit.SECONDS));
        // Subscribe to changes for the second discoverable;
        // it should see the latest assignment, even though no new fetch from ZK is triggered.
        Cancellable cancelSubscribe2 = subscribe(client, discoverable2, assignmentQueue, finishSemaphore);
        assigned = assignmentQueue.poll(30, TimeUnit.SECONDS);
        Assert.assertNotNull(assigned);
        Assert.assertEquals(5, assigned.size());
        // Update the requirement to be an empty requirement; the handler should receive an empty collection.
        client.submitRequirement(ResourceRequirement.builder(serviceName).build());
        Assert.assertTrue(assignmentQueue.poll(30, TimeUnit.SECONDS).isEmpty());
        // Update the requirement to have one partition; the handler should receive one resource.
        client.submitRequirement(ResourceRequirement.builder(serviceName).addPartitions("p", 1, 1).build());
        assigned = assignmentQueue.poll(30, TimeUnit.SECONDS);
        Assert.assertNotNull(assigned);
        Assert.assertEquals(1, assigned.size());
        // Delete the requirement; the handler should receive an empty collection.
        client.deleteRequirement(requirement.getName());
        Assert.assertTrue(assignmentQueue.poll(30, TimeUnit.SECONDS).isEmpty());
        // Cancel the second handler.
        cancelSubscribe2.cancel();
        Assert.assertTrue(finishSemaphore.tryAcquire(2, TimeUnit.SECONDS));
        cancelDiscoverable2.cancel();
      } finally {
        client.stopAndWait();
      }
    } finally {
      coordinator.stopAndWait();
    }
  } finally {
    zkClient.stopAndWait();
  }
}
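The helper methods createDiscoverable and subscribe used above are not part of this excerpt. A minimal sketch of what they might look like, assuming Twill's Discoverable(String, InetSocketAddress) constructor and a ResourceCoordinatorClient.subscribe(String, ResourceHandler) callback API; the localhost binding and the interrupt handling are assumptions, not the original implementation:

// Hypothetical sketch of the helpers referenced by testAssignment; not the original code.
private Discoverable createDiscoverable(String serviceName, int port) {
  // Assumption: a Discoverable bound to localhost on the given port is sufficient for the test.
  return new Discoverable(serviceName, new InetSocketAddress("localhost", port));
}

private Cancellable subscribe(ResourceCoordinatorClient client, Discoverable discoverable,
                              final BlockingQueue<Collection<PartitionReplica>> assignmentQueue,
                              final Semaphore finishSemaphore) {
  // Assumption: subscribe by service name, push every assignment change to the queue,
  // and release the semaphore once the handler is finished.
  return client.subscribe(discoverable.getName(), new ResourceHandler(discoverable) {
    @Override
    public void onChange(Collection<PartitionReplica> partitionReplicas) {
      try {
        assignmentQueue.put(ImmutableList.copyOf(partitionReplicas));
      } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
      }
    }

    @Override
    public void finished(Throwable failureCause) {
      finishSemaphore.release();
    }
  });
}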
Use of co.cask.cdap.common.guice.DiscoveryRuntimeModule in project cdap by caskdata.
The class DFSStreamHeartbeatsTest, method beforeClass:
@BeforeClass
public static void beforeClass() throws IOException {
  zkServer = InMemoryZKServer.builder().setDataDir(TEMP_FOLDER.newFolder()).build();
  zkServer.startAndWait();
  CConfiguration cConf = CConfiguration.create();
  cConf.set(Constants.Zookeeper.QUORUM, zkServer.getConnectionStr());
  cConf.setInt(Constants.Stream.CONTAINER_INSTANCE_ID, 0);
  cConf.set(Constants.CFG_LOCAL_DATA_DIR, TEMP_FOLDER.newFolder().getAbsolutePath());
  Injector injector = Guice.createInjector(Modules.override(
    new ZKClientModule(),
    new DataFabricModules().getInMemoryModules(),
    new ConfigModule(cConf, new Configuration()),
    new DiscoveryRuntimeModule().getInMemoryModules(),
    new NonCustomLocationUnitTestModule().getModule(),
    new ExploreClientModule(),
    new DataSetServiceModules().getInMemoryModules(),
    new DataSetsModules().getStandaloneModules(),
    new NotificationFeedServiceRuntimeModule().getInMemoryModules(),
    new NotificationServiceRuntimeModule().getInMemoryModules(),
    new MetricsClientRuntimeModule().getInMemoryModules(),
    new ViewAdminModules().getInMemoryModules(),
    new AuthorizationTestModule(),
    new AuthorizationEnforcementModule().getInMemoryModules(),
    new AuthenticationContextModules().getMasterModule(),
    // use the distributed stream service module, which performs heartbeats aggregation
    new StreamServiceRuntimeModule().getDistributedModules(),
    new StreamAdminModules().getInMemoryModules()
  ).with(new AbstractModule() {
    @Override
    protected void configure() {
      bind(MetricsCollectionService.class).to(NoOpMetricsCollectionService.class);
      install(new NamespaceClientRuntimeModule().getInMemoryModules());
      bind(StreamConsumerStateStoreFactory.class).to(LevelDBStreamConsumerStateStoreFactory.class).in(Singleton.class);
      bind(StreamAdmin.class).to(FileStreamAdmin.class).in(Singleton.class);
      bind(StreamConsumerFactory.class).to(LevelDBStreamFileConsumerFactory.class).in(Singleton.class);
      bind(StreamFileWriterFactory.class).to(LocationStreamFileWriterFactory.class).in(Singleton.class);
      bind(StreamFileJanitorService.class).to(LocalStreamFileJanitorService.class).in(Scopes.SINGLETON);
      bind(StreamMetaStore.class).to(InMemoryStreamMetaStore.class).in(Scopes.SINGLETON);
      bind(HeartbeatPublisher.class).to(MockHeartbeatPublisher.class).in(Scopes.SINGLETON);
      bind(UGIProvider.class).to(UnsupportedUGIProvider.class);
      bind(OwnerAdmin.class).to(DefaultOwnerAdmin.class);
    }
  }));
  zkClient = injector.getInstance(ZKClientService.class);
  txManager = injector.getInstance(TransactionManager.class);
  datasetService = injector.getInstance(DatasetService.class);
  notificationService = injector.getInstance(NotificationService.class);
  streamHttpService = injector.getInstance(StreamHttpService.class);
  streamService = injector.getInstance(StreamService.class);
  heartbeatPublisher = (MockHeartbeatPublisher) injector.getInstance(HeartbeatPublisher.class);
  namespacedLocationFactory = injector.getInstance(NamespacedLocationFactory.class);
  streamAdmin = injector.getInstance(StreamAdmin.class);
  zkClient.startAndWait();
  txManager.startAndWait();
  datasetService.startAndWait();
  notificationService.startAndWait();
  streamHttpService.startAndWait();
  streamService.startAndWait();
  hostname = streamHttpService.getBindAddress().getHostName();
  port = streamHttpService.getBindAddress().getPort();
  Locations.mkdirsIfNotExists(namespacedLocationFactory.get(NamespaceId.DEFAULT));
}
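Only the setup is shown in this excerpt; the matching teardown is not included. A minimal sketch of what an @AfterClass counterpart could look like, stopping the services in reverse order of startup (the method name and the exact ordering are assumptions):

// Hypothetical teardown counterpart; the original afterClass method is not shown in this excerpt.
@AfterClass
public static void afterClass() {
  // Stop the services in reverse order of startup, then shut down the in-memory ZK server.
  streamService.stopAndWait();
  streamHttpService.stopAndWait();
  notificationService.stopAndWait();
  datasetService.stopAndWait();
  txManager.stopAndWait();
  zkClient.stopAndWait();
  zkServer.stopAndWait();
}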
Use of co.cask.cdap.common.guice.DiscoveryRuntimeModule in project cdap by caskdata.
The class HBaseQueueTest, method init:
@BeforeClass
public static void init() throws Exception {
  hConf = TEST_HBASE.getConfiguration();
  // Customize test configuration
  cConf = CConfiguration.create();
  cConf.set(Constants.Zookeeper.QUORUM, TEST_HBASE.getZkConnectionString());
  cConf.set(TxConstants.Service.CFG_DATA_TX_BIND_PORT, Integer.toString(Networks.getRandomPort()));
  cConf.set(Constants.Dataset.TABLE_PREFIX, TABLE_PREFIX);
  cConf.set(Constants.CFG_HDFS_USER, System.getProperty("user.name"));
  cConf.setLong(QueueConstants.QUEUE_CONFIG_UPDATE_FREQUENCY, 10000L);
  // Test with fewer splits than the default (16).
  // Fewer splits make forceEvict run faster, which makes all queue tests run faster.
  cConf.setInt(QueueConstants.ConfigKeys.QUEUE_TABLE_PRESPLITS, 4);
  cConf.setLong(TxConstants.Manager.CFG_TX_TIMEOUT, 100000000L);
  cConf.setLong(TxConstants.Manager.CFG_TX_MAX_TIMEOUT, 100000000L);
  injector = Guice.createInjector(
    new DataFabricModules().getDistributedModules(),
    new ConfigModule(cConf, hConf),
    new ZKClientModule(),
    new LocationRuntimeModule().getDistributedModules(),
    new NamespaceClientUnitTestModule().getModule(),
    new DiscoveryRuntimeModule().getDistributedModules(),
    new TransactionMetricsModule(),
    new AuthorizationTestModule(),
    new AuthorizationEnforcementModule().getInMemoryModules(),
    new AuthenticationContextModules().getMasterModule(),
    new DataSetsModules().getInMemoryModules(),
    new SystemDatasetRuntimeModule().getDistributedModules(),
    new AbstractModule() {
      @Override
      protected void configure() {
        bind(NotificationFeedManager.class).to(NoOpNotificationFeedManager.class).in(Scopes.SINGLETON);
        bind(OwnerAdmin.class).to(DefaultOwnerAdmin.class);
        bind(UGIProvider.class).to(UnsupportedUGIProvider.class);
      }
    });
  // Create the HBase namespaces
  hbaseAdmin = TEST_HBASE.getHBaseAdmin();
  ddlExecutor = new HBaseDDLExecutorFactory(cConf, hbaseAdmin.getConfiguration()).get();
  tableUtil = injector.getInstance(HBaseTableUtil.class);
  ddlExecutor.createNamespaceIfNotExists(tableUtil.getHBaseNamespace(NamespaceId.SYSTEM));
  ddlExecutor.createNamespaceIfNotExists(tableUtil.getHBaseNamespace(NAMESPACE_ID));
  ddlExecutor.createNamespaceIfNotExists(tableUtil.getHBaseNamespace(NAMESPACE_ID1));
  ConfigurationTable configTable = new ConfigurationTable(hConf);
  configTable.write(ConfigurationTable.Type.DEFAULT, cConf);
  zkClientService = injector.getInstance(ZKClientService.class);
  zkClientService.startAndWait();
  txService = injector.getInstance(TransactionService.class);
  Thread t = new Thread() {
    @Override
    public void run() {
      txService.start();
    }
  };
  t.start();
  // The TransactionManager is started by the txService.
  // We just need a client reference so that we can ask for a tx snapshot.
  txSystemClient = injector.getInstance(TransactionSystemClient.class);
  queueClientFactory = injector.getInstance(QueueClientFactory.class);
  queueAdmin = injector.getInstance(QueueAdmin.class);
  executorFactory = injector.getInstance(TransactionExecutorFactory.class);
}
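Because txService.start() is invoked on a background thread, a test that needs the transaction service usually waits until it is reachable before proceeding. A minimal sketch of such a wait, assuming Tephra's TransactionSystemClient.startShort() and abort(); the helper name, retry count, and sleep interval are assumptions:

// Hypothetical wait-until-ready helper; not part of the original test.
private static void waitForTxService(TransactionSystemClient txClient) throws InterruptedException {
  // Keep trying to start and abort a short transaction until the service responds.
  for (int i = 0; i < 50; i++) {
    try {
      Transaction tx = txClient.startShort();
      txClient.abort(tx);
      return; // service is up
    } catch (Exception e) {
      TimeUnit.MILLISECONDS.sleep(200);
    }
  }
  throw new IllegalStateException("Transaction service did not become available");
}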
Use of co.cask.cdap.common.guice.DiscoveryRuntimeModule in project cdap by caskdata.
The class InMemoryQueueTest, method init:
@BeforeClass
public static void init() throws Exception {
  injector = Guice.createInjector(
    new ConfigModule(),
    new NonCustomLocationUnitTestModule().getModule(),
    new DiscoveryRuntimeModule().getInMemoryModules(),
    new SystemDatasetRuntimeModule().getInMemoryModules(),
    new AuthorizationTestModule(),
    new AuthorizationEnforcementModule().getInMemoryModules(),
    new AuthenticationContextModules().getMasterModule(),
    new DataSetsModules().getInMemoryModules(),
    new DataFabricModules().getInMemoryModules(),
    new TransactionMetricsModule(),
    new ExploreClientModule(),
    new ViewAdminModules().getInMemoryModules(),
    Modules.override(new StreamAdminModules().getInMemoryModules()).with(new AbstractModule() {
      @Override
      protected void configure() {
        // The tests actually exercise the stream-on-queue implementation, hence bind to the queue-backed implementations.
        bind(StreamAdmin.class).to(InMemoryStreamAdmin.class);
        bind(StreamMetaStore.class).to(InMemoryStreamMetaStore.class);
      }
    }));
  // The transaction manager is a "service" and must be started.
  transactionManager = injector.getInstance(TransactionManager.class);
  transactionManager.startAndWait();
  txSystemClient = injector.getInstance(TransactionSystemClient.class);
  queueClientFactory = injector.getInstance(QueueClientFactory.class);
  queueAdmin = injector.getInstance(QueueAdmin.class);
  executorFactory = injector.getInstance(TransactionExecutorFactory.class);
}
Use of co.cask.cdap.common.guice.DiscoveryRuntimeModule in project cdap by caskdata.
The class LevelDBQueueTest, method init:
@BeforeClass
public static void init() throws Exception {
  CConfiguration conf = CConfiguration.create();
  conf.set(Constants.CFG_LOCAL_DATA_DIR, tmpFolder.newFolder().getAbsolutePath());
  conf.set(Constants.Dataset.TABLE_PREFIX, "test");
  Injector injector = Guice.createInjector(
    new ConfigModule(conf),
    new NonCustomLocationUnitTestModule().getModule(),
    new DiscoveryRuntimeModule().getStandaloneModules(),
    new AuthorizationTestModule(),
    new AuthorizationEnforcementModule().getInMemoryModules(),
    new AuthenticationContextModules().getMasterModule(),
    new DataSetsModules().getStandaloneModules(),
    new DataFabricLevelDBModule(),
    new TransactionMetricsModule());
  // The transaction manager is a "service" and must be started.
  transactionManager = injector.getInstance(TransactionManager.class);
  transactionManager.startAndWait();
  txSystemClient = injector.getInstance(TransactionSystemClient.class);
  queueClientFactory = injector.getInstance(QueueClientFactory.class);
  queueAdmin = injector.getInstance(QueueAdmin.class);
  executorFactory = injector.getInstance(TransactionExecutorFactory.class);
  LevelDBTableService.getInstance().clearTables();
}