Use of org.apache.twill.zookeeper.ZKClientService in project cdap by caskdata.
From the class DistributedKeyManagerTest, method getKeyManager:
private DistributedKeyManager getKeyManager(Injector injector, boolean expectLeader) throws Exception {
  ZKClientService zk = injector.getInstance(ZKClientService.class);
  zk.startAndWait();
  WaitableDistributedKeyManager keyManager =
    new WaitableDistributedKeyManager(injector.getInstance(CConfiguration.class),
                                      injector.getInstance(Key.get(new TypeLiteral<Codec<KeyIdentifier>>() { })),
                                      zk);
  keyManager.startAndWait();
  if (expectLeader) {
    keyManager.waitForLeader(5000, TimeUnit.MILLISECONDS);
  }
  return keyManager;
}
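For reference, a minimal sketch of the ZKClientService lifecycle this helper relies on; the connection string and the usage inside the try block are illustrative only:

// Sketch: ZKClientService is a Guava Service wrapping a ZooKeeper connection.
// It must be started before use and stopped when done.
ZKClientService zkClient = ZKClientService.Builder.of("localhost:2181").build();
zkClient.startAndWait();   // blocks until the ZooKeeper session is established
try {
  // use the client, e.g. zkClient.exists("/some/path")
} finally {
  zkClient.stopAndWait();  // closes the ZooKeeper session
}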
Use of org.apache.twill.zookeeper.ZKClientService in project cdap by caskdata.
From the class SparkRuntimeContextProvider, method createIfNotExists:
/**
 * Creates a singleton {@link SparkRuntimeContext}.
 * It makes assumptions about the locations of files localized by the SparkRuntimeService.
 */
private static synchronized SparkRuntimeContext createIfNotExists() {
  if (sparkRuntimeContext != null) {
    return sparkRuntimeContext;
  }
  try {
    CConfiguration cConf = createCConf();
    Configuration hConf = createHConf();
    SparkRuntimeContextConfig contextConfig = new SparkRuntimeContextConfig(hConf);
    // Should run in YARN mode only, and only on an executor node, not the driver node.
    Preconditions.checkState(!contextConfig.isLocal() && Boolean.parseBoolean(System.getenv("SPARK_YARN_MODE")),
                             "SparkContextProvider.getSparkContext should only be called in Spark executor process.");
    // Create the program
    Program program = createProgram(cConf, contextConfig);
    Injector injector = createInjector(cConf, hConf, contextConfig.getProgramId(), contextConfig.getProgramOptions());
    Service logAppenderService = new LogAppenderService(injector.getInstance(LogAppenderInitializer.class),
                                                        contextConfig.getProgramOptions());
    ZKClientService zkClientService = injector.getInstance(ZKClientService.class);
    KafkaClientService kafkaClientService = injector.getInstance(KafkaClientService.class);
    MetricsCollectionService metricsCollectionService = injector.getInstance(MetricsCollectionService.class);
    StreamCoordinatorClient streamCoordinatorClient = injector.getInstance(StreamCoordinatorClient.class);
    SparkServiceAnnouncer serviceAnnouncer = injector.getInstance(SparkServiceAnnouncer.class);
    // Use a shutdown hook to stop the services: this class should only be loaded from the system
    // classloader of the Spark executor, so there should be exactly one instance. Without a clean
    // shutdown, some logs and metrics might be lost.
    Services.chainStart(logAppenderService, zkClientService, kafkaClientService, metricsCollectionService,
                        streamCoordinatorClient);
    Runtime.getRuntime().addShutdownHook(new Thread() {
      @Override
      public void run() {
        // The logger may already have been shut down; use System.out/err instead.
        System.out.println("Shutting down SparkClassLoader services");
        serviceAnnouncer.close();
        Future<List<ListenableFuture<Service.State>>> future =
          Services.chainStop(logAppenderService, streamCoordinatorClient, metricsCollectionService,
                             kafkaClientService, zkClientService);
        try {
          List<ListenableFuture<Service.State>> futures = future.get(5, TimeUnit.SECONDS);
          System.out.println("SparkClassLoader services shutdown completed: " + futures);
        } catch (Exception e) {
          System.err.println("Exception when shutting down services");
          e.printStackTrace(System.err);
        }
      }
    });
    // Construct the DatasetFramework
    DatasetFramework datasetFramework = injector.getInstance(DatasetFramework.class);
    WorkflowProgramInfo workflowInfo = contextConfig.getWorkflowProgramInfo();
    DatasetFramework programDatasetFramework = workflowInfo == null
      ? datasetFramework
      : NameMappedDatasetFramework.createFromWorkflowProgramInfo(datasetFramework, workflowInfo,
                                                                 contextConfig.getApplicationSpecification());
    // Set up the dataset framework context, if required
    if (programDatasetFramework instanceof ProgramContextAware) {
      ProgramRunId programRunId = program.getId().run(ProgramRunners.getRunId(contextConfig.getProgramOptions()));
      ((ProgramContextAware) programDatasetFramework).setContext(new BasicProgramContext(programRunId));
    }
    PluginInstantiator pluginInstantiator = createPluginInstantiator(cConf, contextConfig, program.getClassLoader());
    // Create the context object
    sparkRuntimeContext = new SparkRuntimeContext(
      contextConfig.getConfiguration(), program, contextConfig.getProgramOptions(), cConf, getHostname(),
      injector.getInstance(TransactionSystemClient.class), programDatasetFramework,
      injector.getInstance(DiscoveryServiceClient.class), metricsCollectionService,
      injector.getInstance(StreamAdmin.class), contextConfig.getWorkflowProgramInfo(), pluginInstantiator,
      injector.getInstance(SecureStore.class), injector.getInstance(SecureStoreManager.class),
      injector.getInstance(AuthorizationEnforcer.class), injector.getInstance(AuthenticationContext.class),
      injector.getInstance(MessagingService.class), serviceAnnouncer,
      injector.getInstance(PluginFinder.class), injector.getInstance(LocationFactory.class));
    LoggingContextAccessor.setLoggingContext(sparkRuntimeContext.getLoggingContext());
    return sparkRuntimeContext;
  } catch (Exception e) {
    throw Throwables.propagate(e);
  }
}
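The start/stop choreography above follows a common Twill pattern: start services in dependency order with Services.chainStart and stop them in reverse order with Services.chainStop. A minimal sketch of just that pattern, where serviceA and serviceB are placeholders for the injected services:

// Sketch: start services in order, stop them in reverse from a shutdown hook.
Services.chainStart(serviceA, serviceB);
Runtime.getRuntime().addShutdownHook(new Thread() {
  @Override
  public void run() {
    try {
      // Wait a bounded time for a clean stop so buffered logs/metrics are flushed
      Services.chainStop(serviceB, serviceA).get(5, TimeUnit.SECONDS);
    } catch (Exception e) {
      e.printStackTrace(System.err);
    }
  }
});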
Use of org.apache.twill.zookeeper.ZKClientService in project cdap by caskdata.
From the class SharedResourceCacheTest, method testCache:
@Test
public void testCache() throws Exception {
  String parentZNode = ZK_NAMESPACE + "/testCache";
  List<ACL> acls = Lists.newArrayList(ZooDefs.Ids.OPEN_ACL_UNSAFE);
  // Create two cache instances
  ZKClientService zkClient1 = injector1.getInstance(ZKClientService.class);
  zkClient1.startAndWait();
  SharedResourceCache<String> cache1 = new SharedResourceCache<>(zkClient1, new StringCodec(), parentZNode, acls);
  cache1.init();
  // Add items to one cache and wait for them to show up in the second
  String key1 = "key1";
  String value1 = "value1";
  cache1.put(key1, value1);
  ZKClientService zkClient2 = injector2.getInstance(ZKClientService.class);
  zkClient2.startAndWait();
  SharedResourceCache<String> cache2 = new SharedResourceCache<>(zkClient2, new StringCodec(), parentZNode, acls);
  cache2.init();
  waitForEntry(cache2, key1, value1, 10000);
  assertEquals(cache1.get(key1), cache2.get(key1));
  final String key2 = "key2";
  String value2 = "value2";
  cache1.put(key2, value2);
  waitForEntry(cache2, key2, value2, 10000);
  assertEquals(cache1.get(key2), cache2.get(key2));
  final String key3 = "key3";
  String value3 = "value3";
  cache2.put(key3, value3);
  waitForEntry(cache1, key3, value3, 10000);
  assertEquals(cache2.get(key3), cache1.get(key3));
  // Replace an existing key
  final String value2new = "value2.2";
  final SettableFuture<String> value2future = SettableFuture.create();
  ResourceListener<String> value2listener = new BaseResourceListener<String>() {
    @Override
    public void onResourceUpdate(String name, String instance) {
      LOG.info("Resource updated: {}={}", name, instance);
      if (key2.equals(name) && value2new.equals(instance)) {
        value2future.set(instance);
      }
    }
  };
  cache2.addListener(value2listener);
  cache1.put(key2, value2new);
  assertEquals(value2new, value2future.get(10, TimeUnit.SECONDS));
  assertEquals(value2new, cache2.get(key2));
  cache2.removeListener(value2listener);
  // Remove items from the second cache and wait for them to disappear from the first.
  // Use a latch to make sure both caches see the change.
  final CountDownLatch key3RemoveLatch = new CountDownLatch(2);
  cache1.addListener(new BaseResourceListener<String>() {
    @Override
    public void onResourceDelete(String name) {
      LOG.info("Resource deleted on cache 1 {}", name);
      if (name.equals(key3)) {
        key3RemoveLatch.countDown();
      }
    }
  });
  final SettableFuture<String> key3RemoveFuture = SettableFuture.create();
  ResourceListener<String> key3Listener = new BaseResourceListener<String>() {
    @Override
    public void onResourceDelete(String name) {
      LOG.info("Resource deleted on cache 2 {}", name);
      if (name.equals(key3)) {
        key3RemoveFuture.set(name);
        key3RemoveLatch.countDown();
      }
    }
  };
  cache2.addListener(key3Listener);
  cache1.remove(key3);
  // Bound the wait so a missed notification fails the test instead of hanging it
  String removedKey = key3RemoveFuture.get(10, TimeUnit.SECONDS);
  assertEquals(key3, removedKey);
  assertNull(cache2.get(key3));
  key3RemoveLatch.await(5, TimeUnit.SECONDS);
  // Verify that the cache contents are equal
  assertEquals(cache1, cache2);
}
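The waitForEntry helper is not shown in this excerpt; a plausible polling implementation might look like the following, where the signature and the polling interval are assumptions:

// Hypothetical sketch of the waitForEntry helper: poll the cache until the
// expected value appears or the timeout (in milliseconds) elapses.
private <T> void waitForEntry(SharedResourceCache<T> cache, String key, T expected,
                              long timeoutMillis) throws InterruptedException, TimeoutException {
  long deadline = System.currentTimeMillis() + timeoutMillis;
  while (System.currentTimeMillis() < deadline) {
    if (expected.equals(cache.get(key))) {
      return;
    }
    TimeUnit.MILLISECONDS.sleep(100);
  }
  throw new TimeoutException("Entry " + key + " did not reach the expected value within " + timeoutMillis + " ms");
}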
Use of org.apache.twill.zookeeper.ZKClientService in project cdap by caskdata.
From the class LeaderElectionInfoServiceTest, method testParticipants:
@Test
public void testParticipants() throws Exception {
  final int size = 5;
  String prefix = "/election";
  List<ZKClientService> zkClients = new ArrayList<>();
  ZKClientService infoZKClient = DefaultZKClientService.Builder.of(zkServer.getConnectionStr()).build();
  infoZKClient.startAndWait();
  zkClients.add(infoZKClient);
  // Start the LeaderElectionInfoService
  LeaderElectionInfoService infoService = new LeaderElectionInfoService(infoZKClient, prefix);
  infoService.startAndWait();
  // This will time out, since no leader election node has been created yet
  try {
    infoService.getParticipants(1, TimeUnit.SECONDS);
    Assert.fail("Expected timeout");
  } catch (TimeoutException e) {
    // Expected
  }
  // Start multiple leader elections
  List<LeaderElection> leaderElections = new ArrayList<>();
  for (int i = 0; i < size; i++) {
    ZKClientService zkClient = DefaultZKClientService.Builder.of(zkServer.getConnectionStr()).build();
    zkClient.startAndWait();
    zkClients.add(zkClient);
    final int participantId = i;
    LeaderElection leaderElection = new LeaderElection(zkClient, prefix, new ElectionHandler() {
      @Override
      public void leader() {
        LOG.info("Leader: {}", participantId);
      }

      @Override
      public void follower() {
        LOG.info("Follower: {}", participantId);
      }
    });
    leaderElection.start();
    leaderElections.add(leaderElection);
  }
  // Get the dynamic participants map
  final SortedMap<Integer, LeaderElectionInfoService.Participant> participants =
    infoService.getParticipants(5, TimeUnit.SECONDS);
  // Expect all participants to show up with hostname information
  Tasks.waitFor(true, new Callable<Boolean>() {
    @Override
    public Boolean call() throws Exception {
      if (participants.size() != size) {
        return false;
      }
      return Iterables.all(participants.values(), new Predicate<LeaderElectionInfoService.Participant>() {
        @Override
        public boolean apply(LeaderElectionInfoService.Participant input) {
          return input.getHostname() != null;
        }
      });
    }
  }, 5, TimeUnit.SECONDS, 100, TimeUnit.MILLISECONDS);
  // Fetch the static snapshot. It should be the same as the dynamic participants.
  SortedMap<Integer, LeaderElectionInfoService.Participant> snapshot = infoService.fetchCurrentParticipants();
  Assert.assertEquals(size, snapshot.size());
  Assert.assertEquals(participants, snapshot);
  int expectedSize = size;
  for (LeaderElection leaderElection : leaderElections) {
    leaderElection.stopAndWait();
    Tasks.waitFor(--expectedSize, new Callable<Integer>() {
      @Override
      public Integer call() throws Exception {
        return participants.size();
      }
    }, 5, TimeUnit.SECONDS, 100, TimeUnit.MILLISECONDS);
  }
  // Fetch the static snapshot again. It should be empty and the same as the dynamic one.
  snapshot = infoService.fetchCurrentParticipants();
  Assert.assertTrue(snapshot.isEmpty());
  Assert.assertEquals(participants, snapshot);
  infoService.stopAndWait();
  for (ZKClientService zkClient : zkClients) {
    zkClient.stopAndWait();
  }
}
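Condensed from the test above, the observer-side usage boils down to the sketch below. The key design point the test exercises is that getParticipants returns a live view that keeps updating, while fetchCurrentParticipants returns a point-in-time snapshot:

// Observe election participants without joining the election itself.
// The prefix must match the znode prefix used by the LeaderElection instances.
LeaderElectionInfoService infoService = new LeaderElectionInfoService(zkClient, "/election");
infoService.startAndWait();
// Live view: this map updates as participants join and leave.
SortedMap<Integer, LeaderElectionInfoService.Participant> live =
    infoService.getParticipants(5, TimeUnit.SECONDS);
// Static view: a snapshot that does not change afterwards.
SortedMap<Integer, LeaderElectionInfoService.Participant> snapshot =
    infoService.fetchCurrentParticipants();
infoService.stopAndWait();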
Use of org.apache.twill.zookeeper.ZKClientService in project cdap by caskdata.
From the class KafkaClientModuleTest, method beforeTest:
@Before
public void beforeTest() throws Exception {
  zkServer = InMemoryZKServer.builder().setDataDir(TEMP_FOLDER.newFolder()).build();
  zkServer.startAndWait();
  CConfiguration cConf = CConfiguration.create();
  String kafkaZKNamespace = cConf.get(KafkaConstants.ConfigKeys.ZOOKEEPER_NAMESPACE_CONFIG);
  kafkaZKConnect = zkServer.getConnectionStr();
  if (kafkaZKNamespace != null) {
    ZKClientService zkClient = new DefaultZKClientService(zkServer.getConnectionStr(), 2000, null,
                                                          ImmutableMultimap.<String, byte[]>of());
    zkClient.startAndWait();
    zkClient.create("/" + kafkaZKNamespace, null, CreateMode.PERSISTENT);
    zkClient.stopAndWait();
    kafkaZKConnect += "/" + kafkaZKNamespace;
  }
  kafkaServer = createKafkaServer(kafkaZKConnect, TEMP_FOLDER.newFolder());
  kafkaServer.startAndWait();
}
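The createKafkaServer helper is not part of this excerpt. A plausible sketch using Twill's EmbeddedKafkaServer follows; the property values are assumptions, and the real test may configure more broker settings:

// Hypothetical sketch of the createKafkaServer helper referenced above.
private static EmbeddedKafkaServer createKafkaServer(String zkConnect, File logDir) {
  Properties props = new Properties();
  props.setProperty("broker.id", "1");                     // assumed broker id
  props.setProperty("zookeeper.connect", zkConnect);       // ZK connection string, plus optional namespace
  props.setProperty("log.dir", logDir.getAbsolutePath());  // where the broker keeps its log segments
  props.setProperty("port", "0");                          // assumed: let the broker pick a free port
  return new EmbeddedKafkaServer(props);
}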