
Example 1 with ZKClientService

Use of org.apache.twill.zookeeper.ZKClientService in project cdap by caskdata.

Class DistributedKeyManagerTest, method getKeyManager:

private DistributedKeyManager getKeyManager(Injector injector, boolean expectLeader) throws Exception {
    ZKClientService zk = injector.getInstance(ZKClientService.class);
    zk.startAndWait();
    WaitableDistributedKeyManager keyManager = new WaitableDistributedKeyManager(
        injector.getInstance(CConfiguration.class),
        injector.getInstance(Key.get(new TypeLiteral<Codec<KeyIdentifier>>() { })),
        zk);
    keyManager.startAndWait();
    if (expectLeader) {
        keyManager.waitForLeader(5000, TimeUnit.MILLISECONDS);
    }
    return keyManager;
}
Also used : Codec(co.cask.cdap.common.io.Codec) ZKClientService(org.apache.twill.zookeeper.ZKClientService) CConfiguration(co.cask.cdap.common.conf.CConfiguration)
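
A minimal sketch of the lifecycle this test relies on: the ZKClientService obtained from Guice must be started before it is handed to the key manager and stopped afterwards. The helper class and method names below are hypothetical, and the Injector is assumed to carry CDAP's ZKClientModule binding.

import com.google.inject.Injector;
import org.apache.twill.zookeeper.ZKClientService;

// Hypothetical helper, not part of CDAP: isolates the start/stop lifecycle used above.
final class ZkClientLifecycle {

    // Obtain the ZKClientService from the Injector and start it before use.
    static ZKClientService start(Injector injector) {
        ZKClientService zk = injector.getInstance(ZKClientService.class);
        // Blocks until the ZooKeeper connection is established.
        zk.startAndWait();
        return zk;
    }

    // Stop the client once the component using it has been shut down.
    static void stop(ZKClientService zk) {
        zk.stopAndWait();
    }
}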

Example 2 with ZKClientService

Use of org.apache.twill.zookeeper.ZKClientService in project cdap by caskdata.

Class SparkRuntimeContextProvider, method createIfNotExists:

/**
 * Creates a singleton {@link SparkRuntimeContext}.
 * It assumes the files it needs have been localized by the SparkRuntimeService.
 */
private static synchronized SparkRuntimeContext createIfNotExists() {
    if (sparkRuntimeContext != null) {
        return sparkRuntimeContext;
    }
    try {
        CConfiguration cConf = createCConf();
        Configuration hConf = createHConf();
        SparkRuntimeContextConfig contextConfig = new SparkRuntimeContextConfig(hConf);
        // Should run only on YARN and only on an executor node, not the driver node.
        Preconditions.checkState(!contextConfig.isLocal() && Boolean.parseBoolean(System.getenv("SPARK_YARN_MODE")), "SparkContextProvider.getSparkContext should only be called in Spark executor process.");
        // Create the program
        Program program = createProgram(cConf, contextConfig);
        Injector injector = createInjector(cConf, hConf, contextConfig.getProgramId(), contextConfig.getProgramOptions());
        Service logAppenderService = new LogAppenderService(injector.getInstance(LogAppenderInitializer.class), contextConfig.getProgramOptions());
        ZKClientService zkClientService = injector.getInstance(ZKClientService.class);
        KafkaClientService kafkaClientService = injector.getInstance(KafkaClientService.class);
        MetricsCollectionService metricsCollectionService = injector.getInstance(MetricsCollectionService.class);
        StreamCoordinatorClient streamCoordinatorClient = injector.getInstance(StreamCoordinatorClient.class);
        SparkServiceAnnouncer serviceAnnouncer = injector.getInstance(SparkServiceAnnouncer.class);
        // Use a shutdown hook to stop the services. This class should only be loaded from the system classloader
        // of the Spark executor, so there should be exactly one instance.
        // Without a clean shutdown, some logs/metrics might be lost.
        Services.chainStart(logAppenderService, zkClientService, kafkaClientService, metricsCollectionService, streamCoordinatorClient);
        Runtime.getRuntime().addShutdownHook(new Thread() {

            @Override
            public void run() {
                // The logger may already have been shut down. Use System.out/err instead
                System.out.println("Shutting SparkClassLoader services");
                serviceAnnouncer.close();
                Future<List<ListenableFuture<Service.State>>> future = Services.chainStop(logAppenderService, streamCoordinatorClient, metricsCollectionService, kafkaClientService, zkClientService);
                try {
                    List<ListenableFuture<Service.State>> futures = future.get(5, TimeUnit.SECONDS);
                    System.out.println("SparkClassLoader services shutdown completed: " + futures);
                } catch (Exception e) {
                    System.err.println("Exception when shutting down services");
                    e.printStackTrace(System.err);
                }
            }
        });
        // Construct the DatasetFramework
        DatasetFramework datasetFramework = injector.getInstance(DatasetFramework.class);
        WorkflowProgramInfo workflowInfo = contextConfig.getWorkflowProgramInfo();
        DatasetFramework programDatasetFramework = workflowInfo == null ? datasetFramework : NameMappedDatasetFramework.createFromWorkflowProgramInfo(datasetFramework, workflowInfo, contextConfig.getApplicationSpecification());
        // Set up the dataset framework context, if required
        if (programDatasetFramework instanceof ProgramContextAware) {
            ProgramRunId programRunId = program.getId().run(ProgramRunners.getRunId(contextConfig.getProgramOptions()));
            ((ProgramContextAware) programDatasetFramework).setContext(new BasicProgramContext(programRunId));
        }
        PluginInstantiator pluginInstantiator = createPluginInstantiator(cConf, contextConfig, program.getClassLoader());
        // Create the context object
        sparkRuntimeContext = new SparkRuntimeContext(
            contextConfig.getConfiguration(), program, contextConfig.getProgramOptions(), cConf,
            getHostname(), injector.getInstance(TransactionSystemClient.class), programDatasetFramework,
            injector.getInstance(DiscoveryServiceClient.class), metricsCollectionService,
            injector.getInstance(StreamAdmin.class), contextConfig.getWorkflowProgramInfo(),
            pluginInstantiator, injector.getInstance(SecureStore.class),
            injector.getInstance(SecureStoreManager.class), injector.getInstance(AuthorizationEnforcer.class),
            injector.getInstance(AuthenticationContext.class), injector.getInstance(MessagingService.class),
            serviceAnnouncer, injector.getInstance(PluginFinder.class), injector.getInstance(LocationFactory.class));
        LoggingContextAccessor.setLoggingContext(sparkRuntimeContext.getLoggingContext());
        return sparkRuntimeContext;
    } catch (Exception e) {
        throw Throwables.propagate(e);
    }
}
Also used : CConfiguration(co.cask.cdap.common.conf.CConfiguration) Configuration(org.apache.hadoop.conf.Configuration) NameMappedDatasetFramework(co.cask.cdap.internal.app.runtime.workflow.NameMappedDatasetFramework) DatasetFramework(co.cask.cdap.data2.dataset2.DatasetFramework) LogAppenderInitializer(co.cask.cdap.logging.appender.LogAppenderInitializer) Injector(com.google.inject.Injector) List(java.util.List) Program(co.cask.cdap.app.program.Program) DefaultProgram(co.cask.cdap.app.program.DefaultProgram) KafkaClientService(org.apache.twill.kafka.client.KafkaClientService) MetricsCollectionService(co.cask.cdap.api.metrics.MetricsCollectionService) MessagingService(co.cask.cdap.messaging.MessagingService) AbstractService(com.google.common.util.concurrent.AbstractService) ZKClientService(org.apache.twill.zookeeper.ZKClientService) ZKDiscoveryService(org.apache.twill.discovery.ZKDiscoveryService) Service(com.google.common.util.concurrent.Service) StreamCoordinatorClient(co.cask.cdap.data.stream.StreamCoordinatorClient) BasicProgramContext(co.cask.cdap.internal.app.runtime.BasicProgramContext) InvocationTargetException(java.lang.reflect.InvocationTargetException) MalformedURLException(java.net.MalformedURLException) IOException(java.io.IOException) UnknownHostException(java.net.UnknownHostException) WorkflowProgramInfo(co.cask.cdap.internal.app.runtime.workflow.WorkflowProgramInfo) Future(java.util.concurrent.Future) ListenableFuture(com.google.common.util.concurrent.ListenableFuture) PluginInstantiator(co.cask.cdap.internal.app.runtime.plugin.PluginInstantiator) ProgramRunId(co.cask.cdap.proto.id.ProgramRunId) ProgramContextAware(co.cask.cdap.data.ProgramContextAware)
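
The heart of this example is the chained service lifecycle: Services.chainStart brings the services up in dependency order, and a JVM shutdown hook calls Services.chainStop in reverse order with a bounded wait so that logs and metrics are flushed. A minimal sketch of that pattern follows; it assumes the Services helper used above is Twill's org.apache.twill.internal.Services (the import is not shown in the listing), and the two service parameters are placeholders.

import java.util.List;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.Service;
// Assumption: the chaining helper used in the example above.
import org.apache.twill.internal.Services;

final class ServiceChainSketch {

    static void startWithShutdownHook(Service zkClient, Service kafkaClient) throws Exception {
        // Start in dependency order and wait until all services are running.
        Services.chainStart(zkClient, kafkaClient).get();
        Runtime.getRuntime().addShutdownHook(new Thread(() -> {
            // Stop in reverse order; bound the wait so the hook cannot block JVM exit indefinitely.
            Future<List<ListenableFuture<Service.State>>> stop = Services.chainStop(kafkaClient, zkClient);
            try {
                stop.get(5, TimeUnit.SECONDS);
            } catch (Exception e) {
                e.printStackTrace(System.err);
            }
        }));
    }
}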

Example 3 with ZKClientService

Use of org.apache.twill.zookeeper.ZKClientService in project cdap by caskdata.

Class SharedResourceCacheTest, method testCache:

@Test
public void testCache() throws Exception {
    String parentZNode = ZK_NAMESPACE + "/testCache";
    List<ACL> acls = Lists.newArrayList(ZooDefs.Ids.OPEN_ACL_UNSAFE);
    // create 2 cache instances
    ZKClientService zkClient1 = injector1.getInstance(ZKClientService.class);
    zkClient1.startAndWait();
    SharedResourceCache<String> cache1 = new SharedResourceCache<>(zkClient1, new StringCodec(), parentZNode, acls);
    cache1.init();
    // add items to one and wait for them to show up in the second
    String key1 = "key1";
    String value1 = "value1";
    cache1.put(key1, value1);
    ZKClientService zkClient2 = injector2.getInstance(ZKClientService.class);
    zkClient2.startAndWait();
    SharedResourceCache<String> cache2 = new SharedResourceCache<>(zkClient2, new StringCodec(), parentZNode, acls);
    cache2.init();
    waitForEntry(cache2, key1, value1, 10000);
    assertEquals(cache1.get(key1), cache2.get(key1));
    final String key2 = "key2";
    String value2 = "value2";
    cache1.put(key2, value2);
    waitForEntry(cache2, key2, value2, 10000);
    assertEquals(cache1.get(key2), cache2.get(key2));
    final String key3 = "key3";
    String value3 = "value3";
    cache2.put(key3, value3);
    waitForEntry(cache1, key3, value3, 10000);
    assertEquals(cache2.get(key3), cache1.get(key3));
    // replace an existing key
    final String value2new = "value2.2";
    final SettableFuture<String> value2future = SettableFuture.create();
    ResourceListener<String> value2listener = new BaseResourceListener<String>() {

        @Override
        public void onResourceUpdate(String name, String instance) {
            LOG.info("Resource updated: {}={}", name, instance);
            if (key2.equals(name) && value2new.equals(instance)) {
                value2future.set(instance);
            }
        }
    };
    cache2.addListener(value2listener);
    cache1.put(key2, value2new);
    assertEquals(value2new, value2future.get(10, TimeUnit.SECONDS));
    assertEquals(value2new, cache2.get(key2));
    cache2.removeListener(value2listener);
    // remove items from the second and wait for them to disappear from the first
    // Use a latch to make sure both caches see the changes
    final CountDownLatch key3RemoveLatch = new CountDownLatch(2);
    cache1.addListener(new BaseResourceListener<String>() {

        @Override
        public void onResourceDelete(String name) {
            LOG.info("Resource deleted on cache 1 {}", name);
            if (name.equals(key3)) {
                key3RemoveLatch.countDown();
            }
        }
    });
    final SettableFuture<String> key3RemoveFuture = SettableFuture.create();
    ResourceListener<String> key3Listener = new BaseResourceListener<String>() {

        @Override
        public void onResourceDelete(String name) {
            LOG.info("Resource deleted on cache 2 {}", name);
            if (name.equals(key3)) {
                key3RemoveFuture.set(name);
                key3RemoveLatch.countDown();
            }
        }
    };
    cache2.addListener(key3Listener);
    cache1.remove(key3);
    String removedKey = key3RemoveFuture.get();
    assertEquals(key3, removedKey);
    assertNull(cache2.get(key3));
    key3RemoveLatch.await(5, TimeUnit.SECONDS);
    // verify that cache contents are equal
    assertEquals(cache1, cache2);
}
Also used : ACL(org.apache.zookeeper.data.ACL) CountDownLatch(java.util.concurrent.CountDownLatch) ZKClientService(org.apache.twill.zookeeper.ZKClientService) Test(org.junit.Test)
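
A pattern worth noting in this test is how it waits for cross-cache propagation without sleeping: a listener callback completes a SettableFuture (or counts down a latch), and the test thread blocks on it with a timeout. The sketch below shows that idiom in isolation; the UpdateListener interface is a hypothetical stand-in for ResourceListener.

import java.util.concurrent.TimeUnit;
import java.util.function.Consumer;
import com.google.common.util.concurrent.SettableFuture;

final class CallbackAwaitSketch {

    // Hypothetical stand-in for the ResourceListener used in the test.
    interface UpdateListener {
        void onUpdate(String name, String value);
    }

    // Register a listener that completes a future when the expected entry changes,
    // then block with a timeout instead of polling or sleeping.
    static String awaitUpdate(String expectedName, Consumer<UpdateListener> register) throws Exception {
        SettableFuture<String> updated = SettableFuture.create();
        register.accept((name, value) -> {
            if (expectedName.equals(name)) {
                // Wakes the waiting test thread.
                updated.set(value);
            }
        });
        return updated.get(10, TimeUnit.SECONDS);
    }
}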

Example 4 with ZKClientService

Use of org.apache.twill.zookeeper.ZKClientService in project cdap by caskdata.

Class LeaderElectionInfoServiceTest, method testParticipants:

@Test
public void testParticipants() throws Exception {
    final int size = 5;
    String prefix = "/election";
    List<ZKClientService> zkClients = new ArrayList<>();
    ZKClientService infoZKClient = DefaultZKClientService.Builder.of(zkServer.getConnectionStr()).build();
    infoZKClient.startAndWait();
    zkClients.add(infoZKClient);
    // Start the LeaderElectionInfoService
    LeaderElectionInfoService infoService = new LeaderElectionInfoService(infoZKClient, prefix);
    infoService.startAndWait();
    // This will time out as no leader election node has been created yet
    try {
        infoService.getParticipants(1, TimeUnit.SECONDS);
        Assert.fail("Expected timeout");
    } catch (TimeoutException e) {
        // Expected
    }
    // Starts multiple leader elections
    List<LeaderElection> leaderElections = new ArrayList<>();
    for (int i = 0; i < size; i++) {
        ZKClientService zkClient = DefaultZKClientService.Builder.of(zkServer.getConnectionStr()).build();
        zkClient.startAndWait();
        zkClients.add(zkClient);
        final int participantId = i;
        LeaderElection leaderElection = new LeaderElection(zkClient, prefix, new ElectionHandler() {

            @Override
            public void leader() {
                LOG.info("Leader: {}", participantId);
            }

            @Override
            public void follower() {
                LOG.info("Follow: {}", participantId);
            }
        });
        leaderElection.start();
        leaderElections.add(leaderElection);
    }
    // Get the dynamic participants map
    final SortedMap<Integer, LeaderElectionInfoService.Participant> participants = infoService.getParticipants(5, TimeUnit.SECONDS);
    // Expect all participants to eventually have hostname information
    Tasks.waitFor(true, new Callable<Boolean>() {

        @Override
        public Boolean call() throws Exception {
            if (participants.size() != size) {
                return false;
            }
            return Iterables.all(participants.values(), new Predicate<LeaderElectionInfoService.Participant>() {

                @Override
                public boolean apply(LeaderElectionInfoService.Participant input) {
                    return input.getHostname() != null;
                }
            });
        }
    }, 5, TimeUnit.SECONDS, 100, TimeUnit.MILLISECONDS);
    // Fetch the static snapshot. It should be the same as the dynamic participants.
    SortedMap<Integer, LeaderElectionInfoService.Participant> snapshot = infoService.fetchCurrentParticipants();
    Assert.assertEquals(size, snapshot.size());
    Assert.assertEquals(participants, snapshot);
    int expectedSize = size;
    for (LeaderElection leaderElection : leaderElections) {
        leaderElection.stopAndWait();
        Tasks.waitFor(--expectedSize, new Callable<Integer>() {

            @Override
            public Integer call() throws Exception {
                return participants.size();
            }
        }, 5, TimeUnit.SECONDS, 100, TimeUnit.MILLISECONDS);
    }
    // Fetch the static snapshot again. It should be empty and the same as the dynamic one.
    snapshot = infoService.fetchCurrentParticipants();
    Assert.assertTrue(snapshot.isEmpty());
    Assert.assertEquals(participants, snapshot);
    infoService.stopAndWait();
    for (ZKClientService zkClient : zkClients) {
        zkClient.stopAndWait();
    }
}
Also used : ArrayList(java.util.ArrayList) TimeoutException(java.util.concurrent.TimeoutException) IOException(java.io.IOException) Predicate(com.google.common.base.Predicate) ZKClientService(org.apache.twill.zookeeper.ZKClientService) DefaultZKClientService(org.apache.twill.internal.zookeeper.DefaultZKClientService) LeaderElection(org.apache.twill.internal.zookeeper.LeaderElection) ElectionHandler(org.apache.twill.api.ElectionHandler) Test(org.junit.Test)
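
For reference, the minimal shape of a single election participant using the same LeaderElection and ElectionHandler classes as the test is sketched below; "localhost:2181" and the "/election" prefix are placeholder values, not taken from the test.

import org.apache.twill.api.ElectionHandler;
import org.apache.twill.internal.zookeeper.DefaultZKClientService;
import org.apache.twill.internal.zookeeper.LeaderElection;
import org.apache.twill.zookeeper.ZKClientService;

final class ElectionParticipantSketch {

    public static void main(String[] args) {
        // Placeholder connection string; the test uses an in-memory ZooKeeper server instead.
        ZKClientService zkClient = DefaultZKClientService.Builder.of("localhost:2181").build();
        zkClient.startAndWait();
        LeaderElection election = new LeaderElection(zkClient, "/election", new ElectionHandler() {

            @Override
            public void leader() {
                System.out.println("became leader");
            }

            @Override
            public void follower() {
                System.out.println("became follower");
            }
        });
        // Joins the election; leader()/follower() are invoked as leadership changes.
        election.start();
        // ... do work while participating ...
        election.stopAndWait();
        zkClient.stopAndWait();
    }
}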

Example 5 with ZKClientService

Use of org.apache.twill.zookeeper.ZKClientService in project cdap by caskdata.

Class KafkaClientModuleTest, method beforeTest:

@Before
public void beforeTest() throws Exception {
    zkServer = InMemoryZKServer.builder().setDataDir(TEMP_FOLDER.newFolder()).build();
    zkServer.startAndWait();
    CConfiguration cConf = CConfiguration.create();
    String kafkaZKNamespace = cConf.get(KafkaConstants.ConfigKeys.ZOOKEEPER_NAMESPACE_CONFIG);
    kafkaZKConnect = zkServer.getConnectionStr();
    if (kafkaZKNamespace != null) {
        ZKClientService zkClient = new DefaultZKClientService(zkServer.getConnectionStr(), 2000, null, ImmutableMultimap.<String, byte[]>of());
        zkClient.startAndWait();
        zkClient.create("/" + kafkaZKNamespace, null, CreateMode.PERSISTENT);
        zkClient.stopAndWait();
        kafkaZKConnect += "/" + kafkaZKNamespace;
    }
    kafkaServer = createKafkaServer(kafkaZKConnect, TEMP_FOLDER.newFolder());
    kafkaServer.startAndWait();
}
Also used : ZKClientService(org.apache.twill.zookeeper.ZKClientService) DefaultZKClientService(org.apache.twill.internal.zookeeper.DefaultZKClientService) CConfiguration(co.cask.cdap.common.conf.CConfiguration) Before(org.junit.Before)
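
The conditional block above prepares a ZooKeeper namespace ("chroot"-style path) for Kafka: a short-lived ZKClientService creates the parent node, and the namespace is appended to the connection string. A sketch of that step in isolation follows, with placeholder arguments; like the test, it does not wait on the future returned by the asynchronous create() call.

import com.google.common.collect.ImmutableMultimap;
import org.apache.twill.internal.zookeeper.DefaultZKClientService;
import org.apache.twill.zookeeper.ZKClientService;
import org.apache.zookeeper.CreateMode;

final class KafkaZkNamespaceSketch {

    // Creates the parent znode and returns the namespaced connection string,
    // e.g. prepare("localhost:2181", "kafka") -> "localhost:2181/kafka".
    static String prepare(String zkConnect, String namespace) {
        ZKClientService zkClient =
            new DefaultZKClientService(zkConnect, 2000, null, ImmutableMultimap.<String, byte[]>of());
        zkClient.startAndWait();
        try {
            // Same call as in the test above.
            zkClient.create("/" + namespace, null, CreateMode.PERSISTENT);
        } finally {
            zkClient.stopAndWait();
        }
        return zkConnect + "/" + namespace;
    }
}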

Aggregations

ZKClientService (org.apache.twill.zookeeper.ZKClientService): 27 usages
Injector (com.google.inject.Injector): 14 usages
Test (org.junit.Test): 14 usages
CConfiguration (co.cask.cdap.common.conf.CConfiguration): 11 usages
IOException (java.io.IOException): 9 usages
ConfigModule (co.cask.cdap.common.guice.ConfigModule): 8 usages
ZKClientModule (co.cask.cdap.common.guice.ZKClientModule): 8 usages
DiscoveryRuntimeModule (co.cask.cdap.common.guice.DiscoveryRuntimeModule): 7 usages
AbstractModule (com.google.inject.AbstractModule): 6 usages
Configuration (org.apache.hadoop.conf.Configuration): 6 usages
DataFabricModules (co.cask.cdap.data.runtime.DataFabricModules): 5 usages
DataSetsModules (co.cask.cdap.data.runtime.DataSetsModules): 5 usages
TransactionMetricsModule (co.cask.cdap.data.runtime.TransactionMetricsModule): 5 usages
AuthenticationContextModules (co.cask.cdap.security.auth.context.AuthenticationContextModules): 5 usages
AuthorizationEnforcementModule (co.cask.cdap.security.authorization.AuthorizationEnforcementModule): 5 usages
AuthorizationTestModule (co.cask.cdap.security.authorization.AuthorizationTestModule): 5 usages
DefaultOwnerAdmin (co.cask.cdap.security.impersonation.DefaultOwnerAdmin): 5 usages
UnsupportedUGIProvider (co.cask.cdap.security.impersonation.UnsupportedUGIProvider): 5 usages
ZKDiscoveryService (org.apache.twill.discovery.ZKDiscoveryService): 5 usages
SystemDatasetRuntimeModule (co.cask.cdap.data.runtime.SystemDatasetRuntimeModule): 4 usages