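For orientation before the project examples, here is a minimal, self-contained sketch of the AbstractIdleService contract they all rely on: a subclass overrides only startUp() and shutDown(), and Guava runs each once on a dedicated thread. The EchoService name and socket payload are illustrative, not taken from any project below.

import com.google.common.util.concurrent.AbstractIdleService;
import java.net.ServerSocket;

public class EchoService extends AbstractIdleService {

    private ServerSocket socket;

    @Override
    protected void startUp() throws Exception {
        // Runs once, on its own thread, when startAsync() is called.
        socket = new ServerSocket(0);
    }

    @Override
    protected void shutDown() throws Exception {
        // Runs once, on its own thread, when stopAsync() is called.
        socket.close();
    }
}

From those two methods Guava derives the full Service state machine: new EchoService().startAsync().awaitRunning(), stopAsync().awaitTerminated(), and listener notifications.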

Example 1 with AbstractIdleService

Use of com.google.common.util.concurrent.AbstractIdleService in project weave by continuuity.

From class ZKServiceDecoratorTest, method testStateTransition:

@Test
public void testStateTransition() throws InterruptedException, ExecutionException, TimeoutException {
    InMemoryZKServer zkServer = InMemoryZKServer.builder().build();
    zkServer.startAndWait();
    try {
        final String namespace = Joiner.on('/').join("/weave", RunIds.generate(), "runnables", "Runner1");
        final ZKClientService zkClient = ZKClientService.Builder.of(zkServer.getConnectionStr()).build();
        zkClient.startAndWait();
        zkClient.create(namespace, null, CreateMode.PERSISTENT).get();
        try {
            JsonObject content = new JsonObject();
            content.addProperty("containerId", "container-123");
            content.addProperty("host", "localhost");
            RunId runId = RunIds.generate();
            final Semaphore semaphore = new Semaphore(0);
            // The decorated service's startUp/shutDown block until the ZK watcher
            // below observes the expected state and releases the semaphore.
            ZKServiceDecorator service = new ZKServiceDecorator(ZKClients.namespace(zkClient, namespace), runId, Suppliers.ofInstance(content), new AbstractIdleService() {

                @Override
                protected void startUp() throws Exception {
                    Preconditions.checkArgument(semaphore.tryAcquire(5, TimeUnit.SECONDS), "Fail to start");
                }

                @Override
                protected void shutDown() throws Exception {
                    Preconditions.checkArgument(semaphore.tryAcquire(5, TimeUnit.SECONDS), "Fail to stop");
                }
            });
            final String runnablePath = namespace + "/" + runId.getId();
            final AtomicReference<String> stateMatch = new AtomicReference<String>("STARTING");
            // Releases a semaphore permit each time the state node's content matches stateMatch
            watchDataChange(zkClient, runnablePath + "/state", semaphore, stateMatch);
            Assert.assertEquals(Service.State.RUNNING, service.start().get(5, TimeUnit.SECONDS));
            stateMatch.set("STOPPING");
            Assert.assertEquals(Service.State.TERMINATED, service.stop().get(5, TimeUnit.SECONDS));
        } finally {
            zkClient.stopAndWait();
        }
    } finally {
        zkServer.stopAndWait();
    }
}
Also used: ZKServiceDecorator (com.continuuity.weave.internal.ZKServiceDecorator), ZKClientService (com.continuuity.weave.zookeeper.ZKClientService), JsonObject (com.google.gson.JsonObject), AtomicReference (java.util.concurrent.atomic.AtomicReference), Semaphore (java.util.concurrent.Semaphore), AbstractIdleService (com.google.common.util.concurrent.AbstractIdleService), RunId (com.continuuity.weave.api.RunId), TimeoutException (java.util.concurrent.TimeoutException), ExecutionException (java.util.concurrent.ExecutionException), InMemoryZKServer (com.continuuity.weave.internal.zookeeper.InMemoryZKServer), Test (org.junit.Test)
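A note on API vintage: Service.start(), stop(), startAndWait() and stopAndWait() used in this test return or wrap ListenableFuture<State>; they were deprecated around Guava 15 in favor of startAsync()/stopAsync() and eventually removed. A rough rewrite of the two assertions for a current Guava, assuming the same service and stateMatch variables, would be the following (awaitRunning/awaitTerminated do not return the state but throw TimeoutException or IllegalStateException instead, which gives equivalent test coverage):

// Hypothetical rewrite for Guava 15+, where start()/stop() no longer exist:
service.startAsync().awaitRunning(5, TimeUnit.SECONDS);      // was: service.start().get(5, ...)
stateMatch.set("STOPPING");
service.stopAsync().awaitTerminated(5, TimeUnit.SECONDS);    // was: service.stop().get(5, ...)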

Example 2 with AbstractIdleService

Use of com.google.common.util.concurrent.AbstractIdleService in project graylog2-server by Graylog2.

From class LookupTableService, method handleCacheUpdate:

@Subscribe
public void handleCacheUpdate(CachesUpdated updated) {
    scheduler.schedule(() -> {
        // first we create the new cache instance and start it
        // then we retrieve the old one so we can safely stop it later
        // then we build a new lookup table instance with the new cache instance
        // last we can remove the old lookup table instance and stop the original cache
        // collect old cache instances
        final ImmutableSet.Builder<LookupCache> existingCaches = ImmutableSet.builder();
        // create new cache and lookup table instances
        final Map<CacheDto, LookupCache> newCaches = createCaches(configService.findCachesForIds(updated.ids()));
        final CountDownLatch runningLatch = new CountDownLatch(newCaches.size());
        newCaches.forEach((cacheDto, cache) -> {
            cache.addListener(new CacheListener(cacheDto, cache, runningLatch, existingCaches::add), scheduler);
            cache.startAsync();
        });
        // wait until every new cache is either running or failed before updating the lookup tables
        awaitUninterruptibly(runningLatch);
        // when a cache is updated, the lookup tables that use it need to be updated as well
        final Collection<LookupTableDto> tablesToUpdate = configService.findTablesForCacheIds(updated.ids());
        tablesToUpdate.forEach(this::createLookupTable);
        // stop old caches
        existingCaches.build().forEach(AbstractIdleService::stopAsync);
    }, 0, TimeUnit.SECONDS);
}
Also used: ImmutableSet (com.google.common.collect.ImmutableSet), LookupCache (org.graylog2.plugin.lookup.LookupCache), LookupTableDto (org.graylog2.lookup.dto.LookupTableDto), AbstractIdleService (com.google.common.util.concurrent.AbstractIdleService), CountDownLatch (java.util.concurrent.CountDownLatch), CacheDto (org.graylog2.lookup.dto.CacheDto), Subscribe (com.google.common.eventbus.Subscribe)
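The handler follows a swap pattern: start the replacement services, block until each settles as RUNNING or FAILED, rewire the lookup tables, and only then stop the old instances. A stripped-down sketch of the latch-and-listener mechanics, with hypothetical names and none of the Graylog types:

import com.google.common.util.concurrent.Service;
import com.google.common.util.concurrent.Uninterruptibles;
import java.util.Collection;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.Executor;

// Hypothetical helper, not Graylog code: start replacements, wait until each
// settles (RUNNING or FAILED), then stop the instances being replaced.
static void swap(Collection<? extends Service> newServices,
                 Collection<? extends Service> oldServices,
                 Executor executor) {
    final CountDownLatch settled = new CountDownLatch(newServices.size());
    for (Service s : newServices) {
        s.addListener(new Service.Listener() {
            @Override
            public void running() {
                settled.countDown();
            }

            @Override
            public void failed(Service.State from, Throwable failure) {
                settled.countDown();
            }
        }, executor);
        s.startAsync();
    }
    Uninterruptibles.awaitUninterruptibly(settled);  // block, deferring interrupts
    oldServices.forEach(Service::stopAsync);         // retire old instances only now
}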

Example 3 with AbstractIdleService

Use of com.google.common.util.concurrent.AbstractIdleService in project graylog2-server by Graylog2.

From class LookupTableService, method handleAdapterUpdate:

@Subscribe
public void handleAdapterUpdate(DataAdaptersUpdated updated) {
    scheduler.schedule(() -> {
        // first we create the new adapter instance and start it
        // then we retrieve the old one so we can safely stop it later
        // then we build a new lookup table instance with the new adapter instance
        // last we can remove the old lookup table instance and stop the original adapter
        // collect old adapter instances
        final ImmutableSet.Builder<LookupDataAdapter> existingAdapters = ImmutableSet.builder();
        // create new adapter and lookup table instances
        final Map<DataAdapterDto, LookupDataAdapter> newAdapters = createAdapters(configService.findDataAdaptersForIds(updated.ids()));
        final CountDownLatch runningLatch = new CountDownLatch(newAdapters.size());
        newAdapters.forEach((dto, adapter) -> {
            adapter.addListener(new DataAdapterListener(dto, adapter, runningLatch, existingAdapters::add), scheduler);
            adapter.startAsync();
        });
        // wait until every new adapter is either running or failed before updating the lookup tables
        awaitUninterruptibly(runningLatch);
        // when a data adapter is updated, the lookup tables that use it need to be updated as well
        final Collection<LookupTableDto> tablesToUpdate = configService.findTablesForDataAdapterIds(updated.ids());
        tablesToUpdate.forEach(this::createLookupTable);
        // stop old adapters
        existingAdapters.build().forEach(AbstractIdleService::stopAsync);
    }, 0, TimeUnit.SECONDS);
}
Also used: LookupDataAdapter (org.graylog2.plugin.lookup.LookupDataAdapter), DataAdapterDto (org.graylog2.lookup.dto.DataAdapterDto), ImmutableSet (com.google.common.collect.ImmutableSet), LookupTableDto (org.graylog2.lookup.dto.LookupTableDto), AbstractIdleService (com.google.common.util.concurrent.AbstractIdleService), CountDownLatch (java.util.concurrent.CountDownLatch), Subscribe (com.google.common.eventbus.Subscribe)
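handleAdapterUpdate is the same swap applied to data adapters rather than caches. The awaitUninterruptibly call in both handlers is Guava's statically imported Uninterruptibles.awaitUninterruptibly(CountDownLatch), which defers interrupts until the latch opens and then restores the thread's interrupt flag. In outline:

import java.util.concurrent.CountDownLatch;

// Outline of Uninterruptibles.awaitUninterruptibly(CountDownLatch):
static void awaitUninterruptiblyOutline(CountDownLatch latch) {
    boolean interrupted = false;
    try {
        while (true) {
            try {
                latch.await();
                return;
            } catch (InterruptedException e) {
                interrupted = true;  // remember the interrupt, keep waiting
            }
        }
    } finally {
        if (interrupted) {
            Thread.currentThread().interrupt();  // restore the flag for the caller
        }
    }
}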

Example 4 with AbstractIdleService

Use of com.google.common.util.concurrent.AbstractIdleService in project cdap by caskdata.

From class DistributedLogFramework, method createService:

@Override
protected Service createService(Set<Integer> partitions) {
    Map<String, LogPipelineSpecification<AppenderContext>> specs = new LogPipelineLoader(cConf).load(contextProvider);
    int pipelineCount = specs.size();
    // Create one KafkaLogProcessorPipeline per spec
    final List<Service> pipelines = new ArrayList<>();
    for (final LogPipelineSpecification<AppenderContext> pipelineSpec : specs.values()) {
        final CConfiguration cConf = pipelineSpec.getConf();
        final AppenderContext context = pipelineSpec.getContext();
        long bufferSize = getBufferSize(pipelineCount, cConf, partitions.size());
        final String topic = cConf.get(Constants.Logging.KAFKA_TOPIC);
        final KafkaPipelineConfig config = new KafkaPipelineConfig(topic, partitions, bufferSize, cConf.getLong(Constants.Logging.PIPELINE_EVENT_DELAY_MS), cConf.getInt(Constants.Logging.PIPELINE_KAFKA_FETCH_SIZE), cConf.getLong(Constants.Logging.PIPELINE_CHECKPOINT_INTERVAL_MS));
        RetryStrategy retryStrategy = RetryStrategies.fromConfiguration(cConf, "system.log.process.");
        pipelines.add(new RetryOnStartFailureService(new Supplier<Service>() {

            @Override
            public Service get() {
                return new KafkaLogProcessorPipeline(new LogProcessorPipelineContext(cConf, context.getName(), context, context.getMetricsContext(), context.getInstanceId()), checkpointManagerFactory.create(topic, pipelineSpec.getCheckpointPrefix()), brokerService, config);
            }
        }, retryStrategy));
    }
    // Returns a Service that starts/stops all pipelines.
    return new AbstractIdleService() {

        @Override
        protected void startUp() throws Exception {
            // Starts all pipelines
            validateAllFutures(Iterables.transform(pipelines, new Function<Service, ListenableFuture<State>>() {

                @Override
                public ListenableFuture<State> apply(Service service) {
                    return service.start();
                }
            }));
        }

        @Override
        protected void shutDown() throws Exception {
            // Stops all pipelines
            validateAllFutures(Iterables.transform(pipelines, new Function<Service, ListenableFuture<State>>() {

                @Override
                public ListenableFuture<State> apply(Service service) {
                    return service.stop();
                }
            }));
        }
    };
}
Also used: LogPipelineSpecification (co.cask.cdap.logging.framework.LogPipelineSpecification), ArrayList (java.util.ArrayList), KafkaPipelineConfig (co.cask.cdap.logging.pipeline.kafka.KafkaPipelineConfig), ResourceBalancerService (co.cask.cdap.common.resource.ResourceBalancerService), AbstractIdleService (com.google.common.util.concurrent.AbstractIdleService), RetryOnStartFailureService (co.cask.cdap.common.service.RetryOnStartFailureService), DiscoveryService (org.apache.twill.discovery.DiscoveryService), Service (com.google.common.util.concurrent.Service), BrokerService (org.apache.twill.kafka.client.BrokerService), LogPipelineLoader (co.cask.cdap.logging.framework.LogPipelineLoader), LogProcessorPipelineContext (co.cask.cdap.logging.pipeline.LogProcessorPipelineContext), CConfiguration (co.cask.cdap.common.conf.CConfiguration), Function (com.google.common.base.Function), KafkaLogProcessorPipeline (co.cask.cdap.logging.pipeline.kafka.KafkaLogProcessorPipeline), AppenderContext (co.cask.cdap.api.logging.AppenderContext), Supplier (com.google.common.base.Supplier), RetryStrategy (co.cask.cdap.common.service.RetryStrategy)
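The anonymous AbstractIdleService at the end is a hand-rolled composite: one parent service whose startUp/shutDown fan out to every pipeline and fail fast through validateAllFutures. Guava ships a ready-made composite for this shape, ServiceManager. A minimal sketch of that alternative, not what CDAP actually uses here:

import com.google.common.util.concurrent.Service;
import com.google.common.util.concurrent.ServiceManager;
import java.util.List;

// Sketch: ServiceManager starts/stops a fixed set of services together.
static void runAll(List<Service> pipelines) {
    ServiceManager manager = new ServiceManager(pipelines);
    manager.startAsync().awaitHealthy();  // returns once every pipeline is RUNNING;
                                          // throws if any of them failed to start
    // ... service lifetime ...
    manager.stopAsync().awaitStopped();   // waits for every pipeline to terminate
}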

Example 5 with AbstractIdleService

Use of com.google.common.util.concurrent.AbstractIdleService in project cdap by caskdata.

From class RetryOnStartFailureServiceTest, method testLoggingContext:

@Test
public void testLoggingContext() {
    // This tests that a logging context set before the service starts is propagated into the service
    final Map<String, String> context = Collections.singletonMap("key", "value");
    // Create the service before setting the context.
    Service service = new RetryOnStartFailureService(new Supplier<Service>() {

        @Override
        public Service get() {
            return new AbstractIdleService() {

                @Override
                protected void startUp() throws Exception {
                    Assert.assertEquals(context, MDC.getCopyOfContextMap());
                }

                @Override
                protected void shutDown() throws Exception {
                    Assert.assertEquals(context, MDC.getCopyOfContextMap());
                }
            };
        }
    }, RetryStrategies.noRetry());
    // Set the MDC context
    MDC.setContextMap(context);
    // Start and stop shouldn't throw
    service.startAndWait();
    service.stopAndWait();
}
Also used: Service (com.google.common.util.concurrent.Service), AbstractIdleService (com.google.common.util.concurrent.AbstractIdleService), TimeoutException (java.util.concurrent.TimeoutException), ExecutionException (java.util.concurrent.ExecutionException), Test (org.junit.Test)
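The assertions only pass because RetryOnStartFailureService evidently carries the caller's MDC into the thread that runs startUp()/shutDown(); SLF4J's MDC is otherwise thread-local. A generic capture-and-restore sketch of that propagation, with a hypothetical helper that is not CDAP code:

import java.util.Map;
import java.util.concurrent.ExecutorService;
import org.slf4j.MDC;

// Hypothetical helper: run a task on another thread under the caller's MDC.
static void submitWithCallerMdc(ExecutorService executor, Runnable task) {
    final Map<String, String> callerMdc = MDC.getCopyOfContextMap();  // may be null
    executor.submit(() -> {
        final Map<String, String> previous = MDC.getCopyOfContextMap();
        if (callerMdc != null) {
            MDC.setContextMap(callerMdc);
        }
        try {
            task.run();  // executes under the caller's logging context
        } finally {
            if (previous != null) {
                MDC.setContextMap(previous);  // put back whatever this thread had
            } else {
                MDC.clear();
            }
        }
    });
}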

Aggregations

AbstractIdleService (com.google.common.util.concurrent.AbstractIdleService): 5
ImmutableSet (com.google.common.collect.ImmutableSet): 2
Subscribe (com.google.common.eventbus.Subscribe): 2
Service (com.google.common.util.concurrent.Service): 2
CountDownLatch (java.util.concurrent.CountDownLatch): 2
ExecutionException (java.util.concurrent.ExecutionException): 2
TimeoutException (java.util.concurrent.TimeoutException): 2
LookupTableDto (org.graylog2.lookup.dto.LookupTableDto): 2
Test (org.junit.Test): 2
AppenderContext (co.cask.cdap.api.logging.AppenderContext): 1
CConfiguration (co.cask.cdap.common.conf.CConfiguration): 1
ResourceBalancerService (co.cask.cdap.common.resource.ResourceBalancerService): 1
RetryOnStartFailureService (co.cask.cdap.common.service.RetryOnStartFailureService): 1
RetryStrategy (co.cask.cdap.common.service.RetryStrategy): 1
LogPipelineLoader (co.cask.cdap.logging.framework.LogPipelineLoader): 1
LogPipelineSpecification (co.cask.cdap.logging.framework.LogPipelineSpecification): 1
LogProcessorPipelineContext (co.cask.cdap.logging.pipeline.LogProcessorPipelineContext): 1
KafkaLogProcessorPipeline (co.cask.cdap.logging.pipeline.kafka.KafkaLogProcessorPipeline): 1
KafkaPipelineConfig (co.cask.cdap.logging.pipeline.kafka.KafkaPipelineConfig): 1
RunId (com.continuuity.weave.api.RunId): 1