use of com.google.common.util.concurrent.AbstractIdleService in project weave by continuuity.
the class ZKServiceDecoratorTest method testStateTransition.
@Test
public void testStateTransition() throws InterruptedException, ExecutionException, TimeoutException {
  InMemoryZKServer zkServer = InMemoryZKServer.builder().build();
  zkServer.startAndWait();
  try {
    final String namespace = Joiner.on('/').join("/weave", RunIds.generate(), "runnables", "Runner1");
    final ZKClientService zkClient = ZKClientService.Builder.of(zkServer.getConnectionStr()).build();
    zkClient.startAndWait();
    zkClient.create(namespace, null, CreateMode.PERSISTENT).get();
    try {
      JsonObject content = new JsonObject();
      content.addProperty("containerId", "container-123");
      content.addProperty("host", "localhost");

      RunId runId = RunIds.generate();
      final Semaphore semaphore = new Semaphore(0);
      ZKServiceDecorator service = new ZKServiceDecorator(
        ZKClients.namespace(zkClient, namespace), runId,
        Suppliers.ofInstance(content), new AbstractIdleService() {
          @Override
          protected void startUp() throws Exception {
            Preconditions.checkArgument(semaphore.tryAcquire(5, TimeUnit.SECONDS), "Fail to start");
          }

          @Override
          protected void shutDown() throws Exception {
            Preconditions.checkArgument(semaphore.tryAcquire(5, TimeUnit.SECONDS), "Fail to stop");
          }
        });

      final String runnablePath = namespace + "/" + runId.getId();
      final AtomicReference<String> stateMatch = new AtomicReference<String>("STARTING");
      watchDataChange(zkClient, runnablePath + "/state", semaphore, stateMatch);
      Assert.assertEquals(Service.State.RUNNING, service.start().get(5, TimeUnit.SECONDS));

      stateMatch.set("STOPPING");
      Assert.assertEquals(Service.State.TERMINATED, service.stop().get(5, TimeUnit.SECONDS));
    } finally {
      zkClient.stopAndWait();
    }
  } finally {
    zkServer.stopAndWait();
  }
}
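Here AbstractIdleService supplies only the one-shot startUp()/shutDown() hooks, each gated on a semaphore that the ZooKeeper watch releases when the expected state node appears. As a standalone sketch of that contract (the class name is illustrative; the test above uses the older start()/stop() futures API, while the sketch uses the startAsync()/awaitRunning() methods of Guava 15+):

import com.google.common.util.concurrent.AbstractIdleService;

// Illustrative only: a service with no work loop of its own, just
// one-time startup and shutdown hooks run on a dedicated thread.
public class EchoService extends AbstractIdleService {
  @Override
  protected void startUp() throws Exception {
    // runs once, when startAsync() is called
    System.out.println("started");
  }

  @Override
  protected void shutDown() throws Exception {
    // runs once, when stopAsync() is called
    System.out.println("stopped");
  }

  public static void main(String[] args) {
    EchoService service = new EchoService();
    service.startAsync().awaitRunning();
    service.stopAsync().awaitTerminated();
  }
}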
use of com.google.common.util.concurrent.AbstractIdleService in project graylog2-server by Graylog2.
the class LookupTableService method handleCacheUpdate.
@Subscribe
public void handleCacheUpdate(CachesUpdated updated) {
  scheduler.schedule(() -> {
    // first we create the new cache instance and start it
    // then we retrieve the old one so we can safely stop it later
    // then we build a new lookup table instance with the new cache instance
    // last we can remove the old lookup table instance and stop the original cache

    // collect old cache instances
    final ImmutableSet.Builder<LookupCache> existingCaches = ImmutableSet.builder();

    // create new cache and lookup table instances
    final Map<CacheDto, LookupCache> newCaches = createCaches(configService.findCachesForIds(updated.ids()));
    final CountDownLatch runningLatch = new CountDownLatch(newCaches.size());
    newCaches.forEach((cacheDto, cache) -> {
      cache.addListener(new CacheListener(cacheDto, cache, runningLatch, existingCaches::add), scheduler);
      cache.startAsync();
    });

    // wait until everything is either running or failed before updating the lookup tables
    awaitUninterruptibly(runningLatch);

    // when a cache is updated, the lookup tables that use it need to be updated as well
    final Collection<LookupTableDto> tablesToUpdate = configService.findTablesForCacheIds(updated.ids());
    tablesToUpdate.forEach(this::createLookupTable);

    // stop old caches
    existingCaches.build().forEach(AbstractIdleService::stopAsync);
  }, 0, TimeUnit.SECONDS);
}
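CacheListener is Graylog-internal; judging by its constructor arguments it releases the runningLatch once the new cache settles and hands the replaced instance to existingCaches::add. A minimal sketch of just the latch half, using only Guava's Service.Listener callbacks (the class and field names are illustrative, not Graylog's):

import com.google.common.util.concurrent.Service;
import java.util.concurrent.CountDownLatch;

// Sketch: release the latch once the service settles, i.e. it either
// reaches RUNNING or fails on the way up.
class StartupLatchListener extends Service.Listener {
  private final CountDownLatch latch;

  StartupLatchListener(CountDownLatch latch) {
    this.latch = latch;
  }

  @Override
  public void running() {
    latch.countDown();
  }

  @Override
  public void failed(Service.State from, Throwable failure) {
    latch.countDown();
  }
}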
use of com.google.common.util.concurrent.AbstractIdleService in project graylog2-server by Graylog2.
the class LookupTableService method handleAdapterUpdate.
@Subscribe
public void handleAdapterUpdate(DataAdaptersUpdated updated) {
  scheduler.schedule(() -> {
    // first we create the new adapter instance and start it
    // then we retrieve the old one so we can safely stop it later
    // then we build a new lookup table instance with the new adapter instance
    // last we can remove the old lookup table instance and stop the original adapter

    // collect old adapter instances
    final ImmutableSet.Builder<LookupDataAdapter> existingAdapters = ImmutableSet.builder();

    // create new adapter and lookup table instances
    final Map<DataAdapterDto, LookupDataAdapter> newAdapters = createAdapters(configService.findDataAdaptersForIds(updated.ids()));
    final CountDownLatch runningLatch = new CountDownLatch(newAdapters.size());
    newAdapters.forEach((dto, adapter) -> {
      adapter.addListener(new DataAdapterListener(dto, adapter, runningLatch, existingAdapters::add), scheduler);
      adapter.startAsync();
    });

    // wait until everything is either running or failed before updating the lookup tables
    awaitUninterruptibly(runningLatch);

    // when a data adapter is updated, the lookup tables that use it need to be updated as well
    final Collection<LookupTableDto> tablesToUpdate = configService.findTablesForDataAdapterIds(updated.ids());
    tablesToUpdate.forEach(this::createLookupTable);

    // stop old adapters
    existingAdapters.build().forEach(AbstractIdleService::stopAsync);
  }, 0, TimeUnit.SECONDS);
}
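Both handlers block on awaitUninterruptibly, presumably the static import from Guava's Uninterruptibles, which waits on the latch while deferring thread interrupts. A hedged sketch of the same wait with an upper bound, in case a starting adapter hangs (the helper name and timeout value are assumptions, not Graylog code):

import com.google.common.util.concurrent.Uninterruptibles;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;

// Sketch: bounded, interrupt-safe wait. Returns false if the services
// have not all settled (running or failed) within the timeout.
static boolean awaitStartup(CountDownLatch runningLatch) {
  return Uninterruptibles.awaitUninterruptibly(runningLatch, 60, TimeUnit.SECONDS);
}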
use of com.google.common.util.concurrent.AbstractIdleService in project cdap by caskdata.
the class DistributedLogFramework method createService.
@Override
protected Service createService(Set<Integer> partitions) {
  Map<String, LogPipelineSpecification<AppenderContext>> specs = new LogPipelineLoader(cConf).load(contextProvider);
  int pipelineCount = specs.size();

  // Create one KafkaLogProcessorPipeline per spec
  final List<Service> pipelines = new ArrayList<>();
  for (final LogPipelineSpecification<AppenderContext> pipelineSpec : specs.values()) {
    final CConfiguration cConf = pipelineSpec.getConf();
    final AppenderContext context = pipelineSpec.getContext();

    long bufferSize = getBufferSize(pipelineCount, cConf, partitions.size());
    final String topic = cConf.get(Constants.Logging.KAFKA_TOPIC);
    final KafkaPipelineConfig config = new KafkaPipelineConfig(
      topic, partitions, bufferSize,
      cConf.getLong(Constants.Logging.PIPELINE_EVENT_DELAY_MS),
      cConf.getInt(Constants.Logging.PIPELINE_KAFKA_FETCH_SIZE),
      cConf.getLong(Constants.Logging.PIPELINE_CHECKPOINT_INTERVAL_MS));

    RetryStrategy retryStrategy = RetryStrategies.fromConfiguration(cConf, "system.log.process.");
    pipelines.add(new RetryOnStartFailureService(new Supplier<Service>() {
      @Override
      public Service get() {
        return new KafkaLogProcessorPipeline(
          new LogProcessorPipelineContext(cConf, context.getName(), context,
                                          context.getMetricsContext(), context.getInstanceId()),
          checkpointManagerFactory.create(topic, pipelineSpec.getCheckpointPrefix()),
          brokerService, config);
      }
    }, retryStrategy));
  }

  // Returns a Service that starts/stops all pipelines together.
  return new AbstractIdleService() {
    @Override
    protected void startUp() throws Exception {
      // Start all pipelines
      validateAllFutures(Iterables.transform(pipelines, new Function<Service, ListenableFuture<State>>() {
        @Override
        public ListenableFuture<State> apply(Service service) {
          return service.start();
        }
      }));
    }

    @Override
    protected void shutDown() throws Exception {
      // Stop all pipelines
      validateAllFutures(Iterables.transform(pipelines, new Function<Service, ListenableFuture<State>>() {
        @Override
        public ListenableFuture<State> apply(Service service) {
          return service.stop();
        }
      }));
    }
  };
}
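validateAllFutures is CDAP-internal; a plausible reading is that it waits for every pipeline future and propagates a failure if any of them failed. A minimal sketch of that behavior on top of Guava's Futures.allAsList (the method body is an assumption, not the CDAP implementation):

import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;

// Sketch: block until every future completes. The combined future from
// allAsList fails if any input fails, so get() surfaces that failure.
static <T> void validateAllFutures(Iterable<ListenableFuture<T>> futures) throws Exception {
  Futures.allAsList(futures).get();
}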
use of com.google.common.util.concurrent.AbstractIdleService in project cdap by caskdata.
the class RetryOnStartFailureServiceTest method testLoggingContext.
@Test
public void testLoggingContext() {
  // This tests that a logging context set before the service starts is propagated into the service
  final Map<String, String> context = Collections.singletonMap("key", "value");

  // Create the service before setting the context.
  Service service = new RetryOnStartFailureService(new Supplier<Service>() {
    @Override
    public Service get() {
      return new AbstractIdleService() {
        @Override
        protected void startUp() throws Exception {
          Assert.assertEquals(context, MDC.getCopyOfContextMap());
        }

        @Override
        protected void shutDown() throws Exception {
          Assert.assertEquals(context, MDC.getCopyOfContextMap());
        }
      };
    }
  }, RetryStrategies.noRetry());

  // Set the MDC context
  MDC.setContextMap(context);

  // Start and stop shouldn't throw
  service.startAndWait();
  service.stopAndWait();
}
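MDC is thread-local, and AbstractIdleService runs startUp() on its own thread, so the assertion above only holds if the context is carried over, whether by RetryOnStartFailureService itself or by the MDC implementation inheriting it into child threads. A hedged sketch of the usual explicit capture-and-restore pattern (not necessarily CDAP's implementation):

import java.util.Map;
import org.slf4j.MDC;

// Sketch: capture the caller's MDC now, re-install it on whatever thread
// eventually runs the task, and clean up afterwards.
static Runnable withCallerMdc(Runnable task) {
  final Map<String, String> captured = MDC.getCopyOfContextMap();
  return () -> {
    if (captured != null) {
      MDC.setContextMap(captured);
    }
    try {
      task.run();
    } finally {
      MDC.clear();
    }
  };
}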