Use of com.google.common.base.Stopwatch in project druid by druid-io.
From the class KafkaLookupExtractorFactory, method start():
@Override
public boolean start() {
  synchronized (started) {
    if (started.get()) {
      LOG.warn("Already started, not starting again");
      return started.get();
    }
    if (executorService.isShutdown()) {
      LOG.warn("Already shut down, not starting again");
      return false;
    }
    final Properties kafkaProperties = new Properties();
    kafkaProperties.putAll(getKafkaProperties());
    if (kafkaProperties.containsKey("group.id")) {
      throw new IAE(
          "Cannot set kafka property [group.id]. Property is randomly generated for you. Found [%s]",
          kafkaProperties.getProperty("group.id")
      );
    }
    if (kafkaProperties.containsKey("auto.offset.reset")) {
      throw new IAE(
          "Cannot set kafka property [auto.offset.reset]. Property will be forced to [smallest]. Found [%s]",
          kafkaProperties.getProperty("auto.offset.reset")
      );
    }
    Preconditions.checkNotNull(
        kafkaProperties.getProperty("zookeeper.connect"),
        "zookeeper.connect required property"
    );
    kafkaProperties.setProperty("group.id", factoryId);
    final String topic = getKafkaTopic();
    LOG.debug("About to listen to topic [%s] with group.id [%s]", topic, factoryId);
    cacheHandler = cacheManager.createCache();
    final Map<String, String> map = cacheHandler.getCache();
    mapRef.set(map);
    // Enable publish-subscribe
    kafkaProperties.setProperty("auto.offset.reset", "smallest");
    final CountDownLatch startingReads = new CountDownLatch(1);
    final ListenableFuture<?> future = executorService.submit(new Runnable() {
      @Override
      public void run() {
        while (!executorService.isShutdown()) {
          consumerConnector = buildConnector(kafkaProperties);
          try {
            if (executorService.isShutdown()) {
              break;
            }
            final List<KafkaStream<String, String>> streams = consumerConnector.createMessageStreamsByFilter(
                new Whitelist(Pattern.quote(topic)), 1, DEFAULT_STRING_DECODER, DEFAULT_STRING_DECODER
            );
            if (streams == null || streams.isEmpty()) {
              throw new IAE("Topic [%s] had no streams", topic);
            }
            if (streams.size() > 1) {
              throw new ISE("Topic [%s] has %d streams! expected 1", topic, streams.size());
            }
            final KafkaStream<String, String> kafkaStream = streams.get(0);
            startingReads.countDown();
            for (final MessageAndMetadata<String, String> messageAndMetadata : kafkaStream) {
              final String key = messageAndMetadata.key();
              final String message = messageAndMetadata.message();
              if (key == null || message == null) {
                LOG.error("Bad key/message from topic [%s]: [%s]", topic, messageAndMetadata);
                continue;
              }
              doubleEventCount.incrementAndGet();
              map.put(key, message);
              doubleEventCount.incrementAndGet();
              LOG.trace("Placed key[%s] val[%s]", key, message);
            }
          } catch (Exception e) {
            LOG.error(e, "Error reading stream for topic [%s]", topic);
          } finally {
            consumerConnector.shutdown();
          }
        }
      }
    });
    Futures.addCallback(future, new FutureCallback<Object>() {
      @Override
      public void onSuccess(Object result) {
        LOG.debug("Success listening to [%s]", topic);
      }

      @Override
      public void onFailure(Throwable t) {
        if (t instanceof CancellationException) {
          LOG.debug("Topic [%s] cancelled", topic);
        } else {
          LOG.error(t, "Error in listening to [%s]", topic);
        }
      }
    }, MoreExecutors.sameThreadExecutor());
    this.future = future;
    final Stopwatch stopwatch = Stopwatch.createStarted();
    try {
      while (!startingReads.await(100, TimeUnit.MILLISECONDS) && connectTimeout > 0L) {
        // Don't return until we have actually connected
        if (future.isDone()) {
          future.get();
        } else if (stopwatch.elapsed(TimeUnit.MILLISECONDS) > connectTimeout) {
          throw new TimeoutException("Failed to connect to kafka in sufficient time");
        }
      }
    } catch (InterruptedException | ExecutionException | TimeoutException e) {
      executorService.shutdown();
      if (!future.isDone() && !future.cancel(false)) {
        LOG.warn("Could not cancel kafka listening thread");
      }
      LOG.error(e, "Failed to start kafka extraction factory");
      cacheHandler.close();
      return false;
    }
    started.set(true);
    return true;
  }
}
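The connect-timeout loop at the end of start() is a reusable pattern: poll a CountDownLatch in short slices while a Stopwatch enforces the overall deadline. A minimal, self-contained sketch of that pattern follows; the method name waitForStartup and the 100 ms poll interval are illustrative, not part of the Druid API:

import com.google.common.base.Stopwatch;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;

// Block until a background task counts down the latch, or throw once
// the Stopwatch shows the overall deadline has passed.
static void waitForStartup(CountDownLatch ready, long timeoutMs)
    throws InterruptedException, TimeoutException {
  final Stopwatch stopwatch = Stopwatch.createStarted();
  // Poll in short slices so the deadline is checked regularly.
  while (!ready.await(100, TimeUnit.MILLISECONDS)) {
    if (stopwatch.elapsed(TimeUnit.MILLISECONDS) > timeoutMs) {
      throw new TimeoutException("Did not start within " + timeoutMs + " ms");
    }
  }
}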
Use of com.google.common.base.Stopwatch in project druid by druid-io.
From the class RemoteTaskRunner, method announceTask():
/**
 * Creates a ZK entry under a specific path associated with a worker. The worker is responsible for
 * removing the task ZK entry and creating a task status ZK entry.
 *
 * @param task               The task to be announced to the worker
 * @param theZkWorker        The worker the task is assigned to
 * @param taskRunnerWorkItem The task to be assigned
 *
 * @return boolean indicating whether the task was successfully assigned or not
 */
private boolean announceTask(
    final Task task,
    final ZkWorker theZkWorker,
    final RemoteTaskRunnerWorkItem taskRunnerWorkItem
) throws Exception {
  Preconditions.checkArgument(task.getId().equals(taskRunnerWorkItem.getTaskId()), "task id != workItem id");
  final String worker = theZkWorker.getWorker().getHost();
  synchronized (statusLock) {
    if (!zkWorkers.containsKey(worker) || lazyWorkers.containsKey(worker)) {
      // The worker might have been killed or marked as lazy.
      log.info("Not assigning task to already removed worker[%s]", worker);
      return false;
    }
    log.info("Coordinator asking Worker[%s] to add task[%s]", worker, task.getId());
    CuratorUtils.createIfNotExists(
        cf,
        JOINER.join(indexerZkConfig.getTasksPath(), worker, task.getId()),
        CreateMode.EPHEMERAL,
        jsonMapper.writeValueAsBytes(task),
        config.getMaxZnodeBytes()
    );
    RemoteTaskRunnerWorkItem workItem = pendingTasks.remove(task.getId());
    if (workItem == null) {
      log.makeAlert("WTF?! Got a null work item from pending tasks?! How can this be?!")
         .addData("taskId", task.getId())
         .emit();
      return false;
    }
    RemoteTaskRunnerWorkItem newWorkItem = workItem.withWorker(theZkWorker.getWorker(), null);
    runningTasks.put(task.getId(), newWorkItem);
    log.info("Task %s switched from pending to running (on [%s])", task.getId(), newWorkItem.getWorker().getHost());
    TaskRunnerUtils.notifyStatusChanged(listeners, task.getId(), TaskStatus.running(task.getId()));
    // Syncing state with Zookeeper - don't assign new tasks until the task we just assigned is actually running
    // on a worker - this avoids overflowing a worker with tasks
    Stopwatch timeoutStopwatch = Stopwatch.createStarted();
    while (!isWorkerRunningTask(theZkWorker.getWorker(), task.getId())) {
      final long waitMs = config.getTaskAssignmentTimeout().toStandardDuration().getMillis();
      statusLock.wait(waitMs);
      long elapsed = timeoutStopwatch.elapsed(TimeUnit.MILLISECONDS);
      if (elapsed >= waitMs) {
        log.makeAlert(
            "Task assignment timed out on worker [%s], never ran task [%s]! Timeout: (%s >= %s)!",
            worker, task.getId(), elapsed, config.getTaskAssignmentTimeout()
        );
        taskComplete(taskRunnerWorkItem, theZkWorker, TaskStatus.failure(task.getId()));
        break;
      }
    }
    return true;
  }
}
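announceTask() pairs Object.wait() on statusLock with a Stopwatch so that spurious wakeups cannot stretch the wait past the assignment timeout. A generic sketch of that wait-with-deadline idiom, with awaitCondition and its parameters as hypothetical names rather than Druid code:

import com.google.common.base.Stopwatch;
import java.util.concurrent.TimeUnit;
import java.util.function.BooleanSupplier;

// Wait on a monitor until the condition holds or timeoutMs elapses.
// Returns true if the condition became true, false on timeout.
static boolean awaitCondition(Object lock, BooleanSupplier condition, long timeoutMs)
    throws InterruptedException {
  synchronized (lock) {
    final Stopwatch stopwatch = Stopwatch.createStarted();
    while (!condition.getAsBoolean()) {
      long remainingMs = timeoutMs - stopwatch.elapsed(TimeUnit.MILLISECONDS);
      if (remainingMs <= 0) {
        return false; // timed out; the caller decides how to fail
      }
      lock.wait(remainingMs); // re-checks the condition on every wakeup
    }
    return true;
  }
}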
Use of com.google.common.base.Stopwatch in project druid by druid-io.
From the class LoggingProgressIndicator, method startSection():
@Override
public void startSection(String section) {
  log.info("[%s]: Starting [%s]", progressName, section);
  Stopwatch sectionWatch = sections.get(section);
  if (sectionWatch != null) {
    throw new ISE("[%s]: Cannot start progress tracker for [%s]. It is already started.", progressName, section);
  }
  sectionWatch = Stopwatch.createStarted();
  sections.put(section, sectionWatch);
}
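The get/throw/put sequence above is a check-then-act: it is fine if sections is only touched under external synchronization, but with a ConcurrentMap the same guard can be made atomic. A hedged sketch of that variant, assuming a ConcurrentMap (which the snippet itself does not show):

import com.google.common.base.Stopwatch;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;

final ConcurrentMap<String, Stopwatch> sections = new ConcurrentHashMap<>();

void startSection(String section) {
  // putIfAbsent collapses the get/throw/put sequence into one atomic step.
  Stopwatch previous = sections.putIfAbsent(section, Stopwatch.createStarted());
  if (previous != null) {
    throw new IllegalStateException("Section [" + section + "] already started");
  }
}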
Use of com.google.common.base.Stopwatch in project druid by druid-io.
From the class LoggingProgressIndicator, method progressSection():
@Override
public void progressSection(String section, String message) {
  Stopwatch sectionWatch = sections.get(section);
  if (sectionWatch == null) {
    throw new ISE("[%s]: Cannot progress tracker for [%s]. Nothing started.", progressName, section);
  }
  long time = sectionWatch.elapsed(TimeUnit.MILLISECONDS);
  log.info("[%s]: [%s] : %s. Elapsed time: [%,d] millis", progressName, section, message, time);
}
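progressSection() relies on the fact that elapsed() samples a running Stopwatch without stopping it, so a section can report intermediate timings any number of times. A minimal standalone sketch, where reportProgress and the Runnable workload are illustrative:

import com.google.common.base.Stopwatch;
import java.util.concurrent.TimeUnit;

static void reportProgress(Runnable work) {
  Stopwatch watch = Stopwatch.createStarted();
  work.run(); // the workload being timed
  // elapsed() reads the watch without stopping it, so it can be
  // called repeatedly while the section is still running.
  System.out.printf("Elapsed time: [%,d] millis%n", watch.elapsed(TimeUnit.MILLISECONDS));
}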
Use of com.google.common.base.Stopwatch in project hbase by apache.
From the class TestClientNoCluster, method cycle():
/**
 * Code for each 'client' to run.
 *
 * @param id               client id; used to seed this client's random number generator
 * @param c                configuration to read test parameters from
 * @param sharedConnection connection shared by all clients
 * @throws IOException if a table operation fails
 */
static void cycle(int id, final Configuration c, final Connection sharedConnection) throws IOException {
  long namespaceSpan = c.getLong("hbase.test.namespace.span", 1000000);
  long startTime = System.currentTimeMillis();
  final int printInterval = 100000;
  Random rd = new Random(id);
  boolean get = c.getBoolean("hbase.test.do.gets", false);
  TableName tableName = TableName.valueOf(BIG_USER_TABLE);
  if (get) {
    try (Table table = sharedConnection.getTable(tableName)) {
      Stopwatch stopWatch = new Stopwatch();
      stopWatch.start();
      for (int i = 0; i < namespaceSpan; i++) {
        byte[] b = format(rd.nextLong());
        Get g = new Get(b);
        table.get(g);
        if (i % printInterval == 0) {
          LOG.info("Get " + printInterval + "/" + stopWatch.elapsedMillis());
          stopWatch.reset();
          stopWatch.start();
        }
      }
      LOG.info("Finished a cycle of " + namespaceSpan + " gets in " + (System.currentTimeMillis() - startTime) + "ms");
    }
  } else {
    try (BufferedMutator mutator = sharedConnection.getBufferedMutator(tableName)) {
      Stopwatch stopWatch = new Stopwatch();
      stopWatch.start();
      for (int i = 0; i < namespaceSpan; i++) {
        byte[] b = format(rd.nextLong());
        Put p = new Put(b);
        p.addColumn(HConstants.CATALOG_FAMILY, b, b);
        mutator.mutate(p);
        if (i % printInterval == 0) {
          LOG.info("Put " + printInterval + "/" + stopWatch.elapsedMillis());
          stopWatch.reset();
          stopWatch.start();
        }
      }
      LOG.info("Finished a cycle putting " + namespaceSpan + " in " + (System.currentTimeMillis() - startTime) + "ms");
    }
  }
}
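This HBase test uses an older Guava API (the public Stopwatch constructor and elapsedMillis()), both since removed from Guava; on current releases the same per-interval throughput pattern is written with createStarted(), elapsed(TimeUnit), and the chainable reset(). A sketch under that assumption, where timedLoop, total, printInterval, and doOneOperation are illustrative names:

import com.google.common.base.Stopwatch;
import java.util.concurrent.TimeUnit;

static void timedLoop(long total, int printInterval) {
  Stopwatch stopWatch = Stopwatch.createStarted();
  for (long i = 0; i < total; i++) {
    doOneOperation(i); // hypothetical workload
    if (i % printInterval == 0) {
      System.out.println("Ops " + printInterval + "/" + stopWatch.elapsed(TimeUnit.MILLISECONDS));
      stopWatch.reset().start(); // reset() returns this, so the calls chain
    }
  }
}

static void doOneOperation(long i) {
  // stand-in for the Get/Put issued in the HBase test
}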