Example 16 with Stopwatch

Use of com.google.common.base.Stopwatch in project druid by druid-io.

From class KafkaLookupExtractorFactory, method start():

@Override
public boolean start() {
    synchronized (started) {
        if (started.get()) {
            LOG.warn("Already started, not starting again");
            return started.get();
        }
        if (executorService.isShutdown()) {
            LOG.warn("Already shut down, not starting again");
            return false;
        }
        final Properties kafkaProperties = new Properties();
        kafkaProperties.putAll(getKafkaProperties());
        if (kafkaProperties.containsKey("group.id")) {
            throw new IAE("Cannot set kafka property [group.id]. Property is randomly generated for you. Found [%s]", kafkaProperties.getProperty("group.id"));
        }
        if (kafkaProperties.containsKey("auto.offset.reset")) {
            throw new IAE("Cannot set kafka property [auto.offset.reset]. Property will be forced to [smallest]. Found [%s]", kafkaProperties.getProperty("auto.offset.reset"));
        }
        Preconditions.checkNotNull(kafkaProperties.getProperty("zookeeper.connect"), "zookeeper.connect required property");
        kafkaProperties.setProperty("group.id", factoryId);
        final String topic = getKafkaTopic();
        LOG.debug("About to listen to topic [%s] with group.id [%s]", topic, factoryId);
        cacheHandler = cacheManager.createCache();
        final Map<String, String> map = cacheHandler.getCache();
        mapRef.set(map);
        // Replay the topic from the earliest offset so the cache sees every key (publish-subscribe semantics)
        kafkaProperties.setProperty("auto.offset.reset", "smallest");
        final CountDownLatch startingReads = new CountDownLatch(1);
        final ListenableFuture<?> future = executorService.submit(new Runnable() {

            @Override
            public void run() {
                while (!executorService.isShutdown()) {
                    consumerConnector = buildConnector(kafkaProperties);
                    try {
                        if (executorService.isShutdown()) {
                            break;
                        }
                        final List<KafkaStream<String, String>> streams = consumerConnector.createMessageStreamsByFilter(new Whitelist(Pattern.quote(topic)), 1, DEFAULT_STRING_DECODER, DEFAULT_STRING_DECODER);
                        if (streams == null || streams.isEmpty()) {
                            throw new IAE("Topic [%s] had no streams", topic);
                        }
                        if (streams.size() > 1) {
                            throw new ISE("Topic [%s] has %d streams! expected 1", topic, streams.size());
                        }
                        final KafkaStream<String, String> kafkaStream = streams.get(0);
                        startingReads.countDown();
                        for (final MessageAndMetadata<String, String> messageAndMetadata : kafkaStream) {
                            final String key = messageAndMetadata.key();
                            final String message = messageAndMetadata.message();
                            if (key == null || message == null) {
                                LOG.error("Bad key/message from topic [%s]: [%s]", topic, messageAndMetadata);
                                continue;
                            }
                            doubleEventCount.incrementAndGet();
                            map.put(key, message);
                            doubleEventCount.incrementAndGet();
                            LOG.trace("Placed key[%s] val[%s]", key, message);
                        }
                    } catch (Exception e) {
                        LOG.error(e, "Error reading stream for topic [%s]", topic);
                    } finally {
                        consumerConnector.shutdown();
                    }
                }
            }
        });
        Futures.addCallback(future, new FutureCallback<Object>() {

            @Override
            public void onSuccess(Object result) {
                LOG.debug("Success listening to [%s]", topic);
            }

            @Override
            public void onFailure(Throwable t) {
                if (t instanceof CancellationException) {
                    LOG.debug("Topic [%s] cancelled", topic);
                } else {
                    LOG.error(t, "Error in listening to [%s]", topic);
                }
            }
        }, MoreExecutors.sameThreadExecutor());
        this.future = future;
        final Stopwatch stopwatch = Stopwatch.createStarted();
        try {
            while (!startingReads.await(100, TimeUnit.MILLISECONDS) && connectTimeout > 0L) {
                // Don't return until we have actually connected
                if (future.isDone()) {
                    future.get();
                } else {
                    if (stopwatch.elapsed(TimeUnit.MILLISECONDS) > connectTimeout) {
                        throw new TimeoutException("Failed to connect to kafka in sufficient time");
                    }
                }
            }
        } catch (InterruptedException | ExecutionException | TimeoutException e) {
            executorService.shutdown();
            if (!future.isDone() && !future.cancel(false)) {
                LOG.warn("Could not cancel kafka listening thread");
            }
            LOG.error(e, "Failed to start kafka extraction factory");
            cacheHandler.close();
            return false;
        }
        started.set(true);
        return true;
    }
}
Also used: MessageAndMetadata (kafka.message.MessageAndMetadata), Stopwatch (com.google.common.base.Stopwatch), KafkaStream (kafka.consumer.KafkaStream), Properties (java.util.Properties), IAE (io.druid.java.util.common.IAE), CountDownLatch (java.util.concurrent.CountDownLatch), TimeoutException (java.util.concurrent.TimeoutException), CancellationException (java.util.concurrent.CancellationException), ExecutionException (java.util.concurrent.ExecutionException), Whitelist (kafka.consumer.Whitelist), List (java.util.List), ISE (io.druid.java.util.common.ISE)
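
The part of this example that actually uses the Stopwatch is the startup wait at the bottom: the calling thread polls a CountDownLatch in 100 ms slices and bails out once elapsed time passes connectTimeout. A minimal sketch of the same guard in isolation (class and method names here are illustrative, not part of the druid code):

import com.google.common.base.Stopwatch;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;

final class StartupAwait {

    // Poll the latch in short slices so the caller can also keep an eye on
    // its worker; fail once the Stopwatch exceeds the allowed connect time.
    static void awaitStart(CountDownLatch ready, long timeoutMillis)
            throws InterruptedException, TimeoutException {
        Stopwatch stopwatch = Stopwatch.createStarted();
        while (!ready.await(100, TimeUnit.MILLISECONDS)) {
            if (stopwatch.elapsed(TimeUnit.MILLISECONDS) > timeoutMillis) {
                throw new TimeoutException("Not started after " + timeoutMillis + " ms");
            }
        }
    }
}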

Example 17 with Stopwatch

Use of com.google.common.base.Stopwatch in project buck by facebook.

From class ThriftCoordinatorClient, method start():

public ThriftCoordinatorClient start() throws IOException {
    transport = new TFramedTransport(new TSocket(remoteHost, remotePort));
    Stopwatch stopwatch = Stopwatch.createStarted();
    while (true) {
        try {
            transport.open();
            break;
        } catch (TTransportException e) {
            if (stopwatch.elapsed(TimeUnit.SECONDS) > MAX_CONNECT_TIMEOUT_SECONDS) {
                throw new IOException(String.format("Failed to connect. Coordinator is still not healthy after [%d] seconds.", MAX_CONNECT_TIMEOUT_SECONDS));
            }
            LOG.debug("Coordinator server currently not available. Retrying in a bit...");
            try {
                Thread.sleep(TimeUnit.SECONDS.toMillis(RETRY_TIMEOUT_SECONDS));
            } catch (InterruptedException innerException) {
                throw new RuntimeException(innerException);
            }
        }
    }
    TProtocol protocol = new TBinaryProtocol(transport);
    client = new CoordinatorService.Client(protocol);
    return this;
}
Also used: TBinaryProtocol (org.apache.thrift.protocol.TBinaryProtocol), TProtocol (org.apache.thrift.protocol.TProtocol), TFramedTransport (org.apache.thrift.transport.TFramedTransport), Stopwatch (com.google.common.base.Stopwatch), TTransportException (org.apache.thrift.transport.TTransportException), CoordinatorService (com.facebook.buck.distributed.thrift.CoordinatorService), IOException (java.io.IOException), TSocket (org.apache.thrift.transport.TSocket)
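
Here the Stopwatch bounds a retry loop: keep attempting to open the transport, sleeping between tries, until either it opens or the total elapsed time crosses MAX_CONNECT_TIMEOUT_SECONDS. The same shape as a generic sketch (the names and the Runnable-based signature are assumptions for illustration):

import com.google.common.base.Stopwatch;
import java.io.IOException;
import java.util.concurrent.TimeUnit;

final class RetryWithDeadline {

    // Keep retrying the one-shot connect action until it succeeds or the
    // Stopwatch shows the overall deadline has passed.
    static void connect(Runnable connectOnce, long deadlineSeconds, long pauseSeconds)
            throws IOException {
        Stopwatch stopwatch = Stopwatch.createStarted();
        while (true) {
            try {
                connectOnce.run();
                return;
            } catch (RuntimeException e) {
                if (stopwatch.elapsed(TimeUnit.SECONDS) > deadlineSeconds) {
                    throw new IOException(
                        String.format("Still not healthy after [%d] seconds.", deadlineSeconds), e);
                }
                try {
                    Thread.sleep(TimeUnit.SECONDS.toMillis(pauseSeconds));
                } catch (InterruptedException ie) {
                    Thread.currentThread().interrupt();
                    throw new RuntimeException(ie);
                }
            }
        }
    }
}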

Example 18 with Stopwatch

Use of com.google.common.base.Stopwatch in project buck by facebook.

From class DistBuildClientExecutor, method executeAndPrintFailuresToEventBus():

public int executeAndPrintFailuresToEventBus(final WeightedListeningExecutorService executorService, ProjectFilesystem projectFilesystem, FileHashCache fileHashCache, BuckEventBus eventBus) throws IOException, InterruptedException {
    BuildJob job = distBuildService.createBuild();
    final StampedeId id = job.getStampedeId();
    LOG.info("Created job. Build id = " + id.getId());
    logDebugInfo(job);
    List<ListenableFuture<Void>> asyncJobs = new LinkedList<>();
    LOG.info("Uploading local changes.");
    asyncJobs.add(distBuildService.uploadMissingFiles(buildJobState.fileHashes, executorService));
    LOG.info("Uploading target graph.");
    asyncJobs.add(distBuildService.uploadTargetGraph(buildJobState, id, executorService));
    LOG.info("Uploading buck dot-files.");
    asyncJobs.add(distBuildService.uploadBuckDotFiles(id, projectFilesystem, fileHashCache, executorService));
    try {
        Futures.allAsList(asyncJobs).get();
    } catch (ExecutionException e) {
        LOG.error("Upload failed.");
        throw new RuntimeException(e);
    }
    distBuildService.setBuckVersion(id, buckVersion);
    LOG.info("Set Buck Version. Build status: " + job.getStatus().toString());
    job = distBuildService.startBuild(id);
    LOG.info("Started job. Build status: " + job.getStatus().toString());
    logDebugInfo(job);
    Stopwatch stopwatch = Stopwatch.createStarted();
    // Keep polling until the build is complete or failed.
    do {
        job = distBuildService.getCurrentBuildJobState(id);
        LOG.info("Got build status: " + job.getStatus().toString());
        DistBuildStatus distBuildStatus = prepareStatusFromJob(job).setETAMillis(MAX_BUILD_DURATION_MILLIS - stopwatch.elapsed(TimeUnit.MILLISECONDS)).build();
        eventBus.post(new DistBuildStatusEvent(distBuildStatus));
        List<LogLineBatchRequest> newLogLineRequests = distBuildLogStateTracker.createRealtimeLogRequests(job.getSlaveInfoByRunId().values());
        MultiGetBuildSlaveRealTimeLogsResponse slaveLogsResponse = distBuildService.fetchSlaveLogLines(job.stampedeId, newLogLineRequests);
        distBuildLogStateTracker.processStreamLogs(slaveLogsResponse.getMultiStreamLogs());
        try {
            // TODO(shivanker): Get rid of sleeps in methods which we want to unit test
            Thread.sleep(millisBetweenStatusPoll);
        } catch (InterruptedException e) {
            LOG.error(e, "BuildStatus polling sleep call has been interrupted unexpectedly.");
        }
    } while (!(job.getStatus().equals(BuildStatus.FINISHED_SUCCESSFULLY) || job.getStatus().equals(BuildStatus.FAILED)));
    try {
        MultiGetBuildSlaveLogDirResponse logDirsResponse = distBuildService.fetchBuildSlaveLogDir(job.stampedeId, distBuildLogStateTracker.runIdsToMaterializeLogDirsFor(job.slaveInfoByRunId.values()));
        distBuildLogStateTracker.materializeLogDirs(logDirsResponse.getLogDirs());
    } catch (IOException ex) {
        LOG.error(ex);
    }
    LOG.info("Build was " + (job.getStatus().equals(BuildStatus.FINISHED_SUCCESSFULLY) ? "" : "not ") + "successful!");
    logDebugInfo(job);
    DistBuildStatus distBuildStatus = prepareStatusFromJob(job).setETAMillis(0).build();
    eventBus.post(new DistBuildStatusEvent(distBuildStatus));
    return job.getStatus().equals(BuildStatus.FINISHED_SUCCESSFULLY) ? 0 : 1;
}
Also used: LogLineBatchRequest (com.facebook.buck.distributed.thrift.LogLineBatchRequest), MultiGetBuildSlaveRealTimeLogsResponse (com.facebook.buck.distributed.thrift.MultiGetBuildSlaveRealTimeLogsResponse), MultiGetBuildSlaveLogDirResponse (com.facebook.buck.distributed.thrift.MultiGetBuildSlaveLogDirResponse), Stopwatch (com.google.common.base.Stopwatch), IOException (java.io.IOException), LinkedList (java.util.LinkedList), StampedeId (com.facebook.buck.distributed.thrift.StampedeId), ListenableFuture (com.google.common.util.concurrent.ListenableFuture), BuildJob (com.facebook.buck.distributed.thrift.BuildJob), ExecutionException (java.util.concurrent.ExecutionException)
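
In this example the Stopwatch drives a rough ETA for each status event: the fixed build budget minus elapsed milliseconds. Pulled out as a tiny helper (hypothetical name; unlike the code above, it clamps at zero so an overrunning build reports an ETA of 0 rather than a negative number):

import com.google.common.base.Stopwatch;
import java.util.concurrent.TimeUnit;

final class Eta {

    // Remaining time out of a fixed budget, clamped so it never goes negative.
    static long remainingMillis(Stopwatch stopwatch, long budgetMillis) {
        return Math.max(0L, budgetMillis - stopwatch.elapsed(TimeUnit.MILLISECONDS));
    }
}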

Example 19 with Stopwatch

Use of com.google.common.base.Stopwatch in project jirm by agentgt.

From class PlainSqlTest, method testPerformanceFromClasspath():

@Test
public void testPerformanceFromClasspath() throws Exception {
    Stopwatch sw = new Stopwatch().start();
    for (int i = 0; i < 300000; i++) {
        PlainSql.fromResource(getClass(), "search-recruiting-name.sql");
    }
    assertTrue("Should be faster", sw.stop().elapsedMillis() < 5000);
}
Also used: Stopwatch (com.google.common.base.Stopwatch), Test (org.junit.Test)
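
Note that this test is written against the pre-Guava-15 Stopwatch API: the public constructor and elapsedMillis() were deprecated and later removed in favor of the static factories and elapsed(TimeUnit). A sketch of the same test against the current API (the PlainSql import is omitted since it depends on the jirm module layout):

import static org.junit.Assert.assertTrue;

import com.google.common.base.Stopwatch;
import java.util.concurrent.TimeUnit;
import org.junit.Test;

public class PlainSqlModernApiTest {

    @Test
    public void testPerformanceFromClasspath() throws Exception {
        Stopwatch sw = Stopwatch.createStarted();
        for (int i = 0; i < 300000; i++) {
            PlainSql.fromResource(getClass(), "search-recruiting-name.sql");
        }
        // stop() returns the Stopwatch itself, so the read can stay chained.
        assertTrue("Should be faster", sw.stop().elapsed(TimeUnit.MILLISECONDS) < 5000);
    }
}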

Example 20 with Stopwatch

Use of com.google.common.base.Stopwatch in project presto by prestodb.

From class TestBlackHoleSmoke, method pageProcessingDelay():

@Test
public void pageProcessingDelay() throws Exception {
    Session session = testSessionBuilder().setCatalog("blackhole").setSchema("default").build();
    Duration pageProcessingDelay = new Duration(1, SECONDS);
    assertThatQueryReturnsValue(format("CREATE TABLE nation WITH ( %s = 8, %s = 1, %s = 1, %s = 1, %s = '%s' ) AS " + "SELECT * FROM tpch.tiny.nation", FIELD_LENGTH_PROPERTY, ROWS_PER_PAGE_PROPERTY, PAGES_PER_SPLIT_PROPERTY, SPLIT_COUNT_PROPERTY, PAGE_PROCESSING_DELAY, pageProcessingDelay), 25L, session);
    Stopwatch stopwatch = Stopwatch.createStarted();
    assertEquals(queryRunner.execute(session, "SELECT * FROM nation").getRowCount(), 1);
    queryRunner.execute(session, "INSERT INTO nation SELECT CAST(null AS BIGINT), CAST(null AS VARCHAR(25)), CAST(null AS BIGINT), CAST(null AS VARCHAR(152))");
    stopwatch.stop();
    assertGreaterThan(stopwatch.elapsed(MILLISECONDS), pageProcessingDelay.toMillis());
    assertThatQueryReturnsValue("DROP TABLE nation", true);
}
Also used: Stopwatch (com.google.common.base.Stopwatch), Duration (io.airlift.units.Duration), Session (com.facebook.presto.Session), Test (org.testng.annotations.Test), BeforeTest (org.testng.annotations.BeforeTest), AfterTest (org.testng.annotations.AfterTest)
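
Unlike the jirm test above, this Stopwatch asserts a lower bound: the query must take at least the configured page-processing delay, proving the delay was actually applied. The generic shape of that assertion (helper name is hypothetical):

import com.google.common.base.Stopwatch;
import java.util.concurrent.TimeUnit;

final class TimingAsserts {

    // Run the operation and assert it took at least minMillis, the inverse
    // of the usual "fast enough" performance check.
    static void assertTookAtLeast(Runnable operation, long minMillis) {
        Stopwatch stopwatch = Stopwatch.createStarted();
        operation.run();
        stopwatch.stop();
        if (stopwatch.elapsed(TimeUnit.MILLISECONDS) <= minMillis) {
            throw new AssertionError(
                "Expected at least " + minMillis + " ms, but took "
                + stopwatch.elapsed(TimeUnit.MILLISECONDS) + " ms");
        }
    }
}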

Aggregations

Stopwatch (com.google.common.base.Stopwatch): 296
IOException (java.io.IOException): 75
ArrayList (java.util.ArrayList): 29
ExecutionException (java.util.concurrent.ExecutionException): 27
Test (org.junit.Test): 17
Map (java.util.Map): 16
File (java.io.File): 15
DocumentStoreException (org.apache.jackrabbit.oak.plugins.document.DocumentStoreException): 15
Path (org.apache.hadoop.fs.Path): 14
HashMap (java.util.HashMap): 13
List (java.util.List): 12
AtomicInteger (java.util.concurrent.atomic.AtomicInteger): 11
DrillRuntimeException (org.apache.drill.common.exceptions.DrillRuntimeException): 11
DBCollection (com.mongodb.DBCollection): 9
ISE (io.druid.java.util.common.ISE): 9
ListenableFuture (com.google.common.util.concurrent.ListenableFuture): 8
OptionsParser (com.google.devtools.common.options.OptionsParser): 8
MongoException (com.mongodb.MongoException): 8
Connection (java.sql.Connection): 8
CountDownLatch (java.util.concurrent.CountDownLatch): 8