Example 16 with Context

Use of com.codahale.metrics.Timer.Context in project newts by OpenNMS.

The class ImportRunner, method parMap.

private Observable<Boolean> parMap(Observable<List<Sample>> samples, MetricRegistry metrics, Func1<List<Sample>, Boolean> insert) {
    final Timer waitTime = metrics.timer("wait-time");
    @SuppressWarnings("serial") final BlockingQueue<Runnable> workQueue = new LinkedBlockingQueue<Runnable>(m_maxThreadQueueSize == 0 ? m_threadCount * 3 : m_maxThreadQueueSize) {

        @Override
        public boolean offer(Runnable r) {
            try (Context time = waitTime.time()) {
                this.put(r);
                return true;
            } catch (InterruptedException e) {
                throw Exceptions.propagate(e);
            }
        }

        @Override
        public boolean add(Runnable r) {
            try (Context time = waitTime.time()) {
                this.put(r);
                return true;
            } catch (InterruptedException e) {
                throw Exceptions.propagate(e);
            }
        }
    };
    final ThreadPoolExecutor executor = new ThreadPoolExecutor(m_threadCount, m_threadCount, 0L, TimeUnit.MILLISECONDS, workQueue);
    metrics.register("active-threads", new Gauge<Integer>() {

        @Override
        public Integer getValue() {
            return executor.getActiveCount();
        }
    });
    metrics.register("pool-size", new Gauge<Integer>() {

        @Override
        public Integer getValue() {
            return executor.getPoolSize();
        }
    });
    metrics.register("largest-pool-size", new Gauge<Integer>() {

        @Override
        public Integer getValue() {
            return executor.getLargestPoolSize();
        }
    });
    metrics.register("work-queue-size", new Gauge<Integer>() {

        @Override
        public Integer getValue() {
            return workQueue.size();
        }
    });
    return parMap(samples, executor, metrics, insert);
}
Also used: Context (com.codahale.metrics.Timer.Context), Timer (com.codahale.metrics.Timer), ThreadPoolExecutor (java.util.concurrent.ThreadPoolExecutor), LinkedBlockingQueue (java.util.concurrent.LinkedBlockingQueue)
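
The override of offer() is the interesting trick here: ThreadPoolExecutor normally rejects work when its queue is full, but delegating to the blocking put() turns rejection into backpressure, and the wait-time timer records how long producers stall. Timer.Context implements Closeable, so the try-with-resources block stops the timer on every exit path. Below is a minimal, self-contained sketch of that pattern; the class name, queue capacity, and workload are illustrative assumptions, not from newts, and the interrupt is restored rather than rethrown through RxJava's Exceptions helper, which is not assumed here.

import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

import com.codahale.metrics.ConsoleReporter;
import com.codahale.metrics.MetricRegistry;
import com.codahale.metrics.Timer;
import com.codahale.metrics.Timer.Context;

public class BlockingPoolSketch {

    public static void main(String[] args) throws InterruptedException {
        MetricRegistry metrics = new MetricRegistry();
        final Timer waitTime = metrics.timer("wait-time");
        // A deliberately tiny queue so offer() actually blocks in this demo.
        @SuppressWarnings("serial")
        BlockingQueue<Runnable> workQueue = new LinkedBlockingQueue<Runnable>(2) {

            @Override
            public boolean offer(Runnable r) {
                // Context is Closeable: the timer stops when the try block
                // exits, measuring how long the producer was stalled.
                try (Context time = waitTime.time()) {
                    put(r);
                    return true;
                } catch (InterruptedException e) {
                    Thread.currentThread().interrupt();
                    throw new IllegalStateException(e);
                }
            }
        };
        ThreadPoolExecutor executor = new ThreadPoolExecutor(1, 1, 0L, TimeUnit.MILLISECONDS, workQueue);
        for (int i = 0; i < 10; i++) {
            executor.execute(() -> {
                try {
                    Thread.sleep(20);
                } catch (InterruptedException e) {
                    Thread.currentThread().interrupt();
                }
            });
        }
        executor.shutdown();
        executor.awaitTermination(5, TimeUnit.SECONDS);
        ConsoleReporter.forRegistry(metrics).build().report();
    }
}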

Example 17 with Context

Use of com.codahale.metrics.Timer.Context in project wikidata-query-rdf by wikimedia.

The class KafkaPoller, method fetch.

/**
 * Fetch changes from Kafka.
 * @param lastNextStartTime where last fetch ended up.
 * @return Set of changes.
 * @throws RetryableException
 */
@SuppressWarnings({ "checkstyle:npathcomplexity", "checkstyle:cyclomaticcomplexity" })
private Batch fetch(Instant lastNextStartTime) throws RetryableException {
    Map<String, Change> changesByTitle = new LinkedHashMap<>();
    ConsumerRecords<String, ChangeEvent> records;
    Instant nextInstant = Instant.EPOCH;
    AtomicLongMap<String> topicCounts = AtomicLongMap.create();
    Map<TopicPartition, OffsetAndMetadata> batchOffsets = new HashMap<>();
    while (true) {
        commitPendindOffsets();
        try (Context timerContext = pollingTimer.time()) {
            // TODO: make timeout configurable? Wait for a bit so we catch bursts of messages?
            records = consumer.poll(1000);
        } catch (InterruptException | WakeupException e) {
            throw new RetryableException("Error fetching recent changes", e);
        }
        int count = records.count();
        log.debug("Fetched {} records from Kafka", count);
        changesCounter.inc(count);
        if (count == 0) {
            // If we got nothing from Kafka, get out of the loop and return what we have
            break;
        }
        boolean foundSomething = false;
        for (ConsumerRecord<String, ChangeEvent> record : records) {
            ChangeEvent event = record.value();
            String topic = record.topic();
            batchOffsets.put(new TopicPartition(record.topic(), record.partition()), new OffsetAndMetadata(record.offset()));
            log.trace("Got event t:{} o:{}", record.topic(), record.offset());
            if (!event.domain().equals(uris.getHost())) {
                // wrong domain, ignore
                continue;
            }
            // check namespace
            if (!uris.isEntityNamespace(event.namespace())) {
                continue;
            }
            if (!(event instanceof RevisionCreateEvent)) {
                log.info("Got non revision create event class:{}, domain:{}, t:{}, revision:{}", event.getClass().getSimpleName(), event.title(), event.domain(), event.revision());
            }
            // Now we have event that we want to process
            foundSomething = true;
            topicCounts.getAndIncrement(record.topic());
            // Only advance the timestamp from the reporting topic; other topics'
            // clocks can be very chaotic, jumping back and forth.
            if (topic.endsWith(reportingTopic)) {
                nextInstant = Utils.max(nextInstant, Instant.ofEpochMilli(record.timestamp()));
            }
            // Using offset here as RC id since we do not have real RC id (this not being RC poller) but
            // the offset serves the same function in Kafka and is also useful for debugging.
            Change change = makeChange(event, record.offset());
            Change dupe = changesByTitle.put(change.entityId(), change);
            // If we already have a change for this entity, keep the one with the
            // higher revision, or the delete (NO_REVISION), since a delete wins.
            // This is not a big deal since deletes are relatively rare.
            if (dupe != null && change.revision() > Change.NO_REVISION && (dupe.revision() > change.revision() || dupe.revision() == Change.NO_REVISION)) {
                // need to remove so that order will be correct
                changesByTitle.remove(change.entityId());
                changesByTitle.put(change.entityId(), dupe);
            }
        }
        log.debug("{} records left after filtering", changesByTitle.size());
        if (changesByTitle.size() >= batchSize) {
            // We have enough for the batch
            break;
        }
        if (changesByTitle.size() > 0 && !foundSomething) {
            log.info("Did not find anything useful in this batch, returning existing data");
            // We already have data and this poll added nothing useful, so
            // return what we have rather than wait for more.
            break;
        }
    // TODO: if we already have something and we've spent more than X seconds in the loop,
    // we probably should return without waiting for more
    }
    // If we didn't get anything useful in the reporting topic, keep the old value
    if (nextInstant.equals(Instant.EPOCH)) {
        nextInstant = lastNextStartTime;
    }
    final ImmutableList<Change> changes = ImmutableList.copyOf(changesByTitle.values());
    log.info("Found {} changes", changes.size());
    if (log.isDebugEnabled()) {
        topicCounts.asMap().forEach((k, v) -> log.debug("Topic {}: {} records", k, v));
    }
    long advanced = ChronoUnit.MILLIS.between(lastNextStartTime, nextInstant);
    // be sure we got the whole second
    return new Batch(changes, advanced, nextInstant.minusSeconds(1).toString(), nextInstant, batchOffsets);
}
Also used: Context (com.codahale.metrics.Timer.Context), HashMap (java.util.HashMap), LinkedHashMap (java.util.LinkedHashMap), Instant (java.time.Instant), InterruptException (org.apache.kafka.common.errors.InterruptException), WakeupException (org.apache.kafka.common.errors.WakeupException), RetryableException (org.wikidata.query.rdf.tool.exception.RetryableException), ChangeEvent (org.wikidata.query.rdf.tool.change.events.ChangeEvent), TopicPartition (org.apache.kafka.common.TopicPartition), OffsetAndMetadata (org.apache.kafka.clients.consumer.OffsetAndMetadata), RevisionCreateEvent (org.wikidata.query.rdf.tool.change.events.RevisionCreateEvent)
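
The subtle part of fetch() is the duplicate handling: changes are keyed by entity id in a LinkedHashMap to keep arrival order, and a later event only displaces an earlier one when the earlier one has neither a higher revision nor is a delete. Here is a small, self-contained sketch of just that rule; the Change class and NO_REVISION constant are stand-ins for the real ones in wikidata-query-rdf.

import java.util.LinkedHashMap;
import java.util.Map;

public class DedupSketch {

    static final long NO_REVISION = -1; // stand-in for Change.NO_REVISION

    // Stand-in for the poller's Change type; the real class carries more state.
    static final class Change {
        final String entityId;
        final long revision;

        Change(String entityId, long revision) {
            this.entityId = entityId;
            this.revision = revision;
        }
    }

    // Mirrors the duplicate handling in fetch(): the newer event wins unless the
    // existing one has a higher revision or is a delete (deletes win over edits).
    static void putChange(Map<String, Change> changesByTitle, Change change) {
        Change dupe = changesByTitle.put(change.entityId, change);
        if (dupe != null && change.revision > NO_REVISION
                && (dupe.revision > change.revision || dupe.revision == NO_REVISION)) {
            // Re-insert via remove + put so the map's order is correct,
            // matching the ordering fix in the original code.
            changesByTitle.remove(change.entityId);
            changesByTitle.put(change.entityId, dupe);
        }
    }

    public static void main(String[] args) {
        Map<String, Change> byTitle = new LinkedHashMap<>();
        putChange(byTitle, new Change("Q42", 5));
        putChange(byTitle, new Change("Q42", 3)); // older revision: rev 5 is kept
        putChange(byTitle, new Change("Q42", NO_REVISION)); // delete replaces rev 5
        putChange(byTitle, new Change("Q42", 7)); // arrives after the delete: delete still wins
        System.out.println(byTitle.get("Q42").revision); // prints -1
    }
}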

Example 18 with Context

Use of com.codahale.metrics.Timer.Context in project sharding-jdbc by dangdangdotcom.

The class PreparedStatementExecutor, method executeQuery.

/**
 * Execute the SQL query.
 *
 * @return list of result sets
 */
public List<ResultSet> executeQuery() {
    Context context = MetricsContext.start("ShardingPreparedStatement-executeQuery");
    eventPostman.postExecutionEvents();
    List<ResultSet> result;
    final boolean isExceptionThrown = ExecutorExceptionHandler.isExceptionThrown();
    final Map<String, Object> dataMap = ExecutorDataMap.getDataMap();
    try {
        if (1 == preparedStatementExecutorWrappers.size()) {
            return Collections.singletonList(executeQueryInternal(preparedStatementExecutorWrappers.iterator().next(), isExceptionThrown, dataMap));
        }
        result = executorEngine.execute(preparedStatementExecutorWrappers, new ExecuteUnit<PreparedStatementExecutorWrapper, ResultSet>() {

            @Override
            public ResultSet execute(final PreparedStatementExecutorWrapper input) throws Exception {
                synchronized (input.getPreparedStatement().getConnection()) {
                    return executeQueryInternal(input, isExceptionThrown, dataMap);
                }
            }
        });
    } finally {
        MetricsContext.stop(context);
    }
    return result;
}
Also used: MetricsContext (com.dangdang.ddframe.rdb.sharding.metrics.MetricsContext), Context (com.codahale.metrics.Timer.Context), PreparedStatementExecutorWrapper (com.dangdang.ddframe.rdb.sharding.executor.wrapper.PreparedStatementExecutorWrapper), ResultSet (java.sql.ResultSet)
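
MetricsContext is sharding-jdbc's thin wrapper around a Dropwizard Timer: start() opens a Timer.Context under the given name and stop() closes it. Starting before the work and stopping in finally guarantees the timer also covers the early single-wrapper return. A sketch of the same shape written against the Dropwizard API directly; the registry setup and the placeholder workload are assumptions, not sharding-jdbc code.

import com.codahale.metrics.MetricRegistry;
import com.codahale.metrics.Timer.Context;

public class StartStopSketch {

    private static final MetricRegistry REGISTRY = new MetricRegistry();

    static long timedQuery() {
        // Start the timer before the work and stop it in finally, so every
        // return path (including early returns) is measured.
        Context context = REGISTRY.timer("ShardingPreparedStatement-executeQuery").time();
        try {
            return doWork();
        } finally {
            context.stop();
        }
    }

    // Placeholder workload standing in for the real query execution.
    private static long doWork() {
        long sum = 0;
        for (int i = 0; i < 1_000_000; i++) {
            sum += i;
        }
        return sum;
    }

    public static void main(String[] args) {
        System.out.println(timedQuery());
    }
}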

Example 19 with Context

Use of com.codahale.metrics.Timer.Context in project sharding-jdbc by dangdangdotcom.

The class PreparedStatementExecutor, method executeUpdate.

/**
 * Execute the SQL update.
 *
 * @return number of updated rows
 */
public int executeUpdate() {
    Context context = MetricsContext.start("ShardingPreparedStatement-executeUpdate");
    eventPostman.postExecutionEvents();
    final boolean isExceptionThrown = ExecutorExceptionHandler.isExceptionThrown();
    final Map<String, Object> dataMap = ExecutorDataMap.getDataMap();
    try {
        if (1 == preparedStatementExecutorWrappers.size()) {
            return executeUpdateInternal(preparedStatementExecutorWrappers.iterator().next(), isExceptionThrown, dataMap);
        }
        return executorEngine.execute(preparedStatementExecutorWrappers, new ExecuteUnit<PreparedStatementExecutorWrapper, Integer>() {

            @Override
            public Integer execute(final PreparedStatementExecutorWrapper input) throws Exception {
                synchronized (input.getPreparedStatement().getConnection()) {
                    return executeUpdateInternal(input, isExceptionThrown, dataMap);
                }
            }
        }, new MergeUnit<Integer, Integer>() {

            @Override
            public Integer merge(final List<Integer> results) {
                if (null == results) {
                    return 0;
                }
                int result = 0;
                for (Integer each : results) {
                    result += each;
                }
                return result;
            }
        });
    } finally {
        MetricsContext.stop(context);
    }
}
Also used: MetricsContext (com.dangdang.ddframe.rdb.sharding.metrics.MetricsContext), Context (com.codahale.metrics.Timer.Context), PreparedStatementExecutorWrapper (com.dangdang.ddframe.rdb.sharding.executor.wrapper.PreparedStatementExecutorWrapper), SQLException (java.sql.SQLException)
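
The MergeUnit above simply folds the per-shard update counts into a single total, treating a null result list as zero rows. For comparison, a behavior-equivalent reduction written with the streams API (class and method names are ours):

import java.util.Arrays;
import java.util.List;

public class UpdateCountMergeSketch {

    // Behavior-equivalent to the MergeUnit above: a null result list merges
    // to 0, otherwise the per-shard affected-row counts are summed.
    static int merge(List<Integer> results) {
        return results == null ? 0 : results.stream().mapToInt(Integer::intValue).sum();
    }

    public static void main(String[] args) {
        System.out.println(merge(Arrays.asList(2, 3, 5))); // prints 10
        System.out.println(merge(null)); // prints 0
    }
}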

Example 20 with Context

Use of com.codahale.metrics.Timer.Context in project sharding-jdbc by dangdangdotcom.

The class PreparedStatementExecutor, method executeBatch.

/**
 * Execute the batch.
 *
 * @param batchSize total number of statements in the batch
 * @return update count for each batch statement
 */
public int[] executeBatch(final int batchSize) {
    Context context = MetricsContext.start("ShardingPreparedStatement-executeBatch");
    eventPostman.postExecutionEvents();
    final boolean isExceptionThrown = ExecutorExceptionHandler.isExceptionThrown();
    final Map<String, Object> dataMap = ExecutorDataMap.getDataMap();
    try {
        if (1 == preparedStatementExecutorWrappers.size()) {
            return executeBatchInternal(preparedStatementExecutorWrappers.iterator().next(), isExceptionThrown, dataMap);
        }
        return executorEngine.execute(preparedStatementExecutorWrappers, new ExecuteUnit<PreparedStatementExecutorWrapper, int[]>() {

            @Override
            public int[] execute(final PreparedStatementExecutorWrapper input) throws Exception {
                synchronized (input.getPreparedStatement().getConnection()) {
                    return executeBatchInternal(input, isExceptionThrown, dataMap);
                }
            }
        }, new MergeUnit<int[], int[]>() {

            @Override
            public int[] merge(final List<int[]> results) {
                if (null == results) {
                    return new int[] { 0 };
                }
                int[] result = new int[batchSize];
                int i = 0;
                for (PreparedStatementExecutorWrapper each : preparedStatementExecutorWrappers) {
                    for (Integer[] indices : each.getBatchIndices()) {
                        result[indices[0]] += results.get(i)[indices[1]];
                    }
                    i++;
                }
                return result;
            }
        });
    } finally {
        MetricsContext.stop(context);
    }
}
Also used: MetricsContext (com.dangdang.ddframe.rdb.sharding.metrics.MetricsContext), Context (com.codahale.metrics.Timer.Context), PreparedStatementExecutorWrapper (com.dangdang.ddframe.rdb.sharding.executor.wrapper.PreparedStatementExecutorWrapper), SQLException (java.sql.SQLException)
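
The merge step in executeBatch() is the trickiest of the three: each wrapper returns update counts for its own sub-batch, and getBatchIndices() supplies {caller index, local index} pairs to scatter those counts back into the caller's batch order. A standalone sketch of that remapping, with plain lists standing in for PreparedStatementExecutorWrapper:

import java.util.Arrays;
import java.util.List;

public class BatchMergeSketch {

    // Mirrors the MergeUnit in executeBatch(): batchIndices.get(i) holds the
    // {caller index, local index} pairs for the i-th wrapper, and results.get(i)
    // holds that wrapper's local update counts.
    static int[] merge(List<List<int[]>> batchIndices, List<int[]> results, int batchSize) {
        int[] merged = new int[batchSize];
        int i = 0;
        for (List<int[]> indices : batchIndices) {
            for (int[] pair : indices) {
                merged[pair[0]] += results.get(i)[pair[1]];
            }
            i++;
        }
        return merged;
    }

    public static void main(String[] args) {
        // Shard A executed caller items 0 and 2; shard B executed caller item 1.
        List<int[]> shardA = Arrays.asList(new int[] { 0, 0 }, new int[] { 2, 1 });
        List<int[]> shardB = Arrays.asList(new int[] { 1, 0 });
        int[] merged = merge(Arrays.asList(shardA, shardB),
                Arrays.asList(new int[] { 1, 1 }, new int[] { 1 }), 3);
        System.out.println(Arrays.toString(merged)); // prints [1, 1, 1]
    }
}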

Aggregations

Context (com.codahale.metrics.Timer.Context): 37
Timer (com.codahale.metrics.Timer): 11
MetricsContext (com.dangdang.ddframe.rdb.sharding.metrics.MetricsContext): 10
ApplierContext (com.torodb.mongodb.repl.oplogreplier.ApplierContext): 9
OplogOperation (com.eightkdata.mongowp.server.api.oplog.OplogOperation): 8
Test (org.junit.Test): 7
RetrierGiveUpException (com.torodb.core.retrier.RetrierGiveUpException): 6
SQLException (java.sql.SQLException): 5
PreparedStatementExecutorWrapper (com.dangdang.ddframe.rdb.sharding.executor.wrapper.PreparedStatementExecutorWrapper): 4
RetrierAbortException (com.torodb.core.retrier.RetrierAbortException): 4
MetricRegistry (com.codahale.metrics.MetricRegistry): 3
StatementExecutorWrapper (com.dangdang.ddframe.rdb.sharding.executor.wrapper.StatementExecutorWrapper): 3
RollbackException (com.torodb.core.transaction.RollbackException): 3
IOException (java.io.IOException): 3
ArrayList (java.util.ArrayList): 3
List (java.util.List): 3
ExecutionException (java.util.concurrent.ExecutionException): 3
ExecutorService (java.util.concurrent.ExecutorService): 3
ConsoleReporter (com.codahale.metrics.ConsoleReporter): 2
Counter (com.codahale.metrics.Counter): 2