Example 6 with StatementContext

use of org.skife.jdbi.v2.StatementContext in project druid by druid-io.

the class DerivativeDataSourceManager method updateDerivatives.

private void updateDerivatives() {
    // Load every (dataSource, commit metadata) pair from the metadata store and
    // keep only those whose committed metadata describes a derivative datasource.
    List<Pair<String, DerivativeDataSourceMetadata>> derivativesInDatabase = connector.retryWithHandle(
        handle -> handle
            .createQuery(StringUtils.format("SELECT DISTINCT dataSource,commit_metadata_payload FROM %1$s", dbTables.get().getDataSourceTable()))
            .map((int index, ResultSet r, StatementContext ctx) -> {
                String datasourceName = r.getString("dataSource");
                DataSourceMetadata payload = JacksonUtils.readValue(objectMapper, r.getBytes("commit_metadata_payload"), DataSourceMetadata.class);
                if (!(payload instanceof DerivativeDataSourceMetadata)) {
                    // Not a derivative datasource; filtered out below.
                    return null;
                }
                DerivativeDataSourceMetadata metadata = (DerivativeDataSourceMetadata) payload;
                return new Pair<>(datasourceName, metadata);
            })
            .list()
    );
    // Enrich each derivative with its average segment size per granularity and
    // drop any that have no used segments (average size 0).
    List<DerivativeDataSource> derivativeDataSources = derivativesInDatabase.parallelStream()
        .filter(data -> data != null)
        .map(derivatives -> {
            String name = derivatives.lhs;
            DerivativeDataSourceMetadata metadata = derivatives.rhs;
            String baseDataSource = metadata.getBaseDataSource();
            long avgSizePerGranularity = getAvgSizePerGranularity(name);
            log.info("find derivatives: {bases=%s, derivative=%s, dimensions=%s, metrics=%s, avgSize=%s}", baseDataSource, name, metadata.getDimensions(), metadata.getMetrics(), avgSizePerGranularity);
            return new DerivativeDataSource(name, baseDataSource, metadata.getColumns(), avgSizePerGranularity);
        })
        .filter(derivatives -> derivatives.getAvgSizeBasedGranularity() > 0)
        .collect(Collectors.toList());
    // Group the derivatives by their base datasource.
    ConcurrentHashMap<String, SortedSet<DerivativeDataSource>> newDerivatives = new ConcurrentHashMap<>();
    for (DerivativeDataSource derivative : derivativeDataSources) {
        newDerivatives.computeIfAbsent(derivative.getBaseDataSource(), ds -> new TreeSet<>()).add(derivative);
    }
    // Atomically publish the fresh snapshot with a compare-and-set loop.
    ConcurrentHashMap<String, SortedSet<DerivativeDataSource>> current;
    do {
        current = DERIVATIVES_REF.get();
    } while (!DERIVATIVES_REF.compareAndSet(current, newDerivatives));
}
Also used : MoreExecutors(com.google.common.util.concurrent.MoreExecutors) ListenableFuture(com.google.common.util.concurrent.ListenableFuture) SortedSet(java.util.SortedSet) Intervals(org.apache.druid.java.util.common.Intervals) Inject(com.google.inject.Inject) Supplier(com.google.common.base.Supplier) Duration(org.joda.time.Duration) LifecycleStart(org.apache.druid.java.util.common.lifecycle.LifecycleStart) AtomicReference(java.util.concurrent.atomic.AtomicReference) StatementContext(org.skife.jdbi.v2.StatementContext) TreeSet(java.util.TreeSet) Pair(org.apache.druid.java.util.common.Pair) HashSet(java.util.HashSet) Interval(org.joda.time.Interval) DataSourceMetadata(org.apache.druid.indexing.overlord.DataSourceMetadata) LifecycleStop(org.apache.druid.java.util.common.lifecycle.LifecycleStop) ResultSet(java.sql.ResultSet) ManageLifecycle(org.apache.druid.guice.ManageLifecycle) ListeningScheduledExecutorService(com.google.common.util.concurrent.ListeningScheduledExecutorService) DateTimes(org.apache.druid.java.util.common.DateTimes) SQLMetadataConnector(org.apache.druid.metadata.SQLMetadataConnector) ImmutableSet(com.google.common.collect.ImmutableSet) Execs(org.apache.druid.java.util.common.concurrent.Execs) JacksonUtils(org.apache.druid.java.util.common.jackson.JacksonUtils) EmittingLogger(org.apache.druid.java.util.emitter.EmittingLogger) MetadataStorageTablesConfig(org.apache.druid.metadata.MetadataStorageTablesConfig) ImmutableMap(com.google.common.collect.ImmutableMap) HandleCallback(org.skife.jdbi.v2.tweak.HandleCallback) ObjectMapper(com.fasterxml.jackson.databind.ObjectMapper) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) StringUtils(org.apache.druid.java.util.common.StringUtils) Set(java.util.Set) Collectors(java.util.stream.Collectors) TimeUnit(java.util.concurrent.TimeUnit) List(java.util.List) Handle(org.skife.jdbi.v2.Handle) DerivativeDataSourceMetadata(org.apache.druid.indexing.materializedview.DerivativeDataSourceMetadata) DataSegment(org.apache.druid.timeline.DataSegment)
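
The closing compare-and-set loop is a small lock-free publication pattern: build a complete replacement map off to the side, then swap it in atomically so readers never observe a half-built snapshot. A minimal, self-contained sketch of the same pattern follows; SnapshotPublisher, SNAPSHOT_REF, and publish are illustrative names, not Druid APIs.

import java.util.Map;
import java.util.concurrent.atomic.AtomicReference;

// Minimal sketch of the snapshot-publish pattern above; all names are hypothetical.
final class SnapshotPublisher {
    private static final AtomicReference<Map<String, Long>> SNAPSHOT_REF =
            new AtomicReference<>(Map.of());

    static void publish(Map<String, Long> freshSnapshot) {
        Map<String, Long> current;
        do {
            current = SNAPSHOT_REF.get();
            // Retry if another thread swapped in a different snapshot meanwhile.
        } while (!SNAPSHOT_REF.compareAndSet(current, freshSnapshot));
    }

    static Map<String, Long> read() {
        // Readers always see one complete, consistent snapshot.
        return SNAPSHOT_REF.get();
    }
}

Since the new map does not depend on the old one, a plain set() would also be safe here; the loop simply mirrors the shape of the original code.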

Example 7 with StatementContext

use of org.skife.jdbi.v2.StatementContext in project druid by druid-io.

the class DerivativeDataSourceManager method getAvgSizePerGranularity.

/**
 * Calculate the average data size per segment granularity for a given datasource.
 *
 * For example, suppose a datasource has the following 5 segments:
 * interval = "2018-04-01/2018-04-02", segment size = 1024 * 1024 * 2
 * interval = "2018-04-01/2018-04-02", segment size = 1024 * 1024 * 2
 * interval = "2018-04-02/2018-04-03", segment size = 1024 * 1024 * 1
 * interval = "2018-04-02/2018-04-03", segment size = 1024 * 1024 * 1
 * interval = "2018-04-02/2018-04-03", segment size = 1024 * 1024 * 1
 * Then the number of distinct intervals is 2 and the total segment size is 1024 * 1024 * 7,
 * so the result is 1024 * 1024 * 7 / 2 = 1024 * 1024 * 3.5.
 *
 * @param datasource name of the datasource to inspect
 * @return average data size per segment granularity, in bytes
 */
private long getAvgSizePerGranularity(String datasource) {
    return connector.retryWithHandle(new HandleCallback<Long>() {

        // Accumulators mutated by the row mapper below.
        Set<Interval> intervals = new HashSet<>();

        long totalSize = 0;

        @Override
        public Long withHandle(Handle handle) {
            handle.createQuery(StringUtils.format("SELECT start,%1$send%1$s,payload FROM %2$s WHERE used = true AND dataSource = :dataSource", connector.getQuoteString(), dbTables.get().getSegmentsTable()))
                .bind("dataSource", datasource)
                .map((int index, ResultSet r, StatementContext ctx) -> {
                    // Record the segment's interval and add its size to the running
                    // total; the mapped value itself is discarded.
                    intervals.add(Intervals.utc(DateTimes.of(r.getString("start")).getMillis(), DateTimes.of(r.getString("end")).getMillis()));
                    DataSegment segment = JacksonUtils.readValue(objectMapper, r.getBytes("payload"), DataSegment.class);
                    totalSize += segment.getSize();
                    return null;
                })
                .list();
            // Integer division: total bytes across all used segments, divided by
            // the number of distinct intervals.
            return intervals.isEmpty() ? 0L : totalSize / intervals.size();
        }
    });
}
Also used : ResultSet(java.sql.ResultSet) DataSegment(org.apache.druid.timeline.DataSegment) Interval(org.joda.time.Interval) HashSet(java.util.HashSet) Handle(org.skife.jdbi.v2.Handle) StatementContext(org.skife.jdbi.v2.StatementContext)
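
To make the arithmetic in the Javadoc concrete: totalSize / intervals.size() is integer division on longs, which happens to be exact for the example values because 7 MiB is an even number of bytes. A tiny standalone sketch, with illustrative values only:

public class AvgSizeExample {
    public static void main(String[] args) {
        long mib = 1024L * 1024L;
        long totalSize = 7 * mib;                 // five segments totalling 7 MiB
        long distinctIntervals = 2;               // two distinct segment intervals
        long avg = totalSize / distinctIntervals; // long division; exact here
        System.out.println(avg);                  // prints 3670016, i.e. 3.5 MiB
    }
}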

Example 8 with StatementContext

use of org.skife.jdbi.v2.StatementContext in project metrics by dropwizard.

the class InstrumentedTimingCollectorTest method updatesTimerForTemplateFile.

@Test
public void updatesTimerForTemplateFile() throws Exception {
    final StatementNameStrategy strategy = new SmartNameStrategy();
    final InstrumentedTimingCollector collector = new InstrumentedTimingCollector(registry, strategy);
    final StatementContext ctx = mock(StatementContext.class);
    doReturn("SELECT 1").when(ctx).getRawSql();
    doReturn("foo/bar.stg").when(ctx).getAttribute(NameStrategies.STATEMENT_GROUP);
    doReturn("updatesTimerForTemplateFile").when(ctx).getAttribute(NameStrategies.STATEMENT_NAME);
    collector.collect(TimeUnit.SECONDS.toNanos(4), ctx);
    final String name = strategy.getStatementName(ctx);
    final Timer timer = registry.timer(name);
    assertThat(name).isEqualTo(name("foo", "bar", "updatesTimerForTemplateFile"));
    assertThat(timer.getSnapshot().getMax()).isEqualTo(4000000000L);
}
Also used : StatementNameStrategy(com.codahale.metrics.jdbi.strategies.StatementNameStrategy) Timer(com.codahale.metrics.Timer) SmartNameStrategy(com.codahale.metrics.jdbi.strategies.SmartNameStrategy) StatementContext(org.skife.jdbi.v2.StatementContext) Test(org.junit.Test)
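
For context, the collector under test is normally attached to a DBI instance rather than driven by hand. A minimal wiring sketch, where dataSource stands in for an already-configured javax.sql.DataSource:

import javax.sql.DataSource;
import com.codahale.metrics.MetricRegistry;
import com.codahale.metrics.jdbi.InstrumentedTimingCollector;
import com.codahale.metrics.jdbi.strategies.SmartNameStrategy;
import org.skife.jdbi.v2.DBI;

final class JdbiMetricsWiring {
    // Every statement executed through the returned DBI updates a Timer whose
    // name is chosen by SmartNameStrategy, as these tests verify.
    static DBI instrumentedDbi(DataSource dataSource, MetricRegistry registry) {
        DBI dbi = new DBI(dataSource);
        dbi.setTimingCollector(new InstrumentedTimingCollector(registry, new SmartNameStrategy()));
        return dbi;
    }
}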

Example 9 with StatementContext

use of org.skife.jdbi.v2.StatementContext in project metrics by dropwizard.

the class InstrumentedTimingCollectorTest method updatesTimerForNoRawSql.

@Test
public void updatesTimerForNoRawSql() throws Exception {
    final StatementNameStrategy strategy = new SmartNameStrategy();
    final InstrumentedTimingCollector collector = new InstrumentedTimingCollector(registry, strategy);
    final StatementContext ctx = mock(StatementContext.class);
    collector.collect(TimeUnit.SECONDS.toNanos(2), ctx);
    final String name = strategy.getStatementName(ctx);
    final Timer timer = registry.timer(name);
    assertThat(name).isEqualTo(name("sql", "empty"));
    assertThat(timer.getSnapshot().getMax()).isEqualTo(2000000000);
}
Also used : StatementNameStrategy(com.codahale.metrics.jdbi.strategies.StatementNameStrategy) Timer(com.codahale.metrics.Timer) SmartNameStrategy(com.codahale.metrics.jdbi.strategies.SmartNameStrategy) StatementContext(org.skife.jdbi.v2.StatementContext) Test(org.junit.Test)

Example 10 with StatementContext

use of org.skife.jdbi.v2.StatementContext in project metrics by dropwizard.

the class InstrumentedTimingCollectorTest method updatesTimerForSqlObjects.

@Test
public void updatesTimerForSqlObjects() throws Exception {
    final StatementNameStrategy strategy = new SmartNameStrategy();
    final InstrumentedTimingCollector collector = new InstrumentedTimingCollector(registry, strategy);
    final StatementContext ctx = mock(StatementContext.class);
    doReturn("SELECT 1").when(ctx).getRawSql();
    doReturn(getClass()).when(ctx).getSqlObjectType();
    doReturn(getClass().getMethod("updatesTimerForSqlObjects")).when(ctx).getSqlObjectMethod();
    collector.collect(TimeUnit.SECONDS.toNanos(1), ctx);
    final String name = strategy.getStatementName(ctx);
    final Timer timer = registry.timer(name);
    assertThat(name).isEqualTo(name(getClass(), "updatesTimerForSqlObjects"));
    assertThat(timer.getSnapshot().getMax()).isEqualTo(1000000000);
}
Also used : StatementNameStrategy(com.codahale.metrics.jdbi.strategies.StatementNameStrategy) Timer(com.codahale.metrics.Timer) SmartNameStrategy(com.codahale.metrics.jdbi.strategies.SmartNameStrategy) StatementContext(org.skife.jdbi.v2.StatementContext) Test(org.junit.Test)
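
The sql-object branch exercised by this test corresponds to JDBI v2's SQL Object API, where getSqlObjectType() and getSqlObjectMethod() are populated automatically. A hedged sketch with a hypothetical DAO (UserDao and the users table are illustrative, not from the test suite):

import org.skife.jdbi.v2.sqlobject.SqlQuery;

// Hypothetical DAO; with an instrumented DBI (see the wiring sketch above),
// each call is timed under name(UserDao.class, "countUsers").
public interface UserDao {
    @SqlQuery("SELECT COUNT(*) FROM users")
    int countUsers();
}

// Usage: UserDao dao = dbi.onDemand(UserDao.class); dao.countUsers();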

Aggregations

StatementContext (org.skife.jdbi.v2.StatementContext)20 Test (org.junit.Test)13 Timer (com.codahale.metrics.Timer)11 StatementNameStrategy (com.codahale.metrics.jdbi.strategies.StatementNameStrategy)11 SmartNameStrategy (com.codahale.metrics.jdbi.strategies.SmartNameStrategy)9 ResultSet (java.sql.ResultSet)8 SQLException (java.sql.SQLException)5 Handle (org.skife.jdbi.v2.Handle)5 IOException (java.io.IOException)3 ArrayList (java.util.ArrayList)3 List (java.util.List)3 DataSegment (org.apache.druid.timeline.DataSegment)3 Interval (org.joda.time.Interval)3 ShortNameStrategy (com.codahale.metrics.jdbi.strategies.ShortNameStrategy)2 ObjectMapper (com.fasterxml.jackson.databind.ObjectMapper)2 DataSegment (io.druid.timeline.DataSegment)2 HashSet (java.util.HashSet)2 ConcurrentHashMap (java.util.concurrent.ConcurrentHashMap)2 Nullable (javax.annotation.Nullable)2 TransactionStatus (org.skife.jdbi.v2.TransactionStatus)2