Use of org.skife.jdbi.v2.StatementContext in project druid by druid-io.
Class DerivativeDataSourceManager, method updateDerivatives:
private void updateDerivatives()
{
  // Load all committed datasource metadata and keep only entries whose payload is DerivativeDataSourceMetadata.
  List<Pair<String, DerivativeDataSourceMetadata>> derivativesInDatabase = connector.retryWithHandle(
      handle -> handle
          .createQuery(StringUtils.format("SELECT DISTINCT dataSource,commit_metadata_payload FROM %1$s", dbTables.get().getDataSourceTable()))
          .map((int index, ResultSet r, StatementContext ctx) -> {
            String datasourceName = r.getString("dataSource");
            DataSourceMetadata payload = JacksonUtils.readValue(objectMapper, r.getBytes("commit_metadata_payload"), DataSourceMetadata.class);
            if (!(payload instanceof DerivativeDataSourceMetadata)) {
              return null;
            }
            DerivativeDataSourceMetadata metadata = (DerivativeDataSourceMetadata) payload;
            return new Pair<>(datasourceName, metadata);
          })
          .list()
  );

  // Resolve each derivative's base datasource and average segment size, dropping derivatives with no data.
  List<DerivativeDataSource> derivativeDataSources = derivativesInDatabase
      .parallelStream()
      .filter(data -> data != null)
      .map(derivatives -> {
        String name = derivatives.lhs;
        DerivativeDataSourceMetadata metadata = derivatives.rhs;
        String baseDataSource = metadata.getBaseDataSource();
        long avgSizePerGranularity = getAvgSizePerGranularity(name);
        log.info(
            "find derivatives: {bases=%s, derivative=%s, dimensions=%s, metrics=%s, avgSize=%s}",
            baseDataSource, name, metadata.getDimensions(), metadata.getMetrics(), avgSizePerGranularity
        );
        return new DerivativeDataSource(name, baseDataSource, metadata.getColumns(), avgSizePerGranularity);
      })
      .filter(derivatives -> derivatives.getAvgSizeBasedGranularity() > 0)
      .collect(Collectors.toList());

  // Group the derivatives by their base datasource.
  ConcurrentHashMap<String, SortedSet<DerivativeDataSource>> newDerivatives = new ConcurrentHashMap<>();
  for (DerivativeDataSource derivative : derivativeDataSources) {
    newDerivatives.computeIfAbsent(derivative.getBaseDataSource(), ds -> new TreeSet<>()).add(derivative);
  }

  // Atomically publish the new snapshot so readers always see a consistent mapping.
  ConcurrentHashMap<String, SortedSet<DerivativeDataSource>> current;
  do {
    current = DERIVATIVES_REF.get();
  } while (!DERIVATIVES_REF.compareAndSet(current, newDerivatives));
}
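The three-argument lambda passed to map() above is JDBI v2's ResultSetMapper; its StatementContext parameter exposes per-statement state such as the raw SQL, bindings, and attributes. For reference, a minimal, hypothetical sketch of the same mapping pattern in isolation; the table and column names are invented for illustration and are not part of the Druid code.

import java.sql.ResultSet;
import java.util.List;

import org.skife.jdbi.v2.DBI;
import org.skife.jdbi.v2.Handle;
import org.skife.jdbi.v2.StatementContext;

public class ResultSetMapperSketch
{
  // Reads the "name" column of every row in a hypothetical example_table.
  public static List<String> readNames(DBI dbi)
  {
    Handle handle = dbi.open();
    try {
      return handle
          .createQuery("SELECT name FROM example_table")
          .map((int index, ResultSet r, StatementContext ctx) -> {
            // ctx carries per-statement state (raw SQL, bindings, attributes);
            // only the ResultSet is needed in this sketch.
            return r.getString("name");
          })
          .list();
    }
    finally {
      handle.close();
    }
  }
}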
Use of org.skife.jdbi.v2.StatementContext in project druid by druid-io.
Class DerivativeDataSourceManager, method getAvgSizePerGranularity:
/**
 * Calculate the average data size per segment granularity for a given datasource.
 *
 * For example, suppose a datasource has 5 segments as follows:
 * interval = "2018-04-01/2018-04-02", segment size = 1024 * 1024 * 2
 * interval = "2018-04-01/2018-04-02", segment size = 1024 * 1024 * 2
 * interval = "2018-04-02/2018-04-03", segment size = 1024 * 1024 * 1
 * interval = "2018-04-02/2018-04-03", segment size = 1024 * 1024 * 1
 * interval = "2018-04-02/2018-04-03", segment size = 1024 * 1024 * 1
 * Then the number of distinct intervals is 2 and the total segment size is 1024 * 1024 * 7,
 * so the result is 1024 * 1024 * 7 / 2 = 1024 * 1024 * 3.5.
 *
 * @param datasource name of the datasource
 * @return average data size per segment granularity
 */
private long getAvgSizePerGranularity(String datasource)
{
  return connector.retryWithHandle(
      new HandleCallback<Long>()
      {
        Set<Interval> intervals = new HashSet<>();
        long totalSize = 0;

        @Override
        public Long withHandle(Handle handle)
        {
          handle.createQuery(
              StringUtils.format(
                  "SELECT start,%1$send%1$s,payload FROM %2$s WHERE used = true AND dataSource = :dataSource",
                  connector.getQuoteString(), dbTables.get().getSegmentsTable()
              )
          )
                .bind("dataSource", datasource)
                .map((int index, ResultSet r, StatementContext ctx) -> {
                  intervals.add(Intervals.utc(DateTimes.of(r.getString("start")).getMillis(), DateTimes.of(r.getString("end")).getMillis()));
                  DataSegment segment = JacksonUtils.readValue(objectMapper, r.getBytes("payload"), DataSegment.class);
                  totalSize += segment.getSize();
                  return null;
                })
                .list();
          return intervals.isEmpty() ? 0L : totalSize / intervals.size();
        }
      }
  );
}
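To make the arithmetic from the Javadoc concrete, here is a small self-contained sketch (plain Java, no Druid or JDBI dependencies) that reproduces the example numbers; the five segments and their sizes are the ones listed in the comment.

import java.util.HashSet;
import java.util.Set;

public class AvgSizePerGranularitySketch
{
  public static void main(String[] args)
  {
    // The five segments from the Javadoc example: parallel arrays of interval string and size in bytes.
    String[] segmentIntervals = {
        "2018-04-01/2018-04-02",
        "2018-04-01/2018-04-02",
        "2018-04-02/2018-04-03",
        "2018-04-02/2018-04-03",
        "2018-04-02/2018-04-03"
    };
    long[] segmentSizes = {1024L * 1024 * 2, 1024L * 1024 * 2, 1024L * 1024, 1024L * 1024, 1024L * 1024};

    Set<String> intervals = new HashSet<>();
    long totalSize = 0;
    for (int i = 0; i < segmentIntervals.length; i++) {
      intervals.add(segmentIntervals[i]); // 2 distinct intervals
      totalSize += segmentSizes[i];       // 7 * 1024 * 1024 bytes in total
    }

    // Long division, as in getAvgSizePerGranularity: 7340032 / 2 = 3670016 bytes (3.5 MiB).
    long avgSizePerGranularity = intervals.isEmpty() ? 0L : totalSize / intervals.size();
    System.out.println(avgSizePerGranularity); // prints 3670016
  }
}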
Use of org.skife.jdbi.v2.StatementContext in project metrics by dropwizard.
Class InstrumentedTimingCollectorTest, method updatesTimerForTemplateFile:
@Test
public void updatesTimerForTemplateFile() throws Exception {
    final StatementNameStrategy strategy = new SmartNameStrategy();
    final InstrumentedTimingCollector collector = new InstrumentedTimingCollector(registry, strategy);
    final StatementContext ctx = mock(StatementContext.class);
    doReturn("SELECT 1").when(ctx).getRawSql();
    doReturn("foo/bar.stg").when(ctx).getAttribute(NameStrategies.STATEMENT_GROUP);
    doReturn("updatesTimerForTemplateFile").when(ctx).getAttribute(NameStrategies.STATEMENT_NAME);

    collector.collect(TimeUnit.SECONDS.toNanos(4), ctx);

    final String name = strategy.getStatementName(ctx);
    final Timer timer = registry.timer(name);

    assertThat(name).isEqualTo(name("foo", "bar", "updatesTimerForTemplateFile"));
    assertThat(timer.getSnapshot().getMax()).isEqualTo(4000000000L);
}
Use of org.skife.jdbi.v2.StatementContext in project metrics by dropwizard.
Class InstrumentedTimingCollectorTest, method updatesTimerForNoRawSql:
@Test
public void updatesTimerForNoRawSql() throws Exception {
    final StatementNameStrategy strategy = new SmartNameStrategy();
    final InstrumentedTimingCollector collector = new InstrumentedTimingCollector(registry, strategy);
    final StatementContext ctx = mock(StatementContext.class);

    collector.collect(TimeUnit.SECONDS.toNanos(2), ctx);

    final String name = strategy.getStatementName(ctx);
    final Timer timer = registry.timer(name);

    assertThat(name).isEqualTo(name("sql", "empty"));
    assertThat(timer.getSnapshot().getMax()).isEqualTo(2000000000);
}
Use of org.skife.jdbi.v2.StatementContext in project metrics by dropwizard.
Class InstrumentedTimingCollectorTest, method updatesTimerForSqlObjects:
@Test
public void updatesTimerForSqlObjects() throws Exception {
    final StatementNameStrategy strategy = new SmartNameStrategy();
    final InstrumentedTimingCollector collector = new InstrumentedTimingCollector(registry, strategy);
    final StatementContext ctx = mock(StatementContext.class);
    doReturn("SELECT 1").when(ctx).getRawSql();
    doReturn(getClass()).when(ctx).getSqlObjectType();
    doReturn(getClass().getMethod("updatesTimerForSqlObjects")).when(ctx).getSqlObjectMethod();

    collector.collect(TimeUnit.SECONDS.toNanos(1), ctx);

    final String name = strategy.getStatementName(ctx);
    final Timer timer = registry.timer(name);

    assertThat(name).isEqualTo(name(getClass(), "updatesTimerForSqlObjects"));
    assertThat(timer.getSnapshot().getMax()).isEqualTo(1000000000);
}
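The tests above drive the collector by hand against a mocked StatementContext. In a real application the collector is attached to the DBI so that every statement is timed automatically; a minimal sketch, assuming a DBI and a MetricRegistry are already constructed elsewhere (the factory class name here is made up for illustration).

import com.codahale.metrics.MetricRegistry;
import com.codahale.metrics.jdbi.InstrumentedTimingCollector;
import com.codahale.metrics.jdbi.strategies.SmartNameStrategy;
import org.skife.jdbi.v2.DBI;

public class InstrumentedDbiSetup
{
  // Registers an InstrumentedTimingCollector on the DBI; SmartNameStrategy derives the timer name
  // from the statement context (SQL object type and method, template group/name, or raw SQL).
  public static DBI instrument(DBI dbi, MetricRegistry registry)
  {
    dbi.setTimingCollector(new InstrumentedTimingCollector(registry, new SmartNameStrategy()));
    return dbi;
  }
}

When no raw SQL is available, SmartNameStrategy falls back to the sql.empty timer name, which is exactly what the updatesTimerForNoRawSql test asserts.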