Use of org.skife.jdbi.v2.sqlobject.Bind in project druid by druid-io.
The class SQLMetadataStorageActionHandler, method removeTasksOlderThan.
@Override
public void removeTasksOlderThan(final long timestamp)
{
  DateTime dateTime = DateTimes.utc(timestamp);
  connector.retryWithHandle((HandleCallback<Void>) handle -> {
    handle.createStatement(getSqlRemoveLogsOlderThan())
          .bind("date_time", dateTime.toString())
          .execute();
    handle.createStatement(
        StringUtils.format("DELETE FROM %s WHERE created_date < :date_time AND active = false", entryTable))
          .bind("date_time", dateTime.toString())
          .execute();
    return null;
  });
}
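The snippet binds the named parameter with the fluent bind("date_time", ...) call. For comparison, here is a minimal sketch of the same DELETE in JDBI's declarative SQL Object style, where org.skife.jdbi.v2.sqlobject.Bind supplies the named parameter; the interface name and the hard-coded "druid_tasks" table are hypothetical, since the real Druid code resolves the table name at runtime and therefore keeps the fluent style.

import org.skife.jdbi.v2.sqlobject.Bind;
import org.skife.jdbi.v2.sqlobject.SqlUpdate;

// Hypothetical SQL Object interface; not Druid's actual code.
public interface TaskCleanupDao
{
  @SqlUpdate("DELETE FROM druid_tasks WHERE created_date < :date_time AND active = false")
  void removeInactiveTasksOlderThan(@Bind("date_time") String dateTime);
}

An instance of such an interface would typically be obtained with dbi.onDemand(TaskCleanupDao.class).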
Use of org.skife.jdbi.v2.sqlobject.Bind in project druid by druid-io.
The class IndexerSQLMetadataStorageCoordinator, method announceHistoricalSegmentBatch.
/**
 * Attempts to insert a batch of segments into the database. Segments that already exist are skipped; however,
 * this existence check is imperfect, and callers must be prepared to retry their entire transaction on exceptions.
 *
 * @return the set of segments actually inserted
 */
private Set<DataSegment> announceHistoricalSegmentBatch(
    final Handle handle,
    final Set<DataSegment> segments,
    final Set<DataSegment> usedSegments
) throws IOException
{
  final Set<DataSegment> toInsertSegments = new HashSet<>();
  try {
    Set<String> existedSegments = segmentExistsBatch(handle, segments);
    log.info("Found these segments already exist in DB: %s", existedSegments);
    for (DataSegment segment : segments) {
      if (!existedSegments.contains(segment.getId().toString())) {
        toInsertSegments.add(segment);
      }
    }
    // SELECT -> INSERT can fail due to races; callers must be prepared to retry.
    // Avoiding ON DUPLICATE KEY since it's not portable.
    // Avoiding try/catch since it may cause inadvertent transaction-splitting.
    final List<List<DataSegment>> partitionedSegments = Lists.partition(
        new ArrayList<>(toInsertSegments),
        MAX_NUM_SEGMENTS_TO_ANNOUNCE_AT_ONCE
    );
    PreparedBatch preparedBatch = handle.prepareBatch(
        StringUtils.format(
            "INSERT INTO %1$s (id, dataSource, created_date, start, %2$send%2$s, partitioned, version, used, payload) "
            + "VALUES (:id, :dataSource, :created_date, :start, :end, :partitioned, :version, :used, :payload)",
            dbTables.getSegmentsTable(),
            connector.getQuoteString()
        )
    );
    for (List<DataSegment> partition : partitionedSegments) {
      for (DataSegment segment : partition) {
        preparedBatch.add()
                     .bind("id", segment.getId().toString())
                     .bind("dataSource", segment.getDataSource())
                     .bind("created_date", DateTimes.nowUtc().toString())
                     .bind("start", segment.getInterval().getStart().toString())
                     .bind("end", segment.getInterval().getEnd().toString())
                     .bind("partitioned", !(segment.getShardSpec() instanceof NoneShardSpec))
                     .bind("version", segment.getVersion())
                     .bind("used", usedSegments.contains(segment))
                     .bind("payload", jsonMapper.writeValueAsBytes(segment));
      }
      final int[] affectedRows = preparedBatch.execute();
      final boolean succeeded = Arrays.stream(affectedRows).allMatch(eachAffectedRows -> eachAffectedRows == 1);
      if (succeeded) {
        log.infoSegments(partition, "Published segments to DB");
      } else {
        final List<DataSegment> failedToPublish = IntStream.range(0, partition.size())
                                                           .filter(i -> affectedRows[i] != 1)
                                                           .mapToObj(partition::get)
                                                           .collect(Collectors.toList());
        throw new ISE("Failed to publish segments to DB: %s", SegmentUtils.commaSeparatedIdentifiers(failedToPublish));
      }
    }
  }
  catch (Exception e) {
    log.errorSegments(segments, "Exception inserting segments");
    throw e;
  }
  return toInsertSegments;
}
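As the in-code comments note, the SELECT-then-INSERT sequence can race with concurrent writers, so the caller is expected to retry the whole transaction. Below is a minimal retry sketch assuming a plain JDBI DBI instance and at least one attempt; it is illustrative only, since Druid's actual retry logic lives in SQLMetadataConnector and additionally checks whether the failure looks transient before retrying.

import org.skife.jdbi.v2.DBI;
import org.skife.jdbi.v2.TransactionCallback;
import org.skife.jdbi.v2.exceptions.CallbackFailedException;

// Illustrative helper: retry the whole transaction a bounded number of times.
// Assumes maxTries >= 1.
final class TransactionRetry
{
  static <T> T withRetries(DBI dbi, TransactionCallback<T> callback, int maxTries)
  {
    CallbackFailedException lastFailure = null;
    for (int attempt = 1; attempt <= maxTries; attempt++) {
      try {
        return dbi.inTransaction(callback);
      }
      catch (CallbackFailedException e) {
        // e.g. a duplicate-key error from the SELECT -> INSERT race described above
        lastFailure = e;
      }
    }
    throw lastFailure;
  }
}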
Use of org.skife.jdbi.v2.sqlobject.Bind in project airpal by airbnb.
The class JobHistoryStoreDAO, method getJobs.
private List<Job> getJobs(long limit, int dayInterval, String outerWhereClauseArg, String innerWhereClauseArg) {
    String outerWhereClause = Strings.isNullOrEmpty(outerWhereClauseArg) ? "true" : outerWhereClauseArg;
    String innerWhereClause = Strings.isNullOrEmpty(innerWhereClauseArg) ? "true" : innerWhereClauseArg;
    try (Handle handle = dbi.open()) {
        Query<Map<String, Object>> query = handle.createQuery(
                "SELECT " +
                "j.id AS id, j.query AS query, j.user AS user, j.uuid AS uuid, j.queryStats as queryStats, " +
                "j.state AS state, j.columns AS columns, j.query_finished AS queryFinished, " +
                "j.query_started AS queryStarted, j.error AS error, " +
                "t.connector_id AS connectorId, t.schema_ AS \"schema\", t.table_ AS \"table\", t.columns, " +
                "jo.type, jo.description, jo.location " +
                "FROM (SELECT * FROM jobs " +
                "WHERE " + Util.getQueryFinishedCondition(dbType) + " " +
                "AND " + innerWhereClause + " " +
                "ORDER BY query_finished DESC LIMIT :limit) j " +
                "LEFT OUTER JOIN job_tables jt ON j.id = jt.job_id " +
                "LEFT OUTER JOIN tables t ON jt.table_id = t.id " +
                "LEFT OUTER JOIN job_outputs jo ON j.id = jo.job_id " +
                "WHERE " + outerWhereClause + " " +
                "ORDER BY query_finished DESC")
                .bind("limit", limit)
                .bind("day_interval", dayInterval);
        Map<Long, Job> idToJobMap = query
                .map(RosettaResultSetMapperFactory.mapperFor(JobTableOutputJoinRow.class))
                .fold(new HashMap<Long, Job>(), new JobTableOutputJoinRow.JobFolder());
        return new ArrayList<>(idToJobMap.values());
    }
}
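Note that only :limit and :day_interval are bound as named parameters; the where-clause fragments are concatenated directly into the SQL text, and :day_interval is only used if one of those fragments references it. A hypothetical call site is sketched below; the conditions shown are illustrative, MySQL-flavoured SQL, not airpal's actual callers.

// Hypothetical usage: the inner clause references the bound :day_interval parameter,
// while the outer clause filters the joined rows.
List<Job> recent = getJobs(
        200,
        7,
        "jo.type IS NOT NULL",
        "query_started > DATE_SUB(NOW(), INTERVAL :day_interval DAY)");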
Use of org.skife.jdbi.v2.sqlobject.Bind in project druid by druid-io.
The class DerivativeDataSourceManager, method getAvgSizePerGranularity.
/**
 * Calculates the average data size per segment granularity for a given datasource.
 *
 * For example, suppose a datasource has 5 segments:
 * interval = "2018-04-01/2018-04-02", segment size = 1024 * 1024 * 2
 * interval = "2018-04-01/2018-04-02", segment size = 1024 * 1024 * 2
 * interval = "2018-04-02/2018-04-03", segment size = 1024 * 1024 * 1
 * interval = "2018-04-02/2018-04-03", segment size = 1024 * 1024 * 1
 * interval = "2018-04-02/2018-04-03", segment size = 1024 * 1024 * 1
 * Then the number of distinct intervals is 2 and the total segment size is 1024 * 1024 * 7,
 * so the method returns 1024 * 1024 * 7 / 2 = 1024 * 1024 * 3.5.
 *
 * @param datasource the datasource name
 * @return average data size per segment granularity
 */
private long getAvgSizePerGranularity(String datasource)
{
  return connector.retryWithHandle(new HandleCallback<Long>() {
    Set<Interval> intervals = new HashSet<>();
    long totalSize = 0;

    @Override
    public Long withHandle(Handle handle)
    {
      handle.createQuery(
          StringUtils.format(
              "SELECT start,%1$send%1$s,payload FROM %2$s WHERE used = true AND dataSource = :dataSource",
              connector.getQuoteString(), dbTables.get().getSegmentsTable()))
          .bind("dataSource", datasource)
          .map((int index, ResultSet r, StatementContext ctx) -> {
            intervals.add(Intervals.utc(
                DateTimes.of(r.getString("start")).getMillis(),
                DateTimes.of(r.getString("end")).getMillis()));
            DataSegment segment = JacksonUtils.readValue(objectMapper, r.getBytes("payload"), DataSegment.class);
            totalSize += segment.getSize();
            return null;
          })
          .list();
      return intervals.isEmpty() ? 0L : totalSize / intervals.size();
    }
  });
}
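The lambda passed to map() above is an inline org.skife.jdbi.v2.tweak.ResultSetMapper. For illustration, the same row-to-interval mapping could be extracted into a named mapper class, as sketched below; this is a hypothetical class that only handles the interval columns (ignoring the payload) and uses plain Joda-Time parsing instead of Druid's DateTimes helper.

import java.sql.ResultSet;
import java.sql.SQLException;
import org.joda.time.DateTime;
import org.joda.time.Interval;
import org.skife.jdbi.v2.StatementContext;
import org.skife.jdbi.v2.tweak.ResultSetMapper;

// Illustrative named mapper equivalent to the inline lambda's interval handling.
public class SegmentIntervalMapper implements ResultSetMapper<Interval>
{
  @Override
  public Interval map(int index, ResultSet r, StatementContext ctx) throws SQLException
  {
    return new Interval(DateTime.parse(r.getString("start")), DateTime.parse(r.getString("end")));
  }
}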
Use of org.skife.jdbi.v2.sqlobject.Bind in project dropwizard by dropwizard.
The class GuavaJDBITest, method createsAValidDBI.
@Test
public void createsAValidDBI() throws Exception {
    final Handle handle = dbi.open();
    final Query<String> names = handle.createQuery("SELECT name FROM people WHERE age < ?")
            .bind(0, 50)
            .map(StringColumnMapper.INSTANCE);
    assertThat(names).containsOnly("Coda Hale", "Kris Gale");
}
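This test binds positionally with bind(0, 50). The same query can be written with a named parameter instead, which is the style the @Bind annotation maps onto in the SQL Object API; a minimal sketch of that variant, reusing the test's table and mapper:

// Named-parameter variant of the query above (illustrative only).
final Query<String> namesByAge = handle.createQuery("SELECT name FROM people WHERE age < :age")
        .bind("age", 50)
        .map(StringColumnMapper.INSTANCE);
assertThat(namesByAge).containsOnly("Coda Hale", "Kris Gale");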