use of org.skife.jdbi.v2.sqlobject.Bind in project dropwizard by dropwizard.
the class GuavaJDBITest method setUp.
@Before
public void setUp() throws Exception {
    when(environment.healthChecks()).thenReturn(healthChecks);
    when(environment.lifecycle()).thenReturn(lifecycleEnvironment);
    when(environment.metrics()).thenReturn(metricRegistry);
    when(environment.getHealthCheckExecutorService()).thenReturn(Executors.newSingleThreadExecutor());
    this.dbi = factory.build(environment, hsqlConfig, "hsql");
    final ArgumentCaptor<Managed> managedCaptor = ArgumentCaptor.forClass(Managed.class);
    verify(lifecycleEnvironment).manage(managedCaptor.capture());
    managed.addAll(managedCaptor.getAllValues());
    for (Managed obj : managed) {
        obj.start();
    }
    try (Handle handle = dbi.open()) {
        handle.createCall("DROP TABLE people IF EXISTS").invoke();
        handle.createCall("CREATE TABLE people (name varchar(100) primary key, email varchar(100), age int, created_at timestamp)").invoke();
        handle.createStatement("INSERT INTO people VALUES (?, ?, ?, ?)")
              .bind(0, "Coda Hale").bind(1, "chale@yammer-inc.com")
              .bind(2, 30).bind(3, new Timestamp(1365465078000L))
              .execute();
        handle.createStatement("INSERT INTO people VALUES (?, ?, ?, ?)")
              .bind(0, "Kris Gale").bind(1, "kgale@yammer-inc.com")
              .bind(2, 32).bind(3, new Timestamp(1365465078000L))
              .execute();
        handle.createStatement("INSERT INTO people VALUES (?, ?, ?, ?)")
              .bind(0, "Old Guy").bindNull(1, Types.VARCHAR)
              .bind(2, 99).bind(3, new Timestamp(1365465078000L))
              .execute();
        handle.createStatement("INSERT INTO people VALUES (?, ?, ?, ?)")
              .bind(0, "Alice Example").bind(1, "alice@example.org")
              .bind(2, 99).bindNull(3, Types.TIMESTAMP)
              .execute();
    }
}
use of org.skife.jdbi.v2.sqlobject.Bind in project dropwizard by dropwizard.
the class JDBITest method createsAValidDBI.
@Test
public void createsAValidDBI() throws Exception {
    final Handle handle = dbi.open();
    final Query<String> names = handle.createQuery("SELECT name FROM people WHERE age < ?")
                                      .bind(0, 50)
                                      .map(StringColumnMapper.INSTANCE);
    assertThat(names).containsOnly("Coda Hale", "Kris Gale");
}
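Both dropwizard snippets bind parameters through the fluent Handle API; the @Bind annotation that this page indexes belongs to JDBI v2's SQL Object API, where named parameters in the SQL are filled from annotated method arguments. A minimal sketch of a hypothetical DAO against the people table seeded in setUp (the interface and method names are illustrative, not part of dropwizard):
import java.sql.Timestamp;
import java.util.List;
import org.skife.jdbi.v2.sqlobject.Bind;
import org.skife.jdbi.v2.sqlobject.SqlQuery;
import org.skife.jdbi.v2.sqlobject.SqlUpdate;

public interface PersonDAO {
    // :maxAge in the SQL is supplied by the @Bind("maxAge") argument.
    @SqlQuery("SELECT name FROM people WHERE age < :maxAge")
    List<String> findNamesYoungerThan(@Bind("maxAge") int maxAge);

    @SqlUpdate("INSERT INTO people (name, email, age, created_at) VALUES (:name, :email, :age, :createdAt)")
    void insert(@Bind("name") String name,
                @Bind("email") String email,
                @Bind("age") int age,
                @Bind("createdAt") Timestamp createdAt);
}
Such an interface could be attached to the same DBI built in setUp, e.g. final PersonDAO dao = dbi.onDemand(PersonDAO.class); dao.findNamesYoungerThan(50);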
use of org.skife.jdbi.v2.sqlobject.Bind in project druid by druid-io.
the class IndexerSQLMetadataStorageCoordinator method allocatePendingSegment.
@Override
public SegmentIdentifier allocatePendingSegment(
    final String dataSource,
    final String sequenceName,
    final String previousSegmentId,
    final Interval interval,
    final String maxVersion
) throws IOException {
    Preconditions.checkNotNull(dataSource, "dataSource");
    Preconditions.checkNotNull(sequenceName, "sequenceName");
    Preconditions.checkNotNull(interval, "interval");
    Preconditions.checkNotNull(maxVersion, "maxVersion");
    final String previousSegmentIdNotNull = previousSegmentId == null ? "" : previousSegmentId;
    return connector.retryTransaction(new TransactionCallback<SegmentIdentifier>() {

        @Override
        public SegmentIdentifier inTransaction(Handle handle, TransactionStatus transactionStatus) throws Exception {
            final List<byte[]> existingBytes = handle
                .createQuery(String.format(
                    "SELECT payload FROM %s WHERE "
                    + "dataSource = :dataSource AND "
                    + "sequence_name = :sequence_name AND "
                    + "sequence_prev_id = :sequence_prev_id",
                    dbTables.getPendingSegmentsTable()))
                .bind("dataSource", dataSource)
                .bind("sequence_name", sequenceName)
                .bind("sequence_prev_id", previousSegmentIdNotNull)
                .map(ByteArrayMapper.FIRST)
                .list();

            if (!existingBytes.isEmpty()) {
                final SegmentIdentifier existingIdentifier =
                    jsonMapper.readValue(Iterables.getOnlyElement(existingBytes), SegmentIdentifier.class);
                if (existingIdentifier.getInterval().getStartMillis() == interval.getStartMillis()
                    && existingIdentifier.getInterval().getEndMillis() == interval.getEndMillis()) {
                    log.info("Found existing pending segment [%s] for sequence[%s] (previous = [%s]) in DB",
                             existingIdentifier.getIdentifierAsString(), sequenceName, previousSegmentIdNotNull);
                    return existingIdentifier;
                } else {
                    log.warn("Cannot use existing pending segment [%s] for sequence[%s] (previous = [%s]) in DB, "
                             + "does not match requested interval[%s]",
                             existingIdentifier.getIdentifierAsString(), sequenceName, previousSegmentIdNotNull, interval);
                    return null;
                }
            }

            // Make up a pending segment based on existing segments and pending segments in the DB. This works
            // assuming that all tasks inserting segments at a particular point in time are going through the
            // allocatePendingSegment flow. This should be assured through some other mechanism (like task locks).
            final SegmentIdentifier newIdentifier;
            final List<TimelineObjectHolder<String, DataSegment>> existingChunks =
                getTimelineForIntervalsWithHandle(handle, dataSource, ImmutableList.of(interval)).lookup(interval);
            if (existingChunks.size() > 1) {
                // Not possible to expand more than one chunk with a single segment.
                log.warn("Cannot allocate new segment for dataSource[%s], interval[%s], maxVersion[%s]: already have [%,d] chunks.",
                         dataSource, interval, maxVersion, existingChunks.size());
                return null;
            } else {
                SegmentIdentifier max = null;
                if (!existingChunks.isEmpty()) {
                    TimelineObjectHolder<String, DataSegment> existingHolder = Iterables.getOnlyElement(existingChunks);
                    for (PartitionChunk<DataSegment> existing : existingHolder.getObject()) {
                        if (max == null || max.getShardSpec().getPartitionNum() < existing.getObject().getShardSpec().getPartitionNum()) {
                            max = SegmentIdentifier.fromDataSegment(existing.getObject());
                        }
                    }
                }
                final List<SegmentIdentifier> pendings = getPendingSegmentsForIntervalWithHandle(handle, dataSource, interval);
                for (SegmentIdentifier pending : pendings) {
                    if (max == null
                        || pending.getVersion().compareTo(max.getVersion()) > 0
                        || (pending.getVersion().equals(max.getVersion())
                            && pending.getShardSpec().getPartitionNum() > max.getShardSpec().getPartitionNum())) {
                        max = pending;
                    }
                }
                if (max == null) {
                    newIdentifier = new SegmentIdentifier(dataSource, interval, maxVersion, new NumberedShardSpec(0, 0));
                } else if (!max.getInterval().equals(interval) || max.getVersion().compareTo(maxVersion) > 0) {
                    log.warn("Cannot allocate new segment for dataSource[%s], interval[%s], maxVersion[%s]: conflicting segment[%s].",
                             dataSource, interval, maxVersion, max.getIdentifierAsString());
                    return null;
                } else if (max.getShardSpec() instanceof LinearShardSpec) {
                    newIdentifier = new SegmentIdentifier(dataSource, max.getInterval(), max.getVersion(),
                                                          new LinearShardSpec(max.getShardSpec().getPartitionNum() + 1));
                } else if (max.getShardSpec() instanceof NumberedShardSpec) {
                    newIdentifier = new SegmentIdentifier(dataSource, max.getInterval(), max.getVersion(),
                                                          new NumberedShardSpec(max.getShardSpec().getPartitionNum() + 1,
                                                                                ((NumberedShardSpec) max.getShardSpec()).getPartitions()));
                } else {
                    log.warn("Cannot allocate new segment for dataSource[%s], interval[%s], maxVersion[%s]: ShardSpec class[%s] used by [%s].",
                             dataSource, interval, maxVersion, max.getShardSpec().getClass(), max.getIdentifierAsString());
                    return null;
                }
            }

            // SELECT -> INSERT can fail due to races; callers must be prepared to retry.
            // Avoiding ON DUPLICATE KEY since it's not portable.
            // Avoiding try/catch since it may cause inadvertent transaction-splitting.
            // UNIQUE key for the row, ensuring sequences do not fork in two directions.
            // Using a single column instead of (sequence_name, sequence_prev_id) as some MySQL storage engines
            // have difficulty with large unique keys (see https://github.com/druid-io/druid/issues/2319)
            final String sequenceNamePrevIdSha1 = BaseEncoding.base16().encode(
                Hashing.sha1().newHasher()
                       .putBytes(StringUtils.toUtf8(sequenceName))
                       .putByte((byte) 0xff)
                       .putBytes(StringUtils.toUtf8(previousSegmentIdNotNull))
                       .hash().asBytes());

            handle.createStatement(String.format(
                      "INSERT INTO %1$s (id, dataSource, created_date, start, %2$send%2$s, sequence_name, sequence_prev_id, sequence_name_prev_id_sha1, payload) "
                      + "VALUES (:id, :dataSource, :created_date, :start, :end, :sequence_name, :sequence_prev_id, :sequence_name_prev_id_sha1, :payload)",
                      dbTables.getPendingSegmentsTable(), connector.getQuoteString()))
                  .bind("id", newIdentifier.getIdentifierAsString())
                  .bind("dataSource", dataSource)
                  .bind("created_date", new DateTime().toString())
                  .bind("start", interval.getStart().toString())
                  .bind("end", interval.getEnd().toString())
                  .bind("sequence_name", sequenceName)
                  .bind("sequence_prev_id", previousSegmentIdNotNull)
                  .bind("sequence_name_prev_id_sha1", sequenceNamePrevIdSha1)
                  .bind("payload", jsonMapper.writeValueAsBytes(newIdentifier))
                  .execute();

            log.info("Allocated pending segment [%s] for sequence[%s] (previous = [%s]) in DB",
                     newIdentifier.getIdentifierAsString(), sequenceName, previousSegmentIdNotNull);
            return newIdentifier;
        }
    }, ALLOCATE_SEGMENT_QUIET_TRIES, SQLMetadataConnector.DEFAULT_MAX_TRIES);
}
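The comments in that method describe the race the caller has to absorb: the SELECT and the INSERT are not atomic, so two writers can both miss the row and attempt the insert, and the unique key on sequence_name_prev_id_sha1 turns the loser's insert into an error that is resolved by retrying (here via connector.retryTransaction). A stripped-down sketch of the same find-or-insert-and-retry idea with plain JDBI v2 follows; the sequences table and its columns are made up for illustration, and unlike the Druid code it runs on an auto-commit Handle, so catching the failed INSERT is safe here:
import org.skife.jdbi.v2.DBI;
import org.skife.jdbi.v2.Handle;
import org.skife.jdbi.v2.exceptions.UnableToExecuteStatementException;
import org.skife.jdbi.v2.util.StringColumnMapper;

public final class FindOrInsertExample {
    // Hypothetical helper: returns the payload already stored for a unique key,
    // inserting it first if absent. A racing writer makes our INSERT violate the
    // unique key; we then loop and read the winner's row on the next attempt.
    public static String findOrInsert(DBI dbi, String key, String payload, int maxTries) {
        for (int attempt = 0; attempt < maxTries; attempt++) {
            try (Handle handle = dbi.open()) {
                final String existing = handle.createQuery("SELECT payload FROM sequences WHERE seq_key = :key")
                                              .bind("key", key)
                                              .map(StringColumnMapper.INSTANCE)
                                              .first();
                if (existing != null) {
                    return existing;
                }
                handle.createStatement("INSERT INTO sequences (seq_key, payload) VALUES (:key, :payload)")
                      .bind("key", key)
                      .bind("payload", payload)
                      .execute();
                return payload;
            } catch (UnableToExecuteStatementException e) {
                // Most likely a unique-key violation from a concurrent insert; retry and re-read.
            }
        }
        throw new IllegalStateException("Could not find or insert row for key: " + key);
    }
}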
use of org.skife.jdbi.v2.sqlobject.Bind in project pac4j by pac4j.
the class DbProfileService method query.
protected List<Map<String, Object>> query(final String query, final String key, final String value) {
    Handle h = null;
    try {
        h = dbi.open();
        logger.debug("Query: {} for key/value: {} / {}", query, key, value);
        return h.createQuery(query).bind(key, value).list(2);
    } finally {
        if (h != null) {
            h.close();
        }
    }
}
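Here the key argument doubles as the named parameter in the SQL text, so the query string and the bind key must match, and the trailing list(2) caps the result at two rows. A hypothetical call site (the table and columns are invented for illustration; pac4j builds its own query strings):
final List<Map<String, Object>> rows = query(
    "SELECT id, username, password FROM users WHERE username = :username",
    "username",
    "jdoe");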
use of org.skife.jdbi.v2.sqlobject.Bind in project SpinalTap by airbnb.
the class LatestMysqlSchemaStore method getLatest.
@Override
public Map<String, MysqlTableSchema> getLatest(@NotNull final String database) {
    List<ColumnInfo> allColumnInfo;
    try (Handle handle = jdbi.open()) {
        allColumnInfo = MysqlSchemaUtil.LIST_COLUMNINFO_RETRYER.call(
            () -> handle.createQuery(ALL_TABLE_SCHEMA_QUERY)
                        .bind("db", database)
                        .map(MysqlSchemaUtil.COLUMN_MAPPER)
                        .list());
    } catch (Exception ex) {
        log.error(String.format("Failed to fetch schema for database: %s", database), ex);
        Throwables.throwIfUnchecked(ex);
        throw new RuntimeException(ex);
    }
    Map<String, MysqlTableSchema> allTableSchemaMap = Maps.newHashMap();
    allColumnInfo.forEach(columnInfo -> {
        String table = columnInfo.getTable();
        allTableSchemaMap
            .computeIfAbsent(table, __ -> MysqlSchemaUtil.createTableSchema(
                source, database, table, getTableDDL(database, table), Lists.newArrayList()))
            .getColumnInfo()
            .add(columnInfo);
    });
    return allTableSchemaMap;
}
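ALL_TABLE_SCHEMA_QUERY is a constant defined elsewhere in LatestMysqlSchemaStore; the only thing this snippet relies on is that it contains a :db named parameter for .bind("db", database) to fill. A hypothetical query of that shape against information_schema, not SpinalTap's actual SQL:
// Illustrative only: a query shaped like the one bound above, with a :db placeholder.
private static final String EXAMPLE_TABLE_SCHEMA_QUERY =
    "SELECT TABLE_NAME, COLUMN_NAME, DATA_TYPE, COLUMN_KEY "
    + "FROM information_schema.COLUMNS "
    + "WHERE TABLE_SCHEMA = :db "
    + "ORDER BY TABLE_NAME, ORDINAL_POSITION";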