Use of org.skife.jdbi.v2.Query in project dropwizard by dropwizard.
In class JDBITest, method createsAValidDBI:
@Test
public void createsAValidDBI() throws Exception {
    // Opening a handle from the managed DBI should yield a working query interface.
    final Handle handle = dbi.open();
    final Query<String> result = handle
        .createQuery("SELECT name FROM people WHERE age < ?")
        .bind(0, 50)
        .map(StringColumnMapper.INSTANCE);
    // The seeded table contains exactly these two people under age 50.
    assertThat(result).containsOnly("Coda Hale", "Kris Gale");
}
Use of org.skife.jdbi.v2.Query in project druid by druid-io.
In class JDBCExtractionNamespaceCacheFactory, method populateCache:
@Override
@Nullable
public CacheScheduler.VersionedCache populateCache(final JDBCExtractionNamespace namespace, final CacheScheduler.EntryImpl<JDBCExtractionNamespace> entryId, final String lastVersion, final CacheScheduler scheduler) {
    // Skip the refresh entirely when the DB reports no update newer than the cached version.
    final long previousCheck = lastVersion == null ? JodaUtils.MIN_INSTANT : Long.parseLong(lastVersion);
    final Long latestDBUpdate = lastUpdates(entryId, namespace);
    if (latestDBUpdate != null && latestDBUpdate <= previousCheck) {
        return null;
    }
    final long queryStartMillis = System.currentTimeMillis();
    final DBI dbi = ensureDBI(entryId, namespace);
    final String table = namespace.getTable();
    final String valueColumn = namespace.getValueColumn();
    final String keyColumn = namespace.getKeyColumn();
    LOG.debug("Updating %s", entryId);
    // Pull every key/value row from the configured table. Identifiers cannot be bound as
    // JDBC parameters, hence the formatted SQL; they come from namespace configuration.
    final List<Pair<String, String>> rows = dbi.withHandle(new HandleCallback<List<Pair<String, String>>>() {
        @Override
        public List<Pair<String, String>> withHandle(Handle handle) throws Exception {
            final String sql = String.format("SELECT %s, %s FROM %s", keyColumn, valueColumn, table);
            return handle.createQuery(sql).map(new ResultSetMapper<Pair<String, String>>() {
                @Override
                public Pair<String, String> map(final int index, final ResultSet r, final StatementContext ctx) throws SQLException {
                    return new Pair<>(r.getString(keyColumn), r.getString(valueColumn));
                }
            }).list();
        }
    });
    // Prefer the DB's own update timestamp as the version; fall back to the query start time.
    final String newVersion = latestDBUpdate != null
        ? latestDBUpdate.toString()
        : String.format("%d", queryStartMillis);
    final CacheScheduler.VersionedCache versionedCache = scheduler.createVersionedCache(entryId, newVersion);
    try {
        final Map<String, String> cache = versionedCache.getCache();
        for (Pair<String, String> row : rows) {
            cache.put(row.lhs, row.rhs);
        }
        LOG.info("Finished loading %d values for %s", cache.size(), entryId);
        return versionedCache;
    } catch (Throwable t) {
        // Release the partially-built cache before propagating, keeping the original failure primary.
        try {
            versionedCache.close();
        } catch (Exception e) {
            t.addSuppressed(e);
        }
        throw t;
    }
}
Use of Query in project beam by apache (note: this is the Cloud Datastore Query, not org.skife.jdbi.v2.Query).
In class V1TestUtil, method countEntities:
/**
 * Returns the total number of entities for the given datastore.
 */
static long countEntities(V1TestOptions options, String project, String ancestor) throws Exception {
    // Build a reader over every entity of the configured kind under the given ancestor.
    Datastore datastore = V1TestUtil.getDatastore(options, project);
    Query query = V1TestUtil.makeAncestorKindQuery(options.getKind(), options.getNamespace(), ancestor);
    V1TestReader reader = new V1TestReader(datastore, query, options.getNamespace());
    // Drain the reader, counting entities; getCurrent() is still invoked per entity
    // to mirror normal consumption of the reader.
    long count = 0;
    for (; reader.advance(); count++) {
        reader.getCurrent();
    }
    return count;
}
Use of Query in project beam by apache (again the Cloud Datastore Query, not org.skife.jdbi.v2.Query).
In class V1TestUtil, method deleteAllEntities:
/**
 * Delete all entities with the given ancestor.
 *
 * @param options test options supplying the entity kind and namespace
 * @param project the project whose datastore is read from and written to
 * @param ancestor ancestor scoping which entities are deleted
 * @throws Exception if reading entities, writing deletions, or closing the writer fails
 */
static void deleteAllEntities(V1TestOptions options, String project, String ancestor) throws Exception {
    Datastore datastore = getDatastore(options, project);
    Query query = V1TestUtil.makeAncestorKindQuery(options.getKind(), options.getNamespace(), ancestor);
    V1TestReader reader = new V1TestReader(datastore, query, options.getNamespace());
    V1TestWriter writer = new V1TestWriter(datastore, new DeleteMutationBuilder());
    long numEntities = 0;
    try {
        while (reader.advance()) {
            Entity entity = reader.getCurrent();
            numEntities++;
            writer.write(entity);
        }
    } finally {
        // Always close the writer: the original code leaked it when advance()/write() threw.
        // NOTE(review): assumes close() is safe after a failed write — confirm against V1TestWriter.
        writer.close();
    }
    LOG.info("Successfully deleted {} entities", numEntities);
}
Use of Query in project beam by apache (the Cloud Datastore Query protobuf, not org.skife.jdbi.v2.Query).
In class DatastoreV1Test, method testReadValidationFailsQueryLimitNegative:
@Test
public void testReadValidationFailsQueryLimitNegative() throws Exception {
    // A negative limit must be rejected by withQuery() with a descriptive message.
    Query.Builder builder = Query.newBuilder();
    builder.setLimit(Int32Value.newBuilder().setValue(-5));
    Query invalidLimit = builder.build();
    thrown.expect(IllegalArgumentException.class);
    thrown.expectMessage("Invalid query limit -5: must be positive");
    DatastoreIO.v1().read().withQuery(invalidLimit);
}
Aggregations