Use of Query in project beam by apache — class V1ReadIT, method testE2EV1Read. (Note: despite the header, this Beam Datastore snippet appears to use com.google.datastore.v1.Query, not org.skife.jdbi.v2.Query.)
/**
 * End-to-end test for {@link DatastoreV1.Read#withQuery(Query)}.
 *
 * <p>Test entities are written to Datastore beforehand; this pipeline reads them back,
 * counts them, and asserts the count equals the number of entities written.
 */
@Test
public void testE2EV1Read() throws Exception {
  // Build an ancestor query scoped to the test kind and namespace.
  final Query ancestorQuery =
      V1TestUtil.makeAncestorKindQuery(options.getKind(), options.getNamespace(), ancestor);
  final DatastoreV1.Read readTransform =
      DatastoreIO.v1()
          .read()
          .withProjectId(project)
          .withQuery(ancestorQuery)
          .withNamespace(options.getNamespace());

  // Count everything the read transform produces and check it against the expected total.
  final Pipeline pipeline = Pipeline.create(options);
  final PCollection<Long> totalRead = pipeline.apply(readTransform).apply(Count.globally());
  PAssert.thatSingleton(totalRead).isEqualTo(numEntities);
  pipeline.run();
}
Use of Query in project beam by apache — class V1TestUtil, method deleteAllEntities. (Note: despite the header, this Beam Datastore snippet appears to use com.google.datastore.v1.Query, not org.skife.jdbi.v2.Query.)
/**
 * Deletes all entities of the configured kind under the given ancestor.
 *
 * @param options  test options supplying the kind and namespace for the ancestor query
 * @param project  the project hosting the Datastore instance
 * @param ancestor ancestor key name scoping which entities are deleted
 * @throws Exception if reading from or writing to Datastore fails
 */
static void deleteAllEntities(V1TestOptions options, String project, String ancestor)
    throws Exception {
  Datastore datastore = getDatastore(options, project);
  Query query =
      V1TestUtil.makeAncestorKindQuery(options.getKind(), options.getNamespace(), ancestor);
  V1TestReader reader = new V1TestReader(datastore, query, options.getNamespace());
  V1TestWriter writer = new V1TestWriter(datastore, new DeleteMutationBuilder());
  long numEntities = 0;
  try {
    // Stream every matching entity into the delete-mutation writer.
    while (reader.advance()) {
      Entity entity = reader.getCurrent();
      numEntities++;
      writer.write(entity);
    }
  } finally {
    // Close in a finally so buffered delete mutations are flushed and resources
    // released even if reading or writing throws part-way through (the original
    // leaked the writer on the exception path).
    writer.close();
  }
  LOG.info("Successfully deleted {} entities", numEntities);
}
Use of Query in project beam by apache — class V1TestUtil, method countEntities. (Note: despite the header, this Beam Datastore snippet appears to use com.google.datastore.v1.Query, not org.skife.jdbi.v2.Query.)
/**
 * Returns the total number of entities matching the ancestor query for the given options.
 */
static long countEntities(V1TestOptions options, String project, String ancestor)
    throws Exception {
  // Issue the same ancestor/kind query the tests use when reading.
  final Datastore datastore = V1TestUtil.getDatastore(options, project);
  final Query ancestorQuery =
      V1TestUtil.makeAncestorKindQuery(options.getKind(), options.getNamespace(), ancestor);
  final V1TestReader reader = new V1TestReader(datastore, ancestorQuery, options.getNamespace());

  long total = 0;
  for (; reader.advance(); total++) {
    // Drain the current entity; only the count matters here.
    reader.getCurrent();
  }
  return total;
}
Use of org.skife.jdbi.v2.Query in project killbill by killbill — class DatabaseExportDao, method exportDataForAccountAndTable.
/**
 * Exports all rows of one table belonging to the given account/tenant into {@code out}.
 *
 * <p>The table type is derived from the table name (account tables) or from well-known
 * account-record-id column names; tables that carry neither are skipped, since they are
 * not account-specific. LOB values are materialized to {@code byte[]}/{@code String}
 * before being handed to the output stream.
 *
 * @param out             sink notified of the new table and fed each row
 * @param columnsForTable column metadata for the table being exported (non-empty)
 * @param context         supplies the account and tenant record ids used to filter rows
 */
private void exportDataForAccountAndTable(final DatabaseExportOutputStream out,
                                          final List<ColumnInfo> columnsForTable,
                                          final InternalTenantContext context) {
  TableType tableType = TableType.OTHER;
  final String tableName = columnsForTable.get(0).getTableName();

  // Ignore casing (for H2)
  if (TableName.ACCOUNT.getTableName().equalsIgnoreCase(tableName)) {
    tableType = TableType.KB_ACCOUNT;
  } else if (TableName.ACCOUNT_HISTORY.getTableName().equalsIgnoreCase(tableName)) {
    tableType = TableType.KB_ACCOUNT_HISTORY;
  }

  // Build the select list while sniffing the table type from well-known column names.
  boolean firstColumn = true;
  final StringBuilder queryBuilder = new StringBuilder("select ");
  for (final ColumnInfo column : columnsForTable) {
    if (!firstColumn) {
      queryBuilder.append(", ");
    } else {
      firstColumn = false;
    }
    queryBuilder.append(column.getColumnName());
    if (tableType == TableType.OTHER) {
      // Ignore casing (for H2)
      if (column.getColumnName().equalsIgnoreCase(TableType.KB_PER_ACCOUNT.getAccountRecordIdColumnName())) {
        tableType = TableType.KB_PER_ACCOUNT;
      } else if (column.getColumnName().equalsIgnoreCase(TableType.NOTIFICATION.getAccountRecordIdColumnName())) {
        tableType = TableType.NOTIFICATION;
      }
    }
  }

  // Don't export non-account specific tables
  if (tableType == TableType.OTHER) {
    return;
  }

  // Build the query - make sure to filter by account and tenant!
  queryBuilder.append(" from ").append(tableName)
              .append(" where ").append(tableType.getAccountRecordIdColumnName())
              .append(" = :accountRecordId and ").append(tableType.getTenantRecordIdColumnName())
              .append(" = :tenantRecordId");

  // Notify the stream that we're about to write data for a different table
  out.newTable(tableName, columnsForTable);

  dbi.withHandle(new HandleCallback<Void>() {
    @Override
    public Void withHandle(final Handle handle) throws Exception {
      final ResultIterator<Map<String, Object>> iterator =
          handle.createQuery(queryBuilder.toString())
                .bind("accountRecordId", context.getAccountRecordId())
                .bind("tenantRecordId", context.getTenantRecordId())
                .iterator();
      try {
        while (iterator.hasNext()) {
          final Map<String, Object> row = iterator.next();
          // Replace LOB values in place; entry.setValue on an existing key is not a
          // structural modification, so iterating the entry set is safe here.
          for (final Map.Entry<String, Object> entry : row.entrySet()) {
            final Object value = entry.getValue();
            // See also LowerToCamelBeanMapper
            if (value instanceof Blob) {
              final Blob blob = (Blob) value;
              // JDBC Blob positions are 1-based: getBytes(0, ...) violates the
              // java.sql.Blob contract (the original passed 0, a bug — compare the
              // correct 1-based Clob.getSubString call below).
              entry.setValue(blob.getBytes(1, (int) blob.length()));
            } else if (value instanceof Clob) {
              // TODO Update LowerToCamelBeanMapper?
              final Clob clob = (Clob) value;
              entry.setValue(clob.getSubString(1, (int) clob.length()));
            }
          }
          try {
            out.write(row);
          } catch (final IOException e) {
            logger.warn("Unable to write row: {}", row, e);
            throw e;
          }
        }
      } finally {
        // ResultIterator holds a live statement/cursor; always release it.
        iterator.close();
      }
      return null;
    }
  });
}
Aggregations