use of org.skife.jdbi.v2.Query in project druid by druid-io.
the class SqlEntity method openCleanableFile.
/**
 * Executes a SQL query on the specified database and fetches the result into the given file.
 * The result file is deleted if the query execution or the file write fails.
 *
 * @param sql                          The SQL query to be executed
 * @param sqlFirehoseDatabaseConnector The database connector
 * @param objectMapper                 An object mapper, used to serialize the result rows as JSON
 * @param foldCase                     A boolean flag used to enable or disable case folding of database column names
 * @param tempFile                     The file the query results are written into
 *
 * @return A {@link InputEntity.CleanableFile} object that wraps the file containing the SQL results
 */
public static CleanableFile openCleanableFile(
    String sql,
    SQLFirehoseDatabaseConnector sqlFirehoseDatabaseConnector,
    ObjectMapper objectMapper,
    boolean foldCase,
    File tempFile
) throws IOException
{
  try (FileOutputStream fos = new FileOutputStream(tempFile);
       final JsonGenerator jg = objectMapper.getFactory().createGenerator(fos)) {
    // Execute the SQL query and lazily retrieve the results into the file in JSON format.
    // foldCase is useful to handle differences in case-sensitivity behavior across databases.
    sqlFirehoseDatabaseConnector.retryWithHandle(
        (handle) -> {
          ResultIterator<Map<String, Object>> resultIterator = handle
              .createQuery(sql)
              .map((index, r, ctx) -> {
                Map<String, Object> resultRow = foldCase ? new CaseFoldedMap() : new HashMap<>();
                ResultSetMetaData resultMetadata;
                try {
                  resultMetadata = r.getMetaData();
                }
                catch (SQLException e) {
                  throw new ResultSetException("Unable to obtain metadata from result set", e, ctx);
                }
                try {
                  for (int i = 1; i <= resultMetadata.getColumnCount(); i++) {
                    String key = resultMetadata.getColumnName(i);
                    String alias = resultMetadata.getColumnLabel(i);
                    Object value = r.getObject(i);
                    resultRow.put(alias != null ? alias : key, value);
                  }
                }
                catch (SQLException e) {
                  throw new ResultSetException("Unable to access specific metadata from result set metadata", e, ctx);
                }
                return resultRow;
              })
              .iterator();
          jg.writeStartArray();
          while (resultIterator.hasNext()) {
            jg.writeObject(resultIterator.next());
          }
          jg.writeEndArray();
          jg.close();
          return null;
        },
        (exception) -> sqlFirehoseDatabaseConnector.isTransientException(exception)
                       && !(SQLMetadataStorageActionHandler.isStatementException(exception))
    );
    return new CleanableFile()
    {
      @Override
      public File file()
      {
        return tempFile;
      }

      @Override
      public void close()
      {
        if (!tempFile.delete()) {
          LOG.warn("Failed to remove file[%s]", tempFile.getAbsolutePath());
        }
      }
    };
  }
  catch (Exception e) {
    if (!tempFile.delete()) {
      LOG.warn("Failed to remove file[%s]", tempFile.getAbsolutePath());
    }
    throw new IOException(e);
  }
}
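For context, a minimal usage sketch of this method. The helper name, connector variable, query, and file handling below are hypothetical and not from the Druid source; the code assumes it sits alongside SqlEntity so the unqualified CleanableFile and SQLFirehoseDatabaseConnector names resolve.

import com.fasterxml.jackson.databind.ObjectMapper;
import java.io.File;
import java.io.IOException;

class SqlEntityUsageSketch {
  // Hypothetical helper: fetches a query result into a temp file via
  // SqlEntity.openCleanableFile and returns the size of the JSON output.
  // The caller supplies a concrete SQLFirehoseDatabaseConnector (Druid ships
  // MySQL and PostgreSQL implementations in its extensions).
  static long fetchToTempFile(SQLFirehoseDatabaseConnector connector) throws IOException {
    File tempFile = File.createTempFile("sql-results", ".json");
    ObjectMapper mapper = new ObjectMapper();
    try (CleanableFile results =
             SqlEntity.openCleanableFile("SELECT id, name FROM users", connector, mapper, true, tempFile)) {
      // The file now holds a JSON array of row objects, e.g. [{"id":1,"name":"a"}, ...]
      return results.file().length();
    } // close() deletes the temp file
  }
}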
use of com.google.datastore.v1.Query in project beam by apache.
the class DataStoreReadWriteIT method testWriteRead_viaCoreBeamIO.
@Test
public void testWriteRead_viaCoreBeamIO() {
  String projectId = options.getProject();
  Key ancestor = makeKey(KIND, UUID.randomUUID().toString()).build();
  Key itemKey =
      makeKey(ancestor, KIND, UUID.randomUUID().toString())
          .setPartitionId(PartitionId.newBuilder().setProjectId(projectId).build())
          .build();
  Row testWriteRow = Row.withSchema(SOURCE_SCHEMA).addValues(itemKey.toByteArray(), "4000").build();

  writePipeline
      .apply(Create.of(testWriteRow).withRowSchema(SOURCE_SCHEMA))
      .apply(RowToEntity.create("__key__", KIND))
      .apply(DatastoreIO.v1().write().withProjectId(projectId));
  writePipeline.run().waitUntilFinish();

  Query.Builder query = Query.newBuilder();
  query.addKindBuilder().setName(KIND);
  query.setFilter(makeFilter("__key__", Operator.EQUAL, makeValue(itemKey)));

  DatastoreV1.Read read = DatastoreIO.v1().read().withProjectId(projectId).withQuery(query.build());
  PCollection<Row> rowsRead = readPipeline.apply(read).apply(EntityToRow.create(SOURCE_SCHEMA, "__key__"));
  PAssert.that(rowsRead).containsInAnyOrder(testWriteRow);
  readPipeline.run().waitUntilFinish();
}
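The test references SOURCE_SCHEMA without showing it. A sketch of what such a schema looks like; the field names and types here are assumptions inferred from the row values added above, not copied from the Beam source.

import org.apache.beam.sdk.schemas.Schema;

// Assumed shape: a byte-array "__key__" column plus a string payload column,
// matching the (itemKey.toByteArray(), "4000") values in the test row.
static final Schema SOURCE_SCHEMA =
    Schema.builder()
        .addNullableField("__key__", Schema.FieldType.BYTES)
        .addNullableField("content", Schema.FieldType.STRING)
        .build();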
use of com.google.datastore.v1.Query in project beam by apache.
the class SplitQueryFnIT method testSplitQueryFn.
/**
* A helper method to test that {@link SplitQueryFn} generates the expected number of splits.
*/
private void testSplitQueryFn(String projectId, String kind, @Nullable String namespace, int expectedNumSplits)
    throws Exception {
  Query.Builder query = Query.newBuilder();
  query.addKindBuilder().setName(kind);

  SplitQueryFn splitQueryFn = new SplitQueryFn(V1Options.from(projectId, namespace, null), 0);
  DoFnTester<Query, Query> doFnTester = DoFnTester.of(splitQueryFn);

  List<Query> queries = doFnTester.processBundle(query.build());
  assertEquals(expectedNumSplits, queries.size());
}
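A hypothetical invocation of this helper; the project, kind, and expected split count are placeholders, not values from the Beam integration test.

@Test
public void testSplitQueryFnWithLargeDataset() throws Exception {
  // With numSplits = 0 (as passed to SplitQueryFn above), the split count is
  // estimated from the size of the kind, so 32 here is purely illustrative.
  testSplitQueryFn("my-gcp-project", "LargeKind", null, 32);
}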
use of com.google.datastore.v1.Query in project beam by apache.
the class DatastoreV1Test method testReadValidationFailsQueryLimitZero.
@Test
public void testReadValidationFailsQueryLimitZero() throws Exception {
  Query invalidLimit = Query.newBuilder().setLimit(Int32Value.newBuilder().setValue(0)).build();
  thrown.expect(IllegalArgumentException.class);
  thrown.expectMessage("Invalid query limit 0: must be positive");

  DatastoreIO.v1().read().withQuery(invalidLimit);
}
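By contrast, a strictly positive limit passes this validation. A minimal sketch mirroring the test above; the project id is a placeholder.

Query validLimit = Query.newBuilder().setLimit(Int32Value.newBuilder().setValue(100)).build();
// No exception: withQuery accepts any query whose limit is positive (or unset).
DatastoreIO.v1().read().withProjectId("my-gcp-project").withQuery(validLimit);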
use of com.google.datastore.v1.Query in project beam by apache.
the class DatastoreV1Test method testSplitQueryFnWithQueryLimit.
/**
* Tests {@link DatastoreV1.Read.SplitQueryFn} when the query has a user-specified limit.
*/
@Test
public void testSplitQueryFnWithQueryLimit() throws Exception {
  Query queryWithLimit = QUERY.toBuilder().setLimit(Int32Value.newBuilder().setValue(1)).build();

  SplitQueryFn splitQueryFn = new SplitQueryFn(V_1_OPTIONS, 10, mockDatastoreFactory);
  DoFnTester<Query, Query> doFnTester = DoFnTester.of(splitQueryFn);
  doFnTester.setCloningBehavior(CloningBehavior.DO_NOT_CLONE);
  List<Query> queries = doFnTester.processBundle(queryWithLimit);

  assertEquals(1, queries.size());
  verifyNoMoreInteractions(mockDatastore);
  verifyNoMoreInteractions(mockQuerySplitter);
}
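The two verifyNoMoreInteractions calls pin down the behavior under test: when the user supplies a limit, SplitQueryFn must not contact Datastore or the query splitter and should emit the query as-is. An illustrative extra assertion, not in the original test:

// The single output query should be the input query, untouched.
assertEquals(queryWithLimit, queries.get(0));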