
Example 31 with Query

use of org.skife.jdbi.v2.Query in project druid by druid-io.

the class SqlEntity method openCleanableFile.

/**
 * Executes a SQL query on the specified database and fetches the result into the given file.
 * The result file is deleted if the query execution or the file write fails.
 *
 * @param sql                          The SQL query to be executed
 * @param sqlFirehoseDatabaseConnector The database connector
 * @param objectMapper                 An object mapper, used to serialize the query results to the file as JSON
 * @param foldCase                     A boolean flag used to enable or disable case folding of database column names, to handle case-sensitivity differences across databases
 * @param tempFile                     The local file into which the query results are fetched
 *
 * @return A {@link InputEntity.CleanableFile} object that wraps the file containing the SQL results
 */
public static CleanableFile openCleanableFile(String sql, SQLFirehoseDatabaseConnector sqlFirehoseDatabaseConnector, ObjectMapper objectMapper, boolean foldCase, File tempFile) throws IOException {
    try (FileOutputStream fos = new FileOutputStream(tempFile);
        final JsonGenerator jg = objectMapper.getFactory().createGenerator(fos)) {
        // Execute the sql query and lazily retrieve the results into the file in json format.
        // foldCase is useful to handle differences in case sensitivity behavior across databases.
        sqlFirehoseDatabaseConnector.retryWithHandle((handle) -> {
            ResultIterator<Map<String, Object>> resultIterator = handle.createQuery(sql).map((index, r, ctx) -> {
                Map<String, Object> resultRow = foldCase ? new CaseFoldedMap() : new HashMap<>();
                ResultSetMetaData resultMetadata;
                try {
                    resultMetadata = r.getMetaData();
                } catch (SQLException e) {
                    throw new ResultSetException("Unable to obtain metadata from result set", e, ctx);
                }
                try {
                    for (int i = 1; i <= resultMetadata.getColumnCount(); i++) {
                        String key = resultMetadata.getColumnName(i);
                        String alias = resultMetadata.getColumnLabel(i);
                        Object value = r.getObject(i);
                        resultRow.put(alias != null ? alias : key, value);
                    }
                } catch (SQLException e) {
                    throw new ResultSetException("Unable to access specific metadata from " + "result set metadata", e, ctx);
                }
                return resultRow;
            }).iterator();
            jg.writeStartArray();
            while (resultIterator.hasNext()) {
                jg.writeObject(resultIterator.next());
            }
            jg.writeEndArray();
            jg.close();
            return null;
        }, (exception) -> sqlFirehoseDatabaseConnector.isTransientException(exception) && !(SQLMetadataStorageActionHandler.isStatementException(exception)));
        return new CleanableFile() {

            @Override
            public File file() {
                return tempFile;
            }

            @Override
            public void close() {
                if (!tempFile.delete()) {
                    LOG.warn("Failed to remove file[%s]", tempFile.getAbsolutePath());
                }
            }
        };
    } catch (Exception e) {
        if (!tempFile.delete()) {
            LOG.warn("Failed to remove file[%s]", tempFile.getAbsolutePath());
        }
        throw new IOException(e);
    }
}
Also used : Logger(org.apache.druid.java.util.common.logger.Logger) SQLFirehoseDatabaseConnector(org.apache.druid.metadata.SQLFirehoseDatabaseConnector) JsonGenerator(com.fasterxml.jackson.core.JsonGenerator) ObjectMapper(com.fasterxml.jackson.databind.ObjectMapper) StringUtils(org.apache.druid.java.util.common.StringUtils) FileOutputStream(java.io.FileOutputStream) IOException(java.io.IOException) HashMap(java.util.HashMap) SQLMetadataStorageActionHandler(org.apache.druid.metadata.SQLMetadataStorageActionHandler) File(java.io.File) SQLException(java.sql.SQLException) Map(java.util.Map) ResultIterator(org.skife.jdbi.v2.ResultIterator) Preconditions(com.google.common.base.Preconditions) ResultSetException(org.skife.jdbi.v2.exceptions.ResultSetException) URI(java.net.URI) InputEntity(org.apache.druid.data.input.InputEntity) Nullable(javax.annotation.Nullable) ResultSetMetaData(java.sql.ResultSetMetaData) InputStream(java.io.InputStream)
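
For readers outside the Druid codebase, here is a minimal, self-contained sketch of the same pattern: run a JDBI v2 query, iterate the rows lazily, and stream them into a JSON array file with Jackson. The JDBC URL, credentials, SQL statement, and class name are illustrative placeholders, not taken from the Druid code above.

import com.fasterxml.jackson.core.JsonGenerator;
import com.fasterxml.jackson.databind.ObjectMapper;
import org.skife.jdbi.v2.DBI;
import org.skife.jdbi.v2.Handle;
import org.skife.jdbi.v2.ResultIterator;

import java.io.File;
import java.io.FileOutputStream;
import java.util.Map;

public class QueryToJsonFile {
    public static void main(String[] args) throws Exception {
        // Placeholder connection details; substitute a real JDBC URL and credentials.
        DBI dbi = new DBI("jdbc:h2:mem:demo", "sa", "");
        ObjectMapper mapper = new ObjectMapper();
        File out = File.createTempFile("sql-results", ".json");

        Handle handle = dbi.open();
        try (FileOutputStream fos = new FileOutputStream(out);
             JsonGenerator jg = mapper.getFactory().createGenerator(fos)) {
            // JDBI maps each row to a Map<String, Object> by default; iterate lazily
            // and write the rows out as a single JSON array.
            ResultIterator<Map<String, Object>> rows = handle.createQuery("SELECT 1 AS id").iterator();
            jg.writeStartArray();
            while (rows.hasNext()) {
                jg.writeObject(rows.next());
            }
            jg.writeEndArray();
        } finally {
            handle.close();
        }
        System.out.println("Results written to " + out.getAbsolutePath());
    }
}

Unlike the Druid method, this sketch omits retry handling and cleanup of the output file on failure, which is the main point of openCleanableFile.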

Example 32 with Query

use of com.google.datastore.v1.Query in project beam by apache.

the class DataStoreReadWriteIT method testWriteRead_viaCoreBeamIO.

@Test
public void testWriteRead_viaCoreBeamIO() {
    String projectId = options.getProject();
    Key ancestor = makeKey(KIND, UUID.randomUUID().toString()).build();
    Key itemKey = makeKey(ancestor, KIND, UUID.randomUUID().toString()).setPartitionId(PartitionId.newBuilder().setProjectId(projectId).build()).build();
    Row testWriteRow = Row.withSchema(SOURCE_SCHEMA).addValues(itemKey.toByteArray(), "4000").build();
    writePipeline.apply(Create.of(testWriteRow).withRowSchema(SOURCE_SCHEMA)).apply(RowToEntity.create("__key__", KIND)).apply(DatastoreIO.v1().write().withProjectId(projectId));
    writePipeline.run().waitUntilFinish();
    Query.Builder query = Query.newBuilder();
    query.addKindBuilder().setName(KIND);
    query.setFilter(makeFilter("__key__", Operator.EQUAL, makeValue(itemKey)));
    DatastoreV1.Read read = DatastoreIO.v1().read().withProjectId(projectId).withQuery(query.build());
    PCollection<Row> rowsRead = readPipeline.apply(read).apply(EntityToRow.create(SOURCE_SCHEMA, "__key__"));
    PAssert.that(rowsRead).containsInAnyOrder(testWriteRow);
    readPipeline.run().waitUntilFinish();
}
Also used : Query(com.google.datastore.v1.Query) DatastoreV1(org.apache.beam.sdk.io.gcp.datastore.DatastoreV1) ByteString(org.apache.beam.vendor.calcite.v1_28_0.org.apache.calcite.avatica.util.ByteString) Row(org.apache.beam.sdk.values.Row) EntityToRow(org.apache.beam.sdk.io.gcp.datastore.EntityToRow) Key(com.google.datastore.v1.Key) DatastoreHelper.makeKey(com.google.datastore.v1.client.DatastoreHelper.makeKey) Test(org.junit.Test)
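
The protobuf query construction in this test stands on its own; the sketch below isolates that piece, building a kind filter plus a __key__ equality filter with the com.google.datastore.v1 builders and DatastoreHelper. The kind and key name used here are placeholders.

import static com.google.datastore.v1.client.DatastoreHelper.makeFilter;
import static com.google.datastore.v1.client.DatastoreHelper.makeKey;
import static com.google.datastore.v1.client.DatastoreHelper.makeValue;

import com.google.datastore.v1.Key;
import com.google.datastore.v1.PropertyFilter.Operator;
import com.google.datastore.v1.Query;

public class KeyFilterQuery {
    public static Query build(String kind, String name) {
        // Build a key for the target entity, then restrict the query to that key.
        Key itemKey = makeKey(kind, name).build();
        Query.Builder query = Query.newBuilder();
        query.addKindBuilder().setName(kind);
        query.setFilter(makeFilter("__key__", Operator.EQUAL, makeValue(itemKey)));
        return query.build();
    }

    public static void main(String[] args) {
        System.out.println(build("example_kind", "example_name"));
    }
}

"__key__" is the reserved Datastore property name for an entity's key, which is why the test filters on it to read back exactly the row it wrote.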

Example 33 with Query

use of com.google.datastore.v1.Query in project beam by apache.

the class SplitQueryFnIT method testSplitQueryFn.

/**
 * A helper method to test {@link SplitQueryFn} to generate the expected number of splits.
 */
private void testSplitQueryFn(String projectId, String kind, @Nullable String namespace, int expectedNumSplits) throws Exception {
    Query.Builder query = Query.newBuilder();
    query.addKindBuilder().setName(kind);
    SplitQueryFn splitQueryFn = new SplitQueryFn(V1Options.from(projectId, namespace, null), 0);
    DoFnTester<Query, Query> doFnTester = DoFnTester.of(splitQueryFn);
    List<Query> queries = doFnTester.processBundle(query.build());
    assertEquals(expectedNumSplits, queries.size());
}
Also used : Query(com.google.datastore.v1.Query) SplitQueryFn(org.apache.beam.sdk.io.gcp.datastore.DatastoreV1.Read.SplitQueryFn)
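
Hypothetical callers of this helper (not taken from the Beam source) might look like the following; the project id, kind, namespace, and expected split counts are placeholders chosen for illustration.

@Test
public void testSplitQueryFnWithoutNamespace() throws Exception {
    // Expect the fn to produce the configured number of splits for the default namespace.
    testSplitQueryFn("my-project", "my_kind", null, 12);
}

@Test
public void testSplitQueryFnWithNamespace() throws Exception {
    // Same check, but scoped to an explicit namespace.
    testSplitQueryFn("my-project", "my_kind", "my-namespace", 12);
}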

Example 34 with Query

use of com.google.datastore.v1.Query in project beam by apache.

the class DatastoreV1Test method testReadValidationFailsQueryLimitZero.

@Test
public void testReadValidationFailsQueryLimitZero() throws Exception {
    Query invalidLimit = Query.newBuilder().setLimit(Int32Value.newBuilder().setValue(0)).build();
    thrown.expect(IllegalArgumentException.class);
    thrown.expectMessage("Invalid query limit 0: must be positive");
    DatastoreIO.v1().read().withQuery(invalidLimit);
}
Also used : GqlQuery(com.google.datastore.v1.GqlQuery) Query(com.google.datastore.v1.Query) Test(org.junit.Test)
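
For contrast, a query with any positive limit passes this validation. A minimal sketch, assuming the same imports as the test above; the project id and limit value are arbitrary placeholders.

@Test
public void testReadValidationAcceptsPositiveQueryLimit() {
    // A positive limit is accepted by withQuery without throwing.
    Query validLimit = Query.newBuilder().setLimit(Int32Value.newBuilder().setValue(100)).build();
    DatastoreIO.v1().read().withProjectId("my-project").withQuery(validLimit);
}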

Example 35 with Query

use of com.google.datastore.v1.Query in project beam by apache.

the class DatastoreV1Test method testSplitQueryFnWithQueryLimit.

/**
 * Tests {@link DatastoreV1.Read.SplitQueryFn} when the query has a user specified limit.
 */
@Test
public void testSplitQueryFnWithQueryLimit() throws Exception {
    Query queryWithLimit = QUERY.toBuilder().setLimit(Int32Value.newBuilder().setValue(1)).build();
    SplitQueryFn splitQueryFn = new SplitQueryFn(V_1_OPTIONS, 10, mockDatastoreFactory);
    DoFnTester<Query, Query> doFnTester = DoFnTester.of(splitQueryFn);
    doFnTester.setCloningBehavior(CloningBehavior.DO_NOT_CLONE);
    List<Query> queries = doFnTester.processBundle(queryWithLimit);
    assertEquals(1, queries.size());
    verifyNoMoreInteractions(mockDatastore);
    verifyNoMoreInteractions(mockQuerySplitter);
}
Also used : GqlQuery(com.google.datastore.v1.GqlQuery) Query(com.google.datastore.v1.Query) SplitQueryFn(org.apache.beam.sdk.io.gcp.datastore.DatastoreV1.Read.SplitQueryFn) Test(org.junit.Test)
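
Because a user-specified limit disables splitting, the single output query is expected to be the input passed through unchanged. A follow-up assertion one might add at the end of the test above (not part of the original):

// The fn should emit the limited query as-is rather than splitting it.
assertEquals(queryWithLimit, queries.get(0));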

Aggregations

Query (org.jpl7.Query) 56
Term (org.jpl7.Term) 33
Variable (org.jpl7.Variable) 23
Map (java.util.Map) 18
Atom (org.jpl7.Atom) 16
Compound (org.jpl7.Compound) 16
Query (com.google.datastore.v1.Query) 12
Handle (org.skife.jdbi.v2.Handle) 12
Test (org.junit.Test) 10
GqlQuery (com.google.datastore.v1.GqlQuery) 7
Integer (org.jpl7.Integer) 6
SQLException (java.sql.SQLException) 5
ArrayList (java.util.ArrayList) 5
Datastore (com.google.datastore.v1.client.Datastore) 4
IOException (java.io.IOException) 4
HashMap (java.util.HashMap) 4
List (java.util.List) 4
SplitQueryFn (org.apache.beam.sdk.io.gcp.datastore.DatastoreV1.Read.SplitQueryFn) 4
RunQueryRequest (com.google.datastore.v1.RunQueryRequest) 3
Nullable (javax.annotation.Nullable) 3