Search in sources :

Example 21 with QueryResult

use of co.cask.cdap.proto.QueryResult in project cdap by caskdata.

The class HiveExploreTableTestRun defines the method testInsert.

@Test
public void testInsert() throws Exception {
    setupTable(null, null);
    DatasetId otherTable = NAMESPACE_ID.dataset("othertable");
    // Target table schema: (value INT, id STRING), with "id" used as the row key.
    Schema schema = Schema.recordOf(
        "record",
        Schema.Field.of("value", Schema.of(Schema.Type.INT)),
        Schema.Field.of("id", Schema.of(Schema.Type.STRING)));
    datasetFramework.addInstance(
        Table.class.getName(),
        otherTable,
        TableProperties.builder().setSchema(schema).setRowFieldName("id").build());
    try {
        // Copy rows from the source table into the new table via a Hive INSERT.
        String insertQuery = String.format(
            "insert into %s select int_field, string_field from %s",
            getDatasetHiveName(otherTable), MY_TABLE_NAME);
        ExploreExecutionResult insertResult = exploreClient.submit(NAMESPACE_ID, insertQuery).get();
        Assert.assertEquals(QueryStatus.OpStatus.FINISHED, insertResult.getStatus().getStatus());
        // Read the copied data back and verify both the column metadata and the row contents.
        String selectQuery = String.format("select id, value from %s", getDatasetHiveName(otherTable));
        runCommand(
            NAMESPACE_ID,
            selectQuery,
            true,
            Lists.newArrayList(
                new ColumnDesc("id", "STRING", 1, null),
                new ColumnDesc("value", "INT", 2, null)),
            Lists.newArrayList(
                new QueryResult(Lists.<Object>newArrayList("row1", Integer.MAX_VALUE))));
    } finally {
        // Clean up both datasets regardless of the test outcome.
        datasetFramework.deleteInstance(MY_TABLE);
        datasetFramework.deleteInstance(otherTable);
    }
}
Also used : QueryResult(co.cask.cdap.proto.QueryResult) Table(co.cask.cdap.api.dataset.table.Table) Schema(co.cask.cdap.api.data.schema.Schema) ColumnDesc(co.cask.cdap.proto.ColumnDesc) ExploreExecutionResult(co.cask.cdap.explore.client.ExploreExecutionResult) DatasetId(co.cask.cdap.proto.id.DatasetId) Test(org.junit.Test)

Example 22 with QueryResult

use of co.cask.cdap.proto.QueryResult in project cdap by caskdata.

The class InMemoryExploreServiceTest defines the method trimColumnValues.

/**
 * Returns a copy of the given query results in which every String column value
 * has its surrounding whitespace trimmed; non-String column values are kept as-is.
 * The input list and its rows are not modified.
 */
private static List<QueryResult> trimColumnValues(List<QueryResult> results) {
    List<QueryResult> trimmed = Lists.newArrayList();
    for (QueryResult row : results) {
        List<Object> columns = Lists.newArrayList();
        for (Object value : row.getColumns()) {
            // Only String cells are normalized; other types pass through untouched.
            columns.add(value instanceof String ? ((String) value).trim() : value);
        }
        trimmed.add(new QueryResult(columns));
    }
    return trimmed;
}
Also used : QueryResult(co.cask.cdap.proto.QueryResult)

Example 23 with QueryResult

use of co.cask.cdap.proto.QueryResult in project cdap by caskdata.

The class HiveExploreStructuredRecordTestRun defines the method testRecordScannableAndWritableIsOK.

@Test
public void testRecordScannableAndWritableIsOK() throws Exception {
    DatasetId instanceId = NAMESPACE_ID.dataset("tabul");
    // Create a "TableWrapper" dataset whose schema is a record with a single string field "x".
    datasetFramework.addInstance(
        "TableWrapper",
        instanceId,
        DatasetProperties.builder()
            .add(DatasetProperties.SCHEMA,
                 Schema.recordOf("intRecord",
                                 Schema.Field.of("x", Schema.of(Schema.Type.STRING))).toString())
            .build());
    DatasetSpecification spec = datasetFramework.getDatasetSpec(instanceId);
    try {
        exploreTableManager.enableDataset(instanceId, spec, false);
        // Hive "describe" on the enabled dataset must report exactly the one declared column.
        runCommand(
            NAMESPACE_ID,
            "describe dataset_tabul",
            true,
            Lists.newArrayList(
                new ColumnDesc("col_name", "STRING", 1, "from deserializer"),
                new ColumnDesc("data_type", "STRING", 2, "from deserializer"),
                new ColumnDesc("comment", "STRING", 3, "from deserializer")),
            Lists.newArrayList(
                new QueryResult(Lists.<Object>newArrayList("x", "string", "from deserializer"))));
    } finally {
        datasetFramework.deleteInstance(instanceId);
    }
}
Also used : QueryResult(co.cask.cdap.proto.QueryResult) DatasetSpecification(co.cask.cdap.api.dataset.DatasetSpecification) ColumnDesc(co.cask.cdap.proto.ColumnDesc) DatasetId(co.cask.cdap.proto.id.DatasetId) Test(org.junit.Test)

Example 24 with QueryResult

use of co.cask.cdap.proto.QueryResult in project cdap by caskdata.

The class HiveExploreStructuredRecordTestRun defines the method testInsert.

@Test
public void testInsert() throws Exception {
    DatasetId copyTable = NAMESPACE_ID.dataset("emailCopy");
    // Destination table shares the email schema and keys rows by "id".
    datasetFramework.addInstance(
        Table.class.getName(),
        copyTable,
        TableProperties.builder().setSchema(EmailTableDefinition.SCHEMA).setRowFieldName("id").build());
    try {
        // Copy every row from the source table into the new table via a Hive INSERT.
        String insertQuery = String.format(
            "insert into %s select * from %s", getDatasetHiveName(copyTable), MY_TABLE_NAME);
        ExploreExecutionResult insertResult = exploreClient.submit(NAMESPACE_ID, insertQuery).get();
        Assert.assertEquals(QueryStatus.OpStatus.FINISHED, insertResult.getStatus().getStatus());
        // Read the copied row back and verify all four columns survived the copy.
        String selectQuery = String.format(
            "select id, subject, body, sender from %s", getDatasetHiveName(copyTable));
        runCommand(
            NAMESPACE_ID,
            selectQuery,
            true,
            Lists.newArrayList(
                new ColumnDesc("id", "STRING", 1, null),
                new ColumnDesc("subject", "STRING", 2, null),
                new ColumnDesc("body", "STRING", 3, null),
                new ColumnDesc("sender", "STRING", 4, null)),
            Lists.newArrayList(
                new QueryResult(Lists.<Object>newArrayList(
                    "email1", "this is the subject", "this is the body", "sljackson@boss.com"))));
    } finally {
        datasetFramework.deleteInstance(copyTable);
    }
}
Also used : QueryResult(co.cask.cdap.proto.QueryResult) Table(co.cask.cdap.api.dataset.table.Table) ObjectMappedTable(co.cask.cdap.api.dataset.lib.ObjectMappedTable) ColumnDesc(co.cask.cdap.proto.ColumnDesc) ExploreExecutionResult(co.cask.cdap.explore.client.ExploreExecutionResult) DatasetId(co.cask.cdap.proto.id.DatasetId) Test(org.junit.Test)

Example 25 with QueryResult

use of co.cask.cdap.proto.QueryResult in project cdap by caskdata.

The class BaseHiveExploreService defines the method nextResults.

/**
 * Fetches the next batch of up to {@code size} results for the given query handle.
 * Returns an empty list for handles that have already been made inactive (their
 * results were fully consumed earlier). When the underlying query is finished and
 * this fetch returns nothing, the handle is scheduled for an aggressive timeout
 * since no further results can arrive.
 */
@Override
public List<QueryResult> nextResults(QueryHandle handle, int size) throws ExploreException, HandleNotFoundException, SQLException {
    startAndWait();
    InactiveOperationInfo inactiveInfo = inactiveHandleCache.getIfPresent(handle);
    if (inactiveInfo != null) {
        // Operation has been made inactive, so all results should have been fetched already - return empty list.
        LOG.trace("Returning empty result for inactive handle {}", handle);
        return ImmutableList.of();
    }
    try {
        List<QueryResult> batch = fetchNextResults(handle, size);
        QueryStatus status = getStatus(handle);
        boolean exhausted = batch.isEmpty() && status.getStatus() == QueryStatus.OpStatus.FINISHED;
        if (exhausted) {
            // Since operation has fetched all the results, handle can be timed out aggressively.
            timeoutAggressively(handle, getResultSchema(handle), status);
        }
        return batch;
    } catch (HiveSQLException e) {
        // Translate Hive's exception into the service's SQLException contract.
        throw getSqlException(e);
    }
}
Also used : QueryResult(co.cask.cdap.proto.QueryResult) HiveSQLException(org.apache.hive.service.cli.HiveSQLException) QueryStatus(co.cask.cdap.proto.QueryStatus)

Aggregations

QueryResult (co.cask.cdap.proto.QueryResult)39 ColumnDesc (co.cask.cdap.proto.ColumnDesc)23 Test (org.junit.Test)18 DatasetId (co.cask.cdap.proto.id.DatasetId)16 ExploreExecutionResult (co.cask.cdap.explore.client.ExploreExecutionResult)9 TimePartitionedFileSet (co.cask.cdap.api.dataset.lib.TimePartitionedFileSet)8 PartitionedFileSet (co.cask.cdap.api.dataset.lib.PartitionedFileSet)7 Location (org.apache.twill.filesystem.Location)7 FileSet (co.cask.cdap.api.dataset.lib.FileSet)6 ImmutableList (com.google.common.collect.ImmutableList)6 SQLException (java.sql.SQLException)6 HandleNotFoundException (co.cask.cdap.explore.service.HandleNotFoundException)5 QueryHandle (co.cask.cdap.proto.QueryHandle)4 StreamId (co.cask.cdap.proto.id.StreamId)4 Schema (co.cask.cdap.api.data.schema.Schema)3 PartitionKey (co.cask.cdap.api.dataset.lib.PartitionKey)3 PartitionedFileSetProperties (co.cask.cdap.api.dataset.lib.PartitionedFileSetProperties)3 Table (co.cask.cdap.api.dataset.table.Table)3 ExploreClient (co.cask.cdap.explore.client.ExploreClient)3 MockExploreClient (co.cask.cdap.explore.client.MockExploreClient)3