Use of org.apache.drill.exec.physical.rowSet.RowSetReader in project drill by apache.
From the class MetadataControllerBatch, method getMetadataUnits:
private List<TableMetadataUnit> getMetadataUnits(VectorContainer container) {
  List<TableMetadataUnit> metadataUnits = new ArrayList<>();
  RowSetReader reader = DirectRowSet.fromContainer(container).reader();
  while (reader.next()) {
    metadataUnits.addAll(getMetadataUnits(reader, 0));
  }

  if (metadataToHandle != null) {
    // leaves only table metadata and metadata which belongs to segments to be overridden
    metadataUnits = metadataUnits.stream()
        .filter(tableMetadataUnit ->
            metadataToHandle.values().stream()
                .map(MetadataInfo::key)
                .anyMatch(s -> s.equals(tableMetadataUnit.metadataKey()))
            || MetadataType.TABLE.name().equals(tableMetadataUnit.metadataType()))
        .collect(Collectors.toList());

    // leaves only metadata which should be fetched from the Metastore
    metadataUnits.stream()
        .map(TableMetadataUnit::metadataIdentifier)
        .forEach(metadataToHandle::remove);

    List<TableMetadataUnit> metadata = metadataToHandle.isEmpty()
        ? Collections.emptyList()
        : tables.basicRequests().metadata(popConfig.getContext().tableInfo(), metadataToHandle.values());

    metadataUnits.addAll(metadata);
  }

  // checks whether metadataUnits contains not only table metadata before adding default segment
  // to avoid the case when only table metadata should be updated and/or root segments removed
  boolean insertDefaultSegment = metadataUnits.size() > 1
      && metadataUnits.stream()
          .noneMatch(metadataUnit -> metadataUnit.metadataType().equals(MetadataType.SEGMENT.name()));

  if (insertDefaultSegment) {
    TableMetadataUnit defaultSegmentMetadata = getDefaultSegment(metadataUnits);
    metadataUnits.add(defaultSegmentMetadata);
  }
  return metadataUnits;
}
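The method above shows the core RowSetReader idiom: wrap a VectorContainer in a DirectRowSet, obtain a reader, and call next() once per row. A minimal, self-contained sketch of that idiom follows, using Drill's test support classes SchemaBuilder and RowSetBuilder; the two-column schema, the row values, and the allocator variable are hypothetical, not taken from MetadataControllerBatch.

// Build a tiny row set in memory, then read it back row by row.
// Assumes a BufferAllocator named `allocator` (e.g. from a test fixture).
TupleMetadata schema = new SchemaBuilder()
    .add("metadataKey", MinorType.VARCHAR)    // hypothetical columns
    .add("rowGroupIndex", MinorType.INT)
    .buildSchema();
RowSet rowSet = new RowSetBuilder(allocator, schema)
    .addRow("part=0", 0)
    .addRow("part=1", 1)
    .build();

RowSetReader reader = rowSet.reader();
while (reader.next()) {                        // advances one row per call
  String key = reader.scalar("metadataKey").getString();
  int rowGroup = reader.scalar("rowGroupIndex").getInt();
  // ... map the current row to a TableMetadataUnit here
}
rowSet.clear();                                // release the backing vectors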
Use of org.apache.drill.exec.physical.rowSet.RowSetReader in project drill by apache.
From the class IcebergQueriesTest, method testSelectFromSnapshotIdAndToSnapshotId:
@Test
public void testSelectFromSnapshotIdAndToSnapshotId() throws Exception {
  String snapshotQuery = "select snapshot_id from dfs.tmp.`testAllTypes#snapshots` order by committed_at";
  String query = "select * from table(dfs.tmp.testAllTypes(type => 'iceberg', fromSnapshotId => %s, toSnapshotId => %s))";
  DirectRowSet rowSet = queryBuilder().sql(snapshotQuery).rowSet();
  try {
    RowSetReader reader = rowSet.reader();
    assertTrue(reader.next());
    Long fromSnapshotId = (Long) reader.column(0).reader().getObject();
    assertTrue(reader.next());
    Long toSnapshotId = (Long) reader.column(0).reader().getObject();
    String plan = queryBuilder().sql(query, fromSnapshotId, toSnapshotId).explainJson();
    long count = queryBuilder().physical(plan).run().recordCount();
    assertEquals(1, count);
  } finally {
    rowSet.clear();
  }
}
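The Object cast above works because snapshot_id is a BIGINT column; the same values can also be read through the typed scalar accessor, which avoids the cast. A sketch under the same assumptions (the queryBuilder() test fixture and the snapshotQuery string from the test above):

DirectRowSet rowSet = queryBuilder().sql(snapshotQuery).rowSet();
try {
  RowSetReader reader = rowSet.reader();
  assertTrue(reader.next());
  long fromSnapshotId = reader.scalar(0).getLong();  // oldest snapshot (ordered by committed_at)
  assertTrue(reader.next());
  long toSnapshotId = reader.scalar(0).getLong();    // next snapshot
  // ... substitute the ids into the parameterized query, as above
} finally {
  rowSet.clear();  // a DirectRowSet holds direct memory; always release it
}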
Use of org.apache.drill.exec.physical.rowSet.RowSetReader in project drill by apache.
From the class StreamingHttpConnection, method sendData:
/**
 * Called from query thread, specifically from the Screen operator,
 * for each batch.
 */
@Override
public void sendData(RpcOutcomeListener<Ack> listener, QueryDataPackage data) {
  VectorContainer batch = data.batch();
  try {
    if (batchCount == 0) {
      batchHolder = new BatchHolder(batch);
      reader = new PushResultSetReaderImpl(batchHolder);
      startSignal.await();
    }
    batchHolder.newBatch();
    RowSetReader batchReader = reader.start();
    emitBatch(batchReader);
    batchCount++;
  } catch (IOException e) {
    throw UserException.dataWriteError(e)
        .addContext("Failed to send JSON results to the REST client")
        .build(logger);
  } catch (InterruptedException e) {
    throw new DrillRuntimeException("Interrupted", e);
  } finally {
    batch.zeroVectors();
    listener.success(Acks.OK, null);
  }
}
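emitBatch(batchReader) is defined elsewhere in StreamingHttpConnection; what follows is a hedged sketch of what such a per-batch emit loop over a RowSetReader could look like. The jsonWriter object is a hypothetical stand-in for the real JSON generator, not a field of this class.

// Hypothetical emit loop: visit every row of the current batch and write each
// column value. next(), columnCount(), and column(i) are part of Drill's
// RowSetReader/TupleReader contract.
private void emitBatch(RowSetReader batchReader) throws IOException {
  while (batchReader.next()) {                     // one row at a time, this batch only
    for (int i = 0; i < batchReader.columnCount(); i++) {
      Object value = batchReader.column(i).getObject();
      jsonWriter.write(value);                     // hypothetical writer
    }
  }
}

Because PushResultSetReaderImpl is push-based, reader.start() is called after each batchHolder.newBatch() so that the returned RowSetReader is bound to the vectors of the newly arrived batch, which is the sequence sendData() follows above.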