Use of com.facebook.presto.spi.TableHandle in project presto by prestodb.
In the class TestJdbcComputePushdown, the method jdbcTableScan:
private TableScanNode jdbcTableScan(String schema, String table, Type type, String... columnNames) {
    JdbcTableHandle jdbcTableHandle = new JdbcTableHandle(CONNECTOR_ID, new SchemaTableName(schema, table), CATALOG_NAME, schema, table);
    JdbcTableLayoutHandle jdbcTableLayoutHandle = new JdbcTableLayoutHandle(TEST_SESSION.getSqlFunctionProperties(), jdbcTableHandle, TupleDomain.none(), Optional.empty());
    TableHandle tableHandle = new TableHandle(
            new ConnectorId(CATALOG_NAME),
            jdbcTableHandle,
            new ConnectorTransactionHandle() {},
            Optional.of(jdbcTableLayoutHandle));
    return PLAN_BUILDER.tableScan(
            tableHandle,
            Arrays.stream(columnNames)
                    .map(column -> newVariable(column, type))
                    .collect(toImmutableList()),
            Arrays.stream(columnNames)
                    .map(column -> newVariable(column, type))
                    .collect(toMap(identity(), entry -> getColumnHandleForVariable(entry.getName(), type))));
}
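For context, a minimal sketch of invoking this helper from a test; the schema, table, type, and column names below are illustrative placeholders, not values from the original test:

// Hypothetical usage: build a scan over two BIGINT columns.
TableScanNode scan = jdbcTableScan("test_schema", "test_table", BIGINT, "c1", "c2");
// The helper wires the connector-specific JdbcTableHandle (plus its layout)
// into the engine-level TableHandle carried by the scan node.
assertTrue(scan.getTable().getConnectorHandle() instanceof JdbcTableHandle);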
Use of com.facebook.presto.spi.TableHandle in project presto by prestodb.
In the class TestSourcePartitionedScheduler, the method createPlan:
private static SubPlan createPlan() {
    VariableReferenceExpression variable = new VariableReferenceExpression(Optional.empty(), "column", VARCHAR);
    // table scan with splitCount splits
    TableScanNode tableScan = new TableScanNode(
            Optional.empty(),
            TABLE_SCAN_NODE_ID,
            new TableHandle(CONNECTOR_ID, new TestingTableHandle(), TestingTransactionHandle.create(), Optional.empty()),
            ImmutableList.of(variable),
            ImmutableMap.of(variable, new TestingColumnHandle("column")),
            TupleDomain.all(),
            TupleDomain.all());
    RemoteSourceNode remote = new RemoteSourceNode(Optional.empty(), new PlanNodeId("remote_id"), new PlanFragmentId(0), ImmutableList.of(), false, Optional.empty(), GATHER);
    PlanFragment testFragment = new PlanFragment(
            new PlanFragmentId(0),
            new JoinNode(
                    Optional.empty(),
                    new PlanNodeId("join_id"),
                    INNER,
                    tableScan,
                    remote,
                    ImmutableList.of(),
                    ImmutableList.<VariableReferenceExpression>builder()
                            .addAll(tableScan.getOutputVariables())
                            .addAll(remote.getOutputVariables())
                            .build(),
                    Optional.empty(),
                    Optional.empty(),
                    Optional.empty(),
                    Optional.empty(),
                    ImmutableMap.of()),
            ImmutableSet.of(variable),
            SOURCE_DISTRIBUTION,
            ImmutableList.of(TABLE_SCAN_NODE_ID),
            new PartitioningScheme(Partitioning.create(SINGLE_DISTRIBUTION, ImmutableList.of()), ImmutableList.of(variable)),
            StageExecutionDescriptor.ungroupedExecution(),
            false,
            StatsAndCosts.empty(),
            Optional.empty());
    return new SubPlan(testFragment, ImmutableList.of());
}
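For reference, a hedged re-annotation of the four-argument TableHandle constructor used in the scan above; the argument roles are inferred from the snippet itself:

TableHandle handle = new TableHandle(
        CONNECTOR_ID,                        // ConnectorId: which connector owns the table
        new TestingTableHandle(),            // ConnectorTableHandle: connector-specific handle (test stub)
        TestingTransactionHandle.create(),   // ConnectorTransactionHandle: transaction scope of the handle
        Optional.empty());                   // Optional layout: none resolved in this scheduler test

The JDBC example earlier passes Optional.of(jdbcTableLayoutHandle) in the last position instead, since that test exercises a pre-resolved layout.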
Use of com.facebook.presto.spi.TableHandle in project presto by prestodb.
In the class TestSystemMemoryBlocking, the method testTableScanSystemMemoryBlocking:
@Test
public void testTableScanSystemMemoryBlocking() {
    PlanNodeId sourceId = new PlanNodeId("source");
    final List<Type> types = ImmutableList.of(VARCHAR);
    TableScanOperator source = new TableScanOperator(
            driverContext.addOperatorContext(1, new PlanNodeId("test"), "values"),
            sourceId,
            (session, split, table, columns) -> new FixedPageSource(rowPagesBuilder(types)
                    .addSequencePage(10, 1)
                    .addSequencePage(10, 1)
                    .addSequencePage(10, 1)
                    .addSequencePage(10, 1)
                    .addSequencePage(10, 1)
                    .build()),
            new TableHandle(
                    new ConnectorId("test"),
                    new ConnectorTableHandle() {},
                    new ConnectorTransactionHandle() {},
                    Optional.empty()),
            ImmutableList.of());
    PageConsumerOperator sink = createSinkOperator(types);
    Driver driver = Driver.createDriver(driverContext, source, sink);
    assertSame(driver.getDriverContext(), driverContext);
    assertFalse(driver.isFinished());
    Split testSplit = new Split(new ConnectorId("test"), TestingTransactionHandle.create(), new TestSplit());
    driver.updateSource(new TaskSource(sourceId, ImmutableSet.of(new ScheduledSplit(0, sourceId, testSplit)), true));
    ListenableFuture<?> blocked = driver.processFor(new Duration(1, NANOSECONDS));
    // the driver shouldn't block in the first call, as it can still move a page
    // from the source operator to the sink operator, but the operator itself should be blocked
    assertTrue(blocked.isDone());
    assertFalse(source.getOperatorContext().isWaitingForMemory().isDone());
    // and they should stay blocked until more memory becomes available
    for (int i = 0; i < 10; i++) {
        blocked = driver.processFor(new Duration(1, NANOSECONDS));
        assertFalse(blocked.isDone());
        assertFalse(source.getOperatorContext().isWaitingForMemory().isDone());
    }
    // free up some memory
    memoryPool.free(QUERY_ID, "test", memoryPool.getReservedBytes());
    // the operator should be unblocked
    assertTrue(source.getOperatorContext().isWaitingForMemory().isDone());
    // the driver shouldn't be blocked
    blocked = driver.processFor(new Duration(1, NANOSECONDS));
    assertTrue(blocked.isDone());
}
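The loop above assumes the memory pool is already exhausted when processing starts. A minimal sketch of a fixture step that would produce that state, assuming a reserve call in the test's setup (the actual setUp of this test is not shown in the snippet):

// Hypothetical fixture step: reserve the whole pool up front so the
// table scan blocks on memory until memoryPool.free(...) runs later.
memoryPool.reserve(QUERY_ID, "test", memoryPool.getMaxBytes());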
Use of com.facebook.presto.spi.TableHandle in project presto by prestodb.
In the class AbstractTestHiveClient, the method doTestMismatchSchemaTable:
protected void doTestMismatchSchemaTable(
        SchemaTableName schemaTableName,
        HiveStorageFormat storageFormat,
        List<ColumnMetadata> tableBefore,
        MaterializedResult dataBefore,
        List<ColumnMetadata> tableAfter,
        MaterializedResult dataAfter,
        List<RowExpression> afterFilters,
        List<Predicate<MaterializedRow>> afterResultPredicates)
        throws Exception {
    String schemaName = schemaTableName.getSchemaName();
    String tableName = schemaTableName.getTableName();
    doCreateEmptyTable(schemaTableName, storageFormat, tableBefore);
    // insert the data
    try (Transaction transaction = newTransaction()) {
        ConnectorSession session = newSession();
        ConnectorMetadata metadata = transaction.getMetadata();
        ConnectorTableHandle tableHandle = getTableHandle(metadata, schemaTableName);
        ConnectorInsertTableHandle insertTableHandle = metadata.beginInsert(session, tableHandle);
        ConnectorPageSink sink = pageSinkProvider.createPageSink(transaction.getTransactionHandle(), session, insertTableHandle, TEST_HIVE_PAGE_SINK_CONTEXT);
        sink.appendPage(dataBefore.toPage());
        Collection<Slice> fragments = getFutureValue(sink.finish());
        metadata.finishInsert(session, insertTableHandle, fragments, ImmutableList.of());
        transaction.commit();
    }
    // load the table and verify the data
    try (Transaction transaction = newTransaction()) {
        ConnectorSession session = newSession();
        ConnectorMetadata metadata = transaction.getMetadata();
        ConnectorTableHandle tableHandle = getTableHandle(metadata, schemaTableName);
        List<ColumnHandle> columnHandles = metadata.getColumnHandles(session, tableHandle).values().stream()
                .filter(columnHandle -> !((HiveColumnHandle) columnHandle).isHidden())
                .collect(toList());
        MaterializedResult result = readTable(transaction, tableHandle, columnHandles, session, TupleDomain.all(), OptionalInt.empty(), Optional.empty());
        assertEqualsIgnoreOrder(result.getMaterializedRows(), dataBefore.getMaterializedRows());
        transaction.commit();
    }
    // alter the table schema
    try (Transaction transaction = newTransaction()) {
        ConnectorSession session = newSession();
        MetastoreContext metastoreContext = new MetastoreContext(session.getIdentity(), session.getQueryId(), session.getClientInfo(), session.getSource(), getMetastoreHeaders(session), false, DEFAULT_COLUMN_CONVERTER_PROVIDER);
        PrincipalPrivileges principalPrivileges = testingPrincipalPrivilege(session);
        Table oldTable = transaction.getMetastore().getTable(metastoreContext, schemaName, tableName).get();
        HiveTypeTranslator hiveTypeTranslator = new HiveTypeTranslator();
        List<Column> dataColumns = tableAfter.stream()
                .filter(columnMetadata -> !columnMetadata.getName().equals("ds"))
                .map(columnMetadata -> new Column(columnMetadata.getName(), toHiveType(hiveTypeTranslator, columnMetadata.getType()), Optional.empty(), Optional.empty()))
                .collect(toList());
        Table.Builder newTable = Table.builder(oldTable).setDataColumns(dataColumns);
        transaction.getMetastore().replaceView(metastoreContext, schemaName, tableName, newTable.build(), principalPrivileges);
        transaction.commit();
    }
    // load the altered table and verify the data
    try (Transaction transaction = newTransaction()) {
        ConnectorSession session = newSession();
        ConnectorMetadata metadata = transaction.getMetadata();
        ConnectorTableHandle tableHandle = getTableHandle(metadata, schemaTableName);
        List<ColumnHandle> columnHandles = metadata.getColumnHandles(session, tableHandle).values().stream()
                .filter(columnHandle -> !((HiveColumnHandle) columnHandle).isHidden())
                .collect(toList());
        MaterializedResult result = readTable(transaction, tableHandle, columnHandles, session, TupleDomain.all(), OptionalInt.empty(), Optional.empty());
        assertEqualsIgnoreOrder(result.getMaterializedRows(), dataAfter.getMaterializedRows());
        int filterCount = afterFilters.size();
        for (int i = 0; i < filterCount; i++) {
            RowExpression predicate = afterFilters.get(i);
            ConnectorTableLayoutHandle layoutHandle = pushdownFilter(session, metadata, transaction.getMetastore(), ROW_EXPRESSION_SERVICE, FUNCTION_RESOLUTION, hivePartitionManager, METADATA.getFunctionAndTypeManager(), tableHandle, predicate, Optional.empty())
                    .getLayout()
                    .getHandle();
            // Read all columns with a filter
            MaterializedResult filteredResult = readTable(transaction, tableHandle, layoutHandle, columnHandles, session, OptionalInt.empty(), Optional.empty());
            Predicate<MaterializedRow> rowPredicate = afterResultPredicates.get(i);
            List<MaterializedRow> expectedRows = dataAfter.getMaterializedRows().stream()
                    .filter(rowPredicate::apply)
                    .collect(toList());
            assertEqualsIgnoreOrder(filteredResult.getMaterializedRows(), expectedRows);
            // Read all columns except the ones used in the filter
            Set<String> filterColumnNames = extractUnique(predicate).stream()
                    .map(VariableReferenceExpression::getName)
                    .collect(toImmutableSet());
            List<ColumnHandle> nonFilterColumns = columnHandles.stream()
                    .filter(column -> !filterColumnNames.contains(((HiveColumnHandle) column).getName()))
                    .collect(toList());
            int resultCount = readTable(transaction, tableHandle, layoutHandle, nonFilterColumns, session, OptionalInt.empty(), Optional.empty()).getRowCount();
            assertEquals(resultCount, expectedRows.size());
        }
        transaction.commit();
    }
    // insertions into partitions with type mismatches should fail
    try (Transaction transaction = newTransaction()) {
        ConnectorSession session = newSession();
        ConnectorMetadata metadata = transaction.getMetadata();
        ConnectorTableHandle tableHandle = getTableHandle(metadata, schemaTableName);
        ConnectorInsertTableHandle insertTableHandle = metadata.beginInsert(session, tableHandle);
        ConnectorPageSink sink = pageSinkProvider.createPageSink(transaction.getTransactionHandle(), session, insertTableHandle, TEST_HIVE_PAGE_SINK_CONTEXT);
        sink.appendPage(dataAfter.toPage());
        Collection<Slice> fragments = getFutureValue(sink.finish());
        metadata.finishInsert(session, insertTableHandle, fragments, ImmutableList.of());
        transaction.commit();
        fail("expected exception");
    } catch (PrestoException e) {
        // expected
        assertEquals(e.getErrorCode(), HIVE_PARTITION_SCHEMA_MISMATCH.toErrorCode());
    }
}
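For illustration, a hedged sketch of driving this helper from a concrete test; the names, storage format, and empty filter lists below are placeholders, not values from the original suite:

// Hypothetical invocation: tableBefore/dataBefore and tableAfter/dataAfter
// would be built from ColumnMetadata lists and MaterializedResult builders.
doTestMismatchSchemaTable(
        new SchemaTableName("test_schema", "mismatch_table"),  // hypothetical table
        HiveStorageFormat.ORC,
        tableBefore, dataBefore,   // schema and rows written first
        tableAfter, dataAfter,     // schema and rows after the alter step
        ImmutableList.of(),        // no pushdown filters in this sketch...
        ImmutableList.of());       // ...so no matching result predicates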
Use of com.facebook.presto.spi.TableHandle in project presto by prestodb.
In the class AbstractTestHiveClient, the method assertTableStatsComputed:
private void assertTableStatsComputed(SchemaTableName tableName, Set<String> expectedColumnStatsColumns) {
    try (Transaction transaction = newTransaction()) {
        ConnectorMetadata metadata = transaction.getMetadata();
        ConnectorSession session = newSession();
        ConnectorTableHandle tableHandle = getTableHandle(metadata, tableName);
        List<ColumnHandle> allColumnHandles = ImmutableList.copyOf(metadata.getColumnHandles(session, tableHandle).values());
        TableStatistics tableStatistics = metadata.getTableStatistics(session, tableHandle, Optional.empty(), allColumnHandles, Constraint.alwaysTrue());
        assertFalse(tableStatistics.getRowCount().isUnknown(), "row count is unknown");
        Map<String, ColumnStatistics> columnsStatistics = tableStatistics.getColumnStatistics().entrySet().stream()
                .collect(toImmutableMap(entry -> ((HiveColumnHandle) entry.getKey()).getName(), Map.Entry::getValue));
        assertEquals(columnsStatistics.keySet(), expectedColumnStatsColumns, "columns with statistics");
        Map<String, ColumnHandle> columnHandles = metadata.getColumnHandles(session, tableHandle);
        columnsStatistics.forEach((columnName, columnStatistics) -> {
            ColumnHandle columnHandle = columnHandles.get(columnName);
            Type columnType = metadata.getColumnMetadata(session, tableHandle, columnHandle).getType();
            assertFalse(columnStatistics.getNullsFraction().isUnknown(), "unknown nulls fraction for " + columnName);
            assertFalse(columnStatistics.getDistinctValuesCount().isUnknown(), "unknown distinct values count for " + columnName);
            if (isVarcharType(columnType)) {
                assertFalse(columnStatistics.getDataSize().isUnknown(), "unknown data size for " + columnName);
            } else {
                assertTrue(columnStatistics.getDataSize().isUnknown(), "expected unknown data size for " + columnName);
            }
        });
    }
}
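And, for completeness, a hedged sketch of invoking the helper; the table and column names are placeholders:

// Hypothetical call: expect statistics for exactly these two columns.
assertTableStatsComputed(
        new SchemaTableName("test_schema", "stats_table"),
        ImmutableSet.of("c_bigint", "c_varchar"));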