Use of com.facebook.presto.spi.ColumnHandle in project presto by prestodb.
From the class TestDriver, method testBrokenOperatorAddSource:
@Test
public void testBrokenOperatorAddSource()
        throws Exception
{
    PlanNodeId sourceId = new PlanNodeId("source");
    final List<Type> types = ImmutableList.of(VARCHAR, BIGINT, BIGINT);
    // create a table scan operator that does not block, which will cause the driver loop to busy wait
    TableScanOperator source = new NotBlockedTableScanOperator(
            driverContext.addOperatorContext(99, new PlanNodeId("test"), "values"),
            sourceId,
            new PageSourceProvider()
            {
                @Override
                public ConnectorPageSource createPageSource(Session session, Split split, List<ColumnHandle> columns)
                {
                    return new FixedPageSource(rowPagesBuilder(types).addSequencePage(10, 20, 30, 40).build());
                }
            },
            types,
            ImmutableList.of());
    BrokenOperator brokenOperator = new BrokenOperator(driverContext.addOperatorContext(0, new PlanNodeId("test"), "source"));
    final Driver driver = new Driver(driverContext, source, brokenOperator);

    // block thread in operator processing
    Future<Boolean> driverProcessFor = executor.submit(new Callable<Boolean>()
    {
        @Override
        public Boolean call()
                throws Exception
        {
            return driver.processFor(new Duration(1, TimeUnit.MILLISECONDS)).isDone();
        }
    });
    brokenOperator.waitForLocked();

    assertSame(driver.getDriverContext(), driverContext);
    assertFalse(driver.isFinished());

    // processFor always returns NOT_BLOCKED, because the DriverLockResult was not acquired
    assertTrue(driver.processFor(new Duration(1, TimeUnit.MILLISECONDS)).isDone());
    assertFalse(driver.isFinished());

    driver.updateSource(new TaskSource(sourceId, ImmutableSet.of(new ScheduledSplit(0, sourceId, newMockSplit())), true));
    assertFalse(driver.isFinished());

    // processFor always returns NOT_BLOCKED, because the DriverLockResult was not acquired
    assertTrue(driver.processFor(new Duration(1, TimeUnit.SECONDS)).isDone());
    assertFalse(driver.isFinished());

    driver.close();
    assertTrue(driver.isFinished());

    try {
        driverProcessFor.get(1, TimeUnit.SECONDS);
        fail("Expected InterruptedException");
    }
    catch (ExecutionException e) {
        checkArgument(getRootCause(e) instanceof InterruptedException, "Expected root cause exception to be an instance of InterruptedException");
    }
}
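
The NotBlockedTableScanOperator used above is a test helper defined elsewhere in TestDriver and not shown in this snippet. A minimal sketch of its likely shape, assuming the same TableScanOperator constructor arguments as the call site (an illustration, not the verbatim Presto source):

// Sketch (assumed shape): a TableScanOperator that never reports itself as
// blocked, which forces the driver loop to busy wait as the comment above describes.
private static class NotBlockedTableScanOperator
        extends TableScanOperator
{
    NotBlockedTableScanOperator(OperatorContext operatorContext, PlanNodeId planNodeId, PageSourceProvider pageSourceProvider, List<Type> types, List<ColumnHandle> columns)
    {
        super(operatorContext, planNodeId, pageSourceProvider, types, columns);
    }

    @Override
    public ListenableFuture<?> isBlocked()
    {
        // never block, even while waiting for a split
        return NOT_BLOCKED;
    }
}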
Use of com.facebook.presto.spi.ColumnHandle in project presto by prestodb.
From the class TestDriver, method testAddSourceFinish:
@Test
public void testAddSourceFinish()
{
    PlanNodeId sourceId = new PlanNodeId("source");
    final List<Type> types = ImmutableList.of(VARCHAR, BIGINT, BIGINT);
    TableScanOperator source = new TableScanOperator(
            driverContext.addOperatorContext(99, new PlanNodeId("test"), "values"),
            sourceId,
            new PageSourceProvider()
            {
                @Override
                public ConnectorPageSource createPageSource(Session session, Split split, List<ColumnHandle> columns)
                {
                    return new FixedPageSource(rowPagesBuilder(types).addSequencePage(10, 20, 30, 40).build());
                }
            },
            types,
            ImmutableList.of());
    PageConsumerOperator sink = createSinkOperator(source);
    Driver driver = new Driver(driverContext, source, sink);

    assertSame(driver.getDriverContext(), driverContext);
    assertFalse(driver.isFinished());
    assertFalse(driver.processFor(new Duration(1, TimeUnit.MILLISECONDS)).isDone());
    assertFalse(driver.isFinished());

    driver.updateSource(new TaskSource(sourceId, ImmutableSet.of(new ScheduledSplit(0, sourceId, newMockSplit())), true));
    assertFalse(driver.isFinished());
    assertTrue(driver.processFor(new Duration(1, TimeUnit.SECONDS)).isDone());

    assertTrue(driver.isFinished());
    assertTrue(sink.isFinished());
    assertTrue(source.isFinished());
}
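
Both tests feed the driver a split via newMockSplit(), another TestDriver helper not shown here. A hedged sketch, under the assumption that Split wraps a connector id, a transaction handle, and a trivial ConnectorSplit (MockSplit is a stand-in name for illustration):

// Sketch (assumed, based on how the tests use it): wraps a do-nothing
// ConnectorSplit so updateSource() has something to schedule.
private static Split newMockSplit()
{
    return new Split(new ConnectorId("test"), TestingTransactionHandle.create(), new MockSplit());
}

private static class MockSplit
        implements ConnectorSplit
{
    @Override
    public boolean isRemotelyAccessible()
    {
        return true;
    }

    @Override
    public List<HostAddress> getAddresses()
    {
        return ImmutableList.of();
    }

    @Override
    public Object getInfo()
    {
        return null;
    }
}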
Use of com.facebook.presto.spi.ColumnHandle in project presto by prestodb.
From the class HiveMetadata, method buildTableObject:
private static Table buildTableObject(
        String queryId, String schemaName, String tableName, String tableOwner,
        List<HiveColumnHandle> columnHandles, HiveStorageFormat hiveStorageFormat,
        List<String> partitionedBy, Optional<HiveBucketProperty> bucketProperty,
        Map<String, String> additionalTableParameters, Path targetPath,
        boolean external, String prestoVersion)
{
    Map<String, HiveColumnHandle> columnHandlesByName = Maps.uniqueIndex(columnHandles, HiveColumnHandle::getName);
    List<Column> partitionColumns = partitionedBy.stream()
            .map(columnHandlesByName::get)
            .map(column -> new Column(column.getName(), column.getHiveType(), column.getComment()))
            .collect(toList());
    Set<String> partitionColumnNames = ImmutableSet.copyOf(partitionedBy);

    ImmutableList.Builder<Column> columns = ImmutableList.builder();
    for (HiveColumnHandle columnHandle : columnHandles) {
        String name = columnHandle.getName();
        HiveType type = columnHandle.getHiveType();
        if (!partitionColumnNames.contains(name)) {
            verify(!columnHandle.isPartitionKey(), "Column handles are not consistent with partitioned by property");
            columns.add(new Column(name, type, columnHandle.getComment()));
        }
        else {
            verify(columnHandle.isPartitionKey(), "Column handles are not consistent with partitioned by property");
        }
    }

    ImmutableMap.Builder<String, String> tableParameters = ImmutableMap.<String, String>builder()
            .put("comment", "Created by Presto")
            .put(PRESTO_VERSION_NAME, prestoVersion)
            .put(PRESTO_QUERY_ID_NAME, queryId)
            .putAll(additionalTableParameters);
    if (external) {
        tableParameters.put("EXTERNAL", "TRUE");
    }

    Table.Builder tableBuilder = Table.builder()
            .setDatabaseName(schemaName)
            .setTableName(tableName)
            .setOwner(tableOwner)
            .setTableType((external ? EXTERNAL_TABLE : MANAGED_TABLE).name())
            .setDataColumns(columns.build())
            .setPartitionColumns(partitionColumns)
            .setParameters(tableParameters.build());
    tableBuilder.getStorageBuilder()
            .setStorageFormat(fromHiveStorageFormat(hiveStorageFormat))
            .setBucketProperty(bucketProperty)
            .setLocation(targetPath.toString());
    return tableBuilder.build();
}
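
The loop in buildTableObject does not reorder columns; it splits them into data and partition columns while verifying an invariant: a handle named in partitionedBy must be flagged as a partition key, and every other handle must not be. A self-contained sketch of that same split, using a hypothetical stand-in type (ColumnSpec is not a Presto class):

import java.util.ArrayList;
import java.util.List;
import java.util.Set;

public class ColumnSplitSketch
{
    // Hypothetical stand-in for HiveColumnHandle, just for illustration
    record ColumnSpec(String name, String type, boolean partitionKey) {}

    static List<ColumnSpec> dataColumns(List<ColumnSpec> all, Set<String> partitionNames)
    {
        List<ColumnSpec> data = new ArrayList<>();
        for (ColumnSpec column : all) {
            boolean declaredPartition = partitionNames.contains(column.name());
            // same consistency check as the verify(...) calls above
            if (declaredPartition != column.partitionKey()) {
                throw new IllegalStateException("Column handles are not consistent with partitioned by property: " + column.name());
            }
            if (!declaredPartition) {
                data.add(column);
            }
        }
        return data;
    }
}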
Use of com.facebook.presto.spi.ColumnHandle in project presto by prestodb.
From the class HiveMetadata, method beginInsert:
@Override
public HiveInsertTableHandle beginInsert(ConnectorSession session, ConnectorTableHandle tableHandle)
{
    verifyJvmTimeZone();

    SchemaTableName tableName = schemaTableName(tableHandle);
    Optional<Table> table = metastore.getTable(tableName.getSchemaName(), tableName.getTableName());
    if (!table.isPresent()) {
        throw new TableNotFoundException(tableName);
    }
    checkTableIsWritable(table.get());

    for (Column column : table.get().getDataColumns()) {
        if (!isWritableType(column.getType())) {
            throw new PrestoException(NOT_SUPPORTED, format("Inserting into Hive table %s.%s with column type %s not supported", table.get().getDatabaseName(), table.get().getTableName(), column.getType()));
        }
    }

    List<HiveColumnHandle> handles = hiveColumnHandles(connectorId, table.get()).stream()
            .filter(columnHandle -> !columnHandle.isHidden())
            .collect(toList());

    HiveStorageFormat tableStorageFormat = extractHiveStorageFormat(table.get());
    LocationHandle locationHandle = locationService.forExistingTable(metastore, session.getUser(), session.getQueryId(), table.get());
    HiveInsertTableHandle result = new HiveInsertTableHandle(
            connectorId,
            tableName.getSchemaName(),
            tableName.getTableName(),
            handles,
            session.getQueryId(),
            metastore.generatePageSinkMetadata(tableName),
            locationHandle,
            table.get().getStorage().getBucketProperty(),
            tableStorageFormat,
            respectTableFormat ? tableStorageFormat : defaultStorageFormat);

    Optional<Path> writePathRoot = locationService.writePathRoot(locationHandle);
    Path targetPathRoot = locationService.targetPathRoot(locationHandle);
    if (writePathRoot.isPresent()) {
        WriteMode mode = writePathRoot.get().equals(targetPathRoot) ? DIRECT_TO_TARGET_NEW_DIRECTORY : STAGE_AND_MOVE_TO_TARGET_DIRECTORY;
        metastore.declareIntentionToWrite(session, mode, writePathRoot.get(), result.getFilePrefix(), tableName);
    }
    else {
        metastore.declareIntentionToWrite(session, DIRECT_TO_TARGET_EXISTING_DIRECTORY, targetPathRoot, result.getFilePrefix(), tableName);
    }
    return result;
}
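
The tail of beginInsert decides how the inserted files reach the table directory: when the staging write path already equals the target path, data can be written directly into the new directory; otherwise it is staged and moved on commit; and with no staging path at all, it goes straight into the existing directory. A condensed sketch of that decision (the enum below is an illustrative copy of the WriteMode constants used above, not the Presto type):

import java.util.Optional;

public class WriteModeSketch
{
    // Illustrative copy of the constants referenced in beginInsert above
    enum WriteMode { DIRECT_TO_TARGET_NEW_DIRECTORY, STAGE_AND_MOVE_TO_TARGET_DIRECTORY, DIRECT_TO_TARGET_EXISTING_DIRECTORY }

    static WriteMode chooseMode(Optional<String> writePathRoot, String targetPathRoot)
    {
        if (!writePathRoot.isPresent()) {
            // no staging directory: write straight into the existing table directory
            return WriteMode.DIRECT_TO_TARGET_EXISTING_DIRECTORY;
        }
        // a brand-new table directory can be written directly; anything else is staged and moved
        return writePathRoot.get().equals(targetPathRoot)
                ? WriteMode.DIRECT_TO_TARGET_NEW_DIRECTORY
                : WriteMode.STAGE_AND_MOVE_TO_TARGET_DIRECTORY;
    }
}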
Use of com.facebook.presto.spi.ColumnHandle in project presto by prestodb.
From the class HivePartitionManager, method getPartitions:
public HivePartitionResult getPartitions(SemiTransactionalHiveMetastore metastore, ConnectorTableHandle tableHandle, Constraint<ColumnHandle> constraint)
{
    HiveTableHandle hiveTableHandle = (HiveTableHandle) tableHandle;
    TupleDomain<ColumnHandle> effectivePredicate = constraint.getSummary();

    SchemaTableName tableName = hiveTableHandle.getSchemaTableName();
    Table table = getTable(metastore, tableName);
    Optional<HiveBucketHandle> hiveBucketHandle = getHiveBucketHandle(connectorId, table);

    List<HiveColumnHandle> partitionColumns = getPartitionKeyColumnHandles(connectorId, table);
    List<HiveBucket> buckets = getHiveBucketNumbers(table, effectivePredicate);
    TupleDomain<HiveColumnHandle> compactEffectivePredicate = toCompactTupleDomain(effectivePredicate, domainCompactionThreshold);

    if (effectivePredicate.isNone()) {
        return new HivePartitionResult(partitionColumns, ImmutableList.of(), TupleDomain.none(), TupleDomain.none(), hiveBucketHandle);
    }
    if (partitionColumns.isEmpty()) {
        return new HivePartitionResult(partitionColumns, ImmutableList.of(new HivePartition(tableName, compactEffectivePredicate, buckets)), effectivePredicate, TupleDomain.none(), hiveBucketHandle);
    }

    List<Type> partitionTypes = partitionColumns.stream()
            .map(column -> typeManager.getType(column.getTypeSignature()))
            .collect(toList());
    List<String> partitionNames = getFilteredPartitionNames(metastore, tableName, partitionColumns, effectivePredicate);

    // do a final pass to filter based on fields that could not be used to filter the partitions
    int partitionCount = 0;
    ImmutableList.Builder<HivePartition> partitions = ImmutableList.builder();
    for (String partitionName : partitionNames) {
        Optional<Map<ColumnHandle, NullableValue>> values = parseValuesAndFilterPartition(partitionName, partitionColumns, partitionTypes, constraint);
        if (values.isPresent()) {
            if (partitionCount == maxPartitions) {
                throw new PrestoException(HIVE_EXCEEDED_PARTITION_LIMIT, format("Query over table '%s' can potentially read more than %s partitions", hiveTableHandle.getSchemaTableName(), maxPartitions));
            }
            partitionCount++;
            partitions.add(new HivePartition(tableName, compactEffectivePredicate, partitionName, values.get(), buckets));
        }
    }

    // All partition key domains will be fully evaluated, so we don't need to include those
    TupleDomain<ColumnHandle> remainingTupleDomain = TupleDomain.withColumnDomains(Maps.filterKeys(effectivePredicate.getDomains().get(), not(Predicates.in(partitionColumns))));
    TupleDomain<ColumnHandle> enforcedTupleDomain = TupleDomain.withColumnDomains(Maps.filterKeys(effectivePredicate.getDomains().get(), Predicates.in(partitionColumns)));
    return new HivePartitionResult(partitionColumns, partitions.build(), remainingTupleDomain, enforcedTupleDomain, hiveBucketHandle);
}
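
The two Maps.filterKeys calls at the end split the caller's predicate by column: domains on partition columns are fully enforced by the partition list just built, while domains on other columns remain for the engine to evaluate. A generic, self-contained sketch of the same split using plain maps (all names here are illustrative):

import java.util.HashMap;
import java.util.Map;
import java.util.Set;

public class PredicateSplitSketch
{
    record Split<K, V>(Map<K, V> enforced, Map<K, V> remaining) {}

    // Split a column->domain map into the part fully enforced by partition pruning
    // and the part the engine must still evaluate, mirroring the two
    // Maps.filterKeys(...) calls at the end of getPartitions above.
    static <K, V> Split<K, V> split(Map<K, V> domains, Set<K> partitionColumns)
    {
        Map<K, V> enforced = new HashMap<>();
        Map<K, V> remaining = new HashMap<>();
        domains.forEach((column, domain) ->
                (partitionColumns.contains(column) ? enforced : remaining).put(column, domain));
        return new Split<>(enforced, remaining);
    }
}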