Use of io.trino.plugin.hive.HiveTableHandle in project trino by trinodb.
From the class CreateEmptyPartitionProcedure, method doCreateEmptyPartition:
private void doCreateEmptyPartition(ConnectorSession session, ConnectorAccessControl accessControl, String schemaName, String tableName, List<String> partitionColumnNames, List<String> partitionValues)
{
    TransactionalMetadata hiveMetadata = hiveMetadataFactory.create(session.getIdentity(), true);
    HiveTableHandle tableHandle = (HiveTableHandle) hiveMetadata.getTableHandle(session, new SchemaTableName(schemaName, tableName));
    if (tableHandle == null) {
        throw new TrinoException(INVALID_PROCEDURE_ARGUMENT, format("Table '%s' does not exist", new SchemaTableName(schemaName, tableName)));
    }
    accessControl.checkCanInsertIntoTable(null, new SchemaTableName(schemaName, tableName));
    // The caller-supplied partition column names must match the table's actual partition columns, in order.
    List<String> actualPartitionColumnNames = tableHandle.getPartitionColumns().stream()
            .map(HiveColumnHandle::getName)
            .collect(toImmutableList());
    if (!Objects.equals(partitionColumnNames, actualPartitionColumnNames)) {
        throw new TrinoException(INVALID_PROCEDURE_ARGUMENT, "Provided partition column names do not match actual partition column names: " + actualPartitionColumnNames);
    }
    HiveMetastoreClosure metastore = hiveMetadata.getMetastore().unsafeGetRawHiveMetastoreClosure();
    if (metastore.getPartition(schemaName, tableName, partitionValues).isPresent()) {
        throw new TrinoException(ALREADY_EXISTS, "Partition already exists");
    }
    // Drive a regular insert that writes zero rows: begin the insert, register a single
    // empty partition through a serialized PartitionUpdate, then finish and commit.
    HiveInsertTableHandle hiveInsertTableHandle = (HiveInsertTableHandle) hiveMetadata.beginInsert(session, tableHandle, ImmutableList.of(), NO_RETRIES);
    String partitionName = FileUtils.makePartName(actualPartitionColumnNames, partitionValues);
    WriteInfo writeInfo = locationService.getPartitionWriteInfo(hiveInsertTableHandle.getLocationHandle(), Optional.empty(), partitionName);
    Slice serializedPartitionUpdate = Slices.wrappedBuffer(partitionUpdateJsonCodec.toJsonBytes(
            new PartitionUpdate(partitionName, UpdateMode.NEW, writeInfo.getWritePath(), writeInfo.getTargetPath(), ImmutableList.of(), 0, 0, 0)));
    hiveMetadata.finishInsert(session, hiveInsertTableHandle, ImmutableList.of(serializedPartitionUpdate), ImmutableList.of());
    hiveMetadata.commit();
}
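This method backs the Hive connector's create_empty_partition procedure. A minimal invocation sketch, assuming the connector is mounted as catalog "hive" and using a hypothetical queryRunner; the schema, table, and partition values are illustrative:

queryRunner.execute(
        "CALL hive.system.create_empty_partition(" +
        "schema_name => 'web', " +
        "table_name => 'page_views', " +
        "partition_columns => ARRAY['ds'], " +
        "partition_values => ARRAY['2024-08-09'])");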
Use of io.trino.plugin.hive.HiveTableHandle in project trino by trinodb.
From the class HiveBucketing, method getHiveBucketFilter:
public static Optional<HiveBucketFilter> getHiveBucketFilter(HiveTableHandle hiveTable, TupleDomain<ColumnHandle> effectivePredicate)
{
    if (hiveTable.getBucketHandle().isEmpty()) {
        return Optional.empty();
    }
    HiveBucketProperty hiveBucketProperty = hiveTable.getBucketHandle().get().toTableBucketProperty();
    List<Column> dataColumns = hiveTable.getDataColumns().stream()
            .map(HiveColumnHandle::toMetastoreColumn)
            .collect(toImmutableList());
    // First attempt: derive bucket numbers directly from discrete values in the predicate.
    Optional<Map<ColumnHandle, List<NullableValue>>> bindings = TupleDomain.extractDiscreteValues(effectivePredicate);
    if (bindings.isEmpty()) {
        return Optional.empty();
    }
    Optional<Set<Integer>> buckets = getHiveBuckets(hiveBucketProperty, dataColumns, bindings.get());
    if (buckets.isPresent()) {
        return Optional.of(new HiveBucketFilter(buckets.get()));
    }
    // Fallback: look for a predicate on the synthetic bucket column and keep only
    // the bucket numbers its domain admits.
    Optional<Domain> domain = effectivePredicate.getDomains().flatMap(domains -> domains.entrySet().stream()
            .filter(entry -> ((HiveColumnHandle) entry.getKey()).getName().equals(BUCKET_COLUMN_NAME))
            .findFirst()
            .map(Entry::getValue));
    if (domain.isEmpty()) {
        return Optional.empty();
    }
    ValueSet values = domain.get().getValues();
    ImmutableSet.Builder<Integer> builder = ImmutableSet.builder();
    int bucketCount = hiveBucketProperty.getBucketCount();
    for (int i = 0; i < bucketCount; i++) {
        if (values.containsValue((long) i)) {
            builder.add(i);
        }
    }
    return Optional.of(new HiveBucketFilter(builder.build()));
}
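A caller prunes bucketed files against the returned filter. A hedged sketch, assuming HiveBucketFilter exposes its kept bucket numbers via getBucketsToKeep(); keepBucket is a hypothetical helper, not part of the class above:

// Decide whether a file assigned to the given bucket number must be scanned.
static boolean keepBucket(Optional<HiveBucketFilter> filter, int bucket)
{
    return filter
            .map(f -> f.getBucketsToKeep().contains(bucket))
            .orElse(true); // no filter: every bucket must be scanned
}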
Use of io.trino.plugin.hive.HiveTableHandle in project trino by trinodb.
From the class AbstractFileFormat, method createGenericReader:
@Override
public ConnectorPageSource createGenericReader(ConnectorSession session, HdfsEnvironment hdfsEnvironment, File targetFile, List<ColumnHandle> readColumns, List<String> schemaColumnNames, List<Type> schemaColumnTypes)
{
    HivePageSourceProvider factory = new HivePageSourceProvider(
            TESTING_TYPE_MANAGER,
            hdfsEnvironment,
            new HiveConfig(),
            getHivePageSourceFactory(hdfsEnvironment).map(ImmutableSet::of).orElse(ImmutableSet.of()),
            getHiveRecordCursorProvider(hdfsEnvironment).map(ImmutableSet::of).orElse(ImmutableSet.of()),
            new GenericHiveRecordCursorProvider(hdfsEnvironment, new HiveConfig()),
            Optional.empty());
    Properties schema = createSchema(getFormat(), schemaColumnNames, schemaColumnTypes);
    // Build a single split covering the whole target file.
    HiveSplit split = new HiveSplit(
            "schema_name", "table_name", "", targetFile.getPath(),
            0, targetFile.length(), targetFile.length(), targetFile.lastModified(),
            schema, ImmutableList.of(), ImmutableList.of(), OptionalInt.empty(),
            0, false, TableToPartitionMapping.empty(), Optional.empty(),
            Optional.empty(), false, Optional.empty(), 0, SplitWeight.standard());
    return factory.createPageSource(
            TestingConnectorTransactionHandle.INSTANCE,
            session,
            split,
            new HiveTableHandle("schema_name", "table_name", ImmutableMap.of(), ImmutableList.of(), ImmutableList.of(), Optional.empty()),
            readColumns,
            DynamicFilter.EMPTY);
}
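The returned ConnectorPageSource is then drained with the standard SPI read loop. A minimal sketch, where fileFormat stands for a concrete AbstractFileFormat instance and process() is a hypothetical per-page consumer:

try (ConnectorPageSource pageSource = fileFormat.createGenericReader(session, hdfsEnvironment, targetFile, readColumns, schemaColumnNames, schemaColumnTypes)) {
    while (!pageSource.isFinished()) {
        Page page = pageSource.getNextPage();
        if (page != null) {
            process(page); // hypothetical consumer
        }
    }
}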
Use of io.trino.plugin.hive.HiveTableHandle in project trino by trinodb.
From the class TestConnectorPushdownRulesWithHive, method testPredicatePushdown:
@Test
public void testPredicatePushdown()
{
    String tableName = "predicate_test";
    tester().getQueryRunner().execute(format("CREATE TABLE %s (a, b) AS SELECT 5, 6", tableName));
    PushPredicateIntoTableScan pushPredicateIntoTableScan = new PushPredicateIntoTableScan(tester().getPlannerContext(), tester().getTypeAnalyzer());
    HiveTableHandle hiveTable = new HiveTableHandle(SCHEMA_NAME, tableName, ImmutableMap.of(), ImmutableList.of(), ImmutableList.of(), Optional.empty());
    TableHandle table = new TableHandle(new CatalogName(HIVE_CATALOG_NAME), hiveTable, new HiveTransactionHandle(false));
    HiveColumnHandle column = createBaseColumn("a", 0, HIVE_INT, INTEGER, REGULAR, Optional.empty());
    // Apply the rule to filter(a = 5) over a scan and assert that the predicate
    // lands in the rewritten handle's compact effective predicate.
    tester().assertThat(pushPredicateIntoTableScan)
            .on(p -> p.filter(
                    PlanBuilder.expression("a = 5"),
                    p.tableScan(
                            table,
                            ImmutableList.of(p.symbol("a", INTEGER)),
                            ImmutableMap.of(p.symbol("a", INTEGER), column))))
            .matches(filter(
                    "a = 5",
                    tableScan(
                            tableHandle -> ((HiveTableHandle) tableHandle).getCompactEffectivePredicate().getDomains().get()
                                    .equals(ImmutableMap.of(column, Domain.singleValue(INTEGER, 5L))),
                            TupleDomain.all(),
                            ImmutableMap.of("a", column::equals))));
    metastore.dropTable(SCHEMA_NAME, tableName, true);
}
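Spelled out, the predicate the matcher expects on the rewritten handle is equivalent to this TupleDomain (a clarifying restatement, not code from the test):

// a = 5 pushed into the scan becomes a single-value domain on column "a".
TupleDomain<HiveColumnHandle> expected = TupleDomain.withColumnDomains(
        ImmutableMap.of(column, Domain.singleValue(INTEGER, 5L)));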
Use of io.trino.plugin.hive.HiveTableHandle in project trino by trinodb.
From the class TestConnectorPushdownRulesWithHive, method testColumnPruningProjectionPushdown:
@Test
public void testColumnPruningProjectionPushdown()
{
    String tableName = "column_pruning_projection_test";
    tester().getQueryRunner().execute(format("CREATE TABLE %s (a, b) AS SELECT 5, 6", tableName));
    PruneTableScanColumns pruneTableScanColumns = new PruneTableScanColumns(tester().getMetadata());
    HiveTableHandle hiveTable = new HiveTableHandle(SCHEMA_NAME, tableName, ImmutableMap.of(), ImmutableList.of(), ImmutableList.of(), Optional.empty());
    TableHandle table = new TableHandle(new CatalogName(HIVE_CATALOG_NAME), hiveTable, new HiveTransactionHandle(false));
    HiveColumnHandle columnA = createBaseColumn("a", 0, HIVE_INT, INTEGER, REGULAR, Optional.empty());
    HiveColumnHandle columnB = createBaseColumn("b", 1, HIVE_INT, INTEGER, REGULAR, Optional.empty());
    // Project only "a" above a scan of both columns; the rule should drop "b"
    // and narrow the table handle to the projected column set.
    tester().assertThat(pruneTableScanColumns)
            .on(p -> {
                Symbol symbolA = p.symbol("a", INTEGER);
                Symbol symbolB = p.symbol("b", INTEGER);
                return p.project(
                        Assignments.of(p.symbol("x"), symbolA.toSymbolReference()),
                        p.tableScan(
                                table,
                                ImmutableList.of(symbolA, symbolB),
                                ImmutableMap.of(symbolA, columnA, symbolB, columnB)));
            })
            .matches(strictProject(
                    ImmutableMap.of("expr", expression("COLA")),
                    tableScan(
                            hiveTable.withProjectedColumns(ImmutableSet.of(columnA))::equals,
                            TupleDomain.all(),
                            ImmutableMap.of("COLA", columnA::equals))));
    metastore.dropTable(SCHEMA_NAME, tableName, true);
}
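The matcher's equality check reduces to comparing against the handle narrowed by withProjectedColumns; restated on its own:

// Expected post-rule handle: the original handle restricted to column "a".
HiveTableHandle pruned = hiveTable.withProjectedColumns(ImmutableSet.of(columnA));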