Use of io.trino.spi.connector.SchemaTableName in project trino by trinodb.
From the class AbstractTestHive, method testUpdatePartitionColumnStatistics.
@Test
public void testUpdatePartitionColumnStatistics()
        throws Exception
{
    SchemaTableName tableName = temporaryTable("update_partition_column_statistics");
    try {
        createDummyPartitionedTable(tableName, STATISTICS_PARTITIONED_TABLE_COLUMNS);
        testUpdatePartitionStatistics(
                tableName,
                EMPTY_TABLE_STATISTICS,
                ImmutableList.of(STATISTICS_1_1, STATISTICS_1_2, STATISTICS_2),
                ImmutableList.of(STATISTICS_1_2, STATISTICS_1_1, STATISTICS_2));
    }
    finally {
        dropTable(tableName);
    }
}
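The temporaryTable(...) call above is a helper from the AbstractTestHive harness and is not shown in this listing. Purely as a hedged illustration of what such a helper can look like, built only on the public SchemaTableName constructor (the class name, method name, and naming scheme below are invented for this sketch, not the harness's actual implementation):

    import io.trino.spi.connector.SchemaTableName;

    import java.util.concurrent.ThreadLocalRandom;

    final class TemporaryTableNames
    {
        private TemporaryTableNames() {}

        // Hypothetical stand-in for the harness's temporaryTable helper: builds a
        // uniquely suffixed SchemaTableName inside a given test schema.
        static SchemaTableName temporaryTableName(String schemaName, String prefix)
        {
            String suffix = Long.toHexString(ThreadLocalRandom.current().nextLong());
            return new SchemaTableName(schemaName, prefix + "_" + suffix);
        }
    }

A call such as temporaryTableName("tpch", "update_partition_column_statistics") then yields a table name that is unlikely to collide across concurrent test runs.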
Use of io.trino.spi.connector.SchemaTableName in project trino by trinodb.
From the class AbstractTestHive, method testNewDirectoryPermissions.
@Test
public void testNewDirectoryPermissions()
        throws Exception
{
    SchemaTableName tableName = temporaryTable("empty_file");
    List<Column> columns = ImmutableList.of(new Column("test", HIVE_STRING, Optional.empty()));
    createEmptyTable(tableName, ORC, columns, ImmutableList.of(), Optional.empty());
    try {
        Transaction transaction = newTransaction();
        ConnectorSession session = newSession();
        ConnectorMetadata metadata = transaction.getMetadata();
        metadata.beginQuery(session);
        Table table = transaction.getMetastore()
                .getTable(tableName.getSchemaName(), tableName.getTableName())
                .orElseThrow();

        // create new directory and set directory permission after creation
        HdfsContext context = new HdfsContext(session);
        Path location = new Path(table.getStorage().getLocation());
        Path defaultPath = new Path(location + "/defaultperms");
        createDirectory(context, hdfsEnvironment, defaultPath);
        FileStatus defaultFsStatus = hdfsEnvironment.getFileSystem(context, defaultPath).getFileStatus(defaultPath);
        assertEquals(defaultFsStatus.getPermission().toOctal(), 777);

        // use hdfs config that skips setting directory permissions after creation
        HdfsConfig configWithSkip = new HdfsConfig();
        configWithSkip.setNewDirectoryPermissions(HdfsConfig.SKIP_DIR_PERMISSIONS);
        HdfsEnvironment hdfsEnvironmentWithSkip = new HdfsEnvironment(createTestHdfsConfiguration(), configWithSkip, new NoHdfsAuthentication());
        Path skipPath = new Path(location + "/skipperms");
        createDirectory(context, hdfsEnvironmentWithSkip, skipPath);
        FileStatus skipFsStatus = hdfsEnvironmentWithSkip.getFileSystem(context, skipPath).getFileStatus(skipPath);
        assertEquals(skipFsStatus.getPermission().toOctal(), 755);
    }
    finally {
        dropTable(tableName);
    }
}
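The assertions above compare FsPermission.toOctal() against plain integer literals. A minimal standalone sketch of that encoding, assuming only Hadoop's FsPermission class already used in the test (the demo class name is invented):

    import org.apache.hadoop.fs.permission.FsPermission;

    public final class FsPermissionOctalDemo
    {
        private FsPermissionOctalDemo() {}

        public static void main(String[] args)
        {
            // toOctal() renders the permission bits as a short whose decimal digits read
            // as octal, which is why the test compares directly against 777 and 755.
            FsPermission allOpen = new FsPermission((short) 0777);
            FsPermission umasked = new FsPermission((short) 0755);
            System.out.println(allOpen.toOctal()); // 777
            System.out.println(umasked.toOctal()); // 755
        }
    }

The 755 expectation in the skip case matches what HDFS typically produces when its default 022 umask is applied to a newly created directory and the connector does not set permissions itself.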
Use of io.trino.spi.connector.SchemaTableName in project trino by trinodb.
From the class AbstractTestHive, method testGetAllTableColumnsInSchema.
@Test
public void testGetAllTableColumnsInSchema()
{
    try (Transaction transaction = newTransaction()) {
        ConnectorMetadata metadata = transaction.getMetadata();
        Map<SchemaTableName, List<ColumnMetadata>> allColumns = listTableColumns(metadata, newSession(), new SchemaTablePrefix(database));
        assertTrue(allColumns.containsKey(tablePartitionFormat));
        assertTrue(allColumns.containsKey(tableUnpartitioned));
    }
}
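The SchemaTablePrefix built from just the database name selects every table in that schema. A minimal sketch of how such a prefix relates to individual SchemaTableName values, assuming the matches method of SchemaTablePrefix in the Trino SPI (class and literal names below are illustrative):

    import io.trino.spi.connector.SchemaTableName;
    import io.trino.spi.connector.SchemaTablePrefix;

    public final class SchemaTablePrefixDemo
    {
        private SchemaTablePrefixDemo() {}

        public static void main(String[] args)
        {
            SchemaTablePrefix wholeSchema = new SchemaTablePrefix("tpch");

            // A schema-only prefix matches any table in that schema and nothing outside it.
            System.out.println(wholeSchema.matches(new SchemaTableName("tpch", "orders")));         // true
            System.out.println(wholeSchema.matches(new SchemaTableName("other_schema", "orders"))); // false
        }
    }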
Use of io.trino.spi.connector.SchemaTableName in project trino by trinodb.
From the class AbstractTestHive, method setupHive.
protected void setupHive(String databaseName)
{
    database = databaseName;
    tablePartitionFormat = new SchemaTableName(database, "trino_test_partition_format");
    tableUnpartitioned = new SchemaTableName(database, "trino_test_unpartitioned");
    tableOffline = new SchemaTableName(database, "trino_test_offline");
    tableOfflinePartition = new SchemaTableName(database, "trino_test_offline_partition");
    tableNotReadable = new SchemaTableName(database, "trino_test_not_readable");
    view = new SchemaTableName(database, "trino_test_view");
    invalidTable = new SchemaTableName(database, INVALID_TABLE);
    tableBucketedStringInt = new SchemaTableName(database, "trino_test_bucketed_by_string_int");
    tableBucketedBigintBoolean = new SchemaTableName(database, "trino_test_bucketed_by_bigint_boolean");
    tableBucketedDoubleFloat = new SchemaTableName(database, "trino_test_bucketed_by_double_float");
    tablePartitionSchemaChange = new SchemaTableName(database, "trino_test_partition_schema_change");
    tablePartitionSchemaChangeNonCanonical = new SchemaTableName(database, "trino_test_partition_schema_change_non_canonical");
    tableBucketEvolution = new SchemaTableName(database, "trino_test_bucket_evolution");

    invalidTableHandle = new HiveTableHandle(database, INVALID_TABLE, ImmutableMap.of(), ImmutableList.of(), ImmutableList.of(), Optional.empty());

    dsColumn = createBaseColumn("ds", -1, HIVE_STRING, VARCHAR, PARTITION_KEY, Optional.empty());
    fileFormatColumn = createBaseColumn("file_format", -1, HIVE_STRING, VARCHAR, PARTITION_KEY, Optional.empty());
    dummyColumn = createBaseColumn("dummy", -1, HIVE_INT, INTEGER, PARTITION_KEY, Optional.empty());
    intColumn = createBaseColumn("t_int", -1, HIVE_INT, INTEGER, PARTITION_KEY, Optional.empty());
    invalidColumnHandle = createBaseColumn(INVALID_COLUMN, 0, HIVE_STRING, VARCHAR, REGULAR, Optional.empty());

    List<ColumnHandle> partitionColumns = ImmutableList.of(dsColumn, fileFormatColumn, dummyColumn);
    tablePartitionFormatPartitions = ImmutableList.<HivePartition>builder()
            .add(new HivePartition(tablePartitionFormat,
                    "ds=2012-12-29/file_format=textfile/dummy=1",
                    ImmutableMap.<ColumnHandle, NullableValue>builder()
                            .put(dsColumn, NullableValue.of(createUnboundedVarcharType(), utf8Slice("2012-12-29")))
                            .put(fileFormatColumn, NullableValue.of(createUnboundedVarcharType(), utf8Slice("textfile")))
                            .put(dummyColumn, NullableValue.of(INTEGER, 1L))
                            .buildOrThrow()))
            .add(new HivePartition(tablePartitionFormat,
                    "ds=2012-12-29/file_format=sequencefile/dummy=2",
                    ImmutableMap.<ColumnHandle, NullableValue>builder()
                            .put(dsColumn, NullableValue.of(createUnboundedVarcharType(), utf8Slice("2012-12-29")))
                            .put(fileFormatColumn, NullableValue.of(createUnboundedVarcharType(), utf8Slice("sequencefile")))
                            .put(dummyColumn, NullableValue.of(INTEGER, 2L))
                            .buildOrThrow()))
            .add(new HivePartition(tablePartitionFormat,
                    "ds=2012-12-29/file_format=rctext/dummy=3",
                    ImmutableMap.<ColumnHandle, NullableValue>builder()
                            .put(dsColumn, NullableValue.of(createUnboundedVarcharType(), utf8Slice("2012-12-29")))
                            .put(fileFormatColumn, NullableValue.of(createUnboundedVarcharType(), utf8Slice("rctext")))
                            .put(dummyColumn, NullableValue.of(INTEGER, 3L))
                            .buildOrThrow()))
            .add(new HivePartition(tablePartitionFormat,
                    "ds=2012-12-29/file_format=rcbinary/dummy=4",
                    ImmutableMap.<ColumnHandle, NullableValue>builder()
                            .put(dsColumn, NullableValue.of(createUnboundedVarcharType(), utf8Slice("2012-12-29")))
                            .put(fileFormatColumn, NullableValue.of(createUnboundedVarcharType(), utf8Slice("rcbinary")))
                            .put(dummyColumn, NullableValue.of(INTEGER, 4L))
                            .buildOrThrow()))
            .build();
    tableUnpartitionedPartitions = ImmutableList.of(new HivePartition(tableUnpartitioned));
    tablePartitionFormatProperties = new ConnectorTableProperties(
            TupleDomain.withColumnDomains(ImmutableMap.of(
                    dsColumn, Domain.create(ValueSet.ofRanges(Range.equal(createUnboundedVarcharType(), utf8Slice("2012-12-29"))), false),
                    fileFormatColumn, Domain.create(ValueSet.ofRanges(
                            Range.equal(createUnboundedVarcharType(), utf8Slice("textfile")),
                            Range.equal(createUnboundedVarcharType(), utf8Slice("sequencefile")),
                            Range.equal(createUnboundedVarcharType(), utf8Slice("rctext")),
                            Range.equal(createUnboundedVarcharType(), utf8Slice("rcbinary"))), false),
                    dummyColumn, Domain.create(ValueSet.ofRanges(
                            Range.equal(INTEGER, 1L),
                            Range.equal(INTEGER, 2L),
                            Range.equal(INTEGER, 3L),
                            Range.equal(INTEGER, 4L)), false))),
            Optional.empty(),
            Optional.empty(),
            Optional.of(new DiscretePredicates(partitionColumns, ImmutableList.of(
                    TupleDomain.withColumnDomains(ImmutableMap.of(
                            dsColumn, Domain.create(ValueSet.ofRanges(Range.equal(createUnboundedVarcharType(), utf8Slice("2012-12-29"))), false),
                            fileFormatColumn, Domain.create(ValueSet.ofRanges(Range.equal(createUnboundedVarcharType(), utf8Slice("textfile"))), false),
                            dummyColumn, Domain.create(ValueSet.ofRanges(Range.equal(INTEGER, 1L)), false))),
                    TupleDomain.withColumnDomains(ImmutableMap.of(
                            dsColumn, Domain.create(ValueSet.ofRanges(Range.equal(createUnboundedVarcharType(), utf8Slice("2012-12-29"))), false),
                            fileFormatColumn, Domain.create(ValueSet.ofRanges(Range.equal(createUnboundedVarcharType(), utf8Slice("sequencefile"))), false),
                            dummyColumn, Domain.create(ValueSet.ofRanges(Range.equal(INTEGER, 2L)), false))),
                    TupleDomain.withColumnDomains(ImmutableMap.of(
                            dsColumn, Domain.create(ValueSet.ofRanges(Range.equal(createUnboundedVarcharType(), utf8Slice("2012-12-29"))), false),
                            fileFormatColumn, Domain.create(ValueSet.ofRanges(Range.equal(createUnboundedVarcharType(), utf8Slice("rctext"))), false),
                            dummyColumn, Domain.create(ValueSet.ofRanges(Range.equal(INTEGER, 3L)), false))),
                    TupleDomain.withColumnDomains(ImmutableMap.of(
                            dsColumn, Domain.create(ValueSet.ofRanges(Range.equal(createUnboundedVarcharType(), utf8Slice("2012-12-29"))), false),
                            fileFormatColumn, Domain.create(ValueSet.ofRanges(Range.equal(createUnboundedVarcharType(), utf8Slice("rcbinary"))), false),
                            dummyColumn, Domain.create(ValueSet.ofRanges(Range.equal(INTEGER, 4L)), false)))))),
            ImmutableList.of());
    tableUnpartitionedProperties = new ConnectorTableProperties();
}
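The ConnectorTableProperties assignment above packs the partition predicate into nested Domain, ValueSet, and TupleDomain calls. A stripped-down sketch of just the Domain pattern, reusing the SPI calls already present in setupHive (the demo class name is invented):

    import io.trino.spi.predicate.Domain;
    import io.trino.spi.predicate.Range;
    import io.trino.spi.predicate.ValueSet;

    import static io.airlift.slice.Slices.utf8Slice;
    import static io.trino.spi.type.IntegerType.INTEGER;
    import static io.trino.spi.type.VarcharType.createUnboundedVarcharType;

    public final class PartitionDomainSketch
    {
        private PartitionDomainSketch() {}

        public static void main(String[] args)
        {
            // Single-value domain for the ds partition key, as in the table-wide predicate above.
            Domain ds = Domain.create(
                    ValueSet.ofRanges(Range.equal(createUnboundedVarcharType(), utf8Slice("2012-12-29"))),
                    false);

            // Multi-value domain for the dummy key: one Range.equal per observed partition value.
            Domain dummy = Domain.create(
                    ValueSet.ofRanges(
                            Range.equal(INTEGER, 1L),
                            Range.equal(INTEGER, 2L),
                            Range.equal(INTEGER, 3L),
                            Range.equal(INTEGER, 4L)),
                    false);

            System.out.println(ds.isSingleValue());    // true
            System.out.println(dummy.isSingleValue()); // false
        }
    }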
Use of io.trino.spi.connector.SchemaTableName in project trino by trinodb.
From the class TestBackgroundHiveSplitLoader, method backgroundHiveSplitLoader.
private BackgroundHiveSplitLoader backgroundHiveSplitLoader(List<LocatedFileStatus> files, DirectoryLister directoryLister)
{
    List<HivePartitionMetadata> hivePartitionMetadatas = ImmutableList.of(
            new HivePartitionMetadata(
                    new HivePartition(new SchemaTableName("testSchema", "table_name")),
                    Optional.empty(),
                    TableToPartitionMapping.empty()));
    ConnectorSession connectorSession = getHiveSession(new HiveConfig()
            .setMaxSplitSize(DataSize.of(1, GIGABYTE)));
    return new BackgroundHiveSplitLoader(
            SIMPLE_TABLE,
            NO_ACID_TRANSACTION,
            hivePartitionMetadatas,
            TupleDomain.none(),
            DynamicFilter.EMPTY,
            new Duration(0, SECONDS),
            TESTING_TYPE_MANAGER,
            Optional.empty(),
            connectorSession,
            new TestingHdfsEnvironment(files),
            new NamenodeStats(),
            directoryLister,
            executor,
            2,
            false,
            false,
            true,
            Optional.empty(),
            Optional.empty());
}
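The SchemaTableName here is created with a mixed-case schema literal ("testSchema"). A small sketch of what that means in practice, assuming the Trino SPI's usual behavior of lower-casing both parts in the constructor (the demo class name is invented):

    import io.trino.spi.connector.SchemaTableName;

    public final class SchemaTableNameCaseDemo
    {
        private SchemaTableNameCaseDemo() {}

        public static void main(String[] args)
        {
            // The SPI normalizes names, so "testSchema" is stored and compared as "testschema".
            SchemaTableName name = new SchemaTableName("testSchema", "table_name");
            System.out.println(name.getSchemaName()); // testschema
            System.out.println(name.getTableName());  // table_name
            System.out.println(name);                 // testschema.table_name
        }
    }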