Example 61 with SchemaTableName

Use of io.trino.spi.connector.SchemaTableName in project trino by trinodb.

From class AbstractTestHive, method testUpdatePartitionColumnStatistics.

@Test
public void testUpdatePartitionColumnStatistics() throws Exception {
    SchemaTableName tableName = temporaryTable("update_partition_column_statistics");
    try {
        createDummyPartitionedTable(tableName, STATISTICS_PARTITIONED_TABLE_COLUMNS);
        testUpdatePartitionStatistics(tableName, EMPTY_TABLE_STATISTICS, ImmutableList.of(STATISTICS_1_1, STATISTICS_1_2, STATISTICS_2), ImmutableList.of(STATISTICS_1_2, STATISTICS_1_1, STATISTICS_2));
    } finally {
        dropTable(tableName);
    }
}
Also used: CatalogSchemaTableName (io.trino.spi.connector.CatalogSchemaTableName), SchemaTableName (io.trino.spi.connector.SchemaTableName), Test (org.testng.annotations.Test)
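
For context, here is a minimal standalone sketch of SchemaTableName itself, which every example in this section constructs or consumes. This is illustrative code, not part of AbstractTestHive; it assumes only trino-spi on the classpath, and that the constructor canonicalizes both parts to lower case (true in current trino-spi).

import io.trino.spi.connector.SchemaTableName;

public class SchemaTableNameSketch {
    public static void main(String[] args) {
        // A SchemaTableName pairs a schema name with a table name.
        SchemaTableName name = new SchemaTableName("test_schema", "update_partition_column_statistics");
        // Accessors return the two parts; toString() joins them with a dot.
        System.out.println(name.getSchemaName()); // test_schema
        System.out.println(name.getTableName()); // update_partition_column_statistics
        System.out.println(name); // test_schema.update_partition_column_statistics
        // The constructor folds mixed-case input to lower case.
        System.out.println(new SchemaTableName("TestSchema", "Table_Name")); // testschema.table_name
    }
}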

Example 62 with SchemaTableName

Use of io.trino.spi.connector.SchemaTableName in project trino by trinodb.

From class AbstractTestHive, method testNewDirectoryPermissions.

@Test
public void testNewDirectoryPermissions() throws Exception {
    SchemaTableName tableName = temporaryTable("empty_file");
    List<Column> columns = ImmutableList.of(new Column("test", HIVE_STRING, Optional.empty()));
    createEmptyTable(tableName, ORC, columns, ImmutableList.of(), Optional.empty());
    try {
        Transaction transaction = newTransaction();
        ConnectorSession session = newSession();
        ConnectorMetadata metadata = transaction.getMetadata();
        metadata.beginQuery(session);
        Table table = transaction.getMetastore().getTable(tableName.getSchemaName(), tableName.getTableName()).orElseThrow();
        // create new directory and set directory permission after creation
        HdfsContext context = new HdfsContext(session);
        Path location = new Path(table.getStorage().getLocation());
        Path defaultPath = new Path(location + "/defaultperms");
        createDirectory(context, hdfsEnvironment, defaultPath);
        FileStatus defaultFsStatus = hdfsEnvironment.getFileSystem(context, defaultPath).getFileStatus(defaultPath);
        assertEquals(defaultFsStatus.getPermission().toOctal(), 777);
        // use hdfs config that skips setting directory permissions after creation
        HdfsConfig configWithSkip = new HdfsConfig();
        configWithSkip.setNewDirectoryPermissions(HdfsConfig.SKIP_DIR_PERMISSIONS);
        HdfsEnvironment hdfsEnvironmentWithSkip = new HdfsEnvironment(createTestHdfsConfiguration(), configWithSkip, new NoHdfsAuthentication());
        Path skipPath = new Path(location + "/skipperms");
        createDirectory(context, hdfsEnvironmentWithSkip, skipPath);
        FileStatus skipFsStatus = hdfsEnvironmentWithSkip.getFileSystem(context, skipPath).getFileStatus(skipPath);
        assertEquals(skipFsStatus.getPermission().toOctal(), 755);
    } finally {
        dropTable(tableName);
    }
}
Also used: Path (org.apache.hadoop.fs.Path), HiveSessionProperties.getTemporaryStagingDirectoryPath (io.trino.plugin.hive.HiveSessionProperties.getTemporaryStagingDirectoryPath), Table (io.trino.plugin.hive.metastore.Table), FileStatus (org.apache.hadoop.fs.FileStatus), CatalogSchemaTableName (io.trino.spi.connector.CatalogSchemaTableName), SchemaTableName (io.trino.spi.connector.SchemaTableName), NoHdfsAuthentication (io.trino.plugin.hive.authentication.NoHdfsAuthentication), HiveColumnHandle.createBaseColumn (io.trino.plugin.hive.HiveColumnHandle.createBaseColumn), Column (io.trino.plugin.hive.metastore.Column), ViewColumn (io.trino.spi.connector.ConnectorViewDefinition.ViewColumn), SortingColumn (io.trino.plugin.hive.metastore.SortingColumn), ConnectorSession (io.trino.spi.connector.ConnectorSession), TestingConnectorSession (io.trino.testing.TestingConnectorSession), ConnectorMetadata (io.trino.spi.connector.ConnectorMetadata), HdfsContext (io.trino.plugin.hive.HdfsEnvironment.HdfsContext), Test (org.testng.annotations.Test)
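
The two assertions above compare octal permission values: a directory created through the default HdfsEnvironment gets the configured 777, while the SKIP_DIR_PERMISSIONS environment leaves the filesystem default (755 here) in place. A minimal sketch of how FsPermission.toOctal() reports those bits, assuming a Hadoop version that provides FsPermission(short) and toOctal(), as the test above relies on:

import org.apache.hadoop.fs.permission.FsPermission;

public class FsPermissionSketch {
    public static void main(String[] args) {
        // 0777 is rwxrwxrwx; toOctal() returns a short whose decimal digits
        // read like the octal permission string, hence assertEquals(..., 777).
        System.out.println(new FsPermission((short) 0777).toOctal()); // 777
        // 0755 is rwxr-xr-x, the common filesystem default for new directories.
        System.out.println(new FsPermission((short) 0755).toOctal()); // 755
    }
}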

Example 63 with SchemaTableName

Use of io.trino.spi.connector.SchemaTableName in project trino by trinodb.

From class AbstractTestHive, method testGetAllTableColumnsInSchema.

@Test
public void testGetAllTableColumnsInSchema() {
    try (Transaction transaction = newTransaction()) {
        ConnectorMetadata metadata = transaction.getMetadata();
        Map<SchemaTableName, List<ColumnMetadata>> allColumns = listTableColumns(metadata, newSession(), new SchemaTablePrefix(database));
        assertTrue(allColumns.containsKey(tablePartitionFormat));
        assertTrue(allColumns.containsKey(tableUnpartitioned));
    }
}
Also used: SchemaTablePrefix (io.trino.spi.connector.SchemaTablePrefix), Lists.newArrayList (com.google.common.collect.Lists.newArrayList), ImmutableList (com.google.common.collect.ImmutableList), Collectors.toList (java.util.stream.Collectors.toList), ImmutableList.toImmutableList (com.google.common.collect.ImmutableList.toImmutableList), List (java.util.List), ConnectorMetadata (io.trino.spi.connector.ConnectorMetadata), CatalogSchemaTableName (io.trino.spi.connector.CatalogSchemaTableName), SchemaTableName (io.trino.spi.connector.SchemaTableName), Test (org.testng.annotations.Test)
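
listTableColumns takes a SchemaTablePrefix; a prefix that names only a schema matches every table in that schema, which is why both test tables appear in the result. A minimal sketch of the matching behavior, assuming current trino-spi semantics for SchemaTablePrefix.matches:

import io.trino.spi.connector.SchemaTableName;
import io.trino.spi.connector.SchemaTablePrefix;

public class SchemaTablePrefixSketch {
    public static void main(String[] args) {
        // A schema-only prefix matches any table in that schema.
        SchemaTablePrefix prefix = new SchemaTablePrefix("test_schema");
        System.out.println(prefix.matches(new SchemaTableName("test_schema", "trino_test_unpartitioned"))); // true
        System.out.println(prefix.matches(new SchemaTableName("other_schema", "trino_test_unpartitioned"))); // false
        // An empty prefix matches every table in every schema.
        System.out.println(new SchemaTablePrefix().matches(new SchemaTableName("any", "thing"))); // true
    }
}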

Example 64 with SchemaTableName

Use of io.trino.spi.connector.SchemaTableName in project trino by trinodb.

From class AbstractTestHive, method setupHive.

protected void setupHive(String databaseName) {
    database = databaseName;
    tablePartitionFormat = new SchemaTableName(database, "trino_test_partition_format");
    tableUnpartitioned = new SchemaTableName(database, "trino_test_unpartitioned");
    tableOffline = new SchemaTableName(database, "trino_test_offline");
    tableOfflinePartition = new SchemaTableName(database, "trino_test_offline_partition");
    tableNotReadable = new SchemaTableName(database, "trino_test_not_readable");
    view = new SchemaTableName(database, "trino_test_view");
    invalidTable = new SchemaTableName(database, INVALID_TABLE);
    tableBucketedStringInt = new SchemaTableName(database, "trino_test_bucketed_by_string_int");
    tableBucketedBigintBoolean = new SchemaTableName(database, "trino_test_bucketed_by_bigint_boolean");
    tableBucketedDoubleFloat = new SchemaTableName(database, "trino_test_bucketed_by_double_float");
    tablePartitionSchemaChange = new SchemaTableName(database, "trino_test_partition_schema_change");
    tablePartitionSchemaChangeNonCanonical = new SchemaTableName(database, "trino_test_partition_schema_change_non_canonical");
    tableBucketEvolution = new SchemaTableName(database, "trino_test_bucket_evolution");
    invalidTableHandle = new HiveTableHandle(database, INVALID_TABLE, ImmutableMap.of(), ImmutableList.of(), ImmutableList.of(), Optional.empty());
    dsColumn = createBaseColumn("ds", -1, HIVE_STRING, VARCHAR, PARTITION_KEY, Optional.empty());
    fileFormatColumn = createBaseColumn("file_format", -1, HIVE_STRING, VARCHAR, PARTITION_KEY, Optional.empty());
    dummyColumn = createBaseColumn("dummy", -1, HIVE_INT, INTEGER, PARTITION_KEY, Optional.empty());
    intColumn = createBaseColumn("t_int", -1, HIVE_INT, INTEGER, PARTITION_KEY, Optional.empty());
    invalidColumnHandle = createBaseColumn(INVALID_COLUMN, 0, HIVE_STRING, VARCHAR, REGULAR, Optional.empty());
    List<ColumnHandle> partitionColumns = ImmutableList.of(dsColumn, fileFormatColumn, dummyColumn);
    tablePartitionFormatPartitions = ImmutableList.<HivePartition>builder()
            .add(new HivePartition(tablePartitionFormat, "ds=2012-12-29/file_format=textfile/dummy=1",
                    ImmutableMap.<ColumnHandle, NullableValue>builder()
                            .put(dsColumn, NullableValue.of(createUnboundedVarcharType(), utf8Slice("2012-12-29")))
                            .put(fileFormatColumn, NullableValue.of(createUnboundedVarcharType(), utf8Slice("textfile")))
                            .put(dummyColumn, NullableValue.of(INTEGER, 1L))
                            .buildOrThrow()))
            .add(new HivePartition(tablePartitionFormat, "ds=2012-12-29/file_format=sequencefile/dummy=2",
                    ImmutableMap.<ColumnHandle, NullableValue>builder()
                            .put(dsColumn, NullableValue.of(createUnboundedVarcharType(), utf8Slice("2012-12-29")))
                            .put(fileFormatColumn, NullableValue.of(createUnboundedVarcharType(), utf8Slice("sequencefile")))
                            .put(dummyColumn, NullableValue.of(INTEGER, 2L))
                            .buildOrThrow()))
            .add(new HivePartition(tablePartitionFormat, "ds=2012-12-29/file_format=rctext/dummy=3",
                    ImmutableMap.<ColumnHandle, NullableValue>builder()
                            .put(dsColumn, NullableValue.of(createUnboundedVarcharType(), utf8Slice("2012-12-29")))
                            .put(fileFormatColumn, NullableValue.of(createUnboundedVarcharType(), utf8Slice("rctext")))
                            .put(dummyColumn, NullableValue.of(INTEGER, 3L))
                            .buildOrThrow()))
            .add(new HivePartition(tablePartitionFormat, "ds=2012-12-29/file_format=rcbinary/dummy=4",
                    ImmutableMap.<ColumnHandle, NullableValue>builder()
                            .put(dsColumn, NullableValue.of(createUnboundedVarcharType(), utf8Slice("2012-12-29")))
                            .put(fileFormatColumn, NullableValue.of(createUnboundedVarcharType(), utf8Slice("rcbinary")))
                            .put(dummyColumn, NullableValue.of(INTEGER, 4L))
                            .buildOrThrow()))
            .build();
    tableUnpartitionedPartitions = ImmutableList.of(new HivePartition(tableUnpartitioned));
    tablePartitionFormatProperties = new ConnectorTableProperties(
            TupleDomain.withColumnDomains(ImmutableMap.of(
                    dsColumn, Domain.create(ValueSet.ofRanges(Range.equal(createUnboundedVarcharType(), utf8Slice("2012-12-29"))), false),
                    fileFormatColumn, Domain.create(ValueSet.ofRanges(
                            Range.equal(createUnboundedVarcharType(), utf8Slice("textfile")),
                            Range.equal(createUnboundedVarcharType(), utf8Slice("sequencefile")),
                            Range.equal(createUnboundedVarcharType(), utf8Slice("rctext")),
                            Range.equal(createUnboundedVarcharType(), utf8Slice("rcbinary"))), false),
                    dummyColumn, Domain.create(ValueSet.ofRanges(
                            Range.equal(INTEGER, 1L),
                            Range.equal(INTEGER, 2L),
                            Range.equal(INTEGER, 3L),
                            Range.equal(INTEGER, 4L)), false))),
            Optional.empty(),
            Optional.empty(),
            Optional.of(new DiscretePredicates(partitionColumns, ImmutableList.of(
                    TupleDomain.withColumnDomains(ImmutableMap.of(
                            dsColumn, Domain.create(ValueSet.ofRanges(Range.equal(createUnboundedVarcharType(), utf8Slice("2012-12-29"))), false),
                            fileFormatColumn, Domain.create(ValueSet.ofRanges(Range.equal(createUnboundedVarcharType(), utf8Slice("textfile"))), false),
                            dummyColumn, Domain.create(ValueSet.ofRanges(Range.equal(INTEGER, 1L)), false))),
                    TupleDomain.withColumnDomains(ImmutableMap.of(
                            dsColumn, Domain.create(ValueSet.ofRanges(Range.equal(createUnboundedVarcharType(), utf8Slice("2012-12-29"))), false),
                            fileFormatColumn, Domain.create(ValueSet.ofRanges(Range.equal(createUnboundedVarcharType(), utf8Slice("sequencefile"))), false),
                            dummyColumn, Domain.create(ValueSet.ofRanges(Range.equal(INTEGER, 2L)), false))),
                    TupleDomain.withColumnDomains(ImmutableMap.of(
                            dsColumn, Domain.create(ValueSet.ofRanges(Range.equal(createUnboundedVarcharType(), utf8Slice("2012-12-29"))), false),
                            fileFormatColumn, Domain.create(ValueSet.ofRanges(Range.equal(createUnboundedVarcharType(), utf8Slice("rctext"))), false),
                            dummyColumn, Domain.create(ValueSet.ofRanges(Range.equal(INTEGER, 3L)), false))),
                    TupleDomain.withColumnDomains(ImmutableMap.of(
                            dsColumn, Domain.create(ValueSet.ofRanges(Range.equal(createUnboundedVarcharType(), utf8Slice("2012-12-29"))), false),
                            fileFormatColumn, Domain.create(ValueSet.ofRanges(Range.equal(createUnboundedVarcharType(), utf8Slice("rcbinary"))), false),
                            dummyColumn, Domain.create(ValueSet.ofRanges(Range.equal(INTEGER, 4L)), false)))))),
            ImmutableList.of());
    tableUnpartitionedProperties = new ConnectorTableProperties();
}
Also used: HiveColumnHandle.bucketColumnHandle (io.trino.plugin.hive.HiveColumnHandle.bucketColumnHandle), ColumnHandle (io.trino.spi.connector.ColumnHandle), NullableValue (io.trino.spi.predicate.NullableValue), DiscretePredicates (io.trino.spi.connector.DiscretePredicates), CatalogSchemaTableName (io.trino.spi.connector.CatalogSchemaTableName), SchemaTableName (io.trino.spi.connector.SchemaTableName), ConnectorTableProperties (io.trino.spi.connector.ConnectorTableProperties)
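
setupHive builds each partition predicate from repeated Domain.create(ValueSet.ofRanges(Range.equal(...)), false) calls; the trailing false means NULL is excluded from the domain. A minimal standalone sketch of one such single-value domain (illustrative, assuming trino-spi on the classpath):

import io.trino.spi.predicate.Domain;
import io.trino.spi.predicate.Range;
import io.trino.spi.predicate.ValueSet;

import static io.trino.spi.type.IntegerType.INTEGER;

public class DomainSketch {
    public static void main(String[] args) {
        // dummy = 1, NULL not allowed: the same shape used for dummyColumn above.
        Domain one = Domain.create(ValueSet.ofRanges(Range.equal(INTEGER, 1L)), false);
        System.out.println(one.isSingleValue()); // true
        System.out.println(one.isNullAllowed()); // false
        System.out.println(one.getSingleValue()); // 1
    }
}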

Example 65 with SchemaTableName

Use of io.trino.spi.connector.SchemaTableName in project trino by trinodb.

From class TestBackgroundHiveSplitLoader, method backgroundHiveSplitLoader.

private BackgroundHiveSplitLoader backgroundHiveSplitLoader(List<LocatedFileStatus> files, DirectoryLister directoryLister) {
    List<HivePartitionMetadata> hivePartitionMetadatas = ImmutableList.of(
            new HivePartitionMetadata(
                    new HivePartition(new SchemaTableName("testSchema", "table_name")),
                    Optional.empty(),
                    TableToPartitionMapping.empty()));
    ConnectorSession connectorSession = getHiveSession(new HiveConfig().setMaxSplitSize(DataSize.of(1, GIGABYTE)));
    return new BackgroundHiveSplitLoader(
            SIMPLE_TABLE,
            NO_ACID_TRANSACTION,
            hivePartitionMetadatas,
            TupleDomain.none(),
            DynamicFilter.EMPTY,
            new Duration(0, SECONDS),
            TESTING_TYPE_MANAGER,
            Optional.empty(),
            connectorSession,
            new TestingHdfsEnvironment(files),
            new NamenodeStats(),
            directoryLister,
            executor,
            2,
            false,
            false,
            true,
            Optional.empty(),
            Optional.empty());
}
Also used: ConnectorSession (io.trino.spi.connector.ConnectorSession), Duration (io.airlift.units.Duration), SchemaTableName (io.trino.spi.connector.SchemaTableName)
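
The loader above is built with TupleDomain.none() as its effective predicate, a contradiction that matches no rows, and DynamicFilter.EMPTY, the SPI's no-op dynamic filter. A minimal sketch of the two TupleDomain extremes, assuming trino-spi on the classpath:

import io.trino.spi.predicate.TupleDomain;

public class TupleDomainSketch {
    public static void main(String[] args) {
        // none() matches no rows at all; all() places no constraint.
        TupleDomain<String> none = TupleDomain.none();
        TupleDomain<String> all = TupleDomain.all();
        System.out.println(none.isNone()); // true
        System.out.println(all.isAll()); // true
    }
}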

Aggregations

Usage counts across the indexed examples:

SchemaTableName (io.trino.spi.connector.SchemaTableName): 446
Test (org.testng.annotations.Test): 212
ImmutableList (com.google.common.collect.ImmutableList): 131
ImmutableMap (com.google.common.collect.ImmutableMap): 106
List (java.util.List): 102
TrinoException (io.trino.spi.TrinoException): 100
ImmutableList.toImmutableList (com.google.common.collect.ImmutableList.toImmutableList): 98
ConnectorSession (io.trino.spi.connector.ConnectorSession): 98
CatalogSchemaTableName (io.trino.spi.connector.CatalogSchemaTableName): 92
TableNotFoundException (io.trino.spi.connector.TableNotFoundException): 89
ColumnMetadata (io.trino.spi.connector.ColumnMetadata): 86
ConnectorTableMetadata (io.trino.spi.connector.ConnectorTableMetadata): 86
Optional (java.util.Optional): 78
Map (java.util.Map): 67
ColumnHandle (io.trino.spi.connector.ColumnHandle): 66
TupleDomain (io.trino.spi.predicate.TupleDomain): 59
ConnectorMetadata (io.trino.spi.connector.ConnectorMetadata): 58
Path (org.apache.hadoop.fs.Path): 55
ConnectorTableHandle (io.trino.spi.connector.ConnectorTableHandle): 53
ImmutableSet (com.google.common.collect.ImmutableSet): 52