Search in sources :

Example 51 with ConnectorSession

use of io.trino.spi.connector.ConnectorSession in project trino by trinodb.

In class AbstractTestHive, method testIllegalStorageFormatDuringTableScan.

/**
 * During a table scan, an illegal storage format on one table should not fail the whole scan
 */
@Test
public void testIllegalStorageFormatDuringTableScan() {
    SchemaTableName schemaTableName = temporaryTable("test_illegal_storage_format");
    try (Transaction transaction = newTransaction()) {
        ConnectorSession session = newSession();
        List<Column> columns = ImmutableList.of(new Column("pk", HIVE_STRING, Optional.empty()));
        String tableOwner = session.getUser();
        String schemaName = schemaTableName.getSchemaName();
        String tableName = schemaTableName.getTableName();
        LocationHandle locationHandle = locationService.forNewTable(transaction.getMetastore(), session, schemaName, tableName, Optional.empty());
        Path targetPath = locationService.getQueryWriteInfo(locationHandle).getTargetPath();
        // create a table whose storage format is null
        Table.Builder tableBuilder = Table.builder()
                .setDatabaseName(schemaName)
                .setTableName(tableName)
                .setOwner(Optional.of(tableOwner))
                .setTableType(TableType.MANAGED_TABLE.name())
                .setParameters(ImmutableMap.of(
                        PRESTO_VERSION_NAME, TEST_SERVER_VERSION,
                        PRESTO_QUERY_ID_NAME, session.getQueryId()))
                .setDataColumns(columns)
                .withStorage(storage -> storage
                        .setLocation(targetPath.toString())
                        .setStorageFormat(StorageFormat.createNullable(null, null, null))
                        .setSerdeParameters(ImmutableMap.of()));
        PrincipalPrivileges principalPrivileges = testingPrincipalPrivilege(tableOwner, session.getUser());
        transaction.getMetastore().createTable(session, tableBuilder.build(), principalPrivileges, Optional.empty(), Optional.empty(), true, EMPTY_TABLE_STATISTICS, false);
        transaction.commit();
    }
    // make sure the table can still be listed instead of throwing an exception
    try (Transaction transaction = newTransaction()) {
        ConnectorMetadata metadata = transaction.getMetadata();
        Map<SchemaTableName, List<ColumnMetadata>> allColumns = listTableColumns(metadata, newSession(), new SchemaTablePrefix(schemaTableName.getSchemaName()));
        assertTrue(allColumns.containsKey(schemaTableName));
    } finally {
        dropTable(schemaTableName);
    }
}
Also used : Path(org.apache.hadoop.fs.Path) HiveSessionProperties.getTemporaryStagingDirectoryPath(io.trino.plugin.hive.HiveSessionProperties.getTemporaryStagingDirectoryPath) Table(io.trino.plugin.hive.metastore.Table) PrincipalPrivileges(io.trino.plugin.hive.metastore.PrincipalPrivileges) SchemaTablePrefix(io.trino.spi.connector.SchemaTablePrefix) CatalogSchemaTableName(io.trino.spi.connector.CatalogSchemaTableName) SchemaTableName(io.trino.spi.connector.SchemaTableName) HiveColumnHandle.createBaseColumn(io.trino.plugin.hive.HiveColumnHandle.createBaseColumn) Column(io.trino.plugin.hive.metastore.Column) ViewColumn(io.trino.spi.connector.ConnectorViewDefinition.ViewColumn) SortingColumn(io.trino.plugin.hive.metastore.SortingColumn) ConnectorSession(io.trino.spi.connector.ConnectorSession) TestingConnectorSession(io.trino.testing.TestingConnectorSession) Lists.newArrayList(com.google.common.collect.Lists.newArrayList) ImmutableList(com.google.common.collect.ImmutableList) Collectors.toList(java.util.stream.Collectors.toList) ImmutableList.toImmutableList(com.google.common.collect.ImmutableList.toImmutableList) List(java.util.List) ConnectorMetadata(io.trino.spi.connector.ConnectorMetadata) Test(org.testng.annotations.Test)
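
The behavior this test locks in generalizes: metadata listing should degrade per table rather than fail wholesale. Below is a minimal sketch of that defensive loop; the helper name safeListTableColumns is hypothetical (the test's own listTableColumns helper may differ), and the SPI calls assume the two-argument getTableHandle that returns null for missing tables.

import java.util.HashMap;
import java.util.List;
import java.util.Map;
import io.trino.spi.connector.ColumnMetadata;
import io.trino.spi.connector.ConnectorMetadata;
import io.trino.spi.connector.ConnectorSession;
import io.trino.spi.connector.ConnectorTableHandle;
import io.trino.spi.connector.SchemaTableName;
import io.trino.spi.connector.SchemaTablePrefix;

// Hedged sketch, not the helper used above: collect columns per table and
// skip any table whose metadata cannot be read, so one table with bad
// storage metadata does not abort the whole listing.
static Map<SchemaTableName, List<ColumnMetadata>> safeListTableColumns(
        ConnectorMetadata metadata, ConnectorSession session, SchemaTablePrefix prefix) {
    Map<SchemaTableName, List<ColumnMetadata>> columns = new HashMap<>();
    for (SchemaTableName table : metadata.listTables(session, prefix.getSchema())) {
        try {
            ConnectorTableHandle handle = metadata.getTableHandle(session, table);
            if (handle != null) {
                columns.put(table, metadata.getTableMetadata(session, handle).getColumns());
            }
        }
        catch (RuntimeException ignored) {
            // unreadable table metadata: skip this table, keep scanning
        }
    }
    return columns;
}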

Example 52 with ConnectorSession

use of io.trino.spi.connector.ConnectorSession in project trino by trinodb.

In class AbstractTestHive, method testDisallowQueryingOfIcebergTables.

@Test
public void testDisallowQueryingOfIcebergTables() {
    ConnectorSession session = newSession();
    SchemaTableName tableName = temporaryTable("trino_iceberg_table");
    Table.Builder table = Table.builder()
            .setDatabaseName(tableName.getSchemaName())
            .setTableName(tableName.getTableName())
            .setOwner(Optional.of(session.getUser()))
            .setTableType(MANAGED_TABLE.name())
            .setPartitionColumns(List.of(new Column("a_partition_column", HIVE_INT, Optional.empty())))
            .setDataColumns(List.of(new Column("a_column", HIVE_STRING, Optional.empty())))
            .setParameter(ICEBERG_TABLE_TYPE_NAME, ICEBERG_TABLE_TYPE_VALUE);
    table.getStorageBuilder()
            .setStorageFormat(fromHiveStorageFormat(PARQUET))
            .setLocation(getTableDefaultLocation(
                    metastoreClient.getDatabase(tableName.getSchemaName()).orElseThrow(),
                    new HdfsContext(session.getIdentity()),
                    hdfsEnvironment,
                    tableName.getSchemaName(),
                    tableName.getTableName()).toString());
    metastoreClient.createTable(table.build(), NO_PRIVILEGES);
    try {
        // Verify that a table created as an Iceberg table can't be queried through the Hive connector
        try (Transaction transaction = newTransaction()) {
            ConnectorMetadata metadata = transaction.getMetadata();
            metadata.beginQuery(session);
            assertThatThrownBy(() -> getTableHandle(metadata, tableName)).hasMessage(format("Cannot query Iceberg table '%s'", tableName));
        }
        // Verify that table handles for the hidden `$properties` and `$partitions` Hive system tables can't be obtained for Iceberg tables
        try (Transaction transaction = newTransaction()) {
            ConnectorMetadata metadata = transaction.getMetadata();
            metadata.beginQuery(session);
            SchemaTableName propertiesTableName = new SchemaTableName(tableName.getSchemaName(), format("%s$properties", tableName.getTableName()));
            assertThat(metadata.getSystemTable(newSession(), propertiesTableName)).isEmpty();
            SchemaTableName partitionsTableName = new SchemaTableName(tableName.getSchemaName(), format("%s$partitions", tableName.getTableName()));
            assertThat(metadata.getSystemTable(newSession(), partitionsTableName)).isEmpty();
        }
    } finally {
        // Clean up
        metastoreClient.dropTable(tableName.getSchemaName(), tableName.getTableName(), true);
    }
}
Also used : Table(io.trino.plugin.hive.metastore.Table) HiveColumnHandle.createBaseColumn(io.trino.plugin.hive.HiveColumnHandle.createBaseColumn) Column(io.trino.plugin.hive.metastore.Column) ViewColumn(io.trino.spi.connector.ConnectorViewDefinition.ViewColumn) SortingColumn(io.trino.plugin.hive.metastore.SortingColumn) ConnectorSession(io.trino.spi.connector.ConnectorSession) TestingConnectorSession(io.trino.testing.TestingConnectorSession) HdfsContext(io.trino.plugin.hive.HdfsEnvironment.HdfsContext) ConnectorMetadata(io.trino.spi.connector.ConnectorMetadata) CatalogSchemaTableName(io.trino.spi.connector.CatalogSchemaTableName) SchemaTableName(io.trino.spi.connector.SchemaTableName) Test(org.testng.annotations.Test)
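
The refusal being exercised hinges on a single metastore property. Here is a minimal sketch of the check, assuming ICEBERG_TABLE_TYPE_NAME and ICEBERG_TABLE_TYPE_VALUE resolve to table_type and iceberg and that the comparison is case-insensitive, as HiveUtil.isIcebergTable does in Trino; the standalone helper below is illustrative, not the connector's code.

import io.trino.plugin.hive.metastore.Table;

// Illustrative check mirroring what the connector consults before it
// refuses to query an Iceberg table through the Hive connector.
static boolean isIcebergTable(Table table) {
    // assumption: ICEBERG_TABLE_TYPE_NAME = "table_type", ICEBERG_TABLE_TYPE_VALUE = "iceberg"
    return "iceberg".equalsIgnoreCase(table.getParameters().get("table_type"));
}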

Example 53 with ConnectorSession

use of io.trino.spi.connector.ConnectorSession in project trino by trinodb.

In class AbstractTestHive, method doTestBucketedTableEvolution.

private void doTestBucketedTableEvolution(HiveStorageFormat storageFormat, SchemaTableName tableName) throws Exception {
    int rowCount = 100;
    //
    // Produce a table with 8 buckets.
    // The table has 3 partitions with 3 different bucket counts (4, 8, 16).
    createEmptyTable(
            tableName,
            storageFormat,
            ImmutableList.of(
                    new Column("id", HIVE_LONG, Optional.empty()),
                    new Column("name", HIVE_STRING, Optional.empty())),
            ImmutableList.of(new Column("pk", HIVE_STRING, Optional.empty())),
            Optional.of(new HiveBucketProperty(ImmutableList.of("id"), BUCKETING_V1, 4, ImmutableList.of())));
    // write a 4-bucket partition
    MaterializedResult.Builder bucket4Builder = MaterializedResult.resultBuilder(SESSION, BIGINT, VARCHAR, VARCHAR);
    IntStream.range(0, rowCount).forEach(i -> bucket4Builder.row((long) i, String.valueOf(i), "four"));
    insertData(tableName, bucket4Builder.build());
    // write a 16-bucket partition
    alterBucketProperty(tableName, Optional.of(new HiveBucketProperty(ImmutableList.of("id"), BUCKETING_V1, 16, ImmutableList.of())));
    MaterializedResult.Builder bucket16Builder = MaterializedResult.resultBuilder(SESSION, BIGINT, VARCHAR, VARCHAR);
    IntStream.range(0, rowCount).forEach(i -> bucket16Builder.row((long) i, String.valueOf(i), "sixteen"));
    insertData(tableName, bucket16Builder.build());
    // write an 8-bucket partition
    alterBucketProperty(tableName, Optional.of(new HiveBucketProperty(ImmutableList.of("id"), BUCKETING_V1, 8, ImmutableList.of())));
    MaterializedResult.Builder bucket8Builder = MaterializedResult.resultBuilder(SESSION, BIGINT, VARCHAR, VARCHAR);
    IntStream.range(0, rowCount).forEach(i -> bucket8Builder.row((long) i, String.valueOf(i), "eight"));
    insertData(tableName, bucket8Builder.build());
    try (Transaction transaction = newTransaction()) {
        ConnectorMetadata metadata = transaction.getMetadata();
        ConnectorSession session = newSession();
        metadata.beginQuery(session);
        ConnectorTableHandle tableHandle = getTableHandle(metadata, tableName);
        // read entire table
        List<ColumnHandle> columnHandles = ImmutableList.<ColumnHandle>builder().addAll(metadata.getColumnHandles(session, tableHandle).values()).build();
        MaterializedResult result = readTable(transaction, tableHandle, columnHandles, session,
                TupleDomain.all(), OptionalInt.empty(), Optional.empty());
        assertBucketTableEvolutionResult(result, columnHandles, ImmutableSet.of(0, 1, 2, 3, 4, 5, 6, 7), rowCount);
        // read single bucket (table/logical bucket)
        result = readTable(transaction, tableHandle, columnHandles, session,
                TupleDomain.fromFixedValues(ImmutableMap.of(bucketColumnHandle(), NullableValue.of(INTEGER, 6L))),
                OptionalInt.empty(), Optional.empty());
        assertBucketTableEvolutionResult(result, columnHandles, ImmutableSet.of(6), rowCount);
        // read single bucket, without selecting the bucketing column (i.e. id column)
        columnHandles = metadata.getColumnHandles(session, tableHandle).values().stream()
                .filter(columnHandle -> !"id".equals(((HiveColumnHandle) columnHandle).getName()))
                .collect(toImmutableList());
        result = readTable(transaction, tableHandle, columnHandles, session,
                TupleDomain.fromFixedValues(ImmutableMap.of(bucketColumnHandle(), NullableValue.of(INTEGER, 6L))),
                OptionalInt.empty(), Optional.empty());
        assertBucketTableEvolutionResult(result, columnHandles, ImmutableSet.of(6), rowCount);
    }
}
Also used : HiveColumnHandle.bucketColumnHandle(io.trino.plugin.hive.HiveColumnHandle.bucketColumnHandle) ColumnHandle(io.trino.spi.connector.ColumnHandle) HiveColumnHandle.createBaseColumn(io.trino.plugin.hive.HiveColumnHandle.createBaseColumn) Column(io.trino.plugin.hive.metastore.Column) ViewColumn(io.trino.spi.connector.ConnectorViewDefinition.ViewColumn) SortingColumn(io.trino.plugin.hive.metastore.SortingColumn) ConnectorSession(io.trino.spi.connector.ConnectorSession) TestingConnectorSession(io.trino.testing.TestingConnectorSession) ConnectorMetadata(io.trino.spi.connector.ConnectorMetadata) MaterializedResult(io.trino.testing.MaterializedResult) Constraint(io.trino.spi.connector.Constraint) ConnectorTableHandle(io.trino.spi.connector.ConnectorTableHandle)
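
Reading a single logical bucket across partitions bucketed 4, 8, and 16 ways works because bucketing v1 assigns each row to hash mod bucketCount, so compatible power-of-two layouts nest. The sketch below shows the mapping arithmetic; it is illustrative, not the connector's actual code.

import java.util.ArrayList;
import java.util.List;

// Physical buckets a partition contributes to one logical table bucket,
// assuming compatible power-of-two bucket counts (one divides the other).
static List<Integer> physicalBuckets(int logicalBucket, int tableBucketCount, int partitionBucketCount) {
    List<Integer> buckets = new ArrayList<>();
    if (partitionBucketCount <= tableBucketCount) {
        // coarser partition: logical bucket 6 of 8 collapses to 6 % 4 = 2 of 4
        buckets.add(logicalBucket % partitionBucketCount);
    }
    else {
        // finer partition: logical bucket 6 of 8 fans out to buckets 6 and 14 of 16
        for (int bucket = logicalBucket; bucket < partitionBucketCount; bucket += tableBucketCount) {
            buckets.add(bucket);
        }
    }
    return buckets;
}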

Example 54 with ConnectorSession

use of io.trino.spi.connector.ConnectorSession in project trino by trinodb.

In class AbstractTestHiveFileFormats, method createTestFileTrino.

public static FileSplit createTestFileTrino(
        String filePath,
        HiveStorageFormat storageFormat,
        HiveCompressionCodec compressionCodec,
        List<TestColumn> testColumns,
        ConnectorSession session,
        int numRows,
        HiveFileWriterFactory fileWriterFactory) {
    // filter out partition keys, which are not written to the file
    testColumns = testColumns.stream()
            .filter(column -> !column.isPartitionKey())
            .collect(toImmutableList());
    List<Type> types = testColumns.stream()
            .map(TestColumn::getType)
            .map(HiveType::valueOf)
            .map(type -> type.getType(TESTING_TYPE_MANAGER))
            .collect(toList());
    PageBuilder pageBuilder = new PageBuilder(types);
    for (int rowNumber = 0; rowNumber < numRows; rowNumber++) {
        pageBuilder.declarePosition();
        for (int columnNumber = 0; columnNumber < testColumns.size(); columnNumber++) {
            serializeObject(
                    types.get(columnNumber),
                    pageBuilder.getBlockBuilder(columnNumber),
                    testColumns.get(columnNumber).getWriteValue(),
                    testColumns.get(columnNumber).getObjectInspector(),
                    false);
        }
    }
    Page page = pageBuilder.build();
    JobConf jobConf = new JobConf();
    configureCompression(jobConf, compressionCodec);
    Properties tableProperties = new Properties();
    tableProperties.setProperty("columns", testColumns.stream().map(TestColumn::getName).collect(Collectors.joining(",")));
    tableProperties.setProperty("columns.types", testColumns.stream().map(TestColumn::getType).collect(Collectors.joining(",")));
    Optional<FileWriter> fileWriter = fileWriterFactory.createFileWriter(new Path(filePath), testColumns.stream().map(TestColumn::getName).collect(toList()), StorageFormat.fromHiveStorageFormat(storageFormat), tableProperties, jobConf, session, OptionalInt.empty(), NO_ACID_TRANSACTION, false, WriterKind.INSERT);
    FileWriter hiveFileWriter = fileWriter.orElseThrow(() -> new IllegalArgumentException("fileWriterFactory"));
    hiveFileWriter.appendRows(page);
    hiveFileWriter.commit();
    return new FileSplit(new Path(filePath), 0, new File(filePath).length(), new String[0]);
}
Also used : StructuralTestUtil.decimalMapBlockOf(io.trino.testing.StructuralTestUtil.decimalMapBlockOf) DateTimeZone(org.joda.time.DateTimeZone) Arrays(java.util.Arrays) JavaHiveDecimalObjectInspector(org.apache.hadoop.hive.serde2.objectinspector.primitive.JavaHiveDecimalObjectInspector) SerDeUtils.serializeObject(io.trino.plugin.hive.util.SerDeUtils.serializeObject) DateType(io.trino.spi.type.DateType) PrimitiveObjectInspectorFactory.javaByteObjectInspector(org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory.javaByteObjectInspector) Text(org.apache.hadoop.io.Text) PrimitiveObjectInspectorFactory.javaLongObjectInspector(org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory.javaLongObjectInspector) Writable(org.apache.hadoop.io.Writable) Test(org.testng.annotations.Test) NO_ACID_TRANSACTION(io.trino.plugin.hive.acid.AcidTransaction.NO_ACID_TRANSACTION) PrimitiveObjectInspectorFactory.javaTimestampObjectInspector(org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory.javaTimestampObjectInspector) Date(org.apache.hadoop.hive.common.type.Date) PrimitiveObjectInspectorFactory.javaDateObjectInspector(org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory.javaDateObjectInspector) CharType.createCharType(io.trino.spi.type.CharType.createCharType) HiveChar(org.apache.hadoop.hive.common.type.HiveChar) BigDecimal(java.math.BigDecimal) FileSplit(org.apache.hadoop.mapred.FileSplit) Slices(io.airlift.slice.Slices) Configuration(org.apache.hadoop.conf.Configuration) Map(java.util.Map) StructuralTestUtil.rowBlockOf(io.trino.testing.StructuralTestUtil.rowBlockOf) BigInteger(java.math.BigInteger) ConnectorPageSource(io.trino.spi.connector.ConnectorPageSource) ObjectInspector(org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector) MaterializedRow(io.trino.testing.MaterializedRow) Assert.assertFalse(org.testng.Assert.assertFalse) SMALLINT(io.trino.spi.type.SmallintType.SMALLINT) PrimitiveObjectInspectorFactory.javaByteArrayObjectInspector(org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory.javaByteArrayObjectInspector) PrimitiveObjectInspectorFactory.javaFloatObjectInspector(org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory.javaFloatObjectInspector) PrimitiveObjectInspectorFactory.javaDoubleObjectInspector(org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory.javaDoubleObjectInspector) ImmutableList.toImmutableList(com.google.common.collect.ImmutableList.toImmutableList) MICROSECONDS_PER_MILLISECOND(io.trino.type.DateTimes.MICROSECONDS_PER_MILLISECOND) PrimitiveObjectInspectorFactory.javaHiveVarcharObjectInspector(org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory.javaHiveVarcharObjectInspector) ImmutableMap.toImmutableMap(com.google.common.collect.ImmutableMap.toImmutableMap) StructuralTestUtil.arrayBlockOf(io.trino.testing.StructuralTestUtil.arrayBlockOf) REAL(io.trino.spi.type.RealType.REAL) PrimitiveObjectInspectorFactory.javaIntObjectInspector(org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory.javaIntObjectInspector) StructField(org.apache.hadoop.hive.serde2.objectinspector.StructField) JavaHiveCharObjectInspector(org.apache.hadoop.hive.serde2.objectinspector.primitive.JavaHiveCharObjectInspector) MethodHandle(java.lang.invoke.MethodHandle) Slice(io.airlift.slice.Slice) 
PageBuilder(io.trino.spi.PageBuilder) Page(io.trino.spi.Page) SqlDecimal(io.trino.spi.type.SqlDecimal) BOOLEAN(io.trino.spi.type.BooleanType.BOOLEAN) TimestampType(io.trino.spi.type.TimestampType) ArrayList(java.util.ArrayList) PrimitiveObjectInspectorFactory.javaShortObjectInspector(org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory.javaShortObjectInspector) Chars.padSpaces(io.trino.spi.type.Chars.padSpaces) VARBINARY(io.trino.spi.type.VarbinaryType.VARBINARY) Math.floorDiv(java.lang.Math.floorDiv) Int128(io.trino.spi.type.Int128) Arrays.fill(java.util.Arrays.fill) RecordCursor(io.trino.spi.connector.RecordCursor) Properties(java.util.Properties) TESTING_TYPE_MANAGER(io.trino.type.InternalTypeManager.TESTING_TYPE_MANAGER) IOException(java.io.IOException) ConnectorSession(io.trino.spi.connector.ConnectorSession) ObjectInspectorFactory.getStandardStructObjectInspector(org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorFactory.getStandardStructObjectInspector) StructuralTestUtil.mapBlockOf(io.trino.testing.StructuralTestUtil.mapBlockOf) UTC(org.joda.time.DateTimeZone.UTC) DecimalTypeInfo(org.apache.hadoop.hive.serde2.typeinfo.DecimalTypeInfo) File(java.io.File) SettableStructObjectInspector(org.apache.hadoop.hive.serde2.objectinspector.SettableStructObjectInspector) DOUBLE(io.trino.spi.type.DoubleType.DOUBLE) SESSION(io.trino.plugin.hive.HiveTestUtils.SESSION) SqlVarbinary(io.trino.spi.type.SqlVarbinary) HiveTestUtils.mapType(io.trino.plugin.hive.HiveTestUtils.mapType) CharType(io.trino.spi.type.CharType) TINYINT(io.trino.spi.type.TinyintType.TINYINT) BlockBuilder(io.trino.spi.block.BlockBuilder) VarcharType.createVarcharType(io.trino.spi.type.VarcharType.createVarcharType) RecordWriter(org.apache.hadoop.hive.ql.exec.FileSinkOperator.RecordWriter) PARTITION_KEY(io.trino.plugin.hive.HiveColumnHandle.ColumnType.PARTITION_KEY) DateTimeTestingUtils.sqlTimestampOf(io.trino.testing.DateTimeTestingUtils.sqlTimestampOf) MaterializedResult(io.trino.testing.MaterializedResult) CompressionConfigUtil.configureCompression(io.trino.plugin.hive.util.CompressionConfigUtil.configureCompression) SqlTimestamp(io.trino.spi.type.SqlTimestamp) Preconditions.checkArgument(com.google.common.base.Preconditions.checkArgument) StructuralTestUtil.decimalArrayBlockOf(io.trino.testing.StructuralTestUtil.decimalArrayBlockOf) Block(io.trino.spi.block.Block) Path(org.apache.hadoop.fs.Path) INTEGER(io.trino.spi.type.IntegerType.INTEGER) StorageFormat(io.trino.plugin.hive.metastore.StorageFormat) DateTimeFormat(org.joda.time.format.DateTimeFormat) RowType(io.trino.spi.type.RowType) ImmutableMap(com.google.common.collect.ImmutableMap) ArrayType(io.trino.spi.type.ArrayType) MaterializedResult.materializeSourceDataStream(io.trino.testing.MaterializedResult.materializeSourceDataStream) HiveOutputFormat(org.apache.hadoop.hive.ql.io.HiveOutputFormat) Collectors(java.util.stream.Collectors) SqlDate(io.trino.spi.type.SqlDate) List(java.util.List) BIGINT(io.trino.spi.type.BigintType.BIGINT) Function.identity(java.util.function.Function.identity) Optional(java.util.Optional) DecimalType(io.trino.spi.type.DecimalType) TypeInfoFactory.getCharTypeInfo(org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory.getCharTypeInfo) PrimitiveObjectInspectorFactory.javaBooleanObjectInspector(org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory.javaBooleanObjectInspector) Type(io.trino.spi.type.Type) 
HiveColumnProjectionInfo.generatePartialName(io.trino.plugin.hive.HiveColumnProjectionInfo.generatePartialName) VarcharType.createUnboundedVarcharType(io.trino.spi.type.VarcharType.createUnboundedVarcharType) Assert.assertEquals(org.testng.Assert.assertEquals) HashMap(java.util.HashMap) Float.intBitsToFloat(java.lang.Float.intBitsToFloat) OptionalInt(java.util.OptionalInt) Category(org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector.Category) VarcharType(io.trino.spi.type.VarcharType) HiveVarchar(org.apache.hadoop.hive.common.type.HiveVarchar) ImmutableList(com.google.common.collect.ImmutableList) Objects.requireNonNull(java.util.Objects.requireNonNull) HiveColumnHandle.createBaseColumn(io.trino.plugin.hive.HiveColumnHandle.createBaseColumn) HIVE_DEFAULT_DYNAMIC_PARTITION(io.trino.plugin.hive.HivePartitionKey.HIVE_DEFAULT_DYNAMIC_PARTITION) ObjectInspectorFactory.getStandardMapObjectInspector(org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorFactory.getStandardMapObjectInspector) Timestamp(org.apache.hadoop.hive.common.type.Timestamp) UTF_8(java.nio.charset.StandardCharsets.UTF_8) DateTime(org.joda.time.DateTime) HiveTestUtils.isDistinctFrom(io.trino.plugin.hive.HiveTestUtils.isDistinctFrom) ObjectInspectorFactory.getStandardListObjectInspector(org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorFactory.getStandardListObjectInspector) JobConf(org.apache.hadoop.mapred.JobConf) TimeUnit(java.util.concurrent.TimeUnit) Collectors.toList(java.util.stream.Collectors.toList) Serializer(org.apache.hadoop.hive.serde2.Serializer) HiveDecimal(org.apache.hadoop.hive.common.type.HiveDecimal) Assert.assertTrue(org.testng.Assert.assertTrue) PrimitiveObjectInspectorFactory.javaStringObjectInspector(org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory.javaStringObjectInspector) HiveUtil.isStructuralType(io.trino.plugin.hive.util.HiveUtil.isStructuralType) REGULAR(io.trino.plugin.hive.HiveColumnHandle.ColumnType.REGULAR)
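
The "columns" and "columns.types" serde properties built in the method are plain comma-joined lists, which is the layout Hive's serializers expect. For a hypothetical two-column schema the resulting properties would look like this:

import java.util.Properties;

Properties tableProperties = new Properties();
// column names, comma-separated, in declaration order
tableProperties.setProperty("columns", "id,name");
// Hive type names, in the same order as the names above
tableProperties.setProperty("columns.types", "bigint,string");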

Example 55 with ConnectorSession

use of io.trino.spi.connector.ConnectorSession in project trino by trinodb.

In class AbstractTestHiveFileSystem, method createTable.

private void createTable(SchemaTableName tableName, HiveStorageFormat storageFormat) throws Exception {
    List<ColumnMetadata> columns = ImmutableList.<ColumnMetadata>builder().add(new ColumnMetadata("id", BIGINT)).build();
    MaterializedResult data = MaterializedResult.resultBuilder(newSession(), BIGINT).row(1L).row(3L).row(2L).build();
    try (Transaction transaction = newTransaction()) {
        ConnectorMetadata metadata = transaction.getMetadata();
        ConnectorSession session = newSession();
        // begin creating the table
        ConnectorTableMetadata tableMetadata = new ConnectorTableMetadata(tableName, columns, createTableProperties(storageFormat));
        ConnectorOutputTableHandle outputHandle = metadata.beginCreateTable(session, tableMetadata, Optional.empty(), NO_RETRIES);
        // write the records
        ConnectorPageSink sink = pageSinkProvider.createPageSink(transaction.getTransactionHandle(), session, outputHandle);
        sink.appendPage(data.toPage());
        Collection<Slice> fragments = getFutureValue(sink.finish());
        // commit the table
        metadata.finishCreateTable(session, outputHandle, fragments, ImmutableList.of());
        transaction.commit();
        // Hack to work around the metastore not being configured for S3 or other FS.
        // The metastore tries to validate the location when creating the
        // table, which fails without explicit configuration for the file system.
        // We work around that by using a dummy location when creating the
        // table and update it here to the correct location.
        metastoreClient.updateTableLocation(
                database,
                tableName.getTableName(),
                locationService.getTableWriteInfo(((HiveOutputTableHandle) outputHandle).getLocationHandle(), false)
                        .getTargetPath()
                        .toString());
    }
    try (Transaction transaction = newTransaction()) {
        ConnectorMetadata metadata = transaction.getMetadata();
        ConnectorSession session = newSession();
        // load the new table
        ConnectorTableHandle tableHandle = getTableHandle(metadata, tableName);
        List<ColumnHandle> columnHandles = filterNonHiddenColumnHandles(metadata.getColumnHandles(session, tableHandle).values());
        // verify the metadata
        ConnectorTableMetadata tableMetadata = metadata.getTableMetadata(session, getTableHandle(metadata, tableName));
        assertEquals(filterNonHiddenColumnMetadata(tableMetadata.getColumns()), columns);
        // verify the data
        metadata.beginQuery(session);
        ConnectorSplitSource splitSource = getSplits(splitManager, transaction, session, tableHandle);
        ConnectorSplit split = getOnlyElement(getAllSplits(splitSource));
        try (ConnectorPageSource pageSource = pageSourceProvider.createPageSource(
                transaction.getTransactionHandle(), session, split, tableHandle, columnHandles, DynamicFilter.EMPTY)) {
            MaterializedResult result = materializeSourceDataStream(session, pageSource, getTypes(columnHandles));
            assertEqualsIgnoreOrder(result.getMaterializedRows(), data.getMaterializedRows());
        }
        metadata.cleanupQuery(session);
    }
}
Also used : ColumnHandle(io.trino.spi.connector.ColumnHandle) ColumnMetadata(io.trino.spi.connector.ColumnMetadata) AbstractTestHive.filterNonHiddenColumnMetadata(io.trino.plugin.hive.AbstractTestHive.filterNonHiddenColumnMetadata) ConnectorSplitSource(io.trino.spi.connector.ConnectorSplitSource) ConnectorPageSource(io.trino.spi.connector.ConnectorPageSource) ConnectorTableHandle(io.trino.spi.connector.ConnectorTableHandle) ConnectorOutputTableHandle(io.trino.spi.connector.ConnectorOutputTableHandle) Transaction(io.trino.plugin.hive.AbstractTestHive.Transaction) HiveTransaction(io.trino.plugin.hive.AbstractTestHive.HiveTransaction) Slice(io.airlift.slice.Slice) ConnectorSession(io.trino.spi.connector.ConnectorSession) ConnectorMetadata(io.trino.spi.connector.ConnectorMetadata) MaterializedResult(io.trino.testing.MaterializedResult) ConnectorPageSink(io.trino.spi.connector.ConnectorPageSink) ConnectorSplit(io.trino.spi.connector.ConnectorSplit) ConnectorTableMetadata(io.trino.spi.connector.ConnectorTableMetadata)
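
filterNonHiddenColumnHandles keeps the comparison with the declared columns honest by dropping synthetic columns such as $path and $bucket. Below is a minimal sketch of such a filter, assuming HiveColumnHandle.isHidden() marks those columns; the actual helper in AbstractTestHive may differ.

import static com.google.common.collect.ImmutableList.toImmutableList;

import java.util.Collection;
import java.util.List;
import io.trino.plugin.hive.HiveColumnHandle;
import io.trino.spi.connector.ColumnHandle;

// Drop hidden (synthetic) columns so only user-declared columns remain.
static List<ColumnHandle> filterNonHiddenColumnHandles(Collection<ColumnHandle> columnHandles) {
    return columnHandles.stream()
            .filter(handle -> !((HiveColumnHandle) handle).isHidden())
            .collect(toImmutableList());
}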

Aggregations

ConnectorSession (io.trino.spi.connector.ConnectorSession): 252 usages
SchemaTableName (io.trino.spi.connector.SchemaTableName): 131 usages
ConnectorMetadata (io.trino.spi.connector.ConnectorMetadata): 122 usages
ConnectorTableHandle (io.trino.spi.connector.ConnectorTableHandle): 119 usages
List (java.util.List): 111 usages
ColumnHandle (io.trino.spi.connector.ColumnHandle): 108 usages
Optional (java.util.Optional): 107 usages
ImmutableList (com.google.common.collect.ImmutableList): 98 usages
Objects.requireNonNull (java.util.Objects.requireNonNull): 97 usages
TrinoException (io.trino.spi.TrinoException): 94 usages
ImmutableList.toImmutableList (com.google.common.collect.ImmutableList.toImmutableList): 92 usages
Map (java.util.Map): 88 usages
TestingConnectorSession (io.trino.testing.TestingConnectorSession): 87 usages
ImmutableMap (com.google.common.collect.ImmutableMap): 85 usages
TupleDomain (io.trino.spi.predicate.TupleDomain): 85 usages
ConnectorTableMetadata (io.trino.spi.connector.ConnectorTableMetadata): 83 usages
Test (org.testng.annotations.Test): 82 usages
ColumnMetadata (io.trino.spi.connector.ColumnMetadata): 80 usages
Constraint (io.trino.spi.connector.Constraint): 76 usages
Type (io.trino.spi.type.Type): 72 usages