Search in sources:

Example 11 with ConnectorTableMetadata

Use of io.trino.spi.connector.ConnectorTableMetadata in project trino by trinodb.

From the class AbstractTestHive, method assertGetRecords.

protected void assertGetRecords(String tableName, HiveStorageFormat hiveStorageFormat) throws Exception {
    try (Transaction transaction = newTransaction()) {
        ConnectorSession session = newSession();
        ConnectorMetadata metadata = transaction.getMetadata();
        metadata.beginQuery(session);
        ConnectorTableHandle tableHandle = getTableHandle(metadata, new SchemaTableName(database, tableName));
        ConnectorTableMetadata tableMetadata = metadata.getTableMetadata(session, tableHandle);
        HiveSplit hiveSplit = getHiveSplit(tableHandle, transaction, session);
        List<ColumnHandle> columnHandles = ImmutableList.copyOf(metadata.getColumnHandles(session, tableHandle).values());
        ConnectorPageSource pageSource = pageSourceProvider.createPageSource(transaction.getTransactionHandle(), session, hiveSplit, tableHandle, columnHandles, DynamicFilter.EMPTY);
        assertGetRecords(hiveStorageFormat, tableMetadata, hiveSplit, pageSource, columnHandles);
    }
}
Also used: HiveColumnHandle.bucketColumnHandle (io.trino.plugin.hive.HiveColumnHandle.bucketColumnHandle), ColumnHandle (io.trino.spi.connector.ColumnHandle), ConnectorSession (io.trino.spi.connector.ConnectorSession), TestingConnectorSession (io.trino.testing.TestingConnectorSession), ConnectorMetadata (io.trino.spi.connector.ConnectorMetadata), ConnectorPageSource (io.trino.spi.connector.ConnectorPageSource), CatalogSchemaTableName (io.trino.spi.connector.CatalogSchemaTableName), SchemaTableName (io.trino.spi.connector.SchemaTableName), ConnectorTableMetadata (io.trino.spi.connector.ConnectorTableMetadata), ConnectorTableHandle (io.trino.spi.connector.ConnectorTableHandle)
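
The final line delegates to an assertGetRecords overload that checks the page source contents column by column. A minimal sketch of how such a page source can be drained for assertions, assuming the materializeSourceDataStream utility from io.trino.testing.MaterializedResult and column types taken from the table metadata above:

// Sketch only: derive the column types from the ConnectorTableMetadata above,
// then drain the source into a MaterializedResult for row-level assertions.
List<Type> types = tableMetadata.getColumns().stream()
        .map(ColumnMetadata::getType)
        .collect(toImmutableList());
MaterializedResult result = materializeSourceDataStream(session, pageSource, types);
assertThat(result.getRowCount()).isGreaterThan(0);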

Example 12 with ConnectorTableMetadata

Use of io.trino.spi.connector.ConnectorTableMetadata in project trino by trinodb.

From the class AbstractTestHive, method testTableCreationRollback.

@Test
public void testTableCreationRollback() throws Exception {
    SchemaTableName temporaryCreateRollbackTable = temporaryTable("create_rollback");
    try {
        Path stagingPathRoot;
        try (Transaction transaction = newTransaction()) {
            ConnectorSession session = newSession();
            ConnectorMetadata metadata = transaction.getMetadata();
            // begin creating the table
            ConnectorTableMetadata tableMetadata = new ConnectorTableMetadata(temporaryCreateRollbackTable, CREATE_TABLE_COLUMNS, createTableProperties(RCBINARY));
            ConnectorOutputTableHandle outputHandle = metadata.beginCreateTable(session, tableMetadata, Optional.empty(), NO_RETRIES);
            // write the data
            ConnectorPageSink sink = pageSinkProvider.createPageSink(transaction.getTransactionHandle(), session, outputHandle);
            sink.appendPage(CREATE_TABLE_DATA.toPage());
            getFutureValue(sink.finish());
            // verify we have data files
            stagingPathRoot = getStagingPathRoot(outputHandle);
            HdfsContext context = new HdfsContext(session);
            assertFalse(listAllDataFiles(context, stagingPathRoot).isEmpty());
            // rollback the table
            transaction.rollback();
        }
        // verify all files have been deleted
        HdfsContext context = new HdfsContext(newSession());
        assertTrue(listAllDataFiles(context, stagingPathRoot).isEmpty());
        // verify table is not in the metastore
        try (Transaction transaction = newTransaction()) {
            ConnectorSession session = newSession();
            ConnectorMetadata metadata = transaction.getMetadata();
            assertNull(metadata.getTableHandle(session, temporaryCreateRollbackTable));
        }
    } finally {
        dropTable(temporaryCreateRollbackTable);
    }
}
Also used: Path (org.apache.hadoop.fs.Path), HiveSessionProperties.getTemporaryStagingDirectoryPath (io.trino.plugin.hive.HiveSessionProperties.getTemporaryStagingDirectoryPath), ConnectorOutputTableHandle (io.trino.spi.connector.ConnectorOutputTableHandle), ConnectorSession (io.trino.spi.connector.ConnectorSession), TestingConnectorSession (io.trino.testing.TestingConnectorSession), ConnectorMetadata (io.trino.spi.connector.ConnectorMetadata), HdfsContext (io.trino.plugin.hive.HdfsEnvironment.HdfsContext), ConnectorPageSink (io.trino.spi.connector.ConnectorPageSink), CatalogSchemaTableName (io.trino.spi.connector.CatalogSchemaTableName), SchemaTableName (io.trino.spi.connector.SchemaTableName), ConnectorTableMetadata (io.trino.spi.connector.ConnectorTableMetadata), Test (org.testng.annotations.Test)
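
Because the transaction is rolled back, the staged files are deleted and the table never reaches the metastore. For contrast, a sketch of the commit path under the same handles, mirroring the finishCreateTable usage in Example 15 below:

// Commit path for comparison: finish the sink, finish the create-table
// operation, then commit; the staged files become the table's data files.
Collection<Slice> fragments = getFutureValue(sink.finish());
metadata.finishCreateTable(session, outputHandle, fragments, ImmutableList.of());
transaction.commit();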

Example 13 with ConnectorTableMetadata

Use of io.trino.spi.connector.ConnectorTableMetadata in project trino by trinodb.

From the class AbstractTestHive, method createDummyTable.

private void createDummyTable(SchemaTableName tableName) {
    try (Transaction transaction = newTransaction()) {
        ConnectorSession session = newSession();
        ConnectorMetadata metadata = transaction.getMetadata();
        List<ColumnMetadata> columns = ImmutableList.of(new ColumnMetadata("dummy", createUnboundedVarcharType()));
        ConnectorTableMetadata tableMetadata = new ConnectorTableMetadata(tableName, columns, createTableProperties(TEXTFILE));
        ConnectorOutputTableHandle handle = metadata.beginCreateTable(session, tableMetadata, Optional.empty(), NO_RETRIES);
        metadata.finishCreateTable(session, handle, ImmutableList.of(), ImmutableList.of());
        transaction.commit();
    }
}
Also used: ColumnMetadata (io.trino.spi.connector.ColumnMetadata), ConnectorOutputTableHandle (io.trino.spi.connector.ConnectorOutputTableHandle), ConnectorSession (io.trino.spi.connector.ConnectorSession), TestingConnectorSession (io.trino.testing.TestingConnectorSession), ConnectorMetadata (io.trino.spi.connector.ConnectorMetadata), ConnectorTableMetadata (io.trino.spi.connector.ConnectorTableMetadata)
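
A hypothetical call site for the helper, reusing the temporaryTable and dropTable helpers that Example 12 relies on:

// Hypothetical usage: create a throwaway table, exercise code that needs an
// existing table, then clean up.
SchemaTableName dummy = temporaryTable("dummy");
try {
    createDummyTable(dummy);
    // ... code under test that requires an existing table ...
}
finally {
    dropTable(dummy);
}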

Example 14 with ConnectorTableMetadata

Use of io.trino.spi.connector.ConnectorTableMetadata in project trino by trinodb.

From the class AbstractTestHive, method testGetTableSchemaNotReadablePartition.

@Test
public void testGetTableSchemaNotReadablePartition() {
    try (Transaction transaction = newTransaction()) {
        ConnectorMetadata metadata = transaction.getMetadata();
        ConnectorTableHandle tableHandle = getTableHandle(metadata, tableNotReadable);
        ConnectorTableMetadata tableMetadata = metadata.getTableMetadata(newSession(), tableHandle);
        Map<String, ColumnMetadata> map = uniqueIndex(tableMetadata.getColumns(), ColumnMetadata::getName);
        assertPrimitiveField(map, "t_string", createUnboundedVarcharType(), false);
    }
}
Also used: ColumnMetadata (io.trino.spi.connector.ColumnMetadata), ConnectorMetadata (io.trino.spi.connector.ConnectorMetadata), ConnectorTableMetadata (io.trino.spi.connector.ConnectorTableMetadata), ConnectorTableHandle (io.trino.spi.connector.ConnectorTableHandle), Test (org.testng.annotations.Test)
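
assertPrimitiveField is a private helper of AbstractTestHive; a plausible sketch of the checks it performs, where columnExtraInfo(boolean) is an assumed helper that renders the partition-key flag:

// Plausible sketch only; the real helper is private to AbstractTestHive.
// columnExtraInfo(boolean) is an assumed helper for the partition-key marker.
private static void assertPrimitiveField(Map<String, ColumnMetadata> map, String name, Type type, boolean partitionKey)
{
    assertTrue(map.containsKey(name));
    ColumnMetadata column = map.get(name);
    assertEquals(column.getType(), type, name);
    assertEquals(column.getExtraInfo(), columnExtraInfo(partitionKey));
}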

Example 15 with ConnectorTableMetadata

Use of io.trino.spi.connector.ConnectorTableMetadata in project trino by trinodb.

From the class AbstractTestHive, method doTestBucketSortedTables.

private void doTestBucketSortedTables(SchemaTableName table) throws IOException {
    int bucketCount = 3;
    int expectedRowCount = 0;
    try (Transaction transaction = newTransaction()) {
        ConnectorSession session = newSession();
        ConnectorMetadata metadata = transaction.getMetadata();
        // begin creating the table
        ConnectorTableMetadata tableMetadata = new ConnectorTableMetadata(
                table,
                ImmutableList.<ColumnMetadata>builder()
                        .add(new ColumnMetadata("id", VARCHAR))
                        .add(new ColumnMetadata("value_asc", VARCHAR))
                        .add(new ColumnMetadata("value_desc", BIGINT))
                        .add(new ColumnMetadata("ds", VARCHAR))
                        .build(),
                ImmutableMap.<String, Object>builder()
                        .put(STORAGE_FORMAT_PROPERTY, RCBINARY)
                        .put(PARTITIONED_BY_PROPERTY, ImmutableList.of("ds"))
                        .put(BUCKETED_BY_PROPERTY, ImmutableList.of("id"))
                        .put(BUCKET_COUNT_PROPERTY, bucketCount)
                        .put(SORTED_BY_PROPERTY, ImmutableList.builder()
                                .add(new SortingColumn("value_asc", ASCENDING))
                                .add(new SortingColumn("value_desc", DESCENDING))
                                .build())
                        .buildOrThrow());
        ConnectorOutputTableHandle outputHandle = metadata.beginCreateTable(session, tableMetadata, Optional.empty(), NO_RETRIES);
        // write the data
        ConnectorPageSink sink = pageSinkProvider.createPageSink(transaction.getTransactionHandle(), session, outputHandle);
        List<Type> types = tableMetadata.getColumns().stream().map(ColumnMetadata::getType).collect(toList());
        ThreadLocalRandom random = ThreadLocalRandom.current();
        for (int i = 0; i < 50; i++) {
            MaterializedResult.Builder builder = MaterializedResult.resultBuilder(session, types);
            for (int j = 0; j < 1000; j++) {
                builder.row(sha256().hashLong(random.nextLong()).toString(), "test" + random.nextInt(100), random.nextLong(100_000), "2018-04-01");
                expectedRowCount++;
            }
            sink.appendPage(builder.build().toPage());
        }
        HdfsContext context = new HdfsContext(session);
        // verify we have enough temporary files per bucket to require multiple passes
        Path stagingPathRoot;
        if (isTemporaryStagingDirectoryEnabled(session)) {
            stagingPathRoot = new Path(getTemporaryStagingDirectoryPath(session).replace("${USER}", context.getIdentity().getUser()));
        } else {
            stagingPathRoot = getStagingPathRoot(outputHandle);
        }
        assertThat(listAllDataFiles(context, stagingPathRoot))
                .filteredOn(file -> file.contains(".tmp-sort."))
                .size()
                .isGreaterThan(bucketCount * getHiveConfig().getMaxOpenSortFiles() * 2);
        // finish the write
        Collection<Slice> fragments = getFutureValue(sink.finish());
        // verify there are no temporary files
        for (String file : listAllDataFiles(context, stagingPathRoot)) {
            assertThat(file).doesNotContain(".tmp-sort.");
        }
        // finish creating table
        metadata.finishCreateTable(session, outputHandle, fragments, ImmutableList.of());
        transaction.commit();
    }
    // verify that bucket files are sorted
    try (Transaction transaction = newTransaction()) {
        ConnectorMetadata metadata = transaction.getMetadata();
        ConnectorSession session = newSession();
        metadata.beginQuery(session);
        ConnectorTableHandle tableHandle = getTableHandle(metadata, table);
        List<ColumnHandle> columnHandles = ImmutableList.copyOf(metadata.getColumnHandles(session, tableHandle).values());
        // verify local sorting property
        ConnectorTableProperties properties = metadata.getTableProperties(
                newSession(ImmutableMap.of(
                        "propagate_table_scan_sorting_properties", true,
                        "bucket_execution_enabled", false)),
                tableHandle);
        Map<String, Integer> columnIndex = indexColumns(columnHandles);
        assertEquals(properties.getLocalProperties(), ImmutableList.of(
                new SortingProperty<>(columnHandles.get(columnIndex.get("value_asc")), ASC_NULLS_FIRST),
                new SortingProperty<>(columnHandles.get(columnIndex.get("value_desc")), DESC_NULLS_LAST)));
        assertThat(metadata.getTableProperties(newSession(), tableHandle).getLocalProperties()).isEmpty();
        List<ConnectorSplit> splits = getAllSplits(tableHandle, transaction, session);
        assertThat(splits).hasSize(bucketCount);
        int actualRowCount = 0;
        for (ConnectorSplit split : splits) {
            try (ConnectorPageSource pageSource = pageSourceProvider.createPageSource(transaction.getTransactionHandle(), session, split, tableHandle, columnHandles, DynamicFilter.EMPTY)) {
                String lastValueAsc = null;
                long lastValueDesc = -1;
                while (!pageSource.isFinished()) {
                    Page page = pageSource.getNextPage();
                    if (page == null) {
                        continue;
                    }
                    for (int i = 0; i < page.getPositionCount(); i++) {
                        Block blockAsc = page.getBlock(1);
                        Block blockDesc = page.getBlock(2);
                        assertFalse(blockAsc.isNull(i));
                        assertFalse(blockDesc.isNull(i));
                        String valueAsc = VARCHAR.getSlice(blockAsc, i).toStringUtf8();
                        if (lastValueAsc != null) {
                            assertGreaterThanOrEqual(valueAsc, lastValueAsc);
                            if (valueAsc.equals(lastValueAsc)) {
                                long valueDesc = BIGINT.getLong(blockDesc, i);
                                if (lastValueDesc != -1) {
                                    assertLessThanOrEqual(valueDesc, lastValueDesc);
                                }
                                lastValueDesc = valueDesc;
                            } else {
                                lastValueDesc = -1;
                            }
                        }
                        lastValueAsc = valueAsc;
                        actualRowCount++;
                    }
                }
            }
        }
        assertThat(actualRowCount).isEqualTo(expectedRowCount);
    }
}
Also used: ColumnMetadata (io.trino.spi.connector.ColumnMetadata), Page (io.trino.spi.Page), ConnectorPageSource (io.trino.spi.connector.ConnectorPageSource), ThreadLocalRandom (java.util.concurrent.ThreadLocalRandom), ConnectorSession (io.trino.spi.connector.ConnectorSession), TestingConnectorSession (io.trino.testing.TestingConnectorSession), ConnectorMetadata (io.trino.spi.connector.ConnectorMetadata), HdfsContext (io.trino.plugin.hive.HdfsEnvironment.HdfsContext), ConnectorTableMetadata (io.trino.spi.connector.ConnectorTableMetadata), Path (org.apache.hadoop.fs.Path), HiveSessionProperties.getTemporaryStagingDirectoryPath (io.trino.plugin.hive.HiveSessionProperties.getTemporaryStagingDirectoryPath), HiveColumnHandle.bucketColumnHandle (io.trino.plugin.hive.HiveColumnHandle.bucketColumnHandle), ColumnHandle (io.trino.spi.connector.ColumnHandle), SortingColumn (io.trino.plugin.hive.metastore.SortingColumn), SortingProperty (io.trino.spi.connector.SortingProperty), Constraint (io.trino.spi.connector.Constraint), ConnectorTableHandle (io.trino.spi.connector.ConnectorTableHandle), MapType (io.trino.spi.type.MapType), HiveTestUtils.mapType (io.trino.plugin.hive.HiveTestUtils.mapType), VarcharType.createVarcharType (io.trino.spi.type.VarcharType.createVarcharType), HiveTestUtils.arrayType (io.trino.plugin.hive.HiveTestUtils.arrayType), HiveTestUtils.rowType (io.trino.plugin.hive.HiveTestUtils.rowType), CharType.createCharType (io.trino.spi.type.CharType.createCharType), HiveType.toHiveType (io.trino.plugin.hive.HiveType.toHiveType), DecimalType.createDecimalType (io.trino.spi.type.DecimalType.createDecimalType), CharType (io.trino.spi.type.CharType), TableType (org.apache.hadoop.hive.metastore.TableType), RowType (io.trino.spi.type.RowType), ArrayType (io.trino.spi.type.ArrayType), Type (io.trino.spi.type.Type), VarcharType.createUnboundedVarcharType (io.trino.spi.type.VarcharType.createUnboundedVarcharType), VarcharType (io.trino.spi.type.VarcharType), ConnectorOutputTableHandle (io.trino.spi.connector.ConnectorOutputTableHandle), Slices.utf8Slice (io.airlift.slice.Slices.utf8Slice), Slice (io.airlift.slice.Slice), Block (io.trino.spi.block.Block), ConnectorPageSink (io.trino.spi.connector.ConnectorPageSink), MaterializedResult (io.trino.testing.MaterializedResult), ConnectorTableProperties (io.trino.spi.connector.ConnectorTableProperties), ConnectorSplit (io.trino.spi.connector.ConnectorSplit)
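
The indexColumns helper referenced above is also private to AbstractTestHive; a plausible sketch that maps each column name to its position in the handle list, assuming the handles are HiveColumnHandle instances as elsewhere in these tests:

// Plausible sketch only: column name -> position in the handle list.
private static Map<String, Integer> indexColumns(List<ColumnHandle> columnHandles)
{
    ImmutableMap.Builder<String, Integer> index = ImmutableMap.builder();
    int position = 0;
    for (ColumnHandle columnHandle : columnHandles) {
        index.put(((HiveColumnHandle) columnHandle).getName(), position);
        position++;
    }
    return index.buildOrThrow();
}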

Aggregations

ConnectorTableMetadata (io.trino.spi.connector.ConnectorTableMetadata): 113
SchemaTableName (io.trino.spi.connector.SchemaTableName): 69
ColumnMetadata (io.trino.spi.connector.ColumnMetadata): 62
Test (org.testng.annotations.Test): 48
ConnectorTableHandle (io.trino.spi.connector.ConnectorTableHandle): 39
ConnectorSession (io.trino.spi.connector.ConnectorSession): 35
ImmutableList (com.google.common.collect.ImmutableList): 34
List (java.util.List): 32
ImmutableMap (com.google.common.collect.ImmutableMap): 31
ConnectorMetadata (io.trino.spi.connector.ConnectorMetadata): 30
ColumnHandle (io.trino.spi.connector.ColumnHandle): 25
ImmutableList.toImmutableList (com.google.common.collect.ImmutableList.toImmutableList): 24
ConnectorOutputTableHandle (io.trino.spi.connector.ConnectorOutputTableHandle): 20
Optional (java.util.Optional): 20
TrinoException (io.trino.spi.TrinoException): 19
Map (java.util.Map): 19
Type (io.trino.spi.type.Type): 18
CatalogSchemaTableName (io.trino.spi.connector.CatalogSchemaTableName): 17
Constraint (io.trino.spi.connector.Constraint): 17
ImmutableSet (com.google.common.collect.ImmutableSet): 16
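
The counts above show that ConnectorTableMetadata almost always appears together with SchemaTableName and ColumnMetadata. A minimal, self-contained sketch of that core construction pattern, with hypothetical schema and table names:

// Minimal sketch of the most common combination in the aggregation above.
// The schema and table names are hypothetical.
import com.google.common.collect.ImmutableList;
import io.trino.spi.connector.ColumnMetadata;
import io.trino.spi.connector.ConnectorTableMetadata;
import io.trino.spi.connector.SchemaTableName;

import java.util.List;

import static io.trino.spi.type.BigintType.BIGINT;
import static io.trino.spi.type.VarcharType.VARCHAR;

public final class TableMetadataExample
{
    private TableMetadataExample() {}

    public static ConnectorTableMetadata example()
    {
        SchemaTableName name = new SchemaTableName("example_schema", "example_table");
        List<ColumnMetadata> columns = ImmutableList.of(
                new ColumnMetadata("id", BIGINT),
                new ColumnMetadata("value", VARCHAR));
        return new ConnectorTableMetadata(name, columns);
    }
}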