
Example 26 with ConnectorSession

Use of io.prestosql.spi.connector.ConnectorSession in project hetu-core by openlookeng.

From class MetadataManager, method listApplicableRoles:

@Override
public Set<RoleGrant> listApplicableRoles(Session session, PrestoPrincipal principal, String catalog) {
    Optional<CatalogMetadata> catalogMetadata = getOptionalCatalogMetadata(session, catalog);
    if (!catalogMetadata.isPresent()) {
        // unknown or inaccessible catalog: nothing to report
        return ImmutableSet.of();
    }
    CatalogName catalogName = catalogMetadata.get().getCatalogName();
    // narrow the engine-level Session to a connector-scoped ConnectorSession
    ConnectorSession connectorSession = session.toConnectorSession(catalogName);
    ConnectorMetadata metadata = catalogMetadata.get().getMetadataFor(catalogName);
    // delegate to the connector and return an immutable snapshot of the grants
    return ImmutableSet.copyOf(metadata.listApplicableRoles(connectorSession, principal));
}
Also used: CatalogName (io.prestosql.spi.connector.CatalogName), ConnectorSession (io.prestosql.spi.connector.ConnectorSession), ConnectorMetadata (io.prestosql.spi.connector.ConnectorMetadata)
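
For context, a caller might use this method to test effective role membership. The following helper is a minimal sketch rather than hetu-core code: the hasRole name is ours, and it assumes the io.prestosql.metadata.Metadata interface declaring listApplicableRoles above, plus io.prestosql.spi.security.RoleGrant exposing getRoleName().

// Hypothetical helper (not in hetu-core): checks whether a principal holds
// a given role, directly or transitively, within one catalog.
static boolean hasRole(Metadata metadata, Session session, PrestoPrincipal principal, String catalog, String role) {
    return metadata.listApplicableRoles(session, principal, catalog).stream()
            .anyMatch(grant -> grant.getRoleName().equals(role));
}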

Example 27 with ConnectorSession

Use of io.prestosql.spi.connector.ConnectorSession in project hetu-core by openlookeng.

From class AbstractTestHive, method doInsertIntoNewPartition:

private void doInsertIntoNewPartition(HiveStorageFormat storageFormat, SchemaTableName tableName) throws Exception {
    // creating the table
    doCreateEmptyTable(tableName, storageFormat, CREATE_TABLE_COLUMNS_PARTITIONED);
    // insert the data
    String queryId = insertData(tableName, CREATE_TABLE_PARTITIONED_DATA);
    Set<String> existingFiles;
    try (Transaction transaction = newTransaction()) {
        // verify partitions were created
        HiveIdentity identity = new HiveIdentity(newSession());
        List<String> partitionNames = transaction.getMetastore(tableName.getSchemaName())
                .getPartitionNames(identity, tableName.getSchemaName(), tableName.getTableName())
                .orElseThrow(() -> new AssertionError("Table does not exist: " + tableName));
        assertEqualsIgnoreOrder(partitionNames, CREATE_TABLE_PARTITIONED_DATA.getMaterializedRows().stream()
                .map(row -> "ds=" + row.getField(CREATE_TABLE_PARTITIONED_DATA.getTypes().size() - 1))
                .collect(toList()));
        // verify the presto version and query id recorded on each new partition
        Map<String, Optional<Partition>> partitions = getMetastoreClient()
                .getPartitionsByNames(identity, tableName.getSchemaName(), tableName.getTableName(), partitionNames);
        assertEquals(partitions.size(), partitionNames.size());
        for (String partitionName : partitionNames) {
            Partition partition = partitions.get(partitionName).get();
            assertEquals(partition.getParameters().get(PRESTO_VERSION_NAME), TEST_SERVER_VERSION);
            assertEquals(partition.getParameters().get(PRESTO_QUERY_ID_NAME), queryId);
        }
        // load the new table
        ConnectorSession session = newSession();
        ConnectorMetadata metadata = transaction.getMetadata();
        metadata.beginQuery(session);
        ConnectorTableHandle tableHandle = getTableHandle(metadata, tableName);
        List<ColumnHandle> columnHandles = filterNonHiddenColumnHandles(metadata.getColumnHandles(session, tableHandle).values());
        // verify the data
        MaterializedResult result = readTable(transaction, tableHandle, columnHandles, session, TupleDomain.all(), OptionalInt.empty(), Optional.of(storageFormat));
        assertEqualsIgnoreOrder(result.getMaterializedRows(), CREATE_TABLE_PARTITIONED_DATA.getMaterializedRows());
        // test rollback
        existingFiles = listAllDataFiles(transaction, tableName.getSchemaName(), tableName.getTableName());
        assertFalse(existingFiles.isEmpty());
        // test statistics
        for (String partitionName : partitionNames) {
            HiveBasicStatistics partitionStatistics = getBasicStatisticsForPartition(session, transaction, tableName, partitionName);
            assertEquals(partitionStatistics.getRowCount().getAsLong(), 1L);
            assertEquals(partitionStatistics.getFileCount().getAsLong(), 1L);
            assertGreaterThan(partitionStatistics.getInMemoryDataSizeInBytes().getAsLong(), 0L);
            assertGreaterThan(partitionStatistics.getOnDiskDataSizeInBytes().getAsLong(), 0L);
        }
    }
    Path stagingPathRoot;
    try (Transaction transaction = newTransaction()) {
        ConnectorSession session = newSession();
        ConnectorMetadata metadata = transaction.getMetadata();
        ConnectorTableHandle tableHandle = getTableHandle(metadata, tableName);
        metadata.beginQuery(session);
        // "stage" insert data
        ConnectorInsertTableHandle insertTableHandle = metadata.beginInsert(session, tableHandle);
        stagingPathRoot = getStagingPathRoot(insertTableHandle);
        ConnectorPageSink sink = pageSinkProvider.createPageSink(transaction.getTransactionHandle(), session, insertTableHandle);
        sink.appendPage(CREATE_TABLE_PARTITIONED_DATA_2ND.toPage());
        Collection<Slice> fragments = getFutureValue(sink.finish());
        metadata.finishInsert(session, insertTableHandle, fragments, ImmutableList.of());
        // verify all temp files start with the unique prefix
        HdfsContext context = new HdfsContext(session, tableName.getSchemaName(), tableName.getTableName());
        Set<String> tempFiles = listAllDataFiles(context, getStagingPathRoot(insertTableHandle));
        assertFalse(tempFiles.isEmpty());
        for (String filePath : tempFiles) {
            assertThat(new Path(filePath).getName()).startsWith(session.getQueryId());
        }
        // rollback insert
        transaction.rollback();
    }
    // verify the data is unchanged
    try (Transaction transaction = newTransaction()) {
        ConnectorSession session = newSession();
        ConnectorMetadata metadata = transaction.getMetadata();
        metadata.beginQuery(session);
        ConnectorTableHandle tableHandle = getTableHandle(metadata, tableName);
        List<ColumnHandle> columnHandles = filterNonHiddenColumnHandles(metadata.getColumnHandles(session, tableHandle).values());
        MaterializedResult result = readTable(transaction, tableHandle, columnHandles, session, TupleDomain.all(), OptionalInt.empty(), Optional.empty());
        assertEqualsIgnoreOrder(result.getMaterializedRows(), CREATE_TABLE_PARTITIONED_DATA.getMaterializedRows());
        // verify we did not modify the table directory
        assertEquals(listAllDataFiles(transaction, tableName.getSchemaName(), tableName.getTableName()), existingFiles);
        // verify temp directory is empty
        HdfsContext context = new HdfsContext(session, tableName.getSchemaName(), tableName.getTableName());
        assertTrue(listAllDataFiles(context, stagingPathRoot).isEmpty());
    }
}
Also used: Path (org.apache.hadoop.fs.Path), Partition (io.prestosql.plugin.hive.metastore.Partition), HiveColumnHandle.bucketColumnHandle (io.prestosql.plugin.hive.HiveColumnHandle.bucketColumnHandle), ColumnHandle (io.prestosql.spi.connector.ColumnHandle), Optional (java.util.Optional), ConnectorInsertTableHandle (io.prestosql.spi.connector.ConnectorInsertTableHandle), HiveIdentity (io.prestosql.plugin.hive.authentication.HiveIdentity), ConnectorTableHandle (io.prestosql.spi.connector.ConnectorTableHandle), Slices.utf8Slice (io.airlift.slice.Slices.utf8Slice), Slice (io.airlift.slice.Slice), ConnectorSession (io.prestosql.spi.connector.ConnectorSession), TestingConnectorSession (io.prestosql.testing.TestingConnectorSession), ConnectorMetadata (io.prestosql.spi.connector.ConnectorMetadata), HdfsContext (io.prestosql.plugin.hive.HdfsEnvironment.HdfsContext), MaterializedResult (io.prestosql.testing.MaterializedResult), ConnectorPageSink (io.prestosql.spi.connector.ConnectorPageSink)
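
The temp-file check in the middle of this test relies on staged files being prefixed with the writing query's id, which is what makes rollback cleanup safe. That check could plausibly be extracted into a reusable helper; a sketch using the same listAllDataFiles and assertion helpers as the test above (the helper name is ours, not from AbstractTestHive):

// Hypothetical helper (not in AbstractTestHive): asserts that every staged
// file under the staging root carries the writing query's id as a prefix.
private void assertStagedFilesPrefixed(HdfsContext context, Path stagingRoot, String queryId) throws Exception {
    Set<String> tempFiles = listAllDataFiles(context, stagingRoot);
    assertFalse(tempFiles.isEmpty());
    for (String filePath : tempFiles) {
        assertThat(new Path(filePath).getName()).startsWith(queryId);
    }
}

The call in the test body would then read assertStagedFilesPrefixed(context, getStagingPathRoot(insertTableHandle), session.getQueryId()).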

Example 28 with ConnectorSession

Use of io.prestosql.spi.connector.ConnectorSession in project hetu-core by openlookeng.

From class AbstractTestHive, method doInsertUnsupportedWriteType:

private void doInsertUnsupportedWriteType(HiveStorageFormat storageFormat, SchemaTableName tableName) throws Exception {
    List<Column> columns = ImmutableList.of(new Column("dummy", HiveType.valueOf("uniontype<smallint,tinyint>"), Optional.empty()));
    List<Column> partitionColumns = ImmutableList.of(new Column("name", HIVE_STRING, Optional.empty()));
    createEmptyTable(tableName, storageFormat, columns, partitionColumns);
    try (Transaction transaction = newTransaction()) {
        ConnectorMetadata metadata = transaction.getMetadata();
        ConnectorSession session = newSession();
        metadata.beginQuery(session);
        ConnectorTableHandle tableHandle = getTableHandle(metadata, tableName);
        metadata.beginInsert(session, tableHandle);
        fail("expected failure");
    } catch (PrestoException e) {
        assertThat(e).hasMessageMatching("Inserting into Hive table .* with column type uniontype<smallint,tinyint> not supported");
    }
}
Also used: ViewColumn (io.prestosql.spi.connector.ConnectorViewDefinition.ViewColumn), Column (io.prestosql.plugin.hive.metastore.Column), SortingColumn (io.prestosql.plugin.hive.metastore.SortingColumn), ConnectorSession (io.prestosql.spi.connector.ConnectorSession), TestingConnectorSession (io.prestosql.testing.TestingConnectorSession), PrestoException (io.prestosql.spi.PrestoException), ConnectorMetadata (io.prestosql.spi.connector.ConnectorMetadata), ConnectorTableHandle (io.prestosql.spi.connector.ConnectorTableHandle)
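
Since the test already uses AssertJ's assertThat, the try/catch/fail pattern above could be collapsed with assertThatThrownBy; a minimal equivalent sketch, assuming AssertJ is on the test classpath:

// Equivalent assertion without the explicit try/catch/fail:
assertThatThrownBy(() -> metadata.beginInsert(session, tableHandle))
        .isInstanceOf(PrestoException.class)
        .hasMessageMatching("Inserting into Hive table .* with column type uniontype<smallint,tinyint> not supported");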

Example 29 with ConnectorSession

Use of io.prestosql.spi.connector.ConnectorSession in project hetu-core by openlookeng.

From class AbstractTestHive, method testGetPartitionSplitsTableNotReadablePartition:

@Test
public void testGetPartitionSplitsTableNotReadablePartition() {
    try (Transaction transaction = newTransaction()) {
        ConnectorMetadata metadata = transaction.getMetadata();
        ConnectorSession session = newSession();
        ConnectorTableHandle tableHandle = getTableHandle(metadata, tableNotReadable);
        assertNotNull(tableHandle);
        try {
            getSplitCount(splitManager.getSplits(transaction.getTransactionHandle(), session, tableHandle, UNGROUPED_SCHEDULING));
            fail("Expected HiveNotReadableException");
        } catch (HiveNotReadableException e) {
            assertThat(e).hasMessageMatching("Table '.*\\.presto_test_not_readable' is not readable: reason for not readable");
            assertEquals(e.getTableName(), tableNotReadable);
            assertEquals(e.getPartition(), Optional.empty());
        }
    }
}
Also used: ConnectorSession (io.prestosql.spi.connector.ConnectorSession), TestingConnectorSession (io.prestosql.testing.TestingConnectorSession), ConnectorMetadata (io.prestosql.spi.connector.ConnectorMetadata), ConnectorTableHandle (io.prestosql.spi.connector.ConnectorTableHandle), Test (org.testng.annotations.Test)

Example 30 with ConnectorSession

Use of io.prestosql.spi.connector.ConnectorSession in project hetu-core by openlookeng.

From class AbstractTestHiveFileSystem, method createTable:

private void createTable(SchemaTableName tableName, HiveStorageFormat storageFormat) throws Exception {
    List<ColumnMetadata> columns = ImmutableList.<ColumnMetadata>builder().add(new ColumnMetadata("id", BIGINT)).build();
    MaterializedResult data = MaterializedResult.resultBuilder(newSession(), BIGINT).row(1L).row(3L).row(2L).build();
    try (Transaction transaction = newTransaction()) {
        ConnectorMetadata metadata = transaction.getMetadata();
        ConnectorSession session = newSession();
        // begin creating the table
        ConnectorTableMetadata tableMetadata = new ConnectorTableMetadata(tableName, columns, createTableProperties(storageFormat));
        ConnectorOutputTableHandle outputHandle = metadata.beginCreateTable(session, tableMetadata, Optional.empty());
        // write the records
        ConnectorPageSink sink = pageSinkProvider.createPageSink(transaction.getTransactionHandle(), session, outputHandle);
        sink.appendPage(data.toPage());
        Collection<Slice> fragments = getFutureValue(sink.finish());
        // commit the table
        metadata.finishCreateTable(session, outputHandle, fragments, ImmutableList.of());
        transaction.commit();
        // Hack to work around the metastore not being configured for S3 or another file system.
        // The metastore tries to validate the location when creating the table,
        // which fails without explicit configuration for that file system.
        // We work around it by using a dummy location when creating the table,
        // then updating it here to the correct location.
        metastoreClient.updateTableLocation(
                database,
                tableName.getTableName(),
                locationService.getTableWriteInfo(((HiveOutputTableHandle) outputHandle).getLocationHandle(), false)
                        .getTargetPath()
                        .toString());
    }
    try (Transaction transaction = newTransaction()) {
        ConnectorMetadata metadata = transaction.getMetadata();
        ConnectorSession session = newSession();
        // load the new table
        ConnectorTableHandle tableHandle = getTableHandle(metadata, tableName);
        List<ColumnHandle> columnHandles = filterNonHiddenColumnHandles(metadata.getColumnHandles(session, tableHandle).values());
        // verify the metadata
        ConnectorTableMetadata tableMetadata = metadata.getTableMetadata(session, getTableHandle(metadata, tableName));
        assertEquals(filterNonHiddenColumnMetadata(tableMetadata.getColumns()), columns);
        // verify the data
        ConnectorSplitSource splitSource = splitManager.getSplits(transaction.getTransactionHandle(), session, tableHandle, UNGROUPED_SCHEDULING);
        ConnectorSplit split = getOnlyElement(getAllSplits(splitSource));
        try (ConnectorPageSource pageSource = pageSourceProvider.createPageSource(transaction.getTransactionHandle(), session, split, tableHandle, columnHandles)) {
            MaterializedResult result = materializeSourceDataStream(session, pageSource, getTypes(columnHandles));
            assertEqualsIgnoreOrder(result.getMaterializedRows(), data.getMaterializedRows());
        }
    }
}
Also used: ColumnHandle (io.prestosql.spi.connector.ColumnHandle), AbstractTestHive.filterNonHiddenColumnMetadata (io.prestosql.plugin.hive.AbstractTestHive.filterNonHiddenColumnMetadata), ColumnMetadata (io.prestosql.spi.connector.ColumnMetadata), ConnectorSplitSource (io.prestosql.spi.connector.ConnectorSplitSource), ConnectorPageSource (io.prestosql.spi.connector.ConnectorPageSource), ConnectorTableHandle (io.prestosql.spi.connector.ConnectorTableHandle), ConnectorOutputTableHandle (io.prestosql.spi.connector.ConnectorOutputTableHandle), HiveTransaction (io.prestosql.plugin.hive.AbstractTestHive.HiveTransaction), Transaction (io.prestosql.plugin.hive.AbstractTestHive.Transaction), Slice (io.airlift.slice.Slice), ConnectorSession (io.prestosql.spi.connector.ConnectorSession), TestingConnectorSession (io.prestosql.testing.TestingConnectorSession), ConnectorMetadata (io.prestosql.spi.connector.ConnectorMetadata), MaterializedResult (io.prestosql.testing.MaterializedResult), ConnectorPageSink (io.prestosql.spi.connector.ConnectorPageSink), ConnectorSplit (io.prestosql.spi.connector.ConnectorSplit), ConnectorTableMetadata (io.prestosql.spi.connector.ConnectorTableMetadata)
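
The read-back above assumes the table yields exactly one split (getOnlyElement). A table with several files would produce several splits; a sketch of the same verification generalized to loop over all of them, reusing the helpers shown in this example and assuming java.util.ArrayList and io.prestosql.testing.MaterializedRow are imported:

// Sketch: accumulate rows across all splits instead of assuming exactly one.
List<MaterializedRow> rows = new ArrayList<>();
for (ConnectorSplit split : getAllSplits(splitSource)) {
    try (ConnectorPageSource pageSource = pageSourceProvider.createPageSource(transaction.getTransactionHandle(), session, split, tableHandle, columnHandles)) {
        rows.addAll(materializeSourceDataStream(session, pageSource, getTypes(columnHandles)).getMaterializedRows());
    }
}
assertEqualsIgnoreOrder(rows, data.getMaterializedRows());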

Aggregations

ConnectorSession (io.prestosql.spi.connector.ConnectorSession): 267 usages
TestingConnectorSession (io.prestosql.testing.TestingConnectorSession): 160 usages
ConnectorMetadata (io.prestosql.spi.connector.ConnectorMetadata): 159 usages
ConnectorTableHandle (io.prestosql.spi.connector.ConnectorTableHandle): 130 usages
Test (org.testng.annotations.Test): 115 usages
SchemaTableName (io.prestosql.spi.connector.SchemaTableName): 107 usages
ColumnHandle (io.prestosql.spi.connector.ColumnHandle): 98 usages
ColumnMetadata (io.prestosql.spi.connector.ColumnMetadata): 90 usages
PrestoException (io.prestosql.spi.PrestoException): 83 usages
TupleDomain (io.prestosql.spi.predicate.TupleDomain): 83 usages
List (java.util.List): 82 usages
ImmutableList (com.google.common.collect.ImmutableList): 80 usages
ConnectorTableMetadata (io.prestosql.spi.connector.ConnectorTableMetadata): 79 usages
Optional (java.util.Optional): 78 usages
Objects.requireNonNull (java.util.Objects.requireNonNull): 74 usages
Path (org.apache.hadoop.fs.Path): 74 usages
Slice (io.airlift.slice.Slice): 72 usages
ImmutableMap (com.google.common.collect.ImmutableMap): 67 usages
HiveIdentity (io.prestosql.plugin.hive.authentication.HiveIdentity): 65 usages
ConnectorInsertTableHandle (io.prestosql.spi.connector.ConnectorInsertTableHandle): 65 usages