Example use of io.prestosql.spi.connector.ConnectorSession in project hetu-core by openlookeng,
from class TestHiveView, method tesHiveView.
@Test(enabled = false)
public void tesHiveView() {
    // Round-trips a Hive view through the metastore: create it, read it back
    // through ConnectorMetadata, verify its serialized definition and that it
    // appears in listViews, then drop it in the finally block so the test
    // leaves no residue even on assertion failure.
    SchemaTableName temporaryCreateView = temporaryTable("hive_view");
    String viewData = "test hive view";
    // Expected JSON encoding of the stored ConnectorViewDefinition.
    String expectedViewData = "{\n" + " \"originalSql\" : \"test hive view\",\n" + " \"catalog\" : \"hive\",\n" + " \"columns\" : [ {\n" + " \"name\" : \"dummy\",\n" + " \"type\" : \"varchar\"\n" + " } ],\n" + " \"owner\" : \"test\",\n" + " \"runAsInvoker\" : false\n" + "}";
    String owner = "test";
    ConnectorSession session = newSession();
    HiveIdentity identity = new HiveIdentity(session);
    metastoreClient.createTable(identity, buildHiveView(temporaryCreateView, owner, viewData), buildInitialPrivilegeSet(owner));
    try (Transaction transaction = newTransaction()) {
        ConnectorMetadata metadata = transaction.getMetadata();
        Optional<ConnectorViewDefinition> views = metadata.getView(newSession(), temporaryCreateView);
        // Assert presence explicitly; a bare views.get() would throw
        // NoSuchElementException instead of a readable assertion failure.
        assertTrue(views.isPresent());
        assertEquals(views.get().getOriginalSql(), expectedViewData);
        assertTrue(metadata.listViews(newSession(), Optional.of(temporaryCreateView.getSchemaName())).contains(temporaryCreateView));
    }
    finally {
        metastoreClient.dropTable(identity, temporaryCreateView.getSchemaName(), temporaryCreateView.getTableName(), true);
    }
}
Example use of io.prestosql.spi.connector.ConnectorSession in project hetu-core by openlookeng,
from class TestHiveWriterFactory, method testSortingPath.
@Test
public void testSortingPath() {
    // Builds a HiveWriterFactory for an ACID DELETE on a transactional
    // managed table and verifies that the sorting writer it produces uses
    // the expected temp-file prefix under the staging write path.
    setUp();
    String targetPath = "/tmp";
    String writePath = "/tmp/table";
    Optional<WriteIdInfo> writeIdInfo = Optional.of(new WriteIdInfo(1, 1, 0));
    StorageFormat storageFormat = StorageFormat.fromHiveStorageFormat(ORC);
    Storage storage = new Storage(storageFormat, "", Optional.empty(), false, ImmutableMap.of());
    // Transactional managed table with a single INT column and no partitions.
    Table table = new Table("schema", "table", "user", "MANAGED_TABLE", storage, ImmutableList.of(new Column("col_1", HiveType.HIVE_INT, Optional.empty())), ImmutableList.of(), ImmutableMap.of("transactional", "true"), Optional.of("original"), Optional.of("expanded"));
    HiveConfig hiveConfig = getHiveConfig();
    HivePageSinkMetadata hivePageSinkMetadata = new HivePageSinkMetadata(new SchemaTableName("schema", "table"), Optional.of(table), ImmutableMap.of());
    PageSorter pageSorter = new PagesIndexPageSorter(new PagesIndex.TestingFactory(false));
    Metadata metadata = createTestMetadataManager();
    TypeManager typeManager = new InternalTypeManager(metadata.getFunctionAndTypeManager());
    HdfsConfiguration hdfsConfiguration = new HiveHdfsConfiguration(new HdfsConfigurationInitializer(hiveConfig), ImmutableSet.of());
    HdfsEnvironment hdfsEnvironment = new HdfsEnvironment(hdfsConfiguration, hiveConfig, new NoHdfsAuthentication());
    LocationService locationService = new HiveLocationService(hdfsEnvironment);
    ConnectorSession session = newSession();
    // Name the bulkier constructor arguments so the factory call below stays readable.
    ImmutableList<HiveColumnHandle> inputColumns = ImmutableList.of(new HiveColumnHandle("col_1", HiveType.HIVE_INT, new TypeSignature("integer", ImmutableList.of()), 0, HiveColumnHandle.ColumnType.REGULAR, Optional.empty()));
    LocationHandle locationHandle = new LocationHandle(targetPath, writePath, false, LocationHandle.WriteMode.STAGE_AND_MOVE_TO_TARGET_DIRECTORY, writeIdInfo);
    HivePageSinkMetadataProvider metadataProvider = new HivePageSinkMetadataProvider(hivePageSinkMetadata, CachingHiveMetastore.memoizeMetastore(metastore, 1000), new HiveIdentity(session));
    HiveSessionProperties sessionProperties = new HiveSessionProperties(hiveConfig, new OrcFileWriterConfig(), new ParquetFileWriterConfig());
    HiveWriterFactory hiveWriterFactory = new HiveWriterFactory(getDefaultHiveFileWriterFactories(hiveConfig), "schema", "table", false, HiveACIDWriteType.DELETE, inputColumns, ORC, ORC, ImmutableMap.of(), OptionalInt.empty(), ImmutableList.of(), locationHandle, locationService, session.getQueryId(), metadataProvider, typeManager, hdfsEnvironment, pageSorter, hiveConfig.getWriterSortBufferSize(), hiveConfig.getMaxOpenSortFiles(), false, UTC, session, new TestingNodeManager("fake-environment"), new HiveEventClient(), sessionProperties, new HiveWriterStats(), getDefaultOrcFileWriterFactory(hiveConfig));
    HiveWriter hiveWriter = hiveWriterFactory.createWriter(ImmutableList.of(), OptionalInt.empty(), Optional.empty());
    // DELETE writers sort rows, so the file writer must be a SortingFileWriter
    // whose temp prefix encodes the bucket number.
    assertEquals(((SortingFileWriter) hiveWriter.getFileWriter()).getTempFilePrefix().getName(), ".tmp-sort.bucket_00000");
}
Example use of io.prestosql.spi.connector.ConnectorSession in project hetu-core by openlookeng,
from class TestSemiTransactionalHiveMetastore, method testIsCollectColumnStatisticsOnWriteFalse.
@Test
public void testIsCollectColumnStatisticsOnWriteFalse() {
    // With collect_column_statistics_on_write disabled on the session,
    // skipStats must discard the newly computed statistics (STATISTICS_1)
    // and keep the existing ones (STATISTICS_2).
    ConnectorSession session = newSession(ImmutableMap.of("collect_column_statistics_on_write", false));
    boolean collectOnWrite = HiveSessionProperties.isCollectColumnStatisticsOnWrite(session);
    assertEquals(STATISTICS_2, skipStats(STATISTICS_1, STATISTICS_2, collectOnWrite));
}
Example use of io.prestosql.spi.connector.ConnectorSession in project hetu-core by openlookeng,
from class MetadataManager, method getTableProperties.
@Override
public TableProperties getTableProperties(Session session, TableHandle handle) {
    // Resolves the table's properties via the owning connector, caching the
    // result per query when the connector handle supports caching.
    CatalogName catalogName = handle.getCatalogName();
    CatalogMetadata catalogMetadata = getCatalogMetadata(session, catalogName);
    ConnectorMetadata metadata = catalogMetadata.getMetadataFor(catalogName);
    ConnectorSession connectorSession = session.toConnectorSession(catalogName);
    String queryId = session.getQueryId().getId();
    if (metadata.usesLegacyTableLayouts()) {
        // Legacy connectors expose layouts rather than table properties;
        // derive properties from the layout (computing one if absent).
        return handle.getLayout()
                .map(layout -> new TableProperties(catalogName, handle.getTransaction(), new ConnectorTableProperties(metadata.getTableLayout(connectorSession, layout))))
                .orElseGet(() -> getLayout(session, handle, Constraint.alwaysTrue(), Optional.empty()).get().getTableProperties());
    }
    if (!handle.getConnectorHandle().isTablePropertiesCacheSupported()) {
        return new TableProperties(catalogName, handle.getTransaction(), metadata.getTableProperties(connectorSession, handle.getConnectorHandle()));
    }
    // Obtain the per-query map and compute the entry atomically. The previous
    // get/containsKey/put sequence was a check-then-act race: concurrent
    // callers could observe a null map between the two get(queryId) calls, or
    // build independent maps and overwrite each other's cached entries.
    ConcurrentHashMap<ConnectorTableHandle, TableProperties> tablePropertiesMap = tablePropertiesQueryCache.computeIfAbsent(queryId, key -> new ConcurrentHashMap<>());
    return tablePropertiesMap.computeIfAbsent(handle.getConnectorHandle(), connectorHandle -> new TableProperties(catalogName, handle.getTransaction(), metadata.getTableProperties(connectorSession, connectorHandle)));
}
Example use of io.prestosql.spi.connector.ConnectorSession in project hetu-core by openlookeng,
from class MetadataManager, method applyFilter.
@Override
public Optional<ConstraintApplicationResult<TableHandle>> applyFilter(Session session, TableHandle table, Constraint constraint, List<Constraint> disjunctConstraints, Set<ColumnHandle> allColumnHandles, boolean pushPartitionsOnly) {
    // Delegates filter pushdown to the owning connector and, on success,
    // rewraps the connector-level result into an engine-level TableHandle.
    CatalogName catalogName = table.getCatalogName();
    ConnectorMetadata metadata = getMetadata(session, catalogName);
    if (metadata.usesLegacyTableLayouts()) {
        // Connectors still on legacy table layouts cannot accept pushdown.
        return Optional.empty();
    }
    ConnectorSession connectorSession = session.toConnectorSession(catalogName);
    return metadata.applyFilter(connectorSession, table.getConnectorHandle(), constraint, disjunctConstraints, allColumnHandles, pushPartitionsOnly)
            .map(result -> {
                TableHandle newHandle = new TableHandle(catalogName, result.getHandle(), table.getTransaction(), Optional.empty());
                return new ConstraintApplicationResult<>(newHandle, result.getRemainingFilter());
            });
}
Aggregations