Example usage of io.trino.spi.security.ConnectorIdentity in the Trino project (trinodb): the renameSchema method of the BaseJdbcClient class.
@Override
public void renameSchema(ConnectorSession session, String schemaName, String newSchemaName) {
    try (Connection connection = connectionFactory.openConnection(session)) {
        ConnectorIdentity identity = session.getIdentity();
        // Translate both catalog-level schema names into the remote database's
        // identifier casing/quoting before building the rename statement.
        String source = identifierMapping.toRemoteSchemaName(identity, connection, schemaName);
        String target = identifierMapping.toRemoteSchemaName(identity, connection, newSchemaName);
        execute(connection, renameSchemaSql(source, target));
    }
    catch (SQLException e) {
        // Surface driver failures as Trino engine errors.
        throw new TrinoException(JDBC_ERROR, e);
    }
}
Example usage of io.trino.spi.security.ConnectorIdentity in the Trino project (trinodb): the getTableNames method of the BaseJdbcClient class.
@Override
public List<SchemaTableName> getTableNames(ConnectorSession session, Optional<String> schema) {
    try (Connection connection = connectionFactory.openConnection(session)) {
        ConnectorIdentity identity = session.getIdentity();
        // Map the requested schema (if any) to its remote spelling; bail out
        // early when that schema is excluded by the connector's schema filter.
        Optional<String> remoteSchema = schema.map(name -> identifierMapping.toRemoteSchemaName(identity, connection, name));
        if (remoteSchema.isPresent() && !filterSchema(remoteSchema.get())) {
            return ImmutableList.of();
        }
        try (ResultSet resultSet = getTables(connection, remoteSchema, Optional.empty())) {
            ImmutableList.Builder<SchemaTableName> tables = ImmutableList.builder();
            while (resultSet.next()) {
                String remoteSchemaName = getTableSchemaName(resultSet);
                // Convert remote identifiers back to Trino-visible names.
                String localSchema = identifierMapping.fromRemoteSchemaName(remoteSchemaName);
                String localTable = identifierMapping.fromRemoteTableName(remoteSchemaName, resultSet.getString("TABLE_NAME"));
                // Per-row filtering: rows from excluded schemas are dropped.
                if (filterSchema(localSchema)) {
                    tables.add(new SchemaTableName(localSchema, localTable));
                }
            }
            return tables.build();
        }
    }
    catch (SQLException e) {
        throw new TrinoException(JDBC_ERROR, e);
    }
}
Example usage of io.trino.spi.security.ConnectorIdentity in the Trino project (trinodb): the getTableHandle method of the BaseJdbcClient class.
@Override
public Optional<JdbcTableHandle> getTableHandle(ConnectorSession session, SchemaTableName schemaTableName) {
    try (Connection connection = connectionFactory.openConnection(session)) {
        ConnectorIdentity identity = session.getIdentity();
        // Resolve the remote schema first; the remote table lookup needs it.
        String remoteSchema = identifierMapping.toRemoteSchemaName(identity, connection, schemaTableName.getSchemaName());
        String remoteTable = identifierMapping.toRemoteTableName(identity, connection, remoteSchema, schemaTableName.getTableName());
        try (ResultSet resultSet = getTables(connection, Optional.of(remoteSchema), Optional.of(remoteTable))) {
            // Collect every match before deciding: case-insensitive remote
            // databases can report more than one table for a single name.
            List<JdbcTableHandle> matches = new ArrayList<>();
            while (resultSet.next()) {
                matches.add(new JdbcTableHandle(schemaTableName, getRemoteTable(resultSet), getTableComment(resultSet)));
            }
            switch (matches.size()) {
                case 0:
                    return Optional.empty();
                case 1:
                    return Optional.of(matches.get(0));
                default:
                    throw new TrinoException(NOT_SUPPORTED, "Multiple tables matched: " + schemaTableName);
            }
        }
    }
    catch (SQLException e) {
        throw new TrinoException(JDBC_ERROR, e);
    }
}
Example usage of io.trino.spi.security.ConnectorIdentity in the Trino project (trinodb): the createQueryRunner method of the TestDeltaLakePerTransactionMetastoreCache class.
/**
 * Builds a {@link DistributedQueryRunner} with a Delta Lake catalog whose metastore
 * invocations are counted through a reflective proxy, so tests can compare call
 * counts with and without the per-transaction metastore cache.
 *
 * @param enablePerTransactionHiveMetastoreCaching when {@code false}, the cache size
 *        is set to 1 (the minimum allowed), effectively disabling caching
 * @return the configured query runner with the Delta catalog registered
 * @throws Exception if the docker environment or query runner fails to start
 */
private DistributedQueryRunner createQueryRunner(boolean enablePerTransactionHiveMetastoreCaching) throws Exception {
boolean createdDeltaLake = false;
if (dockerizedMinioDataLake == null) {
// share environment between testcases to speed things up
dockerizedMinioDataLake = createDockerizedMinioDataLakeForDeltaLake(BUCKET_NAME);
createdDeltaLake = true;
}
Session session = testSessionBuilder().setCatalog(DELTA_CATALOG).setSchema("default").build();
DistributedQueryRunner queryRunner = DistributedQueryRunner.builder(session).build();
// Register a custom connector factory that wraps the standard Delta Lake connector
// but swaps in an instrumented metastore (see getCountingHiveMetastoreFactory below).
queryRunner.installPlugin(new Plugin() {
@Override
public Iterable<ConnectorFactory> getConnectorFactories() {
return ImmutableList.of(new ConnectorFactory() {
@Override
public String getName() {
return TEST_DELTA_CONNECTOR_NAME;
}
@Override
public Connector create(String catalogName, Map<String, String> config, ConnectorContext context) {
// Re-create the Thrift metastore wiring that the stock connector would
// normally provide, so the counting proxy can be inserted underneath it.
return InternalDeltaLakeConnectorFactory.createConnector(catalogName, config, context, new AbstractConfigurationAwareModule() {
@Override
protected void setup(Binder binder) {
newOptionalBinder(binder, ThriftMetastoreClientFactory.class).setDefault().to(DefaultThriftMetastoreClientFactory.class).in(Scopes.SINGLETON);
binder.bind(MetastoreLocator.class).to(StaticMetastoreLocator.class).in(Scopes.SINGLETON);
configBinder(binder).bindConfig(StaticMetastoreConfig.class);
configBinder(binder).bindConfig(ThriftMetastoreConfig.class);
binder.bind(ThriftMetastore.class).to(ThriftHiveMetastore.class).in(Scopes.SINGLETON);
newExporter(binder).export(ThriftMetastore.class).as((generator) -> generator.generatedNameOf(ThriftHiveMetastore.class));
install(new ThriftMetastoreAuthenticationModule());
binder.bind(Boolean.class).annotatedWith(HideNonDeltaLakeTables.class).toInstance(false);
binder.bind(BridgingHiveMetastoreFactory.class).in(Scopes.SINGLETON);
}
@Provides
@Singleton
@RawHiveMetastoreFactory
public HiveMetastoreFactory getCountingHiveMetastoreFactory(BridgingHiveMetastoreFactory bridgingHiveMetastoreFactory) {
return new HiveMetastoreFactory() {
@Override
public boolean isImpersonationEnabled() {
return false;
}
@Override
public HiveMetastore createMetastore(Optional<ConnectorIdentity> identity) {
HiveMetastore bridgingHiveMetastore = bridgingHiveMetastoreFactory.createMetastore(identity);
// bind HiveMetastore which counts method executions
return Reflection.newProxy(HiveMetastore.class, (proxy, method, args) -> {
String methodName = method.getName();
// NOTE(review): getOrDefault + put is not atomic; presumably the
// counts map is only touched single-threaded in these tests — confirm.
long count = hiveMetastoreInvocationCounts.getOrDefault(methodName, 0L);
hiveMetastoreInvocationCounts.put(methodName, count + 1);
return method.invoke(bridgingHiveMetastore, args);
});
}
};
}
});
}
});
}
});
// Point the catalog at the dockerized metastore and MinIO S3 endpoint.
ImmutableMap.Builder<String, String> deltaLakeProperties = ImmutableMap.builder();
deltaLakeProperties.put("hive.metastore.uri", dockerizedMinioDataLake.getTestingHadoop().getMetastoreAddress());
deltaLakeProperties.put("hive.s3.aws-access-key", MINIO_ACCESS_KEY);
deltaLakeProperties.put("hive.s3.aws-secret-key", MINIO_SECRET_KEY);
deltaLakeProperties.put("hive.s3.endpoint", dockerizedMinioDataLake.getMinioAddress());
deltaLakeProperties.put("hive.s3.path-style-access", "true");
// use test value so we do not get clash with default bindings)
deltaLakeProperties.put("hive.metastore", "test");
if (!enablePerTransactionHiveMetastoreCaching) {
// almost disable the cache; 0 is not allowed as config property value
deltaLakeProperties.put("hive.per-transaction-metastore-cache-maximum-size", "1");
}
queryRunner.createCatalog(DELTA_CATALOG, TEST_DELTA_CONNECTOR_NAME, deltaLakeProperties.buildOrThrow());
if (createdDeltaLake) {
// First run only: upload the canned Delta tables and register them in the catalog.
List<TpchTable<? extends TpchEntity>> tpchTables = List.of(TpchTable.NATION, TpchTable.REGION);
tpchTables.forEach(table -> {
String tableName = table.getTableName();
dockerizedMinioDataLake.copyResources("io/trino/plugin/deltalake/testing/resources/databricks/" + tableName, tableName);
queryRunner.execute(format("CREATE TABLE %s.%s.%s (dummy int) WITH (location = 's3://%s/%3$s')", DELTA_CATALOG, "default", tableName, BUCKET_NAME));
});
}
return queryRunner;
}
Example usage of io.trino.spi.security.ConnectorIdentity in the Trino project (trinodb): the testFileSystemCache method of the TestFileSystemCache class.
@Test
public void testFileSystemCache() throws IOException {
    HdfsEnvironment environment = new HdfsEnvironment(new HiveHdfsConfiguration(new HdfsConfigurationInitializer(new HdfsConfig()), ImmutableSet.of()), new HdfsConfig(), new ImpersonatingHdfsAuthentication(new SimpleHadoopAuthentication(), new SimpleUserNameProvider()));
    ConnectorIdentity firstIdentity = ConnectorIdentity.ofUser("user");
    ConnectorIdentity secondIdentity = ConnectorIdentity.ofUser("other_user");
    // Repeated lookups for the same identity must hit the cache (same instance).
    FileSystem firstFs = getFileSystem(environment, firstIdentity);
    assertSame(firstFs, getFileSystem(environment, firstIdentity));
    // A different identity must get its own FileSystem, itself cached on re-lookup.
    FileSystem secondFs = getFileSystem(environment, secondIdentity);
    assertNotSame(firstFs, secondFs);
    assertSame(secondFs, getFileSystem(environment, secondIdentity));
    // Closing all cached file systems invalidates the cache: a fresh instance follows.
    FileSystem.closeAll();
    assertNotSame(getFileSystem(environment, firstIdentity), firstFs);
}
Aggregations