Use of io.trino.plugin.hive.authentication.NoHdfsAuthentication in project trino by trinodb: class TestHiveAlluxioMetastore, method setup.
@Parameters({"hive.hadoop2.alluxio.host", "hive.hadoop2.alluxio.port", "hive.hadoop2.hiveVersionMajor", "hive.hadoop2.timeZone"})
@BeforeClass
public void setup(String host, String port, int hiveVersionMajor, String timeZone)
{
    checkArgument(hiveVersionMajor > 0, "Invalid hiveVersionMajor: %s", hiveVersionMajor);
    // starting with Hive 3, the test expects UTC regardless of the configured time zone
    timeZone = hiveVersionMajor >= 3 ? "UTC" : timeZone;
    this.alluxioAddress = host + ":" + port;
    this.hiveVersionMajor = hiveVersionMajor;

    // Alluxio clients are configured through system properties
    System.setProperty(PropertyKey.Name.SECURITY_LOGIN_USERNAME, "presto");
    System.setProperty(PropertyKey.Name.MASTER_HOSTNAME, host);

    HiveConfig hiveConfig = new HiveConfig()
            .setParquetTimeZone(timeZone)
            .setRcfileTimeZone(timeZone);
    AlluxioHiveMetastoreConfig alluxioConfig = new AlluxioHiveMetastoreConfig();
    alluxioConfig.setMasterAddress(this.alluxioAddress);
    TableMasterClient client = AlluxioMetastoreModule.createCatalogMasterClient(alluxioConfig);
    hdfsEnvironment = new HdfsEnvironment(createTestHdfsConfiguration(), new HdfsConfig(), new NoHdfsAuthentication());
    setup(SCHEMA, hiveConfig, new AlluxioHiveMetastore(client, new MetastoreConfig()), hdfsEnvironment);
}
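All of the examples on this page share one pattern: an HdfsEnvironment wired with NoHdfsAuthentication, which performs no Kerberos login and runs filesystem operations as the current OS user. A minimal standalone sketch of that pattern, built from the same constructors used in the snippets below (test-only; no test-harness helpers assumed):

// Build an HdfsEnvironment that skips Kerberos entirely; suitable for tests only.
HdfsConfig hdfsConfig = new HdfsConfig();
HdfsConfiguration hdfsConfiguration = new HiveHdfsConfiguration(new HdfsConfigurationInitializer(hdfsConfig), ImmutableSet.of());
HdfsEnvironment hdfsEnvironment = new HdfsEnvironment(hdfsConfiguration, hdfsConfig, new NoHdfsAuthentication());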
Use of io.trino.plugin.hive.authentication.NoHdfsAuthentication in project trino by trinodb: class AbstractTestHive, method setup.
protected final void setup(String host, int port, String databaseName, String timeZone)
{
    HiveConfig hiveConfig = getHiveConfig()
            .setParquetTimeZone(timeZone)
            .setRcfileTimeZone(timeZone);
    Optional<HostAndPort> proxy = Optional.ofNullable(System.getProperty("hive.metastore.thrift.client.socks-proxy"))
            .map(HostAndPort::fromString);
    MetastoreLocator metastoreLocator = new TestingMetastoreLocator(proxy, HostAndPort.fromParts(host, port));
    hdfsEnvironment = new HdfsEnvironment(createTestHdfsConfiguration(), new HdfsConfig(), new NoHdfsAuthentication());
    // cache metastore responses: 1 minute TTL, 15 second refresh interval, at most 10000 entries
    HiveMetastore metastore = cachingHiveMetastore(
            new BridgingHiveMetastore(
                    new ThriftHiveMetastore(metastoreLocator, hiveConfig, new MetastoreConfig(), new ThriftMetastoreConfig(), hdfsEnvironment, false),
                    new HiveIdentity(SESSION.getIdentity())),
            executor,
            new Duration(1, MINUTES),
            Optional.of(new Duration(15, SECONDS)),
            10000);
    setup(databaseName, hiveConfig, metastore, hdfsEnvironment);
}
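For reference, a hypothetical call into this method; every argument value below is a placeholder rather than anything mandated by the test (9083 is the conventional Hive metastore Thrift port):

// Hypothetical invocation; adjust host, port, database, and time zone to the metastore under test.
setup("localhost", 9083, "tpch", "America/Bahia_Banderas");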
Use of io.trino.plugin.hive.authentication.NoHdfsAuthentication in project trino by trinodb: class AbstractTestHive, method testNewDirectoryPermissions.
@Test
public void testNewDirectoryPermissions()
        throws Exception
{
    SchemaTableName tableName = temporaryTable("empty_file");
    List<Column> columns = ImmutableList.of(new Column("test", HIVE_STRING, Optional.empty()));
    createEmptyTable(tableName, ORC, columns, ImmutableList.of(), Optional.empty());
    try {
        Transaction transaction = newTransaction();
        ConnectorSession session = newSession();
        ConnectorMetadata metadata = transaction.getMetadata();
        metadata.beginQuery(session);
        Table table = transaction.getMetastore().getTable(tableName.getSchemaName(), tableName.getTableName()).orElseThrow();

        // create new directory and set directory permission after creation
        HdfsContext context = new HdfsContext(session);
        Path location = new Path(table.getStorage().getLocation());
        Path defaultPath = new Path(location + "/defaultperms");
        createDirectory(context, hdfsEnvironment, defaultPath);
        FileStatus defaultFsStatus = hdfsEnvironment.getFileSystem(context, defaultPath).getFileStatus(defaultPath);
        assertEquals(defaultFsStatus.getPermission().toOctal(), 777);

        // use hdfs config that skips setting directory permissions after creation
        HdfsConfig configWithSkip = new HdfsConfig();
        configWithSkip.setNewDirectoryPermissions(HdfsConfig.SKIP_DIR_PERMISSIONS);
        HdfsEnvironment hdfsEnvironmentWithSkip = new HdfsEnvironment(createTestHdfsConfiguration(), configWithSkip, new NoHdfsAuthentication());
        Path skipPath = new Path(location + "/skipperms");
        createDirectory(context, hdfsEnvironmentWithSkip, skipPath);
        FileStatus skipFsStatus = hdfsEnvironmentWithSkip.getFileSystem(context, skipPath).getFileStatus(skipPath);
        assertEquals(skipFsStatus.getPermission().toOctal(), 755);
    }
    finally {
        dropTable(tableName);
    }
}
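In plain Hadoop filesystem terms, the difference this test exercises looks roughly like the sketch below. This is an illustration of the observable behavior, not Trino's actual createDirectory implementation, and the path variable is a placeholder:

FileSystem fs = hdfsEnvironment.getFileSystem(context, path);
// Default config: create the directory, then explicitly force 0777 on it.
fs.mkdirs(path);
fs.setPermission(path, new FsPermission((short) 0777));
// With setNewDirectoryPermissions(HdfsConfig.SKIP_DIR_PERMISSIONS): create only;
// the directory keeps whatever the filesystem umask produced (0755 in this test).
fs.mkdirs(path);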
Use of io.trino.plugin.hive.authentication.NoHdfsAuthentication in project trino by trinodb: class BaseTestHiveOnDataLake, method createQueryRunner.
@Override
protected QueryRunner createQueryRunner()
        throws Exception
{
    this.bucketName = "test-hive-insert-overwrite-" + randomTableSuffix();
    this.dockerizedS3DataLake = closeAfterClass(new HiveMinioDataLake(bucketName, ImmutableMap.of(), hiveHadoopImage));
    this.dockerizedS3DataLake.start();
    this.metastoreClient = new BridgingHiveMetastore(
            new ThriftHiveMetastore(
                    new TestingMetastoreLocator(Optional.empty(), this.dockerizedS3DataLake.getHiveHadoop().getHiveMetastoreEndpoint()),
                    new HiveConfig(),
                    new MetastoreConfig(),
                    new ThriftMetastoreConfig(),
                    new HdfsEnvironment(new HiveHdfsConfiguration(new HdfsConfigurationInitializer(new HdfsConfig(), ImmutableSet.of()), ImmutableSet.of()), new HdfsConfig(), new NoHdfsAuthentication()),
                    false),
            HiveIdentity.none());
    return S3HiveQueryRunner.create(dockerizedS3DataLake, ImmutableMap.<String, String>builder()
            .put("hive.insert-existing-partitions-behavior", "OVERWRITE")
            .put("hive.non-managed-table-writes-enabled", "true")
            .put("hive.metastore-cache-ttl", "1d")
            .put("hive.metastore-refresh-interval", "1d")
            .buildOrThrow());
}
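Once the runner exists, tests drive it with plain SQL. A hypothetical usage; the table name and statements below are illustrative only, not taken from the test class:

QueryRunner queryRunner = createQueryRunner();
// Non-managed-table writes and INSERT OVERWRITE semantics are enabled by the properties above.
queryRunner.execute("CREATE TABLE test_table (id bigint)");
queryRunner.execute("INSERT INTO test_table VALUES (1)");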
Use of io.trino.plugin.hive.authentication.NoHdfsAuthentication in project trino by trinodb: class TestCheckpointEntryIterator, method setUp.
@BeforeClass
public void setUp()
{
    HdfsConfig hdfsConfig = new HdfsConfig();
    HdfsConfiguration hdfsConfiguration = new HiveHdfsConfiguration(new HdfsConfigurationInitializer(hdfsConfig), ImmutableSet.of());
    hdfsEnvironment = new HdfsEnvironment(hdfsConfiguration, hdfsConfig, new NoHdfsAuthentication());
    checkpointSchemaManager = new CheckpointSchemaManager(TESTING_TYPE_MANAGER);
}
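The environment built here is what the checkpoint tests later use to open Delta Lake checkpoint files. A minimal sketch of that follow-on step; the session constant and file path are assumptions, not taken from the test:

// Resolve a Hadoop FileSystem through the test environment (SESSION is assumed to be a ConnectorSession constant).
HdfsContext context = new HdfsContext(SESSION);
Path checkpointPath = new Path("/tmp/_delta_log/00000000000000000010.checkpoint.parquet");
FileSystem fileSystem = hdfsEnvironment.getFileSystem(context, checkpointPath);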