Use of io.prestosql.plugin.hive.HiveConfig in project boostkit-bigdata by kunpengcompute.
From the class TestMetastoreHiveStatisticsProvider, the method testGetTableStatisticsSampling.
@Test
public void testGetTableStatisticsSampling() {
    // With a sample size of 1, only one of the two partitions below should reach the statistics callback.
    MetastoreHiveStatisticsProvider statisticsProvider = new MetastoreHiveStatisticsProvider((session, schemaTableName, hivePartitions, table) -> {
        assertEquals(schemaTableName, TABLE);
        assertEquals(hivePartitions.size(), 1);
        return ImmutableMap.of();
    });
    TestingConnectorSession session = new TestingConnectorSession(
            new HiveSessionProperties(new HiveConfig().setPartitionStatisticsSampleSize(1), new OrcFileWriterConfig(), new ParquetFileWriterConfig()).getSessionProperties());
    statisticsProvider.getTableStatistics(session, TABLE, ImmutableMap.of(), ImmutableMap.of(),
            ImmutableList.of(partition("p1=string1/p2=1234"), partition("p1=string1/p2=1235")), true, table);
}
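A minimal sketch of why only one partition reaches the lambda above: the sample size configured on HiveConfig surfaces as a session property, which the provider uses to sample the partition list before fetching statistics. This assumes the static accessor getPartitionStatisticsSampleSize exists on HiveSessionProperties with this signature, as in io.prestosql.plugin.hive.
// Hedged sketch: read the configured sample size back through the session.
// getPartitionStatisticsSampleSize is assumed to match io.prestosql.plugin.hive.HiveSessionProperties.
TestingConnectorSession sampledSession = new TestingConnectorSession(
        new HiveSessionProperties(new HiveConfig().setPartitionStatisticsSampleSize(1), new OrcFileWriterConfig(), new ParquetFileWriterConfig()).getSessionProperties());
assertEquals(HiveSessionProperties.getPartitionStatisticsSampleSize(sampledSession), 1);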
Use of io.prestosql.plugin.hive.HiveConfig in project hetu-core by openlookeng.
From the class TestSpatialJoins, the method createQueryRunner.
private static DistributedQueryRunner createQueryRunner() throws Exception {
    DistributedQueryRunner queryRunner = new DistributedQueryRunner(
            testSessionBuilder().setSource(TestSpatialJoins.class.getSimpleName()).setCatalog("hive").setSchema("default").build(),
            4);
    queryRunner.installPlugin(new GeoPlugin());
    // Back the "hive" catalog with a file-based metastore rooted in the runner's data directory.
    File baseDir = queryRunner.getCoordinator().getBaseDataDir().resolve("hive_data").toFile();
    HiveConfig hiveConfig = new HiveConfig();
    HdfsConfiguration hdfsConfiguration = new HiveHdfsConfiguration(new HdfsConfigurationInitializer(hiveConfig), ImmutableSet.of());
    HdfsEnvironment hdfsEnvironment = new HdfsEnvironment(hdfsConfiguration, hiveConfig, new NoHdfsAuthentication());
    FileHiveMetastore metastore = new FileHiveMetastore(hdfsEnvironment, baseDir.toURI().toString(), "test");
    metastore.createDatabase(new HiveIdentity(SESSION), Database.builder().setDatabaseName("default").setOwnerName("public").setOwnerType(PrincipalType.ROLE).build());
    queryRunner.installPlugin(new HivePlugin("hive", Optional.of(metastore)));
    queryRunner.createCatalog("hive", "hive");
    return queryRunner;
}
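A hedged usage sketch for the runner built above: with GeoPlugin installed, geospatial functions such as ST_Contains, ST_GeometryFromText, and ST_Point become available against the "hive" catalog. The table name, column, and WKT value here are illustrative assumptions, not taken from TestSpatialJoins.
DistributedQueryRunner queryRunner = createQueryRunner();
// Hypothetical table and data; only the function names come from the Geo plugin.
queryRunner.execute("CREATE TABLE polygons (name VARCHAR, wkt VARCHAR)");
queryRunner.execute("INSERT INTO polygons VALUES ('a', 'POLYGON ((0 0, 0 2, 2 2, 2 0, 0 0))')");
queryRunner.execute("SELECT name FROM polygons WHERE ST_Contains(ST_GeometryFromText(wkt), ST_Point(1, 1))");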
Use of io.prestosql.plugin.hive.HiveConfig in project hetu-core by openlookeng.
From the class ParquetTester, the method assertMaxReadBytes.
static void assertMaxReadBytes(List<ObjectInspector> objectInspectors, Iterable<?>[] writeValues, Iterable<?>[] readValues,
        List<String> columnNames, List<Type> columnTypes, Optional<MessageType> parquetSchema, DataSize maxReadBlockSize)
        throws Exception {
    CompressionCodecName compressionCodecName = UNCOMPRESSED;
    HiveConfig config = new HiveConfig()
            .setHiveStorageFormat(HiveStorageFormat.PARQUET)
            .setUseParquetColumnNames(false)
            .setParquetMaxReadBlockSize(maxReadBlockSize);
    ConnectorSession session = new TestingConnectorSession(new HiveSessionProperties(config, new OrcFileWriterConfig(), new ParquetFileWriterConfig()).getSessionProperties());
    try (TempFile tempFile = new TempFile("test", "parquet")) {
        JobConf jobConf = new JobConf();
        jobConf.setEnum(COMPRESSION, compressionCodecName);
        jobConf.setBoolean(ENABLE_DICTIONARY, true);
        jobConf.setEnum(WRITER_VERSION, PARQUET_1_0);
        writeParquetColumn(jobConf, tempFile.getFile(), compressionCodecName, createTableProperties(columnNames, objectInspectors), getStandardStructObjectInspector(columnNames, objectInspectors), getIterators(writeValues), parquetSchema, false);
        Iterator<?>[] expectedValues = getIterators(readValues);
        try (ConnectorPageSource pageSource = getFileFormat().createFileFormatReader(session, HDFS_ENVIRONMENT, tempFile.getFile(), columnNames, columnTypes)) {
            // Every page produced by the reader must respect the configured max read block size.
            assertPageSource(columnTypes, expectedValues, pageSource, Optional.of(getParquetMaxReadBlockSize(session).toBytes()));
            assertFalse(stream(expectedValues).allMatch(Iterator::hasNext));
        }
    }
}
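A hedged caller sketch for assertMaxReadBytes: a single BIGINT column capped at a 1 kB Parquet read block size. The column name, value range, and inspector choice are illustrative assumptions, not from ParquetTester itself.
// Hypothetical invocation; javaLongObjectInspector comes from Hive's
// PrimitiveObjectInspectorFactory, DataSize and KILOBYTE from io.airlift.units.
Iterable<?>[] values = new Iterable<?>[] {ContiguousSet.create(Range.closed(0L, 9_999L), DiscreteDomain.longs())};
assertMaxReadBytes(
        ImmutableList.of(javaLongObjectInspector),
        values,
        values,
        ImmutableList.of("test_column"),
        ImmutableList.of(BIGINT),
        Optional.empty(),
        new DataSize(1, KILOBYTE));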
Use of io.prestosql.plugin.hive.HiveConfig in project hetu-core by openlookeng.
From the class TestHiveGlueMetastore, the method createMetastore.
/**
* GlueHiveMetastore currently uses AWS Default Credential Provider Chain,
* See https://docs.aws.amazon.com/sdk-for-java/v1/developer-guide/credentials.html#credentials-default
* on ways to set your AWS credentials which will be needed to run this test.
*/
@Override
protected HiveMetastore createMetastore(File tempDir) {
    HiveConfig hiveConfig = new HiveConfig();
    HdfsConfiguration hdfsConfiguration = new HiveHdfsConfiguration(new HdfsConfigurationInitializer(hiveConfig), ImmutableSet.of());
    HdfsEnvironment hdfsEnvironment = new HdfsEnvironment(hdfsConfiguration, hiveConfig, new NoHdfsAuthentication());
    GlueHiveMetastoreConfig glueConfig = new GlueHiveMetastoreConfig();
    glueConfig.setDefaultWarehouseDir(tempDir.toURI().toString());
    return new GlueHiveMetastore(hdfsEnvironment, glueConfig);
}
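A hedged configuration sketch: besides the warehouse directory, GlueHiveMetastoreConfig also exposes the AWS region. The fluent chaining and the setGlueRegion setter are assumed from the io.prestosql Glue module's config style, and the region value is illustrative; credentials still resolve through the Default Credential Provider Chain linked above.
GlueHiveMetastoreConfig glueConfig = new GlueHiveMetastoreConfig()
        .setGlueRegion("us-east-1") // assumed setter; region value is illustrative
        .setDefaultWarehouseDir(tempDir.toURI().toString());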
Use of io.prestosql.plugin.hive.HiveConfig in project hetu-core by openlookeng.
From the class TestMetastoreHiveStatisticsProvider, the method testGetTableStatisticsSampling.
@Test
public void testGetTableStatisticsSampling() {
    // With a sample size of 1, only one of the two partitions below should reach the statistics callback.
    MetastoreHiveStatisticsProvider statisticsProvider = new MetastoreHiveStatisticsProvider((session, schemaTableName, hivePartitions, table) -> {
        assertEquals(schemaTableName, TABLE);
        assertEquals(hivePartitions.size(), 1);
        return ImmutableMap.of();
    });
    TestingConnectorSession session = new TestingConnectorSession(
            new HiveSessionProperties(new HiveConfig().setPartitionStatisticsSampleSize(1), new OrcFileWriterConfig(), new ParquetFileWriterConfig()).getSessionProperties());
    statisticsProvider.getTableStatistics(session, TABLE, ImmutableMap.of(), ImmutableMap.of(),
            ImmutableList.of(partition("p1=string1/p2=1234"), partition("p1=string1/p2=1235")), true, table);
}
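A complementary sketch under the same assumptions as the test above: when the sample size is at least the partition count, sampling becomes a no-op and the callback should see both partitions.
// Hedged sketch: sample size 2 covers both partitions, so no sampling occurs.
MetastoreHiveStatisticsProvider unsampledProvider = new MetastoreHiveStatisticsProvider((session, schemaTableName, hivePartitions, table) -> {
    assertEquals(hivePartitions.size(), 2);
    return ImmutableMap.of();
});
TestingConnectorSession unsampledSession = new TestingConnectorSession(
        new HiveSessionProperties(new HiveConfig().setPartitionStatisticsSampleSize(2), new OrcFileWriterConfig(), new ParquetFileWriterConfig()).getSessionProperties());
unsampledProvider.getTableStatistics(unsampledSession, TABLE, ImmutableMap.of(), ImmutableMap.of(),
        ImmutableList.of(partition("p1=string1/p2=1234"), partition("p1=string1/p2=1235")), true, table);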