Usage example of com.facebook.presto.cache.CacheConfig from the prestodb/presto project.
Source: class TestDomainTranslator, method setup.
/**
 * Initializes the shared test fixtures before any test in this class runs:
 * the test metadata manager, the domain translator under test, and a
 * subfield column extractor backed by a Hive session that has range
 * filters on subscripts enabled.
 */
@BeforeClass
public void setup()
{
    metadata = createTestMetadataManager();
    domainTranslator = new RowExpressionDomainTranslator(metadata);

    // Session properties are assembled from a Hive client config with
    // range-filters-on-subscripts turned on, plus default writer and cache configs.
    HiveClientConfig hiveClientConfig = new HiveClientConfig().setRangeFiltersOnSubscriptsEnabled(true);
    HiveSessionProperties sessionProperties = new HiveSessionProperties(
            hiveClientConfig,
            new OrcFileWriterConfig(),
            new ParquetFileWriterConfig(),
            new CacheConfig());
    TestingConnectorSession session = new TestingConnectorSession(sessionProperties.getSessionProperties());

    SubfieldExtractor subfieldExtractor = new SubfieldExtractor(
            new FunctionResolution(metadata.getFunctionAndTypeManager()),
            TEST_EXPRESSION_OPTIMIZER,
            session);
    columnExtractor = subfieldExtractor.toColumnExtractor();
}
Usage example of com.facebook.presto.cache.CacheConfig from the prestodb/presto project.
Source: class TestBackgroundHiveSplitLoader, method backgroundHiveSplitLoader.
/**
 * Builds a {@link BackgroundHiveSplitLoader} over the supplied file listing for test use.
 *
 * @param files the located files the testing HDFS environment will expose
 * @param directoryLister the lister used to enumerate partition directories
 * @param fileStatusCacheTables comma-separated table names eligible for file-status caching
 * @return a loader over {@code SIMPLE_TABLE} with a single unbucketed test partition
 */
private static BackgroundHiveSplitLoader backgroundHiveSplitLoader(List<LocatedFileStatus> files, DirectoryLister directoryLister, String fileStatusCacheTables)
{
    // Single synthetic partition for the test schema/table; no extra metadata.
    HivePartitionMetadata partitionMetadata = new HivePartitionMetadata(
            new HivePartition(new SchemaTableName("testSchema", "table_name")),
            Optional.empty(),
            TableToPartitionMapping.empty(),
            Optional.empty(),
            ImmutableSet.of());

    // Session with a 1 GB max split size and the requested file-status cache tables.
    HiveClientConfig clientConfig = new HiveClientConfig()
            .setMaxSplitSize(new DataSize(1.0, GIGABYTE))
            .setFileStatusCacheTables(fileStatusCacheTables);
    ConnectorSession session = new TestingConnectorSession(
            new HiveSessionProperties(clientConfig, new OrcFileWriterConfig(), new ParquetFileWriterConfig(), new CacheConfig()).getSessionProperties());

    return new BackgroundHiveSplitLoader(
            SIMPLE_TABLE,
            ImmutableList.of(partitionMetadata),
            Optional.empty(),
            createBucketSplitInfo(Optional.empty(), Optional.empty()),
            session,
            new TestingHdfsEnvironment(files),
            new NamenodeStats(),
            directoryLister,
            EXECUTOR,
            2,
            false,
            false,
            false);
}
Usage example of com.facebook.presto.cache.CacheConfig from the prestodb/presto project.
Source: class AbstractTestHiveClientLocal, method initialize.
/**
 * Sets up a local Hive metastore in a temporary directory and creates the
 * test database before any test in this class runs, then hands the
 * configuration off to {@code setup(...)}.
 */
@BeforeClass
public void initialize()
{
    // Metastore state lives in a fresh temp dir for the duration of the class.
    tempDir = Files.createTempDir();
    ExtendedHiveMetastore localMetastore = createMetastore(tempDir);

    Database testDatabase = Database.builder()
            .setDatabaseName(testDbName)
            .setOwnerName("public")
            .setOwnerType(PrincipalType.ROLE)
            .build();
    localMetastore.createDatabase(METASTORE_CONTEXT, testDatabase);

    // Fixed time zone keeps timestamp-dependent tests deterministic.
    HiveClientConfig clientConfig = new HiveClientConfig().setTimeZone("America/Los_Angeles");
    setup(testDbName, clientConfig, new CacheConfig(), new MetastoreClientConfig(), localMetastore);
}
Usage example of com.facebook.presto.cache.CacheConfig from the prestodb/presto project.
Source: class TestHiveFileFormats, method testRcTextOptimizedWriter.
/**
 * Verifies that RCTEXT files produced by the optimized RCFile writer are
 * readable back through both the record-cursor and page-source paths.
 *
 * @param rowCount number of rows to write, supplied by the {@code rowCount} data provider
 */
@Test(dataProvider = "rowCount")
public void testRcTextOptimizedWriter(int rowCount)
        throws Exception
{
    // Null map keys are not representable in this format's test harness.
    List<TestColumn> columns = TEST_COLUMNS.stream()
            .filter(TestHiveFileFormats::withoutNullMapKeyTests)
            .collect(toImmutableList());

    TestingConnectorSession session = new TestingConnectorSession(
            new HiveSessionProperties(
                    new HiveClientConfig().setRcfileOptimizedWriterEnabled(true),
                    new OrcFileWriterConfig(),
                    new ParquetFileWriterConfig(),
                    new CacheConfig()).getSessionProperties());

    assertThatFileFormat(RCTEXT)
            .withColumns(columns)
            .withRowsCount(rowCount)
            .withSession(session)
            .withFileWriterFactory(new RcFileFileWriterFactory(HDFS_ENVIRONMENT, FUNCTION_AND_TYPE_MANAGER, new NodeVersion("test"), HIVE_STORAGE_TIME_ZONE, STATS))
            .isReadableByRecordCursor(new GenericHiveRecordCursorProvider(HDFS_ENVIRONMENT))
            .isReadableByPageSource(new RcFilePageSourceFactory(FUNCTION_AND_TYPE_MANAGER, HDFS_ENVIRONMENT, STATS));
}
Usage example of com.facebook.presto.cache.CacheConfig from the prestodb/presto project.
Source: class TestHiveFileFormats, method testRcBinaryOptimizedWriter.
/**
 * Verifies that RCBINARY files produced by the optimized RCFile writer are
 * readable back through both the record-cursor and page-source paths.
 *
 * @param rowCount number of rows to write, supplied by the {@code rowCount} data provider
 */
@Test(dataProvider = "rowCount")
public void testRcBinaryOptimizedWriter(int rowCount)
        throws Exception
{
    // RCBINARY additionally cannot round-trip empty varchars, and null map
    // keys are excluded as in the other format tests. Collect with
    // toImmutableList() for consistency with testRcTextOptimizedWriter;
    // the list is only read afterwards, so immutability is safe.
    List<TestColumn> columns = TEST_COLUMNS.stream()
            .filter(testColumn -> !testColumn.getName().equals("t_empty_varchar"))
            .filter(TestHiveFileFormats::withoutNullMapKeyTests)
            .collect(toImmutableList());

    TestingConnectorSession session = new TestingConnectorSession(
            new HiveSessionProperties(
                    new HiveClientConfig().setRcfileOptimizedWriterEnabled(true),
                    new OrcFileWriterConfig(),
                    new ParquetFileWriterConfig(),
                    new CacheConfig()).getSessionProperties());

    assertThatFileFormat(RCBINARY)
            .withColumns(columns)
            .withRowsCount(rowCount)
            .withSession(session)
            .withFileWriterFactory(new RcFileFileWriterFactory(HDFS_ENVIRONMENT, FUNCTION_AND_TYPE_MANAGER, new NodeVersion("test"), HIVE_STORAGE_TIME_ZONE, STATS))
            .isReadableByRecordCursor(new GenericHiveRecordCursorProvider(HDFS_ENVIRONMENT))
            .isReadableByPageSource(new RcFilePageSourceFactory(FUNCTION_AND_TYPE_MANAGER, HDFS_ENVIRONMENT, STATS));
}
End of aggregated usage examples.