Example usage of io.trino.plugin.hive.metastore.HiveMetastore in the trino project (by trinodb).
From the class HiveBenchmarkQueryRunner, method createLocalQueryRunner:
/**
 * Creates a {@link LocalQueryRunner} for Hive benchmarks: a "tpch" catalog backed by the
 * TPC-H connector and a "hive" catalog backed by a file metastore under {@code tempDir},
 * pre-populated with the {@code orders} and {@code lineitem} tables from TPC-H sf1.
 *
 * @param tempDir scratch directory that will hold the file-based Hive metastore data
 * @return a query runner whose default session targets catalog "hive", schema "tpch"
 */
public static LocalQueryRunner createLocalQueryRunner(File tempDir)
{
    Session session = testSessionBuilder()
            .setCatalog("hive")
            .setSchema("tpch")
            .build();
    LocalQueryRunner queryRunner = LocalQueryRunner.create(session);

    // Register the TPC-H connector; it is the source the benchmark tables are copied from
    queryRunner.createCatalog("tpch", new TpchConnectorFactory(1), ImmutableMap.of());

    // Register a Hive catalog backed by a testing file metastore rooted under tempDir
    File hiveDataDir = new File(tempDir, "hive_data");
    HiveMetastore fileMetastore = createTestingFileHiveMetastore(hiveDataDir);
    fileMetastore.createDatabase(Database.builder()
            .setDatabaseName("tpch")
            .setOwnerName(Optional.of("public"))
            .setOwnerType(Optional.of(PrincipalType.ROLE))
            .build());
    queryRunner.createCatalog(
            "hive",
            new TestingHiveConnectorFactory(fileMetastore),
            ImmutableMap.<String, String>builder()
                    .put("hive.max-split-size", "10GB")
                    .buildOrThrow());

    // Materialize the benchmark tables from TPC-H scale factor 1 into the Hive catalog
    queryRunner.execute("CREATE TABLE orders AS SELECT * FROM tpch.sf1.orders");
    queryRunner.execute("CREATE TABLE lineitem AS SELECT * FROM tpch.sf1.lineitem");
    return queryRunner;
}
Example usage of io.trino.plugin.hive.metastore.HiveMetastore in the trino project (by trinodb).
From the class AbstractTestHiveLocal, method initialize:
/**
 * One-time test-class setup: creates a temporary directory, provisions a metastore with
 * the test database, and hands both to {@code setup} together with a Hive config whose
 * file-format time zones are pinned for reproducible results.
 */
@BeforeClass(alwaysRun = true)
public void initialize()
{
    tempDir = Files.createTempDir();

    // Create the database every test in this class runs against
    HiveMetastore testMetastore = createMetastore(tempDir, HIVE_IDENTITY);
    testMetastore.createDatabase(Database.builder()
            .setDatabaseName(testDbName)
            .setOwnerName(Optional.of("public"))
            .setOwnerType(Optional.of(PrincipalType.ROLE))
            .build());

    // Pin both file-format time zones so results do not depend on the host's zone
    HiveConfig config = new HiveConfig()
            .setParquetTimeZone("America/Los_Angeles")
            .setRcfileTimeZone("America/Los_Angeles");
    setup(testDbName, config, testMetastore, HDFS_ENVIRONMENT);
}
Example usage of io.trino.plugin.hive.metastore.HiveMetastore in the trino project (by trinodb).
From the class TestCachingHiveMetastore, method testLoadAfterInvalidate:
// Verifies that an invalidation (flushCache or invalidateTable, chosen by the data
// provider) racing with an in-flight cache load does not leave stale data in the cache:
// a mutation made AFTER the load entered the delegate metastore, followed by an
// invalidation, must be visible to the next read.
@Test(timeOut = 60_000, dataProviderClass = DataProviders.class, dataProvider = "trueFalse")
public void testLoadAfterInvalidate(boolean invalidateAll) throws Exception
{
    // State — concurrent structures because the background loader thread reads them
    // while the main thread mutates them mid-test
    CopyOnWriteArrayList<Column> tableColumns = new CopyOnWriteArrayList<>();
    ConcurrentMap<String, Partition> tablePartitionsByName = new ConcurrentHashMap<>();
    Map<String, String> tableParameters = new ConcurrentHashMap<>();
    tableParameters.put("frequent-changing-table-parameter", "parameter initial value");

    // Initialize data: one data column, one partition column, ten partitions pk=0..pk=9
    String databaseName = "my_database";
    String tableName = "my_table_name";

    tableColumns.add(new Column("value", toHiveType(VARCHAR), Optional.empty()));
    tableColumns.add(new Column("pk", toHiveType(VARCHAR), Optional.empty()));

    List<String> partitionNames = new ArrayList<>();
    for (int i = 0; i < 10; i++) {
        String partitionName = "pk=" + i;
        tablePartitionsByName.put(
                partitionName,
                Partition.builder()
                        .setDatabaseName(databaseName)
                        .setTableName(tableName)
                        .setColumns(ImmutableList.copyOf(tableColumns))
                        .setValues(List.of(Integer.toString(i)))
                        .withStorage(storage -> storage.setStorageFormat(fromHiveStorageFormat(TEXTFILE)))
                        .setParameters(Map.of("frequent-changing-partition-parameter", "parameter initial value"))
                        .build());
        partitionNames.add(partitionName);
    }

    // Mock metastore instrumented with latches so the test can deterministically
    // interleave the loader thread and the main thread at specific points
    CountDownLatch getTableEnteredLatch = new CountDownLatch(1);
    CountDownLatch getTableReturnLatch = new CountDownLatch(1);
    CountDownLatch getTableFinishedLatch = new CountDownLatch(1);
    CountDownLatch getPartitionsByNamesEnteredLatch = new CountDownLatch(1);
    CountDownLatch getPartitionsByNamesReturnLatch = new CountDownLatch(1);
    CountDownLatch getPartitionsByNamesFinishedLatch = new CountDownLatch(1);

    HiveMetastore mockMetastore = new UnimplementedHiveMetastore()
    {
        @Override
        public Optional<Table> getTable(String databaseName, String tableName)
        {
            // Snapshot the parameters as of load time, BEFORE the main thread mutates them
            Optional<Table> table = Optional.of(Table.builder()
                    .setDatabaseName(databaseName)
                    .setTableName(tableName)
                    .setTableType(EXTERNAL_TABLE.name())
                    .setDataColumns(tableColumns)
                    .setParameters(ImmutableMap.copyOf(tableParameters))
                    .withStorage(storage -> storage.setStorageFormat(fromHiveStorageFormat(TEXTFILE)))
                    .setOwner(Optional.empty())
                    .build());
            // step 1: signal the main thread that the table load has entered the delegate
            getTableEnteredLatch.countDown();
            // step 2: block until the main thread has mutated the parameters and invalidated
            await(getTableReturnLatch, 10, SECONDS);
            return table;
        }

        @Override
        public Map<String, Optional<Partition>> getPartitionsByNames(Table table, List<String> partitionNames)
        {
            // Snapshot the partitions as of load time, BEFORE the main thread mutates them
            Map<String, Optional<Partition>> result = new HashMap<>();
            for (String partitionName : partitionNames) {
                result.put(partitionName, Optional.ofNullable(tablePartitionsByName.get(partitionName)));
            }
            // step 4: signal the main thread that the partitions load has entered the delegate
            getPartitionsByNamesEnteredLatch.countDown();
            // step 5: block until the main thread has mutated a partition and invalidated
            await(getPartitionsByNamesReturnLatch, 10, SECONDS);
            return result;
        }
    };

    // Caching metastore under test.
    // NOTE(review): this 'executor' appears to be a field of the test class — the local
    // 'executor' declared below shadows it; confirm against the full class definition.
    metastore = cachingHiveMetastore(mockMetastore, executor, new Duration(5, TimeUnit.MINUTES), Optional.of(new Duration(1, TimeUnit.MINUTES)), 1000);

    // The test. Main thread does modifications and verifies subsequent load sees them.
    // Background thread loads the state into the cache.
    ExecutorService executor = Executors.newFixedThreadPool(1);
    try {
        Future<Void> future = executor.submit(() -> {
            try {
                Table table;
                table = metastore.getTable(databaseName, tableName).orElseThrow();
                // step 3: table load (and caching of its result) completed
                getTableFinishedLatch.countDown();
                metastore.getPartitionsByNames(table, partitionNames);
                // step 6: partitions load (and caching of its result) completed
                getPartitionsByNamesFinishedLatch.countDown();
                return (Void) null;
            }
            catch (Throwable e) {
                log.error(e);
                throw e;
            }
        });

        // step 1: wait until the loader is inside the delegate's getTable
        await(getTableEnteredLatch, 10, SECONDS);
        // Mutate the table while the load is in flight, then invalidate the cache
        tableParameters.put("frequent-changing-table-parameter", "main-thread-put-xyz");
        if (invalidateAll) {
            metastore.flushCache();
        }
        else {
            metastore.invalidateTable(databaseName, tableName);
        }
        // step 2: let the (now stale) load return to the cache
        getTableReturnLatch.countDown();
        // step 3: wait until the loader has finished caching the table
        await(getTableFinishedLatch, 10, SECONDS);

        // The stale in-flight load must not have been cached: a fresh read sees the new value
        Table table = metastore.getTable(databaseName, tableName).orElseThrow();
        assertThat(table.getParameters()).isEqualTo(Map.of("frequent-changing-table-parameter", "main-thread-put-xyz"));

        // step 4: wait until the loader is inside the delegate's getPartitionsByNames
        await(getPartitionsByNamesEnteredLatch, 10, SECONDS);
        // Mutate one partition while the partitions load is in flight, then invalidate
        String partitionName = partitionNames.get(2);
        Map<String, String> newPartitionParameters = Map.of("frequent-changing-partition-parameter", "main-thread-put-alice");
        tablePartitionsByName.put(
                partitionName,
                Partition.builder(tablePartitionsByName.get(partitionName))
                        .setParameters(newPartitionParameters)
                        .build());
        if (invalidateAll) {
            metastore.flushCache();
        }
        else {
            metastore.invalidateTable(databaseName, tableName);
        }
        // step 5: let the (now stale) partitions load return to the cache
        getPartitionsByNamesReturnLatch.countDown();
        // step 6: wait until the loader has finished caching the partitions
        await(getPartitionsByNamesFinishedLatch, 10, SECONDS);

        // Again, the stale load must not win: a fresh read sees the updated partition
        Map<String, Optional<Partition>> loadedPartitions = metastore.getPartitionsByNames(table, partitionNames);
        assertThat(loadedPartitions.get(partitionName))
                .isNotNull()
                .isPresent()
                .hasValueSatisfying(partition -> assertThat(partition.getParameters()).isEqualTo(newPartitionParameters));

        // verify no failure in the background thread
        future.get(10, SECONDS);
    }
    finally {
        // Release every latch so a failed assertion cannot leave the loader thread
        // blocked in await(), then tear down the background executor
        getTableEnteredLatch.countDown();
        getTableReturnLatch.countDown();
        getTableFinishedLatch.countDown();
        getPartitionsByNamesEnteredLatch.countDown();
        getPartitionsByNamesReturnLatch.countDown();
        getPartitionsByNamesFinishedLatch.countDown();
        executor.shutdownNow();
        executor.awaitTermination(10, SECONDS);
    }
}
Aggregations