use of com.facebook.presto.hive.metastore.ExtendedHiveMetastore in project presto by prestodb.
the class TestHivePageSink method testAllFormats.
@Test
public void testAllFormats()
        throws Exception
{
    HiveClientConfig config = new HiveClientConfig();
    MetastoreClientConfig metastoreClientConfig = new MetastoreClientConfig();
    File tempDir = Files.createTempDir();
    try {
        ExtendedHiveMetastore metastore = createTestingFileHiveMetastore(new File(tempDir, "metastore"));
        for (HiveStorageFormat format : HiveStorageFormat.values()) {
            if (format == HiveStorageFormat.CSV) {
                // CSV supports only unbounded VARCHAR type, which is not provided by lineitem
                continue;
            }
            config.setHiveStorageFormat(format);
            config.setCompressionCodec(NONE);
            long uncompressedLength = writeTestFile(config, metastoreClientConfig, metastore, makeFileName(tempDir, config));
            assertGreaterThan(uncompressedLength, 0L);
            for (HiveCompressionCodec codec : HiveCompressionCodec.values()) {
                if (codec == NONE || !codec.isSupportedStorageFormat(format)) {
                    continue;
                }
                config.setCompressionCodec(codec);
                long length = writeTestFile(config, metastoreClientConfig, metastore, makeFileName(tempDir, config));
                assertTrue(uncompressedLength > length, format("%s with %s compressed to %s which is not less than %s", format, codec, length, uncompressedLength));
            }
        }
    }
    finally {
        deleteRecursively(tempDir.toPath(), ALLOW_INSECURE);
    }
}
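The makeFileName helper is not shown in this excerpt. A minimal sketch, assuming it only needs to yield a distinct path per storage-format/codec combination (the body here is inferred, not taken from the Presto source):

    // Hypothetical helper: builds a distinct output path for each
    // format/codec combination so successive writeTestFile calls
    // do not overwrite each other's output.
    private static String makeFileName(File tempDir, HiveClientConfig config)
    {
        return tempDir.getAbsolutePath() + "/" + config.getHiveStorageFormat().name() + "." + config.getCompressionCodec().name();
    }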
use of com.facebook.presto.hive.metastore.ExtendedHiveMetastore in project presto by prestodb.
the class TestingSemiTransactionalHiveMetastore method create.
public static TestingSemiTransactionalHiveMetastore create()
{
    // none of these values matter, as we never use them
    HiveClientConfig config = new HiveClientConfig();
    MetastoreClientConfig metastoreClientConfig = new MetastoreClientConfig();
    HdfsConfiguration hdfsConfiguration = new HiveHdfsConfiguration(new HdfsConfigurationInitializer(config, metastoreClientConfig), ImmutableSet.of());
    HdfsEnvironment hdfsEnvironment = new HdfsEnvironment(hdfsConfiguration, metastoreClientConfig, new NoHdfsAuthentication());
    HiveCluster hiveCluster = new TestingHiveCluster(metastoreClientConfig, HOST, PORT);
    ColumnConverterProvider columnConverterProvider = HiveColumnConverterProvider.DEFAULT_COLUMN_CONVERTER_PROVIDER;
    ExtendedHiveMetastore delegate = new BridgingHiveMetastore(new ThriftHiveMetastore(hiveCluster, metastoreClientConfig), new HivePartitionMutator());
    ExecutorService executor = newCachedThreadPool(daemonThreadsNamed("hive-%s"));
    ListeningExecutorService renameExecutor = listeningDecorator(executor);
    return new TestingSemiTransactionalHiveMetastore(hdfsEnvironment, delegate, renameExecutor, false, false, true, columnConverterProvider);
}
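A usage sketch (assumed, not part of the excerpt): the factory hard-codes HOST and PORT, and since daemonThreadsNamed yields daemon threads, the rename executor will not keep a test JVM alive after tests finish.

    // Hypothetical test usage: the instance stands in wherever a
    // SemiTransactionalHiveMetastore is required; per the comment above,
    // the Thrift endpoint is never actually contacted.
    TestingSemiTransactionalHiveMetastore metastore = TestingSemiTransactionalHiveMetastore.create();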
use of com.facebook.presto.hive.metastore.ExtendedHiveMetastore in project presto by prestodb.
the class AbstractTestHiveClient method setup.
protected final void setup(String databaseName, HiveClientConfig hiveClientConfig, ExtendedHiveMetastore hiveMetastore)
{
    HiveConnectorId connectorId = new HiveConnectorId("hive-test");
    setupHive(connectorId.toString(), databaseName, hiveClientConfig.getTimeZone());
    metastoreClient = hiveMetastore;
    HdfsConfiguration hdfsConfiguration = new HiveHdfsConfiguration(new HdfsConfigurationUpdater(hiveClientConfig, new HiveS3Config()));
    hdfsEnvironment = new HdfsEnvironment(hdfsConfiguration, hiveClientConfig, new NoHdfsAuthentication());
    locationService = new HiveLocationService(hdfsEnvironment);
    TypeManager typeManager = new TypeRegistry();
    JsonCodec<PartitionUpdate> partitionUpdateCodec = JsonCodec.jsonCodec(PartitionUpdate.class);
    metadataFactory = new HiveMetadataFactory(connectorId, metastoreClient, hdfsEnvironment, new HivePartitionManager(connectorId, TYPE_MANAGER, hiveClientConfig), timeZone, 10, true, true, false, true, HiveStorageFormat.RCBINARY, 1000, typeManager, locationService, new TableParameterCodec(), partitionUpdateCodec, newFixedThreadPool(2), new HiveTypeTranslator(), TEST_SERVER_VERSION);
    transactionManager = new HiveTransactionManager();
    splitManager = new HiveSplitManager(connectorId, transactionHandle -> ((HiveMetadata) transactionManager.get(transactionHandle)).getMetastore(), new NamenodeStats(), hdfsEnvironment, new HadoopDirectoryLister(), newDirectExecutorService(), new HiveCoercionPolicy(typeManager), 100, hiveClientConfig.getMinPartitionBatchSize(), hiveClientConfig.getMaxPartitionBatchSize(), hiveClientConfig.getMaxInitialSplits(), false);
    pageSinkProvider = new HivePageSinkProvider(getDefaultHiveFileWriterFactories(hiveClientConfig), hdfsEnvironment, metastoreClient, new GroupByHashPageIndexerFactory(JOIN_COMPILER), typeManager, new HiveClientConfig(), locationService, partitionUpdateCodec);
    pageSourceProvider = new HivePageSourceProvider(hiveClientConfig, hdfsEnvironment, getDefaultHiveRecordCursorProvider(hiveClientConfig), getDefaultHiveDataStreamFactories(hiveClientConfig), TYPE_MANAGER);
}
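A concrete test class would call setup during initialization; a minimal sketch, assuming the file-based testing metastore from the first example (the database name and directory are placeholders):

    // Hypothetical wiring for a subclass of AbstractTestHiveClient.
    @BeforeClass
    public void initialize()
    {
        File tempDir = Files.createTempDir();
        ExtendedHiveMetastore metastore = createTestingFileHiveMetastore(new File(tempDir, "metastore"));
        setup("test_database", new HiveClientConfig(), metastore);
    }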
use of com.facebook.presto.hive.metastore.ExtendedHiveMetastore in project presto by prestodb.
the class RollbackToSnapshotProcedure method rollbackToSnapshot.
public void rollbackToSnapshot(ConnectorSession clientSession, String schema, String table, Long snapshotId)
{
    SchemaTableName schemaTableName = new SchemaTableName(schema, table);
    ConnectorMetadata metadata = metadataFactory.create();
    Table icebergTable;
    if (catalogType == HADOOP) {
        icebergTable = resourceFactory.getCatalog(clientSession).loadTable(toIcebergTableIdentifier(schema, table));
    }
    else {
        ExtendedHiveMetastore metastore = ((IcebergHiveMetadata) metadata).getMetastore();
        icebergTable = getHiveIcebergTable(metastore, hdfsEnvironment, clientSession, schemaTableName);
    }
    icebergTable.rollback().toSnapshotId(snapshotId).commit();
}
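Once a table handle is loaded, the rollback itself is plain Iceberg API; a minimal sketch (the catalog variable and the identifier strings are assumptions, not from the excerpt):

    // Hypothetical direct-API equivalent of the procedure body: makes the
    // given snapshot the table's current snapshot.
    Table icebergTable = catalog.loadTable(TableIdentifier.of("schema", "table"));
    icebergTable.rollback().toSnapshotId(snapshotId).commit();

In SQL this is typically surfaced as a procedure call along the lines of CALL iceberg.system.rollback_to_snapshot(schema, table, snapshot_id).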
use of com.facebook.presto.hive.metastore.ExtendedHiveMetastore in project presto by prestodb.
the class IcebergSplitManager method getSplits.
@Override
public ConnectorSplitSource getSplits(ConnectorTransactionHandle transaction, ConnectorSession session, ConnectorTableLayoutHandle layout, SplitSchedulingContext splitSchedulingContext)
{
    IcebergTableLayoutHandle layoutHandle = (IcebergTableLayoutHandle) layout;
    IcebergTableHandle table = layoutHandle.getTable();
    if (!table.getSnapshotId().isPresent()) {
        return new FixedSplitSource(ImmutableList.of());
    }
    Table icebergTable;
    if (catalogType == HADOOP) {
        icebergTable = getHadoopIcebergTable(resourceFactory, session, table.getSchemaTableName());
    }
    else {
        ExtendedHiveMetastore metastore = ((IcebergHiveMetadata) transactionManager.get(transaction)).getMetastore();
        icebergTable = getHiveIcebergTable(metastore, hdfsEnvironment, session, table.getSchemaTableName());
    }
    TableScan tableScan = icebergTable.newScan().filter(toIcebergExpression(table.getPredicate())).useSnapshot(table.getSnapshotId().get());
    // TODO Use residual. Right now there is no way to propagate residual to presto but at least we can
    // propagate it at split level so the parquet pushdown can leverage it.
    IcebergSplitSource splitSource = new IcebergSplitSource(session, tableScan.planTasks());
    return splitSource;
}
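The method depends on class state not shown in the excerpt; a sketch of the assumed surrounding fields, with names inferred from the body above rather than copied from the Presto source:

    // Sketch only: field names are inferred from how getSplits uses them.
    public class IcebergSplitManager
            implements ConnectorSplitManager
    {
        private final IcebergTransactionManager transactionManager;
        private final HdfsEnvironment hdfsEnvironment;
        private final IcebergResourceFactory resourceFactory;
        private final CatalogType catalogType;
        // constructor injection of the four fields, then getSplits as shown above
    }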