Use of com.facebook.presto.spi.type.TypeManager in project presto by prestodb.
The class AbstractTestHiveClient, method setup.
protected final void setup(String databaseName, HiveClientConfig hiveClientConfig, ExtendedHiveMetastore hiveMetastore) {
    HiveConnectorId connectorId = new HiveConnectorId("hive-test");
    setupHive(connectorId.toString(), databaseName, hiveClientConfig.getTimeZone());
    metastoreClient = hiveMetastore;
    HdfsConfiguration hdfsConfiguration = new HiveHdfsConfiguration(new HdfsConfigurationUpdater(hiveClientConfig, new HiveS3Config()));
    hdfsEnvironment = new HdfsEnvironment(hdfsConfiguration, hiveClientConfig, new NoHdfsAuthentication());
    locationService = new HiveLocationService(hdfsEnvironment);
    TypeManager typeManager = new TypeRegistry();
    JsonCodec<PartitionUpdate> partitionUpdateCodec = JsonCodec.jsonCodec(PartitionUpdate.class);
    metadataFactory = new HiveMetadataFactory(
            connectorId,
            metastoreClient,
            hdfsEnvironment,
            new HivePartitionManager(connectorId, TYPE_MANAGER, hiveClientConfig),
            timeZone,
            10,
            true,
            true,
            false,
            true,
            HiveStorageFormat.RCBINARY,
            1000,
            typeManager,
            locationService,
            new TableParameterCodec(),
            partitionUpdateCodec,
            newFixedThreadPool(2),
            new HiveTypeTranslator(),
            TEST_SERVER_VERSION);
    transactionManager = new HiveTransactionManager();
    splitManager = new HiveSplitManager(
            connectorId,
            transactionHandle -> ((HiveMetadata) transactionManager.get(transactionHandle)).getMetastore(),
            new NamenodeStats(),
            hdfsEnvironment,
            new HadoopDirectoryLister(),
            newDirectExecutorService(),
            new HiveCoercionPolicy(typeManager),
            100,
            hiveClientConfig.getMinPartitionBatchSize(),
            hiveClientConfig.getMaxPartitionBatchSize(),
            hiveClientConfig.getMaxInitialSplits(),
            false);
    pageSinkProvider = new HivePageSinkProvider(
            getDefaultHiveFileWriterFactories(hiveClientConfig),
            hdfsEnvironment,
            metastoreClient,
            new GroupByHashPageIndexerFactory(JOIN_COMPILER),
            typeManager,
            new HiveClientConfig(),
            locationService,
            partitionUpdateCodec);
    pageSourceProvider = new HivePageSourceProvider(
            hiveClientConfig,
            hdfsEnvironment,
            getDefaultHiveRecordCursorProvider(hiveClientConfig),
            getDefaultHiveDataStreamFactories(hiveClientConfig),
            TYPE_MANAGER);
}
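The TypeRegistry created here is presto-main's default TypeManager implementation, and the Hive components above use it to turn textual type signatures into Presto Type objects. The sketch below is a minimal standalone illustration of that lookup, not part of the test; the class name TypeManagerSketch is hypothetical, and it assumes only TypeRegistry and the static TypeSignature.parseTypeSignature from the same code base.

import com.facebook.presto.spi.type.Type;
import com.facebook.presto.spi.type.TypeManager;
import com.facebook.presto.type.TypeRegistry;

import static com.facebook.presto.spi.type.TypeSignature.parseTypeSignature;

public class TypeManagerSketch {
    public static void main(String[] args) {
        // TypeRegistry ships with the built-in types already registered
        TypeManager typeManager = new TypeRegistry();

        // Types are resolved from their textual signatures
        Type bigint = typeManager.getType(parseTypeSignature("bigint"));
        Type doubleType = typeManager.getType(parseTypeSignature("double"));

        System.out.println(bigint.getDisplayName());      // bigint
        System.out.println(doubleType.getDisplayName());  // double
    }
}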
Use of com.facebook.presto.spi.type.TypeManager in project presto by prestodb.
The class ParquetHiveRecordCursor, method createParquetRecordReader.
private ParquetRecordReader<FakeParquetRecord> createParquetRecordReader(
        HdfsEnvironment hdfsEnvironment,
        String sessionUser,
        Configuration configuration,
        Path path,
        long start,
        long length,
        List<HiveColumnHandle> columns,
        boolean useParquetColumnNames,
        TypeManager typeManager,
        boolean predicatePushdownEnabled,
        TupleDomain<HiveColumnHandle> effectivePredicate) {
    ParquetDataSource dataSource = null;
    try {
        FileSystem fileSystem = hdfsEnvironment.getFileSystem(sessionUser, path, configuration);
        dataSource = buildHdfsParquetDataSource(fileSystem, path, start, length);
        ParquetMetadata parquetMetadata = hdfsEnvironment.doAs(sessionUser, () -> ParquetFileReader.readFooter(configuration, path, NO_FILTER));
        List<BlockMetaData> blocks = parquetMetadata.getBlocks();
        FileMetaData fileMetaData = parquetMetadata.getFileMetaData();
        MessageType fileSchema = fileMetaData.getSchema();
        PrestoReadSupport readSupport = new PrestoReadSupport(useParquetColumnNames, columns, fileSchema);
        List<parquet.schema.Type> fields = columns.stream()
                .filter(column -> column.getColumnType() == REGULAR)
                .map(column -> getParquetType(column, fileSchema, useParquetColumnNames))
                .filter(Objects::nonNull)
                .collect(toList());
        MessageType requestedSchema = new MessageType(fileSchema.getName(), fields);
        LongArrayList offsets = new LongArrayList(blocks.size());
        for (BlockMetaData block : blocks) {
            long firstDataPage = block.getColumns().get(0).getFirstDataPageOffset();
            if (firstDataPage >= start && firstDataPage < start + length) {
                if (predicatePushdownEnabled) {
                    ParquetPredicate parquetPredicate = buildParquetPredicate(columns, effectivePredicate, fileMetaData.getSchema(), typeManager);
                    if (predicateMatches(parquetPredicate, block, dataSource, requestedSchema, effectivePredicate)) {
                        offsets.add(block.getStartingPos());
                    }
                } else {
                    offsets.add(block.getStartingPos());
                }
            }
        }
        ParquetInputSplit split = new ParquetInputSplit(path, start, start + length, length, null, offsets.toLongArray());
        TaskAttemptContext taskContext = ContextUtil.newTaskAttemptContext(configuration, new TaskAttemptID());
        return hdfsEnvironment.doAs(sessionUser, () -> {
            ParquetRecordReader<FakeParquetRecord> realReader = new PrestoParquetRecordReader(readSupport);
            realReader.initialize(split, taskContext);
            return realReader;
        });
    } catch (Exception e) {
        Throwables.propagateIfInstanceOf(e, PrestoException.class);
        if (e instanceof InterruptedException) {
            Thread.currentThread().interrupt();
            throw Throwables.propagate(e);
        }
        String message = format("Error opening Hive split %s (offset=%s, length=%s): %s", path, start, length, e.getMessage());
        if (e.getClass().getSimpleName().equals("BlockMissingException")) {
            throw new PrestoException(HIVE_MISSING_DATA, message, e);
        }
        throw new PrestoException(HIVE_CANNOT_OPEN_SPLIT, message, e);
    } finally {
        if (dataSource != null) {
            try {
                dataSource.close();
            } catch (IOException ignored) {
            }
        }
    }
}
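Both this record cursor and the page source factory below assign a Parquet row group to a split by checking whether the row group's first data page offset falls inside the split's byte range [start, start + length). The helper below is a minimal extraction of that selection step for illustration; RowGroupSelector is a hypothetical class name, and the pre-Apache parquet package names match the parquet.schema.Type usage above.

import java.util.ArrayList;
import java.util.List;

import parquet.hadoop.metadata.BlockMetaData;

final class RowGroupSelector {
    private RowGroupSelector() {}

    // Keep only the row groups whose first data page starts inside [start, start + length),
    // mirroring the loop in createParquetRecordReader above.
    static List<BlockMetaData> selectRowGroups(List<BlockMetaData> blocks, long start, long length) {
        List<BlockMetaData> selected = new ArrayList<>();
        for (BlockMetaData block : blocks) {
            long firstDataPage = block.getColumns().get(0).getFirstDataPageOffset();
            if (firstDataPage >= start && firstDataPage < start + length) {
                selected.add(block);
            }
        }
        return selected;
    }
}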
Use of com.facebook.presto.spi.type.TypeManager in project presto by prestodb.
The class ParquetPageSourceFactory, method createParquetPageSource.
public static ParquetPageSource createParquetPageSource(
        HdfsEnvironment hdfsEnvironment,
        String user,
        Configuration configuration,
        Path path,
        long start,
        long length,
        Properties schema,
        List<HiveColumnHandle> columns,
        boolean useParquetColumnNames,
        TypeManager typeManager,
        boolean predicatePushdownEnabled,
        TupleDomain<HiveColumnHandle> effectivePredicate) {
    AggregatedMemoryContext systemMemoryContext = new AggregatedMemoryContext();
    ParquetDataSource dataSource = null;
    try {
        FileSystem fileSystem = hdfsEnvironment.getFileSystem(user, path, configuration);
        dataSource = buildHdfsParquetDataSource(fileSystem, path, start, length);
        ParquetMetadata parquetMetadata = ParquetMetadataReader.readFooter(fileSystem, path);
        FileMetaData fileMetaData = parquetMetadata.getFileMetaData();
        MessageType fileSchema = fileMetaData.getSchema();
        List<parquet.schema.Type> fields = columns.stream()
                .filter(column -> column.getColumnType() == REGULAR)
                .map(column -> getParquetType(column, fileSchema, useParquetColumnNames))
                .filter(Objects::nonNull)
                .collect(toList());
        MessageType requestedSchema = new MessageType(fileSchema.getName(), fields);
        List<BlockMetaData> blocks = new ArrayList<>();
        for (BlockMetaData block : parquetMetadata.getBlocks()) {
            long firstDataPage = block.getColumns().get(0).getFirstDataPageOffset();
            if (firstDataPage >= start && firstDataPage < start + length) {
                blocks.add(block);
            }
        }
        if (predicatePushdownEnabled) {
            ParquetPredicate parquetPredicate = buildParquetPredicate(columns, effectivePredicate, fileMetaData.getSchema(), typeManager);
            final ParquetDataSource finalDataSource = dataSource;
            blocks = blocks.stream()
                    .filter(block -> predicateMatches(parquetPredicate, block, finalDataSource, requestedSchema, effectivePredicate))
                    .collect(toList());
        }
        ParquetReader parquetReader = new ParquetReader(fileSchema, requestedSchema, blocks, dataSource, typeManager, systemMemoryContext);
        return new ParquetPageSource(parquetReader, dataSource, fileSchema, requestedSchema, length, schema, columns, effectivePredicate, typeManager, useParquetColumnNames, systemMemoryContext);
    } catch (Exception e) {
        try {
            if (dataSource != null) {
                dataSource.close();
            }
        } catch (IOException ignored) {
        }
        if (e instanceof PrestoException) {
            throw (PrestoException) e;
        }
        String message = format("Error opening Hive split %s (offset=%s, length=%s): %s", path, start, length, e.getMessage());
        if (e.getClass().getSimpleName().equals("BlockMissingException")) {
            throw new PrestoException(HIVE_MISSING_DATA, message, e);
        }
        throw new PrestoException(HIVE_CANNOT_OPEN_SPLIT, message, e);
    }
}
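The effectivePredicate that drives predicate pushdown here is a TupleDomain over column handles; only row groups whose statistics can satisfy it are kept. As a rough sketch of how such a predicate is expressed, the snippet below builds a single-value domain. It uses String keys purely for illustration (the factory actually receives a TupleDomain<HiveColumnHandle>), the class name PredicateSketch is hypothetical, and it assumes only the Domain and TupleDomain classes from com.facebook.presto.spi.predicate.

import com.facebook.presto.spi.predicate.Domain;
import com.facebook.presto.spi.predicate.TupleDomain;
import com.google.common.collect.ImmutableMap;

import static com.facebook.presto.spi.type.BigintType.BIGINT;

public class PredicateSketch {
    public static void main(String[] args) {
        // Constrain a hypothetical column "a" to the single bigint value 42
        TupleDomain<String> effectivePredicate = TupleDomain.withColumnDomains(
                ImmutableMap.of("a", Domain.singleValue(BIGINT, 42L)));

        // TupleDomain.all() expresses "no constraint", so no row groups can be pruned
        TupleDomain<String> noPushdown = TupleDomain.all();

        System.out.println(effectivePredicate.isAll());  // false
        System.out.println(noPushdown.isAll());          // true
    }
}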
Use of com.facebook.presto.spi.type.TypeManager in project presto by prestodb.
The class TestAnalyzer, method setup.
@BeforeMethod(alwaysRun = true)
public void setup() throws Exception {
    TypeManager typeManager = new TypeRegistry();
    CatalogManager catalogManager = new CatalogManager();
    transactionManager = createTestTransactionManager(catalogManager);
    accessControl = new AccessControlManager(transactionManager);
    metadata = new MetadataManager(
            new FeaturesConfig(),
            typeManager,
            new BlockEncodingManager(typeManager),
            new SessionPropertyManager(),
            new SchemaPropertyManager(),
            new TablePropertyManager(),
            transactionManager);
    metadata.getFunctionRegistry().addFunctions(ImmutableList.of(APPLY_FUNCTION));
    catalogManager.registerCatalog(createTestingCatalog(TPCH_CATALOG, TPCH_CONNECTOR_ID));
    catalogManager.registerCatalog(createTestingCatalog(SECOND_CATALOG, SECOND_CONNECTOR_ID));
    catalogManager.registerCatalog(createTestingCatalog(THIRD_CATALOG, THIRD_CONNECTOR_ID));
    SchemaTableName table1 = new SchemaTableName("s1", "t1");
    inSetupTransaction(session -> metadata.createTable(session, TPCH_CATALOG, new ConnectorTableMetadata(table1,
            ImmutableList.of(new ColumnMetadata("a", BIGINT), new ColumnMetadata("b", BIGINT), new ColumnMetadata("c", BIGINT), new ColumnMetadata("d", BIGINT)))));
    SchemaTableName table2 = new SchemaTableName("s1", "t2");
    inSetupTransaction(session -> metadata.createTable(session, TPCH_CATALOG, new ConnectorTableMetadata(table2,
            ImmutableList.of(new ColumnMetadata("a", BIGINT), new ColumnMetadata("b", BIGINT)))));
    SchemaTableName table3 = new SchemaTableName("s1", "t3");
    inSetupTransaction(session -> metadata.createTable(session, TPCH_CATALOG, new ConnectorTableMetadata(table3,
            ImmutableList.of(new ColumnMetadata("a", BIGINT), new ColumnMetadata("b", BIGINT), new ColumnMetadata("x", BIGINT, null, true)))));
    // table in a different catalog
    SchemaTableName table4 = new SchemaTableName("s2", "t4");
    inSetupTransaction(session -> metadata.createTable(session, SECOND_CATALOG, new ConnectorTableMetadata(table4,
            ImmutableList.of(new ColumnMetadata("a", BIGINT)))));
    // table with a hidden column
    SchemaTableName table5 = new SchemaTableName("s1", "t5");
    inSetupTransaction(session -> metadata.createTable(session, TPCH_CATALOG, new ConnectorTableMetadata(table5,
            ImmutableList.of(new ColumnMetadata("a", BIGINT), new ColumnMetadata("b", BIGINT, null, true)))));
    // table with a varchar column
    SchemaTableName table6 = new SchemaTableName("s1", "t6");
    inSetupTransaction(session -> metadata.createTable(session, TPCH_CATALOG, new ConnectorTableMetadata(table6,
            ImmutableList.of(new ColumnMetadata("a", BIGINT), new ColumnMetadata("b", VARCHAR), new ColumnMetadata("c", BIGINT), new ColumnMetadata("d", BIGINT)))));
    // table with bigint, double, array-of-bigint and array-of-double columns
    SchemaTableName table7 = new SchemaTableName("s1", "t7");
    inSetupTransaction(session -> metadata.createTable(session, TPCH_CATALOG, new ConnectorTableMetadata(table7,
            ImmutableList.of(new ColumnMetadata("a", BIGINT), new ColumnMetadata("b", DOUBLE), new ColumnMetadata("c", new ArrayType(BIGINT)), new ColumnMetadata("d", new ArrayType(DOUBLE))))));
    // valid view referencing a table in the same schema
    String viewData1 = JsonCodec.jsonCodec(ViewDefinition.class).toJson(
            new ViewDefinition("select a from t1", Optional.of(TPCH_CATALOG), Optional.of("s1"), ImmutableList.of(new ViewColumn("a", BIGINT)), Optional.of("user")));
    inSetupTransaction(session -> metadata.createView(session, new QualifiedObjectName(TPCH_CATALOG, "s1", "v1"), viewData1, false));
    // stale view (different column type)
    String viewData2 = JsonCodec.jsonCodec(ViewDefinition.class).toJson(
            new ViewDefinition("select a from t1", Optional.of(TPCH_CATALOG), Optional.of("s1"), ImmutableList.of(new ViewColumn("a", VARCHAR)), Optional.of("user")));
    inSetupTransaction(session -> metadata.createView(session, new QualifiedObjectName(TPCH_CATALOG, "s1", "v2"), viewData2, false));
    // view referencing a table in a different schema from both itself and the session
    String viewData3 = JsonCodec.jsonCodec(ViewDefinition.class).toJson(
            new ViewDefinition("select a from t4", Optional.of(SECOND_CATALOG), Optional.of("s2"), ImmutableList.of(new ViewColumn("a", BIGINT)), Optional.of("owner")));
    inSetupTransaction(session -> metadata.createView(session, new QualifiedObjectName(THIRD_CATALOG, "s3", "v3"), viewData3, false));
    // valid view with an uppercase column name
    String viewData4 = JsonCodec.jsonCodec(ViewDefinition.class).toJson(
            new ViewDefinition("select A from t1", Optional.of("tpch"), Optional.of("s1"), ImmutableList.of(new ViewColumn("a", BIGINT)), Optional.of("user")));
    inSetupTransaction(session -> metadata.createView(session, new QualifiedObjectName("tpch", "s1", "v4"), viewData4, false));
    // recursive view referencing itself
    String viewData5 = JsonCodec.jsonCodec(ViewDefinition.class).toJson(
            new ViewDefinition("select * from v5", Optional.of(TPCH_CATALOG), Optional.of("s1"), ImmutableList.of(new ViewColumn("a", BIGINT)), Optional.of("user")));
    inSetupTransaction(session -> metadata.createView(session, new QualifiedObjectName(TPCH_CATALOG, "s1", "v5"), viewData5, false));
    this.metadata = metadata;
}
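Here the TypeRegistry is handed to MetadataManager, which serves type lookups to the analyzer. As a small usage fragment (assuming Metadata exposes getType(TypeSignature) as MetadataManager does, plus TypeSignature.parseTypeSignature and TestNG's assertEquals), a test could resolve a parametric type through the metadata assembled above:

// Hypothetical fragment inside a test method of this class:
// resolving a type goes through the TypeRegistry the MetadataManager was built with.
Type arrayOfBigint = metadata.getType(parseTypeSignature("array(bigint)"));
assertEquals(arrayOfBigint.getDisplayName(), "array(bigint)");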
Use of com.facebook.presto.spi.type.TypeManager in project presto by prestodb.
The class AccumuloModule, method configure.
@Override
public void configure(Binder binder) {
    // Add an appender to the Log4J root logger
    JulAppender appender = new JulAppender();
    appender.setLayout(new PatternLayout("%d %-5p %c - %m%n"));
    appender.setThreshold(Level.INFO);
    appender.activateOptions();
    org.apache.log4j.Logger.getRootLogger().addAppender(appender);
    binder.bind(TypeManager.class).toInstance(typeManager);
    binder.bind(AccumuloConnector.class).in(Scopes.SINGLETON);
    binder.bind(AccumuloConnectorId.class).toInstance(new AccumuloConnectorId(connectorId));
    binder.bind(AccumuloMetadata.class).in(Scopes.SINGLETON);
    binder.bind(AccumuloMetadataFactory.class).in(Scopes.SINGLETON);
    binder.bind(AccumuloClient.class).in(Scopes.SINGLETON);
    binder.bind(AccumuloSplitManager.class).in(Scopes.SINGLETON);
    binder.bind(AccumuloRecordSetProvider.class).in(Scopes.SINGLETON);
    binder.bind(AccumuloPageSinkProvider.class).in(Scopes.SINGLETON);
    binder.bind(AccumuloHandleResolver.class).in(Scopes.SINGLETON);
    binder.bind(AccumuloSessionProperties.class).in(Scopes.SINGLETON);
    binder.bind(AccumuloTableProperties.class).in(Scopes.SINGLETON);
    binder.bind(ZooKeeperMetadataManager.class).in(Scopes.SINGLETON);
    binder.bind(AccumuloTableManager.class).in(Scopes.SINGLETON);
    binder.bind(Connector.class).toProvider(ConnectorProvider.class);
    configBinder(binder).bindConfig(AccumuloConfig.class);
    jsonBinder(binder).addDeserializerBinding(Type.class).to(TypeDeserializer.class);
    jsonCodecBinder(binder).bindMapJsonCodec(String.class, JsonCodec.listJsonCodec(AccumuloTable.class));
}
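The binder.bind(TypeManager.class).toInstance(typeManager) line makes the connector's TypeManager available to every Guice-managed class in this module (the same mechanism that wires AccumuloClient, ZooKeeperMetadataManager, and the other singletons above). A minimal sketch of such a consumer is shown below; ExampleTypeAwareComponent is a hypothetical class, not part of the connector.

import com.facebook.presto.spi.type.Type;
import com.facebook.presto.spi.type.TypeManager;

import javax.inject.Inject;

import static com.facebook.presto.spi.type.TypeSignature.parseTypeSignature;
import static java.util.Objects.requireNonNull;

// Hypothetical consumer of the TypeManager binding declared in AccumuloModule.configure
public class ExampleTypeAwareComponent {
    private final TypeManager typeManager;

    @Inject
    public ExampleTypeAwareComponent(TypeManager typeManager) {
        this.typeManager = requireNonNull(typeManager, "typeManager is null");
    }

    // Resolve a Presto type from its textual signature, e.g. "varchar" or "bigint"
    public Type resolve(String signature) {
        return typeManager.getType(parseTypeSignature(signature));
    }
}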