Search in sources:

Example 1 with NOT_SUPPORTED

Use of io.prestosql.spi.StandardErrorCode.NOT_SUPPORTED in project hetu-core by openlookeng.

From the class ConcatFunction, the method generateConcat:

private static Class<?> generateConcat(TypeSignature type, int arity) {
    checkCondition(arity <= 254, NOT_SUPPORTED, "Too many arguments for string concatenation");
    ClassDefinition definition = new ClassDefinition(a(PUBLIC, FINAL), makeClassName(type.getBase() + "_concat" + arity + "ScalarFunction"), type(Object.class));
    // Generate constructor
    definition.declareDefaultConstructor(a(PRIVATE));
    // Generate concat()
    List<Parameter> parameters = IntStream.range(0, arity).mapToObj(i -> arg("arg" + i, Slice.class)).collect(toImmutableList());
    MethodDefinition method = definition.declareMethod(a(PUBLIC, STATIC), "concat", type(Slice.class), parameters);
    Scope scope = method.getScope();
    BytecodeBlock body = method.getBody();
    Variable length = scope.declareVariable(int.class, "length");
    body.append(length.set(constantInt(0)));
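    // Sum the argument lengths up front, using checked addition so an int overflow cannot produce a short allocation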
    for (int i = 0; i < arity; ++i) {
        body.append(length.set(generateCheckedAdd(length, parameters.get(i).invoke("length", int.class))));
    }
    Variable result = scope.declareVariable(Slice.class, "result");
    body.append(result.set(invokeStatic(Slices.class, "allocate", Slice.class, length)));
    Variable position = scope.declareVariable(int.class, "position");
    body.append(position.set(constantInt(0)));
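    // Copy each argument into the result slice, advancing the write offset by the argument's length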
    for (int i = 0; i < arity; ++i) {
        body.append(result.invoke("setBytes", void.class, position, parameters.get(i)));
        body.append(position.set(add(position, parameters.get(i).invoke("length", int.class))));
    }
    body.getVariable(result).retObject();
    return defineClass(definition, Object.class, ImmutableMap.of(), new DynamicClassLoader(ConcatFunction.class.getClassLoader()));
}
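
For reference, the generated bytecode above is equivalent to the following plain Java. This is a minimal sketch (the class and method names are illustrative, not part of hetu-core); note that generateCheckedAdd in the real generated code converts the overflow into a PrestoException (INVALID_FUNCTION_ARGUMENT) rather than letting ArithmeticException escape.

import io.airlift.slice.Slice;
import io.airlift.slice.Slices;

import static java.lang.Math.addExact;

public final class ConcatSketch {
    private ConcatSketch() {
    }

    // Plain-Java equivalent of the generated concat: checked length sum, then one copy pass
    public static Slice concat(Slice... args) {
        int length = 0;
        for (Slice arg : args) {
            // addExact throws ArithmeticException on int overflow, standing in for generateCheckedAdd
            length = addExact(length, arg.length());
        }
        Slice result = Slices.allocate(length);
        int position = 0;
        for (Slice arg : args) {
            result.setBytes(position, arg);
            position += arg.length();
        }
        return result;
    }
}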

Example 2 with NOT_SUPPORTED

Use of io.prestosql.spi.StandardErrorCode.NOT_SUPPORTED in project hetu-core by openlookeng.

From the class FormatFunction, the method valueConverter:

private static BiFunction<ConnectorSession, Block, Object> valueConverter(FunctionAndTypeManager functionAndTypeManager, Type type, int position) {
    if (type.equals(UNKNOWN)) {
        return (session, block) -> null;
    }
    if (type.equals(BOOLEAN)) {
        return (session, block) -> type.getBoolean(block, position);
    }
    if (type.equals(TINYINT) || type.equals(SMALLINT) || type.equals(INTEGER) || type.equals(BIGINT)) {
        return (session, block) -> type.getLong(block, position);
    }
    if (type.equals(REAL)) {
        return (session, block) -> intBitsToFloat(toIntExact(type.getLong(block, position)));
    }
    if (type.equals(DOUBLE)) {
        return (session, block) -> type.getDouble(block, position);
    }
    if (type.equals(DATE)) {
        return (session, block) -> LocalDate.ofEpochDay(type.getLong(block, position));
    }
    if (type.equals(TIMESTAMP_WITH_TIME_ZONE)) {
        return (session, block) -> toZonedDateTime(type.getLong(block, position));
    }
    if (type.equals(TIMESTAMP)) {
        return (session, block) -> toLocalDateTime(type.getLong(block, position));
    }
    if (type.equals(TIME)) {
        return (session, block) -> toLocalTime(session, type.getLong(block, position));
    }
    // TODO: support TIME WITH TIME ZONE by making SqlTimeWithTimeZone implement TemporalAccessor
    if (type.equals(JSON)) {
        FunctionHandle functionHandle = functionAndTypeManager.resolveFunction(Optional.empty(), QualifiedObjectName.valueOf(DEFAULT_NAMESPACE, "json_format"), fromTypes(JSON));
        MethodHandle handle = functionAndTypeManager.getBuiltInScalarFunctionImplementation(functionHandle).getMethodHandle();
        return (session, block) -> convertToString(handle, type.getSlice(block, position));
    }
    if (isShortDecimal(type)) {
        int scale = ((DecimalType) type).getScale();
        return (session, block) -> BigDecimal.valueOf(type.getLong(block, position), scale);
    }
    if (isLongDecimal(type)) {
        int scale = ((DecimalType) type).getScale();
        return (session, block) -> new BigDecimal(decodeUnscaledValue(type.getSlice(block, position)), scale);
    }
    if (isVarcharType(type) || isCharType(type)) {
        return (session, block) -> type.getSlice(block, position).toStringUtf8();
    }
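    // No type-specific conversion matched: read the value via its raw stack type and cast it to varchar below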
    BiFunction<ConnectorSession, Block, Object> function;
    if (type.getJavaType() == long.class) {
        function = (session, block) -> type.getLong(block, position);
    } else if (type.getJavaType() == double.class) {
        function = (session, block) -> type.getDouble(block, position);
    } else if (type.getJavaType() == boolean.class) {
        function = (session, block) -> type.getBoolean(block, position);
    } else if (type.getJavaType() == Slice.class) {
        function = (session, block) -> type.getSlice(block, position);
    } else {
        function = (session, block) -> type.getObject(block, position);
    }
    MethodHandle handle = castToVarchar(functionAndTypeManager, type);
    if ((handle == null) || (handle.type().parameterCount() != 1)) {
        throw new PrestoException(NOT_SUPPORTED, "Type not supported for formatting: " + type.getDisplayName());
    }
    return (session, block) -> convertToString(handle, function.apply(session, block));
}
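
The convertToString helper is not shown above. A minimal sketch of what it plausibly does, assuming the resolved cast MethodHandle accepts the raw value and returns a varchar Slice (the body below is an assumption, not the hetu-core source):

import io.airlift.slice.Slice;

import java.lang.invoke.MethodHandle;

public final class ConvertToStringSketch {
    private ConvertToStringSketch() {
    }

    // Hypothetical body for the convertToString helper referenced above: apply the
    // resolved cast MethodHandle, then decode the returned varchar Slice as UTF-8
    static Object convertToString(MethodHandle castHandle, Object value) {
        try {
            Slice varchar = (Slice) castHandle.invoke(value);
            return varchar == null ? null : varchar.toStringUtf8();
        }
        catch (Throwable t) {
            // The engine rethrows such failures as a PrestoException (see Failures.internalError)
            throw new RuntimeException(t);
        }
    }
}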

Example 3 with NOT_SUPPORTED

Use of io.prestosql.spi.StandardErrorCode.NOT_SUPPORTED in project hetu-core by openlookeng.

From the class HiveMetadata, the method getNewTableLayout:

@Override
public Optional<ConnectorNewTableLayout> getNewTableLayout(ConnectorSession session, ConnectorTableMetadata tableMetadata) {
    validatePartitionColumns(tableMetadata);
    validateBucketColumns(tableMetadata);
    validateCsvColumns(tableMetadata);
    Optional<HiveBucketProperty> bucketProperty = HiveTableProperties.getBucketProperty(tableMetadata.getProperties());
    if (!bucketProperty.isPresent()) {
        // return preferred layout which is partitioned by partition columns
        List<String> partitionedBy = getPartitionedBy(tableMetadata.getProperties());
        if (partitionedBy.isEmpty() || !HiveSessionProperties.isWritePartitionDistributionEnabled(session)) {
            return Optional.empty();
        }
        return Optional.of(new ConnectorNewTableLayout(partitionedBy));
    }
    if (!bucketProperty.get().getSortedBy().isEmpty() && !HiveSessionProperties.isSortedWritingEnabled(session)) {
        throw new PrestoException(NOT_SUPPORTED, "Writing to bucketed sorted Hive tables is disabled");
    }
    List<String> bucketedBy = bucketProperty.get().getBucketedBy();
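    // Build a name -> HiveType lookup so each bucketing column can be resolved in declared order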
    Map<String, HiveType> hiveTypeMap = tableMetadata.getColumns().stream().collect(toMap(ColumnMetadata::getName, column -> HiveType.toHiveType(typeTranslator, column.getType())));
    return Optional.of(new ConnectorNewTableLayout(new HivePartitioningHandle(bucketProperty.get().getBucketingVersion(), bucketProperty.get().getBucketCount(), bucketedBy.stream().map(hiveTypeMap::get).collect(toList()), OptionalInt.of(bucketProperty.get().getBucketCount())), bucketedBy));
}
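
Note that Collectors.toMap throws IllegalStateException on duplicate keys, so duplicate column names would fail the hiveTypeMap line above. A minimal standalone sketch of the same lookup pattern, with plain Strings standing in for ColumnMetadata and HiveType:

import java.util.List;
import java.util.Map;

import static java.util.stream.Collectors.toList;
import static java.util.stream.Collectors.toMap;

public final class BucketTypeLookupSketch {
    public static void main(String[] args) {
        // Stand-ins for tableMetadata.getColumns(); the values mimic HiveType.toHiveType(typeTranslator, type)
        List<String> columns = List.of("ds", "user_id", "event");
        Map<String, String> hiveTypeMap = columns.stream().collect(toMap(name -> name, name -> "hive:" + name));
        // Resolve the bucketing columns in declared order, as getNewTableLayout does with bucketedBy
        List<String> bucketTypes = List.of("user_id").stream().map(hiveTypeMap::get).collect(toList());
        System.out.println(bucketTypes); // [hive:user_id]
    }
}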

Example 4 with NOT_SUPPORTED

Use of io.prestosql.spi.StandardErrorCode.NOT_SUPPORTED in project hetu-core by openlookeng.

From the class HiveSplitManager, the method getSplits:

@Override
public ConnectorSplitSource getSplits(ConnectorTransactionHandle transaction, ConnectorSession session, ConnectorTableHandle tableHandle, SplitSchedulingStrategy splitSchedulingStrategy, Supplier<List<Set<DynamicFilter>>> dynamicFilterSupplier, Optional<QueryType> queryType, Map<String, Object> queryInfo, Set<TupleDomain<ColumnMetadata>> userDefinedCachePredicates, boolean partOfReuse) {
    HiveTableHandle hiveTable = (HiveTableHandle) tableHandle;
    SchemaTableName tableName = hiveTable.getSchemaTableName();
    // get table metadata
    SemiTransactionalHiveMetastore metastore = metastoreProvider.apply((HiveTransactionHandle) transaction);
    Table table = metastore.getTable(new HiveIdentity(session), tableName.getSchemaName(), tableName.getTableName()).orElseThrow(() -> new TableNotFoundException(tableName));
    if (table.getStorage().getStorageFormat().getInputFormat().contains("carbon")) {
        throw new PrestoException(NOT_SUPPORTED, "Hive connector can't read carbondata tables");
    }
    // verify table is not marked as non-readable
    String tableNotReadable = table.getParameters().get(OBJECT_NOT_READABLE);
    if (!isNullOrEmpty(tableNotReadable)) {
        throw new HiveNotReadableException(tableName, Optional.empty(), tableNotReadable);
    }
    // get partitions
    List<HivePartition> partitions = partitionManager.getOrLoadPartitions(session, metastore, new HiveIdentity(session), hiveTable);
    // short circuit if we don't have any partitions
    if (partitions.isEmpty()) {
        return new FixedSplitSource(ImmutableList.of());
    }
    // get buckets from first partition (arbitrary)
    Optional<HiveBucketing.HiveBucketFilter> bucketFilter = hiveTable.getBucketFilter();
    // validate bucketed execution
    Optional<HiveBucketHandle> bucketHandle = hiveTable.getBucketHandle();
    if ((splitSchedulingStrategy == GROUPED_SCHEDULING) && !bucketHandle.isPresent()) {
        throw new PrestoException(GENERIC_INTERNAL_ERROR, "SchedulingPolicy is bucketed, but BucketHandle is not present");
    }
    // sort partitions
    partitions = Ordering.natural().onResultOf(HivePartition::getPartitionId).reverse().sortedCopy(partitions);
    Iterable<HivePartitionMetadata> hivePartitions = getPartitionMetadata(session, metastore, table, tableName, partitions, bucketHandle.map(HiveBucketHandle::toTableBucketProperty));
    HiveSplitLoader hiveSplitLoader = new BackgroundHiveSplitLoader(table, hivePartitions, hiveTable.getCompactEffectivePredicate(), BackgroundHiveSplitLoader.BucketSplitInfo.createBucketSplitInfo(bucketHandle, bucketFilter), session, hdfsEnvironment, namenodeStats, directoryLister, executor, splitLoaderConcurrency, recursiveDfsWalkerEnabled, metastore.getValidWriteIds(session, hiveTable, queryType.map(t -> t == QueryType.VACUUM).orElse(false)).map(validTxnWriteIdList -> validTxnWriteIdList.getTableValidWriteIdList(table.getDatabaseName() + "." + table.getTableName())), dynamicFilterSupplier, queryType, queryInfo, typeManager);
    HiveSplitSource splitSource;
    HiveStorageFormat hiveStorageFormat = HiveMetadata.extractHiveStorageFormat(table);
    switch (splitSchedulingStrategy) {
        case UNGROUPED_SCHEDULING:
            splitSource = HiveSplitSource.allAtOnce(session, table.getDatabaseName(), table.getTableName(), // For reuse, we should make sure to have same split size all time for a table.
            partOfReuse ? 0 : maxInitialSplits, maxOutstandingSplits, maxOutstandingSplitsSize, maxSplitsPerSecond, hiveSplitLoader, executor, new CounterStat(), dynamicFilterSupplier, userDefinedCachePredicates, typeManager, hiveConfig, hiveStorageFormat);
            break;
        case GROUPED_SCHEDULING:
            splitSource = HiveSplitSource.bucketed(session, table.getDatabaseName(), table.getTableName(), // For reuse, we should make sure to have same split size all time for a table.
            partOfReuse ? 0 : maxInitialSplits, maxOutstandingSplits, maxOutstandingSplitsSize, maxSplitsPerSecond, hiveSplitLoader, executor, new CounterStat(), dynamicFilterSupplier, userDefinedCachePredicates, typeManager, hiveConfig, hiveStorageFormat);
            break;
        default:
            throw new IllegalArgumentException("Unknown splitSchedulingStrategy: " + splitSchedulingStrategy);
    }
    hiveSplitLoader.start(splitSource);
    if (queryType.isPresent() && queryType.get() == QueryType.VACUUM) {
        HdfsContext hdfsContext = new HdfsContext(session, table.getDatabaseName(), table.getTableName());
        return new HiveVacuumSplitSource(splitSource, (HiveVacuumTableHandle) queryInfo.get("vacuumHandle"), hdfsEnvironment, hdfsContext, session);
    }
    return splitSource;
}
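
The partition sort above uses Guava's Ordering. A minimal standalone sketch of the same reverse-sort-by-derived-key pattern, with a stand-in record in place of HivePartition:

import com.google.common.collect.Ordering;

import java.util.List;

public final class PartitionOrderingSketch {
    // Stand-in for HivePartition (illustrative only)
    record Partition(String partitionId) {
    }

    public static void main(String[] args) {
        List<Partition> partitions = List.of(
                new Partition("ds=2021-01-01"),
                new Partition("ds=2021-01-03"),
                new Partition("ds=2021-01-02"));
        // Natural order on the derived key, reversed, copied into a new list (the input is left untouched)
        List<Partition> newestFirst = Ordering.natural().onResultOf(Partition::partitionId).reverse().sortedCopy(partitions);
        newestFirst.forEach(p -> System.out.println(p.partitionId()));
    }
}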

Example 5 with NOT_SUPPORTED

Use of io.prestosql.spi.StandardErrorCode.NOT_SUPPORTED in project hetu-core by openlookeng.

From the class CreateCubeTask, the method internalExecute:

@VisibleForTesting
public ListenableFuture<?> internalExecute(CreateCube statement, Metadata metadata, AccessControl accessControl, Session session, QueryStateMachine stateMachine, List<Expression> parameters) {
    Optional<CubeMetaStore> optionalCubeMetaStore = cubeManager.getMetaStore(STAR_TREE);
    if (!optionalCubeMetaStore.isPresent()) {
        throw new RuntimeException("HetuMetaStore is not initialized");
    }
    QualifiedObjectName cubeName = createQualifiedObjectName(session, statement, statement.getCubeName());
    QualifiedObjectName tableName = createQualifiedObjectName(session, statement, statement.getSourceTableName());
    Optional<TableHandle> cubeHandle = metadata.getTableHandle(session, cubeName);
    Optional<TableHandle> tableHandle = metadata.getTableHandle(session, tableName);
    if (optionalCubeMetaStore.get().getMetadataFromCubeName(cubeName.toString()).isPresent()) {
        if (!statement.isNotExists()) {
            throw new SemanticException(CUBE_ALREADY_EXISTS, statement, "Cube '%s' already exists", cubeName);
        }
        return immediateFuture(null);
    }
    if (cubeHandle.isPresent()) {
        if (!statement.isNotExists()) {
            throw new SemanticException(CUBE_OR_TABLE_ALREADY_EXISTS, statement, "Cube or Table '%s' already exists", cubeName);
        }
        return immediateFuture(null);
    }
    CatalogName catalogName = metadata.getCatalogHandle(session, cubeName.getCatalogName()).orElseThrow(() -> new PrestoException(NOT_FOUND, "Catalog not found: " + cubeName.getCatalogName()));
    if (!metadata.isPreAggregationSupported(session, catalogName)) {
        throw new PrestoException(StandardErrorCode.NOT_SUPPORTED, String.format("Cube cannot be created on catalog '%s'", catalogName.toString()));
    }
    if (!tableHandle.isPresent()) {
        throw new SemanticException(MISSING_TABLE, statement, "Table '%s' does not exist", tableName);
    }
    TableMetadata tableMetadata = metadata.getTableMetadata(session, tableHandle.get());
    List<String> groupingSet = statement.getGroupingSet().stream().map(s -> s.getValue().toLowerCase(ENGLISH)).collect(Collectors.toList());
    Map<String, ColumnMetadata> sourceTableColumns = tableMetadata.getColumns().stream().collect(Collectors.toMap(ColumnMetadata::getName, col -> col));
    List<ColumnMetadata> cubeColumns = new ArrayList<>();
    Map<String, AggregationSignature> aggregations = new HashMap<>();
    Analysis analysis = analyzeStatement(statement, session, metadata, accessControl, parameters, stateMachine.getWarningCollector());
    Map<String, Field> fields = analysis.getOutputDescriptor().getAllFields().stream().collect(Collectors.toMap(col -> col.getName().map(String::toLowerCase).get(), col -> col));
    for (FunctionCall aggFunction : statement.getAggregations()) {
        String aggFunctionName = aggFunction.getName().toString().toLowerCase(ENGLISH);
        String argument = aggFunction.getArguments().isEmpty() || aggFunction.getArguments().get(0) instanceof LongLiteral ? null : ((Identifier) aggFunction.getArguments().get(0)).getValue().toLowerCase(ENGLISH);
        boolean distinct = aggFunction.isDistinct();
        String cubeColumnName = aggFunctionName + "_" + (argument == null ? "all" : argument) + (aggFunction.isDistinct() ? "_distinct" : "");
        CubeAggregateFunction cubeAggregateFunction = CubeAggregateFunction.valueOf(aggFunctionName.toUpperCase(ENGLISH));
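        // Translate the parsed call into a cube AggregationSignature; unsupported functions are rejected in the default branch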
        switch (cubeAggregateFunction) {
            case SUM:
                aggregations.put(cubeColumnName, AggregationSignature.sum(argument, distinct));
                break;
            case COUNT:
                AggregationSignature aggregationSignature = argument == null ? AggregationSignature.count() : AggregationSignature.count(argument, distinct);
                aggregations.put(cubeColumnName, aggregationSignature);
                break;
            case AVG:
                aggregations.put(cubeColumnName, AggregationSignature.avg(argument, distinct));
                break;
            case MAX:
                aggregations.put(cubeColumnName, AggregationSignature.max(argument, distinct));
                break;
            case MIN:
                aggregations.put(cubeColumnName, AggregationSignature.min(argument, distinct));
                break;
            default:
                throw new PrestoException(NOT_SUPPORTED, format("Unsupported aggregation function : %s", aggFunctionName));
        }
        Field tableField = fields.get(cubeColumnName);
        ColumnMetadata cubeCol = new ColumnMetadata(cubeColumnName, tableField.getType(), true, null, null, false, Collections.emptyMap());
        cubeColumns.add(cubeCol);
    }
    accessControl.checkCanCreateTable(session.getRequiredTransactionId(), session.getIdentity(), tableName);
    Map<String, Expression> sqlProperties = mapFromProperties(statement.getProperties());
    Map<String, Object> properties = metadata.getTablePropertyManager().getProperties(catalogName, cubeName.getCatalogName(), sqlProperties, session, metadata, parameters);
    if (properties.containsKey("partitioned_by")) {
        List<String> partitionCols = new ArrayList<>(((List<String>) properties.get("partitioned_by")));
        // put all partition columns at the end of the list
        groupingSet.removeAll(partitionCols);
        groupingSet.addAll(partitionCols);
    }
    for (String dimension : groupingSet) {
        if (!sourceTableColumns.containsKey(dimension)) {
            throw new SemanticException(MISSING_COLUMN, statement, "Column %s does not exist", dimension);
        }
        ColumnMetadata tableCol = sourceTableColumns.get(dimension);
        ColumnMetadata cubeCol = new ColumnMetadata(dimension, tableCol.getType(), tableCol.isNullable(), null, null, false, tableCol.getProperties());
        cubeColumns.add(cubeCol);
    }
    ConnectorTableMetadata cubeTableMetadata = new ConnectorTableMetadata(cubeName.asSchemaTableName(), ImmutableList.copyOf(cubeColumns), properties);
    try {
        metadata.createTable(session, cubeName.getCatalogName(), cubeTableMetadata, statement.isNotExists());
    } catch (PrestoException e) {
        // connectors are not required to handle the ignoreExisting flag
        if (!e.getErrorCode().equals(ALREADY_EXISTS.toErrorCode()) || !statement.isNotExists()) {
            throw e;
        }
    }
    CubeMetadataBuilder builder = optionalCubeMetaStore.get().getBuilder(cubeName.toString(), tableName.toString());
    groupingSet.forEach(dimension -> builder.addDimensionColumn(dimension, dimension));
    aggregations.forEach((column, aggregationSignature) -> builder.addAggregationColumn(column, aggregationSignature.getFunction(), aggregationSignature.getDimension(), aggregationSignature.isDistinct()));
    builder.addGroup(new HashSet<>(groupingSet));
    // Status and Table modified time will be updated on the first insert into the cube
    builder.setCubeStatus(CubeStatus.INACTIVE);
    builder.setTableLastUpdatedTime(-1L);
    statement.getSourceFilter().ifPresent(sourceTablePredicate -> {
        sourceTablePredicate = Coercer.addCoercions(sourceTablePredicate, analysis);
        builder.withCubeFilter(new CubeFilter(ExpressionFormatter.formatExpression(sourceTablePredicate, Optional.empty())));
    });
    builder.setCubeLastUpdatedTime(System.currentTimeMillis());
    optionalCubeMetaStore.get().persist(builder.build());
    return immediateFuture(null);
}
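
The cube column naming rule built inline above can be restated as a small standalone function. The class below is illustrative, but the rule itself mirrors the source: function name, underscore, the argument (or "all" when there is none), plus a _distinct suffix for distinct aggregations.

import java.util.Locale;

public final class CubeColumnNameSketch {
    // Mirrors the naming rule above: <function>_<argument|all>[_distinct]
    static String cubeColumnName(String function, String argument, boolean distinct) {
        return function.toLowerCase(Locale.ENGLISH)
                + "_" + (argument == null ? "all" : argument)
                + (distinct ? "_distinct" : "");
    }

    public static void main(String[] args) {
        System.out.println(cubeColumnName("count", null, false));     // count_all
        System.out.println(cubeColumnName("sum", "amount", false));   // sum_amount
        System.out.println(cubeColumnName("count", "user_id", true)); // count_user_id_distinct
    }
}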