Use of io.prestosql.spi.StandardErrorCode.NOT_SUPPORTED in project hetu-core by openlookeng.
From the class ConcatFunction, method generateConcat.
private static Class<?> generateConcat(TypeSignature type, int arity) {
    checkCondition(arity <= 254, NOT_SUPPORTED, "Too many arguments for string concatenation");
    ClassDefinition definition = new ClassDefinition(
            a(PUBLIC, FINAL),
            makeClassName(type.getBase() + "_concat" + arity + "ScalarFunction"),
            type(Object.class));
    // Generate constructor
    definition.declareDefaultConstructor(a(PRIVATE));
    // Generate concat()
    List<Parameter> parameters = IntStream.range(0, arity)
            .mapToObj(i -> arg("arg" + i, Slice.class))
            .collect(toImmutableList());
    MethodDefinition method = definition.declareMethod(a(PUBLIC, STATIC), "concat", type(Slice.class), parameters);
    Scope scope = method.getScope();
    BytecodeBlock body = method.getBody();
    Variable length = scope.declareVariable(int.class, "length");
    body.append(length.set(constantInt(0)));
    for (int i = 0; i < arity; ++i) {
        body.append(length.set(generateCheckedAdd(length, parameters.get(i).invoke("length", int.class))));
    }
    Variable result = scope.declareVariable(Slice.class, "result");
    body.append(result.set(invokeStatic(Slices.class, "allocate", Slice.class, length)));
    Variable position = scope.declareVariable(int.class, "position");
    body.append(position.set(constantInt(0)));
    for (int i = 0; i < arity; ++i) {
        body.append(result.invoke("setBytes", void.class, position, parameters.get(i)));
        body.append(position.set(add(position, parameters.get(i).invoke("length", int.class))));
    }
    body.getVariable(result).retObject();
    return defineClass(definition, Object.class, ImmutableMap.of(), new DynamicClassLoader(ConcatFunction.class.getClassLoader()));
}
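The checkCondition guard at the top raises NOT_SUPPORTED when more than 254 arguments are passed; the rest of the method emits bytecode that sums the argument lengths, allocates one output Slice, and copies each argument at its running offset. A hypothetical hand-written equivalent of that generated code (not part of the project; Math.addExact stands in for generateCheckedAdd, which in the generated version raises a Presto error when the combined length overflows):

import io.airlift.slice.Slice;
import io.airlift.slice.Slices;
import io.prestosql.spi.PrestoException;

import static io.prestosql.spi.StandardErrorCode.NOT_SUPPORTED;

// Hypothetical hand-written equivalent of the generated concat() method.
final class SliceConcat
{
    private SliceConcat() {}

    static Slice concat(Slice... args)
    {
        // Same limit as the checkCondition guard in generateConcat.
        if (args.length > 254) {
            throw new PrestoException(NOT_SUPPORTED, "Too many arguments for string concatenation");
        }

        // Checked addition of the argument lengths.
        int length = 0;
        for (Slice arg : args) {
            length = Math.addExact(length, arg.length());
        }

        // Allocate the result once and copy each argument at its running offset.
        Slice result = Slices.allocate(length);
        int position = 0;
        for (Slice arg : args) {
            result.setBytes(position, arg);
            position += arg.length();
        }
        return result;
    }
}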
Use of io.prestosql.spi.StandardErrorCode.NOT_SUPPORTED in project hetu-core by openlookeng.
From the class FormatFunction, method valueConverter.
private static BiFunction<ConnectorSession, Block, Object> valueConverter(FunctionAndTypeManager functionAndTypeManager, Type type, int position) {
    if (type.equals(UNKNOWN)) {
        return (session, block) -> null;
    }
    if (type.equals(BOOLEAN)) {
        return (session, block) -> type.getBoolean(block, position);
    }
    if (type.equals(TINYINT) || type.equals(SMALLINT) || type.equals(INTEGER) || type.equals(BIGINT)) {
        return (session, block) -> type.getLong(block, position);
    }
    if (type.equals(REAL)) {
        return (session, block) -> intBitsToFloat(toIntExact(type.getLong(block, position)));
    }
    if (type.equals(DOUBLE)) {
        return (session, block) -> type.getDouble(block, position);
    }
    if (type.equals(DATE)) {
        return (session, block) -> LocalDate.ofEpochDay(type.getLong(block, position));
    }
    if (type.equals(TIMESTAMP_WITH_TIME_ZONE)) {
        return (session, block) -> toZonedDateTime(type.getLong(block, position));
    }
    if (type.equals(TIMESTAMP)) {
        return (session, block) -> toLocalDateTime(type.getLong(block, position));
    }
    if (type.equals(TIME)) {
        return (session, block) -> toLocalTime(session, type.getLong(block, position));
    }
    // TODO: support TIME WITH TIME ZONE by making SqlTimeWithTimeZone implement TemporalAccessor
    if (type.equals(JSON)) {
        FunctionHandle functionHandle = functionAndTypeManager.resolveFunction(
                Optional.empty(),
                QualifiedObjectName.valueOf(DEFAULT_NAMESPACE, "json_format"),
                fromTypes(JSON));
        MethodHandle handle = functionAndTypeManager.getBuiltInScalarFunctionImplementation(functionHandle).getMethodHandle();
        return (session, block) -> convertToString(handle, type.getSlice(block, position));
    }
    if (isShortDecimal(type)) {
        int scale = ((DecimalType) type).getScale();
        return (session, block) -> BigDecimal.valueOf(type.getLong(block, position), scale);
    }
    if (isLongDecimal(type)) {
        int scale = ((DecimalType) type).getScale();
        return (session, block) -> new BigDecimal(decodeUnscaledValue(type.getSlice(block, position)), scale);
    }
    if (isVarcharType(type) || isCharType(type)) {
        return (session, block) -> type.getSlice(block, position).toStringUtf8();
    }
    BiFunction<ConnectorSession, Block, Object> function;
    if (type.getJavaType() == long.class) {
        function = (session, block) -> type.getLong(block, position);
    } else if (type.getJavaType() == double.class) {
        function = (session, block) -> type.getDouble(block, position);
    } else if (type.getJavaType() == boolean.class) {
        function = (session, block) -> type.getBoolean(block, position);
    } else if (type.getJavaType() == Slice.class) {
        function = (session, block) -> type.getSlice(block, position);
    } else {
        function = (session, block) -> type.getObject(block, position);
    }
    MethodHandle handle = castToVarchar(functionAndTypeManager, type);
    if ((handle == null) || (handle.type().parameterCount() != 1)) {
        throw new PrestoException(NOT_SUPPORTED, "Type not supported for formatting: " + type.getDisplayName());
    }
    return (session, block) -> convertToString(handle, function.apply(session, block));
}
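The fallback at the bottom reads the value according to the type's Java stack representation and then formats it through a VARCHAR cast; NOT_SUPPORTED is raised only when no usable cast exists. The sketch below is a hypothetical, simplified reader (not code from the project) that keeps the same dispatch on Type.getJavaType() but rejects object types outright instead of looking up a cast:

import io.airlift.slice.Slice;
import io.prestosql.spi.PrestoException;
import io.prestosql.spi.block.Block;
import io.prestosql.spi.type.Type;

import static io.prestosql.spi.StandardErrorCode.NOT_SUPPORTED;

// Hypothetical simplified reader: extract the stack value for the type's
// Java representation, or reject the type with NOT_SUPPORTED.
final class StackValueReader
{
    private StackValueReader() {}

    static Object readValue(Type type, Block block, int position)
    {
        if (block.isNull(position)) {
            return null;
        }
        Class<?> javaType = type.getJavaType();
        if (javaType == long.class) {
            return type.getLong(block, position);
        }
        if (javaType == double.class) {
            return type.getDouble(block, position);
        }
        if (javaType == boolean.class) {
            return type.getBoolean(block, position);
        }
        if (javaType == Slice.class) {
            return type.getSlice(block, position);
        }
        // Structural and other object types would go through a VARCHAR cast in the
        // real implementation; this simplified reader rejects them outright.
        throw new PrestoException(NOT_SUPPORTED, "Type not supported for formatting: " + type.getDisplayName());
    }
}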
Use of io.prestosql.spi.StandardErrorCode.NOT_SUPPORTED in project hetu-core by openlookeng.
From the class HiveMetadata, method getNewTableLayout.
@Override
public Optional<ConnectorNewTableLayout> getNewTableLayout(ConnectorSession session, ConnectorTableMetadata tableMetadata) {
    validatePartitionColumns(tableMetadata);
    validateBucketColumns(tableMetadata);
    validateCsvColumns(tableMetadata);
    Optional<HiveBucketProperty> bucketProperty = HiveTableProperties.getBucketProperty(tableMetadata.getProperties());
    if (!bucketProperty.isPresent()) {
        // return preferred layout which is partitioned by partition columns
        List<String> partitionedBy = getPartitionedBy(tableMetadata.getProperties());
        if (partitionedBy.isEmpty() || !HiveSessionProperties.isWritePartitionDistributionEnabled(session)) {
            return Optional.empty();
        }
        return Optional.of(new ConnectorNewTableLayout(partitionedBy));
    }
    if (!bucketProperty.get().getSortedBy().isEmpty() && !HiveSessionProperties.isSortedWritingEnabled(session)) {
        throw new PrestoException(NOT_SUPPORTED, "Writing to bucketed sorted Hive tables is disabled");
    }
    List<String> bucketedBy = bucketProperty.get().getBucketedBy();
    Map<String, HiveType> hiveTypeMap = tableMetadata.getColumns().stream()
            .collect(toMap(ColumnMetadata::getName, column -> HiveType.toHiveType(typeTranslator, column.getType())));
    return Optional.of(new ConnectorNewTableLayout(
            new HivePartitioningHandle(
                    bucketProperty.get().getBucketingVersion(),
                    bucketProperty.get().getBucketCount(),
                    bucketedBy.stream().map(hiveTypeMap::get).collect(toList()),
                    OptionalInt.of(bucketProperty.get().getBucketCount())),
            bucketedBy));
}
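The NOT_SUPPORTED usage here is a session-level feature gate: the table declares sort columns on its bucket property, but sorted writing has been disabled for the session, so the write is refused rather than silently producing unsorted buckets. A minimal hypothetical sketch of that gate (class and method names are illustrative only):

import io.prestosql.spi.PrestoException;

import static io.prestosql.spi.StandardErrorCode.NOT_SUPPORTED;

// Hypothetical feature gate: a capability declared by the table is rejected
// with NOT_SUPPORTED when the matching session toggle is switched off.
final class SortedWriteGate
{
    private SortedWriteGate() {}

    static void checkSortedWrite(boolean tableHasSortColumns, boolean sortedWritingEnabled)
    {
        if (tableHasSortColumns && !sortedWritingEnabled) {
            throw new PrestoException(NOT_SUPPORTED, "Writing to bucketed sorted Hive tables is disabled");
        }
    }
}

In getNewTableLayout the equivalent call would be checkSortedWrite(!bucketProperty.get().getSortedBy().isEmpty(), HiveSessionProperties.isSortedWritingEnabled(session)).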
Use of io.prestosql.spi.StandardErrorCode.NOT_SUPPORTED in project hetu-core by openlookeng.
From the class HiveSplitManager, method getSplits.
@Override
public ConnectorSplitSource getSplits(
        ConnectorTransactionHandle transaction,
        ConnectorSession session,
        ConnectorTableHandle tableHandle,
        SplitSchedulingStrategy splitSchedulingStrategy,
        Supplier<List<Set<DynamicFilter>>> dynamicFilterSupplier,
        Optional<QueryType> queryType,
        Map<String, Object> queryInfo,
        Set<TupleDomain<ColumnMetadata>> userDefinedCachePredicates,
        boolean partOfReuse) {
    HiveTableHandle hiveTable = (HiveTableHandle) tableHandle;
    SchemaTableName tableName = hiveTable.getSchemaTableName();
    // get table metadata
    SemiTransactionalHiveMetastore metastore = metastoreProvider.apply((HiveTransactionHandle) transaction);
    Table table = metastore.getTable(new HiveIdentity(session), tableName.getSchemaName(), tableName.getTableName())
            .orElseThrow(() -> new TableNotFoundException(tableName));
    if (table.getStorage().getStorageFormat().getInputFormat().contains("carbon")) {
        throw new PrestoException(NOT_SUPPORTED, "Hive connector can't read carbondata tables");
    }
    // verify table is not marked as non-readable
    String tableNotReadable = table.getParameters().get(OBJECT_NOT_READABLE);
    if (!isNullOrEmpty(tableNotReadable)) {
        throw new HiveNotReadableException(tableName, Optional.empty(), tableNotReadable);
    }
    // get partitions
    List<HivePartition> partitions = partitionManager.getOrLoadPartitions(session, metastore, new HiveIdentity(session), hiveTable);
    // short circuit if we don't have any partitions
    if (partitions.isEmpty()) {
        return new FixedSplitSource(ImmutableList.of());
    }
    // get buckets from first partition (arbitrary)
    Optional<HiveBucketing.HiveBucketFilter> bucketFilter = hiveTable.getBucketFilter();
    // validate bucketed execution
    Optional<HiveBucketHandle> bucketHandle = hiveTable.getBucketHandle();
    if ((splitSchedulingStrategy == GROUPED_SCHEDULING) && !bucketHandle.isPresent()) {
        throw new PrestoException(GENERIC_INTERNAL_ERROR, "SchedulingPolicy is bucketed, but BucketHandle is not present");
    }
    // sort partitions
    partitions = Ordering.natural().onResultOf(HivePartition::getPartitionId).reverse().sortedCopy(partitions);
    Iterable<HivePartitionMetadata> hivePartitions = getPartitionMetadata(session, metastore, table, tableName, partitions,
            bucketHandle.map(HiveBucketHandle::toTableBucketProperty));
    HiveSplitLoader hiveSplitLoader = new BackgroundHiveSplitLoader(
            table,
            hivePartitions,
            hiveTable.getCompactEffectivePredicate(),
            BackgroundHiveSplitLoader.BucketSplitInfo.createBucketSplitInfo(bucketHandle, bucketFilter),
            session,
            hdfsEnvironment,
            namenodeStats,
            directoryLister,
            executor,
            splitLoaderConcurrency,
            recursiveDfsWalkerEnabled,
            metastore.getValidWriteIds(session, hiveTable, queryType.map(t -> t == QueryType.VACUUM).orElse(false))
                    .map(validTxnWriteIdList -> validTxnWriteIdList.getTableValidWriteIdList(table.getDatabaseName() + "." + table.getTableName())),
            dynamicFilterSupplier,
            queryType,
            queryInfo,
            typeManager);
    HiveSplitSource splitSource;
    HiveStorageFormat hiveStorageFormat = HiveMetadata.extractHiveStorageFormat(table);
    switch (splitSchedulingStrategy) {
        case UNGROUPED_SCHEDULING:
            splitSource = HiveSplitSource.allAtOnce(session, table.getDatabaseName(), table.getTableName(),
                    // For reuse, we should make sure to have same split size all time for a table.
                    partOfReuse ? 0 : maxInitialSplits,
                    maxOutstandingSplits, maxOutstandingSplitsSize, maxSplitsPerSecond, hiveSplitLoader, executor,
                    new CounterStat(), dynamicFilterSupplier, userDefinedCachePredicates, typeManager, hiveConfig, hiveStorageFormat);
            break;
        case GROUPED_SCHEDULING:
            splitSource = HiveSplitSource.bucketed(session, table.getDatabaseName(), table.getTableName(),
                    // For reuse, we should make sure to have same split size all time for a table.
                    partOfReuse ? 0 : maxInitialSplits,
                    maxOutstandingSplits, maxOutstandingSplitsSize, maxSplitsPerSecond, hiveSplitLoader, executor,
                    new CounterStat(), dynamicFilterSupplier, userDefinedCachePredicates, typeManager, hiveConfig, hiveStorageFormat);
            break;
        default:
            throw new IllegalArgumentException("Unknown splitSchedulingStrategy: " + splitSchedulingStrategy);
    }
    hiveSplitLoader.start(splitSource);
    if (queryType.isPresent() && queryType.get() == QueryType.VACUUM) {
        HdfsContext hdfsContext = new HdfsContext(session, table.getDatabaseName(), table.getTableName());
        return new HiveVacuumSplitSource(splitSource, (HiveVacuumTableHandle) queryInfo.get("vacuumHandle"), hdfsEnvironment, hdfsContext, session);
    }
    return splitSource;
}
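Note the two different error codes in this method: NOT_SUPPORTED marks a user-visible limitation (the Hive connector cannot read carbondata-backed tables), while GENERIC_INTERNAL_ERROR marks an engine-side inconsistency (grouped scheduling requested without a bucket handle). A hypothetical stand-alone version of the format guard (not code from the project) might look like this:

import io.prestosql.spi.PrestoException;

import static io.prestosql.spi.StandardErrorCode.NOT_SUPPORTED;

// Hypothetical guard mirroring the check at the top of getSplits: refuse
// input formats the connector cannot read before any splits are generated.
final class StorageFormatGuard
{
    private StorageFormatGuard() {}

    static void checkReadableFormat(String inputFormat)
    {
        if (inputFormat != null && inputFormat.contains("carbon")) {
            throw new PrestoException(NOT_SUPPORTED, "Hive connector can't read carbondata tables");
        }
    }

    // In getSplits the equivalent call would be:
    // checkReadableFormat(table.getStorage().getStorageFormat().getInputFormat());
}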
Use of io.prestosql.spi.StandardErrorCode.NOT_SUPPORTED in project hetu-core by openlookeng.
From the class CreateCubeTask, method internalExecute.
@VisibleForTesting
public ListenableFuture<?> internalExecute(CreateCube statement, Metadata metadata, AccessControl accessControl,
        Session session, QueryStateMachine stateMachine, List<Expression> parameters) {
    Optional<CubeMetaStore> optionalCubeMetaStore = cubeManager.getMetaStore(STAR_TREE);
    if (!optionalCubeMetaStore.isPresent()) {
        throw new RuntimeException("HetuMetaStore is not initialized");
    }
    QualifiedObjectName cubeName = createQualifiedObjectName(session, statement, statement.getCubeName());
    QualifiedObjectName tableName = createQualifiedObjectName(session, statement, statement.getSourceTableName());
    Optional<TableHandle> cubeHandle = metadata.getTableHandle(session, cubeName);
    Optional<TableHandle> tableHandle = metadata.getTableHandle(session, tableName);
    if (optionalCubeMetaStore.get().getMetadataFromCubeName(cubeName.toString()).isPresent()) {
        if (!statement.isNotExists()) {
            throw new SemanticException(CUBE_ALREADY_EXISTS, statement, "Cube '%s' already exists", cubeName);
        }
        return immediateFuture(null);
    }
    if (cubeHandle.isPresent()) {
        if (!statement.isNotExists()) {
            throw new SemanticException(CUBE_OR_TABLE_ALREADY_EXISTS, statement, "Cube or Table '%s' already exists", cubeName);
        }
        return immediateFuture(null);
    }
    CatalogName catalogName = metadata.getCatalogHandle(session, cubeName.getCatalogName())
            .orElseThrow(() -> new PrestoException(NOT_FOUND, "Catalog not found: " + cubeName.getCatalogName()));
    if (!metadata.isPreAggregationSupported(session, catalogName)) {
        throw new PrestoException(StandardErrorCode.NOT_SUPPORTED, String.format("Cube cannot be created on catalog '%s'", catalogName.toString()));
    }
    if (!tableHandle.isPresent()) {
        throw new SemanticException(MISSING_TABLE, statement, "Table '%s' does not exist", tableName);
    }
    TableMetadata tableMetadata = metadata.getTableMetadata(session, tableHandle.get());
    List<String> groupingSet = statement.getGroupingSet().stream()
            .map(s -> s.getValue().toLowerCase(ENGLISH))
            .collect(Collectors.toList());
    Map<String, ColumnMetadata> sourceTableColumns = tableMetadata.getColumns().stream()
            .collect(Collectors.toMap(ColumnMetadata::getName, col -> col));
    List<ColumnMetadata> cubeColumns = new ArrayList<>();
    Map<String, AggregationSignature> aggregations = new HashMap<>();
    Analysis analysis = analyzeStatement(statement, session, metadata, accessControl, parameters, stateMachine.getWarningCollector());
    Map<String, Field> fields = analysis.getOutputDescriptor().getAllFields().stream()
            .collect(Collectors.toMap(col -> col.getName().map(String::toLowerCase).get(), col -> col));
    for (FunctionCall aggFunction : statement.getAggregations()) {
        String aggFunctionName = aggFunction.getName().toString().toLowerCase(ENGLISH);
        String argument = aggFunction.getArguments().isEmpty() || aggFunction.getArguments().get(0) instanceof LongLiteral
                ? null
                : ((Identifier) aggFunction.getArguments().get(0)).getValue().toLowerCase(ENGLISH);
        boolean distinct = aggFunction.isDistinct();
        String cubeColumnName = aggFunctionName + "_" + (argument == null ? "all" : argument) + (aggFunction.isDistinct() ? "_distinct" : "");
        CubeAggregateFunction cubeAggregateFunction = CubeAggregateFunction.valueOf(aggFunctionName.toUpperCase(ENGLISH));
        switch (cubeAggregateFunction) {
            case SUM:
                aggregations.put(cubeColumnName, AggregationSignature.sum(argument, distinct));
                break;
            case COUNT:
                AggregationSignature aggregationSignature = argument == null ? AggregationSignature.count() : AggregationSignature.count(argument, distinct);
                aggregations.put(cubeColumnName, aggregationSignature);
                break;
            case AVG:
                aggregations.put(cubeColumnName, AggregationSignature.avg(argument, distinct));
                break;
            case MAX:
                aggregations.put(cubeColumnName, AggregationSignature.max(argument, distinct));
                break;
            case MIN:
                aggregations.put(cubeColumnName, AggregationSignature.min(argument, distinct));
                break;
            default:
                throw new PrestoException(NOT_SUPPORTED, format("Unsupported aggregation function : %s", aggFunctionName));
        }
        Field tableField = fields.get(cubeColumnName);
        ColumnMetadata cubeCol = new ColumnMetadata(cubeColumnName, tableField.getType(), true, null, null, false, Collections.emptyMap());
        cubeColumns.add(cubeCol);
    }
    accessControl.checkCanCreateTable(session.getRequiredTransactionId(), session.getIdentity(), tableName);
    Map<String, Expression> sqlProperties = mapFromProperties(statement.getProperties());
    Map<String, Object> properties = metadata.getTablePropertyManager().getProperties(catalogName, cubeName.getCatalogName(), sqlProperties, session, metadata, parameters);
    if (properties.containsKey("partitioned_by")) {
        List<String> partitionCols = new ArrayList<>(((List<String>) properties.get("partitioned_by")));
        // put all partition columns at the end of the list
        groupingSet.removeAll(partitionCols);
        groupingSet.addAll(partitionCols);
    }
    for (String dimension : groupingSet) {
        if (!sourceTableColumns.containsKey(dimension)) {
            throw new SemanticException(MISSING_COLUMN, statement, "Column %s does not exist", dimension);
        }
        ColumnMetadata tableCol = sourceTableColumns.get(dimension);
        ColumnMetadata cubeCol = new ColumnMetadata(dimension, tableCol.getType(), tableCol.isNullable(), null, null, false, tableCol.getProperties());
        cubeColumns.add(cubeCol);
    }
    ConnectorTableMetadata cubeTableMetadata = new ConnectorTableMetadata(cubeName.asSchemaTableName(), ImmutableList.copyOf(cubeColumns), properties);
    try {
        metadata.createTable(session, cubeName.getCatalogName(), cubeTableMetadata, statement.isNotExists());
    } catch (PrestoException e) {
        // connectors are not required to handle the ignoreExisting flag
        if (!e.getErrorCode().equals(ALREADY_EXISTS.toErrorCode()) || !statement.isNotExists()) {
            throw e;
        }
    }
    CubeMetadataBuilder builder = optionalCubeMetaStore.get().getBuilder(cubeName.toString(), tableName.toString());
    groupingSet.forEach(dimension -> builder.addDimensionColumn(dimension, dimension));
    aggregations.forEach((column, aggregationSignature) ->
            builder.addAggregationColumn(column, aggregationSignature.getFunction(), aggregationSignature.getDimension(), aggregationSignature.isDistinct()));
    builder.addGroup(new HashSet<>(groupingSet));
    // Status and Table modified time will be updated on the first insert into the cube
    builder.setCubeStatus(CubeStatus.INACTIVE);
    builder.setTableLastUpdatedTime(-1L);
    statement.getSourceFilter().ifPresent(sourceTablePredicate -> {
        sourceTablePredicate = Coercer.addCoercions(sourceTablePredicate, analysis);
        builder.withCubeFilter(new CubeFilter(ExpressionFormatter.formatExpression(sourceTablePredicate, Optional.empty())));
    });
    builder.setCubeLastUpdatedTime(System.currentTimeMillis());
    optionalCubeMetaStore.get().persist(builder.build());
    return immediateFuture(null);
}
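internalExecute raises NOT_SUPPORTED in two places: when the target catalog does not report support for pre-aggregation, and in the default branch of the aggregation-function switch. The sketch below is a hypothetical condensation of both checks (the enum is a local stand-in, not the project's CubeAggregateFunction):

import io.prestosql.spi.PrestoException;

import static io.prestosql.spi.StandardErrorCode.NOT_SUPPORTED;
import static java.lang.String.format;
import static java.util.Locale.ENGLISH;

// Hypothetical condensation of the two NOT_SUPPORTED paths in internalExecute.
final class CubeChecks
{
    // Local stand-in for the set of aggregations the cube engine handles.
    enum SupportedAggregation { SUM, COUNT, AVG, MAX, MIN }

    private CubeChecks() {}

    static SupportedAggregation requireSupportedAggregation(String aggFunctionName)
    {
        try {
            return SupportedAggregation.valueOf(aggFunctionName.toUpperCase(ENGLISH));
        } catch (IllegalArgumentException e) {
            throw new PrestoException(NOT_SUPPORTED, format("Unsupported aggregation function : %s", aggFunctionName));
        }
    }

    static void requirePreAggregationSupport(String catalogName, boolean preAggregationSupported)
    {
        if (!preAggregationSupported) {
            throw new PrestoException(NOT_SUPPORTED, format("Cube cannot be created on catalog '%s'", catalogName));
        }
    }
}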