Use of org.apache.beam.vendor.calcite.v1_28_0.org.apache.calcite.rex.RexLiteral in project drill by apache.
The class MapRDBStatistics, method convertLikeToRange.
/*
* Helper function to perform additional pre-processing for LIKE predicates
*/
private RexNode convertLikeToRange(RexCall condition, RexBuilder builder) {
  Preconditions.checkArgument(condition.getOperator().getKind() == SqlKind.LIKE,
      "Unable to convertLikeToRange: argument is not a LIKE condition!");
  HBaseRegexParser parser = null;
  RexNode arg = null;
  RexLiteral pattern = null, escape = null;
  String patternStr = null, escapeStr = null;
  if (condition.getOperands().size() == 2) {
    // No escape character specified
    for (RexNode op : condition.getOperands()) {
      if (op.getKind() == SqlKind.LITERAL) {
        pattern = (RexLiteral) op;
      } else {
        arg = op;
      }
    }
    // Get the PATTERN string from the corresponding RexLiteral
    if (pattern.getTypeName() == SqlTypeName.DECIMAL || pattern.getTypeName() == SqlTypeName.INTEGER) {
      patternStr = pattern.getValue().toString();
    } else if (pattern.getTypeName() == SqlTypeName.CHAR) {
      patternStr = pattern.getValue2().toString();
    }
    if (patternStr != null) {
      parser = new HBaseRegexParser(patternStr);
    }
  } else if (condition.getOperands().size() == 3) {
    // Escape character specified
    for (RexNode op : condition.getOperands()) {
      if (op.getKind() == SqlKind.LITERAL) {
        // Assume the first literal specifies the PATTERN and the second the ESCAPE char
        if (pattern == null) {
          pattern = (RexLiteral) op;
        } else {
          escape = (RexLiteral) op;
        }
      } else {
        arg = op;
      }
    }
    // Get the PATTERN and ESCAPE strings from the corresponding RexLiterals
    if (pattern.getTypeName() == SqlTypeName.DECIMAL || pattern.getTypeName() == SqlTypeName.INTEGER) {
      patternStr = pattern.getValue().toString();
    } else if (pattern.getTypeName() == SqlTypeName.CHAR) {
      patternStr = pattern.getValue2().toString();
    }
    if (escape.getTypeName() == SqlTypeName.DECIMAL || escape.getTypeName() == SqlTypeName.INTEGER) {
      escapeStr = escape.getValue().toString();
    } else if (escape.getTypeName() == SqlTypeName.CHAR) {
      escapeStr = escape.getValue2().toString();
    }
    if (patternStr != null && escapeStr != null) {
      parser = new HBaseRegexParser(patternStr, escapeStr.toCharArray()[0]);
    }
  }
  if (parser != null) {
    parser.parse();
    String prefix = parser.getPrefixString();
    /*
     * If there is a literal prefix, convert it into an EQUALITY or RANGE predicate
     */
    if (prefix != null) {
      if (prefix.equals(parser.getLikeString())) {
        // No WILDCARD present. This turns the LIKE predicate into an EQUALITY predicate
        if (arg != null) {
          return builder.makeCall(SqlStdOperatorTable.EQUALS, arg, pattern);
        }
      } else {
        // WILDCARD present. This turns the LIKE predicate into a RANGE predicate
        byte[] startKey = prefix.getBytes(Charsets.UTF_8);
        byte[] stopKey = startKey.clone();
        boolean isMaxVal = true;
        for (int i = stopKey.length - 1; i >= 0; --i) {
          int nextByteValue = (0xff & stopKey[i]) + 1;
          if (nextByteValue < 0xff) {
            stopKey[i] = (byte) nextByteValue;
            isMaxVal = false;
            break;
          } else {
            stopKey[i] = 0;
          }
        }
        if (isMaxVal) {
          stopKey = HConstants.EMPTY_END_ROW;
        }
        try {
          // TODO: This may be a potential bug since we assume UTF-8 encoding. However, we follow the
          // current DB implementation. See HBaseFilterBuilder.createHBaseScanSpec "like" CASE statement.
          RexLiteral startKeyLiteral = builder.makeLiteral(new String(startKey, Charsets.UTF_8.toString()));
          RexLiteral stopKeyLiteral = builder.makeLiteral(new String(stopKey, Charsets.UTF_8.toString()));
          if (arg != null) {
            RexNode startPred = builder.makeCall(SqlStdOperatorTable.GREATER_THAN_OR_EQUAL, arg, startKeyLiteral);
            RexNode stopPred = builder.makeCall(SqlStdOperatorTable.LESS_THAN, arg, stopKeyLiteral);
            return builder.makeCall(SqlStdOperatorTable.AND, startPred, stopPred);
          }
        } catch (UnsupportedEncodingException ex) {
          // Encoding not supported - do nothing
          logger.debug("Statistics: convertLikeToRange: Unsupported Encoding Exception -> {}", ex.getMessage());
        }
      }
    }
  }
  // Could not convert - return the condition as-is
  return condition;
}
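The RANGE branch above computes a stop key: the smallest byte string strictly greater than every string that shares the prefix. A minimal standalone sketch of that step (a hypothetical helper mirroring the loop above, not Drill API):

static byte[] stopKeyFor(byte[] startKey) {
  // Work on a copy; the start key itself remains the lower bound of the scan.
  byte[] stopKey = startKey.clone();
  for (int i = stopKey.length - 1; i >= 0; --i) {
    int next = (0xff & stopKey[i]) + 1;
    if (next < 0xff) {
      stopKey[i] = (byte) next; // e.g. "abc" -> "abd"
      return stopKey;
    }
    stopKey[i] = 0; // overflow: carry into the previous byte
  }
  return new byte[0]; // every byte overflowed: no finite upper bound
}

For example, LIKE 'abc%' produces startKey "abc" and stopKey "abd", so the predicate becomes arg >= 'abc' AND arg < 'abd'.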
Use of org.apache.beam.vendor.calcite.v1_28_0.org.apache.calcite.rex.RexLiteral in project drill by apache.
The class SimpleRexRemap, method rewriteEqualOnCharToLike.
public RexNode rewriteEqualOnCharToLike(RexNode expr, Map<RexNode, LogicalExpression> equalOnCastCharExprs) {
  Map<RexNode, RexNode> srcToReplace = Maps.newIdentityHashMap();
  for (Map.Entry<RexNode, LogicalExpression> entry : equalOnCastCharExprs.entrySet()) {
    RexNode equalOp = entry.getKey();
    LogicalExpression opInput = entry.getValue();
    final List<RexNode> operands = ((RexCall) equalOp).getOperands();
    RexLiteral newLiteral = null;
    RexNode input = null;
    if (operands.size() == 2) {
      RexLiteral oplit = null;
      if (operands.get(0) instanceof RexLiteral) {
        oplit = (RexLiteral) operands.get(0);
        if (oplit.getTypeName() == SqlTypeName.CHAR) {
          newLiteral = builder.makeLiteral(((NlsString) oplit.getValue()).getValue() + "%");
          input = operands.get(1);
        }
      } else if (operands.get(1) instanceof RexLiteral) {
        oplit = (RexLiteral) operands.get(1);
        if (oplit.getTypeName() == SqlTypeName.CHAR) {
          newLiteral = builder.makeLiteral(((NlsString) oplit.getValue()).getValue() + "%");
          input = operands.get(0);
        }
      }
    }
    if (newLiteral != null) {
      srcToReplace.put(equalOp, builder.makeCall(SqlStdOperatorTable.LIKE, input, newLiteral));
    }
  }
  if (!srcToReplace.isEmpty()) {
    RexReplace replacer = new RexReplace(srcToReplace);
    return expr.accept(replacer);
  }
  return expr;
}
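For context, the rewrite turns an equality whose literal operand is a fixed-width CHAR into a prefix LIKE, e.g. a comparison against 'abc' becomes col LIKE 'abc%'. A hedged sketch of the same construction with a plain (non-vendored) Calcite RexBuilder; the type factory setup and input reference here are illustrative assumptions:

RelDataTypeFactory typeFactory = new SqlTypeFactoryImpl(RelDataTypeSystem.DEFAULT);
RexBuilder builder = new RexBuilder(typeFactory);
// $0 stands in for the input column
RexNode col = builder.makeInputRef(typeFactory.createSqlType(SqlTypeName.VARCHAR, 20), 0);
// original predicate: $0 = 'abc'
RexNode equals = builder.makeCall(SqlStdOperatorTable.EQUALS, col, builder.makeLiteral("abc"));
// rewritten predicate: $0 LIKE 'abc%'
RexNode like = builder.makeCall(SqlStdOperatorTable.LIKE, col, builder.makeLiteral("abc%"));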
Use of org.apache.beam.vendor.calcite.v1_28_0.org.apache.calcite.rex.RexLiteral in project flink by apache.
The class HiveParserBaseSemanticAnalyzer, method genValues.
public static RelNode genValues(String tabAlias, Table tmpTable, HiveParserRowResolver rowResolver, RelOptCluster cluster, List<List<String>> values) {
  List<TypeInfo> tmpTableTypes = tmpTable.getCols().stream()
      .map(f -> TypeInfoUtils.getTypeInfoFromTypeString(f.getType()))
      .collect(Collectors.toList());
  RexBuilder rexBuilder = cluster.getRexBuilder();
  // calcite types for each field
  List<RelDataType> calciteTargetTypes = tmpTableTypes.stream()
      .map(ti -> HiveParserTypeConverter.convert((PrimitiveTypeInfo) ti, rexBuilder.getTypeFactory()))
      .collect(Collectors.toList());
  // calcite field names
  List<String> calciteFieldNames = IntStream.range(0, calciteTargetTypes.size())
      .mapToObj(SqlUtil::deriveAliasFromOrdinal)
      .collect(Collectors.toList());
  // calcite type for each row
  List<RelDataType> calciteRowTypes = new ArrayList<>();
  List<List<RexLiteral>> rows = new ArrayList<>();
  for (List<String> value : values) {
    Preconditions.checkArgument(value.size() == tmpTableTypes.size(),
        String.format("Values table col length (%d) and data length (%d) mismatch", tmpTableTypes.size(), value.size()));
    List<RexLiteral> row = new ArrayList<>();
    for (int i = 0; i < tmpTableTypes.size(); i++) {
      PrimitiveTypeInfo primitiveTypeInfo = (PrimitiveTypeInfo) tmpTableTypes.get(i);
      RelDataType calciteType = calciteTargetTypes.get(i);
      String col = value.get(i);
      if (col == null) {
        row.add(rexBuilder.makeNullLiteral(calciteType));
      } else {
        switch (primitiveTypeInfo.getPrimitiveCategory()) {
          case BYTE:
          case SHORT:
          case INT:
          case LONG:
            row.add(rexBuilder.makeExactLiteral(new BigDecimal(col), calciteType));
            break;
          case DECIMAL:
            BigDecimal bigDec = new BigDecimal(col);
            row.add(SqlTypeUtil.isValidDecimalValue(bigDec, calciteType)
                ? rexBuilder.makeExactLiteral(bigDec, calciteType)
                : rexBuilder.makeNullLiteral(calciteType));
            break;
          case FLOAT:
          case DOUBLE:
            row.add(rexBuilder.makeApproxLiteral(new BigDecimal(col), calciteType));
            break;
          case BOOLEAN:
            row.add(rexBuilder.makeLiteral(Boolean.parseBoolean(col)));
            break;
          default:
            row.add(rexBuilder.makeCharLiteral(HiveParserUtils.asUnicodeString(col)));
        }
      }
    }
    calciteRowTypes.add(rexBuilder.getTypeFactory().createStructType(
        row.stream().map(RexLiteral::getType).collect(Collectors.toList()), calciteFieldNames));
    rows.add(row);
  }
  // compute the final row type
  RelDataType calciteRowType = rexBuilder.getTypeFactory().leastRestrictive(calciteRowTypes);
  for (int i = 0; i < calciteFieldNames.size(); i++) {
    ColumnInfo colInfo = new ColumnInfo(calciteFieldNames.get(i),
        HiveParserTypeConverter.convert(calciteRowType.getFieldList().get(i).getType()), tabAlias, false);
    rowResolver.put(tabAlias, calciteFieldNames.get(i), colInfo);
  }
  return HiveParserUtils.genValuesRelNode(cluster,
      rexBuilder.getTypeFactory().createStructType(calciteRowType.getFieldList()), rows);
}
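The switch above maps each Hive primitive category to a matching RexBuilder factory method. A minimal sketch of those calls against a plain (non-vendored) Calcite RexBuilder, with illustrative setup:

RelDataTypeFactory typeFactory = new SqlTypeFactoryImpl(RelDataTypeSystem.DEFAULT);
RexBuilder rexBuilder = new RexBuilder(typeFactory);
RelDataType intType = typeFactory.createSqlType(SqlTypeName.INTEGER);
RelDataType doubleType = typeFactory.createSqlType(SqlTypeName.DOUBLE);
// BYTE/SHORT/INT/LONG (and in-range DECIMAL) values go through makeExactLiteral
RexLiteral exact = rexBuilder.makeExactLiteral(new BigDecimal("42"), intType);
// FLOAT/DOUBLE values go through makeApproxLiteral
RexLiteral approx = rexBuilder.makeApproxLiteral(new BigDecimal("3.14"), doubleType);
// NULL cells become typed null literals
RexLiteral nullLit = rexBuilder.makeNullLiteral(intType);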
Use of org.apache.beam.vendor.calcite.v1_28_0.org.apache.calcite.rex.RexLiteral in project flink by apache.
The class StreamExecOverAggregate, method translateToPlanInternal.
@SuppressWarnings("unchecked")
@Override
protected Transformation<RowData> translateToPlanInternal(PlannerBase planner, ExecNodeConfig config) {
  if (overSpec.getGroups().size() > 1) {
    throw new TableException("All aggregates must be computed on the same window.");
  }
  final OverSpec.GroupSpec group = overSpec.getGroups().get(0);
  final int[] orderKeys = group.getSort().getFieldIndices();
  final boolean[] isAscendingOrders = group.getSort().getAscendingOrders();
  if (orderKeys.length != 1 || isAscendingOrders.length != 1) {
    throw new TableException("The window can only be ordered by a single time column.");
  }
  if (!isAscendingOrders[0]) {
    throw new TableException("The window can only be ordered in ASCENDING mode.");
  }
  final int[] partitionKeys = overSpec.getPartition().getFieldIndices();
  if (partitionKeys.length > 0 && config.getStateRetentionTime() < 0) {
    LOG.warn("No state retention interval configured for a query which accumulates state. "
        + "Please provide a query configuration with valid retention interval to prevent "
        + "excessive state size. You may specify a retention time of 0 to not clean up the state.");
  }
  final ExecEdge inputEdge = getInputEdges().get(0);
  final Transformation<RowData> inputTransform = (Transformation<RowData>) inputEdge.translateToPlan(planner);
  final RowType inputRowType = (RowType) inputEdge.getOutputType();
  final int orderKey = orderKeys[0];
  final LogicalType orderKeyType = inputRowType.getFields().get(orderKey).getType();
  // check time field && identify window rowtime attribute
  final int rowTimeIdx;
  if (isRowtimeAttribute(orderKeyType)) {
    rowTimeIdx = orderKey;
  } else if (isProctimeAttribute(orderKeyType)) {
    rowTimeIdx = -1;
  } else {
    throw new TableException("OVER windows' ordering in stream mode must be defined on a time attribute.");
  }
  final List<RexLiteral> constants = overSpec.getConstants();
  final List<String> fieldNames = new ArrayList<>(inputRowType.getFieldNames());
  final List<LogicalType> fieldTypes = new ArrayList<>(inputRowType.getChildren());
  // append one TMP field (name and type) per constant
  IntStream.range(0, constants.size()).forEach(i -> fieldNames.add("TMP" + i));
  for (int i = 0; i < constants.size(); ++i) {
    fieldTypes.add(FlinkTypeFactory.toLogicalType(constants.get(i).getType()));
  }
  final RowType aggInputRowType = RowType.of(fieldTypes.toArray(new LogicalType[0]), fieldNames.toArray(new String[0]));
  final CodeGeneratorContext ctx = new CodeGeneratorContext(config.getTableConfig());
  final KeyedProcessFunction<RowData, RowData, RowData> overProcessFunction;
  if (group.getLowerBound().isPreceding() && group.getLowerBound().isUnbounded() && group.getUpperBound().isCurrentRow()) {
    // unbounded OVER window
    overProcessFunction = createUnboundedOverProcessFunction(ctx, group.getAggCalls(), constants, aggInputRowType, inputRowType, rowTimeIdx, group.isRows(), config, planner.getRelBuilder());
  } else if (group.getLowerBound().isPreceding() && !group.getLowerBound().isUnbounded() && group.getUpperBound().isCurrentRow()) {
    final Object boundValue = OverAggregateUtil.getBoundary(overSpec, group.getLowerBound());
    if (boundValue instanceof BigDecimal) {
      throw new TableException("The specific value is decimal, which is not supported yet.");
    }
    // bounded OVER window
    final long precedingOffset = -1 * (long) boundValue + (group.isRows() ? 1 : 0);
    overProcessFunction = createBoundedOverProcessFunction(ctx, group.getAggCalls(), constants, aggInputRowType, inputRowType, rowTimeIdx, group.isRows(), precedingOffset, config, planner.getRelBuilder());
  } else {
    throw new TableException("OVER RANGE FOLLOWING windows are not supported yet.");
  }
  final KeyedProcessOperator<RowData, RowData, RowData> operator = new KeyedProcessOperator<>(overProcessFunction);
  OneInputTransformation<RowData, RowData> transform = ExecNodeUtil.createOneInputTransformation(
      inputTransform, createTransformationMeta(OVER_AGGREGATE_TRANSFORMATION, config), operator,
      InternalTypeInfo.of(getOutputType()), inputTransform.getParallelism());
  // set KeyType and Selector for state
  final RowDataKeySelector selector = KeySelectorUtil.getRowDataSelector(partitionKeys, InternalTypeInfo.of(inputRowType));
  transform.setStateKeySelector(selector);
  transform.setStateKeyType(selector.getProducedType());
  return transform;
}
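The if/else chain above admits exactly two window shapes. A simplified sketch of that classification (the enum and boolean parameters are illustrative assumptions, not Flink's GroupSpec API):

enum OverWindowKind { UNBOUNDED_PRECEDING, BOUNDED_PRECEDING, UNSUPPORTED }

static OverWindowKind classify(boolean lowerPreceding, boolean lowerUnbounded, boolean upperCurrentRow) {
  if (lowerPreceding && lowerUnbounded && upperCurrentRow) {
    // e.g. ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW
    return OverWindowKind.UNBOUNDED_PRECEDING;
  }
  if (lowerPreceding && !lowerUnbounded && upperCurrentRow) {
    // e.g. ROWS BETWEEN 5 PRECEDING AND CURRENT ROW
    return OverWindowKind.BOUNDED_PRECEDING;
  }
  // anything else, e.g. FOLLOWING bounds, is rejected with a TableException
  return OverWindowKind.UNSUPPORTED;
}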
Use of org.apache.beam.vendor.calcite.v1_28_0.org.apache.calcite.rex.RexLiteral in project flink by apache.
The class FlinkRelMdCollation, method values.
/**
* Helper method to determine a {@link org.apache.calcite.rel.core.Values}'s collation.
*
* <p>We actually under-report the collations. A Values with 0 or 1 rows - an edge case, but
* legitimate and very common - is ordered by every permutation of every subset of the columns.
*
* <p>So, our algorithm aims to:
*
* <ul>
* <li>produce at most N collations (where N is the number of columns);
* <li>make each collation as long as possible;
* <li>do not repeat combinations already emitted - if we've emitted {@code (a, b)} do not
* later emit {@code (b, a)};
* <li>probe the actual values and make sure that each collation is consistent with the data
* </ul>
*
* <p>So, for an empty Values with 4 columns, we would emit {@code (a, b, c, d), (b, c, d), (c,
* d), (d)}.
*/
public static List<RelCollation> values(RelMetadataQuery mq, RelDataType rowType,
    com.google.common.collect.ImmutableList<com.google.common.collect.ImmutableList<RexLiteral>> tuples) {
  // for future use
  Util.discard(mq);
  final List<RelCollation> list = new ArrayList<>();
  final int n = rowType.getFieldCount();
  final List<Pair<RelFieldCollation, com.google.common.collect.Ordering<List<RexLiteral>>>> pairs = new ArrayList<>();
  outer:
  for (int i = 0; i < n; i++) {
    pairs.clear();
    for (int j = i; j < n; j++) {
      final RelFieldCollation fieldCollation = new RelFieldCollation(j);
      com.google.common.collect.Ordering<List<RexLiteral>> comparator = comparator(fieldCollation);
      com.google.common.collect.Ordering<List<RexLiteral>> ordering;
      if (pairs.isEmpty()) {
        ordering = comparator;
      } else {
        ordering = Util.last(pairs).right.compound(comparator);
      }
      pairs.add(Pair.of(fieldCollation, ordering));
      if (!ordering.isOrdered(tuples)) {
        if (j == i) {
          continue outer;
        }
        pairs.remove(pairs.size() - 1);
      }
    }
    if (!pairs.isEmpty()) {
      list.add(RelCollations.of(Pair.left(pairs)));
    }
  }
  return list;
}
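The inner-loop probe relies on Guava's Ordering.compound and Ordering.isOrdered. A self-contained illustration on plain integer rows (hypothetical data; the real method compares List<RexLiteral> tuples via comparator(fieldCollation)):

import com.google.common.collect.Ordering;
import java.util.Arrays;
import java.util.Comparator;
import java.util.List;

public class CollationProbeDemo {
  public static void main(String[] args) {
    List<List<Integer>> tuples = Arrays.asList(
        Arrays.asList(1, 9),
        Arrays.asList(1, 7),
        Arrays.asList(2, 8));
    Ordering<List<Integer>> byCol0 = Ordering.from(Comparator.comparing((List<Integer> row) -> row.get(0)));
    Ordering<List<Integer>> byCol0ThenCol1 = byCol0.compound(Comparator.comparing((List<Integer> row) -> row.get(1)));
    System.out.println(byCol0.isOrdered(tuples));         // true: column 0 is non-decreasing
    System.out.println(byCol0ThenCol1.isOrdered(tuples)); // false: (1, 9) precedes (1, 7)
  }
}

On this data the method would keep the collation (0) but reject extending it to (0, 1), which is exactly what the pairs.remove step above implements.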