Use of org.apache.lucene.util.BytesRef in project crate by crate.
Example from class InsertPlannerTest, method testInsertPlan.
@Test
public void testInsertPlan() throws Exception {
// Plan a single-row INSERT and verify it becomes an UpsertById with one item.
UpsertById plan = e.plan("insert into users (id, name) values (42, 'Deep Thought')");

// Insert columns appear in statement order: id, then name.
Reference[] columns = plan.insertColumns();
assertThat(columns.length, is(2));
assertThat(columns[0].ident().columnIdent().fqn(), is("id"));
assertThat(columns[1].ident().columnIdent().fqn(), is("name"));

// One VALUES row yields exactly one upsert item; id doubles as routing.
assertThat(plan.items().size(), is(1));
UpsertById.Item row = plan.items().get(0);
assertThat(row.index(), is("users"));
assertThat(row.id(), is("42"));
assertThat(row.routing(), is("42"));
assertThat(row.insertValues().length, is(2));
assertThat((Long) row.insertValues()[0], is(42L));
assertThat((BytesRef) row.insertValues()[1], is(new BytesRef("Deep Thought")));
}
Use of org.apache.lucene.util.BytesRef in project crate by crate.
Example from class ReplaceFunction, method compile.
@Override
public Scalar<BytesRef, Object> compile(List<Symbol> arguments) {
assert arguments.size() >= 3 : "number of arguments must be >= 3";
// Pre-compile the regex only when the pattern (2nd argument) is a literal
// known at planning time; otherwise fall back to per-row evaluation.
String pattern = null;
if (arguments.get(1).symbolType() == SymbolType.LITERAL) {
Literal literal = (Literal) arguments.get(1);
Object patternVal = literal.value();
if (patternVal == null) {
// NULL pattern: nothing to pre-compile, keep the uncompiled function.
return this;
}
pattern = ((BytesRef) patternVal).utf8ToString();
}
BytesRef flags = null;
if (arguments.size() == 4) {
assert arguments.get(3).symbolType() == SymbolType.LITERAL : "4th argument must be of type " + SymbolType.LITERAL;
// BUG FIX: flags are the 4th argument (index 3); the original read index 2,
// which is the pattern argument, so literal flags were never applied.
flags = (BytesRef) ((Literal) arguments.get(3)).value();
}
if (pattern != null) {
regexMatcher = new RegexMatcher(pattern, flags);
} else {
regexMatcher = null;
}
return this;
}
Use of org.apache.lucene.util.BytesRef in project crate by crate.
Example from class InsertFromValuesAnalyzer, method addValues.
/**
 * Analyzes a single VALUES row of an INSERT statement: normalizes each value against its
 * target column, collects primary-key and routing values, resolves any ON DUPLICATE KEY
 * assignments, evaluates generated-column expressions, and registers the resulting source
 * map, document id and routing value with the analysis {@code context}.
 *
 * @param node            the VALUES row being analyzed
 * @param assignments     ON DUPLICATE KEY assignments (may be empty)
 * @param numPrimaryKeys  expected number of primary-key values (used to presize the list)
 * @param idFunction      derives the document id from the collected primary-key values
 * @param bulkIdx         index of this row within a bulk request, or negative for non-bulk
 * @throws IOException              propagated from generated-expression processing
 * @throws ColumnValidationException if a value cannot be normalized to its column's type
 */
private void addValues(DocTableRelation tableRelation, ValueNormalizer valueNormalizer, EvaluatingNormalizer normalizer, ExpressionAnalyzer expressionAnalyzer, ExpressionAnalysisContext expressionAnalysisContext, TransactionContext transactionContext, ValuesResolver valuesResolver, ExpressionAnalyzer valuesAwareExpressionAnalyzer, ValuesList node, List<Assignment> assignments, InsertFromValuesAnalyzedStatement context, ReferenceToLiteralConverter.Context referenceToLiteralContext, int numPrimaryKeys, Function<List<BytesRef>, String> idFunction, int bulkIdx) throws IOException {
// Partitioned tables get a fresh partition map per row.
if (context.tableInfo().isPartitioned()) {
context.newPartitionMap();
}
List<BytesRef> primaryKeyValues = new ArrayList<>(numPrimaryKeys);
String routingValue = null;
List<ColumnIdent> primaryKey = context.tableInfo().primaryKey();
Object[] insertValues = new Object[node.values().size()];
for (int i = 0, valuesSize = node.values().size(); i < valuesSize; i++) {
Expression expression = node.values().get(i);
Symbol valuesSymbol = normalizer.normalize(expressionAnalyzer.convert(expression, expressionAnalysisContext), transactionContext);
// implicit type conversion
Reference column = context.columns().get(i);
final ColumnIdent columnIdent = column.ident().columnIdent();
Object value;
try {
// Coerce the expression to the column's type and extract the concrete value.
valuesSymbol = valueNormalizer.normalizeInputForReference(valuesSymbol, column);
value = ((Input) valuesSymbol).value();
} catch (IllegalArgumentException | UnsupportedOperationException e) {
throw new ColumnValidationException(columnIdent.sqlFqn(), e);
} catch (ClassCastException e) {
// symbol is no Input
throw new ColumnValidationException(columnIdent.name(), SymbolFormatter.format("Invalid value '%s' in insert statement", valuesSymbol));
}
// Collect primary-key values at their position within the table's primary key.
if (context.primaryKeyColumnIndices().contains(i)) {
if (value == null) {
throw new IllegalArgumentException("Primary key value must not be NULL");
}
int idx = primaryKey.indexOf(columnIdent);
if (idx < 0) {
// oh look, one or more nested primary keys!
// The PK columns live inside this object column; pull each one out by path.
assert value instanceof Map : "value must be instance of Map";
for (ColumnIdent pkIdent : primaryKey) {
if (!pkIdent.getRoot().equals(columnIdent)) {
continue;
}
int pkIdx = primaryKey.indexOf(pkIdent);
Object nestedValue = StringObjectMaps.fromMapByPath((Map) value, pkIdent.path());
addPrimaryKeyValue(pkIdx, nestedValue, primaryKeyValues);
}
} else {
addPrimaryKeyValue(idx, value, primaryKeyValues);
}
}
if (i == context.routingColumnIndex()) {
routingValue = extractRoutingValue(columnIdent, value, context);
}
// Partition-column values go into the partition map, not the source;
// only a non-partition remainder (if any) is kept in insertValues.
if (context.partitionedByIndices().contains(i)) {
Object rest = processPartitionedByValues(columnIdent, value, context);
if (rest != null) {
insertValues[i] = rest;
}
} else {
insertValues[i] = value;
}
}
// Resolve ON DUPLICATE KEY assignments against the insert values of this row.
if (!assignments.isEmpty()) {
valuesResolver.insertValues = insertValues;
valuesResolver.columns = context.columns();
Symbol[] onDupKeyAssignments = new Symbol[assignments.size()];
valuesResolver.assignmentColumns = new ArrayList<>(assignments.size());
expressionAnalyzer.setResolveFieldsOperation(Operation.UPDATE);
for (int i = 0; i < assignments.size(); i++) {
Assignment assignment = assignments.get(i);
Reference columnName = tableRelation.resolveField((Field) expressionAnalyzer.convert(assignment.columnName(), expressionAnalysisContext));
assert columnName != null : "columnName must not be null";
Symbol valueSymbol = normalizer.normalize(valuesAwareExpressionAnalyzer.convert(assignment.expression(), expressionAnalysisContext), transactionContext);
Symbol assignmentExpression = valueNormalizer.normalizeInputForReference(valueSymbol, columnName);
onDupKeyAssignments[i] = assignmentExpression;
// Only record the column name if the resolver didn't add it already
// (assignmentColumns may be filled as a side effect of value resolution).
if (valuesResolver.assignmentColumns.size() == i) {
valuesResolver.assignmentColumns.add(columnName.ident().columnIdent().fqn());
}
}
context.addOnDuplicateKeyAssignments(onDupKeyAssignments);
context.addOnDuplicateKeyAssignmentsColumns(valuesResolver.assignmentColumns.toArray(new String[valuesResolver.assignmentColumns.size()]));
}
// process generated column expressions and add columns + values
GeneratedExpressionContext ctx = new GeneratedExpressionContext(tableRelation, context, normalizer, transactionContext, referenceToLiteralContext, primaryKeyValues, insertValues, routingValue);
processGeneratedExpressions(ctx);
// Generated expressions may have extended the values and/or set the routing value.
insertValues = ctx.insertValues;
routingValue = ctx.routingValue;
context.sourceMaps().add(insertValues);
String id = idFunction.apply(primaryKeyValues);
context.addIdAndRouting(id, routingValue);
if (bulkIdx >= 0) {
context.bulkIndices().add(bulkIdx);
}
}
Use of org.apache.lucene.util.BytesRef in project crate by crate.
Example from class NumberOfReplicas, method fromSettings.
/**
 * Reads the replica configuration from {@code settings}: the auto-expand-replicas
 * range wins when set and not explicitly false; otherwise the explicit
 * number-of-replicas value is used, defaulting to "1".
 */
public static BytesRef fromSettings(Settings settings) {
String autoExpand = settings.get(AUTO_EXPAND_REPLICAS);
if (autoExpand != null && !Booleans.isExplicitFalse(autoExpand)) {
// Validate the "min-max" range format before returning it.
validateExpandReplicaSetting(autoExpand);
return new BytesRef(autoExpand);
}
return new BytesRef(MoreObjects.firstNonNull(settings.get(NUMBER_OF_REPLICAS), "1"));
}
Use of org.apache.lucene.util.BytesRef in project crate by crate.
Example from class PartitionPropertiesAnalyzer, method toPartitionName.
/**
 * Builds a {@link PartitionName} from the PARTITION clause assignments, converting
 * each value to the partition column's type and then to its string representation.
 * Fails if the table is not partitioned, the clause column count doesn't match, or
 * a named column is not a partition column.
 */
public static PartitionName toPartitionName(DocTableInfo tableInfo, List<Assignment> partitionProperties, Row parameters) {
Preconditions.checkArgument(tableInfo.isPartitioned(), "table '%s' is not partitioned", tableInfo.ident().fqn());
Preconditions.checkArgument(partitionProperties.size() == tableInfo.partitionedBy().size(), "The table \"%s\" is partitioned by %s columns but the PARTITION clause contains %s columns", tableInfo.ident().fqn(), tableInfo.partitionedBy().size(), partitionProperties.size());
Map<ColumnIdent, Object> properties = assignmentsToMap(partitionProperties, parameters);
BytesRef[] partitionValues = new BytesRef[properties.size()];
for (Map.Entry<ColumnIdent, Object> property : properties.entrySet()) {
ColumnIdent column = property.getKey();
// Position of this column within the table's partitionedBy list (-1 if unknown).
int position = tableInfo.partitionedBy().indexOf(column);
try {
Reference reference = tableInfo.partitionedByColumns().get(position);
Object converted = reference.valueType().value(property.getValue());
// null stays null; everything else is normalized to its string form.
partitionValues[position] = converted == null ? null : DataTypes.STRING.value(converted);
} catch (IndexOutOfBoundsException ex) {
// position was -1: the column is not part of the partition key.
throw new IllegalArgumentException(String.format(Locale.ENGLISH, "\"%s\" is no known partition column", column.sqlFqn()));
}
}
return new PartitionName(tableInfo.ident(), Arrays.asList(partitionValues));
}
Aggregations