Use of io.crate.metadata.doc.DocTableInfo in project crate by crate.
The class CopyStatementPlanner, method planCopyFrom.
public Plan planCopyFrom(CopyFromAnalyzedStatement analysis, Planner.Context context) {
/**
 * COPY FROM has two "modes":
 *
 * 1: Non-partitioned tables, or partitioned tables with a partition ident
 *    -> import into a single ES index
 *    -> collect the raw source and import it as-is
 *
 * 2: Partitioned tables without a partition ident
 *    -> collect the document and the partition-by values
 *    -> exclude the partitioned-by columns from the document
 *    -> insert into the ES index determined by the partition-by values
 */
DocTableInfo table = analysis.table();
int clusteredByPrimaryKeyIdx = table.primaryKey().indexOf(table.clusteredBy());
List<String> partitionedByNames;
String partitionIdent = null;
List<BytesRef> partitionValues;
if (analysis.partitionIdent() == null) {
if (table.isPartitioned()) {
partitionedByNames = Lists.newArrayList(Lists.transform(table.partitionedBy(), ColumnIdent::fqn));
} else {
partitionedByNames = Collections.emptyList();
}
partitionValues = ImmutableList.of();
} else {
assert table.isPartitioned() : "table must be partitioned if partitionIdent is set";
// partitionIdent is present -> possible to index raw source into concrete es index
partitionValues = PartitionName.decodeIdent(analysis.partitionIdent());
partitionIdent = analysis.partitionIdent();
partitionedByNames = Collections.emptyList();
}
SourceIndexWriterProjection sourceIndexWriterProjection = new SourceIndexWriterProjection(
    table.ident(),
    partitionIdent,
    table.getReference(DocSysColumns.RAW),
    table.primaryKey(),
    table.partitionedBy(),
    partitionValues,
    table.clusteredBy(),
    clusteredByPrimaryKeyIdx,
    analysis.settings(),
    null,
    partitionedByNames.size() > 0 ? partitionedByNames.toArray(new String[partitionedByNames.size()]) : null,
    table.isPartitioned() // autoCreateIndices
);
List<Projection> projections = Collections.<Projection>singletonList(sourceIndexWriterProjection);
partitionedByNames.removeAll(Lists.transform(table.primaryKey(), ColumnIdent::fqn));
int referencesSize = table.primaryKey().size() + partitionedByNames.size() + 1;
referencesSize = clusteredByPrimaryKeyIdx == -1 ? referencesSize + 1 : referencesSize;
List<Symbol> toCollect = new ArrayList<>(referencesSize);
// add primaryKey columns
for (ColumnIdent primaryKey : table.primaryKey()) {
toCollect.add(table.getReference(primaryKey));
}
// add partitioned columns (if not part of primaryKey)
Set<Reference> referencedReferences = new HashSet<>();
for (String partitionedColumn : partitionedByNames) {
Reference reference = table.getReference(ColumnIdent.fromPath(partitionedColumn));
Symbol symbol;
if (reference instanceof GeneratedReference) {
symbol = ((GeneratedReference) reference).generatedExpression();
referencedReferences.addAll(((GeneratedReference) reference).referencedReferences());
} else {
symbol = reference;
}
toCollect.add(symbol);
}
// add clusteredBy column (if not part of primaryKey)
if (clusteredByPrimaryKeyIdx == -1 && table.clusteredBy() != null && !DocSysColumns.ID.equals(table.clusteredBy())) {
toCollect.add(table.getReference(table.clusteredBy()));
}
// add _raw or _doc
if (table.isPartitioned() && analysis.partitionIdent() == null) {
toCollect.add(table.getReference(DocSysColumns.DOC));
} else {
toCollect.add(table.getReference(DocSysColumns.RAW));
}
// add columns referenced by generated columns which are used as partitioned by column
for (Reference reference : referencedReferences) {
if (!toCollect.contains(reference)) {
toCollect.add(reference);
}
}
DiscoveryNodes allNodes = clusterService.state().nodes();
FileUriCollectPhase collectPhase = new FileUriCollectPhase(
    context.jobId(),
    context.nextExecutionPhaseId(),
    "copyFrom",
    getExecutionNodes(allNodes, analysis.settings().getAsInt("num_readers", allNodes.getSize()), analysis.nodePredicate()),
    analysis.uri(),
    toCollect,
    projections,
    analysis.settings().get("compression", null),
    analysis.settings().getAsBoolean("shared", null)
);
Collect collect = new Collect(collectPhase, TopN.NO_LIMIT, 0, 1, 1, null);
return Merge.ensureOnHandler(collect, context, Collections.singletonList(MergeCountProjection.INSTANCE));
}
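The _doc/_raw branch above implements the two modes from the method's leading comment. A minimal sketch of that decision factored into a standalone helper; the enum and the helper name are hypothetical, not part of CrateDB:

enum CopyFromMode { RAW_SOURCE, PARTITIONED_DOC }

// Hypothetical helper mirroring the branch on table.isPartitioned() above.
static CopyFromMode copyFromMode(DocTableInfo table, @Nullable String partitionIdent) {
    if (table.isPartitioned() && partitionIdent == null) {
        // Mode 2: no concrete partition is known yet, so the full document (_doc)
        // must be collected to extract the partition-by values per row.
        return CopyFromMode.PARTITIONED_DOC;
    }
    // Mode 1: the target index is fixed; collect the raw source (_raw) and import as-is.
    return CopyFromMode.RAW_SOURCE;
}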
Use of io.crate.metadata.doc.DocTableInfo in project crate by crate.
The class AlterTableOperation, method executeAlterTableRenameTable.
public CompletableFuture<Long> executeAlterTableRenameTable(AnalyzedAlterTableRename statement) {
DocTableInfo sourceTableInfo = statement.sourceTableInfo();
RelationName sourceRelationName = sourceTableInfo.ident();
RelationName targetRelationName = statement.targetTableIdent();
return renameTable(sourceRelationName, targetRelationName, sourceTableInfo.isPartitioned());
}
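The method only unpacks the analyzed statement; renameTable(...) performs the cluster-state changes and reports completion through the returned future. A hedged caller sketch (the surrounding variable names are assumptions):

// Chain follow-up work onto the rename; failures surface through the future.
alterTableOperation.executeAlterTableRenameTable(statement)
    .whenComplete((rowCount, err) -> {
        if (err != null) {
            logger.error("rename failed", err);
        } else {
            logger.info("rename succeeded, affected rows: {}", rowCount);
        }
    });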
Use of io.crate.metadata.doc.DocTableInfo in project crate by crate.
The class TransportShardUpsertAction, method processRequestItems.
@Override
protected WritePrimaryResult<ShardUpsertRequest, ShardResponse> processRequestItems(IndexShard indexShard,
                                                                                    ShardUpsertRequest request,
                                                                                    AtomicBoolean killed) {
ShardResponse shardResponse = new ShardResponse(request.returnValues());
String indexName = request.index();
DocTableInfo tableInfo = schemas.getTableInfo(RelationName.fromIndexName(indexName), Operation.INSERT);
Reference[] insertColumns = request.insertColumns();
GeneratedColumns.Validation valueValidation = request.validateConstraints() ? GeneratedColumns.Validation.VALUE_MATCH : GeneratedColumns.Validation.NONE;
TransactionContext txnCtx = TransactionContext.of(request.sessionSettings());
InsertSourceGen insertSourceGen = insertColumns == null ? null : InsertSourceGen.of(txnCtx, nodeCtx, tableInfo, indexName, valueValidation, Arrays.asList(insertColumns));
UpdateSourceGen updateSourceGen = request.updateColumns() == null ? null : new UpdateSourceGen(txnCtx, nodeCtx, tableInfo, request.updateColumns());
ReturnValueGen returnValueGen = request.returnValues() == null ? null : new ReturnValueGen(txnCtx, nodeCtx, tableInfo, request.returnValues());
Translog.Location translogLocation = null;
for (ShardUpsertRequest.Item item : request.items()) {
int location = item.location();
if (killed.get()) {
// Set the failure on the response and skip all remaining items.
// The replica operation will still be executed, but only items with a valid
// source (i.e. ones processed on the primary) are processed on the replica.
shardResponse.failure(new InterruptedException());
break;
}
try {
IndexItemResponse indexItemResponse = indexItem(request, item, indexShard, updateSourceGen, insertSourceGen, returnValueGen);
if (indexItemResponse != null) {
if (indexItemResponse.translog != null) {
shardResponse.add(location);
translogLocation = indexItemResponse.translog;
}
if (indexItemResponse.returnValues != null) {
shardResponse.addResultRows(indexItemResponse.returnValues);
}
}
} catch (Exception e) {
if (retryPrimaryException(e)) {
throw Exceptions.toRuntimeException(e);
}
if (logger.isDebugEnabled()) {
logger.debug("Failed to execute upsert on nodeName={}, shardId={} id={} error={}", clusterService.localNode().getName(), request.shardId(), item.id(), e);
}
// *mark* the item as failed by setting the source to null,
// so the replica operation does not process this item
item.source(null);
if (!request.continueOnError()) {
shardResponse.failure(e);
break;
}
shardResponse.add(location, new ShardResponse.Failure(item.id(), userFriendlyCrateExceptionTopOnly(e), (e instanceof VersionConflictEngineException)));
}
}
return new WritePrimaryResult<>(request, shardResponse, translogLocation, null, indexShard);
}
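Note the failure contract between primary and replica: failed (or killed) items stay in the request, but their source is nulled, so the replica sees the same item list and can skip them. A sketch of the replica-side counterpart implied by that contract (hypothetical; the real replica handler lives elsewhere in the class):

for (ShardUpsertRequest.Item item : request.items()) {
    if (item.source() == null) {
        // Failed or skipped on the primary; do not replay on the replica.
        continue;
    }
    // ... index item.source() into the replica shard
}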
Use of io.crate.metadata.doc.DocTableInfo in project crate by crate.
The class InsertFromValues, method evaluateValueTableFunction.
private static Iterator<Row> evaluateValueTableFunction(TableFunctionImplementation<?> funcImplementation,
                                                        List<Symbol> arguments,
                                                        List<Reference> allTargetReferences,
                                                        DocTableInfo tableInfo,
                                                        Row params,
                                                        PlannerContext plannerContext,
                                                        SubQueryResults subQueryResults) {
SymbolEvaluator symbolEval = new SymbolEvaluator(plannerContext.transactionContext(), plannerContext.nodeContext(), subQueryResults);
Function<? super Symbol, Input<?>> eval = (symbol) -> symbol.accept(symbolEval, params);
ArrayList<Input<?>> boundArguments = new ArrayList<>(arguments.size());
for (int i = 0; i < arguments.size(); i++) {
boundArguments.add(eval.apply(arguments.get(i)));
}
// noinspection unchecked
Iterable<Row> rows = funcImplementation.evaluate(plannerContext.transactionContext(), plannerContext.nodeContext(), boundArguments.toArray(new Input[0]));
return StreamSupport.stream(rows.spliterator(), false).map(row -> cast(row, allTargetReferences, tableInfo)).iterator();
}
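The cast(...) helper applied in the stream is not shown here. A plausible sketch, assuming it coerces each evaluated cell to the declared column type via DataType#implicitCast; the real implementation may handle more cases, e.g. generated or default columns:

// Hypothetical reconstruction of cast(...); not the actual CrateDB code.
static Row cast(Row row, List<Reference> allTargetReferences, DocTableInfo tableInfo) {
    Object[] cells = new Object[row.numColumns()];
    for (int i = 0; i < row.numColumns(); i++) {
        DataType<?> targetType = allTargetReferences.get(i).valueType();
        // Coerce the evaluated cell to the target column's type.
        cells[i] = targetType.implicitCast(row.get(i));
    }
    return new RowN(cells);
}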
Use of io.crate.metadata.doc.DocTableInfo in project crate by crate.
The class RestoreSnapshotPlan, method bind.
@VisibleForTesting
public static BoundRestoreSnapshot bind(AnalyzedRestoreSnapshot restoreSnapshot,
                                        CoordinatorTxnCtx txnCtx,
                                        NodeContext nodeCtx,
                                        Row parameters,
                                        SubQueryResults subQueryResults,
                                        Schemas schemas) {
Function<? super Symbol, Object> eval = x -> SymbolEvaluator.evaluate(txnCtx, nodeCtx, x, parameters, subQueryResults);
Settings settings = GenericPropertiesConverter.genericPropertiesToSettings(restoreSnapshot.properties().map(eval), SnapshotSettings.SETTINGS);
HashSet<BoundRestoreSnapshot.RestoreTableInfo> restoreTables = new HashSet<>(restoreSnapshot.tables().size());
for (Table<Symbol> table : restoreSnapshot.tables()) {
var relationName = RelationName.of(table.getName(), txnCtx.sessionContext().searchPath().currentSchema());
try {
DocTableInfo docTableInfo = schemas.getTableInfo(relationName, Operation.RESTORE_SNAPSHOT);
if (table.partitionProperties().isEmpty()) {
throw new RelationAlreadyExists(relationName);
}
var partitionName = toPartitionName(docTableInfo, Lists2.map(table.partitionProperties(), x -> x.map(eval)));
if (docTableInfo.partitions().contains(partitionName)) {
throw new PartitionAlreadyExistsException(partitionName);
}
restoreTables.add(new BoundRestoreSnapshot.RestoreTableInfo(relationName, partitionName));
} catch (RelationUnknown | SchemaUnknownException e) {
if (table.partitionProperties().isEmpty()) {
restoreTables.add(new BoundRestoreSnapshot.RestoreTableInfo(relationName, null));
} else {
var partitionName = toPartitionName(relationName, Lists2.map(table.partitionProperties(), x -> x.map(eval)));
restoreTables.add(new BoundRestoreSnapshot.RestoreTableInfo(relationName, partitionName));
}
}
}
return new BoundRestoreSnapshot(
    restoreSnapshot.repository(),
    restoreSnapshot.snapshot(),
    restoreTables,
    restoreSnapshot.includeTables(),
    restoreSnapshot.includeCustomMetadata(),
    restoreSnapshot.customMetadataTypes(),
    restoreSnapshot.includeGlobalSettings(),
    restoreSnapshot.globalSettings(),
    settings
);
}
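The try/catch in the loop yields four outcomes: an existing table without a PARTITION clause is an error (RelationAlreadyExists); an existing table with a not-yet-existing partition restores just that partition; an unknown table is restored whole (partitionName == null) or as a single partition. A hedged usage sketch with nothing bound; Row.EMPTY and SubQueryResults.EMPTY are the usual placeholders when a statement has no bind parameters and no sub-selects (the surrounding variable names are assumptions):

BoundRestoreSnapshot bound = RestoreSnapshotPlan.bind(
    analyzedRestoreSnapshot,  // output of the analyzer
    coordinatorTxnCtx,
    nodeCtx,
    Row.EMPTY,                // no bind parameters
    SubQueryResults.EMPTY,    // no sub-query results
    schemas
);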