
Example 1 with FileUriCollectPhase

Use of io.crate.planner.node.dql.FileUriCollectPhase in project crate by crate.

The class FileCollectSource, method getCollector:

@Override
public CrateCollector getCollector(CollectPhase collectPhase, BatchConsumer consumer, JobCollectContext jobCollectContext) {
    FileUriCollectPhase fileUriCollectPhase = (FileUriCollectPhase) collectPhase;
    InputFactory.Context<LineCollectorExpression<?>> ctx =
        inputFactory.ctxForRefs(FileLineReferenceResolver::getImplementation);
    ctx.add(collectPhase.toCollect());
    // Sort the reader node ids so every node agrees on the same ordering,
    // then locate this node's slot via binary search below.
    String[] readers = fileUriCollectPhase.nodeIds().toArray(new String[0]);
    Arrays.sort(readers);
    List<String> fileUris = targetUriToStringList(fileUriCollectPhase.targetUri());
    BatchIterator fileReadingIterator = FileReadingIterator.newInstance(
        fileUris,
        ctx.topLevelInputs(),
        ctx.expressions(),
        fileUriCollectPhase.compression(),
        fileInputFactoryMap,
        fileUriCollectPhase.sharedStorage(),
        readers.length,
        Arrays.binarySearch(readers, clusterService.state().nodes().getLocalNodeId()));
    return BatchIteratorCollectorBridge.newInstance(fileReadingIterator, consumer);
}
Also used: FileInputFactory (io.crate.operation.collect.files.FileInputFactory), InputFactory (io.crate.operation.InputFactory), LineCollectorExpression (io.crate.operation.collect.files.LineCollectorExpression), FileLineReferenceResolver (io.crate.operation.reference.file.FileLineReferenceResolver), BatchIterator (io.crate.data.BatchIterator), FileUriCollectPhase (io.crate.planner.node.dql.FileUriCollectPhase)
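
The sort-then-binary-search step gives each node a stable slot among the readers without any coordination: every node sorts the same set of node ids and looks up its own id. A minimal standalone sketch of that idiom, with a made-up class name and node ids (not CrateDB API):

import java.util.Arrays;
import java.util.List;

// Illustrative only: mirrors the sort + binarySearch idiom in getCollector above.
public class ReaderSlots {

    static int localReaderIndex(List<String> nodeIds, String localNodeId) {
        String[] readers = nodeIds.toArray(new String[0]);
        Arrays.sort(readers);
        // every node sorts the same id set, so binarySearch yields a
        // cluster-wide agreed slot for the local node
        return Arrays.binarySearch(readers, localNodeId);
    }

    public static void main(String[] args) {
        List<String> nodes = Arrays.asList("node-c", "node-a", "node-b");
        // sorted order is node-a, node-b, node-c -> "node-b" gets slot 1 of 3
        System.out.println(localReaderIndex(nodes, "node-b"));
    }
}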

Example 2 with FileUriCollectPhase

Use of io.crate.planner.node.dql.FileUriCollectPhase in project crate by crate.

The class CopyStatementPlanner, method planCopyFrom:

public Plan planCopyFrom(CopyFromAnalyzedStatement analysis, Planner.Context context) {
    /*
     * COPY FROM has two "modes":
     *
     * 1. non-partitioned tables, or partitioned tables with a partition ident
     *    -> import into a single ES index: collect the raw source and import it as-is
     *
     * 2. partitioned table without a partition ident
     *    -> collect the document and the partition-by values
     *    -> exclude partitioned-by columns from the document
     *    -> insert into an ES index (the partition is determined by the partition-by values)
     */
    DocTableInfo table = analysis.table();
    // index of the clusteredBy column within the primary key, -1 if it is not part of it
    int clusteredByPrimaryKeyIdx = table.primaryKey().indexOf(table.clusteredBy());
    List<String> partitionedByNames;
    String partitionIdent = null;
    List<BytesRef> partitionValues;
    if (analysis.partitionIdent() == null) {
        if (table.isPartitioned()) {
            partitionedByNames = Lists.newArrayList(Lists.transform(table.partitionedBy(), ColumnIdent::fqn));
        } else {
            partitionedByNames = Collections.emptyList();
        }
        partitionValues = ImmutableList.of();
    } else {
        assert table.isPartitioned() : "table must be partitioned if partitionIdent is set";
        // partitionIdent is present -> possible to index raw source into concrete es index
        partitionValues = PartitionName.decodeIdent(analysis.partitionIdent());
        partitionIdent = analysis.partitionIdent();
        partitionedByNames = Collections.emptyList();
    }
    SourceIndexWriterProjection sourceIndexWriterProjection = new SourceIndexWriterProjection(
        table.ident(),
        partitionIdent,
        table.getReference(DocSysColumns.RAW),
        table.primaryKey(),
        table.partitionedBy(),
        partitionValues,
        table.clusteredBy(),
        clusteredByPrimaryKeyIdx,
        analysis.settings(),
        null,
        partitionedByNames.isEmpty() ? null : partitionedByNames.toArray(new String[0]),
        // autoCreateIndices
        table.isPartitioned());
    List<Projection> projections = Collections.singletonList(sourceIndexWriterProjection);
    partitionedByNames.removeAll(Lists.transform(table.primaryKey(), ColumnIdent::fqn));
    int referencesSize = table.primaryKey().size() + partitionedByNames.size() + 1;
    if (clusteredByPrimaryKeyIdx == -1) {
        // clusteredBy is not part of the primary key -> one extra column to collect
        referencesSize++;
    }
    List<Symbol> toCollect = new ArrayList<>(referencesSize);
    // add primaryKey columns
    for (ColumnIdent primaryKey : table.primaryKey()) {
        toCollect.add(table.getReference(primaryKey));
    }
    // add partitioned columns (if not part of primaryKey)
    Set<Reference> referencedReferences = new HashSet<>();
    for (String partitionedColumn : partitionedByNames) {
        Reference reference = table.getReference(ColumnIdent.fromPath(partitionedColumn));
        Symbol symbol;
        if (reference instanceof GeneratedReference) {
            symbol = ((GeneratedReference) reference).generatedExpression();
            referencedReferences.addAll(((GeneratedReference) reference).referencedReferences());
        } else {
            symbol = reference;
        }
        toCollect.add(symbol);
    }
    // add clusteredBy column (if not part of primaryKey)
    if (clusteredByPrimaryKeyIdx == -1 && table.clusteredBy() != null && !DocSysColumns.ID.equals(table.clusteredBy())) {
        toCollect.add(table.getReference(table.clusteredBy()));
    }
    // add _raw or _doc
    if (table.isPartitioned() && analysis.partitionIdent() == null) {
        toCollect.add(table.getReference(DocSysColumns.DOC));
    } else {
        toCollect.add(table.getReference(DocSysColumns.RAW));
    }
    // add columns referenced by generated columns which are used as partitioned by column
    for (Reference reference : referencedReferences) {
        if (!toCollect.contains(reference)) {
            toCollect.add(reference);
        }
    }
    DiscoveryNodes allNodes = clusterService.state().nodes();
    FileUriCollectPhase collectPhase = new FileUriCollectPhase(
        context.jobId(),
        context.nextExecutionPhaseId(),
        "copyFrom",
        getExecutionNodes(allNodes, analysis.settings().getAsInt("num_readers", allNodes.getSize()), analysis.nodePredicate()),
        analysis.uri(),
        toCollect,
        projections,
        analysis.settings().get("compression", null),
        analysis.settings().getAsBoolean("shared", null));
    Collect collect = new Collect(collectPhase, TopN.NO_LIMIT, 0, 1, 1, null);
    return Merge.ensureOnHandler(collect, context, Collections.singletonList(MergeCountProjection.INSTANCE));
}
Also used: DocTableInfo (io.crate.metadata.doc.DocTableInfo), GeneratedReference (io.crate.metadata.GeneratedReference), Collect (io.crate.planner.node.dql.Collect), Symbol (io.crate.analyze.symbol.Symbol), Reference (io.crate.metadata.Reference), SourceIndexWriterProjection (io.crate.planner.projection.SourceIndexWriterProjection), WriterProjection (io.crate.planner.projection.WriterProjection), MergeCountProjection (io.crate.planner.projection.MergeCountProjection), Projection (io.crate.planner.projection.Projection), FileUriCollectPhase (io.crate.planner.node.dql.FileUriCollectPhase), ColumnIdent (io.crate.metadata.ColumnIdent), BytesRef (org.apache.lucene.util.BytesRef), DiscoveryNodes (org.elasticsearch.cluster.node.DiscoveryNodes)
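
The block comment at the top of planCopyFrom describes the two COPY FROM modes; the branch on analysis.partitionIdent() is what selects between them. Below is a self-contained sketch of just that decision, with illustrative method and parameter names rather than the actual planner types:

import java.util.Collections;
import java.util.List;

// Illustrative only: mirrors the mode selection in planCopyFrom above.
public class CopyFromModes {

    static List<String> partitionedByNames(boolean tableIsPartitioned,
                                           String partitionIdent,
                                           List<String> partitionedByFqns) {
        if (partitionIdent == null) {
            // mode 2: no concrete partition given -> partition columns must be
            // collected from every document (empty for non-partitioned tables)
            return tableIsPartitioned ? partitionedByFqns : Collections.emptyList();
        }
        // mode 1: a concrete partition is known up front -> the raw source can be
        // indexed as-is, no partition columns need to be collected
        assert tableIsPartitioned : "table must be partitioned if partitionIdent is set";
        return Collections.emptyList();
    }
}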

Example 3 with FileUriCollectPhase

Use of io.crate.planner.node.dql.FileUriCollectPhase in project crate by crate.

The class MapSideDataCollectOperationTest, method testFileUriCollect:

@Test
public void testFileUriCollect() throws Exception {
    ClusterService clusterService = new NoopClusterService();
    Functions functions = getFunctions();
    CollectSourceResolver collectSourceResolver = mock(CollectSourceResolver.class);
    when(collectSourceResolver.getService(any(RoutedCollectPhase.class)))
        .thenReturn(new FileCollectSource(functions, clusterService, Collections.<String, FileInputFactory>emptyMap()));
    MapSideDataCollectOperation collectOperation = new MapSideDataCollectOperation(collectSourceResolver, threadPool);
    // Write two JSON documents, one per line, as the input for the file URI collect.
    File tmpFile = temporaryFolder.newFile("fileUriCollectOperation.json");
    try (OutputStreamWriter writer = new OutputStreamWriter(new FileOutputStream(tmpFile), StandardCharsets.UTF_8)) {
        writer.write("{\"name\": \"Arthur\", \"id\": 4, \"details\": {\"age\": 38}}\n");
        writer.write("{\"id\": 5, \"name\": \"Trillian\", \"details\": {\"age\": 33}}\n");
    }
    FileUriCollectPhase collectNode = new FileUriCollectPhase(
        UUID.randomUUID(),
        0,
        "test",
        Collections.singletonList("noop_id"),
        Literal.of(Paths.get(tmpFile.toURI()).toUri().toString()),
        Arrays.<Symbol>asList(
            createReference("name", DataTypes.STRING),
            createReference(new ColumnIdent("details", "age"), DataTypes.INTEGER)),
        Collections.emptyList(),
        null,
        false);
    String threadPoolName = JobCollectContext.threadPoolName(collectNode, "noop_id");
    TestingBatchConsumer consumer = new TestingBatchConsumer();
    JobCollectContext jobCollectContext = mock(JobCollectContext.class);
    CrateCollector collector = collectOperation.createCollector(collectNode, consumer, jobCollectContext);
    collectOperation.launchCollector(collector, threadPoolName);
    assertThat(new CollectionBucket(consumer.getResult()), contains(isRow("Arthur", 38), isRow("Trillian", 33)));
}
Also used: CollectSourceResolver (io.crate.operation.collect.sources.CollectSourceResolver), Functions (io.crate.metadata.Functions), FileUriCollectPhase (io.crate.planner.node.dql.FileUriCollectPhase), ColumnIdent (io.crate.metadata.ColumnIdent), NoopClusterService (org.elasticsearch.test.cluster.NoopClusterService), ClusterService (org.elasticsearch.cluster.ClusterService), FileCollectSource (io.crate.operation.collect.sources.FileCollectSource), FileOutputStream (java.io.FileOutputStream), FileInputFactory (io.crate.operation.collect.files.FileInputFactory), OutputStreamWriter (java.io.OutputStreamWriter), TestingBatchConsumer (io.crate.testing.TestingBatchConsumer), File (java.io.File), RoutedCollectPhase (io.crate.planner.node.dql.RoutedCollectPhase), CollectionBucket (io.crate.data.CollectionBucket), Test (org.junit.Test), CrateUnitTest (io.crate.test.integration.CrateUnitTest)
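
The test feeds the collector line-delimited JSON, one document per line. A standalone sketch of the same fixture setup using java.nio, with an illustrative class name (only the one-document-per-line format and the file:// URI handoff mirror the test):

import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;

// Illustrative fixture helper, not part of the test class above.
public class NdjsonFixture {

    static String writeFixture() throws IOException {
        Path tmp = Files.createTempFile("fileUriCollectOperation", ".json");
        // one JSON document per line, matching the test input above
        String lines = "{\"name\": \"Arthur\", \"id\": 4, \"details\": {\"age\": 38}}\n"
                     + "{\"id\": 5, \"name\": \"Trillian\", \"details\": {\"age\": 33}}\n";
        Files.write(tmp, lines.getBytes(StandardCharsets.UTF_8));
        // FileUriCollectPhase takes the target as a URI string, e.g. file:///tmp/...
        return tmp.toUri().toString();
    }
}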

Aggregations

FileUriCollectPhase (io.crate.planner.node.dql.FileUriCollectPhase): 3 uses
ColumnIdent (io.crate.metadata.ColumnIdent): 2 uses
FileInputFactory (io.crate.operation.collect.files.FileInputFactory): 2 uses
Symbol (io.crate.analyze.symbol.Symbol): 1 use
BatchIterator (io.crate.data.BatchIterator): 1 use
CollectionBucket (io.crate.data.CollectionBucket): 1 use
Functions (io.crate.metadata.Functions): 1 use
GeneratedReference (io.crate.metadata.GeneratedReference): 1 use
Reference (io.crate.metadata.Reference): 1 use
DocTableInfo (io.crate.metadata.doc.DocTableInfo): 1 use
InputFactory (io.crate.operation.InputFactory): 1 use
LineCollectorExpression (io.crate.operation.collect.files.LineCollectorExpression): 1 use
CollectSourceResolver (io.crate.operation.collect.sources.CollectSourceResolver): 1 use
FileCollectSource (io.crate.operation.collect.sources.FileCollectSource): 1 use
FileLineReferenceResolver (io.crate.operation.reference.file.FileLineReferenceResolver): 1 use
Collect (io.crate.planner.node.dql.Collect): 1 use
RoutedCollectPhase (io.crate.planner.node.dql.RoutedCollectPhase): 1 use
MergeCountProjection (io.crate.planner.projection.MergeCountProjection): 1 use
Projection (io.crate.planner.projection.Projection): 1 use
SourceIndexWriterProjection (io.crate.planner.projection.SourceIndexWriterProjection): 1 use