Search in sources :

Example 1 with CodecReader

use of org.apache.lucene.index.CodecReader in project lucene-solr by apache.

The class AddIndexesTask, method doLogic:

@Override
public int doLogic() throws Exception {
    IndexWriter writer = getRunData().getIndexWriter();
    if (useAddIndexesDir) {
        // Directory-based path: the writer copies segment files straight from inputDir.
        writer.addIndexes(inputDir);
    } else {
        // Reader-based path: open the source index, wrap every leaf as a
        // CodecReader, and feed the wrapped leaves to the writer.
        try (IndexReader reader = DirectoryReader.open(inputDir)) {
            CodecReader[] wrapped = new CodecReader[reader.leaves().size()];
            int slot = 0;
            for (LeafReaderContext context : reader.leaves()) {
                wrapped[slot] = SlowCodecReaderWrapper.wrap(context.reader());
                slot++;
            }
            writer.addIndexes(wrapped);
        }
    }
    // Task framework convention: number of work units performed.
    return 1;
}
Also used : CodecReader(org.apache.lucene.index.CodecReader) IndexWriter(org.apache.lucene.index.IndexWriter) IndexReader(org.apache.lucene.index.IndexReader) LeafReaderContext(org.apache.lucene.index.LeafReaderContext)

Example 2 with CodecReader

use of org.apache.lucene.index.CodecReader in project lucene-solr by apache.

The class TestMultiTermsEnum, method testNoTermsInField:

// LUCENE-6826: addIndexes must cope with a field that exists but has no terms.
public void testNoTermsInField() throws Exception {
    Directory directory = new RAMDirectory();
    IndexWriter writer = new IndexWriter(directory, new IndexWriterConfig(new MockAnalyzer(random())));
    Document document = new Document();
    document.add(new StringField("deleted", "0", Field.Store.YES));
    writer.addDocument(document);
    // NRT reader obtained before the writer closes; it remains usable afterwards.
    DirectoryReader reader = DirectoryReader.open(writer);
    writer.close();
    Directory directory2 = new RAMDirectory();
    writer = new IndexWriter(directory2, new IndexWriterConfig(new MockAnalyzer(random())));
    List<LeafReaderContext> leaves = reader.leaves();
    CodecReader[] codecReaders = new CodecReader[leaves.size()];
    for (int i = 0; i < leaves.size(); i++) {
        codecReaders[i] = new MigratingCodecReader((CodecReader) leaves.get(i).reader());
    }
    // <- bang
    writer.addIndexes(codecReaders);
    // BUGFIX: directory2 was previously leaked — close everything opened here.
    IOUtils.close(writer, reader, directory, directory2);
}
Also used : FilterCodecReader(org.apache.lucene.index.FilterCodecReader) CodecReader(org.apache.lucene.index.CodecReader) DirectoryReader(org.apache.lucene.index.DirectoryReader) Document(org.apache.lucene.document.Document) RAMDirectory(org.apache.lucene.store.RAMDirectory) MockAnalyzer(org.apache.lucene.analysis.MockAnalyzer) IndexWriter(org.apache.lucene.index.IndexWriter) StringField(org.apache.lucene.document.StringField) LeafReaderContext(org.apache.lucene.index.LeafReaderContext) RAMDirectory(org.apache.lucene.store.RAMDirectory) Directory(org.apache.lucene.store.Directory) IndexWriterConfig(org.apache.lucene.index.IndexWriterConfig)

Example 3 with CodecReader

use of org.apache.lucene.index.CodecReader in project lucene-solr by apache.

The class TaxonomyMergeUtils, method merge:

/**
 * Merges the given taxonomy and index directories and commits the changes to
 * the given writers.
 *
 * @param srcIndexDir    directory holding the source search index
 * @param srcTaxoDir     directory holding the source taxonomy index
 * @param map            receives the ordinal mapping produced by the taxonomy merge
 * @param destIndexWriter writer for the destination search index
 * @param destTaxoWriter  writer for the destination taxonomy
 * @param srcConfig      facets configuration of the source index
 * @throws IOException if reading or writing either index fails
 */
public static void merge(Directory srcIndexDir, Directory srcTaxoDir, OrdinalMap map, IndexWriter destIndexWriter, DirectoryTaxonomyWriter destTaxoWriter, FacetsConfig srcConfig) throws IOException {
    // merge the taxonomies; this also populates the ordinal map used below
    destTaxoWriter.addTaxonomy(srcTaxoDir, map);
    int[] ordinalMap = map.getMap();
    // try-with-resources replaces the original manual try/finally close
    try (DirectoryReader reader = DirectoryReader.open(srcIndexDir)) {
        List<LeafReaderContext> leaves = reader.leaves();
        int numReaders = leaves.size();
        CodecReader[] wrappedLeaves = new CodecReader[numReaders];
        for (int i = 0; i < numReaders; i++) {
            // remap facet ordinals from the source taxonomy into the destination's
            wrappedLeaves[i] = SlowCodecReaderWrapper.wrap(new OrdinalMappingLeafReader(leaves.get(i).reader(), ordinalMap, srcConfig));
        }
        destIndexWriter.addIndexes(wrappedLeaves);
        // commit changes to taxonomy and index respectively.
        destTaxoWriter.commit();
        destIndexWriter.commit();
    }
}
Also used : CodecReader(org.apache.lucene.index.CodecReader) DirectoryReader(org.apache.lucene.index.DirectoryReader) LeafReaderContext(org.apache.lucene.index.LeafReaderContext)

Example 4 with CodecReader

use of org.apache.lucene.index.CodecReader in project lucene-solr by apache.

The class DirectUpdateHandler2, method mergeIndexes:

/**
 * Merges the readers carried by {@code cmd} into this core's index.
 *
 * @param cmd command whose {@code readers} are the indexes to merge in
 * @return 1 if a merge was performed, 0 if there was nothing to merge
 * @throws IOException if the underlying writer fails
 */
@Override
public int mergeIndexes(MergeIndexesCommand cmd) throws IOException {
    mergeIndexesCommands.mark();
    int rc;
    // parameterized logging avoids eager string concatenation
    log.info("start {}", cmd);
    List<DirectoryReader> readers = cmd.readers;
    if (readers != null && !readers.isEmpty()) {
        List<CodecReader> mergeReaders = new ArrayList<>();
        for (DirectoryReader reader : readers) {
            for (LeafReaderContext leaf : reader.leaves()) {
                // wrap each leaf so it is acceptable to IndexWriter.addIndexes
                mergeReaders.add(SlowCodecReaderWrapper.wrap(leaf.reader()));
            }
        }
        RefCounted<IndexWriter> iw = solrCoreState.getIndexWriter(core);
        try {
            // zero-length array form is the recommended toArray idiom
            iw.get().addIndexes(mergeReaders.toArray(new CodecReader[0]));
        } finally {
            iw.decref();
        }
        rc = 1;
    } else {
        rc = 0;
    }
    log.info("end_mergeIndexes");
    // TODO: consider soft commit issues
    if (rc == 1 && commitTracker.getTimeUpperBound() > 0) {
        commitTracker.scheduleCommitWithin(commitTracker.getTimeUpperBound());
    } else if (rc == 1 && softCommitTracker.getTimeUpperBound() > 0) {
        softCommitTracker.scheduleCommitWithin(softCommitTracker.getTimeUpperBound());
    }
    return rc;
}
Also used : CodecReader(org.apache.lucene.index.CodecReader) DirectoryReader(org.apache.lucene.index.DirectoryReader) IndexWriter(org.apache.lucene.index.IndexWriter) ArrayList(java.util.ArrayList) LeafReaderContext(org.apache.lucene.index.LeafReaderContext)

Example 5 with CodecReader

use of org.apache.lucene.index.CodecReader in project lucene-solr by apache.

The class SolrIndexSplitter, method split:

/**
 * Splits the searcher's index into {@code numPieces} partitions. For every
 * source segment, {@code split(readerContext)} computes one FixedBitSet per
 * partition marking the documents that belong to it; each partition's writer
 * then receives every segment filtered through a LiveDocsReader so only those
 * documents survive. Targets are either existing sub-cores ({@code cores}) or
 * fresh index paths ({@code paths}).
 */
public void split() throws IOException {
    List<LeafReaderContext> leaves = searcher.getRawReader().leaves();
    List<FixedBitSet[]> segmentDocSets = new ArrayList<>(leaves.size());
    log.info("SolrIndexSplitter: partitions=" + numPieces + " segments=" + leaves.size());
    for (LeafReaderContext readerContext : leaves) {
        // make sure we're going in order
        assert readerContext.ordInParent == segmentDocSets.size();
        // docSets[p] flags this segment's documents that fall into partition p
        FixedBitSet[] docSets = split(readerContext);
        segmentDocSets.add(docSets);
    }
    for (int partitionNumber = 0; partitionNumber < numPieces; partitionNumber++) {
        log.info("SolrIndexSplitter: partition #" + partitionNumber + " partitionCount=" + numPieces + (ranges != null ? " range=" + ranges.get(partitionNumber) : ""));
        boolean success = false;
        // Writer ownership: either borrow the sub-core's ref-counted writer
        // (iwRef != null, released via decref) or create a standalone writer
        // for the target path (closed by us in the finally block).
        RefCounted<IndexWriter> iwRef = null;
        IndexWriter iw = null;
        if (cores != null) {
            SolrCore subCore = cores.get(partitionNumber);
            iwRef = subCore.getUpdateHandler().getSolrCoreState().getIndexWriter(subCore);
            iw = iwRef.get();
        } else {
            SolrCore core = searcher.getCore();
            String path = paths.get(partitionNumber);
            iw = SolrIndexWriter.create(core, "SplittingIndexWriter" + partitionNumber + (ranges != null ? " " + ranges.get(partitionNumber) : ""), path, core.getDirectoryFactory(), true, core.getLatestSchema(), core.getSolrConfig().indexConfig, core.getDeletionPolicy(), core.getCodec());
        }
        try {
            // This removes deletions but optimize might still be needed because sub-shards will have the same number of segments as the parent shard.
            for (int segmentNumber = 0; segmentNumber < leaves.size(); segmentNumber++) {
                log.info("SolrIndexSplitter: partition #" + partitionNumber + " partitionCount=" + numPieces + (ranges != null ? " range=" + ranges.get(partitionNumber) : "") + " segment #" + segmentNumber + " segmentCount=" + leaves.size());
                // Narrow the segment's live docs to this partition's bitset
                // before handing it to the destination writer.
                CodecReader subReader = SlowCodecReaderWrapper.wrap(leaves.get(segmentNumber).reader());
                iw.addIndexes(new LiveDocsReader(subReader, segmentDocSets.get(segmentNumber)[partitionNumber]));
            }
            // we commit explicitly instead of sending a CommitUpdateCommand through the processor chain
            // because the sub-shard cores will just ignore such a commit because the update log is not
            // in active state at this time.
            //TODO no commitUpdateCommand
            SolrIndexWriter.setCommitData(iw, -1);
            iw.commit();
            success = true;
        } finally {
            if (iwRef != null) {
                // borrowed writer: drop our reference, the owning core closes it
                iwRef.decref();
            } else {
                if (success) {
                    iw.close();
                } else {
                    // failure path: suppress close errors so the original exception propagates
                    IOUtils.closeWhileHandlingException(iw);
                }
            }
        }
    }
}
Also used : FilterCodecReader(org.apache.lucene.index.FilterCodecReader) CodecReader(org.apache.lucene.index.CodecReader) SolrCore(org.apache.solr.core.SolrCore) ArrayList(java.util.ArrayList) IndexWriter(org.apache.lucene.index.IndexWriter) FixedBitSet(org.apache.lucene.util.FixedBitSet) LeafReaderContext(org.apache.lucene.index.LeafReaderContext)

Aggregations

CodecReader (org.apache.lucene.index.CodecReader)9 LeafReaderContext (org.apache.lucene.index.LeafReaderContext)8 DirectoryReader (org.apache.lucene.index.DirectoryReader)6 IndexWriter (org.apache.lucene.index.IndexWriter)6 ArrayList (java.util.ArrayList)3 MockAnalyzer (org.apache.lucene.analysis.MockAnalyzer)3 Document (org.apache.lucene.document.Document)3 IndexWriterConfig (org.apache.lucene.index.IndexWriterConfig)3 Directory (org.apache.lucene.store.Directory)3 FilterCodecReader (org.apache.lucene.index.FilterCodecReader)2 ByteArrayOutputStream (java.io.ByteArrayOutputStream)1 PrintStream (java.io.PrintStream)1 Field (org.apache.lucene.document.Field)1 FieldType (org.apache.lucene.document.FieldType)1 IntPoint (org.apache.lucene.document.IntPoint)1 StoredField (org.apache.lucene.document.StoredField)1 StringField (org.apache.lucene.document.StringField)1 TextField (org.apache.lucene.document.TextField)1 FilterLeafReader (org.apache.lucene.index.FilterLeafReader)1 IndexReader (org.apache.lucene.index.IndexReader)1