use of org.apache.lucene.index.CodecReader in project lucene-solr by apache.
the class AddIndexesTask method doLogic.
@Override
public int doLogic() throws Exception {
  IndexWriter writer = getRunData().getIndexWriter();
  if (useAddIndexesDir) {
    writer.addIndexes(inputDir);
  } else {
    try (IndexReader r = DirectoryReader.open(inputDir)) {
      CodecReader[] leaves = new CodecReader[r.leaves().size()];
      int i = 0;
      for (LeafReaderContext leaf : r.leaves()) {
        leaves[i++] = SlowCodecReaderWrapper.wrap(leaf.reader());
      }
      writer.addIndexes(leaves);
    }
  }
  return 1;
}
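The same two paths work outside the benchmark framework. Below is a minimal standalone sketch of both addIndexes variants; the "src-index"/"dest-index" paths and the surrounding setup are assumptions for illustration, not part of the original task:
// Minimal sketch, assuming hypothetical source and destination paths.
try (Directory src = FSDirectory.open(Paths.get("src-index"));
     Directory dest = FSDirectory.open(Paths.get("dest-index"));
     IndexWriter writer = new IndexWriter(dest, new IndexWriterConfig());
     DirectoryReader reader = DirectoryReader.open(src)) {
  // Variant 1: let IndexWriter copy whole segments straight from the directory.
  // writer.addIndexes(src);
  // Variant 2: wrap each leaf as a CodecReader so it could be filtered or rewritten first.
  CodecReader[] leaves = new CodecReader[reader.leaves().size()];
  int i = 0;
  for (LeafReaderContext ctx : reader.leaves()) {
    leaves[i++] = SlowCodecReaderWrapper.wrap(ctx.reader());
  }
  writer.addIndexes(leaves);
  writer.commit();
}
The directory variant is cheaper because segments are copied as-is; the CodecReader variant is the one to use when documents must be transformed or subsetted on the way in, as the remaining examples on this page show.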
use of org.apache.lucene.index.CodecReader in project lucene-solr by apache.
the class TestMultiTermsEnum method testNoTermsInField.
// LUCENE-6826
public void testNoTermsInField() throws Exception {
  Directory directory = new RAMDirectory();
  IndexWriter writer = new IndexWriter(directory, new IndexWriterConfig(new MockAnalyzer(random())));
  Document document = new Document();
  document.add(new StringField("deleted", "0", Field.Store.YES));
  writer.addDocument(document);
  DirectoryReader reader = DirectoryReader.open(writer);
  writer.close();
  Directory directory2 = new RAMDirectory();
  writer = new IndexWriter(directory2, new IndexWriterConfig(new MockAnalyzer(random())));
  List<LeafReaderContext> leaves = reader.leaves();
  CodecReader[] codecReaders = new CodecReader[leaves.size()];
  for (int i = 0; i < leaves.size(); i++) {
    codecReaders[i] = new MigratingCodecReader((CodecReader) leaves.get(i).reader());
  }
  writer.addIndexes(codecReaders); // <- bang
  IOUtils.close(writer, reader, directory, directory2); // close directory2 too, to avoid a leak
}
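MigratingCodecReader is a helper defined elsewhere in the test; in spirit it is a pass-through FilterCodecReader. A minimal sketch of such a wrapper follows. The class name is hypothetical, and the two cache-helper overrides apply to Lucene 7+ (older versions omit them):
// Hypothetical pass-through wrapper in the spirit of the test's MigratingCodecReader.
class PassThroughCodecReader extends FilterCodecReader {
  PassThroughCodecReader(CodecReader in) {
    super(in); // postings, norms, doc values, stored fields all delegate to 'in'
  }
  @Override
  public IndexReader.CacheHelper getCoreCacheHelper() {
    return in.getCoreCacheHelper();
  }
  @Override
  public IndexReader.CacheHelper getReaderCacheHelper() {
    return null; // conservative: no cross-reader caching for the wrapper
  }
}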
use of org.apache.lucene.index.CodecReader in project lucene-solr by apache.
the class TaxonomyMergeUtils method merge.
/**
 * Merges the given taxonomy and index directories and commits the changes to
 * the given writers.
 */
public static void merge(Directory srcIndexDir, Directory srcTaxoDir, OrdinalMap map, IndexWriter destIndexWriter,
    DirectoryTaxonomyWriter destTaxoWriter, FacetsConfig srcConfig) throws IOException {
  // merge the taxonomies
  destTaxoWriter.addTaxonomy(srcTaxoDir, map);
  int[] ordinalMap = map.getMap();
  DirectoryReader reader = DirectoryReader.open(srcIndexDir);
  try {
    List<LeafReaderContext> leaves = reader.leaves();
    int numReaders = leaves.size();
    CodecReader[] wrappedLeaves = new CodecReader[numReaders];
    for (int i = 0; i < numReaders; i++) {
      wrappedLeaves[i] = SlowCodecReaderWrapper.wrap(new OrdinalMappingLeafReader(leaves.get(i).reader(), ordinalMap, srcConfig));
    }
    destIndexWriter.addIndexes(wrappedLeaves);
    // commit changes to taxonomy and index respectively.
    destTaxoWriter.commit();
    destIndexWriter.commit();
  } finally {
    reader.close();
  }
}
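A hedged usage sketch of this utility, where all four directory variables are assumptions and MemoryOrdinalMap is the simplest in-memory OrdinalMap implementation:
// Assumed to exist already: srcIndexDir, srcTaxoDir, destIndexDir, destTaxoDir.
IndexWriter destIndexWriter = new IndexWriter(destIndexDir, new IndexWriterConfig());
DirectoryTaxonomyWriter destTaxoWriter = new DirectoryTaxonomyWriter(destTaxoDir);
DirectoryTaxonomyWriter.OrdinalMap map = new DirectoryTaxonomyWriter.MemoryOrdinalMap();
TaxonomyMergeUtils.merge(srcIndexDir, srcTaxoDir, map, destIndexWriter, destTaxoWriter, new FacetsConfig());
// merge() has already committed both writers; just release them.
IOUtils.close(destIndexWriter, destTaxoWriter);
The OrdinalMap matters because facet ordinals in the source taxonomy generally collide with those in the destination; addTaxonomy() fills the map, and OrdinalMappingLeafReader rewrites each document's ordinals through it during the copy.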
use of org.apache.lucene.index.CodecReader in project lucene-solr by apache.
the class DirectUpdateHandler2 method mergeIndexes.
@Override
public int mergeIndexes(MergeIndexesCommand cmd) throws IOException {
  mergeIndexesCommands.mark();
  int rc;
  log.info("start " + cmd);
  List<DirectoryReader> readers = cmd.readers;
  if (readers != null && readers.size() > 0) {
    List<CodecReader> mergeReaders = new ArrayList<>();
    for (DirectoryReader reader : readers) {
      for (LeafReaderContext leaf : reader.leaves()) {
        mergeReaders.add(SlowCodecReaderWrapper.wrap(leaf.reader()));
      }
    }
    RefCounted<IndexWriter> iw = solrCoreState.getIndexWriter(core);
    try {
      iw.get().addIndexes(mergeReaders.toArray(new CodecReader[mergeReaders.size()]));
    } finally {
      iw.decref();
    }
    rc = 1;
  } else {
    rc = 0;
  }
  log.info("end_mergeIndexes");
  // TODO: consider soft commit issues
  if (rc == 1 && commitTracker.getTimeUpperBound() > 0) {
    commitTracker.scheduleCommitWithin(commitTracker.getTimeUpperBound());
  } else if (rc == 1 && softCommitTracker.getTimeUpperBound() > 0) {
    softCommitTracker.scheduleCommitWithin(softCommitTracker.getTimeUpperBound());
  }
  return rc;
}
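Stripped of Solr's metrics, logging, and commit-tracker bookkeeping, the core move is flattening several DirectoryReaders into one CodecReader array for a single addIndexes() call. As a standalone sketch (the helper name is hypothetical):
// Hypothetical helper: flatten every leaf of several readers for addIndexes().
static CodecReader[] toCodecReaders(List<DirectoryReader> readers) throws IOException {
  List<CodecReader> out = new ArrayList<>();
  for (DirectoryReader r : readers) {
    for (LeafReaderContext leaf : r.leaves()) {
      out.add(SlowCodecReaderWrapper.wrap(leaf.reader()));
    }
  }
  return out.toArray(new CodecReader[0]);
}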
use of org.apache.lucene.index.CodecReader in project lucene-solr by apache.
the class SolrIndexSplitter method split.
public void split() throws IOException {
  List<LeafReaderContext> leaves = searcher.getRawReader().leaves();
  List<FixedBitSet[]> segmentDocSets = new ArrayList<>(leaves.size());
  log.info("SolrIndexSplitter: partitions=" + numPieces + " segments=" + leaves.size());
  for (LeafReaderContext readerContext : leaves) {
    // make sure we're going in order
    assert readerContext.ordInParent == segmentDocSets.size();
    FixedBitSet[] docSets = split(readerContext);
    segmentDocSets.add(docSets);
  }
  for (int partitionNumber = 0; partitionNumber < numPieces; partitionNumber++) {
    log.info("SolrIndexSplitter: partition #" + partitionNumber + " partitionCount=" + numPieces
        + (ranges != null ? " range=" + ranges.get(partitionNumber) : ""));
    boolean success = false;
    RefCounted<IndexWriter> iwRef = null;
    IndexWriter iw = null;
    if (cores != null) {
      SolrCore subCore = cores.get(partitionNumber);
      iwRef = subCore.getUpdateHandler().getSolrCoreState().getIndexWriter(subCore);
      iw = iwRef.get();
    } else {
      SolrCore core = searcher.getCore();
      String path = paths.get(partitionNumber);
      iw = SolrIndexWriter.create(core, "SplittingIndexWriter" + partitionNumber
          + (ranges != null ? " " + ranges.get(partitionNumber) : ""), path, core.getDirectoryFactory(), true,
          core.getLatestSchema(), core.getSolrConfig().indexConfig, core.getDeletionPolicy(), core.getCodec());
    }
    try {
      // This removes deletions, but an optimize might still be needed because
      // sub-shards will have the same number of segments as the parent shard.
      for (int segmentNumber = 0; segmentNumber < leaves.size(); segmentNumber++) {
        log.info("SolrIndexSplitter: partition #" + partitionNumber + " partitionCount=" + numPieces
            + (ranges != null ? " range=" + ranges.get(partitionNumber) : "")
            + " segment #" + segmentNumber + " segmentCount=" + leaves.size());
        CodecReader subReader = SlowCodecReaderWrapper.wrap(leaves.get(segmentNumber).reader());
        iw.addIndexes(new LiveDocsReader(subReader, segmentDocSets.get(segmentNumber)[partitionNumber]));
      }
      // We commit explicitly instead of sending a CommitUpdateCommand through the processor
      // chain, because the sub-shard cores would just ignore such a commit: the update log
      // is not in an active state at this time.
      //TODO no commitUpdateCommand
      SolrIndexWriter.setCommitData(iw, -1);
      iw.commit();
      success = true;
    } finally {
      if (iwRef != null) {
        iwRef.decref();
      } else {
        if (success) {
          iw.close();
        } else {
          IOUtils.closeWhileHandlingException(iw);
        }
      }
    }
  }
}
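LiveDocsReader is a private helper of SolrIndexSplitter; conceptually it is a FilterCodecReader that substitutes a caller-supplied FixedBitSet for the segment's live docs, so addIndexes() copies only the documents belonging to one partition. A hedged sketch of that idea, where the class name is hypothetical and the cache-helper overrides apply to Lucene 7+:
// Sketch of the LiveDocsReader idea: only documents whose bit is set survive the copy.
class SubsetReader extends FilterCodecReader {
  private final FixedBitSet liveDocs;
  SubsetReader(CodecReader in, FixedBitSet liveDocs) {
    super(in);
    this.liveDocs = liveDocs;
  }
  @Override
  public Bits getLiveDocs() {
    return liveDocs; // FixedBitSet implements Bits
  }
  @Override
  public int numDocs() {
    return liveDocs.cardinality();
  }
  @Override
  public IndexReader.CacheHelper getCoreCacheHelper() {
    return in.getCoreCacheHelper();
  }
  @Override
  public IndexReader.CacheHelper getReaderCacheHelper() {
    return null; // this reader's visible docs differ from the wrapped reader's
  }
}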