Search in sources:

Example 1 with IndexDeletionPolicy

Use of org.apache.lucene.index.IndexDeletionPolicy in project lucene-solr by apache.

The class ReplicationHandler, method inform().

@Override
@SuppressWarnings("unchecked")
public void inform(SolrCore core) {
    this.core = core;
    registerCloseHook();
    Object nbtk = initArgs.get(NUMBER_BACKUPS_TO_KEEP_INIT_PARAM);
    if (nbtk != null) {
        numberBackupsToKeep = Integer.parseInt(nbtk.toString());
    } else {
        numberBackupsToKeep = 0;
    }
    NamedList slave = (NamedList) initArgs.get("slave");
    boolean enableSlave = isEnabled(slave);
    if (enableSlave) {
        currentIndexFetcher = pollingIndexFetcher = new IndexFetcher(slave, this, core);
        setupPolling((String) slave.get(POLL_INTERVAL));
        isSlave = true;
    }
    NamedList master = (NamedList) initArgs.get("master");
    boolean enableMaster = isEnabled(master);
    if (enableMaster || enableSlave) {
        if (core.getCoreContainer().getZkController() != null) {
            LOG.warn("SolrCloud is enabled for core " + core.getName() + " but so is old-style replication. Make sure you" + " intend this behavior, it usually indicates a mis-configuration. Master setting is " + Boolean.toString(enableMaster) + " and slave setting is " + Boolean.toString(enableSlave));
        }
    }
    if (!enableSlave && !enableMaster) {
        enableMaster = true;
        master = new NamedList<>();
    }
    if (enableMaster) {
        includeConfFiles = (String) master.get(CONF_FILES);
        if (includeConfFiles != null && includeConfFiles.trim().length() > 0) {
            List<String> files = Arrays.asList(includeConfFiles.split(","));
            for (String file : files) {
                if (file.trim().length() == 0)
                    continue;
                String[] strs = file.trim().split(":");
                // map the config file name to its alias, or to null if no alias was given
                confFileNameAlias.add(strs[0], strs.length > 1 ? strs[1] : null);
            }
            LOG.info("Replication enabled for following config files: " + includeConfFiles);
        }
        List backup = master.getAll("backupAfter");
        boolean backupOnCommit = backup.contains("commit");
        boolean backupOnOptimize = !backupOnCommit && backup.contains("optimize");
        List replicateAfter = master.getAll(REPLICATE_AFTER);
        replicateOnCommit = replicateAfter.contains("commit");
        replicateOnOptimize = !replicateOnCommit && replicateAfter.contains("optimize");
        if (!replicateOnCommit && !replicateOnOptimize) {
            replicateOnCommit = true;
        }
        // save the last optimized commit point.
        if (replicateOnOptimize) {
            IndexDeletionPolicyWrapper wrapper = core.getDeletionPolicy();
            IndexDeletionPolicy policy = wrapper == null ? null : wrapper.getWrappedDeletionPolicy();
            if (policy instanceof SolrDeletionPolicy) {
                SolrDeletionPolicy solrPolicy = (SolrDeletionPolicy) policy;
                if (solrPolicy.getMaxOptimizedCommitsToKeep() < 1) {
                    solrPolicy.setMaxOptimizedCommitsToKeep(1);
                }
            } else {
                LOG.warn("Replication can't call setMaxOptimizedCommitsToKeep on " + policy);
            }
        }
        if (replicateOnOptimize || backupOnOptimize) {
            core.getUpdateHandler().registerOptimizeCallback(getEventListener(backupOnOptimize, replicateOnOptimize));
        }
        if (replicateOnCommit || backupOnCommit) {
            replicateOnCommit = true;
            core.getUpdateHandler().registerCommitCallback(getEventListener(backupOnCommit, replicateOnCommit));
        }
        if (replicateAfter.contains("startup")) {
            replicateOnStart = true;
            RefCounted<SolrIndexSearcher> s = core.getNewestSearcher(false);
            try {
                DirectoryReader reader = s == null ? null : s.get().getIndexReader();
                if (reader != null && reader.getIndexCommit() != null && reader.getIndexCommit().getGeneration() != 1L) {
                    try {
                        if (replicateOnOptimize) {
                            Collection<IndexCommit> commits = DirectoryReader.listCommits(reader.directory());
                            for (IndexCommit ic : commits) {
                                if (ic.getSegmentCount() == 1) {
                                    if (indexCommitPoint == null || indexCommitPoint.getGeneration() < ic.getGeneration())
                                        indexCommitPoint = ic;
                                }
                            }
                        } else {
                            indexCommitPoint = reader.getIndexCommit();
                        }
                    } finally {
                    // We don't need to save commit points for replication, the SolrDeletionPolicy
                    // always saves the last commit point (and the last optimized commit point, if needed)
                    /***
              if(indexCommitPoint != null){
                core.getDeletionPolicy().saveCommitPoint(indexCommitPoint.getGeneration());
              }
              ***/
                    }
                }
                // ensure the writer is init'd so that we have a list of commit points
                RefCounted<IndexWriter> iw = core.getUpdateHandler().getSolrCoreState().getIndexWriter(core);
                iw.decref();
            } catch (IOException e) {
                LOG.warn("Unable to get IndexCommit on startup", e);
            } finally {
                if (s != null)
                    s.decref();
            }
        }
        String reserve = (String) master.get(RESERVE);
        if (reserve != null && !reserve.trim().equals("")) {
            reserveCommitDuration = readIntervalMs(reserve);
        }
        LOG.info("Commits will be reserved for  " + reserveCommitDuration);
        isMaster = true;
    }
}
Also used : DirectoryReader(org.apache.lucene.index.DirectoryReader) NamedList(org.apache.solr.common.util.NamedList) IndexDeletionPolicyWrapper(org.apache.solr.core.IndexDeletionPolicyWrapper) SolrIndexSearcher(org.apache.solr.search.SolrIndexSearcher) IOException(java.io.IOException) IndexCommit(org.apache.lucene.index.IndexCommit) SolrIndexWriter(org.apache.solr.update.SolrIndexWriter) IndexWriter(org.apache.lucene.index.IndexWriter) SolrDeletionPolicy(org.apache.solr.core.SolrDeletionPolicy) List(java.util.List) ArrayList(java.util.ArrayList) NamedList(org.apache.solr.common.util.NamedList) IndexDeletionPolicy(org.apache.lucene.index.IndexDeletionPolicy)
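
The commented-out block above hints at Solr's commit-reservation API. The following is a minimal sketch, not part of the handler itself, assuming the IndexDeletionPolicyWrapper methods getLatestCommit, saveCommitPoint and releaseCommitPoint; it shows how a commit point could be pinned while its files are streamed to a slave or backup location.

// Hedged sketch: pin the newest commit through the IndexDeletionPolicyWrapper so its
// files survive while they are copied, mirroring the commented-out saveCommitPoint(...)
// call above. Assumes a SolrCore reference named "core".
IndexDeletionPolicyWrapper delPolicy = core.getDeletionPolicy();
IndexCommit commit = delPolicy.getLatestCommit();   // may be null on a brand-new core
if (commit != null) {
    delPolicy.saveCommitPoint(commit.getGeneration());      // reserve the commit
    try {
        // ... stream commit.getFileNames() to the backup or slave location ...
    } finally {
        delPolicy.releaseCommitPoint(commit.getGeneration()); // allow normal cleanup again
    }
}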

Example 2 with IndexDeletionPolicy

Use of org.apache.lucene.index.IndexDeletionPolicy in project lucene-solr by apache.

The class CreateIndexTask, method createWriterConfig().

public static IndexWriterConfig createWriterConfig(Config config, PerfRunData runData, OpenMode mode, IndexCommit commit) {
    @SuppressWarnings("deprecation") IndexWriterConfig iwConf = new IndexWriterConfig(runData.getAnalyzer());
    iwConf.setOpenMode(mode);
    IndexDeletionPolicy indexDeletionPolicy = getIndexDeletionPolicy(config);
    iwConf.setIndexDeletionPolicy(indexDeletionPolicy);
    if (commit != null) {
        iwConf.setIndexCommit(commit);
    }
    final String mergeScheduler = config.get("merge.scheduler", "org.apache.lucene.index.ConcurrentMergeScheduler");
    if (mergeScheduler.equals(NoMergeScheduler.class.getName())) {
        iwConf.setMergeScheduler(NoMergeScheduler.INSTANCE);
    } else {
        try {
            iwConf.setMergeScheduler(Class.forName(mergeScheduler).asSubclass(MergeScheduler.class).newInstance());
        } catch (Exception e) {
            throw new RuntimeException("unable to instantiate class '" + mergeScheduler + "' as merge scheduler", e);
        }
        if (mergeScheduler.equals("org.apache.lucene.index.ConcurrentMergeScheduler")) {
            ConcurrentMergeScheduler cms = (ConcurrentMergeScheduler) iwConf.getMergeScheduler();
            int maxThreadCount = config.get("concurrent.merge.scheduler.max.thread.count", ConcurrentMergeScheduler.AUTO_DETECT_MERGES_AND_THREADS);
            int maxMergeCount = config.get("concurrent.merge.scheduler.max.merge.count", ConcurrentMergeScheduler.AUTO_DETECT_MERGES_AND_THREADS);
            cms.setMaxMergesAndThreads(maxMergeCount, maxThreadCount);
        }
    }
    final String defaultCodec = config.get("default.codec", null);
    if (defaultCodec != null) {
        try {
            Class<? extends Codec> clazz = Class.forName(defaultCodec).asSubclass(Codec.class);
            iwConf.setCodec(clazz.newInstance());
        } catch (Exception e) {
            throw new RuntimeException("Couldn't instantiate Codec: " + defaultCodec, e);
        }
    }
    final String postingsFormat = config.get("codec.postingsFormat", null);
    if (defaultCodec == null && postingsFormat != null) {
        try {
            final PostingsFormat postingsFormatChosen = PostingsFormat.forName(postingsFormat);
            iwConf.setCodec(new Lucene70Codec() {

                @Override
                public PostingsFormat getPostingsFormatForField(String field) {
                    return postingsFormatChosen;
                }
            });
        } catch (Exception e) {
            throw new RuntimeException("Couldn't instantiate Postings Format: " + postingsFormat, e);
        }
    }
    final String mergePolicy = config.get("merge.policy", "org.apache.lucene.index.LogByteSizeMergePolicy");
    boolean isCompound = config.get("compound", true);
    iwConf.setUseCompoundFile(isCompound);
    if (mergePolicy.equals(NoMergePolicy.class.getName())) {
        iwConf.setMergePolicy(NoMergePolicy.INSTANCE);
    } else {
        try {
            iwConf.setMergePolicy(Class.forName(mergePolicy).asSubclass(MergePolicy.class).newInstance());
        } catch (Exception e) {
            throw new RuntimeException("unable to instantiate class '" + mergePolicy + "' as merge policy", e);
        }
        iwConf.getMergePolicy().setNoCFSRatio(isCompound ? 1.0 : 0.0);
        if (iwConf.getMergePolicy() instanceof LogMergePolicy) {
            LogMergePolicy logMergePolicy = (LogMergePolicy) iwConf.getMergePolicy();
            logMergePolicy.setMergeFactor(config.get("merge.factor", OpenIndexTask.DEFAULT_MERGE_PFACTOR));
        }
    }
    final double ramBuffer = config.get("ram.flush.mb", OpenIndexTask.DEFAULT_RAM_FLUSH_MB);
    final int maxBuffered = config.get("max.buffered", OpenIndexTask.DEFAULT_MAX_BUFFERED);
    if (maxBuffered == IndexWriterConfig.DISABLE_AUTO_FLUSH) {
        iwConf.setRAMBufferSizeMB(ramBuffer);
        iwConf.setMaxBufferedDocs(maxBuffered);
    } else {
        iwConf.setMaxBufferedDocs(maxBuffered);
        iwConf.setRAMBufferSizeMB(ramBuffer);
    }
    return iwConf;
}
Also used : ConcurrentMergeScheduler(org.apache.lucene.index.ConcurrentMergeScheduler) IOException(java.io.IOException) NoMergeScheduler(org.apache.lucene.index.NoMergeScheduler) PostingsFormat(org.apache.lucene.codecs.PostingsFormat) Lucene70Codec(org.apache.lucene.codecs.lucene70.Lucene70Codec) NoMergePolicy(org.apache.lucene.index.NoMergePolicy) LogMergePolicy(org.apache.lucene.index.LogMergePolicy) IndexDeletionPolicy(org.apache.lucene.index.IndexDeletionPolicy) IndexWriterConfig(org.apache.lucene.index.IndexWriterConfig)
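
Outside the benchmark framework, the same setIndexDeletionPolicy hook is how a policy such as Lucene's SnapshotDeletionPolicy gets installed on a writer. Below is a minimal, self-contained sketch (the index path and the empty document are placeholders, not taken from the example above).

import java.nio.file.Paths;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.index.IndexCommit;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.KeepOnlyLastCommitDeletionPolicy;
import org.apache.lucene.index.SnapshotDeletionPolicy;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;

public class SnapshotPolicyExample {

    public static void main(String[] args) throws Exception {
        Directory dir = FSDirectory.open(Paths.get("/tmp/example-index")); // placeholder path
        // Wrap the default policy so individual commits can be pinned while they are read.
        SnapshotDeletionPolicy snapshotter =
                new SnapshotDeletionPolicy(new KeepOnlyLastCommitDeletionPolicy());
        IndexWriterConfig iwConf = new IndexWriterConfig(new StandardAnalyzer())
                .setIndexDeletionPolicy(snapshotter);
        try (IndexWriter writer = new IndexWriter(dir, iwConf)) {
            writer.addDocument(new Document());
            writer.commit();
            IndexCommit pinned = snapshotter.snapshot(); // this commit cannot be deleted now
            try {
                // ... copy pinned.getFileNames() somewhere for a backup ...
            } finally {
                snapshotter.release(pinned); // let the wrapped policy clean it up again
            }
        }
    }
}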

Example 3 with IndexDeletionPolicy

Use of org.apache.lucene.index.IndexDeletionPolicy in project lucene-solr by apache.

The class SolrSnapshotManager, method deleteNonSnapshotIndexFiles().

/**
   * This method deletes index files not associated with the specified <code>snapshots</code>.
   *
   * @param core The Solr core
   * @param dir The index directory storing the snapshot.
   * @param snapshots The snapshots to be preserved.
   * @throws IOException in case of I/O errors.
   */
public static void deleteNonSnapshotIndexFiles(SolrCore core, Directory dir, Collection<SnapshotMetaData> snapshots) throws IOException {
    final Set<Long> genNumbers = new HashSet<>();
    for (SnapshotMetaData m : snapshots) {
        genNumbers.add(m.getGenerationNumber());
    }
    deleteSnapshotIndexFiles(core, dir, new IndexDeletionPolicy() {

        @Override
        public void onInit(List<? extends IndexCommit> commits) throws IOException {
            for (IndexCommit ic : commits) {
                if (!genNumbers.contains(ic.getGeneration())) {
                    log.info("Deleting non-snapshotted index commit with generation {}", ic.getGeneration());
                    ic.delete();
                }
            }
        }

        @Override
        public void onCommit(List<? extends IndexCommit> commits) throws IOException {
        }
    });
}
Also used : IndexDeletionPolicy(org.apache.lucene.index.IndexDeletionPolicy) SnapshotMetaData(org.apache.solr.core.snapshots.SolrSnapshotMetaDataManager.SnapshotMetaData) IOException(java.io.IOException) IndexCommit(org.apache.lucene.index.IndexCommit) HashSet(java.util.HashSet)
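
The deleteSnapshotIndexFiles helper itself is not shown in this snippet. As a hedged sketch of how such a one-shot policy could be applied (not the actual Solr helper): Lucene invokes IndexDeletionPolicy.onInit(...) when an IndexWriter is created, so opening and immediately closing a writer configured with the anonymous policy above is enough to trigger the deletions. Here dir and policy refer to the names from the example, and the analyzer only satisfies the constructor.

// Hedged sketch, not the real deleteSnapshotIndexFiles: open a throw-away writer on the
// snapshot directory so that onInit(...) runs against the existing commits.
IndexWriterConfig conf = new IndexWriterConfig(new StandardAnalyzer()) // analyzer is unused here
        .setOpenMode(IndexWriterConfig.OpenMode.APPEND)                // never wipe the index
        .setIndexDeletionPolicy(policy);                               // the policy built above
new IndexWriter(dir, conf).close(); // onInit(...) fires and deletes the unwanted commits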

Example 4 with IndexDeletionPolicy

Use of org.apache.lucene.index.IndexDeletionPolicy in project lucene-solr by apache.

The class SolrCore, method initDeletionPolicy().

private IndexDeletionPolicyWrapper initDeletionPolicy(IndexDeletionPolicyWrapper delPolicyWrapper) {
    if (delPolicyWrapper != null) {
        return delPolicyWrapper;
    }
    final PluginInfo info = solrConfig.getPluginInfo(IndexDeletionPolicy.class.getName());
    final IndexDeletionPolicy delPolicy;
    if (info != null) {
        delPolicy = createInstance(info.className, IndexDeletionPolicy.class, "Deletion Policy for SOLR", this, getResourceLoader());
        if (delPolicy instanceof NamedListInitializedPlugin) {
            ((NamedListInitializedPlugin) delPolicy).init(info.initArgs);
        }
    } else {
        delPolicy = new SolrDeletionPolicy();
    }
    return new IndexDeletionPolicyWrapper(delPolicy, snapshotMgr);
}
Also used : IndexDeletionPolicy(org.apache.lucene.index.IndexDeletionPolicy) NamedListInitializedPlugin(org.apache.solr.util.plugin.NamedListInitializedPlugin)
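
Because initDeletionPolicy passes info.initArgs to any policy implementing NamedListInitializedPlugin, a custom policy can read its own parameters from configuration. The following is a hedged sketch; the class name and the commitsToKeep parameter are illustrative and not part of Solr.

import java.io.IOException;
import java.util.List;
import org.apache.lucene.index.IndexCommit;
import org.apache.lucene.index.IndexDeletionPolicy;
import org.apache.solr.common.util.NamedList;
import org.apache.solr.util.plugin.NamedListInitializedPlugin;

public class KeepLastNCommitsPolicy extends IndexDeletionPolicy implements NamedListInitializedPlugin {

    private int commitsToKeep = 1;

    @Override
    public void init(NamedList args) {
        // "commitsToKeep" is a hypothetical init parameter read from the plugin's initArgs
        Object n = args.get("commitsToKeep");
        if (n != null) {
            commitsToKeep = Integer.parseInt(n.toString());
        }
    }

    @Override
    public void onInit(List<? extends IndexCommit> commits) throws IOException {
        onCommit(commits);
    }

    @Override
    public void onCommit(List<? extends IndexCommit> commits) throws IOException {
        // commits are ordered oldest first; delete everything except the newest commitsToKeep
        for (int i = 0; i < commits.size() - commitsToKeep; i++) {
            commits.get(i).delete();
        }
    }
}

Assuming standard Solr configuration, such a class would be registered through a deletionPolicy element in solrconfig.xml, which is where the PluginInfo returned by solrConfig.getPluginInfo(IndexDeletionPolicy.class.getName()) above comes from.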

Example 5 with IndexDeletionPolicy

Use of org.apache.lucene.index.IndexDeletionPolicy in project jackrabbit by apache.

The class AbstractIndex, method getIndexReader().

/**
 * Returns an <code>IndexReader</code> on this index. This index reader
 * may be used to delete documents.
 *
 * @return an <code>IndexReader</code> on this index.
 * @throws IOException if the reader cannot be obtained.
 */
protected synchronized CommittableIndexReader getIndexReader() throws IOException {
    if (indexWriter != null) {
        indexWriter.close();
        log.debug("closing IndexWriter.");
        indexWriter = null;
    }
    if (indexReader == null) {
        IndexDeletionPolicy idp = getIndexDeletionPolicy();
        IndexReader reader = IndexReader.open(getDirectory(), idp, false, termInfosIndexDivisor);
        indexReader = new CommittableIndexReader(reader);
    }
    return indexReader;
}
Also used : IndexReader(org.apache.lucene.index.IndexReader) IndexDeletionPolicy(org.apache.lucene.index.IndexDeletionPolicy)
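
For context, the reader opened here is writable (readOnly is false), so document deletions performed through it are committed under the supplied deletion policy. Below is a hedged sketch in the Lucene 3.x-era API used by this code; the field name and value are placeholders, the Jackrabbit wrapper is bypassed, and getDirectory(), getIndexDeletionPolicy() and termInfosIndexDivisor are the members from the class above.

// Hedged sketch (Lucene 3.x-era API): deletions made through a writable reader are written
// as a new commit when the reader is closed, and the configured IndexDeletionPolicy then
// decides which older commits may be removed.
IndexDeletionPolicy idp = getIndexDeletionPolicy();
IndexReader reader = IndexReader.open(getDirectory(), idp, false, termInfosIndexDivisor);
try {
    reader.deleteDocuments(new Term("id", "some-id")); // placeholder field and value
} finally {
    reader.close(); // commits the pending deletions through the policy
}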

Aggregations

IndexDeletionPolicy (org.apache.lucene.index.IndexDeletionPolicy): 5 uses
IOException (java.io.IOException): 3 uses
IndexCommit (org.apache.lucene.index.IndexCommit): 2 uses
ArrayList (java.util.ArrayList): 1 use
HashSet (java.util.HashSet): 1 use
List (java.util.List): 1 use
PostingsFormat (org.apache.lucene.codecs.PostingsFormat): 1 use
Lucene70Codec (org.apache.lucene.codecs.lucene70.Lucene70Codec): 1 use
ConcurrentMergeScheduler (org.apache.lucene.index.ConcurrentMergeScheduler): 1 use
DirectoryReader (org.apache.lucene.index.DirectoryReader): 1 use
IndexReader (org.apache.lucene.index.IndexReader): 1 use
IndexWriter (org.apache.lucene.index.IndexWriter): 1 use
IndexWriterConfig (org.apache.lucene.index.IndexWriterConfig): 1 use
LogMergePolicy (org.apache.lucene.index.LogMergePolicy): 1 use
NoMergePolicy (org.apache.lucene.index.NoMergePolicy): 1 use
NoMergeScheduler (org.apache.lucene.index.NoMergeScheduler): 1 use
NamedList (org.apache.solr.common.util.NamedList): 1 use
IndexDeletionPolicyWrapper (org.apache.solr.core.IndexDeletionPolicyWrapper): 1 use
SolrDeletionPolicy (org.apache.solr.core.SolrDeletionPolicy): 1 use
SnapshotMetaData (org.apache.solr.core.snapshots.SolrSnapshotMetaDataManager.SnapshotMetaData): 1 use