Use of org.apache.lucene.index.DirectoryReader in the Apache lucene-solr project: class TestContextQuery, method testMixedContextQuery.
@Test
public void testMixedContextQuery() throws Exception {
    Analyzer analyzer = new MockAnalyzer(random());
    // try-with-resources guarantees the writer and reader are closed even if an
    // assertion below throws (the original closed them manually and leaked on failure).
    try (RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwcWithSuggestField(analyzer, "suggest_field"))) {
        // First doc carries three suggestions, each tagged with its own context.
        Document document = new Document();
        document.add(new ContextSuggestField("suggest_field", "suggestion1", 4, "type1"));
        document.add(new ContextSuggestField("suggest_field", "suggestion2", 3, "type2"));
        document.add(new ContextSuggestField("suggest_field", "suggestion3", 2, "type3"));
        iw.addDocument(document);
        document = new Document();
        document.add(new ContextSuggestField("suggest_field", "suggestion4", 1, "type4"));
        iw.addDocument(document);
        // Occasionally commit mid-stream so the test also covers multi-segment readers.
        if (rarely()) {
            iw.commit();
        }
        try (DirectoryReader reader = iw.getReader()) {
            SuggestIndexSearcher suggestIndexSearcher = new SuggestIndexSearcher(reader);
            ContextQuery query = new ContextQuery(new PrefixCompletionQuery(analyzer, new Term("suggest_field", "sugg")));
            // Explicit boosts for type1/type2; addAllContexts() admits the rest at boost 1,
            // so expected scores are weight * boost (weight alone for unboosted contexts).
            query.addContext("type1", 7);
            query.addContext("type2", 6);
            query.addAllContexts();
            TopSuggestDocs suggest = suggestIndexSearcher.suggest(query, 5, false);
            assertSuggestions(suggest, new Entry("suggestion1", "type1", 4 * 7), new Entry("suggestion2", "type2", 3 * 6), new Entry("suggestion3", "type3", 2), new Entry("suggestion4", "type4", 1));
        }
    }
}
Use of org.apache.lucene.index.DirectoryReader in the Apache lucene-solr project: class TestContextQuery, method testRandomContextQueryScoring.
@Test
public void testRandomContextQueryScoring() throws Exception {
    Analyzer analyzer = new MockAnalyzer(random());
    try (RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwcWithSuggestField(analyzer, "suggest_field"))) {
        int numSuggestions = atLeast(20);
        int numContexts = atLeast(5);
        // Weights must be unique so the expected ranking has no ambiguous ties on score.
        Set<Integer> seenWeights = new HashSet<>();
        List<Entry> expectedEntries = new ArrayList<>();
        List<CharSequence> contexts = new ArrayList<>();
        for (int i = 1; i <= numContexts; i++) {
            // Suffix with the loop index to keep contexts (and suggestions below) distinct.
            CharSequence context = TestUtil.randomSimpleString(random(), 10) + i;
            contexts.add(context);
            for (int j = 1; j <= numSuggestions; j++) {
                String suggestion = "sugg_" + TestUtil.randomSimpleString(random(), 10) + j;
                int weight = TestUtil.nextInt(random(), 1, 1000 * numContexts * numSuggestions);
                while (seenWeights.contains(weight)) {
                    weight = TestUtil.nextInt(random(), 1, 1000 * numContexts * numSuggestions);
                }
                seenWeights.add(weight);
                Document document = new Document();
                document.add(new ContextSuggestField("suggest_field", suggestion, weight, context));
                iw.addDocument(document);
                // Context #i is boosted by i below, so the expected score is i * weight.
                expectedEntries.add(new Entry(suggestion, context.toString(), i * weight));
            }
            if (rarely()) {
                iw.commit();
            }
        }
        // new Entry[0] is the idiomatic (and on modern JVMs fastest) toArray form.
        Entry[] expectedResults = expectedEntries.toArray(new Entry[0]);
        // Sort by score descending, breaking ties by suggestion output ascending
        // (lambda replaces the original anonymous Comparator class; same semantics).
        ArrayUtil.introSort(expectedResults, (o1, o2) -> {
            int cmp = Float.compare(o2.value, o1.value);
            if (cmp != 0) {
                return cmp;
            }
            return o1.output.compareTo(o2.output);
        });
        try (DirectoryReader reader = iw.getReader()) {
            SuggestIndexSearcher suggestIndexSearcher = new SuggestIndexSearcher(reader);
            ContextQuery query = new ContextQuery(new PrefixCompletionQuery(analyzer, new Term("suggest_field", "sugg")));
            for (int i = 0; i < contexts.size(); i++) {
                query.addContext(contexts.get(i), i + 1);
            }
            TopSuggestDocs suggest = suggestIndexSearcher.suggest(query, 4, false);
            // Only the top 4 expected entries should come back, in sorted order.
            assertSuggestions(suggest, Arrays.copyOfRange(expectedResults, 0, 4));
        }
    }
}
Use of org.apache.lucene.index.DirectoryReader in the Apache lucene-solr project: class TestContextQuery, method testEmptyContextWithBoosts.
@Test
public void testEmptyContextWithBoosts() throws Exception {
    Analyzer analyzer = new MockAnalyzer(random());
    // try-with-resources guarantees the writer and reader are closed even if an
    // assertion below throws (the original closed them manually and leaked on failure).
    try (RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwcWithSuggestField(analyzer, "suggest_field"))) {
        // These three suggestions have NO context at all.
        Document document = new Document();
        document.add(new ContextSuggestField("suggest_field", "suggestion1", 4));
        document.add(new ContextSuggestField("suggest_field", "suggestion2", 3));
        document.add(new ContextSuggestField("suggest_field", "suggestion3", 2));
        iw.addDocument(document);
        document = new Document();
        document.add(new ContextSuggestField("suggest_field", "suggestion4", 1, "type4"));
        iw.addDocument(document);
        // Occasionally commit mid-stream so the test also covers multi-segment readers.
        if (rarely()) {
            iw.commit();
        }
        try (DirectoryReader reader = iw.getReader()) {
            SuggestIndexSearcher suggestIndexSearcher = new SuggestIndexSearcher(reader);
            ContextQuery query = new ContextQuery(new PrefixCompletionQuery(analyzer, new Term("suggest_field", "sugg")));
            // type4 is boosted by 10; addAllContexts() admits the context-free suggestions
            // (reported with a null context) at their raw weights.
            query.addContext("type4", 10);
            query.addAllContexts();
            TopSuggestDocs suggest = suggestIndexSearcher.suggest(query, 5, false);
            assertSuggestions(suggest, new Entry("suggestion4", "type4", 1 * 10), new Entry("suggestion1", null, 4), new Entry("suggestion2", null, 3), new Entry("suggestion3", null, 2));
        }
    }
}
Use of org.apache.lucene.index.DirectoryReader in the Apache lucene-solr project: class ReplicationHandler, method getMaxVersion.
/**
 * Returns the highest document version recorded in the given index commit,
 * as an absolute value. Opens a short-lived reader over the commit point;
 * the reader is closed before returning.
 */
private long getMaxVersion(IndexCommit commit) throws IOException {
    try (DirectoryReader commitReader = DirectoryReader.open(commit)) {
        VersionInfo versionInfo = core.getUpdateHandler().getUpdateLog().getVersionInfo();
        long maxVersion = versionInfo.getMaxVersionFromIndex(new IndexSearcher(commitReader));
        return Math.abs(maxVersion);
    }
}
Use of org.apache.lucene.index.DirectoryReader in the Apache lucene-solr project: class ReplicationHandler, method inform.
/**
 * Initialization callback: reads the "master"/"slave" sections of this handler's
 * initArgs and configures legacy (non-SolrCloud) replication for the core.
 * Slaves get a polling IndexFetcher; masters get commit/optimize event listeners,
 * an optional conf-file list, a startup replication commit point, and a commit
 * reservation duration. NOTE(review): statement order matters — the slave setup
 * must run before the "enable master by default" fallback below.
 */
@Override
@SuppressWarnings("unchecked")
public void inform(SolrCore core) {
this.core = core;
registerCloseHook();
// How many backups to retain; defaults to 0 when the init param is absent
// (presumably meaning "keep all" — TODO confirm against backup cleanup code).
Object nbtk = initArgs.get(NUMBER_BACKUPS_TO_KEEP_INIT_PARAM);
if (nbtk != null) {
numberBackupsToKeep = Integer.parseInt(nbtk.toString());
} else {
numberBackupsToKeep = 0;
}
// Slave role: start polling the master for index updates.
NamedList slave = (NamedList) initArgs.get("slave");
boolean enableSlave = isEnabled(slave);
if (enableSlave) {
currentIndexFetcher = pollingIndexFetcher = new IndexFetcher(slave, this, core);
setupPolling((String) slave.get(POLL_INTERVAL));
isSlave = true;
}
NamedList master = (NamedList) initArgs.get("master");
boolean enableMaster = isEnabled(master);
// Old-style replication alongside SolrCloud is almost always a misconfiguration; warn loudly.
if (enableMaster || enableSlave) {
if (core.getCoreContainer().getZkController() != null) {
LOG.warn("SolrCloud is enabled for core " + core.getName() + " but so is old-style replication. Make sure you" + " intend this behavior, it usually indicates a mis-configuration. Master setting is " + Boolean.toString(enableMaster) + " and slave setting is " + Boolean.toString(enableSlave));
}
}
// If neither role is configured, act as a master with defaults so the handler still works.
if (!enableSlave && !enableMaster) {
enableMaster = true;
master = new NamedList<>();
}
if (enableMaster) {
// Comma-separated list of config files to replicate; each entry may be "name:alias".
includeConfFiles = (String) master.get(CONF_FILES);
if (includeConfFiles != null && includeConfFiles.trim().length() > 0) {
List<String> files = Arrays.asList(includeConfFiles.split(","));
for (String file : files) {
if (file.trim().length() == 0)
continue;
String[] strs = file.trim().split(":");
// if there is an alias add it or it is null
confFileNameAlias.add(strs[0], strs.length > 1 ? strs[1] : null);
}
LOG.info("Replication enabled for following config files: " + includeConfFiles);
}
// "backupAfter"/"replicateAfter" may list several events; "commit" wins over "optimize".
List backup = master.getAll("backupAfter");
boolean backupOnCommit = backup.contains("commit");
boolean backupOnOptimize = !backupOnCommit && backup.contains("optimize");
List replicateAfter = master.getAll(REPLICATE_AFTER);
replicateOnCommit = replicateAfter.contains("commit");
replicateOnOptimize = !replicateOnCommit && replicateAfter.contains("optimize");
// Default to replicate-on-commit when no trigger event was configured.
if (!replicateOnCommit && !replicateOnOptimize) {
replicateOnCommit = true;
}
// save the last optimized commit point.
if (replicateOnOptimize) {
IndexDeletionPolicyWrapper wrapper = core.getDeletionPolicy();
IndexDeletionPolicy policy = wrapper == null ? null : wrapper.getWrappedDeletionPolicy();
if (policy instanceof SolrDeletionPolicy) {
SolrDeletionPolicy solrPolicy = (SolrDeletionPolicy) policy;
// Ensure at least one optimized commit survives deletion so it can be replicated.
if (solrPolicy.getMaxOptimizedCommitsToKeep() < 1) {
solrPolicy.setMaxOptimizedCommitsToKeep(1);
}
} else {
LOG.warn("Replication can't call setMaxOptimizedCommitsToKeep on " + policy);
}
}
if (replicateOnOptimize || backupOnOptimize) {
core.getUpdateHandler().registerOptimizeCallback(getEventListener(backupOnOptimize, replicateOnOptimize));
}
if (replicateOnCommit || backupOnCommit) {
replicateOnCommit = true;
core.getUpdateHandler().registerCommitCallback(getEventListener(backupOnCommit, replicateOnCommit));
}
// "startup": expose an existing commit point immediately so slaves can replicate
// without waiting for the next commit/optimize on this master.
if (replicateAfter.contains("startup")) {
replicateOnStart = true;
RefCounted<SolrIndexSearcher> s = core.getNewestSearcher(false);
try {
DirectoryReader reader = s == null ? null : s.get().getIndexReader();
// Generation 1 means an empty/new index — nothing worth replicating yet.
if (reader != null && reader.getIndexCommit() != null && reader.getIndexCommit().getGeneration() != 1L) {
try {
if (replicateOnOptimize) {
// Pick the newest fully-optimized (single-segment) commit.
Collection<IndexCommit> commits = DirectoryReader.listCommits(reader.directory());
for (IndexCommit ic : commits) {
if (ic.getSegmentCount() == 1) {
if (indexCommitPoint == null || indexCommitPoint.getGeneration() < ic.getGeneration())
indexCommitPoint = ic;
}
}
} else {
indexCommitPoint = reader.getIndexCommit();
}
} finally {
// We don't need to save commit points for replication, the SolrDeletionPolicy
// always saves the last commit point (and the last optimized commit point, if needed)
/***
if(indexCommitPoint != null){
core.getDeletionPolicy().saveCommitPoint(indexCommitPoint.getGeneration());
}
***/
}
}
// ensure the writer is init'd so that we have a list of commit points
RefCounted<IndexWriter> iw = core.getUpdateHandler().getSolrCoreState().getIndexWriter(core);
iw.decref();
} catch (IOException e) {
LOG.warn("Unable to get IndexCommit on startup", e);
} finally {
if (s != null)
s.decref();
}
}
// How long a commit point is pinned while a slave downloads it.
String reserve = (String) master.get(RESERVE);
if (reserve != null && !reserve.trim().equals("")) {
reserveCommitDuration = readIntervalMs(reserve);
}
LOG.info("Commits will be reserved for " + reserveCommitDuration);
isMaster = true;
}
}
Aggregations