Use of org.opensearch.Version in project OpenSearch by opensearch-project.
The class MainResponseTests, method createServerTestInstance:
@Override
protected org.opensearch.action.main.MainResponse createServerTestInstance(XContentType xContentType) {
    String clusterUuid = randomAlphaOfLength(10);
    ClusterName clusterName = new ClusterName(randomAlphaOfLength(10));
    String nodeName = randomAlphaOfLength(10);
    final String date = new Date(randomNonNegativeLong()).toString();
    Version version = VersionUtils.randomVersionBetween(random(), LegacyESVersion.V_7_0_0, Version.CURRENT);
    Build build = new Build(
        Build.Type.UNKNOWN,
        randomAlphaOfLength(8),
        date,
        randomBoolean(),
        version.toString(),
        version.before(Version.V_1_0_0) ? null : "opensearch"
    );
    return new org.opensearch.action.main.MainResponse(nodeName, version, clusterName, clusterUuid, build);
}
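The Version-specific part of this snippet is the version.before(Version.V_1_0_0) check, which leaves the distribution string null for pre-fork (LegacyESVersion) versions and sets it to "opensearch" otherwise. A minimal illustrative fragment of the org.opensearch.Version helpers this relies on (the literal version string is made up; this is not part of the test):

// Sketch: parsing and ordering helpers on org.opensearch.Version used by the test above.
Version parsed = Version.fromString("1.2.4");             // parse an OpenSearch version string
boolean isLegacy = parsed.before(Version.V_1_0_0);        // false here; true for LegacyESVersion constants such as V_7_0_0
boolean isCompatible = Version.CURRENT.onOrAfter(parsed); // CURRENT is the version this code was built with
String distribution = isLegacy ? null : "opensearch";     // mirrors the Build construction in the snippet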
Use of org.opensearch.Version in project OpenSearch by opensearch-project.
The class StemmerTokenFilterFactoryTests, method testMultipleLanguagesThrowsException:
public void testMultipleLanguagesThrowsException() throws IOException {
    Version v = VersionUtils.randomVersion(random());
    Settings settings = Settings.builder()
        .put("index.analysis.filter.my_english.type", "stemmer")
        .putList("index.analysis.filter.my_english.language", "english", "light_english")
        .put(SETTING_VERSION_CREATED, v)
        .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString())
        .build();
    IllegalArgumentException e = expectThrows(
        IllegalArgumentException.class,
        () -> AnalysisTestsHelper.createTestAnalysisFromSettings(settings, PLUGIN)
    );
    assertEquals("Invalid stemmer class specified: [english, light_english]", e.getMessage());
}
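SETTING_VERSION_CREATED is statically imported in this test; it pins the version the index is considered to have been created with, which the analysis machinery uses to select version-dependent behavior. Below is a hypothetical single-language variant of the same Settings, written without the static import (assuming the key resolves to IndexMetadata.SETTING_VERSION_CREATED, as elsewhere in the OpenSearch codebase); this form would be accepted rather than rejected:

// Sketch only: a valid single-language stemmer configuration, pinned to the current version.
Settings ok = Settings.builder()
    .put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT) // "index.version.created"
    .put("index.analysis.filter.my_english.type", "stemmer")
    .put("index.analysis.filter.my_english.language", "english")
    .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString())
    .build();

The next snippet shows the full analyzer wiring around such a single-language filter.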
Use of org.opensearch.Version in project OpenSearch by opensearch-project.
The class StemmerTokenFilterFactoryTests, method testEnglishFilterFactory:
public void testEnglishFilterFactory() throws IOException {
    int iters = scaledRandomIntBetween(20, 100);
    for (int i = 0; i < iters; i++) {
        Version v = VersionUtils.randomVersion(random());
        Settings settings = Settings.builder()
            .put("index.analysis.filter.my_english.type", "stemmer")
            .put("index.analysis.filter.my_english.language", "english")
            .put("index.analysis.analyzer.my_english.tokenizer", "whitespace")
            .put("index.analysis.analyzer.my_english.filter", "my_english")
            .put(SETTING_VERSION_CREATED, v)
            .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString())
            .build();
        OpenSearchTestCase.TestAnalysis analysis = AnalysisTestsHelper.createTestAnalysisFromSettings(settings, PLUGIN);
        TokenFilterFactory tokenFilter = analysis.tokenFilter.get("my_english");
        assertThat(tokenFilter, instanceOf(StemmerTokenFilterFactory.class));
        Tokenizer tokenizer = new WhitespaceTokenizer();
        tokenizer.setReader(new StringReader("foo bar"));
        TokenStream create = tokenFilter.create(tokenizer);
        IndexAnalyzers indexAnalyzers = analysis.indexAnalyzers;
        NamedAnalyzer analyzer = indexAnalyzers.get("my_english");
        assertThat(create, instanceOf(PorterStemFilter.class));
        assertAnalyzesTo(analyzer, "consolingly", new String[] { "consolingli" });
    }
}
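assertAnalyzesTo comes from Lucene's token stream test base class and checks the exact terms the analyzer emits. For reference, a rough hand-rolled equivalent of that last assertion using plain Lucene TokenStream APIs (illustrative only; the test itself keeps using assertAnalyzesTo):

// Sketch: consume the analyzer's token stream manually and check the single stemmed term.
try (TokenStream ts = analyzer.tokenStream("field", "consolingly")) {
    CharTermAttribute term = ts.addAttribute(CharTermAttribute.class);
    ts.reset();
    assertTrue(ts.incrementToken());
    assertEquals("consolingli", term.toString());   // PorterStemFilter stems "consolingly" to "consolingli"
    assertFalse(ts.incrementToken());
    ts.end();
}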
Use of org.opensearch.Version in project OpenSearch by opensearch-project.
The class CandidateQueryTests, method testPercolateSmallAndLargeDocument:
public void testPercolateSmallAndLargeDocument() throws Exception {
    List<ParseContext.Document> docs = new ArrayList<>();
    BooleanQuery.Builder builder = new BooleanQuery.Builder();
    builder.add(new TermQuery(new Term("field", "value1")), Occur.MUST);
    builder.add(new TermQuery(new Term("field", "value2")), Occur.MUST);
    addQuery(builder.build(), docs);
    builder = new BooleanQuery.Builder();
    builder.add(new TermQuery(new Term("field", "value2")), Occur.MUST);
    builder.add(new TermQuery(new Term("field", "value3")), Occur.MUST);
    addQuery(builder.build(), docs);
    builder = new BooleanQuery.Builder();
    builder.add(new TermQuery(new Term("field", "value3")), Occur.MUST);
    builder.add(new TermQuery(new Term("field", "value4")), Occur.MUST);
    addQuery(builder.build(), docs);
    indexWriter.addDocuments(docs);
    indexWriter.close();
    directoryReader = DirectoryReader.open(directory);
    IndexSearcher shardSearcher = newSearcher(directoryReader);
    shardSearcher.setQueryCache(null);
    Version v = Version.CURRENT;
    try (Directory directory = new ByteBuffersDirectory()) {
        try (IndexWriter iw = new IndexWriter(directory, newIndexWriterConfig())) {
            List<Document> documents = new ArrayList<>();
            Document document = new Document();
            document.add(new StringField("field", "value1", Field.Store.NO));
            document.add(new StringField("field", "value2", Field.Store.NO));
            documents.add(document);
            document = new Document();
            document.add(new StringField("field", "value5", Field.Store.NO));
            document.add(new StringField("field", "value6", Field.Store.NO));
            documents.add(document);
            document = new Document();
            document.add(new StringField("field", "value3", Field.Store.NO));
            document.add(new StringField("field", "value4", Field.Store.NO));
            documents.add(document);
            // IW#addDocuments(...) ensures we end up with a single segment
            iw.addDocuments(documents);
        }
        try (IndexReader ir = DirectoryReader.open(directory)) {
            IndexSearcher percolateSearcher = new IndexSearcher(ir);
            PercolateQuery query = (PercolateQuery) fieldType.percolateQuery(
                "_name",
                queryStore,
                Collections.singletonList(new BytesArray("{}")),
                percolateSearcher,
                false,
                v
            );
            BooleanQuery candidateQuery = (BooleanQuery) query.getCandidateMatchesQuery();
            assertThat(candidateQuery.clauses().get(0).getQuery(), instanceOf(CoveringQuery.class));
            TopDocs topDocs = shardSearcher.search(query, 10);
            assertEquals(2L, topDocs.totalHits.value);
            assertEquals(2, topDocs.scoreDocs.length);
            assertEquals(0, topDocs.scoreDocs[0].doc);
            assertEquals(2, topDocs.scoreDocs[1].doc);
            topDocs = shardSearcher.search(new ConstantScoreQuery(query), 10);
            assertEquals(2L, topDocs.totalHits.value);
            assertEquals(2, topDocs.scoreDocs.length);
            assertEquals(0, topDocs.scoreDocs[0].doc);
            assertEquals(2, topDocs.scoreDocs[1].doc);
        }
    }
    // This will trigger using the TermsQuery instead of individual term query clauses in the CoveringQuery:
    try (Directory directory = new ByteBuffersDirectory()) {
        try (IndexWriter iw = new IndexWriter(directory, newIndexWriterConfig())) {
            Document document = new Document();
            for (int i = 0; i < 1024; i++) {
                int fieldNumber = 2 + i;
                document.add(new StringField("field", "value" + fieldNumber, Field.Store.NO));
            }
            iw.addDocument(document);
        }
        try (IndexReader ir = DirectoryReader.open(directory)) {
            IndexSearcher percolateSearcher = new IndexSearcher(ir);
            PercolateQuery query = (PercolateQuery) fieldType.percolateQuery(
                "_name",
                queryStore,
                Collections.singletonList(new BytesArray("{}")),
                percolateSearcher,
                false,
                v
            );
            BooleanQuery candidateQuery = (BooleanQuery) query.getCandidateMatchesQuery();
            assertThat(candidateQuery.clauses().get(0).getQuery(), instanceOf(TermInSetQuery.class));
            TopDocs topDocs = shardSearcher.search(query, 10);
            assertEquals(2L, topDocs.totalHits.value);
            assertEquals(2, topDocs.scoreDocs.length);
            assertEquals(1, topDocs.scoreDocs[0].doc);
            assertEquals(2, topDocs.scoreDocs[1].doc);
            topDocs = shardSearcher.search(new ConstantScoreQuery(query), 10);
            assertEquals(2L, topDocs.totalHits.value);
            assertEquals(2, topDocs.scoreDocs.length);
            assertEquals(1, topDocs.scoreDocs[0].doc);
            assertEquals(2, topDocs.scoreDocs[1].doc);
        }
    }
}
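The 1024 distinct field values in the second block push the number of extracted terms past the point where one clause per term is practical, so the candidate query falls back to a single TermInSetQuery instead of per-term clauses inside the CoveringQuery. A hedged sketch of the limit this is presumed to track (assumption: the fallback threshold is Lucene's maximum boolean clause count, 1024 by default; the accessor shown lives on BooleanQuery in older Lucene releases and moved to IndexSearcher in Lucene 9):

// Assumption: the candidate-query builder falls back to TermInSetQuery once the extracted-term
// count reaches the boolean clause limit, which defaults to 1024.
int maxClauses = BooleanQuery.getMaxClauseCount(); // IndexSearcher.getMaxClauseCount() on Lucene 9+
assertEquals(1024, maxClauses);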
Use of org.opensearch.Version in project OpenSearch by opensearch-project.
The class CandidateQueryTests, method testMsmAndRanges_disjunction:
public void testMsmAndRanges_disjunction() throws Exception {
    // Recreates a similar scenario that made testDuel() fail randomly:
    // https://github.com/elastic/elasticsearch/issues/29393
    List<ParseContext.Document> docs = new ArrayList<>();
    BooleanQuery.Builder builder = new BooleanQuery.Builder();
    builder.setMinimumNumberShouldMatch(2);
    BooleanQuery.Builder builder1 = new BooleanQuery.Builder();
    builder1.add(new TermQuery(new Term("field", "value1")), Occur.FILTER);
    builder.add(builder1.build(), Occur.SHOULD);
    builder.add(new TermQuery(new Term("field", "value2")), Occur.MUST_NOT);
    builder.add(IntPoint.newRangeQuery("int_field", 0, 5), Occur.SHOULD);
    builder.add(IntPoint.newRangeQuery("int_field", 6, 10), Occur.SHOULD);
    addQuery(builder.build(), docs);
    indexWriter.addDocuments(docs);
    indexWriter.close();
    directoryReader = DirectoryReader.open(directory);
    IndexSearcher shardSearcher = newSearcher(directoryReader);
    shardSearcher.setQueryCache(null);
    Version v = Version.CURRENT;
    List<BytesReference> sources = Collections.singletonList(new BytesArray("{}"));
    Document document = new Document();
    document.add(new IntPoint("int_field", 4));
    document.add(new IntPoint("int_field", 7));
    MemoryIndex memoryIndex = MemoryIndex.fromDocument(document, new WhitespaceAnalyzer());
    IndexSearcher percolateSearcher = memoryIndex.createSearcher();
    PercolateQuery query = (PercolateQuery) fieldType.percolateQuery("_name", queryStore, sources, percolateSearcher, false, v);
    TopDocs topDocs = shardSearcher.search(query, 10, new Sort(SortField.FIELD_DOC));
    assertEquals(1L, topDocs.totalHits.value);
    assertEquals(0, topDocs.scoreDocs[0].doc);
}
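Unlike the previous test, this one percolates against a MemoryIndex rather than an on-disk Directory: MemoryIndex.fromDocument builds a single-document in-memory index for the document being percolated, and createSearcher() exposes it through the usual IndexSearcher API. A minimal standalone sketch of that pattern (field name, text, and query are made up for illustration):

// Sketch: percolator-style use of Lucene's MemoryIndex for one in-memory document.
MemoryIndex mi = new MemoryIndex();
mi.addField("field", "some text to percolate", new WhitespaceAnalyzer());
float score = mi.search(new TermQuery(new Term("field", "percolate"))); // > 0.0f means the query matched
IndexSearcher searcher = mi.createSearcher();                            // or go through the full search API
TopDocs hits = searcher.search(new TermQuery(new Term("field", "text")), 1);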