Use of org.apache.lucene.tests.analysis.MockAnalyzer in project OpenSearch by opensearch-project: class SliceBuilderTests, method testToFilterWithRouting.
public void testToFilterWithRouting() throws IOException {
    Directory dir = new ByteBuffersDirectory();
    try (IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random())))) {
        writer.commit();
    }
    // mock a cluster whose routing resolves to a single shard
    ClusterService clusterService = mock(ClusterService.class);
    ClusterState state = mock(ClusterState.class);
    when(state.metadata()).thenReturn(Metadata.EMPTY_METADATA);
    when(clusterService.state()).thenReturn(state);
    OperationRouting routing = mock(OperationRouting.class);
    GroupShardsIterator<ShardIterator> it = new GroupShardsIterator<>(
        Collections.singletonList(new PlainShardIterator(new ShardId("index", "index", 1), Collections.emptyList()))
    );
    when(routing.searchShards(any(), any(), any(), any())).thenReturn(it);
    when(clusterService.operationRouting()).thenReturn(routing);
    when(clusterService.getSettings()).thenReturn(Settings.EMPTY);
    try (IndexReader reader = DirectoryReader.open(dir)) {
        Version version = VersionUtils.randomCompatibleVersion(random(), Version.CURRENT);
        QueryShardContext context = createShardContext(version, reader, "field", DocValuesType.SORTED, 5, 0);
        SliceBuilder builder = new SliceBuilder("field", 6, 10);
        String[] routings = new String[] { "foo" };
        // an explicit routing value targets one shard, so the slice becomes a doc-values filter
        Query query = builder.toFilter(clusterService, createRequest(1, routings, null), context, version);
        assertEquals(new DocValuesSliceQuery("field", 6, 10), query);
        // an alias routing value behaves the same way
        query = builder.toFilter(clusterService, createRequest(1, Strings.EMPTY_ARRAY, "foo"), context, version);
        assertEquals(new DocValuesSliceQuery("field", 6, 10), query);
    }
}
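For context, a minimal sketch of how such a SliceBuilder is typically attached to a request; the surrounding names (SearchSourceBuilder, QueryBuilders, SearchRequest) are the usual OpenSearch entry points, not part of this test:

import org.opensearch.action.search.SearchRequest;
import org.opensearch.index.query.QueryBuilders;
import org.opensearch.search.builder.SearchSourceBuilder;
import org.opensearch.search.slice.SliceBuilder;

// Sketch: ask for slice id 6 of 10, partitioned by the doc values of "field".
SearchSourceBuilder source = new SearchSourceBuilder()
    .query(QueryBuilders.matchAllQuery())
    .slice(new SliceBuilder("field", 6, 10));
SearchRequest request = new SearchRequest("index").source(source);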
Use of org.apache.lucene.tests.analysis.MockAnalyzer in project OpenSearch by opensearch-project: class SliceBuilderTests, method testInvalidField.
public void testInvalidField() throws IOException {
    Directory dir = new ByteBuffersDirectory();
    try (IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random())))) {
        writer.commit();
    }
    try (IndexReader reader = DirectoryReader.open(dir)) {
        QueryShardContext context = createShardContext(Version.CURRENT, reader, "field", null, 1, 0);
        SliceBuilder builder = new SliceBuilder("field", 5, 10);
        IllegalArgumentException exc = expectThrows(
            IllegalArgumentException.class,
            () -> builder.toFilter(null, createRequest(0), context, Version.CURRENT)
        );
        assertThat(exc.getMessage(), containsString("cannot load numeric doc values"));
    }
}
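The exception is expected here because the shard context maps "field" to no usable doc values (the DocValuesType argument is null). For contrast, a minimal sketch of the happy path in plain Lucene, assuming the field is meant to carry numeric doc values:

import org.apache.lucene.document.Document;
import org.apache.lucene.document.NumericDocValuesField;
import org.apache.lucene.index.IndexWriter;

// Sketch: give "field" numeric doc values at index time so a slice filter
// over it can load them instead of throwing.
try (IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random())))) {
    Document doc = new Document();
    doc.add(new NumericDocValuesField("field", 42L));
    writer.addDocument(doc);
    writer.commit();
}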
Use of org.apache.lucene.tests.analysis.MockAnalyzer in project OpenSearch by opensearch-project: class StoreTests, method testNewChecksums.
public void testNewChecksums() throws IOException {
    final ShardId shardId = new ShardId("index", "_na_", 1);
    Store store = new Store(shardId, INDEX_SETTINGS, StoreTests.newDirectory(random()), new DummyShardLock(shardId));
    // set default codec - all segments need checksums
    IndexWriter writer = new IndexWriter(
        store.directory(),
        newIndexWriterConfig(random(), new MockAnalyzer(random())).setCodec(TestUtil.getDefaultCodec())
    );
    int docs = 1 + random().nextInt(100);
    for (int i = 0; i < docs; i++) {
        Document doc = new Document();
        doc.add(new TextField("id", "" + i, random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
        doc.add(new TextField("body", TestUtil.randomRealisticUnicodeString(random()), random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
        doc.add(new SortedDocValuesField("dv", new BytesRef(TestUtil.randomRealisticUnicodeString(random()))));
        writer.addDocument(doc);
    }
    if (random().nextBoolean()) {
        for (int i = 0; i < docs; i++) {
            if (random().nextBoolean()) {
                Document doc = new Document();
                doc.add(new TextField("id", "" + i, random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
                doc.add(new TextField("body", TestUtil.randomRealisticUnicodeString(random()), random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
                writer.updateDocument(new Term("id", "" + i), doc);
            }
        }
    }
    if (random().nextBoolean()) {
        // flush
        DirectoryReader.open(writer).close();
    }
    Store.MetadataSnapshot metadata;
    // check before we committed
    try {
        store.getMetadata();
        fail("no index present - expected exception");
    } catch (IndexNotFoundException ex) {
        // expected
    }
    writer.commit();
    writer.close();
    metadata = store.getMetadata();
    assertThat(metadata.asMap().isEmpty(), is(false));
    for (StoreFileMetadata meta : metadata) {
        try (IndexInput input = store.directory().openInput(meta.name(), IOContext.DEFAULT)) {
            String checksum = Store.digestToString(CodecUtil.retrieveChecksum(input));
            assertThat("File: " + meta.name() + " has a different checksum", meta.checksum(), equalTo(checksum));
            assertThat(meta.writtenBy(), equalTo(Version.LATEST));
            if (meta.name().endsWith(".si") || meta.name().startsWith("segments_")) {
                assertThat(meta.hash().length, greaterThan(0));
            }
        }
    }
    assertConsistent(store, metadata);
    TestUtil.checkIndex(store.directory());
    assertDeleteContent(store, store.directory());
    IOUtils.close(store);
}
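The checksum round-trip this test verifies is plain Lucene: every index file ends in a footer that records a CRC32. A standalone sketch, where the directory and file name are assumed placeholders:

import org.apache.lucene.codecs.CodecUtil;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.IOContext;
import org.apache.lucene.store.IndexInput;

// Sketch: read the CRC32 recorded in the file footer. retrieveChecksum
// validates the footer structure and throws CorruptIndexException if it is
// malformed; CodecUtil.checksumEntireFile would re-hash the whole file.
try (IndexInput input = dir.openInput(fileName, IOContext.READONCE)) {
    String digest = Store.digestToString(CodecUtil.retrieveChecksum(input));
}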
Use of org.apache.lucene.tests.analysis.MockAnalyzer in project OpenSearch by opensearch-project: class StoreTests, method testUserDataRead.
public void testUserDataRead() throws IOException {
    final ShardId shardId = new ShardId("index", "_na_", 1);
    Store store = new Store(shardId, INDEX_SETTINGS, StoreTests.newDirectory(random()), new DummyShardLock(shardId));
    IndexWriterConfig config = newIndexWriterConfig(random(), new MockAnalyzer(random())).setCodec(TestUtil.getDefaultCodec());
    SnapshotDeletionPolicy deletionPolicy = new SnapshotDeletionPolicy(new KeepOnlyLastCommitDeletionPolicy());
    config.setIndexDeletionPolicy(deletionPolicy);
    IndexWriter writer = new IndexWriter(store.directory(), config);
    Document doc = new Document();
    doc.add(new TextField("id", "1", Field.Store.NO));
    writer.addDocument(doc);
    Map<String, String> commitData = new HashMap<>(2);
    String syncId = "a sync id";
    commitData.put(Engine.SYNC_COMMIT_ID, syncId);
    writer.setLiveCommitData(commitData.entrySet());
    writer.commit();
    writer.close();
    Store.MetadataSnapshot metadata;
    metadata = store.getMetadata(randomBoolean() ? null : deletionPolicy.snapshot());
    assertFalse(metadata.asMap().isEmpty());
    // do not check for correct files, we have enough tests for that above
    assertThat(metadata.getCommitUserData().get(Engine.SYNC_COMMIT_ID), equalTo(syncId));
    TestUtil.checkIndex(store.directory());
    assertDeleteContent(store, store.directory());
    IOUtils.close(store);
}
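The commit user data written via setLiveCommitData can also be read back with plain Lucene, without going through a Store; a minimal sketch:

import java.util.Map;
import org.apache.lucene.index.SegmentInfos;

// Sketch: read the user data attached to the latest commit point directly
// from the directory.
Map<String, String> userData = SegmentInfos.readLatestCommit(dir).getUserData();
String syncId = userData.get(Engine.SYNC_COMMIT_ID);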
Use of org.apache.lucene.tests.analysis.MockAnalyzer in project OpenSearch by opensearch-project: class StoreTests, method testCleanupFromSnapshot.
public void testCleanupFromSnapshot() throws IOException {
    final ShardId shardId = new ShardId("index", "_na_", 1);
    Store store = new Store(shardId, INDEX_SETTINGS, StoreTests.newDirectory(random()), new DummyShardLock(shardId));
    // use the default codec here as well, so all segments carry checksums
    IndexWriterConfig indexWriterConfig = newIndexWriterConfig(random(), new MockAnalyzer(random())).setCodec(TestUtil.getDefaultCodec());
    // keep all commits, which lets us clean up based on multiple snapshots
    indexWriterConfig.setIndexDeletionPolicy(NoDeletionPolicy.INSTANCE);
    IndexWriter writer = new IndexWriter(store.directory(), indexWriterConfig);
    int docs = 1 + random().nextInt(100);
    int numCommits = 0;
    for (int i = 0; i < docs; i++) {
        if (i > 0 && randomIntBetween(0, 10) == 0) {
            writer.commit();
            numCommits++;
        }
        Document doc = new Document();
        doc.add(new TextField("id", "" + i, random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
        doc.add(new TextField("body", TestUtil.randomRealisticUnicodeString(random()), random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
        doc.add(new SortedDocValuesField("dv", new BytesRef(TestUtil.randomRealisticUnicodeString(random()))));
        writer.addDocument(doc);
    }
    if (numCommits < 1) {
        // force a second commit so the two snapshots differ
        writer.commit();
        Document doc = new Document();
        doc.add(new TextField("id", "" + docs++, random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
        doc.add(new TextField("body", TestUtil.randomRealisticUnicodeString(random()), random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
        doc.add(new SortedDocValuesField("dv", new BytesRef(TestUtil.randomRealisticUnicodeString(random()))));
        writer.addDocument(doc);
    }
    Store.MetadataSnapshot firstMeta = store.getMetadata();
    if (random().nextBoolean()) {
        for (int i = 0; i < docs; i++) {
            if (random().nextBoolean()) {
                Document doc = new Document();
                doc.add(new TextField("id", "" + i, random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
                doc.add(new TextField("body", TestUtil.randomRealisticUnicodeString(random()), random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
                writer.updateDocument(new Term("id", "" + i), doc);
            }
        }
    }
    writer.commit();
    writer.close();
    Store.MetadataSnapshot secondMeta = store.getMetadata();
    if (randomBoolean()) {
        store.cleanupAndVerify("test", firstMeta);
        String[] strings = store.directory().listAll();
        int numNotFound = 0;
        for (String file : strings) {
            if (file.startsWith("extra")) {
                continue;
            }
            assertTrue(firstMeta.contains(file) || file.equals("write.lock"));
            if (secondMeta.contains(file) == false) {
                numNotFound++;
            }
        }
        assertTrue("at least one file must not be in here since we have two commits", numNotFound > 0);
    } else {
        store.cleanupAndVerify("test", secondMeta);
        String[] strings = store.directory().listAll();
        int numNotFound = 0;
        for (String file : strings) {
            if (file.startsWith("extra")) {
                continue;
            }
            assertTrue(file, secondMeta.contains(file) || file.equals("write.lock"));
            if (firstMeta.contains(file) == false) {
                numNotFound++;
            }
        }
        assertTrue("at least one file must not be in here since we have two commits", numNotFound > 0);
    }
    deleteContent(store.directory());
    IOUtils.close(store);
}
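cleanupAndVerify deletes every file that the given snapshot does not reference, which is why each branch only expects files from the surviving commit (plus the write lock). Since a MetadataSnapshot is iterable over StoreFileMetadata, as the loops above already use, the retained set can be inspected directly; a minimal sketch:

// Sketch: list what a snapshot retains; anything on disk outside this set
// (other than write.lock and "extra" test files) is removed by cleanupAndVerify.
for (StoreFileMetadata meta : firstMeta) {
    System.out.println(meta.name() + " (" + meta.length() + " bytes, checksum " + meta.checksum() + ")");
}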