Use of org.opensearch.test.DummyShardLock in project OpenSearch by opensearch-project.
From the class StoreTests, method testNewChecksums.
public void testNewChecksums() throws IOException {
    final ShardId shardId = new ShardId("index", "_na_", 1);
    Store store = new Store(shardId, INDEX_SETTINGS, StoreTests.newDirectory(random()), new DummyShardLock(shardId));
    // set default codec - all segments need checksums
    IndexWriter writer = new IndexWriter(
        store.directory(),
        newIndexWriterConfig(random(), new MockAnalyzer(random())).setCodec(TestUtil.getDefaultCodec())
    );
    int docs = 1 + random().nextInt(100);
    for (int i = 0; i < docs; i++) {
        Document doc = new Document();
        doc.add(new TextField("id", "" + i, random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
        doc.add(new TextField("body", TestUtil.randomRealisticUnicodeString(random()), random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
        doc.add(new SortedDocValuesField("dv", new BytesRef(TestUtil.randomRealisticUnicodeString(random()))));
        writer.addDocument(doc);
    }
    if (random().nextBoolean()) {
        for (int i = 0; i < docs; i++) {
            if (random().nextBoolean()) {
                Document doc = new Document();
                doc.add(new TextField("id", "" + i, random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
                doc.add(new TextField("body", TestUtil.randomRealisticUnicodeString(random()), random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
                writer.updateDocument(new Term("id", "" + i), doc);
            }
        }
    }
    if (random().nextBoolean()) {
        // flush
        DirectoryReader.open(writer).close();
    }
    Store.MetadataSnapshot metadata;
    // check before we committed
    try {
        store.getMetadata(null);
        fail("no index present - expected exception");
    } catch (IndexNotFoundException ex) {
        // expected
    }
    writer.commit();
    writer.close();
    metadata = store.getMetadata(null);
    assertThat(metadata.asMap().isEmpty(), is(false));
    for (StoreFileMetadata meta : metadata) {
        try (IndexInput input = store.directory().openInput(meta.name(), IOContext.DEFAULT)) {
            String checksum = Store.digestToString(CodecUtil.retrieveChecksum(input));
            assertThat("File: " + meta.name() + " has a different checksum", meta.checksum(), equalTo(checksum));
            assertThat(meta.writtenBy(), equalTo(Version.LATEST));
            if (meta.name().endsWith(".si") || meta.name().startsWith("segments_")) {
                assertThat(meta.hash().length, greaterThan(0));
            }
        }
    }
    assertConsistent(store, metadata);
    TestUtil.checkIndex(store.directory());
    assertDeleteContent(store, store.directory());
    IOUtils.close(store);
}
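Every example on this page uses the same construction pattern: wrap a test Directory in a Store keyed by a ShardId, with DummyShardLock standing in for a real shard lock so no NodeEnvironment is required. Below is a minimal sketch of that pattern, assuming OpenSearch's test helpers (INDEX_SETTINGS, newDirectory(), newIndexWriterConfig(), random()) are in scope and that the snippet runs inside a test method declaring throws IOException.

ShardId shardId = new ShardId("index", "_na_", 1);
Store store = new Store(shardId, INDEX_SETTINGS, newDirectory(random()), new DummyShardLock(shardId));
try {
    // write at least one commit so the store has a segments_N file to inspect
    try (IndexWriter writer = new IndexWriter(store.directory(), newIndexWriterConfig())) {
        writer.addDocument(new Document());
        writer.commit();
    }
    // after a commit, the Store can hand out checksummed metadata for every file
    Store.MetadataSnapshot metadata = store.getMetadata(null);
    assertThat(metadata.asMap().isEmpty(), is(false));
} finally {
    // release the store, mirroring the IOUtils.close(store) calls in the tests above
    IOUtils.close(store);
}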
Use of org.opensearch.test.DummyShardLock in project OpenSearch by opensearch-project.
From the class RefreshListenersTests, method setupListeners.
@Before
public void setupListeners() throws Exception {
    // Setup dependencies of the listeners
    maxListeners = randomIntBetween(1, 1000);
    // Now setup the InternalEngine which is much more complicated because we aren't mocking anything
    threadPool = new TestThreadPool(getTestName());
    refreshMetric = new MeanMetric();
    listeners = new RefreshListeners(() -> maxListeners, () -> engine.refresh("too-many-listeners"), logger, threadPool.getThreadContext(), refreshMetric);
    IndexSettings indexSettings = IndexSettingsModule.newIndexSettings("index", Settings.EMPTY);
    ShardId shardId = new ShardId(new Index("index", "_na_"), 1);
    String allocationId = UUIDs.randomBase64UUID(random());
    Directory directory = newDirectory();
    store = new Store(shardId, indexSettings, directory, new DummyShardLock(shardId));
    IndexWriterConfig iwc = newIndexWriterConfig();
    TranslogConfig translogConfig = new TranslogConfig(shardId, createTempDir("translog"), indexSettings, BigArrays.NON_RECYCLING_INSTANCE);
    Engine.EventListener eventListener = new Engine.EventListener() {
        @Override
        public void onFailedEngine(String reason, @Nullable Exception e) {
            // we don't need to notify anybody in this test
        }
    };
    store.createEmpty(Version.CURRENT.luceneVersion);
    final long primaryTerm = randomNonNegativeLong();
    final String translogUUID = Translog.createEmptyTranslog(translogConfig.getTranslogPath(), SequenceNumbers.NO_OPS_PERFORMED, shardId, primaryTerm);
    store.associateIndexWithNewTranslog(translogUUID);
    EngineConfig config = new EngineConfig(
        shardId,
        threadPool,
        indexSettings,
        null,
        store,
        newMergePolicy(),
        iwc.getAnalyzer(),
        iwc.getSimilarity(),
        new CodecService(null, logger),
        eventListener,
        IndexSearcher.getDefaultQueryCache(),
        IndexSearcher.getDefaultQueryCachingPolicy(),
        translogConfig,
        TimeValue.timeValueMinutes(5),
        Collections.singletonList(listeners),
        Collections.emptyList(),
        null,
        new NoneCircuitBreakerService(),
        () -> SequenceNumbers.NO_OPS_PERFORMED,
        () -> RetentionLeases.EMPTY,
        () -> primaryTerm,
        EngineTestCase.tombstoneDocSupplier()
    );
    engine = new InternalEngine(config);
    engine.recoverFromTranslog((e, s) -> 0, Long.MAX_VALUE);
    listeners.setCurrentRefreshLocationSupplier(engine::getTranslogLastWriteLocation);
}
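After this setup, the listeners are typically exercised by registering a callback for a translog location and triggering a refresh. A hedged sketch follows; the addOrNotify signature is an assumption based on how RefreshListeners is driven elsewhere in these tests and is not shown in the snippet above.

// register a listener for the engine's last write location; the boolean passed to the
// callback is assumed to indicate whether a refresh had to be forced
boolean calledImmediately = listeners.addOrNotify(
    engine.getTranslogLastWriteLocation(),
    forcedRefresh -> logger.info("refresh listener fired, forced={}", forcedRefresh)
);
if (calledImmediately == false) {
    engine.refresh("test");
}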
Use of org.opensearch.test.DummyShardLock in project OpenSearch by opensearch-project.
From the class IndexShardTests, method testFailShard.
public void testFailShard() throws Exception {
    allowShardFailures();
    IndexShard shard = newStartedShard();
    final ShardPath shardPath = shard.shardPath();
    assertNotNull(shardPath);
    // fail shard
    shard.failShard("test shard fail", new CorruptIndexException("", ""));
    shard.close("do not assert history", false);
    shard.store().close();
    // check state file still exists
    ShardStateMetadata shardStateMetadata = load(logger, shardPath.getShardStatePath());
    assertEquals(shardStateMetadata, getShardStateMetadata(shard));
    // but index can't be opened for a failed shard
    assertThat(
        "store index should be corrupted",
        StoreUtils.canOpenIndex(logger, shardPath.resolveIndex(), shard.shardId(), (shardId, lockTimeoutMS, details) -> new DummyShardLock(shardId)),
        equalTo(false)
    );
}
Use of org.opensearch.test.DummyShardLock in project OpenSearch by opensearch-project.
From the class RemoveCorruptedShardDataCommandTests, method reopenIndexShard.
private IndexShard reopenIndexShard(boolean corrupted) throws IOException {
    // open shard with the same location
    final ShardRouting shardRouting = ShardRoutingHelper.initWithSameId(indexShard.routingEntry(), RecoverySource.ExistingStoreRecoverySource.INSTANCE);
    final IndexMetadata metadata = IndexMetadata.builder(indexMetadata)
        .settings(Settings.builder().put(indexShard.indexSettings().getSettings()).put(IndexSettings.INDEX_CHECK_ON_STARTUP.getKey(), "checksum"))
        .build();
    CheckedFunction<IndexSettings, Store, IOException> storeProvider = corrupted == false ? null : indexSettings -> {
        final ShardId shardId = shardPath.getShardId();
        final BaseDirectoryWrapper baseDirectoryWrapper = newFSDirectory(shardPath.resolveIndex());
        // disable check-index-on-close: the reopened directory may be intentionally corrupted
        baseDirectoryWrapper.setCheckIndexOnClose(false);
        return new Store(shardId, indexSettings, baseDirectoryWrapper, new DummyShardLock(shardId));
    };
    return newShard(shardRouting, shardPath, metadata, storeProvider, null, indexShard.engineFactory, indexShard.getEngineConfigFactory(), indexShard.getGlobalCheckpointSyncer(), indexShard.getRetentionLeaseSyncer(), EMPTY_EVENT_LISTENER);
}
Use of org.opensearch.test.DummyShardLock in project OpenSearch by opensearch-project.
From the class StoreTests, method testRecoveryDiff.
public void testRecoveryDiff() throws IOException, InterruptedException {
    int numDocs = 2 + random().nextInt(100);
    List<Document> docs = new ArrayList<>();
    for (int i = 0; i < numDocs; i++) {
        Document doc = new Document();
        doc.add(new StringField("id", "" + i, random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
        doc.add(new TextField("body", TestUtil.randomRealisticUnicodeString(random()), random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
        doc.add(new SortedDocValuesField("dv", new BytesRef(TestUtil.randomRealisticUnicodeString(random()))));
        docs.add(doc);
    }
    long seed = random().nextLong();
    Store.MetadataSnapshot first;
    {
        Random random = new Random(seed);
        IndexWriterConfig iwc = new IndexWriterConfig(new MockAnalyzer(random)).setCodec(TestUtil.getDefaultCodec());
        iwc.setMergePolicy(NoMergePolicy.INSTANCE);
        iwc.setUseCompoundFile(random.nextBoolean());
        final ShardId shardId = new ShardId("index", "_na_", 1);
        Store store = new Store(shardId, INDEX_SETTINGS, StoreTests.newDirectory(random()), new DummyShardLock(shardId));
        IndexWriter writer = new IndexWriter(store.directory(), iwc);
        final boolean lotsOfSegments = rarely(random);
        for (Document d : docs) {
            writer.addDocument(d);
            if (lotsOfSegments && random.nextBoolean()) {
                writer.commit();
            } else if (rarely(random)) {
                writer.commit();
            }
        }
        writer.commit();
        writer.close();
        first = store.getMetadata(null);
        assertDeleteContent(store, store.directory());
        store.close();
    }
    long time = new Date().getTime();
    while (time == new Date().getTime()) {
        // bump the time
        Thread.sleep(10);
    }
    Store.MetadataSnapshot second;
    Store store;
    {
        Random random = new Random(seed);
        IndexWriterConfig iwc = new IndexWriterConfig(new MockAnalyzer(random)).setCodec(TestUtil.getDefaultCodec());
        iwc.setMergePolicy(NoMergePolicy.INSTANCE);
        iwc.setUseCompoundFile(random.nextBoolean());
        final ShardId shardId = new ShardId("index", "_na_", 1);
        store = new Store(shardId, INDEX_SETTINGS, StoreTests.newDirectory(random()), new DummyShardLock(shardId));
        IndexWriter writer = new IndexWriter(store.directory(), iwc);
        final boolean lotsOfSegments = rarely(random);
        for (Document d : docs) {
            writer.addDocument(d);
            if (lotsOfSegments && random.nextBoolean()) {
                writer.commit();
            } else if (rarely(random)) {
                writer.commit();
            }
        }
        writer.commit();
        writer.close();
        second = store.getMetadata(null);
    }
    Store.RecoveryDiff diff = first.recoveryDiff(second);
    assertThat(first.size(), equalTo(second.size()));
    for (StoreFileMetadata md : first) {
        assertThat(second.get(md.name()), notNullValue());
        // si files are different - containing timestamps etc
        assertThat(second.get(md.name()).isSame(md), equalTo(false));
    }
    assertThat(diff.different.size(), equalTo(first.size()));
    // in lucene 5 nothing is identical - we use random ids in file headers
    assertThat(diff.identical.size(), equalTo(0));
    assertThat(diff.missing, empty());
    // check the self diff
    Store.RecoveryDiff selfDiff = first.recoveryDiff(first);
    assertThat(selfDiff.identical.size(), equalTo(first.size()));
    assertThat(selfDiff.different, empty());
    assertThat(selfDiff.missing, empty());
    // lets add some deletes
    Random random = new Random(seed);
    IndexWriterConfig iwc = new IndexWriterConfig(new MockAnalyzer(random)).setCodec(TestUtil.getDefaultCodec());
    iwc.setMergePolicy(NoMergePolicy.INSTANCE);
    iwc.setUseCompoundFile(random.nextBoolean());
    iwc.setOpenMode(IndexWriterConfig.OpenMode.APPEND);
    IndexWriter writer = new IndexWriter(store.directory(), iwc);
    writer.deleteDocuments(new Term("id", Integer.toString(random().nextInt(numDocs))));
    writer.commit();
    writer.close();
    Store.MetadataSnapshot metadata = store.getMetadata(null);
    StoreFileMetadata delFile = null;
    for (StoreFileMetadata md : metadata) {
        if (md.name().endsWith(".liv")) {
            delFile = md;
            break;
        }
    }
    Store.RecoveryDiff afterDeleteDiff = metadata.recoveryDiff(second);
    if (delFile != null) {
        // segments_N + del file
        assertThat(afterDeleteDiff.identical.size(), equalTo(metadata.size() - 2));
        assertThat(afterDeleteDiff.different.size(), equalTo(0));
        assertThat(afterDeleteDiff.missing.size(), equalTo(2));
    } else {
        // an entire segment must be missing (single doc segment got dropped)
        assertThat(afterDeleteDiff.identical.size(), greaterThan(0));
        assertThat(afterDeleteDiff.different.size(), equalTo(0));
        // the commit file is different
        assertThat(afterDeleteDiff.missing.size(), equalTo(1));
    }
    // check the self diff
    selfDiff = metadata.recoveryDiff(metadata);
    assertThat(selfDiff.identical.size(), equalTo(metadata.size()));
    assertThat(selfDiff.different, empty());
    assertThat(selfDiff.missing, empty());
    // add a new commit
    iwc = new IndexWriterConfig(new MockAnalyzer(random)).setCodec(TestUtil.getDefaultCodec());
    iwc.setMergePolicy(NoMergePolicy.INSTANCE);
    // force CFS - easier to test here since we know it will add 3 files
    iwc.setUseCompoundFile(true);
    iwc.setOpenMode(IndexWriterConfig.OpenMode.APPEND);
    writer = new IndexWriter(store.directory(), iwc);
    writer.addDocument(docs.get(0));
    writer.close();
    Store.MetadataSnapshot newCommitMetadata = store.getMetadata(null);
    Store.RecoveryDiff newCommitDiff = newCommitMetadata.recoveryDiff(metadata);
    if (delFile != null) {
        // segments_N, del file, cfs, cfe, si for the new segment
        assertThat(newCommitDiff.identical.size(), equalTo(newCommitMetadata.size() - 5));
        // the del file must be different
        assertThat(newCommitDiff.different.size(), equalTo(1));
        assertThat(newCommitDiff.different.get(0).name(), endsWith(".liv"));
        // segments_N, cfs, cfe, si for the new segment
        assertThat(newCommitDiff.missing.size(), equalTo(4));
    } else {
        // segments_N, cfs, cfe, si for the new segment
        assertThat(newCommitDiff.identical.size(), equalTo(newCommitMetadata.size() - 4));
        assertThat(newCommitDiff.different.size(), equalTo(0));
        // an entire segment must be missing (single doc segment got dropped) plus the commit is different
        assertThat(newCommitDiff.missing.size(), equalTo(4));
    }
    deleteContent(store.directory());
    IOUtils.close(store);
}
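The assertions above rely on the three buckets a Store.RecoveryDiff exposes. A short summary in code form follows; sourceSnapshot and targetSnapshot are placeholder names for two MetadataSnapshot instances such as first and second above, and the bucket descriptions are inferred from this test rather than quoted from the Store javadoc.

Store.RecoveryDiff diff = sourceSnapshot.recoveryDiff(targetSnapshot);
// same name, length and checksum in both snapshots, so the file can be reused during recovery
List<StoreFileMetadata> reusable = diff.identical;
// present in both snapshots but with different content, e.g. an updated .liv file
List<StoreFileMetadata> changed = diff.different;
// not safely reusable from the target, e.g. a new segments_N or a freshly written segment
List<StoreFileMetadata> toCopy = diff.missing;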