Use of org.elasticsearch.test.DummyShardLock in project elasticsearch by elastic.
From the class RefreshListenersTests, method setupListeners.
@Before
public void setupListeners() throws Exception {
    // Setup dependencies of the listeners
    maxListeners = randomIntBetween(1, 1000);
    listeners = new RefreshListeners(
            () -> maxListeners,
            () -> engine.refresh("too-many-listeners"),
            // Immediately run listeners rather than adding them to the listener thread pool like IndexShard does to simplify the test.
            Runnable::run,
            logger);
    // Now setup the InternalEngine which is much more complicated because we aren't mocking anything
    threadPool = new TestThreadPool(getTestName());
    IndexSettings indexSettings = IndexSettingsModule.newIndexSettings("index", Settings.EMPTY);
    ShardId shardId = new ShardId(new Index("index", "_na_"), 1);
    Directory directory = newDirectory();
    DirectoryService directoryService = new DirectoryService(shardId, indexSettings) {
        @Override
        public Directory newDirectory() throws IOException {
            return directory;
        }
    };
    store = new Store(shardId, indexSettings, directoryService, new DummyShardLock(shardId));
    IndexWriterConfig iwc = newIndexWriterConfig();
    TranslogConfig translogConfig = new TranslogConfig(shardId, createTempDir("translog"), indexSettings, BigArrays.NON_RECYCLING_INSTANCE);
    Engine.EventListener eventListener = new Engine.EventListener() {
        @Override
        public void onFailedEngine(String reason, @Nullable Exception e) {
            // we don't need to notify anybody in this test
        }
    };
    TranslogHandler translogHandler = new TranslogHandler(xContentRegistry(), shardId.getIndexName(), logger);
    EngineConfig config = new EngineConfig(EngineConfig.OpenMode.CREATE_INDEX_AND_TRANSLOG, shardId, threadPool, indexSettings, null, store,
            new SnapshotDeletionPolicy(new KeepOnlyLastCommitDeletionPolicy()), newMergePolicy(), iwc.getAnalyzer(), iwc.getSimilarity(),
            new CodecService(null, logger), eventListener, translogHandler, IndexSearcher.getDefaultQueryCache(),
            IndexSearcher.getDefaultQueryCachingPolicy(), translogConfig, TimeValue.timeValueMinutes(5), listeners,
            IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP);
    engine = new InternalEngine(config);
    listeners.setTranslog(engine.getTranslog());
}
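The setup above acquires a thread pool, a store, and an engine, so the test needs a matching teardown. A minimal sketch of what such an @After method could look like (assumed here, not shown in this listing):

@After
public void tearDownListeners() throws Exception {
    // Close the engine first, then the store it wraps; both implement Closeable.
    IOUtils.close(engine, store);
    // ESTestCase helper that shuts the test thread pool down.
    terminate(threadPool);
}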
Use of org.elasticsearch.test.DummyShardLock in project elasticsearch by elastic.
From the class StoreTests, method testNewChecksums.
public void testNewChecksums() throws IOException {
    final ShardId shardId = new ShardId("index", "_na_", 1);
    DirectoryService directoryService = new LuceneManagedDirectoryService(random());
    Store store = new Store(shardId, INDEX_SETTINGS, directoryService, new DummyShardLock(shardId));
    // set default codec - all segments need checksums
    IndexWriter writer = new IndexWriter(store.directory(),
            newIndexWriterConfig(random(), new MockAnalyzer(random())).setCodec(TestUtil.getDefaultCodec()));
    int docs = 1 + random().nextInt(100);
    for (int i = 0; i < docs; i++) {
        Document doc = new Document();
        doc.add(new TextField("id", "" + i, random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
        doc.add(new TextField("body", TestUtil.randomRealisticUnicodeString(random()), random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
        doc.add(new SortedDocValuesField("dv", new BytesRef(TestUtil.randomRealisticUnicodeString(random()))));
        writer.addDocument(doc);
    }
    if (random().nextBoolean()) {
        for (int i = 0; i < docs; i++) {
            if (random().nextBoolean()) {
                Document doc = new Document();
                doc.add(new TextField("id", "" + i, random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
                doc.add(new TextField("body", TestUtil.randomRealisticUnicodeString(random()), random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
                writer.updateDocument(new Term("id", "" + i), doc);
            }
        }
    }
    if (random().nextBoolean()) {
        // flush
        DirectoryReader.open(writer).close();
    }
    Store.MetadataSnapshot metadata;
    // check before we committed
    try {
        store.getMetadata(null);
        fail("no index present - expected exception");
    } catch (IndexNotFoundException ex) {
        // expected
    }
    writer.commit();
    writer.close();
    metadata = store.getMetadata(null);
    assertThat(metadata.asMap().isEmpty(), is(false));
    for (StoreFileMetaData meta : metadata) {
        try (IndexInput input = store.directory().openInput(meta.name(), IOContext.DEFAULT)) {
            String checksum = Store.digestToString(CodecUtil.retrieveChecksum(input));
            assertThat("File: " + meta.name() + " has a different checksum", meta.checksum(), equalTo(checksum));
            assertThat(meta.writtenBy(), equalTo(Version.LATEST));
            if (meta.name().endsWith(".si") || meta.name().startsWith("segments_")) {
                assertThat(meta.hash().length, greaterThan(0));
            }
        }
    }
    assertConsistent(store, metadata);
    TestUtil.checkIndex(store.directory());
    assertDeleteContent(store, directoryService);
    IOUtils.close(store);
}
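Note that CodecUtil.retrieveChecksum only reads the checksum stored in the file footer. A stricter variant (a sketch, not part of the original test) would re-hash each file with CodecUtil.checksumEntireFile, which fails on any corrupted byte rather than only on a damaged footer:

for (StoreFileMetaData meta : metadata) {
    try (IndexInput input = store.directory().openInput(meta.name(), IOContext.READONCE)) {
        // checksumEntireFile reads the whole file and verifies it against the footer value.
        long verified = CodecUtil.checksumEntireFile(input);
        assertThat(meta.checksum(), equalTo(Store.digestToString(verified)));
    }
}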
Use of org.elasticsearch.test.DummyShardLock in project elasticsearch by elastic.
From the class StoreTests, method testStoreStats.
public void testStoreStats() throws IOException {
    final ShardId shardId = new ShardId("index", "_na_", 1);
    DirectoryService directoryService = new LuceneManagedDirectoryService(random());
    Settings settings = Settings.builder()
            .put(IndexMetaData.SETTING_VERSION_CREATED, org.elasticsearch.Version.CURRENT)
            .put(Store.INDEX_STORE_STATS_REFRESH_INTERVAL_SETTING.getKey(), TimeValue.timeValueMinutes(0))
            .build();
    Store store = new Store(shardId, IndexSettingsModule.newIndexSettings("index", settings), directoryService, new DummyShardLock(shardId));
    long initialStoreSize = 0;
    for (String extraFiles : store.directory().listAll()) {
        assertTrue("expected extraFS file but got: " + extraFiles, extraFiles.startsWith("extra"));
        initialStoreSize += store.directory().fileLength(extraFiles);
    }
    StoreStats stats = store.stats();
    assertEquals(stats.getSize().getBytes(), initialStoreSize);
    Directory dir = store.directory();
    final long length;
    try (IndexOutput output = dir.createOutput("foo.bar", IOContext.DEFAULT)) {
        int iters = scaledRandomIntBetween(10, 100);
        for (int i = 0; i < iters; i++) {
            BytesRef bytesRef = new BytesRef(TestUtil.randomRealisticUnicodeString(random(), 10, 1024));
            output.writeBytes(bytesRef.bytes, bytesRef.offset, bytesRef.length);
        }
        length = output.getFilePointer();
    }
    assertTrue(numNonExtraFiles(store) > 0);
    stats = store.stats();
    assertEquals(stats.getSizeInBytes(), length + initialStoreSize);
    deleteContent(store.directory());
    IOUtils.close(store);
}
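numNonExtraFiles is a helper defined elsewhere in StoreTests; roughly, it counts directory entries that are not the "extra" files the mock filesystem sprinkles in. A sketch of such a helper:

public static int numNonExtraFiles(Store store) throws IOException {
    int numNonExtra = 0;
    for (String file : store.directory().listAll()) {
        // extrasFS adds files prefixed with "extra"; everything else belongs to the index.
        if (file.startsWith("extra") == false) {
            numNonExtra++;
        }
    }
    return numNonExtra;
}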
Use of org.elasticsearch.test.DummyShardLock in project elasticsearch by elastic.
From the class StoreTests, method testCleanupFromSnapshot.
public void testCleanupFromSnapshot() throws IOException {
    final ShardId shardId = new ShardId("index", "_na_", 1);
    DirectoryService directoryService = new LuceneManagedDirectoryService(random());
    Store store = new Store(shardId, INDEX_SETTINGS, directoryService, new DummyShardLock(shardId));
    // this time random codec....
    IndexWriterConfig indexWriterConfig = newIndexWriterConfig(random(), new MockAnalyzer(random())).setCodec(TestUtil.getDefaultCodec());
    // we keep all commits and that allows us to clean based on multiple snapshots
    indexWriterConfig.setIndexDeletionPolicy(NoDeletionPolicy.INSTANCE);
    IndexWriter writer = new IndexWriter(store.directory(), indexWriterConfig);
    int docs = 1 + random().nextInt(100);
    int numCommits = 0;
    for (int i = 0; i < docs; i++) {
        if (i > 0 && randomIntBetween(0, 10) == 0) {
            writer.commit();
            numCommits++;
        }
        Document doc = new Document();
        doc.add(new TextField("id", "" + i, random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
        doc.add(new TextField("body", TestUtil.randomRealisticUnicodeString(random()), random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
        doc.add(new SortedDocValuesField("dv", new BytesRef(TestUtil.randomRealisticUnicodeString(random()))));
        writer.addDocument(doc);
    }
    if (numCommits < 1) {
        writer.commit();
        Document doc = new Document();
        doc.add(new TextField("id", "" + docs++, random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
        doc.add(new TextField("body", TestUtil.randomRealisticUnicodeString(random()), random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
        doc.add(new SortedDocValuesField("dv", new BytesRef(TestUtil.randomRealisticUnicodeString(random()))));
        writer.addDocument(doc);
    }
    Store.MetadataSnapshot firstMeta = store.getMetadata(null);
    if (random().nextBoolean()) {
        for (int i = 0; i < docs; i++) {
            if (random().nextBoolean()) {
                Document doc = new Document();
                doc.add(new TextField("id", "" + i, random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
                doc.add(new TextField("body", TestUtil.randomRealisticUnicodeString(random()), random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
                writer.updateDocument(new Term("id", "" + i), doc);
            }
        }
    }
    writer.commit();
    writer.close();
    Store.MetadataSnapshot secondMeta = store.getMetadata(null);
    if (randomBoolean()) {
        store.cleanupAndVerify("test", firstMeta);
        String[] strings = store.directory().listAll();
        int numNotFound = 0;
        for (String file : strings) {
            if (file.startsWith("extra")) {
                continue;
            }
            assertTrue(firstMeta.contains(file) || file.equals("write.lock"));
            if (secondMeta.contains(file) == false) {
                numNotFound++;
            }
        }
        assertTrue("at least one file must not be in here since we have two commits?", numNotFound > 0);
    } else {
        store.cleanupAndVerify("test", secondMeta);
        String[] strings = store.directory().listAll();
        int numNotFound = 0;
        for (String file : strings) {
            if (file.startsWith("extra")) {
                continue;
            }
            assertTrue(file, secondMeta.contains(file) || file.equals("write.lock"));
            if (firstMeta.contains(file) == false) {
                numNotFound++;
            }
        }
        assertTrue("at least one file must not be in here since we have two commits?", numNotFound > 0);
    }
    deleteContent(store.directory());
    IOUtils.close(store);
}
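Because the two MetadataSnapshots span different commits, they can also be compared directly with MetadataSnapshot.recoveryDiff; a short sketch (not part of this test) of that comparison:

// recoveryDiff buckets files into identical, different, and missing lists.
Store.RecoveryDiff diff = secondMeta.recoveryDiff(firstMeta);
// Two distinct commits should disagree on at least one file.
assertTrue(diff.different.size() + diff.missing.size() > 0);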
Use of org.elasticsearch.test.DummyShardLock in project elasticsearch by elastic.
From the class ShadowEngineTests, method createStore.
protected Store createStore(final Directory directory) throws IOException {
    IndexSettings indexSettings = IndexSettingsModule.newIndexSettings(shardId.getIndex(), Settings.EMPTY);
    final DirectoryService directoryService = new DirectoryService(shardId, indexSettings) {
        @Override
        public Directory newDirectory() throws IOException {
            return directory;
        }
    };
    return new Store(shardId, indexSettings, directoryService, new DummyShardLock(shardId));
}
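A typical caller (a sketch of how a test might use this helper) wraps a fresh Lucene test directory and closes the store when done:

// DummyShardLock means no real shard-level lock has to be acquired for the test store.
Store store = createStore(newDirectory());
try {
    // ... exercise an engine backed by this store ...
} finally {
    IOUtils.close(store);
}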