Use of org.elasticsearch.test.DummyShardLock in project elasticsearch by elastic.
From class IndexShardTestCase, method createStore:
private Store createStore(IndexSettings indexSettings, ShardPath shardPath) throws IOException {
    final ShardId shardId = shardPath.getShardId();
    final DirectoryService directoryService = new DirectoryService(shardId, indexSettings) {
        @Override
        public Directory newDirectory() throws IOException {
            return newFSDirectory(shardPath.resolveIndex());
        }
    };
    return new Store(shardId, indexSettings, directoryService, new DummyShardLock(shardId));
}
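For context, DummyShardLock is what lets test code like this construct a Store without going through NodeEnvironment's real shard locking. A minimal sketch of the class as it appears in the elasticsearch test framework (exact source may differ by version):

// Sketch (may differ by version): a ShardLock whose release is a no-op,
// so no actual lock is ever acquired or released.
public class DummyShardLock extends ShardLock {

    public DummyShardLock(ShardId id) {
        super(id);
    }

    @Override
    protected void closeInternal() {
        // no-op: there is no real shard lock to release
    }
}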
Use of org.elasticsearch.test.DummyShardLock in project crate by crate.
From class StoreTests, method testNewChecksums:
public void testNewChecksums() throws IOException {
    final ShardId shardId = new ShardId("index", "_na_", 1);
    Store store = new Store(shardId, INDEX_SETTINGS, StoreTests.newDirectory(random()), new DummyShardLock(shardId));
    // set default codec - all segments need checksums
    IndexWriter writer = new IndexWriter(store.directory(),
        newIndexWriterConfig(random(), new MockAnalyzer(random())).setCodec(TestUtil.getDefaultCodec()));
    int docs = 1 + random().nextInt(100);
    for (int i = 0; i < docs; i++) {
        Document doc = new Document();
        doc.add(new TextField("id", "" + i, random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
        doc.add(new TextField("body", TestUtil.randomRealisticUnicodeString(random()),
            random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
        doc.add(new SortedDocValuesField("dv", new BytesRef(TestUtil.randomRealisticUnicodeString(random()))));
        writer.addDocument(doc);
    }
    if (random().nextBoolean()) {
        for (int i = 0; i < docs; i++) {
            if (random().nextBoolean()) {
                Document doc = new Document();
                doc.add(new TextField("id", "" + i, random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
                doc.add(new TextField("body", TestUtil.randomRealisticUnicodeString(random()),
                    random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
                writer.updateDocument(new Term("id", "" + i), doc);
            }
        }
    }
    if (random().nextBoolean()) {
        // flush
        DirectoryReader.open(writer).close();
    }
    Store.MetadataSnapshot metadata;
    // check before we committed
    try {
        store.getMetadata(null);
        fail("no index present - expected exception");
    } catch (IndexNotFoundException ex) {
        // expected
    }
    writer.commit();
    writer.close();
    metadata = store.getMetadata(null);
    assertThat(metadata.asMap().isEmpty(), is(false));
    for (StoreFileMetadata meta : metadata) {
        try (IndexInput input = store.directory().openInput(meta.name(), IOContext.DEFAULT)) {
            String checksum = Store.digestToString(CodecUtil.retrieveChecksum(input));
            assertThat("File: " + meta.name() + " has a different checksum", meta.checksum(), equalTo(checksum));
            assertThat(meta.writtenBy(), equalTo(Version.LATEST));
            if (meta.name().endsWith(".si") || meta.name().startsWith("segments_")) {
                assertThat(meta.hash().length, greaterThan(0));
            }
        }
    }
    assertConsistent(store, metadata);
    TestUtil.checkIndex(store.directory());
    assertDeleteContent(store, store.directory());
    IOUtils.close(store);
}
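The checksum loop is the reusable piece of this test: every file in a MetadataSnapshot carries the checksum Lucene wrote into the file's footer. A sketch of the same pattern as a standalone helper (the readChecksum name is ours, not from the test; the Store and CodecUtil calls are the ones used above):

// Hypothetical helper (name ours) showing the verification pattern from the
// loop above: open the file, read the checksum from the Lucene footer, and
// render it the way Store records it in its metadata.
private static String readChecksum(Directory dir, String fileName) throws IOException {
    try (IndexInput in = dir.openInput(fileName, IOContext.READONCE)) {
        // retrieveChecksum seeks to the codec footer and returns the checksum
        // stored there, validating the footer structure along the way
        return Store.digestToString(CodecUtil.retrieveChecksum(in));
    }
}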
Use of org.elasticsearch.test.DummyShardLock in project crate by crate.
From class StoreTests, method testRecoveryDiff:
public void testRecoveryDiff() throws IOException, InterruptedException {
    int numDocs = 2 + random().nextInt(100);
    List<Document> docs = new ArrayList<>();
    for (int i = 0; i < numDocs; i++) {
        Document doc = new Document();
        doc.add(new StringField("id", "" + i, random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
        doc.add(new TextField("body", TestUtil.randomRealisticUnicodeString(random()),
            random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
        doc.add(new SortedDocValuesField("dv", new BytesRef(TestUtil.randomRealisticUnicodeString(random()))));
        docs.add(doc);
    }
    long seed = random().nextLong();
    Store.MetadataSnapshot first;
    {
        Random random = new Random(seed);
        IndexWriterConfig iwc = new IndexWriterConfig(new MockAnalyzer(random)).setCodec(TestUtil.getDefaultCodec());
        iwc.setMergePolicy(NoMergePolicy.INSTANCE);
        iwc.setUseCompoundFile(random.nextBoolean());
        final ShardId shardId = new ShardId("index", "_na_", 1);
        Store store = new Store(shardId, INDEX_SETTINGS, StoreTests.newDirectory(random()), new DummyShardLock(shardId));
        IndexWriter writer = new IndexWriter(store.directory(), iwc);
        final boolean lotsOfSegments = rarely(random);
        for (Document d : docs) {
            writer.addDocument(d);
            if (lotsOfSegments && random.nextBoolean()) {
                writer.commit();
            } else if (rarely(random)) {
                writer.commit();
            }
        }
        writer.commit();
        writer.close();
        first = store.getMetadata(null);
        assertDeleteContent(store, store.directory());
        store.close();
    }
    long time = new Date().getTime();
    while (time == new Date().getTime()) {
        // bump the time
        Thread.sleep(10);
    }
    Store.MetadataSnapshot second;
    Store store;
    {
        Random random = new Random(seed);
        IndexWriterConfig iwc = new IndexWriterConfig(new MockAnalyzer(random)).setCodec(TestUtil.getDefaultCodec());
        iwc.setMergePolicy(NoMergePolicy.INSTANCE);
        iwc.setUseCompoundFile(random.nextBoolean());
        final ShardId shardId = new ShardId("index", "_na_", 1);
        store = new Store(shardId, INDEX_SETTINGS, StoreTests.newDirectory(random()), new DummyShardLock(shardId));
        IndexWriter writer = new IndexWriter(store.directory(), iwc);
        final boolean lotsOfSegments = rarely(random);
        for (Document d : docs) {
            writer.addDocument(d);
            if (lotsOfSegments && random.nextBoolean()) {
                writer.commit();
            } else if (rarely(random)) {
                writer.commit();
            }
        }
        writer.commit();
        writer.close();
        second = store.getMetadata(null);
    }
    Store.RecoveryDiff diff = first.recoveryDiff(second);
    assertThat(first.size(), equalTo(second.size()));
    for (StoreFileMetadata md : first) {
        assertThat(second.get(md.name()), notNullValue());
        // si files are different - containing timestamps etc
        assertThat(second.get(md.name()).isSame(md), equalTo(false));
    }
    assertThat(diff.different.size(), equalTo(first.size()));
    // in lucene 5 nothing is identical - we use random ids in file headers
    assertThat(diff.identical.size(), equalTo(0));
    assertThat(diff.missing, empty());
    // check the self diff
    Store.RecoveryDiff selfDiff = first.recoveryDiff(first);
    assertThat(selfDiff.identical.size(), equalTo(first.size()));
    assertThat(selfDiff.different, empty());
    assertThat(selfDiff.missing, empty());
    // lets add some deletes
    Random random = new Random(seed);
    IndexWriterConfig iwc = new IndexWriterConfig(new MockAnalyzer(random)).setCodec(TestUtil.getDefaultCodec());
    iwc.setMergePolicy(NoMergePolicy.INSTANCE);
    iwc.setUseCompoundFile(random.nextBoolean());
    iwc.setOpenMode(IndexWriterConfig.OpenMode.APPEND);
    IndexWriter writer = new IndexWriter(store.directory(), iwc);
    writer.deleteDocuments(new Term("id", Integer.toString(random().nextInt(numDocs))));
    writer.commit();
    writer.close();
    Store.MetadataSnapshot metadata = store.getMetadata(null);
    StoreFileMetadata delFile = null;
    for (StoreFileMetadata md : metadata) {
        if (md.name().endsWith(".liv")) {
            delFile = md;
            break;
        }
    }
    Store.RecoveryDiff afterDeleteDiff = metadata.recoveryDiff(second);
    if (delFile != null) {
        // segments_N + del file
        assertThat(afterDeleteDiff.identical.size(), equalTo(metadata.size() - 2));
        assertThat(afterDeleteDiff.different.size(), equalTo(0));
        assertThat(afterDeleteDiff.missing.size(), equalTo(2));
    } else {
        // an entire segment must be missing (single doc segment got dropped)
        assertThat(afterDeleteDiff.identical.size(), greaterThan(0));
        assertThat(afterDeleteDiff.different.size(), equalTo(0));
        // the commit file is different
        assertThat(afterDeleteDiff.missing.size(), equalTo(1));
    }
    // check the self diff
    selfDiff = metadata.recoveryDiff(metadata);
    assertThat(selfDiff.identical.size(), equalTo(metadata.size()));
    assertThat(selfDiff.different, empty());
    assertThat(selfDiff.missing, empty());
    // add a new commit
    iwc = new IndexWriterConfig(new MockAnalyzer(random)).setCodec(TestUtil.getDefaultCodec());
    iwc.setMergePolicy(NoMergePolicy.INSTANCE);
    // force CFS - easier to test here since we know it will add 3 files
    iwc.setUseCompoundFile(true);
    iwc.setOpenMode(IndexWriterConfig.OpenMode.APPEND);
    writer = new IndexWriter(store.directory(), iwc);
    writer.addDocument(docs.get(0));
    writer.close();
    Store.MetadataSnapshot newCommitMetadata = store.getMetadata(null);
    Store.RecoveryDiff newCommitDiff = newCommitMetadata.recoveryDiff(metadata);
    if (delFile != null) {
        // identical: everything except segments_N, del file, cfs, cfe, si for the new segment
        assertThat(newCommitDiff.identical.size(), equalTo(newCommitMetadata.size() - 5));
        // the del file must be different
        assertThat(newCommitDiff.different.size(), equalTo(1));
        assertThat(newCommitDiff.different.get(0).name(), endsWith(".liv"));
        // segments_N, cfs, cfe, si for the new segment
        assertThat(newCommitDiff.missing.size(), equalTo(4));
    } else {
        // identical: everything except segments_N, cfs, cfe, si for the new segment
        assertThat(newCommitDiff.identical.size(), equalTo(newCommitMetadata.size() - 4));
        assertThat(newCommitDiff.different.size(), equalTo(0));
        // an entire segment must be missing (single doc segment got dropped) plus the commit is different
        assertThat(newCommitDiff.missing.size(), equalTo(4));
    }
    deleteContent(store.directory());
    IOUtils.close(store);
}
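What the assertions above exercise is the three-way partition a RecoveryDiff provides: files identical on both sides, files present on both sides but with different content, and files missing from one side. A sketch of how a recovery routine might consume such a diff (variable names ours; assumes the same Store.RecoveryDiff API as the test):

// Sketch (assumption: same API as above): peer recovery can reuse files the
// diff classifies as identical and only transfer the rest from the source.
Store.RecoveryDiff diff = localMetadata.recoveryDiff(sourceMetadata);
for (StoreFileMetadata md : diff.identical) {
    // same name, length and checksum on both sides: safe to keep the local copy
}
List<StoreFileMetadata> toCopy = new ArrayList<>(diff.different);
toCopy.addAll(diff.missing);
for (StoreFileMetadata md : toCopy) {
    // must be fetched from the recovery source
}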
Use of org.elasticsearch.test.DummyShardLock in project crate by crate.
From class StoreTests, method testCanOpenIndex:
public void testCanOpenIndex() throws IOException {
    final ShardId shardId = new ShardId("index", "_na_", 1);
    IndexWriterConfig iwc = newIndexWriterConfig();
    Path tempDir = createTempDir();
    final BaseDirectoryWrapper dir = newFSDirectory(tempDir);
    assertFalse(StoreUtils.canOpenIndex(logger, tempDir, shardId, (id, l, d) -> new DummyShardLock(id)));
    IndexWriter writer = new IndexWriter(dir, iwc);
    Document doc = new Document();
    doc.add(new StringField("id", "1", random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
    writer.addDocument(doc);
    writer.commit();
    writer.close();
    assertTrue(StoreUtils.canOpenIndex(logger, tempDir, shardId, (id, l, d) -> new DummyShardLock(id)));
    Store store = new Store(shardId, INDEX_SETTINGS, dir, new DummyShardLock(shardId));
    store.markStoreCorrupted(new CorruptIndexException("foo", "bar"));
    assertFalse(StoreUtils.canOpenIndex(logger, tempDir, shardId, (id, l, d) -> new DummyShardLock(id)));
    store.close();
}
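The test relies on two behaviours: canOpenIndex reports false until a commit point exists, and markStoreCorrupted leaves a corruption marker in the directory that makes subsequent checks fail even though the segments are intact. A minimal usage sketch reusing only the calls from the test (shardDataPath is a hypothetical variable of ours):

// Sketch: probe whether a shard directory holds a readable, uncorrupted
// index before reusing it; the DummyShardLock lambda sidesteps real locking.
boolean usable = StoreUtils.canOpenIndex(logger, shardDataPath, shardId,
        (id, l, d) -> new DummyShardLock(id));
if (!usable) {
    // either no commit point exists yet, or a corruption marker was written
    // via Store#markStoreCorrupted(...)
}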
Use of org.elasticsearch.test.DummyShardLock in project crate by crate.
From class StoreTests, method testGetPendingFiles:
public void testGetPendingFiles() throws IOException {
    final ShardId shardId = new ShardId("index", "_na_", 1);
    final String testfile = "testfile";
    try (Store store = new Store(shardId, INDEX_SETTINGS, new NIOFSDirectory(createTempDir()), new DummyShardLock(shardId))) {
        store.directory().createOutput(testfile, IOContext.DEFAULT).close();
        try (IndexInput input = store.directory().openInput(testfile, IOContext.DEFAULT)) {
            store.directory().deleteFile(testfile);
            assertEquals(FilterDirectory.unwrap(store.directory()).getPendingDeletions(),
                store.directory().getPendingDeletions());
        }
    }
}
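The assertion compares pending deletions seen through Store's wrapped directory with those on the unwrapped one. A short sketch of the underlying Lucene API the test leans on (assumes Directory#getPendingDeletions, available in the Lucene versions these projects use; import java.util.Set):

// Sketch: a file deleted while a handle is still open is tracked as
// "pending" by the directory (notably on filesystems that forbid deleting
// open files). FilterDirectory.unwrap peels off Store's wrappers, so both
// views should agree - which is exactly what the test asserts.
Set<String> pending = FilterDirectory.unwrap(store.directory()).getPendingDeletions();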