Use of org.elasticsearch.test.DummyShardLock in project elasticsearch by elastic.
From the class StoreTests, method testRefCount.
public void testRefCount() throws IOException {
    final ShardId shardId = new ShardId("index", "_na_", 1);
    DirectoryService directoryService = new LuceneManagedDirectoryService(random());
    IndexSettings indexSettings = INDEX_SETTINGS;
    Store store = new Store(shardId, indexSettings, directoryService, new DummyShardLock(shardId));
    int incs = randomIntBetween(1, 100);
    for (int i = 0; i < incs; i++) {
        if (randomBoolean()) {
            store.incRef();
        } else {
            assertTrue(store.tryIncRef());
        }
        store.ensureOpen();
    }
    for (int i = 0; i < incs; i++) {
        store.decRef();
        store.ensureOpen();
    }
    store.incRef();
    store.close();
    for (int i = 0; i < incs; i++) {
        if (randomBoolean()) {
            store.incRef();
        } else {
            assertTrue(store.tryIncRef());
        }
        store.ensureOpen();
    }
    for (int i = 0; i < incs; i++) {
        store.decRef();
        store.ensureOpen();
    }
    store.decRef();
    assertThat(store.refCount(), Matchers.equalTo(0));
    assertFalse(store.tryIncRef());
    try {
        store.incRef();
        fail("expected exception");
    } catch (AlreadyClosedException ex) {
    }
    try {
        store.ensureOpen();
        fail("expected exception");
    } catch (AlreadyClosedException ex) {
    }
}
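The pattern the test exercises is the standard way to consume a ref-counted Store: take a reference before touching the underlying directory and release it in a finally block. A minimal sketch, assuming a Store instance obtained elsewhere (the variable name is illustrative):

if (store.tryIncRef()) {
    // tryIncRef() returns false once every reference has been released
    try {
        store.ensureOpen();
        // ... safe to read from store.directory() here ...
    } finally {
        // always balance the increment, even on exceptions
        store.decRef();
    }
}

Note that close() only releases the store's own internal reference: as the middle of the test shows, callers holding extra references can keep working until the final decRef() drops the count to zero, after which incRef() and ensureOpen() throw AlreadyClosedException.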
Use of org.elasticsearch.test.DummyShardLock in project elasticsearch by elastic.
From the class IndexShardTests, method testFailShard.
public void testFailShard() throws Exception {
    IndexShard shard = newStartedShard();
    final ShardPath shardPath = shard.shardPath();
    assertNotNull(shardPath);
    // fail shard
    shard.failShard("test shard fail", new CorruptIndexException("", ""));
    closeShards(shard);
    // check state file still exists
    ShardStateMetaData shardStateMetaData = load(logger, shardPath.getShardStatePath());
    assertEquals(shardStateMetaData, getShardStateMetadata(shard));
    // but index can't be opened for a failed shard
    assertThat("store index should be corrupted",
            Store.canOpenIndex(logger, shardPath.resolveIndex(), shard.shardId(),
                    (shardId, lockTimeoutMS) -> new DummyShardLock(shardId)),
            equalTo(false));
}
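What makes this probe usable in a test is that DummyShardLock is a no-op ShardLock, so no real node-level lock has to be acquired. A hedged sketch of the same check in isolation, where indexPath is an assumed java.nio.file.Path pointing at a shard's index directory and logger is assumed to be in scope:

ShardId shardId = new ShardId("index", "_na_", 0);
boolean openable = Store.canOpenIndex(logger, indexPath, shardId,
        (id, lockTimeoutMS) -> new DummyShardLock(id));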
Use of org.elasticsearch.test.DummyShardLock in project elasticsearch by elastic.
From the class StoreTests, method testRecoveryDiff.
public void testRecoveryDiff() throws IOException, InterruptedException {
    int numDocs = 2 + random().nextInt(100);
    List<Document> docs = new ArrayList<>();
    for (int i = 0; i < numDocs; i++) {
        Document doc = new Document();
        doc.add(new StringField("id", "" + i, random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
        doc.add(new TextField("body", TestUtil.randomRealisticUnicodeString(random()), random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
        doc.add(new SortedDocValuesField("dv", new BytesRef(TestUtil.randomRealisticUnicodeString(random()))));
        docs.add(doc);
    }
    long seed = random().nextLong();
    Store.MetadataSnapshot first;
    {
        Random random = new Random(seed);
        IndexWriterConfig iwc = new IndexWriterConfig(new MockAnalyzer(random)).setCodec(TestUtil.getDefaultCodec());
        iwc.setMergePolicy(NoMergePolicy.INSTANCE);
        iwc.setUseCompoundFile(random.nextBoolean());
        final ShardId shardId = new ShardId("index", "_na_", 1);
        DirectoryService directoryService = new LuceneManagedDirectoryService(random);
        Store store = new Store(shardId, INDEX_SETTINGS, directoryService, new DummyShardLock(shardId));
        IndexWriter writer = new IndexWriter(store.directory(), iwc);
        final boolean lotsOfSegments = rarely(random);
        for (Document d : docs) {
            writer.addDocument(d);
            if (lotsOfSegments && random.nextBoolean()) {
                writer.commit();
            } else if (rarely(random)) {
                writer.commit();
            }
        }
        writer.commit();
        writer.close();
        first = store.getMetadata(null);
        assertDeleteContent(store, directoryService);
        store.close();
    }
    long time = new Date().getTime();
    while (time == new Date().getTime()) {
        // bump the time
        Thread.sleep(10);
    }
    Store.MetadataSnapshot second;
    Store store;
    {
        Random random = new Random(seed);
        IndexWriterConfig iwc = new IndexWriterConfig(new MockAnalyzer(random)).setCodec(TestUtil.getDefaultCodec());
        iwc.setMergePolicy(NoMergePolicy.INSTANCE);
        iwc.setUseCompoundFile(random.nextBoolean());
        final ShardId shardId = new ShardId("index", "_na_", 1);
        DirectoryService directoryService = new LuceneManagedDirectoryService(random);
        store = new Store(shardId, INDEX_SETTINGS, directoryService, new DummyShardLock(shardId));
        IndexWriter writer = new IndexWriter(store.directory(), iwc);
        final boolean lotsOfSegments = rarely(random);
        for (Document d : docs) {
            writer.addDocument(d);
            if (lotsOfSegments && random.nextBoolean()) {
                writer.commit();
            } else if (rarely(random)) {
                writer.commit();
            }
        }
        writer.commit();
        writer.close();
        second = store.getMetadata(null);
    }
    Store.RecoveryDiff diff = first.recoveryDiff(second);
    assertThat(first.size(), equalTo(second.size()));
    for (StoreFileMetaData md : first) {
        assertThat(second.get(md.name()), notNullValue());
        // .si files are different - they contain timestamps etc.
        assertThat(second.get(md.name()).isSame(md), equalTo(false));
    }
    assertThat(diff.different.size(), equalTo(first.size()));
    // in Lucene 5 nothing is identical - we use random ids in file headers
    assertThat(diff.identical.size(), equalTo(0));
    assertThat(diff.missing, empty());
    // check the self diff
    Store.RecoveryDiff selfDiff = first.recoveryDiff(first);
    assertThat(selfDiff.identical.size(), equalTo(first.size()));
    assertThat(selfDiff.different, empty());
    assertThat(selfDiff.missing, empty());
    // let's add some deletes
    Random random = new Random(seed);
    IndexWriterConfig iwc = new IndexWriterConfig(new MockAnalyzer(random)).setCodec(TestUtil.getDefaultCodec());
    iwc.setMergePolicy(NoMergePolicy.INSTANCE);
    iwc.setUseCompoundFile(random.nextBoolean());
    iwc.setOpenMode(IndexWriterConfig.OpenMode.APPEND);
    IndexWriter writer = new IndexWriter(store.directory(), iwc);
    writer.deleteDocuments(new Term("id", Integer.toString(random().nextInt(numDocs))));
    writer.commit();
    writer.close();
    Store.MetadataSnapshot metadata = store.getMetadata(null);
    StoreFileMetaData delFile = null;
    for (StoreFileMetaData md : metadata) {
        if (md.name().endsWith(".liv")) {
            delFile = md;
            break;
        }
    }
    Store.RecoveryDiff afterDeleteDiff = metadata.recoveryDiff(second);
    if (delFile != null) {
        // segments_N + del file
        assertThat(afterDeleteDiff.identical.size(), equalTo(metadata.size() - 2));
        assertThat(afterDeleteDiff.different.size(), equalTo(0));
        assertThat(afterDeleteDiff.missing.size(), equalTo(2));
    } else {
        // an entire segment must be missing (single-doc segment got dropped)
        assertThat(afterDeleteDiff.identical.size(), greaterThan(0));
        assertThat(afterDeleteDiff.different.size(), equalTo(0));
        // the commit file is different
        assertThat(afterDeleteDiff.missing.size(), equalTo(1));
    }
    // check the self diff
    selfDiff = metadata.recoveryDiff(metadata);
    assertThat(selfDiff.identical.size(), equalTo(metadata.size()));
    assertThat(selfDiff.different, empty());
    assertThat(selfDiff.missing, empty());
    // add a new commit
    iwc = new IndexWriterConfig(new MockAnalyzer(random)).setCodec(TestUtil.getDefaultCodec());
    iwc.setMergePolicy(NoMergePolicy.INSTANCE);
    // force CFS - easier to test here since we know it will add 3 files
    iwc.setUseCompoundFile(true);
    iwc.setOpenMode(IndexWriterConfig.OpenMode.APPEND);
    writer = new IndexWriter(store.directory(), iwc);
    writer.addDocument(docs.get(0));
    writer.close();
    Store.MetadataSnapshot newCommitMetaData = store.getMetadata(null);
    Store.RecoveryDiff newCommitDiff = newCommitMetaData.recoveryDiff(metadata);
    if (delFile != null) {
        // segments_N, del file, cfs, cfe, si for the new segment
        assertThat(newCommitDiff.identical.size(), equalTo(newCommitMetaData.size() - 5));
        // the del file must be different
        assertThat(newCommitDiff.different.size(), equalTo(1));
        assertThat(newCommitDiff.different.get(0).name(), endsWith(".liv"));
        // segments_N, cfs, cfe, si for the new segment
        assertThat(newCommitDiff.missing.size(), equalTo(4));
    } else {
        // segments_N, cfs, cfe, si for the new segment
        assertThat(newCommitDiff.identical.size(), equalTo(newCommitMetaData.size() - 4));
        assertThat(newCommitDiff.different.size(), equalTo(0));
        // an entire segment must be missing (single-doc segment got dropped) plus the commit is different
        assertThat(newCommitDiff.missing.size(), equalTo(4));
    }
    deleteContent(store.directory());
    IOUtils.close(store);
}
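For context on why the three lists matter: a recovery source uses them to decide which files to transfer to the target. A hedged sketch of that consumption (the snapshot variable names are illustrative, and this is not the production recovery code):

Store.RecoveryDiff diff = sourceSnapshot.recoveryDiff(targetSnapshot);
for (StoreFileMetaData md : diff.identical) {
    // byte-identical on both sides: can be reused, nothing to copy
}
for (StoreFileMetaData md : diff.different) {
    // same name but different content/checksum: must be re-sent
}
for (StoreFileMetaData md : diff.missing) {
    // absent on the other side: must be sent in full
}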
Use of org.elasticsearch.test.DummyShardLock in project elasticsearch by elastic.
From the class StoreTests, method testMarkCorruptedOnTruncatedSegmentsFile.
public void testMarkCorruptedOnTruncatedSegmentsFile() throws IOException {
    IndexWriterConfig iwc = newIndexWriterConfig();
    final ShardId shardId = new ShardId("index", "_na_", 1);
    DirectoryService directoryService = new LuceneManagedDirectoryService(random());
    Store store = new Store(shardId, INDEX_SETTINGS, directoryService, new DummyShardLock(shardId));
    IndexWriter writer = new IndexWriter(store.directory(), iwc);
    int numDocs = 1 + random().nextInt(10);
    List<Document> docs = new ArrayList<>();
    for (int i = 0; i < numDocs; i++) {
        Document doc = new Document();
        doc.add(new StringField("id", "" + i, random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
        doc.add(new TextField("body", TestUtil.randomRealisticUnicodeString(random()), random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
        doc.add(new SortedDocValuesField("dv", new BytesRef(TestUtil.randomRealisticUnicodeString(random()))));
        docs.add(doc);
    }
    for (Document d : docs) {
        writer.addDocument(d);
    }
    writer.commit();
    writer.close();
    SegmentInfos segmentCommitInfos = store.readLastCommittedSegmentsInfo();
    store.directory().deleteFile(segmentCommitInfos.getSegmentsFileName());
    try (IndexOutput out = store.directory().createOutput(segmentCommitInfos.getSegmentsFileName(), IOContext.DEFAULT)) {
        // replace segments_N with an empty file
    }
    try {
        if (randomBoolean()) {
            store.getMetadata(null);
        } else {
            store.readLastCommittedSegmentsInfo();
        }
        fail("corrupted segments_N file");
    } catch (CorruptIndexException ex) {
        // expected
    }
    assertTrue(store.isMarkedCorrupted());
    // we have to remove the index since it's corrupted and might fail the MockDirectoryWrapper checkIndex call
    Lucene.cleanLuceneIndex(store.directory());
    store.close();
}
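Once a corruption marker has been written, later consumers of the store are expected to check it before trusting any metadata. A minimal sketch of that guard, assuming an already-open Store:

// throws CorruptIndexException if any corruption marker file is present
store.failIfCorrupted();
// non-throwing variant of the same check
boolean corrupted = store.isMarkedCorrupted();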
Use of org.elasticsearch.test.DummyShardLock in project elasticsearch by elastic.
From the class StoreTests, method testDeserializeCorruptionException.
public void testDeserializeCorruptionException() throws IOException {
    final ShardId shardId = new ShardId("index", "_na_", 1);
    // use a RAM directory so the virus scanner can't interfere with file deletion
    final Directory dir = new RAMDirectory();
    DirectoryService directoryService = new DirectoryService(shardId, INDEX_SETTINGS) {

        @Override
        public Directory newDirectory() throws IOException {
            return dir;
        }
    };
    Store store = new Store(shardId, INDEX_SETTINGS, directoryService, new DummyShardLock(shardId));
    CorruptIndexException ex = new CorruptIndexException("foo", "bar");
    store.markStoreCorrupted(ex);
    try {
        store.failIfCorrupted();
        fail("should be corrupted");
    } catch (CorruptIndexException e) {
        assertEquals(ex.getMessage(), e.getMessage());
        assertEquals(ex.toString(), e.toString());
        assertArrayEquals(ex.getStackTrace(), e.getStackTrace());
    }
    store.removeCorruptionMarker();
    assertFalse(store.isMarkedCorrupted());
    FileNotFoundException ioe = new FileNotFoundException("foobar");
    store.markStoreCorrupted(ioe);
    try {
        store.failIfCorrupted();
        fail("should be corrupted");
    } catch (CorruptIndexException e) {
        assertEquals("foobar (resource=preexisting_corruption)", e.getMessage());
        assertArrayEquals(ioe.getStackTrace(), e.getCause().getStackTrace());
    }
    store.close();
}
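Putting the pieces together, the marker lifecycle the test verifies looks like the following hedged sketch against any open Store:

store.markStoreCorrupted(new CorruptIndexException("reason", "resource"));
assertTrue(store.isMarkedCorrupted());
// failIfCorrupted() would now rethrow the persisted exception; a plain
// IOException cause (like the FileNotFoundException above) is wrapped in a
// CorruptIndexException with resource=preexisting_corruption
store.removeCorruptionMarker();
assertFalse(store.isMarkedCorrupted());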