Use of org.apache.lucene.store.Directory in the elastic/elasticsearch project.
Class AnalysisModuleTests, method testRegisterHunspellDictionary.
public void testRegisterHunspellDictionary() throws Exception {
    Settings settings = Settings.builder()
            .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString())
            .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT)
            .build();
    Environment environment = new Environment(settings);
    InputStream aff = getClass().getResourceAsStream("/indices/analyze/conf_dir/hunspell/en_US/en_US.aff");
    InputStream dic = getClass().getResourceAsStream("/indices/analyze/conf_dir/hunspell/en_US/en_US.dic");
    Dictionary dictionary;
    try (Directory tmp = new SimpleFSDirectory(environment.tmpFile())) {
        dictionary = new Dictionary(tmp, "hunspell", aff, dic);
    }
    AnalysisModule module = new AnalysisModule(environment, singletonList(new AnalysisPlugin() {
        @Override
        public Map<String, Dictionary> getHunspellDictionaries() {
            return singletonMap("foo", dictionary);
        }
    }));
    assertSame(dictionary, module.getHunspellService().getDictionary("foo"));
}
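Outside of the AnalysisModule plumbing, the same Lucene Dictionary can be fed directly into a HunspellStemFilter. The following standalone sketch is hypothetical and only illustrates that usage; the en_US.aff/en_US.dic paths and the sample text are placeholders, not part of the Elasticsearch test:

import java.io.InputStream;
import java.io.StringReader;
import java.nio.file.Files;
import java.nio.file.Paths;

import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.core.WhitespaceTokenizer;
import org.apache.lucene.analysis.hunspell.Dictionary;
import org.apache.lucene.analysis.hunspell.HunspellStemFilter;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.SimpleFSDirectory;

public class HunspellStemSketch {

    public static void main(String[] args) throws Exception {
        Dictionary dictionary;
        // The Dictionary constructor needs a scratch Directory for temporary files,
        // which is what the test above satisfies with environment.tmpFile().
        try (Directory tmp = new SimpleFSDirectory(Files.createTempDirectory("hunspell"));
                InputStream aff = Files.newInputStream(Paths.get("en_US.aff"));
                InputStream dic = Files.newInputStream(Paths.get("en_US.dic"))) {
            dictionary = new Dictionary(tmp, "hunspell", aff, dic);
        }

        // Stem a couple of whitespace-separated tokens with the loaded dictionary.
        WhitespaceTokenizer tokenizer = new WhitespaceTokenizer();
        tokenizer.setReader(new StringReader("cars running"));
        try (TokenStream stream = new HunspellStemFilter(tokenizer, dictionary)) {
            CharTermAttribute term = stream.addAttribute(CharTermAttribute.class);
            stream.reset();
            while (stream.incrementToken()) {
                System.out.println(term.toString());
            }
            stream.end();
        }
    }
}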
Use of org.apache.lucene.store.Directory in the elastic/elasticsearch project.
Class StoreTests, method testCanReadOldCorruptionMarker.
public void testCanReadOldCorruptionMarker() throws IOException {
    final ShardId shardId = new ShardId("index", "_na_", 1);
    // use a RAMDirectory so that an on-disk virus scanner cannot interfere with the test
    final Directory dir = new RAMDirectory();
    DirectoryService directoryService = new DirectoryService(shardId, INDEX_SETTINGS) {
        @Override
        public Directory newDirectory() throws IOException {
            return dir;
        }
    };
    Store store = new Store(shardId, INDEX_SETTINGS, directoryService, new DummyShardLock(shardId));
    CorruptIndexException exception = new CorruptIndexException("foo", "bar");
    String uuid = Store.CORRUPTED + UUIDs.randomBase64UUID();
    try (IndexOutput output = dir.createOutput(uuid, IOContext.DEFAULT)) {
        CodecUtil.writeHeader(output, Store.CODEC, Store.VERSION_STACK_TRACE);
        output.writeString(ExceptionsHelper.detailedMessage(exception));
        output.writeString(ExceptionsHelper.stackTrace(exception));
        CodecUtil.writeFooter(output);
    }
    try {
        store.failIfCorrupted();
        fail("should be corrupted");
    } catch (CorruptIndexException e) {
        assertTrue(e.getMessage().startsWith("[index][1] Preexisting corrupted index [" + uuid + "] caused by: CorruptIndexException[foo (resource=bar)]"));
        assertTrue(e.getMessage().contains(ExceptionsHelper.stackTrace(exception)));
    }
    store.removeCorruptionMarker();
    try (IndexOutput output = dir.createOutput(uuid, IOContext.DEFAULT)) {
        CodecUtil.writeHeader(output, Store.CODEC, Store.VERSION_START);
        output.writeString(ExceptionsHelper.detailedMessage(exception));
        CodecUtil.writeFooter(output);
    }
    try {
        store.failIfCorrupted();
        fail("should be corrupted");
    } catch (CorruptIndexException e) {
        assertTrue(e.getMessage().startsWith("[index][1] Preexisting corrupted index [" + uuid + "] caused by: CorruptIndexException[foo (resource=bar)]"));
        assertFalse(e.getMessage().contains(ExceptionsHelper.stackTrace(exception)));
    }
    store.removeCorruptionMarker();
    try (IndexOutput output = dir.createOutput(uuid, IOContext.DEFAULT)) {
        // header with a version older than the oldest supported one
        CodecUtil.writeHeader(output, Store.CODEC, Store.VERSION_START - 1);
        CodecUtil.writeFooter(output);
    }
    try {
        store.failIfCorrupted();
        fail("should be too old");
    } catch (IndexFormatTooOldException e) {
        // expected
    }
    store.removeCorruptionMarker();
    try (IndexOutput output = dir.createOutput(uuid, IOContext.DEFAULT)) {
        // header with a version newer than the newest supported one
        CodecUtil.writeHeader(output, Store.CODEC, Store.VERSION + 1);
        CodecUtil.writeFooter(output);
    }
    try {
        store.failIfCorrupted();
        fail("should be too new");
    } catch (IndexFormatTooNewException e) {
        // expected
    }
    store.close();
}
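As the test shows, Store.failIfCorrupted() is driven by marker files written with Lucene's CodecUtil header/footer framing. The standalone sketch below illustrates only that framing with plain Lucene APIs: it writes a marker-like file and reads it back. The CODEC and version constants are placeholders standing in for Store.CODEC, Store.VERSION_START and Store.VERSION; this is not the Store implementation itself:

import java.io.IOException;

import org.apache.lucene.codecs.CodecUtil;
import org.apache.lucene.index.IndexFormatTooNewException;
import org.apache.lucene.index.IndexFormatTooOldException;
import org.apache.lucene.store.ChecksumIndexInput;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.IOContext;
import org.apache.lucene.store.IndexOutput;
import org.apache.lucene.store.RAMDirectory;

public class CorruptionMarkerSketch {

    // Placeholder constants; the real values live in Store.
    static final String CODEC = "sketch";
    static final int VERSION_START = 0;
    static final int VERSION = 1;

    public static void main(String[] args) throws IOException {
        Directory dir = new RAMDirectory();

        // Write a marker: header, message payload, checksummed footer.
        try (IndexOutput out = dir.createOutput("corrupted_marker", IOContext.DEFAULT)) {
            CodecUtil.writeHeader(out, CODEC, VERSION);
            out.writeString("simulated corruption message");
            CodecUtil.writeFooter(out);
        }

        // Read it back: checkHeader rejects versions outside [VERSION_START, VERSION]
        // with IndexFormatTooOldException / IndexFormatTooNewException, and checkFooter
        // verifies the trailing checksum.
        try (ChecksumIndexInput in = dir.openChecksumInput("corrupted_marker", IOContext.READONCE)) {
            int version = CodecUtil.checkHeader(in, CODEC, VERSION_START, VERSION);
            String message = in.readString();
            CodecUtil.checkFooter(in);
            System.out.println("version=" + version + " message=" + message);
        } catch (IndexFormatTooOldException | IndexFormatTooNewException e) {
            System.out.println("unsupported marker version: " + e.getMessage());
        }

        dir.close();
    }
}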
Use of org.apache.lucene.store.Directory in the elastic/elasticsearch project.
Class StoreTests, method testChecksumCorrupted.
public void testChecksumCorrupted() throws IOException {
    Directory dir = newDirectory();
    IndexOutput output = dir.createOutput("foo.bar", IOContext.DEFAULT);
    int iters = scaledRandomIntBetween(10, 100);
    for (int i = 0; i < iters; i++) {
        BytesRef bytesRef = new BytesRef(TestUtil.randomRealisticUnicodeString(random(), 10, 1024));
        output.writeBytes(bytesRef.bytes, bytesRef.offset, bytesRef.length);
    }
    output.writeInt(CodecUtil.FOOTER_MAGIC);
    output.writeInt(0);
    String checksum = Store.digestToString(output.getChecksum());
    // write a wrong checksum to the file
    output.writeLong(output.getChecksum() + 1);
    output.close();
    IndexInput indexInput = dir.openInput("foo.bar", IOContext.DEFAULT);
    indexInput.seek(0);
    BytesRef ref = new BytesRef(scaledRandomIntBetween(1, 1024));
    long length = indexInput.length();
    IndexOutput verifyingOutput = new Store.LuceneVerifyingIndexOutput(
            new StoreFileMetaData("foo1.bar", length, checksum),
            dir.createOutput("foo1.bar", IOContext.DEFAULT));
    // we write the checksum in the try / catch block below
    length -= 8;
    while (length > 0) {
        if (random().nextInt(10) == 0) {
            verifyingOutput.writeByte(indexInput.readByte());
            length--;
        } else {
            int min = (int) Math.min(length, ref.bytes.length);
            indexInput.readBytes(ref.bytes, ref.offset, min);
            verifyingOutput.writeBytes(ref.bytes, ref.offset, min);
            length -= min;
        }
    }
    try {
        BytesRef checksumBytes = new BytesRef(8);
        checksumBytes.length = 8;
        indexInput.readBytes(checksumBytes.bytes, checksumBytes.offset, checksumBytes.length);
        if (randomBoolean()) {
            verifyingOutput.writeBytes(checksumBytes.bytes, checksumBytes.offset, checksumBytes.length);
        } else {
            for (int i = 0; i < checksumBytes.length; i++) {
                verifyingOutput.writeByte(checksumBytes.bytes[i]);
            }
        }
        fail("should be a corrupted index");
    } catch (CorruptIndexException | IndexFormatTooOldException | IndexFormatTooNewException ex) {
        // ok
    }
    IOUtils.close(indexInput, verifyingOutput, dir);
}
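The verification performed by LuceneVerifyingIndexOutput ultimately rests on the CRC32 checksum Lucene stores in every file footer. Below is a Lucene-only sketch of that mechanism, under the assumption that the file ends with a well-formed footer (writeFooter appends the footer magic, an algorithm id and the running checksum); it is not the Elasticsearch verification code:

import java.io.IOException;

import org.apache.lucene.codecs.CodecUtil;
import org.apache.lucene.index.CorruptIndexException;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.IOContext;
import org.apache.lucene.store.IndexInput;
import org.apache.lucene.store.IndexOutput;
import org.apache.lucene.store.RAMDirectory;

public class FooterChecksumSketch {

    public static void main(String[] args) throws IOException {
        Directory dir = new RAMDirectory();

        // Write some payload followed by a proper Lucene footer.
        try (IndexOutput out = dir.createOutput("payload.bin", IOContext.DEFAULT)) {
            CodecUtil.writeHeader(out, "sketch", 0);
            for (int i = 0; i < 1024; i++) {
                out.writeByte((byte) i);
            }
            CodecUtil.writeFooter(out);
        }

        // retrieveChecksum reads the checksum stored in the footer; checksumEntireFile
        // recomputes the CRC over the whole file and throws CorruptIndexException if
        // the stored and recomputed values do not match.
        try (IndexInput in = dir.openInput("payload.bin", IOContext.READONCE)) {
            long stored = CodecUtil.retrieveChecksum(in);
            long recomputed = CodecUtil.checksumEntireFile(in);
            System.out.println("stored=" + stored + " recomputed=" + recomputed);
        } catch (CorruptIndexException e) {
            System.out.println("corrupted: " + e.getMessage());
        }

        dir.close();
    }
}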
Use of org.apache.lucene.store.Directory in the elastic/elasticsearch project.
Class StoreTests, method testVerifyingIndexOutputWithBogusInput.
public void testVerifyingIndexOutputWithBogusInput() throws IOException {
    Directory dir = newDirectory();
    int length = scaledRandomIntBetween(10, 1024);
    IndexOutput verifyingOutput = new Store.LuceneVerifyingIndexOutput(
            new StoreFileMetaData("foo1.bar", length, ""),
            dir.createOutput("foo1.bar", IOContext.DEFAULT));
    try {
        while (length > 0) {
            verifyingOutput.writeByte((byte) random().nextInt());
            length--;
        }
        fail("should be a corrupted index");
    } catch (CorruptIndexException | IndexFormatTooOldException | IndexFormatTooNewException ex) {
        // ok
    }
    IOUtils.close(verifyingOutput, dir);
}
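For contrast with the bogus-input case, here is a hedged sketch of the happy path: build a StoreFileMetaData from a well-formed file's real length and footer checksum, copy the bytes faithfully through a Store.LuceneVerifyingIndexOutput, and finish with Store.verify(...). The final verify call and the package-level access to LuceneVerifyingIndexOutput are assumptions (the method would hypothetically sit next to StoreTests); a faithful copy should complete without a CorruptIndexException:

// Hypothetical companion test; assumes it lives alongside StoreTests so that
// Store.LuceneVerifyingIndexOutput, StoreFileMetaData and Store.verify(...) are accessible.
public void testVerifyingIndexOutputAcceptsFaithfulCopy() throws IOException {
    Directory dir = new RAMDirectory();
    // Write a source file that ends with a standard Lucene footer.
    try (IndexOutput out = dir.createOutput("source.bin", IOContext.DEFAULT)) {
        CodecUtil.writeHeader(out, "sketch", 0);
        for (int i = 0; i < 256; i++) {
            out.writeByte((byte) i);
        }
        CodecUtil.writeFooter(out);
    }
    // Capture the file's actual length and footer checksum in the metadata.
    long fileLength;
    String checksum;
    try (IndexInput in = dir.openInput("source.bin", IOContext.READONCE)) {
        fileLength = in.length();
        checksum = Store.digestToString(CodecUtil.retrieveChecksum(in));
    }
    StoreFileMetaData metadata = new StoreFileMetaData("copy.bin", fileLength, checksum);
    // Copy the bytes unchanged; a faithful copy should not trigger CorruptIndexException.
    try (IndexInput in = dir.openInput("source.bin", IOContext.READONCE);
            IndexOutput verifyingOutput = new Store.LuceneVerifyingIndexOutput(metadata,
                    dir.createOutput("copy.bin", IOContext.DEFAULT))) {
        byte[] buffer = new byte[1024];
        long remaining = in.length();
        while (remaining > 0) {
            int chunk = (int) Math.min(remaining, buffer.length);
            in.readBytes(buffer, 0, chunk);
            verifyingOutput.writeBytes(buffer, 0, chunk);
            remaining -= chunk;
        }
        // Assumed hook: confirms the expected byte count and checksum were seen.
        Store.verify(verifyingOutput);
    }
    dir.close();
}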
Use of org.apache.lucene.store.Directory in the elastic/elasticsearch project.
Class IndicesQueryCacheTests, method testStatsOnEviction.
// Make sure the cache behaves correctly when a segment that is associated
// with an empty cache gets closed. In that particular case, the eviction
// callback is called with a number of evicted entries equal to 0
// see https://github.com/elastic/elasticsearch/issues/15043
public void testStatsOnEviction() throws IOException {
    Directory dir1 = newDirectory();
    IndexWriter w1 = new IndexWriter(dir1, newIndexWriterConfig());
    w1.addDocument(new Document());
    DirectoryReader r1 = DirectoryReader.open(w1);
    w1.close();
    ShardId shard1 = new ShardId("index", "_na_", 0);
    r1 = ElasticsearchDirectoryReader.wrap(r1, shard1);
    IndexSearcher s1 = new IndexSearcher(r1);
    s1.setQueryCachingPolicy(QueryCachingPolicy.ALWAYS_CACHE);
    Directory dir2 = newDirectory();
    IndexWriter w2 = new IndexWriter(dir2, newIndexWriterConfig());
    w2.addDocument(new Document());
    DirectoryReader r2 = DirectoryReader.open(w2);
    w2.close();
    ShardId shard2 = new ShardId("index", "_na_", 1);
    r2 = ElasticsearchDirectoryReader.wrap(r2, shard2);
    IndexSearcher s2 = new IndexSearcher(r2);
    s2.setQueryCachingPolicy(QueryCachingPolicy.ALWAYS_CACHE);
    Settings settings = Settings.builder()
            .put(IndicesQueryCache.INDICES_CACHE_QUERY_COUNT_SETTING.getKey(), 10)
            .put(IndicesQueryCache.INDICES_QUERIES_CACHE_ALL_SEGMENTS_SETTING.getKey(), true)
            .build();
    IndicesQueryCache cache = new IndicesQueryCache(settings);
    s1.setQueryCache(cache);
    s2.setQueryCache(cache);
    assertEquals(1, s1.count(new DummyQuery(0)));
    for (int i = 1; i <= 20; ++i) {
        assertEquals(1, s2.count(new DummyQuery(i)));
    }
    QueryCacheStats stats1 = cache.getStats(shard1);
    assertEquals(0L, stats1.getCacheSize());
    assertEquals(1L, stats1.getCacheCount());
    // this used to fail because we were evicting an empty cache on
    // the segment from r1
    IOUtils.close(r1, dir1);
    cache.onClose(shard1);
    IOUtils.close(r2, dir2);
    cache.onClose(shard2);
    // this triggers some assertions
    cache.close();
}
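IndicesQueryCache delegates per-segment caching to Lucene's LRUQueryCache, which is where the counts, sizes and evictions reported by QueryCacheStats originate. Below is a minimal Lucene-only sketch, not the Elasticsearch wrapper: a deliberately tiny cache plus a context -> true leaves predicate (assumed to be available via the three-argument LRUQueryCache constructor, which is what the cache-all-segments setting above maps to), so that distinct queries quickly overflow the cache and evictions show up in the statistics:

import java.io.IOException;

import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.StringField;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.LRUQueryCache;
import org.apache.lucene.search.QueryCachingPolicy;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.search.TotalHitCountCollector;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.RAMDirectory;

public class QueryCacheEvictionSketch {

    public static void main(String[] args) throws IOException {
        Directory dir = new RAMDirectory();
        try (IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig())) {
            for (int i = 0; i < 10; i++) {
                Document doc = new Document();
                doc.add(new StringField("id", Integer.toString(i), Field.Store.NO));
                writer.addDocument(doc);
            }
        }

        try (DirectoryReader reader = DirectoryReader.open(dir)) {
            IndexSearcher searcher = new IndexSearcher(reader);
            // At most 2 cached queries and 1 MB of RAM, with a predicate that allows
            // caching on every segment regardless of its size.
            LRUQueryCache cache = new LRUQueryCache(2, 1024 * 1024, context -> true);
            searcher.setQueryCache(cache);
            searcher.setQueryCachingPolicy(QueryCachingPolicy.ALWAYS_CACHE);

            // Run more distinct queries than the cache can hold. TotalHitCountCollector
            // does not need scores, so each filter is eligible for caching.
            for (int i = 0; i < 10; i++) {
                TotalHitCountCollector collector = new TotalHitCountCollector();
                searcher.search(new TermQuery(new Term("id", Integer.toString(i))), collector);
            }

            System.out.println("cacheCount=" + cache.getCacheCount()
                    + " cacheSize=" + cache.getCacheSize()
                    + " evictions=" + cache.getEvictionCount());
        }
        dir.close();
    }
}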