Use of org.apache.lucene.store.FilterDirectory in project lucene-solr by apache.
The class TestBKD, method testBitFlippedOnPartition2.
/** Make sure corruption on a recursed partition is caught, when BKDWriter does get angry */
public void testBitFlippedOnPartition2() throws Exception {
  // Generate fixed data set:
  int numDocs = atLeast(10000);
  int numBytesPerDim = 4;
  int numDims = 3;
  byte[][][] docValues = new byte[numDocs][][];
  byte counter = 0;
  for (int docID = 0; docID < numDocs; docID++) {
    byte[][] values = new byte[numDims][];
    for (int dim = 0; dim < numDims; dim++) {
      values[dim] = new byte[numBytesPerDim];
      for (int i = 0; i < values[dim].length; i++) {
        values[dim][i] = counter;
        counter++;
      }
    }
    docValues[docID] = values;
  }
  try (Directory dir0 = newMockDirectory()) {
    Directory dir = new FilterDirectory(dir0) {
      boolean corrupted;

      @Override
      public IndexOutput createTempOutput(String prefix, String suffix, IOContext context) throws IOException {
        IndexOutput out = in.createTempOutput(prefix, suffix, context);
        //System.out.println("prefix=" + prefix + " suffix=" + suffix);
        if (corrupted == false && suffix.equals("bkd_left1")) {
          //System.out.println("now corrupt byte=" + x + " prefix=" + prefix + " suffix=" + suffix);
          corrupted = true;
          return new CorruptingIndexOutput(dir0, 22072, out);
        } else {
          return out;
        }
      }
    };
    Throwable t = expectThrows(CorruptIndexException.class, () -> {
      verify(dir, docValues, null, numDims, numBytesPerDim, 50, 0.1);
    });
    assertCorruptionDetected(t);
  }
}
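The pattern used here, subclassing FilterDirectory to intercept createTempOutput and swap in a corrupting IndexOutput, generalizes to other fault-injection tests. Below is a minimal sketch of the interception side; the class and field names (TempFileTracker, createdTempFiles) are invented for illustration, and it assumes Lucene's FilterDirectory and IndexOutput.getName().

// Minimal sketch (names are invented): a FilterDirectory that records every temp
// output created through it, so a test can later decide which file to tamper with.
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FilterDirectory;
import org.apache.lucene.store.IOContext;
import org.apache.lucene.store.IndexOutput;

class TempFileTracker extends FilterDirectory {
  final List<String> createdTempFiles = new ArrayList<>();

  TempFileTracker(Directory in) {
    super(in);
  }

  @Override
  public IndexOutput createTempOutput(String prefix, String suffix, IOContext context) throws IOException {
    // Delegate to the wrapped directory, but remember the generated file name.
    IndexOutput out = in.createTempOutput(prefix, suffix, context);
    createdTempFiles.add(out.getName());
    return out;
  }
}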
Use of org.apache.lucene.store.FilterDirectory in project elasticsearch by elastic.
The class DirectoryUtilsTests, method testGetLeave.
public void testGetLeave() throws IOException {
    Path file = createTempDir();
    final int iters = scaledRandomIntBetween(10, 100);
    for (int i = 0; i < iters; i++) {
        {
            BaseDirectoryWrapper dir = newFSDirectory(file);
            FSDirectory directory = DirectoryUtils.getLeaf(new FilterDirectory(dir) {
            }, FSDirectory.class, null);
            assertThat(directory, notNullValue());
            assertThat(directory, sameInstance(DirectoryUtils.getLeafDirectory(dir, null)));
            dir.close();
        }
        {
            BaseDirectoryWrapper dir = newFSDirectory(file);
            FSDirectory directory = DirectoryUtils.getLeaf(dir, FSDirectory.class, null);
            assertThat(directory, notNullValue());
            assertThat(directory, sameInstance(DirectoryUtils.getLeafDirectory(dir, null)));
            dir.close();
        }
        {
            Set<String> stringSet = Collections.emptySet();
            BaseDirectoryWrapper dir = newFSDirectory(file);
            FSDirectory directory = DirectoryUtils.getLeaf(new FileSwitchDirectory(stringSet, dir, dir, random().nextBoolean()), FSDirectory.class, null);
            assertThat(directory, notNullValue());
            assertThat(directory, sameInstance(DirectoryUtils.getLeafDirectory(dir, null)));
            dir.close();
        }
        {
            Set<String> stringSet = Collections.emptySet();
            BaseDirectoryWrapper dir = newFSDirectory(file);
            FSDirectory directory = DirectoryUtils.getLeaf(new FilterDirectory(new FileSwitchDirectory(stringSet, dir, dir, random().nextBoolean())) {
            }, FSDirectory.class, null);
            assertThat(directory, notNullValue());
            assertThat(directory, sameInstance(DirectoryUtils.getLeafDirectory(dir, null)));
            dir.close();
        }
        {
            Set<String> stringSet = Collections.emptySet();
            BaseDirectoryWrapper dir = newFSDirectory(file);
            RAMDirectory directory = DirectoryUtils.getLeaf(new FilterDirectory(new FileSwitchDirectory(stringSet, dir, dir, random().nextBoolean())) {
            }, RAMDirectory.class, null);
            assertThat(directory, nullValue());
            dir.close();
        }
    }
}
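DirectoryUtils.getLeaf is an Elasticsearch helper, but the unwrapping it relies on can be sketched with the plain Lucene API. A minimal sketch, assuming only FilterDirectory.getDelegate(); the class and method names (DirectoryLeaves, leafOf) are invented, and unlike the Elasticsearch version this sketch does not look inside FileSwitchDirectory.

// Minimal sketch: peel off FilterDirectory layers via getDelegate() until the
// requested leaf type is reached, or return null when it never is.
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FilterDirectory;

final class DirectoryLeaves {
  static <T extends Directory> T leafOf(Directory dir, Class<T> targetClass) {
    while (dir instanceof FilterDirectory && !targetClass.isInstance(dir)) {
      dir = ((FilterDirectory) dir).getDelegate();
    }
    return targetClass.isInstance(dir) ? targetClass.cast(dir) : null;
  }
}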
Use of org.apache.lucene.store.FilterDirectory in project jackrabbit-oak by apache.
The class LucenePropertyIndexTest, method createIndexCopier.
private IndexCopier createIndexCopier() {
    try {
        return new IndexCopier(executorService, temporaryFolder.getRoot()) {
            @Override
            public Directory wrapForRead(String indexPath, IndexDefinition definition, Directory remote, String dirName) throws IOException {
                Directory ret = super.wrapForRead(indexPath, definition, remote, dirName);
                corDir = getFSDirPath(ret);
                return ret;
            }

            @Override
            public Directory wrapForWrite(IndexDefinition definition, Directory remote, boolean reindexMode, String dirName) throws IOException {
                Directory ret = super.wrapForWrite(definition, remote, reindexMode, dirName);
                cowDir = getFSDirPath(ret);
                return ret;
            }

            private String getFSDirPath(Directory dir) {
                if (dir instanceof CopyOnReadDirectory) {
                    dir = ((CopyOnReadDirectory) dir).getLocal();
                }
                dir = unwrap(dir);
                if (dir instanceof FSDirectory) {
                    return ((FSDirectory) dir).getDirectory().getAbsolutePath();
                }
                return null;
            }

            private Directory unwrap(Directory dir) {
                if (dir instanceof FilterDirectory) {
                    return unwrap(((FilterDirectory) dir).getDelegate());
                }
                return dir;
            }
        };
    } catch (IOException e) {
        throw new RuntimeException(e);
    }
}
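The recursive unwrap() above is written against the Lucene version Oak targets. Newer Lucene releases also ship a static FilterDirectory.unwrap(Directory) that strips nested FilterDirectory wrappers in one call; a rough equivalent is sketched below, assuming that static helper is available (the class and method names are illustrative).

// Rough equivalent of the recursive unwrap() above, using the static helper that
// newer Lucene releases expose on FilterDirectory itself.
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;
import org.apache.lucene.store.FilterDirectory;

final class FSDirResolver {
  static FSDirectory toFSDirectory(Directory dir) {
    Directory leaf = FilterDirectory.unwrap(dir);
    return leaf instanceof FSDirectory ? (FSDirectory) leaf : null;
  }
}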
Use of org.apache.lucene.store.FilterDirectory in project jackrabbit-oak by apache.
The class IndexCopierTest, method cowConcurrentAccess.
/**
 * Test the interaction between COR and COW using the same underlying directory
 */
@Test
public void cowConcurrentAccess() throws Exception {
    CollectingExecutor executor = new CollectingExecutor();
    ExecutorService executorService = Executors.newFixedThreadPool(2);
    executor.setForwardingExecutor(executorService);
    Directory baseDir = new CloseSafeDir();
    IndexDefinition defn = new IndexDefinition(root, builder.getNodeState(), indexPath);
    IndexCopier copier = new RAMIndexCopier(baseDir, executor, getWorkDir(), true);
    Directory remote = new CloseSafeDir();
    byte[] f1 = writeFile(remote, "f1");
    Directory cor1 = copier.wrapForRead(indexPath, defn, remote, INDEX_DATA_CHILD_NAME);
    readAndAssert(cor1, "f1", f1);
    cor1.close();
    final CountDownLatch pauseCopyLatch = new CountDownLatch(1);
    Directory remote2 = new FilterDirectory(remote) {
        @Override
        public IndexOutput createOutput(String name, IOContext context) throws IOException {
            try {
                pauseCopyLatch.await();
            } catch (InterruptedException ignore) {
            }
            return super.createOutput(name, context);
        }
    };

    //Start copying a file to remote via COW
    Directory cow1 = copier.wrapForWrite(defn, remote2, false, INDEX_DATA_CHILD_NAME);
    byte[] f2 = writeFile(cow1, "f2");

    //Before the copy to remote completes, delete f1 from remote and
    //open a COR and close it such that it triggers deletion of f1
    remote.deleteFile("f1");
    Directory cor2 = copier.wrapForRead(indexPath, defn, remote, INDEX_DATA_CHILD_NAME);

    //Ensure that the deletion task submitted to the executor gets processed immediately
    executor.enableImmediateExecution();
    cor2.close();
    executor.enableDelayedExecution();
    assertFalse(baseDir.fileExists("f1"));
    assertFalse("f2 should not have been copied to remote so far", remote.fileExists("f2"));
    assertTrue("f2 should exist", baseDir.fileExists("f2"));
    pauseCopyLatch.countDown();
    cow1.close();
    assertTrue("f2 should exist", remote.fileExists("f2"));
    executorService.shutdown();
}
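The anonymous wrapper above, which parks every createOutput call on a latch, can be factored into a reusable test helper. A minimal sketch under that assumption; the class name LatchGatedDirectory is invented.

// Minimal sketch (class name invented): every createOutput call blocks until the
// test releases the latch, freezing the copy-on-write push at a known point.
import java.io.IOException;
import java.util.concurrent.CountDownLatch;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FilterDirectory;
import org.apache.lucene.store.IOContext;
import org.apache.lucene.store.IndexOutput;

class LatchGatedDirectory extends FilterDirectory {
  private final CountDownLatch gate;

  LatchGatedDirectory(Directory in, CountDownLatch gate) {
    super(in);
    this.gate = gate;
  }

  @Override
  public IndexOutput createOutput(String name, IOContext context) throws IOException {
    try {
      gate.await();
    } catch (InterruptedException e) {
      Thread.currentThread().interrupt();
    }
    return super.createOutput(name, context);
  }
}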
Use of org.apache.lucene.store.FilterDirectory in project lucene-solr by apache.
The class ConcurrentMergeScheduler, method wrapForMerge.
@Override
public Directory wrapForMerge(OneMerge merge, Directory in) {
  Thread mergeThread = Thread.currentThread();
  if (!MergeThread.class.isInstance(mergeThread)) {
    throw new AssertionError("wrapForMerge should be called from MergeThread. Current thread: " + mergeThread);
  }

  // Return a wrapped Directory which has rate-limited output.
  RateLimiter rateLimiter = ((MergeThread) mergeThread).rateLimiter;
  return new FilterDirectory(in) {
    @Override
    public IndexOutput createOutput(String name, IOContext context) throws IOException {
      ensureOpen();

      // This Directory is only supposed to be used during merging, so all writes
      // should have MERGE context, else there is a bug
      // somewhere that is failing to pass down the right IOContext:
      assert context.context == IOContext.Context.MERGE : "got context=" + context.context;

      // Because the rate limiter is bound to a particular merge thread, this method should
      // always be called from that context. Verify this.
      assert mergeThread == Thread.currentThread() : "Not the same merge thread, current=" + Thread.currentThread() + ", expected=" + mergeThread;

      return new RateLimitedIndexOutput(rateLimiter, in.createOutput(name, context));
    }
  };
}
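The same throttling idea is not limited to the merge scheduler: any writes funneled through a FilterDirectory can be wrapped in a RateLimitedIndexOutput. A sketch follows, assuming Lucene's public RateLimiter.SimpleRateLimiter; the class name ThrottledDirectory is invented.

// Sketch (class name invented): throttle every output created through this
// directory to a fixed MB/s budget, reusing the same Lucene classes as above.
import java.io.IOException;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FilterDirectory;
import org.apache.lucene.store.IOContext;
import org.apache.lucene.store.IndexOutput;
import org.apache.lucene.store.RateLimitedIndexOutput;
import org.apache.lucene.store.RateLimiter;

class ThrottledDirectory extends FilterDirectory {
  private final RateLimiter limiter;

  ThrottledDirectory(Directory in, double mbPerSec) {
    super(in);
    this.limiter = new RateLimiter.SimpleRateLimiter(mbPerSec);
  }

  @Override
  public IndexOutput createOutput(String name, IOContext context) throws IOException {
    ensureOpen();
    return new RateLimitedIndexOutput(limiter, in.createOutput(name, context));
  }
}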