
Example 41 with AlreadyClosedException

Use of org.apache.lucene.store.AlreadyClosedException in project elasticsearch by elastic.

The class ExceptionSerializationTests, method testWriteThrowable.

public void testWriteThrowable() throws IOException {
    final QueryShardException queryShardException = new QueryShardException(new Index("foo", "_na_"), "foobar", null);
    final UnknownException unknownException = new UnknownException("this exception is unknown", queryShardException);
    final Exception[] causes = new Exception[] {
        new IllegalStateException("foobar"),
        new IllegalArgumentException("alalaal"),
        new NullPointerException("boom"),
        new EOFException("dadada"),
        new ElasticsearchSecurityException("nono!"),
        new NumberFormatException("not a number"),
        new CorruptIndexException("baaaam booom", "this is my resource"),
        new IndexFormatTooNewException("tooo new", 1, 2, 3),
        new IndexFormatTooOldException("tooo new", 1, 2, 3),
        new IndexFormatTooOldException("tooo new", "very old version"),
        new ArrayIndexOutOfBoundsException("booom"),
        new StringIndexOutOfBoundsException("booom"),
        new FileNotFoundException("booom"),
        new NoSuchFileException("booom"),
        new AlreadyClosedException("closed!!", new NullPointerException()),
        new LockObtainFailedException("can't lock directory", new NullPointerException()),
        unknownException };
    for (final Exception cause : causes) {
        ElasticsearchException ex = new ElasticsearchException("topLevel", cause);
        ElasticsearchException deserialized = serialize(ex);
        assertEquals(deserialized.getMessage(), ex.getMessage());
        assertTrue("Expected: " + deserialized.getCause().getMessage() + " to contain: " + ex.getCause().getClass().getName() + " but it didn't", deserialized.getCause().getMessage().contains(ex.getCause().getMessage()));
        if (ex.getCause().getClass() != UnknownException.class) {
            // unknown exception is not directly mapped
            assertEquals(deserialized.getCause().getClass(), ex.getCause().getClass());
        } else {
            assertEquals(deserialized.getCause().getClass(), NotSerializableExceptionWrapper.class);
        }
        assertArrayEquals(deserialized.getStackTrace(), ex.getStackTrace());
        assertTrue(deserialized.getStackTrace().length > 1);
        assertVersionSerializable(VersionUtils.randomVersion(random()), cause);
        assertVersionSerializable(VersionUtils.randomVersion(random()), ex);
        assertVersionSerializable(VersionUtils.randomVersion(random()), deserialized);
    }
}
Also used : FileNotFoundException(java.io.FileNotFoundException) NoSuchFileException(java.nio.file.NoSuchFileException) CorruptIndexException(org.apache.lucene.index.CorruptIndexException) Index(org.elasticsearch.index.Index) AlreadyClosedException(org.apache.lucene.store.AlreadyClosedException) IndexFormatTooNewException(org.apache.lucene.index.IndexFormatTooNewException) AlreadyClosedException(org.apache.lucene.store.AlreadyClosedException) ConnectTransportException(org.elasticsearch.transport.ConnectTransportException) IllegalShardRoutingStateException(org.elasticsearch.cluster.routing.IllegalShardRoutingStateException) ActionTransportException(org.elasticsearch.transport.ActionTransportException) SearchContextMissingException(org.elasticsearch.search.SearchContextMissingException) SnapshotException(org.elasticsearch.snapshots.SnapshotException) AccessDeniedException(java.nio.file.AccessDeniedException) SearchException(org.elasticsearch.search.SearchException) LockObtainFailedException(org.apache.lucene.store.LockObtainFailedException) RecoverFilesRecoveryException(org.elasticsearch.indices.recovery.RecoverFilesRecoveryException) ClusterBlockException(org.elasticsearch.cluster.block.ClusterBlockException) NotDirectoryException(java.nio.file.NotDirectoryException) DirectoryNotEmptyException(java.nio.file.DirectoryNotEmptyException) IOException(java.io.IOException) IndexTemplateMissingException(org.elasticsearch.indices.IndexTemplateMissingException) NoSuchFileException(java.nio.file.NoSuchFileException) FailedNodeException(org.elasticsearch.action.FailedNodeException) SearchParseException(org.elasticsearch.search.SearchParseException) URISyntaxException(java.net.URISyntaxException) AliasesNotFoundException(org.elasticsearch.rest.action.admin.indices.AliasesNotFoundException) RecoveryEngineException(org.elasticsearch.index.engine.RecoveryEngineException) CorruptIndexException(org.apache.lucene.index.CorruptIndexException) TimestampParsingException(org.elasticsearch.action.TimestampParsingException) RoutingMissingException(org.elasticsearch.action.RoutingMissingException) RepositoryException(org.elasticsearch.repositories.RepositoryException) AlreadyExpiredException(org.elasticsearch.index.AlreadyExpiredException) InvalidIndexTemplateException(org.elasticsearch.indices.InvalidIndexTemplateException) QueryShardException(org.elasticsearch.index.query.QueryShardException) FileSystemException(java.nio.file.FileSystemException) ShardLockObtainFailedException(org.elasticsearch.env.ShardLockObtainFailedException) IllegalIndexShardStateException(org.elasticsearch.index.shard.IllegalIndexShardStateException) EOFException(java.io.EOFException) FileNotFoundException(java.io.FileNotFoundException) SearchPhaseExecutionException(org.elasticsearch.action.search.SearchPhaseExecutionException) ActionNotFoundTransportException(org.elasticsearch.transport.ActionNotFoundTransportException) ParsingException(org.elasticsearch.common.ParsingException) FileSystemLoopException(java.nio.file.FileSystemLoopException) IndexFormatTooOldException(org.apache.lucene.index.IndexFormatTooOldException) FileAlreadyExistsException(java.nio.file.FileAlreadyExistsException) CircuitBreakingException(org.elasticsearch.common.breaker.CircuitBreakingException) AtomicMoveNotSupportedException(java.nio.file.AtomicMoveNotSupportedException) IndexFormatTooOldException(org.apache.lucene.index.IndexFormatTooOldException) LockObtainFailedException(org.apache.lucene.store.LockObtainFailedException) 
ShardLockObtainFailedException(org.elasticsearch.env.ShardLockObtainFailedException) EOFException(java.io.EOFException) QueryShardException(org.elasticsearch.index.query.QueryShardException) IndexFormatTooNewException(org.apache.lucene.index.IndexFormatTooNewException)
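
The serialize(...) call above round-trips each exception through Elasticsearch's stream serialization, so known exception types come back as the same class while unknown ones come back wrapped in NotSerializableExceptionWrapper. A minimal sketch of such a round-trip helper is shown below; it assumes the 5.x-era StreamOutput#writeException / StreamInput#readException API, and the class and method names are illustrative rather than the test's actual helper.

import java.io.IOException;

import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.common.io.stream.BytesStreamOutput;
import org.elasticsearch.common.io.stream.StreamInput;

// Sketch only: verify writeException/readException against your Elasticsearch version.
public class ExceptionRoundTrip {

    static ElasticsearchException roundTrip(ElasticsearchException ex) throws IOException {
        try (BytesStreamOutput out = new BytesStreamOutput()) {
            // known exception types keep their class; unknown ones are wrapped on read
            out.writeException(ex);
            try (StreamInput in = out.bytes().streamInput()) {
                return in.readException();
            }
        }
    }
}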

Example 42 with AlreadyClosedException

Use of org.apache.lucene.store.AlreadyClosedException in project elasticsearch by elastic.

The class RefCountedTests, method testMultiThreaded.

public void testMultiThreaded() throws InterruptedException {
    final MyRefCounted counted = new MyRefCounted();
    Thread[] threads = new Thread[randomIntBetween(2, 5)];
    final CountDownLatch latch = new CountDownLatch(1);
    final CopyOnWriteArrayList<Exception> exceptions = new CopyOnWriteArrayList<>();
    for (int i = 0; i < threads.length; i++) {
        threads[i] = new Thread() {

            @Override
            public void run() {
                try {
                    latch.await();
                    for (int j = 0; j < 10000; j++) {
                        counted.incRef();
                        try {
                            counted.ensureOpen();
                        } finally {
                            counted.decRef();
                        }
                    }
                } catch (Exception e) {
                    exceptions.add(e);
                }
            }
        };
        threads[i].start();
    }
    latch.countDown();
    for (int i = 0; i < threads.length; i++) {
        threads[i].join();
    }
    counted.decRef();
    try {
        counted.ensureOpen();
        fail("expected to be closed");
    } catch (AlreadyClosedException ex) {
        assertThat(ex.getMessage(), equalTo("closed"));
    }
    assertThat(counted.refCount(), is(0));
    assertThat(exceptions, Matchers.emptyIterable());
}
Also used : AlreadyClosedException(org.apache.lucene.store.AlreadyClosedException) CountDownLatch(java.util.concurrent.CountDownLatch) AlreadyClosedException(org.apache.lucene.store.AlreadyClosedException) IOException(java.io.IOException) CopyOnWriteArrayList(java.util.concurrent.CopyOnWriteArrayList)
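
MyRefCounted in this test is a small ref-counting helper (presumably built on Elasticsearch's AbstractRefCounted) whose ensureOpen() throws AlreadyClosedException("closed") once the last reference is released. A rough, self-contained equivalent of that pattern could look like the following; the class and field names here are illustrative, not the test's actual helper.

import java.util.concurrent.atomic.AtomicInteger;

import org.apache.lucene.store.AlreadyClosedException;

// Illustrative only: a minimal ref-counted resource that refuses further use
// once the last reference has been released, mirroring what the test asserts.
class SimpleRefCounted {

    private final AtomicInteger refCount = new AtomicInteger(1);

    public void incRef() {
        while (true) {
            int current = refCount.get();
            if (current <= 0) {
                throw new AlreadyClosedException("closed");
            }
            if (refCount.compareAndSet(current, current + 1)) {
                return;
            }
        }
    }

    public void decRef() {
        refCount.decrementAndGet();
    }

    public void ensureOpen() {
        if (refCount.get() <= 0) {
            throw new AlreadyClosedException("closed");
        }
    }

    public int refCount() {
        return refCount.get();
    }
}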

Example 43 with AlreadyClosedException

Use of org.apache.lucene.store.AlreadyClosedException in project geode by apache.

The class LuceneEventListener, method process.

protected boolean process(final List<AsyncEvent> events) {
    // Try to get a PDX instance if possible, rather than a deserialized object
    DefaultQuery.setPdxReadSerialized(true);
    Set<IndexRepository> affectedRepos = new HashSet<IndexRepository>();
    try {
        for (AsyncEvent event : events) {
            Region region = event.getRegion();
            Object key = event.getKey();
            Object callbackArgument = event.getCallbackArgument();
            IndexRepository repository = repositoryManager.getRepository(region, key, callbackArgument);
            Object value = getValue(region.getEntry(key));
            if (value != null) {
                repository.update(key, value);
            } else {
                repository.delete(key);
            }
            affectedRepos.add(repository);
        }
        for (IndexRepository repo : affectedRepos) {
            repo.commit();
        }
        return true;
    } catch (BucketNotFoundException | RegionDestroyedException | PrimaryBucketException e) {
        logger.debug("Bucket not found while saving to lucene index: " + e.getMessage(), e);
        return false;
    } catch (CacheClosedException e) {
        logger.debug("Unable to save to lucene index, cache has been closed", e);
        return false;
    } catch (AlreadyClosedException e) {
        logger.debug("Unable to commit, the lucene index is already closed", e);
        return false;
    } catch (IOException e) {
        throw new InternalGemFireError("Unable to save to lucene index", e);
    } finally {
        DefaultQuery.setPdxReadSerialized(false);
    }
}
Also used : RegionDestroyedException(org.apache.geode.cache.RegionDestroyedException) CacheClosedException(org.apache.geode.cache.CacheClosedException) AlreadyClosedException(org.apache.lucene.store.AlreadyClosedException) IOException(java.io.IOException) AsyncEvent(org.apache.geode.cache.asyncqueue.AsyncEvent) PrimaryBucketException(org.apache.geode.internal.cache.PrimaryBucketException) IndexRepository(org.apache.geode.cache.lucene.internal.repository.IndexRepository) Region(org.apache.geode.cache.Region) BucketNotFoundException(org.apache.geode.internal.cache.BucketNotFoundException) HashSet(java.util.HashSet) InternalGemFireError(org.apache.geode.InternalGemFireError)
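
The catch blocks above classify failures: moved buckets, destroyed regions, closed caches, and an already-closed Lucene index are treated as retriable (the listener returns false so the batch is redelivered), while a plain IOException is escalated as fatal. A stripped-down sketch of that retriable-versus-fatal classification around a commit is shown below; it uses plain Lucene IndexWriter and a hypothetical commitAll(...) helper rather than Geode's IndexRepository.

import java.io.IOException;
import java.util.Collection;

import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.store.AlreadyClosedException;

// Illustrative only: commit a batch of writers, returning false for retriable
// failures (caller redelivers the batch) and throwing for unrecoverable ones.
class CommitHelper {

    static boolean commitAll(Collection<IndexWriter> writers) {
        try {
            for (IndexWriter writer : writers) {
                writer.commit();
            }
            return true;
        } catch (AlreadyClosedException e) {
            // the index was closed underneath us; let the caller retry later
            return false;
        } catch (IOException e) {
            // a genuine I/O failure is not retriable here
            throw new IllegalStateException("Unable to commit lucene index", e);
        }
    }
}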

Example 44 with AlreadyClosedException

Use of org.apache.lucene.store.AlreadyClosedException in project lucene-solr by apache.

The class TestIndexWriterExceptions, method testMergeExceptionIsTragic.

public void testMergeExceptionIsTragic() throws Exception {
    MockDirectoryWrapper dir = newMockDirectory();
    final AtomicBoolean didFail = new AtomicBoolean();
    dir.failOn(new MockDirectoryWrapper.Failure() {

        @Override
        public void eval(MockDirectoryWrapper dir) throws IOException {
            if (random().nextInt(10) != 0) {
                return;
            }
            if (didFail.get()) {
                // Already failed
                return;
            }
            StackTraceElement[] trace = Thread.currentThread().getStackTrace();
            for (int i = 0; i < trace.length; i++) {
                if ("merge".equals(trace[i].getMethodName())) {
                    if (VERBOSE) {
                        System.out.println("TEST: now fail; thread=" + Thread.currentThread().getName() + " exc:");
                        new Throwable().printStackTrace(System.out);
                    }
                    didFail.set(true);
                    throw new FakeIOException();
                }
            }
        }
    });
    IndexWriterConfig iwc = newIndexWriterConfig();
    MergePolicy mp = iwc.getMergePolicy();
    if (mp instanceof TieredMergePolicy) {
        TieredMergePolicy tmp = (TieredMergePolicy) mp;
        if (tmp.getMaxMergedSegmentMB() < 0.2) {
            tmp.setMaxMergedSegmentMB(0.2);
        }
    }
    MergeScheduler ms = iwc.getMergeScheduler();
    if (ms instanceof ConcurrentMergeScheduler) {
        ((ConcurrentMergeScheduler) ms).setSuppressExceptions();
    }
    IndexWriter w = new IndexWriter(dir, iwc);
    while (true) {
        try {
            Document doc = new Document();
            doc.add(newStringField("field", "string", Field.Store.NO));
            w.addDocument(doc);
            if (random().nextInt(10) == 7) {
                // Flush new segment:
                DirectoryReader.open(w).close();
            }
        } catch (AlreadyClosedException ace) {
            // OK: e.g. CMS hit the exc in BG thread and closed the writer
            break;
        } catch (FakeIOException fioe) {
            // OK: e.g. SMS hit the exception
            break;
        }
    }
    assertNotNull(w.getTragicException());
    assertFalse(w.isOpen());
    assertTrue(didFail.get());
    if (ms instanceof ConcurrentMergeScheduler) {
        // Sneaky: CMS's merge thread will be concurrently rolling back IW due
        // to the tragedy, with this main thread, so we have to wait here
        // to ensure the rollback has finished, else MDW still sees open files:
        ((ConcurrentMergeScheduler) ms).sync();
    }
    dir.close();
}
Also used : MockDirectoryWrapper(org.apache.lucene.store.MockDirectoryWrapper) FakeIOException(org.apache.lucene.store.MockDirectoryWrapper.FakeIOException) IOException(java.io.IOException) AlreadyClosedException(org.apache.lucene.store.AlreadyClosedException) Document(org.apache.lucene.document.Document) AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean) FakeIOException(org.apache.lucene.store.MockDirectoryWrapper.FakeIOException)
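
The behaviour this test depends on, which also matters in application code, is that once a merge failure is recorded as a tragic event the writer closes itself: further operations fail with AlreadyClosedException and IndexWriter#getTragicException() exposes the original cause. A hedged sketch of how a caller might surface that cause:

import java.io.IOException;

import org.apache.lucene.document.Document;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.store.AlreadyClosedException;

// Illustrative only: distinguish "closed by the application" from "closed by a tragedy".
class TragicAwareIndexer {

    static void addDocument(IndexWriter writer, Document doc) throws IOException {
        try {
            writer.addDocument(doc);
        } catch (AlreadyClosedException ace) {
            Throwable tragedy = writer.getTragicException();
            if (tragedy != null) {
                // the writer closed itself after an unrecoverable error (e.g. a failed merge)
                throw new IOException("IndexWriter hit a tragic exception and is closed", tragedy);
            }
            // otherwise the writer was simply closed by the application
            throw ace;
        }
    }
}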

Example 45 with AlreadyClosedException

Use of org.apache.lucene.store.AlreadyClosedException in project lucene-solr by apache.

The class TestIndexWriterExceptions, method testNoLostDeletesOrUpdates.

    // Make sure that if we hit a transient IOException (e.g., disk
    // full), and the exception then stops (e.g., disk frees
    // up) so we can successfully close IW or open an NRT
    // reader, we don't lose any deletes or updates:
public void testNoLostDeletesOrUpdates() throws Throwable {
    int deleteCount = 0;
    int docBase = 0;
    int docCount = 0;
    MockDirectoryWrapper dir = newMockDirectory();
    final AtomicBoolean shouldFail = new AtomicBoolean();
    dir.failOn(new MockDirectoryWrapper.Failure() {

        @Override
        public void eval(MockDirectoryWrapper dir) throws IOException {
            if (shouldFail.get() == false) {
                return;
            }
            if (random().nextInt(3) != 2) {
                // Only sometimes throw the exc, so it can strike while creating
                // the file, while flushing the buffer, or on closing the file:
                return;
            }
            StackTraceElement[] trace = Thread.currentThread().getStackTrace();
            boolean sawSeal = false;
            boolean sawWrite = false;
            for (int i = 0; i < trace.length; i++) {
                if ("sealFlushedSegment".equals(trace[i].getMethodName())) {
                    sawSeal = true;
                    break;
                }
                if ("writeLiveDocs".equals(trace[i].getMethodName()) || "writeFieldUpdates".equals(trace[i].getMethodName())) {
                    sawWrite = true;
                }
            }
            // Only fail while live docs or doc-values updates are being written, not
            // while the flushed segment is sealed, else the segment is aborted and docs are lost:
            if (sawWrite && sawSeal == false) {
                if (VERBOSE) {
                    System.out.println("TEST: now fail; thread=" + Thread.currentThread().getName() + " exc:");
                    new Throwable().printStackTrace(System.out);
                }
                shouldFail.set(false);
                throw new FakeIOException();
            }
        }
    });
    RandomIndexWriter w = null;
    boolean tragic = false;
    for (int iter = 0; iter < 10 * RANDOM_MULTIPLIER; iter++) {
        int numDocs = atLeast(100);
        if (VERBOSE) {
            System.out.println("\nTEST: iter=" + iter + " numDocs=" + numDocs + " docBase=" + docBase + " delCount=" + deleteCount);
        }
        if (w == null) {
            IndexWriterConfig iwc = newIndexWriterConfig(new MockAnalyzer(random()));
            w = new RandomIndexWriter(random(), dir, iwc);
            // Since we hit exc during merging, a partial
            // forceMerge can easily return when there are still
            // too many segments in the index:
            w.setDoRandomForceMergeAssert(false);
        }
        for (int i = 0; i < numDocs; i++) {
            Document doc = new Document();
            doc.add(new StringField("id", "" + (docBase + i), Field.Store.NO));
            doc.add(new NumericDocValuesField("f", 1L));
            doc.add(new NumericDocValuesField("cf", 2L));
            doc.add(new BinaryDocValuesField("bf", TestBinaryDocValuesUpdates.toBytes(1L)));
            doc.add(new BinaryDocValuesField("bcf", TestBinaryDocValuesUpdates.toBytes(2L)));
            w.addDocument(doc);
        }
        docCount += numDocs;
        // TODO: we could make the test more evil, by letting
        // it throw more than one exc, randomly, before "recovering"
        // TODO: we could also install an infoStream and try
        // to fail in "more evil" places inside BDS
        shouldFail.set(true);
        boolean doClose = false;
        try {
            for (int i = 0; i < numDocs; i++) {
                if (random().nextInt(10) == 7) {
                    boolean fieldUpdate = random().nextBoolean();
                    int docid = docBase + i;
                    if (fieldUpdate) {
                        long value = iter;
                        if (VERBOSE) {
                            System.out.println("  update id=" + docid + " to value " + value);
                        }
                        Term idTerm = new Term("id", Integer.toString(docid));
                        if (random().nextBoolean()) {
                            // update only numeric field
                            w.updateDocValues(idTerm, new NumericDocValuesField("f", value), new NumericDocValuesField("cf", value * 2));
                        } else if (random().nextBoolean()) {
                            w.updateDocValues(idTerm, new BinaryDocValuesField("bf", TestBinaryDocValuesUpdates.toBytes(value)), new BinaryDocValuesField("bcf", TestBinaryDocValuesUpdates.toBytes(value * 2)));
                        } else {
                            w.updateDocValues(idTerm, new NumericDocValuesField("f", value), new NumericDocValuesField("cf", value * 2), new BinaryDocValuesField("bf", TestBinaryDocValuesUpdates.toBytes(value)), new BinaryDocValuesField("bcf", TestBinaryDocValuesUpdates.toBytes(value * 2)));
                        }
                    }
                    // sometimes do both deletes and updates
                    if (!fieldUpdate || random().nextBoolean()) {
                        if (VERBOSE) {
                            System.out.println("  delete id=" + docid);
                        }
                        deleteCount++;
                        w.deleteDocuments(new Term("id", "" + docid));
                    }
                }
            }
            // Trigger writeLiveDocs + writeFieldUpdates so we hit fake exc:
            IndexReader r = w.getReader();
            // Sometimes we will make it here (we only randomly
            // throw the exc):
            assertEquals(docCount - deleteCount, r.numDocs());
            r.close();
            // Sometimes close, so the disk full happens on close:
            if (random().nextBoolean()) {
                if (VERBOSE) {
                    System.out.println("  now close writer");
                }
                doClose = true;
                w.commit();
                w.close();
                w = null;
            }
        } catch (Throwable t) {
            // The FakeIOException may also arrive wrapped as the cause of another
            // exception (e.g. rethrown by the writer as a wrapped IOE), so don't fail in this case.
            if (t instanceof FakeIOException || (t.getCause() instanceof FakeIOException)) {
                // expected
                if (VERBOSE) {
                    System.out.println("TEST: hit expected IOE");
                }
                if (t instanceof AlreadyClosedException) {
                    // FakeIOExc struck during merge and writer is now closed:
                    w = null;
                    tragic = true;
                }
            } else {
                throw t;
            }
        }
        shouldFail.set(false);
        if (w != null) {
            MergeScheduler ms = w.w.getConfig().getMergeScheduler();
            if (ms instanceof ConcurrentMergeScheduler) {
                ((ConcurrentMergeScheduler) ms).sync();
            }
            if (w.w.getTragicException() != null) {
                // Tragic exc in CMS closed the writer
                w = null;
            }
        }
        IndexReader r;
        if (doClose && w != null) {
            if (VERBOSE) {
                System.out.println("  now 2nd close writer");
            }
            w.close();
            w = null;
        }
        if (w == null || random().nextBoolean()) {
            // disk" bits are good:
            if (VERBOSE) {
                System.out.println("TEST: verify against non-NRT reader");
            }
            if (w != null) {
                w.commit();
            }
            r = DirectoryReader.open(dir);
        } else {
            if (VERBOSE) {
                System.out.println("TEST: verify against NRT reader");
            }
            r = w.getReader();
        }
        if (tragic == false) {
            assertEquals(docCount - deleteCount, r.numDocs());
        }
        BytesRef scratch = new BytesRef();
        for (LeafReaderContext context : r.leaves()) {
            LeafReader reader = context.reader();
            Bits liveDocs = reader.getLiveDocs();
            NumericDocValues f = reader.getNumericDocValues("f");
            NumericDocValues cf = reader.getNumericDocValues("cf");
            BinaryDocValues bf = reader.getBinaryDocValues("bf");
            BinaryDocValues bcf = reader.getBinaryDocValues("bcf");
            for (int i = 0; i < reader.maxDoc(); i++) {
                if (liveDocs == null || liveDocs.get(i)) {
                    assertEquals(i, f.advance(i));
                    assertEquals(i, cf.advance(i));
                    assertEquals(i, bf.advance(i));
                    assertEquals(i, bcf.advance(i));
                    assertEquals("doc=" + (docBase + i), cf.longValue(), f.longValue() * 2);
                    assertEquals("doc=" + (docBase + i), TestBinaryDocValuesUpdates.getValue(bcf), TestBinaryDocValuesUpdates.getValue(bf) * 2);
                }
            }
        }
        r.close();
        // Sometimes re-use RIW, other times open new one:
        if (w != null && random().nextBoolean()) {
            if (VERBOSE) {
                System.out.println("TEST: close writer");
            }
            w.close();
            w = null;
        }
        docBase += numDocs;
    }
    if (w != null) {
        w.close();
    }
    // Final verify:
    if (tragic == false) {
        IndexReader r = DirectoryReader.open(dir);
        assertEquals(docCount - deleteCount, r.numDocs());
        r.close();
    }
    dir.close();
}
Also used : AlreadyClosedException(org.apache.lucene.store.AlreadyClosedException) Document(org.apache.lucene.document.Document) MockAnalyzer(org.apache.lucene.analysis.MockAnalyzer) SortedNumericDocValuesField(org.apache.lucene.document.SortedNumericDocValuesField) NumericDocValuesField(org.apache.lucene.document.NumericDocValuesField) BytesRef(org.apache.lucene.util.BytesRef) MockDirectoryWrapper(org.apache.lucene.store.MockDirectoryWrapper) FakeIOException(org.apache.lucene.store.MockDirectoryWrapper.FakeIOException) IOException(java.io.IOException) BinaryDocValuesField(org.apache.lucene.document.BinaryDocValuesField) AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean) FakeIOException(org.apache.lucene.store.MockDirectoryWrapper.FakeIOException) StringField(org.apache.lucene.document.StringField) Bits(org.apache.lucene.util.Bits)
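
The verification loop at the end of the test checks a simple invariant: for every live document, "cf" holds twice the value of "f" (and "bcf" twice "bf"), so a lost delete or doc-values update shows up immediately. A condensed sketch of that numeric check against a single leaf, assuming the same field names as the test, might look like this:

import java.io.IOException;

import org.apache.lucene.index.LeafReader;
import org.apache.lucene.index.NumericDocValues;
import org.apache.lucene.util.Bits;

// Illustrative only: re-check the f/cf pairing the test maintains through updates.
class PairInvariantChecker {

    static void check(LeafReader reader) throws IOException {
        Bits liveDocs = reader.getLiveDocs();
        NumericDocValues f = reader.getNumericDocValues("f");
        NumericDocValues cf = reader.getNumericDocValues("cf");
        if (f == null || cf == null) {
            throw new AssertionError("doc values fields f/cf are missing");
        }
        for (int doc = 0; doc < reader.maxDoc(); doc++) {
            if (liveDocs != null && liveDocs.get(doc) == false) {
                continue; // skip deleted documents
            }
            // both iterators must have a value for every live document
            if (f.advance(doc) != doc || cf.advance(doc) != doc) {
                throw new AssertionError("missing doc values for doc=" + doc);
            }
            if (cf.longValue() != 2 * f.longValue()) {
                throw new AssertionError("doc=" + doc + ": cf=" + cf.longValue()
                        + " but f=" + f.longValue());
            }
        }
    }
}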

Aggregations

AlreadyClosedException (org.apache.lucene.store.AlreadyClosedException) 79
IOException (java.io.IOException) 53
LockObtainFailedException (org.apache.lucene.store.LockObtainFailedException) 16
CountDownLatch (java.util.concurrent.CountDownLatch) 15
MockDirectoryWrapper (org.apache.lucene.store.MockDirectoryWrapper) 14
TranslogCorruptedException (org.elasticsearch.index.translog.TranslogCorruptedException) 13
MockAnalyzer (org.apache.lucene.analysis.MockAnalyzer) 12
AtomicBoolean (java.util.concurrent.atomic.AtomicBoolean) 11
Document (org.apache.lucene.document.Document) 11
ElasticsearchException (org.elasticsearch.ElasticsearchException) 11
ReleasableLock (org.elasticsearch.common.util.concurrent.ReleasableLock) 10
UncheckedIOException (java.io.UncheckedIOException) 9
ParsedDocument (org.elasticsearch.index.mapper.ParsedDocument) 9
EOFException (java.io.EOFException) 8
ArrayList (java.util.ArrayList) 7
FileNotFoundException (java.io.FileNotFoundException) 6
FileAlreadyExistsException (java.nio.file.FileAlreadyExistsException) 6
NoSuchFileException (java.nio.file.NoSuchFileException) 6
BrokenBarrierException (java.util.concurrent.BrokenBarrierException) 6
CopyOnWriteArrayList (java.util.concurrent.CopyOnWriteArrayList) 6