Example 16 with CopyOnWriteArrayList

Use of java.util.concurrent.CopyOnWriteArrayList in project elasticsearch by elastic.

From the class RestClientMultipleHostsIntegTests, method testAsyncRequests.

public void testAsyncRequests() throws Exception {
    int numRequests = randomIntBetween(5, 20);
    final CountDownLatch latch = new CountDownLatch(numRequests);
    final List<TestResponse> responses = new CopyOnWriteArrayList<>();
    for (int i = 0; i < numRequests; i++) {
        final String method = RestClientTestUtil.randomHttpMethod(getRandom());
        //we don't test status codes that are subject to retries as they interfere with hosts being stopped
        final int statusCode = randomBoolean() ? randomOkStatusCode(getRandom()) : randomErrorNoRetryStatusCode(getRandom());
        restClient.performRequestAsync(method, "/" + statusCode, new ResponseListener() {

            @Override
            public void onSuccess(Response response) {
                responses.add(new TestResponse(method, statusCode, response));
                latch.countDown();
            }

            @Override
            public void onFailure(Exception exception) {
                responses.add(new TestResponse(method, statusCode, exception));
                latch.countDown();
            }
        });
    }
    assertTrue(latch.await(5, TimeUnit.SECONDS));
    assertEquals(numRequests, responses.size());
    for (TestResponse testResponse : responses) {
        Response response = testResponse.getResponse();
        assertEquals(testResponse.method, response.getRequestLine().getMethod());
        assertEquals(testResponse.statusCode, response.getStatusLine().getStatusCode());
        assertEquals((pathPrefix.length() > 0 ? pathPrefix : "") + "/" + testResponse.statusCode, response.getRequestLine().getUri());
    }
}
Also used: CountDownLatch (java.util.concurrent.CountDownLatch), IOException (java.io.IOException), CopyOnWriteArrayList (java.util.concurrent.CopyOnWriteArrayList)
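
The same pattern is easy to reproduce outside the Elasticsearch test suite: callbacks running on arbitrary threads append to a CopyOnWriteArrayList, and a CountDownLatch tells the waiting thread when every callback has fired. The following minimal sketch shows only that idea; the executor, task count, payloads and timeout are illustrative assumptions, not code from the project above.

import java.util.List;
import java.util.concurrent.CopyOnWriteArrayList;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

public class AsyncCollectSketch {
    public static void main(String[] args) throws InterruptedException {
        int numRequests = 10;                                   // assumed task count
        ExecutorService executor = Executors.newFixedThreadPool(4);
        CountDownLatch latch = new CountDownLatch(numRequests);
        List<String> responses = new CopyOnWriteArrayList<>();  // safe for concurrent add()

        for (int i = 0; i < numRequests; i++) {
            final int id = i;
            executor.submit(() -> {
                try {
                    responses.add("response-" + id);            // stands in for onSuccess/onFailure
                } finally {
                    latch.countDown();                          // always signal completion
                }
            });
        }

        // Once the latch is released the list can be read without further synchronization.
        if (!latch.await(5, TimeUnit.SECONDS)) {
            throw new IllegalStateException("not all callbacks completed in time");
        }
        System.out.println("collected " + responses.size() + " responses");
        executor.shutdown();
    }
}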

Example 17 with CopyOnWriteArrayList

Use of java.util.concurrent.CopyOnWriteArrayList in project crate by crate.

From the class BlobRecoverySourceHandler, method phase1.

/**
     * Perform phase1 of the recovery operations. Once this {@link SnapshotIndexCommit}
     * snapshot has been performed, no commit operations (files being fsync'd)
     * are effectively allowed on this index until all recovery phases are done.
     * <p/>
     * Phase1 examines the segment files on the target node and copies over the
     * segments that are missing. Only segments that have the same size and
     * checksum can be reused.
     */
public void phase1(final SnapshotIndexCommit snapshot, final Translog.View translogView) {
    cancellableThreads.checkForCancel();
    // Total size of segment files that are recovered
    long totalSize = 0;
    // Total size of segment files that were able to be re-used
    long existingTotalSize = 0;
    final Store store = shard.store();
    store.incRef();
    try {
        // CRATE CHANGE
        if (blobRecoveryHandler != null) {
            blobRecoveryHandler.phase1();
        }
        StopWatch stopWatch = new StopWatch().start();
        final Store.MetadataSnapshot recoverySourceMetadata;
        try {
            recoverySourceMetadata = store.getMetadata(snapshot);
        } catch (CorruptIndexException | IndexFormatTooOldException | IndexFormatTooNewException ex) {
            shard.engine().failEngine("recovery", ex);
            throw ex;
        }
        for (String name : snapshot.getFiles()) {
            final StoreFileMetaData md = recoverySourceMetadata.get(name);
            if (md == null) {
                logger.info("Snapshot differs from actual index for file: {} meta: {}", name, recoverySourceMetadata.asMap());
                throw new CorruptIndexException("Snapshot differs from actual index - maybe index was removed metadata has " + recoverySourceMetadata.asMap().size() + " files", name);
            }
        }
        // Generate a "diff" of all the identical, different, and missing
        // segment files on the target node, using the existing files on
        // the source node
        String recoverySourceSyncId = recoverySourceMetadata.getSyncId();
        String recoveryTargetSyncId = request.metadataSnapshot().getSyncId();
        final boolean recoverWithSyncId = recoverySourceSyncId != null && recoverySourceSyncId.equals(recoveryTargetSyncId);
        if (recoverWithSyncId) {
            final long numDocsTarget = request.metadataSnapshot().getNumDocs();
            final long numDocsSource = recoverySourceMetadata.getNumDocs();
            if (numDocsTarget != numDocsSource) {
                throw new IllegalStateException("try to recover " + request.shardId() + " from primary shard with sync id but number of docs differ: " + numDocsTarget + " (" + request.sourceNode().getName() + ", primary) vs " + numDocsSource + "(" + request.targetNode().getName() + ")");
            }
            // we shortcut recovery here because we have nothing to copy. but we must still start the engine on the target.
            // so we don't return here
            logger.trace("[{}][{}] skipping [phase1] to {} - identical sync id [{}] found on both source and target", indexName, shardId, request.targetNode(), recoverySourceSyncId);
        } else {
            final Store.RecoveryDiff diff = recoverySourceMetadata.recoveryDiff(request.metadataSnapshot());
            for (StoreFileMetaData md : diff.identical) {
                response.phase1ExistingFileNames.add(md.name());
                response.phase1ExistingFileSizes.add(md.length());
                existingTotalSize += md.length();
                if (logger.isTraceEnabled()) {
                    logger.trace("[{}][{}] recovery [phase1] to {}: not recovering [{}], exists in local store and has checksum [{}], size [{}]", indexName, shardId, request.targetNode(), md.name(), md.checksum(), md.length());
                }
                totalSize += md.length();
            }
            for (StoreFileMetaData md : Iterables.concat(diff.different, diff.missing)) {
                if (request.metadataSnapshot().asMap().containsKey(md.name())) {
                    logger.trace("[{}][{}] recovery [phase1] to {}: recovering [{}], exists in local store, but is different: remote [{}], local [{}]", indexName, shardId, request.targetNode(), md.name(), request.metadataSnapshot().asMap().get(md.name()), md);
                } else {
                    logger.trace("[{}][{}] recovery [phase1] to {}: recovering [{}], does not exists in remote", indexName, shardId, request.targetNode(), md.name());
                }
                response.phase1FileNames.add(md.name());
                response.phase1FileSizes.add(md.length());
                totalSize += md.length();
            }
            response.phase1TotalSize = totalSize;
            response.phase1ExistingTotalSize = existingTotalSize;
            logger.trace("[{}][{}] recovery [phase1] to {}: recovering_files [{}] with total_size [{}], reusing_files [{}] with total_size [{}]", indexName, shardId, request.targetNode(), response.phase1FileNames.size(), new ByteSizeValue(totalSize), response.phase1ExistingFileNames.size(), new ByteSizeValue(existingTotalSize));
            cancellableThreads.execute(new Interruptable() {

                @Override
                public void run() throws InterruptedException {
                    RecoveryFilesInfoRequest recoveryInfoFilesRequest = new RecoveryFilesInfoRequest(request.recoveryId(), request.shardId(), response.phase1FileNames, response.phase1FileSizes, response.phase1ExistingFileNames, response.phase1ExistingFileSizes, translogView.totalOperations());
                    transportService.submitRequest(request.targetNode(), RecoveryTarget.Actions.FILES_INFO, recoveryInfoFilesRequest, TransportRequestOptions.builder().withTimeout(recoverySettings.internalActionTimeout()).build(), EmptyTransportResponseHandler.INSTANCE_SAME).txGet();
                }
            });
            // This latch will be used to wait until all files have been transferred to the target node
            final CountDownLatch latch = new CountDownLatch(response.phase1FileNames.size());
            final CopyOnWriteArrayList<Throwable> exceptions = new CopyOnWriteArrayList<>();
            final AtomicReference<Throwable> corruptedEngine = new AtomicReference<>();
            int fileIndex = 0;
            ThreadPoolExecutor pool;
            // How many bytes we've copied since we last called RateLimiter.pause
            final AtomicLong bytesSinceLastPause = new AtomicLong();
            for (final String name : response.phase1FileNames) {
                long fileSize = response.phase1FileSizes.get(fileIndex);
                // Small files and large files are sent through separate thread
                // pools (chosen by SMALL_FILE_CUTOFF_BYTES), so each size class
                // is streamed separately.
                if (fileSize > RecoverySettings.SMALL_FILE_CUTOFF_BYTES) {
                    pool = recoverySettings.concurrentStreamPool();
                } else {
                    pool = recoverySettings.concurrentSmallFileStreamPool();
                }
                pool.execute(new AbstractRunnable() {

                    @Override
                    public void onFailure(Throwable t) {
                        // we either got rejected or the store can't be incremented / we are canceled
                        logger.debug("Failed to transfer file [" + name + "] on recovery");
                    }

                    @Override
                    public void onAfter() {
                        // Signify this file has completed by decrementing the latch
                        latch.countDown();
                    }

                    @Override
                    protected void doRun() {
                        cancellableThreads.checkForCancel();
                        store.incRef();
                        final StoreFileMetaData md = recoverySourceMetadata.get(name);
                        try (final IndexInput indexInput = store.directory().openInput(name, IOContext.READONCE)) {
                            // at least one!
                            final int BUFFER_SIZE = (int) Math.max(1, recoverySettings.fileChunkSize().getBytes());
                            final byte[] buf = new byte[BUFFER_SIZE];
                            boolean shouldCompressRequest = recoverySettings.compress();
                            if (CompressorFactory.isCompressed(indexInput)) {
                                shouldCompressRequest = false;
                            }
                            final long len = indexInput.length();
                            long readCount = 0;
                            final TransportRequestOptions requestOptions = TransportRequestOptions.builder().withCompress(shouldCompressRequest).withType(TransportRequestOptions.Type.RECOVERY).withTimeout(recoverySettings.internalActionTimeout()).build();
                            while (readCount < len) {
                                if (shard.state() == IndexShardState.CLOSED) {
                                    // check if the shard got closed on us
                                    throw new IndexShardClosedException(shard.shardId());
                                }
                                int toRead = readCount + BUFFER_SIZE > len ? (int) (len - readCount) : BUFFER_SIZE;
                                final long position = indexInput.getFilePointer();
                                // Pause using the rate limiter, if desired, to throttle the recovery
                                RateLimiter rl = recoverySettings.rateLimiter();
                                long throttleTimeInNanos = 0;
                                if (rl != null) {
                                    long bytes = bytesSinceLastPause.addAndGet(toRead);
                                    if (bytes > rl.getMinPauseCheckBytes()) {
                                        // Time to pause
                                        bytesSinceLastPause.addAndGet(-bytes);
                                        throttleTimeInNanos = rl.pause(bytes);
                                        shard.recoveryStats().addThrottleTime(throttleTimeInNanos);
                                    }
                                }
                                indexInput.readBytes(buf, 0, toRead, false);
                                final BytesArray content = new BytesArray(buf, 0, toRead);
                                readCount += toRead;
                                final boolean lastChunk = readCount == len;
                                final RecoveryFileChunkRequest fileChunkRequest = new RecoveryFileChunkRequest(request.recoveryId(), request.shardId(), md, position, content, lastChunk, translogView.totalOperations(), throttleTimeInNanos);
                                cancellableThreads.execute(new Interruptable() {

                                    @Override
                                    public void run() throws InterruptedException {
                                        // Actually send the file chunk to the target node, waiting for it to complete
                                        transportService.submitRequest(request.targetNode(), RecoveryTarget.Actions.FILE_CHUNK, fileChunkRequest, requestOptions, EmptyTransportResponseHandler.INSTANCE_SAME).txGet();
                                    }
                                });
                            }
                        } catch (Throwable e) {
                            final Throwable corruptIndexException;
                            if ((corruptIndexException = ExceptionsHelper.unwrapCorruption(e)) != null) {
                                if (store.checkIntegrityNoException(md) == false) {
                                    // we are corrupted on the primary -- fail!
                                    logger.warn("{} Corrupted file detected {} checksum mismatch", shard.shardId(), md);
                                    if (corruptedEngine.compareAndSet(null, corruptIndexException) == false) {
                                        // if we are not the first exception, add ourselves as suppressed to the main one:
                                        corruptedEngine.get().addSuppressed(e);
                                    }
                                } else {
                                    // corruption has happened on the way to replica
                                    RemoteTransportException exception = new RemoteTransportException("File corruption occurred on recovery but checksums are ok", null);
                                    exception.addSuppressed(e);
                                    // last exception first
                                    exceptions.add(0, exception);
                                    logger.warn("{} Remote file corruption on node {}, recovering {}. local checksum OK", corruptIndexException, shard.shardId(), request.targetNode(), md);
                                }
                            } else {
                                // last exceptions first
                                exceptions.add(0, e);
                            }
                        } finally {
                            store.decRef();
                        }
                    }
                });
                fileIndex++;
            }
            cancellableThreads.execute(new Interruptable() {

                @Override
                public void run() throws InterruptedException {
                    // Wait for all files that need to be transferred to finish transferring
                    latch.await();
                }
            });
            if (corruptedEngine.get() != null) {
                shard.engine().failEngine("recovery", corruptedEngine.get());
                throw corruptedEngine.get();
            } else {
                ExceptionsHelper.rethrowAndSuppress(exceptions);
            }
            cancellableThreads.execute(new Interruptable() {

                @Override
                public void run() throws InterruptedException {
                    // Send the CLEAN_FILES request; once the transferred files are in
                    // place on the target, any files not referenced by the recovered
                    // commit point are deleted
                    try {
                        transportService.submitRequest(request.targetNode(), RecoveryTarget.Actions.CLEAN_FILES, new RecoveryCleanFilesRequest(request.recoveryId(), shard.shardId(), recoverySourceMetadata, translogView.totalOperations()), TransportRequestOptions.builder().withTimeout(recoverySettings.internalActionTimeout()).build(), EmptyTransportResponseHandler.INSTANCE_SAME).txGet();
                    } catch (RemoteTransportException remoteException) {
                        final IOException corruptIndexException;
                        // The target reported corruption while finalizing the recovery
                        //   - maybe due to old segments without checksums or length only checks
                        // so verify the local (source) files to decide whether the corruption is local or remote
                        if ((corruptIndexException = ExceptionsHelper.unwrapCorruption(remoteException)) != null) {
                            try {
                                final Store.MetadataSnapshot recoverySourceMetadata = store.getMetadata(snapshot);
                                StoreFileMetaData[] metadata = Iterables.toArray(recoverySourceMetadata, StoreFileMetaData.class);
                                ArrayUtil.timSort(metadata, new Comparator<StoreFileMetaData>() {

                                    @Override
                                    public int compare(StoreFileMetaData o1, StoreFileMetaData o2) {
                                        // check small files first
                                        return Long.compare(o1.length(), o2.length());
                                    }
                                });
                                for (StoreFileMetaData md : metadata) {
                                    logger.debug("{} checking integrity for file {} after remove corruption exception", shard.shardId(), md);
                                    if (store.checkIntegrityNoException(md) == false) {
                                        // we are corrupted on the primary -- fail!
                                        shard.engine().failEngine("recovery", corruptIndexException);
                                        logger.warn("{} Corrupted file detected {} checksum mismatch", shard.shardId(), md);
                                        throw corruptIndexException;
                                    }
                                }
                            } catch (IOException ex) {
                                remoteException.addSuppressed(ex);
                                throw remoteException;
                            }
                            // corruption has happened on the way to replica
                            RemoteTransportException exception = new RemoteTransportException("File corruption occurred on recovery but checksums are ok", null);
                            exception.addSuppressed(remoteException);
                            logger.warn("{} Remote file corruption during finalization on node {}, recovering {}. local checksum OK", corruptIndexException, shard.shardId(), request.targetNode());
                            throw exception;
                        } else {
                            throw remoteException;
                        }
                    }
                }
            });
        }
        prepareTargetForTranslog(translogView);
        logger.trace("[{}][{}] recovery [phase1] to {}: took [{}]", indexName, shardId, request.targetNode(), stopWatch.totalTime());
        response.phase1Time = stopWatch.totalTime().millis();
    } catch (Throwable e) {
        throw new RecoverFilesRecoveryException(request.shardId(), response.phase1FileNames.size(), new ByteSizeValue(totalSize), e);
    } finally {
        store.decRef();
    }
}
Also used: AbstractRunnable (org.elasticsearch.common.util.concurrent.AbstractRunnable), ByteSizeValue (org.elasticsearch.common.unit.ByteSizeValue), Store (org.elasticsearch.index.store.Store), IndexFormatTooOldException (org.apache.lucene.index.IndexFormatTooOldException), StoreFileMetaData (org.elasticsearch.index.store.StoreFileMetaData), IndexInput (org.apache.lucene.store.IndexInput), TransportRequestOptions (org.elasticsearch.transport.TransportRequestOptions), RemoteTransportException (org.elasticsearch.transport.RemoteTransportException), BytesArray (org.elasticsearch.common.bytes.BytesArray), CorruptIndexException (org.apache.lucene.index.CorruptIndexException), Interruptable (org.elasticsearch.common.util.CancellableThreads.Interruptable), AtomicReference (java.util.concurrent.atomic.AtomicReference), IOException (java.io.IOException), CountDownLatch (java.util.concurrent.CountDownLatch), RateLimiter (org.apache.lucene.store.RateLimiter), StopWatch (org.elasticsearch.common.StopWatch), AtomicLong (java.util.concurrent.atomic.AtomicLong), IndexShardClosedException (org.elasticsearch.index.shard.IndexShardClosedException), IndexFormatTooNewException (org.apache.lucene.index.IndexFormatTooNewException), ThreadPoolExecutor (java.util.concurrent.ThreadPoolExecutor), CopyOnWriteArrayList (java.util.concurrent.CopyOnWriteArrayList)
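
phase1 above combines two failure-collection idioms: worker tasks prepend exceptions to a shared CopyOnWriteArrayList (add(0, e), so the most recent failure comes first), while a single "primary" failure is claimed once with AtomicReference.compareAndSet and later failures of the same kind are attached to it as suppressed exceptions. Below is a minimal, self-contained sketch of that idiom only; the class name, the simulated failures and the pool size are assumptions for illustration, not Crate or Elasticsearch code.

import java.util.List;
import java.util.concurrent.CopyOnWriteArrayList;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.atomic.AtomicReference;

public class FailureAggregationSketch {
    public static void main(String[] args) throws InterruptedException {
        int tasks = 8;                                              // assumed number of transfers
        ExecutorService pool = Executors.newFixedThreadPool(4);
        CountDownLatch latch = new CountDownLatch(tasks);
        List<Throwable> exceptions = new CopyOnWriteArrayList<>();
        AtomicReference<Throwable> primaryFailure = new AtomicReference<>();

        for (int i = 0; i < tasks; i++) {
            final int id = i;
            pool.execute(() -> {
                try {
                    if (id % 3 == 0) {
                        throw new IllegalStateException("simulated corruption in task " + id);
                    }
                    if (id % 4 == 0) {
                        throw new RuntimeException("simulated transfer failure in task " + id);
                    }
                } catch (IllegalStateException corruption) {
                    // claim the single primary failure; later ones become suppressed
                    if (!primaryFailure.compareAndSet(null, corruption)) {
                        primaryFailure.get().addSuppressed(corruption);
                    }
                } catch (Throwable t) {
                    exceptions.add(0, t);                           // most recent failure first
                } finally {
                    latch.countDown();                              // always signal completion
                }
            });
        }
        latch.await();
        pool.shutdown();

        if (primaryFailure.get() != null) {
            System.out.println("primary failure: " + primaryFailure.get()
                    + " (suppressed: " + primaryFailure.get().getSuppressed().length + ")");
        }
        System.out.println("other failures, newest first: " + exceptions);
    }
}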

Example 18 with CopyOnWriteArrayList

Use of java.util.concurrent.CopyOnWriteArrayList in project elasticsearch by elastic.

From the class ESIntegTestCase, method indexRandom.

/**
     * Indexes the given {@link IndexRequestBuilder} instances randomly. It shuffles the given builders and either
     * indexes them in a blocking or async fashion. This is very useful to catch problems that relate to internal document
     * ids or index segment creations. Some features might have bugs when a given document is the first or the last in a
     * segment, or if only one document is in a segment, etc. This method prevents issues like this by randomizing the index
     * layout.
     *
     * @param forceRefresh   if <tt>true</tt> all involved indices are refreshed once the documents are indexed.
     * @param dummyDocuments if <tt>true</tt> some empty dummy documents may be randomly inserted into the document list and deleted once
     *                       all documents are indexed. This is useful to produce deleted documents on the server side.
     * @param maybeFlush     if <tt>true</tt> this method may randomly execute full flushes after index operations.
     * @param builders       the documents to index.
     */
public void indexRandom(boolean forceRefresh, boolean dummyDocuments, boolean maybeFlush, List<IndexRequestBuilder> builders) throws InterruptedException, ExecutionException {
    Random random = random();
    Set<String> indicesSet = new HashSet<>();
    for (IndexRequestBuilder builder : builders) {
        indicesSet.add(builder.request().index());
    }
    Set<Tuple<String, String>> bogusIds = new HashSet<>();
    if (random.nextBoolean() && !builders.isEmpty() && dummyDocuments) {
        builders = new ArrayList<>(builders);
        final String[] indices = indicesSet.toArray(new String[indicesSet.size()]);
        // inject some bogus docs
        final int numBogusDocs = scaledRandomIntBetween(1, builders.size() * 2);
        final int unicodeLen = between(1, 10);
        for (int i = 0; i < numBogusDocs; i++) {
            String id = randomRealisticUnicodeOfLength(unicodeLen) + Integer.toString(dummmyDocIdGenerator.incrementAndGet());
            String index = RandomPicks.randomFrom(random, indices);
            bogusIds.add(new Tuple<>(index, id));
            builders.add(client().prepareIndex(index, RANDOM_BOGUS_TYPE, id).setSource("{}", XContentType.JSON));
        }
    }
    final String[] indices = indicesSet.toArray(new String[indicesSet.size()]);
    Collections.shuffle(builders, random());
    final CopyOnWriteArrayList<Tuple<IndexRequestBuilder, Exception>> errors = new CopyOnWriteArrayList<>();
    List<CountDownLatch> inFlightAsyncOperations = new ArrayList<>();
    // If you are indexing just a few documents then frequently do it one at a time.  If many then frequently in bulk.
    if (builders.size() < FREQUENT_BULK_THRESHOLD ? frequently() : builders.size() < ALWAYS_BULK_THRESHOLD ? rarely() : false) {
        if (frequently()) {
            logger.info("Index [{}] docs async: [{}] bulk: [{}]", builders.size(), true, false);
            for (IndexRequestBuilder indexRequestBuilder : builders) {
                indexRequestBuilder.execute(new PayloadLatchedActionListener<IndexResponse, IndexRequestBuilder>(indexRequestBuilder, newLatch(inFlightAsyncOperations), errors));
                postIndexAsyncActions(indices, inFlightAsyncOperations, maybeFlush);
            }
        } else {
            logger.info("Index [{}] docs async: [{}] bulk: [{}]", builders.size(), false, false);
            for (IndexRequestBuilder indexRequestBuilder : builders) {
                indexRequestBuilder.execute().actionGet();
                postIndexAsyncActions(indices, inFlightAsyncOperations, maybeFlush);
            }
        }
    } else {
        List<List<IndexRequestBuilder>> partition = eagerPartition(builders, Math.min(MAX_BULK_INDEX_REQUEST_SIZE, Math.max(1, (int) (builders.size() * randomDouble()))));
        logger.info("Index [{}] docs async: [{}] bulk: [{}] partitions [{}]", builders.size(), false, true, partition.size());
        for (List<IndexRequestBuilder> segmented : partition) {
            BulkRequestBuilder bulkBuilder = client().prepareBulk();
            for (IndexRequestBuilder indexRequestBuilder : segmented) {
                bulkBuilder.add(indexRequestBuilder);
            }
            BulkResponse actionGet = bulkBuilder.execute().actionGet();
            assertThat(actionGet.hasFailures() ? actionGet.buildFailureMessage() : "", actionGet.hasFailures(), equalTo(false));
        }
    }
    for (CountDownLatch operation : inFlightAsyncOperations) {
        operation.await();
    }
    final List<Exception> actualErrors = new ArrayList<>();
    for (Tuple<IndexRequestBuilder, Exception> tuple : errors) {
        if (ExceptionsHelper.unwrapCause(tuple.v2()) instanceof EsRejectedExecutionException) {
            // re-index if rejected
            tuple.v1().execute().actionGet();
        } else {
            actualErrors.add(tuple.v2());
        }
    }
    assertThat(actualErrors, emptyIterable());
    if (!bogusIds.isEmpty()) {
        // delete the bogus types again - it might trigger merges or at least holes in the segments and enforces deleted docs!
        for (Tuple<String, String> doc : bogusIds) {
            assertEquals("failed to delete a dummy doc [" + doc.v1() + "][" + doc.v2() + "]", DocWriteResponse.Result.DELETED, client().prepareDelete(doc.v1(), RANDOM_BOGUS_TYPE, doc.v2()).get().getResult());
        }
    }
    if (forceRefresh) {
        assertNoFailures(client().admin().indices().prepareRefresh(indices).setIndicesOptions(IndicesOptions.lenientExpandOpen()).execute().get());
    }
}
Also used: CopyOnWriteArrayList (java.util.concurrent.CopyOnWriteArrayList), ArrayList (java.util.ArrayList), Random (java.util.Random), List (java.util.List), HashSet (java.util.HashSet), EsRejectedExecutionException (org.elasticsearch.common.util.concurrent.EsRejectedExecutionException), BulkResponse (org.elasticsearch.action.bulk.BulkResponse), CountDownLatch (java.util.concurrent.CountDownLatch), IOException (java.io.IOException), ExecutionException (java.util.concurrent.ExecutionException), ElasticsearchException (org.elasticsearch.ElasticsearchException), ShardOperationFailedException (org.elasticsearch.action.ShardOperationFailedException), CreateIndexRequestBuilder (org.elasticsearch.action.admin.indices.create.CreateIndexRequestBuilder), IndexRequestBuilder (org.elasticsearch.action.index.IndexRequestBuilder), GetIndexResponse (org.elasticsearch.action.admin.indices.get.GetIndexResponse), IndexResponse (org.elasticsearch.action.index.IndexResponse), BulkRequestBuilder (org.elasticsearch.action.bulk.BulkRequestBuilder), Tuple (org.elasticsearch.common.collect.Tuple)
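
indexRandom above waits for the async operations through one CountDownLatch per request and records failures together with the request that caused them (PayloadLatchedActionListener in the real test infrastructure), so rejected requests can be retried afterwards. A rough JDK-only stand-in for that listener pattern is sketched below; the class name, the "doc-N" payloads and the simulated rejection are hypothetical, and CompletableFuture merely plays the role of the asynchronous execute() call.

import java.util.AbstractMap.SimpleEntry;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CopyOnWriteArrayList;
import java.util.concurrent.CountDownLatch;

public class LatchedListenerSketch {
    public static void main(String[] args) throws InterruptedException {
        List<Map.Entry<String, Throwable>> errors = new CopyOnWriteArrayList<>();
        List<CountDownLatch> inFlight = new ArrayList<>();

        for (int i = 0; i < 5; i++) {
            final String request = "doc-" + i;                  // stands in for an IndexRequestBuilder
            CountDownLatch latch = new CountDownLatch(1);
            inFlight.add(latch);
            CompletableFuture
                    .runAsync(() -> {
                        if (request.endsWith("3")) {            // simulate one rejected request
                            throw new RuntimeException("rejected: " + request);
                        }
                    })
                    .whenComplete((ok, failure) -> {
                        if (failure != null) {
                            errors.add(new SimpleEntry<>(request, failure));
                        }
                        latch.countDown();                      // count down on success and failure alike
                    });
        }

        for (CountDownLatch latch : inFlight) {
            latch.await();                                      // wait for every in-flight operation
        }
        errors.forEach(e -> System.out.println("failed " + e.getKey() + ": " + e.getValue()));
    }
}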

Example 19 with CopyOnWriteArrayList

Use of java.util.concurrent.CopyOnWriteArrayList in project elasticsearch by elastic.

From the class CacheTests, method testDependentKeyDeadlock.

public void testDependentKeyDeadlock() throws BrokenBarrierException, InterruptedException {
    class Key {

        private final int key;

        Key(int key) {
            this.key = key;
        }

        @Override
        public boolean equals(Object o) {
            if (this == o)
                return true;
            if (o == null || getClass() != o.getClass())
                return false;
            Key key1 = (Key) o;
            return key == key1.key;
        }

        @Override
        public int hashCode() {
            return key % 2;
        }
    }
    int numberOfThreads = randomIntBetween(2, 32);
    final Cache<Key, Integer> cache = CacheBuilder.<Key, Integer>builder().build();
    CopyOnWriteArrayList<ExecutionException> failures = new CopyOnWriteArrayList<>();
    CyclicBarrier barrier = new CyclicBarrier(1 + numberOfThreads);
    CountDownLatch deadlockLatch = new CountDownLatch(numberOfThreads);
    List<Thread> threads = new ArrayList<>();
    for (int i = 0; i < numberOfThreads; i++) {
        Thread thread = new Thread(() -> {
            try {
                try {
                    barrier.await();
                } catch (BrokenBarrierException | InterruptedException e) {
                    throw new AssertionError(e);
                }
                Random random = new Random(random().nextLong());
                for (int j = 0; j < numberOfEntries; j++) {
                    Key key = new Key(random.nextInt(numberOfEntries));
                    try {
                        cache.computeIfAbsent(key, k -> {
                            if (k.key == 0) {
                                return 0;
                            } else {
                                Integer value = cache.get(new Key(k.key / 2));
                                return value != null ? value : 0;
                            }
                        });
                    } catch (ExecutionException e) {
                        failures.add(e);
                        break;
                    }
                }
            } finally {
                // successfully avoided deadlock, release the main thread
                deadlockLatch.countDown();
            }
        });
        threads.add(thread);
        thread.start();
    }
    AtomicBoolean deadlock = new AtomicBoolean();
    assert !deadlock.get();
    // start a watchdog service
    ScheduledExecutorService scheduler = Executors.newScheduledThreadPool(1);
    scheduler.scheduleAtFixedRate(() -> {
        Set<Long> ids = threads.stream().map(t -> t.getId()).collect(Collectors.toSet());
        ThreadMXBean mxBean = ManagementFactory.getThreadMXBean();
        long[] deadlockedThreads = mxBean.findDeadlockedThreads();
        if (!deadlock.get() && deadlockedThreads != null) {
            for (long deadlockedThread : deadlockedThreads) {
                // ensure that we detected deadlock on our threads
                if (ids.contains(deadlockedThread)) {
                    deadlock.set(true);
                    // release the main test thread to fail the test
                    for (int i = 0; i < numberOfThreads; i++) {
                        deadlockLatch.countDown();
                    }
                    break;
                }
            }
        }
    }, 1, 1, TimeUnit.SECONDS);
    // everything is setup, release the hounds
    barrier.await();
    // wait for either deadlock to be detected or the threads to terminate
    deadlockLatch.await();
    // shutdown the watchdog service
    scheduler.shutdown();
    assertThat(failures, is(empty()));
    assertFalse("deadlock", deadlock.get());
}
Also used: AtomicBoolean (java.util.concurrent.atomic.AtomicBoolean), HashMap (java.util.HashMap), Random (java.util.Random), ArrayList (java.util.ArrayList), AtomicReferenceArray (java.util.concurrent.atomic.AtomicReferenceArray), HashSet (java.util.HashSet), CoreMatchers.instanceOf (org.hamcrest.CoreMatchers.instanceOf), Map (java.util.Map), ScheduledExecutorService (java.util.concurrent.ScheduledExecutorService), ESTestCase (org.elasticsearch.test.ESTestCase), ManagementFactory (java.lang.management.ManagementFactory), Before (org.junit.Before), CyclicBarrier (java.util.concurrent.CyclicBarrier), Matchers.empty (org.hamcrest.Matchers.empty), Set (java.util.Set), BrokenBarrierException (java.util.concurrent.BrokenBarrierException), ThreadMXBean (java.lang.management.ThreadMXBean), Collectors (java.util.stream.Collectors), Executors (java.util.concurrent.Executors), ExecutionException (java.util.concurrent.ExecutionException), TimeUnit (java.util.concurrent.TimeUnit), CountDownLatch (java.util.concurrent.CountDownLatch), AtomicLong (java.util.concurrent.atomic.AtomicLong), List (java.util.List), Matchers.is (org.hamcrest.Matchers.is), CopyOnWriteArrayList (java.util.concurrent.CopyOnWriteArrayList)
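
The watchdog in testDependentKeyDeadlock is a reusable idea on its own: a scheduled task polls ThreadMXBean.findDeadlockedThreads() and, if any of the test's own threads are involved, releases the latch the main thread is blocked on, so a deadlock fails the test instead of hanging it forever. The sketch below reproduces just that mechanism with a deliberately deadlocked pair of daemon threads; the class name, lock objects and intervals are illustrative and not taken from CacheTests.

import java.lang.management.ManagementFactory;
import java.lang.management.ThreadMXBean;
import java.util.Set;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

public class DeadlockWatchdogSketch {
    public static void main(String[] args) throws InterruptedException {
        final Object lockA = new Object();
        final Object lockB = new Object();
        CountDownLatch done = new CountDownLatch(1);

        // Two daemon threads that deliberately deadlock on lockA/lockB.
        Thread t1 = new Thread(() -> { synchronized (lockA) { sleep(100); synchronized (lockB) { } } });
        Thread t2 = new Thread(() -> { synchronized (lockB) { sleep(100); synchronized (lockA) { } } });
        t1.setDaemon(true);
        t2.setDaemon(true);
        t1.start();
        t2.start();
        Set<Long> ids = Set.of(t1.getId(), t2.getId());

        ScheduledExecutorService scheduler = Executors.newScheduledThreadPool(1);
        scheduler.scheduleAtFixedRate(() -> {
            ThreadMXBean mxBean = ManagementFactory.getThreadMXBean();
            long[] deadlocked = mxBean.findDeadlockedThreads();     // null when no deadlock exists
            if (deadlocked != null) {
                for (long id : deadlocked) {
                    if (ids.contains(id)) {                         // only our own threads count
                        done.countDown();                           // release the waiting main thread
                        return;
                    }
                }
            }
        }, 1, 1, TimeUnit.SECONDS);

        done.await();
        System.out.println("deadlock detected by watchdog");
        scheduler.shutdownNow();
    }

    private static void sleep(long millis) {
        try { Thread.sleep(millis); } catch (InterruptedException ignored) { }
    }
}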

Example 20 with CopyOnWriteArrayList

Use of java.util.concurrent.CopyOnWriteArrayList in project elasticsearch by elastic.

From the class CacheTests, method testComputeIfAbsentCallsOnce.

public void testComputeIfAbsentCallsOnce() throws BrokenBarrierException, InterruptedException {
    int numberOfThreads = randomIntBetween(2, 32);
    final Cache<Integer, String> cache = CacheBuilder.<Integer, String>builder().build();
    AtomicReferenceArray flags = new AtomicReferenceArray(numberOfEntries);
    for (int j = 0; j < numberOfEntries; j++) {
        flags.set(j, false);
    }
    CopyOnWriteArrayList<ExecutionException> failures = new CopyOnWriteArrayList<>();
    CyclicBarrier barrier = new CyclicBarrier(1 + numberOfThreads);
    for (int i = 0; i < numberOfThreads; i++) {
        Thread thread = new Thread(() -> {
            try {
                barrier.await();
                for (int j = 0; j < numberOfEntries; j++) {
                    try {
                        cache.computeIfAbsent(j, key -> {
                            assertTrue(flags.compareAndSet(key, false, true));
                            return Integer.toString(key);
                        });
                    } catch (ExecutionException e) {
                        failures.add(e);
                        break;
                    }
                }
                barrier.await();
            } catch (BrokenBarrierException | InterruptedException e) {
                throw new AssertionError(e);
            }
        });
        thread.start();
    }
    // wait for all threads to be ready
    barrier.await();
    // wait for all threads to finish
    barrier.await();
    assertThat(failures, is(empty()));
}
Also used: BrokenBarrierException (java.util.concurrent.BrokenBarrierException), CyclicBarrier (java.util.concurrent.CyclicBarrier), AtomicReferenceArray (java.util.concurrent.atomic.AtomicReferenceArray), ExecutionException (java.util.concurrent.ExecutionException), CopyOnWriteArrayList (java.util.concurrent.CopyOnWriteArrayList)
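
testComputeIfAbsentCallsOnce exercises the internal org.elasticsearch.common.cache.Cache, which is not available outside the code base. The sketch below demonstrates the same test structure against java.util.concurrent.ConcurrentHashMap, whose computeIfAbsent likewise applies the mapping function at most once per key: the two barrier.await() calls line the worker threads up at the start and the end, and a CopyOnWriteArrayList collects any failures for the main thread to check afterwards. Thread counts and entry counts are illustrative assumptions.

import java.util.List;
import java.util.concurrent.BrokenBarrierException;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.CopyOnWriteArrayList;
import java.util.concurrent.CyclicBarrier;
import java.util.concurrent.atomic.AtomicBoolean;

public class ComputeOnceSketch {
    public static void main(String[] args) throws Exception {
        int numberOfThreads = 8;                                     // assumed
        int numberOfEntries = 100;                                   // assumed
        ConcurrentHashMap<Integer, String> cache = new ConcurrentHashMap<>();
        AtomicBoolean[] flags = new AtomicBoolean[numberOfEntries];
        for (int j = 0; j < numberOfEntries; j++) {
            flags[j] = new AtomicBoolean(false);
        }
        List<Throwable> failures = new CopyOnWriteArrayList<>();
        CyclicBarrier barrier = new CyclicBarrier(1 + numberOfThreads);

        for (int i = 0; i < numberOfThreads; i++) {
            new Thread(() -> {
                try {
                    barrier.await();                                 // wait until every thread is ready
                    for (int j = 0; j < numberOfEntries; j++) {
                        cache.computeIfAbsent(j, key -> {
                            // the loader must run at most once per key
                            if (!flags[key].compareAndSet(false, true)) {
                                failures.add(new AssertionError("loader ran twice for key " + key));
                            }
                            return Integer.toString(key);
                        });
                    }
                    barrier.await();                                 // signal that this thread is done
                } catch (BrokenBarrierException | InterruptedException e) {
                    failures.add(e);
                }
            }).start();
        }

        barrier.await();   // release the worker threads
        barrier.await();   // wait for all of them to finish
        if (!failures.isEmpty()) {
            throw new AssertionError("unexpected failures: " + failures);
        }
        System.out.println("computeIfAbsent invoked exactly once per key");
    }
}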

Aggregations

CopyOnWriteArrayList (java.util.concurrent.CopyOnWriteArrayList): 304
CountDownLatch (java.util.concurrent.CountDownLatch): 84
ArrayList (java.util.ArrayList): 83
List (java.util.List): 76
Test (org.junit.Test): 71
IOException (java.io.IOException): 53
AtomicInteger (java.util.concurrent.atomic.AtomicInteger): 32
HashMap (java.util.HashMap): 24
Map (java.util.Map): 24
ExecutionException (java.util.concurrent.ExecutionException): 23
LinkedList (java.util.LinkedList): 21
AtomicBoolean (java.util.concurrent.atomic.AtomicBoolean): 19
Set (java.util.Set): 18
TimeUnit (java.util.concurrent.TimeUnit): 18
ClientRequest (io.undertow.client.ClientRequest): 17
Test (org.junit.jupiter.api.Test): 17
HashSet (java.util.HashSet): 16
ExecutorService (java.util.concurrent.ExecutorService): 16
DiscoveryNode (org.elasticsearch.cluster.node.DiscoveryNode): 16
ClientConnection (io.undertow.client.ClientConnection): 15