Search in sources:

Example 1 with TestIssueLogging

Use of org.opensearch.test.junit.annotations.TestIssueLogging in project OpenSearch by opensearch-project.

From class LoggingListenerTests, method runTestCustomLevelPerMethod.

private void runTestCustomLevelPerMethod(final Class<?> clazz) throws Exception {
    LoggingListener loggingListener = new LoggingListener();
    Description suiteDescription = Description.createSuiteDescription(clazz);
    Logger xyzLogger = LogManager.getLogger("xyz");
    Logger abcLogger = LogManager.getLogger("abc");
    final Level level = LogManager.getRootLogger().getLevel();
    // before the run, both loggers inherit the root level
    assertThat(xyzLogger.getLevel(), equalTo(level));
    assertThat(abcLogger.getLevel(), equalTo(level));
    loggingListener.testRunStarted(suiteDescription);
    // testRunStarted applies class-level annotations; none take effect here, so the levels are unchanged
    assertThat(xyzLogger.getLevel(), equalTo(level));
    assertThat(abcLogger.getLevel(), equalTo(level));
    Method method = clazz.getMethod("annotatedTestMethod");
    TestLogging testLogging = method.getAnnotation(TestLogging.class);
    TestIssueLogging testIssueLogging = method.getAnnotation(TestIssueLogging.class);
    Annotation[] annotations = Stream.of(testLogging, testIssueLogging).filter(Objects::nonNull).toArray(Annotation[]::new);
    Description testDescription = Description.createTestDescription(LoggingListenerTests.class, "annotatedTestMethod", annotations);
    loggingListener.testStarted(testDescription);
    // the method-level annotation raises the "xyz" logger to TRACE for the duration of the test
    assertThat(xyzLogger.getLevel(), equalTo(Level.TRACE));
    assertThat(abcLogger.getLevel(), equalTo(level));
    loggingListener.testFinished(testDescription);
    // finishing the test restores the previous levels
    assertThat(xyzLogger.getLevel(), equalTo(level));
    assertThat(abcLogger.getLevel(), equalTo(level));
    loggingListener.testRunFinished(new Result());
    assertThat(xyzLogger.getLevel(), equalTo(level));
    assertThat(abcLogger.getLevel(), equalTo(level));
}
Also used : Description(org.junit.runner.Description) TestLogging(org.opensearch.test.junit.annotations.TestLogging) TestIssueLogging(org.opensearch.test.junit.annotations.TestIssueLogging) LoggingListener(org.opensearch.test.junit.listeners.LoggingListener) Level(org.apache.logging.log4j.Level) Method(java.lang.reflect.Method) Logger(org.apache.logging.log4j.Logger) Annotation(java.lang.annotation.Annotation) Result(org.junit.runner.Result)
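
For context, the clazz handed to runTestCustomLevelPerMethod is expected to expose an annotatedTestMethod carrying a per-method logging annotation. The concrete inner classes live in LoggingListenerTests; the sketch below is a hypothetical reconstruction, with the "xyz:TRACE" value inferred from the assertions above and a placeholder issue URL.

import org.opensearch.test.junit.annotations.TestIssueLogging;

// Hypothetical stand-in for the class passed into runTestCustomLevelPerMethod;
// the real inner classes in LoggingListenerTests may differ in name and values.
public class AnnotatedTestMethodClass {

    // "xyz:TRACE" is inferred from the assertion that the "xyz" logger is raised to
    // TRACE while the test runs; the issue URL is a placeholder for illustration.
    @TestIssueLogging(value = "xyz:TRACE", issueUrl = "https://example.org/placeholder-issue")
    public void annotatedTestMethod() {
        // body irrelevant: only the annotation matters to LoggingListener
    }
}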

Example 2 with TestIssueLogging

Use of org.opensearch.test.junit.annotations.TestIssueLogging in project OpenSearch by opensearch-project.

From class LoggingListener, method testStarted.

@Override
public void testStarted(final Description description) throws Exception {
    // pick up the per-test logging annotations (either may be null), apply the requested
    // levels, and remember the previous ones so testFinished can restore them
    final TestLogging testLogging = description.getAnnotation(TestLogging.class);
    final TestIssueLogging testIssueLogging = description.getAnnotation(TestIssueLogging.class);
    previousLoggingMap = processTestLogging(testLogging, testIssueLogging);
}
Also used : TestLogging(org.opensearch.test.junit.annotations.TestLogging) TestIssueLogging(org.opensearch.test.junit.annotations.TestIssueLogging)
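
The heavy lifting happens in processTestLogging, which is not shown on this page. Below is a minimal, hypothetical sketch of what it plausibly does, assuming both annotations carry comma-separated "logger:LEVEL" pairs in their value() element (as the usages in Examples 3 and 4 suggest). The real OpenSearch implementation also handles details such as the "_root" pseudo-logger and may be structured differently.

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

import org.apache.logging.log4j.Level;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.core.config.Configurator;
import org.opensearch.test.junit.annotations.TestIssueLogging;
import org.opensearch.test.junit.annotations.TestLogging;

final class TestLoggingSketch {

    // Collect the "logger:LEVEL" entries from whichever annotations are present.
    private static List<String> specsFrom(TestLogging testLogging, TestIssueLogging testIssueLogging) {
        List<String> specs = new ArrayList<>();
        if (testLogging != null) {
            specs.addAll(List.of(testLogging.value().split(",")));
        }
        if (testIssueLogging != null) {
            specs.addAll(List.of(testIssueLogging.value().split(",")));
        }
        return specs;
    }

    // Apply the requested levels and return the previous ones so they can be restored in testFinished.
    static Map<String, Level> processTestLogging(TestLogging testLogging, TestIssueLogging testIssueLogging) {
        Map<String, Level> previous = new HashMap<>();
        for (String spec : specsFrom(testLogging, testIssueLogging)) {
            String[] parts = spec.split(":");
            if (parts.length != 2) {
                // matches the message asserted in Example 5
                throw new IllegalArgumentException("invalid test logging annotation [" + spec + "]");
            }
            previous.put(parts[0], LogManager.getLogger(parts[0]).getLevel());
            Configurator.setLevel(parts[0], Level.valueOf(parts[1]));
        }
        return previous;
    }
}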

Example 3 with TestIssueLogging

Use of org.opensearch.test.junit.annotations.TestIssueLogging in project OpenSearch by opensearch-project.

From class SearchQueryIT, method testQuotedQueryStringWithBoost.

@TestIssueLogging(value = "org.opensearch.search.query.SearchQueryIT:DEBUG", issueUrl = "https://github.com/elastic/elasticsearch/issues/43144")
public void testQuotedQueryStringWithBoost() throws InterruptedException {
    float boost = 10.0f;
    assertAcked(prepareCreate("test").setSettings(Settings.builder().put(SETTING_NUMBER_OF_SHARDS, 1)));
    indexRandom(true, false, client().prepareIndex("test").setId("1").setSource("important", "phrase match", "less_important", "nothing important"), client().prepareIndex("test").setId("2").setSource("important", "nothing important", "less_important", "phrase match"));
    // the phrase matches the boosted "important" field only in doc 1 and the unboosted "less_important" field only in doc 2
    SearchResponse searchResponse = client().prepareSearch().setQuery(queryStringQuery("\"phrase match\"").field("important", boost).field("less_important")).get();
    assertHitCount(searchResponse, 2L);
    assertFirstHit(searchResponse, hasId("1"));
    assertSecondHit(searchResponse, hasId("2"));
    // with a single shard, the two scores should differ only by the field boost
    assertThat((double) searchResponse.getHits().getAt(0).getScore(), closeTo(boost * searchResponse.getHits().getAt(1).getScore(), .1));
}
Also used : OpenSearchAssertions.assertSearchResponse(org.opensearch.test.hamcrest.OpenSearchAssertions.assertSearchResponse) SearchResponse(org.opensearch.action.search.SearchResponse) TestIssueLogging(org.opensearch.test.junit.annotations.TestIssueLogging)
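
For reference, the annotation itself is small: it pairs a logging specification with the URL of the issue being investigated, so the elevated logging can be removed once the issue is closed. A plausible shape of its declaration, inferred from the usages on this page, is sketched below; the real declaration in org.opensearch.test.junit.annotations may differ in its retention and target details.

package org.opensearch.test.junit.annotations;

import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;

// Sketch of the annotation's likely shape: value() holds comma-separated
// "logger:LEVEL" pairs and issueUrl() points at the issue being debugged.
// Runtime retention is required for Description.getAnnotation(...) to see it.
@Retention(RetentionPolicy.RUNTIME)
@Target(ElementType.METHOD)
public @interface TestIssueLogging {
    String value();
    String issueUrl();
}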

Example 4 with TestIssueLogging

Use of org.opensearch.test.junit.annotations.TestIssueLogging in project OpenSearch by opensearch-project.

From class ClusterDisruptionIT, method testAckedIndexing.

/**
 * Tests that we do not lose documents whose indexing request was successful, under a randomly selected disruption scheme.
 * We also collect and report the types of indexing failures that occur.
 * <p>
 * This test is a superset of the tests run in the Jepsen test suite, with the exception of versioned updates.
 */
@TestIssueLogging(value = "_root:DEBUG,org.opensearch.action.bulk:TRACE,org.opensearch.action.get:TRACE," + "org.opensearch.discovery:TRACE,org.opensearch.action.support.replication:TRACE," + "org.opensearch.cluster.service:TRACE,org.opensearch.indices.recovery:TRACE," + "org.opensearch.indices.cluster:TRACE,org.opensearch.index.shard:TRACE", issueUrl = "https://github.com/elastic/elasticsearch/issues/41068")
public void testAckedIndexing() throws Exception {
    final int seconds = !(TEST_NIGHTLY && rarely()) ? 1 : 5;
    final String timeout = seconds + "s";
    final List<String> nodes = startCluster(rarely() ? 5 : 3);
    assertAcked(prepareCreate("test").setSettings(Settings.builder().put(indexSettings()).put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1 + randomInt(2)).put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, randomInt(2))));
    ensureGreen();
    ServiceDisruptionScheme disruptionScheme = addRandomDisruptionScheme();
    logger.info("disruption scheme [{}] added", disruptionScheme);
    // id -> node sent.
    final ConcurrentHashMap<String, String> ackedDocs = new ConcurrentHashMap<>();
    final AtomicBoolean stop = new AtomicBoolean(false);
    List<Thread> indexers = new ArrayList<>(nodes.size());
    List<Semaphore> semaphores = new ArrayList<>(nodes.size());
    final AtomicInteger idGenerator = new AtomicInteger(0);
    final AtomicReference<CountDownLatch> countDownLatchRef = new AtomicReference<>();
    final List<Exception> exceptedExceptions = new CopyOnWriteArrayList<>();
    final ConflictMode conflictMode = ConflictMode.randomMode();
    final List<String> fieldNames = IntStream.rangeClosed(0, randomInt(10)).mapToObj(n -> "f" + n).collect(Collectors.toList());
    logger.info("starting indexers using conflict mode " + conflictMode);
    try {
        for (final String node : nodes) {
            final Semaphore semaphore = new Semaphore(0);
            semaphores.add(semaphore);
            final Client client = client(node);
            final String name = "indexer_" + indexers.size();
            final int numPrimaries = getNumShards("test").numPrimaries;
            Thread thread = new Thread(() -> {
                while (!stop.get()) {
                    String id = null;
                    try {
                        if (!semaphore.tryAcquire(10, TimeUnit.SECONDS)) {
                            continue;
                        }
                        logger.info("[{}] Acquired semaphore and it has {} permits left", name, semaphore.availablePermits());
                        try {
                            id = Integer.toString(idGenerator.incrementAndGet());
                            int shard = Math.floorMod(Murmur3HashFunction.hash(id), numPrimaries);
                            logger.trace("[{}] indexing id [{}] through node [{}] targeting shard [{}]", name, id, node, shard);
                            IndexRequestBuilder indexRequestBuilder = client.prepareIndex("test").setId(id).setSource(Collections.singletonMap(randomFrom(fieldNames), randomNonNegativeLong()), XContentType.JSON).setTimeout(timeout);
                            if (conflictMode == ConflictMode.external) {
                                indexRequestBuilder.setVersion(randomIntBetween(1, 10)).setVersionType(VersionType.EXTERNAL);
                            } else if (conflictMode == ConflictMode.create) {
                                indexRequestBuilder.setCreate(true);
                            }
                            IndexResponse response = indexRequestBuilder.get(timeout);
                            assertThat(response.getResult(), is(oneOf(CREATED, UPDATED)));
                            ackedDocs.put(id, node);
                            logger.trace("[{}] indexed id [{}] through node [{}], response [{}]", name, id, node, response);
                        } catch (OpenSearchException e) {
                            exceptedExceptions.add(e);
                            final String docId = id;
                            logger.trace(() -> new ParameterizedMessage("[{}] failed id [{}] through node [{}]", name, docId, node), e);
                        } finally {
                            countDownLatchRef.get().countDown();
                            logger.trace("[{}] decreased counter : {}", name, countDownLatchRef.get().getCount());
                        }
                    } catch (InterruptedException e) {
                    // fine - semaphore interrupt
                    } catch (AssertionError | Exception e) {
                        logger.info(() -> new ParameterizedMessage("unexpected exception in background thread of [{}]", node), e);
                    }
                }
            });
            thread.setName(name);
            thread.start();
            indexers.add(thread);
        }
        int docsPerIndexer = randomInt(3);
        logger.info("indexing {} docs per indexer before partition", docsPerIndexer);
        countDownLatchRef.set(new CountDownLatch(docsPerIndexer * indexers.size()));
        for (Semaphore semaphore : semaphores) {
            semaphore.release(docsPerIndexer);
        }
        assertTrue(countDownLatchRef.get().await(1, TimeUnit.MINUTES));
        for (int iter = 1 + randomInt(2); iter > 0; iter--) {
            logger.info("starting disruptions & indexing (iteration [{}])", iter);
            disruptionScheme.startDisrupting();
            docsPerIndexer = 1 + randomInt(5);
            logger.info("indexing {} docs per indexer during partition", docsPerIndexer);
            countDownLatchRef.set(new CountDownLatch(docsPerIndexer * indexers.size()));
            Collections.shuffle(semaphores, random());
            for (Semaphore semaphore : semaphores) {
                assertThat(semaphore.availablePermits(), equalTo(0));
                semaphore.release(docsPerIndexer);
            }
            logger.info("waiting for indexing requests to complete");
            assertTrue(countDownLatchRef.get().await(docsPerIndexer * seconds * 1000 + 2000, TimeUnit.MILLISECONDS));
            logger.info("stopping disruption");
            disruptionScheme.stopDisrupting();
            for (String node : internalCluster().getNodeNames()) {
                ensureStableCluster(nodes.size(), TimeValue.timeValueMillis(disruptionScheme.expectedTimeToHeal().millis() + DISRUPTION_HEALING_OVERHEAD.millis()), true, node);
            }
            // in case of a bridge partition, shard allocation can hit its retry limit if the elected cluster manager
            // is the super-connected node and recovery source and target are on opposite sides of the bridge
            if (disruptionScheme instanceof NetworkDisruption && ((NetworkDisruption) disruptionScheme).getDisruptedLinks() instanceof Bridge) {
                assertBusy(() -> assertAcked(client().admin().cluster().prepareReroute().setRetryFailed(true)));
            }
            ensureGreen("test");
            logger.info("validating successful docs");
            assertBusy(() -> {
                for (String node : nodes) {
                    try {
                        logger.debug("validating through node [{}] ([{}] acked docs)", node, ackedDocs.size());
                        for (String id : ackedDocs.keySet()) {
                            assertTrue("doc [" + id + "] indexed via node [" + ackedDocs.get(id) + "] not found", client(node).prepareGet("test", id).setPreference("_local").get().isExists());
                        }
                    } catch (AssertionError | NoShardAvailableActionException e) {
                        throw new AssertionError(e.getMessage() + " (checked via node [" + node + "]", e);
                    }
                }
            }, 30, TimeUnit.SECONDS);
            logger.info("done validating (iteration [{}])", iter);
        }
    } finally {
        logger.info("shutting down indexers");
        stop.set(true);
        for (Thread indexer : indexers) {
            indexer.interrupt();
            indexer.join(60000);
        }
        if (exceptedExceptions.size() > 0) {
            StringBuilder sb = new StringBuilder();
            for (Exception e : exceptedExceptions) {
                sb.append("\n").append(e.getMessage());
            }
            logger.debug("Indexing exceptions during disruption: {}", sb);
        }
    }
}
Also used : IndexRequestBuilder(org.opensearch.action.index.IndexRequestBuilder) ClusterBootstrapService(org.opensearch.cluster.coordination.ClusterBootstrapService) IndexResponse(org.opensearch.action.index.IndexResponse) Matchers.not(org.hamcrest.Matchers.not) OpenSearchException(org.opensearch.OpenSearchException) CorruptIndexException(org.apache.lucene.index.CorruptIndexException) ConcurrentCollections(org.opensearch.common.util.concurrent.ConcurrentCollections) Bridge(org.opensearch.test.disruption.NetworkDisruption.Bridge) IndexShardTestCase(org.opensearch.index.shard.IndexShardTestCase) Matchers.everyItem(org.hamcrest.Matchers.everyItem) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) ActionListener(org.opensearch.action.ActionListener) GetResponse(org.opensearch.action.get.GetResponse) ShardStateAction(org.opensearch.cluster.action.shard.ShardStateAction) Client(org.opensearch.client.Client) TimeValue(org.opensearch.common.unit.TimeValue) IndicesService(org.opensearch.indices.IndicesService) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) Set(java.util.Set) Settings(org.opensearch.common.settings.Settings) ServiceDisruptionScheme(org.opensearch.test.disruption.ServiceDisruptionScheme) Collectors(java.util.stream.Collectors) CountDownLatch(java.util.concurrent.CountDownLatch) VersionType(org.opensearch.index.VersionType) List(java.util.List) UPDATED(org.opensearch.action.DocWriteResponse.Result.UPDATED) CREATED(org.opensearch.action.DocWriteResponse.Result.CREATED) Matchers.equalTo(org.hamcrest.Matchers.equalTo) LagDetector(org.opensearch.cluster.coordination.LagDetector) XContentType(org.opensearch.common.xcontent.XContentType) Matchers.is(org.hamcrest.Matchers.is) OpenSearchIntegTestCase(org.opensearch.test.OpenSearchIntegTestCase) Matchers.in(org.hamcrest.Matchers.in) CopyOnWriteArrayList(java.util.concurrent.CopyOnWriteArrayList) IntStream(java.util.stream.IntStream) IndexMetadata(org.opensearch.cluster.metadata.IndexMetadata) AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean) ParameterizedMessage(org.apache.logging.log4j.message.ParameterizedMessage) InternalTestCluster(org.opensearch.test.InternalTestCluster) AtomicReference(java.util.concurrent.atomic.AtomicReference) ArrayList(java.util.ArrayList) ClusterState(org.opensearch.cluster.ClusterState) IndexShard(org.opensearch.index.shard.IndexShard) Murmur3HashFunction(org.opensearch.cluster.routing.Murmur3HashFunction) ShardRoutingState(org.opensearch.cluster.routing.ShardRoutingState) TestIssueLogging(org.opensearch.test.junit.annotations.TestIssueLogging) OpenSearchAssertions.assertAcked(org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked) Matchers.oneOf(org.hamcrest.Matchers.oneOf) Matchers.greaterThanOrEqualTo(org.hamcrest.Matchers.greaterThanOrEqualTo) Semaphore(java.util.concurrent.Semaphore) ShardRouting(org.opensearch.cluster.routing.ShardRouting) TimeUnit(java.util.concurrent.TimeUnit) TwoPartitions(org.opensearch.test.disruption.NetworkDisruption.TwoPartitions) NetworkDisruption(org.opensearch.test.disruption.NetworkDisruption) NoShardAvailableActionException(org.opensearch.action.NoShardAvailableActionException) Collections(java.util.Collections)
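
The test also relies on a ConflictMode helper that is not reproduced on this page. Based on how it is used above (randomMode(), external, create, plus an implicit conflict-free fallthrough), a hypothetical reconstruction could look like the following; the actual enum is defined inside ClusterDisruptionIT and uses the test framework's own randomness helpers rather than ThreadLocalRandom.

import java.util.concurrent.ThreadLocalRandom;

// Hypothetical reconstruction of the ConflictMode enum referenced in testAckedIndexing.
enum ConflictMode {
    none,      // assumed conflict-free mode: plain index requests, last write wins
    external,  // external versioning via setVersion(...) + VersionType.EXTERNAL
    create;    // create-only requests via setCreate(true)

    // The real implementation likely uses the test framework's randomFrom(...);
    // ThreadLocalRandom keeps this sketch self-contained.
    static ConflictMode randomMode() {
        ConflictMode[] modes = values();
        return modes[ThreadLocalRandom.current().nextInt(modes.length)];
    }
}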

Example 5 with TestIssueLogging

Use of org.opensearch.test.junit.annotations.TestIssueLogging in project OpenSearch by opensearch-project.

From class LoggingListenerTests, method runTestInvalidMethodTestLoggingAnnotation.

private void runTestInvalidMethodTestLoggingAnnotation(final Class<?> clazz) throws Exception {
    final LoggingListener loggingListener = new LoggingListener();
    final Description suiteDescription = Description.createSuiteDescription(clazz);
    loggingListener.testRunStarted(suiteDescription);
    final Method method = clazz.getMethod("invalidMethod");
    final TestLogging testLogging = method.getAnnotation(TestLogging.class);
    final TestIssueLogging testIssueLogging = method.getAnnotation(TestIssueLogging.class);
    final Annotation[] annotations = Stream.of(testLogging, testIssueLogging).filter(Objects::nonNull).toArray(Annotation[]::new);
    Description testDescription = Description.createTestDescription(clazz, "invalidMethod", annotations);
    final IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> loggingListener.testStarted(testDescription));
    assertThat(e.getMessage(), equalTo("invalid test logging annotation [abc:INFO:WARN]"));
}
Also used : Description(org.junit.runner.Description) TestLogging(org.opensearch.test.junit.annotations.TestLogging) TestIssueLogging(org.opensearch.test.junit.annotations.TestIssueLogging) LoggingListener(org.opensearch.test.junit.listeners.LoggingListener) Method(java.lang.reflect.Method) Annotation(java.lang.annotation.Annotation)
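
Here the clazz is expected to declare an invalidMethod whose logging annotation cannot be parsed. A hypothetical example matching the asserted error message is shown below: "abc:INFO:WARN" has three colon-separated segments where a single "logger:LEVEL" pair is expected, so LoggingListener.testStarted rejects it. The real inner class in LoggingListenerTests may differ, and the issue URL is a placeholder.

import org.opensearch.test.junit.annotations.TestIssueLogging;

// Hypothetical stand-in for the class passed into runTestInvalidMethodTestLoggingAnnotation.
public class InvalidMethodClass {

    // "abc:INFO:WARN" is not a valid "logger:LEVEL" pair, which triggers
    // IllegalArgumentException("invalid test logging annotation [abc:INFO:WARN]")
    @TestIssueLogging(value = "abc:INFO:WARN", issueUrl = "https://example.org/placeholder-issue")
    public void invalidMethod() {
    }
}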

Aggregations

TestIssueLogging (org.opensearch.test.junit.annotations.TestIssueLogging): 6
TestLogging (org.opensearch.test.junit.annotations.TestLogging): 4
Annotation (java.lang.annotation.Annotation): 3
Method (java.lang.reflect.Method): 3
Description (org.junit.runner.Description): 3
LoggingListener (org.opensearch.test.junit.listeners.LoggingListener): 3
Level (org.apache.logging.log4j.Level): 2
Logger (org.apache.logging.log4j.Logger): 2
Result (org.junit.runner.Result): 2
ArrayList (java.util.ArrayList): 1
Collections (java.util.Collections): 1
List (java.util.List): 1
Set (java.util.Set): 1
ConcurrentHashMap (java.util.concurrent.ConcurrentHashMap): 1
CopyOnWriteArrayList (java.util.concurrent.CopyOnWriteArrayList): 1
CountDownLatch (java.util.concurrent.CountDownLatch): 1
Semaphore (java.util.concurrent.Semaphore): 1
TimeUnit (java.util.concurrent.TimeUnit): 1
AtomicBoolean (java.util.concurrent.atomic.AtomicBoolean): 1
AtomicInteger (java.util.concurrent.atomic.AtomicInteger): 1