
Example 1 with Collections.singleton

Use of java.util.Collections.singleton in the Apache Kafka project.

The class TransactionalMessageCopier, method runEventLoop:

public static void runEventLoop(Namespace parsedArgs) {
    final String transactionalId = parsedArgs.getString("transactionalId");
    final String outputTopic = parsedArgs.getString("outputTopic");
    String consumerGroup = parsedArgs.getString("consumerGroup");
    final KafkaProducer<String, String> producer = createProducer(parsedArgs);
    final KafkaConsumer<String, String> consumer = createConsumer(parsedArgs);
    final AtomicLong remainingMessages = new AtomicLong(parsedArgs.getInt("maxMessages") == -1 ? Long.MAX_VALUE : parsedArgs.getInt("maxMessages"));
    boolean groupMode = parsedArgs.getBoolean("groupMode");
    String topicName = parsedArgs.getString("inputTopic");
    final AtomicLong numMessagesProcessedSinceLastRebalance = new AtomicLong(0);
    final AtomicLong totalMessageProcessed = new AtomicLong(0);
    if (groupMode) {
        consumer.subscribe(Collections.singleton(topicName), new ConsumerRebalanceListener() {

            @Override
            public void onPartitionsRevoked(Collection<TopicPartition> partitions) {
            }

            @Override
            public void onPartitionsAssigned(Collection<TopicPartition> partitions) {
                remainingMessages.set(partitions.stream().mapToLong(partition -> messagesRemaining(consumer, partition)).sum());
                numMessagesProcessedSinceLastRebalance.set(0);
                // Report the freshly computed remaining count for the new assignment.
                System.out.println(statusAsJson(totalMessageProcessed.get(), numMessagesProcessedSinceLastRebalance.get(), remainingMessages.get(), transactionalId, "RebalanceComplete"));
            }
        });
    } else {
        TopicPartition inputPartition = new TopicPartition(topicName, parsedArgs.getInt("inputPartition"));
        consumer.assign(singleton(inputPartition));
        remainingMessages.set(Math.min(messagesRemaining(consumer, inputPartition), remainingMessages.get()));
    }
    final boolean enableRandomAborts = parsedArgs.getBoolean("enableRandomAborts");
    producer.initTransactions();
    final AtomicBoolean isShuttingDown = new AtomicBoolean(false);
    Exit.addShutdownHook("transactional-message-copier-shutdown-hook", () -> {
        isShuttingDown.set(true);
        consumer.wakeup();
        System.out.println(shutDownString(totalMessageProcessed.get(), numMessagesProcessedSinceLastRebalance.get(), remainingMessages.get(), transactionalId));
    });
    final boolean useGroupMetadata = parsedArgs.getBoolean("useGroupMetadata");
    try {
        Random random = new Random();
        while (!isShuttingDown.get() && remainingMessages.get() > 0) {
            System.out.println(statusAsJson(totalMessageProcessed.get(), numMessagesProcessedSinceLastRebalance.get(), remainingMessages.get(), transactionalId, "ProcessLoop"));
            ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(200));
            if (records.count() > 0) {
                try {
                    producer.beginTransaction();
                    for (ConsumerRecord<String, String> record : records) {
                        producer.send(producerRecordFromConsumerRecord(outputTopic, record));
                    }
                    long messagesSentWithinCurrentTxn = records.count();
                    ConsumerGroupMetadata groupMetadata = useGroupMetadata ? consumer.groupMetadata() : new ConsumerGroupMetadata(consumerGroup);
                    producer.sendOffsetsToTransaction(consumerPositions(consumer), groupMetadata);
                    if (enableRandomAborts && random.nextInt() % 3 == 0) {
                        abortTransactionAndResetPosition(producer, consumer);
                    } else {
                        producer.commitTransaction();
                        remainingMessages.getAndAdd(-messagesSentWithinCurrentTxn);
                        numMessagesProcessedSinceLastRebalance.getAndAdd(messagesSentWithinCurrentTxn);
                        totalMessageProcessed.getAndAdd(messagesSentWithinCurrentTxn);
                    }
                } catch (ProducerFencedException e) {
                    throw new KafkaException(String.format("The transactional.id %s has been claimed by another process", transactionalId), e);
                } catch (KafkaException e) {
                    log.debug("Aborting transaction after catching exception", e);
                    abortTransactionAndResetPosition(producer, consumer);
                }
            }
        }
    } catch (WakeupException e) {
        if (!isShuttingDown.get()) {
            // Rethrow only if the wakeup was not triggered as part of shutdown;
            // a wakeup during shutdown is expected and simply ends the loop.
            throw e;
        }
    } finally {
        Utils.closeQuietly(producer, "producer");
        Utils.closeQuietly(consumer, "consumer");
    }
}
Also used : ProducerRecord(org.apache.kafka.clients.producer.ProducerRecord) Exit(org.apache.kafka.common.utils.Exit) ConsumerGroupMetadata(org.apache.kafka.clients.consumer.ConsumerGroupMetadata) Date(java.util.Date) LoggerFactory(org.slf4j.LoggerFactory) KafkaException(org.apache.kafka.common.KafkaException) SimpleDateFormat(java.text.SimpleDateFormat) AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean) HashMap(java.util.HashMap) Random(java.util.Random) ConsumerRecords(org.apache.kafka.clients.consumer.ConsumerRecords) Arguments.store(net.sourceforge.argparse4j.impl.Arguments.store) KafkaProducer(org.apache.kafka.clients.producer.KafkaProducer) Collections.singleton(java.util.Collections.singleton) ArgumentParser(net.sourceforge.argparse4j.inf.ArgumentParser) Namespace(net.sourceforge.argparse4j.inf.Namespace) Duration(java.time.Duration) Map(java.util.Map) ProducerConfig(org.apache.kafka.clients.producer.ProducerConfig) DateFormat(java.text.DateFormat) Utils(org.apache.kafka.common.utils.Utils) TopicPartition(org.apache.kafka.common.TopicPartition) Logger(org.slf4j.Logger) Properties(java.util.Properties) Arguments.storeTrue(net.sourceforge.argparse4j.impl.Arguments.storeTrue) WakeupException(org.apache.kafka.common.errors.WakeupException) ArgumentParsers(net.sourceforge.argparse4j.ArgumentParsers) Collection(java.util.Collection) ObjectMapper(com.fasterxml.jackson.databind.ObjectMapper) JsonProcessingException(com.fasterxml.jackson.core.JsonProcessingException) ConsumerConfig(org.apache.kafka.clients.consumer.ConsumerConfig) ConsumerRebalanceListener(org.apache.kafka.clients.consumer.ConsumerRebalanceListener) AtomicLong(java.util.concurrent.atomic.AtomicLong) ConsumerRecord(org.apache.kafka.clients.consumer.ConsumerRecord) ProducerFencedException(org.apache.kafka.common.errors.ProducerFencedException) OffsetAndMetadata(org.apache.kafka.clients.consumer.OffsetAndMetadata) Collections(java.util.Collections) KafkaConsumer(org.apache.kafka.clients.consumer.KafkaConsumer)
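
The consumer-setup pattern above can be reduced to a few lines. Below is a minimal sketch of the two ways Collections.singleton feeds the consumer API in runEventLoop: subscribing to a single topic (group mode) or assigning a single partition. The bootstrap address, group id, and topic name are illustrative placeholders, not values from the Kafka tool above.

import java.time.Duration;
import java.util.Collections;
import java.util.Properties;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.TopicPartition;

public class SingletonSubscribeSketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder broker
        props.put(ConsumerConfig.GROUP_ID_CONFIG, "singleton-demo");          // placeholder group
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringDeserializer");
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringDeserializer");
        try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
            // Group mode: subscribe to exactly one topic without building a full List or Set.
            consumer.subscribe(Collections.singleton("input-topic"));
            // Alternative (mutually exclusive with subscribe): pin one partition directly.
            // consumer.assign(Collections.singleton(new TopicPartition("input-topic", 0)));
            ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(200));
            for (ConsumerRecord<String, String> record : records)
                System.out.println(record.offset() + ": " + record.value());
        }
    }
}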

Example 2 with Collections.singleton

Use of java.util.Collections.singleton in the Apache Kafka project.

The class WorkerSinkTaskTest, method testPollRedeliveryWithConsumerRebalance:

@Test
public void testPollRedeliveryWithConsumerRebalance() throws Exception {
    createTask(initialState);
    expectInitializeTask();
    expectTaskGetTopic(true);
    expectPollInitialAssignment();
    // If a retriable exception is thrown, we should redeliver the same batch, pausing the consumer in the meantime
    expectConsumerPoll(1);
    expectConversionAndTransformation(1);
    sinkTask.put(EasyMock.anyObject());
    EasyMock.expectLastCall().andThrow(new RetriableException("retry"));
    // Pause
    EasyMock.expect(consumer.assignment()).andReturn(INITIAL_ASSIGNMENT);
    consumer.pause(INITIAL_ASSIGNMENT);
    PowerMock.expectLastCall();
    // Empty consumer poll (all partitions are paused) with rebalance; one new partition is assigned
    EasyMock.expect(consumer.poll(Duration.ofMillis(EasyMock.anyLong()))).andAnswer(() -> {
        rebalanceListener.getValue().onPartitionsRevoked(Collections.emptySet());
        rebalanceListener.getValue().onPartitionsAssigned(Collections.singleton(TOPIC_PARTITION3));
        return ConsumerRecords.empty();
    });
    Set<TopicPartition> newAssignment = new HashSet<>(Arrays.asList(TOPIC_PARTITION, TOPIC_PARTITION2, TOPIC_PARTITION3));
    EasyMock.expect(consumer.assignment()).andReturn(newAssignment).times(3);
    EasyMock.expect(consumer.position(TOPIC_PARTITION3)).andReturn(FIRST_OFFSET);
    sinkTask.open(Collections.singleton(TOPIC_PARTITION3));
    EasyMock.expectLastCall();
    // All partitions are re-paused in order to pause any newly-assigned partitions so that redelivery efforts can continue
    consumer.pause(newAssignment);
    EasyMock.expectLastCall();
    sinkTask.put(EasyMock.anyObject());
    EasyMock.expectLastCall().andThrow(new RetriableException("retry"));
    // Next delivery attempt fails again
    expectConsumerPoll(0);
    sinkTask.put(EasyMock.anyObject());
    EasyMock.expectLastCall().andThrow(new RetriableException("retry"));
    // Non-empty consumer poll; all initially-assigned partitions are revoked in rebalance, and new partitions are allowed to resume
    ConsumerRecord<byte[], byte[]> newRecord = new ConsumerRecord<>(TOPIC, PARTITION3, FIRST_OFFSET, RAW_KEY, RAW_VALUE);
    EasyMock.expect(consumer.poll(Duration.ofMillis(EasyMock.anyLong()))).andAnswer(() -> {
        rebalanceListener.getValue().onPartitionsRevoked(INITIAL_ASSIGNMENT);
        rebalanceListener.getValue().onPartitionsAssigned(Collections.emptyList());
        return new ConsumerRecords<>(Collections.singletonMap(TOPIC_PARTITION3, Collections.singletonList(newRecord)));
    });
    newAssignment = Collections.singleton(TOPIC_PARTITION3);
    EasyMock.expect(consumer.assignment()).andReturn(new HashSet<>(newAssignment)).times(3);
    final Map<TopicPartition, OffsetAndMetadata> offsets = INITIAL_ASSIGNMENT.stream().collect(Collectors.toMap(Function.identity(), tp -> new OffsetAndMetadata(FIRST_OFFSET)));
    sinkTask.preCommit(offsets);
    EasyMock.expectLastCall().andReturn(offsets);
    sinkTask.close(INITIAL_ASSIGNMENT);
    EasyMock.expectLastCall();
    // All partitions are resumed, as all previously paused-for-redelivery partitions were revoked
    newAssignment.forEach(tp -> {
        consumer.resume(Collections.singleton(tp));
        EasyMock.expectLastCall();
    });
    expectConversionAndTransformation(1);
    sinkTask.put(EasyMock.anyObject());
    EasyMock.expectLastCall();
    PowerMock.replayAll();
    workerTask.initialize(TASK_CONFIG);
    workerTask.initializeAndStart();
    workerTask.iteration();
    workerTask.iteration();
    workerTask.iteration();
    workerTask.iteration();
    workerTask.iteration();
    PowerMock.verifyAll();
}
Also used : Arrays(java.util.Arrays) MockTime(org.apache.kafka.common.utils.MockTime) ConsumerRecords(org.apache.kafka.clients.consumer.ConsumerRecords) Schema(org.apache.kafka.connect.data.Schema) Collections.singleton(java.util.Collections.singleton) Arrays.asList(java.util.Arrays.asList) RecordBatch(org.apache.kafka.common.record.RecordBatch) Converter(org.apache.kafka.connect.storage.Converter) After(org.junit.After) Duration(java.time.Duration) Map(java.util.Map) OffsetCommitCallback(org.apache.kafka.clients.consumer.OffsetCommitCallback) MetricName(org.apache.kafka.common.MetricName) Assert.fail(org.junit.Assert.fail) IExpectationSetters(org.easymock.IExpectationSetters) TimestampType(org.apache.kafka.common.record.TimestampType) TopicPartition(org.apache.kafka.common.TopicPartition) Time(org.apache.kafka.common.utils.Time) WakeupException(org.apache.kafka.common.errors.WakeupException) Collection(java.util.Collection) Set(java.util.Set) RetryWithToleranceOperatorTest(org.apache.kafka.connect.runtime.errors.RetryWithToleranceOperatorTest) PowerMock(org.powermock.api.easymock.PowerMock) Collectors(java.util.stream.Collectors) Executors(java.util.concurrent.Executors) ConsumerRebalanceListener(org.apache.kafka.clients.consumer.ConsumerRebalanceListener) CountDownLatch(java.util.concurrent.CountDownLatch) List(java.util.List) Header(org.apache.kafka.common.header.Header) ConsumerRecord(org.apache.kafka.clients.consumer.ConsumerRecord) Assert.assertFalse(org.junit.Assert.assertFalse) SinkRecord(org.apache.kafka.connect.sink.SinkRecord) OffsetAndMetadata(org.apache.kafka.clients.consumer.OffsetAndMetadata) Optional(java.util.Optional) Pattern(java.util.regex.Pattern) KafkaConsumer(org.apache.kafka.clients.consumer.KafkaConsumer) Whitebox(org.powermock.reflect.Whitebox) ConnectorTaskId(org.apache.kafka.connect.util.ConnectorTaskId) Headers(org.apache.kafka.common.header.Headers) RunWith(org.junit.runner.RunWith) AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean) HashMap(java.util.HashMap) AtomicReference(java.util.concurrent.atomic.AtomicReference) Function(java.util.function.Function) ArrayList(java.util.ArrayList) HashSet(java.util.HashSet) Assert.assertSame(org.junit.Assert.assertSame) RecordHeaders(org.apache.kafka.common.header.internals.RecordHeaders) StandaloneConfig(org.apache.kafka.connect.runtime.standalone.StandaloneConfig) HeaderConverter(org.apache.kafka.connect.storage.HeaderConverter) PrepareForTest(org.powermock.core.classloader.annotations.PrepareForTest) MetricGroup(org.apache.kafka.connect.runtime.ConnectMetrics.MetricGroup) PowerMockRunner(org.powermock.modules.junit4.PowerMockRunner) PowerMockIgnore(org.powermock.core.classloader.annotations.PowerMockIgnore) StringConverter(org.apache.kafka.connect.storage.StringConverter) ExecutorService(java.util.concurrent.ExecutorService) SinkConnector(org.apache.kafka.connect.sink.SinkConnector) SinkTask(org.apache.kafka.connect.sink.SinkTask) Before(org.junit.Before) Capture(org.easymock.Capture) Iterator(java.util.Iterator) PluginClassLoader(org.apache.kafka.connect.runtime.isolation.PluginClassLoader) SchemaAndValue(org.apache.kafka.connect.data.SchemaAndValue) ClusterConfigState(org.apache.kafka.connect.runtime.distributed.ClusterConfigState) Mock(org.powermock.api.easymock.annotation.Mock) Assert.assertTrue(org.junit.Assert.assertTrue) Test(org.junit.Test) EasyMock(org.easymock.EasyMock) Assert.assertNotEquals(org.junit.Assert.assertNotEquals) StatusBackingStore(org.apache.kafka.connect.storage.StatusBackingStore) TimeUnit(java.util.concurrent.TimeUnit) RetriableException(org.apache.kafka.connect.errors.RetriableException) CaptureType(org.easymock.CaptureType) Assert.assertNull(org.junit.Assert.assertNull) ConnectException(org.apache.kafka.connect.errors.ConnectException) SinkTaskMetricsGroup(org.apache.kafka.connect.runtime.WorkerSinkTask.SinkTaskMetricsGroup) Collections(java.util.Collections) Assert.assertEquals(org.junit.Assert.assertEquals)
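
Outside the mock scaffolding, the flow-control idiom the test verifies is small. The sketch below isolates it, assuming only a Kafka Consumer instance supplied by the caller; the class and method names are hypothetical, invented for illustration.

import java.util.Collections;
import java.util.Set;
import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.common.TopicPartition;

// Hypothetical helper illustrating the pause/resume pattern exercised by the test.
public final class PartitionFlowControl {
    private PartitionFlowControl() {
    }

    // Pause every currently assigned partition so a failed batch can be redelivered
    // on the next poll without pulling in new records.
    public static void pauseAll(Consumer<?, ?> consumer) {
        consumer.pause(consumer.assignment());
    }

    // Resume partitions one at a time; Collections.singleton avoids allocating a
    // throwaway HashSet for each single-element resume() call.
    public static void resumeEach(Consumer<?, ?> consumer, Set<TopicPartition> partitions) {
        for (TopicPartition tp : partitions)
            consumer.resume(Collections.singleton(tp));
    }
}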

Example 3 with Collections.singleton

Use of java.util.Collections.singleton in the sonarlint-core project by SonarSource.

The class StandaloneIssueMediumTests, method simpleJavaWithIncludedAndExcludedRules:

@Test
void simpleJavaWithIncludedAndExcludedRules() throws Exception {
    var inputFile = prepareInputFile(A_JAVA_FILE_PATH,
        "import java.util.Optional;\n"
            + "public class Foo {\n"
            + "  public void foo(Optional<String> name) {  // for squid:S3553, not in Sonar Way\n"
            + "    int x;\n"
            + "    System.out.println(\"Foo\" + name.isPresent());\n"
            + "  }\n"
            + "}",
        false);
    // When the same rule key is both excluded and included, the exclusion wins.
    final Collection<RuleKey> excludedRules = Collections.singleton(new RuleKey("squid", "S3553"));
    final Collection<RuleKey> includedRules = Collections.singleton(new RuleKey("squid", "S3553"));
    final List<Issue> issues = new ArrayList<>();
    sonarlint.analyze(StandaloneAnalysisConfiguration.builder()
        .setBaseDir(baseDir.toPath())
        .addInputFile(inputFile)
        .addExcludedRules(excludedRules)
        .addIncludedRules(includedRules)
        .build(), issues::add, null, null);
    assertThat(issues)
        .extracting(Issue::getRuleKey, Issue::getStartLine, i -> i.getInputFile().relativePath(), Issue::getSeverity)
        .containsOnly(
            tuple("java:S106", 5, A_JAVA_FILE_PATH, "MAJOR"),
            tuple("java:S1220", null, A_JAVA_FILE_PATH, "MINOR"),
            tuple("java:S1481", 4, A_JAVA_FILE_PATH, "MINOR"));
}
Also used : BeforeEach(org.junit.jupiter.api.BeforeEach) Assertions.assertThat(org.assertj.core.api.Assertions.assertThat) Language(org.sonarsource.sonarlint.core.commons.Language) Disabled(org.junit.jupiter.api.Disabled) StandaloneSonarLintEngineImpl(org.sonarsource.sonarlint.core.StandaloneSonarLintEngineImpl) AfterAll(org.junit.jupiter.api.AfterAll) Future(java.util.concurrent.Future) Collections.singleton(java.util.Collections.singleton) BeforeAll(org.junit.jupiter.api.BeforeAll) Map(java.util.Map) Path(java.nio.file.Path) EnumSet(java.util.EnumSet) Collection(java.util.Collection) Set(java.util.Set) StandardCharsets(java.nio.charset.StandardCharsets) Executors(java.util.concurrent.Executors) StandaloneAnalysisConfiguration(org.sonarsource.sonarlint.core.client.api.standalone.StandaloneAnalysisConfiguration) Test(org.junit.jupiter.api.Test) List(java.util.List) TempDir(org.junit.jupiter.api.io.TempDir) Issue(org.sonarsource.sonarlint.core.client.api.common.analysis.Issue) ComponentContainer(org.sonarsource.sonarlint.core.plugin.commons.pico.ComponentContainer) PluginLocator(org.sonarsource.sonarlint.core.util.PluginLocator) CopyOnWriteArrayList(java.util.concurrent.CopyOnWriteArrayList) Assertions.fail(org.junit.jupiter.api.Assertions.fail) HashMap(java.util.HashMap) TestUtils(testutils.TestUtils) ArrayList(java.util.ArrayList) SonarLintModuleFileSystem(org.sonarsource.sonarlint.core.analysis.sonarapi.SonarLintModuleFileSystem) Charset(java.nio.charset.Charset) StandaloneGlobalConfiguration(org.sonarsource.sonarlint.core.client.api.standalone.StandaloneGlobalConfiguration) OnDiskTestClientInputFile(testutils.OnDiskTestClientInputFile) Assumptions.assumeTrue(org.junit.jupiter.api.Assumptions.assumeTrue) Nullable(javax.annotation.Nullable) ClientModuleInfo(org.sonarsource.sonarlint.core.analysis.api.ClientModuleInfo) Files(java.nio.file.Files) RuleDetails(org.sonarsource.sonarlint.core.client.api.common.RuleDetails) Assertions.tuple(org.assertj.core.api.Assertions.tuple) SystemUtils(org.apache.commons.lang3.SystemUtils) ClientInputFile(org.sonarsource.sonarlint.core.analysis.api.ClientInputFile) ClientFileSystemFixtures.aClientFileSystemWith(org.sonarsource.sonarlint.core.client.api.common.ClientFileSystemFixtures.aClientFileSystemWith) IOException(java.io.IOException) FileUtils(org.apache.commons.io.FileUtils) Assertions.entry(org.assertj.core.api.Assertions.entry) File(java.io.File) ExecutionException(java.util.concurrent.ExecutionException) Paths(java.nio.file.Paths) RuleKey(org.sonarsource.sonarlint.core.client.api.common.RuleKey) Collections(java.util.Collections) NodeJsHelper(org.sonarsource.sonarlint.core.NodeJsHelper)
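
One property worth remembering when passing Collections.singleton into APIs like addExcludedRules above: the returned Set is immutable and always has size 1. A short self-contained demonstration (the rule-key strings are just sample values):

import java.util.Collections;
import java.util.Set;

public class SingletonSetDemo {
    public static void main(String[] args) {
        // Collections.singleton returns an immutable Set with exactly one element.
        Set<String> excluded = Collections.singleton("squid:S3553");
        System.out.println(excluded.size());                   // 1
        System.out.println(excluded.contains("squid:S3553"));  // true
        try {
            excluded.add("squid:S106"); // any mutation attempt fails
        } catch (UnsupportedOperationException e) {
            System.out.println("singleton sets are immutable");
        }
    }
}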

Aggregations

Collection (java.util.Collection) 3
Collections (java.util.Collections) 3
Collections.singleton (java.util.Collections.singleton) 3
HashMap (java.util.HashMap) 3
Map (java.util.Map) 3
Duration (java.time.Duration) 2
ArrayList (java.util.ArrayList) 2
List (java.util.List) 2
Set (java.util.Set) 2
Executors (java.util.concurrent.Executors) 2
AtomicBoolean (java.util.concurrent.atomic.AtomicBoolean) 2
ConsumerRebalanceListener (org.apache.kafka.clients.consumer.ConsumerRebalanceListener) 2
ConsumerRecord (org.apache.kafka.clients.consumer.ConsumerRecord) 2
ConsumerRecords (org.apache.kafka.clients.consumer.ConsumerRecords) 2
JsonProcessingException (com.fasterxml.jackson.core.JsonProcessingException) 1
ObjectMapper (com.fasterxml.jackson.databind.ObjectMapper) 1
File (java.io.File) 1
IOException (java.io.IOException) 1
Charset (java.nio.charset.Charset) 1
StandardCharsets (java.nio.charset.StandardCharsets) 1