
Example 21 with KafkaTopicPartition

Use of org.apache.flink.streaming.connectors.kafka.internals.KafkaTopicPartition in the apache/flink project.

From the class FlinkKafkaConsumer, method fetchOffsetsWithTimestamp:

@Override
protected Map<KafkaTopicPartition, Long> fetchOffsetsWithTimestamp(Collection<KafkaTopicPartition> partitions, long timestamp) {
    Map<TopicPartition, Long> partitionOffsetsRequest = new HashMap<>(partitions.size());
    for (KafkaTopicPartition partition : partitions) {
        partitionOffsetsRequest.put(new TopicPartition(partition.getTopic(), partition.getPartition()), timestamp);
    }
    final Map<KafkaTopicPartition, Long> result = new HashMap<>(partitions.size());
    // this is ok because this is a one-time operation that happens only on startup
    try (KafkaConsumer<?, ?> consumer = new KafkaConsumer<>(properties)) {
        for (Map.Entry<TopicPartition, OffsetAndTimestamp> partitionToOffset :
                consumer.offsetsForTimes(partitionOffsetsRequest).entrySet()) {
            // offsetsForTimes returns a null value for partitions that have no
            // message at or after the requested timestamp.
            result.put(
                    new KafkaTopicPartition(
                            partitionToOffset.getKey().topic(),
                            partitionToOffset.getKey().partition()),
                    (partitionToOffset.getValue() == null)
                            ? null
                            : partitionToOffset.getValue().offset());
        }
    }
    return result;
}
Also used : HashMap (java.util.HashMap), Map (java.util.Map), TopicPartition (org.apache.kafka.common.TopicPartition), KafkaTopicPartition (org.apache.flink.streaming.connectors.kafka.internals.KafkaTopicPartition), KafkaConsumer (org.apache.kafka.clients.consumer.KafkaConsumer), OffsetAndTimestamp (org.apache.kafka.clients.consumer.OffsetAndTimestamp), PropertiesUtil.getLong (org.apache.flink.util.PropertiesUtil.getLong)
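
The offsetsForTimes call above is plain Kafka client API, not Flink-specific. As a standalone illustration, here is a minimal sketch that queries the offset for a timestamp directly; the broker address and topic name (localhost:9092, my-topic) are hypothetical placeholders:

import java.util.HashMap;
import java.util.Map;
import java.util.Properties;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.consumer.OffsetAndTimestamp;
import org.apache.kafka.common.TopicPartition;

public class OffsetsForTimesSketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        // Hypothetical connection settings; adjust to your cluster.
        props.put("bootstrap.servers", "localhost:9092");
        props.put("key.deserializer",
                "org.apache.kafka.common.serialization.ByteArrayDeserializer");
        props.put("value.deserializer",
                "org.apache.kafka.common.serialization.ByteArrayDeserializer");

        long timestamp = System.currentTimeMillis() - 3_600_000L; // one hour ago

        Map<TopicPartition, Long> request = new HashMap<>();
        request.put(new TopicPartition("my-topic", 0), timestamp);

        try (KafkaConsumer<byte[], byte[]> consumer = new KafkaConsumer<>(props)) {
            // A null value means the partition has no message at or after the timestamp.
            Map<TopicPartition, OffsetAndTimestamp> offsets = consumer.offsetsForTimes(request);
            offsets.forEach((tp, oat) ->
                    System.out.println(tp + " -> " + (oat == null ? "none" : oat.offset())));
        }
    }
}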

Example 22 with KafkaTopicPartition

Use of org.apache.flink.streaming.connectors.kafka.internals.KafkaTopicPartition in the apache/flink project.

From the class KafkaConnectorOptionsUtil, method buildSpecificOffsets:

private static void buildSpecificOffsets(ReadableConfig tableOptions, String topic, Map<KafkaTopicPartition, Long> specificOffsets) {
    String specificOffsetsStrOpt = tableOptions.get(SCAN_STARTUP_SPECIFIC_OFFSETS);
    final Map<Integer, Long> offsetMap = parseSpecificOffsets(specificOffsetsStrOpt, SCAN_STARTUP_SPECIFIC_OFFSETS.key());
    offsetMap.forEach((partition, offset) -> {
        final KafkaTopicPartition topicPartition = new KafkaTopicPartition(topic, partition);
        specificOffsets.put(topicPartition, offset);
    });
}
Also used : KafkaTopicPartition(org.apache.flink.streaming.connectors.kafka.internals.KafkaTopicPartition)
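
The specific-offsets string parsed by parseSpecificOffsets (not shown in this example) follows Flink's documented format for scan.startup.specific-offsets, e.g. partition:0,offset:42;partition:1,offset:300. A simplified, self-contained sketch of such a parser, without the option-key validation and error reporting the real method performs:

import java.util.HashMap;
import java.util.Map;

public class SpecificOffsetsParserSketch {

    // Parses "partition:0,offset:42;partition:1,offset:300" into {0=42, 1=300}.
    static Map<Integer, Long> parse(String spec) {
        Map<Integer, Long> offsets = new HashMap<>();
        for (String pair : spec.split(";")) {
            String[] fields = pair.split(",");
            int partition = Integer.parseInt(fields[0].substring("partition:".length()));
            long offset = Long.parseLong(fields[1].substring("offset:".length()));
            offsets.put(partition, offset);
        }
        return offsets;
    }

    public static void main(String[] args) {
        System.out.println(parse("partition:0,offset:42;partition:1,offset:300"));
    }
}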

Example 23 with KafkaTopicPartition

Use of org.apache.flink.streaming.connectors.kafka.internals.KafkaTopicPartition in the apache/flink project.

From the class KafkaDynamicTableFactoryTest, method testTableSource:

@Test
public void testTableSource() {
    final DynamicTableSource actualSource = createTableSource(SCHEMA, getBasicSourceOptions());
    final KafkaDynamicSource actualKafkaSource = (KafkaDynamicSource) actualSource;
    final Map<KafkaTopicPartition, Long> specificOffsets = new HashMap<>();
    specificOffsets.put(new KafkaTopicPartition(TOPIC, PARTITION_0), OFFSET_0);
    specificOffsets.put(new KafkaTopicPartition(TOPIC, PARTITION_1), OFFSET_1);
    final DecodingFormat<DeserializationSchema<RowData>> valueDecodingFormat = new DecodingFormatMock(",", true);
    // Test scan source equals
    final KafkaDynamicSource expectedKafkaSource = createExpectedScanSource(
            SCHEMA_DATA_TYPE, null, valueDecodingFormat,
            new int[0], new int[] { 0, 1, 2 }, null,
            Collections.singletonList(TOPIC), null, KAFKA_SOURCE_PROPERTIES,
            StartupMode.SPECIFIC_OFFSETS, specificOffsets, 0);
    assertThat(actualKafkaSource).isEqualTo(expectedKafkaSource);
    ScanTableSource.ScanRuntimeProvider provider = actualKafkaSource.getScanRuntimeProvider(ScanRuntimeProviderContext.INSTANCE);
    assertKafkaSource(provider);
}
Also used : ScanTableSource (org.apache.flink.table.connector.source.ScanTableSource), HashMap (java.util.HashMap), KafkaTopicPartition (org.apache.flink.streaming.connectors.kafka.internals.KafkaTopicPartition), DecodingFormatMock (org.apache.flink.table.factories.TestFormatFactory.DecodingFormatMock), DeserializationSchema (org.apache.flink.api.common.serialization.DeserializationSchema), DynamicTableSource (org.apache.flink.table.connector.source.DynamicTableSource), Test (org.junit.jupiter.api.Test), ParameterizedTest (org.junit.jupiter.params.ParameterizedTest)
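
getBasicSourceOptions is a test helper whose contents are not shown here; a hypothetical option map that would drive the factory into SPECIFIC_OFFSETS startup mode might look like the following (the key names are the documented Kafka connector options, the values are made up):

import java.util.HashMap;
import java.util.Map;

public class SpecificOffsetsOptionsSketch {
    public static void main(String[] args) {
        Map<String, String> options = new HashMap<>();
        options.put("connector", "kafka");
        options.put("topic", "myTopic");
        options.put("properties.bootstrap.servers", "localhost:9092");
        options.put("properties.group.id", "testGroup");
        options.put("scan.startup.mode", "specific-offsets");
        options.put("scan.startup.specific-offsets",
                "partition:0,offset:42;partition:1,offset:300");
        options.forEach((k, v) -> System.out.println(k + " = " + v));
    }
}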

Example 24 with KafkaTopicPartition

Use of org.apache.flink.streaming.connectors.kafka.internals.KafkaTopicPartition in the apache/flink project.

From the class KafkaDynamicTableFactoryTest, method testTableSourceWithPattern:

@Test
public void testTableSourceWithPattern() {
    final Map<String, String> modifiedOptions = getModifiedOptions(getBasicSourceOptions(), options -> {
        options.remove("topic");
        options.put("topic-pattern", TOPIC_REGEX);
        options.put("scan.startup.mode", ScanStartupMode.EARLIEST_OFFSET.toString());
        options.remove("scan.startup.specific-offsets");
    });
    final DynamicTableSource actualSource = createTableSource(SCHEMA, modifiedOptions);
    final Map<KafkaTopicPartition, Long> specificOffsets = new HashMap<>();
    DecodingFormat<DeserializationSchema<RowData>> valueDecodingFormat = new DecodingFormatMock(",", true);
    // Test scan source equals
    final KafkaDynamicSource expectedKafkaSource = createExpectedScanSource(
            SCHEMA_DATA_TYPE, null, valueDecodingFormat,
            new int[0], new int[] { 0, 1, 2 }, null,
            null, Pattern.compile(TOPIC_REGEX), KAFKA_SOURCE_PROPERTIES,
            StartupMode.EARLIEST, specificOffsets, 0);
    final KafkaDynamicSource actualKafkaSource = (KafkaDynamicSource) actualSource;
    assertThat(actualKafkaSource).isEqualTo(expectedKafkaSource);
    ScanTableSource.ScanRuntimeProvider provider = actualKafkaSource.getScanRuntimeProvider(ScanRuntimeProviderContext.INSTANCE);
    assertKafkaSource(provider);
}
Also used : HashMap (java.util.HashMap), KafkaTopicPartition (org.apache.flink.streaming.connectors.kafka.internals.KafkaTopicPartition), DecodingFormatMock (org.apache.flink.table.factories.TestFormatFactory.DecodingFormatMock), DeserializationSchema (org.apache.flink.api.common.serialization.DeserializationSchema), ScanTableSource (org.apache.flink.table.connector.source.ScanTableSource), DynamicTableSource (org.apache.flink.table.connector.source.DynamicTableSource), Test (org.junit.jupiter.api.Test), ParameterizedTest (org.junit.jupiter.params.ParameterizedTest)
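
With topic-pattern, the source subscribes by regular expression instead of a fixed topic list, which is why the expected source above carries Pattern.compile(TOPIC_REGEX) and an empty specificOffsets map. TOPIC_REGEX is a test constant not shown here; a minimal sketch with a made-up pattern illustrates the matching behavior:

import java.util.Arrays;
import java.util.List;
import java.util.regex.Pattern;

public class TopicPatternSketch {
    public static void main(String[] args) {
        // Hypothetical stand-in for the TOPIC_REGEX test constant.
        Pattern topicPattern = Pattern.compile("myTopic-\\d+");

        List<String> clusterTopics = Arrays.asList("myTopic-1", "myTopic-2", "otherTopic");

        // Only topics whose full name matches the pattern are subscribed.
        clusterTopics.stream()
                .filter(t -> topicPattern.matcher(t).matches())
                .forEach(t -> System.out.println("subscribed: " + t)); // myTopic-1, myTopic-2
    }
}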

Example 25 with KafkaTopicPartition

Use of org.apache.flink.streaming.connectors.kafka.internals.KafkaTopicPartition in the apache/flink project.

From the class FlinkKafkaConsumer09, method convertToFlinkKafkaTopicPartition:

// ------------------------------------------------------------------------
//  Utilities
// ------------------------------------------------------------------------
/**
 * Converts a list of Kafka PartitionInfos to Flink's serializable KafkaTopicPartition.
 *
 * @param partitions A list of Kafka PartitionInfos.
 * @return A list of KafkaTopicPartitions.
 */
private static List<KafkaTopicPartition> convertToFlinkKafkaTopicPartition(List<PartitionInfo> partitions) {
    checkNotNull(partitions);
    List<KafkaTopicPartition> ret = new ArrayList<>(partitions.size());
    for (PartitionInfo pi : partitions) {
        ret.add(new KafkaTopicPartition(pi.topic(), pi.partition()));
    }
    return ret;
}
Also used : ArrayList (java.util.ArrayList), KafkaTopicPartition (org.apache.flink.streaming.connectors.kafka.internals.KafkaTopicPartition), PartitionInfo (org.apache.kafka.common.PartitionInfo)
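
The reverse mapping, from Flink's serializable KafkaTopicPartition back to the Kafka client's TopicPartition, is symmetric. A minimal sketch of such a helper follows; the method name toKafkaTopicPartitions is illustrative, not taken from the Flink codebase:

import java.util.ArrayList;
import java.util.List;
import org.apache.flink.streaming.connectors.kafka.internals.KafkaTopicPartition;
import org.apache.kafka.common.TopicPartition;

public class PartitionConversionSketch {

    // Converts Flink's serializable partition handles back to Kafka client handles.
    static List<TopicPartition> toKafkaTopicPartitions(List<KafkaTopicPartition> partitions) {
        List<TopicPartition> result = new ArrayList<>(partitions.size());
        for (KafkaTopicPartition ktp : partitions) {
            result.add(new TopicPartition(ktp.getTopic(), ktp.getPartition()));
        }
        return result;
    }
}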

Aggregations

KafkaTopicPartition (org.apache.flink.streaming.connectors.kafka.internals.KafkaTopicPartition): 35
HashMap (java.util.HashMap): 26
Test (org.junit.Test): 18
ArrayList (java.util.ArrayList): 14
Map (java.util.Map): 8
Properties (java.util.Properties): 8
AtomicReference (java.util.concurrent.atomic.AtomicReference): 7
Tuple2 (org.apache.flink.api.java.tuple.Tuple2): 7
UnregisteredMetricsGroup (org.apache.flink.metrics.groups.UnregisteredMetricsGroup): 7
TestProcessingTimeService (org.apache.flink.streaming.runtime.tasks.TestProcessingTimeService): 7
TopicPartition (org.apache.kafka.common.TopicPartition): 7
KafkaConsumerThread (org.apache.flink.streaming.connectors.kafka.internal.KafkaConsumerThread): 6
AbstractStreamOperatorTestHarness (org.apache.flink.streaming.util.AbstractStreamOperatorTestHarness): 6
KeyedDeserializationSchemaWrapper (org.apache.flink.streaming.util.serialization.KeyedDeserializationSchemaWrapper): 6
SimpleStringSchema (org.apache.flink.streaming.util.serialization.SimpleStringSchema): 6
ConsumerRecords (org.apache.kafka.clients.consumer.ConsumerRecords): 6
InvocationOnMock (org.mockito.invocation.InvocationOnMock): 6
OptionalLong (java.util.OptionalLong): 5
StreamSource (org.apache.flink.streaming.api.operators.StreamSource): 5
Mockito.anyLong (org.mockito.Mockito.anyLong): 5