Use of org.apache.flink.streaming.connectors.kafka.internals.KafkaTopicPartition in project flink by apache.
The class FlinkKafkaConsumer, method fetchOffsetsWithTimestamp:
@Override
protected Map<KafkaTopicPartition, Long> fetchOffsetsWithTimestamp(Collection<KafkaTopicPartition> partitions, long timestamp) {
    Map<TopicPartition, Long> partitionOffsetsRequest = new HashMap<>(partitions.size());
    for (KafkaTopicPartition partition : partitions) {
        partitionOffsetsRequest.put(new TopicPartition(partition.getTopic(), partition.getPartition()), timestamp);
    }

    final Map<KafkaTopicPartition, Long> result = new HashMap<>(partitions.size());
    // this is ok because this is a one-time operation that happens only on startup
    try (KafkaConsumer<?, ?> consumer = new KafkaConsumer(properties)) {
        for (Map.Entry<TopicPartition, OffsetAndTimestamp> partitionToOffset :
                consumer.offsetsForTimes(partitionOffsetsRequest).entrySet()) {
            result.put(
                    new KafkaTopicPartition(partitionToOffset.getKey().topic(), partitionToOffset.getKey().partition()),
                    (partitionToOffset.getValue() == null) ? null : partitionToOffset.getValue().offset());
        }
    }
    return result;
}
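For context, here is a minimal, self-contained sketch of the same offsetsForTimes lookup against a plain KafkaConsumer. The broker address, topic name, and deserializer choices are assumptions for illustration only; the point is the null-result semantics that the Flink method above copies into its KafkaTopicPartition-keyed map.

import java.util.HashMap;
import java.util.Map;
import java.util.Properties;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.consumer.OffsetAndTimestamp;
import org.apache.kafka.common.TopicPartition;

public class OffsetsForTimesSketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put("bootstrap.servers", "localhost:9092"); // assumed broker address
        props.put("key.deserializer", "org.apache.kafka.common.serialization.ByteArrayDeserializer");
        props.put("value.deserializer", "org.apache.kafka.common.serialization.ByteArrayDeserializer");

        long timestamp = System.currentTimeMillis() - 3_600_000L; // e.g. "one hour ago"
        Map<TopicPartition, Long> request = new HashMap<>();
        request.put(new TopicPartition("my-topic", 0), timestamp); // hypothetical topic/partition

        try (KafkaConsumer<byte[], byte[]> consumer = new KafkaConsumer<>(props)) {
            for (Map.Entry<TopicPartition, OffsetAndTimestamp> e :
                    consumer.offsetsForTimes(request).entrySet()) {
                OffsetAndTimestamp oat = e.getValue();
                // null means no record with timestamp >= the requested one exists in that
                // partition, which is why the Flink method stores null in its result map too.
                System.out.println(e.getKey() + " -> " + (oat == null ? "null" : oat.offset()));
            }
        }
    }
}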
Use of org.apache.flink.streaming.connectors.kafka.internals.KafkaTopicPartition in project flink by apache.
The class KafkaConnectorOptionsUtil, method buildSpecificOffsets:
private static void buildSpecificOffsets(ReadableConfig tableOptions, String topic, Map<KafkaTopicPartition, Long> specificOffsets) {
    String specificOffsetsStrOpt = tableOptions.get(SCAN_STARTUP_SPECIFIC_OFFSETS);
    final Map<Integer, Long> offsetMap = parseSpecificOffsets(specificOffsetsStrOpt, SCAN_STARTUP_SPECIFIC_OFFSETS.key());
    offsetMap.forEach((partition, offset) -> {
        final KafkaTopicPartition topicPartition = new KafkaTopicPartition(topic, partition);
        specificOffsets.put(topicPartition, offset);
    });
}
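As a hedged illustration of the input this method expects: the 'scan.startup.specific-offsets' table option uses the documented partition/offset syntax, and parseSpecificOffsets turns it into the Integer-to-Long map consumed above. The topic name and offsets below are made up; since buildSpecificOffsets is private, the sketch just mimics its effect.

import java.util.HashMap;
import java.util.Map;
import org.apache.flink.streaming.connectors.kafka.internals.KafkaTopicPartition;

public class SpecificOffsetsSketch {
    public static void main(String[] args) {
        // Documented option syntax: partition:<p>,offset:<o>;partition:<p>,offset:<o>;...
        String optionValue = "partition:0,offset:42;partition:1,offset:300";

        // parseSpecificOffsets would produce {0 -> 42, 1 -> 300}; the forEach above then
        // keys each offset by a KafkaTopicPartition for the configured topic.
        Map<KafkaTopicPartition, Long> specificOffsets = new HashMap<>();
        specificOffsets.put(new KafkaTopicPartition("my-topic", 0), 42L);
        specificOffsets.put(new KafkaTopicPartition("my-topic", 1), 300L);
        System.out.println(optionValue + " -> " + specificOffsets);
    }
}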
Use of org.apache.flink.streaming.connectors.kafka.internals.KafkaTopicPartition in project flink by apache.
The class KafkaDynamicTableFactoryTest, method testTableSource:
@Test
public void testTableSource() {
    final DynamicTableSource actualSource = createTableSource(SCHEMA, getBasicSourceOptions());
    final KafkaDynamicSource actualKafkaSource = (KafkaDynamicSource) actualSource;

    final Map<KafkaTopicPartition, Long> specificOffsets = new HashMap<>();
    specificOffsets.put(new KafkaTopicPartition(TOPIC, PARTITION_0), OFFSET_0);
    specificOffsets.put(new KafkaTopicPartition(TOPIC, PARTITION_1), OFFSET_1);

    final DecodingFormat<DeserializationSchema<RowData>> valueDecodingFormat = new DecodingFormatMock(",", true);

    // Test scan source equals
    final KafkaDynamicSource expectedKafkaSource = createExpectedScanSource(
            SCHEMA_DATA_TYPE, null, valueDecodingFormat, new int[0], new int[] {0, 1, 2},
            null, Collections.singletonList(TOPIC), null, KAFKA_SOURCE_PROPERTIES,
            StartupMode.SPECIFIC_OFFSETS, specificOffsets, 0);
    assertThat(actualKafkaSource).isEqualTo(expectedKafkaSource);

    ScanTableSource.ScanRuntimeProvider provider =
            actualKafkaSource.getScanRuntimeProvider(ScanRuntimeProviderContext.INSTANCE);
    assertKafkaSource(provider);
}
Use of org.apache.flink.streaming.connectors.kafka.internals.KafkaTopicPartition in project flink by apache.
The class KafkaDynamicTableFactoryTest, method testTableSourceWithPattern:
@Test
public void testTableSourceWithPattern() {
    final Map<String, String> modifiedOptions = getModifiedOptions(getBasicSourceOptions(), options -> {
        options.remove("topic");
        options.put("topic-pattern", TOPIC_REGEX);
        options.put("scan.startup.mode", ScanStartupMode.EARLIEST_OFFSET.toString());
        options.remove("scan.startup.specific-offsets");
    });
    final DynamicTableSource actualSource = createTableSource(SCHEMA, modifiedOptions);

    final Map<KafkaTopicPartition, Long> specificOffsets = new HashMap<>();
    DecodingFormat<DeserializationSchema<RowData>> valueDecodingFormat = new DecodingFormatMock(",", true);

    // Test scan source equals
    final KafkaDynamicSource expectedKafkaSource = createExpectedScanSource(
            SCHEMA_DATA_TYPE, null, valueDecodingFormat, new int[0], new int[] {0, 1, 2},
            null, null, Pattern.compile(TOPIC_REGEX), KAFKA_SOURCE_PROPERTIES,
            StartupMode.EARLIEST, specificOffsets, 0);
    final KafkaDynamicSource actualKafkaSource = (KafkaDynamicSource) actualSource;
    assertThat(actualKafkaSource).isEqualTo(expectedKafkaSource);

    ScanTableSource.ScanRuntimeProvider provider =
            actualKafkaSource.getScanRuntimeProvider(ScanRuntimeProviderContext.INSTANCE);
    assertKafkaSource(provider);
}
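The two tests above exercise the two table-option shapes the factory accepts: a fixed topic with specific offsets, and a topic pattern with earliest-offset startup. The sketch below is a hedged guess at what such option maps look like; the exact contents of getBasicSourceOptions() are not shown in the tests, only the keys manipulated in testTableSourceWithPattern are taken from them, and the topic names and offsets are invented.

import java.util.HashMap;
import java.util.Map;

public class SourceOptionsSketch {
    public static void main(String[] args) {
        // Fixed-topic source starting from explicitly listed offsets.
        Map<String, String> specificOffsetsOptions = new HashMap<>();
        specificOffsetsOptions.put("connector", "kafka");
        specificOffsetsOptions.put("topic", "my-topic");
        specificOffsetsOptions.put("scan.startup.mode", "specific-offsets");
        specificOffsetsOptions.put("scan.startup.specific-offsets", "partition:0,offset:42;partition:1,offset:300");

        // Pattern-based source starting from the earliest offset; no "topic" key,
        // and no specific offsets, mirroring the removals in the second test.
        Map<String, String> patternOptions = new HashMap<>();
        patternOptions.put("connector", "kafka");
        patternOptions.put("topic-pattern", "my-topic-.*");
        patternOptions.put("scan.startup.mode", "earliest-offset");

        System.out.println(specificOffsetsOptions);
        System.out.println(patternOptions);
    }
}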
Use of org.apache.flink.streaming.connectors.kafka.internals.KafkaTopicPartition in project flink by apache.
The class FlinkKafkaConsumer09, method convertToFlinkKafkaTopicPartition:
// ------------------------------------------------------------------------
//  Utilities
// ------------------------------------------------------------------------

/**
 * Converts a list of Kafka PartitionInfos to Flink's KafkaTopicPartitions (which are serializable).
 *
 * @param partitions A list of Kafka PartitionInfos.
 * @return A list of KafkaTopicPartitions
 */
private static List<KafkaTopicPartition> convertToFlinkKafkaTopicPartition(List<PartitionInfo> partitions) {
    checkNotNull(partitions);

    List<KafkaTopicPartition> ret = new ArrayList<>(partitions.size());
    for (PartitionInfo pi : partitions) {
        ret.add(new KafkaTopicPartition(pi.topic(), pi.partition()));
    }
    return ret;
}
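To show where such a List<PartitionInfo> typically comes from, here is a minimal sketch using the plain Kafka client: KafkaConsumer#partitionsFor returns the partition metadata for one topic, which a helper like the one above can then turn into serializable KafkaTopicPartition instances. The broker address, topic name, and deserializers are assumptions for illustration.

import java.util.List;
import java.util.Properties;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.PartitionInfo;

public class PartitionDiscoverySketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put("bootstrap.servers", "localhost:9092"); // assumed broker address
        props.put("key.deserializer", "org.apache.kafka.common.serialization.ByteArrayDeserializer");
        props.put("value.deserializer", "org.apache.kafka.common.serialization.ByteArrayDeserializer");

        try (KafkaConsumer<byte[], byte[]> consumer = new KafkaConsumer<>(props)) {
            List<PartitionInfo> infos = consumer.partitionsFor("my-topic"); // hypothetical topic
            // convertToFlinkKafkaTopicPartition(infos) would yield one
            // KafkaTopicPartition(topic, partition) per PartitionInfo.
            infos.forEach(pi -> System.out.println(pi.topic() + "-" + pi.partition()));
        }
    }
}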