
Example 1 with FlinkKafkaPartitioner

Use of org.apache.flink.streaming.connectors.kafka.partitioner.FlinkKafkaPartitioner in project flink by apache.

From the class KafkaITCase, method testTimestamps.

/**
 * Kafka 2.0 specific test, ensuring timestamps are properly written to and read from Kafka.
 */
@Test(timeout = 60000)
public void testTimestamps() throws Exception {
    final String topic = "tstopic";
    createTestTopic(topic, 3, 1);
    // ---------- Produce an event time stream into Kafka -------------------
    StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    env.setParallelism(1);
    env.getConfig().setRestartStrategy(RestartStrategies.noRestart());
    DataStream<Long> streamWithTimestamps = env.addSource(new SourceFunction<Long>() {

        private static final long serialVersionUID = -2255115836471289626L;

        boolean running = true;

        @Override
        public void run(SourceContext<Long> ctx) throws Exception {
            long i = 0;
            while (running) {
                ctx.collectWithTimestamp(i, i * 2);
                if (i++ == 1110L) {
                    running = false;
                }
            }
        }

        @Override
        public void cancel() {
            running = false;
        }
    });
    final TypeInformationSerializationSchema<Long> longSer = new TypeInformationSerializationSchema<>(Types.LONG, env.getConfig());
    FlinkKafkaProducer<Long> prod = new FlinkKafkaProducer<>(topic, new KeyedSerializationSchemaWrapper<>(longSer), standardProps, Optional.of(new FlinkKafkaPartitioner<Long>() {

        private static final long serialVersionUID = -6730989584364230617L;

        @Override
        public int partition(Long next, byte[] key, byte[] value, String targetTopic, int[] partitions) {
            return (int) (next % 3);
        }
    }));
    prod.setWriteTimestampToKafka(true);
    streamWithTimestamps.addSink(prod).setParallelism(3);
    env.execute("Produce some");
    // ---------- Consume stream from Kafka -------------------
    env = StreamExecutionEnvironment.getExecutionEnvironment();
    env.setParallelism(1);
    env.getConfig().setRestartStrategy(RestartStrategies.noRestart());
    FlinkKafkaConsumer<Long> kafkaSource = new FlinkKafkaConsumer<>(topic, new KafkaITCase.LimitedLongDeserializer(), standardProps);
    kafkaSource.assignTimestampsAndWatermarks(new AssignerWithPunctuatedWatermarks<Long>() {

        private static final long serialVersionUID = -4834111173247835189L;

        @Nullable
        @Override
        public Watermark checkAndGetNextWatermark(Long lastElement, long extractedTimestamp) {
            if (lastElement % 11 == 0) {
                return new Watermark(lastElement);
            }
            return null;
        }

        @Override
        public long extractTimestamp(Long element, long previousElementTimestamp) {
            return previousElementTimestamp;
        }
    });
    DataStream<Long> stream = env.addSource(kafkaSource);
    GenericTypeInfo<Object> objectTypeInfo = new GenericTypeInfo<>(Object.class);
    stream.transform("timestamp validating operator", objectTypeInfo, new TimestampValidatingOperator()).setParallelism(1);
    env.execute("Consume again");
    deleteTestTopic(topic);
}
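
The anonymous partitioner above routes each record by its value modulo the partition count. For comparison, here is a minimal standalone partitioner in the same style; the class name and the key-hashing fallback strategy are illustrative assumptions, not taken from the Flink sources:

import java.util.Arrays;

import org.apache.flink.streaming.connectors.kafka.partitioner.FlinkKafkaPartitioner;

// Hypothetical example: route records by hashing the serialized key,
// falling back to round-robin when no key is present.
public class KeyHashPartitioner<T> extends FlinkKafkaPartitioner<T> {

    private static final long serialVersionUID = 1L;

    private int nextFallbackPartition;

    @Override
    public int partition(T record, byte[] key, byte[] value, String targetTopic, int[] partitions) {
        if (key != null) {
            // Math.floorMod keeps the index non-negative for negative hash codes.
            return partitions[Math.floorMod(Arrays.hashCode(key), partitions.length)];
        }
        nextFallbackPartition = (nextFallbackPartition + 1) % partitions.length;
        return partitions[nextFallbackPartition];
    }
}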

Example 2 with FlinkKafkaPartitioner

Use of org.apache.flink.streaming.connectors.kafka.partitioner.FlinkKafkaPartitioner in project flink by apache.

From the class FlinkKafkaProducer, method open.

// ----------------------------------- Utilities --------------------------
/**
 * Initializes the connection to Kafka.
 */
@Override
public void open(Configuration configuration) throws Exception {
    if (logFailuresOnly) {
        callback = new Callback() {

            @Override
            public void onCompletion(RecordMetadata metadata, Exception e) {
                if (e != null) {
                    LOG.error("Error while sending record to Kafka: " + e.getMessage(), e);
                }
                acknowledgeMessage();
            }
        };
    } else {
        callback = new Callback() {

            @Override
            public void onCompletion(RecordMetadata metadata, Exception exception) {
                if (exception != null && asyncException == null) {
                    asyncException = exception;
                }
                acknowledgeMessage();
            }
        };
    }
    RuntimeContext ctx = getRuntimeContext();
    if (flinkKafkaPartitioner != null) {
        flinkKafkaPartitioner.open(ctx.getIndexOfThisSubtask(), ctx.getNumberOfParallelSubtasks());
    }
    if (kafkaSchema instanceof KafkaContextAware) {
        KafkaContextAware<IN> contextAwareSchema = (KafkaContextAware<IN>) kafkaSchema;
        contextAwareSchema.setParallelInstanceId(ctx.getIndexOfThisSubtask());
        contextAwareSchema.setNumParallelInstances(ctx.getNumberOfParallelSubtasks());
    }
    if (kafkaSchema != null) {
        kafkaSchema.open(RuntimeContextInitializationContextAdapters.serializationAdapter(getRuntimeContext(), metricGroup -> metricGroup.addGroup("user")));
    }
    super.open(configuration);
}
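
The two callback branches above are selected by the logFailuresOnly flag. A minimal sketch of setting it from user code; the topic name and broker address are placeholder assumptions:

import java.util.Properties;

import org.apache.flink.api.common.serialization.SimpleStringSchema;
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaProducer;

public class LogFailuresOnlyExample {
    public static void main(String[] args) {
        Properties props = new Properties();
        // Assumption: a Kafka broker reachable at localhost:9092.
        props.setProperty("bootstrap.servers", "localhost:9092");

        FlinkKafkaProducer<String> producer =
                new FlinkKafkaProducer<>("output-topic", new SimpleStringSchema(), props);

        // false (the default): a send failure is stored in asyncException and rethrown
        // later, failing the job (the second callback branch in open() above).
        // true: the failure is only logged and the record is acknowledged (the first branch).
        producer.setLogFailuresOnly(false);

        // The producer would then be attached as a sink: stream.addSink(producer);
    }
}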

Example 3 with FlinkKafkaPartitioner

Use of org.apache.flink.streaming.connectors.kafka.partitioner.FlinkKafkaPartitioner in project flink by apache.

From the class FlinkKafkaProducerBase, method open.

// ----------------------------------- Utilities --------------------------
/**
 * Initializes the connection to Kafka.
 */
@Override
public void open(Configuration configuration) throws Exception {
    if (schema instanceof KeyedSerializationSchemaWrapper) {
        ((KeyedSerializationSchemaWrapper<IN>) schema).getSerializationSchema().open(RuntimeContextInitializationContextAdapters.serializationAdapter(getRuntimeContext(), metricGroup -> metricGroup.addGroup("user")));
    }
    producer = getKafkaProducer(this.producerConfig);
    RuntimeContext ctx = getRuntimeContext();
    if (null != flinkKafkaPartitioner) {
        flinkKafkaPartitioner.open(ctx.getIndexOfThisSubtask(), ctx.getNumberOfParallelSubtasks());
    }
    LOG.info("Starting FlinkKafkaProducer ({}/{}) to produce into default topic {}", ctx.getIndexOfThisSubtask() + 1, ctx.getNumberOfParallelSubtasks(), defaultTopicId);
    // register Kafka metrics to Flink accumulators
    if (!Boolean.parseBoolean(producerConfig.getProperty(KEY_DISABLE_METRICS, "false"))) {
        Map<MetricName, ? extends Metric> metrics = this.producer.metrics();
        if (metrics == null) {
            // MapR's Kafka implementation returns null here.
            LOG.info("Producer implementation does not support metrics");
        } else {
            final MetricGroup kafkaMetricGroup = getRuntimeContext().getMetricGroup().addGroup("KafkaProducer");
            for (Map.Entry<MetricName, ? extends Metric> metric : metrics.entrySet()) {
                kafkaMetricGroup.gauge(metric.getKey().name(), new KafkaMetricWrapper(metric.getValue()));
            }
        }
    }
    if (flushOnCheckpoint && !((StreamingRuntimeContext) this.getRuntimeContext()).isCheckpointingEnabled()) {
        LOG.warn("Flushing on checkpoint is enabled, but checkpointing is not enabled. Disabling flushing.");
        flushOnCheckpoint = false;
    }
    if (logFailuresOnly) {
        callback = new Callback() {

            @Override
            public void onCompletion(RecordMetadata metadata, Exception e) {
                if (e != null) {
                    LOG.error("Error while sending record to Kafka: " + e.getMessage(), e);
                }
                acknowledgeMessage();
            }
        };
    } else {
        callback = new Callback() {

            @Override
            public void onCompletion(RecordMetadata metadata, Exception exception) {
                if (exception != null && asyncException == null) {
                    asyncException = exception;
                }
                acknowledgeMessage();
            }
        };
    }
}
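
The metrics registration above can be switched off through the producer configuration. A small sketch, assuming the KEY_DISABLE_METRICS constant still resolves to the property name "flink.disable-metrics":

import java.util.Properties;

public class DisableKafkaMetricsExample {
    public static void main(String[] args) {
        Properties producerConfig = new Properties();
        producerConfig.setProperty("bootstrap.servers", "localhost:9092"); // placeholder broker
        // Assumption: KEY_DISABLE_METRICS == "flink.disable-metrics". With "true", the guard
        // !Boolean.parseBoolean(producerConfig.getProperty(KEY_DISABLE_METRICS, "false"))
        // in open() fails, and no Kafka producer metrics are registered with Flink.
        producerConfig.setProperty("flink.disable-metrics", "true");
    }
}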

Example 4 with FlinkKafkaPartitioner

Use of org.apache.flink.streaming.connectors.kafka.partitioner.FlinkKafkaPartitioner in project flink by apache.

From the class KafkaConnectorOptionsUtil, method initializePartitioner.

/**
 * Instantiates a FlinkKafkaPartitioner from the given class name, validating that the
 * class extends FlinkKafkaPartitioner.
 */
private static <T> FlinkKafkaPartitioner<T> initializePartitioner(String name, ClassLoader classLoader) {
    try {
        Class<?> clazz = Class.forName(name, true, classLoader);
        if (!FlinkKafkaPartitioner.class.isAssignableFrom(clazz)) {
            throw new ValidationException(String.format("Sink partitioner class '%s' should extend from the required class %s", name, FlinkKafkaPartitioner.class.getName()));
        }
        @SuppressWarnings("unchecked") final FlinkKafkaPartitioner<T> kafkaPartitioner = InstantiationUtil.instantiate(name, FlinkKafkaPartitioner.class, classLoader);
        return kafkaPartitioner;
    } catch (ClassNotFoundException | FlinkException e) {
        throw new ValidationException(String.format("Could not find and instantiate partitioner class '%s'", name), e);
    }
}
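
This helper backs the Kafka table connector's 'sink.partitioner' option: besides built-in values such as 'fixed' and 'round-robin', the option accepts a fully qualified class name, which is resolved through initializePartitioner. A hedged DDL sketch embedded in Java; com.example.MyPartitioner and the connection settings are hypothetical:

import org.apache.flink.table.api.EnvironmentSettings;
import org.apache.flink.table.api.TableEnvironment;

public class CustomSinkPartitionerDdl {
    public static void main(String[] args) {
        TableEnvironment tEnv =
                TableEnvironment.create(EnvironmentSettings.newInstance().inStreamingMode().build());

        // A class name in 'sink.partitioner' must extend FlinkKafkaPartitioner,
        // otherwise initializePartitioner() throws a ValidationException.
        tEnv.executeSql(
                "CREATE TABLE kafka_sink (id BIGINT, payload STRING) WITH ("
                        + " 'connector' = 'kafka',"
                        + " 'topic' = 'output-topic',"
                        + " 'properties.bootstrap.servers' = 'localhost:9092',"
                        + " 'format' = 'json',"
                        + " 'sink.partitioner' = 'com.example.MyPartitioner')");
    }
}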

Example 5 with FlinkKafkaPartitioner

Use of org.apache.flink.streaming.connectors.kafka.partitioner.FlinkKafkaPartitioner in project flink by apache.

From the class KafkaProducerTestBase, method testExactlyOnce.

/**
 * This test configures the KafkaProducer to automatically flush its data, then fails the
 * broker to check that records flushed since the last checkpoint are not duplicated.
 */
protected void testExactlyOnce(boolean regularSink, int sinksCount) throws Exception {
    final String topic = (regularSink ? "exactlyOnceTopicRegularSink" : "exactlyTopicCustomOperator") + sinksCount;
    final int partition = 0;
    final int numElements = 1000;
    final int failAfterElements = 333;
    for (int i = 0; i < sinksCount; i++) {
        createTestTopic(topic + i, 1, 1);
    }
    TypeInformationSerializationSchema<Integer> schema = new TypeInformationSerializationSchema<>(BasicTypeInfo.INT_TYPE_INFO, new ExecutionConfig());
    StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    env.enableCheckpointing(500);
    env.setParallelism(1);
    env.setRestartStrategy(RestartStrategies.fixedDelayRestart(1, 0));
    Properties properties = new Properties();
    properties.putAll(standardProps);
    properties.putAll(secureProps);
    // process exactly failAfterElements number of elements and then shutdown Kafka broker and
    // fail application
    List<Integer> expectedElements = getIntegersSequence(numElements);
    DataStream<Integer> inputStream = env.addSource(new IntegerSource(numElements)).map(new FailingIdentityMapper<Integer>(failAfterElements));
    for (int i = 0; i < sinksCount; i++) {
        FlinkKafkaPartitioner<Integer> partitioner = new FlinkKafkaPartitioner<Integer>() {

            @Override
            public int partition(Integer record, byte[] key, byte[] value, String targetTopic, int[] partitions) {
                return partition;
            }
        };
        if (regularSink) {
            StreamSink<Integer> kafkaSink = kafkaServer.getProducerSink(topic + i, schema, properties, partitioner);
            inputStream.addSink(kafkaSink.getUserFunction());
        } else {
            kafkaServer.produceIntoKafka(inputStream, topic + i, schema, properties, partitioner);
        }
    }
    FailingIdentityMapper.failedBefore = false;
    TestUtils.tryExecute(env, "Exactly once test");
    for (int i = 0; i < sinksCount; i++) {
        // assert that before failure we successfully snapshot/flushed all expected elements
        assertExactlyOnceForTopic(properties, topic + i, expectedElements);
        deleteTestTopic(topic + i);
    }
}
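
The anonymous partitioner here pins every record to partition 0. Outside of tests, the bundled FlinkFixedPartitioner achieves similar pinning by assigning each parallel sink subtask to a single partition. A usage sketch reusing the constructor shape from Example 1; the topic and broker settings are placeholders:

import java.util.Optional;
import java.util.Properties;

import org.apache.flink.api.common.ExecutionConfig;
import org.apache.flink.api.common.serialization.TypeInformationSerializationSchema;
import org.apache.flink.api.common.typeinfo.BasicTypeInfo;
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaProducer;
import org.apache.flink.streaming.connectors.kafka.internals.KeyedSerializationSchemaWrapper;
import org.apache.flink.streaming.connectors.kafka.partitioner.FlinkFixedPartitioner;

public class FixedPartitionerExample {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.setProperty("bootstrap.servers", "localhost:9092"); // placeholder broker

        TypeInformationSerializationSchema<Integer> schema =
                new TypeInformationSerializationSchema<>(
                        BasicTypeInfo.INT_TYPE_INFO, new ExecutionConfig());

        // With parallelism 1, FlinkFixedPartitioner writes everything to one
        // partition, much like the pinned partitioner in the test above.
        FlinkKafkaProducer<Integer> producer = new FlinkKafkaProducer<>(
                "output-topic",
                new KeyedSerializationSchemaWrapper<>(schema),
                props,
                Optional.of(new FlinkFixedPartitioner<Integer>()));

        // The producer would then be attached as a sink: stream.addSink(producer);
    }
}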
