Use of org.apache.flink.connector.base.DeliveryGuarantee in project flink by apache.
From the class PulsarSinkBuilder, method build().
/**
* Build the {@link PulsarSink}.
*
* @return a PulsarSink with the settings made for this builder.
*/
public PulsarSink<IN> build() {
    // Change delivery guarantee.
    DeliveryGuarantee deliveryGuarantee = configBuilder.get(PULSAR_WRITE_DELIVERY_GUARANTEE);
    if (deliveryGuarantee == DeliveryGuarantee.NONE) {
        LOG.warn("You haven't set delivery guarantee or set it to NONE, this would cause data loss. Make sure you have known this shortcoming.");
    } else if (deliveryGuarantee == DeliveryGuarantee.EXACTLY_ONCE) {
        LOG.info("Exactly once require flink checkpoint and your pulsar cluster should support the transaction.");
        configBuilder.override(PULSAR_ENABLE_TRANSACTION, true);
        configBuilder.override(PULSAR_SEND_TIMEOUT_MS, 0L);

        if (!configBuilder.contains(PULSAR_WRITE_TRANSACTION_TIMEOUT)) {
            LOG.warn("The default pulsar transaction timeout is 3 hours, make sure it was greater than your checkpoint interval.");
        } else {
            Long timeout = configBuilder.get(PULSAR_WRITE_TRANSACTION_TIMEOUT);
            LOG.warn("The configured transaction timeout is {} mille seconds, make sure it was greater than your checkpoint interval.", timeout);
        }
    }

    if (!configBuilder.contains(PULSAR_PRODUCER_NAME)) {
        LOG.warn("We recommend set a readable producer name through setProducerName(String) in production mode.");
    }

    checkNotNull(serializationSchema, "serializationSchema must be set.");
    if (serializationSchema instanceof PulsarSchemaWrapper
            && !Boolean.TRUE.equals(configBuilder.get(PULSAR_WRITE_SCHEMA_EVOLUTION))) {
        LOG.info("It seems like you want to send message in Pulsar Schema."
                + " You can enableSchemaEvolution for using this feature."
                + " We would use Schema.BYTES as the default schema if you don't enable this option.");
    }

    // Topic metadata listener validation.
    if (metadataListener == null) {
        if (topicRouter == null) {
            throw new NullPointerException("No topic names or custom topic router are provided.");
        } else {
            LOG.warn("No topic set has been provided, make sure your custom topic router support empty topic set.");
            this.metadataListener = new TopicMetadataListener();
        }
    }

    // Topic routing mode validation.
    if (topicRoutingMode == null) {
        LOG.info("No topic routing mode has been chosen. We use round-robin mode as default.");
        this.topicRoutingMode = TopicRoutingMode.ROUND_ROBIN;
    }

    if (messageDelayer == null) {
        this.messageDelayer = MessageDelayer.never();
    }

    // This is an unmodifiable configuration for Pulsar.
    // We don't use Pulsar's built-in configuration classes for compatibility reasons.
    SinkConfiguration sinkConfiguration =
            configBuilder.build(SINK_CONFIG_VALIDATOR, SinkConfiguration::new);

    return new PulsarSink<>(
            sinkConfiguration,
            serializationSchema,
            metadataListener,
            topicRoutingMode,
            topicRouter,
            messageDelayer);
}
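For context, the sketch below (not taken from the Flink sources) shows how an application might drive this builder with EXACTLY_ONCE delivery, which is the path where build() enables transactions and zeroes the send timeout. The service URL, admin URL, topic, producer name and timeout value are placeholders, and the setConfig call with PulsarSinkOptions.PULSAR_WRITE_TRANSACTION_TIMEOUT is assumed to be available in the same connector version.

import org.apache.flink.api.common.serialization.SimpleStringSchema;
import org.apache.flink.connector.base.DeliveryGuarantee;
import org.apache.flink.connector.pulsar.sink.PulsarSink;
import org.apache.flink.connector.pulsar.sink.PulsarSinkOptions;
import org.apache.flink.connector.pulsar.sink.writer.serializer.PulsarSerializationSchema;

public class PulsarExactlyOnceSinkExample {

    // Sketch only: endpoints, topic, producer name and timeout are illustrative placeholders.
    static PulsarSink<String> exactlyOnceSink() {
        return PulsarSink.builder()
                .setServiceUrl("pulsar://localhost:6650")
                .setAdminUrl("http://localhost:8080")
                .setTopics("persistent://public/default/output-topic")
                .setProducerName("my-flink-producer")
                .setSerializationSchema(PulsarSerializationSchema.flinkSchema(new SimpleStringSchema()))
                // EXACTLY_ONCE makes build() enable transactions and set the send timeout to 0.
                .setDeliveryGuarantee(DeliveryGuarantee.EXACTLY_ONCE)
                // Keep the transaction timeout above the checkpoint interval, as build() warns (10 minutes here).
                .setConfig(PulsarSinkOptions.PULSAR_WRITE_TRANSACTION_TIMEOUT, 600000L)
                .build();
    }
}

The resulting sink would then be attached with stream.sinkTo(sink), with checkpointing enabled so that the transactions opened per checkpoint can be committed.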
Use of org.apache.flink.connector.base.DeliveryGuarantee in project flink by apache.
From the class TopicProducerRegister, method createMessageBuilder().
/**
 * Create a TypedMessageBuilder which could be sent to Pulsar directly. First, we create a
 * topic-related producer or reuse a cached one. Then we try to find a topic-related
 * transaction, generating a new transaction instance if none exists. Finally, we create the
 * message builder and put the element into it.
 */
public <T> TypedMessageBuilder<T> createMessageBuilder(String topic, Schema<T> schema) {
    Producer<T> producer = getOrCreateProducer(topic, schema);
    DeliveryGuarantee deliveryGuarantee = sinkConfiguration.getDeliveryGuarantee();

    if (deliveryGuarantee == DeliveryGuarantee.EXACTLY_ONCE) {
        Transaction transaction = getOrCreateTransaction(topic);
        return producer.newMessage(transaction);
    } else {
        return producer.newMessage();
    }
}
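As a hedged illustration (again not from the Flink sources) of how the returned builder is typically consumed, the sketch below sets the element as the message value and sends it asynchronously; the topic name, the String schema and the register parameter are placeholder assumptions standing in for what the sink writer actually holds.

import java.util.concurrent.CompletableFuture;

import org.apache.pulsar.client.api.MessageId;
import org.apache.pulsar.client.api.Schema;
import org.apache.pulsar.client.api.TypedMessageBuilder;

// Sketch only: "register" stands for the TopicProducerRegister instance owned by the sink writer,
// and the topic name is a placeholder.
CompletableFuture<MessageId> writeOne(TopicProducerRegister register, String element) {
    TypedMessageBuilder<String> builder =
            register.createMessageBuilder("persistent://public/default/output-topic", Schema.STRING);
    // Under EXACTLY_ONCE the message joins the per-topic transaction created above;
    // otherwise it is a plain message on the cached producer.
    return builder.value(element).sendAsync();
}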
Use of org.apache.flink.connector.base.DeliveryGuarantee in project flink by apache.
From the class KafkaDynamicTableFactory, method createDynamicTableSink().
@Override
public DynamicTableSink createDynamicTableSink(Context context) {
    final TableFactoryHelper helper = FactoryUtil.createTableFactoryHelper(this, autoCompleteSchemaRegistrySubject(context));
    final Optional<EncodingFormat<SerializationSchema<RowData>>> keyEncodingFormat = getKeyEncodingFormat(helper);
    final EncodingFormat<SerializationSchema<RowData>> valueEncodingFormat = getValueEncodingFormat(helper);

    helper.validateExcept(PROPERTIES_PREFIX);

    final ReadableConfig tableOptions = helper.getOptions();
    final DeliveryGuarantee deliveryGuarantee = validateDeprecatedSemantic(tableOptions);
    validateTableSinkOptions(tableOptions);
    KafkaConnectorOptionsUtil.validateDeliveryGuarantee(tableOptions);
    validatePKConstraints(context.getObjectIdentifier(), context.getPrimaryKeyIndexes(), context.getCatalogTable().getOptions(), valueEncodingFormat);

    final DataType physicalDataType = context.getPhysicalRowDataType();
    final int[] keyProjection = createKeyFormatProjection(tableOptions, physicalDataType);
    final int[] valueProjection = createValueFormatProjection(tableOptions, physicalDataType);
    final String keyPrefix = tableOptions.getOptional(KEY_FIELDS_PREFIX).orElse(null);
    final Integer parallelism = tableOptions.getOptional(SINK_PARALLELISM).orElse(null);

    return createKafkaTableSink(
            physicalDataType,
            keyEncodingFormat.orElse(null),
            valueEncodingFormat,
            keyProjection,
            valueProjection,
            keyPrefix,
            tableOptions.get(TOPIC).get(0),
            getKafkaProperties(context.getCatalogTable().getOptions()),
            getFlinkKafkaPartitioner(tableOptions, context.getClassLoader()).orElse(null),
            deliveryGuarantee,
            parallelism,
            tableOptions.get(TRANSACTIONAL_ID_PREFIX));
}
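To show where the DeliveryGuarantee handled by this factory usually comes from, the hedged sketch below (not from the Flink sources) creates a Kafka sink table whose 'sink.delivery-guarantee' and 'sink.transactional-id-prefix' options are resolved by createDynamicTableSink above; the table name, topic, bootstrap servers, format and id prefix are placeholders.

import org.apache.flink.table.api.EnvironmentSettings;
import org.apache.flink.table.api.TableEnvironment;

public class KafkaExactlyOnceTableSinkExample {

    public static void main(String[] args) {
        TableEnvironment tableEnv = TableEnvironment.create(EnvironmentSettings.inStreamingMode());
        // Placeholder DDL: the WITH options are validated and mapped by KafkaDynamicTableFactory.
        tableEnv.executeSql(
                "CREATE TABLE kafka_sink (\n"
                        + "  id BIGINT,\n"
                        + "  name STRING\n"
                        + ") WITH (\n"
                        + "  'connector' = 'kafka',\n"
                        + "  'topic' = 'output-topic',\n"
                        + "  'properties.bootstrap.servers' = 'localhost:9092',\n"
                        + "  'format' = 'json',\n"
                        + "  'sink.delivery-guarantee' = 'exactly-once',\n"
                        + "  'sink.transactional-id-prefix' = 'my-app'\n"
                        + ")");
    }
}

When such a table is the target of an INSERT INTO with checkpointing enabled, the 'exactly-once' value above is the DeliveryGuarantee that ends up being passed to createKafkaTableSink.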