
Example 11 with ProducerFencedException

use of org.apache.kafka.common.errors.ProducerFencedException in project hive by apache.

the class TransactionalKafkaWriter method checkExceptions.

/**
 * Checks for a pending send exception. If one exists, aborts the transaction when possible,
 * closes the producer, and rethrows the exception as an IOException.
 * @throws IOException wrapping the original send exception.
 */
private void checkExceptions() throws IOException {
    if (sendExceptionRef.get() != null && sendExceptionRef.get() instanceof KafkaException && sendExceptionRef.get().getCause() instanceof ProducerFencedException) {
        // producer.send() may throw a KafkaException that wraps a ProducerFencedException; unwrap and rethrow its inner cause.
        sendExceptionRef.updateAndGet(e -> (KafkaException) e.getCause());
    }
    if (sendExceptionRef.get() != null) {
        final Exception exception = sendExceptionRef.get();
        logHints(exception);
        if (tryToAbortTx(exception)) {
            LOG.error("Aborting Transaction [{}] cause by ERROR [{}]", writerIdTopicId, exception.getMessage());
            producer.abortTransaction();
        }
        LOG.error("Closing writer [{}] caused by ERROR [{}]", writerIdTopicId, exception.getMessage());
        producer.close(0, TimeUnit.MILLISECONDS);
        throw new IOException(exception);
    }
}
Also used : KafkaException(org.apache.kafka.common.KafkaException) IOException(java.io.IOException) TimeoutException(org.apache.kafka.common.errors.TimeoutException) OutOfOrderSequenceException(org.apache.kafka.common.errors.OutOfOrderSequenceException) ProducerFencedException(org.apache.kafka.common.errors.ProducerFencedException) AuthenticationException(org.apache.kafka.common.errors.AuthenticationException)
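
For context, here is a minimal, self-contained sketch (not part of the Hive source) of how the wrapped fencing error handled above typically arises: a second producer registering the same transactional.id bumps the producer epoch and fences the first one. The broker address and topic name are placeholder assumptions.

import java.util.Properties;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.KafkaException;
import org.apache.kafka.common.errors.ProducerFencedException;
import org.apache.kafka.common.serialization.ByteArraySerializer;

public class FencingDemo {

    public static void main(String[] args) {
        Properties props = new Properties();
        // Placeholder broker address.
        props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
        props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class.getName());
        props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class.getName());
        props.put(ProducerConfig.TRANSACTIONAL_ID_CONFIG, "demo-txn-id");

        KafkaProducer<byte[], byte[]> first = new KafkaProducer<>(props);
        first.initTransactions();
        first.beginTransaction();

        // A second producer registering the same transactional.id bumps the
        // producer epoch on the broker and fences the first producer.
        KafkaProducer<byte[], byte[]> second = new KafkaProducer<>(props);
        second.initTransactions();

        try {
            first.send(new ProducerRecord<>("demo-topic", new byte[0]));
            first.commitTransaction();
        } catch (ProducerFencedException e) {
            // commitTransaction() throws the fencing error directly.
            System.out.println("Fenced directly: " + e);
        } catch (KafkaException e) {
            // send() may instead surface the fencing wrapped in a KafkaException,
            // which is exactly the case checkExceptions() unwraps above.
            if (e.getCause() instanceof ProducerFencedException) {
                System.out.println("Fenced, wrapped: " + e.getCause());
            }
        } finally {
            first.close();
            second.close();
        }
    }
}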

Example 12 with ProducerFencedException

use of org.apache.kafka.common.errors.ProducerFencedException in project hono by eclipse.

the class CachingKafkaProducerFactoryTest method testIsFatalError.

/**
 * Verifies that {@link CachingKafkaProducerFactory#isFatalError(Throwable)} returns true for the expected exception
 * types.
 */
@Test
public void testIsFatalError() {
    assertThat(CachingKafkaProducerFactory.isFatalError(new ProducerFencedException("test"))).isTrue();
    assertThat(CachingKafkaProducerFactory.isFatalError(new OutOfOrderSequenceException("test"))).isTrue();
    assertThat(CachingKafkaProducerFactory.isFatalError(new AuthorizationException("test"))).isTrue();
    assertThat(CachingKafkaProducerFactory.isFatalError(new UnsupportedVersionException("test"))).isTrue();
    assertThat(CachingKafkaProducerFactory.isFatalError(new UnsupportedForMessageFormatException("test"))).isTrue();
    assertThat(CachingKafkaProducerFactory.isFatalError(new KafkaException("test"))).isFalse();
}
Also used : UnsupportedForMessageFormatException(org.apache.kafka.common.errors.UnsupportedForMessageFormatException) AuthorizationException(org.apache.kafka.common.errors.AuthorizationException) KafkaException(org.apache.kafka.common.KafkaException) ProducerFencedException(org.apache.kafka.common.errors.ProducerFencedException) OutOfOrderSequenceException(org.apache.kafka.common.errors.OutOfOrderSequenceException) UnsupportedVersionException(org.apache.kafka.common.errors.UnsupportedVersionException) Test(org.junit.jupiter.api.Test)
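
The implementation under test is not shown on this page; the following is a hypothetical sketch reconstructed purely from the assertions above, not necessarily Hono's actual code:

import org.apache.kafka.common.errors.AuthorizationException;
import org.apache.kafka.common.errors.OutOfOrderSequenceException;
import org.apache.kafka.common.errors.ProducerFencedException;
import org.apache.kafka.common.errors.UnsupportedForMessageFormatException;
import org.apache.kafka.common.errors.UnsupportedVersionException;

public final class FatalErrorCheck {

    private FatalErrorCheck() {
    }

    /**
     * Returns true for exception types that indicate the producer instance is
     * unusable and must be closed and recreated rather than retried.
     */
    public static boolean isFatalError(final Throwable error) {
        return error instanceof ProducerFencedException
                || error instanceof OutOfOrderSequenceException
                || error instanceof AuthorizationException
                || error instanceof UnsupportedVersionException
                || error instanceof UnsupportedForMessageFormatException;
    }
}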

Example 13 with ProducerFencedException

use of org.apache.kafka.common.errors.ProducerFencedException in project hazelcast by hazelcast.

the class WriteKafkaP method init.

@Override
public void init(@Nonnull Outbox outbox, @Nonnull Context context) {
    this.context = context;
    ProcessingGuarantee guarantee = context.processingGuarantee() == EXACTLY_ONCE && !exactlyOnce ? AT_LEAST_ONCE : context.processingGuarantee();
    snapshotUtility = new TransactionPoolSnapshotUtility<>(outbox, context, false, guarantee, TXN_POOL_SIZE,
            (processorIndex, txnIndex) -> new KafkaTransactionId(
                    context.jobId(), context.jobConfig().getName(), context.vertexName(), processorIndex, txnIndex),
            txnId -> {
        if (txnId != null) {
            properties.put("transactional.id", txnId.getKafkaId());
        }
        return new KafkaTransaction<>(txnId, properties, context.logger());
    }, txnId -> {
        try {
            recoverTransaction(txnId, true);
        } catch (ProducerFencedException e) {
            context.logger().warning("Failed to finish the commit of a transaction ID saved in the " + "snapshot, data loss can occur. Transaction id: " + txnId.getKafkaId(), e);
        }
    }, txnId -> recoverTransaction(txnId, false));
}
Also used : ProcessingGuarantee(com.hazelcast.jet.config.ProcessingGuarantee) LoggingUtil(com.hazelcast.jet.impl.util.LoggingUtil) ProducerRecord(org.apache.kafka.clients.producer.ProducerRecord) Outbox(com.hazelcast.jet.core.Outbox) Processor(com.hazelcast.jet.core.Processor) HashMap(java.util.HashMap) KafkaProcessors(com.hazelcast.jet.kafka.KafkaProcessors) ExceptionUtil.sneakyThrow(com.hazelcast.jet.impl.util.ExceptionUtil.sneakyThrow) AtomicReference(java.util.concurrent.atomic.AtomicReference) Function(java.util.function.Function) Watermark(com.hazelcast.jet.core.Watermark) KafkaProducer(org.apache.kafka.clients.producer.KafkaProducer) ILogger(com.hazelcast.logging.ILogger) Map(java.util.Map) Inbox(com.hazelcast.jet.core.Inbox) Nonnull(javax.annotation.Nonnull) FunctionEx(com.hazelcast.function.FunctionEx) TransactionalResource(com.hazelcast.jet.impl.processor.TwoPhaseSnapshotCommitUtility.TransactionalResource) TimeoutException(org.apache.kafka.common.errors.TimeoutException) Properties(java.util.Properties) TransactionPoolSnapshotUtility(com.hazelcast.jet.impl.processor.TransactionPoolSnapshotUtility) EXACTLY_ONCE(com.hazelcast.jet.config.ProcessingGuarantee.EXACTLY_ONCE) SupplierEx(com.hazelcast.function.SupplierEx) Serializable(java.io.Serializable) Objects(java.util.Objects) Util.idToString(com.hazelcast.jet.Util.idToString) InvalidTxnStateException(org.apache.kafka.common.errors.InvalidTxnStateException) ProducerFencedException(org.apache.kafka.common.errors.ProducerFencedException) Callback(org.apache.kafka.clients.producer.Callback) AT_LEAST_ONCE(com.hazelcast.jet.config.ProcessingGuarantee.AT_LEAST_ONCE) TwoPhaseSnapshotCommitUtility(com.hazelcast.jet.impl.processor.TwoPhaseSnapshotCommitUtility)
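
For context, a simplified sketch (not the Hazelcast Jet code) of the pattern init() wires up above: every pooled transaction gets its own producer whose transactional.id is derived deterministically from the job, vertex, processor index, and transaction index, so a restarted job can resume or abort its own transactions without fencing its siblings. Class and parameter names here are illustrative.

import java.util.Properties;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerConfig;

public final class TransactionalProducerFactory {

    private TransactionalProducerFactory() {
    }

    // "base" is assumed to already contain bootstrap.servers and the
    // key/value serializer settings; all other names are illustrative.
    public static KafkaProducer<byte[], byte[]> create(Properties base, long jobId,
            String vertexName, int processorIndex, int txnIndex) {
        Properties props = new Properties();
        props.putAll(base);
        // A deterministic id: a restarted job reclaims exactly its own
        // transactions, while two live processors never share an id and
        // therefore never fence each other.
        props.put(ProducerConfig.TRANSACTIONAL_ID_CONFIG,
                jobId + "-" + vertexName + "-" + processorIndex + "-" + txnIndex);
        KafkaProducer<byte[], byte[]> producer = new KafkaProducer<>(props);
        // Registers the id with the broker and fences any zombie producer
        // left over from a previous execution with the same id.
        producer.initTransactions();
        return producer;
    }
}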

Example 14 with ProducerFencedException

use of org.apache.kafka.common.errors.ProducerFencedException in project hive by apache.

the class KafkaStorageHandler method commitInsertTable.

@Override
public void commitInsertTable(Table table, boolean overwrite) throws MetaException {
    boolean isExactlyOnce = table.getParameters().get(KafkaTableProperties.WRITE_SEMANTIC_PROPERTY.getName()).equals(KafkaOutputFormat.WriteSemantic.EXACTLY_ONCE.name());
    String optimisticCommitVal = table.getParameters().get(KafkaTableProperties.HIVE_KAFKA_OPTIMISTIC_COMMIT.getName());
    boolean isTwoPhaseCommit = !Boolean.parseBoolean(optimisticCommitVal);
    if (!isExactlyOnce || !isTwoPhaseCommit) {
        // Not a two-phase commit, therefore no open transaction to handle.
        return;
    }
    final Path queryWorkingDir = getQueryWorkingDir(table);
    final Map<String, Pair<Long, Short>> transactionsMap;
    final int maxTries = Integer.parseInt(table.getParameters().get(KafkaTableProperties.MAX_RETRIES.getName()));
    // We have 4 stages ahead of us:
    // 1 Fetch the transactions state from HDFS.
    // 2 Build/init all the Kafka producers and perform a pre-commit call to check if we can go ahead with the commit.
    // 3 Commit the transactions one by one.
    // 4 Clean the working directory.
    // First stage: fetch the transaction states.
    final RetryUtils.Task<Map<String, Pair<Long, Short>>> fetchTransactionStates = new RetryUtils.Task<Map<String, Pair<Long, Short>>>() {

        @Override
        public Map<String, Pair<Long, Short>> perform() throws Exception {
            return TransactionalKafkaWriter.getTransactionsState(FileSystem.get(getConf()), queryWorkingDir);
        }
    };
    try {
        transactionsMap = RetryUtils.retry(fetchTransactionStates, (error) -> (error instanceof IOException), maxTries);
    } catch (Exception e) {
        // Cannot go further.
        LOG.error("Cannot fetch transaction states due to [{}]", e.getMessage());
        throw new MetaException(e.getMessage());
    }
    // Second Stage Resume Producers and Pre commit
    final Properties baseProducerProps = buildProducerProperties(table);
    final Map<String, HiveKafkaProducer> producersMap = new HashMap<>();
    final RetryUtils.Task<Void> buildProducersTask = new RetryUtils.Task<Void>() {

        @Override
        public Void perform() throws Exception {
            assert producersMap.size() == 0;
            transactionsMap.forEach((key, value) -> {
                // Base producer properties, missing only the transactional id.
                baseProducerProps.setProperty(ProducerConfig.TRANSACTIONAL_ID_CONFIG, key);
                HiveKafkaProducer<byte[], byte[]> producer = new HiveKafkaProducer<>(baseProducerProps);
                producer.resumeTransaction(value.getLeft(), value.getRight());
                // This is a dummy RPC call to ensure that the producer is still resumable and to signal the pre-commit, as per:
                // https://cwiki.apache.org/confluence/display/KAFKA/Transactional+Messaging+in+Kafka#EndPhase
                producer.sendOffsetsToTransaction(ImmutableMap.of(), "__dry_run");
                producersMap.put(key, producer);
            });
            return null;
        }
    };
    RetryUtils.CleanupAfterFailure cleanUpTheMap = new RetryUtils.CleanupAfterFailure() {

        @Override
        public void cleanup() {
            producersMap.forEach((s, producer) -> producer.close(0, TimeUnit.MILLISECONDS));
            producersMap.clear();
        }
    };
    final Predicate<Throwable> isRetryable = (error) -> !KafkaUtils.exceptionIsFatal(error) && !(error instanceof ProducerFencedException);
    try {
        RetryUtils.retry(buildProducersTask, isRetryable, cleanUpTheMap, maxTries, "Error while building producers");
    } catch (Exception e) {
        // Cannot go further.
        LOG.error("Cannot build producers due to [{}]", e.getMessage(), e);
        throw new MetaException(e.getMessage());
    }
    // Third Stage Commit Transactions, this part is the actual critical section.
    // The commit might be retried on error, but keep in mind that in some cases, e.g. an open
    // transaction expiring after the 15 minute timeout, it is not possible to go further.
    final Set<String> committedTx = new HashSet<>();
    final RetryUtils.Task<Void> commitTask = new RetryUtils.Task() {

        @Override
        public Object perform() throws Exception {
            producersMap.forEach((key, producer) -> {
                if (!committedTx.contains(key)) {
                    producer.commitTransaction();
                    committedTx.add(key);
                    producer.close();
                    LOG.info("Committed Transaction [{}]", key);
                }
            });
            return null;
        }
    };
    try {
        RetryUtils.retry(commitTask, isRetryable, maxTries);
    } catch (Exception e) {
        // At this point we are in a funky state if at least one commit happened!! Close the producers and log it.
        producersMap.forEach((key, producer) -> producer.close(0, TimeUnit.MILLISECONDS));
        LOG.error("Commit transaction failed", e);
        if (committedTx.size() > 0) {
            LOG.error("Partial Data Got Commited Some actions need to be Done");
            committedTx.stream().forEach(key -> LOG.error("Transaction [{}] is an orphen commit", key));
        }
        throw new MetaException(e.getMessage());
    }
    // Stage four, clean the Query Directory
    final RetryUtils.Task<Void> cleanQueryDirTask = new RetryUtils.Task<Void>() {

        @Override
        public Void perform() throws Exception {
            cleanWorkingDirectory(queryWorkingDir);
            return null;
        }
    };
    try {
        RetryUtils.retry(cleanQueryDirTask, (error) -> error instanceof IOException, maxTries);
    } catch (Exception e) {
        // just log it
        LOG.error("Faild to clean Query Working Directory [{}] due to [{}]", queryWorkingDir, e.getMessage());
    }
}
Also used : LockType(org.apache.hadoop.hive.metastore.api.LockType) Arrays(java.util.Arrays) TableDesc(org.apache.hadoop.hive.ql.plan.TableDesc) MetaException(org.apache.hadoop.hive.metastore.api.MetaException) FileSystem(org.apache.hadoop.fs.FileSystem) URISyntaxException(java.net.URISyntaxException) LoggerFactory(org.slf4j.LoggerFactory) HashMap(java.util.HashMap) AbstractSerDe(org.apache.hadoop.hive.serde2.AbstractSerDe) Utilities(org.apache.hadoop.hive.ql.exec.Utilities) HashSet(java.util.HashSet) Pair(org.apache.commons.lang3.tuple.Pair) ByteArraySerializer(org.apache.kafka.common.serialization.ByteArraySerializer) UserGroupInformation(org.apache.hadoop.security.UserGroupInformation) Configuration(org.apache.hadoop.conf.Configuration) Map(java.util.Map) InputFormat(org.apache.hadoop.mapred.InputFormat) Path(org.apache.hadoop.fs.Path) URI(java.net.URI) HiveStorageHandler(org.apache.hadoop.hive.ql.metadata.HiveStorageHandler) ProducerConfig(org.apache.kafka.clients.producer.ProducerConfig) OutputFormat(org.apache.hadoop.mapred.OutputFormat) CommonClientConfigs(org.apache.kafka.clients.CommonClientConfigs) ByteArrayDeserializer(org.apache.kafka.common.serialization.ByteArrayDeserializer) DefaultHiveAuthorizationProvider(org.apache.hadoop.hive.ql.security.authorization.DefaultHiveAuthorizationProvider) Logger(org.slf4j.Logger) Properties(java.util.Properties) ImmutableMap(com.google.common.collect.ImmutableMap) WriteEntity(org.apache.hadoop.hive.ql.hooks.WriteEntity) Predicate(java.util.function.Predicate) HiveConf(org.apache.hadoop.hive.conf.HiveConf) HiveMetaHook(org.apache.hadoop.hive.metastore.HiveMetaHook) Set(java.util.Set) ConsumerConfig(org.apache.kafka.clients.consumer.ConsumerConfig) IOException(java.io.IOException) DefaultHiveMetaHook(org.apache.hadoop.hive.metastore.DefaultHiveMetaHook) Table(org.apache.hadoop.hive.metastore.api.Table) StorageHandlerInfo(org.apache.hadoop.hive.ql.metadata.StorageHandlerInfo) JobConf(org.apache.hadoop.mapred.JobConf) TimeUnit(java.util.concurrent.TimeUnit) HiveCustomStorageHandlerUtils(org.apache.hadoop.hive.ql.security.authorization.HiveCustomStorageHandlerUtils) ProducerFencedException(org.apache.kafka.common.errors.ProducerFencedException) TableType(org.apache.hadoop.hive.metastore.TableType) Preconditions(com.google.common.base.Preconditions) HiveAuthorizationProvider(org.apache.hadoop.hive.ql.security.authorization.HiveAuthorizationProvider)
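
The RetryUtils helper itself is not shown on this page; the following is a generic sketch of the retry-with-predicate pattern reconstructed from the call sites above (Hive's actual RetryUtils offers more, e.g. the cleanup hook):

import java.util.function.Predicate;

public final class SimpleRetry {

    public interface Task<T> {
        T perform() throws Exception;
    }

    private SimpleRetry() {
    }

    public static <T> T retry(Task<T> task, Predicate<Throwable> isRetryable, int maxTries)
            throws Exception {
        for (int attempt = 1; ; attempt++) {
            try {
                return task.perform();
            } catch (Exception e) {
                // Give up immediately on fatal errors (e.g. ProducerFencedException,
                // which the predicate above excludes) or once the budget is spent.
                if (attempt >= maxTries || !isRetryable.test(e)) {
                    throw e;
                }
            }
        }
    }
}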

Example 15 with ProducerFencedException

use of org.apache.kafka.common.errors.ProducerFencedException in project apache-kafka-on-k8s by banzaicloud.

the class StreamTask method commitOffsets.

/**
 * @throws TaskMigratedException if committing offsets failed (non-EOS)
 *                               or if the task producer got fenced (EOS)
 */
private void commitOffsets(final boolean startNewTransaction) {
    try {
        if (commitOffsetNeeded) {
            log.trace("Committing offsets");
            final Map<TopicPartition, OffsetAndMetadata> consumedOffsetsAndMetadata = new HashMap<>(consumedOffsets.size());
            for (final Map.Entry<TopicPartition, Long> entry : consumedOffsets.entrySet()) {
                final TopicPartition partition = entry.getKey();
                final long offset = entry.getValue() + 1;
                consumedOffsetsAndMetadata.put(partition, new OffsetAndMetadata(offset));
                stateMgr.putOffsetLimit(partition, offset);
            }
            if (eosEnabled) {
                producer.sendOffsetsToTransaction(consumedOffsetsAndMetadata, applicationId);
                producer.commitTransaction();
                transactionInFlight = false;
                if (startNewTransaction) {
                    transactionInFlight = true;
                    producer.beginTransaction();
                }
            } else {
                consumer.commitSync(consumedOffsetsAndMetadata);
            }
            commitOffsetNeeded = false;
        } else if (eosEnabled && !startNewTransaction && transactionInFlight) {
            // need to make sure to commit txn for suspend case
            producer.commitTransaction();
            transactionInFlight = false;
        }
    } catch (final CommitFailedException | ProducerFencedException fatal) {
        throw new TaskMigratedException(this, fatal);
    }
}
Also used : HashMap(java.util.HashMap) TopicPartition(org.apache.kafka.common.TopicPartition) OffsetAndMetadata(org.apache.kafka.clients.consumer.OffsetAndMetadata) Map(java.util.Map) CommitFailedException(org.apache.kafka.clients.consumer.CommitFailedException) ProducerFencedException(org.apache.kafka.common.errors.ProducerFencedException) TaskMigratedException(org.apache.kafka.streams.errors.TaskMigratedException)
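
For context, a minimal standalone sketch of the exactly-once consume-transform-produce loop that commitOffsets() participates in; the output topic and group/application id are placeholders, and the String-keyed sendOffsetsToTransaction overload matches the one used above:

import java.time.Duration;
import java.util.HashMap;
import java.util.Map;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.errors.ProducerFencedException;

public final class ExactlyOnceLoop {

    // The consumer (with auto-commit disabled) and the transactional producer
    // are assumed to be configured and subscribed elsewhere.
    public static void run(KafkaConsumer<String, String> consumer,
            KafkaProducer<String, String> producer) {
        producer.initTransactions();
        while (true) {
            ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(100));
            if (records.isEmpty()) {
                continue;
            }
            producer.beginTransaction();
            try {
                Map<TopicPartition, OffsetAndMetadata> offsets = new HashMap<>();
                for (ConsumerRecord<String, String> record : records) {
                    producer.send(new ProducerRecord<>("output-topic", record.key(), record.value()));
                    // Commit the position of the NEXT record to read, hence the
                    // +1, mirroring `entry.getValue() + 1` in commitOffsets().
                    offsets.put(new TopicPartition(record.topic(), record.partition()),
                            new OffsetAndMetadata(record.offset() + 1));
                }
                // The consumed offsets ride in the same transaction as the
                // produced records, so both commit or abort atomically.
                producer.sendOffsetsToTransaction(offsets, "app-id");
                producer.commitTransaction();
            } catch (ProducerFencedException fatal) {
                // Another instance took over this transactional.id; the producer
                // is unusable, analogous to throwing TaskMigratedException above.
                producer.close();
                throw fatal;
            }
        }
    }
}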

Aggregations

ProducerFencedException (org.apache.kafka.common.errors.ProducerFencedException): 19 usages
KafkaException (org.apache.kafka.common.KafkaException): 11 usages
TopicPartition (org.apache.kafka.common.TopicPartition): 9 usages
TaskMigratedException (org.apache.kafka.streams.errors.TaskMigratedException): 8 usages
HashMap (java.util.HashMap): 6 usages
StreamsException (org.apache.kafka.streams.errors.StreamsException): 5 usages
Map (java.util.Map): 4 usages
OutOfOrderSequenceException (org.apache.kafka.common.errors.OutOfOrderSequenceException): 4 usages
HashSet (java.util.HashSet): 3 usages
Properties (java.util.Properties): 3 usages
Set (java.util.Set): 3 usages
TimeoutException (org.apache.kafka.common.errors.TimeoutException): 3 usages
IOException (java.io.IOException): 2 usages
ArrayList (java.util.ArrayList): 2 usages
Random (java.util.Random): 2 usages
AtomicBoolean (java.util.concurrent.atomic.AtomicBoolean): 2 usages
AtomicLong (java.util.concurrent.atomic.AtomicLong): 2 usages
Namespace (net.sourceforge.argparse4j.inf.Namespace): 2 usages
CommitFailedException (org.apache.kafka.clients.consumer.CommitFailedException): 2 usages
OffsetAndMetadata (org.apache.kafka.clients.consumer.OffsetAndMetadata): 2 usages