
Example 11 with Callback

Use of org.apache.kafka.clients.producer.Callback in project apache-kafka-on-k8s by banzaicloud.

From class RecordAccumulatorTest, method testAbortUnsentBatches.

@Test
public void testAbortUnsentBatches() throws Exception {
    long lingerMs = Long.MAX_VALUE;
    int numRecords = 100;
    final AtomicInteger numExceptionReceivedInCallback = new AtomicInteger(0);
    final RecordAccumulator accum = createTestRecordAccumulator(128 + DefaultRecordBatch.RECORD_BATCH_OVERHEAD, 64 * 1024, CompressionType.NONE, lingerMs);
    final KafkaException cause = new KafkaException();
    class TestCallback implements Callback {

        @Override
        public void onCompletion(RecordMetadata metadata, Exception exception) {
            assertEquals(cause, exception);
            numExceptionReceivedInCallback.incrementAndGet();
        }
    }
    for (int i = 0; i < numRecords; i++) accum.append(new TopicPartition(topic, i % 3), 0L, key, value, null, new TestCallback(), maxBlockTimeMs);
    RecordAccumulator.ReadyCheckResult result = accum.ready(cluster, time.milliseconds());
    assertFalse(result.readyNodes.isEmpty());
    Map<Integer, List<ProducerBatch>> drained = accum.drain(cluster, result.readyNodes, Integer.MAX_VALUE, time.milliseconds());
    assertTrue(accum.hasUndrained());
    assertTrue(accum.hasIncomplete());
    accum.abortUndrainedBatches(cause);
    int numDrainedRecords = 0;
    for (Map.Entry<Integer, List<ProducerBatch>> drainedEntry : drained.entrySet()) {
        for (ProducerBatch batch : drainedEntry.getValue()) {
            assertTrue(batch.isClosed());
            assertFalse(batch.produceFuture.completed());
            numDrainedRecords += batch.recordCount;
        }
    }
    assertTrue(numDrainedRecords > 0);
    assertTrue(numExceptionReceivedInCallback.get() > 0);
    assertEquals(numRecords, numExceptionReceivedInCallback.get() + numDrainedRecords);
    assertFalse(accum.hasUndrained());
    assertTrue(accum.hasIncomplete());
}
Also used : KafkaException(org.apache.kafka.common.KafkaException) ExecutionException(java.util.concurrent.ExecutionException) UnsupportedVersionException(org.apache.kafka.common.errors.UnsupportedVersionException) RecordMetadata(org.apache.kafka.clients.producer.RecordMetadata) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) Callback(org.apache.kafka.clients.producer.Callback) TopicPartition(org.apache.kafka.common.TopicPartition) ArrayList(java.util.ArrayList) Arrays.asList(java.util.Arrays.asList) List(java.util.List) Map(java.util.Map) Test(org.junit.Test)
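
The test above exercises the core contract of org.apache.kafka.clients.producer.Callback: onCompletion is invoked exactly once per record with either a RecordMetadata (success) or an Exception (failure or abort), and the test tallies the exceptions through an AtomicInteger. Below is a minimal standalone sketch of that same contract against a live producer; the class name, topic, and broker address are placeholder assumptions rather than anything taken from the test.

import java.util.Properties;
import java.util.concurrent.atomic.AtomicInteger;
import org.apache.kafka.clients.producer.Callback;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.clients.producer.RecordMetadata;
import org.apache.kafka.common.serialization.StringSerializer;

public class CallbackContractSketch {

    public static void main(String[] args) {
        Properties props = new Properties();
        // Assumed local broker; adjust for your environment.
        props.put("bootstrap.servers", "localhost:9092");
        props.put("key.serializer", StringSerializer.class.getName());
        props.put("value.serializer", StringSerializer.class.getName());

        final AtomicInteger numExceptionReceivedInCallback = new AtomicInteger(0);
        try (KafkaProducer<String, String> producer = new KafkaProducer<>(props)) {
            producer.send(new ProducerRecord<>("demo-topic", "key", "value"), new Callback() {
                @Override
                public void onCompletion(RecordMetadata metadata, Exception exception) {
                    // Exactly one argument is non-null: metadata on success,
                    // exception when the send fails or the batch is aborted.
                    if (exception != null)
                        numExceptionReceivedInCallback.incrementAndGet();
                }
            });
            // flush() forces the accumulator to drain, so the callback has fired by the time it returns.
            producer.flush();
        }
        System.out.println("sends that failed: " + numExceptionReceivedInCallback.get());
    }
}

The TestCallback in the test follows the same shape, but additionally asserts that the delivered exception is the KafkaException passed to abortUndrainedBatches.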

Example 12 with Callback

Use of org.apache.kafka.clients.producer.Callback in project apache-kafka-on-k8s by banzaicloud.

From class WorkerSourceTask, method sendRecords.

/**
 * Try to send a batch of records. If a send fails and is retriable, this saves the remainder of the batch so it can
 * be retried after backing off. If a send fails and is not retriable, this will throw a ConnectException.
 * @return true if all messages were sent, false if some need to be retried
 */
private boolean sendRecords() {
    int processed = 0;
    recordBatch(toSend.size());
    final SourceRecordWriteCounter counter = new SourceRecordWriteCounter(toSend.size(), sourceTaskMetricsGroup);
    for (final SourceRecord preTransformRecord : toSend) {
        final SourceRecord record = transformationChain.apply(preTransformRecord);
        if (record == null) {
            counter.skipRecord();
            commitTaskRecord(preTransformRecord);
            continue;
        }
        RecordHeaders headers = convertHeaderFor(record);
        byte[] key = keyConverter.fromConnectData(record.topic(), record.keySchema(), record.key());
        byte[] value = valueConverter.fromConnectData(record.topic(), record.valueSchema(), record.value());
        final ProducerRecord<byte[], byte[]> producerRecord = new ProducerRecord<>(record.topic(), record.kafkaPartition(), ConnectUtils.checkAndConvertTimestamp(record.timestamp()), key, value, headers);
        log.trace("{} Appending record with key {}, value {}", this, record.key(), record.value());
        // Track the outstanding message (or queue it in the backlog while a flush is in progress) so that
        // the producer callback can later acknowledge the message and the committer can update the offsets.
        synchronized (this) {
            if (!lastSendFailed) {
                if (!flushing) {
                    outstandingMessages.put(producerRecord, producerRecord);
                } else {
                    outstandingMessagesBacklog.put(producerRecord, producerRecord);
                }
                // Offsets are converted & serialized in the OffsetWriter
                offsetWriter.offset(record.sourcePartition(), record.sourceOffset());
            }
        }
        try {
            final String topic = producerRecord.topic();
            producer.send(producerRecord, new Callback() {

                @Override
                public void onCompletion(RecordMetadata recordMetadata, Exception e) {
                    if (e != null) {
                        // Given the default settings for zero data loss, this should basically never happen --
                        // between "infinite" retries, indefinite blocking on full buffers, and "infinite" request
                        // timeouts, callbacks with exceptions should never be invoked in practice. If the
                        // user overrode these settings, the best we can do is notify them of the failure via
                        // logging.
                        log.error("{} failed to send record to {}: {}", this, topic, e);
                        log.debug("{} Failed record: {}", this, preTransformRecord);
                    } else {
                        log.trace("{} Wrote record successfully: topic {} partition {} offset {}", this, recordMetadata.topic(), recordMetadata.partition(), recordMetadata.offset());
                        commitTaskRecord(preTransformRecord);
                    }
                    recordSent(producerRecord);
                    counter.completeRecord();
                }
            });
            lastSendFailed = false;
        } catch (RetriableException e) {
            log.warn("{} Failed to send {}, backing off before retrying:", this, producerRecord, e);
            toSend = toSend.subList(processed, toSend.size());
            lastSendFailed = true;
            counter.retryRemaining();
            return false;
        } catch (KafkaException e) {
            throw new ConnectException("Unrecoverable exception trying to send", e);
        }
        processed++;
    }
    toSend = null;
    return true;
}
Also used : SourceRecord(org.apache.kafka.connect.source.SourceRecord) KafkaException(org.apache.kafka.common.KafkaException) TimeoutException(java.util.concurrent.TimeoutException) RetriableException(org.apache.kafka.common.errors.RetriableException) ExecutionException(java.util.concurrent.ExecutionException) ConnectException(org.apache.kafka.connect.errors.ConnectException) RecordMetadata(org.apache.kafka.clients.producer.RecordMetadata) RecordHeaders(org.apache.kafka.common.header.internals.RecordHeaders) Callback(org.apache.kafka.clients.producer.Callback) ProducerRecord(org.apache.kafka.clients.producer.ProducerRecord)
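
The javadoc on sendRecords spells out the retry contract: a RetriableException thrown synchronously by the producer means the rest of the batch is kept and retried after backing off, while any other KafkaException is unrecoverable and is rethrown as a ConnectException. The sketch below isolates just that decision outside the worker plumbing; SendHelper and trySend are hypothetical names, not part of Connect.

import org.apache.kafka.clients.producer.Callback;
import org.apache.kafka.clients.producer.Producer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.KafkaException;
import org.apache.kafka.common.errors.RetriableException;
import org.apache.kafka.connect.errors.ConnectException;

// Hypothetical helper mirroring the retry contract described in sendRecords above.
final class SendHelper {

    private final Producer<byte[], byte[]> producer;

    SendHelper(Producer<byte[], byte[]> producer) {
        this.producer = producer;
    }

    // Returns true if the record was handed to the producer, false if the caller
    // should back off and retry it (and everything after it) later.
    boolean trySend(ProducerRecord<byte[], byte[]> record, Callback callback) {
        try {
            producer.send(record, callback);
            return true;
        } catch (RetriableException e) {
            // Transient condition (e.g. metadata not yet available): keep the record for a later attempt.
            return false;
        } catch (KafkaException e) {
            // Anything else is unrecoverable from the task's point of view.
            throw new ConnectException("Unrecoverable exception trying to send", e);
        }
    }
}

Note that RetriableException must be caught before KafkaException, since it is a subclass; the worker code above relies on the same ordering.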

Example 13 with Callback

Use of org.apache.kafka.clients.producer.Callback in project apache-kafka-on-k8s by banzaicloud.

From class KafkaStatusBackingStoreTest, method putConnectorStateShouldOverride.

@Test
public void putConnectorStateShouldOverride() {
    final byte[] value = new byte[0];
    String otherWorkerId = "anotherhost:8083";
    KafkaBasedLog<String, byte[]> kafkaBasedLog = mock(KafkaBasedLog.class);
    Converter converter = mock(Converter.class);
    final KafkaStatusBackingStore store = new KafkaStatusBackingStore(new MockTime(), converter, STATUS_TOPIC, kafkaBasedLog);
    // the persisted status came from a different host and has a newer generation
    Map<String, Object> firstStatusRead = new HashMap<>();
    firstStatusRead.put("worker_id", otherWorkerId);
    firstStatusRead.put("state", "RUNNING");
    firstStatusRead.put("generation", 1L);
    Map<String, Object> secondStatusRead = new HashMap<>();
    secondStatusRead.put("worker_id", WORKER_ID);
    secondStatusRead.put("state", "UNASSIGNED");
    secondStatusRead.put("generation", 0L);
    expect(converter.toConnectData(STATUS_TOPIC, value)).andReturn(new SchemaAndValue(null, firstStatusRead)).andReturn(new SchemaAndValue(null, secondStatusRead));
    expect(converter.fromConnectData(eq(STATUS_TOPIC), anyObject(Schema.class), anyObject(Struct.class))).andStubReturn(value);
    final Capture<Callback> callbackCapture = newCapture();
    kafkaBasedLog.send(eq("status-connector-conn"), eq(value), capture(callbackCapture));
    expectLastCall().andAnswer(new IAnswer<Void>() {

        @Override
        public Void answer() throws Throwable {
            callbackCapture.getValue().onCompletion(null, null);
            store.read(consumerRecord(1, "status-connector-conn", value));
            return null;
        }
    });
    replayAll();
    store.read(consumerRecord(0, "status-connector-conn", value));
    ConnectorStatus status = new ConnectorStatus(CONNECTOR, ConnectorStatus.State.UNASSIGNED, WORKER_ID, 0);
    store.put(status);
    assertEquals(status, store.get(CONNECTOR));
    verifyAll();
}
Also used : HashMap(java.util.HashMap) Schema(org.apache.kafka.connect.data.Schema) ConnectorStatus(org.apache.kafka.connect.runtime.ConnectorStatus) SchemaAndValue(org.apache.kafka.connect.data.SchemaAndValue) Struct(org.apache.kafka.connect.data.Struct) Callback(org.apache.kafka.clients.producer.Callback) EasyMock.anyObject(org.easymock.EasyMock.anyObject) MockTime(org.apache.kafka.common.utils.MockTime) Test(org.junit.Test)

Example 14 with Callback

Use of org.apache.kafka.clients.producer.Callback in project apache-kafka-on-k8s by banzaicloud.

From class KafkaStatusBackingStoreTest, method putConnectorState.

@Test
public void putConnectorState() {
    KafkaBasedLog<String, byte[]> kafkaBasedLog = mock(KafkaBasedLog.class);
    Converter converter = mock(Converter.class);
    KafkaStatusBackingStore store = new KafkaStatusBackingStore(new MockTime(), converter, STATUS_TOPIC, kafkaBasedLog);
    byte[] value = new byte[0];
    expect(converter.fromConnectData(eq(STATUS_TOPIC), anyObject(Schema.class), anyObject(Struct.class))).andStubReturn(value);
    final Capture<Callback> callbackCapture = newCapture();
    kafkaBasedLog.send(eq("status-connector-conn"), eq(value), capture(callbackCapture));
    expectLastCall().andAnswer(new IAnswer<Void>() {

        @Override
        public Void answer() throws Throwable {
            callbackCapture.getValue().onCompletion(null, null);
            return null;
        }
    });
    replayAll();
    ConnectorStatus status = new ConnectorStatus(CONNECTOR, ConnectorStatus.State.RUNNING, WORKER_ID, 0);
    store.put(status);
    // state is not visible until read back from the log
    assertEquals(null, store.get(CONNECTOR));
    verifyAll();
}
Also used : Schema(org.apache.kafka.connect.data.Schema) ConnectorStatus(org.apache.kafka.connect.runtime.ConnectorStatus) Struct(org.apache.kafka.connect.data.Struct) Callback(org.apache.kafka.clients.producer.Callback) MockTime(org.apache.kafka.common.utils.MockTime) Test(org.junit.Test)

Example 15 with Callback

Use of org.apache.kafka.clients.producer.Callback in project apache-kafka-on-k8s by banzaicloud.

From class KafkaStatusBackingStoreTest, method putConnectorStateRetriableFailure.

@Test
public void putConnectorStateRetriableFailure() {
    KafkaBasedLog<String, byte[]> kafkaBasedLog = mock(KafkaBasedLog.class);
    Converter converter = mock(Converter.class);
    KafkaStatusBackingStore store = new KafkaStatusBackingStore(new MockTime(), converter, STATUS_TOPIC, kafkaBasedLog);
    byte[] value = new byte[0];
    expect(converter.fromConnectData(eq(STATUS_TOPIC), anyObject(Schema.class), anyObject(Struct.class))).andStubReturn(value);
    final Capture<Callback> callbackCapture = newCapture();
    kafkaBasedLog.send(eq("status-connector-conn"), eq(value), capture(callbackCapture));
    expectLastCall().andAnswer(new IAnswer<Void>() {

        @Override
        public Void answer() throws Throwable {
            callbackCapture.getValue().onCompletion(null, new TimeoutException());
            return null;
        }
    }).andAnswer(new IAnswer<Void>() {

        @Override
        public Void answer() throws Throwable {
            callbackCapture.getValue().onCompletion(null, null);
            return null;
        }
    });
    replayAll();
    ConnectorStatus status = new ConnectorStatus(CONNECTOR, ConnectorStatus.State.RUNNING, WORKER_ID, 0);
    store.put(status);
    // state is not visible until read back from the log
    assertEquals(null, store.get(CONNECTOR));
    verifyAll();
}
Also used : Schema(org.apache.kafka.connect.data.Schema) ConnectorStatus(org.apache.kafka.connect.runtime.ConnectorStatus) Struct(org.apache.kafka.connect.data.Struct) IAnswer(org.easymock.IAnswer) Callback(org.apache.kafka.clients.producer.Callback) MockTime(org.apache.kafka.common.utils.MockTime) TimeoutException(org.apache.kafka.common.errors.TimeoutException) Test(org.junit.Test)
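
Examples 13 through 15 all rely on the same idiom: the Callback handed to the mocked KafkaBasedLog.send is captured with EasyMock and then invoked by hand, so the test can simulate a successful acknowledgement or a retriable TimeoutException without any broker. A similar effect can be had with MockProducer, which also appears in the aggregation counts below. The following self-contained sketch drives callbacks manually that way; the class and topic names are placeholder assumptions.

import java.util.concurrent.atomic.AtomicInteger;
import org.apache.kafka.clients.producer.Callback;
import org.apache.kafka.clients.producer.MockProducer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.clients.producer.RecordMetadata;
import org.apache.kafka.common.errors.TimeoutException;
import org.apache.kafka.common.serialization.StringSerializer;

public class ManualCallbackSketch {

    public static void main(String[] args) {
        // autoComplete=false keeps sends pending until the test completes them explicitly,
        // which is the moment the Callback fires.
        MockProducer<String, String> producer =
                new MockProducer<>(false, new StringSerializer(), new StringSerializer());

        final AtomicInteger failures = new AtomicInteger(0);
        Callback callback = new Callback() {
            @Override
            public void onCompletion(RecordMetadata metadata, Exception exception) {
                if (exception != null)
                    failures.incrementAndGet();
            }
        };

        producer.send(new ProducerRecord<>("status-topic", "k", "v"), callback);
        producer.send(new ProducerRecord<>("status-topic", "k", "v"), callback);

        // Fail the first send with a retriable TimeoutException, then acknowledge the second,
        // mirroring the failure-then-success sequence in putConnectorStateRetriableFailure.
        producer.errorNext(new TimeoutException());
        producer.completeNext();

        System.out.println("failed sends: " + failures.get());  // 1
    }
}

With autoComplete left at its default of true, MockProducer instead completes each send (and fires the callback) synchronously inside send, which is closer to the behaviour the EasyMock answers above emulate.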

Aggregations

Types that co-occur with Callback across the collected examples, with usage counts:

Callback (org.apache.kafka.clients.producer.Callback): 81
Test (org.junit.Test): 47
ProducerRecord (org.apache.kafka.clients.producer.ProducerRecord): 39
RecordMetadata (org.apache.kafka.clients.producer.RecordMetadata): 37
KafkaException (org.apache.kafka.common.KafkaException): 21
Future (java.util.concurrent.Future): 18
TimeoutException (org.apache.kafka.common.errors.TimeoutException): 18
ExecutionException (java.util.concurrent.ExecutionException): 15
ArrayList (java.util.ArrayList): 14
List (java.util.List): 13
AtomicInteger (java.util.concurrent.atomic.AtomicInteger): 13
MockProducer (org.apache.kafka.clients.producer.MockProducer): 13
HashMap (java.util.HashMap): 12
Properties (java.util.Properties): 12
DefaultPartitioner (org.apache.kafka.clients.producer.internals.DefaultPartitioner): 12
TopicPartition (org.apache.kafka.common.TopicPartition): 12
Schema (org.apache.kafka.connect.data.Schema): 12
Struct (org.apache.kafka.connect.data.Struct): 12
KafkaProducer (org.apache.kafka.clients.producer.KafkaProducer): 11
StreamsException (org.apache.kafka.streams.errors.StreamsException): 11