
Example 76 with Callback

Use of org.apache.kafka.clients.producer.Callback in project kafka by apache.

From the class RecordAccumulatorTest, method testSplitAndReenqueue: an oversized compressed batch is split and re-enqueued, and every record must still be acked exactly once with the correct offset.

@Test
public void testSplitAndReenqueue() throws ExecutionException, InterruptedException {
    long now = time.milliseconds();
    RecordAccumulator accum = createTestRecordAccumulator(1024, 10 * 1024, CompressionType.GZIP, 10);
    // Create a big batch
    ByteBuffer buffer = ByteBuffer.allocate(4096);
    MemoryRecordsBuilder builder = MemoryRecords.builder(buffer, CompressionType.NONE, TimestampType.CREATE_TIME, 0L);
    ProducerBatch batch = new ProducerBatch(tp1, builder, now, true);
    byte[] value = new byte[1024];
    final AtomicInteger acked = new AtomicInteger(0);
    Callback cb = new Callback() {

        @Override
        public void onCompletion(RecordMetadata metadata, Exception exception) {
            acked.incrementAndGet();
        }
    };
    // Append two messages so the batch is too big.
    Future<RecordMetadata> future1 = batch.tryAppend(now, null, value, Record.EMPTY_HEADERS, cb, now);
    Future<RecordMetadata> future2 = batch.tryAppend(now, null, value, Record.EMPTY_HEADERS, cb, now);
    assertNotNull(future1);
    assertNotNull(future2);
    batch.close();
    // Enqueue the batch to the accumulator as if the batch was created by the accumulator.
    accum.reenqueue(batch, now);
    time.sleep(101L);
    // Drain the batch.
    RecordAccumulator.ReadyCheckResult result = accum.ready(cluster, time.milliseconds());
    assertTrue(result.readyNodes.size() > 0, "The batch should be ready");
    Map<Integer, List<ProducerBatch>> drained = accum.drain(cluster, result.readyNodes, Integer.MAX_VALUE, time.milliseconds());
    assertEquals(1, drained.size(), "Only node1 should be drained");
    assertEquals(1, drained.get(node1.id()).size(), "Only one batch should be drained");
    // Split and reenqueue the batch.
    accum.splitAndReenqueue(drained.get(node1.id()).get(0));
    time.sleep(101L);
    drained = accum.drain(cluster, result.readyNodes, Integer.MAX_VALUE, time.milliseconds());
    assertFalse(drained.isEmpty());
    assertFalse(drained.get(node1.id()).isEmpty());
    drained.get(node1.id()).get(0).complete(acked.get(), 100L);
    assertEquals(1, acked.get(), "The first message should have been acked.");
    assertTrue(future1.isDone());
    assertEquals(0, future1.get().offset());
    drained = accum.drain(cluster, result.readyNodes, Integer.MAX_VALUE, time.milliseconds());
    assertFalse(drained.isEmpty());
    assertFalse(drained.get(node1.id()).isEmpty());
    drained.get(node1.id()).get(0).complete(acked.get(), 100L);
    assertEquals(2, acked.get(), "Both messages should have been acked.");
    assertTrue(future2.isDone());
    assertEquals(1, future2.get().offset());
}
Also used: ByteBuffer (java.nio.ByteBuffer), KafkaException (org.apache.kafka.common.KafkaException), ExecutionException (java.util.concurrent.ExecutionException), UnsupportedVersionException (org.apache.kafka.common.errors.UnsupportedVersionException), RecordMetadata (org.apache.kafka.clients.producer.RecordMetadata), AtomicInteger (java.util.concurrent.atomic.AtomicInteger), Callback (org.apache.kafka.clients.producer.Callback), MemoryRecordsBuilder (org.apache.kafka.common.record.MemoryRecordsBuilder), Arrays.asList (java.util.Arrays.asList), List (java.util.List), ArrayList (java.util.ArrayList), Test (org.junit.jupiter.api.Test)
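
Since Callback declares a single method, onCompletion(RecordMetadata metadata, Exception exception), the anonymous class in the test above can also be written as a lambda. A minimal sketch of the equivalent counting callback:

import java.util.concurrent.atomic.AtomicInteger;
import org.apache.kafka.clients.producer.Callback;

// Equivalent lambda form of the counting callback from testSplitAndReenqueue.
final AtomicInteger acked = new AtomicInteger(0);
Callback cb = (metadata, exception) -> acked.incrementAndGet();

The producer contract is the same either way: onCompletion runs once per record, with a null exception on success.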

Example 77 with Callback

Use of org.apache.kafka.clients.producer.Callback in project kafka by apache.

From the class RecordAccumulatorTest, method testAbortUnsentBatches: abortUndrainedBatches fails only the batches that have not yet been drained, so the exception callbacks plus the drained records must account for every appended record.

@Test
public void testAbortUnsentBatches() throws Exception {
    int lingerMs = Integer.MAX_VALUE;
    int numRecords = 100;
    final AtomicInteger numExceptionReceivedInCallback = new AtomicInteger(0);
    final RecordAccumulator accum = createTestRecordAccumulator(128 + DefaultRecordBatch.RECORD_BATCH_OVERHEAD, 64 * 1024, CompressionType.NONE, lingerMs);
    final KafkaException cause = new KafkaException();
    class TestCallback implements Callback {

        @Override
        public void onCompletion(RecordMetadata metadata, Exception exception) {
            assertEquals(cause, exception);
            numExceptionReceivedInCallback.incrementAndGet();
        }
    }
    for (int i = 0; i < numRecords; i++)
        accum.append(new TopicPartition(topic, i % 3), 0L, key, value, null, new TestCallback(), maxBlockTimeMs, false, time.milliseconds());
    RecordAccumulator.ReadyCheckResult result = accum.ready(cluster, time.milliseconds());
    assertFalse(result.readyNodes.isEmpty());
    Map<Integer, List<ProducerBatch>> drained = accum.drain(cluster, result.readyNodes, Integer.MAX_VALUE, time.milliseconds());
    assertTrue(accum.hasUndrained());
    assertTrue(accum.hasIncomplete());
    accum.abortUndrainedBatches(cause);
    int numDrainedRecords = 0;
    for (Map.Entry<Integer, List<ProducerBatch>> drainedEntry : drained.entrySet()) {
        for (ProducerBatch batch : drainedEntry.getValue()) {
            assertTrue(batch.isClosed());
            assertFalse(batch.produceFuture.completed());
            numDrainedRecords += batch.recordCount;
        }
    }
    assertTrue(numDrainedRecords > 0);
    assertTrue(numExceptionReceivedInCallback.get() > 0);
    assertEquals(numRecords, numExceptionReceivedInCallback.get() + numDrainedRecords);
    assertFalse(accum.hasUndrained());
    assertTrue(accum.hasIncomplete());
}
Also used: KafkaException (org.apache.kafka.common.KafkaException), ExecutionException (java.util.concurrent.ExecutionException), UnsupportedVersionException (org.apache.kafka.common.errors.UnsupportedVersionException), RecordMetadata (org.apache.kafka.clients.producer.RecordMetadata), AtomicInteger (java.util.concurrent.atomic.AtomicInteger), Callback (org.apache.kafka.clients.producer.Callback), TopicPartition (org.apache.kafka.common.TopicPartition), Arrays.asList (java.util.Arrays.asList), List (java.util.List), ArrayList (java.util.ArrayList), Map (java.util.Map), Test (org.junit.jupiter.api.Test)
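
The TestCallback above only asserts that the abort exception arrived. Outside of tests, a Callback typically branches on the exception argument, which is non-null exactly when the send failed; a minimal sketch (the logging calls are placeholders for real error handling):

// Minimal production-style callback: a non-null exception means the send failed.
Callback cb = (metadata, exception) -> {
    if (exception != null) {
        System.err.println("send failed: " + exception.getMessage());
    } else {
        System.out.println("acked " + metadata.topic() + "-" + metadata.partition()
                + " at offset " + metadata.offset());
    }
};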

Example 78 with Callback

Use of org.apache.kafka.clients.producer.Callback in project kafka by apache.

From the class RecordAccumulatorTest, method testAbortIncompleteBatches: unlike abortUndrainedBatches above, abortIncompleteBatches also fails batches that were already drained, so every record's callback receives the "Producer is closed forcefully." exception.

@Test
public void testAbortIncompleteBatches() throws Exception {
    int lingerMs = Integer.MAX_VALUE;
    int numRecords = 100;
    final AtomicInteger numExceptionReceivedInCallback = new AtomicInteger(0);
    final RecordAccumulator accum = createTestRecordAccumulator(128 + DefaultRecordBatch.RECORD_BATCH_OVERHEAD, 64 * 1024, CompressionType.NONE, lingerMs);
    class TestCallback implements Callback {

        @Override
        public void onCompletion(RecordMetadata metadata, Exception exception) {
            assertEquals("Producer is closed forcefully.", exception.getMessage());
            numExceptionReceivedInCallback.incrementAndGet();
        }
    }
    for (int i = 0; i < numRecords; i++)
        accum.append(new TopicPartition(topic, i % 3), 0L, key, value, null, new TestCallback(), maxBlockTimeMs, false, time.milliseconds());
    RecordAccumulator.ReadyCheckResult result = accum.ready(cluster, time.milliseconds());
    assertFalse(result.readyNodes.isEmpty());
    Map<Integer, List<ProducerBatch>> drained = accum.drain(cluster, result.readyNodes, Integer.MAX_VALUE, time.milliseconds());
    assertTrue(accum.hasUndrained());
    assertTrue(accum.hasIncomplete());
    int numDrainedRecords = 0;
    for (Map.Entry<Integer, List<ProducerBatch>> drainedEntry : drained.entrySet()) {
        for (ProducerBatch batch : drainedEntry.getValue()) {
            assertTrue(batch.isClosed());
            assertFalse(batch.produceFuture.completed());
            numDrainedRecords += batch.recordCount;
        }
    }
    assertTrue(numDrainedRecords > 0 && numDrainedRecords < numRecords);
    accum.abortIncompleteBatches();
    assertEquals(numRecords, numExceptionReceivedInCallback.get());
    assertFalse(accum.hasUndrained());
    assertFalse(accum.hasIncomplete());
}
Also used: KafkaException (org.apache.kafka.common.KafkaException), ExecutionException (java.util.concurrent.ExecutionException), UnsupportedVersionException (org.apache.kafka.common.errors.UnsupportedVersionException), RecordMetadata (org.apache.kafka.clients.producer.RecordMetadata), AtomicInteger (java.util.concurrent.atomic.AtomicInteger), Callback (org.apache.kafka.clients.producer.Callback), TopicPartition (org.apache.kafka.common.TopicPartition), Arrays.asList (java.util.Arrays.asList), List (java.util.List), ArrayList (java.util.ArrayList), Map (java.util.Map), Test (org.junit.jupiter.api.Test)

Example 79 with Callback

Use of org.apache.kafka.clients.producer.Callback in project kafka by apache.

From the class KafkaStatusBackingStoreTest, method putConnectorStateShouldOverride: a status update written by this worker overrides a persisted status that came from a different worker with a newer generation.

@Test
public void putConnectorStateShouldOverride() {
    final byte[] value = new byte[0];
    String otherWorkerId = "anotherhost:8083";
    // the persisted status came from a different host and has a newer generation
    Map<String, Object> firstStatusRead = new HashMap<>();
    firstStatusRead.put("worker_id", otherWorkerId);
    firstStatusRead.put("state", "RUNNING");
    firstStatusRead.put("generation", 1L);
    Map<String, Object> secondStatusRead = new HashMap<>();
    secondStatusRead.put("worker_id", WORKER_ID);
    secondStatusRead.put("state", "UNASSIGNED");
    secondStatusRead.put("generation", 0L);
    when(converter.toConnectData(STATUS_TOPIC, value)).thenReturn(new SchemaAndValue(null, firstStatusRead)).thenReturn(new SchemaAndValue(null, secondStatusRead));
    when(converter.fromConnectData(eq(STATUS_TOPIC), any(Schema.class), any(Struct.class))).thenReturn(value);
    doAnswer(invocation -> {
        ((Callback) invocation.getArgument(2)).onCompletion(null, null);
        store.read(consumerRecord(1, "status-connector-conn", value));
        return null;
    }).when(kafkaBasedLog).send(eq("status-connector-conn"), eq(value), any(Callback.class));
    store.read(consumerRecord(0, "status-connector-conn", value));
    ConnectorStatus status = new ConnectorStatus(CONNECTOR, ConnectorStatus.State.UNASSIGNED, WORKER_ID, 0);
    store.put(status);
    assertEquals(status, store.get(CONNECTOR));
}
Also used: Callback (org.apache.kafka.clients.producer.Callback), HashMap (java.util.HashMap), Schema (org.apache.kafka.connect.data.Schema), ConnectorStatus (org.apache.kafka.connect.runtime.ConnectorStatus), ArgumentMatchers.anyString (org.mockito.ArgumentMatchers.anyString), SchemaAndValue (org.apache.kafka.connect.data.SchemaAndValue), Struct (org.apache.kafka.connect.data.Struct), Test (org.junit.Test)
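
The doAnswer stub in this test completes the Callback inline to simulate an immediately acked write. An alternative, sketched here under the same send(String, byte[], Callback) signature on the mocked kafkaBasedLog, is to capture the callback with Mockito's ArgumentCaptor and complete it at a point the test chooses:

import static org.mockito.ArgumentMatchers.eq;
import static org.mockito.Mockito.verify;
import org.mockito.ArgumentCaptor;

// Capture the Callback handed to send(), then complete it when the test is ready.
ArgumentCaptor<Callback> callbackCaptor = ArgumentCaptor.forClass(Callback.class);
verify(kafkaBasedLog).send(eq("status-connector-conn"), eq(value), callbackCaptor.capture());
// A null exception signals a successful write, as in the doAnswer version above.
callbackCaptor.getValue().onCompletion(null, null);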

Example 80 with Callback

Use of org.apache.kafka.clients.producer.Callback in project kafka by apache.

From the class KafkaStatusBackingStoreTest, method putConnectorState: a written connector status is not visible through the store until it has been read back from the status topic.

@Test
public void putConnectorState() {
    byte[] value = new byte[0];
    when(converter.fromConnectData(eq(STATUS_TOPIC), any(Schema.class), any(Struct.class))).thenReturn(value);
    doAnswer(invocation -> {
        ((Callback) invocation.getArgument(2)).onCompletion(null, null);
        return null;
    }).when(kafkaBasedLog).send(eq("status-connector-conn"), eq(value), any(Callback.class));
    ConnectorStatus status = new ConnectorStatus(CONNECTOR, ConnectorStatus.State.RUNNING, WORKER_ID, 0);
    store.put(status);
    // state is not visible until read back from the log
    assertNull(store.get(CONNECTOR));
}
Also used: Callback (org.apache.kafka.clients.producer.Callback), Schema (org.apache.kafka.connect.data.Schema), ConnectorStatus (org.apache.kafka.connect.runtime.ConnectorStatus), Struct (org.apache.kafka.connect.data.Struct), Test (org.junit.Test)

Aggregations

Callback (org.apache.kafka.clients.producer.Callback): 81
Test (org.junit.Test): 47
ProducerRecord (org.apache.kafka.clients.producer.ProducerRecord): 39
RecordMetadata (org.apache.kafka.clients.producer.RecordMetadata): 37
KafkaException (org.apache.kafka.common.KafkaException): 21
Future (java.util.concurrent.Future): 18
TimeoutException (org.apache.kafka.common.errors.TimeoutException): 18
ExecutionException (java.util.concurrent.ExecutionException): 15
ArrayList (java.util.ArrayList): 14
List (java.util.List): 13
AtomicInteger (java.util.concurrent.atomic.AtomicInteger): 13
MockProducer (org.apache.kafka.clients.producer.MockProducer): 13
HashMap (java.util.HashMap): 12
Properties (java.util.Properties): 12
DefaultPartitioner (org.apache.kafka.clients.producer.internals.DefaultPartitioner): 12
TopicPartition (org.apache.kafka.common.TopicPartition): 12
Schema (org.apache.kafka.connect.data.Schema): 12
Struct (org.apache.kafka.connect.data.Struct): 12
KafkaProducer (org.apache.kafka.clients.producer.KafkaProducer): 11
StreamsException (org.apache.kafka.streams.errors.StreamsException): 11
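
The co-occurrence counts above point at the canonical pattern: a ProducerRecord sent with a Callback that receives RecordMetadata. A minimal, broker-free sketch using MockProducer with autoComplete enabled, so callbacks fire synchronously on send (the topic, key, and value are placeholders):

import org.apache.kafka.clients.producer.Callback;
import org.apache.kafka.clients.producer.MockProducer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.serialization.StringSerializer;

public class CallbackSketch {
    public static void main(String[] args) {
        // autoComplete = true: each send() succeeds immediately and its callback runs synchronously.
        MockProducer<String, String> producer =
                new MockProducer<>(true, new StringSerializer(), new StringSerializer());
        Callback cb = (metadata, exception) -> {
            if (exception != null) {
                System.err.println("send failed: " + exception);
            } else {
                System.out.println("acked " + metadata.topic() + " at offset " + metadata.offset());
            }
        };
        producer.send(new ProducerRecord<>("demo-topic", "key", "value"), cb);
        // MockProducer keeps everything it sent; useful for assertions in tests.
        System.out.println("sent " + producer.history().size() + " record(s)");
        producer.close();
    }
}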