
Example 31 with RecordMetadata

Use of org.apache.kafka.clients.producer.RecordMetadata in project apache-kafka-on-k8s by banzaicloud.

Class KafkaBasedLogTest, method testSendAndReadToEnd.

@Test
public void testSendAndReadToEnd() throws Exception {
    expectStart();
    TestFuture<RecordMetadata> tp0Future = new TestFuture<>();
    ProducerRecord<String, String> tp0Record = new ProducerRecord<>(TOPIC, TP0_KEY, TP0_VALUE);
    Capture<org.apache.kafka.clients.producer.Callback> callback0 = EasyMock.newCapture();
    EasyMock.expect(producer.send(EasyMock.eq(tp0Record), EasyMock.capture(callback0))).andReturn(tp0Future);
    TestFuture<RecordMetadata> tp1Future = new TestFuture<>();
    ProducerRecord<String, String> tp1Record = new ProducerRecord<>(TOPIC, TP1_KEY, TP1_VALUE);
    Capture<org.apache.kafka.clients.producer.Callback> callback1 = EasyMock.newCapture();
    EasyMock.expect(producer.send(EasyMock.eq(tp1Record), EasyMock.capture(callback1))).andReturn(tp1Future);
    // Producer flushes when read to log end is called
    producer.flush();
    PowerMock.expectLastCall();
    expectStop();
    PowerMock.replayAll();
    Map<TopicPartition, Long> endOffsets = new HashMap<>();
    endOffsets.put(TP0, 0L);
    endOffsets.put(TP1, 0L);
    consumer.updateEndOffsets(endOffsets);
    store.start();
    assertEquals(CONSUMER_ASSIGNMENT, consumer.assignment());
    assertEquals(0L, consumer.position(TP0));
    assertEquals(0L, consumer.position(TP1));
    // Set some keys
    final AtomicInteger invoked = new AtomicInteger(0);
    org.apache.kafka.clients.producer.Callback producerCallback = new org.apache.kafka.clients.producer.Callback() {

        @Override
        public void onCompletion(RecordMetadata metadata, Exception exception) {
            invoked.incrementAndGet();
        }
    };
    store.send(TP0_KEY, TP0_VALUE, producerCallback);
    store.send(TP1_KEY, TP1_VALUE, producerCallback);
    assertEquals(0, invoked.get());
    // Output not used, so safe to not return a real value for testing
    tp1Future.resolve((RecordMetadata) null);
    callback1.getValue().onCompletion(null, null);
    assertEquals(1, invoked.get());
    tp0Future.resolve((RecordMetadata) null);
    callback0.getValue().onCompletion(null, null);
    assertEquals(2, invoked.get());
    // Now we should have to wait for the records to be read back when we call readToEnd()
    final AtomicBoolean getInvoked = new AtomicBoolean(false);
    final FutureCallback<Void> readEndFutureCallback = new FutureCallback<>(new Callback<Void>() {

        @Override
        public void onCompletion(Throwable error, Void result) {
            getInvoked.set(true);
        }
    });
    consumer.schedulePollTask(new Runnable() {

        @Override
        public void run() {
            // Once we're synchronized in a poll, start the read to end and schedule the exact set of poll events
            // that should follow. This readToEnd call will immediately wakeup this consumer.poll() call without
            // returning any data.
            Map<TopicPartition, Long> newEndOffsets = new HashMap<>();
            newEndOffsets.put(TP0, 2L);
            newEndOffsets.put(TP1, 2L);
            consumer.updateEndOffsets(newEndOffsets);
            store.readToEnd(readEndFutureCallback);
            // Should keep polling until it reaches current log end offset for all partitions
            consumer.scheduleNopPollTask();
            consumer.scheduleNopPollTask();
            consumer.scheduleNopPollTask();
            consumer.schedulePollTask(new Runnable() {

                @Override
                public void run() {
                    consumer.addRecord(new ConsumerRecord<>(TOPIC, 0, 0, 0L, TimestampType.CREATE_TIME, 0L, 0, 0, TP0_KEY, TP0_VALUE));
                    consumer.addRecord(new ConsumerRecord<>(TOPIC, 0, 1, 0L, TimestampType.CREATE_TIME, 0L, 0, 0, TP0_KEY, TP0_VALUE_NEW));
                    consumer.addRecord(new ConsumerRecord<>(TOPIC, 1, 0, 0L, TimestampType.CREATE_TIME, 0L, 0, 0, TP1_KEY, TP1_VALUE));
                }
            });
            consumer.schedulePollTask(new Runnable() {

                @Override
                public void run() {
                    consumer.addRecord(new ConsumerRecord<>(TOPIC, 1, 1, 0L, TimestampType.CREATE_TIME, 0L, 0, 0, TP1_KEY, TP1_VALUE_NEW));
                }
            });
        // Already have FutureCallback that should be invoked/awaited, so no need for follow up finishedLatch
        }
    });
    readEndFutureCallback.get(10000, TimeUnit.MILLISECONDS);
    assertTrue(getInvoked.get());
    assertEquals(2, consumedRecords.size());
    assertEquals(2, consumedRecords.get(TP0).size());
    assertEquals(TP0_VALUE, consumedRecords.get(TP0).get(0).value());
    assertEquals(TP0_VALUE_NEW, consumedRecords.get(TP0).get(1).value());
    assertEquals(2, consumedRecords.get(TP1).size());
    assertEquals(TP1_VALUE, consumedRecords.get(TP1).get(0).value());
    assertEquals(TP1_VALUE_NEW, consumedRecords.get(TP1).get(1).value());
    // Cleanup
    store.stop();
    assertFalse(Whitebox.<Thread>getInternalState(store, "thread").isAlive());
    assertTrue(consumer.closed());
    PowerMock.verifyAll();
}
Also used: HashMap (java.util.HashMap), RecordMetadata (org.apache.kafka.clients.producer.RecordMetadata), KafkaException (org.apache.kafka.common.KafkaException), LeaderNotAvailableException (org.apache.kafka.common.errors.LeaderNotAvailableException), WakeupException (org.apache.kafka.common.errors.WakeupException), AtomicBoolean (java.util.concurrent.atomic.AtomicBoolean), AtomicInteger (java.util.concurrent.atomic.AtomicInteger), TopicPartition (org.apache.kafka.common.TopicPartition), ProducerRecord (org.apache.kafka.clients.producer.ProducerRecord), Map (java.util.Map), PrepareForTest (org.powermock.core.classloader.annotations.PrepareForTest), Test (org.junit.Test)
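
For context, a minimal sketch of the producer callback contract this test exercises through the public KafkaProducer API: onCompletion receives either a RecordMetadata or an Exception, never both non-null. The broker address and topic below are placeholder values, not taken from the test above.

import java.util.Properties;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.clients.producer.RecordMetadata;
import org.apache.kafka.common.serialization.StringSerializer;

public class CallbackSketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder broker
        try (KafkaProducer<String, String> producer =
                new KafkaProducer<>(props, new StringSerializer(), new StringSerializer())) {
            producer.send(new ProducerRecord<>("example-topic", "key", "value"),
                (RecordMetadata metadata, Exception exception) -> {
                    if (exception != null) {
                        // delivery failed; metadata is null here
                        exception.printStackTrace();
                    } else {
                        // metadata carries the topic, partition, and offset of the appended record
                        System.out.printf("acked %s-%d@%d%n",
                                metadata.topic(), metadata.partition(), metadata.offset());
                    }
                });
            producer.flush(); // like the test, flush before relying on completion
        }
    }
}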

Example 32 with RecordMetadata

Use of org.apache.kafka.clients.producer.RecordMetadata in project apache-kafka-on-k8s by banzaicloud.

Class ClientCompatibilityTest, method testProduce.

public void testProduce() throws Exception {
    Properties producerProps = new Properties();
    producerProps.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, testConfig.bootstrapServer);
    ByteArraySerializer serializer = new ByteArraySerializer();
    KafkaProducer<byte[], byte[]> producer = new KafkaProducer<>(producerProps, serializer, serializer);
    ProducerRecord<byte[], byte[]> record1 = new ProducerRecord<>(testConfig.topic, message1);
    Future<RecordMetadata> future1 = producer.send(record1);
    ProducerRecord<byte[], byte[]> record2 = new ProducerRecord<>(testConfig.topic, message2);
    Future<RecordMetadata> future2 = producer.send(record2);
    producer.flush();
    future1.get();
    future2.get();
    producer.close();
}
Also used: KafkaProducer (org.apache.kafka.clients.producer.KafkaProducer), RecordMetadata (org.apache.kafka.clients.producer.RecordMetadata), ProducerRecord (org.apache.kafka.clients.producer.ProducerRecord), Properties (java.util.Properties), ByteArraySerializer (org.apache.kafka.common.serialization.ByteArraySerializer)
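
The compatibility test blocks on each Future with no deadline. A variant worth sketching for production code bounds the wait so a stuck broker surfaces as a java.util.concurrent.TimeoutException instead of hanging; the 30-second deadline is an arbitrary placeholder.

import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import org.apache.kafka.clients.producer.Producer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.clients.producer.RecordMetadata;

public class BoundedSendSketch {
    static RecordMetadata sendWithDeadline(Producer<byte[], byte[]> producer,
                                           ProducerRecord<byte[], byte[]> record)
            throws InterruptedException, ExecutionException, TimeoutException {
        Future<RecordMetadata> future = producer.send(record);
        producer.flush(); // push batched records out immediately, as testProduce does
        return future.get(30, TimeUnit.SECONDS); // bounded wait instead of bare get()
    }
}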

Example 33 with RecordMetadata

Use of org.apache.kafka.clients.producer.RecordMetadata in project apache-kafka-on-k8s by banzaicloud.

Class TransactionManagerTest, method testTransitionToFatalErrorWhenRetriedBatchIsExpired.

@Test
public void testTransitionToFatalErrorWhenRetriedBatchIsExpired() throws InterruptedException, ExecutionException {
    final long pid = 13131L;
    final short epoch = 1;
    doInitTransactions(pid, epoch);
    transactionManager.beginTransaction();
    transactionManager.maybeAddPartitionToTransaction(tp0);
    Future<RecordMetadata> responseFuture = accumulator.append(tp0, time.milliseconds(), "key".getBytes(), "value".getBytes(), Record.EMPTY_HEADERS, null, MAX_BLOCK_TIMEOUT).future;
    assertFalse(responseFuture.isDone());
    prepareAddPartitionsToTxnResponse(Errors.NONE, tp0, epoch, pid);
    assertFalse(transactionManager.transactionContainsPartition(tp0));
    assertFalse(transactionManager.isSendToPartitionAllowed(tp0));
    // send addPartitions.
    sender.run(time.milliseconds());
    // Check that only addPartitions was sent.
    assertTrue(transactionManager.transactionContainsPartition(tp0));
    assertTrue(transactionManager.isSendToPartitionAllowed(tp0));
    prepareProduceResponse(Errors.NOT_LEADER_FOR_PARTITION, pid, epoch);
    // send the produce request.
    sender.run(time.milliseconds());
    assertFalse(responseFuture.isDone());
    TransactionalRequestResult commitResult = transactionManager.beginCommit();
    // Sleep 10 seconds to make sure that the batches in the queue would be expired if they can't be drained.
    time.sleep(10000);
    // Disconnect the target node for the pending produce request. This will ensure that sender will try to
    // expire the batch.
    Node clusterNode = this.cluster.nodes().get(0);
    client.disconnect(clusterNode.idString());
    client.blackout(clusterNode, 100);
    // We should try to flush the produce, but expire it instead without sending anything.
    sender.run(time.milliseconds());
    assertTrue(responseFuture.isDone());
    try {
        // make sure the produce was expired.
        responseFuture.get();
        fail("Expected to get a TimeoutException since the queued ProducerBatch should have been expired");
    } catch (ExecutionException e) {
        assertTrue(e.getCause() instanceof TimeoutException);
    }
    // Transition to fatal error since we have unresolved batches.
    sender.run(time.milliseconds());
    // Fail the queued transactional requests
    sender.run(time.milliseconds());
    assertTrue(commitResult.isCompleted());
    // the commit should have been dropped.
    assertFalse(commitResult.isSuccessful());
    assertTrue(transactionManager.hasFatalError());
    assertFalse(transactionManager.hasOngoingTransaction());
}
Also used: RecordMetadata (org.apache.kafka.clients.producer.RecordMetadata), Node (org.apache.kafka.common.Node), ExecutionException (java.util.concurrent.ExecutionException), TimeoutException (org.apache.kafka.common.errors.TimeoutException), Test (org.junit.Test)
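
From a caller's perspective, the expiry this test forces looks like the sketch below: an expired batch completes its send future exceptionally, and org.apache.kafka.common.errors.TimeoutException arrives as the cause of the ExecutionException thrown by Future.get(), which is exactly what the catch block above checks.

import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
import org.apache.kafka.clients.producer.Producer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.clients.producer.RecordMetadata;

public class ExpiredBatchSketch {
    static void sendAndReport(Producer<String, String> producer,
                              ProducerRecord<String, String> record) throws InterruptedException {
        Future<RecordMetadata> future = producer.send(record);
        try {
            RecordMetadata metadata = future.get();
            System.out.printf("acked at offset %d%n", metadata.offset());
        } catch (ExecutionException e) {
            if (e.getCause() instanceof org.apache.kafka.common.errors.TimeoutException) {
                // the batch expired in the accumulator before it could be delivered
                System.err.println("batch expired: " + e.getCause().getMessage());
            } else {
                throw new RuntimeException(e.getCause());
            }
        }
    }
}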

Example 34 with RecordMetadata

Use of org.apache.kafka.clients.producer.RecordMetadata in project apache-kafka-on-k8s by banzaicloud.

Class TransactionManagerTest, method testRecoveryFromAbortableErrorTransactionStarted.

@Test
public void testRecoveryFromAbortableErrorTransactionStarted() throws Exception {
    final long pid = 13131L;
    final short epoch = 1;
    final TopicPartition unauthorizedPartition = new TopicPartition("foo", 0);
    doInitTransactions(pid, epoch);
    transactionManager.beginTransaction();
    transactionManager.maybeAddPartitionToTransaction(tp0);
    prepareAddPartitionsToTxn(tp0, Errors.NONE);
    Future<RecordMetadata> authorizedTopicProduceFuture = accumulator.append(tp0, time.milliseconds(), "key".getBytes(), "value".getBytes(), Record.EMPTY_HEADERS, null, MAX_BLOCK_TIMEOUT).future;
    sender.run(time.milliseconds());
    assertTrue(transactionManager.isPartitionAdded(tp0));
    transactionManager.maybeAddPartitionToTransaction(unauthorizedPartition);
    Future<RecordMetadata> unauthorizedTopicProduceFuture = accumulator.append(unauthorizedPartition, time.milliseconds(), "key".getBytes(), "value".getBytes(), Record.EMPTY_HEADERS, null, MAX_BLOCK_TIMEOUT).future;
    prepareAddPartitionsToTxn(singletonMap(unauthorizedPartition, Errors.TOPIC_AUTHORIZATION_FAILED));
    sender.run(time.milliseconds());
    assertTrue(transactionManager.hasAbortableError());
    assertTrue(transactionManager.isPartitionAdded(tp0));
    assertFalse(transactionManager.isPartitionAdded(unauthorizedPartition));
    assertFalse(authorizedTopicProduceFuture.isDone());
    assertFalse(unauthorizedTopicProduceFuture.isDone());
    prepareEndTxnResponse(Errors.NONE, TransactionResult.ABORT, pid, epoch);
    transactionManager.beginAbort();
    sender.run(time.milliseconds());
    // neither produce request has been sent, so they should both be failed immediately
    assertFutureFailed(authorizedTopicProduceFuture);
    assertFutureFailed(unauthorizedTopicProduceFuture);
    assertTrue(transactionManager.isReady());
    assertFalse(transactionManager.hasPartitionsToAdd());
    assertFalse(accumulator.hasIncomplete());
    // ensure we can now start a new transaction
    transactionManager.beginTransaction();
    transactionManager.maybeAddPartitionToTransaction(tp0);
    FutureRecordMetadata nextTransactionFuture = accumulator.append(tp0, time.milliseconds(), "key".getBytes(), "value".getBytes(), Record.EMPTY_HEADERS, null, MAX_BLOCK_TIMEOUT).future;
    prepareAddPartitionsToTxn(singletonMap(tp0, Errors.NONE));
    sender.run(time.milliseconds());
    assertTrue(transactionManager.isPartitionAdded(tp0));
    assertFalse(transactionManager.hasPartitionsToAdd());
    transactionManager.beginCommit();
    prepareProduceResponse(Errors.NONE, pid, epoch);
    sender.run(time.milliseconds());
    assertTrue(nextTransactionFuture.isDone());
    assertNotNull(nextTransactionFuture.get());
    prepareEndTxnResponse(Errors.NONE, TransactionResult.COMMIT, pid, epoch);
    sender.run(time.milliseconds());
    assertTrue(transactionManager.isReady());
}
Also used: RecordMetadata (org.apache.kafka.clients.producer.RecordMetadata), TopicPartition (org.apache.kafka.common.TopicPartition), Test (org.junit.Test)
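
The same recovery behavior is reachable through the public transactional producer API: an abortable error fails the current transaction, but after abortTransaction() the producer may begin a new one, whereas a ProducerFencedException is fatal. A sketch under assumed placeholder broker, topic, and transactional-id values:

import java.util.Properties;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.KafkaException;
import org.apache.kafka.common.errors.ProducerFencedException;
import org.apache.kafka.common.serialization.StringSerializer;

public class AbortAndRetrySketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder
        props.put(ProducerConfig.TRANSACTIONAL_ID_CONFIG, "example-txn-id"); // placeholder
        try (KafkaProducer<String, String> producer =
                new KafkaProducer<>(props, new StringSerializer(), new StringSerializer())) {
            producer.initTransactions();
            try {
                producer.beginTransaction();
                producer.send(new ProducerRecord<>("restricted-topic", "k", "v"));
                producer.commitTransaction();
            } catch (ProducerFencedException e) {
                throw e; // fatal: another producer with the same transactional.id took over
            } catch (KafkaException e) {
                producer.abortTransaction(); // abortable: roll back, then start a new transaction
                producer.beginTransaction();
                producer.send(new ProducerRecord<>("allowed-topic", "k", "v"));
                producer.commitTransaction();
            }
        }
    }
}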

Example 35 with RecordMetadata

Use of org.apache.kafka.clients.producer.RecordMetadata in project incubator-rya by apache.

Class KafkaPeriodicBindingSetExporter, method exportNotification.

/**
 * Exports BindingSets to Kafka. The BindingSet and target topic are extracted
 * from the given BindingSetRecord, and the BindingSet is then published to that topic.
 */
@Override
public void exportNotification(final BindingSetRecord record) throws BindingSetRecordExportException {
    try {
        log.info("Exporting {} records to Kafka to topic: {}", record.getBindingSet().size(), record.getTopic());
        final String bindingName = IncrementalUpdateConstants.PERIODIC_BIN_ID;
        final BindingSet bindingSet = record.getBindingSet();
        final String topic = record.getTopic();
        final long binId = ((Literal) bindingSet.getValue(bindingName)).longValue();
        final Future<RecordMetadata> future = producer.send(new ProducerRecord<String, BindingSet>(topic, Long.toString(binId), bindingSet));
        // wait for confirmation that results have been received
        future.get(5, TimeUnit.SECONDS);
    } catch (final Exception e) {
        // catch all possible exceptional behavior and throw as our checked exception.
        throw new BindingSetRecordExportException(e.getMessage(), e);
    }
}
Also used: RecordMetadata (org.apache.kafka.clients.producer.RecordMetadata), BindingSet (org.openrdf.query.BindingSet), BindingSetRecordExportException (org.apache.rya.periodic.notification.api.BindingSetRecordExportException), Literal (org.openrdf.model.Literal)
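
Not shown in this snippet is how the producer is configured: since the record value is a BindingSet, the producer must be constructed with a value serializer for that type. The sketch below illustrates the wiring; the serializer class name is a placeholder, not the actual Rya implementation.

import java.util.Properties;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.common.serialization.StringSerializer;
import org.openrdf.query.BindingSet;

public class ExporterProducerSketch {
    static KafkaProducer<String, BindingSet> buildProducer(String bootstrapServers) {
        Properties props = new Properties();
        props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
        props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
        // Placeholder class name: Rya supplies its own BindingSet serializer.
        props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, "org.example.BindingSetSerializer");
        return new KafkaProducer<>(props);
    }
}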

Aggregations

RecordMetadata (org.apache.kafka.clients.producer.RecordMetadata): 189
Test (org.junit.Test): 64
Node (org.apache.kafka.common.Node): 50
Test (org.junit.jupiter.api.Test): 50
TopicPartition (org.apache.kafka.common.TopicPartition): 48
ProducerRecord (org.apache.kafka.clients.producer.ProducerRecord): 45
ExecutionException (java.util.concurrent.ExecutionException): 33
Callback (org.apache.kafka.clients.producer.Callback): 32
KafkaProducer (org.apache.kafka.clients.producer.KafkaProducer): 31
Properties (java.util.Properties): 30
HashMap (java.util.HashMap): 24
TimeoutException (org.apache.kafka.common.errors.TimeoutException): 23
ArrayList (java.util.ArrayList): 21
KafkaException (org.apache.kafka.common.KafkaException): 19
List (java.util.List): 15
AtomicInteger (java.util.concurrent.atomic.AtomicInteger): 15
Metrics (org.apache.kafka.common.metrics.Metrics): 15
LinkedHashMap (java.util.LinkedHashMap): 13
Future (java.util.concurrent.Future): 13
Map (java.util.Map): 12