Use of org.apache.kafka.clients.producer.ProducerRecord in project kafka by apache.
The class KafkaBasedLogTest, method testProducerError:
@Test
public void testProducerError() throws Exception {
    expectStart();
    TestFuture<RecordMetadata> tp0Future = new TestFuture<>();
    ProducerRecord<String, String> tp0Record = new ProducerRecord<>(TOPIC, TP0_KEY, TP0_VALUE);
    Capture<org.apache.kafka.clients.producer.Callback> callback0 = EasyMock.newCapture();
    EasyMock.expect(producer.send(EasyMock.eq(tp0Record), EasyMock.capture(callback0))).andReturn(tp0Future);
    expectStop();
    PowerMock.replayAll();

    Map<TopicPartition, Long> endOffsets = new HashMap<>();
    endOffsets.put(TP0, 0L);
    endOffsets.put(TP1, 0L);
    consumer.updateEndOffsets(endOffsets);
    store.start();
    assertEquals(CONSUMER_ASSIGNMENT, consumer.assignment());
    assertEquals(0L, consumer.position(TP0));
    assertEquals(0L, consumer.position(TP1));

    final AtomicReference<Throwable> setException = new AtomicReference<>();
    store.send(TP0_KEY, TP0_VALUE, new org.apache.kafka.clients.producer.Callback() {
        @Override
        public void onCompletion(RecordMetadata metadata, Exception exception) {
            // Should only be invoked once
            assertNull(setException.get());
            setException.set(exception);
        }
    });
    KafkaException exc = new LeaderNotAvailableException("Error");
    tp0Future.resolve(exc);
    callback0.getValue().onCompletion(null, exc);
    assertNotNull(setException.get());

    store.stop();
    assertFalse(Whitebox.<Thread>getInternalState(store, "thread").isAlive());
    assertTrue(consumer.closed());
    PowerMock.verifyAll();
}
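The contract this test exercises is the producer's asynchronous error signaling: a failed send completes the user Callback with a null RecordMetadata and the causing exception, exactly once. A minimal sketch of that contract against a real producer, outside the EasyMock harness (the props map, topic name, and firstError holder are assumptions for illustration, not taken from the test above):

    final AtomicReference<Exception> firstError = new AtomicReference<>();
    try (Producer<String, String> producer = new KafkaProducer<>(props)) {
        producer.send(new ProducerRecord<>("example-topic", "key", "value"),
            (metadata, exception) -> {
                if (exception != null) {
                    // On failure, metadata is null and the exception (e.g.
                    // LeaderNotAvailableException) is delivered here once.
                    firstError.compareAndSet(null, exception);
                }
            });
    }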
Use of org.apache.kafka.clients.producer.ProducerRecord in project kafka by apache.
The class RecordCollectorTest, method shouldThrowStreamsExceptionAfterMaxAttempts:
@SuppressWarnings("unchecked")
@Test(expected = StreamsException.class)
public void shouldThrowStreamsExceptionAfterMaxAttempts() throws Exception {
    RecordCollector collector = new RecordCollectorImpl(
        new MockProducer(cluster, true, new DefaultPartitioner(), byteArraySerializer, byteArraySerializer) {
            @Override
            public synchronized Future<RecordMetadata> send(final ProducerRecord record, final Callback callback) {
                // Simulate a broker that never answers: every attempt times out,
                // so the collector's retries are exhausted and it gives up.
                throw new TimeoutException();
            }
        }, "test");
    collector.send("topic1", "3", "0", null, null, stringSerializer, stringSerializer, streamPartitioner);
}
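Subclassing send is one way to inject the failure; MockProducer can also simulate send errors without a subclass when constructed with auto-completion disabled, completing each in-flight send by hand. A sketch under that assumption (the serializer instances and exception message are placeholders):

    MockProducer<byte[], byte[]> producer =
        new MockProducer<>(false, byteArraySerializer, byteArraySerializer);
    producer.send(new ProducerRecord<>("topic1", "key".getBytes(), "value".getBytes()));
    // Complete the oldest in-flight send exceptionally; its Future and Callback
    // observe this error instead of a RecordMetadata.
    producer.errorNext(new TimeoutException("simulated broker timeout"));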
Use of org.apache.kafka.clients.producer.ProducerRecord in project kafka by apache.
The class RecordCollectorTest, method shouldThrowStreamsExceptionOnFlushIfASendFailed:
@SuppressWarnings("unchecked")
@Test(expected = StreamsException.class)
public void shouldThrowStreamsExceptionOnFlushIfASendFailed() throws Exception {
    final RecordCollector collector = new RecordCollectorImpl(
        new MockProducer(cluster, true, new DefaultPartitioner(), byteArraySerializer, byteArraySerializer) {
            @Override
            public synchronized Future<RecordMetadata> send(final ProducerRecord record, final Callback callback) {
                // Fail the send asynchronously: the callback reports an error,
                // which the collector remembers and rethrows on flush().
                callback.onCompletion(null, new Exception());
                return null;
            }
        }, "test");
    collector.send("topic1", "3", "0", null, null, stringSerializer, stringSerializer, streamPartitioner);
    collector.flush();
}
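The point of the test is that an asynchronous send failure is deferred, not lost: send() itself returns normally, and the error only surfaces on the next flush(). Caller-side, that makes flush sites the place to handle such failures; a hedged sketch (the logger name is hypothetical):

    try {
        collector.flush();
    } catch (StreamsException e) {
        // A send that failed asynchronously earlier is rethrown here; escalate
        // or restart the task rather than continuing with lost records.
        log.error("flush surfaced an earlier send failure", e);
        throw e;
    }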
Use of org.apache.kafka.clients.producer.ProducerRecord in project heron by twitter.
The class KafkaUtilsTest, method createTopicAndSendMessage:
private void createTopicAndSendMessage(String key, String value) {
    Properties p = new Properties();
    p.put("acks", "1");
    p.put("bootstrap.servers", broker.getBrokerConnectionString());
    p.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
    p.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");
    p.put("metadata.fetch.timeout.ms", 1000);
    KafkaProducer<String, String> producer = new KafkaProducer<String, String>(p);
    try {
        // Block on the returned Future to make the send synchronous.
        producer.send(new ProducerRecord<String, String>(config.topic, key, value)).get();
        // SUPPRESS CHECKSTYLE IllegalCatch
    } catch (Exception e) {
        // Log before failing the test: Assert.fail throws an AssertionError,
        // so a log statement placed after it would never execute.
        LOG.error("Failed to do synchronous sending due to " + e, e);
        Assert.fail(e.getMessage());
    } finally {
        producer.close();
    }
}
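One caveat with this synchronous pattern: send(...).get() with no timeout can hang the test indefinitely if the broker never acknowledges. A bounded variant using the timed Future.get (the 10-second value is an arbitrary choice, not from the original):

    producer.send(new ProducerRecord<String, String>(config.topic, key, value))
            .get(10, TimeUnit.SECONDS); // throws java.util.concurrent.TimeoutException if no ack arrives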
Use of org.apache.kafka.clients.producer.ProducerRecord in project storm by apache.
The class TridentKafkaState, method updateState:
public void updateState(List<TridentTuple> tuples, TridentCollector collector) {
    String topic = null;
    try {
        // First pass: fire off all sends asynchronously and keep the futures.
        List<Future<RecordMetadata>> futures = new ArrayList<>(tuples.size());
        for (TridentTuple tuple : tuples) {
            topic = topicSelector.getTopic(tuple);
            if (topic != null) {
                Future<RecordMetadata> result = producer.send(
                    new ProducerRecord(topic, mapper.getKeyFromTuple(tuple), mapper.getMessageFromTuple(tuple)));
                futures.add(result);
            } else {
                LOG.warn("skipping key = " + mapper.getKeyFromTuple(tuple) + ", topic selector returned null.");
            }
        }

        // Second pass: block on each future and collect any failures.
        List<ExecutionException> exceptions = new ArrayList<>(futures.size());
        for (Future<RecordMetadata> future : futures) {
            try {
                future.get();
            } catch (ExecutionException e) {
                exceptions.add(e);
            }
        }

        if (exceptions.size() > 0) {
            StringBuilder errorMsg = new StringBuilder("Could not retrieve result for messages " + tuples
                + " from topic = " + topic + " because of the following exceptions: \n");
            for (ExecutionException exception : exceptions) {
                errorMsg.append(exception.getMessage()).append("\n");
            }
            LOG.error(errorMsg.toString());
            throw new FailedException(errorMsg.toString());
        }
    } catch (FailedException fe) {
        // Rethrow as-is so the FailedException built above is not re-wrapped
        // by the generic handler below.
        throw fe;
    } catch (Exception ex) {
        String errorMsg = "Could not send messages " + tuples + " to topic = " + topic;
        LOG.warn(errorMsg, ex);
        throw new FailedException(errorMsg, ex);
    }
}
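Collecting futures and blocking on each get() is one batching strategy; an alternative that avoids one blocking call per record is to register a shared Callback and drain failures after a single flush(). A sketch of that approach (the failures queue and record variable are hypothetical, not from the code above):

    final ConcurrentLinkedQueue<Exception> failures = new ConcurrentLinkedQueue<>();
    producer.send(record, (metadata, exception) -> {
        if (exception != null) {
            failures.add(exception);
        }
    });
    // flush() blocks until every in-flight send has completed and its callback
    // has run, so 'failures' can then be inspected once for the whole batch.
    producer.flush();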