Use of org.apache.kafka.common.KafkaException in the Apache Storm project:
class KafkaBoltTest, method testCustomCallbackIsWrappedByDefaultCallbackBehavior.
@Test
public void testCustomCallbackIsWrappedByDefaultCallbackBehavior() {
    // Bolt backed by a non-auto-completing MockProducer so the test controls
    // when (and how) each pending send completes.
    MockProducer<String, String> mockProducer = new MockProducer<>(Cluster.empty(), false, null, null, null);
    KafkaBolt<String, String> kafkaBolt = makeBolt(mockProducer);

    PreparableCallback userCallback = mock(PreparableCallback.class);
    kafkaBolt.withProducerCallback(userCallback);

    OutputCollector mockCollector = mock(OutputCollector.class);
    TopologyContext mockContext = mock(TopologyContext.class);
    Map<String, Object> topoConfig = new HashMap<>();
    kafkaBolt.prepare(topoConfig, mockContext, mockCollector);

    // The user-supplied callback must be prepared together with the bolt.
    verify(userCallback).prepare(topoConfig, mockContext);

    String recordKey = "KEY";
    String recordValue = "VALUE";
    Tuple tuple = createTestTuple(recordKey, recordValue);
    kafkaBolt.execute(tuple);

    // Exactly one record should have been handed to the producer.
    assertThat(mockProducer.history().size(), is(1));
    ProducerRecord<String, String> sent = mockProducer.history().get(0);
    LOG.info("GOT {} ->", sent);
    LOG.info("{}, {}, {}", sent.topic(), sent.key(), sent.value());
    assertThat(sent.topic(), is("MY_TOPIC"));
    assertThat(sent.key(), is(recordKey));
    assertThat(sent.value(), is(recordValue));

    // Complete the pending send with an error: the custom callback must see it,
    // and the wrapping default behavior must still report the error and fail the tuple.
    KafkaException sendError = new KafkaException();
    mockProducer.errorNext(sendError);
    verify(userCallback).onCompletion(any(), eq(sendError));
    verify(mockCollector).reportError(sendError);
    verify(mockCollector).fail(tuple);
}
Use of org.apache.kafka.common.KafkaException in the Apache Storm project:
class KafkaBoltTest, method testSimpleWithError.
@Test
public void testSimpleWithError() {
    // Non-auto-completing MockProducer: the test decides how the send finishes.
    MockProducer<String, String> mockProducer = new MockProducer<>(Cluster.empty(), false, null, null, null);
    KafkaBolt<String, String> kafkaBolt = makeBolt(mockProducer);

    OutputCollector mockCollector = mock(OutputCollector.class);
    TopologyContext mockContext = mock(TopologyContext.class);
    Map<String, Object> conf = new HashMap<>();
    kafkaBolt.prepare(conf, mockContext, mockCollector);

    String recordKey = "KEY";
    String recordValue = "VALUE";
    Tuple tuple = createTestTuple(recordKey, recordValue);
    kafkaBolt.execute(tuple);

    // Exactly one record should have reached the producer, with the
    // topic/key/value taken from the emitted tuple.
    assertThat(mockProducer.history().size(), is(1));
    ProducerRecord<String, String> sent = mockProducer.history().get(0);
    LOG.info("GOT {} ->", sent);
    LOG.info("{}, {}, {}", sent.topic(), sent.key(), sent.value());
    assertThat(sent.topic(), is("MY_TOPIC"));
    assertThat(sent.key(), is(recordKey));
    assertThat(sent.value(), is(recordValue));

    // Fail the pending send: the bolt must report the error and fail the tuple.
    KafkaException sendError = new KafkaException();
    mockProducer.errorNext(sendError);
    verify(mockCollector).reportError(sendError);
    verify(mockCollector).fail(tuple);
}
Use of org.apache.kafka.common.KafkaException in the Apache Kafka project:
class Sanitizer, method sanitize.
/**
 * Sanitize {@code name} for safe use as a JMX metric name as well as a ZooKeeper
 * node name, using URL-encoding.
 *
 * @param name the raw name to encode
 * @return the URL-encoded name with {@code *} and {@code +} converted to
 *         percent escapes
 */
public static String sanitize(String name) {
    try {
        // URLEncoder leaves '*' unescaped (a pattern character in JMX
        // ObjectNames) and emits '+' for spaces; rewrite both as plain
        // percent escapes. URLEncoder escapes '%' itself, so the
        // replacements cannot collide with already-encoded input.
        return URLEncoder.encode(name, StandardCharsets.UTF_8.name())
                .replace("*", "%2A")
                .replace("+", "%20");
    } catch (UnsupportedEncodingException e) {
        // UTF-8 support is mandated by the JVM spec; effectively unreachable.
        throw new KafkaException(e);
    }
}
Use of org.apache.kafka.common.KafkaException in the Apache Kafka project:
class FileRecordsTest, method testTruncateNotCalledIfSizeIsBiggerThanTargetSize.
/**
 * truncateTo must throw a KafkaException when the requested target size is
 * larger than the current size of the FileRecords, rather than truncating.
 */
@Test
public void testTruncateNotCalledIfSizeIsBiggerThanTargetSize() throws IOException {
    FileChannel channel = mock(FileChannel.class);
    when(channel.size()).thenReturn(42L);
    FileRecords records = new FileRecords(tempFile(), channel, 0, Integer.MAX_VALUE, false);

    try {
        // 43 > 42, so this must be rejected before any truncation happens.
        records.truncateTo(43);
        fail("Should throw KafkaException");
    } catch (KafkaException expected) {
        // expected: target size exceeds the file size
    }

    // The size check is the only interaction we require on the channel.
    verify(channel, atLeastOnce()).size();
}
Use of org.apache.kafka.common.KafkaException in the Apache Kafka project:
class WorkerGroupMember, method stop.
/**
 * Stops this Connect group member, closing the coordinator, metrics and
 * network client and unregistering the JMX app info.
 *
 * @param swallowException when {@code true}, close failures are collected but
 *                         not rethrown; when {@code false}, the first failure
 *                         is wrapped in a KafkaException and thrown
 */
private void stop(boolean swallowException) {
    log.trace("Stopping the Connect group member.");
    AtomicReference<Throwable> firstFailure = new AtomicReference<>();
    this.stopped = true;
    // Close in dependency order, remembering only the first failure.
    Utils.closeQuietly(coordinator, "coordinator", firstFailure);
    Utils.closeQuietly(metrics, "consumer metrics", firstFailure);
    Utils.closeQuietly(client, "consumer network client", firstFailure);
    AppInfoParser.unregisterAppInfo(JMX_PREFIX, clientId, metrics);

    Throwable failure = firstFailure.get();
    if (failure != null && !swallowException) {
        throw new KafkaException("Failed to stop the Connect group member", failure);
    }
    log.debug("The Connect group member has stopped.");
}
Aggregations