Use of org.apache.kafka.streams.errors.StreamsException in project kafka by apache.
The class SinkNode, method process().
@Override
public void process(final K key, final V value) {
    final RecordCollector collector = ((RecordCollector.Supplier) context).recordCollector();
    final long timestamp = context.timestamp();
    if (timestamp < 0) {
        throw new StreamsException("Invalid (negative) timestamp of " + timestamp + " for output record <" + key + ":" + value + ">.");
    }
    try {
        collector.send(topic, key, value, null, timestamp, keySerializer, valSerializer, partitioner);
    } catch (final ClassCastException e) {
        // A ClassCastException here means the configured Serdes do not match the actual record types.
        final String keyClass = key == null ? "unknown because key is null" : key.getClass().getName();
        final String valueClass = value == null ? "unknown because value is null" : value.getClass().getName();
        throw new StreamsException(String.format(
            "A serializer (key: %s / value: %s) is not compatible to the actual key or value type " +
                "(key type: %s / value type: %s). Change the default Serdes in StreamConfig or " +
                "provide correct Serdes via method parameters.",
            keySerializer.getClass().getName(), valSerializer.getClass().getName(), keyClass, valueClass), e);
    }
}
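A minimal sketch of how this guard typically fires, assuming hypothetical topic names, application id, and broker address: if the default Serdes in the configuration do not match the types the topology actually produces, send() throws the ClassCastException that is rethrown above. Passing matching Serdes explicitly to to() avoids that path.

import java.util.Properties;

import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.streams.KafkaStreams;
import org.apache.kafka.streams.StreamsConfig;
import org.apache.kafka.streams.kstream.KStreamBuilder;

public class SinkSerdeExample {
    public static void main(final String[] args) {
        final Properties props = new Properties();
        props.put(StreamsConfig.APPLICATION_ID_CONFIG, "sink-serde-example"); // hypothetical
        props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");  // hypothetical
        // Default Serdes, used by SinkNode when none are given per operation:
        props.put(StreamsConfig.KEY_SERDE_CLASS_CONFIG, Serdes.String().getClass().getName());
        props.put(StreamsConfig.VALUE_SERDE_CLASS_CONFIG, Serdes.String().getClass().getName());

        final KStreamBuilder builder = new KStreamBuilder();
        // Explicit Serdes on to() override the defaults, so the topology cannot
        // hit the ClassCastException path shown above for this sink:
        builder.<String, String>stream("input-topic")
               .to(Serdes.String(), Serdes.String(), "output-topic");

        new KafkaStreams(builder, props).start();
    }
}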
Use of org.apache.kafka.streams.errors.StreamsException in project kafka by apache.
The class SourceNodeRecordDeserializer, method deserialize().
@Override
public ConsumerRecord<Object, Object> deserialize(final ConsumerRecord<byte[], byte[]> rawRecord) {
    final Object key;
    try {
        key = sourceNode.deserializeKey(rawRecord.topic(), rawRecord.key());
    } catch (final Exception e) {
        throw new StreamsException(format("Failed to deserialize key for record. topic=%s, partition=%d, offset=%d",
            rawRecord.topic(), rawRecord.partition(), rawRecord.offset()), e);
    }
    final Object value;
    try {
        value = sourceNode.deserializeValue(rawRecord.topic(), rawRecord.value());
    } catch (final Exception e) {
        throw new StreamsException(format("Failed to deserialize value for record. topic=%s, partition=%d, offset=%d",
            rawRecord.topic(), rawRecord.partition(), rawRecord.offset()), e);
    }
    // Rebuild the record with the deserialized key and value, preserving all other metadata.
    return new ConsumerRecord<>(rawRecord.topic(), rawRecord.partition(), rawRecord.offset(), rawRecord.timestamp(),
        TimestampType.CREATE_TIME, rawRecord.checksum(), rawRecord.serializedKeySize(), rawRecord.serializedValueSize(),
        key, value);
}
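Before dedicated deserialization exception handlers existed, a common workaround for poison records was to consume raw bytes and deserialize inside the topology, so a single corrupt record does not kill the task with the StreamsException above. A minimal sketch, assuming a String-valued topic with a hypothetical name; the null-and-filter handling is illustrative:

import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.common.serialization.StringDeserializer;
import org.apache.kafka.streams.kstream.KStream;
import org.apache.kafka.streams.kstream.KStreamBuilder;

public class SafeDeserializeExample {
    public static KStream<byte[], String> safeValues(final KStreamBuilder builder) {
        final StringDeserializer valueDeserializer = new StringDeserializer();
        // Read raw bytes so the source node never throws on bad payloads.
        return builder.<byte[], byte[]>stream(Serdes.ByteArray(), Serdes.ByteArray(), "input-topic") // hypothetical
                .mapValues(rawValue -> {
                    try {
                        return valueDeserializer.deserialize("input-topic", rawValue);
                    } catch (final RuntimeException e) {
                        return null; // poison record: mark for filtering instead of failing the task
                    }
                })
                .filter((key, value) -> value != null);
    }
}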
Use of org.apache.kafka.streams.errors.StreamsException in project kafka by apache.
The class StreamsConfig, method keySerde().
/**
 * Return a {@link Serde#configure(Map, boolean) configured} instance of the {@link #KEY_SERDE_CLASS_CONFIG key Serde
 * class}.
 *
 * @return a configured instance of the key Serde class
 */
public Serde keySerde() {
    try {
        final Serde<?> serde = getConfiguredInstance(KEY_SERDE_CLASS_CONFIG, Serde.class);
        serde.configure(originals(), true);
        return serde;
    } catch (final Exception e) {
        throw new StreamsException(String.format("Failed to configure key serde %s", get(KEY_SERDE_CLASS_CONFIG)), e);
    }
}
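A minimal sketch of the configuration this method reads, with a hypothetical application id and broker address. The class named under KEY_SERDE_CLASS_CONFIG is instantiated reflectively, so it needs a public no-argument constructor; a failure there or in configure() surfaces as the StreamsException above.

import java.util.Properties;

import org.apache.kafka.common.serialization.Serde;
import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.streams.StreamsConfig;

public class KeySerdeExample {
    public static Serde<?> configuredKeySerde() {
        final Properties props = new Properties();
        props.put(StreamsConfig.APPLICATION_ID_CONFIG, "key-serde-example"); // hypothetical
        props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // hypothetical
        props.put(StreamsConfig.KEY_SERDE_CLASS_CONFIG, Serdes.String().getClass().getName());
        final StreamsConfig config = new StreamsConfig(props);
        return config.keySerde(); // throws StreamsException if instantiation or configure() fails
    }
}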
Use of org.apache.kafka.streams.errors.StreamsException in project kafka by apache.
The class StreamTask, method punctuate().
/**
 * @throws IllegalStateException if the current node is not null
 */
@Override
public void punctuate(final ProcessorNode node, final long timestamp) {
    if (processorContext.currentNode() != null) {
        throw new IllegalStateException(String.format("%s Current node is not null", logPrefix));
    }
    final StampedRecord stampedRecord = new StampedRecord(DUMMY_RECORD, timestamp);
    updateProcessorContext(createRecordContext(stampedRecord), node);
    log.trace("{} Punctuating processor {} with timestamp {}", logPrefix, node.name(), timestamp);
    try {
        node.punctuate(timestamp);
    } catch (final KafkaException ke) {
        // Wrap so the failure carries the task id and processor name.
        throw new StreamsException(String.format("Exception caught in punctuate. taskId=%s processor=%s", id, node.name()), ke);
    } finally {
        processorContext.setCurrentNode(null);
    }
}
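A minimal sketch of a processor whose punctuate() callback runs inside the try/catch above, using the 0.10.x schedule(interval) API that matches this snippet; the sixty-second interval and the commit() body are illustrative only.

import org.apache.kafka.streams.processor.AbstractProcessor;
import org.apache.kafka.streams.processor.ProcessorContext;

public class PunctuatingProcessor extends AbstractProcessor<String, String> {
    @Override
    public void init(final ProcessorContext context) {
        super.init(context);
        context.schedule(60000L); // request punctuate() every 60s of stream time
    }

    @Override
    public void process(final String key, final String value) {
        context().forward(key, value);
    }

    @Override
    public void punctuate(final long timestamp) {
        // Runs via StreamTask.punctuate(); any KafkaException thrown here is
        // rethrown as "Exception caught in punctuate. taskId=... processor=..."
        context().commit();
    }
}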
Use of org.apache.kafka.streams.errors.StreamsException in project kafka by apache.
The class StreamsKafkaClient, method checkBrokerCompatibility().
/**
 * Check if the used brokers have version 0.10.1.x or higher.
 * <p>
 * Note, for <em>pre</em> 0.10.x brokers the broker version cannot be checked and the client will hang and retry
 * until it {@link StreamsConfig#REQUEST_TIMEOUT_MS_CONFIG times out}.
 *
 * @throws StreamsException if brokers have version 0.10.0.x
 */
public void checkBrokerCompatibility() throws StreamsException {
    final ClientRequest clientRequest = kafkaClient.newClientRequest(
        getAnyReadyBrokerId(), new ApiVersionsRequest.Builder(), Time.SYSTEM.milliseconds(), true);
    final ClientResponse clientResponse = sendRequest(clientRequest);
    if (!clientResponse.hasResponse()) {
        throw new StreamsException("Empty response for client request.");
    }
    if (!(clientResponse.responseBody() instanceof ApiVersionsResponse)) {
        throw new StreamsException("Inconsistent response type for API versions request. " +
            "Expected ApiVersionsResponse but received " + clientResponse.responseBody().getClass().getName());
    }
    final ApiVersionsResponse apiVersionsResponse = (ApiVersionsResponse) clientResponse.responseBody();
    // CreateTopics was added in broker 0.10.1.0; its absence implies an older, unsupported broker.
    if (apiVersionsResponse.apiVersion(ApiKeys.CREATE_TOPICS.id) == null) {
        throw new StreamsException("Kafka Streams requires broker version 0.10.1.x or higher.");
    }
}
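Since StreamsException is unchecked, a failed compatibility check surfaces on the stream thread rather than at the call site; registering an uncaught exception handler is one way an application can observe it. A minimal sketch, with the handler body illustrative rather than prescribed by the source:

import org.apache.kafka.streams.KafkaStreams;
import org.apache.kafka.streams.errors.StreamsException;

public class CompatibilityCheckExample {
    public static void startGuarded(final KafkaStreams streams) {
        streams.setUncaughtExceptionHandler((thread, throwable) -> {
            if (throwable instanceof StreamsException) {
                // e.g. "Kafka Streams requires broker version 0.10.1.x or higher."
                System.err.println("Fatal streams error on " + thread.getName() + ": " + throwable.getMessage());
            }
            // Trigger application shutdown elsewhere; closing the streams
            // instance from the dying thread itself can block.
        });
        streams.start();
    }
}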