Example use of org.apache.kafka.streams.errors.ProcessorStateException in the Apache Kafka project: class MeteredSessionStore, method remove.
/**
 * Removes the session with the given key from the wrapped store, recording
 * the latency of the operation on {@code removeSensor} when measurement is
 * enabled.
 *
 * <p>A {@link ProcessorStateException} thrown by the inner store is rethrown
 * with the session key formatted into its message (the inner message is a
 * {@code %s}-style template).
 *
 * @param sessionKey the windowed key of the session to remove; must be
 *                   non-null with a non-null key and window
 * @throws ProcessorStateException if the wrapped store fails to remove the key
 */
@Override
public void remove(final Windowed<K> sessionKey) {
    Objects.requireNonNull(sessionKey, "sessionKey can't be null");
    Objects.requireNonNull(sessionKey.key(), "sessionKey.key() can't be null");
    Objects.requireNonNull(sessionKey.window(), "sessionKey.window() can't be null");
    try {
        maybeMeasureLatency(() -> {
            final Bytes rawKey = keyBytes(sessionKey.key());
            wrapped().remove(new Windowed<>(rawKey, sessionKey.window()));
        }, time, removeSensor);
    } catch (final ProcessorStateException e) {
        // The inner exception's message is a format template; substitute the key.
        throw new ProcessorStateException(String.format(e.getMessage(), sessionKey.key()), e);
    }
}
Example use of org.apache.kafka.streams.errors.ProcessorStateException in the Apache Kafka project: class AbstractTask, method initializeOffsetLimits.
/**
 * Seeds the state manager with a per-partition offset limit taken from the
 * consumer's last committed offset for each assigned partition, defaulting
 * to 0 when nothing has been committed yet.
 *
 * @throws ProcessorStateException if fetching a committed offset fails with an
 *                                 {@link AuthorizationException} or any other
 *                                 {@link KafkaException}
 * @throws WakeupException         propagated unchanged so the caller can
 *                                 handle shutdown wake-ups
 */
protected void initializeOffsetLimits() {
    for (final TopicPartition tp : partitions) {
        try {
            // TODO: batch API?
            final OffsetAndMetadata committed = consumer.committed(tp);
            final long limit = (committed == null) ? 0L : committed.offset();
            stateMgr.putOffsetLimit(tp, limit);
        } catch (final AuthorizationException fatal) {
            throw new ProcessorStateException(String.format("task [%s] AuthorizationException when initializing offsets for %s", id, tp), fatal);
        } catch (final WakeupException wakeup) {
            // Not a failure: rethrow so shutdown wake-ups reach the caller intact.
            throw wakeup;
        } catch (final KafkaException cause) {
            throw new ProcessorStateException(String.format("task [%s] Failed to initialize offsets for %s", id, tp), cause);
        }
    }
}
Example use of org.apache.kafka.streams.errors.ProcessorStateException in the Apache Kafka project: class ProcessorStateManager, method flush.
/**
 * Flushes every state store registered with this state manager, wrapping any
 * failure in a {@link ProcessorStateException} that names the offending store.
 *
 * @param context the processor context (unused here; kept for the interface)
 * @throws ProcessorStateException if any store's flush throws
 */
@Override
public void flush(final InternalProcessorContext context) {
    if (this.stores.isEmpty()) {
        return;
    }
    log.debug("{} Flushing all stores registered in the state manager", logPrefix);
    for (final StateStore registered : this.stores.values()) {
        try {
            log.trace("{} Flushing store={}", logPrefix, registered.name());
            registered.flush();
        } catch (final Exception e) {
            throw new ProcessorStateException(String.format("%s Failed to flush state store %s", logPrefix, registered.name()), e);
        }
    }
}
Example use of org.apache.kafka.streams.errors.ProcessorStateException in the Apache Kafka project: class RocksDBStore, method putAll.
@Override
public void putAll(List<KeyValue<K, V>> entries) {
try (WriteBatch batch = new WriteBatch()) {
for (KeyValue<K, V> entry : entries) {
final byte[] rawKey = serdes.rawKey(entry.key);
if (entry.value == null) {
db.delete(rawKey);
} else {
final byte[] value = serdes.rawValue(entry.value);
batch.put(rawKey, value);
}
}
db.write(wOptions, batch);
} catch (RocksDBException e) {
throw new ProcessorStateException("Error while batch writing to store " + this.name, e);
}
}
Example use of org.apache.kafka.streams.errors.ProcessorStateException in the project apache-kafka-on-k8s by banzaicloud: class RocksDBStore, method approximateNumEntries.
/**
 * Returns an approximate number of key-value mappings in this store.
 *
 * <p><code>RocksDB</code> cannot produce an exact count without a full scan,
 * so this reads the <code>rocksdb.estimate-num-keys</code> property instead.
 * The estimate also counts dirty keys still sitting in the store's in-memory
 * cache, so some entries may be double-counted and the result inflated.
 *
 * @return an approximate count of key-value mappings in the store, capped at
 *         {@link Long#MAX_VALUE} when the raw estimate overflows
 * @throws ProcessorStateException if reading the RocksDB property fails
 */
@Override
public long approximateNumEntries() {
    validateStoreOpen();
    final long estimate;
    try {
        estimate = this.db.getLongProperty("rocksdb.estimate-num-keys");
    } catch (final RocksDBException e) {
        throw new ProcessorStateException("Error fetching property from store " + this.name, e);
    }
    // Clamp instead of returning a wrapped/overflowed value.
    return isOverflowing(estimate) ? Long.MAX_VALUE : estimate;
}
Aggregations