Use of org.apache.kafka.streams.errors.ProcessorStateException in project kafka by apache.
The class RocksDBStore, method restoreBatch.

void restoreBatch(final Collection<ConsumerRecord<byte[], byte[]>> records) {
    try (final WriteBatch batch = new WriteBatch()) {
        final List<KeyValue<byte[], byte[]>> keyValues = new ArrayList<>();
        for (final ConsumerRecord<byte[], byte[]> record : records) {
            ChangelogRecordDeserializationHelper.applyChecksAndUpdatePosition(record, consistencyEnabled, position);
            // If version headers are not present or the version is V0, the record
            // is restored as a plain key-value pair.
            keyValues.add(new KeyValue<>(record.key(), record.value()));
        }
        dbAccessor.prepareBatchForRestore(keyValues, batch);
        write(batch);
    } catch (final RocksDBException e) {
        throw new ProcessorStateException("Error restoring batch to store " + name, e);
    }
}
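The snippet shows the idiom this page collects: catch the checked RocksDBException at the store boundary and rethrow it as the unchecked ProcessorStateException, naming the affected store. Below is a minimal standalone sketch of the same wrap-and-rethrow pattern; the BatchWriter class and its fields are hypothetical and not part of RocksDBStore.

import org.apache.kafka.streams.errors.ProcessorStateException;
import org.rocksdb.RocksDB;
import org.rocksdb.RocksDBException;
import org.rocksdb.WriteBatch;
import org.rocksdb.WriteOptions;

// Hypothetical helper that applies a WriteBatch and converts the checked
// RocksDBException into Streams' unchecked ProcessorStateException.
final class BatchWriter {
    private final RocksDB db;
    private final String name;

    BatchWriter(final RocksDB db, final String name) {
        this.db = db;
        this.name = name;
    }

    void write(final WriteBatch batch) {
        try (final WriteOptions options = new WriteOptions()) {
            db.write(options, batch);
        } catch (final RocksDBException e) {
            // Preserve the cause and name the store, as RocksDBStore does above.
            throw new ProcessorStateException("Error restoring batch to store " + name, e);
        }
    }
}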
Use of org.apache.kafka.streams.errors.ProcessorStateException in project kafka by apache.
The class RocksDBStore, method delete.

@Override
public synchronized byte[] delete(final Bytes key) {
    Objects.requireNonNull(key, "key cannot be null");
    final byte[] oldValue;
    try {
        oldValue = dbAccessor.getOnly(key.get());
    } catch (final RocksDBException e) {
        // String formatting happens in the wrapping stores, so the formatted message is thrown from there.
        throw new ProcessorStateException("Error while getting value for key from store " + name, e);
    }
    put(key, null);
    return oldValue;
}
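Note that delete is implemented as a read of the old value followed by put(key, null), so the tombstone travels through the regular put path. A hedged sketch of a caller that handles the exception; the removeKey method is illustrative, not Kafka API:

import org.apache.kafka.common.utils.Bytes;
import org.apache.kafka.streams.errors.ProcessorStateException;
import org.apache.kafka.streams.state.KeyValueStore;

// Hypothetical caller: delete returns the previous value (or null) and surfaces
// RocksDB read failures as ProcessorStateException.
static byte[] removeKey(final KeyValueStore<Bytes, byte[]> store, final Bytes key) {
    try {
        return store.delete(key);
    } catch (final ProcessorStateException e) {
        // A wrapping store may add the key to the message before rethrowing.
        System.err.println("Failed to delete from store: " + e.getMessage());
        throw e;
    }
}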
Use of org.apache.kafka.streams.errors.ProcessorStateException in project kafka by apache.
The class RocksDBStore, method addValueProvidersToMetricsRecorder.

private void addValueProvidersToMetricsRecorder() {
    final TableFormatConfig tableFormatConfig = userSpecifiedOptions.tableFormatConfig();
    final Statistics statistics = userSpecifiedStatistics ? null : userSpecifiedOptions.statistics();
    if (tableFormatConfig instanceof BlockBasedTableConfigWithAccessibleCache) {
        final Cache cache = ((BlockBasedTableConfigWithAccessibleCache) tableFormatConfig).blockCache();
        metricsRecorder.addValueProviders(name, db, cache, statistics);
    } else if (tableFormatConfig instanceof BlockBasedTableConfig) {
        throw new ProcessorStateException("The used block-based table format configuration does not expose the " +
            "block cache. Use the BlockBasedTableConfig instance provided by Options#tableFormatConfig() to configure " +
            "the block-based table format of RocksDB. Do not provide a new instance of BlockBasedTableConfig to " +
            "the RocksDB options.");
    } else {
        metricsRecorder.addValueProviders(name, db, null, statistics);
    }
}
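This exception fires when a user-supplied RocksDBConfigSetter replaces the table format config with a fresh BlockBasedTableConfig, which hides the block cache from the metrics recorder. A sketch of a config setter that follows the advice in the message by mutating the instance returned by Options#tableFormatConfig(); the block size value is an arbitrary example:

import java.util.Map;
import org.apache.kafka.streams.state.RocksDBConfigSetter;
import org.rocksdb.BlockBasedTableConfig;
import org.rocksdb.Options;

// Sketch of a RocksDBConfigSetter that avoids the ProcessorStateException above:
// it tunes the BlockBasedTableConfig already attached to the Options instead of
// installing a new BlockBasedTableConfig instance.
public class SafeTableConfigSetter implements RocksDBConfigSetter {
    @Override
    public void setConfig(final String storeName, final Options options, final Map<String, Object> configs) {
        final BlockBasedTableConfig tableConfig = (BlockBasedTableConfig) options.tableFormatConfig();
        tableConfig.setBlockSize(16 * 1024L);      // example value; tune as needed
        options.setTableFormatConfig(tableConfig); // re-set the same instance
    }

    @Override
    public void close(final String storeName, final Options options) {}
}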
Use of org.apache.kafka.streams.errors.ProcessorStateException in project kafka by apache.
The class RocksDBTimestampedStore, method openRocksDB.

@Override
void openRocksDB(final DBOptions dbOptions, final ColumnFamilyOptions columnFamilyOptions) {
    final List<ColumnFamilyDescriptor> columnFamilyDescriptors = asList(
        new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY, columnFamilyOptions),
        new ColumnFamilyDescriptor("keyValueWithTimestamp".getBytes(StandardCharsets.UTF_8), columnFamilyOptions));
    final List<ColumnFamilyHandle> columnFamilies = new ArrayList<>(columnFamilyDescriptors.size());
    try {
        db = RocksDB.open(dbOptions, dbDir.getAbsolutePath(), columnFamilyDescriptors, columnFamilies);
        setDbAccessor(columnFamilies.get(0), columnFamilies.get(1));
    } catch (final RocksDBException e) {
        if ("Column family not found: keyValueWithTimestamp".equals(e.getMessage())) {
            // Old store format: reopen with the default column family only,
            // then create the missing timestamped column family.
            try {
                db = RocksDB.open(dbOptions, dbDir.getAbsolutePath(), columnFamilyDescriptors.subList(0, 1), columnFamilies);
                columnFamilies.add(db.createColumnFamily(columnFamilyDescriptors.get(1)));
            } catch (final RocksDBException fatal) {
                throw new ProcessorStateException("Error opening store " + name + " at location " + dbDir.toString(), fatal);
            }
            setDbAccessor(columnFamilies.get(0), columnFamilies.get(1));
        } else {
            throw new ProcessorStateException("Error opening store " + name + " at location " + dbDir.toString(), e);
        }
    }
}
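The fallback branch is an on-the-fly upgrade: a store created before the timestamped format lacks the keyValueWithTimestamp column family, so the DB is reopened with only the default family and the new family is created. A standalone sketch of the same open-or-create logic, assuming a local path; unlike the real code, it falls back on any RocksDBException rather than matching the exact message:

import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import org.rocksdb.ColumnFamilyDescriptor;
import org.rocksdb.ColumnFamilyHandle;
import org.rocksdb.ColumnFamilyOptions;
import org.rocksdb.DBOptions;
import org.rocksdb.RocksDB;
import org.rocksdb.RocksDBException;

// Open a DB with an extra column family, creating the family when absent.
static RocksDB openWithFamily(final String path) throws RocksDBException {
    final ColumnFamilyOptions cfOptions = new ColumnFamilyOptions();
    final List<ColumnFamilyDescriptor> descriptors = Arrays.asList(
        new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY, cfOptions),
        new ColumnFamilyDescriptor("keyValueWithTimestamp".getBytes(StandardCharsets.UTF_8), cfOptions));
    final List<ColumnFamilyHandle> handles = new ArrayList<>();
    final DBOptions dbOptions = new DBOptions().setCreateIfMissing(true);
    try {
        return RocksDB.open(dbOptions, path, descriptors, handles);
    } catch (final RocksDBException e) {
        // Older DB without the extra family: open with the default family only,
        // then create the missing family, mirroring the upgrade path above.
        final RocksDB db = RocksDB.open(dbOptions, path, descriptors.subList(0, 1), handles);
        handles.add(db.createColumnFamily(descriptors.get(1)));
        return db;
    }
}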
Use of org.apache.kafka.streams.errors.ProcessorStateException in project kafka by apache.
The class StateDirectory, method lockStateDirectory.

/**
 * @return true if the state directory was successfully locked
 */
private boolean lockStateDirectory() {
    final File lockFile = new File(stateDir, LOCK_FILE_NAME);
    try {
        stateDirLockChannel = FileChannel.open(lockFile.toPath(), StandardOpenOption.CREATE, StandardOpenOption.WRITE);
        stateDirLock = tryLock(stateDirLockChannel);
    } catch (final IOException e) {
        log.error("Unable to lock the state directory due to unexpected exception", e);
        throw new ProcessorStateException(String.format("Failed to lock the state directory [%s] during startup", stateDir.getAbsolutePath()), e);
    }
    return stateDirLock != null;
}
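Only an IOException is converted to ProcessorStateException here; a lock already held elsewhere simply yields false. A hedged sketch of what the tryLock helper plausibly looks like, treating an OverlappingFileLockException (another thread in the same JVM holds the lock) as "not acquired" rather than an error:

import java.io.IOException;
import java.nio.channels.FileChannel;
import java.nio.channels.FileLock;
import java.nio.channels.OverlappingFileLockException;

// Sketch of the tryLock helper referenced above: null means the lock is held
// by another thread in this JVM, not that an I/O error occurred.
static FileLock tryLock(final FileChannel channel) throws IOException {
    try {
        return channel.tryLock();
    } catch (final OverlappingFileLockException e) {
        return null;
    }
}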