Use of org.apache.kafka.streams.errors.ProcessorStateException in project kafka by apache.
The class StateDirectory, method initializeProcessId.
public UUID initializeProcessId() {
    if (!hasPersistentStores) {
        return UUID.randomUUID();
    }

    if (!lockStateDirectory()) {
        log.error("Unable to obtain lock as state directory is already locked by another process");
        throw new StreamsException(String.format("Unable to initialize state, this can happen if multiple instances of " +
            "Kafka Streams are running in the same state directory " +
            "(current state directory is [%s]", stateDir.getAbsolutePath()));
    }

    final File processFile = new File(stateDir, PROCESS_FILE_NAME);
    final ObjectMapper mapper = new ObjectMapper();

    try {
        if (processFile.exists()) {
            try {
                final StateDirectoryProcessFile processFileData = mapper.readValue(processFile, StateDirectoryProcessFile.class);
                log.info("Reading UUID from process file: {}", processFileData.processId);
                if (processFileData.processId != null) {
                    return processFileData.processId;
                }
            } catch (final Exception e) {
                log.warn("Failed to read json process file", e);
            }
        }

        final StateDirectoryProcessFile processFileData = new StateDirectoryProcessFile(UUID.randomUUID());
        log.info("No process id found on disk, got fresh process id {}", processFileData.processId);

        mapper.writeValue(processFile, processFileData);
        return processFileData.processId;
    } catch (final IOException e) {
        log.error("Unable to read/write process file due to unexpected exception", e);
        throw new ProcessorStateException(e);
    }
}
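The method above wraps any unexpected IOException from reading or writing the process file in a ProcessorStateException, an unchecked StreamsException subclass, so callers do not have to handle a checked exception. Below is a minimal, self-contained sketch of that same wrapping pattern; the writeProcessId helper and the process-file path are hypothetical stand-ins, not part of the Kafka Streams API.

import org.apache.kafka.streams.errors.ProcessorStateException;

import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.UUID;

public class ProcessIdFileExample {

    // Hypothetical helper: persist a freshly generated process id, rethrowing
    // any IOException as the unchecked ProcessorStateException.
    static UUID writeProcessId(final Path processFile) {
        final UUID processId = UUID.randomUUID();
        try {
            Files.write(processFile, processId.toString().getBytes(StandardCharsets.UTF_8));
            return processId;
        } catch (final IOException e) {
            throw new ProcessorStateException("Unable to write process file " + processFile, e);
        }
    }
}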
Use of org.apache.kafka.streams.errors.ProcessorStateException in project kafka by apache.
The class AbstractRocksDBSegmentedBytesStore, method getWriteBatches.
// Visible for testing
Map<S, WriteBatch> getWriteBatches(final Collection<ConsumerRecord<byte[], byte[]>> records) {
    // advance stream time to the max timestamp in the batch
    for (final ConsumerRecord<byte[], byte[]> record : records) {
        final long timestamp = keySchema.segmentTimestamp(Bytes.wrap(record.key()));
        observedStreamTime = Math.max(observedStreamTime, timestamp);
    }

    final Map<S, WriteBatch> writeBatchMap = new HashMap<>();
    for (final ConsumerRecord<byte[], byte[]> record : records) {
        final long timestamp = keySchema.segmentTimestamp(Bytes.wrap(record.key()));
        final long segmentId = segments.segmentId(timestamp);
        final S segment = segments.getOrCreateSegmentIfLive(segmentId, context, observedStreamTime);
        if (segment != null) {
            ChangelogRecordDeserializationHelper.applyChecksAndUpdatePosition(record, consistencyEnabled, position);
            try {
                final WriteBatch batch = writeBatchMap.computeIfAbsent(segment, s -> new WriteBatch());
                segment.addToBatch(new KeyValue<>(record.key(), record.value()), batch);
            } catch (final RocksDBException e) {
                throw new ProcessorStateException("Error restoring batch to store " + this.name, e);
            }
        }
    }
    return writeBatchMap;
}
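Converting the checked RocksDBException into a ProcessorStateException, as in the catch block above, is the usual way RocksDB-backed stores in Kafka Streams surface storage errors as unchecked exceptions. The following is a minimal sketch of the same pattern using the plain RocksDB Java API, assuming an already-open org.rocksdb.RocksDB handle (the helper and its arguments are illustrative, not Kafka code).

import org.apache.kafka.streams.errors.ProcessorStateException;
import org.rocksdb.RocksDB;
import org.rocksdb.RocksDBException;
import org.rocksdb.WriteBatch;
import org.rocksdb.WriteOptions;

class BatchWriteExample {

    // Hypothetical helper: apply key/value pairs in a single WriteBatch,
    // converting the checked RocksDBException into a ProcessorStateException.
    static void writeBatch(final RocksDB db, final byte[][] keys, final byte[][] values) {
        try (final WriteBatch batch = new WriteBatch();
             final WriteOptions options = new WriteOptions()) {
            for (int i = 0; i < keys.length; i++) {
                batch.put(keys[i], values[i]);
            }
            db.write(options, batch);
        } catch (final RocksDBException e) {
            throw new ProcessorStateException("Error writing batch to RocksDB", e);
        }
    }
}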
Use of org.apache.kafka.streams.errors.ProcessorStateException in project kafka by apache.
The class AbstractSegments, method segmentIdFromSegmentName.
private long segmentIdFromSegmentName(final String segmentName, final File parent) {
    final int segmentSeparatorIndex = name.length();
    final char segmentSeparator = segmentName.charAt(segmentSeparatorIndex);
    final String segmentIdString = segmentName.substring(segmentSeparatorIndex + 1);

    final long segmentId;

    // old style segment name with date
    if (segmentSeparator == '-') {
        try {
            segmentId = formatter.parse(segmentIdString).getTime() / segmentInterval;
        } catch (final ParseException e) {
            log.warn("Unable to parse segmentName {} to a date. This segment will be skipped", segmentName);
            return -1L;
        }
        renameSegmentFile(parent, segmentName, segmentId);
    } else {
        // for both new formats (with : or .) parse segment ID identically
        try {
            segmentId = Long.parseLong(segmentIdString) / segmentInterval;
        } catch (final NumberFormatException e) {
            throw new ProcessorStateException("Unable to parse segment id as long from segmentName: " + segmentName);
        }

        // intermediate segment name with : breaks KafkaStreams on Windows OS -> rename segment file to new name with .
        if (segmentSeparator == ':') {
            renameSegmentFile(parent, segmentName, segmentId);
        }
    }

    return segmentId;
}
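To make the separator handling concrete: new-format segment names consist of the store name, a '.' separator, and a numeric (millisecond timestamp) suffix that is divided by the segment interval to recover the segment id; a name that cannot be parsed as a long fails fast with ProcessorStateException. The standalone sketch below replays just that branch; the store name, segment interval, and sample file name are made-up values.

import org.apache.kafka.streams.errors.ProcessorStateException;

class SegmentNameExample {

    public static void main(final String[] args) {
        final String storeName = "my-store";       // assumed store name
        final long segmentInterval = 60_000L;      // assumed segment interval in ms
        final String segmentName = storeName + ".1633046400000";

        // new-format name: <storeName>.<numeric suffix>
        final String segmentIdString = segmentName.substring(storeName.length() + 1);
        final long segmentId;
        try {
            segmentId = Long.parseLong(segmentIdString) / segmentInterval;
        } catch (final NumberFormatException e) {
            throw new ProcessorStateException("Unable to parse segment id as long from segmentName: " + segmentName);
        }
        System.out.println("segmentId = " + segmentId);
    }
}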
Use of org.apache.kafka.streams.errors.ProcessorStateException in project kafka by apache.
The class AbstractSegments, method renameSegmentFile.
private void renameSegmentFile(final File parent, final String segmentName, final long segmentId) {
    final File newName = new File(parent, segmentName(segmentId));
    final File oldName = new File(parent, segmentName);
    if (!oldName.renameTo(newName)) {
        throw new ProcessorStateException("Unable to rename old style segment from: " + oldName + " to new name: " + newName);
    }
}
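File.renameTo only reports failure through a boolean, so the method above has to build its own error message before throwing ProcessorStateException. As a point of comparison (not the implementation Kafka uses), a variant based on java.nio.file.Files.move could wrap the descriptive IOException directly, as in this sketch:

import org.apache.kafka.streams.errors.ProcessorStateException;

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardCopyOption;

class RenameExample {

    // Hypothetical alternative to renameSegmentFile: Files.move throws an
    // IOException with details on failure, which is wrapped in the unchecked
    // ProcessorStateException.
    static void renameSegment(final Path oldName, final Path newName) {
        try {
            Files.move(oldName, newName, StandardCopyOption.ATOMIC_MOVE);
        } catch (final IOException e) {
            throw new ProcessorStateException("Unable to rename segment from: " + oldName + " to: " + newName, e);
        }
    }
}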
Use of org.apache.kafka.streams.errors.ProcessorStateException in project kafka by apache.
The class StreamTaskTest, method shouldThrowOnCloseCleanCheckpointError.
@Test
public void shouldThrowOnCloseCleanCheckpointError() {
    final long offset = 54300L;
    EasyMock.expect(recordCollector.offsets()).andReturn(emptyMap());
    stateManager.checkpoint();
    EasyMock.expectLastCall().andThrow(new ProcessorStateException("KABOOM!")).anyTimes();
    stateManager.close();
    EasyMock.expectLastCall().andThrow(new AssertionError("Close should not be called!")).anyTimes();
    EasyMock.expect(stateManager.changelogPartitions()).andReturn(Collections.emptySet()).anyTimes();
    EasyMock.expect(stateManager.changelogOffsets()).andReturn(singletonMap(partition1, offset));
    EasyMock.replay(recordCollector, stateManager);

    final MetricName metricName = setupCloseTaskMetric();
    task = createOptimizedStatefulTask(createConfig("100"), consumer);
    task.initializeIfNeeded();

    task.addRecords(partition1, singletonList(getConsumerRecordWithOffsetAsTimestamp(partition1, offset)));
    task.process(100L);
    assertTrue(task.commitNeeded());

    task.suspend();
    task.prepareCommit();
    assertThrows(ProcessorStateException.class, () -> task.postCommit(true));
    assertEquals(Task.State.SUSPENDED, task.state());

    final double expectedCloseTaskMetric = 0.0;
    verifyCloseTaskMetric(expectedCloseTaskMetric, streamsMetrics, metricName);

    EasyMock.verify(stateManager);
    EasyMock.reset(stateManager);
    EasyMock.expect(stateManager.changelogPartitions()).andReturn(Collections.emptySet()).anyTimes();
    stateManager.close();
    EasyMock.expectLastCall();
    EasyMock.replay(stateManager);
}
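The test above uses EasyMock to make stateManager.checkpoint() throw and then asserts that task.postCommit(true) propagates the ProcessorStateException. Stripped of the task and mock setup, the core assertion pattern looks like the following sketch; the test class and the failing method are hypothetical stand-ins, not part of StreamTaskTest.

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertThrows;

import org.apache.kafka.streams.errors.ProcessorStateException;
import org.junit.Test;

public class ProcessorStateExceptionAssertionExample {

    // Hypothetical stand-in for a checkpoint that fails, mirroring the mocked
    // stateManager.checkpoint() above.
    private void failingCheckpoint() {
        throw new ProcessorStateException("KABOOM!");
    }

    @Test
    public void shouldPropagateProcessorStateException() {
        final ProcessorStateException thrown =
            assertThrows(ProcessorStateException.class, this::failingCheckpoint);
        assertEquals("KABOOM!", thrown.getMessage());
    }
}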