Use of org.apache.flink.util.FlinkRuntimeException in project flink by apache.
From the class RocksDBAggregatingState, the method mergeNamespaces:
@Override
public void mergeNamespaces(N target, Collection<N> sources) {
    if (sources == null || sources.isEmpty()) {
        return;
    }
    try {
        ACC current = null;
        // merge the sources to the target
        for (N source : sources) {
            if (source != null) {
                setCurrentNamespace(source);
                final byte[] sourceKey = serializeCurrentKeyWithGroupAndNamespace();
                final byte[] valueBytes = backend.db.get(columnFamily, sourceKey);
                if (valueBytes != null) {
                    backend.db.delete(columnFamily, writeOptions, sourceKey);
                    dataInputView.setBuffer(valueBytes);
                    ACC value = valueSerializer.deserialize(dataInputView);
                    if (current != null) {
                        current = aggFunction.merge(current, value);
                    } else {
                        current = value;
                    }
                }
            }
        }
        // if something came out of merging the sources, merge it or write it to the target
        if (current != null) {
            setCurrentNamespace(target);
            // create the target full-binary-key
            final byte[] targetKey = serializeCurrentKeyWithGroupAndNamespace();
            final byte[] targetValueBytes = backend.db.get(columnFamily, targetKey);
            if (targetValueBytes != null) {
                // target also had a value, merge
                dataInputView.setBuffer(targetValueBytes);
                ACC value = valueSerializer.deserialize(dataInputView);
                current = aggFunction.merge(current, value);
            }
            // serialize the resulting value
            dataOutputView.clear();
            valueSerializer.serialize(current, dataOutputView);
            // write the resulting value
            backend.db.put(columnFamily, writeOptions, targetKey, dataOutputView.getCopyOfBuffer());
        }
    } catch (Exception e) {
        throw new FlinkRuntimeException("Error while merging state in RocksDB", e);
    }
}
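The loop above folds one accumulator per source namespace into a single value through the AggregateFunction's merge, then combines that result with whatever the target namespace already held. For reference, here is a minimal, hypothetical AggregateFunction (a running sum of Longs, not part of the snippet above) whose associative merge makes that fold order-insensitive:

import org.apache.flink.api.common.functions.AggregateFunction;

// Hypothetical accumulator: a running sum of Long values. mergeNamespaces
// above folds one such accumulator per source namespace into a single one
// via merge, then combines the result with the target's accumulator.
public class SumAggregate implements AggregateFunction<Long, Long, Long> {

    @Override
    public Long createAccumulator() {
        return 0L;
    }

    @Override
    public Long add(Long value, Long accumulator) {
        return accumulator + value;
    }

    @Override
    public Long getResult(Long accumulator) {
        return accumulator;
    }

    @Override
    public Long merge(Long a, Long b) {
        // Associative and commutative, so the order in which
        // mergeNamespaces visits the sources does not matter.
        return a + b;
    }
}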
Use of org.apache.flink.util.FlinkRuntimeException in project flink by apache.
From the class RocksStateKeysIterator, the method hasNext:
@Override
public boolean hasNext() {
    try {
        while (nextKey == null && iterator.isValid()) {
            final byte[] keyBytes = iterator.key();
            final K currentKey = deserializeKey(keyBytes, byteArrayDataInputView);
            final int namespaceByteStartPos = byteArrayDataInputView.getPosition();
            if (isMatchingNameSpace(keyBytes, namespaceByteStartPos) && !Objects.equals(previousKey, currentKey)) {
                previousKey = currentKey;
                nextKey = currentKey;
            }
            iterator.next();
        }
    } catch (Exception e) {
        throw new FlinkRuntimeException("Failed to access state [" + state + "]", e);
    }
    return nextKey != null;
}
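Because Iterator#hasNext cannot declare checked exceptions, the deserialization failure is wrapped in FlinkRuntimeException; the method also pre-fetches the next key and skips consecutive duplicates. Below is a minimal sketch of that caching-and-deduplicating iterator shape, with hypothetical names, assuming only flink-core on the classpath for FlinkRuntimeException:

import java.io.IOException;
import java.util.Iterator;
import java.util.NoSuchElementException;
import java.util.Objects;
import org.apache.flink.util.FlinkRuntimeException;

// Hypothetical sketch: an Iterator over a failure-prone source that
// pre-fetches in hasNext() and skips consecutive duplicates, mirroring
// the structure of RocksStateKeysIterator#hasNext above.
final class DedupingIterator<T> implements Iterator<T> {

    interface ThrowingSource<T> {
        boolean isValid();
        T value() throws IOException; // may fail while deserializing
        void advance();
    }

    private final ThrowingSource<T> source;
    private T nextElement;      // cached by hasNext(), consumed by next()
    private T previousElement;  // last element returned, used for dedup

    DedupingIterator(ThrowingSource<T> source) {
        this.source = source;
    }

    @Override
    public boolean hasNext() {
        try {
            while (nextElement == null && source.isValid()) {
                T candidate = source.value();
                if (!Objects.equals(previousElement, candidate)) {
                    nextElement = candidate;
                }
                source.advance();
            }
        } catch (IOException e) {
            // Iterator#hasNext cannot declare checked exceptions, so wrap.
            throw new FlinkRuntimeException("Failed to read next element", e);
        }
        return nextElement != null;
    }

    @Override
    public T next() {
        if (!hasNext()) {
            throw new NoSuchElementException();
        }
        previousElement = nextElement;
        nextElement = null;
        return previousElement;
    }
}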
Use of org.apache.flink.util.FlinkRuntimeException in project flink by apache.
From the class RocksDBStateUploader, the method uploadFilesToCheckpointFs:
/**
 * Uploads all the files to the checkpoint file system using the specified number of threads.
 *
 * @param files The files to be uploaded to the checkpoint file system.
 * @param checkpointStreamFactory The checkpoint stream factory used to create the output streams.
 * @param stateScope The scope (shared or exclusive) of the state being checkpointed.
 * @param closeableRegistry The registry to which opened streams are registered so they can be closed on cancellation.
 * @throws Exception Thrown if not all files could be uploaded.
 */
public Map<StateHandleID, StreamStateHandle> uploadFilesToCheckpointFs(
        @Nonnull Map<StateHandleID, Path> files,
        CheckpointStreamFactory checkpointStreamFactory,
        CheckpointedStateScope stateScope,
        CloseableRegistry closeableRegistry) throws Exception {
    Map<StateHandleID, StreamStateHandle> handles = new HashMap<>();
    Map<StateHandleID, CompletableFuture<StreamStateHandle>> futures =
            createUploadFutures(files, checkpointStreamFactory, stateScope, closeableRegistry);
    try {
        FutureUtils.waitForAll(futures.values()).get();
        for (Map.Entry<StateHandleID, CompletableFuture<StreamStateHandle>> entry : futures.entrySet()) {
            handles.put(entry.getKey(), entry.getValue().get());
        }
    } catch (ExecutionException e) {
        Throwable throwable = ExceptionUtils.stripExecutionException(e);
        throwable = ExceptionUtils.stripException(throwable, RuntimeException.class);
        if (throwable instanceof IOException) {
            throw (IOException) throwable;
        } else {
            throw new FlinkRuntimeException("Failed to upload data for state handles.", e);
        }
    }
    return handles;
}
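The catch block first strips the ExecutionException wrapper and then any RuntimeException wrappers to reach the root failure, rethrowing IOException with its checked type intact and wrapping everything else in FlinkRuntimeException. Here is a simplified sketch of that unwrap-and-rethrow pattern using only the JDK, with a hypothetical helper standing in for Flink's ExceptionUtils:

import java.io.IOException;
import java.util.List;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutionException;

// Simplified, hypothetical sketch of the unwrap-and-rethrow pattern above;
// not Flink API. The while loop approximates ExceptionUtils.stripException.
final class UploadErrorHandling {

    static <T> void awaitAll(List<CompletableFuture<T>> futures) throws IOException {
        try {
            CompletableFuture.allOf(futures.toArray(new CompletableFuture[0])).get();
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
            throw new RuntimeException("Interrupted while waiting for uploads", e);
        } catch (ExecutionException e) {
            // Peel the ExecutionException and any RuntimeException wrappers
            // to recover the root failure.
            Throwable cause = e.getCause();
            while (cause instanceof RuntimeException && cause.getCause() != null) {
                cause = cause.getCause();
            }
            if (cause instanceof IOException) {
                // I/O failures keep their checked type for the caller.
                throw (IOException) cause;
            }
            throw new RuntimeException("Failed to upload data for state handles.", e);
        }
    }
}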
Use of org.apache.flink.util.FlinkRuntimeException in project flink by apache.
From the class RocksQueueIterator, the method next:
@Override
public void next() {
    try {
        if (!elementsForKeyGroup.hasNext()) {
            boolean hasElement = moveToNextNonEmptyKeyGroup();
            if (!hasElement) {
                isValid = false;
                return;
            }
        }
        keyOut.setPosition(afterKeyMark);
        elementSerializer.serialize(elementsForKeyGroup.next(), keyOut);
        this.currentKey = keyOut.getCopyOfBuffer();
    } catch (IOException e) {
        throw new FlinkRuntimeException(e);
    }
}
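next() reuses a single output buffer: the bytes up to afterKeyMark (the serialized key-group prefix) stay in place, and each element is serialized over the previous suffix before the whole buffer is copied out. A small sketch of that rewind-and-overwrite idea, with a plain java.nio.ByteBuffer standing in for Flink's DataOutputSerializer (names and prefix are hypothetical):

import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;
import java.util.Arrays;

// Sketch: write a fixed key prefix once, remember the mark, then rewind to
// the mark for every element and overwrite only the suffix, as
// RocksQueueIterator#next does with keyOut.setPosition(afterKeyMark).
public class PrefixedKeyBuffer {

    public static void main(String[] args) {
        ByteBuffer keyOut = ByteBuffer.allocate(64);
        keyOut.put("key-group-7/".getBytes(StandardCharsets.UTF_8));
        final int afterKeyMark = keyOut.position(); // serialized prefix ends here

        for (String element : new String[] {"a", "bb", "ccc"}) {
            // Rewind past the shared prefix and overwrite only the suffix.
            keyOut.position(afterKeyMark);
            keyOut.put(element.getBytes(StandardCharsets.UTF_8));

            // Snapshot the full key, like keyOut.getCopyOfBuffer() above.
            byte[] currentKey = Arrays.copyOf(keyOut.array(), keyOut.position());
            System.out.println(new String(currentKey, StandardCharsets.UTF_8));
        }
    }
}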
Use of org.apache.flink.util.FlinkRuntimeException in project flink by apache.
From the class StreamTaskFinalCheckpointsTest, the method testTriggerStopWithSavepointWhenWaitingForFinalCheckpoint:
@Test
public void testTriggerStopWithSavepointWhenWaitingForFinalCheckpoint() throws Exception {
    ResultPartition[] partitionWriters = new ResultPartition[2];
    try {
        for (int i = 0; i < partitionWriters.length; ++i) {
            partitionWriters[i] = PartitionTestUtils.createPartition(ResultPartitionType.PIPELINED_BOUNDED);
            partitionWriters[i].setup();
        }
        int finalCheckpointId = 6;
        int syncSavepointId = 7;
        CompletingCheckpointResponder checkpointResponder = new CompletingCheckpointResponder() {
            @Override
            public void acknowledgeCheckpoint(
                    JobID jobID,
                    ExecutionAttemptID executionAttemptID,
                    long checkpointId,
                    CheckpointMetrics checkpointMetrics,
                    TaskStateSnapshot subtaskState) {
                if (syncSavepointId == checkpointId) {
                    // complete the final checkpoint when the sync savepoint is
                    // acknowledged; we should wait for the sync savepoint to complete
                    super.acknowledgeCheckpoint(jobID, executionAttemptID, finalCheckpointId, checkpointMetrics, subtaskState);
                    try {
                        // Give the task some time to finish before the
                        // savepoint is notified as complete
                        Thread.sleep(CONCURRENT_EVENT_WAIT_PERIOD_MS);
                    } catch (InterruptedException e) {
                        throw new FlinkRuntimeException(e);
                    }
                    super.acknowledgeCheckpoint(jobID, executionAttemptID, syncSavepointId, checkpointMetrics, subtaskState);
                }
            }
        };
        try (StreamTaskMailboxTestHarness<String> testHarness = createTestHarness(partitionWriters, checkpointResponder, false)) {
            // Tests triggering a checkpoint after all the inputs have received
            // EndOfPartition.
            testHarness.waitForTaskCompletion();
            // trigger the final checkpoint
            CompletableFuture<Boolean> checkpointFuture = triggerCheckpoint(testHarness, finalCheckpointId);
            // Notifies the result partition that all records are processed after the
            // last checkpoint is triggered.
            checkpointFuture.thenAccept((ignored) -> {
                for (ResultPartition resultPartition : partitionWriters) {
                    resultPartition.onSubpartitionAllDataProcessed(0);
                }
            });
            // trigger the synchronous savepoint
            CompletableFuture<Boolean> savepointFuture = triggerStopWithSavepointDrain(testHarness, syncSavepointId);
            // Checkpoint 6 should be triggered successfully.
            testHarness.finishProcessing();
            assertTrue(checkpointFuture.isDone());
            assertTrue(savepointFuture.isDone());
            testHarness.getTaskStateManager().getWaitForReportLatch().await();
            assertEquals(syncSavepointId, testHarness.getTaskStateManager().getReportedCheckpointId());
            assertEquals(syncSavepointId, testHarness.getTaskStateManager().getNotifiedCompletedCheckpointId());
            // Each result partition should have emitted 2 barriers and 1 EndOfUserRecordsEvent.
            for (ResultPartition resultPartition : partitionWriters) {
                assertEquals(3, resultPartition.getNumberOfQueuedBuffers());
            }
        }
    } finally {
        for (ResultPartitionWriter writer : partitionWriters) {
            if (writer != null) {
                writer.close();
            }
        }
    }
}
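The overridden acknowledgeCheckpoint in the test cannot let Thread.sleep's InterruptedException escape as a checked exception, so it wraps it in FlinkRuntimeException. Below is a minimal sketch of that idiom, with the common refinement of restoring the interrupt flag first (hypothetical Runnable; assumes flink-core on the classpath):

import org.apache.flink.util.FlinkRuntimeException;

// Sketch: a callback whose signature declares no checked exceptions must
// wrap InterruptedException in an unchecked one, as the responder above does.
public class SleepingCallback implements Runnable {

    private final long waitMillis;

    public SleepingCallback(long waitMillis) {
        this.waitMillis = waitMillis;
    }

    @Override
    public void run() {
        try {
            Thread.sleep(waitMillis);
        } catch (InterruptedException e) {
            // Restore the interrupt flag before wrapping, so callers can
            // still observe the interruption.
            Thread.currentThread().interrupt();
            throw new FlinkRuntimeException(e);
        }
    }
}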