Use of org.apache.flink.annotation.VisibleForTesting in project flink by apache.
The class MetadataV2V3SerializerBase, method deserializeKeyedStateHandle.
@VisibleForTesting
@Nullable
static KeyedStateHandle deserializeKeyedStateHandle(
        DataInputStream dis, @Nullable DeserializationContext context) throws IOException {
    final int type = dis.readByte();
    if (NULL_HANDLE == type) {
        return null;
    } else if (KEY_GROUPS_HANDLE == type
            || KEY_GROUPS_HANDLE_V2 == type
            || SAVEPOINT_KEY_GROUPS_HANDLE == type) {
        int startKeyGroup = dis.readInt();
        int numKeyGroups = dis.readInt();
        KeyGroupRange keyGroupRange =
                KeyGroupRange.of(startKeyGroup, startKeyGroup + numKeyGroups - 1);
        long[] offsets = new long[numKeyGroups];
        for (int i = 0; i < numKeyGroups; ++i) {
            offsets[i] = dis.readLong();
        }
        KeyGroupRangeOffsets keyGroupRangeOffsets =
                new KeyGroupRangeOffsets(keyGroupRange, offsets);
        StreamStateHandle stateHandle = deserializeStreamStateHandle(dis, context);
        if (SAVEPOINT_KEY_GROUPS_HANDLE == type) {
            return new KeyGroupsSavepointStateHandle(keyGroupRangeOffsets, stateHandle);
        } else {
            StateHandleID stateHandleID =
                    KEY_GROUPS_HANDLE_V2 == type
                            ? new StateHandleID(dis.readUTF())
                            : StateHandleID.randomStateHandleId();
            return KeyGroupsStateHandle.restore(keyGroupRangeOffsets, stateHandle, stateHandleID);
        }
    } else if (INCREMENTAL_KEY_GROUPS_HANDLE == type
            || INCREMENTAL_KEY_GROUPS_HANDLE_V2 == type) {
        return deserializeIncrementalStateHandle(dis, context, type);
    } else if (CHANGELOG_HANDLE == type) {
        int startKeyGroup = dis.readInt();
        int numKeyGroups = dis.readInt();
        KeyGroupRange keyGroupRange =
                KeyGroupRange.of(startKeyGroup, startKeyGroup + numKeyGroups - 1);
        long checkpointedSize = dis.readLong();
        int baseSize = dis.readInt();
        List<KeyedStateHandle> base = new ArrayList<>(baseSize);
        for (int i = 0; i < baseSize; i++) {
            KeyedStateHandle handle = deserializeKeyedStateHandle(dis, context);
            if (handle != null) {
                base.add(handle);
            } else {
                LOG.warn("Unexpected null keyed state handle of materialized part when deserializing changelog state-backend handle");
            }
        }
        int deltaSize = dis.readInt();
        List<ChangelogStateHandle> delta = new ArrayList<>(deltaSize);
        for (int i = 0; i < deltaSize; i++) {
            delta.add((ChangelogStateHandle) deserializeKeyedStateHandle(dis, context));
        }
        long materializationID = dis.readLong();
        StateHandleID stateHandleId = new StateHandleID(dis.readUTF());
        return ChangelogStateBackendHandleImpl.restore(
                base, delta, keyGroupRange, materializationID, checkpointedSize, stateHandleId);
    } else if (CHANGELOG_BYTE_INCREMENT_HANDLE == type) {
        int start = dis.readInt();
        int numKeyGroups = dis.readInt();
        KeyGroupRange keyGroupRange = KeyGroupRange.of(start, start + numKeyGroups - 1);
        long from = dis.readLong();
        long to = dis.readLong();
        int size = dis.readInt();
        List<StateChange> changes = new ArrayList<>(size);
        for (int i = 0; i < size; i++) {
            int keyGroup = dis.readInt();
            int bytesSize = dis.readInt();
            byte[] bytes = new byte[bytesSize];
            IOUtils.readFully(dis, bytes, 0, bytesSize);
            changes.add(new StateChange(keyGroup, bytes));
        }
        StateHandleID stateHandleId = new StateHandleID(dis.readUTF());
        return InMemoryChangelogStateHandle.restore(
                changes, SequenceNumber.of(from), SequenceNumber.of(to), keyGroupRange, stateHandleId);
    } else if (CHANGELOG_FILE_INCREMENT_HANDLE == type) {
        int start = dis.readInt();
        int numKeyGroups = dis.readInt();
        KeyGroupRange keyGroupRange = KeyGroupRange.of(start, start + numKeyGroups - 1);
        int numHandles = dis.readInt();
        List<Tuple2<StreamStateHandle, Long>> streamHandleAndOffset = new ArrayList<>(numHandles);
        for (int i = 0; i < numHandles; i++) {
            long o = dis.readLong();
            StreamStateHandle h = deserializeStreamStateHandle(dis, context);
            streamHandleAndOffset.add(Tuple2.of(h, o));
        }
        long size = dis.readLong();
        long checkpointedSize = dis.readLong();
        StateHandleID stateHandleId = new StateHandleID(dis.readUTF());
        return ChangelogStateHandleStreamImpl.restore(
                streamHandleAndOffset, keyGroupRange, size, checkpointedSize, stateHandleId);
    } else {
        throw new IllegalStateException("Reading invalid KeyedStateHandle, type: " + type);
    }
}
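
Every branch above follows the same wire discipline: the writer emits a one-byte type tag, then the payload for that type, and the reader dispatches on the tag before touching the payload. Below is a minimal, self-contained sketch of that tag-then-payload round trip; the NULL_TAG/OFFSETS_TAG constants and the payload layout are illustrative, not Flink's actual metadata format.

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

final class TagThenPayloadSketch {
    // Illustrative tags, not Flink's serializer constants.
    static final byte NULL_TAG = 0;
    static final byte OFFSETS_TAG = 1;

    static void write(long[] offsets, DataOutputStream dos) throws IOException {
        if (offsets == null) {
            dos.writeByte(NULL_TAG); // absence is encoded explicitly
            return;
        }
        dos.writeByte(OFFSETS_TAG);  // tag first, payload second
        dos.writeInt(offsets.length);
        for (long offset : offsets) {
            dos.writeLong(offset);
        }
    }

    static long[] read(DataInputStream dis) throws IOException {
        final int type = dis.readByte(); // dispatch on the tag, as above
        if (type == NULL_TAG) {
            return null;
        } else if (type == OFFSETS_TAG) {
            long[] offsets = new long[dis.readInt()];
            for (int i = 0; i < offsets.length; i++) {
                offsets[i] = dis.readLong();
            }
            return offsets;
        }
        throw new IOException("Reading invalid payload, tag: " + type);
    }

    public static void main(String[] args) throws IOException {
        ByteArrayOutputStream buffer = new ByteArrayOutputStream();
        write(new long[] {4L, 8L, 15L}, new DataOutputStream(buffer));
        long[] back = read(new DataInputStream(new ByteArrayInputStream(buffer.toByteArray())));
        System.out.println(back.length); // 3
    }
}

Encoding absence explicitly (the NULL_TAG branch, mirroring NULL_HANDLE above) keeps the stream self-describing: the reader never has to guess how many bytes to skip.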
Use of org.apache.flink.annotation.VisibleForTesting in project flink by apache.
The class ExternalResourceUtils, method createStaticExternalResourceInfoProvider.
/**
 * Instantiates {@link StaticExternalResourceInfoProvider} for all enabled external resources.
 */
@VisibleForTesting
static ExternalResourceInfoProvider createStaticExternalResourceInfoProvider(
        Map<String, Long> externalResourceAmountMap,
        Map<String, ExternalResourceDriver> externalResourceDrivers) {
    final Map<String, Set<? extends ExternalResourceInfo>> externalResources = new HashMap<>();
    for (Map.Entry<String, ExternalResourceDriver> externalResourceDriverEntry :
            externalResourceDrivers.entrySet()) {
        final String resourceName = externalResourceDriverEntry.getKey();
        final ExternalResourceDriver externalResourceDriver =
                externalResourceDriverEntry.getValue();
        if (externalResourceAmountMap.containsKey(resourceName)) {
            try {
                final Set<? extends ExternalResourceInfo> externalResourceInfos;
                externalResourceInfos =
                        externalResourceDriver.retrieveResourceInfo(
                                externalResourceAmountMap.get(resourceName));
                externalResources.put(resourceName, externalResourceInfos);
            } catch (Exception e) {
                LOG.warn("Failed to retrieve information of external resource {}.", resourceName, e);
            }
        } else {
            LOG.warn("Could not find valid amount configuration for {}.", resourceName);
        }
    }
    return new StaticExternalResourceInfoProvider(externalResources);
}
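
Because the method is static and package-private, a test living in the same package as ExternalResourceUtils can call it directly; that access is exactly what @VisibleForTesting documents. A hedged, test-style sketch follows; the "gpu" resource name and the empty-set stub driver are made up for illustration.

import java.util.Collections;
import java.util.Map;
import java.util.Set;

import org.apache.flink.api.common.externalresource.ExternalResourceDriver;
import org.apache.flink.api.common.externalresource.ExternalResourceInfo;

// Assumed to live in the same package as ExternalResourceUtils, so the
// package-private factory and ExternalResourceInfoProvider resolve directly.
class ExternalResourceUtilsSketchTest {
    void retrievesInfoForConfiguredResources() throws Exception {
        Map<String, Long> amounts = Collections.singletonMap("gpu", 2L);
        // Stub driver: ignores the amount and reports no resource infos.
        Map<String, ExternalResourceDriver> drivers =
                Collections.singletonMap("gpu", amount -> Collections.emptySet());

        ExternalResourceInfoProvider provider =
                ExternalResourceUtils.createStaticExternalResourceInfoProvider(amounts, drivers);

        // A resource is published only when both a driver and an amount exist.
        Set<ExternalResourceInfo> gpuInfos = provider.getExternalResourceInfos("gpu");
    }
}

The sketch also shows the method's filtering behavior: a driver without a matching amount entry is logged and skipped rather than failing the whole provider.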
Use of org.apache.flink.annotation.VisibleForTesting in project flink by apache.
The class DefaultCheckpointPlanCalculator, method collectTaskRunningStatus.
/**
* Collects the task running status for each job vertex.
*
* @return The task running status for each job vertex.
*/
@VisibleForTesting
Map<JobVertexID, BitSet> collectTaskRunningStatus() {
    Map<JobVertexID, BitSet> runningStatusByVertex = new HashMap<>();
    for (ExecutionJobVertex vertex : jobVerticesInTopologyOrder) {
        BitSet runningTasks = new BitSet(vertex.getTaskVertices().length);
        for (int i = 0; i < vertex.getTaskVertices().length; ++i) {
            if (!vertex.getTaskVertices()[i].getCurrentExecutionAttempt().isFinished()) {
                runningTasks.set(i);
            }
        }
        runningStatusByVertex.put(vertex.getJobVertexId(), runningTasks);
    }
    return runningStatusByVertex;
}
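
The per-vertex BitSet encodes "bit i is set iff subtask i is still running", so later questions from the plan calculator (are all tasks finished, which subtasks still run) become cheap bit operations instead of list scans. A standalone sketch of the same encoding, with a hypothetical finished array standing in for real Execution attempts:

import java.util.BitSet;

final class RunningStatusSketch {
    // Hypothetical input: finished[i] == true means subtask i is done.
    static BitSet runningTasks(boolean[] finished) {
        BitSet running = new BitSet(finished.length);
        for (int i = 0; i < finished.length; i++) {
            if (!finished[i]) {
                running.set(i); // bit i set <=> subtask i is still running
            }
        }
        return running;
    }

    public static void main(String[] args) {
        BitSet running = runningTasks(new boolean[] {false, true, false});
        System.out.println(running.cardinality()); // 2 subtasks still running
        System.out.println(running.isEmpty());     // false: not all finished
    }
}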
Use of org.apache.flink.annotation.VisibleForTesting in project flink by apache.
The class AdaptiveScheduler, method transitionToState.
/**
 * Transitions the scheduler to another state. This method guards against state transitions
 * while there is already a transition ongoing. This effectively means that you cannot call
 * this method from a State constructor or State#onLeave.
 *
 * @param targetState State to transition to
 * @param <T> Type of the target state
 * @return A target state instance
 */
@VisibleForTesting
<T extends State> T transitionToState(StateFactory<T> targetState) {
    Preconditions.checkState(
            !isTransitioningState,
            "State transitions must not be triggered while another state transition is in progress.");
    Preconditions.checkState(
            state.getClass() != targetState.getStateClass(),
            "Attempted to transition into the very state the scheduler is already in.");
    componentMainThreadExecutor.assertRunningInMainThread();
    try {
        isTransitioningState = true;
        LOG.debug(
                "Transition from state {} to {}.",
                state.getClass().getSimpleName(),
                targetState.getStateClass().getSimpleName());
        final JobStatus previousJobStatus = state.getJobStatus();
        state.onLeave(targetState.getStateClass());
        T targetStateInstance = targetState.getState();
        state = targetStateInstance;
        final JobStatus newJobStatus = state.getJobStatus();
        if (previousJobStatus != newJobStatus) {
            final long timestamp = System.currentTimeMillis();
            jobStatusListeners.forEach(
                    listener ->
                            listener.jobStatusChanges(
                                    jobInformation.getJobID(), newJobStatus, timestamp));
        }
        return targetStateInstance;
    } finally {
        isTransitioningState = false;
    }
}
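
The isTransitioningState flag plus try/finally forms a re-entrancy guard: if any hook running during the transition (State#onLeave, a State constructor) tries to transition again, it trips the precondition instead of silently corrupting the state field. A minimal sketch of the guard detached from the scheduler; GuardedMachine and its String states are illustrative names, not Flink API.

final class GuardedMachine {
    private boolean transitioning = false;
    private String state = "CREATED";

    void transitionTo(String target) {
        if (transitioning) {
            throw new IllegalStateException(
                    "State transitions must not be triggered while another is in progress.");
        }
        if (state.equals(target)) {
            throw new IllegalStateException("Already in state " + target + ".");
        }
        try {
            transitioning = true; // closed for re-entry until the transition completes
            // onLeave/constructor hooks would run here; if one of them called
            // transitionTo(...) again, the guard above would fire instead of
            // silently corrupting `state`.
            state = target;
        } finally {
            transitioning = false; // reopen even if a hook threw
        }
    }
}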
Use of org.apache.flink.annotation.VisibleForTesting in project flink by apache.
The class AdaptiveScheduler, method computeReactiveModeVertexParallelismStore.
/**
* Creates the parallelism store for a set of vertices, optionally with a flag to leave the
* vertex parallelism unchanged. If the flag is set, the parallelisms must be valid for
* execution.
*
* <p>We need to set parallelism to the max possible value when requesting resources, but when
* executing the graph we should respect what we are actually given.
*
* @param vertices The vertices to store parallelism information for
* @param adjustParallelism Whether to adjust the parallelism
* @param defaultMaxParallelismFunc a function for computing a default max parallelism if none
* is specified on a given vertex
* @return The parallelism store.
*/
@VisibleForTesting
static VertexParallelismStore computeReactiveModeVertexParallelismStore(
        Iterable<JobVertex> vertices,
        Function<JobVertex, Integer> defaultMaxParallelismFunc,
        boolean adjustParallelism) {
    DefaultVertexParallelismStore store = new DefaultVertexParallelismStore();
    for (JobVertex vertex : vertices) {
        // if no max parallelism was configured by the user, we calculate and set a default
        final int maxParallelism =
                vertex.getMaxParallelism() == JobVertex.MAX_PARALLELISM_DEFAULT
                        ? defaultMaxParallelismFunc.apply(vertex)
                        : vertex.getMaxParallelism();
        // If the parallelism has already been adjusted, respect what has been configured in
        // the vertex. Otherwise, scale it to the max parallelism to attempt to be "as
        // parallel as possible"
        final int parallelism;
        if (adjustParallelism) {
            parallelism = maxParallelism;
        } else {
            parallelism = vertex.getParallelism();
        }
        VertexParallelismInformation parallelismInfo =
                new DefaultVertexParallelismInfo(
                        parallelism,
                        maxParallelism,
                        // Allow rescaling only if the new max parallelism is not lower than
                        // what was declared here. This prevents requesting more resources
                        // based on the computed default, when actually fewer are necessary.
                        (newMax) ->
                                newMax >= maxParallelism
                                        ? Optional.empty()
                                        : Optional.of(
                                                "Cannot lower max parallelism in Reactive mode."));
        store.setParallelismInfo(vertex.getID(), parallelismInfo);
    }
    return store;
}
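
As with the other @VisibleForTesting members, a test in AdaptiveScheduler's package can exercise both modes of the method directly. A hedged, test-style sketch; the vertex name, the default max parallelism of 128, and the expected values in the comments are illustrative.

import java.util.Collections;

import org.apache.flink.runtime.jobgraph.JobVertex;
import org.apache.flink.runtime.scheduler.VertexParallelismInformation;
import org.apache.flink.runtime.scheduler.VertexParallelismStore;

// Assumed to live in AdaptiveScheduler's package so the package-private
// static method is accessible.
class ReactiveParallelismSketchTest {
    void scalesToMaxWhenAdjusting() {
        JobVertex vertex = new JobVertex("map"); // illustrative vertex
        vertex.setParallelism(2);                // user-configured parallelism
        // No explicit max parallelism, so defaultMaxParallelismFunc supplies one.

        VertexParallelismStore store =
                AdaptiveScheduler.computeReactiveModeVertexParallelismStore(
                        Collections.singletonList(vertex),
                        v -> 128, // hypothetical default max parallelism
                        true);    // adjustParallelism: request "as parallel as possible"

        VertexParallelismInformation info = store.getParallelismInfo(vertex.getID());
        // Here info.getParallelism() == 128 and info.getMaxParallelism() == 128;
        // with adjustParallelism == false, getParallelism() would stay 2.
    }
}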