Use of org.apache.flink.api.common.state.StateTtlConfig in project flink by apache.
The class MiniBatchIncrementalGroupAggFunction, method open.
@Override
public void open(ExecutionContext ctx) throws Exception {
    super.open(ctx);
    ClassLoader classLoader = ctx.getRuntimeContext().getUserCodeClassLoader();
    StateTtlConfig ttlConfig = createTtlConfig(stateRetentionTime);
    partialAgg = genPartialAggsHandler.newInstance(classLoader);
    partialAgg.open(new PerKeyStateDataViewStore(ctx.getRuntimeContext()));
    finalAgg = genFinalAggsHandler.newInstance(classLoader);
    finalAgg.open(new PerKeyStateDataViewStore(ctx.getRuntimeContext(), ttlConfig));
    resultRow = new JoinedRowData();
}
Use of org.apache.flink.api.common.state.StateTtlConfig in project flink by apache.
The class GroupTableAggFunction, method open.
@Override
public void open(Configuration parameters) throws Exception {
    super.open(parameters);
    // instantiate function
    StateTtlConfig ttlConfig = createTtlConfig(stateRetentionTime);
    function = genAggsHandler.newInstance(getRuntimeContext().getUserCodeClassLoader());
    function.open(new PerKeyStateDataViewStore(getRuntimeContext(), ttlConfig));
    InternalTypeInfo<RowData> accTypeInfo = InternalTypeInfo.ofFields(accTypes);
    ValueStateDescriptor<RowData> accDesc = new ValueStateDescriptor<>("accState", accTypeInfo);
    if (ttlConfig.isEnabled()) {
        accDesc.enableTimeToLive(ttlConfig);
    }
    accState = getRuntimeContext().getState(accDesc);
}
Use of org.apache.flink.api.common.state.StateTtlConfig in project flink by apache.
The class DeduplicateFunctionBase, method open.
@Override
public void open(Configuration configure) throws Exception {
    super.open(configure);
    ValueStateDescriptor<T> stateDesc = new ValueStateDescriptor<>("deduplicate-state", typeInfo);
    StateTtlConfig ttlConfig = createTtlConfig(stateRetentionTime);
    if (ttlConfig.isEnabled()) {
        stateDesc.enableTimeToLive(ttlConfig);
    }
    state = getRuntimeContext().getState(stateDesc);
}
Use of org.apache.flink.api.common.state.StateTtlConfig in project flink by apache.
The class DataStreamStateTTLTestProgram, method main.
public static void main(String[] args) throws Exception {
    final ParameterTool pt = ParameterTool.fromArgs(args);
    final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    setupEnvironment(env, pt);
    setBackendWithCustomTTLTimeProvider(env);
    TtlTestConfig config = TtlTestConfig.fromArgs(pt);
    StateTtlConfig ttlConfig =
            StateTtlConfig.newBuilder(config.ttl).cleanupFullSnapshot().build();
    env.addSource(new TtlStateUpdateSource(config.keySpace, config.sleepAfterElements, config.sleepTime))
            .name("TtlStateUpdateSource")
            .keyBy(TtlStateUpdate::getKey)
            .flatMap(new TtlVerifyUpdateFunction(ttlConfig, config.reportStatAfterUpdatesNum))
            .name("TtlVerifyUpdateFunction")
            .addSink(new PrintSinkFunction<>())
            .name("PrintFailedVerifications");
    env.execute("State TTL test job");
}
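Beyond cleanupFullSnapshot(), the StateTtlConfig builder exposes a few more knobs. The sketch below combines the update type, state visibility, and cleanup strategies; the chosen values are illustrative and are not the settings of the actual end-to-end test program.

import org.apache.flink.api.common.state.StateTtlConfig;
import org.apache.flink.api.common.time.Time;

// Illustrative builder usage showing the main StateTtlConfig options.
public final class TtlConfigExamples {

    private TtlConfigExamples() {
    }

    public static StateTtlConfig fullFeaturedConfig() {
        return StateTtlConfig.newBuilder(Time.minutes(10))
                // refresh the TTL timestamp on both reads and writes
                .setUpdateType(StateTtlConfig.UpdateType.OnReadAndWrite)
                // hide values whose TTL has expired even if they were not yet cleaned up
                .setStateVisibility(StateTtlConfig.StateVisibility.NeverReturnExpired)
                // drop expired entries when taking a full snapshot
                .cleanupFullSnapshot()
                // additionally clean up a few entries per state access
                .cleanupIncrementally(10, false)
                .build();
    }
}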
Use of org.apache.flink.api.common.state.StateTtlConfig in project flink by apache.
The class ChangelogBackendLogApplier, method restoreKvMetaData.
private static RegisteredKeyValueStateBackendMetaInfo restoreKvMetaData(
        ChangelogKeyedStateBackend<?> backend, StateMetaInfoSnapshot snapshot, DataInputView in)
        throws Exception {
    RegisteredKeyValueStateBackendMetaInfo meta =
            new RegisteredKeyValueStateBackendMetaInfo(snapshot);
    StateTtlConfig ttlConfig = readTtlConfig(in);
    Object defaultValue = readDefaultValue(in, meta);
    // Use the regular API to create states in both the changelog and the base backends,
    // so that the metadata is persisted in the log before any data changes.
    // An alternative solution to load metadata "natively" by the base backends would require
    // base state to be always present, i.e. the 1st checkpoint would have to be "full" always.
    StateDescriptor stateDescriptor = toStateDescriptor(meta, defaultValue);
    // TODO: support changing TTL (FLINK-23143)
    if (ttlConfig.isEnabled()) {
        stateDescriptor.enableTimeToLive(ttlConfig);
    }
    backend.getOrCreateKeyedState(meta.getNamespaceSerializer(), stateDescriptor);
    return meta;
}
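The conditional enableTimeToLive call above can be factored into a small helper. The sketch below is hypothetical (DescriptorRestoreSketch is not part of Flink) and only illustrates the rule the snippet follows: TTL is attached to a freshly built descriptor before the backend creates the state, and only when the restored config is actually enabled, since changing TTL afterwards is not yet supported (FLINK-23143).

import org.apache.flink.api.common.state.StateTtlConfig;
import org.apache.flink.api.common.state.ValueStateDescriptor;
import org.apache.flink.api.common.typeinfo.TypeInformation;

// Hypothetical helper: build a descriptor and attach TTL only if the restored config is enabled.
public final class DescriptorRestoreSketch {

    private DescriptorRestoreSketch() {
    }

    public static <T> ValueStateDescriptor<T> buildDescriptor(
            String name, TypeInformation<T> typeInfo, StateTtlConfig restoredTtlConfig) {
        ValueStateDescriptor<T> descriptor = new ValueStateDescriptor<>(name, typeInfo);
        if (restoredTtlConfig != null && restoredTtlConfig.isEnabled()) {
            descriptor.enableTimeToLive(restoredTtlConfig);
        }
        return descriptor;
    }
}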