Example usage of org.apache.flink.api.common.state.StateTtlConfig in the Apache Flink project.
From class SimpleStateRequestHandler, method getMapState.
/**
 * Resolves the {@link MapState} addressed by the given Beam state request, creating and caching
 * the corresponding {@link MapStateDescriptor} on first access.
 *
 * <p>The Python side transmits the serialized {@link FlinkFnApi.StateDescriptor} proto
 * Base64-encoded inside the side input id. If the request carries window bytes, the state is
 * resolved under the deserialized window namespace; otherwise the void namespace is used.
 *
 * @param request the Beam state request addressing a multimap side input backed map state
 * @return the keyed map state for the requested state name and namespace
 * @throws Exception if the descriptor proto cannot be parsed or the state cannot be accessed
 * @throws RuntimeException if the state name is already cached with a non-MAP descriptor type
 */
@SuppressWarnings("unchecked")
private MapState<ByteArrayWrapper, byte[]> getMapState(BeamFnApi.StateRequest request) throws Exception {
    BeamFnApi.StateKey.MultimapSideInput mapUserState = request.getStateKey().getMultimapSideInput();
    // The side input id carries the Base64-encoded serialized state descriptor proto.
    byte[] data = Base64.getDecoder().decode(mapUserState.getSideInputId());
    FlinkFnApi.StateDescriptor stateDescriptor = FlinkFnApi.StateDescriptor.parseFrom(data);
    String stateName = PYTHON_STATE_PREFIX + stateDescriptor.getStateName();
    // Wildcard type instead of the raw type keeps the compiler's type checking enabled.
    StateDescriptor<?, ?> cachedStateDescriptor = stateDescriptorCache.get(stateName);
    MapStateDescriptor<ByteArrayWrapper, byte[]> mapStateDescriptor;
    if (cachedStateDescriptor instanceof MapStateDescriptor) {
        mapStateDescriptor = (MapStateDescriptor<ByteArrayWrapper, byte[]>) cachedStateDescriptor;
    } else if (cachedStateDescriptor == null) {
        mapStateDescriptor = new MapStateDescriptor<>(stateName, ByteArrayWrapperSerializer.INSTANCE, valueSerializer);
        if (stateDescriptor.hasStateTtlConfig()) {
            FlinkFnApi.StateDescriptor.StateTTLConfig stateTtlConfigProto = stateDescriptor.getStateTtlConfig();
            StateTtlConfig stateTtlConfig = ProtoUtils.parseStateTtlConfigFromProto(stateTtlConfigProto);
            mapStateDescriptor.enableTimeToLive(stateTtlConfig);
        }
        stateDescriptorCache.put(stateName, mapStateDescriptor);
    } else {
        // Same name already registered with a different state type — refuse rather than corrupt state.
        throw new RuntimeException(String.format("State name corrupt detected: " + "'%s' is used both as MAP state and '%s' state at the same time.", stateName, cachedStateDescriptor.getType()));
    }
    byte[] windowBytes = mapUserState.getWindow().toByteArray();
    if (windowBytes.length != 0) {
        // Windowed state: deserialize the namespace from the transmitted window bytes.
        bais.setBuffer(windowBytes, 0, windowBytes.length);
        Object namespace = namespaceSerializer.deserialize(baisWrapper);
        return (MapState<ByteArrayWrapper, byte[]>) keyedStateBackend.getPartitionedState(namespace, namespaceSerializer, mapStateDescriptor);
    } else {
        // Non-windowed state lives under the void namespace.
        return (MapState<ByteArrayWrapper, byte[]>) keyedStateBackend.getPartitionedState(VoidNamespace.INSTANCE, VoidNamespaceSerializer.INSTANCE, mapStateDescriptor);
    }
}
Example usage of org.apache.flink.api.common.state.StateTtlConfig in the Apache Flink project.
From class SimpleStateRequestHandler, method getListState.
/**
 * Resolves the {@link ListState} addressed by the given Beam state request, creating and caching
 * the corresponding {@link ListStateDescriptor} on first access.
 *
 * <p>The Python side transmits the serialized {@link FlinkFnApi.StateDescriptor} proto
 * Base64-encoded inside the user state id. If the request carries window bytes, the state is
 * resolved under the deserialized window namespace; otherwise the void namespace is used.
 *
 * @param request the Beam state request addressing a bag user state backed list state
 * @return the keyed list state for the requested state name and namespace
 * @throws Exception if the descriptor proto cannot be parsed or the state cannot be accessed
 * @throws RuntimeException if the state name is already cached with a non-LIST descriptor type
 */
@SuppressWarnings("unchecked")
private ListState<byte[]> getListState(BeamFnApi.StateRequest request) throws Exception {
    BeamFnApi.StateKey.BagUserState bagUserState = request.getStateKey().getBagUserState();
    // The user state id carries the Base64-encoded serialized state descriptor proto.
    byte[] data = Base64.getDecoder().decode(bagUserState.getUserStateId());
    FlinkFnApi.StateDescriptor stateDescriptor = FlinkFnApi.StateDescriptor.parseFrom(data);
    String stateName = PYTHON_STATE_PREFIX + stateDescriptor.getStateName();
    ListStateDescriptor<byte[]> listStateDescriptor;
    // Wildcard type instead of the raw type keeps the compiler's type checking enabled.
    StateDescriptor<?, ?> cachedStateDescriptor = stateDescriptorCache.get(stateName);
    if (cachedStateDescriptor instanceof ListStateDescriptor) {
        listStateDescriptor = (ListStateDescriptor<byte[]>) cachedStateDescriptor;
    } else if (cachedStateDescriptor == null) {
        listStateDescriptor = new ListStateDescriptor<>(stateName, valueSerializer);
        if (stateDescriptor.hasStateTtlConfig()) {
            FlinkFnApi.StateDescriptor.StateTTLConfig stateTtlConfigProto = stateDescriptor.getStateTtlConfig();
            StateTtlConfig stateTtlConfig = ProtoUtils.parseStateTtlConfigFromProto(stateTtlConfigProto);
            listStateDescriptor.enableTimeToLive(stateTtlConfig);
        }
        stateDescriptorCache.put(stateName, listStateDescriptor);
    } else {
        // Same name already registered with a different state type — refuse rather than corrupt state.
        throw new RuntimeException(String.format("State name corrupt detected: " + "'%s' is used both as LIST state and '%s' state at the same time.", stateName, cachedStateDescriptor.getType()));
    }
    byte[] windowBytes = bagUserState.getWindow().toByteArray();
    if (windowBytes.length != 0) {
        // Windowed state: deserialize the namespace from the transmitted window bytes.
        bais.setBuffer(windowBytes, 0, windowBytes.length);
        Object namespace = namespaceSerializer.deserialize(baisWrapper);
        return (ListState<byte[]>) keyedStateBackend.getPartitionedState(namespace, namespaceSerializer, listStateDescriptor);
    } else {
        // Non-windowed state lives under the void namespace.
        return (ListState<byte[]>) keyedStateBackend.getPartitionedState(VoidNamespace.INSTANCE, VoidNamespaceSerializer.INSTANCE, listStateDescriptor);
    }
}
Example usage of org.apache.flink.api.common.state.StateTtlConfig in the Apache Flink project.
From class ProtoUtilsTest, method testParseStateTtlConfigFromProto.
/**
 * Verifies that {@code ProtoUtils.parseStateTtlConfigFromProto} faithfully translates a
 * {@link FlinkFnApi.StateDescriptor.StateTTLConfig} proto — TTL, update type, state visibility,
 * and all three cleanup strategies — into the equivalent {@link StateTtlConfig}.
 */
@Test
public void testParseStateTtlConfigFromProto() {
    // Build a proto exercising all three cleanup strategies: full-snapshot scan,
    // incremental cleanup, and the RocksDB compaction filter.
    FlinkFnApi.StateDescriptor.StateTTLConfig.CleanupStrategies cleanupStrategiesProto =
            FlinkFnApi.StateDescriptor.StateTTLConfig.CleanupStrategies.newBuilder()
                    .setIsCleanupInBackground(true)
                    .addStrategies(
                            FlinkFnApi.StateDescriptor.StateTTLConfig.CleanupStrategies.MapStrategiesEntry.newBuilder()
                                    .setStrategy(FlinkFnApi.StateDescriptor.StateTTLConfig.CleanupStrategies.Strategies.FULL_STATE_SCAN_SNAPSHOT)
                                    .setEmptyStrategy(FlinkFnApi.StateDescriptor.StateTTLConfig.CleanupStrategies.EmptyCleanupStrategy.EMPTY_STRATEGY))
                    .addStrategies(
                            FlinkFnApi.StateDescriptor.StateTTLConfig.CleanupStrategies.MapStrategiesEntry.newBuilder()
                                    .setStrategy(FlinkFnApi.StateDescriptor.StateTTLConfig.CleanupStrategies.Strategies.INCREMENTAL_CLEANUP)
                                    .setIncrementalCleanupStrategy(
                                            FlinkFnApi.StateDescriptor.StateTTLConfig.CleanupStrategies.IncrementalCleanupStrategy.newBuilder()
                                                    .setCleanupSize(10)
                                                    .setRunCleanupForEveryRecord(true)
                                                    .build()))
                    .addStrategies(
                            FlinkFnApi.StateDescriptor.StateTTLConfig.CleanupStrategies.MapStrategiesEntry.newBuilder()
                                    .setStrategy(FlinkFnApi.StateDescriptor.StateTTLConfig.CleanupStrategies.Strategies.ROCKSDB_COMPACTION_FILTER)
                                    .setRocksdbCompactFilterCleanupStrategy(
                                            FlinkFnApi.StateDescriptor.StateTTLConfig.CleanupStrategies.RocksdbCompactFilterCleanupStrategy.newBuilder()
                                                    .setQueryTimeAfterNumEntries(1000)
                                                    .build()))
                    .build();
    FlinkFnApi.StateDescriptor.StateTTLConfig stateTTLConfigProto =
            FlinkFnApi.StateDescriptor.StateTTLConfig.newBuilder()
                    .setTtl(Time.of(1000, TimeUnit.MILLISECONDS).toMilliseconds())
                    .setUpdateType(FlinkFnApi.StateDescriptor.StateTTLConfig.UpdateType.OnCreateAndWrite)
                    .setStateVisibility(FlinkFnApi.StateDescriptor.StateTTLConfig.StateVisibility.NeverReturnExpired)
                    .setCleanupStrategies(cleanupStrategiesProto)
                    .build();

    StateTtlConfig stateTTLConfig = ProtoUtils.parseStateTtlConfigFromProto(stateTTLConfigProto);

    // assertEquals takes (expected, actual) — keep that order so failure messages read correctly.
    assertEquals(StateTtlConfig.UpdateType.OnCreateAndWrite, stateTTLConfig.getUpdateType());
    assertEquals(StateTtlConfig.StateVisibility.NeverReturnExpired, stateTTLConfig.getStateVisibility());
    assertEquals(Time.milliseconds(1000), stateTTLConfig.getTtl());
    assertEquals(StateTtlConfig.TtlTimeCharacteristic.ProcessingTime, stateTTLConfig.getTtlTimeCharacteristic());

    StateTtlConfig.CleanupStrategies cleanupStrategies = stateTTLConfig.getCleanupStrategies();
    assertTrue(cleanupStrategies.isCleanupInBackground());
    assertTrue(cleanupStrategies.inFullSnapshot());

    StateTtlConfig.IncrementalCleanupStrategy incrementalCleanupStrategy = cleanupStrategies.getIncrementalCleanupStrategy();
    assertNotNull(incrementalCleanupStrategy);
    assertEquals(10, incrementalCleanupStrategy.getCleanupSize());
    assertTrue(incrementalCleanupStrategy.runCleanupForEveryRecord());

    StateTtlConfig.RocksdbCompactFilterCleanupStrategy rocksdbCompactFilterCleanupStrategy = cleanupStrategies.getRocksdbCompactFilterCleanupStrategy();
    assertNotNull(rocksdbCompactFilterCleanupStrategy);
    assertEquals(1000, rocksdbCompactFilterCleanupStrategy.getQueryTimeAfterNumEntries());
}
Example usage of org.apache.flink.api.common.state.StateTtlConfig in the Apache Flink project.
From class RocksDbTtlCompactFiltersManager, method configCompactFilter.
/**
 * Configures the RocksDB TTL compaction filter for the given state descriptor, if its TTL
 * config is enabled and requests compaction-filter cleanup.
 *
 * <p>The filter config is derived from the state kind: list state distinguishes fixed-length
 * elements (delimiter-aware) from variable-length ones (element-filter based), while map and
 * value state use their dedicated factory methods.
 *
 * @param stateDesc the state descriptor whose TTL settings drive the filter configuration
 * @param stateSerializer the serializer of the state value; for list state this must be a
 *     {@link ListSerializer} so the element serializer can be extracted
 */
public void configCompactFilter(@Nonnull StateDescriptor<?, ?> stateDesc, TypeSerializer<?> stateSerializer) {
    StateTtlConfig ttlConfig = stateDesc.getTtlConfig();
    // Nothing to do unless TTL is active and compaction-filter cleanup was requested.
    if (!ttlConfig.isEnabled() || !ttlConfig.getCleanupStrategies().inRocksdbCompactFilter()) {
        return;
    }
    FlinkCompactionFilterFactory filterFactory = compactionFilterFactories.get(stateDesc.getName());
    Preconditions.checkNotNull(filterFactory);
    long ttlMillis = ttlConfig.getTtl().toMilliseconds();
    StateTtlConfig.RocksdbCompactFilterCleanupStrategy cleanupStrategy =
            ttlConfig.getCleanupStrategies().getRocksdbCompactFilterCleanupStrategy();
    Preconditions.checkNotNull(cleanupStrategy);
    long queryTimeAfterNumEntries = cleanupStrategy.getQueryTimeAfterNumEntries();

    final FlinkCompactionFilter.Config filterConfig;
    if (stateDesc instanceof MapStateDescriptor) {
        filterConfig = FlinkCompactionFilter.Config.createForMap(ttlMillis, queryTimeAfterNumEntries);
    } else if (stateDesc instanceof ListStateDescriptor) {
        TypeSerializer<?> elementSerializer = ((ListSerializer<?>) stateSerializer).getElementSerializer();
        int elementLength = elementSerializer.getLength();
        if (elementLength > 0) {
            // Fixed-length elements: account for one extra byte per element for the list delimiter.
            filterConfig = FlinkCompactionFilter.Config.createForFixedElementList(
                    ttlMillis, queryTimeAfterNumEntries, elementLength + 1);
        } else {
            // Variable-length elements need a per-element filter with its own serializer copy.
            filterConfig = FlinkCompactionFilter.Config.createForList(
                    ttlMillis, queryTimeAfterNumEntries,
                    new ListElementFilterFactory<>(elementSerializer.duplicate()));
        }
    } else {
        filterConfig = FlinkCompactionFilter.Config.createForValue(ttlMillis, queryTimeAfterNumEntries);
    }
    filterFactory.configure(filterConfig);
}
Example usage of org.apache.flink.api.common.state.StateTtlConfig in the Apache Flink project.
From class MiniBatchGroupAggFunction, method open.
/**
 * Initializes the runtime artifacts of this function: the code-generated aggregation handler
 * and record equaliser, the TTL-aware accumulator value state, and the serializers used while
 * processing input rows.
 *
 * @param ctx the execution context providing the runtime context and classloader
 * @throws Exception if opening the parent or instantiating generated code fails
 */
@Override
public void open(ExecutionContext ctx) throws Exception {
    super.open(ctx);
    final StateTtlConfig ttlConfig = createTtlConfig(stateRetentionTime);
    final ClassLoader userClassLoader = ctx.getRuntimeContext().getUserCodeClassLoader();

    // Instantiate the code-generated aggregation handler and hand it the TTL-aware view store.
    function = genAggsHandler.newInstance(userClassLoader);
    function.open(new PerKeyStateDataViewStore(ctx.getRuntimeContext(), ttlConfig));

    // Instantiate the code-generated record equaliser.
    equaliser = genRecordEqualiser.newInstance(userClassLoader);

    // Accumulator state, with TTL applied only when state retention is configured.
    ValueStateDescriptor<RowData> accStateDesc =
            new ValueStateDescriptor<>("accState", InternalTypeInfo.ofFields(accTypes));
    if (ttlConfig.isEnabled()) {
        accStateDesc.enableTimeToLive(ttlConfig);
    }
    accState = ctx.getRuntimeContext().getState(accStateDesc);

    inputRowSerializer = InternalSerializers.create(inputType);
    resultRow = new JoinedRowData();
}
Aggregations