Use of org.apache.flink.runtime.state.StreamCompressionDecorator in project flink by apache.
The class StateChangeFsUploader, method wrap.
private OutputStreamWithPos wrap(FSDataOutputStream fsStream) throws IOException {
    StreamCompressionDecorator instance =
            compression ? SnappyStreamCompressionDecorator.INSTANCE : UncompressedStreamCompressionDecorator.INSTANCE;
    OutputStream compressed = compression ? instance.decorateWithCompression(fsStream) : fsStream;
    return new OutputStreamWithPos(new BufferedOutputStream(compressed, bufferSize));
}
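For context, the read path has to pick the same decorator that wrap above used at write time; otherwise the Snappy framing cannot be decoded. The following is a minimal sketch, not Flink code: the unwrap helper and its compression flag are illustrative.
import java.io.BufferedInputStream;
import java.io.IOException;
import java.io.InputStream;
import org.apache.flink.core.fs.FSDataInputStream;
import org.apache.flink.runtime.state.SnappyStreamCompressionDecorator;
import org.apache.flink.runtime.state.StreamCompressionDecorator;
import org.apache.flink.runtime.state.UncompressedStreamCompressionDecorator;

class ChangelogReadSketch {
    // Illustrative read-side counterpart of wrap(...): decorate the raw file stream
    // with the same decorator that was chosen when the data was written. For the
    // uncompressed decorator, decorateWithCompression is a no-op that returns the
    // stream unchanged, so decorating unconditionally is safe.
    static InputStream unwrap(FSDataInputStream fsStream, boolean compression) throws IOException {
        StreamCompressionDecorator instance =
                compression ? SnappyStreamCompressionDecorator.INSTANCE : UncompressedStreamCompressionDecorator.INSTANCE;
        return new BufferedInputStream(instance.decorateWithCompression(fsStream));
    }
}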
Use of org.apache.flink.runtime.state.StreamCompressionDecorator in project flink by apache.
The class HeapRestoreOperation, method readStateHandleStateData.
private void readStateHandleStateData(
        FSDataInputStream fsDataInputStream,
        DataInputViewStreamWrapper inView,
        KeyGroupRangeOffsets keyGroupOffsets,
        Map<Integer, StateMetaInfoSnapshot> kvStatesById,
        int numStates,
        int readVersion,
        boolean isCompressed)
        throws IOException {
    final StreamCompressionDecorator streamCompressionDecorator =
            isCompressed ? SnappyStreamCompressionDecorator.INSTANCE : UncompressedStreamCompressionDecorator.INSTANCE;
    for (Tuple2<Integer, Long> groupOffset : keyGroupOffsets) {
        int keyGroupIndex = groupOffset.f0;
        long offset = groupOffset.f1;
        if (!keyGroupRange.contains(keyGroupIndex)) {
            LOG.debug("Key group {} doesn't belong to this backend with key group range: {}", keyGroupIndex, keyGroupRange);
            continue;
        }
        fsDataInputStream.seek(offset);
        int writtenKeyGroupIndex = inView.readInt();
        Preconditions.checkState(writtenKeyGroupIndex == keyGroupIndex, "Unexpected key-group in restore.");
        try (InputStream kgCompressionInStream = streamCompressionDecorator.decorateWithCompression(fsDataInputStream)) {
            readKeyGroupStateData(kgCompressionInStream, kvStatesById, keyGroupIndex, numStates, readVersion);
        }
    }
}
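For this restore loop to work, the writer must have laid out each key group as an uncompressed key-group index followed by a (possibly) compressed block, with the block's start offset recorded in KeyGroupRangeOffsets. A minimal sketch of that write-side contract, assuming a hypothetical writeKeyGroup helper and StateWriter callback (both illustrative, not Flink API):
import java.io.IOException;
import java.io.OutputStream;
import org.apache.flink.core.fs.FSDataOutputStream;
import org.apache.flink.core.memory.DataOutputViewStreamWrapper;
import org.apache.flink.runtime.state.KeyGroupRangeOffsets;
import org.apache.flink.runtime.state.StreamCompressionDecorator;

class KeyGroupWriteSketch {
    // Illustrative counterpart of the restore loop above: record the offset for the
    // key group, write the key-group index on the raw (uncompressed) stream, then
    // write the state data inside a decorated block. 'StateWriter' stands in for the
    // real per-key-group serialization logic.
    static void writeKeyGroup(
            FSDataOutputStream out,
            KeyGroupRangeOffsets keyGroupOffsets,
            StreamCompressionDecorator compressionDecorator,
            int keyGroupIndex,
            StateWriter serializeStates)
            throws IOException {
        keyGroupOffsets.setKeyGroupOffset(keyGroupIndex, out.getPos());
        new DataOutputViewStreamWrapper(out).writeInt(keyGroupIndex);
        try (OutputStream kgCompressionOut = compressionDecorator.decorateWithCompression(out)) {
            serializeStates.write(new DataOutputViewStreamWrapper(kgCompressionOut));
        }
    }

    interface StateWriter {
        void write(DataOutputViewStreamWrapper view) throws IOException;
    }
}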
Use of org.apache.flink.runtime.state.StreamCompressionDecorator in project flink by apache.
The class EmbeddedRocksDBStateBackend, method createKeyedStateBackend.
@Override
public <K> AbstractKeyedStateBackend<K> createKeyedStateBackend(
        Environment env, JobID jobID, String operatorIdentifier, TypeSerializer<K> keySerializer,
        int numberOfKeyGroups, KeyGroupRange keyGroupRange, TaskKvStateRegistry kvStateRegistry,
        TtlTimeProvider ttlTimeProvider, MetricGroup metricGroup,
        @Nonnull Collection<KeyedStateHandle> stateHandles, CloseableRegistry cancelStreamRegistry,
        double managedMemoryFraction) throws IOException {
    // first, make sure that the RocksDB JNI library is loaded
    // we do this explicitly here to have better error handling
    String tempDir = env.getTaskManagerInfo().getTmpWorkingDirectory().getAbsolutePath();
    ensureRocksDBIsLoaded(tempDir);
    // replace all characters that are not legal for filenames with underscore
    String fileCompatibleIdentifier = operatorIdentifier.replaceAll("[^a-zA-Z0-9\\-]", "_");
    lazyInitializeForJob(env, fileCompatibleIdentifier);
    File instanceBasePath =
            new File(getNextStoragePath(), "job_" + jobId + "_op_" + fileCompatibleIdentifier + "_uuid_" + UUID.randomUUID());
    LocalRecoveryConfig localRecoveryConfig = env.getTaskStateManager().createLocalRecoveryConfig();
    final OpaqueMemoryResource<RocksDBSharedResources> sharedResources =
            RocksDBOperationUtils.allocateSharedCachesIfConfigured(memoryConfiguration, env.getMemoryManager(), managedMemoryFraction, LOG);
    if (sharedResources != null) {
        LOG.info("Obtained shared RocksDB cache of size {} bytes", sharedResources.getSize());
    }
    final RocksDBResourceContainer resourceContainer = createOptionsAndResourceContainer(sharedResources);
    ExecutionConfig executionConfig = env.getExecutionConfig();
    StreamCompressionDecorator keyGroupCompressionDecorator = getCompressionDecorator(executionConfig);
    LatencyTrackingStateConfig latencyTrackingStateConfig = latencyTrackingConfigBuilder.setMetricGroup(metricGroup).build();
    RocksDBKeyedStateBackendBuilder<K> builder =
            new RocksDBKeyedStateBackendBuilder<>(
                            operatorIdentifier, env.getUserCodeClassLoader().asClassLoader(), instanceBasePath,
                            resourceContainer, stateName -> resourceContainer.getColumnOptions(), kvStateRegistry,
                            keySerializer, numberOfKeyGroups, keyGroupRange, executionConfig, localRecoveryConfig,
                            getPriorityQueueStateType(), ttlTimeProvider, latencyTrackingStateConfig, metricGroup,
                            stateHandles, keyGroupCompressionDecorator, cancelStreamRegistry)
                    .setEnableIncrementalCheckpointing(isIncrementalCheckpointsEnabled())
                    .setNumberOfTransferingThreads(getNumberOfTransferThreads())
                    .setNativeMetricOptions(resourceContainer.getMemoryWatcherOptions(defaultMetricOptions))
                    .setWriteBatchSize(getWriteBatchSize());
    return builder.build();
}
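The keyGroupCompressionDecorator above comes from getCompressionDecorator(executionConfig), which selects SnappyStreamCompressionDecorator when snapshot compression is enabled on the ExecutionConfig and UncompressedStreamCompressionDecorator otherwise. A minimal sketch of enabling it from user code (the job setup itself is illustrative):
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;

class SnapshotCompressionSketch {
    public static void main(String[] args) {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        // With this flag set, the backend's key-group compression decorator is the
        // Snappy decorator; by default snapshots are written uncompressed.
        env.getConfig().setUseSnapshotCompression(true);
        // ... define and execute the job as usual
    }
}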