Example use of org.apache.flink.core.memory.DataInputView in the Apache Flink project: class KeyedStateCheckpointOutputStreamTest, method verifyRead.
/**
 * Verifies that every key group listed in the handle's key-group range can be
 * located through its recorded offset, and that the int written at that offset
 * equals the key-group id itself. Finally checks that the number of key groups
 * visited matches the expected range size.
 *
 * @param fullHandle handle to the written key-grouped state
 * @param keyRange the key-group range the handle is expected to cover
 * @throws IOException if reading from the snapshot stream fails
 */
private static void verifyRead(KeyGroupsStateHandle fullHandle, KeyGroupRange keyRange) throws IOException {
    int verifiedGroups = 0;
    try (FSDataInputStream stream = fullHandle.openInputStream()) {
        DataInputView inputView = new DataInputViewStreamWrapper(stream);
        for (int keyGroup : fullHandle.getKeyGroupRange()) {
            // Jump to the recorded start offset of this key group.
            stream.seek(fullHandle.getOffsetForKeyGroup(keyGroup));
            Assert.assertEquals(keyGroup, inputView.readInt());
            verifiedGroups++;
        }
    }
    Assert.assertEquals(keyRange.getNumberOfKeyGroups(), verifiedGroups);
}
Example use of org.apache.flink.core.memory.DataInputView in the Apache Flink project: class OperatorStateOutputCheckpointStreamTest, method verifyRead.
/**
 * Verifies the partitions of the default operator state in the given handle:
 * seeks to each recorded partition offset and checks that the int written
 * there equals the partition's index, then asserts that exactly
 * {@code numPartitions} partitions were visited.
 *
 * @param fullHandle handle to the written operator state
 * @param numPartitions expected number of state partitions
 * @throws IOException if reading from the snapshot stream fails
 */
private static void verifyRead(OperatorStateHandle fullHandle, int numPartitions) throws IOException {
    int verifiedPartitions = 0;
    try (FSDataInputStream stream = fullHandle.openInputStream()) {
        // Offsets of the default (unnamed) operator state's partitions.
        OperatorStateHandle.StateMetaInfo metaInfo = fullHandle.getStateNameToPartitionOffsets().get(DefaultOperatorStateBackend.DEFAULT_OPERATOR_STATE_NAME);
        long[] partitionOffsets = metaInfo.getOffsets();
        Assert.assertNotNull(partitionOffsets);
        DataInputView inputView = new DataInputViewStreamWrapper(stream);
        for (int partition = 0; partition < numPartitions; partition++) {
            stream.seek(partitionOffsets[partition]);
            Assert.assertEquals(partition, inputView.readInt());
            verifiedPartitions++;
        }
    }
    Assert.assertEquals(numPartitions, verifiedPartitions);
}
Example use of org.apache.flink.core.memory.DataInputView in the Apache Flink project: class IterationHeadTask, method run.
@Override
public void run() throws Exception {
    // Drives the iteration head: seeds the solution set (workset iterations),
    // then runs one superstep per loop pass, synchronizing with all workers via
    // the superstep barrier, until termination is signaled. Shared structures are
    // exchanged with the iteration tail/sync tasks through per-brokerKey brokers.
    final String brokerKey = brokerKey();
    final int workerIndex = getEnvironment().getTaskInfo().getIndexOfThisSubtask();
    // True when the solution set is held as plain objects rather than in managed memory.
    final boolean objectSolutionSet = config.isSolutionSetUnmanaged();
    // if workset iteration (managed solution set)
    CompactingHashTable<X> solutionSet = null;
    // if workset iteration with unmanaged solution set
    JoinHashMap<X> solutionSetObjectMap = null;
    boolean waitForSolutionSetUpdate = config.getWaitForSolutionSetUpdate();
    boolean isWorksetIteration = config.getIsWorksetIteration();
    try {
        /* used for receiving the current iteration result from iteration tail */
        SuperstepKickoffLatch nextStepKickoff = new SuperstepKickoffLatch();
        SuperstepKickoffLatchBroker.instance().handIn(brokerKey, nextStepKickoff);
        BlockingBackChannel backChannel = initBackChannel();
        SuperstepBarrier barrier = initSuperstepBarrier();
        SolutionSetUpdateBarrier solutionSetUpdateBarrier = null;
        feedbackDataInput = config.getIterationHeadPartialSolutionOrWorksetInputIndex();
        feedbackTypeSerializer = this.getInputSerializer(feedbackDataInput);
        // The feedback input is re-fed each superstep and must not be reset in between.
        excludeFromReset(feedbackDataInput);
        int initialSolutionSetInput;
        if (isWorksetIteration) {
            initialSolutionSetInput = config.getIterationHeadSolutionSetInputIndex();
            solutionTypeSerializer = config.getSolutionSetSerializer(getUserCodeClassLoader());
            // setup the index for the solution set
            @SuppressWarnings("unchecked")
            MutableObjectIterator<X> solutionSetInput = (MutableObjectIterator<X>) createInputIterator(inputReaders[initialSolutionSetInput], solutionTypeSerializer);
            // read the initial solution set and hand it in at the broker so the
            // solution-set join/update tasks can access the same index instance
            if (objectSolutionSet) {
                solutionSetObjectMap = initJoinHashMap();
                readInitialSolutionSet(solutionSetObjectMap, solutionSetInput);
                SolutionSetBroker.instance().handIn(brokerKey, solutionSetObjectMap);
            } else {
                solutionSet = initCompactingHashTable();
                readInitialSolutionSet(solutionSet, solutionSetInput);
                SolutionSetBroker.instance().handIn(brokerKey, solutionSet);
            }
            if (waitForSolutionSetUpdate) {
                solutionSetUpdateBarrier = new SolutionSetUpdateBarrier();
                SolutionSetUpdateBarrierBroker.instance().handIn(brokerKey, solutionSetUpdateBarrier);
            }
        } else {
            // bulk iteration case: the feedback serializer doubles as the solution serializer
            @SuppressWarnings("unchecked")
            TypeSerializerFactory<X> solSer = (TypeSerializerFactory<X>) feedbackTypeSerializer;
            solutionTypeSerializer = solSer;
            // = termination Criterion tail
            if (waitForSolutionSetUpdate) {
                solutionSetUpdateBarrier = new SolutionSetUpdateBarrier();
                SolutionSetUpdateBarrierBroker.instance().handIn(brokerKey, solutionSetUpdateBarrier);
            }
        }
        // instantiate all aggregators and register them at the iteration global registry
        RuntimeAggregatorRegistry aggregatorRegistry = new RuntimeAggregatorRegistry(config.getIterationAggregators(getUserCodeClassLoader()));
        IterationAggregatorBroker.instance().handIn(brokerKey, aggregatorRegistry);
        DataInputView superstepResult = null;
        // Superstep loop: one pass per iteration until termination is requested.
        while (this.running && !terminationRequested()) {
            if (log.isInfoEnabled()) {
                log.info(formatLogString("starting iteration [" + currentIteration() + "]"));
            }
            barrier.setup();
            if (waitForSolutionSetUpdate) {
                solutionSetUpdateBarrier.setup();
            }
            if (!inFirstIteration()) {
                // Re-inject the previous superstep's result as this superstep's input.
                feedBackSuperstepResult(superstepResult);
            }
            // Run the wrapped driver for this superstep.
            super.run();
            // signal to connected tasks that we are done with the superstep
            sendEndOfSuperstepToAllIterationOutputs();
            if (waitForSolutionSetUpdate) {
                solutionSetUpdateBarrier.waitForSolutionSetUpdate();
            }
            // blocking call to wait for the result produced by the iteration tail
            superstepResult = backChannel.getReadEndAfterSuperstepEnded();
            if (log.isInfoEnabled()) {
                log.info(formatLogString("finishing iteration [" + currentIteration() + "]"));
            }
            // Report this worker's aggregates to the sync task, then wait for all workers.
            sendEventToSync(new WorkerDoneEvent(workerIndex, aggregatorRegistry.getAllAggregators()));
            if (log.isInfoEnabled()) {
                log.info(formatLogString("waiting for other workers in iteration [" + currentIteration() + "]"));
            }
            barrier.waitForOtherWorkers();
            if (barrier.terminationSignaled()) {
                if (log.isInfoEnabled()) {
                    log.info(formatLogString("head received termination request in iteration [" + currentIteration() + "]"));
                }
                requestTermination();
                nextStepKickoff.signalTermination();
            } else {
                // Fold the globally aggregated values back in and kick off the next superstep.
                incrementIterationCounter();
                String[] globalAggregateNames = barrier.getAggregatorNames();
                Value[] globalAggregates = barrier.getAggregates();
                aggregatorRegistry.updateGlobalAggregatesAndReset(globalAggregateNames, globalAggregates);
                nextStepKickoff.triggerNextSuperstep();
            }
        }
        if (log.isInfoEnabled()) {
            log.info(formatLogString("streaming out final result after [" + currentIteration() + "] iterations"));
        }
        // Emit the converged result: the solution set for workset iterations,
        // otherwise the last superstep's output for bulk iterations.
        if (isWorksetIteration) {
            if (objectSolutionSet) {
                streamSolutionSetToFinalOutput(solutionSetObjectMap);
            } else {
                streamSolutionSetToFinalOutput(solutionSet);
            }
        } else {
            streamOutFinalOutputBulk(new InputViewIterator<X>(superstepResult, this.solutionTypeSerializer.getSerializer()));
        }
        this.finalOutputCollector.close();
    } finally {
        // make sure we unregister everything from the broker:
        // - backchannel
        // - aggregator registry
        // - solution set index
        IterationAggregatorBroker.instance().remove(brokerKey);
        BlockingBackChannelBroker.instance().remove(brokerKey);
        SuperstepKickoffLatchBroker.instance().remove(brokerKey);
        SolutionSetBroker.instance().remove(brokerKey);
        SolutionSetUpdateBarrierBroker.instance().remove(brokerKey);
        // Release the managed-memory solution set, if one was created.
        if (solutionSet != null) {
            solutionSet.close();
        }
    }
}
Example use of org.apache.flink.core.memory.DataInputView in the Apache Flink project: class OperatorStateRestoreOperation, method deserializeBroadcastStateValues.
/**
 * Restores the key/value entries of a single broadcast state from the snapshot
 * stream. Does nothing when the meta info or its offsets are absent.
 *
 * @param broadcastStateForName target broadcast state to fill
 * @param in snapshot stream positioned anywhere; this method seeks as needed
 * @param metaInfo offsets for this state's data, possibly {@code null}
 * @throws Exception if seeking or deserialization fails
 */
private <K, V> void deserializeBroadcastStateValues(final BackendWritableBroadcastState<K, V> broadcastStateForName, final FSDataInputStream in, final OperatorStateHandle.StateMetaInfo metaInfo) throws Exception {
    if (metaInfo == null) {
        return;
    }
    final long[] partitionOffsets = metaInfo.getOffsets();
    if (partitionOffsets == null) {
        return;
    }
    final TypeSerializer<K> keySerializer = broadcastStateForName.getStateMetaInfo().getKeySerializer();
    final TypeSerializer<V> valueSerializer = broadcastStateForName.getStateMetaInfo().getValueSerializer();
    // Only the first offset is used for broadcast state here.
    in.seek(partitionOffsets[0]);
    final DataInputView view = new DataInputViewStreamWrapper(in);
    // The entry count precedes the serialized key/value pairs.
    final int numEntries = view.readInt();
    for (int entry = 0; entry < numEntries; entry++) {
        broadcastStateForName.put(keySerializer.deserialize(view), valueSerializer.deserialize(view));
    }
}
Example use of org.apache.flink.core.memory.DataInputView in the Apache Flink project: class OperatorStateRestoreOperation, method deserializeOperatorStateValues.
/**
 * Restores the elements of a partitionable list state from the snapshot
 * stream, reading one element per recorded partition offset. Does nothing
 * when the meta info or its offsets are absent.
 *
 * @param stateListForName target list state to fill
 * @param in snapshot stream positioned anywhere; this method seeks as needed
 * @param metaInfo offsets for this state's partitions, possibly {@code null}
 * @throws IOException if seeking or deserialization fails
 */
private <S> void deserializeOperatorStateValues(PartitionableListState<S> stateListForName, FSDataInputStream in, OperatorStateHandle.StateMetaInfo metaInfo) throws IOException {
    if (metaInfo == null) {
        return;
    }
    final long[] partitionOffsets = metaInfo.getOffsets();
    if (partitionOffsets == null) {
        return;
    }
    final DataInputView view = new DataInputViewStreamWrapper(in);
    final TypeSerializer<S> elementSerializer = stateListForName.getStateMetaInfo().getPartitionStateSerializer();
    // One serialized element was written at each partition offset.
    for (long partitionOffset : partitionOffsets) {
        in.seek(partitionOffset);
        stateListForName.add(elementSerializer.deserialize(view));
    }
}
Aggregations