Use of org.apache.flink.core.memory.DataOutputView in the Apache Flink project: the snapshot method of the DefaultOperatorStateBackend class.
// Synchronously snapshots all registered operator (partitionable list) states into a single
// checkpoint stream and returns an already-completed future holding the resulting state handle.
// Layout: a serialization proxy with per-state metadata is written first, followed by a state
// count and each state's partition data; per-state offsets are collected for the handle.
@Override
public RunnableFuture<OperatorStateHandle> snapshot(long checkpointId, long timestamp, CheckpointStreamFactory streamFactory, CheckpointOptions checkpointOptions) throws Exception {
// Nothing registered: report an empty snapshot via a completed null-valued future.
if (registeredStates.isEmpty()) {
return DoneFuture.nullValue();
}
// Collect serialization metadata (name, serializer, redistribution mode) for every state.
List<OperatorBackendSerializationProxy.StateMetaInfo<?>> metaInfoList = new ArrayList<>(registeredStates.size());
for (Map.Entry<String, PartitionableListState<?>> entry : registeredStates.entrySet()) {
PartitionableListState<?> state = entry.getValue();
OperatorBackendSerializationProxy.StateMetaInfo<?> metaInfo = new OperatorBackendSerializationProxy.StateMetaInfo<>(state.getName(), state.getPartitionStateSerializer(), state.getAssignmentMode());
metaInfoList.add(metaInfo);
}
// Per-state stream offsets and assignment modes, filled in as each state is written.
Map<String, OperatorStateHandle.StateMetaInfo> writtenStatesMetaData = new HashMap<>(registeredStates.size());
CheckpointStreamFactory.CheckpointStateOutputStream out = streamFactory.createCheckpointStateOutputStream(checkpointId, timestamp);
try {
// Register the stream so it is closed (aborting the write) if the checkpoint is cancelled.
closeStreamOnCancelRegistry.registerClosable(out);
DataOutputView dov = new DataOutputViewStreamWrapper(out);
// Header: metadata for all states, then the number of state payloads that follow.
OperatorBackendSerializationProxy backendSerializationProxy = new OperatorBackendSerializationProxy(metaInfoList);
backendSerializationProxy.write(dov);
dov.writeInt(registeredStates.size());
for (Map.Entry<String, PartitionableListState<?>> entry : registeredStates.entrySet()) {
PartitionableListState<?> value = entry.getValue();
// write(out) serializes the state's partitions and returns their start offsets in the stream.
long[] partitionOffsets = value.write(out);
OperatorStateHandle.Mode mode = value.getAssignmentMode();
writtenStatesMetaData.put(entry.getKey(), new OperatorStateHandle.StateMetaInfo(partitionOffsets, mode));
}
// closeAndGetHandle() finalizes the stream and yields the handle to the persisted bytes.
OperatorStateHandle handle = new OperatorStateHandle(writtenStatesMetaData, out.closeAndGetHandle());
return new DoneFuture<>(handle);
} finally {
// Always unregister; close() is a safety net for the exception path
// (NOTE(review): presumably a no-op after closeAndGetHandle() — confirm stream contract).
closeStreamOnCancelRegistry.unregisterClosable(out);
out.close();
}
}
Use of org.apache.flink.core.memory.DataOutputView in the Apache Flink project: the testEmptyRecords method of the SpanningRecordSerializerTest class.
@Test
public void testEmptyRecords() {
    final int SEGMENT_SIZE = 11;
    final SpanningRecordSerializer<SerializationTestType> serializer = new SpanningRecordSerializer<SerializationTestType>();
    final Buffer buffer = new Buffer(MemorySegmentFactory.allocateUnpooledSegment(SEGMENT_SIZE), mock(BufferRecycler.class));
    try {
        Assert.assertEquals(RecordSerializer.SerializationResult.FULL_RECORD, serializer.setNextBuffer(buffer));
    } catch (IOException e) {
        // Fail immediately instead of swallowing the exception: continuing after a failed
        // setNextBuffer would make the later assertions report a misleading cause.
        e.printStackTrace();
        Assert.fail(e.getMessage());
    }
    try {
        // A record that serializes to zero bytes; all unused callbacks throw to guarantee
        // the serializer only ever touches write()/read() during this test.
        SerializationTestType emptyRecord = new SerializationTestType() {
            @Override
            public SerializationTestType getRandom(Random rnd) {
                throw new UnsupportedOperationException();
            }

            @Override
            public int length() {
                throw new UnsupportedOperationException();
            }

            @Override
            public void write(DataOutputView out) {
            }

            @Override
            public void read(DataInputView in) {
            }

            @Override
            public int hashCode() {
                throw new UnsupportedOperationException();
            }

            @Override
            public boolean equals(Object obj) {
                throw new UnsupportedOperationException();
            }
        };

        // Each empty record still consumes the per-record length header; the first two fit
        // into the 11-byte segment, the third overflows it.
        RecordSerializer.SerializationResult result = serializer.addRecord(emptyRecord);
        Assert.assertEquals(RecordSerializer.SerializationResult.FULL_RECORD, result);

        result = serializer.addRecord(emptyRecord);
        Assert.assertEquals(RecordSerializer.SerializationResult.FULL_RECORD, result);

        result = serializer.addRecord(emptyRecord);
        Assert.assertEquals(RecordSerializer.SerializationResult.PARTIAL_RECORD_MEMORY_SEGMENT_FULL, result);

        // Providing a fresh buffer completes the spilled record.
        result = serializer.setNextBuffer(buffer);
        Assert.assertEquals(RecordSerializer.SerializationResult.FULL_RECORD, result);
    } catch (Exception e) {
        e.printStackTrace();
        Assert.fail(e.getMessage());
    }
}
Use of org.apache.flink.core.memory.DataOutputView in the Apache Flink project: the testWriteKeyGroupTracking method of the KeyedStateCheckpointOutputStreamTest class.
@Test
public void testWriteKeyGroupTracking() throws Exception {
    final KeyGroupRange range = new KeyGroupRange(0, 2);
    KeyedStateCheckpointOutputStream out = createStream(range);

    // Starting a key group outside the assigned range must be rejected up front.
    try {
        out.startNewKeyGroup(4711);
        Assert.fail();
    } catch (IllegalArgumentException expected) {
        // expected
    }
    Assert.assertEquals(-1, out.getCurrentKeyGroup());

    DataOutputView view = new DataOutputViewStreamWrapper(out);

    // Walk the range in order, checking started/finished bookkeeping before and after
    // each startNewKeyGroup call, and for the previously started group.
    int lastStarted = -1;
    for (int keyGroup : range) {
        Assert.assertFalse(out.isKeyGroupAlreadyStarted(keyGroup));
        Assert.assertFalse(out.isKeyGroupAlreadyFinished(keyGroup));

        out.startNewKeyGroup(keyGroup);

        if (lastStarted != -1) {
            // Starting a new group finalizes the one before it.
            Assert.assertTrue(out.isKeyGroupAlreadyStarted(lastStarted));
            Assert.assertTrue(out.isKeyGroupAlreadyFinished(lastStarted));
        }
        Assert.assertTrue(out.isKeyGroupAlreadyStarted(keyGroup));
        Assert.assertFalse(out.isKeyGroupAlreadyFinished(keyGroup));

        view.writeInt(keyGroup);
        lastStarted = keyGroup;
    }

    KeyGroupsStateHandle handle = out.closeAndGetHandle();
    verifyRead(handle, range);

    // Once the stream is closed, starting any key group must fail with an IOException.
    for (int keyGroup : range) {
        try {
            out.startNewKeyGroup(keyGroup);
            Assert.fail();
        } catch (IOException expected) {
            // expected
        }
    }
}
Use of org.apache.flink.core.memory.DataOutputView in the Apache Flink project: the testReadWriteMissingKeyGroups method of the KeyedStateCheckpointOutputStreamTest class.
@Test
public void testReadWriteMissingKeyGroups() throws Exception {
    final KeyGroupRange range = new KeyGroupRange(0, 2);
    KeyedStateCheckpointOutputStream out = createStream(range);
    DataOutputView view = new DataOutputViewStreamWrapper(out);

    // Write data for a single key group only; the other groups in the range stay empty.
    out.startNewKeyGroup(1);
    view.writeInt(1);

    KeyGroupsStateHandle handle = out.closeAndGetHandle();

    // Read back: exactly one key group should have a valid (non-negative) offset,
    // and its payload must be the int written above.
    int found = 0;
    try (FSDataInputStream in = handle.openInputStream()) {
        DataInputView input = new DataInputViewStreamWrapper(in);
        for (int keyGroup : handle.keyGroups()) {
            long offset = handle.getOffsetForKeyGroup(keyGroup);
            if (offset >= 0) {
                in.seek(offset);
                Assert.assertEquals(1, input.readInt());
                ++found;
            }
        }
    }
    Assert.assertEquals(1, found);
}
Aggregations