Use of org.apache.flink.runtime.io.network.buffer.Buffer in project flink by apache.
From the class SpanningRecordSerializationTest, method test.
/**
* Iterates over the provided records and tests whether {@link SpanningRecordSerializer} and {@link AdaptiveSpanningRecordDeserializer}
* interact as expected.
* <p>
* Only a single {@link MemorySegment} will be allocated.
*
* @param records records to test
* @param segmentSize size for the {@link MemorySegment}
*/
private void test(Util.MockRecords records, int segmentSize,
        RecordSerializer<SerializationTestType> serializer,
        RecordDeserializer<SerializationTestType> deserializer) throws Exception {

    // length encoding
    final int SERIALIZATION_OVERHEAD = 4;

    final Buffer buffer = new Buffer(MemorySegmentFactory.allocateUnpooledSegment(segmentSize), mock(BufferRecycler.class));
    final ArrayDeque<SerializationTestType> serializedRecords = new ArrayDeque<SerializationTestType>();

    // -------------------------------------------------------------------------------------------------------------

    serializer.setNextBuffer(buffer);

    int numBytes = 0;
    int numRecords = 0;
    for (SerializationTestType record : records) {
        serializedRecords.add(record);
        numRecords++;
        numBytes += record.length() + SERIALIZATION_OVERHEAD;

        // serialize record
        if (serializer.addRecord(record).isFullBuffer()) {
            // buffer is full => start deserializing
            deserializer.setNextMemorySegment(serializer.getCurrentBuffer().getMemorySegment(), segmentSize);

            while (!serializedRecords.isEmpty()) {
                SerializationTestType expected = serializedRecords.poll();
                SerializationTestType actual = expected.getClass().newInstance();

                if (deserializer.getNextRecord(actual).isFullRecord()) {
                    Assert.assertEquals(expected, actual);
                    numRecords--;
                } else {
                    serializedRecords.addFirst(expected);
                    break;
                }
            }

            while (serializer.setNextBuffer(buffer).isFullBuffer()) {
                deserializer.setNextMemorySegment(serializer.getCurrentBuffer().getMemorySegment(), segmentSize);
            }
        }
    }

    // deserialize left over records
    deserializer.setNextMemorySegment(serializer.getCurrentBuffer().getMemorySegment(), (numBytes % segmentSize));
    serializer.clear();

    while (!serializedRecords.isEmpty()) {
        SerializationTestType expected = serializedRecords.poll();

        SerializationTestType actual = expected.getClass().newInstance();
        RecordDeserializer.DeserializationResult result = deserializer.getNextRecord(actual);

        Assert.assertTrue(result.isFullRecord());
        Assert.assertEquals(expected, actual);
        numRecords--;
    }

    // assert that all records have been serialized and deserialized
    Assert.assertEquals(0, numRecords);
    Assert.assertFalse(serializer.hasData());
    Assert.assertFalse(deserializer.hasUnfinishedData());
}
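For orientation, a stripped-down sketch of the same serialize/deserialize round trip for a single record, using only the calls exercised above. It assumes the pre-1.5 Buffer constructor and a no-argument AdaptiveSpanningRecordDeserializer constructor; the 32-byte segment size and the roundTrip method name are placeholders for illustration, not project code.

// A minimal single-record round trip (sketch, not the project's test code).
// 'record' is whatever SerializationTestType instance the caller supplies.
private void roundTrip(SerializationTestType record) throws Exception {
    RecordSerializer<SerializationTestType> serializer = new SpanningRecordSerializer<SerializationTestType>();
    RecordDeserializer<SerializationTestType> deserializer = new AdaptiveSpanningRecordDeserializer<SerializationTestType>();

    Buffer buffer = new Buffer(
            MemorySegmentFactory.allocateUnpooledSegment(32), mock(BufferRecycler.class));

    serializer.setNextBuffer(buffer);
    serializer.addRecord(record); // assume record.length() + the 4-byte length header fits in 32 bytes

    // hand the written segment to the deserializer and read the record back
    deserializer.setNextMemorySegment(serializer.getCurrentBuffer().getMemorySegment(), 32);

    SerializationTestType copy = record.getClass().newInstance();
    Assert.assertTrue(deserializer.getNextRecord(copy).isFullRecord());
    Assert.assertEquals(record, copy);
}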
Use of org.apache.flink.runtime.io.network.buffer.Buffer in project flink by apache.
From the class SpanningRecordSerializerTest, method test.
// -----------------------------------------------------------------------------------------------------------------
/**
* Iterates over the provided records and tests whether the {@link SpanningRecordSerializer} returns the expected
* {@link RecordSerializer.SerializationResult} values.
* <p>
* Only a single {@link MemorySegment} will be allocated.
*
* @param records records to test
* @param segmentSize size for the {@link MemorySegment}
*/
private void test(Util.MockRecords records, int segmentSize) throws Exception {
    // length encoding
    final int SERIALIZATION_OVERHEAD = 4;

    final SpanningRecordSerializer<SerializationTestType> serializer = new SpanningRecordSerializer<SerializationTestType>();
    final Buffer buffer = new Buffer(MemorySegmentFactory.allocateUnpooledSegment(segmentSize), mock(BufferRecycler.class));

    // -------------------------------------------------------------------------------------------------------------

    serializer.setNextBuffer(buffer);

    int numBytes = 0;
    for (SerializationTestType record : records) {
        RecordSerializer.SerializationResult result = serializer.addRecord(record);
        numBytes += record.length() + SERIALIZATION_OVERHEAD;

        if (numBytes < segmentSize) {
            Assert.assertEquals(RecordSerializer.SerializationResult.FULL_RECORD, result);
        } else if (numBytes == segmentSize) {
            Assert.assertEquals(RecordSerializer.SerializationResult.FULL_RECORD_MEMORY_SEGMENT_FULL, result);
            serializer.setNextBuffer(buffer);
            numBytes = 0;
        } else {
            Assert.assertEquals(RecordSerializer.SerializationResult.PARTIAL_RECORD_MEMORY_SEGMENT_FULL, result);

            while (result.isFullBuffer()) {
                numBytes -= segmentSize;
                result = serializer.setNextBuffer(buffer);
            }
        }
    }
}
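To make the three assertions concrete: every record costs record.length() plus the 4-byte length header, so with a 16-byte segment a 4-byte record leaves room (FULL_RECORD), a 12-byte record fills the segment exactly (12 + 4 = 16, FULL_RECORD_MEMORY_SEGMENT_FULL), and anything larger spills into the next buffer (PARTIAL_RECORD_MEMORY_SEGMENT_FULL). A minimal sketch of the exact-fit case, assuming the same pre-1.5 API as above; the sizes and the method name are illustrative only.

// Sketch: an exactly-full segment yields FULL_RECORD_MEMORY_SEGMENT_FULL (not project test code).
// Assumes 'record' reports record.length() == 12, so 12 bytes payload + 4 bytes header == 16 bytes.
private void illustrateFullSegment(SerializationTestType record) throws Exception {
    SpanningRecordSerializer<SerializationTestType> serializer =
            new SpanningRecordSerializer<SerializationTestType>();
    Buffer buffer = new Buffer(
            MemorySegmentFactory.allocateUnpooledSegment(16), mock(BufferRecycler.class));
    serializer.setNextBuffer(buffer);

    RecordSerializer.SerializationResult result = serializer.addRecord(record);
    Assert.assertEquals(RecordSerializer.SerializationResult.FULL_RECORD_MEMORY_SEGMENT_FULL, result);
}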
Use of org.apache.flink.runtime.io.network.buffer.Buffer in project flink by apache.
From the class RecordWriterTest, method testSerializerClearedAfterClearBuffers.
@Test
public void testSerializerClearedAfterClearBuffers() throws Exception {
    final Buffer buffer = TestBufferFactory.createBuffer(16);

    ResultPartitionWriter partitionWriter = createResultPartitionWriter(createBufferProvider(buffer));

    RecordWriter<IntValue> recordWriter = new RecordWriter<IntValue>(partitionWriter);

    // Fill a buffer, but don't write it out.
    recordWriter.emit(new IntValue(0));
    verify(partitionWriter, never()).writeBuffer(any(Buffer.class), anyInt());

    // Clear all buffers.
    recordWriter.clearBuffers();

    // This should not throw an Exception iff the serializer state
    // has been cleared as expected.
    recordWriter.flush();
}
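The behaviour verified here matters on the error path: if emitting fails part-way through a record, the caller is expected to clear the writer before flushing or reusing it. A hedged sketch of that caller-side pattern, using only the emit(), flush(), and clearBuffers() calls shown above; the surrounding try/catch and method name are illustrative, not taken from the project.

// Hypothetical caller-side pattern this test protects: if emitting fails part-way,
// clearBuffers() drops the partially serialized record so the writer stays usable.
private void emitGuarded(RecordWriter<IntValue> recordWriter) throws Exception {
    try {
        recordWriter.emit(new IntValue(42));
        recordWriter.flush();
    } catch (Exception e) {
        recordWriter.clearBuffers();
        throw e;
    }
}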
Use of org.apache.flink.runtime.io.network.buffer.Buffer in project flink by apache.
From the class RecordWriterTest, method testBroadcastEventBufferReferenceCounting.
/**
* Tests that event buffers are properly recycled when broadcasting events
* to multiple channels.
*
* @throws Exception
*/
@Test
public void testBroadcastEventBufferReferenceCounting() throws Exception {
    Buffer buffer = EventSerializer.toBuffer(EndOfPartitionEvent.INSTANCE);

    // Partial mocking of static method...
    PowerMockito.stub(PowerMockito.method(EventSerializer.class, "toBuffer")).toReturn(buffer);

    @SuppressWarnings("unchecked")
    ArrayDeque<BufferOrEvent>[] queues = new ArrayDeque[] { new ArrayDeque(), new ArrayDeque() };

    ResultPartitionWriter partition = createCollectingPartitionWriter(queues, new TestInfiniteBufferProvider());
    RecordWriter<?> writer = new RecordWriter<>(partition);

    writer.broadcastEvent(EndOfPartitionEvent.INSTANCE);

    // Verify added to all queues
    assertEquals(1, queues[0].size());
    assertEquals(1, queues[1].size());

    assertTrue(buffer.isRecycled());
}
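The final assertion leans on the Buffer reference count: the same event buffer is handed to every channel, and it only reports isRecycled() once every reference has been released. A sketch of that counting, assuming the pre-1.5 Buffer API with retain()/recycle(); the explicit calls below stand in for what the writer and the consuming channels would do internally and are not project code.

// Sketch of the reference counting behind the assertion above (assumed API, not project code).
@Test
public void sketchEventBufferRecycling() throws Exception {
    Buffer eventBuffer = EventSerializer.toBuffer(EndOfPartitionEvent.INSTANCE);
    eventBuffer.retain();                   // a second channel takes a reference

    eventBuffer.recycle();                  // first channel releases its reference
    assertFalse(eventBuffer.isRecycled());  // still held by the second channel

    eventBuffer.recycle();                  // last reference released
    assertTrue(eventBuffer.isRecycled());
}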
Use of org.apache.flink.runtime.io.network.buffer.Buffer in project flink by apache.
From the class RecordWriterTest, method createBufferProvider.
private BufferProvider createBufferProvider(final int bufferSize) throws IOException, InterruptedException {
    BufferProvider bufferProvider = mock(BufferProvider.class);

    when(bufferProvider.requestBufferBlocking()).thenAnswer(new Answer<Buffer>() {

        @Override
        public Buffer answer(InvocationOnMock invocationOnMock) throws Throwable {
            MemorySegment segment = MemorySegmentFactory.allocateUnpooledSegment(bufferSize);
            Buffer buffer = new Buffer(segment, DiscardingRecycler.INSTANCE);
            return buffer;
        }
    });

    return bufferProvider;
}
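For completeness, a sketch of how this mocked provider could be exercised from the same test class. The 32-byte size and the method name are arbitrary, and the assertion assumes MemorySegment.size() as in the pre-1.5 API.

// Sketch only: request a buffer from the mock and check the allocated segment size.
@Test
public void sketchRequestFromMockedProvider() throws Exception {
    BufferProvider provider = createBufferProvider(32);
    Buffer requested = provider.requestBufferBlocking();

    // each answer above allocates a fresh unpooled segment of the requested size
    assertEquals(32, requested.getMemorySegment().size());
}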