Usage example of org.apache.flink.runtime.io.network.serialization.types.LargeObjectType in the Apache Flink project.
From the class LargeRecordsTest, method testHandleMixedLargeRecords:
@Test
public void testHandleMixedLargeRecords() throws Exception {
    // Interleaves small (IntType) and large (LargeObjectType) records so that both
    // the in-segment fast path and the multi-buffer spanning path of the
    // serializer/deserializer pair are exercised.
    final int NUM_RECORDS = 99;
    final int SEGMENT_SIZE = 32 * 1024;

    final RecordSerializer<SerializationTestType> serializer = new SpanningRecordSerializer<>();
    final RecordDeserializer<SerializationTestType> deserializer = new AdaptiveSpanningRecordDeserializer<>();

    final Buffer buffer = new Buffer(MemorySegmentFactory.allocateUnpooledSegment(SEGMENT_SIZE), mock(BufferRecycler.class));

    // Both lists hold all NUM_RECORDS entries (small and large alternate), so
    // presize them to the full count.
    List<SerializationTestType> originalRecords = new ArrayList<>(NUM_RECORDS);
    List<SerializationTestType> deserializedRecords = new ArrayList<>(NUM_RECORDS);

    LargeObjectType genLarge = new LargeObjectType();
    Random rnd = new Random();

    for (int i = 0; i < NUM_RECORDS; i++) {
        if (i % 2 == 0) {
            originalRecords.add(new IntType(42));
            deserializedRecords.add(new IntType());
        } else {
            originalRecords.add(genLarge.getRandom(rnd));
            deserializedRecords.add(new LargeObjectType());
        }
    }

    // -------------------------------------------------------------------------------------------------------------

    serializer.setNextBuffer(buffer);
    int numRecordsDeserialized = 0;

    for (SerializationTestType record : originalRecords) {
        // serialize record
        if (serializer.addRecord(record).isFullBuffer()) {
            // buffer is full => hand it to the deserializer
            deserializer.setNextMemorySegment(serializer.getCurrentBuffer().getMemorySegment(), SEGMENT_SIZE);

            // deserialize as many complete records as are available
            while (numRecordsDeserialized < deserializedRecords.size()) {
                SerializationTestType next = deserializedRecords.get(numRecordsDeserialized);
                if (deserializer.getNextRecord(next).isFullRecord()) {
                    assertEquals(originalRecords.get(numRecordsDeserialized), next);
                    numRecordsDeserialized++;
                } else {
                    break;
                }
            }

            // move buffers as long as necessary (a large record can span several segments)
            while (serializer.setNextBuffer(buffer).isFullBuffer()) {
                deserializer.setNextMemorySegment(serializer.getCurrentBuffer().getMemorySegment(), SEGMENT_SIZE);
            }

            // deserialize the records that became complete with the extra segments
            while (numRecordsDeserialized < deserializedRecords.size()) {
                SerializationTestType next = deserializedRecords.get(numRecordsDeserialized);
                if (deserializer.getNextRecord(next).isFullRecord()) {
                    assertEquals(originalRecords.get(numRecordsDeserialized), next);
                    numRecordsDeserialized++;
                } else {
                    break;
                }
            }
        }
    }

    // move the last (incomplete) buffer
    Buffer last = serializer.getCurrentBuffer();
    deserializer.setNextMemorySegment(last.getMemorySegment(), last.getSize());
    serializer.clear();

    // every remaining record must now deserialize completely
    while (numRecordsDeserialized < deserializedRecords.size()) {
        SerializationTestType next = deserializedRecords.get(numRecordsDeserialized);
        assertTrue(deserializer.getNextRecord(next).isFullRecord());
        assertEquals(originalRecords.get(numRecordsDeserialized), next);
        numRecordsDeserialized++;
    }

    // nothing may be left over on either side once all records round-tripped
    assertFalse(serializer.hasData());
    assertFalse(deserializer.hasUnfinishedData());
}
Usage example of org.apache.flink.runtime.io.network.serialization.types.LargeObjectType in the Apache Flink project.
From the class SpanningRecordSerializationTest, method testHandleMixedLargeRecords:
@Test
public void testHandleMixedLargeRecords() throws Exception {
    // Builds an alternating sequence of small and large records and round-trips
    // it through the serialization stack via the shared helper.
    final int recordCount = 99;
    final int bufferSize = 32 * 1024;

    final LargeObjectType largeGenerator = new LargeObjectType();
    final Random random = new Random();

    final List<SerializationTestType> records = new ArrayList<>(recordCount);
    for (int i = 0; i < recordCount; i++) {
        // even positions: a fixed small int record; odd positions: a random large record
        records.add(i % 2 == 0 ? new IntType(42) : largeGenerator.getRandom(random));
    }

    testSerializationRoundTrip(records, bufferSize);
}
Usage example of org.apache.flink.runtime.io.network.serialization.types.LargeObjectType in the Apache Flink project.
From the class LargeRecordsTest, method testHandleMixedLargeRecordsSpillingAdaptiveSerializer:
@Test
public void testHandleMixedLargeRecordsSpillingAdaptiveSerializer() throws Exception {
    // Same mixed small/large record round trip as testHandleMixedLargeRecords, but
    // against the spilling deserializer, which spills oversized spanning records to
    // temp files (java.io.tmpdir) instead of holding them fully in memory.
    final int NUM_RECORDS = 99;
    final int SEGMENT_SIZE = 32 * 1024;

    final RecordSerializer<SerializationTestType> serializer = new SpanningRecordSerializer<>();
    final RecordDeserializer<SerializationTestType> deserializer =
            new SpillingAdaptiveSpanningRecordDeserializer<>(new String[] { System.getProperty("java.io.tmpdir") });

    final Buffer buffer = new Buffer(MemorySegmentFactory.allocateUnpooledSegment(SEGMENT_SIZE), mock(BufferRecycler.class));

    // Both lists hold all NUM_RECORDS entries (small and large alternate), so
    // presize them to the full count.
    List<SerializationTestType> originalRecords = new ArrayList<>(NUM_RECORDS);
    List<SerializationTestType> deserializedRecords = new ArrayList<>(NUM_RECORDS);

    LargeObjectType genLarge = new LargeObjectType();
    Random rnd = new Random();

    for (int i = 0; i < NUM_RECORDS; i++) {
        if (i % 2 == 0) {
            originalRecords.add(new IntType(42));
            deserializedRecords.add(new IntType());
        } else {
            originalRecords.add(genLarge.getRandom(rnd));
            deserializedRecords.add(new LargeObjectType());
        }
    }

    // -------------------------------------------------------------------------------------------------------------

    serializer.setNextBuffer(buffer);
    int numRecordsDeserialized = 0;

    for (SerializationTestType record : originalRecords) {
        // serialize record
        if (serializer.addRecord(record).isFullBuffer()) {
            // buffer is full => hand it to the deserializer
            deserializer.setNextMemorySegment(serializer.getCurrentBuffer().getMemorySegment(), SEGMENT_SIZE);

            // deserialize as many complete records as are available
            while (numRecordsDeserialized < deserializedRecords.size()) {
                SerializationTestType next = deserializedRecords.get(numRecordsDeserialized);
                if (deserializer.getNextRecord(next).isFullRecord()) {
                    assertEquals(originalRecords.get(numRecordsDeserialized), next);
                    numRecordsDeserialized++;
                } else {
                    break;
                }
            }

            // move buffers as long as necessary (a large record can span several segments)
            while (serializer.setNextBuffer(buffer).isFullBuffer()) {
                deserializer.setNextMemorySegment(serializer.getCurrentBuffer().getMemorySegment(), SEGMENT_SIZE);
            }

            // deserialize the records that became complete with the extra segments
            while (numRecordsDeserialized < deserializedRecords.size()) {
                SerializationTestType next = deserializedRecords.get(numRecordsDeserialized);
                if (deserializer.getNextRecord(next).isFullRecord()) {
                    assertEquals(originalRecords.get(numRecordsDeserialized), next);
                    numRecordsDeserialized++;
                } else {
                    break;
                }
            }
        }
    }

    // move the last (incomplete) buffer
    Buffer last = serializer.getCurrentBuffer();
    deserializer.setNextMemorySegment(last.getMemorySegment(), last.getSize());
    serializer.clear();

    // every remaining record must now deserialize completely
    while (numRecordsDeserialized < deserializedRecords.size()) {
        SerializationTestType next = deserializedRecords.get(numRecordsDeserialized);
        assertTrue(deserializer.getNextRecord(next).isFullRecord());
        assertEquals(originalRecords.get(numRecordsDeserialized), next);
        numRecordsDeserialized++;
    }

    // nothing may be left over on either side once all records round-tripped
    assertFalse(serializer.hasData());
    assertFalse(deserializer.hasUnfinishedData());
}
Aggregations