Use of org.apache.flink.core.memory.DataInputViewStreamWrapper in project flink by apache.
The class AbstractFsStateSnapshot, method deserialize.
@Override
@SuppressWarnings("unchecked")
public StateTable<K, N, SV> deserialize(String stateName, HeapKeyedStateBackend<K> stateBackend) throws IOException {

    final FileSystem fs = getFilePath().getFileSystem();

    try (FSDataInputStream inStream = fs.open(getFilePath())) {
        final DataInputViewStreamWrapper inView = new DataInputViewStreamWrapper(inStream);

        AbstractMigrationRestoreStrategy<K, N, SV> restoreStrategy =
                new AbstractMigrationRestoreStrategy<K, N, SV>(keySerializer, namespaceSerializer, stateSerializer) {

                    @Override
                    protected DataInputView openDataInputView() throws IOException {
                        return inView;
                    }
                };

        return restoreStrategy.deserialize(stateName, stateBackend);
    }
}
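The method above adapts Flink's FSDataInputStream to the DataInputView abstraction. The same wrapper pair works over any plain Java stream; a minimal, self-contained sketch (class name hypothetical, only flink-core required):

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;

import org.apache.flink.core.memory.DataInputViewStreamWrapper;
import org.apache.flink.core.memory.DataOutputViewStreamWrapper;

public class StreamWrapperRoundTrip {

    public static void main(String[] args) throws IOException {
        // write through a DataOutputView backed by an ordinary OutputStream
        ByteArrayOutputStream out = new ByteArrayOutputStream();
        try (DataOutputViewStreamWrapper outView = new DataOutputViewStreamWrapper(out)) {
            outView.writeUTF("hello");
            outView.writeInt(42);
        }

        // read back through a DataInputView backed by an ordinary InputStream
        try (DataInputViewStreamWrapper inView =
                new DataInputViewStreamWrapper(new ByteArrayInputStream(out.toByteArray()))) {
            System.out.println(inView.readUTF()); // hello
            System.out.println(inView.readInt()); // 42
        }
    }
}

Closing the wrapper closes the underlying stream, which is why the try-with-resources blocks above suffice.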
Use of org.apache.flink.core.memory.DataInputViewStreamWrapper in project flink by apache.
The class IterationEventWithAggregators, method getAggregates.
public Value[] getAggregates(ClassLoader classResolver) {
    if (aggregates == null) {
        // we have read the binary data, but have not yet turned it into the aggregator objects
        final int num = aggNames.length;
        aggregates = new Value[num];

        for (int i = 0; i < num; i++) {
            Value v;
            try {
                Class<? extends Value> valClass =
                        Class.forName(classNames[i], true, classResolver).asSubclass(Value.class);
                v = InstantiationUtil.instantiate(valClass, Value.class);
            } catch (ClassNotFoundException e) {
                throw new RuntimeException("Could not load user-defined class '" + classNames[i] + "'.", e);
            } catch (ClassCastException e) {
                throw new RuntimeException("User-defined aggregator class is not a Value subclass.", e);
            }

            try (DataInputViewStreamWrapper in =
                    new DataInputViewStreamWrapper(new ByteArrayInputStream(serializedData[i]))) {
                v.read(in);
            } catch (IOException e) {
                throw new RuntimeException("Error while deserializing the user-defined aggregate class.", e);
            }

            aggregates[i] = v;
        }
    }
    return this.aggregates;
}
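The v.read(in) call relies on the org.apache.flink.types.Value contract: a nullary constructor plus write/read methods against the view abstractions. A minimal sketch of such a user-defined aggregate value (hypothetical class, for illustration):

import java.io.IOException;

import org.apache.flink.core.memory.DataInputView;
import org.apache.flink.core.memory.DataOutputView;
import org.apache.flink.types.Value;

// hypothetical aggregate value, analogous to the user-defined classes loaded above
public class LongSumValue implements Value {

    private static final long serialVersionUID = 1L;

    private long sum;

    public LongSumValue() {} // Value implementations need a public nullary constructor

    public long getSum() {
        return sum;
    }

    @Override
    public void write(DataOutputView out) throws IOException {
        out.writeLong(sum);
    }

    @Override
    public void read(DataInputView in) throws IOException {
        sum = in.readLong();
    }
}

A class like this can be instantiated reflectively with InstantiationUtil.instantiate(valClass, Value.class) and populated via read(...), exactly as in the loop above.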
Use of org.apache.flink.core.memory.DataInputViewStreamWrapper in project flink by apache.
The class AvroRecordInputFormatTest, method testDeserializeToSpecificType.
/**
 * This test validates proper serialization with specific (generated POJO) types.
 */
@Test
public void testDeserializeToSpecificType() throws IOException {

    DatumReader<User> datumReader = new SpecificDatumReader<User>(userSchema);

    try (FileReader<User> dataFileReader = DataFileReader.openReader(testFile, datumReader)) {
        User rec = dataFileReader.next();

        // check if record has been read correctly
        assertNotNull(rec);
        assertEquals("name not equal", TEST_NAME, rec.get("name").toString());
        assertEquals("enum not equal", TEST_ENUM_COLOR.toString(), rec.get("type_enum").toString());

        // now serialize it with our framework:
        ExecutionConfig ec = new ExecutionConfig();
        TypeInformation<User> te = TypeExtractor.createTypeInfo(User.class);
        Assert.assertEquals(AvroTypeInfo.class, te.getClass());

        TypeSerializer<User> tser = te.createSerializer(ec);

        ByteArrayOutputStream out = new ByteArrayOutputStream();
        try (DataOutputViewStreamWrapper outView = new DataOutputViewStreamWrapper(out)) {
            tser.serialize(rec, outView);
        }

        User newRec;
        try (DataInputViewStreamWrapper inView =
                new DataInputViewStreamWrapper(new ByteArrayInputStream(out.toByteArray()))) {
            newRec = tser.deserialize(inView);
        }

        // check if it is still the same
        assertNotNull(newRec);
        assertEquals("name not equal", TEST_NAME, newRec.getName().toString());
        assertEquals("enum not equal", TEST_ENUM_COLOR.toString(), newRec.getTypeEnum().toString());
    }
}
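The serialize-then-deserialize steps in this test form a recurring pattern; a small generic helper (a sketch, not part of Flink's API) captures it once:

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;

import org.apache.flink.api.common.typeutils.TypeSerializer;
import org.apache.flink.core.memory.DataInputViewStreamWrapper;
import org.apache.flink.core.memory.DataOutputViewStreamWrapper;

public final class SerializerRoundTrip {

    // hypothetical helper: serialize a record to bytes and immediately deserialize a copy
    public static <T> T roundTrip(TypeSerializer<T> serializer, T record) throws IOException {
        ByteArrayOutputStream out = new ByteArrayOutputStream();
        try (DataOutputViewStreamWrapper outView = new DataOutputViewStreamWrapper(out)) {
            serializer.serialize(record, outView);
        }
        try (DataInputViewStreamWrapper inView =
                new DataInputViewStreamWrapper(new ByteArrayInputStream(out.toByteArray()))) {
            return serializer.deserialize(inView);
        }
    }

    private SerializerRoundTrip() {}
}

With it, the middle of the test reduces to User newRec = SerializerRoundTrip.roundTrip(tser, rec);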
Use of org.apache.flink.core.memory.DataInputViewStreamWrapper in project flink by apache.
The class AvroRecordInputFormatTest, method testDeserializeToGenericType.
/**
 * Test if the Flink serialization is able to properly process GenericData.Record types.
 * Usually, users of Avro generate classes (POJOs) from Avro schemas.
 * However, if generated classes are not available, one can also use GenericData.Record.
 * It is an untyped key-value record which uses a schema to validate the correctness of the data.
 *
 * It is not recommended to use GenericData.Record with Flink. Use generated POJOs instead.
 */
@Test
public void testDeserializeToGenericType() throws IOException {

    DatumReader<GenericData.Record> datumReader = new GenericDatumReader<>(userSchema);

    try (FileReader<GenericData.Record> dataFileReader = DataFileReader.openReader(testFile, datumReader)) {
        // initialize the record by reading it from disk (that's easier than creating it by hand)
        GenericData.Record rec = new GenericData.Record(userSchema);
        dataFileReader.next(rec);

        // check if record has been read correctly
        assertNotNull(rec);
        assertEquals("name not equal", TEST_NAME, rec.get("name").toString());
        assertEquals("enum not equal", TEST_ENUM_COLOR.toString(), rec.get("type_enum").toString());
        // it is null for the first record
        assertEquals(null, rec.get("type_long_test"));

        // now serialize it with our framework:
        TypeInformation<GenericData.Record> te = TypeExtractor.createTypeInfo(GenericData.Record.class);
        ExecutionConfig ec = new ExecutionConfig();
        Assert.assertEquals(GenericTypeInfo.class, te.getClass());

        Serializers.recursivelyRegisterType(te.getTypeClass(), ec, new HashSet<Class<?>>());

        TypeSerializer<GenericData.Record> tser = te.createSerializer(ec);
        Assert.assertEquals(1, ec.getDefaultKryoSerializerClasses().size());
        Assert.assertTrue(
                ec.getDefaultKryoSerializerClasses().containsKey(Schema.class)
                        && ec.getDefaultKryoSerializerClasses().get(Schema.class).equals(Serializers.AvroSchemaSerializer.class));

        ByteArrayOutputStream out = new ByteArrayOutputStream();
        try (DataOutputViewStreamWrapper outView = new DataOutputViewStreamWrapper(out)) {
            tser.serialize(rec, outView);
        }

        GenericData.Record newRec;
        try (DataInputViewStreamWrapper inView =
                new DataInputViewStreamWrapper(new ByteArrayInputStream(out.toByteArray()))) {
            newRec = tser.deserialize(inView);
        }

        // check if it is still the same
        assertNotNull(newRec);
        assertEquals("enum not equal", TEST_ENUM_COLOR.toString(), newRec.get("type_enum").toString());
        assertEquals("name not equal", TEST_NAME, newRec.get("name").toString());
        assertEquals(null, newRec.get("type_long_test"));
    }
}
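The two assertions on getDefaultKryoSerializerClasses() verify what recursivelyRegisterType registered for Avro's Schema class. For reference, the equivalent manual registration would look like the following sketch (assuming the same Flink version as the test excerpt):

import org.apache.avro.Schema;
import org.apache.flink.api.common.ExecutionConfig;
import org.apache.flink.api.java.typeutils.runtime.kryo.Serializers;

public class ManualAvroSchemaRegistration {

    public static void main(String[] args) {
        ExecutionConfig ec = new ExecutionConfig();
        // register the Kryo serializer for Avro's Schema class by hand,
        // instead of relying on Serializers.recursivelyRegisterType(...)
        ec.addDefaultKryoSerializer(Schema.class, Serializers.AvroSchemaSerializer.class);
    }
}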
Use of org.apache.flink.core.memory.DataInputViewStreamWrapper in project flink by apache.
The class RocksDBAggregatingState, method get.
@Override
public R get() throws IOException {
    try {
        // prepare the current key and namespace for the RocksDB lookup
        writeCurrentKeyWithGroupAndNamespace();
        final byte[] key = keySerializationStream.toByteArray();

        // get the current value
        final byte[] valueBytes = backend.db.get(columnFamily, key);
        if (valueBytes == null) {
            return null;
        }

        ACC accumulator = valueSerializer.deserialize(
                new DataInputViewStreamWrapper(new ByteArrayInputStreamWithPos(valueBytes)));
        return aggFunction.getResult(accumulator);
    } catch (IOException | RocksDBException e) {
        throw new IOException("Error while retrieving value from RocksDB", e);
    }
}
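The deserialization step at the end is independent of RocksDB itself; a minimal sketch that reproduces it in isolation (hypothetical helper, assuming a TypeSerializer for the stored accumulator type):

import java.io.IOException;

import org.apache.flink.api.common.typeutils.TypeSerializer;
import org.apache.flink.core.memory.ByteArrayInputStreamWithPos;
import org.apache.flink.core.memory.DataInputViewStreamWrapper;

public final class RocksDbValueDecoder {

    // hypothetical helper mirroring the read path above: bytes fetched from RocksDB -> accumulator
    public static <ACC> ACC decode(byte[] valueBytes, TypeSerializer<ACC> valueSerializer) throws IOException {
        if (valueBytes == null) {
            return null; // nothing stored under the key
        }
        return valueSerializer.deserialize(
                new DataInputViewStreamWrapper(new ByteArrayInputStreamWithPos(valueBytes)));
    }

    private RocksDbValueDecoder() {}
}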