Example usage of com.yahoo.io.GrowableByteBuffer in the vespa project (vespa-engine):
class DocumentUpdateTestCase, method testGenerateSerializedFile.
/**
 * Builds a DocumentUpdate containing an assign, a clear-field and an add-all field
 * update, serializes it, and writes the raw bytes to a data file consumed by the
 * cross-language (C++) deserialization tests.
 *
 * @throws IOException if the output file cannot be created or written
 */
public void testGenerateSerializedFile() throws IOException {
    docMan = DocumentTestCase.setUpCppDocType();
    DocumentType type = docMan.getDocumentType("serializetest");
    DocumentUpdate upd = new DocumentUpdate(type, new DocumentId("doc:update:test"));

    // Assign a new value to an integer field.
    FieldUpdate serAssign = FieldUpdate.createAssign(type.getField("intfield"), new IntegerFieldValue(4));
    upd.addFieldUpdate(serAssign);

    // Clear a float field.
    FieldUpdate serClearField = FieldUpdate.createClearField(type.getField("floatfield"));
    upd.addFieldUpdate(serClearField);

    // Append several elements to an array-of-float field.
    List<FloatFieldValue> arrayOfFloat = new ArrayList<>();
    arrayOfFloat.add(new FloatFieldValue(5.00f));
    arrayOfFloat.add(new FloatFieldValue(4.23f));
    arrayOfFloat.add(new FloatFieldValue(-1.00f));
    FieldUpdate serAdd = FieldUpdate.createAddAll(type.getField("arrayoffloatfield"), arrayOfFloat);
    upd.addFieldUpdate(serAdd);

    // Serialize into a growable buffer; position after serialize() is the byte count.
    GrowableByteBuffer buf = new GrowableByteBuffer(100, 2.0f);
    upd.serialize(buf);
    int size = buf.position();
    buf.position(0);

    // try-with-resources guarantees the stream is closed even if write() throws
    // (the original leaked the FileOutputStream on a write failure).
    try (FileOutputStream fos = new FileOutputStream("src/tests/data/serializeupdatejava.dat")) {
        fos.write(buf.array(), 0, size);
    }
}
Example usage of com.yahoo.io.GrowableByteBuffer in the vespa project (vespa-engine):
class VespaDocumentDeserializer42, method read.
/**
 * Deserializes a Document from the current buffer into {@code doc}.
 * Supports serialization versions 6 through {@code Document.SERIALIZED_VERSION};
 * any other version aborts with a DeserializationException.
 *
 * @param field field descriptor for the value being read (unused here)
 * @param doc   the document instance to populate
 * @throws DeserializationException on unsupported version or length mismatch
 */
public void read(FieldBase field, Document doc) {
    // Verify that we have correct version
    version = getShort(null);
    if (version < 6 || version > Document.SERIALIZED_VERSION) {
        throw new DeserializationException("Unknown version " + version + ", expected " + Document.SERIALIZED_VERSION + ".");
    }
    int dataLength = 0;
    int dataPos = 0;
    if (version < 7) {
        // Total document size.. Ignore
        getInt2_4_8Bytes(null);
    } else {
        // Version 7+: declared length of the data that follows; checked against
        // the number of bytes actually consumed at the end of this method.
        dataLength = getInt(null);
        dataPos = position();
    }
    doc.setId(readDocumentId());
    // Content byte is a bit mask: 0x2 => header fields follow inline,
    // 0x4 => body fields follow inline.
    Byte content = getByte(null);
    doc.setDataType(readDocumentType());
    if ((content & 0x2) != 0) {
        doc.getHeader().deserialize(new Field("header"), this);
    }
    if ((content & 0x4) != 0) {
        doc.getBody().deserialize(new Field("body"), this);
    } else if (body != null) {
        // Body is not inline but a separate body buffer was supplied:
        // temporarily swap it in, deserialize the body from it, then restore
        // the original (header) buffer.
        GrowableByteBuffer header = getBuf();
        setBuf(body);
        body = null;
        doc.getBody().deserialize(new Field("body"), this);
        body = getBuf();
        setBuf(header);
    }
    if (version < 8) {
        // Versions < 8 include a CRC; read it to advance the buffer, value is unused.
        int crcVal = getInt(null);
    }
    if (version > 6) {
        // Sanity check: bytes consumed must match the declared data length.
        if (dataLength != (position() - dataPos)) {
            throw new DeserializationException("Length mismatch");
        }
    }
}
Example usage of com.yahoo.io.GrowableByteBuffer in the vespa project (vespa-engine):
class VespaDocumentSerializer42, method getSerializedSize.
/**
* Returns the serialized size of the given {@link Document}. Please note that this method performs actual
* serialization of the document, but simply return the size of the final {@link GrowableByteBuffer}. If you need
* the buffer itself, do NOT use this method.
*
* @param doc The Document whose size to calculate.
* @return The size in bytes.
*/
/**
 * Returns the serialized size of the given {@link Document}. Please note that this method performs
 * actual serialization of the document into a scratch buffer and simply reports the resulting size.
 * If you need the serialized bytes themselves, do NOT use this method; serialize directly instead.
 *
 * @param doc The Document whose size to calculate.
 * @return The size in bytes.
 */
public static long getSerializedSize(Document doc) {
    GrowableByteBuffer scratch = new GrowableByteBuffer();
    DocumentSerializer serializer = new VespaDocumentSerializerHead(scratch);
    serializer.write(doc);
    // The buffer position after write() equals the number of bytes produced.
    return serializer.getBuf().position();
}
Example usage of com.yahoo.io.GrowableByteBuffer in the vespa project (vespa-engine):
class VespaDocumentSerializer42, method write.
/**
* Write out the value of struct field
*
* @param field - field description (name and data type)
* @param s - field value
*/
/**
 * Write out the value of struct field.
 * Serializes all struct fields into a temporary buffer first (lengths must be
 * known before the field table can be written), optionally compresses the
 * result, then emits: total length, compression type, optional uncompressed
 * size, field count, per-field (id, length) table, and finally the field data.
 *
 * @param field - field description (name and data type)
 * @param s - field value
 */
public void write(FieldBase field, Struct s) {
    // Serialize all parts first.. As we need to know length before starting
    // Serialize all the fields.
    // keep the buffer we're serializing everything into:
    GrowableByteBuffer bigBuffer = buf;
    // create a new buffer and serialize into that for a while:
    GrowableByteBuffer buffer = new GrowableByteBuffer(4096, 2.0f);
    buf = buffer;
    List<Integer> fieldIds = new LinkedList<>();
    List<java.lang.Integer> fieldLengths = new LinkedList<>();
    for (Map.Entry<Field, FieldValue> value : s.getFields()) {
        // Record each field's id and serialized byte length for the table below.
        int startPos = buffer.position();
        value.getValue().serialize(value.getKey(), this);
        fieldLengths.add(buffer.position() - startPos);
        fieldIds.add(value.getKey().getId(s.getVersion()));
    }
    // Switch buffers again:
    buffer.flip();
    buf = bigBuffer;
    int uncompressedSize = buffer.remaining();
    // Compress the serialized field data using the struct type's configured compressor.
    Compressor.Compression compression = s.getDataType().getCompressor().compress(buffer.getByteBuffer().array(), buffer.remaining());
    // Actual serialization starts here.
    int lenPos = buf.position();
    // Move back to this after compression is done.
    putInt(null, 0);
    buf.put(compression.type().getCode());
    if (compression.data() != null && compression.type().isCompressed()) {
        // Only compressed payloads carry the uncompressed size.
        buf.putInt2_4_8Bytes(uncompressedSize);
    }
    buf.putInt1_4Bytes(s.getFieldCount());
    // Field table: (id, length) per field, in serialization order.
    for (int i = 0; i < s.getFieldCount(); ++i) {
        putInt1_4Bytes(null, fieldIds.get(i));
        putInt2_4_8Bytes(null, fieldLengths.get(i));
    }
    int pos = buf.position();
    // Emit the payload: compressed bytes if compression succeeded, else the raw buffer.
    if (compression.data() != null && compression.type().isCompressed()) {
        put(null, compression.data());
    } else {
        put(null, buffer.getByteBuffer());
    }
    // Back-patch the length placeholder written at lenPos, then restore position.
    int dataLength = buf.position() - pos;
    int posNow = buf.position();
    buf.position(lenPos);
    putInt(null, dataLength);
    buf.position(posNow);
}
Example usage of com.yahoo.io.GrowableByteBuffer in the vespa project (vespa-engine):
class QueryPacket, method encodeBody.
/**
 * Encodes this query packet's body into the given buffer, recording the byte
 * ranges of reusable fields (rank profile, properties, location, query stack)
 * in {@code queryPacketData} so they can be copied without re-encoding.
 * Several length fields are written as 0 placeholders first and back-patched
 * once the actual encoded length is known.
 *
 * @param buffer destination buffer, positioned where the body should start
 */
public void encodeBody(ByteBuffer buffer) {
    queryPacketData = new QueryPacketData();
    final int relativeZero = buffer.position();
    boolean sendSessionKey = query.getGroupingSessionCache() || query.getRanking().getQueryCache();
    // Feature flag bit mask selects which optional sections follow.
    int featureFlag = getFeatureInt(sendSessionKey);
    buffer.putInt(featureFlag);
    // Track offset/size of the offset+hits+timeout section (may be ignored downstream).
    ignoreableOffset = buffer.position() - relativeZero;
    IntegerCompressor.putCompressedPositiveNumber(getOffset(), buffer);
    IntegerCompressor.putCompressedPositiveNumber(getHits(), buffer);
    // Safety to avoid sending down 0 or negative number
    buffer.putInt(Math.max(1, (int) query.getTimeLeft()));
    ignoreableSize = buffer.position() - relativeZero - ignoreableOffset;
    buffer.putInt(getFlagInt());
    // Save the encoded rank profile bytes for later reuse.
    int startOfFieldToSave = buffer.position();
    Item.putString(query.getRanking().getProfile(), buffer);
    queryPacketData.setRankProfile(buffer, startOfFieldToSave);
    if ((featureFlag & QF_PROPERTIES) != 0) {
        startOfFieldToSave = buffer.position();
        query.encodeAsProperties(buffer, true);
        queryPacketData.setPropertyMaps(buffer, startOfFieldToSave);
    }
    if ((featureFlag & QF_SORTSPEC) != 0) {
        // Length placeholder, back-patched after the sort spec is encoded.
        int sortSpecLengthPosition = buffer.position();
        buffer.putInt(0);
        int sortSpecLength = query.getRanking().getSorting().encode(buffer);
        buffer.putInt(sortSpecLengthPosition, sortSpecLength);
    }
    if ((featureFlag & QF_GROUPSPEC) != 0) {
        // Grouping requests are serialized into a side buffer, then emitted
        // as a length-prefixed blob.
        List<Grouping> groupingList = GroupingExecutor.getGroupingList(query);
        BufferSerializer gbuf = new BufferSerializer(new GrowableByteBuffer());
        gbuf.putInt(null, groupingList.size());
        for (Grouping g : groupingList) {
            g.serialize(gbuf);
        }
        gbuf.getBuf().flip();
        byte[] blob = new byte[gbuf.getBuf().limit()];
        gbuf.getBuf().get(blob);
        buffer.putInt(blob.length);
        buffer.put(blob);
    }
    sessionOffset = buffer.position() - relativeZero;
    if (sendSessionKey) {
        // Session key: length-prefixed UTF-8 bytes.
        Utf8String key = query.getSessionId(true).asUtf8String();
        sessionSize = key.getByteLength();
        buffer.putInt(key.getByteLength());
        buffer.put(key.getBytes());
    }
    if ((featureFlag & QF_LOCATION) != 0) {
        // Length placeholder, back-patched after the location is encoded;
        // the whole field is also saved for reuse.
        startOfFieldToSave = buffer.position();
        int locationLengthPosition = buffer.position();
        buffer.putInt(0);
        int locationLength = query.getRanking().getLocation().encode(buffer);
        buffer.putInt(locationLengthPosition, locationLength);
        queryPacketData.setLocation(buffer, startOfFieldToSave);
    }
    // Query stack: item-count and byte-length placeholders are back-patched
    // after the query tree has been encoded.
    startOfFieldToSave = buffer.position();
    int stackItemPosition = buffer.position();
    // Number of stack items written below
    buffer.putInt(0);
    int stackLengthPosition = buffer.position();
    buffer.putInt(0);
    int stackPosition = buffer.position();
    int stackItemCount = query.encode(buffer);
    int stackLength = buffer.position() - stackPosition;
    buffer.putInt(stackItemPosition, stackItemCount);
    buffer.putInt(stackLengthPosition, stackLength);
    queryPacketData.setQueryStack(buffer, startOfFieldToSave);
}
Aggregations