Use of com.yahoo.compress.CompressionType in project vespa by vespa-engine: the class RPCSendV2, method toParams.
protected Params toParams(Values args) {
    // RPC argument 3 carries the compression type code, argument 4 the
    // uncompressed size, and argument 5 the compressed Slime payload.
    CompressionType compression = CompressionType.valueOf(args.get(3).asInt8());
    byte[] slimeBytes = compressor.decompress(args.get(5).asData(), compression, args.get(4).asInt32());
    Slime slime = BinaryFormat.decode(slimeBytes);
    Inspector root = slime.get();
    Params p = new Params();
    p.version = new Version(root.field(VERSION_F).asString());
    p.route = root.field(ROUTE_F).asString();
    p.session = root.field(SESSION_F).asString();
    p.retryEnabled = root.field(USERETRY_F).asBool();
    p.retry = (int) root.field(RETRY_F).asLong();
    p.timeRemaining = root.field(TIMEREMAINING_F).asLong();
    p.protocolName = new Utf8Array(Utf8.toBytes(root.field(PROTOCOL_F).asString()));
    p.payload = root.field(BLOB_F).asData();
    p.traceLevel = (int) root.field(TRACELEVEL_F).asLong();
    return p;
}
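For contrast, here is a minimal sketch of the encoding side that a toParams like this would decode. It is hypothetical: it assumes an encodeParams helper living alongside toParams in RPCSendV2 (so the Params class and field-name constants such as VERSION_F are in scope) and the Compressor.compress(CompressionType, byte[]) overload from com.yahoo.compress; the real encoder in RPCSendV2 may differ.

import com.yahoo.compress.CompressionType;
import com.yahoo.compress.Compressor;
import com.yahoo.slime.BinaryFormat;
import com.yahoo.slime.Cursor;
import com.yahoo.slime.Slime;

// Hypothetical inverse of toParams: build the Slime tree, serialize it,
// then compress the serialized bytes.
byte[] encodeParams(Compressor compressor, Params p) {
    Slime slime = new Slime();
    Cursor root = slime.setObject();
    root.setString(VERSION_F, p.version.toString());
    root.setString(ROUTE_F, p.route);
    root.setString(SESSION_F, p.session);
    root.setBool(USERETRY_F, p.retryEnabled);
    root.setLong(RETRY_F, p.retry);
    root.setLong(TIMEREMAINING_F, p.timeRemaining);
    root.setString(PROTOCOL_F, p.protocolName.toString());
    root.setData(BLOB_F, p.payload);
    root.setLong(TRACELEVEL_F, p.traceLevel);

    byte[] slimeBytes = BinaryFormat.encode(slime);
    // Assumed overload; on the wire, the compression type code and
    // slimeBytes.length would be sent alongside this payload.
    Compressor.Compression result = compressor.compress(CompressionType.LZ4, slimeBytes);
    return result.data();
}

The compression type and the uncompressed length have to travel with the compressed payload, which is exactly why toParams reads them from three separate RPC values.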
Use of com.yahoo.compress.CompressionType in project vespa by vespa-engine: the class VespaDocumentDeserializer42, method read.
public void read(FieldBase fieldDef, Struct s) {
    s.setVersion(version);
    int startPos = position();
    if (version < 6) {
        throw new DeserializationException("Illegal document serialization version " + version);
    }
    int dataSize;
    if (version < 7) {
        long rSize = getInt2_4_8Bytes(null);
        // TODO: Look into how to support data segments larger than INT_MAX bytes
        if (rSize > Integer.MAX_VALUE) {
            throw new DeserializationException("Raw size of data block is too large.");
        }
        dataSize = (int) rSize;
    } else {
        dataSize = getInt(null);
    }
    byte comprCode = getByte(null);
    CompressionType compression = CompressionType.valueOf(comprCode);
    int uncompressedSize = 0;
    if (compression != CompressionType.NONE && compression != CompressionType.INCOMPRESSIBLE) {
        // Uncompressed size: the full size of the FIELDS section only, after decompression.
        long pSize = getInt2_4_8Bytes(null);
        // TODO: Look into how to support data segments larger than INT_MAX bytes
        if (pSize > Integer.MAX_VALUE) {
            throw new DeserializationException("Uncompressed size of data block is too large.");
        }
        uncompressedSize = (int) pSize;
    }
    int numberOfFields = getInt1_4Bytes(null);
    List<Tuple2<Integer, Long>> fieldIdsAndLengths = new ArrayList<>(numberOfFields);
    for (int i = 0; i < numberOfFields; ++i) {
        // Field id and length (the length is only used for unknown fields).
        fieldIdsAndLengths.add(new Tuple2<>(getInt1_4Bytes(null), getInt2_4_8Bytes(null)));
    }
    // Save a reference to the big buffer we are reading from:
    GrowableByteBuffer bigBuf = buf;
    if (version < 7) {
        // In V6 and earlier, the length included the header.
        int headerSize = position() - startPos;
        dataSize -= headerSize;
    }
    byte[] destination = compressor.decompress(compression, getBuf().array(), position(), uncompressedSize, Optional.of(dataSize));
    // Set the position in the original buffer to just after the data.
    position(position() + dataSize);
    // For a while: deserialize from the decompressed buffer instead.
    buf = GrowableByteBuffer.wrap(destination);
    s.clear();
    StructDataType type = s.getDataType();
    for (int i = 0; i < numberOfFields; ++i) {
        Field structField = type.getField(fieldIdsAndLengths.get(i).first, version);
        if (structField == null) {
            // Skip over the unknown field using its recorded length:
            position(position() + fieldIdsAndLengths.get(i).second.intValue());
        } else {
            int posBefore = position();
            FieldValue value = structField.getDataType().createFieldValue();
            value.deserialize(structField, this);
            s.setFieldValue(structField, value);
            // Jump to the beginning of the next field:
            position(posBefore + fieldIdsAndLengths.get(i).second.intValue());
        }
    }
    // Restore the original buffer.
    buf = bigBuf;
}
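Both snippets funnel into Compressor.decompress from com.yahoo.compress. The round trip below is a sketch of that API under a few assumptions: the no-argument Compressor constructor, the compress(CompressionType, byte[]) overload, and the type()/data() accessors on Compressor.Compression. The five-argument decompress overload is the one used in read() above.

import java.nio.charset.StandardCharsets;
import java.util.Arrays;
import java.util.Optional;
import com.yahoo.compress.CompressionType;
import com.yahoo.compress.Compressor;

public class CompressRoundTrip {
    public static void main(String[] args) {
        Compressor compressor = new Compressor();  // assumed no-arg constructor
        byte[] original = ("struct field data, repeated so LZ4 has something to compress, "
                + "repeated so LZ4 has something to compress").getBytes(StandardCharsets.UTF_8);
        Compressor.Compression compressed = compressor.compress(CompressionType.LZ4, original);
        // Decompress with the same overload used in read() above:
        // (type, data, offset, uncompressedSize, expected compressed size).
        byte[] restored = compressor.decompress(
                compressed.type(),  // may come back NONE if the input did not shrink
                compressed.data(),
                0,                  // offset into the compressed buffer
                original.length,    // uncompressed size, as stored in the header
                Optional.of(compressed.data().length));
        System.out.println(Arrays.equals(original, restored));  // expect: true
    }
}

Using compressed.type() rather than hardcoding LZ4 matters: a compressor may store short or incompressible input as-is (CompressionType NONE or INCOMPRESSIBLE), which is exactly the case read() special-cases when deciding whether an uncompressed size precedes the field table.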