Example usage of org.apache.cassandra.io.util.DataInputBuffer in the Apache Cassandra project:
class GossipDigestTest, method test().
@Test
public void test() throws IOException {
    InetAddressAndPort endpoint = InetAddressAndPort.getByName("127.0.0.1");
    int generation = 0;
    int maxVersion = 123;
    GossipDigest expected = new GossipDigest(endpoint, generation, maxVersion);
    // make sure we get the same values out
    assertEquals(endpoint, expected.getEndpoint());
    assertEquals(generation, expected.getGeneration());
    assertEquals(maxVersion, expected.getMaxVersion());
    // test the serialization round-trip and equals; both buffers are
    // AutoCloseable, so release them deterministically with try-with-resources
    try (DataOutputBuffer output = new DataOutputBuffer()) {
        GossipDigest.serializer.serialize(expected, output, MessagingService.current_version);
        try (DataInputBuffer input = new DataInputBuffer(output.getData())) {
            GossipDigest actual = GossipDigest.serializer.deserialize(input, MessagingService.current_version);
            // compareTo == 0 means the deserialized digest is equivalent to the original
            assertEquals(0, expected.compareTo(actual));
        }
    }
}
Example usage of org.apache.cassandra.io.util.DataInputBuffer in the Apache Cassandra project:
class HintMessageTest, method testEncodedSerializer().
@Test
public void testEncodedSerializer() throws IOException {
    UUID hostId = UUID.randomUUID();
    long now = FBUtilities.timestampMicros();
    TableMetadata table = Schema.instance.getTableMetadata(KEYSPACE, TABLE);
    Mutation mutation = new RowUpdateBuilder(table, now, bytes("key")).clustering("column").add("val", "val" + 1234).build();
    Hint hint = Hint.create(mutation, now / 1000);
    // pre-encode the hint so we can wrap its raw bytes in an Encoded message
    HintMessage.Encoded message;
    try (DataOutputBuffer dob = new DataOutputBuffer()) {
        Hint.serializer.serialize(hint, dob, MessagingService.current_version);
        message = new HintMessage.Encoded(hostId, dob.buffer(), MessagingService.current_version);
    }
    // serialize; the reported serializedSize must match the bytes actually written
    int serializedSize = (int) HintMessage.serializer.serializedSize(message, MessagingService.current_version);
    try (DataOutputBuffer dob = new DataOutputBuffer()) {
        HintMessage.serializer.serialize(message, dob, MessagingService.current_version);
        assertEquals(serializedSize, dob.getLength());
        // deserialize
        try (DataInputBuffer dip = new DataInputBuffer(dob.buffer(), true)) {
            HintMessage deserializedMessage = HintMessage.serializer.deserialize(dip, MessagingService.current_version);
            // compare before/after
            assertEquals(hostId, deserializedMessage.hostId);
            assertNotNull(deserializedMessage.hint);
            assertHintsEqual(hint, deserializedMessage.hint);
        }
    }
}
Example usage of org.apache.cassandra.io.util.DataInputBuffer in the Apache Cassandra project:
class StreamingTombstoneHistogramBuilderTest, method testSerDe().
@Test
public void testSerDe() throws Exception {
    StreamingTombstoneHistogramBuilder builder = new StreamingTombstoneHistogramBuilder(5, 0, 1);
    int[] samples = new int[] { 23, 19, 10, 16, 36, 2, 9 };
    // add 7 points to histogram of 5 bins
    for (int sample : samples)
        builder.update(sample);
    TombstoneHistogram hist = builder.build();
    // round-trip through the serializer, closing the buffers deterministically
    byte[] bytes;
    try (DataOutputBuffer out = new DataOutputBuffer()) {
        TombstoneHistogram.serializer.serialize(hist, out);
        bytes = out.toByteArray();
    }
    TombstoneHistogram deserialized;
    try (DataInputBuffer in = new DataInputBuffer(bytes)) {
        deserialized = TombstoneHistogram.serializer.deserialize(in);
    }
    // deserialized histogram should have following (point -> count) values, in order
    Map<Double, Long> expected1 = new LinkedHashMap<Double, Long>(5);
    expected1.put(2.0, 1L);
    expected1.put(9.0, 2L);
    expected1.put(17.0, 2L);
    expected1.put(23.0, 1L);
    expected1.put(36.0, 1L);
    Iterator<Map.Entry<Double, Long>> expectedItr = expected1.entrySet().iterator();
    deserialized.forEach((point, value) -> {
        Map.Entry<Double, Long> entry = expectedItr.next();
        // bin centers are doubles; allow a small tolerance
        assertEquals(entry.getKey(), point, 0.01);
        assertEquals(entry.getValue().longValue(), value);
    });
}
Example usage of org.apache.cassandra.io.util.DataInputBuffer in the Apache Cassandra project:
class StreamingInboundHandlerTest, method StreamDeserializingTask_deserialize_ISM_NoSession().
@Test(expected = IllegalStateException.class)
public void StreamDeserializingTask_deserialize_ISM_NoSession() throws IOException {
    // build a valid stream-message header for a session that was never registered
    StreamMessageHeader header = new StreamMessageHeader(TableId.generate(), REMOTE_ADDR, UUID.randomUUID(), true, 0, 0, 0, UUID.randomUUID());
    ByteBuffer serialized = ByteBuffer.allocate(1024);
    DataOutputPlus output = new DataOutputBuffer(serialized);
    StreamMessageHeader.serializer.serialize(header, output, MessagingService.current_version);
    // switch the buffer from write mode to read mode before deserializing
    serialized.flip();
    DataInputPlus input = new DataInputBuffer(serialized, false);
    // no matching session exists, so this deserialize call is expected to throw
    // the IllegalStateException declared on the @Test annotation above
    IncomingStreamMessage.serializer.deserialize(input, MessagingService.current_version);
}
Example usage of org.apache.cassandra.io.util.DataInputBuffer in the Apache Cassandra project:
class PagingState, method deserialize().
/**
 * Decodes a PagingState previously produced by serialization, honoring the wire
 * layout of the given native protocol version. Returns null for a null input.
 */
public static PagingState deserialize(ByteBuffer bytes, ProtocolVersion protocolVersion) {
    if (bytes == null)
        return null;
    try (DataInputBuffer input = new DataInputBuffer(bytes, true)) {
        ByteBuffer partitionKey;
        RowMark rowMark;
        int remaining;
        int remainingInPartition;
        if (protocolVersion.isSmallerOrEqualTo(ProtocolVersion.V3)) {
            // legacy (<= V3) layout: short-length-prefixed buffers and fixed-width ints
            partitionKey = ByteBufferUtil.readWithShortLength(input);
            rowMark = new RowMark(ByteBufferUtil.readWithShortLength(input), protocolVersion);
            remaining = input.readInt();
            // available() is normally only an estimate, but since we are reading from a
            // ByteBuffer it is exactly the number of unread bytes. Older senders did not
            // include remainingInPartition, so treat its absence as "unlimited" for
            // backward compatibility.
            remainingInPartition = input.available() > 0 ? input.readInt() : Integer.MAX_VALUE;
        } else {
            // modern (> V3) layout: vint-length-prefixed buffers and unsigned vints
            partitionKey = ByteBufferUtil.readWithVIntLength(input);
            rowMark = new RowMark(ByteBufferUtil.readWithVIntLength(input), protocolVersion);
            remaining = (int) input.readUnsignedVInt();
            remainingInPartition = (int) input.readUnsignedVInt();
        }
        // empty buffers encode "no value"; map them back to null
        ByteBuffer key = partitionKey.hasRemaining() ? partitionKey : null;
        RowMark mark = rowMark.mark.hasRemaining() ? rowMark : null;
        return new PagingState(key, mark, remaining, remainingInPartition);
    } catch (IOException e) {
        // NOTE(review): the IOException cause is dropped here; worth checking whether
        // ProtocolException has a (String, Throwable) constructor to preserve it
        throw new ProtocolException("Invalid value for the paging state");
    }
}
Aggregations