Use of org.apache.cassandra.io.util.DataInputBuffer in project cassandra by apache.
From the class ReadMessageTest, method serializeAndDeserializeReadMessage:
private ReadCommand serializeAndDeserializeReadMessage(ReadCommand rm) throws IOException {
    IVersionedSerializer<ReadCommand> rms = ReadCommand.serializer;
    DataOutputBuffer out = new DataOutputBuffer();
    rms.serialize(rm, out, MessagingService.current_version);
    DataInputPlus dis = new DataInputBuffer(out.getData());
    return rms.deserialize(dis, MessagingService.current_version);
}
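Every example on this page follows the same idiom: write a value into a DataOutputBuffer, wrap the resulting bytes in a DataInputBuffer, and read the value back through the same serializer. Below is a minimal generic sketch of that round trip; the RoundTrip class and its roundTrip helper are illustrative names for this page, not part of the Cassandra codebase.

import java.io.IOException;

import org.apache.cassandra.io.IVersionedSerializer;
import org.apache.cassandra.io.util.DataInputBuffer;
import org.apache.cassandra.io.util.DataOutputBuffer;
import org.apache.cassandra.net.MessagingService;

// Illustrative helper: round-trip any value through in-memory buffers
// using an IVersionedSerializer at the current messaging version.
public final class RoundTrip {
    public static <T> T roundTrip(IVersionedSerializer<T> serializer, T value) throws IOException {
        int version = MessagingService.current_version;
        try (DataOutputBuffer out = new DataOutputBuffer()) {
            serializer.serialize(value, out, version);
            // getData() exposes the bytes just written; DataInputBuffer reads them back
            try (DataInputBuffer in = new DataInputBuffer(out.getData())) {
                return serializer.deserialize(in, version);
            }
        }
    }
}

The serializeAndDeserializeReadMessage method above is this pattern specialized to ReadCommand.serializer.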
Use of org.apache.cassandra.io.util.DataInputBuffer in project cassandra by apache.
From the class PartitionTest, method testManyColumns:
@Test
public void testManyColumns() throws IOException {
    ColumnFamilyStore cfs = Keyspace.open(KEYSPACE1).getColumnFamilyStore(CF_TENCOL);
    RowUpdateBuilder builder = new RowUpdateBuilder(cfs.metadata(), 5, "key1").clustering("c").add("val", "val1");
    for (int i = 0; i < 10; i++)
        builder.add("val" + i, "val" + i);
    PartitionUpdate update = builder.buildUpdate();
    CachedBTreePartition partition = CachedBTreePartition.create(update.unfilteredIterator(), FBUtilities.nowInSeconds());

    DataOutputBuffer bufOut = new DataOutputBuffer();
    CachedPartition.cacheSerializer.serialize(partition, bufOut);
    CachedPartition deserialized = CachedPartition.cacheSerializer.deserialize(new DataInputBuffer(bufOut.getData()));

    assertEquals(partition.columns().regulars.size(), deserialized.columns().regulars.size());
    assertEquals(deserialized.columns().regulars.getSimple(1), partition.columns().regulars.getSimple(1));
    assertEquals(deserialized.columns().regulars.getSimple(5), partition.columns().regulars.getSimple(5));
    ColumnMetadata cDef = cfs.metadata().getColumn(ByteBufferUtil.bytes("val8"));
    assertEquals(partition.lastRow().getCell(cDef).buffer(), deserialized.lastRow().getCell(cDef).buffer());
    assert deserialized.partitionKey().equals(partition.partitionKey());
}
Use of org.apache.cassandra.io.util.DataInputBuffer in project cassandra by apache.
From the class BatchlogTest, method testSerialization:
@Test
public void testSerialization() throws IOException {
    TableMetadata cfm = Keyspace.open(KEYSPACE).getColumnFamilyStore(CF_STANDARD).metadata();
    long now = FBUtilities.timestampMicros();
    int version = MessagingService.current_version;
    UUID uuid = UUIDGen.getTimeUUID();

    List<Mutation> mutations = new ArrayList<>(10);
    for (int i = 0; i < 10; i++) {
        mutations.add(new RowUpdateBuilder(cfm, FBUtilities.timestampMicros(), bytes(i)).clustering("name" + i).add("val", "val" + i).build());
    }

    Batch batch1 = Batch.createLocal(uuid, now, mutations);
    assertEquals(uuid, batch1.id);
    assertEquals(now, batch1.creationTime);
    assertEquals(mutations, batch1.decodedMutations);

    // serialize, and check that serializedSize agrees with the bytes actually written
    DataOutputBuffer out = new DataOutputBuffer();
    Batch.serializer.serialize(batch1, out, version);
    assertEquals(out.getLength(), Batch.serializer.serializedSize(batch1, version));

    DataInputPlus dis = new DataInputBuffer(out.getData());
    Batch batch2 = Batch.serializer.deserialize(dis, version);
    assertEquals(batch1.id, batch2.id);
    assertEquals(batch1.creationTime, batch2.creationTime);
    assertEquals(batch1.decodedMutations.size(), batch2.encodedMutations.size());

    // a deserialized batch keeps its mutations encoded, so decode each ByteBuffer
    // before comparing against the original decoded mutations
    Iterator<Mutation> it1 = batch1.decodedMutations.iterator();
    Iterator<ByteBuffer> it2 = batch2.encodedMutations.iterator();
    while (it1.hasNext()) {
        try (DataInputBuffer in = new DataInputBuffer(it2.next().array())) {
            assertEquals(it1.next().toString(), Mutation.serializer.deserialize(in, version).toString());
        }
    }
}
Use of org.apache.cassandra.io.util.DataInputBuffer in project cassandra by apache.
From the class EntireSSTableStreamConcurrentComponentMutationTest, method testStreamWithConcurrentComponentMutation:
private void testStreamWithConcurrentComponentMutation(Callable<?> runBeforeStreaming, Callable<?> runConcurrentWithStreaming) throws Throwable {
ByteBuf serializedFile = Unpooled.buffer(8192);
InetAddressAndPort peer = FBUtilities.getBroadcastAddressAndPort();
StreamSession session = setupStreamingSessionForTest();
Collection<OutgoingStream> outgoingStreams = store.getStreamManager().createOutgoingStreams(session, rangesAtEndpoint, NO_PENDING_REPAIR, PreviewKind.NONE);
CassandraOutgoingFile outgoingFile = (CassandraOutgoingFile) Iterables.getOnlyElement(outgoingStreams);
Future<?> streaming = executeAsync(() -> {
runBeforeStreaming.call();
try (AsyncStreamingOutputPlus out = new AsyncStreamingOutputPlus(createMockNettyChannel(serializedFile))) {
outgoingFile.write(session, out, MessagingService.current_version);
assertTrue(sstable.descriptor.getTemporaryFiles().isEmpty());
}
return null;
});
Future<?> concurrentMutations = executeAsync(runConcurrentWithStreaming);
streaming.get(3, TimeUnit.MINUTES);
concurrentMutations.get(3, TimeUnit.MINUTES);
session.prepareReceiving(new StreamSummary(sstable.metadata().id, 1, 5104));
StreamMessageHeader messageHeader = new StreamMessageHeader(sstable.metadata().id, peer, session.planId(), false, 0, 0, 0, null);
try (DataInputBuffer in = new DataInputBuffer(serializedFile.nioBuffer(), false)) {
CassandraStreamHeader header = CassandraStreamHeader.serializer.deserialize(in, MessagingService.current_version);
CassandraEntireSSTableStreamReader reader = new CassandraEntireSSTableStreamReader(messageHeader, header, session);
SSTableReader streamedSSTable = Iterables.getOnlyElement(reader.read(in).finished());
SSTableUtils.assertContentEquals(sstable, streamedSSTable);
}
}
Use of org.apache.cassandra.io.util.DataInputBuffer in project cassandra by apache.
From the class InboundMessageHandler, method processSmallMessage:
private void processSmallMessage(ShareableBytes bytes, int size, Header header) {
    ByteBuffer buf = bytes.get();
    final int begin = buf.position();
    final int end = buf.limit();
    buf.limit(begin + size); // cap to expected message size

    Message<?> message = null;
    try (DataInputBuffer in = new DataInputBuffer(buf, false)) {
        Message<?> m = serializer.deserialize(in, header, version);
        if (in.available() > 0) // bytes remaining after deser: deserializer is busted
            throw new InvalidSerializedSizeException(header.verb, size, size - in.available());
        message = m;
    } catch (IncompatibleSchemaException e) {
        callbacks.onFailedDeserialize(size, header, e);
        noSpamLogger.info("{} incompatible schema encountered while deserializing a message", this, e);
    } catch (Throwable t) {
        JVMStabilityInspector.inspectThrowable(t);
        callbacks.onFailedDeserialize(size, header, t);
        logger.error("{} unexpected exception caught while deserializing a message", id(), t);
    } finally {
        if (null == message)
            releaseCapacity(size);
        // no matter what, set position to the beginning of the next message and restore limit, so that
        // we can always keep on decoding the frame even on failure to deserialize previous message
        buf.position(begin + size);
        buf.limit(end);
    }

    if (null != message)
        dispatch(new ProcessSmallMessage(message, size));
}
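Two DataInputBuffer constructors appear across these examples: one wrapping a byte[] (typically from DataOutputBuffer.getData()), and one wrapping an existing ByteBuffer, where the boolean argument appears to control whether the buffer is duplicated before reading; processSmallMessage passes false and therefore restores the buffer's position and limit itself in the finally block. A small sketch of both forms is below; the class name and the placeholder data are purely illustrative, and the reading of the boolean flag is an assumption based on the usage shown above.

import java.io.IOException;
import java.nio.ByteBuffer;

import org.apache.cassandra.io.util.DataInputBuffer;

public class DataInputBufferForms {
    public static void main(String[] args) throws IOException {
        byte[] raw = new byte[] { 1, 2, 3, 4 }; // placeholder bytes for illustration

        // form 1: wrap a byte[] (as in the test examples above)
        try (DataInputBuffer in = new DataInputBuffer(raw)) {
            System.out.println(in.readInt()); // reads the four bytes as one int
        }

        // form 2: wrap a ByteBuffer; passing false is assumed to read the buffer in place
        // rather than a duplicate, so the caller manages position/limit afterwards,
        // as processSmallMessage does
        ByteBuffer nio = ByteBuffer.wrap(raw);
        try (DataInputBuffer in = new DataInputBuffer(nio, false)) {
            System.out.println(in.readInt());
        }
    }
}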