Use of org.apache.cassandra.io.util.DataOutputBuffer in project cassandra by apache, in class ReadCommandTest, method testSinglePartitionGroupMerge.
@Test
public void testSinglePartitionGroupMerge() throws Exception {
    ColumnFamilyStore cfs = Keyspace.open(KEYSPACE).getColumnFamilyStore(CF3);
    // "1" indicates to create the data, "-1" to delete the row
    String[][][] groups = new String[][][] {
        new String[][] {
            new String[] { "1", "key1", "aa", "a" },
            new String[] { "1", "key2", "bb", "b" },
            new String[] { "1", "key3", "cc", "c" }
        },
        new String[][] {
            new String[] { "1", "key3", "dd", "d" },
            new String[] { "1", "key2", "ee", "e" },
            new String[] { "1", "key1", "ff", "f" }
        },
        new String[][] {
            new String[] { "1", "key6", "aa", "a" },
            new String[] { "1", "key5", "bb", "b" },
            new String[] { "1", "key4", "cc", "c" }
        },
        new String[][] {
            new String[] { "-1", "key6", "aa", "a" },
            new String[] { "-1", "key2", "bb", "b" }
        }
    };
    // Given the data above, when the keys are sorted and the deletions removed, we should
    // get these clustering rows in this order
    String[] expectedRows = new String[] { "aa", "ff", "ee", "cc", "dd", "cc", "bb" };
    List<ByteBuffer> buffers = new ArrayList<>(groups.length);
    int nowInSeconds = FBUtilities.nowInSeconds();
    ColumnFilter columnFilter = ColumnFilter.allRegularColumnsBuilder(cfs.metadata()).build();
    RowFilter rowFilter = RowFilter.create();
    Slice slice = Slice.make(ClusteringBound.BOTTOM, ClusteringBound.TOP);
    ClusteringIndexSliceFilter sliceFilter = new ClusteringIndexSliceFilter(Slices.with(cfs.metadata().comparator, slice), false);
    for (String[][] group : groups) {
        cfs.truncateBlocking();
        List<SinglePartitionReadCommand> commands = new ArrayList<>(group.length);
        for (String[] data : group) {
            if (data[0].equals("1")) {
                new RowUpdateBuilder(cfs.metadata(), 0, ByteBufferUtil.bytes(data[1])).clustering(data[2]).add(data[3], ByteBufferUtil.bytes("blah")).build().apply();
            } else {
                RowUpdateBuilder.deleteRow(cfs.metadata(), FBUtilities.timestampMicros(), ByteBufferUtil.bytes(data[1]), data[2]).apply();
            }
            commands.add(SinglePartitionReadCommand.create(cfs.metadata(), nowInSeconds, columnFilter, rowFilter, DataLimits.NONE, Util.dk(data[1]), sliceFilter));
        }
        cfs.forceBlockingFlush();
        ReadQuery query = new SinglePartitionReadCommand.Group(commands, DataLimits.NONE);
        try (ReadExecutionController executionController = query.executionController();
             UnfilteredPartitionIterator iter = query.executeLocally(executionController);
             DataOutputBuffer buffer = new DataOutputBuffer()) {
            UnfilteredPartitionIterators.serializerForIntraNode().serialize(iter, columnFilter, buffer, MessagingService.current_version);
            buffers.add(buffer.buffer());
        }
    }
    // deserialize, merge and check the results are all there
    List<UnfilteredPartitionIterator> iterators = new ArrayList<>();
    for (ByteBuffer buffer : buffers) {
        try (DataInputBuffer in = new DataInputBuffer(buffer, true)) {
            iterators.add(UnfilteredPartitionIterators.serializerForIntraNode().deserialize(in, MessagingService.current_version, cfs.metadata(), columnFilter, SerializationHelper.Flag.LOCAL));
        }
    }
    try (PartitionIterator partitionIterator = UnfilteredPartitionIterators.mergeAndFilter(iterators, nowInSeconds, new UnfilteredPartitionIterators.MergeListener() {
        public UnfilteredRowIterators.MergeListener getRowMergeListener(DecoratedKey partitionKey, List<UnfilteredRowIterator> versions) {
            return null;
        }

        public void close() {
        }
    })) {
        int i = 0;
        int numPartitions = 0;
        while (partitionIterator.hasNext()) {
            numPartitions++;
            try (RowIterator rowIterator = partitionIterator.next()) {
                while (rowIterator.hasNext()) {
                    Row row = rowIterator.next();
                    assertEquals("col=" + expectedRows[i++], row.clustering().toString(cfs.metadata()));
                    //System.out.print(row.toString(cfs.metadata, true));
                }
            }
        }
        assertEquals(5, numPartitions);
        assertEquals(expectedRows.length, i);
    }
}
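The write-then-read-back pattern in this test (DataOutputBuffer to serialize, DataInputBuffer to deserialize) is the core idiom behind every snippet on this page. A minimal sketch of that round trip, using only the DataOutput/DataInput primitives the two classes expose; the method name and payload values are illustrative, not from the repository, and JUnit's assertEquals is assumed statically imported as in the surrounding tests:

static void primitiveRoundTrip() throws IOException {
    try (DataOutputBuffer out = new DataOutputBuffer()) {
        // write any DataOutput primitives...
        out.writeInt(42);
        out.writeUTF("key1");
        // ...then read them back in the same order from the written bytes
        try (DataInputBuffer in = new DataInputBuffer(out.getData())) {
            assertEquals(42, in.readInt());
            assertEquals("key1", in.readUTF());
        }
    }
}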
Use of org.apache.cassandra.io.util.DataOutputBuffer in project cassandra by apache, in class ReadMessageTest, method serializeAndDeserializeReadMessage.
private ReadCommand serializeAndDeserializeReadMessage(ReadCommand rm) throws IOException {
    IVersionedSerializer<ReadCommand> rms = ReadCommand.serializer;
    DataOutputBuffer out = new DataOutputBuffer();
    rms.serialize(rm, out, MessagingService.current_version);
    DataInputPlus dis = new DataInputBuffer(out.getData());
    return rms.deserialize(dis, MessagingService.current_version);
}
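The same round trip can be sketched generically for any IVersionedSerializer<T>; this refactor is illustrative, not code from the repository:

static <T> T roundTrip(IVersionedSerializer<T> serializer, T value) throws IOException {
    try (DataOutputBuffer out = new DataOutputBuffer()) {
        serializer.serialize(value, out, MessagingService.current_version);
        try (DataInputBuffer in = new DataInputBuffer(out.getData())) {
            return serializer.deserialize(in, MessagingService.current_version);
        }
    }
}

With such a helper, serializeAndDeserializeReadMessage above reduces to roundTrip(ReadCommand.serializer, rm).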
Use of org.apache.cassandra.io.util.DataOutputBuffer in project cassandra by apache, in class PartitionTest, method testManyColumns.
@Test
public void testManyColumns() throws IOException {
    ColumnFamilyStore cfs = Keyspace.open(KEYSPACE1).getColumnFamilyStore(CF_TENCOL);
    RowUpdateBuilder builder = new RowUpdateBuilder(cfs.metadata(), 5, "key1").clustering("c").add("val", "val1");
    for (int i = 0; i < 10; i++)
        builder.add("val" + i, "val" + i);
    PartitionUpdate update = builder.buildUpdate();
    CachedBTreePartition partition = CachedBTreePartition.create(update.unfilteredIterator(), FBUtilities.nowInSeconds());
    DataOutputBuffer bufOut = new DataOutputBuffer();
    CachedPartition.cacheSerializer.serialize(partition, bufOut);
    CachedPartition deserialized = CachedPartition.cacheSerializer.deserialize(new DataInputBuffer(bufOut.getData()));
    assertEquals(partition.columns().regulars.size(), deserialized.columns().regulars.size());
    assertTrue(deserialized.columns().regulars.getSimple(1).equals(partition.columns().regulars.getSimple(1)));
    assertTrue(deserialized.columns().regulars.getSimple(5).equals(partition.columns().regulars.getSimple(5)));
    ColumnMetadata cDef = cfs.metadata().getColumn(ByteBufferUtil.bytes("val8"));
    assertTrue(partition.lastRow().getCell(cDef).value().equals(deserialized.lastRow().getCell(cDef).value()));
    assert deserialized.partitionKey().equals(partition.partitionKey());
}
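Unlike the versioned serializers above, CachedPartition.cacheSerializer takes no messaging version, so the generic helper for this case drops the version argument. A sketch, assuming the serializer follows the serialize/deserialize shape of org.apache.cassandra.io.ISerializer:

static <T> T roundTripUnversioned(ISerializer<T> serializer, T value) throws IOException {
    try (DataOutputBuffer out = new DataOutputBuffer()) {
        serializer.serialize(value, out);
        try (DataInputBuffer in = new DataInputBuffer(out.getData())) {
            return serializer.deserialize(in);
        }
    }
}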
Use of org.apache.cassandra.io.util.DataOutputBuffer in project cassandra by apache, in class HintTest, method testSerializer.
@Test
public void testSerializer() throws IOException {
    long now = FBUtilities.timestampMicros();
    Mutation mutation = createMutation("testSerializer", now);
    Hint hint = Hint.create(mutation, now / 1000);
    // serialize
    int serializedSize = (int) Hint.serializer.serializedSize(hint, MessagingService.current_version);
    DataOutputBuffer dob = new DataOutputBuffer();
    Hint.serializer.serialize(hint, dob, MessagingService.current_version);
    assertEquals(serializedSize, dob.getLength());
    // deserialize
    DataInputPlus di = new DataInputBuffer(dob.buffer(), true);
    Hint deserializedHint = Hint.serializer.deserialize(di, MessagingService.current_version);
    // compare before/after
    assertHintsEqual(hint, deserializedHint);
}
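The assertEquals(serializedSize, dob.getLength()) check above is worth isolating: it verifies that a serializer's declared size agrees with the bytes it actually writes. A generic version of that idiom; the helper name is invented for illustration:

static <T> void assertSerializedSizeMatches(IVersionedSerializer<T> serializer, T value) throws IOException {
    try (DataOutputBuffer out = new DataOutputBuffer()) {
        serializer.serialize(value, out, MessagingService.current_version);
        // serializedSize() must agree with the byte count actually produced
        assertEquals(serializer.serializedSize(value, MessagingService.current_version), out.getLength());
    }
}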
Use of org.apache.cassandra.io.util.DataOutputBuffer in project cassandra by apache, in class HintsDescriptorTest, method serializeDescriptor.
private static byte[] serializeDescriptor(HintsDescriptor descriptor) throws IOException {
    DataOutputBuffer dob = new DataOutputBuffer();
    descriptor.serialize(dob);
    return dob.toByteArray();
}
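Taken together, these snippets extract bytes from a DataOutputBuffer in three different ways: buffer() (ReadCommandTest, HintTest), getData() (ReadMessageTest, PartitionTest), and toByteArray() (here). The distinction between them, as commonly understood and worth verifying against your Cassandra version:

DataOutputBuffer dob = new DataOutputBuffer();
dob.writeUTF("example");               // illustrative payload
ByteBuffer view  = dob.buffer();       // ByteBuffer over exactly the written bytes
byte[] backing   = dob.getData();      // raw backing array; may be longer than getLength()
byte[] trimmed   = dob.toByteArray();  // fresh copy of exactly the written bytes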