Use of org.apache.cassandra.io.util.DataInputBuffer in project cassandra by apache.
The class LocalSessions, method deserializeRange.
private static Range<Token> deserializeRange(ByteBuffer bb) {
    try (DataInputBuffer in = new DataInputBuffer(bb, false)) {
        IPartitioner partitioner = DatabaseDescriptor.getPartitioner();
        Token left = Token.serializer.deserialize(in, partitioner, 0);
        Token right = Token.serializer.deserialize(in, partitioner, 0);
        return new Range<>(left, right);
    } catch (IOException e) {
        throw new RuntimeException(e);
    }
}
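For context, here is a sketch of the inverse operation: writing a Range<Token> into the ByteBuffer that deserializeRange reads back. It assumes Token.serializer exposes matching serialize/serializedSize counterparts taking a DataOutputPlus and a version, mirroring the deserialize calls above; treat the exact signatures as illustrative rather than quoted from LocalSessions.

    // Illustrative counterpart (assumed signatures): serialize a token range so that
    // deserializeRange(ByteBuffer) above can read it back.
    private static ByteBuffer serializeRange(Range<Token> range) {
        int size = (int) Token.serializer.serializedSize(range.left, 0);
        size += (int) Token.serializer.serializedSize(range.right, 0);
        try (DataOutputBuffer out = new DataOutputBuffer(size)) {
            Token.serializer.serialize(range.left, out, 0);
            Token.serializer.serialize(range.right, out, 0);
            return out.buffer();
        } catch (IOException e) {
            throw new RuntimeException(e);
        }
    }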
Use of org.apache.cassandra.io.util.DataInputBuffer in project cassandra by apache.
The class ColumnFilterTest, method testRoundTrip.
private void testRoundTrip(ColumnFilter columnFilter, int version) {
    try {
        DataOutputBuffer output = new DataOutputBuffer();
        serializer.serialize(columnFilter, output, version);
        Assert.assertEquals(serializer.serializedSize(columnFilter, version), output.position());
        DataInputPlus input = new DataInputBuffer(output.buffer(), false);
        ColumnFilter deserialized = serializer.deserialize(input, version, metadata);
        if (version == MessagingService.VERSION_30 && columnFilter.fetchesAllColumns(false)) {
            Assert.assertEquals(metadata.regularAndStaticColumns(), deserialized.fetchedColumns());
        } else {
            Assert.assertEquals(deserialized, columnFilter);
        }
    } catch (IOException e) {
        throw Throwables.cleaned(e);
    }
}
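A hypothetical caller shows how such a helper is typically driven across messaging versions; the filter construction and the choice of versions are illustrative and may not match the actual test methods in ColumnFilterTest.

    // Hypothetical driver: round-trip one filter at a legacy and the current wire version.
    @Test
    public void testRoundTripAllColumns() {
        ColumnFilter filter = ColumnFilter.all(metadata);         // fetch all columns of the table
        testRoundTrip(filter, MessagingService.VERSION_30);       // legacy 3.0 wire format
        testRoundTrip(filter, MessagingService.current_version);  // current wire format
    }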
Use of org.apache.cassandra.io.util.DataInputBuffer in project cassandra by apache.
The class StandardAnalyzer, method init.
public void init(StandardTokenizerOptions tokenizerOptions, AbstractType<?> validator) {
    this.validator = validator;
    this.options = tokenizerOptions;
    this.filterPipeline = getFilterPipeline();
    Reader reader = new InputStreamReader(new DataInputBuffer(ByteBufferUtil.EMPTY_BYTE_BUFFER, false), StandardCharsets.UTF_8);
    this.scanner = new StandardTokenizerImpl(reader);
    this.inputReader = reader;
}
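The empty buffer above is only a placeholder used to construct the tokenizer. A sketch of how the analyzer might later be pointed at real term bytes follows; the reset method name and the yyreset call are assumptions based on the JFlex-generated tokenizer, not quoted from StandardAnalyzer.

    // Assumed reset path: rebind the tokenizer to actual term bytes before analysis.
    public void reset(ByteBuffer input) {
        Reader reader = new InputStreamReader(new DataInputBuffer(input, false), StandardCharsets.UTF_8);
        scanner.yyreset(reader);   // JFlex-generated tokenizers expose yyreset(Reader)
        this.inputReader = reader;
    }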
Use of org.apache.cassandra.io.util.DataInputBuffer in project cassandra by apache.
The class MutationBench, method setup.
@Setup
public void setup() throws IOException {
    Schema.instance.load(KeyspaceMetadata.create(keyspace, KeyspaceParams.simple(1)));
    KeyspaceMetadata ksm = Schema.instance.getKeyspaceMetadata(keyspace);
    TableMetadata metadata = CreateTableStatement.parse("CREATE TABLE userpics " +
                                                        "( userid bigint," +
                                                        "picid bigint," +
                                                        "commentid bigint, " +
                                                        "PRIMARY KEY(userid, picid))", keyspace).build();
    Schema.instance.load(ksm.withSwapped(ksm.tables.with(metadata)));
    mutation = (Mutation) UpdateBuilder.create(metadata, 1L).newRow(1L).add("commentid", 32L).makeMutation();
    buffer = ByteBuffer.allocate(mutation.serializedSize(MessagingService.current_version));
    outputBuffer = new DataOutputBufferFixed(buffer);
    inputBuffer = new DataInputBuffer(buffer, false);
    Mutation.serializer.serialize(mutation, outputBuffer, MessagingService.current_version);
}
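The setup leaves a serialized mutation in buffer and a DataInputBuffer view over it, so a JMH benchmark method can deserialize it repeatedly. A plausible sketch of such a method is shown below; the rewind-and-consume pattern and the Blackhole parameter (org.openjdk.jmh.infra.Blackhole) are assumptions, not quoted from the actual benchmark class.

    // Assumed benchmark body: rewind the shared ByteBuffer, then deserialize through inputBuffer.
    @Benchmark
    public void deserializeMutation(Blackhole bh) throws IOException {
        buffer.rewind();   // inputBuffer wraps this buffer without duplicating, so reading restarts at 0
        bh.consume(Mutation.serializer.deserialize(inputBuffer, MessagingService.current_version));
    }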
Use of org.apache.cassandra.io.util.DataInputBuffer in project cassandra by apache.
The class ReadCommandTest, method testSinglePartitionGroupMerge.
@Test
public void testSinglePartitionGroupMerge() throws Exception {
    ColumnFamilyStore cfs = Keyspace.open(KEYSPACE).getColumnFamilyStore(CF3);
    // "1" indicates to create the data, "-1" to delete the row
    String[][][] groups = new String[][][] {
        new String[][] { new String[] { "1", "key1", "aa", "a" },
                         new String[] { "1", "key2", "bb", "b" },
                         new String[] { "1", "key3", "cc", "c" } },
        new String[][] { new String[] { "1", "key3", "dd", "d" },
                         new String[] { "1", "key2", "ee", "e" },
                         new String[] { "1", "key1", "ff", "f" } },
        new String[][] { new String[] { "1", "key6", "aa", "a" },
                         new String[] { "1", "key5", "bb", "b" },
                         new String[] { "1", "key4", "cc", "c" } },
        new String[][] { new String[] { "-1", "key6", "aa", "a" },
                         new String[] { "-1", "key2", "bb", "b" } } };
    // Given the data above, when the keys are sorted and the deletions removed, we should
    // get these clustering rows in this order
    String[] expectedRows = new String[] { "aa", "ff", "ee", "cc", "dd", "cc", "bb" };
    List<ByteBuffer> buffers = new ArrayList<>(groups.length);
    int nowInSeconds = FBUtilities.nowInSeconds();
    ColumnFilter columnFilter = ColumnFilter.allRegularColumnsBuilder(cfs.metadata(), false).build();
    RowFilter rowFilter = RowFilter.create();
    Slice slice = Slice.make(BufferClusteringBound.BOTTOM, BufferClusteringBound.TOP);
    ClusteringIndexSliceFilter sliceFilter = new ClusteringIndexSliceFilter(Slices.with(cfs.metadata().comparator, slice), false);
    for (String[][] group : groups) {
        cfs.truncateBlocking();
        List<SinglePartitionReadCommand> commands = new ArrayList<>(group.length);
        for (String[] data : group) {
            if (data[0].equals("1")) {
                new RowUpdateBuilder(cfs.metadata(), 0, ByteBufferUtil.bytes(data[1])).clustering(data[2]).add(data[3], ByteBufferUtil.bytes("blah")).build().apply();
            } else {
                RowUpdateBuilder.deleteRow(cfs.metadata(), FBUtilities.timestampMicros(), ByteBufferUtil.bytes(data[1]), data[2]).apply();
            }
            commands.add(SinglePartitionReadCommand.create(cfs.metadata(), nowInSeconds, columnFilter, rowFilter, DataLimits.NONE, Util.dk(data[1]), sliceFilter));
        }
        cfs.forceBlockingFlush();
        ReadQuery query = SinglePartitionReadCommand.Group.create(commands, DataLimits.NONE);
        try (ReadExecutionController executionController = query.executionController();
             UnfilteredPartitionIterator iter = query.executeLocally(executionController);
             DataOutputBuffer buffer = new DataOutputBuffer()) {
            UnfilteredPartitionIterators.serializerForIntraNode().serialize(iter, columnFilter, buffer, MessagingService.current_version);
            buffers.add(buffer.buffer());
        }
    }
    // deserialize, merge and check the results are all there
    List<UnfilteredPartitionIterator> iterators = new ArrayList<>();
    for (ByteBuffer buffer : buffers) {
        try (DataInputBuffer in = new DataInputBuffer(buffer, true)) {
            iterators.add(UnfilteredPartitionIterators.serializerForIntraNode().deserialize(in, MessagingService.current_version, cfs.metadata(), columnFilter, DeserializationHelper.Flag.LOCAL));
        }
    }
    UnfilteredPartitionIterators.MergeListener listener = new UnfilteredPartitionIterators.MergeListener() {
        public UnfilteredRowIterators.MergeListener getRowMergeListener(DecoratedKey partitionKey, List<UnfilteredRowIterator> versions) {
            return null;
        }

        public void close() {
        }
    };
    try (PartitionIterator partitionIterator = UnfilteredPartitionIterators.filter(UnfilteredPartitionIterators.merge(iterators, listener), nowInSeconds)) {
        int i = 0;
        int numPartitions = 0;
        while (partitionIterator.hasNext()) {
            numPartitions++;
            try (RowIterator rowIterator = partitionIterator.next()) {
                while (rowIterator.hasNext()) {
                    Row row = rowIterator.next();
                    assertEquals("col=" + expectedRows[i++], row.clustering().toString(cfs.metadata()));
                    // System.out.print(row.toString(cfs.metadata, true));
                }
            }
        }
        assertEquals(5, numPartitions);
        assertEquals(expectedRows.length, i);
    }
}
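The DataInputBuffer usage at the heart of this test is the intra-node round trip of an UnfilteredPartitionIterator. Distilled into a standalone helper it would look roughly like the sketch below; this is an illustration only, with the metadata and column filter assumed to come from the surrounding test rather than a method that exists in ReadCommandTest.

    // Illustrative round-trip helper distilled from the test above.
    private static UnfilteredPartitionIterator roundTrip(UnfilteredPartitionIterator iter,
                                                         TableMetadata metadata,
                                                         ColumnFilter columnFilter) throws IOException {
        ByteBuffer serialized;
        try (DataOutputBuffer out = new DataOutputBuffer()) {
            UnfilteredPartitionIterators.serializerForIntraNode().serialize(iter, columnFilter, out, MessagingService.current_version);
            serialized = out.buffer();
        }
        try (DataInputBuffer in = new DataInputBuffer(serialized, true)) {
            return UnfilteredPartitionIterators.serializerForIntraNode().deserialize(in, MessagingService.current_version, metadata, columnFilter, DeserializationHelper.Flag.LOCAL);
        }
    }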