Use of org.apache.cassandra.db.partitions.ImmutableBTreePartition in project cassandra by apache.
From the class DynamicCompositeTypeTest, method testFullRound:
@Test
public void testFullRound() throws Exception {
    Keyspace keyspace = Keyspace.open(KEYSPACE1);
    ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(CF_STANDARDDYNCOMPOSITE);

    // build five dynamic composite clustering names; the comparator should order them cname1..cname5
    ByteBuffer cname1 = createDynamicCompositeKey("test1", null, -1, false);
    ByteBuffer cname2 = createDynamicCompositeKey("test1", uuids[0], 24, false);
    ByteBuffer cname3 = createDynamicCompositeKey("test1", uuids[0], 42, false);
    ByteBuffer cname4 = createDynamicCompositeKey("test2", uuids[0], -1, false);
    ByteBuffer cname5 = createDynamicCompositeKey("test2", uuids[1], 42, false);

    ByteBuffer key = ByteBufferUtil.bytes("k");
    long ts = FBUtilities.timestampMicros();

    // insert the rows out of order; the read below must return them sorted by the comparator
    new RowUpdateBuilder(cfs.metadata(), ts, key).clustering(cname5).add("val", "cname5").build().applyUnsafe();
    new RowUpdateBuilder(cfs.metadata(), ts, key).clustering(cname1).add("val", "cname1").build().applyUnsafe();
    new RowUpdateBuilder(cfs.metadata(), ts, key).clustering(cname4).add("val", "cname4").build().applyUnsafe();
    new RowUpdateBuilder(cfs.metadata(), ts, key).clustering(cname2).add("val", "cname2").build().applyUnsafe();
    new RowUpdateBuilder(cfs.metadata(), ts, key).clustering(cname3).add("val", "cname3").build().applyUnsafe();

    ColumnMetadata cdef = cfs.metadata().getColumn(ByteBufferUtil.bytes("val"));

    // read the whole partition back and verify the clustering order
    ImmutableBTreePartition readPartition = Util.getOnlyPartitionUnfiltered(Util.cmd(cfs, key).build());
    Iterator<Row> iter = readPartition.iterator();
    compareValues(iter.next().getCell(cdef), "cname1");
    compareValues(iter.next().getCell(cdef), "cname2");
    compareValues(iter.next().getCell(cdef), "cname3");
    compareValues(iter.next().getCell(cdef), "cname4");
    compareValues(iter.next().getCell(cdef), "cname5");
}
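compareValues and createDynamicCompositeKey are private helpers defined elsewhere in DynamicCompositeTypeTest. A minimal sketch of what compareValues plausibly does, assuming the cell payload is the UTF-8 marker string written by the RowUpdateBuilder above (the helper's actual body may differ):

private void compareValues(Cell<?> c, String expected) throws CharacterCodingException {
    // decode the cell payload and compare it against the marker string stored in "val"
    assertEquals(expected, ByteBufferUtil.string(c.buffer()));
}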
Use of org.apache.cassandra.db.partitions.ImmutableBTreePartition in project cassandra by apache.
From the class SSTableRewriterTest, method validateKeys:
private void validateKeys(Keyspace ks) {
    for (int i = 0; i < 100; i++) {
        DecoratedKey key = Util.dk(Integer.toString(i));
        ImmutableBTreePartition partition = Util.getOnlyPartitionUnfiltered(Util.cmd(ks.getColumnFamilyStore(CF), key).build());
        assertTrue(partition != null && partition.rowCount() > 0);
    }
}
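Util.dk is another test helper; a plausible sketch, assuming it simply decorates the raw key bytes with the partitioner configured for the test (the actual helper may differ):

static DecoratedKey dk(String key) {
    // attach the partitioner's token to the raw bytes so the read command targets the right partition
    return DatabaseDescriptor.getPartitioner().decorateKey(ByteBufferUtil.bytes(key));
}

Note that Util.getOnlyPartitionUnfiltered is expected to fail the test on its own when the read returns no partition, so the null check in the assertion above is defensive rather than load-bearing.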
Use of org.apache.cassandra.db.partitions.ImmutableBTreePartition in project cassandra by apache.
From the class RowIndexEntryTest, method testSerializedSize:
@Test
public void testSerializedSize() throws Throwable {
    String tableName = createTable("CREATE TABLE %s (a int, b text, c int, PRIMARY KEY(a, b))");
    ColumnFamilyStore cfs = Keyspace.open(KEYSPACE).getColumnFamilyStore(tableName);

    Pre_C_11206_RowIndexEntry simple = new Pre_C_11206_RowIndexEntry(123);

    DataOutputBuffer buffer = new DataOutputBuffer();
    SerializationHeader header = new SerializationHeader(true, cfs.metadata(), cfs.metadata().regularAndStaticColumns(), EncodingStats.NO_STATS);
    Pre_C_11206_RowIndexEntry.Serializer serializer = new Pre_C_11206_RowIndexEntry.Serializer(cfs.metadata(), BigFormat.latestVersion, header);

    // an entry without a column index must serialize to exactly its reported size
    serializer.serialize(simple, buffer);
    assertEquals(buffer.getLength(), serializer.serializedSize(simple));

    // write enough rows to ensure we get a few column index entries
    for (int i = 0; i <= DatabaseDescriptor.getColumnIndexSize() / 4; i++)
        execute("INSERT INTO %s (a, b, c) VALUES (?, ?, ?)", 0, String.valueOf(i), i);

    ImmutableBTreePartition partition = Util.getOnlyPartitionUnfiltered(Util.cmd(cfs).build());

    File tempFile = File.createTempFile("row_index_entry_test", null);
    tempFile.deleteOnExit();
    SequentialWriter writer = new SequentialWriter(tempFile);
    ColumnIndex columnIndex = RowIndexEntryTest.ColumnIndex.writeAndBuildIndex(partition.unfilteredIterator(), writer, header, Collections.emptySet(), BigFormat.latestVersion);
    Pre_C_11206_RowIndexEntry withIndex = Pre_C_11206_RowIndexEntry.create(0xdeadbeef, DeletionTime.LIVE, columnIndex);
    IndexInfo.Serializer indexSerializer = IndexInfo.serializer(BigFormat.latestVersion, header);

    // sanity check: the inserts above must have produced several index blocks
    assertTrue(columnIndex.columnsIndex.size() >= 3);

    buffer = new DataOutputBuffer();
    serializer.serialize(withIndex, buffer);
    assertEquals(buffer.getLength(), serializer.serializedSize(withIndex));

    // serialization check
    ByteBuffer bb = buffer.buffer();
    DataInputBuffer input = new DataInputBuffer(bb, false);
    serializationCheck(withIndex, indexSerializer, bb, input);

    // test with an output stream that doesn't support a file pointer
    buffer = new DataOutputBuffer() {
        @Override
        public boolean hasPosition() {
            return false;
        }

        @Override
        public long position() {
            throw new UnsupportedOperationException();
        }
    };
    serializer.serialize(withIndex, buffer);
    bb = buffer.buffer();
    input = new DataInputBuffer(bb, false);
    serializationCheck(withIndex, indexSerializer, bb, input);

    // skip() must consume the entire serialized entry
    bb = buffer.buffer();
    input = new DataInputBuffer(bb, false);
    Pre_C_11206_RowIndexEntry.Serializer.skip(input, BigFormat.latestVersion);
    Assert.assertEquals(0, bb.remaining());
}
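Pre_C_11206_RowIndexEntry is, as its name suggests, the test's copy of the RowIndexEntry format from before CASSANDRA-11206, kept so the old serialization can be checked against the current code. The size-matches-bytes-written invariant the test repeats could be factored into a helper; a hypothetical sketch, not part of the actual test:

// Hypothetical helper: serialize into a fresh buffer and verify that
// serializedSize() reported exactly the number of bytes written.
private static void assertSizeMatchesBytesWritten(Pre_C_11206_RowIndexEntry entry,
                                                  Pre_C_11206_RowIndexEntry.Serializer serializer) throws IOException {
    try (DataOutputBuffer out = new DataOutputBuffer()) {
        serializer.serialize(entry, out);
        assertEquals(out.getLength(), serializer.serializedSize(entry));
    }
}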
Use of org.apache.cassandra.db.partitions.ImmutableBTreePartition in project cassandra by apache.
From the class CompositeTypeTest, method testFullRound:
@Test
public void testFullRound() throws Exception {
    Keyspace keyspace = Keyspace.open(KEYSPACE1);
    ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(CF_STANDARDCOMPOSITE);

    // build five composite clustering names; the comparator should order them cname1..cname5
    ByteBuffer cname1 = createCompositeKey("test1", null, -1, false);
    ByteBuffer cname2 = createCompositeKey("test1", uuids[0], 24, false);
    ByteBuffer cname3 = createCompositeKey("test1", uuids[0], 42, false);
    ByteBuffer cname4 = createCompositeKey("test2", uuids[0], -1, false);
    ByteBuffer cname5 = createCompositeKey("test2", uuids[1], 42, false);

    ByteBuffer key = ByteBufferUtil.bytes("k");
    long ts = FBUtilities.timestampMicros();

    // insert the rows out of order; the read below must return them sorted by the comparator
    new RowUpdateBuilder(cfs.metadata(), ts, key).clustering(cname5).add("val", "cname5").build().applyUnsafe();
    new RowUpdateBuilder(cfs.metadata(), ts, key).clustering(cname1).add("val", "cname1").build().applyUnsafe();
    new RowUpdateBuilder(cfs.metadata(), ts, key).clustering(cname4).add("val", "cname4").build().applyUnsafe();
    new RowUpdateBuilder(cfs.metadata(), ts, key).clustering(cname2).add("val", "cname2").build().applyUnsafe();
    new RowUpdateBuilder(cfs.metadata(), ts, key).clustering(cname3).add("val", "cname3").build().applyUnsafe();

    ColumnMetadata cdef = cfs.metadata().getColumn(ByteBufferUtil.bytes("val"));

    // read the whole partition back and verify the clustering order
    ImmutableBTreePartition readPartition = Util.getOnlyPartitionUnfiltered(Util.cmd(cfs, key).build());
    Iterator<Row> iter = readPartition.iterator();
    compareValues(iter.next().getCell(cdef), "cname1");
    compareValues(iter.next().getCell(cdef), "cname2");
    compareValues(iter.next().getCell(cdef), "cname3");
    compareValues(iter.next().getCell(cdef), "cname4");
    compareValues(iter.next().getCell(cdef), "cname5");
}
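This mirrors the DynamicCompositeTypeTest example above, but against a fixed CompositeType comparator. For reference, a hypothetical sketch of how a single composite component is laid out on the wire, which is what a helper like createCompositeKey has to produce for each component (the real helper may build the buffer differently):

// Each component of a CompositeType value is encoded as:
//   [2-byte big-endian length][component bytes][1-byte end-of-component marker]
private static ByteBuffer serializeComponent(ByteBuffer component, byte endOfComponent) {
    ByteBuffer out = ByteBuffer.allocate(2 + component.remaining() + 1);
    out.putShort((short) component.remaining());
    out.put(component.duplicate());
    out.put(endOfComponent); // 0 for a full name; non-zero values mark slice bounds
    out.flip();
    return out;
}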
Use of org.apache.cassandra.db.partitions.ImmutableBTreePartition in project cassandra by apache.
From the class CompactionsPurgeTest, method testMajorCompactionPurgeTombstonesWithMaxTimestamp:
@Test
public void testMajorCompactionPurgeTombstonesWithMaxTimestamp() {
    CompactionManager.instance.disableAutoCompaction();
    Keyspace keyspace = Keyspace.open(KEYSPACE1);
    String cfName = "Standard1";
    ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(cfName);
    String key = "key1";

    // inserts
    for (int i = 0; i < 10; i++) {
        RowUpdateBuilder builder = new RowUpdateBuilder(cfs.metadata(), 0, key);
        builder.clustering(String.valueOf(i)).add("val", ByteBufferUtil.EMPTY_BYTE_BUFFER).build().applyUnsafe();
    }
    cfs.forceBlockingFlush();

    // deletes, with the maximum possible timestamp
    for (int i = 0; i < 10; i++) {
        RowUpdateBuilder.deleteRow(cfs.metadata(), Long.MAX_VALUE, key, String.valueOf(i)).applyUnsafe();
    }
    cfs.forceBlockingFlush();

    // major compact - tombstones should be purged
    FBUtilities.waitOnFutures(CompactionManager.instance.submitMaximal(cfs, Integer.MAX_VALUE, false));

    // resurrect one row; the tombstones are gone, so this write is visible despite its low timestamp
    RowUpdateBuilder builder = new RowUpdateBuilder(cfs.metadata(), 2, key);
    builder.clustering(String.valueOf(5)).add("val", ByteBufferUtil.EMPTY_BYTE_BUFFER).build().applyUnsafe();
    cfs.forceBlockingFlush();

    cfs.invalidateCachedPartition(dk(key));
    ImmutableBTreePartition partition = Util.getOnlyPartitionUnfiltered(Util.cmd(cfs, key).build());
    // only the resurrected row should remain visible
    assertEquals(1, partition.rowCount());
}
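The row count alone confirms the purge; a hypothetical extra assertion (not in the original test) could also pin down that the surviving row is the resurrected clustering "5", assuming the clustering column holds an ASCII/UTF-8 string and CharacterCodingException is handled by the enclosing method:

// Hypothetical follow-up check: the single remaining row is the one re-inserted above
Row row = partition.iterator().next();
assertEquals("5", ByteBufferUtil.string(row.clustering().bufferAt(0)));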