Use of org.apache.cassandra.schema.ColumnMetadata in project cassandra by apache.
The class PartitionImplementationTest, method makeRow: builds a regular row with a single live cell in the "col" column.
Row makeRow(Clustering clustering, String colValue) {
    ColumnMetadata defCol = metadata.getColumn(new ColumnIdentifier("col", true));
    Row.Builder row = BTreeRow.unsortedBuilder(TIMESTAMP);
    row.newRow(clustering);
    row.addCell(BufferCell.live(defCol, TIMESTAMP, ByteBufferUtil.bytes(colValue)));
    return row.build();
}
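For context, a hedged usage sketch: the clustering(int) helper and the literal values below are illustrative (clustering is visible in testIter further down; the argument 5 is arbitrary).

// Usage sketch, assuming the surrounding test class's clustering(int) helper.
Row row = makeRow(clustering(5), "some value");
assert !row.isStatic();            // a regular row, not the static row
assert row.columns().size() == 1;  // only "col" is populated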
Use of org.apache.cassandra.schema.ColumnMetadata in project cassandra by apache.
The class PartitionImplementationTest, method makeStaticRow: builds the partition's static row with a single live cell in the "static_col" column.
Row makeStaticRow() {
    ColumnMetadata defCol = metadata.getColumn(new ColumnIdentifier("static_col", true));
    Row.Builder row = BTreeRow.unsortedBuilder(TIMESTAMP);
    row.newRow(Clustering.STATIC_CLUSTERING);
    row.addCell(BufferCell.live(defCol, TIMESTAMP, ByteBufferUtil.bytes("static value")));
    return row.build();
}
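The only difference from makeRow is the sentinel clustering: Clustering.STATIC_CLUSTERING marks this as the partition's single static row. A small sketch of what that implies (the assertion is illustrative):

Row staticRow = makeStaticRow();
assert staticRow.isStatic();  // its clustering is Clustering.STATIC_CLUSTERING
// testIter below retrieves it with partition.getRow(Clustering.STATIC_CLUSTERING)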
Use of org.apache.cassandra.schema.ColumnMetadata in project cassandra by apache.
The class PartitionImplementationTest, method testIter: materializes the supplied content into an ImmutableBTreePartition and checks every iteration path (plain, column-filtered, sliced, reversed, search) against the sorted reference content.
private void testIter(Supplier<Collection<? extends Unfiltered>> contentSupplier, Row staticRow) {
    NavigableSet<Clusterable> sortedContent = new TreeSet<Clusterable>(metadata.comparator);
    sortedContent.addAll(contentSupplier.get());
    AbstractBTreePartition partition;
    try (UnfilteredRowIterator iter = new Util.UnfilteredSource(metadata, Util.dk("pk"), staticRow, sortedContent.stream().map(x -> (Unfiltered) x).iterator())) {
        partition = ImmutableBTreePartition.create(iter);
    }
    ColumnMetadata defCol = metadata.getColumn(new ColumnIdentifier("col", true));
    ColumnFilter cf = ColumnFilter.selectionBuilder().add(defCol).build();
    Function<? super Clusterable, ? extends Clusterable> colFilter = x -> x instanceof Row ? ((Row) x).filter(cf, metadata) : x;
    Slices slices = Slices.with(metadata.comparator, Slice.make(clustering(KEY_RANGE / 4), clustering(KEY_RANGE * 3 / 4)));
    Slices multiSlices = makeSlices();
    // lastRow
    assertRowsEqual((Row) get(sortedContent.descendingSet(), x -> x instanceof Row), partition.lastRow());
    // get(static)
    assertRowsEqual(staticRow, partition.getRow(Clustering.STATIC_CLUSTERING));
    // get
    for (int i = 0; i < KEY_RANGE; ++i) {
        Clustering cl = clustering(i);
        assertRowsEqual(getRow(sortedContent, cl), partition.getRow(cl));
    }
    // isEmpty
    assertEquals(sortedContent.isEmpty() && staticRow == null, partition.isEmpty());
    // hasRows
    assertEquals(sortedContent.stream().anyMatch(x -> x instanceof Row), partition.hasRows());
    // iterator
    assertIteratorsEqual(sortedContent.stream().filter(x -> x instanceof Row).iterator(), partition.iterator());
    // unfiltered iterator
    assertIteratorsEqual(sortedContent.iterator(), partition.unfilteredIterator());
    // unfiltered iterator with explicit filter and slices
    assertIteratorsEqual(sortedContent.iterator(), partition.unfilteredIterator(ColumnFilter.all(metadata), Slices.ALL, false));
    // column-filtered
    assertIteratorsEqual(sortedContent.stream().map(colFilter).iterator(), partition.unfilteredIterator(cf, Slices.ALL, false));
    // sliced
    assertIteratorsEqual(slice(sortedContent, slices.get(0)), partition.unfilteredIterator(ColumnFilter.all(metadata), slices, false));
    assertIteratorsEqual(streamOf(slice(sortedContent, slices.get(0))).map(colFilter).iterator(), partition.unfilteredIterator(cf, slices, false));
    // randomly multi-sliced
    assertIteratorsEqual(slice(sortedContent, multiSlices), partition.unfilteredIterator(ColumnFilter.all(metadata), multiSlices, false));
    assertIteratorsEqual(streamOf(slice(sortedContent, multiSlices)).map(colFilter).iterator(), partition.unfilteredIterator(cf, multiSlices, false));
    // reversed
    assertIteratorsEqual(sortedContent.descendingIterator(), partition.unfilteredIterator(ColumnFilter.all(metadata), Slices.ALL, true));
    assertIteratorsEqual(sortedContent.descendingSet().stream().map(colFilter).iterator(), partition.unfilteredIterator(cf, Slices.ALL, true));
    assertIteratorsEqual(invert(slice(sortedContent, slices.get(0))), partition.unfilteredIterator(ColumnFilter.all(metadata), slices, true));
    assertIteratorsEqual(streamOf(invert(slice(sortedContent, slices.get(0)))).map(colFilter).iterator(), partition.unfilteredIterator(cf, slices, true));
    assertIteratorsEqual(invert(slice(sortedContent, multiSlices)), partition.unfilteredIterator(ColumnFilter.all(metadata), multiSlices, true));
    assertIteratorsEqual(streamOf(invert(slice(sortedContent, multiSlices))).map(colFilter).iterator(), partition.unfilteredIterator(cf, multiSlices, true));
    // search iterator
    testSearchIterator(sortedContent, partition, ColumnFilter.all(metadata), false);
    testSearchIterator(sortedContent, partition, cf, false);
    testSearchIterator(sortedContent, partition, ColumnFilter.all(metadata), true);
    testSearchIterator(sortedContent, partition, cf, true);
    // sliceable iterator
    testSlicingOfIterators(sortedContent, partition, ColumnFilter.all(metadata), false);
    testSlicingOfIterators(sortedContent, partition, cf, false);
    testSlicingOfIterators(sortedContent, partition, ColumnFilter.all(metadata), true);
    testSlicingOfIterators(sortedContent, partition, cf, true);
}
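A hypothetical invocation, using only the helpers already shown (makeRow, makeStaticRow, clustering, KEY_RANGE); the single-row content supplier is illustrative and not one of the test's real content generators:

// Hypothetical driver for testIter; Collections is java.util.Collections.
Row staticRow = makeStaticRow();
testIter(() -> Collections.singletonList(makeRow(clustering(KEY_RANGE / 2), "mid")),
         staticRow);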
Use of org.apache.cassandra.schema.ColumnMetadata in project cassandra by apache.
The class HintTest, method readPartition: reads back a single partition, restricted to the given columns, via the Util query helpers.
private static FilteredPartition readPartition(String key, String table, RegularAndStaticColumns columns) {
    String[] columnNames = new String[columns.size()];
    int i = 0;
    for (ColumnMetadata column : columns)
        columnNames[i++] = column.name.toString();
    return Util.getOnlyPartition(Util.cmd(cfs(table), key).columns(columnNames).build());
}
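A hedged usage sketch; the key and table names are placeholders (not identifiers from HintTest), and columns would typically come from the mutation under test:

// Hypothetical call: read back the partition a hint was applied to.
FilteredPartition partition = readPartition("key-1", "table_a", columns);
Assert.assertFalse(partition.isEmpty());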
Use of org.apache.cassandra.schema.ColumnMetadata in project cassandra by apache.
The class PerSSTableIndexWriterTest, method testSparse: exercises segment flushing for a sparse SASI index and verifies that the per-segment files are cleaned up after complete().
@Test
public void testSparse() throws Exception {
    final String columnName = "timestamp";
    ColumnFamilyStore cfs = Keyspace.open(KS_NAME).getColumnFamilyStore(CF_NAME);
    ColumnMetadata column = cfs.metadata().getColumn(UTF8Type.instance.decompose(columnName));
    SASIIndex sasi = (SASIIndex) cfs.indexManager.getIndexByName(cfs.name + "_" + columnName);
    File directory = cfs.getDirectories().getDirectoryForNewSSTables();
    Descriptor descriptor = cfs.newSSTableDescriptor(directory);
    PerSSTableIndexWriter indexWriter = (PerSSTableIndexWriter) sasi.getFlushObserver(descriptor, OperationType.FLUSH);
    final long now = System.currentTimeMillis();
    indexWriter.begin();
    indexWriter.indexes.put(column, indexWriter.newIndex(sasi.getIndex()));
    populateSegment(cfs.metadata(), indexWriter.getIndex(column), new HashMap<Long, Set<Integer>>() {
        {
            put(now, new HashSet<>(Arrays.asList(0, 1)));
            put(now + 1, new HashSet<>(Arrays.asList(2, 3)));
            put(now + 2, new HashSet<>(Arrays.asList(4, 5, 6, 7, 8, 9)));
        }
    });
    Callable<OnDiskIndex> segmentBuilder = indexWriter.getIndex(column).scheduleSegmentFlush(false);
    Assert.assertNull(segmentBuilder.call());
    PerSSTableIndexWriter.Index index = indexWriter.getIndex(column);
    Random random = ThreadLocalRandom.current();
    Set<String> segments = new HashSet<>();
    // now test multiple correct segments that yield an incorrect final segment
    for (int i = 0; i < 3; i++) {
        populateSegment(cfs.metadata(), index, new HashMap<Long, Set<Integer>>() {
            {
                put(now, new HashSet<>(Arrays.asList(random.nextInt(), random.nextInt(), random.nextInt())));
                put(now + 1, new HashSet<>(Arrays.asList(random.nextInt(), random.nextInt(), random.nextInt())));
                put(now + 2, new HashSet<>(Arrays.asList(random.nextInt(), random.nextInt(), random.nextInt())));
            }
        });
        try {
            // flush each of the new segments; they should all succeed
            OnDiskIndex segment = index.scheduleSegmentFlush(false).call();
            index.segments.add(Futures.immediateFuture(segment));
            segments.add(segment.getIndexPath());
        } catch (Exception | FSError e) {
            e.printStackTrace();
            Assert.fail();
        }
    }
    // make sure that all of the segments are present on the filesystem
    for (String segment : segments)
        Assert.assertTrue(new File(segment).exists());
    indexWriter.complete();
    // make sure that the individual segments have been cleaned up
    for (String segment : segments)
        Assert.assertFalse(new File(segment).exists());
    // and the combined index doesn't exist either
    Assert.assertFalse(new File(index.outputFile).exists());
}
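One detail worth noting: scheduleSegmentFlush returns a Callable, and its call() result can be null, which the test relies on above for the first over-populated sparse segment. A condensed sketch of the guarded flush pattern, using only names that appear in the snippet:

// Condensed flush pattern; `index` is obtained from indexWriter.getIndex(column).
Callable<OnDiskIndex> flushTask = index.scheduleSegmentFlush(false);
OnDiskIndex segment = flushTask.call();  // null when the segment was not flushed
if (segment != null) {
    index.segments.add(Futures.immediateFuture(segment));
    Assert.assertTrue(new File(segment.getIndexPath()).exists());
}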