Usage example of org.apache.cassandra.db.DecoratedKey in the Apache Cassandra project: class SerializationsTest, method testBloomFilterTable.
/**
 * Deserializes a bloom filter from the given on-disk file and verifies its contract:
 * all ten originally-inserted keys must be reported present, and the false-positive
 * rate over one million never-inserted keys must not exceed 1.1%.
 *
 * @param file path to the serialized bloom filter fixture
 * @throws Exception on any I/O or deserialization failure
 */
private static void testBloomFilterTable(String file) throws Exception {
    Murmur3Partitioner partitioner = new Murmur3Partitioner();
    try (DataInputStream stream = new DataInputStream(new FileInputStream(new File(file)));
         IFilter bloomFilter = FilterFactory.deserialize(stream, true)) {
        // Keys 1..10 were inserted when the fixture was generated; a bloom filter
        // never yields false negatives, so each must be reported present.
        for (int key = 1; key <= 10; key++) {
            DecoratedKey dk = partitioner.decorateKey(Int32Type.instance.decompose(key));
            Assert.assertTrue(bloomFilter.isPresent(dk));
        }
        // Probe 1,000,000 keys (11..1000010) that were never inserted and count hits;
        // every hit is by definition a false positive.
        int falsePositives = 0;
        for (int key = 11; key <= 1000010; key++) {
            DecoratedKey dk = partitioner.decorateKey(Int32Type.instance.decompose(key));
            if (bloomFilter.isPresent(dk))
                falsePositives++;
        }
        double falsePositiveRate = falsePositives / 1000000d;
        Assert.assertTrue(falsePositiveRate <= 0.011d);
    }
}
Usage example of org.apache.cassandra.db.DecoratedKey in the Apache Cassandra project: class ReplicaFilteringProtection, method mergeController.
/**
 * Returns a merge listener that skips the merged rows for which any of the replicas doesn't have a version,
 * pessimistically assuming that they are outdated. It is intended to be used during a first merge of per-replica
 * query results to ensure we fetch enough results from the replicas to ensure we don't miss any potentially
 * outdated result.
 * <p>
 * The listener will track both the accepted data and the primary keys of the rows that are considered as outdated.
 * That way, once the query results would have been merged using this listener, further calls to
 * {@link #queryProtectedPartitions(PartitionIterator, int)} will use the collected data to return a copy of the
 * data originally collected from the specified replica, completed with the potentially outdated rows.
 */
UnfilteredPartitionIterators.MergeListener mergeController() {
return new UnfilteredPartitionIterators.MergeListener() {
@Override
public void close() {
// If we hit the failure threshold before consuming a single partition, record the current rows cached.
tableMetrics.rfpRowsCachedPerQuery.update(Math.max(currentRowsCached, maxRowsCached));
}
@Override
public UnfilteredRowIterators.MergeListener getRowMergeListener(DecoratedKey partitionKey, List<UnfilteredRowIterator> versions) {
// One builder per replica source: each caches that replica's view of this partition
// so the original per-replica iterator can be regenerated later.
List<PartitionBuilder> builders = new ArrayList<>(sources.size());
RegularAndStaticColumns columns = columns(versions);
EncodingStats stats = EncodingStats.merge(versions, NULL_TO_NO_STATS);
for (int i = 0; i < sources.size(); i++) builders.add(i, new PartitionBuilder(partitionKey, sources.get(i), columns, stats));
return new UnfilteredRowIterators.MergeListener() {
@Override
public void onMergedPartitionLevelDeletion(DeletionTime mergedDeletion, DeletionTime[] versions) {
// cache the deletion time versions to be able to regenerate the original row iterator
for (int i = 0; i < versions.length; i++) builders.get(i).setDeletionTime(versions[i]);
}
@Override
public Row onMergedRows(Row merged, Row[] versions) {
// cache the row versions to be able to regenerate the original row iterator
for (int i = 0; i < versions.length; i++) builders.get(i).addRow(versions[i]);
if (merged.isEmpty())
return merged;
boolean isPotentiallyOutdated = false;
boolean isStatic = merged.isStatic();
// A replica with no version of this row (or, for static rows, an empty one) may be
// holding an outdated value; mark the merged row to be fetched from that replica.
for (int i = 0; i < versions.length; i++) {
Row version = versions[i];
if (version == null || (isStatic && version.isEmpty())) {
isPotentiallyOutdated = true;
builders.get(i).addToFetch(merged);
}
}
// Suppress potentially outdated rows from this first merge so the caller keeps reading
// enough data to ultimately fulfill the query limit. (NOTE(review): the original comment
// here appeared truncated; intent inferred from the surrounding logic — confirm.)
return isPotentiallyOutdated ? null : merged;
}
@Override
public void onMergedRangeTombstoneMarkers(RangeTombstoneMarker merged, RangeTombstoneMarker[] versions) {
// cache the marker versions to be able to regenerate the original row iterator
for (int i = 0; i < versions.length; i++) builders.get(i).addRangeTombstoneMarker(versions[i]);
}
@Override
public void close() {
// Partition fully merged: hand each replica's cached builder over for later replay.
for (int i = 0; i < sources.size(); i++) originalPartitions.get(i).add(builders.get(i));
}
};
}
};
}
Usage example of org.apache.cassandra.db.DecoratedKey in the Apache Cassandra project: class CompactionIteratorTest, method iterate.
/**
 * Runs the given unfiltereds through a full compaction pass over a single partition
 * and drains every merged partition, verifying that iteration completes without error.
 *
 * @param unfiltereds the rows/markers to compact under the fixed partition key "key"
 */
private void iterate(Unfiltered... unfiltereds) {
    ColumnFamilyStore store = getCurrentColumnFamilyStore();
    DecoratedKey partitionKey = store.getPartitioner().decorateKey(ByteBufferUtil.bytes("key"));
    try (CompactionController controller = new CompactionController(store, Integer.MAX_VALUE);
         UnfilteredRowIterator input = rows(store.metadata(), partitionKey, false, unfiltereds);
         ISSTableScanner scanner = new Scanner(Collections.singletonList(input));
         CompactionIterator compaction = new CompactionIterator(OperationType.COMPACTION, Collections.singletonList(scanner), controller, FBUtilities.nowInSeconds(), null)) {
        while (compaction.hasNext()) {
            // Each merged partition must be closed after being fully consumed.
            try (UnfilteredRowIterator merged = compaction.next()) {
                while (merged.hasNext())
                    merged.next();
            }
        }
    }
}
Usage example of org.apache.cassandra.db.DecoratedKey in the Apache Cassandra project: class CompactionIteratorTest, method generateContent.
/**
 * Builds random partition content keyed by keys drawn (with replacement) from the given list.
 * Because draws may repeat, a later partition overwrites an earlier one with the same key,
 * so the resulting map can hold fewer than {@code pcount} entries.
 *
 * @param rand      randomness source for key selection and row generation
 * @param generator produces the unfiltereds for each partition
 * @param keys      candidate partition keys to sample from
 * @param pcount    number of partitions to generate (upper bound on map size)
 * @param rcount    rows to generate per partition
 * @return sorted map from partition key to its generated unfiltereds
 */
NavigableMap<DecoratedKey, List<Unfiltered>> generateContent(Random rand, UnfilteredRowsGenerator generator, List<DecoratedKey> keys, int pcount, int rcount) {
    NavigableMap<DecoratedKey, List<Unfiltered>> content = new TreeMap<>();
    for (int partition = 0; partition < pcount; ++partition) {
        DecoratedKey chosen = keys.get(rand.nextInt(keys.size()));
        content.put(chosen, generator.generateSource(rand, rcount, RANGE, NOW - 5, x -> NOW - 1));
    }
    return content;
}
Usage example of org.apache.cassandra.db.DecoratedKey in the Apache Cassandra project: class CompactionStrategyManagerTest, method createSSTableWithKey.
/**
 * Writes a single row under the zero-padded decorated key for {@code key}, flushes the
 * table, and returns the one sstable produced by that flush.
 *
 * @param keyspace keyspace containing the target table
 * @param table    table to write into and flush
 * @param key      integer key, formatted as a 4-digit zero-padded partition key
 * @return the sole sstable created by the flush
 */
private static SSTableReader createSSTableWithKey(String keyspace, String table, int key) {
    ColumnFamilyStore store = Keyspace.open(keyspace).getColumnFamilyStore(table);
    DecoratedKey decoratedKey = Util.dk(String.format("%04d", key));
    new RowUpdateBuilder(store.metadata(), System.currentTimeMillis(), decoratedKey.getKey())
            .clustering(Integer.toString(key))
            .add("val", "val")
            .build()
            .applyUnsafe();
    Set<SSTableReader> liveBefore = store.getLiveSSTables();
    store.forceBlockingFlush();
    Set<SSTableReader> liveAfter = store.getLiveSSTables();
    // The flush must have added exactly one new sstable; anything else is a test failure.
    return Iterables.getOnlyElement(Sets.difference(liveAfter, liveBefore));
}
Aggregations