Use of org.apache.cassandra.dht.Range in project cassandra by apache: class AntiCompactionTest, method shouldSkipAntiCompactionForNonIntersectingRange.
@Test
public void shouldSkipAntiCompactionForNonIntersectingRange() throws InterruptedException, IOException {
    Keyspace keyspace = Keyspace.open(KEYSPACE1);
    ColumnFamilyStore store = keyspace.getColumnFamilyStore(CF);
    store.disableAutoCompaction();
    for (int table = 0; table < 10; table++) {
        generateSStable(store, Integer.toString(table));
    }
    Collection<SSTableReader> sstables = getUnrepairedSSTables(store);
    assertEquals(store.getLiveSSTables().size(), sstables.size());
    // a byte-ordered range that intersects none of the generated keys
    Range<Token> range = new Range<Token>(new BytesToken("-1".getBytes()), new BytesToken("-10".getBytes()));
    List<Range<Token>> ranges = Arrays.asList(range);
    UUID parentRepairSession = UUID.randomUUID();
    try (LifecycleTransaction txn = store.getTracker().tryModify(sstables, OperationType.ANTICOMPACTION);
         Refs<SSTableReader> refs = Refs.ref(sstables)) {
        CompactionManager.instance.performAnticompaction(store, ranges, refs, txn, 1, NO_PENDING_REPAIR, parentRepairSession);
    }
    // nothing intersected, so all sstables survive and none is marked repaired
    assertThat(store.getLiveSSTables().size(), is(10));
    assertThat(Iterables.get(store.getLiveSSTables(), 0).isRepaired(), is(false));
}
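The skip comes down to a pure Range intersection test: under the byte-ordered tokens used here, ("-1", "-10"] sits below every generated numeric key, so no sstable needs anticompacting. A minimal sketch of that check, assuming Range#intersects and the test's BytesToken (variable names are illustrative):

// Range bounds are (left, right]: left-exclusive, right-inclusive
Range<Token> repaired = new Range<>(new BytesToken("-1".getBytes()), new BytesToken("-10".getBytes()));
Range<Token> data = new Range<>(new BytesToken("0".getBytes()), new BytesToken("9".getBytes()));
boolean skip = !repaired.intersects(data); // true: the repaired range touches no data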
Use of org.apache.cassandra.dht.Range in project cassandra by apache: class CompactionManagerGetSSTablesForValidationTest, method registerRepair.
private void registerRepair(boolean incremental) throws Exception {
    sessionID = UUIDGen.getTimeUUID();
    // (MT, MT], where MT is the minimum token: a range covering the entire ring
    Range<Token> range = new Range<>(MT, MT);
    ActiveRepairService.instance.registerParentRepairSession(sessionID, coordinator, Lists.newArrayList(cfs), Sets.newHashSet(range), incremental, incremental ? System.currentTimeMillis() : ActiveRepairService.UNREPAIRED_SSTABLE, true);
    desc = new RepairJobDesc(sessionID, UUIDGen.getTimeUUID(), ks, tbl, Collections.singleton(range));
}
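The (MT, MT] range built above is the usual idiom for "the whole ring": a Range whose left and right bounds are equal is treated as wrapping the full ring, so every token falls inside it. A quick illustration, assuming Murmur3Partitioner and Range#contains:

IPartitioner p = Murmur3Partitioner.instance;
Token mt = p.getMinimumToken();
Range<Token> fullRing = new Range<>(mt, mt);
// a (min, min] range wraps all the way around, so it contains any token
assert fullRing.contains(p.getToken(ByteBufferUtil.bytes("any key")));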
Use of org.apache.cassandra.dht.Range in project cassandra by apache: class LegacySSTableTest, method streamLegacyTable.
private void streamLegacyTable(String tablePattern, String legacyVersion, String compactNameSuffix) throws Exception {
    String table = String.format(tablePattern, legacyVersion, compactNameSuffix);
    SSTableReader sstable = SSTableReader.open(getDescriptor(legacyVersion, table));
    IPartitioner p = sstable.getPartitioner();
    // split the ring at token("100"); the two halves together cover every token
    List<Range<Token>> ranges = new ArrayList<>();
    ranges.add(new Range<>(p.getMinimumToken(), p.getToken(ByteBufferUtil.bytes("100"))));
    ranges.add(new Range<>(p.getToken(ByteBufferUtil.bytes("100")), p.getMinimumToken()));
    ArrayList<StreamSession.SSTableStreamingSections> details = new ArrayList<>();
    details.add(new StreamSession.SSTableStreamingSections(sstable.ref(), sstable.getPositionsForRanges(ranges), sstable.estimatedKeysForRanges(ranges), sstable.getSSTableMetadata().repairedAt));
    new StreamPlan("LegacyStreamingTest").transferFiles(FBUtilities.getBroadcastAddress(), details).execute().get();
}
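Splitting the ring at token("100") into (min, "100"] and ("100", min] yields two complementary halves, so streaming both transfers the entire sstable. A small sketch of that complementarity, assuming Range#contains (names illustrative):

IPartitioner p = sstable.getPartitioner();
Token min = p.getMinimumToken();
Token split = p.getToken(ByteBufferUtil.bytes("100"));
Range<Token> lower = new Range<>(min, split); // (min, "100"]
Range<Token> upper = new Range<>(split, min); // ("100", end of ring]
Token probe = p.getToken(ByteBufferUtil.bytes("42"));
// every token lands in exactly one of the two halves
assert lower.contains(probe) ^ upper.contains(probe);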
Use of org.apache.cassandra.dht.Range in project cassandra by apache: class SSTableReaderTest, method testGetPositionsForRanges.
@Test
public void testGetPositionsForRanges() {
    Keyspace keyspace = Keyspace.open(KEYSPACE1);
    ColumnFamilyStore store = keyspace.getColumnFamilyStore("Standard2");
    partitioner = store.getPartitioner();
    // insert data and compact to a single sstable
    CompactionManager.instance.disableAutoCompaction();
    for (int j = 0; j < 10; j++) {
        new RowUpdateBuilder(store.metadata(), j, String.valueOf(j)).clustering("0").add("val", ByteBufferUtil.EMPTY_BYTE_BUFFER).build().applyUnsafe();
    }
    store.forceBlockingFlush();
    CompactionManager.instance.performMaximal(store, false);
    List<Range<Token>> ranges = new ArrayList<Range<Token>>();
    // 1 key
    ranges.add(new Range<>(t(0), t(1)));
    // 2 keys
    ranges.add(new Range<>(t(2), t(4)));
    // wrapping range from key to end
    ranges.add(new Range<>(t(6), partitioner.getMinimumToken()));
    // empty range (should be ignored)
    ranges.add(new Range<>(t(9), t(91)));
    // confirm that positions increase continuously
    SSTableReader sstable = store.getLiveSSTables().iterator().next();
    long previous = -1;
    for (Pair<Long, Long> section : sstable.getPositionsForRanges(ranges)) {
        assert previous <= section.left : previous + " ! < " + section.left;
        assert section.left < section.right : section.left + " ! < " + section.right;
        previous = section.right;
    }
}
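The third range above ends at the minimum token, which Range interprets as "to the end of the ring". More generally, a range whose right bound sorts at or below its left bound is a wrap-around range, and unwrap() splits it into non-wrapping pieces. A sketch using the test's t(...) helper and its byte-ordered tokens (assuming Range#isWrapAround and Range#unwrap):

// "8" sorts after "2" under byte-ordered tokens, so this range wraps past the ring minimum
Range<Token> wrapping = new Range<>(t(8), t(2));
assert wrapping.isWrapAround();
// unwrap() yields [(t(8), min], (min, t(2)]]
List<Range<Token>> parts = wrapping.unwrap();
assert parts.size() == 2;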
Use of org.apache.cassandra.dht.Range in project cassandra by apache: class SSTableRewriterTest, method getPositionsTest.
@Test
public void getPositionsTest() throws InterruptedException {
    Keyspace keyspace = Keyspace.open(KEYSPACE);
    ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(CF);
    truncate(cfs);
    SSTableReader s = writeFile(cfs, 1000);
    cfs.addSSTable(s);
    Set<SSTableReader> sstables = new HashSet<>(cfs.getLiveSSTables());
    assertEquals(1, sstables.size());
    int nowInSec = FBUtilities.nowInSeconds();
    boolean checked = false;
    try (AbstractCompactionStrategy.ScannerList scanners = cfs.getCompactionStrategyManager().getScanners(sstables);
         LifecycleTransaction txn = cfs.getTracker().tryModify(sstables, OperationType.UNKNOWN);
         SSTableRewriter writer = new SSTableRewriter(txn, 1000, 10000000, false);
         CompactionController controller = new CompactionController(cfs, sstables, cfs.gcBefore(nowInSec));
         CompactionIterator ci = new CompactionIterator(OperationType.COMPACTION, scanners.scanners, controller, nowInSec, UUIDGen.getTimeUUID())) {
        writer.switchWriter(getWriter(cfs, sstables.iterator().next().descriptor.directory, txn));
        while (ci.hasNext()) {
            UnfilteredRowIterator row = ci.next();
            writer.append(row);
            if (!checked && writer.currentWriter().getFilePointer() > 1500000) {
                checked = true;
                for (SSTableReader sstable : cfs.getLiveSSTables()) {
                    if (sstable.openReason == SSTableReader.OpenReason.EARLY) {
                        SSTableReader c = txn.current(sstables.iterator().next());
                        // a (min, min] range covers the full ring, i.e. the whole file
                        Collection<Range<Token>> r = Arrays.asList(new Range<>(cfs.getPartitioner().getMinimumToken(), cfs.getPartitioner().getMinimumToken()));
                        List<Pair<Long, Long>> tmplinkPositions = sstable.getPositionsForRanges(r);
                        List<Pair<Long, Long>> compactingPositions = c.getPositionsForRanges(r);
                        assertEquals(1, tmplinkPositions.size());
                        assertEquals(1, compactingPositions.size());
                        assertEquals(0, tmplinkPositions.get(0).left.longValue());
                        // make sure we have no overlap between the early opened file and the compacting one:
                        assertEquals(tmplinkPositions.get(0).right.longValue(), compactingPositions.get(0).left.longValue());
                        assertEquals(c.uncompressedLength(), compactingPositions.get(0).right.longValue());
                    }
                }
            }
        }
        assertTrue(checked);
        writer.finish();
    }
    LifecycleTransaction.waitForDeletions();
    assertEquals(1, assertFileCounts(sstables.iterator().next().descriptor.directory.list()));
    validateCFS(cfs);
    truncate(cfs);
}
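The full-ring (min, min] range built inside the loop asks each reader for the positions covering its whole file, which is what makes the no-overlap assertion possible: the early-opened reader covers the file from position 0 up to some boundary, and the still-compacting original picks up exactly at that boundary. A condensed sketch of the invariant, with earlyReader and compactingReader as hypothetical stand-ins for the two readers above:

Token min = cfs.getPartitioner().getMinimumToken();
Collection<Range<Token>> fullRing = Collections.singleton(new Range<>(min, min));
Pair<Long, Long> early = earlyReader.getPositionsForRanges(fullRing).get(0);       // earlyReader is hypothetical
Pair<Long, Long> compacting = compactingReader.getPositionsForRanges(fullRing).get(0); // compactingReader is hypothetical
// contiguous, non-overlapping coverage: [0, boundary) then [boundary, length)
assert early.left == 0L && early.right.longValue() == compacting.left.longValue();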