Use of org.apache.cassandra.locator.RangesAtEndpoint in project cassandra by apache.
Class CompactionManager, method validateSSTableBoundsForAnticompaction.
static void validateSSTableBoundsForAnticompaction(UUID sessionID, Collection<SSTableReader> sstables, RangesAtEndpoint ranges) {
    List<Range<Token>> normalizedRanges = Range.normalize(ranges.ranges());
    for (SSTableReader sstable : sstables) {
        Bounds<Token> bounds = new Bounds<>(sstable.first.getToken(), sstable.last.getToken());
        if (!Iterables.any(normalizedRanges, r -> (r.contains(bounds.left) && r.contains(bounds.right)) || r.intersects(bounds))) {
            // this should never happen - in PendingAntiCompaction#getSSTables we select all sstables
            // that intersect the repaired ranges, and that can't have changed here
            String message = String.format("%s SSTable %s (%s) does not intersect repaired ranges %s, this sstable should not have been included.", PreviewKind.NONE.logPrefix(sessionID), sstable, bounds, normalizedRanges);
            logger.error(message);
            throw new IllegalStateException(message);
        }
    }
}
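For illustration, here is a minimal, self-contained sketch of the containment-or-intersection test the method applies per sstable. The class name BoundsCheckSketch and all token values are invented for this example; only the Range and Bounds calls mirror the snippet above.

import org.apache.cassandra.dht.Bounds;
import org.apache.cassandra.dht.Murmur3Partitioner.LongToken;
import org.apache.cassandra.dht.Range;
import org.apache.cassandra.dht.Token;

public class BoundsCheckSketch {
    public static void main(String[] args) {
        // An sstable whose keys span tokens [100, 200] ...
        Bounds<Token> bounds = new Bounds<>(new LongToken(100), new LongToken(200));
        // ... and a repaired range (0, 150] overlap, so validation passes for this range.
        Range<Token> repaired = new Range<>(new LongToken(0), new LongToken(150));
        boolean covered = (repaired.contains(bounds.left) && repaired.contains(bounds.right))
                          || repaired.intersects(bounds);
        System.out.println(covered); // true: the sstable intersects the repaired range
    }
}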
Use of org.apache.cassandra.locator.RangesAtEndpoint in project cassandra by apache.
Class CompactionManager, method forceUserDefinedCleanup.
public void forceUserDefinedCleanup(String dataFiles) {
    String[] filenames = dataFiles.split(",");
    HashMap<ColumnFamilyStore, Descriptor> descriptors = Maps.newHashMap();
    for (String filename : filenames) {
        // extract keyspace and columnfamily name from filename
        Descriptor desc = Descriptor.fromFilename(filename.trim());
        if (Schema.instance.getTableMetadataRef(desc) == null) {
            logger.warn("Schema does not exist for file {}. Skipping.", filename);
            continue;
        }
        // group by keyspace/columnfamily
        ColumnFamilyStore cfs = Keyspace.open(desc.ksname).getColumnFamilyStore(desc.cfname);
        desc = cfs.getDirectories().find(new File(filename.trim()).name());
        if (desc != null)
            descriptors.put(cfs, desc);
    }
    if (!StorageService.instance.isJoined()) {
        logger.error("Cleanup cannot run before a node has joined the ring");
        return;
    }
    for (Map.Entry<ColumnFamilyStore, Descriptor> entry : descriptors.entrySet()) {
        ColumnFamilyStore cfs = entry.getKey();
        Keyspace keyspace = cfs.keyspace;
        final RangesAtEndpoint replicas = StorageService.instance.getLocalReplicas(keyspace.getName());
        final Set<Range<Token>> allRanges = replicas.ranges();
        final Set<Range<Token>> transientRanges = replicas.onlyTransient().ranges();
        boolean hasIndexes = cfs.indexManager.hasIndexes();
        SSTableReader sstable = lookupSSTable(cfs, entry.getValue());
        if (sstable == null) {
            logger.warn("Will not clean {}, it is not an active sstable", entry.getValue());
        } else {
            CleanupStrategy cleanupStrategy = CleanupStrategy.get(cfs, allRanges, transientRanges, sstable.isRepaired(), FBUtilities.nowInSeconds());
            try (LifecycleTransaction txn = cfs.getTracker().tryModify(sstable, OperationType.CLEANUP)) {
                doCleanupOne(cfs, txn, cleanupStrategy, allRanges, hasIndexes);
            } catch (IOException e) {
                logger.error("forceUserDefinedCleanup failed: {}", e.getLocalizedMessage());
            }
        }
    }
}
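As a minimal sketch (not code from the project), the full/transient split used above can be reproduced by hand-building a RangesAtEndpoint. The class name CleanupRangesSketch, the endpoint address, and the token values are invented, and the sketch assumes a Cassandra test classpath where these internals can be instantiated.

import org.apache.cassandra.dht.Murmur3Partitioner.LongToken;
import org.apache.cassandra.dht.Range;
import org.apache.cassandra.dht.Token;
import org.apache.cassandra.locator.InetAddressAndPort;
import org.apache.cassandra.locator.RangesAtEndpoint;
import org.apache.cassandra.locator.Replica;

public class CleanupRangesSketch {
    public static void main(String[] args) throws Exception {
        InetAddressAndPort self = InetAddressAndPort.getByName("127.0.0.1");
        RangesAtEndpoint.Builder builder = RangesAtEndpoint.builder(self);
        builder.add(Replica.fullReplica(self, new Range<>(new LongToken(0), new LongToken(100))));
        builder.add(Replica.transientReplica(self, new Range<>(new LongToken(100), new LongToken(200))));
        RangesAtEndpoint replicas = builder.build();
        // allRanges decides which keys a cleanup keeps at all ...
        System.out.println(replicas.ranges());                 // [(0,100], (100,200]]
        // ... while the transient subset gets the transient-aware CleanupStrategy.
        System.out.println(replicas.onlyTransient().ranges()); // [(100,200]]
    }
}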
Use of org.apache.cassandra.locator.RangesAtEndpoint in project cassandra by apache.
Class LocalSessions, method filterLocalRanges.
RangesAtEndpoint filterLocalRanges(String keyspace, Set<Range<Token>> ranges) {
    RangesAtEndpoint localRanges = StorageService.instance.getLocalReplicas(keyspace);
    RangesAtEndpoint.Builder builder = RangesAtEndpoint.builder(localRanges.endpoint());
    for (Range<Token> range : ranges) {
        for (Replica replica : localRanges) {
            if (replica.range().equals(range)) {
                builder.add(replica);
            } else if (replica.contains(range)) {
                builder.add(replica.decorateSubrange(range));
            }
        }
    }
    return builder.build();
}
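The decorateSubrange branch above narrows a local replica to just the requested range. A hypothetical sketch of that behavior, using only calls from the snippet plus invented endpoint and token values (SubrangeSketch is not a class in the codebase):

import org.apache.cassandra.dht.Murmur3Partitioner.LongToken;
import org.apache.cassandra.dht.Range;
import org.apache.cassandra.dht.Token;
import org.apache.cassandra.locator.InetAddressAndPort;
import org.apache.cassandra.locator.Replica;

public class SubrangeSketch {
    public static void main(String[] args) throws Exception {
        InetAddressAndPort self = InetAddressAndPort.getByName("127.0.0.1");
        // This node fully replicates (0, 100] ...
        Replica local = Replica.fullReplica(self, new Range<>(new LongToken(0), new LongToken(100)));
        // ... and repair asks about the strict subrange (10, 20].
        Range<Token> requested = new Range<>(new LongToken(10), new LongToken(20));
        if (local.contains(requested)) {
            // Same endpoint and full/transient status, scoped to the subrange.
            System.out.println(local.decorateSubrange(requested));
        }
    }
}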
Use of org.apache.cassandra.locator.RangesAtEndpoint in project cassandra by apache.
Class CassandraStreamManager, method createOutgoingStreams.
@Override
public Collection<OutgoingStream> createOutgoingStreams(StreamSession session, RangesAtEndpoint replicas, UUID pendingRepair, PreviewKind previewKind) {
    Refs<SSTableReader> refs = new Refs<>();
    try {
        final List<Range<PartitionPosition>> keyRanges = new ArrayList<>(replicas.size());
        for (Replica replica : replicas)
            keyRanges.add(Range.makeRowRange(replica.range()));
        refs.addAll(cfs.selectAndReference(view -> {
            Set<SSTableReader> sstables = Sets.newHashSet();
            SSTableIntervalTree intervalTree = SSTableIntervalTree.build(view.select(SSTableSet.CANONICAL));
            Predicate<SSTableReader> predicate;
            if (previewKind.isPreview()) {
                predicate = previewKind.predicate();
            } else if (pendingRepair == ActiveRepairService.NO_PENDING_REPAIR) {
                predicate = Predicates.alwaysTrue();
            } else {
                predicate = s -> s.isPendingRepair() && s.getSSTableMetadata().pendingRepair.equals(pendingRepair);
            }
            for (Range<PartitionPosition> keyRange : keyRanges) {
                // sstablesInBounds is inclusive of both endpoints while keyRange excludes its left bound,
                // but that bound is a max key bound that no real key can equal, so we are
                // still actually selecting what we wanted.
                for (SSTableReader sstable : Iterables.filter(View.sstablesInBounds(keyRange.left, keyRange.right, intervalTree), predicate)) {
                    sstables.add(sstable);
                }
            }
            if (logger.isDebugEnabled())
                logger.debug("ViewFilter for {}/{} sstables", sstables.size(), Iterables.size(view.select(SSTableSet.CANONICAL)));
            return sstables;
        }).refs);
        List<Range<Token>> normalizedFullRanges = Range.normalize(replicas.onlyFull().ranges());
        List<Range<Token>> normalizedAllRanges = Range.normalize(replicas.ranges());
        // Create outgoing file streams for the ranges, possibly skipping repaired ranges in sstables
        List<OutgoingStream> streams = new ArrayList<>(refs.size());
        for (SSTableReader sstable : refs) {
            List<Range<Token>> ranges = sstable.isRepaired() ? normalizedFullRanges : normalizedAllRanges;
            List<SSTableReader.PartitionPositionBounds> sections = sstable.getPositionsForRanges(ranges);
            Ref<SSTableReader> ref = refs.get(sstable);
            if (sections.isEmpty()) {
                ref.release();
                continue;
            }
            streams.add(new CassandraOutgoingFile(session.getStreamOperation(), ref, sections, ranges, sstable.estimatedKeysForRanges(ranges)));
        }
        return streams;
    } catch (Throwable t) {
        refs.release();
        throw t;
    }
}
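A minimal sketch of the repaired-vs-unrepaired range selection above: repaired sstables stream only the fully replicated ranges, unrepaired sstables stream everything. The class name StreamRangeSelectionSketch, the endpoint, the token values, and the isRepaired boolean (a stand-in for sstable.isRepaired()) are all invented for illustration.

import java.util.List;
import org.apache.cassandra.dht.Murmur3Partitioner.LongToken;
import org.apache.cassandra.dht.Range;
import org.apache.cassandra.dht.Token;
import org.apache.cassandra.locator.InetAddressAndPort;
import org.apache.cassandra.locator.RangesAtEndpoint;
import org.apache.cassandra.locator.Replica;

public class StreamRangeSelectionSketch {
    public static void main(String[] args) throws Exception {
        InetAddressAndPort self = InetAddressAndPort.getByName("127.0.0.1");
        RangesAtEndpoint.Builder builder = RangesAtEndpoint.builder(self);
        builder.add(Replica.fullReplica(self, new Range<>(new LongToken(0), new LongToken(100))));
        builder.add(Replica.transientReplica(self, new Range<>(new LongToken(100), new LongToken(200))));
        RangesAtEndpoint replicas = builder.build();
        List<Range<Token>> normalizedFullRanges = Range.normalize(replicas.onlyFull().ranges());
        List<Range<Token>> normalizedAllRanges = Range.normalize(replicas.ranges());
        boolean isRepaired = true; // stand-in for sstable.isRepaired()
        // Repaired data is not streamed for the transient replica's ranges.
        List<Range<Token>> ranges = isRepaired ? normalizedFullRanges : normalizedAllRanges;
        System.out.println(ranges); // [(0,100]] when repaired
    }
}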
Use of org.apache.cassandra.locator.RangesAtEndpoint in project cassandra by apache.
Class RangeRelocator, method calculateRangesToStreamWithEndpoints.
/**
 * Calculate the endpoints to stream the current ranges to, if needed;
 * in some situations a node will handle its current ranges as part of the new ranges.
 */
public static RangesByEndpoint calculateRangesToStreamWithEndpoints(RangesAtEndpoint streamRanges, AbstractReplicationStrategy strat, TokenMetadata tmdBefore, TokenMetadata tmdAfter) {
    RangesByEndpoint.Builder endpointRanges = new RangesByEndpoint.Builder();
    for (Replica toStream : streamRanges) {
        // If the range we are sending is full, only send it to the new full replica.
        // There will also be a new transient replica we need to send the data to, but not
        // the repaired data.
        EndpointsForRange oldEndpoints = strat.calculateNaturalReplicas(toStream.range().right, tmdBefore);
        EndpointsForRange newEndpoints = strat.calculateNaturalReplicas(toStream.range().right, tmdAfter);
        logger.debug("Need to stream {}, current endpoints {}, new endpoints {}", toStream, oldEndpoints, newEndpoints);
        for (Replica newEndpoint : newEndpoints) {
            Replica oldEndpoint = oldEndpoints.byEndpoint().get(newEndpoint.endpoint());
            // Nothing to do
            if (newEndpoint.equals(oldEndpoint))
                continue;
            // Completely new range for this endpoint
            if (oldEndpoint == null) {
                if (toStream.isTransient() && newEndpoint.isFull())
                    throw new AssertionError(String.format("Need to stream %s, but only have %s which is transient and not full", newEndpoint, toStream));
                for (Range<Token> intersection : newEndpoint.range().intersectionWith(toStream.range())) {
                    endpointRanges.put(newEndpoint.endpoint(), newEndpoint.decorateSubrange(intersection));
                }
            } else {
                Set<Range<Token>> subsToStream = Collections.singleton(toStream.range());
                // First subtract what we already have
                if (oldEndpoint.isFull() == newEndpoint.isFull() || oldEndpoint.isFull())
                    subsToStream = toStream.range().subtract(oldEndpoint.range());
                // Now we only stream what is still replicated
                subsToStream.stream()
                            .flatMap(range -> range.intersectionWith(newEndpoint.range()).stream())
                            .forEach(tokenRange -> endpointRanges.put(newEndpoint.endpoint(), newEndpoint.decorateSubrange(tokenRange)));
            }
        }
    }
    return endpointRanges.build();
}
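A hypothetical sketch of the subtract-then-intersect arithmetic in the else branch above. The class name RangeArithmeticSketch and the token values are invented; subtract and intersectionWith are the same Range calls the method uses.

import org.apache.cassandra.dht.Murmur3Partitioner.LongToken;
import org.apache.cassandra.dht.Range;
import org.apache.cassandra.dht.Token;

public class RangeArithmeticSketch {
    public static void main(String[] args) {
        Range<Token> toStream = new Range<>(new LongToken(0), new LongToken(100));
        Range<Token> alreadyHeld = new Range<>(new LongToken(0), new LongToken(50));       // oldEndpoint.range()
        Range<Token> stillReplicated = new Range<>(new LongToken(40), new LongToken(100)); // newEndpoint.range()
        // First subtract what the endpoint already has, then keep only what it
        // will still replicate after the move.
        for (Range<Token> sub : toStream.subtract(alreadyHeld))            // {(50,100]}
            for (Range<Token> piece : sub.intersectionWith(stillReplicated))
                System.out.println(piece);                                 // (50,100]
    }
}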