use of org.apache.cassandra.db.RowIndexEntry in project cassandra by apache.
the class SplittingSizeTieredCompactionWriter method realAppend.
@Override
public boolean realAppend(UnfilteredRowIterator partition) {
    RowIndexEntry rie = sstableWriter.append(partition);
    // if we underestimate how many keys we have, the last sstable might get more than we expect
    if (sstableWriter.currentWriter().getEstimatedOnDiskBytesWritten() > currentBytesToWrite && currentRatioIndex < ratios.length - 1) {
        currentRatioIndex++;
        currentBytesToWrite = Math.round(totalSize * ratios[currentRatioIndex]);
        switchCompactionLocation(location);
        logger.debug("Switching writer, currentBytesToWrite = {}", currentBytesToWrite);
    }
    return rie != null;
}
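The writer above consumes a precomputed ratios array: once the current sstable's estimated on-disk size exceeds its share of totalSize, it advances to the next ratio and switches to a fresh writer. As a rough illustration of how such a schedule could be built, here is a minimal standalone sketch in which each successive sstable gets about half the bytes of the previous one; the halvingRatios helper is an assumption for this example, not the project's code.

public final class RatioSchedule {

    private RatioSchedule() {}

    // Hypothetical helper: weights {0.5, 0.25, ..., r, r} that sum to ~1.0,
    // with the remainder folded into the last slot.
    public static double[] halvingRatios(int count) {
        double[] ratios = new double[count];
        double remaining = 1.0;
        for (int i = 0; i < count - 1; i++) {
            // give the next sstable half of whatever is still unassigned
            ratios[i] = remaining / 2.0;
            remaining -= ratios[i];
        }
        // the last sstable absorbs the rest, mirroring the "last sstable
        // might get more than we expect" caveat in realAppend above
        ratios[count - 1] = remaining;
        return ratios;
    }

    public static void main(String[] args) {
        long totalSize = 1L << 30; // pretend 1 GiB of compaction output
        for (double r : halvingRatios(4))
            System.out.printf("write up to %d bytes%n", Math.round(totalSize * r));
    }
}

With four writers and 1 GiB of input this yields budgets of roughly 512 MiB, 256 MiB, 128 MiB, and 128 MiB, matching the shape of currentBytesToWrite = Math.round(totalSize * ratios[currentRatioIndex]).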
use of org.apache.cassandra.db.RowIndexEntry in project cassandra by apache.
the class SASIIndexBuilder method build.
public void build() {
    AbstractType<?> keyValidator = cfs.metadata().partitionKeyType;
    for (Map.Entry<SSTableReader, Map<ColumnMetadata, ColumnIndex>> e : sstables.entrySet()) {
        SSTableReader sstable = e.getKey();
        Map<ColumnMetadata, ColumnIndex> indexes = e.getValue();
        try (RandomAccessReader dataFile = sstable.openDataReader()) {
            PerSSTableIndexWriter indexWriter = SASIIndex.newWriter(keyValidator, sstable.descriptor, indexes, OperationType.COMPACTION);
            long previousKeyPosition = 0;
            try (KeyIterator keys = new KeyIterator(sstable.descriptor, cfs.metadata())) {
                while (keys.hasNext()) {
                    if (isStopRequested())
                        throw new CompactionInterruptedException(getCompactionInfo());
                    final DecoratedKey key = keys.next();
                    final long keyPosition = keys.getKeyPosition();
                    indexWriter.startPartition(key, keyPosition);
                    try {
                        RowIndexEntry indexEntry = sstable.getPosition(key, SSTableReader.Operator.EQ);
                        dataFile.seek(indexEntry.position);
                        // skip the serialized partition key that precedes the row data
                        ByteBufferUtil.readWithShortLength(dataFile);
                        try (SSTableIdentityIterator partition = SSTableIdentityIterator.create(sstable, dataFile, key)) {
                            // if the row has statics attached, it has to be indexed separately
                            if (cfs.metadata().hasStaticColumns())
                                indexWriter.nextUnfilteredCluster(partition.staticRow());
                            while (partition.hasNext())
                                indexWriter.nextUnfilteredCluster(partition.next());
                        }
                    } catch (IOException ex) {
                        throw new FSReadError(ex, sstable.getFilename());
                    }
                    bytesProcessed += keyPosition - previousKeyPosition;
                    previousKeyPosition = keyPosition;
                }
                completeSSTable(indexWriter, sstable, indexes.values());
            }
        }
    }
}
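Note how the loop reports progress: instead of counting partitions, it accumulates the byte distance between consecutive key positions, so progress tracks actual bytes covered even when partition sizes vary wildly. A self-contained sketch of that accounting, with a hypothetical ProgressTracker standing in for the builder's bytesProcessed bookkeeping:

// Hypothetical class illustrating the delta-based progress accounting above.
final class ProgressTracker {

    private long bytesProcessed;
    private long previousKeyPosition;
    private final long totalBytes;

    ProgressTracker(long totalBytes) {
        this.totalBytes = totalBytes;
    }

    // Called once per key, exactly like the tail of the while-loop above:
    // add the distance from the previous key's position, then remember ours.
    void onKey(long keyPosition) {
        bytesProcessed += keyPosition - previousKeyPosition;
        previousKeyPosition = keyPosition;
    }

    // Completed fraction in [0, 1], suitable for a percentage display.
    double fraction() {
        return totalBytes == 0 ? 1.0 : (double) bytesProcessed / totalBytes;
    }
}

Because key positions are monotonically increasing within an sstable, each delta is non-negative and the running total never regresses.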
use of org.apache.cassandra.db.RowIndexEntry in project cassandra by apache.
the class SSTableRewriter method maybeReopenEarly.
private void maybeReopenEarly(DecoratedKey key) {
    if (writer.getFilePointer() - currentlyOpenedEarlyAt > preemptiveOpenInterval) {
        if (transaction.isOffline()) {
            for (SSTableReader reader : transaction.originals()) {
                RowIndexEntry index = reader.getPosition(key, SSTableReader.Operator.GE);
                CLibrary.trySkipCache(reader.getFilename(), 0, index == null ? 0 : index.position);
            }
        } else {
            SSTableReader reader = writer.setMaxDataAge(maxAge).openEarly();
            if (reader != null) {
                transaction.update(reader, false);
                currentlyOpenedEarlyAt = writer.getFilePointer();
                moveStarts(reader, reader.last);
                transaction.checkpoint();
            }
        }
    }
}
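Both branches above sit behind the same throttle: nothing happens until the writer has produced preemptiveOpenInterval more bytes than at the last trigger, so the comparatively expensive early open (or page-cache skip) fires once per interval rather than once per appended partition. A minimal sketch of that pattern in isolation, with illustrative names:

// Hypothetical helper showing the byte-interval gating used by maybeReopenEarly.
final class EarlyOpenThrottle {

    private final long intervalBytes;
    private long lastTriggerAt;

    EarlyOpenThrottle(long intervalBytes) {
        this.intervalBytes = intervalBytes;
    }

    // Returns true at most once per intervalBytes of new output; mirrors the
    // writer.getFilePointer() - currentlyOpenedEarlyAt > preemptiveOpenInterval check.
    boolean shouldTrigger(long currentFilePointer) {
        if (currentFilePointer - lastTriggerAt <= intervalBytes)
            return false;
        lastTriggerAt = currentFilePointer;
        return true;
    }
}

Setting intervalBytes to Long.MAX_VALUE disables the behavior entirely, which is exactly how moveStarts below short-circuits when preemptiveOpenInterval == Long.MAX_VALUE.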
use of org.apache.cassandra.db.RowIndexEntry in project cassandra by apache.
the class SSTableRewriter method moveStarts.
/**
 * Replace the readers we are rewriting with cloneWithNewStart, reclaiming any page cache that is no longer
 * needed and transferring any key cache entries over to the new reader, expiring them from the old one.
 *
 * Note that we replace an existing sstable with a new *instance* of the same sstable: the replacement
 * sstable .equals() the old one, but it is a new instance. So, for example, since we releaseReference() on
 * the old one, the old *instance* will have a reference count of zero, and starting a new compaction with
 * that old instance would throw exceptions.
 *
 * @param newReader the rewritten reader that replaces the original readers for this region
 * @param lowerbound must be non-null; marks the exclusive lower bound of the new start for each sstable
 */
private void moveStarts(SSTableReader newReader, DecoratedKey lowerbound) {
    if (transaction.isOffline())
        return;
    if (preemptiveOpenInterval == Long.MAX_VALUE)
        return;
    newReader.setupOnline();
    List<DecoratedKey> invalidateKeys = null;
    if (!cachedKeys.isEmpty()) {
        invalidateKeys = new ArrayList<>(cachedKeys.size());
        for (Map.Entry<DecoratedKey, RowIndexEntry> cacheKey : cachedKeys.entrySet()) {
            invalidateKeys.add(cacheKey.getKey());
            newReader.cacheKey(cacheKey.getKey(), cacheKey.getValue());
        }
    }
    cachedKeys.clear();
    for (SSTableReader sstable : transaction.originals()) {
        // we call transaction.current() to support multiple rewriters operating over the same source readers at once.
        // note: only one such writer should be written to at any moment
        final SSTableReader latest = transaction.current(sstable);
        // skip any sstables that we know to already be shadowed
        if (latest.first.compareTo(lowerbound) > 0)
            continue;
        Runnable runOnClose = invalidateKeys != null ? new InvalidateKeys(latest, invalidateKeys) : null;
        if (lowerbound.compareTo(latest.last) >= 0) {
            if (!transaction.isObsolete(latest)) {
                if (runOnClose != null)
                    latest.runOnClose(runOnClose);
                transaction.obsolete(latest);
            }
            continue;
        }
        DecoratedKey newStart = latest.firstKeyBeyond(lowerbound);
        assert newStart != null;
        SSTableReader replacement = latest.cloneWithNewStart(newStart, runOnClose);
        transaction.update(replacement, true);
    }
}
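Stripped of the cache and transaction plumbing, the loop makes a three-way choice per source reader based on where lowerbound falls relative to the reader's first and last keys. A compact sketch of just that decision, with plain Comparable keys standing in for DecoratedKey (all names here are illustrative):

// Hypothetical distillation of the per-sstable branching in moveStarts.
enum MoveAction { UNTOUCHED, OBSOLETE, MOVE_START }

final class MoveStartsDecision {

    static <K extends Comparable<K>> MoveAction decide(K first, K last, K lowerbound) {
        if (first.compareTo(lowerbound) > 0)
            return MoveAction.UNTOUCHED; // reader starts beyond the rewritten region
        if (lowerbound.compareTo(last) >= 0)
            return MoveAction.OBSOLETE; // every key is shadowed by the new reader
        return MoveAction.MOVE_START; // advance the start to firstKeyBeyond(lowerbound)
    }
}

Only the MOVE_START case produces a cloneWithNewStart replacement; the OBSOLETE case instead marks the reader for removal once its runOnClose hook has invalidated any transferred cache keys.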
use of org.apache.cassandra.db.RowIndexEntry in project cassandra by apache.
the class StatusLogger method log.
public static void log() {
    MBeanServer server = ManagementFactory.getPlatformMBeanServer();
    // everything from o.a.c.concurrent
    logger.info(String.format("%-25s%10s%10s%15s%10s%18s", "Pool Name", "Active", "Pending", "Completed", "Blocked", "All Time Blocked"));
    for (Map.Entry<String, String> tpool : ThreadPoolMetrics.getJmxThreadPools(server).entries()) {
        logger.info(String.format("%-25s%10s%10s%15s%10s%18s",
                                  tpool.getValue(),
                                  ThreadPoolMetrics.getJmxMetric(server, tpool.getKey(), tpool.getValue(), "ActiveTasks"),
                                  ThreadPoolMetrics.getJmxMetric(server, tpool.getKey(), tpool.getValue(), "PendingTasks"),
                                  ThreadPoolMetrics.getJmxMetric(server, tpool.getKey(), tpool.getValue(), "CompletedTasks"),
                                  ThreadPoolMetrics.getJmxMetric(server, tpool.getKey(), tpool.getValue(), "CurrentlyBlockedTasks"),
                                  ThreadPoolMetrics.getJmxMetric(server, tpool.getKey(), tpool.getValue(), "TotalBlockedTasks")));
    }
    // one offs
    logger.info(String.format("%-25s%10s%10s", "CompactionManager", CompactionManager.instance.getActiveCompactions(), CompactionManager.instance.getPendingTasks()));
    int pendingLargeMessages = 0;
    for (int n : MessagingService.instance().getLargeMessagePendingTasks().values())
        pendingLargeMessages += n;
    int pendingSmallMessages = 0;
    for (int n : MessagingService.instance().getSmallMessagePendingTasks().values())
        pendingSmallMessages += n;
    logger.info(String.format("%-25s%10s%10s", "MessagingService", "n/a", pendingLargeMessages + "/" + pendingSmallMessages));
    // Global key/row cache information
    AutoSavingCache<KeyCacheKey, RowIndexEntry> keyCache = CacheService.instance.keyCache;
    AutoSavingCache<RowCacheKey, IRowCacheEntry> rowCache = CacheService.instance.rowCache;
    int keyCacheKeysToSave = DatabaseDescriptor.getKeyCacheKeysToSave();
    int rowCacheKeysToSave = DatabaseDescriptor.getRowCacheKeysToSave();
    logger.info(String.format("%-25s%10s%25s%25s", "Cache Type", "Size", "Capacity", "KeysToSave"));
    logger.info(String.format("%-25s%10s%25s%25s", "KeyCache", keyCache.weightedSize(), keyCache.getCapacity(), keyCacheKeysToSave == Integer.MAX_VALUE ? "all" : keyCacheKeysToSave));
    logger.info(String.format("%-25s%10s%25s%25s", "RowCache", rowCache.weightedSize(), rowCache.getCapacity(), rowCacheKeysToSave == Integer.MAX_VALUE ? "all" : rowCacheKeysToSave));
    // per-CF stats
    logger.info(String.format("%-25s%20s", "Table", "Memtable ops,data"));
    for (ColumnFamilyStore cfs : ColumnFamilyStore.all())
        logger.info(String.format("%-25s%20s", cfs.keyspace.getName() + "." + cfs.name, cfs.metric.memtableColumnsCount.getValue() + "," + cfs.metric.memtableLiveDataSize.getValue()));
}
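Every table in this method relies on fixed-width format specifiers rather than tabs: "%-25s" left-justifies the name in a 25-character field, and the plain "%10s"/"%15s" specifiers right-align the counters, so successive log lines form readable columns regardless of value lengths. A tiny runnable demonstration with made-up values:

// Hypothetical values; only the format strings mirror StatusLogger.
public final class ColumnDemo {
    public static void main(String[] args) {
        System.out.println(String.format("%-25s%10s%10s%15s", "Pool Name", "Active", "Pending", "Completed"));
        System.out.println(String.format("%-25s%10s%10s%15s", "MutationStage", 3, 12, 1048576));
    }
}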