Use of org.apache.cassandra.io.sstable.SSTableIdentityIterator in project cassandra by apache.
From the class SASIIndexBuilder, the method build:
public void build() {
    AbstractType<?> keyValidator = cfs.metadata().partitionKeyType;

    for (Map.Entry<SSTableReader, Map<ColumnMetadata, ColumnIndex>> e : sstables.entrySet()) {
        SSTableReader sstable = e.getKey();
        Map<ColumnMetadata, ColumnIndex> indexes = e.getValue();

        try (RandomAccessReader dataFile = sstable.openDataReader()) {
            PerSSTableIndexWriter indexWriter = SASIIndex.newWriter(keyValidator, sstable.descriptor, indexes, OperationType.COMPACTION);

            long previousKeyPosition = 0;
            try (KeyIterator keys = new KeyIterator(sstable.descriptor, cfs.metadata())) {
                while (keys.hasNext()) {
                    if (isStopRequested())
                        throw new CompactionInterruptedException(getCompactionInfo());

                    final DecoratedKey key = keys.next();
                    final long keyPosition = keys.getKeyPosition();

                    indexWriter.startPartition(key, keyPosition);

                    try {
                        RowIndexEntry indexEntry = sstable.getPosition(key, SSTableReader.Operator.EQ);
                        dataFile.seek(indexEntry.position);

                        // skip the serialized partition key that precedes the partition body
                        ByteBufferUtil.readWithShortLength(dataFile);

                        try (SSTableIdentityIterator partition = SSTableIdentityIterator.create(sstable, dataFile, key)) {
                            // if the row has statics attached, it has to be indexed separately
                            if (cfs.metadata().hasStaticColumns())
                                indexWriter.nextUnfilteredCluster(partition.staticRow());

                            while (partition.hasNext())
                                indexWriter.nextUnfilteredCluster(partition.next());
                        }
                    } catch (IOException ex) {
                        throw new FSReadError(ex, sstable.getFilename());
                    }

                    bytesProcessed += keyPosition - previousKeyPosition;
                    previousKeyPosition = keyPosition;
                }

                completeSSTable(indexWriter, sstable, indexes.values());
            }
        }
    }
}
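The part of the loop that actually exercises SSTableIdentityIterator is the middle: seek the data reader to the partition's position, skip the serialized key, then wrap the reader in an identity iterator and drain its unfiltereds. Below is a minimal sketch of just that step, assuming a caller already holds the sstable, an open data reader, and the decorated key. The class PartitionScanSketch, the dumpPartition helper, and the Consumer parameter are illustration only; the Cassandra calls are the same ones used above, and the import paths follow the current tree layout, so they may need adjusting for other versions.

import java.io.IOException;
import java.util.function.Consumer;

import org.apache.cassandra.db.DecoratedKey;
import org.apache.cassandra.db.RowIndexEntry;
import org.apache.cassandra.db.rows.Unfiltered;
import org.apache.cassandra.io.FSReadError;
import org.apache.cassandra.io.sstable.SSTableIdentityIterator;
import org.apache.cassandra.io.sstable.format.SSTableReader;
import org.apache.cassandra.io.util.RandomAccessReader;
import org.apache.cassandra.utils.ByteBufferUtil;

final class PartitionScanSketch {
    // Hypothetical helper: feed every unfiltered of one partition to `sink`.
    static void dumpPartition(SSTableReader sstable, RandomAccessReader dataFile, DecoratedKey key, Consumer<Unfiltered> sink) {
        try {
            // locate the partition in the data component and position the reader on it
            RowIndexEntry indexEntry = sstable.getPosition(key, SSTableReader.Operator.EQ);
            dataFile.seek(indexEntry.position);

            // the partition body is preceded by the serialized key; skip it
            ByteBufferUtil.readWithShortLength(dataFile);

            // wrap the positioned reader and drain the partition's rows and markers
            try (SSTableIdentityIterator partition = SSTableIdentityIterator.create(sstable, dataFile, key)) {
                if (!partition.staticRow().isEmpty())
                    sink.accept(partition.staticRow());
                while (partition.hasNext())
                    sink.accept(partition.next());
            }
        } catch (IOException ex) {
            throw new FSReadError(ex, sstable.getFilename());
        }
    }
}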
Use of org.apache.cassandra.io.sstable.SSTableIdentityIterator in project eiger by wlloyd.
From the class CompactionController, the method getCompactedRow:
/**
 * @return an AbstractCompactedRow implementation to write the merged rows in question.
 *
 * If there is a single source row, the data is from a current-version sstable, we don't
 * need to purge, and we aren't forcing deserialization for scrub, write it unchanged.
 * Otherwise, we deserialize, purge tombstones, and reserialize in the latest version.
 */
public AbstractCompactedRow getCompactedRow(List<SSTableIdentityIterator> rows) {
    long rowSize = 0;
    for (SSTableIdentityIterator row : rows)
        rowSize += row.dataSize;

    // only consult shouldPurge (a key-existence check against other sstables) when doing so
    // is going to be less expensive than simply de/serializing the row again
    if (rows.size() == 1 && !needDeserialize()
        && (rowSize > DatabaseDescriptor.getInMemoryCompactionLimit() || !keyExistenceIsExpensive)
        && !shouldPurge(rows.get(0).getKey())) {
        return new EchoedRow(rows.get(0));
    }

    if (rowSize > DatabaseDescriptor.getInMemoryCompactionLimit()) {
        String keyString = cfs.metadata.getKeyValidator().getString(rows.get(0).getKey().key);
        logger.info(String.format("Compacting large row %s/%s:%s (%d bytes) incrementally",
                                  cfs.table.name, cfs.columnFamily, keyString, rowSize));
        return new LazilyCompactedRow(this, rows);
    }

    return new PrecompactedRow(this, rows);
}
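The method above amounts to a three-way decision: echo the row unchanged, compact it lazily because it is too large to hold in memory, or precompact it in memory. Below is a hypothetical distillation of that branching; the CompactedRowChoiceSketch class, the Strategy enum, and the choose helper are illustration only and not part of Eiger or Cassandra, while the inputs mirror the fields and calls used above.

// Illustration only: none of these names exist in Eiger/Cassandra.
final class CompactedRowChoiceSketch {
    enum Strategy { ECHO, LAZILY_COMPACTED, PRECOMPACTED }

    static Strategy choose(int sourceRowCount,
                           long rowSize,
                           long inMemoryCompactionLimit,
                           boolean needDeserialize,
                           boolean keyExistenceIsExpensive,
                           boolean shouldPurge) {
        // echo the single source row unchanged: nothing to merge, no scrub or purge work needed,
        // and the key-existence check behind shouldPurge was worth making
        if (sourceRowCount == 1 && !needDeserialize
            && (rowSize > inMemoryCompactionLimit || !keyExistenceIsExpensive)
            && !shouldPurge)
            return Strategy.ECHO;

        // too large to rebuild in memory: merge and purge incrementally while writing
        if (rowSize > inMemoryCompactionLimit)
            return Strategy.LAZILY_COMPACTED;

        // small enough: deserialize, purge tombstones, and reserialize in memory
        return Strategy.PRECOMPACTED;
    }
}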