use of org.apache.cassandra.db.DecoratedKey in project eiger by wlloyd.
the class CompactionsPurgeTest method testCompactionPurgeTombstonedRow.
@Test
public void testCompactionPurgeTombstonedRow() throws IOException, ExecutionException, InterruptedException {
    CompactionManager.instance.disableAutoCompaction();
    String tableName = "Keyspace1";
    String cfName = "Standard1";
    Table table = Table.open(tableName);
    ColumnFamilyStore cfs = table.getColumnFamilyStore(cfName);
    DecoratedKey key = Util.dk("key3");
    RowMutation rm;
    // inserts: ten columns with timestamps 0..9
    rm = new RowMutation(tableName, key.key);
    for (int i = 0; i < 10; i++) {
        rm.add(new QueryPath(cfName, null, ByteBufferUtil.bytes(String.valueOf(i))), ByteBufferUtil.EMPTY_BYTE_BUFFER, i);
    }
    rm.apply();
    // delete the row with timestamp 4, so not all columns are covered by the tombstone
    rm = new RowMutation(tableName, key.key);
    rm.delete(new QueryPath(cfName, null, null), 4);
    rm.apply();
    // flush and major compact (with tombstone purging)
    cfs.forceBlockingFlush();
    Util.compactAll(cfs).get();
    // re-insert columns 0..4 with timestamps lower than the delete
    rm = new RowMutation(tableName, key.key);
    for (int i = 0; i < 5; i++) {
        rm.add(new QueryPath(cfName, null, ByteBufferUtil.bytes(String.valueOf(i))), ByteBufferUtil.EMPTY_BYTE_BUFFER, i);
    }
    rm.apply();
    // Check that the second insert went in
    ColumnFamily cf = cfs.getColumnFamily(QueryFilter.getIdentityFilter(key, new QueryPath(cfName)));
    assertEquals(10, cf.getColumnCount());
    for (IColumn c : cf)
        assert !c.isMarkedForDelete();
}
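The ordering here is the whole point of the test: a write with a lower timestamp than a tombstone is normally shadowed, and only because the major compaction purged the tombstone do the re-inserted columns 0..4 survive, giving 10 live columns. A minimal contrasting sketch, assuming the same fixtures as above (hypothetical, not part of the eiger test):

rm = new RowMutation(tableName, key.key);
rm.delete(new QueryPath(cfName, null, null), 4); // row tombstone at timestamp 4
rm.apply();
// without an intervening Util.compactAll(cfs).get(), a re-insert below the
// tombstone's timestamp is suppressed on read
rm = new RowMutation(tableName, key.key);
rm.add(new QueryPath(cfName, null, ByteBufferUtil.bytes("0")), ByteBufferUtil.EMPTY_BYTE_BUFFER, 0);
rm.apply();
ColumnFamily shadowed = cfs.getColumnFamily(QueryFilter.getIdentityFilter(key, new QueryPath(cfName)));
// expected: no live columns, since timestamp 0 < 4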
use of org.apache.cassandra.db.DecoratedKey in project eiger by wlloyd.
the class OneCompactionTest method testCompaction.
private void testCompaction(String columnFamilyName, int insertsPerTable) throws IOException, ExecutionException, InterruptedException {
    CompactionManager.instance.disableAutoCompaction();
    Table table = Table.open("Keyspace1");
    ColumnFamilyStore store = table.getColumnFamilyStore(columnFamilyName);
    Set<DecoratedKey> inserted = new HashSet<DecoratedKey>();
    for (int j = 0; j < insertsPerTable; j++) {
        DecoratedKey key = Util.dk(String.valueOf(j));
        RowMutation rm = new RowMutation("Keyspace1", key.key);
        rm.add(new QueryPath(columnFamilyName, null, ByteBufferUtil.bytes("0")), ByteBufferUtil.EMPTY_BYTE_BUFFER, j);
        rm.apply();
        inserted.add(key);
        // flush after every insert, producing one sstable per key
        store.forceBlockingFlush();
        assertEquals(inserted.size(), Util.getRangeSlice(store).size());
    }
    // a maximal compaction should merge all of those sstables into one
    CompactionManager.instance.performMaximal(store);
    assertEquals(1, store.getSSTables().size());
}
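A private helper like this is typically driven by small per-configuration @Test methods; a minimal sketch of such a caller, assuming the JUnit 4 conventions used above (the method name is hypothetical):

@Test
public void testCompactionStandard() throws IOException, ExecutionException, InterruptedException {
    // one insert/flush cycle per key, then a maximal compaction down to a single sstable
    testCompaction("Standard1", 1);
}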
use of org.apache.cassandra.db.DecoratedKey in project eiger by wlloyd.
the class CompactionTask method execute.
/**
* For internal use and testing only. The rest of the system should go through the submit* methods,
* which are properly serialized.
* Caller is in charge of marking/unmarking the sstables as compacting.
*/
public int execute(CompactionExecutorStatsCollector collector) throws IOException {
    // The collection of sstables passed may be empty (but not null); even if
    // it is not empty, it may compact down to nothing if all rows are deleted.
    assert sstables != null;
    Set<SSTableReader> toCompact = new HashSet<SSTableReader>(sstables);
    if (!isCompactionInteresting(toCompact))
        return 0;
    // If user defined, we don't want to "trust" our space estimation. If
    // there isn't enough room, it's the user's problem.
    long expectedSize = isUserDefined ? 0 : cfs.getExpectedCompactedFileSize(toCompact);
    File compactionFileLocation = cfs.directories.getDirectoryForNewSSTables(expectedSize);
    if (compactionFileLocation == null && partialCompactionsAcceptable()) {
        // Not enough space for all of them; try again without the largest one.
        while (compactionFileLocation == null && toCompact.size() > 1) {
            logger.warn("insufficient space to compact all requested files " + StringUtils.join(toCompact, ", "));
            // Note that we have removed files that are still marked as compacting. This is suboptimal but ok
            // since the caller will unmark all the sstables at the end.
            toCompact.remove(cfs.getMaxSizeFile(toCompact));
            compactionFileLocation = cfs.directories.getDirectoryForNewSSTables(cfs.getExpectedCompactedFileSize(toCompact));
        }
    }
    if (compactionFileLocation == null) {
        logger.warn("insufficient space to compact even the two smallest files, aborting");
        return 0;
    }
    if (DatabaseDescriptor.isSnapshotBeforeCompaction())
        cfs.table.snapshot(System.currentTimeMillis() + "-" + "compact-" + cfs.columnFamily);
    // sanity check: all sstables must belong to the same cfs
    for (SSTableReader sstable : toCompact)
        assert sstable.descriptor.cfname.equals(cfs.columnFamily);
    CompactionController controller = new CompactionController(cfs, toCompact, gcBefore, isUserDefined);
    // new sstables from flush can be added during a compaction, but only the compaction can remove them,
    // so in our single-threaded compaction world this is a valid way of determining if we're compacting
    // all the sstables (that existed when we started)
    logger.info("Compacting {}", toCompact);
    long startTime = System.currentTimeMillis();
    long totalkeysWritten = 0;
    long estimatedTotalKeys = Math.max(DatabaseDescriptor.getIndexInterval(), SSTableReader.getApproximateKeyCount(toCompact));
    long estimatedSSTables = Math.max(1, SSTable.getTotalBytes(toCompact) / cfs.getCompactionStrategy().getMaxSSTableSize());
    long keysPerSSTable = (long) Math.ceil((double) estimatedTotalKeys / estimatedSSTables);
    if (logger.isDebugEnabled())
        logger.debug("Expected bloom filter size : " + keysPerSSTable);
    AbstractCompactionIterable ci = DatabaseDescriptor.isMultithreadedCompaction()
                                  ? new ParallelCompactionIterable(OperationType.COMPACTION, toCompact, controller)
                                  : new CompactionIterable(OperationType.COMPACTION, toCompact, controller);
    CloseableIterator<AbstractCompactedRow> iter = ci.iterator();
    Iterator<AbstractCompactedRow> nni = Iterators.filter(iter, Predicates.notNull());
    Map<DecoratedKey, Long> cachedKeys = new HashMap<DecoratedKey, Long>();
    // we can't preheat until the tracker has been set. This doesn't happen until we tell the cfs to
    // replace the old entries. Track entries to preheat here until then.
    Map<SSTableReader, Map<DecoratedKey, Long>> cachedKeyMap = new HashMap<SSTableReader, Map<DecoratedKey, Long>>();
    Collection<SSTableReader> sstables = new ArrayList<SSTableReader>();
    Collection<SSTableWriter> writers = new ArrayList<SSTableWriter>();
    if (collector != null)
        collector.beginCompaction(ci);
    try {
        if (!nni.hasNext()) {
            // don't mark compacted in the finally block, since if there _is_ nondeleted data,
            // we need to sync it (via closeAndOpen) first, so there is no period during which
            // a crash could cause data loss.
            cfs.markCompacted(toCompact);
            return 0;
        }
        SSTableWriter writer = cfs.createCompactionWriter(keysPerSSTable, compactionFileLocation, toCompact);
        writers.add(writer);
        while (nni.hasNext()) {
            if (ci.isStopped())
                throw new CompactionInterruptedException(ci.getCompactionInfo());
            AbstractCompactedRow row = nni.next();
            if (row.isEmpty())
                continue;
            long position = writer.append(row);
            totalkeysWritten++;
            if (DatabaseDescriptor.getPreheatKeyCache()) {
                for (SSTableReader sstable : toCompact) {
                    if (sstable.getCachedPosition(row.key, false) != null) {
                        cachedKeys.put(row.key, position);
                        break;
                    }
                }
            }
            if (!nni.hasNext() || newSSTableSegmentThresholdReached(writer, position)) {
                SSTableReader toIndex = writer.closeAndOpenReader(getMaxDataAge(toCompact));
                cachedKeyMap.put(toIndex, cachedKeys);
                sstables.add(toIndex);
                if (nni.hasNext()) {
                    writer = cfs.createCompactionWriter(keysPerSSTable, compactionFileLocation, toCompact);
                    writers.add(writer);
                    cachedKeys = new HashMap<DecoratedKey, Long>();
                }
            }
        }
    } catch (Exception e) {
        for (SSTableWriter writer : writers)
            writer.abort();
        throw FBUtilities.unchecked(e);
    } finally {
        iter.close();
        if (collector != null)
            collector.finishCompaction(ci);
    }
    cfs.replaceCompactedSSTables(toCompact, sstables);
    // TODO: this doesn't belong here, it should be part of the reader to load when the tracker is wired up
    for (Entry<SSTableReader, Map<DecoratedKey, Long>> ssTableReaderMapEntry : cachedKeyMap.entrySet()) {
        SSTableReader key = ssTableReaderMapEntry.getKey();
        for (Entry<DecoratedKey, Long> entry : ssTableReaderMapEntry.getValue().entrySet())
            key.cacheKey(entry.getKey(), entry.getValue());
    }
    long dTime = System.currentTimeMillis() - startTime;
    long startsize = SSTable.getTotalBytes(toCompact);
    long endsize = SSTable.getTotalBytes(sstables);
    double ratio = (double) endsize / (double) startsize;
    StringBuilder builder = new StringBuilder();
    builder.append("[");
    for (SSTableReader reader : sstables)
        builder.append(reader.getFilename()).append(",");
    builder.append("]");
    double mbps = dTime > 0 ? (double) endsize / (1024 * 1024) / ((double) dTime / 1000) : 0;
    logger.info(String.format("Compacted to %s. %,d to %,d (~%d%% of original) bytes for %,d keys at %fMB/s. Time: %,dms.", builder.toString(), startsize, endsize, (int) (ratio * 100), totalkeysWritten, mbps, dTime));
    logger.debug(String.format("CF Total Bytes Compacted: %,d", CompactionTask.addToTotalBytesCompacted(endsize)));
    return toCompact.size();
}
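Per the JavaDoc above, execute assumes the caller has already marked the input sstables as compacting and will unmark them afterwards. A sketch of that contract, with markCompacting/unmarkCompacting as hypothetical stand-ins for whatever bookkeeping the submit* path in CompactionManager actually performs:

// Hypothetical caller sketch; the names below are illustrative, not eiger APIs.
Collection<SSTableReader> candidates = task.getSSTables(); // assumed accessor
if (markCompacting(candidates)) {            // claim the sstables for this task
    try {
        task.execute(collector);             // may drop sstables internally if space is tight
    } finally {
        unmarkCompacting(candidates);        // always release, even on abort or interruption
    }
}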
use of org.apache.cassandra.db.DecoratedKey in project eiger by wlloyd.
the class FBUtilities method sortSampledKeys.
public static void sortSampledKeys(List<DecoratedKey> keys, Range<Token> range) {
    if (range.left.compareTo(range.right) >= 0) {
        // range wraps. have to be careful that we sort in the same order as the range to find the right midpoint.
        final Token right = range.right;
        Comparator<DecoratedKey> comparator = new Comparator<DecoratedKey>() {
            public int compare(DecoratedKey o1, DecoratedKey o2) {
                if ((right.compareTo(o1.token) < 0 && right.compareTo(o2.token) < 0)
                    || (right.compareTo(o1.token) > 0 && right.compareTo(o2.token) > 0)) {
                    // both tokens are on the same side of the wrap point
                    return o1.compareTo(o2);
                }
                return o2.compareTo(o1);
            }
        };
        Collections.sort(keys, comparator);
    } else {
        // unwrapped range (left < right). standard sort is all we need.
        Collections.sort(keys);
    }
}
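The comparator is easier to see on a toy ring: tokens on the same side of the wrap point keep their natural order, while tokens on opposite sides are swapped, so everything past the wrap point sorts ahead of everything that wrapped around. A self-contained illustration with long tokens on a 0..99 ring (not eiger code):

import java.util.*;

public class WrapSortSketch {
    public static void main(String[] args) {
        final long right = 80; // wrap point of a range like (80, 20]
        List<Long> tokens = new ArrayList<Long>(Arrays.asList(10L, 90L, 5L, 85L));
        Collections.sort(tokens, new Comparator<Long>() {
            public int compare(Long o1, Long o2) {
                boolean sameSide = (right < o1 && right < o2) || (right > o1 && right > o2);
                return sameSide ? o1.compareTo(o2) : o2.compareTo(o1);
            }
        });
        System.out.println(tokens); // [85, 90, 5, 10]: past the wrap point first, then the wrapped tail
    }
}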
use of org.apache.cassandra.db.DecoratedKey in project eiger by wlloyd.
the class LegacySSTableTest method testVersion.
public void testVersion(String version) throws Throwable {
    try {
        SSTableReader reader = SSTableReader.open(getDescriptor(version));
        for (String keystring : TEST_DATA) {
            ByteBuffer key = ByteBufferUtil.bytes(keystring);
            // confirm that the bloom filter does not reject any keys/names
            DecoratedKey dk = reader.partitioner.decorateKey(key);
            SSTableNamesIterator iter = new SSTableNamesIterator(reader, dk, FBUtilities.singleton(key));
            assert iter.next().name().equals(key);
        }
    } catch (Throwable e) {
        System.err.println("Failed to read " + version);
        throw e;
    }
}
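A helper like this is normally invoked once per legacy format version; a minimal sketch of such a driver, assuming a LEGACY_SSTABLE_ROOT test-data directory whose subdirectories are named after version strings (both names are hypothetical here):

@Test
public void testVersions() throws Throwable {
    for (File versionDir : LEGACY_SSTABLE_ROOT.listFiles()) { // assumed fixture
        if (versionDir.isDirectory())
            testVersion(versionDir.getName());
    }
}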