Use of org.apache.cassandra.db.DecoratedKey in project eiger by wlloyd.
The class CleanupHelper, method readData.
/* usually used to populate the cache */
protected void readData(String keyspace, String columnFamily, int offset, int numberOfRows) throws IOException {
    ColumnFamilyStore store = Table.open(keyspace).getColumnFamilyStore(columnFamily);
    for (int i = offset; i < offset + numberOfRows; i++) {
        DecoratedKey key = Util.dk("key" + i);
        QueryPath path = new QueryPath(columnFamily, null, ByteBufferUtil.bytes("col" + i));
        store.getColumnFamily(key, path, ByteBufferUtil.EMPTY_BYTE_BUFFER, ByteBufferUtil.EMPTY_BYTE_BUFFER, false, 1);
    }
}
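For context, a test might use this helper to warm the row cache before exercising cache-dependent behavior. A minimal sketch of a hypothetical call site (the keyspace name, column family name, and row range are illustrative, not taken from the project):

// Hypothetical call site: touch rows key100..key149 of Keyspace1/Standard1
// so that later reads in the test hit a warm cache.
readData("Keyspace1", "Standard1", 100, 50);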
Use of org.apache.cassandra.db.DecoratedKey in project eiger by wlloyd.
The class CompactionsPurgeTest, method testMinorCompactionPurge.
@Test
public void testMinorCompactionPurge() throws IOException, ExecutionException, InterruptedException {
    CompactionManager.instance.disableAutoCompaction();
    Table table = Table.open(TABLE2);
    String cfName = "Standard1";
    ColumnFamilyStore cfs = table.getColumnFamilyStore(cfName);
    RowMutation rm;
    for (int k = 1; k <= 2; ++k) {
        DecoratedKey key = Util.dk("key" + k);
        // inserts
        rm = new RowMutation(TABLE2, key.key);
        for (int i = 0; i < 10; i++) {
            rm.add(new QueryPath(cfName, null, ByteBufferUtil.bytes(String.valueOf(i))), ByteBufferUtil.EMPTY_BYTE_BUFFER, 0);
        }
        rm.apply();
        cfs.forceBlockingFlush();
        // deletes
        for (int i = 0; i < 10; i++) {
            rm = new RowMutation(TABLE2, key.key);
            rm.delete(new QueryPath(cfName, null, ByteBufferUtil.bytes(String.valueOf(i))), 1);
            rm.apply();
        }
        cfs.forceBlockingFlush();
    }
    DecoratedKey key1 = Util.dk("key1");
    DecoratedKey key2 = Util.dk("key2");
    // flush, remember the current sstables, and resurrect one column for the
    // first key; then submit a minor compaction on the remembered sstables
    cfs.forceBlockingFlush();
    Collection<SSTableReader> sstablesIncomplete = cfs.getSSTables();
    rm = new RowMutation(TABLE2, key1.key);
    rm.add(new QueryPath(cfName, null, ByteBufferUtil.bytes(String.valueOf(5))), ByteBufferUtil.EMPTY_BYTE_BUFFER, 2);
    rm.apply();
    cfs.forceBlockingFlush();
    new CompactionTask(cfs, sstablesIncomplete, Integer.MAX_VALUE).execute(null);
    // verify that minor compaction does not GC when the key is present
    // in a non-compacted sstable
    ColumnFamily cf = cfs.getColumnFamily(QueryFilter.getIdentityFilter(key1, new QueryPath(cfName)));
    assert cf.getColumnCount() == 10;
    // verify that minor compaction does GC when the key is provably not
    // present in a non-compacted sstable
    cf = cfs.getColumnFamily(QueryFilter.getIdentityFilter(key2, new QueryPath(cfName)));
    assert cf == null;
}
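Throughout these tests, Util.dk converts a raw string into a DecoratedKey. A sketch of the assumed helper, following the shape of Cassandra's test Util class: it decorates the key bytes with the configured partitioner, which attaches the token used to position the key in the ring and in sstables (imports elided, as in the snippets above).

// Assumed shape of the Util.dk test helper.
public static DecoratedKey dk(String key) {
    return StorageService.getPartitioner().decorateKey(ByteBufferUtil.bytes(key));
}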
Use of org.apache.cassandra.db.DecoratedKey in project eiger by wlloyd.
The class CompactionsPurgeTest, method testMajorCompactionPurge.
@Test
public void testMajorCompactionPurge() throws IOException, ExecutionException, InterruptedException {
    CompactionManager.instance.disableAutoCompaction();
    Table table = Table.open(TABLE1);
    String cfName = "Standard1";
    ColumnFamilyStore cfs = table.getColumnFamilyStore(cfName);
    DecoratedKey key = Util.dk("key1");
    RowMutation rm;
    // inserts
    rm = new RowMutation(TABLE1, key.key);
    for (int i = 0; i < 10; i++) {
        rm.add(new QueryPath(cfName, null, ByteBufferUtil.bytes(String.valueOf(i))), ByteBufferUtil.EMPTY_BYTE_BUFFER, 0);
    }
    rm.apply();
    cfs.forceBlockingFlush();
    // deletes
    for (int i = 0; i < 10; i++) {
        rm = new RowMutation(TABLE1, key.key);
        rm.delete(new QueryPath(cfName, null, ByteBufferUtil.bytes(String.valueOf(i))), 1);
        rm.apply();
    }
    cfs.forceBlockingFlush();
    // resurrect one column
    rm = new RowMutation(TABLE1, key.key);
    rm.add(new QueryPath(cfName, null, ByteBufferUtil.bytes(String.valueOf(5))), ByteBufferUtil.EMPTY_BYTE_BUFFER, 2);
    rm.apply();
    cfs.forceBlockingFlush();
    // major compact and test that all columns but the resurrected one are completely gone
    CompactionManager.instance.submitMaximal(cfs, Integer.MAX_VALUE).get();
    cfs.invalidateCachedRow(key);
    ColumnFamily cf = cfs.getColumnFamily(QueryFilter.getIdentityFilter(key, new QueryPath(cfName)));
    assertColumns(cf, "5");
    assert cf.getColumn(ByteBufferUtil.bytes(String.valueOf(5))) != null;
}
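assertColumns is a test assertion helper. A minimal sketch of its assumed semantics, verifying that the ColumnFamily holds exactly the named columns (the project's actual helper may differ in detail; imports elided as above):

// Assumed semantics of assertColumns: cf must contain exactly the named columns.
public static void assertColumns(ColumnFamily cf, String... names) {
    Set<ByteBuffer> expected = new HashSet<ByteBuffer>();
    for (String name : names)
        expected.add(ByteBufferUtil.bytes(name));
    Set<ByteBuffer> actual = new HashSet<ByteBuffer>();
    if (cf != null)
        for (IColumn column : cf.getSortedColumns())
            actual.add(column.name());
    assert expected.equals(actual) : "expected " + expected + " but got " + actual;
}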
Use of org.apache.cassandra.db.DecoratedKey in project eiger by wlloyd.
The class CompactionsPurgeTest, method testCompactionPurgeOneFile.
@Test
public void testCompactionPurgeOneFile() throws IOException, ExecutionException, InterruptedException {
    CompactionManager.instance.disableAutoCompaction();
    Table table = Table.open(TABLE1);
    String cfName = "Standard2";
    ColumnFamilyStore cfs = table.getColumnFamilyStore(cfName);
    DecoratedKey key = Util.dk("key1");
    RowMutation rm;
    // inserts
    rm = new RowMutation(TABLE1, key.key);
    for (int i = 0; i < 5; i++) {
        rm.add(new QueryPath(cfName, null, ByteBufferUtil.bytes(String.valueOf(i))), ByteBufferUtil.EMPTY_BYTE_BUFFER, 0);
    }
    rm.apply();
    // deletes
    for (int i = 0; i < 5; i++) {
        rm = new RowMutation(TABLE1, key.key);
        rm.delete(new QueryPath(cfName, null, ByteBufferUtil.bytes(String.valueOf(i))), 1);
        rm.apply();
    }
    cfs.forceBlockingFlush();
    // inserts & deletes were in the same memtable -> only deletes in sstable
    assert cfs.getSSTables().size() == 1 : cfs.getSSTables();
    // compact and test that the row is completely gone
    Util.compactAll(cfs).get();
    assert cfs.getSSTables().isEmpty();
    ColumnFamily cf = table.getColumnFamilyStore(cfName).getColumnFamily(QueryFilter.getIdentityFilter(key, new QueryPath(cfName)));
    assert cf == null : cf;
}
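The purge succeeds in these tests because compaction runs with a gcBefore cutoff of Integer.MAX_VALUE, so every tombstone is already considered past its grace period. In production the cutoff is derived from the column family's gc_grace_seconds, roughly as follows (a sketch; the real computation lives in ColumnFamilyStore):

// Tombstones with a local deletion time older than gcBefore (seconds since
// the epoch) are eligible for purge during compaction.
int gcBefore = (int) (System.currentTimeMillis() / 1000) - cfs.metadata.getGcGraceSeconds();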
Use of org.apache.cassandra.db.DecoratedKey in project eiger by wlloyd.
The class IncomingStreamReader, method streamIn.
private SSTableReader streamIn(DataInput input, PendingFile localFile, PendingFile remoteFile) throws IOException {
    ColumnFamilyStore cfs = Table.open(localFile.desc.ksname).getColumnFamilyStore(localFile.desc.cfname);
    DecoratedKey key;
    SSTableWriter writer = new SSTableWriter(localFile.getFilename(), remoteFile.estimatedKeys);
    CompactionController controller = new CompactionController(cfs, Collections.<SSTableReader>emptyList(), Integer.MIN_VALUE, true);
    try {
        BytesReadTracker in = new BytesReadTracker(input);
        for (Pair<Long, Long> section : localFile.sections) {
            long length = section.right - section.left;
            long bytesRead = 0;
            while (bytesRead < length) {
                in.reset(0);
                key = SSTableReader.decodeKey(StorageService.getPartitioner(), localFile.desc, ByteBufferUtil.readWithShortLength(in));
                long dataSize = SSTableReader.readRowSize(in, localFile.desc);
                ColumnFamily cached = cfs.getRawCachedRow(key);
                if (cached != null && remoteFile.type == OperationType.AES && dataSize <= DatabaseDescriptor.getInMemoryCompactionLimit()) {
                    // need to update the row cache
                    // Note: because we won't just echo the columns, there is no need for
                    // the PRESERVE_SIZE flag, unlike in appendFromStream below
                    SSTableIdentityIterator iter = new SSTableIdentityIterator(cfs.metadata, in, key, 0, dataSize, IColumnSerializer.Flag.FROM_REMOTE);
                    PrecompactedRow row = new PrecompactedRow(controller, Collections.singletonList(iter));
                    // we don't expire anything, so the row shouldn't be empty
                    assert !row.isEmpty();
                    writer.append(row);
                    // row append does not update the max timestamp on its own
                    writer.updateMaxTimestamp(row.maxTimestamp());
                    // update the cache
                    ColumnFamily cf = row.getFullColumnFamily();
                    cfs.updateRowCache(key, cf);
                } else {
                    writer.appendFromStream(key, cfs.metadata, dataSize, in);
                    cfs.invalidateCachedRow(key);
                }
                bytesRead += in.getBytesRead();
                remoteFile.progress += in.getBytesRead();
            }
        }
        return writer.closeAndOpenReader();
    } catch (Exception e) {
        writer.abort();
        if (e instanceof IOException)
            throw (IOException) e;
        else
            throw FBUtilities.unchecked(e);
    }
}
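For current-format sstables, SSTableReader.decodeKey amounts to re-decorating the raw on-disk key bytes with the partitioner. A simplified sketch under that assumption (legacy pre-1.0 key encodings, which the real method also handles, are elided):

// Simplified view of decodeKey: wrap the serialized key bytes back into a
// DecoratedKey via the partitioner. Older sstable versions stored keys in a
// converted on-disk format and need extra handling, omitted here.
public static DecoratedKey decodeKey(IPartitioner p, Descriptor desc, ByteBuffer keyBytes) {
    return p.decorateKey(keyBytes);
}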