Use of org.apache.cassandra.db.DecoratedKey in project eiger by wlloyd.
The class AutoSavingCache, method readSaved.
/**
 * Loads the keys previously persisted to the saved-cache file for the given
 * keyspace/column family, if that file exists.
 *
 * @param ksName keyspace name used to locate the saved cache file
 * @param cfName column family name used to locate the saved cache file
 * @return the decorated keys read from disk, in sorted order; empty if the
 *         file is missing or nothing could be read
 */
public Set<DecoratedKey> readSaved(String ksName, String cfName) {
    File cacheFile = getCachePath(ksName, cfName);
    Set<DecoratedKey> result = new TreeSet<DecoratedKey>();
    if (!cacheFile.exists())
        return result;
    DataInputStream input = null;
    try {
        long startedAt = System.currentTimeMillis();
        logger.info(String.format("reading saved cache %s", cacheFile));
        input = new DataInputStream(new BufferedInputStream(new FileInputStream(cacheFile)));
        // Entries are length-prefixed key blobs; stop when the stream reports exhaustion.
        while (input.available() > 0) {
            int length = input.readInt();
            byte[] raw = new byte[length];
            input.readFully(raw);
            DecoratedKey decorated;
            try {
                decorated = StorageService.getPartitioner().decorateKey(ByteBuffer.wrap(raw));
            } catch (Exception e) {
                // One unreadable entry makes the remainder untrustworthy; keep what we have.
                logger.info(String.format("unable to read entry #%s from saved cache %s; skipping remaining entries", result.size(), cacheFile.getAbsolutePath()), e);
                break;
            }
            result.add(decorated);
        }
        if (logger.isDebugEnabled())
            logger.debug(String.format("completed reading (%d ms; %d keys) saved cache %s", System.currentTimeMillis() - startedAt, result.size(), cacheFile));
    } catch (Exception e) {
        logger.warn(String.format("error reading saved cache %s", cacheFile.getAbsolutePath()), e);
    } finally {
        FileUtils.closeQuietly(input);
    }
    return result;
}
Use of org.apache.cassandra.db.DecoratedKey in project eiger by wlloyd.
The class LegacySSTableTest, method testVersion.
/**
 * Verifies that an sstable written by an older on-disk format version can
 * still be opened and read: every test key must be found and its single
 * column name returned intact.
 *
 * @param version the sstable format version under test
 * @throws Throwable any failure, rethrown after reporting which version broke
 */
public void testVersion(String version) throws Throwable {
    try {
        SSTableReader sstable = SSTableReader.open(getDescriptor(version));
        for (String raw : TEST_DATA) {
            ByteBuffer keyBytes = ByteBufferUtil.bytes(raw);
            DecoratedKey decorated = sstable.partitioner.decorateKey(keyBytes);
            // The bloom filter must not reject any of the known keys/names.
            SSTableNamesIterator columns = new SSTableNamesIterator(sstable, decorated, FBUtilities.singleton(keyBytes));
            assert columns.next().name().equals(keyBytes);
        }
    } catch (Throwable e) {
        System.err.println("Failed to read " + version);
        throw e;
    }
}
Use of org.apache.cassandra.db.DecoratedKey in project eiger by wlloyd.
The class CompactionsPurgeTest, method testCompactionPurgeCachedRow.
/**
 * Ensures a row-cached row that was deleted and then major-compacted can be
 * re-populated by writes whose timestamps are older than the (now purged)
 * row delete.
 */
@Test
public void testCompactionPurgeCachedRow() throws IOException, ExecutionException, InterruptedException {
    CompactionManager.instance.disableAutoCompaction();
    String tableName = "RowCacheSpace";
    String cfName = "CachedCF";
    ColumnFamilyStore store = Table.open(tableName).getColumnFamilyStore(cfName);
    DecoratedKey key = Util.dk("key3");
    // Insert ten columns at timestamp 0.
    RowMutation insert = new RowMutation(tableName, key.key);
    for (int col = 0; col < 10; col++)
        insert.add(new QueryPath(cfName, null, ByteBufferUtil.bytes(String.valueOf(col))), ByteBufferUtil.EMPTY_BYTE_BUFFER, 0);
    insert.apply();
    // Read the row so it is promoted into the row cache.
    store.getColumnFamily(QueryFilter.getIdentityFilter(key, new QueryPath(cfName)));
    // Delete the whole row at timestamp 1.
    RowMutation delete = new RowMutation(tableName, key.key);
    delete.delete(new QueryPath(cfName, null, null), 1);
    delete.apply();
    // Flush and major-compact so the tombstone is purged.
    store.forceBlockingFlush();
    Util.compactAll(store).get();
    // Re-insert with timestamps lower than the delete; with the tombstone
    // gone, these writes must be visible.
    RowMutation reinsert = new RowMutation(tableName, key.key);
    for (int col = 0; col < 10; col++)
        reinsert.add(new QueryPath(cfName, null, ByteBufferUtil.bytes(String.valueOf(col))), ByteBufferUtil.EMPTY_BYTE_BUFFER, 0);
    reinsert.apply();
    // Confirm the second insert went in: all ten columns are live.
    ColumnFamily row = store.getColumnFamily(QueryFilter.getIdentityFilter(key, new QueryPath(cfName)));
    assertEquals(10, row.getColumnCount());
    for (IColumn c : row)
        assert !c.isMarkedForDelete();
}
Use of org.apache.cassandra.db.DecoratedKey in project eiger by wlloyd.
The class CompactionsPurgeTest, method testCompactionPurgeTombstonedRow.
/**
 * A row delete whose timestamp shadows only some columns: after major
 * compaction purges the tombstone, re-inserting the shadowed columns with
 * their original (pre-delete) timestamps makes them visible again alongside
 * the columns that survived.
 */
@Test
public void testCompactionPurgeTombstonedRow() throws IOException, ExecutionException, InterruptedException {
    CompactionManager.instance.disableAutoCompaction();
    String tableName = "Keyspace1";
    String cfName = "Standard1";
    ColumnFamilyStore store = Table.open(tableName).getColumnFamilyStore(cfName);
    DecoratedKey key = Util.dk("key3");
    // Insert columns 0..9, each with timestamp == column index.
    RowMutation insert = new RowMutation(tableName, key.key);
    for (int col = 0; col < 10; col++)
        insert.add(new QueryPath(cfName, null, ByteBufferUtil.bytes(String.valueOf(col))), ByteBufferUtil.EMPTY_BYTE_BUFFER, col);
    insert.apply();
    // Delete the row at timestamp 4 so only columns with timestamps 0..4 are shadowed.
    RowMutation delete = new RowMutation(tableName, key.key);
    delete.delete(new QueryPath(cfName, null, null), 4);
    delete.apply();
    // Flush and major-compact, purging the tombstone.
    store.forceBlockingFlush();
    Util.compactAll(store).get();
    // Re-insert columns 0..4 with their original (pre-delete) timestamps.
    RowMutation reinsert = new RowMutation(tableName, key.key);
    for (int col = 0; col < 5; col++)
        reinsert.add(new QueryPath(cfName, null, ByteBufferUtil.bytes(String.valueOf(col))), ByteBufferUtil.EMPTY_BYTE_BUFFER, col);
    reinsert.apply();
    // Confirm the second insert went in: all ten columns are live.
    ColumnFamily row = store.getColumnFamily(QueryFilter.getIdentityFilter(key, new QueryPath(cfName)));
    assertEquals(10, row.getColumnCount());
    for (IColumn c : row)
        assert !c.isMarkedForDelete();
}
Use of org.apache.cassandra.db.DecoratedKey in project eiger by wlloyd.
The class CompactionsPurgeTest, method testCompactionPurgeTombstonedSuperColumn.
/**
 * Supercolumn variant of the tombstone-purge test: a supercolumn delete at
 * timestamp 4, followed by flush + major compaction (purging the tombstone),
 * must allow subcolumns re-inserted with lower timestamps to become visible
 * again alongside the surviving ones.
 */
@Test
public void testCompactionPurgeTombstonedSuperColumn() throws IOException, ExecutionException, InterruptedException {
    CompactionManager.instance.disableAutoCompaction();
    String tableName = "Keyspace1";
    String cfName = "Super5";
    Table table = Table.open(tableName);
    ColumnFamilyStore cfs = table.getColumnFamilyStore(cfName);
    DecoratedKey key = Util.dk("key5");
    RowMutation rm;
    ByteBuffer scName = ByteBufferUtil.bytes("sc");
    // Insert subcolumns 0..9 under "sc", each with timestamp == index.
    // Use ByteBufferUtil.bytes(...) rather than String.getBytes(): the latter
    // encodes with the platform-default charset, and the sibling purge tests
    // in this class already use ByteBufferUtil.bytes for column names.
    rm = new RowMutation(tableName, key.key);
    for (int i = 0; i < 10; i++) {
        rm.add(new QueryPath(cfName, scName, ByteBufferUtil.bytes(String.valueOf(i))), ByteBufferUtil.EMPTY_BYTE_BUFFER, i);
    }
    rm.apply();
    // Delete the supercolumn with a timestamp such that not all subcolumns go.
    rm = new RowMutation(tableName, key.key);
    rm.delete(new QueryPath(cfName, scName, null), 4);
    rm.apply();
    // Flush and major-compact, purging the tombstone.
    cfs.forceBlockingFlush();
    Util.compactAll(cfs).get();
    // Re-insert subcolumns 0..4 with timestamps lower than the delete.
    rm = new RowMutation(tableName, key.key);
    for (int i = 0; i < 5; i++) {
        rm.add(new QueryPath(cfName, scName, ByteBufferUtil.bytes(String.valueOf(i))), ByteBufferUtil.EMPTY_BYTE_BUFFER, i);
    }
    rm.apply();
    // Confirm the second insert went in: all ten subcolumns are present.
    ColumnFamily cf = cfs.getColumnFamily(QueryFilter.getIdentityFilter(key, new QueryPath(cfName)));
    SuperColumn sc = (SuperColumn) cf.getColumn(scName);
    assert sc != null;
    assertEquals(10, sc.getColumnCount());
}
Aggregations