Usage of org.apache.cassandra.db.context.CounterContext in the eiger project by wlloyd.
From class CounterMutationTest, method testRemoveOldShardFixCorrupted:
@Test
public void testRemoveOldShardFixCorrupted() throws IOException {
CounterContext ctx = CounterContext.instance();
// Timestamp (seconds) passed to removeOldShards as "now".
int now = (int) (System.currentTimeMillis() / 1000);
// Check that corrupted context created prior to #2968 are fixed by removeOldShards
NodeId id1 = NodeId.getLocalId();
// renewLocalId() mutates global node-id state: id1 becomes an "old" local id.
NodeId.renewLocalId();
NodeId id2 = NodeId.getLocalId();
// Context with 3 shards, 2 of which are delta (local) shards.
ContextState state = ContextState.allocate(3, 2);
state.writeElement(NodeId.fromInt(1), 1, 4, false);
state.writeElement(id1, 3, 2, true);
// corrupted! Negative clock (-100) on the *current* local id is non-sensical.
state.writeElement(id2, -100, 5, true);
// Total count is 4 + 2 + 5 = 11 despite the corruption.
assert ctx.total(state.context) == 11;
try {
ByteBuffer merger = ctx.computeOldShardMerger(state.context, Collections.<NodeId.NodeIdRecord>emptyList(), 0);
ctx.removeOldShards(ctx.merge(state.context, merger, HeapAllocator.instance), now);
fail("RemoveOldShards should throw an exception if the current id is non-sensical");
} catch (RuntimeException e) {
// Expected: corruption on the current local id must be rejected.
}
// After renewing again, id2 is no longer the current id, so the corrupted
// shard can legitimately be merged away by the old-shard removal path.
NodeId.renewLocalId();
ByteBuffer merger = ctx.computeOldShardMerger(state.context, Collections.<NodeId.NodeIdRecord>emptyList(), 0);
ByteBuffer cleaned = ctx.removeOldShards(ctx.merge(state.context, merger, HeapAllocator.instance), now);
// Cleaning must preserve the logical total.
assert ctx.total(cleaned) == 11;
// Check it is not corrupted anymore
ContextState state2 = new ContextState(cleaned);
while (state2.hasRemaining()) {
// No shard may keep a negative clock with a non-zero count.
assert state2.getClock() >= 0 || state2.getCount() == 0;
state2.moveToNext();
}
// Check that if we merge old and clean on another node, we keep the right count
ByteBuffer onRemote = ctx.merge(ctx.clearAllDelta(state.context), ctx.clearAllDelta(cleaned), HeapAllocator.instance);
assert ctx.total(onRemote) == 11;
}
Usage of org.apache.cassandra.db.context.CounterContext in the eiger project by wlloyd.
From class StreamingTransferTest, method testTransferTableCounter:
@Test
public void testTransferTableCounter() throws Exception {
    final Table table = Table.open("Keyspace1");
    final ColumnFamilyStore cfs = table.getColumnFamilyStore("Counter1");
    // Use the singleton accessor rather than constructing a new CounterContext:
    // the rest of the codebase (e.g. CounterMutationTest) goes through instance().
    final CounterContext cc = CounterContext.instance();
    // Mirrors each streamed entry with a delta-cleared copy so the streamed
    // result can be compared against locally-cleaned data afterwards.
    final Map<String, ColumnFamily> cleanedEntries = new HashMap<String, ColumnFamily>();
    List<String> keys = createAndTransfer(table, cfs, new Mutator() {
        /**
         * Creates a new SSTable per key: all will be merged before streaming.
         */
        public void mutate(String key, String col, long timestamp) throws Exception {
            Map<String, ColumnFamily> entries = new HashMap<String, ColumnFamily>();
            ColumnFamily cf = ColumnFamily.create(cfs.metadata);
            ColumnFamily cfCleaned = ColumnFamily.create(cfs.metadata);
            // Counter context with 4 shards, one of which (node 2) is a delta shard.
            CounterContext.ContextState state = CounterContext.ContextState.allocate(4, 1);
            state.writeElement(NodeId.fromInt(2), 9L, 3L, true);
            state.writeElement(NodeId.fromInt(4), 4L, 2L);
            state.writeElement(NodeId.fromInt(6), 3L, 3L);
            state.writeElement(NodeId.fromInt(8), 2L, 4L);
            cf.addColumn(new CounterColumn(ByteBufferUtil.bytes(col), state.context, timestamp));
            // The "cleaned" copy has all deltas stripped, as streaming should produce.
            cfCleaned.addColumn(new CounterColumn(ByteBufferUtil.bytes(col), cc.clearAllDelta(state.context), timestamp));
            entries.put(key, cf);
            cleanedEntries.put(key, cfCleaned);
            cfs.addSSTable(SSTableUtils.prepare().ks(table.name).cf(cfs.columnFamily).generation(0).write(entries));
        }
    });
    // filter pre-cleaned entries locally, and ensure that the end result is equal
    cleanedEntries.keySet().retainAll(keys);
    SSTableReader cleaned = SSTableUtils.prepare().ks(table.name).cf(cfs.columnFamily).generation(0).write(cleanedEntries);
    SSTableReader streamed = cfs.getSSTables().iterator().next();
    SSTableUtils.assertContentEquals(cleaned, streamed);
    // Retransfer the file, making sure it is now idempotent (see CASSANDRA-3481)
    cfs.clearUnsafe();
    transfer(table, streamed);
    SSTableReader restreamed = cfs.getSSTables().iterator().next();
    SSTableUtils.assertContentEquals(streamed, restreamed);
}
Aggregations (end of usage listing).