Use of org.apache.cassandra.db.DecoratedKey in project cassandra by apache.
The class PerSSTableIndexWriterTest, method testPartialIndexWrites:
@Test
public void testPartialIndexWrites() throws Exception {
    final int maxKeys = 100000, numParts = 4, partSize = maxKeys / numParts;
    final String keyFormat = "key%06d";
    final long timestamp = System.currentTimeMillis();

    ColumnFamilyStore cfs = Keyspace.open(KS_NAME).getColumnFamilyStore(CF_NAME);
    ColumnMetadata column = cfs.metadata().getColumn(UTF8Type.instance.decompose("age"));
    SASIIndex sasi = (SASIIndex) cfs.indexManager.getIndexByName(cfs.name + "_age");

    File directory = cfs.getDirectories().getDirectoryForNewSSTables();
    Descriptor descriptor = cfs.newSSTableDescriptor(directory);
    PerSSTableIndexWriter indexWriter = (PerSSTableIndexWriter) sasi.getFlushObserver(descriptor, OperationType.FLUSH);

    SortedMap<DecoratedKey, Row> expectedKeys = new TreeMap<>(DecoratedKey.comparator);
    for (int i = 0; i < maxKeys; i++) {
        ByteBuffer key = ByteBufferUtil.bytes(String.format(keyFormat, i));
        expectedKeys.put(cfs.metadata().partitioner.decorateKey(key),
                         BTreeRow.singleCellRow(Clustering.EMPTY,
                                                BufferCell.live(column, timestamp, Int32Type.instance.decompose(i))));
    }

    indexWriter.begin();

    Iterator<Map.Entry<DecoratedKey, Row>> keyIterator = expectedKeys.entrySet().iterator();
    long position = 0;

    // feed the keys in numParts chunks, flushing an index segment after each chunk
    Set<String> segments = new HashSet<>();
    outer:
    for (;;) {
        for (int i = 0; i < partSize; i++) {
            if (!keyIterator.hasNext())
                break outer;

            Map.Entry<DecoratedKey, Row> key = keyIterator.next();
            indexWriter.startPartition(key.getKey(), position++);
            indexWriter.nextUnfilteredCluster(key.getValue());
        }

        PerSSTableIndexWriter.Index index = indexWriter.getIndex(column);
        OnDiskIndex segment = index.scheduleSegmentFlush(false).call();

        index.segments.add(Futures.immediateFuture(segment));
        segments.add(segment.getIndexPath());
    }

    for (String segment : segments)
        Assert.assertTrue(new File(segment).exists());

    String indexFile = indexWriter.indexes.get(column).filename(true);

    // the final flush merges the partial segments into a single index file and deletes them
    indexWriter.complete();

    for (String segment : segments)
        Assert.assertFalse(new File(segment).exists());

    OnDiskIndex index = new OnDiskIndex(new File(indexFile), Int32Type.instance, keyPosition -> {
        ByteBuffer key = ByteBufferUtil.bytes(String.format(keyFormat, keyPosition));
        return cfs.metadata().partitioner.decorateKey(key);
    });

    Assert.assertEquals(0, UTF8Type.instance.compare(index.minKey(), ByteBufferUtil.bytes(String.format(keyFormat, 0))));
    Assert.assertEquals(0, UTF8Type.instance.compare(index.maxKey(), ByteBufferUtil.bytes(String.format(keyFormat, maxKeys - 1))));

    Set<DecoratedKey> actualKeys = new HashSet<>();
    int count = 0;
    for (OnDiskIndex.DataTerm term : index) {
        RangeIterator<Long, Token> tokens = term.getTokens();
        while (tokens.hasNext()) {
            for (DecoratedKey key : tokens.next())
                actualKeys.add(key);
        }

        Assert.assertEquals(count++, (int) Int32Type.instance.compose(term.getTerm()));
    }

    Assert.assertEquals(expectedKeys.size(), actualKeys.size());
    for (DecoratedKey key : expectedKeys.keySet())
        Assert.assertTrue(actualKeys.contains(key));

    FileUtils.closeQuietly(index);
}
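Note the use of DecoratedKey.comparator for the expected-key map: decorated keys sort by partitioner token, not by the raw key bytes, which matches the partition order the SSTable flush observer sees. A minimal standalone sketch of that ordering, assuming the Cassandra classpath; Murmur3Partitioner and the class name are illustrative choices, not taken from the test:

import java.nio.ByteBuffer;
import java.util.SortedSet;
import java.util.TreeSet;

import org.apache.cassandra.db.DecoratedKey;
import org.apache.cassandra.dht.Murmur3Partitioner;
import org.apache.cassandra.utils.ByteBufferUtil;

public class DecoratedKeyOrderingSketch {
    public static void main(String[] args) {
        // keys sorted the way the flush observer expects: by token, then key
        SortedSet<DecoratedKey> keys = new TreeSet<>(DecoratedKey.comparator);
        for (int i = 0; i < 5; i++) {
            ByteBuffer raw = ByteBufferUtil.bytes(String.format("key%06d", i));
            keys.add(Murmur3Partitioner.instance.decorateKey(raw));
        }
        // iteration order follows token order, which generally differs
        // from the lexical order of the raw key strings
        for (DecoratedKey key : keys)
            System.out.println(key.getToken() + " -> " + key);
    }
}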
Use of org.apache.cassandra.db.DecoratedKey in project cassandra by apache.
The class HintsServiceTest, method testEarliestHint:
@Test
@BMRule(name = "GetHintTS",
        targetClass = "HintsBuffer$Allocation",
        targetMethod = "write(Iterable, Hint)",
        targetLocation = "AFTER INVOKE putIfAbsent",
        action = "org.apache.cassandra.hints.HintsServiceTest.timestampForHint = $ts")
public void testEarliestHint() throws InterruptedException {
    // create and write three hints using the service
    UUID hostId = StorageService.instance.getLocalHostUUID();
    TableMetadata metadata = Schema.instance.getTableMetadata(KEYSPACE, TABLE);
    long ts = System.currentTimeMillis();

    DecoratedKey dkey = Util.dk(String.valueOf(1));
    PartitionUpdate.SimpleBuilder builder = PartitionUpdate.simpleBuilder(metadata, dkey).timestamp(ts);
    builder.row("column0").add("val", "value0");
    Hint hint = Hint.create(builder.buildAsMutation(), ts);

    HintsService.instance.write(hostId, hint);
    long oldestHintTime = timestampForHint;
    Thread.sleep(1);
    HintsService.instance.write(hostId, hint);
    Thread.sleep(1);
    HintsService.instance.write(hostId, hint);

    // Close and fsync so that we get the timestamp from the descriptor rather than the buffer.
    HintsStore store = HintsService.instance.getCatalog().get(hostId);
    HintsService.instance.flushAndFsyncBlockingly(Collections.singletonList(hostId));
    store.closeWriter();

    long earliest = HintsService.instance.getEarliestHintForHost(hostId);
    assertEquals(oldestHintTime, earliest);
    assertNotEquals(oldestHintTime, timestampForHint);
}
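The hint construction above is a reusable pattern: build a single-row PartitionUpdate for a decorated key, convert it to a mutation, and wrap it in a Hint with a creation timestamp. A hedged sketch of that pattern extracted into a helper; the class and method names are ours, and "column0"/"val" are the test schema's column names:

import org.apache.cassandra.db.DecoratedKey;
import org.apache.cassandra.db.partitions.PartitionUpdate;
import org.apache.cassandra.hints.Hint;
import org.apache.cassandra.schema.TableMetadata;

final class HintSketch {
    // build a one-row update for the given key and wrap it in a hint
    static Hint hintFor(TableMetadata metadata, DecoratedKey key, long ts) {
        PartitionUpdate.SimpleBuilder builder = PartitionUpdate.simpleBuilder(metadata, key).timestamp(ts);
        builder.row("column0").add("val", "value0");
        return Hint.create(builder.buildAsMutation(), ts);
    }
}

HintsService.instance.write(hostId, HintSketch.hintFor(metadata, dkey, ts)) would then mirror each of the three writes in the test.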
Use of org.apache.cassandra.db.DecoratedKey in project eiger by wlloyd.
The class AutoSavingCache, method readSaved:
public Set<DecoratedKey> readSaved(String ksName, String cfName) {
    File path = getCachePath(ksName, cfName);
    Set<DecoratedKey> keys = new TreeSet<DecoratedKey>();
    if (path.exists()) {
        DataInputStream in = null;
        try {
            long start = System.currentTimeMillis();
            logger.info(String.format("reading saved cache %s", path));
            in = new DataInputStream(new BufferedInputStream(new FileInputStream(path)));
            while (in.available() > 0) {
                // each entry is an int length followed by the raw key bytes
                int size = in.readInt();
                byte[] bytes = new byte[size];
                in.readFully(bytes);
                ByteBuffer buffer = ByteBuffer.wrap(bytes);
                DecoratedKey key;
                try {
                    key = StorageService.getPartitioner().decorateKey(buffer);
                } catch (Exception e) {
                    logger.info(String.format("unable to read entry #%s from saved cache %s; skipping remaining entries",
                                              keys.size(), path.getAbsolutePath()), e);
                    break;
                }
                keys.add(key);
            }
            if (logger.isDebugEnabled())
                logger.debug(String.format("completed reading (%d ms; %d keys) saved cache %s",
                                           System.currentTimeMillis() - start, keys.size(), path));
        } catch (Exception e) {
            logger.warn(String.format("error reading saved cache %s", path.getAbsolutePath()), e);
        } finally {
            FileUtils.closeQuietly(in);
        }
    }
    return keys;
}
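The loop decodes a simple length-prefixed format: a 4-byte length, then the raw key bytes, repeated until EOF. For illustration, here is a companion writer sketch for the same format; the class and method names are ours, not eiger's API:

import java.io.BufferedOutputStream;
import java.io.DataOutputStream;
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.Set;

import org.apache.cassandra.db.DecoratedKey;
import org.apache.cassandra.utils.ByteBufferUtil;

final class SavedCacheWriterSketch {
    static void writeSaved(File path, Set<DecoratedKey> keys) throws IOException {
        DataOutputStream out = new DataOutputStream(new BufferedOutputStream(new FileOutputStream(path)));
        try {
            for (DecoratedKey key : keys) {
                // mirror readSaved: an int length, then the raw key bytes
                ByteBuffer bytes = key.key.duplicate();
                out.writeInt(bytes.remaining());
                out.write(ByteBufferUtil.getArray(bytes));
            }
        } finally {
            out.close();
        }
    }
}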
Use of org.apache.cassandra.db.DecoratedKey in project eiger by wlloyd.
The class CompactionsPurgeTest, method testCompactionPurgeTombstonedSuperColumn:
@Test
public void testCompactionPurgeTombstonedSuperColumn() throws IOException, ExecutionException, InterruptedException {
    CompactionManager.instance.disableAutoCompaction();

    String tableName = "Keyspace1";
    String cfName = "Super5";
    Table table = Table.open(tableName);
    ColumnFamilyStore cfs = table.getColumnFamilyStore(cfName);
    DecoratedKey key = Util.dk("key5");
    RowMutation rm;
    ByteBuffer scName = ByteBufferUtil.bytes("sc");

    // inserts
    rm = new RowMutation(tableName, key.key);
    for (int i = 0; i < 10; i++) {
        rm.add(new QueryPath(cfName, scName, ByteBuffer.wrap(String.valueOf(i).getBytes())), ByteBufferUtil.EMPTY_BYTE_BUFFER, i);
    }
    rm.apply();

    // delete the supercolumn with a timestamp such that not all columns are shadowed
    rm = new RowMutation(tableName, key.key);
    rm.delete(new QueryPath(cfName, scName, null), 4);
    rm.apply();

    // flush and major compact
    cfs.forceBlockingFlush();
    Util.compactAll(cfs).get();

    // re-insert with timestamps lower than the delete
    rm = new RowMutation(tableName, key.key);
    for (int i = 0; i < 5; i++) {
        rm.add(new QueryPath(cfName, scName, ByteBuffer.wrap(String.valueOf(i).getBytes())), ByteBufferUtil.EMPTY_BYTE_BUFFER, i);
    }
    rm.apply();

    // check that the second insert went in
    ColumnFamily cf = cfs.getColumnFamily(QueryFilter.getIdentityFilter(key, new QueryPath(cfName)));
    SuperColumn sc = (SuperColumn) cf.getColumn(scName);
    assert sc != null;
    assertEquals(10, sc.getColumnCount());
}
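The count of 10 works out because the supercolumn tombstone at timestamp 4 shadowed the original columns 0-4 and was itself purged by the major compaction, so the re-inserted columns 0-4 read as live alongside the surviving columns 5-9. A tiny model of just the shadowing rule; the names here are illustrative, not Cassandra API:

final class ShadowingSketch {
    // illustrative: a (super)column deletion at time T shadows any column
    // whose timestamp is <= T
    static boolean isShadowed(long columnTimestamp, long deletionTimestamp) {
        return columnTimestamp <= deletionTimestamp;
    }

    public static void main(String[] args) {
        long deletionTs = 4; // matches rm.delete(..., 4) above
        for (long ts = 0; ts < 10; ts++)
            System.out.printf("column ts=%d shadowed=%b%n", ts, isShadowed(ts, deletionTs));
    }
}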
Use of org.apache.cassandra.db.DecoratedKey in project eiger by wlloyd.
The class CompactionsPurgeTest, method testCompactionPurgeCachedRow:
@Test
public void testCompactionPurgeCachedRow() throws IOException, ExecutionException, InterruptedException {
    CompactionManager.instance.disableAutoCompaction();

    String tableName = "RowCacheSpace";
    String cfName = "CachedCF";
    Table table = Table.open(tableName);
    ColumnFamilyStore cfs = table.getColumnFamilyStore(cfName);
    DecoratedKey key = Util.dk("key3");
    RowMutation rm;

    // inserts
    rm = new RowMutation(tableName, key.key);
    for (int i = 0; i < 10; i++) {
        rm.add(new QueryPath(cfName, null, ByteBufferUtil.bytes(String.valueOf(i))), ByteBufferUtil.EMPTY_BYTE_BUFFER, 0);
    }
    rm.apply();

    // move the key up in the row cache
    cfs.getColumnFamily(QueryFilter.getIdentityFilter(key, new QueryPath(cfName)));

    // delete the row
    rm = new RowMutation(tableName, key.key);
    rm.delete(new QueryPath(cfName, null, null), 1);
    rm.apply();

    // flush and major compact
    cfs.forceBlockingFlush();
    Util.compactAll(cfs).get();

    // re-insert with a timestamp lower than the delete
    rm = new RowMutation(tableName, key.key);
    for (int i = 0; i < 10; i++) {
        rm.add(new QueryPath(cfName, null, ByteBufferUtil.bytes(String.valueOf(i))), ByteBufferUtil.EMPTY_BYTE_BUFFER, 0);
    }
    rm.apply();

    // check that the second insert went in
    ColumnFamily cf = cfs.getColumnFamily(QueryFilter.getIdentityFilter(key, new QueryPath(cfName)));
    assertEquals(10, cf.getColumnCount());
    for (IColumn c : cf)
        assert !c.isMarkedForDelete();
}
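The cached-row variant depends on the same purge behavior: the row tombstone at timestamp 1 would shadow the ts=0 re-inserts if it survived, but the major compaction drops it (and the row cache must agree), so all 10 columns read back live. A sketch of only the purge predicate, with illustrative names; the gcBefore framing is our assumption about how the maximal compaction here behaves:

final class PurgeSketch {
    // illustrative: a tombstone can be dropped once its local deletion time
    // is older than gcBefore (roughly now - gc_grace_seconds)
    static boolean purgeable(int localDeletionTime, int gcBefore) {
        return localDeletionTime < gcBefore;
    }

    public static void main(String[] args) {
        int now = (int) (System.currentTimeMillis() / 1000);
        System.out.println(purgeable(now - 10, now));          // true: past the grace window
        System.out.println(purgeable(now + 10, now));          // false: still within grace
        System.out.println(purgeable(now, Integer.MAX_VALUE)); // true: a maximal gcBefore purges everything
    }
}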