Use of org.apache.cassandra.io.sstable.SSTableReader in project eiger by wlloyd: class StreamingTransferTest, method createAndTransfer.
/**
 * Create and transfer a single sstable, and return the keys that should have been transferred.
 * The Mutator must create the given column, but it may also create any other columns it pleases.
 */
private List<String> createAndTransfer(Table table, ColumnFamilyStore cfs, Mutator mutator) throws Exception {
    // write a temporary SSTable, and unregister it
    logger.debug("Mutating " + cfs.columnFamily);
    final long timestamp = 1234;
    for (int n = 1; n <= 3; n++) {
        mutator.mutate("key" + n, "col" + n, timestamp);
    }
    cfs.forceBlockingFlush();
    Util.compactAll(cfs).get();
    assertEquals(1, cfs.getSSTables().size());
    SSTableReader sstable = cfs.getSSTables().iterator().next();
    cfs.clearUnsafe();

    // transfer the first and last key
    logger.debug("Transferring " + cfs.columnFamily);
    transfer(table, sstable);

    // confirm that a single SSTable was transferred and registered
    assertEquals(1, cfs.getSSTables().size());

    // and that the index and filter were properly recovered
    int[] expectedOffsets = new int[] { 1, 3 };
    List<Row> rows = Util.getRangeSlice(cfs);
    assertEquals(expectedOffsets.length, rows.size());
    int idx = 0;
    for (int off : expectedOffsets) {
        String key = "key" + off;
        String col = "col" + off;
        // the key must be readable via the identity filter and present in the range slice
        assert cfs.getColumnFamily(QueryFilter.getIdentityFilter(Util.dk(key), new QueryPath(cfs.columnFamily))) != null;
        Row row = rows.get(idx++);
        assert row.key.key.equals(ByteBufferUtil.bytes(key));
        assert row.cf.getColumn(ByteBufferUtil.bytes(col)) != null;
    }

    // and that the max timestamp for the file was rediscovered
    assertEquals(timestamp, cfs.getSSTables().iterator().next().getMaxTimestamp());

    List<String> keys = new ArrayList<String>();
    for (int off : expectedOffsets) {
        keys.add("key" + off);
    }
    logger.debug("... everything looks good for " + cfs.columnFamily);
    return keys;
}
Use of org.apache.cassandra.io.sstable.SSTableReader in project eiger by wlloyd: class SSTableImportTest, method testImportSimpleCf.
@Test
public void testImportSimpleCf() throws IOException, URISyntaxException {
    // Import JSON to temp SSTable file
    String jsonUrl = resourcePath("SimpleCF.json");
    File tempSS = tempSSTableFile("Keyspace1", "Standard1");
    SSTableImport.importJson(jsonUrl, "Keyspace1", "Standard1", tempSS.getPath());

    // Verify results
    SSTableReader reader = SSTableReader.open(Descriptor.fromFilename(tempSS.getPath()));
    QueryFilter qf = QueryFilter.getIdentityFilter(Util.dk("rowA"), new QueryPath("Standard1"));
    IColumnIterator iter = qf.getSSTableColumnIterator(reader);
    ColumnFamily cf = iter.getColumnFamily();
    while (iter.hasNext()) {
        cf.addColumn(iter.next());
    }

    // a plain live column survived the round trip
    IColumn colAA = cf.getColumn(ByteBufferUtil.bytes("colAA"));
    assert colAA.value().equals(hexToBytes("76616c4141"));
    assert !(colAA instanceof DeletedColumn);

    // the expiring column kept its value, TTL, and local deletion time
    IColumn expCol = cf.getColumn(ByteBufferUtil.bytes("colAC"));
    assert expCol.value().equals(hexToBytes("76616c4143"));
    assert expCol instanceof ExpiringColumn;
    assert ((ExpiringColumn) expCol).getTimeToLive() == 42;
    assert expCol.getLocalDeletionTime() == 2000000000;
}
Use of org.apache.cassandra.io.sstable.SSTableReader in project eiger by wlloyd: class RowIteratorFactory, method getIterator.
/**
 * Get a row iterator over the provided memtables and sstables, between the provided keys
 * and filtered by the queryfilter.
 * @param memtables Memtables pending flush.
 * @param sstables SStables to scan through.
 * @param startWith Start at this key
 * @param stopAt Stop at this key
 * @param filter Used to decide which columns to pull out
 * @param cfs the column family store the rows belong to (metadata, row cache)
 * @return A row iterator following all the given restrictions
 */
public static CloseableIterator<Row> getIterator(final Iterable<Memtable> memtables, final Collection<SSTableReader> sstables, final RowPosition startWith, final RowPosition stopAt, final QueryFilter filter, final ColumnFamilyStore cfs) {
// fetch data from current memtable, historical memtables, and SSTables in the correct order.
final List<CloseableIterator<IColumnIterator>> iterators = new ArrayList<CloseableIterator<IColumnIterator>>();
// memtables
for (Memtable memtable : memtables) {
// adapt each memtable's (key, cf) entries to the common IColumnIterator shape
iterators.add(new ConvertToColumnIterator(filter, memtable.getEntryIterator(startWith, stopAt)));
}
for (SSTableReader sstable : sstables) {
final SSTableScanner scanner = sstable.getScanner(filter);
// position the scanner at the start of the requested range
scanner.seekTo(startWith);
// otherwise we leak FDs
assert scanner instanceof Closeable;
iterators.add(scanner);
}
// reduce rows from all sources into a single row
return MergeIterator.get(iterators, COMPARE_BY_KEY, new MergeIterator.Reducer<IColumnIterator, Row>() {
// columns older than this are eligible for garbage collection during collation
private final int gcBefore = (int) (System.currentTimeMillis() / 1000) - cfs.metadata.getGcGraceSeconds();
// per-key accumulator of the column iterators contributing to the current row
private final List<IColumnIterator> colIters = new ArrayList<IColumnIterator>();
private DecoratedKey key;
private ColumnFamily returnCF;
@Override
protected void onKeyChange() {
// fresh CF for each new key; reduce() merges deletion info into it
this.returnCF = ColumnFamily.create(cfs.metadata);
}
public void reduce(IColumnIterator current) {
// collect the source iterator; columns are pulled lazily in getReduced()
this.colIters.add(current);
this.key = current.getKey();
// merge row-level deletion markers before any column collation happens
this.returnCF.delete(current.getColumnFamily());
}
protected Row getReduced() {
// First check if this row is in the rowCache. If it is we can skip the rest
ColumnFamily cached = cfs.getRawCachedRow(key);
if (cached == null)
// not cached: collate
filter.collateColumns(returnCF, colIters, gcBefore);
else {
// cached: apply the filter to the cached copy instead of the source iterators
QueryFilter keyFilter = new QueryFilter(key, filter.path, filter.filter);
returnCF = cfs.filterColumnFamily(cached, keyFilter, gcBefore);
}
Row rv = new Row(key, returnCF);
// reset accumulated state so the next key starts clean
colIters.clear();
key = null;
return rv;
}
});
}
Use of org.apache.cassandra.io.sstable.SSTableReader in project eiger by wlloyd: class TableTest, method testGetSliceFromLarge.
@Test
public void testGetSliceFromLarge() throws Throwable {
    // tests slicing against 1000 columns in an sstable
    Table table = Table.open("Keyspace1");
    ColumnFamilyStore cfStore = table.getColumnFamilyStore("Standard1");
    DecoratedKey key = Util.dk("row3");
    RowMutation rm = new RowMutation("Keyspace1", key.key);
    ColumnFamily cf = ColumnFamily.create("Keyspace1", "Standard1");
    for (int i = 1000; i < 2000; i++) cf.addColumn(column("col" + i, ("v" + i), 1L));
    rm.add(cf);
    rm.apply();
    cfStore.forceBlockingFlush();
    validateSliceLarge(cfStore);

    // compact so we have a big row with more than the minimum index count
    if (cfStore.getSSTables().size() > 1) {
        CompactionManager.instance.performMaximal(cfStore);
    }

    // verify that we do indeed have multiple index entries
    SSTableReader sstable = cfStore.getSSTables().iterator().next();
    long position = sstable.getPosition(key, SSTableReader.Operator.EQ);
    // FIX: close the data reader when done; the original leaked the file descriptor
    RandomAccessReader file = sstable.openDataReader(false);
    try {
        file.seek(position);
        // on-disk row layout: short-length key, row size, bloom filter, column index
        assert ByteBufferUtil.readWithShortLength(file).equals(key.key);
        SSTableReader.readRowSize(file, sstable.descriptor);
        IndexHelper.skipBloomFilter(file);
        ArrayList<IndexHelper.IndexInfo> indexes = IndexHelper.deserializeIndex(file);
        assert indexes.size() > 2;
    } finally {
        file.close();
    }
    validateSliceLarge(cfStore);
}
Use of org.apache.cassandra.io.sstable.SSTableReader in project eiger by wlloyd: class CompactionsPurgeTest, method testMinorCompactionPurge.
@Test
public void testMinorCompactionPurge() throws IOException, ExecutionException, InterruptedException {
    CompactionManager.instance.disableAutoCompaction();
    Table table = Table.open(TABLE2);
    String cfName = "Standard1";
    ColumnFamilyStore cfs = table.getColumnFamilyStore(cfName);

    // for two keys: insert ten columns, flush, then tombstone all ten and flush again
    for (int k = 1; k <= 2; ++k) {
        DecoratedKey key = Util.dk("key" + k);

        // inserts
        RowMutation insert = new RowMutation(TABLE2, key.key);
        for (int i = 0; i < 10; i++) {
            insert.add(new QueryPath(cfName, null, ByteBufferUtil.bytes(String.valueOf(i))), ByteBufferUtil.EMPTY_BYTE_BUFFER, 0);
        }
        insert.apply();
        cfs.forceBlockingFlush();

        // deletes
        for (int i = 0; i < 10; i++) {
            RowMutation delete = new RowMutation(TABLE2, key.key);
            delete.delete(new QueryPath(cfName, null, ByteBufferUtil.bytes(String.valueOf(i))), 1);
            delete.apply();
        }
        cfs.forceBlockingFlush();
    }

    DecoratedKey key1 = Util.dk("key1");
    DecoratedKey key2 = Util.dk("key2");

    // flush, remember the current sstable and then resurrect one column
    // for first key. Then submit minor compaction on remembered sstables.
    cfs.forceBlockingFlush();
    Collection<SSTableReader> sstablesIncomplete = cfs.getSSTables();
    RowMutation resurrect = new RowMutation(TABLE2, key1.key);
    resurrect.add(new QueryPath(cfName, null, ByteBufferUtil.bytes(String.valueOf(5))), ByteBufferUtil.EMPTY_BYTE_BUFFER, 2);
    resurrect.apply();
    cfs.forceBlockingFlush();
    new CompactionTask(cfs, sstablesIncomplete, Integer.MAX_VALUE).execute(null);

    // verify that minor compaction does not GC when key is present
    // in a non-compacted sstable
    ColumnFamily cf = cfs.getColumnFamily(QueryFilter.getIdentityFilter(key1, new QueryPath(cfName)));
    assert cf.getColumnCount() == 10;

    // verify that minor compaction does GC when key is provably not
    // present in a non-compacted sstable
    cf = cfs.getColumnFamily(QueryFilter.getIdentityFilter(key2, new QueryPath(cfName)));
    assert cf == null;
}
Aggregations