Usage of org.apache.cassandra.db.filter.QueryFilter in project eiger (by wlloyd):
class Migration, method getLastMigrationId().
/**
 * Looks up the identifier of the most recently applied schema migration.
 *
 * <p>Reads the column stored under {@code LAST_MIGRATION_KEY} in the schema
 * column family of the system table.
 *
 * @return the stored migration UUID, or {@code null} if no migration has
 *         been recorded yet
 */
public static UUID getLastMigrationId() {
    DecoratedKey<?> migrationKey = StorageService.getPartitioner().decorateKey(LAST_MIGRATION_KEY);
    Table systemTable = Table.open(Table.SYSTEM_TABLE);
    ColumnFamilyStore schemaStore = systemTable.getColumnFamilyStore(SCHEMA_CF);
    QueryFilter nameFilter = QueryFilter.getNamesFilter(migrationKey, new QueryPath(SCHEMA_CF), LAST_MIGRATION_KEY);
    ColumnFamily result = schemaStore.getColumnFamily(nameFilter);
    // No row or an empty row means no migration has ever been applied.
    if (result != null && result.getColumnNames().size() > 0) {
        return UUIDGen.getUUID(result.getColumn(LAST_MIGRATION_KEY).value());
    }
    return null;
}
Usage of org.apache.cassandra.db.filter.QueryFilter in project eiger (by wlloyd):
class CompactionsTest, method testDontPurgeAccidentaly().
/**
 * Regression test for CASSANDRA-2786: after deleting a row and compacting
 * only the sstables created since the first flush, a read must still see
 * the row as deleted.
 *
 * @param k                row key to exercise
 * @param cfname           column family under test
 * @param forceDeserialize whether the compaction should force deserialization
 */
private void testDontPurgeAccidentaly(String k, String cfname, boolean forceDeserialize) throws IOException, ExecutionException, InterruptedException {
    // This test catches the regression of CASSANDRA-2786
    Table table = Table.open(TABLE1);
    ColumnFamilyStore store = table.getColumnFamilyStore(cfname);
    // Start from a clean store and keep compaction under manual control.
    store.clearUnsafe();
    store.disableAutoCompaction();

    // Insert one test row and flush it to its own sstable.
    DecoratedKey key = Util.dk(k);
    RowMutation insert = new RowMutation(TABLE1, key.key);
    insert.add(new QueryPath(cfname, ByteBufferUtil.bytes("sc"), ByteBufferUtil.bytes("c")), ByteBufferUtil.EMPTY_BYTE_BUFFER, 0);
    insert.apply();
    store.forceBlockingFlush();
    Collection<SSTableReader> sstablesBefore = store.getSSTables();

    QueryFilter filter = QueryFilter.getIdentityFilter(key, new QueryPath(cfname, null, null));
    assert !store.getColumnFamily(filter).isEmpty();

    // Delete the whole row at a later timestamp than the insert.
    RowMutation deletion = new RowMutation(TABLE1, key.key);
    deletion.delete(new QueryPath(cfname, null, null), 2);
    deletion.apply();
    ColumnFamily cf = store.getColumnFamily(filter);
    assert cf == null || cf.isEmpty() : "should be empty: " + cf;
    store.forceBlockingFlush();

    // Compact only the sstables written after the initial flush
    // (i.e. the one(s) holding the tombstone but not the data).
    Collection<SSTableReader> toCompact = new ArrayList<SSTableReader>();
    for (SSTableReader sstable : store.getSSTables()) {
        if (!sstablesBefore.contains(sstable)) {
            toCompact.add(sstable);
        }
    }
    Util.compact(store, toCompact, forceDeserialize);

    // The deletion must still win after the partial compaction.
    cf = store.getColumnFamily(filter);
    assert cf == null || cf.isEmpty() : "should be empty: " + cf;
}
Usage of org.apache.cassandra.db.filter.QueryFilter in project eiger (by wlloyd):
class SSTableImportTest, method testImportSimpleCf().
/**
 * Imports the SimpleCF.json fixture into a temporary sstable and verifies
 * that a live column and an expiring column round-trip with their values,
 * TTL, and local deletion time intact.
 */
@Test
public void testImportSimpleCf() throws IOException, URISyntaxException {
    // Import the JSON fixture into a temp sstable file.
    String jsonUrl = resourcePath("SimpleCF.json");
    File tempSS = tempSSTableFile("Keyspace1", "Standard1");
    SSTableImport.importJson(jsonUrl, "Keyspace1", "Standard1", tempSS.getPath());

    // Read the row back via an identity filter and collect all its columns.
    SSTableReader reader = SSTableReader.open(Descriptor.fromFilename(tempSS.getPath()));
    QueryFilter qf = QueryFilter.getIdentityFilter(Util.dk("rowA"), new QueryPath("Standard1"));
    IColumnIterator iter = qf.getSSTableColumnIterator(reader);
    ColumnFamily cf = iter.getColumnFamily();
    while (iter.hasNext()) {
        cf.addColumn(iter.next());
    }

    // colAA must be a live (non-deleted) column with the imported value.
    IColumn colAA = cf.getColumn(ByteBufferUtil.bytes("colAA"));
    assert colAA.value().equals(hexToBytes("76616c4141"));
    assert !(colAA instanceof DeletedColumn);

    // colAC must be an expiring column carrying its TTL and deletion time.
    IColumn expCol = cf.getColumn(ByteBufferUtil.bytes("colAC"));
    assert expCol.value().equals(hexToBytes("76616c4143"));
    assert expCol instanceof ExpiringColumn;
    assert ((ExpiringColumn) expCol).getTimeToLive() == 42 && expCol.getLocalDeletionTime() == 2000000000;
}
Usage of org.apache.cassandra.db.filter.QueryFilter in project stargate-core (by tuplejump):
class RowIndexSupport, method loadOldRow().
/**
 * Re-reads the previously stored version of a CQL row (identified by its
 * partition key and the clustering key encoded in {@code pkBuf}) and appends
 * index fields for every cell whose column is configured for indexing.
 *
 * @param dk     partition key of the row being re-indexed
 * @param pkBuf  serialized clustering-key component used to locate the row
 * @param fields output list that {@code addFields} appends Lucene fields to
 */
private void loadOldRow(DecoratedKey dk, ByteBuffer pkBuf, List<Field> fields) {
    CellName clusteringKey = tableMapper.makeClusteringKey(pkBuf);
    // Build a slice covering exactly this clustering prefix.
    Composite start = tableMapper.start(clusteringKey);
    Composite end = tableMapper.end(start);
    ColumnSlice columnSlice = new ColumnSlice(start, end);
    SliceQueryFilter sliceQueryFilter = new SliceQueryFilter(columnSlice, false, Integer.MAX_VALUE);
    // System.currentTimeMillis() yields the same epoch-millis timestamp as
    // new Date().getTime() without allocating a legacy java.util.Date.
    QueryFilter queryFilter = new QueryFilter(dk, tableMapper.table.name, sliceQueryFilter, System.currentTimeMillis());
    ColumnFamily columnFamily = tableMapper.table.getColumnFamily(queryFilter);
    Map<CellName, ColumnFamily> fullSlice = tableMapper.getRows(columnFamily);
    // NOTE(review): fullSlice.get(...) would NPE below if the old row is
    // absent — presumably callers only invoke this for existing rows; verify.
    ColumnFamily oldDocument = fullSlice.get(clusteringKey);
    for (Cell cell : oldDocument) {
        CellName cellName = cell.name();
        ColumnIdentifier cql3ColName = cellName.cql3ColumnName(tableMapper.cfMetaData);
        String actualColName = cql3ColName.toString();
        ColumnDefinition columnDefinition = tableMapper.cfMetaData.getColumnDefinition(cql3ColName);
        if (options.shouldIndex(actualColName)) {
            addFields(cell, actualColName, columnDefinition, fields);
        }
    }
}
Usage of org.apache.cassandra.db.filter.QueryFilter in project eiger (by wlloyd):
class RowRepairResolver, method resolveSuperset().
/**
 * Merges several replica versions of the same row into a single resolved
 * ColumnFamily, then strips columns suppressed by tombstones.
 *
 * @param versions replica copies of one row; entries may be null, but the
 *                 iterable must be non-empty (asserted below)
 * @return the merged ColumnFamily, or {@code null} if every version was null
 */
static ColumnFamily resolveSuperset(Iterable<ColumnFamily> versions) {
assert Iterables.size(versions) > 0;
ColumnFamily resolved = null;
// First pass: accumulate deletion metadata across all non-null versions.
// cloneMeShallow() seeds the result from the first version seen;
// presumably delete(cf) folds cf's row/range deletion info into
// resolved — confirm against ColumnFamily.delete's contract.
for (ColumnFamily cf : versions) {
if (cf == null)
continue;
if (resolved == null)
resolved = cf.cloneMeShallow();
else
resolved.delete(cf);
}
// All versions were null: nothing to resolve.
if (resolved == null)
return null;
// mimic the collectCollatedColumn + removeDeleted path that getColumnFamily takes.
// this will handle removing columns and supercolumns that are suppressed by a row or
// supercolumn tombstone.
QueryFilter filter = new QueryFilter(null, new QueryPath(resolved.metadata().cfName), new IdentityQueryFilter());
List<CloseableIterator<IColumn>> iters = new ArrayList<CloseableIterator<IColumn>>();
// Second pass: collate the columns of every non-null version into the
// resolved family; Integer.MIN_VALUE as gcBefore keeps all tombstones.
for (ColumnFamily version : versions) {
if (version == null)
continue;
iters.add(FBUtilities.closeableIterator(version.iterator()));
}
filter.collateColumns(resolved, iters, Integer.MIN_VALUE);
return ColumnFamilyStore.removeDeleted(resolved, Integer.MIN_VALUE);
}
Aggregations