use of org.apache.cassandra.io.sstable.SSTable in project eiger by wlloyd.
the class ColumnFamilyStoreTest method testDeleteStandardRowSticksAfterFlush.
@Test
public void testDeleteStandardRowSticksAfterFlush() throws Throwable {
    // test to make sure flushing after a delete doesn't resurrect deleted cols.
    String tableName = "Keyspace1";
    String cfName = "Standard1";
    Table table = Table.open(tableName);
    ColumnFamilyStore cfs = table.getColumnFamilyStore(cfName);
    DecoratedKey key = Util.dk("f-flush-resurrection");
    SlicePredicate sp = new SlicePredicate();
    sp.setSlice_range(new SliceRange());
    sp.getSlice_range().setCount(100);
    sp.getSlice_range().setStart(ArrayUtils.EMPTY_BYTE_ARRAY);
    sp.getSlice_range().setFinish(ArrayUtils.EMPTY_BYTE_ARRAY);
    // insert
    putColsStandard(cfs, key, column("col1", "val1", 1), column("col2", "val2", 1));
    assertRowAndColCount(1, 2, null, false, cfs.getRangeSlice(null, Util.range("f", "g"), 100, QueryFilter.getFilter(sp, cfs.getComparator()), null));
    // flush.
    cfs.forceBlockingFlush();
    // insert, don't flush
    putColsStandard(cfs, key, column("col3", "val3", 1), column("col4", "val4", 1));
    assertRowAndColCount(1, 4, null, false, cfs.getRangeSlice(null, Util.range("f", "g"), 100, QueryFilter.getFilter(sp, cfs.getComparator()), null));
    // delete (from sstable and memtable)
    RowMutation rm = new RowMutation(table.name, key.key);
    rm.delete(new QueryPath(cfs.columnFamily, null, null), 2);
    rm.apply();
    // verify delete
    assertRowAndColCount(1, 0, null, true, cfs.getRangeSlice(null, Util.range("f", "g"), 100, QueryFilter.getFilter(sp, cfs.getComparator()), null));
    // flush
    cfs.forceBlockingFlush();
    // re-verify delete. first breakage is right here because of CASSANDRA-1837.
    assertRowAndColCount(1, 0, null, true, cfs.getRangeSlice(null, Util.range("f", "g"), 100, QueryFilter.getFilter(sp, cfs.getComparator()), null));
    // simulate a 'late' insertion that gets put in after the deletion. should get inserted, but fail on read.
    putColsStandard(cfs, key, column("col5", "val5", 1), column("col2", "val2", 1));
    // should still be nothing there because we deleted this row. 2nd breakage, but was undetected because of 1837.
    assertRowAndColCount(1, 0, null, true, cfs.getRangeSlice(null, Util.range("f", "g"), 100, QueryFilter.getFilter(sp, cfs.getComparator()), null));
    // make sure that new writes are recognized.
    putColsStandard(cfs, key, column("col6", "val6", 3), column("col7", "val7", 3));
    assertRowAndColCount(1, 2, null, true, cfs.getRangeSlice(null, Util.range("f", "g"), 100, QueryFilter.getFilter(sp, cfs.getComparator()), null));
    // and it remains so after flush. (this wasn't failing before, but it's good to check.)
    cfs.forceBlockingFlush();
    assertRowAndColCount(1, 2, null, true, cfs.getRangeSlice(null, Util.range("f", "g"), 100, QueryFilter.getFilter(sp, cfs.getComparator()), null));
}
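The "late" writes at timestamp 1 stay hidden because Cassandra reconciles by timestamp, not arrival order: the row tombstone written at timestamp 2 shadows every column stamped at or below 2, so only the timestamp-3 writes survive. This test (and the super-column variant below) also leans on an assertRowAndColCount helper defined elsewhere in ColumnFamilyStoreTest; a minimal sketch of what it presumably does, with the exact signature assumed:

// Hedged sketch of the assertRowAndColCount helper; the real one in
// ColumnFamilyStoreTest may differ in signature and error messages.
private static void assertRowAndColCount(int rowCount, int colCount, ByteBuffer superColumn,
                                         boolean isDeleted, Collection<Row> rows)
{
    assert rows.size() == rowCount : "expected " + rowCount + " rows, got " + rows.size();
    for (Row row : rows)
    {
        ColumnFamily cf = row.cf;
        assert cf != null : "row had no column family";
        // for super CFs, count the subcolumns of the requested super column instead
        int count = superColumn == null
                  ? cf.getColumnCount()
                  : cf.getColumn(superColumn).getSubColumns().size();
        assert count == colCount : "expected " + colCount + " columns, got " + count;
        if (isDeleted)
            assert cf.isMarkedForDelete() : "expected a row tombstone";
    }
}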
use of org.apache.cassandra.io.sstable.SSTable in project eiger by wlloyd.
the class ColumnFamilyStoreTest method testDeleteSuperRowSticksAfterFlush.
@Test
public void testDeleteSuperRowSticksAfterFlush() throws Throwable {
    String tableName = "Keyspace1";
    String cfName = "Super1";
    ByteBuffer scfName = ByteBufferUtil.bytes("SuperDuper");
    Table table = Table.open(tableName);
    ColumnFamilyStore cfs = table.getColumnFamilyStore(cfName);
    DecoratedKey key = Util.dk("flush-resurrection");
    // create an isolated sstable.
    putColsSuper(cfs, key, scfName, new Column(getBytes(1L), ByteBufferUtil.bytes("val1"), 1), new Column(getBytes(2L), ByteBufferUtil.bytes("val2"), 1), new Column(getBytes(3L), ByteBufferUtil.bytes("val3"), 1));
    cfs.forceBlockingFlush();
    // insert, don't flush.
    putColsSuper(cfs, key, scfName, new Column(getBytes(4L), ByteBufferUtil.bytes("val4"), 1), new Column(getBytes(5L), ByteBufferUtil.bytes("val5"), 1), new Column(getBytes(6L), ByteBufferUtil.bytes("val6"), 1));
    // verify insert.
    final SlicePredicate sp = new SlicePredicate();
    sp.setSlice_range(new SliceRange());
    sp.getSlice_range().setCount(100);
    sp.getSlice_range().setStart(ArrayUtils.EMPTY_BYTE_ARRAY);
    sp.getSlice_range().setFinish(ArrayUtils.EMPTY_BYTE_ARRAY);
    assertRowAndColCount(1, 6, scfName, false, cfs.getRangeSlice(scfName, Util.range("f", "g"), 100, QueryFilter.getFilter(sp, cfs.getComparator()), null));
    // delete.
    RowMutation rm = new RowMutation(table.name, key.key);
    rm.delete(new QueryPath(cfName, scfName), 2);
    rm.apply();
    // verify delete.
    assertRowAndColCount(1, 0, scfName, false, cfs.getRangeSlice(scfName, Util.range("f", "g"), 100, QueryFilter.getFilter(sp, cfs.getComparator()), null));
    // flush
    cfs.forceBlockingFlush();
    // re-verify delete.
    assertRowAndColCount(1, 0, scfName, false, cfs.getRangeSlice(scfName, Util.range("f", "g"), 100, QueryFilter.getFilter(sp, cfs.getComparator()), null));
    // late insert.
    putColsSuper(cfs, key, scfName, new Column(getBytes(4L), ByteBufferUtil.bytes("val4"), 1L), new Column(getBytes(7L), ByteBufferUtil.bytes("val7"), 1L));
    // re-verify delete.
    assertRowAndColCount(1, 0, scfName, false, cfs.getRangeSlice(scfName, Util.range("f", "g"), 100, QueryFilter.getFilter(sp, cfs.getComparator()), null));
    // make sure new writes are recognized.
    putColsSuper(cfs, key, scfName, new Column(getBytes(3L), ByteBufferUtil.bytes("val3"), 3), new Column(getBytes(8L), ByteBufferUtil.bytes("val8"), 3), new Column(getBytes(9L), ByteBufferUtil.bytes("val9"), 3));
    assertRowAndColCount(1, 3, scfName, false, cfs.getRangeSlice(scfName, Util.range("f", "g"), 100, QueryFilter.getFilter(sp, cfs.getComparator()), null));
}
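putColsSuper is another helper defined elsewhere in ColumnFamilyStoreTest. A hedged sketch of it, assuming the era-appropriate RowMutation.add(QueryPath, ...) API; the real helper may build the ColumnFamily and SuperColumn objects directly instead:

// Hedged sketch: wrap the given subcolumns in a RowMutation targeting the
// named super column, then apply it to the memtable.
private static void putColsSuper(ColumnFamilyStore cfs, DecoratedKey key,
                                 ByteBuffer scfName, Column... cols) throws Throwable
{
    RowMutation rm = new RowMutation(cfs.table.name, key.key);
    for (Column col : cols)
        rm.add(new QueryPath(cfs.columnFamily, scfName, col.name()), col.value(), col.timestamp());
    rm.apply();
}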
use of org.apache.cassandra.io.sstable.SSTable in project cassandra by apache.
the class CompactionStrategyManager method findUpgradeSSTableTask.
/**
 * Finds the oldest (by modification date) non-latest-version sstable on disk and creates an upgrade task for it.
 * @return an upgrade task for that sstable, or null if automatic upgrades are disabled or no candidate could be marked compacting
 */
@VisibleForTesting
// transaction is closed by AbstractCompactionTask::execute
@SuppressWarnings("resource")
AbstractCompactionTask findUpgradeSSTableTask() {
    if (!isEnabled() || !DatabaseDescriptor.automaticSSTableUpgrade())
        return null;
    Set<SSTableReader> compacting = cfs.getTracker().getCompacting();
    List<SSTableReader> potentialUpgrade = cfs.getLiveSSTables().stream()
        .filter(s -> !compacting.contains(s) && !s.descriptor.version.isLatestVersion())
        .sorted((o1, o2) -> {
            File f1 = new File(o1.descriptor.filenameFor(Component.DATA));
            File f2 = new File(o2.descriptor.filenameFor(Component.DATA));
            return Longs.compare(f1.lastModified(), f2.lastModified());
        })
        .collect(Collectors.toList());
    for (SSTableReader sstable : potentialUpgrade) {
        LifecycleTransaction txn = cfs.getTracker().tryModify(sstable, OperationType.UPGRADE_SSTABLES);
        if (txn != null) {
            logger.debug("Running automatic sstable upgrade for {}", sstable);
            return getCompactionStrategyFor(sstable).getCompactionTask(txn, Integer.MIN_VALUE, Long.MAX_VALUE);
        }
    }
    return null;
}
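In Cassandra itself this method is polled from the background compaction path; a hedged usage sketch of how a same-package test might drain every pending upgrade task is below. Treat the execute() call as an assumption: its parameter type varies across Cassandra versions, and ActiveCompactionsTracker.NOOP is used here as a stand-in stats collector.

// Drain all pending upgrade tasks; each returned task owns its transaction
// and closes it inside execute(). Sketch only, not the project's own driver code.
CompactionStrategyManager csm = cfs.getCompactionStrategyManager();
AbstractCompactionTask task;
while ((task = csm.findUpgradeSSTableTask()) != null)
    task.execute(ActiveCompactionsTracker.NOOP);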
use of org.apache.cassandra.io.sstable.SSTable in project cassandra by apache.
the class LogRecord method make.
public static Map<SSTable, LogRecord> make(Type type, Iterable<SSTableReader> tables) {
    // contains a mapping from sstable absolute path (everything up until the 'Data'/'Index'/etc part of the filename) to the sstable
    Map<String, SSTable> absolutePaths = new HashMap<>();
    for (SSTableReader table : tables)
        absolutePaths.put(absolutePath(table.descriptor.baseFilename()), table);
    // maps sstable base file name to the actual files on disk
    Map<String, List<File>> existingFiles = getExistingFiles(absolutePaths.keySet());
    Map<SSTable, LogRecord> records = new HashMap<>(existingFiles.size());
    for (Map.Entry<String, List<File>> entry : existingFiles.entrySet()) {
        List<File> filesOnDisk = entry.getValue();
        String baseFileName = entry.getKey();
        SSTable sstable = absolutePaths.get(baseFileName);
        records.put(sstable, make(type, filesOnDisk, sstable.getAllFilePaths().size(), baseFileName));
    }
    return records;
}
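The absolutePath helper this method keys on is not shown above. A plausible sketch of it, hedged because the real method in LogRecord may differ:

// Hedged sketch: canonicalize the descriptor's base filename and append the
// component separator, so a key like "...-big-1" cannot accidentally
// prefix-match "...-big-10".
private static String absolutePath(String baseFilename)
{
    return FileUtils.getCanonicalPath(baseFilename + Component.separator);
}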
use of org.apache.cassandra.io.sstable.SSTable in project cassandra by apache.
the class SASIIndexTest method testTableRebuild.
@Test
public void testTableRebuild() throws Exception {
    ColumnFamilyStore store = Keyspace.open(KS_NAME).getColumnFamilyStore(CLUSTERING_CF_NAME_1);
    executeCQL(CLUSTERING_CF_NAME_1, "INSERT INTO %s.%s (name, nickname, location, age, height, score) VALUES (?, ?, ?, ?, ?, ?)", "Pavel", "xedin", "US", 27, 183, 1.0);
    executeCQL(CLUSTERING_CF_NAME_1, "INSERT INTO %s.%s (name, location, age, height, score) VALUES (?, ?, ?, ?, ?)", "Pavel", "BY", 28, 182, 2.0);
    executeCQL(CLUSTERING_CF_NAME_1, "INSERT INTO %s.%s (name, nickname, location, age, height, score) VALUES (?, ?, ?, ?, ?, ?)", "Jordan", "jrwest", "US", 27, 182, 1.0);
    store.forceBlockingFlush();
    SSTable ssTable = store.getSSTables(SSTableSet.LIVE).iterator().next();
    Path path = FileSystems.getDefault().getPath(ssTable.getFilename().replace("-Data", "-SI_" + CLUSTERING_CF_NAME_1 + "_age"));
    // Overwrite the index file with garbage
    try (FileChannel fc = FileChannel.open(path, StandardOpenOption.WRITE)) {
        fc.truncate(8).write(ByteBuffer.wrap("garbage".getBytes(StandardCharsets.UTF_8)));
    }
    long size1 = Files.readAttributes(path, BasicFileAttributes.class).size();
    // Querying the corrupted index file yields no results
    Assert.assertTrue(executeCQL(CLUSTERING_CF_NAME_1, "SELECT * FROM %s.%s WHERE age = 27 AND name = 'Pavel'").isEmpty());
    // Rebuild the index
    store.rebuildSecondaryIndex(CLUSTERING_CF_NAME_1 + "_age");
    long size2 = Files.readAttributes(path, BasicFileAttributes.class).size();
    // Make sure the garbage was overwritten
    Assert.assertTrue(size2 > size1);
    // Make sure the index works for the rebuilt table
    CQLTester.assertRows(executeCQL(CLUSTERING_CF_NAME_1, "SELECT * FROM %s.%s WHERE age = 27 AND name = 'Pavel'"), CQLTester.row("Pavel", "US", 27, "xedin", 183, 1.0));
    CQLTester.assertRows(executeCQL(CLUSTERING_CF_NAME_1, "SELECT * FROM %s.%s WHERE age = 28"), CQLTester.row("Pavel", "BY", 28, "xedin", 182, 2.0));
    CQLTester.assertRows(executeCQL(CLUSTERING_CF_NAME_1, "SELECT * FROM %s.%s WHERE score < 2.0 AND nickname = 'jrwest' ALLOW FILTERING"), CQLTester.row("Jordan", "US", 27, "jrwest", 182, 1.0));
}
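The test assumes a SASI index named <table>_age already exists on the age column. A hedged sketch of the schema setup it presumes; the names are taken from the test above, but the real SASIIndexTest declares its indexes elsewhere and may do so differently:

// Hedged sketch of the assumed index declaration, not SASIIndexTest's own setup code.
QueryProcessor.executeOnceInternal(
    String.format("CREATE CUSTOM INDEX IF NOT EXISTS %s_age ON %s.%s (age) " +
                  "USING 'org.apache.cassandra.index.sasi.SASIIndex'",
                  CLUSTERING_CF_NAME_1, KS_NAME, CLUSTERING_CF_NAME_1));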