Use of org.apache.cassandra.db.Keyspace in project cassandra by apache.
In the class ViewFilteringTest, the method complexRestrictedTimestampUpdateTest:
public void complexRestrictedTimestampUpdateTest(boolean flush) throws Throwable {
    createTable("CREATE TABLE %s (a int, b int, c int, d int, e int, PRIMARY KEY (a, b))");
    execute("USE " + keyspace());
    executeNet(protocolVersion, "USE " + keyspace());
    Keyspace ks = Keyspace.open(keyspace());
    createView("mv", "CREATE MATERIALIZED VIEW %s AS SELECT * FROM %%s WHERE a IS NOT NULL AND b IS NOT NULL AND c IS NOT NULL AND c = 1 PRIMARY KEY (c, a, b)");
    ks.getColumnFamilyStore("mv").disableAutoCompaction();

    // Set initial values at TS=0, matching the view restriction, and verify the view
    executeNet(protocolVersion, "INSERT INTO %s (a, b, c, d) VALUES (0, 0, 1, 0) USING TIMESTAMP 0");
    assertRows(execute("SELECT d from mv WHERE c = ? and a = ? and b = ?", 1, 0, 0), row(0));
    if (flush)
        FBUtilities.waitOnFutures(ks.flush());

    // Update c's timestamp to TS=2
    executeNet(protocolVersion, "UPDATE %s USING TIMESTAMP 2 SET c = ? WHERE a = ? and b = ? ", 1, 0, 0);
    assertRows(execute("SELECT d from mv WHERE c = ? and a = ? and b = ?", 1, 0, 0), row(0));
    if (flush)
        FBUtilities.waitOnFutures(ks.flush());

    // Change c's value at TS=3: this tombstones the c=1 view row; the new c=0 value does not match the view filter
    executeNet(protocolVersion, "UPDATE %s USING TIMESTAMP 3 SET c = ? WHERE a = ? and b = ? ", 0, 0, 0);
    assertRows(execute("SELECT d from mv WHERE c = ? and a = ? and b = ?", 0, 0, 0));
    if (flush) {
        ks.getColumnFamilyStore("mv").forceMajorCompaction();
        FBUtilities.waitOnFutures(ks.flush());
    }

    // Change c's value back to 1 with TS=4 and check that d is visible again
    executeNet(protocolVersion, "UPDATE %s USING TIMESTAMP 4 SET c = ? WHERE a = ? and b = ? ", 1, 0, 0);
    if (flush) {
        ks.getColumnFamilyStore("mv").forceMajorCompaction();
        FBUtilities.waitOnFutures(ks.flush());
    }
    assertRows(execute("SELECT d, e from mv WHERE c = ? and a = ? and b = ?", 1, 0, 0), row(0, null));

    // Add e value @ TS=1
    executeNet(protocolVersion, "UPDATE %s USING TIMESTAMP 1 SET e = ? WHERE a = ? and b = ? ", 1, 0, 0);
    assertRows(execute("SELECT d, e from mv WHERE c = ? and a = ? and b = ?", 1, 0, 0), row(0, 1));
    if (flush)
        FBUtilities.waitOnFutures(ks.flush());

    // Change d value @ TS=2
    executeNet(protocolVersion, "UPDATE %s USING TIMESTAMP 2 SET d = ? WHERE a = ? and b = ? ", 2, 0, 0);
    assertRows(execute("SELECT d from mv WHERE c = ? and a = ? and b = ?", 1, 0, 0), row(2));
    if (flush)
        FBUtilities.waitOnFutures(ks.flush());

    // Change d value @ TS=3
    executeNet(protocolVersion, "UPDATE %s USING TIMESTAMP 3 SET d = ? WHERE a = ? and b = ? ", 1, 0, 0);
    assertRows(execute("SELECT d from mv WHERE c = ? and a = ? and b = ?", 1, 0, 0), row(1));

    // Tombstone c
    executeNet(protocolVersion, "DELETE FROM %s WHERE a = ? and b = ?", 0, 0);
    assertRows(execute("SELECT d from mv"));

    // Add back without d
    executeNet(protocolVersion, "INSERT INTO %s (a, b, c) VALUES (0, 0, 1)");

    // Make sure d doesn't pop back in
    assertRows(execute("SELECT d from mv WHERE c = ? and a = ? and b = ?", 1, 0, 0), row((Object) null));

    // New partition
    // insert a row with timestamp 0
    executeNet(protocolVersion, "INSERT INTO %s (a, b, c, d, e) VALUES (?, ?, ?, ?, ?) USING TIMESTAMP 0", 1, 0, 1, 0, 0);
    // overwrite pk and e with timestamp 1, but don't overwrite d
    executeNet(protocolVersion, "INSERT INTO %s (a, b, c, e) VALUES (?, ?, ?, ?) USING TIMESTAMP 1", 1, 0, 1, 0);
    // delete with timestamp 0 (which should only delete d)
    executeNet(protocolVersion, "DELETE FROM %s USING TIMESTAMP 0 WHERE a = ? AND b = ?", 1, 0);
    assertRows(execute("SELECT a, b, c, d, e from mv WHERE c = ? and a = ? and b = ?", 1, 1, 0), row(1, 0, 1, null, 0));

    executeNet(protocolVersion, "UPDATE %s USING TIMESTAMP 2 SET c = ? WHERE a = ? AND b = ?", 1, 1, 1);
    executeNet(protocolVersion, "UPDATE %s USING TIMESTAMP 3 SET c = ? WHERE a = ? AND b = ?", 1, 1, 0);
    assertRows(execute("SELECT a, b, c, d, e from mv WHERE c = ? and a = ? and b = ?", 1, 1, 0), row(1, 0, 1, null, 0));

    executeNet(protocolVersion, "UPDATE %s USING TIMESTAMP 3 SET d = ? WHERE a = ? AND b = ?", 0, 1, 0);
    assertRows(execute("SELECT a, b, c, d, e from mv WHERE c = ? and a = ? and b = ?", 1, 1, 0), row(1, 0, 1, 0, 0));
}
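A minimal sketch of the Keyspace plumbing the test relies on (keyspace() and the view name "mv" come from the test harness above): auto-compaction is disabled on the view's ColumnFamilyStore and a flush is forced between phases, so each group of timestamped writes lands in its own SSTable and the view's timestamp reconciliation is exercised across SSTables rather than within a single memtable.

    Keyspace ks = Keyspace.open(keyspace());                  // same keyspace the CQL statements target
    ColumnFamilyStore viewCfs = ks.getColumnFamilyStore("mv");
    viewCfs.disableAutoCompaction();                          // keep the per-phase SSTables separate
    // ... issue timestamped INSERT/UPDATE/DELETE statements ...
    FBUtilities.waitOnFutures(ks.flush());                    // one SSTable per phase when flush == true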
Use of org.apache.cassandra.db.Keyspace in project cassandra by apache.
In the class SnapshotDeletingTest, the method testCompactionHook:
@Test
public void testCompactionHook() throws Exception {
    Assume.assumeTrue(FBUtilities.isWindows);
    Keyspace keyspace = Keyspace.open(KEYSPACE1);
    ColumnFamilyStore store = keyspace.getColumnFamilyStore(CF_STANDARD1);
    store.clearUnsafe();
    populate(10000);
    store.snapshot("snapshot1");
    // Confirm snapshot deletion fails. Sleep for a bit just to make sure the SnapshotDeletingTask has
    // time to run and fail.
    Thread.sleep(500);
    store.clearSnapshot("snapshot1");
    assertEquals(1, SnapshotDeletingTask.pendingDeletionCount());
    // Compact the cf and confirm that the executor's after hook calls rescheduleDeletion
    populate(20000);
    store.forceBlockingFlush();
    store.forceMajorCompaction();
    long start = System.currentTimeMillis();
    while (System.currentTimeMillis() - start < 1000 && SnapshotDeletingTask.pendingDeletionCount() > 0) {
        Thread.yield();
    }
    assertEquals(0, SnapshotDeletingTask.pendingDeletionCount());
}
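A condensed sketch of the snapshot lifecycle the test drives through Keyspace (constants assumed from the test class): on Windows the initial clearSnapshot cannot remove hard-linked files that are still referenced by open SSTables, so the deletion is parked on SnapshotDeletingTask and retried once compaction releases the originals.

    Keyspace keyspace = Keyspace.open(KEYSPACE1);
    ColumnFamilyStore store = keyspace.getColumnFamilyStore(CF_STANDARD1);
    store.snapshot("snapshot1");                              // hard-links the live SSTables
    store.clearSnapshot("snapshot1");                         // may be deferred; poll SnapshotDeletingTask.pendingDeletionCount()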
Use of org.apache.cassandra.db.Keyspace in project cassandra by apache.
In the class IndexSummaryManagerTest, the method testCancelIndex:
@Test
public void testCancelIndex() throws Exception {
    String ksname = KEYSPACE1;
    // index interval of 8, no key caching
    String cfname = CF_STANDARDLOWiINTERVAL;
    Keyspace keyspace = Keyspace.open(ksname);
    final ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(cfname);
    final int numSSTables = 4;
    int numRows = 256;
    createSSTables(ksname, cfname, numSSTables, numRows);
    final List<SSTableReader> sstables = new ArrayList<>(cfs.getLiveSSTables());
    for (SSTableReader sstable : sstables)
        sstable.overrideReadMeter(new RestorableMeter(100.0, 100.0));
    final long singleSummaryOffHeapSpace = sstables.get(0).getIndexSummaryOffHeapSize();
    // everything should get cut in half
    final AtomicReference<CompactionInterruptedException> exception = new AtomicReference<>();
    // barrier to control when redistribution runs
    final CountDownLatch barrier = new CountDownLatch(1);
    Thread t = NamedThreadFactory.createThread(new Runnable() {
        public void run() {
            try {
                // Don't leave enough space for even the minimal index summaries
                try (LifecycleTransaction txn = cfs.getTracker().tryModify(sstables, OperationType.UNKNOWN)) {
                    IndexSummaryManager.redistributeSummaries(new ObservableRedistribution(Collections.EMPTY_LIST, of(cfs.metadata.id, txn), singleSummaryOffHeapSpace, barrier));
                }
            } catch (CompactionInterruptedException ex) {
                exception.set(ex);
            } catch (IOException ignored) {
            }
        }
    });
    t.start();
    while (CompactionManager.instance.getActiveCompactions() == 0 && t.isAlive())
        Thread.sleep(1);
    // to ensure that the stop condition check in IndexSummaryRedistribution::redistributeSummaries
    // is made *after* the halt request is made to the CompactionManager, don't allow the redistribution
    // to proceed until stopCompaction has been called.
    CompactionManager.instance.stopCompaction("INDEX_SUMMARY");
    // allows the redistribution to proceed
    barrier.countDown();
    t.join();
    assertNotNull("Expected compaction interrupted exception", exception.get());
    assertTrue("Expected no active compactions", CompactionMetrics.getCompactions().isEmpty());
    Set<SSTableReader> beforeRedistributionSSTables = new HashSet<>(sstables);
    Set<SSTableReader> afterCancelSSTables = new HashSet<>(cfs.getLiveSSTables());
    Set<SSTableReader> disjoint = Sets.symmetricDifference(beforeRedistributionSSTables, afterCancelSSTables);
    assertTrue(String.format("Mismatched files before and after cancelling redistribution: %s", Joiner.on(",").join(disjoint)), disjoint.isEmpty());
    validateData(cfs, numRows);
}
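A hedged sketch of the transaction setup the test funnels through Keyspace: the live SSTables are fetched from the ColumnFamilyStore and wrapped in a LifecycleTransaction, which is what lets CompactionManager treat the summary redistribution like a compaction and interrupt it.

    Keyspace keyspace = Keyspace.open(KEYSPACE1);             // test-class constant
    ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(CF_STANDARDLOWiINTERVAL);
    List<SSTableReader> sstables = new ArrayList<>(cfs.getLiveSSTables());
    try (LifecycleTransaction txn = cfs.getTracker().tryModify(sstables, OperationType.UNKNOWN)) {
        // work guarded by the transaction can be cancelled via
        // CompactionManager.instance.stopCompaction("INDEX_SUMMARY"),
        // which surfaces as a CompactionInterruptedException
    }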
Use of org.apache.cassandra.db.Keyspace in project cassandra by apache.
In the class SSTableRewriterTest, the method testSSTableSectionsForRanges:
@Test
public void testSSTableSectionsForRanges() throws IOException, InterruptedException, ExecutionException {
    Keyspace keyspace = Keyspace.open(KEYSPACE);
    final ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(CF);
    truncate(cfs);
    cfs.addSSTable(writeFile(cfs, 1000));
    Collection<SSTableReader> allSSTables = cfs.getLiveSSTables();
    assertEquals(1, allSSTables.size());
    final Token firstToken = allSSTables.iterator().next().first.getToken();
    DatabaseDescriptor.setSSTablePreempiveOpenIntervalInMB(1);
    List<StreamSession.SSTableStreamingSections> sectionsBeforeRewrite = StreamSession.getSSTableSectionsForRanges(Collections.singleton(new Range<Token>(firstToken, firstToken)), Collections.singleton(cfs), 0L, null);
    assertEquals(1, sectionsBeforeRewrite.size());
    for (StreamSession.SSTableStreamingSections section : sectionsBeforeRewrite)
        section.ref.release();
    final AtomicInteger checkCount = new AtomicInteger();
    // needed since we get notified when compaction is done as well - we can't get sections for ranges for obsoleted sstables
    final AtomicBoolean done = new AtomicBoolean(false);
    final AtomicBoolean failed = new AtomicBoolean(false);
    Runnable r = new Runnable() {
        public void run() {
            while (!done.get()) {
                Set<Range<Token>> range = Collections.singleton(new Range<Token>(firstToken, firstToken));
                List<StreamSession.SSTableStreamingSections> sections = StreamSession.getSSTableSectionsForRanges(range, Collections.singleton(cfs), 0L, null);
                if (sections.size() != 1)
                    failed.set(true);
                for (StreamSession.SSTableStreamingSections section : sections)
                    section.ref.release();
                checkCount.incrementAndGet();
                Uninterruptibles.sleepUninterruptibly(5, TimeUnit.MILLISECONDS);
            }
        }
    };
    Thread t = NamedThreadFactory.createThread(r);
    try {
        t.start();
        cfs.forceMajorCompaction();
        // reset
    } finally {
        DatabaseDescriptor.setSSTablePreempiveOpenIntervalInMB(50);
        done.set(true);
        t.join(20);
    }
    assertFalse(failed.get());
    assertTrue(checkCount.get() >= 2);
    truncate(cfs);
}
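A short sketch of the setup this test repeats on every iteration: the Keyspace is opened once, the ColumnFamilyStore is fetched by name, and the first token of the single live SSTable defines a wrap-around range (left == right covers the whole ring) handed to StreamSession.getSSTableSectionsForRanges while a major compaction runs concurrently.

    Keyspace keyspace = Keyspace.open(KEYSPACE);              // test-class constant
    ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(CF);
    Token first = cfs.getLiveSSTables().iterator().next().first.getToken();
    Set<Range<Token>> range = Collections.singleton(new Range<Token>(first, first)); // wraps the full ring
    // concurrently: cfs.forceMajorCompaction();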
Use of org.apache.cassandra.db.Keyspace in project cassandra by apache.
In the class Schema, the method reload:
private void reload(KeyspaceMetadata previous, KeyspaceMetadata updated) {
    Keyspace keyspace = getKeyspaceInstance(updated.name);
    if (keyspace != null)
        keyspace.setMetadata(updated);
    MapDifference<TableId, TableMetadata> tablesDiff = previous.tables.diff(updated.tables);
    MapDifference<TableId, ViewMetadata> viewsDiff = previous.views.diff(updated.views);
    MapDifference<String, TableMetadata> indexesDiff = previous.tables.indexesDiff(updated.tables);
    // clean up after removed entries
    tablesDiff.entriesOnlyOnLeft().values().forEach(table -> metadataRefs.remove(table.id));
    viewsDiff.entriesOnlyOnLeft().values().forEach(view -> metadataRefs.remove(view.metadata.id));
    indexesDiff.entriesOnlyOnLeft().values().forEach(indexTable -> indexMetadataRefs.remove(Pair.create(indexTable.keyspace, indexTable.indexName().get())));
    // load up new entries
    tablesDiff.entriesOnlyOnRight().values().forEach(table -> metadataRefs.put(table.id, new TableMetadataRef(table)));
    viewsDiff.entriesOnlyOnRight().values().forEach(view -> metadataRefs.put(view.metadata.id, new TableMetadataRef(view.metadata)));
    indexesDiff.entriesOnlyOnRight().values().forEach(indexTable -> indexMetadataRefs.put(Pair.create(indexTable.keyspace, indexTable.indexName().get()), new TableMetadataRef(indexTable)));
    // refresh refs to updated ones
    tablesDiff.entriesDiffering().values().forEach(diff -> metadataRefs.get(diff.rightValue().id).set(diff.rightValue()));
    viewsDiff.entriesDiffering().values().forEach(diff -> metadataRefs.get(diff.rightValue().metadata.id).set(diff.rightValue().metadata));
    indexesDiff.entriesDiffering().values().stream().map(MapDifference.ValueDifference::rightValue).forEach(indexTable -> indexMetadataRefs.get(Pair.create(indexTable.keyspace, indexTable.indexName().get())).set(indexTable));
}
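For reference, a plain-Guava illustration (not the Cassandra Tables/Views wrappers) of the three MapDifference views that reload consumes: entriesOnlyOnLeft holds dropped entries whose refs are removed, entriesOnlyOnRight holds new entries that get fresh refs, and entriesDiffering holds altered entries whose refs are repointed at rightValue().

    Map<String, Integer> previous = ImmutableMap.of("a", 1, "b", 2);
    Map<String, Integer> updated  = ImmutableMap.of("b", 3, "c", 4);
    MapDifference<String, Integer> diff = Maps.difference(previous, updated);
    diff.entriesOnlyOnLeft();   // {a=1}      -> removed: clean up refs
    diff.entriesOnlyOnRight();  // {c=4}      -> added: register new refs
    diff.entriesDiffering();    // {b=(2, 3)} -> updated: point refs at rightValue()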