Use of org.apache.cassandra.db.Keyspace in the Apache Cassandra project:
class TTLTest, method testRecoverOverflowedExpirationWithScrub.
/**
 * Verifies that SSTables containing cells with an overflowed local expiration time can be
 * recovered by scrub — either via the in-process {@code ColumnFamilyStore#scrub} path or via
 * the offline {@code StandaloneScrubber} tool — when the "reinsert overflowed TTL" option is
 * enabled, and that such rows are dropped when it is not.
 *
 * @param simple                whether the table uses a simple value column (vs. a set column)
 * @param clustering            whether the table has a clustering column
 * @param runScrub              run the in-process scrub on the ColumnFamilyStore
 * @param runSStableScrub       run the offline StandaloneScrubber tool
 * @param reinsertOverflowedTTL ask scrub to reinsert rows with overflowed local deletion times;
 *                              when set, the affected rows are expected to survive scrubbing
 */
public void testRecoverOverflowedExpirationWithScrub(boolean simple, boolean clustering, boolean runScrub, boolean runSStableScrub, boolean reinsertOverflowedTTL) throws Throwable {
// Reinserting overflowed TTLs only makes sense when some form of scrub actually runs.
if (reinsertOverflowedTTL) {
assert runScrub || runSStableScrub;
}
createTable(simple, clustering);
Keyspace keyspace = Keyspace.open(KEYSPACE);
ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(currentTable());
// The table starts empty; pre-built SSTables containing the overflowed
// expirations are copied into the table directory and loaded below.
assertEquals(0, cfs.getLiveSSTables().size());
copySSTablesToTableDir(currentTable(), simple, clustering);
cfs.loadNewSSTables();
if (runScrub) {
cfs.scrub(true, false, true, reinsertOverflowedTTL, 1);
if (reinsertOverflowedTTL) {
// Recovered rows must be readable right after the scrub...
if (simple)
assertRows(execute("SELECT * from %s"), row(1, 1, 1), row(2, 2, null));
else
assertRows(execute("SELECT * from %s"), row(1, 1, set("v11", "v12", "v13", "v14")), row(2, 2, set("v21", "v22", "v23", "v24")));
// ...and must also survive a subsequent major compaction.
cfs.forceMajorCompaction();
if (simple)
assertRows(execute("SELECT * from %s"), row(1, 1, 1), row(2, 2, null));
else
assertRows(execute("SELECT * from %s"), row(1, 1, set("v11", "v12", "v13", "v14")), row(2, 2, set("v21", "v22", "v23", "v24")));
} else {
// Without reinsertion, scrub drops the rows with overflowed expirations entirely.
assertEmpty(execute("SELECT * from %s"));
}
}
if (runSStableScrub) {
// Necessary for testing: allows the offline tool to re-initialize inside this JVM.
System.setProperty(org.apache.cassandra.tools.Util.ALLOW_TOOL_REINIT_FOR_TEST, "true");
try {
ToolResult tool;
// "-r" asks the offline scrubber to reinsert rows with overflowed local deletion times.
if (reinsertOverflowedTTL)
tool = ToolRunner.invokeClass(StandaloneScrubber.class, "-r", KEYSPACE, cfs.name);
else
tool = ToolRunner.invokeClass(StandaloneScrubber.class, KEYSPACE, cfs.name);
tool.assertOnCleanExit();
// Scrub always snapshots the sstables it is about to rewrite.
Assertions.assertThat(tool.getStdout()).contains("Pre-scrub sstables snapshotted into");
if (reinsertOverflowedTTL)
Assertions.assertThat(tool.getStdout()).contains("Fixed 2 rows with overflowed local deletion time.");
else
Assertions.assertThat(tool.getStdout()).contains("No valid partitions found while scrubbing");
} finally {
// Always restore the JVM-global flag so other tests are unaffected.
System.clearProperty(org.apache.cassandra.tools.Util.ALLOW_TOOL_REINIT_FOR_TEST);
}
}
try {
cfs.truncateBlocking();
dropTable("DROP TABLE %s");
} catch (Throwable e) {
// StandaloneScrubber.class should be ran as a tool with a stable env. In a test env there are things moving
// under its feet such as the async CQLTester.afterTest() operations. We try to sync cleanup of tables here
// but we need to catch any exceptions we might run into bc of the hack. See CASSANDRA-16546
}
}
Use of org.apache.cassandra.db.Keyspace in the Apache Cassandra project:
class TTLTest, method testCapExpirationDateOverflowPolicy.
/**
 * Exercises the expiration-date-overflow cap policy: writes rows at the maximum TTL via both
 * INSERT and UPDATE, optionally flushes and major-compacts, and checks the data is still
 * readable at every stage.
 *
 * @param simple     whether the table uses a simple value column (vs. a set column)
 * @param clustering whether the table has a clustering column
 * @param flush      whether to flush the memtable (and then major-compact) before re-verifying
 */
public void testCapExpirationDateOverflowPolicy(boolean simple, boolean clustering, boolean flush) throws Throwable {
    createTable(simple, clustering);

    // Write one row through INSERT and one through UPDATE, both at the maximum TTL.
    String usingMaxTtl = " USING TTL " + MAX_TTL;
    if (!simple) {
        execute("INSERT INTO %s (k, a, b) VALUES (?, ?, ?)" + usingMaxTtl, 2, 2, set("v21", "v22", "v23", "v24"));
        if (clustering) {
            execute("UPDATE %s" + usingMaxTtl + " SET b = ? WHERE k = 1 AND a = 1;", set("v11", "v12", "v13", "v14"));
        } else {
            execute("UPDATE %s" + usingMaxTtl + " SET a = 1, b = ? WHERE k = 1;", set("v11", "v12", "v13", "v14"));
        }
    } else {
        execute("INSERT INTO %s (k, a) VALUES (?, ?)" + usingMaxTtl, 2, 2);
        if (clustering) {
            execute("UPDATE %s" + usingMaxTtl + " SET b = 1 WHERE k = 1 AND a = 1;");
        } else {
            execute("UPDATE %s" + usingMaxTtl + " SET a = 1, b = 1 WHERE k = 1;");
        }
    }

    // Optionally push the writes to disk before the first verification.
    Keyspace keyspace = Keyspace.open(keyspace());
    if (flush) {
        FBUtilities.waitOnFutures(keyspace.flush());
    }
    verifyData(simple);

    if (flush) {
        // Major-compact the flushed sstables and confirm the data is still present.
        keyspace.getColumnFamilyStore(currentTable()).forceMajorCompaction();
        verifyData(simple);
    }
}
Use of org.apache.cassandra.db.Keyspace in the Apache Cassandra project:
class DeleteTest, method isMemtableEmpty.
/**
* Checks if the memtable is empty or not
* @return {@code true} if the memtable is empty, {@code false} otherwise.
*/
/**
 * Reports whether the current table's memtables hold any live data.
 *
 * @return {@code true} if no live data is resident in any memtable, {@code false} otherwise
 */
private boolean isMemtableEmpty() {
    ColumnFamilyStore store = Keyspace.open(KEYSPACE).getColumnFamilyStore(currentTable());
    return store.metric.allMemtablesLiveDataSize.getValue() == 0;
}
Use of org.apache.cassandra.db.Keyspace in the Apache Cassandra project:
class CassandraStreamReceiver, method sendThroughWritePath.
/**
 * Replays the contents of the received sstables through the regular write path, so that
 * materialized views are updated and — when CDC is enabled on the table — the mutations are
 * written to the commit log (and hence archived into the cdc_raw folder).
 *
 * @param cfs     the table the received sstables belong to
 * @param readers the received sstables to replay as mutations
 */
private void sendThroughWritePath(ColumnFamilyStore cfs, Collection<SSTableReader> readers) {
boolean hasCdc = hasCDC(cfs);
ColumnFilter filter = ColumnFilter.all(cfs.metadata());
for (SSTableReader reader : readers) {
Keyspace ks = Keyspace.open(reader.getKeyspaceName());
// Each partition is throttled to at most MAX_ROWS_PER_BATCH rows per mutation
// ({@code MAX_ROWS_PER_BATCH}) to avoid OOMing and generating heap pressure
try (ISSTableScanner scanner = reader.getScanner();
CloseableIterator<UnfilteredRowIterator> throttledPartitions = ThrottledUnfilteredIterator.throttle(scanner, MAX_ROWS_PER_BATCH)) {
while (throttledPartitions.hasNext()) {
// MV *can* be applied unsafe if there's no CDC on the CFS as we flush
// before transaction is done.
//
// If the CFS has CDC, however, these updates need to be written to the CommitLog
// so they get archived into the cdc_raw folder
ks.apply(new Mutation(PartitionUpdate.fromIterator(throttledPartitions.next(), filter)), hasCdc, true, false);
}
}
}
}
Use of org.apache.cassandra.db.Keyspace in the Apache Cassandra project:
class RepairJob, method run.
/**
 * Runs the repair job.
 *
 * This sets up the necessary tasks and runs them on the given {@code taskExecutor}:
 * optional snapshots on each replica, then validation (Merkle tree) requests, then sync
 * tasks. After submitting all tasks, the job's future is completed (or failed) via the
 * callback attached to the sync results.
 */
@SuppressWarnings("UnstableApiUsage")
public void run() {
Keyspace ks = Keyspace.open(desc.keyspace);
ColumnFamilyStore cfs = ks.getColumnFamilyStore(desc.columnFamily);
cfs.metric.repairsStarted.inc();
// Repair involves every endpoint of the common range plus this node itself.
List<InetAddressAndPort> allEndpoints = new ArrayList<>(session.commonRange.endpoints);
allEndpoints.add(FBUtilities.getBroadcastAddressAndPort());
Future<List<TreeResponse>> treeResponses;
// Create a snapshot at all nodes unless we're using pure parallel repairs
if (parallelismDegree != RepairParallelism.PARALLEL) {
Future<List<InetAddressAndPort>> allSnapshotTasks;
if (session.isIncremental) {
// consistent repair does its own "snapshotting"
allSnapshotTasks = ImmediateFuture.success(allEndpoints);
} else {
// Request snapshot to all replica
List<Future<InetAddressAndPort>> snapshotTasks = new ArrayList<>(allEndpoints.size());
for (InetAddressAndPort endpoint : allEndpoints) {
SnapshotTask snapshotTask = new SnapshotTask(desc, endpoint);
snapshotTasks.add(snapshotTask);
taskExecutor.execute(snapshotTask);
}
allSnapshotTasks = FutureCombiner.allOf(snapshotTasks);
}
// When all snapshot complete, send validation requests
treeResponses = allSnapshotTasks.flatMap(endpoints -> {
if (parallelismDegree == RepairParallelism.SEQUENTIAL)
return sendSequentialValidationRequest(endpoints);
else
return sendDCAwareValidationRequest(endpoints);
}, taskExecutor);
} else {
// If not sequential, just send validation request to all replica
treeResponses = sendValidationRequest(allEndpoints);
}
// When all validations complete, submit sync tasks
Future<List<SyncStat>> syncResults = treeResponses.flatMap(session.optimiseStreams && !session.pullRepair ? this::optimisedSyncing : this::standardSyncing, taskExecutor);
// When all sync complete, set the final result
syncResults.addCallback(new FutureCallback<List<SyncStat>>() {
public void onSuccess(List<SyncStat> stats) {
if (!session.previewKind.isPreview()) {
logger.info("{} {}.{} is fully synced", session.previewKind.logPrefix(session.getId()), desc.keyspace, desc.columnFamily);
SystemDistributedKeyspace.successfulRepairJob(session.getId(), desc.keyspace, desc.columnFamily);
}
cfs.metric.repairsCompleted.inc();
trySuccess(new RepairResult(desc, stats));
}
/**
 * Snapshot, validation and sync failures are all handled here
 */
public void onFailure(Throwable t) {
// Make sure all validation tasks have cleaned up the off-heap Merkle trees they might contain.
validationTasks.forEach(ValidationTask::abort);
if (!session.previewKind.isPreview()) {
logger.warn("{} {}.{} sync failed", session.previewKind.logPrefix(session.getId()), desc.keyspace, desc.columnFamily);
SystemDistributedKeyspace.failedRepairJob(session.getId(), desc.keyspace, desc.columnFamily, t);
}
cfs.metric.repairsCompleted.inc();
// Unwrap so callers see the original NoSuchRepairSessionException rather than the wrapper.
tryFailure(t instanceof NoSuchRepairSessionExceptionWrapper ? ((NoSuchRepairSessionExceptionWrapper) t).wrapped : t);
}
}, taskExecutor);
}
Aggregations