Use of org.apache.cassandra.db.ColumnFamilyStore in project cassandra by apache.
From class ValidatorTest, method testValidatorComplete:
@Test
public void testValidatorComplete() throws Throwable {
    Range<Token> range = new Range<>(partitioner.getMinimumToken(), partitioner.getRandomToken());
    final RepairJobDesc desc = new RepairJobDesc(UUID.randomUUID(), UUID.randomUUID(), keyspace, columnFamily, Arrays.asList(range));
    final CompletableFuture<MessageOut> outgoingMessageSink = registerOutgoingMessageSink();
    InetAddress remote = InetAddress.getByName("127.0.0.2");
    ColumnFamilyStore cfs = Keyspace.open(keyspace).getColumnFamilyStore(columnFamily);
    Validator validator = new Validator(desc, remote, 0);
    MerkleTrees tree = new MerkleTrees(partitioner);
    tree.addMerkleTrees((int) Math.pow(2, 15), validator.desc.ranges);
    validator.prepare(cfs, tree);
    // and confirm that the tree was split
    assertTrue(tree.size() > 1);
    // add a row
    Token mid = partitioner.midpoint(range.left, range.right);
    validator.add(EmptyIterators.unfilteredRow(cfs.metadata(), new BufferDecoratedKey(mid, ByteBufferUtil.bytes("inconceivable!")), false));
    validator.complete();
    // confirm that the tree was validated
    Token min = tree.partitioner().getMinimumToken();
    assertNotNull(tree.hash(new Range<>(min, min)));
    MessageOut message = outgoingMessageSink.get(TEST_TIMEOUT, TimeUnit.SECONDS);
    assertEquals(MessagingService.Verb.REPAIR_MESSAGE, message.verb);
    RepairMessage m = (RepairMessage) message.payload;
    assertEquals(RepairMessage.Type.VALIDATION_COMPLETE, m.messageType);
    assertEquals(desc, m.desc);
    assertTrue(((ValidationComplete) m).success());
    assertNotNull(((ValidationComplete) m).trees);
}
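A note on the final tree assertion: in Cassandra, a Range whose left and right bounds are both the minimum token wraps around the entire ring, so a non-null hash for that range means complete() hashed every range. A minimal sketch of querying the trees after validation, reusing only the MerkleTrees calls from the test above:

    // Sketch only: same MerkleTrees API as the test; `tree` is the instance prepared above.
    Token min = tree.partitioner().getMinimumToken();
    byte[] fullRingHash = tree.hash(new Range<>(min, min)); // (min, min] spans the whole ring
    assertNotNull(fullRingHash);                            // null would mean some range was never hashed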
Use of org.apache.cassandra.db.ColumnFamilyStore in project cassandra by apache.
From class ActiveRepairServiceTest, method testSnapshotAddSSTables:
@Test
public void testSnapshotAddSSTables() throws Exception {
    ColumnFamilyStore store = prepareColumnFamilyStore();
    UUID prsId = UUID.randomUUID();
    Set<SSTableReader> original = Sets.newHashSet(store.select(View.select(SSTableSet.CANONICAL, (s) -> !s.isRepaired())).sstables);
    ActiveRepairService.instance.registerParentRepairSession(prsId, FBUtilities.getBroadcastAddress(), Collections.singletonList(store), Collections.singleton(new Range<>(store.getPartitioner().getMinimumToken(), store.getPartitioner().getMinimumToken())), true, System.currentTimeMillis(), true);
    // snapshot the current sstables under the first parent session
    ActiveRepairService.instance.getParentRepairSession(prsId).maybeSnapshot(store.metadata.id, prsId);
    UUID prsId2 = UUID.randomUUID();
    ActiveRepairService.instance.registerParentRepairSession(prsId2, FBUtilities.getBroadcastAddress(), Collections.singletonList(store), Collections.singleton(new Range<>(store.getPartitioner().getMinimumToken(), store.getPartitioner().getMinimumToken())), true, System.currentTimeMillis(), true);
    createSSTables(store, 2);
    // a second maybeSnapshot for the same session is a no-op, so the new sstables are not added
    ActiveRepairService.instance.getParentRepairSession(prsId).maybeSnapshot(store.metadata.id, prsId);
    try (Refs<SSTableReader> refs = store.getSnapshotSSTableReaders(prsId.toString())) {
        assertEquals(original, Sets.newHashSet(refs.iterator()));
    }
}
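The point of the final assertion is that maybeSnapshot only snapshots once per parent session: the sstables created after the first call never enter the prsId snapshot. A hedged sketch of the registration-then-snapshot pattern, using only the calls shown in this test; the meanings of the boolean flags are assumptions, not confirmed by this snippet:

    // Sketch only: same ActiveRepairService calls as above; the two boolean flags
    // are assumed to mark the session as incremental and global respectively.
    UUID sessionId = UUID.randomUUID();
    Range<Token> fullRange = new Range<>(store.getPartitioner().getMinimumToken(),
                                         store.getPartitioner().getMinimumToken());
    ActiveRepairService.instance.registerParentRepairSession(sessionId, FBUtilities.getBroadcastAddress(),
        Collections.singletonList(store), Collections.singleton(fullRange), true, System.currentTimeMillis(), true);
    ActiveRepairService.instance.getParentRepairSession(sessionId).maybeSnapshot(store.metadata.id, sessionId);
    // calling maybeSnapshot again for the same session will not refresh the snapshot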
Use of org.apache.cassandra.db.ColumnFamilyStore in project cassandra by apache.
From class StreamTransferTaskTest, method testScheduleTimeout:
@Test
public void testScheduleTimeout() throws Exception {
    InetAddress peer = FBUtilities.getBroadcastAddress();
    StreamSession session = new StreamSession(peer, peer, null, 0, true, false, null);
    ColumnFamilyStore cfs = Keyspace.open(KEYSPACE1).getColumnFamilyStore(CF_STANDARD);
    // create two sstables
    for (int i = 0; i < 2; i++) {
        SchemaLoader.insertData(KEYSPACE1, CF_STANDARD, i, 1);
        cfs.forceBlockingFlush();
    }
    // create a streaming task that streams those two sstables
    StreamTransferTask task = new StreamTransferTask(session, cfs.metadata.id);
    for (SSTableReader sstable : cfs.getLiveSSTables()) {
        List<Range<Token>> ranges = new ArrayList<>();
        ranges.add(new Range<>(sstable.first.getToken(), sstable.last.getToken()));
        task.addTransferFile(sstable.selfRef(), 1, sstable.getPositionsForRanges(ranges), 0);
    }
    assertEquals(2, task.getTotalNumberOfFiles());
    // a zero-delay timeout on the first file fires immediately
    Future f = task.scheduleTimeout(0, 0, TimeUnit.NANOSECONDS);
    f.get();
    // if file sending completes before the timeout, the timeout task should be canceled
    f = task.scheduleTimeout(1, 10, TimeUnit.MILLISECONDS);
    task.complete(1);
    try {
        f.get();
        Assert.fail("the timeout future should have been canceled");
    } catch (CancellationException ex) {
        // expected: complete(1) canceled the pending timeout
    }
    assertEquals(StreamSession.State.WAIT_COMPLETE, session.state());
    // once all streaming is done, no timeout task should be scheduled
    assertNull(task.scheduleTimeout(1, 1, TimeUnit.SECONDS));
}
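The cancellation behavior exercised above generalizes into a small helper. This is a hypothetical utility, not part of Cassandra, that mirrors the contract the test checks:

    // Hypothetical helper (not part of Cassandra): completing a file before its
    // timeout fires should cancel the pending timeout future.
    static void assertTimeoutCanceled(StreamTransferTask task, int sequenceNumber) throws Exception {
        Future f = task.scheduleTimeout(sequenceNumber, 10, TimeUnit.MILLISECONDS);
        task.complete(sequenceNumber);
        try {
            f.get();
            Assert.fail("expected the timeout future to be canceled");
        } catch (CancellationException expected) {
            // complete() won the race and canceled the timeout
        }
    }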
Use of org.apache.cassandra.db.ColumnFamilyStore in project cassandra by apache.
From class CompactionStress, method initCf:
ColumnFamilyStore initCf(StressProfile stressProfile, boolean loadSSTables) {
    generateTokens(stressProfile.seedStr, StorageService.instance.getTokenMetadata(), numTokens);
    CreateTableStatement.RawStatement createStatement = stressProfile.getCreateStatement();
    List<File> dataDirectories = getDataDirectories();
    ColumnFamilyStore cfs = StressCQLSSTableWriter.Builder.createOfflineTable(createStatement, Collections.EMPTY_LIST, dataDirectories);
    if (loadSSTables) {
        Directories.SSTableLister lister = cfs.getDirectories().sstableLister(Directories.OnTxnErr.IGNORE).skipTemporary(true);
        List<SSTableReader> sstables = new ArrayList<>();
        // open sstables offline
        for (Map.Entry<Descriptor, Set<Component>> entry : lister.list().entrySet()) {
            Set<Component> components = entry.getValue();
            if (!components.contains(Component.DATA))
                continue;
            try {
                SSTableReader sstable = SSTableReader.openNoValidation(entry.getKey(), components, cfs);
                sstables.add(sstable);
            } catch (Exception e) {
                JVMStabilityInspector.inspectThrowable(e);
                System.err.println(String.format("Error loading %s: %s", entry.getKey(), e.getMessage()));
            }
        }
        cfs.disableAutoCompaction();
        // register the opened sstables with the cfs
        cfs.addSSTables(sstables);
    }
    return cfs;
}
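A hedged usage sketch of the method above; `profile` is assumed to be an already-loaded StressProfile (how it is loaded is elided), and the rest reuses calls that already appear in this page:

    // Sketch only: `profile` is an already-loaded StressProfile (loading elided).
    ColumnFamilyStore cfs = initCf(profile, true); // true: open any existing sstables offline
    for (SSTableReader sstable : cfs.getLiveSSTables())
        System.out.println("loaded " + sstable.descriptor);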
Use of org.apache.cassandra.db.ColumnFamilyStore in project eiger by wlloyd.
From class UpdateColumnFamily, method applyModels:
void applyModels() throws IOException {
    logger.debug("Updating " + schema.getCFMetaData(metadata.cfId) + " to " + metadata);
    // apply the metadata update
    try {
        schema.getCFMetaData(metadata.cfId).apply(metadata.toAvro());
    } catch (ConfigurationException ex) {
        throw new IOException(ex);
    }
    schema.setTableDefinition(null, newVersion);
    // in client mode there is no live store to refresh
    if (!StorageService.instance.isClientMode()) {
        Table table = Table.open(metadata.ksName, schema);
        ColumnFamilyStore oldCfs = table.getColumnFamilyStore(metadata.cfName);
        oldCfs.reload();
    }
}
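The update is a three-step sequence: apply the Avro diff to the in-memory CFMetaData, persist the new schema version, and, outside client mode, have the live store re-read its metadata. A minimal sketch of that last guard, reusing only the calls from the snippet above:

    // Sketch only: the same server-mode guard as in applyModels above.
    if (!StorageService.instance.isClientMode()) {
        // only a server has a live ColumnFamilyStore that must pick up the new metadata
        Table.open(metadata.ksName, schema).getColumnFamilyStore(metadata.cfName).reload();
    }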