use of org.apache.cassandra.db.ColumnFamilyStore in project eiger by wlloyd.
the class DropColumnFamily method applyModels.
public void applyModels() throws IOException {
    ColumnFamilyStore cfs = Table.open(tableName, schema).getColumnFamilyStore(cfName);
    // reinitialize the table.
    KSMetaData existing = schema.getTableDefinition(tableName);
    CFMetaData cfm = existing.cfMetaData().get(cfName);
    KSMetaData ksm = makeNewKeyspaceDefinition(existing);
    schema.purge(cfm);
    schema.setTableDefinition(ksm, newVersion);
    if (!StorageService.instance.isClientMode()) {
        cfs.snapshot(Table.getTimestampedSnapshotName(cfs.columnFamily));
        Table.open(ksm.name, schema).dropCf(cfm.cfId);
    }
}
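For orientation, here is a minimal sketch (not part of eiger) of the snapshot-before-drop pattern this migration uses and DropKeyspace below repeats: the keyspace name "Keyspace1" and column family name "Standard1" are hypothetical, and only calls that already appear in these snippets are used.

    // Sketch only: hypothetical keyspace/column family names; client-mode guard as in the migrations.
    ColumnFamilyStore cfs = Table.open("Keyspace1").getColumnFamilyStore("Standard1");
    if (!StorageService.instance.isClientMode()) {
        // take a timestamped snapshot so the on-disk data survives the drop
        cfs.snapshot(Table.getTimestampedSnapshotName("Standard1"));
        // then remove the column family from the live Table instance
        Table.open("Keyspace1").dropCf(cfs.metadata.cfId);
    }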
use of org.apache.cassandra.db.ColumnFamilyStore in project eiger by wlloyd.
the class DropKeyspace method applyModels.
public void applyModels() throws IOException {
    String snapshotName = Table.getTimestampedSnapshotName(name);
    KSMetaData ksm = schema.getTableDefinition(name);
    // remove all cfs from the table instance.
    for (CFMetaData cfm : ksm.cfMetaData().values()) {
        ColumnFamilyStore cfs = Table.open(ksm.name, schema).getColumnFamilyStore(cfm.cfName);
        schema.purge(cfm);
        if (!StorageService.instance.isClientMode()) {
            cfs.snapshot(snapshotName);
            Table.open(ksm.name, schema).dropCf(cfm.cfId);
        }
    }
    // remove the table from the static instances.
    Table.clear(ksm.name, schema);
    // reset defs.
    schema.clearTableDefinition(ksm, newVersion);
}
use of org.apache.cassandra.db.ColumnFamilyStore in project eiger by wlloyd.
the class RangeSliceVerbHandler method doVerb.
public void doVerb(Message message, String id) {
    try {
        if (StorageService.instance.isBootstrapMode()) {
            /* Don't service reads! */
            throw new RuntimeException("Cannot service reads while bootstrapping!");
        }
        RangeSliceCommand command = RangeSliceCommand.read(message);
        ColumnFamilyStore cfs = Table.open(command.keyspace).getColumnFamilyStore(command.column_family);
        RangeSliceReply reply = new RangeSliceReply(executeLocally(command));
        Message response = reply.getReply(message);
        if (logger.isDebugEnabled())
            logger.debug("Sending " + reply + " to " + id + "@" + message.getFrom());
        MessagingService.instance().sendReply(response, id, message.getFrom());
    } catch (Exception ex) {
        throw new RuntimeException(ex);
    }
}
use of org.apache.cassandra.db.ColumnFamilyStore in project eiger by wlloyd.
the class RangeSliceVerbHandler method executeLocally.
static List<Row> executeLocally(RangeSliceCommand command) throws ExecutionException, InterruptedException {
    ColumnFamilyStore cfs = Table.open(command.keyspace).getColumnFamilyStore(command.column_family);
    IFilter columnFilter = QueryFilter.getFilter(command.predicate, cfs.getComparator());
    if (cfs.indexManager.hasIndexFor(command.row_filter))
        return cfs.search(command.row_filter, command.range, command.maxResults, columnFilter, command.maxIsColumns);
    else
        return cfs.getRangeSlice(command.super_column, command.range, command.maxResults, columnFilter, command.row_filter, command.maxIsColumns);
}
use of org.apache.cassandra.db.ColumnFamilyStore in project eiger by wlloyd.
the class IncomingStreamReader method streamIn.
private SSTableReader streamIn(DataInput input, PendingFile localFile, PendingFile remoteFile) throws IOException {
    ColumnFamilyStore cfs = Table.open(localFile.desc.ksname).getColumnFamilyStore(localFile.desc.cfname);
    DecoratedKey key;
    SSTableWriter writer = new SSTableWriter(localFile.getFilename(), remoteFile.estimatedKeys);
    CompactionController controller = new CompactionController(cfs, Collections.<SSTableReader>emptyList(), Integer.MIN_VALUE, true);
    try {
        BytesReadTracker in = new BytesReadTracker(input);
        for (Pair<Long, Long> section : localFile.sections) {
            long length = section.right - section.left;
            long bytesRead = 0;
            while (bytesRead < length) {
                in.reset(0);
                key = SSTableReader.decodeKey(StorageService.getPartitioner(), localFile.desc, ByteBufferUtil.readWithShortLength(in));
                long dataSize = SSTableReader.readRowSize(in, localFile.desc);
                ColumnFamily cached = cfs.getRawCachedRow(key);
                if (cached != null && remoteFile.type == OperationType.AES && dataSize <= DatabaseDescriptor.getInMemoryCompactionLimit()) {
                    // need to update row cache
                    // Note: Because we won't just echo the columns, there is no need to use the PRESERVE_SIZE flag, contrarily to what appendFromStream does below
                    SSTableIdentityIterator iter = new SSTableIdentityIterator(cfs.metadata, in, key, 0, dataSize, IColumnSerializer.Flag.FROM_REMOTE);
                    PrecompactedRow row = new PrecompactedRow(controller, Collections.singletonList(iter));
                    // We don't expire anything so the row shouldn't be empty
                    assert !row.isEmpty();
                    writer.append(row);
                    // row append does not update the max timestamp on its own
                    writer.updateMaxTimestamp(row.maxTimestamp());
                    // update cache
                    ColumnFamily cf = row.getFullColumnFamily();
                    cfs.updateRowCache(key, cf);
                } else {
                    writer.appendFromStream(key, cfs.metadata, dataSize, in);
                    cfs.invalidateCachedRow(key);
                }
                bytesRead += in.getBytesRead();
                remoteFile.progress += in.getBytesRead();
            }
        }
        return writer.closeAndOpenReader();
    } catch (Exception e) {
        writer.abort();
        if (e instanceof IOException)
            throw (IOException) e;
        else
            throw FBUtilities.unchecked(e);
    }
}
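The per-row progress accounting above relies on BytesReadTracker counting every byte pulled off the stream, so bytesRead and remoteFile.progress can be advanced once per row. As a rough illustration of that idea (a standalone sketch, not eiger's BytesReadTracker, which wraps DataInput directly), a counting InputStream could look like this; wrapping it in a DataInputStream would provide the DataInput view the loop needs.

    import java.io.FilterInputStream;
    import java.io.IOException;
    import java.io.InputStream;

    // Standalone sketch of byte-count tracking; not the eiger/Cassandra BytesReadTracker.
    final class CountingInput extends FilterInputStream {
        private long bytesRead;

        CountingInput(InputStream in) {
            super(in);
        }

        @Override
        public int read() throws IOException {
            int b = super.read();
            if (b != -1)
                bytesRead++;          // one byte consumed
            return b;
        }

        @Override
        public int read(byte[] buf, int off, int len) throws IOException {
            int n = super.read(buf, off, len);
            if (n > 0)
                bytesRead += n;       // count only the bytes actually read
            return n;
        }

        long getBytesRead() {
            return bytesRead;
        }

        void resetCount() {
            bytesRead = 0;            // restart the per-row counter, like in.reset(0) above
        }
    }

A streaming loop built on this sketch would call resetCount() before deserializing each row and add getBytesRead() to its running totals afterwards, mirroring how streamIn uses BytesReadTracker.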