Use of org.apache.cassandra.db.ColumnFamily in project stargate-core by tuplejump.
From the class ResultMapper, method getCellNameColumnFamilyMap:
private Map<CellName, ColumnFamily> getCellNameColumnFamilyMap(DecoratedKey dk, ColumnSlice[] columnSlices) {
    // Read all columns in the requested slices for this partition key
    SliceQueryFilter sliceQueryFilter = new SliceQueryFilter(columnSlices, false, Integer.MAX_VALUE);
    QueryFilter queryFilter = new QueryFilter(dk, tableMapper.table.name, sliceQueryFilter, filter.timestamp);
    ColumnFamily columnFamily = tableMapper.table.getColumnFamily(queryFilter);
    // Split the fetched ColumnFamily into one entry per clustering key
    return tableMapper.getRows(columnFamily);
}
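For context, a caller would build one ColumnSlice per clustering key it wants fetched. A minimal sketch, assuming Cassandra 2.x composites; the fetchRows wrapper and the use of Composite start()/end() bounds on CellName are illustrative assumptions, not stargate-core API:

// Hypothetical caller: turn index hits into slices and fetch their rows.
private Map<CellName, ColumnFamily> fetchRows(DecoratedKey dk, List<CellName> clusteringKeys) {
    ColumnSlice[] slices = new ColumnSlice[clusteringKeys.size()];
    for (int i = 0; i < clusteringKeys.size(); i++) {
        CellName name = clusteringKeys.get(i);
        // Slice covering exactly one clustering prefix; start()/end() are
        // the composite bound markers, assumed available on CellName here
        slices[i] = new ColumnSlice(name.start(), name.end());
    }
    return getCellNameColumnFamilyMap(dk, slices);
}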
Use of org.apache.cassandra.db.ColumnFamily in project stargate-core by tuplejump.
From the class MatchPartition, method getAllMatches:
private List<Tuple> getAllMatches(ResultMapper resultMapper, Map<String, Integer> positions) {
    List<Tuple> allMatches = new ArrayList<>();
    // Index entries grouped and sorted by partition key
    TreeMultimap<DecoratedKey, IndexEntryCollector.IndexEntry> docs = resultMapper.docsByRowKey();
    for (final DecoratedKey dk : docs.keySet()) {
        List<IndexEntryCollector.IndexEntry> entries = new ArrayList<>(docs.get(dk));
        // One ColumnFamily per clustering key in this partition
        final Map<CellName, ColumnFamily> fullSlice = resultMapper.fetchRangeSlice(entries, dk);
        List<Tuple> tuples = new ArrayList<>(fullSlice.size());
        for (IndexEntryCollector.IndexEntry entry : entries) {
            CellName cellName = entry.clusteringKey;
            ColumnFamily cf = fullSlice.get(cellName);
            if (cf != null) {
                Tuple tuple = aggregateFunction.createTuple(options);
                resultMapper.tableMapper.load(positions, tuple, new Row(dk, cf));
                tuples.add(tuple);
            }
        }
        // Cap the tuples matched per partition at maxMatches
        int splice = Math.min(tuples.size(), maxMatches);
        allMatches.addAll(matchPartition(tuples.subList(0, splice)));
    }
    return allMatches;
}
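The docsByRowKey() call above returns index entries grouped and sorted by partition key. A minimal sketch of that grouping step with Guava's TreeMultimap; the comparator parameters and the entry.decoratedKey field are assumptions, not the actual stargate-core implementation:

// Hypothetical grouping step: comparators are passed in because DecoratedKey
// and IndexEntry orderings depend on the Cassandra version in use.
static TreeMultimap<DecoratedKey, IndexEntryCollector.IndexEntry> groupByRowKey(
        List<IndexEntryCollector.IndexEntry> collected,
        Comparator<DecoratedKey> keyComparator,
        Comparator<IndexEntryCollector.IndexEntry> entryComparator) {
    TreeMultimap<DecoratedKey, IndexEntryCollector.IndexEntry> docs =
            TreeMultimap.create(keyComparator, entryComparator);
    for (IndexEntryCollector.IndexEntry entry : collected)
        docs.put(entry.decoratedKey, entry); // decoratedKey field is assumed
    return docs;
}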
Use of org.apache.cassandra.db.ColumnFamily in project stargate-core by tuplejump.
From the class IndexEventHandler, method onEvent:
@Override
public void onEvent(IndexEntryEvent event, long sequence, boolean endOfBatch) throws Exception {
    // Each consumer claims every numberOfConsumers-th event, selected by its ordinal
    if ((sequence % numberOfConsumers) == ordinal) {
        ByteBuffer rowkeyBuffer = event.getRowKey();
        ColumnFamily columnFamily = event.getColumnFamily();
        final RowIndexSupport rowIndexSupport = indexingService.support.get(columnFamily.metadata().cfName);
        try {
            rowIndexSupport.indexRow(rowkeyBuffer, columnFamily);
        } catch (Exception e) {
            logger.error("Error occurred while indexing row of [" + columnFamily.metadata().cfName + "]", e);
        } finally {
            // Clear the slot so the ring buffer does not pin the row in memory
            event.setData(null, null);
            long readGen = indexingService.reads.incrementAndGet();
            if (logger.isDebugEnabled())
                logger.debug("Read gen:" + readGen);
        }
    }
}
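The sequence % numberOfConsumers check shards the event stream so each handler indexes a disjoint subset. A minimal sketch of wiring several such handlers to one LMAX Disruptor; IndexEntryEvent.FACTORY, the handler constructor signature, and the buffer size are assumptions:

// All handlers receive every event; the ordinal check inside onEvent
// makes each handler act only on its own shard of the sequence space.
int numberOfConsumers = 4;
IndexEventHandler[] handlers = new IndexEventHandler[numberOfConsumers];
for (int i = 0; i < numberOfConsumers; i++)
    handlers[i] = new IndexEventHandler(i, numberOfConsumers, indexingService); // assumed ctor
Disruptor<IndexEntryEvent> disruptor =
        new Disruptor<>(IndexEntryEvent.FACTORY, 1024, Executors.newCachedThreadPool());
disruptor.handleEventsWith(handlers);
disruptor.start();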
Use of org.apache.cassandra.db.ColumnFamily in project eiger by wlloyd.
From the class IncomingStreamReader, method streamIn:
private SSTableReader streamIn(DataInput input, PendingFile localFile, PendingFile remoteFile) throws IOException {
    ColumnFamilyStore cfs = Table.open(localFile.desc.ksname).getColumnFamilyStore(localFile.desc.cfname);
    DecoratedKey key;
    SSTableWriter writer = new SSTableWriter(localFile.getFilename(), remoteFile.estimatedKeys);
    CompactionController controller = new CompactionController(cfs, Collections.<SSTableReader>emptyList(), Integer.MIN_VALUE, true);
    try {
        BytesReadTracker in = new BytesReadTracker(input);
        for (Pair<Long, Long> section : localFile.sections) {
            long length = section.right - section.left;
            long bytesRead = 0;
            while (bytesRead < length) {
                in.reset(0);
                key = SSTableReader.decodeKey(StorageService.getPartitioner(), localFile.desc, ByteBufferUtil.readWithShortLength(in));
                long dataSize = SSTableReader.readRowSize(in, localFile.desc);
                ColumnFamily cached = cfs.getRawCachedRow(key);
                if (cached != null && remoteFile.type == OperationType.AES && dataSize <= DatabaseDescriptor.getInMemoryCompactionLimit()) {
                    // need to update row cache
                    // Note: because we won't just echo the columns, there is no need to use the PRESERVE_SIZE flag, unlike appendFromStream below
                    SSTableIdentityIterator iter = new SSTableIdentityIterator(cfs.metadata, in, key, 0, dataSize, IColumnSerializer.Flag.FROM_REMOTE);
                    PrecompactedRow row = new PrecompactedRow(controller, Collections.singletonList(iter));
                    // We don't expire anything, so the row shouldn't be empty
                    assert !row.isEmpty();
                    writer.append(row);
                    // row append does not update the max timestamp on its own
                    writer.updateMaxTimestamp(row.maxTimestamp());
                    // update cache
                    ColumnFamily cf = row.getFullColumnFamily();
                    cfs.updateRowCache(key, cf);
                } else {
                    writer.appendFromStream(key, cfs.metadata, dataSize, in);
                    cfs.invalidateCachedRow(key);
                }
                bytesRead += in.getBytesRead();
                remoteFile.progress += in.getBytesRead();
            }
        }
        return writer.closeAndOpenReader();
    } catch (Exception e) {
        writer.abort();
        if (e instanceof IOException)
            throw (IOException) e;
        else
            throw FBUtilities.unchecked(e);
    }
}
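The inner loop relies on BytesReadTracker to meter progress: it wraps any DataInput, counts bytes consumed, and is reset before each row. A minimal sketch of that idiom in isolation; readOneRow, inputStream, and expectedSectionLength are hypothetical stand-ins:

// Metering idiom: the tracker counts bytes so the loop can report progress
// without understanding the on-disk row format itself.
DataInput raw = new DataInputStream(inputStream); // assumed byte source
BytesReadTracker in = new BytesReadTracker(raw);
long bytesRead = 0;
while (bytesRead < expectedSectionLength) {
    in.reset(0);                // restart the per-row byte count
    readOneRow(in);             // hypothetical helper that consumes exactly one row
    bytesRead += in.getBytesRead();
}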
Use of org.apache.cassandra.db.ColumnFamily in project eiger by wlloyd.
From the class StatusLogger, method log:
public static void log() {
    MBeanServer server = ManagementFactory.getPlatformMBeanServer();
    // everything from o.a.c.concurrent
    logger.info(String.format("%-25s%10s%10s%10s", "Pool Name", "Active", "Pending", "Blocked"));
    Set<ObjectName> request, internal;
    try {
        request = server.queryNames(new ObjectName("org.apache.cassandra.request:type=*"), null);
        internal = server.queryNames(new ObjectName("org.apache.cassandra.internal:type=*"), null);
    } catch (MalformedObjectNameException e) {
        throw new RuntimeException(e);
    }
    for (ObjectName objectName : Iterables.concat(request, internal)) {
        String poolName = objectName.getKeyProperty("type");
        JMXEnabledThreadPoolExecutorMBean threadPoolProxy = JMX.newMBeanProxy(server, objectName, JMXEnabledThreadPoolExecutorMBean.class);
        logger.info(String.format("%-25s%10s%10s%10s", poolName, threadPoolProxy.getActiveCount(), threadPoolProxy.getPendingTasks(), threadPoolProxy.getCurrentlyBlockedTasks()));
    }
    // one offs
    logger.info(String.format("%-25s%10s%10s", "CompactionManager", "n/a", CompactionManager.instance.getPendingTasks()));
    int pendingCommands = 0;
    for (int n : MessagingService.instance().getCommandPendingTasks().values()) {
        pendingCommands += n;
    }
    int pendingResponses = 0;
    for (int n : MessagingService.instance().getResponsePendingTasks().values()) {
        pendingResponses += n;
    }
    logger.info(String.format("%-25s%10s%10s", "MessagingService", "n/a", pendingCommands + "," + pendingResponses));
    // Global key/row cache information
    AutoSavingCache<KeyCacheKey, Long> keyCache = CacheService.instance.keyCache;
    AutoSavingCache<RowCacheKey, ColumnFamily> rowCache = CacheService.instance.rowCache;
    int keyCacheKeysToSave = DatabaseDescriptor.getKeyCacheKeysToSave();
    int rowCacheKeysToSave = DatabaseDescriptor.getRowCacheKeysToSave();
    logger.info(String.format("%-25s%10s%25s%25s%65s", "Cache Type", "Size", "Capacity", "KeysToSave", "Provider"));
    logger.info(String.format("%-25s%10s%25s%25s%65s", "KeyCache", keyCache.weightedSize(), keyCache.getCapacity(), keyCacheKeysToSave == Integer.MAX_VALUE ? "all" : keyCacheKeysToSave, ""));
    logger.info(String.format("%-25s%10s%25s%25s%65s", "RowCache", rowCache.weightedSize(), rowCache.getCapacity(), rowCacheKeysToSave == Integer.MAX_VALUE ? "all" : rowCacheKeysToSave, DatabaseDescriptor.getRowCacheProvider().getClass().getName()));
    // per-CF stats
    logger.info(String.format("%-25s%20s", "ColumnFamily", "Memtable ops,data"));
    for (ColumnFamilyStore cfs : ColumnFamilyStore.all()) {
        logger.info(String.format("%-25s%20s", cfs.table.name + "." + cfs.columnFamily, cfs.getMemtableColumnsCount() + "," + cfs.getMemtableDataSize()));
    }
}
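The thread-pool section above follows a reusable JMX pattern: query MBeans with a wildcard ObjectName, then read attributes through a typed proxy. A standalone sketch of that pattern; the method wrapper and printf output are illustrative:

static void dumpRequestPools() throws MalformedObjectNameException {
    MBeanServer server = ManagementFactory.getPlatformMBeanServer();
    // Wildcard matches every MBean in the request domain
    Set<ObjectName> pools = server.queryNames(new ObjectName("org.apache.cassandra.request:type=*"), null);
    for (ObjectName name : pools) {
        // A typed proxy turns attribute reads into ordinary method calls
        JMXEnabledThreadPoolExecutorMBean pool = JMX.newMBeanProxy(server, name, JMXEnabledThreadPoolExecutorMBean.class);
        System.out.printf("%-25s active=%s pending=%s%n", name.getKeyProperty("type"), pool.getActiveCount(), pool.getPendingTasks());
    }
}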