Use of org.apache.cassandra.db.compaction.PrecompactedRow in project eiger by wlloyd.
In class IncomingStreamReader, method streamIn.
private SSTableReader streamIn(DataInput input, PendingFile localFile, PendingFile remoteFile) throws IOException
{
    ColumnFamilyStore cfs = Table.open(localFile.desc.ksname).getColumnFamilyStore(localFile.desc.cfname);
    DecoratedKey key;
    SSTableWriter writer = new SSTableWriter(localFile.getFilename(), remoteFile.estimatedKeys);
    CompactionController controller = new CompactionController(cfs, Collections.<SSTableReader>emptyList(), Integer.MIN_VALUE, true);
    try
    {
        BytesReadTracker in = new BytesReadTracker(input);
        // Each section is a (start, end) byte range of the remote sstable; consume rows until the range is exhausted.
        for (Pair<Long, Long> section : localFile.sections)
        {
            long length = section.right - section.left;
            long bytesRead = 0;
            while (bytesRead < length)
            {
                in.reset(0);
                key = SSTableReader.decodeKey(StorageService.getPartitioner(), localFile.desc, ByteBufferUtil.readWithShortLength(in));
                long dataSize = SSTableReader.readRowSize(in, localFile.desc);
                ColumnFamily cached = cfs.getRawCachedRow(key);
                if (cached != null && remoteFile.type == OperationType.AES && dataSize <= DatabaseDescriptor.getInMemoryCompactionLimit())
                {
                    // The row is cached and small enough for in-memory compaction, so deserialize it
                    // through PrecompactedRow; this lets us refresh the row cache with the merged columns.
                    // Note: because we won't just echo the columns, there is no need to use the
                    // PRESERVE_SIZE flag, contrary to what appendFromStream does below.
                    SSTableIdentityIterator iter = new SSTableIdentityIterator(cfs.metadata, in, key, 0, dataSize, IColumnSerializer.Flag.FROM_REMOTE);
                    PrecompactedRow row = new PrecompactedRow(controller, Collections.singletonList(iter));
                    // We don't expire anything, so the row shouldn't be empty
                    assert !row.isEmpty();
                    writer.append(row);
                    // row append does not update the max timestamp on its own
                    writer.updateMaxTimestamp(row.maxTimestamp());
                    // update the row cache
                    ColumnFamily cf = row.getFullColumnFamily();
                    cfs.updateRowCache(key, cf);
                }
                else
                {
                    // No usable cache entry: stream the serialized row straight into the new sstable
                    // and invalidate any stale cached copy of this key.
                    writer.appendFromStream(key, cfs.metadata, dataSize, in);
                    cfs.invalidateCachedRow(key);
                }
                bytesRead += in.getBytesRead();
                remoteFile.progress += in.getBytesRead();
            }
        }
        return writer.closeAndOpenReader();
    }
    catch (Exception e)
    {
        // On failure, abort the writer so the partially written sstable is discarded before rethrowing.
        writer.abort();
        if (e instanceof IOException)
            throw (IOException) e;
        else
            throw FBUtilities.unchecked(e);
    }
}
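The two-argument constructor used above (a CompactionController plus a list of SSTableIdentityIterators) is the same form the compaction path uses to merge several on-disk versions of a row in memory. A minimal sketch of that merging use, assuming a controller and writer set up as in streamIn; the two iterators are hypothetical and stand for the same decorated key read from two different sstables (java.util.Arrays and java.util.List imports assumed):

// Sketch only: iterFromSstable1 and iterFromSstable2 are hypothetical
// SSTableIdentityIterators positioned at the same key in two source sstables;
// controller and writer are assumed to be set up as in streamIn above.
List<SSTableIdentityIterator> versions = Arrays.asList(iterFromSstable1, iterFromSstable2);
PrecompactedRow merged = new PrecompactedRow(controller, versions);
if (!merged.isEmpty())
{
    // append the merged row and propagate its max timestamp, as streamIn does
    writer.append(merged);
    writer.updateMaxTimestamp(merged.maxTimestamp());
}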
Use of org.apache.cassandra.db.compaction.PrecompactedRow in project eiger by wlloyd.
In class AntiEntropyServiceTestAbstract, method testValidatorAdd.
@Test
public void testValidatorAdd() throws Throwable
{
    Validator validator = new Validator(request);
    IPartitioner part = validator.tree.partitioner();
    Token mid = part.midpoint(local_range.left, local_range.right);
    validator.prepare(store);
    // add a row
    validator.add(new PrecompactedRow(new DecoratedKey(mid, ByteBufferUtil.bytes("inconceivable!")),
                                      ColumnFamily.create(Schema.instance.getCFMetaData(tablename, cfname))));
    validator.completeTree();
    // confirm that the tree was validated
    assert null != validator.tree.hash(local_range);
}
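Here the (DecoratedKey, ColumnFamily) constructor wraps an already-materialized row, and the validator folds each added row into its Merkle tree. A minimal sketch of that per-row hashing step, assuming PrecompactedRow exposes the update(MessageDigest) method declared on AbstractCompactedRow in this Cassandra lineage; the digest algorithm and surrounding setup are illustrative:

// Sketch only: hash one PrecompactedRow the way a validator folds rows into its tree.
// update(MessageDigest) is assumed from AbstractCompactedRow; "SHA-256" is illustrative.
// MessageDigest.getInstance throws NoSuchAlgorithmException, so run this in a method
// that declares it (testValidatorAdd's `throws Throwable` would cover it).
MessageDigest digest = MessageDigest.getInstance("SHA-256");
PrecompactedRow row = new PrecompactedRow(
        new DecoratedKey(mid, ByteBufferUtil.bytes("inconceivable!")),
        ColumnFamily.create(Schema.instance.getCFMetaData(tablename, cfname)));
row.update(digest);
byte[] rowHash = digest.digest(); // per-row hash feeding a Merkle-tree leaf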