Use of org.apache.cassandra.schema.ColumnMetadata in project cassandra by apache.
The class Rows, method removeShadowedCells.
/**
* Returns a row that is obtained from the given existing row by removing everything that is shadowed by data in
* the update row. In other words, produces the smallest result row such that
* {@code merge(result, update, nowInSec) == merge(existing, update, nowInSec)} after filtering by rangeDeletion.
*
* @param existing source row
* @param update shadowing row
* @param rangeDeletion extra {@code DeletionTime} from covering tombstone
* @param nowInSec the current time in seconds (which plays a role during reconciliation
* because deleted cells always take precedence on timestamp equality, and deciding whether a
* cell is live or not depends on the current time due to expiring cells).
*/
public static Row removeShadowedCells(Row existing, Row update, DeletionTime rangeDeletion, int nowInSec) {
    Row.Builder builder = BTreeRow.sortedBuilder();
    Clustering clustering = existing.clustering();
    builder.newRow(clustering);

    // The effective deletion is the stronger of the update's row deletion and the covering range tombstone.
    DeletionTime deletion = update.deletion().time();
    if (rangeDeletion.supersedes(deletion))
        deletion = rangeDeletion;

    LivenessInfo existingInfo = existing.primaryKeyLivenessInfo();
    if (!deletion.deletes(existingInfo))
        builder.addPrimaryKeyLivenessInfo(existingInfo);
    Row.Deletion rowDeletion = existing.deletion();
    if (!deletion.supersedes(rowDeletion.time()))
        builder.addRowDeletion(rowDeletion);

    // Walk both column iterators in lockstep (both yield ColumnData in column order) and
    // keep only the existing data that is not shadowed by the update.
    Iterator<ColumnData> a = existing.iterator();
    Iterator<ColumnData> b = update.iterator();
    ColumnData nexta = a.hasNext() ? a.next() : null, nextb = b.hasNext() ? b.next() : null;
    while (nexta != null) {
        int comparison = nextb == null ? -1 : nexta.column.compareTo(nextb.column);
        if (comparison <= 0) {
            ColumnData cura = nexta;
            ColumnMetadata column = cura.column;
            ColumnData curb = comparison == 0 ? nextb : null;
            if (column.isSimple()) {
                Cells.addNonShadowed((Cell) cura, (Cell) curb, deletion, builder, nowInSec);
            } else {
                ComplexColumnData existingData = (ComplexColumnData) cura;
                ComplexColumnData updateData = (ComplexColumnData) curb;
                DeletionTime existingDt = existingData.complexDeletion();
                DeletionTime updateDt = updateData == null ? DeletionTime.LIVE : updateData.complexDeletion();
                DeletionTime maxDt = updateDt.supersedes(deletion) ? updateDt : deletion;
                if (existingDt.supersedes(maxDt)) {
                    builder.addComplexDeletion(column, existingDt);
                    maxDt = existingDt;
                }
                Iterator<Cell> existingCells = existingData.iterator();
                Iterator<Cell> updateCells = updateData == null ? null : updateData.iterator();
                Cells.addNonShadowedComplex(column, existingCells, updateCells, maxDt, builder, nowInSec);
            }
            nexta = a.hasNext() ? a.next() : null;
            if (curb != null)
                nextb = b.hasNext() ? b.next() : null;
        } else {
            nextb = b.hasNext() ? b.next() : null;
        }
    }
    Row row = builder.build();
    return row != null && !row.isEmpty() ? row : null;
}
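The heart of removeShadowedCells is a sorted-merge walk: both rows iterate their ColumnData in column order, so a single linear pass can pair up matching columns and advance whichever side is behind. The following is a minimal standalone sketch of the same pattern, not Cassandra code: plain sorted maps of name-to-timestamp stand in for rows, and a "larger timestamp wins" rule stands in for real cell reconciliation.

import java.util.*;

public class SortedMergeSketch {
    // Keep entries of 'existing' that are absent from 'update' or not shadowed by it.
    // Both maps iterate their keys in sorted order, as in removeShadowedCells.
    static List<String> nonShadowed(SortedMap<String, Long> existing, SortedMap<String, Long> update) {
        List<String> result = new ArrayList<>();
        Iterator<Map.Entry<String, Long>> a = existing.entrySet().iterator();
        Iterator<Map.Entry<String, Long>> b = update.entrySet().iterator();
        Map.Entry<String, Long> nexta = a.hasNext() ? a.next() : null;
        Map.Entry<String, Long> nextb = b.hasNext() ? b.next() : null;
        while (nexta != null) {
            int cmp = nextb == null ? -1 : nexta.getKey().compareTo(nextb.getKey());
            if (cmp <= 0) {
                // Column present only in 'existing' (cmp < 0) or in both (cmp == 0):
                // keep it unless the update's timestamp shadows it.
                if (cmp < 0 || nexta.getValue() > nextb.getValue())
                    result.add(nexta.getKey());
                nexta = a.hasNext() ? a.next() : null;
                if (cmp == 0)
                    nextb = b.hasNext() ? b.next() : null;
            } else {
                // Column present only in 'update': nothing to keep from 'existing'.
                nextb = b.hasNext() ? b.next() : null;
            }
        }
        return result;
    }

    public static void main(String[] args) {
        SortedMap<String, Long> existing = new TreeMap<>(Map.of("a", 1L, "b", 5L, "c", 2L));
        SortedMap<String, Long> update   = new TreeMap<>(Map.of("b", 3L, "c", 9L));
        System.out.println(nonShadowed(existing, update)); // prints: [a, b]
    }
}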
Use of org.apache.cassandra.schema.ColumnMetadata in project cassandra by apache.
The class UnfilteredSerializer, method serializeRowBody.
@Inline
private void serializeRowBody(Row row, int flags, SerializationHeader header, DataOutputPlus out) throws IOException {
    boolean isStatic = row.isStatic();
    Columns headerColumns = header.columns(isStatic);
    LivenessInfo pkLiveness = row.primaryKeyLivenessInfo();
    Row.Deletion deletion = row.deletion();

    // The flags computed by the caller indicate which optional parts of the row body are present.
    if ((flags & HAS_TIMESTAMP) != 0)
        header.writeTimestamp(pkLiveness.timestamp(), out);
    if ((flags & HAS_TTL) != 0) {
        header.writeTTL(pkLiveness.ttl(), out);
        header.writeLocalDeletionTime(pkLiveness.localExpirationTime(), out);
    }
    if ((flags & HAS_DELETION) != 0)
        header.writeDeletionTime(deletion.time(), out);
    if ((flags & HAS_ALL_COLUMNS) == 0)
        Columns.serializer.serializeSubset(Collections2.transform(row, ColumnData::column), headerColumns, out);

    SearchIterator<ColumnMetadata, ColumnMetadata> si = headerColumns.iterator();
    try {
        // Row.apply takes a Consumer, which cannot throw a checked IOException, so any I/O
        // failure is tunneled out of the lambda as an unchecked WrappedException.
        row.apply(cd -> {
            ColumnMetadata column = si.next(cd.column());
            assert column != null : cd.column.toString();
            try {
                if (cd.column.isSimple())
                    Cell.serializer.serialize((Cell) cd, column, out, pkLiveness, header);
                else
                    writeComplexColumn((ComplexColumnData) cd, column, (flags & HAS_COMPLEX_DELETION) != 0, pkLiveness, header, out);
            } catch (IOException e) {
                throw new WrappedException(e);
            }
        }, false);
    } catch (WrappedException e) {
        if (e.getCause() instanceof IOException)
            throw (IOException) e.getCause();
        throw e;
    }
}
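The try/catch around row.apply is a general exception-tunneling idiom: a checked exception cannot cross a lambda whose functional interface does not declare it, so it is wrapped in an unchecked carrier inside the lambda and unwrapped on the other side. A minimal self-contained sketch of the same idiom (the Tunneled class and writeAll method are invented for illustration, analogous to Cassandra's WrappedException and Row.apply):

import java.io.IOException;
import java.util.List;
import java.util.function.Consumer;

public class TunnelSketch {
    // Invented unchecked carrier for a checked exception.
    static final class Tunneled extends RuntimeException {
        Tunneled(IOException cause) { super(cause); }
    }

    static void writeAll(List<String> items, Consumer<String> writer) {
        items.forEach(writer); // Consumer.accept cannot throw IOException
    }

    static void serialize(List<String> items) throws IOException {
        try {
            writeAll(items, item -> {
                try {
                    if (item.isEmpty())
                        throw new IOException("empty item"); // stand-in for a real write failure
                } catch (IOException e) {
                    throw new Tunneled(e); // tunnel the checked exception out of the lambda
                }
            });
        } catch (Tunneled e) {
            throw (IOException) e.getCause(); // restore the original checked exception
        }
    }

    public static void main(String[] args) {
        try {
            serialize(List.of("a", ""));
        } catch (IOException e) {
            System.out.println("caught: " + e.getMessage()); // prints: caught: empty item
        }
    }
}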
Use of org.apache.cassandra.schema.ColumnMetadata in project cassandra by apache.
The class CounterCacheKey, method readCounterValue.
/**
* Reads the value of the counter represented by this key.
*
* @param cfs the store for the table this is a key of.
* @return the value for the counter represented by this key, or {@code null} if there
* is no such counter.
*/
public ByteBuffer readCounterValue(ColumnFamilyStore cfs) {
    TableMetadata metadata = cfs.metadata();
    assert metadata.id.equals(tableId) && Objects.equals(metadata.indexName().orElse(null), indexName);

    DecoratedKey key = cfs.decorateKey(partitionKey());

    int clusteringSize = metadata.comparator.size();
    List<ByteBuffer> buffers = CompositeType.splitName(ByteBuffer.wrap(cellName));
    // See makeCellName above: the composite holds the clustering values, then the column
    // name, then (for complex columns) the cell path.
    assert buffers.size() >= clusteringSize + 1;
    Clustering clustering = Clustering.make(buffers.subList(0, clusteringSize).toArray(new ByteBuffer[clusteringSize]));
    ColumnMetadata column = metadata.getColumn(buffers.get(clusteringSize));
    // The column may have been dropped after the cache was saved and before we try to
    // load it. No point in failing in that case, just skip the value.
    if (column == null)
        return null;
    CellPath path = column.isComplex() ? CellPath.create(buffers.get(buffers.size() - 1)) : null;

    int nowInSec = FBUtilities.nowInSeconds();
    ColumnFilter.Builder builder = ColumnFilter.selectionBuilder();
    if (path == null)
        builder.add(column);
    else
        builder.select(column, path);

    ClusteringIndexFilter filter = new ClusteringIndexNamesFilter(FBUtilities.singleton(clustering, metadata.comparator), false);
    SinglePartitionReadCommand cmd = SinglePartitionReadCommand.create(metadata, nowInSec, key, builder.build(), filter);
    try (ReadExecutionController controller = cmd.executionController();
         RowIterator iter = UnfilteredRowIterators.filter(cmd.queryMemtableAndDisk(cfs, controller), nowInSec)) {
        ByteBuffer value = null;
        if (column.isStatic())
            value = iter.staticRow().getCell(column).value();
        else if (iter.hasNext())
            value = iter.next().getCell(column).value();
        return value;
    }
}
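The cellName decoding above relies on a fixed positional layout: clustering values first, then the column name, then an optional trailing cell path. A standalone sketch of the same positional split over a plain list (types are simplified to String; the decode method and DecodedName record are illustrative assumptions, not the CompositeType encoding):

import java.util.List;

public class CellNameSketch {
    record DecodedName(List<String> clustering, String columnName, String path) {}

    // Positional decode mirroring readCounterValue: the first clusteringSize parts are the
    // clustering values, the next part is the column name, and the trailing part (when the
    // column is complex) is the cell path.
    static DecodedName decode(List<String> parts, int clusteringSize, boolean complexColumn) {
        assert parts.size() >= clusteringSize + 1;
        List<String> clustering = parts.subList(0, clusteringSize);
        String columnName = parts.get(clusteringSize);
        String path = complexColumn ? parts.get(parts.size() - 1) : null;
        return new DecodedName(clustering, columnName, path);
    }

    public static void main(String[] args) {
        // Clustering ("2024", "us"), column "hits", with a trailing cell-path part.
        DecodedName d = decode(List.of("2024", "us", "hits", "web"), 2, true);
        System.out.println(d.columnName() + " @ " + d.clustering() + " / " + d.path());
        // prints: hits @ [2024, us] / web
    }
}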
Use of org.apache.cassandra.schema.ColumnMetadata in project cassandra by apache.
The class ClusteringColumnRestrictions, method mergeWith.
public ClusteringColumnRestrictions mergeWith(Restriction restriction) throws InvalidRequestException {
    SingleRestriction newRestriction = (SingleRestriction) restriction;
    RestrictionSet newRestrictionSet = restrictions.addRestriction(newRestriction);

    if (!isEmpty() && !allowFiltering) {
        // Without ALLOW FILTERING, a slice (non-EQ) restriction must be the last one in
        // clustering order: no later column may be restricted after it, and a new slice
        // may not be placed before an already-restricted column.
        SingleRestriction lastRestriction = restrictions.lastRestriction();
        ColumnMetadata lastRestrictionStart = lastRestriction.getFirstColumn();
        ColumnMetadata newRestrictionStart = restriction.getFirstColumn();
        checkFalse(lastRestriction.isSlice() && newRestrictionStart.position() > lastRestrictionStart.position(),
                   "Clustering column \"%s\" cannot be restricted (preceding column \"%s\" is restricted by a non-EQ relation)",
                   newRestrictionStart.name, lastRestrictionStart.name);
        if (newRestrictionStart.position() < lastRestrictionStart.position() && newRestriction.isSlice())
            throw invalidRequest("PRIMARY KEY column \"%s\" cannot be restricted (preceding column \"%s\" is restricted by a non-EQ relation)",
                                 restrictions.nextColumn(newRestrictionStart).name, newRestrictionStart.name);
    }
    return new ClusteringColumnRestrictions(this.comparator, newRestrictionSet, allowFiltering);
}
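The position checks encode a simple invariant: once a clustering column is restricted by a slice, no clustering column after it may be restricted, and a slice may not be introduced before a column that is already restricted. A minimal standalone sketch of that invariant (the Kind enum, R record, and validate method are illustrative assumptions, and the second error message approximates the real one, which names the column after the new restriction):

import java.util.List;

public class SliceOrderSketch {
    enum Kind { EQ, SLICE }
    record R(String column, int position, Kind kind) {}

    // Rejects a new restriction that either follows a slice or is a slice placed before
    // an already-restricted column, mirroring mergeWith's two checks.
    static void validate(R last, R next) {
        if (last.kind() == Kind.SLICE && next.position() > last.position())
            throw new IllegalArgumentException(
                "Clustering column \"" + next.column() + "\" cannot be restricted (preceding column \""
                + last.column() + "\" is restricted by a non-EQ relation)");
        if (next.kind() == Kind.SLICE && next.position() < last.position())
            throw new IllegalArgumentException(
                "PRIMARY KEY column \"" + last.column() + "\" cannot be restricted (preceding column \""
                + next.column() + "\" is restricted by a non-EQ relation)");
    }

    public static void main(String[] args) {
        validate(new R("c1", 0, Kind.EQ), new R("c2", 1, Kind.EQ)); // fine: EQ then EQ
        try {
            validate(new R("c1", 0, Kind.SLICE), new R("c2", 1, Kind.EQ)); // rejected
        } catch (IllegalArgumentException e) {
            System.out.println(e.getMessage());
        }
    }
}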
Use of org.apache.cassandra.schema.ColumnMetadata in project cassandra by apache.
The class MultiColumnRestriction, method getColumnsInCommons.
/**
* Returns the names of the columns that are specified within this <code>Restrictions</code> and the other one
* as a comma-separated <code>String</code>.
*
* @param otherRestriction the other restriction
* @return the names of the columns that are specified within this <code>Restrictions</code> and the other one
* as a comma-separated <code>String</code>.
*/
protected final String getColumnsInCommons(Restriction otherRestriction) {
    Set<ColumnMetadata> commons = new HashSet<>(getColumnDefs());
    commons.retainAll(otherRestriction.getColumnDefs());
    StringBuilder builder = new StringBuilder();
    for (ColumnMetadata columnMetadata : commons) {
        if (builder.length() != 0)
            builder.append(", ");
        builder.append(columnMetadata.name);
    }
    return builder.toString();
}
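The method is a plain set intersection followed by a join. A standalone equivalent over strings (names and types are illustrative; the real method works on ColumnMetadata, and its HashSet iteration order is unspecified, so this sketch sorts for deterministic output):

import java.util.HashSet;
import java.util.List;
import java.util.Set;
import java.util.stream.Collectors;

public class CommonColumnsSketch {
    // Intersect two column-name collections and render them comma-separated,
    // as getColumnsInCommons does with column definitions.
    static String columnsInCommon(List<String> mine, List<String> theirs) {
        Set<String> commons = new HashSet<>(mine);
        commons.retainAll(theirs);
        return commons.stream().sorted().collect(Collectors.joining(", "));
    }

    public static void main(String[] args) {
        System.out.println(columnsInCommon(List.of("a", "b", "c"), List.of("b", "c", "d")));
        // prints: b, c
    }
}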