Use of org.apache.cassandra.db.rows.Row.Deletion in project cassandra by apache.
The class UnfilteredSerializer, method serializedRowBodySize.
private long serializedRowBodySize(Row row, SerializationHeader header, long previousUnfilteredSize, int version)
{
    long size = 0;

    // For SSTables, the size of the previous unfiltered is part of the body.
    if (header.isForSSTable())
        size += TypeSizes.sizeofUnsignedVInt(previousUnfilteredSize);

    boolean isStatic = row.isStatic();
    Columns headerColumns = header.columns(isStatic);
    LivenessInfo pkLiveness = row.primaryKeyLivenessInfo();
    Row.Deletion deletion = row.deletion();
    boolean hasComplexDeletion = row.hasComplexDeletion();
    boolean hasAllColumns = (row.size() == headerColumns.size());

    if (!pkLiveness.isEmpty())
        size += header.timestampSerializedSize(pkLiveness.timestamp());
    if (pkLiveness.isExpiring())
    {
        size += header.ttlSerializedSize(pkLiveness.ttl());
        size += header.localDeletionTimeSerializedSize(pkLiveness.localExpirationTime());
    }

    // A non-live deletion means the row itself is deleted, so its deletion time is serialized.
    if (!deletion.isLive())
        size += header.deletionTimeSerializedSize(deletion.time());

    // If the row has only a subset of the header's columns, that subset is encoded.
    if (!hasAllColumns)
        size += Columns.serializer.serializedSubsetSize(Collections2.transform(row, ColumnData::column), header.columns(isStatic));

    SearchIterator<ColumnMetadata, ColumnMetadata> si = headerColumns.iterator();
    for (ColumnData data : row)
    {
        ColumnMetadata column = si.next(data.column());
        assert column != null;

        if (data.column.isSimple())
            size += Cell.serializer.serializedSize((Cell) data, column, pkLiveness, header);
        else
            size += sizeOfComplexColumn((ComplexColumnData) data, column, hasComplexDeletion, pkLiveness, header);
    }

    return size;
}
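For reference, Row.Deletion is the small wrapper all of these snippets branch on: it pairs a DeletionTime with a shadowable flag, and isLive() is true only when no deletion time is set. A minimal sketch of that contract, assuming the cassandra-all jar on the classpath and the 3.x API (the DeletionTime constructor and the factory names may differ in other versions):

import org.apache.cassandra.db.DeletionTime;
import org.apache.cassandra.db.rows.Row;

public class RowDeletionSketch
{
    public static void main(String[] args)
    {
        // A "live" deletion carries no deletion time; nothing is serialized for it.
        Row.Deletion live = Row.Deletion.LIVE;
        System.out.println(live.isLive());          // true

        // A regular deletion wraps a DeletionTime (markedForDeleteAt in
        // microseconds, localDeletionTime in seconds). API per the 3.x source.
        DeletionTime dt = new DeletionTime(1_000_000L, 1_500_000_000);
        Row.Deletion regular = Row.Deletion.regular(dt);
        System.out.println(regular.isLive());       // false: HAS_DELETION would be set
        System.out.println(regular.isShadowable()); // false: no HAS_SHADOWABLE_DELETION
    }
}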
Use of org.apache.cassandra.db.rows.Row.Deletion in project cassandra by apache.
The class UnfilteredSerializer, method serialize.
private void serialize(Row row, SerializationHeader header, DataOutputPlus out, long previousUnfilteredSize, int version) throws IOException
{
    int flags = 0;
    int extendedFlags = 0;

    boolean isStatic = row.isStatic();
    Columns headerColumns = header.columns(isStatic);
    LivenessInfo pkLiveness = row.primaryKeyLivenessInfo();
    Row.Deletion deletion = row.deletion();
    boolean hasComplexDeletion = row.hasComplexDeletion();
    boolean hasAllColumns = (row.size() == headerColumns.size());
    boolean hasExtendedFlags = hasExtendedFlags(row);

    // Encode the row's shape as single-bit flags (see the constants sketched below).
    if (isStatic)
        extendedFlags |= IS_STATIC;
    if (!pkLiveness.isEmpty())
        flags |= HAS_TIMESTAMP;
    if (pkLiveness.isExpiring())
        flags |= HAS_TTL;
    if (!deletion.isLive())
    {
        flags |= HAS_DELETION;
        if (deletion.isShadowable())
            extendedFlags |= HAS_SHADOWABLE_DELETION;
    }
    if (hasComplexDeletion)
        flags |= HAS_COMPLEX_DELETION;
    if (hasAllColumns)
        flags |= HAS_ALL_COLUMNS;
    if (hasExtendedFlags)
        flags |= EXTENSION_FLAG;

    out.writeByte((byte) flags);
    if (hasExtendedFlags)
        out.writeByte((byte) extendedFlags);

    // Static rows have no clustering, so it is only serialized for regular rows.
    if (!isStatic)
        Clustering.serializer.serialize(row.clustering(), out, version, header.clusteringTypes());

    if (header.isForSSTable())
    {
        // Serialize the body into a scratch buffer first so its size can be written ahead of it.
        try (DataOutputBuffer dob = DataOutputBuffer.scratchBuffer.get())
        {
            serializeRowBody(row, flags, header, dob);
            out.writeUnsignedVInt(dob.position() + TypeSizes.sizeofUnsignedVInt(previousUnfilteredSize));
            // We write the size of the previous unfiltered to make reverse queries more
            // efficient (and simpler). This is currently not used however and using it is tbd.
            out.writeUnsignedVInt(previousUnfilteredSize);
            out.write(dob.getData(), 0, dob.getLength());
        }
    }
    else
    {
        serializeRowBody(row, flags, header, out);
    }
}
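The flag bits set above are single-bit constants declared elsewhere in UnfilteredSerializer. For orientation, the layout below reflects the Cassandra 3.x source; treat the exact values as illustrative and check the version in use:

// Bit layout of the first flags byte (from UnfilteredSerializer; values as in
// the Cassandra 3.x source -- verify against your version).
private final static int END_OF_PARTITION     = 0x01; // no more unfiltereds in this partition
private final static int IS_MARKER            = 0x02; // range tombstone marker, not a row
private final static int HAS_TIMESTAMP        = 0x04; // primary key liveness timestamp follows
private final static int HAS_TTL              = 0x08; // TTL + local expiration time follow
private final static int HAS_DELETION         = 0x10; // row deletion time follows
private final static int HAS_ALL_COLUMNS      = 0x20; // row has all of the header's columns
private final static int HAS_COMPLEX_DELETION = 0x40; // at least one complex column deletion
private final static int EXTENSION_FLAG       = 0x80; // a second, extended flags byte follows

// Bit layout of the optional extended flags byte.
private final static int IS_STATIC               = 0x01; // row is the static row
private final static int HAS_SHADOWABLE_DELETION = 0x02; // the row deletion is shadowable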
Use of org.apache.cassandra.db.rows.Row.Deletion in project cassandra by apache.
The class UnfilteredSerializer, method deserializeTombstonesOnly.
public Unfiltered deserializeTombstonesOnly(FileDataInput in, SerializationHeader header, SerializationHelper helper) throws IOException
{
    while (true)
    {
        int flags = in.readUnsignedByte();
        if (isEndOfPartition(flags))
            return null;

        int extendedFlags = readExtendedFlags(in, flags);

        if (kind(flags) == Unfiltered.Kind.RANGE_TOMBSTONE_MARKER)
        {
            ClusteringBoundOrBoundary bound = ClusteringBoundOrBoundary.serializer.deserialize(in, helper.version, header.clusteringTypes());
            return deserializeMarkerBody(in, header, bound);
        }
        else
        {
            // Static rows are never read here; deserializeStaticRow should be used for that.
            assert !isStatic(extendedFlags);
            if ((flags & HAS_DELETION) != 0)
            {
                assert header.isForSSTable();
                boolean hasTimestamp = (flags & HAS_TIMESTAMP) != 0;
                boolean hasTTL = (flags & HAS_TTL) != 0;
                boolean deletionIsShadowable = (extendedFlags & HAS_SHADOWABLE_DELETION) != 0;
                Clustering clustering = Clustering.serializer.deserialize(in, helper.version, header.clusteringTypes());
                long nextPosition = in.readUnsignedVInt() + in.getFilePointer();
                // Skip the previous unfiltered size.
                in.readUnsignedVInt();
                if (hasTimestamp)
                {
                    header.readTimestamp(in);
                    if (hasTTL)
                    {
                        header.readTTL(in);
                        header.readLocalDeletionTime(in);
                    }
                }
                Deletion deletion = new Row.Deletion(header.readDeletionTime(in), deletionIsShadowable);
                // Skip the rest of the row body and surface only the deletion.
                in.seek(nextPosition);
                return BTreeRow.emptyDeletedRow(clustering, deletion);
            }
            else
            {
                // Not a tombstone: skip this row entirely and continue with the next item.
                Clustering.serializer.skip(in, helper.version, header.clusteringTypes());
                skipRowBody(in);
            }
        }
    }
}
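A caller drains deserializeTombstonesOnly in a loop: each call returns the next range tombstone marker or empty deleted row, skipping live rows, and null once the end-of-partition flag is hit. A hypothetical driver, assuming the 3.x package layout (readTombstones is an illustrative helper, not part of Cassandra):

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import org.apache.cassandra.db.SerializationHeader;
import org.apache.cassandra.db.rows.SerializationHelper;
import org.apache.cassandra.db.rows.Unfiltered;
import org.apache.cassandra.db.rows.UnfilteredSerializer;
import org.apache.cassandra.io.util.FileDataInput;

// Hypothetical helper: collect one partition's tombstones using the method above.
// 'in', 'header' and 'helper' are assumed to be set up by the caller.
static List<Unfiltered> readTombstones(UnfilteredSerializer serializer,
                                       FileDataInput in,
                                       SerializationHeader header,
                                       SerializationHelper helper) throws IOException
{
    List<Unfiltered> tombstones = new ArrayList<>();
    Unfiltered unfiltered;
    // null signals end of partition; everything returned before that is a tombstone.
    while ((unfiltered = serializer.deserializeTombstonesOnly(in, header, helper)) != null)
        tombstones.add(unfiltered);
    return tombstones;
}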
Use of org.apache.cassandra.db.rows.Row.Deletion in project cassandra by apache.
The class UnfilteredSerializer, method serializeRowBody.
@Inline
private void serializeRowBody(Row row, int flags, SerializationHeader header, DataOutputPlus out) throws IOException
{
    boolean isStatic = row.isStatic();
    Columns headerColumns = header.columns(isStatic);
    LivenessInfo pkLiveness = row.primaryKeyLivenessInfo();
    Row.Deletion deletion = row.deletion();

    if ((flags & HAS_TIMESTAMP) != 0)
        header.writeTimestamp(pkLiveness.timestamp(), out);
    if ((flags & HAS_TTL) != 0)
    {
        header.writeTTL(pkLiveness.ttl(), out);
        header.writeLocalDeletionTime(pkLiveness.localExpirationTime(), out);
    }
    if ((flags & HAS_DELETION) != 0)
        header.writeDeletionTime(deletion.time(), out);

    if ((flags & HAS_ALL_COLUMNS) == 0)
        Columns.serializer.serializeSubset(Collections2.transform(row, ColumnData::column), headerColumns, out);

    SearchIterator<ColumnMetadata, ColumnMetadata> si = headerColumns.iterator();

    try
    {
        // row.apply() visits each ColumnData via a callback that cannot throw a
        // checked IOException, so the exception is tunneled out as a WrappedException.
        row.apply(cd -> {
            ColumnMetadata column = si.next(cd.column());
            assert column != null : cd.column.toString();
            try
            {
                if (cd.column.isSimple())
                    Cell.serializer.serialize((Cell) cd, column, out, pkLiveness, header);
                else
                    writeComplexColumn((ComplexColumnData) cd, column, (flags & HAS_COMPLEX_DELETION) != 0, pkLiveness, header, out);
            }
            catch (IOException e)
            {
                throw new WrappedException(e);
            }
        }, false);
    }
    catch (WrappedException e)
    {
        // Unwrap and rethrow the original checked exception.
        if (e.getCause() instanceof IOException)
            throw (IOException) e.getCause();
        throw e;
    }
}
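The try/catch wrapping around row.apply(...) exists because the column visitor is a callback whose functional interface cannot declare checked exceptions, so the IOException is smuggled across that boundary as an unchecked WrappedException and rethrown by the caller. The same pattern in plain Java, using the JDK's UncheckedIOException in place of Cassandra's WrappedException:

import java.io.IOException;
import java.io.UncheckedIOException;
import java.util.List;
import java.util.function.Consumer;

public class TunnelSketch
{
    // A stand-in for Row.apply(): iteration that only accepts a Consumer.
    static void forEachItem(List<String> items, Consumer<String> action)
    {
        items.forEach(action);
    }

    static void writeAll(List<String> items) throws IOException
    {
        try
        {
            forEachItem(items, item -> {
                try
                {
                    write(item); // may throw the checked IOException
                }
                catch (IOException e)
                {
                    // Tunnel the checked exception through the Consumer boundary.
                    throw new UncheckedIOException(e);
                }
            });
        }
        catch (UncheckedIOException e)
        {
            throw e.getCause(); // restore the original checked exception
        }
    }

    static void write(String item) throws IOException
    {
        if (item == null)
            throw new IOException("null item");
        System.out.println(item);
    }

    public static void main(String[] args) throws IOException
    {
        writeAll(List.of("a", "b", "c"));
    }
}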