Use of org.apache.cassandra.schema.ColumnMetadata in project cassandra by apache.
The class JsonTransformer, method serializeClustering.
private void serializeClustering(ClusteringPrefix clustering) throws IOException {
    if (clustering.size() > 0) {
        json.writeFieldName("clustering");
        objectIndenter.setCompact(true);
        json.writeStartArray();
        arrayIndenter.setCompact(true);
        List<ColumnMetadata> clusteringColumns = metadata.clusteringColumns();
        for (int i = 0; i < clusteringColumns.size(); i++) {
            ColumnMetadata column = clusteringColumns.get(i);
            // A prefix can bind fewer values than the table has clustering columns
            // (e.g. a range tombstone bound); the unbound positions are rendered as "*".
            if (i >= clustering.size()) {
                json.writeString("*");
            } else {
                json.writeString(column.cellValueType().getString(clustering.get(i)));
            }
        }
        json.writeEndArray();
        objectIndenter.setCompact(false);
        arrayIndenter.setCompact(false);
    }
}
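For illustration, a minimal standalone sketch of the padding rule (plain Java; ClusteringPaddingSketch and its values are hypothetical, not part of Cassandra): a prefix that binds fewer values than the table has clustering columns is padded with "*".

import java.util.Arrays;
import java.util.List;

public class ClusteringPaddingSketch {
    // Mirrors the loop above: emit the bound values, then "*" for each unbound position.
    static List<String> serialize(List<String> prefixValues, int clusteringColumnCount) {
        String[] out = new String[clusteringColumnCount];
        for (int i = 0; i < clusteringColumnCount; i++)
            out[i] = i < prefixValues.size() ? prefixValues.get(i) : "*";
        return Arrays.asList(out);
    }

    public static void main(String[] args) {
        // Two clustering columns, but the prefix only binds the first one.
        System.out.println(serialize(Arrays.asList("2017-01-01"), 2)); // [2017-01-01, *]
    }
}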
Use of org.apache.cassandra.schema.ColumnMetadata in project cassandra by apache.
The class ViewUpdateGenerator, method updateEntry.
/**
 * Creates the updates to apply to the existing view entry, given the base table row before
 * and after the update, assuming that the update hasn't changed which view entry the row
 * corresponds to (that is, we know the columns composing the view PK haven't changed).
 * <p>
 * This method checks that the base row (before and after) matches the view filter before
 * applying anything.
 */
private void updateEntry(Row existingBaseRow, Row mergedBaseRow) {
    // Even though both rows correspond to the same view entry, they may not match the view filter.
    if (!matchesViewFilter(existingBaseRow)) {
        createEntry(mergedBaseRow);
        return;
    }
    if (!matchesViewFilter(mergedBaseRow)) {
        deleteOldEntryInternal(existingBaseRow);
        return;
    }
    startNewUpdate(mergedBaseRow);
    // In theory, the PK liveness info and row deletion may not have been changed by the
    // update, in which case we could skip the 2 additions below. In practice though, it's
    // as fast (if not faster) to compute that info as to check whether it has changed, so
    // we keep it simple.
    currentViewEntryBuilder.addPrimaryKeyLivenessInfo(computeLivenessInfoForEntry(mergedBaseRow));
    currentViewEntryBuilder.addRowDeletion(mergedBaseRow.deletion());
    // We only add to the view update the cells from mergedBaseRow that differ from
    // existingBaseRow. For that, and for speed, we can rely on cell pointer equality: if the
    // update hasn't touched a cell, we know it will be the same object in existingBaseRow and
    // mergedBaseRow (note that including more cells than we strictly need isn't a problem
    // for correctness, so even if the code changes and pointer equality stops working, the
    // result would only be slightly inefficient, which we could fix then).
    // Note: we could alternatively use Rows.diff() for this, but because it is a bit more
    // generic than what we need here, it's also a bit less efficient (it allocates more, in
    // particular), and this method may be called many times for view updates. So, given that
    // this is not a whole lot of code anyway, it's probably worth doing the diff manually.
    PeekingIterator<ColumnData> existingIter = Iterators.peekingIterator(existingBaseRow.iterator());
    for (ColumnData mergedData : mergedBaseRow) {
        ColumnMetadata baseColumn = mergedData.column();
        ColumnMetadata viewColumn = view.getViewColumn(baseColumn);
        // Skip columns that aren't included in the view. Also, if a column is part of the
        // view PK, it has already been taken into account in the clustering.
        if (viewColumn == null || viewColumn.isPrimaryKeyColumn())
            continue;
        ColumnData existingData = null;
        // Find if there is data for that column in the existing row
        while (existingIter.hasNext()) {
            int cmp = baseColumn.compareTo(existingIter.peek().column());
            if (cmp < 0)
                break;
            ColumnData next = existingIter.next();
            if (cmp == 0) {
                existingData = next;
                break;
            }
        }
        if (existingData == null) {
            addColumnData(viewColumn, mergedData);
            continue;
        }
        if (mergedData == existingData)
            continue;
        if (baseColumn.isComplex()) {
            ComplexColumnData mergedComplexData = (ComplexColumnData) mergedData;
            ComplexColumnData existingComplexData = (ComplexColumnData) existingData;
            if (mergedComplexData.complexDeletion().supersedes(existingComplexData.complexDeletion()))
                currentViewEntryBuilder.addComplexDeletion(viewColumn, mergedComplexData.complexDeletion());
            PeekingIterator<Cell> existingCells = Iterators.peekingIterator(existingComplexData.iterator());
            for (Cell mergedCell : mergedComplexData) {
                Cell existingCell = null;
                // Find if there is a corresponding cell in the existing row. Cells are sorted
                // by path on both sides, so this mirrors the column merge-join above: stop
                // once the existing side is past the merged path, otherwise consume and compare.
                while (existingCells.hasNext()) {
                    int cmp = baseColumn.cellPathComparator().compare(mergedCell.path(), existingCells.peek().path());
                    if (cmp < 0)
                        break;
                    Cell next = existingCells.next();
                    if (cmp == 0) {
                        existingCell = next;
                        break;
                    }
                }
                if (mergedCell != existingCell)
                    addCell(viewColumn, mergedCell);
            }
        } else {
            // Note that we've already eliminated the case where merged == existing
            addCell(viewColumn, (Cell) mergedData);
        }
    }
    submitUpdate();
}
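Both peeking-iterator loops above are instances of the same sorted merge-join pattern: walk two ascending sequences in lockstep, pair equal keys, and skip keys present on only one side. A minimal standalone sketch (Guava's PeekingIterator, with integer keys as hypothetical stand-ins for columns or cell paths):

import java.util.Arrays;
import java.util.List;
import com.google.common.collect.Iterators;
import com.google.common.collect.PeekingIterator;

public class MergeJoinSketch {
    public static void main(String[] args) {
        List<Integer> merged = Arrays.asList(1, 3, 4, 7);
        List<Integer> existing = Arrays.asList(1, 2, 4, 6);
        PeekingIterator<Integer> existingIter = Iterators.peekingIterator(existing.iterator());
        for (int m : merged) {
            Integer match = null;
            while (existingIter.hasNext()) {
                int cmp = Integer.compare(m, existingIter.peek());
                if (cmp < 0)
                    break; // the existing side is already past m: no counterpart
                Integer next = existingIter.next(); // consume entries <= m
                if (cmp == 0) {
                    match = next; // equal keys: found the counterpart
                    break;
                }
            }
            System.out.println(m + " -> " + match); // 1 -> 1, 3 -> null, 4 -> 4, 7 -> null
        }
    }
}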
Use of org.apache.cassandra.schema.ColumnMetadata in project cassandra by apache.
The class BTreeRow, method filter.
public Row filter(ColumnFilter filter, DeletionTime activeDeletion, boolean setActiveDeletionToRow, TableMetadata metadata) {
    Map<ByteBuffer, DroppedColumn> droppedColumns = metadata.droppedColumns;
    boolean mayFilterColumns = !filter.fetchesAllColumns(isStatic());
    boolean mayHaveShadowed = activeDeletion.supersedes(deletion.time());
    if (!mayFilterColumns && !mayHaveShadowed && droppedColumns.isEmpty())
        return this;
    LivenessInfo newInfo = primaryKeyLivenessInfo;
    Deletion newDeletion = deletion;
    if (mayHaveShadowed) {
        if (activeDeletion.deletes(newInfo.timestamp()))
            newInfo = LivenessInfo.EMPTY;
        // Note that mayHaveShadowed means the activeDeletion shadows the row deletion. So unless
        // setActiveDeletionToRow is set, the row deletion is shadowed and we shouldn't return it.
        newDeletion = setActiveDeletionToRow ? Deletion.regular(activeDeletion) : Deletion.LIVE;
    }
    Columns columns = filter.fetchedColumns().columns(isStatic());
    Predicate<ColumnMetadata> inclusionTester = columns.inOrderInclusionTester();
    Predicate<ColumnMetadata> queriedByUserTester = filter.queriedColumns().columns(isStatic()).inOrderInclusionTester();
    final LivenessInfo rowLiveness = newInfo;
    return transformAndFilter(newInfo, newDeletion, (cd) -> {
        ColumnMetadata column = cd.column();
        if (!inclusionTester.test(column))
            return null;
        DroppedColumn dropped = droppedColumns.get(column.name.bytes);
        if (column.isComplex())
            return ((ComplexColumnData) cd).filter(filter, mayHaveShadowed ? activeDeletion : DeletionTime.LIVE, dropped, rowLiveness);
        Cell cell = (Cell) cd;
        // We include the cell unless it is 1) shadowed, 2) for a dropped column, or 3) skippable.
        // A cell is skippable if it is for a column that is not queried by the user and its
        // timestamp is lower than the row timestamp (see #10657 or SerializationHelper.includes()
        // for details).
        boolean isForDropped = dropped != null && cell.timestamp() <= dropped.droppedTime;
        boolean isShadowed = mayHaveShadowed && activeDeletion.deletes(cell);
        boolean isSkippable = !queriedByUserTester.test(column) && cell.timestamp() < rowLiveness.timestamp();
        return isForDropped || isShadowed || isSkippable ? null : cell;
    });
}
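A minimal standalone sketch of that cell-inclusion rule with plain long timestamps (hypothetical values; for illustration it assumes DeletionTime.deletes(cell) reduces to comparing the deletion's timestamp against the cell's):

public class CellFilterSketch {
    // A cell survives filtering unless it is shadowed, belongs to a dropped column,
    // or is skippable (not queried by the user and older than the row's liveness).
    static boolean keep(long cellTimestamp, long rowTimestamp, long activeDeletionTimestamp,
                        Long droppedTime, boolean queriedByUser) {
        boolean isForDropped = droppedTime != null && cellTimestamp <= droppedTime;
        boolean isShadowed = cellTimestamp <= activeDeletionTimestamp;
        boolean isSkippable = !queriedByUser && cellTimestamp < rowTimestamp;
        return !(isForDropped || isShadowed || isSkippable);
    }

    public static void main(String[] args) {
        System.out.println(keep(10, 10, 5, null, true));  // true: live and queried
        System.out.println(keep(10, 10, 15, null, true)); // false: shadowed by the deletion
        System.out.println(keep(8, 10, 5, null, false));  // false: skippable (see #10657)
    }
}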
Use of org.apache.cassandra.schema.ColumnMetadata in project cassandra by apache.
The class View, method updateDefinition.
/**
 * Updates the stored columns that depend on the base table's TableMetadata. For example, if
 * the base table has PRIMARY KEY (pk) and the view has PRIMARY KEY (v, pk) for some regular
 * base column v, then baseNonPKColumnsInViewPK will contain v.
 */
public void updateDefinition(ViewMetadata definition) {
    this.definition = definition;
    List<ColumnMetadata> nonPKDefPartOfViewPK = new ArrayList<>();
    for (ColumnMetadata baseColumn : baseCfs.metadata().columns()) {
        ColumnMetadata viewColumn = getViewColumn(baseColumn);
        if (viewColumn != null && !baseColumn.isPrimaryKeyColumn() && viewColumn.isPrimaryKeyColumn())
            nonPKDefPartOfViewPK.add(baseColumn);
    }
    this.baseNonPKColumnsInViewPK = nonPKDefPartOfViewPK;
}
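A minimal runnable sketch of that selection (Col and its flags are a hypothetical column model for illustration, not Cassandra types): keep the base columns that are regular in the base table but part of the view's primary key.

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

public class ViewPkColumnsSketch {
    static class Col {
        final String name; final boolean pkInBase; final Boolean pkInView; // null: not in the view
        Col(String name, boolean pkInBase, Boolean pkInView) {
            this.name = name; this.pkInBase = pkInBase; this.pkInView = pkInView;
        }
    }

    public static void main(String[] args) {
        List<Col> base = Arrays.asList(
                new Col("pk", true, true),   // base PK, also in the view PK -> excluded
                new Col("v", false, true),   // regular in base, view PK     -> included
                new Col("w", false, false)); // regular in both              -> excluded
        List<String> result = new ArrayList<>();
        for (Col c : base)
            if (c.pkInView != null && !c.pkInBase && c.pkInView)
                result.add(c.name);
        System.out.println(result); // [v]
    }
}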
Use of org.apache.cassandra.schema.ColumnMetadata in project cassandra by apache.
The class ViewUpdateGenerator, method startNewUpdate.
/**
 * Computes the partition key and clustering for a new view entry, and sets up the internal
 * row builder for the new row.
 *
 * This assumes that there is a corresponding entry, i.e. that no values for the partition
 * key or clustering are null (since we have eliminated that case through updateAction).
 */
private void startNewUpdate(Row baseRow) {
    ByteBuffer[] clusteringValues = new ByteBuffer[viewMetadata.clusteringColumns().size()];
    for (ColumnMetadata viewColumn : viewMetadata.primaryKeyColumns()) {
        ColumnMetadata baseColumn = view.getBaseColumn(viewColumn);
        ByteBuffer value = getValueForPK(baseColumn, baseRow);
        if (viewColumn.isPartitionKey())
            currentViewEntryPartitionKey[viewColumn.position()] = value;
        else
            clusteringValues[viewColumn.position()] = value;
    }
    currentViewEntryBuilder.newRow(Clustering.make(clusteringValues));
}
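For illustration, a minimal standalone sketch of the slotting logic (Column is a hypothetical model; the real code indexes by ColumnMetadata.position() within the partition key or the clustering): each view PK column lands in either the partition-key array or the clustering array at its own position.

import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;
import java.util.Arrays;
import java.util.List;

public class PkSlottingSketch {
    static class Column {
        final String name; final boolean partitionKey; final int position;
        Column(String name, boolean partitionKey, int position) {
            this.name = name; this.partitionKey = partitionKey; this.position = position;
        }
    }

    public static void main(String[] args) {
        // Hypothetical view PK: (v) as the partition key, (pk, ck) as the clustering.
        List<Column> viewPk = Arrays.asList(
                new Column("v", true, 0),
                new Column("pk", false, 0),
                new Column("ck", false, 1));
        ByteBuffer[] partitionKeyValues = new ByteBuffer[1];
        ByteBuffer[] clusteringValues = new ByteBuffer[2];
        for (Column c : viewPk) {
            // Stand-in for getValueForPK(baseColumn, baseRow): just encode the column name.
            ByteBuffer value = ByteBuffer.wrap(c.name.getBytes(StandardCharsets.UTF_8));
            if (c.partitionKey)
                partitionKeyValues[c.position] = value;
            else
                clusteringValues[c.position] = value;
        }
        System.out.println(StandardCharsets.UTF_8.decode(partitionKeyValues[0])); // v
        System.out.println(StandardCharsets.UTF_8.decode(clusteringValues[0]) + ", "
                + StandardCharsets.UTF_8.decode(clusteringValues[1]));            // pk, ck
    }
}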