Search in sources:

Example 76 with TableMetadata

Use of org.apache.cassandra.schema.TableMetadata in project cassandra by apache.

The class AbstractRow, method toString.

public String toString(TableMetadata metadata, boolean includeClusterKeys, boolean fullDetails) {
    StringBuilder sb = new StringBuilder();
    sb.append("Row");
    if (fullDetails) {
        sb.append("[info=").append(primaryKeyLivenessInfo());
        if (!deletion().isLive())
            sb.append(" del=").append(deletion());
        sb.append(" ]");
    }
    sb.append(": ");
    if (includeClusterKeys)
        sb.append(clustering().toString(metadata));
    else
        sb.append(clustering().toCQLString(metadata));
    sb.append(" | ");
    boolean isFirst = true;
    for (ColumnData cd : this) {
        if (isFirst)
            isFirst = false;
        else
            sb.append(", ");
        if (fullDetails) {
            if (cd.column().isSimple()) {
                sb.append(cd);
            } else {
                ComplexColumnData complexData = (ComplexColumnData) cd;
                if (!complexData.complexDeletion().isLive())
                    sb.append("del(").append(cd.column().name).append(")=").append(complexData.complexDeletion());
                for (Cell cell : complexData) sb.append(", ").append(cell);
            }
        } else {
            if (cd.column().isSimple()) {
                Cell cell = (Cell) cd;
                sb.append(cell.column().name).append('=');
                if (cell.isTombstone())
                    sb.append("<tombstone>");
                else
                    sb.append(cell.column().type.getString(cell.value()));
            } else {
                sb.append(cd.column().name).append('=');
                ComplexColumnData complexData = (ComplexColumnData) cd;
                Function<Cell, String> transform = null;
                if (cd.column().type.isCollection()) {
                    CollectionType ct = (CollectionType) cd.column().type;
                    transform = cell -> String.format("%s -> %s", ct.nameComparator().getString(cell.path().get(0)), ct.valueComparator().getString(cell.value()));
                } else if (cd.column().type.isUDT()) {
                    UserType ut = (UserType) cd.column().type;
                    transform = cell -> {
                        Short fId = ut.nameComparator().getSerializer().deserialize(cell.path().get(0));
                        return String.format("%s -> %s", ut.fieldNameAsString(fId), ut.fieldType(fId).getString(cell.value()));
                    };
                } else {
                    transform = cell -> "";
                }
                sb.append(StreamSupport.stream(complexData.spliterator(), false).map(transform).collect(Collectors.joining(", ", "{", "}")));
            }
        }
    }
    return sb.toString();
}
Also used : Iterables(com.google.common.collect.Iterables) FBUtilities(org.apache.cassandra.utils.FBUtilities) MessageDigest(java.security.MessageDigest) org.apache.cassandra.db(org.apache.cassandra.db) Function(java.util.function.Function) Collectors(java.util.stream.Collectors) ByteBuffer(java.nio.ByteBuffer) Objects(java.util.Objects) AbstractCollection(java.util.AbstractCollection) TableMetadata(org.apache.cassandra.schema.TableMetadata) StreamSupport(java.util.stream.StreamSupport) CollectionType(org.apache.cassandra.db.marshal.CollectionType) UserType(org.apache.cassandra.db.marshal.UserType) MarshalException(org.apache.cassandra.serializers.MarshalException)
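The complex-column branch above renders cells with a per-type transform and Collectors.joining(", ", "{", "}"). Here is a minimal standalone sketch of that transform-and-join idiom in plain Java; the data and names are hypothetical stand-ins, not Cassandra types:

import java.util.List;
import java.util.Map;
import java.util.function.Function;
import java.util.stream.Collectors;

public class JoiningDemo {
    public static void main(String[] args) {
        // Hypothetical stand-in for a complex column's cells: field name -> value pairs.
        List<Map.Entry<String, String>> cells = List.of(
                Map.entry("field1", "a"),
                Map.entry("field2", "b"));
        // Same transform-then-join pattern as the UDT/collection branch of toString above.
        Function<Map.Entry<String, String>, String> transform =
                e -> String.format("%s -> %s", e.getKey(), e.getValue());
        String rendered = cells.stream()
                .map(transform)
                .collect(Collectors.joining(", ", "{", "}"));
        System.out.println(rendered); // prints {field1 -> a, field2 -> b}
    }
}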

Example 77 with TableMetadata

Use of org.apache.cassandra.schema.TableMetadata in project cassandra by apache.

The class TableViews, method readExistingRowsCommand.

/**
     * Returns the command to use to read the existing rows required to generate view updates for the provided base
     * table updates.
     *
     * @param updates the base table updates being applied.
     * @param views the views potentially affected by {@code updates}.
     * @param nowInSec the current time in seconds.
     * @return the command to use to read the base table rows required to generate view updates for {@code updates}.
     */
private SinglePartitionReadCommand readExistingRowsCommand(PartitionUpdate updates, Collection<View> views, int nowInSec) {
    Slices.Builder sliceBuilder = null;
    DeletionInfo deletionInfo = updates.deletionInfo();
    TableMetadata metadata = updates.metadata();
    DecoratedKey key = updates.partitionKey();
    // TODO: This is subtle: we need to gather all the slices that we have to fetch between partition del, range tombstones and rows.
    if (!deletionInfo.isLive()) {
        sliceBuilder = new Slices.Builder(metadata.comparator);
        // TODO: we should improve that latter part.
        if (!deletionInfo.getPartitionDeletion().isLive()) {
            for (View view : views) sliceBuilder.addAll(view.getSelectStatement().clusteringIndexFilterAsSlices());
        } else {
            assert deletionInfo.hasRanges();
            Iterator<RangeTombstone> iter = deletionInfo.rangeIterator(false);
            while (iter.hasNext()) sliceBuilder.add(iter.next().deletedSlice());
        }
    }
    // We need to read every row that is updated, unless we can prove that it has no impact on any view entries.
    // If we had some slices from the deletions above, we'll continue using that. Otherwise, it's more efficient to build
    // a names query.
    BTreeSet.Builder<Clustering> namesBuilder = sliceBuilder == null ? BTreeSet.builder(metadata.comparator) : null;
    for (Row row : updates) {
        // Don't read the existing state if we can prove the update won't affect any views
        if (!affectsAnyViews(key, row, views))
            continue;
        if (namesBuilder == null)
            sliceBuilder.add(Slice.make(row.clustering()));
        else
            namesBuilder.add(row.clustering());
    }
    NavigableSet<Clustering> names = namesBuilder == null ? null : namesBuilder.build();
    // If we only had row updates and none of them affected the views, there is nothing
    // to do.
    if (names != null && names.isEmpty())
        return null;
    ClusteringIndexFilter clusteringFilter = names == null ? new ClusteringIndexSliceFilter(sliceBuilder.build(), false) : new ClusteringIndexNamesFilter(names, false);
    // If we have more than one view, we should merge the columns queried by each view, but to keep it simple we just
    // include everything. We could change that in the future.
    ColumnFilter queriedColumns = views.size() == 1 ? Iterables.getOnlyElement(views).getSelectStatement().queriedColumns() : ColumnFilter.all(metadata);
    // Note that the views could have restrictions on regular columns, but even if that's the case we shouldn't apply those
    // when we read, because even if an existing row doesn't match the view filter, the update can change that, in which
    // case we'll need to know the existing content. There is also no easy way to merge those RowFilters when we have multiple views.
    // TODO: it could still make sense to special-case when there is a single view and a small number of updates (and
    // no deletions). In that case we could check whether any of the updates modify any of the restricted regular
    // columns, and if not, we could use the view filter. We keep it simple for now though.
    RowFilter rowFilter = RowFilter.NONE;
    return SinglePartitionReadCommand.create(metadata, nowInSec, queriedColumns, rowFilter, DataLimits.NONE, key, clusteringFilter);
}
Also used : TableMetadata(org.apache.cassandra.schema.TableMetadata) BTreeSet(org.apache.cassandra.utils.btree.BTreeSet)
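The heart of this method is the choice between a slice query (forced by deletions) and a names query over exactly the updated clusterings, with an early bail-out when nothing needs reading. A minimal sketch of that decision logic with hypothetical plain-Java stand-ins (strings instead of Clustering, a TreeSet instead of BTreeSet):

import java.util.List;
import java.util.TreeSet;

public class FilterChoiceDemo {
    public static void main(String[] args) {
        boolean hasDeletions = false;                 // hypothetical stand-in for !deletionInfo.isLive()
        List<String> updatedRows = List.of("a", "c"); // hypothetical stand-ins for row clusterings
        // Mirror of the method: build names only when no slices were forced by deletions.
        TreeSet<String> names = hasDeletions ? null : new TreeSet<>();
        if (names != null)
            names.addAll(updatedRows);
        // Mirror of the early 'return null' when no existing rows need to be read.
        if (names != null && names.isEmpty()) {
            System.out.println("nothing to read");
            return;
        }
        System.out.println(names == null ? "use a slice filter" : "use a names filter on " + names);
    }
}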

Example 78 with TableMetadata

Use of org.apache.cassandra.schema.TableMetadata in project cassandra by apache.

The class CounterCacheKey, method readCounterValue.

/**
     * Reads the value of the counter represented by this key.
     *
     * @param cfs the store for the table this is a key of.
     * @return the value for the counter represented by this key, or {@code null} if there
     * is no such counter.
     */
public ByteBuffer readCounterValue(ColumnFamilyStore cfs) {
    TableMetadata metadata = cfs.metadata();
    assert metadata.id.equals(tableId) && Objects.equals(metadata.indexName().orElse(null), indexName);
    DecoratedKey key = cfs.decorateKey(partitionKey());
    int clusteringSize = metadata.comparator.size();
    List<ByteBuffer> buffers = CompositeType.splitName(ByteBuffer.wrap(cellName));
    // See makeCellName above
    assert buffers.size() >= clusteringSize + 1;
    Clustering clustering = Clustering.make(buffers.subList(0, clusteringSize).toArray(new ByteBuffer[clusteringSize]));
    ColumnMetadata column = metadata.getColumn(buffers.get(clusteringSize));
    // The column may have been dropped since the cache was saved; no point in failing in that case, just skip the value.
    if (column == null)
        return null;
    CellPath path = column.isComplex() ? CellPath.create(buffers.get(buffers.size() - 1)) : null;
    int nowInSec = FBUtilities.nowInSeconds();
    ColumnFilter.Builder builder = ColumnFilter.selectionBuilder();
    if (path == null)
        builder.add(column);
    else
        builder.select(column, path);
    ClusteringIndexFilter filter = new ClusteringIndexNamesFilter(FBUtilities.singleton(clustering, metadata.comparator), false);
    SinglePartitionReadCommand cmd = SinglePartitionReadCommand.create(metadata, nowInSec, key, builder.build(), filter);
    try (ReadExecutionController controller = cmd.executionController();
        RowIterator iter = UnfilteredRowIterators.filter(cmd.queryMemtableAndDisk(cfs, controller), nowInSec)) {
        ByteBuffer value = null;
        if (column.isStatic())
            value = iter.staticRow().getCell(column).value();
        else if (iter.hasNext())
            value = iter.next().getCell(column).value();
        return value;
    }
}
Also used : TableMetadata(org.apache.cassandra.schema.TableMetadata) CellPath(org.apache.cassandra.db.rows.CellPath) ColumnMetadata(org.apache.cassandra.schema.ColumnMetadata) ClusteringIndexNamesFilter(org.apache.cassandra.db.filter.ClusteringIndexNamesFilter) ColumnFilter(org.apache.cassandra.db.filter.ColumnFilter) ByteBuffer(java.nio.ByteBuffer) RowIterator(org.apache.cassandra.db.rows.RowIterator) ClusteringIndexFilter(org.apache.cassandra.db.filter.ClusteringIndexFilter)
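The key step here is decomposing the composite cell name into a clustering prefix, a column name, and (for complex columns) a trailing cell path. A sketch of that slicing with a plain List<ByteBuffer> in place of CompositeType.splitName; the layout and values are hypothetical:

import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;
import java.util.List;

public class CellNameSplitDemo {
    static ByteBuffer bb(String s) { return ByteBuffer.wrap(s.getBytes(StandardCharsets.UTF_8)); }

    public static void main(String[] args) {
        // Hypothetical result of splitting a composite name: clusterings, column name, cell path.
        List<ByteBuffer> buffers = List.of(bb("ck1"), bb("ck2"), bb("counter_col"), bb("item0"));
        int clusteringSize = 2; // hypothetical stand-in for metadata.comparator.size()
        List<ByteBuffer> clustering = buffers.subList(0, clusteringSize);
        ByteBuffer columnName = buffers.get(clusteringSize);
        // As in readCounterValue: a complex column keeps its cell path as the last component.
        ByteBuffer path = buffers.size() > clusteringSize + 1 ? buffers.get(buffers.size() - 1) : null;
        System.out.printf("clustering parts=%d, column=%s, hasPath=%b%n",
                clustering.size(), StandardCharsets.UTF_8.decode(columnName.duplicate()), path != null);
    }
}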

Example 79 with TableMetadata

Use of org.apache.cassandra.schema.TableMetadata in project cassandra by apache.

The class CommitLogSegment, method dirtyString.

// For debugging, not fast
public String dirtyString() {
    StringBuilder sb = new StringBuilder();
    for (TableId tableId : getDirtyTableIds()) {
        TableMetadata m = Schema.instance.getTableMetadata(tableId);
        sb.append(m == null ? "<deleted>" : m.name).append(" (").append(tableId).append(", dirty: ").append(tableDirty.get(tableId)).append(", clean: ").append(tableClean.get(tableId)).append("), ");
    }
    return sb.toString();
}
Also used : TableId(org.apache.cassandra.schema.TableId) TableMetadata(org.apache.cassandra.schema.TableMetadata)
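Nothing subtle here: it is a debug-only join over the dirty table ids. A standalone sketch of the same append pattern, with hypothetical maps standing in for tableDirty and tableClean:

import java.util.Map;

public class DirtyStringDemo {
    public static void main(String[] args) {
        // Hypothetical stand-ins for the segment's tableDirty and tableClean position maps.
        Map<String, Integer> dirty = Map.of("table-1", 10, "table-2", 20);
        Map<String, Integer> clean = Map.of("table-1", 5, "table-2", 25);
        StringBuilder sb = new StringBuilder();
        // Same append chain as dirtyString; note Map.of iteration order is unspecified.
        for (String id : dirty.keySet())
            sb.append(id).append(" (dirty: ").append(dirty.get(id))
              .append(", clean: ").append(clean.get(id)).append("), ");
        System.out.println(sb);
    }
}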

Example 80 with TableMetadata

Use of org.apache.cassandra.schema.TableMetadata in project cassandra by apache.

The class SSTableReader, method loadSummary.

/**
     * Load the index summary from the Summary.db file, if it exists.
     *
     * If the loaded index summary has a different index interval from the current value stored in the schema,
     * the Summary.db file is deleted and this method returns false so that the summary is rebuilt.
     *
     * @return true if the index summary was loaded successfully from the Summary.db file.
     */
@SuppressWarnings("resource")
public boolean loadSummary() {
    File summariesFile = new File(descriptor.filenameFor(Component.SUMMARY));
    if (!summariesFile.exists())
        return false;
    DataInputStream iStream = null;
    try {
        TableMetadata metadata = metadata();
        iStream = new DataInputStream(new FileInputStream(summariesFile));
        indexSummary = IndexSummary.serializer.deserialize(iStream, getPartitioner(), metadata.params.minIndexInterval, metadata.params.maxIndexInterval);
        first = decorateKey(ByteBufferUtil.readWithLength(iStream));
        last = decorateKey(ByteBufferUtil.readWithLength(iStream));
    } catch (IOException e) {
        if (indexSummary != null)
            indexSummary.close();
        logger.trace("Cannot deserialize SSTable Summary File {}: {}", summariesFile.getPath(), e.getMessage());
        // corrupted; close the stream, delete the file, and fall back to creating a new summary
        FileUtils.closeQuietly(iStream);
        FileUtils.deleteWithConfirm(summariesFile);
        return false;
    } finally {
        FileUtils.closeQuietly(iStream);
    }
    return true;
}
Also used : TableMetadata(org.apache.cassandra.schema.TableMetadata)
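The first and last keys are read back with ByteBufferUtil.readWithLength, i.e. length-prefixed framing after the serialized summary. A minimal sketch of that framing with plain java.io; the round-trip below is illustrative only and does not reproduce the actual Summary.db layout:

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.nio.charset.StandardCharsets;

public class ReadWithLengthDemo {
    // Same framing idea as readWithLength: an int length prefix, then that many bytes.
    static byte[] readWithLength(DataInputStream in) throws IOException {
        byte[] bytes = new byte[in.readInt()];
        in.readFully(bytes);
        return bytes;
    }

    public static void main(String[] args) throws IOException {
        ByteArrayOutputStream buf = new ByteArrayOutputStream();
        try (DataOutputStream out = new DataOutputStream(buf)) {
            byte[] key = "first-key".getBytes(StandardCharsets.UTF_8);
            out.writeInt(key.length);
            out.write(key);
        }
        try (DataInputStream in = new DataInputStream(new ByteArrayInputStream(buf.toByteArray()))) {
            System.out.println(new String(readWithLength(in), StandardCharsets.UTF_8)); // prints first-key
        }
    }
}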

Aggregations

TableMetadata (org.apache.cassandra.schema.TableMetadata): 129
Test (org.junit.Test): 63
ByteBuffer (java.nio.ByteBuffer): 29
ColumnMetadata (org.apache.cassandra.schema.ColumnMetadata): 17
RowUpdateBuilder (org.apache.cassandra.db.RowUpdateBuilder): 13
File (java.io.File): 10
PartitionUpdate (org.apache.cassandra.db.partitions.PartitionUpdate): 10
Mutation (org.apache.cassandra.db.Mutation): 8
InvalidRequestException (org.apache.cassandra.exceptions.InvalidRequestException): 8
KeyspaceMetadata (org.apache.cassandra.schema.KeyspaceMetadata): 8
Descriptor (org.apache.cassandra.io.sstable.Descriptor): 7
IndexMetadata (org.apache.cassandra.schema.IndexMetadata): 6
IOException (java.io.IOException): 5
DatabaseDescriptor (org.apache.cassandra.config.DatabaseDescriptor): 5
IndexTarget (org.apache.cassandra.cql3.statements.IndexTarget): 5
ColumnFamilyStore (org.apache.cassandra.db.ColumnFamilyStore): 5
ColumnIdentifier (org.apache.cassandra.cql3.ColumnIdentifier): 4
UntypedResultSet (org.apache.cassandra.cql3.UntypedResultSet): 4
AbstractType (org.apache.cassandra.db.marshal.AbstractType): 4
ConfigurationException (org.apache.cassandra.exceptions.ConfigurationException): 4