Example 1 with MetadataComponent

Use of org.apache.cassandra.io.sstable.metadata.MetadataComponent in project cassandra by apache.

From the class SSTableHeaderFix, the method processSSTable:

private void processSSTable(Descriptor desc) {
    if (desc.cfname.indexOf('.') != -1) {
        // a dot in the table name indicates a secondary-index sstable; skip it
        return;
    }
    TableMetadata tableMetadata = schemaCallback.apply(desc);
    if (tableMetadata == null) {
        error("Table %s.%s not found in the schema - NOT checking sstable %s", desc.ksname, desc.cfname, desc);
        return;
    }
    Set<Component> components = SSTable.discoverComponentsFor(desc);
    if (components.stream().noneMatch(c -> c.type == Component.Type.STATS)) {
        error("sstable %s has no -Statistics.db component.", desc);
        return;
    }
    Map<MetadataType, MetadataComponent> metadata = readSSTableMetadata(desc);
    if (metadata == null)
        return;
    MetadataComponent component = metadata.get(MetadataType.HEADER);
    if (!(component instanceof SerializationHeader.Component)) {
        error("sstable %s: Expected %s, but got %s from metadata.get(MetadataType.HEADER)", desc, SerializationHeader.Component.class.getName(), component != null ? component.getClass().getName() : "'null'");
        return;
    }
    SerializationHeader.Component header = (SerializationHeader.Component) component;
    // check partition key type
    AbstractType<?> keyType = validatePartitionKey(desc, tableMetadata, header);
    // check clustering columns
    List<AbstractType<?>> clusteringTypes = validateClusteringColumns(desc, tableMetadata, header);
    // check static and regular columns
    Map<ByteBuffer, AbstractType<?>> staticColumns = validateColumns(desc, tableMetadata, header.getStaticColumns(), ColumnMetadata.Kind.STATIC);
    Map<ByteBuffer, AbstractType<?>> regularColumns = validateColumns(desc, tableMetadata, header.getRegularColumns(), ColumnMetadata.Kind.REGULAR);
    SerializationHeader.Component newHeader = SerializationHeader.Component.buildComponentForTools(keyType, clusteringTypes, staticColumns, regularColumns, header.getEncodingStats());
    // SerializationHeader.Component has no equals(), but a "good" toString()
    if (header.toString().equals(newHeader.toString()))
        return;
    Map<MetadataType, MetadataComponent> newMetadata = new LinkedHashMap<>(metadata);
    newMetadata.put(MetadataType.HEADER, newHeader);
    updates.add(Pair.create(desc, newMetadata));
}
Also used : TableMetadata(org.apache.cassandra.schema.TableMetadata) MetadataComponent(org.apache.cassandra.io.sstable.metadata.MetadataComponent) MetadataType(org.apache.cassandra.io.sstable.metadata.MetadataType) ByteBuffer(java.nio.ByteBuffer) LinkedHashMap(java.util.LinkedHashMap) SerializationHeader(org.apache.cassandra.db.SerializationHeader) AbstractType(org.apache.cassandra.db.marshal.AbstractType)
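
The helper readSSTableMetadata is not shown in this snippet. A minimal sketch of what such a helper could look like, assuming only the deserialize API visible in Examples 2-5; the method name, error handling, and set of requested types here are illustrative, and the real SSTableHeaderFix may differ:

private Map<MetadataType, MetadataComponent> readSSTableMetadata(Descriptor desc) {
    try {
        // deserialize every metadata component from the sstable's -Statistics.db
        return desc.getMetadataSerializer().deserialize(desc, EnumSet.allOf(MetadataType.class));
    } catch (IOException e) {
        // hypothetical error reporting, mirroring the error(...) calls above
        error("Failed to deserialize metadata for sstable %s: %s", desc, e);
        return null;
    }
}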

Example 2 with MetadataComponent

Use of org.apache.cassandra.io.sstable.metadata.MetadataComponent in project cassandra by apache.

From the class FBUtilities, the method newPartitioner:

/**
 * Create a new instance of a partitioner defined in an SSTable Descriptor
 * @param desc Descriptor of an sstable
 * @return a new IPartitioner instance
 * @throws IOException if the sstable metadata cannot be deserialized
 */
public static IPartitioner newPartitioner(Descriptor desc) throws IOException {
    EnumSet<MetadataType> types = EnumSet.of(MetadataType.VALIDATION, MetadataType.HEADER);
    Map<MetadataType, MetadataComponent> sstableMetadata = desc.getMetadataSerializer().deserialize(desc, types);
    ValidationMetadata validationMetadata = (ValidationMetadata) sstableMetadata.get(MetadataType.VALIDATION);
    SerializationHeader.Component header = (SerializationHeader.Component) sstableMetadata.get(MetadataType.HEADER);
    return newPartitioner(validationMetadata.partitioner, Optional.of(header.getKeyType()));
}
Also used : MetadataComponent(org.apache.cassandra.io.sstable.metadata.MetadataComponent) SerializationHeader(org.apache.cassandra.db.SerializationHeader) MetadataType(org.apache.cassandra.io.sstable.metadata.MetadataType) ValidationMetadata(org.apache.cassandra.io.sstable.metadata.ValidationMetadata)
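
A short usage sketch: given the on-disk path of an sstable's -Data.db file, a Descriptor can be obtained and handed to newPartitioner. The path below is illustrative, and Descriptor.fromFilename is assumed as the entry point (check the exact path handling for your Cassandra version):

// hypothetical path; point this at a real -Data.db file
Descriptor desc = Descriptor.fromFilename("/var/lib/cassandra/data/ks/tbl/nb-1-big-Data.db");
IPartitioner partitioner = FBUtilities.newPartitioner(desc);
System.out.println("partitioner: " + partitioner.getClass().getName());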

Example 3 with MetadataComponent

Use of org.apache.cassandra.io.sstable.metadata.MetadataComponent in project cassandra by apache.

From the class Util, the method metadataFromSSTable:

/**
 * Construct table schema from info stored in SSTable's Stats.db
 *
 * @param desc SSTable's descriptor
 * @return the restored TableMetadata
 * @throws IOException when Stats.db cannot be read
 */
public static TableMetadata metadataFromSSTable(Descriptor desc) throws IOException {
    if (desc.version.getVersion().compareTo("ma") < 0)
        throw new IOException("pre-3.0 SSTable is not supported.");
    EnumSet<MetadataType> types = EnumSet.of(MetadataType.STATS, MetadataType.HEADER);
    Map<MetadataType, MetadataComponent> sstableMetadata = desc.getMetadataSerializer().deserialize(desc, types);
    SerializationHeader.Component header = (SerializationHeader.Component) sstableMetadata.get(MetadataType.HEADER);
    IPartitioner partitioner = FBUtilities.newPartitioner(desc);
    TableMetadata.Builder builder = TableMetadata.builder("keyspace", "table").partitioner(partitioner);
    header.getStaticColumns().entrySet().stream().forEach(entry -> {
        ColumnIdentifier ident = ColumnIdentifier.getInterned(UTF8Type.instance.getString(entry.getKey()), true);
        builder.addStaticColumn(ident, entry.getValue());
    });
    header.getRegularColumns().entrySet().stream().forEach(entry -> {
        ColumnIdentifier ident = ColumnIdentifier.getInterned(UTF8Type.instance.getString(entry.getKey()), true);
        builder.addRegularColumn(ident, entry.getValue());
    });
    builder.addPartitionKeyColumn("PartitionKey", header.getKeyType());
    for (int i = 0; i < header.getClusteringTypes().size(); i++) {
        builder.addClusteringColumn("clustering" + (i > 0 ? i : ""), header.getClusteringTypes().get(i));
    }
    return builder.build();
}
Also used : MetadataComponent(org.apache.cassandra.io.sstable.metadata.MetadataComponent) TableMetadata(org.apache.cassandra.schema.TableMetadata) MetadataType(org.apache.cassandra.io.sstable.metadata.MetadataType) IOException(java.io.IOException) SerializationHeader(org.apache.cassandra.db.SerializationHeader) ColumnIdentifier(org.apache.cassandra.cql3.ColumnIdentifier) IPartitioner(org.apache.cassandra.dht.IPartitioner)
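
Note that the real keyspace, table, and primary-key column names are not stored in Stats.db, so the rebuilt schema uses the placeholders "keyspace", "table", "PartitionKey", and "clustering", "clustering1", and so on; only the static and regular column names survive in the header. A usage sketch, assuming TableMetadata exposes its column definitions via columns() and that the path below points at a real sstable:

// hypothetical path; the descriptor must point at a post-3.0 ("ma" or later) sstable
Descriptor desc = Descriptor.fromFilename("/var/lib/cassandra/data/ks/tbl/nb-1-big-Data.db");
TableMetadata metadata = Util.metadataFromSSTable(desc);
// print each recovered column with its CQL type
for (ColumnMetadata column : metadata.columns())
    System.out.println(column.name + " : " + column.type.asCQL3Type());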

Example 4 with MetadataComponent

Use of org.apache.cassandra.io.sstable.metadata.MetadataComponent in project cassandra by apache.

From the class SSTableExport, the method metadataFromSSTable (nearly identical to Example 3; only the version guard differs):

/**
 * Construct table schema from info stored in SSTable's Stats.db
 *
 * @param desc SSTable's descriptor
 * @return Restored TableMetadata
 * @throws IOException when Stats.db cannot be read
 */
public static TableMetadata metadataFromSSTable(Descriptor desc) throws IOException {
    if (!desc.version.isCompatible())
        throw new IOException("Cannot process old and unsupported SSTable version.");
    EnumSet<MetadataType> types = EnumSet.of(MetadataType.STATS, MetadataType.HEADER);
    Map<MetadataType, MetadataComponent> sstableMetadata = desc.getMetadataSerializer().deserialize(desc, types);
    SerializationHeader.Component header = (SerializationHeader.Component) sstableMetadata.get(MetadataType.HEADER);
    IPartitioner partitioner = FBUtilities.newPartitioner(desc);
    TableMetadata.Builder builder = TableMetadata.builder("keyspace", "table").partitioner(partitioner);
    header.getStaticColumns().entrySet().stream().forEach(entry -> {
        ColumnIdentifier ident = ColumnIdentifier.getInterned(UTF8Type.instance.getString(entry.getKey()), true);
        builder.addStaticColumn(ident, entry.getValue());
    });
    header.getRegularColumns().entrySet().stream().forEach(entry -> {
        ColumnIdentifier ident = ColumnIdentifier.getInterned(UTF8Type.instance.getString(entry.getKey()), true);
        builder.addRegularColumn(ident, entry.getValue());
    });
    builder.addPartitionKeyColumn("PartitionKey", header.getKeyType());
    for (int i = 0; i < header.getClusteringTypes().size(); i++) {
        builder.addClusteringColumn("clustering" + (i > 0 ? i : ""), header.getClusteringTypes().get(i));
    }
    return builder.build();
}
Also used : MetadataComponent(org.apache.cassandra.io.sstable.metadata.MetadataComponent) TableMetadata(org.apache.cassandra.schema.TableMetadata) MetadataType(org.apache.cassandra.io.sstable.metadata.MetadataType) IOException(java.io.IOException) SerializationHeader(org.apache.cassandra.db.SerializationHeader) ColumnIdentifier(org.apache.cassandra.cql3.ColumnIdentifier)
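
The only material difference from Example 3 is the version guard: Util rejects anything older than the 3.0 "ma" format, while SSTableExport rejects any version the running code cannot read. A combined guard, as a sketch built from the two predicates shown above:

// reject sstables that are either unreadable by this build or pre-3.0
if (!desc.version.isCompatible() || desc.version.getVersion().compareTo("ma") < 0)
    throw new IOException("Unsupported SSTable version: " + desc.version.getVersion());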

Example 5 with MetadataComponent

Use of org.apache.cassandra.io.sstable.metadata.MetadataComponent in project cassandra by apache.

From the class Verifier, the method verify:

public void verify() {
    boolean extended = options.extendedVerification;
    long rowStart = 0;
    outputHandler.output(String.format("Verifying %s (%s)", sstable, FBUtilities.prettyPrintMemory(dataFile.length())));
    if (options.checkVersion && !sstable.descriptor.version.isLatestVersion()) {
        String msg = String.format("%s is not the latest version, run upgradesstables", sstable);
        outputHandler.output(msg);
        // don't use markAndThrow here because we don't want a CorruptSSTableException for this.
        throw new RuntimeException(msg);
    }
    outputHandler.output(String.format("Deserializing sstable metadata for %s ", sstable));
    try {
        EnumSet<MetadataType> types = EnumSet.of(MetadataType.VALIDATION, MetadataType.STATS, MetadataType.HEADER);
        Map<MetadataType, MetadataComponent> sstableMetadata = sstable.descriptor.getMetadataSerializer().deserialize(sstable.descriptor, types);
        if (sstableMetadata.containsKey(MetadataType.VALIDATION) && !((ValidationMetadata) sstableMetadata.get(MetadataType.VALIDATION)).partitioner.equals(sstable.getPartitioner().getClass().getCanonicalName()))
            throw new IOException("Partitioner does not match validation metadata");
    } catch (Throwable t) {
        outputHandler.warn(t);
        markAndThrow(t, false);
    }
    try {
        outputHandler.debug("Deserializing index for " + sstable);
        deserializeIndex(sstable);
    } catch (Throwable t) {
        outputHandler.warn(t);
        markAndThrow(t);
    }
    try {
        outputHandler.debug("Deserializing index summary for " + sstable);
        deserializeIndexSummary(sstable);
    } catch (Throwable t) {
        outputHandler.output("Index summary is corrupt - if it is removed it will get rebuilt on startup " + sstable.descriptor.filenameFor(Component.SUMMARY));
        outputHandler.warn(t);
        markAndThrow(t, false);
    }
    try {
        outputHandler.debug("Deserializing bloom filter for " + sstable);
        deserializeBloomFilter(sstable);
    } catch (Throwable t) {
        outputHandler.warn(t);
        markAndThrow(t);
    }
    if (options.checkOwnsTokens && !isOffline && !(cfs.getPartitioner() instanceof LocalPartitioner)) {
        outputHandler.debug("Checking that all tokens are owned by the current node");
        try (KeyIterator iter = new KeyIterator(sstable.descriptor, sstable.metadata())) {
            List<Range<Token>> ownedRanges = Range.normalize(tokenLookup.apply(cfs.metadata.keyspace));
            if (ownedRanges.isEmpty())
                return;
            RangeOwnHelper rangeOwnHelper = new RangeOwnHelper(ownedRanges);
            while (iter.hasNext()) {
                DecoratedKey key = iter.next();
                rangeOwnHelper.validate(key);
            }
        } catch (Throwable t) {
            outputHandler.warn(t);
            markAndThrow(t);
        }
    }
    if (options.quick)
        return;
    // Verify will use the Digest files, which works for both compressed and uncompressed sstables
    outputHandler.output(String.format("Checking computed hash of %s ", sstable));
    try {
        validator = null;
        if (new File(sstable.descriptor.filenameFor(Component.DIGEST)).exists()) {
            validator = DataIntegrityMetadata.fileDigestValidator(sstable.descriptor);
            validator.validate();
        } else {
            outputHandler.output("Data digest missing, assuming extended verification of disk values");
            extended = true;
        }
    } catch (IOException e) {
        outputHandler.warn(e);
        markAndThrow(e);
    } finally {
        FileUtils.closeQuietly(validator);
    }
    if (!extended)
        return;
    outputHandler.output("Extended Verify requested, proceeding to inspect values");
    try {
        ByteBuffer nextIndexKey = ByteBufferUtil.readWithShortLength(indexFile);
        {
            long firstRowPositionFromIndex = rowIndexEntrySerializer.deserializePositionAndSkip(indexFile);
            if (firstRowPositionFromIndex != 0)
                markAndThrow(new RuntimeException("firstRowPositionFromIndex != 0: " + firstRowPositionFromIndex));
        }
        List<Range<Token>> ownedRanges = isOffline ? Collections.emptyList() : Range.normalize(tokenLookup.apply(cfs.metadata().keyspace));
        RangeOwnHelper rangeOwnHelper = new RangeOwnHelper(ownedRanges);
        DecoratedKey prevKey = null;
        while (!dataFile.isEOF()) {
            if (verifyInfo.isStopRequested())
                throw new CompactionInterruptedException(verifyInfo.getCompactionInfo());
            rowStart = dataFile.getFilePointer();
            outputHandler.debug("Reading row at " + rowStart);
            DecoratedKey key = null;
            try {
                key = sstable.decorateKey(ByteBufferUtil.readWithShortLength(dataFile));
            } catch (Throwable th) {
                throwIfFatal(th);
                // check for null key below
            }
            if (options.checkOwnsTokens && ownedRanges.size() > 0 && !(cfs.getPartitioner() instanceof LocalPartitioner)) {
                try {
                    rangeOwnHelper.validate(key);
                } catch (Throwable t) {
                    outputHandler.warn(String.format("Key %s in sstable %s not owned by local ranges %s", key, sstable, ownedRanges), t);
                    markAndThrow(t);
                }
            }
            ByteBuffer currentIndexKey = nextIndexKey;
            long nextRowPositionFromIndex = 0;
            try {
                nextIndexKey = indexFile.isEOF() ? null : ByteBufferUtil.readWithShortLength(indexFile);
                nextRowPositionFromIndex = indexFile.isEOF() ? dataFile.length() : rowIndexEntrySerializer.deserializePositionAndSkip(indexFile);
            } catch (Throwable th) {
                markAndThrow(th);
            }
            long dataStart = dataFile.getFilePointer();
            long dataStartFromIndex = currentIndexKey == null ? -1 : rowStart + 2 + currentIndexKey.remaining();
            long dataSize = nextRowPositionFromIndex - dataStartFromIndex;
            // avoid an NPE if key is null
            String keyName = key == null ? "(unreadable key)" : ByteBufferUtil.bytesToHex(key.getKey());
            outputHandler.debug(String.format("row %s is %s", keyName, FBUtilities.prettyPrintMemory(dataSize)));
            assert currentIndexKey != null || indexFile.isEOF();
            try {
                if (key == null || dataSize > dataFile.length())
                    markAndThrow(new RuntimeException(String.format("key = %s, dataSize=%d, dataFile.length() = %d", key, dataSize, dataFile.length())));
                // mimic the scrub read path, intentionally unused
                try (UnfilteredRowIterator iterator = SSTableIdentityIterator.create(sstable, dataFile, key)) {
                }
                if ((prevKey != null && prevKey.compareTo(key) > 0) || !key.getKey().equals(currentIndexKey) || dataStart != dataStartFromIndex)
                    markAndThrow(new RuntimeException("Key out of order: previous = " + prevKey + " : current = " + key));
                goodRows++;
                prevKey = key;
                outputHandler.debug(String.format("Row %s at %s valid, moving to next row at %s ", goodRows, rowStart, nextRowPositionFromIndex));
                dataFile.seek(nextRowPositionFromIndex);
            } catch (Throwable th) {
                markAndThrow(th);
            }
        }
    } catch (Throwable t) {
        throw Throwables.propagate(t);
    } finally {
        controller.close();
    }
    outputHandler.output("Verify of " + sstable + " succeeded. All " + goodRows + " rows read successfully");
}
Also used : MetadataComponent(org.apache.cassandra.io.sstable.metadata.MetadataComponent) UnfilteredRowIterator(org.apache.cassandra.db.rows.UnfilteredRowIterator) KeyIterator(org.apache.cassandra.io.sstable.KeyIterator) MetadataType(org.apache.cassandra.io.sstable.metadata.MetadataType) IOException(java.io.IOException) Range(org.apache.cassandra.dht.Range) ByteBuffer(java.nio.ByteBuffer) LocalPartitioner(org.apache.cassandra.dht.LocalPartitioner) File(org.apache.cassandra.io.util.File)
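
The partitioner check at the top of verify() can also be run standalone against any sstable. A minimal sketch using only the metadata API shown above; expectedPartitioner is a hypothetical variable standing in for whatever partitioner the table is supposed to use:

Descriptor desc = sstable.descriptor;
Map<MetadataType, MetadataComponent> meta =
    desc.getMetadataSerializer().deserialize(desc, EnumSet.of(MetadataType.VALIDATION));
ValidationMetadata validation = (ValidationMetadata) meta.get(MetadataType.VALIDATION);
// null-check first: a damaged sstable may lack VALIDATION metadata entirely
if (validation != null && !validation.partitioner.equals(expectedPartitioner.getClass().getCanonicalName()))
    throw new IOException("Partitioner does not match validation metadata: " + validation.partitioner);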

Aggregations

MetadataComponent (org.apache.cassandra.io.sstable.metadata.MetadataComponent)6 MetadataType (org.apache.cassandra.io.sstable.metadata.MetadataType)6 SerializationHeader (org.apache.cassandra.db.SerializationHeader)5 IOException (java.io.IOException)4 TableMetadata (org.apache.cassandra.schema.TableMetadata)4 ByteBuffer (java.nio.ByteBuffer)3 ColumnIdentifier (org.apache.cassandra.cql3.ColumnIdentifier)2 AbstractType (org.apache.cassandra.db.marshal.AbstractType)2 UnfilteredRowIterator (org.apache.cassandra.db.rows.UnfilteredRowIterator)2 IPartitioner (org.apache.cassandra.dht.IPartitioner)2 File (org.apache.cassandra.io.util.File)2 MinMaxPriorityQueue (com.google.common.collect.MinMaxPriorityQueue)1 DataInputStream (java.io.DataInputStream)1 PrintStream (java.io.PrintStream)1 PrintWriter (java.io.PrintWriter)1 Files (java.nio.file.Files)1 Arrays (java.util.Arrays)1 Comparator (java.util.Comparator)1 EnumSet (java.util.EnumSet)1 LinkedHashMap (java.util.LinkedHashMap)1