
Example 1 with SerializationHeader

Use of org.apache.cassandra.db.SerializationHeader in project cassandra by apache.

From class LogTransactionTest, method sstable.

private static SSTableReader sstable(File dataFolder, ColumnFamilyStore cfs, int generation, int size) throws IOException {
    Descriptor descriptor = new Descriptor(dataFolder, cfs.keyspace.getName(), cfs.getTableName(), generation, SSTableFormat.Type.BIG);
    Set<Component> components = ImmutableSet.of(Component.DATA, Component.PRIMARY_INDEX, Component.FILTER, Component.TOC);

    // Materialize each component on disk and pad it to the requested size.
    for (Component component : components) {
        File file = new File(descriptor.filenameFor(component));
        if (!file.exists())
            assertTrue(file.createFileIfNotExists());
        Util.setFileLength(file, size);
    }

    FileHandle dFile = new FileHandle.Builder(descriptor.filenameFor(Component.DATA)).complete();
    FileHandle iFile = new FileHandle.Builder(descriptor.filenameFor(Component.PRIMARY_INDEX)).complete();

    // An empty header (no clustering columns, no stats) is sufficient for a mock reader.
    SerializationHeader header = SerializationHeader.make(cfs.metadata(), Collections.emptyList());
    StatsMetadata metadata = (StatsMetadata) new MetadataCollector(cfs.metadata().comparator)
            .finalizeMetadata(cfs.metadata().partitioner.getClass().getCanonicalName(), 0.01f, -1, null, false, header)
            .get(MetadataType.STATS);

    SSTableReader reader = SSTableReader.internalOpen(descriptor, components, cfs.metadata, iFile, dFile,
            MockSchema.indexSummary.sharedCopy(), new AlwaysPresentFilter(), 1L, metadata,
            SSTableReader.OpenReason.NORMAL, header);
    reader.first = reader.last = MockSchema.readerBounds(generation);
    return reader;
}
Also used: StatsMetadata (org.apache.cassandra.io.sstable.metadata.StatsMetadata), SSTableReader (org.apache.cassandra.io.sstable.format.SSTableReader), AlwaysPresentFilter (org.apache.cassandra.utils.AlwaysPresentFilter), SerializationHeader (org.apache.cassandra.db.SerializationHeader), FileHandle (org.apache.cassandra.io.util.FileHandle), Descriptor (org.apache.cassandra.io.sstable.Descriptor), Component (org.apache.cassandra.io.sstable.Component), MetadataCollector (org.apache.cassandra.io.sstable.metadata.MetadataCollector), File (org.apache.cassandra.io.util.File)
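For orientation, here is a minimal sketch of how a helper like this is typically exercised; the MockSchema.newCFS() call and the temporary data folder are assumptions for illustration, not part of the excerpt above.

// Hedged usage sketch, not the original test body.
ColumnFamilyStore cfs = MockSchema.newCFS();  // assumption: mock store as in other Cassandra tests
File dataFolder = new File(Files.createTempDirectory("transaction-test").toString());
SSTableReader reader = sstable(dataFolder, cfs, 0, 128);
// The mock reader is backed by real, zero-padded component files on disk,
// so transaction-log code can list and delete them as for a real sstable.
assertTrue(new File(reader.descriptor.filenameFor(Component.DATA)).exists());
reader.selfRef().release();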

Example 2 with SerializationHeader

Use of org.apache.cassandra.db.SerializationHeader in project cassandra by apache.

From class CassandraStreamHeaderTest, method header.

private CassandraStreamHeader header(boolean entireSSTable, boolean compressed) {
    List<Range<Token>> requestedRanges = Collections.singletonList(new Range<>(store.getPartitioner().getMinimumToken(), sstable.last.getToken()));
    requestedRanges = Range.normalize(requestedRanges);
    List<SSTableReader.PartitionPositionBounds> sections = sstable.getPositionsForRanges(requestedRanges);
    CompressionInfo compressionInfo = compressed ? CompressionInfo.newLazyInstance(sstable.getCompressionMetadata(), sections) : null;
    TableMetadata metadata = store.metadata();
    SerializationHeader.Component serializationHeader = SerializationHeader.makeWithoutStats(metadata).toComponent();
    // Entire-sstable streaming additionally carries the component manifest and the first key.
    ComponentManifest componentManifest = entireSSTable ? ComponentManifest.create(sstable.descriptor) : null;
    DecoratedKey firstKey = entireSSTable ? sstable.first : null;
    return CassandraStreamHeader.builder()
                                .withSSTableFormat(SSTableFormat.Type.BIG)
                                .withSSTableVersion(BigFormat.latestVersion)
                                .withSSTableLevel(0)
                                .withEstimatedKeys(10)
                                .withCompressionInfo(compressionInfo)
                                .withSections(sections)
                                .isEntireSSTable(entireSSTable)
                                .withComponentManifest(componentManifest)
                                .withFirstKey(firstKey)
                                .withSerializationHeader(serializationHeader)
                                .withTableId(metadata.id)
                                .build();
}
Also used: TableMetadata (org.apache.cassandra.schema.TableMetadata), SerializationHeader (org.apache.cassandra.db.SerializationHeader), DecoratedKey (org.apache.cassandra.db.DecoratedKey), Range (org.apache.cassandra.dht.Range)
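The builder output is typically validated with a serialization round trip. The following sketch assumes CassandraStreamHeader.serializer and the usual Cassandra buffer types (DataOutputBuffer, DataInputBuffer); it is illustrative rather than the original test body.

// Hedged round-trip sketch; serializer and messaging-version usage assumed.
CassandraStreamHeader built = header(true, false);
try (DataOutputBuffer out = new DataOutputBuffer()) {
    CassandraStreamHeader.serializer.serialize(built, out, MessagingService.current_version);
    try (DataInputBuffer in = new DataInputBuffer(out.buffer(), false)) {
        CassandraStreamHeader decoded = CassandraStreamHeader.serializer.deserialize(in, MessagingService.current_version);
        assertEquals(built, decoded);  // headers should survive the round trip unchanged
    }
}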

Example 3 with SerializationHeader

Use of org.apache.cassandra.db.SerializationHeader in project cassandra by apache.

From class SSTableFlushObserverTest, method testFlushObserver.

@Test
public void testFlushObserver() {
    TableMetadata cfm = TableMetadata.builder(KS_NAME, CF_NAME)
                                     .addPartitionKeyColumn("id", UTF8Type.instance)
                                     .addRegularColumn("first_name", UTF8Type.instance)
                                     .addRegularColumn("age", Int32Type.instance)
                                     .addRegularColumn("height", LongType.instance)
                                     .build();
    LifecycleTransaction transaction = LifecycleTransaction.offline(OperationType.COMPACTION);
    FlushObserver observer = new FlushObserver();

    String sstableDirectory = DatabaseDescriptor.getAllDataFileLocations()[0];
    File directory = new File(sstableDirectory + File.pathSeparator() + KS_NAME + File.pathSeparator() + CF_NAME);
    directory.deleteOnExit();
    if (!directory.exists() && !directory.tryCreateDirectories())
        throw new FSWriteError(new IOException("failed to create tmp directory"), directory.absolutePath());

    SSTableFormat.Type sstableFormat = SSTableFormat.Type.current();
    // The observer is registered with the writer and is notified for every
    // partition and row that gets flushed.
    BigTableWriter writer = new BigTableWriter(
            new Descriptor(sstableFormat.info.getLatestVersion(), directory, KS_NAME, CF_NAME, 0, sstableFormat),
            10L, 0L, null, false,
            TableMetadataRef.forOfflineTools(cfm),
            new MetadataCollector(cfm.comparator).sstableLevel(0),
            new SerializationHeader(true, cfm, cfm.regularAndStaticColumns(), EncodingStats.NO_STATS),
            Collections.singletonList(observer),
            transaction);

    SSTableReader reader = null;
    Multimap<ByteBuffer, Cell<?>> expected = ArrayListMultimap.create();
    try {
        final long now = System.currentTimeMillis();

        ByteBuffer key = UTF8Type.instance.fromString("key1");
        expected.putAll(key, Arrays.asList(
                BufferCell.live(getColumn(cfm, "age"), now, Int32Type.instance.decompose(27)),
                BufferCell.live(getColumn(cfm, "first_name"), now, UTF8Type.instance.fromString("jack")),
                BufferCell.live(getColumn(cfm, "height"), now, LongType.instance.decompose(183L))));
        writer.append(new RowIterator(cfm, key.duplicate(), Collections.singletonList(buildRow(expected.get(key)))));

        key = UTF8Type.instance.fromString("key2");
        expected.putAll(key, Arrays.asList(
                BufferCell.live(getColumn(cfm, "age"), now, Int32Type.instance.decompose(30)),
                BufferCell.live(getColumn(cfm, "first_name"), now, UTF8Type.instance.fromString("jim")),
                BufferCell.live(getColumn(cfm, "height"), now, LongType.instance.decompose(180L))));
        writer.append(new RowIterator(cfm, key, Collections.singletonList(buildRow(expected.get(key)))));

        key = UTF8Type.instance.fromString("key3");
        expected.putAll(key, Arrays.asList(
                BufferCell.live(getColumn(cfm, "age"), now, Int32Type.instance.decompose(30)),
                BufferCell.live(getColumn(cfm, "first_name"), now, UTF8Type.instance.fromString("ken")),
                BufferCell.live(getColumn(cfm, "height"), now, LongType.instance.decompose(178L))));
        writer.append(new RowIterator(cfm, key, Collections.singletonList(buildRow(expected.get(key)))));

        reader = writer.finish(true);
    } finally {
        FileUtils.closeQuietly(writer);
    }

    Assert.assertTrue(observer.isComplete);
    Assert.assertEquals(expected.size(), observer.rows.size());

    // For each observed (key, index position) pair, re-read the index at that
    // position and check that the stored key and the observed cells match.
    for (Pair<ByteBuffer, Long> e : observer.rows.keySet()) {
        ByteBuffer key = e.left;
        Long indexPosition = e.right;
        try (FileDataInput index = reader.ifile.createReader(indexPosition)) {
            ByteBuffer indexKey = ByteBufferUtil.readWithShortLength(index);
            Assert.assertEquals(0, UTF8Type.instance.compare(key, indexKey));
        } catch (IOException ex) {
            throw new FSReadError(ex, reader.getIndexFilename());
        }
        Assert.assertEquals(expected.get(key), observer.rows.get(e));
    }
}
Also used: TableMetadata (org.apache.cassandra.schema.TableMetadata), FSWriteError (org.apache.cassandra.io.FSWriteError), LifecycleTransaction (org.apache.cassandra.db.lifecycle.LifecycleTransaction), IOException (java.io.IOException), ByteBuffer (java.nio.ByteBuffer), FileDataInput (org.apache.cassandra.io.util.FileDataInput), SerializationHeader (org.apache.cassandra.db.SerializationHeader), FSReadError (org.apache.cassandra.io.FSReadError), BigTableWriter (org.apache.cassandra.io.sstable.format.big.BigTableWriter), Descriptor (org.apache.cassandra.io.sstable.Descriptor), DatabaseDescriptor (org.apache.cassandra.config.DatabaseDescriptor), MetadataCollector (org.apache.cassandra.io.sstable.metadata.MetadataCollector), File (org.apache.cassandra.io.util.File), Test (org.junit.Test)
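The FlushObserver referenced above is a private helper class inside the test. A minimal sketch of what such an SSTableFlushObserver implementation can look like follows; it assumes the 4.0-era interface (begin, startPartition, nextUnfilteredCluster, complete) and mirrors the rows/isComplete fields the assertions rely on. The exact private implementation in the test may differ.

// Hedged sketch of the observer; shape matches the assertions above.
static class FlushObserver implements SSTableFlushObserver {
    final Multimap<Pair<ByteBuffer, Long>, Cell<?>> rows = ArrayListMultimap.create();
    private Pair<ByteBuffer, Long> currentKey;
    boolean isComplete;

    public void begin() {}

    public void startPartition(DecoratedKey key, long indexPosition) {
        // Remember (partition key, index position); the test later re-reads
        // the index at this position and compares keys.
        currentKey = Pair.create(key.getKey(), indexPosition);
    }

    public void nextUnfilteredCluster(Unfiltered unfiltered) {
        // Collect every cell of each flushed row under the current partition key.
        if (unfiltered.isRow())
            ((Row) unfiltered).forEach(cd -> rows.put(currentKey, (Cell<?>) cd));
    }

    public void complete() {
        isComplete = true;
    }
}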

Example 4 with SerializationHeader

Use of org.apache.cassandra.db.SerializationHeader in project cassandra by apache.

From class SSTableRewriterTest, method writeFiles.

public static Set<SSTableReader> writeFiles(ColumnFamilyStore cfs, int fileCount, int partitionCount, int cellCount) {
    int i = 0;
    Set<SSTableReader> result = new LinkedHashSet<>();
    for (int f = 0; f < fileCount; f++) {
        File dir = cfs.getDirectories().getDirectoryForNewSSTables();
        Descriptor desc = cfs.newSSTableDescriptor(dir);
        SerializationHeader header = new SerializationHeader(true, cfs.metadata(), cfs.metadata().regularAndStaticColumns(), EncodingStats.NO_STATS);
        try (SSTableTxnWriter writer = SSTableTxnWriter.create(cfs, desc, 0, 0, null, false, header)) {
            // Spread partitionCount partitions evenly across fileCount sstables;
            // the last file takes any remainder.
            int end = f == fileCount - 1 ? partitionCount : ((f + 1) * partitionCount) / fileCount;
            for (; i < end; i++) {
                UpdateBuilder builder = UpdateBuilder.create(cfs.metadata(), ByteBufferUtil.bytes(i));
                for (int j = 0; j < cellCount; j++)
                    builder.newRow(Integer.toString(i)).add("val", random(0, 1000));
                writer.append(builder.build().unfilteredIterator());
            }
            result.addAll(writer.finish(true));
        }
    }
    return result;
}
Also used: SSTableReader (org.apache.cassandra.io.sstable.format.SSTableReader), SerializationHeader (org.apache.cassandra.db.SerializationHeader), UpdateBuilder (org.apache.cassandra.UpdateBuilder), RowUpdateBuilder (org.apache.cassandra.db.RowUpdateBuilder), File (org.apache.cassandra.io.util.File)
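A typical call site, sketched under the assumption of KEYSPACE/CF test constants prepared the way SSTableRewriterTest's other methods do; the constant names are illustrative.

// Hedged usage sketch; KEYSPACE and CF are assumed test constants.
ColumnFamilyStore cfs = Keyspace.open(KEYSPACE).getColumnFamilyStore(CF);
cfs.truncateBlocking();
Set<SSTableReader> sstables = writeFiles(cfs, 4, 400, 10);  // 4 sstables, 400 partitions, 10 cells per partition
cfs.addSSTables(sstables);
assertEquals(4, cfs.getLiveSSTables().size());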

Aggregations

SerializationHeader (org.apache.cassandra.db.SerializationHeader): 4 uses
File (org.apache.cassandra.io.util.File): 3 uses
Descriptor (org.apache.cassandra.io.sstable.Descriptor): 2 uses
SSTableReader (org.apache.cassandra.io.sstable.format.SSTableReader): 2 uses
MetadataCollector (org.apache.cassandra.io.sstable.metadata.MetadataCollector): 2 uses
TableMetadata (org.apache.cassandra.schema.TableMetadata): 2 uses
IOException (java.io.IOException): 1 use
ByteBuffer (java.nio.ByteBuffer): 1 use
UpdateBuilder (org.apache.cassandra.UpdateBuilder): 1 use
DatabaseDescriptor (org.apache.cassandra.config.DatabaseDescriptor): 1 use
DecoratedKey (org.apache.cassandra.db.DecoratedKey): 1 use
RowUpdateBuilder (org.apache.cassandra.db.RowUpdateBuilder): 1 use
LifecycleTransaction (org.apache.cassandra.db.lifecycle.LifecycleTransaction): 1 use
Range (org.apache.cassandra.dht.Range): 1 use
FSReadError (org.apache.cassandra.io.FSReadError): 1 use
FSWriteError (org.apache.cassandra.io.FSWriteError): 1 use
Component (org.apache.cassandra.io.sstable.Component): 1 use
BigTableWriter (org.apache.cassandra.io.sstable.format.big.BigTableWriter): 1 use
StatsMetadata (org.apache.cassandra.io.sstable.metadata.StatsMetadata): 1 use
FileDataInput (org.apache.cassandra.io.util.FileDataInput): 1 use