
Example 1 with CompressedSequentialWriter

Use of org.apache.cassandra.io.compress.CompressedSequentialWriter in project cassandra by apache.

From the class BigTableWriter, method openFinal.

@SuppressWarnings("resource")
private SSTableReader openFinal(SSTableReader.OpenReason openReason) {
    if (maxDataAge < 0)
        maxDataAge = currentTimeMillis();
    IndexSummary indexSummary = null;
    FileHandle ifile = null;
    FileHandle dfile = null;
    SSTableReader sstable = null;
    try {
        StatsMetadata stats = statsMetadata();
        // finalize in-memory state for the reader
        indexSummary = iwriter.summary.build(metadata().partitioner);
        long indexFileLength = new File(descriptor.filenameFor(Component.PRIMARY_INDEX)).length();
        int dataBufferSize = optimizationStrategy.bufferSize(stats.estimatedPartitionSize.percentile(DatabaseDescriptor.getDiskOptimizationEstimatePercentile()));
        int indexBufferSize = optimizationStrategy.bufferSize(indexFileLength / indexSummary.size());
        ifile = iwriter.builder.bufferSize(indexBufferSize).complete();
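        // the data file is complete at this point, so open(0) exposes compression
        // metadata for the full file written by the CompressedSequentialWriter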
        if (compression)
            dbuilder.withCompressionMetadata(((CompressedSequentialWriter) dataFile).open(0));
        dfile = dbuilder.bufferSize(dataBufferSize).complete();
        invalidateCacheAtBoundary(dfile);
        sstable = SSTableReader.internalOpen(descriptor, components, metadata, ifile, dfile, indexSummary, iwriter.bf.sharedCopy(), maxDataAge, stats, openReason, header);
        sstable.first = getMinimalKey(first);
        sstable.last = getMinimalKey(last);
        return sstable;
    } catch (Throwable t) {
        JVMStabilityInspector.inspectThrowable(t);
        // If we successfully created our sstable, we can rely on its InstanceTidier to clean things up for us
        if (sstable != null)
            sstable.selfRef().release();
        else
            Stream.of(indexSummary, ifile, dfile).filter(Objects::nonNull).forEach(SharedCloseableImpl::close);
        throw t;
    }
}
Also used : StatsMetadata(org.apache.cassandra.io.sstable.metadata.StatsMetadata) CompressedSequentialWriter(org.apache.cassandra.io.compress.CompressedSequentialWriter) SharedCloseableImpl(org.apache.cassandra.utils.concurrent.SharedCloseableImpl) SSTableReader(org.apache.cassandra.io.sstable.format.SSTableReader)

Example 2 with CompressedSequentialWriter

Use of org.apache.cassandra.io.compress.CompressedSequentialWriter in project cassandra by apache.

From the class BigTableWriter, method openEarly.

@SuppressWarnings("resource")
public SSTableReader openEarly() {
    // find the max (exclusive) readable key
    IndexSummaryBuilder.ReadableBoundary boundary = iwriter.getMaxReadable();
    if (boundary == null)
        return null;
    IndexSummary indexSummary = null;
    FileHandle ifile = null;
    FileHandle dfile = null;
    SSTableReader sstable = null;
    try {
        StatsMetadata stats = statsMetadata();
        assert boundary.indexLength > 0 && boundary.dataLength > 0;
        // open the reader early
        indexSummary = iwriter.summary.build(metadata().partitioner, boundary);
        long indexFileLength = new File(descriptor.filenameFor(Component.PRIMARY_INDEX)).length();
        int indexBufferSize = optimizationStrategy.bufferSize(indexFileLength / indexSummary.size());
        ifile = iwriter.builder.bufferSize(indexBufferSize).complete(boundary.indexLength);
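        // the data file is still being written, so expose compression metadata
        // only up to the flushed boundary (boundary.dataLength)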
        if (compression)
            dbuilder.withCompressionMetadata(((CompressedSequentialWriter) dataFile).open(boundary.dataLength));
        int dataBufferSize = optimizationStrategy.bufferSize(stats.estimatedPartitionSize.percentile(DatabaseDescriptor.getDiskOptimizationEstimatePercentile()));
        dfile = dbuilder.bufferSize(dataBufferSize).complete(boundary.dataLength);
        invalidateCacheAtBoundary(dfile);
        sstable = SSTableReader.internalOpen(descriptor, components, metadata, ifile, dfile, indexSummary, iwriter.bf.sharedCopy(), maxDataAge, stats, SSTableReader.OpenReason.EARLY, header);
        // now it's open, find the ACTUAL last readable key (i.e. for which the data file has also been flushed)
        sstable.first = getMinimalKey(first);
        sstable.last = getMinimalKey(boundary.lastKey);
        return sstable;
    } catch (Throwable t) {
        JVMStabilityInspector.inspectThrowable(t);
        // If we successfully created our sstable, we can rely on its InstanceTidier to clean things up for us
        if (sstable != null)
            sstable.selfRef().release();
        else
            Stream.of(indexSummary, ifile, dfile).filter(Objects::nonNull).forEach(SharedCloseableImpl::close);
        throw t;
    }
}
Also used : StatsMetadata(org.apache.cassandra.io.sstable.metadata.StatsMetadata) CompressedSequentialWriter(org.apache.cassandra.io.compress.CompressedSequentialWriter) SharedCloseableImpl(org.apache.cassandra.utils.concurrent.SharedCloseableImpl) SSTableReader(org.apache.cassandra.io.sstable.format.SSTableReader)
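
The difference from openFinal above is the boundary handling: openEarly completes the index and data handles only up to the last fully flushed position (boundary.indexLength and boundary.dataLength), and the compression metadata is opened at boundary.dataLength rather than 0, so the early reader never sees bytes that have not yet reached the data file.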

Example 3 with CompressedSequentialWriter

Use of org.apache.cassandra.io.compress.CompressedSequentialWriter in project cassandra by apache.

From the class MmappedRegionsTest, method testMapForCompressionMetadata.

@Test
public void testMapForCompressionMetadata() throws Exception {
    int OLD_MAX_SEGMENT_SIZE = MmappedRegions.MAX_SEGMENT_SIZE;
    MmappedRegions.MAX_SEGMENT_SIZE = 1024;
    ByteBuffer buffer = allocateBuffer(128 * 1024);
    File f = FileUtils.createTempFile("testMapForCompressionMetadata", "1");
    f.deleteOnExit();
    File cf = FileUtils.createTempFile(f.name() + ".metadata", "1");
    cf.deleteOnExit();
    MetadataCollector sstableMetadataCollector = new MetadataCollector(new ClusteringComparator(BytesType.instance));
    try (SequentialWriter writer = new CompressedSequentialWriter(f, cf.absolutePath(), null, SequentialWriterOption.DEFAULT, CompressionParams.snappy(), sstableMetadataCollector)) {
        writer.write(buffer);
        writer.finish();
    }
    CompressionMetadata metadata = new CompressionMetadata(cf.absolutePath(), f.length(), true);
    try (ChannelProxy channel = new ChannelProxy(f);
        MmappedRegions regions = MmappedRegions.map(channel, metadata)) {
        assertFalse(regions.isEmpty());
        int i = 0;
        while (i < buffer.capacity()) {
            CompressionMetadata.Chunk chunk = metadata.chunkFor(i);
            MmappedRegions.Region region = regions.floor(chunk.offset);
            assertNotNull(region);
            ByteBuffer compressedChunk = region.buffer.duplicate();
            assertNotNull(compressedChunk);
            assertEquals(chunk.length + 4, compressedChunk.capacity());
            assertEquals(chunk.offset, region.offset());
            assertEquals(chunk.offset + chunk.length + 4, region.end());
            i += metadata.chunkLength();
        }
    } finally {
        MmappedRegions.MAX_SEGMENT_SIZE = OLD_MAX_SEGMENT_SIZE;
        metadata.close();
    }
}
Also used : CompressedSequentialWriter(org.apache.cassandra.io.compress.CompressedSequentialWriter) CompressionMetadata(org.apache.cassandra.io.compress.CompressionMetadata) ClusteringComparator(org.apache.cassandra.db.ClusteringComparator) MetadataCollector(org.apache.cassandra.io.sstable.metadata.MetadataCollector) ByteBuffer(java.nio.ByteBuffer) Test(org.junit.Test)

Example 4 with CompressedSequentialWriter

Use of org.apache.cassandra.io.compress.CompressedSequentialWriter in project cassandra by apache.

From the class CompressedInputStreamTest, method testCompressedReadWith.

/**
 * @param valuesToCheck array of longs, each in the range [0, 999]
 * @throws Exception if writing or reading the compressed data fails
 */
private void testCompressedReadWith(long[] valuesToCheck, boolean testTruncate, boolean testException, double minCompressRatio) throws Exception {
    assert valuesToCheck != null && valuesToCheck.length > 0;
    // write compressed data file of longs
    File parentDir = new File(tempFolder.newFolder());
    Descriptor desc = new Descriptor(parentDir, "ks", "cf", 1);
    File tmp = new File(desc.filenameFor(Component.DATA));
    MetadataCollector collector = new MetadataCollector(new ClusteringComparator(BytesType.instance));
    CompressionParams param = CompressionParams.snappy(32, minCompressRatio);
    Map<Long, Long> index = new HashMap<>();
    try (CompressedSequentialWriter writer = new CompressedSequentialWriter(tmp, desc.filenameFor(Component.COMPRESSION_INFO), null, SequentialWriterOption.DEFAULT, param, collector)) {
        for (long l = 0L; l < 1000; l++) {
            index.put(l, writer.position());
            writer.writeLong(l);
        }
        writer.finish();
    }
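    // load the chunk index the writer produced alongside the data file (the -CompressionInfo component)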
    CompressionMetadata comp = CompressionMetadata.create(tmp.absolutePath());
    List<SSTableReader.PartitionPositionBounds> sections = new ArrayList<>();
    for (long l : valuesToCheck) {
        long position = index.get(l);
        sections.add(new SSTableReader.PartitionPositionBounds(position, position + 8));
    }
    CompressionMetadata.Chunk[] chunks = comp.getChunksForSections(sections);
    long totalSize = comp.getTotalSizeForSections(sections);
    long expectedSize = 0;
    // each chunk is stored with a 4-byte CRC appended
    for (CompressionMetadata.Chunk c : chunks)
        expectedSize += c.length + 4;
    assertEquals(expectedSize, totalSize);
    // buffer up only the relevant parts of the file
    int size = 0;
    // each chunk length plus its 4-byte CRC
    for (CompressionMetadata.Chunk c : chunks)
        size += (c.length + 4);
    byte[] toRead = new byte[size];
    try (RandomAccessReader f = RandomAccessReader.open(tmp)) {
        int pos = 0;
        for (CompressionMetadata.Chunk c : chunks) {
            f.seek(c.offset);
            pos += f.read(toRead, pos, c.length + 4);
        }
    }
    if (testTruncate) {
        byte[] actuallyRead = new byte[50];
        System.arraycopy(toRead, 0, actuallyRead, 0, 50);
        toRead = actuallyRead;
    }
    // read buffer using CompressedInputStream
    CompressionInfo info = CompressionInfo.newInstance(chunks, param);
    if (testException) {
        testException(sections, info);
        return;
    }
    CompressedInputStream input = new CompressedInputStream(new DataInputStreamPlus(new ByteArrayInputStream(toRead)), info, ChecksumType.CRC32, () -> 1.0);
    try (DataInputStream in = new DataInputStream(input)) {
        for (int i = 0; i < sections.size(); i++) {
            input.position(sections.get(i).lowerPosition);
            long readValue = in.readLong();
            assertEquals("expected " + valuesToCheck[i] + " but was " + readValue, valuesToCheck[i], readValue);
        }
    }
}
Also used : ClusteringComparator(org.apache.cassandra.db.ClusteringComparator) CompressionInfo(org.apache.cassandra.db.streaming.CompressionInfo) SSTableReader(org.apache.cassandra.io.sstable.format.SSTableReader) RandomAccessReader(org.apache.cassandra.io.util.RandomAccessReader) DataInputStreamPlus(org.apache.cassandra.io.util.DataInputPlus.DataInputStreamPlus) CompressedSequentialWriter(org.apache.cassandra.io.compress.CompressedSequentialWriter) CompressionMetadata(org.apache.cassandra.io.compress.CompressionMetadata) DataInputStream(java.io.DataInputStream) CompressionParams(org.apache.cassandra.schema.CompressionParams) CompressedInputStream(org.apache.cassandra.db.streaming.CompressedInputStream) ByteArrayInputStream(java.io.ByteArrayInputStream) Descriptor(org.apache.cassandra.io.sstable.Descriptor) DatabaseDescriptor(org.apache.cassandra.config.DatabaseDescriptor) MetadataCollector(org.apache.cassandra.io.sstable.metadata.MetadataCollector) File(org.apache.cassandra.io.util.File)
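
Distilled from Examples 3 and 4 above, the sketch below shows the basic write-then-inspect pattern in isolation: write through a CompressedSequentialWriter, then load the companion metadata file that maps uncompressed positions to compressed chunks. The temp-file names are illustrative, the imports mirror Example 3's "Also used" list, and the CompressionMetadata constructor arguments follow Example 3's usage; treat this as a sketch of the pattern rather than canonical API documentation.

// Minimal sketch, assuming the constructor signatures shown in Examples 3 and 4.
File data = FileUtils.createTempFile("csw-sketch", "1");
data.deleteOnExit();
File meta = FileUtils.createTempFile(data.name() + ".metadata", "1");
meta.deleteOnExit();
MetadataCollector collector = new MetadataCollector(new ClusteringComparator(BytesType.instance));
try (CompressedSequentialWriter writer = new CompressedSequentialWriter(data, meta.absolutePath(), null, SequentialWriterOption.DEFAULT, CompressionParams.snappy(), collector)) {
    // everything written here is compressed chunk-by-chunk into the data file
    writer.writeLong(42L);
    // finish() flushes the last chunk and writes the companion metadata file
    writer.finish();
}
// load the uncompressed-position -> compressed-chunk mapping (constructor usage as in Example 3)
CompressionMetadata metadata = new CompressionMetadata(meta.absolutePath(), data.length(), true);
try {
    // chunkFor(uncompressedPosition) locates the compressed chunk holding that position
    CompressionMetadata.Chunk chunk = metadata.chunkFor(0); // first chunk starts at compressed offset 0
} finally {
    metadata.close();
}

Note that the writer always produces two artifacts, the compressed data file and the chunk-map metadata file, and the readers in Examples 3 and 4 open the pair together.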

Aggregations

CompressedSequentialWriter (org.apache.cassandra.io.compress.CompressedSequentialWriter): 4
SSTableReader (org.apache.cassandra.io.sstable.format.SSTableReader): 3
ClusteringComparator (org.apache.cassandra.db.ClusteringComparator): 2
CompressionMetadata (org.apache.cassandra.io.compress.CompressionMetadata): 2
MetadataCollector (org.apache.cassandra.io.sstable.metadata.MetadataCollector): 2
StatsMetadata (org.apache.cassandra.io.sstable.metadata.StatsMetadata): 2
SharedCloseableImpl (org.apache.cassandra.utils.concurrent.SharedCloseableImpl): 2
ByteArrayInputStream (java.io.ByteArrayInputStream): 1
DataInputStream (java.io.DataInputStream): 1
ByteBuffer (java.nio.ByteBuffer): 1
DatabaseDescriptor (org.apache.cassandra.config.DatabaseDescriptor): 1
CompressedInputStream (org.apache.cassandra.db.streaming.CompressedInputStream): 1
CompressionInfo (org.apache.cassandra.db.streaming.CompressionInfo): 1
Descriptor (org.apache.cassandra.io.sstable.Descriptor): 1
DataInputStreamPlus (org.apache.cassandra.io.util.DataInputPlus.DataInputStreamPlus): 1
File (org.apache.cassandra.io.util.File): 1
RandomAccessReader (org.apache.cassandra.io.util.RandomAccessReader): 1
CompressionParams (org.apache.cassandra.schema.CompressionParams): 1
Test (org.junit.Test): 1