Search in sources :

Example 6 with MetadataCollector

Use of org.apache.cassandra.io.sstable.metadata.MetadataCollector in the Apache Cassandra project.

From class LogTransactionTest, method sstable:

/**
 * Builds a mock {@link SSTableReader} for testing, backed by real on-disk component
 * files that are created (if absent) and padded with zeroes to the requested size.
 *
 * @param dataFolder directory in which the sstable component files are created
 * @param cfs        table the mock sstable belongs to (supplies metadata/comparator)
 * @param generation sstable generation number, also used for the reader bounds
 * @param size       length in bytes each component file is extended to
 * @return an open reader over the mock components
 * @throws IOException if a component file cannot be created or resized
 */
private static SSTableReader sstable(File dataFolder, ColumnFamilyStore cfs, int generation, int size) throws IOException {
    Descriptor desc = new Descriptor(dataFolder, cfs.keyspace.getName(), cfs.getTableName(), generation, SSTableFormat.Type.BIG);
    Set<Component> requiredComponents = ImmutableSet.of(Component.DATA, Component.PRIMARY_INDEX, Component.FILTER, Component.TOC);
    // Materialize every component on disk and pad it to the requested length.
    for (Component component : requiredComponents) {
        File componentFile = new File(desc.filenameFor(component));
        if (!componentFile.exists())
            assertTrue(componentFile.createNewFile());
        try (RandomAccessFile raf = new RandomAccessFile(componentFile, "rw")) {
            raf.setLength(size);
        }
    }
    FileHandle dataHandle = new FileHandle.Builder(desc.filenameFor(Component.DATA)).complete();
    FileHandle indexHandle = new FileHandle.Builder(desc.filenameFor(Component.PRIMARY_INDEX)).complete();
    SerializationHeader serializationHeader = SerializationHeader.make(cfs.metadata(), Collections.emptyList());
    // Minimal stats metadata: just enough for internalOpen() to construct the reader.
    StatsMetadata stats = (StatsMetadata) new MetadataCollector(cfs.metadata().comparator)
            .finalizeMetadata(cfs.metadata().partitioner.getClass().getCanonicalName(), 0.01f, -1, null, serializationHeader)
            .get(MetadataType.STATS);
    SSTableReader reader = SSTableReader.internalOpen(desc, requiredComponents, cfs.metadata,
                                                      dataHandle, indexHandle,
                                                      MockSchema.indexSummary.sharedCopy(),
                                                      new AlwaysPresentFilter(),
                                                      1L, stats, SSTableReader.OpenReason.NORMAL,
                                                      serializationHeader);
    // Give the reader token bounds so lifecycle code can treat it like a real sstable.
    reader.first = reader.last = MockSchema.readerBounds(generation);
    return reader;
}
Also used : StatsMetadata(org.apache.cassandra.io.sstable.metadata.StatsMetadata) SSTableReader(org.apache.cassandra.io.sstable.format.SSTableReader) AlwaysPresentFilter(org.apache.cassandra.utils.AlwaysPresentFilter) RandomAccessFile(java.io.RandomAccessFile) SerializationHeader(org.apache.cassandra.db.SerializationHeader) FileHandle(org.apache.cassandra.io.util.FileHandle) MetadataCollector(org.apache.cassandra.io.sstable.metadata.MetadataCollector) RandomAccessFile(java.io.RandomAccessFile) File(java.io.File)

Example 7 with MetadataCollector

Use of org.apache.cassandra.io.sstable.metadata.MetadataCollector in the Apache Cassandra project.

From class SSTableFlushObserverTest, method testFlushObserver:

@Test
public void testFlushObserver() {
    // Verifies that a FlushObserver registered with a BigTableWriter sees every
    // appended row and that the recorded index positions point back at the row keys.
    TableMetadata cfm = TableMetadata.builder(KS_NAME, CF_NAME).addPartitionKeyColumn("id", UTF8Type.instance).addRegularColumn("first_name", UTF8Type.instance).addRegularColumn("age", Int32Type.instance).addRegularColumn("height", LongType.instance).build();
    LifecycleTransaction transaction = LifecycleTransaction.offline(OperationType.COMPACTION);
    FlushObserver observer = new FlushObserver();
    String sstableDirectory = DatabaseDescriptor.getAllDataFileLocations()[0];
    // BUG FIX: File.pathSeparator is the PATH-list delimiter (':' / ';'), not the
    // name separator; joining path components requires File.separator ('/' or '\\').
    File directory = new File(sstableDirectory + File.separator + KS_NAME + File.separator + CF_NAME);
    directory.deleteOnExit();
    if (!directory.exists() && !directory.mkdirs())
        throw new FSWriteError(new IOException("failed to create tmp directory"), directory.getAbsolutePath());
    SSTableFormat.Type sstableFormat = SSTableFormat.Type.current();
    BigTableWriter writer = new BigTableWriter(new Descriptor(sstableFormat.info.getLatestVersion(), directory, KS_NAME, CF_NAME, 0, sstableFormat), 10L, 0L, null, TableMetadataRef.forOfflineTools(cfm), new MetadataCollector(cfm.comparator).sstableLevel(0), new SerializationHeader(true, cfm, cfm.regularAndStaticColumns(), EncodingStats.NO_STATS), Collections.singletonList(observer), transaction);
    SSTableReader reader = null;
    // expected: key -> cells we appended, used below to validate what the observer saw.
    Multimap<ByteBuffer, Cell> expected = ArrayListMultimap.create();
    try {
        final long now = System.currentTimeMillis();
        ByteBuffer key = UTF8Type.instance.fromString("key1");
        expected.putAll(key, Arrays.asList(BufferCell.live(getColumn(cfm, "age"), now, Int32Type.instance.decompose(27)), BufferCell.live(getColumn(cfm, "first_name"), now, UTF8Type.instance.fromString("jack")), BufferCell.live(getColumn(cfm, "height"), now, LongType.instance.decompose(183L))));
        writer.append(new RowIterator(cfm, key.duplicate(), Collections.singletonList(buildRow(expected.get(key)))));
        key = UTF8Type.instance.fromString("key2");
        expected.putAll(key, Arrays.asList(BufferCell.live(getColumn(cfm, "age"), now, Int32Type.instance.decompose(30)), BufferCell.live(getColumn(cfm, "first_name"), now, UTF8Type.instance.fromString("jim")), BufferCell.live(getColumn(cfm, "height"), now, LongType.instance.decompose(180L))));
        writer.append(new RowIterator(cfm, key, Collections.singletonList(buildRow(expected.get(key)))));
        key = UTF8Type.instance.fromString("key3");
        expected.putAll(key, Arrays.asList(BufferCell.live(getColumn(cfm, "age"), now, Int32Type.instance.decompose(30)), BufferCell.live(getColumn(cfm, "first_name"), now, UTF8Type.instance.fromString("ken")), BufferCell.live(getColumn(cfm, "height"), now, LongType.instance.decompose(178L))));
        writer.append(new RowIterator(cfm, key, Collections.singletonList(buildRow(expected.get(key)))));
        reader = writer.finish(true);
    } finally {
        FileUtils.closeQuietly(writer);
    }
    Assert.assertTrue(observer.isComplete);
    Assert.assertEquals(expected.size(), observer.rows.size());
    // For each (key, indexPosition) the observer recorded, re-read the key from the
    // index file at that position and confirm it matches.
    for (Pair<ByteBuffer, Long> e : observer.rows.keySet()) {
        ByteBuffer key = e.left;
        Long indexPosition = e.right;
        try (FileDataInput index = reader.ifile.createReader(indexPosition)) {
            ByteBuffer indexKey = ByteBufferUtil.readWithShortLength(index);
            Assert.assertEquals(0, UTF8Type.instance.compare(key, indexKey));
        } catch (IOException ex) {
            throw new FSReadError(ex, reader.getIndexFilename());
        }
        Assert.assertEquals(expected.get(key), observer.rows.get(e));
    }
}
Also used : TableMetadata(org.apache.cassandra.schema.TableMetadata) FSWriteError(org.apache.cassandra.io.FSWriteError) LifecycleTransaction(org.apache.cassandra.db.lifecycle.LifecycleTransaction) IOException(java.io.IOException) ByteBuffer(java.nio.ByteBuffer) FileDataInput(org.apache.cassandra.io.util.FileDataInput) SerializationHeader(org.apache.cassandra.db.SerializationHeader) FSReadError(org.apache.cassandra.io.FSReadError) BigTableWriter(org.apache.cassandra.io.sstable.format.big.BigTableWriter) Descriptor(org.apache.cassandra.io.sstable.Descriptor) DatabaseDescriptor(org.apache.cassandra.config.DatabaseDescriptor) MetadataCollector(org.apache.cassandra.io.sstable.metadata.MetadataCollector) File(java.io.File) Test(org.junit.Test)

Example 8 with MetadataCollector

Use of org.apache.cassandra.io.sstable.metadata.MetadataCollector in the Apache Cassandra project.

From class CompressedRandomAccessReaderTest, method testDataCorruptionDetection:

/**
 * If the data read out doesn't match the checksum, an exception should be thrown.
 * Writes compressed content, corrupts the 4-byte chunk checksum on disk, verifies the
 * reader throws {@link CorruptSSTableException}, then restores the checksum and
 * verifies the content reads back correctly again.
 */
@Test
public void testDataCorruptionDetection() throws IOException {
    String CONTENT = "Lorem ipsum dolor sit amet, consectetur adipiscing elit. Etiam vitae.";
    File file = new File("testDataCorruptionDetection");
    file.deleteOnExit();
    File metadata = new File(file.getPath() + ".meta");
    metadata.deleteOnExit();
    assertTrue(file.createNewFile());
    assertTrue(metadata.createNewFile());
    MetadataCollector sstableMetadataCollector = new MetadataCollector(new ClusteringComparator(BytesType.instance));
    try (SequentialWriter writer = new CompressedSequentialWriter(file, metadata.getPath(), null, SequentialWriterOption.DEFAULT, CompressionParams.snappy(), sstableMetadataCollector)) {
        writer.write(CONTENT.getBytes());
        writer.finish();
    }
    // open compression metadata and get chunk information
    CompressionMetadata meta = new CompressionMetadata(metadata.getPath(), file.length(), true);
    CompressionMetadata.Chunk chunk = meta.chunkFor(0);
    try (FileHandle.Builder builder = new FileHandle.Builder(file.getPath()).withCompressionMetadata(meta);
        FileHandle fh = builder.complete();
        RandomAccessReader reader = fh.createReader()) {
        // read and verify compressed data
        assertEquals(CONTENT, reader.readLine());
        Random random = new Random();
        try (RandomAccessFile checksumModifier = new RandomAccessFile(file, "rw")) {
            byte[] checksum = new byte[4];
            // seek to the end of the compressed chunk
            checksumModifier.seek(chunk.length);
            // BUG FIX: read() may return fewer than 4 bytes; readFully() guarantees the
            // whole checksum is read (or throws), so the restore below is correct.
            checksumModifier.readFully(checksum);
            // Generate a checksum guaranteed to differ from the original.
            byte[] corruptChecksum = new byte[4];
            do {
                random.nextBytes(corruptChecksum);
            } while (Arrays.equals(corruptChecksum, checksum));
            updateChecksum(checksumModifier, chunk.length, corruptChecksum);
            try (final RandomAccessReader r = fh.createReader()) {
                Throwable exception = null;
                try {
                    r.readLine();
                } catch (Throwable t) {
                    exception = t;
                }
                // Corrupted checksum must surface as CorruptSSTableException wrapping
                // a CorruptBlockException.
                assertNotNull(exception);
                assertSame(exception.getClass(), CorruptSSTableException.class);
                assertSame(exception.getCause().getClass(), CorruptBlockException.class);
            }
            // let's write the original checksum back and check that we can read the data
            updateChecksum(checksumModifier, chunk.length, checksum);
            // read and verify compressed data
            try (RandomAccessReader cr = fh.createReader()) {
                assertEquals(CONTENT, cr.readLine());
            }
        }
    }
}
Also used : ClusteringComparator(org.apache.cassandra.db.ClusteringComparator) Random(java.util.Random) RandomAccessFile(java.io.RandomAccessFile) MetadataCollector(org.apache.cassandra.io.sstable.metadata.MetadataCollector) RandomAccessFile(java.io.RandomAccessFile) File(java.io.File) Test(org.junit.Test)

Example 9 with MetadataCollector

Use of org.apache.cassandra.io.sstable.metadata.MetadataCollector in the Apache Cassandra project.

From class CompressedRandomAccessReaderTest, method test6791:

/**
 * Regression test for CASSANDRA-6791: resetAndTruncate() on a compressed writer must
 * discard data written after the mark, so re-writing after the reset yields exactly the
 * pre-garbage content.
 */
@Test
public void test6791() throws IOException, ConfigurationException {
    File f = File.createTempFile("compressed6791_", "3");
    String filename = f.getAbsolutePath();
    MetadataCollector sstableMetadataCollector = new MetadataCollector(new ClusteringComparator(BytesType.instance));
    try (CompressedSequentialWriter writer = new CompressedSequentialWriter(f, filename + ".metadata", null, SequentialWriterOption.DEFAULT, CompressionParams.snappy(32), sstableMetadataCollector)) {
        for (int i = 0; i < 20; i++) writer.write("x".getBytes());
        DataPosition mark = writer.mark();
        // write enough garbage to create new chunks:
        for (int i = 0; i < 40; ++i) writer.write("y".getBytes());
        // Rewind to the mark; the 40 'y' bytes must be truncated away.
        writer.resetAndTruncate(mark);
        for (int i = 0; i < 20; i++) writer.write("x".getBytes());
        writer.finish();
    }
    try (FileHandle.Builder builder = new FileHandle.Builder(filename).withCompressionMetadata(new CompressionMetadata(filename + ".metadata", f.length(), true));
        FileHandle fh = builder.complete();
        RandomAccessReader reader = fh.createReader()) {
        String res = reader.readLine();
        // BUG FIX: assertEquals takes (expected, actual); the original call had the
        // arguments swapped, which produces misleading failure messages.
        assertEquals("xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx", res);
        assertEquals(40, res.length());
    } finally {
        if (f.exists())
            assertTrue(f.delete());
        File metadata = new File(filename + ".metadata");
        if (metadata.exists())
            metadata.delete();
    }
}
Also used : ClusteringComparator(org.apache.cassandra.db.ClusteringComparator) MetadataCollector(org.apache.cassandra.io.sstable.metadata.MetadataCollector) RandomAccessFile(java.io.RandomAccessFile) File(java.io.File) Test(org.junit.Test)

Example 10 with MetadataCollector

Use of org.apache.cassandra.io.sstable.metadata.MetadataCollector in the Apache Cassandra project.

From class CompressedSequentialWriterTest, method resetAndTruncateTest:

/**
 * Verifies that resetAndTruncate() on a compressed writer restores both the logical
 * position and the flushed-offset bookkeeping to the marked state, and that writes
 * after the reset behave as if the truncated data was never written.
 */
@Test
@Override
public void resetAndTruncateTest() {
    File tempFile = new File(Files.createTempDir(), "reset.txt");
    File offsetsFile = FileUtils.createTempFile("compressedsequentialwriter.offset", "test");
    final int bufferSize = 48;
    final int writeSize = 64;
    byte[] toWrite = new byte[writeSize];
    try (SequentialWriter writer = new CompressedSequentialWriter(tempFile, offsetsFile.getPath(), null, SequentialWriterOption.DEFAULT, CompressionParams.lz4(bufferSize), new MetadataCollector(new ClusteringComparator(UTF8Type.instance)))) {
        // write more bytes than the buffer holds, forcing a flush
        writer.write(toWrite);
        long flushedOffset = writer.getLastFlushOffset();
        assertEquals(writeSize, writer.position());
        // mark this position
        DataPosition pos = writer.mark();
        // write another chunk
        writer.write(toWrite);
        // another buffer should be flushed
        assertEquals(flushedOffset * 2, writer.getLastFlushOffset());
        assertEquals(writeSize * 2, writer.position());
        // reset writer
        writer.resetAndTruncate(pos);
        // current position and flushed size should be restored to the mark
        assertEquals(writeSize, writer.position());
        assertEquals(flushedOffset, writer.getLastFlushOffset());
        // write a single byte, less than the buffer
        writer.write(new byte[] { 0 });
        assertEquals(writeSize + 1, writer.position());
        // flushed offset should not increase for a sub-buffer write
        assertEquals(flushedOffset, writer.getLastFlushOffset());
        writer.finish();
    } catch (IOException e) {
        // BUG FIX: Assert.fail() discarded all diagnostic context; include the cause
        // so a failing run reports why the I/O failed.
        throw new AssertionError("unexpected I/O failure during resetAndTruncate test", e);
    }
}
Also used : ClusteringComparator(org.apache.cassandra.db.ClusteringComparator) IOException(java.io.IOException) MetadataCollector(org.apache.cassandra.io.sstable.metadata.MetadataCollector) File(java.io.File) Test(org.junit.Test)

Aggregations

MetadataCollector (org.apache.cassandra.io.sstable.metadata.MetadataCollector)17 File (java.io.File)9 ClusteringComparator (org.apache.cassandra.db.ClusteringComparator)7 RandomAccessFile (java.io.RandomAccessFile)5 Test (org.junit.Test)5 IOException (java.io.IOException)3 ByteBuffer (java.nio.ByteBuffer)3 DatabaseDescriptor (org.apache.cassandra.config.DatabaseDescriptor)3 Descriptor (org.apache.cassandra.io.sstable.Descriptor)3 SSTableReader (org.apache.cassandra.io.sstable.format.SSTableReader)3 SSTableWriter (org.apache.cassandra.io.sstable.format.SSTableWriter)3 SerializationHeader (org.apache.cassandra.db.SerializationHeader)2 LifecycleTransaction (org.apache.cassandra.db.lifecycle.LifecycleTransaction)2 CompressedSequentialWriter (org.apache.cassandra.io.compress.CompressedSequentialWriter)2 CompressionMetadata (org.apache.cassandra.io.compress.CompressionMetadata)2 StatsMetadata (org.apache.cassandra.io.sstable.metadata.StatsMetadata)2 AlwaysPresentFilter (org.apache.cassandra.utils.AlwaysPresentFilter)2 Random (java.util.Random)1 FSReadError (org.apache.cassandra.io.FSReadError)1 FSWriteError (org.apache.cassandra.io.FSWriteError)1