
Example 1 with MappedFileDataInput

Use of org.apache.cassandra.io.util.MappedFileDataInput in the project eiger by wlloyd.

From the class LazilyCompactedRowTest, method assertBytes (a minimal usage sketch follows the example):

private static void assertBytes(Collection<SSTableReader> sstables, AbstractCompactionIterable ci1, AbstractCompactionIterable ci2) throws IOException {
    CloseableIterator<AbstractCompactedRow> iter1 = ci1.iterator();
    CloseableIterator<AbstractCompactedRow> iter2 = ci2.iterator();
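    // walk both compaction iterables in lockstep and compare the serialized form of each row section by section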
    while (true) {
        if (!iter1.hasNext()) {
            assert !iter2.hasNext();
            break;
        }
        AbstractCompactedRow row1 = iter1.next();
        AbstractCompactedRow row2 = iter2.next();
        DataOutputBuffer out1 = new DataOutputBuffer();
        DataOutputBuffer out2 = new DataOutputBuffer();
        row1.write(out1);
        row2.write(out2);
        File tmpFile1 = File.createTempFile("lcrt1", null);
        File tmpFile2 = File.createTempFile("lcrt2", null);
        tmpFile1.deleteOnExit();
        tmpFile2.deleteOnExit();
        // writing data from row1
        new FileOutputStream(tmpFile1).write(out1.getData());
        // writing data from row2
        new FileOutputStream(tmpFile2).write(out2.getData());
        MappedFileDataInput in1 = new MappedFileDataInput(new FileInputStream(tmpFile1), tmpFile1.getAbsolutePath(), 0);
        MappedFileDataInput in2 = new MappedFileDataInput(new FileInputStream(tmpFile2), tmpFile2.getAbsolutePath(), 0);
        // key isn't part of what CompactedRow writes; that's done by SSTableWriter.append
        // row size can differ because the bloom filter counts differ
        long rowSize1 = SSTableReader.readRowSize(in1, sstables.iterator().next().descriptor);
        long rowSize2 = SSTableReader.readRowSize(in2, sstables.iterator().next().descriptor);
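        // the serialized row is an 8-byte size field followed by rowSize bytes, hence the +8 below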
        assertEquals(rowSize1 + 8, out1.getLength());
        assertEquals(rowSize2 + 8, out2.getLength());
        // bloom filter
        IndexHelper.defreezeBloomFilter(in1, rowSize1, false);
        IndexHelper.defreezeBloomFilter(in2, rowSize2, false);
        // index
        int indexSize1 = in1.readInt();
        int indexSize2 = in2.readInt();
        assertEquals(indexSize1, indexSize2);
        ByteBuffer bytes1 = in1.readBytes(indexSize1);
        ByteBuffer bytes2 = in2.readBytes(indexSize2);
        assert bytes1.equals(bytes2);
        // cf metadata
        ColumnFamily cf1 = ColumnFamily.create("Keyspace1", "Standard1");
        ColumnFamily cf2 = ColumnFamily.create("Keyspace1", "Standard1");
        ColumnFamily.serializer().deserializeFromSSTableNoColumns(cf1, in1);
        ColumnFamily.serializer().deserializeFromSSTableNoColumns(cf2, in2);
        assert cf1.getLocalDeletionTime() == cf2.getLocalDeletionTime();
        assert cf1.getMarkedForDeleteAt() == cf2.getMarkedForDeleteAt();
        // columns
        int columns = in1.readInt();
        assert columns == in2.readInt();
        for (int i = 0; i < columns; i++) {
            IColumn c1 = cf1.getColumnSerializer().deserialize(in1);
            IColumn c2 = cf2.getColumnSerializer().deserialize(in2);
            assert c1.equals(c2);
        }
        // that should be everything
        assert in1.available() == 0;
        assert in2.available() == 0;
    }
}
Also used: MappedFileDataInput (org.apache.cassandra.io.util.MappedFileDataInput), FileOutputStream (java.io.FileOutputStream), DataOutputBuffer (org.apache.cassandra.io.util.DataOutputBuffer), File (java.io.File), ByteBuffer (java.nio.ByteBuffer), FileInputStream (java.io.FileInputStream)
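
For orientation, here is a minimal, self-contained sketch of the round-trip pattern the test relies on: serialize into a DataOutputBuffer, flush the bytes to a temporary file, then wrap that file in a MappedFileDataInput and read the data back. The constructor call and the read methods mirror the ones used in assertBytes above; the class name MappedFileDataInputSketch, the roundTrip helper, and the sample payload are illustrative assumptions, not part of the project.

import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.IOException;
import java.nio.ByteBuffer;

import org.apache.cassandra.io.util.DataOutputBuffer;
import org.apache.cassandra.io.util.MappedFileDataInput;

public class MappedFileDataInputSketch {

    // Hypothetical helper: write a DataOutputBuffer's contents to a temp file and
    // memory-map that file for reading, mirroring the setup in assertBytes above.
    static MappedFileDataInput roundTrip(DataOutputBuffer out) throws IOException {
        File tmp = File.createTempFile("mfdi-sketch", null);
        tmp.deleteOnExit();
        FileOutputStream fos = new FileOutputStream(tmp);
        try {
            // write only the valid region of the backing array, not the whole (possibly larger) buffer
            fos.write(out.getData(), 0, out.getLength());
        } finally {
            fos.close();
        }
        // same constructor shape as in the test: stream, file name, starting offset
        return new MappedFileDataInput(new FileInputStream(tmp), tmp.getAbsolutePath(), 0);
    }

    public static void main(String[] args) throws IOException {
        DataOutputBuffer out = new DataOutputBuffer();
        // sample payload (illustrative): a length prefix followed by that many bytes
        out.writeInt(3);
        out.write(new byte[] { 1, 2, 3 });

        MappedFileDataInput in = roundTrip(out);
        int length = in.readInt();
        ByteBuffer payload = in.readBytes(length);
        assert payload.equals(ByteBuffer.wrap(new byte[] { 1, 2, 3 }));
        // everything written should have been consumed, as asserted at the end of the test
        assert in.available() == 0;
    }
}

Run with assertions enabled (java -ea) for the checks to take effect, just as the test's assert statements require.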

Aggregations

File (java.io.File): 1
FileInputStream (java.io.FileInputStream): 1
FileOutputStream (java.io.FileOutputStream): 1
ByteBuffer (java.nio.ByteBuffer): 1
DataOutputBuffer (org.apache.cassandra.io.util.DataOutputBuffer): 1
MappedFileDataInput (org.apache.cassandra.io.util.MappedFileDataInput): 1