
Example 1 with Stats

Use of org.apache.cassandra.spark.stats.Stats in project spark-cassandra-bulkreader by jberragan.

The class IndexDbTests, method testReadToken.

@Test
public void testReadToken() {
    qt().withExamples(500).forAll(TestUtils.partitioners(), integers().all()).checkAssert((partitioner, value) -> {
        final IPartitioner iPartitioner = FourZero.getPartitioner(partitioner);
        final BigInteger expectedToken = token(iPartitioner, value);
        try (final DataInputStream in = mockDataInputStream(value, 0)) {
            IndexDbUtils.readNextToken(iPartitioner, in, new Stats() {

                @Override
                public void readPartitionIndexDb(ByteBuffer key, BigInteger token) {
                    assertEquals(value.intValue(), key.getInt());
                    assertEquals(expectedToken, token);
                }
            });
        } catch (IOException e) {
            throw new RuntimeException(e);
        }
    });
}
Also used: Stats (org.apache.cassandra.spark.stats.Stats), BigInteger (java.math.BigInteger), IOException (java.io.IOException), DataInputStream (java.io.DataInputStream), ByteBuffer (java.nio.ByteBuffer), IPartitioner (org.apache.cassandra.spark.shaded.fourzero.cassandra.dht.IPartitioner), TestUtils.runTest (org.apache.cassandra.spark.TestUtils.runTest), Test (org.junit.Test)
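The pattern in this example is worth calling out: Stats appears to be a class of no-op hook methods, so a test overrides only the hook it cares about and leaves the rest inert. Below is a minimal sketch of a reusable subclass built on that assumption; CountingStats is a hypothetical name, and the readPartitionIndexDb signature is taken from the example above.

import java.math.BigInteger;
import java.nio.ByteBuffer;
import java.util.concurrent.atomic.AtomicLong;

import org.apache.cassandra.spark.stats.Stats;

// Hypothetical helper: counts Index.db partition reads while every
// other Stats hook keeps its default no-op behavior.
public class CountingStats extends Stats
{
    private final AtomicLong indexReads = new AtomicLong();

    @Override
    public void readPartitionIndexDb(final ByteBuffer key, final BigInteger token)
    {
        indexReads.incrementAndGet();
    }

    public long indexReads()
    {
        return indexReads.get();
    }
}

A test can pass an instance of such a subclass wherever a Stats is accepted and assert on indexReads() afterwards, instead of embedding the assertions in the callback as the example does.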

Example 2 with Stats

Use of org.apache.cassandra.spark.stats.Stats in project spark-cassandra-bulkreader by jberragan.

The class SSTableReaderTests, method testSkipNoPartitions.

@Test
public void testSkipNoPartitions() {
    runTest((partitioner, dir, bridge) -> {
        // write an SSTable
        final TestSchema schema = TestSchema.basic(bridge);
        TestUtils.writeSSTable(bridge, dir, partitioner, schema, (writer) -> {
            for (int i = 0; i < NUM_ROWS; i++) {
                for (int j = 0; j < NUM_COLS; j++) {
                    writer.write(i, j, i + j);
                }
            }
        });
        assertEquals(1, countSSTables(dir));
        final Path dataFile = getFirstFileType(dir, DataLayer.FileType.DATA);
        final Path summaryFile = getFirstFileType(dir, DataLayer.FileType.SUMMARY);
        final TableMetadata metaData = schema.schemaBuilder(partitioner).tableMetaData();
        final TestDataLayer dataLayer = new TestDataLayer(bridge, Collections.singletonList(dataFile));
        SummaryDbUtils.Summary summary;
        try (final InputStream in = new BufferedInputStream(Files.newInputStream(summaryFile))) {
            summary = SummaryDbUtils.readSummary(in, metaData.partitioner, metaData.params.minIndexInterval, metaData.params.maxIndexInterval);
        }
        // set Spark token range equal to SSTable token range
        final Range<BigInteger> sparkTokenRange = Range.closed(FourZeroUtils.tokenToBigInteger(summary.first().getToken()), FourZeroUtils.tokenToBigInteger(summary.last().getToken()));
        final SparkRangeFilter rangeFilter = SparkRangeFilter.create(sparkTokenRange);
        final AtomicBoolean skipped = new AtomicBoolean(false);
        final Stats stats = new Stats() {

            @Override
            public void skippedPartition(ByteBuffer key, BigInteger token) {
                LOGGER.error("Skipped partition when should not: " + token);
                skipped.set(true);
            }
        };
        final FourZeroSSTableReader reader = openReader(metaData, dataLayer.listSSTables().findFirst().orElseThrow(() -> new RuntimeException("Could not find SSTable")), Collections.singletonList(rangeFilter), true, stats);
        // shouldn't skip any partitions here
        assertEquals(NUM_ROWS * NUM_COLS, countAndValidateRows(reader));
        assertFalse(skipped.get());
    });
}
Also used: Path (java.nio.file.Path), TableMetadata (org.apache.cassandra.spark.shaded.fourzero.cassandra.schema.TableMetadata), BufferedInputStream (java.io.BufferedInputStream), DataInputStream (java.io.DataInputStream), FileInputStream (java.io.FileInputStream), InputStream (java.io.InputStream), TestSchema (org.apache.cassandra.spark.TestSchema), ByteBuffer (java.nio.ByteBuffer), AtomicBoolean (java.util.concurrent.atomic.AtomicBoolean), TestDataLayer (org.apache.cassandra.spark.TestDataLayer), Stats (org.apache.cassandra.spark.stats.Stats), BigInteger (java.math.BigInteger), SparkRangeFilter (org.apache.cassandra.spark.sparksql.filters.SparkRangeFilter), TestUtils.runTest (org.apache.cassandra.spark.TestUtils.runTest), Test (org.junit.Test)
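Note the capture-then-assert structure: the test records an unexpected skippedPartition call in an AtomicBoolean and asserts only after the read finishes, rather than throwing from inside the callback, where an assertion error might be swallowed by the reader or raised off the test thread. A self-contained sketch of the same pattern follows; the Hook interface and all names in it are illustrative, not part of the bulkreader API.

import java.util.concurrent.atomic.AtomicBoolean;

import static org.junit.Assert.assertFalse;

public class CallbackCaptureSketch
{
    // Illustrative stand-in for a Stats hook.
    interface Hook
    {
        void onSkip(long token);
    }

    // Stand-in for the reader driving the hook during a scan.
    static void scan(final Hook hook)
    {
        hook.onSkip(42L);
    }

    public static void main(final String[] args)
    {
        final AtomicBoolean failed = new AtomicBoolean(false);
        scan(token -> {
            if (token < 0)
            {
                // Record the failure; do not throw inside the callback.
                failed.set(true);
            }
        });
        // Assert exactly once, after the scan completes.
        assertFalse(failed.get());
    }
}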

Example 3 with Stats

Use of org.apache.cassandra.spark.stats.Stats in project spark-cassandra-bulkreader by jberragan.

The class SSTableReaderTests, method testPartialFilterMatch.

@Test
public void testPartialFilterMatch() {
    runTest((partitioner, dir, bridge) -> {
        // write an SSTable
        final TestSchema schema = TestSchema.basic(bridge);
        TestUtils.writeSSTable(bridge, dir, partitioner, schema, (writer) -> {
            for (int i = 0; i < NUM_ROWS; i++) {
                for (int j = 0; j < NUM_COLS; j++) {
                    writer.write(i, j, i + j);
                }
            }
        });
        assertEquals(1, countSSTables(dir));
        final Path dataFile = getFirstFileType(dir, DataLayer.FileType.DATA);
        final TableMetadata metaData = new FourZeroSchemaBuilder(schema.createStmt, schema.keyspace, new ReplicationFactor(ReplicationFactor.ReplicationStrategy.SimpleStrategy, ImmutableMap.of("replication_factor", 1)), partitioner).tableMetaData();
        final TestDataLayer dataLayer = new TestDataLayer(bridge, Collections.singletonList(dataFile));
        final ByteBuffer key1 = Int32Type.instance.fromString("0");
        final BigInteger token1 = bridge.hash(partitioner, key1);
        final PartitionKeyFilter keyInSSTable = PartitionKeyFilter.create(key1, token1);
        final SparkRangeFilter rangeFilter = SparkRangeFilter.create(Range.closed(token1, token1));
        final ByteBuffer key2 = Int32Type.instance.fromString("55");
        final BigInteger token2 = bridge.hash(partitioner, key2);
        final PartitionKeyFilter keyNotInSSTable = PartitionKeyFilter.create(key2, token2);
        final List<CustomFilter> filters = Arrays.asList(rangeFilter, keyInSSTable, keyNotInSSTable);
        final AtomicBoolean pass = new AtomicBoolean(true);
        final AtomicInteger skipCount = new AtomicInteger(0);
        final Stats stats = new Stats() {

            @Override
            public void skippedPartition(ByteBuffer key, BigInteger token) {
                LOGGER.info("Skipping partition: " + token);
                skipCount.incrementAndGet();
                if (filters.stream().anyMatch(filter -> !filter.skipPartition(key, token))) {
                    LOGGER.info("Should not skip partition: " + token);
                    pass.set(false);
                }
            }
        };
        final FourZeroSSTableReader reader = openReader(metaData, dataLayer.listSSTables().findFirst().orElseThrow(() -> new RuntimeException("Could not find SSTable")), filters, false, stats);
        final int rows = countAndValidateRows(reader);
        assertTrue(skipCount.get() > 0);
        assertEquals(NUM_COLS, rows);
        // should skip partitions not matching filters
        assertEquals((NUM_ROWS - skipCount.get()) * NUM_COLS, rows);
        assertTrue(pass.get());
    });
}
Also used: Path (java.nio.file.Path), TableMetadata (org.apache.cassandra.spark.shaded.fourzero.cassandra.schema.TableMetadata), ReplicationFactor (org.apache.cassandra.spark.data.ReplicationFactor), TestSchema (org.apache.cassandra.spark.TestSchema), ByteBuffer (java.nio.ByteBuffer), PartitionKeyFilter (org.apache.cassandra.spark.sparksql.filters.PartitionKeyFilter), AtomicBoolean (java.util.concurrent.atomic.AtomicBoolean), CustomFilter (org.apache.cassandra.spark.sparksql.filters.CustomFilter), AtomicInteger (java.util.concurrent.atomic.AtomicInteger), TestDataLayer (org.apache.cassandra.spark.TestDataLayer), Stats (org.apache.cassandra.spark.stats.Stats), BigInteger (java.math.BigInteger), SparkRangeFilter (org.apache.cassandra.spark.sparksql.filters.SparkRangeFilter), TestUtils.runTest (org.apache.cassandra.spark.TestUtils.runTest), Test (org.junit.Test)
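The check inside skippedPartition implies the filter contract the reader follows: a partition may be skipped only when every filter agrees to skip it, so if anyMatch finds even one filter that would keep the key, the test flags a wrongly skipped partition. A small sketch of that consensus rule; the Filter interface is an illustrative stand-in for CustomFilter.

import java.util.Arrays;
import java.util.List;

public class FilterConsensusSketch
{
    // Illustrative stand-in for CustomFilter.skipPartition(key, token).
    interface Filter
    {
        boolean skipPartition(int token);
    }

    // Skip a partition only when no filter wants to keep it.
    static boolean maySkip(final List<Filter> filters, final int token)
    {
        return filters.stream().allMatch(f -> f.skipPartition(token));
    }

    public static void main(final String[] args)
    {
        final Filter skipOdd = token -> token % 2 != 0;
        final Filter skipLarge = token -> token >= 100;
        // Token 7 is odd (skipOdd says skip) but small (skipLarge says keep),
        // so the consensus rule must keep it.
        System.out.println(maySkip(Arrays.asList(skipOdd, skipLarge), 7)); // false
    }
}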

Example 4 with Stats

Use of org.apache.cassandra.spark.stats.Stats in project spark-cassandra-bulkreader by jberragan.

The class SSTableReaderTests, method testSkipPartitionsCompactionScanner.

@Test
public void testSkipPartitionsCompactionScanner() {
    runTest((partitioner, dir, bridge) -> {
        // write an SSTable
        final TestSchema schema = TestSchema.basic(bridge);
        TestUtils.writeSSTable(bridge, dir, partitioner, schema, (writer) -> {
            for (int i = 0; i < NUM_ROWS; i++) {
                for (int j = 0; j < NUM_COLS; j++) {
                    writer.write(i, j, i + j);
                }
            }
        });
        assertEquals(1, countSSTables(dir));
        final Path dataFile = getFirstFileType(dir, DataLayer.FileType.DATA);
        final TableMetadata metaData = schema.schemaBuilder(partitioner).tableMetaData();
        final Set<SparkSSTableReader> readers = new HashSet<>(1);
        final TestDataLayer dataLayer = new TestDataLayer(bridge, Collections.singletonList(dataFile), schema.buildSchema()) {

            public SSTablesSupplier sstables(final List<CustomFilter> filters) {
                return new SSTablesSupplier() {

                    public <T extends SparkSSTableReader> Set<T> openAll(ReaderOpener<T> readerOpener) {
                        return (Set<T>) readers;
                    }
                };
            }
        };
        final Range<BigInteger> sparkTokenRange;
        switch(partitioner) {
            case Murmur3Partitioner:
                sparkTokenRange = Range.closed(BigInteger.valueOf(-9223372036854775808L), BigInteger.valueOf(3074457345618258602L));
                break;
            case RandomPartitioner:
                sparkTokenRange = Range.closed(BigInteger.ZERO, new BigInteger("916176208424801638531839357843455255"));
                break;
            default:
                throw new RuntimeException("Unexpected partitioner: " + partitioner);
        }
        final SparkRangeFilter rangeFilter = SparkRangeFilter.create(sparkTokenRange);
        final AtomicBoolean pass = new AtomicBoolean(true);
        final AtomicInteger skipCount = new AtomicInteger(0);
        final Stats stats = new Stats() {

            @Override
            public void skippedPartition(ByteBuffer key, BigInteger token) {
                LOGGER.info("Skipping partition: " + token);
                skipCount.incrementAndGet();
                if (sparkTokenRange.contains(token)) {
                    LOGGER.info("Should not skip partition: " + token);
                    pass.set(false);
                }
            }
        };
        final FourZeroSSTableReader reader = openReader(metaData, dataLayer.listSSTables().findFirst().orElseThrow(() -> new RuntimeException("Could not find SSTable")), Collections.singletonList(rangeFilter), false, stats);
        readers.add(reader);
        // read the SSTable end-to-end using SparkRowIterator and verify it skips the required partitions
        // and all the partitions returned are within the Spark token range.
        final SparkRowIterator it = new SparkRowIterator(dataLayer);
        int count = 0;
        while (it.next()) {
            final InternalRow row = it.get();
            assertEquals(row.getInt(2), row.getInt(0) + row.getInt(1));
            final DecoratedKey key = FourZero.getPartitioner(partitioner).decorateKey((ByteBuffer) ByteBuffer.allocate(4).putInt(row.getInt(0)).flip());
            final BigInteger token = FourZeroUtils.tokenToBigInteger(key.getToken());
            assertTrue(sparkTokenRange.contains(token));
            count++;
        }
        assertTrue(skipCount.get() > 0);
        // should skip out of range partitions here
        assertEquals((NUM_ROWS - skipCount.get()) * NUM_COLS, count);
        assertTrue(pass.get());
    });
}
Also used: SparkRowIterator (org.apache.cassandra.spark.sparksql.SparkRowIterator), Set (java.util.Set), HashSet (java.util.HashSet), SSTablesSupplier (org.apache.cassandra.spark.data.SSTablesSupplier), List (java.util.List), ArrayList (java.util.ArrayList), SparkRangeFilter (org.apache.cassandra.spark.sparksql.filters.SparkRangeFilter), InternalRow (org.apache.spark.sql.catalyst.InternalRow), Path (java.nio.file.Path), TableMetadata (org.apache.cassandra.spark.shaded.fourzero.cassandra.schema.TableMetadata), DecoratedKey (org.apache.cassandra.spark.shaded.fourzero.cassandra.db.DecoratedKey), BufferDecoratedKey (org.apache.cassandra.spark.shaded.fourzero.cassandra.db.BufferDecoratedKey), TestSchema (org.apache.cassandra.spark.TestSchema), ByteBuffer (java.nio.ByteBuffer), AtomicBoolean (java.util.concurrent.atomic.AtomicBoolean), AtomicInteger (java.util.concurrent.atomic.AtomicInteger), TestDataLayer (org.apache.cassandra.spark.TestDataLayer), Stats (org.apache.cassandra.spark.stats.Stats), BigInteger (java.math.BigInteger), SparkSSTableReader (org.apache.cassandra.spark.reader.SparkSSTableReader), TestUtils.runTest (org.apache.cassandra.spark.TestUtils.runTest), Test (org.junit.Test)
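One detail in the row loop deserves a note: the cast in (ByteBuffer) ByteBuffer.allocate(4).putInt(...).flip() exists because on JDK 8 Buffer.flip() is declared to return Buffer, while JDK 9 and later add a covariant override returning ByteBuffer; the cast keeps the code compiling against the older API. A standalone illustration of the idiom:

import java.nio.ByteBuffer;

public class KeyBufferIdiom
{
    public static void main(final String[] args)
    {
        // Serialize an int partition key the way the test does. The cast is
        // required when compiling against JDK 8, where flip() returns Buffer.
        final ByteBuffer key = (ByteBuffer) ByteBuffer.allocate(4).putInt(101).flip();
        System.out.println(key.getInt()); // 101, the buffer is ready for reading
    }
}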

Example 5 with Stats

Use of org.apache.cassandra.spark.stats.Stats in project spark-cassandra-bulkreader by jberragan.

The class SSTableReaderTests, method testIncrementalRepair.

// incremental repair
@Test
public void testIncrementalRepair() {
    runTest((partitioner, dir, bridge) -> {
        final TestSchema schema = TestSchema.basic(bridge);
        final int numSSTables = 4;
        final int numRepaired = 2;
        final int numUnRepaired = numSSTables - numRepaired;
        // write some SSTables
        for (int a = 0; a < numSSTables; a++) {
            final int pos = a * NUM_ROWS;
            TestUtils.writeSSTable(bridge, dir, partitioner, schema, (writer) -> {
                for (int i = pos; i < pos + NUM_ROWS; i++) {
                    for (int j = 0; j < NUM_COLS; j++) {
                        writer.write(i, j, i + j);
                    }
                }
            });
        }
        assertEquals(numSSTables, countSSTables(dir));
        final TableMetadata metaData = new FourZeroSchemaBuilder(schema.createStmt, schema.keyspace, new ReplicationFactor(ReplicationFactor.ReplicationStrategy.SimpleStrategy, ImmutableMap.of("replication_factor", 1)), partitioner).tableMetaData();
        final TestDataLayer dataLayer = new TestDataLayer(bridge, getFileType(dir, DataLayer.FileType.DATA).collect(Collectors.toList()));
        final AtomicInteger skipCount = new AtomicInteger(0);
        final Stats stats = new Stats() {

            @Override
            public void skippedRepairedSSTable(DataLayer.SSTable ssTable, long repairedAt) {
                skipCount.incrementAndGet();
            }
        };
        // mark some SSTables as repaired
        final Map<DataLayer.SSTable, Boolean> isRepaired = dataLayer.listSSTables().collect(Collectors.toMap(Function.identity(), a -> false));
        int count = 0;
        for (final DataLayer.SSTable ssTable : isRepaired.keySet()) {
            if (count < numRepaired) {
                isRepaired.put(ssTable, true);
                count++;
            }
        }
        final List<FourZeroSSTableReader> primaryReaders = dataLayer.listSSTables().map(ssTable -> openIncrementalReader(metaData, ssTable, stats, true, isRepaired.get(ssTable))).filter(reader -> !reader.ignore()).collect(Collectors.toList());
        final List<FourZeroSSTableReader> nonPrimaryReaders = dataLayer.listSSTables().map(ssTable -> openIncrementalReader(metaData, ssTable, stats, false, isRepaired.get(ssTable))).filter(reader -> !reader.ignore()).collect(Collectors.toList());
        // primary repair replica should read all sstables
        assertEquals(numSSTables, primaryReaders.size());
        // non-primary repair replica should only read unrepaired sstables
        assertEquals(numUnRepaired, nonPrimaryReaders.size());
        for (final FourZeroSSTableReader reader : nonPrimaryReaders) {
            assertFalse(isRepaired.get(reader.sstable()));
        }
        assertEquals(numUnRepaired, skipCount.get());
        final Set<FourZeroSSTableReader> toCompact = Stream.concat(primaryReaders.stream().filter(r -> isRepaired.get(r.sstable())), nonPrimaryReaders.stream()).collect(Collectors.toSet());
        assertEquals(numSSTables, toCompact.size());
        int rowCount = 0;
        boolean[] found = new boolean[numSSTables * NUM_ROWS];
        try (final CompactionStreamScanner scanner = new CompactionStreamScanner(metaData, partitioner, toCompact)) {
            // iterate through CompactionScanner and verify we have all the partition keys we are looking for
            final Rid rid = scanner.getRid();
            while (scanner.hasNext()) {
                scanner.next();
                final int a = rid.getPartitionKey().asIntBuffer().get();
                found[a] = true;
                // extract clustering key value and column name
                final ByteBuffer colBuf = rid.getColumnName();
                final ByteBuffer clusteringKey = ByteBufUtils.readBytesWithShortLength(colBuf);
                colBuf.get();
                final String colName = ByteBufUtils.string(ByteBufUtils.readBytesWithShortLength(colBuf));
                colBuf.get();
                if (StringUtils.isEmpty(colName)) {
                    continue;
                }
                assertEquals("c", colName);
                final int b = clusteringKey.asIntBuffer().get();
                // extract value column
                final int c = rid.getValue().asIntBuffer().get();
                assertEquals(c, a + b);
                rowCount++;
            }
        }
        assertEquals(numSSTables * NUM_ROWS * NUM_COLS, rowCount);
        for (final boolean b : found) {
            assertTrue(b);
        }
    });
}
Also used: TableMetadata (org.apache.cassandra.spark.shaded.fourzero.cassandra.schema.TableMetadata), ReplicationFactor (org.apache.cassandra.spark.data.ReplicationFactor), ImmutableMap (com.google.common.collect.ImmutableMap), TestSchema (org.apache.cassandra.spark.TestSchema), TestDataLayer (org.apache.cassandra.spark.TestDataLayer), DataLayer (org.apache.cassandra.spark.data.DataLayer), Stats (org.apache.cassandra.spark.stats.Stats), Rid (org.apache.cassandra.spark.reader.Rid), ByteBufUtils (org.apache.cassandra.spark.utils.ByteBufUtils), StringUtils (org.apache.commons.lang.StringUtils), ByteBuffer (java.nio.ByteBuffer), BigInteger (java.math.BigInteger), AtomicInteger (java.util.concurrent.atomic.AtomicInteger), Map (java.util.Map), Set (java.util.Set), List (java.util.List), Function (java.util.function.Function), Collectors (java.util.stream.Collectors), Stream (java.util.stream.Stream), TestUtils.getFileType (org.apache.cassandra.spark.TestUtils.getFileType), TestUtils.runTest (org.apache.cassandra.spark.TestUtils.runTest), Test (org.junit.Test)
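The column-name decoding in the scanner loop follows Cassandra's composite layout: each component is written as an unsigned 16-bit length, the component bytes, and a one-byte end-of-component marker, which is why the test calls colBuf.get() after each readBytesWithShortLength. Below is a plain java.nio sketch of that decoding, under the assumption that the buffer holds exactly this layout; CompositeDecoderSketch and its helpers are illustrative, not the bulkreader's ByteBufUtils.

import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.List;

public class CompositeDecoderSketch
{
    // Read one component: unsigned 16-bit length followed by that many bytes.
    static ByteBuffer readWithShortLength(final ByteBuffer buf)
    {
        final int length = buf.getShort() & 0xFFFF;
        final ByteBuffer component = buf.slice();
        component.limit(length);
        buf.position(buf.position() + length);
        return component;
    }

    static List<ByteBuffer> decode(final ByteBuffer composite)
    {
        final List<ByteBuffer> components = new ArrayList<>();
        while (composite.hasRemaining())
        {
            components.add(readWithShortLength(composite));
            composite.get(); // consume the end-of-component marker byte
        }
        return components;
    }

    public static void main(final String[] args)
    {
        // Build a two-component composite: an int clustering key and column name "c".
        final byte[] name = "c".getBytes(StandardCharsets.UTF_8);
        final ByteBuffer composite = ByteBuffer.allocate(2 + 4 + 1 + 2 + name.length + 1);
        composite.putShort((short) 4).putInt(7).put((byte) 0);
        composite.putShort((short) name.length).put(name).put((byte) 0);
        composite.flip();

        final List<ByteBuffer> parts = decode(composite);
        System.out.println(parts.get(0).getInt());                       // 7
        System.out.println(StandardCharsets.UTF_8.decode(parts.get(1))); // c
    }
}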

Aggregations

BigInteger (java.math.BigInteger): 10
Stats (org.apache.cassandra.spark.stats.Stats): 10
ByteBuffer (java.nio.ByteBuffer): 9
TestSchema (org.apache.cassandra.spark.TestSchema): 8
TestUtils.runTest (org.apache.cassandra.spark.TestUtils.runTest): 8
TableMetadata (org.apache.cassandra.spark.shaded.fourzero.cassandra.schema.TableMetadata): 8
Test (org.junit.Test): 8
Path (java.nio.file.Path): 7
AtomicBoolean (java.util.concurrent.atomic.AtomicBoolean): 7
TestDataLayer (org.apache.cassandra.spark.TestDataLayer): 7
SparkRangeFilter (org.apache.cassandra.spark.sparksql.filters.SparkRangeFilter): 7
AtomicInteger (java.util.concurrent.atomic.AtomicInteger): 6
List (java.util.List): 5
CustomFilter (org.apache.cassandra.spark.sparksql.filters.CustomFilter): 5
ArrayList (java.util.ArrayList): 4
ReplicationFactor (org.apache.cassandra.spark.data.ReplicationFactor): 4
Range (com.google.common.collect.Range): 3
DataInputStream (java.io.DataInputStream): 3
HashSet (java.util.HashSet): 3
Set (java.util.Set): 3