
Example 46 with IPartitioner

use of org.apache.cassandra.dht.IPartitioner in project eiger by wlloyd.

the class ColumnFamilyStoreTest method queryBirthdate.

private void queryBirthdate(Table table) throws CharacterCodingException {
    IndexExpression expr = new IndexExpression(ByteBufferUtil.bytes("birthdate"), IndexOperator.EQ, ByteBufferUtil.bytes(1L));
    List<IndexExpression> clause = Arrays.asList(expr);
    IFilter filter = new IdentityQueryFilter();
    IPartitioner p = StorageService.getPartitioner();
    List<Row> rows = table.getColumnFamilyStore("Indexed2").search(clause, Util.range("", ""), 100, filter);
    assert rows.size() == 1 : StringUtils.join(rows, ",");
    assertEquals("k1", ByteBufferUtil.string(rows.get(0).key.key));
}
Also used: IdentityQueryFilter (org.apache.cassandra.db.columniterator.IdentityQueryFilter), IPartitioner (org.apache.cassandra.dht.IPartitioner)
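
queryBirthdate is a helper that is run against an already-populated table. As a rough illustration of how it might be driven, here is a minimal caller sketch, assuming the test keyspace that defines the Indexed2 column family is Keyspace2 (an assumption, since the helper only receives the opened Table) and reusing only the calls shown in these examples:

// Hypothetical caller for the helper above; keyspace name is an assumption.
private void runBirthdateQuery() throws IOException, CharacterCodingException {
    Table table = Table.open("Keyspace2");
    // insert a row whose birthdate column the index query should then find
    RowMutation rm = new RowMutation("Keyspace2", ByteBufferUtil.bytes("k1"));
    rm.add(new QueryPath("Indexed2", null, ByteBufferUtil.bytes("birthdate")), ByteBufferUtil.bytes(1L), 0);
    rm.apply();
    queryBirthdate(table);
}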

Example 47 with IPartitioner

use of org.apache.cassandra.dht.IPartitioner in project eiger by wlloyd.

the class ColumnFamilyStoreTest method testIndexUpdate.

@Test
public void testIndexUpdate() throws IOException {
    Table table = Table.open("Keyspace2");
    // create a row and update the birthdate value, test that the index query fetches the new version
    RowMutation rm;
    rm = new RowMutation("Keyspace2", ByteBufferUtil.bytes("k1"));
    rm.add(new QueryPath("Indexed1", null, ByteBufferUtil.bytes("birthdate")), ByteBufferUtil.bytes(1L), 1);
    rm.apply();
    rm = new RowMutation("Keyspace2", ByteBufferUtil.bytes("k1"));
    rm.add(new QueryPath("Indexed1", null, ByteBufferUtil.bytes("birthdate")), ByteBufferUtil.bytes(2L), 2);
    rm.apply();
    IndexExpression expr = new IndexExpression(ByteBufferUtil.bytes("birthdate"), IndexOperator.EQ, ByteBufferUtil.bytes(1L));
    List<IndexExpression> clause = Arrays.asList(expr);
    IFilter filter = new IdentityQueryFilter();
    IPartitioner p = StorageService.getPartitioner();
    Range<RowPosition> range = Util.range("", "");
    List<Row> rows = table.getColumnFamilyStore("Indexed1").search(clause, range, 100, filter);
    assert rows.size() == 0;
    expr = new IndexExpression(ByteBufferUtil.bytes("birthdate"), IndexOperator.EQ, ByteBufferUtil.bytes(2L));
    clause = Arrays.asList(expr);
    rows = table.getColumnFamilyStore("Indexed1").search(clause, range, 100, filter);
    String key = ByteBufferUtil.string(rows.get(0).key.key);
    assert "k1".equals(key);
    // update the birthdate value with an OLDER timestamp, and test that the index ignores this
    rm = new RowMutation("Keyspace2", ByteBufferUtil.bytes("k1"));
    rm.add(new QueryPath("Indexed1", null, ByteBufferUtil.bytes("birthdate")), ByteBufferUtil.bytes(3L), 0);
    rm.apply();
    rows = table.getColumnFamilyStore("Indexed1").search(clause, range, 100, filter);
    key = ByteBufferUtil.string(rows.get(0).key.key);
    assert "k1".equals(key);
}
Also used: SSTable (org.apache.cassandra.io.sstable.SSTable), IdentityQueryFilter (org.apache.cassandra.db.columniterator.IdentityQueryFilter), IPartitioner (org.apache.cassandra.dht.IPartitioner), Test (org.junit.Test)
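
testIndexUpdate exercises timestamp-based conflict resolution in the secondary index: the write with timestamp 2 wins even though the write with timestamp 0 is applied later. As a further illustration, a small continuation sketch in the same style (hypothetical value 4L and timestamp 10, reusing only the variables and calls already declared in the test) that overwrites birthdate with a newer timestamp and confirms the index follows it:

    // hypothetical continuation of testIndexUpdate: overwrite with a NEWER
    // timestamp and confirm the index query resolves the row under the new value
    rm = new RowMutation("Keyspace2", ByteBufferUtil.bytes("k1"));
    rm.add(new QueryPath("Indexed1", null, ByteBufferUtil.bytes("birthdate")), ByteBufferUtil.bytes(4L), 10);
    rm.apply();
    expr = new IndexExpression(ByteBufferUtil.bytes("birthdate"), IndexOperator.EQ, ByteBufferUtil.bytes(4L));
    clause = Arrays.asList(expr);
    rows = table.getColumnFamilyStore("Indexed1").search(clause, range, 100, filter);
    assert rows.size() == 1 : StringUtils.join(rows, ",");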

Example 48 with IPartitioner

use of org.apache.cassandra.dht.IPartitioner in project eiger by wlloyd.

the class ColumnFamilyStoreTest method testIndexDeletions.

@Test
public void testIndexDeletions() throws IOException {
    ColumnFamilyStore cfs = Table.open("Keyspace3").getColumnFamilyStore("Indexed1");
    RowMutation rm;
    rm = new RowMutation("Keyspace3", ByteBufferUtil.bytes("k1"));
    rm.add(new QueryPath("Indexed1", null, ByteBufferUtil.bytes("birthdate")), ByteBufferUtil.bytes(1L), 0);
    rm.apply();
    IndexExpression expr = new IndexExpression(ByteBufferUtil.bytes("birthdate"), IndexOperator.EQ, ByteBufferUtil.bytes(1L));
    List<IndexExpression> clause = Arrays.asList(expr);
    IFilter filter = new IdentityQueryFilter();
    IPartitioner p = StorageService.getPartitioner();
    Range<RowPosition> range = Util.range("", "");
    List<Row> rows = cfs.search(clause, range, 100, filter);
    assert rows.size() == 1 : StringUtils.join(rows, ",");
    String key = ByteBufferUtil.string(rows.get(0).key.key);
    assert "k1".equals(key);
    // delete the column directly
    rm = new RowMutation("Keyspace3", ByteBufferUtil.bytes("k1"));
    rm.delete(new QueryPath("Indexed1", null, ByteBufferUtil.bytes("birthdate")), 1);
    rm.apply();
    rows = cfs.search(clause, range, 100, filter);
    assert rows.isEmpty();
    // verify that it's not being indexed under the deletion column value either
    IColumn deletion = rm.getColumnFamilies().iterator().next().iterator().next();
    ByteBuffer deletionLong = ByteBufferUtil.bytes((long) ByteBufferUtil.toInt(deletion.value()));
    IndexExpression expr0 = new IndexExpression(ByteBufferUtil.bytes("birthdate"), IndexOperator.EQ, deletionLong);
    List<IndexExpression> clause0 = Arrays.asList(expr0);
    rows = cfs.search(clause0, range, 100, filter);
    assert rows.isEmpty();
    // resurrect w/ a newer timestamp
    rm = new RowMutation("Keyspace3", ByteBufferUtil.bytes("k1"));
    rm.add(new QueryPath("Indexed1", null, ByteBufferUtil.bytes("birthdate")), ByteBufferUtil.bytes(1L), 2);
    rm.apply();
    rows = cfs.search(clause, range, 100, filter);
    assert rows.size() == 1 : StringUtils.join(rows, ",");
    key = ByteBufferUtil.string(rows.get(0).key.key);
    assert "k1".equals(key);
    // verify that row and delete w/ older timestamp does nothing
    rm = new RowMutation("Keyspace3", ByteBufferUtil.bytes("k1"));
    rm.delete(new QueryPath("Indexed1"), 1);
    rm.apply();
    rows = cfs.search(clause, range, 100, filter);
    assert rows.size() == 1 : StringUtils.join(rows, ",");
    key = ByteBufferUtil.string(rows.get(0).key.key);
    assert "k1".equals(key);
    // similarly, column delete w/ older timestamp should do nothing
    rm = new RowMutation("Keyspace3", ByteBufferUtil.bytes("k1"));
    rm.delete(new QueryPath("Indexed1", null, ByteBufferUtil.bytes("birthdate")), 1);
    rm.apply();
    rows = cfs.search(clause, range, 100, filter);
    assert rows.size() == 1 : StringUtils.join(rows, ",");
    key = ByteBufferUtil.string(rows.get(0).key.key);
    assert "k1".equals(key);
    // delete the entire row (w/ newer timestamp this time)
    rm = new RowMutation("Keyspace3", ByteBufferUtil.bytes("k1"));
    rm.delete(new QueryPath("Indexed1"), 3);
    rm.apply();
    rows = cfs.search(clause, range, 100, filter);
    assert rows.isEmpty() : StringUtils.join(rows, ",");
    // make sure obsolete mutations don't generate an index entry
    rm = new RowMutation("Keyspace3", ByteBufferUtil.bytes("k1"));
    rm.add(new QueryPath("Indexed1", null, ByteBufferUtil.bytes("birthdate")), ByteBufferUtil.bytes(1L), 3);
    rm.apply();
    rows = cfs.search(clause, range, 100, filter);
    assert rows.isEmpty() : StringUtils.join(rows, ",");
    // try insert followed by row delete in the same mutation
    rm = new RowMutation("Keyspace3", ByteBufferUtil.bytes("k1"));
    rm.add(new QueryPath("Indexed1", null, ByteBufferUtil.bytes("birthdate")), ByteBufferUtil.bytes(1L), 1);
    rm.delete(new QueryPath("Indexed1"), 2);
    rm.apply();
    rows = cfs.search(clause, range, 100, filter);
    assert rows.isEmpty() : StringUtils.join(rows, ",");
    // try row delete followed by insert in the same mutation
    rm = new RowMutation("Keyspace3", ByteBufferUtil.bytes("k1"));
    rm.delete(new QueryPath("Indexed1"), 3);
    rm.add(new QueryPath("Indexed1", null, ByteBufferUtil.bytes("birthdate")), ByteBufferUtil.bytes(1L), 4);
    rm.apply();
    rows = cfs.search(clause, range, 100, filter);
    assert rows.size() == 1 : StringUtils.join(rows, ",");
    key = ByteBufferUtil.string(rows.get(0).key.key);
    assert "k1".equals(key);
}
Also used: ByteBuffer (java.nio.ByteBuffer), IdentityQueryFilter (org.apache.cassandra.db.columniterator.IdentityQueryFilter), IPartitioner (org.apache.cassandra.dht.IPartitioner), Test (org.junit.Test)
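
testIndexDeletions walks through the tombstone/timestamp interactions one case at a time; the governing rule throughout is that whichever of the tombstone or the column carries the higher timestamp decides whether an index entry survives. One more same-mutation ordering could be checked in the same style, a sketch with hypothetical timestamps 5 and 6 that simply mirrors the final delete-then-insert case above:

    // hypothetical extra case: a column tombstone at timestamp 5 followed by a
    // re-insert at timestamp 6 in the same mutation should leave the row visible
    rm = new RowMutation("Keyspace3", ByteBufferUtil.bytes("k1"));
    rm.delete(new QueryPath("Indexed1", null, ByteBufferUtil.bytes("birthdate")), 5);
    rm.add(new QueryPath("Indexed1", null, ByteBufferUtil.bytes("birthdate")), ByteBufferUtil.bytes(1L), 6);
    rm.apply();
    rows = cfs.search(clause, range, 100, filter);
    assert rows.size() == 1 : StringUtils.join(rows, ",");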

Example 49 with IPartitioner

use of org.apache.cassandra.dht.IPartitioner in project eiger by wlloyd.

the class ColumnFamilyInputFormat method getSplits.

public List<InputSplit> getSplits(JobContext context) throws IOException {
    Configuration conf = context.getConfiguration();
    validateConfiguration(conf);
    // canonical ranges and nodes holding replicas
    List<TokenRange> masterRangeNodes = getRangeMap(conf);
    keyspace = ConfigHelper.getInputKeyspace(context.getConfiguration());
    cfName = ConfigHelper.getInputColumnFamily(context.getConfiguration());
    // canonical ranges, split into pieces, fetching the splits in parallel
    ExecutorService executor = Executors.newCachedThreadPool();
    List<InputSplit> splits = new ArrayList<InputSplit>();
    try {
        List<Future<List<InputSplit>>> splitfutures = new ArrayList<Future<List<InputSplit>>>();
        KeyRange jobKeyRange = ConfigHelper.getInputKeyRange(conf);
        IPartitioner partitioner = null;
        Range<Token> jobRange = null;
        if (jobKeyRange != null) {
            partitioner = ConfigHelper.getInputPartitioner(context.getConfiguration());
            assert partitioner.preservesOrder() : "ConfigHelper.setInputKeyRange(..) can only be used with an order-preserving partitioner";
            assert jobKeyRange.start_key == null : "only start_token supported";
            assert jobKeyRange.end_key == null : "only end_token supported";
            jobRange = new Range<Token>(partitioner.getTokenFactory().fromString(jobKeyRange.start_token), partitioner.getTokenFactory().fromString(jobKeyRange.end_token), partitioner);
        }
        for (TokenRange range : masterRangeNodes) {
            if (jobRange == null) {
                // for each range, pick a live owner and ask it to compute bite-sized splits
                splitfutures.add(executor.submit(new SplitCallable(range, conf)));
            } else {
                Range<Token> dhtRange = new Range<Token>(partitioner.getTokenFactory().fromString(range.start_token), partitioner.getTokenFactory().fromString(range.end_token), partitioner);
                if (dhtRange.intersects(jobRange)) {
                    for (Range<Token> intersection : dhtRange.intersectionWith(jobRange)) {
                        range.start_token = partitioner.getTokenFactory().toString(intersection.left);
                        range.end_token = partitioner.getTokenFactory().toString(intersection.right);
                        // for each range, pick a live owner and ask it to compute bite-sized splits
                        splitfutures.add(executor.submit(new SplitCallable(range, conf)));
                    }
                }
            }
        }
        // wait until we have all the results back
        for (Future<List<InputSplit>> futureInputSplits : splitfutures) {
            try {
                splits.addAll(futureInputSplits.get());
            } catch (Exception e) {
                throw new IOException("Could not get input splits", e);
            }
        }
    } finally {
        executor.shutdownNow();
    }
    assert splits.size() > 0;
    Collections.shuffle(splits, new Random(System.nanoTime()));
    return splits;
}
Also used: Configuration (org.apache.hadoop.conf.Configuration), KeyRange (org.apache.cassandra.thrift.KeyRange), ArrayList (java.util.ArrayList), Token (org.apache.cassandra.dht.Token), IOException (java.io.IOException), Range (org.apache.cassandra.dht.Range), TokenRange (org.apache.cassandra.thrift.TokenRange), InvalidRequestException (org.apache.cassandra.thrift.InvalidRequestException), TException (org.apache.thrift.TException), Random (java.util.Random), ExecutorService (java.util.concurrent.ExecutorService), Future (java.util.concurrent.Future), List (java.util.List), InputSplit (org.apache.hadoop.mapreduce.InputSplit), IPartitioner (org.apache.cassandra.dht.IPartitioner)
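
Most of getSplits is mechanical; the interesting part is restricting each canonical range to the job's token range by round-tripping tokens through the partitioner's TokenFactory and intersecting the resulting DHT ranges. A minimal standalone sketch of just that step, assuming RandomPartitioner (so the numeric token strings below are hypothetical) and reusing only the calls visible above:

// Standalone sketch of the range-intersection step; partitioner choice and
// token strings are assumptions, not values taken from a real job.
IPartitioner partitioner = new RandomPartitioner();
Range<Token> jobRange = new Range<Token>(partitioner.getTokenFactory().fromString("0"),
                                         partitioner.getTokenFactory().fromString("1000"),
                                         partitioner);
Range<Token> nodeRange = new Range<Token>(partitioner.getTokenFactory().fromString("500"),
                                          partitioner.getTokenFactory().fromString("2000"),
                                          partitioner);
if (nodeRange.intersects(jobRange)) {
    // only the overlapping portions would be submitted for split computation
    for (Range<Token> intersection : nodeRange.intersectionWith(jobRange))
        System.out.println(partitioner.getTokenFactory().toString(intersection.left)
                           + " -> "
                           + partitioner.getTokenFactory().toString(intersection.right));
}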

Example 50 with IPartitioner

use of org.apache.cassandra.dht.IPartitioner in project eiger by wlloyd.

the class AntiEntropyServiceTestAbstract method testValidatorAdd.

@Test
public void testValidatorAdd() throws Throwable {
    Validator validator = new Validator(request);
    IPartitioner part = validator.tree.partitioner();
    Token mid = part.midpoint(local_range.left, local_range.right);
    validator.prepare(store);
    // add a row
    validator.add(new PrecompactedRow(new DecoratedKey(mid, ByteBufferUtil.bytes("inconceivable!")), ColumnFamily.create(Schema.instance.getCFMetaData(tablename, cfname))));
    validator.completeTree();
    // confirm that the tree was validated
    assert null != validator.tree.hash(local_range);
}
Also used: PrecompactedRow (org.apache.cassandra.db.compaction.PrecompactedRow), Token (org.apache.cassandra.dht.Token), IPartitioner (org.apache.cassandra.dht.IPartitioner), Test (org.junit.Test)
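
The validator's Merkle tree is keyed by the table's partitioner, and midpoint is used here to pick a token guaranteed to fall inside local_range so the added row lands in the tree. A minimal sketch of midpoint in isolation, assuming RandomPartitioner from the same codebase (the keys and the probe value are illustrative only):

// Standalone sketch of IPartitioner.midpoint, assuming RandomPartitioner,
// which hashes keys to BigIntegerToken values.
IPartitioner part = new RandomPartitioner();
Token left = part.getToken(ByteBufferUtil.bytes("a"));
Token right = part.getToken(ByteBufferUtil.bytes("z"));
Token mid = part.midpoint(left, right);
// mid lies between left and right in token order, so a DecoratedKey built
// from it falls inside the (left, right] range, as in testValidatorAdd above
DecoratedKey dk = new DecoratedKey(mid, ByteBufferUtil.bytes("probe"));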

Aggregations

IPartitioner (org.apache.cassandra.dht.IPartitioner): 55 usages
Token (org.apache.cassandra.dht.Token): 28 usages
Test (org.junit.Test): 27 usages
InetAddress (java.net.InetAddress): 15 usages
Range (org.apache.cassandra.dht.Range): 14 usages
TokenMetadata (org.apache.cassandra.locator.TokenMetadata): 10 usages
IdentityQueryFilter (org.apache.cassandra.db.columniterator.IdentityQueryFilter): 9 usages
BigIntegerToken (org.apache.cassandra.dht.RandomPartitioner.BigIntegerToken): 9 usages
VersionedValue (org.apache.cassandra.gms.VersionedValue): 9 usages
IOException (java.io.IOException): 7 usages
ArrayList (java.util.ArrayList): 7 usages
RandomPartitioner (org.apache.cassandra.dht.RandomPartitioner): 6 usages
IOError (java.io.IOError): 4 usages
ByteBuffer (java.nio.ByteBuffer): 4 usages
ExecutionException (java.util.concurrent.ExecutionException): 3 usages
IFilter (org.apache.cassandra.db.filter.IFilter): 3 usages
QueryPath (org.apache.cassandra.db.filter.QueryPath): 3 usages
AbstractReplicationStrategy (org.apache.cassandra.locator.AbstractReplicationStrategy): 3 usages
IndexExpression (org.apache.cassandra.thrift.IndexExpression): 3 usages
HashMultimap (com.google.common.collect.HashMultimap): 2 usages