Example 41 with IInvokableInstance

Use of org.apache.cassandra.distributed.api.IInvokableInstance in project cassandra by apache.

The class PairOfSequencesPaxosSimulation, method plan.

public ActionPlan plan() {
    ActionPlan plan = new KeyspaceActions(simulated, KEYSPACE, TABLE, CREATE_TABLE, cluster, clusterOptions, serialConsistency, this, primaryKeys, debug).plan();
    plan = plan.encapsulate(ActionPlan.setUpTearDown(
            ActionList.of(cluster.stream().map(i -> simulated.run("Insert Partitions", i, executeForPrimaryKeys(INSERT1, primaryKeys)))),
            ActionList.of(cluster.stream().map(i -> simulated.run("Delete Partitions", i, executeForPrimaryKeys(DELETE1, primaryKeys))))));
    final int nodes = cluster.size();
    for (int primaryKey : primaryKeys) historyCheckers.add(new HistoryChecker(primaryKey));
    List<Supplier<Action>> primaryKeyActions = new ArrayList<>();
    for (int pki = 0; pki < primaryKeys.length; ++pki) {
        int primaryKey = primaryKeys[pki];
        HistoryChecker historyChecker = historyCheckers.get(pki);
        Supplier<Action> supplier = new Supplier<Action>() {

            int i = 0;

            @Override
            public Action get() {
                int node = simulated.random.uniform(1, nodes + 1);
                IInvokableInstance instance = cluster.get(node);
                switch(serialConsistency) {
                    default:
                        throw new AssertionError();
                    case LOCAL_SERIAL:
                        if (simulated.snitch.dcOf(node) > 0) {
                            // perform some queries against these nodes but don't expect them to be linearizable
                            return new NonVerifyingOperation(i++, instance, serialConsistency, primaryKey, historyChecker);
                        }
                        // otherwise fall through: nodes in the first DC are treated the same as SERIAL
                    case SERIAL:
                        return simulated.random.decide(readRatio)
                               ? new VerifyingOperation(i++, instance, serialConsistency, primaryKey, historyChecker)
                               : new ModifyingOperation(i++, instance, ANY, serialConsistency, primaryKey, historyChecker);
                }
            }

            @Override
            public String toString() {
                return Integer.toString(primaryKey);
            }
        };
        final ActionListener listener = debug.debug(PARTITION, simulated.time, cluster, KEYSPACE, primaryKey);
        if (listener != null) {
            Supplier<Action> wrap = supplier;
            supplier = new Supplier<Action>() {

                @Override
                public Action get() {
                    Action action = wrap.get();
                    action.register(listener);
                    return action;
                }

                @Override
                public String toString() {
                    return wrap.toString();
                }
            };
        }
        primaryKeyActions.add(supplier);
    }
    List<Integer> available = IntStream.range(0, primaryKeys.length).boxed().collect(Collectors.toList());
    Action stream = Actions.infiniteStream(concurrency, new Supplier<Action>() {

        @Override
        public Action get() {
            // swap-remove: pick a primary key at random and take it out of the available pool
            int i = simulated.random.uniform(0, available.size());
            int next = available.get(i);
            available.set(i, available.get(available.size() - 1));
            available.remove(available.size() - 1);
            long untilNanos = simulated.time.nanoTime() + SECONDS.toNanos(simulateKeyForSeconds.select(simulated.random));
            int concurrency = withinKeyConcurrency.select(simulated.random);
            Supplier<Action> supplier = primaryKeyActions.get(next);
            // while this stream is finite, it participates in an infinite stream via its parent, so we want to permit termination while it's running
            return Actions.infiniteStream(concurrency, new Supplier<Action>() {

                @Override
                public Action get() {
                    if (simulated.time.nanoTime() >= untilNanos) {
                        available.add(next);
                        return null;
                    }
                    return supplier.get();
                }

                @Override
                public String toString() {
                    return supplier.toString();
                }
            });
        }

        @Override
        public String toString() {
            return "Primary Key Actions";
        }
    });
    return simulated.execution.plan().encapsulate(plan).encapsulate(ActionPlan.interleave(singletonList(ActionList.of(stream))));
}
Also used : Action(org.apache.cassandra.simulator.Action) IInvokableInstance(org.apache.cassandra.distributed.api.IInvokableInstance) ArrayList(java.util.ArrayList) ActionPlan(org.apache.cassandra.simulator.ActionPlan) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) ActionListener(org.apache.cassandra.simulator.ActionListener) KeyspaceActions(org.apache.cassandra.simulator.cluster.KeyspaceActions) LongSupplier(java.util.function.LongSupplier) Supplier(java.util.function.Supplier)
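
The supplier above picks a node uniformly at random and issues each operation against that node's IInvokableInstance handle. A minimal sketch of the same pick-a-node pattern outside the simulator harness, assuming a started in-jvm Cluster named cluster and an existing KEYSPACE.tbl table (both assumed here for illustration):

// Hypothetical sketch: choose a random node and run a statement directly on that instance.
int node = java.util.concurrent.ThreadLocalRandom.current().nextInt(1, cluster.size() + 1);
IInvokableInstance instance = cluster.get(node);
// executeInternal runs the statement on the chosen instance itself, bypassing coordination
instance.executeInternal("INSERT INTO " + KEYSPACE + ".tbl (pk) VALUES (?)", 0);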

Example 42 with IInvokableInstance

Use of org.apache.cassandra.distributed.api.IInvokableInstance in project cassandra by apache.

The class ClientNetworkStopStartTest, method stopStartNative.

/**
 * @see <a href="https://issues.apache.org/jira/browse/CASSANDRA-16127">CASSANDRA-16127</a>
 */
@Test
public void stopStartNative() throws IOException {
    // TODO why does trunk need GOSSIP for native to work but no other branch does?
    try (Cluster cluster = init(Cluster.build(1).withConfig(c -> c.with(Feature.GOSSIP, Feature.NATIVE_PROTOCOL)).start())) {
        IInvokableInstance node = cluster.get(1);
        assertTransportStatus(node, "binary", true);
        node.nodetoolResult("disablebinary").asserts().success();
        assertTransportStatus(node, "binary", false);
        node.nodetoolResult("enablebinary").asserts().success();
        assertTransportStatus(node, "binary", true);
        // now use it to make sure it still works!
        cluster.schemaChange("CREATE TABLE " + KEYSPACE + ".tbl (pk int, value int, PRIMARY KEY (pk))");
        try (com.datastax.driver.core.Cluster client = com.datastax.driver.core.Cluster.builder().addContactPoints(node.broadcastAddress().getAddress()).build();
            Session session = client.connect()) {
            session.execute("INSERT INTO " + KEYSPACE + ".tbl (pk, value) VALUES (?, ?)", 0, 0);
        }
        SimpleQueryResult qr = cluster.coordinator(1).executeWithResult("SELECT * FROM " + KEYSPACE + ".tbl", ConsistencyLevel.ALL);
        AssertUtils.assertRows(qr, QueryResults.builder().row(0, 0).build());
    }
}
Also used : IInvokableInstance(org.apache.cassandra.distributed.api.IInvokableInstance) SimpleQueryResult(org.apache.cassandra.distributed.api.SimpleQueryResult) Cluster(org.apache.cassandra.distributed.Cluster) Session(com.datastax.driver.core.Session) Test(org.junit.Test)
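
The helper assertTransportStatus is defined elsewhere in the test class and is not part of the listing above. A hedged sketch of one way such a check could be written, assuming the "binary" transport corresponds to the native protocol and that querying StorageService on the instance is an acceptable probe (this is not the actual implementation):

// Hypothetical helper, not the real one from ClientNetworkStopStartTest.
private static void assertTransportStatus(IInvokableInstance node, String transport, boolean expectRunning) {
    // only the "binary" (native protocol) transport is exercised in the test above
    boolean running = node.callOnInstance(() -> StorageService.instance.isNativeTransportRunning());
    Assert.assertEquals("unexpected status for transport " + transport, expectRunning, running);
}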

Example 43 with IInvokableInstance

Use of org.apache.cassandra.distributed.api.IInvokableInstance in project cassandra by apache.

The class DistributedRepairUtils, method queryParentRepairHistory.

public static QueryResult queryParentRepairHistory(ICluster<IInvokableInstance> cluster, int coordinator, String ks, String table) {
    // This is somewhat brittle: the caller never gets the repair ID and cannot ask for it, so it has to be inferred.
    // The logic assumes ks/table pairs are unique (they should be, otherwise the CREATE would fail), so any
    // repair found for that pair is taken to be the repair in question.
    Set<String> tableNames = table == null ? Collections.emptySet() : ImmutableSet.of(table);
    QueryResult rs = retryWithBackoffBlocking(10, () -> cluster.coordinator(coordinator)
            .executeWithResult("SELECT * FROM system_distributed.parent_repair_history", ConsistencyLevel.QUORUM)
            .filter(row -> ks.equals(row.getString("keyspace_name")))
            .filter(row -> tableNames.equals(row.getSet("columnfamily_names"))));
    return rs;
}
Also used : QueryResult(org.apache.cassandra.distributed.api.QueryResult) StorageMetrics(org.apache.cassandra.metrics.StorageMetrics) ImmutableSet(com.google.common.collect.ImmutableSet) Set(java.util.Set) ICluster(org.apache.cassandra.distributed.api.ICluster) ArrayUtils(org.apache.commons.lang3.ArrayUtils) ConsistencyLevel(org.apache.cassandra.distributed.api.ConsistencyLevel) Consumer(java.util.function.Consumer) Row(org.apache.cassandra.distributed.api.Row) IInvokableInstance(org.apache.cassandra.distributed.api.IInvokableInstance) Assert(org.junit.Assert) Retry.retryWithBackoffBlocking(org.apache.cassandra.utils.Retry.retryWithBackoffBlocking) Collections(java.util.Collections) AbstractCluster(org.apache.cassandra.distributed.impl.AbstractCluster) NodeToolResult(org.apache.cassandra.distributed.api.NodeToolResult) QueryResult(org.apache.cassandra.distributed.api.QueryResult)
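
Because the helper only filters parent_repair_history, a caller has to pull the repair id out of the returned rows itself. A hedged usage sketch; the parent_id column name and the getUUID accessor are assumptions about the system_distributed schema and the Row API, and cluster/table names are illustrative:

// Hypothetical caller: grab the parent repair id for the ks/table pair, assuming at least one matching row.
QueryResult history = queryParentRepairHistory(cluster, 1, KEYSPACE, "tbl");
Assert.assertTrue("expected a parent repair entry for the ks/table pair", history.hasNext());
java.util.UUID parentId = history.next().getUUID("parent_id");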

Example 44 with IInvokableInstance

Use of org.apache.cassandra.distributed.api.IInvokableInstance in project cassandra by apache.

The class FailingRepairTest, method testFailingMessage.

@Test(timeout = 10 * 60 * 1000)
public void testFailingMessage() throws IOException {
    final int replica = 1;
    final int coordinator = 2;
    String tableName = getCfName(messageType, parallelism, withTracing);
    String fqtn = KEYSPACE + "." + tableName;
    CLUSTER.schemaChange("CREATE TABLE " + fqtn + " (k INT, PRIMARY KEY (k))");
    // create data which will NOT conflict
    int lhsOffset = 10;
    int rhsOffset = 20;
    int limit = rhsOffset + (rhsOffset - lhsOffset);
    // setup data which is consistent on both sides
    for (int i = 0; i < lhsOffset; i++) CLUSTER.coordinator(replica).execute("INSERT INTO " + fqtn + " (k) VALUES (?)", ConsistencyLevel.ALL, i);
    // create data on LHS which does NOT exist in RHS
    for (int i = lhsOffset; i < rhsOffset; i++) CLUSTER.get(replica).executeInternal("INSERT INTO " + fqtn + " (k) VALUES (?)", i);
    // create data on RHS which does NOT exist in LHS
    for (int i = rhsOffset; i < limit; i++) CLUSTER.get(coordinator).executeInternal("INSERT INTO " + fqtn + " (k) VALUES (?)", i);
    // at this point, the two nodes should be out of sync, so confirm missing data
    // node 1
    Object[][] node1Records = toRows(IntStream.range(0, rhsOffset));
    Object[][] node1Actuals = toNaturalOrder(CLUSTER.get(replica).executeInternal("SELECT k FROM " + fqtn));
    Assert.assertArrayEquals(node1Records, node1Actuals);
    // node 2
    Object[][] node2Records = toRows(IntStream.concat(IntStream.range(0, lhsOffset), IntStream.range(rhsOffset, limit)));
    Object[][] node2Actuals = toNaturalOrder(CLUSTER.get(coordinator).executeInternal("SELECT k FROM " + fqtn));
    Assert.assertArrayEquals(node2Records, node2Actuals);
    // Inject the failure
    CLUSTER.get(replica).runOnInstance(() -> setup.run());
    // run a repair which is expected to fail
    List<String> repairStatus = CLUSTER.get(coordinator).callOnInstance(() -> {
        // need all ranges on the host
        String ranges = StorageService.instance.getLocalAndPendingRanges(KEYSPACE).stream().map(r -> r.left + ":" + r.right).collect(Collectors.joining(","));
        Map<String, String> args = new HashMap<String, String>() {

            {
                put(RepairOption.PARALLELISM_KEY, parallelism.getName());
                put(RepairOption.PRIMARY_RANGE_KEY, "false");
                put(RepairOption.INCREMENTAL_KEY, "false");
                put(RepairOption.TRACE_KEY, Boolean.toString(withTracing));
                put(RepairOption.PULL_REPAIR_KEY, "false");
                put(RepairOption.FORCE_REPAIR_KEY, "false");
                put(RepairOption.RANGES_KEY, ranges);
                put(RepairOption.COLUMNFAMILIES_KEY, tableName);
            }
        };
        int cmd = StorageService.instance.repairAsync(KEYSPACE, args);
        Assert.assertFalse("repair return status was 0, expected non-zero return status, 0 indicates repair not submitted", cmd == 0);
        List<String> status;
        do {
            Uninterruptibles.sleepUninterruptibly(100, TimeUnit.MILLISECONDS);
            status = StorageService.instance.getParentRepairStatus(cmd);
        } while (status == null || status.get(0).equals(ParentRepairStatus.IN_PROGRESS.name()));
        return status;
    });
    Assert.assertEquals(repairStatus.toString(), ParentRepairStatus.FAILED, ParentRepairStatus.valueOf(repairStatus.get(0)));
    // it's possible that the coordinator gets the message that the replica failed before the replica completes
    // shutting down; this then means that isKilled could be updated after the fact
    IInvokableInstance replicaInstance = CLUSTER.get(replica);
    Awaitility.await().atMost(Duration.ofSeconds(30)).until(replicaInstance::isShutdown);
    Assert.assertEquals("coordinator should not be killed", 0, CLUSTER.get(coordinator).killAttempts());
}
Also used : Arrays(java.util.Arrays) ChannelProxy(org.apache.cassandra.io.util.ChannelProxy) SSTableReader(org.apache.cassandra.io.sstable.format.SSTableReader) ParentRepairStatus(org.apache.cassandra.service.ActiveRepairService.ParentRepairStatus) Duration(java.time.Duration) Map(java.util.Map) ColumnFilter(org.apache.cassandra.db.filter.ColumnFilter) Parameterized(org.junit.runners.Parameterized) Keyspace(org.apache.cassandra.db.Keyspace) AfterClass(org.junit.AfterClass) Collection(java.util.Collection) Set(java.util.Set) ConsistencyLevel(org.apache.cassandra.distributed.api.ConsistencyLevel) Verb(org.apache.cassandra.net.Verb) Collectors(java.util.stream.Collectors) Serializable(java.io.Serializable) List(java.util.List) ColumnFamilyStore(org.apache.cassandra.db.ColumnFamilyStore) TableMetadata(org.apache.cassandra.schema.TableMetadata) CorruptSSTableException(org.apache.cassandra.io.sstable.CorruptSSTableException) Awaitility(org.awaitility.Awaitility) IntStream(java.util.stream.IntStream) RepairOption(org.apache.cassandra.repair.messages.RepairOption) BeforeClass(org.junit.BeforeClass) RunWith(org.junit.runner.RunWith) Parameters(org.junit.runners.Parameterized.Parameters) Range(org.apache.cassandra.dht.Range) HashMap(java.util.HashMap) RepairParallelism(org.apache.cassandra.repair.RepairParallelism) ArrayList(java.util.ArrayList) ForwardingSSTableReader(org.apache.cassandra.io.sstable.format.ForwardingSSTableReader) HashSet(java.util.HashSet) Token(org.apache.cassandra.dht.Token) UnfilteredRowIterator(org.apache.cassandra.db.rows.UnfilteredRowIterator) AbstractBounds(org.apache.cassandra.dht.AbstractBounds) Before(org.junit.Before) Feature(org.apache.cassandra.distributed.api.Feature) Uninterruptibles(com.google.common.util.concurrent.Uninterruptibles) Iterator(java.util.Iterator) SSTableReadsListener(org.apache.cassandra.io.sstable.format.SSTableReadsListener) SerializableRunnable(org.apache.cassandra.distributed.api.IIsolatedExecutor.SerializableRunnable) DataRange(org.apache.cassandra.db.DataRange) ICluster(org.apache.cassandra.distributed.api.ICluster) ISSTableScanner(org.apache.cassandra.io.sstable.ISSTableScanner) StorageService(org.apache.cassandra.service.StorageService) IOException(java.io.IOException) Test(org.junit.Test) TimeUnit(java.util.concurrent.TimeUnit) PartitionPosition(org.apache.cassandra.db.PartitionPosition) IInvokableInstance(org.apache.cassandra.distributed.api.IInvokableInstance) Cluster(org.apache.cassandra.distributed.Cluster) InstanceKiller(org.apache.cassandra.distributed.impl.InstanceKiller) Assert(org.junit.Assert) Collections(java.util.Collections) IInvokableInstance(org.apache.cassandra.distributed.api.IInvokableInstance) HashMap(java.util.HashMap) Test(org.junit.Test)
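
The do/while loop above polls getParentRepairStatus every 100ms until the repair leaves IN_PROGRESS. A hedged sketch of the same loop factored into a helper with an explicit deadline; the helper name and timeout are illustrative, and it would run inside callOnInstance just like the inline loop:

// Hypothetical helper: poll parent repair status until it is no longer IN_PROGRESS, or give up at the deadline.
private static List<String> awaitRepairCompletion(int cmd, long timeoutMillis) {
    long deadline = System.currentTimeMillis() + timeoutMillis;
    List<String> status;
    do {
        if (System.currentTimeMillis() > deadline)
            throw new AssertionError("repair " + cmd + " still IN_PROGRESS after " + timeoutMillis + "ms");
        Uninterruptibles.sleepUninterruptibly(100, TimeUnit.MILLISECONDS);
        // status may be null before the coordinator has registered the repair, as in the loop above
        status = StorageService.instance.getParentRepairStatus(cmd);
    } while (status == null || status.get(0).equals(ParentRepairStatus.IN_PROGRESS.name()));
    return status;
}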

Example 45 with IInvokableInstance

Use of org.apache.cassandra.distributed.api.IInvokableInstance in project cassandra by apache.

The class MessageFiltersTest, method testMessageMatching.

@Test
public void testMessageMatching() throws Throwable {
    String read = "SELECT * FROM " + KEYSPACE + ".tbl";
    String write = "INSERT INTO " + KEYSPACE + ".tbl (pk, ck, v) VALUES (1, 1, 1)";
    try (ICluster<IInvokableInstance> cluster = builder().withNodes(2).withConfig(c -> c.set("range_request_timeout", "2000ms")).start()) {
        cluster.schemaChange("CREATE KEYSPACE " + KEYSPACE + " WITH replication = {'class': 'SimpleStrategy', 'replication_factor': " + cluster.size() + "};");
        cluster.schemaChange("CREATE TABLE " + KEYSPACE + ".tbl (pk int, ck int, v int, PRIMARY KEY (pk, ck))");
        AtomicInteger counter = new AtomicInteger();
        Set<Integer> verbs = Sets.newHashSet(Arrays.asList(Verb.RANGE_REQ.id, Verb.RANGE_RSP.id, Verb.MUTATION_REQ.id, Verb.MUTATION_RSP.id));
        for (boolean inbound : Arrays.asList(true, false)) {
            counter.set(0);
            // Reads and writes are going to time out in both directions
            IMessageFilters.Filter filter = cluster.filters().allVerbs().inbound(inbound).from(1).to(2).messagesMatching((from, to, msg) -> {
                // Decode and verify message on instance; return the result back here
                Integer id = cluster.get(1).callsOnInstance((IIsolatedExecutor.SerializableCallable<Integer>) () -> {
                    Message decoded = Instance.deserializeMessage(msg);
                    return (Integer) decoded.verb().id;
                }).call();
                Assert.assertTrue(verbs.contains(id));
                counter.incrementAndGet();
                return false;
            }).drop();
            for (int i : new int[] { 1, 2 }) cluster.coordinator(i).execute(read, ConsistencyLevel.ALL);
            for (int i : new int[] { 1, 2 }) cluster.coordinator(i).execute(write, ConsistencyLevel.ALL);
            filter.off();
            Assert.assertEquals(4, counter.get());
        }
    }
}
Also used : Instance(org.apache.cassandra.distributed.impl.Instance) IMessageFilters(org.apache.cassandra.distributed.api.IMessageFilters) InetAddressAndPort(org.apache.cassandra.locator.InetAddressAndPort) Arrays(java.util.Arrays) AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean) Message(org.apache.cassandra.net.Message) IMessage(org.apache.cassandra.distributed.api.IMessage) HashSet(java.util.HashSet) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) NoPayload(org.apache.cassandra.net.NoPayload) IIsolatedExecutor(org.apache.cassandra.distributed.api.IIsolatedExecutor) MessageFilters(org.apache.cassandra.distributed.shared.MessageFilters) NETWORK(org.apache.cassandra.distributed.api.Feature.NETWORK) MessagingService(org.apache.cassandra.net.MessagingService) Set(java.util.Set) ICluster(org.apache.cassandra.distributed.api.ICluster) Test(org.junit.Test) ConsistencyLevel(org.apache.cassandra.distributed.api.ConsistencyLevel) Verb(org.apache.cassandra.net.Verb) InetSocketAddress(java.net.InetSocketAddress) Sets(com.google.common.collect.Sets) CountDownLatch(java.util.concurrent.CountDownLatch) IInvokableInstance(org.apache.cassandra.distributed.api.IInvokableInstance) Cluster(org.apache.cassandra.distributed.Cluster) Assert(org.junit.Assert) HintMessage(org.apache.cassandra.hints.HintMessage) GOSSIP(org.apache.cassandra.distributed.api.Feature.GOSSIP) IInvokableInstance(org.apache.cassandra.distributed.api.IInvokableInstance) IMessageFilters(org.apache.cassandra.distributed.api.IMessageFilters) Message(org.apache.cassandra.net.Message) IMessage(org.apache.cassandra.distributed.api.IMessage) HintMessage(org.apache.cassandra.hints.HintMessage) IIsolatedExecutor(org.apache.cassandra.distributed.api.IIsolatedExecutor) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) Test(org.junit.Test)
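
messagesMatching deserializes each message on an instance before deciding whether to drop it; when only the verb matters, the same IMessageFilters API can drop traffic without inspecting payloads. A hedged sketch reusing the write statement and cluster from the test above; the timeout/exception behaviour at ConsistencyLevel.ALL is an assumption about the in-jvm coordinator, not something the listing shows:

// Hypothetical: drop every MUTATION_REQ from node 1 to node 2, then restore delivery.
IMessageFilters.Filter dropMutations = cluster.filters().verbs(Verb.MUTATION_REQ.id).from(1).to(2).drop();
try {
    cluster.coordinator(1).execute(write, ConsistencyLevel.ALL);
    Assert.fail("expected the CL.ALL write to fail while mutations were being dropped");
} catch (Exception expected) {
    // write timeout propagated from the coordinator while the filter is active
} finally {
    dropMutations.off();
}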

Aggregations

IInvokableInstance (org.apache.cassandra.distributed.api.IInvokableInstance): 55
Test (org.junit.Test): 36
Cluster (org.apache.cassandra.distributed.Cluster): 31
List (java.util.List): 16
IOException (java.io.IOException): 15
ConsistencyLevel (org.apache.cassandra.distributed.api.ConsistencyLevel): 14
Feature (org.apache.cassandra.distributed.api.Feature): 13
GOSSIP (org.apache.cassandra.distributed.api.Feature.GOSSIP): 13
NETWORK (org.apache.cassandra.distributed.api.Feature.NETWORK): 13
ICluster (org.apache.cassandra.distributed.api.ICluster): 13
TestBaseImpl (org.apache.cassandra.distributed.test.TestBaseImpl): 13
TokenSupplier (org.apache.cassandra.distributed.api.TokenSupplier): 12
Session (com.datastax.driver.core.Session): 11
Arrays (java.util.Arrays): 11
Assertions (org.assertj.core.api.Assertions): 10
Set (java.util.Set): 9
NATIVE_PROTOCOL (org.apache.cassandra.distributed.api.Feature.NATIVE_PROTOCOL): 9
Assert (org.junit.Assert): 9
PreparedStatement (com.datastax.driver.core.PreparedStatement): 8
Map (java.util.Map): 8