
Example 16 with IInvokableInstance

use of org.apache.cassandra.distributed.api.IInvokableInstance in project cassandra by apache.

the class MixedModeFuzzTest method mixedModeFuzzTest.

@Test
public void mixedModeFuzzTest() throws Throwable {
    try (ICluster<IInvokableInstance> c = builder().withNodes(2).withConfig(config -> config.with(GOSSIP, NETWORK, NATIVE_PROTOCOL)).withInstanceInitializer(PrepareBehaviour::oldNewBehaviour).start()) {
        // Long string to make us invalidate caches occasionally
        String veryLongString = "very";
        for (int i = 0; i < 2; i++) veryLongString += veryLongString;
        final String qualified = "SELECT pk as " + veryLongString + "%d, ck as " + veryLongString + "%d FROM ks%d.tbl";
        final String unqualified = "SELECT pk as " + veryLongString + "%d, ck as " + veryLongString + "%d FROM tbl";
        int KEYSPACES = 3;
        final int STATEMENTS_PER_KS = 2;
        for (int i = 0; i < KEYSPACES; i++) {
            c.schemaChange(withKeyspace("CREATE KEYSPACE ks" + i + " WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 2};"));
            c.schemaChange(withKeyspace("CREATE TABLE ks" + i + ".tbl (pk int, ck int, PRIMARY KEY (pk, ck));"));
            for (int j = 0; j < i; j++) c.coordinator(1).execute("INSERT INTO ks" + i + ".tbl (pk, ck) VALUES (?, ?)", ConsistencyLevel.ALL, 1, j);
        }
        List<Thread> threads = new ArrayList<>();
        AtomicBoolean interrupt = new AtomicBoolean(false);
        AtomicReference<Throwable> thrown = new AtomicReference<>();
        int INFREQUENT_ACTION_COEF = 100;
        long deadline = System.nanoTime() + TimeUnit.SECONDS.toNanos(20);
        for (int i = 0; i < 3; i++) {
            int seed = i;
            threads.add(new Thread(() -> {
                com.datastax.driver.core.Cluster cluster = null;
                Map<String, Session> sessions = new HashMap<>();
                try {
                    AtomicBoolean nodeWithFix = new AtomicBoolean(false);
                    Supplier<Cluster> clusterSupplier = () -> {
                        return com.datastax.driver.core.Cluster.builder().addContactPoint("127.0.0.1").addContactPoint("127.0.0.2").build();
                    };
                    AtomicBoolean allUpgraded = new AtomicBoolean(false);
                    Random rng = new Random(seed);
                    boolean reconnected = false;
                    Map<Pair<Integer, Integer>, PreparedStatement> qualifiedStatements = new HashMap<>();
                    Map<Pair<Integer, Integer>, PreparedStatement> unqualifiedStatements = new HashMap<>();
                    cluster = clusterSupplier.get();
                    for (int j = 0; j < KEYSPACES; j++) {
                        String ks = "ks" + j;
                        sessions.put(ks, cluster.connect(ks));
                        Assert.assertEquals(sessions.get(ks).getLoggedKeyspace(), ks);
                    }
                    long firstVersionBump = System.nanoTime() + TimeUnit.SECONDS.toNanos(5);
                    long reconnectAfter = System.nanoTime() + TimeUnit.SECONDS.toNanos(15);
                    while (!interrupt.get() && (System.nanoTime() < deadline)) {
                        nodeWithFix.set(rng.nextBoolean());
                        final int ks = rng.nextInt(KEYSPACES);
                        final int statementIdx = rng.nextInt(STATEMENTS_PER_KS);
                        final Pair<Integer, Integer> statementId = Pair.create(ks, statementIdx);
                        int v = rng.nextInt(INFREQUENT_ACTION_COEF + 1);
                        Action[] pool;
                        if (v == INFREQUENT_ACTION_COEF)
                            pool = infrequent;
                        else
                            pool = frequent;
                        Action action = pool[rng.nextInt(pool.length)];
                        // logger.info(String.format("Executing %s on the node %s. ks %d", action, nodeWithFix.get() ? "1" : "2", ks));
                        switch(action) {
                            case BUMP_VERSION:
                                if (System.nanoTime() < firstVersionBump)
                                    break;
                                c.stream().forEach(node -> node.runOnInstance(() -> {
                                    if (version.get().equals(INITIAL_VERSION)) {
                                        CassandraVersion upgradeTo = QueryProcessor.NEW_PREPARED_STATEMENT_BEHAVIOUR_SINCE_40;
                                        while (!version.get().equals(upgradeTo)) {
                                            if (version.compareAndSet(INITIAL_VERSION, upgradeTo)) {
                                                logger.info("Bumped version to " + upgradeTo);
                                                break;
                                            }
                                        }
                                    }
                                }));
                                break;
                            case EXECUTE_QUALIFIED:
                                if (!qualifiedStatements.containsKey(statementId))
                                    continue;
                                try {
                                    int counter = 0;
                                    BoundStatement boundStatement = qualifiedStatements.get(statementId).bind();
                                    boundStatement.setHost(getHost(cluster, nodeWithFix.get()));
                                    for (Iterator<Object[]> iter = RowUtil.toObjects(sessions.get("ks" + ks).execute(boundStatement)); iter.hasNext(); ) {
                                        Object[] current = iter.next();
                                        int v0 = (int) current[0];
                                        int v1 = (int) current[1];
                                        Assert.assertEquals(v0, 1);
                                        Assert.assertEquals(v1, counter++);
                                    }
                                    if (nodeWithFix.get())
                                        Assert.assertEquals(ks, counter);
                                } catch (Throwable t) {
                                    if (t.getCause() != null && t.getCause().getMessage().contains("Statement was prepared on keyspace"))
                                        continue;
                                    throw t;
                                }
                                break;
                            case EXECUTE_UNQUALIFIED:
                                if (!unqualifiedStatements.containsKey(statementId))
                                    continue;
                                try {
                                    BoundStatement boundStatement = unqualifiedStatements.get(statementId).bind();
                                    boundStatement.setHost(getHost(cluster, nodeWithFix.get()));
                                    int counter = 0;
                                    for (Iterator<Object[]> iter = RowUtil.toObjects(sessions.get("ks" + ks).execute(boundStatement)); iter.hasNext(); ) {
                                        Object[] current = iter.next();
                                        int v0 = (int) current[0];
                                        int v1 = (int) current[1];
                                        Assert.assertEquals(v0, 1);
                                        Assert.assertEquals(v1, counter++);
                                    }
                                    if (nodeWithFix.get() && allUpgraded.get()) {
                                        Assert.assertEquals(unqualifiedStatements.get(statementId).getQueryKeyspace() + " " + ks + " " + statementId, ks, counter);
                                    }
                                } catch (Throwable t) {
                                    if (t.getMessage().contains("ID mismatch while trying to reprepare") || (t.getCause() != null && t.getCause().getMessage().contains("Statement was prepared on keyspace"))) {
                                        logger.info("Detected id mismatch, skipping as it is expected: ");
                                        continue;
                                    }
                                    throw t;
                                }
                                break;
                            case FORGET_PREPARED:
                                Map<Pair<Integer, Integer>, PreparedStatement> toCleanup = rng.nextBoolean() ? qualifiedStatements : unqualifiedStatements;
                                Set<Pair<Integer, Integer>> toDrop = new HashSet<>();
                                for (Pair<Integer, Integer> e : toCleanup.keySet()) {
                                    if (rng.nextBoolean())
                                        toDrop.add(e);
                                }
                                for (Pair<Integer, Integer> e : toDrop) toCleanup.remove(e);
                                toDrop.clear();
                                break;
                            case CLEAR_CACHES:
                                if (!nodeWithFix.get() && !allUpgraded.get())
                                    continue;
                                c.get(nodeWithFix.get() ? 1 : 2).runOnInstance(() -> {
                                    SystemKeyspace.loadPreparedStatements((id, query, keyspace) -> {
                                        if (rng.nextBoolean())
                                            QueryProcessor.instance.evictPrepared(id);
                                        return true;
                                    });
                                });
                                break;
                            case PREPARE_QUALIFIED:
                                if (unqualifiedStatements.containsKey(statementId))
                                    continue;
                                try {
                                    String qs = String.format(qualified, statementIdx, statementIdx, ks);
                                    String keyspace = "ks" + ks;
                                    PreparedStatement preparedQualified = sessions.get("ks" + ks).prepare(qs);
                                    // With prepared qualified, keyspace will be set to the keyspace of the statement when it was first executed
                                    if (allUpgraded.get())
                                        PreparedStatementHelper.assertHashWithoutKeyspace(preparedQualified, qs, keyspace);
                                    qualifiedStatements.put(statementId, preparedQualified);
                                } catch (Throwable t) {
                                    throw t;
                                }
                                break;
                            case PREPARE_UNQUALIFIED:
                                if (unqualifiedStatements.containsKey(statementId))
                                    continue;
                                try {
                                    String qs = String.format(unqualified, statementIdx, statementIdx);
                                    // we don't know where it's going to be executed
                                    PreparedStatement preparedUnqalified = sessions.get("ks" + ks).prepare(qs);
                                    unqualifiedStatements.put(Pair.create(ks, statementIdx), preparedUnqalified);
                                } catch (InvalidQueryException iqe) {
                                    if (!iqe.getMessage().contains("No keyspace has been"))
                                        throw iqe;
                                } catch (Throwable t) {
                                    throw t;
                                }
                                break;
                            case BOUNCE_CLIENT:
                                if (System.nanoTime() < reconnectAfter)
                                    break;
                                if (!reconnected) {
                                    for (Session s : sessions.values()) s.close();
                                    cluster.close();
                                    cluster = clusterSupplier.get();
                                    for (int j = 0; j < KEYSPACES; j++) sessions.put("ks" + j, cluster.connect("ks" + j));
                                    qualifiedStatements.clear();
                                    unqualifiedStatements.clear();
                                    reconnected = true;
                                }
                                break;
                        }
                    }
                } catch (Throwable t) {
                    interrupt.set(true);
                    t.printStackTrace();
                    while (true) {
                        Throwable seen = thrown.get();
                        Throwable merged = Throwables.merge(seen, t);
                        if (thrown.compareAndSet(seen, merged))
                            break;
                    }
                    throw t;
                } finally {
                    logger.info("Exiting...");
                    if (cluster != null)
                        cluster.close();
                }
            }));
        }
        for (Thread thread : threads) thread.start();
        for (Thread thread : threads) thread.join();
        if (thrown.get() != null)
            throw thrown.get();
    }
}
Also used : DynamicType(net.bytebuddy.dynamic.DynamicType) MethodDelegation(net.bytebuddy.implementation.MethodDelegation) ByteBuddy(net.bytebuddy.ByteBuddy) ElementMatchers.takesArguments(net.bytebuddy.matcher.ElementMatchers.takesArguments) LoggerFactory(org.slf4j.LoggerFactory) AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean) PreparedStatementHelper(com.datastax.driver.core.PreparedStatementHelper) HashMap(java.util.HashMap) Random(java.util.Random) CQLStatement(org.apache.cassandra.cql3.CQLStatement) QueryProcessor(org.apache.cassandra.cql3.QueryProcessor) AtomicReference(java.util.concurrent.atomic.AtomicReference) Supplier(java.util.function.Supplier) QueryHandler(org.apache.cassandra.cql3.QueryHandler) ByteBuffer(java.nio.ByteBuffer) SystemKeyspace(org.apache.cassandra.db.SystemKeyspace) ArrayList(java.util.ArrayList) NATIVE_PROTOCOL(org.apache.cassandra.distributed.api.Feature.NATIVE_PROTOCOL) HashSet(java.util.HashSet) PreparedStatement(com.datastax.driver.core.PreparedStatement) RowUtil(org.apache.cassandra.distributed.impl.RowUtil) BoundStatement(com.datastax.driver.core.BoundStatement) Pair(org.apache.cassandra.utils.Pair) Map(java.util.Map) Session(com.datastax.driver.core.Session) CassandraVersion(org.apache.cassandra.utils.CassandraVersion) InvalidRequestException(org.apache.cassandra.exceptions.InvalidRequestException) NETWORK(org.apache.cassandra.distributed.api.Feature.NETWORK) ResultMessage(org.apache.cassandra.transport.messages.ResultMessage) Logger(org.slf4j.Logger) FBUtilities(org.apache.cassandra.utils.FBUtilities) Iterator(java.util.Iterator) ElementMatchers.named(net.bytebuddy.matcher.ElementMatchers.named) ClientState(org.apache.cassandra.service.ClientState) Set(java.util.Set) ICluster(org.apache.cassandra.distributed.api.ICluster) Test(org.junit.Test) ConsistencyLevel(org.apache.cassandra.distributed.api.ConsistencyLevel) ClassLoadingStrategy(net.bytebuddy.dynamic.loading.ClassLoadingStrategy) TimeUnit(java.util.concurrent.TimeUnit) List(java.util.List) IInvokableInstance(org.apache.cassandra.distributed.api.IInvokableInstance) Cluster(com.datastax.driver.core.Cluster) Host(com.datastax.driver.core.Host) InvalidQueryException(com.datastax.driver.core.exceptions.InvalidQueryException) Throwables(org.apache.cassandra.utils.Throwables) Assert(org.junit.Assert) GOSSIP(org.apache.cassandra.distributed.api.Feature.GOSSIP)
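
The pattern worth extracting from this example is the IInvokableInstance lambda execution: code passed to runOnInstance or callOnInstance is serialized and run inside the target node's own classloader, so it can reach server internals such as QueryProcessor directly. Below is a minimal sketch distilled from the example above; the class name, keyspace and table are illustrative, not taken from the project.

import org.apache.cassandra.distributed.Cluster;
import org.apache.cassandra.distributed.api.ConsistencyLevel;
import org.apache.cassandra.distributed.api.IInvokableInstance;
import org.junit.Assert;
import org.junit.Test;

public class InvokableInstanceSketchTest {
    @Test
    public void callOnInstanceSketch() throws Throwable {
        try (Cluster cluster = Cluster.build(1).start()) {
            cluster.schemaChange("CREATE KEYSPACE ks WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 1}");
            cluster.schemaChange("CREATE TABLE ks.tbl (pk int PRIMARY KEY, v int)");
            cluster.coordinator(1).execute("INSERT INTO ks.tbl (pk, v) VALUES (?, ?)", ConsistencyLevel.ALL, 1, 1);
            IInvokableInstance node = cluster.get(1);
            // the lambda is serialized and executed inside the node's classloader,
            // so server-internal classes such as QueryProcessor are directly reachable
            int rows = node.callOnInstance(() -> org.apache.cassandra.cql3.QueryProcessor.executeInternal("SELECT * FROM ks.tbl").size());
            Assert.assertEquals(1, rows);
        }
    }
}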

Example 17 with IInvokableInstance

use of org.apache.cassandra.distributed.api.IInvokableInstance in project cassandra by apache.

the class NativeMixedVersionTest method v4ConnectionCleansUpThreadLocalState.

@Test
public void v4ConnectionCleansUpThreadLocalState() throws IOException {
    // make sure to limit the netty thread pool to size 1; this makes the test deterministic, as all work
    // will happen on the single thread.
    System.setProperty("io.netty.eventLoopThreads", "1");
    try (Cluster cluster = Cluster.build(1).withConfig(c -> c.with(Feature.values()).set("track_warnings", ImmutableMap.of("enabled", true, "local_read_size", ImmutableMap.of("warn_threshold_kb", 1)))).start()) {
        init(cluster);
        cluster.schemaChange(withKeyspace("CREATE TABLE %s.tbl (pk int, ck1 int, value blob, PRIMARY KEY (pk, ck1))"));
        IInvokableInstance node = cluster.get(1);
        ByteBuffer blob = ByteBuffer.wrap("This is just some large string to get some number of bytes".getBytes(StandardCharsets.UTF_8));
        for (int i = 0; i < 100; i++) node.executeInternal(withKeyspace("INSERT INTO %s.tbl (pk, ck1, value) VALUES (?, ?, ?)"), 0, i, blob);
        // query first with protocol v5 (supports client warnings), then with v3 (does not support warnings)
        try (com.datastax.driver.core.Cluster driver = JavaDriverUtils.create(cluster, ProtocolVersion.V5);
            Session session = driver.connect()) {
            ResultSet rs = session.execute(withKeyspace("SELECT * FROM %s.tbl"));
            Assertions.assertThat(rs.getExecutionInfo().getWarnings()).isNotEmpty();
        }
        try (com.datastax.driver.core.Cluster driver = JavaDriverUtils.create(cluster, ProtocolVersion.V3);
            Session session = driver.connect()) {
            ResultSet rs = session.execute(withKeyspace("SELECT * FROM %s.tbl"));
            Assertions.assertThat(rs.getExecutionInfo().getWarnings()).isEmpty();
        }
        // this should not happen, so make sure no such log lines are found
        List<String> result = node.logs().grep("Warnings present in message with version less than").getResult();
        Assertions.assertThat(result).isEmpty();
    } finally {
        System.clearProperty("io.netty.eventLoopThreads");
    }
}
Also used : Feature(org.apache.cassandra.distributed.api.Feature) ImmutableMap(com.google.common.collect.ImmutableMap) IOException(java.io.IOException) Test(org.junit.Test) ByteBuffer(java.nio.ByteBuffer) StandardCharsets(java.nio.charset.StandardCharsets) ProtocolVersion(com.datastax.driver.core.ProtocolVersion) List(java.util.List) ResultSet(com.datastax.driver.core.ResultSet) IInvokableInstance(org.apache.cassandra.distributed.api.IInvokableInstance) Session(com.datastax.driver.core.Session) Assertions(org.assertj.core.api.Assertions) Cluster(org.apache.cassandra.distributed.Cluster)
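
Besides the driver sessions, this example leans on two IInvokableInstance conveniences: executeInternal, which runs a statement inside the node without going through the native protocol, and logs().grep, which searches the node's log file. A minimal sketch of that pattern in isolation; the schema and grep pattern are illustrative, not from the project.

import java.util.List;
import org.apache.cassandra.distributed.Cluster;
import org.apache.cassandra.distributed.api.IInvokableInstance;
import org.assertj.core.api.Assertions;
import org.junit.Test;

public class LogGrepSketchTest {
    @Test
    public void logGrepSketch() throws Throwable {
        try (Cluster cluster = Cluster.build(1).start()) {
            cluster.schemaChange("CREATE KEYSPACE ks WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 1}");
            cluster.schemaChange("CREATE TABLE ks.tbl (pk int PRIMARY KEY, v int)");
            IInvokableInstance node = cluster.get(1);
            // executeInternal runs the statement inside the node, bypassing the native protocol
            node.executeInternal("INSERT INTO ks.tbl (pk, v) VALUES (?, ?)", 1, 1);
            // logs() exposes the node's log file; grep returns every line matching the pattern
            List<String> errors = node.logs().grep("ERROR").getResult();
            Assertions.assertThat(errors).isEmpty();
        }
    }
}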

Example 18 with IInvokableInstance

use of org.apache.cassandra.distributed.api.IInvokableInstance in project cassandra by apache.

the class FqlReplayDDLExclusionTest method test.

@Ignore
@Test
public void test() throws Throwable {
    try (final Cluster cluster = init(builder().withNodes(1).withConfig(updater -> updater.with(NETWORK, GOSSIP, NATIVE_PROTOCOL)).start())) {
        final IInvokableInstance node = cluster.get(1);
        // use a driver connection so statements take the code path in Cassandra where events are propagated to the full query logger
        try (com.datastax.driver.core.Cluster c = com.datastax.driver.core.Cluster.builder().addContactPoint("127.0.0.1").build();
            Session s = c.connect()) {
            s.execute("CREATE KEYSPACE fql_ks WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 1};");
            node.nodetool("enablefullquerylog", "--path", temporaryFolder.getRoot().getAbsolutePath());
            s.execute("CREATE TABLE fql_ks.fql_table (id int primary key);");
            s.execute("INSERT INTO fql_ks.fql_table (id) VALUES (1)");
            node.nodetool("disablefullquerylog");
            // here we are dropping and we expect that ddl replay will reconstruct it
            node.executeInternal("DROP TABLE fql_ks.fql_table;");
            // without --replay-ddl-statements, the replay will fail on insert because the underlying table is not there
            final ToolResult negativeRunner = ToolRunner.invokeClass("org.apache.cassandra.fqltool.FullQueryLogTool", "replay", "--keyspace", "fql_ks", "--target", "127.0.0.1", "--", temporaryFolder.getRoot().getAbsolutePath());
            assertEquals(0, negativeRunner.getExitCode());
            try {
                node.executeInternalWithResult("SELECT * from fql_ks.fql_table");
                fail("This query should fail because we do not expect fql_ks.fql_table to be created!");
            } catch (final Exception ex) {
                assertTrue(ex.getMessage().contains("table fql_table does not exist"));
            }
            // here we replay with --replay-ddl-statements so table will be created and insert will succeed
            final ToolResult positiveRunner = ToolRunner.invokeClass("org.apache.cassandra.fqltool.FullQueryLogTool", "replay", "--keyspace", "fql_ks", "--target", "127.0.0.1", // important
            "--replay-ddl-statements", "--", temporaryFolder.getRoot().getAbsolutePath());
            assertEquals(0, positiveRunner.getExitCode());
            assertRows(node.executeInternalWithResult("SELECT * from fql_ks.fql_table"), QueryResults.builder().row(1).build());
        }
    }
}
Also used : IInvokableInstance(org.apache.cassandra.distributed.api.IInvokableInstance) Cluster(org.apache.cassandra.distributed.Cluster) ToolResult(org.apache.cassandra.tools.ToolRunner.ToolResult) Session(com.datastax.driver.core.Session) Ignore(org.junit.Ignore) Test(org.junit.Test)
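
The FQL example drives nodetool through the instance API: node.nodetool(...) runs the command in-process against that node and returns its exit code. A minimal sketch, with an illustrative command:

import org.apache.cassandra.distributed.Cluster;
import org.apache.cassandra.distributed.api.IInvokableInstance;
import org.junit.Assert;
import org.junit.Test;

public class NodetoolSketchTest {
    @Test
    public void nodetoolSketch() throws Throwable {
        try (Cluster cluster = Cluster.build(1).start()) {
            IInvokableInstance node = cluster.get(1);
            // nodetool(...) runs the command in-process against this node and returns its exit code
            Assert.assertEquals(0, node.nodetool("flush"));
        }
    }
}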

Example 19 with IInvokableInstance

use of org.apache.cassandra.distributed.api.IInvokableInstance in project cassandra by apache.

the class ReprepareFuzzTest method fuzzTest.

@Test
public void fuzzTest() throws Throwable {
    try (ICluster<IInvokableInstance> c = builder().withNodes(1).withConfig(config -> config.with(GOSSIP, NETWORK, NATIVE_PROTOCOL)).withInstanceInitializer(PrepareBehaviour::alwaysNewBehaviour).start()) {
        // Long string to make us invalidate caches occasionally
        String veryLongString = "very";
        for (int i = 0; i < 2; i++) veryLongString += veryLongString;
        final String qualified = "SELECT pk as " + veryLongString + "%d, ck as " + veryLongString + "%d FROM ks%d.tbl";
        final String unqualified = "SELECT pk as " + veryLongString + "%d, ck as " + veryLongString + "%d FROM tbl";
        int KEYSPACES = 3;
        final int STATEMENTS_PER_KS = 3;
        for (int i = 0; i < KEYSPACES; i++) {
            c.schemaChange(withKeyspace("CREATE KEYSPACE ks" + i + " WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 1};"));
            c.schemaChange(withKeyspace("CREATE TABLE ks" + i + ".tbl (pk int, ck int, PRIMARY KEY (pk, ck));"));
            for (int j = 0; j < i; j++) c.coordinator(1).execute("INSERT INTO ks" + i + ".tbl (pk, ck) VALUES (?, ?)", ConsistencyLevel.QUORUM, 1, j);
        }
        List<Thread> threads = new ArrayList<>();
        AtomicBoolean interrupt = new AtomicBoolean(false);
        AtomicReference<Throwable> thrown = new AtomicReference<>();
        int INFREQUENT_ACTION_COEF = 10;
        long deadline = System.nanoTime() + TimeUnit.SECONDS.toNanos(60);
        for (int i = 0; i < FBUtilities.getAvailableProcessors() * 2; i++) {
            int seed = i;
            threads.add(new Thread(() -> {
                com.datastax.driver.core.Cluster cluster = null;
                Session session = null;
                try {
                    Random rng = new Random(seed);
                    int usedKsIdx = -1;
                    String usedKs = null;
                    Map<Pair<Integer, Integer>, PreparedStatement> qualifiedStatements = new HashMap<>();
                    Map<Pair<Integer, Integer>, PreparedStatement> unqualifiedStatements = new HashMap<>();
                    cluster = com.datastax.driver.core.Cluster.builder().addContactPoint("127.0.0.1").build();
                    session = cluster.connect();
                    while (!interrupt.get() && (System.nanoTime() < deadline)) {
                        final int ks = rng.nextInt(KEYSPACES);
                        final int statementIdx = rng.nextInt(STATEMENTS_PER_KS);
                        final Pair<Integer, Integer> statementId = Pair.create(ks, statementIdx);
                        int v = rng.nextInt(INFREQUENT_ACTION_COEF + 1);
                        Action[] pool;
                        if (v == INFREQUENT_ACTION_COEF)
                            pool = infrequent;
                        else
                            pool = frequent;
                        Action action = pool[rng.nextInt(pool.length)];
                        switch(action) {
                            case EXECUTE_QUALIFIED:
                                if (!qualifiedStatements.containsKey(statementId))
                                    continue;
                                try {
                                    int counter = 0;
                                    for (Iterator<Object[]> iter = RowUtil.toObjects(session.execute(qualifiedStatements.get(statementId).bind())); iter.hasNext(); ) {
                                        Object[] current = iter.next();
                                        int v0 = (int) current[0];
                                        int v1 = (int) current[1];
                                        Assert.assertEquals(v0, 1);
                                        Assert.assertEquals(v1, counter++);
                                    }
                                    Assert.assertEquals(ks, counter);
                                } catch (Throwable t) {
                                    if (t.getCause() != null && t.getCause().getMessage().contains("Statement was prepared on keyspace"))
                                        continue;
                                    throw t;
                                }
                                break;
                            case EXECUTE_UNQUALIFIED:
                                if (!unqualifiedStatements.containsKey(statementId))
                                    continue;
                                try {
                                    int counter = 0;
                                    for (Iterator<Object[]> iter = RowUtil.toObjects(session.execute(unqualifiedStatements.get(statementId).bind())); iter.hasNext(); ) {
                                        Object[] current = iter.next();
                                        int v0 = (int) current[0];
                                        int v1 = (int) current[1];
                                        Assert.assertEquals(v0, 1);
                                        Assert.assertEquals(v1, counter++);
                                    }
                                    Assert.assertEquals(unqualifiedStatements.get(statementId).getQueryKeyspace() + " " + usedKs + " " + statementId, Integer.parseInt(unqualifiedStatements.get(statementId).getQueryKeyspace().replace("ks", "")), counter);
                                } catch (Throwable t) {
                                    if (t.getCause() != null && t.getCause().getMessage().contains("Statement was prepared on keyspace"))
                                        continue;
                                    throw t;
                                }
                                break;
                            case PREPARE_QUALIFIED:
                                {
                                    String qs = String.format(qualified, statementIdx, statementIdx, ks);
                                    String keyspace = "ks" + ks;
                                    PreparedStatement preparedQualified = session.prepare(qs);
                                    // With prepared qualified, keyspace will be set to the keyspace of the statement when it was first executed
                                    PreparedStatementHelper.assertHashWithoutKeyspace(preparedQualified, qs, keyspace);
                                    qualifiedStatements.put(statementId, preparedQualified);
                                }
                                break;
                            case PREPARE_UNQUALIFIED:
                                try {
                                    String qs = String.format(unqualified, statementIdx, statementIdx, ks);
                                    PreparedStatement preparedUnqalified = session.prepare(qs);
                                    Assert.assertEquals(preparedUnqalified.getQueryKeyspace(), usedKs);
                                    PreparedStatementHelper.assertHashWithKeyspace(preparedUnqalified, qs, usedKs);
                                    unqualifiedStatements.put(Pair.create(usedKsIdx, statementIdx), preparedUnqalified);
                                } catch (InvalidQueryException iqe) {
                                    if (!iqe.getMessage().contains("No keyspace has been"))
                                        throw iqe;
                                } catch (Throwable t) {
                                    if (usedKs == null) {
                                        // ignored
                                        continue;
                                    }
                                    throw t;
                                }
                                break;
                            case CLEAR_CACHES:
                                c.get(1).runOnInstance(() -> {
                                    SystemKeyspace.loadPreparedStatements((id, query, keyspace) -> {
                                        if (rng.nextBoolean())
                                            QueryProcessor.instance.evictPrepared(id);
                                        return true;
                                    });
                                });
                                break;
                            case RELOAD_FROM_TABLES:
                                c.get(1).runOnInstance(QueryProcessor::clearPreparedStatementsCache);
                                c.get(1).runOnInstance(() -> QueryProcessor.instance.preloadPreparedStatements());
                                break;
                            case SWITCH_KEYSPACE:
                                usedKsIdx = ks;
                                usedKs = "ks" + ks;
                                session.execute("USE " + usedKs);
                                break;
                            case FORGET_PREPARED:
                                Map<Pair<Integer, Integer>, PreparedStatement> toCleanup = rng.nextBoolean() ? qualifiedStatements : unqualifiedStatements;
                                Set<Pair<Integer, Integer>> toDrop = new HashSet<>();
                                for (Pair<Integer, Integer> e : toCleanup.keySet()) {
                                    if (rng.nextBoolean())
                                        toDrop.add(e);
                                }
                                for (Pair<Integer, Integer> e : toDrop) toCleanup.remove(e);
                                toDrop.clear();
                                break;
                            case RECONNECT:
                                session.close();
                                cluster.close();
                                cluster = com.datastax.driver.core.Cluster.builder().addContactPoint("127.0.0.1").build();
                                session = cluster.connect();
                                qualifiedStatements.clear();
                                unqualifiedStatements.clear();
                                usedKs = null;
                                usedKsIdx = -1;
                                break;
                        }
                    }
                } catch (Throwable t) {
                    interrupt.set(true);
                    t.printStackTrace();
                    while (true) {
                        Throwable seen = thrown.get();
                        Throwable merged = Throwables.merge(seen, t);
                        if (thrown.compareAndSet(seen, merged))
                            break;
                    }
                    throw t;
                } finally {
                    if (session != null)
                        session.close();
                    if (cluster != null)
                        cluster.close();
                }
            }));
        }
        for (Thread thread : threads) thread.start();
        for (Thread thread : threads) thread.join();
        if (thrown.get() != null)
            throw thrown.get();
    }
}
Also used : DynamicType(net.bytebuddy.dynamic.DynamicType) MethodDelegation(net.bytebuddy.implementation.MethodDelegation) ByteBuddy(net.bytebuddy.ByteBuddy) LoggerFactory(org.slf4j.LoggerFactory) AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean) PreparedStatementHelper(com.datastax.driver.core.PreparedStatementHelper) HashMap(java.util.HashMap) Random(java.util.Random) QueryProcessor(org.apache.cassandra.cql3.QueryProcessor) AtomicReference(java.util.concurrent.atomic.AtomicReference) SystemKeyspace(org.apache.cassandra.db.SystemKeyspace) ArrayList(java.util.ArrayList) NATIVE_PROTOCOL(org.apache.cassandra.distributed.api.Feature.NATIVE_PROTOCOL) HashSet(java.util.HashSet) PreparedStatement(com.datastax.driver.core.PreparedStatement) RowUtil(org.apache.cassandra.distributed.impl.RowUtil) Pair(org.apache.cassandra.utils.Pair) Map(java.util.Map) Session(com.datastax.driver.core.Session) NETWORK(org.apache.cassandra.distributed.api.Feature.NETWORK) Logger(org.slf4j.Logger) FBUtilities(org.apache.cassandra.utils.FBUtilities) Iterator(java.util.Iterator) ElementMatchers.named(net.bytebuddy.matcher.ElementMatchers.named) Set(java.util.Set) ICluster(org.apache.cassandra.distributed.api.ICluster) Test(org.junit.Test) ConsistencyLevel(org.apache.cassandra.distributed.api.ConsistencyLevel) ClassLoadingStrategy(net.bytebuddy.dynamic.loading.ClassLoadingStrategy) TimeUnit(java.util.concurrent.TimeUnit) List(java.util.List) IInvokableInstance(org.apache.cassandra.distributed.api.IInvokableInstance) InvalidQueryException(com.datastax.driver.core.exceptions.InvalidQueryException) Throwables(org.apache.cassandra.utils.Throwables) Assert(org.junit.Assert) GOSSIP(org.apache.cassandra.distributed.api.Feature.GOSSIP)
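
The fuzz test above exercises re-preparation by evicting server-side prepared statements from inside the node while a driver keeps executing them. A stripped-down sketch of that interplay, assuming the same in-jvm dtest API and DataStax driver as the example; the class name and schema are illustrative.

import com.datastax.driver.core.PreparedStatement;
import com.datastax.driver.core.Session;
import org.apache.cassandra.cql3.QueryProcessor;
import org.apache.cassandra.distributed.Cluster;
import org.apache.cassandra.distributed.api.Feature;
import org.junit.Test;

public class ReprepareSketchTest {
    @Test
    public void reprepareSketch() throws Throwable {
        try (Cluster cluster = Cluster.build(1).withConfig(c -> c.with(Feature.GOSSIP, Feature.NETWORK, Feature.NATIVE_PROTOCOL)).start()) {
            cluster.schemaChange("CREATE KEYSPACE ks WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 1}");
            cluster.schemaChange("CREATE TABLE ks.tbl (pk int PRIMARY KEY, v int)");
            try (com.datastax.driver.core.Cluster driver = com.datastax.driver.core.Cluster.builder().addContactPoint("127.0.0.1").build();
                 Session session = driver.connect("ks")) {
                PreparedStatement prepared = session.prepare("SELECT * FROM tbl WHERE pk = ?");
                // wipe the server-side prepared statement cache from inside the node ...
                cluster.get(1).runOnInstance(QueryProcessor::clearPreparedStatementsCache);
                // ... the driver transparently re-prepares on the next execution
                session.execute(prepared.bind(1));
            }
        }
    }
}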

Example 20 with IInvokableInstance

use of org.apache.cassandra.distributed.api.IInvokableInstance in project cassandra by apache.

the class HintsServiceMetricsTest method testHintsServiceMetrics.

@Test
public void testHintsServiceMetrics() throws Exception {
    // setup a 3-node cluster with a bytebuddy injection that makes the writing of some hints fail
    try (Cluster cluster = builder().withNodes(3).withConfig(config -> config.with(NETWORK, GOSSIP, NATIVE_PROTOCOL)).withInstanceInitializer(FailHints::install).start()) {
        // setup a message filter to drop some of the hint request messages from node1
        AtomicInteger hintsNode2 = new AtomicInteger();
        AtomicInteger hintsNode3 = new AtomicInteger();
        cluster.filters().verbs(Verb.HINT_REQ.id).from(1).messagesMatching((from, to, message) -> (to == 2 && hintsNode2.incrementAndGet() <= NUM_TIMEOUTS_PER_NODE) || (to == 3 && hintsNode3.incrementAndGet() <= NUM_TIMEOUTS_PER_NODE)).drop();
        // setup a message filter to drop mutation requests from node1, so it creates hints for those mutations
        AtomicBoolean dropWritesForNode2 = new AtomicBoolean(false);
        AtomicBoolean dropWritesForNode3 = new AtomicBoolean(false);
        cluster.filters().verbs(Verb.MUTATION_REQ.id).from(1).messagesMatching((from, to, message) -> (to == 2 && dropWritesForNode2.get()) || (to == 3 && dropWritesForNode3.get())).drop();
        // fix under replicated keyspaces so they don't produce hint requests while we are dropping mutations
        fixDistributedSchemas(cluster);
        cluster.schemaChange(withKeyspace("CREATE KEYSPACE %s WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 3}"));
        cluster.schemaChange(withKeyspace("CREATE TABLE %s.t (k int PRIMARY KEY, v int)"));
        ICoordinator coordinator = cluster.coordinator(1);
        IInvokableInstance node1 = cluster.get(1);
        IInvokableInstance node2 = cluster.get(2);
        IInvokableInstance node3 = cluster.get(3);
        // write the first half of the rows with the second node dropping mutation requests,
        // so some hints will be created for that node
        dropWritesForNode2.set(true);
        for (int i = 0; i < NUM_ROWS / 2; i++) coordinator.execute(withKeyspace("INSERT INTO %s.t (k, v) VALUES (?, ?)"), QUORUM, i, i);
        dropWritesForNode2.set(false);
        // write the second half of the rows with the third node dropping mutation requests,
        // so some hints will be created for that node
        dropWritesForNode3.set(true);
        for (int i = NUM_ROWS / 2; i < NUM_ROWS; i++) coordinator.execute(withKeyspace("INSERT INTO %s.t (k, v) VALUES (?, ?)"), QUORUM, i, i);
        dropWritesForNode3.set(false);
        // wait until all the hints have been successfully applied to the nodes that have been dropping mutations
        waitUntilAsserted(() -> assertThat(countRows(node2)).isEqualTo(countRows(node3)).isEqualTo(NUM_ROWS));
        // Verify the metrics for the coordinator node, which is the only one actually sending hints.
        // The hint delivery errors that we have injected should have made the service try to send them again.
        // These retries are done periodically and in pages, so the retries may send again some of the hints that
        // were already successfully sent. This way, there may be more succeeded hints than actual hints/rows.
        waitUntilAsserted(() -> assertThat(countHintsSucceeded(node1)).isGreaterThanOrEqualTo(NUM_ROWS));
        waitUntilAsserted(() -> assertThat(countHintsFailed(node1)).isEqualTo(NUM_FAILURES_PER_NODE * 2));
        waitUntilAsserted(() -> assertThat(countHintsTimedOut(node1)).isEqualTo(NUM_TIMEOUTS_PER_NODE * 2));
        // verify delay metrics
        long numGlobalDelays = countGlobalDelays(node1);
        assertThat(numGlobalDelays).isGreaterThanOrEqualTo(NUM_ROWS);
        assertThat(countEndpointDelays(node1, node1)).isEqualTo(0);
        assertThat(countEndpointDelays(node1, node2)).isGreaterThan(0).isLessThanOrEqualTo(numGlobalDelays);
        assertThat(countEndpointDelays(node1, node3)).isGreaterThan(0).isLessThanOrEqualTo(numGlobalDelays);
        assertThat(countEndpointDelays(node1, node2) + countEndpointDelays(node1, node3)).isGreaterThanOrEqualTo(numGlobalDelays);
        // verify that the metrics for the not-coordinator nodes are zero
        for (IInvokableInstance node : Arrays.asList(node2, node3)) {
            assertThat(countHintsSucceeded(node)).isEqualTo(0);
            assertThat(countHintsFailed(node)).isEqualTo(0);
            assertThat(countHintsTimedOut(node)).isEqualTo(0);
            assertThat(countGlobalDelays(node)).isEqualTo(0);
            cluster.forEach(target -> assertThat(countEndpointDelays(node, target)).isEqualTo(0));
        }
    }
}
Also used : Arrays(java.util.Arrays) MethodDelegation(net.bytebuddy.implementation.MethodDelegation) ByteBuddy(net.bytebuddy.ByteBuddy) ElementMatchers.takesArguments(net.bytebuddy.matcher.ElementMatchers.takesArguments) AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean) Callable(java.util.concurrent.Callable) MINUTES(java.util.concurrent.TimeUnit.MINUTES) NATIVE_PROTOCOL(org.apache.cassandra.distributed.api.Feature.NATIVE_PROTOCOL) ThrowingRunnable(org.awaitility.core.ThrowingRunnable) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) ICoordinator(org.apache.cassandra.distributed.api.ICoordinator) Metrics(org.apache.cassandra.distributed.shared.Metrics) TestBaseImpl(org.apache.cassandra.distributed.test.TestBaseImpl) AssertionsForClassTypes.assertThat(org.assertj.core.api.AssertionsForClassTypes.assertThat) QUORUM(org.apache.cassandra.distributed.api.ConsistencyLevel.QUORUM) NETWORK(org.apache.cassandra.distributed.api.Feature.NETWORK) Awaitility.await(org.awaitility.Awaitility.await) ElementMatchers.named(net.bytebuddy.matcher.ElementMatchers.named) HintsServiceMetrics(org.apache.cassandra.metrics.HintsServiceMetrics) Test(org.junit.Test) Hint(org.apache.cassandra.hints.Hint) Verb(org.apache.cassandra.net.Verb) ClassLoadingStrategy(net.bytebuddy.dynamic.loading.ClassLoadingStrategy) SuperCall(net.bytebuddy.implementation.bind.annotation.SuperCall) IInvokableInstance(org.apache.cassandra.distributed.api.IInvokableInstance) Future(org.apache.cassandra.utils.concurrent.Future) Cluster(org.apache.cassandra.distributed.Cluster) SECONDS(java.util.concurrent.TimeUnit.SECONDS) GOSSIP(org.apache.cassandra.distributed.api.Feature.GOSSIP)
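
The hints test depends on the message filters exposed by the dtest Cluster: filters().verbs(...).from(...).drop(), or messagesMatching(...) for conditional drops, intercepts internode messages before delivery, which is how it forces hints to be written. A minimal sketch of dropping mutations from node 1 to node 2; keyspace and table names are illustrative.

import org.apache.cassandra.distributed.Cluster;
import org.apache.cassandra.distributed.api.ConsistencyLevel;
import org.apache.cassandra.net.Verb;
import org.junit.Test;

public class MessageFilterSketchTest {
    @Test
    public void dropMutationsSketch() throws Throwable {
        try (Cluster cluster = Cluster.build(2).start()) {
            cluster.schemaChange("CREATE KEYSPACE ks WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 2}");
            cluster.schemaChange("CREATE TABLE ks.tbl (pk int PRIMARY KEY, v int)");
            // drop every MUTATION_REQ travelling from node 1 to node 2; a write at CL.ONE
            // still succeeds on node 1, leaving node 2 behind
            cluster.filters().verbs(Verb.MUTATION_REQ.id).from(1).to(2).drop();
            cluster.coordinator(1).execute("INSERT INTO ks.tbl (pk, v) VALUES (?, ?)", ConsistencyLevel.ONE, 1, 1);
        }
    }
}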

Aggregations

IInvokableInstance (org.apache.cassandra.distributed.api.IInvokableInstance) 55
Test (org.junit.Test) 36
Cluster (org.apache.cassandra.distributed.Cluster) 31
List (java.util.List) 16
IOException (java.io.IOException) 15
ConsistencyLevel (org.apache.cassandra.distributed.api.ConsistencyLevel) 14
Feature (org.apache.cassandra.distributed.api.Feature) 13
GOSSIP (org.apache.cassandra.distributed.api.Feature.GOSSIP) 13
NETWORK (org.apache.cassandra.distributed.api.Feature.NETWORK) 13
ICluster (org.apache.cassandra.distributed.api.ICluster) 13
TestBaseImpl (org.apache.cassandra.distributed.test.TestBaseImpl) 13
TokenSupplier (org.apache.cassandra.distributed.api.TokenSupplier) 12
Session (com.datastax.driver.core.Session) 11
Arrays (java.util.Arrays) 11
Assertions (org.assertj.core.api.Assertions) 10
Set (java.util.Set) 9
NATIVE_PROTOCOL (org.apache.cassandra.distributed.api.Feature.NATIVE_PROTOCOL) 9
Assert (org.junit.Assert) 9
PreparedStatement (com.datastax.driver.core.PreparedStatement) 8
Map (java.util.Map) 8