Use of org.apache.cassandra.distributed.api.LogAction in the Apache Cassandra project:
class FailingResponseDoesNotLogTest, method dispatcherErrorDoesNotLock.
@Test
public void dispatcherErrorDoesNotLock() throws IOException {
    // Install a query handler that rejects every request; must be set before the node starts.
    System.setProperty("cassandra.custom_query_handler_class", AlwaysRejectErrorQueryHandler.class.getName());
    try (Cluster cluster = Cluster.build(1)
                                  .withConfig(c -> c.with(Feature.NATIVE_PROTOCOL, Feature.GOSSIP)
                                                    .set("client_error_reporting_exclusions",
                                                         ImmutableMap.of("subnets", Collections.singletonList("127.0.0.1"))))
                                  .start())
    {
        try (SimpleClient client = SimpleClient.builder("127.0.0.1", 9042).build().connect(false))
        {
            client.execute("SELECT * FROM system.peers", ConsistencyLevel.ONE);
            Assert.fail("Query should have failed");
        }
        catch (Exception e)
        {
            // expected: the installed handler rejects every query
        }
        // The server emits its log line before the client response is written,
        // so a plain grep (no watch/wait) is sufficient here.
        LogAction logs = cluster.get(1).logs();
        Assertions.assertThat(logs.grep("address contained in client_error_reporting_exclusions").getResult()).hasSize(1);
        Assertions.assertThat(logs.grep("Unexpected exception during request").getResult()).isEmpty();
    }
    finally
    {
        System.clearProperty("cassandra.custom_query_handler_class");
    }
}
Use of org.apache.cassandra.distributed.api.LogAction in the Apache Cassandra project:
class UpgradeSSTablesTest, method rewriteSSTablesTest.
@Test
public void rewriteSSTablesTest() throws Throwable {
    // Verifies that both `upgradesstables -t <ts>` and `recompress_sstables` rewrite only
    // the sstables created at-or-before a cutoff timestamp, for both a compressed and an
    // uncompressed starting schema, and that the rewrite count is logged.
    try (ICluster<IInvokableInstance> cluster = builder().withNodes(1).withDataDirCount(1).start()) {
        for (String compressionBefore : new String[] { "{'class' : 'LZ4Compressor', 'chunk_length_in_kb' : 32}", "{'enabled': 'false'}" }) {
            for (String command : new String[] { "upgradesstables", "recompress_sstables" }) {
                // Fresh keyspace/table per combination so sstable counts are deterministic.
                cluster.schemaChange(withKeyspace("DROP KEYSPACE IF EXISTS %s"));
                cluster.schemaChange(withKeyspace("CREATE KEYSPACE %s WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 1};"));
                cluster.schemaChange(withKeyspace("CREATE TABLE %s.tbl (pk int, ck int, v text, PRIMARY KEY (pk, ck)) " + "WITH compression = " + compressionBefore));
                // Disable compaction so flushed sstables are not merged behind our back.
                cluster.get(1).acceptsOnInstance((String ks) -> {
                    Keyspace.open(ks).getColumnFamilyStore("tbl").disableAutoCompaction();
                }).accept(KEYSPACE);
                // Doubling 6 times yields a 256-char value, large enough to make real sstables.
                String blob = "blob";
                for (int i = 0; i < 6; i++) blob += blob;
                for (int i = 0; i < 100; i++) {
                    cluster.coordinator(1).execute(withKeyspace("INSERT INTO %s.tbl (pk, ck, v) VALUES (?,?,?)"), ConsistencyLevel.QUORUM, i, i, blob);
                }
                cluster.get(1).nodetool("flush", KEYSPACE, "tbl");
                // Baseline rewrite so the "old" sstables all share the current format.
                Assert.assertEquals(0, cluster.get(1).nodetool("upgradesstables", "-a", KEYSPACE, "tbl"));
                cluster.schemaChange(withKeyspace("ALTER TABLE %s.tbl WITH compression = {'class' : 'LZ4Compressor', 'chunk_length_in_kb' : 128};"));
                // Make sure timestamp will be different even with 1-second resolution.
                Thread.sleep(2000);
                // Record the newest creation time among the pre-cutoff sstables.
                long maxSoFar = cluster.get(1).appliesOnInstance((String ks) -> {
                    long maxTs = -1;
                    ColumnFamilyStore cfs = Keyspace.open(ks).getColumnFamilyStore("tbl");
                    cfs.disableAutoCompaction();
                    for (SSTableReader tbl : cfs.getLiveSSTables()) {
                        maxTs = Math.max(maxTs, tbl.getCreationTimeFor(Component.DATA));
                    }
                    return maxTs;
                }).apply(KEYSPACE);
                // Second batch flushed after the cutoff; these must NOT be rewritten by `-t`.
                for (int i = 100; i < 200; i++) {
                    cluster.coordinator(1).execute(withKeyspace("INSERT INTO %s.tbl (pk, ck, v) VALUES (?,?,?)"), ConsistencyLevel.QUORUM, i, i, blob);
                }
                cluster.get(1).nodetool("flush", KEYSPACE, "tbl");
                LogAction logAction = cluster.get(1).logs();
                logAction.mark();
                // Count sstables at-or-before the cutoff (expected rewrites) and sanity-check
                // that at least one sstable is newer (i.e. the cutoff actually excludes some).
                long expectedCount = cluster.get(1).appliesOnInstance((String ks, Long maxTs) -> {
                    long count = 0;
                    long skipped = 0;
                    Set<SSTableReader> liveSSTables = Keyspace.open(ks).getColumnFamilyStore("tbl").getLiveSSTables();
                    // Fix: pass the size as a format argument instead of concatenating it into
                    // the format string (a '%' in the concatenated text would throw).
                    assert liveSSTables.size() == 2 : String.format("Expected 2 sstables, but got %d", liveSSTables.size());
                    for (SSTableReader tbl : liveSSTables) {
                        if (tbl.getCreationTimeFor(Component.DATA) <= maxTs)
                            count++;
                        else
                            skipped++;
                    }
                    assert skipped > 0;
                    return count;
                }).apply(KEYSPACE, maxSoFar);
                if (command.equals("upgradesstables"))
                    Assert.assertEquals(0, cluster.get(1).nodetool("upgradesstables", "-a", "-t", Long.toString(maxSoFar), KEYSPACE, "tbl"));
                else
                    Assert.assertEquals(0, cluster.get(1).nodetool("recompress_sstables", KEYSPACE, "tbl"));
                // The rewrite logs how many sstables it is processing; match on that count.
                Assert.assertFalse(logAction.grep(String.format("%d sstables to", expectedCount)).getResult().isEmpty());
            }
        }
    }
}
Use of org.apache.cassandra.distributed.api.LogAction in the Apache Cassandra project:
class UnableToParseClientMessageFromBlockedSubnetTest, method badMessageCausesProtocolExceptionFromExcludeList.
@Test
public void badMessageCausesProtocolExceptionFromExcludeList() throws IOException, TimeoutException {
    // Sends an unparseable native-protocol message from an address that is on the
    // client_error_reporting_exclusions list and verifies that the error is suppressed:
    // no ProtocolException/UnknownException metrics, and only the "excluded" log line.
    Cluster cluster = getCluster();
    // write gibberish to the native protocol
    IInvokableInstance node = cluster.get(1);
    // make sure everything is fine at the start
    Assertions.assertThat(node.metrics().getCounter("org.apache.cassandra.metrics.Client.ProtocolException")).isEqualTo(0);
    Assertions.assertThat(node.metrics().getCounter("org.apache.cassandra.metrics.Client.UnknownException")).isEqualTo(0);
    LogAction logs = node.logs();
    // Mark the current log position so later grep/watchFor only see lines produced below.
    long mark = logs.mark();
    try (SimpleClient client = SimpleClient.builder("127.0.0.1", 9042).protocolVersion(version).useBeta().build()) {
        client.connect(false, true);
        // this should return a failed response
        // disable waiting on protocol errors as that logic was reverted until we can figure out its 100% safe
        // right now ProtocolException is thrown for fatal and non-fatal issues, so closing the channel
        // on non-fatal issues could cause other issues for the cluster
        // Header byte encodes a deliberately bogus (80 + real version) protocol version.
        byte expectedVersion = (byte) (80 + version.asInt());
        Message.Response response = client.execute(new UnableToParseClientMessageTest.CustomHeaderMessage(new byte[] { expectedVersion, 1, 2, 3, 4, 5, 6, 7, 8, 9 }), false);
        Assertions.assertThat(response).isInstanceOf(ErrorMessage.class);
        // Block until the exclusion log line appears (logging may lag the client response).
        logs.watchFor(mark, "address contained in client_error_reporting_exclusions");
        // Exclusion means neither error metric may have been incremented.
        Assertions.assertThat(node.metrics().getCounter("org.apache.cassandra.metrics.Client.ProtocolException")).isEqualTo(0);
        Assertions.assertThat(node.metrics().getCounter("org.apache.cassandra.metrics.Client.UnknownException")).isEqualTo(0);
        // NOTE(review): pattern appears truncated ("...fo" vs "for") — presumably an
        // intentional prefix match on the actual log message; confirm against the server code.
        Assertions.assertThat(logs.grep(mark, "Excluding client exception fo").getResult()).hasSize(1);
        Assertions.assertThat(logs.grep(mark, "Unexpected exception during request").getResult()).isEmpty();
    }
}
Use of org.apache.cassandra.distributed.api.LogAction in the Apache Cassandra project:
class JVMDTestTest, method instanceLogs.
@Test
public void instanceLogs() throws IOException, TimeoutException {
    try (Cluster cluster = init(Cluster.build(2).withConfig(c -> c.with(Feature.values())).start())) {
        // Debug logging is enabled for the cluster, so node 1 must have DEBUG lines.
        Assertions.assertThat(cluster.get(1).logs().grep("^DEBUG").getResult()).isNotEmpty();
        LogAction node2Logs = cluster.get(2).logs();
        // Remember the current end of the log so watchFor ignores any earlier exceptions.
        long position = node2Logs.mark();
        // Simulate an uncaught exception being reported on node 2.
        cluster.get(2).runOnInstance(() -> JVMStabilityInspector.uncaughtException(Thread.currentThread(), new RuntimeException("fail without fail")));
        List<String> errorLines = node2Logs.watchFor(position, "^ERROR").getResult();
        Assertions.assertThat(errorLines)
                  .allMatch(s -> s.contains("ERROR"))
                  .allMatch(s -> s.contains("isolatedExecutor"))
                  .allMatch(s -> s.contains("Exception in thread"))
                  .as("Unable to find 'ERROR', 'isolatedExecutor', and 'Exception in thread'")
                  .isNotEmpty();
    }
}
Aggregations