Usage example of org.apache.cassandra.locator.InetAddressAndPort from the Apache Cassandra project:
class UUIDGen, method hash.
/**
 * Builds an MD5 digest intended to uniquely identify this process on this host.
 * The digest folds in every local address (raw bytes plus port) to identify the
 * machine, then the PID and the class-loader identity hash to distinguish
 * processes (and class-loader instances) running on the same machine.
 *
 * @param data the local broadcast addresses of this node
 * @return the 16-byte MD5 digest
 */
private static byte[] hash(Collection<InetAddressAndPort> data) {
    Hasher digest = Hashing.md5().newHasher();
    // Host identity: every address contributes its raw bytes and its port.
    for (InetAddressAndPort endpoint : data) {
        digest.putBytes(endpoint.addressBytes);
        digest.putInt(endpoint.getPort());
    }
    // Process identity: PID when available, otherwise a time-seeded random value.
    long pid = NativeLibrary.getProcessID();
    if (pid < 0)
        pid = new Random(currentTimeMillis()).nextLong();
    updateWithLong(digest, pid);
    // Class-loader identity separates multiple deployments inside one JVM.
    ClassLoader loader = UUIDGen.class.getClassLoader();
    updateWithInt(digest, loader == null ? 0 : System.identityHashCode(loader));
    return digest.hash().asBytes();
}
Usage example of org.apache.cassandra.locator.InetAddressAndPort from the Apache Cassandra project:
class AssassinateAbruptDownedNodeTest, method afterNodeStatusIsLeft.
@Override
protected void afterNodeStatusIsLeft(Cluster cluster, IInvokableInstance removedNode) {
    // Verify that keyspaces can still be altered (see CASSANDRA-16422).
    // Step 1: convict the removed node so the gossiper treats it as unreachable.
    InetSocketAddress broadcast = removedNode.config().broadcastAddress();
    InetAddressAndPort endpoint = InetAddressAndPort.getByAddressOverrideDefaults(broadcast.getAddress(), broadcast.getPort());
    cluster.get(BaseAssassinatedCase.SEED_NUM).runOnInstance(() -> Gossiper.instance.convict(endpoint, 1.0));
    // Step 2: attempt the ALTER. Before the fix this failed because the
    // availability check counted unreachable nodes that still had LEFT status.
    cluster.schemaChangeIgnoringStoppedInstances(String.format("ALTER KEYSPACE %s WITH DURABLE_WRITES = false", KEYSPACE));
}
Usage example of org.apache.cassandra.locator.InetAddressAndPort from the Apache Cassandra project:
class RepairCoordinatorNeighbourDown, method neighbourDown.
// Verifies that a repair fails fast with "Endpoint not alive" when a
// replica (node 2) is down, and that the failure is reported through the
// repair-exception counter, progress notifications, and the parent-repair
// table. The whole scenario is bounded to one minute.
@Test
public void neighbourDown() {
String table = tableName("neighbourdown");
assertTimeoutPreemptively(Duration.ofMinutes(1), () -> {
CLUSTER.schemaChange(format("CREATE TABLE %s.%s (key text, value text, PRIMARY KEY (key))", KEYSPACE, table));
// Capture node 2's address as a String BEFORE shutting it down; a String
// (unlike InetAddressAndPort) can safely cross the instance class-loader
// boundary into node 1's lambda below.
String downNodeAddress = CLUSTER.get(2).callOnInstance(() -> FBUtilities.getBroadcastAddressAndPort().getHostAddressAndPort());
Future<Void> shutdownFuture = CLUSTER.get(2).shutdown();
try {
// wait for the node to stop
shutdownFuture.get();
// wait for the failure detector to detect this
CLUSTER.get(1).runOnInstance(() -> {
InetAddressAndPort neighbor;
try {
neighbor = InetAddressAndPort.getByName(downNodeAddress);
} catch (UnknownHostException e) {
throw new RuntimeException(e);
}
// Poll until node 1's failure detector marks the neighbour dead;
// bounded overall by the one-minute assertTimeoutPreemptively above.
while (FailureDetector.instance.isAlive(neighbor)) Uninterruptibles.sleepUninterruptibly(500, TimeUnit.MILLISECONDS);
});
// Snapshot the exception counter so we can assert it grows by exactly one.
long repairExceptions = getRepairExceptions(CLUSTER, 1);
NodeToolResult result = repair(1, KEYSPACE, table);
result.asserts().failure().errorContains("Endpoint not alive");
if (withNotifications) {
result.asserts().notificationContains(NodeToolResult.ProgressEventType.START, "Starting repair command").notificationContains(NodeToolResult.ProgressEventType.START, "repairing keyspace " + KEYSPACE + " with repair options").notificationContains(NodeToolResult.ProgressEventType.ERROR, "Endpoint not alive").notificationContains(NodeToolResult.ProgressEventType.COMPLETE, "finished with error");
}
Assert.assertEquals(repairExceptions + 1, getRepairExceptions(CLUSTER, 1));
} finally {
// Always restart node 2, even if the assertions above failed.
CLUSTER.get(2).startup();
}
// make sure to call outside of the try/finally so the node is up so we can actually query
if (repairType != RepairType.PREVIEW) {
assertParentRepairFailedWithMessageContains(CLUSTER, KEYSPACE, table, "Endpoint not alive");
} else {
// Preview repairs do not record a parent-repair entry at all.
assertParentRepairNotExist(CLUSTER, KEYSPACE, table);
}
});
}
Usage example of org.apache.cassandra.locator.InetAddressAndPort from the Apache Cassandra project:
class StreamingMetricsTest, method checkDataReceived.
/**
 * Asserts the incoming-streaming metrics that {@code node} recorded for {@code peer}:
 * the total number of bytes received and the number of files (the
 * incomingProcessTime timer is updated once per incoming file, so its count
 * equals the file count).
 */
private void checkDataReceived(Cluster cluster, int node, int peer, long receivedBytes, int files) {
    InetAddressAndPort peerAddress = getNodeAddress(cluster, peer);
    cluster.get(node).runOnInstance(() -> {
        StreamingMetrics peerMetrics = StreamingMetrics.get(peerAddress);
        long bytesIn = peerMetrics.incomingBytes.getCount();
        assertThat(bytesIn).describedAs("The amount of data received by node" + node + " from node" + peer + " is not the expected one. [expected: " + receivedBytes + ", actual: " + bytesIn + "]").isEqualTo(receivedBytes);
        // One timer update per received file, so the timer count is the file count.
        long filesIn = peerMetrics.incomingProcessTime.getCount();
        assertThat(filesIn).describedAs("The amount of files received by node" + node + " from node" + peer + " is not the expected one. [expected: " + files + ", actual: " + filesIn + "]").isEqualTo(files);
        if (peerMetrics.incomingProcessTime.getCount() != 0) {
            // If anything was streamed, processing it must have taken measurable time.
            assertThat(peerMetrics.incomingProcessTime.getSnapshot().getMedian()).describedAs("The median processing time for data streamed from node" + peer + " to node" + node + " should be non-0").isGreaterThan(0);
        }
    });
}
Usage example of org.apache.cassandra.locator.InetAddressAndPort from the Apache Cassandra project:
class StreamingMetricsTest, method checkThatNoStreamingOccured.
/**
 * Asserts that {@code node} has recorded no streaming traffic at all (incoming
 * bytes, outgoing bytes, or processed files) for {@code peer}.
 * NOTE(review): "Occured" in the name is a typo ("Occurred"), but renaming would
 * break existing callers, so the signature is kept as-is.
 */
private void checkThatNoStreamingOccured(Cluster cluster, int node, int peer) {
    InetAddressAndPort peerAddress = getNodeAddress(cluster, peer);
    cluster.get(node).runOnInstance(() -> {
        StreamingMetrics peerMetrics = StreamingMetrics.get(peerAddress);
        // The three checks share the same failure description.
        String description = "No SSTable should have been streamed so far from node" + node + " to node" + peer;
        assertThat(peerMetrics.incomingBytes.getCount()).describedAs(description).isEqualTo(0);
        assertThat(peerMetrics.outgoingBytes.getCount()).describedAs(description).isEqualTo(0);
        assertThat(peerMetrics.incomingProcessTime.getCount()).describedAs(description).isEqualTo(0);
    });
}
Aggregations