Use of org.apache.cassandra.utils.concurrent.UncheckedInterruptedException in project cassandra by apache.
The class StorageProxy, method describeSchemaVersions.
/**
 * initiate a request/response session with each live node to check whether or not everybody is using the same
 * migration id. This is useful for determining if a schema change has propagated through the cluster. Disagreement
 * is assumed if any node fails to respond.
 */
public static Map<String, List<String>> describeSchemaVersions(boolean withPort) {
    final String myVersion = Schema.instance.getVersion().toString();
    final Map<InetAddressAndPort, UUID> versions = new ConcurrentHashMap<>();
    final Set<InetAddressAndPort> liveHosts = Gossiper.instance.getLiveMembers();
    final CountDownLatch latch = newCountDownLatch(liveHosts.size());
    RequestCallback<UUID> cb = message -> {
        // record the response from the remote node.
        versions.put(message.from(), message.payload);
        latch.decrement();
    };
    // an empty message acts as a request to the SchemaVersionVerbHandler.
    Message message = out(SCHEMA_VERSION_REQ, noPayload);
    for (InetAddressAndPort endpoint : liveHosts)
        MessagingService.instance().sendWithCallback(message, endpoint, cb);
    try {
        // wait for as long as possible. timeout-1s if possible.
        latch.await(DatabaseDescriptor.getRpcTimeout(NANOSECONDS), NANOSECONDS);
    } catch (InterruptedException e) {
        throw new UncheckedInterruptedException(e);
    }
    // maps versions to hosts that are on that version.
    Map<String, List<String>> results = new HashMap<String, List<String>>();
    Iterable<InetAddressAndPort> allHosts = concat(Gossiper.instance.getLiveMembers(), Gossiper.instance.getUnreachableMembers());
    for (InetAddressAndPort host : allHosts) {
        UUID version = versions.get(host);
        String stringVersion = version == null ? UNREACHABLE : version.toString();
        List<String> hosts = results.get(stringVersion);
        if (hosts == null) {
            hosts = new ArrayList<String>();
            results.put(stringVersion, hosts);
        }
        hosts.add(host.getHostAddress(withPort));
    }
    // we're done: the results map is ready to return to the client. the rest is just debug logging:
    if (results.get(UNREACHABLE) != null)
        logger.debug("Hosts not in agreement. Didn't get a response from everybody: {}", join(results.get(UNREACHABLE), ","));
    for (Map.Entry<String, List<String>> entry : results.entrySet()) {
        // check for version disagreement. log the hosts that don't agree.
        if (entry.getKey().equals(UNREACHABLE) || entry.getKey().equals(myVersion))
            continue;
        for (String host : entry.getValue())
            logger.debug("{} disagrees ({})", host, entry.getKey());
    }
    if (results.size() == 1)
        logger.debug("Schemas are in agreement.");
    return results;
}
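The shape shared by every snippet on this page is: perform an interruptible wait, then rethrow the checked InterruptedException as UncheckedInterruptedException so the interrupt propagates through interfaces that do not declare checked exceptions. A minimal standalone sketch of that shape, using the plain JDK CountDownLatch rather than Cassandra's newCountDownLatch wrapper (the class and method names below are illustrative, not project code):

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import org.apache.cassandra.utils.concurrent.UncheckedInterruptedException;

final class LatchAwaitExample {
    static void awaitResponses(CountDownLatch latch, long timeoutNanos) {
        try {
            // await returns false on timeout; like describeSchemaVersions, the
            // caller proceeds with whatever responses arrived before the deadline
            latch.await(timeoutNanos, TimeUnit.NANOSECONDS);
        } catch (InterruptedException e) {
            // preserve the interrupt for callers that cannot declare it
            throw new UncheckedInterruptedException(e);
        }
    }
}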
Use of org.apache.cassandra.utils.concurrent.UncheckedInterruptedException in project cassandra by apache.
The class StorageService, method rebuild.
public void rebuild(String sourceDc, String keyspace, String tokens, String specificSources) {
    // check ongoing rebuild
    if (!isRebuilding.compareAndSet(false, true)) {
        throw new IllegalStateException("Node is still rebuilding. Check nodetool netstats.");
    }
    // check the arguments
    if (keyspace == null && tokens != null) {
        throw new IllegalArgumentException("Cannot specify tokens without keyspace.");
    }
    logger.info("rebuild from dc: {}, {}, {}",
                sourceDc == null ? "(any dc)" : sourceDc,
                keyspace == null ? "(All keyspaces)" : keyspace,
                tokens == null ? "(All tokens)" : tokens);
    try {
        RangeStreamer streamer = new RangeStreamer(tokenMetadata,
                                                   null,
                                                   FBUtilities.getBroadcastAddressAndPort(),
                                                   StreamOperation.REBUILD,
                                                   useStrictConsistency && !replacing,
                                                   DatabaseDescriptor.getEndpointSnitch(),
                                                   streamStateStore,
                                                   false,
                                                   DatabaseDescriptor.getStreamingConnectionsPerHost());
        if (sourceDc != null)
            streamer.addSourceFilter(new RangeStreamer.SingleDatacenterFilter(DatabaseDescriptor.getEndpointSnitch(), sourceDc));
        if (keyspace == null) {
            for (String keyspaceName : Schema.instance.getNonLocalStrategyKeyspaces())
                streamer.addRanges(keyspaceName, getLocalReplicas(keyspaceName));
        } else if (tokens == null) {
            streamer.addRanges(keyspace, getLocalReplicas(keyspace));
        } else {
            Token.TokenFactory factory = getTokenFactory();
            List<Range<Token>> ranges = new ArrayList<>();
            Pattern rangePattern = Pattern.compile("\\(\\s*(-?\\w+)\\s*,\\s*(-?\\w+)\\s*\\]");
            try (Scanner tokenScanner = new Scanner(tokens)) {
                while (tokenScanner.findInLine(rangePattern) != null) {
                    MatchResult range = tokenScanner.match();
                    Token startToken = factory.fromString(range.group(1));
                    Token endToken = factory.fromString(range.group(2));
                    logger.info("adding range: ({},{}]", startToken, endToken);
                    ranges.add(new Range<>(startToken, endToken));
                }
                if (tokenScanner.hasNext())
                    throw new IllegalArgumentException("Unexpected string: " + tokenScanner.next());
            }
            // Ensure all specified ranges are actually ranges owned by this host
            RangesAtEndpoint localReplicas = getLocalReplicas(keyspace);
            RangesAtEndpoint.Builder streamRanges = new RangesAtEndpoint.Builder(FBUtilities.getBroadcastAddressAndPort(), ranges.size());
            for (Range<Token> specifiedRange : ranges) {
                boolean foundParentRange = false;
                for (Replica localReplica : localReplicas) {
                    if (localReplica.contains(specifiedRange)) {
                        streamRanges.add(localReplica.decorateSubrange(specifiedRange));
                        foundParentRange = true;
                        break;
                    }
                }
                if (!foundParentRange) {
                    throw new IllegalArgumentException(String.format("The specified range %s is not a range that is owned by this node. Please ensure that all token ranges specified to be rebuilt belong to this node.", specifiedRange.toString()));
                }
            }
            if (specificSources != null) {
                String[] stringHosts = specificSources.split(",");
                Set<InetAddressAndPort> sources = new HashSet<>(stringHosts.length);
                for (String stringHost : stringHosts) {
                    try {
                        InetAddressAndPort endpoint = InetAddressAndPort.getByName(stringHost);
                        if (FBUtilities.getBroadcastAddressAndPort().equals(endpoint)) {
                            throw new IllegalArgumentException("This host was specified as a source for rebuilding. Sources for a rebuild can only be other nodes in the cluster.");
                        }
                        sources.add(endpoint);
                    } catch (UnknownHostException ex) {
                        throw new IllegalArgumentException("Unknown host specified " + stringHost, ex);
                    }
                }
                streamer.addSourceFilter(new RangeStreamer.AllowedSourcesFilter(sources));
            }
            streamer.addRanges(keyspace, streamRanges.build());
        }
        StreamResultFuture resultFuture = streamer.fetchAsync();
        // wait for result
        resultFuture.get();
    } catch (InterruptedException e) {
        throw new UncheckedInterruptedException(e);
    } catch (ExecutionException e) {
        // This is used exclusively through JMX, so log the full trace but only throw a simple RTE
        logger.error("Error while rebuilding node", e.getCause());
        throw new RuntimeException("Error while rebuilding node: " + e.getCause().getMessage());
    } finally {
        // rebuild is done (successfully or not)
        isRebuilding.set(false);
    }
}
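The tokens string is parsed as a sequence of (start,end] ranges by repeatedly matching rangePattern with a Scanner; findInLine skips the separators between matches. A standalone sketch of just that parsing step, with a made-up input string (actual token values depend on the configured partitioner; Murmur3 tokens are signed longs, which -?\w+ matches):

import java.util.Scanner;
import java.util.regex.MatchResult;
import java.util.regex.Pattern;

final class RangeParseExample {
    public static void main(String[] args) {
        Pattern rangePattern = Pattern.compile("\\(\\s*(-?\\w+)\\s*,\\s*(-?\\w+)\\s*\\]");
        // hypothetical input in the shape rebuild() accepts for its tokens parameter
        try (Scanner tokenScanner = new Scanner("(0,100],(-9223372036854775808,-1]")) {
            while (tokenScanner.findInLine(rangePattern) != null) {
                MatchResult range = tokenScanner.match();
                // group(1) is the exclusive start token, group(2) the inclusive end
                System.out.printf("start=%s end=%s%n", range.group(1), range.group(2));
            }
        }
    }
}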
Use of org.apache.cassandra.utils.concurrent.UncheckedInterruptedException in project cassandra by apache.
The class FullQueryLogger, method stop.
public synchronized void stop() {
    try {
        BinLog binLog = this.binLog;
        if (binLog != null) {
            logger.info("Stopping full query logging to {}", binLog.path);
            binLog.stop();
        } else {
            logger.info("Full query log already stopped");
        }
    } catch (InterruptedException e) {
        throw new UncheckedInterruptedException(e);
    } finally {
        QueryEvents.instance.unregisterListener(this);
        this.binLog = null;
    }
}
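The method copies the field into a local before the null check, stops the log inside try, and performs cleanup in finally, so a second call becomes a harmless no-op and the listener is unregistered even if the stop is interrupted. The same shape in isolation (the Resource interface below is a hypothetical stand-in for BinLog, not a project type):

import org.apache.cassandra.utils.concurrent.UncheckedInterruptedException;

final class StopOnceExample {
    interface Resource { void stop() throws InterruptedException; }

    private Resource resource; // accessed only from synchronized stop()

    public synchronized void stop() {
        try {
            Resource r = this.resource;
            if (r != null)
                r.stop(); // may block while the resource drains
        } catch (InterruptedException e) {
            throw new UncheckedInterruptedException(e);
        } finally {
            this.resource = null; // cleanup runs even on interrupt; second stop() is a no-op
        }
    }
}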
Use of org.apache.cassandra.utils.concurrent.UncheckedInterruptedException in project cassandra by apache.
The class BinLog, method logRecord.
// todo: refactor to helper class?
public void logRecord(ReleaseableWriteMarshallable record) {
    boolean putInQueue = false;
    try {
        if (blocking) {
            try {
                put(record);
                putInQueue = true;
            } catch (InterruptedException e) {
                throw new UncheckedInterruptedException(e);
            }
        } else {
            if (!offer(record)) {
                logDroppedSample();
            } else {
                putInQueue = true;
            }
        }
    } finally {
        if (!putInQueue) {
            record.release();
        }
    }
}
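The contract here is strict ownership transfer: a record is either accepted by the queue or released, never both and never leaked, and the putInQueue flag checked in finally enforces that for both the blocking and the dropping path. A standalone sketch of the same pattern over a JDK BlockingQueue (the Record interface and the queue capacity are assumptions for illustration; BinLog's put and offer are its own members wrapping an internal queue):

import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.BlockingQueue;
import org.apache.cassandra.utils.concurrent.UncheckedInterruptedException;

final class EnqueueOrReleaseExample {
    interface Record { void release(); }

    private final BlockingQueue<Record> queue = new ArrayBlockingQueue<>(1024);
    private final boolean blocking;

    EnqueueOrReleaseExample(boolean blocking) { this.blocking = blocking; }

    void log(Record record) {
        boolean putInQueue = false;
        try {
            if (blocking) {
                try {
                    queue.put(record); // waits for capacity
                    putInQueue = true;
                } catch (InterruptedException e) {
                    throw new UncheckedInterruptedException(e);
                }
            } else if (queue.offer(record)) { // drops instead of waiting
                putInQueue = true;
            }
        } finally {
            if (!putInQueue)
                record.release(); // the queue never took ownership
        }
    }
}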
Use of org.apache.cassandra.utils.concurrent.UncheckedInterruptedException in project cassandra by apache.
The class ExternalArchiver, method stop.
/**
 * Stops the archiver thread and tries to archive all existing files.
 *
 * This handles the case where a user explicitly disables the full/audit log and would expect all log files
 * to be archived, rolled or not.
 */
public void stop() {
    shouldContinue = false;
    try {
        // wait for the archiver thread to finish its queued work
        executor.submit(() -> {}).get();
        // and try to archive all remaining files before exiting
        archiveExisting(path);
    } catch (InterruptedException e) {
        throw new UncheckedInterruptedException(e);
    } catch (ExecutionException e) {
        throw new RuntimeException(e);
    }
}
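Submitting an empty task and blocking on its Future is a cheap drain barrier: on a single-threaded executor the no-op cannot run until every previously queued archive task has completed. A standalone sketch of that barrier, assuming a single-threaded ExecutorService as a serial archiver would use:

import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import org.apache.cassandra.utils.concurrent.UncheckedInterruptedException;

final class DrainBarrierExample {
    static void awaitQuiescence(ExecutorService executor) {
        try {
            // the no-op task runs only after all earlier tasks on a
            // single-threaded executor have finished
            executor.submit(() -> {}).get();
        } catch (InterruptedException e) {
            throw new UncheckedInterruptedException(e);
        } catch (ExecutionException e) {
            throw new RuntimeException(e);
        }
    }

    public static void main(String[] args) {
        ExecutorService executor = Executors.newSingleThreadExecutor();
        executor.submit(() -> System.out.println("archive file 1"));
        awaitQuiescence(executor);
        System.out.println("all queued work done");
        executor.shutdown();
    }
}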