Use of com.datastax.driver.core.Metadata in project smscgateway by RestComm.
Class NN_DBOper, method stop().
public void stop() throws Exception {
    if (!this.started)
        return;
    cluster.close();
    // cluster.shutdown();
    Metadata metadata = cluster.getMetadata();
    logger.info(String.format("Disconnected from cluster: %s\n", metadata.getClusterName()));
    this.started = false;
}
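For comparison, a minimal standalone sketch of the same lifecycle (the contact point, class name, and started flag below are assumptions for illustration, not part of the smscgateway code). Reading the metadata before close() avoids relying on the driver allowing metadata access on an already-closed Cluster:

import com.datastax.driver.core.Cluster;
import com.datastax.driver.core.Metadata;

public class ShutdownSketch {
    private Cluster cluster;
    private boolean started;

    public void start() {
        // hypothetical contact point; replace with a real node address
        cluster = Cluster.builder().addContactPoint("127.0.0.1").build();
        started = true;
    }

    public void stop() {
        if (!started) {
            return;
        }
        // capture the cluster name before closing, then shut the cluster down
        Metadata metadata = cluster.getMetadata();
        cluster.close();
        System.out.printf("Disconnected from cluster: %s%n", metadata.getClusterName());
        started = false;
    }
}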
Use of com.datastax.driver.core.Metadata in project ff4j by ff4j.
Class CassandraConnection, method initSession().
/**
 * Init Cassandra session from Cluster.
 */
public void initSession() {
    if (null == cluster) {
        Builder builder = Cluster.builder().addContactPoint(hostName).withPort(port);
        if (Util.hasLength(userName)) {
            builder.withCredentials(userName, userPassword);
        }
        this.cluster = builder.build();
    }
    Metadata metadata = cluster.getMetadata();
    LOGGER.info("Connecting to cluster... '{}'", metadata.getClusterName());
    for (Host host : metadata.getAllHosts()) {
        LOGGER.info("Datacenter: '{}' Host: '{}' Rack '{}'", host.getDatacenter(), host.getAddress(), host.getRack());
    }
    this.session = cluster.connect();
    LOGGER.info("Connection Successful.");
}
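Beyond listing hosts, the same Metadata object supports schema introspection once the Cluster is built. A small sketch under the assumption of a reachable node on 127.0.0.1; the keyspace name "ff4j" is used here only as a placeholder:

import com.datastax.driver.core.Cluster;
import com.datastax.driver.core.KeyspaceMetadata;
import com.datastax.driver.core.Metadata;
import com.datastax.driver.core.TableMetadata;

public class SchemaIntrospectionSketch {
    public static void main(String[] args) {
        try (Cluster cluster = Cluster.builder().addContactPoint("127.0.0.1").build()) {
            Metadata metadata = cluster.getMetadata();
            // placeholder keyspace name; replace with one that exists in your cluster
            KeyspaceMetadata keyspace = metadata.getKeyspace("ff4j");
            if (keyspace != null) {
                for (TableMetadata table : keyspace.getTables()) {
                    System.out.println("Table: " + table.getName());
                }
            }
        }
    }
}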
Use of com.datastax.driver.core.Metadata in project atlasdb by palantir.
Class CqlKeyValueService, method initializeConnectionPool().
@SuppressWarnings("CyclomaticComplexity")
protected void initializeConnectionPool() {
    Collection<InetSocketAddress> configuredHosts = config.servers();
    Cluster.Builder clusterBuilder = Cluster.builder();
    clusterBuilder.addContactPointsWithPorts(configuredHosts);
    // for JMX metrics
    clusterBuilder.withClusterName("atlas_cassandra_cluster_" + config.getKeyspaceOrThrow());
    clusterBuilder.withCompression(Compression.LZ4);
    if (config.sslConfiguration().isPresent()) {
        SSLContext sslContext = SslSocketFactories.createSslContext(config.sslConfiguration().get());
        SSLOptions sslOptions = new SSLOptions(sslContext, SSLOptions.DEFAULT_SSL_CIPHER_SUITES);
        clusterBuilder.withSSL(sslOptions);
    } else if (config.ssl().isPresent() && config.ssl().get()) {
        clusterBuilder.withSSL();
    }
    PoolingOptions poolingOptions = new PoolingOptions();
    poolingOptions.setMaxRequestsPerConnection(HostDistance.LOCAL, config.poolSize());
    poolingOptions.setMaxRequestsPerConnection(HostDistance.REMOTE, config.poolSize());
    poolingOptions.setPoolTimeoutMillis(config.cqlPoolTimeoutMillis());
    clusterBuilder.withPoolingOptions(poolingOptions);
    // defaults for queries; can override on per-query basis
    QueryOptions queryOptions = new QueryOptions();
    queryOptions.setFetchSize(config.fetchBatchCount());
    clusterBuilder.withQueryOptions(queryOptions);
    // Refuse to talk to nodes twice as (latency-wise) slow as the best one, over a timescale of 100ms,
    // and every 10s try to re-evaluate ignored nodes' performance by giving them queries again.
    // Note we are being purposely datacenter-irreverent here, instead relying on latency alone
    // to approximate what DCAwareRR would do;
    // this is because DCs for Atlas are always quite latency-close and should be used this way,
    // not as if we have some cross-country backup DC.
    LoadBalancingPolicy policy = LatencyAwarePolicy.builder(new RoundRobinPolicy()).build();
    // If the user wants, do not automatically add new nodes to the pool (useful during DC migrations / rebuilds)
    if (!config.autoRefreshNodes()) {
        policy = new WhiteListPolicy(policy, configuredHosts);
    }
    // also try and select coordinators who own the data we're talking about to avoid an extra hop,
    // but also shuffle which replica we talk to for a load balancing that comes at the expense
    // of less effective caching
    policy = new TokenAwarePolicy(policy, true);
    clusterBuilder.withLoadBalancingPolicy(policy);
    Metadata metadata;
    try {
        cluster = clusterBuilder.build();
        // special; this is the first place we connect to hosts, this is where people will see failures
        metadata = cluster.getMetadata();
    } catch (NoHostAvailableException e) {
        if (e.getMessage().contains("Unknown compression algorithm")) {
            clusterBuilder.withCompression(Compression.NONE);
            cluster = clusterBuilder.build();
            metadata = cluster.getMetadata();
        } else {
            throw e;
        }
    } catch (IllegalStateException e) {
        // god dammit datastax what did I do to _you_
        if (e.getMessage().contains("requested compression is not available")) {
            clusterBuilder.withCompression(Compression.NONE);
            cluster = clusterBuilder.build();
            metadata = cluster.getMetadata();
        } else {
            throw e;
        }
    }
    session = cluster.connect();
    clusterBuilder.withSocketOptions(new SocketOptions().setReadTimeoutMillis(CassandraConstants.LONG_RUNNING_QUERY_SOCKET_TIMEOUT_MILLIS));
    longRunningQueryCluster = clusterBuilder.build();
    longRunningQuerySession = longRunningQueryCluster.connect();
    cqlStatementCache = new CqlStatementCache(session, longRunningQuerySession);
    cqlKeyValueServices = new CqlKeyValueServices();
    if (log.isInfoEnabled()) {
        StringBuilder hostInfo = new StringBuilder();
        for (Host host : metadata.getAllHosts()) {
            hostInfo.append(String.format("Datacenter: %s; Host: %s; Rack: %s%n", host.getDatacenter(), host.getAddress(), host.getRack()));
        }
        log.info("Initialized cassandra cluster using new API with hosts {}, seen keyspaces {}, cluster name {}", hostInfo.toString(), metadata.getKeyspaces(), metadata.getClusterName());
    }
}
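The load-balancing setup above wraps token awareness around latency awareness around plain round robin. A sketch of that wiring in isolation, assuming a driver 3.x classpath; the host and port are placeholders and the build method name is not from the atlasdb code:

import java.net.InetSocketAddress;
import java.util.Arrays;
import java.util.Collection;

import com.datastax.driver.core.Cluster;
import com.datastax.driver.core.policies.LatencyAwarePolicy;
import com.datastax.driver.core.policies.LoadBalancingPolicy;
import com.datastax.driver.core.policies.RoundRobinPolicy;
import com.datastax.driver.core.policies.TokenAwarePolicy;
import com.datastax.driver.core.policies.WhiteListPolicy;

public class PolicyChainSketch {
    public static Cluster build(Collection<InetSocketAddress> hosts, boolean autoRefreshNodes) {
        // latency-aware round robin: temporarily ignore nodes that are much slower than the best one
        LoadBalancingPolicy policy = LatencyAwarePolicy.builder(new RoundRobinPolicy()).build();
        // optionally restrict the pool to the configured hosts only
        if (!autoRefreshNodes) {
            policy = new WhiteListPolicy(policy, hosts);
        }
        // prefer replicas owning the requested token, shuffling among them
        policy = new TokenAwarePolicy(policy, true);
        return Cluster.builder()
                .addContactPointsWithPorts(hosts)
                .withLoadBalancingPolicy(policy)
                .build();
    }

    public static void main(String[] args) {
        // placeholder address; replace with real cluster contact points
        Collection<InetSocketAddress> hosts = Arrays.asList(new InetSocketAddress("127.0.0.1", 9042));
        try (Cluster cluster = build(hosts, false)) {
            System.out.println(cluster.getMetadata().getClusterName());
        }
    }
}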
Use of com.datastax.driver.core.Metadata in project tutorials by eugenp.
Class CassandraConnector, method connect().
public void connect(final String node, final Integer port) {
    Builder b = Cluster.builder().addContactPoint(node);
    if (port != null) {
        b.withPort(port);
    }
    cluster = b.build();
    Metadata metadata = cluster.getMetadata();
    LOG.info("Cluster name: " + metadata.getClusterName());
    for (Host host : metadata.getAllHosts()) {
        LOG.info("Datacenter: " + host.getDatacenter() + " Host: " + host.getAddress() + " Rack: " + host.getRack());
    }
    session = cluster.connect();
}
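A possible caller for this connector; the host and port below are placeholders, and how the resulting session is exposed afterwards depends on the rest of the tutorial class:

public class ConnectorUsageSketch {
    public static void main(String[] args) {
        CassandraConnector connector = new CassandraConnector();
        // hypothetical host and port; adjust to the target cluster
        connector.connect("127.0.0.1", 9042);
        // ... the connector's session can now be used for queries ...
    }
}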
Use of com.datastax.driver.core.Metadata in project cassandra by apache.
Class CqlInputFormat, method getSplits().
public List<org.apache.hadoop.mapreduce.InputSplit> getSplits(JobContext context) throws IOException {
    Configuration conf = HadoopCompat.getConfiguration(context);
    validateConfiguration(conf);
    keyspace = ConfigHelper.getInputKeyspace(conf);
    cfName = ConfigHelper.getInputColumnFamily(conf);
    partitioner = ConfigHelper.getInputPartitioner(conf);
    logger.trace("partitioner is {}", partitioner);
    // canonical ranges, split into pieces, fetching the splits in parallel
    ExecutorService executor = executorFactory().pooled("HadoopInput", 128);
    List<org.apache.hadoop.mapreduce.InputSplit> splits = new ArrayList<>();
    String[] inputInitialAddress = ConfigHelper.getInputInitialAddress(conf).split(",");
    try (Cluster cluster = CqlConfigHelper.getInputCluster(inputInitialAddress, conf);
         Session session = cluster.connect()) {
        List<SplitFuture> splitfutures = new ArrayList<>();
        // TODO if the job range is defined and does perfectly match tokens, then the logic will be unable to get estimates since they are pre-computed
        // tokens: [0, 10, 20]
        // job range: [0, 10) - able to get estimate
        // job range: [5, 15) - unable to get estimate
        Pair<String, String> jobKeyRange = ConfigHelper.getInputKeyRange(conf);
        Range<Token> jobRange = null;
        if (jobKeyRange != null) {
            jobRange = new Range<>(partitioner.getTokenFactory().fromString(jobKeyRange.left), partitioner.getTokenFactory().fromString(jobKeyRange.right));
        }
        Metadata metadata = cluster.getMetadata();
        // canonical ranges and nodes holding replicas
        Map<TokenRange, List<Host>> masterRangeNodes = getRangeMap(keyspace, metadata, getTargetDC(metadata, inputInitialAddress));
        for (TokenRange range : masterRangeNodes.keySet()) {
            if (jobRange == null) {
                for (TokenRange unwrapped : range.unwrap()) {
                    // for each tokenRange, pick a live owner and ask it for the byte-sized splits
                    SplitFuture task = new SplitFuture(new SplitCallable(unwrapped, masterRangeNodes.get(range), conf, session));
                    executor.submit(task);
                    splitfutures.add(task);
                }
            } else {
                TokenRange jobTokenRange = rangeToTokenRange(metadata, jobRange);
                if (range.intersects(jobTokenRange)) {
                    for (TokenRange intersection : range.intersectWith(jobTokenRange)) {
                        for (TokenRange unwrapped : intersection.unwrap()) {
                            // for each tokenRange, pick a live owner and ask it for the byte-sized splits
                            SplitFuture task = new SplitFuture(new SplitCallable(unwrapped, masterRangeNodes.get(range), conf, session));
                            executor.submit(task);
                            splitfutures.add(task);
                        }
                    }
                }
            }
        }
        // wait until we have all the results back
        List<SplitFuture> failedTasks = new ArrayList<>();
        int maxSplits = 0;
        long expectedPartionsForFailedRanges = 0;
        for (SplitFuture task : splitfutures) {
            try {
                List<ColumnFamilySplit> tokenRangeSplits = task.get();
                if (tokenRangeSplits.size() > maxSplits) {
                    maxSplits = tokenRangeSplits.size();
                    expectedPartionsForFailedRanges = tokenRangeSplits.get(0).getLength();
                }
                splits.addAll(tokenRangeSplits);
            } catch (Exception e) {
                failedTasks.add(task);
            }
        }
        // This logic attempts to guess the estimate from all the successful ranges
        if (!failedTasks.isEmpty()) {
            // if every split failed this will be 0
            if (maxSplits == 0)
                throwAllSplitsFailed(failedTasks);
            for (SplitFuture task : failedTasks) {
                try {
                    // the task failed, so this should throw
                    task.get();
                } catch (Exception cause) {
                    logger.warn("Unable to get estimate for {}, the host {} had an exception; falling back to default estimate", task.splitCallable.tokenRange, task.splitCallable.hosts.get(0), cause);
                }
            }
            for (SplitFuture task : failedTasks)
                splits.addAll(toSplit(task.splitCallable.hosts, splitTokenRange(task.splitCallable.tokenRange, maxSplits, expectedPartionsForFailedRanges)));
        }
    } finally {
        executor.shutdownNow();
    }
    assert splits.size() > 0;
    Collections.shuffle(splits, new Random(nanoTime()));
    return splits;
}
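The getRangeMap call above builds a view of which hosts hold replicas for each token range; the driver's Metadata exposes the pieces for a simplified version of that mapping. A sketch assuming a reachable cluster, with "ks" as a placeholder keyspace name:

import java.util.HashMap;
import java.util.Map;
import java.util.Set;

import com.datastax.driver.core.Cluster;
import com.datastax.driver.core.Host;
import com.datastax.driver.core.Metadata;
import com.datastax.driver.core.TokenRange;

public class RangeMapSketch {
    // map every token range in the ring to the hosts holding replicas for it
    public static Map<TokenRange, Set<Host>> rangeMap(Metadata metadata, String keyspace) {
        Map<TokenRange, Set<Host>> result = new HashMap<>();
        for (TokenRange range : metadata.getTokenRanges()) {
            result.put(range, metadata.getReplicas(keyspace, range));
        }
        return result;
    }

    public static void main(String[] args) {
        try (Cluster cluster = Cluster.builder().addContactPoint("127.0.0.1").build()) {
            Map<TokenRange, Set<Host>> ranges = rangeMap(cluster.getMetadata(), "ks");
            System.out.println("Token ranges: " + ranges.size());
        }
    }
}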