use of com.netflix.astyanax.connectionpool.Host in project coprhd-controller by CoprHD.
In the class DbClientContext, the method init:
/**
 * Initializes the Astyanax keyspace context for the db service described by the
 * given host supplier: validates that hosts are available, configures the
 * connection pool (including SSL when client-to-node encryption is enabled),
 * starts the keyspace context, and — on a DR active site that is not multi-vdc —
 * schedules a periodic consistency-level check.
 *
 * @param hostSupplier supplier of Cassandra hosts for this db service; also
 *                     provides the coordinator client used for DR queries
 * @throws IllegalStateException if the supplier yields no hosts
 */
public void init(final HostSupplierImpl hostSupplier) {
    String svcName = hostSupplier.getDbSvcName();
    log.info("Initializing hosts for {}", svcName);
    List<Host> hosts = hostSupplier.get();
    // Fail fast when there is nothing to connect to. A null list is treated the
    // same as an empty one — the previous check (hosts != null && hosts.isEmpty())
    // let null slip through and continue initialization with zero hosts.
    if (hosts == null || hosts.isEmpty()) {
        throw new IllegalStateException(String.format("DbClientContext.init() : host list in hostsupplier for %s is empty", svcName));
    }
    log.info(String.format("number of hosts in the hostsupplier for %s is %d", svcName, hosts.size()));
    Partitioner murmur3partitioner = Murmur3Partitioner.get();
    Map<String, Partitioner> partitioners = new HashMap<>();
    // The map key must be the partitioner class name that the Cassandra cluster
    // reports (org.apache.cassandra.dht.Murmur3Partitioner). The previous code
    // used the literal string "...Murmur3Partitioner.class.getCanonicalName()"
    // (the quoted Java expression itself), which could never match.
    partitioners.put("org.apache.cassandra.dht.Murmur3Partitioner", murmur3partitioner);
    ConsistencyLevel readCL = ConsistencyLevel.CL_LOCAL_QUORUM;
    ConsistencyLevel writeCL = ConsistencyLevel.CL_EACH_QUORUM;
    ConnectionPoolConfigurationImpl cfg = new ConnectionPoolConfigurationImpl(DEFAULT_CN_POOL_NANE)
            .setMaxConns(maxConnections)
            .setMaxConnsPerHost(maxConnectionsPerHost)
            .setConnectTimeout(DEFAULT_CONN_TIMEOUT)
            .setMaxBlockedThreadsPerHost(DEFAULT_MAX_BLOCKED_THREADS)
            .setPartitioner(murmur3partitioner);
    log.info("The client to node is encrypted={}", isClientToNodeEncrypted);
    if (isClientToNodeEncrypted) {
        SSLConnectionContext sslContext = getSSLConnectionContext();
        cfg.setSSLConnectionContext(sslContext);
    }
    // TODO revisit it to see if we need set different retry policy, timeout, discovery delay etc for geodb
    keyspaceContext = new AstyanaxContext.Builder()
            .withHostSupplier(hostSupplier)
            .forCluster(clusterName)
            .forKeyspace(keyspaceName)
            .withAstyanaxConfiguration(new AstyanaxConfigurationImpl()
                    .setConnectionPoolType(ConnectionPoolType.ROUND_ROBIN)
                    .setDiscoveryDelayInSeconds(svcListPoolIntervalSec)
                    .setDefaultReadConsistencyLevel(readCL)
                    .setDefaultWriteConsistencyLevel(writeCL)
                    .setTargetCassandraVersion("2.0")
                    .setPartitioners(partitioners)
                    .setRetryPolicy(retryPolicy))
            .withConnectionPoolConfiguration(cfg)
            .withTracerFactory(new KeyspaceTracerFactoryImpl())
            .withConnectionPoolMonitor(new CustomConnectionPoolMonitor(monitorIntervalSecs))
            .buildKeyspace(ThriftFamilyFactory.getInstance());
    keyspaceContext.start();
    keyspace = keyspaceContext.getClient();
    // Check and reset default write consistency level
    final DrUtil drUtil = new DrUtil(hostSupplier.getCoordinatorClient());
    if (drUtil.isMultivdc()) {
        // geodb in mutlivdc should be EACH_QUORUM always. Never retry for write failures
        setRetryFailedWriteWithLocalQuorum(false);
        log.info("Retry for failed write with LOCAL_QUORUM: {}", retryFailedWriteWithLocalQuorum);
    } else {
        setRetryFailedWriteWithLocalQuorum(true);
    }
    if (drUtil.isActiveSite() && !drUtil.isMultivdc()) {
        log.info("Schedule db consistency level monitor on DR active site");
        exe.scheduleWithFixedDelay(new Runnable() {
            @Override
            public void run() {
                try {
                    // Best-effort periodic check; failures are logged and retried
                    // on the next scheduled run rather than killing the executor.
                    checkAndResetConsistencyLevel(drUtil, hostSupplier.getDbSvcName());
                } catch (Exception ex) {
                    log.warn("Encounter Unexpected exception during check consistency level. Retry in next run", ex);
                }
            }
        }, 60, DEFAULT_CONSISTENCY_LEVEL_CHECK_SEC, TimeUnit.SECONDS);
    }
    initDone = true;
}
use of com.netflix.astyanax.connectionpool.Host in project kork by spinnaker.
In the class EurekaHostSupplier, the method buildHost:
/**
 * Builds an Astyanax {@link Host} from a Eureka {@link InstanceInfo}.
 *
 * The instance hostname's first dot-separated label is split on '-' and
 * segments 1..4 are rejoined with dots to form an alternate IP address
 * (assumes an EC2-style hostname such as "ec2-10-0-0-1.region.compute..." —
 * NOTE(review): throws ArrayIndexOutOfBoundsException for other shapes; confirm
 * all registered instances follow that convention). The Eureka-reported IP is
 * added as a second alternate address. Rack assignment from the AWS
 * availability zone is best-effort: any failure is logged and ignored.
 */
static Host buildHost(InstanceInfo info) {
    String hostName = info.getHostName();
    String firstLabel = StringUtils.split(hostName, ".")[0];
    String[] segments = StringUtils.split(firstLabel, '-');
    String derivedIp = StringUtils.join(new String[] { segments[1], segments[2], segments[3], segments[4] }, ".");

    Host result = new Host(hostName, info.getPort());
    result.addAlternateIpAddress(derivedIp);
    result.addAlternateIpAddress(info.getIPAddr());
    result.setId(info.getId());

    try {
        if (info.getDataCenterInfo() instanceof AmazonInfo) {
            AmazonInfo amazonInfo = (AmazonInfo) info.getDataCenterInfo();
            result.setRack(amazonInfo.get(AmazonInfo.MetaDataKey.availabilityZone));
        }
    } catch (Throwable t) {
        // Deliberately swallow: rack is optional metadata and must not block host creation.
        logger.error("Error getting rack for host " + result.getName(), t);
    }
    return result;
}
use of com.netflix.astyanax.connectionpool.Host in project coprhd-controller by CoprHD.
In the class HostSupplierImpl, the method internalGet:
/**
 * Looks up all running instances of the configured db service in the
 * coordinator and converts their endpoints into Astyanax {@link Host} entries.
 *
 * For the geodb service, nodes whose database is still reinitializing are
 * skipped so the connection pool never targets a rebuilding node.
 *
 * @return hosts for all usable db service instances; an empty list when the
 *         lookup fails or no instance is running (never null)
 */
public List<Host> internalGet() {
    try {
        // Parameterized logging instead of string concatenation so the message
        // is only assembled when DEBUG is enabled.
        _log.debug("getting hosts for {}; version = {}", dbSvcName, _version);
        boolean isGeodb = Constants.GEODBSVC_NAME.equals(dbSvcName);
        List<Service> services = _coordinator.locateAllServices(dbSvcName, _version, (String) null, null);
        List<Host> hostList = new ArrayList<Host>(services.size());
        for (Service svc : services) {
            // Skip geodb nodes that are still rebuilding their database.
            if (isGeodb && isDbReinitializing(svc)) {
                _log.debug("Ignore host {} because its geodb is reinitialzing", svc.getId());
                continue;
            }
            URI hostUri = svc.getEndpoint();
            _log.debug("Found {}; host = {}; port = {}", svc.getName(), hostUri.getHost(), hostUri.getPort());
            // The Host name deliberately embeds the port ("host:port") —
            // presumably to keep multiple instances on one address distinct in
            // the pool; TODO confirm against the pool's host-identity semantics.
            hostList.add(new Host(String.format("%1$s:%2$d", hostUri.getHost(), hostUri.getPort()), hostUri.getPort()));
        }
        _log.debug("dbsvc endpoint refreshed");
        return hostList;
    } catch (RetryableCoordinatorException e) {
        // Expected while no dbsvc instance is up yet; the supplier will be polled again.
        _log.warn("no dbsvc instance running. Coordinator exception message: {}", e.getMessage());
    } catch (Exception e) {
        _log.error("dbsvc lookup failure", e);
    }
    return Collections.emptyList();
}
Aggregations