Use of io.vertx.core.spi.cluster.ClusterManager in project vert.x by eclipse.
Class FaultToleranceTest, method afterNodesKilled.
protected void afterNodesKilled() throws Exception {
  ClusterManager clusterManager = vertx.getClusterManager();
  assertEqualsEventually("Remaining members", Integer.valueOf(2), () -> clusterManager.getNodes().size());
}
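The assertEqualsEventually helper is defined in the test's base class rather than shown here. The sketch below is only an assumed shape of such a polling assertion (the signature, timeout, and poll interval are placeholders), using JUnit's fail:

protected <T> void assertEqualsEventually(String msg, T expected, java.util.function.Supplier<T> actual) throws Exception {
  // Poll until the supplier yields the expected value or an assumed 30-second deadline passes.
  long deadline = System.currentTimeMillis() + 30_000;
  while (true) {
    T value = actual.get();
    if (expected.equals(value)) {
      return;
    }
    if (System.currentTimeMillis() > deadline) {
      fail(msg + ": expected " + expected + " but was " + value);
    }
    Thread.sleep(100); // assumed poll interval
  }
}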
Use of io.vertx.core.spi.cluster.ClusterManager in project vert.x by eclipse.
Class VertxImpl, method getClusterManager.
private ClusterManager getClusterManager(VertxOptions options) {
  if (options.isClustered()) {
    if (options.getClusterManager() != null) {
      return options.getClusterManager();
    } else {
      ClusterManager mgr;
      String clusterManagerClassName = System.getProperty("vertx.cluster.managerClass");
      if (clusterManagerClassName != null) {
        // A system property naming the cluster manager class overrides the ServiceLoader lookup
        try {
          Class<?> clazz = Class.forName(clusterManagerClassName);
          mgr = (ClusterManager) clazz.newInstance();
        } catch (Exception e) {
          throw new IllegalStateException("Failed to instantiate " + clusterManagerClassName, e);
        }
      } else {
        mgr = ServiceHelper.loadFactoryOrNull(ClusterManager.class);
        if (mgr == null) {
          throw new IllegalStateException("No ClusterManagerFactory instances found on classpath");
        }
      }
      return mgr;
    }
  } else {
    return null;
  }
}
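Read top to bottom, the lookup order is: an explicitly configured manager, then the vertx.cluster.managerClass system property, then ServiceLoader discovery. A minimal sketch of the explicit route, reusing the FakeClusterManager that appears in the test below (any ClusterManager implementation would work the same way):

// Sketch: supplying the cluster manager programmatically, so neither the
// system property nor ServiceLoader discovery is consulted.
VertxOptions options = new VertxOptions()
    .setClustered(true)
    .setClusterManager(new FakeClusterManager());
Vertx.clusteredVertx(options, ar -> {
  if (ar.succeeded()) {
    Vertx clustered = ar.result(); // clustered instance backed by the supplied manager
  } else {
    ar.cause().printStackTrace();
  }
});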
Use of io.vertx.core.spi.cluster.ClusterManager in project vert.x by eclipse.
Class VertxOptionsTest, method testOptions.
@Test
public void testOptions() {
  VertxOptions options = new VertxOptions();
  assertEquals(2 * Runtime.getRuntime().availableProcessors(), options.getEventLoopPoolSize());
  int rand = TestUtils.randomPositiveInt();
  assertEquals(options, options.setEventLoopPoolSize(rand));
  assertEquals(rand, options.getEventLoopPoolSize());
  try {
    options.setEventLoopPoolSize(0);
    fail("Should throw exception");
  } catch (IllegalArgumentException e) {
    // OK
  }
  assertEquals(20, options.getWorkerPoolSize());
  rand = TestUtils.randomPositiveInt();
  assertEquals(options, options.setWorkerPoolSize(rand));
  assertEquals(rand, options.getWorkerPoolSize());
  try {
    options.setWorkerPoolSize(0);
    fail("Should throw exception");
  } catch (IllegalArgumentException e) {
    // OK
  }
  assertEquals(20, options.getInternalBlockingPoolSize());
  rand = TestUtils.randomPositiveInt();
  assertEquals(options, options.setInternalBlockingPoolSize(rand));
  assertEquals(rand, options.getInternalBlockingPoolSize());
  try {
    options.setInternalBlockingPoolSize(0);
    fail("Should throw exception");
  } catch (IllegalArgumentException e) {
    // OK
  }
  assertFalse(options.isClustered());
  assertEquals(options, options.setClustered(true));
  assertTrue(options.isClustered());
  assertEquals(0, options.getClusterPort());
  assertEquals(options, options.setClusterPort(1234));
  assertEquals(1234, options.getClusterPort());
  try {
    options.setClusterPort(-1);
    fail("Should throw exception");
  } catch (IllegalArgumentException e) {
    // OK
  }
  try {
    options.setClusterPort(65536);
    fail("Should throw exception");
  } catch (IllegalArgumentException e) {
    // OK
  }
  assertEquals(-1, options.getClusterPublicPort());
  assertEquals(options, options.setClusterPublicPort(1234));
  assertEquals(1234, options.getClusterPublicPort());
  try {
    options.setClusterPublicPort(-1);
    fail("Should throw exception");
  } catch (IllegalArgumentException e) {
    // OK
  }
  try {
    options.setClusterPublicPort(65536);
    fail("Should throw exception");
  } catch (IllegalArgumentException e) {
    // OK
  }
  assertEquals("localhost", options.getClusterHost());
  String randString = TestUtils.randomUnicodeString(100);
  assertEquals(options, options.setClusterHost(randString));
  assertEquals(randString, options.getClusterHost());
  assertEquals(null, options.getClusterPublicHost());
  randString = TestUtils.randomUnicodeString(100);
  assertEquals(options, options.setClusterPublicHost(randString));
  assertEquals(randString, options.getClusterPublicHost());
  assertEquals(20000, options.getClusterPingInterval());
  long randomLong = TestUtils.randomPositiveLong();
  assertEquals(options, options.setClusterPingInterval(randomLong));
  assertEquals(randomLong, options.getClusterPingInterval());
  try {
    options.setClusterPingInterval(-1);
    fail("Should throw exception");
  } catch (IllegalArgumentException e) {
    assertEquals(randomLong, options.getClusterPingInterval());
  }
  assertEquals(20000, options.getClusterPingReplyInterval());
  randomLong = TestUtils.randomPositiveLong();
  assertEquals(options, options.setClusterPingReplyInterval(randomLong));
  assertEquals(randomLong, options.getClusterPingReplyInterval());
  try {
    options.setClusterPingReplyInterval(-1);
    fail("Should throw exception");
  } catch (IllegalArgumentException e) {
    assertEquals(randomLong, options.getClusterPingReplyInterval());
  }
  assertEquals(1000, options.getBlockedThreadCheckInterval());
  rand = TestUtils.randomPositiveInt();
  assertEquals(options, options.setBlockedThreadCheckInterval(rand));
  assertEquals(rand, options.getBlockedThreadCheckInterval());
  try {
    options.setBlockedThreadCheckInterval(0);
    fail("Should throw exception");
  } catch (IllegalArgumentException e) {
    // OK
  }
  // 2 seconds in nanoseconds
  assertEquals(2000L * 1000000, options.getMaxEventLoopExecuteTime());
  rand = TestUtils.randomPositiveInt();
  assertEquals(options, options.setMaxEventLoopExecuteTime(rand));
  assertEquals(rand, options.getMaxEventLoopExecuteTime());
  try {
    options.setMaxEventLoopExecuteTime(0);
    fail("Should throw exception");
  } catch (IllegalArgumentException e) {
    // OK
  }
  // 1 minute in nanoseconds
  assertEquals(1L * 60 * 1000 * 1000000, options.getMaxWorkerExecuteTime());
  rand = TestUtils.randomPositiveInt();
  assertEquals(options, options.setMaxWorkerExecuteTime(rand));
  assertEquals(rand, options.getMaxWorkerExecuteTime());
  try {
    options.setMaxWorkerExecuteTime(0);
    fail("Should throw exception");
  } catch (IllegalArgumentException e) {
    // OK
  }
  ClusterManager mgr = new FakeClusterManager();
  assertNull(options.getClusterManager());
  assertEquals(options, options.setClusterManager(mgr));
  assertSame(mgr, options.getClusterManager());
  assertFalse(options.isHAEnabled());
  assertEquals(options, options.setHAEnabled(true));
  assertTrue(options.isHAEnabled());
  rand = TestUtils.randomPositiveInt();
  assertEquals(1, options.getQuorumSize());
  assertEquals(options, options.setQuorumSize(rand));
  assertEquals(rand, options.getQuorumSize());
  try {
    options.setQuorumSize(0);
    fail("Should throw exception");
  } catch (IllegalArgumentException e) {
    // OK
  }
  try {
    options.setQuorumSize(-1);
    fail("Should throw exception");
  } catch (IllegalArgumentException e) {
    // OK
  }
  assertEquals(VertxOptions.DEFAULT_HA_GROUP, options.getHAGroup());
  randString = TestUtils.randomUnicodeString(100);
  assertEquals(options, options.setHAGroup(randString));
  assertEquals(randString, options.getHAGroup());
  try {
    options.setHAGroup(null);
    fail("Should throw exception");
  } catch (NullPointerException e) {
    // OK
  }
  assertNotNull(options.getMetricsOptions());
  try {
    options.setWarningExceptionTime(-1);
    fail("Should throw exception");
  } catch (IllegalArgumentException e) {
    // OK
  }
  assertEquals(options, options.setWarningExceptionTime(1000000000L));
  assertEquals(1000000000L, options.getWarningExceptionTime());
}
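The defaults asserted above (event loop pool of 2 x available processors, 20 worker threads, 20000 ms ping intervals, nanosecond execute-time limits, and so on) are all overridable through the same fluent setters. A brief sketch, with placeholder values, of tightening a few of them before creating a Vertx instance:

// Sketch: overriding a few of the defaults checked in the test above; values are illustrative.
VertxOptions options = new VertxOptions()
    .setWorkerPoolSize(40)                          // default is 20
    .setMaxEventLoopExecuteTime(1000L * 1000000)    // 1 second, in nanoseconds
    .setMaxWorkerExecuteTime(30L * 1000 * 1000000)  // 30 seconds, in nanoseconds
    .setBlockedThreadCheckInterval(500);            // default is 1000 ms
Vertx vertx = Vertx.vertx(options);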
Use of io.vertx.core.spi.cluster.ClusterManager in project okapi by folio-org.
Class MainVerticle, method init.
@Override
public void init(Vertx vertx, Context context) {
  ModuleVersionReporter m = new ModuleVersionReporter("org.folio.okapi/okapi-core");
  okapiVersion = m.getVersion();
  m.logStart();
  boolean enableProxy = false;
  boolean enableDeployment = false;
  super.init(vertx, context);
  JsonObject config = context.config();
  port = Integer.parseInt(Config.getSysConf("port", "9130", config));
  int portStart = Integer.parseInt(Config.getSysConf("port_start", Integer.toString(port + 1), config));
  int portEnd = Integer.parseInt(Config.getSysConf("port_end", Integer.toString(portStart + 10), config));
  String okapiVersion2 = Config.getSysConf("okapiVersion", null, config);
  if (okapiVersion2 != null) {
    okapiVersion = okapiVersion2;
  }
  if (clusterManager != null) {
    logger.info("cluster NodeId " + clusterManager.getNodeID());
  } else {
    logger.info("clusterManager not in use");
  }
  final String host = Config.getSysConf("host", "localhost", config);
  String okapiUrl = Config.getSysConf("okapiurl", "http://localhost:" + port, config);
  // Remove trailing slash, if present
  okapiUrl = okapiUrl.replaceAll("/+$", "");
  final String nodeName = Config.getSysConf("nodename", null, config);
  String storageType = Config.getSysConf("storage", "inmemory", config);
  String loglevel = Config.getSysConf("loglevel", "", config);
  if (!loglevel.isEmpty()) {
    logHelper.setRootLogLevel(loglevel);
  } else {
    String lev = getenv("OKAPI_LOGLEVEL");
    if (lev != null && !lev.isEmpty()) {
      logHelper.setRootLogLevel(lev); // fall back to the environment variable
    }
  }
  String mode = config.getString("mode", "cluster");
  switch (mode) {
    case "deployment":
      enableDeployment = true;
      break;
    case "proxy":
      enableProxy = true;
      break;
    case "purgedatabase":
      initMode = PURGE;
      // so we get to initialize the database; we exit soon after anyway
      enableProxy = true;
      break;
    case "initdatabase":
      initMode = INIT;
      enableProxy = true;
      break;
    default:
      // cluster and dev
      enableDeployment = true;
      enableProxy = true;
      break;
  }
  storage = new Storage(vertx, storageType, config);
  envManager = new EnvManager(storage.getEnvStore());
  discoveryManager = new DiscoveryManager(storage.getDeploymentStore());
  if (clusterManager != null) {
    discoveryManager.setClusterManager(clusterManager);
  }
  if (enableDeployment) {
    Ports ports = new Ports(portStart, portEnd);
    deploymentManager = new DeploymentManager(vertx, discoveryManager, envManager, host, ports, port, nodeName);
    Runtime.getRuntime().addShutdownHook(new Thread() {
      @Override
      public void run() {
        CountDownLatch latch = new CountDownLatch(1);
        deploymentManager.shutdown(ar -> latch.countDown());
        try {
          if (!latch.await(2, TimeUnit.MINUTES)) {
            logger.error("Timed out waiting to undeploy all");
          }
        } catch (InterruptedException e) {
          logger.error("Exception while shutting down");
          Thread.currentThread().interrupt();
          throw new IllegalStateException(e);
        }
      }
    });
  }
  if (enableProxy) {
    ModuleStore moduleStore = storage.getModuleStore();
    moduleManager = new ModuleManager(moduleStore);
    TenantStore tenantStore = storage.getTenantStore();
    tenantManager = new TenantManager(moduleManager, tenantStore);
    moduleManager.setTenantManager(tenantManager);
    discoveryManager.setModuleManager(moduleManager);
    logger.info("Proxy using " + storageType + " storage");
    PullManager pullManager = new PullManager(vertx, moduleManager);
    InternalModule internalModule = new InternalModule(moduleManager, tenantManager, deploymentManager, discoveryManager, envManager, pullManager, okapiVersion);
    proxyService = new ProxyService(vertx, moduleManager, tenantManager, discoveryManager, internalModule, okapiUrl);
    tenantManager.setProxyService(proxyService);
  } else {
    // not really proxying, except to /_/deployment
    moduleManager = new ModuleManager(null);
    // make sure it is not shared
    moduleManager.forceLocalMap();
    tenantManager = new TenantManager(moduleManager, null);
    tenantManager.forceLocalMap();
    moduleManager.setTenantManager(tenantManager);
    discoveryManager.setModuleManager(moduleManager);
    InternalModule internalModule = new InternalModule(null, null, deploymentManager, null, envManager, null, okapiVersion);
    // no modules, tenants, or discovery; only deployment and env
    proxyService = new ProxyService(vertx, moduleManager, tenantManager, discoveryManager, internalModule, okapiUrl);
  }
}
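The keys read through Config.getSysConf above can also be supplied in the verticle's deployment config. A hypothetical deployment sketch; the key names mirror those used in init(), while the values are placeholders only:

// Hypothetical: deploying MainVerticle with an explicit config; values are illustrative.
JsonObject config = new JsonObject()
    .put("port", "9130")
    .put("port_start", "9131")
    .put("port_end", "9141")
    .put("storage", "inmemory")
    .put("mode", "dev") // handled by the switch default above, alongside "cluster"
    .put("okapiurl", "http://localhost:9130");
vertx.deployVerticle(new MainVerticle(), new DeploymentOptions().setConfig(config));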
Use of io.vertx.core.spi.cluster.ClusterManager in project VX-API-Gateway by EliMirren.
Class VxApiLauncher, method beforeStartingVertx.
/**
 * Set up the vert.x configuration.
 */
@Override
public void beforeStartingVertx(VertxOptions options) {
  try {
    byte[] bytes = Files.readAllBytes(PathUtil.getPath("conf.json"));
    Buffer buff = Buffer.buffer(bytes);
    // Top-level configuration file
    JsonObject conf = buff.toJsonObject();
    // vert.x configuration
    JsonObject vertxc = conf.getJsonObject("vertx", getDefaultVertxConfig());
    initVertxConfig(vertxc, options);
    // Cluster configuration
    JsonObject clusterc = conf.getJsonObject("cluster", new JsonObject().put("clusterType", CLUSTER_TYPE));
    if (!CLUSTER_TYPE.equals(clusterc.getString("clusterType"))) {
      ClusterManager cmgr = VxApiClusterManagerFactory.getClusterManager(clusterc.getString("clusterType"), clusterc.getJsonObject("clusterConf", getDefaultClusterConfig()));
      options.setClusterManager(cmgr);
      options.setClustered(true);
    }
  } catch (IOException e) {
    throw new FileSystemException(e);
  }
}
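So the launcher expects a conf.json with an optional "vertx" object (passed to initVertxConfig) and a "cluster" object, and clustering is only switched on when clusterType differs from the built-in CLUSTER_TYPE default. A hypothetical shape of that file, shown as the JsonObject it parses into (the "hazelcast" value and the empty clusterConf are placeholders):

// Hypothetical conf.json content, expressed as the JsonObject the launcher reads.
JsonObject conf = new JsonObject()
    .put("vertx", new JsonObject())             // VertxOptions overrides, applied by initVertxConfig(...)
    .put("cluster", new JsonObject()
        .put("clusterType", "hazelcast")        // placeholder: any value other than the CLUSTER_TYPE default enables clustering
        .put("clusterConf", new JsonObject())); // placeholder cluster-manager configuration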