Use of com.github.ambry.config.ClusterMapConfig in project ambry by linkedin.
The class RestServerMain, method getSSLFactoryIfRequired.
/**
 * Instantiate an {@link SSLFactory} if any components require it.
 * @param verifiableProperties The {@link VerifiableProperties} to check if any components require it.
 * @return the {@link SSLFactory}, or {@code null} if no components require it.
 * @throws GeneralSecurityException
 * @throws IOException
 */
private static SSLFactory getSSLFactoryIfRequired(VerifiableProperties verifiableProperties) throws Exception {
  ClusterMapConfig clusterMapConfig = new ClusterMapConfig(verifiableProperties);
  boolean sslRequired = new NettyConfig(verifiableProperties).nettyServerEnableSSL
      || clusterMapConfig.clusterMapSslEnabledDatacenters.length() > 0;
  return sslRequired ? SSLFactory.getNewInstance(new SSLConfig(verifiableProperties)) : null;
}
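SSL becomes required here when either the Netty server's SSL flag is set or the cluster map lists any SSL-enabled datacenters. A minimal sketch of the second case, invoked from within RestServerMain since the method is private, assuming the property names netty.server.enable.ssl and clustermap.ssl.enabled.datacenters back the two config fields and that the usual required clustermap properties are also supplied (values are illustrative):

Properties props = new Properties();
// Required ClusterMapConfig basics (illustrative values).
props.setProperty("clustermap.cluster.name", "MainCluster");
props.setProperty("clustermap.datacenter.name", "DC1");
props.setProperty("clustermap.host.name", "localhost");
// A non-empty SSL-enabled datacenter list makes sslRequired evaluate to true.
props.setProperty("clustermap.ssl.enabled.datacenters", "DC2");
// Assumed name for the NettyConfig.nettyServerEnableSSL flag.
props.setProperty("netty.server.enable.ssl", "false");
SSLFactory sslFactory = getSSLFactoryIfRequired(new VerifiableProperties(props));
// sslFactory is non-null because "DC2" is listed as SSL-enabled.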
Use of com.github.ambry.config.ClusterMapConfig in project ambry by linkedin.
The class CloudRouterFactory, method getRequestHandlerPool.
/**
 * Utility method to build a {@link RequestHandlerPool}.
 * @param verifiableProperties the properties to use.
 * @param clusterMap the {@link ClusterMap} to use.
 * @param cloudDestination the {@link CloudDestination} that the cloud storage manager talks to.
 * @param cloudConfig the {@link CloudConfig} used to construct the {@link CloudDataNode}.
 * @return the constructed {@link RequestHandlerPool}.
 * @throws Exception if the construction fails.
 */
public RequestHandlerPool getRequestHandlerPool(VerifiableProperties verifiableProperties, ClusterMap clusterMap,
    CloudDestination cloudDestination, CloudConfig cloudConfig) throws Exception {
  ClusterMapConfig clusterMapConfig = new ClusterMapConfig(verifiableProperties);
  MetricRegistry registry = clusterMap.getMetricRegistry();
  DataNodeId nodeId = new CloudDataNode(cloudConfig, clusterMapConfig);
  VcrMetrics vcrMetrics = new VcrMetrics(registry);
  StoreManager cloudStorageManager =
      new CloudStorageManager(verifiableProperties, vcrMetrics, cloudDestination, clusterMap);
  LocalRequestResponseChannel channel = new LocalRequestResponseChannel();
  ServerMetrics serverMetrics = new ServerMetrics(registry, AmbryRequests.class);
  StoreKeyFactory storeKeyFactory = new BlobIdFactory(clusterMap);
  StoreKeyConverterFactory storeKeyConverterFactory =
      Utils.getObj(routerConfig.routerStoreKeyConverterFactory, verifiableProperties, registry);
  // A null notification system is passed into AmbryRequests so that replication events are not emitted from a
  // frontend.
  AmbryRequests requests =
      new AmbryRequests(cloudStorageManager, channel, clusterMap, nodeId, registry, serverMetrics, null, null, null,
          storeKeyFactory, storeKeyConverterFactory);
  return new RequestHandlerPool(routerConfig.routerRequestHandlerNumOfThreads, channel, requests);
}
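The pool above pairs the in-memory LocalRequestResponseChannel with AmbryRequests handler threads. A minimal usage sketch, assuming the factory and its inputs (cloudRouterFactory, verifiableProperties, clusterMap, cloudDestination, cloudConfig) are already constructed and that RequestHandlerPool exposes the shutdown() method used elsewhere in ambry:

RequestHandlerPool pool =
    cloudRouterFactory.getRequestHandlerPool(verifiableProperties, clusterMap, cloudDestination, cloudConfig);
try {
  // Requests posted to the LocalRequestResponseChannel are picked up and served by the pool's handler threads;
  // router components would send requests through a network client bound to the same channel.
} finally {
  // Stop the handler threads when the owning router shuts down.
  pool.shutdown();
}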
Use of com.github.ambry.config.ClusterMapConfig in project ambry by linkedin.
The class ServerTestUtil, method getSSLFactoryIfRequired.
/**
 * Create an {@link SSLFactory} if there are SSL-enabled datacenters in the properties.
 * @param verifiableProperties the {@link VerifiableProperties} to use.
 * @return an {@link SSLFactory}, or {@code null} if no {@link SSLFactory} is required.
 * @throws Exception
 */
static SSLFactory getSSLFactoryIfRequired(VerifiableProperties verifiableProperties) throws Exception {
  if (new RouterConfig(verifiableProperties).routerEnableHttp2NetworkClient) {
    return new NettySslHttp2Factory(new SSLConfig(verifiableProperties));
  }
  ClusterMapConfig clusterMapConfig = new ClusterMapConfig(verifiableProperties);
  boolean requiresSSL = clusterMapConfig.clusterMapSslEnabledDatacenters.length() > 0;
  return requiresSSL ? SSLFactory.getNewInstance(new SSLConfig(verifiableProperties)) : null;
}
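Two independent switches drive this helper: the router's HTTP/2 network client flag and the cluster map's SSL-enabled datacenter list. A minimal sketch of the first branch, assuming the property name router.enable.http2.network.client maps to RouterConfig.routerEnableHttp2NetworkClient (the other required router and clustermap properties that a test harness normally sets are omitted here):

Properties props = new Properties();
// Assumed property name behind RouterConfig.routerEnableHttp2NetworkClient.
props.setProperty("router.enable.http2.network.client", "true");
// ... plus the required router and clustermap properties the test harness already sets.
SSLFactory factory = getSSLFactoryIfRequired(new VerifiableProperties(props));
// With the HTTP/2 network client enabled, factory is a NettySslHttp2Factory; otherwise the
// clustermap.ssl.enabled.datacenters list decides whether any SSLFactory is created at all.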
Use of com.github.ambry.config.ClusterMapConfig in project ambry by linkedin.
The class VcrAutomationTest, method testSimpleVcrAutomation.
/**
 * Test basic partition add and remove cases.
 */
@Test
public void testSimpleVcrAutomation() throws Exception {
  List<ZkInfo> zkInfoList = new ArrayList<>();
  String mainClusterStateModelDef = ClusterMapConfig.AMBRY_STATE_MODEL_DEF;
  DataNodeConfigSourceType dataNodeConfigSourceType = DataNodeConfigSourceType.INSTANCE_CONFIG;
  int zkPort = 2100;
  int numberOfDataNode = 3;
  int partitionCount = 9;
  int newPartitionCount = 13;
  String zkHostName = "localhost";
  String zkConnectString = zkHostName + ":" + zkPort;
  String clusterPrefix = "";
  String clusterName = "MainCluster";
  String vcrClusterName = "VcrCluster";
  String dcName = "DC0";
  TestHardwareLayout testHardwareLayout;
  TestPartitionLayout testPartitionLayout;
  String hardwareLayoutPath;
  String partitionLayoutPath;
  String zkLayoutPath;
  zkInfoList.add(new ZkInfo(TestUtils.getTempDir("tempZk"), "DC0", (byte) 0, zkPort, true));
  String tempDirPath = getTempDir(clusterName + "-");
  hardwareLayoutPath = tempDirPath + "/hardwareLayoutTest.json";
  partitionLayoutPath = tempDirPath + "/partitionLayoutTest.json";
  zkLayoutPath = tempDirPath + "/zkLayoutPath.json";
  testHardwareLayout = new TestHardwareLayout(clusterName, 1, 10737418240L, numberOfDataNode, 1, 18088, 20, false);
  testPartitionLayout = constructInitialPartitionLayoutJSON(testHardwareLayout, partitionCount, null);
  JSONObject zkJson = constructZkLayoutJSON(zkInfoList);
  Utils.writeJsonObjectToFile(zkJson, zkLayoutPath);
  Utils.writeJsonObjectToFile(testHardwareLayout.getHardwareLayout().toJSONObject(), hardwareLayoutPath);
  Utils.writeJsonObjectToFile(testPartitionLayout.getPartitionLayout().toJSONObject(), partitionLayoutPath);
  Properties props =
      VcrTestUtil.createVcrProperties("DC0", vcrClusterName, zkConnectString, 12300, 12400, 12510, null);
  props.setProperty("clustermap.host.name", "localhost");
  props.setProperty("clustermap.port", "1100");
  props.setProperty("clustermap.cluster.name", clusterName);
  props.setProperty("clustermap.datacenter.name", dcName);
  props.setProperty("clustermap.dcs.zk.connect.strings", zkJson.toString(2));
  props.setProperty("clustermap.state.model.definition", mainClusterStateModelDef);
  props.setProperty("clustermap.data.node.config.source.type", dataNodeConfigSourceType.name());
  props.setProperty("clustermap.cluster.change.handler.type", "DynamicClusterChangeHandler");
  props.setProperty("vcr.helix.updater.partition.id", "1");
  props.setProperty("vcr.helix.update.delay.time.in.seconds", "1");
  HelixBootstrapUpgradeUtil.bootstrapOrUpgrade(hardwareLayoutPath, partitionLayoutPath, zkLayoutPath, clusterPrefix,
      dcName, 10, false, false, new HelixAdminFactory(), false, mainClusterStateModelDef,
      HelixBootstrapUpgradeUtil.HelixAdminOperation.BootstrapCluster, dataNodeConfigSourceType, false);
  HelixControllerManager helixControllerManager =
      new HelixControllerManager(zkConnectString, clusterPrefix + clusterName);
  helixControllerManager.syncStart();
  // Main cluster helix setup done.
  HelixVcrUtil.VcrHelixConfig vcrHelixConfig;
  String vcConfigData = CloudConfig.DEFAULT_VCR_HELIX_UPDATE_CONFIG;
  try (InputStream input = new ByteArrayInputStream(vcConfigData.getBytes())) {
    vcrHelixConfig = new ObjectMapper().readValue(input, HelixVcrUtil.VcrHelixConfig.class);
  } catch (IOException ex) {
    throw new IllegalStateException("Could not load config from config data: " + vcConfigData);
  }
  HelixVcrUtil.createCluster(zkConnectString, vcrClusterName, vcrHelixConfig);
  HelixVcrUtil.updateResourceAndPartition(zkConnectString, clusterPrefix + clusterName, zkConnectString,
      vcrClusterName, vcrHelixConfig, false);
  Assert.assertTrue("Dest and Src should be same",
      isSrcDestSync(zkConnectString, clusterPrefix + clusterName, zkConnectString, vcrClusterName));
  HelixControllerManager vcrHelixControllerManager = new HelixControllerManager(zkConnectString, vcrClusterName);
  vcrHelixControllerManager.syncStart();
  StrictMatchExternalViewVerifier helixBalanceVerifier =
      new StrictMatchExternalViewVerifier(zkConnectString, vcrClusterName,
          Collections.singleton(VcrTestUtil.helixResource), null);
  // VCR cluster helix setup done.
  VerifiableProperties verifiableProperties = new VerifiableProperties(props);
  ClusterMapConfig clusterMapConfig = new ClusterMapConfig(verifiableProperties);
  HelixClusterAgentsFactory helixClusterAgentsFactory = new HelixClusterAgentsFactory(clusterMapConfig, null, null);
  VcrServer vcrServer =
      new VcrServer(verifiableProperties, helixClusterAgentsFactory, null, new CloudDestinationFactory() {
        @Override
        public CloudDestination getCloudDestination() throws IllegalStateException {
          return mock(CloudDestination.class);
        }
      }, null);
  vcrServer.startup();
  makeSureHelixBalance(vcrServer, helixBalanceVerifier);
  Assert.assertTrue("Partition assignment is not correct.", TestUtils.checkAndSleep(partitionCount,
      () -> vcrServer.getVcrClusterParticipant().getAssignedPartitionIds().size(), 5000));
  // vcr server start up done.
  // Partition add case:
  testPartitionLayout =
      constructInitialPartitionLayoutJSON(testHardwareLayout, partitionCount + newPartitionCount, null);
  Utils.writeJsonObjectToFile(testPartitionLayout.getPartitionLayout().toJSONObject(), partitionLayoutPath);
  HelixBootstrapUpgradeUtil.bootstrapOrUpgrade(hardwareLayoutPath, partitionLayoutPath, zkLayoutPath, clusterPrefix,
      dcName, 10, false, false, new HelixAdminFactory(), false, mainClusterStateModelDef,
      HelixBootstrapUpgradeUtil.HelixAdminOperation.BootstrapCluster, dataNodeConfigSourceType, false);
  makeSureHelixBalance(vcrServer, helixBalanceVerifier);
  Assert.assertTrue("Partition assignment is not correct.",
      TestUtils.checkAndSleep(partitionCount + newPartitionCount,
          () -> vcrServer.getVcrClusterParticipant().getAssignedPartitionIds().size(), 5000));
  // Partition remove case:
  testPartitionLayout = constructInitialPartitionLayoutJSON(testHardwareLayout, partitionCount, null);
  Utils.writeJsonObjectToFile(testPartitionLayout.getPartitionLayout().toJSONObject(), partitionLayoutPath);
  HelixBootstrapUpgradeUtil.bootstrapOrUpgrade(hardwareLayoutPath, partitionLayoutPath, zkLayoutPath, clusterPrefix,
      dcName, 10, false, true, new HelixAdminFactory(), false, mainClusterStateModelDef,
      HelixBootstrapUpgradeUtil.HelixAdminOperation.BootstrapCluster, dataNodeConfigSourceType, false);
  makeSureHelixBalance(vcrServer, helixBalanceVerifier);
  Assert.assertTrue("Partition assignment is not correct.", TestUtils.checkAndSleep(partitionCount,
      () -> vcrServer.getVcrClusterParticipant().getAssignedPartitionIds().size(), 5000));
  helixControllerManager.syncStop();
  vcrHelixControllerManager.syncStop();
  zkInfoList.get(0).shutdown();
}
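makeSureHelixBalance is a helper of the test class and is not shown in this snippet. A hypothetical sketch of what such a helper could look like, assuming the Helix StrictMatchExternalViewVerifier.verify(timeoutMs) API and that waiting for a balanced external view is all the test needs (the real helper may check more):

// Hypothetical helper, for illustration only; the actual implementation in VcrAutomationTest may differ.
static void makeSureHelixBalance(VcrServer vcrServer, StrictMatchExternalViewVerifier helixBalanceVerifier) {
  // Block until the VCR cluster's external view matches its ideal state, or fail the test.
  Assert.assertTrue("Helix did not reach a balanced external view in time.",
      helixBalanceVerifier.verify(10 * 1000));
}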
Use of com.github.ambry.config.ClusterMapConfig in project ambry by linkedin.
The class AmbryServer, method startup.
public void startup() throws InstantiationException {
  try {
    logger.info("starting");
    clusterParticipants = clusterAgentsFactory.getClusterParticipants();
    logger.info("Setting up JMX.");
    long startTime = SystemTime.getInstance().milliseconds();
    reporter = reporterFactory != null ? reporterFactory.apply(registry) : JmxReporter.forRegistry(registry).build();
    reporter.start();
    logger.info("creating configs");
    NetworkConfig networkConfig = new NetworkConfig(properties);
    StoreConfig storeConfig = new StoreConfig(properties);
    DiskManagerConfig diskManagerConfig = new DiskManagerConfig(properties);
    ServerConfig serverConfig = new ServerConfig(properties);
    ReplicationConfig replicationConfig = new ReplicationConfig(properties);
    ConnectionPoolConfig connectionPoolConfig = new ConnectionPoolConfig(properties);
    SSLConfig sslConfig = new SSLConfig(properties);
    ClusterMapConfig clusterMapConfig = new ClusterMapConfig(properties);
    StatsManagerConfig statsConfig = new StatsManagerConfig(properties);
    CloudConfig cloudConfig = new CloudConfig(properties);
    // verify the configs
    properties.verify();
    scheduler = Utils.newScheduler(serverConfig.serverSchedulerNumOfthreads, false);
    // If there is more than one participant on this node, run a consistency checker to catch any
    // mismatch in the sealed/stopped replica lists maintained by each participant.
    if (clusterParticipants != null && clusterParticipants.size() > 1
        && serverConfig.serverParticipantsConsistencyCheckerPeriodSec > 0) {
      consistencyChecker = new ParticipantsConsistencyChecker(clusterParticipants, metrics);
      logger.info("Scheduling participants consistency checker with a period of {} secs",
          serverConfig.serverParticipantsConsistencyCheckerPeriodSec);
      consistencyCheckerScheduler = Utils.newScheduler(1, "consistency-checker-", false);
      consistencyCheckerTask = consistencyCheckerScheduler.scheduleAtFixedRate(consistencyChecker, 0,
          serverConfig.serverParticipantsConsistencyCheckerPeriodSec, TimeUnit.SECONDS);
    }
    logger.info("checking if node exists in clustermap host {} port {}", networkConfig.hostName, networkConfig.port);
    DataNodeId nodeId = clusterMap.getDataNodeId(networkConfig.hostName, networkConfig.port);
    if (nodeId == null) {
      throw new IllegalArgumentException("The node " + networkConfig.hostName + ":" + networkConfig.port
          + " is not present in the clustermap. Failing to start the datanode");
    }
    AccountServiceFactory accountServiceFactory =
        Utils.getObj(serverConfig.serverAccountServiceFactory, properties, registry);
    AccountService accountService = accountServiceFactory.getAccountService();
    StoreKeyFactory storeKeyFactory = Utils.getObj(storeConfig.storeKeyFactory, clusterMap);
    // In most cases, there should be only one participant in the clusterParticipants list. If there are more than
    // one and some components require a sole participant, the first one in the list is the primary participant.
    storageManager =
        new StorageManager(storeConfig, diskManagerConfig, scheduler, registry, storeKeyFactory, clusterMap, nodeId,
            new BlobStoreHardDelete(), clusterParticipants, time, new BlobStoreRecovery(), accountService);
    storageManager.start();
    SSLFactory sslFactory = new NettySslHttp2Factory(sslConfig);
    if (clusterMapConfig.clusterMapEnableHttp2Replication) {
      connectionPool =
          new Http2BlockingChannelPool(sslFactory, new Http2ClientConfig(properties), new Http2ClientMetrics(registry));
    } else {
      connectionPool = new BlockingChannelConnectionPool(connectionPoolConfig, sslConfig, clusterMapConfig, registry);
    }
    connectionPool.start();
    StoreKeyConverterFactory storeKeyConverterFactory =
        Utils.getObj(serverConfig.serverStoreKeyConverterFactory, properties, registry);
    Predicate<MessageInfo> skipPredicate = new ReplicationSkipPredicate(accountService, replicationConfig);
    replicationManager =
        new ReplicationManager(replicationConfig, clusterMapConfig, storeConfig, storageManager, storeKeyFactory,
            clusterMap, scheduler, nodeId, connectionPool, registry, notificationSystem, storeKeyConverterFactory,
            serverConfig.serverMessageTransformer, clusterParticipants.get(0), skipPredicate);
    replicationManager.start();
    if (replicationConfig.replicationEnabledWithVcrCluster) {
      logger.info("Creating Helix cluster spectator for cloud to store replication.");
      vcrClusterSpectator = _vcrClusterAgentsFactory.getVcrClusterSpectator(cloudConfig, clusterMapConfig);
      cloudToStoreReplicationManager =
          new CloudToStoreReplicationManager(replicationConfig, clusterMapConfig, storeConfig, storageManager,
              storeKeyFactory, clusterMap, scheduler, nodeId, connectionPool, registry, notificationSystem,
              storeKeyConverterFactory, serverConfig.serverMessageTransformer, vcrClusterSpectator,
              clusterParticipants.get(0));
      cloudToStoreReplicationManager.start();
    }
    logger.info("Creating StatsManager to publish stats");
    accountStatsMySqlStore = statsConfig.enableMysqlReport
        ? (AccountStatsMySqlStore) new AccountStatsMySqlStoreFactory(properties, clusterMapConfig, registry)
            .getAccountStatsStore()
        : null;
    statsManager = new StatsManager(storageManager, clusterMap.getReplicaIds(nodeId), registry, statsConfig, time,
        clusterParticipants.get(0), accountStatsMySqlStore, accountService);
    if (serverConfig.serverStatsPublishLocalEnabled) {
      statsManager.start();
    }
    ArrayList<Port> ports = new ArrayList<Port>();
    ports.add(new Port(networkConfig.port, PortType.PLAINTEXT));
    if (nodeId.hasSSLPort()) {
      ports.add(new Port(nodeId.getSSLPort(), PortType.SSL));
    }
    networkServer = new SocketServer(networkConfig, sslConfig, registry, ports);
    FindTokenHelper findTokenHelper = new FindTokenHelper(storeKeyFactory, replicationConfig);
    requests = new AmbryServerRequests(storageManager, networkServer.getRequestResponseChannel(), clusterMap, nodeId,
        registry, metrics, findTokenHelper, notificationSystem, replicationManager, storeKeyFactory, serverConfig,
        storeKeyConverterFactory, statsManager, clusterParticipants.get(0));
    requestHandlerPool = new RequestHandlerPool(serverConfig.serverRequestHandlerNumOfThreads,
        networkServer.getRequestResponseChannel(), requests);
    networkServer.start();
    // Start netty http2 server
    if (nodeId.hasHttp2Port()) {
      NettyConfig nettyConfig = new NettyConfig(properties);
      NettyMetrics nettyMetrics = new NettyMetrics(registry);
      Http2ServerMetrics http2ServerMetrics = new Http2ServerMetrics(registry);
      Http2ClientConfig http2ClientConfig = new Http2ClientConfig(properties);
      logger.info("Http2 port {} is enabled. Starting HTTP/2 service.", nodeId.getHttp2Port());
      NettyServerRequestResponseChannel requestResponseChannel =
          new NettyServerRequestResponseChannel(networkConfig.queuedMaxRequests, http2ServerMetrics);
      AmbryServerRequests ambryServerRequestsForHttp2 =
          new AmbryServerRequests(storageManager, requestResponseChannel, clusterMap, nodeId, registry, metrics,
              findTokenHelper, notificationSystem, replicationManager, storeKeyFactory, serverConfig,
              storeKeyConverterFactory, statsManager, clusterParticipants.get(0));
      requestHandlerPoolForHttp2 = new RequestHandlerPool(serverConfig.serverRequestHandlerNumOfThreads,
          requestResponseChannel, ambryServerRequestsForHttp2);
      NioServerFactory nioServerFactory = new StorageServerNettyFactory(nodeId.getHttp2Port(), requestResponseChannel,
          sslFactory, nettyConfig, http2ClientConfig, metrics, nettyMetrics, http2ServerMetrics, serverSecurityService);
      nettyHttp2Server = nioServerFactory.getNioServer();
      nettyHttp2Server.start();
    }
    // Other code
    List<AmbryStatsReport> ambryStatsReports = new ArrayList<>();
    Set<String> validStatsTypes = new HashSet<>();
    for (StatsReportType type : StatsReportType.values()) {
      validStatsTypes.add(type.toString());
    }
    if (serverConfig.serverStatsPublishReportEnabled) {
      serverConfig.serverStatsReportsToPublish.forEach(e -> {
        if (validStatsTypes.contains(e)) {
          ambryStatsReports.add(new AmbryStatsReportImpl(serverConfig.serverQuotaStatsAggregateIntervalInMinutes,
              StatsReportType.valueOf(e)));
        }
      });
    }
    if (vcrClusterSpectator != null) {
      vcrClusterSpectator.spectate();
    }
    Callback<StatsSnapshot> accountServiceCallback = new AccountServiceCallback(accountService);
    for (ClusterParticipant clusterParticipant : clusterParticipants) {
      clusterParticipant.participate(ambryStatsReports, accountStatsMySqlStore, accountServiceCallback);
    }
    if (nettyInternalMetrics != null) {
      nettyInternalMetrics.start();
      logger.info("NettyInternalMetric starts");
    }
    logger.info("started");
    long processingTime = SystemTime.getInstance().milliseconds() - startTime;
    metrics.serverStartTimeInMs.update(processingTime);
    logger.info("Server startup time in Ms {}", processingTime);
  } catch (Exception e) {
    logger.error("Error during startup", e);
    throw new InstantiationException("failure during startup " + e);
  }
}
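Before most of this path matters, startup() must find the local node in the cluster map via NetworkConfig.hostName and NetworkConfig.port. A minimal, illustrative property set, assuming the conventional Ambry property names host.name and port for NetworkConfig and the clustermap.* keys for ClusterMapConfig (all values are placeholders):

Properties props = new Properties();
// NetworkConfig: must match a data node registered in the cluster map, or startup() throws
// IllegalArgumentException at the getDataNodeId check above (property names assumed).
props.setProperty("host.name", "localhost");
props.setProperty("port", "6667");
// ClusterMapConfig basics read by new ClusterMapConfig(properties).
props.setProperty("clustermap.cluster.name", "MainCluster");
props.setProperty("clustermap.datacenter.name", "DC1");
props.setProperty("clustermap.host.name", "localhost");
props.setProperty("clustermap.port", "6667");
VerifiableProperties properties = new VerifiableProperties(props);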