
Example 46 with ClusterMapConfig

Use of com.github.ambry.config.ClusterMapConfig in project ambry by LinkedIn.

Class AmbryMain, method main.

public static void main(String[] args) {
    final AmbryServer ambryServer;
    int exitCode = 0;
    try {
        InvocationOptions options = new InvocationOptions(args);
        Properties properties = Utils.loadProps(options.serverPropsFilePath);
        VerifiableProperties verifiableProperties = new VerifiableProperties(properties);
        ClusterMapConfig clusterMapConfig = new ClusterMapConfig(verifiableProperties);
        CloudConfig cloudConfig = new CloudConfig(verifiableProperties);
        ClusterAgentsFactory clusterAgentsFactory = Utils.getObj(clusterMapConfig.clusterMapClusterAgentsFactory, clusterMapConfig, options.hardwareLayoutFilePath, options.partitionLayoutFilePath);
        logger.info("Bootstrapping AmbryServer");
        VcrClusterAgentsFactory vcrClusterAgentsFactory = Utils.getObj(cloudConfig.vcrClusterAgentsFactoryClass);
        ambryServer = new AmbryServer(verifiableProperties, clusterAgentsFactory, vcrClusterAgentsFactory, SystemTime.getInstance());
        // attach shutdown handler to catch control-c
        Runtime.getRuntime().addShutdownHook(new Thread() {

            public void run() {
                logger.info("Received shutdown signal. Shutting down AmbryServer");
                ambryServer.shutdown();
            }
        });
        ambryServer.startup();
        ambryServer.awaitShutdown();
    } catch (Exception e) {
        logger.error("Exception during bootstrap of AmbryServer", e);
        exitCode = 1;
    }
    logger.info("Exiting AmbryMain");
    System.exit(exitCode);
}
Also used : VcrClusterAgentsFactory(com.github.ambry.clustermap.VcrClusterAgentsFactory) VerifiableProperties(com.github.ambry.config.VerifiableProperties) CloudConfig(com.github.ambry.config.CloudConfig) ClusterAgentsFactory(com.github.ambry.clustermap.ClusterAgentsFactory) Properties(java.util.Properties) InvocationOptions(com.github.ambry.utils.InvocationOptions) ClusterMapConfig(com.github.ambry.config.ClusterMapConfig)
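For reference, the ClusterMapConfig passed to the cluster agents factory above is just a typed wrapper over VerifiableProperties. Below is a minimal sketch of constructing one by hand; the property keys match those used by getClusterMapConfig later on this page, while the host, cluster, and datacenter values are hypothetical placeholders (a real server loads them from options.serverPropsFilePath).

import com.github.ambry.config.ClusterMapConfig;
import com.github.ambry.config.VerifiableProperties;
import java.util.Properties;

public class ClusterMapConfigSketch {
    public static ClusterMapConfig buildConfig() {
        Properties props = new Properties();
        // Hypothetical values; a real deployment reads these from the server properties file.
        props.setProperty("clustermap.host.name", "localhost");
        props.setProperty("clustermap.cluster.name", "Ambry-Dev");
        props.setProperty("clustermap.datacenter.name", "DC1");
        return new ClusterMapConfig(new VerifiableProperties(props));
    }
}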

Example 47 with ClusterMapConfig

Use of com.github.ambry.config.ClusterMapConfig in project ambry by LinkedIn.

Class VcrMain, method main.

public static void main(String[] args) {
    final VcrServer vcrServer;
    int exitCode = 0;
    try {
        InvocationOptions options = new InvocationOptions(args);
        Properties properties = Utils.loadProps(options.serverPropsFilePath);
        VerifiableProperties verifiableProperties = new VerifiableProperties(properties);
        ClusterMapConfig clusterMapConfig = new ClusterMapConfig(verifiableProperties);
        ClusterAgentsFactory clusterAgentsFactory = Utils.getObj(clusterMapConfig.clusterMapClusterAgentsFactory, clusterMapConfig, options.hardwareLayoutFilePath, options.partitionLayoutFilePath);
        logger.info("Bootstrapping VcrServer");
        vcrServer = new VcrServer(verifiableProperties, clusterAgentsFactory, new LoggingNotificationSystem(), null);
        // attach shutdown handler to catch control-c
        Runtime.getRuntime().addShutdownHook(new Thread() {

            public void run() {
                logger.info("Received shutdown signal. Shutting down VcrServer");
                vcrServer.shutdown();
            }
        });
        vcrServer.startup();
        vcrServer.awaitShutdown(Integer.MAX_VALUE);
    } catch (Exception e) {
        logger.error("Exception during bootstrap of VcrServer", e);
        exitCode = 1;
    }
    logger.info("Exiting VcrMain");
    System.exit(exitCode);
}
Also used : VerifiableProperties(com.github.ambry.config.VerifiableProperties) LoggingNotificationSystem(com.github.ambry.commons.LoggingNotificationSystem) ClusterAgentsFactory(com.github.ambry.clustermap.ClusterAgentsFactory) Properties(java.util.Properties) InvocationOptions(com.github.ambry.utils.InvocationOptions) ClusterMapConfig(com.github.ambry.config.ClusterMapConfig)
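Both mains hand a fully qualified factory class name (here clusterMapConfig.clusterMapClusterAgentsFactory) to Utils.getObj, which instantiates it reflectively with the supplied constructor arguments. The sketch below illustrates that general pattern with plain java.lang.reflect; it is a simplification, not the actual Utils.getObj implementation (the real helper does richer constructor resolution, e.g. primitive/boxed matching).

import java.lang.reflect.Constructor;

public final class FactoryLoaderSketch {
    @SuppressWarnings("unchecked")
    public static <T> T newInstance(String className, Object... args) throws Exception {
        Class<?> clazz = Class.forName(className);
        // Find a constructor whose parameter count and (reference) types fit the supplied args.
        for (Constructor<?> ctor : clazz.getDeclaredConstructors()) {
            Class<?>[] paramTypes = ctor.getParameterTypes();
            if (paramTypes.length != args.length) {
                continue;
            }
            boolean matches = true;
            for (int i = 0; i < args.length; i++) {
                if (args[i] != null && !paramTypes[i].isAssignableFrom(args[i].getClass())) {
                    matches = false;
                    break;
                }
            }
            if (matches) {
                return (T) ctor.newInstance(args);
            }
        }
        throw new NoSuchMethodException("No matching constructor on " + className);
    }
}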

Example 48 with ClusterMapConfig

Use of com.github.ambry.config.ClusterMapConfig in project ambry by LinkedIn.

Class VcrServer, method startup.

/**
 * Start the VCR Server.
 * @throws InstantiationException if an error was encountered during startup.
 */
public void startup() throws InstantiationException {
    try {
        logger.info("starting");
        ServerConfig serverConfig = new ServerConfig(properties);
        ServerSecurityServiceFactory serverSecurityServiceFactory = Utils.getObj(serverConfig.serverSecurityServiceFactory, properties, serverMetrics, registry);
        serverSecurityService = serverSecurityServiceFactory.getServerSecurityService();
        clusterMap = clusterAgentsFactory.getClusterMap();
        logger.info("Initialized clusterMap");
        registry = clusterMap.getMetricRegistry();
        serverMetrics = new ServerMetrics(registry, VcrServer.class, VcrServer.class);
        logger.info("Setting up JMX.");
        long startTime = SystemTime.getInstance().milliseconds();
        registry = clusterMap.getMetricRegistry();
        reporter = reporterFactory != null ? reporterFactory.apply(registry) : JmxReporter.forRegistry(registry).build();
        reporter.start();
        logger.info("creating configs");
        NetworkConfig networkConfig = new NetworkConfig(properties);
        StoreConfig storeConfig = new StoreConfig(properties);
        ReplicationConfig replicationConfig = new ReplicationConfig(properties);
        CloudConfig cloudConfig = new CloudConfig(properties);
        ConnectionPoolConfig connectionPoolConfig = new ConnectionPoolConfig(properties);
        ClusterMapConfig clusterMapConfig = new ClusterMapConfig(properties);
        SSLConfig sslConfig = new SSLConfig(properties);
        // verify the configs
        properties.verify();
        // initialize cloud destination
        if (cloudDestinationFactory == null) {
            cloudDestinationFactory = Utils.getObj(cloudConfig.cloudDestinationFactoryClass, properties, registry, clusterMap);
        }
        cloudDestination = cloudDestinationFactory.getCloudDestination();
        // TODO Make sure that config.updaterPollingIntervalMs value is large (~one day) for VCR.
        AccountServiceFactory accountServiceFactory = Utils.getObj(serverConfig.serverAccountServiceFactory, properties, registry);
        AccountService accountService = accountServiceFactory.getAccountService();
        vcrClusterParticipant = ((VcrClusterAgentsFactory) Utils.getObj(cloudConfig.vcrClusterAgentsFactoryClass, cloudConfig, clusterMapConfig, clusterMap, accountService, storeConfig, cloudDestination, registry)).getVcrClusterParticipant();
        scheduler = Utils.newScheduler(serverConfig.serverSchedulerNumOfthreads, false);
        StoreKeyFactory storeKeyFactory = Utils.getObj(storeConfig.storeKeyFactory, clusterMap);
        SSLFactory sslFactory = new NettySslHttp2Factory(sslConfig);
        if (clusterMapConfig.clusterMapEnableHttp2Replication) {
            connectionPool = new Http2BlockingChannelPool(sslFactory, new Http2ClientConfig(properties), new Http2ClientMetrics(registry));
            logger.info("Using http2 for VCR replication.");
        } else {
            connectionPool = new BlockingChannelConnectionPool(connectionPoolConfig, sslConfig, clusterMapConfig, registry);
            logger.info("Using blocking channel for VCR replication.");
        }
        connectionPool.start();
        StoreKeyConverterFactory storeKeyConverterFactory = Utils.getObj(serverConfig.serverStoreKeyConverterFactory, properties, registry);
        VcrMetrics vcrMetrics = new VcrMetrics(registry);
        CloudStorageManager cloudStorageManager = new CloudStorageManager(properties, vcrMetrics, cloudDestination, clusterMap);
        vcrReplicationManager = new VcrReplicationManager(cloudConfig, replicationConfig, clusterMapConfig, storeConfig, cloudStorageManager, storeKeyFactory, clusterMap, vcrClusterParticipant, cloudDestination, scheduler, connectionPool, vcrMetrics, notificationSystem, storeKeyConverterFactory, serverConfig.serverMessageTransformer);
        vcrReplicationManager.start();
        DataNodeId currentNode = vcrClusterParticipant.getCurrentDataNodeId();
        ArrayList<Port> ports = new ArrayList<Port>();
        ports.add(new Port(networkConfig.port, PortType.PLAINTEXT));
        if (currentNode.hasSSLPort()) {
            ports.add(new Port(cloudConfig.vcrSslPort, PortType.SSL));
        }
        networkServer = new SocketServer(networkConfig, sslConfig, registry, ports);
        // todo fix enableDataPrefetch
        ServerMetrics serverMetrics = new ServerMetrics(registry, VcrRequests.class, VcrServer.class);
        VcrRequests requests = new VcrRequests(cloudStorageManager, networkServer.getRequestResponseChannel(), clusterMap, currentNode, registry, serverMetrics, new FindTokenHelper(storeKeyFactory, replicationConfig), notificationSystem, vcrReplicationManager, storeKeyFactory, storeKeyConverterFactory);
        requestHandlerPool = new RequestHandlerPool(serverConfig.serverRequestHandlerNumOfThreads, networkServer.getRequestResponseChannel(), requests);
        networkServer.start();
        // Start netty http2 server
        if (currentNode.hasHttp2Port()) {
            logger.info("Http2 port {} is enabled. Starting HTTP/2 service.", currentNode.getHttp2Port());
            NettyConfig nettyConfig = new NettyConfig(properties);
            NettyMetrics nettyMetrics = new NettyMetrics(registry);
            Http2ServerMetrics http2ServerMetrics = new Http2ServerMetrics(registry);
            Http2ClientConfig http2ClientConfig = new Http2ClientConfig(properties);
            NettyServerRequestResponseChannel requestResponseChannel = new NettyServerRequestResponseChannel(networkConfig.queuedMaxRequests, http2ServerMetrics);
            VcrRequests vcrRequestsForHttp2 = new VcrRequests(cloudStorageManager, requestResponseChannel, clusterMap, currentNode, registry, serverMetrics, new FindTokenHelper(storeKeyFactory, replicationConfig), notificationSystem, vcrReplicationManager, storeKeyFactory, storeKeyConverterFactory);
            requestHandlerPoolForHttp2 = new RequestHandlerPool(serverConfig.serverRequestHandlerNumOfThreads, requestResponseChannel, vcrRequestsForHttp2);
            NioServerFactory nioServerFactory = new StorageServerNettyFactory(currentNode.getHttp2Port(), requestResponseChannel, sslFactory, nettyConfig, http2ClientConfig, serverMetrics, nettyMetrics, http2ServerMetrics, serverSecurityService);
            nettyHttp2Server = nioServerFactory.getNioServer();
            nettyHttp2Server.start();
        }
        long processingTime = SystemTime.getInstance().milliseconds() - startTime;
        logger.info("VCR startup time in Ms {}", processingTime);
    } catch (Exception e) {
        logger.error("Error during VCR startup", e);
        throw new InstantiationException("failure during VCR startup " + e);
    }
}
Also used : ConnectionPoolConfig(com.github.ambry.config.ConnectionPoolConfig) SSLFactory(com.github.ambry.commons.SSLFactory) Http2ClientMetrics(com.github.ambry.network.http2.Http2ClientMetrics) Port(com.github.ambry.network.Port) CloudConfig(com.github.ambry.config.CloudConfig) ArrayList(java.util.ArrayList) StorageServerNettyFactory(com.github.ambry.rest.StorageServerNettyFactory) NettySslHttp2Factory(com.github.ambry.commons.NettySslHttp2Factory) NettyConfig(com.github.ambry.config.NettyConfig) ServerConfig(com.github.ambry.config.ServerConfig) StoreKeyFactory(com.github.ambry.store.StoreKeyFactory) StoreKeyConverterFactory(com.github.ambry.store.StoreKeyConverterFactory) ServerMetrics(com.github.ambry.commons.ServerMetrics) Http2ServerMetrics(com.github.ambry.network.http2.Http2ServerMetrics) SSLConfig(com.github.ambry.config.SSLConfig) ReplicationConfig(com.github.ambry.config.ReplicationConfig) Http2BlockingChannelPool(com.github.ambry.network.http2.Http2BlockingChannelPool) SocketServer(com.github.ambry.network.SocketServer) NettyMetrics(com.github.ambry.rest.NettyMetrics) FindTokenHelper(com.github.ambry.replication.FindTokenHelper) NettyServerRequestResponseChannel(com.github.ambry.network.NettyServerRequestResponseChannel) NetworkConfig(com.github.ambry.config.NetworkConfig) Http2ClientConfig(com.github.ambry.config.Http2ClientConfig) ClusterMapConfig(com.github.ambry.config.ClusterMapConfig) BlockingChannelConnectionPool(com.github.ambry.network.BlockingChannelConnectionPool) IOException(java.io.IOException) ServerSecurityServiceFactory(com.github.ambry.rest.ServerSecurityServiceFactory) Http2ServerMetrics(com.github.ambry.network.http2.Http2ServerMetrics) NioServerFactory(com.github.ambry.rest.NioServerFactory) RequestHandlerPool(com.github.ambry.protocol.RequestHandlerPool) StoreConfig(com.github.ambry.config.StoreConfig) AccountService(com.github.ambry.account.AccountService) DataNodeId(com.github.ambry.clustermap.DataNodeId) AccountServiceFactory(com.github.ambry.account.AccountServiceFactory)
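One small building block from the startup sequence is worth isolating: the cluster map's MetricRegistry is exposed over JMX via a Dropwizard JmxReporter unless a custom reporter factory was injected. Below is a minimal standalone sketch of that wiring, assuming metrics-core 3.x (where JmxReporter lives in com.codahale.metrics; newer versions move it to com.codahale.metrics.jmx).

import com.codahale.metrics.JmxReporter;
import com.codahale.metrics.MetricRegistry;

public class MetricsReportingSketch {
    public static JmxReporter startJmxReporting(MetricRegistry registry) {
        // Mirrors the startup() line:
        //   reporter = reporterFactory != null ? reporterFactory.apply(registry)
        //                                      : JmxReporter.forRegistry(registry).build();
        JmxReporter reporter = JmxReporter.forRegistry(registry).build();
        reporter.start();
        // Metrics registered on this registry are now visible to JMX clients (e.g. jconsole)
        // until reporter.stop() is called during shutdown.
        return reporter;
    }
}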

Example 49 with ClusterMapConfig

Use of com.github.ambry.config.ClusterMapConfig in project ambry by LinkedIn.

Class HelixBootstrapUpgradeUtil, method addUpdateInstances.

/**
 * Add and/or update instances in Helix based on the information in the static cluster map.
 * @param dcName the name of the datacenter being processed.
 * @param partitionsToInstancesInDc a map to be filled with the mapping of partitions to their instance sets in the
 *                                  given datacenter.
 */
private void addUpdateInstances(String dcName, Map<String, Set<String>> partitionsToInstancesInDc) {
    ClusterMapConfig config = getClusterMapConfig(clusterName, dcName, null);
    String zkConnectStr = dataCenterToZkAddress.get(dcName).getZkConnectStrs().get(0);
    try (PropertyStoreToDataNodeConfigAdapter propertyStoreAdapter = new PropertyStoreToDataNodeConfigAdapter(zkConnectStr, config)) {
        InstanceConfigToDataNodeConfigAdapter.Converter instanceConfigConverter = new InstanceConfigToDataNodeConfigAdapter.Converter(config);
        info("[{}] Getting list of instances in {}", dcName.toUpperCase(), dcName);
        Set<String> instancesInHelix = new HashSet<>(getInstanceNamesInHelix(dcName, propertyStoreAdapter));
        Set<String> instancesInStatic = dcToInstanceNameToDataNodeId.get(dcName) == null ? new HashSet<>() : new HashSet<>(dcToInstanceNameToDataNodeId.get(dcName).keySet());
        Set<String> instancesInBoth = new HashSet<>(instancesInHelix);
        // set instances in both correctly.
        instancesInBoth.retainAll(instancesInStatic);
        // set instances in Helix only correctly.
        instancesInHelix.removeAll(instancesInBoth);
        // set instances in Static only correctly.
        instancesInStatic.removeAll(instancesInBoth);
        int totalInstances = instancesInBoth.size() + instancesInHelix.size() + instancesInStatic.size();
        for (String instanceName : instancesInBoth) {
            DataNodeConfig nodeConfigFromHelix = getDataNodeConfigFromHelix(dcName, instanceName, propertyStoreAdapter, instanceConfigConverter);
            DataNodeConfig nodeConfigFromStatic = createDataNodeConfigFromStatic(dcName, instanceName, nodeConfigFromHelix, partitionsToInstancesInDc, instanceConfigConverter);
            if (!nodeConfigFromStatic.equals(nodeConfigFromHelix, !overrideReplicaStatus)) {
                if (helixAdminOperation == HelixAdminOperation.BootstrapCluster) {
                    if (!dryRun) {
                        info("[{}] Instance {} already present in Helix {}, but config has changed, updating. Remaining instances: {}", dcName.toUpperCase(), instanceName, dataNodeConfigSourceType.name(), --totalInstances);
                        // Continuing on the note above, if there is indeed a change, we must make a call on whether RO/RW, replica
                        // availability and so on should be updated at all (if not, nodeConfigFromStatic should be replaced with
                        // the appropriate dataNodeConfig that is constructed with the correct values from both).
                        // For now, only bootstrapping cluster is allowed to directly change DataNodeConfig
                        setDataNodeConfigInHelix(dcName, instanceName, nodeConfigFromStatic, propertyStoreAdapter, instanceConfigConverter);
                    } else {
                        info("[{}] Instance {} already present in Helix {}, but config has changed, no action as dry run. Remaining instances: {}", dcName.toUpperCase(), instanceName, dataNodeConfigSourceType.name(), --totalInstances);
                        logger.debug("[{}] Previous config: {} \n New config: {}", dcName.toUpperCase(), nodeConfigFromHelix, nodeConfigFromStatic);
                    }
                    // for dryRun, we update counter but don't really change the DataNodeConfig in Helix
                    instancesUpdated.getAndIncrement();
                }
            } else {
                if (!dryRun) {
                    info("[{}] Instance {} already present in Helix {}, with same Data, skipping. Remaining instances: {}", dcName.toUpperCase(), instanceName, dataNodeConfigSourceType.name(), --totalInstances);
                }
            }
        }
        for (String instanceName : instancesInStatic) {
            DataNodeConfig nodeConfigFromStatic = createDataNodeConfigFromStatic(dcName, instanceName, null, partitionsToInstancesInDc, instanceConfigConverter);
            info("[{}] Instance {} is new, {}. Remaining instances: {}", dcName.toUpperCase(), instanceName, dryRun ? "no action as dry run" : "adding to Helix " + dataNodeConfigSourceType.name(), --totalInstances);
            // replica addition.
            if (helixAdminOperation == HelixAdminOperation.BootstrapCluster) {
                if (!dryRun) {
                    addDataNodeConfigToHelix(dcName, nodeConfigFromStatic, propertyStoreAdapter, instanceConfigConverter);
                }
                instancesAdded.getAndIncrement();
            }
        }
        for (String instanceName : instancesInHelix) {
            if (forceRemove) {
                info("[{}] Instance {} is in Helix {}, but not in static. {}. Remaining instances: {}", dcName.toUpperCase(), instanceName, dataNodeConfigSourceType.name(), dryRun ? "No action as dry run" : "Forcefully removing", --totalInstances);
                if (helixAdminOperation == HelixAdminOperation.BootstrapCluster) {
                    if (!dryRun) {
                        removeDataNodeConfigFromHelix(dcName, instanceName, propertyStoreAdapter);
                    }
                    instancesDropped.getAndIncrement();
                }
            } else {
                info("[{}] Instance {} is in Helix {}, but not in static. Ignoring for now (use --forceRemove to forcefully remove). " + "Remaining instances: {}", dcName.toUpperCase(), instanceName, dataNodeConfigSourceType.name(), --totalInstances);
                expectMoreInHelixDuringValidate = true;
                instancesNotForceRemovedByDc.computeIfAbsent(dcName, k -> ConcurrentHashMap.newKeySet()).add(instanceName);
            }
        }
    }
}
Also used : Arrays(java.util.Arrays) ClusterMapUtils(com.github.ambry.clustermap.ClusterMapUtils) SortedSet(java.util.SortedSet) LoggerFactory(org.slf4j.LoggerFactory) JSONException(org.json.JSONException) JSONObject(org.json.JSONObject) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) Map(java.util.Map) SharedZkClientFactory(org.apache.helix.zookeeper.impl.factory.SharedZkClientFactory) AccessOption(org.apache.helix.AccessOption) EnumSet(java.util.EnumSet) LeaderStandbySMD(org.apache.helix.model.LeaderStandbySMD) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) ZKUtil(org.apache.helix.manager.zk.ZKUtil) Set(java.util.Set) Utils(com.github.ambry.utils.Utils) HelixPropertyStoreConfig(com.github.ambry.config.HelixPropertyStoreConfig) Collectors(java.util.stream.Collectors) Objects(java.util.Objects) ZNRecord(org.apache.helix.zookeeper.datamodel.ZNRecord) CountDownLatch(java.util.concurrent.CountDownLatch) List(java.util.List) Optional(java.util.Optional) IdealState(org.apache.helix.model.IdealState) CommonUtils(com.github.ambry.commons.CommonUtils) HashMap(java.util.HashMap) HelixZkClient(org.apache.helix.zookeeper.api.client.HelixZkClient) ResourceConfig(org.apache.helix.model.ResourceConfig) TreeSet(java.util.TreeSet) ArrayList(java.util.ArrayList) HashSet(java.util.HashSet) HelixPropertyStore(org.apache.helix.store.HelixPropertyStore) RealmAwareZkClient(org.apache.helix.zookeeper.api.client.RealmAwareZkClient) DataNodeConfigSourceType(com.github.ambry.clustermap.DataNodeConfigSourceType) StateModelDefinition(org.apache.helix.model.StateModelDefinition) Properties(java.util.Properties) Logger(org.slf4j.Logger) VerifiableProperties(com.github.ambry.config.VerifiableProperties) IOException(java.io.IOException) InstanceConfig(org.apache.helix.model.InstanceConfig) File(java.io.File) TimeUnit(java.util.concurrent.TimeUnit) HelixAdmin(org.apache.helix.HelixAdmin) TreeMap(java.util.TreeMap) ZNRecordSerializer(org.apache.helix.manager.zk.ZNRecordSerializer) ClusterMapConfig(com.github.ambry.config.ClusterMapConfig) ZKHelixAdmin(org.apache.helix.manager.zk.ZKHelixAdmin) Comparator(java.util.Comparator) Collections(java.util.Collections) ClusterMapConfig(com.github.ambry.config.ClusterMapConfig) HashSet(java.util.HashSet)
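The core bookkeeping in addUpdateInstances is a three-way partition of instance names into "present in both", "Helix only", and "static only", done on mutable working copies with retainAll/removeAll. A self-contained sketch of just that partition follows; the instance names are hypothetical, in a hostname_port style.

import java.util.Arrays;
import java.util.HashSet;
import java.util.Set;

public class InstancePartitionSketch {
    public static void main(String[] args) {
        Set<String> instancesInHelix = new HashSet<>(Arrays.asList("host1_15088", "host2_15088", "host3_15088"));
        Set<String> instancesInStatic = new HashSet<>(Arrays.asList("host2_15088", "host3_15088", "host4_15088"));

        // Instances known to both Helix and the static map: compare configs, update if changed.
        Set<String> instancesInBoth = new HashSet<>(instancesInHelix);
        instancesInBoth.retainAll(instancesInStatic);   // host2_15088, host3_15088

        // Instances only in Helix: removal candidates (acted on only with --forceRemove).
        instancesInHelix.removeAll(instancesInBoth);    // host1_15088

        // Instances only in the static map: new nodes to add to Helix.
        instancesInStatic.removeAll(instancesInBoth);   // host4_15088

        System.out.println("in both:     " + instancesInBoth);
        System.out.println("Helix only:  " + instancesInHelix);
        System.out.println("static only: " + instancesInStatic);
    }
}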

Example 50 with ClusterMapConfig

Use of com.github.ambry.config.ClusterMapConfig in project ambry by LinkedIn.

Class HelixBootstrapUpgradeUtil, method getClusterMapConfig.

/**
 * Exposed for test use.
 * @param clusterName cluster name to use in the config
 * @param dcName datacenter name to use in the config
 * @param zkLayoutPath if non-null, read ZK connection configs from the file at this path.
 * @return the {@link ClusterMapConfig}
 */
static ClusterMapConfig getClusterMapConfig(String clusterName, String dcName, String zkLayoutPath) {
    Properties props = new Properties();
    props.setProperty("clustermap.host.name", "localhost");
    props.setProperty("clustermap.cluster.name", clusterName);
    props.setProperty("clustermap.datacenter.name", dcName);
    if (zkLayoutPath != null) {
        try {
            props.setProperty("clustermap.dcs.zk.connect.strings", Utils.readStringFromFile(zkLayoutPath));
        } catch (IOException e) {
            throw new RuntimeException("Could not read zk layout file", e);
        }
    }
    return new ClusterMapConfig(new VerifiableProperties(props));
}
Also used : VerifiableProperties(com.github.ambry.config.VerifiableProperties) IOException(java.io.IOException) Properties(java.util.Properties) ClusterMapConfig(com.github.ambry.config.ClusterMapConfig)
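A short usage sketch for this helper, e.g. from a test in the same package (the method is package-private); the cluster and datacenter names below are hypothetical, and zkLayoutPath is left null so no ZK connect strings are read from disk.

// Inside a class in the same package as HelixBootstrapUpgradeUtil:
static ClusterMapConfig buildTestConfig() {
    // No ZK layout file supplied, so "clustermap.dcs.zk.connect.strings" is not set.
    return HelixBootstrapUpgradeUtil.getClusterMapConfig("MyTestCluster", "DC1", null);
}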

Aggregations

ClusterMapConfig (com.github.ambry.config.ClusterMapConfig): 100 usages
VerifiableProperties (com.github.ambry.config.VerifiableProperties): 81 usages
Test (org.junit.Test): 56 usages
Properties (java.util.Properties): 52 usages
MetricRegistry (com.codahale.metrics.MetricRegistry): 47 usages
ArrayList (java.util.ArrayList): 31 usages
IOException (java.io.IOException): 26 usages
HashSet (java.util.HashSet): 25 usages
JSONObject (org.json.JSONObject): 25 usages
File (java.io.File): 24 usages
ClusterMap (com.github.ambry.clustermap.ClusterMap): 23 usages
HashMap (java.util.HashMap): 21 usages
MockClusterMap (com.github.ambry.clustermap.MockClusterMap): 19 usages
ClusterAgentsFactory (com.github.ambry.clustermap.ClusterAgentsFactory): 18 usages
DataNodeId (com.github.ambry.clustermap.DataNodeId): 18 usages
StoreConfig (com.github.ambry.config.StoreConfig): 18 usages
ReplicaId (com.github.ambry.clustermap.ReplicaId): 16 usages
List (java.util.List): 16 usages
Map (java.util.Map): 16 usages
CountDownLatch (java.util.concurrent.CountDownLatch): 16 usages