Example 6 with ZooKeeper

use of com.linkedin.d2.discovery.stores.zk.ZooKeeper in project rest.li by linkedin.

the class D2Config method configure.

public int configure() throws Exception {
    // original map derived from properties file
    Map<String, Object> clusterServiceConfiguration = merge(_clusterServiceConfigurations);
    // map of clusterName -> cluster configuration
    Map<String, Map<String, Object>> clusters = new HashMap<String, Map<String, Object>>();
    // map of serviceName -> service configuration
    Map<String, Map<String, Object>> services = new HashMap<String, Map<String, Object>>();
    // Ugly. But this is a map of service groups, so it needs to reflect multiple services maps.
    Map<String, Map<String, Map<String, Object>>> serviceVariants = new HashMap<String, Map<String, Map<String, Object>>>();
    // temporary mapping from cluster name to services map, to aid in creating cluster variants and
    // service groups.
    Map<String, Map<String, Map<String, Object>>> clusterToServiceMapping = new HashMap<String, Map<String, Map<String, Object>>>();
    int status;
    // temporary mapping from cluster name to the list of colo variants it has.
    Map<String, List<String>> variantToVariantsMapping = new HashMap<String, List<String>>();
    // temporary mapping from cluster name to coloVariant ClusterNames list.
    Map<String, List<String>> clusterToColoClustersMapping = new HashMap<String, List<String>>();
    // mapping from regular cluster name to the list of containing services
    // which will be added as children of the regular cluster znode.
    Map<String, List<String>> regularClusterToServicesMapping = new HashMap<>();
    _log.info("basePath: " + _basePath);
    _log.info("clusterDefaults: " + _clusterDefaults);
    _log.info("serviceDefaults: " + _serviceDefaults);
    final String defaultColo = (String) _clusterDefaults.remove(PropertyKeys.DEFAULT_COLO);
    // Solution 2 is the approach taken below.
    for (String clusterName : clusterServiceConfiguration.keySet()) {
        @SuppressWarnings("unchecked") Map<String, Object> clusterConfig = (Map<String, Object>) clusterServiceConfiguration.get(clusterName);
        clusterConfig.put(PropertyKeys.CLUSTER_NAME, clusterName);
        final Object servicesProperty = clusterConfig.remove(PropertyKeys.SERVICES);
        @SuppressWarnings("unchecked") Map<String, Map<String, Object>> servicesConfigs = (Map<String, Map<String, Object>>) servicesProperty;
        final Object clusterVariantProperty = clusterConfig.remove(PropertyKeys.CLUSTER_VARIANTS);
        @SuppressWarnings("unchecked") Map<String, Map<String, Object>> clusterVariantConfig = (Map<String, Map<String, Object>>) clusterVariantProperty;
        final Object coloVariantsProperty = clusterConfig.remove(PropertyKeys.COLO_VARIANTS);
        @SuppressWarnings("unchecked") List<String> coloVariants = (List<String>) coloVariantsProperty;
        final String masterColo = (String) clusterConfig.remove(PropertyKeys.MASTER_COLO);
        final String enableSymlinkString = (String) clusterConfig.remove(PropertyKeys.ENABLE_SYMLINK);
        final boolean enableSymlink;
        regularClusterToServicesMapping.put(clusterName, servicesConfigs.keySet().stream().collect(Collectors.toList()));
        if (enableSymlinkString != null && "true".equalsIgnoreCase(enableSymlinkString)) {
            enableSymlink = true;
        } else {
            enableSymlink = false;
        }
        // do some sanity check for partitions if any
        // Moving handling of partitionProperties before any coloVariant manipulations
        final Object partitionPropertiesProperty = clusterConfig.get(PropertyKeys.PARTITION_PROPERTIES);
        @SuppressWarnings("unchecked") Map<String, Object> partitionProperties = (Map<String, Object>) partitionPropertiesProperty;
        if (partitionProperties != null) {
            status = handlePartitionProperties(partitionProperties, clusterConfig, clusterName);
            if (status != 0) {
                return status;
            }
        }
        Map<String, String> clusterProperties = new HashMap<>();
        if (coloVariants != null && coloVariants.size() > 0 && !(coloVariants.size() == 1 && coloVariants.contains(""))) {
            clusterProperties.put(PropertyKeys.COLO_VARIANTS, String.join(LIST_SEPARATOR, coloVariants));
        }
        if (masterColo != null && !masterColo.equals("")) {
            clusterProperties.put(PropertyKeys.MASTER_COLO, masterColo);
        }
        if (clusterVariantConfig != null && clusterVariantConfig.size() > 0) {
            clusterProperties.put(PropertyKeys.CLUSTER_VARIANTS, String.join(LIST_SEPARATOR, clusterVariantConfig.keySet()));
        }
        clusterConfig.put(PropertyKeys.CLUSTER_PROPERTIES, clusterProperties);
        // Normalize coloVariants so the rest of this loop can treat every cluster uniformly and avoid lots of if/else.
        if (coloVariants == null || (coloVariants.size() == 1 && coloVariants.contains(""))) {
            coloVariants = Collections.singletonList("");
        } else {
            // Make sure the default colo (and the master colo, if applicable) is one of the peer colos.
            if (!coloVariants.contains(defaultColo)) {
                throw new IllegalStateException("The default colo: " + defaultColo + " is not one of the peer colos = " + coloVariants);
            }
            if (masterColo != null && !coloVariants.contains(masterColo) && !enableSymlink) {
                throw new IllegalStateException("The master colo: " + masterColo + " is not one of the peer colos = " + coloVariants);
            }
        }
        boolean defaultServicesCreated = false;
        for (String colo : coloVariants) {
            // the coloClusterName will be equal to the original cluster name if colo is the empty string
            String coloClusterName = D2Utils.addSuffixToBaseName(clusterName, colo);
            // coloServicesConfigs are the set of d2 services in this cluster in this colo
            // for the regular cluster case I could avoid creation of a new HashMap for both coloServicesConfig
            // and coloServiceConfig, as an optimization at the expense of simplicity.
            Map<String, Map<String, Object>> coloServicesConfigs = new HashMap<String, Map<String, Object>>();
            // Only create the default services once, and only when we have an empty colo string or the
            // colo matches the default colo.
            boolean createDefaultServices = (defaultServicesCreated == false) ? shouldCreateDefaultServices(colo, defaultColo) : false;
            for (String serviceName : servicesConfigs.keySet()) {
                // "resource" level config
                Map<String, Object> serviceConfig = servicesConfigs.get(serviceName);
                // There are some cases where we may not want to create colo variants of a particular service
                // We can't remove properties from the serviceConfig here because we might need to loop
                // over it multiple times.
                String createColoVariants = (String) serviceConfig.get(PropertyKeys.HAS_COLO_VARIANTS);
                boolean createColoVariantsForService = shouldCreateColoVariantsForService(colo, createColoVariants);
                String coloServiceName = serviceName;
                final boolean defaultRoutingToMasterColo = serviceConfig.containsKey(PropertyKeys.DEFAULT_ROUTING) && PropertyKeys.MASTER_SUFFIX.equals(serviceConfig.get(PropertyKeys.DEFAULT_ROUTING));
                // If this service is not multi-colo aware, keep the base service name and do not create
                // any colo variants of that serviceName.
                if (createColoVariantsForService) {
                    coloServiceName = D2Utils.addSuffixToBaseName(serviceName, colo);
                }
                final Object transportClientProperty = serviceConfig.get(PropertyKeys.TRANSPORT_CLIENT_PROPERTIES);
                @SuppressWarnings("unchecked") Map<String, Object> transportClientConfig = (Map<String, Object>) transportClientProperty;
                serviceConfig.put(PropertyKeys.TRANSPORT_CLIENT_PROPERTIES, transportClientConfig);
                Map<String, Object> coloServiceConfig = new HashMap<String, Object>(serviceConfig);
                // Create the default (colo-agnostic) services inline in this pass so later code
                // does not have to know what the default services are.
                if (createDefaultServices && !defaultServicesCreated) {
                    // create the Master version of this service.
                    if (masterColo != null && createColoVariantsForService) {
                        // we need to create a "Master" version of this service to point to the current Master
                        // Cluster. Why not just use the original service name? We will point the original
                        // service name at the local cluster, as well as to make it explicit that requests
                        // sent to this service might cross colos, if the master is located in another colo.
                        Map<String, Object> masterServiceConfig = new HashMap<String, Object>(serviceConfig);
                        String masterServiceName = serviceName + PropertyKeys.MASTER_SUFFIX;
                        String masterClusterName;
                        if (enableSymlink) {
                            masterClusterName = D2Utils.getSymlinkNameForMaster(clusterName);
                        } else {
                            masterClusterName = D2Utils.addSuffixToBaseName(clusterName, masterColo);
                        }
                        masterServiceConfig.put(PropertyKeys.CLUSTER_NAME, masterClusterName);
                        masterServiceConfig.put(PropertyKeys.SERVICE_NAME, masterServiceName);
                        masterServiceConfig.put(PropertyKeys.IS_MASTER_SERVICE, "true");
                        coloServicesConfigs.put(masterServiceName, masterServiceConfig);
                    }
                    // this block will handle:
                    // the colo-agnostic service -> colo-specific default cluster mapping (fooService -> FooCluster-WestCoast)
                    // the colo-agnostic service -> colo-agnostic cluster mapping (fooService -> FooCluster)
                    // the latter only being done for regular clusters, the former only being done for clusters
                    // that have coloVariants specified.
                    Map<String, Object> regularServiceConfig = new HashMap<String, Object>(serviceConfig);
                    if (createColoVariantsForService) {
                        // we set isDefaultService flag only if it is a multi-colo aware service.
                        regularServiceConfig.put(PropertyKeys.IS_DEFAULT_SERVICE, "true");
                        if (defaultRoutingToMasterColo) {
                            regularServiceConfig.put(PropertyKeys.DEFAULT_ROUTING_TO_MASTER, "true");
                        }
                    }
                    final String defaultColoClusterName = clusterNameWithRouting(clusterName, colo, defaultColo, masterColo, defaultRoutingToMasterColo, enableSymlink);
                    regularServiceConfig.put(PropertyKeys.CLUSTER_NAME, defaultColoClusterName);
                    regularServiceConfig.put(PropertyKeys.SERVICE_NAME, serviceName);
                    coloServicesConfigs.put(serviceName, regularServiceConfig);
                }
                if (!serviceName.equals(coloServiceName)) {
                    // this block will handle:
                    // the colo-specific service-> colo-specific cluster mapping (fooService-WestCoast -> FooCluster-WestCoast,
                    // fooService-EastCoast -> FooCluster-EastCoast)
                    coloServiceConfig.put(PropertyKeys.CLUSTER_NAME, coloClusterName);
                    coloServiceConfig.put(PropertyKeys.SERVICE_NAME, coloServiceName);
                    coloServicesConfigs.put(coloServiceName, coloServiceConfig);
                }
            }
            // end for each service
            status = addServicesToServicesMap(coloServicesConfigs, services, coloClusterName);
            if (status != NO_ERROR_EXIT_CODE) {
                return status;
            }
            // Now that we've created colo-specific service to colo-specific cluster mappings, we now need
            // to actually create those colo-specific clusters.
            Map<String, Object> coloClusterConfig = clusterConfig;
            if (!clusterName.equals(coloClusterName)) {
                coloClusterConfig = new HashMap<String, Object>(clusterConfig);
                coloClusterConfig.put(PropertyKeys.CLUSTER_NAME, coloClusterName);
                if (createDefaultServices) {
                    clusters.put(clusterName, clusterConfig);
                }
            }
            clusters.put(coloClusterName, coloClusterConfig);
            // The base colo cluster was put into the clusters list above, before the cluster variants are handled.
            if (clusterVariantConfig != null) {
                Map<String, Map<String, Object>> coloClusterVariantConfig = new HashMap<String, Map<String, Object>>(clusterVariantConfig);
                status = handleClusterVariants(coloClusterVariantConfig, clusterConfig, clusters, coloServicesConfigs, clusterToServiceMapping, colo, variantToVariantsMapping, masterColo, enableSymlink);
                if (status != 0) {
                    return status;
                }
            } else {
                // even if clusterVariant is not defined, it is still needed to save the coloServicesConfigs
                // in case the serviceGroup directly refers the cluster name
                clusterToServiceMapping.put(coloClusterName, coloServicesConfigs);
                // also save the coloClusterName
                addNewVariantToVariantsList(clusterToColoClustersMapping, clusterName, coloClusterName);
            }
            // Set the flag marking the default services for this cluster as created.
            if (!defaultServicesCreated && createDefaultServices == true) {
                defaultServicesCreated = true;
            }
        }
    // end for each colo variant
    }
    // there are service variants
    if (_serviceVariants != null) {
        for (String serviceGroup : _serviceVariants.keySet()) {
            // each service group contains a list of cluster names and a type field that
            // describes how to treat the list. We group together the services described by these
            // listed clusters, and prep that for writing to a different znode than the default service
            // znode directory. Note that we had already pointed those services to the appropriate cluster
            // variant earlier.
            Map<String, Map<String, Object>> servicesGroupConfig = new HashMap<String, Map<String, Object>>();
            @SuppressWarnings("unchecked") Map<String, Object> configGroupMap = (Map<String, Object>) _serviceVariants.get(serviceGroup);
            String type = (String) configGroupMap.get(PropertyKeys.TYPE);
            final Object clusterListProperty = configGroupMap.get(PropertyKeys.CLUSTER_LIST);
            @SuppressWarnings("unchecked") List<String> clusterList = (List<String>) clusterListProperty;
            // create an alternate service table for the services specified by these cluster variants
            for (Iterator<String> iter = clusterList.listIterator(); iter.hasNext(); ) {
                String clusterItem = iter.next();
                List<String> coloClusterVariantList = variantToVariantsMapping.get(clusterItem);
                if (coloClusterVariantList == null && PropertyKeys.FULL_CLUSTER_LIST.equals(type)) {
                    // For full_cluster_list type, it is allowed to specify real cluster name, not
                    // necessarily always clusterVariant. Check the clusterToColoClustersMappings.
                    coloClusterVariantList = clusterToColoClustersMapping.get(clusterItem);
                }
                if (coloClusterVariantList == null) {
                    // the service group had an unknown cluster!
                    _log.error("Unknown cluster specified: " + clusterItem);
                    return EXCEPTION_EXIT_CODE;
                }
                // If the cluster had coloVariants, add the services in those coloVariants to this
                // service group's list of services.
                for (String coloClusterVariant : coloClusterVariantList) {
                    Map<String, Map<String, Object>> candidateServices = clusterToServiceMapping.get(coloClusterVariant);
                    if (candidateServices == null) {
                        // the service group had an unknown cluster!
                        _log.error("Unknown cluster specified: " + coloClusterVariant);
                        return EXCEPTION_EXIT_CODE;
                    }
                    for (Map.Entry<String, Map<String, Object>> mapEntry : candidateServices.entrySet()) {
                        Object testValue = servicesGroupConfig.put(mapEntry.getKey(), mapEntry.getValue());
                        if (testValue != null) {
                            // We shouldn't have had conflicting services, two variants of the same cluster
                            // were probably specified in the same service group.
                            _log.error("Service group has variants of the same cluster: " + serviceGroup);
                            return EXCEPTION_EXIT_CODE;
                        }
                    }
                }
            }
            if (PropertyKeys.CLUSTER_VARIANTS_LIST.equals(type)) {
                // start from the full list of services, and then overwrite the services specified by the
                // cluster variants.
                Map<String, Map<String, Object>> fullServiceList = new HashMap<String, Map<String, Object>>(services);
                fullServiceList.putAll(servicesGroupConfig);
                serviceVariants.put(serviceGroup, fullServiceList);
            } else if (PropertyKeys.FULL_CLUSTER_LIST.equals(type)) {
                // The user has explicitly indicated that we should put only the services that
                // correspond to the named clusters in the serviceGroup.
                serviceVariants.put(serviceGroup, servicesGroupConfig);
            } else {
                _log.error("unknown serviceVariant type: " + type);
                return EXCEPTION_EXIT_CODE;
            }
        }
    }
    _log.debug("serviceVariants: " + serviceVariants);
    _zkConnection.start();
    try {
        _log.info("Cluster configuration:\n" + clusters);
        writeConfig(ZKFSUtil.clusterPath(_basePath), new ClusterPropertiesJsonSerializer(), new ClusterPropertiesJsonSerializer(), clusters, _clusterDefaults);
        _log.info("Wrote cluster configuration");
        _log.info("Service configuration:\n" + services);
        writeConfig(ZKFSUtil.servicePath(_basePath), new ServicePropertiesJsonSerializer(), new ServicePropertiesJsonSerializer(), services, _serviceDefaults);
        _log.info("Wrote service configuration");
        writeChildren(regularClusterToServicesMapping);
        _log.info("Wrote service children nodes under clusters");
        if (!serviceVariants.isEmpty()) {
            for (Map.Entry<String, Map<String, Map<String, Object>>> entry : serviceVariants.entrySet()) {
                if (_log.isDebugEnabled()) {
                    _log.info("serviceVariant: " + entry + "\n");
                } else {
                    _log.info("serviceVariant: " + entry.getKey() + "\n");
                }
                writeConfig(ZKFSUtil.servicePath(_basePath, entry.getKey()), new ServicePropertiesJsonSerializer(), new ServicePropertiesJsonSerializer(), entry.getValue(), _serviceDefaults);
            }
            _log.info("Wrote service variant configurations");
        }
        _log.info("Configuration complete");
        return NO_ERROR_EXIT_CODE;
    } finally {
        try {
            _zkConnection.shutdown();
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
            _log.warn("ZooKeeper shutdown interrupted", e);
        }
    }
}
Also used : HashMap(java.util.HashMap) ServicePropertiesJsonSerializer(com.linkedin.d2.balancer.properties.ServicePropertiesJsonSerializer) ClusterPropertiesJsonSerializer(com.linkedin.d2.balancer.properties.ClusterPropertiesJsonSerializer) ArrayList(java.util.ArrayList) List(java.util.List) HashMap(java.util.HashMap) Map(java.util.Map)
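
The configure() method above fans each cluster out into one cluster per colo and points colo-suffixed service names at the matching colo cluster, while the colo-agnostic service name keeps pointing at the default colo's cluster (plus an optional Master service pointed at the master colo's cluster). The following is a minimal, self-contained sketch of that fan-out, reusing the FooCluster/fooService names from the comments; the suffix helper is a hypothetical stand-in for D2Utils.addSuffixToBaseName, not its actual implementation, and the "-" separator is an assumption.

import java.util.Arrays;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;

public class ColoFanOutSketch {

    // Hypothetical stand-in for D2Utils.addSuffixToBaseName; the "-" separator is an assumption.
    static String addSuffix(String base, String colo) {
        return (colo == null || colo.isEmpty()) ? base : base + "-" + colo;
    }

    public static void main(String[] args) {
        List<String> coloVariants = Arrays.asList("WestCoast", "EastCoast");
        String defaultColo = "WestCoast";
        String cluster = "FooCluster";
        String service = "fooService";

        Map<String, String> serviceToCluster = new LinkedHashMap<>();
        // Colo-specific service -> colo-specific cluster (fooService-WestCoast -> FooCluster-WestCoast, ...).
        for (String colo : coloVariants) {
            serviceToCluster.put(addSuffix(service, colo), addSuffix(cluster, colo));
        }
        // Colo-agnostic (default) service -> the default colo's cluster (fooService -> FooCluster-WestCoast).
        serviceToCluster.put(service, addSuffix(cluster, defaultColo));

        serviceToCluster.forEach((s, c) -> System.out.println(s + " -> " + c));
    }
}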

Example 7 with ZooKeeper

use of com.linkedin.d2.discovery.stores.zk.ZooKeeper in project rest.li by linkedin.

the class TestD2Config method testSetup.

@BeforeMethod
public void testSetup() throws IOException, Exception {
    // Startup zookeeper server
    try {
        _zkServer = new ZKServer(ZK_PORT);
        _zkServer.startup();
    } catch (IOException e) {
        fail("unable to instantiate real zk server on port " + ZK_PORT);
        e.printStackTrace();
    }
    // Client
    try {
        _zkclient = ZKTestUtil.getConnection(_zkHosts, 10000);
    } catch (Exception e) {
        fail("unable to startup zk client.");
        e.printStackTrace();
    }
}
Also used : ZKServer(com.linkedin.d2.discovery.stores.zk.ZKServer) IOException(java.io.IOException) URISyntaxException(java.net.URISyntaxException) PartitionAccessException(com.linkedin.d2.balancer.util.partitions.PartitionAccessException) PropertyStoreException(com.linkedin.d2.discovery.stores.PropertyStoreException) KeeperException(org.apache.zookeeper.KeeperException) IOException(java.io.IOException) BeforeMethod(org.testng.annotations.BeforeMethod)

Example 8 with ZooKeeper

use of com.linkedin.d2.discovery.stores.zk.ZooKeeper in project rest.li by linkedin.

the class TestD2Config method testWriteConfigDelta.

@Test
public static void testWriteConfigDelta() throws Exception {
    // Upload config for the first time.
    @SuppressWarnings("serial") Map<String, List<String>> clustersData = new HashMap<String, List<String>>() {

        {
            put("cluster-a", Arrays.asList(new String[] { "service-1_a", "service-1_b" }));
            put("cluster-b", Arrays.asList(new String[] { "service-2_a", "service-2_b" }));
        }
    };
    D2ConfigTestUtil d2Conf = new D2ConfigTestUtil(clustersData);
    d2Conf.setUseDeltaWrite(true);
    assertEquals(d2Conf.runDiscovery(_zkHosts), 0);
    // Upload the same config with service-1_a replaced with service-1_c.
    //  - Duplicate services will not be rewritten.
    //  - New service (service-1_c) will be added.
    //  - Deleted service (service-1_a) will NOT be deleted.
    // Therefore all clusters & services should be uploaded once after this step (aka all have version 1).
    clustersData.put("cluster-a", Arrays.asList(new String[] { "service-1_b", "service-1_c" }));
    d2Conf = new D2ConfigTestUtil(clustersData);
    d2Conf.setUseDeltaWrite(true);
    assertEquals(d2Conf.runDiscovery(_zkHosts), 0);
    // Upload cluster-a and service-1_a again with new properties.
    // They should both change to version 2 while others remain version 1.
    //  - Rest of cluster-a's fields deleted.
    //  - cluster-b deleted
    //  - cluster-c added
    @SuppressWarnings("serial") Map<String, List<String>> modifiedClustersData = new HashMap<String, List<String>>() {

        {
            put("cluster-a", Arrays.asList(new String[] { "service-1_a" }));
            put("cluster-c", Arrays.asList(new String[] { "service-3_a" }));
        }
    };
    d2Conf = new D2ConfigTestUtil(modifiedClustersData);
    d2Conf.setUseDeltaWrite(true);
    d2Conf.setServiceDefaults(Arrays.asList(new String[] { "degrader-new" }));
    d2Conf.setClusterProperties(1001, 1002);
    assertEquals(d2Conf.runDiscovery(_zkHosts), 0);
    // Build map of path to expected version.
    final HashMap<String, Integer> expectedVersionMap = new HashMap<String, Integer>();
    expectedVersionMap.put("/d2/services/service-1_a", 2);
    expectedVersionMap.put("/d2/services/service-1_b", 1);
    expectedVersionMap.put("/d2/services/service-1_c", 1);
    expectedVersionMap.put("/d2/services/service-2_a", 1);
    expectedVersionMap.put("/d2/services/service-2_b", 1);
    expectedVersionMap.put("/d2/services/service-3_a", 1);
    expectedVersionMap.put("/d2/clusters/cluster-a", 2);
    expectedVersionMap.put("/d2/clusters/cluster-b", 1);
    expectedVersionMap.put("/d2/clusters/cluster-c", 1);
    // Get actual version number for each path.
    final HashMap<String, Integer> actualVersionMap = new HashMap<String, Integer>();
    final CountDownLatch latch = new CountDownLatch(expectedVersionMap.size());
    final AsyncCallback.StatCallback statCallback = new AsyncCallback.StatCallback() {

        @Override
        public void processResult(int rc, String path, Object ctx, Stat stat) {
            KeeperException.Code code = KeeperException.Code.get(rc);
            if (code == KeeperException.Code.OK) {
                actualVersionMap.put(path, stat.getVersion());
                latch.countDown();
            }
        }
    };
    ZooKeeper zk = _zkclient.getZooKeeper();
    for (String path : expectedVersionMap.keySet()) {
        zk.exists(path, false, statCallback, null);
    }
    // Wait for expectedVersionMap to be populated.
    if (!latch.await(5, TimeUnit.SECONDS)) {
        fail("Unable to get stat for all paths.");
    }
    for (String path : expectedVersionMap.keySet()) {
        assertEquals(actualVersionMap.get(path).intValue(), expectedVersionMap.get(path).intValue());
    }
}
Also used : HashMap(java.util.HashMap) AsyncCallback(org.apache.zookeeper.AsyncCallback) CountDownLatch(java.util.concurrent.CountDownLatch) Stat(org.apache.zookeeper.data.Stat) ZooKeeper(com.linkedin.d2.discovery.stores.zk.ZooKeeper) List(java.util.List) ArrayList(java.util.ArrayList) KeeperException(org.apache.zookeeper.KeeperException) Test(org.testng.annotations.Test)
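
The version assertions above work because a ZooKeeper znode's data version increments only when its data is actually rewritten, so nodes skipped by the delta write stay at version 1 while re-uploaded ones move to version 2. As a reference point, the same check can be made with the synchronous exists() of the plain Apache client; this sketch uses org.apache.zookeeper.ZooKeeper directly rather than the d2 wrapper, and the connection string, timeout, and path are placeholders.

import org.apache.zookeeper.ZooKeeper;
import org.apache.zookeeper.data.Stat;

public class ZnodeVersionCheck {
    public static void main(String[] args) throws Exception {
        // Placeholder connection string and session timeout.
        ZooKeeper zk = new ZooKeeper("localhost:2181", 10000, event -> { });
        try {
            Stat stat = zk.exists("/d2/services/service-1_a", false);
            if (stat != null) {
                // getVersion() returns the data version, which increments on every rewrite of the node.
                System.out.println("version = " + stat.getVersion());
            }
        } finally {
            zk.close();
        }
    }
}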

Example 9 with ZooKeeper

use of com.linkedin.d2.discovery.stores.zk.ZooKeeper in project rest.li by linkedin.

the class TestLoadBalancerClientCli method testSetup.

@BeforeMethod
public void testSetup() throws IOException, Exception {
    // Startup zookeeper server
    try {
        _zkServer = new ZKServer();
        _zkServer.startup();
        _zkHosts = ZK_HOST + ":" + _zkServer.getPort();
    } catch (IOException e) {
        fail("unable to instantiate real zk server .");
        e.printStackTrace();
    }
}
Also used : ZKServer(com.linkedin.d2.discovery.stores.zk.ZKServer) IOException(java.io.IOException) BeforeMethod(org.testng.annotations.BeforeMethod)

Example 10 with ZooKeeper

use of com.linkedin.d2.discovery.stores.zk.ZooKeeper in project rest.li by linkedin.

the class ExampleD2Client method main.

public static void main(String[] args) throws IOException, ParseException, InterruptedException {
    //get client configuration
    JSONObject json = parseConfig();
    String zkConnectString = (String) json.get("zkConnectString");
    Long zkSessionTimeout = (Long) json.get("zkSessionTimeout");
    String zkBasePath = (String) json.get("zkBasePath");
    Long zkStartupTimeout = (Long) json.get("zkStartupTimeout");
    Long zkLoadBalancerNotificationTimeout = (Long) json.get("zkLoadBalancerNotificationTimeout");
    String zkFlagFile = (String) json.get("zkFlagFile");
    String fsBasePath = (String) json.get("fsBasePath");
    final Map<String, Long> trafficProportion = (Map<String, Long>) json.get("trafficProportion");
    final Long clientShutdownTimeout = (Long) json.get("clientShutdownTimeout");
    final Long clientStartTimeout = (Long) json.get("clientStartTimeout");
    Long rate = (Long) json.get("rateMillisecond");
    System.out.println("Finished parsing client config");
    //create d2 client
    final D2Client d2Client = new D2ClientBuilder().setZkHosts(zkConnectString).setZkSessionTimeout(zkSessionTimeout, TimeUnit.MILLISECONDS).setZkStartupTimeout(zkStartupTimeout, TimeUnit.MILLISECONDS).setLbWaitTimeout(zkLoadBalancerNotificationTimeout, TimeUnit.MILLISECONDS).setFlagFile(zkFlagFile).setBasePath(zkBasePath).setFsBasePath(fsBasePath).build();
    System.out.println("Finished creating d2 client, starting d2 client...");
    ScheduledExecutorService executorService = Executors.newSingleThreadScheduledExecutor();
    final CountDownLatch latch = new CountDownLatch(1);
    //start d2 client by connecting to zookeeper
    startClient(d2Client, executorService, clientStartTimeout, new Callback<None>() {

        @Override
        public void onError(Throwable e) {
            System.exit(1);
        }

        @Override
        public void onSuccess(None result) {
            latch.countDown();
        }
    });
    latch.await();
    System.out.println("D2 client is sending traffic");
    ScheduledFuture task = executorService.scheduleAtFixedRate(new Runnable() {

        @Override
        public void run() {
            try {
                sendTraffic(trafficProportion, d2Client);
            } catch (URISyntaxException e) {
                e.printStackTrace();
            }
        }
    }, 0, rate, TimeUnit.MILLISECONDS);
    System.out.println("Press enter to stop D2 client...");
    System.in.read();
    task.cancel(false);
    System.out.println("Shutting down...");
    shutdown(d2Client, executorService, clientShutdownTimeout);
}
Also used : ScheduledExecutorService(java.util.concurrent.ScheduledExecutorService) D2Client(com.linkedin.d2.balancer.D2Client) D2ClientBuilder(com.linkedin.d2.balancer.D2ClientBuilder) URISyntaxException(java.net.URISyntaxException) CountDownLatch(java.util.concurrent.CountDownLatch) ScheduledFuture(java.util.concurrent.ScheduledFuture) JSONObject(org.json.simple.JSONObject) Map(java.util.Map) None(com.linkedin.common.util.None)
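
sendTraffic is invoked above but not shown. As a rough sketch of what a single d2-addressed request looks like, the snippet below builds a d2:// URI and sends it through the D2Client, assuming the standard r2 RestRequestBuilder API; "exampleService" is a placeholder service name, not one defined in this example.

import java.net.URI;

import com.linkedin.common.callback.Callback;
import com.linkedin.d2.balancer.D2Client;
import com.linkedin.r2.message.rest.RestRequest;
import com.linkedin.r2.message.rest.RestRequestBuilder;
import com.linkedin.r2.message.rest.RestResponse;

public class SendOneRequest {
    static void sendOnce(D2Client d2Client) {
        // d2:// URIs are resolved against the service -> cluster -> host mapping stored in ZooKeeper.
        RestRequest request = new RestRequestBuilder(URI.create("d2://exampleService")).build();
        d2Client.restRequest(request, new Callback<RestResponse>() {
            @Override
            public void onError(Throwable e) {
                e.printStackTrace();
            }

            @Override
            public void onSuccess(RestResponse result) {
                System.out.println("status = " + result.getStatus());
            }
        });
    }
}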

Aggregations

Map (java.util.Map) 8
ArrayList (java.util.ArrayList) 6
CountDownLatch (java.util.concurrent.CountDownLatch) 6
JSONObject (org.json.simple.JSONObject) 6
None (com.linkedin.common.util.None) 5
IOException (java.io.IOException) 5
HashMap (java.util.HashMap) 5
ScheduledExecutorService (java.util.concurrent.ScheduledExecutorService) 5
D2Client (com.linkedin.d2.balancer.D2Client) 4
D2ClientBuilder (com.linkedin.d2.balancer.D2ClientBuilder) 4
ZooKeeper (com.linkedin.d2.discovery.stores.zk.ZooKeeper) 4
List (java.util.List) 4
ScheduledFuture (java.util.concurrent.ScheduledFuture) 4
URI (java.net.URI) 3
AsyncCallback (org.apache.zookeeper.AsyncCallback) 3
KeeperException (org.apache.zookeeper.KeeperException) 3
Stat (org.apache.zookeeper.data.Stat) 3
ParseException (org.json.simple.parser.ParseException) 3
Test (org.testng.annotations.Test) 3
TrackerClient (com.linkedin.d2.balancer.clients.TrackerClient) 2