
Example 6 with ServicePropertiesJsonSerializer

use of com.linkedin.d2.balancer.properties.ServicePropertiesJsonSerializer in project rest.li by linkedin.

the class D2Config method configure.

public int configure() throws Exception {
    // original map derived from properties file
    Map<String, Object> clusterServiceConfiguration = merge(_clusterServiceConfigurations);
    // map of clusterName -> cluster configuration
    Map<String, Map<String, Object>> clusters = new HashMap<String, Map<String, Object>>();
    // map of serviceName -> service configuration
    Map<String, Map<String, Object>> services = new HashMap<String, Map<String, Object>>();
    // Ugly. But this is a map of service groups, so it needs to reflect multiple services maps.
    Map<String, Map<String, Map<String, Object>>> serviceVariants = new HashMap<String, Map<String, Map<String, Object>>>();
    // temporary mapping from cluster name to services map, to aid in creating cluster variants
    // and service groups.
    Map<String, Map<String, Map<String, Object>>> clusterToServiceMapping = new HashMap<String, Map<String, Map<String, Object>>>();
    int status;
    // temporary mapping from cluster name to the list of colo variants it has.
    Map<String, List<String>> variantToVariantsMapping = new HashMap<String, List<String>>();
    // temporary mapping from cluster name to coloVariant ClusterNames list.
    Map<String, List<String>> clusterToColoClustersMapping = new HashMap<String, List<String>>();
    // mapping from regular cluster name to the list of containing services
    // which will be added as children of the regular cluster znode.
    Map<String, List<String>> regularClusterToServicesMapping = new HashMap<>();
    _log.info("basePath: " + _basePath);
    _log.info("clusterDefaults: " + _clusterDefaults);
    _log.info("serviceDefaults: " + _serviceDefaults);
    final String defaultColo = (String) _clusterDefaults.remove(PropertyKeys.DEFAULT_COLO);
    // Solution 2 is the approach taken below.
    for (String clusterName : clusterServiceConfiguration.keySet()) {
        @SuppressWarnings("unchecked") Map<String, Object> clusterConfig = (Map<String, Object>) clusterServiceConfiguration.get(clusterName);
        clusterConfig.put(PropertyKeys.CLUSTER_NAME, clusterName);
        final Object servicesProperty = clusterConfig.remove(PropertyKeys.SERVICES);
        @SuppressWarnings("unchecked") Map<String, Map<String, Object>> servicesConfigs = (Map<String, Map<String, Object>>) servicesProperty;
        final Object clusterVariantProperty = clusterConfig.remove(PropertyKeys.CLUSTER_VARIANTS);
        @SuppressWarnings("unchecked") Map<String, Map<String, Object>> clusterVariantConfig = (Map<String, Map<String, Object>>) clusterVariantProperty;
        final Object coloVariantsProperty = clusterConfig.remove(PropertyKeys.COLO_VARIANTS);
        @SuppressWarnings("unchecked") List<String> coloVariants = (List<String>) coloVariantsProperty;
        final String masterColo = (String) clusterConfig.remove(PropertyKeys.MASTER_COLO);
        final String enableSymlinkString = (String) clusterConfig.remove(PropertyKeys.ENABLE_SYMLINK);
        final boolean enableSymlink;
        regularClusterToServicesMapping.put(clusterName, servicesConfigs.keySet().stream().collect(Collectors.toList()));
        if (enableSymlinkString != null && "true".equalsIgnoreCase(enableSymlinkString)) {
            enableSymlink = true;
        } else {
            enableSymlink = false;
        }
        // do some sanity check for partitions if any
        // Moving handling of partitionProperties before any coloVariant manipulations
        final Object partitionPropertiesProperty = clusterConfig.get(PropertyKeys.PARTITION_PROPERTIES);
        @SuppressWarnings("unchecked") Map<String, Object> partitionProperties = (Map<String, Object>) partitionPropertiesProperty;
        if (partitionProperties != null) {
            status = handlePartitionProperties(partitionProperties, clusterConfig, clusterName);
            if (status != 0) {
                return status;
            }
        }
        Map<String, String> clusterProperties = new HashMap<>();
        if (coloVariants != null && coloVariants.size() > 0 && !(coloVariants.size() == 1 && coloVariants.contains(""))) {
            clusterProperties.put(PropertyKeys.COLO_VARIANTS, String.join(LIST_SEPARATOR, coloVariants));
        }
        if (masterColo != null && !masterColo.equals("")) {
            clusterProperties.put(PropertyKeys.MASTER_COLO, masterColo);
        }
        if (clusterVariantConfig != null && clusterVariantConfig.size() > 0) {
            clusterProperties.put(PropertyKeys.CLUSTER_VARIANTS, String.join(LIST_SEPARATOR, clusterVariantConfig.keySet()));
        }
        clusterConfig.put(PropertyKeys.CLUSTER_PROPERTIES, clusterProperties);
        // Normalize coloVariants: when no colo variants are configured, treat it as a single
        // empty-string colo so the loop below handles both cases without lots of if/else.
        if (coloVariants == null || (coloVariants.size() == 1 && coloVariants.contains(""))) {
            coloVariants = Collections.singletonList("");
        } else {
            // The default colo must be one of the peer colos, and the master colo must also be
            // one of the peer colos, if applicable.
            if (!coloVariants.contains(defaultColo)) {
                throw new IllegalStateException("The default colo: " + defaultColo + " is not one of the peer colos = " + coloVariants);
            }
            if (masterColo != null && !coloVariants.contains(masterColo) && !enableSymlink) {
                throw new IllegalStateException("The master colo: " + masterColo + " is not one of the peer colos = " + coloVariants);
            }
        }
        boolean defaultServicesCreated = false;
        for (String colo : coloVariants) {
            // the coloClusterName will be equal to the original cluster name if colo is the empty string
            String coloClusterName = D2Utils.addSuffixToBaseName(clusterName, colo);
            // coloServicesConfigs are the set of d2 services in this cluster in this colo
            // for the regular cluster case I could avoid creation of a new HashMap for both coloServicesConfig
            // and coloServiceConfig, as an optimization at the expense of simplicity.
            Map<String, Map<String, Object>> coloServicesConfigs = new HashMap<String, Map<String, Object>>();
            // Only create the default services once, and only when we have an empty colo string or the
            // colo matches the default colo.
            boolean createDefaultServices = !defaultServicesCreated && shouldCreateDefaultServices(colo, defaultColo);
            for (String serviceName : servicesConfigs.keySet()) {
                // "resource" level config
                Map<String, Object> serviceConfig = servicesConfigs.get(serviceName);
                // There are some cases where we may not want to create colo variants of a particular service
                // We can't remove properties from the serviceConfig here because we might need to loop
                // over it multiple times.
                String createColoVariants = (String) serviceConfig.get(PropertyKeys.HAS_COLO_VARIANTS);
                boolean createColoVariantsForService = shouldCreateColoVariantsForService(colo, createColoVariants);
                String coloServiceName = serviceName;
                final boolean defaultRoutingToMasterColo = serviceConfig.containsKey(PropertyKeys.DEFAULT_ROUTING) && PropertyKeys.MASTER_SUFFIX.equals(serviceConfig.get(PropertyKeys.DEFAULT_ROUTING));
                // If this service is colo-variant aware, suffix its name with the colo; otherwise we don't create
                // any colo variants of that serviceName.
                if (createColoVariantsForService) {
                    coloServiceName = D2Utils.addSuffixToBaseName(serviceName, colo);
                }
                final Object transportClientProperty = serviceConfig.get(PropertyKeys.TRANSPORT_CLIENT_PROPERTIES);
                @SuppressWarnings("unchecked") Map<String, Object> transportClientConfig = (Map<String, Object>) transportClientProperty;
                serviceConfig.put(PropertyKeys.TRANSPORT_CLIENT_PROPERTIES, transportClientConfig);
                Map<String, Object> coloServiceConfig = new HashMap<String, Object>(serviceConfig);
                // so it does not have to know what the default services are.
                if (createDefaultServices && !defaultServicesCreated) {
                    // create the Master version of this service.
                    if (masterColo != null && createColoVariantsForService) {
                        // we need to create a "Master" version of this service to point to the current Master
                        // Cluster. Why not just use the original service name? We will point the original
                        // service name at the local cluster, as well as to make it explicit that requests
                        // sent to this service might cross colos, if the master is located in another colo.
                        Map<String, Object> masterServiceConfig = new HashMap<String, Object>(serviceConfig);
                        String masterServiceName = serviceName + PropertyKeys.MASTER_SUFFIX;
                        String masterClusterName;
                        if (enableSymlink) {
                            masterClusterName = D2Utils.getSymlinkNameForMaster(clusterName);
                        } else {
                            masterClusterName = D2Utils.addSuffixToBaseName(clusterName, masterColo);
                        }
                        masterServiceConfig.put(PropertyKeys.CLUSTER_NAME, masterClusterName);
                        masterServiceConfig.put(PropertyKeys.SERVICE_NAME, masterServiceName);
                        masterServiceConfig.put(PropertyKeys.IS_MASTER_SERVICE, "true");
                        coloServicesConfigs.put(masterServiceName, masterServiceConfig);
                    }
                    // this block will handle:
                    // the colo-agnostic service -> colo-specific default cluster mapping (fooService -> FooCluster-WestCoast)
                    // the colo-agnostic service -> colo-agnostic cluster mapping (fooService -> FooCluster)
                    // the latter only being done for regular clusters, the former only being done for clusters
                    // that have coloVariants specified.
                    Map<String, Object> regularServiceConfig = new HashMap<String, Object>(serviceConfig);
                    if (createColoVariantsForService) {
                        // we set isDefaultService flag only if it is a multi-colo aware service.
                        regularServiceConfig.put(PropertyKeys.IS_DEFAULT_SERVICE, "true");
                        if (defaultRoutingToMasterColo) {
                            regularServiceConfig.put(PropertyKeys.DEFAULT_ROUTING_TO_MASTER, "true");
                        }
                    }
                    final String defaultColoClusterName = clusterNameWithRouting(clusterName, colo, defaultColo, masterColo, defaultRoutingToMasterColo, enableSymlink);
                    regularServiceConfig.put(PropertyKeys.CLUSTER_NAME, defaultColoClusterName);
                    regularServiceConfig.put(PropertyKeys.SERVICE_NAME, serviceName);
                    coloServicesConfigs.put(serviceName, regularServiceConfig);
                }
                if (!serviceName.equals(coloServiceName)) {
                    // this block will handle:
                    // the colo-specific service-> colo-specific cluster mapping (fooService-WestCoast -> FooCluster-WestCoast,
                    // fooService-EastCoast -> FooCluster-EastCoast)
                    coloServiceConfig.put(PropertyKeys.CLUSTER_NAME, coloClusterName);
                    coloServiceConfig.put(PropertyKeys.SERVICE_NAME, coloServiceName);
                    coloServicesConfigs.put(coloServiceName, coloServiceConfig);
                }
            }
            // end for each service
            status = addServicesToServicesMap(coloServicesConfigs, services, coloClusterName);
            if (status != NO_ERROR_EXIT_CODE) {
                return status;
            }
            // Now that we've created colo-specific service to colo-specific cluster mappings, we now need
            // to actually create those colo-specific clusters.
            Map<String, Object> coloClusterConfig = clusterConfig;
            if (!clusterName.equals(coloClusterName)) {
                coloClusterConfig = new HashMap<String, Object>(clusterConfig);
                coloClusterConfig.put(PropertyKeys.CLUSTER_NAME, coloClusterName);
                if (createDefaultServices) {
                    clusters.put(clusterName, clusterConfig);
                }
            }
            clusters.put(coloClusterName, coloClusterConfig);
            // list before the cluster variants.
            if (clusterVariantConfig != null) {
                Map<String, Map<String, Object>> coloClusterVariantConfig = new HashMap<String, Map<String, Object>>(clusterVariantConfig);
                status = handleClusterVariants(coloClusterVariantConfig, clusterConfig, clusters, coloServicesConfigs, clusterToServiceMapping, colo, variantToVariantsMapping, masterColo, enableSymlink);
                if (status != 0) {
                    return status;
                }
            } else {
                // even if clusterVariant is not defined, it is still needed to save the coloServicesConfigs
                // in case the serviceGroup directly refers the cluster name
                clusterToServiceMapping.put(coloClusterName, coloServicesConfigs);
                // also save the coloClusterName
                addNewVariantToVariantsList(clusterToColoClustersMapping, clusterName, coloClusterName);
            }
            // then set the flag marking the default services for this cluster as created.
            if (!defaultServicesCreated && createDefaultServices) {
                defaultServicesCreated = true;
            }
        }
    // end for each colo variant
    }
    // there are service variants
    if (_serviceVariants != null) {
        for (String serviceGroup : _serviceVariants.keySet()) {
            // each service group contains a list of cluster names and a type field that
            // describes how to treat the list. We group together the services described by these
            // listed clusters, and prep that for writing to a different znode than the default service
            // znode directory. Note that we had already pointed those services to the appropriate cluster
            // variant earlier.
            Map<String, Map<String, Object>> servicesGroupConfig = new HashMap<String, Map<String, Object>>();
            @SuppressWarnings("unchecked") Map<String, Object> configGroupMap = (Map<String, Object>) _serviceVariants.get(serviceGroup);
            String type = (String) configGroupMap.get(PropertyKeys.TYPE);
            final Object clusterListProperty = configGroupMap.get(PropertyKeys.CLUSTER_LIST);
            @SuppressWarnings("unchecked") List<String> clusterList = (List<String>) clusterListProperty;
            // create an alternate service table for the services specified by these cluster variants
            for (Iterator<String> iter = clusterList.listIterator(); iter.hasNext(); ) {
                String clusterItem = iter.next();
                List<String> coloClusterVariantList = variantToVariantsMapping.get(clusterItem);
                if (coloClusterVariantList == null && PropertyKeys.FULL_CLUSTER_LIST.equals(type)) {
                    // For full_cluster_list type, it is allowed to specify real cluster name, not
                    // necessarily always clusterVariant. Check the clusterToColoClustersMappings.
                    coloClusterVariantList = clusterToColoClustersMapping.get(clusterItem);
                }
                if (coloClusterVariantList == null) {
                    // the service group had an unknown cluster!
                    _log.error("Unknown cluster specified: " + clusterItem);
                    return EXCEPTION_EXIT_CODE;
                }
                // Add the services belonging to each of the clusters
                // in those coloVariants to this service group's list of services.
                for (String coloClusterVariant : coloClusterVariantList) {
                    Map<String, Map<String, Object>> candidateServices = clusterToServiceMapping.get(coloClusterVariant);
                    if (candidateServices == null) {
                        // the service group had an unknown cluster!
                        _log.error("Unknown cluster specified: " + coloClusterVariant);
                        return EXCEPTION_EXIT_CODE;
                    }
                    for (Map.Entry<String, Map<String, Object>> mapEntry : candidateServices.entrySet()) {
                        Object testValue = servicesGroupConfig.put(mapEntry.getKey(), mapEntry.getValue());
                        if (testValue != null) {
                            // We shouldn't have had conflicting services, two variants of the same cluster
                            // were probably specified in the same service group.
                            _log.error("Service group has variants of the same cluster: " + serviceGroup);
                            return EXCEPTION_EXIT_CODE;
                        }
                    }
                }
            }
            if (PropertyKeys.CLUSTER_VARIANTS_LIST.equals(type)) {
                // start from the full list of services, and then overwrite the services specified by the
                // cluster variants.
                Map<String, Map<String, Object>> fullServiceList = new HashMap<String, Map<String, Object>>(services);
                fullServiceList.putAll(servicesGroupConfig);
                serviceVariants.put(serviceGroup, fullServiceList);
            } else if (PropertyKeys.FULL_CLUSTER_LIST.equals(type)) {
                // The user has explicitly indicated that we should include only the services that
                // correspond to the named clusters in this serviceGroup.
                serviceVariants.put(serviceGroup, servicesGroupConfig);
            } else {
                _log.error("unknown serviceVariant type: " + type);
                return EXCEPTION_EXIT_CODE;
            }
        }
    }
    _log.debug("serviceVariants: " + serviceVariants);
    _zkConnection.start();
    try {
        _log.info("Cluster configuration:\n" + clusters);
        writeConfig(ZKFSUtil.clusterPath(_basePath), new ClusterPropertiesJsonSerializer(), new ClusterPropertiesJsonSerializer(), clusters, _clusterDefaults);
        _log.info("Wrote cluster configuration");
        _log.info("Service configuration:\n" + services);
        writeConfig(ZKFSUtil.servicePath(_basePath), new ServicePropertiesJsonSerializer(), new ServicePropertiesJsonSerializer(), services, _serviceDefaults);
        _log.info("Wrote service configuration");
        writeChildren(regularClusterToServicesMapping);
        _log.info("Wrote service children nodes under clusters");
        if (!serviceVariants.isEmpty()) {
            for (Map.Entry<String, Map<String, Map<String, Object>>> entry : serviceVariants.entrySet()) {
                if (_log.isDebugEnabled()) {
                    _log.info("serviceVariant: " + entry + "\n");
                } else {
                    _log.info("serviceVariant: " + entry.getKey() + "\n");
                }
                writeConfig(ZKFSUtil.servicePath(_basePath, entry.getKey()), new ServicePropertiesJsonSerializer(), new ServicePropertiesJsonSerializer(), entry.getValue(), _serviceDefaults);
            }
            _log.info("Wrote service variant configurations");
        }
        _log.info("Configuration complete");
        return NO_ERROR_EXIT_CODE;
    } finally {
        try {
            _zkConnection.shutdown();
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
            _log.warn("ZooKeeper shutdown interrupted", e);
        }
    }
}
Also used : HashMap(java.util.HashMap) ServicePropertiesJsonSerializer(com.linkedin.d2.balancer.properties.ServicePropertiesJsonSerializer) ClusterPropertiesJsonSerializer(com.linkedin.d2.balancer.properties.ClusterPropertiesJsonSerializer) ArrayList(java.util.ArrayList) List(java.util.List) Map(java.util.Map)
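
For orientation, the sketch below shows the kind of merged cluster/service configuration map configure() consumes, inferred only from the PropertyKeys entries read above. The cluster name, service name, colo names, and values are hypothetical placeholders, and a real _clusterServiceConfigurations input carries more keys (service paths, load balancer settings, and so on) than this minimal shape.

import com.linkedin.d2.balancer.properties.PropertyKeys;

import java.util.Arrays;
import java.util.HashMap;
import java.util.Map;

// Hypothetical input shape for D2Config.configure(); only keys that configure() reads above are used.
public class ExampleD2ConfigInput {
    public static Map<String, Object> exampleClusterServiceConfigurations() {
        Map<String, Object> serviceConfig = new HashMap<>();
        // assumed to mark the service as colo-variant aware (read via PropertyKeys.HAS_COLO_VARIANTS above)
        serviceConfig.put(PropertyKeys.HAS_COLO_VARIANTS, "true");

        Map<String, Map<String, Object>> services = new HashMap<>();
        services.put("exampleService", serviceConfig);

        Map<String, Object> clusterConfig = new HashMap<>();
        clusterConfig.put(PropertyKeys.SERVICES, services);
        clusterConfig.put(PropertyKeys.COLO_VARIANTS, Arrays.asList("WestCoast", "EastCoast"));
        clusterConfig.put(PropertyKeys.MASTER_COLO, "WestCoast");

        Map<String, Object> clusterServiceConfigurations = new HashMap<>();
        clusterServiceConfigurations.put("ExampleCluster", clusterConfig);
        return clusterServiceConfigurations;
    }
}

When COLO_VARIANTS is present, the default colo taken from the cluster defaults (PropertyKeys.DEFAULT_COLO) has to be one of the listed colos, and so does the master colo unless symlinks are enabled; otherwise configure() throws the IllegalStateException seen above.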

Example 7 with ServicePropertiesJsonSerializer

use of com.linkedin.d2.balancer.properties.ServicePropertiesJsonSerializer in project rest.li by linkedin.

the class R2D2Server method putService.

private void putService(ServiceProperties serviceProperties) throws Exception {
    System.err.println("put: " + serviceProperties);
    ZKConnection client = new ZKConnection(_zookeeperHost + ":" + _zookeeperPort, 30000);
    PropertyStore<ServiceProperties> store = new ZooKeeperPermanentStore<ServiceProperties>(client, new ServicePropertiesJsonSerializer(), _basePath + "/services");
    store.put(serviceProperties.getServiceName(), serviceProperties);
    client.getZooKeeper().close();
}
Also used : ServiceProperties(com.linkedin.d2.balancer.properties.ServiceProperties) ZKConnection(com.linkedin.d2.discovery.stores.zk.ZKConnection) ServicePropertiesJsonSerializer(com.linkedin.d2.balancer.properties.ServicePropertiesJsonSerializer) ZooKeeperPermanentStore(com.linkedin.d2.discovery.stores.zk.ZooKeeperPermanentStore)
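
As a usage illustration, here is a standalone sketch of the same registration pattern, assuming a local ZooKeeper at localhost:2181 and a /d2/services path (both placeholders). It follows putService above but adds the explicit connection/store start sequence that the ZKFSTest examples below use.

import com.linkedin.common.callback.FutureCallback;
import com.linkedin.common.util.None;
import com.linkedin.d2.balancer.properties.ServiceProperties;
import com.linkedin.d2.balancer.properties.ServicePropertiesJsonSerializer;
import com.linkedin.d2.discovery.stores.zk.ZKConnection;
import com.linkedin.d2.discovery.stores.zk.ZooKeeperPermanentStore;

import java.util.Arrays;
import java.util.concurrent.TimeUnit;

public class RegisterServiceSketch {
    public static void main(String[] args) throws Exception {
        // placeholder ZooKeeper endpoint and D2 services path
        ZKConnection client = new ZKConnection("localhost:2181", 30000);
        client.start();
        ZooKeeperPermanentStore<ServiceProperties> store =
            new ZooKeeperPermanentStore<ServiceProperties>(client, new ServicePropertiesJsonSerializer(), "/d2/services");
        FutureCallback<None> ready = new FutureCallback<None>();
        store.start(ready);
        ready.get(30, TimeUnit.SECONDS);
        // minimal ServiceProperties, mirroring the four-argument constructor used in testServiceDirectory below
        ServiceProperties props =
            new ServiceProperties("exampleService", "ExampleCluster", "/example", Arrays.asList("degrader"));
        store.put("exampleService", props);
        client.getZooKeeper().close();
    }
}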

Example 8 with ServicePropertiesJsonSerializer

use of com.linkedin.d2.balancer.properties.ServicePropertiesJsonSerializer in project rest.li by linkedin.

the class ZKFSTest method testKeyMapper.

@Test
public void testKeyMapper() throws Exception {
    final String TEST_SERVICE_NAME = "test-service";
    final String TEST_CLUSTER_NAME = "test-cluster";
    final URI TEST_SERVER_URI1 = URI.create("http://test-host-1/");
    final URI TEST_SERVER_URI2 = URI.create("http://test-host-2/");
    final int NUM_ITERATIONS = 5;
    startServer();
    try {
        ZKFSLoadBalancer balancer = getBalancer();
        FutureCallback<None> callback = new FutureCallback<None>();
        balancer.start(callback);
        callback.get(30, TimeUnit.SECONDS);
        ZKConnection conn = balancer.zkConnection();
        ZooKeeperPermanentStore<ServiceProperties> serviceStore = new ZooKeeperPermanentStore<ServiceProperties>(conn, new ServicePropertiesJsonSerializer(), ZKFSUtil.servicePath(BASE_PATH));
        ServiceProperties props = new ServiceProperties(TEST_SERVICE_NAME, TEST_CLUSTER_NAME, "/test", Arrays.asList("degrader"), Collections.<String, Object>emptyMap(), null, null, Arrays.asList("http"), null);
        serviceStore.put(TEST_SERVICE_NAME, props);
        ClusterProperties clusterProperties = new ClusterProperties(TEST_CLUSTER_NAME);
        ZooKeeperPermanentStore<ClusterProperties> clusterStore = new ZooKeeperPermanentStore<ClusterProperties>(conn, new ClusterPropertiesJsonSerializer(), ZKFSUtil.clusterPath(BASE_PATH));
        clusterStore.put(TEST_CLUSTER_NAME, clusterProperties);
        ZooKeeperEphemeralStore<UriProperties> uriStore = new ZooKeeperEphemeralStore<UriProperties>(conn, new UriPropertiesJsonSerializer(), new UriPropertiesMerger(), ZKFSUtil.uriPath(BASE_PATH), false, true);
        Map<URI, Map<Integer, PartitionData>> uriData = new HashMap<URI, Map<Integer, PartitionData>>();
        Map<Integer, PartitionData> partitionData = new HashMap<Integer, PartitionData>(1);
        partitionData.put(DefaultPartitionAccessor.DEFAULT_PARTITION_ID, new PartitionData(1.0d));
        uriData.put(TEST_SERVER_URI1, partitionData);
        uriData.put(TEST_SERVER_URI2, partitionData);
        UriProperties uriProps = new UriProperties(TEST_CLUSTER_NAME, uriData);
        callback = new FutureCallback<None>();
        uriStore.start(callback);
        callback.get(30, TimeUnit.SECONDS);
        uriStore.put(TEST_CLUSTER_NAME, uriProps);
        Set<Integer> keys = new HashSet<Integer>();
        for (int ii = 0; ii < 100; ++ii) {
            keys.add(ii);
        }
        for (int ii = 0; ii < NUM_ITERATIONS; ++ii) {
            KeyMapper mapper = balancer.getKeyMapper();
            MapKeyResult<URI, Integer> batches = mapper.mapKeysV2(URI.create("d2://" + TEST_SERVICE_NAME), keys);
            Assert.assertEquals(batches.getMapResult().size(), 2);
            for (Map.Entry<URI, Collection<Integer>> oneBatch : batches.getMapResult().entrySet()) {
                Assert.assertTrue(oneBatch.getKey().toString().startsWith("http://test-host-"));
                Assert.assertTrue(keys.containsAll(oneBatch.getValue()));
            }
        }
    } finally {
        stopServer();
    }
}
Also used : ServicePropertiesJsonSerializer(com.linkedin.d2.balancer.properties.ServicePropertiesJsonSerializer) HashMap(java.util.HashMap) KeyMapper(com.linkedin.d2.balancer.KeyMapper) ZooKeeperPermanentStore(com.linkedin.d2.discovery.stores.zk.ZooKeeperPermanentStore) UriPropertiesMerger(com.linkedin.d2.balancer.properties.UriPropertiesMerger) URI(java.net.URI) UriPropertiesJsonSerializer(com.linkedin.d2.balancer.properties.UriPropertiesJsonSerializer) ClusterPropertiesJsonSerializer(com.linkedin.d2.balancer.properties.ClusterPropertiesJsonSerializer) PartitionData(com.linkedin.d2.balancer.properties.PartitionData) UriProperties(com.linkedin.d2.balancer.properties.UriProperties) FutureCallback(com.linkedin.common.callback.FutureCallback) HashSet(java.util.HashSet) ZKConnection(com.linkedin.d2.discovery.stores.zk.ZKConnection) ZooKeeperEphemeralStore(com.linkedin.d2.discovery.stores.zk.ZooKeeperEphemeralStore) ServiceProperties(com.linkedin.d2.balancer.properties.ServiceProperties) ClusterProperties(com.linkedin.d2.balancer.properties.ClusterProperties) Collection(java.util.Collection) None(com.linkedin.common.util.None) Map(java.util.Map) Test(org.testng.annotations.Test)
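
The URI-registration half of this test is what a server typically does on startup. Below is a condensed sketch of just that part, reusing only the calls shown above; the ZooKeeper endpoint, uris path, cluster name, and host URI are placeholders.

import com.linkedin.common.callback.FutureCallback;
import com.linkedin.common.util.None;
import com.linkedin.d2.balancer.properties.PartitionData;
import com.linkedin.d2.balancer.properties.UriProperties;
import com.linkedin.d2.balancer.properties.UriPropertiesJsonSerializer;
import com.linkedin.d2.balancer.properties.UriPropertiesMerger;
import com.linkedin.d2.discovery.stores.zk.ZKConnection;
import com.linkedin.d2.discovery.stores.zk.ZooKeeperEphemeralStore;

import java.net.URI;
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.TimeUnit;

public class AnnounceUriSketch {
    public static void main(String[] args) throws Exception {
        // placeholder ZooKeeper endpoint and uris path
        ZKConnection conn = new ZKConnection("localhost:2181", 30000);
        conn.start();
        ZooKeeperEphemeralStore<UriProperties> uriStore =
            new ZooKeeperEphemeralStore<UriProperties>(conn, new UriPropertiesJsonSerializer(),
                new UriPropertiesMerger(), "/d2/uris", false, true);
        FutureCallback<None> ready = new FutureCallback<None>();
        uriStore.start(ready);
        ready.get(30, TimeUnit.SECONDS);

        Map<Integer, PartitionData> partitionData = new HashMap<Integer, PartitionData>(1);
        // 0 is the default partition id (DefaultPartitionAccessor.DEFAULT_PARTITION_ID in the test above)
        partitionData.put(0, new PartitionData(1.0d));
        Map<URI, Map<Integer, PartitionData>> uriData = new HashMap<URI, Map<Integer, PartitionData>>();
        uriData.put(URI.create("http://example-host:8080/"), partitionData);

        // register this host under the cluster; ZooKeeperEphemeralStore writes ephemeral nodes,
        // so the entry goes away when the connection does. A real server keeps the connection open
        // for as long as it serves traffic.
        uriStore.put("ExampleCluster", new UriProperties("ExampleCluster", uriData));
    }
}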

Example 9 with ServicePropertiesJsonSerializer

use of com.linkedin.d2.balancer.properties.ServicePropertiesJsonSerializer in project rest.li by linkedin.

the class ZKFSTest method testServiceDirectory.

@Test
public void testServiceDirectory() throws Exception {
    final String TEST_SERVICE_NAME = "testingService";
    startServer();
    try {
        ZKFSLoadBalancer balancer = getBalancer();
        FutureCallback<None> callback = new FutureCallback<None>();
        balancer.start(callback);
        callback.get(30, TimeUnit.SECONDS);
        Directory dir = balancer.getDirectory();
        ZKConnection conn = new ZKConnection("localhost:" + PORT, 30000);
        conn.start();
        ZooKeeperPermanentStore<ServiceProperties> store = new ZooKeeperPermanentStore<ServiceProperties>(conn, new ServicePropertiesJsonSerializer(), ZKFSUtil.servicePath(BASE_PATH));
        callback = new FutureCallback<None>();
        store.start(callback);
        callback.get(30, TimeUnit.SECONDS);
        ServiceProperties props = new ServiceProperties(TEST_SERVICE_NAME, "someCluster", "/somePath", Arrays.asList("someStrategy"));
        store.put(TEST_SERVICE_NAME, props);
        FutureCallback<List<String>> serviceCallback = new FutureCallback<List<String>>();
        dir.getServiceNames(serviceCallback);
        Assert.assertEquals(serviceCallback.get(30, TimeUnit.SECONDS), Collections.singletonList(TEST_SERVICE_NAME));
    } finally {
        stopServer();
    }
}
Also used : ZKConnection(com.linkedin.d2.discovery.stores.zk.ZKConnection) ServicePropertiesJsonSerializer(com.linkedin.d2.balancer.properties.ServicePropertiesJsonSerializer) ZooKeeperPermanentStore(com.linkedin.d2.discovery.stores.zk.ZooKeeperPermanentStore) ServiceProperties(com.linkedin.d2.balancer.properties.ServiceProperties) List(java.util.List) None(com.linkedin.common.util.None) FutureCallback(com.linkedin.common.callback.FutureCallback) Directory(com.linkedin.d2.balancer.Directory) Test(org.testng.annotations.Test)
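
The FutureCallback-and-get pattern around getServiceNames is easy to factor out when the Directory is used synchronously; a small helper extracted from the steps above (the timeout value is arbitrary):

import com.linkedin.common.callback.FutureCallback;
import com.linkedin.d2.balancer.Directory;

import java.util.List;
import java.util.concurrent.TimeUnit;

public final class DirectoryHelper {
    private DirectoryHelper() {
    }

    // Blocks until the Directory reports the list of d2 service names, or the timeout expires.
    public static List<String> listServiceNames(Directory dir, long timeoutSeconds) throws Exception {
        FutureCallback<List<String>> callback = new FutureCallback<List<String>>();
        dir.getServiceNames(callback);
        return callback.get(timeoutSeconds, TimeUnit.SECONDS);
    }
}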

Example 10 with ServicePropertiesJsonSerializer

use of com.linkedin.d2.balancer.properties.ServicePropertiesJsonSerializer in project rest.li by linkedin.

the class LoadBalancerClientCli method hasService.

public static boolean hasService(ZKConnection zkclient, String zkserver, String d2path, String cluster, String service) throws URISyntaxException, IOException, PropertyStoreException {
    ZooKeeperPermanentStore<ServiceProperties> zkServiceRegistry = null;
    String scstoreString = zkserver + ZKFSUtil.servicePath(d2path);
    zkServiceRegistry = (ZooKeeperPermanentStore<ServiceProperties>) getStore(zkclient, scstoreString, new ServicePropertiesJsonSerializer());
    return zkServiceRegistry.get(service).getClusterName().equals(cluster);
}
Also used : ServiceProperties(com.linkedin.d2.balancer.properties.ServiceProperties) ServicePropertiesJsonSerializer(com.linkedin.d2.balancer.properties.ServicePropertiesJsonSerializer)
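
A hypothetical call site for hasService; the ZooKeeper endpoint, d2 base path, cluster, and service names are placeholders, and both the package of LoadBalancerClientCli and the need to start the connection before getStore (not shown) uses it are assumptions.

// package assumed from the project layout
import com.linkedin.d2.balancer.util.LoadBalancerClientCli;
import com.linkedin.d2.discovery.stores.zk.ZKConnection;

public class HasServiceSketch {
    public static void main(String[] args) throws Exception {
        // placeholder ZooKeeper endpoint and d2 base path
        String zkServer = "zk://localhost:2181";
        ZKConnection zkClient = new ZKConnection("localhost:2181", 30000);
        // assumed: the connection must be started before the store helper uses it
        zkClient.start();
        boolean onCluster =
            LoadBalancerClientCli.hasService(zkClient, zkServer, "/d2", "ExampleCluster", "exampleService");
        System.out.println("exampleService -> ExampleCluster? " + onCluster);
    }
}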

Aggregations

ServicePropertiesJsonSerializer (com.linkedin.d2.balancer.properties.ServicePropertiesJsonSerializer): 15 usages
ServiceProperties (com.linkedin.d2.balancer.properties.ServiceProperties): 14 usages
UriProperties (com.linkedin.d2.balancer.properties.UriProperties): 11 usages
UriPropertiesJsonSerializer (com.linkedin.d2.balancer.properties.UriPropertiesJsonSerializer): 11 usages
ClusterPropertiesJsonSerializer (com.linkedin.d2.balancer.properties.ClusterPropertiesJsonSerializer): 10 usages
ClusterProperties (com.linkedin.d2.balancer.properties.ClusterProperties): 9 usages
UriPropertiesMerger (com.linkedin.d2.balancer.properties.UriPropertiesMerger): 9 usages
ZooKeeperPermanentStore (com.linkedin.d2.discovery.stores.zk.ZooKeeperPermanentStore): 8 usages
HashMap (java.util.HashMap): 7 usages
ZooKeeperEphemeralStore (com.linkedin.d2.discovery.stores.zk.ZooKeeperEphemeralStore): 6 usages
None (com.linkedin.common.util.None): 5 usages
ZKConnection (com.linkedin.d2.discovery.stores.zk.ZKConnection): 5 usages
Test (org.testng.annotations.Test): 5 usages
FutureCallback (com.linkedin.common.callback.FutureCallback): 4 usages
DegraderLoadBalancerStrategyFactoryV3 (com.linkedin.d2.balancer.strategies.degrader.DegraderLoadBalancerStrategyFactoryV3): 4 usages
SimpleLoadBalancer (com.linkedin.d2.balancer.simple.SimpleLoadBalancer): 3 usages
SimpleLoadBalancerState (com.linkedin.d2.balancer.simple.SimpleLoadBalancerState): 3 usages
PropertyEventBusImpl (com.linkedin.d2.discovery.event.PropertyEventBusImpl): 3 usages
HashSet (java.util.HashSet): 3 usages
List (java.util.List): 3 usages