Search in sources :

Example 1 with RangeBasedPartitionProperties

Use of com.linkedin.d2.balancer.properties.RangeBasedPartitionProperties in the project rest.li by LinkedIn.

From the class PartitionPropertiesConverter, method toProperties:

/**
 * Converts a {@code D2ClusterPartitionConfiguration} record into the matching
 * {@link PartitionProperties} implementation (range-based, hash-based, or null).
 *
 * @param config the D2 cluster partition configuration to convert
 * @return the equivalent {@code PartitionProperties} instance
 * @throws IllegalArgumentException if the partition type or hash algorithm is not recognized
 */
public static PartitionProperties toProperties(D2ClusterPartitionConfiguration config) {
    switch (config.getType()) {
        case RANGE:
            {
                // Range start and partition size live in the type-specific data record.
                rangedPartitionProperties rangeData =
                    config.getPartitionTypeSpecificData().getRangedPartitionProperties();
                return new RangeBasedPartitionProperties(
                    config.getPartitionKeyRegex(),
                    rangeData.getKeyRangeStart(),
                    rangeData.getPartitionSize(),
                    config.getPartitionCount());
            }
        case HASH:
            {
                // Map the configuration's hash algorithm enum onto the balancer's enum.
                final HashBasedPartitionProperties.HashAlgorithm algorithm;
                switch (config.getPartitionTypeSpecificData().getHashAlgorithm()) {
                    case MODULO:
                        algorithm = HashBasedPartitionProperties.HashAlgorithm.MODULO;
                        break;
                    case MD5:
                        algorithm = HashBasedPartitionProperties.HashAlgorithm.MD5;
                        break;
                    default:
                        throw new IllegalArgumentException("Unsupported hash algorithm: " + config.getPartitionTypeSpecificData().getHashAlgorithm());
                }
                return new HashBasedPartitionProperties(config.getPartitionKeyRegex(), config.getPartitionCount(), algorithm);
            }
        case NONE:
            // No partitioning configured; use the shared null-object instance.
            return NullPartitionProperties.getInstance();
        default:
            throw new IllegalArgumentException("Unsupported partitionType: " + config.getType());
    }
}
Also used : com.linkedin.d2.rangedPartitionProperties(com.linkedin.d2.rangedPartitionProperties) PartitionProperties(com.linkedin.d2.balancer.properties.PartitionProperties) NullPartitionProperties(com.linkedin.d2.balancer.properties.NullPartitionProperties) HashBasedPartitionProperties(com.linkedin.d2.balancer.properties.HashBasedPartitionProperties) RangeBasedPartitionProperties(com.linkedin.d2.balancer.properties.RangeBasedPartitionProperties) com.linkedin.d2.rangedPartitionProperties(com.linkedin.d2.rangedPartitionProperties) HashBasedPartitionProperties(com.linkedin.d2.balancer.properties.HashBasedPartitionProperties) RangeBasedPartitionProperties(com.linkedin.d2.balancer.properties.RangeBasedPartitionProperties)

Example 2 with RangeBasedPartitionProperties

Use of com.linkedin.d2.balancer.properties.RangeBasedPartitionProperties in the project rest.li by LinkedIn.

From the class PartitionPropertiesConverter, method toConfig:

/**
 * Converts a {@link PartitionProperties} instance back into its
 * {@code D2ClusterPartitionConfiguration} representation.
 *
 * @param property the partition properties to convert
 * @return the equivalent D2 cluster partition configuration
 * @throws IllegalArgumentException if the partition type is not recognized
 */
public static D2ClusterPartitionConfiguration toConfig(PartitionProperties property) {
    switch (property.getPartitionType()) {
        case RANGE:
            {
                RangeBasedPartitionProperties rangeProperties = (RangeBasedPartitionProperties) property;
                D2ClusterPartitionConfiguration rangeConfig = new D2ClusterPartitionConfiguration();
                rangeConfig.setType(PartitionTypeEnum.RANGE);
                rangeConfig.setPartitionKeyRegex(rangeProperties.getPartitionKeyRegex());
                rangeConfig.setPartitionCount(rangeProperties.getPartitionCount());
                // Key-range start and partition size go into the type-specific data record.
                rangedPartitionProperties rangeData = new rangedPartitionProperties();
                rangeData.setKeyRangeStart(rangeProperties.getKeyRangeStart());
                rangeData.setPartitionSize(rangeProperties.getPartitionSize());
                D2ClusterPartitionConfiguration.PartitionTypeSpecificData rangeSpecificData =
                    new D2ClusterPartitionConfiguration.PartitionTypeSpecificData();
                rangeSpecificData.setRangedPartitionProperties(rangeData);
                rangeConfig.setPartitionTypeSpecificData(rangeSpecificData);
                return rangeConfig;
            }
        case HASH:
            {
                HashBasedPartitionProperties hashProperties = (HashBasedPartitionProperties) property;
                D2ClusterPartitionConfiguration hashConfig = new D2ClusterPartitionConfiguration();
                hashConfig.setType(PartitionTypeEnum.HASH);
                hashConfig.setPartitionKeyRegex(hashProperties.getPartitionKeyRegex());
                hashConfig.setPartitionCount(hashProperties.getPartitionCount());
                // The two HashAlgorithm enums share constant names, so convert by name.
                D2ClusterPartitionConfiguration.PartitionTypeSpecificData hashSpecificData =
                    new D2ClusterPartitionConfiguration.PartitionTypeSpecificData();
                hashSpecificData.setHashAlgorithm(hashAlgorithm.valueOf(hashProperties.getHashAlgorithm().name()));
                hashConfig.setPartitionTypeSpecificData(hashSpecificData);
                return hashConfig;
            }
        case NONE:
            {
                // No partitioning: only the type field is populated.
                D2ClusterPartitionConfiguration noneConfig = new D2ClusterPartitionConfiguration();
                noneConfig.setType(PartitionTypeEnum.NONE);
                return noneConfig;
            }
        default:
            throw new IllegalArgumentException("Unsupported partitionType: " + property.getPartitionType());
    }
}
Also used : com.linkedin.d2.rangedPartitionProperties(com.linkedin.d2.rangedPartitionProperties) HashBasedPartitionProperties(com.linkedin.d2.balancer.properties.HashBasedPartitionProperties) RangeBasedPartitionProperties(com.linkedin.d2.balancer.properties.RangeBasedPartitionProperties) D2ClusterPartitionConfiguration(com.linkedin.d2.D2ClusterPartitionConfiguration)

Example 3 with RangeBasedPartitionProperties

Use of com.linkedin.d2.balancer.properties.RangeBasedPartitionProperties in the project rest.li by LinkedIn.

From the class SimpleLoadBalancerStateTest, method testRegister:

// Verifies that a listener registered with the load balancer state receives the
// expected strategy callbacks: an initial add when a cluster/service pair appears,
// a remove-then-add cycle on cluster refresh, and a partition-aware strategy for
// a range-partitioned cluster.
@Test(groups = { "small", "back-end" })
public void testRegister() {
    reset();
    TestListener listener = new TestListener();
    List<String> schemes = new ArrayList<String>();
    schemes.add("http");
    _state.register(listener);
    // Nothing has been added to the state yet, so no callbacks have fired.
    assertNull(listener.scheme);
    assertNull(listener.strategy);
    assertNull(listener.serviceName);
    // trigger a strategy add
    // first add a cluster
    _state.listenToCluster("cluster-1", new NullStateListenerCallback());
    _clusterRegistry.put("cluster-1", new ClusterProperties("cluster-1"));
    // then add a service
    _state.listenToService("service-1", new NullStateListenerCallback());
    _serviceRegistry.put("service-1", new ServiceProperties("service-1", "cluster-1", "/test", Arrays.asList("random"), Collections.<String, Object>emptyMap(), null, null, schemes, null));
    // this should trigger a refresh
    assertEquals(listener.scheme, "http");
    assertTrue(listener.strategy instanceof RandomLoadBalancerStrategy);
    assertEquals(listener.serviceName, "service-1");
    // then update the cluster
    _clusterRegistry.put("cluster-1", new ClusterProperties("cluster-1"));
    // this triggered a second refresh, but also an onStrategyRemoved. The onStrategyRemoved should
    // be done first, and then the onStrategyAdd, so we should still see a valid strategy.
    assertEquals(listener.scheme, "http");
    assertTrue(listener.strategy instanceof RandomLoadBalancerStrategy);
    assertEquals(listener.serviceName, "service-1");
    // A range-partitioned cluster paired with a "degraderV3" service should surface
    // a DegraderLoadBalancerStrategyV3 to the listener.
    _state.listenToCluster("partition-cluster-1", new NullStateListenerCallback());
    _clusterRegistry.put("partition-cluster-1", new ClusterProperties("partition-cluster-1", null, new HashMap<String, String>(), new HashSet<URI>(), new RangeBasedPartitionProperties("id=(\\d+)", 0, 100, 2)));
    _state.listenToService("partition-service-1", new NullStateListenerCallback());
    _serviceRegistry.put("partition-service-1", new ServiceProperties("partition-service-1", "partition-cluster-1", "/partition-test", Arrays.asList("degraderV3"), Collections.<String, Object>emptyMap(), null, null, schemes, null));
    assertEquals(listener.scheme, "http");
    assertTrue(listener.strategy instanceof DegraderLoadBalancerStrategyV3);
}
Also used : RandomLoadBalancerStrategy(com.linkedin.d2.balancer.strategies.random.RandomLoadBalancerStrategy) HashMap(java.util.HashMap) ArrayList(java.util.ArrayList) DegraderLoadBalancerStrategyV3(com.linkedin.d2.balancer.strategies.degrader.DegraderLoadBalancerStrategyV3) NullStateListenerCallback(com.linkedin.d2.balancer.LoadBalancerState.NullStateListenerCallback) ServiceProperties(com.linkedin.d2.balancer.properties.ServiceProperties) ClusterProperties(com.linkedin.d2.balancer.properties.ClusterProperties) RangeBasedPartitionProperties(com.linkedin.d2.balancer.properties.RangeBasedPartitionProperties) HashSet(java.util.HashSet) Test(org.testng.annotations.Test) DegraderLoadBalancerTest(com.linkedin.d2.balancer.strategies.degrader.DegraderLoadBalancerTest)

Example 4 with RangeBasedPartitionProperties

Use of com.linkedin.d2.balancer.properties.RangeBasedPartitionProperties in the project rest.li by LinkedIn.

From the class TestD2Config, method verifyPartitionProperties:

/**
 * Asserts that the partition properties stored for {@code cluster} match the
 * expected values under the "partitionProperties" key of {@code propertiesMap}.
 * Does nothing when no partition properties are expected.
 *
 * @param cluster the cluster whose stored properties are fetched and checked
 * @param propertiesMap expected configuration; may lack a "partitionProperties" entry
 * @throws IOException, URISyntaxException, PropertyStoreException propagated from the property fetch
 */
public static void verifyPartitionProperties(String cluster, Map<String, Object> propertiesMap) throws IOException, URISyntaxException, PropertyStoreException {
    final ClusterProperties clusterprops = getClusterProperties(_zkclient, cluster);
    if (propertiesMap.get("partitionProperties") == null) {
        // Nothing to verify for clusters without partition configuration.
        return;
    }
    @SuppressWarnings("unchecked")
    Map<String, Object> expected = (Map<String, Object>) propertiesMap.get("partitionProperties");
    PartitionProperties.PartitionType partitionType =
        PartitionProperties.PartitionType.valueOf((String) expected.get("partitionType"));
    switch (partitionType) {
        case RANGE:
            {
                long expectedKeyRangeStart = ((Number) expected.get("keyRangeStart")).longValue();
                long expectedPartitionSize = ((Number) expected.get("partitionSize")).longValue();
                int expectedPartitionCount = ((Number) expected.get("partitionCount")).intValue();
                String expectedRegex = (String) expected.get("partitionKeyRegex");
                RangeBasedPartitionProperties actualRange = (RangeBasedPartitionProperties) clusterprops.getPartitionProperties();
                assertEquals(expectedKeyRangeStart, actualRange.getKeyRangeStart());
                assertEquals(expectedPartitionSize, actualRange.getPartitionSize());
                assertEquals(expectedPartitionCount, actualRange.getPartitionCount());
                assertEquals(expectedRegex, actualRange.getPartitionKeyRegex());
            }
            break;
        case HASH:
            {
                int expectedPartitionCount = ((Number) expected.get("partitionCount")).intValue();
                String expectedRegex = (String) expected.get("partitionKeyRegex");
                // Hash algorithm names are stored case-insensitively; normalize before comparing.
                String algorithmName = (String) expected.get("hashAlgorithm");
                HashBasedPartitionProperties.HashAlgorithm expectedAlgorithm =
                    HashBasedPartitionProperties.HashAlgorithm.valueOf(algorithmName.toUpperCase());
                HashBasedPartitionProperties actualHash = (HashBasedPartitionProperties) clusterprops.getPartitionProperties();
                assertEquals(expectedPartitionCount, actualHash.getPartitionCount());
                assertEquals(expectedRegex, actualHash.getPartitionKeyRegex());
                assertEquals(expectedAlgorithm, actualHash.getHashAlgorithm());
            }
            break;
        default:
            break;
    }
}
Also used : PartitionProperties(com.linkedin.d2.balancer.properties.PartitionProperties) HashBasedPartitionProperties(com.linkedin.d2.balancer.properties.HashBasedPartitionProperties) RangeBasedPartitionProperties(com.linkedin.d2.balancer.properties.RangeBasedPartitionProperties) HashBasedPartitionProperties(com.linkedin.d2.balancer.properties.HashBasedPartitionProperties) ClusterProperties(com.linkedin.d2.balancer.properties.ClusterProperties) RangeBasedPartitionProperties(com.linkedin.d2.balancer.properties.RangeBasedPartitionProperties) Map(java.util.Map) HashMap(java.util.HashMap)

Example 5 with RangeBasedPartitionProperties

Use of com.linkedin.d2.balancer.properties.RangeBasedPartitionProperties in the project rest.li by LinkedIn.

From the class SimpleLoadBalancerStateTest, method testClientsShutdownAfterPropertyUpdatesStreamRequest:

// Exercises a sequence of property updates (cluster/service/URI refreshes) while
// stream requests are in flight, then shuts down the state and asserts that every
// transport client created along the way was shut down as well.
@Test(groups = { "small", "back-end" })
public void testClientsShutdownAfterPropertyUpdatesStreamRequest() throws URISyntaxException, InterruptedException {
    reset();
    URI uri = URI.create("http://cluster-1/test");
    List<String> schemes = new ArrayList<String>();
    // Single URI in the default partition with weight 1.
    Map<Integer, PartitionData> partitionData = new HashMap<Integer, PartitionData>(1);
    partitionData.put(DefaultPartitionAccessor.DEFAULT_PARTITION_ID, new PartitionData(1d));
    Map<URI, Map<Integer, PartitionData>> uriData = new HashMap<URI, Map<Integer, PartitionData>>();
    uriData.put(uri, partitionData);
    schemes.add("http");
    // set up state
    _state.listenToService("service-1", new NullStateListenerCallback());
    _state.listenToCluster("cluster-1", new NullStateListenerCallback());
    _state.setDelayedExecution(0);
    _serviceRegistry.put("service-1", new ServiceProperties("service-1", "cluster-1", "/test", Arrays.asList("random"), Collections.<String, Object>emptyMap(), Collections.<String, Object>emptyMap(), Collections.<String, String>emptyMap(), schemes, Collections.<URI>emptySet()));
    _clusterRegistry.put("cluster-1", new ClusterProperties("cluster-1"));
    _uriRegistry.put("cluster-1", new UriProperties("cluster-1", uriData));
    // Second cluster with range-based partitioning (2 partitions, size 100) and a
    // degraderV3 service, to cover the partitioned code path as well.
    URI uri1 = URI.create("http://partition-cluster-1/test1");
    URI uri2 = URI.create("http://partition-cluster-1/test2");
    _state.listenToCluster("partition-cluster-1", new NullStateListenerCallback());
    _clusterRegistry.put("partition-cluster-1", new ClusterProperties("partition-cluster-1", null, new HashMap<String, String>(), new HashSet<URI>(), new RangeBasedPartitionProperties("id=(\\d+)", 0, 100, 2)));
    _state.listenToService("partition-service-1", new NullStateListenerCallback());
    _serviceRegistry.put("partition-service-1", new ServiceProperties("partition-service-1", "partition-cluster-1", "/partition-test", Arrays.asList("degraderV3"), Collections.<String, Object>emptyMap(), Collections.<String, Object>emptyMap(), Collections.<String, String>emptyMap(), schemes, Collections.<URI>emptySet()));
    Map<Integer, PartitionData> partitionWeight = new HashMap<Integer, PartitionData>();
    partitionWeight.put(0, new PartitionData(1d));
    partitionWeight.put(1, new PartitionData(2d));
    Map<URI, Map<Integer, PartitionData>> partitionDesc = new HashMap<URI, Map<Integer, PartitionData>>();
    partitionDesc.put(uri1, partitionWeight);
    // NOTE(review): partitionDesc stores a reference, not a copy — the remove/put
    // below mutate the same map already associated with uri1, so uri1 and uri2 end
    // up sharing one weight map {1=2d, 2=1d}. The assertions below are consistent
    // with that aliasing; confirm it is intentional rather than a missed copy.
    partitionWeight.remove(0);
    partitionWeight.put(2, new PartitionData(1d));
    partitionDesc.put(uri2, partitionWeight);
    _uriRegistry.put("partition-cluster-1", new UriProperties("partition-cluster-1", partitionDesc));
    TrackerClient client1 = _state.getClient("partition-service-1", uri1);
    TrackerClient client2 = _state.getClient("partition-service-1", uri2);
    assertEquals(client2.getPartitionWeight(1), 2d);
    assertEquals(client2.getPartitionWeight(2), 1d);
    assertEquals(client1.getPartitionWeight(1), 2d);
    // Get client, then refresh cluster
    TrackerClient client = _state.getClient("service-1", uri);
    client.streamRequest(new StreamRequestBuilder(URI.create("d2://service-1/foo")).build(EntityStreams.emptyStream()), new RequestContext(), Collections.<String, String>emptyMap(), new TransportCallbackAdapter<StreamResponse>(Callbacks.<StreamResponse>empty()));
    // now force a refresh by adding cluster
    _clusterRegistry.put("cluster-1", new ClusterProperties("cluster-1"));
    // Get client, then refresh service
    client = _state.getClient("service-1", uri);
    client.streamRequest(new StreamRequestBuilder(URI.create("d2://service-1/foo")).build(EntityStreams.emptyStream()), new RequestContext(), Collections.<String, String>emptyMap(), new TransportCallbackAdapter<StreamResponse>(Callbacks.<StreamResponse>empty()));
    // refresh by adding service
    _serviceRegistry.put("service-1", new ServiceProperties("service-1", "cluster-1", "/test", Arrays.asList("random"), Collections.<String, Object>emptyMap(), null, null, schemes, null));
    // Get client, then mark server up/down
    client = _state.getClient("service-1", uri);
    client.streamRequest(new StreamRequestBuilder(URI.create("d2://service-1/foo")).build(EntityStreams.emptyStream()), new RequestContext(), Collections.<String, String>emptyMap(), new TransportCallbackAdapter<StreamResponse>(Callbacks.<StreamResponse>empty()));
    _uriRegistry.put("cluster-1", new UriProperties("cluster-1", Collections.<URI, Map<Integer, PartitionData>>emptyMap()));
    _uriRegistry.put("cluster-1", new UriProperties("cluster-1", uriData));
    // Get the client one last time
    client = _state.getClient("service-1", uri);
    client.streamRequest(new StreamRequestBuilder(URI.create("d2://service-1/foo")).build(EntityStreams.emptyStream()), new RequestContext(), Collections.<String, String>emptyMap(), new TransportCallbackAdapter<StreamResponse>(Callbacks.<StreamResponse>empty()));
    // Shut down the whole state and verify no transport clients are left running.
    TestShutdownCallback callback = new TestShutdownCallback();
    _state.shutdown(callback);
    assertTrue(callback.await(10, TimeUnit.SECONDS), "Failed to shut down state");
    for (TransportClientFactory factory : _clientFactories.values()) {
        SimpleLoadBalancerTest.DoNothingClientFactory f = (SimpleLoadBalancerTest.DoNothingClientFactory) factory;
        assertEquals(f.getRunningClientCount(), 0, "not all clients were shut down");
    }
}
Also used : HashMap(java.util.HashMap) ArrayList(java.util.ArrayList) URI(java.net.URI) StreamRequestBuilder(com.linkedin.r2.message.stream.StreamRequestBuilder) TrackerClient(com.linkedin.d2.balancer.clients.TrackerClient) PartitionData(com.linkedin.d2.balancer.properties.PartitionData) UriProperties(com.linkedin.d2.balancer.properties.UriProperties) RangeBasedPartitionProperties(com.linkedin.d2.balancer.properties.RangeBasedPartitionProperties) RequestContext(com.linkedin.r2.message.RequestContext) TransportClientFactory(com.linkedin.r2.transport.common.TransportClientFactory) HashSet(java.util.HashSet) StreamResponse(com.linkedin.r2.message.stream.StreamResponse) NullStateListenerCallback(com.linkedin.d2.balancer.LoadBalancerState.NullStateListenerCallback) ServiceProperties(com.linkedin.d2.balancer.properties.ServiceProperties) ClusterProperties(com.linkedin.d2.balancer.properties.ClusterProperties) Map(java.util.Map) HashMap(java.util.HashMap) Test(org.testng.annotations.Test) DegraderLoadBalancerTest(com.linkedin.d2.balancer.strategies.degrader.DegraderLoadBalancerTest)

Aggregations

RangeBasedPartitionProperties (com.linkedin.d2.balancer.properties.RangeBasedPartitionProperties)8 ClusterProperties (com.linkedin.d2.balancer.properties.ClusterProperties)5 HashBasedPartitionProperties (com.linkedin.d2.balancer.properties.HashBasedPartitionProperties)5 HashMap (java.util.HashMap)5 Test (org.testng.annotations.Test)5 ServiceProperties (com.linkedin.d2.balancer.properties.ServiceProperties)4 ArrayList (java.util.ArrayList)4 HashSet (java.util.HashSet)4 Map (java.util.Map)4 NullStateListenerCallback (com.linkedin.d2.balancer.LoadBalancerState.NullStateListenerCallback)3 PartitionData (com.linkedin.d2.balancer.properties.PartitionData)3 PartitionProperties (com.linkedin.d2.balancer.properties.PartitionProperties)3 UriProperties (com.linkedin.d2.balancer.properties.UriProperties)3 DegraderLoadBalancerTest (com.linkedin.d2.balancer.strategies.degrader.DegraderLoadBalancerTest)3 com.linkedin.d2.rangedPartitionProperties (com.linkedin.d2.rangedPartitionProperties)3 RequestContext (com.linkedin.r2.message.RequestContext)3 TransportClientFactory (com.linkedin.r2.transport.common.TransportClientFactory)3 URI (java.net.URI)3 D2ClusterPartitionConfiguration (com.linkedin.d2.D2ClusterPartitionConfiguration)2 TrackerClient (com.linkedin.d2.balancer.clients.TrackerClient)2