use of com.linkedin.d2.balancer.properties.PartitionData in project rest.li by linkedin.
the class LoadBalancerEchoServer method markUp.
public void markUp(Map<Integer, Double> partitionWeight) throws PropertyStoreException {
  FutureCallback<None> callback = new FutureCallback<None>();
  Map<Integer, PartitionData> partitionDataMap = new HashMap<Integer, PartitionData>();
  if (partitionWeight != null) {
    // Convert each caller-supplied weight into a PartitionData entry.
    for (int partitionId : partitionWeight.keySet()) {
      partitionDataMap.put(partitionId, new PartitionData(partitionWeight.get(partitionId)));
    }
  } else {
    // No explicit weights: announce in the default partition with weight 1.
    partitionDataMap.put(DefaultPartitionAccessor.DEFAULT_PARTITION_ID, new PartitionData(1d));
  }
  _announcer.markUp(_cluster, _uri, partitionDataMap, callback);
  try {
    callback.get(10, TimeUnit.SECONDS);
  } catch (Exception e) {
    throw new PropertyStoreException(e);
  }
}
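A minimal standalone sketch (not taken from the rest.li sources) of the kind of map this method accepts and the conversion it performs; only the PartitionData(double) constructor comes from the excerpt above, while the weights, class name, and import path are illustrative assumptions.

import java.util.HashMap;
import java.util.Map;

import com.linkedin.d2.balancer.properties.PartitionData;

public class MarkUpWeightSketch {
  public static void main(String[] args) {
    // Caller-supplied weights (illustrative values): partition 0 at full weight, partition 1 at half weight.
    Map<Integer, Double> partitionWeight = new HashMap<Integer, Double>();
    partitionWeight.put(0, 1.0d);
    partitionWeight.put(1, 0.5d);

    // Mirrors the conversion performed inside markUp(...) above.
    Map<Integer, PartitionData> partitionDataMap = new HashMap<Integer, PartitionData>();
    for (Map.Entry<Integer, Double> entry : partitionWeight.entrySet()) {
      partitionDataMap.put(entry.getKey(), new PartitionData(entry.getValue()));
    }
    System.out.println("Would announce partitions: " + partitionDataMap.keySet());
  }
}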
use of com.linkedin.d2.balancer.properties.PartitionData in project rest.li by linkedin.
the class DegraderLoadBalancerTest method getClient.
public static TrackerClient getClient(URI uri) {
  Map<Integer, PartitionData> partitionDataMap = new HashMap<Integer, PartitionData>(2);
  partitionDataMap.put(DefaultPartitionAccessor.DEFAULT_PARTITION_ID, new PartitionData(1d));
  return new TrackerClient(uri, partitionDataMap, new TestLoadBalancerClient(uri));
}
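A hypothetical caller of the helper above (not part of the rest.li tests): building a handful of tracker clients for made-up hosts. It assumes DegraderLoadBalancerTest is on the test classpath and that TrackerClient lives under com.linkedin.d2.balancer.clients; the host URIs are invented.

import java.net.URI;
import java.util.ArrayList;
import java.util.List;

import com.linkedin.d2.balancer.clients.TrackerClient;

public class GetClientUsageSketch {
  public static void main(String[] args) {
    List<TrackerClient> clients = new ArrayList<TrackerClient>();
    for (int i = 1; i <= 3; i++) {
      // Each client carries a single default-partition entry with weight 1, as built by getClient above.
      clients.add(DegraderLoadBalancerTest.getClient(URI.create("http://test-host-" + i + ":8080")));
    }
    System.out.println("Created " + clients.size() + " tracker clients");
  }
}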
use of com.linkedin.d2.balancer.properties.PartitionData in project rest.li by linkedin.
the class ZKFSTest method testKeyMapper.
@Test
public void testKeyMapper() throws Exception {
  final String TEST_SERVICE_NAME = "test-service";
  final String TEST_CLUSTER_NAME = "test-cluster";
  final URI TEST_SERVER_URI1 = URI.create("http://test-host-1/");
  final URI TEST_SERVER_URI2 = URI.create("http://test-host-2/");
  final int NUM_ITERATIONS = 5;
  startServer();
  try {
    ZKFSLoadBalancer balancer = getBalancer();
    FutureCallback<None> callback = new FutureCallback<None>();
    balancer.start(callback);
    callback.get(30, TimeUnit.SECONDS);
    ZKConnection conn = balancer.zkConnection();
    // Register the service, pointing it at the test cluster.
    ZooKeeperPermanentStore<ServiceProperties> serviceStore = new ZooKeeperPermanentStore<ServiceProperties>(conn, new ServicePropertiesJsonSerializer(), ZKFSUtil.servicePath(BASE_PATH));
    ServiceProperties props = new ServiceProperties(TEST_SERVICE_NAME, TEST_CLUSTER_NAME, "/test", Arrays.asList("degrader"), Collections.<String, Object>emptyMap(), null, null, Arrays.asList("http"), null);
    serviceStore.put(TEST_SERVICE_NAME, props);
    // Register the cluster.
    ClusterProperties clusterProperties = new ClusterProperties(TEST_CLUSTER_NAME);
    ZooKeeperPermanentStore<ClusterProperties> clusterStore = new ZooKeeperPermanentStore<ClusterProperties>(conn, new ClusterPropertiesJsonSerializer(), ZKFSUtil.clusterPath(BASE_PATH));
    clusterStore.put(TEST_CLUSTER_NAME, clusterProperties);
    // Announce both test URIs in the default partition with weight 1.
    ZooKeeperEphemeralStore<UriProperties> uriStore = new ZooKeeperEphemeralStore<UriProperties>(conn, new UriPropertiesJsonSerializer(), new UriPropertiesMerger(), ZKFSUtil.uriPath(BASE_PATH), false, true);
    Map<URI, Map<Integer, PartitionData>> uriData = new HashMap<URI, Map<Integer, PartitionData>>();
    Map<Integer, PartitionData> partitionData = new HashMap<Integer, PartitionData>(1);
    partitionData.put(DefaultPartitionAccessor.DEFAULT_PARTITION_ID, new PartitionData(1.0d));
    uriData.put(TEST_SERVER_URI1, partitionData);
    uriData.put(TEST_SERVER_URI2, partitionData);
    UriProperties uriProps = new UriProperties(TEST_CLUSTER_NAME, uriData);
    callback = new FutureCallback<None>();
    uriStore.start(callback);
    callback.get(30, TimeUnit.SECONDS);
    uriStore.put(TEST_CLUSTER_NAME, uriProps);
    // Map 100 keys and verify they are spread across both announced hosts.
    Set<Integer> keys = new HashSet<Integer>();
    for (int ii = 0; ii < 100; ++ii) {
      keys.add(ii);
    }
    for (int ii = 0; ii < NUM_ITERATIONS; ++ii) {
      KeyMapper mapper = balancer.getKeyMapper();
      MapKeyResult<URI, Integer> batches = mapper.mapKeysV2(URI.create("d2://" + TEST_SERVICE_NAME), keys);
      Assert.assertEquals(batches.getMapResult().size(), 2);
      for (Map.Entry<URI, Collection<Integer>> oneBatch : batches.getMapResult().entrySet()) {
        Assert.assertTrue(oneBatch.getKey().toString().startsWith("http://test-host-"));
        Assert.assertTrue(keys.containsAll(oneBatch.getValue()));
      }
    }
  } finally {
    stopServer();
  }
}
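For reference, a standalone sketch of the UriProperties payload the test writes to the ephemeral store, with per-host weights made explicit. The constructors are the ones used above; the cluster name, hosts, weights, and import paths (including the assumed location of DefaultPartitionAccessor) are illustrative assumptions.

import java.net.URI;
import java.util.HashMap;
import java.util.Map;

import com.linkedin.d2.balancer.properties.PartitionData;
import com.linkedin.d2.balancer.properties.UriProperties;
import com.linkedin.d2.balancer.util.partitions.DefaultPartitionAccessor;

public class UriPropertiesSketch {
  public static void main(String[] args) {
    Map<URI, Map<Integer, PartitionData>> uriData = new HashMap<URI, Map<Integer, PartitionData>>();

    // Host 1 announces the default partition at full weight.
    Map<Integer, PartitionData> host1Data = new HashMap<Integer, PartitionData>();
    host1Data.put(DefaultPartitionAccessor.DEFAULT_PARTITION_ID, new PartitionData(1.0d));
    uriData.put(URI.create("http://test-host-1/"), host1Data);

    // Host 2 announces the same partition at half weight.
    Map<Integer, PartitionData> host2Data = new HashMap<Integer, PartitionData>();
    host2Data.put(DefaultPartitionAccessor.DEFAULT_PARTITION_ID, new PartitionData(0.5d));
    uriData.put(URI.create("http://test-host-2/"), host2Data);

    UriProperties uriProps = new UriProperties("test-cluster", uriData);
    System.out.println("Built UriProperties for hosts: " + uriData.keySet() + " -> " + uriProps);
  }
}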
use of com.linkedin.d2.balancer.properties.PartitionData in project rest.li by linkedin.
the class ConsistentHashKeyMapperTest method getConsistentHashKeyMapper.
private ConsistentHashKeyMapper getConsistentHashKeyMapper(RingFactory<URI> ringFactory) throws URISyntaxException {
  String serviceName = "articles";
  String clusterName = "cluster";
  String path = "path";
  String strategyName = "degrader";
  // setup partition
  Map<URI, Map<Integer, PartitionData>> partitionDescriptions = new HashMap<URI, Map<Integer, PartitionData>>();
  final URI foo1 = new URI("http://foo1.com");
  Map<Integer, PartitionData> foo1Data = new HashMap<Integer, PartitionData>();
  foo1Data.put(0, new PartitionData(1.0));
  partitionDescriptions.put(foo1, foo1Data);
  final URI foo2 = new URI("http://foo2.com");
  Map<Integer, PartitionData> foo2Data = new HashMap<Integer, PartitionData>();
  foo2Data.put(3, new PartitionData(1.0));
  foo2Data.put(4, new PartitionData(1.0));
  partitionDescriptions.put(foo2, foo2Data);
  final URI foo3 = new URI("http://foo3.com");
  Map<Integer, PartitionData> foo3Data = new HashMap<Integer, PartitionData>();
  foo3Data.put(0, new PartitionData(1.0));
  partitionDescriptions.put(foo3, foo3Data);
  final URI foo4 = new URI("http://foo4.com");
  Map<Integer, PartitionData> foo4Data = new HashMap<Integer, PartitionData>();
  foo4Data.put(1, new PartitionData(1.0));
  partitionDescriptions.put(foo4, foo4Data);
  final URI foo5 = new URI("http://foo5.com");
  Map<Integer, PartitionData> foo5Data = new HashMap<Integer, PartitionData>();
  foo5Data.put(1, new PartitionData(1.0));
  partitionDescriptions.put(foo5, foo5Data);
  final URI foo6 = new URI("http://foo6.com");
  Map<Integer, PartitionData> foo6Data = new HashMap<Integer, PartitionData>();
  foo6Data.put(1, new PartitionData(1.0));
  partitionDescriptions.put(foo6, foo6Data);
  // setup strategy which involves tweaking the hash ring to get partitionId -> URI host
  List<LoadBalancerState.SchemeStrategyPair> orderedStrategies = new ArrayList<LoadBalancerState.SchemeStrategyPair>();
  LoadBalancerStrategy strategy = new TestLoadBalancerStrategy(partitionDescriptions, ringFactory);
  orderedStrategies.add(new LoadBalancerState.SchemeStrategyPair("http", strategy));
  // setup the partition accessor which is used to get partitionId -> keys
  PartitionAccessor accessor = new TestPartitionAccessor();
  URI serviceURI = new URI("d2://" + serviceName);
  SimpleLoadBalancer balancer = new SimpleLoadBalancer(new PartitionedLoadBalancerTestState(clusterName, serviceName, path, strategyName, partitionDescriptions, orderedStrategies, accessor));
  ConsistentHashKeyMapper mapper = new ConsistentHashKeyMapper(balancer, balancer);
  return mapper;
}
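To make the fixture easier to read, here is a plain-Java sketch (illustrative, not from the test) that inverts such a partitionDescriptions map into the partitionId -> hosts view the hash ring needs. Applied to the data above it yields partition 0 -> {foo1, foo3}, partition 1 -> {foo4, foo5, foo6}, and partitions 3 and 4 -> {foo2}.

import java.net.URI;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;

import com.linkedin.d2.balancer.properties.PartitionData;

public class PartitionInversionSketch {
  // Invert URI -> (partitionId -> PartitionData) into partitionId -> set of URIs serving that partition.
  static Map<Integer, Set<URI>> hostsPerPartition(Map<URI, Map<Integer, PartitionData>> partitionDescriptions) {
    Map<Integer, Set<URI>> result = new HashMap<Integer, Set<URI>>();
    for (Map.Entry<URI, Map<Integer, PartitionData>> entry : partitionDescriptions.entrySet()) {
      for (Integer partitionId : entry.getValue().keySet()) {
        Set<URI> hosts = result.get(partitionId);
        if (hosts == null) {
          hosts = new HashSet<URI>();
          result.put(partitionId, hosts);
        }
        hosts.add(entry.getKey());
      }
    }
    return result;
  }
}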
use of com.linkedin.d2.balancer.properties.PartitionData in project rest.li by linkedin.
the class ConsistentHashKeyMapperTest method testMapKeysConcurrency.
@SuppressWarnings("rawtypes")
@Test
public void testMapKeysConcurrency() throws Exception {
  String serviceName = "articles";
  String clusterName = "cluster";
  String path = "path";
  String strategyName = "degrader";
  int numPartitions = 500;
  // setup partition
  Map<URI, Map<Integer, PartitionData>> partitionDescriptions = new HashMap<URI, Map<Integer, PartitionData>>();
  final URI foo1 = new URI("http://foo1.com");
  Map<Integer, PartitionData> foo1Data = new HashMap<Integer, PartitionData>();
  for (int i = 0; i < numPartitions; i++) {
    foo1Data.put(i, new PartitionData(1.0));
  }
  partitionDescriptions.put(foo1, foo1Data);
  DegraderLoadBalancerStrategyV3 strategy = new DegraderLoadBalancerStrategyV3(new DegraderLoadBalancerStrategyConfig(5000), serviceName, null);
  List<LoadBalancerState.SchemeStrategyPair> orderedStrategies = new ArrayList<LoadBalancerState.SchemeStrategyPair>();
  orderedStrategies.add(new LoadBalancerState.SchemeStrategyPair("http", strategy));
  PartitionAccessor accessor = new TestDeadlockPartitionAccessor(numPartitions);
  SimpleLoadBalancer balancer = new SimpleLoadBalancer(new PartitionedLoadBalancerTestState(clusterName, serviceName, path, strategyName, partitionDescriptions, orderedStrategies, accessor));
  ConsistentHashKeyMapper mapper = new ConsistentHashKeyMapper(balancer, balancer);
  CountDownLatch latch = new CountDownLatch(numPartitions);
  List<Runnable> runnables = createRunnables(numPartitions, mapper, serviceName, latch);
  final ExecutorService executor = Executors.newFixedThreadPool(numPartitions);
  List<Future> futures = new ArrayList<Future>();
  for (int i = 0; i < numPartitions; i++) {
    futures.add(executor.submit(runnables.get(i)));
  }
  for (Future future : futures) {
    future.get(30, TimeUnit.SECONDS);
  }
}
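The createRunnables helper is not included in this excerpt. As a rough, hypothetical sketch of the pattern (the names, logic, and import paths below are assumptions, not the rest.li implementation), each runnable could count down the shared latch, wait for the others, and then issue a mapKeysV2 call so that all partitions are resolved concurrently.

import java.net.URI;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.concurrent.CountDownLatch;

import com.linkedin.d2.balancer.util.hashing.ConsistentHashKeyMapper;

public class CreateRunnablesSketch {
  // Hypothetical stand-in for the createRunnables helper referenced above; NOT the rest.li implementation.
  static List<Runnable> createRunnables(final int count, final ConsistentHashKeyMapper mapper,
                                        final String serviceName, final CountDownLatch latch) {
    final URI serviceUri = URI.create("d2://" + serviceName);
    List<Runnable> runnables = new ArrayList<Runnable>();
    for (int i = 0; i < count; i++) {
      final Integer key = i;
      runnables.add(new Runnable() {
        @Override
        public void run() {
          try {
            latch.countDown();
            latch.await(); // release all callers together to exercise concurrent key mapping
            mapper.mapKeysV2(serviceUri, Collections.singleton(key));
          } catch (Exception e) {
            throw new RuntimeException(e);
          }
        }
      });
    }
    return runnables;
  }
}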