Use of com.linkedin.d2.balancer.properties.PartitionData in project rest.li by linkedin.
The class SimpleLoadBalancerSimulation, method addCluster.
// cluster simulation
public void addCluster(String clusterName, List<String> prioritizedSchemes, List<URI> uris) {
    ClusterProperties clusterProperties = new ClusterProperties(clusterName, prioritizedSchemes);
    // weight the uris randomly between 1 and 2
    Map<URI, Map<Integer, PartitionData>> uriData = new HashMap<URI, Map<Integer, PartitionData>>();
    for (URI uri : uris) {
        Map<Integer, PartitionData> partitionData = new HashMap<Integer, PartitionData>(1);
        partitionData.put(DefaultPartitionAccessor.DEFAULT_PARTITION_ID, new PartitionData(1d + _random.nextDouble()));
        uriData.put(uri, partitionData);
    }
    UriProperties uriProperties = new UriProperties(clusterName, uriData);
    _expectedClusterProperties.put(clusterName, clusterProperties);
    _expectedUriProperties.put(clusterName, uriProperties);
    _clusterRegistry.put(clusterName, clusterProperties);
    _uriRegistry.put(clusterName, uriProperties);
}
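The weights written into the registries above can be read back through UriProperties. The lines below are a hedged sketch, not part of the rest.li test: they assume a uriProperties instance built exactly as in addCluster and an example member URI, and they use only getPartitionDataMap and getWeight, which also appear in the other examples on this page.

// Hypothetical read-back of a weight registered by addCluster above.
URI memberUri = URI.create("http://cluster-1/test"); // example URI, assumed to be among the uris passed in
Map<Integer, PartitionData> registered = uriProperties.getPartitionDataMap(memberUri);
double registeredWeight = registered.get(DefaultPartitionAccessor.DEFAULT_PARTITION_ID).getWeight();
// addCluster draws weights uniformly from [1, 2), so the read-back weight stays in that range
assert registeredWeight >= 1d && registeredWeight < 2d;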
Use of com.linkedin.d2.balancer.properties.PartitionData in project rest.li by linkedin.
The class SimpleLoadBalancerStateTest, method testRefreshWithConcurrentGetTC.
// This test verifies a fix for a specific bug, where the d2 client receives a zookeeper
// update while concurrent getTrackerClient requests are in flight. In that case, all but the
// first of the concurrent requests got a null tracker client because the degraderLoadBalancerState
// was not fully initialized (the hash ring was empty), and this continued until the first request
// had atomically swapped in a fully initialized state for other requests to use. This test failed
// on pre-fix code; it now succeeds.
@Test(groups = { "small", "back-end" })
public void testRefreshWithConcurrentGetTC() throws URISyntaxException, InterruptedException {
reset();
LinkedList<String> strategyList = new LinkedList<String>();
URI uri = URI.create("http://cluster-1/test");
final List<String> schemes = new ArrayList<String>();
schemes.add("http");
strategyList.add("degraderV3");
// set up state
_state.listenToService("service-1", new NullStateListenerCallback());
_state.listenToCluster("cluster-1", new NullStateListenerCallback());
assertNull(_state.getStrategy("service-1", "http"));
// Use the _clusterRegistry.put to populate the _state.clusterProperties, used by
// _state.refreshServiceStrategies
_clusterRegistry.put("cluster-1", new ClusterProperties("cluster-1"));
_serviceRegistry.put("service-1", new ServiceProperties("service-1", "cluster-1", "/test", strategyList, Collections.<String, Object>emptyMap(), Collections.<String, Object>emptyMap(), Collections.<String, String>emptyMap(), schemes, Collections.<URI>emptySet()));
LoadBalancerStrategy strategy = _state.getStrategy("service-1", "http");
assertNotNull(strategy, "got null strategy in setup");
// test serial to make sure things are working before concurrent test
TransportClient resultTC = _state.getClient("service-1", "http");
assertNotNull(resultTC, "got null tracker client in non-concurrent env");
ExecutorService myExecutor = Executors.newCachedThreadPool();
ArrayList<TcCallable> cArray = new ArrayList<TcCallable>();
List<TrackerClient> clients = new ArrayList<TrackerClient>();
Map<Integer, PartitionData> partitionDataMap = new HashMap<Integer, PartitionData>(2);
partitionDataMap.put(DefaultPartitionAccessor.DEFAULT_PARTITION_ID, new PartitionData(1d));
clients.add(new TrackerClient(uri, partitionDataMap, new DegraderLoadBalancerTest.TestLoadBalancerClient(uri), SystemClock.instance(), null));
for (int i = 0; i < 20; i++) {
cArray.add(i, new TcCallable(clients, _state));
}
Runnable refreshTask = new Runnable() {
@Override
public void run() {
while (true) {
List<String> myStrategyList = new LinkedList<String>();
myStrategyList.add("degraderV3");
_state.refreshServiceStrategies(new ServiceProperties("service-1", "cluster-1", "/test", myStrategyList, Collections.<String, Object>emptyMap(), Collections.<String, Object>emptyMap(), Collections.<String, String>emptyMap(), schemes, Collections.<URI>emptySet()));
if (Thread.interrupted()) {
return;
}
}
}
};
myExecutor.execute(refreshTask);
Integer badResults = 0;
ArrayList<Future<Integer>> myList = new ArrayList<Future<Integer>>();
for (int i = 0; i < cArray.size(); i++) {
@SuppressWarnings("unchecked") Callable<Integer> c = (Callable) cArray.get(i);
myList.add(i, myExecutor.submit(c));
}
try {
for (int i = 0; i < cArray.size(); i++) {
badResults += myList.get(i).get();
}
} catch (ExecutionException e) {
Assert.assertFalse(true, "got ExecutionException");
} finally {
try {
// call shutdownNow() to send an interrupt to the refreshTask
myExecutor.shutdownNow();
boolean status = myExecutor.awaitTermination(5, TimeUnit.SECONDS);
if (status == false) {
Assert.assertFalse(true, "failed to shutdown threads correctly");
}
} catch (InterruptedException ie) {
// this thread was interrupted
myExecutor.shutdownNow();
}
}
Assert.assertTrue(badResults == 0, "getTrackerClients returned null");
}
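TcCallable itself is not shown in this excerpt. The class below is only a hedged sketch of what such a callable might do, assuming it repeatedly asks the SimpleLoadBalancerState for a client and returns how many calls came back null; the class name, loop count, and hard-coded service name are illustrative, not the actual rest.li test code.

// Hypothetical stand-in for TcCallable: count null clients observed while
// refreshServiceStrategies runs concurrently on another thread.
private static class TcCallableSketch implements Callable<Integer> {
    private final SimpleLoadBalancerState _state;

    TcCallableSketch(SimpleLoadBalancerState state) {
        _state = state;
    }

    @Override
    public Integer call() {
        int nullClients = 0;
        for (int i = 0; i < 1000; i++) {
            TransportClient client = _state.getClient("service-1", "http");
            if (client == null) {
                // pre-fix, a refresh in progress could hand out a null client here
                nullClients++;
            }
        }
        return nullClients;
    }
}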
Use of com.linkedin.d2.balancer.properties.PartitionData in project rest.li by linkedin.
The class TrackerClientTest, method testClientStreamRequest.
@Test(groups = { "small", "back-end" })
public void testClientStreamRequest() throws URISyntaxException {
URI uri = URI.create("http://test.qa.com:1234/foo");
double weight = 3d;
TestClient wrappedClient = new TestClient(true);
Clock clock = new SettableClock();
Map<Integer, PartitionData> partitionDataMap = new HashMap<Integer, PartitionData>(2);
partitionDataMap.put(DefaultPartitionAccessor.DEFAULT_PARTITION_ID, new PartitionData(3d));
TrackerClient client = new TrackerClient(uri, partitionDataMap, wrappedClient, clock, null);
assertEquals(client.getUri(), uri);
Double clientWeight = client.getPartitionWeight(DefaultPartitionAccessor.DEFAULT_PARTITION_ID);
assertEquals(clientWeight, weight);
assertEquals(client.getWrappedClient(), wrappedClient);
StreamRequest streamRequest = new StreamRequestBuilder(uri).build(EntityStreams.emptyStream());
Map<String, String> restWireAttrs = new HashMap<String, String>();
TestTransportCallback<StreamResponse> restCallback = new TestTransportCallback<StreamResponse>();
client.streamRequest(streamRequest, new RequestContext(), restWireAttrs, restCallback);
assertFalse(restCallback.response.hasError());
assertSame(wrappedClient.streamRequest, streamRequest);
assertEquals(wrappedClient.restWireAttrs, restWireAttrs);
}
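TrackerClient also exposes the non-streaming rest path through the same TransportClient interface. The lines below are a hedged sketch of the analogous call, reusing the client, uri, and restWireAttrs from the test above; the final assertion assumes the test callback reports success the same way it does for stream requests.

// Hypothetical rest-path counterpart to the stream request above.
RestRequest restRequest = new RestRequestBuilder(uri).build();
TestTransportCallback<RestResponse> restResponseCallback = new TestTransportCallback<RestResponse>();
client.restRequest(restRequest, new RequestContext(), restWireAttrs, restResponseCallback);
assertFalse(restResponseCallback.response.hasError());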
Use of com.linkedin.d2.balancer.properties.PartitionData in project rest.li by linkedin.
The class ZookeeperConnectionManagerTest, method testDelayMarkUp.
@Test
public void testDelayMarkUp() throws IOException, ExecutionException, InterruptedException, PropertyStoreException {
    final String uri = "http://cluster-1/test";
    final String cluster = "cluster-1";
    final double weight = 0.5d;
    ZooKeeperAnnouncer announcer = new ZooKeeperAnnouncer(new ZooKeeperServer(), false);
    announcer.setCluster(cluster);
    announcer.setUri(uri);
    Map<Integer, PartitionData> partitionWeight = new HashMap<Integer, PartitionData>();
    partitionWeight.put(DefaultPartitionAccessor.DEFAULT_PARTITION_ID, new PartitionData(weight));
    announcer.setPartitionData(partitionWeight);
    ZooKeeperConnectionManager manager = createManager(announcer);
    FutureCallback<None> managerStartCallback = new FutureCallback<None>();
    manager.start(managerStartCallback);
    managerStartCallback.get();
    ZooKeeperEphemeralStore<UriProperties> store = createAndStartUriStore();
    UriProperties properties = store.get(cluster);
    assertNull(properties);
    FutureCallback<None> markUpCallback = new FutureCallback<None>();
    announcer.markUp(markUpCallback);
    markUpCallback.get();
    UriProperties propertiesAfterMarkUp = store.get(cluster);
    assertNotNull(propertiesAfterMarkUp);
    assertEquals(propertiesAfterMarkUp.getPartitionDataMap(URI.create(uri)).get(DefaultPartitionAccessor.DEFAULT_PARTITION_ID).getWeight(), weight);
    assertEquals(propertiesAfterMarkUp.Uris().size(), 1);
}
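The second constructor argument (false) keeps the announcer from marking itself up when the connection manager starts, which is why the store is empty until markUp is called explicitly; the two store.get(cluster) checks above verify exactly that. The teardown direction is not part of this excerpt; the lines below are a hedged sketch that assumes ZooKeeperAnnouncer exposes a markDown method mirroring markUp.

// Hypothetical teardown: withdraw the announcement and expect the URI to disappear.
FutureCallback<None> markDownCallback = new FutureCallback<None>();
announcer.markDown(markDownCallback);
markDownCallback.get();
UriProperties propertiesAfterMarkDown = store.get(cluster);
// the exact shape of the empty result may vary between a null entry and an entry with no URIs
assertTrue(propertiesAfterMarkDown == null || propertiesAfterMarkDown.getPartitionDataMap(URI.create(uri)) == null);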
Use of com.linkedin.d2.balancer.properties.PartitionData in project rest.li by linkedin.
The class ZookeeperConnectionManagerTest, method testMarkUp.
@Test
public void testMarkUp() throws IOException, ExecutionException, InterruptedException, PropertyStoreException {
    final String uri = "http://cluster-1/test";
    final String cluster = "cluster-1";
    final double weight = 0.5d;
    ZooKeeperAnnouncer announcer = new ZooKeeperAnnouncer(new ZooKeeperServer());
    announcer.setCluster(cluster);
    announcer.setUri(uri);
    Map<Integer, PartitionData> partitionWeight = new HashMap<Integer, PartitionData>();
    partitionWeight.put(DefaultPartitionAccessor.DEFAULT_PARTITION_ID, new PartitionData(weight));
    announcer.setPartitionData(partitionWeight);
    ZooKeeperConnectionManager manager = createManager(announcer);
    FutureCallback<None> managerStartCallback = new FutureCallback<None>();
    manager.start(managerStartCallback);
    managerStartCallback.get();
    ZooKeeperEphemeralStore<UriProperties> store = createAndStartUriStore();
    UriProperties properties = store.get(cluster);
    assertNotNull(properties);
    assertEquals(properties.getPartitionDataMap(URI.create(uri)).get(DefaultPartitionAccessor.DEFAULT_PARTITION_ID).getWeight(), weight);
    assertEquals(properties.Uris().size(), 1);
}
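Unlike testDelayMarkUp above, this test constructs the ZooKeeperAnnouncer without the extra boolean argument, so the URI is announced as soon as the connection manager starts: the store already contains the expected weight before any explicit markUp call.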