use of java.util.LinkedList in project rest.li by linkedin.
the class DegraderLoadBalancerTest method testDegraderLoadBalancerSimulator.
private void testDegraderLoadBalancerSimulator(DegraderLoadBalancerStrategyAdapter adapter, TestClock clock, long timeInterval, List<TrackerClient> clients, double qps, DegraderImpl.Config degraderConfig) {
    long clusterGenerationId = 1;
    double overrideDropRate = 0.0;
    //simulate a latency of 4000 ms
    //1st round: we use the LOAD_BALANCING strategy. Since we have a high latency we will decrease the number of points
    //from 100 to 80 (transmissionRate * points per weight).
    TrackerClient resultTC = simulateAndTestOneInterval(timeInterval, clock, qps, clients, adapter, clusterGenerationId, 80, true, 0.0, 4000, false, false);
    assertNotNull(resultTC);
    //2nd round: the drop rate should be increased by DegraderLoadBalancerStrategyConfig.DEFAULT_GLOBAL_STEP_UP
    overrideDropRate += DegraderLoadBalancerStrategyConfig.DEFAULT_GLOBAL_STEP_UP;
    resultTC = simulateAndTestOneInterval(timeInterval, clock, qps, clients, adapter, clusterGenerationId, 80, false, overrideDropRate, 4000, false, false);
    //3rd round: we alternate back to the LOAD_BALANCING strategy and drop the points even further
    resultTC = simulateAndTestOneInterval(timeInterval, clock, qps, clients, adapter, clusterGenerationId, 39, true, overrideDropRate, 4000, false, false);
    //4th round: the drop rate should be increased again, as in the 2nd round
    overrideDropRate += DegraderLoadBalancerStrategyConfig.DEFAULT_GLOBAL_STEP_UP;
    resultTC = simulateAndTestOneInterval(timeInterval, clock, qps, clients, adapter, clusterGenerationId, 39, false, overrideDropRate, 4000, false, false);
    //5th round: alternate to changing the hash ring again.
    resultTC = simulateAndTestOneInterval(timeInterval, clock, qps, clients, adapter, clusterGenerationId, 1, true, overrideDropRate, 4000, false, false);
    //6th round: same as the 5th round, we'll increase the drop rate
    overrideDropRate += DegraderLoadBalancerStrategyConfig.DEFAULT_GLOBAL_STEP_UP;
    resultTC = simulateAndTestOneInterval(timeInterval, clock, qps, clients, adapter, clusterGenerationId, 1, false, overrideDropRate, 4000, false, false);
    //7th round: the number of points in the hash ring is at the minimum, so we can't decrease it further. At this point the client
    //is in recovery mode. But since we can't change the hash ring anymore, we'll always be in CALL_DROPPING mode,
    //so the next strategy is expected to be LOAD_BALANCING mode.
    overrideDropRate += DegraderLoadBalancerStrategyConfig.DEFAULT_GLOBAL_STEP_UP;
    resultTC = simulateAndTestOneInterval(timeInterval, clock, qps, clients, adapter, clusterGenerationId, 1, false, overrideDropRate, 4000, false, false);
    //8th round: we'll increase the drop rate to the max.
    overrideDropRate += DegraderLoadBalancerStrategyConfig.DEFAULT_GLOBAL_STEP_UP;
    resultTC = simulateAndTestOneInterval(timeInterval, clock, qps, clients, adapter, clusterGenerationId, 1, false, overrideDropRate, 4000, false, false);
    //9th round: now we'll simulate as if there's still a call even though we drop 100% of all requests to get a
    //tracker client. The assumption is that some thread still holds a tracker client, and we want
    //to make sure we can handle the request and don't degrade the cluster even further.
    resultTC = simulateAndTestOneInterval(timeInterval, clock, qps, clients, adapter, clusterGenerationId, 1, false, overrideDropRate, 4000, false, false);
    //10th round: now we'll simulate as if there's no call because we dropped all requests.
    //Even though we are in LOAD_BALANCING mode and this tracker client is in recovery mode, there's no call,
    //so the hash ring doesn't change and we go back to reducing the drop rate to 0.8, which means the next
    //strategy is LOAD_BALANCING
    overrideDropRate -= DegraderLoadBalancerStrategyConfig.DEFAULT_GLOBAL_STEP_DOWN;
    resultTC = simulateAndTestOneInterval(timeInterval, clock, 0.0, clients, adapter, clusterGenerationId, 1, false, overrideDropRate, 4000, false, false);
    //11th round: this time we'll simulate a latency of 1000 ms (within the low and high watermarks). The drop rate
    //and everything else should stay the same
    resultTC = simulateAndTestOneInterval(timeInterval, clock, qps, clients, adapter, clusterGenerationId, 1, false, overrideDropRate, 1000, false, false);
    //we'll simulate the clients dying one by one until all the clients are gone
    int numberOfClients = clients.size();
    HashSet<URI> uris = new HashSet<URI>();
    HashSet<URI> removedUris = new HashSet<URI>();
    for (TrackerClient client : clients) {
        uris.add(client.getUri());
    }
    LinkedList<TrackerClient> removedClients = new LinkedList<TrackerClient>();
    //the LOAD_BALANCING strategy will always be picked because there are no hash ring changes
    boolean isLoadBalancingStrategyTurn = true;
    for (int i = numberOfClients; i > 0; i--) {
        TrackerClient removed = clients.remove(0);
        uris.remove(removed.getUri());
        removedClients.addLast(removed);
        removedUris.add(removed.getUri());
        clusterGenerationId++;
        resultTC = simulateAndTestOneInterval(timeInterval, clock, qps, clients, adapter, clusterGenerationId, 1, isLoadBalancingStrategyTurn, overrideDropRate, 1000, false, false);
        if (i == 1) {
            assertNull(resultTC);
        } else {
            //resultTC may be null here because the override drop rate is still 0.8
            if (resultTC != null) {
                assertTrue(uris.contains(resultTC.getUri()));
                assertFalse(removedUris.contains(resultTC.getUri()));
            }
        }
    }
    assertTrue(uris.isEmpty());
    assertTrue(clients.isEmpty());
    assertEquals(removedUris.size(), numberOfClients);
    assertEquals(removedClients.size(), numberOfClients);
    //we'll simulate the clients reviving one by one until all clients are back up again
    for (int i = numberOfClients; i > 0; i--) {
        TrackerClient added = removedClients.remove(0);
        //we have to create a new client. The old client has a degraded DegraderImpl. And in a production environment,
        //when a new client joins a cluster, it should be in a good state. This means there should be 100 points
        //in the hash ring for this client
        TrackerClient newClient = new TrackerClient(added.getUri(), getDefaultPartitionData(1d), new TestLoadBalancerClient(added.getUri()), clock, degraderConfig);
        clients.add(newClient);
        uris.add(added.getUri());
        removedUris.remove(added.getUri());
        clusterGenerationId++;
        resultTC = simulateAndTestOneInterval(timeInterval, clock, qps, clients, adapter, clusterGenerationId, 100, isLoadBalancingStrategyTurn, overrideDropRate, 1000, false, false);
        if (resultTC != null) {
            assertTrue(uris.contains(resultTC.getUri()));
            assertFalse(removedUris.contains(resultTC.getUri()));
        }
    }
    //we keep stepping the drop rate down; the number of points stays the same because there are no hash ring changes
    for (overrideDropRate -= DegraderLoadBalancerStrategyConfig.DEFAULT_GLOBAL_STEP_DOWN; overrideDropRate >= 0; overrideDropRate -= DegraderLoadBalancerStrategyConfig.DEFAULT_GLOBAL_STEP_DOWN) {
        resultTC = simulateAndTestOneInterval(timeInterval, clock, qps, clients, adapter, clusterGenerationId, 100, false, overrideDropRate, 300, false, false);
    }
    //we should have recovered fully by this time
    overrideDropRate = 0.0;
    resultTC = simulateAndTestOneInterval(timeInterval, clock, qps, clients, adapter, clusterGenerationId, 100, false, overrideDropRate, 300, false, false);
    assertNotNull(resultTC);
    clusterGenerationId++;
    //simulate that an increased rate of certain errors (ConnectException, ClosedChannelException) causes degradation.
    simulateAndTestOneInterval(timeInterval, clock, qps, clients, adapter, clusterGenerationId, 80, true, 0.0, 300, false, true);
    //switch to the call-dropping strategy
    simulateAndTestOneInterval(timeInterval, clock, qps, clients, adapter, clusterGenerationId, 80, false, 0.0, 300, false, true);
    //continue the degradation
    simulateAndTestOneInterval(timeInterval, clock, qps, clients, adapter, clusterGenerationId, 39, true, 0.0, 300, false, true);
    simulateAndTestOneInterval(timeInterval, clock, qps, clients, adapter, clusterGenerationId, 39, false, 0.0, 300, false, true);
    simulateAndTestOneInterval(timeInterval, clock, qps, clients, adapter, clusterGenerationId, 1, true, 0.0, 300, false, true);
    //now let's remove all the errors and see how the cluster recovers, but we have to wait until the next round
    //because this round uses the CALL_DROPPING strategy
    simulateAndTestOneInterval(timeInterval, clock, qps, clients, adapter, clusterGenerationId, 1, false, 0.0, 300, false, false);
    simulateAndTestOneInterval(timeInterval, clock, qps, clients, adapter, clusterGenerationId, 39, true, 0.0, 300, false, false);
    simulateAndTestOneInterval(timeInterval, clock, qps, clients, adapter, clusterGenerationId, 39, false, 0.0, 300, false, false);
    simulateAndTestOneInterval(timeInterval, clock, qps, clients, adapter, clusterGenerationId, 80, true, 0.0, 300, false, false);
    simulateAndTestOneInterval(timeInterval, clock, qps, clients, adapter, clusterGenerationId, 80, false, 0.0, 300, false, false);
    simulateAndTestOneInterval(timeInterval, clock, qps, clients, adapter, clusterGenerationId, 100, true, 0.0, 300, false, false);
    //make sure that if we see errors that are not CONNECT_EXCEPTION or CLOSED_CHANNEL_EXCEPTION we don't degrade
    simulateAndTestOneInterval(timeInterval, clock, qps, clients, adapter, clusterGenerationId, 100, false, 0.0, 300, true, false);
    //since there's no change in the hash ring (the errors are not CONNECT_EXCEPTION or CLOSED_CHANNEL_EXCEPTION),
    //the strategy won't change to CALL_DROPPING
    simulateAndTestOneInterval(timeInterval, clock, qps, clients, adapter, clusterGenerationId, 100, false, 0.0, 300, true, false);
}
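The stepped arithmetic above is easier to follow as a worked trace. The sketch below assumes DEFAULT_GLOBAL_STEP_UP and DEFAULT_GLOBAL_STEP_DOWN are both 0.2, which the round-10 comment ("reducing the drop rate to 0.8" after the rate maxed out) implies but this snippet does not confirm:

double rate = 0.0;    // hypothetical trace, assuming both step constants are 0.2
rate += 0.2;          // 2nd round: 0.2
rate += 0.2;          // 4th round: 0.4
rate += 0.2;          // 6th round: 0.6
rate += 0.2;          // 7th round: 0.8 (hash ring already at its minimum of 1 point)
rate += 0.2;          // 8th round: 1.0, every request is now dropped
rate -= 0.2;          // 10th round: 0.8, recovery begins once an interval sees no calls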
use of java.util.LinkedList in project JsonPath by jayway.
the class JsonContext method read.
@Override
public <T> T read(String path, Predicate... filters) {
    notEmpty(path, "path can not be null or empty");
    Cache cache = CacheProvider.getCache();
    path = path.trim();
    //the compiled JsonPath is cached under a key built from both the path and its filters
    LinkedList<Predicate> filterStack = new LinkedList<Predicate>(asList(filters));
    String cacheKey = Utils.concat(path, filterStack.toString());
    JsonPath jsonPath = cache.get(cacheKey);
    if (jsonPath != null) {
        return read(jsonPath);
    } else {
        jsonPath = compile(path, filters);
        cache.put(cacheKey, jsonPath);
        return read(jsonPath);
    }
}
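For context, here is a minimal caller that exercises this method through the public JsonPath API; the JSON document and the price filter are illustrative only:

import static com.jayway.jsonpath.Criteria.where;
import static com.jayway.jsonpath.Filter.filter;
import com.jayway.jsonpath.JsonPath;
import java.util.List;

public class ReadExample {
    public static void main(String[] args) {
        String json = "{\"store\":{\"book\":[{\"title\":\"Moby Dick\",\"price\":8.95},{\"title\":\"Sword of Honour\",\"price\":12.99}]}}";
        //the [?] placeholder is bound to the Predicate varargs passed to read(..);
        //those same filters become part of the cache key built in the method above
        List<Object> cheap = JsonPath.parse(json).read("$.store.book[?].title", filter(where("price").lt(10.0)));
        System.out.println(cheap); // ["Moby Dick"]
    }
}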
use of java.util.LinkedList in project Smack by igniterealtime.
the class DNSUtil method sortSRVRecords.
/**
 * Sort a given list of SRVRecords as described in RFC 2782.
 * Note that we follow the RFC with one exception: in a group of the same priority, only the first entry
 * is chosen at random. The others are simply ordered by their priority.
 *
 * @param records the SRV records to sort
 * @return the list of resolved HostAddresses
 */
private static List<HostAddress> sortSRVRecords(List<SRVRecord> records) {
    // RFC 2782, Usage rules: "If there is precisely one SRV RR, and its Target is "."
    // (the root domain), abort."
    if (records.size() == 1 && records.get(0).getFQDN().equals("."))
        return Collections.emptyList();
    // sorting the records improves the performance of the bisection later
    Collections.sort(records);
    // create the priority buckets
    SortedMap<Integer, List<SRVRecord>> buckets = new TreeMap<Integer, List<SRVRecord>>();
    for (SRVRecord r : records) {
        Integer priority = r.getPriority();
        List<SRVRecord> bucket = buckets.get(priority);
        // create the list of SRVRecords if it doesn't exist
        if (bucket == null) {
            bucket = new LinkedList<SRVRecord>();
            buckets.put(priority, bucket);
        }
        bucket.add(r);
    }
    List<HostAddress> res = new ArrayList<HostAddress>(records.size());
    for (Integer priority : buckets.keySet()) {
        List<SRVRecord> bucket = buckets.get(priority);
        int bucketSize;
        while ((bucketSize = bucket.size()) > 0) {
            int[] totals = new int[bucketSize];
            int running_total = 0;
            int count = 0;
            int zeroWeight = 1;
            for (SRVRecord r : bucket) {
                if (r.getWeight() > 0) {
                    zeroWeight = 0;
                    break;
                }
            }
            for (SRVRecord r : bucket) {
                running_total += (r.getWeight() + zeroWeight);
                totals[count] = running_total;
                count++;
            }
            int selectedPos;
            if (running_total == 0) {
                // If the running total is 0, then all weights in this priority
                // group are 0. So we simply select one of the entries randomly,
                // as the 'normal' algorithm is unable to handle this case
                selectedPos = (int) (Math.random() * bucketSize);
            } else {
                double rnd = Math.random() * running_total;
                selectedPos = bisect(totals, rnd);
            }
            // append the SRVRecord that was randomly chosen based on its weight
            // to the result list
            SRVRecord chosenSRVRecord = bucket.remove(selectedPos);
            res.add(chosenSRVRecord);
        }
    }
    return res;
}
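The bisect helper called above is not part of this snippet. Here is a minimal sketch of what it has to do, under the assumption that totals is the non-decreasing array of running weight sums built in the loop:

// Hypothetical stand-in for the bisect(..) helper referenced above: returns the
// index of the first running total strictly greater than the random value, which
// is the weighted selection RFC 2782 calls for. Since the array is sorted, a
// binary search would work equally well.
private static int bisect(int[] totals, double value) {
    int pos = 0;
    for (int total : totals) {
        if (value < total) {
            break;
        }
        pos++;
    }
    return pos;
}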
use of java.util.LinkedList in project Smack by igniterealtime.
the class SmackExceptionTest method testConnectionException.
@Test
public void testConnectionException() throws UnknownHostException {
    List<HostAddress> failedAddresses = new LinkedList<HostAddress>();
    String host = "foo.bar.example";
    InetAddress inetAddress = InetAddress.getByAddress(host, new byte[] { 0, 0, 0, 0 });
    List<InetAddress> inetAddresses = Collections.singletonList(inetAddress);
    HostAddress hostAddress = new HostAddress(host, 1234, inetAddresses);
    hostAddress.setException(new Exception("Failed for some reason"));
    failedAddresses.add(hostAddress);
    host = "barz.example";
    inetAddress = InetAddress.getByAddress(host, new byte[] { 0, 0, 0, 0 });
    inetAddresses = Collections.singletonList(inetAddress);
    hostAddress = new HostAddress(host, 5678, inetAddresses);
    hostAddress.setException(new Exception("Failed for some other reason"));
    failedAddresses.add(hostAddress);
    ConnectionException connectionException = ConnectionException.from(failedAddresses);
    String message = connectionException.getMessage();
    assertEquals("The following addresses failed: 'foo.bar.example:1234' failed because: java.lang.Exception: Failed for some reason, 'barz.example:5678' failed because: java.lang.Exception: Failed for some other reason", message);
}
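One detail worth calling out: InetAddress.getByAddress(host, addr) constructs the address from the given bytes without ever consulting DNS, which is what lets this test use made-up hostnames offline. A minimal standalone illustration (plain JDK, nothing Smack-specific):

import java.net.InetAddress;
import java.net.UnknownHostException;

public class NoLookupExample {
    public static void main(String[] args) throws UnknownHostException {
        // No name service is contacted: the hostname and raw bytes are taken as
        // given, so a non-existent host never causes a resolution failure here.
        InetAddress addr = InetAddress.getByAddress("foo.bar.example", new byte[] { 0, 0, 0, 0 });
        System.out.println(addr); // foo.bar.example/0.0.0.0
    }
}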
use of java.util.LinkedList in project Openfire by igniterealtime.
the class MucMamPersistenceManager method findMessages.
@Override
public Collection<ArchivedMessage> findMessages(Date startDate, Date endDate, String owner, String with, XmppResultSet xmppResultSet) {
    JID mucRoom = new JID(owner);
    MultiUserChatManager manager = XMPPServer.getInstance().getMultiUserChatManager();
    MultiUserChatService service = manager.getMultiUserChatService(mucRoom);
    MUCRoom room = service.getChatRoom(mucRoom.getNode());
    Connection connection = null;
    PreparedStatement pstmt = null;
    ResultSet rs = null;
    // If logging isn't enabled, do nothing.
    if (!room.isLogEnabled())
        return null;
    List<ArchivedMessage> msgs = new LinkedList<>();
    if (startDate == null) {
        startDate = new Date(0L);
    }
    if (endDate == null) {
        endDate = new Date();
    }
    int max = xmppResultSet.getMax();
    // TODO: Suppress this, since we don't yet have requestor information for access control.
    with = null;
    try {
        connection = DbConnectionManager.getConnection();
        StringBuilder sql = new StringBuilder(LOAD_HISTORY);
        if (with != null) {
            sql.append(WHERE_SENDER);
        }
        if (xmppResultSet.getAfter() != null) {
            sql.append(WHERE_AFTER);
        }
        if (xmppResultSet.getBefore() != null) {
            sql.append(WHERE_BEFORE);
        }
        sql.append(ORDER_BY);
        pstmt = connection.prepareStatement(sql.toString());
        pstmt.setString(1, StringUtils.dateToMillis(startDate));
        pstmt.setString(2, StringUtils.dateToMillis(endDate));
        pstmt.setLong(3, room.getID());
        // three parameters are already bound above, so the optional ones start at index 4
        int pos = 3;
        if (with != null) {
            pstmt.setString(++pos, with);
        }
        if (xmppResultSet.getAfter() != null) {
            pstmt.setLong(++pos, xmppResultSet.getAfter());
        }
        if (xmppResultSet.getBefore() != null) {
            pstmt.setLong(++pos, xmppResultSet.getBefore());
        }
        rs = pstmt.executeQuery();
        while (rs.next()) {
            String senderJID = rs.getString(1);
            String nickname = rs.getString(2);
            Date sentDate = new Date(Long.parseLong(rs.getString(3).trim()));
            String subject = rs.getString(4);
            String body = rs.getString(5);
            String stanza = rs.getString(6);
            long id = rs.getLong(7);
            if (stanza == null) {
                Message message = new Message();
                message.setType(Message.Type.groupchat);
                message.setSubject(subject);
                message.setBody(body);
                // Set the sender of the message
                if (nickname != null && nickname.trim().length() > 0) {
                    JID roomJID = room.getRole().getRoleAddress();
                    // Recreate the sender address based on the nickname and the room's JID
                    message.setFrom(new JID(roomJID.getNode(), roomJID.getDomain(), nickname, true));
                } else {
                    // Set the room as the sender of the message
                    message.setFrom(room.getRole().getRoleAddress());
                }
                stanza = message.toString();
            }
            ArchivedMessage archivedMessage = new ArchivedMessage(sentDate, ArchivedMessage.Direction.from, null, null);
            archivedMessage.setStanza(stanza);
            archivedMessage.setId(id);
            msgs.add(archivedMessage);
        }
    } catch (SQLException e) {
        Log.error("SQL failure during MAM-MUC: ", e);
    } finally {
        DbConnectionManager.closeConnection(rs, pstmt, connection);
    }
    // TODO - Not great; this really should be done with suitable LIMIT clauses.
    // That would need reversed ordering in some cases, followed by reversing the results.
    boolean complete = true;
    xmppResultSet.setCount(msgs.size());
    while (msgs.size() > max) {
        msgs.remove(msgs.size() - 1);
        complete = false;
    }
    xmppResultSet.setComplete(complete);
    if (msgs.size() > 0) {
        xmppResultSet.setFirst(msgs.get(0).getId());
        if (msgs.size() > 1) {
            xmppResultSet.setLast(msgs.get(msgs.size() - 1).getId());
        }
    }
    return msgs;
}
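As a stopgap for the LIMIT TODO above, the cap can also be pushed down to JDBC itself without dialect-specific SQL. A minimal sketch, reusing pstmt, max, msgs, and xmppResultSet from the method; this is one possible approach, not Openfire's code, and it leaves the 'before' reverse-ordering case unhandled:

// Ask the driver for at most max + 1 rows: the extra row, if present, tells us
// the page is incomplete without fetching (and then trimming) the whole history.
pstmt.setMaxRows(max + 1);
rs = pstmt.executeQuery();
// ... populate msgs exactly as in the loop above ...
boolean complete = msgs.size() <= max;
if (!complete) {
    msgs.remove(msgs.size() - 1); // drop the sentinel row
}
xmppResultSet.setComplete(complete);
// note: xmppResultSet.setCount(..) would then need a separate COUNT(*) query,
// since the result set no longer reflects the total number of matches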