Example usage of java.util.concurrent.CopyOnWriteArrayList from the project CloudNet by Dytanic: class CloudServer, method updateDisable().
/**
 * Pushes a final state update for this server to the cloud master.
 */
public void updateDisable() {
    // Snapshot the names of every player currently online on this Bukkit server.
    List<String> onlinePlayerNames = new CopyOnWriteArrayList<>();
    for (Player onlinePlayer : Bukkit.getOnlinePlayers()) {
        onlinePlayerNames.add(onlinePlayer.getName());
    }
    // NOTE(review): the hard-coded 'false' presumably flags the server as disabled/offline
    // in the ServerInfo payload — confirm against the ServerInfo constructor.
    ServerInfo serverInfo = new ServerInfo(CloudAPI.getInstance().getServiceId(), hostAdress, port, false,
            onlinePlayerNames, memory, motd, Bukkit.getOnlinePlayers().size(), maxPlayers, serverState,
            serverConfig, template);
    // Send the updated state over the cloud network connection.
    CloudAPI.getInstance().getNetworkConnection().sendPacketSynchronized(new PacketOutUpdateServerInfo(serverInfo));
}
Example usage of java.util.concurrent.CopyOnWriteArrayList from the project CloudNet by Dytanic: class ConfigPermissions, method write().
/**
 * Serialises the given permission group into the "groups" section of the
 * supplied configuration, replacing any previous entry with the same name.
 */
public void write(PermissionGroup permissionGroup, Configuration configuration) {
    Configuration groupsSection = configuration.getSection("groups");

    Configuration groupData = new Configuration();
    groupData.set("prefix", permissionGroup.getPrefix());
    groupData.set("suffix", permissionGroup.getSuffix());
    groupData.set("display", permissionGroup.getDisplay());
    groupData.set("tagId", permissionGroup.getTagId());
    groupData.set("joinPower", permissionGroup.getJoinPower());
    groupData.set("defaultGroup", permissionGroup.isDefaultGroup());

    // Flatten the permission map into "<name>" / "-<name>" strings ("-" marks a denied permission).
    Collection<String> flattenedPermissions = new CopyOnWriteArrayList<>();
    for (Map.Entry<String, Boolean> permission : permissionGroup.getPermissions().entrySet()) {
        String denyMarker = permission.getValue() ? "" : "-";
        flattenedPermissions.add(denyMarker + permission.getKey());
    }
    groupData.set("permissions", flattenedPermissions);

    // Per-server-group permissions live in their own nested section.
    Configuration serverGroupSection = new Configuration();
    for (Map.Entry<String, List<String>> serverGroup : permissionGroup.getServerGroupPermissions().entrySet()) {
        serverGroupSection.set(serverGroup.getKey(), serverGroup.getValue());
    }
    groupData.set("serverGroupPermissions", serverGroupSection);

    // NOTE(review): an empty options map is padded with a dummy entry, presumably so the
    // serializer does not drop the section entirely — confirm before removing.
    if (permissionGroup.getOptions().size() == 0) {
        permissionGroup.getOptions().put("test_option", true);
    }
    groupData.set("options", permissionGroup.getOptions());
    groupData.set("implements", permissionGroup.getImplementGroups());

    // Clear any stale entry first, then write the fresh one under the group's name.
    groupsSection.set(permissionGroup.getName(), null);
    groupsSection.set(permissionGroup.getName(), groupData);
}
Example usage of java.util.concurrent.CopyOnWriteArrayList from the project herddb by diennea: class TableSpaceManager, method dumpTableSpace().
/**
 * Streams a complete dump of this tablespace to a remote peer over {@code _channel}:
 * a "start" handshake, optionally the in-flight transactions, a full scan of every
 * non-system table at its checkpoint position, optionally the commit-log entries
 * written while the dump was running, and a "finish" message.
 *
 * @param dumpId     identifier the receiver uses to correlate all messages of this dump
 * @param _channel   network channel to the receiving node
 * @param fetchSize  batch size used by the per-table scan consumer
 * @param includeLog when true, also captures and sends transactions and commit-log
 *                   entries produced during the dump
 * @throws DataStorageManagerException on storage errors while checkpointing
 * @throws LogNotAvailableException    if the commit log cannot be read
 */
void dumpTableSpace(String dumpId, Channel _channel, int fetchSize, boolean includeLog) throws DataStorageManagerException, LogNotAvailableException {
LOGGER.log(Level.SEVERE, "dumpTableSpace dumpId:" + dumpId + " channel " + _channel + " fetchSize:" + fetchSize + ", includeLog:" + includeLog);
TableSpaceCheckpoint checkpoint;
// CopyOnWriteArrayList: the commit-log listener appends from the log's thread while
// this thread later reads, sorts and sends the entries, so a concurrent list is needed.
List<DumpedLogEntry> txlogentries = new CopyOnWriteArrayList<>();
CommitLogListener logDumpReceiver = new CommitLogListener() {
@Override
public void logEntry(LogSequenceNumber logPos, LogEntry data) {
// we are going to capture all the changes to the tablespace during the dump, in order to replay
// eventually 'missed' changes during the dump
txlogentries.add(new DumpedLogEntry(logPos, data.serialize()));
LOGGER.log(Level.SEVERE, "dumping entry " + logPos + ", " + data + " nentries: " + txlogentries.size());
}
};
// Exclusive lock while attaching the listener and taking the checkpoint, so the
// checkpoint and the captured log tail line up without a gap.
generalLock.writeLock().lock();
try {
if (includeLog) {
log.attachCommitLogListener(logDumpReceiver);
}
// NOTE(review): checkpoint(true, true) presumably pins the per-table checkpoints
// (they are unpinned in the finally block below) — confirm the flag semantics.
checkpoint = checkpoint(true, true);
/* Downgrade lock */
generalLock.readLock().lock();
} finally {
generalLock.writeLock().unlock();
}
try {
final int timeout = 60000;
// Handshake: tell the receiver where the dump starts in the log.
Map<String, Object> startData = new HashMap<>();
startData.put("command", "start");
LogSequenceNumber logSequenceNumber = log.getLastSequenceNumber();
startData.put("ledgerid", logSequenceNumber.ledgerId);
startData.put("offset", logSequenceNumber.offset);
Message response_to_start = _channel.sendMessageWithReply(Message.TABLESPACE_DUMP_DATA(null, tableSpaceName, dumpId, startData), timeout);
if (response_to_start.type != Message.TYPE_ACK) {
LOGGER.log(Level.SEVERE, "error response at start command: " + response_to_start.parameters);
return;
}
if (includeLog) {
// Send the transactions that were in flight at the checkpoint, in batches of 10.
List<Transaction> transactionsSnapshot = new ArrayList<>();
dataStorageManager.loadTransactions(logSequenceNumber, tableSpaceUUID, transactionsSnapshot::add);
List<Transaction> batch = new ArrayList<>();
for (Transaction t : transactionsSnapshot) {
batch.add(t);
if (batch.size() == 10) {
// NOTE(review): nothing here clears 'batch' — sendTransactionsDump is presumably
// responsible for emptying it after sending, otherwise entries would be re-sent
// on every later iteration; confirm against its implementation.
sendTransactionsDump(batch, _channel, dumpId, timeout, response_to_start);
}
}
// Flush the final partial batch (and a possibly empty one).
sendTransactionsDump(batch, _channel, dumpId, timeout, response_to_start);
}
// Stream every non-system table at its checkpointed position.
for (Entry<String, LogSequenceNumber> entry : checkpoint.tablesCheckpoints.entrySet()) {
final AbstractTableManager tableManager = tables.get(entry.getKey());
final LogSequenceNumber sequenceNumber = entry.getValue();
if (tableManager.isSystemTable()) {
continue;
}
try {
FullTableScanConsumer sink = new SingleTableDumper(tableSpaceName, tableManager, _channel, dumpId, timeout, fetchSize);
tableManager.dump(sequenceNumber, sink);
} catch (DataStorageManagerException err) {
// Abort the whole dump: tell the receiver, log, and bail out (finally still cleans up).
Map<String, Object> errorOnData = new HashMap<>();
errorOnData.put("command", "error");
_channel.sendMessageWithReply(Message.TABLESPACE_DUMP_DATA(null, tableSpaceName, dumpId, errorOnData), timeout);
LOGGER.log(Level.SEVERE, "error sending dump id " + dumpId, err);
return;
}
}
// Replay the log entries captured while the dump ran, in log order.
if (!txlogentries.isEmpty()) {
txlogentries.sort(Comparator.naturalOrder());
sendDumpedCommitLog(txlogentries, _channel, dumpId, timeout);
}
// Final message carries the log position reached at the end of the dump.
Map<String, Object> finishData = new HashMap<>();
LogSequenceNumber finishLogSequenceNumber = log.getLastSequenceNumber();
finishData.put("ledgerid", finishLogSequenceNumber.ledgerId);
finishData.put("offset", finishLogSequenceNumber.offset);
finishData.put("command", "finish");
_channel.sendOneWayMessage(Message.TABLESPACE_DUMP_DATA(null, tableSpaceName, dumpId, finishData), new SendResultCallback() {
@Override
public void messageSent(Message originalMessage, Throwable error) {
}
});
} catch (InterruptedException | TimeoutException error) {
// NOTE(review): the interrupt flag is not restored (Thread.currentThread().interrupt())
// and 'error' itself is not logged — only the dump id is.
LOGGER.log(Level.SEVERE, "error sending dump id " + dumpId);
} finally {
generalLock.readLock().unlock();
if (includeLog) {
log.removeCommitLogListener(logDumpReceiver);
}
// Release the checkpoints pinned by checkpoint(true, true) above.
for (Entry<String, LogSequenceNumber> entry : checkpoint.tablesCheckpoints.entrySet()) {
dataStorageManager.unPinTableCheckpoint(tableSpaceUUID, entry.getKey(), entry.getValue());
}
}
}
Example usage of java.util.concurrent.CopyOnWriteArrayList from the project Wicket by Apache: class StoredResponsesMapTest, method heavyLoad().
/**
 * Stress test for {@link StoredResponsesMap}: many concurrent threads repeatedly
 * insert entries and remove randomly chosen earlier ones.
 *
 * See <a href="https://issues.apache.org/jira/browse/WICKET-3736">WICKET-3736</a>.
 *
 * Disabled by default because it is slow (~ 30secs). Enable it when we have
 * categorized tests ({@link Category}) and run slow ones only at Apache CI servers.
 *
 * @throws InterruptedException
 */
@Test
public void heavyLoad() throws InterruptedException {
    final int threadCount = 100;
    final int iterationsPerThread = 1000;
    final CountDownLatch allThreadsReady = new CountDownLatch(threadCount);
    final CountDownLatch allThreadsDone = new CountDownLatch(threadCount);
    final SecureRandom random = new SecureRandom();
    final StoredResponsesMap map = new StoredResponsesMap(1000, Duration.seconds(60));
    // CopyOnWriteArrayList: concurrently appended and read by all worker threads.
    final List<String> insertedKeys = new CopyOnWriteArrayList<String>();
    final Runnable worker = new Runnable() {
        @Override
        public void run() {
            allThreadsReady.countDown();
            try {
                // Barrier: no thread mutates the map until every thread is running.
                allThreadsReady.await();
            } catch (InterruptedException e) {
                throw new RuntimeException(e);
            }
            for (int i = 0; i < iterationsPerThread; i++) {
                final String newKey = "abc" + (random.nextDouble() * iterationsPerThread);
                insertedKeys.add(newKey);
                map.put(newKey, new BufferedWebResponse(null));
                // Remove a randomly chosen, previously inserted key.
                final int bound = insertedKeys.size() - 1;
                final int victimIndex = bound == 0 ? 0 : random.nextInt(bound);
                final String victimKey = insertedKeys.get(victimIndex);
                map.remove(victimKey);
            }
            allThreadsDone.countDown();
        }
    };
    for (int i = 0; i < threadCount; i++) {
        new Thread(worker).start();
    }
    allThreadsDone.await();
}
Example usage of java.util.concurrent.CopyOnWriteArrayList from the project powerbot by powerbot: class Players, method get().
/**
 * Returns a snapshot of the players currently loaded by the client.
 *
 * The returned list is a fresh, thread-safe {@link CopyOnWriteArrayList};
 * it is empty when the client or its player arrays are unavailable.
 *
 * @return list of wrapped {@code Player}s, never {@code null}
 */
@Override
public List<Player> get() {
    final List<Player> r = new CopyOnWriteArrayList<Player>();
    final Client client = ctx.client();
    if (client == null) {
        return r;
    }
    final int[] indices = client.getPlayerIndices();
    final org.powerbot.bot.rt4.client.Player[] players = client.getPlayers();
    if (indices == null || players == null) {
        return r;
    }
    // The index array can be larger than the live player count; only walk live entries.
    final int count = Math.min(client.getPlayerCount(), indices.length);
    for (int index = 0; index < count; index++) {
        final int k = indices[index];
        // Fix: guard against stale/out-of-range indices and empty player slots, which
        // previously threw ArrayIndexOutOfBoundsException / NullPointerException
        // when the client mutated its arrays mid-snapshot.
        if (k < 0 || k >= players.length) {
            continue;
        }
        final org.powerbot.bot.rt4.client.Player p = players[k];
        if (p != null && p.obj.get() != null) {
            r.add(new Player(ctx, p));
        }
    }
    return r;
}
End of aggregated CopyOnWriteArrayList usage examples.