Usage of java.util.concurrent.atomic.AtomicLong in the OpenAM project (OpenRock): class RateWindow, method incrementForTimestamp.
/**
 * Re-calculates the rate.
 *
 * Records one event against the window slot derived from the given timestamp,
 * creating the slot on first use, back-filling any skipped slots, and pruning
 * slots that have fallen out of the window. Slot creation uses putIfAbsent so
 * concurrent callers converge on a single counter per slot.
 *
 * @param timestamp The millisecond timestamp of the event.
 */
public void incrementForTimestamp(final long timestamp) {
long index = getIndexForTimestamp(timestamp);
// Events older than the window are deliberately dropped.
if (isWithinWindow(index)) {
AtomicLong rate = window.get(index);
if (rate == null) {
//fill in the RateWindow until the current index
fillInWindow(index - 1);
rate = new AtomicLong(0);
AtomicLong previousValue = window.putIfAbsent(index, rate);
if (previousValue == null) {
//this is a new entry, hence we should clear out old entries to prevent memory leak
// NOTE(review): assumes 'window' is a ConcurrentNavigableMap keyed by slot index
// and 'size' is the window length in slots — confirm against the class fields.
window.headMap(window.lastKey() - size, true).clear();
} else {
// Another thread created this slot first; increment its counter instead.
rate = previousValue;
}
}
rate.incrementAndGet();
}
}
Usage of java.util.concurrent.atomic.AtomicLong in the VoltDB project (VoltDB): class CoreUtils, method getThreadFactory.
/**
 * Creates a thread factory that creates threads within a thread group if
 * the group name is given. Every produced thread is a daemon, optionally
 * carries a running numeric suffix in its name, logs any unhandled
 * {@link Throwable} to the HOST logger, and runs the thread-local
 * deallocator when its task finishes.
 *
 * @param groupName name for a new thread group, or {@code null} for none
 * @param name base name applied to every created thread
 * @param stackSize requested stack size for created threads
 * @param incrementThreadNames whether to append a per-factory counter to each name
 * @param coreList optional queue of core identifiers handed out one per thread
 * @return a factory producing daemon threads configured as above
 */
public static ThreadFactory getThreadFactory(final String groupName, final String name, final int stackSize, final boolean incrementThreadNames, final Queue<String> coreList) {
    ThreadGroup createdGroup = null;
    if (groupName != null) {
        createdGroup = new ThreadGroup(Thread.currentThread().getThreadGroup(), groupName);
    }
    final ThreadGroup threadGroup = createdGroup;
    return new ThreadFactory() {
        // Running counter; only reflected in thread names when incrementThreadNames is set.
        private final AtomicLong m_createdThreadCount = new AtomicLong(0);

        @Override
        public synchronized Thread newThread(final Runnable r) {
            final String threadName = name + (incrementThreadNames ? " - " + m_createdThreadCount.getAndIncrement() : "");
            // Hand out at most one core identifier per created thread, if any remain.
            final String core = (coreList != null && !coreList.isEmpty()) ? coreList.poll() : null;
            final Runnable wrapped = () -> {
                if (core != null) {
                    // Remove Affinity for now to make this dependency disappear from the client.
                    // Goal is to remove client dependency on this class in the medium term.
                    //PosixJNAAffinity.INSTANCE.setAffinity(core);
                }
                try {
                    r.run();
                } catch (Throwable t) {
                    new VoltLogger("HOST").error("Exception thrown in thread " + threadName, t);
                } finally {
                    m_threadLocalDeallocator.run();
                }
            };
            final Thread thread = new Thread(threadGroup, wrapped, threadName, stackSize);
            thread.setDaemon(true);
            return thread;
        }
    };
}
Usage of java.util.concurrent.atomic.AtomicLong in the VoltDB project (VoltDB): class TableHelper, method fillTableWithBigintPkey.
/**
 * Load random data into a partitioned table in VoltDB that has a bigint pkey.
 *
 * If the VoltTable indicates which column is its pkey, then it will use it, but otherwise it will
 * assume the first column is the bigint pkey. Note, this works with other integer keys, but
 * your keyspace is pretty small.
 *
 * If mb == 0, then maxRows is used. If maxRows == 0, then mb is used.
 *
 * Inserts are issued asynchronously; a background thread polls the server RSS
 * so the main loop can stop once the memory target is reached.
 *
 * @param table Table with or without schema metadata.
 * @param mb Target RSS (approximate)
 * @param maxRows Target maximum rows
 * @param client To load with.
 * @param offset Generated pkey values start here.
 * @param jump Generated pkey values increment by this value.
 * @throws Exception
 */
public void fillTableWithBigintPkey(VoltTable table, int mb, long maxRows, final Client client, long offset, long jump) throws Exception {
// make sure some kind of limit is set
assert ((maxRows > 0) || (mb > 0));
assert (maxRows >= 0);
assert (mb >= 0);
// A zero limit means "unbounded": fall back to the other limit.
final int mbTarget = mb > 0 ? mb : Integer.MAX_VALUE;
if (maxRows == 0) {
maxRows = Long.MAX_VALUE;
}
System.out.printf("Filling table %s with rows starting with pkey id %d (every %d rows) until either RSS=%dmb or rowcount=%d\n", table.m_extraMetadata.name, offset, jump, mbTarget, maxRows);
// find the primary key, assume first col if not found
int pkeyColIndex = getBigintPrimaryKeyIndexIfExists(table);
if (pkeyColIndex == -1) {
pkeyColIndex = 0;
assert (table.getColumnType(0).isBackendIntegerType());
}
// Latest observed server RSS in MB, shared with the polling thread below.
final AtomicLong rss = new AtomicLong(0);
// Callback only validates status; row contents are fire-and-forget.
ProcedureCallback insertCallback = new ProcedureCallback() {
@Override
public void clientCallback(ClientResponse clientResponse) throws Exception {
if (clientResponse.getStatus() != ClientResponse.SUCCESS) {
System.out.println("Error in loader callback:");
System.out.println(((ClientResponseImpl) clientResponse).toJSONString());
assert (false);
}
}
};
// update the rss value asynchronously
final AtomicBoolean rssThreadShouldStop = new AtomicBoolean(false);
Thread rssThread = new Thread() {
@Override
public void run() {
long tempRss = rss.get();
long rssPrev = tempRss;
while (!rssThreadShouldStop.get()) {
tempRss = MiscUtils.getMBRss(client);
if (tempRss != rssPrev) {
rssPrev = tempRss;
rss.set(tempRss);
System.out.printf("RSS=%dmb\n", tempRss);
// bail when done
if (tempRss > mbTarget) {
return;
}
}
try {
Thread.sleep(2000);
} catch (Exception e) {
// NOTE(review): interruption is silently swallowed and the loop continues;
// shutdown relies solely on rssThreadShouldStop — confirm this is intended.
}
}
}
};
// load rows until RSS goal is met (status print every 100k)
long i = offset;
long rows = 0;
rssThread.start();
final String insertProcName = table.m_extraMetadata.name.toUpperCase() + ".insert";
RandomRowMaker filler = createRandomRowMaker(table, Integer.MAX_VALUE, false, false);
while (rss.get() < mbTarget) {
Object[] row = filler.randomRow();
row[pkeyColIndex] = i;
client.callProcedure(insertCallback, insertProcName, row);
rows++;
if ((rows % 100000) == 0) {
System.out.printf("Loading 100000 rows. %d inserts sent (%d max id).\n", rows, i);
}
// if row limit is set, break if it's hit
if (rows >= maxRows) {
break;
}
i += jump;
}
// Stop the poller, wait for outstanding inserts, then join the poller.
rssThreadShouldStop.set(true);
client.drain();
rssThread.join();
System.out.printf("Filled table %s with %d rows and now RSS=%dmb\n", table.m_extraMetadata.name, rows, rss.get());
}
Usage of java.util.concurrent.atomic.AtomicLong in the VoltDB project (VoltDB): class ExportOnServerVerifier, method verifySetup.
/**
 * Establishes SSH/SFTP sessions to the remote hosts and initializes the
 * local per-partition bookkeeping used by the verifier.
 *
 * Expected arguments:
 *   args[0] comma-separated remote hosts, each formatted as "host:remote-path"
 *   args[1] number of partitions
 *   args[2] local directory for transaction-ID data (created if missing)
 *   args[3] optional "skinny" boolean flag
 *
 * @return the parsed "skinny" flag; false when args[3] is absent or blank
 * @throws Exception on SSH/SFTP failures or if the local path cannot be created
 */
boolean verifySetup(String[] args) throws Exception {
String[] remoteHosts = args[0].split(",");
final String homeDir = System.getProperty("user.home");
final String sshDir = homeDir + File.separator + ".ssh";
final String sshConfigPath = sshDir + File.separator + "config";
//Oh yes...
loadAllPrivateKeys(new File(sshDir));
// Honor the user's ~/.ssh/config and known_hosts when they exist.
OpenSshConfig sshConfig = null;
if (new File(sshConfigPath).exists()) {
sshConfig = new OpenSshConfig(new File(sshConfigPath));
}
final String defaultKnownHosts = sshDir + "/known_hosts";
if (new File(defaultKnownHosts).exists()) {
m_jsch.setKnownHosts(defaultKnownHosts);
}
for (String hostString : remoteHosts) {
String[] split = hostString.split(":");
String host = split[0];
RemoteHost rh = new RemoteHost();
// NOTE(review): assumes every host entry contains "host:path" — a missing
// colon throws ArrayIndexOutOfBoundsException here; confirm input format.
rh.path = split[1];
// Defaults, possibly overridden by the per-host ssh config entry below.
String user = System.getProperty("user.name");
int port = 22;
File identityFile = null;
String configHost = host;
if (sshConfig != null) {
OpenSshConfig.Host hostConfig = sshConfig.lookup(host);
if (hostConfig.getUser() != null) {
user = hostConfig.getUser();
}
if (hostConfig.getPort() != -1) {
port = hostConfig.getPort();
}
if (hostConfig.getIdentityFile() != null) {
identityFile = hostConfig.getIdentityFile();
}
if (hostConfig.getHostName() != null) {
configHost = hostConfig.getHostName();
}
}
Session session = null;
// A host-specific identity file gets its own JSch instance; otherwise
// reuse the shared one (which already has all private keys loaded).
if (identityFile != null) {
JSch jsch = new JSch();
jsch.addIdentity(identityFile.getAbsolutePath());
session = jsch.getSession(user, configHost, port);
} else {
session = m_jsch.getSession(user, configHost, port);
}
rh.session = session;
session.setConfig("StrictHostKeyChecking", "no");
session.setDaemonThread(true);
session.connect();
final ChannelSftp channel = (ChannelSftp) session.openChannel("sftp");
rh.channel = channel;
channel.connect();
touchActiveTracker(rh);
m_hosts.add(rh);
}
// Seed the per-partition tracking structures.
m_partitions = Integer.parseInt(args[1]);
for (int i = 0; i < m_partitions; i++) {
m_rowTxnIds.put(i, new TreeMap<Long, Long>());
m_maxPartTxId.put(i, Long.MIN_VALUE);
m_checkedUpTo.put(i, 0);
m_readUpTo.put(i, new AtomicLong(0));
}
m_clientPath = new File(args[2]);
if (!m_clientPath.exists() || !m_clientPath.isDirectory()) {
if (!m_clientPath.mkdir()) {
throw new IOException("Issue with transaction ID path");
}
}
// Ensure each remote path exists and is a directory, creating it if absent.
for (RemoteHost rh : m_hosts) {
boolean existsOrIsDir = true;
try {
SftpATTRS stat = rh.channel.stat(rh.path);
if (!stat.isDir()) {
existsOrIsDir = false;
}
} catch (SftpException e) {
if (e.id == ChannelSftp.SSH_FX_NO_SUCH_FILE) {
existsOrIsDir = false;
} else {
Throwables.propagate(e);
}
}
if (!existsOrIsDir) {
rh.channel.mkdir(rh.path);
}
}
boolean skinny = false;
if (args.length > 3 && args[3] != null && !args[3].trim().isEmpty()) {
skinny = Boolean.parseBoolean(args[3].trim().toLowerCase());
}
return skinny;
}
Usage of java.util.concurrent.atomic.AtomicLong in the VoltDB project (VoltDB): class AtomicLongMap, method putIfAbsent.
/*
* ConcurrentMap operations which we may eventually add.
*
* The problem with these is that remove(K, long) has to be done in two phases by definition ---
* first decrementing to zero, and then removing. putIfAbsent or replace could observe the
* intermediate zero-state. Ways we could deal with this are:
*
* - Don't define any of the ConcurrentMap operations. This is the current state of affairs.
*
* - Define putIfAbsent and replace as treating zero and absent identically (as currently
* implemented below). This is a bit surprising with putIfAbsent, which really becomes
* putIfZero.
*
* - Allow putIfAbsent and replace to distinguish between zero and absent, but don't implement
* remove(K, long). Without any two-phase operations it becomes feasible for all remaining
* operations to distinguish between zero and absent. If we do this, then perhaps we should add
* replace(key, long).
*
* - Introduce a special-value private static final AtomicLong that would have the meaning of
* removal-in-progress, and rework all operations to properly distinguish between zero and
* absent.
*/
/**
 * If {@code key} is not already associated with a value or if {@code key} is
 * associated with zero, associates it with {@code newValue}. Returns the
 * previous value for {@code key}, or zero if there was no (nonzero) mapping.
 * Note that zero and "absent" are deliberately treated identically here.
 */
long putIfAbsent(K key, long newValue) {
    for (;;) {
        AtomicLong existing = map.get(key);
        if (existing == null) {
            AtomicLong raced = map.putIfAbsent(key, new AtomicLong(newValue));
            if (raced == null) {
                // We installed the new mapping; there was no previous value.
                return 0L;
            }
            // Another thread inserted first; inspect its counter below.
            existing = raced;
        }
        long current = existing.get();
        if (current != 0L) {
            return current;
        }
        // Never compareAndSet a zero — zero counts as absent, so swap the
        // whole map entry for a fresh AtomicLong instead.
        if (map.replace(key, existing, new AtomicLong(newValue))) {
            return 0L;
        }
        // The entry changed under us; retry from the top.
    }
}
Aggregations