Use of java.util.concurrent.LinkedBlockingQueue in project voltdb by VoltDB.
The class ThreadLocalRandom, method getInitialSeedUniquifier.
public static synchronized long getInitialSeedUniquifier() {
    // Use the value set via the setter.
    long initialSeedUniquifier = ThreadLocalRandom.initialSeedUniquifier;
    if (initialSeedUniquifier == 0) {
        // Use the system property value.
        ThreadLocalRandom.initialSeedUniquifier = initialSeedUniquifier = AccessController.doPrivileged(new PrivilegedAction<Long>() {
            @Override
            public Long run() {
                return Long.getLong("io.netty.initialSeedUniquifier", 0);
            }
        });
    }
    // Otherwise, generate one.
    if (initialSeedUniquifier == 0) {
        boolean secureRandom = AccessController.doPrivileged(new PrivilegedAction<Boolean>() {
            @Override
            public Boolean run() {
                return Boolean.getBoolean("java.util.secureRandomSeed");
            }
        });
        if (secureRandom) {
            // Try to generate a real random number from /dev/random.
            // Get it from a different thread to avoid blocking indefinitely on a machine without much entropy.
            final BlockingQueue<Long> queue = new LinkedBlockingQueue<Long>();
            Thread generatorThread = new Thread("initialSeedUniquifierGenerator") {
                @Override
                public void run() {
                    // Get the real random seed from /dev/random
                    SecureRandom random = new SecureRandom();
                    final byte[] seed = random.generateSeed(8);
                    long s = ((long) seed[0] & 0xff) << 56
                            | ((long) seed[1] & 0xff) << 48
                            | ((long) seed[2] & 0xff) << 40
                            | ((long) seed[3] & 0xff) << 32
                            | ((long) seed[4] & 0xff) << 24
                            | ((long) seed[5] & 0xff) << 16
                            | ((long) seed[6] & 0xff) << 8
                            | (long) seed[7] & 0xff;
                    queue.add(s);
                }
            };
            generatorThread.setDaemon(true);
            generatorThread.start();
            generatorThread.setUncaughtExceptionHandler(new UncaughtExceptionHandler() {
                @Override
                public void uncaughtException(Thread t, Throwable e) {
                    logger.debug("An exception has been raised by {}", t.getName(), e);
                }
            });
            // Get the random seed from the thread with a timeout.
            final long timeoutSeconds = 3;
            final long deadLine = System.nanoTime() + TimeUnit.SECONDS.toNanos(timeoutSeconds);
            boolean interrupted = false;
            for (;;) {
                long waitTime = deadLine - System.nanoTime();
                if (waitTime <= 0) {
                    generatorThread.interrupt();
                    logger.warn("Failed to generate a seed from SecureRandom within {} seconds. " + "Not enough entropy?", timeoutSeconds);
                    break;
                }
                try {
                    Long seed = queue.poll(waitTime, TimeUnit.NANOSECONDS);
                    if (seed != null) {
                        initialSeedUniquifier = seed;
                        break;
                    }
                } catch (InterruptedException e) {
                    interrupted = true;
                    logger.warn("Failed to generate a seed from SecureRandom due to an InterruptedException.");
                    break;
                }
            }
            // Just in case the initialSeedUniquifier is zero or some other constant,
            // XOR in a meaningless random number and the reversed nanotime.
            initialSeedUniquifier ^= 0x3255ecdc33bae119L;
            initialSeedUniquifier ^= Long.reverse(System.nanoTime());
            if (interrupted) {
                // Restore the interrupt status because we don't know how to/don't need to handle it here.
                Thread.currentThread().interrupt();
                // Interrupt the generator thread if it's still running,
                // in the hope that the SecureRandom provider raises an exception on interruption.
                generatorThread.interrupt();
            }
        } else {
            initialSeedUniquifier = mix64(System.currentTimeMillis()) ^ mix64(System.nanoTime());
        }
        ThreadLocalRandom.initialSeedUniquifier = initialSeedUniquifier;
    }
    return initialSeedUniquifier;
}
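The part of this method that actually uses LinkedBlockingQueue is a single-element hand-off: a daemon thread computes a seed that may block on the entropy pool, and the caller polls the queue against a deadline instead of joining the thread. Below is a minimal, self-contained sketch of that hand-off pattern, assuming nothing from the VoltDB/Netty source; the class name, thread name, and fallback expression are illustrative only.

import java.security.SecureRandom;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.TimeUnit;

public class SeedHandoffSketch {
    public static long seedWithTimeout(long timeoutSeconds) throws InterruptedException {
        final BlockingQueue<Long> queue = new LinkedBlockingQueue<Long>();
        Thread generator = new Thread("seed-generator") {
            @Override
            public void run() {
                // generateSeed may block while the OS gathers entropy
                byte[] seed = new SecureRandom().generateSeed(8);
                long s = 0;
                for (byte b : seed) {
                    s = (s << 8) | (b & 0xff);
                }
                queue.add(s);
            }
        };
        generator.setDaemon(true);
        generator.start();
        // wait for the seed, but never longer than the deadline
        Long seed = queue.poll(timeoutSeconds, TimeUnit.SECONDS);
        if (seed == null) {
            generator.interrupt();
            // fall back to a weaker but non-blocking source (illustrative choice)
            return System.nanoTime() ^ Long.reverse(System.currentTimeMillis());
        }
        return seed;
    }

    public static void main(String[] args) throws InterruptedException {
        System.out.println(Long.toHexString(seedWithTimeout(3)));
    }
}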
Use of java.util.concurrent.LinkedBlockingQueue in project voltdb by VoltDB.
The class LoadTableLoader, method run.
@Override
public void run() {
    // ratio of upsert for @Load*Table
    final float upsertratio = 0.50F;
    // ratio of upsert to an existing table for @Load*Table
    final float upserthitratio = 0.20F;
    CopyAndDeleteDataTask cdtask = new CopyAndDeleteDataTask();
    cdtask.start();
    long p;
    List<Long> cidList = new ArrayList<Long>(batchSize);
    List<Long> timeList = new ArrayList<Long>(batchSize);
    try {
        // pick up where we left off
        ClientResponse cr = TxnId2Utils.doAdHoc(client, "select nvl(max(cid)+1,0) from " + m_tableName + ";");
        p = cr.getResults()[0].asScalarLong();
        while (m_shouldContinue.get()) {
            // 1 in 3 rows gets copied and then deleted after leaving some data
            byte shouldCopy = (byte) (m_random.nextInt(3) == 0 ? 1 : 0);
            byte upsertMode = (byte) (m_random.nextFloat() < upsertratio ? 1 : 0);
            byte upsertHitMode = (byte) ((upsertMode != 0) && (m_random.nextFloat() < upserthitratio) ? 1 : 0);
            CountDownLatch latch = new CountDownLatch(batchSize);
            final BlockingQueue<Long> lcpDelQueue = new LinkedBlockingQueue<Long>();
            cidList.clear();
            timeList.clear();
            // try to insert/upsert batchSize random rows
            for (int i = 0; i < batchSize; i++) {
                m_table.clearRowData();
                m_permits.acquire();
                // Increment p so that we always get a new key.
                p++;
                long nanotime = System.nanoTime();
                m_table.addRow(p, p + nanotime, nanotime);
                cidList.add(p);
                timeList.add(nanotime);
                BlockingQueue<Long> wrkQueue;
                if (shouldCopy != 0) {
                    wrkQueue = lcpDelQueue;
                } else {
                    wrkQueue = onlyDelQueue;
                }
                boolean success;
                try {
                    if (!m_isMP) {
                        Object rpartitionParam = VoltType.valueToBytes(m_table.fetchRow(0).get(m_partitionedColumnIndex, VoltType.BIGINT));
                        if (upsertHitMode != 0) {
                            // to test upserting an existing row, insert it now and upsert the same row again later
                            // only insert
                            success = client.callProcedure(new InsertCallback(latch, p, shouldCopy, wrkQueue, unkQueue, loadTxnCount, (byte) 1), m_procName, rpartitionParam, m_tableName, (byte) 0, m_table);
                        } else {
                            // insert or upsert
                            success = client.callProcedure(new InsertCallback(latch, p, shouldCopy, wrkQueue, unkQueue, loadTxnCount, (byte) 2), m_procName, rpartitionParam, m_tableName, upsertMode, m_table);
                        }
                    } else {
                        if (upsertHitMode != 0) {
                            // only insert
                            success = client.callProcedure(new InsertCallback(latch, p, shouldCopy, wrkQueue, unkQueue, loadTxnCount, (byte) 3), m_procName, m_tableName, (byte) 0, m_table);
                        } else {
                            // insert or upsert
                            success = client.callProcedure(new InsertCallback(latch, p, shouldCopy, wrkQueue, unkQueue, loadTxnCount, (byte) 4), m_procName, m_tableName, upsertMode, m_table);
                        }
                    }
                    if (!success) {
                        hardStop("Failed to insert upsert for: " + p);
                    }
                    if (m_slowFlight)
                        Thread.sleep(slowDownDelayMs);
                } catch (NoConnectionsException e) {
                    // drop this lcid on the floor, we'll just move on
                    setSlowFlight();
                } catch (Exception e) {
                    hardStop(e);
                }
            }
            log.debug("Waiting for all inserts for @Load* done.");
            // Wait for all @Load{SP|MP}Done
            latch.await();
            log.debug("Done Waiting for all inserts for @Load* done.");
            // try to upsert if we want a collision
            if (upsertHitMode != 0) {
                CountDownLatch upserHitLatch = new CountDownLatch(batchSize * upsertHitMode);
                BlockingQueue<Long> cpywrkQueue = new LinkedBlockingQueue<Long>();
                BlockingQueue<Long> cpyunkQueue = new LinkedBlockingQueue<Long>();
                for (int i = 0; i < batchSize; i++) {
                    m_table.clearRowData();
                    m_permits.acquire();
                    m_table.addRow(cidList.get(i), cidList.get(i) + timeList.get(i), timeList.get(i));
                    boolean success;
                    try {
                        if (!m_isMP) {
                            Object rpartitionParam = VoltType.valueToBytes(m_table.fetchRow(0).get(m_partitionedColumnIndex, VoltType.BIGINT));
                            // upsert only
                            success = client.callProcedure(new InsertCallback(upserHitLatch, p, shouldCopy, cpywrkQueue, cpyunkQueue, upsertTxnCount, (byte) 5), m_procName, rpartitionParam, m_tableName, (byte) 1, m_table);
                        } else {
                            // upsert only
                            success = client.callProcedure(new InsertCallback(upserHitLatch, p, shouldCopy, cpywrkQueue, cpyunkQueue, upsertTxnCount, (byte) 6), m_procName, m_tableName, (byte) 1, m_table);
                        }
                        if (!success) {
                            hardStop("Failed to invoke upsert for: " + cidList.get(i));
                        }
                        if (m_slowFlight)
                            Thread.sleep(slowDownDelayMs);
                    } catch (NoConnectionsException e) {
                        // drop this lcid on the floor, we'll just move on
                        setSlowFlight();
                    } catch (Exception e) {
                        hardStop(e);
                    }
                }
                log.debug("Waiting for all upserts for @Load* done.");
                // Wait for all additional upsert @Load{SP|MP}Done
                upserHitLatch.await();
                log.debug("Done Waiting for all upserts for @Load* done.");
            }
            //log.info("to copy: " + lcpDelQueue.toString());
            cpyQueue.addAll(lcpDelQueue);
            try {
                long nextRowCount = TxnId2Utils.getRowCount(client, m_tableName);
                long nextCpRowCount = TxnId2Utils.getRowCount(client, "cp" + m_tableName);
                // report counts of successful txns
                log.info("LoadTableLoader rowcounts " + nextRowCount + "/" + nextCpRowCount + " Insert/Upsert txs: " + loadTxnCount[0] + " UpsertHit txs: " + upsertTxnCount[0] + " Copy txs: " + copyTxnCount + " Delete txn: " + deleteTxnCount);
            } catch (Exception e) {
                hardStop("getrowcount exception", e);
            }
            if (onlyDelQueue.size() > 0 && m_shouldContinue.get()) {
                List<Long> workList = new ArrayList<Long>();
                onlyDelQueue.drainTo(workList);
                //log.info("from deleteonly to delete: " + workList.toString());
                CountDownLatch odlatch = new CountDownLatch(workList.size());
                for (Long lcid : workList) {
                    try {
                        boolean success;
                        success = client.callProcedure(new DeleteCallback(odlatch, lcid, onlyDelQueue, unkQueue, null, delUnkQueue, 1, (byte) 1), m_onlydelprocName, lcid);
                        if (!success) {
                            hardStop("Failed to invoke delete for: " + lcid);
                        }
                        if (m_slowFlight)
                            Thread.sleep(slowDownDelayMs);
                    } catch (NoConnectionsException e) {
                        // requeue for next time
                        onlyDelQueue.add(lcid);
                        setSlowFlight();
                    } catch (Exception e) {
                        hardStop(e);
                    }
                }
                odlatch.await();
            }
            if (unkQueue.size() > 0 && m_shouldContinue.get()) {
                List<Long> workList = new ArrayList<Long>();
                unkQueue.drainTo(workList);
                //log.info("from unknownqueue to delete: " + workList.toString());
                CountDownLatch odlatch = new CountDownLatch(workList.size());
                for (Long lcid : workList) {
                    try {
                        boolean success;
                        success = client.callProcedure(new DeleteCallback(odlatch, lcid, unkQueue, null, null, unkQueue, -1, (byte) 3), m_onlydelprocName, lcid);
                        if (!success) {
                            hardStop("Failed to invoke delete for: " + lcid);
                        }
                        if (m_slowFlight)
                            Thread.sleep(slowDownDelayMs);
                    } catch (NoConnectionsException e) {
                        // requeue for next time
                        unkQueue.add(lcid);
                        setSlowFlight();
                    } catch (Exception e) {
                        hardStop(e);
                    }
                }
                odlatch.await();
            }
        }
    } catch (Exception e) {
        // on exception, log and end the thread, but don't kill the process
        if (e instanceof ProcCallException)
            log.error(((ProcCallException) e).getClientResponse().toString());
        hardStop("LoadTableLoader failed a procedure call for table " + m_tableName + " and the thread will now stop.", e);
    } finally {
        cdtask.shutdown();
        try {
            cdtask.join();
        } catch (InterruptedException ex) {
            log.error("CopyDelete Task was stopped.", ex);
        }
    }
}
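The loader shares several LinkedBlockingQueues between the driver thread and asynchronous procedure callbacks, waits for each batch with a CountDownLatch, and then empties a queue in bulk with drainTo. The sketch below reproduces just that producer/consumer shape with a plain ExecutorService standing in for the VoltDB client; all names are hypothetical.

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.LinkedBlockingQueue;

public class BatchDrainSketch {
    public static void main(String[] args) throws InterruptedException {
        final int batchSize = 100;
        final BlockingQueue<Long> doneQueue = new LinkedBlockingQueue<Long>();
        final CountDownLatch latch = new CountDownLatch(batchSize);
        ExecutorService pool = Executors.newFixedThreadPool(4);

        for (long id = 0; id < batchSize; id++) {
            final long key = id;
            // stands in for client.callProcedure(callback, ...): the "callback" records
            // the key on the shared queue and counts down once the asynchronous work finishes
            pool.execute(() -> {
                doneQueue.add(key);
                latch.countDown();
            });
        }

        latch.await();                       // wait for the whole batch, as run() does
        List<Long> workList = new ArrayList<Long>();
        doneQueue.drainTo(workList);         // bulk-move everything collected so far
        System.out.println("drained " + workList.size() + " keys");
        pool.shutdown();
    }
}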
Use of java.util.concurrent.LinkedBlockingQueue in project intellij-community by JetBrains.
The class EDTGuard, method create.
@NotNull
public static <T, O extends Watchable> T create(@NotNull final O target, final ProcessHandler process) {
    final Pair<LinkedBlockingQueue<Call>, LinkedBlockingQueue<Call.Result>> queue = Pair.create(new LinkedBlockingQueue<Call>(10), new LinkedBlockingQueue<Call.Result>());
    final Thread thread = new Thread("Async Invocation Thread for " + process) {

        @Override
        public void run() {
            try {
                while (!Thread.currentThread().isInterrupted()) {
                    final Call call = queue.first.take();
                    if (call != null) {
                        queue.second.offer(call.invoke());
                    }
                }
            } catch (InterruptedException e) {
                // break
            }
        }
    };
    thread.start();
    final AtomicBoolean ref = new AtomicBoolean();
    final Disposable d = new Disposable() {

        boolean disposed;

        @Override
        public void dispose() {
            if (!disposed) {
                disposed = true;
                ref.set(true);
                thread.interrupt();
            }
        }
    };
    process.addProcessListener(new ProcessAdapter() {

        @Override
        public void processTerminated(ProcessEvent event) {
            synchronized (d) {
                Disposer.dispose(d);
            }
        }

        @Override
        public void processWillTerminate(ProcessEvent event, boolean willBeDestroyed) {
            if (!willBeDestroyed) {
                synchronized (d) {
                    Disposer.dispose(d);
                }
            }
        }
    });
    final Alarm alarm = new Alarm(Alarm.ThreadToUse.POOLED_THREAD, d);
    final Alarm alarm2 = new Alarm(Alarm.ThreadToUse.POOLED_THREAD, alarm);
    final Runnable watchdog = () -> ref.set(true);
    final Runnable ping = new Runnable() {

        @Override
        public void run() {
            synchronized (d) {
                if (alarm.isDisposed()) {
                    return;
                }
                alarm2.addRequest(watchdog, 200);
                try {
                    ref.set(!target.ping());
                } catch (Exception e) {
                    ref.set(true);
                } finally {
                    alarm2.cancelRequest(watchdog);
                    alarm.addRequest(this, 500);
                }
            }
        }
    };
    alarm.addRequest(ping, 500);
    final EDTGuard guard = new EDTGuard(target, queue, ref);
    final ClassLoader classLoader = target.getClass().getClassLoader();
    final Class<?>[] interfaces = target.getClass().getInterfaces();
    //noinspection unchecked
    return (T) Proxy.newProxyInstance(classLoader, interfaces, guard);
}
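EDTGuard pairs two LinkedBlockingQueues into a request/response channel: callers enqueue a Call on the bounded first queue and the dedicated thread pushes each Call.Result onto the second. A stripped-down sketch of the same two-queue channel, using plain String messages instead of the IntelliJ Call/Result types, might look like this:

import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.TimeUnit;

public class TwoQueueChannelSketch {
    public static void main(String[] args) throws InterruptedException {
        final BlockingQueue<String> requests = new LinkedBlockingQueue<String>(10); // bounded, like the original
        final BlockingQueue<String> responses = new LinkedBlockingQueue<String>();

        Thread worker = new Thread("async-invocation-worker") {
            @Override
            public void run() {
                try {
                    while (!Thread.currentThread().isInterrupted()) {
                        String request = requests.take();       // blocks until work arrives
                        responses.offer("handled:" + request);  // hand the result back
                    }
                } catch (InterruptedException e) {
                    // interrupted on dispose; exit the loop
                }
            }
        };
        worker.setDaemon(true);
        worker.start();

        requests.put("ping");
        String answer = responses.poll(1, TimeUnit.SECONDS);     // wait for the result with a timeout
        System.out.println(answer);
        worker.interrupt();
    }
}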
Use of java.util.concurrent.LinkedBlockingQueue in project android_packages_inputmethods_LatinIME by CyanogenMod.
The class DictionaryService, method onCreate.
@Override
public void onCreate() {
    // By default, a thread pool executor does not time out its core threads, so it will
    // never kill them when there isn't any work to do any more. That would mean the service
    // can never die! By creating it this way and calling allowCoreThreadTimeOut, we allow
    // the single thread to time out after WORKER_THREAD_TIMEOUT_SECONDS = 15 seconds, allowing
    // the process to be reclaimed by the system any time after that if it's not doing
    // anything else.
    // Executors#newSingleThreadExecutor creates a ThreadPoolExecutor but it returns the
    // superclass ExecutorService which does not have the #allowCoreThreadTimeOut method,
    // so we can't use that.
    mExecutor = new ThreadPoolExecutor(1, /* corePoolSize */
            1, /* maximumPoolSize */
            WORKER_THREAD_TIMEOUT_SECONDS, /* keepAliveTime */
            TimeUnit.SECONDS, /* unit for keepAliveTime */
            new LinkedBlockingQueue<Runnable>());
    mExecutor.allowCoreThreadTimeOut(true);
}
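Here LinkedBlockingQueue is simply the work queue handed to a directly constructed ThreadPoolExecutor so that allowCoreThreadTimeOut can be called on it. A self-contained sketch of the same construction, with the 15-second constant assumed from the comment above and the class name invented for illustration, could be:

import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

public class IdleTimeoutExecutorSketch {

    private static final long WORKER_THREAD_TIMEOUT_SECONDS = 15; // assumed value from the comment

    public static ThreadPoolExecutor newSingleThreadTimeoutExecutor() {
        ThreadPoolExecutor executor = new ThreadPoolExecutor(
                1,                              // corePoolSize
                1,                              // maximumPoolSize
                WORKER_THREAD_TIMEOUT_SECONDS,  // keepAliveTime
                TimeUnit.SECONDS,
                new LinkedBlockingQueue<Runnable>());
        executor.allowCoreThreadTimeOut(true); // let the single core thread expire when idle
        return executor;
    }

    public static void main(String[] args) {
        ThreadPoolExecutor executor = newSingleThreadTimeoutExecutor();
        executor.execute(() -> System.out.println("ran on " + Thread.currentThread().getName()));
        executor.shutdown();
    }
}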
Use of java.util.concurrent.LinkedBlockingQueue in project async-http-client by AsyncHttpClient.
The class AsyncStreamLifecycleTest, method testStream.
@Test(groups = "standalone")
public void testStream() throws Exception {
    try (AsyncHttpClient ahc = asyncHttpClient()) {
        final AtomicBoolean err = new AtomicBoolean(false);
        final LinkedBlockingQueue<String> queue = new LinkedBlockingQueue<>();
        final AtomicBoolean status = new AtomicBoolean(false);
        final AtomicInteger headers = new AtomicInteger(0);
        final CountDownLatch latch = new CountDownLatch(1);
        ahc.executeRequest(ahc.prepareGet(getTargetUrl()).build(), new AsyncHandler<Object>() {

            public void onThrowable(Throwable t) {
                fail("Got throwable.", t);
                err.set(true);
            }

            public State onBodyPartReceived(HttpResponseBodyPart e) throws Exception {
                if (e.length() != 0) {
                    String s = new String(e.getBodyPartBytes());
                    logger.info("got part: {}", s);
                    queue.put(s);
                }
                return State.CONTINUE;
            }

            public State onStatusReceived(HttpResponseStatus e) throws Exception {
                status.set(true);
                return State.CONTINUE;
            }

            public State onHeadersReceived(HttpResponseHeaders e) throws Exception {
                if (headers.incrementAndGet() == 2) {
                    throw new Exception("Analyze this.");
                }
                return State.CONTINUE;
            }

            public Object onCompleted() throws Exception {
                latch.countDown();
                return null;
            }
        });
        assertTrue(latch.await(1, TimeUnit.SECONDS), "Latch failed.");
        assertFalse(err.get());
        assertEquals(queue.size(), 2);
        assertTrue(queue.contains("part1"));
        assertTrue(queue.contains("part2"));
        assertTrue(status.get());
        assertEquals(headers.get(), 1);
    }
}
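In this test the LinkedBlockingQueue is only a thread-safe collector: the handler's callbacks, running on the client's I/O threads, put each body part into the queue, and the test thread inspects it after the latch fires. A minimal sketch of that collect-then-assert pattern, with hypothetical names and plain assertions instead of TestNG, is:

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.TimeUnit;

public class StreamCollectSketch {
    public static void main(String[] args) throws InterruptedException {
        final LinkedBlockingQueue<String> parts = new LinkedBlockingQueue<>();
        final CountDownLatch done = new CountDownLatch(1);

        // stands in for the I/O thread invoking onBodyPartReceived and onCompleted
        Thread ioThread = new Thread(() -> {
            try {
                parts.put("part1");
                parts.put("part2");
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
            }
            done.countDown();
        });
        ioThread.start();

        // the "test" thread waits for completion, then checks what was collected
        if (!done.await(1, TimeUnit.SECONDS)) {
            throw new AssertionError("Latch failed.");
        }
        if (parts.size() != 2 || !parts.contains("part1") || !parts.contains("part2")) {
            throw new AssertionError("Unexpected parts: " + parts);
        }
        System.out.println("collected: " + parts);
    }
}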