Example usage of java.util.concurrent.atomic.AtomicReference in the Apache Storm project:
class LogConfigManagerTest, method testProcessRootLogLevelToDebugSetsLoggerAndTimeout.
/**
 * Processing a LogConfig that names the ROOT logger at DEBUG (alongside
 * several named loggers) must call setLoggerLevel once per entry, with the
 * "ROOT" key translated into the empty-string logger name.
 */
@Test
public void testProcessRootLogLevelToDebugSetsLoggerAndTimeout() {
    try (SimulatedTime t = new SimulatedTime()) {
        // Holder starts empty; the manager under test fills it in.
        AtomicReference<TreeMap<String, LogLevel>> mockConfigAtom = new AtomicReference<>(null);
        long inThirtySeconds = Time.currentTimeMillis() + 30_000;

        // Build the incoming config: ROOT plus three named loggers, all
        // expiring thirty (simulated) seconds from now.
        LogConfig mockConfig = new LogConfig();
        mockConfig.put_to_named_logger_level("ROOT", ll("DEBUG", inThirtySeconds));
        mockConfig.put_to_named_logger_level("my_debug_logger", ll("DEBUG", inThirtySeconds));
        mockConfig.put_to_named_logger_level("my_info_logger", ll("INFO", inThirtySeconds));
        mockConfig.put_to_named_logger_level("my_error_logger", ll("ERROR", inThirtySeconds));
        LOG.info("Tests {}", mockConfigAtom.get());

        LogConfigManager underTest = spy(new LogConfigManagerUnderTest(mockConfigAtom));
        underTest.processLogConfigChange(mockConfig);

        // "ROOT" is mapped to the empty logger name; the rest keep their names.
        verify(underTest).setLoggerLevel(anyObject(), eq(""), eq("DEBUG"));
        verify(underTest).setLoggerLevel(anyObject(), eq("my_debug_logger"), eq("DEBUG"));
        verify(underTest).setLoggerLevel(anyObject(), eq("my_info_logger"), eq("INFO"));
        verify(underTest).setLoggerLevel(anyObject(), eq("my_error_logger"), eq("ERROR"));
    }
}
Example usage of java.util.concurrent.atomic.AtomicReference in the Apache Storm project:
class LogConfigManagerTest, method testLogResetShouldNotTriggerForFutureTime.
/**
 * A log-level entry whose timeout lies in the (simulated) future must survive
 * a resetLogLevels() pass — only expired entries are cleared.
 */
@Test
public void testLogResetShouldNotTriggerForFutureTime() {
    try (SimulatedTime t = new SimulatedTime()) {
        // Entry expires one second from the simulated "now".
        long theFuture = Time.currentTimeMillis() + 1000;
        TreeMap<String, LogLevel> levels = new TreeMap<>();
        levels.put("foo", ll(theFuture));
        AtomicReference<TreeMap<String, LogLevel>> atomConf = new AtomicReference<>(levels);

        new LogConfigManagerUnderTest(atomConf).resetLogLevels();

        // The not-yet-expired config must still be present.
        assertNotNull(atomConf.get());
    }
}
Example usage of java.util.concurrent.atomic.AtomicReference in the Apache Storm project:
class LogConfigManagerTest, method testLogResetsNamedLoggersWithPastTimeout.
/**
 * Entries whose timeout already elapsed must be removed by resetLogLevels(),
 * and each logger must be restored to its configured reset level.
 */
@Test
public void testLogResetsNamedLoggersWithPastTimeout() {
    try (SimulatedTime t = new SimulatedTime()) {
        // All three entries expired one (simulated) second ago.
        long past = Time.currentTimeMillis() - 1000;
        TreeMap<String, LogLevel> levels = new TreeMap<>();
        levels.put("my_debug_logger", ll("DEBUG", "INFO", past));
        levels.put("my_info_logger", ll("INFO", "WARN", past));
        levels.put("my_error_logger", ll("ERROR", "INFO", past));
        AtomicReference<TreeMap<String, LogLevel>> atomConf = new AtomicReference<>(levels);

        LogConfigManager underTest = spy(new LogConfigManagerUnderTest(atomConf));
        underTest.resetLogLevels();

        // Every expired entry is gone, and each logger was reset to its
        // configured fallback level.
        assertEquals(new TreeMap<>(), atomConf.get());
        verify(underTest).setLoggerLevel(anyObject(), eq("my_debug_logger"), eq("INFO"));
        verify(underTest).setLoggerLevel(anyObject(), eq("my_info_logger"), eq("WARN"));
        verify(underTest).setLoggerLevel(anyObject(), eq("my_error_logger"), eq("INFO"));
    }
}
Example usage of java.util.concurrent.atomic.AtomicReference in the Apache Storm project:
class Zookeeper, method zkLeaderElectorImpl.
/**
 * Builds a ZooKeeper-backed leader elector for Nimbus.
 *
 * <p>Creates a Curator client from the configured ZK servers/port, then wraps
 * a Curator {@code LeaderLatch} (rooted at {@code <storm-zk-root>/leader-lock}
 * and identified by this host's host:port string) and its listener in
 * {@code AtomicReference}s so the elector can swap them out on re-election.
 *
 * @param conf      topology/cluster configuration map
 * @param blobStore blob store handed to the latch listener and elector
 * @return a configured {@link ILeaderElector}
 * @throws UnknownHostException if this host's address cannot be resolved
 */
protected ILeaderElector zkLeaderElectorImpl(Map conf, BlobStore blobStore) throws UnknownHostException {
    List<String> zkServers = (List<String>) conf.get(Config.STORM_ZOOKEEPER_SERVERS);
    Object zkPort = conf.get(Config.STORM_ZOOKEEPER_PORT);
    CuratorFramework zk = mkClientImpl(conf, zkServers, zkPort, "", conf);

    String leaderLockPath = conf.get(Config.STORM_ZOOKEEPER_ROOT) + "/leader-lock";
    String id = NimbusInfo.fromConf(conf).toHostPortString();

    LeaderLatch latch = new LeaderLatch(zk, leaderLockPath, id);
    AtomicReference<LeaderLatch> latchRef = new AtomicReference<>(latch);
    AtomicReference<LeaderLatchListener> listenerRef =
            new AtomicReference<>(leaderLatchListenerImpl(conf, zk, blobStore, latchRef.get()));

    return new LeaderElectorImp(conf, zkServers, zk, leaderLockPath, id, latchRef, listenerRef, blobStore);
}
Example usage of java.util.concurrent.atomic.AtomicReference in the Crate project:
class PartitionedTableConcurrentIntegrationTest, method deletePartitionWhileInsertingData.
/**
 * Exercises the race between concurrent inserts into a partitioned table and
 * a DELETE that drops one of its partitions.
 *
 * <p>One thread inserts 1000 rows spread over partitions 0 and 1 (either as a
 * single bulk request or row-by-row, depending on {@code useBulk}); a second
 * thread polls cluster state until the partition index for id=1 exists and
 * then deletes that partition. Any exception raised by the insert thread is
 * captured in an {@code AtomicReference} and rethrown on the test thread.
 *
 * @param useBulk insert all rows with one bulk request instead of one
 *                execute() call per row
 * @throws Exception the first exception raised by the insert thread, if any,
 *                   or AssertionError if either thread misses its deadline
 */
private void deletePartitionWhileInsertingData(final boolean useBulk) throws Exception {
    execute("create table parted (id int, name string) " + "partitioned by (id) " + "with (number_of_replicas = 0)");
    ensureYellow();
    int numberOfDocs = 1000;
    final Object[][] bulkArgs = new Object[numberOfDocs][];
    for (int i = 0; i < numberOfDocs; i++) {
        // Alternate ids 0/1 so both partitions receive data.
        bulkArgs[i] = new Object[] { i % 2, randomAsciiOfLength(10) };
    }
    // partition to delete
    final int idToDelete = 1;
    final AtomicReference<Exception> exceptionRef = new AtomicReference<>();
    final CountDownLatch insertLatch = new CountDownLatch(1);
    final String insertStmt = "insert into parted (id, name) values (?, ?)";
    Thread insertThread = new Thread(new Runnable() {
        @Override
        public void run() {
            try {
                if (useBulk) {
                    execute(insertStmt, bulkArgs);
                } else {
                    for (Object[] args : bulkArgs) {
                        execute(insertStmt, args);
                    }
                }
            } catch (Exception t) {
                // Captured for rethrow on the main test thread.
                exceptionRef.set(t);
            } finally {
                insertLatch.countDown();
            }
        }
    });
    final CountDownLatch deleteLatch = new CountDownLatch(1);
    final String partitionName = new PartitionName("parted", Collections.singletonList(new BytesRef(String.valueOf(idToDelete)))).asIndexName();
    final Object[] deleteArgs = new Object[] { idToDelete };
    Thread deleteThread = new Thread(new Runnable() {
        @Override
        public void run() {
            boolean deleted = false;
            while (!deleted) {
                try {
                    MetaData metaData = client().admin().cluster().prepareState().execute().actionGet().getState().metaData();
                    // Only issue the DELETE once the partition index exists.
                    if (metaData.indices().get(partitionName) != null) {
                        execute("delete from parted where id = ?", deleteArgs);
                        deleted = true;
                    }
                } catch (Throwable t) {
                    // ignore and retry (mostly: partition index does not exist yet)
                }
            }
            deleteLatch.countDown();
        }
    });
    insertThread.start();
    deleteThread.start();
    long timeoutSeconds = SQLTransportExecutor.REQUEST_TIMEOUT.getSeconds() + 1;
    // BUGFIX: the boolean results of await() were previously ignored, so a
    // stuck thread fell through to the untimed join() calls below and hung
    // the test forever. Fail fast instead when a deadline is missed.
    if (!deleteLatch.await(timeoutSeconds, TimeUnit.SECONDS)) {
        throw new AssertionError("delete thread did not finish within " + timeoutSeconds + "s");
    }
    if (!insertLatch.await(timeoutSeconds, TimeUnit.SECONDS)) {
        throw new AssertionError("insert thread did not finish within " + timeoutSeconds + "s");
    }
    // Both latches fired, so the threads are exiting; join before rethrowing
    // to avoid leaking live threads out of a failed test.
    insertThread.join();
    deleteThread.join();
    Exception exception = exceptionRef.get();
    if (exception != null) {
        throw exception;
    }
}
Aggregations