Use of java.util.concurrent.Callable in project pulsar by yahoo.
From the class ReplicatorTest, method testConfigChange.
@Test(enabled = true)
public void testConfigChange() throws Exception {
    log.info("--- Starting ReplicatorTest::testConfigChange ---");
    // This test verifies that a config change on a global namespace is successfully applied in the broker at runtime.
    // Run a set of producer tasks to create the destinations
    List<Future<Void>> results = Lists.newArrayList();
    for (int i = 0; i < 10; i++) {
        final DestinationName dest = DestinationName.get(String.format("persistent://pulsar/global/ns/topic-%d", i));
        results.add(executor.submit(new Callable<Void>() {
            @Override
            public Void call() throws Exception {
                MessageProducer producer = new MessageProducer(url1, dest);
                log.info("--- Starting producer --- " + url1);
                MessageConsumer consumer = new MessageConsumer(url1, dest);
                log.info("--- Starting Consumer --- " + url1);
                producer.produce(2);
                consumer.receive(2);
                producer.close();
                consumer.close();
                return null;
            }
        }));
    }
    for (Future<Void> result : results) {
        try {
            result.get();
        } catch (Exception e) {
            log.error("exception in getting future result ", e);
            fail(String.format("replication test failed with %s exception", e.getMessage()));
        }
    }
    Thread.sleep(1000L);
    // Make sure that the internal replicators map contains remote cluster info
    ConcurrentOpenHashMap<String, PulsarClient> replicationClients1 = ns1.getReplicationClients();
    ConcurrentOpenHashMap<String, PulsarClient> replicationClients2 = ns2.getReplicationClients();
    ConcurrentOpenHashMap<String, PulsarClient> replicationClients3 = ns3.getReplicationClients();
    Assert.assertNotNull(replicationClients1.get("r2"));
    Assert.assertNotNull(replicationClients1.get("r3"));
    Assert.assertNotNull(replicationClients2.get("r1"));
    Assert.assertNotNull(replicationClients2.get("r3"));
    Assert.assertNotNull(replicationClients3.get("r1"));
    Assert.assertNotNull(replicationClients3.get("r2"));
    // Case 1: Update the global namespace replication configuration to contain only the local cluster itself
    admin1.namespaces().setNamespaceReplicationClusters("pulsar/global/ns", Lists.newArrayList("r1"));
    // Wait for config changes to be updated.
    Thread.sleep(1000L);
    // Make sure that the internal replicators map still contains remote cluster info
    Assert.assertNotNull(replicationClients1.get("r2"));
    Assert.assertNotNull(replicationClients1.get("r3"));
    Assert.assertNotNull(replicationClients2.get("r1"));
    Assert.assertNotNull(replicationClients2.get("r3"));
    Assert.assertNotNull(replicationClients3.get("r1"));
    Assert.assertNotNull(replicationClients3.get("r2"));
    // Case 2: Update the configuration back
    admin1.namespaces().setNamespaceReplicationClusters("pulsar/global/ns", Lists.newArrayList("r1", "r2", "r3"));
    // Wait for config changes to be updated.
    Thread.sleep(1000L);
    // Make sure that the internal replicators map still contains remote cluster info
    Assert.assertNotNull(replicationClients1.get("r2"));
    Assert.assertNotNull(replicationClients1.get("r3"));
    Assert.assertNotNull(replicationClients2.get("r1"));
    Assert.assertNotNull(replicationClients2.get("r3"));
    Assert.assertNotNull(replicationClients3.get("r1"));
    Assert.assertNotNull(replicationClients3.get("r2"));
    // Case 3: TODO: Once automatic cleanup is implemented, add test cases to verify auto removal of clusters
}
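The pattern above, submitting anonymous Callable<Void> tasks to an ExecutorService and then joining each Future, can be distilled into the following self-contained sketch using only java.util.concurrent; the pool size, task count, and task body are placeholders rather than the test's MessageProducer and MessageConsumer workload.

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

public class SubmitAndJoin {
    public static void main(String[] args) throws Exception {
        ExecutorService executor = Executors.newFixedThreadPool(4);  // placeholder pool size
        List<Future<Void>> results = new ArrayList<>();
        for (int i = 0; i < 10; i++) {
            final int taskId = i;
            // Callable<Void> is used instead of Runnable so the task body may throw checked exceptions
            results.add(executor.submit(new Callable<Void>() {
                @Override
                public Void call() throws Exception {
                    System.out.println("running task " + taskId);  // placeholder work
                    return null;
                }
            }));
        }
        // Future.get() rethrows any exception from call() wrapped in an ExecutionException,
        // so a single failing task fails the whole run here
        for (Future<Void> result : results) {
            result.get();
        }
        executor.shutdown();
    }
}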
Use of java.util.concurrent.Callable in project pulsar by yahoo.
From the class ReplicatorTest, method testReplicationOverrides.
@Test(enabled = false)
public void testReplicationOverrides() throws Exception {
    log.info("--- Starting ReplicatorTest::testReplicationOverrides ---");
    // This test verifies that per-message replication overrides on a global namespace are honored by the brokers at runtime.
    // Run a set of producer tasks to create the destinations
    SortedSet<String> testDests = new TreeSet<String>();
    List<Future<Void>> results = Lists.newArrayList();
    for (int i = 0; i < 10; i++) {
        final DestinationName dest = DestinationName.get(String.format("persistent://pulsar/global/ns/repltopic-%d", i));
        testDests.add(dest.toString());
        results.add(executor.submit(new Callable<Void>() {
            @Override
            public Void call() throws Exception {
                MessageProducer producer1 = new MessageProducer(url1, dest);
                log.info("--- Starting producer --- " + url1);
                MessageProducer producer2 = new MessageProducer(url2, dest);
                log.info("--- Starting producer --- " + url2);
                MessageProducer producer3 = new MessageProducer(url3, dest);
                log.info("--- Starting producer --- " + url3);
                MessageConsumer consumer1 = new MessageConsumer(url1, dest);
                log.info("--- Starting Consumer --- " + url1);
                MessageConsumer consumer2 = new MessageConsumer(url2, dest);
                log.info("--- Starting Consumer --- " + url2);
                MessageConsumer consumer3 = new MessageConsumer(url3, dest);
                log.info("--- Starting Consumer --- " + url3);
                // Produce from cluster1 for this test
                int nr1 = 0;
                int nr2 = 0;
                int nr3 = 0;
                // Produce a message that isn't replicated
                producer1.produce(1, MessageBuilder.create().disableReplication());
                // Produce a message not replicated to r2
                producer1.produce(1, MessageBuilder.create().setReplicationClusters(Lists.newArrayList("r1", "r3")));
                // Produce a default replicated message
                producer1.produce(1);
                consumer1.receive(3);
                consumer2.receive(1);
                if (!consumer2.drained()) {
                    throw new Exception("consumer2 - unexpected message in queue");
                }
                consumer3.receive(2);
                if (!consumer3.drained()) {
                    throw new Exception("consumer3 - unexpected message in queue");
                }
                producer1.close();
                producer2.close();
                producer3.close();
                consumer1.close();
                consumer2.close();
                consumer3.close();
                return null;
            }
        }));
    }
    for (Future<Void> result : results) {
        try {
            result.get();
        } catch (Exception e) {
            log.error("exception in getting future result ", e);
            fail(String.format("replication test failed with %s exception", e.getMessage()));
        }
    }
}
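For reference, here is a minimal sketch of the per-message replication overrides exercised above, written against the pre-2.0 com.yahoo.pulsar client API used in this project; the service URL and topic are placeholders, and the client factory and builder method signatures are assumptions that may not match every release.

import java.util.Arrays;

import com.yahoo.pulsar.client.api.Message;
import com.yahoo.pulsar.client.api.MessageBuilder;
import com.yahoo.pulsar.client.api.Producer;
import com.yahoo.pulsar.client.api.PulsarClient;

public class ReplicationOverrideSketch {
    public static void main(String[] args) throws Exception {
        // Placeholder broker URL and topic; replace with a real cluster and global namespace
        PulsarClient client = PulsarClient.create("pulsar://localhost:6650");
        Producer producer = client.createProducer("persistent://pulsar/global/ns/repltopic-0");

        // Message that stays in the local cluster only
        Message local = MessageBuilder.create()
                .setContent("local-only".getBytes())
                .disableReplication()
                .build();
        producer.send(local);

        // Message replicated only to the named clusters
        Message partial = MessageBuilder.create()
                .setContent("r1-and-r3".getBytes())
                .setReplicationClusters(Arrays.asList("r1", "r3"))
                .build();
        producer.send(partial);

        // Message replicated to every cluster configured on the namespace (the default)
        producer.send(MessageBuilder.create().setContent("everywhere".getBytes()).build());

        producer.close();
        client.close();
    }
}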
Use of java.util.concurrent.Callable in project jdk8u_jdk by JetBrains.
From the class LdapTimeoutTest, method call.
public Boolean call() {
    InitialContext ctx = null;
    ScheduledFuture killer = null;
    long start = System.nanoTime();
    try {
        while (!server.accepting()) // allow the server to start up
            Thread.sleep(200);
        // to be sure
        Thread.sleep(200);
        // interrupt after a certain time
        if (killSwitchPool != null) {
            final Thread current = Thread.currentThread();
            killer = killSwitchPool.schedule(new Callable<Void>() {
                public Void call() throws Exception {
                    current.interrupt();
                    return null;
                }
            }, HANGING_TEST_TIMEOUT, MILLISECONDS);
        }
        env.put(Context.PROVIDER_URL, "ldap://localhost:" + server.getLocalPort());
        try {
            ctx = new InitialDirContext(env);
            performOp(ctx);
            fail();
        } catch (NamingException e) {
            long end = System.nanoTime();
            System.out.println(this.getClass().toString() + " - elapsed: " + NANOSECONDS.toMillis(end - start));
            handleNamingException(e, start, end);
        } finally {
            if (killer != null && !killer.isDone())
                killer.cancel(true);
            shutItDown(ctx);
            server.close();
        }
        return passed;
    } catch (IOException | InterruptedException e) {
        throw new RuntimeException(e);
    }
}
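The kill-switch idiom in this test, scheduling a Callable that interrupts the current thread if a blocking call hangs past a deadline, needs nothing beyond java.util.concurrent; in the sketch below the timeout value and the long Thread.sleep standing in for the hanging LDAP operation are placeholders.

import java.util.concurrent.Callable;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.TimeUnit;

public class KillSwitchDemo {
    public static void main(String[] args) {
        ScheduledExecutorService killSwitchPool = Executors.newScheduledThreadPool(1);
        final Thread current = Thread.currentThread();
        // Schedule an interrupt of the calling thread after a placeholder 2-second deadline
        ScheduledFuture<Void> killer = killSwitchPool.schedule(new Callable<Void>() {
            public Void call() throws Exception {
                current.interrupt();
                return null;
            }
        }, 2000, TimeUnit.MILLISECONDS);
        try {
            // Stand-in for a potentially hanging operation such as the LDAP bind above
            Thread.sleep(60_000);
        } catch (InterruptedException e) {
            System.out.println("operation interrupted by kill switch");
        } finally {
            // Cancel the kill switch if the operation finished before the deadline
            if (!killer.isDone()) {
                killer.cancel(true);
            }
            killSwitchPool.shutdown();
        }
    }
}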
Use of java.util.concurrent.Callable in project symmetric-ds by JumpMind.
From the class DataExtractorService, method extract.
protected List<OutgoingBatch> extract(final ProcessInfo processInfo, final Node targetNode, final List<OutgoingBatch> activeBatches, final IDataWriter dataWriter, final BufferedWriter writer, final ExtractMode mode) {
    if (activeBatches.size() > 0) {
        final List<OutgoingBatch> processedBatches = new ArrayList<OutgoingBatch>(activeBatches.size());
        Set<String> channelsProcessed = new HashSet<String>();
        long batchesSelectedAtMs = System.currentTimeMillis();
        OutgoingBatch currentBatch = null;
        ExecutorService executor = null;
        try {
            final boolean streamToFileEnabled = parameterService.is(ParameterConstants.STREAM_TO_FILE_ENABLED);
            long keepAliveMillis = parameterService.getLong(ParameterConstants.DATA_LOADER_SEND_ACK_KEEPALIVE);
            Node sourceNode = nodeService.findIdentity();
            final FutureExtractStatus status = new FutureExtractStatus();
            executor = Executors.newFixedThreadPool(1, new CustomizableThreadFactory(String.format("dataextractor-%s-%s", targetNode.getNodeGroupId(), targetNode.getNodeGroupId())));
            List<Future<FutureOutgoingBatch>> futures = new ArrayList<Future<FutureOutgoingBatch>>();
            processInfo.setBatchCount(activeBatches.size());
            for (int i = 0; i < activeBatches.size(); i++) {
                currentBatch = activeBatches.get(i);
                processInfo.setCurrentLoadId(currentBatch.getLoadId());
                processInfo.setDataCount(currentBatch.getDataEventCount());
                processInfo.setCurrentBatchId(currentBatch.getBatchId());
                channelsProcessed.add(currentBatch.getChannelId());
                currentBatch = requeryIfEnoughTimeHasPassed(batchesSelectedAtMs, currentBatch);
                processInfo.setStatus(ProcessInfo.Status.EXTRACTING);
                final OutgoingBatch extractBatch = currentBatch;
                Callable<FutureOutgoingBatch> callable = new Callable<FutureOutgoingBatch>() {
                    public FutureOutgoingBatch call() throws Exception {
                        return extractBatch(extractBatch, status, processInfo, targetNode, dataWriter, mode, activeBatches);
                    }
                };
                if (status.shouldExtractSkip) {
                    break;
                }
                futures.add(executor.submit(callable));
            }
            if (parameterService.is(ParameterConstants.SYNCHRONIZE_ALL_JOBS)) {
                executor.shutdown();
                boolean isProcessed = false;
                while (!isProcessed) {
                    try {
                        isProcessed = executor.awaitTermination(keepAliveMillis, TimeUnit.MILLISECONDS);
                    } catch (InterruptedException e) {
                        throw new RuntimeException(e);
                    }
                    if (!isProcessed) {
                        writeKeepAliveAck(writer, sourceNode, streamToFileEnabled);
                    }
                }
            }
            Iterator<OutgoingBatch> activeBatchIter = activeBatches.iterator();
            for (Future<FutureOutgoingBatch> future : futures) {
                currentBatch = activeBatchIter.next();
                boolean isProcessed = false;
                while (!isProcessed) {
                    try {
                        FutureOutgoingBatch extractBatch = future.get(keepAliveMillis, TimeUnit.MILLISECONDS);
                        currentBatch = extractBatch.getOutgoingBatch();
                        if (extractBatch.isExtractSkipped) {
                            break;
                        }
                        if (streamToFileEnabled || mode == ExtractMode.FOR_PAYLOAD_CLIENT) {
                            processInfo.setStatus(ProcessInfo.Status.TRANSFERRING);
                            processInfo.setCurrentLoadId(currentBatch.getLoadId());
                            boolean isRetry = extractBatch.isRetry() && extractBatch.getOutgoingBatch().getStatus() != OutgoingBatch.Status.IG;
                            currentBatch = sendOutgoingBatch(processInfo, targetNode, currentBatch, isRetry, dataWriter, writer, mode);
                        }
                        processedBatches.add(currentBatch);
                        isProcessed = true;
                        if (currentBatch.getStatus() != Status.OK) {
                            currentBatch.setLoadCount(currentBatch.getLoadCount() + 1);
                            changeBatchStatus(Status.LD, currentBatch, mode);
                        }
                    } catch (ExecutionException e) {
                        if (isNotBlank(e.getMessage()) && e.getMessage().contains("string truncation")) {
                            throw new RuntimeException("There is a good chance that the truncation error you are receiving is because contains_big_lobs on the '" + currentBatch.getChannelId() + "' channel needs to be turned on.", e.getCause() != null ? e.getCause() : e);
                        }
                        throw new RuntimeException(e.getCause() != null ? e.getCause() : e);
                    } catch (InterruptedException e) {
                        throw new RuntimeException(e);
                    } catch (TimeoutException e) {
                        writeKeepAliveAck(writer, sourceNode, streamToFileEnabled);
                    }
                }
            }
        } catch (RuntimeException e) {
            SQLException se = unwrapSqlException(e);
            if (currentBatch != null) {
                try {
                    /* Reread batch in case the ignore flag has been set */
                    currentBatch = outgoingBatchService.findOutgoingBatch(currentBatch.getBatchId(), currentBatch.getNodeId());
                    statisticManager.incrementDataExtractedErrors(currentBatch.getChannelId(), 1);
                    if (se != null) {
                        currentBatch.setSqlState(se.getSQLState());
                        currentBatch.setSqlCode(se.getErrorCode());
                        currentBatch.setSqlMessage(se.getMessage());
                    } else {
                        currentBatch.setSqlMessage(getRootMessage(e));
                    }
                    currentBatch.revertStatsOnError();
                    if (currentBatch.getStatus() != Status.IG && currentBatch.getStatus() != Status.OK) {
                        currentBatch.setStatus(Status.ER);
                        currentBatch.setErrorFlag(true);
                    }
                    outgoingBatchService.updateOutgoingBatch(currentBatch);
                } catch (Exception ex) {
                    log.error("Failed to update the outgoing batch status for failed batch {}", currentBatch, ex);
                } finally {
                    if (!isStreamClosedByClient(e)) {
                        if (e instanceof ProtocolException) {
                            IStagedResource resource = getStagedResource(currentBatch);
                            if (resource != null) {
                                resource.delete();
                            }
                        }
                        if (e.getCause() instanceof InterruptedException) {
                            log.info("Extract of batch {} was interrupted", currentBatch);
                        } else {
                            log.error("Failed to extract batch {}", currentBatch, e);
                        }
                    }
                    processInfo.setStatus(ProcessInfo.Status.ERROR);
                }
            } else {
                log.error("Could not log the outgoing batch status because the batch was null", e);
            }
        } finally {
            if (executor != null) {
                executor.shutdown();
            }
        }
        // Next, we update the node channel controls to the current timestamp
        Calendar now = Calendar.getInstance();
        for (String channelProcessed : channelsProcessed) {
            NodeChannel nodeChannel = configurationService.getNodeChannel(channelProcessed, targetNode.getNodeId(), false);
            if (nodeChannel != null && nodeChannel.getExtractPeriodMillis() > 0) {
                nodeChannel.setLastExtractTime(now.getTime());
                configurationService.updateLastExtractTime(nodeChannel);
            }
        }
        return processedBatches;
    } else {
        return Collections.emptyList();
    }
}
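The future-polling loop in the middle of extract(), waiting on each batch with a timeout and emitting a keep-alive whenever the wait times out, reduces to the sketch below; the single-thread pool, the sleep standing in for extraction work, and the println standing in for writeKeepAliveAck() are placeholders.

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;

public class KeepAlivePolling {
    public static void main(String[] args) {
        ExecutorService executor = Executors.newFixedThreadPool(1);
        List<Future<String>> futures = new ArrayList<>();
        for (int i = 0; i < 3; i++) {
            final int batchId = i;
            futures.add(executor.submit(new Callable<String>() {
                public String call() throws Exception {
                    Thread.sleep(3000);  // placeholder for the real extraction work
                    return "batch-" + batchId;
                }
            }));
        }
        long keepAliveMillis = 1000;  // placeholder keep-alive interval
        for (Future<String> future : futures) {
            boolean isProcessed = false;
            while (!isProcessed) {
                try {
                    String batch = future.get(keepAliveMillis, TimeUnit.MILLISECONDS);
                    System.out.println("finished " + batch);
                    isProcessed = true;
                } catch (TimeoutException e) {
                    // The worker is still busy; signal the other side instead of going silent
                    System.out.println("keep-alive");
                } catch (InterruptedException | ExecutionException e) {
                    throw new RuntimeException(e);
                }
            }
        }
        executor.shutdown();
    }
}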
Use of java.util.concurrent.Callable in project jersey by jersey.
From the class OsgiRegistry, method register.
private void register(final Bundle bundle) {
    if (LOGGER.isLoggable(Level.FINEST)) {
        LOGGER.log(Level.FINEST, "checking bundle {0}", bundle.getBundleId());
    }
    Map<String, Callable<List<Class<?>>>> map;
    lock.writeLock().lock();
    try {
        map = factories.get(bundle.getBundleId());
        if (map == null) {
            map = new ConcurrentHashMap<String, Callable<List<Class<?>>>>();
            factories.put(bundle.getBundleId(), map);
        }
    } finally {
        lock.writeLock().unlock();
    }
    final Enumeration<URL> e = findEntries(bundle, "META-INF/services/", "*", false);
    if (e != null) {
        while (e.hasMoreElements()) {
            final URL u = e.nextElement();
            final String url = u.toString();
            if (url.endsWith("/")) {
                continue;
            }
            final String factoryId = url.substring(url.lastIndexOf("/") + 1);
            map.put(factoryId, new BundleSpiProvidersLoader(factoryId, u, bundle));
        }
    }
}
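register() defers classloading by storing a Callable per SPI file instead of loading provider classes eagerly. A minimal, framework-free sketch of the same idea follows; the registry class, the factory id, and the Class.forName-based loader are illustrative stand-ins, not Jersey's BundleSpiProvidersLoader.

import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.concurrent.Callable;
import java.util.concurrent.ConcurrentHashMap;

public class LazyProviderRegistry {
    // Maps an SPI id to a Callable that loads its provider classes only when asked
    private final Map<String, Callable<List<Class<?>>>> factories =
            new ConcurrentHashMap<String, Callable<List<Class<?>>>>();

    public void register(final String factoryId, final String className) {
        factories.put(factoryId, new Callable<List<Class<?>>>() {
            @Override
            public List<Class<?>> call() throws Exception {
                // Classloading happens here, on first lookup, not at registration time
                return Collections.<Class<?>>singletonList(Class.forName(className));
            }
        });
    }

    public List<Class<?>> lookup(String factoryId) throws Exception {
        Callable<List<Class<?>>> loader = factories.get(factoryId);
        return loader == null ? Collections.<Class<?>>emptyList() : loader.call();
    }

    public static void main(String[] args) throws Exception {
        LazyProviderRegistry registry = new LazyProviderRegistry();
        // Placeholder SPI id and class name; real entries come from META-INF/services files
        registry.register("demo.spi", "java.util.ArrayList");
        System.out.println(registry.lookup("demo.spi"));
    }
}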