use of com.google.common.util.concurrent.ThreadFactoryBuilder in project stanbol by apache.
the class FstLinkingEngineComponent method applyConfig.
/**
* Called by {@link #activate(ComponentContext)},
* {@link PlainFstLinkingComponnet#activate(ComponentContext)} and
* {@link NamedEntityFstLinkingComponnet#activate(ComponentContext)} to
* apply the parsed {@link ComponentContext#getProperties()}. The
* {@link LinkingModeEnum linking mode} is parsed separately, as OSGi does not
* allow modifying the parsed config and sub-classes need to override
* the linking mode.
* @param linkingMode the linking mode
* @param properties the configuration properties to apply
* @throws ConfigurationException if a required property is missing or invalid
*/
protected void applyConfig(LinkingModeEnum linkingMode, Dictionary<String, Object> properties, NamespacePrefixService prefixService) throws ConfigurationException {
//(0) The name for the Enhancement Engine and the basic metadata
Object value = properties.get(PROPERTY_NAME);
if (value == null || value.toString().isEmpty()) {
throw new ConfigurationException(PROPERTY_NAME, "The EnhancementEngine name MUST BE configured!");
} else {
this.engineName = value.toString();
}
log.info(" - engine name: {}", engineName);
engineMetadata = new Hashtable<String, Object>();
engineMetadata.put(PROPERTY_NAME, this.engineName);
value = properties.get(Constants.SERVICE_RANKING);
engineMetadata.put(Constants.SERVICE_RANKING, value == null ? Integer.valueOf(0) : value);
//(0) set the linking mode
this.linkingMode = linkingMode;
log.info(" - linking mode: {}", linkingMode);
//(1) parse the TextProcessing configuration
//TODO: decide if we should use the TextProcessingConfig for this engine
textProcessingConfig = TextProcessingConfig.createInstance(properties);
//change default for EntityLinkerConfig.MIN_FOUND_TOKENS
value = properties.get(EntityLinkerConfig.MIN_FOUND_TOKENS);
entityLinkerConfig = EntityLinkerConfig.createInstance(properties, prefixService);
if (value == null) {
//no MIN_FOUND_TOKENS config present
//manually set the default to the value used by this engine
entityLinkerConfig.setMinFoundTokens(FST_DEFAULT_MIN_FOUND_TOKENS);
}
//(2) parse the configured IndexReference
value = properties.get(SOLR_CORE);
if (value == null) {
throw new ConfigurationException(SOLR_CORE, "Missing required configuration of the SolrCore");
} else {
indexReference = IndexReference.parse(value.toString());
}
value = properties.get(IndexConfiguration.FIELD_ENCODING);
if (value == null) {
throw new ConfigurationException(IndexConfiguration.FIELD_ENCODING, "Missing required configuration of the Solr Field Encoding");
} else {
try {
fieldEncoding = FieldEncodingEnum.valueOf(value.toString().trim());
} catch (IllegalArgumentException e) {
throw new ConfigurationException(IndexConfiguration.FIELD_ENCODING, "The configured " + "FieldEncoding MUST BE a member of " + Arrays.toString(FieldEncodingEnum.values()), e);
}
}
value = properties.get(IndexConfiguration.SKIP_ALT_TOKENS);
if (value instanceof Boolean) {
skipAltTokensConfig = ((Boolean) value);
} else if (value != null) {
skipAltTokensConfig = Boolean.valueOf(value.toString());
}
// else no config -> will use the default
//(4) parse Origin information
value = properties.get(ORIGIN);
if (value instanceof RDFTerm) {
origin = (RDFTerm) value;
} else if (value instanceof String) {
try {
URI originUri = new URI((String) value);
if (originUri.isAbsolute()) {
origin = new IRI((String) value);
} else {
origin = new PlainLiteralImpl((String) value);
}
} catch (URISyntaxException e) {
origin = new PlainLiteralImpl((String) value);
}
log.info(" - origin: {}", origin);
} else if (value != null) {
log.warn("Values of the {} property MUST BE of type RDFTerm or String " + "(parsed: {} (type:{}))", new Object[] { ORIGIN, value, value.getClass() });
}
//else no ORIGIN information provided
//(5) init the FST configuration
//We can create the default configuration only here, as it depends on the
//name of the solrIndex
String defaultConfig = "*;" + IndexConfiguration.PARAM_FST + "=" + indexReference.getIndex() + ";" + IndexConfiguration.PARAM_FIELD + "=" + IndexConfiguration.DEFAULT_FIELD;
fstConfig = new LanguageConfiguration(IndexConfiguration.FST_CONFIG, new String[] { defaultConfig });
//now set the actual configuration parsed to the engine
value = properties.get(IndexConfiguration.FST_CONFIG);
if (value != null && !StringUtils.isBlank(value.toString())) {
fstConfig.setConfiguration(properties);
}
//else keep the default
value = properties.get(IndexConfiguration.FST_FOLDER);
if (value instanceof String) {
this.fstFolder = ((String) value).trim();
if (this.fstFolder.isEmpty()) {
this.fstFolder = null;
}
} else if (value == null) {
this.fstFolder = null;
} else {
throw new ConfigurationException(IndexConfiguration.FST_FOLDER, "Values MUST BE of type String " + "(found: " + value.getClass().getName() + ")!");
}
//(6) Create the ThreadPool used for the runtime creation of FST models
value = properties.get(FST_THREAD_POOL_SIZE);
int tpSize;
if (value instanceof Number) {
tpSize = ((Number) value).intValue();
} else if (value != null) {
try {
tpSize = Integer.parseInt(value.toString());
} catch (NumberFormatException e) {
throw new ConfigurationException(FST_THREAD_POOL_SIZE, "Unable to parse the integer FST thread pool size from the " + "configured " + value.getClass().getSimpleName() + " '" + value + "'!", e);
}
} else {
tpSize = -1;
}
if (tpSize <= 0) {
//if configured value <= 0 we use the default
tpSize = DEFAULT_FST_THREAD_POOL_SIZE;
}
//build a ThreadFactoryBuilder for low priority daemon threads that
//use a meaningful name
ThreadFactoryBuilder tfBuilder = new ThreadFactoryBuilder();
//should be stopped if the VM closes
tfBuilder.setDaemon(true);
//low priority
tfBuilder.setPriority(Thread.MIN_PRIORITY);
tfBuilder.setNameFormat(engineName + "-FstRuntimeCreation-thread-%d");
if (fstCreatorService != null && !fstCreatorService.isTerminated()) {
//NOTE: We cannot call shutdownNow(), because interrupting threads
// here would also close FileChannels used by the SolrCore
// and produce java.nio.channels.ClosedByInterruptException
// exceptions followed by java.nio.channels.ClosedChannelException
// on subsequent calls to affected files of the SolrIndex.
//Because of that we just log a warning and let uncompleted tasks
//complete!
log.warn("some items in a previous FST Runtime Creation Threadpool have " + "still not finished!");
}
fstCreatorService = Executors.newFixedThreadPool(tpSize, tfBuilder.build());
//(7) Parse the EntityCache config
int entityCacheSize;
value = properties.get(ENTITY_CACHE_SIZE);
if (value instanceof Number) {
entityCacheSize = ((Number) value).intValue();
} else if (value != null) {
try {
entityCacheSize = Integer.parseInt(value.toString());
} catch (NumberFormatException e) {
throw new ConfigurationException(ENTITY_CACHE_SIZE, "Unable to parse the integer EntityCacheSize from the " + "configured " + value.getClass().getSimpleName() + " '" + value + "'!", e);
}
} else {
entityCacheSize = -1;
}
if (entityCacheSize == 0) {
log.info(" ... EntityCache deactivated");
this.entityCacheSize = entityCacheSize;
} else {
this.entityCacheSize = entityCacheSize < 0 ? DEFAULT_ENTITY_CACHE_SIZE : entityCacheSize;
log.info(" ... EntityCache enabled (size: {})", this.entityCacheSize);
}
//(8) parse the Entity type field
value = properties.get(IndexConfiguration.SOLR_TYPE_FIELD);
if (value == null || StringUtils.isBlank(value.toString())) {
solrTypeField = null;
} else {
solrTypeField = value.toString().trim();
}
//(9) parse the Entity Ranking field
value = properties.get(IndexConfiguration.SOLR_RANKING_FIELD);
if (value == null) {
solrRankingField = null;
} else {
solrRankingField = value.toString().trim();
}
//(10) parse the NamedEntity type mappings (if linkingMode = NER)
if (linkingMode == LinkingModeEnum.NER) {
nerTypeMappings = new HashMap<String, Set<String>>();
value = properties.get(NAMED_ENTITY_TYPE_MAPPINGS);
if (value instanceof String[]) {
//support array
value = Arrays.asList((String[]) value);
} else if (value instanceof String) {
//single value
value = Collections.singleton(value);
}
if (value instanceof Collection<?>) {
//and collection
log.info(" - process Named Entity Type Mappings (used by LinkingMode: {})", linkingMode);
configs: for (Object o : (Iterable<?>) value) {
if (o != null) {
StringBuilder usage = new StringBuilder("useage: ");
usage.append("'{namedEntity-tag-or-uri} > {entityType-1}[,{entityType-n}]'");
String[] config = o.toString().split(">");
String namedEntityType = config[0].trim();
if (namedEntityType.isEmpty()) {
log.warn("Invalid Type Mapping Config '{}': Missing namedEntityType ({}) -> ignore this config", o, usage);
continue configs;
}
if (NamespaceMappingUtils.getPrefix(namedEntityType) != null) {
namedEntityType = NamespaceMappingUtils.getConfiguredUri(prefixService, NAMED_ENTITY_TYPE_MAPPINGS, namedEntityType);
}
if (config.length < 2 || config[1].isEmpty()) {
log.warn("Invalid Type Mapping Config '{}': Missing dc:type URI '{}' ({}) -> ignore this config", o, usage);
continue configs;
}
String entityTypes = config[1].trim();
if (config.length > 2) {
log.warn("Configuration after 2nd '>' gets ignored. Will use mapping '{} > {}' from config {}", new Object[] { namedEntityType, entityTypes, o });
}
Set<String> types = nerTypeMappings.get(namedEntityType);
if (types == null) {
//add new element to the mapping
types = new HashSet<String>();
nerTypeMappings.put(namedEntityType, types);
}
for (String entityType : entityTypes.split(";")) {
entityType = entityType.trim();
if (!entityType.isEmpty()) {
String typeUri;
if ("*".equals(entityType)) {
//null is used as wildcard
typeUri = null;
} else {
typeUri = NamespaceMappingUtils.getConfiguredUri(prefixService, NAMED_ENTITY_TYPE_MAPPINGS, entityType);
}
log.info(" - add {} > {}", namedEntityType, typeUri);
types.add(typeUri);
}
//else ignore empty mapping
}
}
}
} else {
//no mappings defined ... set wildcard mapping
log.info(" - No Named Entity type mappings configured. Will use wildcard mappings");
nerTypeMappings = Collections.singletonMap(null, Collections.<String>singleton(null));
}
}
//(11) start tracking the SolrCore
try {
solrServerTracker = new RegisteredSolrServerTracker(bundleContext, indexReference, null) {
@Override
public void removedService(ServiceReference reference, Object service) {
log.info(" ... SolrCore for {} was removed!", reference);
//try to get an other serviceReference from the tracker
if (reference.equals(FstLinkingEngineComponent.this.solrServerReference)) {
updateEngineRegistration(solrServerTracker.getServiceReference(), null);
} else {
log.info(" - removed SolrCore was not used for FST linking");
}
super.removedService(reference, service);
}
@Override
public void modifiedService(ServiceReference reference, Object service) {
log.info(" ... SolrCore for {} was updated!", indexReference);
updateEngineRegistration(solrServerTracker.getServiceReference(), null);
super.modifiedService(reference, service);
}
@Override
public SolrServer addingService(ServiceReference reference) {
SolrServer server = super.addingService(reference);
if (solrCore != null) {
log.info("Multiple SolrCores for name {}! Will update engine " + "with the newly added {}!", new Object[] { solrCore.getName(), indexReference, reference });
}
updateEngineRegistration(reference, server);
return server;
}
};
} catch (InvalidSyntaxException e) {
throw new ConfigurationException(SOLR_CORE, "parsed SolrCore name '" + value.toString() + "' is invalid (expected: '[{server-name}:]{indexname}'");
}
try {
solrServerTracker.open();
} catch (RuntimeException e) {
//FIX for STANBOL-1416 (see https://issues.apache.org/jira/browse/STANBOL-1416)
//If an available SolrCore can not be correctly initialized we will
//get the exception here. In this case we want this component to be
//activated and waiting for further service events. Because of that
//we catch here the exception.
log.debug("Error while processing existing SolrCore Service during " + "opening SolrServiceTracker ... waiting for further service" + "Events", e);
}
}
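Step (6) above is the core ThreadFactoryBuilder pattern this engine relies on: named, low-priority daemon threads for background FST model building, with an old pool left to drain rather than interrupted when the configuration changes. A minimal standalone sketch of that pattern follows; the class and method names are hypothetical, and unlike the Stanbol code it calls shutdown() on the old pool, which stops new submissions but still never interrupts running tasks.

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.ThreadFactory;
import com.google.common.util.concurrent.ThreadFactoryBuilder;

public class BackgroundPoolSketch {

    private ExecutorService fstCreatorService;

    /** Replaces the worker pool without interrupting in-flight tasks. */
    public synchronized void rebuildPool(String engineName, int poolSize) {
        //named, low-priority daemon threads: they never block JVM shutdown
        //and are easy to spot in thread dumps
        ThreadFactory factory = new ThreadFactoryBuilder()
                .setDaemon(true)
                .setPriority(Thread.MIN_PRIORITY)
                .setNameFormat(engineName + "-FstRuntimeCreation-thread-%d")
                .build();
        if (fstCreatorService != null) {
            //shutdown() (never shutdownNow()) so running tasks are not
            //interrupted and shared resources stay intact
            fstCreatorService.shutdown();
        }
        fstCreatorService = Executors.newFixedThreadPool(poolSize, factory);
    }
}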
use of com.google.common.util.concurrent.ThreadFactoryBuilder in project bitsquare by bitsquare.
the class SeedNodeMain method main.
public static void main(String[] args) throws Exception {
final ThreadFactory threadFactory = new ThreadFactoryBuilder().setNameFormat("SeedNodeMain").setDaemon(true).build();
UserThread.setExecutor(Executors.newSingleThreadExecutor(threadFactory));
// We don't want to do the full argument parsing here as that might easily change in future versions
// So we only handle the absolute minimum which is APP_NAME, APP_DATA_DIR_KEY and USER_DATA_DIR
BitsquareEnvironment.setDefaultAppName("Bitsquare_seednode");
OptionParser parser = new OptionParser();
parser.allowsUnrecognizedOptions();
parser.accepts(AppOptionKeys.USER_DATA_DIR_KEY, description("User data directory", DEFAULT_USER_DATA_DIR)).withRequiredArg();
parser.accepts(AppOptionKeys.APP_NAME_KEY, description("Application name", DEFAULT_APP_NAME)).withRequiredArg();
OptionSet options;
try {
options = parser.parse(args);
} catch (OptionException ex) {
System.out.println("error: " + ex.getMessage());
System.out.println();
parser.printHelpOn(System.out);
System.exit(EXIT_FAILURE);
return;
}
BitsquareEnvironment bitsquareEnvironment = new BitsquareEnvironment(options);
// need to call that before new SeedNodeMain().execute(args)
BitsquareExecutable.initAppDir(bitsquareEnvironment.getProperty(AppOptionKeys.APP_DATA_DIR_KEY));
// For some reason the JavaFX launch process results in us losing the thread context class loader: reset it.
// In order to work around a bug in JavaFX 8u25 and below, you must include the following code as the first line of your realMain method:
Thread.currentThread().setContextClassLoader(SeedNodeMain.class.getClassLoader());
new SeedNodeMain().execute(args);
}
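The setDaemon(true) call above is what lets main() return without shutting the executor down: daemon threads never keep the JVM alive. A tiny self-contained demo of that property (the class name is illustrative, not from Bitsquare):

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import com.google.common.util.concurrent.ThreadFactoryBuilder;

public class DaemonExecutorDemo {

    public static void main(String[] args) throws Exception {
        ExecutorService executor = Executors.newSingleThreadExecutor(
                new ThreadFactoryBuilder().setNameFormat("DaemonExecutorDemo").setDaemon(true).build());
        //get() waits for the task, then main() simply returns: the JVM can
        //exit because the pool thread is a daemon. Without setDaemon(true)
        //the JVM would keep running until executor.shutdown() is called.
        executor.submit(() -> System.out.println("running on " + Thread.currentThread().getName())).get();
    }
}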
use of com.google.common.util.concurrent.ThreadFactoryBuilder in project stanbol by apache.
the class EntityhubDereferenceEngine method activate.
@Activate
@SuppressWarnings("unchecked")
protected void activate(ComponentContext ctx) throws ConfigurationException {
Dictionary<String, Object> properties = ctx.getProperties();
bundleContext = ctx.getBundleContext();
log.info("> activate {}", getClass().getSimpleName());
//get the metadata later set to the enhancement engine
DereferenceEngineConfig engineConfig = new DereferenceEngineConfig(properties, prefixService);
log.debug(" - engineName: {}", engineConfig.getEngineName());
//parse the Entityhub Site used for dereferencing
Object value = properties.get(SITE_ID);
//init the EntitySource
if (value == null) {
//all referenced sites
siteName = "*";
} else {
siteName = value.toString();
}
if (siteName.isEmpty()) {
siteName = "*";
}
log.debug(" - siteName: {}", siteName);
final boolean sharedPoolState;
value = properties.get(SHARED_THREAD_POOL_STATE);
if (value instanceof Boolean) {
sharedPoolState = ((Boolean) value).booleanValue();
} else if (value != null && !StringUtils.isBlank(value.toString())) {
sharedPoolState = Boolean.parseBoolean(value.toString());
} else {
sharedPoolState = DEFAULT_SHARED_THREAD_POOL_STATE;
}
final ExecutorServiceProvider esProvider;
log.debug(" - shared thread pool state: {}", sharedPoolState);
if (sharedPoolState) {
esProvider = new SharedExecutorServiceProvider(ctx.getBundleContext());
} else {
//we need to create our own ExecutorService
value = properties.get(THREAD_POOL_SIZE);
if (value instanceof Number) {
this.threadPoolSize = ((Number) value).intValue();
} else if (value != null) {
try {
this.threadPoolSize = Integer.parseInt(value.toString());
} catch (NumberFormatException e) {
throw new ConfigurationException(THREAD_POOL_SIZE, "Value '" + value + "' (type: " + value.getClass().getName() + ") cannot be parsed " + "as Integer", e);
}
} else {
this.threadPoolSize = DEFAULT_THREAD_POOL_SIZE;
}
if (threadPoolSize > 0) {
String namePattern = getClass().getSimpleName() + "-" + engineConfig.getEngineName() + "-thread-%s";
ThreadFactory threadFactory = new ThreadFactoryBuilder().setNameFormat(namePattern).setDaemon(true).build();
log.debug(" - create Threadpool(namePattern='{}' | size='{}')", namePattern, threadPoolSize);
executorService = Executors.newFixedThreadPool(threadPoolSize, threadFactory);
} else {
log.debug(" - no thread pool configured (poolSize: {})", threadPoolSize);
executorService = null;
}
esProvider = new StaticExecutorServiceProvider(executorService);
}
//init the tracking entity searcher
trackedServiceCount = 0;
if (Entityhub.ENTITYHUB_IDS.contains(siteName.toLowerCase())) {
log.info(" ... init Entityhub dereferencer");
entityDereferencer = new EntityhubDereferencer(bundleContext, this, esProvider);
} else if (siteName.equals("*")) {
log.info(" ... init dereferencer for all referenced sites");
entityDereferencer = new SitesDereferencer(bundleContext, this, esProvider);
} else {
log.info(" ... init dereferencer for referenced site {}", siteName);
entityDereferencer = new SiteDereferencer(bundleContext, siteName, this, esProvider);
}
//set the namespace prefix service to the dereferencer
entityDereferencer.setNsPrefixService(prefixService);
//now parse dereference field config
entityDereferencer.setDereferencedFields(engineConfig.getDereferenceFields());
entityDereferencer.setLdPath(engineConfig.getLdPathProgram());
//we want to use our own DereferenceContext impl
entityDereferenceEngine = new EntityDereferenceEngine(entityDereferencer, engineConfig, new DereferenceContextFactory() {
@Override
public DereferenceContext createContext(EntityDereferenceEngine engine, Map<String, Object> enhancementProperties) throws DereferenceConfigurationException {
return new EntityhubDereferenceContext(engine, enhancementProperties);
}
});
//NOTE: registration of this instance as OSGI service is done as soon as the
// entityhub service backing the entityDereferencer is available.
//finally start tracking
entityDereferencer.open();
}
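The sharedPoolState branch above decides between a pool shared across engines and a private one per engine. The sketch below distills that decision; every name in it is hypothetical, since the real code goes through Stanbol's ExecutorServiceProvider implementations:

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.function.Supplier;
import com.google.common.util.concurrent.ThreadFactoryBuilder;

public class PoolChoiceSketch {

    /** Returns a supplier of either a shared pool or a newly created private one. */
    public static Supplier<ExecutorService> choosePool(boolean shared,
            Supplier<ExecutorService> sharedLookup, String engineName, int poolSize) {
        if (shared) {
            //resolve lazily so the shared pool's lifecycle stays owned elsewhere
            return sharedLookup;
        }
        ExecutorService own;
        if (poolSize > 0) {
            //private pool: named daemon threads, one pool per engine instance
            own = Executors.newFixedThreadPool(poolSize, new ThreadFactoryBuilder()
                    .setNameFormat(engineName + "-thread-%d")
                    .setDaemon(true)
                    .build());
        } else {
            //no pool configured: callers fall back to synchronous execution
            own = null;
        }
        return () -> own;
    }
}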
use of com.google.common.util.concurrent.ThreadFactoryBuilder in project GeoGig by boundlessgeo.
the class DataStoreConcurrencyTest method beforeTest.
@Before
public void beforeTest() throws Exception {
File workingDirectory = tmp.newFolder("repo");
File userHomeDirectory = tmp.newFolder("home");
TestPlatform platform = new TestPlatform(workingDirectory);
platform.setUserHome(userHomeDirectory);
Context injector = new CLITestContextBuilder(platform).build();
GeoGIG geogig = new GeoGIG(injector);
geogig.command(InitOp.class).call();
geogig.command(ConfigOp.class).setAction(ConfigAction.CONFIG_SET).setName("user.name").setValue("gabriel").call();
geogig.command(ConfigOp.class).setAction(ConfigAction.CONFIG_SET).setName("user.email").setValue("gabriel@roldan.example.com").call();
store = new GeoGigDataStore(geogig);
store.createSchema(pointType);
editThreads = Executors.newFixedThreadPool(writeThreadCount, new ThreadFactoryBuilder().setNameFormat("edit-thread-%d").build());
readThreads = Executors.newFixedThreadPool(readThreadCount, new ThreadFactoryBuilder().setNameFormat("read-thread-%d").build());
initialCommitCount = copyOf(store.getGeogig().command(LogOp.class).call()).size();
}
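The two pools above differ only in their name formats, which is exactly what makes a hanging reader or writer attributable in a thread dump. A hedged fixture sketch of the same pattern (pool sizes and the teardown are illustrative; the GeoGig test's own teardown is not shown in this excerpt):

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
import org.junit.After;
import org.junit.Before;
import com.google.common.util.concurrent.ThreadFactoryBuilder;

public class ConcurrencyFixtureSketch {

    private ExecutorService editThreads;
    private ExecutorService readThreads;

    @Before
    public void setUp() {
        //distinct name formats identify writers vs. readers in stack traces
        editThreads = Executors.newFixedThreadPool(4,
                new ThreadFactoryBuilder().setNameFormat("edit-thread-%d").build());
        readThreads = Executors.newFixedThreadPool(4,
                new ThreadFactoryBuilder().setNameFormat("read-thread-%d").build());
    }

    @After
    public void tearDown() throws InterruptedException {
        //always release test pools; non-daemon threads otherwise leak across tests
        editThreads.shutdownNow();
        readThreads.shutdownNow();
        editThreads.awaitTermination(5, TimeUnit.SECONDS);
        readThreads.awaitTermination(5, TimeUnit.SECONDS);
    }
}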
use of com.google.common.util.concurrent.ThreadFactoryBuilder in project opennms by OpenNMS.
the class StressCommand method doExecute.
@Override
protected Void doExecute() {
// Apply sane lower bounds to all of the configurable options
intervalInSeconds = Math.max(1, intervalInSeconds);
numberOfNodes = Math.max(1, numberOfNodes);
numberOfInterfacesPerNode = Math.max(1, numberOfInterfacesPerNode);
numberOfGroupsPerInterface = Math.max(1, numberOfGroupsPerInterface);
numberOfNumericAttributesPerGroup = Math.max(0, numberOfNumericAttributesPerGroup);
numberOfStringAttributesPerGroup = Math.max(0, numberOfStringAttributesPerGroup);
reportIntervalInSeconds = Math.max(1, reportIntervalInSeconds);
numberOfGeneratorThreads = Math.max(1, numberOfGeneratorThreads);
stringVariationFactor = Math.max(0, stringVariationFactor);
if (stringVariationFactor > 0) {
stringAttributesVaried = metrics.meter("string-attributes-varied");
}
// Display the effective settings and rates
double attributesPerSecond = (1 / (double) intervalInSeconds) * numberOfGroupsPerInterface * numberOfInterfacesPerNode * numberOfNodes;
System.out.printf("Generating collection sets every %d seconds\n", intervalInSeconds);
System.out.printf("\t for %d nodes\n", numberOfNodes);
System.out.printf("\t with %d interfaces\n", numberOfInterfacesPerNode);
System.out.printf("\t with %d attribute groups\n", numberOfGroupsPerInterface);
System.out.printf("\t with %d numeric attributes\n", numberOfNumericAttributesPerGroup);
System.out.printf("\t with %d string attributes\n", numberOfStringAttributesPerGroup);
System.out.printf("Across %d threads\n", numberOfGeneratorThreads);
if (stringVariationFactor > 0) {
System.out.printf("With string variation factor %d\n", stringVariationFactor);
}
System.out.printf("Which will yield an effective\n");
System.out.printf("\t %.2f numeric attributes per second\n", numberOfNumericAttributesPerGroup * attributesPerSecond);
System.out.printf("\t %.2f string attributes per second\n", numberOfStringAttributesPerGroup * attributesPerSecond);
ConsoleReporter reporter = ConsoleReporter.forRegistry(metrics).convertRatesTo(TimeUnit.SECONDS).convertDurationsTo(TimeUnit.MILLISECONDS).build();
// Setup the executor
ThreadFactory threadFactory = new ThreadFactoryBuilder().setNameFormat("Metrics Stress Tool Generator #%d").build();
ExecutorService executor = Executors.newFixedThreadPool(numberOfGeneratorThreads, threadFactory);
// Setup auxiliary objects needed by the persister
ServiceParameters params = new ServiceParameters(Collections.emptyMap());
RrdRepository repository = new RrdRepository();
repository.setStep(Math.max(intervalInSeconds, 1));
repository.setHeartBeat(repository.getStep() * 2);
if (rras != null && rras.size() > 0) {
repository.setRraList(rras);
} else {
// Use the default list of RRAs we provide in our stock configuration files
repository.setRraList(Lists.newArrayList("RRA:AVERAGE:0.5:1:2016", "RRA:AVERAGE:0.5:12:1488", "RRA:AVERAGE:0.5:288:366", "RRA:MAX:0.5:288:366", "RRA:MIN:0.5:288:366"));
}
repository.setRrdBaseDir(Paths.get(System.getProperty("opennms.home"), "share", "rrd", "snmp").toFile());
// Calculate how we fast we should insert the collection sets
int sleepTimeInMillisBetweenNodes = 0;
int sleepTimeInSecondsBetweenIterations = 0;
System.out.printf("Sleeping for\n");
if (burst) {
sleepTimeInSecondsBetweenIterations = intervalInSeconds;
System.out.printf("\t %d seconds between batches\n", sleepTimeInSecondsBetweenIterations);
} else {
// We want to "stream" the collection sets
sleepTimeInMillisBetweenNodes = Math.round((((float) intervalInSeconds * 1000) / numberOfNodes) * numberOfGeneratorThreads);
System.out.printf("\t %d milliseconds between nodes\n", sleepTimeInMillisBetweenNodes);
}
// Start generating, and keep generating until we're interrupted
try {
reporter.start(reportIntervalInSeconds, TimeUnit.SECONDS);
while (true) {
final Context context = batchTimer.time();
try {
// Split the tasks up among the threads
List<Future<Void>> futures = new ArrayList<>();
for (int generatorThreadId = 0; generatorThreadId < numberOfGeneratorThreads; generatorThreadId++) {
futures.add(executor.submit(generateAndPersistCollectionSets(params, repository, generatorThreadId, sleepTimeInMillisBetweenNodes)));
}
// Wait for all the tasks to complete before starting others
for (Future<Void> future : futures) {
future.get();
}
} catch (InterruptedException | ExecutionException e) {
break;
} finally {
context.stop();
}
try {
Thread.sleep(sleepTimeInSecondsBetweenIterations * 1000);
} catch (InterruptedException e) {
break;
}
}
} finally {
reporter.stop();
abort.set(true);
executor.shutdownNow();
}
return null;
}
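The "#%d" in the generator's name format above is not literal text: ThreadFactoryBuilder feeds a per-factory counter through String.format, so each new thread gets the next number. A minimal demonstration:

import java.util.concurrent.ThreadFactory;
import com.google.common.util.concurrent.ThreadFactoryBuilder;

public class NameFormatDemo {

    public static void main(String[] args) {
        ThreadFactory factory = new ThreadFactoryBuilder()
                .setNameFormat("Metrics Stress Tool Generator #%d")
                .build();
        //prints "Metrics Stress Tool Generator #0", "... #1", "... #2"
        for (int i = 0; i < 3; i++) {
            System.out.println(factory.newThread(() -> { }).getName());
        }
    }
}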