Use of java.util.concurrent.ExecutorCompletionService in project hive by apache.
The class OrcInputFormat, method generateSplitsInfo.
static List<OrcSplit> generateSplitsInfo(Configuration conf, Context context) throws IOException {
  if (LOG.isInfoEnabled()) {
    LOG.info("ORC pushdown predicate: " + context.sarg);
  }
  boolean useFileIdsConfig = HiveConf.getBoolVar(conf, ConfVars.HIVE_ORC_INCLUDE_FILE_ID_IN_SPLITS);
  // Sharing this state assumes splits will succeed or fail to get it together (same FS).
  // We also start with null and only set it to true on the first call, so we would only do
  // the global-disable thing on the first failure w/the API error, not any random failure.
  Ref<Boolean> useFileIds = Ref.from(useFileIdsConfig ? null : false);
  boolean allowSyntheticFileIds = useFileIdsConfig
      && HiveConf.getBoolVar(conf, ConfVars.HIVE_ORC_ALLOW_SYNTHETIC_FILE_ID_IN_SPLITS);
  List<OrcSplit> splits = Lists.newArrayList();
  List<Future<AcidDirInfo>> pathFutures = Lists.newArrayList();
  List<Future<Void>> strategyFutures = Lists.newArrayList();
  final List<Future<List<OrcSplit>>> splitFutures = Lists.newArrayList();
  UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
  // multi-threaded file statuses and split strategy
  Path[] paths = getInputPaths(conf);
  CompletionService<AcidDirInfo> ecs = new ExecutorCompletionService<>(Context.threadPool);
  for (Path dir : paths) {
    FileSystem fs = dir.getFileSystem(conf);
    FileGenerator fileGenerator = new FileGenerator(context, fs, dir, useFileIds, ugi);
    pathFutures.add(ecs.submit(fileGenerator));
  }
  boolean isTransactionalTableScan = HiveConf.getBoolVar(conf, ConfVars.HIVE_TRANSACTIONAL_TABLE_SCAN);
  boolean isSchemaEvolution = HiveConf.getBoolVar(conf, ConfVars.HIVE_SCHEMA_EVOLUTION);
  TypeDescription readerSchema =
      OrcInputFormat.getDesiredRowTypeDescr(conf, isTransactionalTableScan, Integer.MAX_VALUE);
  List<OrcProto.Type> readerTypes = null;
  if (readerSchema != null) {
    readerTypes = OrcUtils.getOrcTypes(readerSchema);
  }
  if (LOG.isDebugEnabled()) {
    LOG.debug("Generate splits schema evolution property " + isSchemaEvolution
        + " reader schema " + (readerSchema == null ? "NULL" : readerSchema.toString())
        + " transactional scan property " + isTransactionalTableScan);
  }
  // complete path futures and schedule split generation
  try {
    CombinedCtx combinedCtx = (context.splitStrategyBatchMs > 0) ? new CombinedCtx() : null;
    long maxWaitUs = context.splitStrategyBatchMs * 1000000;
    int resultsLeft = paths.length;
    while (resultsLeft > 0) {
      AcidDirInfo adi = null;
      if (combinedCtx != null && combinedCtx.combined != null) {
        long waitTimeUs = combinedCtx.combineStartUs + maxWaitUs - System.nanoTime();
        if (waitTimeUs >= 0) {
          Future<AcidDirInfo> f = ecs.poll(waitTimeUs, TimeUnit.NANOSECONDS);
          adi = (f == null) ? null : f.get();
        }
      } else {
        adi = ecs.take().get();
      }
      if (adi == null) {
        // We were combining SS-es and the time has expired.
        assert combinedCtx.combined != null;
        scheduleSplits(combinedCtx.combined, context, splitFutures, strategyFutures, splits);
        combinedCtx.combined = null;
        continue;
      }
      // We have received a new directory information, make split strategies.
      --resultsLeft;
      // The reason why we can get a list of split strategies here is because for ACID split-update
      // case when we have a mix of original base files & insert deltas, we will produce two
      // independent split strategies for them. There is a global flag 'isOriginal' that is set
      // on a per split strategy basis and it has to be same for all the files in that strategy.
      List<SplitStrategy<?>> splitStrategies = determineSplitStrategies(combinedCtx, context, adi.fs,
          adi.splitPath, adi.acidInfo, adi.baseFiles, adi.parsedDeltas, readerTypes, ugi,
          allowSyntheticFileIds);
      for (SplitStrategy<?> splitStrategy : splitStrategies) {
        if (isDebugEnabled) {
          LOG.debug("Split strategy: {}", splitStrategy);
        }
        // This works purely by magic, because we know which strategy produces which type.
        if (splitStrategy instanceof ETLSplitStrategy) {
          scheduleSplits((ETLSplitStrategy) splitStrategy, context, splitFutures, strategyFutures, splits);
        } else {
          @SuppressWarnings("unchecked")
          List<OrcSplit> readySplits = (List<OrcSplit>) splitStrategy.getSplits();
          splits.addAll(readySplits);
        }
      }
    }
    // Run the last combined strategy, if any.
    if (combinedCtx != null && combinedCtx.combined != null) {
      scheduleSplits(combinedCtx.combined, context, splitFutures, strategyFutures, splits);
      combinedCtx.combined = null;
    }
    // complete split futures
    for (Future<Void> ssFuture : strategyFutures) {
      // Make sure we get exceptions strategies might have thrown.
      ssFuture.get();
    }
    // All the split strategies are done, so it must be safe to access splitFutures.
    for (Future<List<OrcSplit>> splitFuture : splitFutures) {
      splits.addAll(splitFuture.get());
    }
  } catch (Exception e) {
    cancelFutures(pathFutures);
    cancelFutures(strategyFutures);
    cancelFutures(splitFutures);
    throw new RuntimeException("ORC split generation failed with exception: " + e.getMessage(), e);
  }
  if (context.cacheStripeDetails) {
    LOG.info("FooterCacheHitRatio: " + context.cacheHitCounter.get() + "/" + context.numFilesCounter.get());
  }
  if (isDebugEnabled) {
    for (OrcSplit split : splits) {
      LOG.debug(split + " projected_columns_uncompressed_size: " + split.getColumnarProjectionSize());
    }
  }
  return splits;
}
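The shape of the method is submit-then-drain: each directory scan runs as a Callable on a shared pool, and the consumer loop takes results in completion order, switching to poll() with a timeout once it holds a partially combined strategy so the batch can be flushed when the window expires. A minimal, self-contained sketch of that take()/poll() pattern follows; the DirResult class and the directory names are illustrative placeholders, not Hive APIs.

  import java.util.Arrays;
  import java.util.List;
  import java.util.concurrent.CompletionService;
  import java.util.concurrent.ExecutorCompletionService;
  import java.util.concurrent.ExecutorService;
  import java.util.concurrent.Executors;
  import java.util.concurrent.Future;
  import java.util.concurrent.TimeUnit;

  public class CompletionOrderSketch {
    static class DirResult {
      final String dir;
      DirResult(String dir) { this.dir = dir; }
    }

    public static void main(String[] args) throws Exception {
      ExecutorService pool = Executors.newFixedThreadPool(4);
      CompletionService<DirResult> ecs = new ExecutorCompletionService<>(pool);
      List<String> dirs = Arrays.asList("a", "b", "c");
      for (String d : dirs) {
        // Stand-in for FileGenerator: any Callable<DirResult> works here.
        ecs.submit(() -> new DirResult(d));
      }
      int resultsLeft = dirs.size();
      while (resultsLeft > 0) {
        // poll() with a timeout returns null if nothing finished in time;
        // the Hive code uses that as the cue to flush its combined batch.
        Future<DirResult> f = ecs.poll(100, TimeUnit.MILLISECONDS);
        if (f == null) {
          continue; // flush the batched split strategies here
        }
        resultsLeft--;
        System.out.println("finished: " + f.get().dir); // get() rethrows task failures
      }
      pool.shutdown();
    }
  }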
Use of java.util.concurrent.ExecutorCompletionService in project hbase by apache.
The class TestIdLock, method testMultipleClients.
@Test
public void testMultipleClients() throws Exception {
  ExecutorService exec = Executors.newFixedThreadPool(NUM_THREADS);
  try {
    ExecutorCompletionService<Boolean> ecs = new ExecutorCompletionService<>(exec);
    for (int i = 0; i < NUM_THREADS; ++i) {
      ecs.submit(new IdLockTestThread("client_" + i));
    }
    for (int i = 0; i < NUM_THREADS; ++i) {
      Future<Boolean> result = ecs.take();
      assertTrue(result.get());
    }
    idLock.assertMapEmpty();
  } finally {
    exec.shutdown();
    exec.awaitTermination(5000, TimeUnit.MILLISECONDS);
  }
}
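The test leans on a basic property of ExecutorCompletionService: after N submissions, N take() calls return every result in completion order, so a failing client surfaces as soon as it finishes rather than in submission order, and get() rethrows whatever the worker threw. A stripped-down sketch of that submit/take contract, with a placeholder worker standing in for IdLockTestThread:

  import java.util.concurrent.ExecutorCompletionService;
  import java.util.concurrent.ExecutorService;
  import java.util.concurrent.Executors;
  import java.util.concurrent.TimeUnit;

  public class SubmitThenTakeSketch {
    public static void main(String[] args) throws Exception {
      final int numThreads = 4;
      ExecutorService exec = Executors.newFixedThreadPool(numThreads);
      try {
        ExecutorCompletionService<Boolean> ecs = new ExecutorCompletionService<>(exec);
        for (int i = 0; i < numThreads; i++) {
          final int id = i;
          ecs.submit(() -> {
            // placeholder for the contended work the real test performs
            return id >= 0;
          });
        }
        for (int i = 0; i < numThreads; i++) {
          // take() blocks until *some* task finishes; get() rethrows its exception
          if (!ecs.take().get()) {
            throw new AssertionError("worker failed");
          }
        }
      } finally {
        exec.shutdown();
        exec.awaitTermination(5, TimeUnit.SECONDS);
      }
    }
  }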
Use of java.util.concurrent.ExecutorCompletionService in project hive by apache.
The class LlapServiceDriver, method run.
private int run(String[] args) throws Exception {
  LlapOptionsProcessor optionsProcessor = new LlapOptionsProcessor();
  final LlapOptions options = optionsProcessor.processOptions(args);
  final Properties propsDirectOptions = new Properties();
  if (options == null) {
    // help
    return 1;
  }
  // Working directory.
  Path tmpDir = new Path(options.getDirectory());
  if (conf == null) {
    throw new Exception("Cannot load any configuration to run command");
  }
  final long t0 = System.nanoTime();
  final FileSystem fs = FileSystem.get(conf);
  final FileSystem lfs = FileSystem.getLocal(conf).getRawFileSystem();
  int threadCount = Math.max(1, Runtime.getRuntime().availableProcessors() / 2);
  final ExecutorService executor = Executors.newFixedThreadPool(threadCount,
      new ThreadFactoryBuilder().setNameFormat("llap-pkg-%d").build());
  final CompletionService<Void> asyncRunner = new ExecutorCompletionService<Void>(executor);
  int rc = 0;
  try {
    // needed so that the file is actually loaded into configuration.
    for (String f : NEEDED_CONFIGS) {
      conf.addResource(f);
      if (conf.getResource(f) == null) {
        throw new Exception("Unable to find required config file: " + f);
      }
    }
    for (String f : OPTIONAL_CONFIGS) {
      conf.addResource(f);
    }
    conf.reloadConfiguration();
    populateConfWithLlapProperties(conf, options.getConfig());
    if (options.getName() != null) {
      // update service registry configs - caveat: this has nothing to do with the actual settings
      // as read by the AM
      // if needed, use --hiveconf llap.daemon.service.hosts=@llap0 to dynamically switch between
      // instances
      conf.set(ConfVars.LLAP_DAEMON_SERVICE_HOSTS.varname, "@" + options.getName());
      propsDirectOptions.setProperty(ConfVars.LLAP_DAEMON_SERVICE_HOSTS.varname, "@" + options.getName());
    }
    if (options.getLogger() != null) {
      HiveConf.setVar(conf, ConfVars.LLAP_DAEMON_LOGGER, options.getLogger());
      propsDirectOptions.setProperty(ConfVars.LLAP_DAEMON_LOGGER.varname, options.getLogger());
    }
    boolean isDirect = HiveConf.getBoolVar(conf, HiveConf.ConfVars.LLAP_ALLOCATOR_DIRECT);
    if (options.getSize() != -1) {
      if (options.getCache() != -1) {
        if (HiveConf.getBoolVar(conf, HiveConf.ConfVars.LLAP_ALLOCATOR_MAPPED) == false) {
          // direct heap allocations need to be safer
          Preconditions.checkArgument(options.getCache() < options.getSize(),
              "Cache size (" + LlapUtil.humanReadableByteCount(options.getCache()) + ") has to be smaller"
                  + " than the container sizing (" + LlapUtil.humanReadableByteCount(options.getSize()) + ")");
        } else if (options.getCache() < options.getSize()) {
          LOG.warn("Note that this might need YARN physical memory monitoring to be turned off "
              + "(yarn.nodemanager.pmem-check-enabled=false)");
        }
      }
      if (options.getXmx() != -1) {
        Preconditions.checkArgument(options.getXmx() < options.getSize(),
            "Working memory (Xmx=" + LlapUtil.humanReadableByteCount(options.getXmx()) + ") has to be"
                + " smaller than the container sizing (" + LlapUtil.humanReadableByteCount(options.getSize()) + ")");
      }
      if (isDirect && !HiveConf.getBoolVar(conf, HiveConf.ConfVars.LLAP_ALLOCATOR_MAPPED)) {
        // direct and not memory mapped
        Preconditions.checkArgument(options.getXmx() + options.getCache() <= options.getSize(),
            "Working memory (Xmx=" + LlapUtil.humanReadableByteCount(options.getXmx()) + ") + cache size ("
                + LlapUtil.humanReadableByteCount(options.getCache()) + ") has to be smaller than the container sizing ("
                + LlapUtil.humanReadableByteCount(options.getSize()) + ")");
      }
    }
    if (options.getExecutors() != -1) {
      conf.setLong(ConfVars.LLAP_DAEMON_NUM_EXECUTORS.varname, options.getExecutors());
      propsDirectOptions.setProperty(ConfVars.LLAP_DAEMON_NUM_EXECUTORS.varname, String.valueOf(options.getExecutors()));
      // TODO: vcpu settings - possibly when DRFA works right
    }
    if (options.getIoThreads() != -1) {
      conf.setLong(ConfVars.LLAP_IO_THREADPOOL_SIZE.varname, options.getIoThreads());
      propsDirectOptions.setProperty(ConfVars.LLAP_IO_THREADPOOL_SIZE.varname, String.valueOf(options.getIoThreads()));
    }
    long cache = -1, xmx = -1;
    if (options.getCache() != -1) {
      cache = options.getCache();
      conf.set(HiveConf.ConfVars.LLAP_IO_MEMORY_MAX_SIZE.varname, Long.toString(cache));
      propsDirectOptions.setProperty(HiveConf.ConfVars.LLAP_IO_MEMORY_MAX_SIZE.varname, Long.toString(cache));
    }
    if (options.getXmx() != -1) {
      // Needs more explanation here
      // Xmx is not the max heap value in JDK8. You need to subtract 50% of the survivor fraction
      // from this, to get actual usable memory before it goes into GC
      xmx = options.getXmx();
      long xmxMb = (xmx / (1024L * 1024L));
      conf.setLong(ConfVars.LLAP_DAEMON_MEMORY_PER_INSTANCE_MB.varname, xmxMb);
      propsDirectOptions.setProperty(ConfVars.LLAP_DAEMON_MEMORY_PER_INSTANCE_MB.varname, String.valueOf(xmxMb));
    }
    long size = options.getSize();
    if (size == -1) {
      long heapSize = xmx;
      if (!isDirect) {
        heapSize += cache;
      }
      size = Math.min((long) (heapSize * 1.2), heapSize + 1024L * 1024 * 1024);
      if (isDirect) {
        size += cache;
      }
    }
    long containerSize = size / (1024 * 1024);
    final long minAlloc = conf.getInt(YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_MB, -1);
    Preconditions.checkArgument(containerSize >= minAlloc,
        "Container size (" + LlapUtil.humanReadableByteCount(options.getSize()) + ") should be greater"
            + " than minimum allocation(" + LlapUtil.humanReadableByteCount(minAlloc * 1024L * 1024L) + ")");
    conf.setLong(ConfVars.LLAP_DAEMON_YARN_CONTAINER_MB.varname, containerSize);
    propsDirectOptions.setProperty(ConfVars.LLAP_DAEMON_YARN_CONTAINER_MB.varname, String.valueOf(containerSize));
    LOG.info("Memory settings: container memory: {} executor memory: {} cache memory: {}",
        LlapUtil.humanReadableByteCount(options.getSize()), LlapUtil.humanReadableByteCount(options.getXmx()),
        LlapUtil.humanReadableByteCount(options.getCache()));
    if (options.getLlapQueueName() != null && !options.getLlapQueueName().isEmpty()) {
      conf.set(ConfVars.LLAP_DAEMON_QUEUE_NAME.varname, options.getLlapQueueName());
      propsDirectOptions.setProperty(ConfVars.LLAP_DAEMON_QUEUE_NAME.varname, options.getLlapQueueName());
    }
    final URL logger = conf.getResource(LlapConstants.LOG4j2_PROPERTIES_FILE);
    if (null == logger) {
      throw new Exception("Unable to find required config file: llap-daemon-log4j2.properties");
    }
    Path home = new Path(System.getenv("HIVE_HOME"));
    Path scriptParent = new Path(new Path(home, "scripts"), "llap");
    Path scripts = new Path(scriptParent, "bin");
    if (!lfs.exists(home)) {
      throw new Exception("Unable to find HIVE_HOME:" + home);
    } else if (!lfs.exists(scripts)) {
      LOG.warn("Unable to find llap scripts:" + scripts);
    }
    final Path libDir = new Path(tmpDir, "lib");
    final Path tezDir = new Path(libDir, "tez");
    final Path udfDir = new Path(libDir, "udfs");
    final Path confPath = new Path(tmpDir, "conf");
    lfs.mkdirs(confPath);
    NamedCallable<Void> downloadTez = new NamedCallable<Void>("downloadTez") {
      @Override
      public Void call() throws Exception {
        synchronized (fs) {
          String tezLibs = conf.get(TezConfiguration.TEZ_LIB_URIS);
          if (tezLibs == null) {
            LOG.warn("Missing tez.lib.uris in tez-site.xml");
          }
          if (LOG.isDebugEnabled()) {
            LOG.debug("Copying tez libs from " + tezLibs);
          }
          lfs.mkdirs(tezDir);
          fs.copyToLocalFile(new Path(tezLibs), new Path(libDir, "tez.tar.gz"));
          CompressionUtils.unTar(new Path(libDir, "tez.tar.gz").toString(), tezDir.toString(), true);
          lfs.delete(new Path(libDir, "tez.tar.gz"), false);
        }
        return null;
      }
    };
    NamedCallable<Void> copyLocalJars = new NamedCallable<Void>("copyLocalJars") {
      @Override
      public Void call() throws Exception {
        Class<?>[] dependencies = new Class<?>[] {
            LlapDaemonProtocolProtos.class, // llap-common
            LlapTezUtils.class, // llap-tez
            LlapInputFormat.class, // llap-server
            HiveInputFormat.class, // hive-exec
            SslSocketConnector.class, // hive-common (https deps)
            RegistryUtils.ServiceRecordMarshal.class, // ZK registry
            com.lmax.disruptor.RingBuffer.class, // disruptor
            org.apache.logging.log4j.Logger.class, // log4j-api
            org.apache.logging.log4j.core.Appender.class, // log4j-core
            org.apache.logging.slf4j.Log4jLogger.class, // log4j-slf4j
            org.apache.log4j.NDC.class // log4j-1.2-API needed for NDC
        };
        for (Class<?> c : dependencies) {
          Path jarPath = new Path(Utilities.jarFinderGetJar(c));
          lfs.copyFromLocalFile(jarPath, libDir);
          if (LOG.isDebugEnabled()) {
            LOG.debug("Copying " + jarPath + " to " + libDir);
          }
        }
        return null;
      }
    };
    // copy default aux classes (json/hbase)
    NamedCallable<Void> copyAuxJars = new NamedCallable<Void>("copyAuxJars") {
      @Override
      public Void call() throws Exception {
        for (String className : DEFAULT_AUX_CLASSES) {
          localizeJarForClass(lfs, libDir, className, false);
        }
        Collection<String> codecs = conf.getStringCollection("io.compression.codecs");
        if (codecs != null) {
          for (String codecClassName : codecs) {
            localizeJarForClass(lfs, libDir, codecClassName, false);
          }
        }
        if (options.getIsHBase()) {
          try {
            localizeJarForClass(lfs, libDir, HBASE_SERDE_CLASS, true);
            // HBase API is convoluted.
            Job fakeJob = new Job(new JobConf());
            TableMapReduceUtil.addDependencyJars(fakeJob);
            Collection<String> hbaseJars = fakeJob.getConfiguration().getStringCollection("tmpjars");
            for (String jarPath : hbaseJars) {
              if (!jarPath.isEmpty()) {
                lfs.copyFromLocalFile(new Path(jarPath), libDir);
              }
            }
          } catch (Throwable t) {
            String err = "Failed to add HBase jars. Use --auxhbase=false to avoid localizing them";
            LOG.error(err);
            System.err.println(err);
            throw new RuntimeException(t);
          }
        }
        HashSet<String> auxJars = new HashSet<>();
        // There are many ways to have AUX jars in Hive... sigh
        if (options.getIsHiveAux()) {
          // Note: we don't add ADDED jars, RELOADABLE jars, etc. That is by design; there are too many ways
          // to add jars in Hive, some of which are session/etc. specific. Env + conf + arg should be enough.
          addAuxJarsToSet(auxJars, conf.getAuxJars());
          addAuxJarsToSet(auxJars, System.getenv("HIVE_AUX_JARS_PATH"));
          LOG.info("Adding the following aux jars from the environment and configs: " + auxJars);
        }
        addAuxJarsToSet(auxJars, options.getAuxJars());
        for (String jarPath : auxJars) {
          lfs.copyFromLocalFile(new Path(jarPath), libDir);
        }
        return null;
      }

      private void addAuxJarsToSet(HashSet<String> auxJarSet, String auxJars) {
        if (auxJars != null && !auxJars.isEmpty()) {
          // TODO: transitive dependencies warning?
          String[] jarPaths = auxJars.split(",");
          for (String jarPath : jarPaths) {
            if (!jarPath.isEmpty()) {
              auxJarSet.add(jarPath);
            }
          }
        }
      }
    };
    NamedCallable<Void> copyUdfJars = new NamedCallable<Void>("copyUdfJars") {
      @Override
      public Void call() throws Exception {
        // UDFs
        final Set<String> allowedUdfs;
        if (HiveConf.getBoolVar(conf, HiveConf.ConfVars.LLAP_ALLOW_PERMANENT_FNS)) {
          synchronized (fs) {
            allowedUdfs = downloadPermanentFunctions(conf, udfDir);
          }
        } else {
          allowedUdfs = Collections.emptySet();
        }
        PrintWriter udfStream = new PrintWriter(lfs.create(
            new Path(confPath, StaticPermanentFunctionChecker.PERMANENT_FUNCTIONS_LIST)));
        for (String udfClass : allowedUdfs) {
          udfStream.println(udfClass);
        }
        udfStream.close();
        return null;
      }
    };
    String java_home;
    if (options.getJavaPath() == null || options.getJavaPath().isEmpty()) {
      java_home = System.getenv("JAVA_HOME");
      String jre_home = System.getProperty("java.home");
      if (java_home == null) {
        java_home = jre_home;
      } else if (!java_home.equals(jre_home)) {
        LOG.warn("Java versions might not match : JAVA_HOME=[{}],process jre=[{}]", java_home, jre_home);
      }
    } else {
      java_home = options.getJavaPath();
    }
    if (java_home == null || java_home.isEmpty()) {
      throw new RuntimeException(
          "Could not determine JAVA_HOME from command line parameters, environment or system properties");
    }
    LOG.info("Using [{}] for JAVA_HOME", java_home);
    NamedCallable<Void> copyConfigs = new NamedCallable<Void>("copyConfigs") {
      @Override
      public Void call() throws Exception {
        // Copy over the mandatory configs for the package.
        for (String f : NEEDED_CONFIGS) {
          copyConfig(lfs, confPath, f);
        }
        for (String f : OPTIONAL_CONFIGS) {
          try {
            copyConfig(lfs, confPath, f);
          } catch (Throwable t) {
            LOG.info("Error getting an optional config " + f + "; ignoring: " + t.getMessage());
          }
        }
        createLlapDaemonConfig(lfs, confPath, conf, propsDirectOptions, options.getConfig());
        setUpLogAndMetricConfigs(lfs, logger, confPath);
        return null;
      }
    };
    @SuppressWarnings("unchecked")
    final NamedCallable<Void>[] asyncWork =
        new NamedCallable[] { downloadTez, copyUdfJars, copyLocalJars, copyAuxJars, copyConfigs };
    @SuppressWarnings("unchecked")
    final Future<Void>[] asyncResults = new Future[asyncWork.length];
    for (int i = 0; i < asyncWork.length; i++) {
      asyncResults[i] = asyncRunner.submit(asyncWork[i]);
    }
    // TODO: need to move from Python to Java for the rest of the script.
    JSONObject configs = createConfigJson(containerSize, cache, xmx, java_home);
    writeConfigJson(tmpDir, lfs, configs);
    if (LOG.isDebugEnabled()) {
      LOG.debug("Config generation took " + (System.nanoTime() - t0) + " ns");
    }
    for (int i = 0; i < asyncWork.length; i++) {
      final long t1 = System.nanoTime();
      asyncResults[i].get();
      final long t2 = System.nanoTime();
      if (LOG.isDebugEnabled()) {
        LOG.debug(asyncWork[i].getName() + " waited for " + (t2 - t1) + " ns");
      }
    }
    if (options.isStarting()) {
      String version = System.getenv("HIVE_VERSION");
      if (version == null || version.isEmpty()) {
        version = DateTime.now().toString("ddMMMyyyy");
      }
      String outputDir = options.getOutput();
      Path packageDir = null;
      if (outputDir == null) {
        outputDir = OUTPUT_DIR_PREFIX + version;
        packageDir = new Path(Paths.get(".").toAbsolutePath().toString(), OUTPUT_DIR_PREFIX + version);
      } else {
        packageDir = new Path(outputDir);
      }
      rc = runPackagePy(args, tmpDir, scriptParent, version, outputDir);
      if (rc == 0) {
        LlapSliderUtils.startCluster(conf, options.getName(), "llap-" + version + ".zip", packageDir,
            HiveConf.getVar(conf, ConfVars.LLAP_DAEMON_QUEUE_NAME));
      }
    } else {
      rc = 0;
    }
  } finally {
    executor.shutdown();
    lfs.close();
    fs.close();
  }
  if (rc == 0) {
    if (LOG.isDebugEnabled()) {
      LOG.debug("Exiting successfully");
    }
  } else {
    LOG.info("Exiting with rc = " + rc);
  }
  return rc;
}
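Unlike the other examples, this driver never calls take(): it keeps the Future returned by each submit() and waits on the futures in submission order, which lets it log how long the main thread actually blocked for each named packaging step while those steps still overlap with the config generation done on the main thread. A condensed sketch of that structure, with hypothetical task names and sleeps standing in for the real packaging work:

  import java.util.ArrayList;
  import java.util.Arrays;
  import java.util.List;
  import java.util.concurrent.Callable;
  import java.util.concurrent.CompletionService;
  import java.util.concurrent.ExecutorCompletionService;
  import java.util.concurrent.ExecutorService;
  import java.util.concurrent.Executors;
  import java.util.concurrent.Future;

  public class OverlapAndWaitSketch {
    // Stand-in for Hive's NamedCallable: a Callable that announces when it finishes.
    static Callable<Void> named(String name, Runnable body) {
      return () -> {
        body.run();
        System.out.println(name + " done");
        return null;
      };
    }

    static void sleep(long ms) {
      try {
        Thread.sleep(ms);
      } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
      }
    }

    public static void main(String[] args) throws Exception {
      ExecutorService executor = Executors.newFixedThreadPool(2);
      CompletionService<Void> asyncRunner = new ExecutorCompletionService<>(executor);
      try {
        List<Callable<Void>> asyncWork = Arrays.asList(
            named("downloadTez", () -> sleep(200)),
            named("copyConfigs", () -> sleep(50)));
        List<Future<Void>> asyncResults = new ArrayList<>();
        for (Callable<Void> task : asyncWork) {
          asyncResults.add(asyncRunner.submit(task));
        }
        // ... the main thread does its own work here (config generation) ...
        for (int i = 0; i < asyncResults.size(); i++) {
          long t1 = System.nanoTime();
          asyncResults.get(i).get(); // rethrows the task's exception, if any
          System.out.println("waited " + (System.nanoTime() - t1) + " ns for task " + i);
        }
      } finally {
        executor.shutdown();
      }
    }
  }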
Use of java.util.concurrent.ExecutorCompletionService in project neo4j by neo4j.
The class InMemoryCountsStoreIntegrationTest, method concurrentWorkload.
@Test
public void concurrentWorkload() throws Exception {
  // GIVEN
  InMemoryCountsStore countStore = new InMemoryCountsStore();
  IntermediateStateTestManager intermediateStateTestManager = new IntermediateStateTestManager();
  ExecutorService executor = Executors.newFixedThreadPool(10);
  ExecutorCompletionService<Void> ecs = new ExecutorCompletionService<>(executor);
  List<Runnable> workers = new ArrayList<>(10);
  AtomicBoolean stop = new AtomicBoolean();
  for (int i = 0; i < 9; i++) {
    workers.add(new UpdateWorker(stop, intermediateStateTestManager, countStore));
  }
  workers.add(new SnapshotWorker(10, stop, intermediateStateTestManager, countStore));
  // WHEN
  for (Runnable worker : workers) {
    ecs.submit(worker, null);
  }
  // THEN
  for (int i = 0; i < workers.size(); i++) {
    ecs.take().get();
  }
  executor.shutdown();
}
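Here the workers are Runnables, so they are handed to the completion service via submit(task, result) with a null result; the take().get() loop then exists purely to block until all ten workers finish and to propagate any exception one of them threw. A compact sketch of that pattern with placeholder worker bodies:

  import java.util.ArrayList;
  import java.util.List;
  import java.util.concurrent.ExecutorCompletionService;
  import java.util.concurrent.ExecutorService;
  import java.util.concurrent.Executors;
  import java.util.concurrent.atomic.AtomicBoolean;

  public class RunnableWorkersSketch {
    public static void main(String[] args) throws Exception {
      ExecutorService executor = Executors.newFixedThreadPool(4);
      ExecutorCompletionService<Void> ecs = new ExecutorCompletionService<>(executor);
      AtomicBoolean stop = new AtomicBoolean();
      List<Runnable> workers = new ArrayList<>();
      for (int i = 0; i < 3; i++) {
        // stand-in for UpdateWorker: loop until told to stop
        workers.add(() -> {
          while (!stop.get()) {
            // apply updates here
          }
        });
      }
      // stand-in for SnapshotWorker: eventually signals the others to stop
      workers.add(() -> stop.set(true));
      for (Runnable worker : workers) {
        ecs.submit(worker, null); // Runnable + fixed (null) completion value
      }
      for (int i = 0; i < workers.size(); i++) {
        ecs.take().get(); // rethrows as ExecutionException if a worker failed
      }
      executor.shutdown();
    }
  }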
Use of java.util.concurrent.ExecutorCompletionService in project presto by prestodb.
The class Verifier, method run.
// Returns number of failed queries
public int run(List<QueryPair> queries) throws InterruptedException {
  ExecutorService executor = newFixedThreadPool(threadCount);
  CompletionService<Validator> completionService = new ExecutorCompletionService<>(executor);
  int totalQueries = queries.size() * config.getSuiteRepetitions();
  log.info("Total Queries: %d", totalQueries);
  log.info("Whitelisted Queries: %s", Joiner.on(',').join(whitelist));
  int queriesSubmitted = 0;
  for (int i = 0; i < config.getSuiteRepetitions(); i++) {
    for (QueryPair query : queries) {
      for (int j = 0; j < config.getQueryRepetitions(); j++) {
        // If a whitelist exists, only run the tests on the whitelist
        if (!whitelist.isEmpty() && !whitelist.contains(query.getName())) {
          log.debug("Query %s is not whitelisted", query.getName());
          continue;
        }
        if (blacklist.contains(query.getName())) {
          log.debug("Query %s is blacklisted", query.getName());
          continue;
        }
        Validator validator = new Validator(config.getControlGateway(), config.getTestGateway(),
            config.getControlTimeout(), config.getTestTimeout(), config.getMaxRowCount(),
            config.isExplainOnly(), config.getDoublePrecision(), isCheckCorrectness(query), true,
            config.isVerboseResultsComparison(), config.getControlTeardownRetries(),
            config.getTestTeardownRetries(), query);
        completionService.submit(validator::valid, validator);
        queriesSubmitted++;
      }
    }
  }
  log.info("Allowed Queries: %d", queriesSubmitted);
  log.info("Skipped Queries: %d", (totalQueries - queriesSubmitted));
  log.info("---------------------");
  executor.shutdown();
  int total = 0;
  int valid = 0;
  int failed = 0;
  int skipped = 0;
  double lastProgress = 0;
  while (total < queriesSubmitted) {
    total++;
    Validator validator = takeUnchecked(completionService);
    if (validator.isSkipped()) {
      if (!config.isQuiet()) {
        log.warn("%s", validator.getSkippedMessage());
      }
      skipped++;
      continue;
    }
    if (validator.valid()) {
      valid++;
    } else {
      failed++;
    }
    for (EventClient eventClient : eventClients) {
      eventClient.post(buildEvent(validator));
    }
    double progress = (((double) total) / totalQueries) * 100;
    if (!config.isQuiet() || (progress - lastProgress) > 1) {
      log.info("Progress: %s valid, %s failed, %s skipped, %.2f%% done", valid, failed, skipped, progress);
      lastProgress = progress;
    }
  }
  log.info("Results: %s / %s (%s skipped)", valid, failed, skipped);
  log.info("");
  for (EventClient eventClient : eventClients) {
    if (eventClient instanceof Closeable) {
      try {
        ((Closeable) eventClient).close();
      } catch (IOException ignored) {
      }
      log.info("");
    }
  }
  return failed;
}
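The notable trick is completionService.submit(validator::valid, validator): the task is a Runnable and the Validator itself is registered as the completion value, so the progress loop gets the finished object back from take() and can inspect its state. A small sketch of that submit-self-as-result pattern; the Check class is illustrative, not Presto's Validator, and take().get() stands in for the takeUnchecked helper:

  import java.util.Arrays;
  import java.util.List;
  import java.util.concurrent.CompletionService;
  import java.util.concurrent.ExecutorCompletionService;
  import java.util.concurrent.ExecutorService;
  import java.util.concurrent.Executors;

  public class SelfAsResultSketch {
    static class Check {
      final String name;
      volatile boolean ok;
      Check(String name) { this.name = name; }
      void run() { ok = !name.isEmpty(); } // pretend validation
    }

    public static void main(String[] args) throws Exception {
      ExecutorService executor = Executors.newFixedThreadPool(4);
      CompletionService<Check> completionService = new ExecutorCompletionService<>(executor);
      List<Check> checks = Arrays.asList(new Check("q1"), new Check("q2"), new Check(""));
      for (Check c : checks) {
        completionService.submit(c::run, c); // Runnable task + the object itself as its result
      }
      executor.shutdown();
      int done = 0;
      int failed = 0;
      while (done < checks.size()) {
        Check c = completionService.take().get(); // the finished Check comes back here
        done++;
        if (!c.ok) {
          failed++;
        }
        System.out.printf("progress: %d/%d done, %d failed%n", done, checks.size(), failed);
      }
      System.out.println("failed checks: " + failed);
    }
  }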