Use of java.util.concurrent.atomic.AtomicInteger in project vert.x by eclipse.
Class Http1xTest, method testTimedOutWaiterDoesntConnect.
// Extra tests
@Test
public void testTimedOutWaiterDoesntConnect() throws Exception {
  long responseDelay = 300;
  int requests = 6;
  client.close();
  CountDownLatch firstCloseLatch = new CountDownLatch(1);
  server.close(onSuccess(v -> firstCloseLatch.countDown()));
  // Make sure the server is closed before continuing
  awaitLatch(firstCloseLatch);
  client = vertx.createHttpClient(new HttpClientOptions().setKeepAlive(false).setMaxPoolSize(1));
  AtomicInteger connectCount = new AtomicInteger(0);
  // We need a net server because we need to intercept the socket connection, not just full HTTP requests
  NetServer server = vertx.createNetServer(new NetServerOptions().setHost(DEFAULT_HTTP_HOST).setPort(DEFAULT_HTTP_PORT));
  server.connectHandler(socket -> {
    connectCount.incrementAndGet();
    vertx.setTimer(responseDelay, time -> socket.write("HTTP/1.1 200 OK\r\nContent-Length: 2\r\n\r\nOK"));
  });
  CountDownLatch latch = new CountDownLatch(requests);
  server.listen(onSuccess(s -> {
    for (int count = 0; count < requests; count++) {
      HttpClientRequest req = client.request(HttpMethod.GET, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, resp -> {
        resp.bodyHandler(buff -> {
          assertEquals("OK", buff.toString());
          latch.countDown();
        });
      });
      if (count % 2 == 1) {
        // Every other request times out before the delayed response arrives; such a waiter must not open a new connection
        req.setTimeout(responseDelay / 2);
        req.exceptionHandler(ex -> {
          latch.countDown();
        });
      }
      req.end();
    }
  }));
  awaitLatch(latch);
  assertEquals("Incorrect number of connect attempts.", (requests + 1) / 2, connectCount.get());
  server.close();
}
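The pattern to note is the AtomicInteger connection counter: it is incremented on the Vert.x event loop and read from the test thread once the latch releases, so the final assertion needs no extra synchronization. Below is a minimal sketch of the same count-then-await pattern outside Vert.x; the class name and executor setup are illustrative, not part of the original test.

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.atomic.AtomicInteger;

public class ConnectCounterSketch {
  public static void main(String[] args) throws InterruptedException {
    int handlers = 6;
    AtomicInteger connectCount = new AtomicInteger(0);
    CountDownLatch latch = new CountDownLatch(handlers);
    ExecutorService pool = Executors.newFixedThreadPool(4);
    for (int i = 0; i < handlers; i++) {
      pool.submit(() -> {
        // incrementAndGet is atomic, so concurrent handlers cannot lose updates
        connectCount.incrementAndGet();
        latch.countDown();
      });
    }
    // awaiting the latch establishes a happens-before edge, making the count safely visible here
    latch.await();
    System.out.println("connect attempts: " + connectCount.get());
    pool.shutdown();
  }
}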
Use of java.util.concurrent.atomic.AtomicInteger in project vert.x by eclipse.
Class Http1xTest, method testPooling.
private void testPooling(boolean keepAlive, boolean pipelining) {
  String path = "foo.txt";
  int numGets = 100;
  int maxPoolSize = 10;
  client.close();
  client = vertx.createHttpClient(new HttpClientOptions().setKeepAlive(keepAlive).setPipelining(pipelining).setMaxPoolSize(maxPoolSize));
  server.requestHandler(req -> {
    // Echo the "count" header back so the client can match each response to its request
    String cnt = req.headers().get("count");
    req.response().headers().set("count", cnt);
    req.response().end();
  });
  AtomicBoolean completeAlready = new AtomicBoolean();
  server.listen(onSuccess(s -> {
    AtomicInteger cnt = new AtomicInteger(0);
    for (int i = 0; i < numGets; i++) {
      int theCount = i;
      HttpClientRequest req = client.request(HttpMethod.GET, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, path, resp -> {
        assertEquals(200, resp.statusCode());
        assertEquals(theCount, Integer.parseInt(resp.headers().get("count")));
        if (cnt.incrementAndGet() == numGets) {
          testComplete();
        }
      });
      req.exceptionHandler(t -> {
        if (pipelining && !keepAlive) {
          // Pipelining without keep-alive is an illegal combination and is expected to fail the requests
          assertTrue(t instanceof IllegalStateException);
          if (completeAlready.compareAndSet(false, true)) {
            testComplete();
          }
        } else {
          fail("Should not throw exception: " + t.getMessage());
        }
      });
      req.headers().set("count", String.valueOf(i));
      req.end();
    }
  }));
  await();
}
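Here the AtomicInteger cnt acts as a completion counter: incrementAndGet() returns a distinct value to each response handler, so exactly one handler, whichever finishes last, observes numGets and calls testComplete(). A sketch of that idiom in isolation follows; CompletionCounter is an illustrative name, not Vert.x API.

import java.util.concurrent.atomic.AtomicInteger;

class CompletionCounter {
  private final AtomicInteger cnt = new AtomicInteger(0);
  private final int expected;
  private final Runnable onAllDone;

  CompletionCounter(int expected, Runnable onAllDone) {
    this.expected = expected;
    this.onAllDone = onAllDone;
  }

  // Call once per completed async operation; the action fires exactly once,
  // on whichever thread happens to finish last.
  void onOneDone() {
    if (cnt.incrementAndGet() == expected) {
      onAllDone.run();
    }
  }
}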
Use of java.util.concurrent.atomic.AtomicInteger in project vert.x by eclipse.
Class Http1xTest, method testInvalidHttpResponse.
@Test
public void testInvalidHttpResponse() {
  waitFor(2);
  AtomicInteger count = new AtomicInteger(0);
  CompletableFuture<Void> sendResp = new CompletableFuture<>();
  NetServer server = vertx.createNetServer();
  String match = "GET /somepath HTTP/1.1\r\nHost: localhost:8080\r\n\r\n";
  server.connectHandler(so -> {
    StringBuilder content = new StringBuilder();
    so.handler(buff -> {
      content.append(buff);
      while (content.toString().startsWith(match)) {
        content.delete(0, match.length());
        switch (count.getAndIncrement()) {
          case 0:
            // First request: nothing is written back (sendResp is never completed in this test)
            sendResp.thenAccept(v -> {
            });
            break;
          case 1:
            // Second request: garbage bytes followed by a valid response
            Buffer resp1 = Buffer.buffer(TestUtils.randomAlphaString(40) + "\r\n");
            Buffer resp2 = Buffer.buffer("HTTP/1.1 200 OK\r\nContent-Length: 0\r\n\r\n");
            so.write(Buffer.buffer().appendBuffer(resp1).appendBuffer(resp2));
            break;
          default:
            fail();
            break;
        }
      }
    });
  }).listen(DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, onSuccess(s -> {
    client = vertx.createHttpClient(new HttpClientOptions().setKeepAlive(true).setPipelining(true).setMaxPoolSize(1));
    AtomicBoolean fail1 = new AtomicBoolean();
    HttpClientRequest req1 = client.get(DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, "/somepath", resp -> {
      fail();
    }).exceptionHandler(err -> {
      if (fail1.compareAndSet(false, true)) {
        assertEquals(IllegalArgumentException.class, err.getClass());
        complete();
      }
    });
    AtomicBoolean fail2 = new AtomicBoolean();
    HttpClientRequest req2 = client.get(DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, "/somepath", resp -> {
      resp.bodyHandler(buff -> {
        assertEquals("okusa", buff.toString());
        testComplete();
      });
    }).exceptionHandler(err -> {
      if (fail2.compareAndSet(false, true)) {
        assertEquals(VertxException.class, err.getClass());
        complete();
      }
    });
    req1.end();
    req2.end();
  }));
  await();
}
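Two atomic idioms carry this test: count.getAndIncrement() inside the switch dispatches different server behavior per pipelined request, and AtomicBoolean.compareAndSet(false, true) guarantees each exception handler runs its assertions at most once even if errors are delivered more than once. A condensed sketch of both guards; HandlerGuards is an illustrative name.

import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;

class HandlerGuards {
  private final AtomicInteger invocations = new AtomicInteger(0);
  private final AtomicBoolean failedOnce = new AtomicBoolean();

  // Dispatch on how many times the handler has fired so far.
  void onRequest() {
    switch (invocations.getAndIncrement()) {
      case 0:
        // first request: e.g. withhold or corrupt the response
        break;
      case 1:
        // second request: e.g. send a valid response
        break;
      default:
        throw new IllegalStateException("unexpected extra request");
    }
  }

  // Only the first error reaches the body, even under repeated delivery.
  void onError(Throwable err) {
    if (failedOnce.compareAndSet(false, true)) {
      System.err.println("first failure: " + err);
    }
  }
}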
Use of java.util.concurrent.atomic.AtomicInteger in project buck by facebook.
Class TestRunning, method runTests.
@SuppressWarnings("PMD.EmptyCatchBlock")
public static int runTests(
    final CommandRunnerParams params,
    Iterable<TestRule> tests,
    ExecutionContext executionContext,
    final TestRunningOptions options,
    ListeningExecutorService service,
    BuildEngine buildEngine,
    final StepRunner stepRunner,
    SourcePathResolver sourcePathResolver,
    SourcePathRuleFinder ruleFinder)
    throws IOException, ExecutionException, InterruptedException {
  ImmutableSet<JavaLibrary> rulesUnderTestForCoverage;
  // If needed, we first run instrumentation on the class files.
  if (options.isCodeCoverageEnabled()) {
    rulesUnderTestForCoverage = getRulesUnderTest(tests);
    if (!rulesUnderTestForCoverage.isEmpty()) {
      try {
        // We'll use the filesystem of the first rule under test. This will fail if there are any
        // tests from a different repo, but it'll help us bootstrap ourselves to being able to
        // support multiple repos.
        // TODO(t8220837): Support tests in multiple repos
        JavaLibrary library = rulesUnderTestForCoverage.iterator().next();
        stepRunner.runStepForBuildTarget(
            executionContext,
            new MakeCleanDirectoryStep(
                library.getProjectFilesystem(),
                JacocoConstants.getJacocoOutputDir(library.getProjectFilesystem())),
            Optional.empty());
      } catch (StepFailedException e) {
        params.getBuckEventBus().post(ConsoleEvent.severe(Throwables.getRootCause(e).getLocalizedMessage()));
        return 1;
      }
    }
  } else {
    rulesUnderTestForCoverage = ImmutableSet.of();
  }
  final ImmutableSet<String> testTargets =
      FluentIterable.from(tests)
          .transform(BuildRule::getBuildTarget)
          .transform(Object::toString)
          .toSet();
  final int totalNumberOfTests = Iterables.size(tests);
  params.getBuckEventBus().post(
      TestRunEvent.started(
          options.isRunAllTests(),
          options.getTestSelectorList(),
          options.shouldExplainTestSelectorList(),
          testTargets));
  // Start running all of the tests. The result of each java_test() rule is represented as a
  // ListenableFuture.
  List<ListenableFuture<TestResults>> results = Lists.newArrayList();
  TestRuleKeyFileHelper testRuleKeyFileHelper = new TestRuleKeyFileHelper(buildEngine);
  final AtomicInteger lastReportedTestSequenceNumber = new AtomicInteger();
  final List<TestRun> separateTestRuns = Lists.newArrayList();
  List<TestRun> parallelTestRuns = Lists.newArrayList();
  for (final TestRule test : tests) {
    // Determine whether the test needs to be executed.
    final Callable<TestResults> resultsInterpreter =
        getCachingCallable(
            test.interpretTestResults(
                executionContext, /* isUsingTestSelectors */ !options.getTestSelectorList().isEmpty()));
    boolean isTestRunRequired =
        isTestRunRequiredForTest(
            test,
            buildEngine,
            executionContext,
            testRuleKeyFileHelper,
            options.getTestResultCacheMode(),
            resultsInterpreter,
            !options.getTestSelectorList().isEmpty(),
            !options.getEnvironmentOverrides().isEmpty());
    final Map<String, UUID> testUUIDMap = new HashMap<>();
    final AtomicReference<TestStatusMessageEvent.Started> currentTestStatusMessageEvent = new AtomicReference<>();
    TestRule.TestReportingCallback testReportingCallback =
        new TestRule.TestReportingCallback() {

          @Override
          public void testsDidBegin() {
            LOG.debug("Tests for rule %s began", test.getBuildTarget());
          }

          @Override
          public void statusDidBegin(TestStatusMessage didBeginMessage) {
            LOG.debug("Test status did begin: %s", didBeginMessage);
            TestStatusMessageEvent.Started startedEvent = TestStatusMessageEvent.started(didBeginMessage);
            TestStatusMessageEvent.Started previousEvent = currentTestStatusMessageEvent.getAndSet(startedEvent);
            Preconditions.checkState(previousEvent == null, "Received begin status before end status (%s)", previousEvent);
            params.getBuckEventBus().post(startedEvent);
            String message = didBeginMessage.getMessage();
            if (message.toLowerCase().contains("debugger")) {
              executionContext.getStdErr().println(executionContext.getAnsi().asWarningText(message));
            }
          }

          @Override
          public void statusDidEnd(TestStatusMessage didEndMessage) {
            LOG.debug("Test status did end: %s", didEndMessage);
            TestStatusMessageEvent.Started previousEvent = currentTestStatusMessageEvent.getAndSet(null);
            Preconditions.checkState(previousEvent != null, "Received end status before begin status (%s)", previousEvent);
            params.getBuckEventBus().post(TestStatusMessageEvent.finished(previousEvent, didEndMessage));
          }

          @Override
          public void testDidBegin(String testCaseName, String testName) {
            LOG.debug("Test rule %s test case %s test name %s began", test.getBuildTarget(), testCaseName, testName);
            UUID testUUID = UUID.randomUUID();
            // UUID is immutable and thread-safe as of Java 7, so it's
            // safe to stash in a map and use later:
            //
            // http://bugs.java.com/view_bug.do?bug_id=6611830
            testUUIDMap.put(testCaseName + ":" + testName, testUUID);
            params.getBuckEventBus().post(TestSummaryEvent.started(testUUID, testCaseName, testName));
          }

          @Override
          public void testDidEnd(TestResultSummary testResultSummary) {
            LOG.debug("Test rule %s test did end: %s", test.getBuildTarget(), testResultSummary);
            UUID testUUID = testUUIDMap.get(testResultSummary.getTestCaseName() + ":" + testResultSummary.getTestName());
            Preconditions.checkNotNull(testUUID);
            params.getBuckEventBus().post(TestSummaryEvent.finished(testUUID, testResultSummary));
          }

          @Override
          public void testsDidEnd(List<TestCaseSummary> testCaseSummaries) {
            LOG.debug("Test rule %s tests did end: %s", test.getBuildTarget(), testCaseSummaries);
          }
        };
    List<Step> steps;
    if (isTestRunRequired) {
      params.getBuckEventBus().post(IndividualTestEvent.started(testTargets));
      ImmutableList.Builder<Step> stepsBuilder = ImmutableList.builder();
      Preconditions.checkState(buildEngine.isRuleBuilt(test.getBuildTarget()));
      List<Step> testSteps = test.runTests(executionContext, options, sourcePathResolver, testReportingCallback);
      if (!testSteps.isEmpty()) {
        stepsBuilder.addAll(testSteps);
        stepsBuilder.add(testRuleKeyFileHelper.createRuleKeyInDirStep(test));
      }
      steps = stepsBuilder.build();
    } else {
      steps = ImmutableList.of();
    }
    TestRun testRun = TestRun.of(test, steps, getStatusTransformingCallable(isTestRunRequired, resultsInterpreter), testReportingCallback);
    // Always schedule the test run, even if its list of steps is empty. There may be zero
    // commands because the rule is cached, but its results must still be processed.
    if (test.runTestSeparately()) {
      LOG.debug("Running test %s in serial", test);
      separateTestRuns.add(testRun);
    } else {
      LOG.debug("Running test %s in parallel", test);
      parallelTestRuns.add(testRun);
    }
  }
  for (TestRun testRun : parallelTestRuns) {
    ListenableFuture<TestResults> testResults =
        runStepsAndYieldResult(
            stepRunner,
            executionContext,
            testRun.getSteps(),
            testRun.getTestResultsCallable(),
            testRun.getTest().getBuildTarget(),
            params.getBuckEventBus(),
            service);
    results.add(
        transformTestResults(
            params,
            testResults,
            testRun.getTest(),
            testRun.getTestReportingCallback(),
            testTargets,
            lastReportedTestSequenceNumber,
            totalNumberOfTests));
  }
  ListenableFuture<List<TestResults>> parallelTestStepsFuture = Futures.allAsList(results);
  final List<TestResults> completedResults = Lists.newArrayList();
  final ListeningExecutorService directExecutorService = MoreExecutors.newDirectExecutorService();
  ListenableFuture<Void> uberFuture =
      MoreFutures.addListenableCallback(
          parallelTestStepsFuture,
          new FutureCallback<List<TestResults>>() {

            @Override
            public void onSuccess(List<TestResults> parallelTestResults) {
              LOG.debug("Parallel tests completed, running separate tests...");
              completedResults.addAll(parallelTestResults);
              List<ListenableFuture<TestResults>> separateResultsList = Lists.newArrayList();
              for (TestRun testRun : separateTestRuns) {
                separateResultsList.add(
                    transformTestResults(
                        params,
                        runStepsAndYieldResult(
                            stepRunner,
                            executionContext,
                            testRun.getSteps(),
                            testRun.getTestResultsCallable(),
                            testRun.getTest().getBuildTarget(),
                            params.getBuckEventBus(),
                            directExecutorService),
                        testRun.getTest(),
                        testRun.getTestReportingCallback(),
                        testTargets,
                        lastReportedTestSequenceNumber,
                        totalNumberOfTests));
              }
              ListenableFuture<List<TestResults>> serialResults = Futures.allAsList(separateResultsList);
              try {
                completedResults.addAll(serialResults.get());
              } catch (ExecutionException e) {
                LOG.error(e, "Error fetching serial test results");
                throw new HumanReadableException(e, "Error fetching serial test results");
              } catch (InterruptedException e) {
                LOG.error(e, "Interrupted fetching serial test results");
                try {
                  serialResults.cancel(true);
                } catch (CancellationException ignored) {
                  // Rethrow original InterruptedException instead.
                }
                Thread.currentThread().interrupt();
                throw new HumanReadableException(e, "Test cancelled");
              }
              LOG.debug("Done running serial tests.");
            }

            @Override
            public void onFailure(Throwable e) {
              LOG.error(e, "Parallel tests failed, not running serial tests");
              throw new HumanReadableException(e, "Parallel tests failed");
            }
          },
          directExecutorService);
  try {
    // Block until all the tests have finished running.
    uberFuture.get();
  } catch (ExecutionException e) {
    e.printStackTrace(params.getConsole().getStdErr());
    return 1;
  } catch (InterruptedException e) {
    try {
      uberFuture.cancel(true);
    } catch (CancellationException ignored) {
      // Rethrow original InterruptedException instead.
    }
    Thread.currentThread().interrupt();
    throw e;
  }
  params.getBuckEventBus().post(TestRunEvent.finished(testTargets, completedResults));
  // Write out the results as XML, if requested.
  Optional<String> path = options.getPathToXmlTestOutput();
  if (path.isPresent()) {
    try (Writer writer = Files.newWriter(new File(path.get()), Charsets.UTF_8)) {
      writeXmlOutput(completedResults, writer);
    }
  }
  // Generate the code coverage report.
  if (options.isCodeCoverageEnabled() && !rulesUnderTestForCoverage.isEmpty()) {
    try {
      JavaBuckConfig javaBuckConfig = params.getBuckConfig().getView(JavaBuckConfig.class);
      DefaultJavaPackageFinder defaultJavaPackageFinder = javaBuckConfig.createDefaultJavaPackageFinder();
      stepRunner.runStepForBuildTarget(
          executionContext,
          getReportCommand(
              rulesUnderTestForCoverage,
              defaultJavaPackageFinder,
              javaBuckConfig.getDefaultJavaOptions().getJavaRuntimeLauncher(),
              params.getCell().getFilesystem(),
              sourcePathResolver,
              ruleFinder,
              JacocoConstants.getJacocoOutputDir(params.getCell().getFilesystem()),
              options.getCoverageReportFormat(),
              options.getCoverageReportTitle(),
              javaBuckConfig.getDefaultJavacOptions().getSpoolMode() == JavacOptions.SpoolMode.INTERMEDIATE_TO_DISK,
              options.getCoverageIncludes(),
              options.getCoverageExcludes()),
          Optional.empty());
    } catch (StepFailedException e) {
      params.getBuckEventBus().post(ConsoleEvent.severe(Throwables.getRootCause(e).getLocalizedMessage()));
      return 1;
    }
  }
  boolean failures =
      Iterables.any(
          completedResults,
          results1 -> {
            LOG.debug("Checking result %s for failure", results1);
            return !results1.isSuccess();
          });
  return failures ? TEST_FAILURES_EXIT_CODE : 0;
}
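The AtomicInteger of interest here is lastReportedTestSequenceNumber, shared across all the transformTestResults callbacks so that results completing on different executor threads still receive unique, gap-free sequence numbers. A minimal sketch of that handout pattern, with illustrative names:

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.atomic.AtomicInteger;

public class SequenceNumberSketch {
  public static void main(String[] args) throws Exception {
    AtomicInteger lastReportedSequenceNumber = new AtomicInteger();
    ExecutorService service = Executors.newFixedThreadPool(4);
    List<Future<Integer>> futures = new ArrayList<>();
    for (int i = 0; i < 10; i++) {
      // each task claims the next number atomically, regardless of which pool thread runs it
      futures.add(service.submit(lastReportedSequenceNumber::incrementAndGet));
    }
    for (Future<Integer> f : futures) {
      System.out.println("reported result #" + f.get());
    }
    service.shutdown();
  }
}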
Use of java.util.concurrent.atomic.AtomicInteger in project deeplearning4j by deeplearning4j.
Class WordVectorSerializer, method loadFullModel.
/**
 * This method loads a full Word2Vec model previously saved with a writeFullModel() call.
 *
 * Deprecation note: please consider using readWord2VecModel() or loadStaticModel() instead.
 *
 * @param path path to the previously stored w2v JSON model
 * @return restored Word2Vec instance
 */
@Deprecated
public static Word2Vec loadFullModel(@NonNull String path) throws FileNotFoundException {
  /*
  // TODO: implementation is in progress
  We need to restore:
  1. WeightLookupTable, including the syn0 and syn1 matrices
  2. VocabCache, marked as SPECIAL to avoid accidental word removals
  */
  BasicLineIterator iterator = new BasicLineIterator(new File(path));
  // the first 3 lines should be processed separately
  String confJson = iterator.nextSentence();
  log.info("Word2Vec conf. JSON: " + confJson);
  VectorsConfiguration configuration = VectorsConfiguration.fromJson(confJson);
  // we don't actually need expTable, since it yields identical results on subsequent runs as long as its size is unchanged
  String eTable = iterator.nextSentence();
  double[] expTable;
  String nTable = iterator.nextSentence();
  if (configuration.getNegative() > 0) {
    // TODO: we should probably parse negTable, but it's not required until vocab changes are introduced.
    // On a predefined vocab it will produce the exact same nTable; the same goes for expTable.
  }
  /*
  Since we're restoring the vocab from a previously serialized model, we can expect that minWordFrequency
  was already applied to its vocabulary, so it should NOT be truncated again.
  That's why minWordFrequency is set to the configuration value, while SPECIAL is applied to each word
  to avoid truncation.
  */
  VocabularyHolder holder = new VocabularyHolder.Builder()
      .minWordFrequency(configuration.getMinWordFrequency())
      .hugeModelExpected(configuration.isHugeModelExpected())
      .scavengerActivationThreshold(configuration.getScavengerActivationThreshold())
      .scavengerRetentionDelay(configuration.getScavengerRetentionDelay())
      .build();
  AtomicInteger counter = new AtomicInteger(0);
  AbstractCache<VocabWord> vocabCache = new AbstractCache.Builder<VocabWord>().build();
  while (iterator.hasNext()) {
    // log.info("got line: " + iterator.nextSentence());
    String wordJson = iterator.nextSentence();
    VocabularyWord word = VocabularyWord.fromJson(wordJson);
    word.setSpecial(true);
    VocabWord vw = new VocabWord(word.getCount(), word.getWord());
    vw.setIndex(counter.getAndIncrement());
    // the sequential index above is immediately overridden by the index stored in the serialized Huffman node
    vw.setIndex(word.getHuffmanNode().getIdx());
    vw.setCodeLength(word.getHuffmanNode().getLength());
    vw.setPoints(arrayToList(word.getHuffmanNode().getPoint(), word.getHuffmanNode().getLength()));
    vw.setCodes(arrayToList(word.getHuffmanNode().getCode(), word.getHuffmanNode().getLength()));
    vocabCache.addToken(vw);
    vocabCache.addWordToIndex(vw.getIndex(), vw.getLabel());
    vocabCache.putVocabWord(vw.getWord());
  }
  // at this moment the vocab is restored, and it's time to rebuild the Huffman tree
  // since the word counters are equal, the Huffman tree will be equal too
  //holder.updateHuffmanCodes();
  // we definitely don't need the UNK word in this scenario
  // holder.transferBackToVocabCache(vocabCache, false);
  // now it's time to transfer the syn0/syn1/syn1Neg values
  InMemoryLookupTable lookupTable = (InMemoryLookupTable) new InMemoryLookupTable.Builder()
      .negative(configuration.getNegative())
      .useAdaGrad(configuration.isUseAdaGrad())
      .lr(configuration.getLearningRate())
      .cache(vocabCache)
      .vectorLength(configuration.getLayersSize())
      .build();
  // we create all arrays
  lookupTable.resetWeights(true);
  iterator.reset();
  // we should skip the first 3 lines of the file
  iterator.nextSentence();
  iterator.nextSentence();
  iterator.nextSentence();
  // now, for each word from the vocab, we just transfer the actual values
  while (iterator.hasNext()) {
    String wordJson = iterator.nextSentence();
    VocabularyWord word = VocabularyWord.fromJson(wordJson);
    // syn0 transfer
    INDArray syn0 = lookupTable.getSyn0().getRow(vocabCache.indexOf(word.getWord()));
    syn0.assign(Nd4j.create(word.getSyn0()));
    // syn1 transfer
    // syn1 values are normally accessed via tree points, but since our goal is just deserialization we can push them row by row
    INDArray syn1 = lookupTable.getSyn1().getRow(vocabCache.indexOf(word.getWord()));
    syn1.assign(Nd4j.create(word.getSyn1()));
    // syn1Neg transfer
    if (configuration.getNegative() > 0) {
      INDArray syn1Neg = lookupTable.getSyn1Neg().getRow(vocabCache.indexOf(word.getWord()));
      syn1Neg.assign(Nd4j.create(word.getSyn1Neg()));
    }
  }
  Word2Vec vec = new Word2Vec.Builder(configuration)
      .vocabCache(vocabCache)
      .lookupTable(lookupTable)
      .resetModel(false)
      .build();
  vec.setModelUtils(new BasicModelUtils());
  return vec;
}
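In loadFullModel the counter never sees concurrent access; AtomicInteger is used as a mutable int holder so an index can be advanced from inside loop bodies (and, in similar DL4J code, inside lambdas, which can only capture effectively final variables). A small sketch of that single-threaded usage:

import java.util.concurrent.atomic.AtomicInteger;
import java.util.stream.Stream;

public class IndexAssignmentSketch {
  public static void main(String[] args) {
    AtomicInteger counter = new AtomicInteger(0);
    // a plain int local could not be mutated here, because lambdas may only
    // capture effectively final variables
    Stream.of("first", "second", "third")
        .forEach(word -> System.out.println(counter.getAndIncrement() + " -> " + word));
  }
}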