Use of com.google.common.util.concurrent.FutureCallback in project bitsquare by bitsquare.
The class SpendFromDepositTxWindow, method addContent.
private void addContent() {
    InputTextField depositTxHex = addLabelInputTextField(gridPane, ++rowIndex, "depositTxHex:").second;
    InputTextField buyerPayoutAmount = addLabelInputTextField(gridPane, ++rowIndex, "buyerPayoutAmount:").second;
    InputTextField sellerPayoutAmount = addLabelInputTextField(gridPane, ++rowIndex, "sellerPayoutAmount:").second;
    InputTextField arbitratorPayoutAmount = addLabelInputTextField(gridPane, ++rowIndex, "arbitratorPayoutAmount:").second;
    InputTextField buyerAddressString = addLabelInputTextField(gridPane, ++rowIndex, "buyerAddressString:").second;
    InputTextField sellerAddressString = addLabelInputTextField(gridPane, ++rowIndex, "sellerAddressString:").second;
    InputTextField arbitratorAddressString = addLabelInputTextField(gridPane, ++rowIndex, "arbitratorAddressString:").second;
    InputTextField buyerPrivateKeyAsHex = addLabelInputTextField(gridPane, ++rowIndex, "buyerPrivateKeyAsHex:").second;
    InputTextField sellerPrivateKeyAsHex = addLabelInputTextField(gridPane, ++rowIndex, "sellerPrivateKeyAsHex:").second;
    InputTextField arbitratorPrivateKeyAsHex = addLabelInputTextField(gridPane, ++rowIndex, "arbitratorPrivateKeyAsHex:").second;
    InputTextField buyerPubKeyAsHex = addLabelInputTextField(gridPane, ++rowIndex, "buyerPubKeyAsHex:").second;
    InputTextField sellerPubKeyAsHex = addLabelInputTextField(gridPane, ++rowIndex, "sellerPubKeyAsHex:").second;
    InputTextField arbitratorPubKeyAsHex = addLabelInputTextField(gridPane, ++rowIndex, "arbitratorPubKeyAsHex:").second;
    InputTextField P2SHMultiSigOutputScript = addLabelInputTextField(gridPane, ++rowIndex, "P2SHMultiSigOutputScript:").second;

    // Notes:
    // Open with alt+g and enable DEV mode
    // The priv key is only visible if pw protection is removed (wallet details data (alt+j))
    // Take P2SHMultiSigOutputScript from the depositTx in a block explorer
    // Take the missing buyerPubKeyAsHex and sellerPubKeyAsHex from the contract data!
    // Look up the sellerPrivateKeyAsHex associated with sellerPubKeyAsHex (or the buyer's) in the wallet details data
    // sellerPubKeys/buyerPubKeys are auto-generated if the fields below are used
    // Never set the arbitrator's priv key here!
    depositTxHex.setText("");
    P2SHMultiSigOutputScript.setText("");
    buyerPayoutAmount.setText("1.03");
    sellerPayoutAmount.setText("0.03");
    arbitratorPayoutAmount.setText("0");
    buyerAddressString.setText("");
    buyerPubKeyAsHex.setText("");
    buyerPrivateKeyAsHex.setText("");
    sellerAddressString.setText("");
    sellerPubKeyAsHex.setText("");
    sellerPrivateKeyAsHex.setText("");

    // 4.9 (pkfcmj42c6es6tjt.onion)
    // arbitratorAddressString.setText("19xdeiQM2Hn2M2wbpT5imcYWzqhiSDHPy4");
    // arbitratorPubKeyAsHex.setText("02c62e794fe67f3a2115e2de4757143ff7f27bdf38aa4ae58a3595baa6d676875b");
    // 4.2 (ntjhaj27rylxwvnp.onion)
    arbitratorAddressString.setText("1FdFzBazmHQxbUbdCUJwuCtR37DrZrEobu");
    arbitratorPubKeyAsHex.setText("030fdc2ebc297df4047442f6079f1ce3b7d1938a41f88bd11497545cc94fcfd315");

    actionButtonText("Sign and publish transaction");

    FutureCallback<Transaction> callback = new FutureCallback<Transaction>() {

        @Override
        public void onSuccess(@Nullable Transaction result) {
            log.error("onSuccess");
            UserThread.execute(() -> {
                String txId = result != null ? result.getHashAsString() : "null";
                new Popup<>().information("Transaction successfully published. Transaction ID: " + txId).show();
            });
        }

        @Override
        public void onFailure(Throwable t) {
            log.error(t.toString());
            log.error("onFailure");
            UserThread.execute(() -> new Popup<>().warning(t.toString()).show());
        }
    };

    onAction(() -> {
        try {
            tradeWalletService.emergencySignAndPublishPayoutTx(
                    depositTxHex.getText(),
                    Coin.parseCoin(buyerPayoutAmount.getText()),
                    Coin.parseCoin(sellerPayoutAmount.getText()),
                    Coin.parseCoin(arbitratorPayoutAmount.getText()),
                    buyerAddressString.getText(),
                    sellerAddressString.getText(),
                    arbitratorAddressString.getText(),
                    buyerPrivateKeyAsHex.getText(),
                    sellerPrivateKeyAsHex.getText(),
                    arbitratorPrivateKeyAsHex.getText(),
                    buyerPubKeyAsHex.getText(),
                    sellerPubKeyAsHex.getText(),
                    arbitratorPubKeyAsHex.getText(),
                    P2SHMultiSigOutputScript.getText(),
                    callback);
        } catch (AddressFormatException | WalletException | TransactionVerificationException e) {
            log.error(e.toString());
            e.printStackTrace();
            UserThread.execute(() -> new Popup<>().warning(e.toString()).show());
        }
    });
}
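Stripped of the bitsquare specifics, the snippet above follows a single pattern: run the slow work off the UI thread, then use a FutureCallback to hop back onto the UI thread with either the result or the error (bitsquare uses UserThread.execute for that hop). Below is a minimal, self-contained sketch of the same shape with plain Guava. The worker task, the uiThread executor, and the printed messages are hypothetical stand-ins, and the explicit executor argument to Futures.addCallback assumes a recent Guava version (older versions also offered a two-argument overload).

import com.google.common.util.concurrent.FutureCallback;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.ListeningExecutorService;
import com.google.common.util.concurrent.MoreExecutors;

import java.util.concurrent.Callable;
import java.util.concurrent.Executor;
import java.util.concurrent.Executors;

public class CallbackSketch {

    public static void main(String[] args) {
        // Wrap a plain executor so it hands back ListenableFutures.
        ListeningExecutorService worker =
                MoreExecutors.listeningDecorator(Executors.newSingleThreadExecutor());

        // Stand-in for a UI/event thread; bitsquare uses UserThread.execute for this hop.
        Executor uiThread = Runnable::run;

        // Hypothetical slow task standing in for sign-and-publish.
        Callable<String> signAndPublish = () -> "some-tx-id";
        ListenableFuture<String> txIdFuture = worker.submit(signAndPublish);

        Futures.addCallback(txIdFuture, new FutureCallback<String>() {
            @Override
            public void onSuccess(String txId) {
                // Hop back to the "UI thread" before touching UI state.
                uiThread.execute(() -> System.out.println("Published, txId=" + txId));
            }

            @Override
            public void onFailure(Throwable t) {
                uiThread.execute(() -> System.err.println("Publish failed: " + t));
            }
        }, MoreExecutors.directExecutor());

        worker.shutdown();
    }
}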
Use of com.google.common.util.concurrent.FutureCallback in project bitsquare by bitsquare.
The class RequestDataHandler, method requestData.
///////////////////////////////////////////////////////////////////////////////////////////
// API
///////////////////////////////////////////////////////////////////////////////////////////
public void requestData(NodeAddress nodeAddress, boolean isPreliminaryDataRequest) {
    Log.traceCall("nodeAddress=" + nodeAddress);
    peersNodeAddress = nodeAddress;
    if (!stopped) {
        GetDataRequest getDataRequest;

        // We collect the keys of the PersistedStoragePayload items so we exclude them in our request.
        // PersistedStoragePayload items don't get removed, so we don't have an issue with the case that
        // an object gets removed in between PreliminaryGetDataRequest and the GetUpdatedDataRequest and we would
        // miss that event if we do not load the full set or use some delta handling.
        Set<byte[]> excludedKeys = dataStorage.getMap().entrySet().stream()
                .filter(e -> e.getValue().getStoragePayload() instanceof PersistedStoragePayload)
                .map(e -> e.getKey().bytes)
                .collect(Collectors.toSet());

        if (isPreliminaryDataRequest)
            getDataRequest = new PreliminaryGetDataRequest(nonce, excludedKeys);
        else
            getDataRequest = new GetUpdatedDataRequest(networkNode.getNodeAddress(), nonce, excludedKeys);

        if (timeoutTimer == null) {
            timeoutTimer = UserThread.runAfter(() -> {
                // setup before sending to avoid race conditions
                if (!stopped) {
                    String errorMessage = "A timeout occurred at sending getDataRequest:" + getDataRequest +
                            " on nodeAddress:" + nodeAddress;
                    log.debug(errorMessage + " / RequestDataHandler=" + RequestDataHandler.this);
                    handleFault(errorMessage, nodeAddress, CloseConnectionReason.SEND_MSG_TIMEOUT);
                } else {
                    log.trace("We have stopped already. We ignore that timeoutTimer.run call. " +
                            "Might be caused by a previous networkNode.sendMessage.onFailure.");
                }
            }, TIME_OUT_SEC);
        }

        log.debug("We send a {} to peer {}. ", getDataRequest.getClass().getSimpleName(), nodeAddress);
        networkNode.addMessageListener(this);
        SettableFuture<Connection> future = networkNode.sendMessage(nodeAddress, getDataRequest);
        Futures.addCallback(future, new FutureCallback<Connection>() {

            @Override
            public void onSuccess(Connection connection) {
                if (!stopped) {
                    RequestDataHandler.this.connection = connection;
                    log.trace("Send " + getDataRequest + " to " + nodeAddress + " succeeded.");
                } else {
                    log.trace("We have stopped already. We ignore that networkNode.sendMessage.onSuccess call. " +
                            "Might be caused by a previous timeout.");
                }
            }

            @Override
            public void onFailure(@NotNull Throwable throwable) {
                if (!stopped) {
                    String errorMessage = "Sending getDataRequest to " + nodeAddress +
                            " failed. That is expected if the peer is offline.\n\t" +
                            "getDataRequest=" + getDataRequest + "." +
                            "\n\tException=" + throwable.getMessage();
                    log.debug(errorMessage);
                    handleFault(errorMessage, nodeAddress, CloseConnectionReason.SEND_MSG_FAILURE);
                } else {
                    log.trace("We have stopped already. We ignore that networkNode.sendMessage.onFailure call. " +
                            "Might be caused by a previous timeout.");
                }
            }
        });
    } else {
        log.warn("We have stopped already. We ignore that requestData call.");
    }
}
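RequestDataHandler guards the send with its own timeoutTimer (UserThread.runAfter) alongside the send callback. As a point of comparison only, and not what bitsquare does, Guava can fold the timeout into the future itself with Futures.withTimeout, so the same onFailure branch also sees a TimeoutException. A rough sketch under that assumption, with sendFuture standing in for the networkNode.sendMessage result:

import com.google.common.util.concurrent.FutureCallback;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.MoreExecutors;
import com.google.common.util.concurrent.SettableFuture;

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;

public class SendWithTimeoutSketch {

    public static void main(String[] args) {
        ScheduledExecutorService scheduler = Executors.newSingleThreadScheduledExecutor();

        // Hypothetical stand-in for networkNode.sendMessage(...): never completed here,
        // so the timeout path below is the one that fires.
        SettableFuture<String> sendFuture = SettableFuture.create();

        // Fail the future with a TimeoutException if it does not complete within 10 seconds.
        ListenableFuture<String> guarded =
                Futures.withTimeout(sendFuture, 10, TimeUnit.SECONDS, scheduler);

        Futures.addCallback(guarded, new FutureCallback<String>() {
            @Override
            public void onSuccess(String connection) {
                System.out.println("Send succeeded: " + connection);
            }

            @Override
            public void onFailure(Throwable t) {
                if (t instanceof TimeoutException) {
                    System.err.println("Send timed out");
                } else {
                    System.err.println("Send failed: " + t);
                }
            }
        }, MoreExecutors.directExecutor());

        // Let the JVM exit once the guarded future has resolved.
        guarded.addListener(scheduler::shutdown, MoreExecutors.directExecutor());
    }
}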
Use of com.google.common.util.concurrent.FutureCallback in project cdap by caskdata.
The class ResourceCoordinatorClient, method watchAssignment.
/**
* Starts watching ZK for ResourceAssignment changes for the given service.
*/
private void watchAssignment(final String serviceName) {
    final String zkPath = CoordinationConstants.ASSIGNMENTS_PATH + "/" + serviceName;

    // Watch for both getData() and exists() calls
    Watcher watcher = wrapWatcher(new AssignmentWatcher(serviceName,
            EnumSet.of(Watcher.Event.EventType.NodeDataChanged, Watcher.Event.EventType.NodeDeleted)));

    Futures.addCallback(zkClient.getData(zkPath, watcher), wrapCallback(new FutureCallback<NodeData>() {

        @Override
        public void onSuccess(NodeData result) {
            try {
                ResourceAssignment assignment = CoordinationConstants.RESOURCE_ASSIGNMENT_CODEC.decode(result.getData());
                LOG.debug("Received resource assignment for {}. {}", serviceName, assignment.getAssignments());
                handleAssignmentChange(serviceName, assignment);
            } catch (Exception e) {
                LOG.error("Failed to decode ResourceAssignment {}", Bytes.toStringBinary(result.getData()), e);
            }
        }

        @Override
        public void onFailure(Throwable t) {
            if (t instanceof KeeperException.NoNodeException) {
                // Treat it as if the assignment has been removed. If the node doesn't exist on the first data fetch,
                // there is no oldAssignment, hence the following call is a no-op.
                handleAssignmentChange(serviceName, new ResourceAssignment(serviceName));

                // Watch for exists() if this service is still of interest
                synchronized (ResourceCoordinatorClient.this) {
                    if (changeListeners.containsKey(serviceName)) {
                        watchAssignmentOnExists(serviceName);
                    }
                }
            } else {
                LOG.error("Failed to getData on ZK {}{}", zkClient.getConnectString(), zkPath, t);
                doNotifyFailed(t);
            }
        }
    }), Threads.SAME_THREAD_EXECUTOR);
}
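The executor passed as the last argument to Futures.addCallback decides where onSuccess and onFailure run; here the CDAP client passes Twill's Threads.SAME_THREAD_EXECUTOR so the decoded assignment is handled inline. Below is a small sketch of the same idea using Guava's MoreExecutors.directExecutor() as the closest standard equivalent; zkLikeWorker and the byte payload are made-up stand-ins for zkClient.getData().

import com.google.common.util.concurrent.FutureCallback;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.ListeningExecutorService;
import com.google.common.util.concurrent.MoreExecutors;

import java.util.concurrent.Callable;
import java.util.concurrent.Executors;

public class CallbackExecutorSketch {

    public static void main(String[] args) {
        ListeningExecutorService zkLikeWorker =
                MoreExecutors.listeningDecorator(Executors.newSingleThreadExecutor());

        // Hypothetical stand-in for zkClient.getData(...): some bytes produced on the worker thread.
        Callable<byte[]> fetch = () -> new byte[] {1, 2, 3};
        ListenableFuture<byte[]> dataFuture = zkLikeWorker.submit(fetch);

        // directExecutor() runs the callback inline, on whichever thread completes the future
        // (or on the caller if it is already done), which is the same idea as Threads.SAME_THREAD_EXECUTOR.
        // Fine for cheap callbacks such as decoding; use a real executor for slow or blocking work.
        Futures.addCallback(dataFuture, new FutureCallback<byte[]>() {
            @Override
            public void onSuccess(byte[] data) {
                System.out.println("Decoded " + data.length + " bytes on " + Thread.currentThread().getName());
            }

            @Override
            public void onFailure(Throwable t) {
                System.err.println("Fetch failed: " + t);
            }
        }, MoreExecutors.directExecutor());

        zkLikeWorker.shutdown();
    }
}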
Use of com.google.common.util.concurrent.FutureCallback in project druid by druid-io.
The class CachingClusteredClientTest, method testOutOfOrderBackgroundCachePopulation.
@Test
public void testOutOfOrderBackgroundCachePopulation() throws Exception {
    // to trigger the actual execution when we are ready to shuffle the order.
    abstract class DrainTask implements Runnable {
    }

    final ForwardingListeningExecutorService randomizingExecutorService = new ForwardingListeningExecutorService() {

        final ConcurrentLinkedDeque<Pair<SettableFuture, Object>> taskQueue = new ConcurrentLinkedDeque<>();

        // Use a same-thread delegate so submitted tasks are complete before moving on to the next query run.
        final ListeningExecutorService delegate = MoreExecutors.listeningDecorator(MoreExecutors.sameThreadExecutor());

        @Override
        protected ListeningExecutorService delegate() {
            return delegate;
        }

        private <T> ListenableFuture<T> maybeSubmitTask(Object task, boolean wait) {
            if (wait) {
                SettableFuture<T> future = SettableFuture.create();
                taskQueue.addFirst(Pair.<SettableFuture, Object>of(future, task));
                return future;
            } else {
                List<Pair<SettableFuture, Object>> tasks = Lists.newArrayList(taskQueue.iterator());
                Collections.shuffle(tasks, new Random(0));
                for (final Pair<SettableFuture, Object> pair : tasks) {
                    ListenableFuture future = pair.rhs instanceof Callable
                            ? delegate.submit((Callable) pair.rhs)
                            : delegate.submit((Runnable) pair.rhs);
                    Futures.addCallback(future, new FutureCallback() {

                        @Override
                        public void onSuccess(@Nullable Object result) {
                            pair.lhs.set(result);
                        }

                        @Override
                        public void onFailure(Throwable t) {
                            pair.lhs.setException(t);
                        }
                    });
                }
            }
            return task instanceof Callable
                    ? delegate.submit((Callable) task)
                    : (ListenableFuture<T>) delegate.submit((Runnable) task);
        }

        @Override
        public <T> ListenableFuture<T> submit(Callable<T> task) {
            return maybeSubmitTask(task, true);
        }

        @Override
        public ListenableFuture<?> submit(Runnable task) {
            if (task instanceof DrainTask) {
                return maybeSubmitTask(task, false);
            } else {
                return maybeSubmitTask(task, true);
            }
        }
    };

    client = makeClient(randomizingExecutorService);

    // callback to be run every time a query run is complete, to ensure all background
    // caching tasks are executed, and the cache is populated before we move on to the next query
    queryCompletedCallback = new Runnable() {
        @Override
        public void run() {
            try {
                randomizingExecutorService.submit(new DrainTask() {
                    @Override
                    public void run() {
                        // no-op
                    }
                }).get();
            } catch (Exception e) {
                Throwables.propagate(e);
            }
        }
    };

    final Druids.TimeseriesQueryBuilder builder = Druids.newTimeseriesQueryBuilder()
            .dataSource(DATA_SOURCE)
            .intervals(SEG_SPEC)
            .filters(DIM_FILTER)
            .granularity(GRANULARITY)
            .aggregators(AGGS)
            .postAggregators(POST_AGGS)
            .context(CONTEXT);

    QueryRunner runner = new FinalizeResultsQueryRunner(
            client,
            new TimeseriesQueryQueryToolChest(QueryRunnerTestHelper.NoopIntervalChunkingQueryRunnerDecorator()));

    testQueryCaching(
            runner,
            builder.build(),
            new Interval("2011-01-05/2011-01-10"),
            makeTimeResults(
                    new DateTime("2011-01-05"), 85, 102,
                    new DateTime("2011-01-06"), 412, 521,
                    new DateTime("2011-01-07"), 122, 21894,
                    new DateTime("2011-01-08"), 5, 20,
                    new DateTime("2011-01-09"), 18, 521),
            new Interval("2011-01-10/2011-01-13"),
            makeTimeResults(
                    new DateTime("2011-01-10"), 85, 102,
                    new DateTime("2011-01-11"), 412, 521,
                    new DateTime("2011-01-12"), 122, 21894));
}
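The interesting move in this test is how maybeSubmitTask parks work: the caller immediately gets a SettableFuture, and when the parked task is finally run on the delegate executor, a FutureCallback copies the delegate future's outcome into that SettableFuture. Below is a stripped-down sketch of just that bridge, without the shuffling or the DrainTask marker; the class and method names are invented for illustration.

import com.google.common.util.concurrent.FutureCallback;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.ListeningExecutorService;
import com.google.common.util.concurrent.MoreExecutors;
import com.google.common.util.concurrent.SettableFuture;

import java.util.ArrayDeque;
import java.util.Queue;
import java.util.concurrent.Callable;
import java.util.concurrent.Executors;

// Minimal "park now, run later" sketch: callers get a SettableFuture right away, and a
// FutureCallback forwards the delegate's outcome into it once the deferred task runs.
public class DeferredSubmitSketch {

    private final ListeningExecutorService delegate =
            MoreExecutors.listeningDecorator(Executors.newSingleThreadExecutor());
    private final Queue<Runnable> deferred = new ArrayDeque<>();

    public synchronized <T> ListenableFuture<T> submitLater(Callable<T> task) {
        SettableFuture<T> handle = SettableFuture.create();
        deferred.add(() -> Futures.addCallback(delegate.submit(task), new FutureCallback<T>() {
            @Override
            public void onSuccess(T result) {
                handle.set(result);          // forward the value
            }

            @Override
            public void onFailure(Throwable t) {
                handle.setException(t);      // forward the failure
            }
        }, MoreExecutors.directExecutor()));
        return handle;
    }

    // Actually run everything that was parked.
    public synchronized void drain() {
        Runnable r;
        while ((r = deferred.poll()) != null) {
            r.run();
        }
    }

    public static void main(String[] args) throws Exception {
        DeferredSubmitSketch sketch = new DeferredSubmitSketch();
        ListenableFuture<Integer> f = sketch.submitLater(() -> 42);
        sketch.drain();
        System.out.println(f.get());   // prints 42
        sketch.delegate.shutdown();
    }
}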
Use of com.google.common.util.concurrent.FutureCallback in project buck by facebook.
The class TestRunning, method transformTestResults.
private static ListenableFuture<TestResults> transformTestResults(
        final CommandRunnerParams params,
        ListenableFuture<TestResults> originalTestResults,
        final TestRule testRule,
        final TestRule.TestReportingCallback testReportingCallback,
        final ImmutableSet<String> testTargets,
        final AtomicInteger lastReportedTestSequenceNumber,
        final int totalNumberOfTests) {
    final SettableFuture<TestResults> transformedTestResults = SettableFuture.create();
    FutureCallback<TestResults> callback = new FutureCallback<TestResults>() {

        private TestResults postTestResults(TestResults testResults) {
            if (!testRule.supportsStreamingTests()) {
                // For test rules which don't support streaming tests, we'll
                // stream test summary events after interpreting the
                // results.
                LOG.debug("Simulating streaming test events for rule %s", testRule);
                testReportingCallback.testsDidBegin();
                for (TestCaseSummary testCaseSummary : testResults.getTestCases()) {
                    for (TestResultSummary testResultSummary : testCaseSummary.getTestResults()) {
                        testReportingCallback.testDidBegin(testResultSummary.getTestCaseName(), testResultSummary.getTestName());
                        testReportingCallback.testDidEnd(testResultSummary);
                    }
                }
                testReportingCallback.testsDidEnd(testResults.getTestCases());
                LOG.debug("Done simulating streaming test events for rule %s", testRule);
            }
            TestResults transformedTestResults = TestResults.builder()
                    .from(testResults)
                    .setSequenceNumber(lastReportedTestSequenceNumber.incrementAndGet())
                    .setTotalNumberOfTests(totalNumberOfTests)
                    .build();
            params.getBuckEventBus().post(IndividualTestEvent.finished(testTargets, transformedTestResults));
            return transformedTestResults;
        }

        private String getStackTrace(Throwable throwable) {
            StringWriter sw = new StringWriter();
            PrintWriter pw = new PrintWriter(sw);
            throwable.printStackTrace(pw);
            return sw.toString();
        }

        @Override
        public void onSuccess(TestResults testResults) {
            LOG.debug("Transforming successful test results %s", testResults);
            postTestResults(testResults);
            transformedTestResults.set(testResults);
        }

        @Override
        public void onFailure(Throwable throwable) {
            LOG.warn(throwable, "Test command step failed, marking %s as failed", testRule);
            // If the test command steps themselves fail, report this as a special test result.
            TestResults testResults = TestResults.of(
                    testRule.getBuildTarget(),
                    ImmutableList.of(
                            new TestCaseSummary(
                                    testRule.getBuildTarget().toString(),
                                    ImmutableList.of(
                                            new TestResultSummary(
                                                    testRule.getBuildTarget().toString(),
                                                    "main",
                                                    ResultType.FAILURE,
                                                    0L,
                                                    throwable.getMessage(),
                                                    getStackTrace(throwable),
                                                    "",
                                                    "")))),
                    testRule.getContacts(),
                    testRule.getLabels().stream().map(Object::toString).collect(MoreCollectors.toImmutableSet()));
            TestResults newTestResults = postTestResults(testResults);
            transformedTestResults.set(newTestResults);
        }
    };
    Futures.addCallback(originalTestResults, callback);
    return transformedTestResults;
}
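Buck builds the transformed future by hand with a SettableFuture plus a FutureCallback because it also needs side effects (posting bus events) and must turn a failure into a synthetic TestResults. When all that is needed is mapping a future's value or its error, Guava's Futures.transform and Futures.catching do the plumbing for you; this is an alternative to the pattern above, not what Buck does. A rough sketch with placeholder strings instead of TestResults:

import com.google.common.base.Function;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.ListeningExecutorService;
import com.google.common.util.concurrent.MoreExecutors;

import java.util.concurrent.Callable;
import java.util.concurrent.Executors;

public class TransformSketch {

    public static void main(String[] args) throws Exception {
        ListeningExecutorService pool =
                MoreExecutors.listeningDecorator(Executors.newSingleThreadExecutor());

        // Hypothetical stand-in for the original test-results future.
        Callable<String> runTests = () -> "raw results";
        ListenableFuture<String> original = pool.submit(runTests);

        // Map the success value (roughly what postTestResults does to the sequence number).
        Function<String, String> addSequence = results -> results + " #1";
        ListenableFuture<String> numbered =
                Futures.transform(original, addSequence, MoreExecutors.directExecutor());

        // Map a failure into a substitute value (roughly what onFailure does above).
        Function<Exception, String> fallback = e -> "synthetic failure result: " + e.getMessage();
        ListenableFuture<String> safe =
                Futures.catching(numbered, Exception.class, fallback, MoreExecutors.directExecutor());

        System.out.println(safe.get());
        pool.shutdown();
    }
}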