Use of com.stumbleupon.async.Deferred in project opentsdb by OpenTSDB.
From the class MetaPurge, method purgeTSMeta.
/**
* Scans the entire meta table and removes any TSMeta and TSUID counter columns found.
* @return The total number of columns deleted
*/
public Deferred<Long> purgeTSMeta() {
// a list to store all pending deletes so we don't exit before they've
// completed
final ArrayList<Deferred<Object>> delete_calls = new ArrayList<Deferred<Object>>();
final Deferred<Long> result = new Deferred<Long>();
/**
* Scanner callback that will recursively call itself and loop through the
* rows of the UID table, issuing delete requests for all of the columns in
* a row that match a meta qualifier.
*/
final class MetaScanner implements Callback<Deferred<Long>, ArrayList<ArrayList<KeyValue>>> {
final Scanner scanner;
public MetaScanner() {
scanner = getScanner(tsdb.metaTable());
}
/**
* Fetches the next group of rows from the scanner and sets this class as
* a callback
* @return The total number of columns deleted after completion
*/
public Deferred<Long> scan() {
return scanner.nextRows().addCallbackDeferring(this);
}
@Override
public Deferred<Long> call(ArrayList<ArrayList<KeyValue>> rows) throws Exception {
if (rows == null) {
result.callback(columns);
return null;
}
for (final ArrayList<KeyValue> row : rows) {
// one delete request per row. We'll almost always delete the whole
// row, so preallocate some ram.
ArrayList<byte[]> qualifiers = new ArrayList<byte[]>(row.size());
for (KeyValue column : row) {
if (Bytes.equals(TSMeta.META_QUALIFIER(), column.qualifier())) {
qualifiers.add(column.qualifier());
} else if (Bytes.equals(TSMeta.COUNTER_QUALIFIER(), column.qualifier())) {
qualifiers.add(column.qualifier());
}
}
if (qualifiers.size() > 0) {
columns += qualifiers.size();
final DeleteRequest delete = new DeleteRequest(tsdb.metaTable(), row.get(0).key(), NAME_FAMILY, qualifiers.toArray(new byte[qualifiers.size()][]));
delete_calls.add(tsdb.getClient().delete(delete));
}
}
/**
* Buffer callback used to wait on all of the delete calls for the
* last set of rows returned from the scanner so we don't fill up the
* deferreds array and OOM out.
*/
final class ContinueCB implements Callback<Deferred<Long>, ArrayList<Object>> {
@Override
public Deferred<Long> call(ArrayList<Object> deletes) throws Exception {
LOG.debug("[" + thread_id + "] Processed [" + deletes.size() + "] delete calls");
delete_calls.clear();
return scan();
}
}
// fetch the next set of rows after waiting for current set of delete
// requests to complete
Deferred.group(delete_calls).addCallbackDeferring(new ContinueCB());
return null;
}
}
// start the scan
new MetaScanner().scan();
return result;
}
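The method above follows a recurring pattern in OpenTSDB: fetch a batch of rows from the scanner, issue one asynchronous delete per row, and only request the next batch once Deferred.group of the pending deletes has completed, so outstanding calls never pile up. Below is a minimal, self-contained sketch of that pattern using only the com.stumbleupon.async classes; nextBatch() and deleteAsync() are hypothetical stand-ins for scanner.nextRows() and tsdb.getClient().delete(...), not real OpenTSDB APIs.

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

import com.stumbleupon.async.Callback;
import com.stumbleupon.async.Deferred;

/** Sketch of the batch-and-wait scan pattern; not part of OpenTSDB. */
public class BatchedPurgeSketch {
  private final List<List<String>> batches; // stand-in for the scanner's row sets
  private int index = 0;
  private long columns = 0;
  private final Deferred<Long> result = new Deferred<Long>();

  public BatchedPurgeSketch(final List<List<String>> batches) {
    this.batches = batches;
  }

  /** Mimics scanner.nextRows(): returns the next batch, or null when exhausted. */
  private Deferred<List<String>> nextBatch() {
    final List<String> batch = index < batches.size() ? batches.get(index++) : null;
    return Deferred.fromResult(batch);
  }

  /** Mimics an asynchronous delete call that completes immediately. */
  private Deferred<Object> deleteAsync(final String row) {
    return Deferred.fromResult((Object) row);
  }

  public Deferred<Long> purge() {
    scanNext();     // kick off the first fetch
    return result;  // fired only once the scanner is exhausted
  }

  private void scanNext() {
    nextBatch().addCallback(new Callback<Object, List<String>>() {
      public Object call(final List<String> batch) {
        if (batch == null) {
          result.callback(columns);  // no more rows: report the running total
          return null;
        }
        // Issue one async "delete" per row and remember the pending calls.
        final ArrayList<Deferred<Object>> pending = new ArrayList<Deferred<Object>>();
        for (final String row : batch) {
          columns++;
          pending.add(deleteAsync(row));
        }
        // Throttle: fetch the next batch only after the whole group completes.
        Deferred.group(pending).addCallback(new Callback<Object, ArrayList<Object>>() {
          public Object call(final ArrayList<Object> done) {
            scanNext();
            return null;
          }
        });
        return null;
      }
    });
  }

  public static void main(final String[] args) throws Exception {
    final List<List<String>> data = Arrays.asList(
        Arrays.asList("row1", "row2"), Arrays.asList("row3"));
    System.out.println(new BatchedPurgeSketch(data).purge().join()); // prints 3
  }
}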
Use of com.stumbleupon.async.Deferred in project opentsdb by OpenTSDB.
From the class TreeSync, method run.
/**
* Performs a tree synchronization using a table scanner across the UID table.
* Throws a RuntimeException if the scanner or tree processing hits an
* unexpected error.
*/
public void run() {
final Scanner scanner = getScanner();
// start the process by loading all of the trees in the system
final List<Tree> trees;
try {
trees = Tree.fetchAllTrees(tsdb).joinUninterruptibly();
LOG.info("[" + thread_id + "] Complete");
} catch (Exception e) {
LOG.error("[" + thread_id + "] Unexpected Exception", e);
throw new RuntimeException("[" + thread_id + "] Unexpected exception", e);
}
if (trees == null) {
LOG.warn("No tree definitions were found");
return;
} else {
boolean has_enabled_tree = false;
for (Tree tree : trees) {
if (tree.getEnabled()) {
has_enabled_tree = true;
break;
}
}
if (!has_enabled_tree) {
LOG.warn("No enabled trees were found");
return;
}
LOG.info("Found [" + trees.size() + "] trees");
}
// setup an array for storing the tree processing calls so we can block
// until each call has completed
final ArrayList<Deferred<Boolean>> tree_calls = new ArrayList<Deferred<Boolean>>();
final Deferred<Boolean> completed = new Deferred<Boolean>();
/**
* Scanner callback that loops through the UID table recursively until
* the scanner returns a null row set.
*/
final class TsuidScanner implements Callback<Deferred<Boolean>, ArrayList<ArrayList<KeyValue>>> {
/**
* Fetches the next set of rows from the scanner, adding this class as a
* callback
* @return A meaningless deferred used to wait on until processing has
* completed
*/
public Deferred<Boolean> scan() {
return scanner.nextRows().addCallbackDeferring(this);
}
@Override
public Deferred<Boolean> call(ArrayList<ArrayList<KeyValue>> rows) throws Exception {
if (rows == null) {
completed.callback(true);
return null;
}
for (final ArrayList<KeyValue> row : rows) {
// convert to a string one time
final String tsuid = UniqueId.uidToString(row.get(0).key());
/**
* A throttling callback used to wait for the current TSMeta to
* complete processing through the trees before continuing on with
* the next set.
*/
final class TreeBuilderBufferCB implements Callback<Boolean, ArrayList<ArrayList<Boolean>>> {
@Override
public Boolean call(ArrayList<ArrayList<Boolean>> builder_calls) throws Exception {
//LOG.debug("Processed [" + builder_calls.size() + "] tree_calls");
return true;
}
}
/**
* Executed after parsing a TSMeta object and loading all of the
* associated UIDMetas. Once the meta has been loaded, this callback
* runs it through each of the configured TreeBuilder objects and
* stores the resulting deferred in an array. Once processing of all
* of the rules has completed, we group the deferreds and call
* TreeBuilderBufferCB to wait for their completion.
*/
final class ParseCB implements Callback<Deferred<Boolean>, TSMeta> {
final ArrayList<Deferred<ArrayList<Boolean>>> builder_calls = new ArrayList<Deferred<ArrayList<Boolean>>>();
@Override
public Deferred<Boolean> call(TSMeta meta) throws Exception {
if (meta != null) {
LOG.debug("Processing TSMeta: " + meta + " w value: " + JSON.serializeToString(meta));
// copy the trees into a tree builder object and iterate through
// each builder. We need to do this as a builder is not thread
// safe and cannot be used asynchronously.
final ArrayList<TreeBuilder> tree_builders = new ArrayList<TreeBuilder>(trees.size());
for (Tree tree : trees) {
if (!tree.getEnabled()) {
continue;
}
final TreeBuilder builder = new TreeBuilder(tsdb, tree);
tree_builders.add(builder);
}
for (TreeBuilder builder : tree_builders) {
builder_calls.add(builder.processTimeseriesMeta(meta));
}
return Deferred.group(builder_calls).addCallback(new TreeBuilderBufferCB());
} else {
return Deferred.fromResult(false);
}
}
}
/**
* An error handler used to catch issues when loading the TSMeta such
* as a missing UID name. In these situations we want to log that the
* TSMeta had an issue and continue on.
*/
final class ErrBack implements Callback<Deferred<Boolean>, Exception> {
@Override
public Deferred<Boolean> call(Exception e) throws Exception {
if (e.getClass().equals(IllegalStateException.class)) {
LOG.error("Invalid data when processing TSUID [" + tsuid + "]", e);
} else if (e.getClass().equals(IllegalArgumentException.class)) {
LOG.error("Invalid data when processing TSUID [" + tsuid + "]", e);
} else if (e.getClass().equals(NoSuchUniqueId.class)) {
LOG.warn("Timeseries [" + tsuid + "] includes a non-existent UID: " + e.getMessage());
} else {
LOG.error("[" + thread_id + "] Exception while processing TSUID [" + tsuid + "]", e);
}
return Deferred.fromResult(false);
}
}
// matched a TSMeta column, so request a parsing and loading of
// associated UIDMeta objects, then pass it off to callbacks for
// parsing through the trees.
final Deferred<Boolean> process_tsmeta = TSMeta.parseFromColumn(tsdb, row.get(0), true).addCallbackDeferring(new ParseCB());
process_tsmeta.addErrback(new ErrBack());
tree_calls.add(process_tsmeta);
}
/**
* Another buffer callback that waits for the current set of TSMetas to
* complete their tree calls before we fetch another set of rows from
* the scanner. This is necessary to avoid OOM issues.
*/
final class ContinueCB implements Callback<Deferred<Boolean>, ArrayList<Boolean>> {
@Override
public Deferred<Boolean> call(ArrayList<Boolean> tsuids) throws Exception {
LOG.debug("Processed [" + tsuids.size() + "] tree_calls, continuing");
tree_calls.clear();
return scan();
}
}
// request the next set of rows from the scanner, but wait until the
// current set of TSMetas has been processed so we don't slaughter our
// host
Deferred.group(tree_calls).addCallback(new ContinueCB());
return Deferred.fromResult(null);
}
}
/**
* Used to capture unhandled exceptions from the scanner callbacks and
* exit the thread properly
*/
final class ErrBack implements Callback<Deferred<Boolean>, Exception> {
@Override
public Deferred<Boolean> call(Exception e) throws Exception {
LOG.error("Unexpected exception", e);
completed.callback(false);
return Deferred.fromResult(false);
}
}
final TsuidScanner tree_scanner = new TsuidScanner();
tree_scanner.scan().addErrback(new ErrBack());
try {
completed.joinUninterruptibly();
LOG.info("[" + thread_id + "] Complete");
} catch (Exception e) {
LOG.error("[" + thread_id + "] Scanner Exception", e);
throw new RuntimeException("[" + thread_id + "] Scanner exception", e);
}
return;
}
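Two Deferred idioms carry this method: the worker thread blocks on a single "completed" deferred with joinUninterruptibly(), and that deferred is fired exactly once, either from the normal completion path (rows == null) or from the top-level errback, so the join can never hang on an unhandled exception. Here is a stripped-down sketch of that gate, assuming only the async library; the pipeline deferred below is a stand-in for the real scanner callback chain.

import com.stumbleupon.async.Callback;
import com.stumbleupon.async.Deferred;

/** Sketch of the completion-gate pattern; not part of OpenTSDB. */
public class CompletionGateSketch {
  public static void main(final String[] args) throws Exception {
    // Fired exactly once, by either the success callback or the errback.
    final Deferred<Boolean> completed = new Deferred<Boolean>();

    // Stand-in for the scanner chain built via nextRows()/addCallbackDeferring.
    final Deferred<Object> pipeline = Deferred.<Object>fromResult(null);

    pipeline.addCallback(new Callback<Object, Object>() {
      public Object call(final Object ignored) {
        completed.callback(true);   // success path
        return null;
      }
    }).addErrback(new Callback<Object, Exception>() {
      public Object call(final Exception e) {
        completed.callback(false);  // failure path: still unblock the caller
        return null;
      }
    });

    // Block the worker thread until the asynchronous work reports back.
    final boolean ok = completed.joinUninterruptibly();
    System.out.println("pipeline finished, success=" + ok);
  }
}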
Use of com.stumbleupon.async.Deferred in project opentsdb by OpenTSDB.
From the class Branch, method fetchBranch.
/**
* Attempts to fetch the branch, its leaves, and all child branches.
* The UID names for each leaf may also be loaded if configured.
* @param tsdb The TSDB to use for storage access
* @param branch_id ID of the branch to retrieve
* @param load_leaf_uids Whether or not to load UID names for each leaf
* @return A branch if found, null if it did not exist
* @throws JSONException if the object could not be deserialized
*/
public static Deferred<Branch> fetchBranch(final TSDB tsdb, final byte[] branch_id, final boolean load_leaf_uids) {
final Deferred<Branch> result = new Deferred<Branch>();
final Scanner scanner = setupBranchScanner(tsdb, branch_id);
// This is the branch that will be loaded with data from the scanner and
// returned at the end of the process.
final Branch branch = new Branch();
// A list of deferreds to wait on for child leaf processing
final ArrayList<Deferred<Object>> leaf_group = new ArrayList<Deferred<Object>>();
/**
* Exception handler to catch leaves with an invalid UID name due to a
* possible deletion. This will allow the scanner to keep loading valid
* leaves and ignore problems. The fsck tool can be used to clean up
* orphaned leaves. If we catch something other than an NSU, it will
* re-throw the exception
*/
final class LeafErrBack implements Callback<Object, Exception> {
final byte[] qualifier;
public LeafErrBack(final byte[] qualifier) {
this.qualifier = qualifier;
}
@Override
public Object call(final Exception e) throws Exception {
Throwable ex = e;
while (ex.getClass().equals(DeferredGroupException.class)) {
ex = ex.getCause();
}
if (ex.getClass().equals(NoSuchUniqueId.class)) {
LOG.debug("Invalid UID for leaf: " + idToString(qualifier) + " in branch: " + idToString(branch_id), ex);
} else {
throw (Exception) ex;
}
return null;
}
}
/**
* Called after a leaf has been loaded successfully and adds the leaf
* to the branch's leaf set. Also lazily initializes the leaf set if it
* hasn't been.
*/
final class LeafCB implements Callback<Object, Leaf> {
@Override
public Object call(final Leaf leaf) throws Exception {
if (leaf != null) {
if (branch.leaves == null) {
branch.leaves = new HashMap<Integer, Leaf>();
}
branch.leaves.put(leaf.hashCode(), leaf);
}
return null;
}
}
/**
* Scanner callback executed recursively each time we get a set of data
* from storage. This is responsible for determining what columns are
* returned and issuing requests to load leaf objects.
* When the scanner returns a null set of rows, the method initiates the
* final callback.
*/
final class FetchBranchCB implements Callback<Object, ArrayList<ArrayList<KeyValue>>> {
/**
* Starts the scanner and is called recursively to fetch the next set of
* rows from the scanner.
* @return The branch if loaded successfully, null if the branch was not
* found.
*/
public Object fetchBranch() {
return scanner.nextRows().addCallback(this);
}
/**
* Loops through each row of the scanner results and parses out branch
* definitions and child leaves.
* @return The final branch callback if the scanner returns a null set
*/
@Override
public Object call(final ArrayList<ArrayList<KeyValue>> rows) throws Exception {
if (rows == null) {
if (branch.tree_id < 1 || branch.path == null) {
result.callback(null);
} else {
result.callback(branch);
}
return null;
}
for (final ArrayList<KeyValue> row : rows) {
for (KeyValue column : row) {
// matched a branch column
if (Bytes.equals(BRANCH_QUALIFIER, column.qualifier())) {
if (Bytes.equals(branch_id, column.key())) {
// it's *this* branch. We deserialize to a new object and copy
// since the columns could be in any order and we may get a
// leaf before the branch
final Branch local_branch = JSON.parseToObject(column.value(), Branch.class);
branch.path = local_branch.path;
branch.display_name = local_branch.display_name;
branch.tree_id = Tree.bytesToId(column.key());
} else {
// it's a child branch
final Branch child = JSON.parseToObject(column.value(), Branch.class);
child.tree_id = Tree.bytesToId(column.key());
branch.addChild(child);
}
// parse out a leaf
} else if (Bytes.memcmp(Leaf.LEAF_PREFIX(), column.qualifier(), 0, Leaf.LEAF_PREFIX().length) == 0) {
if (Bytes.equals(branch_id, column.key())) {
// process a leaf and skip if the UIDs for the TSUID can't be
// found. Add an errback to catch NoSuchUniqueId exceptions
leaf_group.add(Leaf.parseFromStorage(tsdb, column, load_leaf_uids).addCallbacks(new LeafCB(), new LeafErrBack(column.qualifier())));
} else {
// TODO - figure out an efficient way to increment a counter in
// the child branch with the # of leaves it has
}
}
}
}
// recursively call ourself to fetch more results from the scanner
return fetchBranch();
}
}
// start scanning
new FetchBranchCB().fetchBranch();
return result;
}
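Note how each leaf load attaches both a success callback and an error callback in a single addCallbacks() call, and how the errback unwraps DeferredGroupException layers before deciding whether to swallow the error or rethrow it. The sketch below reduces that per-item error-tolerance pattern so it compiles without any HBase classes: loadItem() is a hypothetical stand-in for Leaf.parseFromStorage(), and IllegalStateException stands in for NoSuchUniqueId.

import com.stumbleupon.async.Callback;
import com.stumbleupon.async.Deferred;
import com.stumbleupon.async.DeferredGroupException;

/** Sketch of tolerating expected per-item failures; not part of OpenTSDB. */
public class PerItemErrbackSketch {
  /** Hypothetical stand-in for Leaf.parseFromStorage(): a value or an error. */
  static Deferred<String> loadItem(final boolean fail) {
    return fail
        ? Deferred.<String>fromError(new IllegalStateException("missing UID"))
        : Deferred.fromResult("leaf");
  }

  public static void main(final String[] args) throws Exception {
    final Deferred<Object> d = loadItem(true).addCallbacks(
        new Callback<Object, String>() {        // success: record the leaf
          public Object call(final String leaf) {
            System.out.println("loaded " + leaf);
            return null;
          }
        },
        new Callback<Object, Exception>() {     // error: unwrap, tolerate known cases
          public Object call(final Exception e) throws Exception {
            Throwable ex = e;
            while (ex instanceof DeferredGroupException) {
              ex = ex.getCause();               // group exceptions wrap the real cause
            }
            if (ex instanceof IllegalStateException) {
              System.out.println("skipping bad leaf: " + ex.getMessage());
              return null;                      // swallow and keep going
            }
            throw (Exception) ex;               // anything unexpected is fatal
          }
        });
    d.join();
  }
}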
Use of com.stumbleupon.async.Deferred in project opentsdb by OpenTSDB.
From the class TimeSeriesLookup, method resolveUIDs.
/**
* Resolves the metric and tag strings to their UIDs
* @return A deferred to wait on for resolution to complete.
*/
private Deferred<Object> resolveUIDs() {
class TagsCB implements Callback<Object, ArrayList<Object>> {
@Override
public Object call(final ArrayList<Object> ignored) throws Exception {
rowkey_regex = getRowKeyRegex();
return null;
}
}
class PairResolution implements Callback<Object, ArrayList<byte[]>> {
@Override
public Object call(final ArrayList<byte[]> tags) throws Exception {
if (tags.size() < 2) {
throw new IllegalArgumentException("Somehow we received an array " + "that wasn't two bytes in size! " + tags);
}
pairs.add(new ByteArrayPair(tags.get(0), tags.get(1)));
return Deferred.fromResult(null);
}
}
class TagResolution implements Callback<Deferred<Object>, Object> {
@Override
public Deferred<Object> call(final Object unused) throws Exception {
if (query.getTags() == null || query.getTags().isEmpty()) {
return Deferred.fromResult(null);
}
pairs = Collections.synchronizedList(new ArrayList<ByteArrayPair>(query.getTags().size()));
final ArrayList<Deferred<Object>> deferreds = new ArrayList<Deferred<Object>>(pairs.size());
for (final Pair<String, String> tags : query.getTags()) {
final ArrayList<Deferred<byte[]>> deferred_tags = new ArrayList<Deferred<byte[]>>(2);
if (tags.getKey() != null && !tags.getKey().equals("*")) {
deferred_tags.add(tsdb.getUIDAsync(UniqueIdType.TAGK, tags.getKey()));
} else {
deferred_tags.add(Deferred.<byte[]>fromResult(null));
}
if (tags.getValue() != null && !tags.getValue().equals("*")) {
deferred_tags.add(tsdb.getUIDAsync(UniqueIdType.TAGV, tags.getValue()));
} else {
deferred_tags.add(Deferred.<byte[]>fromResult(null));
}
deferreds.add(Deferred.groupInOrder(deferred_tags).addCallback(new PairResolution()));
}
return Deferred.group(deferreds).addCallback(new TagsCB());
}
}
class MetricCB implements Callback<Deferred<Object>, byte[]> {
@Override
public Deferred<Object> call(final byte[] uid) throws Exception {
metric_uid = uid;
LOG.debug("Found UID (" + UniqueId.uidToString(metric_uid) + ") for metric (" + query.getMetric() + ")");
return new TagResolution().call(null);
}
}
if (query.getMetric() != null && !query.getMetric().isEmpty() && !query.getMetric().equals("*")) {
return tsdb.getUIDAsync(UniqueIdType.METRIC, query.getMetric()).addCallbackDeferring(new MetricCB());
} else {
try {
return new TagResolution().call(null);
} catch (Exception e) {
return Deferred.fromError(e);
}
}
}
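The tag resolution above depends on Deferred.groupInOrder(), which preserves the order in which the deferreds were added: index 0 of the grouped result is always the tagk lookup and index 1 the tagv lookup, with null marking a wildcard. A small sketch of that ordering guarantee follows; resolve() here is a hypothetical stand-in for tsdb.getUIDAsync().

import java.util.ArrayList;

import com.stumbleupon.async.Callback;
import com.stumbleupon.async.Deferred;

/** Sketch of ordered grouping of UID lookups; not part of OpenTSDB. */
public class GroupInOrderSketch {
  /** Hypothetical stand-in for tsdb.getUIDAsync(): null means a wildcard. */
  static Deferred<byte[]> resolve(final String name) {
    return name == null
        ? Deferred.<byte[]>fromResult(null)
        : Deferred.fromResult(name.getBytes());
  }

  public static void main(final String[] args) throws Exception {
    final ArrayList<Deferred<byte[]>> pair = new ArrayList<Deferred<byte[]>>(2);
    pair.add(resolve("host")); // tag key
    pair.add(resolve(null));   // tag value wildcard

    // groupInOrder keeps the add order, so index 0 is always the tagk result
    // and index 1 the tagv result, regardless of completion order.
    Deferred.groupInOrder(pair).addCallback(
        new Callback<Object, ArrayList<byte[]>>() {
          public Object call(final ArrayList<byte[]> uids) {
            System.out.println("tagk resolved: " + (uids.get(0) != null));
            System.out.println("tagv wildcard: " + (uids.get(1) == null));
            return null;
          }
        }).join();
  }
}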
Use of com.stumbleupon.async.Deferred in project opentsdb by OpenTSDB.
From the class PutDataPointRpc, method execute.
/**
* Handles HTTP RPC put requests
* @param tsdb The TSDB to which we belong
* @param query The HTTP query from the user
* @throws IOException if there is an error parsing the query or formatting
* the output
* @throws BadRequestException if the user supplied bad data
* @since 2.0
*/
public void execute(final TSDB tsdb, final HttpQuery query) throws IOException {
requests.incrementAndGet();
// only accept POST
if (query.method() != HttpMethod.POST) {
throw new BadRequestException(HttpResponseStatus.METHOD_NOT_ALLOWED, "Method not allowed", "The HTTP method [" + query.method().getName() + "] is not permitted for this endpoint");
}
final List<IncomingDataPoint> dps = query.serializer().parsePutV1();
if (dps.size() < 1) {
throw new BadRequestException("No datapoints found in content");
}
final boolean show_details = query.hasQueryStringParam("details");
final boolean show_summary = query.hasQueryStringParam("summary");
final boolean synchronous = query.hasQueryStringParam("sync");
final int sync_timeout = query.hasQueryStringParam("sync_timeout") ? Integer.parseInt(query.getQueryStringParam("sync_timeout")) : 0;
// this is used to coordinate timeouts
final AtomicBoolean sending_response = new AtomicBoolean();
sending_response.set(false);
final ArrayList<HashMap<String, Object>> details = show_details ? new ArrayList<HashMap<String, Object>>() : null;
int queued = 0;
final List<Deferred<Boolean>> deferreds = synchronous ? new ArrayList<Deferred<Boolean>>(dps.size()) : null;
for (final IncomingDataPoint dp : dps) {
/** Handles passing a data point to the storage exception handler if
* we were unable to store it for any reason */
final class PutErrback implements Callback<Boolean, Exception> {
public Boolean call(final Exception arg) {
handleStorageException(tsdb, dp, arg);
hbase_errors.incrementAndGet();
if (show_details) {
details.add(getHttpDetails("Storage exception: " + arg.getMessage(), dp));
}
return false;
}
public String toString() {
return "HTTP Put Exception CB";
}
}
/** Simply marks the put as successful */
final class SuccessCB implements Callback<Boolean, Object> {
@Override
public Boolean call(final Object obj) {
return true;
}
public String toString() {
return "HTTP Put success CB";
}
}
try {
if (dp.getMetric() == null || dp.getMetric().isEmpty()) {
if (show_details) {
details.add(this.getHttpDetails("Metric name was empty", dp));
}
LOG.warn("Metric name was empty: " + dp);
illegal_arguments.incrementAndGet();
continue;
}
if (dp.getTimestamp() <= 0) {
if (show_details) {
details.add(this.getHttpDetails("Invalid timestamp", dp));
}
LOG.warn("Invalid timestamp: " + dp);
illegal_arguments.incrementAndGet();
continue;
}
if (dp.getValue() == null || dp.getValue().isEmpty()) {
if (show_details) {
details.add(this.getHttpDetails("Empty value", dp));
}
LOG.warn("Empty value: " + dp);
invalid_values.incrementAndGet();
continue;
}
if (dp.getTags() == null || dp.getTags().size() < 1) {
if (show_details) {
details.add(this.getHttpDetails("Missing tags", dp));
}
LOG.warn("Missing tags: " + dp);
illegal_arguments.incrementAndGet();
continue;
}
final Deferred<Object> deferred;
if (Tags.looksLikeInteger(dp.getValue())) {
deferred = tsdb.addPoint(dp.getMetric(), dp.getTimestamp(), Tags.parseLong(dp.getValue()), dp.getTags());
} else {
deferred = tsdb.addPoint(dp.getMetric(), dp.getTimestamp(), Float.parseFloat(dp.getValue()), dp.getTags());
}
if (synchronous) {
deferreds.add(deferred.addCallback(new SuccessCB()));
}
deferred.addErrback(new PutErrback());
++queued;
} catch (NumberFormatException x) {
if (show_details) {
details.add(this.getHttpDetails("Unable to parse value to a number", dp));
}
LOG.warn("Unable to parse value to a number: " + dp);
invalid_values.incrementAndGet();
} catch (IllegalArgumentException iae) {
if (show_details) {
details.add(this.getHttpDetails(iae.getMessage(), dp));
}
LOG.warn(iae.getMessage() + ": " + dp);
illegal_arguments.incrementAndGet();
} catch (NoSuchUniqueName nsu) {
if (show_details) {
details.add(this.getHttpDetails("Unknown metric", dp));
}
LOG.warn("Unknown metric: " + dp);
unknown_metrics.incrementAndGet();
}
}
/** A timer task that will respond to the user with the number of timeouts
* for synchronous writes. */
class PutTimeout implements TimerTask {
final int queued;
public PutTimeout(final int queued) {
this.queued = queued;
}
@Override
public void run(final Timeout timeout) throws Exception {
if (sending_response.get()) {
if (LOG.isDebugEnabled()) {
LOG.debug("Put data point call " + query + " already responded successfully");
}
return;
} else {
sending_response.set(true);
}
// figure out how many writes are outstanding
int good_writes = 0;
int failed_writes = 0;
int timeouts = 0;
for (int i = 0; i < deferreds.size(); i++) {
try {
if (deferreds.get(i).join(1)) {
++good_writes;
} else {
++failed_writes;
}
} catch (TimeoutException te) {
if (show_details) {
details.add(getHttpDetails("Write timedout", dps.get(i)));
}
++timeouts;
}
}
writes_timedout.addAndGet(timeouts);
final int failures = dps.size() - queued;
if (!show_summary && !show_details) {
throw new BadRequestException(HttpResponseStatus.BAD_REQUEST, "The put call has timedout with " + good_writes + " successful writes, " + failed_writes + " failed writes and " + timeouts + " timed out writes.", "Please see the TSD logs or append \"details\" to the put request");
} else {
final HashMap<String, Object> summary = new HashMap<String, Object>();
summary.put("success", good_writes);
summary.put("failed", failures + failed_writes);
summary.put("timeouts", timeouts);
if (show_details) {
summary.put("errors", details);
}
query.sendReply(HttpResponseStatus.BAD_REQUEST, query.serializer().formatPutV1(summary));
}
}
}
// now that everything has been sent we can schedule a timeout if the
// caller asked for a synchronous write.
final Timeout timeout = sync_timeout > 0 ? tsdb.getTimer().newTimeout(new PutTimeout(queued), sync_timeout, TimeUnit.MILLISECONDS) : null;
/** Serializes the response to the client */
class GroupCB implements Callback<Object, ArrayList<Boolean>> {
final int queued;
public GroupCB(final int queued) {
this.queued = queued;
}
@Override
public Object call(final ArrayList<Boolean> results) {
if (sending_response.get()) {
if (LOG.isDebugEnabled()) {
LOG.debug("Put data point call " + query + " was marked as timedout");
}
return null;
} else {
sending_response.set(true);
if (timeout != null) {
timeout.cancel();
}
}
int good_writes = 0;
int failed_writes = 0;
for (final boolean result : results) {
if (result) {
++good_writes;
} else {
++failed_writes;
}
}
final int failures = dps.size() - queued;
if (!show_summary && !show_details) {
if (failures + failed_writes > 0) {
query.sendReply(HttpResponseStatus.BAD_REQUEST, query.serializer().formatErrorV1(new BadRequestException(HttpResponseStatus.BAD_REQUEST, "One or more data points had errors", "Please see the TSD logs or append \"details\" to the put request")));
} else {
query.sendReply(HttpResponseStatus.NO_CONTENT, "".getBytes());
}
} else {
final HashMap<String, Object> summary = new HashMap<String, Object>();
if (sync_timeout > 0) {
summary.put("timeouts", 0);
}
summary.put("success", results.isEmpty() ? queued : good_writes);
summary.put("failed", failures + failed_writes);
if (show_details) {
summary.put("errors", details);
}
if (failures > 0) {
query.sendReply(HttpResponseStatus.BAD_REQUEST, query.serializer().formatPutV1(summary));
} else {
query.sendReply(query.serializer().formatPutV1(summary));
}
}
return null;
}
@Override
public String toString() {
return "put data point serialization callback";
}
}
/** Catches any unexpected exceptions thrown in the callback chain */
class ErrCB implements Callback<Object, Exception> {
@Override
public Object call(final Exception e) throws Exception {
if (sending_response.get()) {
if (LOG.isDebugEnabled()) {
LOG.debug("Put data point call " + query + " was marked as timedout");
}
return null;
} else {
sending_response.set(true);
if (timeout != null) {
timeout.cancel();
}
}
LOG.error("Unexpected exception", e);
throw new RuntimeException("Unexpected exception", e);
}
@Override
public String toString() {
return "put data point error callback";
}
}
if (synchronous) {
Deferred.groupInOrder(deferreds).addCallback(new GroupCB(queued)).addErrback(new ErrCB());
} else {
new GroupCB(queued).call(EMPTY_DEFERREDS);
}
}
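For synchronous writes, each call to tsdb.addPoint() is mapped to a Boolean — true via SuccessCB, false via the errback after the storage exception handler runs — and the whole batch is then collected with Deferred.groupInOrder() so the summary can count successes and failures. The sketch below isolates just that mapping-and-grouping step; addPoint() is a hypothetical stand-in for the real TSDB write call.

import java.util.ArrayList;

import com.stumbleupon.async.Callback;
import com.stumbleupon.async.Deferred;

/** Sketch of mapping writes to Booleans and grouping them; not part of OpenTSDB. */
public class PutGroupSketch {
  /** Hypothetical stand-in for tsdb.addPoint(): succeeds or fails asynchronously. */
  static Deferred<Object> addPoint(final boolean fail) {
    return fail
        ? Deferred.<Object>fromError(new RuntimeException("storage exception"))
        : Deferred.fromResult(new Object());
  }

  public static void main(final String[] args) throws Exception {
    final ArrayList<Deferred<Boolean>> writes = new ArrayList<Deferred<Boolean>>();
    for (final boolean fail : new boolean[] { false, true, false }) {
      // Map each write to a Boolean: true on success, false on any error.
      writes.add(addPoint(fail)
          .addCallback(new Callback<Boolean, Object>() {
            public Boolean call(final Object ignored) { return true; }
          })
          .addErrback(new Callback<Boolean, Exception>() {
            public Boolean call(final Exception e) { return false; }
          }));
    }
    // Wait for every write in submission order, then summarize the results.
    final ArrayList<Boolean> results = Deferred.groupInOrder(writes).join();
    int good = 0;
    for (final boolean ok : results) {
      if (ok) {
        ++good;
      }
    }
    System.out.println(good + "/" + results.size() + " writes succeeded");
  }
}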