Use of com.stumbleupon.async.Callback in project opentsdb by OpenTSDB: class PutDataPointRpc, method processDataPoint.
/**
* Handles one or more incoming data point types for the HTTP endpoint
* to put raw, rolled up or aggregated data points
* @param <T> An {@link IncomingDataPoint} class.
* @param tsdb The TSDB to which we belong
* @param query The query to respond to
* @param dps The de-serialized data points
* @throws BadRequestException if the data is invalid in some way
* @since 2.4
*/
public <T extends IncomingDataPoint> void processDataPoint(final TSDB tsdb, final HttpQuery query, final List<T> dps) {
if (dps.size() < 1) {
throw new BadRequestException("No datapoints found in content");
}
final HashMap<String, String> query_tags = new HashMap<String, String>();
final boolean show_details = query.hasQueryStringParam("details");
final boolean show_summary = query.hasQueryStringParam("summary");
final boolean synchronous = query.hasQueryStringParam("sync");
final int sync_timeout = query.hasQueryStringParam("sync_timeout") ? Integer.parseInt(query.getQueryStringParam("sync_timeout")) : 0;
// this is used to coordinate timeouts
final AtomicBoolean sending_response = new AtomicBoolean();
sending_response.set(false);
final List<Map<String, Object>> details = show_details ? new ArrayList<Map<String, Object>>() : null;
int queued = 0;
final List<Deferred<Boolean>> deferreds = synchronous ? new ArrayList<Deferred<Boolean>>(dps.size()) : null;
if (tsdb.getConfig().enable_header_tag()) {
if (LOG.isDebugEnabled()) {
LOG.debug("Looking for tag header " + tsdb.getConfig().get_name_header_tag());
}
final String header_tag_value = query.getHeaderValue(tsdb.getConfig().get_name_header_tag());
if (header_tag_value != null) {
if (LOG.isDebugEnabled()) {
LOG.debug(" header found with value:" + header_tag_value);
}
Tags.parse(query_tags, header_tag_value);
} else if (LOG.isDebugEnabled()) {
LOG.debug(" no such header in request");
}
}
for (final IncomingDataPoint dp : dps) {
final DataPointType type;
if (dp instanceof RollUpDataPoint) {
type = DataPointType.ROLLUP;
rollup_dps.incrementAndGet();
} else if (dp instanceof HistogramPojo) {
type = DataPointType.HISTOGRAM;
raw_histograms.incrementAndGet();
} else {
type = DataPointType.PUT;
raw_dps.incrementAndGet();
}
/*
Error back callback to handle storage failures
*/
final class PutErrback implements Callback<Boolean, Exception> {
public Boolean call(final Exception arg) {
if (arg instanceof PleaseThrottleException) {
inflight_exceeded.incrementAndGet();
} else {
hbase_errors.incrementAndGet();
}
if (show_details) {
details.add(getHttpDetails("Storage exception: " + arg.getMessage(), dp));
}
// we handle the storage exceptions here so as to avoid creating yet
// another callback object on every data point.
handleStorageException(tsdb, dp, arg);
return false;
}
public String toString() {
return "HTTP Put exception";
}
}
final class SuccessCB implements Callback<Boolean, Object> {
@Override
public Boolean call(final Object obj) {
switch(type) {
case PUT:
raw_stored.incrementAndGet();
break;
case ROLLUP:
rollup_stored.incrementAndGet();
break;
case HISTOGRAM:
raw_histograms_stored.incrementAndGet();
break;
default:
}
return true;
}
}
try {
if (dp == null) {
if (show_details) {
details.add(this.getHttpDetails("Unexpected null datapoint encountered in set.", dp));
}
LOG.warn("Datapoint null was encountered in set.");
illegal_arguments.incrementAndGet();
continue;
}
if (!dp.validate(details)) {
illegal_arguments.incrementAndGet();
continue;
}
// TODO - refactor the add calls someday or move some of this into the
// actual data point class.
final Deferred<Boolean> deferred;
if (type == DataPointType.HISTOGRAM) {
final HistogramPojo pojo = (HistogramPojo) dp;
// validate and/or convert the histogram before storage by decoding
// then re-encoding it.
final Histogram hdp;
if (Strings.isNullOrEmpty(dp.getValue())) {
hdp = pojo.toSimpleHistogram(tsdb);
} else {
hdp = tsdb.histogramManager().decode(pojo.getId(), pojo.getBytes(), false);
}
deferred = tsdb.addHistogramPoint(pojo.getMetric(), pojo.getTimestamp(), tsdb.histogramManager().encode(hdp.getId(), hdp, true), pojo.getTags()).addCallback(new SuccessCB()).addErrback(new PutErrback());
} else {
if (Tags.looksLikeInteger(dp.getValue())) {
switch(type) {
case ROLLUP:
{
final RollUpDataPoint rdp = (RollUpDataPoint) dp;
deferred = tsdb.addAggregatePoint(rdp.getMetric(), rdp.getTimestamp(), Tags.parseLong(rdp.getValue()), dp.getTags(), rdp.getGroupByAggregator() != null, rdp.getInterval(), rdp.getAggregator(), rdp.getGroupByAggregator()).addCallback(new SuccessCB()).addErrback(new PutErrback());
break;
}
default:
deferred = tsdb.addPoint(dp.getMetric(), dp.getTimestamp(), Tags.parseLong(dp.getValue()), dp.getTags()).addCallback(new SuccessCB()).addErrback(new PutErrback());
}
} else {
switch(type) {
case ROLLUP:
{
final RollUpDataPoint rdp = (RollUpDataPoint) dp;
deferred = tsdb.addAggregatePoint(rdp.getMetric(), rdp.getTimestamp(), (Tags.fitsInFloat(dp.getValue()) ? Float.parseFloat(dp.getValue()) : Double.parseDouble(dp.getValue())), dp.getTags(), rdp.getGroupByAggregator() != null, rdp.getInterval(), rdp.getAggregator(), rdp.getGroupByAggregator()).addCallback(new SuccessCB()).addErrback(new PutErrback());
break;
}
default:
deferred = tsdb.addPoint(dp.getMetric(), dp.getTimestamp(), (Tags.fitsInFloat(dp.getValue()) ? Float.parseFloat(dp.getValue()) : Double.parseDouble(dp.getValue())), dp.getTags()).addCallback(new SuccessCB()).addErrback(new PutErrback());
}
}
}
++queued;
if (synchronous) {
deferreds.add(deferred);
}
} catch (NumberFormatException x) {
if (show_details) {
details.add(getHttpDetails("Unable to parse value to a number", dp));
}
LOG.warn("Unable to parse value to a number: " + dp);
invalid_values.incrementAndGet();
} catch (IllegalArgumentException iae) {
if (show_details) {
details.add(getHttpDetails(iae.getMessage(), dp));
}
LOG.warn(iae.getMessage() + ": " + dp);
illegal_arguments.incrementAndGet();
} catch (NoSuchUniqueName nsu) {
if (show_details) {
details.add(getHttpDetails("Unknown metric", dp));
}
LOG.warn("Unknown metric: " + dp);
unknown_metrics.incrementAndGet();
} catch (PleaseThrottleException x) {
handleStorageException(tsdb, dp, x);
if (show_details) {
details.add(getHttpDetails("Please throttle", dp));
}
inflight_exceeded.incrementAndGet();
} catch (TimeoutException tex) {
handleStorageException(tsdb, dp, tex);
if (show_details) {
details.add(getHttpDetails("Timeout exception", dp));
}
requests_timedout.incrementAndGet();
/*} catch (NoSuchUniqueNameInCache x) {
handleStorageException(tsdb, dp, x);
if (show_details) {
details.add(getHttpDetails("Not cached yet", dp));
} */
} catch (RuntimeException e) {
if (show_details) {
details.add(getHttpDetails("Unexpected exception", dp));
}
LOG.warn("Unexpected exception: " + dp, e);
unknown_errors.incrementAndGet();
}
}
/**
* A timer task that will respond to the user with the number of timeouts
* for synchronous writes.
*/
class PutTimeout implements TimerTask {
final int queued;
public PutTimeout(final int queued) {
this.queued = queued;
}
@Override
public void run(final Timeout timeout) throws Exception {
if (sending_response.get()) {
if (LOG.isDebugEnabled()) {
LOG.debug("Put data point call " + query + " already responded successfully");
}
return;
} else {
sending_response.set(true);
}
// figure out how many writes are outstanding
int good_writes = 0;
int failed_writes = 0;
int timeouts = 0;
for (int i = 0; i < deferreds.size(); i++) {
try {
if (deferreds.get(i).join(1)) {
++good_writes;
} else {
++failed_writes;
}
} catch (TimeoutException te) {
if (show_details) {
details.add(getHttpDetails("Write timedout", dps.get(i)));
}
++timeouts;
}
}
writes_timedout.addAndGet(timeouts);
final int failures = dps.size() - queued;
if (!show_summary && !show_details) {
query.sendReply(HttpResponseStatus.BAD_REQUEST, query.serializer().formatErrorV1(new BadRequestException(HttpResponseStatus.BAD_REQUEST, "The put call has timedout with " + good_writes + " successful writes, " + failed_writes + " failed writes and " + timeouts + " timed out writes.", "Please see the TSD logs or append \"details\" to the put request")));
} else {
final HashMap<String, Object> summary = new HashMap<String, Object>();
summary.put("success", good_writes);
summary.put("failed", failures + failed_writes);
summary.put("timeouts", timeouts);
if (show_details) {
summary.put("errors", details);
}
query.sendReply(HttpResponseStatus.BAD_REQUEST, query.serializer().formatPutV1(summary));
}
}
}
// now that everything has been sent we can schedule a timeout if
// the caller asked for a synchronous write.
final Timeout timeout = sync_timeout > 0 ? tsdb.getTimer().newTimeout(new PutTimeout(queued), sync_timeout, TimeUnit.MILLISECONDS) : null;
/**
* Serializes the response to the client
*/
class GroupCB implements Callback<Object, ArrayList<Boolean>> {
final int queued;
public GroupCB(final int queued) {
this.queued = queued;
}
@Override
public Object call(final ArrayList<Boolean> results) {
if (sending_response.get()) {
if (LOG.isDebugEnabled()) {
LOG.debug("Put data point call " + query + " was marked as timedout");
}
return null;
} else {
sending_response.set(true);
if (timeout != null) {
timeout.cancel();
}
}
int good_writes = 0;
int failed_writes = 0;
for (final boolean result : results) {
if (result) {
++good_writes;
} else {
++failed_writes;
}
}
final int failures = dps.size() - queued;
if (!show_summary && !show_details) {
if (failures + failed_writes > 0) {
query.sendReply(HttpResponseStatus.BAD_REQUEST, query.serializer().formatErrorV1(new BadRequestException(HttpResponseStatus.BAD_REQUEST, "One or more data points had errors", "Please see the TSD logs or append \"details\" to the put request")));
} else {
query.sendReply(HttpResponseStatus.NO_CONTENT, "".getBytes());
}
} else {
final HashMap<String, Object> summary = new HashMap<String, Object>();
if (sync_timeout > 0) {
summary.put("timeouts", 0);
}
summary.put("success", results.isEmpty() ? queued : good_writes);
summary.put("failed", failures + failed_writes);
if (show_details) {
summary.put("errors", details);
}
if (failures > 0) {
query.sendReply(HttpResponseStatus.BAD_REQUEST, query.serializer().formatPutV1(summary));
} else {
query.sendReply(query.serializer().formatPutV1(summary));
}
}
return null;
}
@Override
public String toString() {
return "put data point serialization callback";
}
}
/**
* Catches any unexpected exceptions thrown in the callback chain
*/
class ErrCB implements Callback<Object, Exception> {
@Override
public Object call(final Exception e) throws Exception {
if (sending_response.get()) {
if (LOG.isDebugEnabled()) {
LOG.debug("ERROR point call " + query + " was marked as timedout", e);
}
return null;
} else {
sending_response.set(true);
if (timeout != null) {
timeout.cancel();
}
}
LOG.error("Unexpected exception", e);
throw new RuntimeException("Unexpected exception", e);
}
@Override
public String toString() {
return "put data point error callback";
}
}
if (synchronous) {
Deferred.groupInOrder(deferreds).addCallback(new GroupCB(queued)).addErrback(new ErrCB());
} else {
new GroupCB(queued).call(EMPTY_DEFERREDS);
}
}
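The method above attaches a per-point SuccessCB and PutErrback to the Deferred returned by each add call. Below is a minimal standalone sketch of that same chaining, assuming the TSDB.addPoint(String, long, long, Map) overload that returns a Deferred&lt;Object&gt; and OpenTSDB 2.x import paths; the metric name and tag are hypothetical placeholders.

import java.util.HashMap;
import com.stumbleupon.async.Callback;
import com.stumbleupon.async.Deferred;
import net.opentsdb.core.TSDB;

final class PutSketch {
  /** Writes one hypothetical data point and reports success as a Boolean. */
  static Deferred<Boolean> putOne(final TSDB tsdb) {
    final HashMap<String, String> tags = new HashMap<String, String>();
    tags.put("host", "web01"); // hypothetical tag
    return tsdb.addPoint("sys.cpu.user", // hypothetical metric
        System.currentTimeMillis() / 1000, 42L, tags)
      .addCallback(new Callback<Boolean, Object>() {
        public Boolean call(final Object ignored) {
          return true; // the write reached storage
        }
      })
      .addErrback(new Callback<Boolean, Exception>() {
        public Boolean call(final Exception e) {
          // mirror PutErrback: swallow the exception and report a failed write
          return false;
        }
      });
  }
}

Returning false from the errback converts a storage failure into an ordinary Boolean result, which is what lets GroupCB above count failed writes instead of aborting the whole batch.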
Use of com.stumbleupon.async.Callback in project opentsdb by OpenTSDB: class TimeSeriesLookup, method resolveUIDs.
/**
* Resolves the metric and tag strings to their UIDs
* @return A deferred to wait on for resolution to complete.
*/
private Deferred<Object> resolveUIDs() {
class TagsCB implements Callback<Object, ArrayList<Object>> {
@Override
public Object call(final ArrayList<Object> ignored) throws Exception {
rowkey_regex = getRowKeyRegex();
return null;
}
}
class PairResolution implements Callback<Object, ArrayList<byte[]>> {
@Override
public Object call(final ArrayList<byte[]> tags) throws Exception {
if (tags.size() < 2) {
throw new IllegalArgumentException("Somehow we received an array " + "that wasn't two bytes in size! " + tags);
}
pairs.add(new ByteArrayPair(tags.get(0), tags.get(1)));
return Deferred.fromResult(null);
}
}
class TagResolution implements Callback<Deferred<Object>, Object> {
@Override
public Deferred<Object> call(final Object unused) throws Exception {
if (query.getTags() == null || query.getTags().isEmpty()) {
return Deferred.fromResult(null);
}
pairs = Collections.synchronizedList(new ArrayList<ByteArrayPair>(query.getTags().size()));
final ArrayList<Deferred<Object>> deferreds = new ArrayList<Deferred<Object>>(pairs.size());
for (final Pair<String, String> tags : query.getTags()) {
final ArrayList<Deferred<byte[]>> deferred_tags = new ArrayList<Deferred<byte[]>>(2);
if (tags.getKey() != null && !tags.getKey().equals("*")) {
deferred_tags.add(tsdb.getUIDAsync(UniqueIdType.TAGK, tags.getKey()));
} else {
deferred_tags.add(Deferred.<byte[]>fromResult(null));
}
if (tags.getValue() != null && !tags.getValue().equals("*")) {
deferred_tags.add(tsdb.getUIDAsync(UniqueIdType.TAGV, tags.getValue()));
} else {
deferred_tags.add(Deferred.<byte[]>fromResult(null));
}
deferreds.add(Deferred.groupInOrder(deferred_tags).addCallback(new PairResolution()));
}
return Deferred.group(deferreds).addCallback(new TagsCB());
}
}
class MetricCB implements Callback<Deferred<Object>, byte[]> {
@Override
public Deferred<Object> call(final byte[] uid) throws Exception {
metric_uid = uid;
LOG.debug("Found UID (" + UniqueId.uidToString(metric_uid) + ") for metric (" + query.getMetric() + ")");
return new TagResolution().call(null);
}
}
if (query.getMetric() != null && !query.getMetric().isEmpty() && !query.getMetric().equals("*")) {
return tsdb.getUIDAsync(UniqueIdType.METRIC, query.getMetric()).addCallbackDeferring(new MetricCB());
} else {
try {
return new TagResolution().call(null);
} catch (Exception e) {
return Deferred.fromError(e);
}
}
}
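The TagResolution callback above relies on Deferred.groupInOrder so that the tag key UID always lands at index 0 and the tag value UID at index 1. Here is a standalone sketch of that grouping, assuming TSDB.getUIDAsync as used above and OpenTSDB 2.x import paths; the tag strings are hypothetical.

import java.util.ArrayList;
import com.stumbleupon.async.Callback;
import com.stumbleupon.async.Deferred;
import net.opentsdb.core.TSDB;
import net.opentsdb.uid.UniqueId.UniqueIdType;

final class PairSketch {
  /** Resolves a hypothetical tagk/tagv pair to UIDs in parallel. */
  static Deferred<byte[][]> resolvePair(final TSDB tsdb) {
    final ArrayList<Deferred<byte[]>> uids = new ArrayList<Deferred<byte[]>>(2);
    uids.add(tsdb.getUIDAsync(UniqueIdType.TAGK, "host"));  // hypothetical tag key
    uids.add(tsdb.getUIDAsync(UniqueIdType.TAGV, "web01")); // hypothetical tag value
    // groupInOrder preserves insertion order, so index 0 is the tagk UID
    // and index 1 is the tagv UID once both lookups have completed.
    return Deferred.groupInOrder(uids)
      .addCallback(new Callback<byte[][], ArrayList<byte[]>>() {
        public byte[][] call(final ArrayList<byte[]> resolved) {
          return new byte[][] { resolved.get(0), resolved.get(1) };
        }
      });
  }
}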
Use of com.stumbleupon.async.Callback in project opentsdb by OpenTSDB: class QueryExample, method main.
public static void main(final String[] args) throws IOException {
// Set these as arguments so you don't have to keep path information in
// source files
String pathToConfigFile = (args != null && args.length > 0 ? args[0] : null);
// Create a config object with a path to the file for parsing. Or manually
// override settings.
// e.g. config.overrideConfig("tsd.storage.hbase.zk_quorum", "localhost");
final Config config;
if (pathToConfigFile != null && !pathToConfigFile.isEmpty()) {
config = new Config(pathToConfigFile);
} else {
// Search for a default config from /etc/opentsdb/opentsdb.conf, etc.
config = new Config(true);
}
final TSDB tsdb = new TSDB(config);
// main query
final TSQuery query = new TSQuery();
// use any string format from
// http://opentsdb.net/docs/build/html/user_guide/query/dates.html
query.setStart("1h-ago");
// Optional: set other global query params
// at least one sub query required. This is where you specify the metric and
// tags
final TSSubQuery subQuery = new TSSubQuery();
subQuery.setMetric("my.tsdb.test.metric");
// filters are optional but useful.
final List<TagVFilter> filters = new ArrayList<TagVFilter>(1);
filters.add(new TagVFilter.Builder().setType("literal_or").setFilter("example1").setTagk("script").setGroupBy(true).build());
subQuery.setFilters(filters);
// you do have to set an aggregator. Just provide the name as a string
subQuery.setAggregator("sum");
// IMPORTANT: don't forget to add the subQuery
final ArrayList<TSSubQuery> subQueries = new ArrayList<TSSubQuery>(1);
subQueries.add(subQuery);
query.setQueries(subQueries);
// request millisecond resolution from storage, otherwise we aggregate on the second.
query.setMsResolution(true);
// make sure the query is valid. This will throw exceptions if something
// is missing
query.validateAndSetQuery();
// compile the queries into TsdbQuery objects behind the scenes
Query[] tsdbqueries = query.buildQueries(tsdb);
// create some arrays for storing the results and the async calls
final int nqueries = tsdbqueries.length;
final ArrayList<DataPoints[]> results = new ArrayList<DataPoints[]>(nqueries);
final ArrayList<Deferred<DataPoints[]>> deferreds = new ArrayList<Deferred<DataPoints[]>>(nqueries);
// execute each compiled query asynchronously and store the deferred in an array so we can wait for them to complete.
for (int i = 0; i < nqueries; i++) {
deferreds.add(tsdbqueries[i].runAsync());
}
// Start timer
long startTime = DateTime.nanoTime();
// callback that collects the results once the group of queries has finished
class QueriesCB implements Callback<Object, ArrayList<DataPoints[]>> {
public Object call(final ArrayList<DataPoints[]> queryResults) throws Exception {
results.addAll(queryResults);
return null;
}
}
// Make sure to handle any errors that might crop up
class QueriesEB implements Callback<Object, Exception> {
@Override
public Object call(final Exception e) throws Exception {
System.err.println("Queries failed");
e.printStackTrace();
return null;
}
}
// block until all of the asynchronous queries have completed.
try {
Deferred.groupInOrder(deferreds).addCallback(new QueriesCB()).addErrback(new QueriesEB()).join();
} catch (Exception e) {
e.printStackTrace();
}
// End timer.
double elapsedTime = DateTime.msFromNanoDiff(DateTime.nanoTime(), startTime);
System.out.println("Query returned in: " + elapsedTime + " milliseconds.");
// loop through the query results and do any processing necessary.
for (final DataPoints[] dataSets : results) {
for (final DataPoints data : dataSets) {
System.out.print(data.metricName());
Map<String, String> resolvedTags = data.getTags();
for (final Map.Entry<String, String> pair : resolvedTags.entrySet()) {
System.out.print(" " + pair.getKey() + "=" + pair.getValue());
}
System.out.print("\n");
final SeekableView it = data.iterator();
/*
* An important point about SeekableView:
* Because no data is copied during iteration and no new object gets
* created, the DataPoint returned must not be stored and gets
* invalidated as soon as next is called on the iterator (actually it
* doesn't get invalidated but rather its contents changes). If you want
* to store individual data points, you need to copy the timestamp and
* value out of each DataPoint into your own data structures.
*
* In the vast majority of cases, the iterator will be used to go once
* through all the data points, which is why it's not a problem if the
* iterator acts just as a transient "view". Iterating will be very
* cheap since no memory allocation is required (except to instantiate
* the actual iterator at the beginning).
*/
while (it.hasNext()) {
final DataPoint dp = it.next();
System.out.println(" " + dp.timestamp() + " " + (dp.isInteger() ? dp.longValue() : dp.doubleValue()));
}
System.out.println("");
}
}
// Gracefully shutdown connection to TSDB
try {
tsdb.shutdown().join();
} catch (InterruptedException e) {
e.printStackTrace();
} catch (Exception e) {
e.printStackTrace();
}
}
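The example blocks on join() mainly for simplicity. Below is a hypothetical fully asynchronous variant of the result handling that reuses the same grouped deferreds; DataPoints.size() is assumed to return the number of raw data points in each series.

import java.util.ArrayList;
import com.stumbleupon.async.Callback;
import com.stumbleupon.async.Deferred;
import net.opentsdb.core.DataPoints;

final class AsyncPrintSketch {
  /** Consumes grouped query results inside the callback chain, no join(). */
  static Deferred<Object> printAll(final ArrayList<Deferred<DataPoints[]>> deferreds) {
    return Deferred.groupInOrder(deferreds)
      .addCallback(new Callback<Object, ArrayList<DataPoints[]>>() {
        public Object call(final ArrayList<DataPoints[]> queryResults) {
          for (final DataPoints[] dataSets : queryResults) {
            for (final DataPoints data : dataSets) {
              System.out.println(data.metricName() + ": " + data.size() + " data points");
            }
          }
          return null;
        }
      })
      .addErrback(new Callback<Object, Exception>() {
        public Object call(final Exception e) {
          System.err.println("Queries failed");
          e.printStackTrace();
          return null;
        }
      });
  }
}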
Use of com.stumbleupon.async.Callback in project opentsdb by OpenTSDB: class Annotation, method getGlobalAnnotations.
/**
* Scans through the global annotation storage rows and returns a list of
* parsed annotation objects. If no annotations were found for the given
* timespan, the resulting list will be empty.
* @param tsdb The TSDB to use for storage access
* @param start_time Start time to scan from. May be 0
* @param end_time End time to scan to. Must be greater than 0
* @return A list with detected annotations. May be empty.
* @throws IllegalArgumentException if the end timestamp has not been set or
* the end time is less than the start time
*/
public static Deferred<List<Annotation>> getGlobalAnnotations(final TSDB tsdb, final long start_time, final long end_time) {
if (end_time < 1) {
throw new IllegalArgumentException("The end timestamp has not been set");
}
if (end_time < start_time) {
throw new IllegalArgumentException("The end timestamp cannot be less than the start timestamp");
}
/**
* Scanner that loops through the [0, 0, 0, timestamp] rows looking for
* global annotations. Returns a list of parsed annotation objects.
* The list may be empty.
*/
final class ScannerCB implements Callback<Deferred<List<Annotation>>, ArrayList<ArrayList<KeyValue>>> {
final Scanner scanner;
final ArrayList<Annotation> annotations = new ArrayList<Annotation>();
/**
* Initializes the scanner
*/
public ScannerCB() {
final byte[] start = new byte[Const.SALT_WIDTH() + TSDB.metrics_width() + Const.TIMESTAMP_BYTES];
final byte[] end = new byte[Const.SALT_WIDTH() + TSDB.metrics_width() + Const.TIMESTAMP_BYTES];
final long normalized_start = (start_time - (start_time % Const.MAX_TIMESPAN));
final long normalized_end = (end_time - (end_time % Const.MAX_TIMESPAN) + Const.MAX_TIMESPAN);
Bytes.setInt(start, (int) normalized_start, Const.SALT_WIDTH() + TSDB.metrics_width());
Bytes.setInt(end, (int) normalized_end, Const.SALT_WIDTH() + TSDB.metrics_width());
scanner = tsdb.getClient().newScanner(tsdb.dataTable());
scanner.setStartKey(start);
scanner.setStopKey(end);
scanner.setFamily(FAMILY);
}
public Deferred<List<Annotation>> scan() {
return scanner.nextRows().addCallbackDeferring(this);
}
@Override
public Deferred<List<Annotation>> call(final ArrayList<ArrayList<KeyValue>> rows) throws Exception {
if (rows == null || rows.isEmpty()) {
return Deferred.fromResult((List<Annotation>) annotations);
}
for (final ArrayList<KeyValue> row : rows) {
for (KeyValue column : row) {
if ((column.qualifier().length == 3 || column.qualifier().length == 5) && column.qualifier()[0] == PREFIX()) {
Annotation note = JSON.parseToObject(column.value(), Annotation.class);
if (note.start_time < start_time || note.end_time > end_time) {
continue;
}
annotations.add(note);
}
}
}
return scan();
}
}
return new ScannerCB().scan();
}
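A sketch of how a caller might consume the method above, assuming the Annotation getters getStartTime() and getDescription() are available; the one-hour window is illustrative only.

import java.util.List;
import com.stumbleupon.async.Callback;
import com.stumbleupon.async.Deferred;
import net.opentsdb.core.TSDB;
import net.opentsdb.meta.Annotation;

final class GlobalAnnotationSketch {
  /** Fetches global annotations for the last hour and prints each one. */
  static Deferred<Object> printRecent(final TSDB tsdb) {
    final long now = System.currentTimeMillis() / 1000;
    return Annotation.getGlobalAnnotations(tsdb, now - 3600, now)
      .addCallback(new Callback<Object, List<Annotation>>() {
        public Object call(final List<Annotation> notes) {
          for (final Annotation note : notes) {
            System.out.println(note.getStartTime() + " " + note.getDescription());
          }
          return null;
        }
      });
  }
}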
Use of com.stumbleupon.async.Callback in project opentsdb by OpenTSDB: class TSMeta, method syncToStorage.
/**
* Attempts a CompareAndSet storage call, loading the object from storage,
* synchronizing changes, and attempting a put. Also verifies that associated
* UID name mappings exist before merging.
* <b>Note:</b> If the local object didn't have any fields set by the caller
* or there weren't any changes, then the data will not be written and an
* exception will be thrown.
* <b>Note:</b> We do not store the UIDMeta information with TSMetas since
* users may change a single UIDMeta object and we don't want to update every
* TSUID that includes that object with the new data. Instead, UIDMetas are
* merged into the TSMeta on retrieval so we always have canonical data. This
* also saves space in storage.
* @param tsdb The TSDB to use for storage access
* @param overwrite When the RPC method is PUT, will overwrite all user
* accessible fields
* @return True if the storage call was successful, false if the object was
* modified in storage during the CAS call. If false, retry the call. Other
* failures will result in an exception being thrown.
* @throws HBaseException if there was an issue
* @throws IllegalArgumentException if parsing failed
* @throws NoSuchUniqueId If any of the UID name mappings do not exist
* @throws IllegalStateException if the data hasn't changed. This is OK!
* @throws JSONException if the object could not be serialized
*/
public Deferred<Boolean> syncToStorage(final TSDB tsdb, final boolean overwrite) {
if (tsuid == null || tsuid.isEmpty()) {
throw new IllegalArgumentException("Missing TSUID");
}
boolean has_changes = false;
for (Map.Entry<String, Boolean> entry : changed.entrySet()) {
if (entry.getValue()) {
has_changes = true;
break;
}
}
if (!has_changes) {
LOG.debug(this + " does not have changes, skipping sync to storage");
throw new IllegalStateException("No changes detected in TSUID meta data");
}
/**
* Callback used to verify that the UID name mappings exist. We don't need
* to process the actual name, we just want it to throw an error if any
* of the UIDs don't exist.
*/
class UidCB implements Callback<Object, String> {
@Override
public Object call(String name) throws Exception {
// nothing to do as missing mappings will throw a NoSuchUniqueId
return null;
}
}
// parse out the tags from the tsuid
final List<byte[]> parsed_tags = UniqueId.getTagsFromTSUID(tsuid);
// Deferred group used to accumulate UidCB callbacks so the next call
// can wait until all of the UIDs have been verified
ArrayList<Deferred<Object>> uid_group = new ArrayList<Deferred<Object>>(parsed_tags.size() + 1);
// calculate the metric UID and fetch its name mapping
final byte[] metric_uid = UniqueId.stringToUid(tsuid.substring(0, TSDB.metrics_width() * 2));
uid_group.add(tsdb.getUidName(UniqueIdType.METRIC, metric_uid).addCallback(new UidCB()));
int idx = 0;
for (byte[] tag : parsed_tags) {
if (idx % 2 == 0) {
uid_group.add(tsdb.getUidName(UniqueIdType.TAGK, tag).addCallback(new UidCB()));
} else {
uid_group.add(tsdb.getUidName(UniqueIdType.TAGV, tag).addCallback(new UidCB()));
}
idx++;
}
/**
* Callback executed after all of the UID mappings have been verified. This
* will then proceed with the CAS call.
*/
final class ValidateCB implements Callback<Deferred<Boolean>, ArrayList<Object>> {
private final TSMeta local_meta;
public ValidateCB(final TSMeta local_meta) {
this.local_meta = local_meta;
}
/**
* Nested class that executes the CAS after retrieving existing TSMeta
* from storage.
*/
final class StoreCB implements Callback<Deferred<Boolean>, TSMeta> {
/**
* Executes the CAS if the TSMeta was successfully retrieved
* @return True if the CAS was successful, false if the stored data
* was modified in flight
* @throws IllegalArgumentException if the TSMeta did not exist in
* storage. Only the TSD should be able to create TSMeta objects.
*/
@Override
public Deferred<Boolean> call(TSMeta stored_meta) throws Exception {
if (stored_meta == null) {
throw new IllegalArgumentException("Requested TSMeta did not exist");
}
final byte[] original_meta = stored_meta.getStorageJSON();
local_meta.syncMeta(stored_meta, overwrite);
final PutRequest put = new PutRequest(tsdb.metaTable(), UniqueId.stringToUid(local_meta.tsuid), FAMILY, META_QUALIFIER, local_meta.getStorageJSON());
return tsdb.getClient().compareAndSet(put, original_meta);
}
}
/**
* Called on UID mapping verification and continues executing the CAS
* procedure.
* @return Results from the {@link #StoreCB} callback
*/
@Override
public Deferred<Boolean> call(ArrayList<Object> validated) throws Exception {
return getFromStorage(tsdb, UniqueId.stringToUid(tsuid)).addCallbackDeferring(new StoreCB());
}
}
// Begins the callback chain by validating that the UID mappings exist
return Deferred.group(uid_group).addCallbackDeferring(new ValidateCB(this));
}
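Because syncToStorage() resolves to false when the CAS loses a race, callers typically re-fetch and retry, as the javadoc above suggests. Below is a sketch of a single-retry wrapper built only from the methods shown in this snippet; the one-retry policy and class name are hypothetical.

import com.stumbleupon.async.Callback;
import com.stumbleupon.async.Deferred;
import net.opentsdb.core.TSDB;
import net.opentsdb.meta.TSMeta;

final class SyncRetrySketch {
  /** Attempts the CAS sync and retries once if the stored row changed in flight. */
  static Deferred<Boolean> syncWithOneRetry(final TSDB tsdb, final TSMeta meta,
                                            final boolean overwrite) {
    return meta.syncToStorage(tsdb, overwrite)
      .addCallbackDeferring(new Callback<Deferred<Boolean>, Boolean>() {
        public Deferred<Boolean> call(final Boolean cas_ok) {
          if (cas_ok) {
            return Deferred.fromResult(true);
          }
          // the stored meta was modified during the CAS; try once more
          return meta.syncToStorage(tsdb, overwrite);
        }
      });
  }
}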