
Example 11 with PutRequest

use of org.hbase.async.PutRequest in project opentsdb by OpenTSDB.

the class TSDB method addPointInternal.

private Deferred<Object> addPointInternal(final String metric, final long timestamp, final byte[] value, final Map<String, String> tags, final short flags) {
    // we only accept positive unix epoch timestamps in seconds or milliseconds
    if (timestamp < 0 || ((timestamp & Const.SECOND_MASK) != 0 && timestamp > 9999999999999L)) {
        throw new IllegalArgumentException((timestamp < 0 ? "negative " : "bad") + " timestamp=" + timestamp + " when trying to add value=" + Arrays.toString(value) + '/' + flags + " to metric=" + metric + ", tags=" + tags);
    }
    IncomingDataPoints.checkMetricAndTags(metric, tags);
    final byte[] row = IncomingDataPoints.rowKeyTemplate(this, metric, tags);
    final long base_time;
    final byte[] qualifier = Internal.buildQualifier(timestamp, flags);
    if ((timestamp & Const.SECOND_MASK) != 0) {
        // drop the ms timestamp to seconds to calculate the base timestamp
        base_time = ((timestamp / 1000) - ((timestamp / 1000) % Const.MAX_TIMESPAN));
    } else {
        base_time = (timestamp - (timestamp % Const.MAX_TIMESPAN));
    }
    /** Callback executed for chaining filter calls to see if the value
     * should be written or not. */
    final class WriteCB implements Callback<Deferred<Object>, Boolean> {

        @Override
        public Deferred<Object> call(final Boolean allowed) throws Exception {
            if (!allowed) {
                rejected_dps.incrementAndGet();
                return Deferred.fromResult(null);
            }
            Bytes.setInt(row, (int) base_time, metrics.width() + Const.SALT_WIDTH());
            RowKey.prefixKeyWithSalt(row);
            Deferred<Object> result = null;
            if (config.enable_appends()) {
                final AppendDataPoints kv = new AppendDataPoints(qualifier, value);
                final AppendRequest point = new AppendRequest(table, row, FAMILY, AppendDataPoints.APPEND_COLUMN_QUALIFIER, kv.getBytes());
                result = client.append(point);
            } else {
                scheduleForCompaction(row, (int) base_time);
                final PutRequest point = new PutRequest(table, row, FAMILY, qualifier, value);
                result = client.put(point);
            }
            // Count all added datapoints, not just those that came in through PUT rpc
            // Will there be others? Well, something could call addPoint programmatically, right?
            datapoints_added.incrementAndGet();
            if (!config.enable_realtime_ts() && !config.enable_tsuid_incrementing() && !config.enable_tsuid_tracking() && rt_publisher == null) {
                return result;
            }
            final byte[] tsuid = UniqueId.getTSUIDFromKey(row, METRICS_WIDTH, Const.TIMESTAMP_BYTES);
            // if the meta cache plugin is instantiated then tracking goes through it
            if (meta_cache != null) {
                meta_cache.increment(tsuid);
            } else {
                if (config.enable_tsuid_tracking()) {
                    if (config.enable_realtime_ts()) {
                        if (config.enable_tsuid_incrementing()) {
                            TSMeta.incrementAndGetCounter(TSDB.this, tsuid);
                        } else {
                            TSMeta.storeIfNecessary(TSDB.this, tsuid);
                        }
                    } else {
                        final PutRequest tracking = new PutRequest(meta_table, tsuid, TSMeta.FAMILY(), TSMeta.COUNTER_QUALIFIER(), Bytes.fromLong(1));
                        client.put(tracking);
                    }
                }
            }
            if (rt_publisher != null) {
                rt_publisher.sinkDataPoint(metric, timestamp, value, tags, tsuid, flags);
            }
            return result;
        }

        @Override
        public String toString() {
            return "addPointInternal Write Callback";
        }
    }
    if (ts_filter != null && ts_filter.filterDataPoints()) {
        return ts_filter.allowDataPoint(metric, timestamp, value, tags, flags).addCallbackDeferring(new WriteCB());
    }
    return Deferred.fromResult(true).addCallbackDeferring(new WriteCB());
}
Also used : Callback(com.stumbleupon.async.Callback) PutRequest(org.hbase.async.PutRequest) AppendRequest(org.hbase.async.AppendRequest)
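
For context, the write path above boils down to building a PutRequest (or AppendRequest) and handing it to the asynchbase HBaseClient, which returns a Deferred that completes when the RPC does. A minimal standalone sketch of that pattern, assuming a reachable ZooKeeper quorum on localhost and a pre-created table and column family; the table, row, family, and qualifier names below are illustrative, not taken from the example above:

import org.hbase.async.HBaseClient;
import org.hbase.async.PutRequest;
import com.stumbleupon.async.Callback;
import com.stumbleupon.async.Deferred;

public class PutSketch {
    public static void main(final String[] args) throws Exception {
        // Hypothetical ZK quorum and table/family names, for illustration only.
        final HBaseClient client = new HBaseClient("localhost");
        final byte[] table = "tsdb".getBytes();
        final byte[] key = "row-key".getBytes();
        final byte[] family = "t".getBytes();
        final byte[] qualifier = "q".getBytes();
        final byte[] value = new byte[] { 42 };

        final PutRequest put = new PutRequest(table, key, family, qualifier, value);
        final Deferred<Object> d = client.put(put)
            .addCallback(new Callback<Object, Object>() {
                @Override
                public Object call(final Object arg) {
                    // Runs once the put RPC has been acknowledged.
                    System.out.println("put complete");
                    return arg;
                }
            });
        // Block only in this toy example; real code stays asynchronous.
        d.joinUninterruptibly();
        client.shutdown().joinUninterruptibly();
    }
}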

Example 12 with PutRequest

use of org.hbase.async.PutRequest in project opentsdb by OpenTSDB.

the class AppendDataPoints method parseKeyValue.

/**
   * Parses a column from storage, ordering the data points and dropping newer
   * duplicates. The parsing returns a Cell collection for debugging and also
   * adds the cells to concatenated qualifier and value arrays in the compacted
   * data point format so that the results can be merged with other non-append
   * columns or rows.
   * <p>
   * WARNING: If the "tsd.core.repair_appends" config is set to true then this
   * method will issue puts against the database, overwriting the column with
   * sorted and de-duplicated data. It will only do this for rows that are at
   * least an hour old so as to avoid pounding current rows.
   * <p> 
   * TODO (CL) - allow for newer or older data points depending on a config.
   * @param tsdb The TSDB to which we belong
   * @param kv The key value to parse
   * @throws IllegalArgumentException if the given KV is not an append column
   * or we were unable to parse the value.
   */
public final Collection<Cell> parseKeyValue(final TSDB tsdb, final KeyValue kv) {
    if (kv.qualifier().length != 3 || kv.qualifier()[0] != APPEND_COLUMN_PREFIX) {
        // not a standard 3-byte append qualifier; at some point we may support appends at different offsets.
        throw new IllegalArgumentException("Can not parse cell, it is not " + " an appended cell. It has a different qualifier " + Bytes.pretty(kv.qualifier()) + ", row key " + Bytes.pretty(kv.key()));
    }
    final boolean repair = tsdb.getConfig().repair_appends();
    final long base_time;
    try {
        base_time = Internal.baseTime(tsdb, kv.key());
    } catch (ArrayIndexOutOfBoundsException oob) {
        throw new IllegalDataException("Corrupted value: invalid row key: " + kv, oob);
    }
    int val_idx = 0;
    int val_length = 0;
    int qual_length = 0;
    // Time delta, extracted from the qualifier.
    int last_delta = -1;
    final Map<Integer, Internal.Cell> deltas = new TreeMap<Integer, Cell>();
    boolean has_duplicates = false;
    boolean out_of_order = false;
    boolean needs_repair = false;
    try {
        while (val_idx < kv.value().length) {
            byte[] q = Internal.extractQualifier(kv.value(), val_idx);
            System.arraycopy(kv.value(), val_idx, q, 0, q.length);
            val_idx = val_idx + q.length;
            int vlen = Internal.getValueLengthFromQualifier(q, 0);
            byte[] v = new byte[vlen];
            System.arraycopy(kv.value(), val_idx, v, 0, vlen);
            val_idx += vlen;
            int delta = Internal.getOffsetFromQualifier(q);
            final Cell duplicate = deltas.get(delta);
            if (duplicate != null) {
                // This is a duplicate cell, skip it
                has_duplicates = true;
                qual_length -= duplicate.qualifier.length;
                val_length -= duplicate.value.length;
            }
            qual_length += q.length;
            val_length += vlen;
            final Cell cell = new Cell(q, v);
            deltas.put(delta, cell);
            if (!out_of_order) {
                // track whether the time offsets arrive out of order
                if (delta <= last_delta) {
                    out_of_order = true;
                }
                last_delta = delta;
            }
        }
    } catch (ArrayIndexOutOfBoundsException oob) {
        throw new IllegalDataException("Corrupted value: couldn't break down" + " into individual values (consumed " + val_idx + " bytes, but was" + " expecting to consume " + (kv.value().length) + "): " + kv + ", cells so far: " + deltas.values(), oob);
    }
    if (has_duplicates || out_of_order) {
        if ((DateTime.currentTimeMillis() / 1000) - base_time > REPAIR_THRESHOLD) {
            needs_repair = true;
        }
    }
    // Check we consumed all the bytes of the value.
    if (val_idx != kv.value().length) {
        throw new IllegalDataException("Corrupted value: couldn't break down" + " into individual values (consumed " + val_idx + " bytes, but was" + " expecting to consume " + (kv.value().length) + "): " + kv + ", cells so far: " + deltas.values());
    }
    val_idx = 0;
    int qual_idx = 0;
    byte[] healed_cell = null;
    int healed_index = 0;
    this.value = new byte[val_length];
    this.qualifier = new byte[qual_length];
    if (repair && needs_repair) {
        healed_cell = new byte[val_length + qual_length];
    }
    for (final Cell cell : deltas.values()) {
        System.arraycopy(cell.qualifier, 0, this.qualifier, qual_idx, cell.qualifier.length);
        qual_idx += cell.qualifier.length;
        System.arraycopy(cell.value, 0, this.value, val_idx, cell.value.length);
        val_idx += cell.value.length;
        if (repair && needs_repair) {
            System.arraycopy(cell.qualifier, 0, healed_cell, healed_index, cell.qualifier.length);
            healed_index += cell.qualifier.length;
            System.arraycopy(cell.value, 0, healed_cell, healed_index, cell.value.length);
            healed_index += cell.value.length;
        }
    }
    if (repair && needs_repair) {
        LOG.debug("Repairing appended data column " + kv);
        final PutRequest put = new PutRequest(tsdb.table, kv.key(), TSDB.FAMILY(), kv.qualifier(), healed_cell);
        repaired_deferred = tsdb.getClient().put(put);
    }
    return deltas.values();
}
Also used : PutRequest(org.hbase.async.PutRequest) TreeMap(java.util.TreeMap) Cell(net.opentsdb.core.Internal.Cell)
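
The loop above is, at heart, an ordering and duplicate-detection pass over (offset, cell) pairs keyed in a TreeMap. A stripped-down, self-contained sketch of just that idea, with made-up offsets and single-byte values standing in for the real qualifiers and cells that the method derives via Internal.extractQualifier and Internal.getOffsetFromQualifier:

import java.util.Map;
import java.util.TreeMap;

public class DedupSketch {
    public static void main(final String[] args) {
        // Made-up (offset, value) pairs standing in for parsed append cells.
        final int[] offsets = { 0, 60, 60, 30 };  // one duplicate, one out of order
        final byte[] values = { 1, 2, 3, 4 };

        final Map<Integer, Byte> cells = new TreeMap<Integer, Byte>();
        boolean has_duplicates = false;
        boolean out_of_order = false;
        int last_offset = -1;

        for (int i = 0; i < offsets.length; i++) {
            if (cells.containsKey(offsets[i])) {
                has_duplicates = true;
            }
            // Later duplicates overwrite earlier ones, mirroring the parse loop above.
            cells.put(offsets[i], (Byte) values[i]);
            if (offsets[i] <= last_offset) {
                out_of_order = true;
            }
            last_offset = offsets[i];
        }
        // Iterating the TreeMap yields the cells sorted by offset, ready to be
        // re-concatenated (and, when repair is enabled, written back in one put).
        System.out.println(cells + " duplicates=" + has_duplicates
            + " out_of_order=" + out_of_order);
    }
}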

Example 13 with PutRequest

use of org.hbase.async.PutRequest in project opentsdb by OpenTSDB.

the class Leaf method storeLeaf.

/**
   * Attempts to write the leaf to storage using a CompareAndSet call. We expect
   * the stored value to be null. If it's not, we fetch the stored leaf. If
   * the stored TSUID is the same as the local leaf's, we return true since the
   * caller is probably reprocessing a timeseries. If the stored TSUID is
   * different, we store a collision in the tree and return false.
   * <b>Note:</b> You MUST write the tree to storage after calling this as there
   * may be a new collision. Check the tree's collision set.
   * @param tsdb The TSDB to use for storage access
   * @param branch_id ID of the branch this leaf belongs to
   * @param tree Tree the leaf and branch belong to
   * @return True if the leaf was stored successfully or already existed, false
   * if there was a collision
   * @throws HBaseException if there was an issue
   * @throws JSONException if the object could not be serialized
   */
public Deferred<Boolean> storeLeaf(final TSDB tsdb, final byte[] branch_id, final Tree tree) {
    /**
     * Callback executed with the results of our CAS operation. If the put was
     * successful, we just return. Otherwise we load the existing leaf to
     * determine if there was a collision.
     */
    final class LeafStoreCB implements Callback<Deferred<Boolean>, Boolean> {

        final Leaf local_leaf;

        public LeafStoreCB(final Leaf local_leaf) {
            this.local_leaf = local_leaf;
        }

        /**
       * @return True if the put was successful or the leaf existed, false if 
       * there was a collision
       */
        @Override
        public Deferred<Boolean> call(final Boolean success) throws Exception {
            if (success) {
                return Deferred.fromResult(success);
            }
            /**
         * Called after fetching the existing leaf from storage
         */
            final class LeafFetchCB implements Callback<Deferred<Boolean>, Leaf> {

                /**
           * @return True if the put was successful or the leaf existed, false if 
           * there was a collision
           */
                @Override
                public Deferred<Boolean> call(final Leaf existing_leaf) throws Exception {
                    if (existing_leaf == null) {
                        LOG.error("Returned leaf was null, stored data may be corrupt for leaf: " + Branch.idToString(columnQualifier()) + " on branch: " + Branch.idToString(branch_id));
                        return Deferred.fromResult(false);
                    }
                    if (existing_leaf.tsuid.equals(tsuid)) {
                        LOG.debug("Leaf already exists: " + local_leaf);
                        return Deferred.fromResult(true);
                    }
                    tree.addCollision(tsuid, existing_leaf.tsuid);
                    LOG.warn("Branch ID: [" + Branch.idToString(branch_id) + "] Leaf collision with [" + tsuid + "] on existing leaf [" + existing_leaf.tsuid + "] named [" + display_name + "]");
                    return Deferred.fromResult(false);
                }
            }
            // fetch the stored leaf to determine whether this is a collision or an existing leaf
            return Leaf.getFromStorage(tsdb, branch_id, display_name).addCallbackDeferring(new LeafFetchCB());
        }
    }
    // execute the CAS call to start the callback chain
    final PutRequest put = new PutRequest(tsdb.treeTable(), branch_id, Tree.TREE_FAMILY(), columnQualifier(), toStorageJson());
    return tsdb.getClient().compareAndSet(put, new byte[0]).addCallbackDeferring(new LeafStoreCB(this));
}
Also used : Callback(com.stumbleupon.async.Callback) PutRequest(org.hbase.async.PutRequest)
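
The create-if-absent behavior above relies on asynchbase's compareAndSet, which applies the PutRequest only when the column's current value matches the expected bytes, with an empty array meaning "no existing value". A minimal hedged sketch of that call shape; the table, row key, qualifier, and payload below are made up for illustration:

import org.hbase.async.HBaseClient;
import org.hbase.async.PutRequest;
import com.stumbleupon.async.Callback;
import com.stumbleupon.async.Deferred;

public class CasSketch {
    /** Writes the value only if the cell is currently empty. Names are illustrative. */
    static Deferred<Boolean> storeIfAbsent(final HBaseClient client) {
        final PutRequest put = new PutRequest(
            "tsdb-tree".getBytes(),              // table
            new byte[] { 0, 1 },                 // row key (e.g. a branch ID)
            "t".getBytes(),                      // column family
            "leaf:abc".getBytes(),               // qualifier
            "serialized-leaf-json".getBytes());  // payload
        // An empty expected value means: succeed only if the cell does not exist yet.
        return client.compareAndSet(put, new byte[0])
            .addCallbackDeferring(new Callback<Deferred<Boolean>, Boolean>() {
                @Override
                public Deferred<Boolean> call(final Boolean cas_succeeded) {
                    // On CAS failure someone wrote the cell first; a real caller would
                    // fetch the stored value and decide whether it is a benign re-write
                    // or a genuine collision, as storeLeaf does above.
                    return Deferred.fromResult(cas_succeeded);
                }
            });
    }
}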

Example 14 with PutRequest

use of org.hbase.async.PutRequest in project opentsdb by OpenTSDB.

the class Tree method flushNotMatched.

/**
   * Attempts to flush the non-matches to storage. The storage call is a PUT so
   * it will overwrite any existing columns, but since each column is keyed on the TSUID
   * it should only exist once and the data shouldn't change.
   * <b>Note:</b> This will also clear the local {@link #not_matched} map
   * @param tsdb The TSDB to use for storage access
   * @return A meaningless deferred (will always be true since we need to group
   * it with tree store calls) for the caller to wait on
   * @throws HBaseException if there was an issue
   */
public Deferred<Boolean> flushNotMatched(final TSDB tsdb) {
    if (!store_failures) {
        not_matched.clear();
        return Deferred.fromResult(true);
    }
    final byte[] row_key = new byte[TREE_ID_WIDTH + 1];
    System.arraycopy(idToBytes(tree_id), 0, row_key, 0, TREE_ID_WIDTH);
    row_key[TREE_ID_WIDTH] = NOT_MATCHED_ROW_SUFFIX;
    final byte[][] qualifiers = new byte[not_matched.size()][];
    final byte[][] values = new byte[not_matched.size()][];
    int index = 0;
    for (Map.Entry<String, String> entry : not_matched.entrySet()) {
        qualifiers[index] = new byte[NOT_MATCHED_PREFIX.length + (entry.getKey().length() / 2)];
        System.arraycopy(NOT_MATCHED_PREFIX, 0, qualifiers[index], 0, NOT_MATCHED_PREFIX.length);
        final byte[] tsuid = UniqueId.stringToUid(entry.getKey());
        System.arraycopy(tsuid, 0, qualifiers[index], NOT_MATCHED_PREFIX.length, tsuid.length);
        values[index] = entry.getValue().getBytes(CHARSET);
        index++;
    }
    final PutRequest put = new PutRequest(tsdb.treeTable(), row_key, TREE_FAMILY, qualifiers, values);
    not_matched.clear();
    /**
     * Super simple callback used to convert the Deferred&lt;Object&gt; to a 
     * Deferred&lt;Boolean&gt; so that it can be grouped with other storage
     * calls
     */
    final class PutCB implements Callback<Deferred<Boolean>, Object> {

        @Override
        public Deferred<Boolean> call(Object result) throws Exception {
            return Deferred.fromResult(true);
        }
    }
    return tsdb.getClient().put(put).addCallbackDeferring(new PutCB());
}
Also used : Callback(com.stumbleupon.async.Callback) PutRequest(org.hbase.async.PutRequest) HashMap(java.util.HashMap) Map(java.util.Map) TreeMap(java.util.TreeMap)
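
The PutRequest constructor used here takes parallel byte[][] arrays of qualifiers and values, so every not-matched entry lands in a single row with one RPC rather than one put per column. A hedged sketch of the same batching idea starting from a plain map; the table, row, and family names below are placeholders, not OpenTSDB's:

import java.util.HashMap;
import java.util.Map;
import org.hbase.async.HBaseClient;
import org.hbase.async.PutRequest;
import com.stumbleupon.async.Deferred;

public class MultiColumnPutSketch {
    /** Packs every map entry into one multi-column PutRequest. Names are illustrative. */
    static Deferred<Object> putAll(final HBaseClient client, final Map<String, String> columns) {
        final byte[][] qualifiers = new byte[columns.size()][];
        final byte[][] values = new byte[columns.size()][];
        int i = 0;
        for (final Map.Entry<String, String> entry : columns.entrySet()) {
            qualifiers[i] = entry.getKey().getBytes();
            values[i] = entry.getValue().getBytes();
            i++;
        }
        final PutRequest put = new PutRequest(
            "tsdb-tree".getBytes(), "row".getBytes(), "t".getBytes(), qualifiers, values);
        return client.put(put);  // one RPC covering all columns of the row
    }

    public static void main(final String[] args) throws Exception {
        final HBaseClient client = new HBaseClient("localhost");
        final Map<String, String> columns = new HashMap<String, String>();
        columns.put("q1", "v1");
        columns.put("q2", "v2");
        putAll(client, columns).joinUninterruptibly();
        client.shutdown().joinUninterruptibly();
    }
}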

Example 15 with PutRequest

use of org.hbase.async.PutRequest in project opentsdb by OpenTSDB.

the class Tree method storeTree.

/**
   * Attempts to store the tree definition via a CompareAndSet call.
   * @param tsdb The TSDB to use for access
   * @param overwrite Whether or not tree data should be overwritten
   * @return True if the write was successful, false if an error occurred
   * @throws IllegalArgumentException if the tree ID is missing or invalid
   * @throws HBaseException if a storage exception occurred
   */
public Deferred<Boolean> storeTree(final TSDB tsdb, final boolean overwrite) {
    if (tree_id < 1 || tree_id > 65535) {
        throw new IllegalArgumentException("Invalid Tree ID");
    }
    // if there aren't any changes, save time and bandwidth by not writing to
    // storage
    boolean has_changes = false;
    for (Map.Entry<String, Boolean> entry : changed.entrySet()) {
        if (entry.getValue()) {
            has_changes = true;
            break;
        }
    }
    if (!has_changes) {
        LOG.debug(this + " does not have changes, skipping sync to storage");
        throw new IllegalStateException("No changes detected in the tree");
    }
    /**
     * Callback executed after loading a tree from storage so that we can
     * synchronize changes to the meta data and write them back to storage.
     */
    final class StoreTreeCB implements Callback<Deferred<Boolean>, Tree> {

        private final Tree local_tree;

        public StoreTreeCB(final Tree local_tree) {
            this.local_tree = local_tree;
        }

        /**
       * Synchronizes the stored tree object (if found) with the local tree 
       * and issues a CAS call to write the update to storage.
       * @return True if the CAS was successful, false if something changed 
       * in flight
       */
        @Override
        public Deferred<Boolean> call(final Tree fetched_tree) throws Exception {
            Tree stored_tree = fetched_tree;
            final byte[] original_tree = stored_tree == null ? new byte[0] : stored_tree.toStorageJson();
            // now copy changes
            if (stored_tree == null) {
                stored_tree = local_tree;
            } else {
                stored_tree.copyChanges(local_tree, overwrite);
            }
            // reset the change map so we don't keep writing
            initializeChangedMap();
            final PutRequest put = new PutRequest(tsdb.treeTable(), Tree.idToBytes(tree_id), TREE_FAMILY, TREE_QUALIFIER, stored_tree.toStorageJson());
            return tsdb.getClient().compareAndSet(put, original_tree);
        }
    }
    // initiate the sync by attempting to fetch an existing tree from storage
    return fetchTree(tsdb, tree_id).addCallbackDeferring(new StoreTreeCB(this));
}
Also used : Callback(com.stumbleupon.async.Callback) PutRequest(org.hbase.async.PutRequest) HashMap(java.util.HashMap) Map(java.util.Map) TreeMap(java.util.TreeMap)
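
Both storeTree and flushNotMatched above adapt the client's Deferred&lt;Object&gt; into a Deferred&lt;Boolean&gt; so heterogeneous storage calls can be grouped and awaited together. A minimal sketch of that adapter combined with Deferred.group from the same async library, assuming the caller only needs to know that every write completed; the helper names are mine, not OpenTSDB's:

import java.util.ArrayList;
import java.util.List;
import com.stumbleupon.async.Callback;
import com.stumbleupon.async.Deferred;
import org.hbase.async.HBaseClient;
import org.hbase.async.PutRequest;

public class GroupedWritesSketch {
    /** Adapts a put's Deferred<Object> into a Deferred<Boolean> so it can be grouped. */
    static Deferred<Boolean> asBoolean(final Deferred<Object> write) {
        return write.addCallbackDeferring(new Callback<Deferred<Boolean>, Object>() {
            @Override
            public Deferred<Boolean> call(final Object ignored) {
                return Deferred.fromResult(true);
            }
        });
    }

    /** Issues several puts and waits on them as a single unit. */
    static Deferred<ArrayList<Boolean>> writeAll(final HBaseClient client,
                                                 final List<PutRequest> puts) {
        final List<Deferred<Boolean>> results = new ArrayList<Deferred<Boolean>>(puts.size());
        for (final PutRequest put : puts) {
            results.add(asBoolean(client.put(put)));
        }
        return Deferred.group(results);  // completes once every write has completed
    }
}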

Aggregations

PutRequest (org.hbase.async.PutRequest): 17
Callback (com.stumbleupon.async.Callback): 11
HashMap (java.util.HashMap): 10
Map (java.util.Map): 8
IOException (java.io.IOException): 5
TreeMap (java.util.TreeMap): 5
Deferred (com.stumbleupon.async.Deferred): 4
ArrayList (java.util.ArrayList): 4
HBaseException (org.hbase.async.HBaseException): 4
JSONException (net.opentsdb.utils.JSONException): 2
AppendRequest (org.hbase.async.AppendRequest): 2
DeleteRequest (org.hbase.async.DeleteRequest): 2
KeyValue (org.hbase.async.KeyValue): 2
ByteArrayByteIterator (com.yahoo.ycsb.ByteArrayByteIterator): 1
ByteIterator (com.yahoo.ycsb.ByteIterator): 1
DBException (com.yahoo.ycsb.DBException): 1
BufferedReader (java.io.BufferedReader): 1
TreeSet (java.util.TreeSet): 1
ConcurrentHashMap (java.util.concurrent.ConcurrentHashMap): 1
Cell (net.opentsdb.core.Internal.Cell): 1