Example 1 with AtomicIncrementRequest

Use of org.hbase.async.AtomicIncrementRequest in project opentsdb by OpenTSDB.

The class UidManager, method fsck.

/**
   * Implements the {@code fsck} subcommand.
   * @param client The HBase client to use.
   * @param table The name of the HBase table to use.
   * @param fix Whether or not to attempt to repair the errors found.
   * @param fix_unknowns Whether or not to delete unknown qualifiers when fixing.
   * @return The exit status of the command (0 means success).
   */
private static int fsck(final HBaseClient client, final byte[] table, final boolean fix, final boolean fix_unknowns) {
    if (fix) {
        LOG.info("----------------------------------");
        LOG.info("-    Running fsck in FIX mode    -");
        LOG.info("-      Remove Unknowns: " + fix_unknowns + "     -");
        LOG.info("----------------------------------");
    } else {
        LOG.info("Running in log only mode");
    }
    final class Uids {

        int errors;

        long maxid;

        long max_found_id;

        short width;

        final HashMap<String, String> id2name = new HashMap<String, String>();

        final HashMap<String, String> name2id = new HashMap<String, String>();

        void error(final KeyValue kv, final String msg) {
            error(msg + ".  kv=" + kv);
        }

        void error(final String msg) {
            LOG.error(msg);
            errors++;
        }

        /*
       * Replaces or creates the reverse map in storage and in the local map
       */
        void restoreReverseMap(final String kind, final String name, final String uid) {
            final PutRequest put = new PutRequest(table, UniqueId.stringToUid(uid), CliUtils.NAME_FAMILY, CliUtils.toBytes(kind), CliUtils.toBytes(name));
            client.put(put);
            id2name.put(uid, name);
            LOG.info("FIX: Restoring " + kind + " reverse mapping: " + uid + " -> " + name);
        }

        /*
       * Removes the reverse map from storage only
       */
        void removeReverseMap(final String kind, final String name, final String uid) {
            // clean up meta data too
            final byte[][] qualifiers = new byte[2][];
            qualifiers[0] = CliUtils.toBytes(kind);
            if (Bytes.equals(CliUtils.METRICS, qualifiers[0])) {
                qualifiers[1] = CliUtils.METRICS_META;
            } else if (Bytes.equals(CliUtils.TAGK, qualifiers[0])) {
                qualifiers[1] = CliUtils.TAGK_META;
            } else if (Bytes.equals(CliUtils.TAGV, qualifiers[0])) {
                qualifiers[1] = CliUtils.TAGV_META;
            }
            final DeleteRequest delete = new DeleteRequest(table, UniqueId.stringToUid(uid), CliUtils.NAME_FAMILY, qualifiers);
            client.delete(delete);
            // can't remove from the id2name map as this will be called while looping
            LOG.info("FIX: Removed " + kind + " reverse mapping: " + uid + " -> " + name);
        }
    }
    final long start_time = System.nanoTime();
    final HashMap<String, Uids> name2uids = new HashMap<String, Uids>();
    final Scanner scanner = client.newScanner(table);
    scanner.setMaxNumRows(1024);
    int kvcount = 0;
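    // Walk every KeyValue in the UID table, recording forward (name -> UID)
    // and reverse (UID -> name) mappings for each kind so they can be
    // cross-checked against each other below.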
    try {
        ArrayList<ArrayList<KeyValue>> rows;
        while ((rows = scanner.nextRows().joinUninterruptibly()) != null) {
            for (final ArrayList<KeyValue> row : rows) {
                for (final KeyValue kv : row) {
                    kvcount++;
                    final byte[] qualifier = kv.qualifier();
                    // TODO - validate meta data in the future, for now skip it
                    if (Bytes.equals(qualifier, TSMeta.META_QUALIFIER()) || Bytes.equals(qualifier, TSMeta.COUNTER_QUALIFIER()) || Bytes.equals(qualifier, CliUtils.METRICS_META) || Bytes.equals(qualifier, CliUtils.TAGK_META) || Bytes.equals(qualifier, CliUtils.TAGV_META)) {
                        continue;
                    }
                    if (!Bytes.equals(qualifier, CliUtils.METRICS) && !Bytes.equals(qualifier, CliUtils.TAGK) && !Bytes.equals(qualifier, CliUtils.TAGV)) {
                        LOG.warn("Unknown qualifier " + UniqueId.uidToString(qualifier) + " in row " + UniqueId.uidToString(kv.key()));
                        if (fix && fix_unknowns) {
                            final DeleteRequest delete = new DeleteRequest(table, kv.key(), kv.family(), qualifier);
                            client.delete(delete);
                            LOG.info("FIX: Removed unknown qualifier " + UniqueId.uidToString(qualifier) + " in row " + UniqueId.uidToString(kv.key()));
                        }
                        continue;
                    }
                    final String kind = CliUtils.fromBytes(kv.qualifier());
                    Uids uids = name2uids.get(kind);
                    if (uids == null) {
                        uids = new Uids();
                        name2uids.put(kind, uids);
                    }
                    final byte[] key = kv.key();
                    final byte[] family = kv.family();
                    final byte[] value = kv.value();
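                    // The special max ID row tracks the highest UID assigned
                    // for each kind, stored as an 8-byte big-endian long.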
                    if (Bytes.equals(key, CliUtils.MAXID_ROW)) {
                        if (value.length != 8) {
                            uids.error(kv, "Invalid maximum ID for " + kind + ": should be on 8 bytes: ");
                        // TODO - a fix would be to find the max used ID for the type 
                        // and store that in the max row.
                        } else {
                            uids.maxid = Bytes.getLong(value);
                            LOG.info("Maximum ID for " + kind + ": " + uids.maxid);
                        }
                    } else {
                        short idwidth = 0;
                        if (Bytes.equals(family, CliUtils.ID_FAMILY)) {
                            idwidth = (short) value.length;
                            final String skey = CliUtils.fromBytes(key);
                            final String svalue = UniqueId.uidToString(value);
                            final long max_found_id;
                            if (Bytes.equals(qualifier, CliUtils.METRICS)) {
                                max_found_id = UniqueId.uidToLong(value, TSDB.metrics_width());
                            } else if (Bytes.equals(qualifier, CliUtils.TAGK)) {
                                max_found_id = UniqueId.uidToLong(value, TSDB.tagk_width());
                            } else {
                                max_found_id = UniqueId.uidToLong(value, TSDB.tagv_width());
                            }
                            if (uids.max_found_id < max_found_id) {
                                uids.max_found_id = max_found_id;
                            }
                            final String id = uids.name2id.put(skey, svalue);
                            if (id != null) {
                                uids.error(kv, "Duplicate forward " + kind + " mapping: " + skey + " -> " + id + " and " + skey + " -> " + svalue);
                            }
                        } else if (Bytes.equals(family, CliUtils.NAME_FAMILY)) {
                            final String skey = UniqueId.uidToString(key);
                            final String svalue = CliUtils.fromBytes(value);
                            idwidth = (short) key.length;
                            final String name = uids.id2name.put(skey, svalue);
                            if (name != null) {
                                uids.error(kv, "Duplicate reverse " + kind + "  mapping: " + svalue + " -> " + name + " and " + svalue + " -> " + skey);
                            }
                        }
                        if (uids.width == 0) {
                            uids.width = idwidth;
                        } else if (uids.width != idwidth) {
                            uids.error(kv, "Invalid " + kind + " ID of length " + idwidth + " (expected: " + uids.width + ')');
                        }
                    }
                }
            }
        }
    } catch (HBaseException e) {
        LOG.error("Error while scanning HBase, scanner=" + scanner, e);
        throw e;
    } catch (Exception e) {
        LOG.error("WTF?  Unexpected exception type, scanner=" + scanner, e);
        throw new AssertionError("Should never happen");
    }
    // Match up all forward mappings with their reverse mappings and vice
    // versa and make sure they agree.
    int errors = 0;
    for (final Map.Entry<String, Uids> entry : name2uids.entrySet()) {
        final String kind = entry.getKey();
        final Uids uids = entry.getValue();
        // This will be used in the event that we run into an inconsistent forward
        // mapping that could mean a single UID was assigned to different names.
        // It SHOULD NEVER HAPPEN, but it could.
        HashMap<String, TreeSet<String>> uid_collisions = null;
        // Look for forward mappings without the corresponding reverse
        // mappings.  These are harmful and shouldn't exist.
        for (final Map.Entry<String, String> nameid : uids.name2id.entrySet()) {
            final String name = nameid.getKey();
            final String id = nameid.getValue();
            final String found = uids.id2name.get(id);
            if (found == null) {
                uids.error("Forward " + kind + " mapping is missing reverse" + " mapping: " + name + " -> " + id);
                if (fix) {
                    uids.restoreReverseMap(kind, name, id);
                }
            } else if (!found.equals(name)) {
                uids.error("Forward " + kind + " mapping " + name + " -> " + id + " is different than reverse mapping: " + id + " -> " + found);
                final String id2 = uids.name2id.get(found);
                if (id2 != null) {
                    uids.error("Inconsistent forward " + kind + " mapping " + name + " -> " + id + " vs " + name + " -> " + found + " / " + found + " -> " + id2);
                    // A single UID may have been assigned to more than one
                    // time series.
                    if (fix) {
                        // Build the UID collision map only once, as needed,
                        // since it's expensive.
                        if (uid_collisions == null) {
                            uid_collisions = new HashMap<String, TreeSet<String>>(uids.name2id.size());
                            for (final Map.Entry<String, String> row : uids.name2id.entrySet()) {
                                TreeSet<String> names = uid_collisions.get(row.getValue());
                                if (names == null) {
                                    names = new TreeSet<String>();
                                    uid_collisions.put(row.getValue(), names);
                                }
                                names.add(row.getKey());
                            }
                        }
                        // If the UID maps to at most one name, the time
                        // series *should* be OK and we can just fix the reverse map.
                        if (uid_collisions.containsKey(id) && uid_collisions.get(id).size() <= 1) {
                            uids.restoreReverseMap(kind, name, id);
                        }
                    }
                } else {
                    uids.error("Duplicate forward " + kind + " mapping " + name + " -> " + id + " and " + id2 + " -> " + found);
                    if (fix) {
                        uids.restoreReverseMap(kind, name, id);
                    }
                }
            }
        }
        // Scan through the UID collisions map and fix the screw ups
        if (uid_collisions != null) {
            for (Map.Entry<String, TreeSet<String>> collision : uid_collisions.entrySet()) {
                if (collision.getValue().size() <= 1) {
                    continue;
                }
                // The data in any time series with the errant UID is
                // a mashup of data from all of the names. The best thing to do is
                // start over. We'll rename the old time series so the user can
                // still see it if they want to, but delete the forward mappings
                // so that UIDs can be reassigned and clean series started.
                // - concatenate all of the names into 
                //   "fsck.<name1>.<name2>[...<nameN>]"
                // - delete the forward mappings for all of the names
                // - create a mapping with the fsck'd name pointing to the id
                final StringBuilder fsck_builder = new StringBuilder("fsck");
                final String id = collision.getKey();
                // compile the new fsck'd name and remove each of the duplicate keys
                for (String name : collision.getValue()) {
                    fsck_builder.append(".").append(name);
                    final DeleteRequest delete = new DeleteRequest(table, CliUtils.toBytes(name), CliUtils.ID_FAMILY, CliUtils.toBytes(kind));
                    client.delete(delete);
                    uids.name2id.remove(name);
                    LOG.info("FIX: Removed forward " + kind + " mapping for " + name + " -> " + id);
                }
                // write the new forward map
                final String fsck_name = fsck_builder.toString();
                final PutRequest put = new PutRequest(table, CliUtils.toBytes(fsck_name), CliUtils.ID_FAMILY, CliUtils.toBytes(kind), UniqueId.stringToUid(id));
                client.put(put);
                LOG.info("FIX: Created forward " + kind + " mapping for fsck'd UID " + fsck_name + " -> " + collision.getKey());
                // we still need to fix the uids map for the reverse run through below
                uids.name2id.put(fsck_name, collision.getKey());
                uids.restoreReverseMap(kind, fsck_name, id);
                LOG.error("----------------------------------");
                LOG.error("-     UID COLLISION DETECTED     -");
                LOG.error("Corrupted UID [" + collision.getKey() + "] renamed to [" + fsck_name + "]");
                LOG.error("----------------------------------");
            }
        }
        // Look for reverse mappings without the corresponding forward
        // mappings.  These are harmless but shouldn't frequently occur.
        for (final Map.Entry<String, String> idname : uids.id2name.entrySet()) {
            final String name = idname.getValue();
            final String id = idname.getKey();
            final String found = uids.name2id.get(name);
            if (found == null) {
                LOG.warn("Reverse " + kind + " mapping is missing forward" + " mapping: " + name + " -> " + id);
                if (fix) {
                    uids.removeReverseMap(kind, name, id);
                }
            } else if (!found.equals(id)) {
                final String name2 = uids.id2name.get(found);
                if (name2 != null) {
                    uids.error("Inconsistent reverse " + kind + " mapping " + id + " -> " + name + " vs " + found + " -> " + name + " / " + name2 + " -> " + found);
                    if (fix) {
                        uids.removeReverseMap(kind, name, id);
                    }
                } else {
                    uids.error("Duplicate reverse " + kind + " mapping " + id + " -> " + name + " and " + found + " -> " + name2);
                    if (fix) {
                        uids.removeReverseMap(kind, name, id);
                    }
                }
            }
        }
        final int maxsize = Math.max(uids.id2name.size(), uids.name2id.size());
        if (uids.maxid > maxsize) {
            LOG.warn("Max ID for " + kind + " is " + uids.maxid + " but only " + maxsize + " entries were found.  Maybe " + (uids.maxid - maxsize) + " IDs were deleted?");
        } else if (uids.maxid < uids.max_found_id) {
            uids.error("We found an ID of " + uids.max_found_id + " for " + kind + " but the max ID is only " + uids.maxid + "!  Future IDs may be double-assigned!");
            if (fix) {
                // Bump the stored counter by the difference; it's better to over-run the max
                // IDs than to under-run.
                if (uids.max_found_id == Long.MAX_VALUE) {
                    LOG.error("Ran out of UIDs for " + kind + ". Unable to fix max ID");
                } else {
                    final long diff = uids.max_found_id - uids.maxid;
                    final AtomicIncrementRequest air = new AtomicIncrementRequest(table, CliUtils.MAXID_ROW, CliUtils.ID_FAMILY, CliUtils.toBytes(kind), diff);
                    client.atomicIncrement(air);
                    LOG.info("FIX: Updated max ID for " + kind + " to " + uids.max_found_id);
                }
            }
        }
        if (uids.errors > 0) {
            LOG.error(kind + ": Found " + uids.errors + " errors.");
            errors += uids.errors;
        }
    }
    final long timing = (System.nanoTime() - start_time) / 1000000;
    LOG.info(kvcount + " KVs analyzed in " + timing + "ms (~" + (kvcount * 1000 / timing) + " KV/s)");
    if (errors == 0) {
        LOG.info("No errors found.");
        return 0;
    }
    LOG.warn(errors + " errors found.");
    return errors;
}
Also used: java.util.ArrayList, java.util.HashMap, java.util.Map, java.util.TreeSet, java.util.concurrent.ConcurrentHashMap, org.hbase.async.AtomicIncrementRequest, org.hbase.async.DeleteRequest, org.hbase.async.HBaseException, org.hbase.async.KeyValue, org.hbase.async.PutRequest, org.hbase.async.Scanner
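
The fix branch above bumps the stored max ID with an AtomicIncrementRequest rather than a read-modify-write, so concurrent writers cannot clobber each other. Below is a minimal, self-contained sketch of that pattern; the quorum address and the table, row, family, and qualifier names are placeholders, not values from OpenTSDB's actual schema.

import org.hbase.async.AtomicIncrementRequest;
import org.hbase.async.HBaseClient;

public class IncrementExample {

    public static void main(final String[] args) throws Exception {
        // Placeholder ZooKeeper quorum for a local HBase instance.
        final HBaseClient client = new HBaseClient("localhost");
        // Atomically add 42 to the counter in row "maxid", family "id",
        // qualifier "metrics" of table "example-uid". The increment is
        // applied server-side on the region server.
        final AtomicIncrementRequest air = new AtomicIncrementRequest(
            "example-uid".getBytes(), "maxid".getBytes(),
            "id".getBytes(), "metrics".getBytes(), 42L);
        // atomicIncrement() is asynchronous and resolves to the
        // post-increment value of the counter.
        final long new_value = client.atomicIncrement(air).joinUninterruptibly();
        System.out.println("Counter is now " + new_value);
        client.shutdown().joinUninterruptibly();
    }
}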

Example 2 with AtomicIncrementRequest

Use of org.hbase.async.AtomicIncrementRequest in project opentsdb by OpenTSDB.

The class TSMeta, method incrementAndGetCounter.

/**
   * Increments the tsuid datapoint counter or creates a new counter. Also
   * creates a new meta data entry if the counter did not exist.
   * <b>Note:</b> This method also:
   * <ul><li>Passes the new TSMeta object to the Search plugin after loading 
   * UIDMeta objects</li>
   * <li>Passes the new TSMeta through all configured trees if enabled</li></ul>
   * @param tsdb The TSDB to use for storage access
   * @param tsuid The TSUID to increment or create
   * @return 0 if the put failed, a positive LONG if the put was successful
   * @throws HBaseException if there was a storage issue
   * @throws JSONException if the data was corrupted
   * @throws NoSuchUniqueName if one of the UIDMeta objects does not exist
   */
public static Deferred<Long> incrementAndGetCounter(final TSDB tsdb, final byte[] tsuid) {
    /**
     * Callback that will create a new TSMeta if the increment result is 1 or
     * will simply return the new value.
     */
    final class TSMetaCB implements Callback<Deferred<Long>, Long> {

        /**
       * Called after incrementing the counter and will create a new TSMeta if
       * the returned value was 1 as well as pass the new meta through trees
       * and the search indexer if configured.
       * @return 0 if the put failed, a positive LONG if the put was successful
       */
        @Override
        public Deferred<Long> call(final Long incremented_value) throws Exception {
            LOG.debug("Value: " + incremented_value);
            if (incremented_value > 1) {
                // TODO - maybe update the search index every X increments, or
                // whenever the user runs the full sync CLI
                return Deferred.fromResult(incremented_value);
            }
            // create a new meta object with the current system timestamp. Ideally
            // we would want the data point's timestamp, but that's much more data
            // to keep track of and may not be accurate.
            final TSMeta meta = new TSMeta(tsuid, System.currentTimeMillis() / 1000);
            /**
         * Called after the meta has been passed through tree processing. The 
         * result of the processing doesn't matter and the user may not even
         * have it enabled, so we'll just return the counter.
         */
            final class TreeCB implements Callback<Deferred<Long>, Boolean> {

                @Override
                public Deferred<Long> call(Boolean success) throws Exception {
                    return Deferred.fromResult(incremented_value);
                }
            }
            /**
         * Called after retrieving the newly stored TSMeta and loading
         * associated UIDMeta objects. This class will also pass the meta to the
         * search plugin and run it through any configured trees
         */
            final class FetchNewCB implements Callback<Deferred<Long>, TSMeta> {

                @Override
                public Deferred<Long> call(TSMeta stored_meta) throws Exception {
                    // pass to the search plugin
                    tsdb.indexTSMeta(stored_meta);
                    // pass through the trees
                    return tsdb.processTSMetaThroughTrees(stored_meta).addCallbackDeferring(new TreeCB());
                }
            }
            /**
         * Called after the CAS to store the new TSMeta object. If the CAS
         * failed then we return immediately with a 0 for the counter value.
         * Otherwise we keep processing to load the meta and pass it on.
         */
            final class StoreNewCB implements Callback<Deferred<Long>, Boolean> {

                @Override
                public Deferred<Long> call(Boolean success) throws Exception {
                    if (!success) {
                        LOG.warn("Unable to save metadata: " + meta);
                        return Deferred.fromResult(0L);
                    }
                    LOG.info("Successfullly created new TSUID entry for: " + meta);
                    return new LoadUIDs(tsdb, UniqueId.uidToString(tsuid)).call(meta).addCallbackDeferring(new FetchNewCB());
                }
            }
            // store the new TSMeta object and setup the callback chain
            return meta.storeNew(tsdb).addCallbackDeferring(new StoreNewCB());
        }
    }
    // setup the increment request and execute
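    // the four-argument constructor increments the counter by 1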
    final AtomicIncrementRequest inc = new AtomicIncrementRequest(tsdb.metaTable(), tsuid, FAMILY, COUNTER_QUALIFIER);
    // If the user has disabled real-time TSMeta tracking (e.g. for
    // performance) then we only want to increment the data point count.
    if (!tsdb.getConfig().enable_realtime_ts()) {
        return tsdb.getClient().atomicIncrement(inc);
    }
    return tsdb.getClient().atomicIncrement(inc).addCallbackDeferring(new TSMetaCB());
}
Also used: com.stumbleupon.async.Callback, org.hbase.async.AtomicIncrementRequest
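
The chain in this example hinges on Deferred.addCallbackDeferring, which lets a callback return another Deferred instead of a plain value so the pipeline stays asynchronous end to end. Here is a minimal sketch of the same branch-on-increment idea, assuming placeholder family and qualifier names and a hypothetical doFirstTimeSetup helper standing in for the TSMeta bootstrap work (storeNew, indexing, tree processing).

import com.stumbleupon.async.Callback;
import com.stumbleupon.async.Deferred;
import org.hbase.async.AtomicIncrementRequest;
import org.hbase.async.HBaseClient;

public class CounterChain {

    /** Increments a per-row counter and branches on whether it is brand new. */
    static Deferred<Long> bumpCounter(final HBaseClient client,
                                      final byte[] table, final byte[] row) {
        // The four-argument constructor increments by 1.
        final AtomicIncrementRequest inc = new AtomicIncrementRequest(
            table, row, "f".getBytes(), "counter".getBytes());
        return client.atomicIncrement(inc).addCallbackDeferring(
            new Callback<Deferred<Long>, Long>() {
                @Override
                public Deferred<Long> call(final Long value) {
                    if (value > 1) {
                        // The counter already existed: nothing more to do.
                        return Deferred.fromResult(value);
                    }
                    // First increment: run one-time setup (hypothetical
                    // helper), then pass the counter value down the chain.
                    return doFirstTimeSetup(client, row).addCallbackDeferring(
                        new Callback<Deferred<Long>, Object>() {
                            @Override
                            public Deferred<Long> call(final Object ignored) {
                                return Deferred.fromResult(value);
                            }
                        });
                }
            });
    }

    // Hypothetical stand-in for the kind of follow-up work TSMeta performs.
    static Deferred<Object> doFirstTimeSetup(final HBaseClient client,
                                             final byte[] row) {
        return Deferred.<Object>fromResult(null);
    }
}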
