Use of org.hbase.async.KeyValue in project opentsdb by OpenTSDB.
From the class MetaSync, the method run:
/**
* Loops through the entire TSDB data set and exits when complete.
*/
public void run() {
// list of deferred calls used to act as a buffer
final ArrayList<Deferred<Boolean>> storage_calls = new ArrayList<Deferred<Boolean>>();
final Deferred<Object> result = new Deferred<Object>();
final class ErrBack implements Callback<Object, Exception> {
@Override
public Object call(Exception e) throws Exception {
Throwable ex = e;
while (ex.getClass().equals(DeferredGroupException.class)) {
if (ex.getCause() == null) {
LOG.warn("Unable to get to the root cause of the DGE");
break;
}
ex = ex.getCause();
}
LOG.error("Sync thread failed with exception", ex);
result.callback(null);
return null;
}
}
final ErrBack err_back = new ErrBack();
/**
* Called when we have encountered a previously un-processed UIDMeta object.
* This callback will update the "created" timestamp of the UIDMeta and
* store the update, replace corrupted metas and update search plugins.
*/
final class UidCB implements Callback<Deferred<Boolean>, UIDMeta> {
private final UniqueIdType type;
private final byte[] uid;
private final long timestamp;
/**
* Constructor that initializes the local callback
* @param type The type of UIDMeta we're dealing with
* @param uid The UID of the meta object as a byte array
* @param timestamp The timestamp of the timeseries when this meta
* was first detected
*/
public UidCB(final UniqueIdType type, final byte[] uid, final long timestamp) {
this.type = type;
this.uid = uid;
this.timestamp = timestamp;
}
/**
* A nested class called after fetching a UID name to use when creating a
* new UIDMeta object if the previous object was corrupted. Also pushes
* the meta off to the search plugin.
*/
final class UidNameCB implements Callback<Deferred<Boolean>, String> {
@Override
public Deferred<Boolean> call(final String name) throws Exception {
UIDMeta new_meta = new UIDMeta(type, uid, name);
new_meta.setCreated(timestamp);
tsdb.indexUIDMeta(new_meta);
LOG.info("Replacing corrupt UID [" + UniqueId.uidToString(uid) + "] of type [" + type + "]");
return new_meta.syncToStorage(tsdb, true);
}
}
@Override
public Deferred<Boolean> call(final UIDMeta meta) throws Exception {
// only update the created timestamp if it's off by more than an hour (or unset); otherwise it's probably an accurate timestamp
if (meta.getCreated() > (timestamp + 3600) || meta.getCreated() == 0) {
LOG.info("Updating UID [" + UniqueId.uidToString(uid) + "] of type [" + type + "]");
meta.setCreated(timestamp);
// if the UID or type fields are missing, consider the meta corrupt and replace it with a new object
if (meta.getUID() == null || meta.getUID().isEmpty() || meta.getType() == null) {
return tsdb.getUidName(type, uid).addCallbackDeferring(new UidNameCB());
} else {
// the meta was good, just needed a timestamp update so sync to
// search and storage
tsdb.indexUIDMeta(meta);
LOG.info("Syncing valid UID [" + UniqueId.uidToString(uid) + "] of type [" + type + "]");
return meta.syncToStorage(tsdb, false);
}
} else {
LOG.debug("UID [" + UniqueId.uidToString(uid) + "] of type [" + type + "] is up to date in storage");
return Deferred.fromResult(true);
}
}
}
/**
* Called to handle a previously unprocessed TSMeta object. This callback
* will update the "created" timestamp, create a new TSMeta object if
* missing, and update search plugins.
*/
final class TSMetaCB implements Callback<Deferred<Boolean>, TSMeta> {
private final String tsuid_string;
private final byte[] tsuid;
private final long timestamp;
/**
* Constructor that stores the TSUID and the detection timestamp
* @param tsuid ID of the timeseries
* @param timestamp The timestamp when the first data point was recorded
*/
public TSMetaCB(final byte[] tsuid, final long timestamp) {
this.tsuid = tsuid;
tsuid_string = UniqueId.uidToString(tsuid);
this.timestamp = timestamp;
}
@Override
public Deferred<Boolean> call(final TSMeta meta) throws Exception {
/** Called to process the new meta through the search plugin and tree code */
final class IndexCB implements Callback<Deferred<Boolean>, TSMeta> {
@Override
public Deferred<Boolean> call(final TSMeta new_meta) throws Exception {
tsdb.indexTSMeta(new_meta);
// pass through the trees
return tsdb.processTSMetaThroughTrees(new_meta);
}
}
/** Called to load the newly created meta object for passage onto the
* search plugin and tree builder if configured
*/
final class GetCB implements Callback<Deferred<Boolean>, Boolean> {
@Override
public final Deferred<Boolean> call(final Boolean exists) throws Exception {
if (exists) {
return TSMeta.getTSMeta(tsdb, tsuid_string).addCallbackDeferring(new IndexCB());
} else {
return Deferred.fromResult(false);
}
}
}
/** Errback on the store new call to catch issues */
class ErrBack implements Callback<Object, Exception> {
@Override
public Object call(final Exception e) throws Exception {
LOG.warn("Failed creating meta for: " + tsuid_string + " with exception: ", e);
return null;
}
}
// no TSMeta exists in storage for this timeseries, so we need to create a new one
if (meta == null) {
/**
* Called after successfully creating a TSMeta counter and object,
* used to convert the deferred long to a boolean so it can be
* combined with other calls for waiting.
*/
final class CreatedCB implements Callback<Deferred<Boolean>, Long> {
@Override
public Deferred<Boolean> call(Long value) throws Exception {
LOG.info("Created counter and meta for timeseries [" + tsuid_string + "]");
return Deferred.fromResult(true);
}
}
/**
* Called after checking to see if the counter exists and is used
* to determine if we should create a new counter AND meta or just a
* new meta
*/
final class CounterCB implements Callback<Deferred<Boolean>, Boolean> {
@Override
public Deferred<Boolean> call(final Boolean exists) throws Exception {
if (!exists) {
// the increment call will create the meta object and send it to the search plugin, so we don't have to do that here or in the local callback
return TSMeta.incrementAndGetCounter(tsdb, tsuid).addCallbackDeferring(new CreatedCB());
} else {
TSMeta new_meta = new TSMeta(tsuid, timestamp);
tsdb.indexTSMeta(new_meta);
LOG.info("Counter exists but meta was null, creating meta data " + "for timeseries [" + tsuid_string + "]");
return new_meta.storeNew(tsdb).addCallbackDeferring(new GetCB()).addErrback(new ErrBack());
}
}
}
// handle the case where the counter exists but the meta does not, e.g. if the TSD was killed improperly before the meta was flushed to storage
return TSMeta.counterExistsInStorage(tsdb, tsuid).addCallbackDeferring(new CounterCB());
}
// the meta exists but is missing its TSUID, so treat it as corrupted and replace it
if (meta.getTSUID() == null || meta.getTSUID().isEmpty()) {
LOG.warn("Replacing corrupt meta data for timeseries [" + tsuid_string + "]");
TSMeta new_meta = new TSMeta(tsuid, timestamp);
tsdb.indexTSMeta(new_meta);
return new_meta.storeNew(tsdb).addCallbackDeferring(new GetCB()).addErrback(new ErrBack());
} else {
// only update the created time if it's off by more than an hour; otherwise it's probably an accurate timestamp
if (meta.getCreated() > (timestamp + 3600) || meta.getCreated() == 0) {
meta.setCreated(timestamp);
tsdb.indexTSMeta(meta);
LOG.info("Updated created timestamp for timeseries [" + tsuid_string + "]");
return meta.syncToStorage(tsdb, false);
}
LOG.debug("TSUID [" + tsuid_string + "] is up to date in storage");
return Deferred.fromResult(false);
}
}
}
/**
* Scanner callback that recursively loops through all of the data point
* rows. Note that we don't process the actual data points, just the row
* keys.
*/
final class MetaScanner implements Callback<Object, ArrayList<ArrayList<KeyValue>>> {
private byte[] last_tsuid = null;
private String tsuid_string = "";
/**
* Fetches the next set of rows from the scanner and adds this class as
* a callback
* @return A meaningless deferred to wait on until all data rows have
* been processed.
*/
public Object scan() {
return scanner.nextRows().addCallback(this).addErrback(err_back);
}
@Override
public Object call(ArrayList<ArrayList<KeyValue>> rows) throws Exception {
if (rows == null) {
result.callback(null);
return null;
}
for (final ArrayList<KeyValue> row : rows) {
try {
final byte[] tsuid = UniqueId.getTSUIDFromKey(row.get(0).key(), TSDB.metrics_width(), Const.TIMESTAMP_BYTES);
// if this row belongs to the same TSUID as the previous one, skip it so we save time
if (last_tsuid != null && Arrays.equals(last_tsuid, tsuid)) {
continue;
}
last_tsuid = tsuid;
// see if we've already processed this tsuid and if so, continue
if (processed_tsuids.contains(Arrays.hashCode(tsuid))) {
continue;
}
tsuid_string = UniqueId.uidToString(tsuid);
/**
* An error callback used to catch issues with a particular timeseries
* or UIDMeta such as a missing UID name. We want to continue
* processing when this happens so we'll just log the error and
* the user can issue a command later to clean up orphaned meta
* entries.
*/
final class RowErrBack implements Callback<Object, Exception> {
@Override
public Object call(Exception e) throws Exception {
Throwable ex = e;
while (ex.getClass().equals(DeferredGroupException.class)) {
if (ex.getCause() == null) {
LOG.warn("Unable to get to the root cause of the DGE");
break;
}
ex = ex.getCause();
}
if (ex.getClass().equals(IllegalStateException.class)) {
LOG.error("Invalid data when processing TSUID [" + tsuid_string + "]: " + ex.getMessage());
} else if (ex.getClass().equals(IllegalArgumentException.class)) {
LOG.error("Invalid data when processing TSUID [" + tsuid_string + "]: " + ex.getMessage());
} else if (ex.getClass().equals(NoSuchUniqueId.class)) {
LOG.warn("Timeseries [" + tsuid_string + "] includes a non-existant UID: " + ex.getMessage());
} else {
LOG.error("Unknown exception processing row: " + row, ex);
}
return null;
}
}
// add tsuid to the processed list
processed_tsuids.add(Arrays.hashCode(tsuid));
// we may have a new TSUID or UIDs, so fetch the timestamp of the
// row for use as the "created" time. Depending on speed we could
// parse datapoints, but for now the hourly row time is enough
final long timestamp = Bytes.getUnsignedInt(row.get(0).key(), Const.SALT_WIDTH() + TSDB.metrics_width());
LOG.debug("[" + thread_id + "] Processing TSUID: " + tsuid_string + " row timestamp: " + timestamp);
// now process the UID metric meta data
final byte[] metric_uid_bytes = Arrays.copyOfRange(tsuid, 0, TSDB.metrics_width());
final String metric_uid = UniqueId.uidToString(metric_uid_bytes);
Long last_get = metric_uids.get(metric_uid);
if (last_get == null || last_get == 0 || timestamp < last_get) {
// fetch and update. Returns default object if the meta doesn't
// exist, so we can just call sync on this to create a missing
// entry
final UidCB cb = new UidCB(UniqueIdType.METRIC, metric_uid_bytes, timestamp);
final Deferred<Boolean> process_uid = UIDMeta.getUIDMeta(tsdb, UniqueIdType.METRIC, metric_uid_bytes).addCallbackDeferring(cb).addErrback(new RowErrBack());
storage_calls.add(process_uid);
metric_uids.put(metric_uid, timestamp);
}
// loop through the tags and process their meta
final List<byte[]> tags = UniqueId.getTagsFromTSUID(tsuid_string);
int idx = 0;
for (byte[] tag : tags) {
final UniqueIdType type = (idx % 2 == 0) ? UniqueIdType.TAGK : UniqueIdType.TAGV;
idx++;
final String uid = UniqueId.uidToString(tag);
// check the maps to see if we need to bother updating
if (type == UniqueIdType.TAGK) {
last_get = tagk_uids.get(uid);
} else {
last_get = tagv_uids.get(uid);
}
if (last_get != null && last_get != 0 && last_get <= timestamp) {
continue;
}
// fetch and update. Returns default object if the meta doesn't
// exist, so we can just call sync on this to create a missing
// entry
final UidCB cb = new UidCB(type, tag, timestamp);
final Deferred<Boolean> process_uid = UIDMeta.getUIDMeta(tsdb, type, tag).addCallbackDeferring(cb).addErrback(new RowErrBack());
storage_calls.add(process_uid);
if (type == UniqueIdType.TAGK) {
tagk_uids.put(uid, timestamp);
} else {
tagv_uids.put(uid, timestamp);
}
}
// handle the timeseries meta last so we don't record it if one
// or more of the UIDs had an issue
final Deferred<Boolean> process_tsmeta = TSMeta.getTSMeta(tsdb, tsuid_string).addCallbackDeferring(new TSMetaCB(tsuid, timestamp)).addErrback(new RowErrBack());
storage_calls.add(process_tsmeta);
} catch (RuntimeException e) {
LOG.error("Processing row " + row + " failed with exception: " + e.getMessage());
LOG.debug("Row: " + row + " stack trace: ", e);
}
}
/**
* A buffering callback used to avoid StackOverflowError exceptions when
* the list of deferred calls grows too large. Instead we'll
* process the Scanner's limit in rows, wait for all of the storage
* calls to complete, then continue on to the next set.
*/
final class ContinueCB implements Callback<Object, ArrayList<Boolean>> {
@Override
public Object call(ArrayList<Boolean> puts) throws Exception {
storage_calls.clear();
return scan();
}
}
/**
* Catch exceptions in one of the grouped calls and continue scanning.
* Without this the user may not see the exception and the thread will
* just die silently.
*/
final class ContinueEB implements Callback<Object, Exception> {
@Override
public Object call(Exception e) throws Exception {
Throwable ex = e;
while (ex.getClass().equals(DeferredGroupException.class)) {
if (ex.getCause() == null) {
LOG.warn("Unable to get to the root cause of the DGE");
break;
}
ex = ex.getCause();
}
LOG.error("[" + thread_id + "] Upstream Exception: ", ex);
return scan();
}
}
// call ourselves again, but wait for the current set of storage calls to
// complete first so we don't run out of memory
Deferred.group(storage_calls).addCallback(new ContinueCB()).addErrback(new ContinueEB());
return null;
}
}
final MetaScanner scanner = new MetaScanner();
try {
scanner.scan();
result.joinUninterruptibly();
LOG.info("[" + thread_id + "] Complete");
} catch (Exception e) {
LOG.error("[" + thread_id + "] Scanner Exception", e);
throw new RuntimeException("[" + thread_id + "] Scanner exception", e);
}
}
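The ContinueCB and ContinueEB callbacks above implement a buffering pattern: the deferreds for one scanner batch are grouped, the buffer is cleared, and only then is the next batch fetched, which keeps the pending-call list and the callback chain from growing without bound. A minimal, self-contained sketch of that pattern is shown below; it assumes only the stumbleupon async library, and BufferedScanSketch and fetchNextBatch() are hypothetical stand-ins for the scanner and per-row storage calls.

import java.util.ArrayList;

import com.stumbleupon.async.Callback;
import com.stumbleupon.async.Deferred;

public class BufferedScanSketch {
  // buffer of pending storage calls for the current batch
  private final ArrayList<Deferred<Boolean>> storage_calls = new ArrayList<Deferred<Boolean>>();
  private int batches_left = 3;

  // hypothetical stand-in for scanner.nextRows() plus the per-row storage calls
  private Deferred<Boolean> fetchNextBatch() {
    return Deferred.fromResult(true);
  }

  public Deferred<Object> scan() {
    if (batches_left-- <= 0) {
      // no more batches: fire the final result
      return Deferred.fromResult((Object) null);
    }
    storage_calls.add(fetchNextBatch());

    /** Waits for the whole batch, clears the buffer, then recurses */
    final class ContinueCB implements Callback<Deferred<Object>, ArrayList<Boolean>> {
      @Override
      public Deferred<Object> call(final ArrayList<Boolean> results) throws Exception {
        storage_calls.clear();
        return scan();
      }
    }
    return Deferred.group(storage_calls).addCallbackDeferring(new ContinueCB());
  }

  public static void main(final String[] args) throws Exception {
    new BufferedScanSketch().scan().joinUninterruptibly();
    System.out.println("all batches processed");
  }
}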
Use of org.hbase.async.KeyValue in project opentsdb by OpenTSDB.
From the class TreeRule, the method fetchRule:
/**
* Attempts to retrieve the specified tree rule from storage.
* @param tsdb The TSDB to use for storage access
* @param tree_id ID of the tree the rule belongs to
* @param level Level where the rule resides
* @param order Order where the rule resides
* @return A TreeRule object if found, null if it does not exist
* @throws HBaseException if there was an issue
* @throws IllegalArgumentException if one of the required parameters was
* missing
* @throws JSONException if the object could not be serialized
*/
public static Deferred<TreeRule> fetchRule(final TSDB tsdb, final int tree_id, final int level, final int order) {
if (tree_id < 1 || tree_id > 65535) {
throw new IllegalArgumentException("Invalid Tree ID");
}
if (level < 0) {
throw new IllegalArgumentException("Invalid rule level");
}
if (order < 0) {
throw new IllegalArgumentException("Invalid rule order");
}
// fetch just the single rule column from the tree row
final GetRequest get = new GetRequest(tsdb.treeTable(), Tree.idToBytes(tree_id));
get.family(Tree.TREE_FAMILY());
get.qualifier(getQualifier(level, order));
/**
* Called after fetching to parse the results
*/
final class FetchCB implements Callback<Deferred<TreeRule>, ArrayList<KeyValue>> {
@Override
public Deferred<TreeRule> call(final ArrayList<KeyValue> row) {
if (row == null || row.isEmpty()) {
return Deferred.fromResult(null);
}
return Deferred.fromResult(parseFromStorage(row.get(0)));
}
}
return tsdb.getClient().get(get).addCallbackDeferring(new FetchCB());
}
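A hypothetical synchronous caller for this method might look like the sketch below; FetchRuleExample and the tree id, level and order values are placeholders for illustration, and only the fetchRule signature shown above is assumed.

import com.stumbleupon.async.Deferred;

import net.opentsdb.core.TSDB;
import net.opentsdb.tree.TreeRule;

public class FetchRuleExample {
  // block on the deferred and treat a null result as "rule not found"
  static TreeRule fetchFirstRule(final TSDB tsdb) throws Exception {
    final Deferred<TreeRule> deferred = TreeRule.fetchRule(tsdb, 1, 0, 0);
    final TreeRule rule = deferred.joinUninterruptibly();
    if (rule == null) {
      System.out.println("No rule found for tree 1 at level 0, order 0");
    }
    return rule;
  }
}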
Use of org.hbase.async.KeyValue in project opentsdb by OpenTSDB.
From the class TreeRule, the method deleteAllRules:
/**
* Attempts to delete all rules belonging to the given tree.
* @param tsdb The TSDB to use for storage access
* @param tree_id ID of the tree the rules belongs to
* @return A deferred to wait on for completion. The value has no meaning and
* may be null.
* @throws HBaseException if there was an issue
* @throws IllegalArgumentException if one of the required parameters was
* missing
*/
public static Deferred<Object> deleteAllRules(final TSDB tsdb, final int tree_id) {
if (tree_id < 1 || tree_id > 65535) {
throw new IllegalArgumentException("Invalid Tree ID");
}
// fetch the whole row
final GetRequest get = new GetRequest(tsdb.treeTable(), Tree.idToBytes(tree_id));
get.family(Tree.TREE_FAMILY());
/**
* Called after fetching the requested row. If the row is empty, we just
* return, otherwise we compile a list of qualifiers to delete and submit
* a single delete request to storage.
*/
final class GetCB implements Callback<Deferred<Object>, ArrayList<KeyValue>> {
@Override
public Deferred<Object> call(final ArrayList<KeyValue> row) throws Exception {
if (row == null || row.isEmpty()) {
return Deferred.fromResult(null);
}
final ArrayList<byte[]> qualifiers = new ArrayList<byte[]>(row.size());
for (KeyValue column : row) {
if (column.qualifier().length > RULE_PREFIX.length && Bytes.memcmp(RULE_PREFIX, column.qualifier(), 0, RULE_PREFIX.length) == 0) {
qualifiers.add(column.qualifier());
}
}
final DeleteRequest delete = new DeleteRequest(tsdb.treeTable(), Tree.idToBytes(tree_id), Tree.TREE_FAMILY(), qualifiers.toArray(new byte[qualifiers.size()][]));
return tsdb.getClient().delete(delete);
}
}
return tsdb.getClient().get(get).addCallbackDeferring(new GetCB());
}
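As above, a hypothetical caller could simply block on the returned deferred; DeleteRulesExample and the tree id are placeholders for illustration.

import net.opentsdb.core.TSDB;
import net.opentsdb.tree.TreeRule;

public class DeleteRulesExample {
  // delete every rule of tree 1 and wait for the delete to complete;
  // the deferred's value carries no meaning and may be null
  static void purgeRules(final TSDB tsdb) throws Exception {
    TreeRule.deleteAllRules(tsdb, 1).joinUninterruptibly();
  }
}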
Use of org.hbase.async.KeyValue in project opentsdb by OpenTSDB.
From the class TimeSeriesLookup, the method lookupAsync:
/**
* Lookup time series associated with the given metric, tagk, tagv or tag
* pairs. Either the meta table or the data table will be scanned. If no
* metric is given, a full table scan must be performed and this call may take
* a long time to complete.
* When dumping to stdout, if an ID can't be looked up, it will be logged and
* skipped.
* @return A list of TSUIDs matching the given lookup query.
* @throws NoSuchUniqueName if any of the given names fail to resolve to a
* UID.
* @since 2.2
*/
public Deferred<List<byte[]>> lookupAsync() {
final Pattern tagv_regex = tagv_filter != null ? Pattern.compile(tagv_filter) : null;
// we don't really know what size the UIDs will resolve to so just grab
// a decent amount.
final StringBuffer buf = to_stdout ? new StringBuffer(2048) : null;
final long start = System.currentTimeMillis();
final int limit;
if (query.getLimit() > 0) {
if (query.useMeta() || Const.SALT_WIDTH() < 1) {
limit = query.getLimit();
} else if (query.getLimit() < Const.SALT_BUCKETS()) {
limit = 1;
} else {
limit = query.getLimit() / Const.SALT_BUCKETS();
}
} else {
limit = 0;
}
class ScannerCB implements Callback<Deferred<List<byte[]>>, ArrayList<ArrayList<KeyValue>>> {
private final Scanner scanner;
// used to avoid dupes when scanning the data table
private byte[] last_tsuid = null;
private int rows_read;
ScannerCB(final Scanner scanner) {
this.scanner = scanner;
}
Deferred<List<byte[]>> scan() {
return scanner.nextRows().addCallbackDeferring(this);
}
@Override
public Deferred<List<byte[]>> call(final ArrayList<ArrayList<KeyValue>> rows) throws Exception {
if (rows == null) {
scanner.close();
if (query.useMeta() || Const.SALT_WIDTH() < 1) {
LOG.debug("Lookup query matched " + tsuids.size() + " time series in " + (System.currentTimeMillis() - start) + " ms");
}
return Deferred.fromResult(tsuids);
}
for (final ArrayList<KeyValue> row : rows) {
if (limit > 0 && rows_read >= limit) {
// a little recursion to close the scanner and trigger the logging above
return call(null);
}
final byte[] tsuid = query.useMeta() ? row.get(0).key() : UniqueId.getTSUIDFromKey(row.get(0).key(), TSDB.metrics_width(), Const.TIMESTAMP_BYTES);
// match the tagv filter regex against the string form of the TSUID; note this creates temporary string objects
if (tagv_regex != null && !tagv_regex.matcher(new String(tsuid, CHARSET)).find()) {
continue;
}
if (to_stdout) {
if (last_tsuid != null && Bytes.memcmp(last_tsuid, tsuid) == 0) {
continue;
}
last_tsuid = tsuid;
try {
buf.append(UniqueId.uidToString(tsuid)).append(" ");
buf.append(RowKey.metricNameAsync(tsdb, tsuid).joinUninterruptibly());
buf.append(" ");
final List<byte[]> tag_ids = UniqueId.getTagPairsFromTSUID(tsuid);
final Map<String, String> resolved_tags = Tags.resolveIdsAsync(tsdb, tag_ids).joinUninterruptibly();
for (final Map.Entry<String, String> tag_pair : resolved_tags.entrySet()) {
buf.append(tag_pair.getKey()).append("=").append(tag_pair.getValue()).append(" ");
}
} catch (NoSuchUniqueId nsui) {
LOG.error("Unable to resolve UID in TSUID (" + UniqueId.uidToString(tsuid) + ") " + nsui.getMessage());
}
// dump the line to stdout, then reset the buffer so we can re-use it
System.out.println(buf.toString());
buf.setLength(0);
} else {
tsuids.add(tsuid);
}
++rows_read;
}
return scan();
}
@Override
public String toString() {
return "Scanner callback";
}
}
class CompleteCB implements Callback<List<byte[]>, ArrayList<List<byte[]>>> {
@Override
public List<byte[]> call(final ArrayList<List<byte[]>> unused) throws Exception {
LOG.debug("Lookup query matched " + tsuids.size() + " time series in " + (System.currentTimeMillis() - start) + " ms");
return tsuids;
}
@Override
public String toString() {
return "Final async lookup callback";
}
}
class UIDCB implements Callback<Deferred<List<byte[]>>, Object> {
@Override
public Deferred<List<byte[]>> call(Object arg0) throws Exception {
if (!query.useMeta() && Const.SALT_WIDTH() > 0 && metric_uid != null) {
final ArrayList<Deferred<List<byte[]>>> deferreds = new ArrayList<Deferred<List<byte[]>>>(Const.SALT_BUCKETS());
for (int i = 0; i < Const.SALT_BUCKETS(); i++) {
deferreds.add(new ScannerCB(getScanner(i)).scan());
}
return Deferred.group(deferreds).addCallback(new CompleteCB());
} else {
return new ScannerCB(getScanner(0)).scan();
}
}
@Override
public String toString() {
return "UID resolution callback";
}
}
return resolveUIDs().addCallbackDeferring(new UIDCB());
}
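A hypothetical consumer of the asynchronous result is sketched below. It assumes TimeSeriesLookup lives in net.opentsdb.search and that the instance has already been constructed and configured with its query; only the lookupAsync signature shown above is taken from the source.

import java.util.List;

import net.opentsdb.search.TimeSeriesLookup;
import net.opentsdb.uid.UniqueId;

public class LookupExample {
  // block on the asynchronous lookup and print each matching TSUID as a hex string
  static void printMatches(final TimeSeriesLookup lookup) throws Exception {
    final List<byte[]> tsuids = lookup.lookupAsync().joinUninterruptibly();
    for (final byte[] tsuid : tsuids) {
      System.out.println(UniqueId.uidToString(tsuid));
    }
  }
}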
Use of org.hbase.async.KeyValue in project opentsdb by OpenTSDB.
From the class Tree, the method fetchAllTrees:
/**
* Attempts to retrieve all trees from the UID table, including their rules.
* If no trees were found, the result will be an empty list
* @param tsdb The TSDB to use for storage
* @return A list of tree objects. May be empty if none were found
*/
public static Deferred<List<Tree>> fetchAllTrees(final TSDB tsdb) {
final Deferred<List<Tree>> result = new Deferred<List<Tree>>();
/**
* Scanner callback that recursively calls itself to load the next set of
* rows from storage. When the scanner returns a null, the callback will
* return with the list of trees discovered.
*/
final class AllTreeScanner implements Callback<Object, ArrayList<ArrayList<KeyValue>>> {
private final List<Tree> trees = new ArrayList<Tree>();
private final Scanner scanner;
public AllTreeScanner() {
scanner = setupAllTreeScanner(tsdb);
}
/**
* Fetches the next set of results from the scanner and adds this class
* as a callback.
* @return A meaningless object; the list of trees is passed to the result
* deferred once the scanner has reached the end
*/
public Object fetchTrees() {
return scanner.nextRows().addCallback(this);
}
@Override
public Object call(ArrayList<ArrayList<KeyValue>> rows) throws Exception {
if (rows == null) {
result.callback(trees);
return null;
}
for (ArrayList<KeyValue> row : rows) {
final Tree tree = new Tree();
for (KeyValue column : row) {
if (column.qualifier().length >= TREE_QUALIFIER.length && Bytes.memcmp(TREE_QUALIFIER, column.qualifier()) == 0) {
// it's *this* tree. We deserialize to a new object and copy
// since the columns could be in any order and we may get a rule
// before the tree object
final Tree local_tree = JSON.parseToObject(column.value(), Tree.class);
tree.created = local_tree.created;
tree.description = local_tree.description;
tree.name = local_tree.name;
tree.notes = local_tree.notes;
tree.strict_match = local_tree.strict_match;
tree.enabled = local_tree.enabled;
tree.store_failures = local_tree.store_failures;
// WARNING: Since the JSON data in storage doesn't contain the tree
// ID, we need to parse it from the row key
tree.setTreeId(bytesToId(row.get(0).key()));
// this column holds a tree rule, so parse it and add it to the tree
} else if (column.qualifier().length > TreeRule.RULE_PREFIX().length && Bytes.memcmp(TreeRule.RULE_PREFIX(), column.qualifier(), 0, TreeRule.RULE_PREFIX().length) == 0) {
final TreeRule rule = TreeRule.parseFromStorage(column);
tree.addRule(rule);
}
}
// only add the tree if we parsed a valid ID
if (tree.tree_id > 0) {
trees.add(tree);
}
}
// recurse to get the next set of rows from the scanner
return fetchTrees();
}
}
// start the scanning process
new AllTreeScanner().fetchTrees();
return result;
}
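A hypothetical synchronous caller is sketched below; ListTreesExample is a placeholder, and getTreeId() is assumed to be the standard getter for the tree_id field referenced above.

import java.util.List;

import net.opentsdb.core.TSDB;
import net.opentsdb.tree.Tree;

public class ListTreesExample {
  // block on the deferred and print the ID of every tree found in storage
  static void listTrees(final TSDB tsdb) throws Exception {
    final List<Tree> trees = Tree.fetchAllTrees(tsdb).joinUninterruptibly();
    for (final Tree tree : trees) {
      System.out.println("Found tree with ID: " + tree.getTreeId());
    }
  }
}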