
Example 1 with FlowActivityRowKeyPrefix

Use of org.apache.hadoop.yarn.server.timelineservice.storage.flow.FlowActivityRowKeyPrefix in the Apache Hadoop project.

From class TestRowKeys, method testFlowActivityRowKey:

@Test
public void testFlowActivityRowKey() {
    Long ts = 1459900830000L;
    Long dayTimestamp = HBaseTimelineStorageUtils.getTopOfTheDayTimestamp(ts);
    // Round-trip a full row key: the timestamp component is truncated to the
    // top of the day before encoding.
    byte[] byteRowKey = new FlowActivityRowKey(CLUSTER, ts, USER, FLOW_NAME).getRowKey();
    FlowActivityRowKey rowKey = FlowActivityRowKey.parseRowKey(byteRowKey);
    assertEquals(CLUSTER, rowKey.getClusterId());
    assertEquals(dayTimestamp, rowKey.getDayTimestamp());
    assertEquals(USER, rowKey.getUserId());
    assertEquals(FLOW_NAME, rowKey.getFlowName());
    // A cluster-only prefix encodes one component plus a trailing separator,
    // so splitting yields an empty second segment.
    byte[] byteRowKeyPrefix = new FlowActivityRowKeyPrefix(CLUSTER).getRowKeyPrefix();
    byte[][] splits = Separator.QUALIFIERS.split(byteRowKeyPrefix,
        new int[] { Separator.VARIABLE_SIZE, Separator.VARIABLE_SIZE });
    assertEquals(2, splits.length);
    assertEquals(0, splits[1].length);
    assertEquals(CLUSTER, Separator.QUALIFIERS.decode(Bytes.toString(splits[0])));
    verifyRowPrefixBytes(byteRowKeyPrefix);
    // A cluster-plus-day prefix stores the timestamp inverted so that newer
    // days sort first; inverting again recovers the original value.
    byteRowKeyPrefix = new FlowActivityRowKeyPrefix(CLUSTER, ts).getRowKeyPrefix();
    splits = Separator.QUALIFIERS.split(byteRowKeyPrefix,
        new int[] { Separator.VARIABLE_SIZE, Bytes.SIZEOF_LONG, Separator.VARIABLE_SIZE });
    assertEquals(3, splits.length);
    assertEquals(0, splits[2].length);
    assertEquals(CLUSTER, Separator.QUALIFIERS.decode(Bytes.toString(splits[0])));
    assertEquals(ts, (Long) LongConverter.invertLong(Bytes.toLong(splits[1])));
    verifyRowPrefixBytes(byteRowKeyPrefix);
}
Also used: FlowActivityRowKeyPrefix (org.apache.hadoop.yarn.server.timelineservice.storage.flow.FlowActivityRowKeyPrefix), FlowActivityRowKey (org.apache.hadoop.yarn.server.timelineservice.storage.flow.FlowActivityRowKey), Test (org.junit.Test)
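
The inverted timestamp is what makes the newest days sort first under a fixed cluster prefix in an ascending HBase scan. Below is a minimal sketch of that ordering, assuming LongConverter and HBaseTimelineStorageUtils live in the timeline service's storage.common package as used in the test above; the class name and sample values are illustrative.

import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.yarn.server.timelineservice.storage.common.HBaseTimelineStorageUtils;
import org.apache.hadoop.yarn.server.timelineservice.storage.common.LongConverter;

public class InvertedDayOrderingSketch {
    public static void main(String[] args) {
        // Two consecutive days, truncated to the top of the day.
        long newerDay = HBaseTimelineStorageUtils.getTopOfTheDayTimestamp(1459900830000L);
        long olderDay = newerDay - 86400000L; // one day earlier
        // Inverting flips the natural order: the newer day encodes to the
        // smaller byte value, so it appears first in an ascending scan.
        byte[] newerBytes = Bytes.toBytes(LongConverter.invertLong(newerDay));
        byte[] olderBytes = Bytes.toBytes(LongConverter.invertLong(olderDay));
        System.out.println(Bytes.compareTo(newerBytes, olderBytes) < 0); // prints true
    }
}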

Example 2 with FlowActivityRowKeyPrefix

Use of org.apache.hadoop.yarn.server.timelineservice.storage.flow.FlowActivityRowKeyPrefix in the Apache Hadoop project.

From class FlowActivityEntityReader, method getResults:

@Override
protected ResultScanner getResults(Configuration hbaseConf, Connection conn,
        FilterList filterList) throws IOException {
    Scan scan = new Scan();
    String clusterId = getContext().getClusterId();
    if (getFilters().getCreatedTimeBegin() == 0L
            && getFilters().getCreatedTimeEnd() == Long.MAX_VALUE) {
        // All records have to be chosen.
        scan.setRowPrefixFilter(
            new FlowActivityRowKeyPrefix(clusterId).getRowKeyPrefix());
    } else {
        // Day timestamps are stored inverted, so the scan starts at the end
        // of the requested window and stops just before its beginning.
        scan.setStartRow(new FlowActivityRowKeyPrefix(clusterId,
            getFilters().getCreatedTimeEnd()).getRowKeyPrefix());
        scan.setStopRow(new FlowActivityRowKeyPrefix(clusterId,
            (getFilters().getCreatedTimeBegin() <= 0 ? 0
                : (getFilters().getCreatedTimeBegin() - 1))).getRowKeyPrefix());
    }
    // Use the page filter to limit the result to the page size. The scanner
    // may still return more than the limit, so the caller has to stop reading
    // once it has the right number of rows.
    scan.setFilter(new PageFilter(getFilters().getLimit()));
    return getTable().getResultScanner(hbaseConf, conn, scan);
}
Also used: FlowActivityRowKeyPrefix (org.apache.hadoop.yarn.server.timelineservice.storage.flow.FlowActivityRowKeyPrefix), Scan (org.apache.hadoop.hbase.client.Scan), PageFilter (org.apache.hadoop.hbase.filter.PageFilter)
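
As the comment warns, PageFilter is evaluated independently on each region server, so the merged scanner can hand back more rows than the page size. Here is a hypothetical caller-side helper showing how a reader might enforce the limit while iterating; readOnePage is an illustrative name (not part of the Hadoop class), and getResults is the method shown above. It assumes java.util.ArrayList/List and org.apache.hadoop.hbase.client.Result/ResultScanner are imported.

private List<Result> readOnePage(Configuration hbaseConf, Connection conn,
        FilterList filterList) throws IOException {
    long limit = getFilters().getLimit();
    List<Result> results = new ArrayList<>();
    try (ResultScanner scanner = getResults(hbaseConf, conn, filterList)) {
        for (Result result : scanner) {
            results.add(result);
            // Enforce the page size here as well, since PageFilter alone
            // does not guarantee it across region servers.
            if (results.size() >= limit) {
                break;
            }
        }
    }
    return results;
}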

Aggregations

FlowActivityRowKeyPrefix (org.apache.hadoop.yarn.server.timelineservice.storage.flow.FlowActivityRowKeyPrefix): 2 uses
Scan (org.apache.hadoop.hbase.client.Scan): 1 use
PageFilter (org.apache.hadoop.hbase.filter.PageFilter): 1 use
FlowActivityRowKey (org.apache.hadoop.yarn.server.timelineservice.storage.flow.FlowActivityRowKey): 1 use
Test (org.junit.Test): 1 use