Example 26 with Result

Use of org.apache.hadoop.hbase.client.Result in project hbase by apache.

From class HBaseTestingUtility, method getMetaTableRows:

/**
   * Returns all rows from the hbase:meta table for a given user table
   *
   * @throws IOException When reading the rows fails.
   */
public List<byte[]> getMetaTableRows(TableName tableName) throws IOException {
    // TODO: Redo using MetaTableAccessor.
    Table t = getConnection().getTable(TableName.META_TABLE_NAME);
    List<byte[]> rows = new ArrayList<>();
    ResultScanner s = t.getScanner(new Scan());
    for (Result result : s) {
        HRegionInfo info = MetaTableAccessor.getHRegionInfo(result);
        if (info == null) {
            LOG.error("No region info for row " + Bytes.toString(result.getRow()));
            // TODO figure out what to do for this new hosed case.
            continue;
        }
        if (info.getTable().equals(tableName)) {
            LOG.info("getMetaTableRows: row -> " + Bytes.toStringBinary(result.getRow()) + info);
            rows.add(result.getRow());
        }
    }
    s.close();
    t.close();
    return rows;
}
Also used: HTable (org.apache.hadoop.hbase.client.HTable), Table (org.apache.hadoop.hbase.client.Table), ResultScanner (org.apache.hadoop.hbase.client.ResultScanner), ArrayList (java.util.ArrayList), Scan (org.apache.hadoop.hbase.client.Scan), Result (org.apache.hadoop.hbase.client.Result)
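
A minimal caller sketch for the method above, assuming the HBase test jars are on the classpath; the table name "test_table" and family "cf" are made up for illustration:

import java.util.List;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.util.Bytes;

public class MetaRowsDemo {
    public static void main(String[] args) throws Exception {
        HBaseTestingUtility util = new HBaseTestingUtility();
        // Spin up an in-process HBase cluster for the demo.
        util.startMiniCluster();
        TableName name = TableName.valueOf("test_table"); // hypothetical table
        util.createTable(name, Bytes.toBytes("cf"));      // one region, one family
        // Each region of the user table owns one row in hbase:meta.
        List<byte[]> rows = util.getMetaTableRows(name);
        for (byte[] row : rows) {
            System.out.println(Bytes.toStringBinary(row));
        }
        util.shutdownMiniCluster();
    }
}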

Example 27 with Result

Use of org.apache.hadoop.hbase.client.Result in project hbase by apache.

From class HBaseTestingUtility, method getMetaTableRows (no-argument overload):

/**
   * Returns all rows from the hbase:meta table.
   *
   * @throws IOException When reading the rows fails.
   */
public List<byte[]> getMetaTableRows() throws IOException {
    // TODO: Redo using MetaTableAccessor class
    Table t = getConnection().getTable(TableName.META_TABLE_NAME);
    List<byte[]> rows = new ArrayList<>();
    ResultScanner s = t.getScanner(new Scan());
    for (Result result : s) {
        LOG.info("getMetaTableRows: row -> " + Bytes.toStringBinary(result.getRow()));
        rows.add(result.getRow());
    }
    s.close();
    t.close();
    return rows;
}
Also used: HTable (org.apache.hadoop.hbase.client.HTable), Table (org.apache.hadoop.hbase.client.Table), ResultScanner (org.apache.hadoop.hbase.client.ResultScanner), ArrayList (java.util.ArrayList), Scan (org.apache.hadoop.hbase.client.Scan), Result (org.apache.hadoop.hbase.client.Result)
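
The same scan-and-collect pattern can be written with try-with-resources so the Table and ResultScanner are closed even if the scan throws. A sketch of that restructuring, not code from HBaseTestingUtility:

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;

public final class MetaScan {
    // Collects every row key in hbase:meta. Table and ResultScanner both
    // implement Closeable, so try-with-resources releases them on any exit.
    public static List<byte[]> metaRows(Connection conn) throws IOException {
        List<byte[]> rows = new ArrayList<>();
        try (Table meta = conn.getTable(TableName.META_TABLE_NAME);
             ResultScanner scanner = meta.getScanner(new Scan())) {
            for (Result result : scanner) {
                rows.add(result.getRow());
            }
        }
        return rows;
    }
}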

Example 28 with Result

Use of org.apache.hadoop.hbase.client.Result in project hadoop by apache.

From class TestHBaseStorageFlowActivity, method checkFlowActivityTableSeveralRuns:

/**
   * Scans the flow activity table for the given flow and verifies that a
   * single row carries all three run ids.
   */
private void checkFlowActivityTableSeveralRuns(String cluster, String user, String flow, Configuration c1, String flowVersion1, long runid1, String flowVersion2, long runid2, String flowVersion3, long runid3, long appCreatedTime) throws IOException {
    Scan s = new Scan();
    s.addFamily(FlowActivityColumnFamily.INFO.getBytes());
    byte[] startRow = new FlowActivityRowKey(cluster, appCreatedTime, user, flow).getRowKey();
    s.setStartRow(startRow);
    String clusterStop = cluster + "1";
    byte[] stopRow = new FlowActivityRowKey(clusterStop, appCreatedTime, user, flow).getRowKey();
    s.setStopRow(stopRow);
    Connection conn = ConnectionFactory.createConnection(c1);
    Table table1 = conn.getTable(TableName.valueOf(FlowActivityTable.DEFAULT_TABLE_NAME));
    ResultScanner scanner = table1.getScanner(s);
    int rowCount = 0;
    for (Result result : scanner) {
        assertNotNull(result);
        assertTrue(!result.isEmpty());
        byte[] row = result.getRow();
        FlowActivityRowKey flowActivityRowKey = FlowActivityRowKey.parseRowKey(row);
        assertNotNull(flowActivityRowKey);
        assertEquals(cluster, flowActivityRowKey.getClusterId());
        assertEquals(user, flowActivityRowKey.getUserId());
        assertEquals(flow, flowActivityRowKey.getFlowName());
        Long dayTs = HBaseTimelineStorageUtils.getTopOfTheDayTimestamp(appCreatedTime);
        assertEquals(dayTs, flowActivityRowKey.getDayTimestamp());
        Map<byte[], byte[]> values = result.getFamilyMap(FlowActivityColumnFamily.INFO.getBytes());
        rowCount++;
        assertEquals(3, values.size());
        checkFlowActivityRunId(runid1, flowVersion1, values);
        checkFlowActivityRunId(runid2, flowVersion2, values);
        checkFlowActivityRunId(runid3, flowVersion3, values);
    }
    // The flow activity table writes into the current day's record, so if
    // this test runs across the midnight boundary it may fail: the inserts
    // would land in two records, one for each day.
    assertEquals(1, rowCount);
}
Also used: Table (org.apache.hadoop.hbase.client.Table), ResultScanner (org.apache.hadoop.hbase.client.ResultScanner), Connection (org.apache.hadoop.hbase.client.Connection), Scan (org.apache.hadoop.hbase.client.Scan), Result (org.apache.hadoop.hbase.client.Result)
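
The start/stop rows above bound the scan to a single flow's rows. The same idea in isolation, as a hedged generic sketch (the helper name prefixScan is invented here; HBase also offers Scan#setRowPrefixFilter for this):

import java.util.Arrays;
import org.apache.hadoop.hbase.client.Scan;

public final class ScanRanges {
    // Restricts a Scan to rows beginning with the given prefix: the prefix
    // is the inclusive start row, and the prefix with its final byte
    // incremented is the exclusive stop row. Assumes a non-empty prefix
    // whose final byte is not 0xFF; production code must handle the carry
    // or use setRowPrefixFilter instead.
    static Scan prefixScan(byte[] prefix) {
        Scan s = new Scan();
        s.setStartRow(prefix);
        byte[] stop = Arrays.copyOf(prefix, prefix.length);
        stop[stop.length - 1]++;  // lexicographic successor of the prefix
        s.setStopRow(stop);
        return s;
    }
}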

Example 29 with Result

Use of org.apache.hadoop.hbase.client.Result in project hadoop by apache.

From class TestHBaseStorageFlowActivity, method checkFlowActivityTable:

/**
   * Scans the flow activity table for the given flow and verifies that a
   * single row carries exactly one run id.
   */
private void checkFlowActivityTable(String cluster, String user, String flow, String flowVersion, long runid, Configuration c1, long appCreatedTime) throws IOException {
    Scan s = new Scan();
    s.addFamily(FlowActivityColumnFamily.INFO.getBytes());
    byte[] startRow = new FlowActivityRowKey(cluster, appCreatedTime, user, flow).getRowKey();
    s.setStartRow(startRow);
    String clusterStop = cluster + "1";
    byte[] stopRow = new FlowActivityRowKey(clusterStop, appCreatedTime, user, flow).getRowKey();
    s.setStopRow(stopRow);
    Connection conn = ConnectionFactory.createConnection(c1);
    Table table1 = conn.getTable(TableName.valueOf(FlowActivityTable.DEFAULT_TABLE_NAME));
    ResultScanner scanner = table1.getScanner(s);
    int rowCount = 0;
    for (Result result : scanner) {
        assertNotNull(result);
        assertTrue(!result.isEmpty());
        Map<byte[], byte[]> values = result.getFamilyMap(FlowActivityColumnFamily.INFO.getBytes());
        rowCount++;
        byte[] row = result.getRow();
        FlowActivityRowKey flowActivityRowKey = FlowActivityRowKey.parseRowKey(row);
        assertNotNull(flowActivityRowKey);
        assertEquals(cluster, flowActivityRowKey.getClusterId());
        assertEquals(user, flowActivityRowKey.getUserId());
        assertEquals(flow, flowActivityRowKey.getFlowName());
        Long dayTs = HBaseTimelineStorageUtils.getTopOfTheDayTimestamp(appCreatedTime);
        assertEquals(dayTs, flowActivityRowKey.getDayTimestamp());
        assertEquals(1, values.size());
        checkFlowActivityRunId(runid, flowVersion, values);
    }
    assertEquals(1, rowCount);
}
Also used: Table (org.apache.hadoop.hbase.client.Table), ResultScanner (org.apache.hadoop.hbase.client.ResultScanner), Connection (org.apache.hadoop.hbase.client.Connection), Scan (org.apache.hadoop.hbase.client.Scan), Result (org.apache.hadoop.hbase.client.Result)
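
Both tests read cells back through Result#getFamilyMap. A minimal sketch of that read-back step on its own (the method name dumpFamily is made up for illustration):

import java.util.Map;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.util.Bytes;

public final class FamilyMapDemo {
    // Prints every qualifier/value pair stored in one column family of a
    // Result. getFamilyMap returns the qualifiers in lexicographic order.
    static void dumpFamily(Result result, byte[] family) {
        Map<byte[], byte[]> values = result.getFamilyMap(family);
        for (Map.Entry<byte[], byte[]> e : values.entrySet()) {
            System.out.println(Bytes.toStringBinary(e.getKey())
                + " = " + Bytes.toStringBinary(e.getValue()));
        }
    }
}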

Example 30 with Result

Use of org.apache.hadoop.hbase.client.Result in project hadoop by apache.

From class TestHBaseStorageFlowRun, method checkMinMaxFlush:

/**
   * Reads the flow run row and verifies the flushed minimum start time and,
   * when checkMax is set, the maximum end time.
   */
private void checkMinMaxFlush(Configuration c1, long minTS, long startTs, int count, String cluster, String user, String flow, long runid, boolean checkMax) throws IOException {
    Connection conn = ConnectionFactory.createConnection(c1);
    // check in flow run table
    Table table1 = conn.getTable(TableName.valueOf(FlowRunTable.DEFAULT_TABLE_NAME));
    // scan the table and see that we get back the right min and max
    // timestamps
    byte[] startRow = new FlowRunRowKey(cluster, user, flow, runid).getRowKey();
    Get g = new Get(startRow);
    g.addColumn(FlowRunColumnFamily.INFO.getBytes(), FlowRunColumn.MIN_START_TIME.getColumnQualifierBytes());
    g.addColumn(FlowRunColumnFamily.INFO.getBytes(), FlowRunColumn.MAX_END_TIME.getColumnQualifierBytes());
    Result r1 = table1.get(g);
    assertNotNull(r1);
    assertTrue(!r1.isEmpty());
    Map<byte[], byte[]> values = r1.getFamilyMap(FlowRunColumnFamily.INFO.getBytes());
    int start = 10;
    assertEquals(2, r1.size());
    long starttime = Bytes.toLong(values.get(FlowRunColumn.MIN_START_TIME.getColumnQualifierBytes()));
    assertEquals(minTS, starttime);
    if (checkMax) {
        assertEquals(startTs + 2 * (count - start) + TestFlowDataGenerator.END_TS_INCR, Bytes.toLong(values.get(FlowRunColumn.MAX_END_TIME.getColumnQualifierBytes())));
    }
}
Also used: EntityTable (org.apache.hadoop.yarn.server.timelineservice.storage.entity.EntityTable), Table (org.apache.hadoop.hbase.client.Table), Get (org.apache.hadoop.hbase.client.Get), Connection (org.apache.hadoop.hbase.client.Connection), Result (org.apache.hadoop.hbase.client.Result)
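
Example 30 pairs a point Get with Bytes.toLong to decode a timestamp cell. The pattern in isolation, as a sketch with placeholder table/family/qualifier arguments:

import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public final class PointGetDemo {
    // Fetches one column of one row and decodes its value as a long.
    // Returns null when the row or the cell does not exist.
    static Long readLong(Connection conn, String table, byte[] row,
                         byte[] family, byte[] qualifier) throws IOException {
        try (Table t = conn.getTable(TableName.valueOf(table))) {
            Get g = new Get(row);
            g.addColumn(family, qualifier);  // fetch only the one cell
            Result r = t.get(g);
            byte[] value = r.getValue(family, qualifier);
            return value == null ? null : Bytes.toLong(value);
        }
    }
}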

Aggregations

Types that appear alongside Result in the indexed examples, with occurrence counts:

Result (org.apache.hadoop.hbase.client.Result): 715
ResultScanner (org.apache.hadoop.hbase.client.ResultScanner): 286
Test (org.junit.Test): 280
Scan (org.apache.hadoop.hbase.client.Scan): 279
Get (org.apache.hadoop.hbase.client.Get): 269
Table (org.apache.hadoop.hbase.client.Table): 224
Put (org.apache.hadoop.hbase.client.Put): 183
Cell (org.apache.hadoop.hbase.Cell): 177
IOException (java.io.IOException): 164
ArrayList (java.util.ArrayList): 143
TableName (org.apache.hadoop.hbase.TableName): 124
Connection (org.apache.hadoop.hbase.client.Connection): 101
Delete (org.apache.hadoop.hbase.client.Delete): 101
Configuration (org.apache.hadoop.conf.Configuration): 70
KeyValue (org.apache.hadoop.hbase.KeyValue): 70
HTableDescriptor (org.apache.hadoop.hbase.HTableDescriptor): 62
InterruptedIOException (java.io.InterruptedIOException): 58
PrivilegedExceptionAction (java.security.PrivilegedExceptionAction): 50
CellScanner (org.apache.hadoop.hbase.CellScanner): 47
HColumnDescriptor (org.apache.hadoop.hbase.HColumnDescriptor): 45