Use of org.apache.hadoop.hbase.client.Result in project hbase by apache: the class HBaseTestingUtility, method verifyNumericRows.
public void verifyNumericRows(Table table, final byte[] f, int startRow, int endRow,
    int replicaId) throws IOException {
  for (int i = startRow; i < endRow; i++) {
    String failMsg = "Failed verification of row: " + i;
    byte[] data = Bytes.toBytes(String.valueOf(i));
    Get get = new Get(data);
    get.setReplicaId(replicaId);
    get.setConsistency(Consistency.TIMELINE);
    Result result = table.get(get);
    assertTrue(failMsg, result.containsColumn(f, null));
    // Expected value goes first in JUnit's assertEquals.
    assertEquals(failMsg, 1, result.getColumnCells(f, null).size());
    Cell cell = result.getColumnLatestCell(f, null);
    assertTrue(failMsg, Bytes.equals(data, 0, data.length, cell.getValueArray(),
        cell.getValueOffset(), cell.getValueLength()));
  }
}
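For context, here is a minimal sketch of the write side these assertions expect; HBaseTestingUtility ships a loadNumericRows helper along these lines, but treat this as an illustration rather than the exact upstream code. The row key, the null-qualifier cell value, and the verified value are all the stringified row index.

public void loadNumericRows(Table table, final byte[] f, int startRow, int endRow)
    throws IOException {
  for (int i = startRow; i < endRow; i++) {
    byte[] data = Bytes.toBytes(String.valueOf(i));
    Put put = new Put(data);
    // Null qualifier, mirroring the Get in verifyNumericRows above.
    put.addColumn(f, null, data);
    table.put(put);
  }
}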
Use of org.apache.hadoop.hbase.client.Result in project hbase by apache: the class HBaseTestCase, method assertResultEquals.
protected void assertResultEquals(final HRegion region, final byte[] row,
    final byte[] family, final byte[] qualifier, final long timestamp,
    final byte[] value) throws IOException {
  Get get = new Get(row);
  get.setTimeStamp(timestamp);
  Result res = region.get(get);
  NavigableMap<byte[], NavigableMap<byte[], NavigableMap<Long, byte[]>>> map = res.getMap();
  byte[] res_value = map.get(family).get(qualifier).get(timestamp);
  if (value == null) {
    assertEquals(Bytes.toString(family) + " " + Bytes.toString(qualifier)
        + " at timestamp " + timestamp, null, res_value);
  } else {
    if (res_value == null) {
      fail(Bytes.toString(family) + " " + Bytes.toString(qualifier)
          + " at timestamp " + timestamp + " was expected to be \""
          + Bytes.toStringBinary(value) + "\" but was null");
    }
    // Compare as strings; comparing a byte[] to a String with assertEquals
    // would never pass.
    assertEquals(Bytes.toString(family) + " " + Bytes.toString(qualifier)
        + " at timestamp " + timestamp, Bytes.toString(value), Bytes.toString(res_value));
  }
}
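A hedged usage sketch: write one cell into the region at a known timestamp, then assert on it. The row, family, qualifier, and value names below are placeholders, not upstream test data.

// Hypothetical call site: put a single cell at a fixed timestamp, then verify it.
byte[] row = Bytes.toBytes("row1");
byte[] family = Bytes.toBytes("f");
byte[] qualifier = Bytes.toBytes("q");
long ts = 1234L;
byte[] value = Bytes.toBytes("v");
Put put = new Put(row);
put.addColumn(family, qualifier, ts, value);
region.put(put);
assertResultEquals(region, row, family, qualifier, ts, value);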
Use of org.apache.hadoop.hbase.client.Result in project hbase by apache: the class HBaseTestingUtility, method getMetaTableRows(TableName).
/**
* Returns all rows from the hbase:meta table for a given user table
*
* @throws IOException When reading the rows fails.
*/
public List<byte[]> getMetaTableRows(TableName tableName) throws IOException {
  // TODO: Redo using MetaTableAccessor.
  Table t = getConnection().getTable(TableName.META_TABLE_NAME);
  List<byte[]> rows = new ArrayList<>();
  ResultScanner s = t.getScanner(new Scan());
  for (Result result : s) {
    HRegionInfo info = MetaTableAccessor.getHRegionInfo(result);
    if (info == null) {
      LOG.error("No region info for row " + Bytes.toString(result.getRow()));
      // TODO: figure out what to do for this new hosed case.
      continue;
    }
    if (info.getTable().equals(tableName)) {
      LOG.info("getMetaTableRows: row -> " + Bytes.toStringBinary(result.getRow()) + info);
      rows.add(result.getRow());
    }
  }
  s.close();
  t.close();
  return rows;
}
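A possible test-side use, assuming a TEST_UTIL field of type HBaseTestingUtility and a previously created table; the assertion is deliberately weak, since the exact number of meta rows depends on how many regions the test created.

// Hypothetical usage: a created user table should have at least one row in hbase:meta.
TableName tn = TableName.valueOf("testGetMetaTableRows");
List<byte[]> metaRows = TEST_UTIL.getMetaTableRows(tn);
assertFalse("expected meta rows for " + tn, metaRows.isEmpty());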
Use of org.apache.hadoop.hbase.client.Result in project hbase by apache: the class HBaseTestingUtility, method getMetaTableRows().
/**
* Returns all rows from the hbase:meta table.
*
* @throws IOException When reading the rows fails.
*/
public List<byte[]> getMetaTableRows() throws IOException {
  // TODO: Redo using MetaTableAccessor class.
  Table t = getConnection().getTable(TableName.META_TABLE_NAME);
  List<byte[]> rows = new ArrayList<>();
  ResultScanner s = t.getScanner(new Scan());
  for (Result result : s) {
    LOG.info("getMetaTableRows: row -> " + Bytes.toStringBinary(result.getRow()));
    rows.add(result.getRow());
  }
  s.close();
  t.close();
  return rows;
}
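Both variants close the scanner and table only on the success path. A try-with-resources rewrite, sketched below under an illustrative name (getMetaTableRowsSafely is not an upstream method), closes them even when the scan throws, since Table and ResultScanner are both Closeable.

public List<byte[]> getMetaTableRowsSafely() throws IOException {
  List<byte[]> rows = new ArrayList<>();
  // Resources are closed in reverse order even if the iteration throws.
  try (Table t = getConnection().getTable(TableName.META_TABLE_NAME);
      ResultScanner s = t.getScanner(new Scan())) {
    for (Result result : s) {
      rows.add(result.getRow());
    }
  }
  return rows;
}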
Use of org.apache.hadoop.hbase.client.Result in project hadoop by apache: the class TestHBaseStorageFlowActivity, method checkFlowActivityTableSeveralRuns.
private void checkFlowActivityTableSeveralRuns(String cluster, String user, String flow,
    Configuration c1, String flowVersion1, long runid1, String flowVersion2, long runid2,
    String flowVersion3, long runid3, long appCreatedTime) throws IOException {
  Scan s = new Scan();
  s.addFamily(FlowActivityColumnFamily.INFO.getBytes());
  byte[] startRow = new FlowActivityRowKey(cluster, appCreatedTime, user, flow).getRowKey();
  s.setStartRow(startRow);
  String clusterStop = cluster + "1";
  byte[] stopRow = new FlowActivityRowKey(clusterStop, appCreatedTime, user, flow).getRowKey();
  s.setStopRow(stopRow);
  Connection conn = ConnectionFactory.createConnection(c1);
  Table table1 = conn.getTable(TableName.valueOf(FlowActivityTable.DEFAULT_TABLE_NAME));
  ResultScanner scanner = table1.getScanner(s);
  int rowCount = 0;
  for (Result result : scanner) {
    assertNotNull(result);
    assertFalse(result.isEmpty());
    byte[] row = result.getRow();
    FlowActivityRowKey flowActivityRowKey = FlowActivityRowKey.parseRowKey(row);
    assertNotNull(flowActivityRowKey);
    assertEquals(cluster, flowActivityRowKey.getClusterId());
    assertEquals(user, flowActivityRowKey.getUserId());
    assertEquals(flow, flowActivityRowKey.getFlowName());
    Long dayTs = HBaseTimelineStorageUtils.getTopOfTheDayTimestamp(appCreatedTime);
    assertEquals(dayTs, flowActivityRowKey.getDayTimestamp());
    Map<byte[], byte[]> values = result.getFamilyMap(FlowActivityColumnFamily.INFO.getBytes());
    rowCount++;
    assertEquals(3, values.size());
    checkFlowActivityRunId(runid1, flowVersion1, values);
    checkFlowActivityRunId(runid2, flowVersion2, values);
    checkFlowActivityRunId(runid3, flowVersion3, values);
  }
  // The flow activity table inserts into the current day's record, so if this
  // test runs across the midnight boundary it may fail: the three runs would
  // be split across two rows, one per day.
  assertEquals(1, rowCount);
}
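The day timestamp driving the single-row expectation comes from HBaseTimelineStorageUtils.getTopOfTheDayTimestamp. A minimal sketch of that truncation, assuming the usual milliseconds-per-day arithmetic (the upstream implementation may differ in detail):

// Sketch only: truncate an epoch-millis timestamp to midnight (UTC) of its day.
static final long MILLIS_ONE_DAY = 86_400_000L;

static long topOfTheDayTimestamp(long ts) {
  return ts - (ts % MILLIS_ONE_DAY);
}

Two appCreatedTime values on either side of midnight truncate to different day timestamps, which is exactly the two-row failure mode the comment above describes.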