use of org.apache.hadoop.hbase.client.Connection in project hbase by apache.
the class AccessControlLists method loadAll.
/**
* Load all permissions from the region server holding {@code _acl_},
* primarily intended for testing purposes.
*/
static Map<byte[], ListMultimap<String, TablePermission>> loadAll(Configuration conf)
    throws IOException {
  Map<byte[], ListMultimap<String, TablePermission>> allPerms =
      new TreeMap<>(Bytes.BYTES_RAWCOMPARATOR);
  // do a full scan of _acl_, filtering on only first table region rows
  Scan scan = new Scan();
  scan.addFamily(ACL_LIST_FAMILY);
  // TODO: Pass in a Connection rather than create one each time.
  try (Connection connection = ConnectionFactory.createConnection(conf);
       Table table = connection.getTable(ACL_TABLE_NAME);
       ResultScanner scanner = table.getScanner(scan)) {
    for (Result row : scanner) {
      ListMultimap<String, TablePermission> resultPerms = parsePermissions(row.getRow(), row);
      allPerms.put(row.getRow(), resultPerms);
    }
  }
  return allPerms;
}
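As the TODO above notes, creating a Connection on every call is expensive, since HBase Connections are heavyweight and meant to be shared. A minimal sketch of a hypothetical overload that accepts a caller-owned Connection instead (not part of the actual class; the caller would be responsible for closing it):

static Map<byte[], ListMultimap<String, TablePermission>> loadAll(Connection connection)
    throws IOException {
  // Hypothetical variant: reuses a caller-owned Connection instead of creating one per call.
  Map<byte[], ListMultimap<String, TablePermission>> allPerms =
      new TreeMap<>(Bytes.BYTES_RAWCOMPARATOR);
  Scan scan = new Scan();
  scan.addFamily(ACL_LIST_FAMILY);
  try (Table table = connection.getTable(ACL_TABLE_NAME);
       ResultScanner scanner = table.getScanner(scan)) {
    for (Result row : scanner) {
      allPerms.put(row.getRow(), parsePermissions(row.getRow(), row));
    }
  }
  return allPerms;
}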
use of org.apache.hadoop.hbase.client.Connection in project hbase by apache.
the class HBaseTestingUtility method createPreSplitLoadTestTable.
/**
* Creates a pre-split table for load testing. If the table already exists,
* logs a warning and continues.
* @return the number of regions the table was split into
*/
public static int createPreSplitLoadTestTable(Configuration conf, HTableDescriptor desc,
    HColumnDescriptor[] hcds, SplitAlgorithm splitter, int numRegionsPerServer)
    throws IOException {
  for (HColumnDescriptor hcd : hcds) {
    if (!desc.hasFamily(hcd.getName())) {
      desc.addFamily(hcd);
    }
  }
  int totalNumberOfRegions = 0;
  Connection unmanagedConnection = ConnectionFactory.createConnection(conf);
  Admin admin = unmanagedConnection.getAdmin();
  try {
    // Create a table with pre-split regions. The number of splits is
    // region servers * regions per region server.
    int numberOfServers = admin.getClusterStatus().getServers().size();
    if (numberOfServers == 0) {
      throw new IllegalStateException("No live regionservers");
    }
    totalNumberOfRegions = numberOfServers * numRegionsPerServer;
    LOG.info("Number of live regionservers: " + numberOfServers + ", "
        + "pre-splitting table into " + totalNumberOfRegions + " regions "
        + "(regions per server: " + numRegionsPerServer + ")");
    byte[][] splits = splitter.split(totalNumberOfRegions);
    admin.createTable(desc, splits);
  } catch (MasterNotRunningException e) {
    LOG.error("Master not running", e);
    throw new IOException(e);
  } catch (TableExistsException e) {
    LOG.warn("Table " + desc.getTableName() + " already exists, continuing");
  } finally {
    admin.close();
    unmanagedConnection.close();
  }
  return totalNumberOfRegions;
}
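A call site might look like the following sketch; the table and column family names are placeholders made up for the example, and HexStringSplit is one of the stock RegionSplitter.SplitAlgorithm implementations:

// Illustrative usage; "load_test" and "f1" are placeholder names.
Configuration conf = HBaseConfiguration.create();
HTableDescriptor desc = new HTableDescriptor(TableName.valueOf("load_test"));
HColumnDescriptor[] families = { new HColumnDescriptor("f1") };
int regions = HBaseTestingUtility.createPreSplitLoadTestTable(
    conf, desc, families, new RegionSplitter.HexStringSplit(), 5);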
use of org.apache.hadoop.hbase.client.Connection in project hbase by apache.
the class PerformanceEvaluation method runTest.
private void runTest(final Class<? extends Test> cmd, TestOptions opts)
    throws IOException, InterruptedException, ClassNotFoundException {
  // Log the configuration we're going to run with. Uses the JSON mapper because it's easy:
  // it does the TestOptions introspection for us and dumps the output in a readable format.
  LOG.info(cmd.getSimpleName() + " test run options=" + MAPPER.writeValueAsString(opts));
  Admin admin = null;
  Connection connection = null;
  try {
    connection = ConnectionFactory.createConnection(getConf());
    admin = connection.getAdmin();
    checkTable(admin, opts);
  } finally {
    if (admin != null) {
      admin.close();
    }
    if (connection != null) {
      connection.close();
    }
  }
  if (opts.nomapred) {
    doLocalClients(opts, getConf());
  } else {
    doMapReduce(opts, getConf());
  }
}
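Because both Connection and Admin are Closeable, the null-checked finally block can be collapsed with try-with-resources; an equivalent sketch of just the setup step:

// Equivalent setup; resources are closed automatically in reverse order of acquisition.
try (Connection connection = ConnectionFactory.createConnection(getConf());
     Admin admin = connection.getAdmin()) {
  checkTable(admin, opts);
}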
use of org.apache.hadoop.hbase.client.Connection in project hadoop by apache.
the class TestHBaseStorageFlowActivity method checkFlowActivityTableSeveralRuns.
private void checkFlowActivityTableSeveralRuns(String cluster, String user, String flow,
    Configuration c1, String flowVersion1, long runid1, String flowVersion2, long runid2,
    String flowVersion3, long runid3, long appCreatedTime) throws IOException {
  Scan s = new Scan();
  s.addFamily(FlowActivityColumnFamily.INFO.getBytes());
  byte[] startRow = new FlowActivityRowKey(cluster, appCreatedTime, user, flow).getRowKey();
  s.setStartRow(startRow);
  String clusterStop = cluster + "1";
  byte[] stopRow = new FlowActivityRowKey(clusterStop, appCreatedTime, user, flow).getRowKey();
  s.setStopRow(stopRow);
  int rowCount = 0;
  try (Connection conn = ConnectionFactory.createConnection(c1);
       Table table1 = conn.getTable(TableName.valueOf(FlowActivityTable.DEFAULT_TABLE_NAME));
       ResultScanner scanner = table1.getScanner(s)) {
    for (Result result : scanner) {
      assertNotNull(result);
      assertTrue(!result.isEmpty());
      byte[] row = result.getRow();
      FlowActivityRowKey flowActivityRowKey = FlowActivityRowKey.parseRowKey(row);
      assertNotNull(flowActivityRowKey);
      assertEquals(cluster, flowActivityRowKey.getClusterId());
      assertEquals(user, flowActivityRowKey.getUserId());
      assertEquals(flow, flowActivityRowKey.getFlowName());
      Long dayTs = HBaseTimelineStorageUtils.getTopOfTheDayTimestamp(appCreatedTime);
      assertEquals(dayTs, flowActivityRowKey.getDayTimestamp());
      Map<byte[], byte[]> values = result.getFamilyMap(FlowActivityColumnFamily.INFO.getBytes());
      rowCount++;
      assertEquals(3, values.size());
      checkFlowActivityRunId(runid1, flowVersion1, values);
      checkFlowActivityRunId(runid2, flowVersion2, values);
      checkFlowActivityRunId(runid3, flowVersion3, values);
    }
  }
  // The flow activity table inserts into the current day's record, so if this test
  // runs across the midnight boundary it may fail: rows would be written into two
  // records, one for each day.
  assertEquals(1, rowCount);
}
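The day bucketing that the comment above refers to comes from HBaseTimelineStorageUtils.getTopOfTheDayTimestamp, which truncates a millisecond timestamp to the start of its day. Conceptually it is a sketch like this (the actual implementation may differ):

// Sketch of top-of-day truncation: dropping the time-of-day remainder maps every
// event from the same day to a single row key timestamp.
static long topOfTheDayTimestamp(long ts) {
  final long millisPerDay = 24L * 60 * 60 * 1000;
  return ts - (ts % millisPerDay);
}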
use of org.apache.hadoop.hbase.client.Connection in project hadoop by apache.
the class TestHBaseStorageFlowActivity method checkFlowActivityTable.
private void checkFlowActivityTable(String cluster, String user, String flow, String flowVersion,
    long runid, Configuration c1, long appCreatedTime) throws IOException {
  Scan s = new Scan();
  s.addFamily(FlowActivityColumnFamily.INFO.getBytes());
  byte[] startRow = new FlowActivityRowKey(cluster, appCreatedTime, user, flow).getRowKey();
  s.setStartRow(startRow);
  String clusterStop = cluster + "1";
  byte[] stopRow = new FlowActivityRowKey(clusterStop, appCreatedTime, user, flow).getRowKey();
  s.setStopRow(stopRow);
  int rowCount = 0;
  try (Connection conn = ConnectionFactory.createConnection(c1);
       Table table1 = conn.getTable(TableName.valueOf(FlowActivityTable.DEFAULT_TABLE_NAME));
       ResultScanner scanner = table1.getScanner(s)) {
    for (Result result : scanner) {
      assertNotNull(result);
      assertTrue(!result.isEmpty());
      Map<byte[], byte[]> values = result.getFamilyMap(FlowActivityColumnFamily.INFO.getBytes());
      rowCount++;
      byte[] row = result.getRow();
      FlowActivityRowKey flowActivityRowKey = FlowActivityRowKey.parseRowKey(row);
      assertNotNull(flowActivityRowKey);
      assertEquals(cluster, flowActivityRowKey.getClusterId());
      assertEquals(user, flowActivityRowKey.getUserId());
      assertEquals(flow, flowActivityRowKey.getFlowName());
      Long dayTs = HBaseTimelineStorageUtils.getTopOfTheDayTimestamp(appCreatedTime);
      assertEquals(dayTs, flowActivityRowKey.getDayTimestamp());
      assertEquals(1, values.size());
      checkFlowActivityRunId(runid, flowVersion, values);
    }
  }
  assertEquals(1, rowCount);
}
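Both test helpers above build the same bounded scan and differ only in their per-row assertions. A hypothetical helper could factor that out; note the stop row trick of appending "1" to the cluster name, which, given the row key encoding, sorts past every row for that cluster and so serves as an exclusive upper bound:

// Hypothetical helper, not in the actual test class: builds a Scan over one
// cluster's flow activity rows, using cluster + "1" to derive the stop row.
private static Scan buildFlowActivityScan(String cluster, String user, String flow,
    long appCreatedTime) {
  Scan s = new Scan();
  s.addFamily(FlowActivityColumnFamily.INFO.getBytes());
  s.setStartRow(new FlowActivityRowKey(cluster, appCreatedTime, user, flow).getRowKey());
  s.setStopRow(new FlowActivityRowKey(cluster + "1", appCreatedTime, user, flow).getRowKey());
  return s;
}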