Search in sources :

Example 1 with Connection

use of org.apache.hadoop.hbase.client.Connection in project hbase by apache.

the class AccessControlLists method loadAll.

/**
   * Load all permissions from the region server holding {@code _acl_},
   * primarily intended for testing purposes.
   */
static Map<byte[], ListMultimap<String, TablePermission>> loadAll(Configuration conf) throws IOException {
    Map<byte[], ListMultimap<String, TablePermission>> allPerms = new TreeMap<>(Bytes.BYTES_RAWCOMPARATOR);
    // do a full scan of _acl_, filtering on only first table region rows
    Scan scan = new Scan();
    scan.addFamily(ACL_LIST_FAMILY);
    // TODO: Pass in a Connection rather than create one each time.
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Table table = connection.getTable(ACL_TABLE_NAME);
         ResultScanner scanner = table.getScanner(scan)) {
        for (Result row : scanner) {
            ListMultimap<String, TablePermission> resultPerms = parsePermissions(row.getRow(), row);
            allPerms.put(row.getRow(), resultPerms);
        }
    }
    return allPerms;
}
Also used : ResultScanner(org.apache.hadoop.hbase.client.ResultScanner) Table(org.apache.hadoop.hbase.client.Table) Connection(org.apache.hadoop.hbase.client.Connection) Scan(org.apache.hadoop.hbase.client.Scan) TreeMap(java.util.TreeMap) ArrayListMultimap(com.google.common.collect.ArrayListMultimap) ListMultimap(com.google.common.collect.ListMultimap) Result(org.apache.hadoop.hbase.client.Result)
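
The TODO in the snippet hints at a connection-reusing variant. Below is a minimal sketch of such an overload, not the project's actual API: it assumes the caller owns and closes the Connection, and it reuses the same ACL_TABLE_NAME scan as above.

static Map<byte[], ListMultimap<String, TablePermission>> loadAll(Connection connection) throws IOException {
    // The caller owns the Connection; this method only borrows it.
    Map<byte[], ListMultimap<String, TablePermission>> allPerms = new TreeMap<>(Bytes.BYTES_RAWCOMPARATOR);
    Scan scan = new Scan();
    scan.addFamily(ACL_LIST_FAMILY);
    try (Table table = connection.getTable(ACL_TABLE_NAME);
         ResultScanner scanner = table.getScanner(scan)) {
        for (Result row : scanner) {
            allPerms.put(row.getRow(), parsePermissions(row.getRow(), row));
        }
    }
    return allPerms;
}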

Example 2 with Connection

use of org.apache.hadoop.hbase.client.Connection in project hbase by apache.

the class HBaseTestingUtility method createPreSplitLoadTestTable.

/**
   * Creates a pre-split table for load testing. If the table already exists,
   * logs a warning and continues.
   * @return the number of regions the table was split into
   */
public static int createPreSplitLoadTestTable(Configuration conf, HTableDescriptor desc, HColumnDescriptor[] hcds, SplitAlgorithm splitter, int numRegionsPerServer) throws IOException {
    for (HColumnDescriptor hcd : hcds) {
        if (!desc.hasFamily(hcd.getName())) {
            desc.addFamily(hcd);
        }
    }
    int totalNumberOfRegions = 0;
    Connection unmanagedConnection = ConnectionFactory.createConnection(conf);
    Admin admin = unmanagedConnection.getAdmin();
    try {
        // Create a table with pre-split regions. The number of splits is:
        //    region servers * regions per region server.
        int numberOfServers = admin.getClusterStatus().getServers().size();
        if (numberOfServers == 0) {
            throw new IllegalStateException("No live regionservers");
        }
        totalNumberOfRegions = numberOfServers * numRegionsPerServer;
        LOG.info("Number of live regionservers: " + numberOfServers + ", " + "pre-splitting table into " + totalNumberOfRegions + " regions " + "(regions per server: " + numRegionsPerServer + ")");
        byte[][] splits = splitter.split(totalNumberOfRegions);
        admin.createTable(desc, splits);
    } catch (MasterNotRunningException e) {
        LOG.error("Master not running", e);
        throw new IOException(e);
    } catch (TableExistsException e) {
        LOG.warn("Table " + desc.getTableName() + " already exists, continuing");
    } finally {
        admin.close();
        unmanagedConnection.close();
    }
    return totalNumberOfRegions;
}
Also used : Connection(org.apache.hadoop.hbase.client.Connection) IOException(java.io.IOException) HBaseAdmin(org.apache.hadoop.hbase.client.HBaseAdmin) Admin(org.apache.hadoop.hbase.client.Admin)
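
A hedged usage sketch, not taken from the source: assuming an in-scope Configuration conf and HBase's org.apache.hadoop.hbase.util.RegionSplitter.HexStringSplit as the split algorithm, a load test could call the helper like this. The table name, family name, and region count are illustrative.

// Illustrative call; names and counts are made up for the example.
HTableDescriptor desc = new HTableDescriptor(TableName.valueOf("load_test"));
HColumnDescriptor[] families = { new HColumnDescriptor("cf") };
int regions = HBaseTestingUtility.createPreSplitLoadTestTable(
    conf, desc, families, new RegionSplitter.HexStringSplit(), 5);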

Example 3 with Connection

use of org.apache.hadoop.hbase.client.Connection in project hbase by apache.

the class PerformanceEvaluation method runTest.

private void runTest(final Class<? extends Test> cmd, TestOptions opts) throws IOException, InterruptedException, ClassNotFoundException {
    // Log the configuration we're going to run with. The JSON mapper handles the
    // TestOptions introspection for us and dumps the output in a readable format.
    LOG.info(cmd.getSimpleName() + " test run options=" + MAPPER.writeValueAsString(opts));
    try (Connection connection = ConnectionFactory.createConnection(getConf());
         Admin admin = connection.getAdmin()) {
        checkTable(admin, opts);
    }
    if (opts.nomapred) {
        doLocalClients(opts, getConf());
    } else {
        doMapReduce(opts, getConf());
    }
}
Also used : Connection(org.apache.hadoop.hbase.client.Connection) Admin(org.apache.hadoop.hbase.client.Admin)

Example 4 with Connection

use of org.apache.hadoop.hbase.client.Connection in project hadoop by apache.

the class TestHBaseStorageFlowActivity method checkFlowActivityTableSeveralRuns.

private void checkFlowActivityTableSeveralRuns(String cluster, String user, String flow, Configuration c1, String flowVersion1, long runid1, String flowVersion2, long runid2, String flowVersion3, long runid3, long appCreatedTime) throws IOException {
    Scan s = new Scan();
    s.addFamily(FlowActivityColumnFamily.INFO.getBytes());
    byte[] startRow = new FlowActivityRowKey(cluster, appCreatedTime, user, flow).getRowKey();
    s.setStartRow(startRow);
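    // Assumption: the row key starts with the cluster id followed by a separator
    // that sorts before '1', so cluster + "1" below acts as an exclusive stop row
    // bounding the scan to this cluster's rows.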
    String clusterStop = cluster + "1";
    byte[] stopRow = new FlowActivityRowKey(clusterStop, appCreatedTime, user, flow).getRowKey();
    s.setStopRow(stopRow);
    int rowCount = 0;
    // Close the connection, table, and scanner even if an assertion fails.
    try (Connection conn = ConnectionFactory.createConnection(c1);
         Table table1 = conn.getTable(TableName.valueOf(FlowActivityTable.DEFAULT_TABLE_NAME));
         ResultScanner scanner = table1.getScanner(s)) {
        for (Result result : scanner) {
            assertNotNull(result);
            assertFalse(result.isEmpty());
            byte[] row = result.getRow();
            FlowActivityRowKey flowActivityRowKey = FlowActivityRowKey.parseRowKey(row);
            assertNotNull(flowActivityRowKey);
            assertEquals(cluster, flowActivityRowKey.getClusterId());
            assertEquals(user, flowActivityRowKey.getUserId());
            assertEquals(flow, flowActivityRowKey.getFlowName());
            Long dayTs = HBaseTimelineStorageUtils.getTopOfTheDayTimestamp(appCreatedTime);
            assertEquals(dayTs, flowActivityRowKey.getDayTimestamp());
            Map<byte[], byte[]> values = result.getFamilyMap(FlowActivityColumnFamily.INFO.getBytes());
            rowCount++;
            assertEquals(3, values.size());
            checkFlowActivityRunId(runid1, flowVersion1, values);
            checkFlowActivityRunId(runid2, flowVersion2, values);
            checkFlowActivityRunId(runid3, flowVersion3, values);
        }
    }
    // The flow activity table always writes into the current day's record, so if
    // this test runs across the midnight boundary it may fail: the writes would
    // land in two daily rows, one for each day.
    assertEquals(1, rowCount);
}
Also used : Table(org.apache.hadoop.hbase.client.Table) ResultScanner(org.apache.hadoop.hbase.client.ResultScanner) Connection(org.apache.hadoop.hbase.client.Connection) Scan(org.apache.hadoop.hbase.client.Scan) Result(org.apache.hadoop.hbase.client.Result)
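
The day-timestamp assertion depends on truncating a millisecond timestamp to the start of its day. A minimal sketch of that truncation, under the assumption that HBaseTimelineStorageUtils.getTopOfTheDayTimestamp does plain modular arithmetic on UTC time:

// Assumption: day boundaries are computed in UTC via modular arithmetic.
static long topOfTheDayTimestamp(long ts) {
    final long MILLIS_PER_DAY = 24L * 60 * 60 * 1000;
    return ts - (ts % MILLIS_PER_DAY);  // e.g. 1500000000000L -> 1499990400000L
}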

Example 5 with Connection

use of org.apache.hadoop.hbase.client.Connection in project hadoop by apache.

the class TestHBaseStorageFlowActivity method checkFlowActivityTable.

private void checkFlowActivityTable(String cluster, String user, String flow, String flowVersion, long runid, Configuration c1, long appCreatedTime) throws IOException {
    Scan s = new Scan();
    s.addFamily(FlowActivityColumnFamily.INFO.getBytes());
    byte[] startRow = new FlowActivityRowKey(cluster, appCreatedTime, user, flow).getRowKey();
    s.setStartRow(startRow);
    String clusterStop = cluster + "1";
    byte[] stopRow = new FlowActivityRowKey(clusterStop, appCreatedTime, user, flow).getRowKey();
    s.setStopRow(stopRow);
    int rowCount = 0;
    // Close the connection, table, and scanner even if an assertion fails.
    try (Connection conn = ConnectionFactory.createConnection(c1);
         Table table1 = conn.getTable(TableName.valueOf(FlowActivityTable.DEFAULT_TABLE_NAME));
         ResultScanner scanner = table1.getScanner(s)) {
        for (Result result : scanner) {
            assertNotNull(result);
            assertFalse(result.isEmpty());
            Map<byte[], byte[]> values = result.getFamilyMap(FlowActivityColumnFamily.INFO.getBytes());
            rowCount++;
            byte[] row = result.getRow();
            FlowActivityRowKey flowActivityRowKey = FlowActivityRowKey.parseRowKey(row);
            assertNotNull(flowActivityRowKey);
            assertEquals(cluster, flowActivityRowKey.getClusterId());
            assertEquals(user, flowActivityRowKey.getUserId());
            assertEquals(flow, flowActivityRowKey.getFlowName());
            Long dayTs = HBaseTimelineStorageUtils.getTopOfTheDayTimestamp(appCreatedTime);
            assertEquals(dayTs, flowActivityRowKey.getDayTimestamp());
            assertEquals(1, values.size());
            checkFlowActivityRunId(runid, flowVersion, values);
        }
    }
    assertEquals(1, rowCount);
}
Also used : Table(org.apache.hadoop.hbase.client.Table) ResultScanner(org.apache.hadoop.hbase.client.ResultScanner) Connection(org.apache.hadoop.hbase.client.Connection) Scan(org.apache.hadoop.hbase.client.Scan) Result(org.apache.hadoop.hbase.client.Result)

Aggregations

Connection (org.apache.hadoop.hbase.client.Connection): 539
Table (org.apache.hadoop.hbase.client.Table): 316
Test (org.junit.Test): 256
IOException (java.io.IOException): 223
TableName (org.apache.hadoop.hbase.TableName): 181
Admin (org.apache.hadoop.hbase.client.Admin): 167
Result (org.apache.hadoop.hbase.client.Result): 156
Configuration (org.apache.hadoop.conf.Configuration): 143
Put (org.apache.hadoop.hbase.client.Put): 121
Scan (org.apache.hadoop.hbase.client.Scan): 115
ResultScanner (org.apache.hadoop.hbase.client.ResultScanner): 104
PrivilegedExceptionAction (java.security.PrivilegedExceptionAction): 85
Get (org.apache.hadoop.hbase.client.Get): 84
HBaseConfiguration (org.apache.hadoop.hbase.HBaseConfiguration): 78
Delete (org.apache.hadoop.hbase.client.Delete): 76
TableDescriptor (org.apache.hadoop.hbase.client.TableDescriptor): 56
Cell (org.apache.hadoop.hbase.Cell): 55
InterruptedIOException (java.io.InterruptedIOException): 47
ArrayList (java.util.ArrayList): 46
UserGroupInformation (org.apache.hadoop.security.UserGroupInformation): 46
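
The counts above reflect one recurring pattern across all five examples: create a Connection, get a Table or Admin from it, do the work, and close everything. A minimal self-contained sketch of that pattern with try-with-resources; the table and family names are illustrative, not from any of the projects above.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.*;
import org.apache.hadoop.hbase.util.Bytes;

public class ScanExample {
    public static void main(String[] args) throws IOException {
        Configuration conf = HBaseConfiguration.create();
        // Resources close in reverse declaration order: table, then connection.
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Table table = connection.getTable(TableName.valueOf("my_table"))) {
            Scan scan = new Scan();
            scan.addFamily(Bytes.toBytes("cf"));
            try (ResultScanner scanner = table.getScanner(scan)) {
                for (Result result : scanner) {
                    System.out.println("row: " + Bytes.toString(result.getRow()));
                }
            }
        }
    }
}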