Search in sources :

Example 16 with Table

use of org.apache.hadoop.hbase.client.Table in project hbase by apache.

The class TestCoprocessorEndpoint, method testCoprocessorService.

@Test
public void testCoprocessorService() throws Throwable {
    // Look up all region locations first so the per-region responses collected
    // below can be checked against the actual region names.
    List<HRegionLocation> regions;
    try (RegionLocator rl = util.getConnection().getRegionLocator(TEST_TABLE)) {
        regions = rl.getAllRegionLocations();
    }
    final TestProtos.EchoRequestProto request =
        TestProtos.EchoRequestProto.newBuilder().setMessage("hello").build();
    // Synchronized: Batch.Callback#update may be invoked from multiple threads,
    // one invocation per region.
    final Map<byte[], String> results =
        Collections.synchronizedMap(new TreeMap<byte[], String>(Bytes.BYTES_COMPARATOR));
    final RpcController controller = new ServerRpcController();
    // try-with-resources replaces the original try/finally { table.close(); }.
    try (Table table = util.getConnection().getTable(TEST_TABLE)) {
        // scan: for all regions
        table.coprocessorService(TestRpcServiceProtos.TestProtobufRpcProto.class,
            ROWS[0], ROWS[ROWS.length - 1],
            newEchoCall(controller, request), newEchoCallback(results));
        for (Map.Entry<byte[], String> e : results.entrySet()) {
            LOG.info("Got value " + e.getValue() + " for region " + Bytes.toStringBinary(e.getKey()));
        }
        // Full key range must touch every one of the three pre-split regions.
        assertEquals(3, results.size());
        for (HRegionLocation info : regions) {
            LOG.info("Region info is " + info.getRegionInfo().getRegionNameAsString());
            assertTrue(results.containsKey(info.getRegionInfo().getRegionName()));
        }
        results.clear();
        // scan: for region 2 and region 3
        table.coprocessorService(TestRpcServiceProtos.TestProtobufRpcProto.class,
            ROWS[rowSeperator1], ROWS[ROWS.length - 1],
            newEchoCall(controller, request), newEchoCallback(results));
        for (Map.Entry<byte[], String> e : results.entrySet()) {
            LOG.info("Got value " + e.getValue() + " for region " + Bytes.toStringBinary(e.getKey()));
        }
        assertEquals(2, results.size());
    }
}

/**
 * Builds the {@link Batch.Call} shared by both scans in
 * {@link #testCoprocessorService()}: invokes the echo endpoint once on the
 * given coprocessor instance and blocks for its response.
 */
private static Batch.Call<TestRpcServiceProtos.TestProtobufRpcProto, TestProtos.EchoResponseProto> newEchoCall(
        final RpcController controller, final TestProtos.EchoRequestProto request) {
    return new Batch.Call<TestRpcServiceProtos.TestProtobufRpcProto, TestProtos.EchoResponseProto>() {
        @Override
        public TestProtos.EchoResponseProto call(TestRpcServiceProtos.TestProtobufRpcProto instance)
                throws IOException {
            LOG.debug("Default response is " + TestProtos.EchoRequestProto.getDefaultInstance());
            CoprocessorRpcUtils.BlockingRpcCallback<TestProtos.EchoResponseProto> callback =
                new CoprocessorRpcUtils.BlockingRpcCallback<>();
            instance.echo(controller, request, callback);
            TestProtos.EchoResponseProto response = callback.get();
            LOG.debug("Batch.Call returning result " + response);
            return response;
        }
    };
}

/**
 * Builds the {@link Batch.Callback} shared by both scans in
 * {@link #testCoprocessorService()}: verifies each region's echo response and
 * records it keyed by region name.
 */
private static Batch.Callback<TestProtos.EchoResponseProto> newEchoCallback(
        final Map<byte[], String> results) {
    return new Batch.Callback<TestProtos.EchoResponseProto>() {
        @Override
        public void update(byte[] region, byte[] row, TestProtos.EchoResponseProto result) {
            assertNotNull(result);
            assertEquals("hello", result.getMessage());
            results.put(region, result.getMessage());
        }
    };
}
Also used : TestRpcServiceProtos(org.apache.hadoop.hbase.ipc.protobuf.generated.TestRpcServiceProtos) TestProtos(org.apache.hadoop.hbase.ipc.protobuf.generated.TestProtos) ServerRpcController(org.apache.hadoop.hbase.ipc.ServerRpcController) HRegionLocation(org.apache.hadoop.hbase.HRegionLocation) Batch(org.apache.hadoop.hbase.client.coprocessor.Batch) RegionLocator(org.apache.hadoop.hbase.client.RegionLocator) Table(org.apache.hadoop.hbase.client.Table) IOException(java.io.IOException) ServerRpcController(org.apache.hadoop.hbase.ipc.ServerRpcController) RpcController(com.google.protobuf.RpcController) CoprocessorRpcUtils(org.apache.hadoop.hbase.ipc.CoprocessorRpcUtils) Map(java.util.Map) TreeMap(java.util.TreeMap) Test(org.junit.Test)

Example 17 with Table

use of org.apache.hadoop.hbase.client.Table in project hbase by apache.

The class TestCoprocessorEndpoint, method testAggregation.

@Test
public void testAggregation() throws Throwable {
    // try-with-resources closes the table even when an assertion below fails;
    // the original only closed it on the success path and leaked it otherwise.
    try (Table table = util.getConnection().getTable(TEST_TABLE)) {
        // scan: all regions
        assertSumOfRows(table, ROWS[0], 0);
        // scan: for region 2 and region 3
        assertSumOfRows(table, ROWS[rowSeperator1], rowSeperator1);
    }
}

/**
 * Sums the qualifier values from {@code startRow} to the last row via the
 * aggregation coprocessor and checks the total against the arithmetic series
 * firstRowIndex + (firstRowIndex + 1) + ... + (ROWSIZE - 1), since row i
 * stores the integer value i (see setupBeforeClass).
 *
 * @param table         the test table to aggregate over
 * @param startRow      first row key included in the scan range
 * @param firstRowIndex index of that row, i.e. the first term of the expected series
 */
private void assertSumOfRows(Table table, byte[] startRow, int firstRowIndex) throws Throwable {
    Map<byte[], Long> results = sum(table, TEST_FAMILY, TEST_QUALIFIER, startRow, ROWS[ROWS.length - 1]);
    int sumResult = 0;
    for (Map.Entry<byte[], Long> e : results.entrySet()) {
        LOG.info("Got value " + e.getValue() + " for region " + Bytes.toStringBinary(e.getKey()));
        sumResult += e.getValue();
    }
    int expectedResult = 0;
    for (int i = firstRowIndex; i < ROWSIZE; i++) {
        expectedResult += i;
    }
    assertEquals("Invalid result", expectedResult, sumResult);
}
Also used : Table(org.apache.hadoop.hbase.client.Table) Map(java.util.Map) TreeMap(java.util.TreeMap) Test(org.junit.Test)

Example 18 with Table

use of org.apache.hadoop.hbase.client.Table in project hbase by apache.

The class TestCoprocessorEndpoint, method setupBeforeClass.

@BeforeClass
public static void setupBeforeClass() throws Exception {
    // Configure which coprocessors should be loaded before the mini cluster starts.
    Configuration conf = util.getConfiguration();
    conf.setInt(HConstants.HBASE_CLIENT_OPERATION_TIMEOUT, 5000);
    conf.setStrings(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY,
        org.apache.hadoop.hbase.coprocessor.ColumnAggregationEndpoint.class.getName(),
        ProtobufCoprocessorService.class.getName());
    conf.setStrings(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY,
        ProtobufCoprocessorService.class.getName());
    util.startMiniCluster(2);
    Admin admin = util.getAdmin();
    HTableDescriptor desc = new HTableDescriptor(TEST_TABLE);
    desc.addFamily(new HColumnDescriptor(TEST_FAMILY));
    // Pre-split at the separator rows so the tests run against three regions.
    admin.createTable(desc, new byte[][] { ROWS[rowSeperator1], ROWS[rowSeperator2] });
    util.waitUntilAllRegionsAssigned(TEST_TABLE);
    // Seed row i with the integer value i. try-with-resources guarantees the
    // table is closed even if a put throws (the original leaked it on failure).
    try (Table table = util.getConnection().getTable(TEST_TABLE)) {
        for (int i = 0; i < ROWSIZE; i++) {
            Put put = new Put(ROWS[i]);
            put.addColumn(TEST_FAMILY, TEST_QUALIFIER, Bytes.toBytes(i));
            table.put(put);
        }
    }
}
Also used : Table(org.apache.hadoop.hbase.client.Table) Configuration(org.apache.hadoop.conf.Configuration) HColumnDescriptor(org.apache.hadoop.hbase.HColumnDescriptor) Admin(org.apache.hadoop.hbase.client.Admin) Put(org.apache.hadoop.hbase.client.Put) HTableDescriptor(org.apache.hadoop.hbase.HTableDescriptor) BeforeClass(org.junit.BeforeClass)

Example 19 with Table

use of org.apache.hadoop.hbase.client.Table in project hbase by apache.

The class TestServerCustomProtocol, method testCompoundCall.

@Test
public void testCompoundCall() throws Throwable {
    // Fire the compound hello+ping endpoint over [ROW_A, ROW_C] and confirm
    // every hosting region replied "Hello, pong".
    try (Table tbl = util.getConnection().getTable(TEST_TABLE);
        RegionLocator regionLocator = util.getConnection().getRegionLocator(TEST_TABLE)) {
        final Map<byte[], String> perRegion = compoundOfHelloAndPing(tbl, ROW_A, ROW_C);
        for (byte[] row : new byte[][] { ROW_A, ROW_B, ROW_C }) {
            verifyRegionResults(regionLocator, perRegion, "Hello, pong", row);
        }
    }
}
Also used : RegionLocator(org.apache.hadoop.hbase.client.RegionLocator) Table(org.apache.hadoop.hbase.client.Table) Test(org.junit.Test)

Example 20 with Table

use of org.apache.hadoop.hbase.client.Table in project hbase by apache.

The class AccessController, method updateACL.

/**
 * Writes all table ACLs for the tables in the given Map up into ZooKeeper
 * znodes.  This is called to synchronize ACL changes following {@code _acl_}
 * table updates.
 */
void updateACL(RegionCoprocessorEnvironment e, final Map<byte[], List<Cell>> familyMap) {
    // Collect the distinct row keys that carry ACL-family cells in this update.
    Set<byte[]> aclRows = new TreeSet<>(Bytes.BYTES_RAWCOMPARATOR);
    for (Map.Entry<byte[], List<Cell>> family : familyMap.entrySet()) {
        for (Cell cell : family.getValue()) {
            if (CellUtil.matchingFamily(cell, AccessControlLists.ACL_LIST_FAMILY)) {
                aclRows.add(CellUtil.cloneRow(cell));
            }
        }
    }
    ZKPermissionWatcher watcher = this.authManager.getZKPermissionWatcher();
    Configuration conf = regionEnv.getConfiguration();
    for (byte[] aclRow : aclRows) {
        // Mirror each entry's permissions into ZooKeeper. A failure on one entry
        // is logged but must not stop the remaining entries from being synced.
        try (Table aclTable = regionEnv.getTable(AccessControlLists.ACL_TABLE_NAME)) {
            ListMultimap<String, TablePermission> perms =
                AccessControlLists.getPermissions(conf, aclRow, aclTable);
            watcher.writeToZookeeper(aclRow, AccessControlLists.writePermissionsAsBytes(perms, conf));
        } catch (IOException ex) {
            LOG.error("Failed updating permissions mirror for '" + Bytes.toString(aclRow) + "'", ex);
        }
    }
}
Also used : Table(org.apache.hadoop.hbase.client.Table) Configuration(org.apache.hadoop.conf.Configuration) CompoundConfiguration(org.apache.hadoop.hbase.CompoundConfiguration) IOException(java.io.IOException) DoNotRetryIOException(org.apache.hadoop.hbase.DoNotRetryIOException) TreeSet(java.util.TreeSet) FilterList(org.apache.hadoop.hbase.filter.FilterList) ArrayList(java.util.ArrayList) List(java.util.List) Map(java.util.Map) TreeMap(java.util.TreeMap) HashMap(java.util.HashMap) Cell(org.apache.hadoop.hbase.Cell)

Aggregations

Table (org.apache.hadoop.hbase.client.Table)660 Test (org.junit.Test)421 Put (org.apache.hadoop.hbase.client.Put)237 TableName (org.apache.hadoop.hbase.TableName)227 Result (org.apache.hadoop.hbase.client.Result)224 Connection (org.apache.hadoop.hbase.client.Connection)191 Scan (org.apache.hadoop.hbase.client.Scan)174 ResultScanner (org.apache.hadoop.hbase.client.ResultScanner)160 IOException (java.io.IOException)157 HTableDescriptor (org.apache.hadoop.hbase.HTableDescriptor)134 HColumnDescriptor (org.apache.hadoop.hbase.HColumnDescriptor)119 Get (org.apache.hadoop.hbase.client.Get)107 Delete (org.apache.hadoop.hbase.client.Delete)99 Admin (org.apache.hadoop.hbase.client.Admin)95 ArrayList (java.util.ArrayList)85 Cell (org.apache.hadoop.hbase.Cell)83 HRegionInfo (org.apache.hadoop.hbase.HRegionInfo)73 Configuration (org.apache.hadoop.conf.Configuration)71 Path (org.apache.hadoop.fs.Path)60 RegionLocator (org.apache.hadoop.hbase.client.RegionLocator)59