Example 86 with ResultScanner

Use of org.apache.hadoop.hbase.client.ResultScanner in project hbase by apache.

The class TestAccessControlFilter, method doQualifierAccess:

private void doQualifierAccess(final Table table) throws Exception {
    // set permissions
    SecureTestUtil.grantOnTable(TEST_UTIL, READER.getShortName(), TABLE, null, null, Permission.Action.READ);
    SecureTestUtil.grantOnTable(TEST_UTIL, LIMITED.getShortName(), TABLE, FAMILY, PUBLIC_COL, Permission.Action.READ);
    // put some test data
    List<Put> puts = new ArrayList<>(100);
    for (int i = 0; i < 100; i++) {
        Put p = new Put(Bytes.toBytes(i));
        p.addColumn(FAMILY, PRIVATE_COL, Bytes.toBytes("secret " + i));
        p.addColumn(FAMILY, PUBLIC_COL, Bytes.toBytes("info " + i));
        puts.add(p);
    }
    table.put(puts);
    // test read
    READER.runAs(new PrivilegedExceptionAction<Object>() {

        public Object run() throws Exception {
            Configuration conf = new Configuration(TEST_UTIL.getConfiguration());
            // force a new RS connection
            conf.set("testkey", UUID.randomUUID().toString());
            Connection connection = ConnectionFactory.createConnection(conf);
            Table t = connection.getTable(TABLE);
            try {
                ResultScanner rs = t.getScanner(new Scan());
                int rowcnt = 0;
                for (Result r : rs) {
                    rowcnt++;
                    int rownum = Bytes.toInt(r.getRow());
                    assertTrue(r.containsColumn(FAMILY, PRIVATE_COL));
                    assertEquals("secret " + rownum, Bytes.toString(r.getValue(FAMILY, PRIVATE_COL)));
                    assertTrue(r.containsColumn(FAMILY, PUBLIC_COL));
                    assertEquals("info " + rownum, Bytes.toString(r.getValue(FAMILY, PUBLIC_COL)));
                }
                assertEquals("Expected 100 rows returned", 100, rowcnt);
                return null;
            } finally {
                t.close();
                connection.close();
            }
        }
    });
    // test read with qualifier filter
    LIMITED.runAs(new PrivilegedExceptionAction<Object>() {

        public Object run() throws Exception {
            Configuration conf = new Configuration(TEST_UTIL.getConfiguration());
            // force a new RS connection
            conf.set("testkey", UUID.randomUUID().toString());
            Connection connection = ConnectionFactory.createConnection(conf);
            Table t = connection.getTable(TABLE);
            try {
                ResultScanner rs = t.getScanner(new Scan());
                int rowcnt = 0;
                for (Result r : rs) {
                    rowcnt++;
                    int rownum = Bytes.toInt(r.getRow());
                    assertFalse(r.containsColumn(FAMILY, PRIVATE_COL));
                    assertTrue(r.containsColumn(FAMILY, PUBLIC_COL));
                    assertEquals("info " + rownum, Bytes.toString(r.getValue(FAMILY, PUBLIC_COL)));
                }
                assertEquals("Expected 100 rows returned", 100, rowcnt);
                return null;
            } finally {
                t.close();
                connection.close();
            }
        }
    });
    // test as user with no permission
    DENIED.runAs(new PrivilegedExceptionAction<Object>() {

        public Object run() throws Exception {
            Configuration conf = new Configuration(TEST_UTIL.getConfiguration());
            // force a new RS connection
            conf.set("testkey", UUID.randomUUID().toString());
            Connection connection = ConnectionFactory.createConnection(conf);
            Table t = connection.getTable(TABLE);
            try {
                ResultScanner rs = t.getScanner(new Scan());
                int rowcnt = 0;
                for (Result r : rs) {
                    rowcnt++;
                    int rownum = Bytes.toInt(r.getRow());
                    assertFalse(r.containsColumn(FAMILY, PRIVATE_COL));
                    assertTrue(r.containsColumn(FAMILY, PUBLIC_COL));
                    assertEquals("info " + rownum, Bytes.toString(r.getValue(FAMILY, PUBLIC_COL)));
                }
                assertEquals("Expected 0 rows returned", 0, rowcnt);
                return null;
            } finally {
                t.close();
                connection.close();
            }
        }
    });
}
Also used: Table (org.apache.hadoop.hbase.client.Table), ResultScanner (org.apache.hadoop.hbase.client.ResultScanner), Configuration (org.apache.hadoop.conf.Configuration), ArrayList (java.util.ArrayList), Connection (org.apache.hadoop.hbase.client.Connection), Put (org.apache.hadoop.hbase.client.Put), Result (org.apache.hadoop.hbase.client.Result), Scan (org.apache.hadoop.hbase.client.Scan)
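
A note on resource handling: the try/finally blocks above predate try-with-resources. Below is a minimal sketch (not from the hbase project) of the same scan-and-count loop with the Connection, Table, and ResultScanner all managed automatically; the configuration and table name are assumed to be supplied by the caller.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;

public class ScanCountSketch {
    // Counts the rows visible to the calling user. Connection, Table and
    // ResultScanner are all Closeable, so try-with-resources releases them
    // even if the scan fails mid-iteration.
    static int countRows(Configuration conf, TableName tableName) throws IOException {
        try (Connection connection = ConnectionFactory.createConnection(conf);
                Table table = connection.getTable(tableName);
                ResultScanner scanner = table.getScanner(new Scan())) {
            int rowCount = 0;
            for (Result r : scanner) {
                rowCount++;
            }
            return rowCount;
        }
    }
}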

Example 87 with ResultScanner

Use of org.apache.hadoop.hbase.client.ResultScanner in project hbase by apache.

The class TestReplicationSink, method testBatchSink:

/**
   * Insert a whole batch of entries
   * @throws Exception
   */
@Test
public void testBatchSink() throws Exception {
    List<WALEntry> entries = new ArrayList<>(BATCH_SIZE);
    List<Cell> cells = new ArrayList<>();
    for (int i = 0; i < BATCH_SIZE; i++) {
        entries.add(createEntry(TABLE_NAME1, i, KeyValue.Type.Put, cells));
    }
    SINK.replicateEntries(entries, CellUtil.createCellScanner(cells.iterator()), replicationClusterId, baseNamespaceDir, hfileArchiveDir);
    Scan scan = new Scan();
    ResultScanner scanRes = table1.getScanner(scan);
    assertEquals(BATCH_SIZE, scanRes.next(BATCH_SIZE).length);
}
Also used: ResultScanner (org.apache.hadoop.hbase.client.ResultScanner), ArrayList (java.util.ArrayList), Scan (org.apache.hadoop.hbase.client.Scan), WALEntry (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.WALEntry), Cell (org.apache.hadoop.hbase.Cell), Test (org.junit.Test)
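
The call scanRes.next(BATCH_SIZE) fetches up to BATCH_SIZE rows in one round trip and returns a possibly shorter Result[]; a zero-length array signals an exhausted scanner. A hedged sketch (not from the hbase project) of draining a scanner in fixed-size chunks, with the Table handle assumed:

import java.io.IOException;

import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;

public class ChunkedScanSketch {
    // Drains a scanner in chunks of up to chunkSize rows; next(n) returns a
    // zero-length array once the table is exhausted.
    static long countInChunks(Table table, int chunkSize) throws IOException {
        long total = 0;
        try (ResultScanner scanner = table.getScanner(new Scan())) {
            Result[] chunk;
            while ((chunk = scanner.next(chunkSize)).length > 0) {
                total += chunk.length;
            }
        }
        return total;
    }
}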

Example 88 with ResultScanner

Use of org.apache.hadoop.hbase.client.ResultScanner in project hbase by apache.

The class TestReplicationSink, method testMixedPutDelete:

/**
   * Insert a mix of puts and deletes
   * @throws Exception
   */
@Test
public void testMixedPutDelete() throws Exception {
    List<WALEntry> entries = new ArrayList<>(BATCH_SIZE / 2);
    List<Cell> cells = new ArrayList<>();
    for (int i = 0; i < BATCH_SIZE / 2; i++) {
        entries.add(createEntry(TABLE_NAME1, i, KeyValue.Type.Put, cells));
    }
    SINK.replicateEntries(entries, CellUtil.createCellScanner(cells), replicationClusterId, baseNamespaceDir, hfileArchiveDir);
    entries = new ArrayList<>(BATCH_SIZE);
    cells = new ArrayList<>();
    for (int i = 0; i < BATCH_SIZE; i++) {
        entries.add(createEntry(TABLE_NAME1, i, i % 2 != 0 ? KeyValue.Type.Put : KeyValue.Type.DeleteColumn, cells));
    }
    SINK.replicateEntries(entries, CellUtil.createCellScanner(cells.iterator()), replicationClusterId, baseNamespaceDir, hfileArchiveDir);
    Scan scan = new Scan();
    ResultScanner scanRes = table1.getScanner(scan);
    assertEquals(BATCH_SIZE / 2, scanRes.next(BATCH_SIZE).length);
}
Also used: ResultScanner (org.apache.hadoop.hbase.client.ResultScanner), ArrayList (java.util.ArrayList), Scan (org.apache.hadoop.hbase.client.Scan), WALEntry (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.WALEntry), Cell (org.apache.hadoop.hbase.Cell), Test (org.junit.Test)
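
Here the deletes travel through the replication sink as WAL entries; a KeyValue.Type.DeleteColumn cell corresponds to Delete.addColumns(family, qualifier) in the client API. A minimal sketch of the equivalent direct mutations, with placeholder family and qualifier names:

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class MixedPutDeleteSketch {
    // Placeholder column coordinates; the test's real family/qualifier differ.
    static final byte[] FAMILY = Bytes.toBytes("f");
    static final byte[] QUALIFIER = Bytes.toBytes("q");

    // Writes batchSize rows, then deletes the column from every even-numbered
    // row, leaving batchSize / 2 rows behind, mirroring the test's assertion.
    static void mixedPutDelete(Table table, int batchSize) throws IOException {
        List<Put> puts = new ArrayList<>(batchSize);
        for (int i = 0; i < batchSize; i++) {
            puts.add(new Put(Bytes.toBytes(i)).addColumn(FAMILY, QUALIFIER, Bytes.toBytes("v" + i)));
        }
        table.put(puts);
        List<Delete> deletes = new ArrayList<>(batchSize / 2);
        for (int i = 0; i < batchSize; i += 2) {
            // addColumns (plural) removes all versions, like KeyValue.Type.DeleteColumn
            deletes.add(new Delete(Bytes.toBytes(i)).addColumns(FAMILY, QUALIFIER));
        }
        table.delete(deletes);
    }
}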

Example 89 with ResultScanner

Use of org.apache.hadoop.hbase.client.ResultScanner in project hbase by apache.

The class TestReplicationSink, method testReplicateEntriesForHFiles:

/**
   * Test replicateEntries with a bulk load entry for 25 HFiles
   */
@Test
public void testReplicateEntriesForHFiles() throws Exception {
    Path dir = TEST_UTIL.getDataTestDirOnTestFS("testReplicateEntries");
    Path familyDir = new Path(dir, Bytes.toString(FAM_NAME1));
    int numRows = 10;
    List<Path> p = new ArrayList<>(1);
    // 1. Generate 25 hfile ranges
    Random rng = new SecureRandom();
    Set<Integer> numbers = new HashSet<>();
    while (numbers.size() < 50) {
        numbers.add(rng.nextInt(1000));
    }
    List<Integer> numberList = new ArrayList<>(numbers);
    Collections.sort(numberList);
    Map<String, Long> storeFilesSize = new HashMap<>(1);
    // 2. Create 25 hfiles
    Configuration conf = TEST_UTIL.getConfiguration();
    FileSystem fs = dir.getFileSystem(conf);
    Iterator<Integer> numbersItr = numberList.iterator();
    for (int i = 0; i < 25; i++) {
        Path hfilePath = new Path(familyDir, "hfile_" + i);
        HFileTestUtil.createHFile(conf, fs, hfilePath, FAM_NAME1, FAM_NAME1, Bytes.toBytes(numbersItr.next()), Bytes.toBytes(numbersItr.next()), numRows);
        p.add(hfilePath);
        storeFilesSize.put(hfilePath.getName(), fs.getFileStatus(hfilePath).getLen());
    }
    // 3. Create a BulkLoadDescriptor and a WALEdit
    Map<byte[], List<Path>> storeFiles = new HashMap<>(1);
    storeFiles.put(FAM_NAME1, p);
    WALEdit edit = null;
    WALProtos.BulkLoadDescriptor loadDescriptor = null;
    try (Connection c = ConnectionFactory.createConnection(conf);
        RegionLocator l = c.getRegionLocator(TABLE_NAME1)) {
        HRegionInfo regionInfo = l.getAllRegionLocations().get(0).getRegionInfo();
        loadDescriptor = ProtobufUtil.toBulkLoadDescriptor(TABLE_NAME1, UnsafeByteOperations.unsafeWrap(regionInfo.getEncodedNameAsBytes()), storeFiles, storeFilesSize, 1);
        edit = WALEdit.createBulkLoadEvent(regionInfo, loadDescriptor);
    }
    List<WALEntry> entries = new ArrayList<>(1);
    // 4. Create a WALEntryBuilder
    WALEntry.Builder builder = createWALEntryBuilder(TABLE_NAME1);
    // 5. Copy the hfile to the path as it is in reality
    for (int i = 0; i < 25; i++) {
        String pathToHfileFromNS = new StringBuilder(100).append(TABLE_NAME1.getNamespaceAsString()).append(Path.SEPARATOR).append(Bytes.toString(TABLE_NAME1.getName())).append(Path.SEPARATOR).append(Bytes.toString(loadDescriptor.getEncodedRegionName().toByteArray())).append(Path.SEPARATOR).append(Bytes.toString(FAM_NAME1)).append(Path.SEPARATOR).append("hfile_" + i).toString();
        String dst = baseNamespaceDir + Path.SEPARATOR + pathToHfileFromNS;
        FileUtil.copy(fs, p.get(0), fs, new Path(dst), false, conf);
    }
    entries.add(builder.build());
    try (ResultScanner scanner = table1.getScanner(new Scan())) {
        // 6. Assert no existing data in table
        assertEquals(0, scanner.next(numRows).length);
    }
    // 7. Replicate the bulk loaded entry
    SINK.replicateEntries(entries, CellUtil.createCellScanner(edit.getCells().iterator()), replicationClusterId, baseNamespaceDir, hfileArchiveDir);
    try (ResultScanner scanner = table1.getScanner(new Scan())) {
        // 8. Assert data is replicated
        assertEquals(numRows, scanner.next(numRows).length);
    }
}
Also used: Configuration (org.apache.hadoop.conf.Configuration), HashMap (java.util.HashMap), ArrayList (java.util.ArrayList), HRegionInfo (org.apache.hadoop.hbase.HRegionInfo), Random (java.util.Random), SecureRandom (java.security.SecureRandom), WALEdit (org.apache.hadoop.hbase.regionserver.wal.WALEdit), FileSystem (org.apache.hadoop.fs.FileSystem), List (java.util.List), HashSet (java.util.HashSet), Path (org.apache.hadoop.fs.Path), RegionLocator (org.apache.hadoop.hbase.client.RegionLocator), ResultScanner (org.apache.hadoop.hbase.client.ResultScanner), Connection (org.apache.hadoop.hbase.client.Connection), WALProtos (org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos), Scan (org.apache.hadoop.hbase.client.Scan), WALEntry (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.WALEntry), Test (org.junit.Test)
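
Steps 1 and 2 turn 50 distinct sorted integers into 25 disjoint HFile key ranges. A standalone sketch of just that range construction, independent of any HBase API:

import java.security.SecureRandom;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Random;
import java.util.Set;

public class HFileRangeSketch {
    // Draws 2 * rangeCount distinct values, sorts them, then pairs consecutive
    // values into non-overlapping [start, end) ranges, one per HFile.
    static List<int[]> buildRanges(int rangeCount) {
        Random rng = new SecureRandom();
        Set<Integer> numbers = new HashSet<>();
        while (numbers.size() < 2 * rangeCount) {
            numbers.add(rng.nextInt(1000));
        }
        List<Integer> sorted = new ArrayList<>(numbers);
        Collections.sort(sorted);
        List<int[]> ranges = new ArrayList<>(rangeCount);
        Iterator<Integer> it = sorted.iterator();
        for (int i = 0; i < rangeCount; i++) {
            ranges.add(new int[] { it.next(), it.next() });
        }
        return ranges;
    }
}

Because the endpoints come from one sorted, duplicate-free set, consecutive pairs can never overlap, which is what keeps the 25 generated HFiles mutually disjoint.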

Example 90 with ResultScanner

Use of org.apache.hadoop.hbase.client.ResultScanner in project hbase by apache.

The class TestSerialReplication, method testRegionMerge:

@Test
public void testRegionMerge() throws Exception {
    final TableName tableName = TableName.valueOf(name.getMethodName());
    HTableDescriptor table = new HTableDescriptor(tableName);
    HColumnDescriptor fam = new HColumnDescriptor(famName);
    fam.setScope(HConstants.REPLICATION_SCOPE_SERIAL);
    table.addFamily(fam);
    utility1.getAdmin().createTable(table);
    utility2.getAdmin().createTable(table);
    Threads.sleep(5000);
    utility1.getAdmin().split(tableName, ROWS[50]);
    waitTableHasRightNumberOfRegions(tableName, 2);
    try (Table t1 = utility1.getConnection().getTable(tableName);
        Table t2 = utility2.getConnection().getTable(tableName)) {
        for (int i = 10; i < 100; i += 10) {
            Put put = new Put(ROWS[i]);
            put.addColumn(famName, VALUE, VALUE);
            t1.put(put);
        }
        List<Pair<HRegionInfo, ServerName>> regions = MetaTableAccessor.getTableRegionsAndLocations(utility1.getConnection(), tableName);
        utility1.getAdmin().mergeRegionsAsync(regions.get(0).getFirst().getRegionName(), regions.get(1).getFirst().getRegionName(), true);
        waitTableHasRightNumberOfRegions(tableName, 1);
        for (int i = 11; i < 100; i += 10) {
            Put put = new Put(ROWS[i]);
            put.addColumn(famName, VALUE, VALUE);
            t1.put(put);
        }
        long start = EnvironmentEdgeManager.currentTime();
        while (EnvironmentEdgeManager.currentTime() - start < 180000) {
            Scan scan = new Scan();
            scan.setCaching(100);
            List<Cell> list = new ArrayList<>();
            try (ResultScanner results = t2.getScanner(scan)) {
                for (Result result : results) {
                    assertEquals(1, result.rawCells().length);
                    list.add(result.rawCells()[0]);
                }
            }
            List<Integer> listOfNumbers = getRowNumbers(list);
            List<Integer> list0 = new ArrayList<>();
            List<Integer> list1 = new ArrayList<>();
            for (int num : listOfNumbers) {
                if (num % 10 == 0) {
                    list0.add(num);
                } else {
                    list1.add(num);
                }
            }
            LOG.info(Arrays.toString(list0.toArray()));
            LOG.info(Arrays.toString(list1.toArray()));
            assertIntegerList(list1, 11, 10);
            if (!list1.isEmpty()) {
                assertEquals(9, list0.size());
            }
            if (list.size() == 18) {
                return;
            }
            LOG.info("Waiting all logs pushed to slave. Expected 18 , actual " + list.size());
            Thread.sleep(200);
        }
    }
}
Also used: Table (org.apache.hadoop.hbase.client.Table), ResultScanner (org.apache.hadoop.hbase.client.ResultScanner), HColumnDescriptor (org.apache.hadoop.hbase.HColumnDescriptor), ArrayList (java.util.ArrayList), Put (org.apache.hadoop.hbase.client.Put), HTableDescriptor (org.apache.hadoop.hbase.HTableDescriptor), Result (org.apache.hadoop.hbase.client.Result), TableName (org.apache.hadoop.hbase.TableName), Scan (org.apache.hadoop.hbase.client.Scan), Cell (org.apache.hadoop.hbase.Cell), Pair (org.apache.hadoop.hbase.util.Pair), Test (org.junit.Test)
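
The 180-second loop above is a poll-until-replicated idiom. A generic hedged sketch of the same wait-for-condition pattern using plain JDK types; the timeout and poll interval are placeholders:

import java.util.function.BooleanSupplier;

public class WaitForSketch {
    // Polls until the condition holds or the deadline passes; returns whether
    // the condition was met, so the caller decides how to fail.
    static boolean waitFor(BooleanSupplier condition, long timeoutMs, long pollMs)
            throws InterruptedException {
        long deadline = System.currentTimeMillis() + timeoutMs;
        while (System.currentTimeMillis() < deadline) {
            if (condition.getAsBoolean()) {
                return true;
            }
            Thread.sleep(pollMs);
        }
        return condition.getAsBoolean();
    }
}

In the test above, the scan-and-collect body of the while loop would become the BooleanSupplier, with the 18-row check as the success condition and the per-iteration assertions kept outside the helper.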

Aggregations

ResultScanner (org.apache.hadoop.hbase.client.ResultScanner): 342 uses
Scan (org.apache.hadoop.hbase.client.Scan): 303 uses
Result (org.apache.hadoop.hbase.client.Result): 302 uses
Table (org.apache.hadoop.hbase.client.Table): 164 uses
Test (org.junit.Test): 152 uses
Cell (org.apache.hadoop.hbase.Cell): 106 uses
IOException (java.io.IOException): 102 uses
TableName (org.apache.hadoop.hbase.TableName): 89 uses
Delete (org.apache.hadoop.hbase.client.Delete): 79 uses
Connection (org.apache.hadoop.hbase.client.Connection): 77 uses
Put (org.apache.hadoop.hbase.client.Put): 75 uses
ArrayList (java.util.ArrayList): 71 uses
PrivilegedExceptionAction (java.security.PrivilegedExceptionAction): 47 uses
InterruptedIOException (java.io.InterruptedIOException): 46 uses
CellScanner (org.apache.hadoop.hbase.CellScanner): 42 uses
HTableDescriptor (org.apache.hadoop.hbase.HTableDescriptor): 31 uses
HColumnDescriptor (org.apache.hadoop.hbase.HColumnDescriptor): 29 uses
HTable (org.apache.hadoop.hbase.client.HTable): 29 uses
Admin (org.apache.hadoop.hbase.client.Admin): 24 uses
Get (org.apache.hadoop.hbase.client.Get): 23 uses