Example 51 with ResultScanner

Use of org.apache.hadoop.hbase.client.ResultScanner in project hbase by apache, from the class TestRegionObserverInterface, method testHBase3583.

// HBase-3583
@Test(timeout = 300000)
public void testHBase3583() throws IOException {
    final TableName tableName = TableName.valueOf(name.getMethodName());
    util.createTable(tableName, new byte[][] { A, B, C });
    util.waitUntilAllRegionsAssigned(tableName);
    verifyMethodResult(SimpleRegionObserver.class, new String[] { "hadPreGet", "hadPostGet", "wasScannerNextCalled", "wasScannerCloseCalled" }, tableName, new Boolean[] { false, false, false, false });
    Table table = util.getConnection().getTable(tableName);
    Put put = new Put(ROW);
    put.addColumn(A, A, A);
    table.put(put);
    Get get = new Get(ROW);
    get.addColumn(A, A);
    table.get(get);
    // verify that scannerNext and scannerClose upcalls won't be invoked
    // when we perform get().
    verifyMethodResult(SimpleRegionObserver.class, new String[] { "hadPreGet", "hadPostGet", "wasScannerNextCalled", "wasScannerCloseCalled" }, tableName, new Boolean[] { true, true, false, false });
    Scan s = new Scan();
    ResultScanner scanner = table.getScanner(s);
    try {
        for (Result rr = scanner.next(); rr != null; rr = scanner.next()) {
            // drain the scanner; the test only checks that next() and close() fire the coprocessor hooks
        }
    } finally {
        scanner.close();
    }
    // now scanner hooks should be invoked.
    verifyMethodResult(SimpleRegionObserver.class, new String[] { "wasScannerNextCalled", "wasScannerCloseCalled" }, tableName, new Boolean[] { true, true });
    util.deleteTable(tableName);
    table.close();
}
Also used : TableName(org.apache.hadoop.hbase.TableName) Table(org.apache.hadoop.hbase.client.Table) ResultScanner(org.apache.hadoop.hbase.client.ResultScanner) Get(org.apache.hadoop.hbase.client.Get) Scan(org.apache.hadoop.hbase.client.Scan) Put(org.apache.hadoop.hbase.client.Put) Result(org.apache.hadoop.hbase.client.Result) Test(org.junit.Test)
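
The try/finally drain above is the core ResultScanner idiom: iterate with next() until null, then close. Since ResultScanner is both Iterable<Result> and Closeable, the same loop can be written as a for-each inside try-with-resources. A minimal sketch, assuming a Table handle obtained the same way as in the test (countRowsByScan is an illustrative name, not part of the test):

import java.io.IOException;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;

public class ScanDrainSketch {
    // Drains a full-table scan and returns the number of rows seen.
    static long countRowsByScan(Table table) throws IOException {
        long rows = 0;
        // try-with-resources closes the scanner even if next() throws,
        // which is what the finally block in the test accomplishes.
        try (ResultScanner scanner = table.getScanner(new Scan())) {
            for (Result r : scanner) {  // ResultScanner is Iterable<Result>
                rows++;
            }
        }
        return rows;
    }
}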

Example 52 with ResultScanner

Use of org.apache.hadoop.hbase.client.ResultScanner in project hbase by apache, from the class TestRegionObserverInterface, method testCompactionOverride.

/**
   * Tests overriding compaction handling via coprocessor hooks
   * @throws Exception
   */
@Test(timeout = 300000)
public void testCompactionOverride() throws Exception {
    final TableName compactTable = TableName.valueOf(name.getMethodName());
    Admin admin = util.getAdmin();
    if (admin.tableExists(compactTable)) {
        admin.disableTable(compactTable);
        admin.deleteTable(compactTable);
    }
    HTableDescriptor htd = new HTableDescriptor(compactTable);
    htd.addFamily(new HColumnDescriptor(A));
    htd.addCoprocessor(EvenOnlyCompactor.class.getName());
    admin.createTable(htd);
    Table table = util.getConnection().getTable(compactTable);
    for (long i = 1; i <= 10; i++) {
        byte[] iBytes = Bytes.toBytes(i);
        Put put = new Put(iBytes);
        put.setDurability(Durability.SKIP_WAL);
        put.addColumn(A, A, iBytes);
        table.put(put);
    }
    HRegion firstRegion = cluster.getRegions(compactTable).get(0);
    Coprocessor cp = firstRegion.getCoprocessorHost().findCoprocessor(EvenOnlyCompactor.class.getName());
    assertNotNull("EvenOnlyCompactor coprocessor should be loaded", cp);
    EvenOnlyCompactor compactor = (EvenOnlyCompactor) cp;
    // force a compaction
    long ts = System.currentTimeMillis();
    admin.flush(compactTable);
    // wait for flush
    for (int i = 0; i < 10; i++) {
        if (compactor.lastFlush >= ts) {
            break;
        }
        Thread.sleep(1000);
    }
    assertTrue("Flush didn't complete", compactor.lastFlush >= ts);
    LOG.debug("Flush complete");
    ts = compactor.lastFlush;
    admin.majorCompact(compactTable);
    // wait for compaction
    for (int i = 0; i < 30; i++) {
        if (compactor.lastCompaction >= ts) {
            break;
        }
        Thread.sleep(1000);
    }
    LOG.debug("Last compaction was at " + compactor.lastCompaction);
    assertTrue("Compaction didn't complete", compactor.lastCompaction >= ts);
    // only even rows should remain
    ResultScanner scanner = table.getScanner(new Scan());
    try {
        for (long i = 2; i <= 10; i += 2) {
            Result r = scanner.next();
            assertNotNull(r);
            assertFalse(r.isEmpty());
            byte[] iBytes = Bytes.toBytes(i);
            assertArrayEquals("Row should be " + i, r.getRow(), iBytes);
            assertArrayEquals("Value should be " + i, r.getValue(A, A), iBytes);
        }
    } finally {
        scanner.close();
    }
    table.close();
}
Also used : Table(org.apache.hadoop.hbase.client.Table) ResultScanner(org.apache.hadoop.hbase.client.ResultScanner) HColumnDescriptor(org.apache.hadoop.hbase.HColumnDescriptor) Admin(org.apache.hadoop.hbase.client.Admin) Put(org.apache.hadoop.hbase.client.Put) HTableDescriptor(org.apache.hadoop.hbase.HTableDescriptor) Result(org.apache.hadoop.hbase.client.Result) TableName(org.apache.hadoop.hbase.TableName) HRegion(org.apache.hadoop.hbase.regionserver.HRegion) Coprocessor(org.apache.hadoop.hbase.Coprocessor) Scan(org.apache.hadoop.hbase.client.Scan) Test(org.junit.Test)
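
The two sleep loops above (waiting for the flush and then the compaction to reach the coprocessor) follow the same poll-until-condition pattern. A generic helper is sketched below; waitFor is a hypothetical name, not a utility this test uses:

import java.util.function.BooleanSupplier;

public class PollSketch {
    // Polls a condition at a fixed interval until it holds or the attempt budget runs out.
    static boolean waitFor(BooleanSupplier condition, int maxAttempts, long sleepMillis)
            throws InterruptedException {
        for (int i = 0; i < maxAttempts; i++) {
            if (condition.getAsBoolean()) {
                return true;
            }
            Thread.sleep(sleepMillis);
        }
        return condition.getAsBoolean();
    }
}

The flush wait would then read something like assertTrue(waitFor(() -> compactor.lastFlush >= flushTs, 10, 1000)), assuming the timestamp is kept in an effectively final local so the lambda can capture it.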

Example 53 with ResultScanner

Use of org.apache.hadoop.hbase.client.ResultScanner in project hbase by apache, from the class TestMultiRowRangeFilter, method getScanResult.

private List<Cell> getScanResult(byte[] startRow, byte[] stopRow, Table ht) throws IOException {
    Scan scan = new Scan();
    scan.setMaxVersions();
    if (!Bytes.toString(startRow).isEmpty()) {
        scan.setStartRow(startRow);
    }
    if (!Bytes.toString(stopRow).isEmpty()) {
        scan.setStopRow(stopRow);
    }
    ResultScanner scanner = ht.getScanner(scan);
    List<Cell> kvList = new ArrayList<>();
    Result r;
    while ((r = scanner.next()) != null) {
        for (Cell kv : r.listCells()) {
            kvList.add(kv);
        }
    }
    scanner.close();
    return kvList;
}
Also used : ResultScanner(org.apache.hadoop.hbase.client.ResultScanner) ArrayList(java.util.ArrayList) Scan(org.apache.hadoop.hbase.client.Scan) Cell(org.apache.hadoop.hbase.Cell) Result(org.apache.hadoop.hbase.client.Result)
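
setStartRow, setStopRow, and setMaxVersions match the client API of the HBase version these sources were taken from. In the 2.x client, the same bounded, all-versions scan is normally expressed with withStartRow, withStopRow, and readAllVersions; a sketch under that assumption (scanRange is an illustrative name):

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;

public class RangeScanSketch {
    // Same cell collection as getScanResult, written against the 2.x Scan builder methods.
    static List<Cell> scanRange(Table table, byte[] startRow, byte[] stopRow) throws IOException {
        Scan scan = new Scan().readAllVersions();   // replaces setMaxVersions()
        if (startRow != null && startRow.length > 0) {
            scan.withStartRow(startRow);            // inclusive start row
        }
        if (stopRow != null && stopRow.length > 0) {
            scan.withStopRow(stopRow);              // exclusive stop row
        }
        List<Cell> cells = new ArrayList<>();
        try (ResultScanner scanner = table.getScanner(scan)) {
            for (Result r : scanner) {
                cells.addAll(r.listCells());
            }
        }
        return cells;
    }
}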

Example 54 with ResultScanner

Use of org.apache.hadoop.hbase.client.ResultScanner in project hbase by apache, from the class TestHFileOutputFormat2, method doIncrementalLoadTest.

private void doIncrementalLoadTest(boolean shouldChangeRegions, boolean shouldKeepLocality, boolean putSortReducer, String tableStr) throws Exception {
    util = new HBaseTestingUtility();
    Configuration conf = util.getConfiguration();
    conf.setBoolean(HFileOutputFormat2.LOCALITY_SENSITIVE_CONF_KEY, shouldKeepLocality);
    int hostCount = 1;
    int regionNum = 5;
    if (shouldKeepLocality) {
        // Raise the host count above the HDFS replica count once MiniHBaseCluster supports
        // an explicit hostnames parameter, just like MiniDFSCluster does.
        hostCount = 3;
        regionNum = 20;
    }
    byte[][] splitKeys = generateRandomSplitKeys(regionNum - 1);
    String[] hostnames = new String[hostCount];
    for (int i = 0; i < hostCount; ++i) {
        hostnames[i] = "datanode_" + i;
    }
    util.startMiniCluster(1, hostCount, hostnames);
    TableName tableName = TableName.valueOf(tableStr);
    Table table = util.createTable(tableName, FAMILIES, splitKeys);
    Path testDir = util.getDataTestDirOnTestFS("testLocalMRIncrementalLoad");
    FileSystem fs = testDir.getFileSystem(conf);
    try (RegionLocator r = util.getConnection().getRegionLocator(tableName);
        Admin admin = util.getConnection().getAdmin()) {
        assertEquals("Should start with empty table", 0, util.countRows(table));
        int numRegions = r.getStartKeys().length;
        assertEquals("Should make " + regionNum + " regions", numRegions, regionNum);
        // Generate the bulk load files
        runIncrementalPELoad(conf, table.getTableDescriptor(), r, testDir, putSortReducer);
        // This doesn't write into the table, just makes files
        assertEquals("HFOF should not touch actual table", 0, util.countRows(table));
        // Make sure that a directory was created for every CF
        int dir = 0;
        for (FileStatus f : testDir.getFileSystem(conf).listStatus(testDir)) {
            for (byte[] family : FAMILIES) {
                if (Bytes.toString(family).equals(f.getPath().getName())) {
                    ++dir;
                }
            }
        }
        assertEquals("Column family not found in FS.", FAMILIES.length, dir);
        // handle the split case
        if (shouldChangeRegions) {
            LOG.info("Changing regions in table");
            admin.disableTable(table.getName());
            util.waitUntilNoRegionsInTransition();
            util.deleteTable(table.getName());
            byte[][] newSplitKeys = generateRandomSplitKeys(14);
            table = util.createTable(tableName, FAMILIES, newSplitKeys);
            while (util.getConnection().getRegionLocator(tableName).getAllRegionLocations().size() != 15 || !admin.isTableAvailable(table.getName())) {
                Thread.sleep(200);
                LOG.info("Waiting for new region assignment to happen");
            }
        }
        // Perform the actual load
        new LoadIncrementalHFiles(conf).doBulkLoad(testDir, admin, table, r);
        // Ensure data shows up
        int expectedRows = 0;
        if (putSortReducer) {
            // no rows should be extracted
            assertEquals("LoadIncrementalHFiles should put expected data in table", expectedRows, util.countRows(table));
        } else {
            expectedRows = NMapInputFormat.getNumMapTasks(conf) * ROWSPERSPLIT;
            assertEquals("LoadIncrementalHFiles should put expected data in table", expectedRows, util.countRows(table));
            Scan scan = new Scan();
            ResultScanner results = table.getScanner(scan);
            for (Result res : results) {
                assertEquals(FAMILIES.length, res.rawCells().length);
                Cell first = res.rawCells()[0];
                for (Cell kv : res.rawCells()) {
                    assertTrue(CellUtil.matchingRow(first, kv));
                    assertTrue(Bytes.equals(CellUtil.cloneValue(first), CellUtil.cloneValue(kv)));
                }
            }
            results.close();
        }
        String tableDigestBefore = util.checksumRows(table);
        // Check region locality
        HDFSBlocksDistribution hbd = new HDFSBlocksDistribution();
        for (HRegion region : util.getHBaseCluster().getRegions(tableName)) {
            hbd.add(region.getHDFSBlocksDistribution());
        }
        for (String hostname : hostnames) {
            float locality = hbd.getBlockLocalityIndex(hostname);
            LOG.info("locality of [" + hostname + "]: " + locality);
            assertEquals(100, (int) (locality * 100));
        }
        // Cause regions to reopen
        admin.disableTable(tableName);
        while (!admin.isTableDisabled(tableName)) {
            Thread.sleep(200);
            LOG.info("Waiting for table to disable");
        }
        admin.enableTable(tableName);
        util.waitTableAvailable(tableName);
        assertEquals("Data should remain after reopening of regions", tableDigestBefore, util.checksumRows(table));
    } finally {
        testDir.getFileSystem(conf).delete(testDir, true);
        util.deleteTable(tableName);
        util.shutdownMiniCluster();
    }
}
Also used : Path(org.apache.hadoop.fs.Path) RegionLocator(org.apache.hadoop.hbase.client.RegionLocator) Table(org.apache.hadoop.hbase.client.Table) FileStatus(org.apache.hadoop.fs.FileStatus) LocatedFileStatus(org.apache.hadoop.fs.LocatedFileStatus) HdfsFileStatus(org.apache.hadoop.hdfs.protocol.HdfsFileStatus) ResultScanner(org.apache.hadoop.hbase.client.ResultScanner) Configuration(org.apache.hadoop.conf.Configuration) HBaseConfiguration(org.apache.hadoop.hbase.HBaseConfiguration) Admin(org.apache.hadoop.hbase.client.Admin) HDFSBlocksDistribution(org.apache.hadoop.hbase.HDFSBlocksDistribution) Result(org.apache.hadoop.hbase.client.Result) TableName(org.apache.hadoop.hbase.TableName) HRegion(org.apache.hadoop.hbase.regionserver.HRegion) HBaseTestingUtility(org.apache.hadoop.hbase.HBaseTestingUtility) FileSystem(org.apache.hadoop.fs.FileSystem) DistributedFileSystem(org.apache.hadoop.hdfs.DistributedFileSystem) Scan(org.apache.hadoop.hbase.client.Scan) Cell(org.apache.hadoop.hbase.Cell)
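
The scan near the end of the non-putSortReducer branch checks that every row carries one cell per family and that all cells of a row hold the same value. Pulled out on its own, that verification could look like the sketch below; assertUniformRows is an illustrative helper that throws a plain AssertionError instead of using JUnit asserts:

import java.io.IOException;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class RowConsistencySketch {
    // Verifies that every row has the expected number of cells and that all cells
    // in a row share the same row key and value, mirroring the loop in the test.
    static void assertUniformRows(Table table, int expectedCellsPerRow) throws IOException {
        try (ResultScanner results = table.getScanner(new Scan())) {
            for (Result res : results) {
                Cell[] cells = res.rawCells();
                if (cells.length != expectedCellsPerRow) {
                    throw new AssertionError("Expected " + expectedCellsPerRow
                            + " cells per row, got " + cells.length);
                }
                Cell first = cells[0];
                for (Cell kv : cells) {
                    if (!CellUtil.matchingRow(first, kv)
                            || !Bytes.equals(CellUtil.cloneValue(first), CellUtil.cloneValue(kv))) {
                        throw new AssertionError("Inconsistent cells in row "
                                + Bytes.toStringBinary(CellUtil.cloneRow(first)));
                    }
                }
            }
        }
    }
}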

Example 55 with ResultScanner

Use of org.apache.hadoop.hbase.client.ResultScanner in project hbase by apache, from the class TestImportExport, method testWithDeletes.

@Test
public void testWithDeletes() throws Exception {
    HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(name.getMethodName()));
    desc.addFamily(new HColumnDescriptor(FAMILYA).setMaxVersions(5).setKeepDeletedCells(KeepDeletedCells.TRUE));
    UTIL.getAdmin().createTable(desc);
    try (Table t = UTIL.getConnection().getTable(desc.getTableName())) {
        Put p = new Put(ROW1);
        p.addColumn(FAMILYA, QUAL, now, QUAL);
        p.addColumn(FAMILYA, QUAL, now + 1, QUAL);
        p.addColumn(FAMILYA, QUAL, now + 2, QUAL);
        p.addColumn(FAMILYA, QUAL, now + 3, QUAL);
        p.addColumn(FAMILYA, QUAL, now + 4, QUAL);
        t.put(p);
        Delete d = new Delete(ROW1, now + 3);
        t.delete(d);
        d = new Delete(ROW1);
        d.addColumns(FAMILYA, QUAL, now + 2);
        t.delete(d);
    }
    String[] args = new String[] {
        "-D" + Export.RAW_SCAN + "=true",
        name.getMethodName(),
        FQ_OUTPUT_DIR,
        "1000" // max number of key versions per key to export
    };
    assertTrue(runExport(args));
    final String IMPORT_TABLE = name.getMethodName() + "import";
    desc = new HTableDescriptor(TableName.valueOf(IMPORT_TABLE));
    desc.addFamily(new HColumnDescriptor(FAMILYA).setMaxVersions(5).setKeepDeletedCells(KeepDeletedCells.TRUE));
    UTIL.getAdmin().createTable(desc);
    try (Table t = UTIL.getConnection().getTable(desc.getTableName())) {
        args = new String[] { IMPORT_TABLE, FQ_OUTPUT_DIR };
        assertTrue(runImport(args));
        Scan s = new Scan();
        s.setMaxVersions();
        s.setRaw(true);
        ResultScanner scanner = t.getScanner(s);
        Result r = scanner.next();
        Cell[] res = r.rawCells();
        assertTrue(CellUtil.isDeleteFamily(res[0]));
        assertEquals(now + 4, res[1].getTimestamp());
        assertEquals(now + 3, res[2].getTimestamp());
        assertTrue(CellUtil.isDelete(res[3]));
        assertEquals(now + 2, res[4].getTimestamp());
        assertEquals(now + 1, res[5].getTimestamp());
        assertEquals(now, res[6].getTimestamp());
    }
}
Also used : Delete(org.apache.hadoop.hbase.client.Delete) Table(org.apache.hadoop.hbase.client.Table) ResultScanner(org.apache.hadoop.hbase.client.ResultScanner) HColumnDescriptor(org.apache.hadoop.hbase.HColumnDescriptor) Scan(org.apache.hadoop.hbase.client.Scan) Cell(org.apache.hadoop.hbase.Cell) Put(org.apache.hadoop.hbase.client.Put) HTableDescriptor(org.apache.hadoop.hbase.HTableDescriptor) Result(org.apache.hadoop.hbase.client.Result) Test(org.junit.Test)
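
The raw scan (setRaw(true) together with setMaxVersions()) is what lets the assertions above see delete markers next to the versions they would normally mask. A minimal sketch that only counts those markers, assuming the same kind of Table handle (countDeleteMarkers is an illustrative name, not part of the test):

import java.io.IOException;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;

public class RawScanSketch {
    // Counts delete markers returned by a raw, all-versions scan.
    // A raw scan hands back delete markers and the cells they mask instead of applying them.
    static int countDeleteMarkers(Table table) throws IOException {
        Scan scan = new Scan();
        scan.setMaxVersions();
        scan.setRaw(true);
        int deletes = 0;
        try (ResultScanner scanner = table.getScanner(scan)) {
            for (Result r : scanner) {
                for (Cell cell : r.rawCells()) {
                    if (CellUtil.isDelete(cell)) {  // any delete type: family, column, or version
                        deletes++;
                    }
                }
            }
        }
        return deletes;
    }
}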

Aggregations

ResultScanner (org.apache.hadoop.hbase.client.ResultScanner): 342
Scan (org.apache.hadoop.hbase.client.Scan): 303
Result (org.apache.hadoop.hbase.client.Result): 302
Table (org.apache.hadoop.hbase.client.Table): 164
Test (org.junit.Test): 152
Cell (org.apache.hadoop.hbase.Cell): 106
IOException (java.io.IOException): 102
TableName (org.apache.hadoop.hbase.TableName): 89
Delete (org.apache.hadoop.hbase.client.Delete): 79
Connection (org.apache.hadoop.hbase.client.Connection): 77
Put (org.apache.hadoop.hbase.client.Put): 75
ArrayList (java.util.ArrayList): 71
PrivilegedExceptionAction (java.security.PrivilegedExceptionAction): 47
InterruptedIOException (java.io.InterruptedIOException): 46
CellScanner (org.apache.hadoop.hbase.CellScanner): 42
HTableDescriptor (org.apache.hadoop.hbase.HTableDescriptor): 31
HColumnDescriptor (org.apache.hadoop.hbase.HColumnDescriptor): 29
HTable (org.apache.hadoop.hbase.client.HTable): 29
Admin (org.apache.hadoop.hbase.client.Admin): 24
Get (org.apache.hadoop.hbase.client.Get): 23