
Example 36 with RegionScanner

Use of org.apache.hadoop.hbase.regionserver.RegionScanner in project hbase by apache.

From the class TestSeekBeforeWithReverseScan, the method testReverseScanWithoutPadding:

@Test
public void testReverseScanWithoutPadding() throws Exception {
    byte[] row1 = Bytes.toBytes("a");
    byte[] row2 = Bytes.toBytes("ab");
    byte[] row3 = Bytes.toBytes("b");
    Put put1 = new Put(row1);
    put1.addColumn(cfName, cqName, HConstants.EMPTY_BYTE_ARRAY);
    Put put2 = new Put(row2);
    put2.addColumn(cfName, cqName, HConstants.EMPTY_BYTE_ARRAY);
    Put put3 = new Put(row3);
    put3.addColumn(cfName, cqName, HConstants.EMPTY_BYTE_ARRAY);
    region.put(put1);
    region.put(put2);
    region.put(put3);
    region.flush(true);
    Scan scan = new Scan();
    scan.setCacheBlocks(false);
    scan.setReversed(true);
    scan.setFilter(new FirstKeyOnlyFilter());
    scan.addFamily(cfName);
    RegionScanner scanner = region.getScanner(scan);
    List<Cell> res = new ArrayList<>();
    // Start counting at 1: the next() call that returns false still loads the final row,
    // and res is never cleared, so it accumulates the cells of every row scanned.
    int count = 1;
    while (scanner.next(res)) {
        count++;
    }
    assertEquals("b", Bytes.toString(res.get(0).getRowArray(), res.get(0).getRowOffset(), res.get(0).getRowLength()));
    assertEquals("ab", Bytes.toString(res.get(1).getRowArray(), res.get(1).getRowOffset(), res.get(1).getRowLength()));
    assertEquals("a", Bytes.toString(res.get(2).getRowArray(), res.get(2).getRowOffset(), res.get(2).getRowLength()));
    assertEquals(3, count);
}
Also used: RegionScanner (org.apache.hadoop.hbase.regionserver.RegionScanner), FirstKeyOnlyFilter (org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter), ArrayList (java.util.ArrayList), Scan (org.apache.hadoop.hbase.client.Scan), Cell (org.apache.hadoop.hbase.Cell), Put (org.apache.hadoop.hbase.client.Put), Test (org.junit.Test)
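
A note on the counting idiom above: RegionScanner.next(List<Cell>) fills the list with the next row's cells and returns true only while more rows remain, so the call that delivers the last row returns false. That is why count starts at 1 and why res ends up holding all three rows. A minimal sketch of the same loop with the termination handled explicitly; countRows is a hypothetical helper, not part of the test, and the imports match those listed for the example above:

private static int countRows(Region region, Scan scan) throws IOException {
    int rows = 0;
    try (RegionScanner scanner = region.getScanner(scan)) {
        List<Cell> cells = new ArrayList<>();
        boolean more;
        do {
            // next() loads at most one row's cells and reports whether more rows follow.
            cells.clear();
            more = scanner.next(cells);
            if (!cells.isEmpty()) {
                rows++;
            }
        } while (more);
    }
    return rows;
}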

Example 37 with RegionScanner

Use of org.apache.hadoop.hbase.regionserver.RegionScanner in project hbase by apache.

From the class AbstractTestWALReplay, the method testReplayEditsAfterAbortingFlush:

/**
 * Test that we can recover the data correctly after aborting a flush. In the test we first
 * abort a flush after writing some data, then write more data and flush again, and finally
 * verify the data.
 */
@Test
public void testReplayEditsAfterAbortingFlush() throws IOException {
    final TableName tableName = TableName.valueOf("testReplayEditsAfterAbortingFlush");
    final RegionInfo hri = createBasic3FamilyHRegionInfo(tableName);
    final Path basedir = CommonFSUtils.getTableDir(this.hbaseRootDir, tableName);
    deleteDir(basedir);
    final TableDescriptor htd = createBasic3FamilyHTD(tableName);
    HRegion region3 = HBaseTestingUtil.createRegionAndWAL(hri, hbaseRootDir, this.conf, htd);
    HBaseTestingUtil.closeRegionAndWAL(region3);
    // Write countPerFamily edits into the three families. Do a flush on one
    // of the families during the load of edits so its seqid is not the same as
    // the others, to verify we do the right thing when seqids differ.
    WAL wal = createWAL(this.conf, hbaseRootDir, logName);
    RegionServerServices rsServices = Mockito.mock(RegionServerServices.class);
    Mockito.doReturn(false).when(rsServices).isAborted();
    when(rsServices.getServerName()).thenReturn(ServerName.valueOf("foo", 10, 10));
    when(rsServices.getConfiguration()).thenReturn(conf);
    Configuration customConf = new Configuration(this.conf);
    customConf.set(DefaultStoreEngine.DEFAULT_STORE_FLUSHER_CLASS_KEY, CustomStoreFlusher.class.getName());
    HRegion region = HRegion.openHRegion(this.hbaseRootDir, hri, htd, wal, customConf, rsServices, null);
    int writtenRowCount = 10;
    List<ColumnFamilyDescriptor> families = Arrays.asList((htd.getColumnFamilies()));
    for (int i = 0; i < writtenRowCount; i++) {
        Put put = new Put(Bytes.toBytes(tableName + Integer.toString(i)));
        put.addColumn(families.get(i % families.size()).getName(), Bytes.toBytes("q"), Bytes.toBytes("val"));
        region.put(put);
    }
    // Now assert edits made it in.
    RegionScanner scanner = region.getScanner(new Scan());
    assertEquals(writtenRowCount, getScannedCount(scanner));
    // Let us flush the region
    CustomStoreFlusher.throwExceptionWhenFlushing.set(true);
    try {
        region.flush(true);
        fail("Injected exception hasn't been thrown");
    } catch (IOException e) {
        LOG.info("Expected simulated exception when flushing region, {}", e.getMessage());
        // Simulate aborting the server.
        Mockito.doReturn(true).when(rsServices).isAborted();
        // A region normally does not accept writes after a DroppedSnapshotException;
        // we mock around that for this test.
        region.setClosing(false);
    }
    // writing more data
    int moreRow = 10;
    for (int i = writtenRowCount; i < writtenRowCount + moreRow; i++) {
        Put put = new Put(Bytes.toBytes(tableName + Integer.toString(i)));
        put.addColumn(families.get(i % families.size()).getName(), Bytes.toBytes("q"), Bytes.toBytes("val"));
        region.put(put);
    }
    writtenRowCount += moreRow;
    // call flush again
    CustomStoreFlusher.throwExceptionWhenFlushing.set(false);
    try {
        region.flush(true);
    } catch (IOException t) {
        LOG.info("Expected exception when flushing region because server is stopped," + t.getMessage());
    }
    region.close(true);
    wal.shutdown();
    // Let us try to split and recover
    runWALSplit(this.conf);
    WAL wal2 = createWAL(this.conf, hbaseRootDir, logName);
    Mockito.doReturn(false).when(rsServices).isAborted();
    HRegion region2 = HRegion.openHRegion(this.hbaseRootDir, hri, htd, wal2, this.conf, rsServices, null);
    scanner = region2.getScanner(new Scan());
    assertEquals(writtenRowCount, getScannedCount(scanner));
}
Also used: Path (org.apache.hadoop.fs.Path), WAL (org.apache.hadoop.hbase.wal.WAL), RegionServerServices (org.apache.hadoop.hbase.regionserver.RegionServerServices), Configuration (org.apache.hadoop.conf.Configuration), HBaseConfiguration (org.apache.hadoop.hbase.HBaseConfiguration), RegionInfo (org.apache.hadoop.hbase.client.RegionInfo), IOException (java.io.IOException), ColumnFamilyDescriptor (org.apache.hadoop.hbase.client.ColumnFamilyDescriptor), TableDescriptor (org.apache.hadoop.hbase.client.TableDescriptor), Put (org.apache.hadoop.hbase.client.Put), TableName (org.apache.hadoop.hbase.TableName), HRegion (org.apache.hadoop.hbase.regionserver.HRegion), RegionScanner (org.apache.hadoop.hbase.regionserver.RegionScanner), Scan (org.apache.hadoop.hbase.client.Scan), Test (org.junit.Test)
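
The test swaps in CustomStoreFlusher via DefaultStoreEngine.DEFAULT_STORE_FLUSHER_CLASS_KEY, but the class itself is not shown above. A minimal sketch of such a flusher, assuming the DefaultStoreFlusher API of recent HBase releases; the exact flushSnapshot parameter list has changed across versions, so treat it as illustrative:

public static class CustomStoreFlusher extends DefaultStoreFlusher {
    // Flipped by the test to inject a flush failure on demand.
    static final AtomicBoolean throwExceptionWhenFlushing = new AtomicBoolean(false);

    public CustomStoreFlusher(Configuration conf, HStore store) {
        super(conf, store);
    }

    @Override
    public List<Path> flushSnapshot(MemStoreSnapshot snapshot, long cacheFlushId,
            MonitoredTask status, ThroughputController throughputController,
            FlushLifeCycleTracker tracker, Consumer<Path> writerCreationTracker)
            throws IOException {
        // Fail exactly when the test asks us to; otherwise flush normally.
        if (throwExceptionWhenFlushing.get()) {
            throw new IOException("Simulated exception by tests");
        }
        return super.flushSnapshot(snapshot, cacheFlushId, status, throughputController,
            tracker, writerCreationTracker);
    }
}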

Example 38 with RegionScanner

Use of org.apache.hadoop.hbase.regionserver.RegionScanner in project hbase by apache.

From the class HBaseTestingUtility, the method getClosestRowBefore:

public Result getClosestRowBefore(Region r, byte[] row, byte[] family) throws IOException {
    Scan scan = new Scan().withStartRow(row);
    scan.setReadType(ReadType.PREAD);
    scan.setCaching(1);
    scan.setReversed(true);
    scan.addFamily(family);
    try (RegionScanner scanner = r.getScanner(scan)) {
        List<Cell> cells = new ArrayList<>(1);
        scanner.next(cells);
        if (r.getRegionInfo().isMetaRegion() && !isTargetTable(row, cells.get(0))) {
            return null;
        }
        return Result.create(cells);
    }
}
Also used: RegionScanner (org.apache.hadoop.hbase.regionserver.RegionScanner), ArrayList (java.util.ArrayList), Scan (org.apache.hadoop.hbase.client.Scan)
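
A usage sketch for the helper above, assuming an open Region named region, a column family named family that exists in that region, and an HBaseTestingUtility instance named util; all three names are assumptions for illustration:

byte[] probe = Bytes.toBytes("row-0042");
// Returns the row at or immediately before the probe key within the family,
// or null when a meta-region lookup lands on the wrong target table.
Result closest = util.getClosestRowBefore(region, probe, family);
if (closest != null && !closest.isEmpty()) {
    System.out.println("Closest row: " + Bytes.toString(closest.getRow()));
}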

Example 39 with RegionScanner

Use of org.apache.hadoop.hbase.regionserver.RegionScanner in project hbase by apache.

From the class ExpAsStringVisibilityLabelServiceImpl, the method getAuths:

private void getAuths(Get get, List<String> auths) throws IOException {
    List<Cell> cells = new ArrayList<>();
    RegionScanner scanner = null;
    try {
        if (labelsRegion == null) {
            Table table = null;
            Connection connection = null;
            try {
                connection = ConnectionFactory.createConnection(conf);
                table = connection.getTable(VisibilityConstants.LABELS_TABLE_NAME);
                Result result = table.get(get);
                cells = result.listCells();
            } finally {
                if (table != null) {
                    table.close();
                }
                if (connection != null) {
                    connection.close();
                }
            }
        } else {
            // NOTE: Please don't use HRegion.get() instead,
            // because it will copy cells to heap. See HBASE-26036
            scanner = this.labelsRegion.getScanner(new Scan(get));
            scanner.next(cells);
        }
        for (Cell cell : cells) {
            String auth = Bytes.toString(cell.getQualifierArray(), cell.getQualifierOffset(), cell.getQualifierLength());
            auths.add(auth);
        }
    } finally {
        if (scanner != null) {
            scanner.close();
        }
    }
}
Also used: Table (org.apache.hadoop.hbase.client.Table), RegionScanner (org.apache.hadoop.hbase.regionserver.RegionScanner), ArrayList (java.util.ArrayList), Connection (org.apache.hadoop.hbase.client.Connection), Scan (org.apache.hadoop.hbase.client.Scan), Cell (org.apache.hadoop.hbase.Cell), Result (org.apache.hadoop.hbase.client.Result)
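
The HBASE-26036 note is the key detail in this example: when the code already holds the Region, wrapping the Get in a Scan and draining one row from a RegionScanner avoids the on-heap cell copies that Region.get() would make. A minimal sketch of the idiom in isolation; readRowCells is a hypothetical helper:

private static List<Cell> readRowCells(Region region, Get get) throws IOException {
    List<Cell> cells = new ArrayList<>();
    // Scan(Get) builds a single-row scan with the same specs as the Get,
    // so one next() call is enough to fetch the row (see HBASE-26036).
    try (RegionScanner scanner = region.getScanner(new Scan(get))) {
        scanner.next(cells);
    }
    return cells;
}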

Example 40 with RegionScanner

Use of org.apache.hadoop.hbase.regionserver.RegionScanner in project hbase by apache.

From the class VisibilityController, the method prePrepareTimeStampForDeleteVersion:

@Override
public void prePrepareTimeStampForDeleteVersion(ObserverContext<RegionCoprocessorEnvironment> ctx, Mutation delete, Cell cell, byte[] byteNow, Get get) throws IOException {
    // Nothing to do if we are not filtering by visibility
    if (!authorizationEnabled) {
        return;
    }
    CellVisibility cellVisibility = null;
    try {
        cellVisibility = delete.getCellVisibility();
    } catch (DeserializationException de) {
        throw new IOException("Invalid cell visibility specified " + delete, de);
    }
    // The check for checkForReservedVisibilityTagPresence happens in preBatchMutate.
    // It runs for every mutation, and that is enough.
    List<Tag> visibilityTags = new ArrayList<>();
    if (cellVisibility != null) {
        String labelsExp = cellVisibility.getExpression();
        try {
            visibilityTags = this.visibilityLabelService.createVisibilityExpTags(labelsExp, false, false);
        } catch (InvalidLabelException e) {
            throw new IOException("Invalid cell visibility specified " + labelsExp, e);
        }
    }
    get.setFilter(new DeleteVersionVisibilityExpressionFilter(visibilityTags, VisibilityConstants.SORTED_ORDINAL_SERIALIZATION_FORMAT));
    try (RegionScanner scanner = ctx.getEnvironment().getRegion().getScanner(new Scan(get))) {
        // NOTE: Please don't use HRegion.get() instead,
        // because it will copy cells to heap. See HBASE-26036
        List<Cell> result = new ArrayList<>();
        scanner.next(result);
        if (result.size() < get.getMaxVersions()) {
            // Nothing to delete
            PrivateCellUtil.updateLatestStamp(cell, byteNow);
            return;
        }
        if (result.size() > get.getMaxVersions()) {
            throw new RuntimeException("Unexpected size: " + result.size() + ". Results more than the max versions obtained.");
        }
        Cell getCell = result.get(get.getMaxVersions() - 1);
        PrivateCellUtil.setTimestamp(cell, getCell.getTimestamp());
    }
    // We are bypassing here because in HRegion.updateDeleteLatestVersionTimeStamp we would
    // update with the current timestamp after doing another get. As this hook has already
    // determined the needed timestamp, we bypass here.
    // TODO: See if HRegion.updateDeleteLatestVersionTimeStamp() could be
    // called only if the hook is not called.
    ctx.bypass();
}
Also used: ArrayList (java.util.ArrayList), DoNotRetryIOException (org.apache.hadoop.hbase.DoNotRetryIOException), IOException (java.io.IOException), ByteString (org.apache.hbase.thirdparty.com.google.protobuf.ByteString), DeserializationException (org.apache.hadoop.hbase.exceptions.DeserializationException), RegionScanner (org.apache.hadoop.hbase.regionserver.RegionScanner), Scan (org.apache.hadoop.hbase.client.Scan), Tag (org.apache.hadoop.hbase.Tag), Cell (org.apache.hadoop.hbase.Cell)
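
The closing ctx.bypass() is the general coprocessor pattern for a hook that has fully handled an operation itself. A minimal sketch of the same pattern on a different hook; note that in HBase 2.x only certain RegionObserver hooks honor bypass, and handledEntirelyByThisHook is a hypothetical predicate:

@Override
public void preGetOp(ObserverContext<RegionCoprocessorEnvironment> ctx, Get get,
        List<Cell> results) throws IOException {
    if (handledEntirelyByThisHook(get, results)) {
        // Skip the region's own Get processing; this hook already filled `results`.
        ctx.bypass();
    }
}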

Aggregations

RegionScanner (org.apache.hadoop.hbase.regionserver.RegionScanner) - 97 usages
Scan (org.apache.hadoop.hbase.client.Scan) - 75 usages
Cell (org.apache.hadoop.hbase.Cell) - 59 usages
ArrayList (java.util.ArrayList) - 35 usages
Test (org.junit.Test) - 35 usages
Put (org.apache.hadoop.hbase.client.Put) - 33 usages
HRegion (org.apache.hadoop.hbase.regionserver.HRegion) - 25 usages
Region (org.apache.hadoop.hbase.regionserver.Region) - 20 usages
List (java.util.List) - 18 usages
TableId (co.cask.cdap.data2.util.TableId) - 17 usages
IOException (java.io.IOException) - 14 usages
Delete (org.apache.hadoop.hbase.client.Delete) - 14 usages
RegionCoprocessorEnvironment (org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment) - 12 usages
ImmutableBytesPtr (org.apache.phoenix.hbase.index.util.ImmutableBytesPtr) - 12 usages
KeyValue (org.apache.hadoop.hbase.KeyValue) - 11 usages
Configuration (org.apache.hadoop.conf.Configuration) - 9 usages
ColumnReference (org.apache.phoenix.hbase.index.covered.update.ColumnReference) - 9 usages
PMetaDataEntity (org.apache.phoenix.schema.PMetaDataEntity) - 9 usages
InvocationOnMock (org.mockito.invocation.InvocationOnMock) - 8 usages
Result (org.apache.hadoop.hbase.client.Result) - 6 usages