Example 16 with Result

use of org.apache.hadoop.hbase.client.Result in project hbase by apache.

the class TestRemoteTable method testIteratorScaner.

/**
   * Test the RemoteHTable.Scanner.iterator() method.
   */
@Test
public void testIteratorScaner() throws IOException {
    List<Put> puts = new ArrayList<>(4);
    Put put = new Put(ROW_1);
    put.addColumn(COLUMN_1, QUALIFIER_1, VALUE_1);
    puts.add(put);
    put = new Put(ROW_2);
    put.addColumn(COLUMN_1, QUALIFIER_1, VALUE_1);
    puts.add(put);
    put = new Put(ROW_3);
    put.addColumn(COLUMN_1, QUALIFIER_1, VALUE_1);
    puts.add(put);
    put = new Put(ROW_4);
    put.addColumn(COLUMN_1, QUALIFIER_1, VALUE_1);
    puts.add(put);
    remoteTable.put(puts);
    ResultScanner scanner = remoteTable.getScanner(new Scan());
    Iterator<Result> iterator = scanner.iterator();
    assertTrue(iterator.hasNext());
    int counter = 0;
    while (iterator.hasNext()) {
        iterator.next();
        counter++;
    }
    assertEquals(4, counter);
}
Also used : ResultScanner(org.apache.hadoop.hbase.client.ResultScanner) ArrayList(java.util.ArrayList) Scan(org.apache.hadoop.hbase.client.Scan) Put(org.apache.hadoop.hbase.client.Put) Result(org.apache.hadoop.hbase.client.Result) Test(org.junit.Test)
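
For comparison, ResultScanner is also Iterable<Result>, so the explicit Iterator above can be replaced with a for-each loop. A minimal sketch, assuming an existing org.apache.hadoop.hbase.client.Table handle named table (a hypothetical stand-in for remoteTable):

import java.io.IOException;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class ScanIterationSketch {
    // Counts rows by iterating the scanner with for-each; ResultScanner is
    // Iterable<Result>, so an explicit Iterator is not required.
    static int countRows(Table table) throws IOException {
        int counter = 0;
        // try-with-resources closes the scanner even if iteration fails
        try (ResultScanner scanner = table.getScanner(new Scan())) {
            for (Result result : scanner) {
                System.out.println("row: " + Bytes.toStringBinary(result.getRow()));
                counter++;
            }
        }
        return counter;
    }
}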

Example 17 with Result

use of org.apache.hadoop.hbase.client.Result in project hbase by apache.

the class TestRemoteTable method testPut.

@Test
public void testPut() throws IOException {
    Put put = new Put(ROW_3);
    put.addColumn(COLUMN_1, QUALIFIER_1, VALUE_1);
    remoteTable.put(put);
    Get get = new Get(ROW_3);
    get.addFamily(COLUMN_1);
    Result result = remoteTable.get(get);
    byte[] value = result.getValue(COLUMN_1, QUALIFIER_1);
    assertNotNull(value);
    assertTrue(Bytes.equals(VALUE_1, value));
    // Multi-put: batch several Puts into a single call
    List<Put> puts = new ArrayList<>(3);
    put = new Put(ROW_3);
    put.addColumn(COLUMN_2, QUALIFIER_2, VALUE_2);
    puts.add(put);
    put = new Put(ROW_4);
    put.addColumn(COLUMN_1, QUALIFIER_1, VALUE_1);
    puts.add(put);
    put = new Put(ROW_4);
    put.addColumn(COLUMN_2, QUALIFIER_2, VALUE_2);
    puts.add(put);
    remoteTable.put(puts);
    get = new Get(ROW_3);
    get.addFamily(COLUMN_2);
    result = remoteTable.get(get);
    value = result.getValue(COLUMN_2, QUALIFIER_2);
    assertNotNull(value);
    assertTrue(Bytes.equals(VALUE_2, value));
    get = new Get(ROW_4);
    result = remoteTable.get(get);
    value = result.getValue(COLUMN_1, QUALIFIER_1);
    assertNotNull(value);
    assertTrue(Bytes.equals(VALUE_1, value));
    value = result.getValue(COLUMN_2, QUALIFIER_2);
    assertNotNull(value);
    assertTrue(Bytes.equals(VALUE_2, value));
    assertTrue(Bytes.equals(Bytes.toBytes("TestRemoteTable" + VALID_TABLE_NAME_CHARS), remoteTable.getTableName()));
}
Also used : Get(org.apache.hadoop.hbase.client.Get) ArrayList(java.util.ArrayList) Put(org.apache.hadoop.hbase.client.Put) Result(org.apache.hadoop.hbase.client.Result) Test(org.junit.Test)
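
The write-then-read round trip above can be factored into a small helper. A minimal sketch; verifyCell is a hypothetical name, and the table parameter stands in for remoteTable:

import java.io.IOException;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class PutGetSketch {
    // Writes one cell, reads it back, and returns whether the stored
    // value matches what was written.
    static boolean verifyCell(Table table, byte[] row, byte[] family,
                              byte[] qualifier, byte[] value) throws IOException {
        Put put = new Put(row);
        put.addColumn(family, qualifier, value);
        table.put(put);

        Get get = new Get(row);
        get.addColumn(family, qualifier); // fetch only the cell we wrote
        Result result = table.get(get);

        // containsColumn guards against a null return from getValue
        return result.containsColumn(family, qualifier)
                && Bytes.equals(value, result.getValue(family, qualifier));
    }
}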

Example 18 with Result

use of org.apache.hadoop.hbase.client.Result in project hbase by apache.

the class TestRemoteTable method testGet.

@Test
public void testGet() throws IOException {
    Get get = new Get(ROW_1);
    Result result = remoteTable.get(get);
    byte[] value1 = result.getValue(COLUMN_1, QUALIFIER_1);
    byte[] value2 = result.getValue(COLUMN_2, QUALIFIER_2);
    assertNotNull(value1);
    assertTrue(Bytes.equals(VALUE_1, value1));
    assertNull(value2);
    get = new Get(ROW_1);
    get.addFamily(COLUMN_3);
    result = remoteTable.get(get);
    value1 = result.getValue(COLUMN_1, QUALIFIER_1);
    value2 = result.getValue(COLUMN_2, QUALIFIER_2);
    assertNull(value1);
    assertNull(value2);
    get = new Get(ROW_1);
    get.addColumn(COLUMN_1, QUALIFIER_1);
    get.addColumn(COLUMN_2, QUALIFIER_2);
    result = remoteTable.get(get);
    value1 = result.getValue(COLUMN_1, QUALIFIER_1);
    value2 = result.getValue(COLUMN_2, QUALIFIER_2);
    assertNotNull(value1);
    assertTrue(Bytes.equals(VALUE_1, value1));
    assertNull(value2);
    get = new Get(ROW_2);
    result = remoteTable.get(get);
    value1 = result.getValue(COLUMN_1, QUALIFIER_1);
    value2 = result.getValue(COLUMN_2, QUALIFIER_2);
    assertNotNull(value1);
    // @TS_2
    assertTrue(Bytes.equals(VALUE_2, value1));
    assertNotNull(value2);
    assertTrue(Bytes.equals(VALUE_2, value2));
    get = new Get(ROW_2);
    get.addFamily(COLUMN_1);
    result = remoteTable.get(get);
    value1 = result.getValue(COLUMN_1, QUALIFIER_1);
    value2 = result.getValue(COLUMN_2, QUALIFIER_2);
    assertNotNull(value1);
    // @TS_2
    assertTrue(Bytes.equals(VALUE_2, value1));
    assertNull(value2);
    get = new Get(ROW_2);
    get.addColumn(COLUMN_1, QUALIFIER_1);
    get.addColumn(COLUMN_2, QUALIFIER_2);
    result = remoteTable.get(get);
    value1 = result.getValue(COLUMN_1, QUALIFIER_1);
    value2 = result.getValue(COLUMN_2, QUALIFIER_2);
    assertNotNull(value1);
    // @TS_2
    assertTrue(Bytes.equals(VALUE_2, value1));
    assertNotNull(value2);
    assertTrue(Bytes.equals(VALUE_2, value2));
    // test timestamp
    get = new Get(ROW_2);
    get.addFamily(COLUMN_1);
    get.addFamily(COLUMN_2);
    get.setTimeStamp(TS_1);
    result = remoteTable.get(get);
    value1 = result.getValue(COLUMN_1, QUALIFIER_1);
    value2 = result.getValue(COLUMN_2, QUALIFIER_2);
    assertNotNull(value1);
    // @TS_1
    assertTrue(Bytes.equals(VALUE_1, value1));
    assertNull(value2);
    // test timerange
    get = new Get(ROW_2);
    get.addFamily(COLUMN_1);
    get.addFamily(COLUMN_2);
    get.setTimeRange(0, TS_1 + 1);
    result = remoteTable.get(get);
    value1 = result.getValue(COLUMN_1, QUALIFIER_1);
    value2 = result.getValue(COLUMN_2, QUALIFIER_2);
    assertNotNull(value1);
    // @TS_1
    assertTrue(Bytes.equals(VALUE_1, value1));
    assertNull(value2);
    // test maxVersions
    get = new Get(ROW_2);
    get.addFamily(COLUMN_1);
    get.setMaxVersions(2);
    result = remoteTable.get(get);
    int count = 0;
    for (Cell kv : result.listCells()) {
        if (CellUtil.matchingFamily(kv, COLUMN_1) && TS_1 == kv.getTimestamp()) {
            // @TS_1
            assertTrue(CellUtil.matchingValue(kv, VALUE_1));
            count++;
        }
        if (CellUtil.matchingFamily(kv, COLUMN_1) && TS_2 == kv.getTimestamp()) {
            // @TS_2
            assertTrue(CellUtil.matchingValue(kv, VALUE_2));
            count++;
        }
    }
    assertEquals(2, count);
}
Also used : Get(org.apache.hadoop.hbase.client.Get) Cell(org.apache.hadoop.hbase.Cell) Result(org.apache.hadoop.hbase.client.Result) Test(org.junit.Test)
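
When maxVersions is greater than one, Result.getColumnCells returns all fetched versions of a single column sorted newest first, which avoids walking listCells manually as in the test above. A minimal sketch with hypothetical row/family/qualifier parameters:

import java.io.IOException;
import java.util.List;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class VersionedGetSketch {
    // Prints every fetched version of one column, newest timestamp first.
    static void printVersions(Table table, byte[] row, byte[] family,
                              byte[] qualifier, int versions) throws IOException {
        Get get = new Get(row);
        get.addColumn(family, qualifier);
        get.setMaxVersions(versions); // default is 1: latest version only
        Result result = table.get(get);
        // getColumnCells returns versions in descending timestamp order
        List<Cell> cells = result.getColumnCells(family, qualifier);
        for (Cell cell : cells) {
            System.out.println(cell.getTimestamp() + " -> "
                    + Bytes.toStringBinary(CellUtil.cloneValue(cell)));
        }
    }
}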

Example 19 with Result

use of org.apache.hadoop.hbase.client.Result in project hbase by apache.

the class CatalogJanitor method getMergedRegionsAndSplitParents.

/**
   * Scans hbase:meta and returns the number of rows scanned, a map of merged
   * regions, and an ordered map of split parents. If the given table name is
   * null, merged regions and split parents of all tables are returned;
   * otherwise, only those of the specified table.
   * @param tableName the table to restrict the scan to; null means all tables
   * @return a triple of the scanned row count, the map of merged regions, and
   *         the map of split parent HRegionInfos
   * @throws IOException if the scan of hbase:meta fails
   */
Triple<Integer, Map<HRegionInfo, Result>, Map<HRegionInfo, Result>> getMergedRegionsAndSplitParents(final TableName tableName) throws IOException {
    final boolean isTableSpecified = (tableName != null);
    // TODO: Only works with single hbase:meta region currently.  Fix.
    final AtomicInteger count = new AtomicInteger(0);
    // Keep a Map of found split parents. These are candidates for cleanup.
    // Use a comparator that sorts split parents before their daughters.
    final Map<HRegionInfo, Result> splitParents = new TreeMap<>(new SplitParentFirstComparator());
    final Map<HRegionInfo, Result> mergedRegions = new TreeMap<>();
    // This visitor collects split parents and counts rows in the hbase:meta table
    MetaTableAccessor.Visitor visitor = new MetaTableAccessor.Visitor() {

        @Override
        public boolean visit(Result r) throws IOException {
            if (r == null || r.isEmpty())
                return true;
            count.incrementAndGet();
            HRegionInfo info = MetaTableAccessor.getHRegionInfo(r);
            // Keep scanning
            if (info == null)
                return true;
            if (isTableSpecified && info.getTable().compareTo(tableName) > 0) {
                // Another table, stop scanning
                return false;
            }
            if (info.isSplitParent())
                splitParents.put(info, r);
            if (r.getValue(HConstants.CATALOG_FAMILY, HConstants.MERGEA_QUALIFIER) != null) {
                mergedRegions.put(info, r);
            }
            // Returning true means "keep scanning"
            return true;
        }
    };
    // Run full scan of hbase:meta catalog table passing in our custom visitor with
    // the start row
    MetaTableAccessor.scanMetaForTableRegions(this.connection, visitor, tableName);
    return new Triple<>(count.get(), mergedRegions, splitParents);
}
Also used : HRegionInfo(org.apache.hadoop.hbase.HRegionInfo) Triple(org.apache.hadoop.hbase.util.Triple) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) MetaTableAccessor(org.apache.hadoop.hbase.MetaTableAccessor) TreeMap(java.util.TreeMap) Result(org.apache.hadoop.hbase.client.Result)
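
The anonymous visitor above is the general pattern for walking hbase:meta: each visit call receives one catalog row as a Result. A minimal standalone sketch of a visitor that only counts non-empty rows, assuming the same MetaTableAccessor.Visitor interface shown above:

import java.io.IOException;
import java.util.concurrent.atomic.AtomicInteger;
import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.client.Result;

public class RowCountingVisitor implements MetaTableAccessor.Visitor {
    private final AtomicInteger rows = new AtomicInteger(0);

    @Override
    public boolean visit(Result r) throws IOException {
        // Skip empty catalog rows but keep scanning
        if (r == null || r.isEmpty()) {
            return true;
        }
        rows.incrementAndGet();
        // Returning true means "keep scanning", as in the visitor above
        return true;
    }

    public int getRows() {
        return rows.get();
    }
}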

Example 20 with Result

use of org.apache.hadoop.hbase.client.Result in project hbase by apache.

the class AssignmentManager method rebuildUserRegions.

/**
   * Rebuild the list of user regions and assignment information.
   * Updates regionstates with findings as we go through list of regions.
   * @return set of servers not online that hosted some regions according to a scan of hbase:meta
   * @throws IOException
   */
Set<ServerName> rebuildUserRegions() throws IOException, KeeperException {
    Set<TableName> disabledOrEnablingTables = tableStateManager.getTablesInStates(TableState.State.DISABLED, TableState.State.ENABLING);
    Set<TableName> disabledOrDisablingOrEnabling = tableStateManager.getTablesInStates(TableState.State.DISABLED, TableState.State.DISABLING, TableState.State.ENABLING);
    // Region assignment from META
    List<Result> results = MetaTableAccessor.fullScanRegions(server.getConnection());
    // Get any new but slow-to-check-in region servers that joined the cluster
    Set<ServerName> onlineServers = serverManager.getOnlineServers().keySet();
    // Set of offline servers to be returned
    Set<ServerName> offlineServers = new HashSet<>();
    // Iterate regions in META
    for (Result result : results) {
        if (result == null) {
            // Skip null results regardless of log level; the log guard must
            // not decide whether the row is skipped
            if (LOG.isDebugEnabled()) {
                LOG.debug("null result from meta - ignoring but this is strange.");
            }
            continue;
        }
        // Keep track of replicas to close. These were the replicas of the
        // originally unmerged regions; the master may have closed them already,
        // but might not have (e.g. if it crashed).
        PairOfSameType<HRegionInfo> p = MetaTableAccessor.getMergeRegions(result);
        if (p.getFirst() != null && p.getSecond() != null) {
            int numReplicas = getNumReplicas(server, p.getFirst().getTable());
            for (HRegionInfo merge : p) {
                for (int i = 1; i < numReplicas; i++) {
                    replicasToClose.add(RegionReplicaUtil.getRegionInfoForReplica(merge, i));
                }
            }
        }
        RegionLocations rl = MetaTableAccessor.getRegionLocations(result);
        if (rl == null) {
            continue;
        }
        HRegionLocation[] locations = rl.getRegionLocations();
        if (locations == null) {
            continue;
        }
        for (HRegionLocation hrl : locations) {
            if (hrl == null)
                continue;
            HRegionInfo regionInfo = hrl.getRegionInfo();
            if (regionInfo == null)
                continue;
            int replicaId = regionInfo.getReplicaId();
            State state = RegionStateStore.getRegionState(result, replicaId);
            // The master should already have closed a split parent's replicas,
            // but may not have managed to, e.g. if it crashed
            if (replicaId == 0 && state.equals(State.SPLIT)) {
                for (HRegionLocation h : locations) {
                    replicasToClose.add(h.getRegionInfo());
                }
            }
            ServerName lastHost = hrl.getServerName();
            ServerName regionLocation = RegionStateStore.getRegionServer(result, replicaId);
            regionStates.createRegionState(regionInfo, state, regionLocation, lastHost);
            if (!regionStates.isRegionInState(regionInfo, State.OPEN)) {
                // Region is not open (either offline or in transition), skip
                continue;
            }
            TableName tableName = regionInfo.getTable();
            if (!onlineServers.contains(regionLocation)) {
                // Region is located on a server that isn't online
                offlineServers.add(regionLocation);
            } else if (!disabledOrEnablingTables.contains(tableName)) {
                // Region is being served and on an active server
                // add only if region not in disabled or enabling table
                regionStates.regionOnline(regionInfo, regionLocation);
                balancer.regionOnline(regionInfo, regionLocation);
            }
            // this will be used in rolling restarts
            if (!disabledOrDisablingOrEnabling.contains(tableName) && !getTableStateManager().isTableState(tableName, TableState.State.ENABLED)) {
                setEnabledTable(tableName);
            }
        }
    }
    return offlineServers;
}
Also used : RegionLocations(org.apache.hadoop.hbase.RegionLocations) Result(org.apache.hadoop.hbase.client.Result) HRegionInfo(org.apache.hadoop.hbase.HRegionInfo) TableName(org.apache.hadoop.hbase.TableName) HRegionLocation(org.apache.hadoop.hbase.HRegionLocation) State(org.apache.hadoop.hbase.master.RegionState.State) TableState(org.apache.hadoop.hbase.client.TableState) RegionOpeningState(org.apache.hadoop.hbase.regionserver.RegionOpeningState) ServerName(org.apache.hadoop.hbase.ServerName) HashSet(java.util.HashSet)
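
The null-checking ladder used above to unpack a meta Result can be isolated for clarity. A minimal sketch using only the MetaTableAccessor and RegionLocations calls that appear in rebuildUserRegions; printLocations is a hypothetical name:

import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.RegionLocations;
import org.apache.hadoop.hbase.client.Result;

public class MetaRowSketch {
    // Logs each replica's region and hosting server for one meta row,
    // skipping nulls the same way rebuildUserRegions does.
    static void printLocations(Result metaRow) {
        RegionLocations rl = MetaTableAccessor.getRegionLocations(metaRow);
        if (rl == null) {
            return; // not a region row
        }
        HRegionLocation[] locations = rl.getRegionLocations();
        if (locations == null) {
            return;
        }
        for (HRegionLocation hrl : locations) {
            if (hrl == null) {
                continue; // replica slot with no known location
            }
            HRegionInfo info = hrl.getRegionInfo();
            if (info == null) {
                continue;
            }
            System.out.println(info.getRegionNameAsString()
                    + " replica=" + info.getReplicaId()
                    + " on " + hrl.getServerName());
        }
    }
}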

Aggregations

Result (org.apache.hadoop.hbase.client.Result): 715
ResultScanner (org.apache.hadoop.hbase.client.ResultScanner): 286
Test (org.junit.Test): 280
Scan (org.apache.hadoop.hbase.client.Scan): 279
Get (org.apache.hadoop.hbase.client.Get): 269
Table (org.apache.hadoop.hbase.client.Table): 224
Put (org.apache.hadoop.hbase.client.Put): 183
Cell (org.apache.hadoop.hbase.Cell): 177
IOException (java.io.IOException): 164
ArrayList (java.util.ArrayList): 143
TableName (org.apache.hadoop.hbase.TableName): 124
Connection (org.apache.hadoop.hbase.client.Connection): 101
Delete (org.apache.hadoop.hbase.client.Delete): 101
Configuration (org.apache.hadoop.conf.Configuration): 70
KeyValue (org.apache.hadoop.hbase.KeyValue): 70
HTableDescriptor (org.apache.hadoop.hbase.HTableDescriptor): 62
InterruptedIOException (java.io.InterruptedIOException): 58
PrivilegedExceptionAction (java.security.PrivilegedExceptionAction): 50
CellScanner (org.apache.hadoop.hbase.CellScanner): 47
HColumnDescriptor (org.apache.hadoop.hbase.HColumnDescriptor): 45