
Example 11 with Scan

use of org.apache.hadoop.hbase.client.Scan in project hbase by apache.

the class BackupSystemTable method createScanForReadRegionServerLastLogRollResult.

/**
   * Creates a Scan operation that loads the last region server log roll results
   * recorded under the given backup root.
   * @param backupRoot backup root identifier appended to the row key prefix
   * @return scan operation
   */
private Scan createScanForReadRegionServerLastLogRollResult(String backupRoot) {
    Scan scan = new Scan();
    byte[] startRow = rowkey(RS_LOG_TS_PREFIX, backupRoot);
    // Build an exclusive stop row by incrementing the last byte of the start
    // row, so the scan covers exactly the rows that share this prefix.
    byte[] stopRow = Arrays.copyOf(startRow, startRow.length);
    stopRow[stopRow.length - 1] = (byte) (stopRow[stopRow.length - 1] + 1);
    scan.setStartRow(startRow);
    scan.setStopRow(stopRow);
    scan.addFamily(BackupSystemTable.META_FAMILY);
    // Only the most recent log roll result per region server is needed.
    scan.setMaxVersions(1);
    return scan;
}
Also used : Scan(org.apache.hadoop.hbase.client.Scan)
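
The Scan built above only describes the read; a caller still has to execute it against the backup system table. A minimal sketch of that step follows, assuming a live Connection; the helper name dumpLastLogRollResults and the table name "backup:system" are illustrative assumptions, not code from BackupSystemTable.

// Hypothetical helper, not part of BackupSystemTable: executes the scan built
// above and prints each matching row key. The table name is an assumption.
private void dumpLastLogRollResults(Connection connection, Scan scan) throws IOException {
    TableName tableName = TableName.valueOf("backup:system"); // assumed name
    try (Table table = connection.getTable(tableName);
         ResultScanner scanner = table.getScanner(scan)) {
        for (Result result : scanner) {
            // One row per region server within the RS_LOG_TS_PREFIX range.
            System.out.println(Bytes.toString(result.getRow()));
        }
    }
}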

Example 12 with Scan

use of org.apache.hadoop.hbase.client.Scan in project hbase by apache.

the class VerifyingRSGroupAdminClient method verify.

public void verify() throws IOException {
    Map<String, RSGroupInfo> groupMap = Maps.newHashMap();
    Set<RSGroupInfo> zList = Sets.newHashSet();
    for (Result result : table.getScanner(new Scan())) {
        RSGroupProtos.RSGroupInfo proto = RSGroupProtos.RSGroupInfo.parseFrom(result.getValue(RSGroupInfoManager.META_FAMILY_BYTES, RSGroupInfoManager.META_QUALIFIER_BYTES));
        groupMap.put(proto.getName(), RSGroupProtobufUtil.toGroupInfo(proto));
    }
    Assert.assertEquals(Sets.newHashSet(groupMap.values()), Sets.newHashSet(wrapped.listRSGroups()));
    try {
        // Cross-check the table contents against the group znodes in ZooKeeper.
        String groupBasePath = ZKUtil.joinZNode(zkw.znodePaths.baseZNode, "rsgroup");
        for (String znode : ZKUtil.listChildrenNoWatch(zkw, groupBasePath)) {
            byte[] data = ZKUtil.getData(zkw, ZKUtil.joinZNode(groupBasePath, znode));
            if (data.length > 0) {
                ProtobufUtil.expectPBMagicPrefix(data);
                ByteArrayInputStream bis = new ByteArrayInputStream(data, ProtobufUtil.lengthOfPBMagic(), data.length);
                zList.add(RSGroupProtobufUtil.toGroupInfo(RSGroupProtos.RSGroupInfo.parseFrom(bis)));
            }
        }
        Assert.assertEquals(zList.size(), groupMap.size());
        for (RSGroupInfo rsGroupInfo : zList) {
            Assert.assertEquals(rsGroupInfo, groupMap.get(rsGroupInfo.getName()));
        }
    } catch (KeeperException | DeserializationException | InterruptedException e) {
        throw new IOException("ZK verification failed", e);
    }
}
Also used : RSGroupProtos(org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos) IOException(java.io.IOException) DeserializationException(org.apache.hadoop.hbase.exceptions.DeserializationException) Result(org.apache.hadoop.hbase.client.Result) ByteArrayInputStream(java.io.ByteArrayInputStream) Scan(org.apache.hadoop.hbase.client.Scan) KeeperException(org.apache.zookeeper.KeeperException)
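
The znode branch above follows a common HBase pattern: data written with a protobuf magic prefix is first validated, then parsed from the offset just past the prefix. A sketch of that pattern, pulled out of the loop into a hypothetical standalone helper:

// Hypothetical helper extracted from the loop above for clarity: validates the
// PB magic prefix, then parses the RSGroupInfo protobuf that follows it.
private static RSGroupInfo parseGroupZNode(byte[] data) throws IOException, DeserializationException {
    ProtobufUtil.expectPBMagicPrefix(data); // throws DeserializationException if the prefix is missing
    ByteArrayInputStream bis = new ByteArrayInputStream(data, ProtobufUtil.lengthOfPBMagic(), data.length);
    return RSGroupProtobufUtil.toGroupInfo(RSGroupProtos.RSGroupInfo.parseFrom(bis));
}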

Example 13 with Scan

use of org.apache.hadoop.hbase.client.Scan in project hbase by apache.

the class TestRemoteTable method testIteratorScaner.

/**
   * Tests the RemoteHTable.Scanner.iterator method.
   */
@Test
public void testIteratorScaner() throws IOException {
    // Insert four rows, each with the same column and value.
    List<Put> puts = new ArrayList<>(4);
    for (byte[] row : new byte[][] { ROW_1, ROW_2, ROW_3, ROW_4 }) {
        Put put = new Put(row);
        put.addColumn(COLUMN_1, QUALIFIER_1, VALUE_1);
        puts.add(put);
    }
    remoteTable.put(puts);
    // Drive the scan through the scanner's explicit Iterator and count rows,
    // closing the scanner when done.
    try (ResultScanner scanner = remoteTable.getScanner(new Scan())) {
        Iterator<Result> iterator = scanner.iterator();
        assertTrue(iterator.hasNext());
        int counter = 0;
        while (iterator.hasNext()) {
            iterator.next();
            counter++;
        }
        assertEquals(4, counter);
    }
}
Also used : ResultScanner(org.apache.hadoop.hbase.client.ResultScanner) ArrayList(java.util.ArrayList) Scan(org.apache.hadoop.hbase.client.Scan) Put(org.apache.hadoop.hbase.client.Put) Result(org.apache.hadoop.hbase.client.Result) Test(org.junit.Test)
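
Because ResultScanner implements Iterable<Result>, the explicit Iterator in this test is equivalent to a for-each loop. A minimal sketch of the same count written that way:

// Equivalent row count using for-each; ResultScanner is Iterable and Closeable.
int counter = 0;
try (ResultScanner scanner = remoteTable.getScanner(new Scan())) {
    for (Result result : scanner) {
        counter++;
    }
}
assertEquals(4, counter);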

Example 14 with Scan

use of org.apache.hadoop.hbase.client.Scan in project hbase by apache.

the class VerifyReplication method createSubmittableJob.

/**
   * Sets up the actual job.
   *
   * @param conf  The current configuration.
   * @param args  The command line parameters.
   * @return The newly created job.
   * @throws java.io.IOException When setting up the job fails.
   */
public static Job createSubmittableJob(Configuration conf, String[] args) throws IOException {
    if (!doCommandLine(args)) {
        return null;
    }
    conf.set(NAME + ".peerId", peerId);
    conf.set(NAME + ".tableName", tableName);
    conf.setLong(NAME + ".startTime", startTime);
    conf.setLong(NAME + ".endTime", endTime);
    conf.setInt(NAME + ".sleepMsBeforeReCompare", sleepMsBeforeReCompare);
    conf.set(NAME + ".delimiter", delimiter);
    conf.setBoolean(NAME + ".verbose", verbose);
    conf.setBoolean(NAME + ".includeDeletedCells", includeDeletedCells);
    if (families != null) {
        conf.set(NAME + ".families", families);
    }
    if (rowPrefixes != null) {
        conf.set(NAME + ".rowPrefixes", rowPrefixes);
    }
    Pair<ReplicationPeerConfig, Configuration> peerConfigPair = getPeerQuorumConfig(conf);
    ReplicationPeerConfig peerConfig = peerConfigPair.getFirst();
    String peerQuorumAddress = peerConfig.getClusterKey();
    LOG.info("Peer Quorum Address: " + peerQuorumAddress + ", Peer Configuration: " + peerConfig.getConfiguration());
    conf.set(NAME + ".peerQuorumAddress", peerQuorumAddress);
    HBaseConfiguration.setWithPrefix(conf, PEER_CONFIG_PREFIX, peerConfig.getConfiguration().entrySet());
    conf.setInt(NAME + ".versions", versions);
    LOG.info("Number of version: " + versions);
    Job job = Job.getInstance(conf, conf.get(JOB_NAME_CONF_KEY, NAME + "_" + tableName));
    job.setJarByClass(VerifyReplication.class);
    Scan scan = new Scan();
    scan.setTimeRange(startTime, endTime);
    scan.setRaw(includeDeletedCells);
    if (versions >= 0) {
        scan.setMaxVersions(versions);
        LOG.info("Number of versions set to " + versions);
    }
    if (families != null) {
        String[] fams = families.split(",");
        for (String fam : fams) {
            scan.addFamily(Bytes.toBytes(fam));
        }
    }
    setRowPrefixFilter(scan, rowPrefixes);
    TableMapReduceUtil.initTableMapperJob(tableName, scan, Verifier.class, null, null, job);
    Configuration peerClusterConf = peerConfigPair.getSecond();
    // Obtain the auth token from peer cluster
    TableMapReduceUtil.initCredentialsForCluster(job, peerClusterConf);
    job.setOutputFormatClass(NullOutputFormat.class);
    job.setNumReduceTasks(0);
    return job;
}
Also used : ReplicationPeerConfig(org.apache.hadoop.hbase.replication.ReplicationPeerConfig) Configuration(org.apache.hadoop.conf.Configuration) HBaseConfiguration(org.apache.hadoop.hbase.HBaseConfiguration) Scan(org.apache.hadoop.hbase.client.Scan) Job(org.apache.hadoop.mapreduce.Job)
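
setRowPrefixFilter is referenced but not shown in this example. A plausible sketch of such a helper follows, assuming it maps the comma-separated prefixes onto a MUST_PASS_ONE FilterList of PrefixFilter instances; this illustrates the pattern and is not necessarily VerifyReplication's actual implementation.

// Hypothetical sketch of the row-prefix helper used above; treat the body as
// an assumption. It keeps rows whose key starts with any of the listed prefixes.
private static void setRowPrefixFilter(Scan scan, String rowPrefixes) {
    if (rowPrefixes == null || rowPrefixes.isEmpty()) {
        return; // no prefix restriction requested
    }
    FilterList filters = new FilterList(FilterList.Operator.MUST_PASS_ONE);
    for (String prefix : rowPrefixes.split(",")) {
        filters.addFilter(new PrefixFilter(Bytes.toBytes(prefix)));
    }
    scan.setFilter(filters);
}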

Example 15 with Scan

use of org.apache.hadoop.hbase.client.Scan in project hbase by apache.

the class AccessControlLists method loadAll.

/**
   * Loads all of the permission grants stored in a region of the {@code _acl_}
   * table.
   *
   * @param aclRegion the region of the {@code _acl_} table to scan
   * @return a map of the permissions for this table.
   * @throws IOException if the region does not belong to the {@code _acl_}
   *         table or the scan fails
   */
static Map<byte[], ListMultimap<String, TablePermission>> loadAll(Region aclRegion) throws IOException {
    if (!isAclRegion(aclRegion)) {
        throw new IOException("Can only load permissions from " + ACL_TABLE_NAME);
    }
    Map<byte[], ListMultimap<String, TablePermission>> allPerms = new TreeMap<>(Bytes.BYTES_RAWCOMPARATOR);
    // do a full scan of _acl_ table
    Scan scan = new Scan();
    scan.addFamily(ACL_LIST_FAMILY);
    InternalScanner iScanner = null;
    try {
        iScanner = aclRegion.getScanner(scan);
        while (true) {
            List<Cell> row = new ArrayList<>();
            // next() fills 'row' with the cells of the next row and returns
            // false once the region has no rows left after this one.
            boolean hasNext = iScanner.next(row);
            ListMultimap<String, TablePermission> perms = ArrayListMultimap.create();
            byte[] entry = null;
            for (Cell kv : row) {
                // Every cell in the batch shares the same row key; capture it once.
                if (entry == null) {
                    entry = CellUtil.cloneRow(kv);
                }
                Pair<String, TablePermission> permissionsOfUserOnTable = parsePermissionRecord(entry, kv);
                if (permissionsOfUserOnTable != null) {
                    String username = permissionsOfUserOnTable.getFirst();
                    TablePermission permissions = permissionsOfUserOnTable.getSecond();
                    perms.put(username, permissions);
                }
            }
            if (entry != null) {
                allPerms.put(entry, perms);
            }
            if (!hasNext) {
                break;
            }
        }
    } finally {
        if (iScanner != null) {
            iScanner.close();
        }
    }
    return allPerms;
}
Also used : InternalScanner(org.apache.hadoop.hbase.regionserver.InternalScanner) ArrayList(java.util.ArrayList) IOException(java.io.IOException) TreeMap(java.util.TreeMap) Scan(org.apache.hadoop.hbase.client.Scan) ArrayListMultimap(com.google.common.collect.ArrayListMultimap) ListMultimap(com.google.common.collect.ListMultimap) Cell(org.apache.hadoop.hbase.Cell)
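
A minimal sketch of consuming the map returned by loadAll, printing one line per grant; the output format and variable names are assumptions, not AccessControlLists code.

// Hypothetical caller: dumps every (entry, user, permission) triple.
Map<byte[], ListMultimap<String, TablePermission>> allPerms = loadAll(aclRegion);
for (Map.Entry<byte[], ListMultimap<String, TablePermission>> e : allPerms.entrySet()) {
    String entry = Bytes.toString(e.getKey());
    for (Map.Entry<String, TablePermission> grant : e.getValue().entries()) {
        System.out.println(entry + ": " + grant.getKey() + " -> " + grant.getValue());
    }
}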

Aggregations

Scan (org.apache.hadoop.hbase.client.Scan): 938
Test (org.junit.Test): 494
ResultScanner (org.apache.hadoop.hbase.client.ResultScanner): 295
Result (org.apache.hadoop.hbase.client.Result): 279
Cell (org.apache.hadoop.hbase.Cell): 256
ArrayList (java.util.ArrayList): 236
Table (org.apache.hadoop.hbase.client.Table): 174
Put (org.apache.hadoop.hbase.client.Put): 161
BaseConnectionlessQueryTest (org.apache.phoenix.query.BaseConnectionlessQueryTest): 153
IOException (java.io.IOException): 134
TableName (org.apache.hadoop.hbase.TableName): 97
Filter (org.apache.hadoop.hbase.filter.Filter): 94
Delete (org.apache.hadoop.hbase.client.Delete): 93
KeyValue (org.apache.hadoop.hbase.KeyValue): 84
Connection (org.apache.hadoop.hbase.client.Connection): 79
SkipScanFilter (org.apache.phoenix.filter.SkipScanFilter): 78
PhoenixConnection (org.apache.phoenix.jdbc.PhoenixConnection): 78
RowKeyComparisonFilter (org.apache.phoenix.filter.RowKeyComparisonFilter): 72
Configuration (org.apache.hadoop.conf.Configuration): 53
HTableDescriptor (org.apache.hadoop.hbase.HTableDescriptor): 51