
Example 11 with HTableDescriptor

Use of org.apache.hadoop.hbase.HTableDescriptor in project hbase by apache.

From class HMaster, method getTableDescriptors.

/**
   * @return the list of table descriptors after filtering by regex and whether to include
   *    system tables, etc.
   * @throws IOException
   */
private List<HTableDescriptor> getTableDescriptors(final List<HTableDescriptor> htds,
        final String namespace, final String regex, final List<TableName> tableNameList,
        final boolean includeSysTables) throws IOException {
    if (tableNameList == null || tableNameList.isEmpty()) {
        // request for all TableDescriptors
        Collection<HTableDescriptor> allHtds;
        if (namespace != null && namespace.length() > 0) {
            // Check that the namespace exists; fails if it does not.
            this.clusterSchemaService.getNamespace(namespace);
            allHtds = tableDescriptors.getByNamespace(namespace).values();
        } else {
            allHtds = tableDescriptors.getAll().values();
        }
        for (HTableDescriptor desc : allHtds) {
            if (tableStateManager.isTablePresent(desc.getTableName()) && (includeSysTables || !desc.getTableName().isSystemTable())) {
                htds.add(desc);
            }
        }
    } else {
        for (TableName s : tableNameList) {
            if (tableStateManager.isTablePresent(s)) {
                HTableDescriptor desc = tableDescriptors.get(s);
                if (desc != null) {
                    htds.add(desc);
                }
            }
        }
    }
    // Retains only those matched by regular expression.
    if (regex != null) {
        filterTablesByRegex(htds, Pattern.compile(regex));
    }
    return htds;
}
Also used : TableName(org.apache.hadoop.hbase.TableName) HTableDescriptor(org.apache.hadoop.hbase.HTableDescriptor)
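
getTableDescriptors() is a private master-side helper, but the same filtering is reachable from client code through the Admin API. A minimal client-side sketch, assuming the HTableDescriptor-era Admin interface; the pattern usertable.* and the class name ListTablesByRegex are illustrative:

import java.io.IOException;
import java.util.regex.Pattern;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class ListTablesByRegex {
    public static void main(String[] args) throws IOException {
        Configuration conf = HBaseConfiguration.create();
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
            // listTables(Pattern, boolean) is served by the master's
            // getTableDescriptors(); the boolean toggles system tables.
            HTableDescriptor[] htds = admin.listTables(Pattern.compile("usertable.*"), false);
            for (HTableDescriptor htd : htds) {
                System.out.println(htd.getTableName().getNameAsString());
            }
        }
    }
}

Passing true as the second argument would also include system tables such as hbase:meta, mirroring the includeSysTables flag above.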

Example 12 with HTableDescriptor

Use of org.apache.hadoop.hbase.HTableDescriptor in project hbase by apache.

From class HMaster, method filterTablesByRegex.

/**
   * Removes the table descriptors that don't match the pattern.
   * @param descriptors list of table descriptors to filter
   * @param pattern the regex to use
   */
private static void filterTablesByRegex(final Collection<HTableDescriptor> descriptors, final Pattern pattern) {
    final String defaultNS = NamespaceDescriptor.DEFAULT_NAMESPACE_NAME_STR;
    Iterator<HTableDescriptor> itr = descriptors.iterator();
    while (itr.hasNext()) {
        HTableDescriptor htd = itr.next();
        String tableName = htd.getTableName().getNameAsString();
        boolean matched = pattern.matcher(tableName).matches();
        if (!matched && htd.getTableName().getNamespaceAsString().equals(defaultNS)) {
            matched = pattern.matcher(defaultNS + TableName.NAMESPACE_DELIM + tableName).matches();
        }
        if (!matched) {
            itr.remove();
        }
    }
}
Also used : HTableDescriptor(org.apache.hadoop.hbase.HTableDescriptor)
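
The second match attempt is the subtle part: a table in the default namespace should match both its bare name and its namespace-qualified name. A standalone sketch of that fallback, using the same constants as the method above (the class name DefaultNamespaceMatch is hypothetical):

import java.util.regex.Pattern;

import org.apache.hadoop.hbase.NamespaceDescriptor;
import org.apache.hadoop.hbase.TableName;

public class DefaultNamespaceMatch {
    public static void main(String[] args) {
        Pattern pattern = Pattern.compile("default:my_.*");
        // A table in the default namespace reports its bare name ...
        String tableName = "my_table";
        boolean matched = pattern.matcher(tableName).matches();
        if (!matched) {
            // ... so filterTablesByRegex() retries with the namespace
            // prefixed, and "default:my_table" satisfies the pattern.
            String qualified = NamespaceDescriptor.DEFAULT_NAMESPACE_NAME_STR
                + TableName.NAMESPACE_DELIM + tableName;
            matched = pattern.matcher(qualified).matches();
        }
        System.out.println(matched); // true
    }
}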

Example 13 with HTableDescriptor

Use of org.apache.hadoop.hbase.HTableDescriptor in project hbase by apache.

From class CatalogJanitor, method cleanMergeRegion.

/**
   * If the merged region no longer holds references to its parent regions, archive the
   * parents on HDFS and delete the merge references from hbase:meta.
   * @param mergedRegion the region produced by the merge
   * @param regionA one parent of the merged region
   * @param regionB the other parent of the merged region
   * @return true if the references in the merged region were deleted from hbase:meta
   *         and the parent regions' files were archived on the filesystem
   * @throws IOException
   */
boolean cleanMergeRegion(final HRegionInfo mergedRegion, final HRegionInfo regionA, final HRegionInfo regionB) throws IOException {
    FileSystem fs = this.services.getMasterFileSystem().getFileSystem();
    Path rootdir = this.services.getMasterFileSystem().getRootDir();
    Path tabledir = FSUtils.getTableDir(rootdir, mergedRegion.getTable());
    HTableDescriptor htd = getTableDescriptor(mergedRegion.getTable());
    HRegionFileSystem regionFs = null;
    try {
        regionFs = HRegionFileSystem.openRegionFromFileSystem(this.services.getConfiguration(), fs, tabledir, mergedRegion, true);
    } catch (IOException e) {
        LOG.warn("Merged region does not exist: " + mergedRegion.getEncodedName());
    }
    if (regionFs == null || !regionFs.hasReferences(htd)) {
        LOG.debug("Deleting region " + regionA.getRegionNameAsString() + " and " + regionB.getRegionNameAsString() + " from fs because merged region no longer holds references");
        HFileArchiver.archiveRegion(this.services.getConfiguration(), fs, regionA);
        HFileArchiver.archiveRegion(this.services.getConfiguration(), fs, regionB);
        MetaTableAccessor.deleteMergeQualifiers(services.getConnection(), mergedRegion);
        services.getServerManager().removeRegion(regionA);
        services.getServerManager().removeRegion(regionB);
        FavoredNodesManager fnm = this.services.getFavoredNodesManager();
        if (fnm != null) {
            fnm.deleteFavoredNodesForRegions(Lists.newArrayList(regionA, regionB));
        }
        return true;
    }
    return false;
}
Also used : Path(org.apache.hadoop.fs.Path) HRegionFileSystem(org.apache.hadoop.hbase.regionserver.HRegionFileSystem) FileSystem(org.apache.hadoop.fs.FileSystem) HRegionFileSystem(org.apache.hadoop.hbase.regionserver.HRegionFileSystem) FavoredNodesManager(org.apache.hadoop.hbase.favored.FavoredNodesManager) IOException(java.io.IOException) HTableDescriptor(org.apache.hadoop.hbase.HTableDescriptor)
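
The reference-file check is what gates the cleanup: until compaction rewrites the merged region's store files, they still reference regionA and regionB, and archiving the parents would break reads. A minimal extraction of that check under the same internal HRegionFileSystem API used above (the class and method names are hypothetical):

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.regionserver.HRegionFileSystem;

public class MergeReferenceCheck {
    // Returns true while the merged region still holds reference files back
    // to its parents, i.e. it is not yet safe to archive regionA/regionB.
    static boolean stillReferenced(Configuration conf, FileSystem fs, Path tableDir,
            HRegionInfo mergedRegion, HTableDescriptor htd) throws IOException {
        HRegionFileSystem regionFs = HRegionFileSystem.openRegionFromFileSystem(
            conf, fs, tableDir, mergedRegion, true); // true = read-only open
        return regionFs.hasReferences(htd);
    }
}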

Example 14 with HTableDescriptor

Use of org.apache.hadoop.hbase.HTableDescriptor in project hbase by apache.

From class RegionSplitter, method splitScan.

static LinkedList<Pair<byte[], byte[]>> splitScan(LinkedList<Pair<byte[], byte[]>> regionList,
        final Connection connection, final TableName tableName, SplitAlgorithm splitAlgo)
        throws IOException, InterruptedException {
    LinkedList<Pair<byte[], byte[]>> finished = Lists.newLinkedList();
    LinkedList<Pair<byte[], byte[]>> logicalSplitting = Lists.newLinkedList();
    LinkedList<Pair<byte[], byte[]>> physicalSplitting = Lists.newLinkedList();
    // Get table info
    Pair<Path, Path> tableDirAndSplitFile = getTableDirAndSplitFile(connection.getConfiguration(), tableName);
    Path tableDir = tableDirAndSplitFile.getFirst();
    FileSystem fs = tableDir.getFileSystem(connection.getConfiguration());
    // Clear the cache to forcibly refresh region information
    ((ClusterConnection) connection).clearRegionCache();
    HTableDescriptor htd = null;
    try (Table table = connection.getTable(tableName)) {
        htd = table.getTableDescriptor();
    }
    try (RegionLocator regionLocator = connection.getRegionLocator(tableName)) {
        // for every region that hasn't been verified as a finished split
        for (Pair<byte[], byte[]> region : regionList) {
            byte[] start = region.getFirst();
            byte[] split = region.getSecond();
            // see if the new split daughter region has come online
            try {
                HRegionInfo dri = regionLocator.getRegionLocation(split).getRegionInfo();
                if (dri.isOffline() || !Bytes.equals(dri.getStartKey(), split)) {
                    logicalSplitting.add(region);
                    continue;
                }
            } catch (NoServerForRegionException nsfre) {
                // NSFRE will occur if the old hbase:meta entry has no server assigned
                LOG.info(nsfre);
                logicalSplitting.add(region);
                continue;
            }
            try {
                // when a daughter region is opened, a compaction is triggered
                // wait until compaction completes for both daughter regions
                LinkedList<HRegionInfo> check = Lists.newLinkedList();
                check.add(regionLocator.getRegionLocation(start).getRegionInfo());
                check.add(regionLocator.getRegionLocation(split).getRegionInfo());
                for (HRegionInfo hri : check.toArray(new HRegionInfo[check.size()])) {
                    byte[] sk = hri.getStartKey();
                    if (sk.length == 0) {
                        sk = splitAlgo.firstRow();
                    }
                    HRegionFileSystem regionFs = HRegionFileSystem.openRegionFromFileSystem(connection.getConfiguration(), fs, tableDir, hri, true);
                    // Check every Column Family for that region -- check does not have references.
                    boolean refFound = false;
                    for (HColumnDescriptor c : htd.getFamilies()) {
                        if ((refFound = regionFs.hasReferences(c.getNameAsString()))) {
                            break;
                        }
                    }
                    // compaction is completed when all reference files are gone
                    if (!refFound) {
                        check.remove(hri);
                    }
                }
                if (check.isEmpty()) {
                    finished.add(region);
                } else {
                    physicalSplitting.add(region);
                }
            } catch (NoServerForRegionException nsfre) {
                LOG.debug("No Server Exception thrown for: " + splitAlgo.rowToStr(start));
                physicalSplitting.add(region);
                ((ClusterConnection) connection).clearRegionCache();
            }
        }
        LOG.debug("Split Scan: " + finished.size() + " finished / " + logicalSplitting.size() + " split wait / " + physicalSplitting.size() + " reference wait");
        return finished;
    }
}
Also used : Path(org.apache.hadoop.fs.Path) RegionLocator(org.apache.hadoop.hbase.client.RegionLocator) Table(org.apache.hadoop.hbase.client.Table) HColumnDescriptor(org.apache.hadoop.hbase.HColumnDescriptor) HTableDescriptor(org.apache.hadoop.hbase.HTableDescriptor) NoServerForRegionException(org.apache.hadoop.hbase.client.NoServerForRegionException) HRegionInfo(org.apache.hadoop.hbase.HRegionInfo) ClusterConnection(org.apache.hadoop.hbase.client.ClusterConnection) HRegionFileSystem(org.apache.hadoop.hbase.regionserver.HRegionFileSystem) FileSystem(org.apache.hadoop.fs.FileSystem) HRegionFileSystem(org.apache.hadoop.hbase.regionserver.HRegionFileSystem)
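
The inner loop decides whether a daughter region has physically completed its split by probing each column family for reference files. The same check, extracted as a hypothetical standalone helper for clarity:

import java.io.IOException;

import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.regionserver.HRegionFileSystem;

public class SplitReferenceCheck {
    // A daughter region has physically finished its split only when no
    // column family directory contains reference files any more.
    static boolean hasAnyReference(HRegionFileSystem regionFs, HTableDescriptor htd)
            throws IOException {
        for (HColumnDescriptor c : htd.getFamilies()) {
            if (regionFs.hasReferences(c.getNameAsString())) {
                return true; // compaction has not finished for this family
            }
        }
        return false; // all reference files gone
    }
}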

Example 15 with HTableDescriptor

Use of org.apache.hadoop.hbase.HTableDescriptor in project hbase by apache.

From class AccessControlClient, method getUserPermissions.

/**
   * List all the user permissions matching the given pattern. If the pattern is null, the
   * behavior depends on whether the user has global admin privileges: if so, the global
   * permissions along with the list of superusers are returned; otherwise, no rows are
   * returned.
   * @param connection the Connection instance to use
   * @param tableRegex the regular expression string to match against
   * @return a list of UserPermissions
public static List<UserPermission> getUserPermissions(Connection connection, String tableRegex) throws Throwable {
    /** TODO: Pass an rpcController
    HBaseRpcController controller
      = ((ClusterConnection) connection).getRpcControllerFactory().newController();
      */
    List<UserPermission> permList = new ArrayList<>();
    try (Table table = connection.getTable(ACL_TABLE_NAME)) {
        try (Admin admin = connection.getAdmin()) {
            CoprocessorRpcChannel service = table.coprocessorService(HConstants.EMPTY_START_ROW);
            BlockingInterface protocol = AccessControlProtos.AccessControlService.newBlockingStub(service);
            HTableDescriptor[] htds = null;
            if (tableRegex == null || tableRegex.isEmpty()) {
                permList = AccessControlUtil.getUserPermissions(null, protocol);
            } else if (tableRegex.charAt(0) == '@') {
                // Namespaces
                String namespaceRegex = tableRegex.substring(1);
                for (NamespaceDescriptor nsds : admin.listNamespaceDescriptors()) {
                    // Read out all namespaces
                    String namespace = nsds.getName();
                    if (namespace.matches(namespaceRegex)) {
                        // Match the given namespace regex?
                        permList.addAll(AccessControlUtil.getUserPermissions(null, protocol, Bytes.toBytes(namespace)));
                    }
                }
            } else {
                // Tables
                htds = admin.listTables(Pattern.compile(tableRegex), true);
                for (HTableDescriptor hd : htds) {
                    permList.addAll(AccessControlUtil.getUserPermissions(null, protocol, hd.getTableName()));
                }
            }
        }
    }
    return permList;
}
Also used : BlockingInterface(org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos.AccessControlService.BlockingInterface) Table(org.apache.hadoop.hbase.client.Table) CoprocessorRpcChannel(org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel) ArrayList(java.util.ArrayList) NamespaceDescriptor(org.apache.hadoop.hbase.NamespaceDescriptor) Admin(org.apache.hadoop.hbase.client.Admin) HTableDescriptor(org.apache.hadoop.hbase.HTableDescriptor)
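
For completeness, a minimal caller sketch. The regex dispatch mirrors the method above: null or empty lists global permissions, a leading @ selects namespaces, and anything else is matched against table names. The class name ListPermissions and the pattern my_table.* are illustrative:

import java.util.List;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.security.access.AccessControlClient;
import org.apache.hadoop.hbase.security.access.UserPermission;

public class ListPermissions {
    public static void main(String[] args) throws Throwable {
        Configuration conf = HBaseConfiguration.create();
        try (Connection connection = ConnectionFactory.createConnection(conf)) {
            // "my_table.*" matches table names; "@ns.*" would match
            // namespaces; null or "" lists global permissions instead.
            List<UserPermission> perms =
                AccessControlClient.getUserPermissions(connection, "my_table.*");
            for (UserPermission p : perms) {
                System.out.println(p);
            }
        }
    }
}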

Aggregations

HTableDescriptor (org.apache.hadoop.hbase.HTableDescriptor): 867 usages
HColumnDescriptor (org.apache.hadoop.hbase.HColumnDescriptor): 555 usages
Test (org.junit.Test): 425 usages
TableName (org.apache.hadoop.hbase.TableName): 258 usages
HRegionInfo (org.apache.hadoop.hbase.HRegionInfo): 171 usages
IOException (java.io.IOException): 167 usages
Put (org.apache.hadoop.hbase.client.Put): 149 usages
Table (org.apache.hadoop.hbase.client.Table): 134 usages
Path (org.apache.hadoop.fs.Path): 127 usages
Admin (org.apache.hadoop.hbase.client.Admin): 121 usages
Configuration (org.apache.hadoop.conf.Configuration): 87 usages
HBaseAdmin (org.apache.hadoop.hbase.client.HBaseAdmin): 77 usages
ArrayList (java.util.ArrayList): 75 usages
FileSystem (org.apache.hadoop.fs.FileSystem): 66 usages
Result (org.apache.hadoop.hbase.client.Result): 62 usages
Connection (org.apache.hadoop.hbase.client.Connection): 57 usages
Scan (org.apache.hadoop.hbase.client.Scan): 51 usages
Cell (org.apache.hadoop.hbase.Cell): 44 usages
Delete (org.apache.hadoop.hbase.client.Delete): 44 usages
HRegion (org.apache.hadoop.hbase.regionserver.HRegion): 43 usages