Example 26 with TableDescriptor

use of org.apache.hadoop.hbase.client.TableDescriptor in project hbase by apache.

the class HMaster method createMissingCFsInMetaDuringUpgrade.

private void createMissingCFsInMetaDuringUpgrade(TableDescriptor metaDescriptor) throws IOException {
    TableDescriptor newMetaDesc = TableDescriptorBuilder.newBuilder(metaDescriptor)
        .setColumnFamily(FSTableDescriptors.getTableFamilyDescForMeta(conf))
        .setColumnFamily(FSTableDescriptors.getReplBarrierFamilyDescForMeta())
        .build();
    long pid = this.modifyTable(TableName.META_TABLE_NAME, () -> newMetaDesc, 0, 0, false);
    int tries = 30;
    while (!(getMasterProcedureExecutor().isFinished(pid)) && getMasterProcedureExecutor().isRunning() && tries > 0) {
        try {
            Thread.sleep(1000);
        } catch (InterruptedException e) {
            throw new IOException("Wait interrupted", e);
        }
        tries--;
    }
    if (tries <= 0) {
        throw new HBaseIOException("Failed to add table and rep_barrier CFs to meta in a given time.");
    } else {
        Procedure<?> result = getMasterProcedureExecutor().getResult(pid);
        if (result != null && result.isFailed()) {
            throw new IOException("Failed to add table and rep_barrier CFs to meta. " + MasterProcedureUtil.unwrapRemoteIOException(result));
        }
    }
}
Also used : IOException(java.io.IOException) InterruptedIOException(java.io.InterruptedIOException) DoNotRetryIOException(org.apache.hadoop.hbase.DoNotRetryIOException) HBaseIOException(org.apache.hadoop.hbase.HBaseIOException) TableDescriptor(org.apache.hadoop.hbase.client.TableDescriptor) RSGroupAdminEndpoint(org.apache.hadoop.hbase.rsgroup.RSGroupAdminEndpoint)
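
Because TableDescriptor instances are immutable, the copy-and-modify pattern above (TableDescriptorBuilder.newBuilder(existing) ... build()) is how new column families get added to an existing descriptor. A minimal standalone sketch of the same pattern, with a hypothetical "extra" family name:

import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class AddFamilySketch {
    // Copy an existing (immutable) descriptor and add one more column family.
    // Note that setColumnFamily rejects a duplicate family name; use
    // modifyColumnFamily to update a family that is already present.
    static TableDescriptor withExtraFamily(TableDescriptor existing) {
        return TableDescriptorBuilder.newBuilder(existing)
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of("extra"))
            .build();
    }
}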

Example 27 with TableDescriptor

use of org.apache.hadoop.hbase.client.TableDescriptor in project hbase by apache.

the class TableSnapshotInputFormatImpl method getSplits.

public static List<InputSplit> getSplits(Scan scan, SnapshotManifest manifest, List<RegionInfo> regionManifests, Path restoreDir, Configuration conf, RegionSplitter.SplitAlgorithm sa, int numSplits) throws IOException {
    // load table descriptor
    TableDescriptor htd = manifest.getTableDescriptor();
    Path tableDir = CommonFSUtils.getTableDir(restoreDir, htd.getTableName());
    boolean localityEnabled = conf.getBoolean(SNAPSHOT_INPUTFORMAT_LOCALITY_ENABLED_KEY, SNAPSHOT_INPUTFORMAT_LOCALITY_ENABLED_DEFAULT);
    boolean scanMetricsEnabled = conf.getBoolean(SNAPSHOT_INPUTFORMAT_SCAN_METRICS_ENABLED, SNAPSHOT_INPUTFORMAT_SCAN_METRICS_ENABLED_DEFAULT);
    scan.setScanMetricsEnabled(scanMetricsEnabled);
    boolean useRegionLoc = conf.getBoolean(SNAPSHOT_INPUTFORMAT_LOCALITY_BY_REGION_LOCATION, SNAPSHOT_INPUTFORMAT_LOCALITY_BY_REGION_LOCATION_DEFAULT);
    Connection connection = null;
    RegionLocator regionLocator = null;
    if (localityEnabled && useRegionLoc) {
        Configuration newConf = new Configuration(conf);
        newConf.setInt("hbase.hconnection.threads.max", 1);
        try {
            connection = ConnectionFactory.createConnection(newConf);
            regionLocator = connection.getRegionLocator(htd.getTableName());
            /* Get all locations for the table and cache them */
            regionLocator.getAllRegionLocations();
        } finally {
            if (connection != null) {
                connection.close();
            }
        }
    }
    List<InputSplit> splits = new ArrayList<>();
    for (RegionInfo hri : regionManifests) {
        // compute preferred hosts (locality) for this region's split(s)
        List<String> hosts = null;
        if (localityEnabled) {
            if (regionLocator != null) {
                /* Get Location from the local cache */
                HRegionLocation location = regionLocator.getRegionLocation(hri.getStartKey(), false);
                hosts = new ArrayList<>(1);
                hosts.add(location.getHostname());
            } else {
                hosts = calculateLocationsForInputSplit(conf, htd, hri, tableDir);
            }
        }
        if (numSplits > 1) {
            byte[][] sp = sa.split(hri.getStartKey(), hri.getEndKey(), numSplits, true);
            for (int i = 0; i < sp.length - 1; i++) {
                if (PrivateCellUtil.overlappingKeys(scan.getStartRow(), scan.getStopRow(), sp[i], sp[i + 1])) {
                    Scan boundedScan = new Scan(scan);
                    if (scan.getStartRow().length == 0) {
                        boundedScan.withStartRow(sp[i]);
                    } else {
                        boundedScan.withStartRow(Bytes.compareTo(scan.getStartRow(), sp[i]) > 0 ? scan.getStartRow() : sp[i]);
                    }
                    if (scan.getStopRow().length == 0) {
                        boundedScan.withStopRow(sp[i + 1]);
                    } else {
                        boundedScan.withStopRow(Bytes.compareTo(scan.getStopRow(), sp[i + 1]) < 0 ? scan.getStopRow() : sp[i + 1]);
                    }
                    splits.add(new InputSplit(htd, hri, hosts, boundedScan, restoreDir));
                }
            }
        } else {
            if (PrivateCellUtil.overlappingKeys(scan.getStartRow(), scan.getStopRow(), hri.getStartKey(), hri.getEndKey())) {
                splits.add(new InputSplit(htd, hri, hosts, scan, restoreDir));
            }
        }
    }
    return splits;
}
Also used : Path(org.apache.hadoop.fs.Path) RegionLocator(org.apache.hadoop.hbase.client.RegionLocator) Configuration(org.apache.hadoop.conf.Configuration) Connection(org.apache.hadoop.hbase.client.Connection) ArrayList(java.util.ArrayList) RegionInfo(org.apache.hadoop.hbase.client.RegionInfo) TableDescriptor(org.apache.hadoop.hbase.client.TableDescriptor) HRegionLocation(org.apache.hadoop.hbase.HRegionLocation) Scan(org.apache.hadoop.hbase.client.Scan)
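
When numSplits is greater than one, the method leans on RegionSplitter.SplitAlgorithm to subdivide each region's key range, then clamps the scan to each sub-range. A minimal sketch of just that split call, assuming the four-argument split overload used above, UniformSplit, and made-up "aaa"/"zzz" boundary keys:

import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.RegionSplitter;

public class SplitAlgorithmSketch {
    public static void main(String[] args) {
        RegionSplitter.SplitAlgorithm sa = new RegionSplitter.UniformSplit();
        // With inclusive=true the boundary keys themselves are returned, so
        // four sub-ranges come back as five keys; sp[i]..sp[i + 1] bounds
        // sub-range i, mirroring the loop in getSplits above.
        byte[][] sp = sa.split(Bytes.toBytes("aaa"), Bytes.toBytes("zzz"), 4, true);
        for (byte[] boundary : sp) {
            System.out.println(Bytes.toStringBinary(boundary));
        }
    }
}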

Example 28 with TableDescriptor

use of org.apache.hadoop.hbase.client.TableDescriptor in project hbase by apache.

the class MobRefReporter method run.

/**
 * Main method for the tool.
 * @return 0 on success, 1 for bad args, 2 if the job aborted with an exception,
 *   3 if the MR job was unsuccessful
 */
public int run(String[] args) throws IOException, InterruptedException {
    // TODO make family and table optional
    if (args.length != 3) {
        printUsage();
        return 1;
    }
    final String output = args[0];
    final String tableName = args[1];
    final String familyName = args[2];
    final long reportStartTime = EnvironmentEdgeManager.currentTime();
    Configuration conf = getConf();
    try {
        FileSystem fs = FileSystem.get(conf);
        // check whether the current user is the same as the owner of the hbase root dir
        String currentUserName = UserGroupInformation.getCurrentUser().getShortUserName();
        FileStatus[] hbaseRootFileStat = fs.listStatus(new Path(conf.get(HConstants.HBASE_DIR)));
        if (hbaseRootFileStat.length > 0) {
            String owner = hbaseRootFileStat[0].getOwner();
            if (!owner.equals(currentUserName)) {
                String errorMsg = "The current user[" + currentUserName + "] does not have hbase root credentials." + " If this job fails due to an inability to read HBase's internal directories, " + "you will need to rerun as a user with sufficient permissions. The HBase superuser " + "is a safe choice.";
                LOG.warn(errorMsg);
            }
        } else {
            LOG.error("The passed configs point to an HBase dir does not exist: {}", conf.get(HConstants.HBASE_DIR));
            throw new IOException("The target HBase does not exist");
        }
        byte[] family;
        int maxVersions;
        TableName tn = TableName.valueOf(tableName);
        try (Connection connection = ConnectionFactory.createConnection(conf);
            Admin admin = connection.getAdmin()) {
            TableDescriptor htd = admin.getDescriptor(tn);
            ColumnFamilyDescriptor hcd = htd.getColumnFamily(Bytes.toBytes(familyName));
            if (hcd == null || !hcd.isMobEnabled()) {
                throw new IOException("Column family " + familyName + " is not a MOB column family");
            }
            family = hcd.getName();
            maxVersions = hcd.getMaxVersions();
        }
        String id = getClass().getSimpleName() + UUID.randomUUID().toString().replace("-", "");
        Job job = null;
        Scan scan = new Scan();
        scan.addFamily(family);
        // Do not retrieve the mob data when scanning
        scan.setAttribute(MobConstants.MOB_SCAN_RAW, Bytes.toBytes(Boolean.TRUE));
        scan.setAttribute(MobConstants.MOB_SCAN_REF_ONLY, Bytes.toBytes(Boolean.TRUE));
        // If a scanner caching value isn't set, pick a smaller default since we know we're doing
        // a full table scan and don't want to impact other clients badly.
        scan.setCaching(conf.getInt(HConstants.HBASE_CLIENT_SCANNER_CACHING, 10000));
        scan.setCacheBlocks(false);
        scan.readVersions(maxVersions);
        conf.set(REPORT_JOB_ID, id);
        job = Job.getInstance(conf);
        job.setJarByClass(getClass());
        TableMapReduceUtil.initTableMapperJob(tn, scan, MobRefMapper.class, Text.class, ImmutableBytesWritable.class, job);
        job.setReducerClass(MobRefReducer.class);
        job.setOutputFormatClass(TextOutputFormat.class);
        TextOutputFormat.setOutputPath(job, new Path(output));
        job.setJobName(getClass().getSimpleName() + "-" + tn + "-" + familyName);
        // For use in the reducer; easier than re-parsing it out of the scan string.
        job.getConfiguration().set(TableInputFormat.SCAN_COLUMN_FAMILY, familyName);
        // Use the time we started this job as the base point for file "recency".
        job.getConfiguration().setLong(REPORT_START_DATETIME, reportStartTime);
        if (job.waitForCompletion(true)) {
            LOG.info("Finished creating report for '{}', family='{}'", tn, familyName);
        } else {
            System.err.println("Job was not successful");
            return 3;
        }
        return 0;
    } catch (ClassNotFoundException | RuntimeException | IOException | InterruptedException e) {
        System.err.println("Job aborted due to exception " + e);
        // job failed
        return 2;
    }
}
Also used : Path(org.apache.hadoop.fs.Path) FileStatus(org.apache.hadoop.fs.FileStatus) Configuration(org.apache.hadoop.conf.Configuration) HBaseConfiguration(org.apache.hadoop.hbase.HBaseConfiguration) Connection(org.apache.hadoop.hbase.client.Connection) IOException(java.io.IOException) Admin(org.apache.hadoop.hbase.client.Admin) ColumnFamilyDescriptor(org.apache.hadoop.hbase.client.ColumnFamilyDescriptor) TableDescriptor(org.apache.hadoop.hbase.client.TableDescriptor) TableName(org.apache.hadoop.hbase.TableName) FileSystem(org.apache.hadoop.fs.FileSystem) Scan(org.apache.hadoop.hbase.client.Scan) Job(org.apache.hadoop.mapreduce.Job)
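
The two scan attributes are what make this a metadata-only pass: MOB_SCAN_RAW has the server return the stored MOB reference cells rather than resolving them to the underlying MOB data, and MOB_SCAN_REF_ONLY restricts results to reference cells. A minimal sketch of just that scan setup, pulled out of the MapReduce wiring:

import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.mob.MobConstants;
import org.apache.hadoop.hbase.util.Bytes;

public class MobRefScanSketch {
    // Build a scan that surfaces MOB reference cells only, across all
    // retained versions of the given family.
    static Scan mobRefScan(byte[] family, int maxVersions) {
        Scan scan = new Scan();
        scan.addFamily(family);
        scan.setAttribute(MobConstants.MOB_SCAN_RAW, Bytes.toBytes(Boolean.TRUE));
        scan.setAttribute(MobConstants.MOB_SCAN_REF_ONLY, Bytes.toBytes(Boolean.TRUE));
        scan.setCacheBlocks(false); // full-table scan; don't churn the block cache
        scan.readVersions(maxVersions);
        return scan;
    }
}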

Example 29 with TableDescriptor

use of org.apache.hadoop.hbase.client.TableDescriptor in project hbase by apache.

the class PerformanceEvaluation method checkTable.

/*
   * If the table does not already exist, create it. Also recreate the table
   * when {@code opts.presplitRegions} is specified or when the existing
   * table's region replica count doesn't match {@code opts.replicas}.
   */
static boolean checkTable(Admin admin, TestOptions opts) throws IOException {
    TableName tableName = TableName.valueOf(opts.tableName);
    boolean needsDelete = false, exists = admin.tableExists(tableName);
    boolean isReadCmd = opts.cmdName.toLowerCase(Locale.ROOT).contains("read") || opts.cmdName.toLowerCase(Locale.ROOT).contains("scan");
    if (!exists && isReadCmd) {
        throw new IllegalStateException("Must specify an existing table for read commands. Run a write command first.");
    }
    TableDescriptor desc = exists ? admin.getDescriptor(tableName) : null;
    byte[][] splits = getSplits(opts);
    // Recreate the table if the requested presplit count, split policy,
    // region replica count, or number of column families does not match the
    // existing table.
    if ((exists && opts.presplitRegions != DEFAULT_OPTS.presplitRegions)
        || (!isReadCmd && desc != null
            && !StringUtils.equals(desc.getRegionSplitPolicyClassName(), opts.splitPolicy))
        || (!isReadCmd && desc != null && desc.getRegionReplication() != opts.replicas)
        || (desc != null && desc.getColumnFamilyCount() != opts.families)) {
        needsDelete = true;
        // wait, why did it delete my table?!?
        LOG.debug(MoreObjects.toStringHelper("needsDelete")
            .add("needsDelete", needsDelete).add("isReadCmd", isReadCmd)
            .add("exists", exists).add("desc", desc)
            .add("presplit", opts.presplitRegions).add("splitPolicy", opts.splitPolicy)
            .add("replicas", opts.replicas).add("families", opts.families)
            .toString());
    }
    // remove an existing table
    if (needsDelete) {
        if (admin.isTableEnabled(tableName)) {
            admin.disableTable(tableName);
        }
        admin.deleteTable(tableName);
    }
    // table creation is necessary
    if (!exists || needsDelete) {
        desc = getTableDescriptor(opts);
        if (splits != null) {
            if (LOG.isDebugEnabled()) {
                for (int i = 0; i < splits.length; i++) {
                    LOG.debug(" split " + i + ": " + Bytes.toStringBinary(splits[i]));
                }
            }
            admin.createTable(desc, splits);
        } else {
            admin.createTable(desc);
        }
        LOG.info("Table " + desc + " created");
    }
    return admin.tableExists(tableName);
}
Also used : TableDescriptor(org.apache.hadoop.hbase.client.TableDescriptor)
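
For write commands the method finishes by creating the table, pre-split when split points were computed from the options. A minimal standalone sketch of that creation path, with made-up table and family names and a single split point:

import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreatePresplitSketch {
    static void createPresplit(Admin admin) throws IOException {
        TableDescriptor desc = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("perf_test"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
            .build();
        // One split point yields two regions: (-inf, "m") and ["m", +inf).
        byte[][] splits = { Bytes.toBytes("m") };
        admin.createTable(desc, splits);
    }
}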

Example 30 with TableDescriptor

use of org.apache.hadoop.hbase.client.TableDescriptor in project hbase by apache.

the class RSRpcServices method executeOpenRegionProcedures.

private void executeOpenRegionProcedures(OpenRegionRequest request, Map<TableName, TableDescriptor> tdCache) {
    long masterSystemTime = request.hasMasterSystemTime() ? request.getMasterSystemTime() : -1;
    for (RegionOpenInfo regionOpenInfo : request.getOpenInfoList()) {
        RegionInfo regionInfo = ProtobufUtil.toRegionInfo(regionOpenInfo.getRegion());
        TableName tableName = regionInfo.getTable();
        TableDescriptor tableDesc = tdCache.get(tableName);
        if (tableDesc == null) {
            try {
                tableDesc = server.getTableDescriptors().get(regionInfo.getTable());
            } catch (IOException e) {
                // Here we do not fail the whole method since we also need to deal with other
                // procedures, and we cannot ignore this one, so we still schedule an
                // AssignRegionHandler and it will report back to the master if we still
                // cannot get the TableDescriptor.
                LOG.warn("Failed to get TableDescriptor of {}, will try again in the handler", regionInfo.getTable(), e);
            }
            if (tableDesc != null) {
                tdCache.put(tableName, tableDesc);
            }
        }
        if (regionOpenInfo.getFavoredNodesCount() > 0) {
            server.updateRegionFavoredNodesMapping(regionInfo.getEncodedName(), regionOpenInfo.getFavoredNodesList());
        }
        long procId = regionOpenInfo.getOpenProcId();
        if (server.submitRegionProcedure(procId)) {
            server.getExecutorService().submit(AssignRegionHandler.create(server, regionInfo, procId, tableDesc, masterSystemTime));
        }
    }
}
Also used : TableName(org.apache.hadoop.hbase.TableName) RegionInfo(org.apache.hadoop.hbase.client.RegionInfo) IOException(java.io.IOException) DoNotRetryIOException(org.apache.hadoop.hbase.DoNotRetryIOException) HBaseIOException(org.apache.hadoop.hbase.HBaseIOException) UncheckedIOException(java.io.UncheckedIOException) TableDescriptor(org.apache.hadoop.hbase.client.TableDescriptor) RegionOpenInfo(org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.OpenRegionRequest.RegionOpenInfo)
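
The tdCache handling is a deliberate look-aside cache: a descriptor is cached only after a successful load, so a transient IOException does not poison the cache, and the open is still scheduled so the handler can report the failure to the master. A minimal sketch of that pattern, with a hypothetical DescriptorSource standing in for server.getTableDescriptors():

import java.io.IOException;
import java.util.Map;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.TableDescriptor;

public class DescriptorCacheSketch {
    // Hypothetical stand-in for the real descriptor store.
    interface DescriptorSource {
        TableDescriptor get(TableName tn) throws IOException;
    }

    static TableDescriptor lookup(Map<TableName, TableDescriptor> cache,
            DescriptorSource source, TableName tn) {
        TableDescriptor td = cache.get(tn);
        if (td == null) {
            try {
                td = source.get(tn);
            } catch (IOException e) {
                // Tolerate the failure: the caller proceeds with a null
                // descriptor and downstream handling reports the problem.
            }
            if (td != null) {
                cache.put(tn, td); // cache only on success, so misses retry
            }
        }
        return td;
    }
}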

Aggregations

Class (fully qualified name): usage count in the indexed sources

TableDescriptor (org.apache.hadoop.hbase.client.TableDescriptor): 639
Test (org.junit.Test): 356
TableName (org.apache.hadoop.hbase.TableName): 237
RegionInfo (org.apache.hadoop.hbase.client.RegionInfo): 180
IOException (java.io.IOException): 151
Put (org.apache.hadoop.hbase.client.Put): 142
Admin (org.apache.hadoop.hbase.client.Admin): 136
Path (org.apache.hadoop.fs.Path): 124
Table (org.apache.hadoop.hbase.client.Table): 121
ColumnFamilyDescriptor (org.apache.hadoop.hbase.client.ColumnFamilyDescriptor): 96
Configuration (org.apache.hadoop.conf.Configuration): 91
TableDescriptorBuilder (org.apache.hadoop.hbase.client.TableDescriptorBuilder): 77
ArrayList (java.util.ArrayList): 75
FileSystem (org.apache.hadoop.fs.FileSystem): 66
Result (org.apache.hadoop.hbase.client.Result): 66
HRegion (org.apache.hadoop.hbase.regionserver.HRegion): 64
Connection (org.apache.hadoop.hbase.client.Connection): 59
Scan (org.apache.hadoop.hbase.client.Scan): 50
Get (org.apache.hadoop.hbase.client.Get): 49
List (java.util.List): 39