Example 41 with HTableDescriptor

use of org.apache.hadoop.hbase.HTableDescriptor in project hbase by apache.

the class RestoreTool method getTableDesc.

/**
   * Get the table descriptor.
   * @param tableName the table that was backed up
   * @return the {@link HTableDescriptor} saved in the backup image of the table
   */
HTableDescriptor getTableDesc(TableName tableName) throws FileNotFoundException, IOException {
    Path tableInfoPath = this.getTableInfoPath(tableName);
    SnapshotDescription desc = SnapshotDescriptionUtils.readSnapshotInfo(fs, tableInfoPath);
    SnapshotManifest manifest = SnapshotManifest.open(conf, fs, tableInfoPath, desc);
    HTableDescriptor tableDescriptor = manifest.getTableDescriptor();
    if (!tableDescriptor.getTableName().equals(tableName)) {
        LOG.error("couldn't find Table Desc for table: " + tableName + " under tableInfoPath: " + tableInfoPath.toString());
        LOG.error("tableDescriptor.getNameAsString() = " + tableDescriptor.getNameAsString());
        throw new FileNotFoundException("couldn't find Table Desc for table: " + tableName + " under tableInfoPath: " + tableInfoPath.toString());
    }
    return tableDescriptor;
}
Also used : Path(org.apache.hadoop.fs.Path) SnapshotManifest(org.apache.hadoop.hbase.snapshot.SnapshotManifest) FileNotFoundException(java.io.FileNotFoundException) SnapshotDescription(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.SnapshotDescription) HTableDescriptor(org.apache.hadoop.hbase.HTableDescriptor)
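
A minimal sketch (not part of RestoreTool; class and method names here are illustrative) of the same pattern the method above relies on: read the SnapshotDescription from a snapshot directory and open its SnapshotManifest to recover the HTableDescriptor saved with the backup image.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.SnapshotDescription;
import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils;
import org.apache.hadoop.hbase.snapshot.SnapshotManifest;

public class SnapshotDescriptorReader {
    // snapshotDir is assumed to point at a completed snapshot directory on the backup file system
    public static HTableDescriptor readDescriptor(Configuration conf, FileSystem fs, Path snapshotDir) throws IOException {
        // read the snapshot metadata, then open the manifest it describes
        SnapshotDescription desc = SnapshotDescriptionUtils.readSnapshotInfo(fs, snapshotDir);
        SnapshotManifest manifest = SnapshotManifest.open(conf, fs, snapshotDir, desc);
        // the manifest carries the table descriptor captured at snapshot time
        return manifest.getTableDescriptor();
    }
}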

Example 42 with HTableDescriptor

use of org.apache.hadoop.hbase.HTableDescriptor in project hbase by apache.

the class RestoreTool method incrementalRestoreTable.

/**
   * During an incremental backup operation, call WalPlayer to replay the WAL in the backup image.
   * Currently tableNames and newTableNames only contain a single table; this will be expanded to
   * multiple tables in the future.
   * @param conn HBase connection
   * @param tableBackupPath backup path
   * @param logDirs incremental backup folders, which contain WALs
   * @param tableNames source table names (the tables that were backed up)
   * @param newTableNames target table names (the tables to restore to)
   * @param incrBackupId incremental backup Id
   * @throws IOException exception
   */
public void incrementalRestoreTable(Connection conn, Path tableBackupPath, Path[] logDirs, TableName[] tableNames, TableName[] newTableNames, String incrBackupId) throws IOException {
    try (Admin admin = conn.getAdmin()) {
        if (tableNames.length != newTableNames.length) {
            throw new IOException("Number of source tables and target tables does not match!");
        }
        FileSystem fileSys = tableBackupPath.getFileSystem(this.conf);
        // the target tables must already exist (e.g. created by restoring a full backup); check them all
        for (TableName tableName : newTableNames) {
            if (!admin.tableExists(tableName)) {
                throw new IOException("HBase table " + tableName + " does not exist. Create the table first, e.g. by restoring a full backup.");
            }
        }
        // adjust table schema
        for (int i = 0; i < tableNames.length; i++) {
            TableName tableName = tableNames[i];
            HTableDescriptor tableDescriptor = getTableDescriptor(fileSys, tableName, incrBackupId);
            LOG.debug("Found descriptor " + tableDescriptor + " through " + incrBackupId);
            TableName newTableName = newTableNames[i];
            HTableDescriptor newTableDescriptor = admin.getTableDescriptor(newTableName);
            List<HColumnDescriptor> families = Arrays.asList(tableDescriptor.getColumnFamilies());
            List<HColumnDescriptor> existingFamilies = Arrays.asList(newTableDescriptor.getColumnFamilies());
            boolean schemaChangeNeeded = false;
            for (HColumnDescriptor family : families) {
                if (!existingFamilies.contains(family)) {
                    newTableDescriptor.addFamily(family);
                    schemaChangeNeeded = true;
                }
            }
            for (HColumnDescriptor family : existingFamilies) {
                if (!families.contains(family)) {
                    newTableDescriptor.removeFamily(family.getName());
                    schemaChangeNeeded = true;
                }
            }
            if (schemaChangeNeeded) {
                modifyTableSync(conn, newTableDescriptor);
                LOG.info("Changed " + newTableDescriptor.getTableName() + " to: " + newTableDescriptor);
            }
        }
        RestoreJob restoreService = BackupRestoreFactory.getRestoreJob(conf);
        restoreService.run(logDirs, tableNames, newTableNames, false);
    }
}
Also used : RestoreJob(org.apache.hadoop.hbase.backup.RestoreJob) TableName(org.apache.hadoop.hbase.TableName) HColumnDescriptor(org.apache.hadoop.hbase.HColumnDescriptor) FileSystem(org.apache.hadoop.fs.FileSystem) HRegionFileSystem(org.apache.hadoop.hbase.regionserver.HRegionFileSystem) HBackupFileSystem(org.apache.hadoop.hbase.backup.HBackupFileSystem) IOException(java.io.IOException) Admin(org.apache.hadoop.hbase.client.Admin) HTableDescriptor(org.apache.hadoop.hbase.HTableDescriptor)
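
The schema-adjustment loop above can be read as a small descriptor-diff helper. A minimal sketch, assuming two HTableDescriptor instances obtained elsewhere (class and method names are illustrative); it returns true when the target descriptor was changed and therefore needs a modifyTableSync-style call.

import java.util.Arrays;
import java.util.List;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;

public class FamilyDiff {
    // align the target descriptor's column families with the source (backup image) descriptor
    public static boolean alignFamilies(HTableDescriptor source, HTableDescriptor target) {
        List<HColumnDescriptor> sourceFamilies = Arrays.asList(source.getColumnFamilies());
        List<HColumnDescriptor> targetFamilies = Arrays.asList(target.getColumnFamilies());
        boolean changed = false;
        // add families that exist in the backup image but not in the target table
        for (HColumnDescriptor family : sourceFamilies) {
            if (!targetFamilies.contains(family)) {
                target.addFamily(family);
                changed = true;
            }
        }
        // drop families that no longer exist in the backup image
        for (HColumnDescriptor family : targetFamilies) {
            if (!sourceFamilies.contains(family)) {
                target.removeFamily(family.getName());
                changed = true;
            }
        }
        return changed;
    }
}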

Example 43 with HTableDescriptor

use of org.apache.hadoop.hbase.HTableDescriptor in project hbase by apache.

the class RestoreTool method restoreTableAndCreate.

private void restoreTableAndCreate(Connection conn, TableName tableName, TableName newTableName, Path tableBackupPath, boolean truncateIfExists, String lastIncrBackupId) throws IOException {
    if (newTableName == null) {
        newTableName = tableName;
    }
    FileSystem fileSys = tableBackupPath.getFileSystem(this.conf);
    // get table descriptor first
    HTableDescriptor tableDescriptor = getTableDescriptor(fileSys, tableName, lastIncrBackupId);
    if (tableDescriptor != null) {
        LOG.debug("Retrieved descriptor: " + tableDescriptor + " thru " + lastIncrBackupId);
    }
    if (tableDescriptor == null) {
        Path tableSnapshotPath = getTableSnapshotPath(backupRootPath, tableName, backupId);
        if (fileSys.exists(tableSnapshotPath)) {
            // check whether snapshot dir already recorded for target table
            if (snapshotMap.get(tableName) != null) {
                SnapshotDescription desc = SnapshotDescriptionUtils.readSnapshotInfo(fileSys, tableSnapshotPath);
                SnapshotManifest manifest = SnapshotManifest.open(conf, fileSys, tableSnapshotPath, desc);
                tableDescriptor = manifest.getTableDescriptor();
            } else {
                tableDescriptor = getTableDesc(tableName);
                snapshotMap.put(tableName, getTableInfoPath(tableName));
            }
            if (tableDescriptor == null) {
                LOG.debug("Found no table descriptor in the snapshot dir, previous schema would be lost");
            }
        } else {
            throw new IOException("Table snapshot directory: " + tableSnapshotPath + " does not exist.");
        }
    }
    Path tableArchivePath = getTableArchivePath(tableName);
    if (tableArchivePath == null) {
        if (tableDescriptor != null) {
            // found a table descriptor but no archive dir, which means the table is empty; create the table and exit
            if (LOG.isDebugEnabled()) {
                LOG.debug("find table descriptor but no archive dir for table " + tableName + ", will only create table");
            }
            tableDescriptor.setName(newTableName);
            checkAndCreateTable(conn, tableBackupPath, tableName, newTableName, null, tableDescriptor, truncateIfExists);
            return;
        } else {
            throw new IllegalStateException("Cannot restore hbase table because directory '" + " tableArchivePath is null.");
        }
    }
    if (tableDescriptor == null) {
        tableDescriptor = new HTableDescriptor(newTableName);
    } else {
        tableDescriptor.setName(newTableName);
    }
    // load all files in dir
    try {
        ArrayList<Path> regionPathList = getRegionList(tableName);
        // should only try to create the table with all region information, so we can pre-split
        // the regions at a fine granularity
        checkAndCreateTable(conn, tableBackupPath, tableName, newTableName, regionPathList, tableDescriptor, truncateIfExists);
        if (tableArchivePath != null) {
            // start real restore through bulkload
            // if the backup target is on the local cluster, special handling is needed
            Path tempTableArchivePath = checkLocalAndBackup(tableArchivePath);
            if (tempTableArchivePath.equals(tableArchivePath)) {
                if (LOG.isDebugEnabled()) {
                    LOG.debug("TableArchivePath for bulkload using existPath: " + tableArchivePath);
                }
            } else {
                // point to the tempDir
                regionPathList = getRegionList(tempTableArchivePath);
                if (LOG.isDebugEnabled()) {
                    LOG.debug("TableArchivePath for bulkload using tempPath: " + tempTableArchivePath);
                }
            }
            LoadIncrementalHFiles loader = createLoader(tempTableArchivePath, false);
            for (Path regionPath : regionPathList) {
                String regionName = regionPath.toString();
                if (LOG.isDebugEnabled()) {
                    LOG.debug("Restoring HFiles from directory " + regionName);
                }
                String[] args = { regionName, newTableName.getNameAsString() };
                loader.run(args);
            }
        }
    // we do not restore recovered edits
    } catch (Exception e) {
        throw new IllegalStateException("Cannot restore hbase table", e);
    }
}
Also used : Path(org.apache.hadoop.fs.Path) SnapshotManifest(org.apache.hadoop.hbase.snapshot.SnapshotManifest) LoadIncrementalHFiles(org.apache.hadoop.hbase.mapreduce.LoadIncrementalHFiles) FileSystem(org.apache.hadoop.fs.FileSystem) HRegionFileSystem(org.apache.hadoop.hbase.regionserver.HRegionFileSystem) HBackupFileSystem(org.apache.hadoop.hbase.backup.HBackupFileSystem) SnapshotDescription(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.SnapshotDescription) IOException(java.io.IOException) FileNotFoundException(java.io.FileNotFoundException) HTableDescriptor(org.apache.hadoop.hbase.HTableDescriptor)
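
The bulk-load step at the end of the method drives LoadIncrementalHFiles as a Hadoop Tool with an HFile directory and a target table name. A minimal standalone sketch of that pattern follows; the directory path and table name are hypothetical.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.mapreduce.LoadIncrementalHFiles;

public class BulkLoadSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        LoadIncrementalHFiles loader = new LoadIncrementalHFiles(conf);
        // arguments: a directory of HFiles laid out per column family, and the table to load into
        // (both values here are placeholders)
        int exitCode = loader.run(new String[] { "/backup/tmp/restore/t1/region-dir", "t1" });
        System.exit(exitCode);
    }
}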

Example 44 with HTableDescriptor

use of org.apache.hadoop.hbase.HTableDescriptor in project hbase by apache.

the class ConstraintProcessor method start.

@Override
public void start(CoprocessorEnvironment environment) {
    // make sure we are on a region server
    if (!(environment instanceof RegionCoprocessorEnvironment)) {
        throw new IllegalArgumentException("Constraints only act on regions - started in an environment that was not a region");
    }
    RegionCoprocessorEnvironment env = (RegionCoprocessorEnvironment) environment;
    HTableDescriptor desc = env.getRegion().getTableDesc();
    // load all the constraints from the HTD
    try {
        this.constraints = Constraints.getConstraints(desc, classloader);
    } catch (IOException e) {
        throw new IllegalArgumentException(e);
    }
    if (LOG.isInfoEnabled()) {
        LOG.info("Finished loading " + constraints.size() + " user Constraints on table: " + desc.getTableName());
    }
}
Also used : RegionCoprocessorEnvironment(org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment) IOException(java.io.IOException) HTableDescriptor(org.apache.hadoop.hbase.HTableDescriptor)
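
ConstraintProcessor only reads constraints back at region start; they are registered on the table descriptor beforehand through the Constraints helper. A minimal sketch, assuming a trivial user constraint (class names here are illustrative):

import java.io.IOException;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.constraint.BaseConstraint;
import org.apache.hadoop.hbase.constraint.ConstraintException;
import org.apache.hadoop.hbase.constraint.Constraints;

public class ConstraintSetup {

    // hypothetical constraint that rejects Puts carrying no cells
    public static class NonEmptyPutConstraint extends BaseConstraint {
        @Override
        public void check(Put put) throws ConstraintException {
            if (put.isEmpty()) {
                throw new ConstraintException("Put must contain at least one cell");
            }
        }
    }

    public static HTableDescriptor buildDescriptor() throws IOException {
        HTableDescriptor desc = new HTableDescriptor(TableName.valueOf("t1"));
        // serializes the constraint class into the descriptor and enables the
        // ConstraintProcessor coprocessor, which is what start() above later loads
        Constraints.add(desc, NonEmptyPutConstraint.class);
        return desc;
    }
}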

Example 45 with HTableDescriptor

use of org.apache.hadoop.hbase.HTableDescriptor in project hbase by apache.

the class MergeTableRegionsProcedure method mergeStoreFiles.

/**
   * Create reference file(s) for the merging regions under the merges directory.
   * @param env MasterProcedureEnv
   * @param regionFs region file system
   * @param mergedDir the temp directory of the merged region
   * @throws IOException
   */
private void mergeStoreFiles(final MasterProcedureEnv env, final HRegionFileSystem regionFs, final Path mergedDir) throws IOException {
    final MasterFileSystem mfs = env.getMasterServices().getMasterFileSystem();
    final Configuration conf = env.getMasterConfiguration();
    final HTableDescriptor htd = env.getMasterServices().getTableDescriptors().get(getTableName());
    for (String family : regionFs.getFamilies()) {
        final HColumnDescriptor hcd = htd.getFamily(family.getBytes());
        final Collection<StoreFileInfo> storeFiles = regionFs.getStoreFiles(family);
        if (storeFiles != null && storeFiles.size() > 0) {
            final CacheConfig cacheConf = new CacheConfig(conf, hcd);
            for (StoreFileInfo storeFileInfo : storeFiles) {
                // Create reference file(s) of the region in mergedDir
                regionFs.mergeStoreFile(mergedRegionInfo, family, new StoreFile(mfs.getFileSystem(), storeFileInfo, conf, cacheConf, hcd.getBloomFilterType()), mergedDir);
            }
        }
    }
}
Also used : MasterFileSystem(org.apache.hadoop.hbase.master.MasterFileSystem) Configuration(org.apache.hadoop.conf.Configuration) HColumnDescriptor(org.apache.hadoop.hbase.HColumnDescriptor) StoreFile(org.apache.hadoop.hbase.regionserver.StoreFile) CacheConfig(org.apache.hadoop.hbase.io.hfile.CacheConfig) StoreFileInfo(org.apache.hadoop.hbase.regionserver.StoreFileInfo) HTableDescriptor(org.apache.hadoop.hbase.HTableDescriptor)
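
A minimal client-side sketch of the per-family lookup the procedure performs through the master's table descriptors: fetch the HTableDescriptor and inspect each HColumnDescriptor, including the bloom filter type consulted when the merged store files are opened. The table name below is hypothetical.

import java.io.IOException;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FamilyWalkSketch {
    public static void main(String[] args) throws IOException {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
            HTableDescriptor htd = admin.getTableDescriptor(TableName.valueOf("t1"));
            for (HColumnDescriptor hcd : htd.getColumnFamilies()) {
                // per-family settings such as the bloom filter type drive how store files are read
                System.out.println(hcd.getNameAsString() + " bloom=" + hcd.getBloomFilterType());
            }
        }
    }
}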

Aggregations

HTableDescriptor (org.apache.hadoop.hbase.HTableDescriptor): 867
HColumnDescriptor (org.apache.hadoop.hbase.HColumnDescriptor): 555
Test (org.junit.Test): 425
TableName (org.apache.hadoop.hbase.TableName): 258
HRegionInfo (org.apache.hadoop.hbase.HRegionInfo): 171
IOException (java.io.IOException): 167
Put (org.apache.hadoop.hbase.client.Put): 149
Table (org.apache.hadoop.hbase.client.Table): 134
Path (org.apache.hadoop.fs.Path): 127
Admin (org.apache.hadoop.hbase.client.Admin): 121
Configuration (org.apache.hadoop.conf.Configuration): 87
HBaseAdmin (org.apache.hadoop.hbase.client.HBaseAdmin): 77
ArrayList (java.util.ArrayList): 75
FileSystem (org.apache.hadoop.fs.FileSystem): 66
Result (org.apache.hadoop.hbase.client.Result): 62
Connection (org.apache.hadoop.hbase.client.Connection): 57
Scan (org.apache.hadoop.hbase.client.Scan): 51
Cell (org.apache.hadoop.hbase.Cell): 44
Delete (org.apache.hadoop.hbase.client.Delete): 44
HRegion (org.apache.hadoop.hbase.regionserver.HRegion): 43