Search in sources :

Example 26 with HRegionFileSystem

use of org.apache.hadoop.hbase.regionserver.HRegionFileSystem in project hbase by apache.

In class MergeTableRegionsProcedure, the method cleanupMergedRegion:

/**
 * Cleans up the merged region directory when rolling back after a failed merge.
 * @param env the master procedure environment supplying filesystem and configuration
 * @throws IOException if the region directory cannot be opened or cleaned
 */
private void cleanupMergedRegion(final MasterProcedureEnv env) throws IOException {
    final MasterFileSystem mfs = env.getMasterServices().getMasterFileSystem();
    final FileSystem fs = mfs.getFileSystem();
    final TableName table = this.regionsToMerge[0].getTable();
    final Path tableDir = CommonFSUtils.getTableDir(mfs.getRootDir(), table);
    // createMergedRegion places the merge dir inside the FIRST merge parent
    // region, so that is the region directory we open here.
    final HRegionFileSystem regionFs = HRegionFileSystem.openRegionFromFileSystem(
        env.getMasterConfiguration(), fs, tableDir, this.regionsToMerge[0], false);
    regionFs.cleanupMergedRegion(mergedRegion);
}
Also used : MasterFileSystem(org.apache.hadoop.hbase.master.MasterFileSystem) Path(org.apache.hadoop.fs.Path) TableName(org.apache.hadoop.hbase.TableName) HRegionFileSystem(org.apache.hadoop.hbase.regionserver.HRegionFileSystem) FileSystem(org.apache.hadoop.fs.FileSystem) MasterFileSystem(org.apache.hadoop.hbase.master.MasterFileSystem) HRegionFileSystem(org.apache.hadoop.hbase.regionserver.HRegionFileSystem)

Example 27 with HRegionFileSystem

use of org.apache.hadoop.hbase.regionserver.HRegionFileSystem in project hbase by apache.

In class CatalogJanitor, the method cleanMergeRegion:

/**
 * If the merged region no longer holds references to its merge parent regions
 * (or its directory is already gone), submits a procedure that archives the
 * parent regions on HDFS and deletes their references from hbase:meta.
 * @param mergedRegion the region produced by the merge
 * @param parents the merge parent regions eligible for cleanup
 * @return true if a cleanup procedure was submitted
 * @throws IOException if reading the table descriptor or filesystem state fails
 */
private boolean cleanMergeRegion(final RegionInfo mergedRegion, List<RegionInfo> parents) throws IOException {
    if (LOG.isDebugEnabled()) {
        LOG.debug("Cleaning merged region {}", mergedRegion);
    }
    FileSystem fs = this.services.getMasterFileSystem().getFileSystem();
    Path rootdir = this.services.getMasterFileSystem().getRootDir();
    Path tabledir = CommonFSUtils.getTableDir(rootdir, mergedRegion.getTable());
    TableDescriptor htd = getDescriptor(mergedRegion.getTable());
    HRegionFileSystem regionFs = null;
    try {
        regionFs = HRegionFileSystem.openRegionFromFileSystem(this.services.getConfiguration(), fs, tabledir, mergedRegion, true);
    } catch (IOException e) {
        // A missing region dir means there is nothing left holding references, so we
        // proceed with cleanup below; keep the cause in the log for diagnosis.
        LOG.warn("Merged region does not exist: {}", mergedRegion.getEncodedName(), e);
    }
    if (regionFs == null || !regionFs.hasReferences(htd)) {
        if (LOG.isDebugEnabled()) {
            LOG.debug("Deleting parents ({}) from fs; merged child {} no longer holds references",
                parents.stream().map(RegionInfo::getShortNameToLog).collect(Collectors.joining(", ")),
                mergedRegion);
        }
        ProcedureExecutor<MasterProcedureEnv> pe = this.services.getMasterProcedureExecutor();
        GCMultipleMergedRegionsProcedure mergeRegionProcedure =
            new GCMultipleMergedRegionsProcedure(pe.getEnvironment(), mergedRegion, parents);
        pe.submitProcedure(mergeRegionProcedure);
        if (LOG.isDebugEnabled()) {
            LOG.debug("Submitted procedure {} for merged region {}", mergeRegionProcedure, mergedRegion);
        }
        return true;
    }
    return false;
}
Also used : Path(org.apache.hadoop.fs.Path) HRegionFileSystem(org.apache.hadoop.hbase.regionserver.HRegionFileSystem) FileSystem(org.apache.hadoop.fs.FileSystem) HRegionFileSystem(org.apache.hadoop.hbase.regionserver.HRegionFileSystem) GCMultipleMergedRegionsProcedure(org.apache.hadoop.hbase.master.assignment.GCMultipleMergedRegionsProcedure) MasterProcedureEnv(org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv) IOException(java.io.IOException) TableDescriptor(org.apache.hadoop.hbase.client.TableDescriptor)

Example 28 with HRegionFileSystem

use of org.apache.hadoop.hbase.regionserver.HRegionFileSystem in project hbase by apache.

In class MajorCompactionTTLRequest, the method getStoresRequiringCompaction:

/**
 * Determines which column families need a TTL-driven major compaction.
 * @param htd descriptor of the table whose families are examined
 * @return map of family name to its TTL cutoff timestamp, for families needing compaction
 * @throws IOException if inspecting the region's store files fails
 */
Map<String, Long> getStoresRequiringCompaction(TableDescriptor htd) throws IOException {
    final HRegionFileSystem regionFs = getFileSystem();
    final Map<String, Long> familyTTLMap = Maps.newHashMap();
    for (ColumnFamilyDescriptor family : htd.getColumnFamilies()) {
        final long cutoff = getColFamilyCutoffTime(family);
        // A non-positive cutoff means the family's TTL is forever: never compact it.
        if (cutoff <= 0) {
            continue;
        }
        final String familyName = family.getNameAsString();
        if (shouldCFBeCompacted(regionFs, familyName, cutoff)) {
            familyTTLMap.put(familyName, cutoff);
        }
    }
    return familyTTLMap;
}
Also used : HRegionFileSystem(org.apache.hadoop.hbase.regionserver.HRegionFileSystem) ColumnFamilyDescriptor(org.apache.hadoop.hbase.client.ColumnFamilyDescriptor)

Example 29 with HRegionFileSystem

use of org.apache.hadoop.hbase.regionserver.HRegionFileSystem in project hbase by apache.

In class MajorCompactionRequest, the method getStoresRequiringCompaction:

/**
 * Filters the requested store (column family) names down to those that
 * actually need a major compaction as of the given timestamp.
 * @param requestedStores candidate column family names
 * @param timestamp cutoff used when deciding whether a family needs compaction
 * @return the subset of requested families that should be compacted
 * @throws IOException if inspecting the region's store files fails
 */
Set<String> getStoresRequiringCompaction(Set<String> requestedStores, long timestamp) throws IOException {
    final HRegionFileSystem regionFs = getFileSystem();
    final Set<String> familiesToCompact = Sets.newHashSet();
    for (String family : requestedStores) {
        if (!shouldCFBeCompacted(regionFs, family, timestamp)) {
            continue;
        }
        familiesToCompact.add(family);
    }
    return familiesToCompact;
}
Also used : HRegionFileSystem(org.apache.hadoop.hbase.regionserver.HRegionFileSystem)

Example 30 with HRegionFileSystem

use of org.apache.hadoop.hbase.regionserver.HRegionFileSystem in project hbase by apache.

In class RestoreSnapshotHelper, the method cloneRegion:

/**
 * Clone region directory content from the snapshot info.
 *
 * Each region is encoded with the table name, so the cloned region will have
 * a different region name.
 *
 * Instead of copying the hfiles a HFileLink is created.
 *
 * @param newRegionInfo region info of the cloned (destination) region
 * @param regionDir {@link Path} cloned dir
 * @param snapshotRegionInfo region info of the source region inside the snapshot
 * @param manifest snapshot manifest listing the region's families and store files
 */
private void cloneRegion(final RegionInfo newRegionInfo, final Path regionDir, final RegionInfo snapshotRegionInfo, final SnapshotRegionManifest manifest) throws IOException {
    final String tableName = tableDesc.getTableName().getNameAsString();
    final String snapshotName = snapshotDesc.getName();
    for (SnapshotRegionManifest.FamilyFiles familyFiles : manifest.getFamilyFilesList()) {
        Path familyDir = new Path(regionDir, familyFiles.getFamilyName().toStringUtf8());
        // Collects the non-mob link files created below so they can be handed
        // to the store file tracker after the family is processed.
        List<StoreFileInfo> clonedFiles = new ArrayList<>();
        for (SnapshotRegionManifest.StoreFile storeFile : familyFiles.getStoreFilesList()) {
            LOG.info("Adding HFileLink " + storeFile.getName() + " from cloned region " + "in snapshot " + snapshotName + " to table=" + tableName);
            if (MobUtils.isMobRegionInfo(newRegionInfo)) {
                // Mob region: delete any pre-existing file at the link's target name
                // before restoring, so a stale link does not collide with the new one.
                String mobFileName = HFileLink.createHFileLinkName(snapshotRegionInfo, storeFile.getName());
                Path mobPath = new Path(familyDir, mobFileName);
                if (fs.exists(mobPath)) {
                    fs.delete(mobPath, true);
                }
                restoreStoreFile(familyDir, snapshotRegionInfo, storeFile, createBackRefs);
            } else {
                // Regular region: restore (creates the HFileLink) and remember the
                // resulting file so it gets registered with the tracker below.
                String file = restoreStoreFile(familyDir, snapshotRegionInfo, storeFile, createBackRefs);
                clonedFiles.add(new StoreFileInfo(conf, fs, new Path(familyDir, file), true));
            }
        }
        // we don't need to track files under mobdir
        if (!MobUtils.isMobRegionInfo(newRegionInfo)) {
            // Open the region dir if it already exists on the filesystem; otherwise create it.
            Path regionPath = new Path(tableDir, newRegionInfo.getEncodedName());
            HRegionFileSystem regionFS = (fs.exists(regionPath)) ? HRegionFileSystem.openRegionFromFileSystem(conf, fs, tableDir, newRegionInfo, false) : HRegionFileSystem.createRegionOnFileSystem(conf, fs, tableDir, newRegionInfo);
            Configuration sftConf = StoreUtils.createStoreConfiguration(conf, tableDesc, tableDesc.getColumnFamily(familyFiles.getFamilyName().toByteArray()));
            // Register the restored files with the store file tracker so the cloned
            // region sees them. NOTE(review): the boolean second argument to
            // StoreFileTrackerFactory.create is passed as true here — presumably
            // "isPrimaryReplica" or similar; confirm against the factory's signature.
            StoreFileTracker tracker = StoreFileTrackerFactory.create(sftConf, true, StoreContext.getBuilder().withFamilyStoreDirectoryPath(familyDir).withRegionFileSystem(regionFS).build());
            tracker.set(clonedFiles);
        }
    }
}
Also used : Path(org.apache.hadoop.fs.Path) Configuration(org.apache.hadoop.conf.Configuration) HRegionFileSystem(org.apache.hadoop.hbase.regionserver.HRegionFileSystem) ArrayList(java.util.ArrayList) SnapshotRegionManifest(org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotRegionManifest) StoreFileTracker(org.apache.hadoop.hbase.regionserver.storefiletracker.StoreFileTracker) StoreFileInfo(org.apache.hadoop.hbase.regionserver.StoreFileInfo)

Aggregations

HRegionFileSystem (org.apache.hadoop.hbase.regionserver.HRegionFileSystem)37 Path (org.apache.hadoop.fs.Path)28 FileSystem (org.apache.hadoop.fs.FileSystem)22 IOException (java.io.IOException)10 HTableDescriptor (org.apache.hadoop.hbase.HTableDescriptor)9 HRegion (org.apache.hadoop.hbase.regionserver.HRegion)9 StoreFileInfo (org.apache.hadoop.hbase.regionserver.StoreFileInfo)9 HBaseTableUtil (co.cask.cdap.data2.util.hbase.HBaseTableUtil)7 HBaseTableUtilFactory (co.cask.cdap.data2.util.hbase.HBaseTableUtilFactory)7 HTableDescriptorBuilder (co.cask.cdap.data2.util.hbase.HTableDescriptorBuilder)7 HRegionInfo (org.apache.hadoop.hbase.HRegionInfo)7 RegionInfo (org.apache.hadoop.hbase.client.RegionInfo)6 MasterFileSystem (org.apache.hadoop.hbase.master.MasterFileSystem)6 ColumnFamilyDescriptor (org.apache.hadoop.hbase.client.ColumnFamilyDescriptor)5 TableDescriptor (org.apache.hadoop.hbase.client.TableDescriptor)5 WAL (org.apache.hadoop.hbase.wal.WAL)5 WALFactory (org.apache.hadoop.hbase.wal.WALFactory)5 ArrayList (java.util.ArrayList)4 StoreFileTracker (org.apache.hadoop.hbase.regionserver.storefiletracker.StoreFileTracker)4 SnapshotRegionManifest (org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotRegionManifest)4