Example 21 with HRegionFileSystem

Use of org.apache.hadoop.hbase.regionserver.HRegionFileSystem in project hbase by apache.

From the class CatalogJanitor, the method checkDaughterInFs:

/**
   * Checks if a daughter region -- either splitA or splitB -- still holds
   * references to parent.
   * @param parent Parent region
   * @param daughter Daughter region
   * @return A pair where the first boolean indicates whether the daughter
   * region directory exists in the filesystem, and the second indicates
   * whether the daughter still holds references to the parent.
   * @throws IOException
   */
Pair<Boolean, Boolean> checkDaughterInFs(final HRegionInfo parent, final HRegionInfo daughter) throws IOException {
    if (daughter == null) {
        return new Pair<>(Boolean.FALSE, Boolean.FALSE);
    }
    FileSystem fs = this.services.getMasterFileSystem().getFileSystem();
    Path rootdir = this.services.getMasterFileSystem().getRootDir();
    Path tabledir = FSUtils.getTableDir(rootdir, daughter.getTable());
    Path daughterRegionDir = new Path(tabledir, daughter.getEncodedName());
    HRegionFileSystem regionFs = null;
    try {
        // If the daughter region directory is gone, nothing can still reference the parent.
        if (!FSUtils.isExists(fs, daughterRegionDir)) {
            return new Pair<>(Boolean.FALSE, Boolean.FALSE);
        }
    } catch (IOException ioe) {
        LOG.error("Error trying to determine if daughter region exists; assuming it exists and has references", ioe);
        return new Pair<>(Boolean.TRUE, Boolean.TRUE);
    }
    boolean references = false;
    HTableDescriptor parentDescriptor = getTableDescriptor(parent.getTable());
    try {
        // Open the daughter read-only (last argument true); we only inspect its reference files.
        regionFs = HRegionFileSystem.openRegionFromFileSystem(this.services.getConfiguration(), fs, tabledir, daughter, true);
        // Check each of the parent's column families for reference files left behind by the split.
        for (HColumnDescriptor family : parentDescriptor.getFamilies()) {
            if ((references = regionFs.hasReferences(family.getNameAsString()))) {
                break;
            }
        }
    } catch (IOException e) {
        LOG.error("Error trying to determine referenced files from: " + daughter.getEncodedName() + " to: " + parent.getEncodedName() + "; assuming it has references", e);
        return new Pair<>(Boolean.TRUE, Boolean.TRUE);
    }
    return new Pair<>(Boolean.TRUE, Boolean.valueOf(references));
}
Also used: Path(org.apache.hadoop.fs.Path) HColumnDescriptor(org.apache.hadoop.hbase.HColumnDescriptor) HRegionFileSystem(org.apache.hadoop.hbase.regionserver.HRegionFileSystem) FileSystem(org.apache.hadoop.fs.FileSystem) IOException(java.io.IOException) Pair(org.apache.hadoop.hbase.util.Pair) HTableDescriptor(org.apache.hadoop.hbase.HTableDescriptor)
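
For orientation, here is a minimal sketch (hypothetical caller code, not HBase source; the parent, splitA, and splitB variables and the archiveParentRegion helper are assumed) of how cleanup logic would interpret the two pairs returned for a split's daughters:

Pair<Boolean, Boolean> a = checkDaughterInFs(parent, splitA);
Pair<Boolean, Boolean> b = checkDaughterInFs(parent, splitB);
// The parent region is safe to clean up only once both daughter directories
// exist and neither still holds reference files into the parent.
boolean parentCleanable = a.getFirst() && b.getFirst() && !a.getSecond() && !b.getSecond();
if (parentCleanable) {
    archiveParentRegion(parent); // hypothetical helper, not part of the snippet above
}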

Example 22 with HRegionFileSystem

Use of org.apache.hadoop.hbase.regionserver.HRegionFileSystem in project cdap by caskdata.

From the class IncrementSummingScannerTest, the method createRegion:

static HRegion createRegion(Configuration hConf, CConfiguration cConf, TableId tableId, HColumnDescriptor cfd) throws Exception {
    HBaseTableUtil tableUtil = new HBaseTableUtilFactory(cConf).get();
    HTableDescriptorBuilder htd = tableUtil.buildHTableDescriptor(tableId);
    // Retain all versions and deleted cells; increments are stored as
    // individual versions that the summing scanner folds together at read time.
    cfd.setMaxVersions(Integer.MAX_VALUE);
    cfd.setKeepDeletedCells(true);
    htd.addFamily(cfd);
    htd.addCoprocessor(IncrementHandler.class.getName());
    HTableDescriptor desc = htd.build();
    String tableName = desc.getNameAsString();
    Path tablePath = new Path("/tmp/" + tableName);
    Path hlogPath = new Path("/tmp/hlog-" + tableName);
    FileSystem fs = FileSystem.get(hConf);
    assertTrue(fs.mkdirs(tablePath));
    // Stand up a WAL for the region without running a full region server.
    WALFactory walFactory = new WALFactory(hConf, null, hlogPath.toString());
    WAL hLog = walFactory.getWAL(new byte[] { 1 });
    HRegionInfo regionInfo = new HRegionInfo(desc.getTableName());
    // Lay out the region directory on the filesystem, then construct the HRegion directly.
    HRegionFileSystem regionFS = HRegionFileSystem.createRegionOnFileSystem(hConf, fs, tablePath, regionInfo);
    return new HRegion(regionFS, hLog, hConf, desc, new LocalRegionServerServices(hConf, ServerName.valueOf(InetAddress.getLocalHost().getHostName(), 0, System.currentTimeMillis())));
}
Also used: Path(org.apache.hadoop.fs.Path) HTableDescriptorBuilder(co.cask.cdap.data2.util.hbase.HTableDescriptorBuilder) WAL(org.apache.hadoop.hbase.wal.WAL) HBaseTableUtil(co.cask.cdap.data2.util.hbase.HBaseTableUtil) HTableDescriptor(org.apache.hadoop.hbase.HTableDescriptor) HRegionInfo(org.apache.hadoop.hbase.HRegionInfo) HRegion(org.apache.hadoop.hbase.regionserver.HRegion) HRegionFileSystem(org.apache.hadoop.hbase.regionserver.HRegionFileSystem) FileSystem(org.apache.hadoop.fs.FileSystem) HBaseTableUtilFactory(co.cask.cdap.data2.util.hbase.HBaseTableUtilFactory) WALFactory(org.apache.hadoop.hbase.wal.WALFactory)
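
As a hedged usage sketch (the namespace, table, and family names are made up, and the HRegion lifecycle calls are assumed from the surrounding test class), a test would drive the helper roughly like this:

HColumnDescriptor cfd = new HColumnDescriptor("i");
HRegion region = createRegion(hConf, cConf, TableId.from("default", "incrementTest"), cfd);
try {
    // Replay any WAL edits and open the stores before serving reads and writes.
    region.initialize();
    // ... issue increment writes and verify the summed values here ...
} finally {
    region.close();
}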

Example 25 with HRegionFileSystem

Use of org.apache.hadoop.hbase.regionserver.HRegionFileSystem in project hbase by apache.

From the class SplitTableRegionProcedure, the method createDaughterRegions:

/**
 * Create the two daughter regions on the filesystem and verify the expected reference files.
 */
public void createDaughterRegions(final MasterProcedureEnv env) throws IOException {
    final MasterFileSystem mfs = env.getMasterServices().getMasterFileSystem();
    final Path tabledir = CommonFSUtils.getTableDir(mfs.getRootDir(), getTableName());
    final FileSystem fs = mfs.getFileSystem();
    // Open the parent region writable (readOnly=false); the splits dir is created under it.
    HRegionFileSystem regionFs = HRegionFileSystem.openRegionFromFileSystem(env.getMasterConfiguration(), fs, tabledir, getParentRegion(), false);
    regionFs.createSplitsDir(daughterOneRI, daughterTwoRI);
    // Split the parent's store files into reference files, one list of paths per daughter.
    Pair<List<Path>, List<Path>> expectedReferences = splitStoreFiles(env, regionFs);
    // For each daughter: verify the reference files in the temporary splits dir,
    // commit the daughter region, then verify them again in its final region dir.
    assertSplitResultFilesCount(fs, expectedReferences.getFirst().size(), regionFs.getSplitsDir(daughterOneRI));
    regionFs.commitDaughterRegion(daughterOneRI, expectedReferences.getFirst(), env);
    assertSplitResultFilesCount(fs, expectedReferences.getFirst().size(), new Path(tabledir, daughterOneRI.getEncodedName()));
    assertSplitResultFilesCount(fs, expectedReferences.getSecond().size(), regionFs.getSplitsDir(daughterTwoRI));
    regionFs.commitDaughterRegion(daughterTwoRI, expectedReferences.getSecond(), env);
    assertSplitResultFilesCount(fs, expectedReferences.getSecond().size(), new Path(tabledir, daughterTwoRI.getEncodedName()));
}
Also used: MasterFileSystem(org.apache.hadoop.hbase.master.MasterFileSystem) Path(org.apache.hadoop.fs.Path) HRegionFileSystem(org.apache.hadoop.hbase.regionserver.HRegionFileSystem) FileSystem(org.apache.hadoop.fs.FileSystem) List(java.util.List) ArrayList(java.util.ArrayList)
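
Note the contrast with Example 21: CatalogJanitor opens the region read-only (final argument true) because it only inspects reference files, whereas this split procedure passes false because it must create the splits directory and commit the daughter regions. For reference, the shape of the signature (taking HRegionInfo in older branches and RegionInfo in newer ones) is:

HRegionFileSystem.openRegionFromFileSystem(Configuration conf, FileSystem fs, Path tableDir, RegionInfo regionInfo, boolean readOnly)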

Aggregations

HRegionFileSystem (org.apache.hadoop.hbase.regionserver.HRegionFileSystem): 37 usages
Path (org.apache.hadoop.fs.Path): 28 usages
FileSystem (org.apache.hadoop.fs.FileSystem): 22 usages
IOException (java.io.IOException): 10 usages
HTableDescriptor (org.apache.hadoop.hbase.HTableDescriptor): 9 usages
HRegion (org.apache.hadoop.hbase.regionserver.HRegion): 9 usages
StoreFileInfo (org.apache.hadoop.hbase.regionserver.StoreFileInfo): 9 usages
HBaseTableUtil (co.cask.cdap.data2.util.hbase.HBaseTableUtil): 7 usages
HBaseTableUtilFactory (co.cask.cdap.data2.util.hbase.HBaseTableUtilFactory): 7 usages
HTableDescriptorBuilder (co.cask.cdap.data2.util.hbase.HTableDescriptorBuilder): 7 usages
HRegionInfo (org.apache.hadoop.hbase.HRegionInfo): 7 usages
RegionInfo (org.apache.hadoop.hbase.client.RegionInfo): 6 usages
MasterFileSystem (org.apache.hadoop.hbase.master.MasterFileSystem): 6 usages
ColumnFamilyDescriptor (org.apache.hadoop.hbase.client.ColumnFamilyDescriptor): 5 usages
TableDescriptor (org.apache.hadoop.hbase.client.TableDescriptor): 5 usages
WAL (org.apache.hadoop.hbase.wal.WAL): 5 usages
WALFactory (org.apache.hadoop.hbase.wal.WALFactory): 5 usages
ArrayList (java.util.ArrayList): 4 usages
StoreFileTracker (org.apache.hadoop.hbase.regionserver.storefiletracker.StoreFileTracker): 4 usages
SnapshotRegionManifest (org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotRegionManifest): 4 usages