
Example 11 with MasterFileSystem

Use of org.apache.hadoop.hbase.master.MasterFileSystem in project hbase by apache.

From the class TestHColumnDescriptorDefaultVersions, method verifyHColumnDescriptor:

private void verifyHColumnDescriptor(int expected, final TableName tableName, final byte[]... families) throws IOException {
    Admin admin = TEST_UTIL.getAdmin();
    // Verify descriptor from master
    HTableDescriptor htd = admin.getTableDescriptor(tableName);
    HColumnDescriptor[] hcds = htd.getColumnFamilies();
    verifyHColumnDescriptor(expected, hcds, tableName, families);
    // Verify descriptor from HDFS
    MasterFileSystem mfs = TEST_UTIL.getMiniHBaseCluster().getMaster().getMasterFileSystem();
    Path tableDir = FSUtils.getTableDir(mfs.getRootDir(), tableName);
    HTableDescriptor td = FSTableDescriptors.getTableDescriptorFromFs(mfs.getFileSystem(), tableDir);
    hcds = td.getColumnFamilies();
    verifyHColumnDescriptor(expected, hcds, tableName, families);
}
Also used : MasterFileSystem(org.apache.hadoop.hbase.master.MasterFileSystem) Path(org.apache.hadoop.fs.Path) Admin(org.apache.hadoop.hbase.client.Admin)
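For context, a minimal sketch of how a helper like verifyHColumnDescriptor is typically driven from a test. The table name, family, and expected version count below are hypothetical; the sketch assumes the same TEST_UTIL fixture and imports as the snippet above.

@Test
public void testCreateTableWithDefaultVersions() throws IOException {
    // Hypothetical table and family names; the family relies on the default VERSIONS setting.
    TableName tableName = TableName.valueOf("testDefaultVersionsTable");
    byte[] family = Bytes.toBytes("cf");
    HTableDescriptor baseHtd = new HTableDescriptor(tableName);
    baseHtd.addFamily(new HColumnDescriptor(family));
    Admin admin = TEST_UTIL.getAdmin();
    admin.createTable(baseHtd);
    try {
        // 1 is the default maximum number of versions in recent HBase releases.
        verifyHColumnDescriptor(1, tableName, family);
    } finally {
        admin.disableTable(tableName);
        admin.deleteTable(tableName);
    }
}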

Example 12 with MasterFileSystem

Use of org.apache.hadoop.hbase.master.MasterFileSystem in project hbase by apache.

From the class TestAsyncTableAdminApi, method verifyTableDescriptor:

private void verifyTableDescriptor(final TableName tableName, final byte[]... families) throws IOException {
    Admin admin = TEST_UTIL.getAdmin();
    // Verify descriptor from master
    HTableDescriptor htd = admin.getTableDescriptor(tableName);
    verifyTableDescriptor(htd, tableName, families);
    // Verify descriptor from HDFS
    MasterFileSystem mfs = TEST_UTIL.getMiniHBaseCluster().getMaster().getMasterFileSystem();
    Path tableDir = FSUtils.getTableDir(mfs.getRootDir(), tableName);
    HTableDescriptor td = FSTableDescriptors.getTableDescriptorFromFs(mfs.getFileSystem(), tableDir);
    verifyTableDescriptor(td, tableName, families);
}
Also used : MasterFileSystem(org.apache.hadoop.hbase.master.MasterFileSystem) Path(org.apache.hadoop.fs.Path) HTableDescriptor(org.apache.hadoop.hbase.HTableDescriptor)

Example 13 with MasterFileSystem

Use of org.apache.hadoop.hbase.master.MasterFileSystem in project hbase by apache.

From the class SnapshotTestingUtils, method confirmSnapshotValid:

public static void confirmSnapshotValid(HBaseTestingUtility testUtil, HBaseProtos.SnapshotDescription snapshotDescriptor, TableName tableName, byte[] family) throws IOException {
    MasterFileSystem mfs = testUtil.getHBaseCluster().getMaster().getMasterFileSystem();
    confirmSnapshotValid(snapshotDescriptor, tableName, family, mfs.getRootDir(), testUtil.getAdmin(), mfs.getFileSystem());
}
Also used : MasterFileSystem(org.apache.hadoop.hbase.master.MasterFileSystem)
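As a hedged usage sketch (the snapshot and table names are hypothetical, and the table is assumed to already exist and contain data), a test can take a snapshot through the Admin API, describe it with the protobuf builder matching the HBaseProtos.SnapshotDescription parameter, and then validate it with the helper above.

@Test
public void testSnapshotIsValid() throws IOException {
    // Hypothetical names; the table is assumed to already exist with family "cf".
    TableName tableName = TableName.valueOf("testSnapshotTable");
    byte[] family = Bytes.toBytes("cf");
    String snapshotName = "snapshotOfTestSnapshotTable";
    // Take the snapshot through the Admin API.
    TEST_UTIL.getAdmin().snapshot(snapshotName, tableName);
    // Build the protobuf description expected by confirmSnapshotValid.
    HBaseProtos.SnapshotDescription desc = HBaseProtos.SnapshotDescription.newBuilder()
        .setName(snapshotName)
        .setTable(tableName.getNameAsString())
        .build();
    // Delegates to the overload above, which checks the snapshot against the
    // master's root dir and file system.
    SnapshotTestingUtils.confirmSnapshotValid(TEST_UTIL, desc, tableName, family);
}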

Example 14 with MasterFileSystem

Use of org.apache.hadoop.hbase.master.MasterFileSystem in project hbase by apache.

From the class TestTableDescriptorModificationFromClient, method verifyTableDescriptor:

private void verifyTableDescriptor(final TableName tableName, final byte[]... families) throws IOException {
    Admin admin = TEST_UTIL.getAdmin();
    // Verify descriptor from master
    HTableDescriptor htd = admin.getTableDescriptor(tableName);
    verifyTableDescriptor(htd, tableName, families);
    // Verify descriptor from HDFS
    MasterFileSystem mfs = TEST_UTIL.getMiniHBaseCluster().getMaster().getMasterFileSystem();
    Path tableDir = FSUtils.getTableDir(mfs.getRootDir(), tableName);
    HTableDescriptor td = FSTableDescriptors.getTableDescriptorFromFs(mfs.getFileSystem(), tableDir);
    verifyTableDescriptor(td, tableName, families);
}
Also used : MasterFileSystem(org.apache.hadoop.hbase.master.MasterFileSystem) Path(org.apache.hadoop.fs.Path) Admin(org.apache.hadoop.hbase.client.Admin) HTableDescriptor(org.apache.hadoop.hbase.HTableDescriptor)
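As an illustration of how this helper is exercised (the table and family names are hypothetical), a schema-modification test typically alters the table through the Admin API and then re-checks that the master and the HDFS copy of the descriptor agree.

@Test
public void testAddColumnFamily() throws IOException {
    // Hypothetical table and family names.
    TableName tableName = TableName.valueOf("testModifyTable");
    byte[] family1 = Bytes.toBytes("cf1");
    byte[] family2 = Bytes.toBytes("cf2");
    Admin admin = TEST_UTIL.getAdmin();
    // Create the table with a single family and check that master and HDFS agree.
    HTableDescriptor baseHtd = new HTableDescriptor(tableName);
    baseHtd.addFamily(new HColumnDescriptor(family1));
    admin.createTable(baseHtd);
    verifyTableDescriptor(tableName, family1);
    // Add a second family on a mutable copy and verify the change is persisted.
    // (A real test may need to wait for the schema alteration to complete first.)
    HTableDescriptor modifiedHtd = new HTableDescriptor(admin.getTableDescriptor(tableName));
    modifiedHtd.addFamily(new HColumnDescriptor(family2));
    admin.modifyTable(tableName, modifiedHtd);
    verifyTableDescriptor(tableName, family1, family2);
}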

Example 15 with MasterFileSystem

Use of org.apache.hadoop.hbase.master.MasterFileSystem in project hbase by apache.

From the class SplitTableRegionProcedure, method createDaughterRegions:

/**
   * Create daughter regions
   * @param env MasterProcedureEnv
   * @throws IOException
   */
@VisibleForTesting
public void createDaughterRegions(final MasterProcedureEnv env) throws IOException {
    final MasterFileSystem mfs = env.getMasterServices().getMasterFileSystem();
    final Path tabledir = FSUtils.getTableDir(mfs.getRootDir(), parentHRI.getTable());
    final FileSystem fs = mfs.getFileSystem();
    HRegionFileSystem regionFs = HRegionFileSystem.openRegionFromFileSystem(env.getMasterConfiguration(), fs, tabledir, parentHRI, false);
    regionFs.createSplitsDir();
    Pair<Integer, Integer> expectedReferences = splitStoreFiles(env, regionFs);
    assertReferenceFileCount(fs, expectedReferences.getFirst(), regionFs.getSplitsDir(daughter_1_HRI));
    //Move the files from the temporary .splits to the final /table/region directory
    regionFs.commitDaughterRegion(daughter_1_HRI);
    assertReferenceFileCount(fs, expectedReferences.getFirst(), new Path(tabledir, daughter_1_HRI.getEncodedName()));
    assertReferenceFileCount(fs, expectedReferences.getSecond(), regionFs.getSplitsDir(daughter_2_HRI));
    regionFs.commitDaughterRegion(daughter_2_HRI);
    assertReferenceFileCount(fs, expectedReferences.getSecond(), new Path(tabledir, daughter_2_HRI.getEncodedName()));
}
Also used : MasterFileSystem(org.apache.hadoop.hbase.master.MasterFileSystem) Path(org.apache.hadoop.fs.Path) HRegionFileSystem(org.apache.hadoop.hbase.regionserver.HRegionFileSystem) FileSystem(org.apache.hadoop.fs.FileSystem) VisibleForTesting(com.google.common.annotations.VisibleForTesting)
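The assertReferenceFileCount helper used above is not shown in this snippet. Below is a minimal sketch of what such a check can look like, counting reference files under the family directories of a region (or .splits daughter) directory via StoreFileInfo.isReference; it is an illustration under those assumptions, not the actual HBase implementation.

// Hypothetical reference-file counter: walks the family directories under a region
// (or .splits daughter) directory and counts entries HBase recognizes as reference files.
private static void assertReferenceFileCount(final FileSystem fs, final int expected,
        final Path regionDir) throws IOException {
    int refCount = 0;
    if (fs.exists(regionDir)) {
        for (FileStatus familyDir : fs.listStatus(regionDir)) {
            if (!familyDir.isDirectory()) {
                continue;
            }
            for (FileStatus storeFile : fs.listStatus(familyDir.getPath())) {
                if (StoreFileInfo.isReference(storeFile.getPath())) {
                    refCount++;
                }
            }
        }
    }
    if (refCount != expected) {
        throw new IOException("Expected " + expected + " reference files under " + regionDir
            + " but found " + refCount);
    }
}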

Aggregations

MasterFileSystem (org.apache.hadoop.hbase.master.MasterFileSystem): 24 usages
Path (org.apache.hadoop.fs.Path): 19 usages
FileSystem (org.apache.hadoop.fs.FileSystem): 10 usages
IOException (java.io.IOException): 7 usages
HTableDescriptor (org.apache.hadoop.hbase.HTableDescriptor): 5 usages
Configuration (org.apache.hadoop.conf.Configuration): 4 usages
DoNotRetryIOException (org.apache.hadoop.hbase.DoNotRetryIOException): 4 usages
HRegionInfo (org.apache.hadoop.hbase.HRegionInfo): 4 usages
HRegionFileSystem (org.apache.hadoop.hbase.regionserver.HRegionFileSystem): 4 usages
SnapshotManifest (org.apache.hadoop.hbase.snapshot.SnapshotManifest): 4 usages
ArrayList (java.util.ArrayList): 3 usages
FileStatus (org.apache.hadoop.fs.FileStatus): 3 usages
TableName (org.apache.hadoop.hbase.TableName): 3 usages
HashSet (java.util.HashSet): 2 usages
HColumnDescriptor (org.apache.hadoop.hbase.HColumnDescriptor): 2 usages
Admin (org.apache.hadoop.hbase.client.Admin): 2 usages
ForeignException (org.apache.hadoop.hbase.errorhandling.ForeignException): 2 usages
ForeignExceptionDispatcher (org.apache.hadoop.hbase.errorhandling.ForeignExceptionDispatcher): 2 usages
CacheConfig (org.apache.hadoop.hbase.io.hfile.CacheConfig): 2 usages
StoreFile (org.apache.hadoop.hbase.regionserver.StoreFile): 2 usages