Use of org.apache.hadoop.hbase.regionserver.HRegionFileSystem in project cdap by caskdata.
Source: class IncrementSummingScannerTest, method createRegion.
/**
 * Creates a standalone HRegion under /tmp for testing, with unlimited cell versions,
 * deleted cells retained, and the IncrementHandler coprocessor attached.
 *
 * @param hConf Hadoop configuration used for the filesystem, WAL and region
 * @param cConf CDAP configuration used to pick the HBaseTableUtil implementation
 * @param tableId id of the table to build the descriptor for
 * @param cfd column family descriptor to attach (mutated: max versions, keep-deleted)
 * @return a freshly created HRegion backed by local region-server services
 * @throws Exception if descriptor building, filesystem or WAL setup fails
 */
static HRegion createRegion(Configuration hConf, CConfiguration cConf, TableId tableId, HColumnDescriptor cfd) throws Exception {
  // Build the table descriptor: unlimited versions and kept deletes are needed so the
  // increment-summing scanner can see every stored increment cell.
  HBaseTableUtil util = new HBaseTableUtilFactory(cConf).get();
  HTableDescriptorBuilder builder = util.buildHTableDescriptor(tableId);
  cfd.setMaxVersions(Integer.MAX_VALUE);
  cfd.setKeepDeletedCells(true);
  builder.addFamily(cfd);
  builder.addCoprocessor(IncrementHandler.class.getName());
  HTableDescriptor descriptor = builder.build();

  // Lay out the region directory and a WAL directory under /tmp.
  String name = descriptor.getNameAsString();
  FileSystem fileSystem = FileSystem.get(hConf);
  Path regionDir = new Path("/tmp/" + name);
  assertTrue(fileSystem.mkdirs(regionDir));
  Path walDir = new Path("/tmp/hlog-" + name);
  WAL wal = new WALFactory(hConf, null, walDir.toString()).getWAL(new byte[] { 1 });

  HRegionInfo info = new HRegionInfo(descriptor.getTableName());
  HRegionFileSystem regionFileSystem =
      HRegionFileSystem.createRegionOnFileSystem(hConf, fileSystem, regionDir, info);
  ServerName serverName =
      ServerName.valueOf(InetAddress.getLocalHost().getHostName(), 0, System.currentTimeMillis());
  return new HRegion(regionFileSystem, wal, hConf, descriptor,
      new LocalRegionServerServices(hConf, serverName));
}
Use of org.apache.hadoop.hbase.regionserver.HRegionFileSystem in project cdap by caskdata.
Source: class IncrementSummingScannerTest, method createRegion (older HBase API variant).
// Creates a standalone HRegion under /tmp for testing. This variant targets an older
// HBase API (HLog/HLogFactory instead of WAL/WALFactory) — presumably an HBase 0.96/0.98
// compatibility module; confirm against the module this snippet was taken from.
static HRegion createRegion(Configuration hConf, CConfiguration cConf, TableId tableId, HColumnDescriptor cfd) throws Exception {
// Resolve the version-specific table utility and start a table descriptor for tableId.
HBaseTableUtil tableUtil = new HBaseTableUtilFactory(cConf).get();
HTableDescriptorBuilder htd = tableUtil.buildHTableDescriptor(tableId);
// Keep every cell version and deleted cells so increment cells remain visible to the scanner.
cfd.setMaxVersions(Integer.MAX_VALUE);
cfd.setKeepDeletedCells(true);
htd.addFamily(cfd);
// Attach the coprocessor under test.
htd.addCoprocessor(IncrementHandler.class.getName());
HTableDescriptor desc = htd.build();
String tableName = desc.getNameAsString();
// Region data and its write-ahead log each get a directory under /tmp.
Path tablePath = new Path("/tmp/" + tableName);
Path hlogPath = new Path("/tmp/hlog-" + tableName);
FileSystem fs = FileSystem.get(hConf);
assertTrue(fs.mkdirs(tablePath));
// Old-style WAL creation (pre-WALFactory API).
HLog hLog = HLogFactory.createHLog(fs, hlogPath, tableName, hConf);
HRegionInfo regionInfo = new HRegionInfo(desc.getTableName());
HRegionFileSystem regionFS = HRegionFileSystem.createRegionOnFileSystem(hConf, fs, tablePath, regionInfo);
// NOTE(review): second MockRegionServerServices argument is null — presumably an optional
// zookeeper/server-name parameter the mock tolerates; verify against the mock's constructor.
return new HRegion(regionFS, hLog, hConf, desc, new MockRegionServerServices(hConf, null));
}
Use of org.apache.hadoop.hbase.regionserver.HRegionFileSystem in project cdap by caskdata.
Source: class IncrementSummingScannerTest, method createRegion.
/**
 * Builds a test HRegion rooted under /tmp with the IncrementHandler coprocessor installed.
 * The supplied column family is mutated to retain all versions and deleted cells.
 *
 * @param hConf Hadoop configuration for filesystem, WAL and region construction
 * @param cConf CDAP configuration selecting the HBaseTableUtil implementation
 * @param tableId identifier of the table being modeled
 * @param cfd family descriptor added to the table (mutated in place)
 * @return the newly created region
 * @throws Exception on any setup failure
 */
static HRegion createRegion(Configuration hConf, CConfiguration cConf, TableId tableId, HColumnDescriptor cfd) throws Exception {
  HTableDescriptorBuilder descriptorBuilder =
      new HBaseTableUtilFactory(cConf).get().buildHTableDescriptor(tableId);

  // Retain every version and deleted cells so all increment cells stay readable.
  cfd.setMaxVersions(Integer.MAX_VALUE);
  cfd.setKeepDeletedCells(true);
  descriptorBuilder.addFamily(cfd);
  descriptorBuilder.addCoprocessor(IncrementHandler.class.getName());

  HTableDescriptor tableDescriptor = descriptorBuilder.build();
  String tableName = tableDescriptor.getNameAsString();
  Path regionPath = new Path("/tmp/" + tableName);
  Path walPath = new Path("/tmp/hlog-" + tableName);

  FileSystem fileSystem = FileSystem.get(hConf);
  assertTrue(fileSystem.mkdirs(regionPath));

  // One WAL instance keyed by a fixed single-byte id is enough for a test region.
  WALFactory factory = new WALFactory(hConf, null, walPath.toString());
  WAL writeAheadLog = factory.getWAL(new byte[] { 1 });

  HRegionInfo regionInfo = new HRegionInfo(tableDescriptor.getTableName());
  HRegionFileSystem regionFileSystem =
      HRegionFileSystem.createRegionOnFileSystem(hConf, fileSystem, regionPath, regionInfo);

  LocalRegionServerServices services = new LocalRegionServerServices(hConf,
      ServerName.valueOf(InetAddress.getLocalHost().getHostName(), 0, System.currentTimeMillis()));
  return new HRegion(regionFileSystem, writeAheadLog, hConf, tableDescriptor, services);
}
Use of org.apache.hadoop.hbase.regionserver.HRegionFileSystem in project hbase by apache.
Source: class MergeTableRegionsProcedure, method createMergedRegion.
/**
* Create merged region.
* The way the merge works is that we make a 'merges' temporary
* directory in the FIRST parent region to merge (Do not change this without
* also changing the rollback where we look in this FIRST region for the
* merge dir). We then collect here references to all the store files in all
* the parent regions including those of the FIRST parent region into a
* subdirectory, named for the resultant merged region. We then call
* commitMergeRegion. It finds this subdirectory of storefile references
* and moves them under the new merge region (creating the region layout
* as side effect). After assign of the new merge region, we will run a
* compaction. This will undo the references but the reference files remain
* in place until the archiver runs (which it does on a period as a chore
* in the RegionServer that hosts the merge region -- see
* CompactedHFilesDischarger). Once the archiver has moved aside the
* no-longer used references, the merge region no longer has references.
* The catalog janitor will notice when it runs next and it will remove
* the old parent regions.
*/
private void createMergedRegion(final MasterProcedureEnv env) throws IOException {
  final MasterFileSystem mfs = env.getMasterServices().getMasterFileSystem();
  final Path tableDir = CommonFSUtils.getTableDir(mfs.getRootDir(), regionsToMerge[0].getTable());
  final FileSystem fs = mfs.getFileSystem();
  List<Path> mergedFiles = new ArrayList<>();
  HRegionFileSystem mergeRegionFs = HRegionFileSystem.createRegionOnFileSystem(env.getMasterConfiguration(), fs, tableDir, mergedRegion);
  // Fail fast if region-fs creation produced nothing, BEFORE the loop below dereferences it.
  // (The original asserted this only after the loop, where it could no longer catch anything.)
  assert mergeRegionFs != null;
  for (RegionInfo ri : this.regionsToMerge) {
    // Open each parent read-write (last arg false) and collect references to its store files
    // under the merge region's temporary directory.
    HRegionFileSystem regionFs = HRegionFileSystem.openRegionFromFileSystem(env.getMasterConfiguration(), fs, tableDir, ri, false);
    mergedFiles.addAll(mergeStoreFiles(env, regionFs, mergeRegionFs, mergedRegion));
  }
  // Move the collected store-file references under the new merged region's layout.
  mergeRegionFs.commitMergedRegion(mergedFiles, env);
  // Prepare to create merged regions
  env.getAssignmentManager().getRegionStates().getOrCreateRegionStateNode(mergedRegion).setState(State.MERGING_NEW);
}
Use of org.apache.hadoop.hbase.regionserver.HRegionFileSystem in project hbase by apache.
Source: class CatalogJanitor, method checkDaughterInFs.
/**
* Checks if a daughter region -- either splitA or splitB -- still holds references to parent.
* @param parent Parent region
* @param daughter Daughter region
* @return A pair where the first boolean says whether or not the daughter region directory exists
* in the filesystem and then the second boolean says whether the daughter has references
* to the parent.
*/
private static Pair<Boolean, Boolean> checkDaughterInFs(MasterServices services, final RegionInfo parent, final RegionInfo daughter) throws IOException {
  // No daughter region at all: report "directory absent, no references".
  if (daughter == null) {
    return new Pair<>(Boolean.FALSE, Boolean.FALSE);
  }
  FileSystem fs = services.getMasterFileSystem().getFileSystem();
  Path rootDir = services.getMasterFileSystem().getRootDir();
  Path tableDir = CommonFSUtils.getTableDir(rootDir, daughter.getTable());
  Path daughterDir = new Path(tableDir, daughter.getEncodedName());

  // Existence check; on failure be conservative and claim the daughter exists with references.
  try {
    if (!CommonFSUtils.isExists(fs, daughterDir)) {
      return new Pair<>(Boolean.FALSE, Boolean.FALSE);
    }
  } catch (IOException ioe) {
    LOG.error("Error trying to determine if daughter region exists, " + "assuming exists and has references", ioe);
    return new Pair<>(Boolean.TRUE, Boolean.TRUE);
  }

  TableDescriptor parentDescriptor = services.getTableDescriptors().get(parent.getTable());
  boolean hasReferences = false;
  // Scan each parent column family until one is found that the daughter still references.
  try {
    HRegionFileSystem regionFs = HRegionFileSystem.openRegionFromFileSystem(services.getConfiguration(), fs, tableDir, daughter, true);
    for (ColumnFamilyDescriptor family : parentDescriptor.getColumnFamilies()) {
      if (regionFs.hasReferences(family.getNameAsString())) {
        hasReferences = true;
        break;
      }
    }
  } catch (IOException e) {
    LOG.error("Error trying to determine referenced files from : " + daughter.getEncodedName() + ", to: " + parent.getEncodedName() + " assuming has references", e);
    return new Pair<>(Boolean.TRUE, Boolean.TRUE);
  }
  return new Pair<>(Boolean.TRUE, hasReferences);
}
Aggregations