Use of org.apache.hadoop.hbase.HColumnDescriptor in project hbase by apache — class TableSchemaModel, method getTableDescriptor.
/**
 * Builds an {@code HTableDescriptor} from this model: every table-level attribute
 * is copied onto the descriptor, and each column schema becomes a column family
 * carrying its own attributes.
 * @return a table descriptor
 */
@JsonIgnore
public HTableDescriptor getTableDescriptor() {
  HTableDescriptor tableDescriptor = new HTableDescriptor(TableName.valueOf(getName()));
  // Table-level attributes: the local part of each qualified name is the key.
  for (Map.Entry<QName, Object> attribute : getAny().entrySet()) {
    tableDescriptor.setValue(attribute.getKey().getLocalPart(), attribute.getValue().toString());
  }
  // One column family per column schema, with its per-family attributes.
  for (ColumnSchemaModel columnModel : getColumns()) {
    HColumnDescriptor family = new HColumnDescriptor(columnModel.getName());
    for (Map.Entry<QName, Object> attribute : columnModel.getAny().entrySet()) {
      family.setValue(attribute.getKey().getLocalPart(), attribute.getValue().toString());
    }
    tableDescriptor.addFamily(family);
  }
  return tableDescriptor;
}
Use of org.apache.hadoop.hbase.HColumnDescriptor in project hbase by apache — class MobUtils, method doMobCompaction.
/**
 * Performs the mob compaction for one mob-enabled column family, serialized under the
 * given master lock. The compactor implementation is pluggable via
 * {@code MobConstants.MOB_COMPACTOR_CLASS_KEY} and defaults to {@code PartitionedMobCompactor}.
 * @param conf the Configuration
 * @param fs the file system
 * @param tableName the table to compact
 * @param hcd the column descriptor of the mob-enabled family
 * @param pool the thread pool used by the compactor
 * @param allFiles whether to add all mob files into the compaction
 * @param lock the master lock held while the compaction runs
 * @throws IOException if the configured compactor class cannot be instantiated
 */
public static void doMobCompaction(Configuration conf, FileSystem fs, TableName tableName, HColumnDescriptor hcd, ExecutorService pool, boolean allFiles, LockManager.MasterLock lock) throws IOException {
  String className = conf.get(MobConstants.MOB_COMPACTOR_CLASS_KEY, PartitionedMobCompactor.class.getName());
  // Instantiate the mob compactor reflectively via its 5-arg constructor.
  MobCompactor compactor = null;
  try {
    compactor = ReflectionUtils.instantiateWithCustomCtor(className, new Class[] { Configuration.class, FileSystem.class, TableName.class, HColumnDescriptor.class, ExecutorService.class }, new Object[] { conf, fs, tableName, hcd, pool });
  } catch (Exception e) {
    throw new IOException("Unable to load configured mob file compactor '" + className + "'", e);
  }
  // Hold the master lock to avoid racing with major compaction in the mob-enabled column.
  try {
    lock.acquire();
    compactor.compact(allFiles);
  } catch (Exception e) {
    // NOTE(review): compaction failures are logged and swallowed rather than rethrown —
    // confirm that callers do not rely on an exception to detect a failed compaction.
    LOG.error("Failed to compact the mob files for the column " + hcd.getNameAsString() + " in the table " + tableName.getNameAsString(), e);
  } finally {
    lock.release();
  }
}
Use of org.apache.hadoop.hbase.HColumnDescriptor in project hbase by apache — class ExpiredMobFileCleaner, method run.
/**
 * Command-line entry point: validates that the named column family is mob-enabled with
 * minVersions of 0, then cleans its expired mob files.
 * Expects exactly two arguments: the table name and the family name.
 * @param args {tableName, familyName}
 * @return 0 on success, 1 on usage error
 * @throws Exception if validation fails or cleaning the mob files fails
 */
@edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "REC_CATCH_EXCEPTION", justification = "Intentional")
public int run(String[] args) throws Exception {
  if (args.length != 2) {
    printUsage();
    return 1;
  }
  String tableName = args[0];
  String familyName = args[1];
  TableName tn = TableName.valueOf(tableName);
  HBaseAdmin.available(getConf());
  Connection connection = ConnectionFactory.createConnection(getConf());
  Admin admin = null;
  try {
    // getAdmin() is inside the try so the connection is closed even if it throws
    // (previously a failure here leaked the Connection).
    admin = connection.getAdmin();
    HTableDescriptor htd = admin.getTableDescriptor(tn);
    HColumnDescriptor family = htd.getFamily(Bytes.toBytes(familyName));
    if (family == null || !family.isMobEnabled()) {
      throw new IOException("Column family " + familyName + " is not a MOB column family");
    }
    if (family.getMinVersions() > 0) {
      // minVersions > 0 means cells must be retained regardless of TTL, which this
      // cleaner cannot honor.
      throw new IOException("The minVersions of the column family is not 0, could not be handled by this cleaner");
    }
    cleanExpiredMobFiles(tableName, family);
    return 0;
  } finally {
    if (admin != null) {
      try {
        admin.close();
      } catch (IOException e) {
        LOG.error("Failed to close the HBaseAdmin.", e);
      }
    }
    try {
      connection.close();
    } catch (IOException e) {
      LOG.error("Failed to close the connection.", e);
    }
  }
}
Use of org.apache.hadoop.hbase.HColumnDescriptor in project hbase by apache — class VisibilityUtils, method createVisibilityLabelFilter.
/**
 * Creates a {@code VisibilityLabelFilter} for the given region, recording each column
 * family's configured max versions so the filter can honor them.
 * @param region the region whose table descriptor supplies the families
 * @param authorizations the authorizations to evaluate visibility expressions against
 * @return a filter enforcing the given visibility authorizations
 * @throws IOException if the visibility label service cannot be obtained
 */
public static Filter createVisibilityLabelFilter(Region region, Authorizations authorizations) throws IOException {
  Map<ByteRange, Integer> maxVersionsByFamily = new HashMap<>();
  for (HColumnDescriptor family : region.getTableDesc().getFamilies()) {
    maxVersionsByFamily.put(new SimpleMutableByteRange(family.getName()), family.getMaxVersions());
  }
  VisibilityLabelService labelService =
    VisibilityLabelServiceManager.getInstance().getVisibilityLabelService();
  return new VisibilityLabelFilter(labelService.getVisibilityExpEvaluator(authorizations),
    maxVersionsByFamily);
}
Use of org.apache.hadoop.hbase.HColumnDescriptor in project hbase by apache — class SnapshotManifest, method addMobRegion.
/**
 * Writes the mob region's meta info into the snapshot and adds a reference for every
 * mob store file. Families that are not mob-enabled, or that have no mob files, are
 * skipped.
 * @param regionInfo the mob region to include in the snapshot
 * @throws IOException if writing the snapshot data fails
 */
public void addMobRegion(HRegionInfo regionInfo) throws IOException {
  // 0. Get the ManifestBuilder/RegionVisitor.
  RegionVisitor visitor = createRegionVisitor(desc);
  // 1. Dump region meta info into the snapshot directory.
  LOG.debug("Storing mob region '" + regionInfo + "' region-info for snapshot.");
  Object regionData = visitor.regionOpen(regionInfo);
  monitor.rethrowException();
  // 2. Reference the store files of every mob-enabled family.
  LOG.debug("Creating references for mob files");
  Path mobRegionPath = MobUtils.getMobRegionPath(conf, regionInfo.getTable());
  for (HColumnDescriptor family : htd.getColumnFamilies()) {
    // 2.1. Only mob stores get snapshot references.
    if (!family.isMobEnabled()) {
      continue;
    }
    Object familyData = visitor.familyOpen(regionData, family.getName());
    monitor.rethrowException();
    Path storePath = MobUtils.getMobFamilyPath(mobRegionPath, family.getNameAsString());
    List<StoreFileInfo> mobFiles = getStoreFiles(storePath);
    if (mobFiles == null) {
      if (LOG.isDebugEnabled()) {
        LOG.debug("No mob files under family: " + family.getNameAsString());
      }
      continue;
    }
    addReferenceFiles(visitor, regionData, familyData, mobFiles, true);
    visitor.familyClose(regionData, familyData);
  }
  visitor.regionClose(regionData);
}
Aggregations