Use of org.apache.hadoop.hbase.TableName in project hbase by apache.
The class AccessController, method getUserPermissions.
@Override
public void getUserPermissions(RpcController controller, AccessControlProtos.GetUserPermissionsRequest request, RpcCallback<AccessControlProtos.GetUserPermissionsResponse> done) {
  AccessControlProtos.GetUserPermissionsResponse response = null;
  try {
    // only allowed to be called on _acl_ region
    if (aclRegion) {
      if (!initialized) {
        throw new CoprocessorException("AccessController not yet initialized");
      }
      User caller = RpcServer.getRequestUser();
      List<UserPermission> perms = null;
      if (request.getType() == AccessControlProtos.Permission.Type.Table) {
        final TableName table = request.hasTableName() ? ProtobufUtil.toTableName(request.getTableName()) : null;
        requirePermission(caller, "userPermissions", table, null, null, Action.ADMIN);
        perms = User.runAsLoginUser(new PrivilegedExceptionAction<List<UserPermission>>() {
          @Override
          public List<UserPermission> run() throws Exception {
            return AccessControlLists.getUserTablePermissions(regionEnv.getConfiguration(), table);
          }
        });
      } else if (request.getType() == AccessControlProtos.Permission.Type.Namespace) {
        final String namespace = request.getNamespaceName().toStringUtf8();
        requireNamespacePermission(caller, "userPermissions", namespace, Action.ADMIN);
        perms = User.runAsLoginUser(new PrivilegedExceptionAction<List<UserPermission>>() {
          @Override
          public List<UserPermission> run() throws Exception {
            return AccessControlLists.getUserNamespacePermissions(regionEnv.getConfiguration(), namespace);
          }
        });
      } else {
        requirePermission(caller, "userPermissions", Action.ADMIN);
        perms = User.runAsLoginUser(new PrivilegedExceptionAction<List<UserPermission>>() {
          @Override
          public List<UserPermission> run() throws Exception {
            return AccessControlLists.getUserPermissions(regionEnv.getConfiguration(), null);
          }
        });
        // Add superusers to the result explicitly; this helps avoid leaking
        // information about who the superusers are.
        for (String user : Superusers.getSuperUsers()) {
          perms.add(new UserPermission(user.getBytes(), AccessControlLists.ACL_TABLE_NAME, null, Action.values()));
        }
      }
      response = AccessControlUtil.buildGetUserPermissionsResponse(perms);
    } else {
      throw new CoprocessorException(AccessController.class, "This method " + "can only execute at " + AccessControlLists.ACL_TABLE_NAME + " table.");
    }
  } catch (IOException ioe) {
    // pass exception back up
    CoprocessorRpcUtils.setControllerException(controller, ioe);
  }
  done.run(response);
}
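For comparison, the usual way to reach this coprocessor endpoint from application code is the client-side helper AccessControlClient. A minimal sketch, assuming a reachable cluster, a caller that holds ADMIN permission, and a placeholder table name "my_table":

import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.security.access.AccessControlClient;
import org.apache.hadoop.hbase.security.access.UserPermission;

public class ListTablePermissionsSketch {
  public static void main(String[] args) throws Throwable {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf)) {
      // The second argument is a table-name regex; ".*" would list permissions for all tables.
      List<UserPermission> perms = AccessControlClient.getUserPermissions(connection, "my_table");
      for (UserPermission perm : perms) {
        System.out.println(perm);
      }
    }
  }
}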
Use of org.apache.hadoop.hbase.TableName in project hbase by apache.
The class AccessController, method preIncrementAfterRowLock.
@Override
public Result preIncrementAfterRowLock(final ObserverContext<RegionCoprocessorEnvironment> c, final Increment increment) throws IOException {
  if (increment.getAttribute(CHECK_COVERING_PERM) != null) {
    // We had failure with table, cf and q perm checks and now giving a chance for cell
    // perm check
    TableName table = c.getEnvironment().getRegion().getRegionInfo().getTable();
    AuthResult authResult = null;
    User user = getActiveUser(c);
    if (checkCoveringPermission(user, OpType.INCREMENT, c.getEnvironment(), increment.getRow(), increment.getFamilyCellMap(), increment.getTimeRange().getMax(), Action.WRITE)) {
      authResult = AuthResult.allow(OpType.INCREMENT.toString(), "Covering cell set", user, Action.WRITE, table, increment.getFamilyCellMap());
    } else {
      authResult = AuthResult.deny(OpType.INCREMENT.toString(), "Covering cell set", user, Action.WRITE, table, increment.getFamilyCellMap());
    }
    logResult(authResult);
    if (authorizationEnabled && !authResult.isAllowed()) {
      throw new AccessDeniedException("Insufficient permissions " + authResult.toContextString());
    }
  }
  return null;
}
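This hook only changes the outcome when the existing cells carry per-cell ACLs (the operation is tagged with CHECK_COVERING_PERM once the table/CF/qualifier checks fail). A minimal client-side sketch of the kind of data it evaluates, assuming cell ACLs are enabled on the cluster (HFile format v3 plus the AccessController coprocessor) and using placeholder row, column, and user names:

import org.apache.hadoop.hbase.client.Increment;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.security.access.Permission;
import org.apache.hadoop.hbase.util.Bytes;

public class CellAclIncrementSketch {
  // 'table' is a Table obtained from a Connection elsewhere.
  static void writeAndIncrement(Table table) throws Exception {
    byte[] row = Bytes.toBytes("row1");
    byte[] cf = Bytes.toBytes("f");
    byte[] q = Bytes.toBytes("counter");
    Put put = new Put(row);
    put.addColumn(cf, q, Bytes.toBytes(0L));
    // Attach a per-cell ACL granting WRITE to user "bob". Without a table-level grant,
    // bob's later increment succeeds only if this covering cell permits it,
    // which is exactly the check preIncrementAfterRowLock performs.
    put.setACL("bob", new Permission(Permission.Action.WRITE));
    table.put(put);

    Increment inc = new Increment(row);
    inc.addColumn(cf, q, 1L);
    table.increment(inc);
  }
}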
Use of org.apache.hadoop.hbase.TableName in project hbase by apache.
The class AccessController, method preCheckAndPutAfterRowLock.
@Override
public boolean preCheckAndPutAfterRowLock(final ObserverContext<RegionCoprocessorEnvironment> c, final byte[] row, final byte[] family, final byte[] qualifier, final CompareFilter.CompareOp compareOp, final ByteArrayComparable comparator, final Put put, final boolean result) throws IOException {
  if (put.getAttribute(CHECK_COVERING_PERM) != null) {
    // We had failure with table, cf and q perm checks and now giving a chance for cell
    // perm check
    TableName table = c.getEnvironment().getRegion().getRegionInfo().getTable();
    Map<byte[], ? extends Collection<byte[]>> families = makeFamilyMap(family, qualifier);
    AuthResult authResult = null;
    User user = getActiveUser(c);
    if (checkCoveringPermission(user, OpType.CHECK_AND_PUT, c.getEnvironment(), row, families, HConstants.LATEST_TIMESTAMP, Action.READ)) {
      authResult = AuthResult.allow(OpType.CHECK_AND_PUT.toString(), "Covering cell set", user, Action.READ, table, families);
    } else {
      authResult = AuthResult.deny(OpType.CHECK_AND_PUT.toString(), "Covering cell set", user, Action.READ, table, families);
    }
    logResult(authResult);
    if (authorizationEnabled && !authResult.isAllowed()) {
      throw new AccessDeniedException("Insufficient permissions " + authResult.toContextString());
    }
  }
  return result;
}
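The client operation behind this hook is checkAndPut; the hook requires READ on the compared cell when the coarser table/CF/qualifier checks alone were not enough (the Put itself is checked for WRITE separately). A minimal sketch of the corresponding client call, with placeholder row and column names:

import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class CheckAndPutSketch {
  // Atomically set f:state to "done" only if it currently equals "pending".
  static boolean markDone(Table table) throws Exception {
    byte[] row = Bytes.toBytes("row1");
    byte[] cf = Bytes.toBytes("f");
    byte[] q = Bytes.toBytes("state");
    Put put = new Put(row);
    put.addColumn(cf, q, Bytes.toBytes("done"));
    // The compare step is what preCheckAndPutAfterRowLock authorizes with Action.READ.
    return table.checkAndPut(row, cf, q, Bytes.toBytes("pending"), put);
  }
}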
Use of org.apache.hadoop.hbase.TableName in project hbase by apache.
The class SnapshotReferenceUtil, method verifyStoreFile.
/**
 * Verify the validity of the snapshot store file
 *
 * @param conf The current {@link Configuration} instance.
 * @param fs {@link FileSystem}
 * @param snapshotDir {@link Path} to the Snapshot directory of the snapshot to verify
 * @param snapshot the {@link SnapshotDescription} of the snapshot to verify
 * @param regionInfo {@link HRegionInfo} of the region that contains the store file
 * @param family family that contains the store file
 * @param storeFile the store file to verify
 * @throws CorruptedSnapshotException if the snapshot is corrupted
 * @throws IOException if an error occurred while scanning the directory
 */
private static void verifyStoreFile(final Configuration conf, final FileSystem fs, final Path snapshotDir, final SnapshotDescription snapshot, final HRegionInfo regionInfo, final String family, final SnapshotRegionManifest.StoreFile storeFile) throws IOException {
  TableName table = TableName.valueOf(snapshot.getTable());
  String fileName = storeFile.getName();
  Path refPath = null;
  if (StoreFileInfo.isReference(fileName)) {
    // If is a reference file check if the parent file is present in the snapshot
    refPath = new Path(new Path(regionInfo.getEncodedName(), family), fileName);
    refPath = StoreFileInfo.getReferredToFile(refPath);
    String refRegion = refPath.getParent().getParent().getName();
    refPath = HFileLink.createPath(table, refRegion, family, refPath.getName());
    if (!HFileLink.buildFromHFileLinkPattern(conf, refPath).exists(fs)) {
      throw new CorruptedSnapshotException("Missing parent hfile for: " + fileName + " path=" + refPath, ProtobufUtil.createSnapshotDesc(snapshot));
    }
    if (storeFile.hasReference()) {
      // we already have the Reference information embedded here.
      return;
    }
  }
  Path linkPath;
  if (refPath != null && HFileLink.isHFileLink(refPath)) {
    linkPath = new Path(family, refPath.getName());
  } else if (HFileLink.isHFileLink(fileName)) {
    linkPath = new Path(family, fileName);
  } else {
    linkPath = new Path(family, HFileLink.createHFileLinkName(table, regionInfo.getEncodedName(), fileName));
  }
  // check if the linked file exists (in the archive, or in the table dir)
  HFileLink link = null;
  if (MobUtils.isMobRegionInfo(regionInfo)) {
    // for mob region
    link = HFileLink.buildFromHFileLinkPattern(MobUtils.getQualifiedMobRootDir(conf), HFileArchiveUtil.getArchivePath(conf), linkPath);
  } else {
    // not mob region
    link = HFileLink.buildFromHFileLinkPattern(conf, linkPath);
  }
  try {
    FileStatus fstat = link.getFileStatus(fs);
    if (storeFile.hasFileSize() && storeFile.getFileSize() != fstat.getLen()) {
      String msg = "hfile: " + fileName + " size does not match with the expected one. " + " found=" + fstat.getLen() + " expected=" + storeFile.getFileSize();
      LOG.error(msg);
      throw new CorruptedSnapshotException(msg, ProtobufUtil.createSnapshotDesc(snapshot));
    }
  } catch (FileNotFoundException e) {
    String msg = "Can't find hfile: " + fileName + " in the real (" + link.getOriginPath() + ") or archive (" + link.getArchivePath() + ") directory for the primary table.";
    LOG.error(msg);
    throw new CorruptedSnapshotException(msg, ProtobufUtil.createSnapshotDesc(snapshot));
  }
}
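verifyStoreFile is private; callers go through the public verification entry points of SnapshotReferenceUtil, which walk each region manifest in the snapshot and hand every store file to this check. A minimal sketch of driving a full verification, assuming the verifySnapshot(conf, fs, snapshotDir, snapshotDesc) overload is available on the branch in use and using a hypothetical snapshot name "my_snapshot":

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils;
import org.apache.hadoop.hbase.snapshot.SnapshotReferenceUtil;

public class VerifySnapshotSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    Path rootDir = new Path(conf.get("hbase.rootdir"));
    FileSystem fs = rootDir.getFileSystem(conf);
    // Resolve the completed-snapshot directory for the (hypothetical) snapshot name.
    Path snapshotDir = SnapshotDescriptionUtils.getCompletedSnapshotDir("my_snapshot", rootDir);
    // Walks every region manifest; each store file ends up in verifyStoreFile above.
    SnapshotReferenceUtil.verifySnapshot(conf, fs, snapshotDir,
        SnapshotDescriptionUtils.readSnapshotInfo(fs, snapshotDir));
  }
}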
Use of org.apache.hadoop.hbase.TableName in project hbase by apache.
The class TestBackupLogCleaner, method testBackupLogCleaner.
// Implements all test cases in one test, since incremental backups depend on the
// preceding full backup.
@Test
public void testBackupLogCleaner() throws Exception {
  // #1 - create full backup for all tables
  LOG.info("create full backup image for all tables");
  List<TableName> tableSetFullList = Lists.newArrayList(table1, table2, table3, table4);
  try (BackupSystemTable systemTable = new BackupSystemTable(TEST_UTIL.getConnection())) {
    // Verify that we have no backup sessions yet
    assertFalse(systemTable.hasBackupSessions());
    List<FileStatus> walFiles = getListOfWALFiles(TEST_UTIL.getConfiguration());
    List<String> swalFiles = convert(walFiles);
    BackupLogCleaner cleaner = new BackupLogCleaner();
    cleaner.setConf(TEST_UTIL.getConfiguration());
    cleaner.init(null);
    cleaner.setConf(TEST_UTIL.getConfiguration());
    Iterable<FileStatus> deletable = cleaner.getDeletableFiles(walFiles);
    int size = Iterables.size(deletable);
    // We can delete all files because no backup sessions have been recorded yet
    assertTrue(size == walFiles.size());
    systemTable.addWALFiles(swalFiles, "backup", "root");
    String backupIdFull = fullTableBackup(tableSetFullList);
    assertTrue(checkSucceeded(backupIdFull));
    // Check one more time
    deletable = cleaner.getDeletableFiles(walFiles);
    // We can delete the WAL files because they were saved into the backup system table
    size = Iterables.size(deletable);
    assertTrue(size == walFiles.size());
    List<FileStatus> newWalFiles = getListOfWALFiles(TEST_UTIL.getConfiguration());
    LOG.debug("WAL list after full backup");
    convert(newWalFiles);
    // The new list of WAL files is larger than the previous one, because a new WAL
    // per RS has been opened after the full backup
    assertTrue(walFiles.size() < newWalFiles.size());
    Connection conn = ConnectionFactory.createConnection(conf1);
    // #2 - insert some data into the tables
    HTable t1 = (HTable) conn.getTable(table1);
    Put p1;
    for (int i = 0; i < NB_ROWS_IN_BATCH; i++) {
      p1 = new Put(Bytes.toBytes("row-t1" + i));
      p1.addColumn(famName, qualName, Bytes.toBytes("val" + i));
      t1.put(p1);
    }
    t1.close();
    HTable t2 = (HTable) conn.getTable(table2);
    Put p2;
    for (int i = 0; i < 5; i++) {
      p2 = new Put(Bytes.toBytes("row-t2" + i));
      p2.addColumn(famName, qualName, Bytes.toBytes("val" + i));
      t2.put(p2);
    }
    t2.close();
    // #3 - incremental backup for multiple tables
    List<TableName> tableSetIncList = Lists.newArrayList(table1, table2, table3);
    String backupIdIncMultiple = backupTables(BackupType.INCREMENTAL, tableSetIncList, BACKUP_ROOT_DIR);
    assertTrue(checkSucceeded(backupIdIncMultiple));
    deletable = cleaner.getDeletableFiles(newWalFiles);
    assertTrue(Iterables.size(deletable) == newWalFiles.size());
    conn.close();
  }
}
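Outside of this test, BackupLogCleaner only runs if it is registered with the master's log-cleaner chain. A hedged sketch of the relevant settings, expressed as Java configuration for consistency with the rest of this page; the key and class names follow the HBase backup documentation and should be verified against the branch in use:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class BackupCleanerConfigSketch {
  static Configuration backupEnabledConf() {
    Configuration conf = HBaseConfiguration.create();
    // Enable the backup feature and register the cleaner so WALs that are still
    // needed by a future incremental backup are not deleted by the master.
    conf.setBoolean("hbase.backup.enable", true);
    conf.set("hbase.master.logcleaner.plugins",
        "org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner,"
        + "org.apache.hadoop.hbase.backup.master.BackupLogCleaner");
    return conf;
  }
}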