
Example 16 with DoNotRetryIOException

Use of org.apache.hadoop.hbase.DoNotRetryIOException in project hbase by apache.

From the class RestoreSnapshotProcedure, method prepareRestore:

/**
   * Action before any real action of restoring from snapshot.
   * @param env MasterProcedureEnv
   * @throws IOException if any pre-restore check fails
   */
private void prepareRestore(final MasterProcedureEnv env) throws IOException {
    final TableName tableName = getTableName();
    // Checks whether the table exists
    if (!MetaTableAccessor.tableExists(env.getMasterServices().getConnection(), tableName)) {
        throw new TableNotFoundException(tableName);
    }
    // Check whether table is disabled.
    env.getMasterServices().checkTableModifiable(tableName);
    // Check that we have at least 1 CF
    if (modifiedHTableDescriptor.getColumnFamilyCount() == 0) {
        throw new DoNotRetryIOException("Table " + getTableName().toString() + " should have at least one column family.");
    }
    if (!getTableName().isSystemTable()) {
        // Table already exists. Check and update the region quota for this table's namespace.
        final MasterFileSystem mfs = env.getMasterServices().getMasterFileSystem();
        SnapshotManifest manifest = SnapshotManifest.open(env.getMasterConfiguration(), mfs.getFileSystem(), SnapshotDescriptionUtils.getCompletedSnapshotDir(snapshot, mfs.getRootDir()), snapshot);
        int snapshotRegionCount = manifest.getRegionManifestsMap().size();
        int tableRegionCount = ProcedureSyncWait.getMasterQuotaManager(env).getRegionCountOfTable(tableName);
        if (snapshotRegionCount > 0 && tableRegionCount != snapshotRegionCount) {
            ProcedureSyncWait.getMasterQuotaManager(env).checkAndUpdateNamespaceRegionQuota(tableName, snapshotRegionCount);
        }
    }
}
Also used : MasterFileSystem(org.apache.hadoop.hbase.master.MasterFileSystem) TableName(org.apache.hadoop.hbase.TableName) TableNotFoundException(org.apache.hadoop.hbase.TableNotFoundException) SnapshotManifest(org.apache.hadoop.hbase.snapshot.SnapshotManifest) DoNotRetryIOException(org.apache.hadoop.hbase.DoNotRetryIOException)
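
The snippet above is server-side procedure code. As a rough client-side counterpart, the sketch below (a minimal, hypothetical example; the table and snapshot names are made up) calls Admin.restoreSnapshot and handles a DoNotRetryIOException such as the one prepareRestore raises for a descriptor with no column families. How the exception is wrapped on its way to the client can vary by version, so treat this as an illustration rather than the project's own usage.

import org.apache.hadoop.hbase.DoNotRetryIOException;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class RestoreSnapshotClient {
    public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
            // Hypothetical table and snapshot names.
            TableName table = TableName.valueOf("demo_table");
            // Restoring onto an existing table requires the table to be disabled first.
            admin.disableTable(table);
            try {
                admin.restoreSnapshot("demo_snapshot");
            } catch (DoNotRetryIOException e) {
                // Raised by checks like prepareRestore(); retrying the same request cannot succeed,
                // so report the failure instead of looping.
                System.err.println("Restore rejected: " + e.getMessage());
            }
            admin.enableTable(table);
        }
    }
}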

Example 17 with DoNotRetryIOException

Use of org.apache.hadoop.hbase.DoNotRetryIOException in project hbase by apache.

From the class VisibilityController, method setAuths:

@Override
public synchronized void setAuths(RpcController controller, SetAuthsRequest request, RpcCallback<VisibilityLabelsResponse> done) {
    VisibilityLabelsResponse.Builder response = VisibilityLabelsResponse.newBuilder();
    List<ByteString> auths = request.getAuthList();
    if (!initialized) {
        setExceptionResults(auths.size(), new VisibilityControllerNotReadyException("VisibilityController not yet initialized!"), response);
    } else {
        byte[] user = request.getUser().toByteArray();
        List<byte[]> labelAuths = new ArrayList<>(auths.size());
        try {
            if (authorizationEnabled) {
                checkCallingUserAuth();
            }
            for (ByteString authBS : auths) {
                labelAuths.add(authBS.toByteArray());
            }
            OperationStatus[] opStatus = this.visibilityLabelService.setAuths(user, labelAuths);
            logResult(true, "setAuths", "Setting authorization for labels allowed", user, labelAuths, null);
            RegionActionResult successResult = RegionActionResult.newBuilder().build();
            for (OperationStatus status : opStatus) {
                if (status.getOperationStatusCode() == SUCCESS) {
                    response.addResult(successResult);
                } else {
                    RegionActionResult.Builder failureResultBuilder = RegionActionResult.newBuilder();
                    failureResultBuilder.setException(buildException(new DoNotRetryIOException(status.getExceptionMsg())));
                    response.addResult(failureResultBuilder.build());
                }
            }
        } catch (AccessDeniedException e) {
            logResult(false, "setAuths", e.getMessage(), user, labelAuths, null);
            LOG.error("User is not having required permissions to set authorization", e);
            setExceptionResults(auths.size(), e, response);
        } catch (IOException e) {
            LOG.error(e);
            setExceptionResults(auths.size(), e, response);
        }
    }
    done.run(response.build());
}
Also used : AccessDeniedException(org.apache.hadoop.hbase.security.AccessDeniedException) DoNotRetryIOException(org.apache.hadoop.hbase.DoNotRetryIOException) ByteString(com.google.protobuf.ByteString) ArrayList(java.util.ArrayList) RegionActionResult(org.apache.hadoop.hbase.protobuf.generated.ClientProtos.RegionActionResult) DoNotRetryIOException(org.apache.hadoop.hbase.DoNotRetryIOException) IOException(java.io.IOException) OperationStatus(org.apache.hadoop.hbase.regionserver.OperationStatus) VisibilityLabelsResponse(org.apache.hadoop.hbase.protobuf.generated.VisibilityLabelsProtos.VisibilityLabelsResponse)
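
On the client, these per-label results are usually produced through VisibilityClient.setAuths. The sketch below is a hedged, hypothetical example (the labels and user name are made up, and the exact VisibilityClient signature may differ slightly across HBase versions): it assigns labels to a user and inspects each RegionActionResult, where a rejected label carries the DoNotRetryIOException built by the endpoint above.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.RegionActionResult;
import org.apache.hadoop.hbase.protobuf.generated.VisibilityLabelsProtos.VisibilityLabelsResponse;
import org.apache.hadoop.hbase.security.visibility.VisibilityClient;

public class SetAuthsClient {
    public static void main(String[] args) throws Throwable {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create())) {
            // Hypothetical labels and user.
            VisibilityLabelsResponse response =
                VisibilityClient.setAuths(conn, new String[] { "secret", "confidential" }, "alice");
            for (RegionActionResult result : response.getResultList()) {
                if (result.hasException()) {
                    // The exception name is typically DoNotRetryIOException, as built in setAuths above.
                    System.err.println("Label rejected: " + result.getException().getName());
                }
            }
        }
    }
}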

Example 18 with DoNotRetryIOException

Use of org.apache.hadoop.hbase.DoNotRetryIOException in project hbase by apache.

From the class AccessController, method preCheckAndPut:

@Override
public boolean preCheckAndPut(final ObserverContext<RegionCoprocessorEnvironment> c, final byte[] row, final byte[] family, final byte[] qualifier, final CompareFilter.CompareOp compareOp, final ByteArrayComparable comparator, final Put put, final boolean result) throws IOException {
    User user = getActiveUser(c);
    checkForReservedTagPresence(user, put);
    // Require READ and WRITE permissions on the table, CF, and KV to update
    RegionCoprocessorEnvironment env = c.getEnvironment();
    Map<byte[], ? extends Collection<byte[]>> families = makeFamilyMap(family, qualifier);
    AuthResult authResult = permissionGranted(OpType.CHECK_AND_PUT, user, env, families, Action.READ, Action.WRITE);
    logResult(authResult);
    if (!authResult.isAllowed()) {
        if (cellFeaturesEnabled && !compatibleEarlyTermination) {
            put.setAttribute(CHECK_COVERING_PERM, TRUE);
        } else if (authorizationEnabled) {
            throw new AccessDeniedException("Insufficient permissions " + authResult.toContextString());
        }
    }
    byte[] bytes = put.getAttribute(AccessControlConstants.OP_ATTRIBUTE_ACL);
    if (bytes != null) {
        if (cellFeaturesEnabled) {
            addCellPermissions(bytes, put.getFamilyCellMap());
        } else {
            throw new DoNotRetryIOException("Cell ACLs cannot be persisted");
        }
    }
    return result;
}
Also used : RegionCoprocessorEnvironment(org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment) AccessDeniedException(org.apache.hadoop.hbase.security.AccessDeniedException) User(org.apache.hadoop.hbase.security.User) DoNotRetryIOException(org.apache.hadoop.hbase.DoNotRetryIOException)
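
For reference, the operation guarded by this hook is a client-side check-and-put. The sketch below is a minimal, hypothetical example (table, row, and values are made up; checkAndPut is deprecated in favor of checkAndMutate in recent HBase versions). Because check-and-put both reads and writes the covered cells, the hook demands READ and WRITE; with authorization enabled and permissions missing, an AccessDeniedException, itself a DoNotRetryIOException, reaches the caller.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.security.AccessDeniedException;
import org.apache.hadoop.hbase.util.Bytes;

public class CheckAndPutClient {
    public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Table table = conn.getTable(TableName.valueOf("demo_table"))) {
            byte[] row = Bytes.toBytes("row1");
            byte[] cf = Bytes.toBytes("cf");
            byte[] qual = Bytes.toBytes("q");
            Put put = new Put(row).addColumn(cf, qual, Bytes.toBytes("new-value"));
            try {
                // Atomically apply the Put only if the current cell value matches "old-value".
                boolean applied = table.checkAndPut(row, cf, qual, Bytes.toBytes("old-value"), put);
                System.out.println("applied=" + applied);
            } catch (AccessDeniedException e) {
                // AccessDeniedException extends DoNotRetryIOException, so the client will not retry.
                System.err.println("Denied: " + e.getMessage());
            }
        }
    }
}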

Example 19 with DoNotRetryIOException

Use of org.apache.hadoop.hbase.DoNotRetryIOException in project hbase by apache.

From the class AccessController, method preIncrement:

@Override
public Result preIncrement(final ObserverContext<RegionCoprocessorEnvironment> c, final Increment increment) throws IOException {
    User user = getActiveUser(c);
    checkForReservedTagPresence(user, increment);
    // Require WRITE permission to the table, CF, and the KV to be replaced by
    // the incremented value
    RegionCoprocessorEnvironment env = c.getEnvironment();
    Map<byte[], ? extends Collection<Cell>> families = increment.getFamilyCellMap();
    AuthResult authResult = permissionGranted(OpType.INCREMENT, user, env, families, Action.WRITE);
    logResult(authResult);
    if (!authResult.isAllowed()) {
        if (cellFeaturesEnabled && !compatibleEarlyTermination) {
            increment.setAttribute(CHECK_COVERING_PERM, TRUE);
        } else if (authorizationEnabled) {
            throw new AccessDeniedException("Insufficient permissions " + authResult.toContextString());
        }
    }
    byte[] bytes = increment.getAttribute(AccessControlConstants.OP_ATTRIBUTE_ACL);
    if (bytes != null) {
        if (cellFeaturesEnabled) {
            addCellPermissions(bytes, increment.getFamilyCellMap());
        } else {
            throw new DoNotRetryIOException("Cell ACLs cannot be persisted");
        }
    }
    return null;
}
Also used : RegionCoprocessorEnvironment(org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment) AccessDeniedException(org.apache.hadoop.hbase.security.AccessDeniedException) User(org.apache.hadoop.hbase.security.User) DoNotRetryIOException(org.apache.hadoop.hbase.DoNotRetryIOException) Cell(org.apache.hadoop.hbase.Cell)

Example 20 with DoNotRetryIOException

Use of org.apache.hadoop.hbase.DoNotRetryIOException in project hbase by apache.

From the class AccessController, method prePut:

@Override
public void prePut(final ObserverContext<RegionCoprocessorEnvironment> c, final Put put, final WALEdit edit, final Durability durability) throws IOException {
    User user = getActiveUser(c);
    checkForReservedTagPresence(user, put);
    // Require WRITE permission to the table, CF, or top visible value, if any.
    // NOTE: We don't need to check the permissions for any earlier Puts
    // because we treat the ACLs in each Put as timestamped like any other
    // HBase value. A new ACL in a new Put applies to that Put. It doesn't
    // change the ACL of any previous Put. This allows simple evolution of
    // security policy over time without requiring expensive updates.
    RegionCoprocessorEnvironment env = c.getEnvironment();
    Map<byte[], ? extends Collection<Cell>> families = put.getFamilyCellMap();
    AuthResult authResult = permissionGranted(OpType.PUT, user, env, families, Action.WRITE);
    logResult(authResult);
    if (!authResult.isAllowed()) {
        if (cellFeaturesEnabled && !compatibleEarlyTermination) {
            put.setAttribute(CHECK_COVERING_PERM, TRUE);
        } else if (authorizationEnabled) {
            throw new AccessDeniedException("Insufficient permissions " + authResult.toContextString());
        }
    }
    // Add cell ACLs from the operation to the cells themselves
    byte[] bytes = put.getAttribute(AccessControlConstants.OP_ATTRIBUTE_ACL);
    if (bytes != null) {
        if (cellFeaturesEnabled) {
            addCellPermissions(bytes, put.getFamilyCellMap());
        } else {
            throw new DoNotRetryIOException("Cell ACLs cannot be persisted");
        }
    }
}
Also used : RegionCoprocessorEnvironment(org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment) AccessDeniedException(org.apache.hadoop.hbase.security.AccessDeniedException) User(org.apache.hadoop.hbase.security.User) DoNotRetryIOException(org.apache.hadoop.hbase.DoNotRetryIOException) Cell(org.apache.hadoop.hbase.Cell)
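
The OP_ATTRIBUTE_ACL attribute that prePut (and preCheckAndPut and preIncrement above) inspects is normally attached on the client through Mutation.setACL. The sketch below is a hypothetical example (table, row, and grantee are made up): it writes a cell with a per-cell permission and handles the DoNotRetryIOException thrown when cell-level features are disabled on the cluster.

import org.apache.hadoop.hbase.DoNotRetryIOException;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.security.access.Permission;
import org.apache.hadoop.hbase.util.Bytes;

public class CellAclPutClient {
    public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Table table = conn.getTable(TableName.valueOf("demo_table"))) {
            Put put = new Put(Bytes.toBytes("row1"));
            put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("value"));
            // setACL serializes the permission into the OP_ATTRIBUTE_ACL attribute that
            // prePut reads and turns into per-cell ACL tags ("bob" is a hypothetical grantee).
            put.setACL("bob", new Permission(Permission.Action.READ));
            try {
                table.put(put);
            } catch (DoNotRetryIOException e) {
                // "Cell ACLs cannot be persisted": cell features are off, so retrying cannot help.
                System.err.println("Put rejected: " + e.getMessage());
            }
        }
    }
}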

Aggregations

DoNotRetryIOException (org.apache.hadoop.hbase.DoNotRetryIOException): 77
IOException (java.io.IOException): 28
Cell (org.apache.hadoop.hbase.Cell): 18
ArrayList (java.util.ArrayList): 12
ServiceException (org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException): 12
MutationType (org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutationProto.MutationType): 12
TableName (org.apache.hadoop.hbase.TableName): 11
InterruptedIOException (java.io.InterruptedIOException): 10
HBaseIOException (org.apache.hadoop.hbase.HBaseIOException): 10
Delete (org.apache.hadoop.hbase.client.Delete): 10
Put (org.apache.hadoop.hbase.client.Put): 10
Test (org.junit.Test): 10
AccessDeniedException (org.apache.hadoop.hbase.security.AccessDeniedException): 9
User (org.apache.hadoop.hbase.security.User): 8
Mutation (org.apache.hadoop.hbase.client.Mutation): 7
ByteString (org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString): 7
HTableDescriptor (org.apache.hadoop.hbase.HTableDescriptor): 6
NameBytesPair (org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NameBytesPair): 6
ByteBufferCell (org.apache.hadoop.hbase.ByteBufferCell): 5
HRegionInfo (org.apache.hadoop.hbase.HRegionInfo): 5