
Example 6 with DoNotRetryIOException

Use of org.apache.hadoop.hbase.DoNotRetryIOException in project hbase by apache.

From the class MasterRpcServices, the method compactMob:

/**
   * Compacts the mob files of the given table.
   * @param request the compaction request.
   * @param tableName the name of the table whose mob files are compacted.
   * @return The response of the mob file compaction.
   * @throws IOException if the table descriptor cannot be read or the compaction request fails.
   */
private CompactRegionResponse compactMob(final CompactRegionRequest request, TableName tableName) throws IOException {
    if (!master.getTableStateManager().isTableState(tableName, TableState.State.ENABLED)) {
        throw new DoNotRetryIOException("Table " + tableName + " is not enabled");
    }
    boolean allFiles = false;
    List<HColumnDescriptor> compactedColumns = new ArrayList<>();
    HColumnDescriptor[] hcds = master.getTableDescriptors().get(tableName).getColumnFamilies();
    byte[] family = null;
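    // A requested family must exist in the table and be mob-enabled; with no
    // family given, every mob-enabled family of the table is selected.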
    if (request.hasFamily()) {
        family = request.getFamily().toByteArray();
        for (HColumnDescriptor hcd : hcds) {
            if (Bytes.equals(family, hcd.getName())) {
                if (!hcd.isMobEnabled()) {
                    LOG.error("Column family " + hcd.getNameAsString() + " is not a mob column family");
                    throw new DoNotRetryIOException("Column family " + hcd.getNameAsString() + " is not a mob column family");
                }
                compactedColumns.add(hcd);
            }
        }
    } else {
        for (HColumnDescriptor hcd : hcds) {
            if (hcd.isMobEnabled()) {
                compactedColumns.add(hcd);
            }
        }
    }
    if (compactedColumns.isEmpty()) {
        LOG.error("No mob column families are assigned in the mob compaction");
        throw new DoNotRetryIOException("No mob column families are assigned in the mob compaction");
    }
    if (request.hasMajor() && request.getMajor()) {
        allFiles = true;
    }
    String familyLogMsg = (family != null) ? Bytes.toString(family) : "";
    if (LOG.isTraceEnabled()) {
        LOG.trace("User-triggered mob compaction requested for table: " + tableName.getNameAsString() + " for column family: " + familyLogMsg);
    }
    master.requestMobCompaction(tableName, compactedColumns, allFiles);
    return CompactRegionResponse.newBuilder().build();
}
Also used: DoNotRetryIOException (org.apache.hadoop.hbase.DoNotRetryIOException), HColumnDescriptor (org.apache.hadoop.hbase.HColumnDescriptor), ArrayList (java.util.ArrayList)
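
For context, a client typically reaches compactMob through the Admin API's mob compact type. The sketch below is a minimal illustration, not code from the page above: the table name and column family are hypothetical, and it assumes the CompactType.MOB overloads of Admin.compact from the same codebase.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.CompactType;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.util.Bytes;

public class MobCompactionClient {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            // Ask the master to compact one mob-enabled family. Server side,
            // MasterRpcServices.compactMob throws DoNotRetryIOException if the
            // table is disabled or the family is not mob-enabled.
            admin.compact(TableName.valueOf("mob_table"), Bytes.toBytes("f"),
                CompactType.MOB);
        }
    }
}

If the request names no family, the server falls back to compacting every mob-enabled family; setting the major flag selects all files rather than a subset.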

Example 7 with DoNotRetryIOException

Use of org.apache.hadoop.hbase.DoNotRetryIOException in project hbase by apache.

From the class AccessController, the method preDelete:

@Override
public void preDelete(final ObserverContext<RegionCoprocessorEnvironment> c, final Delete delete, final WALEdit edit, final Durability durability) throws IOException {
    // An ACL on a delete is useless, we shouldn't allow it
    if (delete.getAttribute(AccessControlConstants.OP_ATTRIBUTE_ACL) != null) {
        throw new DoNotRetryIOException("ACL on delete has no effect: " + delete.toString());
    }
    // Require WRITE permissions on all cells covered by the delete. Unlike
    // for Puts we need to check all visible prior versions, because a major
    // compaction could remove them. If the user doesn't have permission to
    // overwrite any of the visible versions ('visible' defined as not covered
    // by a tombstone already) then we have to disallow this operation.
    RegionCoprocessorEnvironment env = c.getEnvironment();
    Map<byte[], ? extends Collection<Cell>> families = delete.getFamilyCellMap();
    User user = getActiveUser(c);
    AuthResult authResult = permissionGranted(OpType.DELETE, user, env, families, Action.WRITE);
    logResult(authResult);
    if (!authResult.isAllowed()) {
        if (cellFeaturesEnabled && !compatibleEarlyTermination) {
            delete.setAttribute(CHECK_COVERING_PERM, TRUE);
        } else if (authorizationEnabled) {
            throw new AccessDeniedException("Insufficient permissions " + authResult.toContextString());
        }
    }
}
Also used: RegionCoprocessorEnvironment (org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment), AccessDeniedException (org.apache.hadoop.hbase.security.AccessDeniedException), User (org.apache.hadoop.hbase.security.User), DoNotRetryIOException (org.apache.hadoop.hbase.DoNotRetryIOException), Cell (org.apache.hadoop.hbase.Cell)
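
The rejected case is easy to reproduce from the client side. A hedged sketch follows; the table, row, and user names are hypothetical, and it assumes Mutation.setACL populates the same OP_ATTRIBUTE_ACL attribute the hook checks.

import java.io.IOException;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.security.access.Permission;
import org.apache.hadoop.hbase.util.Bytes;

public class AclOnDeleteExample {
    public static void main(String[] args) throws IOException {
        try (Connection conn =
                 ConnectionFactory.createConnection(HBaseConfiguration.create());
             Table table = conn.getTable(TableName.valueOf("acl_table"))) {
            Delete delete = new Delete(Bytes.toBytes("row1"));
            // An ACL on a delete is pointless: the cells it would govern are
            // being removed. preDelete fails fast with DoNotRetryIOException
            // instead of letting the client retry a request that can never work.
            delete.setACL("bob", new Permission(Permission.Action.READ));
            table.delete(delete);
        }
    }
}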

Example 8 with DoNotRetryIOException

Use of org.apache.hadoop.hbase.DoNotRetryIOException in project hbase by apache.

From the class ProtobufUtil, the method toFilter:

/**
   * Convert a protocol buffer Filter to a client Filter.
   *
   * @param proto the protocol buffer Filter to convert
   * @return the converted Filter
   * @throws IOException if the filter class cannot be loaded or its parseFrom invocation fails
   */
@SuppressWarnings("unchecked")
public static Filter toFilter(FilterProtos.Filter proto) throws IOException {
    String type = proto.getName();
    final byte[] value = proto.getSerializedFilter().toByteArray();
    String funcName = "parseFrom";
    try {
        Class<? extends Filter> c = (Class<? extends Filter>) Class.forName(type, true, CLASS_LOADER);
        Method parseFrom = c.getMethod(funcName, byte[].class);
        // Note: getMethod never returns null; a missing parseFrom surfaces as
        // NoSuchMethodException and is handled by the catch below.
        return (Filter) parseFrom.invoke(c, value);
    } catch (Exception e) {
        // Whether class lookup, method lookup, or parsing failed, a retry
        // cannot succeed, so do not retry.
        throw new DoNotRetryIOException(e);
    }
}
Also used: Filter (org.apache.hadoop.hbase.filter.Filter), DoNotRetryIOException (org.apache.hadoop.hbase.DoNotRetryIOException), ByteString (org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString), Method (java.lang.reflect.Method), IOException (java.io.IOException), HBaseIOException (org.apache.hadoop.hbase.HBaseIOException), InvalidProtocolBufferException (org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException), DeserializationException (org.apache.hadoop.hbase.exceptions.DeserializationException), RemoteException (org.apache.hadoop.ipc.RemoteException), ServiceException (org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException)
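
The reflective lookup pairs with a serializer in the same class. The round trip below is a hedged sketch; it assumes the companion ProtobufUtil.toFilter(Filter) overload, which records the filter's class name alongside its own serialized bytes.

import java.io.IOException;
import org.apache.hadoop.hbase.filter.Filter;
import org.apache.hadoop.hbase.filter.PrefixFilter;
import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.shaded.protobuf.generated.FilterProtos;
import org.apache.hadoop.hbase.util.Bytes;

public class FilterRoundTrip {
    public static void main(String[] args) throws IOException {
        Filter original = new PrefixFilter(Bytes.toBytes("row-"));
        // Client -> protobuf: stores the class name and the filter's bytes.
        FilterProtos.Filter proto = ProtobufUtil.toFilter(original);
        // Protobuf -> client: Class.forName on the stored name, then the
        // static parseFrom(byte[]) is invoked reflectively. Any failure is
        // wrapped in DoNotRetryIOException, because retrying the same bytes
        // against the same classpath cannot succeed.
        Filter restored = ProtobufUtil.toFilter(proto);
        System.out.println(restored.getClass().getName());
    }
}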

Example 9 with DoNotRetryIOException

Use of org.apache.hadoop.hbase.DoNotRetryIOException in project hbase by apache.

From the class ProtobufUtil, the method toIncrement:

/**
   * Convert a protocol buffer Mutate to an Increment.
   *
   * @param proto the protocol buffer Mutate to convert
   * @param cellScanner the scanner holding the cells when the proto carries only metadata
   * @return the converted client Increment
   * @throws IOException if the proto is malformed or advertised cells are missing
   */
public static Increment toIncrement(final MutationProto proto, final CellScanner cellScanner) throws IOException {
    MutationType type = proto.getMutateType();
    assert type == MutationType.INCREMENT : type.name();
    byte[] row = proto.hasRow() ? proto.getRow().toByteArray() : null;
    Increment increment = row != null ? new Increment(row) : null;
    int cellCount = proto.hasAssociatedCellCount() ? proto.getAssociatedCellCount() : 0;
    if (cellCount > 0) {
        // The proto has metadata only and the data is separate to be found in the cellScanner.
        if (cellScanner == null) {
            throw new DoNotRetryIOException("Cell count of " + cellCount + " but no cellScanner: " + TextFormat.shortDebugString(proto));
        }
        for (int i = 0; i < cellCount; i++) {
            if (!cellScanner.advance()) {
                throw new DoNotRetryIOException("Cell count of " + cellCount + " but at index " + i + " no cell returned: " + TextFormat.shortDebugString(proto));
            }
            Cell cell = cellScanner.current();
            if (increment == null) {
                increment = new Increment(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength());
            }
            increment.add(cell);
        }
    } else {
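        // No associated cell block: the data is inline in the proto as
        // ColumnValue entries.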
        if (increment == null) {
            throw new IllegalArgumentException("row cannot be null");
        }
        for (ColumnValue column : proto.getColumnValueList()) {
            byte[] family = column.getFamily().toByteArray();
            for (QualifierValue qv : column.getQualifierValueList()) {
                byte[] qualifier = qv.getQualifier().toByteArray();
                if (!qv.hasValue()) {
                    throw new DoNotRetryIOException("Missing required field: qualifier value");
                }
                byte[] value = qv.getValue().toByteArray();
                byte[] tags = null;
                if (qv.hasTags()) {
                    tags = qv.getTags().toByteArray();
                }
                increment.add(CellUtil.createCell(row, family, qualifier, qv.getTimestamp(), KeyValue.Type.Put, value, tags));
            }
        }
    }
    if (proto.hasTimeRange()) {
        TimeRange timeRange = protoToTimeRange(proto.getTimeRange());
        increment.setTimeRange(timeRange.getMin(), timeRange.getMax());
    }
    increment.setDurability(toDurability(proto.getDurability()));
    for (NameBytesPair attribute : proto.getAttributeList()) {
        increment.setAttribute(attribute.getName(), attribute.getValue().toByteArray());
    }
    return increment;
}
Also used: TimeRange (org.apache.hadoop.hbase.io.TimeRange), MutationType (org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutationProto.MutationType), NameBytesPair (org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NameBytesPair), DoNotRetryIOException (org.apache.hadoop.hbase.DoNotRetryIOException), Increment (org.apache.hadoop.hbase.client.Increment), QualifierValue (org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutationProto.ColumnValue.QualifierValue), ColumnValue (org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutationProto.ColumnValue), Cell (org.apache.hadoop.hbase.Cell), ByteBufferCell (org.apache.hadoop.hbase.ByteBufferCell)
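
A hedged round-trip sketch for toIncrement: build a client Increment, serialize it with the companion toMutation helper (assumed from the same class), and convert it back. Passing a null CellScanner exercises the inline ColumnValue branch above; the row, family, and qualifier names are hypothetical.

import java.io.IOException;
import org.apache.hadoop.hbase.client.Increment;
import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutationProto;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutationProto.MutationType;
import org.apache.hadoop.hbase.util.Bytes;

public class IncrementRoundTrip {
    public static void main(String[] args) throws IOException {
        Increment original = new Increment(Bytes.toBytes("row1"));
        original.addColumn(Bytes.toBytes("f"), Bytes.toBytes("counter"), 1L);
        MutationProto proto =
            ProtobufUtil.toMutation(MutationType.INCREMENT, original);
        // cellScanner == null and the values travel inline in the proto, so
        // none of the DoNotRetryIOException branches fire.
        Increment restored = ProtobufUtil.toIncrement(proto, null);
        System.out.println(Bytes.toString(restored.getRow()));
    }
}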

Example 10 with DoNotRetryIOException

Use of org.apache.hadoop.hbase.DoNotRetryIOException in project hbase by apache.

From the class ResponseConverter, the method getResults:

/**
   * Create Results from the cells using the cells meta data.
   * @param cellScanner the scanner holding the cells referenced by the response, or null
   * @param response the scan response describing how the cells group into Results
   * @return the decoded Results, or null if the response is null
   * @throws IOException if the cell scanner cannot supply the advertised cells
   */
public static Result[] getResults(CellScanner cellScanner, ScanResponse response) throws IOException {
    if (response == null)
        return null;
    // If cellscanner, then the number of Results to return is the count of elements in the
    // cellsPerResult list.  Otherwise, it is how many results are embedded inside the response.
    int noOfResults = cellScanner != null ? response.getCellsPerResultCount() : response.getResultsCount();
    Result[] results = new Result[noOfResults];
    for (int i = 0; i < noOfResults; i++) {
        if (cellScanner != null) {
            // Cells are out in cellblocks.  Group them up again as Results.  How many to read at a
            // time will be found in getCellsLength -- length here is how many Cells in the i'th Result
            int noOfCells = response.getCellsPerResult(i);
            boolean isPartial = response.getPartialFlagPerResultCount() > i ? response.getPartialFlagPerResult(i) : false;
            List<Cell> cells = new ArrayList<>(noOfCells);
            for (int j = 0; j < noOfCells; j++) {
                try {
                    if (!cellScanner.advance()) {
                        // We could not retrieve the exact number of cells the response metadata
                        // promised, so the client has to scan the same results again. Throw DNRIOE,
                        // as a retry on the same scanner would result in OutOfOrderScannerNextException.
                        String msg = "Results sent from server=" + noOfResults + ". But only got " + i + " results completely at client. Resetting the scanner to scan again.";
                        LOG.error(msg);
                        throw new DoNotRetryIOException(msg);
                    }
                } catch (IOException ioe) {
                    // An IOException while retrieving the cells also means the client has to scan
                    // the same results again. Throw DNRIOE, as a retry on the same scanner would
                    // result in OutOfOrderScannerNextException.
                    LOG.error("Exception while reading cells from result. "
                        + "Resetting the scanner to scan again.", ioe);
                    throw new DoNotRetryIOException("Resetting the scanner.", ioe);
                }
                cells.add(cellScanner.current());
            }
            results[i] = Result.create(cells, null, response.getStale(), isPartial);
        } else {
            // Result is pure pb.
            results[i] = ProtobufUtil.toResult(response.getResults(i));
        }
    }
    return results;
}
Also used: DoNotRetryIOException (org.apache.hadoop.hbase.DoNotRetryIOException), ArrayList (java.util.ArrayList), ByteString (org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString), IOException (java.io.IOException), Cell (org.apache.hadoop.hbase.Cell), Result (org.apache.hadoop.hbase.client.Result), RegionActionResult (org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.RegionActionResult)
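
Callers sit on the client scan path: after a scan RPC returns, the response carries either whole protobuf results or per-result cell counts whose cells arrive separately in the RPC controller's CellScanner. A simplified, hedged sketch of that call site:

import java.io.IOException;
import org.apache.hadoop.hbase.CellScanner;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.shaded.protobuf.ResponseConverter;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ScanResponse;

public class ScanResponseDecoding {
    /** Materializes a ScanResponse into client Results. */
    static Result[] decode(ScanResponse response, CellScanner cellScanner)
            throws IOException {
        // If the scanner cannot supply the advertised cells, getResults throws
        // DoNotRetryIOException so the caller re-opens the scanner rather than
        // calling next() again and hitting OutOfOrderScannerNextException.
        return ResponseConverter.getResults(cellScanner, response);
    }
}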

Aggregations

DoNotRetryIOException (org.apache.hadoop.hbase.DoNotRetryIOException): 77
IOException (java.io.IOException): 28
Cell (org.apache.hadoop.hbase.Cell): 18
ArrayList (java.util.ArrayList): 12
ServiceException (org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException): 12
MutationType (org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutationProto.MutationType): 12
TableName (org.apache.hadoop.hbase.TableName): 11
InterruptedIOException (java.io.InterruptedIOException): 10
HBaseIOException (org.apache.hadoop.hbase.HBaseIOException): 10
Delete (org.apache.hadoop.hbase.client.Delete): 10
Put (org.apache.hadoop.hbase.client.Put): 10
Test (org.junit.Test): 10
AccessDeniedException (org.apache.hadoop.hbase.security.AccessDeniedException): 9
User (org.apache.hadoop.hbase.security.User): 8
Mutation (org.apache.hadoop.hbase.client.Mutation): 7
ByteString (org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString): 7
HTableDescriptor (org.apache.hadoop.hbase.HTableDescriptor): 6
NameBytesPair (org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NameBytesPair): 6
ByteBufferCell (org.apache.hadoop.hbase.ByteBufferCell): 5
HRegionInfo (org.apache.hadoop.hbase.HRegionInfo): 5