
Example 11 with Cell

use of org.apache.hadoop.hbase.Cell in project hbase by apache.

the class ReplicationSourceWALReaderThread method countDistinctRowKeysAndHFiles.

/**
   * Count the number of different row keys in the given edit because of mini-batching. We assume
   * that there's at least one Cell in the WALEdit.
   * @param edit edit to count row keys from
   * @return number of different row keys and HFiles
   */
private Pair<Integer, Integer> countDistinctRowKeysAndHFiles(WALEdit edit) {
    List<Cell> cells = edit.getCells();
    int distinctRowKeys = 1;
    int totalHFileEntries = 0;
    Cell lastCell = cells.get(0);
    int totalCells = edit.size();
    for (int i = 0; i < totalCells; i++) {
        // Count HFiles to be replicated
        if (CellUtil.matchingQualifier(cells.get(i), WALEdit.BULK_LOAD)) {
            try {
                BulkLoadDescriptor bld = WALEdit.getBulkLoadDescriptor(cells.get(i));
                List<StoreDescriptor> stores = bld.getStoresList();
                int totalStores = stores.size();
                for (int j = 0; j < totalStores; j++) {
                    totalHFileEntries += stores.get(j).getStoreFileList().size();
                }
            } catch (IOException e) {
                LOG.error("Failed to deserialize bulk load entry from wal edit. " + "Then its hfiles count will not be added into metric.");
            }
        }
        if (!CellUtil.matchingRows(cells.get(i), lastCell)) {
            distinctRowKeys++;
        }
        lastCell = cells.get(i);
    }
    return new Pair<>(distinctRowKeys, totalHFileEntries);
}
Also used: BulkLoadDescriptor (org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.BulkLoadDescriptor), IOException (java.io.IOException), Cell (org.apache.hadoop.hbase.Cell), StoreDescriptor (org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.StoreDescriptor), Pair (org.apache.hadoop.hbase.util.Pair)
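
To see the row-key counting in isolation, here is a minimal, self-contained sketch; the class name and sample data are invented for illustration, but it applies the same matchingRows comparison the method above uses:

import java.util.Arrays;
import java.util.List;

import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.util.Bytes;

public class DistinctRowKeyCountSketch {
    public static void main(String[] args) {
        byte[] cf = Bytes.toBytes("f");
        byte[] q = Bytes.toBytes("q");
        // Two cells for row1 and one for row2, so we expect 2 distinct row keys.
        List<Cell> cells = Arrays.<Cell>asList(
            new KeyValue(Bytes.toBytes("row1"), cf, q, Bytes.toBytes("v1")),
            new KeyValue(Bytes.toBytes("row1"), cf, q, Bytes.toBytes("v2")),
            new KeyValue(Bytes.toBytes("row2"), cf, q, Bytes.toBytes("v3")));
        // Same counting scheme as above: start at 1 (at least one cell is
        // assumed) and bump the count whenever the row changes.
        int distinctRowKeys = 1;
        Cell lastCell = cells.get(0);
        for (Cell cell : cells) {
            if (!CellUtil.matchingRows(cell, lastCell)) {
                distinctRowKeys++;
            }
            lastCell = cell;
        }
        System.out.println("distinct row keys: " + distinctRowKeys); // prints 2
    }
}

Note that the comparison only detects row changes between adjacent cells, which appears to be why the javadoc frames this as a count "because of mini-batching": it is an estimate that can overcount if the same row shows up in non-adjacent mutations within the edit.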

Example 12 with Cell

use of org.apache.hadoop.hbase.Cell in project hbase by apache.

the class ReplicationSourceWALReaderThread method calculateTotalSizeOfStoreFiles.

/**
   * Calculate the total size of all the store files
   * @param edit edit to sum store file sizes from
   * @return the total size of the store files
   */
private int calculateTotalSizeOfStoreFiles(WALEdit edit) {
    List<Cell> cells = edit.getCells();
    int totalStoreFilesSize = 0;
    int totalCells = edit.size();
    for (int i = 0; i < totalCells; i++) {
        if (CellUtil.matchingQualifier(cells.get(i), WALEdit.BULK_LOAD)) {
            try {
                BulkLoadDescriptor bld = WALEdit.getBulkLoadDescriptor(cells.get(i));
                List<StoreDescriptor> stores = bld.getStoresList();
                int totalStores = stores.size();
                for (int j = 0; j < totalStores; j++) {
                    totalStoreFilesSize += stores.get(j).getStoreFileSizeBytes();
                }
            } catch (IOException e) {
                LOG.error("Failed to deserialize bulk load entry from wal edit. " + "Size of HFiles part of cell will not be considered in replication " + "request size calculation.", e);
            }
        }
    }
    return totalStoreFilesSize;
}
Also used: BulkLoadDescriptor (org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.BulkLoadDescriptor), IOException (java.io.IOException), Cell (org.apache.hadoop.hbase.Cell), StoreDescriptor (org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.StoreDescriptor)
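
A minimal sketch of the summation step, with hand-built descriptors standing in for what WALEdit.getBulkLoadDescriptor() would deserialize from a real bulk load marker cell. The class name and sizes are invented, and the shaded ByteString import path varies across HBase versions. One caveat about the method above: it accumulates into an int, and Java's compound assignment silently narrows the long returned by getStoreFileSizeBytes(), so batches referencing more than ~2 GB of HFiles would overflow; the sketch uses a long instead.

import java.util.Arrays;
import java.util.List;

import org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString;
import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.StoreDescriptor;

public class StoreFileSizeSumSketch {
    public static void main(String[] args) {
        // Hand-built descriptors; real ones come from deserializing the
        // bulk load marker cell, as in the method above.
        List<StoreDescriptor> stores = Arrays.asList(
            StoreDescriptor.newBuilder()
                .setFamilyName(ByteString.copyFromUtf8("f1"))
                .setStoreHomeDir("f1")
                .addStoreFile("hfile-1")
                .setStoreFileSizeBytes(64L * 1024 * 1024)
                .build(),
            StoreDescriptor.newBuilder()
                .setFamilyName(ByteString.copyFromUtf8("f2"))
                .setStoreHomeDir("f2")
                .addStoreFile("hfile-2")
                .setStoreFileSizeBytes(128L * 1024 * 1024)
                .build());
        // Accumulate in a long to avoid the int truncation noted above.
        long totalStoreFilesSize = 0;
        for (StoreDescriptor store : stores) {
            totalStoreFilesSize += store.getStoreFileSizeBytes();
        }
        System.out.println("total store file bytes: " + totalStoreFilesSize);
    }
}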

Example 13 with Cell

use of org.apache.hadoop.hbase.Cell in project hbase by apache.

the class AccessController method preDelete.

@Override
public void preDelete(final ObserverContext<RegionCoprocessorEnvironment> c, final Delete delete, final WALEdit edit, final Durability durability) throws IOException {
    // An ACL on a delete is useless, we shouldn't allow it
    if (delete.getAttribute(AccessControlConstants.OP_ATTRIBUTE_ACL) != null) {
        throw new DoNotRetryIOException("ACL on delete has no effect: " + delete.toString());
    }
    // Require WRITE permissions on all cells covered by the delete. Unlike
    // for Puts we need to check all visible prior versions, because a major
    // compaction could remove them. If the user doesn't have permission to
    // overwrite any of the visible versions ('visible' defined as not covered
    // by a tombstone already) then we have to disallow this operation.
    RegionCoprocessorEnvironment env = c.getEnvironment();
    Map<byte[], ? extends Collection<Cell>> families = delete.getFamilyCellMap();
    User user = getActiveUser(c);
    AuthResult authResult = permissionGranted(OpType.DELETE, user, env, families, Action.WRITE);
    logResult(authResult);
    if (!authResult.isAllowed()) {
        if (cellFeaturesEnabled && !compatibleEarlyTermination) {
            delete.setAttribute(CHECK_COVERING_PERM, TRUE);
        } else if (authorizationEnabled) {
            throw new AccessDeniedException("Insufficient permissions " + authResult.toContextString());
        }
    }
}
Also used: RegionCoprocessorEnvironment (org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment), AccessDeniedException (org.apache.hadoop.hbase.security.AccessDeniedException), User (org.apache.hadoop.hbase.security.User), DoNotRetryIOException (org.apache.hadoop.hbase.DoNotRetryIOException), Cell (org.apache.hadoop.hbase.Cell)
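
From the client side, the rejected case looks like the following sketch (the class name, user, and row are made up). Mutation.setACL() stores the serialized permissions as a mutation attribute, which is the OP_ATTRIBUTE_ACL attribute the hook above checks:

import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.security.access.Permission;
import org.apache.hadoop.hbase.util.Bytes;

public class AclOnDeleteSketch {
    public static void main(String[] args) {
        Delete delete = new Delete(Bytes.toBytes("row1"));
        // Attaching an ACL to a Delete is useless: the tombstone removes the
        // very cells the ACL would guard. preDelete() above spots the
        // attribute and rejects the mutation with DoNotRetryIOException.
        delete.setACL("some_user", new Permission(Permission.Action.READ));
        // Table.delete(delete) against a cluster running the AccessController
        // would therefore fail fast rather than retry.
    }
}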

Example 14 with Cell

use of org.apache.hadoop.hbase.Cell in project hbase by apache.

the class AccessController method postMutationBeforeWAL.

@Override
public Cell postMutationBeforeWAL(ObserverContext<RegionCoprocessorEnvironment> ctx, MutationType opType, Mutation mutation, Cell oldCell, Cell newCell) throws IOException {
    // If the HFile version is insufficient to persist tags, we won't have any
    // work to do here
    if (!cellFeaturesEnabled) {
        return newCell;
    }
    // Collect any ACLs from the old cell
    List<Tag> tags = Lists.newArrayList();
    List<Tag> aclTags = Lists.newArrayList();
    ListMultimap<String, Permission> perms = ArrayListMultimap.create();
    if (oldCell != null) {
        Iterator<Tag> tagIterator = CellUtil.tagsIterator(oldCell);
        while (tagIterator.hasNext()) {
            Tag tag = tagIterator.next();
            if (tag.getType() != AccessControlLists.ACL_TAG_TYPE) {
                // Not an ACL tag, just carry it through
                if (LOG.isTraceEnabled()) {
                    LOG.trace("Carrying forward tag from " + oldCell + ": type " + tag.getType() + " length " + tag.getValueLength());
                }
                tags.add(tag);
            } else {
                aclTags.add(tag);
            }
        }
    }
    // Do we have an ACL on the operation?
    byte[] aclBytes = mutation.getACL();
    if (aclBytes != null) {
        // Yes, use it
        tags.add(new ArrayBackedTag(AccessControlLists.ACL_TAG_TYPE, aclBytes));
    } else {
        // No, use what we carried forward
        if (perms != null) {
            // Carry forward the ACL tags collected from the old cell
            if (LOG.isTraceEnabled()) {
                LOG.trace("Carrying forward ACLs from " + oldCell + ": " + perms);
            }
            tags.addAll(aclTags);
        }
    }
    // If we have no tags to add, just return
    if (tags.isEmpty()) {
        return newCell;
    }
    return CellUtil.createCell(newCell, tags);
}
Also used: Tag (org.apache.hadoop.hbase.Tag), ArrayBackedTag (org.apache.hadoop.hbase.ArrayBackedTag), Cell (org.apache.hadoop.hbase.Cell)
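
The tag carry-forward can be exercised in isolation. In the sketch below the class name, the sample cell, and the ACL_TAG_TYPE value are all invented stand-ins (the real constant lives in AccessControlLists); the point is the CellUtil.createCell(newCell, tags) rewrite used at the end of the method above:

import java.util.ArrayList;
import java.util.List;

import org.apache.hadoop.hbase.ArrayBackedTag;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.Tag;
import org.apache.hadoop.hbase.util.Bytes;

public class AclTagRewriteSketch {
    // Stand-in for AccessControlLists.ACL_TAG_TYPE; the value is arbitrary here.
    private static final byte ACL_TAG_TYPE = (byte) 31;

    public static void main(String[] args) {
        Cell newCell = new KeyValue(Bytes.toBytes("row1"), Bytes.toBytes("f"),
            Bytes.toBytes("q"), Bytes.toBytes("v"));
        // One ACL tag with made-up serialized permission bytes.
        List<Tag> tags = new ArrayList<>();
        tags.add(new ArrayBackedTag(ACL_TAG_TYPE, Bytes.toBytes("acl-bytes")));
        // Same rewrite step as the end of postMutationBeforeWAL(): produce a
        // copy of newCell that carries the collected tags.
        Cell rewritten = CellUtil.createCell(newCell, tags);
        System.out.println("tags attached: " + (rewritten.getTagsLength() > 0));
    }
}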

Example 15 with Cell

use of org.apache.hadoop.hbase.Cell in project hbase by apache.

the class AccessController method updateACL.

/**
   * Writes all table ACLs for the tables in the given Map up into ZooKeeper
   * znodes.  This is called to synchronize ACL changes following {@code _acl_}
   * table updates.
   */
void updateACL(RegionCoprocessorEnvironment e, final Map<byte[], List<Cell>> familyMap) {
    Set<byte[]> entries = new TreeSet<>(Bytes.BYTES_RAWCOMPARATOR);
    for (Map.Entry<byte[], List<Cell>> f : familyMap.entrySet()) {
        List<Cell> cells = f.getValue();
        for (Cell cell : cells) {
            if (CellUtil.matchingFamily(cell, AccessControlLists.ACL_LIST_FAMILY)) {
                entries.add(CellUtil.cloneRow(cell));
            }
        }
    }
    ZKPermissionWatcher zkw = this.authManager.getZKPermissionWatcher();
    Configuration conf = regionEnv.getConfiguration();
    for (byte[] entry : entries) {
        try {
            try (Table t = regionEnv.getTable(AccessControlLists.ACL_TABLE_NAME)) {
                ListMultimap<String, TablePermission> perms = AccessControlLists.getPermissions(conf, entry, t);
                byte[] serialized = AccessControlLists.writePermissionsAsBytes(perms, conf);
                zkw.writeToZookeeper(entry, serialized);
            }
        } catch (IOException ex) {
            LOG.error("Failed updating permissions mirror for '" + Bytes.toString(entry) + "'", ex);
        }
    }
}
Also used: Table (org.apache.hadoop.hbase.client.Table), Configuration (org.apache.hadoop.conf.Configuration), CompoundConfiguration (org.apache.hadoop.hbase.CompoundConfiguration), IOException (java.io.IOException), DoNotRetryIOException (org.apache.hadoop.hbase.DoNotRetryIOException), TreeSet (java.util.TreeSet), FilterList (org.apache.hadoop.hbase.filter.FilterList), ArrayList (java.util.ArrayList), List (java.util.List), Map (java.util.Map), TreeMap (java.util.TreeMap), HashMap (java.util.HashMap), Cell (org.apache.hadoop.hbase.Cell)
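
One detail worth calling out is the TreeSet built over Bytes.BYTES_RAWCOMPARATOR: it deduplicates row keys by content, which a plain HashSet<byte[]> cannot do because arrays use identity-based equals() and hashCode(). A minimal sketch (class name and sample keys invented):

import java.util.Set;
import java.util.TreeSet;

import org.apache.hadoop.hbase.util.Bytes;

public class ByteArrayDedupSketch {
    public static void main(String[] args) {
        // Content-based ordering and equality for byte[] keys.
        Set<byte[]> entries = new TreeSet<>(Bytes.BYTES_RAWCOMPARATOR);
        entries.add(Bytes.toBytes("table1"));
        entries.add(Bytes.toBytes("table1")); // same bytes, distinct array
        entries.add(Bytes.toBytes("table2"));
        System.out.println(entries.size()); // prints 2, not 3
    }
}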

Aggregations

Cell (org.apache.hadoop.hbase.Cell): 832
Test (org.junit.Test): 324
ArrayList (java.util.ArrayList): 298
Scan (org.apache.hadoop.hbase.client.Scan): 256
KeyValue (org.apache.hadoop.hbase.KeyValue): 196
Result (org.apache.hadoop.hbase.client.Result): 177
Put (org.apache.hadoop.hbase.client.Put): 141
IOException (java.io.IOException): 123
ResultScanner (org.apache.hadoop.hbase.client.ResultScanner): 104
Get (org.apache.hadoop.hbase.client.Get): 84
Table (org.apache.hadoop.hbase.client.Table): 83
List (java.util.List): 79
TableName (org.apache.hadoop.hbase.TableName): 77
Delete (org.apache.hadoop.hbase.client.Delete): 74
CellScanner (org.apache.hadoop.hbase.CellScanner): 69
InterruptedIOException (java.io.InterruptedIOException): 48
Configuration (org.apache.hadoop.conf.Configuration): 46
RegionScanner (org.apache.hadoop.hbase.regionserver.RegionScanner): 45
Map (java.util.Map): 44
Path (org.apache.hadoop.fs.Path): 44