
Example 36 with TableName

use of org.apache.hadoop.hbase.TableName in project hbase by apache.

the class ReplicationSink method replicateEntries.

/**
   * Replicate this array of entries directly into the local cluster using the native client.
   * Operates only against the raw protobuf types, saving a conversion from pb to pojo.
   * @param entries the WAL entries to replicate
   * @param cells a scanner over the cells carried by {@code entries}
   * @param replicationClusterId Id that uniquely identifies the source cluster's FS client
   *          configurations in the replication configuration directory
   * @param sourceBaseNamespaceDirPath Path that points to the source cluster base namespace
   *          directory
   * @param sourceHFileArchiveDirPath Path that points to the source cluster hfile archive
   *          directory
   * @throws IOException if the data could not be replicated
   */
public void replicateEntries(List<WALEntry> entries, final CellScanner cells,
        String replicationClusterId, String sourceBaseNamespaceDirPath,
        String sourceHFileArchiveDirPath) throws IOException {
    if (entries.isEmpty())
        return;
    if (cells == null)
        throw new NullPointerException("TODO: Add handling of null CellScanner");
    // Very simple optimization where we batch sequences of rows going
    // to the same table.
    try {
        long totalReplicated = 0;
        // Map of table => list of Rows, grouped by cluster id, we only want to flushCommits once per
        // invocation of this method per table and cluster id.
        Map<TableName, Map<List<UUID>, List<Row>>> rowMap = new TreeMap<>();
        // Map of table name to a list of (family, list of hfile paths) pairs from its namespace
        Map<String, List<Pair<byte[], List<String>>>> bulkLoadHFileMap = null;
        for (WALEntry entry : entries) {
            TableName table = TableName.valueOf(entry.getKey().getTableName().toByteArray());
            Cell previousCell = null;
            Mutation m = null;
            int count = entry.getAssociatedCellCount();
            for (int i = 0; i < count; i++) {
                // Throw index out of bounds if our cell count is off
                if (!cells.advance()) {
                    throw new ArrayIndexOutOfBoundsException("Expected=" + count + ", index=" + i);
                }
                Cell cell = cells.current();
                // Handle bulk load hfiles replication
                if (CellUtil.matchingQualifier(cell, WALEdit.BULK_LOAD)) {
                    if (bulkLoadHFileMap == null) {
                        bulkLoadHFileMap = new HashMap<>();
                    }
                    buildBulkLoadHFileMap(bulkLoadHFileMap, table, cell);
                } else {
                    // Handle wal replication
                    if (isNewRowOrType(previousCell, cell)) {
                        // Create new mutation
                        m = CellUtil.isDelete(cell)
                            ? new Delete(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength())
                            : new Put(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength());
                        List<UUID> clusterIds = new ArrayList<>(entry.getKey().getClusterIdsList().size());
                        for (HBaseProtos.UUID clusterId : entry.getKey().getClusterIdsList()) {
                            clusterIds.add(toUUID(clusterId));
                        }
                        m.setClusterIds(clusterIds);
                        addToHashMultiMap(rowMap, table, clusterIds, m);
                    }
                    if (CellUtil.isDelete(cell)) {
                        ((Delete) m).addDeleteMarker(cell);
                    } else {
                        ((Put) m).add(cell);
                    }
                    previousCell = cell;
                }
            }
            totalReplicated++;
        }
        // TODO Replicating mutations and bulk loaded data can be made parallel
        if (!rowMap.isEmpty()) {
            LOG.debug("Started replicating mutations.");
            for (Entry<TableName, Map<List<UUID>, List<Row>>> entry : rowMap.entrySet()) {
                batch(entry.getKey(), entry.getValue().values());
            }
            LOG.debug("Finished replicating mutations.");
        }
        if (bulkLoadHFileMap != null && !bulkLoadHFileMap.isEmpty()) {
            LOG.debug("Started replicating bulk loaded data.");
            HFileReplicator hFileReplicator = new HFileReplicator(
                this.provider.getConf(this.conf, replicationClusterId), sourceBaseNamespaceDirPath,
                sourceHFileArchiveDirPath, bulkLoadHFileMap, conf, getConnection());
            hFileReplicator.replicate();
            LOG.debug("Finished replicating bulk loaded data.");
        }
        int size = entries.size();
        this.metrics.setAgeOfLastAppliedOp(entries.get(size - 1).getKey().getWriteTime());
        this.metrics.applyBatch(size + hfilesReplicated, hfilesReplicated);
        this.totalReplicatedEdits.addAndGet(totalReplicated);
    } catch (IOException ex) {
        LOG.error("Unable to accept edit because:", ex);
        throw ex;
    }
}
Also used : Delete(org.apache.hadoop.hbase.client.Delete) ArrayList(java.util.ArrayList) HBaseProtos(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos) List(java.util.List) UUID(java.util.UUID) Cell(org.apache.hadoop.hbase.Cell) InterruptedIOException(java.io.InterruptedIOException) IOException(java.io.IOException) TreeMap(java.util.TreeMap) Put(org.apache.hadoop.hbase.client.Put) TableName(org.apache.hadoop.hbase.TableName) Row(org.apache.hadoop.hbase.client.Row) WALEntry(org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.WALEntry) Mutation(org.apache.hadoop.hbase.client.Mutation) HashMap(java.util.HashMap) Map(java.util.Map)
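
The grouping and row-boundary logic above depends on two small private helpers that the snippet does not show. A minimal sketch of how they could look, with the bodies reconstructed from the call sites above rather than copied from upstream:

/**
 * Groups values first by table, then by the list of cluster ids, creating the
 * inner map and list on first use.
 */
private <K1, K2, V> List<V> addToHashMultiMap(Map<K1, Map<K2, List<V>>> map, K1 key1, K2 key2, V value) {
    Map<K2, List<V>> innerMap = map.get(key1);
    if (innerMap == null) {
        innerMap = new HashMap<>();
        map.put(key1, innerMap);
    }
    List<V> values = innerMap.get(key2);
    if (values == null) {
        values = new ArrayList<>();
        innerMap.put(key2, values);
    }
    values.add(value);
    return values;
}

/**
 * A new Mutation is started whenever the cell type changes (Put vs Delete) or the
 * row key differs from the previous cell.
 */
private boolean isNewRowOrType(final Cell previousCell, final Cell cell) {
    return previousCell == null || previousCell.getTypeByte() != cell.getTypeByte()
        || !CellUtil.matchingRow(previousCell, cell);
}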

Example 37 with TableName

use of org.apache.hadoop.hbase.TableName in project hbase by apache.

the class ReaderBase method next.

@Override
public Entry next(Entry reuse) throws IOException {
    Entry e = reuse;
    if (e == null) {
        e = new Entry(new WALKey(), new WALEdit());
    }
    if (compressionContext != null) {
        e.setCompressionContext(compressionContext);
    }
    boolean hasEntry = false;
    try {
        hasEntry = readNext(e);
    } catch (IllegalArgumentException iae) {
        TableName tableName = e.getKey().getTablename();
        if (tableName != null && tableName.equals(TableName.OLD_ROOT_TABLE_NAME)) {
            // It is old ROOT table edit, ignore it
            LOG.info("Got an old ROOT edit, ignoring ");
            return next(e);
        } else
            throw iae;
    }
    edit++;
    if (compressionContext != null && emptyCompressionContext) {
        emptyCompressionContext = false;
    }
    return hasEntry ? e : null;
}
Also used : WALKey(org.apache.hadoop.hbase.wal.WALKey) TableName(org.apache.hadoop.hbase.TableName) Entry(org.apache.hadoop.hbase.wal.WAL.Entry)
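
For context, callers normally drive this method through the reader's no-argument next(), looping until it returns null. A minimal usage sketch; the fs, walPath, and conf values are assumptions standing in for whatever the surrounding tool or test provides:

// Iterate all entries of a single WAL file.
try (WAL.Reader reader = WALFactory.createReader(fs, walPath, conf)) {
    WAL.Entry entry;
    while ((entry = reader.next()) != null) {
        TableName table = entry.getKey().getTablename();
        System.out.println(table + ": " + entry.getEdit().getCells().size() + " cell(s)");
    }
}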

Example 38 with TableName

use of org.apache.hadoop.hbase.TableName in project hbase by apache.

the class ZKDataMigrator method queryForTableStates.

/**
   * Method for table state migration.
   * Used when upgrading from pre-2.0 to 2.0: reads the table states from ZooKeeper so they
   * can be applied to the internal state and the ZK nodes then deleted.
   * Used by the master to complete the migration from ZK-based table states to
   * table-descriptor-based states.
   */
@Deprecated
public static Map<TableName, TableState.State> queryForTableStates(ZooKeeperWatcher zkw) throws KeeperException, InterruptedException {
    Map<TableName, TableState.State> rv = new HashMap<>();
    List<String> children = ZKUtil.listChildrenNoWatch(zkw, zkw.znodePaths.tableZNode);
    if (children == null)
        return rv;
    for (String child : children) {
        TableName tableName = TableName.valueOf(child);
        ZooKeeperProtos.DeprecatedTableState.State state = getTableState(zkw, tableName);
        TableState.State newState = TableState.State.ENABLED;
        if (state != null) {
            switch(state) {
                case ENABLED:
                    newState = TableState.State.ENABLED;
                    break;
                case DISABLED:
                    newState = TableState.State.DISABLED;
                    break;
                case DISABLING:
                    newState = TableState.State.DISABLING;
                    break;
                case ENABLING:
                    newState = TableState.State.ENABLING;
                    break;
                default:
            }
        }
        rv.put(tableName, newState);
    }
    return rv;
}
Also used : TableName(org.apache.hadoop.hbase.TableName) HashMap(java.util.HashMap) TableState(org.apache.hadoop.hbase.client.TableState)
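
A sketch of how a caller might consume the returned map during the upgrade; the tableStateManager variable and its setTableState call are hypothetical stand-ins for the master's actual bookkeeping:

// Apply the migrated states; afterwards the old ZK nodes can be deleted.
Map<TableName, TableState.State> states = ZKDataMigrator.queryForTableStates(zkw);
for (Map.Entry<TableName, TableState.State> e : states.entrySet()) {
    tableStateManager.setTableState(e.getKey(), e.getValue());
}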

Example 39 with TableName

use of org.apache.hadoop.hbase.TableName in project hbase by apache.

the class HFileCorruptionChecker method createMobRegionDirChecker.

/**
   * Creates an instance of MobRegionDirChecker.
   * @param tableDir The current table directory.
   * @return An instance of MobRegionDirChecker.
   */
private MobRegionDirChecker createMobRegionDirChecker(Path tableDir) {
    TableName tableName = FSUtils.getTableName(tableDir);
    Path mobDir = MobUtils.getMobRegionPath(conf, tableName);
    return new MobRegionDirChecker(mobDir);
}
Also used : Path(org.apache.hadoop.fs.Path) TableName(org.apache.hadoop.hbase.TableName)
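
FSUtils.getTableName derives the name purely from the directory layout: a table directory is <hbase.rootdir>/data/<namespace>/<table>, so the last two path components give the namespace and table. A small illustration with a made-up path:

// Illustration only: the path below is hypothetical.
Path tableDir = new Path("/hbase/data/my_ns/my_table");
TableName tn = FSUtils.getTableName(tableDir);
System.out.println(tn); // prints "my_ns:my_table"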

Example 40 with TableName

use of org.apache.hadoop.hbase.TableName in project hbase by apache.

the class IntegrationTestBulkLoad method installSlowingCoproc.

/**
   * Modify the table returned by {@code getTablename()} to carry {@link SlowMeCoproScanOperations}.
   */
private void installSlowingCoproc() throws IOException, InterruptedException {
    int replicaCount = conf.getInt(NUM_REPLICA_COUNT_KEY, NUM_REPLICA_COUNT_DEFAULT);
    if (replicaCount == NUM_REPLICA_COUNT_DEFAULT)
        return;
    TableName t = getTablename();
    Admin admin = util.getAdmin();
    HTableDescriptor desc = admin.getTableDescriptor(t);
    desc.addCoprocessor(SlowMeCoproScanOperations.class.getName());
    HBaseTestingUtility.modifyTableSync(admin, desc);
}
Also used : TableName(org.apache.hadoop.hbase.TableName) Admin(org.apache.hadoop.hbase.client.Admin) HTableDescriptor(org.apache.hadoop.hbase.HTableDescriptor)
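
The pattern here — fetch the descriptor, add the coprocessor class name, push the modified descriptor back — is the usual way to attach a coprocessor to an existing table through this Admin API. A standalone sketch; MyObserver and the table name are hypothetical:

// Attach a coprocessor to an existing table.
TableName tableName = TableName.valueOf("my_table");
HTableDescriptor desc = admin.getTableDescriptor(tableName);
desc.addCoprocessor(MyObserver.class.getName());
// Admin.modifyTable is asynchronous; HBaseTestingUtility.modifyTableSync (used above)
// additionally waits for the new descriptor to take effect.
admin.modifyTable(tableName, desc);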

Aggregations

TableName (org.apache.hadoop.hbase.TableName) 1033
Test (org.junit.Test) 695
HTableDescriptor (org.apache.hadoop.hbase.HTableDescriptor) 257
Table (org.apache.hadoop.hbase.client.Table) 228
IOException (java.io.IOException) 225
HRegionInfo (org.apache.hadoop.hbase.HRegionInfo) 215
HColumnDescriptor (org.apache.hadoop.hbase.HColumnDescriptor) 203
Result (org.apache.hadoop.hbase.client.Result) 125
ArrayList (java.util.ArrayList) 120
Put (org.apache.hadoop.hbase.client.Put) 118
Path (org.apache.hadoop.fs.Path) 113
Connection (org.apache.hadoop.hbase.client.Connection) 103
Scan (org.apache.hadoop.hbase.client.Scan) 98
ResultScanner (org.apache.hadoop.hbase.client.ResultScanner) 89
ServerName (org.apache.hadoop.hbase.ServerName) 85
Admin (org.apache.hadoop.hbase.client.Admin) 85
Cell (org.apache.hadoop.hbase.Cell) 77
HashMap (java.util.HashMap) 75
Delete (org.apache.hadoop.hbase.client.Delete) 66
InterruptedIOException (java.io.InterruptedIOException) 63