Example 66 with Put

Use of org.apache.hadoop.hbase.client.Put in project hbase by apache.

The class HBaseFsck, method generatePuts.

/**
   * Generate set of puts to add to new meta.  This expects the tables to be
   * clean with no overlaps or holes.  If there are any problems it returns null.
   *
   * @return An array list of puts to do in bulk, null if tables have problems
   */
private ArrayList<Put> generatePuts(SortedMap<TableName, TableInfo> tablesInfo) throws IOException {
    ArrayList<Put> puts = new ArrayList<>();
    boolean hasProblems = false;
    for (Entry<TableName, TableInfo> e : tablesInfo.entrySet()) {
        TableName name = e.getKey();
        // skip "hbase:meta"
        if (name.compareTo(TableName.META_TABLE_NAME) == 0) {
            continue;
        }
        TableInfo ti = e.getValue();
        puts.add(MetaTableAccessor.makePutFromTableState(new TableState(ti.tableName, TableState.State.ENABLED)));
        for (Entry<byte[], Collection<HbckInfo>> spl : ti.sc.getStarts().asMap().entrySet()) {
            Collection<HbckInfo> his = spl.getValue();
            int sz = his.size();
            if (sz != 1) {
                // problem
                LOG.error("Split starting at " + Bytes.toStringBinary(spl.getKey()) + " had " + sz + " regions instead of exactly 1.");
                hasProblems = true;
                continue;
            }
            // add the row directly to meta.
            HbckInfo hi = his.iterator().next();
            // hi.metaEntry;
            HRegionInfo hri = hi.getHdfsHRI();
            Put p = MetaTableAccessor.makePutFromRegionInfo(hri);
            puts.add(p);
        }
    }
    return hasProblems ? null : puts;
}
Also used: ArrayList(java.util.ArrayList) Put(org.apache.hadoop.hbase.client.Put) HRegionInfo(org.apache.hadoop.hbase.HRegionInfo) TableName(org.apache.hadoop.hbase.TableName) Collection(java.util.Collection) TableState(org.apache.hadoop.hbase.client.TableState)
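In rebuildMeta below, the puts produced here are applied through HRegion.batchMutate against a freshly created offline meta region. Against a live cluster, the same list could be written through the ordinary client API instead; the following is a minimal sketch under that assumption (the method name bulkPutToMeta is illustrative, not part of HBaseFsck):

import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;

// Sketch only: bulk-apply puts (e.g. the result of generatePuts) with the client API.
static void bulkPutToMeta(List<Put> puts) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table meta = conn.getTable(TableName.META_TABLE_NAME)) {
        meta.put(puts); // Table.put(List<Put>) submits the whole batch in one call
    }
}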

Example 67 with Put

Use of org.apache.hadoop.hbase.client.Put in project hbase by apache.

The class HBaseFsck, method rebuildMeta.

/**
   * Rebuilds meta from information in hdfs/fs.  Depends on configuration settings passed into
   * hbck constructor to point to a particular fs/dir. Assumes HBase is OFFLINE.
   *
   * @param fix flag that determines if method should attempt to fix holes
   * @return true if successful, false if attempt failed.
   */
public boolean rebuildMeta(boolean fix) throws IOException, InterruptedException {
    // TODO: check to make sure hbase is offline (or at least that the table
    // currently being worked on is offline)
    // Determine what's on HDFS
    LOG.info("Loading HBase regioninfo from HDFS...");
    // populating regioninfo table.
    loadHdfsRegionDirs();
    int errs = errors.getErrorList().size();
    // update tableInfos based on region info in fs.
    tablesInfo = loadHdfsRegionInfos();
    checkHdfsIntegrity(false, false);
    // make sure ok.
    if (errors.getErrorList().size() != errs) {
        // While in error state, iterate until no more fixes possible
        while (true) {
            fixes = 0;
            suggestFixes(tablesInfo);
            errors.clear();
            // update tableInfos based on region info in fs.
            loadHdfsRegionInfos();
            checkHdfsIntegrity(shouldFixHdfsHoles(), shouldFixHdfsOverlaps());
            int errCount = errors.getErrorList().size();
            if (fixes == 0) {
                if (errCount > 0) {
                    // failed to fix problems.
                    return false;
                } else {
                    // no fixes and no problems? drop out and fix stuff!
                    break;
                }
            }
        }
    }
    // we can rebuild, move old meta out of the way and start
    LOG.info("HDFS regioninfo's seems good.  Sidelining old hbase:meta");
    Path backupDir = sidelineOldMeta();
    LOG.info("Creating new hbase:meta");
    String walFactoryId = "hbck-meta-recovery-" + RandomStringUtils.randomNumeric(8);
    HRegion meta = createNewMeta(walFactoryId);
    // populate meta
    List<Put> puts = generatePuts(tablesInfo);
    if (puts == null) {
        LOG.fatal("Problem encountered when creating new hbase:meta entries.  " + "You may need to restore the previously sidelined hbase:meta");
        return false;
    }
    meta.batchMutate(puts.toArray(new Put[puts.size()]), HConstants.NO_NONCE, HConstants.NO_NONCE);
    meta.close();
    if (meta.getWAL() != null) {
        meta.getWAL().close();
    }
    // clean up the temporary hbck meta recovery WAL directory
    removeHBCKMetaRecoveryWALDir(walFactoryId);
    LOG.info("Success! hbase:meta table rebuilt.");
    LOG.info("Old hbase:meta is moved into " + backupDir);
    return true;
}
Also used: Path(org.apache.hadoop.fs.Path) HRegion(org.apache.hadoop.hbase.regionserver.HRegion) Put(org.apache.hadoop.hbase.client.Put)
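The error-handling section of rebuildMeta runs to a fixed point: each pass suggests fixes, reloads region info, and re-checks integrity, stopping only when a pass applies no fixes; it then succeeds exactly when no errors remain. Stripped of the hbck specifics, the control flow looks like the sketch below (attemptFixes and countErrors are hypothetical stand-ins for the calls above):

// Hypothetical sketch of the fix-until-stable loop in rebuildMeta.
static boolean fixUntilStable() {
    while (true) {
        int fixesApplied = attemptFixes(); // stand-in for suggestFixes + reload + re-check
        int remaining = countErrors();     // stand-in for errors.getErrorList().size()
        if (fixesApplied == 0) {
            return remaining == 0;         // clean fixed point, or stuck with errors
        }
    }
}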

Example 68 with Put

Use of org.apache.hadoop.hbase.client.Put in project DataX by alibaba.

The class HBaseClient, method put.

public void put(String tableName, HBaseCell cell) throws Exception {
    Table table = getTable(tableName);
    try {
        // Build the single-cell put inside the try block so the table is
        // closed even if encoding the cell fails.
        Put p = new Put(Bytes.toBytes(cell.getRowKey()));
        p.addColumn(Bytes.toBytes(cell.getColf()), Bytes.toBytes(cell.getCol()), Bytes.toBytes(cell.getValue()));
        table.put(p);
    } catch (Exception e) {
        log.error("put table " + tableName + " failed.", e);
        throw e;
    } finally {
        // getTable has already returned non-null here, so no null check is needed.
        try {
            table.close();
        } catch (Exception e) {
            log.error("close table " + tableName + " failed.", e);
            throw e;
        }
    }
}
Also used: Table(org.apache.hadoop.hbase.client.Table) Put(org.apache.hadoop.hbase.client.Put)
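Since Table is Closeable, the close-in-finally boilerplate can also be written with try-with-resources. A minimal self-contained sketch of the same single-cell put (table, family, qualifier, and value names are placeholders):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

static void putOneCell() throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("my_table"))) {
        Put p = new Put(Bytes.toBytes("row-1"));
        p.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("col"), Bytes.toBytes("value"));
        table.put(p); // connection and table are closed automatically on exit
    }
}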

Example 69 with Put

Use of org.apache.hadoop.hbase.client.Put in project pinpoint by naver.

The class HbaseAgentInfoDao, method insert.

@Override
public void insert(TAgentInfo agentInfo) {
    if (agentInfo == null) {
        throw new NullPointerException("agentInfo must not be null");
    }
    if (logger.isDebugEnabled()) {
        logger.debug("insert agent info. {}", agentInfo);
    }
    byte[] agentId = Bytes.toBytes(agentInfo.getAgentId());
    long reverseKey = TimeUtils.reverseTimeMillis(agentInfo.getStartTimestamp());
    byte[] rowKey = RowKeyUtils.concatFixedByteAndLong(agentId, HBaseTables.AGENT_NAME_MAX_LEN, reverseKey);
    Put put = new Put(rowKey);
    // TODO: add more agent information; for now only the start time is added for sqlMetaData
    AgentInfoBo agentInfoBo = this.agentInfoBoMapper.map(agentInfo);
    byte[] agentInfoBoValue = agentInfoBo.writeValue();
    put.addColumn(HBaseTables.AGENTINFO_CF_INFO, HBaseTables.AGENTINFO_CF_INFO_IDENTIFIER, agentInfoBoValue);
    if (agentInfo.isSetServerMetaData()) {
        ServerMetaDataBo serverMetaDataBo = this.serverMetaDataBoMapper.map(agentInfo.getServerMetaData());
        byte[] serverMetaDataBoValue = serverMetaDataBo.writeValue();
        put.addColumn(HBaseTables.AGENTINFO_CF_INFO, HBaseTables.AGENTINFO_CF_INFO_SERVER_META_DATA, serverMetaDataBoValue);
    }
    if (agentInfo.isSetJvmInfo()) {
        JvmInfoBo jvmInfoBo = this.jvmInfoBoMapper.map(agentInfo.getJvmInfo());
        byte[] jvmInfoBoValue = jvmInfoBo.writeValue();
        put.addColumn(HBaseTables.AGENTINFO_CF_INFO, HBaseTables.AGENTINFO_CF_INFO_JVM, jvmInfoBoValue);
    }
    hbaseTemplate.put(HBaseTables.AGENTINFO, put);
}
Also used: AgentInfoBo(com.navercorp.pinpoint.common.server.bo.AgentInfoBo) JvmInfoBo(com.navercorp.pinpoint.common.server.bo.JvmInfoBo) ServerMetaDataBo(com.navercorp.pinpoint.common.server.bo.ServerMetaDataBo) Put(org.apache.hadoop.hbase.client.Put)
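The row key concatenates a fixed-width agent id with a reversed start timestamp, so the newest record for an agent sorts first under HBase's lexicographic key ordering. A hedged sketch of the idea, assuming reverseTimeMillis(t) is Long.MAX_VALUE - t and a 24-byte fixed agent-id width (both are assumptions about Pinpoint internals, not confirmed by the snippet above):

import org.apache.hadoop.hbase.util.Bytes;

// Sketch: fixed-width agent id followed by an 8-byte reversed timestamp.
static byte[] makeRowKey(String agentIdStr, long startTimestamp) {
    byte[] agentId = Bytes.toBytes(agentIdStr); // assumed to fit in the 24-byte width
    long reverseKey = Long.MAX_VALUE - startTimestamp; // assumed reverseTimeMillis semantics
    byte[] rowKey = new byte[24 + Bytes.SIZEOF_LONG];
    System.arraycopy(agentId, 0, rowKey, 0, agentId.length); // left-aligned, zero-padded
    Bytes.putLong(rowKey, 24, reverseKey); // big-endian long after the id
    return rowKey;
}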

Example 70 with Put

Use of org.apache.hadoop.hbase.client.Put in project pinpoint by naver.

The class HbaseApplicationTraceIndexDao, method insert.

@Override
public void insert(final TSpan span) {
    if (span == null) {
        throw new NullPointerException("span must not be null");
    }
    final Buffer buffer = new AutomaticBuffer(10 + AGENT_NAME_MAX_LEN);
    buffer.putVInt(span.getElapsed());
    buffer.putSVInt(span.getErr());
    buffer.putPrefixedString(span.getAgentId());
    final byte[] value = buffer.getBuffer();
    long acceptedTime = acceptedTimeService.getAcceptedTime();
    final byte[] distributedKey = createRowKey(span, acceptedTime);
    Put put = new Put(distributedKey);
    put.addColumn(APPLICATION_TRACE_INDEX_CF_TRACE, makeQualifier(span), acceptedTime, value);
    boolean success = hbaseTemplate.asyncPut(APPLICATION_TRACE_INDEX, put);
    if (!success) {
        hbaseTemplate.put(APPLICATION_TRACE_INDEX, put);
    }
}
Also used: Buffer(com.navercorp.pinpoint.common.buffer.Buffer) AutomaticBuffer(com.navercorp.pinpoint.common.buffer.AutomaticBuffer) Put(org.apache.hadoop.hbase.client.Put)
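The write path tries a buffered asynchronous put first and falls back to a synchronous put only when the async queue rejects the operation. Generalized into a small helper (the hbaseTemplate calls mirror the Pinpoint code above; the wrapper itself and its parameter types are hypothetical):

// Hypothetical helper: queue the put asynchronously, write synchronously on rejection.
void putWithFallback(TableName table, Put put) {
    boolean queued = hbaseTemplate.asyncPut(table, put); // false if the async queue refused it
    if (!queued) {
        hbaseTemplate.put(table, put); // blocking write ensures the record is stored
    }
}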

Aggregations

Put (org.apache.hadoop.hbase.client.Put): 1416
Test (org.junit.Test): 672
Table (org.apache.hadoop.hbase.client.Table): 489
ArrayList (java.util.ArrayList): 317
Result (org.apache.hadoop.hbase.client.Result): 279
TableName (org.apache.hadoop.hbase.TableName): 257
IOException (java.io.IOException): 241
Delete (org.apache.hadoop.hbase.client.Delete): 225
Scan (org.apache.hadoop.hbase.client.Scan): 222
Cell (org.apache.hadoop.hbase.Cell): 200
Get (org.apache.hadoop.hbase.client.Get): 196
Configuration (org.apache.hadoop.conf.Configuration): 148
TableDescriptor (org.apache.hadoop.hbase.client.TableDescriptor): 139
Connection (org.apache.hadoop.hbase.client.Connection): 122
KeyValue (org.apache.hadoop.hbase.KeyValue): 112
ResultScanner (org.apache.hadoop.hbase.client.ResultScanner): 110
Admin (org.apache.hadoop.hbase.client.Admin): 89
List (java.util.List): 83
Mutation (org.apache.hadoop.hbase.client.Mutation): 82
RegionInfo (org.apache.hadoop.hbase.client.RegionInfo): 80