use of org.apache.hadoop.hbase.client.Put in project hbase by apache.
the class HBaseFsck, method generatePuts.
/**
* Generate set of puts to add to new meta. This expects the tables to be
* clean with no overlaps or holes. If there are any problems it returns null.
*
* @return An array list of puts to do in bulk, null if tables have problems
*/
private ArrayList<Put> generatePuts(SortedMap<TableName, TableInfo> tablesInfo) throws IOException {
  ArrayList<Put> puts = new ArrayList<>();
  boolean hasProblems = false;
  for (Entry<TableName, TableInfo> e : tablesInfo.entrySet()) {
    TableName name = e.getKey();
    // skip "hbase:meta"
    if (name.compareTo(TableName.META_TABLE_NAME) == 0) {
      continue;
    }
    TableInfo ti = e.getValue();
    puts.add(MetaTableAccessor.makePutFromTableState(new TableState(ti.tableName, TableState.State.ENABLED)));
    for (Entry<byte[], Collection<HbckInfo>> spl : ti.sc.getStarts().asMap().entrySet()) {
      Collection<HbckInfo> his = spl.getValue();
      int sz = his.size();
      if (sz != 1) {
        // problem
        LOG.error("Split starting at " + Bytes.toStringBinary(spl.getKey()) + " had " + sz
            + " regions instead of exactly 1.");
        hasProblems = true;
        continue;
      }
      // add the row directly to meta.
      HbckInfo hi = his.iterator().next();
      // hi.metaEntry;
      HRegionInfo hri = hi.getHdfsHRI();
      Put p = MetaTableAccessor.makePutFromRegionInfo(hri);
      puts.add(p);
    }
  }
  return hasProblems ? null : puts;
}
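As a side note, the list of Puts built above is applied by hbck to an offline meta region (see rebuildMeta below). For comparison, here is a minimal sketch of applying a list of Puts through the ordinary client API with Table.put(List<Put>); the table, family, and row names are assumptions for illustration only.

import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BulkPutSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Table table = connection.getTable(TableName.valueOf("example_table"))) {
      List<Put> puts = new ArrayList<>();
      for (int i = 0; i < 3; i++) {
        // one Put per row; each carries its own row key and cells
        Put put = new Put(Bytes.toBytes("row-" + i));
        put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("value-" + i));
        puts.add(put);
      }
      // Table.put(List<Put>) sends the whole batch in a single client call
      table.put(puts);
    }
  }
}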
use of org.apache.hadoop.hbase.client.Put in project hbase by apache.
the class HBaseFsck, method rebuildMeta.
/**
* Rebuilds meta from information in hdfs/fs. Depends on configuration settings passed into
* hbck constructor to point to a particular fs/dir. Assumes HBase is OFFLINE.
*
* @param fix flag that determines if method should attempt to fix holes
* @return true if successful, false if attempt failed.
*/
public boolean rebuildMeta(boolean fix) throws IOException, InterruptedException {
  // TODO check to make sure hbase is offline. (or at least the table
  // currently being worked on is off line)
  // Determine what's on HDFS
  LOG.info("Loading HBase regioninfo from HDFS...");
  // populating regioninfo table.
  loadHdfsRegionDirs();
  int errs = errors.getErrorList().size();
  // update tableInfos based on region info in fs.
  tablesInfo = loadHdfsRegionInfos();
  checkHdfsIntegrity(false, false);
  // make sure ok.
  if (errors.getErrorList().size() != errs) {
    // While in error state, iterate until no more fixes possible
    while (true) {
      fixes = 0;
      suggestFixes(tablesInfo);
      errors.clear();
      // update tableInfos based on region info in fs.
      loadHdfsRegionInfos();
      checkHdfsIntegrity(shouldFixHdfsHoles(), shouldFixHdfsOverlaps());
      int errCount = errors.getErrorList().size();
      if (fixes == 0) {
        if (errCount > 0) {
          // failed to fix problems.
          return false;
        } else {
          // no fixes and no problems? drop out and fix stuff!
          break;
        }
      }
    }
  }
  // we can rebuild, move old meta out of the way and start
  LOG.info("HDFS regioninfo's seems good. Sidelining old hbase:meta");
  Path backupDir = sidelineOldMeta();
  LOG.info("Creating new hbase:meta");
  String walFactoryId = "hbck-meta-recovery-" + RandomStringUtils.randomNumeric(8);
  HRegion meta = createNewMeta(walFactoryId);
  // populate meta
  List<Put> puts = generatePuts(tablesInfo);
  if (puts == null) {
    LOG.fatal("Problem encountered when creating new hbase:meta entries. "
        + "You may need to restore the previously sidelined hbase:meta");
    return false;
  }
  meta.batchMutate(puts.toArray(new Put[puts.size()]), HConstants.NO_NONCE, HConstants.NO_NONCE);
  meta.close();
  if (meta.getWAL() != null) {
    meta.getWAL().close();
  }
  // clean up the temporary hbck meta recovery WAL directory
  removeHBCKMetaRecoveryWALDir(walFactoryId);
  LOG.info("Success! hbase:meta table rebuilt.");
  LOG.info("Old hbase:meta is moved into " + backupDir);
  return true;
}
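rebuildMeta writes the generated Puts straight into a freshly created offline HRegion via batchMutate, since the cluster is assumed to be down. For contrast, a hedged sketch of the normal online bulk-write path, buffering Puts through a BufferedMutator; the table and column names are assumptions, and this is the standard client path, not what hbck does here.

import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.BufferedMutator;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.util.Bytes;

public class BufferedMutatorSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         BufferedMutator mutator = connection.getBufferedMutator(TableName.valueOf("example_table"))) {
      List<Put> puts = new ArrayList<>();
      for (int i = 0; i < 1000; i++) {
        Put put = new Put(Bytes.toBytes("row-" + i));
        put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("value-" + i));
        puts.add(put);
      }
      // mutations are buffered client-side and shipped to region servers in batches
      mutator.mutate(puts);
      mutator.flush();
    }
  }
}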
use of org.apache.hadoop.hbase.client.Put in project DataX by alibaba.
the class HBaseClient, method put.
public void put(String tableName, HBaseCell cell) throws Exception {
  Table table = getTable(tableName);
  Put p = new Put(Bytes.toBytes(cell.getRowKey()));
  p.addColumn(Bytes.toBytes(cell.getColf()), Bytes.toBytes(cell.getCol()), Bytes.toBytes(cell.getValue()));
  try {
    table.put(p);
  } catch (Exception e) {
    log.error("put table " + tableName + " failed.", e);
    throw e;
  } finally {
    if (table != null) {
      try {
        table.close();
      } catch (Exception e) {
        log.error("close table " + tableName + " failed.", e);
        throw e;
      }
    }
  }
}
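The snippet above opens a Table handle per call and guards the close by hand. A minimal try-with-resources sketch of the same single-cell write, assuming a shared long-lived Connection; the class, method, and parameter names here are hypothetical.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class SingleCellPutSketch {
  // 'connection' is assumed to be a long-lived Connection shared across calls
  public void putCell(Connection connection, String tableName, String rowKey,
                      String family, String qualifier, String value) throws Exception {
    // try-with-resources closes the lightweight Table handle even if put() throws
    try (Table table = connection.getTable(TableName.valueOf(tableName))) {
      Put p = new Put(Bytes.toBytes(rowKey));
      p.addColumn(Bytes.toBytes(family), Bytes.toBytes(qualifier), Bytes.toBytes(value));
      table.put(p);
    }
  }
}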
use of org.apache.hadoop.hbase.client.Put in project pinpoint by naver.
the class HbaseAgentInfoDao, method insert.
@Override
public void insert(TAgentInfo agentInfo) {
  if (agentInfo == null) {
    throw new NullPointerException("agentInfo must not be null");
  }
  if (logger.isDebugEnabled()) {
    logger.debug("insert agent info. {}", agentInfo);
  }
  byte[] agentId = Bytes.toBytes(agentInfo.getAgentId());
  long reverseKey = TimeUtils.reverseTimeMillis(agentInfo.getStartTimestamp());
  byte[] rowKey = RowKeyUtils.concatFixedByteAndLong(agentId, HBaseTables.AGENT_NAME_MAX_LEN, reverseKey);
  Put put = new Put(rowKey);
  // should add additional agent information; for now only the start time is added, for sqlMetaData
  AgentInfoBo agentInfoBo = this.agentInfoBoMapper.map(agentInfo);
  byte[] agentInfoBoValue = agentInfoBo.writeValue();
  put.addColumn(HBaseTables.AGENTINFO_CF_INFO, HBaseTables.AGENTINFO_CF_INFO_IDENTIFIER, agentInfoBoValue);
  if (agentInfo.isSetServerMetaData()) {
    ServerMetaDataBo serverMetaDataBo = this.serverMetaDataBoMapper.map(agentInfo.getServerMetaData());
    byte[] serverMetaDataBoValue = serverMetaDataBo.writeValue();
    put.addColumn(HBaseTables.AGENTINFO_CF_INFO, HBaseTables.AGENTINFO_CF_INFO_SERVER_META_DATA, serverMetaDataBoValue);
  }
  if (agentInfo.isSetJvmInfo()) {
    JvmInfoBo jvmInfoBo = this.jvmInfoBoMapper.map(agentInfo.getJvmInfo());
    byte[] jvmInfoBoValue = jvmInfoBo.writeValue();
    put.addColumn(HBaseTables.AGENTINFO_CF_INFO, HBaseTables.AGENTINFO_CF_INFO_JVM, jvmInfoBoValue);
  }
  hbaseTemplate.put(HBaseTables.AGENTINFO, put);
}
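The row key above is a fixed-width agent id followed by a reversed start timestamp, so the most recent record for an agent sorts first under HBase's lexicographic row ordering. RowKeyUtils and TimeUtils are Pinpoint internals; the following is only a hedged sketch of that key layout built with the HBase Bytes utility, with an assumed 24-byte id width.

import org.apache.hadoop.hbase.util.Bytes;

public class ReverseTimeRowKeySketch {
  // assumed fixed width for the agent-id portion of the key
  private static final int AGENT_ID_MAX_LEN = 24;

  public static byte[] makeRowKey(String agentId, long startTimestamp) {
    // left-justify the agent id in a fixed-width, zero-padded prefix
    byte[] prefix = new byte[AGENT_ID_MAX_LEN];
    byte[] id = Bytes.toBytes(agentId);
    System.arraycopy(id, 0, prefix, 0, Math.min(id.length, prefix.length));
    // reverse the timestamp so lexicographic row order puts the newest record first
    long reversed = Long.MAX_VALUE - startTimestamp;
    return Bytes.add(prefix, Bytes.toBytes(reversed));
  }
}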
use of org.apache.hadoop.hbase.client.Put in project pinpoint by naver.
the class HbaseApplicationTraceIndexDao, method insert.
@Override
public void insert(final TSpan span) {
  if (span == null) {
    throw new NullPointerException("span must not be null");
  }
  final Buffer buffer = new AutomaticBuffer(10 + AGENT_NAME_MAX_LEN);
  buffer.putVInt(span.getElapsed());
  buffer.putSVInt(span.getErr());
  buffer.putPrefixedString(span.getAgentId());
  final byte[] value = buffer.getBuffer();
  long acceptedTime = acceptedTimeService.getAcceptedTime();
  final byte[] distributedKey = createRowKey(span, acceptedTime);
  Put put = new Put(distributedKey);
  put.addColumn(APPLICATION_TRACE_INDEX_CF_TRACE, makeQualifier(span), acceptedTime, value);
  boolean success = hbaseTemplate.asyncPut(APPLICATION_TRACE_INDEX, put);
  if (!success) {
    hbaseTemplate.put(APPLICATION_TRACE_INDEX, put);
  }
}
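This insert stamps the cell with the collector's accepted time rather than letting the region server assign a timestamp, using the four-argument addColumn(family, qualifier, ts, value) overload, and falls back to a synchronous put if the asynchronous one is rejected. A minimal sketch of that timestamped overload in isolation; the table name and column family here are assumptions for illustration.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class TimestampedPutSketch {
  public void writeIndexedCell(Connection connection, byte[] rowKey, byte[] qualifier,
                               byte[] value, long acceptedTime) throws Exception {
    try (Table table = connection.getTable(TableName.valueOf("ApplicationTraceIndex"))) {
      Put put = new Put(rowKey);
      // the four-argument overload stamps the cell with the supplied time
      // instead of the region server's current time
      put.addColumn(Bytes.toBytes("I"), qualifier, acceptedTime, value);
      table.put(put);
    }
  }
}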