use of org.apache.hadoop.hbase.client.Put in project hbase by apache.
the class BackupSystemTable method addWALFiles.
/**
 * Register WAL files as eligible for deletion.
 * @param files WAL file paths to register
 * @param backupId backup id
 * @param backupRoot root directory path to the backup destination
 * @throws IOException if writing to the backup system table fails
 */
public void addWALFiles(List<String> files, String backupId, String backupRoot)
    throws IOException {
  if (LOG.isTraceEnabled()) {
    LOG.trace("add WAL files to backup system table: " + backupId + " " + backupRoot
        + " files [" + StringUtils.join(files, ",") + "]");
    for (String f : files) {
      LOG.debug("add :" + f);
    }
  }
  try (Table table = connection.getTable(tableName)) {
    List<Put> puts = createPutsForAddWALFiles(files, backupId, backupRoot);
    table.put(puts);
  }
}
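For context, a minimal caller sketch. It assumes the BackupSystemTable(Connection) constructor from the hbase-backup module and that the class is Closeable; the WAL paths, backup id, and backup root below are hypothetical.

import java.util.Arrays;
import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.backup.impl.BackupSystemTable;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class AddWalFilesExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         BackupSystemTable backupTable = new BackupSystemTable(conn)) {
      // Hypothetical WAL file paths and ids, for illustration only.
      List<String> walFiles = Arrays.asList(
          "hdfs://nn:8020/hbase/oldWALs/wal.1",
          "hdfs://nn:8020/hbase/oldWALs/wal.2");
      backupTable.addWALFiles(walFiles, "backup_001", "hdfs://nn:8020/backup");
    }
  }
}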
use of org.apache.hadoop.hbase.client.Put in project hbase by apache.
the class TableNamespaceManager method insertIntoNSTable.
public void insertIntoNSTable(final NamespaceDescriptor ns) throws IOException {
  if (nsTable == null) {
    throw new IOException(this.getClass().getName() + " isn't ready to serve");
  }
  Put p = new Put(Bytes.toBytes(ns.getName()));
  p.addImmutable(HTableDescriptor.NAMESPACE_FAMILY_INFO_BYTES,
      HTableDescriptor.NAMESPACE_COL_DESC_BYTES,
      ProtobufUtil.toProtoNamespaceDescriptor(ns).toByteArray());
  nsTable.put(p);
}
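insertIntoNSTable runs inside the master's TableNamespaceManager, so client code does not call it directly; it is reached through Admin.createNamespace, which is public API. A minimal client-side sketch (the namespace name "my_ns" is hypothetical):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.NamespaceDescriptor;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class CreateNamespaceExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // "my_ns" is a hypothetical namespace name.
      NamespaceDescriptor ns = NamespaceDescriptor.create("my_ns").build();
      // On the master, this ends up writing the Put shown above.
      admin.createNamespace(ns);
    }
  }
}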
use of org.apache.hadoop.hbase.client.Put in project hbase by apache.
the class HRegionServer method closeAndOfflineRegionForSplitOrMerge.
/**
 * Close and offline the regions for a split or merge.
 * @param regionEncodedName encoded names of the regions to close
 * @return true if the regions were closed successfully
 * @throws IOException if closing a region fails
 */
protected boolean closeAndOfflineRegionForSplitOrMerge(final List<String> regionEncodedName)
    throws IOException {
  for (int i = 0; i < regionEncodedName.size(); ++i) {
    Region regionToClose = this.getFromOnlineRegions(regionEncodedName.get(i));
    if (regionToClose != null) {
      Map<byte[], List<StoreFile>> hstoreFiles = null;
      Exception exceptionToThrow = null;
      try {
        hstoreFiles = ((HRegion) regionToClose).close(false);
      } catch (Exception e) {
        exceptionToThrow = e;
      }
      if (exceptionToThrow == null && hstoreFiles == null) {
        // The region was closed by someone else
        exceptionToThrow =
            new IOException("Failed to close region: already closed by another thread");
      }
      if (exceptionToThrow != null) {
        if (exceptionToThrow instanceof IOException) {
          throw (IOException) exceptionToThrow;
        }
        throw new IOException(exceptionToThrow);
      }
      if (regionToClose.getTableDesc().hasSerialReplicationScope()) {
        // For serial replication, we need to add a final barrier on this region. But the
        // splitting or merging may be reverted, so we should make sure that if we reopen
        // this region, the open barrier is the same as this final barrier.
        long seq = regionToClose.getMaxFlushedSeqId();
        if (seq == HConstants.NO_SEQNUM) {
          // No edits in WAL for this region; get the sequence number when the region was opened.
          seq = regionToClose.getOpenSeqNum();
          if (seq == HConstants.NO_SEQNUM) {
            // This region has no data
            seq = 0;
          }
        } else {
          seq++;
        }
        Put finalBarrier = MetaTableAccessor.makeBarrierPut(
            Bytes.toBytes(regionEncodedName.get(i)), seq,
            regionToClose.getTableDesc().getTableName().getName());
        MetaTableAccessor.putToMetaTable(getConnection(), finalBarrier);
      }
      // Offline the region
      this.removeFromOnlineRegions(regionToClose, null);
    }
  }
  return true;
}
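The sequence-number fallback above is the subtle part: one past the max flushed seq id when the region has WAL edits, else the open seq num, else 0 for an empty region. A minimal sketch isolating that decision as a standalone helper, assuming HConstants.NO_SEQNUM is the sentinel (as in the source):

import org.apache.hadoop.hbase.HConstants;

public final class BarrierSeq {
  private BarrierSeq() {
  }

  /** Chooses the sequence id for the final serial-replication barrier. */
  static long barrierSeq(long maxFlushedSeqId, long openSeqNum) {
    if (maxFlushedSeqId != HConstants.NO_SEQNUM) {
      // Place the barrier just past the last flushed edit.
      return maxFlushedSeqId + 1;
    }
    // No WAL edits: fall back to the open seq num, then to 0 for an empty region.
    return openSeqNum != HConstants.NO_SEQNUM ? openSeqNum : 0;
  }
}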
use of org.apache.hadoop.hbase.client.Put in project hbase by apache.
the class HRegion method doPreBatchMutateHook.
private void doPreBatchMutateHook(BatchOperation<?> batchOp) throws IOException {
  /* Run coprocessor pre hook outside of locks to avoid deadlock */
  WALEdit walEdit = new WALEdit();
  if (coprocessorHost != null) {
    for (int i = 0; i < batchOp.operations.length; i++) {
      Mutation m = batchOp.getMutation(i);
      if (m instanceof Put) {
        if (coprocessorHost.prePut((Put) m, walEdit, m.getDurability())) {
          // pre hook says skip this Put
          // mark as success and skip in doMiniBatchMutation
          batchOp.retCodeDetails[i] = OperationStatus.SUCCESS;
        }
      } else if (m instanceof Delete) {
        Delete curDel = (Delete) m;
        if (curDel.getFamilyCellMap().isEmpty()) {
          // handle deleting a row case
          prepareDelete(curDel);
        }
        if (coprocessorHost.preDelete(curDel, walEdit, m.getDurability())) {
          // pre hook says skip this Delete
          // mark as success and skip in doMiniBatchMutation
          batchOp.retCodeDetails[i] = OperationStatus.SUCCESS;
        }
      } else {
        // In case of passing Append mutations along with the Puts and Deletes in batchMutate,
        // mark the operation return code as failure so that it will not be considered in
        // the doMiniBatchMutation
        batchOp.retCodeDetails[i] = new OperationStatus(OperationStatusCode.FAILURE,
            "Put/Delete mutations only supported in batchMutate() now");
      }
      if (!walEdit.isEmpty()) {
        batchOp.walEditsFromCoprocessors[i] = walEdit;
        walEdit = new WALEdit();
      }
    }
  }
}
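From the other side of the hook: a RegionObserver makes coprocessorHost.prePut(...) return true, and thereby has this method mark the Put as SUCCESS and skip it, by calling bypass() on its context. A minimal sketch against the 1.x-era observer API (BaseRegionObserver); the "audit" column family is hypothetical:

import java.io.IOException;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.coprocessor.BaseRegionObserver;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
import org.apache.hadoop.hbase.util.Bytes;

/** Silently drops any Put that touches the hypothetical "audit" family. */
public class SkipAuditPutsObserver extends BaseRegionObserver {
  private static final byte[] AUDIT_CF = Bytes.toBytes("audit");

  @Override
  public void prePut(ObserverContext<RegionCoprocessorEnvironment> c, Put put,
      WALEdit edit, Durability durability) throws IOException {
    if (put.getFamilyCellMap().containsKey(AUDIT_CF)) {
      // bypass() makes coprocessorHost.prePut(...) return true, so
      // doPreBatchMutateHook marks the operation SUCCESS and skips it.
      c.bypass();
    }
  }
}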
use of org.apache.hadoop.hbase.client.Put in project hbase by apache.
the class HBaseFsck method resetSplitParent.
/**
 * Reset the split parent region info in the meta table.
 */
private void resetSplitParent(HbckInfo hi) throws IOException {
  RowMutations mutations = new RowMutations(hi.metaEntry.getRegionName());
  Delete d = new Delete(hi.metaEntry.getRegionName());
  d.addColumn(HConstants.CATALOG_FAMILY, HConstants.SPLITA_QUALIFIER);
  d.addColumn(HConstants.CATALOG_FAMILY, HConstants.SPLITB_QUALIFIER);
  mutations.add(d);
  HRegionInfo hri = new HRegionInfo(hi.metaEntry);
  hri.setOffline(false);
  hri.setSplit(false);
  Put p = MetaTableAccessor.makePutFromRegionInfo(hri);
  mutations.add(p);
  meta.mutateRow(mutations);
  LOG.info("Reset split parent " + hi.metaEntry.getRegionNameAsString() + " in META");
}
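The Delete-plus-Put RowMutations pattern used here applies to any single-row atomic rewrite, not just hbase:meta. A minimal client-side sketch with hypothetical table, family, and qualifier names:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.RowMutations;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class RowMutationsExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    byte[] row = Bytes.toBytes("row-1"); // hypothetical row key
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("demo"))) { // hypothetical table
      RowMutations mutations = new RowMutations(row);
      Delete d = new Delete(row);
      d.addColumn(Bytes.toBytes("f"), Bytes.toBytes("stale")); // drop old qualifier
      mutations.add(d);
      Put p = new Put(row);
      p.addColumn(Bytes.toBytes("f"), Bytes.toBytes("state"), Bytes.toBytes("reset"));
      mutations.add(p);
      table.mutateRow(mutations); // the Delete and Put apply atomically
    }
  }
}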