Use of org.apache.hbase.thirdparty.com.google.protobuf.ServiceException in project hbase by apache.
The class RSRpcServices, method bulkLoadHFile.
/**
 * Atomically bulk load several HFiles into an open region
 * @return true if successful, false if failed recoverably (no action taken)
 * @throws ServiceException if failed unrecoverably
 */
@Override
public BulkLoadHFileResponse bulkLoadHFile(final RpcController controller,
    final BulkLoadHFileRequest request) throws ServiceException {
  long start = EnvironmentEdgeManager.currentTime();
  List<String> clusterIds = new ArrayList<>(request.getClusterIdsList());
  if (clusterIds.contains(this.server.getClusterId())) {
    return BulkLoadHFileResponse.newBuilder().setLoaded(true).build();
  } else {
    clusterIds.add(this.server.getClusterId());
  }
  try {
    checkOpen();
    requestCount.increment();
    HRegion region = getRegion(request.getRegion());
    final boolean spaceQuotaEnabled = QuotaUtil.isQuotaEnabled(getConfiguration());
    long sizeToBeLoaded = -1;
    // Check to see if this bulk load would exceed the space quota for this table
    if (spaceQuotaEnabled) {
      ActivePolicyEnforcement activeSpaceQuotas = getSpaceQuotaManager().getActiveEnforcements();
      SpaceViolationPolicyEnforcement enforcement = activeSpaceQuotas.getPolicyEnforcement(region);
      if (enforcement != null) {
        // Bulk loads must still be atomic. We must enact all or none.
        List<String> filePaths = new ArrayList<>(request.getFamilyPathCount());
        for (FamilyPath familyPath : request.getFamilyPathList()) {
          filePaths.add(familyPath.getPath());
        }
        // Check if the batch of files exceeds the current quota
        sizeToBeLoaded = enforcement.computeBulkLoadSize(getFileSystem(filePaths), filePaths);
      }
    }
    // secure bulk load
    Map<byte[], List<Path>> map =
      server.getSecureBulkLoadManager().secureBulkLoadHFiles(region, request, clusterIds);
    BulkLoadHFileResponse.Builder builder = BulkLoadHFileResponse.newBuilder();
    builder.setLoaded(map != null);
    if (map != null) {
      // Treat any negative size as a flag to ignore updating the region size, since a negative
      // size is not possible to occur in real life (cannot bulk load a file with negative size)
      if (spaceQuotaEnabled && sizeToBeLoaded > 0) {
        if (LOG.isTraceEnabled()) {
          LOG.trace("Incrementing space use of " + region.getRegionInfo() + " by "
            + sizeToBeLoaded + " bytes");
        }
        // Inform space quotas of the new files for this region
        getSpaceQuotaManager().getRegionSizeStore().incrementRegionSize(region.getRegionInfo(),
          sizeToBeLoaded);
      }
    }
    return builder.build();
  } catch (IOException ie) {
    throw new ServiceException(ie);
  } finally {
    final MetricsRegionServer metricsRegionServer = server.getMetrics();
    if (metricsRegionServer != null) {
      metricsRegionServer.updateBulkLoad(EnvironmentEdgeManager.currentTime() - start);
    }
  }
}
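The pattern above recurs in every handler in this listing: an IOException raised inside the region server is rethrown wrapped in the shaded protobuf ServiceException so the RPC layer can marshal it back to the caller. As a minimal, hedged sketch (not part of RSRpcServices; the class name below is invented for illustration), a caller can recover the original IOException by inspecting the cause:

import java.io.IOException;
import org.apache.hbase.thirdparty.com.google.protobuf.ServiceException;

// Hypothetical helper, not an HBase API: recover the IOException that the
// server side wrapped via "throw new ServiceException(ie)".
public final class ServiceExceptionUnwrapper {

  private ServiceExceptionUnwrapper() {
  }

  public static IOException unwrap(ServiceException se) {
    Throwable cause = se.getCause();
    if (cause instanceof IOException) {
      // The original server-side exception, e.g. from checkOpen() or the bulk load itself.
      return (IOException) cause;
    }
    // No IOException cause; wrap the ServiceException so callers still get an IOException.
    return new IOException(se);
  }
}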
Use of org.apache.hbase.thirdparty.com.google.protobuf.ServiceException in project hbase by apache.
The class RSRpcServices, method getRegionLoad.
@Override
@QosPriority(priority = HConstants.ADMIN_QOS)
public GetRegionLoadResponse getRegionLoad(RpcController controller, GetRegionLoadRequest request)
    throws ServiceException {
  List<HRegion> regions;
  if (request.hasTableName()) {
    TableName tableName = ProtobufUtil.toTableName(request.getTableName());
    regions = server.getRegions(tableName);
  } else {
    regions = server.getRegions();
  }
  List<RegionLoad> rLoads = new ArrayList<>(regions.size());
  RegionLoad.Builder regionLoadBuilder = ClusterStatusProtos.RegionLoad.newBuilder();
  RegionSpecifier.Builder regionSpecifier = RegionSpecifier.newBuilder();
  try {
    for (HRegion region : regions) {
      rLoads.add(server.createRegionLoad(region, regionLoadBuilder, regionSpecifier));
    }
  } catch (IOException e) {
    throw new ServiceException(e);
  }
  GetRegionLoadResponse.Builder builder = GetRegionLoadResponse.newBuilder();
  builder.addAllRegionLoads(rLoads);
  return builder.build();
}
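For context, a caller-side sketch of driving this RPC might look like the following. This is an assumption-laden illustration: adminStub stands in for whatever blocking AdminService stub the caller holds, ProtobufUtil.toProtoTableName is assumed to be the inverse of the toTableName conversion used above, and the accessors on the response are the standard protobuf-generated ones.

// Hedged sketch: build a table-scoped GetRegionLoadRequest and read the response.
// "adminStub" is a hypothetical blocking AdminService stub held by the caller.
static void printRegionLoads(AdminService.BlockingInterface adminStub) throws IOException {
  GetRegionLoadRequest loadRequest = GetRegionLoadRequest.newBuilder()
    .setTableName(ProtobufUtil.toProtoTableName(TableName.valueOf("my_table")))
    .build();
  try {
    GetRegionLoadResponse loadResponse = adminStub.getRegionLoad(null, loadRequest);
    for (RegionLoad load : loadResponse.getRegionLoadsList()) {
      // Each entry carries the per-region metrics assembled by createRegionLoad above;
      // getRegionSpecifier() is the generated accessor for the region_specifier field.
      System.out.println(load.getRegionSpecifier());
    }
  } catch (ServiceException se) {
    // Same convention as the server side: the wrapped cause is the original IOException.
    Throwable cause = se.getCause();
    throw cause instanceof IOException ? (IOException) cause : new IOException(se);
  }
}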
Use of org.apache.hbase.thirdparty.com.google.protobuf.ServiceException in project hbase by apache.
The class RSRpcServices, method compactRegion.
/**
 * Compact a region on the region server.
 *
 * @param controller the RPC controller
 * @param request the request
 * @throws ServiceException
 */
@Override
@QosPriority(priority = HConstants.ADMIN_QOS)
public CompactRegionResponse compactRegion(final RpcController controller,
    final CompactRegionRequest request) throws ServiceException {
  try {
    checkOpen();
    requestCount.increment();
    HRegion region = getRegion(request.getRegion());
    // Reject the request if quota support is enabled, the requesting user is not a superuser,
    // and a quota policy is enforced that disables compactions.
    if (QuotaUtil.isQuotaEnabled(getConfiguration())
      && !Superusers.isSuperUser(RpcServer.getRequestUser().orElse(null))
      && this.server.getRegionServerSpaceQuotaManager()
        .areCompactionsDisabled(region.getTableDescriptor().getTableName())) {
      throw new DoNotRetryIOException(
        "Compactions on this region are disabled due to a space quota violation.");
    }
    region.startRegionOperation(Operation.COMPACT_REGION);
    LOG.info("Compacting " + region.getRegionInfo().getRegionNameAsString());
    boolean major = request.hasMajor() && request.getMajor();
    if (request.hasFamily()) {
      byte[] family = request.getFamily().toByteArray();
      String log = "User-triggered " + (major ? "major " : "") + "compaction for region "
        + region.getRegionInfo().getRegionNameAsString() + " and family " + Bytes.toString(family);
      LOG.trace(log);
      region.requestCompaction(family, log, Store.PRIORITY_USER, major,
        CompactionLifeCycleTracker.DUMMY);
    } else {
      String log = "User-triggered " + (major ? "major " : "") + "compaction for region "
        + region.getRegionInfo().getRegionNameAsString();
      LOG.trace(log);
      region.requestCompaction(log, Store.PRIORITY_USER, major, CompactionLifeCycleTracker.DUMMY);
    }
    return CompactRegionResponse.newBuilder().build();
  } catch (IOException ie) {
    throw new ServiceException(ie);
  }
}
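As a hedged sketch of the request shape this handler expects, a caller asking for a user-triggered major compaction of a single family might assemble the message directly with the generated builders. The builder and enum names below follow standard protobuf codegen and should be treated as assumptions; the region name and family bytes are placeholders, and ByteString is the shaded org.apache.hbase.thirdparty class.

// Hedged sketch: a CompactRegionRequest for a major compaction of one column family.
static CompactRegionRequest buildMajorCompactRequest(byte[] regionName, byte[] family) {
  RegionSpecifier regionSpecifier = RegionSpecifier.newBuilder()
    .setType(RegionSpecifier.RegionSpecifierType.REGION_NAME)
    .setValue(ByteString.copyFrom(regionName))
    .build();
  return CompactRegionRequest.newBuilder()
    .setRegion(regionSpecifier)
    // hasMajor()/getMajor() on the server side read this as a major compaction request
    .setMajor(true)
    // hasFamily() routes the server into the single-family branch above
    .setFamily(ByteString.copyFrom(family))
    .build();
}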
Use of org.apache.hbase.thirdparty.com.google.protobuf.ServiceException in project hbase by apache.
The class RSRpcServices, method replay.
/**
 * Replay the given changes when distributedLogReplay WAL edits from a failed RS. The guarantee is
 * that the given mutations will be durable on the receiving RS if this method returns without any
 * exception.
 * @param controller the RPC controller
 * @param request the request
 * @deprecated Since 3.0.0, will be removed in 4.0.0. Not used any more, put here only for
 *             compatibility with old region replica implementation. Now we will use
 *             {@code replicateToReplica} method instead.
 */
@Deprecated
@Override
@QosPriority(priority = HConstants.REPLAY_QOS)
public ReplicateWALEntryResponse replay(final RpcController controller,
    final ReplicateWALEntryRequest request) throws ServiceException {
  long before = EnvironmentEdgeManager.currentTime();
  CellScanner cells = getAndReset(controller);
  try {
    checkOpen();
    List<WALEntry> entries = request.getEntryList();
    if (entries == null || entries.isEmpty()) {
      // empty input
      return ReplicateWALEntryResponse.newBuilder().build();
    }
    ByteString regionName = entries.get(0).getKey().getEncodedRegionName();
    HRegion region = server.getRegionByEncodedName(regionName.toStringUtf8());
    // do not invoke coprocessors if this is a secondary region replica
    RegionCoprocessorHost coprocessorHost =
      ServerRegionReplicaUtil.isDefaultReplica(region.getRegionInfo())
        ? region.getCoprocessorHost()
        : null;
    List<Pair<WALKey, WALEdit>> walEntries = new ArrayList<>();
    // Skip adding the edits to WAL if this is a secondary region replica
    boolean isPrimary = RegionReplicaUtil.isDefaultReplica(region.getRegionInfo());
    Durability durability = isPrimary ? Durability.USE_DEFAULT : Durability.SKIP_WAL;
    for (WALEntry entry : entries) {
      if (!regionName.equals(entry.getKey().getEncodedRegionName())) {
        throw new NotServingRegionException("Replay request contains entries from multiple "
          + "regions. First region:" + regionName.toStringUtf8() + " , other region:"
          + entry.getKey().getEncodedRegionName());
      }
      if (server.nonceManager != null && isPrimary) {
        long nonceGroup =
          entry.getKey().hasNonceGroup() ? entry.getKey().getNonceGroup() : HConstants.NO_NONCE;
        long nonce = entry.getKey().hasNonce() ? entry.getKey().getNonce() : HConstants.NO_NONCE;
        server.nonceManager.reportOperationFromWal(nonceGroup, nonce,
          entry.getKey().getWriteTime());
      }
      Pair<WALKey, WALEdit> walEntry = (coprocessorHost == null) ? null : new Pair<>();
      List<MutationReplay> edits =
        WALSplitUtil.getMutationsFromWALEntry(entry, cells, walEntry, durability);
      if (coprocessorHost != null) {
        // Start coprocessor replay here. The coprocessor is invoked per WALEdit instead of per
        // KeyValue.
        if (coprocessorHost.preWALRestore(region.getRegionInfo(), walEntry.getFirst(),
          walEntry.getSecond())) {
          // if bypass this log entry, ignore it ...
          continue;
        }
        walEntries.add(walEntry);
      }
      if (edits != null && !edits.isEmpty()) {
        // HBASE-17924: sort to improve lock efficiency
        Collections.sort(edits, (v1, v2) -> Row.COMPARATOR.compare(v1.mutation, v2.mutation));
        long replaySeqId = (entry.getKey().hasOrigSequenceNumber())
          ? entry.getKey().getOrigSequenceNumber()
          : entry.getKey().getLogSequenceNumber();
        OperationStatus[] result = doReplayBatchOp(region, edits, replaySeqId);
        // check if it's a partial success
        for (int i = 0; result != null && i < result.length; i++) {
          if (result[i] != OperationStatus.SUCCESS) {
            throw new IOException(result[i].getExceptionMsg());
          }
        }
      }
    }
    // sync wal at the end because ASYNC_WAL is used above
    WAL wal = region.getWAL();
    if (wal != null) {
      wal.sync();
    }
    if (coprocessorHost != null) {
      for (Pair<WALKey, WALEdit> entry : walEntries) {
        coprocessorHost.postWALRestore(region.getRegionInfo(), entry.getFirst(), entry.getSecond());
      }
    }
    return ReplicateWALEntryResponse.newBuilder().build();
  } catch (IOException ie) {
    throw new ServiceException(ie);
  } finally {
    final MetricsRegionServer metricsRegionServer = server.getMetrics();
    if (metricsRegionServer != null) {
      metricsRegionServer.updateReplay(EnvironmentEdgeManager.currentTime() - before);
    }
  }
}
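One consequence of wrapping everything in ServiceException is that a caller that cares about specific failure modes, such as the NotServingRegionException thrown above when entries span multiple regions or the region is not hosted here, has to unwrap the cause before deciding whether to retry. A hedged caller-side sketch, with replayStub standing in for the blocking AdminService stub the replay machinery would hold:

// Hedged sketch: unwrap the ServiceException thrown by replay() to decide on retries.
static ReplicateWALEntryResponse callReplay(AdminService.BlockingInterface replayStub,
    ReplicateWALEntryRequest request) throws IOException {
  try {
    return replayStub.replay(null, request);
  } catch (ServiceException se) {
    Throwable cause = se.getCause();
    if (cause instanceof NotServingRegionException) {
      // The target region is gone or the request mixed regions; re-locate and retry elsewhere.
      throw (NotServingRegionException) cause;
    }
    if (cause instanceof IOException) {
      throw (IOException) cause;
    }
    throw new IOException(se);
  }
}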
Use of org.apache.hbase.thirdparty.com.google.protobuf.ServiceException in project hbase by apache.
The class MasterRpcServices, method getCompletedSnapshots.
/**
 * List the currently available/stored snapshots. Any in-progress snapshots are ignored.
 */
@Override
public GetCompletedSnapshotsResponse getCompletedSnapshots(RpcController controller,
    GetCompletedSnapshotsRequest request) throws ServiceException {
  try {
    server.checkInitialized();
    GetCompletedSnapshotsResponse.Builder builder = GetCompletedSnapshotsResponse.newBuilder();
    List<SnapshotDescription> snapshots = server.snapshotManager.getCompletedSnapshots();
    // convert to protobuf
    for (SnapshotDescription snapshot : snapshots) {
      builder.addSnapshots(snapshot);
    }
    return builder.build();
  } catch (IOException e) {
    throw new ServiceException(e);
  }
}
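On the caller side the response is simply a list of SnapshotDescription protobufs. A hedged sketch of consuming it follows; masterStub is a placeholder for the blocking MasterService stub, and getSnapshotsList()/getName() are assumed to be the standard generated accessors for the fields populated above.

// Hedged sketch: list completed snapshot names via the RPC above.
static List<String> listSnapshotNames(MasterService.BlockingInterface masterStub)
    throws IOException {
  try {
    GetCompletedSnapshotsResponse response =
      masterStub.getCompletedSnapshots(null, GetCompletedSnapshotsRequest.newBuilder().build());
    List<String> names = new ArrayList<>();
    for (SnapshotDescription snapshot : response.getSnapshotsList()) {
      names.add(snapshot.getName());
    }
    return names;
  } catch (ServiceException se) {
    // Unwrap, mirroring how the master wrapped the original IOException.
    Throwable cause = se.getCause();
    throw cause instanceof IOException ? (IOException) cause : new IOException(se);
  }
}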