Use of org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.TimedQuota in project hbase by apache.
The class QuotaUtil, method checkRSQuotaToEnableExceedThrottle.
private static void checkRSQuotaToEnableExceedThrottle(Quotas quotas) throws IOException {
  if (quotas != null && quotas.hasThrottle()) {
    Throttle throttle = quotas.getThrottle();
    // If enable exceed throttle quota, make sure that there are at least one read(req/read +
    // num/size/cu) and one write(req/write + num/size/cu) region server throttle quotas.
    boolean hasReadQuota = false;
    boolean hasWriteQuota = false;
    if (throttle.hasReqNum() || throttle.hasReqSize() || throttle.hasReqCapacityUnit()) {
      hasReadQuota = true;
      hasWriteQuota = true;
    }
    if (!hasReadQuota
        && (throttle.hasReadNum() || throttle.hasReadSize() || throttle.hasReadCapacityUnit())) {
      hasReadQuota = true;
    }
    if (!hasReadQuota) {
      throw new DoNotRetryIOException(
          "Please set at least one read region server quota before enable exceed throttle quota");
    }
    if (!hasWriteQuota
        && (throttle.hasWriteNum() || throttle.hasWriteSize() || throttle.hasWriteCapacityUnit())) {
      hasWriteQuota = true;
    }
    if (!hasWriteQuota) {
      throw new DoNotRetryIOException(
          "Please set at least one write region server quota before enable exceed throttle quota");
    }
    // If enable exceed throttle quota, make sure that region server throttle quotas are in
    // seconds time unit. Because once previous requests exceed their quota and consume region
    // server quota, quota in other time units may be refilled in a long time, this may affect
    // later requests.
    List<Pair<Boolean, TimedQuota>> list =
        Arrays.asList(Pair.newPair(throttle.hasReqNum(), throttle.getReqNum()),
            Pair.newPair(throttle.hasReadNum(), throttle.getReadNum()),
            Pair.newPair(throttle.hasWriteNum(), throttle.getWriteNum()),
            Pair.newPair(throttle.hasReqSize(), throttle.getReqSize()),
            Pair.newPair(throttle.hasReadSize(), throttle.getReadSize()),
            Pair.newPair(throttle.hasWriteSize(), throttle.getWriteSize()),
            Pair.newPair(throttle.hasReqCapacityUnit(), throttle.getReqCapacityUnit()),
            Pair.newPair(throttle.hasReadCapacityUnit(), throttle.getReadCapacityUnit()),
            Pair.newPair(throttle.hasWriteCapacityUnit(), throttle.getWriteCapacityUnit()));
    for (Pair<Boolean, TimedQuota> pair : list) {
      if (pair.getFirst()) {
        if (pair.getSecond().getTimeUnit() != TimeUnit.SECONDS) {
          throw new DoNotRetryIOException(
              "All region server quota must be in seconds time unit if enable exceed throttle quota");
        }
      }
    }
  } else {
    // If enable exceed throttle quota, make sure that region server quota is already set
    throw new DoNotRetryIOException(
        "Please set region server quota before enable exceed throttle quota");
  }
}
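For illustration, here is a minimal, hypothetical sketch (not part of the HBase source; the class name and the limit of 1000 are arbitrary) of a region server Quotas message that passes both checks above: a single req_num throttle marks the read and the write quota as present, and it uses the mandatory SECONDS time unit.

import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;
import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos;
import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.Quotas;
import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.Throttle;
import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.TimedQuota;

public class ExceedThrottlePreconditionExample {
  public static void main(String[] args) {
    // A req_num quota sets both hasReadQuota and hasWriteQuota in the check above.
    TimedQuota reqNum = TimedQuota.newBuilder()
        .setSoftLimit(1000) // arbitrary example limit
        .setScope(QuotaProtos.QuotaScope.MACHINE)
        .setTimeUnit(HBaseProtos.TimeUnit.SECONDS) // SECONDS is required for exceed throttle
        .build();
    Quotas quotas = Quotas.newBuilder()
        .setThrottle(Throttle.newBuilder().setReqNum(reqNum).build())
        .build();
    // Such a message would pass checkRSQuotaToEnableExceedThrottle; switching the time unit
    // to MINUTES, or omitting the throttle entirely, would make the check throw.
    System.out.println(quotas);
  }
}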
Use of org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.TimedQuota in project hbase by apache.
The class TestThrottleSettings, method testIncompatibleThrottleTypes.
@Test
public void testIncompatibleThrottleTypes() throws IOException {
  TimedQuota requestsQuota = TimedQuota.newBuilder().setSoftLimit(10)
      .setScope(QuotaProtos.QuotaScope.MACHINE)
      .setTimeUnit(HBaseProtos.TimeUnit.MINUTES).build();
  ThrottleRequest requestsQuotaReq = ThrottleRequest.newBuilder().setTimedQuota(requestsQuota)
      .setType(QuotaProtos.ThrottleType.REQUEST_NUMBER).build();
  ThrottleSettings orig = new ThrottleSettings("joe", null, null, null, requestsQuotaReq);

  TimedQuota readsQuota = TimedQuota.newBuilder().setSoftLimit(10)
      .setScope(QuotaProtos.QuotaScope.MACHINE)
      .setTimeUnit(HBaseProtos.TimeUnit.SECONDS).build();
  ThrottleRequest readsQuotaReq = ThrottleRequest.newBuilder().setTimedQuota(readsQuota)
      .setType(QuotaProtos.ThrottleType.READ_NUMBER).build();

  try {
    orig.merge(new ThrottleSettings("joe", null, null, null, readsQuotaReq));
    fail("A read throttle should not be capable of being merged with a request quota");
  } catch (IllegalArgumentException e) {
    // Pass
  }
}
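The same expectation can be written more compactly with assertThrows. The following sketch assumes JUnit 4.13+ and a static import of org.junit.Assert.assertThrows in addition to the imports the test class already uses; the method name is hypothetical.

@Test
public void testIncompatibleThrottleTypesWithAssertThrows() {
  TimedQuota requestsQuota = TimedQuota.newBuilder().setSoftLimit(10)
      .setScope(QuotaProtos.QuotaScope.MACHINE)
      .setTimeUnit(HBaseProtos.TimeUnit.MINUTES).build();
  ThrottleSettings requests = new ThrottleSettings("joe", null, null, null,
      ThrottleRequest.newBuilder().setTimedQuota(requestsQuota)
          .setType(QuotaProtos.ThrottleType.REQUEST_NUMBER).build());

  TimedQuota readsQuota = TimedQuota.newBuilder().setSoftLimit(10)
      .setScope(QuotaProtos.QuotaScope.MACHINE)
      .setTimeUnit(HBaseProtos.TimeUnit.SECONDS).build();
  ThrottleSettings reads = new ThrottleSettings("joe", null, null, null,
      ThrottleRequest.newBuilder().setTimedQuota(readsQuota)
          .setType(QuotaProtos.ThrottleType.READ_NUMBER).build());

  // merge rejects the second settings object because REQUEST_NUMBER and READ_NUMBER
  // are different throttle types.
  assertThrows(IllegalArgumentException.class, () -> requests.merge(reads));
}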
Use of org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.TimedQuota in project hbase by apache.
The class TestThrottleSettings, method testMerge.
@Test
public void testMerge() throws IOException {
  TimedQuota tq1 = TimedQuota.newBuilder().setSoftLimit(10)
      .setScope(QuotaProtos.QuotaScope.MACHINE)
      .setTimeUnit(HBaseProtos.TimeUnit.MINUTES).build();
  ThrottleRequest tr1 = ThrottleRequest.newBuilder().setTimedQuota(tq1)
      .setType(QuotaProtos.ThrottleType.REQUEST_NUMBER).build();
  ThrottleSettings orig = new ThrottleSettings("joe", null, null, null, tr1);

  TimedQuota tq2 = TimedQuota.newBuilder().setSoftLimit(10)
      .setScope(QuotaProtos.QuotaScope.MACHINE)
      .setTimeUnit(HBaseProtos.TimeUnit.SECONDS).build();
  ThrottleRequest tr2 = ThrottleRequest.newBuilder().setTimedQuota(tq2)
      .setType(QuotaProtos.ThrottleType.REQUEST_NUMBER).build();

  ThrottleSettings merged = orig.merge(new ThrottleSettings("joe", null, null, null, tr2));

  assertEquals(10, merged.getSoftLimit());
  assertEquals(ThrottleType.REQUEST_NUMBER, merged.getThrottleType());
  assertEquals(TimeUnit.SECONDS, merged.getTimeUnit());
}
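Seen from the client API, the same replace-on-merge behavior applies when a throttle of the same type is set twice for the same user: the later setQuota call overrides the earlier limit and time unit, mirroring what testMerge asserts on ThrottleSettings.merge. A minimal sketch, assuming a reachable cluster and the standard Admin/QuotaSettingsFactory API; the user "joe", the limits, and the class name are example assumptions.

import java.util.concurrent.TimeUnit;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.quotas.QuotaSettingsFactory;
import org.apache.hadoop.hbase.quotas.ThrottleType;

public class ThrottleMergeClientExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
        Admin admin = connection.getAdmin()) {
      // First throttle: 10 requests per minute for user "joe".
      admin.setQuota(QuotaSettingsFactory.throttleUser("joe",
          ThrottleType.REQUEST_NUMBER, 10, TimeUnit.MINUTES));
      // Second throttle for the same user and throttle type: the stored quota is merged,
      // so the new limit and time unit (10 requests per second) replace the old ones.
      admin.setQuota(QuotaSettingsFactory.throttleUser("joe",
          ThrottleType.REQUEST_NUMBER, 10, TimeUnit.SECONDS));
    }
  }
}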
Use of org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.TimedQuota in project hbase by apache.
The class ThrottleSettings, method toString.
@Override
public String toString() {
  StringBuilder builder = new StringBuilder();
  builder.append("TYPE => THROTTLE");
  if (proto.hasType()) {
    builder.append(", THROTTLE_TYPE => ");
    builder.append(proto.getType().toString());
  }
  if (proto.hasTimedQuota()) {
    QuotaProtos.TimedQuota timedQuota = proto.getTimedQuota();
    builder.append(", LIMIT => ");
    if (timedQuota.hasSoftLimit()) {
      switch (getThrottleType()) {
        case REQUEST_NUMBER:
        case WRITE_NUMBER:
        case READ_NUMBER:
          builder.append(String.format("%dreq", timedQuota.getSoftLimit()));
          break;
        case REQUEST_SIZE:
        case WRITE_SIZE:
        case READ_SIZE:
          builder.append(sizeToString(timedQuota.getSoftLimit()));
          break;
        case REQUEST_CAPACITY_UNIT:
        case READ_CAPACITY_UNIT:
        case WRITE_CAPACITY_UNIT:
          builder.append(String.format("%dCU", timedQuota.getSoftLimit()));
          break;
        default:
      }
    } else if (timedQuota.hasShare()) {
      builder.append(String.format("%.2f%%", timedQuota.getShare()));
    }
    builder.append('/');
    builder.append(timeToString(ProtobufUtil.toTimeUnit(timedQuota.getTimeUnit())));
    if (timedQuota.hasScope()) {
      builder.append(", SCOPE => ");
      builder.append(timedQuota.getScope().toString());
    }
  } else {
    builder.append(", LIMIT => NONE");
  }
  return builder.toString();
}
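To see the string this method produces, here is a minimal sketch. ThrottleToStringExample is a hypothetical class; it is placed in org.apache.hadoop.hbase.quotas only because the ThrottleSettings constructor is package-private, the same arrangement the tests above rely on, and the expected output shown in the comment is approximate.

package org.apache.hadoop.hbase.quotas;

import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;
import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos;
import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.ThrottleRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.TimedQuota;

public class ThrottleToStringExample {
  public static void main(String[] args) {
    TimedQuota tq = TimedQuota.newBuilder().setSoftLimit(10)
        .setScope(QuotaProtos.QuotaScope.MACHINE)
        .setTimeUnit(HBaseProtos.TimeUnit.MINUTES).build();
    ThrottleRequest req = ThrottleRequest.newBuilder().setTimedQuota(tq)
        .setType(QuotaProtos.ThrottleType.REQUEST_NUMBER).build();
    ThrottleSettings settings = new ThrottleSettings("joe", null, null, null, req);
    // Expected to print something like:
    //   TYPE => THROTTLE, THROTTLE_TYPE => REQUEST_NUMBER, LIMIT => 10req/min, SCOPE => MACHINE
    // (the exact time-unit spelling comes from the timeToString helper).
    System.out.println(settings);
  }
}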
Use of org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.TimedQuota in project hbase by apache.
The class GlobalQuotaSettingsImpl, method toString.
@Override
public String toString() {
  StringBuilder builder = new StringBuilder();
  builder.append("GlobalQuota: ");
  if (throttleProto != null) {
    Map<ThrottleType, TimedQuota> throttleQuotas = buildThrottleQuotas(throttleProto);
    builder.append(" { TYPE => THROTTLE ");
    for (Entry<ThrottleType, TimedQuota> entry : throttleQuotas.entrySet()) {
      final ThrottleType type = entry.getKey();
      final TimedQuota timedQuota = entry.getValue();
      builder.append("{THROTTLE_TYPE => ").append(type.name()).append(", LIMIT => ");
      if (timedQuota.hasSoftLimit()) {
        switch (type) {
          case REQUEST_NUMBER:
          case WRITE_NUMBER:
          case READ_NUMBER:
            builder.append(String.format("%dreq", timedQuota.getSoftLimit()));
            break;
          case REQUEST_SIZE:
          case WRITE_SIZE:
          case READ_SIZE:
            builder.append(sizeToString(timedQuota.getSoftLimit()));
            break;
          case REQUEST_CAPACITY_UNIT:
          case READ_CAPACITY_UNIT:
          case WRITE_CAPACITY_UNIT:
            builder.append(String.format("%dCU", timedQuota.getSoftLimit()));
          default:
        }
      } else if (timedQuota.hasShare()) {
        builder.append(String.format("%.2f%%", timedQuota.getShare()));
      }
      builder.append('/');
      builder.append(timeToString(ProtobufUtil.toTimeUnit(timedQuota.getTimeUnit())));
      if (timedQuota.hasScope()) {
        builder.append(", SCOPE => ");
        builder.append(timedQuota.getScope().toString());
      }
    }
    builder.append("} } ");
  } else {
    builder.append(" {} ");
  }
  if (bypassGlobals != null) {
    builder.append(" { GLOBAL_BYPASS => " + bypassGlobals + " } ");
  }
  if (spaceProto != null) {
    builder.append(" { TYPE => SPACE");
    if (getTableName() != null) {
      builder.append(", TABLE => ").append(getTableName());
    }
    if (getNamespace() != null) {
      builder.append(", NAMESPACE => ").append(getNamespace());
    }
    if (spaceProto.getRemove()) {
      builder.append(", REMOVE => ").append(spaceProto.getRemove());
    } else {
      builder.append(", LIMIT => ").append(sizeToString(spaceProto.getSoftLimit()));
      builder.append(", VIOLATION_POLICY => ").append(spaceProto.getViolationPolicy());
    }
    builder.append(" } ");
  }
  return builder.toString();
}
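GlobalQuotaSettingsImpl is the master-side merged view of a quota subject. On the client, the configured quotas can be listed through Admin and printed in the same "TYPE => THROTTLE, ... LIMIT => ..." style used by the toString implementations shown above. A minimal sketch, assuming an HBase 2.x client, a reachable cluster, and an example user filter of "joe"; the class name is hypothetical.

import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.quotas.QuotaFilter;
import org.apache.hadoop.hbase.quotas.QuotaSettings;

public class ListQuotasExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
        Admin admin = connection.getAdmin()) {
      // Fetch the quotas defined for user "joe" (example filter) and print each one;
      // the printed form follows the "TYPE => THROTTLE, ... LIMIT => ..." convention
      // of the toString implementations shown above.
      List<QuotaSettings> quotas = admin.getQuota(new QuotaFilter().setUserFilter("joe"));
      for (QuotaSettings quota : quotas) {
        System.out.println(quota);
      }
    }
  }
}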