Use of org.apache.hadoop.hbase.NotServingRegionException in project hbase by apache.
The class HBaseAdmin, method compact.
/**
 * Compact a table. Asynchronous operation.
 *
 * @param tableName table or region to compact
 * @param columnFamily column family within a table or region
 * @param major True if we are to do a major compaction.
 * @param compactType {@link org.apache.hadoop.hbase.client.CompactType}
 * @throws IOException if a remote or network exception occurs
 */
private void compact(final TableName tableName, final byte[] columnFamily, final boolean major, CompactType compactType) throws IOException {
    switch (compactType) {
        case MOB:
            // MOB compaction is requested against the master-hosted MOB region.
            compact(getMasterAddress(), getMobRegionInfo(tableName), major, columnFamily);
            break;
        case NORMAL:
        default:
            ZooKeeperWatcher zk = null;
            try {
                checkTableExists(tableName);
                zk = new ZooKeeperWatcher(conf, ZK_IDENTIFIER_PREFIX + connection.toString(), new ThrowableAbortable());
                // META region locations come from ZooKeeper; all other tables
                // are looked up through the meta table.
                final List<Pair<HRegionInfo, ServerName>> locations =
                        TableName.META_TABLE_NAME.equals(tableName)
                                ? new MetaTableLocator().getMetaRegionsAndLocations(zk)
                                : MetaTableAccessor.getTableRegionsAndLocations(connection, tableName);
                for (final Pair<HRegionInfo, ServerName> location : locations) {
                    // Skip regions that are offline or currently unassigned.
                    if (location.getFirst().isOffline() || location.getSecond() == null) {
                        continue;
                    }
                    try {
                        compact(location.getSecond(), location.getFirst(), major, columnFamily);
                    } catch (NotServingRegionException e) {
                        // Best effort: a region may move between the lookup and
                        // the RPC; log at debug and continue with the rest.
                        if (LOG.isDebugEnabled()) {
                            LOG.debug("Trying to" + (major ? " major" : "") + " compact " + location.getFirst() + ": " + StringUtils.stringifyException(e));
                        }
                    }
                }
            } finally {
                if (zk != null) {
                    zk.close();
                }
            }
            break;
    }
}
Use of org.apache.hadoop.hbase.NotServingRegionException in project hbase by apache.
The class ScannerCallable, method next.
/**
 * Issue the next scan RPC for this scanner and return the raw response.
 * <p>
 * The heartbeat flag is cleared before every RPC so a stale value cannot
 * survive a thrown exception, and {@code nextCallSeq} is only advanced after
 * a successful call so a retried RPC reuses the same sequence number.
 *
 * @return the server's ScanResponse for this call
 * @throws IOException if the RPC fails; NotServingRegionException and
 *           RegionServerStoppedException are wrapped in DoNotRetryIOException
 *           to tell the caller to re-setup the scanner rather than retry
 *           this individual call
 */
private ScanResponse next() throws IOException {
    // Reset the heartbeat flag prior to each RPC in case an exception is thrown by the server
    setHeartbeatMessage(false);
    incRPCcallsMetrics();
    ScanRequest request = RequestConverter.buildScanRequest(scannerId, caching, false, nextCallSeq, this.scanMetrics != null, renew, scan.getLimit());
    try {
        ScanResponse response = getStub().scan(getRpcController(), request);
        // Only bump the sequence number on success.
        nextCallSeq++;
        return response;
    } catch (Exception e) {
        IOException ioe = ProtobufUtil.handleRemoteException(e);
        // Single activity-logging section (previously two consecutive checks
        // of the same logScannerActivity flag).
        if (logScannerActivity) {
            LOG.info("Got exception making request " + ProtobufUtil.toText(request) + " to " + getLocation(), e);
            if (ioe instanceof UnknownScannerException) {
                try {
                    HRegionLocation location = getConnection().relocateRegion(getTableName(), scan.getStartRow());
                    LOG.info("Scanner=" + scannerId + " expired, current region location is " + location.toString());
                } catch (Throwable t) {
                    LOG.info("Failed to relocate region", t);
                }
            } else if (ioe instanceof ScannerResetException) {
                LOG.info("Scanner=" + scannerId + " has received an exception, and the server " + "asked us to reset the scanner state.", ioe);
            }
        }
        // Translate the failure for the caller (this flow is hard to follow
        // and in need of a refactor).
        if (ioe instanceof NotServingRegionException) {
            // Attach NSRE to signal client that it needs to re-setup scanner.
            if (this.scanMetrics != null) {
                this.scanMetrics.countOfNSRE.incrementAndGet();
            }
            throw new DoNotRetryIOException("Resetting the scanner -- see exception cause", ioe);
        } else if (ioe instanceof RegionServerStoppedException) {
            // open scanner against new location.
            throw new DoNotRetryIOException("Resetting the scanner -- see exception cause", ioe);
        } else {
            // The outer layers will retry
            throw ioe;
        }
    }
}
Use of org.apache.hadoop.hbase.NotServingRegionException in project phoenix by apache.
The class UpsertSelectOverlappingBatchesIT, method testSplitDuringUpsertSelect.
/**
 * Tests that splitting a region is not blocked indefinitely by UPSERT SELECT load.
 * <p>
 * Starts several background UPSERT SELECT loopers with mutations artificially
 * slowed, then repeatedly requests a split of the single data-table region and
 * waits (up to 60s) for the region count to exceed one.
 */
@Test
public void testSplitDuringUpsertSelect() throws Exception {
    int numUpsertSelectRunners = 4;
    ExecutorService exec = Executors.newFixedThreadPool(numUpsertSelectRunners);
    try (Connection conn = driver.connect(url, props)) {
        final UpsertSelectRunner upsertSelectRunner = new UpsertSelectRunner(dataTable, 0, 105, 1);
        // keep running slow upsert selects
        SlowBatchRegionObserver.SLOW_MUTATE = true;
        for (int i = 0; i < numUpsertSelectRunners; i++) {
            exec.submit(new UpsertSelectLooper(upsertSelectRunner));
            Thread.sleep(300);
        }
        // keep trying to split the region
        final HBaseTestingUtility utility = getUtility();
        final HBaseAdmin admin = utility.getHBaseAdmin();
        final TableName dataTN = TableName.valueOf(dataTable);
        assertEquals(1, utility.getHBaseCluster().getRegions(dataTN).size());
        utility.waitFor(60000L, 1000, new Waiter.Predicate<Exception>() {
            @Override
            public boolean evaluate() throws Exception {
                try {
                    List<HRegionInfo> regions = admin.getTableRegions(dataTN);
                    if (regions.size() > 1) {
                        logger.info("Found region was split");
                        return true;
                    }
                    if (regions.size() == 0) {
                        // This happens when region in transition or closed
                        logger.info("No region returned");
                        return false;
                    }
                    // (removed a stray empty statement ';' that was here)
                    HRegionInfo hRegion = regions.get(0);
                    logger.info("Attempting to split region");
                    admin.splitRegion(hRegion.getRegionName(), Bytes.toBytes(2));
                    return false;
                } catch (NotServingRegionException nsre) {
                    // Expected while the region is mid-split; keep polling.
                    return false;
                }
            }
        });
    } finally {
        // Always stop the slow-mutate load and tear down the pool, even if
        // the wait above fails.
        SlowBatchRegionObserver.SLOW_MUTATE = false;
        exec.shutdownNow();
        exec.awaitTermination(60, TimeUnit.SECONDS);
    }
}
Use of org.apache.hadoop.hbase.NotServingRegionException in project hbase by apache.
The class TestTableInputFormat, method createDNRIOEScannerTable.
/**
 * Create a table whose scanner throws a NotServingRegionException on the
 * first {@code failCnt} getScanner() calls; subsequent calls delegate to the
 * real scanner.
 *
 * @param name table name handed to createTable
 * @param failCnt number of getScanner() calls that receive the failing scanner
 * @return a spy of the real table with getScanner() stubbed
 * @throws IOException if table creation fails
 */
static Table createDNRIOEScannerTable(byte[] name, final int failCnt) throws IOException {
    // Stub getScanner() so the first failCnt invocations hand back a broken
    // scanner, after which the real implementation takes over.
    Answer<ResultScanner> failFirstAnswer = new Answer<ResultScanner>() {
        // number of getScanner() invocations seen so far
        int invocations = 0;

        @Override
        public ResultScanner answer(InvocationOnMock invocation) throws Throwable {
            if (invocations++ >= failCnt) {
                // Failure budget exhausted: return the real scanner.
                return (ResultScanner) invocation.callRealMethod();
            }
            // Mock Scan returning a bogus start row so callers don't NPE.
            Scan scan = mock(Scan.class);
            doReturn(Bytes.toBytes("bogus")).when(scan).getStartRow();
            // NOTE(review): the result of this real call is discarded —
            // presumably invoked only for its side effects; confirm intent.
            invocation.callRealMethod();
            // Broken scanner: every next() throws NotServingRegionException.
            ResultScanner scanner = mock(ResultScanner.class);
            doThrow(new NotServingRegionException("Injected simulated TimeoutException")).when(scanner).next();
            return scanner;
        }
    };
    Table htable = spy(createTable(name));
    doAnswer(failFirstAnswer).when(htable).getScanner((Scan) anyObject());
    return htable;
}
Use of org.apache.hadoop.hbase.NotServingRegionException in project hbase by apache.
The class TestTableInputFormat, method createDNRIOEScannerTable.
/**
 * Create a table whose scanner throws a NotServingRegionException on the
 * first {@code failCnt} getScanner() calls; later calls delegate to the real
 * scanner. (NOTE(review): the summary previously said "DoNoRetryIOException",
 * but the code below throws NotServingRegionException.)
 *
 * @param name table name handed to createTable
 * @param failCnt number of getScanner() calls that receive the failing scanner
 * @return a spy of the real table with getScanner() stubbed
 * @throws IOException if table creation fails
 */
static Table createDNRIOEScannerTable(byte[] name, final int failCnt) throws IOException {
// build up a mock scanner stuff to fail the first time
Answer<ResultScanner> a = new Answer<ResultScanner>() {
// count of getScanner() invocations seen so far
int cnt = 0;
@Override
public ResultScanner answer(InvocationOnMock invocation) throws Throwable {
// first failCnt invocations return the busted mock scanner
if (cnt++ < failCnt) {
// create mock ResultScanner that always fails.
Scan scan = mock(Scan.class);
// avoid npe
doReturn(Bytes.toBytes("bogus")).when(scan).getStartRow();
ResultScanner scanner = mock(ResultScanner.class);
// simulate NotServingRegionException; the real call's result is discarded —
// presumably invoked only for its side effects; TODO confirm intent
invocation.callRealMethod();
doThrow(new NotServingRegionException("Injected simulated TimeoutException")).when(scanner).next();
return scanner;
}
// otherwise return the real scanner.
return (ResultScanner) invocation.callRealMethod();
}
};
Table htable = spy(createTable(name));
doAnswer(a).when(htable).getScanner((Scan) anyObject());
return htable;
}
Aggregations