Use of org.apache.hadoop.hbase.TableNotFoundException in project hbase by apache.
The class TestAdmin1, method testSplitFlushCompactUnknownTable: verifies that compact, flush, and split against a nonexistent table each fail with a TableNotFoundException.
@Test(timeout = 300000)
public void testSplitFlushCompactUnknownTable() throws InterruptedException {
  final TableName unknowntable = TableName.valueOf(name.getMethodName());
  Exception exception = null;
  try {
    this.admin.compact(unknowntable);
  } catch (IOException e) {
    exception = e;
  }
  assertTrue(exception instanceof TableNotFoundException);
  exception = null;
  try {
    this.admin.flush(unknowntable);
  } catch (IOException e) {
    exception = e;
  }
  assertTrue(exception instanceof TableNotFoundException);
  exception = null;
  try {
    this.admin.split(unknowntable);
  } catch (IOException e) {
    exception = e;
  }
  assertTrue(exception instanceof TableNotFoundException);
}
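The repeated catch-and-assert blocks can be written more compactly with assertThrows (org.junit.Assert, available since JUnit 4.13). A minimal sketch, assuming the same admin and unknowntable fields as in the test above:

  // Each admin operation against a missing table should fail with TableNotFoundException.
  assertThrows(TableNotFoundException.class, () -> admin.compact(unknowntable));
  assertThrows(TableNotFoundException.class, () -> admin.flush(unknowntable));
  assertThrows(TableNotFoundException.class, () -> admin.split(unknowntable));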
Use of org.apache.hadoop.hbase.TableNotFoundException in project hbase by apache.
The class ResourceBase, method processException: maps exceptions from the HBase client onto JAX-RS HTTP error responses, unwrapping cause chains where the interesting exception may be buried.
protected Response processException(Throwable exp) {
  Throwable curr = exp;
  if (accessDeniedClazz != null) {
    // Some access-denied exceptions are buried
    while (curr != null) {
      if (accessDeniedClazz.isAssignableFrom(curr.getClass())) {
        throw new WebApplicationException(Response.status(Response.Status.FORBIDDEN)
            .type(MIMETYPE_TEXT)
            .entity("Forbidden" + CRLF + StringUtils.stringifyException(exp) + CRLF).build());
      }
      curr = curr.getCause();
    }
  }
  // TableNotFoundException may also be buried one level deep
  if (exp instanceof TableNotFoundException || exp.getCause() instanceof TableNotFoundException) {
    throw new WebApplicationException(Response.status(Response.Status.NOT_FOUND)
        .type(MIMETYPE_TEXT)
        .entity("Not found" + CRLF + StringUtils.stringifyException(exp) + CRLF).build());
  }
  if (exp instanceof NoSuchColumnFamilyException) {
    throw new WebApplicationException(Response.status(Response.Status.NOT_FOUND)
        .type(MIMETYPE_TEXT)
        .entity("Not found" + CRLF + StringUtils.stringifyException(exp) + CRLF).build());
  }
  if (exp instanceof RuntimeException) {
    throw new WebApplicationException(Response.status(Response.Status.BAD_REQUEST)
        .type(MIMETYPE_TEXT)
        .entity("Bad request" + CRLF + StringUtils.stringifyException(exp) + CRLF).build());
  }
  if (exp instanceof RetriesExhaustedWithDetailsException) {
    RetriesExhaustedWithDetailsException retryException = (RetriesExhaustedWithDetailsException) exp;
    processException(retryException.getCause(0));
  }
  throw new WebApplicationException(Response.status(Response.Status.SERVICE_UNAVAILABLE)
      .type(MIMETYPE_TEXT)
      .entity("Unavailable" + CRLF + StringUtils.stringifyException(exp) + CRLF).build());
}
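Note the asymmetry above: the access-denied check walks the entire cause chain, while the TableNotFoundException check only looks one level deep. A minimal sketch of a generic helper that walks the whole chain; the name findCause is hypothetical, and it requires java.util.Optional:

  // Return the first throwable of the given type in exp's cause chain, if any.
  static <T extends Throwable> Optional<T> findCause(Throwable exp, Class<T> type) {
    for (Throwable curr = exp; curr != null; curr = curr.getCause()) {
      if (type.isInstance(curr)) {
        return Optional.of(type.cast(curr));
      }
    }
    return Optional.empty();
  }

With such a helper, the TableNotFoundException branch would reduce to findCause(exp, TableNotFoundException.class).isPresent().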
Use of org.apache.hadoop.hbase.TableNotFoundException in project hbase by apache.
The class TestScannerWithBulkload, method testBulkLoadWithParallelScan: verifies that a scanner opened before a bulk load sees the bulk-loaded files on subsequent next() calls.
@Test
public void testBulkLoadWithParallelScan() throws Exception {
  final TableName tableName = TableName.valueOf(name.getMethodName());
  final long l = System.currentTimeMillis();
  final Admin admin = TEST_UTIL.getAdmin();
  createTable(admin, tableName);
  Scan scan = createScan();
  scan.setCaching(1);
  final Table table = init(admin, l, scan, tableName);
  // Use bulk load
  final Path hfilePath = writeToHFile(l, "/temp/testBulkLoadWithParallelScan/",
      "/temp/testBulkLoadWithParallelScan/col/file", false);
  Configuration conf = TEST_UTIL.getConfiguration();
  conf.setBoolean("hbase.mapreduce.bulkload.assign.sequenceNumbers", true);
  final LoadIncrementalHFiles bulkload = new LoadIncrementalHFiles(conf);
  ResultScanner scanner = table.getScanner(scan);
  Result result = scanner.next();
  // The scanner is open; now run the put and the bulk load from a second thread
  final CountDownLatch latch = new CountDownLatch(1);
  new Thread() {
    @Override
    public void run() {
      try {
        Put put1 = new Put(Bytes.toBytes("row5"));
        put1.add(new KeyValue(Bytes.toBytes("row5"), Bytes.toBytes("col"), Bytes.toBytes("q"), l,
            Bytes.toBytes("version0")));
        table.put(put1);
        try (RegionLocator locator = TEST_UTIL.getConnection().getRegionLocator(tableName)) {
          bulkload.doBulkLoad(hfilePath, admin, table, locator);
        }
        latch.countDown();
      } catch (TableNotFoundException e) {
        // Swallowed; note the latch is only counted down on success.
      } catch (IOException e) {
        // Swallowed; note the latch is only counted down on success.
      }
    }
  }.start();
  latch.await();
  // By the time we do next() the bulk loaded files are also added to the kv scanner
  scanAfterBulkLoad(scanner, result, "version1");
  scanner.close();
  table.close();
}
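A fragility worth noting: if table.put or doBulkLoad throws, the catch blocks above swallow the exception without counting down the latch, so latch.await() blocks until the test times out. A minimal sketch of a more defensive arrangement, assuming java.util.concurrent.atomic.AtomicReference and JUnit's assertNull; the failure variable is illustrative:

  final CountDownLatch latch = new CountDownLatch(1);
  final AtomicReference<Throwable> failure = new AtomicReference<>();
  new Thread(() -> {
    try {
      // ... put the row and run the bulk load, as in the test above ...
    } catch (Throwable t) {
      failure.set(t);    // record the failure instead of swallowing it
    } finally {
      latch.countDown(); // always release the waiting test thread
    }
  }).start();
  latch.await();
  assertNull("bulk load thread failed", failure.get());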
Use of org.apache.hadoop.hbase.TableNotFoundException in project titan by thinkaurelius.
The class HBaseStoreManager, method ensureColumnFamilyExists: adds a missing column family to an existing table, disabling the table around the schema change and re-enabling it afterwards.
private void ensureColumnFamilyExists(String tableName, String columnFamily, int ttlInSeconds) throws BackendException {
  AdminMask adm = null;
  try {
    adm = getAdminInterface();
    HTableDescriptor desc = ensureTableExists(tableName, columnFamily, ttlInSeconds);
    Preconditions.checkNotNull(desc);
    HColumnDescriptor cf = desc.getFamily(columnFamily.getBytes());
    // Create our column family, if necessary
    if (cf == null) {
      try {
        if (!adm.isTableDisabled(tableName)) {
          adm.disableTable(tableName);
        }
      } catch (TableNotEnabledException e) {
        logger.debug("Table {} already disabled", tableName);
      } catch (IOException e) {
        throw new TemporaryBackendException(e);
      }
      try {
        HColumnDescriptor cdesc = new HColumnDescriptor(columnFamily);
        setCFOptions(cdesc, ttlInSeconds);
        adm.addColumn(tableName, cdesc);
        try {
          logger.debug("Added HBase ColumnFamily {}, waiting for 1 sec. to propagate.", columnFamily);
          Thread.sleep(1000L);
        } catch (InterruptedException ie) {
          throw new TemporaryBackendException(ie);
        }
        adm.enableTable(tableName);
      } catch (TableNotFoundException ee) {
        logger.error("TableNotFoundException", ee);
        throw new PermanentBackendException(ee);
      } catch (org.apache.hadoop.hbase.TableExistsException ee) {
        logger.debug("Swallowing exception {}", ee);
      } catch (IOException ee) {
        throw new TemporaryBackendException(ee);
      }
    }
  } finally {
    IOUtils.closeQuietly(adm);
  }
}
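setCFOptions applies store-level settings such as the TTL to the new descriptor before the column family is added. A minimal illustrative sketch of such a helper, assuming the HBase 1.x HColumnDescriptor API; this is an assumption for illustration, not Titan's actual implementation:

  // Illustrative only: apply a per-column-family TTL when one was requested.
  private void setCFOptions(HColumnDescriptor cdesc, int ttlInSeconds) {
    if (ttlInSeconds > 0) {
      cdesc.setTimeToLive(ttlInSeconds); // HBase expires cells older than this many seconds
    }
  }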
Use of org.apache.hadoop.hbase.TableNotFoundException in project phoenix by apache.
The class ProductMetricsIT, method destroyTable: physically deletes the backing HBase table so that region splits occur as expected for each test; a missing table is ignored.
private static void destroyTable() throws Exception {
  // Physically delete HBase table so that splits occur as expected for each test
  Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
  ConnectionQueryServices services = DriverManager.getConnection(getUrl(), props)
      .unwrap(PhoenixConnection.class).getQueryServices();
  HBaseAdmin admin = services.getAdmin();
  try {
    try {
      admin.disableTable(PRODUCT_METRICS_NAME);
      admin.deleteTable(PRODUCT_METRICS_NAME);
    } catch (TableNotFoundException e) {
      // Ignore: the table does not exist yet, so there is nothing to delete.
    }
  } finally {
    admin.close();
  }
}
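The empty catch above relies on TableNotFoundException for control flow. An alternative is to probe for the table first; a minimal sketch, assuming the HBase 1.x HBaseAdmin API:

  // Delete the table only if it exists, avoiding exception-driven control flow.
  if (admin.tableExists(PRODUCT_METRICS_NAME)) {
    if (!admin.isTableDisabled(PRODUCT_METRICS_NAME)) {
      admin.disableTable(PRODUCT_METRICS_NAME);
    }
    admin.deleteTable(PRODUCT_METRICS_NAME);
  }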