Use of org.apache.hadoop.hbase.TableNotFoundException in project hbase by apache.
The class TestAdmin1, method testDeleteEditUnknownColumnFamilyAndOrTable.
@Test(timeout = 300000)
public void testDeleteEditUnknownColumnFamilyAndOrTable() throws IOException {
  // Test that we get an exception if we try to operate on a table or
  // column family that does not exist.
  final TableName nonexistentTable = TableName.valueOf("nonexistent");
  final byte[] nonexistentColumn = Bytes.toBytes("nonexistent");
  HColumnDescriptor nonexistentHcd = new HColumnDescriptor(nonexistentColumn);
  Exception exception = null;
  try {
    this.admin.addColumnFamily(nonexistentTable, nonexistentHcd);
  } catch (IOException e) {
    exception = e;
  }
  assertTrue(exception instanceof TableNotFoundException);
  exception = null;
  try {
    this.admin.deleteTable(nonexistentTable);
  } catch (IOException e) {
    exception = e;
  }
  assertTrue(exception instanceof TableNotFoundException);
  exception = null;
  try {
    this.admin.deleteColumnFamily(nonexistentTable, nonexistentColumn);
  } catch (IOException e) {
    exception = e;
  }
  assertTrue(exception instanceof TableNotFoundException);
  exception = null;
  try {
    this.admin.disableTable(nonexistentTable);
  } catch (IOException e) {
    exception = e;
  }
  assertTrue(exception instanceof TableNotFoundException);
  exception = null;
  try {
    this.admin.enableTable(nonexistentTable);
  } catch (IOException e) {
    exception = e;
  }
  assertTrue(exception instanceof TableNotFoundException);
  exception = null;
  try {
    this.admin.modifyColumnFamily(nonexistentTable, nonexistentHcd);
  } catch (IOException e) {
    exception = e;
  }
  assertTrue(exception instanceof TableNotFoundException);
  exception = null;
  try {
    HTableDescriptor htd = new HTableDescriptor(nonexistentTable);
    htd.addFamily(new HColumnDescriptor(HConstants.CATALOG_FAMILY));
    this.admin.modifyTable(htd.getTableName(), htd);
  } catch (IOException e) {
    exception = e;
  }
  assertTrue(exception instanceof TableNotFoundException);
  // Now make it so at least the table exists and then do tests against a
  // nonexistent column family -- see if we get the right exceptions.
  final TableName tableName =
      TableName.valueOf(name.getMethodName() + System.currentTimeMillis());
  HTableDescriptor htd = new HTableDescriptor(tableName);
  htd.addFamily(new HColumnDescriptor("cf"));
  this.admin.createTable(htd);
  try {
    exception = null;
    try {
      this.admin.deleteColumnFamily(htd.getTableName(), nonexistentHcd.getName());
    } catch (IOException e) {
      exception = e;
    }
    assertTrue("found=" + exception.getClass().getName(),
        exception instanceof InvalidFamilyOperationException);
    exception = null;
    try {
      this.admin.modifyColumnFamily(htd.getTableName(), nonexistentHcd);
    } catch (IOException e) {
      exception = e;
    }
    assertTrue("found=" + exception.getClass().getName(),
        exception instanceof InvalidFamilyOperationException);
  } finally {
    this.admin.disableTable(tableName);
    this.admin.deleteTable(tableName);
  }
}
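The catch-and-assign pattern above predates JUnit's built-in exception assertions. As a hedged aside (not part of the HBase source), the same checks can be written more compactly with org.junit.Assert.assertThrows, available since JUnit 4.13; "admin" here stands in for the test's Admin instance:

import static org.junit.Assert.assertThrows;

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.TableNotFoundException;
import org.apache.hadoop.hbase.client.Admin;

public class TableNotFoundAssertionSketch {
  // Sketch only: each call against a nonexistent table must raise
  // TableNotFoundException, mirroring the assertions in the test above.
  static void assertOperationsFailOnMissingTable(Admin admin) {
    final TableName missing = TableName.valueOf("nonexistent");
    assertThrows(TableNotFoundException.class, () -> admin.deleteTable(missing));
    assertThrows(TableNotFoundException.class, () -> admin.disableTable(missing));
    assertThrows(TableNotFoundException.class, () -> admin.enableTable(missing));
  }
}

A side benefit: assertThrows also fails with a clear message when no exception is thrown at all, whereas the original idiom relies on assertTrue(null instanceof ...) evaluating to false.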
Use of org.apache.hadoop.hbase.TableNotFoundException in project hbase by apache.
The class BackupLogCleaner, method getDeletableFiles.
@Override
public Iterable<FileStatus> getDeletableFiles(Iterable<FileStatus> files) {
  // All members of this class are null if backup is disabled,
  // so we cannot filter the files.
  if (this.getConf() == null || !BackupManager.isBackupEnabled(getConf())) {
    LOG.warn("Backup is not enabled. Check your "
        + BackupRestoreConstants.BACKUP_ENABLE_KEY + " setting");
    return files;
  }
  List<FileStatus> list = new ArrayList<FileStatus>();
  try (final BackupSystemTable table = new BackupSystemTable(conn)) {
    // If we do not have recorded backup sessions, there is nothing to filter on.
    try {
      if (!table.hasBackupSessions()) {
        LOG.trace("BackupLogCleaner has no backup sessions");
        return files;
      }
    } catch (TableNotFoundException tnfe) {
      LOG.warn("Backup system table is not available: " + tnfe.getMessage());
      return files;
    }
    for (FileStatus file : files) {
      String wal = file.getPath().toString();
      boolean logInSystemTable = table.isWALFileDeletable(wal);
      // Note: list.add() must not be gated behind LOG.isDebugEnabled(),
      // otherwise no file is ever deleted unless debug logging is on.
      if (logInSystemTable) {
        if (LOG.isDebugEnabled()) {
          LOG.debug("Found log file in backup system table, deleting: " + wal);
        }
        list.add(file);
      } else {
        if (LOG.isDebugEnabled()) {
          LOG.debug("Didn't find this log in backup system table, keeping: " + wal);
        }
      }
    }
    return list;
  } catch (IOException e) {
    LOG.error("Failed to get backup system table, therefore will keep all files", e);
    // Nothing to delete if we cannot read the backup system table.
    return new ArrayList<FileStatus>();
  }
}
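The cleaner deliberately treats a missing backup system table as "keep everything" rather than a hard failure. A minimal sketch of the same defensive pattern using only public client APIs (the connection, table name, and helper name here are illustrative assumptions, not part of BackupLogCleaner):

import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.TableNotFoundException;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Table;

class SafeProbeSketch {
  // Returns true if the row exists; returns false (instead of failing)
  // when the table itself has not been created yet.
  static boolean rowExistsOrTableMissing(Connection conn, TableName name, byte[] row)
      throws IOException {
    try (Table table = conn.getTable(name)) {
      return table.exists(new Get(row));
    } catch (TableNotFoundException tnfe) {
      // Table absent: report "no data" rather than propagating the error.
      return false;
    }
  }
}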
Use of org.apache.hadoop.hbase.TableNotFoundException in project hbase by apache.
The class DisableTableProcedure, method prepareDisable.
/**
 * Action before any real action of disabling table. Sets the exception in the procedure
 * instead of throwing it; this approach keeps backward compatibility with 1.0.
 * @param env MasterProcedureEnv
 * @return whether the table can be disabled
 * @throws IOException
 */
private boolean prepareDisable(final MasterProcedureEnv env) throws IOException {
  boolean canTableBeDisabled = true;
  if (tableName.equals(TableName.META_TABLE_NAME)) {
    setFailure("master-disable-table", new ConstraintException("Cannot disable catalog table"));
    canTableBeDisabled = false;
  } else if (!MetaTableAccessor.tableExists(env.getMasterServices().getConnection(), tableName)) {
    setFailure("master-disable-table", new TableNotFoundException(tableName));
    canTableBeDisabled = false;
  } else if (!skipTableStateCheck) {
    // There could be multiple client requests trying to disable or enable
    // the table at the same time. Ensure only the first request is honored.
    // After that, no other requests can be accepted until the table reaches
    // DISABLED or ENABLED.
    //
    // Note: in the 1.0 release, we called TableStateManager.setTableStateIfInStates() to set
    // the state to DISABLING from ENABLED. The implementation was done before table lock
    // was implemented. With table lock, there is no need to set the state here (it will
    // be set later on). A quick state check should be enough for us to move forward.
    TableStateManager tsm = env.getMasterServices().getTableStateManager();
    TableState.State state = tsm.getTableState(tableName);
    if (!state.equals(TableState.State.ENABLED)) {
      LOG.info("Table " + tableName + " isn't enabled; is " + state.name()
          + "; skipping disable");
      setFailure("master-disable-table",
          new TableNotEnabledException(tableName + " state is " + state.name()));
      canTableBeDisabled = false;
    }
  }
  // We are done with the check. Future actions in this procedure could be done asynchronously.
  releaseSyncLatch();
  return canTableBeDisabled;
}
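From a client's perspective, the failures recorded above surface as TableNotFoundException or TableNotEnabledException on Admin.disableTable(). A hedged sketch of a client-side guard (illustrative only; the check-then-act is inherently racy, which is exactly why the procedure re-validates on the server side):

import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;

class DisableIfEnabledSketch {
  // Only issue the disable when the table exists and is currently enabled.
  // Another client can still change the state between the check and the call,
  // so callers should be prepared for the exceptions regardless.
  static void disableIfEnabled(Admin admin, TableName name) throws IOException {
    if (admin.tableExists(name) && admin.isTableEnabled(name)) {
      admin.disableTable(name);
    }
  }
}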
Use of org.apache.hadoop.hbase.TableNotFoundException in project hbase by apache.
The class EnableTableProcedure, method prepareEnable.
/**
 * Action before any real action of enabling table. Sets the exception in the procedure
 * instead of throwing it; this approach keeps backward compatibility with 1.0.
 * @param env MasterProcedureEnv
 * @return whether the table passes the necessary checks
 * @throws IOException
 */
private boolean prepareEnable(final MasterProcedureEnv env) throws IOException {
  boolean canTableBeEnabled = true;
  // Check whether the table exists.
  if (!MetaTableAccessor.tableExists(env.getMasterServices().getConnection(), tableName)) {
    setFailure("master-enable-table", new TableNotFoundException(tableName));
    canTableBeEnabled = false;
  } else if (!skipTableStateCheck) {
    // There could be multiple client requests trying to disable or enable
    // the table at the same time. Ensure only the first request is honored.
    // After that, no other requests can be accepted until the table reaches
    // DISABLED or ENABLED.
    //
    // Note: in the 1.0 release, we called TableStateManager.setTableStateIfInStates() to set
    // the state to ENABLING from DISABLED. The implementation was done before table lock
    // was implemented. With table lock, there is no need to set the state here (it will
    // be set later on). A quick state check should be enough for us to move forward.
    TableStateManager tsm = env.getMasterServices().getTableStateManager();
    TableState.State state = tsm.getTableState(tableName);
    if (!state.equals(TableState.State.DISABLED)) {
      LOG.info("Table " + tableName + " isn't disabled; is " + state.name()
          + "; skipping enable");
      setFailure("master-enable-table",
          new TableNotDisabledException(this.tableName + " state is " + state.name()));
      canTableBeEnabled = false;
    }
  }
  // We are done with the check. Future actions in this procedure could be done asynchronously.
  releaseSyncLatch();
  return canTableBeEnabled;
}
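Because only the first of several racing requests is honored, a client that loses the race can see TableNotDisabledException from Admin.enableTable(). A hedged sketch (not HBase code) of making the enable idempotent on the caller's side:

import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.TableNotDisabledException;
import org.apache.hadoop.hbase.client.Admin;

class IdempotentEnableSketch {
  // Treat "table is not disabled" as success when the table has in fact
  // reached ENABLED; otherwise surface the original error.
  static void enableIdempotently(Admin admin, TableName name) throws IOException {
    try {
      admin.enableTable(name);
    } catch (TableNotDisabledException e) {
      if (!admin.isTableEnabled(name)) {
        throw e; // still mid-transition or in an unexpected state
      }
    }
  }
}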
Use of org.apache.hadoop.hbase.TableNotFoundException in project hbase by apache.
The class RegionReplicaFlushHandler, method triggerFlushInPrimaryRegion.
void triggerFlushInPrimaryRegion(final HRegion region) throws IOException, RuntimeException {
  long pause = connection.getConfiguration().getLong(HConstants.HBASE_CLIENT_PAUSE,
      HConstants.DEFAULT_HBASE_CLIENT_PAUSE);
  int maxAttempts = getRetriesCount(connection.getConfiguration());
  RetryCounter counter = new RetryCounterFactory(maxAttempts, (int) pause).create();
  if (LOG.isDebugEnabled()) {
    LOG.debug("Attempting to do an RPC to the primary region replica "
        + ServerRegionReplicaUtil.getRegionInfoForDefaultReplica(region.getRegionInfo())
            .getEncodedName()
        + " of region " + region.getRegionInfo().getEncodedName() + " to trigger a flush");
  }
  while (!region.isClosing() && !region.isClosed() && !server.isAborted() && !server.isStopped()) {
    FlushRegionCallable flushCallable = new FlushRegionCallable(connection, rpcControllerFactory,
        RegionReplicaUtil.getRegionInfoForDefaultReplica(region.getRegionInfo()), true);
    // TODO: flushRegion() is a blocking call waiting for the flush to complete. Ideally we
    // do not have to wait for the whole flush here, just initiate it.
    FlushRegionResponse response = null;
    try {
      response = rpcRetryingCallerFactory.<FlushRegionResponse>newCaller()
          .callWithRetries(flushCallable, this.operationTimeout);
    } catch (IOException ex) {
      if (ex instanceof TableNotFoundException
          || connection.isTableDisabled(region.getRegionInfo().getTable())) {
        return;
      }
      throw ex;
    }
    if (response.getFlushed()) {
      // The primary flushed; reads stay blocked until we observe
      // a complete flush cycle or replay a region open event.
      if (LOG.isDebugEnabled()) {
        LOG.debug("Successfully triggered a flush of primary region replica "
            + ServerRegionReplicaUtil.getRegionInfoForDefaultReplica(region.getRegionInfo())
                .getEncodedName()
            + " of region " + region.getRegionInfo().getEncodedName()
            + ". Now waiting and blocking reads until observing a full flush cycle");
      }
      break;
    } else {
      if (response.hasWroteFlushWalMarker()) {
        if (response.getWroteFlushWalMarker()) {
          if (LOG.isDebugEnabled()) {
            LOG.debug("Successfully triggered an empty flush marker (memstore empty) of primary "
                + "region replica "
                + ServerRegionReplicaUtil.getRegionInfoForDefaultReplica(region.getRegionInfo())
                    .getEncodedName()
                + " of region " + region.getRegionInfo().getEncodedName() + ". Now waiting and "
                + "blocking reads until observing a flush marker");
          }
          break;
        } else {
          // The primary is closing or already flushing. Retry the flush again after some sleep.
          if (!counter.shouldRetry()) {
            throw new IOException("Cannot cause primary to flush or drop a wal marker after "
                + "retries. Failing opening of this region replica "
                + region.getRegionInfo().getEncodedName());
          }
        }
      } else {
        // Nothing to do. Are we dealing with an old server?
        LOG.warn("Was not able to trigger a flush from primary region due to old server version? "
            + "Continuing to open the secondary region replica: "
            + region.getRegionInfo().getEncodedName());
        region.setReadsEnabled(true);
        break;
      }
    }
    try {
      counter.sleepUntilNextRetry();
    } catch (InterruptedException e) {
      throw new InterruptedIOException(e.getMessage());
    }
  }
}
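The loop above leans on HBase's RetryCounter utility for bounded, paced retries. A stripped-down sketch of that pattern, independent of the flush logic (the attempt count, pause, and method names are illustrative):

import org.apache.hadoop.hbase.util.RetryCounter;
import org.apache.hadoop.hbase.util.RetryCounterFactory;

class RetryLoopSketch {
  // Run "attempt" until it succeeds or the retry budget is exhausted,
  // sleeping between attempts. Returns true on success.
  static boolean runWithRetries(Runnable attempt, int maxAttempts, int pauseMs)
      throws InterruptedException {
    RetryCounter counter = new RetryCounterFactory(maxAttempts, pauseMs).create();
    while (true) {
      try {
        attempt.run();
        return true;
      } catch (RuntimeException e) {
        if (!counter.shouldRetry()) {
          return false; // retry budget exhausted
        }
        counter.sleepUntilNextRetry(); // back off before the next try
      }
    }
  }
}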