Use of org.apache.hadoop.hbase.TableExistsException in project phoenix by apache: class ConnectionQueryServicesImpl, method ensureTableCreated.
/**
 * Ensures the physical HBase table backing a Phoenix table exists, creating it when absent
 * and (optionally) reconciling its metadata when it already exists.
 *
 * @param physicalTableName fully qualified physical HBase table name
 * @param tableType Phoenix table type; for a VIEW the HBase table must already exist
 * @param props table-level properties applied to the generated descriptor
 * @param families column families (name plus per-family properties) for the descriptor
 * @param splits initial region split points, or null to create with a single region
 * @param modifyExistingMetaData when false, an existing table is returned unmodified
 * @param isNamespaceMapped whether the table name is namespace-mapped
 * @return the new descriptor when an existing table's metadata was modified, the existing
 *         descriptor when modifyExistingMetaData is false, or null otherwise (table newly
 *         created, created concurrently by another client, or no metadata change needed)
 * @throws SQLException wrapping any server IOException, interrupt, or operation timeout
 */
private HTableDescriptor ensureTableCreated(byte[] physicalTableName, PTableType tableType, Map<String, Object> props, List<Pair<byte[], Map<String, Object>>> families, byte[][] splits, boolean modifyExistingMetaData, boolean isNamespaceMapped) throws SQLException {
    SQLException sqlE = null;
    HTableDescriptor existingDesc = null;
    boolean isMetaTable = SchemaUtil.isMetaTable(physicalTableName);
    boolean tableExist = true;
    try (HBaseAdmin admin = getAdmin()) {
        final String quorum = ZKConfig.getZKQuorumServersString(config);
        final String znode = this.props.get(HConstants.ZOOKEEPER_ZNODE_PARENT);
        logger.debug("Found quorum: " + quorum + ":" + znode);
        try {
            existingDesc = admin.getTableDescriptor(physicalTableName);
        } catch (org.apache.hadoop.hbase.TableNotFoundException e) {
            tableExist = false;
            // A VIEW never creates its backing table; fail fast when it is missing.
            if (tableType == PTableType.VIEW) {
                String fullTableName = Bytes.toString(physicalTableName);
                throw new ReadOnlyTableException("An HBase table for a VIEW must already exist", SchemaUtil.getSchemaNameFromFullName(fullTableName), SchemaUtil.getTableNameFromFullName(fullTableName));
            }
        }
        HTableDescriptor newDesc = generateTableDescriptor(physicalTableName, existingDesc, tableType, props, families, splits, isNamespaceMapped);
        if (!tableExist) {
            // Local index tables use a split policy that keeps index regions aligned
            // with their data table's regions.
            if (newDesc.getValue(MetaDataUtil.IS_LOCAL_INDEX_TABLE_PROP_BYTES) != null && Boolean.TRUE.equals(PBoolean.INSTANCE.toObject(newDesc.getValue(MetaDataUtil.IS_LOCAL_INDEX_TABLE_PROP_BYTES)))) {
                newDesc.setValue(HTableDescriptor.SPLIT_POLICY, IndexRegionSplitPolicy.class.getName());
            }
            // Remove the splitPolicy attribute to prevent HBASE-12570
            if (isMetaTable) {
                newDesc.remove(HTableDescriptor.SPLIT_POLICY);
            }
            try {
                if (splits == null) {
                    admin.createTable(newDesc);
                } else {
                    admin.createTable(newDesc, splits);
                }
            } catch (TableExistsException e) {
                // Another client beat us to creating the HBase metadata; treat the
                // table as existing with no metadata change on our side.
                return null;
            }
            if (isMetaTable) {
                checkClientServerCompatibility(SchemaUtil.getPhysicalName(SYSTEM_CATALOG_NAME_BYTES, this.getProps()).getName());
                /*
                 * Now we modify the table to add the split policy, since we know that the client and
                 * server are compatible. This works around HBASE-12570 which causes the cluster to be
                 * brought down.
                 */
                newDesc.setValue(HTableDescriptor.SPLIT_POLICY, MetaDataSplitPolicy.class.getName());
                modifyTable(physicalTableName, newDesc, true);
            }
            return null;
        } else {
            if (isMetaTable) {
                checkClientServerCompatibility(SchemaUtil.getPhysicalName(SYSTEM_CATALOG_NAME_BYTES, this.getProps()).getName());
            } else {
                // Apply the local-index split policy if any local index family is present
                // and no conflicting policy is already set.
                for (Pair<byte[], Map<String, Object>> family : families) {
                    if ((newDesc.getValue(HTableDescriptor.SPLIT_POLICY) == null || !newDesc.getValue(HTableDescriptor.SPLIT_POLICY).equals(IndexRegionSplitPolicy.class.getName())) && Bytes.toString(family.getFirst()).startsWith(QueryConstants.LOCAL_INDEX_COLUMN_FAMILY_PREFIX)) {
                        newDesc.setValue(HTableDescriptor.SPLIT_POLICY, IndexRegionSplitPolicy.class.getName());
                        break;
                    }
                }
            }
            if (!modifyExistingMetaData) {
                // Caller already knows that no metadata was changed
                return existingDesc;
            }
            boolean willBeTx = Boolean.TRUE.equals(props.get(TableProperty.TRANSACTIONAL.name()));
            // When switching an existing table to transactional, flag it so pre-existing
            // non-transactional data is correctly read.
            if (willBeTx) {
                newDesc.setValue(PhoenixTransactionContext.READ_NON_TX_DATA, Boolean.TRUE.toString());
            } else {
                // Switching an existing transactional table back to
                // non-transactional is not allowed.
                if (existingDesc.hasCoprocessor(PhoenixTransactionalProcessor.class.getName())) {
                    throw new SQLExceptionInfo.Builder(SQLExceptionCode.TX_MAY_NOT_SWITCH_TO_NON_TX).setSchemaName(SchemaUtil.getSchemaNameFromFullName(physicalTableName)).setTableName(SchemaUtil.getTableNameFromFullName(physicalTableName)).build().buildException();
                }
                newDesc.remove(PhoenixTransactionContext.READ_NON_TX_DATA);
            }
            if (existingDesc.equals(newDesc)) {
                // Indicate that no metadata was changed
                return null;
            }
            modifyTable(physicalTableName, newDesc, true);
            return newDesc;
        }
    } catch (IOException e) {
        sqlE = ServerUtil.parseServerException(e);
    } catch (InterruptedException e) {
        // restore the interrupt status
        Thread.currentThread().interrupt();
        sqlE = new SQLExceptionInfo.Builder(SQLExceptionCode.INTERRUPTED_EXCEPTION).setRootCause(e).build().buildException();
    } catch (TimeoutException e) {
        sqlE = new SQLExceptionInfo.Builder(SQLExceptionCode.OPERATION_TIMED_OUT).setRootCause(e.getCause() != null ? e.getCause() : e).build().buildException();
    } finally {
        if (sqlE != null) {
            throw sqlE;
        }
    }
    // will never make it here
    return null;
}
Use of org.apache.hadoop.hbase.TableExistsException in project SpyGlass by ParallelAI: class GenerateTestingHTables, method createTestTable.
/**
 * Creates one of the predefined testing tables against the configured test quorum.
 * TABLE_01 and TABLE_02 each get a single "data" column family with three versions;
 * an already-existing table is logged and tolerated.
 *
 * @param testingTable which predefined table to create
 * @throws IOException if the HBaseAdmin cannot be constructed or closed
 */
private static void createTestTable(TestingTable testingTable) throws IOException {
    // Reset configuration so only the test quorum settings are in effect.
    config.clear();
    config.set("hbase.zookeeper.quorum", QUORUM);
    config.set("hbase.zookeeper.property.clientPort", QUORUM_PORT);
    HBaseAdmin hbase = new HBaseAdmin(config);
    // Fix: close the admin handle on every path. Previously close() was the last
    // statement and was skipped whenever descriptor setup or createTable threw an
    // unchecked exception, leaking the underlying HBase connection.
    try {
        // Get and set the name of the new table
        String tableName = testingTable.name();
        HTableDescriptor newTable = new HTableDescriptor(tableName);
        // Table1
        if (testingTable == TestingTable.TABLE_01) {
            HColumnDescriptor meta = new HColumnDescriptor("data");
            meta.setMaxVersions(3).setCompressionType(Compression.Algorithm.NONE).setInMemory(HColumnDescriptor.DEFAULT_IN_MEMORY).setBlockCacheEnabled(HColumnDescriptor.DEFAULT_BLOCKCACHE).setTimeToLive(HColumnDescriptor.DEFAULT_TTL).setBloomFilterType(StoreFile.BloomType.NONE);
            newTable.addFamily(meta);
        // Table2
        } else if (testingTable == TestingTable.TABLE_02) {
            HColumnDescriptor meta = new HColumnDescriptor("data".getBytes());
            meta.setMaxVersions(3).setCompressionType(Compression.Algorithm.NONE).setInMemory(HColumnDescriptor.DEFAULT_IN_MEMORY).setBlockCacheEnabled(HColumnDescriptor.DEFAULT_BLOCKCACHE).setTimeToLive(HColumnDescriptor.DEFAULT_TTL).setBloomFilterType(StoreFile.BloomType.NONE);
            // HColumnDescriptor prefix = new HColumnDescriptor("account".getBytes());
            // newTable.addFamily(prefix);
            newTable.addFamily(meta);
            LOG.info("scan 'TABLE_02' , { VERSIONS => 3 }");
        }
        try {
            LOG.info("Creating table " + tableName);
            hbase.createTable(newTable);
        } catch (TableExistsException et) {
            // Table may survive a previous test run; log and continue.
            LOG.error("TableExistsException for table: " + tableName);
            LOG.debug(et.toString());
        } catch (IOException e) {
            // Best-effort test setup: log the failure rather than aborting the run.
            LOG.error("IOException: " + e.toString());
        }
    } finally {
        hbase.close();
    }
}
Use of org.apache.hadoop.hbase.TableExistsException in project hbase by apache: class TestAdmin2, method testCreateBadTables.
/**
 * Verifies that creating hbase:meta fails with TableExistsException, and that under
 * concurrent creation of the same table exactly one thread succeeds while all others
 * fail with TableExistsException.
 */
@Test
public void testCreateBadTables() throws IOException {
    String msg = null;
    try {
        ADMIN.createTable(TableDescriptorBuilder.newBuilder(TableName.META_TABLE_NAME).build());
    } catch (TableExistsException e) {
        msg = e.toString();
    }
    assertTrue("Unexcepted exception message " + msg, msg != null && msg.startsWith(TableExistsException.class.getName()) && msg.contains(TableName.META_TABLE_NAME.getNameAsString()));
    // Now try and do concurrent creation with a bunch of threads.
    TableDescriptor tableDescriptor = TableDescriptorBuilder.newBuilder(TableName.valueOf(name.getMethodName())).setColumnFamily(ColumnFamilyDescriptorBuilder.of(HConstants.CATALOG_FAMILY)).build();
    int count = 10;
    Thread[] threads = new Thread[count];
    final AtomicInteger successes = new AtomicInteger(0);
    final AtomicInteger failures = new AtomicInteger(0);
    final Admin localAdmin = ADMIN;
    for (int i = 0; i < count; i++) {
        threads[i] = new Thread(Integer.toString(i)) {
            @Override
            public void run() {
                try {
                    localAdmin.createTable(tableDescriptor);
                    successes.incrementAndGet();
                } catch (TableExistsException e) {
                    failures.incrementAndGet();
                } catch (IOException e) {
                    throw new RuntimeException("Failed threaded create" + getName(), e);
                }
            }
        };
    }
    for (int i = 0; i < count; i++) {
        threads[i].start();
    }
    // Fix: wait for each thread with join() instead of the previous sleep(100)
    // polling loop, and restore the interrupt status that was silently swallowed.
    for (int i = 0; i < count; i++) {
        boolean interrupted = false;
        while (threads[i].isAlive()) {
            try {
                threads[i].join();
            } catch (InterruptedException e) {
                // Keep waiting (test must see all threads finish), but remember
                // the interrupt so we can re-assert it afterwards.
                interrupted = true;
            }
        }
        if (interrupted) {
            Thread.currentThread().interrupt();
        }
    }
    // All threads are now dead. Count up how many tables were created and
    // how many failed w/ appropriate exception.
    assertEquals(1, successes.get());
    assertEquals(count - 1, failures.get());
}
Use of org.apache.hadoop.hbase.TableExistsException in project hbase by apache: class MasterProcedureScheduler, method completionCleanup.
/**
 * Performs queue cleanup after a procedure completes, dispatching on the procedure's
 * interface: table procedures may mark their table queue as deleted; peer and server
 * procedures get their respective queue cleanup; everything else is a no-op.
 */
@Override
public void completionCleanup(final Procedure proc) {
    if (proc instanceof TableProcedureInterface) {
        final TableProcedureInterface tableProc = (TableProcedureInterface) proc;
        final boolean queueRemovable;
        if (!proc.hasException()) {
            // Completed cleanly: the table is gone only if this was a DELETE.
            queueRemovable = tableProc.getTableOperationType() == TableOperationType.DELETE;
        } else {
            final Exception cause = proc.getException().unwrapRemoteException();
            if (tableProc.getTableOperationType() == TableOperationType.CREATE) {
                // A CREATE that failed for any reason other than "already exists"
                // leaves no table behind.
                queueRemovable = !(cause instanceof TableExistsException);
            } else {
                // Any other operation that failed because the table was absent.
                queueRemovable = cause instanceof TableNotFoundException;
            }
        }
        if (queueRemovable) {
            markTableAsDeleted(tableProc.getTableName(), proc);
        }
        return;
    }
    if (proc instanceof PeerProcedureInterface) {
        tryCleanupPeerQueue(getPeerId(proc), proc);
        return;
    }
    if (proc instanceof ServerProcedureInterface) {
        tryCleanupServerQueue(getServerName(proc), proc);
        return;
    }
    // No cleanup for other procedure types, yet.
}
Use of org.apache.hadoop.hbase.TableExistsException in project hbase by apache: class CreateTableProcedure, method prepareCreate.
/**
 * Validates the create-table request before any work is done: rejects an already
 * existing table, a descriptor without column families, an empty or missing region
 * server group, and invalid store file tracker configuration.
 *
 * @param env the master procedure environment
 * @return true when the procedure may proceed, false when a failure was recorded
 * @throws IOException if group or store-file-tracker validation fails
 */
private boolean prepareCreate(final MasterProcedureEnv env) throws IOException {
    final TableName table = getTableName();
    // Guard: the table must not already be registered with the master.
    if (env.getMasterServices().getTableDescriptors().exists(table)) {
        setFailure("master-create-table", new TableExistsException(getTableName()));
        return false;
    }
    // Guard: a table with no column family is unusable; reject it up front.
    if (tableDescriptor.getColumnFamilyCount() == 0) {
        setFailure("master-create-table", new DoNotRetryIOException("Table " + getTableName().toString() + " should have at least one column family."));
        return false;
    }
    if (!table.isSystemTable()) {
        // do not check rs group for system tables as we may block the bootstrap.
        Supplier<String> describeTarget = () -> "table " + table;
        RSGroupInfo group = MasterProcedureUtil.checkGroupExists(env.getMasterServices().getRSGroupInfoManager()::getRSGroup, tableDescriptor.getRegionServerGroup(), describeTarget);
        if (group == null) {
            // The table itself carries no group; fall back to the namespace's group.
            String namespaceName = table.getNamespaceAsString();
            NamespaceDescriptor namespaceDesc = env.getMasterServices().getClusterSchema().getNamespace(namespaceName);
            describeTarget = () -> "table " + table + "(inherit from namespace)";
            group = MasterProcedureUtil.checkGroupExists(env.getMasterServices().getRSGroupInfoManager()::getRSGroup, MasterProcedureUtil.getNamespaceGroup(namespaceDesc), describeTarget);
        }
        MasterProcedureUtil.checkGroupNotEmpty(group, describeTarget);
    }
    // check for store file tracker configurations
    StoreFileTrackerValidationUtils.checkForCreateTable(env.getMasterConfiguration(), tableDescriptor);
    return true;
}
Aggregations