Use of org.apache.hadoop.hbase.client.HTableInterface in the Apache Phoenix project.
From the class ConnectionQueryServicesImpl, method incrementSequenceValues.
@SuppressWarnings("deprecation")
private void incrementSequenceValues(List<SequenceAllocation> sequenceAllocations, long timestamp, long[] values, SQLException[] exceptions, Sequence.ValueOp op) throws SQLException {
    List<Sequence> sequences = Lists.newArrayListWithExpectedSize(sequenceAllocations.size());
    for (SequenceAllocation sequenceAllocation : sequenceAllocations) {
        SequenceKey key = sequenceAllocation.getSequenceKey();
        Sequence newSequences = new Sequence(key);
        Sequence sequence = sequenceMap.putIfAbsent(key, newSequences);
        if (sequence == null) {
            sequence = newSequences;
        }
        sequences.add(sequence);
    }
    try {
        for (Sequence sequence : sequences) {
            sequence.getLock().lock();
        }
        // Now that we have all the locks we need, increment the sequences
        List<Increment> incrementBatch = Lists.newArrayListWithExpectedSize(sequences.size());
        List<Sequence> toIncrementList = Lists.newArrayListWithExpectedSize(sequences.size());
        int[] indexes = new int[sequences.size()];
        for (int i = 0; i < sequences.size(); i++) {
            Sequence sequence = sequences.get(i);
            try {
                values[i] = sequence.incrementValue(timestamp, op, sequenceAllocations.get(i).getNumAllocations());
            } catch (EmptySequenceCacheException e) {
                indexes[toIncrementList.size()] = i;
                toIncrementList.add(sequence);
                Increment inc = sequence.newIncrement(timestamp, op, sequenceAllocations.get(i).getNumAllocations());
                incrementBatch.add(inc);
            } catch (SQLException e) {
                exceptions[i] = e;
            }
        }
        if (toIncrementList.isEmpty()) {
            return;
        }
        HTableInterface hTable = this.getTable(SchemaUtil.getPhysicalName(PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_NAME_BYTES, this.getProps()).getName());
        Object[] resultObjects = null;
        SQLException sqlE = null;
        try {
            resultObjects = hTable.batch(incrementBatch);
        } catch (IOException e) {
            sqlE = ServerUtil.parseServerException(e);
        } catch (InterruptedException e) {
            // restore the interrupt status
            Thread.currentThread().interrupt();
            sqlE = new SQLExceptionInfo.Builder(SQLExceptionCode.INTERRUPTED_EXCEPTION).setRootCause(e).build().buildException();
        } finally {
            try {
                hTable.close();
            } catch (IOException e) {
                if (sqlE == null) {
                    sqlE = ServerUtil.parseServerException(e);
                } else {
                    sqlE.setNextException(ServerUtil.parseServerException(e));
                }
            }
            if (sqlE != null) {
                throw sqlE;
            }
        }
        for (int i = 0; i < resultObjects.length; i++) {
            Sequence sequence = toIncrementList.get(i);
            Result result = (Result) resultObjects[i];
            try {
                long numToAllocate = Bytes.toLong(incrementBatch.get(i).getAttribute(SequenceRegionObserver.NUM_TO_ALLOCATE));
                values[indexes[i]] = sequence.incrementValue(result, op, numToAllocate);
            } catch (SQLException e) {
                exceptions[indexes[i]] = e;
            }
        }
    } finally {
        for (Sequence sequence : sequences) {
            sequence.getLock().unlock();
        }
    }
}
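Note that HTableInterface was deprecated in HBase 1.0 and removed in 2.0, which is why the method carries @SuppressWarnings("deprecation"). The same batch pattern against the non-deprecated org.apache.hadoop.hbase.client.Table API might look like the following minimal sketch; the configuration, table name, and helper method are illustrative placeholders, not Phoenix internals.

import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Increment;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;

// Minimal sketch, assuming HBase 1.0+. The table name is a placeholder.
static void batchIncrements(Configuration conf, List<Increment> increments) throws Exception {
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Table table = connection.getTable(TableName.valueOf("SYSTEM.SEQUENCE"))) {
        Object[] results = new Object[increments.size()];
        // batch() fills the results array in the same order as the requests
        table.batch(increments, results);
        for (Object r : results) {
            Result result = (Result) r;
            // map each Result back to its Sequence, as incrementSequenceValues does above
        }
    }
}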
Use of org.apache.hadoop.hbase.client.HTableInterface in the Apache Phoenix project.
From the class ConnectionQueryServicesImpl, method dropSequence.
@Override
public long dropSequence(String tenantId, String schemaName, String sequenceName, long timestamp) throws SQLException {
    SequenceKey sequenceKey = new SequenceKey(tenantId, schemaName, sequenceName, nSequenceSaltBuckets);
    Sequence newSequences = new Sequence(sequenceKey);
    Sequence sequence = sequenceMap.putIfAbsent(sequenceKey, newSequences);
    if (sequence == null) {
        sequence = newSequences;
    }
    try {
        sequence.getLock().lock();
        // Now that we have the lock we need, drop the sequence
        Append append = sequence.dropSequence(timestamp);
        HTableInterface htable = this.getTable(SchemaUtil.getPhysicalName(PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_NAME_BYTES, this.getProps()).getName());
        try {
            Result result = htable.append(append);
            return sequence.dropSequence(result);
        } catch (IOException e) {
            throw ServerUtil.parseServerException(e);
        } finally {
            Closeables.closeQuietly(htable);
        }
    } finally {
        sequence.getLock().unlock();
    }
}
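The Append built here is interpreted server-side by Phoenix's SequenceRegionObserver coprocessor. From a client's perspective, this code path is driven by the DROP SEQUENCE statement; a minimal JDBC sketch, where the connection URL and sequence name are illustrative placeholders:

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.Statement;

// Minimal sketch: DROP SEQUENCE issued over JDBC ends up in dropSequence() above.
try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost");
     Statement stmt = conn.createStatement()) {
    stmt.execute("CREATE SEQUENCE my_schema.my_seq");
    stmt.execute("DROP SEQUENCE my_schema.my_seq");
}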
Use of org.apache.hadoop.hbase.client.HTableInterface in the Apache Phoenix project.
From the class ConnectionQueryServicesImpl, method createSysMutexTable.
private void createSysMutexTable(HBaseAdmin admin) throws IOException, SQLException {
    try {
        final TableName mutexTableName = TableName.valueOf(PhoenixDatabaseMetaData.SYSTEM_MUTEX_NAME_BYTES);
        List<TableName> systemTables = getSystemTableNames(admin);
        if (systemTables.contains(mutexTableName)) {
            logger.debug("System mutex table already appears to exist, not creating it");
            return;
        }
        HTableDescriptor tableDesc = new HTableDescriptor(mutexTableName);
        HColumnDescriptor columnDesc = new HColumnDescriptor(PhoenixDatabaseMetaData.SYSTEM_MUTEX_FAMILY_NAME_BYTES);
        // Let mutex expire after some time
        columnDesc.setTimeToLive(TTL_FOR_MUTEX);
        tableDesc.addFamily(columnDesc);
        admin.createTable(tableDesc);
        try (HTableInterface sysMutexTable = getTable(PhoenixDatabaseMetaData.SYSTEM_MUTEX_NAME_BYTES)) {
            byte[] mutexRowKey = SchemaUtil.getTableKey(null, PhoenixDatabaseMetaData.SYSTEM_CATALOG_SCHEMA, PhoenixDatabaseMetaData.SYSTEM_CATALOG_TABLE);
            Put put = new Put(mutexRowKey);
            put.add(PhoenixDatabaseMetaData.SYSTEM_MUTEX_FAMILY_NAME_BYTES, UPGRADE_MUTEX, UPGRADE_MUTEX_UNLOCKED);
            sysMutexTable.put(put);
        }
    } catch (TableExistsException e) {
        // Ignore
    }
}
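The UPGRADE_MUTEX cell seeded here is intended to be acquired later with an atomic compare-and-swap. A minimal sketch of that pattern against the same HTableInterface; UPGRADE_MUTEX_LOCKED is an assumed sentinel value for illustration, not necessarily Phoenix's constant:

// Minimal sketch: take the mutex only if the cell still holds the UNLOCKED value.
// UPGRADE_MUTEX_LOCKED is an illustrative placeholder.
Put lockPut = new Put(mutexRowKey);
lockPut.add(PhoenixDatabaseMetaData.SYSTEM_MUTEX_FAMILY_NAME_BYTES, UPGRADE_MUTEX, UPGRADE_MUTEX_LOCKED);
boolean acquired = sysMutexTable.checkAndPut(mutexRowKey,
        PhoenixDatabaseMetaData.SYSTEM_MUTEX_FAMILY_NAME_BYTES, UPGRADE_MUTEX,
        UPGRADE_MUTEX_UNLOCKED, lockPut); // true only if no other client locked it first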
Use of org.apache.hadoop.hbase.client.HTableInterface in the Apache Phoenix project.
From the class ConnectionQueryServicesImpl, method ensureSystemTablesUpgraded.
void ensureSystemTablesUpgraded(ReadOnlyProps props) throws SQLException, IOException, IllegalArgumentException, InterruptedException {
    if (!SchemaUtil.isNamespaceMappingEnabled(PTableType.SYSTEM, props)) {
        return;
    }
    HTableInterface metatable = null;
    try (HBaseAdmin admin = getAdmin()) {
        // Namespace-mapping is enabled at this point.
        try {
            ensureNamespaceCreated(QueryConstants.SYSTEM_SCHEMA_NAME);
        } catch (PhoenixIOException e) {
            // Two possibilities:
            // 1) We could not access the namespace descriptor; the namespace may or may not exist.
            // 2) We could not create the namespace.
            // In either case, if the namespace does not exist we will fail as expected below;
            // if it does exist and is mapped, the check below will exit gracefully.
        }
        List<TableName> tableNames = getSystemTableNames(admin);
        // No tables exist matching "SYSTEM\..*"; they are all already in "SYSTEM:.*"
        if (tableNames.size() == 0) {
            return;
        }
        // Try to move any remaining tables matching "SYSTEM\..*" into "SYSTEM:"
        if (tableNames.size() > 5) {
            logger.warn("Expected 5 system tables but found " + tableNames.size() + ":" + tableNames);
        }
        byte[] mappedSystemTable = SchemaUtil.getPhysicalName(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME_BYTES, props).getName();
        metatable = getTable(mappedSystemTable);
        if (tableNames.contains(PhoenixDatabaseMetaData.SYSTEM_CATALOG_HBASE_TABLE_NAME)) {
            if (!admin.tableExists(mappedSystemTable)) {
                UpgradeUtil.mapTableToNamespace(admin, metatable, PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME, props, null, PTableType.SYSTEM, null);
                ConnectionQueryServicesImpl.this.removeTable(null, PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME, null, MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_1_0);
            }
            tableNames.remove(PhoenixDatabaseMetaData.SYSTEM_CATALOG_HBASE_TABLE_NAME);
        }
        tableNames.remove(PhoenixDatabaseMetaData.SYSTEM_MUTEX_HBASE_TABLE_NAME);
        for (TableName table : tableNames) {
            UpgradeUtil.mapTableToNamespace(admin, metatable, table.getNameAsString(), props, null, PTableType.SYSTEM, null);
            ConnectionQueryServicesImpl.this.removeTable(null, table.getNameAsString(), null, MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_1_0);
        }
        if (!tableNames.isEmpty()) {
            clearCache();
        }
    } finally {
        if (metatable != null) {
            metatable.close();
        }
    }
}
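The SchemaUtil.isNamespaceMappingEnabled check at the top is driven by configuration. A minimal sketch of the client-side properties involved; the property names are the documented Phoenix ones, but the URL is a placeholder, and both settings must also match the server's hbase-site.xml:

import java.sql.Connection;
import java.sql.DriverManager;
import java.util.Properties;

// Minimal sketch: enable namespace mapping, including for SYSTEM tables.
// These must agree with the server-side hbase-site.xml or connections will fail.
Properties props = new Properties();
props.setProperty("phoenix.schema.isNamespaceMappingEnabled", "true");
props.setProperty("phoenix.schema.mapSystemTablesToNamespace", "true");
Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost", props);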
Use of org.apache.hadoop.hbase.client.HTableInterface in the Apache Phoenix project.
From the class DisableLocalIndexIT, method testDisabledLocalIndexes.
@Test
public void testDisabledLocalIndexes() throws Exception {
    Properties props = PropertiesUtil.deepCopy(TestUtil.TEST_PROPERTIES);
    props.setProperty(QueryServices.ALLOW_LOCAL_INDEX_ATTRIB, Boolean.FALSE.toString());
    Connection conn = DriverManager.getConnection(getUrl(), props);
    conn.setAutoCommit(true);
    String baseName = generateUniqueName();
    String tableName = baseName + "_TABLE";
    String viewName = baseName + "_VIEW";
    String indexName1 = baseName + "_INDEX1";
    String indexName2 = baseName + "_INDEX2";
    conn.createStatement().execute("CREATE TABLE " + tableName + " (k1 VARCHAR NOT NULL, k2 VARCHAR, CONSTRAINT PK PRIMARY KEY(K1,K2)) MULTI_TENANT=true");
    conn.createStatement().execute("UPSERT INTO " + tableName + " VALUES('t1','x')");
    conn.createStatement().execute("UPSERT INTO " + tableName + " VALUES('t2','y')");
    HBaseAdmin admin = conn.unwrap(PhoenixConnection.class).getQueryServices().getAdmin();
    assertFalse(admin.tableExists(Bytes.toBytes(MetaDataUtil.LOCAL_INDEX_TABLE_PREFIX + tableName)));
    admin.close();
    try {
        HTableInterface t = conn.unwrap(PhoenixConnection.class).getQueryServices().getTable(Bytes.toBytes(MetaDataUtil.LOCAL_INDEX_TABLE_PREFIX + tableName));
        // getTable no longer throws here, so force an RPC instead
        t.getTableDescriptor();
        fail("Local index table should not have been created");
    } catch (org.apache.hadoop.hbase.TableNotFoundException e) {
        // expected
    } finally {
        admin.close();
    }
    Properties tsconnProps = PropertiesUtil.deepCopy(TestUtil.TEST_PROPERTIES);
    tsconnProps.setProperty(QueryServices.ALLOW_LOCAL_INDEX_ATTRIB, Boolean.FALSE.toString());
    tsconnProps.setProperty(PhoenixRuntime.TENANT_ID_ATTRIB, "t1");
    Connection tsconn = DriverManager.getConnection(getUrl(), tsconnProps);
    tsconn.createStatement().execute("CREATE VIEW " + viewName + "(V1 VARCHAR) AS SELECT * FROM " + tableName);
    tsconn.createStatement().execute("CREATE INDEX " + indexName1 + " ON " + viewName + "(V1)");
    tsconn.unwrap(PhoenixConnection.class).getQueryServices().getTable(Bytes.toBytes(MetaDataUtil.VIEW_INDEX_TABLE_PREFIX + tableName));
    try {
        conn.createStatement().execute("CREATE LOCAL INDEX " + indexName2 + " ON " + tableName + "(k2)");
        fail("Should not allow creation of local index");
    } catch (SQLException e) {
        assertEquals(SQLExceptionCode.UNALLOWED_LOCAL_INDEXES.getErrorCode(), e.getErrorCode());
    }
    try {
        tsconn.createStatement().execute("CREATE LOCAL INDEX " + indexName2 + " ON " + viewName + "(k2, v1)");
        fail("Should not allow creation of local index");
    } catch (SQLException e) {
        assertEquals(SQLExceptionCode.UNALLOWED_LOCAL_INDEXES.getErrorCode(), e.getErrorCode());
    }
}
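For contrast, with ALLOW_LOCAL_INDEX_ATTRIB left at its default the same DDL succeeds. A minimal sketch reusing the same test utilities; the table and index names are placeholders:

// Minimal sketch: with local indexes allowed (the default), CREATE LOCAL INDEX succeeds.
Properties defaultProps = PropertiesUtil.deepCopy(TestUtil.TEST_PROPERTIES);
try (Connection c = DriverManager.getConnection(getUrl(), defaultProps)) {
    c.createStatement().execute("CREATE TABLE T_LOCAL (k VARCHAR PRIMARY KEY, v VARCHAR)");
    c.createStatement().execute("CREATE LOCAL INDEX IDX_LOCAL ON T_LOCAL(v)");
}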