Use of org.apache.hadoop.hbase.client.HBaseAdmin in the Apache Phoenix project.
Example from class TableSnapshotReadsMapReduceIT, method deleteSnapshotAndTable.
/**
 * Deletes the snapshot created by the test and then drops the table itself.
 *
 * @param tableName Phoenix table to drop after its snapshot is removed
 * @throws Exception if snapshot deletion or the DROP TABLE statement fails
 */
public void deleteSnapshotAndTable(String tableName) throws Exception {
    // try-with-resources closes both the JDBC connection and the HBase admin
    // even when deleteSnapshot or DROP TABLE throws (the original leaked both
    // on the exception path and never closed the admin at all).
    try (Connection conn = DriverManager.getConnection(getUrl());
            HBaseAdmin admin = conn.unwrap(PhoenixConnection.class).getQueryServices().getAdmin()) {
        admin.deleteSnapshot(SNAPSHOT_NAME);
        conn.createStatement().execute("DROP TABLE " + tableName);
    }
}
Use of org.apache.hadoop.hbase.client.HBaseAdmin in the Apache Phoenix project.
Example from class TenantSpecificTablesDDLIT, method testCreateTenantSpecificTable.
@Test
public void testCreateTenantSpecificTable() throws Exception {
    // Creating a tenant-specific table must not create a physical HBase table;
    // verify that no HBase table matching the tenant table name exists.
    // try-with-resources fixes the original's leak: neither the connection nor
    // the admin was ever closed, even on the success path.
    try (Connection conn = DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TEST_PROPERTIES));
            HBaseAdmin admin = conn.unwrap(PhoenixConnection.class).getQueryServices().getAdmin()) {
        assertEquals(0, admin.listTables(TENANT_TABLE_NAME).length);
    }
}
Use of org.apache.hadoop.hbase.client.HBaseAdmin in the Apache Phoenix project.
Example from class ParameterizedTransactionIT, method testNonTxToTxTableFailure.
// Verifies that a failed non-transactional -> transactional conversion rolls the
// HBase table descriptor back to its non-transactional state. The failure is
// forced by disabling SYSTEM.CATALOG before the ALTER TABLE.
// NOTE(review): test is @Ignore'd; the JDBC connection is never closed — leak
// tolerated here since the test is disabled, but worth fixing if re-enabled.
@Ignore
@Test
public void testNonTxToTxTableFailure() throws Exception {
String nonTxTableName = generateUniqueName();
Connection conn = DriverManager.getConnection(getUrl());
// Put table in SYSTEM schema to prevent attempts to update the cache after we disable SYSTEM.CATALOG
conn.createStatement().execute("CREATE TABLE \"SYSTEM\"." + nonTxTableName + "(k INTEGER PRIMARY KEY, v VARCHAR)" + tableDDLOptions);
conn.createStatement().execute("UPSERT INTO \"SYSTEM\"." + nonTxTableName + " VALUES (1)");
conn.commit();
// Reset empty column value to an empty value like it is pre-transactions
// (writes directly through the HBase API, bypassing Phoenix, so the cell looks
// exactly as it would for a table created before transactions existed).
HTableInterface htable = conn.unwrap(PhoenixConnection.class).getQueryServices().getTable(Bytes.toBytes("SYSTEM." + nonTxTableName));
Put put = new Put(PInteger.INSTANCE.toBytes(1));
put.add(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, QueryConstants.EMPTY_COLUMN_BYTES, ByteUtil.EMPTY_BYTE_ARRAY);
htable.put(put);
// Disable SYSTEM.CATALOG so the metadata update half of ALTER TABLE must fail.
HBaseAdmin admin = conn.unwrap(PhoenixConnection.class).getQueryServices().getAdmin();
admin.disableTable(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME);
try {
// This will succeed initially in updating the HBase metadata, but then will fail when
// the SYSTEM.CATALOG table is attempted to be updated, exercising the code to restore
// the coprocessors back to the non transactional ones.
conn.createStatement().execute("ALTER TABLE \"SYSTEM\"." + nonTxTableName + " SET TRANSACTIONAL=true");
fail();
} catch (SQLException e) {
assertTrue(e.getMessage().contains(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME + " is disabled"));
} finally {
// Re-enable the catalog so later tests aren't poisoned, then release the admin.
admin.enableTable(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME);
admin.close();
}
// The manually written row must still be readable as a NULL-valued Phoenix row.
ResultSet rs = conn.createStatement().executeQuery("SELECT k FROM \"SYSTEM\"." + nonTxTableName + " WHERE v IS NULL");
assertTrue(rs.next());
assertEquals(1, rs.getInt(1));
assertFalse(rs.next());
// Rollback check: the transactional coprocessor must NOT be on the descriptor...
htable = conn.unwrap(PhoenixConnection.class).getQueryServices().getTable(Bytes.toBytes("SYSTEM." + nonTxTableName));
assertFalse(htable.getTableDescriptor().getCoprocessors().contains(PhoenixTransactionalProcessor.class.getName()));
// ...and max versions must still be 1 (transactional tables raise it).
assertEquals(1, conn.unwrap(PhoenixConnection.class).getQueryServices().getTableDescriptor(Bytes.toBytes("SYSTEM." + nonTxTableName)).getFamily(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES).getMaxVersions());
}
Use of org.apache.hadoop.hbase.client.HBaseAdmin in the Apache Phoenix project.
Example from class RoundRobinResultIteratorIT, method testRoundRobinAfterTableSplit.
/**
 * Verifies that the round-robin result iterator still returns every row after the
 * table splits mid-scan, and that the client's region cache picks up the new regions.
 */
@Test
public void testRoundRobinAfterTableSplit() throws Exception {
    String tableName = generateUniqueName();
    byte[] tableNameBytes = Bytes.toBytes(tableName);
    int numRows = setupTableForSplit(tableName);
    Connection conn = getConnection();
    try {
        ConnectionQueryServices services = conn.unwrap(PhoenixConnection.class).getQueryServices();
        int nRegionsBeforeSplit = services.getAllTableRegions(tableNameBytes).size();
        int nRegions = nRegionsBeforeSplit;
        HBaseAdmin admin = services.getAdmin();
        try {
            // Split is an async operation, so poll the region count for up to
            // ~20 seconds (10 tries x 2s). If the test tends to flap, increase
            // the wait time. Thread.sleep replaces the original's CountDownLatch,
            // which was never counted down and served only as an awkward sleep.
            admin.split(tableName);
            int nTries = 0;
            long waitTimeMillis = 2000;
            while (nRegions == nRegionsBeforeSplit && nTries < 10) {
                Thread.sleep(waitTimeMillis);
                nRegions = services.getAllTableRegions(tableNameBytes).size();
                nTries++;
            }
            String query = "SELECT * FROM " + tableName;
            Statement stmt = conn.createStatement();
            // A small fetch size makes scanner caches replenish in parallel.
            stmt.setFetchSize(10);
            ResultSet rs = stmt.executeQuery(query);
            int numRowsRead = 0;
            while (rs.next()) {
                numRowsRead++;
            }
            nRegions = services.getAllTableRegions(tableNameBytes).size();
            // Region cache has been updated, as there are more regions now.
            assertNotEquals(nRegions, nRegionsBeforeSplit);
            // No rows may be lost or duplicated across the split.
            assertEquals(numRows, numRowsRead);
        } finally {
            admin.close();
        }
    } finally {
        // Fixes the original's leak: the connection was never closed.
        conn.close();
    }
}
Use of org.apache.hadoop.hbase.client.HBaseAdmin in the Apache Phoenix project.
Example from class RoundRobinResultIteratorIT, method setupTableForSplit.
/**
 * Creates a salted table with a small MAX_FILESIZE, loads it with 26*26 rows of
 * ~1KB payload (so it will split easily), flushes it, and returns the row count.
 *
 * @param tableName table to create and populate
 * @return number of rows upserted (26 * 26 = 676)
 * @throws Exception on any DDL/DML or HBase admin failure
 */
private static int setupTableForSplit(String tableName) throws Exception {
    int batchSize = 25;
    int maxFileSize = 1024 * 10;
    int payLoadSize = 1024;
    // Presize the builder so the 1KB payload is built without reallocation.
    StringBuilder buf = new StringBuilder(payLoadSize);
    for (int i = 0; i < payLoadSize; i++) {
        buf.append('a');
    }
    String payload = buf.toString();
    int MIN_CHAR = 'a';
    int MAX_CHAR = 'z';
    Connection conn = getConnection();
    // try/finally fixes the original's leak: conn.close() was skipped whenever
    // any statement below threw.
    try {
        conn.createStatement().execute("CREATE TABLE " + tableName + "(" + "a VARCHAR PRIMARY KEY, b VARCHAR) " + HTableDescriptor.MAX_FILESIZE + "=" + maxFileSize + "," + " SALT_BUCKETS = " + NUM_SALT_BUCKETS);
        PreparedStatement stmt = conn.prepareStatement("UPSERT INTO " + tableName + " VALUES(?,?)");
        int rowCount = 0;
        // Two-character PKs "aa".."zz" spread rows across the salt buckets.
        for (int c1 = MIN_CHAR; c1 <= MAX_CHAR; c1++) {
            for (int c2 = MIN_CHAR; c2 <= MAX_CHAR; c2++) {
                String pk = Character.toString((char) c1) + Character.toString((char) c2);
                stmt.setString(1, pk);
                stmt.setString(2, payload);
                stmt.execute();
                rowCount++;
                // Commit in batches to bound client-side mutation state.
                if (rowCount % batchSize == 0) {
                    conn.commit();
                }
            }
        }
        conn.commit();
        ConnectionQueryServices services = conn.unwrap(PhoenixConnection.class).getQueryServices();
        HBaseAdmin admin = services.getAdmin();
        try {
            // Flush memstore to HFiles so the data is eligible for splitting.
            admin.flush(tableName);
        } finally {
            admin.close();
        }
        return rowCount;
    } finally {
        conn.close();
    }
}
Aggregations