Use of org.apache.hadoop.hbase.client.HTableInterface in project phoenix by apache.
From the class MappingTableDataTypeIT, method testMappingHbaseTableToPhoenixTable.
@Test
public void testMappingHbaseTableToPhoenixTable() throws Exception {
    String mtest = generateUniqueName();
    final TableName tableName = TableName.valueOf(mtest);
    Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
    PhoenixConnection conn = DriverManager.getConnection(getUrl(), props).unwrap(PhoenixConnection.class);
    HBaseAdmin admin = conn.getQueryServices().getAdmin();
    try {
        // Create the HBase table with two column families.
        HTableDescriptor descriptor = new HTableDescriptor(tableName);
        HColumnDescriptor columnDescriptor1 = new HColumnDescriptor(Bytes.toBytes("cf1"));
        HColumnDescriptor columnDescriptor2 = new HColumnDescriptor(Bytes.toBytes("cf2"));
        descriptor.addFamily(columnDescriptor1);
        descriptor.addFamily(columnDescriptor2);
        admin.createTable(descriptor);
        HTableInterface t = conn.getQueryServices().getTable(Bytes.toBytes(mtest));
        insertData(tableName.getName(), admin, t);
        // create a phoenix table that maps to the existing HBase table
        createPhoenixTable(mtest);
        String selectSql = "SELECT * FROM " + mtest;
        ResultSet rs = conn.createStatement().executeQuery(selectSql);
        ResultSetMetaData rsMetaData = rs.getMetaData();
        assertTrue("Expected single row", rs.next());
        // verify that values from cf2 are not returned
        assertEquals("Number of columns", 2, rsMetaData.getColumnCount());
        assertEquals("Column Value", "value1", rs.getString(2));
        assertFalse("Expected single row", rs.next());
        // delete the row
        String deleteSql = "DELETE FROM " + mtest + " WHERE id = 'row'";
        conn.createStatement().executeUpdate(deleteSql);
        conn.commit();
        // verify that no rows are returned when querying through phoenix
        rs = conn.createStatement().executeQuery(selectSql);
        assertFalse("Expected no row", rs.next());
        // verify that the row with a value for cf2 still exists when using the HBase APIs
        Scan scan = new Scan();
        ResultScanner results = t.getScanner(scan);
        Result result = results.next();
        assertNotNull("Expected single row", result);
        List<KeyValue> kvs = result.getColumn(Bytes.toBytes("cf2"), Bytes.toBytes("q2"));
        assertEquals("Expected single value", 1, kvs.size());
        assertEquals("Column Value", "value2", Bytes.toString(kvs.get(0).getValue()));
        assertNull("Expected single row", results.next());
        // close the table only after the raw scan above; closing it right after
        // insertData, as the original did, would use the table after close
        t.close();
    } finally {
        admin.close();
    }
}
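The test relies on two helper methods, insertData and createPhoenixTable, whose bodies are not shown here. Based on the assertions above (two columns in the Phoenix result set, "value1" in column 2, and cf2:q2 visible only through the raw HBase API), a plausible sketch of their shape is below; the row key "row", the qualifiers q1/q2, and the DDL are inferred from the assertions, not the actual helper code.

// Hypothetical helper sketches; column qualifiers and values inferred from the assertions.
private void insertData(byte[] tableName, HBaseAdmin admin, HTableInterface t) throws Exception {
    Put put = new Put(Bytes.toBytes("row"));
    put.add(Bytes.toBytes("cf1"), Bytes.toBytes("q1"), Bytes.toBytes("value1"));
    put.add(Bytes.toBytes("cf2"), Bytes.toBytes("q2"), Bytes.toBytes("value2"));
    t.put(put);
    admin.flush(tableName);
}

private void createPhoenixTable(String tableName) throws SQLException {
    // Map only cf1.q1 into Phoenix; cf2 stays invisible, as the column-count assertion expects.
    String ddl = "CREATE TABLE " + tableName + " (id VARCHAR NOT NULL PRIMARY KEY, \"cf1\".\"q1\" VARCHAR)";
    try (Connection conn = DriverManager.getConnection(getUrl())) {
        conn.createStatement().execute(ddl);
    }
}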
Use of org.apache.hadoop.hbase.client.HTableInterface in project phoenix by apache.
From the class NativeHBaseTypesIT, method testNegativeCompareNegativeValue.
@SuppressWarnings("deprecation")
@Test
public void testNegativeCompareNegativeValue() throws Exception {
    String query = "SELECT string_key FROM HBASE_NATIVE WHERE uint_key > 100000";
    // Run the query at timestamp ts + 7
    String url = getUrl() + ";" + PhoenixRuntime.CURRENT_SCN_ATTRIB + "=" + (ts + 7);
    Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
    PhoenixConnection conn = DriverManager.getConnection(url, props).unwrap(PhoenixConnection.class);
    HTableInterface hTable = conn.getQueryServices().getTable(SchemaUtil.getTableNameAsBytes(HBASE_NATIVE_SCHEMA_NAME, HBASE_NATIVE));
    List<Row> mutations = new ArrayList<Row>();
    byte[] family = Bytes.toBytes("1");
    byte[] uintCol = Bytes.toBytes("UINT_COL");
    byte[] ulongCol = Bytes.toBytes("ULONG_COL");
    byte[] key;
    Put put;
    // Need to use the native APIs because the Phoenix APIs wouldn't let you insert a
    // negative number for an unsigned type
    key = ByteUtil.concat(Bytes.toBytes(-10), Bytes.toBytes(100L), Bytes.toBytes("e"));
    put = new Put(key);
    // Insert at a later timestamp than the other queries in this test use, so that
    // we don't affect them
    put.add(family, uintCol, ts + 6, Bytes.toBytes(10));
    put.add(family, ulongCol, ts + 6, Bytes.toBytes(100L));
    put.add(family, QueryConstants.EMPTY_COLUMN_BYTES, ts + 6, ByteUtil.EMPTY_BYTE_ARRAY);
    mutations.add(put);
    hTable.batch(mutations);
    // Demonstrates a weakness of HBase's Bytes serialization: negative numbers
    // show up as bigger than positive numbers
    PreparedStatement statement = conn.prepareStatement(query);
    ResultSet rs = statement.executeQuery();
    assertTrue(rs.next());
    assertEquals("e", rs.getString(1));
    assertFalse(rs.next());
}
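The weakness the comment points at is concrete: org.apache.hadoop.hbase.util.Bytes serializes Java ints in two's complement, and HBase compares byte arrays as unsigned, so any negative int sorts after every positive one. A minimal self-contained illustration, using only plain Bytes calls (no Phoenix involved):

import org.apache.hadoop.hbase.util.Bytes;

public class BytesOrderDemo {
    public static void main(String[] args) {
        byte[] neg = Bytes.toBytes(-10);     // FF FF FF F6 in two's complement
        byte[] pos = Bytes.toBytes(100000);  // 00 01 86 A0
        // Unsigned lexicographic comparison: 0xFF > 0x00, so -10 sorts after 100000,
        // which is why the "uint_key > 100000" scan above matches the row keyed with -10.
        System.out.println(Bytes.compareTo(neg, pos) > 0); // prints true
    }
}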
Use of org.apache.hadoop.hbase.client.HTableInterface in project phoenix by apache.
From the class PhoenixRuntimeIT, method testGetTenantIdExpression.
private void testGetTenantIdExpression(boolean isSalted) throws Exception {
    Connection conn = DriverManager.getConnection(getUrl());
    conn.setAutoCommit(true);
    String tableName = generateUniqueName();
    String sequenceName = generateUniqueName();
    String t1 = generateUniqueName();
    // ensure t2 sorts after t1
    String t2 = t1 + generateUniqueName();
    conn.createStatement().execute("CREATE TABLE " + tableName + " (k1 VARCHAR NOT NULL, k2 VARCHAR, CONSTRAINT PK PRIMARY KEY(K1,K2)) MULTI_TENANT=true" + (isSalted ? ",SALT_BUCKETS=3" : ""));
    conn.createStatement().execute("CREATE SEQUENCE " + sequenceName);
    conn.createStatement().execute("UPSERT INTO " + tableName + " VALUES('" + t1 + "','x')");
    conn.createStatement().execute("UPSERT INTO " + tableName + " VALUES('" + t2 + "','y')");
    Properties props = PropertiesUtil.deepCopy(TestUtil.TEST_PROPERTIES);
    props.setProperty(PhoenixRuntime.TENANT_ID_ATTRIB, t1);
    Connection tsconn = DriverManager.getConnection(getUrl(), props);
    tsconn.createStatement().execute("CREATE SEQUENCE " + sequenceName);
    Expression e1 = PhoenixRuntime.getTenantIdExpression(tsconn, PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_NAME);
    HTableInterface htable1 = tsconn.unwrap(PhoenixConnection.class).getQueryServices().getTable(PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_NAME_BYTES);
    assertTenantIds(e1, htable1, new FirstKeyOnlyFilter(), new String[] { "", t1 });
    String viewName = generateUniqueName();
    tsconn.createStatement().execute("CREATE VIEW " + viewName + "(V1 VARCHAR) AS SELECT * FROM " + tableName);
    Expression e2 = PhoenixRuntime.getTenantIdExpression(tsconn, PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME);
    HTableInterface htable2 = conn.unwrap(PhoenixConnection.class).getQueryServices().getTable(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME_BYTES);
    assertTenantIds(e2, htable2, getUserTableAndViewsFilter(), new String[] { "", t1 });
    Expression e3 = PhoenixRuntime.getTenantIdExpression(conn, tableName);
    HTableInterface htable3 = conn.unwrap(PhoenixConnection.class).getQueryServices().getTable(Bytes.toBytes(tableName));
    assertTenantIds(e3, htable3, new FirstKeyOnlyFilter(), new String[] { t1, t2 });
    String basTableName = generateUniqueName();
    conn.createStatement().execute("CREATE TABLE " + basTableName + " (k1 VARCHAR PRIMARY KEY)");
    Expression e4 = PhoenixRuntime.getTenantIdExpression(conn, basTableName);
    // not a multi-tenant table, so there is no tenant-id expression
    assertNull(e4);
    String indexName1 = generateUniqueName();
    tsconn.createStatement().execute("CREATE INDEX " + indexName1 + " ON " + viewName + "(V1)");
    Expression e5 = PhoenixRuntime.getTenantIdExpression(tsconn, indexName1);
    HTableInterface htable5 = tsconn.unwrap(PhoenixConnection.class).getQueryServices().getTable(Bytes.toBytes(MetaDataUtil.VIEW_INDEX_TABLE_PREFIX + tableName));
    assertTenantIds(e5, htable5, new FirstKeyOnlyFilter(), new String[] { t1 });
    String indexName2 = generateUniqueName();
    conn.createStatement().execute("CREATE INDEX " + indexName2 + " ON " + tableName + "(k2)");
    Expression e6 = PhoenixRuntime.getTenantIdExpression(conn, indexName2);
    HTableInterface htable6 = conn.unwrap(PhoenixConnection.class).getQueryServices().getTable(Bytes.toBytes(indexName2));
    assertTenantIds(e6, htable6, new FirstKeyOnlyFilter(), new String[] { t1, t2 });
    tableName = generateUniqueName() + "BAR_" + (isSalted ? "SALTED" : "UNSALTED");
    conn.createStatement().execute("CREATE TABLE " + tableName + " (k1 VARCHAR NOT NULL, k2 VARCHAR, CONSTRAINT PK PRIMARY KEY(K1,K2)) " + (isSalted ? "SALT_BUCKETS=3" : ""));
    conn.createStatement().execute("UPSERT INTO " + tableName + " VALUES('" + t1 + "','x')");
    conn.createStatement().execute("UPSERT INTO " + tableName + " VALUES('" + t2 + "','y')");
    Expression e7 = PhoenixRuntime.getFirstPKColumnExpression(conn, tableName);
    HTableInterface htable7 = conn.unwrap(PhoenixConnection.class).getQueryServices().getTable(Bytes.toBytes(tableName));
    assertTenantIds(e7, htable7, new FirstKeyOnlyFilter(), new String[] { t1, t2 });
}
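The assertTenantIds helper is not shown. One plausible shape, assuming it scans the given HBase table with the supplied filter and evaluates the tenant-id Expression against each row (ResultTuple and the Expression.evaluate contract are real Phoenix APIs; the exact assertion style is a guess):

// Hypothetical sketch of the assertTenantIds helper used above.
private static void assertTenantIds(Expression e, HTableInterface htable, Filter filter, String[] expectedTenantIds) throws IOException {
    ImmutableBytesWritable ptr = new ImmutableBytesWritable();
    Scan scan = new Scan();
    scan.setFilter(filter);
    ResultScanner scanner = htable.getScanner(scan);
    Set<String> actualTenantIds = new HashSet<String>();
    for (Result result : scanner) {
        // Evaluate the tenant-id expression against the row key; rows without a
        // tenant id (e.g. global rows) are recorded as the empty string here.
        String tenantId = e.evaluate(new ResultTuple(result), ptr) ? Bytes.toString(ptr.copyBytes()) : "";
        actualTenantIds.add(tenantId);
    }
    assertTrue(actualTenantIds.containsAll(Arrays.asList(expectedTenantIds)));
}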
Use of org.apache.hadoop.hbase.client.HTableInterface in project phoenix by apache.
From the class ConnectionQueryServicesImpl, method releaseUpgradeMutex.
@VisibleForTesting
public boolean releaseUpgradeMutex(byte[] mutexRowKey) {
    boolean released = false;
    try (HTableInterface sysMutexTable = getTable(PhoenixDatabaseMetaData.SYSTEM_MUTEX_NAME_BYTES)) {
        byte[] family = PhoenixDatabaseMetaData.SYSTEM_MUTEX_FAMILY_NAME_BYTES;
        byte[] qualifier = UPGRADE_MUTEX;
        byte[] expectedValue = UPGRADE_MUTEX_LOCKED;
        byte[] newValue = UPGRADE_MUTEX_UNLOCKED;
        Put put = new Put(mutexRowKey);
        put.addColumn(family, qualifier, newValue);
        released = sysMutexTable.checkAndPut(mutexRowKey, family, qualifier, expectedValue, put);
    } catch (Exception e) {
        logger.warn("Release of upgrade mutex failed", e);
    }
    return released;
}
Use of org.apache.hadoop.hbase.client.HTableInterface in project phoenix by apache.
From the class ConnectionQueryServicesImpl, method acquireUpgradeMutex.
/**
 * Acquire a distributed mutex of sorts to make sure only one JVM is able to run the upgrade code,
 * by making use of HBase's checkAndPut api.
 *
 * @return true if the client won the race, false otherwise
 * @throws IOException
 * @throws SQLException
 */
@VisibleForTesting
public boolean acquireUpgradeMutex(long currentServerSideTableTimestamp, byte[] rowToLock) throws IOException, SQLException {
    Preconditions.checkArgument(currentServerSideTableTimestamp < MIN_SYSTEM_TABLE_TIMESTAMP);
    try (HTableInterface sysMutexTable = getTable(PhoenixDatabaseMetaData.SYSTEM_MUTEX_NAME_BYTES)) {
        byte[] family = PhoenixDatabaseMetaData.SYSTEM_MUTEX_FAMILY_NAME_BYTES;
        byte[] qualifier = UPGRADE_MUTEX;
        byte[] oldValue = UPGRADE_MUTEX_UNLOCKED;
        byte[] newValue = UPGRADE_MUTEX_LOCKED;
        Put put = new Put(rowToLock);
        put.addColumn(family, qualifier, newValue);
        boolean acquired = sysMutexTable.checkAndPut(rowToLock, family, qualifier, oldValue, put);
        if (!acquired) {
            /*
             * Because of the TTL on SYSTEM_MUTEX_FAMILY, it is very much possible that the cell
             * has gone away. So we need to retry with an expected value of null. Note there is a
             * small race window here: between the two checkAndPut calls, another request could
             * set the value back to UPGRADE_MUTEX_UNLOCKED. In that scenario the following
             * checkAndPut would still return false even though the lock was available.
             */
            acquired = sysMutexTable.checkAndPut(rowToLock, family, qualifier, null, put);
            if (!acquired) {
                throw new UpgradeInProgressException(getVersion(currentServerSideTableTimestamp), getVersion(MIN_SYSTEM_TABLE_TIMESTAMP));
            }
        }
        return true;
    }
}
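A hedged sketch of how these two methods pair up around the actual upgrade work. Only acquireUpgradeMutex and releaseUpgradeMutex come from this class; the mutex row key (built here with SchemaUtil.getTableKey) and the upgrade body are illustrative assumptions:

// Illustrative only: guard a one-time upgrade section with the checkAndPut mutex.
byte[] mutexRowKey = SchemaUtil.getTableKey(null, PhoenixDatabaseMetaData.SYSTEM_CATALOG_SCHEMA, PhoenixDatabaseMetaData.SYSTEM_CATALOG_TABLE);
// acquireUpgradeMutex returns true or throws UpgradeInProgressException when another
// JVM holds the mutex, so reaching the try block means this client won the race.
acquireUpgradeMutex(currentServerSideTableTimestamp, mutexRowKey);
try {
    // ... run the one-time upgrade steps here ...
} finally {
    releaseUpgradeMutex(mutexRowKey);
}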