use of org.apache.phoenix.query.ConnectionQueryServices in project phoenix by apache.
the class AppendOnlySchemaIT method testTableWithSameSchema.
private void testTableWithSameSchema(boolean notExists, boolean sameClient) throws Exception {
    // use a spied ConnectionQueryServices so we can verify calls to getTable
    ConnectionQueryServices connectionQueryServices = Mockito.spy(driver.getConnectionQueryServices(getUrl(), PropertiesUtil.deepCopy(TEST_PROPERTIES)));
    Properties props = new Properties();
    props.putAll(PhoenixEmbeddedDriver.DEFFAULT_PROPS.asMap());
    try (Connection conn1 = connectionQueryServices.connect(getUrl(), props);
            Connection conn2 = sameClient ? conn1 : connectionQueryServices.connect(getUrl(), props)) {
        String metricTableName = generateUniqueName();
        String viewName = generateUniqueName();
        String metricIdSeqTableName = generateUniqueName();
        // create the sequence used for auto partitioning
        conn1.createStatement().execute("CREATE SEQUENCE " + metricIdSeqTableName + " CACHE 1");
        // create the base table
        conn1.createStatement().execute("CREATE TABLE " + metricTableName + "(metricId INTEGER NOT NULL, metricVal DOUBLE, CONSTRAINT PK PRIMARY KEY(metricId))" + " APPEND_ONLY_SCHEMA = true, UPDATE_CACHE_FREQUENCY=1, AUTO_PARTITION_SEQ=" + metricIdSeqTableName);
        // create the view
        String ddl = "CREATE VIEW " + (notExists ? "IF NOT EXISTS " : "") + viewName + " ( hostName varchar NOT NULL, tagName varChar" + " CONSTRAINT HOSTNAME_PK PRIMARY KEY (hostName))" + " AS SELECT * FROM " + metricTableName + " UPDATE_CACHE_FREQUENCY=300000";
        conn1.createStatement().execute(ddl);
        conn1.createStatement().execute("UPSERT INTO " + viewName + "(hostName, metricVal) VALUES('host1', 1.0)");
        conn1.commit();
        reset(connectionQueryServices);
        // execute the same CREATE VIEW ddl from the second connection
        try {
            conn2.createStatement().execute(ddl);
            if (!notExists) {
                fail("Create Table should fail");
            }
        } catch (TableAlreadyExistsException e) {
            if (notExists) {
                fail("Create Table should not fail");
            }
        }
        // verify getTable rpcs: a separate client resolves the view once; the same client hits its metadata cache
        verify(connectionQueryServices, sameClient ? never() : times(1)).getTable((PName) isNull(), eq(new byte[0]), eq(Bytes.toBytes(viewName)), anyLong(), anyLong());
        // verify no createTable rpcs were made
        verify(connectionQueryServices, never()).createTable(anyListOf(Mutation.class), any(byte[].class), any(PTableType.class), anyMap(), anyList(), any(byte[][].class), eq(false), eq(false));
        reset(connectionQueryServices);
        // execute alter table ddl that adds the same column
        ddl = "ALTER VIEW " + viewName + " ADD " + (notExists ? "IF NOT EXISTS" : "") + " tagName varchar";
        try {
            conn2.createStatement().execute(ddl);
            if (!notExists) {
                fail("Alter Table should fail");
            }
        } catch (ColumnAlreadyExistsException e) {
            if (notExists) {
                fail("Alter Table should not fail");
            }
        }
        // if notExists is true, expect one addColumn call with an empty mutation list (which does not make an rpc);
        // otherwise expect no addColumn calls at all
        verify(connectionQueryServices, notExists ? times(1) : never()).addColumn(eq(Collections.<Mutation>emptyList()), any(PTable.class), anyMap(), anySetOf(String.class), anyListOf(PColumn.class));
        // upsert one row
        conn2.createStatement().execute("UPSERT INTO " + viewName + "(hostName, metricVal) VALUES('host2', 2.0)");
        conn2.commit();
        // verify data in base table
        ResultSet rs = conn2.createStatement().executeQuery("SELECT * from " + metricTableName);
        assertTrue(rs.next());
        assertEquals(1, rs.getInt(1));
        assertEquals(1.0, rs.getDouble(2), 1e-6);
        assertTrue(rs.next());
        assertEquals(1, rs.getInt(1));
        assertEquals(2.0, rs.getDouble(2), 1e-6);
        assertFalse(rs.next());
        // verify data in view
        rs = conn2.createStatement().executeQuery("SELECT * from " + viewName);
        assertTrue(rs.next());
        assertEquals(1, rs.getInt(1));
        assertEquals(1.0, rs.getDouble(2), 1e-6);
        assertEquals("host1", rs.getString(3));
        assertTrue(rs.next());
        assertEquals(1, rs.getInt(1));
        assertEquals(2.0, rs.getDouble(2), 1e-6);
        assertEquals("host2", rs.getString(3));
        assertFalse(rs.next());
    }
}
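The essential pattern above is wrapping the live ConnectionQueryServices in a Mockito spy: every call still reaches the real services (so the DDL and upserts execute normally), but each interaction is recorded and can be counted. A minimal sketch of the same pattern in isolation; the table name SOME_TABLE and the atLeastOnce() expectation are illustrative assumptions, not part of the test above.

ConnectionQueryServices spyServices = Mockito.spy(driver.getConnectionQueryServices(getUrl(), PropertiesUtil.deepCopy(TEST_PROPERTIES)));
try (Connection conn = spyServices.connect(getUrl(), new Properties())) {
    // the spy delegates to the real services, so the query runs normally
    conn.createStatement().executeQuery("SELECT * FROM SOME_TABLE").next();
    // but every call is recorded, so we can count metadata fetches
    verify(spyServices, atLeastOnce()).getTable((PName) isNull(), eq(new byte[0]), eq(Bytes.toBytes("SOME_TABLE")), anyLong(), anyLong());
    // clear the recorded interactions before the next block of assertions
    reset(spyServices);
}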
use of org.apache.phoenix.query.ConnectionQueryServices in project phoenix by apache.
the class NativeHBaseTypesIT method initTableValues.
@SuppressWarnings("deprecation")
private static void initTableValues() throws Exception {
    ConnectionQueryServices services = driver.getConnectionQueryServices(getUrl(), PropertiesUtil.deepCopy(TEST_PROPERTIES));
    HTableInterface hTable = services.getTable(SchemaUtil.getTableNameAsBytes(HBASE_NATIVE_SCHEMA_NAME, HBASE_NATIVE));
    try {
        // Insert rows using the standard HBase mechanism with standard HBase "types"
        List<Row> mutations = new ArrayList<Row>();
        byte[] family = Bytes.toBytes("1");
        byte[] uintCol = Bytes.toBytes("UINT_COL");
        byte[] ulongCol = Bytes.toBytes("ULONG_COL");
        byte[] key, bKey;
        Put put;
        key = ByteUtil.concat(Bytes.toBytes(10), Bytes.toBytes(100L), Bytes.toBytes("a"));
        put = new Put(key);
        put.add(family, uintCol, ts - 2, Bytes.toBytes(5));
        put.add(family, ulongCol, ts - 2, Bytes.toBytes(50L));
        mutations.add(put);
        put = new Put(key);
        put.add(family, uintCol, ts, Bytes.toBytes(10));
        put.add(family, ulongCol, ts, Bytes.toBytes(100L));
        mutations.add(put);
        bKey = key = ByteUtil.concat(Bytes.toBytes(20), Bytes.toBytes(200L), Bytes.toBytes("b"));
        put = new Put(key);
        put.add(family, uintCol, ts - 4, Bytes.toBytes(5000));
        put.add(family, ulongCol, ts - 4, Bytes.toBytes(50000L));
        mutations.add(put);
        // FIXME: the version of the Delete constructor without the lock args was introduced
        // in 0.94.4, thus if we try to use it here we can no longer use the 0.94.2 version
        // of the client.
        // Delete the row as of ts - 2: this masks the ts - 4 cells above, while the
        // ts cells written below remain visible.
        Delete del = new Delete(key, ts - 2);
        mutations.add(del);
        put = new Put(key);
        put.add(family, uintCol, ts, Bytes.toBytes(2000));
        put.add(family, ulongCol, ts, Bytes.toBytes(20000L));
        mutations.add(put);
        key = ByteUtil.concat(Bytes.toBytes(30), Bytes.toBytes(300L), Bytes.toBytes("c"));
        put = new Put(key);
        put.add(family, uintCol, ts, Bytes.toBytes(3000));
        put.add(family, ulongCol, ts, Bytes.toBytes(30000L));
        mutations.add(put);
        key = ByteUtil.concat(Bytes.toBytes(40), Bytes.toBytes(400L), Bytes.toBytes("d"));
        put = new Put(key);
        put.add(family, uintCol, ts, Bytes.toBytes(4000));
        put.add(family, ulongCol, ts, Bytes.toBytes(40000L));
        mutations.add(put);
        hTable.batch(mutations);
        // sanity check that the batch was applied
        Result r = hTable.get(new Get(bKey));
        assertFalse(r.isEmpty());
    } finally {
        hTable.close();
    }
    // Create the Phoenix table after the HBase table was created through the native APIs.
    // The timestamp of the table creation must be later than the timestamp of the data.
    ensureTableCreated(getUrl(), HBASE_NATIVE, HBASE_NATIVE, null, ts + 1, null);
}
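Once ensureTableCreated has defined the Phoenix table at ts + 1, the natively written cells become visible to SQL. A hedged sketch of reading them back, assuming the table's DDL maps UINT_COL and ULONG_COL to Phoenix's UNSIGNED_INT and UNSIGNED_LONG types, whose encodings match Bytes.toBytes() for non-negative values:

Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
// read at a timestamp above both the data (ts) and the table creation (ts + 1)
props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(ts + 2));
try (Connection conn = DriverManager.getConnection(getUrl(), props)) {
    ResultSet rs = conn.createStatement().executeQuery("SELECT UINT_COL, ULONG_COL FROM " + HBASE_NATIVE_SCHEMA_NAME + "." + HBASE_NATIVE);
    while (rs.next()) {
        System.out.println(rs.getInt(1) + " / " + rs.getLong(2));
    }
}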
use of org.apache.phoenix.query.ConnectionQueryServices in project phoenix by apache.
the class DynamicColumnIT method initTable.
@Before
public void initTable() throws Exception {
    tableName = generateUniqueName();
    try (PhoenixConnection pconn = DriverManager.getConnection(getUrl()).unwrap(PhoenixConnection.class)) {
        ConnectionQueryServices services = pconn.getQueryServices();
        try (HBaseAdmin admin = services.getAdmin()) {
            HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(tableName));
            htd.addFamily(new HColumnDescriptor(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES));
            htd.addFamily(new HColumnDescriptor(FAMILY_NAME_A));
            htd.addFamily(new HColumnDescriptor(FAMILY_NAME_B));
            admin.createTable(htd);
        }
        try (HTableInterface hTable = services.getTable(Bytes.toBytes(tableName))) {
            // Insert rows using the standard HBase mechanism with standard HBase "types"
            List<Row> mutations = new ArrayList<Row>();
            byte[] dv = Bytes.toBytes("DV");
            byte[] first = Bytes.toBytes("F");
            byte[] f1v1 = Bytes.toBytes("F1V1");
            byte[] f1v2 = Bytes.toBytes("F1V2");
            byte[] f2v1 = Bytes.toBytes("F2V1");
            byte[] f2v2 = Bytes.toBytes("F2V2");
            byte[] key = Bytes.toBytes("entry1");
            Put put = new Put(key);
            put.add(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, dv, Bytes.toBytes("default"));
            put.add(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, first, Bytes.toBytes("first"));
            put.add(FAMILY_NAME_A, f1v1, Bytes.toBytes("f1value1"));
            put.add(FAMILY_NAME_A, f1v2, Bytes.toBytes("f1value2"));
            put.add(FAMILY_NAME_B, f2v1, Bytes.toBytes("f2value1"));
            put.add(FAMILY_NAME_B, f2v2, Bytes.toBytes("f2value2"));
            mutations.add(put);
            hTable.batch(mutations);
            // Create the Phoenix table after the HBase table was created through the native APIs.
            // The timestamp of the table creation must be later than the timestamp of the data.
            // Note that B.F2v2 is written above but deliberately left out of the DDL, so it
            // remains accessible only as a dynamic column.
            pconn.createStatement().execute("create table " + tableName + " (entry varchar not null," + " F varchar," + " A.F1v1 varchar," + " A.F1v2 varchar," + " B.F2v1 varchar" + " CONSTRAINT pk PRIMARY KEY (entry))");
        }
    }
}
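Because F2V2 was written through the native API but omitted from the Phoenix DDL, it is only reachable by declaring it at query time. A hedged sketch of the kind of query this setup enables (the exact query is an illustration; the real assertions live in the test methods of DynamicColumnIT):

try (Connection conn = DriverManager.getConnection(getUrl())) {
    // declare the undeclared column, with its family and type, in the FROM clause
    ResultSet rs = conn.createStatement().executeQuery("SELECT entry, F2V2 FROM " + tableName + " (B.F2V2 VARCHAR)");
    while (rs.next()) {
        System.out.println(rs.getString(1) + ": " + rs.getString(2)); // expect entry1: f2value2
    }
}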
use of org.apache.phoenix.query.ConnectionQueryServices in project phoenix by apache.
the class DynamicFamilyIT method initTableValues.
@SuppressWarnings("deprecation")
private static void initTableValues() throws Exception {
    ConnectionQueryServices services = driver.getConnectionQueryServices(getUrl(), PropertiesUtil.deepCopy(TEST_PROPERTIES));
    HTableInterface hTable = services.getTable(SchemaUtil.getTableNameAsBytes(WEB_STATS_SCHEMA_NAME, WEB_STATS));
    try {
        // Insert rows using the standard HBase mechanism with standard HBase "types"
        Put put;
        List<Row> mutations = new ArrayList<Row>();
        put = new Put(Bytes.toBytes("entry1"));
        put.add(A_CF, QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, ByteUtil.EMPTY_BYTE_ARRAY);
        put.add(A_CF, ByteUtil.concat(MAX_CLICK_COUNT_DYNCOL_PREFIX, USER_ID2_BYTES), PInteger.INSTANCE.toBytes(ENTRY1_CLICK_COUNT));
        put.add(B_CF, ByteUtil.concat(LAST_LOGIN_TIME_DYNCOL_PREFIX, USER_ID1_BYTES), PTime.INSTANCE.toBytes(ENTRY1_USER_ID1_LOGIN_TIME));
        put.add(B_CF, ByteUtil.concat(LAST_LOGIN_TIME_DYNCOL_PREFIX, USER_ID2_BYTES), PTime.INSTANCE.toBytes(ENTRY1_USER_ID2_LOGIN_TIME));
        mutations.add(put);
        put = new Put(Bytes.toBytes("entry2"));
        put.add(A_CF, QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, ByteUtil.EMPTY_BYTE_ARRAY);
        put.add(A_CF, ByteUtil.concat(MAX_CLICK_COUNT_DYNCOL_PREFIX, USER_ID3_BYTES), PInteger.INSTANCE.toBytes(ENTRY2_CLICK_COUNT));
        put.add(B_CF, ByteUtil.concat(LAST_LOGIN_TIME_DYNCOL_PREFIX, USER_ID2_BYTES), PTime.INSTANCE.toBytes(ENTRY2_USER_ID2_LOGIN_TIME));
        put.add(B_CF, ByteUtil.concat(LAST_LOGIN_TIME_DYNCOL_PREFIX, USER_ID3_BYTES), PTime.INSTANCE.toBytes(ENTRY2_USER_ID3_LOGIN_TIME));
        mutations.add(put);
        put = new Put(Bytes.toBytes("entry3"));
        put.add(A_CF, QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, ByteUtil.EMPTY_BYTE_ARRAY);
        put.add(A_CF, ByteUtil.concat(MAX_CLICK_COUNT_DYNCOL_PREFIX, USER_ID1_BYTES), PInteger.INSTANCE.toBytes(ENTRY3_CLICK_COUNT));
        put.add(B_CF, ByteUtil.concat(LAST_LOGIN_TIME_DYNCOL_PREFIX, USER_ID1_BYTES), PTime.INSTANCE.toBytes(ENTRY3_USER_ID1_LOGIN_TIME));
        put.add(B_CF, ByteUtil.concat(LAST_LOGIN_TIME_DYNCOL_PREFIX, USER_ID2_BYTES), PTime.INSTANCE.toBytes(ENTRY3_USER_ID2_LOGIN_TIME));
        put.add(B_CF, ByteUtil.concat(LAST_LOGIN_TIME_DYNCOL_PREFIX, USER_ID3_BYTES), PTime.INSTANCE.toBytes(ENTRY3_USER_ID3_LOGIN_TIME));
        mutations.add(put);
        hTable.batch(mutations);
    } finally {
        hTable.close();
    }
}
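The dynamic-column qualifiers here are built by concatenating a prefix with a user id, so SQL that reads them back has to construct the same names. A hedged sketch under that assumption (PTime maps to the SQL TIME type; the real test methods in DynamicFamilyIT cover the full query matrix):

String dynCol = Bytes.toString(ByteUtil.concat(LAST_LOGIN_TIME_DYNCOL_PREFIX, USER_ID1_BYTES));
try (Connection conn = DriverManager.getConnection(getUrl())) {
    // quote the name to preserve case, and declare it as a dynamic TIME column in family B
    ResultSet rs = conn.createStatement().executeQuery("SELECT B.\"" + dynCol + "\" FROM " + WEB_STATS_SCHEMA_NAME + "." + WEB_STATS + " (B.\"" + dynCol + "\" TIME)");
    while (rs.next()) {
        System.out.println(rs.getTime(1));
    }
}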
use of org.apache.phoenix.query.ConnectionQueryServices in project phoenix by apache.
the class RoundRobinResultIteratorIT method testRoundRobinAfterTableSplit.
@Test
public void testRoundRobinAfterTableSplit() throws Exception {
    String tableName = generateUniqueName();
    byte[] tableNameBytes = Bytes.toBytes(tableName);
    int numRows = setupTableForSplit(tableName);
    Connection conn = getConnection();
    ConnectionQueryServices services = conn.unwrap(PhoenixConnection.class).getQueryServices();
    int nRegions = services.getAllTableRegions(tableNameBytes).size();
    int nRegionsBeforeSplit = nRegions;
    HBaseAdmin admin = services.getAdmin();
    try {
        // Split is an async operation, so poll the region count for up to 10 x 2 seconds.
        // If the test tends to flap, increase the wait time.
        admin.split(tableName);
        CountDownLatch latch = new CountDownLatch(1);
        int nTries = 0;
        long waitTimeMillis = 2000;
        while (nRegions == nRegionsBeforeSplit && nTries < 10) {
            // the latch is never counted down, so await() simply sleeps for waitTimeMillis
            latch.await(waitTimeMillis, TimeUnit.MILLISECONDS);
            nRegions = services.getAllTableRegions(tableNameBytes).size();
            nTries++;
        }
        String query = "SELECT * FROM " + tableName;
        Statement stmt = conn.createStatement();
        // this causes scanner caches to be replenished in parallel
        stmt.setFetchSize(10);
        ResultSet rs = stmt.executeQuery(query);
        int numRowsRead = 0;
        while (rs.next()) {
            numRowsRead++;
        }
        nRegions = services.getAllTableRegions(tableNameBytes).size();
        // the region cache has been updated, as there are more regions now
        assertNotEquals(nRegions, nRegionsBeforeSplit);
        assertEquals(numRows, numRowsRead);
    } finally {
        admin.close();
    }
}
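The polling loop above is a common enough idiom that it can be factored into a helper. A minimal sketch, with waitForRegionCount as a hypothetical name that is not part of the test class, and Thread.sleep standing in for the CountDownLatch trick:

private static int waitForRegionCount(ConnectionQueryServices services, byte[] tableNameBytes, int countBeforeSplit, int maxTries, long waitTimeMillis) throws Exception {
    int nRegions = countBeforeSplit;
    int nTries = 0;
    // poll the region cache until the split is visible or the retry budget is exhausted
    while (nRegions == countBeforeSplit && nTries < maxTries) {
        Thread.sleep(waitTimeMillis);
        nRegions = services.getAllTableRegions(tableNameBytes).size();
        nTries++;
    }
    return nRegions;
}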