Use of org.apache.phoenix.jdbc.PhoenixConnection in project phoenix by apache.
From the class DynamicColumnIT, the method initTable:
@Before
public void initTable() throws Exception {
    tableName = generateUniqueName();
    try (PhoenixConnection pconn = DriverManager.getConnection(getUrl()).unwrap(PhoenixConnection.class)) {
        ConnectionQueryServices services = pconn.getQueryServices();
        try (HBaseAdmin admin = services.getAdmin()) {
            HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(tableName));
            htd.addFamily(new HColumnDescriptor(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES));
            htd.addFamily(new HColumnDescriptor(FAMILY_NAME_A));
            htd.addFamily(new HColumnDescriptor(FAMILY_NAME_B));
            admin.createTable(htd);
        }
        try (HTableInterface hTable = services.getTable(Bytes.toBytes(tableName))) {
            // Insert rows using the standard HBase mechanism with standard HBase "types"
            List<Row> mutations = new ArrayList<Row>();
            byte[] dv = Bytes.toBytes("DV");
            byte[] first = Bytes.toBytes("F");
            byte[] f1v1 = Bytes.toBytes("F1V1");
            byte[] f1v2 = Bytes.toBytes("F1V2");
            byte[] f2v1 = Bytes.toBytes("F2V1");
            byte[] f2v2 = Bytes.toBytes("F2V2");
            byte[] key = Bytes.toBytes("entry1");
            Put put = new Put(key);
            put.add(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, dv, Bytes.toBytes("default"));
            put.add(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, first, Bytes.toBytes("first"));
            put.add(FAMILY_NAME_A, f1v1, Bytes.toBytes("f1value1"));
            put.add(FAMILY_NAME_A, f1v2, Bytes.toBytes("f1value2"));
            put.add(FAMILY_NAME_B, f2v1, Bytes.toBytes("f2value1"));
            put.add(FAMILY_NAME_B, f2v2, Bytes.toBytes("f2value2"));
            mutations.add(put);
            hTable.batch(mutations);
            // Create the Phoenix table after the HBase table was created through the native APIs.
            // The timestamp of the table creation must be later than the timestamp of the data.
            pconn.createStatement().execute("create table " + tableName
                + " (entry varchar not null,"
                + " F varchar,"
                + " A.F1v1 varchar,"
                + " A.F1v2 varchar,"
                + " B.F2v1 varchar"
                + " CONSTRAINT pk PRIMARY KEY (entry))");
        }
    }
}
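The point of this fixture is that columns such as B.F2V2 exist in the HBase row but not in the Phoenix table definition. A minimal sketch, not part of the fixture itself, of the dynamic-column query this setup enables, assuming initTable() has run and using the fixture's tableName field:

// Minimal sketch: F2V2 must be declared inline as a dynamic column because it
// is present in the HBase data but absent from the Phoenix schema.
try (Connection conn = DriverManager.getConnection(getUrl());
        Statement stmt = conn.createStatement()) {
    ResultSet rs = stmt.executeQuery("SELECT * FROM " + tableName + " (B.F2V2 VARCHAR)");
    assertTrue(rs.next());
    assertEquals("f2value2", rs.getString("F2V2"));
}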
Use of org.apache.phoenix.jdbc.PhoenixConnection in project phoenix by apache.
From the class ExplainPlanWithStatsEnabledIT, the method assertUseStatsForQueryFlag:
private static void assertUseStatsForQueryFlag(String tableName, PhoenixConnection conn, boolean flag)
        throws TableNotFoundException, SQLException {
    // Check the client-side metadata cache...
    assertEquals(flag, conn.unwrap(PhoenixConnection.class).getMetaDataCache()
        .getTableRef(new PTableKey(null, tableName)).getTable().useStatsForParallelization());
    // ...and the value persisted on the table's header row in SYSTEM.CATALOG.
    String query = "SELECT USE_STATS_FOR_PARALLELIZATION FROM SYSTEM.CATALOG WHERE TABLE_NAME = ?"
        + " AND COLUMN_NAME IS NULL AND COLUMN_FAMILY IS NULL AND TENANT_ID IS NULL";
    PreparedStatement stmt = conn.prepareStatement(query);
    stmt.setString(1, tableName);
    ResultSet rs = stmt.executeQuery();
    rs.next();
    assertEquals(flag, rs.getBoolean(1));
}
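The helper checks the flag in two places: the client-side metadata cache and the persisted row in SYSTEM.CATALOG. A hedged sketch of how it might be driven, assuming the suite's usual scaffolding (getUrl(), generateUniqueName()) and a Phoenix version that supports the USE_STATS_FOR_PARALLELIZATION table property:

// Sketch only: create a table with the flag off, flip it with ALTER TABLE,
// and verify that both the cache and SYSTEM.CATALOG reflect each state.
try (PhoenixConnection conn = DriverManager.getConnection(getUrl()).unwrap(PhoenixConnection.class)) {
    String tableName = generateUniqueName();
    conn.createStatement().execute("CREATE TABLE " + tableName
        + " (k INTEGER PRIMARY KEY, v VARCHAR) USE_STATS_FOR_PARALLELIZATION=false");
    assertUseStatsForQueryFlag(tableName, conn, false);
    conn.createStatement().execute("ALTER TABLE " + tableName + " SET USE_STATS_FOR_PARALLELIZATION=true");
    assertUseStatsForQueryFlag(tableName, conn, true);
}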
Use of org.apache.phoenix.jdbc.PhoenixConnection in project phoenix by apache.
From the class QueryDatabaseMetaDataIT, the method testCreateOnExistingTable:
@Test
public void testCreateOnExistingTable() throws Exception {
    PhoenixConnection pconn = DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TEST_PROPERTIES)).unwrap(PhoenixConnection.class);
    String tableName = MDTEST_NAME;
    String schemaName = MDTEST_SCHEMA_NAME;
    byte[] cfA = Bytes.toBytes(SchemaUtil.normalizeIdentifier("a"));
    byte[] cfB = Bytes.toBytes(SchemaUtil.normalizeIdentifier("b"));
    byte[] cfC = Bytes.toBytes("c");
    byte[][] familyNames = new byte[][] { cfB, cfC };
    byte[] htableName = SchemaUtil.getTableNameAsBytes(schemaName, tableName);
    HBaseAdmin admin = pconn.getQueryServices().getAdmin();
    try {
        // Drop the table if it exists. If it did not exist, disableTable throws
        // TableNotFoundException and the delete is skipped; if it did, the
        // trailing enableTable throws after the delete. Either way the table
        // is gone when the catch block is reached.
        admin.disableTable(htableName);
        admin.deleteTable(htableName);
        admin.enableTable(htableName);
    } catch (org.apache.hadoop.hbase.TableNotFoundException e) {
    }
    @SuppressWarnings("deprecation")
    HTableDescriptor descriptor = new HTableDescriptor(htableName);
    for (byte[] familyName : familyNames) {
        HColumnDescriptor columnDescriptor = new HColumnDescriptor(familyName);
        descriptor.addFamily(columnDescriptor);
    }
    admin.createTable(descriptor);
    long ts = nextTimestamp();
    Properties props = new Properties();
    props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(ts + 5));
    PhoenixConnection conn1 = DriverManager.getConnection(getUrl(), props).unwrap(PhoenixConnection.class);
    ensureTableCreated(getUrl(), tableName, tableName, ts);
    descriptor = admin.getTableDescriptor(htableName);
    assertEquals(3, descriptor.getColumnFamilies().length);
    HColumnDescriptor cdA = descriptor.getFamily(cfA);
    assertNotEquals(HColumnDescriptor.DEFAULT_KEEP_DELETED, cdA.getKeepDeletedCells());
    // Overridden using WITH
    assertEquals(DataBlockEncoding.NONE, cdA.getDataBlockEncoding());
    // Overridden using WITH
    assertEquals(1, cdA.getMaxVersions());
    HColumnDescriptor cdB = descriptor.getFamily(cfB);
    // Allow KEEP_DELETED_CELLS to be false for VIEW
    assertEquals(HColumnDescriptor.DEFAULT_KEEP_DELETED, cdB.getKeepDeletedCells());
    // Should keep the original value.
    assertEquals(DataBlockEncoding.NONE, cdB.getDataBlockEncoding());
    // CF c should stay the same since it's not a Phoenix cf.
    HColumnDescriptor cdC = descriptor.getFamily(cfC);
    assertNotNull("Column family not found", cdC);
    assertEquals(HColumnDescriptor.DEFAULT_KEEP_DELETED, cdC.getKeepDeletedCells());
    assertFalse(SchemaUtil.DEFAULT_DATA_BLOCK_ENCODING == cdC.getDataBlockEncoding());
    assertTrue(descriptor.hasCoprocessor(UngroupedAggregateRegionObserver.class.getName()));
    assertTrue(descriptor.hasCoprocessor(GroupedAggregateRegionObserver.class.getName()));
    assertTrue(descriptor.hasCoprocessor(ServerCachingEndpointImpl.class.getName()));
    admin.close();
    int rowCount = 5;
    String upsert = "UPSERT INTO " + tableName + "(id,col1,col2) VALUES(?,?,?)";
    PreparedStatement ps = conn1.prepareStatement(upsert);
    for (int i = 0; i < rowCount; i++) {
        ps.setString(1, Integer.toString(i));
        ps.setInt(2, i + 1);
        ps.setInt(3, i + 2);
        ps.execute();
    }
    conn1.commit();
    conn1.close();
    props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(ts + 6));
    Connection conn2 = DriverManager.getConnection(getUrl(), props);
    String query = "SELECT count(1) FROM " + tableName;
    ResultSet rs = conn2.createStatement().executeQuery(query);
    assertTrue(rs.next());
    assertEquals(rowCount, rs.getLong(1));
    query = "SELECT id, col1, col2 FROM " + tableName;
    rs = conn2.createStatement().executeQuery(query);
    for (int i = 0; i < rowCount; i++) {
        assertTrue(rs.next());
        assertEquals(Integer.toString(i), rs.getString(1));
        assertEquals(i + 1, rs.getInt(2));
        assertEquals(i + 2, rs.getInt(3));
    }
    assertFalse(rs.next());
    conn2.close();
}
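The mechanism doing the work here is Phoenix's CurrentSCN connection property: each connection is pinned to a timestamp, so the DDL issued at ts is visible to the connection opened at ts + 5, and the rows written through it are visible to the one opened at ts + 6. A minimal sketch of a point-in-time read, assuming a table name and an earlier timestamp ts captured from the same sequence:

// Minimal sketch, not from the test: a connection pinned to an older SCN
// sees only data written at or before that timestamp.
Properties scnProps = new Properties();
scnProps.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(ts));
try (Connection scnConn = DriverManager.getConnection(getUrl(), scnProps)) {
    // Rows upserted after ts are invisible to this point-in-time query.
    ResultSet rs = scnConn.createStatement().executeQuery("SELECT count(*) FROM " + tableName);
    assertTrue(rs.next());
}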
Use of org.apache.phoenix.jdbc.PhoenixConnection in project phoenix by apache.
From the class QueryMoreIT, the method testMaxMutationSize:
@Test
public void testMaxMutationSize() throws Exception {
    Properties connectionProperties = new Properties();
    connectionProperties.setProperty(QueryServices.MAX_MUTATION_SIZE_ATTRIB, "3");
    connectionProperties.setProperty(QueryServices.MAX_MUTATION_SIZE_BYTES_ATTRIB, "1000000");
    PhoenixConnection connection = (PhoenixConnection) DriverManager.getConnection(getUrl(), connectionProperties);
    String fullTableName = generateUniqueName();
    try (Statement stmt = connection.createStatement()) {
        stmt.execute("CREATE TABLE " + fullTableName + "(\n"
            + " ORGANIZATION_ID CHAR(15) NOT NULL,\n"
            + " SCORE DOUBLE NOT NULL,\n"
            + " ENTITY_ID CHAR(15) NOT NULL\n"
            + " CONSTRAINT PAGE_SNAPSHOT_PK PRIMARY KEY (\n"
            + " ORGANIZATION_ID,\n"
            + " SCORE DESC,\n"
            + " ENTITY_ID DESC\n"
            + " )\n"
            + ") MULTI_TENANT=TRUE");
    }
    // With the row limit set to 3, the upsert batch trips the row-count check first
    try {
        upsertRows(connection, fullTableName);
        fail();
    } catch (SQLException e) {
        assertEquals(SQLExceptionCode.MAX_MUTATION_SIZE_EXCEEDED.getErrorCode(), e.getErrorCode());
    }
    // Raise the row limit and instead set the max mutation size in bytes to a low value
    connectionProperties.setProperty(QueryServices.MAX_MUTATION_SIZE_ATTRIB, "1000");
    connectionProperties.setProperty(QueryServices.MAX_MUTATION_SIZE_BYTES_ATTRIB, "4");
    connection = (PhoenixConnection) DriverManager.getConnection(getUrl(), connectionProperties);
    try {
        upsertRows(connection, fullTableName);
        fail();
    } catch (SQLException e) {
        assertEquals(SQLExceptionCode.MAX_MUTATION_SIZE_BYTES_EXCEEDED.getErrorCode(), e.getErrorCode());
    }
}
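The upsertRows helper is not shown on this page. A hypothetical version consistent with the assertions above would upsert more rows than the 3-row limit in a single uncommitted batch (the helper name matches the calls above; row values and count are illustrative):

// Hypothetical helper, sketched to match the test's expectations: with
// MAX_MUTATION_SIZE_ATTRIB=3, the uncommitted batch exceeds the row limit.
private void upsertRows(PhoenixConnection conn, String fullTableName) throws SQLException {
    PreparedStatement stmt = conn.prepareStatement(
        "upsert into " + fullTableName + " (organization_id, score, entity_id) values (?,?,?)");
    for (int i = 0; i < 10; i++) {
        stmt.setString(1, "AAAA" + i);
        stmt.setDouble(2, 1.0);
        stmt.setString(3, "BBBB" + i);
        stmt.execute();
    }
    conn.commit();
}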
Use of org.apache.phoenix.jdbc.PhoenixConnection in project phoenix by apache.
From the class FlappingTransactionIT, the method testExternalTxContext:
@Test
public void testExternalTxContext() throws Exception {
    ResultSet rs;
    Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
    Connection conn = DriverManager.getConnection(getUrl(), props);
    conn.setAutoCommit(false);
    String fullTableName = generateUniqueName();
    PhoenixConnection pconn = conn.unwrap(PhoenixConnection.class);
    TransactionSystemClient txServiceClient = pconn.getQueryServices().getTransactionSystemClient();
    Statement stmt = conn.createStatement();
    stmt.execute("CREATE TABLE " + fullTableName + "(K VARCHAR PRIMARY KEY, V1 VARCHAR, V2 VARCHAR) TRANSACTIONAL=true");
    HTableInterface htable = pconn.getQueryServices().getTable(Bytes.toBytes(fullTableName));
    stmt.executeUpdate("upsert into " + fullTableName + " values('x', 'a', 'a')");
    conn.commit();
    try (Connection newConn = DriverManager.getConnection(getUrl(), props)) {
        rs = newConn.createStatement().executeQuery("select count(*) from " + fullTableName);
        assertTrue(rs.next());
        assertEquals(1, rs.getInt(1));
    }
    // Use HBase-level Tephra APIs to start a new transaction
    TransactionAwareHTable txAware = new TransactionAwareHTable(htable, TxConstants.ConflictDetection.ROW);
    TransactionContext txContext = new TransactionContext(txServiceClient, txAware);
    txContext.start();
    // Use HBase APIs to add a new row
    Put put = new Put(Bytes.toBytes("z"));
    put.addColumn(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, QueryConstants.EMPTY_COLUMN_BYTES, QueryConstants.EMPTY_COLUMN_VALUE_BYTES);
    put.addColumn(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, Bytes.toBytes("V1"), Bytes.toBytes("b"));
    txAware.put(put);
    // Use Phoenix APIs to add a new row (sharing the transaction context)
    pconn.setTransactionContext(txContext);
    conn.createStatement().executeUpdate("upsert into " + fullTableName + " values('y', 'c', 'c')");
    // A new connection should not see the data, as it hasn't been committed yet
    try (Connection newConn = DriverManager.getConnection(getUrl(), props)) {
        rs = newConn.createStatement().executeQuery("select count(*) from " + fullTableName);
        assertTrue(rs.next());
        assertEquals(1, rs.getInt(1));
    }
    // Use a new connection to create a row with a conflict
    Connection connWithConflict = DriverManager.getConnection(getUrl(), props);
    connWithConflict.createStatement().execute("upsert into " + fullTableName + " values('z', 'd', 'd')");
    // The existing connection should see the data even though it hasn't been committed yet
    rs = conn.createStatement().executeQuery("select count(*) from " + fullTableName);
    assertTrue(rs.next());
    assertEquals(3, rs.getInt(1));
    // Use Tephra APIs directly to finish (i.e. commit) the transaction
    txContext.finish();
    // Confirm that the attempt to commit the row with a conflict fails.
    // Note that connWithConflict must stay open: it is reused below after
    // the conflict rolls back its transaction.
    try {
        connWithConflict.commit();
        fail();
    } catch (SQLException e) {
        assertEquals(SQLExceptionCode.TRANSACTION_CONFLICT_EXCEPTION.getErrorCode(), e.getErrorCode());
    }
    // A new connection should now see the data, as it has been committed
    try (Connection newConn = DriverManager.getConnection(getUrl(), props)) {
        rs = newConn.createStatement().executeQuery("select count(*) from " + fullTableName);
        assertTrue(rs.next());
        assertEquals(3, rs.getInt(1));
    }
    // Repeat the same as above, but this time abort the transaction
    txContext = new TransactionContext(txServiceClient, txAware);
    txContext.start();
    // Use HBase APIs to add a new row
    put = new Put(Bytes.toBytes("j"));
    put.addColumn(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, QueryConstants.EMPTY_COLUMN_BYTES, QueryConstants.EMPTY_COLUMN_VALUE_BYTES);
    put.addColumn(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, Bytes.toBytes("V1"), Bytes.toBytes("e"));
    txAware.put(put);
    // Use Phoenix APIs to add a new row (sharing the transaction context)
    pconn.setTransactionContext(txContext);
    conn.createStatement().executeUpdate("upsert into " + fullTableName + " values('k', 'f', 'f')");
    // The existing connection should see the data even though it hasn't been committed yet
    rs = conn.createStatement().executeQuery("select count(*) from " + fullTableName);
    assertTrue(rs.next());
    assertEquals(5, rs.getInt(1));
    connWithConflict.createStatement().execute("upsert into " + fullTableName + " values('k', 'g', 'g')");
    rs = connWithConflict.createStatement().executeQuery("select count(*) from " + fullTableName);
    assertTrue(rs.next());
    assertEquals(4, rs.getInt(1));
    // Use Tephra APIs directly to abort (i.e. roll back) the transaction
    txContext.abort();
    rs = conn.createStatement().executeQuery("select count(*) from " + fullTableName);
    assertTrue(rs.next());
    assertEquals(3, rs.getInt(1));
    // Should succeed since the conflicting row was aborted
    connWithConflict.commit();
    connWithConflict.close();
    // A new connection should now see the data, as it has been committed
    try (Connection newConn = DriverManager.getConnection(getUrl(), props)) {
        rs = newConn.createStatement().executeQuery("select count(*) from " + fullTableName);
        assertTrue(rs.next());
        assertEquals(4, rs.getInt(1));
    }
    // Even using HBase APIs directly, we shouldn't find 'j' since a delete marker
    // would have been written to hide it.
    Result result = htable.get(new Get(Bytes.toBytes("j")));
    assertTrue(result.isEmpty());
}
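Stripped of the assertions, the pattern this test exercises is a single Tephra TransactionContext shared by a raw HBase table and a Phoenix connection, committed or aborted as one unit. A condensed sketch of that pattern, assuming Tephra's TransactionFailureException is in scope; it reuses htable, txServiceClient, pconn, conn, and fullTableName from above and is not runnable on its own:

// Condensed sketch of the shared-context pattern, not a separate test.
TransactionAwareHTable txAware2 = new TransactionAwareHTable(htable, TxConstants.ConflictDetection.ROW);
TransactionContext txContext2 = new TransactionContext(txServiceClient, txAware2);
txContext2.start();
// HBase-level writes join the transaction through the wrapper...
txAware2.put(new Put(Bytes.toBytes("row")).addColumn(
    QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, Bytes.toBytes("V1"), Bytes.toBytes("v")));
// ...and Phoenix writes join the same transaction via setTransactionContext.
pconn.setTransactionContext(txContext2);
conn.createStatement().executeUpdate("upsert into " + fullTableName + " values('row2', 'v', 'v')");
try {
    txContext2.finish(); // commits the HBase and Phoenix writes together
} catch (TransactionFailureException e) {
    txContext2.abort(); // rolls back everything the context saw
}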