Example use of org.apache.phoenix.jdbc.PhoenixConnection in the Apache Phoenix project.
Class: ScannerLeaseRenewalIT; method: setUp.
@BeforeClass
public static void setUp() throws Exception {
    // Bring up a mini HBase cluster whose scanner lease timeout is short enough
    // that the lease-renewal machinery under test is actually exercised.
    Configuration config = HBaseConfiguration.create();
    hbaseTestUtil = new HBaseTestingUtility(config);
    setUpConfigForMiniCluster(config);
    config.setLong(HConstants.HBASE_CLIENT_SCANNER_TIMEOUT_PERIOD, LEASE_TIMEOUT_PERIOD_MILLIS);
    hbaseTestUtil.startMiniCluster();

    // establish url and quorum. Need to use PhoenixDriver and not PhoenixTestDriver
    zkQuorum = "localhost:" + hbaseTestUtil.getZkCluster().getClientPort();
    url = PhoenixRuntime.JDBC_PROTOCOL + PhoenixRuntime.JDBC_PROTOCOL_SEPARATOR + zkQuorum;

    // Driver-level properties: enable renewal and size the thresholds relative to
    // the (short) lease timeout so renewals fire well before leases expire.
    Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
    props.put(RENEW_LEASE_THREAD_POOL_SIZE, Long.toString(4));
    // if this property is false, tests will fail with UnknownScannerException errors.
    props.put(RENEW_LEASE_ENABLED, Boolean.toString(true));
    props.put(RENEW_LEASE_THRESHOLD_MILLISECONDS, Long.toString(LEASE_TIMEOUT_PERIOD_MILLIS / 2));
    props.put(RUN_RENEW_LEASE_FREQUENCY_INTERVAL_MILLISECONDS, Long.toString(LEASE_TIMEOUT_PERIOD_MILLIS / 4));
    props.put(HBASE_CLIENT_SCANNER_TIMEOUT_PERIOD, Long.toString(LEASE_TIMEOUT_PERIOD_MILLIS));
    // use round robin iterator
    props.put(FORCE_ROW_KEY_ORDER_ATTRIB, Boolean.toString(false));

    DriverManager.registerDriver(PhoenixDriver.INSTANCE);
    // run test methods only if we are at the hbase version that supports lease renewal.
    try (PhoenixConnection pconn = DriverManager.getConnection(url, props).unwrap(PhoenixConnection.class)) {
        Assume.assumeTrue(pconn.getQueryServices().supportsFeature(Feature.RENEW_LEASE));
    }
}
Example use of org.apache.phoenix.jdbc.PhoenixConnection in the Apache Phoenix project.
Class: ScannerLeaseRenewalIT; method: testRenewLeasePreventsUpsertSelectFromFailing.
@Test
public void testRenewLeasePreventsUpsertSelectFromFailing() throws Exception {
    // Verifies that lease renewal keeps a slow UPSERT SELECT scan alive: the
    // delayed iterator waits longer than the lease period between next() calls,
    // so without renewal the server would expire the scanner.
    String table1 = "testRenewLeasePreventsUpsertSelectFromFailing";
    String table2 = "testRenewLeasePreventsUpsertSelectFromFailing2";
    int numRecords = 5;
    try (Connection conn = DriverManager.getConnection(url)) {
        conn.createStatement().execute("CREATE TABLE " + table1 + " (PK1 INTEGER NOT NULL PRIMARY KEY, KV1 VARCHAR)");
        conn.createStatement().execute("CREATE TABLE " + table2 + " (PK1 INTEGER NOT NULL PRIMARY KEY, KV1 VARCHAR)");
        String upsert = "UPSERT INTO " + table1 + " VALUES (?, ?)";
        Random random = new Random();
        // Fix: use the loop index as the primary key. The original used
        // random.nextInt() for PK1, which can (rarely) collide; colliding UPSERTs
        // overwrite each other, leaving fewer than numRecords rows and a flaky test.
        try (PreparedStatement stmt = conn.prepareStatement(upsert)) {
            for (int i = 0; i < numRecords; i++) {
                stmt.setInt(1, i);
                stmt.setString(2, "KV" + random.nextInt());
                stmt.executeUpdate();
            }
        }
        conn.commit();
    }
    try (PhoenixConnection phxConn = DriverManager.getConnection(url).unwrap(PhoenixConnection.class)) {
        String upsertSelect = "UPSERT INTO " + table2 + " SELECT PK1, KV1 FROM " + table1;
        // at every next call wait for this period. This will cause lease to expire.
        long delayAfterInit = 2 * LEASE_TIMEOUT_PERIOD_MILLIS;
        phxConn.setTableResultIteratorFactory(new DelayedTableResultIteratorFactory(delayAfterInit));
        // Small fetch size forces multiple round trips, so the per-next() delay
        // actually spans several lease periods.
        try (Statement s = phxConn.createStatement()) {
            s.setFetchSize(2);
            s.executeUpdate(upsertSelect);
        }
    }
}
Example use of org.apache.phoenix.jdbc.PhoenixConnection in the Apache Phoenix project.
Class: PhoenixMetricsIT; method: testMetricsForUpsertSelect.
@Test
public void testMetricsForUpsertSelect() throws Exception {
    // Verifies that an UPSERT SELECT records both write metrics (against the
    // target table) and read metrics (against the source table).
    // Source table: salted so the read side fans out over multiple scans.
    String tableName1 = generateUniqueName();
    long table1SaltBuckets = 6;
    String ddl = "CREATE TABLE " + tableName1 + " (K VARCHAR NOT NULL PRIMARY KEY, V VARCHAR)" + " SALT_BUCKETS = " + table1SaltBuckets;
    // Fix: the original never closed these connections (and `conn` below leaked
    // unconditionally); try-with-resources releases them even on assertion failure.
    try (Connection ddlConn = DriverManager.getConnection(getUrl())) {
        ddlConn.createStatement().execute(ddl);
    }
    int numRows = 10;
    insertRowsInTable(tableName1, numRows);
    // Target table with a different salt-bucket count than the source.
    String tableName2 = generateUniqueName();
    ddl = "CREATE TABLE " + tableName2 + " (K VARCHAR NOT NULL PRIMARY KEY, V VARCHAR)" + " SALT_BUCKETS = 10";
    try (Connection ddlConn = DriverManager.getConnection(getUrl())) {
        ddlConn.createStatement().execute(ddl);
    }
    try (Connection conn = DriverManager.getConnection(getUrl())) {
        String upsertSelect = "UPSERT INTO " + tableName2 + " SELECT * FROM " + tableName1;
        conn.createStatement().executeUpdate(upsertSelect);
        conn.commit();
        PhoenixConnection pConn = conn.unwrap(PhoenixConnection.class);
        // Write metrics accrue against the mutated (target) table ...
        Map<String, Map<String, Long>> mutationMetrics = PhoenixRuntime.getWriteMetricsForMutationsSinceLastReset(pConn);
        assertMutationMetrics(tableName2, numRows, mutationMetrics);
        // ... while read metrics accrue against the scanned (source) table.
        Map<String, Map<String, Long>> readMetrics = PhoenixRuntime.getReadMetricsForMutationsSinceLastReset(pConn);
        assertReadMetricsForMutatingSql(tableName1, table1SaltBuckets, readMetrics);
    }
}
Example use of org.apache.phoenix.jdbc.PhoenixConnection in the Apache Phoenix project.
Class: CreateFunctionCompiler; method: compile.
/**
 * Compiles a CREATE [OR REPLACE] FUNCTION statement into a MutationPlan that
 * delegates the actual metadata write to a MetaDataClient at execution time.
 */
public MutationPlan compile(final CreateFunctionStatement create) throws SQLException {
    final PhoenixConnection connection = statement.getConnection();
    final StatementContext context = new StatementContext(statement);
    final MetaDataClient client = new MetaDataClient(connection);
    return new BaseMutationPlan(context, create.getOperation()) {

        @Override
        public MutationState execute() throws SQLException {
            try {
                return client.createFunction(create);
            } finally {
                // If the client ended up on a different connection than the one
                // the caller owns, close it; the caller only manages the original.
                if (client.getConnection() != connection) {
                    client.getConnection().close();
                }
            }
        }

        @Override
        public ExplainPlan getExplainPlan() throws SQLException {
            String verb;
            if (create.getFunctionInfo().isReplace()) {
                verb = "CREATE OR REPLACE FUNCTION";
            } else {
                verb = "CREATE FUNCTION";
            }
            return new ExplainPlan(Collections.singletonList(verb));
        }

        @Override
        public StatementContext getContext() {
            return context;
        }
    };
}
Example use of org.apache.phoenix.jdbc.PhoenixConnection in the Apache Phoenix project.
Class: CreateSequenceCompiler; method: compile.
/**
 * Compiles a CREATE SEQUENCE statement into a MutationPlan.
 *
 * Validates that the START WITH / INCREMENT BY / MINVALUE / MAXVALUE / CACHE
 * clauses are stateless constants, registers bind-parameter metadata for any
 * that are bind variables, evaluates them to concrete longs, range-checks the
 * combination, and returns a plan that creates the sequence via MetaDataClient.
 *
 * @throws SQLException if any clause is non-constant or the evaluated values
 *         are inconsistent (e.g. INCREMENT BY 0, MINVALUE > MAXVALUE,
 *         START WITH outside [MINVALUE, MAXVALUE], or negative CACHE)
 */
public MutationPlan compile(final CreateSequenceStatement sequence) throws SQLException {
ParseNode startsWithNode = sequence.getStartWith();
ParseNode incrementByNode = sequence.getIncrementBy();
ParseNode maxValueNode = sequence.getMaxValue();
ParseNode minValueNode = sequence.getMinValue();
ParseNode cacheNode = sequence.getCacheSize();
// validate parse nodes
// START WITH and CACHE are optional clauses, so only validate them when present;
// the other three are always supplied (with defaults) by the parser.
if (startsWithNode != null) {
validateNodeIsStateless(sequence, startsWithNode, SQLExceptionCode.START_WITH_MUST_BE_CONSTANT);
}
validateNodeIsStateless(sequence, incrementByNode, SQLExceptionCode.INCREMENT_BY_MUST_BE_CONSTANT);
validateNodeIsStateless(sequence, maxValueNode, SQLExceptionCode.MAXVALUE_MUST_BE_CONSTANT);
validateNodeIsStateless(sequence, minValueNode, SQLExceptionCode.MINVALUE_MUST_BE_CONSTANT);
if (cacheNode != null) {
validateNodeIsStateless(sequence, cacheNode, SQLExceptionCode.CACHE_MUST_BE_NON_NEGATIVE_CONSTANT);
}
final PhoenixConnection connection = statement.getConnection();
final StatementContext context = new StatementContext(statement);
// add param meta data if required
// Each clause may be a bind variable ("?"); register its expected type
// (LONG for the range values, INTEGER for CACHE) so drivers can describe it.
if (startsWithNode instanceof BindParseNode) {
context.getBindManager().addParamMetaData((BindParseNode) startsWithNode, LONG_DATUM);
}
if (incrementByNode instanceof BindParseNode) {
context.getBindManager().addParamMetaData((BindParseNode) incrementByNode, LONG_DATUM);
}
if (maxValueNode instanceof BindParseNode) {
context.getBindManager().addParamMetaData((BindParseNode) maxValueNode, LONG_DATUM);
}
if (minValueNode instanceof BindParseNode) {
context.getBindManager().addParamMetaData((BindParseNode) minValueNode, LONG_DATUM);
}
if (cacheNode instanceof BindParseNode) {
context.getBindManager().addParamMetaData((BindParseNode) cacheNode, INTEGER_DATUM);
}
// Evaluate the (constant) expressions to concrete values, then cross-validate.
ExpressionCompiler expressionCompiler = new ExpressionCompiler(context);
final long incrementBy = evalExpression(sequence, context, incrementByNode.accept(expressionCompiler), SQLExceptionCode.INCREMENT_BY_MUST_BE_CONSTANT);
if (incrementBy == 0) {
throw SequenceUtil.getException(sequence.getSequenceName().getSchemaName(), sequence.getSequenceName().getTableName(), SQLExceptionCode.INCREMENT_BY_MUST_NOT_BE_ZERO);
}
final long maxValue = evalExpression(sequence, context, maxValueNode.accept(expressionCompiler), SQLExceptionCode.MAXVALUE_MUST_BE_CONSTANT);
final long minValue = evalExpression(sequence, context, minValueNode.accept(expressionCompiler), SQLExceptionCode.MINVALUE_MUST_BE_CONSTANT);
if (minValue > maxValue) {
TableName sequenceName = sequence.getSequenceName();
throw SequenceUtil.getException(sequenceName.getSchemaName(), sequenceName.getTableName(), SQLExceptionCode.MINVALUE_MUST_BE_LESS_THAN_OR_EQUAL_TO_MAXVALUE);
}
long startsWithValue;
if (startsWithNode == null) {
// No explicit START WITH: ascending sequences start at MINVALUE,
// descending ones at MAXVALUE.
startsWithValue = incrementBy > 0 ? minValue : maxValue;
} else {
startsWithValue = evalExpression(sequence, context, startsWithNode.accept(expressionCompiler), SQLExceptionCode.START_WITH_MUST_BE_CONSTANT);
if (startsWithValue < minValue || startsWithValue > maxValue) {
TableName sequenceName = sequence.getSequenceName();
throw SequenceUtil.getException(sequenceName.getSchemaName(), sequenceName.getTableName(), SQLExceptionCode.STARTS_WITH_MUST_BE_BETWEEN_MIN_MAX_VALUE);
}
}
final long startsWith = startsWithValue;
long cacheSizeValue;
if (cacheNode == null) {
// No CACHE clause: fall back to the connection-level configured default.
cacheSizeValue = connection.getQueryServices().getProps().getLong(QueryServices.SEQUENCE_CACHE_SIZE_ATTRIB, QueryServicesOptions.DEFAULT_SEQUENCE_CACHE_SIZE);
} else {
cacheSizeValue = evalExpression(sequence, context, cacheNode.accept(expressionCompiler), SQLExceptionCode.CACHE_MUST_BE_NON_NEGATIVE_CONSTANT);
if (cacheSizeValue < 0) {
TableName sequenceName = sequence.getSequenceName();
throw SequenceUtil.getException(sequenceName.getSchemaName(), sequenceName.getTableName(), SQLExceptionCode.CACHE_MUST_BE_NON_NEGATIVE_CONSTANT);
}
}
// A cache size of 0 is accepted above but clamped to 1 here, so at least one
// value is always allocated per round trip.
final long cacheSize = Math.max(1L, cacheSizeValue);
final MetaDataClient client = new MetaDataClient(connection);
return new BaseMutationPlan(context, operation) {
@Override
public MutationState execute() throws SQLException {
return client.createSequence(sequence, startsWith, incrementBy, cacheSize, minValue, maxValue);
}
@Override
public ExplainPlan getExplainPlan() throws SQLException {
return new ExplainPlan(Collections.singletonList("CREATE SEQUENCE"));
}
};
}
Aggregations