Use of org.apache.phoenix.jdbc.PhoenixStatement in project phoenix by apache.
From the class QueryOptimizerTest, method testChooseIndexOverTable:
@Test
public void testChooseIndexOverTable() throws Exception {
    Connection conn = DriverManager.getConnection(getUrl());
    conn.createStatement().execute("CREATE TABLE t (k INTEGER NOT NULL PRIMARY KEY, v1 VARCHAR, v2 VARCHAR) IMMUTABLE_ROWS=true");
    conn.createStatement().execute("CREATE INDEX idx ON t(v1)");
    PhoenixStatement stmt = conn.createStatement().unwrap(PhoenixStatement.class);
    QueryPlan plan = stmt.optimizeQuery("SELECT k FROM t WHERE v1 = 'bar'");
    assertEquals("IDX", plan.getTableRef().getTable().getTableName().getString());
}
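By contrast, when the query selects a column the index does not cover, the optimizer should stay on the data table. A minimal sketch of that follow-up check, assuming the same table T and index IDX created above (this assertion is an illustration, not part of the original test):

PhoenixStatement stmt2 = conn.createStatement().unwrap(PhoenixStatement.class);
// v2 is not part of IDX (no INCLUDE clause), so the index cannot cover this query
// and the plan is expected to fall back to the data table T.
QueryPlan plan2 = stmt2.optimizeQuery("SELECT k, v2 FROM t WHERE v1 = 'bar'");
assertEquals("T", plan2.getTableRef().getTable().getTableName().getString());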
Use of org.apache.phoenix.jdbc.PhoenixStatement in project phoenix by apache.
From the class UpsertCompiler, method upsertSelect:
public static MutationState upsertSelect(StatementContext childContext, TableRef tableRef, RowProjector projector, ResultIterator iterator, int[] columnIndexes, int[] pkSlotIndexes, boolean useServerTimestamp, boolean prefixSysColValues) throws SQLException {
    PhoenixStatement statement = childContext.getStatement();
    PhoenixConnection connection = statement.getConnection();
    ConnectionQueryServices services = connection.getQueryServices();
    int maxSize = services.getProps().getInt(QueryServices.MAX_MUTATION_SIZE_ATTRIB, QueryServicesOptions.DEFAULT_MAX_MUTATION_SIZE);
    int maxSizeBytes = services.getProps().getInt(QueryServices.MAX_MUTATION_SIZE_BYTES_ATTRIB, QueryServicesOptions.DEFAULT_MAX_MUTATION_SIZE_BYTES);
    int batchSize = Math.min(connection.getMutateBatchSize(), maxSize);
    boolean isAutoCommit = connection.getAutoCommit();
    int numSplColumns = (tableRef.getTable().isMultiTenant() ? 1 : 0) + (tableRef.getTable().getViewIndexId() != null ? 1 : 0);
    byte[][] values = new byte[columnIndexes.length + numSplColumns][];
    if (prefixSysColValues) {
        int i = 0;
        if (tableRef.getTable().isMultiTenant()) {
            values[i++] = connection.getTenantId().getBytes();
        }
        if (tableRef.getTable().getViewIndexId() != null) {
            values[i++] = PSmallint.INSTANCE.toBytes(tableRef.getTable().getViewIndexId());
        }
    }
    int rowCount = 0;
    Map<ImmutableBytesPtr, RowMutationState> mutation = Maps.newHashMapWithExpectedSize(batchSize);
    PTable table = tableRef.getTable();
    IndexMaintainer indexMaintainer = null;
    byte[][] viewConstants = null;
    if (table.getIndexType() == IndexType.LOCAL) {
        PTable parentTable = statement.getConnection().getMetaDataCache().getTableRef(new PTableKey(statement.getConnection().getTenantId(), table.getParentName().getString())).getTable();
        indexMaintainer = table.getIndexMaintainer(parentTable, connection);
        viewConstants = IndexUtil.getViewConstants(parentTable);
    }
    try (ResultSet rs = new PhoenixResultSet(iterator, projector, childContext)) {
        ImmutableBytesWritable ptr = new ImmutableBytesWritable();
        while (rs.next()) {
            for (int i = 0, j = numSplColumns; j < values.length; j++, i++) {
                PColumn column = table.getColumns().get(columnIndexes[i]);
                byte[] bytes = rs.getBytes(i + 1);
                ptr.set(bytes == null ? ByteUtil.EMPTY_BYTE_ARRAY : bytes);
                Object value = rs.getObject(i + 1);
                int rsPrecision = rs.getMetaData().getPrecision(i + 1);
                Integer precision = rsPrecision == 0 ? null : rsPrecision;
                int rsScale = rs.getMetaData().getScale(i + 1);
                Integer scale = rsScale == 0 ? null : rsScale;
                // The source and target columns are guaranteed to have compatible types,
                // as that was checked earlier at compile time; here we only verify that
                // the value fits the target column's declared size and scale.
                if (!column.getDataType().isSizeCompatible(ptr, value, column.getDataType(), SortOrder.getDefault(), precision, scale, column.getMaxLength(), column.getScale())) {
                    throw new SQLExceptionInfo.Builder(SQLExceptionCode.DATA_EXCEEDS_MAX_CAPACITY).setColumnName(column.getName().getString()).setMessage("value=" + column.getDataType().toStringLiteral(ptr, null)).build().buildException();
                }
                column.getDataType().coerceBytes(ptr, value, column.getDataType(), precision, scale, SortOrder.getDefault(), column.getMaxLength(), column.getScale(), column.getSortOrder(), table.rowKeyOrderOptimizable());
                values[j] = ByteUtil.copyKeyBytesIfNecessary(ptr);
            }
            setValues(values, pkSlotIndexes, columnIndexes, table, mutation, statement, useServerTimestamp, indexMaintainer, viewConstants, null, numSplColumns);
            rowCount++;
            // Commit a batch if auto commit is true and we're at our batch size
            if (isAutoCommit && rowCount % batchSize == 0) {
                MutationState state = new MutationState(tableRef, mutation, 0, maxSize, maxSizeBytes, connection);
                connection.getMutationState().join(state);
                connection.getMutationState().send();
                mutation.clear();
            }
        }
        // If auto commit is true, this last batch will be committed upon return
        return new MutationState(tableRef, mutation, rowCount / batchSize * batchSize, maxSize, maxSizeBytes, connection);
    }
}
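From the client side, the batching that upsertSelect performs is driven by auto-commit and the connection's mutate batch size. A minimal JDBC sketch, assuming placeholder tables SOURCE and TARGET with compatible columns:

Properties props = new Properties();
// Assumption for illustration: cap the client-side batch size that getMutateBatchSize() reads.
props.setProperty(QueryServices.MUTATE_BATCH_SIZE_ATTRIB, "1000");
try (Connection conn = DriverManager.getConnection(getUrl(), props)) {
    // With auto-commit enabled, the loop above sends a batch every batchSize rows
    // instead of accumulating the whole result set in one MutationState.
    conn.setAutoCommit(true);
    conn.createStatement().executeUpdate("UPSERT INTO TARGET SELECT * FROM SOURCE");
}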
Use of org.apache.phoenix.jdbc.PhoenixStatement in project phoenix by apache.
From the class JoinQueryCompilerTest, method getJoinTable:
private static JoinTable getJoinTable(String query, PhoenixConnection connection) throws SQLException {
    SQLParser parser = new SQLParser(query);
    SelectStatement select = SubselectRewriter.flatten(parser.parseQuery(), connection);
    ColumnResolver resolver = FromCompiler.getResolverForQuery(select, connection);
    select = StatementNormalizer.normalize(select, resolver);
    SelectStatement transformedSelect = SubqueryRewriter.transform(select, resolver, connection);
    if (transformedSelect != select) {
        resolver = FromCompiler.getResolverForQuery(transformedSelect, connection);
        select = StatementNormalizer.normalize(transformedSelect, resolver);
    }
    PhoenixStatement stmt = connection.createStatement().unwrap(PhoenixStatement.class);
    return JoinCompiler.compile(stmt, select, resolver);
}
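A hypothetical call of this helper, assuming two existing tables T1 and T2 joined on their keys; the connection setup mirrors the other tests in this listing:

PhoenixConnection pconn = DriverManager.getConnection(getUrl()).unwrap(PhoenixConnection.class);
// Compile the join query into a JoinTable that the test can then inspect.
JoinTable joinTable = getJoinTable("SELECT * FROM T1 JOIN T2 ON T1.k = T2.k", pconn);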
Use of org.apache.phoenix.jdbc.PhoenixStatement in project phoenix by apache.
From the class ParallelIteratorsSplitTest, method getSplits:
private static List<KeyRange> getSplits(final TableRef tableRef, final Scan scan, final List<HRegionLocation> regions, final ScanRanges scanRanges) throws SQLException {
    final List<TableRef> tableRefs = Collections.singletonList(tableRef);
    ColumnResolver resolver = new ColumnResolver() {
        @Override
        public List<PFunction> getFunctions() {
            return Collections.emptyList();
        }
        @Override
        public List<TableRef> getTables() {
            return tableRefs;
        }
        @Override
        public TableRef resolveTable(String schemaName, String tableName) throws SQLException {
            throw new UnsupportedOperationException();
        }
        @Override
        public ColumnRef resolveColumn(String schemaName, String tableName, String colName) throws SQLException {
            throw new UnsupportedOperationException();
        }
        @Override
        public PFunction resolveFunction(String functionName) throws SQLException {
            throw new UnsupportedOperationException();
        }
        @Override
        public boolean hasUDFs() {
            return false;
        }
        @Override
        public PSchema resolveSchema(String schemaName) throws SQLException {
            return null;
        }
        @Override
        public List<PSchema> getSchemas() {
            return null;
        }
    };
    PhoenixConnection connection = DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TEST_PROPERTIES)).unwrap(PhoenixConnection.class);
    final PhoenixStatement statement = new PhoenixStatement(connection);
    final StatementContext context = new StatementContext(statement, resolver, scan, new SequenceManager(statement));
    context.setScanRanges(scanRanges);
    ParallelIterators parallelIterators = new ParallelIterators(new QueryPlan() {
        private final Set<TableRef> tableRefs = ImmutableSet.of(tableRef);
        @Override
        public StatementContext getContext() {
            return context;
        }
        @Override
        public ParameterMetaData getParameterMetaData() {
            return PhoenixParameterMetaData.EMPTY_PARAMETER_META_DATA;
        }
        @Override
        public ExplainPlan getExplainPlan() throws SQLException {
            return ExplainPlan.EMPTY_PLAN;
        }
        @Override
        public ResultIterator iterator(ParallelScanGrouper scanGrouper) throws SQLException {
            return ResultIterator.EMPTY_ITERATOR;
        }
        @Override
        public ResultIterator iterator(ParallelScanGrouper scanGrouper, Scan scan) throws SQLException {
            return ResultIterator.EMPTY_ITERATOR;
        }
        @Override
        public ResultIterator iterator() throws SQLException {
            return ResultIterator.EMPTY_ITERATOR;
        }
        @Override
        public long getEstimatedSize() {
            return 0;
        }
        @Override
        public Set<TableRef> getSourceRefs() {
            return tableRefs;
        }
        @Override
        public TableRef getTableRef() {
            return tableRef;
        }
        @Override
        public RowProjector getProjector() {
            return RowProjector.EMPTY_PROJECTOR;
        }
        @Override
        public Integer getLimit() {
            return null;
        }
        @Override
        public Integer getOffset() {
            return null;
        }
        @Override
        public OrderBy getOrderBy() {
            return OrderBy.EMPTY_ORDER_BY;
        }
        @Override
        public GroupBy getGroupBy() {
            return GroupBy.EMPTY_GROUP_BY;
        }
        @Override
        public List<KeyRange> getSplits() {
            return null;
        }
        @Override
        public FilterableStatement getStatement() {
            return SelectStatement.SELECT_ONE;
        }
        @Override
        public boolean isDegenerate() {
            return false;
        }
        @Override
        public boolean isRowKeyOrdered() {
            return true;
        }
        @Override
        public List<List<Scan>> getScans() {
            return null;
        }
        @Override
        public Operation getOperation() {
            return Operation.QUERY;
        }
        @Override
        public boolean useRoundRobinIterator() {
            return false;
        }
        @Override
        public Long getEstimatedRowsToScan() {
            return null;
        }
        @Override
        public Long getEstimatedBytesToScan() {
            return null;
        }
    }, null, new SpoolingResultIterator.SpoolingResultIteratorFactory(context.getConnection().getQueryServices()), context.getScan(), false);
    List<KeyRange> keyRanges = parallelIterators.getSplits();
    return keyRanges;
}
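A hypothetical invocation of the helper with a trivially wide scan; tableRef and regions are placeholders that would come from the surrounding test fixture, and ScanRanges.EVERYTHING is assumed to represent an unbounded range:

List<KeyRange> splits = getSplits(tableRef, new Scan(), regions, ScanRanges.EVERYTHING);
for (KeyRange range : splits) {
    // Print each parallel split's boundaries in a human-readable form.
    System.out.println(Bytes.toStringBinary(range.getLowerRange()) + " - " + Bytes.toStringBinary(range.getUpperRange()));
}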
Use of org.apache.phoenix.jdbc.PhoenixStatement in project phoenix by apache.
From the class SequenceIT, method testCompilerOptimization:
@Test
public void testCompilerOptimization() throws Exception {
    nextConnection();
    conn.createStatement().execute("CREATE SEQUENCE seq.perf START WITH 3 INCREMENT BY 2");
    conn.createStatement().execute("CREATE TABLE t (k INTEGER NOT NULL PRIMARY KEY, v1 VARCHAR, v2 VARCHAR) IMMUTABLE_ROWS=true");
    nextConnection();
    conn.createStatement().execute("CREATE INDEX idx ON t(v1) INCLUDE (v2)");
    nextConnection();
    PhoenixStatement stmt = conn.createStatement().unwrap(PhoenixStatement.class);
    stmt.optimizeQuery("SELECT k, NEXT VALUE FOR seq.perf FROM t WHERE v1 = 'bar'");
}
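The test above only checks that optimization succeeds when the query contains a sequence expression; if it also wanted to assert which plan is picked, a sketch in the style of the first example might look like this (the expected index name is an assumption, based on IDX indexing v1 and including v2):

QueryPlan plan = stmt.optimizeQuery("SELECT k, NEXT VALUE FOR seq.perf FROM t WHERE v1 = 'bar'");
// Assumed outcome: the covering index can serve k and the v1 filter, so it should be chosen.
assertEquals("IDX", plan.getTableRef().getTable().getTableName().getString());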