Use of org.apache.phoenix.parse.FilterableStatement in project phoenix by apache.
From the class BaseResultIterators, method initializeScan:
private static void initializeScan(QueryPlan plan, Integer perScanLimit, Integer offset, Scan scan) throws SQLException {
    StatementContext context = plan.getContext();
    TableRef tableRef = plan.getTableRef();
    PTable table = tableRef.getTable();
    Map<byte[], NavigableSet<byte[]>> familyMap = scan.getFamilyMap();
    // Hack for PHOENIX-2067 to force raw scan over all KeyValues to fix their row keys
    if (context.getConnection().isDescVarLengthRowKeyUpgrade()) {
        // We project *all* KeyValues across all column families as we make a pass over
        // a physical table and we want to make sure we catch all KeyValues that may be
        // dynamic or part of an updatable view.
        familyMap.clear();
        scan.setMaxVersions();
        // Remove any filter
        scan.setFilter(null);
        // Traverse (and subsequently clone) all KeyValues
        scan.setRaw(true);
        // Pass over PTable so we can re-write rows according to the row key schema
        scan.setAttribute(BaseScannerRegionObserver.UPGRADE_DESC_ROW_KEY, UngroupedAggregateRegionObserver.serialize(table));
    } else {
        FilterableStatement statement = plan.getStatement();
        RowProjector projector = plan.getProjector();
        boolean optimizeProjection = false;
        boolean keyOnlyFilter = familyMap.isEmpty() && context.getWhereConditionColumns().isEmpty();
        if (!projector.projectEverything()) {
            // Only take this path when the table has exactly one column family, since otherwise
            // the declared column families may not match the actual column families of the
            // table (which is bad).
            if (keyOnlyFilter && table.getColumnFamilies().size() == 1) {
                // Project the one column family. We must project a column family since it's possible
                // that there are other non-declared column families that we need to ignore.
                scan.addFamily(table.getColumnFamilies().get(0).getName().getBytes());
            } else {
                optimizeProjection = true;
                if (projector.projectEveryRow()) {
                    if (table.getViewType() == ViewType.MAPPED) {
                        // Since we don't have the empty key value in MAPPED tables,
                        // we must project all CFs in HRS. However, only the
                        // selected column values are returned back to the client.
                        context.getWhereConditionColumns().clear();
                        for (PColumnFamily family : table.getColumnFamilies()) {
                            context.addWhereConditionColumn(family.getName().getBytes(), null);
                        }
                    } else {
                        byte[] ecf = SchemaUtil.getEmptyColumnFamily(table);
                        // Project the empty key value unless the column family containing it has
                        // been projected in its entirety.
                        if (!familyMap.containsKey(ecf) || familyMap.get(ecf) != null) {
                            scan.addColumn(ecf, EncodedColumnsUtil.getEmptyKeyValueInfo(table).getFirst());
                        }
                    }
                }
            }
        }
        // Add FirstKeyOnlyFilter if there are no references to key value columns
        if (keyOnlyFilter) {
            ScanUtil.andFilterAtBeginning(scan, new FirstKeyOnlyFilter());
        }
        if (perScanLimit != null) {
            ScanUtil.andFilterAtEnd(scan, new PageFilter(perScanLimit));
        }
        if (offset != null) {
            ScanUtil.addOffsetAttribute(scan, offset);
        }
        int cols = plan.getGroupBy().getOrderPreservingColumnCount();
        if (cols > 0 && keyOnlyFilter
                && !plan.getStatement().getHint().hasHint(HintNode.Hint.RANGE_SCAN)
                && cols < plan.getTableRef().getTable().getRowKeySchema().getFieldCount()
                && plan.getGroupBy().isOrderPreserving()
                && (context.getAggregationManager().isEmpty() || plan.getGroupBy().isUngroupedAggregate())) {
            ScanUtil.andFilterAtEnd(scan, new DistinctPrefixFilter(plan.getTableRef().getTable().getRowKeySchema(), cols));
            if (plan.getLimit() != null) {
                // We can push the limit to the server
                ScanUtil.andFilterAtEnd(scan, new PageFilter(plan.getLimit()));
            }
        }
        scan.setAttribute(BaseScannerRegionObserver.QUALIFIER_ENCODING_SCHEME, new byte[] { table.getEncodingScheme().getSerializedMetadataValue() });
        scan.setAttribute(BaseScannerRegionObserver.IMMUTABLE_STORAGE_ENCODING_SCHEME, new byte[] { table.getImmutableStorageScheme().getSerializedMetadataValue() });
        // We use this flag on the server side to determine which value column qualifier to use
        // in the key value we return from the server.
        scan.setAttribute(BaseScannerRegionObserver.USE_NEW_VALUE_COLUMN_QUALIFIER, Bytes.toBytes(true));
        // When analyzing the table, no key value look-up is done,
        // so there is no point setting the range.
        if (!ScanUtil.isAnalyzeTable(scan)) {
            setQualifierRanges(keyOnlyFilter, table, scan, context);
        }
        if (optimizeProjection) {
            optimizeProjection(context, scan, table, statement);
        }
    }
}
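The FilterableStatement above is where the parsed query's hints enter scan setup: the DistinctPrefixFilter push-down is skipped whenever the statement carries a RANGE_SCAN hint. As a minimal sketch of performing that same check outside the planner (assuming only phoenix-core on the classpath; the class name and query text here are made up for illustration):

import org.apache.phoenix.parse.FilterableStatement;
import org.apache.phoenix.parse.HintNode;
import org.apache.phoenix.parse.SQLParser;

public class HintCheckSketch {
    public static void main(String[] args) throws Exception {
        // SelectStatement implements FilterableStatement, so a parsed SELECT can be
        // inspected the same way initializeScan inspects plan.getStatement().
        FilterableStatement stmt =
                new SQLParser("SELECT /*+ RANGE_SCAN */ pk1 FROM t GROUP BY pk1").parseQuery();
        // The same hint test that gates the DistinctPrefixFilter above.
        System.out.println("RANGE_SCAN hinted: "
                + stmt.getHint().hasHint(HintNode.Hint.RANGE_SCAN));
    }
}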
Use of org.apache.phoenix.parse.FilterableStatement in project phoenix by apache.
From the class ParallelIteratorsSplitTest, method getSplits:
private static List<KeyRange> getSplits(final TableRef tableRef, final Scan scan, final List<HRegionLocation> regions, final ScanRanges scanRanges) throws SQLException {
    final List<TableRef> tableRefs = Collections.singletonList(tableRef);
    ColumnResolver resolver = new ColumnResolver() {

        @Override
        public List<PFunction> getFunctions() {
            return Collections.emptyList();
        }

        @Override
        public List<TableRef> getTables() {
            return tableRefs;
        }

        @Override
        public TableRef resolveTable(String schemaName, String tableName) throws SQLException {
            throw new UnsupportedOperationException();
        }

        @Override
        public ColumnRef resolveColumn(String schemaName, String tableName, String colName) throws SQLException {
            throw new UnsupportedOperationException();
        }

        @Override
        public PFunction resolveFunction(String functionName) throws SQLException {
            throw new UnsupportedOperationException();
        }

        @Override
        public boolean hasUDFs() {
            return false;
        }

        @Override
        public PSchema resolveSchema(String schemaName) throws SQLException {
            return null;
        }

        @Override
        public List<PSchema> getSchemas() {
            return null;
        }
    };
    PhoenixConnection connection = DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TEST_PROPERTIES)).unwrap(PhoenixConnection.class);
    final PhoenixStatement statement = new PhoenixStatement(connection);
    final StatementContext context = new StatementContext(statement, resolver, scan, new SequenceManager(statement));
    context.setScanRanges(scanRanges);
    ParallelIterators parallelIterators = new ParallelIterators(new QueryPlan() {

        private final Set<TableRef> tableRefs = ImmutableSet.of(tableRef);

        @Override
        public StatementContext getContext() {
            return context;
        }

        @Override
        public ParameterMetaData getParameterMetaData() {
            return PhoenixParameterMetaData.EMPTY_PARAMETER_META_DATA;
        }

        @Override
        public ExplainPlan getExplainPlan() throws SQLException {
            return ExplainPlan.EMPTY_PLAN;
        }

        @Override
        public ResultIterator iterator(ParallelScanGrouper scanGrouper) throws SQLException {
            return ResultIterator.EMPTY_ITERATOR;
        }

        @Override
        public ResultIterator iterator(ParallelScanGrouper scanGrouper, Scan scan) throws SQLException {
            return ResultIterator.EMPTY_ITERATOR;
        }

        @Override
        public ResultIterator iterator() throws SQLException {
            return ResultIterator.EMPTY_ITERATOR;
        }

        @Override
        public long getEstimatedSize() {
            return 0;
        }

        @Override
        public Set<TableRef> getSourceRefs() {
            return tableRefs;
        }

        @Override
        public TableRef getTableRef() {
            return tableRef;
        }

        @Override
        public RowProjector getProjector() {
            return RowProjector.EMPTY_PROJECTOR;
        }

        @Override
        public Integer getLimit() {
            return null;
        }

        @Override
        public Integer getOffset() {
            return null;
        }

        @Override
        public OrderBy getOrderBy() {
            return OrderBy.EMPTY_ORDER_BY;
        }

        @Override
        public GroupBy getGroupBy() {
            return GroupBy.EMPTY_GROUP_BY;
        }

        @Override
        public List<KeyRange> getSplits() {
            return null;
        }

        @Override
        public FilterableStatement getStatement() {
            return SelectStatement.SELECT_ONE;
        }

        @Override
        public boolean isDegenerate() {
            return false;
        }

        @Override
        public boolean isRowKeyOrdered() {
            return true;
        }

        @Override
        public List<List<Scan>> getScans() {
            return null;
        }

        @Override
        public Operation getOperation() {
            return Operation.QUERY;
        }

        @Override
        public boolean useRoundRobinIterator() {
            return false;
        }

        @Override
        public Long getEstimatedRowsToScan() {
            return null;
        }

        @Override
        public Long getEstimatedBytesToScan() {
            return null;
        }
    }, null, new SpoolingResultIterator.SpoolingResultIteratorFactory(context.getConnection().getQueryServices()), context.getScan(), false);
    List<KeyRange> keyRanges = parallelIterators.getSplits();
    return keyRanges;
}
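Here the stub QueryPlan supplies SelectStatement.SELECT_ONE as its FilterableStatement, which is all the split calculation needs; everything else is inert. For illustration only, a hypothetical caller of this helper (the fixture names tableRef, scan, regions, scanRanges, and expectedRanges are assumptions, not part of the Phoenix test) might compare the computed parallel-scan boundaries against expected key ranges:

// Hypothetical usage of the helper above; not part of ParallelIteratorsSplitTest.
List<KeyRange> splits = getSplits(tableRef, scan, regions, scanRanges);
assertEquals(expectedRanges.size(), splits.size());
for (int i = 0; i < splits.size(); i++) {
    // KeyRange overrides equals(), so lower and upper bounds are compared directly.
    assertEquals(expectedRanges.get(i), splits.get(i));
}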