Use of org.apache.phoenix.schema.TableRef in project phoenix by apache.
The class TestUtil, method getGuidePostsList:
public static Collection<GuidePostsInfo> getGuidePostsList(Connection conn, String tableName, String pkCol,
        byte[] lowerRange, byte[] upperRange, String whereClauseSuffix) throws SQLException {
    String whereClauseStart = (lowerRange == null && upperRange == null ? ""
            : " WHERE " + ((lowerRange != null ? (pkCol + " >= ? " + (upperRange != null ? " AND " : "")) : "")
                    + (upperRange != null ? (pkCol + " < ?") : "")));
    // Append the suffix to the range clause, or start a new WHERE clause if there is no range.
    String whereClause = whereClauseSuffix == null ? whereClauseStart
            : whereClauseStart.length() == 0 ? (" WHERE " + whereClauseSuffix)
                    : (whereClauseStart + " AND " + whereClauseSuffix);
    String query = "SELECT /*+ NO_INDEX */ COUNT(*) FROM " + tableName + whereClause;
    PhoenixPreparedStatement pstmt = conn.prepareStatement(query).unwrap(PhoenixPreparedStatement.class);
    if (lowerRange != null) {
        pstmt.setBytes(1, lowerRange);
    }
    if (upperRange != null) {
        pstmt.setBytes(lowerRange != null ? 2 : 1, upperRange);
    }
    pstmt.execute();
    TableRef tableRef = pstmt.getQueryPlan().getTableRef();
    PhoenixConnection pconn = conn.unwrap(PhoenixConnection.class);
    PTable table = tableRef.getTable();
    GuidePostsInfo info = pconn.getQueryServices().getTableStats(
            new GuidePostsKey(table.getName().getBytes(), SchemaUtil.getEmptyColumnFamily(table)));
    return Collections.singletonList(info);
}
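
A minimal usage sketch for this helper, assuming a test table T with primary key column PK (both names are hypothetical, not from the source):

// Hypothetical usage: run the count over rows of T with PK >= 'a' and
// collect the guide posts recorded for the table's stats key.
Connection conn = DriverManager.getConnection(getUrl());
Collection<GuidePostsInfo> guidePosts = TestUtil.getGuidePostsList(
        conn, "T", "PK", Bytes.toBytes("a"), null, null);
for (GuidePostsInfo info : guidePosts) {
    assertNotNull(info);
}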
Use of org.apache.phoenix.schema.TableRef in project phoenix by apache.
The class BaseResultIterators, method initializeScan:
private static void initializeScan(QueryPlan plan, Integer perScanLimit, Integer offset, Scan scan) throws SQLException {
    StatementContext context = plan.getContext();
    TableRef tableRef = plan.getTableRef();
    PTable table = tableRef.getTable();
    Map<byte[], NavigableSet<byte[]>> familyMap = scan.getFamilyMap();
    // Hack for PHOENIX-2067 to force raw scan over all KeyValues to fix their row keys
    if (context.getConnection().isDescVarLengthRowKeyUpgrade()) {
        // We project *all* KeyValues across all column families as we make a pass over
        // a physical table and we want to make sure we catch all KeyValues that may be
        // dynamic or part of an updatable view.
        familyMap.clear();
        scan.setMaxVersions();
        // Remove any filter
        scan.setFilter(null);
        // Traverse (and subsequently clone) all KeyValues
        scan.setRaw(true);
        // Pass over PTable so we can re-write rows according to the row key schema
        scan.setAttribute(BaseScannerRegionObserver.UPGRADE_DESC_ROW_KEY,
                UngroupedAggregateRegionObserver.serialize(table));
    } else {
        FilterableStatement statement = plan.getStatement();
        RowProjector projector = plan.getProjector();
        boolean optimizeProjection = false;
        boolean keyOnlyFilter = familyMap.isEmpty() && context.getWhereConditionColumns().isEmpty();
        if (!projector.projectEverything()) {
            // If nothing is projected into the scan and the table has a single column family,
            // project that family explicitly; otherwise the families the scan tracks may
            // not match the actual column families of the table (which is bad).
            if (keyOnlyFilter && table.getColumnFamilies().size() == 1) {
                // Project the one column family. We must project a column family since it's possible
                // that there are other non declared column families that we need to ignore.
                scan.addFamily(table.getColumnFamilies().get(0).getName().getBytes());
            } else {
                optimizeProjection = true;
                if (projector.projectEveryRow()) {
                    if (table.getViewType() == ViewType.MAPPED) {
                        // Since we don't have the empty key value in MAPPED tables,
                        // we must project all CFs in HRS. However, only the
                        // selected column values are returned back to client.
                        context.getWhereConditionColumns().clear();
                        for (PColumnFamily family : table.getColumnFamilies()) {
                            context.addWhereConditionColumn(family.getName().getBytes(), null);
                        }
                    } else {
                        byte[] ecf = SchemaUtil.getEmptyColumnFamily(table);
                        // Project the empty key value unless the column family containing it has
                        // been projected in its entirety.
                        if (!familyMap.containsKey(ecf) || familyMap.get(ecf) != null) {
                            scan.addColumn(ecf, EncodedColumnsUtil.getEmptyKeyValueInfo(table).getFirst());
                        }
                    }
                }
            }
        }
        // Add FirstKeyOnlyFilter if there are no references to key value columns
        if (keyOnlyFilter) {
            ScanUtil.andFilterAtBeginning(scan, new FirstKeyOnlyFilter());
        }
        if (perScanLimit != null) {
            ScanUtil.andFilterAtEnd(scan, new PageFilter(perScanLimit));
        }
        if (offset != null) {
            ScanUtil.addOffsetAttribute(scan, offset);
        }
        int cols = plan.getGroupBy().getOrderPreservingColumnCount();
        if (cols > 0 && keyOnlyFilter
                && !plan.getStatement().getHint().hasHint(HintNode.Hint.RANGE_SCAN)
                && cols < plan.getTableRef().getTable().getRowKeySchema().getFieldCount()
                && plan.getGroupBy().isOrderPreserving()
                && (context.getAggregationManager().isEmpty() || plan.getGroupBy().isUngroupedAggregate())) {
            ScanUtil.andFilterAtEnd(scan,
                    new DistinctPrefixFilter(plan.getTableRef().getTable().getRowKeySchema(), cols));
            if (plan.getLimit() != null) {
                // We can push the limit to the server
                ScanUtil.andFilterAtEnd(scan, new PageFilter(plan.getLimit()));
            }
        }
        scan.setAttribute(BaseScannerRegionObserver.QUALIFIER_ENCODING_SCHEME,
                new byte[] { table.getEncodingScheme().getSerializedMetadataValue() });
        scan.setAttribute(BaseScannerRegionObserver.IMMUTABLE_STORAGE_ENCODING_SCHEME,
                new byte[] { table.getImmutableStorageScheme().getSerializedMetadataValue() });
        // We use this flag on the server side to determine which value column qualifier to use
        // in the key value we return from the server.
        scan.setAttribute(BaseScannerRegionObserver.USE_NEW_VALUE_COLUMN_QUALIFIER, Bytes.toBytes(true));
        // When analyzing the table, no look up of key values is done,
        // so there is no point setting the range.
        if (!ScanUtil.isAnalyzeTable(scan)) {
            setQualifierRanges(keyOnlyFilter, table, scan, context);
        }
        if (optimizeProjection) {
            optimizeProjection(context, scan, table, statement);
        }
    }
}
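
The method leans heavily on ScanUtil.andFilterAtBeginning/andFilterAtEnd to AND additional filters onto whatever the Scan already carries. The following is a minimal sketch of the effect such a helper presumably has (the real ScanUtil internals are not shown in the source; this is an assumption, written against the public HBase filter API):

import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.filter.Filter;
import org.apache.hadoop.hbase.filter.FilterList;

// Hypothetical helper: AND a new filter onto a Scan without clobbering existing filters.
static void andFilter(Scan scan, Filter newFilter) {
    Filter existing = scan.getFilter();
    if (existing == null) {
        // First filter: set it directly.
        scan.setFilter(newFilter);
    } else if (existing instanceof FilterList
            && ((FilterList) existing).getOperator() == FilterList.Operator.MUST_PASS_ALL) {
        // Already an AND list: append.
        ((FilterList) existing).addFilter(newFilter);
    } else {
        // Wrap both in a MUST_PASS_ALL (i.e. AND) list.
        scan.setFilter(new FilterList(FilterList.Operator.MUST_PASS_ALL, existing, newFilter));
    }
}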
Use of org.apache.phoenix.schema.TableRef in project phoenix by apache.
The class IndexUtil, method rewriteViewStatement:
/**
 * Rewrite a view statement to be valid against an index.
 * @param conn the Phoenix connection
 * @param index the index table to rewrite against
 * @param table the data table the view statement currently references
 * @param viewStatement the view's SELECT statement, or null
 * @return the rewritten view statement, or null if viewStatement is null
 * @throws SQLException
 */
public static String rewriteViewStatement(PhoenixConnection conn, PTable index, PTable table, String viewStatement)
        throws SQLException {
    if (viewStatement == null) {
        return null;
    }
    SelectStatement select = new SQLParser(viewStatement).parseQuery();
    ColumnResolver resolver = FromCompiler.getResolver(new TableRef(table));
    SelectStatement translatedSelect = IndexStatementRewriter.translate(select, resolver);
    ParseNode whereNode = translatedSelect.getWhere();
    PhoenixStatement statement = new PhoenixStatement(conn);
    TableRef indexTableRef = new TableRef(index) {

        @Override
        public String getColumnDisplayName(ColumnRef ref, boolean schemaNameCaseSensitive, boolean colNameCaseSensitive) {
            return '"' + ref.getColumn().getName().getString() + '"';
        }
    };
    ColumnResolver indexResolver = FromCompiler.getResolver(indexTableRef);
    StatementContext context = new StatementContext(statement, indexResolver);
    // Compile to ensure validity
    WhereCompiler.compile(context, whereNode);
    StringBuilder buf = new StringBuilder();
    whereNode.toSQL(indexResolver, buf);
    return QueryUtil.getViewStatement(index.getSchemaName().getString(), index.getTableName().getString(),
            buf.toString());
}
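
A hypothetical usage sketch (the connection URL, schema, table, and index names below are assumptions, not from the source):

// Resolve the data table and its index from metadata, then rewrite the view statement.
PhoenixConnection pconn = DriverManager.getConnection("jdbc:phoenix:localhost")
        .unwrap(PhoenixConnection.class);
PTable dataTable = PhoenixRuntime.getTable(pconn, "S.T");
PTable indexTable = PhoenixRuntime.getTable(pconn, "S.IDX_T");
String rewritten = IndexUtil.rewriteViewStatement(pconn, indexTable, dataTable,
        "SELECT * FROM S.T WHERE K2 = 'a'");
// The WHERE clause of the result references the corresponding index columns,
// each quoted by the getColumnDisplayName override above.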
Use of org.apache.phoenix.schema.TableRef in project phoenix by apache.
The class KeyValueUtil, method getEstimatedRowSize:
/**
 * Estimates the storage size of a row
 * @param mutations map from table to row to RowMutationState
 * @return estimated row size
 */
public static long getEstimatedRowSize(Map<TableRef, Map<ImmutableBytesPtr, RowMutationState>> mutations) {
    long size = 0;
    // iterate over tables
    for (Entry<TableRef, Map<ImmutableBytesPtr, RowMutationState>> tableEntry : mutations.entrySet()) {
        PTable table = tableEntry.getKey().getTable();
        // iterate over rows
        for (Entry<ImmutableBytesPtr, RowMutationState> rowEntry : tableEntry.getValue().entrySet()) {
            int rowLength = rowEntry.getKey().getLength();
            Map<PColumn, byte[]> colValueMap = rowEntry.getValue().getColumnValues();
            switch (table.getImmutableStorageScheme()) {
            case ONE_CELL_PER_COLUMN:
                // iterate over columns
                for (Entry<PColumn, byte[]> colValueEntry : colValueMap.entrySet()) {
                    PColumn pColumn = colValueEntry.getKey();
                    size += KeyValue.getKeyValueDataStructureSize(rowLength,
                            pColumn.getFamilyName().getBytes().length,
                            pColumn.getColumnQualifierBytes().length,
                            colValueEntry.getValue().length);
                }
                break;
            case SINGLE_CELL_ARRAY_WITH_OFFSETS:
                // we store all the column values in a single key value that contains all the
                // column values followed by an offset array
                size += PArrayDataTypeEncoder.getEstimatedByteSize(table, rowLength, colValueMap);
                break;
            }
            // count the empty key value
            Pair<byte[], byte[]> emptyKeyValueInfo = EncodedColumnsUtil.getEmptyKeyValueInfo(table);
            size += KeyValue.getKeyValueDataStructureSize(rowLength,
                    SchemaUtil.getEmptyColumnFamilyPtr(table).getLength(),
                    emptyKeyValueInfo.getFirst().length,
                    emptyKeyValueInfo.getSecond().length);
        }
    }
    return size;
}
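
For intuition, a quick sanity check of the per-cell estimate; this assumes HBase's standard KeyValue wire layout, in which the fixed per-cell overhead works out to 20 bytes:

// KeyValue layout: [4B key len][4B value len][2B row len][row][1B fam len][fam]
//                  [qualifier][8B timestamp][1B type][value]
// Fixed overhead per cell: 4 + 4 + 2 + 1 + 8 + 1 = 20 bytes.
long cellSize = KeyValue.getKeyValueDataStructureSize(
        5 /* row */, 1 /* family */, 2 /* qualifier */, 8 /* value */);
assert cellSize == 20 + 5 + 1 + 2 + 8; // 36 bytes for this cell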
Use of org.apache.phoenix.schema.TableRef in project phoenix by apache.
The class WhereCompilerTest, method testTenantConstraintsAddedToScan:
@Test
public void testTenantConstraintsAddedToScan() throws SQLException {
    String tenantTypeId = "5678";
    String tenantId = "000000000000123";
    String url = getUrl(tenantId);
    createTestTable(getUrl(), "create table base_table_for_tenant_filter_test (tenant_id char(15) not null, type_id char(4) not null, "
            + "id char(5) not null, a_integer integer, a_string varchar(100) constraint pk primary key (tenant_id, type_id, id)) multi_tenant=true");
    createTestTable(url, "create view tenant_filter_test (tenant_col integer) AS SELECT * FROM BASE_TABLE_FOR_TENANT_FILTER_TEST WHERE type_id= '"
            + tenantTypeId + "'");
    String query = "select * from tenant_filter_test where a_integer=0 and a_string='foo'";
    PhoenixConnection pconn = DriverManager.getConnection(url, PropertiesUtil.deepCopy(TEST_PROPERTIES))
            .unwrap(PhoenixConnection.class);
    PhoenixPreparedStatement pstmt = newPreparedStatement(pconn, query);
    QueryPlan plan = pstmt.optimizeQuery();
    Scan scan = plan.getContext().getScan();
    Filter filter = scan.getFilter();
    PTable table = plan.getTableRef().getTable();
    Expression aInteger = new ColumnRef(new TableRef(table),
            table.getColumnForColumnName("A_INTEGER").getPosition()).newColumnExpression();
    Expression aString = new ColumnRef(new TableRef(table),
            table.getColumnForColumnName("A_STRING").getPosition()).newColumnExpression();
    assertEquals(multiEncodedKVFilter(and(constantComparison(CompareOp.EQUAL, aInteger, 0),
            constantComparison(CompareOp.EQUAL, aString, "foo")), TWO_BYTE_QUALIFIERS), filter);
    byte[] startRow = PVarchar.INSTANCE.toBytes(tenantId + tenantTypeId);
    assertArrayEquals(startRow, scan.getStartRow());
    byte[] stopRow = startRow;
    assertArrayEquals(ByteUtil.nextKey(stopRow), scan.getStopRow());
}
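
The stop-row assertion relies on ByteUtil.nextKey, which presumably returns the smallest key strictly greater than its argument by incrementing the last byte (carrying over 0xFF bytes). An illustrative sketch with made-up values:

// nextKey({0x01, 0x02}) -> {0x01, 0x03}, so a scan over
// [startRow, nextKey(startRow)) covers exactly the rows whose key
// begins with the tenantId + tenantTypeId prefix.
byte[] key = new byte[] { 0x01, 0x02 };
byte[] next = ByteUtil.nextKey(key);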