use of org.apache.phoenix.schema.PColumn in project phoenix by apache.
the class PhoenixRuntime method getPkColumns.
@Deprecated
private static List<PColumn> getPkColumns(PTable ptable, Connection conn, boolean forDataTable) throws SQLException {
    PhoenixConnection pConn = conn.unwrap(PhoenixConnection.class);
    List<PColumn> pkColumns = ptable.getPKColumns();
    // Skip the salting column and the view index id column if present.
    // Skip the tenant id column too if the connection is tenant-specific and the table used by the query plan is multi-tenant.
    int offset = (ptable.getBucketNum() == null ? 0 : 1) + (ptable.isMultiTenant() && pConn.getTenantId() != null ? 1 : 0) + (ptable.getViewIndexId() == null ? 0 : 1);
    // Get a sublist of pkColumns by skipping the offset columns.
    pkColumns = pkColumns.subList(offset, pkColumns.size());
    if (ptable.getType() == PTableType.INDEX && forDataTable) {
        // Index tables have the same schema name as their parent/data tables.
        String fullDataTableName = ptable.getParentName().getString();
        // Get the corresponding columns of the data table.
        List<PColumn> dataColumns = IndexUtil.getDataColumns(fullDataTableName, pkColumns, pConn);
        pkColumns = dataColumns;
    }
    return pkColumns;
}
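As a worked example of the offset arithmetic above, consider a hypothetical table that is salted, declared MULTI_TENANT, and queried through a tenant-specific connection: the first two entries of getPKColumns() (the salt byte column and the tenant id column) are skipped. A minimal standalone sketch, not part of the Phoenix API:

// Standalone sketch of the offset computation used by getPkColumns; table shape is hypothetical.
public class PkOffsetSketch {
    public static void main(String[] args) {
        boolean salted = true;           // getBucketNum() != null for a table created with SALT_BUCKETS
        boolean multiTenant = true;      // table declared with MULTI_TENANT=true
        boolean tenantConnection = true; // connection opened with the TenantId property set
        boolean viewIndex = false;       // getViewIndexId() != null only for view index tables
        int offset = (salted ? 1 : 0)
                + (multiTenant && tenantConnection ? 1 : 0)
                + (viewIndex ? 1 : 0);
        // With an internal PK of (salt byte, TENANT_ID, EVENT_TIME, EVENT_ID), offset is 2,
        // so getPkColumns() would return only EVENT_TIME and EVENT_ID.
        System.out.println("PK columns skipped: " + offset);
    }
}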
use of org.apache.phoenix.schema.PColumn in project phoenix by apache.
the class PhoenixRuntime method getFirstPKColumnExpression.
private static Expression getFirstPKColumnExpression(PTable table) throws SQLException {
    if (table.getIndexType() == IndexType.LOCAL) {
        /*
         * With some hackery, we could deduce the tenant ID from a multi-tenant local index,
         * however it's not clear that we'd want to maintain the same prefixing of the region
         * start key, as the region boundaries may end up being different on a cluster being
         * replicated/backed-up to (which is the use case driving the method).
         */
        throw new SQLFeatureNotSupportedException();
    }
    // Skip the salt and viewIndexId columns.
    int pkPosition = (table.getBucketNum() == null ? 0 : 1) + (table.getViewIndexId() == null ? 0 : 1);
    List<PColumn> pkColumns = table.getPKColumns();
    return new RowKeyColumnExpression(pkColumns.get(pkPosition), new RowKeyValueAccessor(pkColumns, pkPosition));
}
use of org.apache.phoenix.schema.PColumn in project phoenix by apache.
the class PhoenixRuntime method getPkColsForSql.
/**
 * Get the column family / column name pairs that make up the row key of the table that will be queried.
 *
 * @param conn connection used to generate the query plan. The caller is responsible for closing the connection appropriately.
 * @param plan query plan to get the information for.
 * @return the pairs of column family name and column name for the columns in the data table that make up the row key of
 * the table used in the query plan. Column family names are optional and hence the first part of the pair is nullable.
 * Column names and family names are enclosed in double quotes to allow for case sensitivity and for the presence of
 * special characters. The salting column and the view index id column are not included. If the connection is tenant-specific
 * and the table used by the query plan is multi-tenant, then the tenant id column is not included either.
 * @throws SQLException
 */
public static List<Pair<String, String>> getPkColsForSql(Connection conn, QueryPlan plan) throws SQLException {
    checkNotNull(plan);
    checkNotNull(conn);
    List<PColumn> pkColumns = getPkColumns(plan.getTableRef().getTable(), conn);
    List<Pair<String, String>> columns = Lists.newArrayListWithExpectedSize(pkColumns.size());
    String columnName;
    String familyName;
    for (PColumn pCol : pkColumns) {
        columnName = addQuotes(pCol.getName().getString());
        familyName = pCol.getFamilyName() != null ? addQuotes(pCol.getFamilyName().getString()) : null;
        columns.add(new Pair<String, String>(familyName, columnName));
    }
    return columns;
}
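A hedged usage sketch of getPkColsForSql. It assumes a QueryPlan obtained by compiling a statement through PhoenixStatement.optimizeQuery(...); the JDBC URL, table, and query are made up for illustration:

// Hypothetical usage of PhoenixRuntime.getPkColsForSql; names below are illustrative only.
import java.sql.Connection;
import java.sql.DriverManager;
import java.util.List;
import org.apache.hadoop.hbase.util.Pair;
import org.apache.phoenix.compile.QueryPlan;
import org.apache.phoenix.jdbc.PhoenixStatement;
import org.apache.phoenix.util.PhoenixRuntime;

public class PkColsForSqlExample {
    public static void main(String[] args) throws Exception {
        try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost")) {
            // Compile (but do not execute) the query to obtain its QueryPlan.
            PhoenixStatement stmt = conn.createStatement().unwrap(PhoenixStatement.class);
            QueryPlan plan = stmt.optimizeQuery("SELECT * FROM MY_SCHEMA.EVENTS WHERE EVENT_ID = 'e1'");
            // Each pair is (column family or null, double-quoted column name).
            List<Pair<String, String>> pkCols = PhoenixRuntime.getPkColsForSql(conn, plan);
            for (Pair<String, String> col : pkCols) {
                System.out.println(col.getFirst() + " / " + col.getSecond());
            }
        }
    }
}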
use of org.apache.phoenix.schema.PColumn in project phoenix by apache.
the class PhoenixRuntime method getColumnInfo.
/**
 * Returns the column info for the given column in the given table.
 *
 * @param table the table to look the column up in.
 * @param columnName user-specified column name. May be family-qualified or bare.
 * @return the ColumnInfo associated with the column in the table
 * @throws SQLException if either parameter is null, or if the column is not found or is ambiguous.
 */
public static ColumnInfo getColumnInfo(PTable table, String columnName) throws SQLException {
    if (table == null) {
        throw new SQLException("Table must not be null.");
    }
    if (columnName == null) {
        throw new SQLException("columnName must not be null.");
    }
    PColumn pColumn = null;
    if (columnName.contains(QueryConstants.NAME_SEPARATOR)) {
        String[] tokens = columnName.split(QueryConstants.NAME_SEPARATOR_REGEX);
        if (tokens.length != 2) {
            throw new SQLException(String.format("Unable to process column %s, expected family-qualified name.", columnName));
        }
        String familyName = tokens[0];
        String familyColumn = tokens[1];
        PColumnFamily family = table.getColumnFamily(familyName);
        pColumn = family.getPColumnForColumnName(familyColumn);
    } else {
        pColumn = table.getColumnForColumnName(columnName);
    }
    return getColumnInfo(pColumn);
}
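A hedged usage sketch of getColumnInfo. It assumes the PTable is resolved via PhoenixRuntime.getTable(conn, fullTableName); the table name, the bare PK column, and the family-qualified column (default column family "0") are hypothetical:

// Hypothetical lookup of ColumnInfo; table and column names are made up for illustration.
import java.sql.Connection;
import java.sql.DriverManager;
import org.apache.phoenix.schema.PTable;
import org.apache.phoenix.util.ColumnInfo;
import org.apache.phoenix.util.PhoenixRuntime;

public class ColumnInfoExample {
    public static void main(String[] args) throws Exception {
        try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost")) {
            PTable table = PhoenixRuntime.getTable(conn, "MY_SCHEMA.EVENTS");
            // Bare PK column name ...
            ColumnInfo pk = PhoenixRuntime.getColumnInfo(table, "EVENT_ID");
            // ... and a family-qualified name for a non-PK column in the default family "0".
            ColumnInfo payload = PhoenixRuntime.getColumnInfo(table, "0.PAYLOAD");
            System.out.println(pk.getColumnName() + " -> " + pk.getSqlType());
            System.out.println(payload.getColumnName() + " -> " + payload.getSqlType());
        }
    }
}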
use of org.apache.phoenix.schema.PColumn in project phoenix by apache.
the class PhoenixRuntime method encodeColumnValues.
/**
 * Encodes the given column values into a single byte array, using the table's schema to serialize each value.
 *
 * @param conn connection that was used for reading/generating the values.
 * @param fullTableName fully qualified table name.
 * @param values values of the columns.
 * @param columns list of pairs of columns, with the column family as the first part and the column name as the second part.
 * The column family is optional and hence nullable. Columns in the list have to be in the same order as the order of occurrence
 * of their values in the object array.
 * @return values encoded in a byte array
 * @throws SQLException
 * @see {@link #decodeValues(Connection, String, byte[], List)}
 */
public static byte[] encodeColumnValues(Connection conn, String fullTableName, Object[] values, List<Pair<String, String>> columns) throws SQLException {
    PTable table = getTable(conn, fullTableName);
    List<PColumn> pColumns = getColumns(table, columns);
    List<Expression> expressions = new ArrayList<Expression>(pColumns.size());
    int i = 0;
    for (PColumn col : pColumns) {
        Object value = values[i];
        // For purposes of encoding, the sort order of the columns doesn't matter.
        Expression expr = LiteralExpression.newConstant(value, col.getDataType(), col.getMaxLength(), col.getScale());
        expressions.add(expr);
        i++;
    }
    KeyValueSchema kvSchema = buildKeyValueSchema(pColumns);
    ImmutableBytesWritable ptr = new ImmutableBytesWritable();
    ValueBitSet valueSet = ValueBitSet.newInstance(kvSchema);
    return kvSchema.toBytes(expressions.toArray(new Expression[0]), valueSet, ptr);
}
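A hedged round-trip sketch for encodeColumnValues. The column list, values, and table name are hypothetical, and the decode step assumes a decodeColumnValues counterpart with the same (Connection, String, byte[], List) shape as the decodeValues method referenced in the javadoc above:

// Hypothetical round trip through encodeColumnValues; names and values are made up for illustration.
import java.sql.Connection;
import java.sql.DriverManager;
import java.util.Arrays;
import java.util.List;
import org.apache.hadoop.hbase.util.Pair;
import org.apache.phoenix.util.PhoenixRuntime;

public class EncodeValuesExample {
    public static void main(String[] args) throws Exception {
        try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost")) {
            String tableName = "MY_SCHEMA.EVENTS";
            // Columns in the same order as the values below; family is null for PK columns.
            List<Pair<String, String>> columns = Arrays.asList(
                    new Pair<String, String>(null, "EVENT_ID"),
                    new Pair<String, String>("0", "PAYLOAD"));
            Object[] values = new Object[] { "e1", "hello" };
            byte[] encoded = PhoenixRuntime.encodeColumnValues(conn, tableName, values, columns);
            // Assumed companion decode call; expected to invert the encoding for the same column list.
            Object[] decoded = PhoenixRuntime.decodeColumnValues(conn, tableName, encoded, columns);
            System.out.println(Arrays.toString(decoded));
        }
    }
}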