Use of org.apache.phoenix.schema.PColumn in project phoenix by apache.
From the class QueryCompilerTest, method testSameColumnNameInPKAndNonPK:
@Test
public void testSameColumnNameInPKAndNonPK() throws Exception {
Connection conn = DriverManager.getConnection(getUrl());
try {
String query = "CREATE TABLE t1 (k integer not null primary key, a.k decimal, b.k decimal)";
conn.createStatement().execute(query);
PhoenixConnection pconn = conn.unwrap(PhoenixConnection.class);
PColumn c = pconn.getTable(new PTableKey(pconn.getTenantId(), "T1")).getColumnForColumnName("K");
assertTrue(SchemaUtil.isPKColumn(c));
} finally {
conn.close();
}
}
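The same T1 table also shows the other lookup path for PColumn: a column that lives in a column family is resolved through PColumnFamily rather than directly on the PTable. A minimal follow-on sketch (the extra assertions are illustrative and not part of the original test; it assumes the T1 table created above):
PhoenixConnection pconn = conn.unwrap(PhoenixConnection.class);
PTable table = pconn.getTable(new PTableKey(pconn.getTenantId(), "T1"));
// the unqualified name resolves to the primary key column, as the test asserts
PColumn pk = table.getColumnForColumnName("K");
assertTrue(SchemaUtil.isPKColumn(pk));
// the family-qualified lookup goes through PColumnFamily and returns the non-PK column
PColumn aK = table.getColumnFamily("A").getPColumnForColumnName("K");
assertFalse(SchemaUtil.isPKColumn(aK));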
Use of org.apache.phoenix.schema.PColumn in project phoenix by apache.
From the class MetaDataEndpointImpl, method dropColumnsFromChildViews:
private MetaDataMutationResult dropColumnsFromChildViews(Region region, PTable basePhysicalTable, List<RowLock> locks, List<Mutation> tableMetadata, List<Mutation> mutationsForAddingColumnsToViews, byte[] schemaName, byte[] tableName, List<ImmutableBytesPtr> invalidateList, long clientTimeStamp, TableViewFinder childViewsResult, List<byte[]> tableNamesToDelete, List<SharedTableState> sharedTablesToDelete) throws IOException, SQLException {
List<Delete> columnDeletesForBaseTable = new ArrayList<>(tableMetadata.size());
// Isolate the deletes relevant to dropping columns from the base table.
for (Mutation m : tableMetadata) {
if (m instanceof Delete) {
byte[][] rkmd = new byte[5][];
int pkCount = getVarChars(m.getRow(), rkmd);
if (pkCount > COLUMN_NAME_INDEX && Bytes.compareTo(schemaName, rkmd[SCHEMA_NAME_INDEX]) == 0 && Bytes.compareTo(tableName, rkmd[TABLE_NAME_INDEX]) == 0) {
columnDeletesForBaseTable.add((Delete) m);
}
}
}
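// For each child view, apply the matching column deletes, adjust the remaining columns' ordinal positions, and drop any view indexes that reference a dropped column.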
for (ViewInfo viewInfo : childViewsResult.getViewInfoList()) {
short numColsDeleted = 0;
byte[] viewTenantId = viewInfo.getTenantId();
byte[] viewSchemaName = viewInfo.getSchemaName();
byte[] viewName = viewInfo.getViewName();
byte[] viewKey = SchemaUtil.getTableKey(viewTenantId, viewSchemaName, viewName);
// lock the rows corresponding to views so that no other thread can modify the view
// meta-data
RowLock viewRowLock = acquireLock(region, viewKey, locks);
PTable view = doGetTable(viewKey, clientTimeStamp, viewRowLock);
ColumnOrdinalPositionUpdateList ordinalPositionList = new ColumnOrdinalPositionUpdateList();
int numCols = view.getColumns().size();
int minDroppedColOrdinalPos = Integer.MAX_VALUE;
for (Delete columnDeleteForBaseTable : columnDeletesForBaseTable) {
PColumn existingViewColumn = null;
byte[][] rkmd = new byte[5][];
getVarChars(columnDeleteForBaseTable.getRow(), rkmd);
String columnName = Bytes.toString(rkmd[COLUMN_NAME_INDEX]);
String columnFamily = rkmd[FAMILY_NAME_INDEX] == null ? null : Bytes.toString(rkmd[FAMILY_NAME_INDEX]);
byte[] columnKey = getColumnKey(viewKey, columnName, columnFamily);
try {
existingViewColumn = columnFamily == null ? view.getColumnForColumnName(columnName) : view.getColumnFamily(columnFamily).getPColumnForColumnName(columnName);
} catch (ColumnFamilyNotFoundException e) {
// ignore since it means that the column family is not present for the column being dropped
} catch (ColumnNotFoundException e) {
// ignore since it means the column is not present in the view
}
// If the view has a WHERE clause, make sure it does not reference the column being dropped; if it does, disallow the drop.
if (existingViewColumn != null && view.getViewStatement() != null) {
ParseNode viewWhere = new SQLParser(view.getViewStatement()).parseQuery().getWhere();
PhoenixConnection conn = null;
try {
conn = QueryUtil.getConnectionOnServer(env.getConfiguration()).unwrap(PhoenixConnection.class);
} catch (ClassNotFoundException e) {
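// should not happen on the server; the Phoenix driver class is expected to be on the region server classpath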
}
PhoenixStatement statement = new PhoenixStatement(conn);
TableRef baseTableRef = new TableRef(basePhysicalTable);
ColumnResolver columnResolver = FromCompiler.getResolver(baseTableRef);
StatementContext context = new StatementContext(statement, columnResolver);
Expression whereExpression = WhereCompiler.compile(context, viewWhere);
Expression colExpression = new ColumnRef(baseTableRef, existingViewColumn.getPosition()).newColumnExpression();
ColumnFinder columnFinder = new ColumnFinder(colExpression);
whereExpression.accept(columnFinder);
if (columnFinder.getColumnFound()) {
return new MetaDataMutationResult(MutationCode.UNALLOWED_TABLE_MUTATION, EnvironmentEdgeManager.currentTimeMillis(), basePhysicalTable);
}
}
if (existingViewColumn != null) {
minDroppedColOrdinalPos = Math.min(getOrdinalPosition(view, existingViewColumn), minDroppedColOrdinalPos);
--numColsDeleted;
if (ordinalPositionList.size() == 0) {
ordinalPositionList.setOffset(view.getBucketNum() == null ? 1 : 0);
for (PColumn col : view.getColumns()) {
ordinalPositionList.addColumn(getColumnKey(viewKey, col));
}
}
ordinalPositionList.dropColumn(columnKey);
Delete viewColumnDelete = new Delete(columnKey, clientTimeStamp);
mutationsForAddingColumnsToViews.add(viewColumnDelete);
// drop any view indexes that need this column
dropIndexes(view, region, invalidateList, locks, clientTimeStamp, schemaName, view.getName().getBytes(), mutationsForAddingColumnsToViews, existingViewColumn, tableNamesToDelete, sharedTablesToDelete);
}
}
updateViewHeaderRow(basePhysicalTable, tableMetadata, mutationsForAddingColumnsToViews, invalidateList, clientTimeStamp, numColsDeleted, numColsDeleted, viewKey, view, ordinalPositionList, numCols, true);
}
return null;
}
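The client-visible effect of the UNALLOWED_TABLE_MUTATION result above is that a base table column cannot be dropped while a child view filters on it. A hedged JDBC sketch of that behavior (base_t and child_v are made-up names; getUrl() and fail() are the test helpers used in the first snippet):
try (Connection conn = DriverManager.getConnection(getUrl());
     Statement stmt = conn.createStatement()) {
    stmt.execute("CREATE TABLE base_t (k INTEGER NOT NULL PRIMARY KEY, v1 VARCHAR, v2 VARCHAR)");
    stmt.execute("CREATE VIEW child_v AS SELECT * FROM base_t WHERE v2 = 'a'");
    try {
        // v2 is referenced in the view's WHERE clause, so the server answers with
        // MutationCode.UNALLOWED_TABLE_MUTATION and the client sees a SQLException
        stmt.execute("ALTER TABLE base_t DROP COLUMN v2");
        fail("expected DROP COLUMN to be rejected");
    } catch (SQLException expected) {
        // expected: the drop would invalidate the child view's WHERE clause
    }
}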
Use of org.apache.phoenix.schema.PColumn in project phoenix by apache.
From the class MetaDataEndpointImpl, method addColumnsAndTablePropertiesToChildViews:
private MetaDataMutationResult addColumnsAndTablePropertiesToChildViews(PTable basePhysicalTable, List<Mutation> tableMetadata, List<Mutation> mutationsForAddingColumnsToViews, byte[] schemaName, byte[] tableName, List<ImmutableBytesPtr> invalidateList, long clientTimeStamp, TableViewFinder childViewsResult, Region region, List<RowLock> locks) throws IOException, SQLException {
List<PutWithOrdinalPosition> columnPutsForBaseTable = Lists.newArrayListWithExpectedSize(tableMetadata.size());
Map<TableProperty, Cell> tablePropertyCellMap = Maps.newHashMapWithExpectedSize(tableMetadata.size());
// Isolate the puts relevant to adding columns. Also figure out what kind of columns are being added.
for (Mutation m : tableMetadata) {
if (m instanceof Put) {
byte[][] rkmd = new byte[5][];
int pkCount = getVarChars(m.getRow(), rkmd);
// check if this put is for adding a column
if (pkCount > COLUMN_NAME_INDEX && rkmd[COLUMN_NAME_INDEX] != null && rkmd[COLUMN_NAME_INDEX].length > 0 && Bytes.compareTo(schemaName, rkmd[SCHEMA_NAME_INDEX]) == 0 && Bytes.compareTo(tableName, rkmd[TABLE_NAME_INDEX]) == 0) {
columnPutsForBaseTable.add(new PutWithOrdinalPosition((Put) m, getInteger((Put) m, TABLE_FAMILY_BYTES, ORDINAL_POSITION_BYTES)));
} else // check if the put is for a table property
if (pkCount <= COLUMN_NAME_INDEX && Bytes.compareTo(schemaName, rkmd[SCHEMA_NAME_INDEX]) == 0 && Bytes.compareTo(tableName, rkmd[TABLE_NAME_INDEX]) == 0) {
for (Cell cell : m.getFamilyCellMap().get(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES)) {
for (TableProperty tableProp : TableProperty.values()) {
byte[] propNameBytes = Bytes.toBytes(tableProp.getPropertyName());
if (Bytes.compareTo(propNameBytes, 0, propNameBytes.length, cell.getQualifierArray(), cell.getQualifierOffset(), cell.getQualifierLength()) == 0 && tableProp.isValidOnView() && tableProp.isMutable()) {
Cell tablePropCell = CellUtil.createCell(cell.getRow(), CellUtil.cloneFamily(cell), CellUtil.cloneQualifier(cell), cell.getTimestamp(), cell.getTypeByte(), CellUtil.cloneValue(cell));
tablePropertyCellMap.put(tableProp, tablePropCell);
}
}
}
}
}
}
// Sort the puts by ordinal position
Collections.sort(columnPutsForBaseTable);
for (ViewInfo viewInfo : childViewsResult.getViewInfoList()) {
short deltaNumPkColsSoFar = 0;
short columnsAddedToView = 0;
short columnsAddedToBaseTable = 0;
byte[] tenantId = viewInfo.getTenantId();
byte[] schema = viewInfo.getSchemaName();
byte[] table = viewInfo.getViewName();
byte[] viewKey = SchemaUtil.getTableKey(tenantId, schema, table);
// lock the rows corresponding to views so that no other thread can modify the view meta-data
RowLock viewRowLock = acquireLock(region, viewKey, locks);
PTable view = doGetTable(viewKey, clientTimeStamp, viewRowLock);
ColumnOrdinalPositionUpdateList ordinalPositionList = new ColumnOrdinalPositionUpdateList();
List<PColumn> viewPkCols = new ArrayList<>(view.getPKColumns());
boolean addingExistingPkCol = false;
int numCols = view.getColumns().size();
// add the new columns to the child view
for (PutWithOrdinalPosition p : columnPutsForBaseTable) {
Put baseTableColumnPut = p.put;
PColumn existingViewColumn = null;
byte[][] rkmd = new byte[5][];
getVarChars(baseTableColumnPut.getRow(), rkmd);
String columnName = Bytes.toString(rkmd[COLUMN_NAME_INDEX]);
String columnFamily = rkmd[FAMILY_NAME_INDEX] == null ? null : Bytes.toString(rkmd[FAMILY_NAME_INDEX]);
try {
existingViewColumn = columnFamily == null ? view.getColumnForColumnName(columnName) : view.getColumnFamily(columnFamily).getPColumnForColumnName(columnName);
} catch (ColumnFamilyNotFoundException e) {
// ignore since it means that the column family is not present for the column to be added.
} catch (ColumnNotFoundException e) {
// ignore since it means the column is not present in the view
}
boolean isPkCol = columnFamily == null;
byte[] columnKey = getColumnKey(viewKey, columnName, columnFamily);
if (existingViewColumn != null) {
MetaDataMutationResult result = validateColumnForAddToBaseTable(existingViewColumn, baseTableColumnPut, basePhysicalTable, isPkCol, view);
if (result != null) {
return result;
}
if (isPkCol) {
viewPkCols.remove(existingViewColumn);
addingExistingPkCol = true;
}
/*
* For views that are not diverged, we need to make sure that the existing columns
* have the same ordinal position as in the base table. This is important because
* we rely on the ordinal position of the column to figure out whether dropping a
* column from the view will end up diverging the view from the base table.
*
* For already diverged views, we don't care about the ordinal position of the existing column.
*/
if (!isDivergedView(view)) {
int newOrdinalPosition = p.ordinalPosition;
// Check if the ordinal position of the column was getting updated from previous add column
// mutations.
int existingOrdinalPos = ordinalPositionList.getOrdinalPositionOfColumn(columnKey);
if (ordinalPositionList.size() == 0) {
/*
* No ordinal positions to be updated are in the list. In that case, check whether the
* existing ordinal position of the column is different from its new ordinal position.
* If yes, then initialize the ordinal position list with this column's ordinal position
* as the offset.
*/
existingOrdinalPos = getOrdinalPosition(view, existingViewColumn);
if (existingOrdinalPos != newOrdinalPosition) {
ordinalPositionList.setOffset(newOrdinalPosition);
ordinalPositionList.addColumn(columnKey, newOrdinalPosition);
for (PColumn col : view.getColumns()) {
int ordinalPos = getOrdinalPosition(view, col);
if (ordinalPos >= newOrdinalPosition) {
if (ordinalPos == existingOrdinalPos) {
/*
* No need to update ordinal positions of columns beyond the existing column's
* old ordinal position.
*/
break;
}
// increment ordinal position of columns occurring after this column by 1
int updatedPos = ordinalPos + 1;
ordinalPositionList.addColumn(getColumnKey(viewKey, col), updatedPos);
}
}
}
} else {
if (existingOrdinalPos != newOrdinalPosition) {
ordinalPositionList.addColumn(columnKey, newOrdinalPosition);
}
}
columnsAddedToBaseTable++;
}
} else {
// The column doesn't exist in the view.
Put viewColumnPut = new Put(columnKey, clientTimeStamp);
for (Cell cell : baseTableColumnPut.getFamilyCellMap().values().iterator().next()) {
viewColumnPut.add(CellUtil.createCell(columnKey, CellUtil.cloneFamily(cell), CellUtil.cloneQualifier(cell), cell.getTimestamp(), cell.getTypeByte(), CellUtil.cloneValue(cell)));
}
if (isDivergedView(view)) {
if (isPkCol) {
/*
* Only pk cols of the base table are added to the diverged views. These pk
* cols are added at the end.
*/
int lastOrdinalPos = getOrdinalPosition(view, view.getColumns().get(numCols - 1));
int newPosition = ++lastOrdinalPos;
byte[] ptr = new byte[PInteger.INSTANCE.getByteSize()];
PInteger.INSTANCE.getCodec().encodeInt(newPosition, ptr, 0);
viewColumnPut.add(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES, PhoenixDatabaseMetaData.ORDINAL_POSITION_BYTES, clientTimeStamp, ptr);
mutationsForAddingColumnsToViews.add(viewColumnPut);
} else {
// move on to the next column
continue;
}
} else {
int newOrdinalPosition = p.ordinalPosition;
/*
* For a non-diverged view, we need to make sure that the base table column
* is added at the right position.
*/
if (ordinalPositionList.size() == 0) {
ordinalPositionList.setOffset(newOrdinalPosition);
ordinalPositionList.addColumn(columnKey, newOrdinalPosition);
for (PColumn col : view.getColumns()) {
int ordinalPos = getOrdinalPosition(view, col);
if (ordinalPos >= newOrdinalPosition) {
// increment ordinal position of columns by 1
int updatedPos = ordinalPos + 1;
ordinalPositionList.addColumn(getColumnKey(viewKey, col), updatedPos);
}
}
} else {
ordinalPositionList.addColumn(columnKey, newOrdinalPosition);
}
mutationsForAddingColumnsToViews.add(viewColumnPut);
}
if (isPkCol) {
deltaNumPkColsSoFar++;
// Set the key sequence for the pk column to be added
short currentKeySeq = SchemaUtil.getMaxKeySeq(view);
short newKeySeq = (short) (currentKeySeq + deltaNumPkColsSoFar);
byte[] keySeqBytes = new byte[PSmallint.INSTANCE.getByteSize()];
PSmallint.INSTANCE.getCodec().encodeShort(newKeySeq, keySeqBytes, 0);
viewColumnPut.add(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES, PhoenixDatabaseMetaData.KEY_SEQ_BYTES, keySeqBytes);
addMutationsForAddingPkColsToViewIndexes(mutationsForAddingColumnsToViews, clientTimeStamp, view, deltaNumPkColsSoFar, columnName, viewColumnPut);
}
columnsAddedToView++;
columnsAddedToBaseTable++;
}
}
/*
* Allow adding a pk column to the base table only if: 1. all the view pk columns are exactly the same as the base
* table pk columns, or 2. we are adding all the existing view pk columns to the base table
*/
if (addingExistingPkCol && !viewPkCols.equals(basePhysicalTable.getPKColumns())) {
return new MetaDataMutationResult(MutationCode.UNALLOWED_TABLE_MUTATION, EnvironmentEdgeManager.currentTimeMillis(), basePhysicalTable);
}
addViewIndexesHeaderRowMutations(mutationsForAddingColumnsToViews, invalidateList, clientTimeStamp, view, deltaNumPkColsSoFar);
// set table properties in child view
if (!tablePropertyCellMap.isEmpty()) {
Put viewHeaderRowPut = new Put(viewKey, clientTimeStamp);
for (TableProperty tableProp : TableProperty.values()) {
Cell tablePropertyCell = tablePropertyCellMap.get(tableProp);
if (tablePropertyCell != null) {
// Add the property to the view only if it is not mutable on a view, or if it is mutable on a view and the property value is the same as the base table property (which means it wasn't changed on the view)
if (!tableProp.isMutableOnView() || tableProp.getPTableValue(view).equals(tableProp.getPTableValue(basePhysicalTable))) {
viewHeaderRowPut.add(CellUtil.createCell(viewKey, CellUtil.cloneFamily(tablePropertyCell), CellUtil.cloneQualifier(tablePropertyCell), clientTimeStamp, tablePropertyCell.getTypeByte(), CellUtil.cloneValue(tablePropertyCell)));
}
}
}
byte[] viewSequencePtr = new byte[PLong.INSTANCE.getByteSize()];
PLong.INSTANCE.getCodec().encodeLong(view.getSequenceNumber() + 1, viewSequencePtr, 0);
viewHeaderRowPut.add(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES, PhoenixDatabaseMetaData.TABLE_SEQ_NUM_BYTES, clientTimeStamp, viewSequencePtr);
// invalidate the view so that it is removed from the cache
invalidateList.add(new ImmutableBytesPtr(viewKey));
mutationsForAddingColumnsToViews.add(viewHeaderRowPut);
}
/*
* Increment the sequence number by 1 if:
* 1) For a diverged view, there were columns (pk columns) added to the view.
* 2) For a non-diverged view if the base column count changed.
*/
boolean changeSequenceNumber = (isDivergedView(view) && columnsAddedToView > 0) || (!isDivergedView(view) && columnsAddedToBaseTable > 0);
updateViewHeaderRow(basePhysicalTable, tableMetadata, mutationsForAddingColumnsToViews, invalidateList, clientTimeStamp, columnsAddedToView, columnsAddedToBaseTable, viewKey, view, ordinalPositionList, numCols, changeSequenceNumber);
}
return null;
}
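The complementary, client-visible effect of the code above: a column added to the base table is propagated to each child view and becomes resolvable as a PColumn on the view's PTable. A rough sketch under assumed names (base_t, child_v) and the default UPDATE_CACHE_FREQUENCY, where re-resolving the view refreshes the client's cached metadata:
try (Connection conn = DriverManager.getConnection(getUrl());
     Statement stmt = conn.createStatement()) {
    stmt.execute("CREATE TABLE base_t (k INTEGER NOT NULL PRIMARY KEY, v1 VARCHAR)");
    stmt.execute("CREATE VIEW child_v AS SELECT * FROM base_t");
    stmt.execute("ALTER TABLE base_t ADD v2 DECIMAL");
    // querying the view re-resolves it and picks up the propagated column
    stmt.executeQuery("SELECT v2 FROM child_v").close();
    PhoenixConnection pconn = conn.unwrap(PhoenixConnection.class);
    PTable view = pconn.getTable(new PTableKey(pconn.getTenantId(), "CHILD_V"));
    PColumn added = view.getColumnForColumnName("V2");
    assertFalse(SchemaUtil.isPKColumn(added));
}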
Use of org.apache.phoenix.schema.PColumn in project phoenix by apache.
From the class ProjectionCompiler, method projectIndexColumnFamily:
private static void projectIndexColumnFamily(StatementContext context, String cfName, TableRef tableRef, boolean resolveColumn, List<Expression> projectedExpressions, List<ExpressionProjector> projectedColumns) throws SQLException {
PTable index = tableRef.getTable();
PhoenixConnection conn = context.getConnection();
String tableName = index.getParentName().getString();
PTable table = conn.getTable(new PTableKey(conn.getTenantId(), tableName));
PColumnFamily pfamily = table.getColumnFamily(cfName);
for (PColumn column : pfamily.getColumns()) {
String indexColName = IndexUtil.getIndexColumnName(column);
PColumn indexColumn = null;
ColumnRef ref = null;
String indexColumnFamily = null;
try {
indexColumn = index.getColumnForColumnName(indexColName);
ref = new ColumnRef(tableRef, indexColumn.getPosition());
indexColumnFamily = indexColumn.getFamilyName() == null ? null : indexColumn.getFamilyName().getString();
} catch (ColumnNotFoundException e) {
if (index.getIndexType() == IndexType.LOCAL) {
try {
ref = new LocalIndexDataColumnRef(context, indexColName);
indexColumn = ref.getColumn();
indexColumnFamily = indexColumn.getFamilyName() == null ? null : (index.getIndexType() == IndexType.LOCAL ? IndexUtil.getLocalIndexColumnFamily(indexColumn.getFamilyName().getString()) : indexColumn.getFamilyName().getString());
} catch (ColumnFamilyNotFoundException c) {
throw e;
}
} else {
throw e;
}
}
if (resolveColumn) {
ref = context.getResolver().resolveColumn(index.getTableName().getString(), indexColumnFamily, indexColName);
}
Expression expression = ref.newColumnExpression();
projectedExpressions.add(expression);
String colName = column.getName().toString();
boolean isCaseSensitive = !SchemaUtil.normalizeIdentifier(colName).equals(colName);
projectedColumns.add(new ExpressionProjector(colName, tableRef.getTableAlias() == null ? table.getName().getString() : tableRef.getTableAlias(), expression, isCaseSensitive));
}
}
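The lookup above hinges on IndexUtil.getIndexColumnName, which derives the name a data table column carries inside the index. A small sketch of that mapping, assuming the usual ':' separator between the data column family and the data column name:
// PK columns have no family; covered columns keep their data column family as a prefix
String pkIndexName = IndexUtil.getIndexColumnName(null, "K");   // ":K"
String covIndexName = IndexUtil.getIndexColumnName("A", "V1");  // "A:V1"
// projectIndexColumnFamily() then resolves such names back to PColumns on the index,
// e.g. index.getColumnForColumnName("A:V1"), falling back to LocalIndexDataColumnRef
// for local indexes when the column is not materialized in the index itself.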
Use of org.apache.phoenix.schema.PColumn in project phoenix by apache.
From the class ProjectionCompiler, method projectAllIndexColumns:
private static void projectAllIndexColumns(StatementContext context, TableRef tableRef, boolean resolveColumn, List<Expression> projectedExpressions, List<ExpressionProjector> projectedColumns, List<? extends PDatum> targetColumns) throws SQLException {
ColumnResolver resolver = context.getResolver();
PTable index = tableRef.getTable();
int projectedOffset = projectedExpressions.size();
PhoenixConnection conn = context.getConnection();
PName tenantId = conn.getTenantId();
String tableName = index.getParentName().getString();
PTable dataTable = null;
try {
dataTable = conn.getTable(new PTableKey(tenantId, tableName));
} catch (TableNotFoundException e) {
if (tenantId != null) {
// Check with null tenantId
dataTable = conn.getTable(new PTableKey(null, tableName));
} else {
throw e;
}
}
int tableOffset = dataTable.getBucketNum() == null ? 0 : 1;
int minTablePKOffset = getMinPKOffset(dataTable, tenantId);
int minIndexPKOffset = getMinPKOffset(index, tenantId);
if (index.getIndexType() != IndexType.LOCAL) {
if (index.getColumns().size() - minIndexPKOffset != dataTable.getColumns().size() - minTablePKOffset) {
// The optimizer will end up not using this index anyway, so just throw
String schemaNameStr = dataTable.getSchemaName() == null ? null : dataTable.getSchemaName().getString();
String tableNameStr = dataTable.getTableName() == null ? null : dataTable.getTableName().getString();
throw new ColumnNotFoundException(schemaNameStr, tableNameStr, null, WildcardParseNode.INSTANCE.toString());
}
}
for (int i = tableOffset, j = tableOffset; i < dataTable.getColumns().size(); i++) {
PColumn column = dataTable.getColumns().get(i);
// Skip tenant ID column (which may not be the first column, but is the first PK column)
if (SchemaUtil.isPKColumn(column) && j++ < minTablePKOffset) {
tableOffset++;
continue;
}
PColumn tableColumn = dataTable.getColumns().get(i);
String indexColName = IndexUtil.getIndexColumnName(tableColumn);
PColumn indexColumn = null;
ColumnRef ref = null;
try {
indexColumn = index.getColumnForColumnName(indexColName);
ref = new ColumnRef(tableRef, indexColumn.getPosition());
} catch (ColumnNotFoundException e) {
if (index.getIndexType() == IndexType.LOCAL) {
try {
ref = new LocalIndexDataColumnRef(context, indexColName);
indexColumn = ref.getColumn();
} catch (ColumnFamilyNotFoundException c) {
throw e;
}
} else {
throw e;
}
}
String colName = tableColumn.getName().getString();
String tableAlias = tableRef.getTableAlias();
if (resolveColumn) {
try {
if (tableAlias != null) {
ref = resolver.resolveColumn(null, tableAlias, indexColName);
} else {
String schemaName = index.getSchemaName().getString();
ref = resolver.resolveColumn(schemaName.length() == 0 ? null : schemaName, index.getTableName().getString(), indexColName);
}
} catch (AmbiguousColumnException e) {
if (indexColumn.getFamilyName() != null) {
ref = resolver.resolveColumn(tableAlias != null ? tableAlias : index.getTableName().getString(), indexColumn.getFamilyName().getString(), indexColName);
} else {
throw e;
}
}
}
Expression expression = ref.newColumnExpression();
expression = coerceIfNecessary(i - tableOffset + projectedOffset, targetColumns, expression);
// We do not need to check if the column is a viewConstant, because view constants never
// appear as a column in an index
projectedExpressions.add(expression);
boolean isCaseSensitive = !SchemaUtil.normalizeIdentifier(colName).equals(colName);
ExpressionProjector projector = new ExpressionProjector(colName, tableRef.getTableAlias() == null ? dataTable.getName().getString() : tableRef.getTableAlias(), expression, isCaseSensitive);
projectedColumns.add(projector);
}
}
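From the client side, the effect of projectAllIndexColumns is that a SELECT * which the optimizer rewrites against an index still surfaces the data table's column names, because every data column is mapped to its index column here. An illustrative sketch (data_t and idx_v1 are made-up names; getUrl() is the test helper from the first snippet):
try (Connection conn = DriverManager.getConnection(getUrl());
     Statement stmt = conn.createStatement()) {
    stmt.execute("CREATE TABLE data_t (k INTEGER NOT NULL PRIMARY KEY, v1 VARCHAR, v2 VARCHAR)");
    stmt.execute("CREATE INDEX idx_v1 ON data_t (v1) INCLUDE (v2)");
    ResultSet rs = stmt.executeQuery("SELECT * FROM data_t WHERE v1 = 'a'");
    ResultSetMetaData md = rs.getMetaData();
    // labels come back as K, V1, V2 even when the rows are actually read from IDX_V1
    for (int i = 1; i <= md.getColumnCount(); i++) {
        System.out.println(md.getColumnName(i));
    }
}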