Use of org.apache.phoenix.schema.PColumnFamily in project phoenix by apache.
The class ConnectionQueryServicesImpl, method ensureViewIndexTableCreated:
private void ensureViewIndexTableCreated(PTable table, long timestamp, boolean isNamespaceMapped) throws SQLException {
    byte[] physicalTableName = table.getPhysicalName().getBytes();
    HTableDescriptor htableDesc = this.getTableDescriptor(physicalTableName);
    Map<String, Object> tableProps = createPropertiesMap(htableDesc.getValues());
    List<Pair<byte[], Map<String, Object>>> families = Lists.newArrayListWithExpectedSize(Math.max(1, table.getColumnFamilies().size() + 1));
    if (table.getColumnFamilies().isEmpty()) {
        byte[] familyName = SchemaUtil.getEmptyColumnFamily(table);
        Map<String, Object> familyProps = createPropertiesMap(htableDesc.getFamily(familyName).getValues());
        families.add(new Pair<byte[], Map<String, Object>>(familyName, familyProps));
    } else {
        for (PColumnFamily family : table.getColumnFamilies()) {
            byte[] familyName = family.getName().getBytes();
            Map<String, Object> familyProps = createPropertiesMap(htableDesc.getFamily(familyName).getValues());
            families.add(new Pair<byte[], Map<String, Object>>(familyName, familyProps));
        }
        // Always create default column family, because we don't know in advance if we'll
        // need it for an index with no covered columns.
        families.add(new Pair<byte[], Map<String, Object>>(table.getDefaultFamilyName().getBytes(), Collections.<String, Object>emptyMap()));
    }
    byte[][] splits = null;
    if (table.getBucketNum() != null) {
        splits = SaltingUtil.getSalteByteSplitPoints(table.getBucketNum());
    }
    // Transfer over table values into tableProps
    // TODO: encapsulate better
    tableProps.put(PhoenixDatabaseMetaData.TRANSACTIONAL, table.isTransactional());
    tableProps.put(PhoenixDatabaseMetaData.IMMUTABLE_ROWS, table.isImmutableRows());
    ensureViewIndexTableCreated(physicalTableName, tableProps, families, splits, timestamp, isNamespaceMapped);
}
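createPropertiesMap is a private helper of ConnectionQueryServicesImpl and is not shown in the snippet above. Below is a minimal sketch of what such a conversion could look like; the class and method names (FamilyPropertiesSketch, toPropertiesMap) are hypothetical stand-ins, not Phoenix's implementation, and the sketch assumes the raw HColumnDescriptor values are simply wanted as strings.

import java.util.HashMap;
import java.util.Map;

import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.util.Bytes;

public class FamilyPropertiesSketch {

    // Hypothetical stand-in for the private createPropertiesMap helper used above:
    // copy the raw per-family descriptor values into a String-keyed property map.
    // The real helper may convert values differently; this is illustrative only.
    static Map<String, Object> toPropertiesMap(HColumnDescriptor family) {
        Map<String, Object> props = new HashMap<String, Object>();
        for (Map.Entry<ImmutableBytesWritable, ImmutableBytesWritable> e : family.getValues().entrySet()) {
            props.put(Bytes.toString(e.getKey().copyBytes()), Bytes.toString(e.getValue().copyBytes()));
        }
        return props;
    }
}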
Use of org.apache.phoenix.schema.PColumnFamily in project phoenix by apache.
The class FormatToKeyValueReducer, method initColumnsMap:
private void initColumnsMap(PhoenixConnection conn) throws SQLException {
    Map<byte[], Integer> indexMap = new TreeMap<>(Bytes.BYTES_COMPARATOR);
    columnIndexes = new HashMap<>();
    int columnIndex = 0;
    for (int index = 0; index < logicalNames.size(); index++) {
        PTable table = PhoenixRuntime.getTable(conn, logicalNames.get(index));
        if (!table.getImmutableStorageScheme().equals(ImmutableStorageScheme.ONE_CELL_PER_COLUMN)) {
            List<PColumnFamily> cfs = table.getColumnFamilies();
            for (int i = 0; i < cfs.size(); i++) {
                byte[] family = cfs.get(i).getName().getBytes();
                Pair<byte[], byte[]> pair = new Pair<>(family, QueryConstants.SINGLE_KEYVALUE_COLUMN_QUALIFIER_BYTES);
                columnIndexes.put(new Integer(columnIndex), pair);
                columnIndex++;
            }
        } else {
            List<PColumn> cls = table.getColumns();
            for (int i = 0; i < cls.size(); i++) {
                PColumn c = cls.get(i);
                byte[] family = new byte[0];
                byte[] cq;
                if (!SchemaUtil.isPKColumn(c)) {
                    family = c.getFamilyName().getBytes();
                    cq = c.getColumnQualifierBytes();
                } else {
                    cq = c.getName().getBytes();
                }
                byte[] cfn = Bytes.add(family, QueryConstants.NAMESPACE_SEPARATOR_BYTES, cq);
                Pair<byte[], byte[]> pair = new Pair<>(family, cq);
                if (!indexMap.containsKey(cfn)) {
                    indexMap.put(cfn, new Integer(columnIndex));
                    columnIndexes.put(new Integer(columnIndex), pair);
                    columnIndex++;
                }
            }
        }
        byte[] emptyColumnFamily = SchemaUtil.getEmptyColumnFamily(table);
        byte[] emptyKeyValue = EncodedColumnsUtil.getEmptyKeyValueInfo(table).getFirst();
        Pair<byte[], byte[]> pair = new Pair<>(emptyColumnFamily, emptyKeyValue);
        columnIndexes.put(new Integer(columnIndex), pair);
        columnIndex++;
    }
}
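The family/qualifier pair built for each PColumn above can be isolated into a small helper. A minimal, self-contained sketch follows (the class name ColumnPairSketch is hypothetical): for a non-PK column the pair is (column family bytes, encoded column qualifier), while for a PK column the family is empty and the column name bytes are used as the qualifier.

import org.apache.hadoop.hbase.util.Pair;
import org.apache.phoenix.schema.PColumn;
import org.apache.phoenix.util.SchemaUtil;

public class ColumnPairSketch {

    // Mirror the per-column branch of initColumnsMap: PK columns carry no
    // column family, so an empty family is paired with the column name.
    static Pair<byte[], byte[]> familyAndQualifier(PColumn c) {
        if (SchemaUtil.isPKColumn(c)) {
            return new Pair<byte[], byte[]>(new byte[0], c.getName().getBytes());
        }
        return new Pair<byte[], byte[]>(c.getFamilyName().getBytes(), c.getColumnQualifierBytes());
    }
}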
Use of org.apache.phoenix.schema.PColumnFamily in project phoenix by apache.
The class MetaDataEndpointImpl, method dropColumn:
@Override
public void dropColumn(RpcController controller, DropColumnRequest request, RpcCallback<MetaDataResponse> done) {
    List<Mutation> tableMetaData = null;
    final List<byte[]> tableNamesToDelete = Lists.newArrayList();
    final List<SharedTableState> sharedTablesToDelete = Lists.newArrayList();
    try {
        tableMetaData = ProtobufUtil.getMutations(request);
        MetaDataMutationResult result = mutateColumn(tableMetaData, new ColumnMutator() {
            @Override
            public MetaDataMutationResult updateMutation(PTable table, byte[][] rowKeyMetaData, List<Mutation> tableMetaData, Region region, List<ImmutableBytesPtr> invalidateList, List<RowLock> locks, long clientTimeStamp) throws IOException, SQLException {
                byte[] tenantId = rowKeyMetaData[TENANT_ID_INDEX];
                byte[] schemaName = rowKeyMetaData[SCHEMA_NAME_INDEX];
                byte[] tableName = rowKeyMetaData[TABLE_NAME_INDEX];
                boolean deletePKColumn = false;
                List<Mutation> additionalTableMetaData = Lists.newArrayList();
                PTableType type = table.getType();
                if (type == PTableType.TABLE || type == PTableType.SYSTEM) {
                    TableViewFinder childViewsResult = new TableViewFinder();
                    findAllChildViews(region, tenantId, table, childViewsResult, clientTimeStamp);
                    if (childViewsResult.hasViews()) {
                        MetaDataMutationResult mutationResult = dropColumnsFromChildViews(region, table, locks, tableMetaData, additionalTableMetaData, schemaName, tableName, invalidateList, clientTimeStamp, childViewsResult, tableNamesToDelete, sharedTablesToDelete);
                        // return if we were not able to drop the column successfully
                        if (mutationResult != null)
                            return mutationResult;
                    }
                }
                for (Mutation m : tableMetaData) {
                    if (m instanceof Delete) {
                        byte[] key = m.getRow();
                        int pkCount = getVarChars(key, rowKeyMetaData);
                        if (pkCount > COLUMN_NAME_INDEX && Bytes.compareTo(schemaName, rowKeyMetaData[SCHEMA_NAME_INDEX]) == 0 && Bytes.compareTo(tableName, rowKeyMetaData[TABLE_NAME_INDEX]) == 0) {
                            PColumn columnToDelete = null;
                            try {
                                if (pkCount > FAMILY_NAME_INDEX && rowKeyMetaData[PhoenixDatabaseMetaData.FAMILY_NAME_INDEX].length > 0) {
                                    PColumnFamily family = table.getColumnFamily(rowKeyMetaData[PhoenixDatabaseMetaData.FAMILY_NAME_INDEX]);
                                    columnToDelete = family.getPColumnForColumnNameBytes(rowKeyMetaData[PhoenixDatabaseMetaData.COLUMN_NAME_INDEX]);
                                } else if (pkCount > COLUMN_NAME_INDEX && rowKeyMetaData[PhoenixDatabaseMetaData.COLUMN_NAME_INDEX].length > 0) {
                                    deletePKColumn = true;
                                    columnToDelete = table.getPKColumn(new String(rowKeyMetaData[PhoenixDatabaseMetaData.COLUMN_NAME_INDEX]));
                                } else {
                                    continue;
                                }
                                if (table.getType() == PTableType.VIEW) {
                                    if (table.getBaseColumnCount() != DIVERGED_VIEW_BASE_COLUMN_COUNT && columnToDelete.getPosition() < table.getBaseColumnCount()) {
                                        /*
                                         * If the column being dropped is inherited from the base table, then the
                                         * view is about to diverge itself from the base table. The consequence of
                                         * this divergence is that any further meta-data changes made to the
                                         * base table will not be propagated to the hierarchy of views where this
                                         * view is the root.
                                         */
                                        byte[] viewKey = SchemaUtil.getTableKey(tenantId, schemaName, tableName);
                                        Put updateBaseColumnCountPut = new Put(viewKey);
                                        byte[] baseColumnCountPtr = new byte[PInteger.INSTANCE.getByteSize()];
                                        PInteger.INSTANCE.getCodec().encodeInt(DIVERGED_VIEW_BASE_COLUMN_COUNT, baseColumnCountPtr, 0);
                                        updateBaseColumnCountPut.addColumn(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES, PhoenixDatabaseMetaData.BASE_COLUMN_COUNT_BYTES, clientTimeStamp, baseColumnCountPtr);
                                        additionalTableMetaData.add(updateBaseColumnCountPut);
                                    }
                                }
                                if (columnToDelete.isViewReferenced()) {
                                    // Disallow deletion of column referenced in WHERE clause of view
                                    return new MetaDataMutationResult(MutationCode.UNALLOWED_TABLE_MUTATION, EnvironmentEdgeManager.currentTimeMillis(), table, columnToDelete);
                                }
                                // drop any indexes that need the column that is going to be dropped
                                dropIndexes(table, region, invalidateList, locks, clientTimeStamp, schemaName, tableName, additionalTableMetaData, columnToDelete, tableNamesToDelete, sharedTablesToDelete);
                            } catch (ColumnFamilyNotFoundException e) {
                                return new MetaDataMutationResult(MutationCode.COLUMN_NOT_FOUND, EnvironmentEdgeManager.currentTimeMillis(), table, columnToDelete);
                            } catch (ColumnNotFoundException e) {
                                return new MetaDataMutationResult(MutationCode.COLUMN_NOT_FOUND, EnvironmentEdgeManager.currentTimeMillis(), table, columnToDelete);
                            }
                        }
                    }
                }
                if (deletePKColumn) {
                    if (table.getPKColumns().size() == 1) {
                        return new MetaDataMutationResult(MutationCode.NO_PK_COLUMNS, EnvironmentEdgeManager.currentTimeMillis(), null);
                    }
                }
                tableMetaData.addAll(additionalTableMetaData);
                long currentTime = MetaDataUtil.getClientTimeStamp(tableMetaData);
                return new MetaDataMutationResult(MutationCode.TABLE_ALREADY_EXISTS, currentTime, null, tableNamesToDelete, sharedTablesToDelete);
            }
        });
        if (result != null) {
            done.run(MetaDataMutationResult.toProto(result));
        }
    } catch (IOException ioe) {
        ProtobufUtil.setControllerException(controller, ioe);
    }
}
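The column lookup at the heart of updateMutation can be read in isolation: a column is resolved either through its PColumnFamily (when a family name is present in the row key metadata) or through the primary-key columns. A minimal sketch under that reading follows; the class and method names are hypothetical, and the exceptions are left to the caller, which in the real method translates them into a COLUMN_NOT_FOUND result.

import org.apache.phoenix.schema.ColumnFamilyNotFoundException;
import org.apache.phoenix.schema.ColumnNotFoundException;
import org.apache.phoenix.schema.PColumn;
import org.apache.phoenix.schema.PColumnFamily;
import org.apache.phoenix.schema.PTable;

public class DropColumnLookupSketch {

    // Resolve the column addressed by (familyName, columnName): key-value
    // columns go through the PColumnFamily, PK columns through getPKColumn.
    static PColumn resolveColumnToDelete(PTable table, byte[] familyName, byte[] columnName)
            throws ColumnFamilyNotFoundException, ColumnNotFoundException {
        if (familyName != null && familyName.length > 0) {
            PColumnFamily family = table.getColumnFamily(familyName);
            return family.getPColumnForColumnNameBytes(columnName);
        }
        return table.getPKColumn(new String(columnName));
    }
}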
Use of org.apache.phoenix.schema.PColumnFamily in project phoenix by apache.
The class ProjectionCompiler, method projectIndexColumnFamily:
private static void projectIndexColumnFamily(StatementContext context, String cfName, TableRef tableRef, boolean resolveColumn, List<Expression> projectedExpressions, List<ExpressionProjector> projectedColumns) throws SQLException {
    PTable index = tableRef.getTable();
    PhoenixConnection conn = context.getConnection();
    String tableName = index.getParentName().getString();
    PTable table = conn.getTable(new PTableKey(conn.getTenantId(), tableName));
    PColumnFamily pfamily = table.getColumnFamily(cfName);
    for (PColumn column : pfamily.getColumns()) {
        String indexColName = IndexUtil.getIndexColumnName(column);
        PColumn indexColumn = null;
        ColumnRef ref = null;
        String indexColumnFamily = null;
        try {
            indexColumn = index.getColumnForColumnName(indexColName);
            ref = new ColumnRef(tableRef, indexColumn.getPosition());
            indexColumnFamily = indexColumn.getFamilyName() == null ? null : indexColumn.getFamilyName().getString();
        } catch (ColumnNotFoundException e) {
            if (index.getIndexType() == IndexType.LOCAL) {
                try {
                    ref = new LocalIndexDataColumnRef(context, indexColName);
                    indexColumn = ref.getColumn();
                    indexColumnFamily = indexColumn.getFamilyName() == null ? null : (index.getIndexType() == IndexType.LOCAL ? IndexUtil.getLocalIndexColumnFamily(indexColumn.getFamilyName().getString()) : indexColumn.getFamilyName().getString());
                } catch (ColumnFamilyNotFoundException c) {
                    throw e;
                }
            } else {
                throw e;
            }
        }
        if (resolveColumn) {
            ref = context.getResolver().resolveColumn(index.getTableName().getString(), indexColumnFamily, indexColName);
        }
        Expression expression = ref.newColumnExpression();
        projectedExpressions.add(expression);
        String colName = column.getName().toString();
        boolean isCaseSensitive = !SchemaUtil.normalizeIdentifier(colName).equals(colName);
        projectedColumns.add(new ExpressionProjector(colName, tableRef.getTableAlias() == null ? table.getName().getString() : tableRef.getTableAlias(), expression, isCaseSensitive));
    }
}
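The mapping from a data-table column to its index column used above can be exercised on its own. Below is a minimal sketch (class and method names are hypothetical) that lists the index position for every column of a projected PColumnFamily; a ColumnNotFoundException here is what triggers the local-index fallback in the real method.

import java.sql.SQLException;

import org.apache.phoenix.schema.ColumnNotFoundException;
import org.apache.phoenix.schema.PColumn;
import org.apache.phoenix.schema.PColumnFamily;
import org.apache.phoenix.schema.PTable;
import org.apache.phoenix.util.IndexUtil;

public class IndexColumnMappingSketch {

    // For every column of the given family on the data table, derive the index
    // column name and look it up on the index table.
    static void printIndexColumns(PTable dataTable, PTable index, String cfName) throws SQLException {
        PColumnFamily pfamily = dataTable.getColumnFamily(cfName);
        for (PColumn column : pfamily.getColumns()) {
            String indexColName = IndexUtil.getIndexColumnName(column);
            try {
                PColumn indexColumn = index.getColumnForColumnName(indexColName);
                System.out.println(indexColName + " -> position " + indexColumn.getPosition());
            } catch (ColumnNotFoundException e) {
                // For a local index the column may only be reachable through the
                // data table (see LocalIndexDataColumnRef above); just report it here.
                System.out.println(indexColName + " not materialized in the index");
            }
        }
    }
}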
Use of org.apache.phoenix.schema.PColumnFamily in project phoenix by apache.
The class ProjectionCompiler, method projectTableColumnFamily:
private static void projectTableColumnFamily(StatementContext context, String cfName, TableRef tableRef, boolean resolveColumn, List<Expression> projectedExpressions, List<ExpressionProjector> projectedColumns) throws SQLException {
    PTable table = tableRef.getTable();
    PColumnFamily pfamily = table.getColumnFamily(cfName);
    for (PColumn column : pfamily.getColumns()) {
        ColumnRef ref = new ColumnRef(tableRef, column.getPosition());
        if (resolveColumn) {
            ref = context.getResolver().resolveColumn(table.getTableName().getString(), cfName, column.getName().getString());
        }
        Expression expression = ref.newColumnExpression();
        projectedExpressions.add(expression);
        String colName = column.getName().toString();
        boolean isCaseSensitive = !SchemaUtil.normalizeIdentifier(colName).equals(colName);
        projectedColumns.add(new ExpressionProjector(colName, tableRef.getTableAlias() == null ? table.getName().getString() : tableRef.getTableAlias(), expression, isCaseSensitive));
    }
}
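Both projection methods decide case sensitivity the same way: a projected column name is treated as case sensitive when it does not survive identifier normalization unchanged. A tiny sketch, assuming SchemaUtil.normalizeIdentifier upper-cases unquoted identifiers (the class name is hypothetical):

import org.apache.phoenix.util.SchemaUtil;

public class CaseSensitivitySketch {

    public static void main(String[] args) {
        for (String colName : new String[] { "COL1", "col1" }) {
            boolean isCaseSensitive = !SchemaUtil.normalizeIdentifier(colName).equals(colName);
            // Under the assumption above: "COL1" is unchanged by normalization
            // (not case sensitive), while "col1" changes (case sensitive).
            System.out.println(colName + " case sensitive: " + isCaseSensitive);
        }
    }
}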