Use of org.apache.phoenix.schema.ColumnFamilyNotFoundException in project phoenix by apache.
The class PostDDLCompiler, method compile.
public MutationPlan compile(final List<TableRef> tableRefs, final byte[] emptyCF, final List<byte[]> projectCFs, final List<PColumn> deleteList, final long timestamp) throws SQLException {
PhoenixStatement statement = new PhoenixStatement(connection);
final StatementContext context = new StatementContext(statement, new ColumnResolver() {
@Override
public List<TableRef> getTables() {
return tableRefs;
}
@Override
public TableRef resolveTable(String schemaName, String tableName) throws SQLException {
throw new UnsupportedOperationException();
}
@Override
public ColumnRef resolveColumn(String schemaName, String tableName, String colName) throws SQLException {
throw new UnsupportedOperationException();
}
@Override
public List<PFunction> getFunctions() {
return Collections.<PFunction>emptyList();
}
@Override
public PFunction resolveFunction(String functionName) throws SQLException {
throw new FunctionNotFoundException(functionName);
}
@Override
public boolean hasUDFs() {
return false;
}
@Override
public PSchema resolveSchema(String schemaName) throws SQLException {
throw new SchemaNotFoundException(schemaName);
}
@Override
public List<PSchema> getSchemas() {
throw new UnsupportedOperationException();
}
}, scan, new SequenceManager(statement));
return new BaseMutationPlan(context, Operation.UPSERT) {
/* FIXME */
@Override
public MutationState execute() throws SQLException {
if (tableRefs.isEmpty()) {
return new MutationState(0, 1000, connection);
}
boolean wasAutoCommit = connection.getAutoCommit();
try {
connection.setAutoCommit(true);
SQLException sqlE = null;
/*
* Handles:
* 1) deletion of all rows for a DROP TABLE and subsequently deletion of all rows for a DROP INDEX;
* 2) deletion of all column values for an ALTER TABLE DROP COLUMN
* 3) updating the necessary rows to have an empty KV
* 4) updating table stats
*/
long totalMutationCount = 0;
for (final TableRef tableRef : tableRefs) {
Scan scan = ScanUtil.newScan(context.getScan());
SelectStatement select = SelectStatement.COUNT_ONE;
// We need to use this tableRef
ColumnResolver resolver = new ColumnResolver() {
@Override
public List<TableRef> getTables() {
return Collections.singletonList(tableRef);
}
@Override
public List<PFunction> getFunctions() {
return Collections.emptyList();
}
@Override
public TableRef resolveTable(String schemaName, String tableName) throws SQLException {
throw new UnsupportedOperationException();
}
@Override
public ColumnRef resolveColumn(String schemaName, String tableName, String colName) throws SQLException {
PColumn column = tableName != null ? tableRef.getTable().getColumnFamily(tableName).getPColumnForColumnName(colName) : tableRef.getTable().getColumnForColumnName(colName);
return new ColumnRef(tableRef, column.getPosition());
}
@Override
public PFunction resolveFunction(String functionName) throws SQLException {
throw new UnsupportedOperationException();
}
@Override
public boolean hasUDFs() {
return false;
}
@Override
public List<PSchema> getSchemas() {
throw new UnsupportedOperationException();
}
@Override
public PSchema resolveSchema(String schemaName) throws SQLException {
throw new SchemaNotFoundException(schemaName);
}
};
PhoenixStatement statement = new PhoenixStatement(connection);
StatementContext context = new StatementContext(statement, resolver, scan, new SequenceManager(statement));
long ts = timestamp;
// Transactional tables keep time in nanoseconds, so convert an explicit timestamp accordingly.
if (ts != HConstants.LATEST_TIMESTAMP && tableRef.getTable().isTransactional()) {
ts = TransactionUtil.convertToNanoseconds(ts);
}
ScanUtil.setTimeRange(scan, scan.getTimeRange().getMin(), ts);
if (emptyCF != null) {
scan.setAttribute(BaseScannerRegionObserver.EMPTY_CF, emptyCF);
scan.setAttribute(BaseScannerRegionObserver.EMPTY_COLUMN_QUALIFIER, EncodedColumnsUtil.getEmptyKeyValueInfo(tableRef.getTable()).getFirst());
}
ServerCache cache = null;
try {
if (deleteList != null) {
if (deleteList.isEmpty()) {
scan.setAttribute(BaseScannerRegionObserver.DELETE_AGG, QueryConstants.TRUE);
// In the case of a row deletion, add index metadata so mutable secondary indexing works
/* TODO: we currently manually run a scan to delete the index data here
ImmutableBytesWritable ptr = context.getTempPtr();
tableRef.getTable().getIndexMaintainers(ptr);
if (ptr.getLength() > 0) {
IndexMetaDataCacheClient client = new IndexMetaDataCacheClient(connection, tableRef);
cache = client.addIndexMetadataCache(context.getScanRanges(), ptr);
byte[] uuidValue = cache.getId();
scan.setAttribute(PhoenixIndexCodec.INDEX_UUID, uuidValue);
}
*/
} else {
// In the case of the empty key value column family changing, do not send the index
// metadata, as we're currently managing this from the client. It's possible for the
// data empty column family to stay the same, while the index empty column family
// changes.
PColumn column = deleteList.get(0);
byte[] cq = column.getColumnQualifierBytes();
if (emptyCF == null) {
scan.addColumn(column.getFamilyName().getBytes(), cq);
}
scan.setAttribute(BaseScannerRegionObserver.DELETE_CF, column.getFamilyName().getBytes());
scan.setAttribute(BaseScannerRegionObserver.DELETE_CQ, cq);
}
}
List<byte[]> columnFamilies = Lists.newArrayListWithExpectedSize(tableRef.getTable().getColumnFamilies().size());
if (projectCFs == null) {
for (PColumnFamily family : tableRef.getTable().getColumnFamilies()) {
columnFamilies.add(family.getName().getBytes());
}
} else {
for (byte[] projectCF : projectCFs) {
columnFamilies.add(projectCF);
}
}
// Need to project all column families into the scan, since we haven't yet created our empty key value
RowProjector projector = ProjectionCompiler.compile(context, SelectStatement.COUNT_ONE, GroupBy.EMPTY_GROUP_BY);
context.getAggregationManager().compile(context, GroupBy.EMPTY_GROUP_BY);
// Explicitly project these column families and don't project the empty key value,
// since at this point we haven't added the empty key value everywhere.
if (columnFamilies != null) {
scan.getFamilyMap().clear();
for (byte[] family : columnFamilies) {
scan.addFamily(family);
}
projector = new RowProjector(projector, false);
}
// Skip tables whose referenced columns or families were already removed by any other post-DDL operations.
try {
// Since dropping a VIEW does not affect the underlying data, we do
// not need to pass through the view statement here.
// Push where clause into scan
WhereCompiler.compile(context, select);
} catch (ColumnFamilyNotFoundException | ColumnNotFoundException | AmbiguousColumnException e) {
// The column or family was already dropped; move on to the next table.
continue;
}
QueryPlan plan = new AggregatePlan(context, select, tableRef, projector, null, null, OrderBy.EMPTY_ORDER_BY, null, GroupBy.EMPTY_GROUP_BY, null);
try {
ResultIterator iterator = plan.iterator();
try {
Tuple row = iterator.next();
ImmutableBytesWritable ptr = context.getTempPtr();
totalMutationCount += (Long) projector.getColumnProjector(0).getValue(row, PLong.INSTANCE, ptr);
} catch (SQLException e) {
sqlE = e;
} finally {
try {
iterator.close();
} catch (SQLException e) {
if (sqlE == null) {
sqlE = e;
} else {
sqlE.setNextException(e);
}
} finally {
if (sqlE != null) {
throw sqlE;
}
}
}
} catch (TableNotFoundException e) {
// Ignore and continue, as HBase throws when table hasn't been written to
// FIXME: Remove if this is fixed in 0.96
}
} finally {
if (cache != null) {
// Remove server cache if there is one
cache.close();
}
}
}
final long count = totalMutationCount;
return new MutationState(1, 1000, connection) {
@Override
public long getUpdateCount() {
return count;
}
};
} finally {
if (!wasAutoCommit) {
connection.setAutoCommit(wasAutoCommit);
}
}
}
};
}
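
In this compiler, ColumnFamilyNotFoundException (together with ColumnNotFoundException and AmbiguousColumnException) signals that the DDL operation which triggered this cleanup already removed the referenced column or family, so the table is skipped rather than failing the whole plan. Below is a minimal sketch of that skip-and-continue pattern; TableWork and sumOverTables are hypothetical names standing in for the per-table scan logic, not Phoenix API:

import java.sql.SQLException;
import java.util.List;

import org.apache.phoenix.schema.AmbiguousColumnException;
import org.apache.phoenix.schema.ColumnFamilyNotFoundException;
import org.apache.phoenix.schema.ColumnNotFoundException;
import org.apache.phoenix.schema.TableRef;

final class SkipMissingSchemaExample {
    // Hypothetical per-table work standing in for the WHERE compilation and scan above.
    interface TableWork {
        long run(TableRef ref) throws SQLException;
    }

    // Mirror of compile()'s loop: a missing column or family means the DDL that
    // triggered this cleanup already removed it, so the table is skipped.
    static long sumOverTables(List<TableRef> refs, TableWork work) throws SQLException {
        long total = 0;
        for (TableRef ref : refs) {
            try {
                total += work.run(ref);
            } catch (ColumnFamilyNotFoundException | ColumnNotFoundException
                    | AmbiguousColumnException e) {
                continue; // skip, as PostDDLCompiler does
            }
        }
        return total;
    }
}
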
Use of org.apache.phoenix.schema.ColumnFamilyNotFoundException in project phoenix by apache.
The class ConnectionQueryServicesImpl, method separateAndValidateProperties.
private Pair<HTableDescriptor, HTableDescriptor> separateAndValidateProperties(PTable table, Map<String, List<Pair<String, Object>>> properties, Set<String> colFamiliesForPColumnsToBeAdded, List<Pair<byte[], Map<String, Object>>> families, Map<String, Object> tableProps) throws SQLException {
Map<String, Map<String, Object>> stmtFamiliesPropsMap = new HashMap<>(properties.size());
Map<String, Object> commonFamilyProps = new HashMap<>();
boolean addingColumns = colFamiliesForPColumnsToBeAdded != null && !colFamiliesForPColumnsToBeAdded.isEmpty();
HashSet<String> existingColumnFamilies = existingColumnFamilies(table);
Map<String, Map<String, Object>> allFamiliesProps = new HashMap<>(existingColumnFamilies.size());
boolean isTransactional = table.isTransactional();
boolean willBeTransactional = false;
boolean isOrWillBeTransactional = isTransactional;
Integer newTTL = null;
for (String family : properties.keySet()) {
List<Pair<String, Object>> propsList = properties.get(family);
if (propsList != null && propsList.size() > 0) {
Map<String, Object> colFamilyPropsMap = new HashMap<String, Object>(propsList.size());
for (Pair<String, Object> prop : propsList) {
String propName = prop.getFirst();
Object propValue = prop.getSecond();
if ((MetaDataUtil.isHTableProperty(propName) || TableProperty.isPhoenixTableProperty(propName)) && addingColumns) {
// setting HTable and PhoenixTable properties while adding a column is not allowed.
throw new SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_SET_TABLE_PROPERTY_ADD_COLUMN).setMessage("Property: " + propName).build().buildException();
}
if (MetaDataUtil.isHTableProperty(propName)) {
// Can't have a column family name for a property that's an HTableProperty
if (!family.equals(QueryConstants.ALL_FAMILY_PROPERTIES_KEY)) {
throw new SQLExceptionInfo.Builder(SQLExceptionCode.COLUMN_FAMILY_NOT_ALLOWED_TABLE_PROPERTY).setMessage("Column Family: " + family + ", Property: " + propName).build().buildException();
}
tableProps.put(propName, propValue);
} else {
if (TableProperty.isPhoenixTableProperty(propName)) {
TableProperty.valueOf(propName).validate(true, !family.equals(QueryConstants.ALL_FAMILY_PROPERTIES_KEY), table.getType());
if (propName.equals(TTL)) {
newTTL = ((Number) prop.getSecond()).intValue();
// Even though TTL is really a HColumnProperty we treat it specially.
// We enforce that all column families have the same TTL.
commonFamilyProps.put(propName, prop.getSecond());
} else if (propName.equals(PhoenixDatabaseMetaData.TRANSACTIONAL) && Boolean.TRUE.equals(propValue)) {
willBeTransactional = isOrWillBeTransactional = true;
tableProps.put(TxConstants.READ_NON_TX_DATA, propValue);
}
} else {
if (MetaDataUtil.isHColumnProperty(propName)) {
if (family.equals(QueryConstants.ALL_FAMILY_PROPERTIES_KEY)) {
commonFamilyProps.put(propName, propValue);
} else {
colFamilyPropsMap.put(propName, propValue);
}
} else {
// Neither an HColumnProperty nor a Phoenix table property, so the property cannot be altered.
throw new SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_ALTER_PROPERTY).setMessage("Column Family: " + family + ", Property: " + propName).build().buildException();
}
}
}
}
if (!colFamilyPropsMap.isEmpty()) {
stmtFamiliesPropsMap.put(family, colFamilyPropsMap);
}
}
}
commonFamilyProps = Collections.unmodifiableMap(commonFamilyProps);
boolean isAddingPkColOnly = colFamiliesForPColumnsToBeAdded.size() == 1 && colFamiliesForPColumnsToBeAdded.contains(null);
if (!commonFamilyProps.isEmpty()) {
if (!addingColumns) {
// Add the common family props to all existing column families
for (String existingColFamily : existingColumnFamilies) {
Map<String, Object> m = new HashMap<String, Object>(commonFamilyProps.size());
m.putAll(commonFamilyProps);
allFamiliesProps.put(existingColFamily, m);
}
} else {
// Add the common family props to the column families of the columns being added
for (String colFamily : colFamiliesForPColumnsToBeAdded) {
if (colFamily != null) {
// only set properties for key value columns
Map<String, Object> m = new HashMap<String, Object>(commonFamilyProps.size());
m.putAll(commonFamilyProps);
allFamiliesProps.put(colFamily, m);
} else if (isAddingPkColOnly) {
// Common family properties apply only to key value columns, not PK columns, so fail when only PK columns are being added.
throw new SQLExceptionInfo.Builder(SQLExceptionCode.SET_UNSUPPORTED_PROP_ON_ALTER_TABLE).build().buildException();
}
}
}
}
// Now go through the column family properties specified in the statement and merge them with the common family properties.
for (String f : stmtFamiliesPropsMap.keySet()) {
if (!addingColumns && !existingColumnFamilies.contains(f)) {
String schemaNameStr = table.getSchemaName() == null ? null : table.getSchemaName().getString();
String tableNameStr = table.getTableName() == null ? null : table.getTableName().getString();
throw new ColumnFamilyNotFoundException(schemaNameStr, tableNameStr, f);
}
if (addingColumns && !colFamiliesForPColumnsToBeAdded.contains(f)) {
throw new SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_SET_PROPERTY_FOR_COLUMN_NOT_ADDED).build().buildException();
}
Map<String, Object> commonProps = allFamiliesProps.get(f);
Map<String, Object> stmtProps = stmtFamiliesPropsMap.get(f);
if (commonProps != null) {
if (stmtProps != null) {
// merge common props with statement props for the family
commonProps.putAll(stmtProps);
}
} else {
// if no common props were specified, then assign family specific props
if (stmtProps != null) {
allFamiliesProps.put(f, stmtProps);
}
}
}
// Ensure a (possibly empty) properties entry exists for each new column family being added, e.g. for ALTER TABLE ADD CF.COL.
for (String cf : colFamiliesForPColumnsToBeAdded) {
if (cf != null && allFamiliesProps.get(cf) == null) {
allFamiliesProps.put(cf, new HashMap<String, Object>());
}
}
if (table.getColumnFamilies().isEmpty() && !addingColumns && !commonFamilyProps.isEmpty()) {
allFamiliesProps.put(Bytes.toString(table.getDefaultFamilyName() == null ? QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES : table.getDefaultFamilyName().getBytes()), commonFamilyProps);
}
// Views are not allowed to have any of these properties.
if (table.getType() == PTableType.VIEW && (!stmtFamiliesPropsMap.isEmpty() || !commonFamilyProps.isEmpty() || !tableProps.isEmpty())) {
throw new SQLExceptionInfo.Builder(SQLExceptionCode.VIEW_WITH_PROPERTIES).build().buildException();
}
HTableDescriptor newTableDescriptor = null;
HTableDescriptor origTableDescriptor = null;
if (!allFamiliesProps.isEmpty() || !tableProps.isEmpty()) {
byte[] tableNameBytes = Bytes.toBytes(table.getPhysicalName().getString());
HTableDescriptor existingTableDescriptor = origTableDescriptor = getTableDescriptor(tableNameBytes);
newTableDescriptor = new HTableDescriptor(existingTableDescriptor);
if (!tableProps.isEmpty()) {
// add all the table properties to the existing table descriptor
for (Entry<String, Object> entry : tableProps.entrySet()) {
newTableDescriptor.setValue(entry.getKey(), entry.getValue() != null ? entry.getValue().toString() : null);
}
}
if (addingColumns) {
// Make sure that all the CFs of the table have the same TTL as the empty CF.
setTTLForNewCFs(allFamiliesProps, table, newTableDescriptor, newTTL);
}
// Set TTL on all table column families, even if they're not referenced here
if (newTTL != null) {
for (PColumnFamily family : table.getColumnFamilies()) {
if (!allFamiliesProps.containsKey(family.getName().getString())) {
Map<String, Object> familyProps = Maps.newHashMapWithExpectedSize(1);
familyProps.put(TTL, newTTL);
allFamiliesProps.put(family.getName().getString(), familyProps);
}
}
}
Integer defaultTxMaxVersions = null;
if (isOrWillBeTransactional) {
// Calculate default for max versions
Map<String, Object> emptyFamilyProps = allFamiliesProps.get(SchemaUtil.getEmptyColumnFamilyAsString(table));
if (emptyFamilyProps != null) {
defaultTxMaxVersions = (Integer) emptyFamilyProps.get(HConstants.VERSIONS);
}
if (defaultTxMaxVersions == null) {
if (isTransactional) {
defaultTxMaxVersions = newTableDescriptor.getFamily(SchemaUtil.getEmptyColumnFamily(table)).getMaxVersions();
} else {
defaultTxMaxVersions = this.getProps().getInt(QueryServices.MAX_VERSIONS_TRANSACTIONAL_ATTRIB, QueryServicesOptions.DEFAULT_MAX_VERSIONS_TRANSACTIONAL);
}
}
if (willBeTransactional) {
// Set VERSIONS for all column families when transitioning to transactional
for (PColumnFamily family : table.getColumnFamilies()) {
if (!allFamiliesProps.containsKey(family.getName().getString())) {
Map<String, Object> familyProps = Maps.newHashMapWithExpectedSize(1);
familyProps.put(HConstants.VERSIONS, defaultTxMaxVersions);
allFamiliesProps.put(family.getName().getString(), familyProps);
}
}
}
}
// Set the transaction manager's TTL property for a table that will be transactional or an already transactional table.
if (isOrWillBeTransactional) {
int ttl = getTTL(table, newTableDescriptor, newTTL);
if (ttl != HColumnDescriptor.DEFAULT_TTL) {
for (Map.Entry<String, Map<String, Object>> entry : allFamiliesProps.entrySet()) {
Map<String, Object> props = entry.getValue();
if (props == null) {
// Write the fresh map back into the entry so the TTL put below isn't lost.
props = new HashMap<String, Object>();
entry.setValue(props);
}
props.put(TxConstants.PROPERTY_TTL, ttl);
// Remove the HBase-level TTL if we're not transitioning this table to become transactional,
// or if the existing transactional table wasn't originally non transactional.
if (!willBeTransactional && !Boolean.valueOf(newTableDescriptor.getValue(TxConstants.READ_NON_TX_DATA))) {
props.remove(TTL);
}
}
}
}
for (Entry<String, Map<String, Object>> entry : allFamiliesProps.entrySet()) {
Map<String, Object> familyProps = entry.getValue();
if (isOrWillBeTransactional) {
if (!familyProps.containsKey(HConstants.VERSIONS)) {
familyProps.put(HConstants.VERSIONS, defaultTxMaxVersions);
}
}
byte[] cf = Bytes.toBytes(entry.getKey());
HColumnDescriptor colDescriptor = newTableDescriptor.getFamily(cf);
if (colDescriptor == null) {
// new column family
colDescriptor = generateColumnFamilyDescriptor(new Pair<>(cf, familyProps), table.getType());
newTableDescriptor.addFamily(colDescriptor);
} else {
modifyColumnFamilyDescriptor(colDescriptor, familyProps);
}
if (isOrWillBeTransactional) {
checkTransactionalVersionsValue(colDescriptor);
}
}
}
return new Pair<>(origTableDescriptor, newTableDescriptor);
}
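
The ColumnFamilyNotFoundException here guards ALTER TABLE SET statements: naming a column family that neither exists on the table nor is being created by the columns being added is rejected up front. A reduced sketch of just that check, using the same three-argument constructor seen above; the class, method, and parameter names are hypothetical:

import java.util.Map;
import java.util.Set;

import org.apache.phoenix.schema.ColumnFamilyNotFoundException;

final class FamilyPropertyCheck {
    // Hypothetical reduction of the validation above: every family named in the
    // statement must already exist on the table unless columns are being added.
    static void checkFamilies(Map<String, Map<String, Object>> stmtFamilyProps,
                              Set<String> existingFamilies,
                              boolean addingColumns,
                              String schemaName, String tableName)
            throws ColumnFamilyNotFoundException {
        for (String family : stmtFamilyProps.keySet()) {
            if (!addingColumns && !existingFamilies.contains(family)) {
                throw new ColumnFamilyNotFoundException(schemaName, tableName, family);
            }
        }
    }
}
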
Use of org.apache.phoenix.schema.ColumnFamilyNotFoundException in project phoenix by apache.
The class IndexUtil, method getDataColumn.
public static PColumn getDataColumn(PTable dataTable, String indexColumnName) {
int pos = indexColumnName.indexOf(INDEX_COLUMN_NAME_SEP);
if (pos < 0) {
throw new IllegalArgumentException("Could not find expected '" + INDEX_COLUMN_NAME_SEP + "' separator in index column name of \"" + indexColumnName + "\"");
}
if (pos == 0) {
try {
return dataTable.getPKColumn(indexColumnName.substring(1));
} catch (ColumnNotFoundException e) {
throw new IllegalArgumentException("Could not find PK column \"" + indexColumnName.substring(pos + 1) + "\" in index column name of \"" + indexColumnName + "\"", e);
}
}
PColumnFamily family;
try {
family = dataTable.getColumnFamily(getDataColumnFamilyName(indexColumnName));
} catch (ColumnFamilyNotFoundException e) {
throw new IllegalArgumentException("Could not find column family \"" + indexColumnName.substring(0, pos) + "\" in index column name of \"" + indexColumnName + "\"", e);
}
try {
return family.getPColumnForColumnName(indexColumnName.substring(pos + 1));
} catch (ColumnNotFoundException e) {
throw new IllegalArgumentException("Could not find column \"" + indexColumnName.substring(pos + 1) + "\" in index column name of \"" + indexColumnName + "\"", e);
}
}
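
getDataColumn translates an index column name back into the data table column: everything before the separator is the column family (empty for PK columns), everything after is the column name, and ColumnFamilyNotFoundException is rewrapped as an IllegalArgumentException naming the offending family. A usage sketch, assuming ':' is the INDEX_COLUMN_NAME_SEP value and using illustrative column names:

import org.apache.phoenix.schema.PColumn;
import org.apache.phoenix.schema.PTable;
import org.apache.phoenix.util.IndexUtil;

final class GetDataColumnDemo {
    // dataTable would come from Phoenix's metadata cache; the names below are illustrative.
    static void demo(PTable dataTable) {
        PColumn pkColumn = IndexUtil.getDataColumn(dataTable, ":ID");     // empty family -> PK column
        PColumn kvColumn = IndexUtil.getDataColumn(dataTable, "CF:NAME"); // family CF, column NAME
        System.out.println(pkColumn.getName() + " / " + kvColumn.getName());
    }
}
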
Use of org.apache.phoenix.schema.ColumnFamilyNotFoundException in project phoenix by apache.
The class MetaDataEndpointImpl, method dropColumn.
@Override
public void dropColumn(RpcController controller, DropColumnRequest request, RpcCallback<MetaDataResponse> done) {
List<Mutation> tableMetaData = null;
final List<byte[]> tableNamesToDelete = Lists.newArrayList();
final List<SharedTableState> sharedTablesToDelete = Lists.newArrayList();
try {
tableMetaData = ProtobufUtil.getMutations(request);
MetaDataMutationResult result = mutateColumn(tableMetaData, new ColumnMutator() {
@Override
public MetaDataMutationResult updateMutation(PTable table, byte[][] rowKeyMetaData, List<Mutation> tableMetaData, Region region, List<ImmutableBytesPtr> invalidateList, List<RowLock> locks, long clientTimeStamp) throws IOException, SQLException {
byte[] tenantId = rowKeyMetaData[TENANT_ID_INDEX];
byte[] schemaName = rowKeyMetaData[SCHEMA_NAME_INDEX];
byte[] tableName = rowKeyMetaData[TABLE_NAME_INDEX];
boolean deletePKColumn = false;
List<Mutation> additionalTableMetaData = Lists.newArrayList();
PTableType type = table.getType();
if (type == PTableType.TABLE || type == PTableType.SYSTEM) {
TableViewFinder childViewsResult = new TableViewFinder();
findAllChildViews(region, tenantId, table, childViewsResult, clientTimeStamp);
if (childViewsResult.hasViews()) {
MetaDataMutationResult mutationResult = dropColumnsFromChildViews(region, table, locks, tableMetaData, additionalTableMetaData, schemaName, tableName, invalidateList, clientTimeStamp, childViewsResult, tableNamesToDelete, sharedTablesToDelete);
// return if we were not able to drop the column successfully
if (mutationResult != null)
return mutationResult;
}
}
for (Mutation m : tableMetaData) {
if (m instanceof Delete) {
byte[] key = m.getRow();
int pkCount = getVarChars(key, rowKeyMetaData);
if (pkCount > COLUMN_NAME_INDEX && Bytes.compareTo(schemaName, rowKeyMetaData[SCHEMA_NAME_INDEX]) == 0 && Bytes.compareTo(tableName, rowKeyMetaData[TABLE_NAME_INDEX]) == 0) {
PColumn columnToDelete = null;
try {
if (pkCount > FAMILY_NAME_INDEX && rowKeyMetaData[PhoenixDatabaseMetaData.FAMILY_NAME_INDEX].length > 0) {
PColumnFamily family = table.getColumnFamily(rowKeyMetaData[PhoenixDatabaseMetaData.FAMILY_NAME_INDEX]);
columnToDelete = family.getPColumnForColumnNameBytes(rowKeyMetaData[PhoenixDatabaseMetaData.COLUMN_NAME_INDEX]);
} else if (pkCount > COLUMN_NAME_INDEX && rowKeyMetaData[PhoenixDatabaseMetaData.COLUMN_NAME_INDEX].length > 0) {
deletePKColumn = true;
columnToDelete = table.getPKColumn(new String(rowKeyMetaData[PhoenixDatabaseMetaData.COLUMN_NAME_INDEX]));
} else {
continue;
}
if (table.getType() == PTableType.VIEW) {
if (table.getBaseColumnCount() != DIVERGED_VIEW_BASE_COLUMN_COUNT && columnToDelete.getPosition() < table.getBaseColumnCount()) {
/*
* If the column being dropped is inherited from the base table, then the
* view is about to diverge itself from the base table. The consequence of
* this divergence is that that any further meta-data changes made to the
* base table will not be propagated to the hierarchy of views where this
* view is the root.
*/
byte[] viewKey = SchemaUtil.getTableKey(tenantId, schemaName, tableName);
Put updateBaseColumnCountPut = new Put(viewKey);
byte[] baseColumnCountPtr = new byte[PInteger.INSTANCE.getByteSize()];
PInteger.INSTANCE.getCodec().encodeInt(DIVERGED_VIEW_BASE_COLUMN_COUNT, baseColumnCountPtr, 0);
updateBaseColumnCountPut.addColumn(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES, PhoenixDatabaseMetaData.BASE_COLUMN_COUNT_BYTES, clientTimeStamp, baseColumnCountPtr);
additionalTableMetaData.add(updateBaseColumnCountPut);
}
}
if (columnToDelete.isViewReferenced()) {
// Disallow deletion of column referenced in WHERE clause of view
return new MetaDataMutationResult(MutationCode.UNALLOWED_TABLE_MUTATION, EnvironmentEdgeManager.currentTimeMillis(), table, columnToDelete);
}
// drop any indexes that need the column that is going to be dropped
dropIndexes(table, region, invalidateList, locks, clientTimeStamp, schemaName, tableName, additionalTableMetaData, columnToDelete, tableNamesToDelete, sharedTablesToDelete);
} catch (ColumnFamilyNotFoundException e) {
return new MetaDataMutationResult(MutationCode.COLUMN_NOT_FOUND, EnvironmentEdgeManager.currentTimeMillis(), table, columnToDelete);
} catch (ColumnNotFoundException e) {
return new MetaDataMutationResult(MutationCode.COLUMN_NOT_FOUND, EnvironmentEdgeManager.currentTimeMillis(), table, columnToDelete);
}
}
}
}
if (deletePKColumn) {
if (table.getPKColumns().size() == 1) {
return new MetaDataMutationResult(MutationCode.NO_PK_COLUMNS, EnvironmentEdgeManager.currentTimeMillis(), null);
}
}
tableMetaData.addAll(additionalTableMetaData);
long currentTime = MetaDataUtil.getClientTimeStamp(tableMetaData);
return new MetaDataMutationResult(MutationCode.TABLE_ALREADY_EXISTS, currentTime, null, tableNamesToDelete, sharedTablesToDelete);
}
});
if (result != null) {
done.run(MetaDataMutationResult.toProto(result));
}
} catch (IOException ioe) {
ProtobufUtil.setControllerException(controller, ioe);
}
}
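
dropColumn collapses both lookup failures into the single client-visible MutationCode.COLUMN_NOT_FOUND: whether the family or the column itself is missing, the client sees the same result. A sketch of that collapse with a simplified result enum (the real method builds a MetaDataMutationResult); resolve() is a hypothetical reduction of the row-key parsing above:

import java.sql.SQLException;

import org.apache.phoenix.schema.ColumnFamilyNotFoundException;
import org.apache.phoenix.schema.ColumnNotFoundException;
import org.apache.phoenix.schema.PColumn;
import org.apache.phoenix.schema.PTable;

final class DropColumnLookup {
    enum Result { OK, COLUMN_NOT_FOUND }

    // Hypothetical reduction of the lookup above: a key-value column is resolved
    // through its family, a PK column directly; either miss yields COLUMN_NOT_FOUND.
    static Result resolve(PTable table, byte[] family, byte[] column) throws SQLException {
        try {
            PColumn col = (family != null && family.length > 0)
                    ? table.getColumnFamily(family).getPColumnForColumnNameBytes(column)
                    : table.getPKColumn(new String(column));
            return col != null ? Result.OK : Result.COLUMN_NOT_FOUND;
        } catch (ColumnFamilyNotFoundException | ColumnNotFoundException e) {
            return Result.COLUMN_NOT_FOUND;
        }
    }
}
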
Use of org.apache.phoenix.schema.ColumnFamilyNotFoundException in project phoenix by apache.
The class MetaDataEndpointImpl, method dropColumnsFromChildViews.
private MetaDataMutationResult dropColumnsFromChildViews(Region region, PTable basePhysicalTable, List<RowLock> locks, List<Mutation> tableMetadata, List<Mutation> mutationsForAddingColumnsToViews, byte[] schemaName, byte[] tableName, List<ImmutableBytesPtr> invalidateList, long clientTimeStamp, TableViewFinder childViewsResult, List<byte[]> tableNamesToDelete, List<SharedTableState> sharedTablesToDelete) throws IOException, SQLException {
List<Delete> columnDeletesForBaseTable = new ArrayList<>(tableMetadata.size());
// Collect the Delete mutations that correspond to the base table columns being dropped.
for (Mutation m : tableMetadata) {
if (m instanceof Delete) {
byte[][] rkmd = new byte[5][];
int pkCount = getVarChars(m.getRow(), rkmd);
if (pkCount > COLUMN_NAME_INDEX && Bytes.compareTo(schemaName, rkmd[SCHEMA_NAME_INDEX]) == 0 && Bytes.compareTo(tableName, rkmd[TABLE_NAME_INDEX]) == 0) {
columnDeletesForBaseTable.add((Delete) m);
}
}
}
for (ViewInfo viewInfo : childViewsResult.getViewInfoList()) {
short numColsDeleted = 0;
byte[] viewTenantId = viewInfo.getTenantId();
byte[] viewSchemaName = viewInfo.getSchemaName();
byte[] viewName = viewInfo.getViewName();
byte[] viewKey = SchemaUtil.getTableKey(viewTenantId, viewSchemaName, viewName);
// lock the rows corresponding to views so that no other thread can modify the view
// meta-data
RowLock viewRowLock = acquireLock(region, viewKey, locks);
PTable view = doGetTable(viewKey, clientTimeStamp, viewRowLock);
ColumnOrdinalPositionUpdateList ordinalPositionList = new ColumnOrdinalPositionUpdateList();
int numCols = view.getColumns().size();
int minDroppedColOrdinalPos = Integer.MAX_VALUE;
for (Delete columnDeleteForBaseTable : columnDeletesForBaseTable) {
PColumn existingViewColumn = null;
byte[][] rkmd = new byte[5][];
getVarChars(columnDeleteForBaseTable.getRow(), rkmd);
String columnName = Bytes.toString(rkmd[COLUMN_NAME_INDEX]);
String columnFamily = rkmd[FAMILY_NAME_INDEX] == null ? null : Bytes.toString(rkmd[FAMILY_NAME_INDEX]);
byte[] columnKey = getColumnKey(viewKey, columnName, columnFamily);
try {
existingViewColumn = columnFamily == null ? view.getColumnForColumnName(columnName) : view.getColumnFamily(columnFamily).getPColumnForColumnName(columnName);
} catch (ColumnFamilyNotFoundException e) {
// ignore since it means that the column family is not present for the column to
// be added.
} catch (ColumnNotFoundException e) {
// ignore since it means the column is not present in the view
}
// If the view's WHERE clause references the column being dropped, we cannot drop it.
if (existingViewColumn != null && view.getViewStatement() != null) {
ParseNode viewWhere = new SQLParser(view.getViewStatement()).parseQuery().getWhere();
PhoenixConnection conn = null;
try {
conn = QueryUtil.getConnectionOnServer(env.getConfiguration()).unwrap(PhoenixConnection.class);
} catch (ClassNotFoundException e) {
// Should not happen: the Phoenix driver is always on the server classpath.
}
PhoenixStatement statement = new PhoenixStatement(conn);
TableRef baseTableRef = new TableRef(basePhysicalTable);
ColumnResolver columnResolver = FromCompiler.getResolver(baseTableRef);
StatementContext context = new StatementContext(statement, columnResolver);
Expression whereExpression = WhereCompiler.compile(context, viewWhere);
Expression colExpression = new ColumnRef(baseTableRef, existingViewColumn.getPosition()).newColumnExpression();
ColumnFinder columnFinder = new ColumnFinder(colExpression);
whereExpression.accept(columnFinder);
if (columnFinder.getColumnFound()) {
return new MetaDataMutationResult(MutationCode.UNALLOWED_TABLE_MUTATION, EnvironmentEdgeManager.currentTimeMillis(), basePhysicalTable);
}
}
minDroppedColOrdinalPos = Math.min(getOrdinalPosition(view, existingViewColumn), minDroppedColOrdinalPos);
if (existingViewColumn != null) {
--numColsDeleted;
if (ordinalPositionList.size() == 0) {
ordinalPositionList.setOffset(view.getBucketNum() == null ? 1 : 0);
for (PColumn col : view.getColumns()) {
ordinalPositionList.addColumn(getColumnKey(viewKey, col));
}
}
ordinalPositionList.dropColumn(columnKey);
Delete viewColumnDelete = new Delete(columnKey, clientTimeStamp);
mutationsForAddingColumnsToViews.add(viewColumnDelete);
// drop any view indexes that need this column
dropIndexes(view, region, invalidateList, locks, clientTimeStamp, schemaName, view.getName().getBytes(), mutationsForAddingColumnsToViews, existingViewColumn, tableNamesToDelete, sharedTablesToDelete);
}
}
updateViewHeaderRow(basePhysicalTable, tableMetadata, mutationsForAddingColumnsToViews, invalidateList, clientTimeStamp, numColsDeleted, numColsDeleted, viewKey, view, ordinalPositionList, numCols, true);
}
return null;
}
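
Here the two not-found exceptions are deliberately swallowed: a child view that never inherited the dropped column (or its column family) simply has nothing to delete. A compact sketch of the probe, returning null in place of the empty catch blocks; findViewColumn is a hypothetical helper, not Phoenix API:

import java.sql.SQLException;

import org.apache.phoenix.schema.ColumnFamilyNotFoundException;
import org.apache.phoenix.schema.ColumnNotFoundException;
import org.apache.phoenix.schema.PColumn;
import org.apache.phoenix.schema.PTable;

final class ViewColumnProbe {
    // Hypothetical helper mirroring the lookup above: return the view's copy of
    // the column, or null when the view (or its family) doesn't carry it.
    static PColumn findViewColumn(PTable view, String family, String column) throws SQLException {
        try {
            return family == null
                    ? view.getColumnForColumnName(column)
                    : view.getColumnFamily(family).getPColumnForColumnName(column);
        } catch (ColumnFamilyNotFoundException | ColumnNotFoundException e) {
            return null; // nothing to drop in this view
        }
    }
}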