Use of org.apache.phoenix.coprocessor.MetaDataProtocol.MutationCode in project phoenix by apache.
From the class MetaDataClient, the method dropFunction:
private MutationState dropFunction(String functionName, boolean ifExists) throws SQLException {
    connection.rollback();
    boolean wasAutoCommit = connection.getAutoCommit();
    try {
        PName tenantId = connection.getTenantId();
        byte[] key = SchemaUtil.getFunctionKey(tenantId == null ? ByteUtil.EMPTY_BYTE_ARRAY : tenantId.getBytes(), Bytes.toBytes(functionName));
        Long scn = connection.getSCN();
        long clientTimeStamp = scn == null ? HConstants.LATEST_TIMESTAMP : scn;
        try {
            PFunction function = connection.getMetaDataCache().getFunction(new PTableKey(tenantId, functionName));
            if (function.isTemporaryFunction()) {
                connection.removeFunction(tenantId, functionName, clientTimeStamp);
                return new MutationState(0, 0, connection);
            }
        } catch (FunctionNotFoundException e) {
            // Not in the client-side cache; fall through and issue the drop against the server.
        }
        List<Mutation> functionMetaData = Lists.newArrayListWithExpectedSize(2);
        Delete functionDelete = new Delete(key, clientTimeStamp);
        functionMetaData.add(functionDelete);
        MetaDataMutationResult result = connection.getQueryServices().dropFunction(functionMetaData, ifExists);
        MutationCode code = result.getMutationCode();
        switch (code) {
            case FUNCTION_NOT_FOUND:
                if (!ifExists) {
                    throw new FunctionNotFoundException(functionName);
                }
                break;
            default:
                connection.removeFunction(tenantId, functionName, result.getMutationTime());
                break;
        }
        return new MutationState(0, 0, connection);
    } finally {
        connection.setAutoCommit(wasAutoCommit);
    }
}
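A minimal caller-side sketch of how this path is typically reached through the JDBC driver; the connection URL and function name below are placeholders, not part of the original source. DROP FUNCTION IF EXISTS maps to the ifExists flag above, so a FUNCTION_NOT_FOUND result is silently ignored instead of being rethrown as FunctionNotFoundException.

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.SQLException;
import java.sql.Statement;

public class DropFunctionExample {
    public static void main(String[] args) throws SQLException {
        // "jdbc:phoenix:localhost" and MY_REVERSE are illustrative placeholders.
        try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost");
             Statement stmt = conn.createStatement()) {
            // IF EXISTS sets ifExists = true, so FUNCTION_NOT_FOUND is swallowed above.
            stmt.execute("DROP FUNCTION IF EXISTS MY_REVERSE");
        }
    }
}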
Use of org.apache.phoenix.coprocessor.MetaDataProtocol.MutationCode in project phoenix by apache.
From the class MetaDataClient, the method processMutationResult:
private MutationCode processMutationResult(String schemaName, String tableName, MetaDataMutationResult result) throws SQLException {
    final MutationCode mutationCode = result.getMutationCode();
    PName tenantId = connection.getTenantId();
    switch (mutationCode) {
        case TABLE_NOT_FOUND:
            // Only called for add/remove column so parentTableName will always be null
            connection.removeTable(tenantId, SchemaUtil.getTableName(schemaName, tableName), null, HConstants.LATEST_TIMESTAMP);
            throw new TableNotFoundException(schemaName, tableName);
        case UNALLOWED_TABLE_MUTATION:
            String columnName = null;
            String familyName = null;
            String msg = null;
            // TODO: better to return error code
            if (result.getColumnName() != null) {
                familyName = result.getFamilyName() == null ? null : Bytes.toString(result.getFamilyName());
                columnName = Bytes.toString(result.getColumnName());
                msg = "Cannot add/drop column referenced by VIEW";
            }
            throw new SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_MUTATE_TABLE).setSchemaName(schemaName).setTableName(tableName).setFamilyName(familyName).setColumnName(columnName).setMessage(msg).build().buildException();
        case NO_OP:
        case COLUMN_ALREADY_EXISTS:
        case COLUMN_NOT_FOUND:
            break;
        case CONCURRENT_TABLE_MUTATION:
            addTableToCache(result);
            if (logger.isDebugEnabled()) {
                logger.debug(LogUtil.addCustomAnnotations("CONCURRENT_TABLE_MUTATION for table " + SchemaUtil.getTableName(schemaName, tableName), connection));
            }
            throw new ConcurrentTableMutationException(schemaName, tableName);
        case NEWER_TABLE_FOUND:
            throw new NewerTableAlreadyExistsException(schemaName, tableName, result.getTable());
        case NO_PK_COLUMNS:
            throw new SQLExceptionInfo.Builder(SQLExceptionCode.PRIMARY_KEY_MISSING).setSchemaName(schemaName).setTableName(tableName).build().buildException();
        case TABLE_ALREADY_EXISTS:
            break;
        default:
            throw new SQLExceptionInfo.Builder(SQLExceptionCode.UNEXPECTED_MUTATION_CODE).setSchemaName(schemaName).setTableName(tableName).setMessage("mutation code: " + mutationCode).build().buildException();
    }
    return mutationCode;
}
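For context, a hedged caller-side sketch (the connection URL, schema, table, and column names are placeholders): an ALTER TABLE that the server rejects with UNALLOWED_TABLE_MUTATION surfaces to the JDBC client as SQLExceptionCode.CANNOT_MUTATE_TABLE, per the switch above.

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.SQLException;
import java.sql.Statement;
import org.apache.phoenix.exception.SQLExceptionCode;

public class AlterTableErrorExample {
    public static void main(String[] args) throws SQLException {
        try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost");
             Statement stmt = conn.createStatement()) {
            stmt.execute("ALTER TABLE MY_SCHEMA.MY_TABLE DROP COLUMN COL_REFERENCED_BY_VIEW");
        } catch (SQLException e) {
            // UNALLOWED_TABLE_MUTATION is translated into CANNOT_MUTATE_TABLE by processMutationResult.
            if (e.getErrorCode() == SQLExceptionCode.CANNOT_MUTATE_TABLE.getErrorCode()) {
                System.err.println("Column is still referenced by a view; drop the view first.");
            }
        }
    }
}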
Use of org.apache.phoenix.coprocessor.MetaDataProtocol.MutationCode in project phoenix by apache.
From the class MetaDataClient, the method dropSchema:
public MutationState dropSchema(DropSchemaStatement executableDropSchemaStatement) throws SQLException {
    connection.rollback();
    boolean wasAutoCommit = connection.getAutoCommit();
    try {
        PSchema schema = new PSchema(executableDropSchemaStatement.getSchemaName());
        String schemaName = schema.getSchemaName();
        boolean ifExists = executableDropSchemaStatement.ifExists();
        byte[] key = SchemaUtil.getSchemaKey(schemaName);
        Long scn = connection.getSCN();
        long clientTimeStamp = scn == null ? HConstants.LATEST_TIMESTAMP : scn;
        List<Mutation> schemaMetaData = Lists.newArrayListWithExpectedSize(2);
        Delete schemaDelete = new Delete(key, clientTimeStamp);
        schemaMetaData.add(schemaDelete);
        MetaDataMutationResult result = connection.getQueryServices().dropSchema(schemaMetaData, schemaName);
        MutationCode code = result.getMutationCode();
        schema = result.getSchema();
        switch (code) {
            case SCHEMA_NOT_FOUND:
                if (!ifExists) {
                    throw new SchemaNotFoundException(schemaName);
                }
                break;
            case NEWER_SCHEMA_FOUND:
                throw new NewerSchemaAlreadyExistsException(schemaName);
            case TABLES_EXIST_ON_SCHEMA:
                throw new SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_MUTATE_SCHEMA).setSchemaName(schemaName).build().buildException();
            default:
                connection.removeSchema(schema, result.getMutationTime());
                break;
        }
        return new MutationState(0, 0, connection);
    } finally {
        connection.setAutoCommit(wasAutoCommit);
    }
}
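A hedged usage sketch (connection URL, schema, and table names are placeholders; schema DDL requires namespace mapping to be enabled on the cluster): DROP SCHEMA IF EXISTS maps to the ifExists flag above, and the TABLES_EXIST_ON_SCHEMA code means every table in the schema must be dropped before the schema itself.

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.SQLException;
import java.sql.Statement;

public class DropSchemaExample {
    public static void main(String[] args) throws SQLException {
        // Assumes phoenix.schema.isNamespaceMappingEnabled=true on both client and server.
        try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost");
             Statement stmt = conn.createStatement()) {
            // Drop the tables first, otherwise dropSchema returns TABLES_EXIST_ON_SCHEMA
            // and the client throws CANNOT_MUTATE_SCHEMA.
            stmt.execute("DROP TABLE IF EXISTS MY_SCHEMA.MY_TABLE");
            // IF EXISTS turns a SCHEMA_NOT_FOUND result into a no-op instead of an error.
            stmt.execute("DROP SCHEMA IF EXISTS MY_SCHEMA");
        }
    }
}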
Use of org.apache.phoenix.coprocessor.MetaDataProtocol.MutationCode in project phoenix by apache.
From the class MetaDataClient, the method addColumn:
public MutationState addColumn(PTable table, List<ColumnDef> origColumnDefs, ListMultimap<String, Pair<String, Object>> stmtProperties, boolean ifNotExists, boolean removeTableProps, NamedTableNode namedTableNode, PTableType tableType) throws SQLException {
    connection.rollback();
    boolean wasAutoCommit = connection.getAutoCommit();
    try {
        connection.setAutoCommit(false);
        PName tenantId = connection.getTenantId();
        String schemaName = table.getSchemaName().getString();
        String tableName = table.getTableName().getString();
        Boolean isImmutableRowsProp = null;
        Boolean multiTenantProp = null;
        Boolean disableWALProp = null;
        Boolean storeNullsProp = null;
        Boolean isTransactionalProp = null;
        Long updateCacheFrequencyProp = null;
        Boolean appendOnlySchemaProp = null;
        Long guidePostWidth = -1L;
        ImmutableStorageScheme immutableStorageSchemeProp = null;
        Boolean useStatsForParallelizationProp = null;
        Map<String, List<Pair<String, Object>>> properties = new HashMap<>(stmtProperties.size());
        List<ColumnDef> columnDefs = null;
        if (table.isAppendOnlySchema()) {
            // only make the rpc if we are adding new columns
            columnDefs = Lists.newArrayList();
            for (ColumnDef columnDef : origColumnDefs) {
                String familyName = columnDef.getColumnDefName().getFamilyName();
                String columnName = columnDef.getColumnDefName().getColumnName();
                if (familyName != null) {
                    try {
                        PColumnFamily columnFamily = table.getColumnFamily(familyName);
                        columnFamily.getPColumnForColumnName(columnName);
                        if (!ifNotExists) {
                            throw new ColumnAlreadyExistsException(schemaName, tableName, columnName);
                        }
                    } catch (ColumnFamilyNotFoundException | ColumnNotFoundException e) {
                        columnDefs.add(columnDef);
                    }
                } else {
                    try {
                        table.getColumnForColumnName(columnName);
                        if (!ifNotExists) {
                            throw new ColumnAlreadyExistsException(schemaName, tableName, columnName);
                        }
                    } catch (ColumnNotFoundException e) {
                        columnDefs.add(columnDef);
                    }
                }
            }
        } else {
            columnDefs = origColumnDefs == null ? Collections.<ColumnDef>emptyList() : origColumnDefs;
        }
        for (String family : stmtProperties.keySet()) {
            List<Pair<String, Object>> origPropsList = stmtProperties.get(family);
            List<Pair<String, Object>> propsList = Lists.newArrayListWithExpectedSize(origPropsList.size());
            for (Pair<String, Object> prop : origPropsList) {
                String propName = prop.getFirst();
                if (TableProperty.isPhoenixTableProperty(propName)) {
                    TableProperty tableProp = TableProperty.valueOf(propName);
                    tableProp.validate(true, !family.equals(QueryConstants.ALL_FAMILY_PROPERTIES_KEY), table.getType());
                    Object value = tableProp.getValue(prop.getSecond());
                    if (propName.equals(PTable.IS_IMMUTABLE_ROWS_PROP_NAME)) {
                        isImmutableRowsProp = (Boolean) value;
                    } else if (propName.equals(PhoenixDatabaseMetaData.MULTI_TENANT)) {
                        multiTenantProp = (Boolean) value;
                    } else if (propName.equals(DISABLE_WAL)) {
                        disableWALProp = (Boolean) value;
                    } else if (propName.equals(STORE_NULLS)) {
                        storeNullsProp = (Boolean) value;
                    } else if (propName.equals(TRANSACTIONAL)) {
                        isTransactionalProp = (Boolean) value;
                    } else if (propName.equals(UPDATE_CACHE_FREQUENCY)) {
                        updateCacheFrequencyProp = (Long) value;
                    } else if (propName.equals(GUIDE_POSTS_WIDTH)) {
                        guidePostWidth = (Long) value;
                    } else if (propName.equals(APPEND_ONLY_SCHEMA)) {
                        appendOnlySchemaProp = (Boolean) value;
                    } else if (propName.equalsIgnoreCase(IMMUTABLE_STORAGE_SCHEME)) {
                        immutableStorageSchemeProp = (ImmutableStorageScheme) value;
                    } else if (propName.equalsIgnoreCase(USE_STATS_FOR_PARALLELIZATION)) {
                        useStatsForParallelizationProp = (Boolean) value;
                    }
                }
                // if removeTableProps is true only add the property if it is not an HTable or Phoenix table property
                if (!removeTableProps || (!TableProperty.isPhoenixTableProperty(propName) && !MetaDataUtil.isHTableProperty(propName))) {
                    propsList.add(prop);
                }
            }
            properties.put(family, propsList);
        }
        boolean retried = false;
        boolean changingPhoenixTableProperty = false;
        boolean nonTxToTx = false;
        while (true) {
            ColumnResolver resolver = FromCompiler.getResolver(namedTableNode, connection);
            table = resolver.getTables().get(0).getTable();
            int nIndexes = table.getIndexes().size();
            int numCols = columnDefs.size();
            int nNewColumns = numCols;
            List<Mutation> tableMetaData = Lists.newArrayListWithExpectedSize((1 + nNewColumns) * (nIndexes + 1));
            List<Mutation> columnMetaData = Lists.newArrayListWithExpectedSize(nNewColumns * (nIndexes + 1));
            if (logger.isDebugEnabled()) {
                logger.debug(LogUtil.addCustomAnnotations("Resolved table to " + table.getName().getString() + " with seqNum " + table.getSequenceNumber() + " at timestamp " + table.getTimeStamp() + " with " + table.getColumns().size() + " columns: " + table.getColumns(), connection));
            }
            int position = table.getColumns().size();
            List<PColumn> currentPKs = table.getPKColumns();
            PColumn lastPK = currentPKs.get(currentPKs.size() - 1);
            // Disallow adding columns if the last PK column is VARBINARY or an array type.
            if (lastPK.getDataType() == PVarbinary.INSTANCE || lastPK.getDataType().isArrayType()) {
                throw new SQLExceptionInfo.Builder(SQLExceptionCode.VARBINARY_LAST_PK).setColumnName(lastPK.getName().getString()).build().buildException();
            }
            // Disallow adding columns if the last PK column is fixed width and nullable.
            if (lastPK.isNullable() && lastPK.getDataType().isFixedWidth()) {
                throw new SQLExceptionInfo.Builder(SQLExceptionCode.NULLABLE_FIXED_WIDTH_LAST_PK).setColumnName(lastPK.getName().getString()).build().buildException();
            }
            Boolean isImmutableRows = null;
            if (isImmutableRowsProp != null) {
                if (isImmutableRowsProp.booleanValue() != table.isImmutableRows()) {
                    if (table.getImmutableStorageScheme() != ImmutableStorageScheme.ONE_CELL_PER_COLUMN) {
                        throw new SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_ALTER_IMMUTABLE_ROWS_PROPERTY).setSchemaName(schemaName).setTableName(tableName).build().buildException();
                    }
                    isImmutableRows = isImmutableRowsProp;
                    changingPhoenixTableProperty = true;
                }
            }
            Boolean multiTenant = null;
            if (multiTenantProp != null) {
                if (multiTenantProp.booleanValue() != table.isMultiTenant()) {
                    multiTenant = multiTenantProp;
                    changingPhoenixTableProperty = true;
                }
            }
            Boolean disableWAL = null;
            if (disableWALProp != null) {
                if (disableWALProp.booleanValue() != table.isWALDisabled()) {
                    disableWAL = disableWALProp;
                    changingPhoenixTableProperty = true;
                }
            }
            Long updateCacheFrequency = null;
            if (updateCacheFrequencyProp != null) {
                if (updateCacheFrequencyProp.longValue() != table.getUpdateCacheFrequency()) {
                    updateCacheFrequency = updateCacheFrequencyProp;
                    changingPhoenixTableProperty = true;
                }
            }
            Boolean appendOnlySchema = null;
            if (appendOnlySchemaProp != null) {
                if (appendOnlySchemaProp != table.isAppendOnlySchema()) {
                    appendOnlySchema = appendOnlySchemaProp;
                    changingPhoenixTableProperty = true;
                }
            }
            ImmutableStorageScheme immutableStorageScheme = null;
            if (immutableStorageSchemeProp != null) {
                if (table.getImmutableStorageScheme() == ONE_CELL_PER_COLUMN || immutableStorageSchemeProp == ONE_CELL_PER_COLUMN) {
                    throw new SQLExceptionInfo.Builder(SQLExceptionCode.INVALID_IMMUTABLE_STORAGE_SCHEME_CHANGE).setSchemaName(schemaName).setTableName(tableName).build().buildException();
                } else if (immutableStorageSchemeProp != table.getImmutableStorageScheme()) {
                    immutableStorageScheme = immutableStorageSchemeProp;
                    changingPhoenixTableProperty = true;
                }
            }
            if (guidePostWidth == null || guidePostWidth >= 0) {
                changingPhoenixTableProperty = true;
            }
            Boolean storeNulls = null;
            if (storeNullsProp != null) {
                if (storeNullsProp.booleanValue() != table.getStoreNulls()) {
                    storeNulls = storeNullsProp;
                    changingPhoenixTableProperty = true;
                }
            }
            Boolean useStatsForParallelization = null;
            if (useStatsForParallelizationProp != null) {
                if (useStatsForParallelizationProp.booleanValue() != table.useStatsForParallelization()) {
                    useStatsForParallelization = useStatsForParallelizationProp;
                    changingPhoenixTableProperty = true;
                }
            }
            Boolean isTransactional = null;
            if (isTransactionalProp != null) {
                if (isTransactionalProp.booleanValue() != table.isTransactional()) {
                    isTransactional = isTransactionalProp;
                    // Can only go from non-transactional to transactional; the reverse would require
                    // rewriting cell timestamps and compacting away transaction-specific delete markers.
                    if (!isTransactional) {
                        throw new SQLExceptionInfo.Builder(SQLExceptionCode.TX_MAY_NOT_SWITCH_TO_NON_TX).setSchemaName(schemaName).setTableName(tableName).build().buildException();
                    }
                    // cannot create a transactional table if transactions are disabled
                    boolean transactionsEnabled = connection.getQueryServices().getProps().getBoolean(QueryServices.TRANSACTIONS_ENABLED, QueryServicesOptions.DEFAULT_TRANSACTIONS_ENABLED);
                    if (!transactionsEnabled) {
                        throw new SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_ALTER_TO_BE_TXN_IF_TXNS_DISABLED).setSchemaName(schemaName).setTableName(tableName).build().buildException();
                    }
                    // cannot make a table transactional if it has a row timestamp column
                    if (SchemaUtil.hasRowTimestampColumn(table)) {
                        throw new SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_ALTER_TO_BE_TXN_WITH_ROW_TIMESTAMP).setSchemaName(schemaName).setTableName(tableName).build().buildException();
                    }
                    changingPhoenixTableProperty = true;
                    nonTxToTx = true;
                }
            }
            Long timeStamp = TransactionUtil.getTableTimestamp(connection, table.isTransactional() || nonTxToTx);
            int numPkColumnsAdded = 0;
            List<PColumn> columns = Lists.newArrayListWithExpectedSize(numCols);
            Set<String> colFamiliesForPColumnsToBeAdded = new LinkedHashSet<>();
            Set<String> families = new LinkedHashSet<>();
            PTable tableForCQCounters = tableType == PTableType.VIEW ? PhoenixRuntime.getTable(connection, table.getPhysicalName().getString()) : table;
            EncodedCQCounter cqCounterToUse = tableForCQCounters.getEncodedCQCounter();
            Map<String, Integer> changedCqCounters = new HashMap<>(numCols);
            if (numCols > 0) {
                StatementContext context = new StatementContext(new PhoenixStatement(connection), resolver);
                String addColumnSqlToUse = connection.isRunningUpgrade() && tableName.equals(PhoenixDatabaseMetaData.SYSTEM_CATALOG_TABLE) && schemaName.equals(PhoenixDatabaseMetaData.SYSTEM_CATALOG_SCHEMA) ? ALTER_SYSCATALOG_TABLE_UPGRADE : INSERT_COLUMN_ALTER_TABLE;
                try (PreparedStatement colUpsert = connection.prepareStatement(addColumnSqlToUse)) {
                    short nextKeySeq = SchemaUtil.getMaxKeySeq(table);
                    for (ColumnDef colDef : columnDefs) {
                        if (colDef != null && !colDef.isNull()) {
                            if (colDef.isPK()) {
                                throw new SQLExceptionInfo.Builder(SQLExceptionCode.NOT_NULLABLE_COLUMN_IN_ROW_KEY).setColumnName(colDef.getColumnDefName().getColumnName()).build().buildException();
                            } else {
                                throw new SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_ADD_NOT_NULLABLE_COLUMN).setColumnName(colDef.getColumnDefName().getColumnName()).build().buildException();
                            }
                        }
                        if (colDef != null && colDef.isPK() && table.getType() == VIEW && table.getViewType() != MAPPED) {
                            throwIfLastPKOfParentIsFixedLength(getParentOfView(table), schemaName, tableName, colDef);
                        }
                        if (colDef != null && colDef.isRowTimestamp()) {
                            throw new SQLExceptionInfo.Builder(SQLExceptionCode.ROWTIMESTAMP_CREATE_ONLY).setColumnName(colDef.getColumnDefName().getColumnName()).build().buildException();
                        }
                        if (!colDef.validateDefault(context, null)) {
                            // Remove DEFAULT as it's not necessary
                            colDef = new ColumnDef(colDef, null);
                        }
                        Integer encodedCQ = null;
                        if (!colDef.isPK()) {
                            String colDefFamily = colDef.getColumnDefName().getFamilyName();
                            String familyName = null;
                            ImmutableStorageScheme storageScheme = table.getImmutableStorageScheme();
                            String defaultColumnFamily = tableForCQCounters.getDefaultFamilyName() != null && !Strings.isNullOrEmpty(tableForCQCounters.getDefaultFamilyName().getString()) ? tableForCQCounters.getDefaultFamilyName().getString() : DEFAULT_COLUMN_FAMILY;
                            if (table.getType() == PTableType.INDEX && table.getIndexType() == IndexType.LOCAL) {
                                defaultColumnFamily = QueryConstants.LOCAL_INDEX_COLUMN_FAMILY_PREFIX + defaultColumnFamily;
                            }
                            if (storageScheme == SINGLE_CELL_ARRAY_WITH_OFFSETS) {
                                familyName = colDefFamily != null ? colDefFamily : defaultColumnFamily;
                            } else {
                                familyName = defaultColumnFamily;
                            }
                            encodedCQ = cqCounterToUse.getNextQualifier(familyName);
                            if (cqCounterToUse.increment(familyName)) {
                                changedCqCounters.put(familyName, cqCounterToUse.getNextQualifier(familyName));
                            }
                        }
                        byte[] columnQualifierBytes = null;
                        try {
                            columnQualifierBytes = EncodedColumnsUtil.getColumnQualifierBytes(colDef.getColumnDefName().getColumnName(), encodedCQ, table, colDef.isPK());
                        } catch (QualifierOutOfRangeException e) {
                            throw new SQLExceptionInfo.Builder(SQLExceptionCode.MAX_COLUMNS_EXCEEDED).setSchemaName(schemaName).setTableName(tableName).build().buildException();
                        }
                        PColumn column = newColumn(position++, colDef, PrimaryKeyConstraint.EMPTY, table.getDefaultFamilyName() == null ? null : table.getDefaultFamilyName().getString(), true, columnQualifierBytes);
                        columns.add(column);
                        String pkName = null;
                        Short keySeq = null;
                        // TODO: support setting properties on other families?
                        if (column.getFamilyName() == null) {
                            ++numPkColumnsAdded;
                            pkName = table.getPKName() == null ? null : table.getPKName().getString();
                            keySeq = ++nextKeySeq;
                        } else {
                            families.add(column.getFamilyName().getString());
                        }
                        colFamiliesForPColumnsToBeAdded.add(column.getFamilyName() == null ? null : column.getFamilyName().getString());
                        addColumnMutation(schemaName, tableName, column, colUpsert, null, pkName, keySeq, table.getBucketNum() != null);
                    }
                    // Add any new PK columns to end of index PK
                    if (numPkColumnsAdded > 0) {
                        // create PK column list that includes the newly created columns
                        List<PColumn> pkColumns = Lists.newArrayListWithExpectedSize(table.getPKColumns().size() + numPkColumnsAdded);
                        pkColumns.addAll(table.getPKColumns());
                        for (int i = 0; i < numCols; ++i) {
                            if (columnDefs.get(i).isPK()) {
                                pkColumns.add(columns.get(i));
                            }
                        }
                        int pkSlotPosition = table.getPKColumns().size() - 1;
                        for (PTable index : table.getIndexes()) {
                            short nextIndexKeySeq = SchemaUtil.getMaxKeySeq(index);
                            int indexPosition = index.getColumns().size();
                            for (int i = 0; i < numCols; ++i) {
                                ColumnDef colDef = columnDefs.get(i);
                                if (colDef.isPK()) {
                                    PDataType indexColDataType = IndexUtil.getIndexColumnDataType(colDef.isNull(), colDef.getDataType());
                                    ColumnName indexColName = ColumnName.caseSensitiveColumnName(IndexUtil.getIndexColumnName(null, colDef.getColumnDefName().getColumnName()));
                                    Expression expression = new RowKeyColumnExpression(columns.get(i), new RowKeyValueAccessor(pkColumns, ++pkSlotPosition));
                                    ColumnDef indexColDef = FACTORY.columnDef(indexColName, indexColDataType.getSqlTypeName(), colDef.isNull(), colDef.getMaxLength(), colDef.getScale(), true, colDef.getSortOrder(), expression.toString(), colDef.isRowTimestamp());
                                    PColumn indexColumn = newColumn(indexPosition++, indexColDef, PrimaryKeyConstraint.EMPTY, null, true, null);
                                    addColumnMutation(schemaName, index.getTableName().getString(), indexColumn, colUpsert, index.getParentTableName().getString(), index.getPKName() == null ? null : index.getPKName().getString(), ++nextIndexKeySeq, index.getBucketNum() != null);
                                }
                            }
                        }
                    }
                    columnMetaData.addAll(connection.getMutationState().toMutations(timeStamp).next().getSecond());
                    connection.rollback();
                }
            } else {
                // Check that HBase is configured properly for mutable secondary indexing if we're
                // changing from immutable to mutable rows and we already have existing indexes.
                if (Boolean.FALSE.equals(isImmutableRows) && !table.getIndexes().isEmpty()) {
                    int hbaseVersion = connection.getQueryServices().getLowestClusterHBaseVersion();
                    if (hbaseVersion < PhoenixDatabaseMetaData.MUTABLE_SI_VERSION_THRESHOLD) {
                        throw new SQLExceptionInfo.Builder(SQLExceptionCode.NO_MUTABLE_INDEXES).setSchemaName(schemaName).setTableName(tableName).build().buildException();
                    }
                    if (!connection.getQueryServices().hasIndexWALCodec() && !table.isTransactional()) {
                        throw new SQLExceptionInfo.Builder(SQLExceptionCode.INVALID_MUTABLE_INDEX_CONFIG).setSchemaName(schemaName).setTableName(tableName).build().buildException();
                    }
                }
                if (Boolean.TRUE.equals(multiTenant)) {
                    throwIfInsufficientColumns(schemaName, tableName, table.getPKColumns(), table.getBucketNum() != null, multiTenant);
                }
            }
            if (!table.getIndexes().isEmpty() && (numPkColumnsAdded > 0 || nonTxToTx)) {
                for (PTable index : table.getIndexes()) {
                    incrementTableSeqNum(index, index.getType(), numPkColumnsAdded, nonTxToTx ? Boolean.TRUE : null, updateCacheFrequency);
                }
                tableMetaData.addAll(connection.getMutationState().toMutations(timeStamp).next().getSecond());
                connection.rollback();
            }
            if (changingPhoenixTableProperty || columnDefs.size() > 0) {
                incrementTableSeqNum(table, tableType, columnDefs.size(), isTransactional, updateCacheFrequency, isImmutableRows, disableWAL, multiTenant, storeNulls, guidePostWidth, appendOnlySchema, immutableStorageScheme, useStatsForParallelization);
                tableMetaData.addAll(connection.getMutationState().toMutations(timeStamp).next().getSecond());
                connection.rollback();
            }
            // Force the table header row to be first
            Collections.reverse(tableMetaData);
            // Add column metadata afterwards, maintaining the order so columns have more predictable ordinal position
            tableMetaData.addAll(columnMetaData);
            boolean sharedIndex = tableType == PTableType.INDEX && (table.getIndexType() == IndexType.LOCAL || table.getViewIndexId() != null);
            String tenantIdToUse = connection.getTenantId() != null && sharedIndex ? connection.getTenantId().getString() : null;
            if (!changedCqCounters.isEmpty()) {
                PreparedStatement linkStatement;
                linkStatement = connection.prepareStatement(UPDATE_ENCODED_COLUMN_COUNTER);
                for (Entry<String, Integer> entry : changedCqCounters.entrySet()) {
                    linkStatement.setString(1, tenantIdToUse);
                    linkStatement.setString(2, tableForCQCounters.getSchemaName().getString());
                    linkStatement.setString(3, tableForCQCounters.getTableName().getString());
                    linkStatement.setString(4, entry.getKey());
                    linkStatement.setInt(5, entry.getValue());
                    linkStatement.execute();
                }
                // When a view adds its own columns we need to increment the base table's sequence number
                // too, since we want clients to get the latest PTable of the base table.
                if (tableType == VIEW) {
                    PreparedStatement incrementStatement = connection.prepareStatement(INCREMENT_SEQ_NUM);
                    incrementStatement.setString(1, null);
                    incrementStatement.setString(2, tableForCQCounters.getSchemaName().getString());
                    incrementStatement.setString(3, tableForCQCounters.getTableName().getString());
                    incrementStatement.setLong(4, tableForCQCounters.getSequenceNumber() + 1);
                    incrementStatement.execute();
                }
                tableMetaData.addAll(connection.getMutationState().toMutations(timeStamp).next().getSecond());
                connection.rollback();
            }
            byte[] family = families.size() > 0 ? families.iterator().next().getBytes() : null;
            // Figure out if the empty column family is changing as a result of adding the new column
            byte[] emptyCF = null;
            byte[] projectCF = null;
            if (table.getType() != PTableType.VIEW && family != null) {
                if (table.getColumnFamilies().isEmpty()) {
                    emptyCF = family;
                } else {
                    try {
                        table.getColumnFamily(family);
                    } catch (ColumnFamilyNotFoundException e) {
                        projectCF = family;
                        emptyCF = SchemaUtil.getEmptyColumnFamily(table);
                    }
                }
            }
            MetaDataMutationResult result = connection.getQueryServices().addColumn(tableMetaData, table, properties, colFamiliesForPColumnsToBeAdded, columns);
            try {
                MutationCode code = processMutationResult(schemaName, tableName, result);
                if (code == MutationCode.COLUMN_ALREADY_EXISTS) {
                    addTableToCache(result);
                    if (!ifNotExists) {
                        throw new ColumnAlreadyExistsException(schemaName, tableName, SchemaUtil.findExistingColumn(result.getTable(), columns));
                    }
                    return new MutationState(0, 0, connection);
                }
                // Only update client side cache if we aren't adding a PK column to a table with indexes or
                // transitioning a table from non transactional to transactional.
                // We could update the cache manually then too, it'd just be a pain.
                String fullTableName = SchemaUtil.getTableName(schemaName, tableName);
                long resolvedTimeStamp = TransactionUtil.getResolvedTime(connection, result);
                if (table.getIndexes().isEmpty() || (numPkColumnsAdded == 0 && !nonTxToTx)) {
                    connection.addTable(result.getTable(), resolvedTimeStamp);
                    table = result.getTable();
                } else if (updateCacheFrequency != null) {
                    // Force removal from cache as the update cache frequency has changed
                    // Note that clients outside this JVM won't be affected.
                    connection.removeTable(tenantId, fullTableName, null, resolvedTimeStamp);
                }
                // We only need to do this if the multiTenant transitioned to false
                if (table.getType() == PTableType.TABLE && Boolean.FALSE.equals(multiTenant) && MetaDataUtil.hasViewIndexTable(connection, table.getPhysicalName())) {
                    connection.setAutoCommit(true);
                    MetaDataUtil.deleteViewIndexSequences(connection, table.getPhysicalName(), table.isNamespaceMapped());
                    // If we're not dropping metadata on the server, clear any rows left in the view index
                    // physical table here; the DROP commands that are otherwise run would remove all rows already.
                    if (!connection.getQueryServices().getProps().getBoolean(DROP_METADATA_ATTRIB, DEFAULT_DROP_METADATA)) {
                        Long scn = connection.getSCN();
                        long ts = (scn == null ? result.getMutationTime() : scn);
                        byte[] viewIndexPhysicalName = MetaDataUtil.getViewIndexPhysicalName(table.getPhysicalName().getBytes());
                        PTable viewIndexTable = new PTableImpl(null, SchemaUtil.getSchemaNameFromFullName(viewIndexPhysicalName), SchemaUtil.getTableNameFromFullName(viewIndexPhysicalName), ts, table.getColumnFamilies(), table.isNamespaceMapped(), table.getImmutableStorageScheme(), table.getEncodingScheme(), table.useStatsForParallelization());
                        List<TableRef> tableRefs = Collections.singletonList(new TableRef(null, viewIndexTable, ts, false));
                        MutationPlan plan = new PostDDLCompiler(connection).compile(tableRefs, null, null, Collections.<PColumn>emptyList(), ts);
                        connection.getQueryServices().updateData(plan);
                    }
                }
                if (emptyCF != null) {
                    Long scn = connection.getSCN();
                    connection.setAutoCommit(true);
                    // Delete everything in the column. You'll still be able to do queries at earlier timestamps
                    long ts = (scn == null ? result.getMutationTime() : scn);
                    MutationPlan plan = new PostDDLCompiler(connection).compile(Collections.singletonList(new TableRef(null, table, ts, false)), emptyCF, projectCF == null ? null : Collections.singletonList(projectCF), null, ts);
                    return connection.getQueryServices().updateData(plan);
                }
                return new MutationState(0, 0, connection);
            } catch (ConcurrentTableMutationException e) {
                if (retried) {
                    throw e;
                }
                if (logger.isDebugEnabled()) {
                    logger.debug(LogUtil.addCustomAnnotations("Caught ConcurrentTableMutationException for table " + SchemaUtil.getTableName(schemaName, tableName) + ". Will try again...", connection));
                }
                retried = true;
            }
        }
    } finally {
        connection.setAutoCommit(wasAutoCommit);
    }
}
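A hedged usage sketch (connection URL, table, and column names are placeholders) of the DDL that drives addColumn: ADD IF NOT EXISTS maps to the ifNotExists flag, so a COLUMN_ALREADY_EXISTS result is ignored, and SET table properties flow through the stmtProperties handling above.

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.SQLException;
import java.sql.Statement;

public class AddColumnExample {
    public static void main(String[] args) throws SQLException {
        try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost");
             Statement stmt = conn.createStatement()) {
            // IF NOT EXISTS sets ifNotExists = true, so COLUMN_ALREADY_EXISTS is not rethrown.
            stmt.execute("ALTER TABLE MY_TABLE ADD IF NOT EXISTS NEW_COL VARCHAR");
            // A Phoenix table property change takes the changingPhoenixTableProperty branch.
            stmt.execute("ALTER TABLE MY_TABLE SET UPDATE_CACHE_FREQUENCY=300000");
        }
    }
}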
Use of org.apache.phoenix.coprocessor.MetaDataProtocol.MutationCode in project phoenix by apache.
From the class MetaDataClient, the method updateCache:
public MetaDataMutationResult updateCache(PName tenantId, List<String> functionNames, boolean alwaysHitServer) throws SQLException {
    // TODO: pass byte[] here
    long clientTimeStamp = getClientTimeStamp();
    List<PFunction> functions = new ArrayList<PFunction>(functionNames.size());
    List<Long> functionTimeStamps = new ArrayList<Long>(functionNames.size());
    Iterator<String> iterator = functionNames.iterator();
    while (iterator.hasNext()) {
        PFunction function = null;
        try {
            String functionName = iterator.next();
            function = connection.getMetaDataCache().getFunction(new PTableKey(tenantId, functionName));
            if (function != null && !alwaysHitServer && function.getTimeStamp() == clientTimeStamp - 1) {
                functions.add(function);
                iterator.remove();
                continue;
            }
            if (function != null && function.getTimeStamp() != clientTimeStamp - 1) {
                functionTimeStamps.add(function.getTimeStamp());
            } else {
                functionTimeStamps.add(HConstants.LATEST_TIMESTAMP);
            }
        } catch (FunctionNotFoundException e) {
            functionTimeStamps.add(HConstants.LATEST_TIMESTAMP);
        }
    }
    // Don't bother with server call: we can't possibly find a newer function
    if (functionNames.isEmpty()) {
        return new MetaDataMutationResult(MutationCode.FUNCTION_ALREADY_EXISTS, QueryConstants.UNSET_TIMESTAMP, functions, true);
    }
    int maxTryCount = tenantId == null ? 1 : 2;
    int tryCount = 0;
    MetaDataMutationResult result;
    do {
        List<Pair<byte[], Long>> functionsToFecth = new ArrayList<Pair<byte[], Long>>(functionNames.size());
        for (int i = 0; i < functionNames.size(); i++) {
            functionsToFecth.add(new Pair<byte[], Long>(PVarchar.INSTANCE.toBytes(functionNames.get(i)), functionTimeStamps.get(i)));
        }
        result = connection.getQueryServices().getFunctions(tenantId, functionsToFecth, clientTimeStamp);
        MutationCode code = result.getMutationCode();
        // We found updated functions, so update our cache
        if (result.getFunctions() != null && !result.getFunctions().isEmpty()) {
            result.getFunctions().addAll(functions);
            addFunctionToCache(result);
            return result;
        } else {
            if (code == MutationCode.FUNCTION_ALREADY_EXISTS) {
                result.getFunctions().addAll(functions);
                addFunctionToCache(result);
                return result;
            }
            if (code == MutationCode.FUNCTION_NOT_FOUND && tryCount + 1 == maxTryCount) {
                for (Pair<byte[], Long> f : functionsToFecth) {
                    connection.removeFunction(tenantId, Bytes.toString(f.getFirst()), f.getSecond());
                }
                // TODO removeFunctions all together from cache when
                throw new FunctionNotFoundException(functionNames.toString() + " not found");
            }
        }
        // Try again with global tenantId
        tenantId = null;
    } while (++tryCount < maxTryCount);
    return result;
}
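A hedged end-to-end sketch of what populates and exercises this function cache; the connection URL, UDF class, jar location, and table name are placeholders, and user-defined functions must be enabled (phoenix.functions.allowUserDefinedFunctions). Creating and then invoking a UDF causes the client to resolve function metadata, which goes through this updateCache path whenever the local cache is stale or empty.

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Statement;

public class FunctionCacheExample {
    public static void main(String[] args) throws SQLException {
        try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost");
             Statement stmt = conn.createStatement()) {
            // Placeholder class and jar; the jar must contain a Phoenix ScalarFunction implementation.
            stmt.execute("CREATE FUNCTION MY_REVERSE(varchar) RETURNS varchar "
                    + "AS 'com.example.MyReverseFunction' USING JAR 'hdfs://namenode:8020/phoenix/udf/my-udf.jar'");
            // Using the function forces metadata resolution, exercising updateCache.
            try (ResultSet rs = stmt.executeQuery("SELECT MY_REVERSE(NAME) FROM MY_TABLE")) {
                while (rs.next()) {
                    System.out.println(rs.getString(1));
                }
            }
        }
    }
}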