use of org.apache.phoenix.coprocessor.MetaDataProtocol.MetaDataMutationResult in project phoenix by apache.
the class MetaDataClient method addColumn.
public MutationState addColumn(PTable table, List<ColumnDef> origColumnDefs, ListMultimap<String, Pair<String, Object>> stmtProperties, boolean ifNotExists, boolean removeTableProps, NamedTableNode namedTableNode, PTableType tableType) throws SQLException {
connection.rollback();
boolean wasAutoCommit = connection.getAutoCommit();
try {
connection.setAutoCommit(false);
PName tenantId = connection.getTenantId();
String schemaName = table.getSchemaName().getString();
String tableName = table.getTableName().getString();
Boolean isImmutableRowsProp = null;
Boolean multiTenantProp = null;
Boolean disableWALProp = null;
Boolean storeNullsProp = null;
Boolean isTransactionalProp = null;
Long updateCacheFrequencyProp = null;
Boolean appendOnlySchemaProp = null;
Long guidePostWidth = -1L;
ImmutableStorageScheme immutableStorageSchemeProp = null;
Boolean useStatsForParallelizationProp = null;
Map<String, List<Pair<String, Object>>> properties = new HashMap<>(stmtProperties.size());
List<ColumnDef> columnDefs = null;
if (table.isAppendOnlySchema()) {
// only make the rpc if we are adding new columns
columnDefs = Lists.newArrayList();
for (ColumnDef columnDef : origColumnDefs) {
String familyName = columnDef.getColumnDefName().getFamilyName();
String columnName = columnDef.getColumnDefName().getColumnName();
if (familyName != null) {
try {
PColumnFamily columnFamily = table.getColumnFamily(familyName);
columnFamily.getPColumnForColumnName(columnName);
if (!ifNotExists) {
throw new ColumnAlreadyExistsException(schemaName, tableName, columnName);
}
} catch (ColumnFamilyNotFoundException | ColumnNotFoundException e) {
columnDefs.add(columnDef);
}
} else {
try {
table.getColumnForColumnName(columnName);
if (!ifNotExists) {
throw new ColumnAlreadyExistsException(schemaName, tableName, columnName);
}
} catch (ColumnNotFoundException e) {
columnDefs.add(columnDef);
}
}
}
} else {
columnDefs = origColumnDefs == null ? Collections.<ColumnDef>emptyList() : origColumnDefs;
}
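// Split the statement properties by column family, pulling out any Phoenix table-level
// properties (IMMUTABLE_ROWS, MULTI_TENANT, DISABLE_WAL, etc.) along the way.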
for (String family : stmtProperties.keySet()) {
List<Pair<String, Object>> origPropsList = stmtProperties.get(family);
List<Pair<String, Object>> propsList = Lists.newArrayListWithExpectedSize(origPropsList.size());
for (Pair<String, Object> prop : origPropsList) {
String propName = prop.getFirst();
if (TableProperty.isPhoenixTableProperty(propName)) {
TableProperty tableProp = TableProperty.valueOf(propName);
tableProp.validate(true, !family.equals(QueryConstants.ALL_FAMILY_PROPERTIES_KEY), table.getType());
Object value = tableProp.getValue(prop.getSecond());
if (propName.equals(PTable.IS_IMMUTABLE_ROWS_PROP_NAME)) {
isImmutableRowsProp = (Boolean) value;
} else if (propName.equals(PhoenixDatabaseMetaData.MULTI_TENANT)) {
multiTenantProp = (Boolean) value;
} else if (propName.equals(DISABLE_WAL)) {
disableWALProp = (Boolean) value;
} else if (propName.equals(STORE_NULLS)) {
storeNullsProp = (Boolean) value;
} else if (propName.equals(TRANSACTIONAL)) {
isTransactionalProp = (Boolean) value;
} else if (propName.equals(UPDATE_CACHE_FREQUENCY)) {
updateCacheFrequencyProp = (Long) value;
} else if (propName.equals(GUIDE_POSTS_WIDTH)) {
guidePostWidth = (Long) value;
} else if (propName.equals(APPEND_ONLY_SCHEMA)) {
appendOnlySchemaProp = (Boolean) value;
} else if (propName.equalsIgnoreCase(IMMUTABLE_STORAGE_SCHEME)) {
immutableStorageSchemeProp = (ImmutableStorageScheme) value;
} else if (propName.equalsIgnoreCase(USE_STATS_FOR_PARALLELIZATION)) {
useStatsForParallelizationProp = (Boolean) value;
}
}
// if removeTableProps is true only add the property if it is not a HTable or Phoenix Table property
if (!removeTableProps || (!TableProperty.isPhoenixTableProperty(propName) && !MetaDataUtil.isHTableProperty(propName))) {
propsList.add(prop);
}
}
properties.put(family, propsList);
}
boolean retried = false;
boolean changingPhoenixTableProperty = false;
boolean nonTxToTx = false;
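// Retry loop: on a ConcurrentTableMutationException the table is re-resolved and the ALTER is retried once (see the catch block below).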
while (true) {
ColumnResolver resolver = FromCompiler.getResolver(namedTableNode, connection);
table = resolver.getTables().get(0).getTable();
int nIndexes = table.getIndexes().size();
int numCols = columnDefs.size();
int nNewColumns = numCols;
List<Mutation> tableMetaData = Lists.newArrayListWithExpectedSize((1 + nNewColumns) * (nIndexes + 1));
List<Mutation> columnMetaData = Lists.newArrayListWithExpectedSize(nNewColumns * (nIndexes + 1));
if (logger.isDebugEnabled()) {
logger.debug(LogUtil.addCustomAnnotations("Resolved table to " + table.getName().getString() + " with seqNum " + table.getSequenceNumber() + " at timestamp " + table.getTimeStamp() + " with " + table.getColumns().size() + " columns: " + table.getColumns(), connection));
}
int position = table.getColumns().size();
List<PColumn> currentPKs = table.getPKColumns();
PColumn lastPK = currentPKs.get(currentPKs.size() - 1);
// Disallow adding columns if the last PK column is VARBINARY or an array type.
if (lastPK.getDataType() == PVarbinary.INSTANCE || lastPK.getDataType().isArrayType()) {
throw new SQLExceptionInfo.Builder(SQLExceptionCode.VARBINARY_LAST_PK).setColumnName(lastPK.getName().getString()).build().buildException();
}
// Disallow adding columns if last column is fixed width and nullable.
if (lastPK.isNullable() && lastPK.getDataType().isFixedWidth()) {
throw new SQLExceptionInfo.Builder(SQLExceptionCode.NULLABLE_FIXED_WIDTH_LAST_PK).setColumnName(lastPK.getName().getString()).build().buildException();
}
Boolean isImmutableRows = null;
if (isImmutableRowsProp != null) {
if (isImmutableRowsProp.booleanValue() != table.isImmutableRows()) {
if (table.getImmutableStorageScheme() != ImmutableStorageScheme.ONE_CELL_PER_COLUMN) {
throw new SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_ALTER_IMMUTABLE_ROWS_PROPERTY).setSchemaName(schemaName).setTableName(tableName).build().buildException();
}
isImmutableRows = isImmutableRowsProp;
changingPhoenixTableProperty = true;
}
}
Boolean multiTenant = null;
if (multiTenantProp != null) {
if (multiTenantProp.booleanValue() != table.isMultiTenant()) {
multiTenant = multiTenantProp;
changingPhoenixTableProperty = true;
}
}
Boolean disableWAL = null;
if (disableWALProp != null) {
if (disableWALProp.booleanValue() != table.isWALDisabled()) {
disableWAL = disableWALProp;
changingPhoenixTableProperty = true;
}
}
Long updateCacheFrequency = null;
if (updateCacheFrequencyProp != null) {
if (updateCacheFrequencyProp.longValue() != table.getUpdateCacheFrequency()) {
updateCacheFrequency = updateCacheFrequencyProp;
changingPhoenixTableProperty = true;
}
}
Boolean appendOnlySchema = null;
if (appendOnlySchemaProp != null) {
if (appendOnlySchemaProp != table.isAppendOnlySchema()) {
appendOnlySchema = appendOnlySchemaProp;
changingPhoenixTableProperty = true;
}
}
ImmutableStorageScheme immutableStorageScheme = null;
if (immutableStorageSchemeProp != null) {
if (table.getImmutableStorageScheme() == ONE_CELL_PER_COLUMN || immutableStorageSchemeProp == ONE_CELL_PER_COLUMN) {
throw new SQLExceptionInfo.Builder(SQLExceptionCode.INVALID_IMMUTABLE_STORAGE_SCHEME_CHANGE).setSchemaName(schemaName).setTableName(tableName).build().buildException();
} else if (immutableStorageSchemeProp != table.getImmutableStorageScheme()) {
immutableStorageScheme = immutableStorageSchemeProp;
changingPhoenixTableProperty = true;
}
}
if (guidePostWidth == null || guidePostWidth >= 0) {
changingPhoenixTableProperty = true;
}
Boolean storeNulls = null;
if (storeNullsProp != null) {
if (storeNullsProp.booleanValue() != table.getStoreNulls()) {
storeNulls = storeNullsProp;
changingPhoenixTableProperty = true;
}
}
Boolean useStatsForParallelization = null;
if (useStatsForParallelizationProp != null) {
if (useStatsForParallelizationProp.booleanValue() != table.useStatsForParallelization()) {
useStatsForParallelization = useStatsForParallelizationProp;
changingPhoenixTableProperty = true;
}
}
Boolean isTransactional = null;
if (isTransactionalProp != null) {
if (isTransactionalProp.booleanValue() != table.isTransactional()) {
isTransactional = isTransactionalProp;
// A table can only go from non-transactional to transactional, never the other way around.
if (!isTransactional) {
throw new SQLExceptionInfo.Builder(SQLExceptionCode.TX_MAY_NOT_SWITCH_TO_NON_TX).setSchemaName(schemaName).setTableName(tableName).build().buildException();
}
// cannot create a transactional table if transactions are disabled
boolean transactionsEnabled = connection.getQueryServices().getProps().getBoolean(QueryServices.TRANSACTIONS_ENABLED, QueryServicesOptions.DEFAULT_TRANSACTIONS_ENABLED);
if (!transactionsEnabled) {
throw new SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_ALTER_TO_BE_TXN_IF_TXNS_DISABLED).setSchemaName(schemaName).setTableName(tableName).build().buildException();
}
// cannot make a table transactional if it has a row timestamp column
if (SchemaUtil.hasRowTimestampColumn(table)) {
throw new SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_ALTER_TO_BE_TXN_WITH_ROW_TIMESTAMP).setSchemaName(schemaName).setTableName(tableName).build().buildException();
}
changingPhoenixTableProperty = true;
nonTxToTx = true;
}
}
Long timeStamp = TransactionUtil.getTableTimestamp(connection, table.isTransactional() || nonTxToTx);
int numPkColumnsAdded = 0;
List<PColumn> columns = Lists.newArrayListWithExpectedSize(numCols);
Set<String> colFamiliesForPColumnsToBeAdded = new LinkedHashSet<>();
Set<String> families = new LinkedHashSet<>();
PTable tableForCQCounters = tableType == PTableType.VIEW ? PhoenixRuntime.getTable(connection, table.getPhysicalName().getString()) : table;
EncodedCQCounter cqCounterToUse = tableForCQCounters.getEncodedCQCounter();
Map<String, Integer> changedCqCounters = new HashMap<>(numCols);
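// Upsert a SYSTEM.CATALOG row for each new column, assigning an encoded column qualifier where the table uses column encoding.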
if (numCols > 0) {
StatementContext context = new StatementContext(new PhoenixStatement(connection), resolver);
String addColumnSqlToUse = connection.isRunningUpgrade() && tableName.equals(PhoenixDatabaseMetaData.SYSTEM_CATALOG_TABLE) && schemaName.equals(PhoenixDatabaseMetaData.SYSTEM_CATALOG_SCHEMA) ? ALTER_SYSCATALOG_TABLE_UPGRADE : INSERT_COLUMN_ALTER_TABLE;
try (PreparedStatement colUpsert = connection.prepareStatement(addColumnSqlToUse)) {
short nextKeySeq = SchemaUtil.getMaxKeySeq(table);
for (ColumnDef colDef : columnDefs) {
if (colDef != null && !colDef.isNull()) {
if (colDef.isPK()) {
throw new SQLExceptionInfo.Builder(SQLExceptionCode.NOT_NULLABLE_COLUMN_IN_ROW_KEY).setColumnName(colDef.getColumnDefName().getColumnName()).build().buildException();
} else {
throw new SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_ADD_NOT_NULLABLE_COLUMN).setColumnName(colDef.getColumnDefName().getColumnName()).build().buildException();
}
}
if (colDef != null && colDef.isPK() && table.getType() == VIEW && table.getViewType() != MAPPED) {
throwIfLastPKOfParentIsFixedLength(getParentOfView(table), schemaName, tableName, colDef);
}
if (colDef != null && colDef.isRowTimestamp()) {
throw new SQLExceptionInfo.Builder(SQLExceptionCode.ROWTIMESTAMP_CREATE_ONLY).setColumnName(colDef.getColumnDefName().getColumnName()).build().buildException();
}
if (!colDef.validateDefault(context, null)) {
// Remove DEFAULT as it's not necessary
colDef = new ColumnDef(colDef, null);
}
Integer encodedCQ = null;
if (!colDef.isPK()) {
String colDefFamily = colDef.getColumnDefName().getFamilyName();
String familyName = null;
ImmutableStorageScheme storageScheme = table.getImmutableStorageScheme();
String defaultColumnFamily = tableForCQCounters.getDefaultFamilyName() != null && !Strings.isNullOrEmpty(tableForCQCounters.getDefaultFamilyName().getString()) ? tableForCQCounters.getDefaultFamilyName().getString() : DEFAULT_COLUMN_FAMILY;
if (table.getType() == PTableType.INDEX && table.getIndexType() == IndexType.LOCAL) {
defaultColumnFamily = QueryConstants.LOCAL_INDEX_COLUMN_FAMILY_PREFIX + defaultColumnFamily;
}
if (storageScheme == SINGLE_CELL_ARRAY_WITH_OFFSETS) {
familyName = colDefFamily != null ? colDefFamily : defaultColumnFamily;
} else {
familyName = defaultColumnFamily;
}
encodedCQ = cqCounterToUse.getNextQualifier(familyName);
if (cqCounterToUse.increment(familyName)) {
changedCqCounters.put(familyName, cqCounterToUse.getNextQualifier(familyName));
}
}
byte[] columnQualifierBytes = null;
try {
columnQualifierBytes = EncodedColumnsUtil.getColumnQualifierBytes(colDef.getColumnDefName().getColumnName(), encodedCQ, table, colDef.isPK());
} catch (QualifierOutOfRangeException e) {
throw new SQLExceptionInfo.Builder(SQLExceptionCode.MAX_COLUMNS_EXCEEDED).setSchemaName(schemaName).setTableName(tableName).build().buildException();
}
PColumn column = newColumn(position++, colDef, PrimaryKeyConstraint.EMPTY, table.getDefaultFamilyName() == null ? null : table.getDefaultFamilyName().getString(), true, columnQualifierBytes);
columns.add(column);
String pkName = null;
Short keySeq = null;
// TODO: support setting properties on other families?
if (column.getFamilyName() == null) {
++numPkColumnsAdded;
pkName = table.getPKName() == null ? null : table.getPKName().getString();
keySeq = ++nextKeySeq;
} else {
families.add(column.getFamilyName().getString());
}
colFamiliesForPColumnsToBeAdded.add(column.getFamilyName() == null ? null : column.getFamilyName().getString());
addColumnMutation(schemaName, tableName, column, colUpsert, null, pkName, keySeq, table.getBucketNum() != null);
}
// Add any new PK columns to end of index PK
if (numPkColumnsAdded > 0) {
// create PK column list that includes the newly created columns
List<PColumn> pkColumns = Lists.newArrayListWithExpectedSize(table.getPKColumns().size() + numPkColumnsAdded);
pkColumns.addAll(table.getPKColumns());
for (int i = 0; i < numCols; ++i) {
if (columnDefs.get(i).isPK()) {
pkColumns.add(columns.get(i));
}
}
int pkSlotPosition = table.getPKColumns().size() - 1;
for (PTable index : table.getIndexes()) {
short nextIndexKeySeq = SchemaUtil.getMaxKeySeq(index);
int indexPosition = index.getColumns().size();
for (int i = 0; i < numCols; ++i) {
ColumnDef colDef = columnDefs.get(i);
if (colDef.isPK()) {
PDataType indexColDataType = IndexUtil.getIndexColumnDataType(colDef.isNull(), colDef.getDataType());
ColumnName indexColName = ColumnName.caseSensitiveColumnName(IndexUtil.getIndexColumnName(null, colDef.getColumnDefName().getColumnName()));
Expression expression = new RowKeyColumnExpression(columns.get(i), new RowKeyValueAccessor(pkColumns, ++pkSlotPosition));
ColumnDef indexColDef = FACTORY.columnDef(indexColName, indexColDataType.getSqlTypeName(), colDef.isNull(), colDef.getMaxLength(), colDef.getScale(), true, colDef.getSortOrder(), expression.toString(), colDef.isRowTimestamp());
PColumn indexColumn = newColumn(indexPosition++, indexColDef, PrimaryKeyConstraint.EMPTY, null, true, null);
addColumnMutation(schemaName, index.getTableName().getString(), indexColumn, colUpsert, index.getParentTableName().getString(), index.getPKName() == null ? null : index.getPKName().getString(), ++nextIndexKeySeq, index.getBucketNum() != null);
}
}
}
}
columnMetaData.addAll(connection.getMutationState().toMutations(timeStamp).next().getSecond());
connection.rollback();
}
} else {
// Switching to mutable rows on a table that already has indexes requires cluster support for mutable secondary indexing.
if (Boolean.FALSE.equals(isImmutableRows) && !table.getIndexes().isEmpty()) {
int hbaseVersion = connection.getQueryServices().getLowestClusterHBaseVersion();
if (hbaseVersion < PhoenixDatabaseMetaData.MUTABLE_SI_VERSION_THRESHOLD) {
throw new SQLExceptionInfo.Builder(SQLExceptionCode.NO_MUTABLE_INDEXES).setSchemaName(schemaName).setTableName(tableName).build().buildException();
}
if (!connection.getQueryServices().hasIndexWALCodec() && !table.isTransactional()) {
throw new SQLExceptionInfo.Builder(SQLExceptionCode.INVALID_MUTABLE_INDEX_CONFIG).setSchemaName(schemaName).setTableName(tableName).build().buildException();
}
}
if (Boolean.TRUE.equals(multiTenant)) {
throwIfInsufficientColumns(schemaName, tableName, table.getPKColumns(), table.getBucketNum() != null, multiTenant);
}
}
if (!table.getIndexes().isEmpty() && (numPkColumnsAdded > 0 || nonTxToTx)) {
for (PTable index : table.getIndexes()) {
incrementTableSeqNum(index, index.getType(), numPkColumnsAdded, nonTxToTx ? Boolean.TRUE : null, updateCacheFrequency);
}
tableMetaData.addAll(connection.getMutationState().toMutations(timeStamp).next().getSecond());
connection.rollback();
}
if (changingPhoenixTableProperty || columnDefs.size() > 0) {
incrementTableSeqNum(table, tableType, columnDefs.size(), isTransactional, updateCacheFrequency, isImmutableRows, disableWAL, multiTenant, storeNulls, guidePostWidth, appendOnlySchema, immutableStorageScheme, useStatsForParallelization);
tableMetaData.addAll(connection.getMutationState().toMutations(timeStamp).next().getSecond());
connection.rollback();
}
// Force the table header row to be first
Collections.reverse(tableMetaData);
// Add column metadata afterwards, maintaining the order so columns have more predictable ordinal position
tableMetaData.addAll(columnMetaData);
boolean sharedIndex = tableType == PTableType.INDEX && (table.getIndexType() == IndexType.LOCAL || table.getViewIndexId() != null);
String tenantIdToUse = connection.getTenantId() != null && sharedIndex ? connection.getTenantId().getString() : null;
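// Persist any updated encoded column qualifier counters back to SYSTEM.CATALOG.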
if (!changedCqCounters.isEmpty()) {
PreparedStatement linkStatement;
linkStatement = connection.prepareStatement(UPDATE_ENCODED_COLUMN_COUNTER);
for (Entry<String, Integer> entry : changedCqCounters.entrySet()) {
linkStatement.setString(1, tenantIdToUse);
linkStatement.setString(2, tableForCQCounters.getSchemaName().getString());
linkStatement.setString(3, tableForCQCounters.getTableName().getString());
linkStatement.setString(4, entry.getKey());
linkStatement.setInt(5, entry.getValue());
linkStatement.execute();
}
// For views, bump the base table's sequence number too, since we want clients to get the latest PTable of the base table.
if (tableType == VIEW) {
PreparedStatement incrementStatement = connection.prepareStatement(INCREMENT_SEQ_NUM);
incrementStatement.setString(1, null);
incrementStatement.setString(2, tableForCQCounters.getSchemaName().getString());
incrementStatement.setString(3, tableForCQCounters.getTableName().getString());
incrementStatement.setLong(4, tableForCQCounters.getSequenceNumber() + 1);
incrementStatement.execute();
}
tableMetaData.addAll(connection.getMutationState().toMutations(timeStamp).next().getSecond());
connection.rollback();
}
byte[] family = families.size() > 0 ? families.iterator().next().getBytes() : null;
// Figure out if the empty column family is changing as a result of adding the new column
byte[] emptyCF = null;
byte[] projectCF = null;
if (table.getType() != PTableType.VIEW && family != null) {
if (table.getColumnFamilies().isEmpty()) {
emptyCF = family;
} else {
try {
table.getColumnFamily(family);
} catch (ColumnFamilyNotFoundException e) {
projectCF = family;
emptyCF = SchemaUtil.getEmptyColumnFamily(table);
}
}
}
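// Send the accumulated metadata mutations to the server to add the column(s) to SYSTEM.CATALOG.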
MetaDataMutationResult result = connection.getQueryServices().addColumn(tableMetaData, table, properties, colFamiliesForPColumnsToBeAdded, columns);
try {
MutationCode code = processMutationResult(schemaName, tableName, result);
if (code == MutationCode.COLUMN_ALREADY_EXISTS) {
addTableToCache(result);
if (!ifNotExists) {
throw new ColumnAlreadyExistsException(schemaName, tableName, SchemaUtil.findExistingColumn(result.getTable(), columns));
}
return new MutationState(0, 0, connection);
}
// Only update client side cache if we aren't adding a PK column to a table with indexes or
// transitioning a table from non transactional to transactional.
// We could update the cache manually then too, it'd just be a pain.
String fullTableName = SchemaUtil.getTableName(schemaName, tableName);
long resolvedTimeStamp = TransactionUtil.getResolvedTime(connection, result);
if (table.getIndexes().isEmpty() || (numPkColumnsAdded == 0 && !nonTxToTx)) {
connection.addTable(result.getTable(), resolvedTimeStamp);
table = result.getTable();
} else if (updateCacheFrequency != null) {
// Force removal from cache as the update cache frequency has changed
// Note that clients outside this JVM won't be affected.
connection.removeTable(tenantId, fullTableName, null, resolvedTimeStamp);
}
// We only need to do this if the multiTenant transitioned to false
if (table.getType() == PTableType.TABLE && Boolean.FALSE.equals(multiTenant) && MetaDataUtil.hasViewIndexTable(connection, table.getPhysicalName())) {
connection.setAutoCommit(true);
MetaDataUtil.deleteViewIndexSequences(connection, table.getPhysicalName(), table.isNamespaceMapped());
// If table metadata is being retained, clear out any remaining rows in the view index physical table.
if (!connection.getQueryServices().getProps().getBoolean(DROP_METADATA_ATTRIB, DEFAULT_DROP_METADATA)) {
Long scn = connection.getSCN();
long ts = (scn == null ? result.getMutationTime() : scn);
byte[] viewIndexPhysicalName = MetaDataUtil.getViewIndexPhysicalName(table.getPhysicalName().getBytes());
PTable viewIndexTable = new PTableImpl(null, SchemaUtil.getSchemaNameFromFullName(viewIndexPhysicalName), SchemaUtil.getTableNameFromFullName(viewIndexPhysicalName), ts, table.getColumnFamilies(), table.isNamespaceMapped(), table.getImmutableStorageScheme(), table.getEncodingScheme(), table.useStatsForParallelization());
List<TableRef> tableRefs = Collections.singletonList(new TableRef(null, viewIndexTable, ts, false));
MutationPlan plan = new PostDDLCompiler(connection).compile(tableRefs, null, null, Collections.<PColumn>emptyList(), ts);
connection.getQueryServices().updateData(plan);
}
}
if (emptyCF != null) {
Long scn = connection.getSCN();
connection.setAutoCommit(true);
// Delete everything in the column family. Queries at earlier timestamps will still work.
long ts = (scn == null ? result.getMutationTime() : scn);
MutationPlan plan = new PostDDLCompiler(connection).compile(Collections.singletonList(new TableRef(null, table, ts, false)), emptyCF, projectCF == null ? null : Collections.singletonList(projectCF), null, ts);
return connection.getQueryServices().updateData(plan);
}
return new MutationState(0, 0, connection);
} catch (ConcurrentTableMutationException e) {
if (retried) {
throw e;
}
if (logger.isDebugEnabled()) {
logger.debug(LogUtil.addCustomAnnotations("Caught ConcurrentTableMutationException for table " + SchemaUtil.getTableName(schemaName, tableName) + ". Will try again...", connection));
}
retried = true;
}
}
} finally {
connection.setAutoCommit(wasAutoCommit);
}
}
use of org.apache.phoenix.coprocessor.MetaDataProtocol.MetaDataMutationResult in project phoenix by apache.
the class ConnectionlessQueryServicesImpl method getTable.
@Override
public MetaDataMutationResult getTable(PName tenantId, byte[] schemaBytes, byte[] tableBytes, long tableTimestamp, long clientTimestamp) throws SQLException {
// Resolve the table from the client-side metadata cache; there is no server to ask (since we don't have a connection).
try {
String fullTableName = SchemaUtil.getTableName(schemaBytes, tableBytes);
PTable table = metaData.getTableRef(new PTableKey(tenantId, fullTableName)).getTable();
return new MetaDataMutationResult(MutationCode.TABLE_ALREADY_EXISTS, 0, table, true);
} catch (TableNotFoundException e) {
return new MetaDataMutationResult(MutationCode.TABLE_NOT_FOUND, 0, null);
}
}
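A caller of this connectionless getTable typically switches on the returned mutation code before trusting the PTable. A minimal sketch (the resolveTable helper and its use of LATEST_TIMESTAMP are illustrative, not part of Phoenix):
private static PTable resolveTable(ConnectionQueryServices services, PName tenantId, byte[] schemaBytes, byte[] tableBytes) throws SQLException {
    // The connectionless implementation ignores the timestamps, so LATEST_TIMESTAMP is a safe placeholder.
    MetaDataMutationResult result = services.getTable(tenantId, schemaBytes, tableBytes, HConstants.LATEST_TIMESTAMP, HConstants.LATEST_TIMESTAMP);
    if (result.getMutationCode() == MutationCode.TABLE_NOT_FOUND) {
        throw new TableNotFoundException(Bytes.toString(schemaBytes), Bytes.toString(tableBytes));
    }
    // TABLE_ALREADY_EXISTS means the table was found in the client-side metadata cache.
    return result.getTable();
}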
use of org.apache.phoenix.coprocessor.MetaDataProtocol.MetaDataMutationResult in project phoenix by apache.
the class MetaDataClient method updateCache.
public MetaDataMutationResult updateCache(PName tenantId, List<String> functionNames, boolean alwaysHitServer) throws SQLException {
// TODO: pass byte[] here
long clientTimeStamp = getClientTimeStamp();
List<PFunction> functions = new ArrayList<PFunction>(functionNames.size());
List<Long> functionTimeStamps = new ArrayList<Long>(functionNames.size());
Iterator<String> iterator = functionNames.iterator();
while (iterator.hasNext()) {
PFunction function = null;
try {
String functionName = iterator.next();
function = connection.getMetaDataCache().getFunction(new PTableKey(tenantId, functionName));
if (function != null && !alwaysHitServer && function.getTimeStamp() == clientTimeStamp - 1) {
functions.add(function);
iterator.remove();
continue;
}
if (function != null && function.getTimeStamp() != clientTimeStamp - 1) {
functionTimeStamps.add(function.getTimeStamp());
} else {
functionTimeStamps.add(HConstants.LATEST_TIMESTAMP);
}
} catch (FunctionNotFoundException e) {
functionTimeStamps.add(HConstants.LATEST_TIMESTAMP);
}
}
// Don't bother with server call: we can't possibly find a newer function
if (functionNames.isEmpty()) {
return new MetaDataMutationResult(MutationCode.FUNCTION_ALREADY_EXISTS, QueryConstants.UNSET_TIMESTAMP, functions, true);
}
int maxTryCount = tenantId == null ? 1 : 2;
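// Tenant-specific connections get a second attempt using the global tenant id (see the end of the loop).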
int tryCount = 0;
MetaDataMutationResult result;
do {
List<Pair<byte[], Long>> functionsToFetch = new ArrayList<Pair<byte[], Long>>(functionNames.size());
for (int i = 0; i < functionNames.size(); i++) {
functionsToFetch.add(new Pair<byte[], Long>(PVarchar.INSTANCE.toBytes(functionNames.get(i)), functionTimeStamps.get(i)));
}
result = connection.getQueryServices().getFunctions(tenantId, functionsToFetch, clientTimeStamp);
MutationCode code = result.getMutationCode();
// We found updated functions, so update our cache
if (result.getFunctions() != null && !result.getFunctions().isEmpty()) {
result.getFunctions().addAll(functions);
addFunctionToCache(result);
return result;
} else {
if (code == MutationCode.FUNCTION_ALREADY_EXISTS) {
result.getFunctions().addAll(functions);
addFunctionToCache(result);
return result;
}
if (code == MutationCode.FUNCTION_NOT_FOUND && tryCount + 1 == maxTryCount) {
for (Pair<byte[], Long> f : functionsToFetch) {
connection.removeFunction(tenantId, Bytes.toString(f.getFirst()), f.getSecond());
}
// TODO removeFunctions all together from cache when
throw new FunctionNotFoundException(functionNames.toString() + " not found");
}
}
// Try again with global tenantId
tenantId = null;
} while (++tryCount < maxTryCount);
return result;
}
use of org.apache.phoenix.coprocessor.MetaDataProtocol.MetaDataMutationResult in project phoenix by apache.
the class MetaDataClient method updateCache.
public MetaDataMutationResult updateCache(String schemaName, boolean alwaysHitServer) throws SQLException {
long clientTimeStamp = getClientTimeStamp();
PSchema schema = null;
try {
schema = connection.getMetaDataCache().getSchema(new PTableKey(null, schemaName));
if (schema != null && !alwaysHitServer) {
return new MetaDataMutationResult(MutationCode.SCHEMA_ALREADY_EXISTS, schema, QueryConstants.UNSET_TIMESTAMP);
}
} catch (SchemaNotFoundException e) {
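// Not cached locally; fall through to the server lookup below.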
}
MetaDataMutationResult result;
result = connection.getQueryServices().getSchema(schemaName, clientTimeStamp);
return result;
}
use of org.apache.phoenix.coprocessor.MetaDataProtocol.MetaDataMutationResult in project phoenix by apache.
the class MetaDataClient method createFunction.
public MutationState createFunction(CreateFunctionStatement stmt) throws SQLException {
boolean wasAutoCommit = connection.getAutoCommit();
connection.rollback();
try {
PFunction function = new PFunction(stmt.getFunctionInfo(), stmt.isTemporary(), stmt.isReplace());
connection.setAutoCommit(false);
String tenantIdStr = connection.getTenantId() == null ? null : connection.getTenantId().getString();
List<Mutation> functionData = Lists.newArrayListWithExpectedSize(function.getFunctionArguments().size() + 1);
List<FunctionArgument> args = function.getFunctionArguments();
try (PreparedStatement argUpsert = connection.prepareStatement(INSERT_FUNCTION_ARGUMENT)) {
for (int i = 0; i < args.size(); i++) {
FunctionArgument arg = args.get(i);
addFunctionArgMutation(function.getFunctionName(), arg, argUpsert, i);
}
functionData.addAll(connection.getMutationState().toMutations().next().getSecond());
connection.rollback();
}
try (PreparedStatement functionUpsert = connection.prepareStatement(CREATE_FUNCTION)) {
functionUpsert.setString(1, tenantIdStr);
functionUpsert.setString(2, function.getFunctionName());
functionUpsert.setInt(3, function.getFunctionArguments().size());
functionUpsert.setString(4, function.getClassName());
functionUpsert.setString(5, function.getJarPath());
functionUpsert.setString(6, function.getReturnType());
functionUpsert.execute();
functionData.addAll(connection.getMutationState().toMutations(null).next().getSecond());
connection.rollback();
}
MetaDataMutationResult result = connection.getQueryServices().createFunction(functionData, function, stmt.isTemporary());
MutationCode code = result.getMutationCode();
switch(code) {
case FUNCTION_ALREADY_EXISTS:
if (!function.isReplace()) {
throw new FunctionAlreadyExistsException(function.getFunctionName(), result.getFunctions().get(0));
} else {
connection.removeFunction(function.getTenantId(), function.getFunctionName(), result.getMutationTime());
addFunctionToCache(result);
}
case NEWER_FUNCTION_FOUND:
// A newer function exists on the server, but don't add it to this connection as we can't see it.
throw new NewerFunctionAlreadyExistsException(function.getFunctionName(), result.getFunctions().get(0));
default:
List<PFunction> functions = new ArrayList<PFunction>(1);
functions.add(function);
result = new MetaDataMutationResult(code, result.getMutationTime(), functions, true);
if (function.isReplace()) {
connection.removeFunction(function.getTenantId(), function.getFunctionName(), result.getMutationTime());
}
addFunctionToCache(result);
}
} finally {
connection.setAutoCommit(wasAutoCommit);
}
return new MutationState(1, 1000, connection);
}