Use of org.apache.phoenix.parse.TableName in project phoenix by apache.
The class MetaDataClient, method dropColumn.
public MutationState dropColumn(DropColumnStatement statement) throws SQLException {
connection.rollback();
boolean wasAutoCommit = connection.getAutoCommit();
try {
connection.setAutoCommit(false);
PName tenantId = connection.getTenantId();
TableName tableNameNode = statement.getTable().getName();
String schemaName = tableNameNode.getSchemaName();
String tableName = tableNameNode.getTableName();
String fullTableName = SchemaUtil.getTableName(schemaName, tableName);
boolean retried = false;
while (true) {
final ColumnResolver resolver = FromCompiler.getResolver(statement, connection);
TableRef tableRef = resolver.getTables().get(0);
PTable table = tableRef.getTable();
List<ColumnName> columnRefs = statement.getColumnRefs();
if (columnRefs == null) {
columnRefs = Lists.newArrayListWithCapacity(0);
}
List<ColumnRef> columnsToDrop = Lists.newArrayListWithExpectedSize(columnRefs.size() + table.getIndexes().size());
List<TableRef> indexesToDrop = Lists.newArrayListWithExpectedSize(table.getIndexes().size());
List<Mutation> tableMetaData = Lists.newArrayListWithExpectedSize((table.getIndexes().size() + 1) * (1 + table.getColumns().size() - columnRefs.size()));
List<PColumn> tableColumnsToDrop = Lists.newArrayListWithExpectedSize(columnRefs.size());
for (ColumnName column : columnRefs) {
ColumnRef columnRef = null;
try {
columnRef = resolver.resolveColumn(null, column.getFamilyName(), column.getColumnName());
} catch (ColumnNotFoundException e) {
if (statement.ifExists()) {
return new MutationState(0, 0, connection);
}
throw e;
}
PColumn columnToDrop = columnRef.getColumn();
tableColumnsToDrop.add(columnToDrop);
if (SchemaUtil.isPKColumn(columnToDrop)) {
throw new SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_DROP_PK).setColumnName(columnToDrop.getName().getString()).build().buildException();
} else if (table.isAppendOnlySchema()) {
throw new SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_DROP_COL_APPEND_ONLY_SCHEMA).setColumnName(columnToDrop.getName().getString()).build().buildException();
}
columnsToDrop.add(new ColumnRef(columnRef.getTableRef(), columnToDrop.getPosition()));
}
dropColumnMutations(table, tableColumnsToDrop);
boolean removedIndexTableOrColumn = false;
Long timeStamp = table.isTransactional() ? tableRef.getTimeStamp() : null;
for (PTable index : table.getIndexes()) {
IndexMaintainer indexMaintainer = index.getIndexMaintainer(table, connection);
// get the covered columns
List<PColumn> indexColumnsToDrop = Lists.newArrayListWithExpectedSize(columnRefs.size());
Set<Pair<String, String>> indexedColsInfo = indexMaintainer.getIndexedColumnInfo();
Set<ColumnReference> coveredCols = indexMaintainer.getCoveredColumns();
for (PColumn columnToDrop : tableColumnsToDrop) {
Pair<String, String> columnToDropInfo = new Pair<>(columnToDrop.getFamilyName().getString(), columnToDrop.getName().getString());
ColumnReference colDropRef = new ColumnReference(columnToDrop.getFamilyName() == null ? null : columnToDrop.getFamilyName().getBytes(), columnToDrop.getColumnQualifierBytes());
boolean isColumnIndexed = indexedColsInfo.contains(columnToDropInfo);
if (isColumnIndexed) {
if (index.getViewIndexId() == null) {
indexesToDrop.add(new TableRef(index));
}
connection.removeTable(tenantId, SchemaUtil.getTableName(schemaName, index.getName().getString()), index.getParentName() == null ? null : index.getParentName().getString(), index.getTimeStamp());
removedIndexTableOrColumn = true;
} else if (coveredCols.contains(colDropRef)) {
String indexColumnName = IndexUtil.getIndexColumnName(columnToDrop);
PColumn indexColumn = index.getColumnForColumnName(indexColumnName);
indexColumnsToDrop.add(indexColumn);
// add the index column to be dropped so that we actually delete the column values
columnsToDrop.add(new ColumnRef(new TableRef(index), indexColumn.getPosition()));
removedIndexTableOrColumn = true;
}
}
if (!indexColumnsToDrop.isEmpty()) {
long indexTableSeqNum = incrementTableSeqNum(index, index.getType(), -indexColumnsToDrop.size(), null, null);
dropColumnMutations(index, indexColumnsToDrop);
long clientTimestamp = MutationState.getMutationTimestamp(timeStamp, connection.getSCN());
connection.removeColumn(tenantId, index.getName().getString(), indexColumnsToDrop, clientTimestamp, indexTableSeqNum, TransactionUtil.getResolvedTimestamp(connection, index.isTransactional(), clientTimestamp));
}
}
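// Collect the metadata mutations queued on this connection by the dropColumnMutations calls above,
// then roll back so the same changes are not re-applied through a later commit.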
tableMetaData.addAll(connection.getMutationState().toMutations(timeStamp).next().getSecond());
connection.rollback();
long seqNum = incrementTableSeqNum(table, statement.getTableType(), -tableColumnsToDrop.size(), null, null);
tableMetaData.addAll(connection.getMutationState().toMutations(timeStamp).next().getSecond());
connection.rollback();
// Force table header to be first in list
Collections.reverse(tableMetaData);
/*
* Ensure our "empty column family to be" exists. Somewhat of an edge case, but can occur if we drop the last column
* in a column family that was the empty column family. In that case, we have to pick another one. If there are no other
* ones, then we need to create our default empty column family. Note that this may no longer be necessary once we
* support declaring what the empty column family is on a table, as:
* - If you declare it, we'd just ensure it's created at DDL time and never switch what it is unless you change it
* - If you don't declare it, we can just continue to use the old empty column family in this case, dynamically updating
* the empty column family name on the PTable.
*/
for (ColumnRef columnRefToDrop : columnsToDrop) {
PTable tableContainingColumnToDrop = columnRefToDrop.getTable();
byte[] emptyCF = getNewEmptyColumnFamilyOrNull(tableContainingColumnToDrop, columnRefToDrop.getColumn());
if (emptyCF != null) {
try {
tableContainingColumnToDrop.getColumnFamily(emptyCF);
} catch (ColumnFamilyNotFoundException e) {
// Only if it's not already a column family do we need to ensure it's created
Map<String, List<Pair<String, Object>>> family = new HashMap<>(1);
family.put(Bytes.toString(emptyCF), Collections.<Pair<String, Object>>emptyList());
// Just use a Put without any key values as the Mutation, as addColumn will treat this specially
// TODO: pass through schema name and table name instead to these methods as it's cleaner
byte[] tenantIdBytes = connection.getTenantId() == null ? null : connection.getTenantId().getBytes();
if (tenantIdBytes == null)
tenantIdBytes = ByteUtil.EMPTY_BYTE_ARRAY;
connection.getQueryServices().addColumn(Collections.<Mutation>singletonList(new Put(SchemaUtil.getTableKey(tenantIdBytes, tableContainingColumnToDrop.getSchemaName().getBytes(), tableContainingColumnToDrop.getTableName().getBytes()))), tableContainingColumnToDrop, family, Sets.newHashSet(Bytes.toString(emptyCF)), Collections.<PColumn>emptyList());
}
}
}
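// Send the accumulated metadata mutations to the server side to actually drop the column(s).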
MetaDataMutationResult result = connection.getQueryServices().dropColumn(tableMetaData, statement.getTableType());
try {
MutationCode code = processMutationResult(schemaName, tableName, result);
if (code == MutationCode.COLUMN_NOT_FOUND) {
addTableToCache(result);
if (!statement.ifExists()) {
throw new ColumnNotFoundException(schemaName, tableName, Bytes.toString(result.getFamilyName()), Bytes.toString(result.getColumnName()));
}
return new MutationState(0, 0, connection);
}
// If we've done any index metadata updates, don't bother trying to update the client-side cache;
// just let it pull the table over from the server when needed.
if (tableColumnsToDrop.size() > 0) {
if (removedIndexTableOrColumn)
connection.removeTable(tenantId, tableName, table.getParentName() == null ? null : table.getParentName().getString(), table.getTimeStamp());
else
connection.removeColumn(tenantId, SchemaUtil.getTableName(schemaName, tableName), tableColumnsToDrop, result.getMutationTime(), seqNum, TransactionUtil.getResolvedTime(connection, result));
}
// If we have a VIEW, then only delete the metadata, and leave the table data alone
if (table.getType() != PTableType.VIEW) {
MutationState state = null;
connection.setAutoCommit(true);
Long scn = connection.getSCN();
// Delete everything in the column. You'll still be able to do queries at earlier timestamps
long ts = (scn == null ? result.getMutationTime() : scn);
PostDDLCompiler compiler = new PostDDLCompiler(connection);
boolean dropMetaData = connection.getQueryServices().getProps().getBoolean(DROP_METADATA_ATTRIB, DEFAULT_DROP_METADATA);
// if the index is a local index or view index it uses a shared physical table
// so we need to issue delete markers for all the rows of the index
final List<TableRef> tableRefsToDrop = Lists.newArrayList();
Map<String, List<TableRef>> tenantIdTableRefMap = Maps.newHashMap();
if (result.getSharedTablesToDelete() != null) {
for (SharedTableState sharedTableState : result.getSharedTablesToDelete()) {
PTableImpl viewIndexTable = new PTableImpl(sharedTableState.getTenantId(), sharedTableState.getSchemaName(), sharedTableState.getTableName(), ts, table.getColumnFamilies(), sharedTableState.getColumns(), sharedTableState.getPhysicalNames(), sharedTableState.getViewIndexId(), table.isMultiTenant(), table.isNamespaceMapped(), table.getImmutableStorageScheme(), table.getEncodingScheme(), table.getEncodedCQCounter(), table.useStatsForParallelization());
TableRef indexTableRef = new TableRef(viewIndexTable);
PName indexTableTenantId = sharedTableState.getTenantId();
if (indexTableTenantId == null) {
tableRefsToDrop.add(indexTableRef);
} else {
if (!tenantIdTableRefMap.containsKey(indexTableTenantId.getString())) {
tenantIdTableRefMap.put(indexTableTenantId.getString(), Lists.<TableRef>newArrayList());
}
tenantIdTableRefMap.get(indexTableTenantId.getString()).add(indexTableRef);
}
}
}
// If dropMetaData is false, also delete the rows of the indexes dropped above (if it were true,
// they would have been dropped in ConnectionQueryServices.dropColumn)
if (!dropMetaData) {
tableRefsToDrop.addAll(indexesToDrop);
}
// Drop any index tables that had the dropped column in the PK
state = connection.getQueryServices().updateData(compiler.compile(tableRefsToDrop, null, null, Collections.<PColumn>emptyList(), ts));
// Drop any tenant-specific indexes
if (!tenantIdTableRefMap.isEmpty()) {
for (Entry<String, List<TableRef>> entry : tenantIdTableRefMap.entrySet()) {
String indexTenantId = entry.getKey();
Properties props = new Properties(connection.getClientInfo());
props.setProperty(PhoenixRuntime.TENANT_ID_ATTRIB, indexTenantId);
try (PhoenixConnection tenantConn = new PhoenixConnection(connection, connection.getQueryServices(), props)) {
PostDDLCompiler dropCompiler = new PostDDLCompiler(tenantConn);
state = tenantConn.getQueryServices().updateData(dropCompiler.compile(entry.getValue(), null, null, Collections.<PColumn>emptyList(), ts));
}
}
}
// See https://issues.apache.org/jira/browse/PHOENIX-3605
if (!table.isImmutableRows() || table.getImmutableStorageScheme() == ImmutableStorageScheme.ONE_CELL_PER_COLUMN) {
// Update empty key value column if necessary
for (ColumnRef droppedColumnRef : columnsToDrop) {
// Painful, but we need a TableRef with a pre-set timestamp to prevent attempts
// to get any updates from the region server.
// TODO: move this into PostDDLCompiler
// TODO: consider filtering mutable indexes here, but then the issue is that
// we'd need to force an update of the data row empty key value if a mutable
// secondary index is changing its empty key value family.
droppedColumnRef = droppedColumnRef.cloneAtTimestamp(ts);
TableRef droppedColumnTableRef = droppedColumnRef.getTableRef();
PColumn droppedColumn = droppedColumnRef.getColumn();
MutationPlan plan = compiler.compile(Collections.singletonList(droppedColumnTableRef), getNewEmptyColumnFamilyOrNull(droppedColumnTableRef.getTable(), droppedColumn), null, Collections.singletonList(droppedColumn), ts);
state = connection.getQueryServices().updateData(plan);
}
}
// Return the last MutationState
return state;
}
return new MutationState(0, 0, connection);
} catch (ConcurrentTableMutationException e) {
if (retried) {
throw e;
}
table = connection.getTable(new PTableKey(tenantId, fullTableName));
retried = true;
}
}
} finally {
connection.setAutoCommit(wasAutoCommit);
}
}
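For orientation, here is a minimal hedged sketch of how this method is typically reached: the connection URL, schema, table, and column names below are hypothetical, and the ALTER TABLE statement is what gets parsed into the DropColumnStatement handled above.

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.Statement;

public class DropColumnSketch {
    public static void main(String[] args) throws Exception {
        // Hypothetical ZooKeeper quorum in the Phoenix JDBC URL.
        try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost:2181");
             Statement stmt = conn.createStatement()) {
            // IF EXISTS corresponds to statement.ifExists() above: a missing column yields an
            // empty MutationState rather than a ColumnNotFoundException.
            stmt.executeUpdate("ALTER TABLE MY_SCHEMA.MY_TABLE DROP COLUMN IF EXISTS MY_CF.MY_COL");
        }
    }
}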
Use of org.apache.phoenix.parse.TableName in project phoenix by apache.
The class MetaDataClient, method createIndex.
/**
* Create an index table by morphing the CreateIndexStatement into a CreateTableStatement and calling
* MetaDataClient.createTable. In doing so, we perform the following translations:
* 1) Change the type of any columns being indexed to types that support null if the column is nullable.
* For example, a BIGINT type would be coerced to a DECIMAL type, since a DECIMAL type supports null
* when it's in the row key while a BIGINT does not.
* 2) Append any row key column from the data table that is not in the indexed column list. Our indexes
* rely on having a 1:1 correspondence between the index and data rows.
* 3) Change the name of the columns to include the column family. For example, if you have a column
* named "B" in a column family named "A", the indexed column name will be "A:B". This makes it easy
* to translate the column references in a query to the correct column references in an index table
* regardless of whether the column reference is prefixed with the column family name or not. It also
* has the side benefit of allowing the same named column in different column families to both be
* listed as an index column.
* @param statement
* @param splits
* @return MutationState from population of index table from data table
* @throws SQLException
*/
public MutationState createIndex(CreateIndexStatement statement, byte[][] splits) throws SQLException {
IndexKeyConstraint ik = statement.getIndexConstraint();
TableName indexTableName = statement.getIndexTableName();
Map<String, Object> tableProps = Maps.newHashMapWithExpectedSize(statement.getProps().size());
Map<String, Object> commonFamilyProps = Maps.newHashMapWithExpectedSize(statement.getProps().size() + 1);
populatePropertyMaps(statement.getProps(), tableProps, commonFamilyProps);
List<Pair<ParseNode, SortOrder>> indexParseNodeAndSortOrderList = ik.getParseNodeAndSortOrderList();
List<ColumnName> includedColumns = statement.getIncludeColumns();
TableRef tableRef = null;
PTable table = null;
int numRetries = 0;
boolean allocateIndexId = false;
boolean isLocalIndex = statement.getIndexType() == IndexType.LOCAL;
int hbaseVersion = connection.getQueryServices().getLowestClusterHBaseVersion();
if (isLocalIndex) {
if (!connection.getQueryServices().getProps().getBoolean(QueryServices.ALLOW_LOCAL_INDEX_ATTRIB, QueryServicesOptions.DEFAULT_ALLOW_LOCAL_INDEX)) {
throw new SQLExceptionInfo.Builder(SQLExceptionCode.UNALLOWED_LOCAL_INDEXES).setTableName(indexTableName.getTableName()).build().buildException();
}
if (!connection.getQueryServices().supportsFeature(Feature.LOCAL_INDEX)) {
throw new SQLExceptionInfo.Builder(SQLExceptionCode.NO_LOCAL_INDEXES).setTableName(indexTableName.getTableName()).build().buildException();
}
}
while (true) {
try {
ColumnResolver resolver = FromCompiler.getResolver(statement, connection, statement.getUdfParseNodes());
tableRef = resolver.getTables().get(0);
Date asyncCreatedDate = null;
if (statement.isAsync()) {
asyncCreatedDate = new Date(tableRef.getTimeStamp());
}
PTable dataTable = tableRef.getTable();
boolean isTenantConnection = connection.getTenantId() != null;
if (isTenantConnection) {
if (dataTable.getType() != PTableType.VIEW) {
throw new SQLFeatureNotSupportedException("An index may only be created for a VIEW through a tenant-specific connection");
}
}
if (!dataTable.isImmutableRows()) {
if (hbaseVersion < PhoenixDatabaseMetaData.MUTABLE_SI_VERSION_THRESHOLD) {
throw new SQLExceptionInfo.Builder(SQLExceptionCode.NO_MUTABLE_INDEXES).setTableName(indexTableName.getTableName()).build().buildException();
}
if (!connection.getQueryServices().hasIndexWALCodec() && !dataTable.isTransactional()) {
throw new SQLExceptionInfo.Builder(SQLExceptionCode.INVALID_MUTABLE_INDEX_CONFIG).setTableName(indexTableName.getTableName()).build().buildException();
}
}
int posOffset = 0;
List<PColumn> pkColumns = dataTable.getPKColumns();
Set<RowKeyColumnExpression> unusedPkColumns;
if (dataTable.getBucketNum() != null) {
// Ignore SALT column
unusedPkColumns = Sets.newLinkedHashSetWithExpectedSize(pkColumns.size() - 1);
posOffset++;
} else {
unusedPkColumns = Sets.newLinkedHashSetWithExpectedSize(pkColumns.size());
}
for (int i = posOffset; i < pkColumns.size(); i++) {
PColumn column = pkColumns.get(i);
unusedPkColumns.add(new RowKeyColumnExpression(column, new RowKeyValueAccessor(pkColumns, i), "\"" + column.getName().getString() + "\""));
}
List<ColumnDefInPkConstraint> allPkColumns = Lists.newArrayListWithExpectedSize(unusedPkColumns.size());
List<ColumnDef> columnDefs = Lists.newArrayListWithExpectedSize(includedColumns.size() + indexParseNodeAndSortOrderList.size());
/*
* Allocate an index ID in two circumstances:
* 1) for a local index, as all local indexes will reside in the same HBase table
* 2) for a view on an index.
*/
if (isLocalIndex || (dataTable.getType() == PTableType.VIEW && dataTable.getViewType() != ViewType.MAPPED)) {
allocateIndexId = true;
PDataType dataType = MetaDataUtil.getViewIndexIdDataType();
ColumnName colName = ColumnName.caseSensitiveColumnName(MetaDataUtil.getViewIndexIdColumnName());
allPkColumns.add(new ColumnDefInPkConstraint(colName, SortOrder.getDefault(), false));
columnDefs.add(FACTORY.columnDef(colName, dataType.getSqlTypeName(), false, null, null, false, SortOrder.getDefault(), null, false));
}
if (dataTable.isMultiTenant()) {
PColumn col = dataTable.getPKColumns().get(posOffset);
RowKeyColumnExpression columnExpression = new RowKeyColumnExpression(col, new RowKeyValueAccessor(pkColumns, posOffset), col.getName().getString());
unusedPkColumns.remove(columnExpression);
PDataType dataType = IndexUtil.getIndexColumnDataType(col);
ColumnName colName = ColumnName.caseSensitiveColumnName(IndexUtil.getIndexColumnName(col));
allPkColumns.add(new ColumnDefInPkConstraint(colName, col.getSortOrder(), false));
columnDefs.add(FACTORY.columnDef(colName, dataType.getSqlTypeName(), col.isNullable(), col.getMaxLength(), col.getScale(), false, SortOrder.getDefault(), col.getName().getString(), col.isRowTimestamp()));
}
PhoenixStatement phoenixStatement = new PhoenixStatement(connection);
StatementContext context = new StatementContext(phoenixStatement, resolver);
IndexExpressionCompiler expressionIndexCompiler = new IndexExpressionCompiler(context);
Set<ColumnName> indexedColumnNames = Sets.newHashSetWithExpectedSize(indexParseNodeAndSortOrderList.size());
for (Pair<ParseNode, SortOrder> pair : indexParseNodeAndSortOrderList) {
ParseNode parseNode = pair.getFirst();
// normalize the parse node
parseNode = StatementNormalizer.normalize(parseNode, resolver);
// compile the parseNode to get an expression
expressionIndexCompiler.reset();
Expression expression = parseNode.accept(expressionIndexCompiler);
if (expressionIndexCompiler.isAggregate()) {
throw new SQLExceptionInfo.Builder(SQLExceptionCode.AGGREGATE_EXPRESSION_NOT_ALLOWED_IN_INDEX).build().buildException();
}
if (expression.getDeterminism() != Determinism.ALWAYS) {
throw new SQLExceptionInfo.Builder(SQLExceptionCode.NON_DETERMINISTIC_EXPRESSION_NOT_ALLOWED_IN_INDEX).build().buildException();
}
if (expression.isStateless()) {
throw new SQLExceptionInfo.Builder(SQLExceptionCode.STATELESS_EXPRESSION_NOT_ALLOWED_IN_INDEX).build().buildException();
}
unusedPkColumns.remove(expression);
// Go through parse node to get string as otherwise we
// can lose information during compilation
StringBuilder buf = new StringBuilder();
parseNode.toSQL(resolver, buf);
// need to escape backslash as this expression will be re-parsed later
String expressionStr = StringUtil.escapeBackslash(buf.toString());
ColumnName colName = null;
ColumnRef colRef = expressionIndexCompiler.getColumnRef();
boolean isRowTimestamp = false;
if (colRef != null) {
// if this is a regular column
PColumn column = colRef.getColumn();
String columnFamilyName = column.getFamilyName() != null ? column.getFamilyName().getString() : null;
colName = ColumnName.caseSensitiveColumnName(IndexUtil.getIndexColumnName(columnFamilyName, column.getName().getString()));
isRowTimestamp = column.isRowTimestamp();
if (colRef.getColumn().getExpressionStr() != null) {
expressionStr = colRef.getColumn().getExpressionStr();
}
} else {
// if this is an expression
// TODO: column names cannot have double quotes; remove this once PHOENIX-1621 is fixed
String name = expressionStr.replaceAll("\"", "'");
colName = ColumnName.caseSensitiveColumnName(IndexUtil.getIndexColumnName(null, name));
}
indexedColumnNames.add(colName);
PDataType dataType = IndexUtil.getIndexColumnDataType(expression.isNullable(), expression.getDataType());
allPkColumns.add(new ColumnDefInPkConstraint(colName, pair.getSecond(), isRowTimestamp));
columnDefs.add(FACTORY.columnDef(colName, dataType.getSqlTypeName(), expression.isNullable(), expression.getMaxLength(), expression.getScale(), false, pair.getSecond(), expressionStr, isRowTimestamp));
}
// Next all the PK columns from the data table that aren't indexed
if (!unusedPkColumns.isEmpty()) {
for (RowKeyColumnExpression colExpression : unusedPkColumns) {
PColumn col = dataTable.getPKColumns().get(colExpression.getPosition());
// we don't need these in the index
if (col.getViewConstant() == null) {
ColumnName colName = ColumnName.caseSensitiveColumnName(IndexUtil.getIndexColumnName(col));
allPkColumns.add(new ColumnDefInPkConstraint(colName, colExpression.getSortOrder(), col.isRowTimestamp()));
PDataType dataType = IndexUtil.getIndexColumnDataType(colExpression.isNullable(), colExpression.getDataType());
columnDefs.add(FACTORY.columnDef(colName, dataType.getSqlTypeName(), colExpression.isNullable(), colExpression.getMaxLength(), colExpression.getScale(), false, colExpression.getSortOrder(), colExpression.toString(), col.isRowTimestamp()));
}
}
}
// Last all the included columns (minus any PK columns)
for (ColumnName colName : includedColumns) {
PColumn col = resolver.resolveColumn(null, colName.getFamilyName(), colName.getColumnName()).getColumn();
colName = ColumnName.caseSensitiveColumnName(IndexUtil.getIndexColumnName(col));
// Check for duplicates between indexed and included columns
if (indexedColumnNames.contains(colName)) {
throw new SQLExceptionInfo.Builder(SQLExceptionCode.COLUMN_EXIST_IN_DEF).build().buildException();
}
if (!SchemaUtil.isPKColumn(col) && col.getViewConstant() == null) {
// Need to re-create ColumnName, since the above one won't have the column family name
colName = ColumnName.caseSensitiveColumnName(isLocalIndex ? IndexUtil.getLocalIndexColumnFamily(col.getFamilyName().getString()) : col.getFamilyName().getString(), IndexUtil.getIndexColumnName(col));
columnDefs.add(FACTORY.columnDef(colName, col.getDataType().getSqlTypeName(), col.isNullable(), col.getMaxLength(), col.getScale(), false, col.getSortOrder(), col.getExpressionStr(), col.isRowTimestamp()));
}
}
// We need this in the props so that the correct column family is created
if (dataTable.getDefaultFamilyName() != null && dataTable.getType() != PTableType.VIEW && !allocateIndexId) {
statement.getProps().put("", new Pair<String, Object>(DEFAULT_COLUMN_FAMILY_NAME, dataTable.getDefaultFamilyName().getString()));
}
PrimaryKeyConstraint pk = FACTORY.primaryKey(null, allPkColumns);
tableProps.put(MetaDataUtil.DATA_TABLE_NAME_PROP_NAME, dataTable.getName().getString());
CreateTableStatement tableStatement = FACTORY.createTable(indexTableName, statement.getProps(), columnDefs, pk, statement.getSplitNodes(), PTableType.INDEX, statement.ifNotExists(), null, null, statement.getBindCount(), null);
table = createTableInternal(tableStatement, splits, dataTable, null, null, null, null, allocateIndexId, statement.getIndexType(), asyncCreatedDate, tableProps, commonFamilyProps);
break;
} catch (ConcurrentTableMutationException e) {
// Can happen if parent data table changes while above is in progress
if (numRetries < 5) {
numRetries++;
continue;
}
throw e;
}
}
if (table == null) {
return new MutationState(0, 0, connection);
}
if (logger.isInfoEnabled())
logger.info("Created index " + table.getName().getString() + " at " + table.getTimeStamp());
boolean asyncIndexBuildEnabled = connection.getQueryServices().getProps().getBoolean(QueryServices.INDEX_ASYNC_BUILD_ENABLED, QueryServicesOptions.DEFAULT_INDEX_ASYNC_BUILD_ENABLED);
// In the async case, we return immediately as the MR job needs to be triggered.
if (statement.isAsync() && asyncIndexBuildEnabled) {
return new MutationState(0, 0, connection);
}
// If our connection is at a fixed point-in-time (SCN is set), we need to open a new
// connection so that our new index table is visible.
if (connection.getSCN() != null) {
return buildIndexAtTimeStamp(table, statement.getTable());
}
return buildIndex(table, tableRef);
}
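As a hedged illustration of the translations described in the javadoc (all names are hypothetical and the connection setup is assumed to match the earlier sketch): indexing column B from column family A produces an index column named "A:B", C becomes a covered column, and ASYNC triggers the early return above that leaves population to a separate MR job.

// Sketch only; assumes the java.sql imports and Phoenix JDBC URL from the earlier example.
try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost:2181");
     Statement stmt = conn.createStatement()) {
    // B lives in family A on the data table; in the index it is referenced as "A:B".
    stmt.executeUpdate(
        "CREATE INDEX IF NOT EXISTS MY_IDX ON MY_SCHEMA.MY_TABLE (A.B) INCLUDE (A.C) ASYNC");
}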
Use of org.apache.phoenix.parse.TableName in project phoenix by apache.
The class MetaDataClient, method createTable.
public MutationState createTable(CreateTableStatement statement, byte[][] splits, PTable parent, String viewStatement, ViewType viewType, byte[][] viewColumnConstants, BitSet isViewColumnReferenced) throws SQLException {
TableName tableName = statement.getTableName();
Map<String, Object> tableProps = Maps.newHashMapWithExpectedSize(statement.getProps().size());
Map<String, Object> commonFamilyProps = Maps.newHashMapWithExpectedSize(statement.getProps().size() + 1);
populatePropertyMaps(statement.getProps(), tableProps, commonFamilyProps);
boolean isAppendOnlySchema = false;
long updateCacheFrequency = connection.getQueryServices().getProps().getLong(QueryServices.DEFAULT_UPDATE_CACHE_FREQUENCY_ATRRIB, QueryServicesOptions.DEFAULT_UPDATE_CACHE_FREQUENCY);
if (parent == null) {
Boolean appendOnlySchemaProp = (Boolean) TableProperty.APPEND_ONLY_SCHEMA.getValue(tableProps);
if (appendOnlySchemaProp != null) {
isAppendOnlySchema = appendOnlySchemaProp;
}
Long updateCacheFrequencyProp = (Long) TableProperty.UPDATE_CACHE_FREQUENCY.getValue(tableProps);
if (updateCacheFrequencyProp != null) {
updateCacheFrequency = updateCacheFrequencyProp;
}
} else {
isAppendOnlySchema = parent.isAppendOnlySchema();
updateCacheFrequency = parent.getUpdateCacheFrequency();
}
// updateCacheFrequency cannot be set to ALWAYS if isAppendOnlySchema is true
if (isAppendOnlySchema && updateCacheFrequency == 0) {
throw new SQLExceptionInfo.Builder(SQLExceptionCode.UPDATE_CACHE_FREQUENCY_INVALID).setSchemaName(tableName.getSchemaName()).setTableName(tableName.getTableName()).build().buildException();
}
Boolean immutableProp = (Boolean) TableProperty.IMMUTABLE_ROWS.getValue(tableProps);
if (statement.immutableRows() != null && immutableProp != null) {
throw new SQLExceptionInfo.Builder(SQLExceptionCode.IMMUTABLE_TABLE_PROPERTY_INVALID).setSchemaName(tableName.getSchemaName()).setTableName(tableName.getTableName()).build().buildException();
}
PTable table = null;
// If APPEND_ONLY_SCHEMA is true, first check whether the table is already present in the cache;
// if it is, add only the columns that are not already present
if (isAppendOnlySchema) {
// look up the table in the cache
MetaDataMutationResult result = updateCache(tableName.getSchemaName(), tableName.getTableName());
if (result.getMutationCode() == MutationCode.TABLE_ALREADY_EXISTS) {
table = result.getTable();
if (!statement.ifNotExists()) {
throw new NewerTableAlreadyExistsException(tableName.getSchemaName(), tableName.getTableName(), table);
}
List<ColumnDef> columnDefs = statement.getColumnDefs();
PrimaryKeyConstraint pkConstraint = statement.getPrimaryKeyConstraint();
// get the list of columns to add
for (ColumnDef columnDef : columnDefs) {
if (pkConstraint.contains(columnDef.getColumnDefName())) {
columnDef.setIsPK(true);
}
}
// if there are new columns to add
return addColumn(table, columnDefs, statement.getProps(), statement.ifNotExists(), true, NamedTableNode.create(statement.getTableName()), statement.getTableType());
}
}
table = createTableInternal(statement, splits, parent, viewStatement, viewType, viewColumnConstants, isViewColumnReferenced, false, null, null, tableProps, commonFamilyProps);
if (table == null || table.getType() == PTableType.VIEW) /*|| table.isTransactional()*/
{
return new MutationState(0, 0, connection);
}
// Hack to get around the case when an SCN is specified on the connection.
// In this case, we won't see the table we just created yet, so we hack
// around it by forcing the compiler to not resolve anything.
PostDDLCompiler compiler = new PostDDLCompiler(connection);
//connection.setAutoCommit(true);
// Execute any necessary data updates
Long scn = connection.getSCN();
long ts = (scn == null ? table.getTimeStamp() : scn);
// Getting the schema through the current connection doesn't work when the connection has an SCN specified,
// since the table won't be added to the current connection.
TableRef tableRef = new TableRef(null, table, ts, false);
byte[] emptyCF = SchemaUtil.getEmptyColumnFamily(table);
MutationPlan plan = compiler.compile(Collections.singletonList(tableRef), emptyCF, null, null, tableRef.getTimeStamp());
return connection.getQueryServices().updateData(plan);
}
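A hedged sketch of the append-only path above (schema, table, and property values are hypothetical): APPEND_ONLY_SCHEMA requires a non-zero UPDATE_CACHE_FREQUENCY per the check above, and re-issuing the same CREATE TABLE IF NOT EXISTS with an extra column is resolved through the cached table and handed to addColumn instead of failing.

// Sketch only; assumes an open java.sql.Statement stmt on a Phoenix connection.
stmt.executeUpdate(
    "CREATE TABLE IF NOT EXISTS MY_SCHEMA.EVENTS (ID BIGINT NOT NULL PRIMARY KEY, V1 VARCHAR) " +
    "APPEND_ONLY_SCHEMA = true, UPDATE_CACHE_FREQUENCY = 300000");
// Re-running with an additional column V2 adds only the new column to the existing table.
stmt.executeUpdate(
    "CREATE TABLE IF NOT EXISTS MY_SCHEMA.EVENTS (ID BIGINT NOT NULL PRIMARY KEY, V1 VARCHAR, V2 VARCHAR) " +
    "APPEND_ONLY_SCHEMA = true, UPDATE_CACHE_FREQUENCY = 300000");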
Use of org.apache.phoenix.parse.TableName in project phoenix by apache.
The class SequenceManager, method newSequenceReference.
public SequenceValueExpression newSequenceReference(SequenceValueParseNode node) throws SQLException {
PName tenantName = statement.getConnection().getTenantId();
String tenantId = tenantName == null ? null : tenantName.getString();
TableName tableName = node.getTableName();
if (tableName.getSchemaName() == null && statement.getConnection().getSchema() != null) {
tableName = TableName.create(statement.getConnection().getSchema(), tableName.getTableName());
}
int nSaltBuckets = statement.getConnection().getQueryServices().getSequenceSaltBuckets();
ParseNode numToAllocateNode = node.getNumToAllocateNode();
long numToAllocate = determineNumToAllocate(tableName, numToAllocateNode);
SequenceKey key = new SequenceKey(tenantId, tableName.getSchemaName(), tableName.getTableName(), nSaltBuckets);
SequenceValueExpression expression = sequenceMap.get(key);
if (expression == null) {
int index = sequenceMap.size();
expression = new SequenceValueExpression(key, node.getOp(), index, numToAllocate);
sequenceMap.put(key, expression);
} else if (expression.op != node.getOp() || expression.getNumToAllocate() < numToAllocate) {
// Keep the maximum allocation size we see in a statement
SequenceValueExpression oldExpression = expression;
expression = new SequenceValueExpression(key, node.getOp(), expression.getIndex(), Math.max(expression.getNumToAllocate(), numToAllocate));
if (oldExpression.getNumToAllocate() < numToAllocate) {
// If we found a NEXT VALUE expression with a higher number to allocate
// We override the original expression
sequenceMap.put(key, expression);
}
}
// If we see a NEXT and a CURRENT, treat the CURRENT just like a NEXT
if (node.getOp() == Op.NEXT_VALUE) {
isNextSequence.set(expression.getIndex());
}
return expression;
}
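A hedged usage sketch (sequence name and allocation count are hypothetical; java.sql imports and an open Phoenix Statement stmt are assumed): each NEXT VALUE FOR or NEXT <n> VALUES FOR reference flows through newSequenceReference, where a missing schema is defaulted from the connection and the largest allocation requested for a sequence key within the statement is retained.

// Sketch only.
stmt.executeUpdate("CREATE SEQUENCE IF NOT EXISTS MY_SCHEMA.MY_SEQ START WITH 1 INCREMENT BY 1");
try (ResultSet single = stmt.executeQuery("SELECT NEXT VALUE FOR MY_SCHEMA.MY_SEQ")) {
    single.next();
    System.out.println(single.getLong(1));
}
// The bulk form drives the numToAllocate handling above.
try (ResultSet bulk = stmt.executeQuery("SELECT NEXT 5 VALUES FOR MY_SCHEMA.MY_SEQ")) {
    bulk.next();
    System.out.println(bulk.getLong(1)); // first value of the reserved block
}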
Use of org.apache.phoenix.parse.TableName in project phoenix by apache.
The class StatementNormalizer, method normalize.
/**
* Rewrite the select statement by switching any constants to the right hand side
* of the expression.
* @param statement the select statement
* @param resolver
* @return new select statement
* @throws SQLException
*/
public static SelectStatement normalize(SelectStatement statement, ColumnResolver resolver) throws SQLException {
boolean multiTable = statement.isJoin();
// Replace WildcardParseNode with a list of TableWildcardParseNode for multi-table queries
if (multiTable) {
List<AliasedNode> selectNodes = statement.getSelect();
List<AliasedNode> normSelectNodes = selectNodes;
for (int i = 0; i < selectNodes.size(); i++) {
AliasedNode aliasedNode = selectNodes.get(i);
ParseNode selectNode = aliasedNode.getNode();
if (selectNode == WildcardParseNode.INSTANCE) {
if (selectNodes == normSelectNodes) {
normSelectNodes = Lists.newArrayList(selectNodes.subList(0, i));
}
List<TableName> tableNames = statement.getFrom().accept(new TableNameVisitor());
for (TableName tableName : tableNames) {
TableWildcardParseNode node = NODE_FACTORY.tableWildcard(tableName);
normSelectNodes.add(NODE_FACTORY.aliasedNode(null, node));
}
} else if (selectNodes != normSelectNodes) {
normSelectNodes.add(aliasedNode);
}
}
if (selectNodes != normSelectNodes) {
statement = NODE_FACTORY.select(statement.getFrom(), statement.getHint(), statement.isDistinct(), normSelectNodes, statement.getWhere(), statement.getGroupBy(), statement.getHaving(), statement.getOrderBy(), statement.getLimit(), statement.getOffset(), statement.getBindCount(), statement.isAggregate(), statement.hasSequence(), statement.getSelects(), statement.getUdfParseNodes());
}
}
return rewrite(statement, new StatementNormalizer(resolver, statement.getSelect().size(), multiTable));
}
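As a hedged illustration of the wildcard rewrite above (the table names and the resolver factory call are assumptions, not taken from this snippet): for a join, the bare * in the select list is replaced with one TableWildcardParseNode per joined table.

// Sketch only; assumes an open PhoenixConnection named connection.
SelectStatement parsed =
    new SQLParser("SELECT * FROM T1 JOIN T2 ON T1.ID = T2.ID").parseQuery();
ColumnResolver resolver = FromCompiler.getResolverForQuery(parsed, connection);
SelectStatement normalized = StatementNormalizer.normalize(parsed, resolver);
// normalized now selects T1.*, T2.* rather than the bare *.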