Use of org.apache.phoenix.compile.PostDDLCompiler in project phoenix by apache.
The class MetaDataClient, method dropColumn.
public MutationState dropColumn(DropColumnStatement statement) throws SQLException {
connection.rollback();
boolean wasAutoCommit = connection.getAutoCommit();
try {
connection.setAutoCommit(false);
PName tenantId = connection.getTenantId();
TableName tableNameNode = statement.getTable().getName();
String schemaName = tableNameNode.getSchemaName();
String tableName = tableNameNode.getTableName();
String fullTableName = SchemaUtil.getTableName(schemaName, tableName);
boolean retried = false;
while (true) {
final ColumnResolver resolver = FromCompiler.getResolver(statement, connection);
TableRef tableRef = resolver.getTables().get(0);
PTable table = tableRef.getTable();
List<ColumnName> columnRefs = statement.getColumnRefs();
if (columnRefs == null) {
columnRefs = Lists.newArrayListWithCapacity(0);
}
List<ColumnRef> columnsToDrop = Lists.newArrayListWithExpectedSize(columnRefs.size() + table.getIndexes().size());
List<TableRef> indexesToDrop = Lists.newArrayListWithExpectedSize(table.getIndexes().size());
List<Mutation> tableMetaData = Lists.newArrayListWithExpectedSize((table.getIndexes().size() + 1) * (1 + table.getColumns().size() - columnRefs.size()));
List<PColumn> tableColumnsToDrop = Lists.newArrayListWithExpectedSize(columnRefs.size());
for (ColumnName column : columnRefs) {
ColumnRef columnRef = null;
try {
columnRef = resolver.resolveColumn(null, column.getFamilyName(), column.getColumnName());
} catch (ColumnNotFoundException e) {
if (statement.ifExists()) {
return new MutationState(0, 0, connection);
}
throw e;
}
PColumn columnToDrop = columnRef.getColumn();
tableColumnsToDrop.add(columnToDrop);
if (SchemaUtil.isPKColumn(columnToDrop)) {
throw new SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_DROP_PK).setColumnName(columnToDrop.getName().getString()).build().buildException();
} else if (table.isAppendOnlySchema()) {
throw new SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_DROP_COL_APPEND_ONLY_SCHEMA).setColumnName(columnToDrop.getName().getString()).build().buildException();
}
columnsToDrop.add(new ColumnRef(columnRef.getTableRef(), columnToDrop.getPosition()));
}
dropColumnMutations(table, tableColumnsToDrop);
boolean removedIndexTableOrColumn = false;
Long timeStamp = table.isTransactional() ? tableRef.getTimeStamp() : null;
for (PTable index : table.getIndexes()) {
IndexMaintainer indexMaintainer = index.getIndexMaintainer(table, connection);
// get the covered columns
List<PColumn> indexColumnsToDrop = Lists.newArrayListWithExpectedSize(columnRefs.size());
Set<Pair<String, String>> indexedColsInfo = indexMaintainer.getIndexedColumnInfo();
Set<ColumnReference> coveredCols = indexMaintainer.getCoveredColumns();
for (PColumn columnToDrop : tableColumnsToDrop) {
Pair<String, String> columnToDropInfo = new Pair<>(columnToDrop.getFamilyName().getString(), columnToDrop.getName().getString());
ColumnReference colDropRef = new ColumnReference(columnToDrop.getFamilyName() == null ? null : columnToDrop.getFamilyName().getBytes(), columnToDrop.getColumnQualifierBytes());
boolean isColumnIndexed = indexedColsInfo.contains(columnToDropInfo);
if (isColumnIndexed) {
if (index.getViewIndexId() == null) {
indexesToDrop.add(new TableRef(index));
}
connection.removeTable(tenantId, SchemaUtil.getTableName(schemaName, index.getName().getString()), index.getParentName() == null ? null : index.getParentName().getString(), index.getTimeStamp());
removedIndexTableOrColumn = true;
} else if (coveredCols.contains(colDropRef)) {
String indexColumnName = IndexUtil.getIndexColumnName(columnToDrop);
PColumn indexColumn = index.getColumnForColumnName(indexColumnName);
indexColumnsToDrop.add(indexColumn);
// add the index column to be dropped so that we actually delete the column values
columnsToDrop.add(new ColumnRef(new TableRef(index), indexColumn.getPosition()));
removedIndexTableOrColumn = true;
}
}
if (!indexColumnsToDrop.isEmpty()) {
long indexTableSeqNum = incrementTableSeqNum(index, index.getType(), -indexColumnsToDrop.size(), null, null);
dropColumnMutations(index, indexColumnsToDrop);
long clientTimestamp = MutationState.getMutationTimestamp(timeStamp, connection.getSCN());
connection.removeColumn(tenantId, index.getName().getString(), indexColumnsToDrop, clientTimestamp, indexTableSeqNum, TransactionUtil.getResolvedTimestamp(connection, index.isTransactional(), clientTimestamp));
}
}
tableMetaData.addAll(connection.getMutationState().toMutations(timeStamp).next().getSecond());
connection.rollback();
long seqNum = incrementTableSeqNum(table, statement.getTableType(), -tableColumnsToDrop.size(), null, null);
tableMetaData.addAll(connection.getMutationState().toMutations(timeStamp).next().getSecond());
connection.rollback();
// Force table header to be first in list
Collections.reverse(tableMetaData);
/*
* Ensure our "empty column family to be" exists. Somewhat of an edge case, but can occur if we drop the last column
* in a column family that was the empty column family. In that case, we have to pick another one. If there are no other
* ones, then we need to create our default empty column family. Note that this may no longer be necessary once we
* support declaring what the empty column family is on a table, as:
* - If you declare it, we'd just ensure it's created at DDL time and never switch what it is unless you change it
* - If you don't declare it, we can just continue to use the old empty column family in this case, dynamically updating
* the empty column family name on the PTable.
*/
for (ColumnRef columnRefToDrop : columnsToDrop) {
PTable tableContainingColumnToDrop = columnRefToDrop.getTable();
byte[] emptyCF = getNewEmptyColumnFamilyOrNull(tableContainingColumnToDrop, columnRefToDrop.getColumn());
if (emptyCF != null) {
try {
tableContainingColumnToDrop.getColumnFamily(emptyCF);
} catch (ColumnFamilyNotFoundException e) {
// Only if it's not already a column family do we need to ensure it's created
Map<String, List<Pair<String, Object>>> family = new HashMap<>(1);
family.put(Bytes.toString(emptyCF), Collections.<Pair<String, Object>>emptyList());
// Just use a Put without any key values as the Mutation, as addColumn will treat this specially
// TODO: pass through schema name and table name instead to these methods as it's cleaner
byte[] tenantIdBytes = connection.getTenantId() == null ? null : connection.getTenantId().getBytes();
if (tenantIdBytes == null)
tenantIdBytes = ByteUtil.EMPTY_BYTE_ARRAY;
connection.getQueryServices().addColumn(Collections.<Mutation>singletonList(new Put(SchemaUtil.getTableKey(tenantIdBytes, tableContainingColumnToDrop.getSchemaName().getBytes(), tableContainingColumnToDrop.getTableName().getBytes()))), tableContainingColumnToDrop, family, Sets.newHashSet(Bytes.toString(emptyCF)), Collections.<PColumn>emptyList());
}
}
}
MetaDataMutationResult result = connection.getQueryServices().dropColumn(tableMetaData, statement.getTableType());
try {
MutationCode code = processMutationResult(schemaName, tableName, result);
if (code == MutationCode.COLUMN_NOT_FOUND) {
addTableToCache(result);
if (!statement.ifExists()) {
throw new ColumnNotFoundException(schemaName, tableName, Bytes.toString(result.getFamilyName()), Bytes.toString(result.getColumnName()));
}
return new MutationState(0, 0, connection);
}
// the server when needed.
if (tableColumnsToDrop.size() > 0) {
if (removedIndexTableOrColumn)
connection.removeTable(tenantId, tableName, table.getParentName() == null ? null : table.getParentName().getString(), table.getTimeStamp());
else
connection.removeColumn(tenantId, SchemaUtil.getTableName(schemaName, tableName), tableColumnsToDrop, result.getMutationTime(), seqNum, TransactionUtil.getResolvedTime(connection, result));
}
// If we have a VIEW, then only delete the metadata, and leave the table data alone
if (table.getType() != PTableType.VIEW) {
MutationState state = null;
connection.setAutoCommit(true);
Long scn = connection.getSCN();
// Delete everything in the column. You'll still be able to do queries at earlier timestamps
long ts = (scn == null ? result.getMutationTime() : scn);
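// PostDDLCompiler builds a MutationPlan for post-DDL data maintenance; here it compiles scans that issue delete markers for the dropped columns' cells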
PostDDLCompiler compiler = new PostDDLCompiler(connection);
boolean dropMetaData = connection.getQueryServices().getProps().getBoolean(DROP_METADATA_ATTRIB, DEFAULT_DROP_METADATA);
// If the index is a local index or view index it uses a shared physical table,
// so we need to issue delete markers for all the rows of the index
final List<TableRef> tableRefsToDrop = Lists.newArrayList();
Map<String, List<TableRef>> tenantIdTableRefMap = Maps.newHashMap();
if (result.getSharedTablesToDelete() != null) {
for (SharedTableState sharedTableState : result.getSharedTablesToDelete()) {
PTableImpl viewIndexTable = new PTableImpl(sharedTableState.getTenantId(), sharedTableState.getSchemaName(), sharedTableState.getTableName(), ts, table.getColumnFamilies(), sharedTableState.getColumns(), sharedTableState.getPhysicalNames(), sharedTableState.getViewIndexId(), table.isMultiTenant(), table.isNamespaceMapped(), table.getImmutableStorageScheme(), table.getEncodingScheme(), table.getEncodedCQCounter(), table.useStatsForParallelization());
TableRef indexTableRef = new TableRef(viewIndexTable);
PName indexTableTenantId = sharedTableState.getTenantId();
if (indexTableTenantId == null) {
tableRefsToDrop.add(indexTableRef);
} else {
if (!tenantIdTableRefMap.containsKey(indexTableTenantId)) {
tenantIdTableRefMap.put(indexTableTenantId.getString(), Lists.<TableRef>newArrayList());
}
tenantIdTableRefMap.get(indexTableTenantId.getString()).add(indexTableRef);
}
}
}
// If dropMetaData is false, delete all rows for the indexes (if it was true,
// they would have been dropped in ConnectionQueryServices.dropColumn)
if (!dropMetaData) {
tableRefsToDrop.addAll(indexesToDrop);
}
// Drop any index tables that had the dropped column in the PK
state = connection.getQueryServices().updateData(compiler.compile(tableRefsToDrop, null, null, Collections.<PColumn>emptyList(), ts));
// Drop any tenant-specific indexes
if (!tenantIdTableRefMap.isEmpty()) {
for (Entry<String, List<TableRef>> entry : tenantIdTableRefMap.entrySet()) {
String indexTenantId = entry.getKey();
Properties props = new Properties(connection.getClientInfo());
props.setProperty(PhoenixRuntime.TENANT_ID_ATTRIB, indexTenantId);
try (PhoenixConnection tenantConn = new PhoenixConnection(connection, connection.getQueryServices(), props)) {
PostDDLCompiler dropCompiler = new PostDDLCompiler(tenantConn);
state = tenantConn.getQueryServices().updateData(dropCompiler.compile(entry.getValue(), null, null, Collections.<PColumn>emptyList(), ts));
}
}
}
// See https://issues.apache.org/jira/browse/PHOENIX-3605
if (!table.isImmutableRows() || table.getImmutableStorageScheme() == ImmutableStorageScheme.ONE_CELL_PER_COLUMN) {
// Update empty key value column if necessary
for (ColumnRef droppedColumnRef : columnsToDrop) {
// Painful, but we need a TableRef with a pre-set timestamp to prevent attempts
// to get any updates from the region server.
// TODO: move this into PostDDLCompiler
// TODO: consider filtering mutable indexes here, but then the issue is that
// we'd need to force an update of the data row empty key value if a mutable
// secondary index is changing its empty key value family.
droppedColumnRef = droppedColumnRef.cloneAtTimestamp(ts);
TableRef droppedColumnTableRef = droppedColumnRef.getTableRef();
PColumn droppedColumn = droppedColumnRef.getColumn();
MutationPlan plan = compiler.compile(Collections.singletonList(droppedColumnTableRef), getNewEmptyColumnFamilyOrNull(droppedColumnTableRef.getTable(), droppedColumn), null, Collections.singletonList(droppedColumn), ts);
state = connection.getQueryServices().updateData(plan);
}
}
// Return the last MutationState
return state;
}
return new MutationState(0, 0, connection);
} catch (ConcurrentTableMutationException e) {
if (retried) {
throw e;
}
table = connection.getTable(new PTableKey(tenantId, fullTableName));
retried = true;
}
}
} finally {
connection.setAutoCommit(wasAutoCommit);
}
}
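For reference, a minimal client-side sketch of the statement that drives this method. The JDBC URL, schema, table, and column names below are illustrative assumptions, not taken from the code above.

import java.sql.Connection;
import java.sql.DriverManager;

public class DropColumnExample {
    public static void main(String[] args) throws Exception {
        // Hypothetical Phoenix connection; adjust the ZooKeeper quorum to your cluster.
        try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost:2181")) {
            // IF EXISTS mirrors statement.ifExists() above: a missing column yields an
            // empty MutationState instead of a ColumnNotFoundException.
            conn.createStatement().execute("ALTER TABLE MY_SCHEMA.MY_TABLE DROP COLUMN IF EXISTS V2");
        }
    }
}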
Use of org.apache.phoenix.compile.PostDDLCompiler in project phoenix by apache.
The class MetaDataClient, method updateStatisticsInternal.
private long updateStatisticsInternal(PName physicalName, PTable logicalTable, Map<String, Object> statsProps, List<byte[]> cfs) throws SQLException {
ReadOnlyProps props = connection.getQueryServices().getProps();
final long msMinBetweenUpdates = props.getLong(QueryServices.MIN_STATS_UPDATE_FREQ_MS_ATTRIB, props.getLong(QueryServices.STATS_UPDATE_FREQ_MS_ATTRIB, QueryServicesOptions.DEFAULT_STATS_UPDATE_FREQ_MS) / 2);
byte[] tenantIdBytes = ByteUtil.EMPTY_BYTE_ARRAY;
Long scn = connection.getSCN();
// Always invalidate the cache
long clientTimeStamp = connection.getSCN() == null ? HConstants.LATEST_TIMESTAMP : scn;
String query = "SELECT CURRENT_DATE()," + LAST_STATS_UPDATE_TIME + " FROM " + PhoenixDatabaseMetaData.SYSTEM_STATS_NAME + " WHERE " + PHYSICAL_NAME + "='" + physicalName.getString() + "' AND " + COLUMN_FAMILY + " IS NULL AND " + LAST_STATS_UPDATE_TIME + " IS NOT NULL";
ResultSet rs = connection.createStatement().executeQuery(query);
long msSinceLastUpdate = Long.MAX_VALUE;
if (rs.next()) {
msSinceLastUpdate = rs.getLong(1) - rs.getLong(2);
}
long rowCount = 0;
if (msSinceLastUpdate >= msMinBetweenUpdates) {
/*
* Execute a COUNT(*) through PostDDLCompiler as we need to use the logicalTable passed through,
* since it may not represent a "real" table in the case of the view indexes of a base table.
*/
PostDDLCompiler compiler = new PostDDLCompiler(connection);
// Even if the table is transactional, we scan it non-transactionally when calculating stats
// so that we see all the data belonging to the table
PTable nonTxnLogicalTable = new DelegateTable(logicalTable) {
@Override
public boolean isTransactional() {
return false;
}
};
TableRef tableRef = new TableRef(null, nonTxnLogicalTable, clientTimeStamp, false);
MutationPlan plan = compiler.compile(Collections.singletonList(tableRef), null, cfs, null, clientTimeStamp);
Scan scan = plan.getContext().getScan();
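// Tag the compiled plan's scan so the server-side region observer collects guidepost stats as the scan runs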
scan.setCacheBlocks(false);
scan.setAttribute(ANALYZE_TABLE, TRUE_BYTES);
boolean runUpdateStatsAsync = props.getBoolean(QueryServices.RUN_UPDATE_STATS_ASYNC, DEFAULT_RUN_UPDATE_STATS_ASYNC);
scan.setAttribute(RUN_UPDATE_STATS_ASYNC_ATTRIB, runUpdateStatsAsync ? TRUE_BYTES : FALSE_BYTES);
if (statsProps != null) {
Object gp_width = statsProps.get(QueryServices.STATS_GUIDEPOST_WIDTH_BYTES_ATTRIB);
if (gp_width != null) {
scan.setAttribute(BaseScannerRegionObserver.GUIDEPOST_WIDTH_BYTES, PLong.INSTANCE.toBytes(gp_width));
}
Object gp_per_region = statsProps.get(QueryServices.STATS_GUIDEPOST_PER_REGION_ATTRIB);
if (gp_per_region != null) {
scan.setAttribute(BaseScannerRegionObserver.GUIDEPOST_PER_REGION, PInteger.INSTANCE.toBytes(gp_per_region));
}
}
MutationState mutationState = plan.execute();
rowCount = mutationState.getUpdateCount();
}
/*
* Update the stats table so that client will pull the new one with the updated stats.
* Even if we don't run the command due to the last update time, invalidate the cache.
* This supports scenarios in which a major compaction was manually initiated and the
* client wants the modified stats to be reflected immediately.
*/
if (cfs == null) {
List<PColumnFamily> families = logicalTable.getColumnFamilies();
if (families.isEmpty()) {
connection.getQueryServices().invalidateStats(new GuidePostsKey(physicalName.getBytes(), SchemaUtil.getEmptyColumnFamily(logicalTable)));
} else {
for (PColumnFamily family : families) {
connection.getQueryServices().invalidateStats(new GuidePostsKey(physicalName.getBytes(), family.getName().getBytes()));
}
}
} else {
for (byte[] cf : cfs) {
connection.getQueryServices().invalidateStats(new GuidePostsKey(physicalName.getBytes(), cf));
}
}
return rowCount;
}
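As a hedged usage sketch, this path is reached through the UPDATE STATISTICS statement; the connection URL, table name, and guidepost width below are assumptions for illustration.

import java.sql.Connection;
import java.sql.DriverManager;

public class UpdateStatisticsExample {
    public static void main(String[] args) throws Exception {
        try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost:2181")) {
            // The SET clause feeds the statsProps map above; here it overrides the guidepost
            // width (QueryServices.STATS_GUIDEPOST_WIDTH_BYTES_ATTRIB) for this run only.
            conn.createStatement().execute(
                    "UPDATE STATISTICS MY_SCHEMA.MY_TABLE ALL SET \"phoenix.stats.guidepost.width\" = 10000000");
        }
    }
}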
Use of org.apache.phoenix.compile.PostDDLCompiler in project phoenix by apache.
The class MetaDataClient, method alterIndex.
public MutationState alterIndex(AlterIndexStatement statement) throws SQLException {
connection.rollback();
boolean wasAutoCommit = connection.getAutoCommit();
try {
String dataTableName = statement.getTableName();
String schemaName = statement.getTable().getName().getSchemaName();
String indexName = statement.getTable().getName().getTableName();
boolean isAsync = statement.isAsync();
PIndexState newIndexState = statement.getIndexState();
if (isAsync && newIndexState != PIndexState.REBUILD) {
throw new SQLExceptionInfo.Builder(SQLExceptionCode.ASYNC_NOT_ALLOWED).setMessage(" ASYNC building of index is allowed only with REBUILD index state").setSchemaName(schemaName).setTableName(indexName).build().buildException();
}
if (newIndexState == PIndexState.REBUILD) {
newIndexState = PIndexState.BUILDING;
}
connection.setAutoCommit(false);
// Confirm index table is valid and up-to-date
TableRef indexRef = FromCompiler.getResolver(statement, connection).getTables().get(0);
PreparedStatement tableUpsert = null;
try {
if (newIndexState == PIndexState.ACTIVE) {
tableUpsert = connection.prepareStatement(UPDATE_INDEX_STATE_TO_ACTIVE);
} else {
tableUpsert = connection.prepareStatement(UPDATE_INDEX_STATE);
}
tableUpsert.setString(1, connection.getTenantId() == null ? null : connection.getTenantId().getString());
tableUpsert.setString(2, schemaName);
tableUpsert.setString(3, indexName);
tableUpsert.setString(4, newIndexState.getSerializedValue());
tableUpsert.setLong(5, 0);
if (newIndexState == PIndexState.ACTIVE) {
tableUpsert.setLong(6, 0);
}
tableUpsert.execute();
} finally {
if (tableUpsert != null) {
tableUpsert.close();
}
}
Long timeStamp = indexRef.getTable().isTransactional() ? indexRef.getTimeStamp() : null;
List<Mutation> tableMetadata = connection.getMutationState().toMutations(timeStamp).next().getSecond();
connection.rollback();
MetaDataMutationResult result = connection.getQueryServices().updateIndexState(tableMetadata, dataTableName);
MutationCode code = result.getMutationCode();
if (code == MutationCode.TABLE_NOT_FOUND) {
throw new TableNotFoundException(schemaName, indexName);
}
if (code == MutationCode.UNALLOWED_TABLE_MUTATION) {
throw new SQLExceptionInfo.Builder(SQLExceptionCode.INVALID_INDEX_STATE_TRANSITION).setMessage(" currentState=" + indexRef.getTable().getIndexState() + ". requestedState=" + newIndexState).setSchemaName(schemaName).setTableName(indexName).build().buildException();
}
if (code == MutationCode.TABLE_ALREADY_EXISTS) {
if (result.getTable() != null) {
// To accommodate connection-less update of index state
addTableToCache(result);
// Set so that we get the table below with the potentially modified rowKeyOrderOptimizable flag set
indexRef.setTable(result.getTable());
if (newIndexState == PIndexState.BUILDING && isAsync) {
try {
tableUpsert = connection.prepareStatement(UPDATE_INDEX_REBUILD_ASYNC_STATE);
tableUpsert.setString(1, connection.getTenantId() == null ? null : connection.getTenantId().getString());
tableUpsert.setString(2, schemaName);
tableUpsert.setString(3, indexName);
tableUpsert.setLong(4, result.getTable().getTimeStamp());
tableUpsert.execute();
connection.commit();
} finally {
if (tableUpsert != null) {
tableUpsert.close();
}
}
}
}
}
if (newIndexState == PIndexState.BUILDING && !isAsync) {
PTable index = indexRef.getTable();
// First delete any existing rows of the index
Long scn = connection.getSCN();
long ts = scn == null ? HConstants.LATEST_TIMESTAMP : scn;
MutationPlan plan = new PostDDLCompiler(connection).compile(Collections.singletonList(indexRef), null, null, Collections.<PColumn>emptyList(), ts);
connection.getQueryServices().updateData(plan);
NamedTableNode dataTableNode = NamedTableNode.create(null, TableName.create(schemaName, dataTableName), Collections.<ColumnDef>emptyList());
// Next rebuild the index
connection.setAutoCommit(true);
if (connection.getSCN() != null) {
return buildIndexAtTimeStamp(index, dataTableNode);
}
TableRef dataTableRef = FromCompiler.getResolver(dataTableNode, connection).getTables().get(0);
return buildIndex(index, dataTableRef);
}
return new MutationState(1, 1000, connection);
} catch (TableNotFoundException e) {
if (!statement.ifExists()) {
throw e;
}
return new MutationState(0, 0, connection);
} finally {
connection.setAutoCommit(wasAutoCommit);
}
}
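A minimal sketch of the statement that exercises this method, assuming a hypothetical index and table. REBUILD moves the index to the BUILDING state, and the PostDDLCompiler plan above first clears the existing index rows.

import java.sql.Connection;
import java.sql.DriverManager;

public class AlterIndexExample {
    public static void main(String[] args) throws Exception {
        try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost:2181")) {
            // Synchronous rebuild; appending ASYNC is only legal with REBUILD, as the
            // ASYNC_NOT_ALLOWED check above enforces.
            conn.createStatement().execute("ALTER INDEX MY_IDX ON MY_SCHEMA.MY_TABLE REBUILD");
        }
    }
}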
Use of org.apache.phoenix.compile.PostDDLCompiler in project phoenix by apache.
The class MetaDataClient, method dropTable.
private MutationState dropTable(String schemaName, String tableName, String parentTableName, PTableType tableType, boolean ifExists, boolean cascade) throws SQLException {
connection.rollback();
boolean wasAutoCommit = connection.getAutoCommit();
try {
PName tenantId = connection.getTenantId();
String tenantIdStr = tenantId == null ? null : tenantId.getString();
byte[] key = SchemaUtil.getTableKey(tenantIdStr, schemaName, tableName);
Long scn = connection.getSCN();
long clientTimeStamp = scn == null ? HConstants.LATEST_TIMESTAMP : scn;
List<Mutation> tableMetaData = Lists.newArrayListWithExpectedSize(2);
Delete tableDelete = new Delete(key, clientTimeStamp);
tableMetaData.add(tableDelete);
boolean hasViewIndexTable = false;
if (parentTableName != null) {
byte[] linkKey = MetaDataUtil.getParentLinkKey(tenantIdStr, schemaName, parentTableName, tableName);
Delete linkDelete = new Delete(linkKey, clientTimeStamp);
tableMetaData.add(linkDelete);
}
MetaDataMutationResult result = connection.getQueryServices().dropTable(tableMetaData, tableType, cascade);
MutationCode code = result.getMutationCode();
PTable table = result.getTable();
switch(code) {
case TABLE_NOT_FOUND:
if (!ifExists) {
throw new TableNotFoundException(schemaName, tableName);
}
break;
case NEWER_TABLE_FOUND:
throw new NewerTableAlreadyExistsException(schemaName, tableName, result.getTable());
case UNALLOWED_TABLE_MUTATION:
throw new SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_MUTATE_TABLE).setSchemaName(schemaName).setTableName(tableName).build().buildException();
default:
connection.removeTable(tenantId, SchemaUtil.getTableName(schemaName, tableName), parentTableName, result.getMutationTime());
if (table != null) {
boolean dropMetaData = false;
long ts = (scn == null ? result.getMutationTime() : scn);
List<TableRef> tableRefs = Lists.newArrayListWithExpectedSize(2 + table.getIndexes().size());
connection.setAutoCommit(true);
if (tableType == PTableType.VIEW) {
for (PTable index : table.getIndexes()) {
tableRefs.add(new TableRef(null, index, ts, false));
}
} else {
dropMetaData = result.getTable().getViewIndexId() == null && connection.getQueryServices().getProps().getBoolean(DROP_METADATA_ATTRIB, DEFAULT_DROP_METADATA);
// All multi-tenant tables have a view index table, so no need to check in that case
if (parentTableName == null) {
// Keep always true so that stats are deleted whether the view index table is present or not
hasViewIndexTable = true;
MetaDataUtil.deleteViewIndexSequences(connection, table.getPhysicalName(), table.isNamespaceMapped());
byte[] viewIndexPhysicalName = MetaDataUtil.getViewIndexPhysicalName(table.getPhysicalName().getBytes());
if (!dropMetaData) {
// We only need to drop rows when the view index table actually exists
try (HBaseAdmin admin = connection.getQueryServices().getAdmin()) {
hasViewIndexTable = admin.tableExists(viewIndexPhysicalName);
} catch (IOException e1) {
// Absorb the exception, as this check is not critical
}
}
}
if (tableType == PTableType.TABLE && (table.isMultiTenant() || hasViewIndexTable)) {
if (hasViewIndexTable) {
byte[] viewIndexPhysicalName = MetaDataUtil.getViewIndexPhysicalName(table.getPhysicalName().getBytes());
PTable viewIndexTable = new PTableImpl(null, SchemaUtil.getSchemaNameFromFullName(viewIndexPhysicalName), SchemaUtil.getTableNameFromFullName(viewIndexPhysicalName), ts, table.getColumnFamilies(), table.isNamespaceMapped(), table.getImmutableStorageScheme(), table.getEncodingScheme(), table.useStatsForParallelization());
tableRefs.add(new TableRef(null, viewIndexTable, ts, false));
}
}
tableRefs.add(new TableRef(null, table, ts, false));
// TODO: Let the standard mutable secondary index maintenance handle this?
for (PTable index : table.getIndexes()) {
tableRefs.add(new TableRef(null, index, ts, false));
}
deleteFromStatsTable(tableRefs, ts);
}
if (!dropMetaData) {
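// The physical HBase tables are being kept, so compile a plan that writes delete markers over the table and its indexes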
MutationPlan plan = new PostDDLCompiler(connection).compile(tableRefs, null, null, Collections.<PColumn>emptyList(), ts);
// Delete everything in the table. You'll still be able to run queries at earlier timestamps
return connection.getQueryServices().updateData(plan);
}
}
break;
}
return new MutationState(0, 0, connection);
} finally {
connection.setAutoCommit(wasAutoCommit);
}
}
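A short usage sketch with assumed names; note that when phoenix.schema.dropMetaData is false, the method keeps the physical HBase tables and uses the PostDDLCompiler plan to write delete markers instead.

import java.sql.Connection;
import java.sql.DriverManager;

public class DropTableExample {
    public static void main(String[] args) throws Exception {
        try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost:2181")) {
            // CASCADE also drops any views defined on the table.
            conn.createStatement().execute("DROP TABLE IF EXISTS MY_SCHEMA.MY_TABLE CASCADE");
        }
    }
}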
Use of org.apache.phoenix.compile.PostDDLCompiler in project phoenix by apache.
The class MetaDataClient, method createTable.
public MutationState createTable(CreateTableStatement statement, byte[][] splits, PTable parent, String viewStatement, ViewType viewType, byte[][] viewColumnConstants, BitSet isViewColumnReferenced) throws SQLException {
TableName tableName = statement.getTableName();
Map<String, Object> tableProps = Maps.newHashMapWithExpectedSize(statement.getProps().size());
Map<String, Object> commonFamilyProps = Maps.newHashMapWithExpectedSize(statement.getProps().size() + 1);
populatePropertyMaps(statement.getProps(), tableProps, commonFamilyProps);
boolean isAppendOnlySchema = false;
long updateCacheFrequency = connection.getQueryServices().getProps().getLong(QueryServices.DEFAULT_UPDATE_CACHE_FREQUENCY_ATRRIB, QueryServicesOptions.DEFAULT_UPDATE_CACHE_FREQUENCY);
if (parent == null) {
Boolean appendOnlySchemaProp = (Boolean) TableProperty.APPEND_ONLY_SCHEMA.getValue(tableProps);
if (appendOnlySchemaProp != null) {
isAppendOnlySchema = appendOnlySchemaProp;
}
Long updateCacheFrequencyProp = (Long) TableProperty.UPDATE_CACHE_FREQUENCY.getValue(tableProps);
if (updateCacheFrequencyProp != null) {
updateCacheFrequency = updateCacheFrequencyProp;
}
} else {
isAppendOnlySchema = parent.isAppendOnlySchema();
updateCacheFrequency = parent.getUpdateCacheFrequency();
}
// updateCacheFrequency cannot be set to ALWAYS (0) if isAppendOnlySchema is true
if (isAppendOnlySchema && updateCacheFrequency == 0) {
throw new SQLExceptionInfo.Builder(SQLExceptionCode.UPDATE_CACHE_FREQUENCY_INVALID).setSchemaName(tableName.getSchemaName()).setTableName(tableName.getTableName()).build().buildException();
}
Boolean immutableProp = (Boolean) TableProperty.IMMUTABLE_ROWS.getValue(tableProps);
if (statement.immutableRows() != null && immutableProp != null) {
throw new SQLExceptionInfo.Builder(SQLExceptionCode.IMMUTABLE_TABLE_PROPERTY_INVALID).setSchemaName(tableName.getSchemaName()).setTableName(tableName.getTableName()).build().buildException();
}
PTable table = null;
// If the APPEND_ONLY_SCHEMA attribute is true, first check whether the table is already in the cache;
// if it is, add any columns that are not already present
if (isAppendOnlySchema) {
// look up the table in the cache
MetaDataMutationResult result = updateCache(tableName.getSchemaName(), tableName.getTableName());
if (result.getMutationCode() == MutationCode.TABLE_ALREADY_EXISTS) {
table = result.getTable();
if (!statement.ifNotExists()) {
throw new NewerTableAlreadyExistsException(tableName.getSchemaName(), tableName.getTableName(), table);
}
List<ColumnDef> columnDefs = statement.getColumnDefs();
PrimaryKeyConstraint pkConstraint = statement.getPrimaryKeyConstraint();
// get the list of columns to add
for (ColumnDef columnDef : columnDefs) {
if (pkConstraint.contains(columnDef.getColumnDefName())) {
columnDef.setIsPK(true);
}
}
// if there are new columns to add
return addColumn(table, columnDefs, statement.getProps(), statement.ifNotExists(), true, NamedTableNode.create(statement.getTableName()), statement.getTableType());
}
}
table = createTableInternal(statement, splits, parent, viewStatement, viewType, viewColumnConstants, isViewColumnReferenced, false, null, null, tableProps, commonFamilyProps);
if (table == null || table.getType() == PTableType.VIEW) /*|| table.isTransactional()*/
{
return new MutationState(0, 0, connection);
}
// Hack to get around the case when an SCN is specified on the connection.
// In this case, we won't see the table we just created yet, so we hack
// around it by forcing the compiler to not resolve anything.
PostDDLCompiler compiler = new PostDDLCompiler(connection);
//connection.setAutoCommit(true);
// Execute any necessary data updates
Long scn = connection.getSCN();
long ts = (scn == null ? table.getTimeStamp() : scn);
// Getting the schema through the current connection doesn't work when the connection has an SCN specified,
// since the table won't be added to the current connection.
TableRef tableRef = new TableRef(null, table, ts, false);
byte[] emptyCF = SchemaUtil.getEmptyColumnFamily(table);
MutationPlan plan = compiler.compile(Collections.singletonList(tableRef), emptyCF, null, null, tableRef.getTimeStamp());
return connection.getQueryServices().updateData(plan);
}