
Example 6 with EncodedCQCounter

Use of org.apache.phoenix.schema.PTable.EncodedCQCounter in project phoenix by apache.

In class AlterTableIT, method testMetadataForImmutableTable:

@Test
public void testMetadataForImmutableTable() throws Exception {
    String schemaName = "XYZ";
    String baseTableName = generateUniqueName();
    String viewName = generateUniqueName();
    String fullTableName = schemaName + "." + baseTableName;
    String fullViewName = schemaName + "." + viewName;
    try (Connection conn = DriverManager.getConnection(getUrl())) {
        PhoenixConnection phxConn = conn.unwrap(PhoenixConnection.class);
        conn.createStatement().execute("CREATE TABLE IF NOT EXISTS " + fullTableName + " (" + " ID char(1) NOT NULL," + " COL1 integer NOT NULL," + " COL2 bigint NOT NULL," + " KV1 VARCHAR" + " CONSTRAINT NAME_PK PRIMARY KEY (ID, COL1, COL2)" + " ) " + generateDDLOptions("IMMUTABLE_ROWS = true" + (!columnEncoded ? ",IMMUTABLE_STORAGE_SCHEME=" + PTable.ImmutableStorageScheme.ONE_CELL_PER_COLUMN : "")));
        PTable baseTable = phxConn.getTable(new PTableKey(phxConn.getTenantId(), fullTableName));
        long initBaseTableSeqNumber = baseTable.getSequenceNumber();
        // assert that the client side cache is updated.
        EncodedCQCounter cqCounter = baseTable.getEncodedCQCounter();
        assertEquals(columnEncoded ? (Integer) (ENCODED_CQ_COUNTER_INITIAL_VALUE + 1) : null, cqCounter.getNextQualifier(QueryConstants.DEFAULT_COLUMN_FAMILY));
        // assert that the server side metadata is updated correctly.
        assertEncodedCQCounter(DEFAULT_COLUMN_FAMILY, schemaName, baseTableName, ENCODED_CQ_COUNTER_INITIAL_VALUE + 1);
        assertEncodedCQValue(DEFAULT_COLUMN_FAMILY, "KV1", schemaName, baseTableName, ENCODED_CQ_COUNTER_INITIAL_VALUE);
        assertSequenceNumber(schemaName, baseTableName, initBaseTableSeqNumber);
        // now create a view and validate client and server side metadata
        String viewDDL = "CREATE VIEW " + fullViewName + " ( VIEW_COL1 INTEGER, A.VIEW_COL2 VARCHAR ) AS SELECT * FROM " + fullTableName;
        conn.createStatement().execute(viewDDL);
        baseTable = phxConn.getTable(new PTableKey(phxConn.getTenantId(), fullTableName));
        PTable view = phxConn.getTable(new PTableKey(phxConn.getTenantId(), fullViewName));
        // verify that the client side cache is updated. Base table's cq counters should be updated.
        assertEquals(columnEncoded ? (Integer) (ENCODED_CQ_COUNTER_INITIAL_VALUE + 2) : null, baseTable.getEncodedCQCounter().getNextQualifier(DEFAULT_COLUMN_FAMILY));
        assertEquals(columnEncoded ? (Integer) (ENCODED_CQ_COUNTER_INITIAL_VALUE + 1) : null, baseTable.getEncodedCQCounter().getNextQualifier("A"));
        assertNull("A view should always have the null cq counter", view.getEncodedCQCounter().getNextQualifier(DEFAULT_COLUMN_FAMILY));
        // assert that the server side metadata for the base table and the view is also updated correctly.
        assertEncodedCQCounter(DEFAULT_COLUMN_FAMILY, schemaName, baseTableName, ENCODED_CQ_COUNTER_INITIAL_VALUE + 2);
        assertEncodedCQCounter("A", schemaName, baseTableName, ENCODED_CQ_COUNTER_INITIAL_VALUE + 1);
        assertEncodedCQValue(DEFAULT_COLUMN_FAMILY, "VIEW_COL1", schemaName, viewName, ENCODED_CQ_COUNTER_INITIAL_VALUE + 1);
        assertEncodedCQValue("A", "VIEW_COL2", schemaName, viewName, ENCODED_CQ_COUNTER_INITIAL_VALUE);
        assertSequenceNumber(schemaName, baseTableName, initBaseTableSeqNumber + (columnEncoded ? 1 : 0));
        assertSequenceNumber(schemaName, viewName, PTable.INITIAL_SEQ_NUM);
    }
}
Also used : PhoenixConnection(org.apache.phoenix.jdbc.PhoenixConnection) EncodedCQCounter(org.apache.phoenix.schema.PTable.EncodedCQCounter) Connection(java.sql.Connection) PhoenixConnection(org.apache.phoenix.jdbc.PhoenixConnection) TestUtil.closeConnection(org.apache.phoenix.util.TestUtil.closeConnection) PTableKey(org.apache.phoenix.schema.PTableKey) PTable(org.apache.phoenix.schema.PTable) BaseTest(org.apache.phoenix.query.BaseTest) Test(org.junit.Test)
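
The assertEncodedCQCounter(), assertEncodedCQValue(), and assertSequenceNumber() helpers used in this test are not shown on this page. As a hedged sketch, the counter check presumably reads the per-family COLUMN_QUALIFIER_COUNTER value that Phoenix stores in SYSTEM.CATALOG; the query shape and a helper name like checkEncodedCQCounter below are assumptions for illustration, not the test's actual implementation.

import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;

// Hypothetical stand-in for the assertEncodedCQCounter() helper: reads the
// COLUMN_QUALIFIER_COUNTER cell for the given table and column family from
// SYSTEM.CATALOG and compares it against the expected next qualifier value.
final class EncodedCQCounterAssertions {
    static void checkEncodedCQCounter(Connection conn, String schemaName, String tableName,
                                      String columnFamily, int expected) throws SQLException {
        String query = "SELECT COLUMN_QUALIFIER_COUNTER FROM \"SYSTEM\".CATALOG"
                + " WHERE TABLE_SCHEM = ? AND TABLE_NAME = ? AND COLUMN_FAMILY = ?"
                + " AND COLUMN_QUALIFIER_COUNTER IS NOT NULL";
        try (PreparedStatement stmt = conn.prepareStatement(query)) {
            stmt.setString(1, schemaName);
            stmt.setString(2, tableName);
            stmt.setString(3, columnFamily);
            ResultSet rs = stmt.executeQuery();
            assertTrue("No counter row found for family " + columnFamily, rs.next());
            assertEquals(expected, rs.getInt(1));
            assertFalse("Expected exactly one counter row", rs.next());
        }
    }
}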

Example 7 with EncodedCQCounter

Use of org.apache.phoenix.schema.PTable.EncodedCQCounter in project phoenix by apache.

In class MetaDataClient, method addColumn:

public MutationState addColumn(PTable table, List<ColumnDef> origColumnDefs, ListMultimap<String, Pair<String, Object>> stmtProperties, boolean ifNotExists, boolean removeTableProps, NamedTableNode namedTableNode, PTableType tableType) throws SQLException {
    connection.rollback();
    boolean wasAutoCommit = connection.getAutoCommit();
    try {
        connection.setAutoCommit(false);
        PName tenantId = connection.getTenantId();
        String schemaName = table.getSchemaName().getString();
        String tableName = table.getTableName().getString();
        List<ColumnDef> columnDefs = null;
        if (table.isAppendOnlySchema()) {
            // only make the rpc if we are adding new columns
            columnDefs = Lists.newArrayList();
            for (ColumnDef columnDef : origColumnDefs) {
                String familyName = columnDef.getColumnDefName().getFamilyName();
                String columnName = columnDef.getColumnDefName().getColumnName();
                if (familyName != null) {
                    try {
                        PColumnFamily columnFamily = table.getColumnFamily(familyName);
                        columnFamily.getPColumnForColumnName(columnName);
                        if (!ifNotExists) {
                            throw new ColumnAlreadyExistsException(schemaName, tableName, columnName);
                        }
                    } catch (ColumnFamilyNotFoundException | ColumnNotFoundException e) {
                        columnDefs.add(columnDef);
                    }
                } else {
                    try {
                        table.getColumnForColumnName(columnName);
                        if (!ifNotExists) {
                            throw new ColumnAlreadyExistsException(schemaName, tableName, columnName);
                        }
                    } catch (ColumnNotFoundException e) {
                        columnDefs.add(columnDef);
                    }
                }
            }
        } else {
            columnDefs = origColumnDefs == null ? Collections.<ColumnDef>emptyList() : origColumnDefs;
        }
        boolean retried = false;
        boolean changingPhoenixTableProperty = false;
        MetaProperties metaProperties = new MetaProperties();
        while (true) {
            Map<String, List<Pair<String, Object>>> properties = new HashMap<>(stmtProperties.size());
            metaProperties = loadStmtProperties(stmtProperties, properties, table, removeTableProps);
            ColumnResolver resolver = FromCompiler.getResolver(namedTableNode, connection);
            table = resolver.getTables().get(0).getTable();
            int nIndexes = table.getIndexes().size();
            int numCols = columnDefs.size();
            int nNewColumns = numCols;
            List<Mutation> tableMetaData = Lists.newArrayListWithExpectedSize((1 + nNewColumns) * (nIndexes + 1));
            List<Mutation> columnMetaData = Lists.newArrayListWithExpectedSize(nNewColumns * (nIndexes + 1));
            if (logger.isDebugEnabled()) {
                logger.debug(LogUtil.addCustomAnnotations("Resolved table to " + table.getName().getString() + " with seqNum " + table.getSequenceNumber() + " at timestamp " + table.getTimeStamp() + " with " + table.getColumns().size() + " columns: " + table.getColumns(), connection));
            }
            int position = table.getColumns().size();
            List<PColumn> currentPKs = table.getPKColumns();
            PColumn lastPK = currentPKs.get(currentPKs.size() - 1);
            // Disallow adding columns if the last PK column is VARBINARY or an array type.
            if (lastPK.getDataType() == PVarbinary.INSTANCE || lastPK.getDataType().isArrayType()) {
                throw new SQLExceptionInfo.Builder(SQLExceptionCode.VARBINARY_LAST_PK).setColumnName(lastPK.getName().getString()).build().buildException();
            }
            // Disallow adding columns if last column is fixed width and nullable.
            if (lastPK.isNullable() && lastPK.getDataType().isFixedWidth()) {
                throw new SQLExceptionInfo.Builder(SQLExceptionCode.NULLABLE_FIXED_WIDTH_LAST_PK).setColumnName(lastPK.getName().getString()).build().buildException();
            }
            MetaPropertiesEvaluated metaPropertiesEvaluated = new MetaPropertiesEvaluated();
            changingPhoenixTableProperty = evaluateStmtProperties(metaProperties, metaPropertiesEvaluated, table, schemaName, tableName);
            // If changing isImmutableRows to true or it's not being changed and is already true
            boolean willBeImmutableRows = Boolean.TRUE.equals(metaPropertiesEvaluated.getIsImmutableRows()) || (metaPropertiesEvaluated.getIsImmutableRows() == null && table.isImmutableRows());
            Long timeStamp = TransactionUtil.getTableTimestamp(connection, table.isTransactional() || metaProperties.getNonTxToTx());
            int numPkColumnsAdded = 0;
            List<PColumn> columns = Lists.newArrayListWithExpectedSize(numCols);
            Set<String> colFamiliesForPColumnsToBeAdded = new LinkedHashSet<>();
            Set<String> families = new LinkedHashSet<>();
            PTable tableForCQCounters = tableType == PTableType.VIEW ? PhoenixRuntime.getTable(connection, table.getPhysicalName().getString()) : table;
            EncodedCQCounter cqCounterToUse = tableForCQCounters.getEncodedCQCounter();
            Map<String, Integer> changedCqCounters = new HashMap<>(numCols);
            if (numCols > 0) {
                StatementContext context = new StatementContext(new PhoenixStatement(connection), resolver);
                String addColumnSqlToUse = connection.isRunningUpgrade() && tableName.equals(PhoenixDatabaseMetaData.SYSTEM_CATALOG_TABLE) && schemaName.equals(PhoenixDatabaseMetaData.SYSTEM_CATALOG_SCHEMA) ? ALTER_SYSCATALOG_TABLE_UPGRADE : INSERT_COLUMN_ALTER_TABLE;
                try (PreparedStatement colUpsert = connection.prepareStatement(addColumnSqlToUse)) {
                    short nextKeySeq = SchemaUtil.getMaxKeySeq(table);
                    for (ColumnDef colDef : columnDefs) {
                        if (colDef != null && !colDef.isNull()) {
                            if (colDef.isPK()) {
                                throw new SQLExceptionInfo.Builder(SQLExceptionCode.NOT_NULLABLE_COLUMN_IN_ROW_KEY).setColumnName(colDef.getColumnDefName().getColumnName()).build().buildException();
                            } else if (!willBeImmutableRows) {
                                throw new SQLExceptionInfo.Builder(SQLExceptionCode.KEY_VALUE_NOT_NULL).setColumnName(colDef.getColumnDefName().getColumnName()).build().buildException();
                            }
                        }
                        if (colDef != null && colDef.isPK() && table.getType() == VIEW && table.getViewType() != MAPPED) {
                            throwIfLastPKOfParentIsFixedLength(getParentOfView(table), schemaName, tableName, colDef);
                        }
                        if (colDef != null && colDef.isRowTimestamp()) {
                            throw new SQLExceptionInfo.Builder(SQLExceptionCode.ROWTIMESTAMP_CREATE_ONLY).setColumnName(colDef.getColumnDefName().getColumnName()).build().buildException();
                        }
                        if (!colDef.validateDefault(context, null)) {
                            // Remove DEFAULT as it's not necessary
                            colDef = new ColumnDef(colDef, null);
                        }
                        Integer encodedCQ = null;
                        if (!colDef.isPK()) {
                            String colDefFamily = colDef.getColumnDefName().getFamilyName();
                            String familyName = null;
                            ImmutableStorageScheme storageScheme = table.getImmutableStorageScheme();
                            String defaultColumnFamily = tableForCQCounters.getDefaultFamilyName() != null && !Strings.isNullOrEmpty(tableForCQCounters.getDefaultFamilyName().getString()) ? tableForCQCounters.getDefaultFamilyName().getString() : DEFAULT_COLUMN_FAMILY;
                            if (table.getType() == PTableType.INDEX && table.getIndexType() == IndexType.LOCAL) {
                                defaultColumnFamily = QueryConstants.LOCAL_INDEX_COLUMN_FAMILY_PREFIX + defaultColumnFamily;
                            }
                            if (storageScheme == SINGLE_CELL_ARRAY_WITH_OFFSETS) {
                                familyName = colDefFamily != null ? colDefFamily : defaultColumnFamily;
                            } else {
                                familyName = defaultColumnFamily;
                            }
                            encodedCQ = cqCounterToUse.getNextQualifier(familyName);
                            if (cqCounterToUse.increment(familyName)) {
                                changedCqCounters.put(familyName, cqCounterToUse.getNextQualifier(familyName));
                            }
                        }
                        byte[] columnQualifierBytes = null;
                        try {
                            columnQualifierBytes = EncodedColumnsUtil.getColumnQualifierBytes(colDef.getColumnDefName().getColumnName(), encodedCQ, table, colDef.isPK());
                        } catch (QualifierOutOfRangeException e) {
                            throw new SQLExceptionInfo.Builder(SQLExceptionCode.MAX_COLUMNS_EXCEEDED).setSchemaName(schemaName).setTableName(tableName).build().buildException();
                        }
                        PColumn column = newColumn(position++, colDef, PrimaryKeyConstraint.EMPTY, table.getDefaultFamilyName() == null ? null : table.getDefaultFamilyName().getString(), true, columnQualifierBytes, willBeImmutableRows);
                        columns.add(column);
                        String pkName = null;
                        Short keySeq = null;
                        // TODO: support setting properties on other families?
                        if (column.getFamilyName() == null) {
                            ++numPkColumnsAdded;
                            pkName = table.getPKName() == null ? null : table.getPKName().getString();
                            keySeq = ++nextKeySeq;
                        } else {
                            families.add(column.getFamilyName().getString());
                        }
                        colFamiliesForPColumnsToBeAdded.add(column.getFamilyName() == null ? null : column.getFamilyName().getString());
                        addColumnMutation(schemaName, tableName, column, colUpsert, null, pkName, keySeq, table.getBucketNum() != null);
                    }
                    // Add any new PK columns to end of index PK
                    if (numPkColumnsAdded > 0) {
                        // create PK column list that includes the newly created columns
                        List<PColumn> pkColumns = Lists.newArrayListWithExpectedSize(table.getPKColumns().size() + numPkColumnsAdded);
                        pkColumns.addAll(table.getPKColumns());
                        for (int i = 0; i < numCols; ++i) {
                            if (columnDefs.get(i).isPK()) {
                                pkColumns.add(columns.get(i));
                            }
                        }
                        int pkSlotPosition = table.getPKColumns().size() - 1;
                        for (PTable index : table.getIndexes()) {
                            short nextIndexKeySeq = SchemaUtil.getMaxKeySeq(index);
                            int indexPosition = index.getColumns().size();
                            for (int i = 0; i < numCols; ++i) {
                                ColumnDef colDef = columnDefs.get(i);
                                if (colDef.isPK()) {
                                    PDataType indexColDataType = IndexUtil.getIndexColumnDataType(colDef.isNull(), colDef.getDataType());
                                    ColumnName indexColName = ColumnName.caseSensitiveColumnName(IndexUtil.getIndexColumnName(null, colDef.getColumnDefName().getColumnName()));
                                    Expression expression = new RowKeyColumnExpression(columns.get(i), new RowKeyValueAccessor(pkColumns, ++pkSlotPosition));
                                    ColumnDef indexColDef = FACTORY.columnDef(indexColName, indexColDataType.getSqlTypeName(), colDef.isNull(), colDef.getMaxLength(), colDef.getScale(), true, colDef.getSortOrder(), expression.toString(), colDef.isRowTimestamp());
                                    PColumn indexColumn = newColumn(indexPosition++, indexColDef, PrimaryKeyConstraint.EMPTY, null, true, null, willBeImmutableRows);
                                    addColumnMutation(schemaName, index.getTableName().getString(), indexColumn, colUpsert, index.getParentTableName().getString(), index.getPKName() == null ? null : index.getPKName().getString(), ++nextIndexKeySeq, index.getBucketNum() != null);
                                }
                            }
                        }
                    }
                    columnMetaData.addAll(connection.getMutationState().toMutations(timeStamp).next().getSecond());
                    connection.rollback();
                }
            } else {
                // Check that HBase is configured properly for mutable secondary indexing
                // if we're changing the table from immutable to mutable rows and we already
                // have existing indexes.
                if (Boolean.FALSE.equals(metaPropertiesEvaluated.getIsImmutableRows()) && !table.getIndexes().isEmpty()) {
                    int hbaseVersion = connection.getQueryServices().getLowestClusterHBaseVersion();
                    if (hbaseVersion < PhoenixDatabaseMetaData.MUTABLE_SI_VERSION_THRESHOLD) {
                        throw new SQLExceptionInfo.Builder(SQLExceptionCode.NO_MUTABLE_INDEXES).setSchemaName(schemaName).setTableName(tableName).build().buildException();
                    }
                    if (!connection.getQueryServices().hasIndexWALCodec() && !table.isTransactional()) {
                        throw new SQLExceptionInfo.Builder(SQLExceptionCode.INVALID_MUTABLE_INDEX_CONFIG).setSchemaName(schemaName).setTableName(tableName).build().buildException();
                    }
                }
                if (Boolean.TRUE.equals(metaPropertiesEvaluated.getMultiTenant())) {
                    throwIfInsufficientColumns(schemaName, tableName, table.getPKColumns(), table.getBucketNum() != null, metaPropertiesEvaluated.getMultiTenant());
                }
            }
            if (!table.getIndexes().isEmpty() && (numPkColumnsAdded > 0 || metaProperties.getNonTxToTx())) {
                for (PTable index : table.getIndexes()) {
                    incrementTableSeqNum(index, index.getType(), numPkColumnsAdded, metaProperties.getNonTxToTx() ? Boolean.TRUE : null, metaPropertiesEvaluated.getUpdateCacheFrequency());
                }
                tableMetaData.addAll(connection.getMutationState().toMutations(timeStamp).next().getSecond());
                connection.rollback();
            }
            if (changingPhoenixTableProperty || columnDefs.size() > 0) {
                incrementTableSeqNum(table, tableType, columnDefs.size(), metaPropertiesEvaluated);
                tableMetaData.addAll(connection.getMutationState().toMutations(timeStamp).next().getSecond());
                connection.rollback();
            }
            // Force the table header row to be first
            Collections.reverse(tableMetaData);
            // Add column metadata afterwards, maintaining the order so columns have more predictable ordinal position
            tableMetaData.addAll(columnMetaData);
            boolean sharedIndex = tableType == PTableType.INDEX && (table.getIndexType() == IndexType.LOCAL || table.getViewIndexId() != null);
            String tenantIdToUse = connection.getTenantId() != null && sharedIndex ? connection.getTenantId().getString() : null;
            if (!changedCqCounters.isEmpty()) {
                PreparedStatement linkStatement;
                linkStatement = connection.prepareStatement(UPDATE_ENCODED_COLUMN_COUNTER);
                for (Entry<String, Integer> entry : changedCqCounters.entrySet()) {
                    linkStatement.setString(1, tenantIdToUse);
                    linkStatement.setString(2, tableForCQCounters.getSchemaName().getString());
                    linkStatement.setString(3, tableForCQCounters.getTableName().getString());
                    linkStatement.setString(4, entry.getKey());
                    linkStatement.setInt(5, entry.getValue());
                    linkStatement.execute();
                }
                // When a view adds its own columns, increment the base table's sequence number
                // too since we want clients to get the latest PTable of the base table.
                if (tableType == VIEW) {
                    PreparedStatement incrementStatement = connection.prepareStatement(INCREMENT_SEQ_NUM);
                    incrementStatement.setString(1, null);
                    incrementStatement.setString(2, tableForCQCounters.getSchemaName().getString());
                    incrementStatement.setString(3, tableForCQCounters.getTableName().getString());
                    incrementStatement.setLong(4, tableForCQCounters.getSequenceNumber() + 1);
                    incrementStatement.execute();
                }
                tableMetaData.addAll(connection.getMutationState().toMutations(timeStamp).next().getSecond());
                connection.rollback();
            }
            byte[] family = families.size() > 0 ? families.iterator().next().getBytes() : null;
            // Figure out if the empty column family is changing as a result of adding the new column
            byte[] emptyCF = null;
            byte[] projectCF = null;
            if (table.getType() != PTableType.VIEW && family != null) {
                if (table.getColumnFamilies().isEmpty()) {
                    emptyCF = family;
                } else {
                    try {
                        table.getColumnFamily(family);
                    } catch (ColumnFamilyNotFoundException e) {
                        projectCF = family;
                        emptyCF = SchemaUtil.getEmptyColumnFamily(table);
                    }
                }
            }
            MetaDataMutationResult result = connection.getQueryServices().addColumn(tableMetaData, table, properties, colFamiliesForPColumnsToBeAdded, columns);
            try {
                MutationCode code = processMutationResult(schemaName, tableName, result);
                if (code == MutationCode.COLUMN_ALREADY_EXISTS) {
                    addTableToCache(result);
                    if (!ifNotExists) {
                        throw new ColumnAlreadyExistsException(schemaName, tableName, SchemaUtil.findExistingColumn(result.getTable(), columns));
                    }
                    return new MutationState(0, 0, connection);
                }
                // Only update client side cache if we aren't adding a PK column to a table with indexes or
                // transitioning a table from non transactional to transactional.
                // We could update the cache manually then too, it'd just be a pain.
                String fullTableName = SchemaUtil.getTableName(schemaName, tableName);
                long resolvedTimeStamp = TransactionUtil.getResolvedTime(connection, result);
                if (table.getIndexes().isEmpty() || (numPkColumnsAdded == 0 && !metaProperties.getNonTxToTx())) {
                    connection.addTable(result.getTable(), resolvedTimeStamp);
                    table = result.getTable();
                } else if (metaPropertiesEvaluated.getUpdateCacheFrequency() != null) {
                    // Force removal from cache as the update cache frequency has changed
                    // Note that clients outside this JVM won't be affected.
                    connection.removeTable(tenantId, fullTableName, null, resolvedTimeStamp);
                }
                // We only need to do this if the multiTenant transitioned to false
                if (table.getType() == PTableType.TABLE && Boolean.FALSE.equals(metaPropertiesEvaluated.getMultiTenant()) && MetaDataUtil.hasViewIndexTable(connection, table.getPhysicalName())) {
                    connection.setAutoCommit(true);
                    MetaDataUtil.deleteViewIndexSequences(connection, table.getPhysicalName(), table.isNamespaceMapped());
                    // If we're not dropping metadata, make sure no rows are left in the view index
                    // physical table; otherwise the DROP commands that are run would remove all rows already.
                    if (!connection.getQueryServices().getProps().getBoolean(DROP_METADATA_ATTRIB, DEFAULT_DROP_METADATA)) {
                        Long scn = connection.getSCN();
                        long ts = (scn == null ? result.getMutationTime() : scn);
                        byte[] viewIndexPhysicalName = MetaDataUtil.getViewIndexPhysicalName(table.getPhysicalName().getBytes());
                        PTable viewIndexTable = new PTableImpl(null, SchemaUtil.getSchemaNameFromFullName(viewIndexPhysicalName), SchemaUtil.getTableNameFromFullName(viewIndexPhysicalName), ts, table.getColumnFamilies(), table.isNamespaceMapped(), table.getImmutableStorageScheme(), table.getEncodingScheme(), table.useStatsForParallelization());
                        List<TableRef> tableRefs = Collections.singletonList(new TableRef(null, viewIndexTable, ts, false));
                        MutationPlan plan = new PostDDLCompiler(connection).compile(tableRefs, null, null, Collections.<PColumn>emptyList(), ts);
                        connection.getQueryServices().updateData(plan);
                    }
                }
                if (emptyCF != null) {
                    Long scn = connection.getSCN();
                    connection.setAutoCommit(true);
                    // Delete everything in the column. You'll still be able to do queries at earlier timestamps
                    long ts = (scn == null ? result.getMutationTime() : scn);
                    MutationPlan plan = new PostDDLCompiler(connection).compile(Collections.singletonList(new TableRef(null, table, ts, false)), emptyCF, projectCF == null ? null : Collections.singletonList(projectCF), null, ts);
                    return connection.getQueryServices().updateData(plan);
                }
                return new MutationState(0, 0, connection);
            } catch (ConcurrentTableMutationException e) {
                if (retried) {
                    throw e;
                }
                if (logger.isDebugEnabled()) {
                    logger.debug(LogUtil.addCustomAnnotations("Caught ConcurrentTableMutationException for table " + SchemaUtil.getTableName(schemaName, tableName) + ". Will try again...", connection));
                }
                retried = true;
            }
        }
    } finally {
        connection.setAutoCommit(wasAutoCommit);
    }
}
Also used : LinkedHashSet(java.util.LinkedHashSet) Sets.newLinkedHashSet(com.google.common.collect.Sets.newLinkedHashSet) LinkedHashMap(java.util.LinkedHashMap) HashMap(java.util.HashMap) PostDDLCompiler(org.apache.phoenix.compile.PostDDLCompiler) PhoenixStatement(org.apache.phoenix.jdbc.PhoenixStatement) StatementContext(org.apache.phoenix.compile.StatementContext) PDataType(org.apache.phoenix.schema.types.PDataType) ArrayList(java.util.ArrayList) List(java.util.List) SQLExceptionInfo(org.apache.phoenix.exception.SQLExceptionInfo) ColumnResolver(org.apache.phoenix.compile.ColumnResolver) ColumnDef(org.apache.phoenix.parse.ColumnDef) MutationPlan(org.apache.phoenix.compile.MutationPlan) PUnsignedLong(org.apache.phoenix.schema.types.PUnsignedLong) PLong(org.apache.phoenix.schema.types.PLong) Mutation(org.apache.hadoop.hbase.client.Mutation) QualifierOutOfRangeException(org.apache.phoenix.schema.PTable.QualifierEncodingScheme.QualifierOutOfRangeException) ImmutableStorageScheme(org.apache.phoenix.schema.PTable.ImmutableStorageScheme) MetaDataMutationResult(org.apache.phoenix.coprocessor.MetaDataProtocol.MetaDataMutationResult) PreparedStatement(java.sql.PreparedStatement) RowKeyColumnExpression(org.apache.phoenix.expression.RowKeyColumnExpression) IndexKeyConstraint(org.apache.phoenix.parse.IndexKeyConstraint) PrimaryKeyConstraint(org.apache.phoenix.parse.PrimaryKeyConstraint) ColumnDefInPkConstraint(org.apache.phoenix.parse.ColumnDefInPkConstraint) MutationCode(org.apache.phoenix.coprocessor.MetaDataProtocol.MutationCode) PInteger(org.apache.phoenix.schema.types.PInteger) ColumnName(org.apache.phoenix.parse.ColumnName) EncodedCQCounter(org.apache.phoenix.schema.PTable.EncodedCQCounter) MutationState(org.apache.phoenix.execute.MutationState) RowKeyColumnExpression(org.apache.phoenix.expression.RowKeyColumnExpression) Expression(org.apache.phoenix.expression.Expression)
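
Stripped of the surrounding DDL plumbing, the qualifier bookkeeping inside addColumn() above follows a small pattern: counters are always taken from the physical (base) table, the next qualifier is handed out per column family, and any changed family counter is recorded so it can later be written back to SYSTEM.CATALOG via UPDATE_ENCODED_COLUMN_COUNTER. A minimal sketch, assuming an already-resolved physical PTable (illustration only, not the full Phoenix logic):

import java.util.HashMap;
import java.util.Map;

import org.apache.phoenix.schema.PTable;
import org.apache.phoenix.schema.PTable.EncodedCQCounter;

final class QualifierAllocationSketch {
    // Hands out the next encoded column qualifier for a new column in the given
    // family and records the family's updated counter so it can be persisted.
    static Integer allocateQualifier(PTable physicalTable, String familyName,
                                     Map<String, Integer> changedCqCounters) {
        EncodedCQCounter cqCounter = physicalTable.getEncodedCQCounter();
        Integer encodedCQ = cqCounter.getNextQualifier(familyName);
        // For tables that don't use encoded qualifiers, the counter is typically a
        // no-op instance whose increment() returns false, so only encoded tables
        // end up with changed counters that need to be written back to the catalog.
        if (cqCounter.increment(familyName)) {
            changedCqCounters.put(familyName, cqCounter.getNextQualifier(familyName));
        }
        return encodedCQ;
    }

    static Map<String, Integer> newChangedCounters() {
        return new HashMap<>();
    }
}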

Example 8 with EncodedCQCounter

Use of org.apache.phoenix.schema.PTable.EncodedCQCounter in project phoenix by apache.

In class AlterTableIT, method testAddingColumnsToTablesAndViews:

@Test
public void testAddingColumnsToTablesAndViews() throws Exception {
    String schemaName = generateUniqueName();
    String baseTableName = generateUniqueName();
    String viewName = generateUniqueName();
    String fullTableName = schemaName + "." + baseTableName;
    String fullViewName = schemaName + "." + viewName;
    Properties props = new Properties();
    props.put(QueryServices.IS_NAMESPACE_MAPPING_ENABLED, Boolean.toString(true));
    try (Connection conn = DriverManager.getConnection(getUrl(), props)) {
        conn.createStatement().execute("CREATE SCHEMA " + schemaName);
        PhoenixConnection phxConn = conn.unwrap(PhoenixConnection.class);
        conn.createStatement().execute("CREATE TABLE " + fullTableName + " (" + " ID char(1) NOT NULL," + " COL1 integer NOT NULL," + " COL2 bigint NOT NULL," + " CONSTRAINT NAME_PK PRIMARY KEY (ID, COL1, COL2)" + " ) " + tableDDLOptions);
        PTable baseTable = phxConn.getTable(new PTableKey(phxConn.getTenantId(), fullTableName));
        long initBaseTableSeqNumber = baseTable.getSequenceNumber();
        // Add a column to the base table and see if the client and server metadata is updated correctly
        String alterDDL = "ALTER TABLE " + fullTableName + " ADD COL3 VARCHAR PRIMARY KEY, COL4 INTEGER, COL5 VARCHAR, B.COL6 DECIMAL (10, 2)";
        conn.createStatement().execute(alterDDL);
        // assert that the client side cache is updated.
        baseTable = phxConn.getTable(new PTableKey(phxConn.getTenantId(), fullTableName));
        EncodedCQCounter encodedCqCounter = baseTable.getEncodedCQCounter();
        assertEquals(columnEncoded ? (Integer) (ENCODED_CQ_COUNTER_INITIAL_VALUE + 3) : null, encodedCqCounter.getNextQualifier(DEFAULT_COLUMN_FAMILY));
        // assert that the server side metadata is updated correctly.
        assertEncodedCQCounter(DEFAULT_COLUMN_FAMILY, schemaName, baseTableName, ENCODED_CQ_COUNTER_INITIAL_VALUE + 3);
        // assert that the server side metadata for columns is updated correctly.
        assertEncodedCQValue(DEFAULT_COLUMN_FAMILY, "COL4", schemaName, baseTableName, ENCODED_CQ_COUNTER_INITIAL_VALUE);
        assertEncodedCQValue(DEFAULT_COLUMN_FAMILY, "COL5", schemaName, baseTableName, ENCODED_CQ_COUNTER_INITIAL_VALUE + 1);
        assertEncodedCQValue("B", "COL6", schemaName, baseTableName, ENCODED_CQ_COUNTER_INITIAL_VALUE + 2);
        long baseTableSeqNumBeforeAddingChildCols = initBaseTableSeqNumber + 1;
        assertSequenceNumber(schemaName, baseTableName, baseTableSeqNumBeforeAddingChildCols);
        // Create a view
        String viewDDL = "CREATE VIEW " + fullViewName + " ( VIEW_COL1 INTEGER, A.VIEW_COL2 VARCHAR ) AS SELECT * FROM " + fullTableName;
        conn.createStatement().execute(viewDDL);
        // assert that the server side metadata is updated correctly.
        assertEncodedCQCounter(DEFAULT_COLUMN_FAMILY, schemaName, baseTableName, ENCODED_CQ_COUNTER_INITIAL_VALUE + 5);
        // assert that the server side metadata for columns is updated correctly.
        assertEncodedCQValue(DEFAULT_COLUMN_FAMILY, "VIEW_COL1", schemaName, viewName, ENCODED_CQ_COUNTER_INITIAL_VALUE + 3);
        assertEncodedCQValue("A", "VIEW_COL2", schemaName, viewName, ENCODED_CQ_COUNTER_INITIAL_VALUE + 4);
        // For encoded columns, creating a view that adds its own columns should increment the base table's sequence number too.
        assertSequenceNumber(schemaName, baseTableName, columnEncoded ? initBaseTableSeqNumber + 2 : baseTableSeqNumBeforeAddingChildCols);
        // Add column to the view
        viewDDL = "ALTER VIEW " + fullViewName + " ADD VIEW_COL3 DECIMAL(10, 2), A.VIEW_COL4 VARCHAR, B.VIEW_COL5 INTEGER";
        conn.createStatement().execute(viewDDL);
        // assert that the client cache for the base table is updated
        baseTable = phxConn.getTable(new PTableKey(phxConn.getTenantId(), fullTableName));
        encodedCqCounter = baseTable.getEncodedCQCounter();
        assertEquals(columnEncoded ? (Integer) (ENCODED_CQ_COUNTER_INITIAL_VALUE + 8) : null, encodedCqCounter.getNextQualifier(DEFAULT_COLUMN_FAMILY));
        // assert client cache for view
        PTable view = phxConn.getTable(new PTableKey(phxConn.getTenantId(), fullViewName));
        encodedCqCounter = view.getEncodedCQCounter();
        assertNull("A view should always have the column qualifier counter as null", view.getEncodedCQCounter().getNextQualifier(DEFAULT_COLUMN_FAMILY));
        // assert that the server side metadata for the base table and the view is also updated correctly.
        assertEncodedCQCounter(DEFAULT_COLUMN_FAMILY, schemaName, baseTableName, ENCODED_CQ_COUNTER_INITIAL_VALUE + 8);
        assertEncodedCQValue(DEFAULT_COLUMN_FAMILY, "VIEW_COL1", schemaName, viewName, ENCODED_CQ_COUNTER_INITIAL_VALUE + 3);
        assertEncodedCQValue("A", "VIEW_COL2", schemaName, viewName, ENCODED_CQ_COUNTER_INITIAL_VALUE + 4);
        assertEncodedCQValue(DEFAULT_COLUMN_FAMILY, "VIEW_COL3", schemaName, viewName, ENCODED_CQ_COUNTER_INITIAL_VALUE + 5);
        assertEncodedCQValue("A", "VIEW_COL4", schemaName, viewName, ENCODED_CQ_COUNTER_INITIAL_VALUE + 6);
        assertEncodedCQValue("B", "VIEW_COL5", schemaName, viewName, ENCODED_CQ_COUNTER_INITIAL_VALUE + 7);
        // Adding a column to the view should increment the base table's sequence number too, since we update the cq counters for column families.
        assertSequenceNumber(schemaName, baseTableName, columnEncoded ? initBaseTableSeqNumber + 3 : baseTableSeqNumBeforeAddingChildCols);
        assertSequenceNumber(schemaName, viewName, PTable.INITIAL_SEQ_NUM + 1);
        // Add column to the base table which doesn't already exist in the view.
        alterDDL = "ALTER TABLE " + fullTableName + " ADD COL10 VARCHAR, A.COL11 INTEGER";
        conn.createStatement().execute(alterDDL);
        baseTable = phxConn.getTable(new PTableKey(phxConn.getTenantId(), fullTableName));
        // assert that the client cache for the base table is updated
        encodedCqCounter = baseTable.getEncodedCQCounter();
        assertEquals(columnEncoded ? (Integer) (ENCODED_CQ_COUNTER_INITIAL_VALUE + 10) : null, encodedCqCounter.getNextQualifier(DEFAULT_COLUMN_FAMILY));
        // assert client cache for view
        view = phxConn.getTable(new PTableKey(phxConn.getTenantId(), fullViewName));
        encodedCqCounter = view.getEncodedCQCounter();
        assertNull("A view should always have the column qualifier counter as null", view.getEncodedCQCounter().getNextQualifier(DEFAULT_COLUMN_FAMILY));
        // assert that the server side metadata for the base table and the view is also updated correctly.
        assertEncodedCQCounter(DEFAULT_COLUMN_FAMILY, schemaName, baseTableName, (ENCODED_CQ_COUNTER_INITIAL_VALUE + 10));
        assertEncodedCQValue(DEFAULT_COLUMN_FAMILY, "COL10", schemaName, viewName, (ENCODED_CQ_COUNTER_INITIAL_VALUE + 8));
        assertEncodedCQValue("A", "COL11", schemaName, viewName, ENCODED_CQ_COUNTER_INITIAL_VALUE + 9);
        assertSequenceNumber(schemaName, baseTableName, columnEncoded ? initBaseTableSeqNumber + 4 : initBaseTableSeqNumber + 2);
        assertSequenceNumber(schemaName, viewName, PTable.INITIAL_SEQ_NUM + 2);
    }
}
Also used : PhoenixConnection(org.apache.phoenix.jdbc.PhoenixConnection) EncodedCQCounter(org.apache.phoenix.schema.PTable.EncodedCQCounter) Connection(java.sql.Connection) PhoenixConnection(org.apache.phoenix.jdbc.PhoenixConnection) TestUtil.closeConnection(org.apache.phoenix.util.TestUtil.closeConnection) Properties(java.util.Properties) PTableKey(org.apache.phoenix.schema.PTableKey) PTable(org.apache.phoenix.schema.PTable) BaseTest(org.apache.phoenix.query.BaseTest) Test(org.junit.Test)
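
The assertSequenceNumber() checks used throughout these tests likewise go against SYSTEM.CATALOG, reading the table's header row. A hedged sketch of what such a helper might look like (the query shape and the name checkSequenceNumber are assumptions; the real test helper may differ):

import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;

// Hypothetical stand-in for assertSequenceNumber(): reads TABLE_SEQ_NUM from the
// table's header row in SYSTEM.CATALOG (the row with no column name or family).
final class SequenceNumberAssertions {
    static void checkSequenceNumber(Connection conn, String schemaName, String tableName,
                                    long expected) throws SQLException {
        String query = "SELECT TABLE_SEQ_NUM FROM \"SYSTEM\".CATALOG"
                + " WHERE TABLE_SCHEM = ? AND TABLE_NAME = ?"
                + " AND TABLE_SEQ_NUM IS NOT NULL AND COLUMN_NAME IS NULL AND COLUMN_FAMILY IS NULL";
        try (PreparedStatement stmt = conn.prepareStatement(query)) {
            stmt.setString(1, schemaName);
            stmt.setString(2, tableName);
            ResultSet rs = stmt.executeQuery();
            assertTrue("No header row found for " + tableName, rs.next());
            assertEquals(expected, rs.getLong(1));
        }
    }
}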

Aggregations

EncodedCQCounter (org.apache.phoenix.schema.PTable.EncodedCQCounter)8 PTable (org.apache.phoenix.schema.PTable)6 ImmutableStorageScheme (org.apache.phoenix.schema.PTable.ImmutableStorageScheme)4 PInteger (org.apache.phoenix.schema.types.PInteger)4 Connection (java.sql.Connection)3 PhoenixConnection (org.apache.phoenix.jdbc.PhoenixConnection)3 BaseTest (org.apache.phoenix.query.BaseTest)3 PColumn (org.apache.phoenix.schema.PColumn)3 PName (org.apache.phoenix.schema.PName)3 QualifierEncodingScheme (org.apache.phoenix.schema.PTable.QualifierEncodingScheme)3 PTableKey (org.apache.phoenix.schema.PTableKey)3 ByteString (com.google.protobuf.ByteString)2 PreparedStatement (java.sql.PreparedStatement)2 ArrayList (java.util.ArrayList)2 HashMap (java.util.HashMap)2 LinkedHashMap (java.util.LinkedHashMap)2 Cell (org.apache.hadoop.hbase.Cell)2 Mutation (org.apache.hadoop.hbase.client.Mutation)2 MetaDataMutationResult (org.apache.phoenix.coprocessor.MetaDataProtocol.MetaDataMutationResult)2 MutationCode (org.apache.phoenix.coprocessor.MetaDataProtocol.MutationCode)2