
Example 46 with Mutation

use of org.apache.hadoop.hbase.client.Mutation in project phoenix by apache.

the class MetaDataClient method createTableInternal.
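
For orientation, here is a minimal, hypothetical JDBC snippet (not part of the Phoenix source; the connection URL and table name are placeholders) showing the kind of DDL that gets compiled into the CreateTableStatement this method receives:

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.SQLException;

public class CreateTableExample {
    public static void main(String[] args) throws SQLException {
        try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost")) {
            // The DDL below is parsed into a CreateTableStatement; MetaDataClient.createTableInternal
            // then builds the SYSTEM.CATALOG mutations for the new table.
            conn.createStatement().execute(
                    "CREATE TABLE IF NOT EXISTS EXAMPLE_TABLE (" +
                    "    ID BIGINT NOT NULL PRIMARY KEY," +
                    "    NAME VARCHAR" +
                    ") SALT_BUCKETS = 4");
        }
    }
}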

private PTable createTableInternal(CreateTableStatement statement, byte[][] splits, final PTable parent, String viewStatement, ViewType viewType, final byte[][] viewColumnConstants, final BitSet isViewColumnReferenced, boolean allocateIndexId, IndexType indexType, Date asyncCreatedDate, Map<String, Object> tableProps, Map<String, Object> commonFamilyProps) throws SQLException {
    final PTableType tableType = statement.getTableType();
    boolean wasAutoCommit = connection.getAutoCommit();
    connection.rollback();
    try {
        connection.setAutoCommit(false);
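        // Auto-commit stays off so the metadata UPSERTs below accumulate in the
        // connection's MutationState; they are repeatedly harvested as raw Mutations
        // and the pending state is rolled back rather than committed.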
        List<Mutation> tableMetaData = Lists.newArrayListWithExpectedSize(statement.getColumnDefs().size() + 3);
        TableName tableNameNode = statement.getTableName();
        final String schemaName = connection.getSchema() != null && tableNameNode.getSchemaName() == null ? connection.getSchema() : tableNameNode.getSchemaName();
        final String tableName = tableNameNode.getTableName();
        String parentTableName = null;
        PName tenantId = connection.getTenantId();
        String tenantIdStr = tenantId == null ? null : tenantId.getString();
        Long scn = connection.getSCN();
        long clientTimeStamp = scn == null ? HConstants.LATEST_TIMESTAMP : scn;
        boolean multiTenant = false;
        boolean storeNulls = false;
        boolean transactional = (parent != null) ? parent.isTransactional() : false;
        Integer saltBucketNum = null;
        String defaultFamilyName = null;
        boolean isImmutableRows = false;
        boolean isAppendOnlySchema = false;
        List<PName> physicalNames = Collections.emptyList();
        boolean addSaltColumn = false;
        boolean rowKeyOrderOptimizable = true;
        Long timestamp = null;
        boolean isNamespaceMapped = parent == null ? SchemaUtil.isNamespaceMappingEnabled(tableType, connection.getQueryServices().getProps()) : parent.isNamespaceMapped();
        boolean isLocalIndex = indexType == IndexType.LOCAL;
        QualifierEncodingScheme encodingScheme = NON_ENCODED_QUALIFIERS;
        ImmutableStorageScheme immutableStorageScheme = ONE_CELL_PER_COLUMN;
        if (parent != null && tableType == PTableType.INDEX) {
            timestamp = TransactionUtil.getTableTimestamp(connection, transactional);
            storeNulls = parent.getStoreNulls();
            isImmutableRows = parent.isImmutableRows();
            isAppendOnlySchema = parent.isAppendOnlySchema();
            // TODO: Can we support a multi-tenant index directly on a multi-tenant
            // table instead of only on a view? We don't have anywhere to put the link
            // from the table to the index, though.
            if (isLocalIndex || (parent.getType() == PTableType.VIEW && parent.getViewType() != ViewType.MAPPED)) {
                PName physicalName = parent.getPhysicalName();
                saltBucketNum = parent.getBucketNum();
                addSaltColumn = (saltBucketNum != null && !isLocalIndex);
                defaultFamilyName = parent.getDefaultFamilyName() == null ? null : parent.getDefaultFamilyName().getString();
                if (isLocalIndex) {
                    defaultFamilyName = parent.getDefaultFamilyName() == null ? QueryConstants.DEFAULT_LOCAL_INDEX_COLUMN_FAMILY : IndexUtil.getLocalIndexColumnFamily(parent.getDefaultFamilyName().getString());
                    saltBucketNum = null;
                    // Set physical name of local index table
                    physicalNames = Collections.singletonList(PNameFactory.newName(physicalName.getBytes()));
                } else {
                    defaultFamilyName = parent.getDefaultFamilyName() == null ? QueryConstants.DEFAULT_COLUMN_FAMILY : parent.getDefaultFamilyName().getString();
                    // Set physical name of view index table
                    physicalNames = Collections.singletonList(PNameFactory.newName(MetaDataUtil.getViewIndexPhysicalName(physicalName.getBytes())));
                }
            }
            multiTenant = parent.isMultiTenant();
            storeNulls = parent.getStoreNulls();
            parentTableName = parent.getTableName().getString();
            // Pass through data table sequence number so we can check it hasn't changed
            PreparedStatement incrementStatement = connection.prepareStatement(INCREMENT_SEQ_NUM);
            incrementStatement.setString(1, tenantIdStr);
            incrementStatement.setString(2, schemaName);
            incrementStatement.setString(3, parentTableName);
            incrementStatement.setLong(4, parent.getSequenceNumber());
            incrementStatement.execute();
            // Get list of mutations and add to table meta data that will be passed to server
            // to guarantee order. This row will always end up last
            tableMetaData.addAll(connection.getMutationState().toMutations(timestamp).next().getSecond());
            connection.rollback();
            // Add row linking from data table row to index table row
            PreparedStatement linkStatement = connection.prepareStatement(CREATE_LINK);
            linkStatement.setString(1, tenantIdStr);
            linkStatement.setString(2, schemaName);
            linkStatement.setString(3, parentTableName);
            linkStatement.setString(4, tableName);
            linkStatement.setByte(5, LinkType.INDEX_TABLE.getSerializedValue());
            linkStatement.setLong(6, parent.getSequenceNumber());
            linkStatement.setString(7, PTableType.INDEX.getSerializedValue());
            linkStatement.execute();
        }
        PrimaryKeyConstraint pkConstraint = statement.getPrimaryKeyConstraint();
        String pkName = null;
        List<Pair<ColumnName, SortOrder>> pkColumnsNames = Collections.<Pair<ColumnName, SortOrder>>emptyList();
        Iterator<Pair<ColumnName, SortOrder>> pkColumnsIterator = Iterators.emptyIterator();
        if (pkConstraint != null) {
            pkColumnsNames = pkConstraint.getColumnNames();
            pkColumnsIterator = pkColumnsNames.iterator();
            pkName = pkConstraint.getName();
        }
        // Although unusual, it's possible to set a mapped VIEW as having immutable rows.
        // This tells Phoenix that you're managing the index maintenance yourself.
        if (tableType != PTableType.INDEX && (tableType != PTableType.VIEW || viewType == ViewType.MAPPED)) {
            // TODO remove TableProperty.IMMUTABLE_ROWS at the next major release
            Boolean isImmutableRowsProp = statement.immutableRows() != null ? statement.immutableRows() : (Boolean) TableProperty.IMMUTABLE_ROWS.getValue(tableProps);
            if (isImmutableRowsProp == null) {
                isImmutableRows = connection.getQueryServices().getProps().getBoolean(QueryServices.IMMUTABLE_ROWS_ATTRIB, QueryServicesOptions.DEFAULT_IMMUTABLE_ROWS);
            } else {
                isImmutableRows = isImmutableRowsProp;
            }
        }
        if (tableType == PTableType.TABLE) {
            Boolean isAppendOnlySchemaProp = (Boolean) TableProperty.APPEND_ONLY_SCHEMA.getValue(tableProps);
            isAppendOnlySchema = isAppendOnlySchemaProp != null ? isAppendOnlySchemaProp : false;
        }
        // Can't set any of these on views or shared indexes on views
        if (tableType != PTableType.VIEW && !allocateIndexId) {
            saltBucketNum = (Integer) TableProperty.SALT_BUCKETS.getValue(tableProps);
            if (saltBucketNum != null) {
                if (saltBucketNum < 0 || saltBucketNum > SaltingUtil.MAX_BUCKET_NUM) {
                    throw new SQLExceptionInfo.Builder(SQLExceptionCode.INVALID_BUCKET_NUM).build().buildException();
                }
            }
            // Salt the index table if the data table is salted
            if (saltBucketNum == null) {
                if (parent != null) {
                    saltBucketNum = parent.getBucketNum();
                }
            } else if (saltBucketNum.intValue() == 0) {
                // Provides a way for an index to not be salted if its data table is salted
                saltBucketNum = null;
            }
            addSaltColumn = (saltBucketNum != null);
        }
        // Can't set MULTI_TENANT or DEFAULT_COLUMN_FAMILY_NAME on an INDEX or a non mapped VIEW
        if (tableType != PTableType.INDEX && (tableType != PTableType.VIEW || viewType == ViewType.MAPPED)) {
            Boolean multiTenantProp = (Boolean) tableProps.get(PhoenixDatabaseMetaData.MULTI_TENANT);
            multiTenant = Boolean.TRUE.equals(multiTenantProp);
            defaultFamilyName = (String) TableProperty.DEFAULT_COLUMN_FAMILY.getValue(tableProps);
        }
        boolean disableWAL = false;
        Boolean disableWALProp = (Boolean) TableProperty.DISABLE_WAL.getValue(tableProps);
        if (disableWALProp != null) {
            disableWAL = disableWALProp;
        }
        long updateCacheFrequency = connection.getQueryServices().getProps().getLong(QueryServices.DEFAULT_UPDATE_CACHE_FREQUENCY_ATRRIB, QueryServicesOptions.DEFAULT_UPDATE_CACHE_FREQUENCY);
        Long updateCacheFrequencyProp = (Long) TableProperty.UPDATE_CACHE_FREQUENCY.getValue(tableProps);
        if (updateCacheFrequencyProp != null) {
            updateCacheFrequency = updateCacheFrequencyProp;
        }
        String autoPartitionSeq = (String) TableProperty.AUTO_PARTITION_SEQ.getValue(tableProps);
        Long guidePostsWidth = (Long) TableProperty.GUIDE_POSTS_WIDTH.getValue(tableProps);
        Boolean storeNullsProp = (Boolean) TableProperty.STORE_NULLS.getValue(tableProps);
        if (storeNullsProp == null) {
            if (parent == null) {
                storeNulls = connection.getQueryServices().getProps().getBoolean(QueryServices.DEFAULT_STORE_NULLS_ATTRIB, QueryServicesOptions.DEFAULT_STORE_NULLS);
                tableProps.put(PhoenixDatabaseMetaData.STORE_NULLS, Boolean.valueOf(storeNulls));
            }
        } else {
            storeNulls = storeNullsProp;
        }
        Boolean transactionalProp = (Boolean) TableProperty.TRANSACTIONAL.getValue(tableProps);
        if (transactionalProp != null && parent != null) {
            throw new SQLExceptionInfo.Builder(SQLExceptionCode.ONLY_TABLE_MAY_BE_DECLARED_TRANSACTIONAL).setSchemaName(schemaName).setTableName(tableName).build().buildException();
        }
        if (parent == null) {
            if (transactionalProp == null) {
                transactional = connection.getQueryServices().getProps().getBoolean(QueryServices.DEFAULT_TABLE_ISTRANSACTIONAL_ATTRIB, QueryServicesOptions.DEFAULT_TABLE_ISTRANSACTIONAL);
            } else {
                transactional = transactionalProp;
            }
        }
        boolean transactionsEnabled = connection.getQueryServices().getProps().getBoolean(QueryServices.TRANSACTIONS_ENABLED, QueryServicesOptions.DEFAULT_TRANSACTIONS_ENABLED);
        // can't create a transactional table if transactions are not enabled
        if (!transactionsEnabled && transactional) {
            throw new SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_CREATE_TXN_TABLE_IF_TXNS_DISABLED).setSchemaName(schemaName).setTableName(tableName).build().buildException();
        }
        // can't create a transactional table if it has a row timestamp column
        if (pkConstraint.getNumColumnsWithRowTimestamp() > 0 && transactional) {
            throw new SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_CREATE_TXN_TABLE_WITH_ROW_TIMESTAMP).setSchemaName(schemaName).setTableName(tableName).build().buildException();
        }
        // Put potentially inferred value into tableProps as it's used by the createTable call below
        // to determine which coprocessors to install on the new table.
        tableProps.put(PhoenixDatabaseMetaData.TRANSACTIONAL, transactional);
        if (transactional) {
            // If TTL set, use Tephra TTL property name instead
            Object ttl = commonFamilyProps.remove(HColumnDescriptor.TTL);
            if (ttl != null) {
                commonFamilyProps.put(TxConstants.PROPERTY_TTL, ttl);
            }
        }
        boolean useStatsForParallelization = true;
        Boolean useStatsForParallelizationProp = (Boolean) TableProperty.USE_STATS_FOR_PARALLELIZATION.getValue(tableProps);
        if (useStatsForParallelizationProp != null) {
            useStatsForParallelization = useStatsForParallelizationProp;
        } else {
            useStatsForParallelization = connection.getQueryServices().getProps().getBoolean(QueryServices.USE_STATS_FOR_PARALLELIZATION, QueryServicesOptions.DEFAULT_USE_STATS_FOR_PARALLELIZATION);
        }
        boolean sharedTable = statement.getTableType() == PTableType.VIEW || allocateIndexId;
        if (transactional) {
            // Transactional tables must store nulls, both for rollback and for the index
            // maintenance code being able to see the prior state to update the rows correctly.
            if (Boolean.FALSE.equals(storeNullsProp)) {
                throw new SQLExceptionInfo.Builder(SQLExceptionCode.STORE_NULLS_MUST_BE_TRUE_FOR_TRANSACTIONAL).setSchemaName(schemaName).setTableName(tableName).build().buildException();
            }
            // Force STORE_NULLS to true when transactional as Tephra cannot deal with column deletes
            storeNulls = true;
            tableProps.put(PhoenixDatabaseMetaData.STORE_NULLS, Boolean.TRUE);
            if (!sharedTable) {
                Integer maxVersionsProp = (Integer) commonFamilyProps.get(HConstants.VERSIONS);
                if (maxVersionsProp == null) {
                    if (parent != null) {
                        HTableDescriptor desc = connection.getQueryServices().getTableDescriptor(parent.getPhysicalName().getBytes());
                        if (desc != null) {
                            maxVersionsProp = desc.getFamily(SchemaUtil.getEmptyColumnFamily(parent)).getMaxVersions();
                        }
                    }
                    if (maxVersionsProp == null) {
                        maxVersionsProp = connection.getQueryServices().getProps().getInt(QueryServices.MAX_VERSIONS_TRANSACTIONAL_ATTRIB, QueryServicesOptions.DEFAULT_MAX_VERSIONS_TRANSACTIONAL);
                    }
                    commonFamilyProps.put(HConstants.VERSIONS, maxVersionsProp);
                }
            }
        }
        timestamp = timestamp == null ? TransactionUtil.getTableTimestamp(connection, transactional) : timestamp;
        // Delay this check as it is supported to have IMMUTABLE_ROWS and SALT_BUCKETS defined on views
        if (sharedTable) {
            if (tableProps.get(PhoenixDatabaseMetaData.DEFAULT_COLUMN_FAMILY_NAME) != null) {
                throw new SQLExceptionInfo.Builder(SQLExceptionCode.DEFAULT_COLUMN_FAMILY_ON_SHARED_TABLE).setSchemaName(schemaName).setTableName(tableName).build().buildException();
            }
            if (SchemaUtil.hasHTableDescriptorProps(tableProps)) {
                throw new SQLExceptionInfo.Builder(SQLExceptionCode.VIEW_WITH_PROPERTIES).build().buildException();
            }
        }
        List<ColumnDef> colDefs = statement.getColumnDefs();
        LinkedHashMap<PColumn, PColumn> columns;
        LinkedHashSet<PColumn> pkColumns;
        if (tenantId != null && !sharedTable) {
            throw new SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_CREATE_TENANT_SPECIFIC_TABLE).setSchemaName(schemaName).setTableName(tableName).build().buildException();
        }
        if (autoPartitionSeq != null) {
            int autoPartitionColIndex = multiTenant ? 1 : 0;
            PDataType dataType = colDefs.get(autoPartitionColIndex).getDataType();
            if (!PLong.INSTANCE.isCastableTo(dataType)) {
                throw new SQLExceptionInfo.Builder(SQLExceptionCode.SEQUENCE_NOT_CASTABLE_TO_AUTO_PARTITION_ID_COLUMN).setSchemaName(schemaName).setTableName(tableName).build().buildException();
            }
        }
        if (tableType == PTableType.VIEW) {
            physicalNames = Collections.singletonList(PNameFactory.newName(parent.getPhysicalName().getString()));
            if (viewType == ViewType.MAPPED) {
                columns = Maps.newLinkedHashMap();
                pkColumns = newLinkedHashSetWithExpectedSize(colDefs.size());
            } else {
                // Propagate property values to VIEW.
                // TODO: formalize the known set of these properties
                // Manually transfer the ROW_KEY_ORDER_OPTIMIZABLE_BYTES from parent as we don't
                // want to add this hacky flag to the schema (see PHOENIX-2067).
                rowKeyOrderOptimizable = parent.rowKeyOrderOptimizable();
                if (rowKeyOrderOptimizable) {
                    UpgradeUtil.addRowKeyOrderOptimizableCell(tableMetaData, SchemaUtil.getTableKey(tenantIdStr, schemaName, tableName), clientTimeStamp);
                }
                multiTenant = parent.isMultiTenant();
                saltBucketNum = parent.getBucketNum();
                isAppendOnlySchema = parent.isAppendOnlySchema();
                isImmutableRows = parent.isImmutableRows();
                if (updateCacheFrequencyProp == null) {
                    // set to the parent value if the property is not set on the view
                    updateCacheFrequency = parent.getUpdateCacheFrequency();
                }
                disableWAL = (disableWALProp == null ? parent.isWALDisabled() : disableWALProp);
                defaultFamilyName = parent.getDefaultFamilyName() == null ? null : parent.getDefaultFamilyName().getString();
                List<PColumn> allColumns = parent.getColumns();
                if (saltBucketNum != null) {
                    // Don't include salt column in columns, as it should not have it when created
                    allColumns = allColumns.subList(1, allColumns.size());
                }
                columns = new LinkedHashMap<PColumn, PColumn>(allColumns.size() + colDefs.size());
                for (PColumn column : allColumns) {
                    columns.put(column, column);
                }
                pkColumns = newLinkedHashSet(parent.getPKColumns());
                // Add row linking view to its parent 
                PreparedStatement linkStatement = connection.prepareStatement(CREATE_VIEW_LINK);
                linkStatement.setString(1, tenantIdStr);
                linkStatement.setString(2, schemaName);
                linkStatement.setString(3, tableName);
                linkStatement.setString(4, parent.getName().getString());
                linkStatement.setByte(5, LinkType.PARENT_TABLE.getSerializedValue());
                linkStatement.setString(6, parent.getTenantId() == null ? null : parent.getTenantId().getString());
                linkStatement.execute();
                // Add row linking parent to view
                linkStatement = connection.prepareStatement(CREATE_CHILD_LINK);
                linkStatement.setString(1, parent.getTenantId() == null ? null : parent.getTenantId().getString());
                linkStatement.setString(2, parent.getSchemaName() == null ? null : parent.getSchemaName().getString());
                linkStatement.setString(3, parent.getTableName().getString());
                linkStatement.setString(4, tenantIdStr);
                linkStatement.setString(5, SchemaUtil.getTableName(schemaName, tableName));
                linkStatement.setByte(6, LinkType.CHILD_TABLE.getSerializedValue());
                linkStatement.execute();
            }
        } else {
            columns = new LinkedHashMap<PColumn, PColumn>(colDefs.size());
            // in case salted
            pkColumns = newLinkedHashSetWithExpectedSize(colDefs.size() + 1);
        }
        // Don't add a link for a mapped view, as it just points back to itself and causes the drop to
        // fail because it looks like there's always a view associated with it.
        if (!physicalNames.isEmpty()) {
            // Upsert the physical name for a mapped view only if it differs from the view's own full table name.
            // Otherwise, we end up with a self-referencing link and then cannot ever drop the view.
            if (viewType != ViewType.MAPPED || !physicalNames.get(0).getString().equals(SchemaUtil.getTableName(schemaName, tableName))) {
                // Add row linking from data table row to physical table row
                PreparedStatement linkStatement = connection.prepareStatement(CREATE_LINK);
                for (PName physicalName : physicalNames) {
                    linkStatement.setString(1, tenantIdStr);
                    linkStatement.setString(2, schemaName);
                    linkStatement.setString(3, tableName);
                    linkStatement.setString(4, physicalName.getString());
                    linkStatement.setByte(5, LinkType.PHYSICAL_TABLE.getSerializedValue());
                    if (tableType == PTableType.VIEW) {
                        PTable physicalTable = connection.getTable(new PTableKey(null, physicalName.getString().replace(QueryConstants.NAMESPACE_SEPARATOR, QueryConstants.NAME_SEPARATOR)));
                        linkStatement.setLong(6, physicalTable.getSequenceNumber());
                        linkStatement.setString(7, null);
                    } else {
                        linkStatement.setLong(6, parent.getSequenceNumber());
                        linkStatement.setString(7, PTableType.INDEX.getSerializedValue());
                    }
                    linkStatement.execute();
                }
                tableMetaData.addAll(connection.getMutationState().toMutations(timestamp).next().getSecond());
                connection.rollback();
            }
        }
        Map<String, PName> familyNames = Maps.newLinkedHashMap();
        boolean isPK = false;
        boolean rowTimeStampColumnAlreadyFound = false;
        int positionOffset = columns.size();
        if (saltBucketNum != null) {
            positionOffset++;
            if (addSaltColumn) {
                pkColumns.add(SaltingUtil.SALTING_COLUMN);
            }
        }
        int pkPositionOffset = pkColumns.size();
        int position = positionOffset;
        EncodedCQCounter cqCounter = NULL_COUNTER;
        PTable viewPhysicalTable = null;
        if (tableType == PTableType.VIEW) {
            /*
             * We can't control what column qualifiers are used in HTable mapped to Phoenix views. So we are not
             * able to encode column names.
             */
            if (viewType != MAPPED) {
                /*
                     * For regular phoenix views, use the storage scheme of the physical table since they all share the
                     * the same HTable. Views always use the base table's column qualifier counter for doling out
                     * encoded column qualifier.
                     */
                viewPhysicalTable = PhoenixRuntime.getTable(connection, physicalNames.get(0).getString());
                immutableStorageScheme = viewPhysicalTable.getImmutableStorageScheme();
                encodingScheme = viewPhysicalTable.getEncodingScheme();
                if (EncodedColumnsUtil.usesEncodedColumnNames(viewPhysicalTable)) {
                    cqCounter = viewPhysicalTable.getEncodedCQCounter();
                }
            }
        } else if (!SchemaUtil.isSystemTable(Bytes.toBytes(SchemaUtil.getTableName(schemaName, tableName)))) {
            // System tables have hard-coded column qualifiers, so we can't use column encoding for them.
            /*
             * Indexes inherit the storage scheme of their parent data tables. Otherwise, we always attempt to
             * create tables with encoded column names.
             *
             * Also of note is the case of shared indexes, i.e. local indexes and view indexes. In these cases,
             * column qualifiers for covered columns don't have to be unique, because rows of the logical indexes
             * are partitioned by virtue of the indexId present in the row key. As such, different shared indexes
             * can use potentially overlapping column qualifiers.
             *
             * If the HBase table already exists, then possibly encoded or non-encoded column qualifiers were used,
             * so we proceed with the non-encoded column qualifier scheme. If the Phoenix metadata for this table
             * already exists, we rely on the PTable (with the appropriate storage scheme) returned in the
             * MetaDataMutationResult to update the client cache. If the Phoenix metadata doesn't exist yet, the
             * non-encoded column qualifier scheme is the safe choice, because we cannot control the column
             * qualifiers that were used when the HBase table was populated.
             */
            byte[] tableNameBytes = SchemaUtil.getTableNameAsBytes(schemaName, tableName);
            boolean tableExists = true;
            try {
                HTableDescriptor tableDescriptor = connection.getQueryServices().getTableDescriptor(tableNameBytes);
                if (tableDescriptor == null) {
                    // for connectionless
                    tableExists = false;
                }
            } catch (org.apache.phoenix.schema.TableNotFoundException e) {
                tableExists = false;
            }
            if (tableExists) {
                encodingScheme = NON_ENCODED_QUALIFIERS;
                immutableStorageScheme = ONE_CELL_PER_COLUMN;
            } else if (parent != null) {
                encodingScheme = parent.getEncodingScheme();
                immutableStorageScheme = parent.getImmutableStorageScheme();
            } else {
                Byte encodingSchemeSerializedByte = (Byte) TableProperty.COLUMN_ENCODED_BYTES.getValue(tableProps);
                if (encodingSchemeSerializedByte == null) {
                    encodingSchemeSerializedByte = (byte) connection.getQueryServices().getProps().getInt(QueryServices.DEFAULT_COLUMN_ENCODED_BYTES_ATRRIB, QueryServicesOptions.DEFAULT_COLUMN_ENCODED_BYTES);
                }
                encodingScheme = QualifierEncodingScheme.fromSerializedValue(encodingSchemeSerializedByte);
                if (isImmutableRows) {
                    immutableStorageScheme = (ImmutableStorageScheme) TableProperty.IMMUTABLE_STORAGE_SCHEME.getValue(tableProps);
                    if (immutableStorageScheme == null) {
                        if (multiTenant) {
                            immutableStorageScheme = ImmutableStorageScheme.valueOf(connection.getQueryServices().getProps().get(QueryServices.DEFAULT_MULTITENANT_IMMUTABLE_STORAGE_SCHEME_ATTRIB, QueryServicesOptions.DEFAULT_MULTITENANT_IMMUTABLE_STORAGE_SCHEME));
                        } else {
                            immutableStorageScheme = ImmutableStorageScheme.valueOf(connection.getQueryServices().getProps().get(QueryServices.DEFAULT_IMMUTABLE_STORAGE_SCHEME_ATTRIB, QueryServicesOptions.DEFAULT_IMMUTABLE_STORAGE_SCHEME));
                        }
                    }
                    if (immutableStorageScheme != ONE_CELL_PER_COLUMN && encodingScheme == NON_ENCODED_QUALIFIERS) {
                        throw new SQLExceptionInfo.Builder(SQLExceptionCode.INVALID_IMMUTABLE_STORAGE_SCHEME_AND_COLUMN_QUALIFIER_BYTES).setSchemaName(schemaName).setTableName(tableName).build().buildException();
                    }
                }
            }
            cqCounter = encodingScheme != NON_ENCODED_QUALIFIERS ? new EncodedCQCounter() : NULL_COUNTER;
        }
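        // Walk the column definitions, assigning encoded column qualifiers where
        // applicable and recording any per-family counters that change so they can
        // be persisted to SYSTEM.CATALOG further below.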
        Map<String, Integer> changedCqCounters = new HashMap<>(colDefs.size());
        for (ColumnDef colDef : colDefs) {
            rowTimeStampColumnAlreadyFound = checkAndValidateRowTimestampCol(colDef, pkConstraint, rowTimeStampColumnAlreadyFound, tableType);
            if (colDef.isPK()) {
                // i.e. the column is declared as CREATE TABLE COLNAME DATATYPE PRIMARY KEY...
                if (isPK) {
                    throw new SQLExceptionInfo.Builder(SQLExceptionCode.PRIMARY_KEY_ALREADY_EXISTS).setColumnName(colDef.getColumnDefName().getColumnName()).build().buildException();
                }
                isPK = true;
            } else {
                // do not allow setting NOT-NULL constraint on non-primary columns.
                if (Boolean.FALSE.equals(colDef.isNull()) && (isPK || (pkConstraint != null && !pkConstraint.contains(colDef.getColumnDefName())))) {
                    throw new SQLExceptionInfo.Builder(SQLExceptionCode.INVALID_NOT_NULL_CONSTRAINT).setSchemaName(schemaName).setTableName(tableName).setColumnName(colDef.getColumnDefName().getColumnName()).build().buildException();
                }
            }
            ColumnName columnDefName = colDef.getColumnDefName();
            String colDefFamily = columnDefName.getFamilyName();
            boolean isPkColumn = isPkColumn(pkConstraint, colDef, columnDefName);
            String cqCounterFamily = null;
            if (!isPkColumn) {
                if (immutableStorageScheme == SINGLE_CELL_ARRAY_WITH_OFFSETS && encodingScheme != NON_ENCODED_QUALIFIERS) {
                    // For this scheme we track column qualifier counters at the column family level.
                    cqCounterFamily = colDefFamily != null ? colDefFamily : (defaultFamilyName != null ? defaultFamilyName : DEFAULT_COLUMN_FAMILY);
                } else {
                    // For other schemes, column qualifier counters are tracked using the default column family.
                    cqCounterFamily = defaultFamilyName != null ? defaultFamilyName : DEFAULT_COLUMN_FAMILY;
                }
            }
            Integer encodedCQ = isPkColumn ? null : cqCounter.getNextQualifier(cqCounterFamily);
            byte[] columnQualifierBytes = null;
            try {
                columnQualifierBytes = EncodedColumnsUtil.getColumnQualifierBytes(columnDefName.getColumnName(), encodedCQ, encodingScheme, isPkColumn);
            } catch (QualifierOutOfRangeException e) {
                throw new SQLExceptionInfo.Builder(SQLExceptionCode.MAX_COLUMNS_EXCEEDED).setSchemaName(schemaName).setTableName(tableName).build().buildException();
            }
            PColumn column = newColumn(position++, colDef, pkConstraint, defaultFamilyName, false, columnQualifierBytes);
            if (cqCounter.increment(cqCounterFamily)) {
                changedCqCounters.put(cqCounterFamily, cqCounter.getNextQualifier(cqCounterFamily));
            }
            if (SchemaUtil.isPKColumn(column)) {
                // TODO: remove this constraint?
                if (pkColumnsIterator.hasNext() && !column.getName().getString().equals(pkColumnsIterator.next().getFirst().getColumnName())) {
                    throw new SQLExceptionInfo.Builder(SQLExceptionCode.PRIMARY_KEY_OUT_OF_ORDER).setSchemaName(schemaName).setTableName(tableName).setColumnName(column.getName().getString()).build().buildException();
                }
                if (tableType == PTableType.VIEW && viewType != ViewType.MAPPED) {
                    throwIfLastPKOfParentIsFixedLength(parent, schemaName, tableName, colDef);
                }
                if (!pkColumns.add(column)) {
                    throw new ColumnAlreadyExistsException(schemaName, tableName, column.getName().getString());
                }
            }
            if (columns.put(column, column) != null) {
                throw new ColumnAlreadyExistsException(schemaName, tableName, column.getName().getString());
            }
            if ((colDef.getDataType() == PVarbinary.INSTANCE || colDef.getDataType().isArrayType()) && SchemaUtil.isPKColumn(column) && pkColumnsIterator.hasNext()) {
                throw new SQLExceptionInfo.Builder(SQLExceptionCode.VARBINARY_IN_ROW_KEY).setSchemaName(schemaName).setTableName(tableName).setColumnName(column.getName().getString()).build().buildException();
            }
            if (column.getFamilyName() != null) {
                familyNames.put(IndexUtil.getActualColumnFamilyName(column.getFamilyName().getString()), column.getFamilyName());
            }
        }
        // We need a PK definition for a TABLE or mapped VIEW
        if (!isPK && pkColumnsNames.isEmpty() && tableType != PTableType.VIEW && viewType != ViewType.MAPPED) {
            throw new SQLExceptionInfo.Builder(SQLExceptionCode.PRIMARY_KEY_MISSING).setSchemaName(schemaName).setTableName(tableName).build().buildException();
        }
        if (!pkColumnsNames.isEmpty() && pkColumnsNames.size() != pkColumns.size() - pkPositionOffset) {
            // Then a column name in the primary key constraint wasn't resolved
            Iterator<Pair<ColumnName, SortOrder>> pkColumnNamesIterator = pkColumnsNames.iterator();
            while (pkColumnNamesIterator.hasNext()) {
                ColumnName colName = pkColumnNamesIterator.next().getFirst();
                ColumnDef colDef = findColumnDefOrNull(colDefs, colName);
                if (colDef == null) {
                    throw new ColumnNotFoundException(schemaName, tableName, null, colName.getColumnName());
                }
                if (colDef.getColumnDefName().getFamilyName() != null) {
                    throw new SQLExceptionInfo.Builder(SQLExceptionCode.PRIMARY_KEY_WITH_FAMILY_NAME).setSchemaName(schemaName).setTableName(tableName).setColumnName(colDef.getColumnDefName().getColumnName()).setFamilyName(colDef.getColumnDefName().getFamilyName()).build().buildException();
                }
            }
            // The above should actually find the specific one, but just in case...
            throw new SQLExceptionInfo.Builder(SQLExceptionCode.INVALID_PRIMARY_KEY_CONSTRAINT).setSchemaName(schemaName).setTableName(tableName).build().buildException();
        }
        List<Pair<byte[], Map<String, Object>>> familyPropList = Lists.newArrayListWithExpectedSize(familyNames.size());
        if (!statement.getProps().isEmpty()) {
            for (String familyName : statement.getProps().keySet()) {
                if (!familyName.equals(QueryConstants.ALL_FAMILY_PROPERTIES_KEY)) {
                    if (familyNames.get(familyName) == null) {
                        throw new SQLExceptionInfo.Builder(SQLExceptionCode.PROPERTIES_FOR_FAMILY).setFamilyName(familyName).build().buildException();
                    } else if (statement.getTableType() == PTableType.VIEW) {
                        throw new SQLExceptionInfo.Builder(SQLExceptionCode.VIEW_WITH_PROPERTIES).build().buildException();
                    }
                }
            }
        }
        throwIfInsufficientColumns(schemaName, tableName, pkColumns, saltBucketNum != null, multiTenant);
        for (PName familyName : familyNames.values()) {
            String fam = familyName.getString();
            Collection<Pair<String, Object>> props = statement.getProps().get(IndexUtil.getActualColumnFamilyName(fam));
            if (props.isEmpty()) {
                familyPropList.add(new Pair<byte[], Map<String, Object>>(familyName.getBytes(), commonFamilyProps));
            } else {
                Map<String, Object> combinedFamilyProps = Maps.newHashMapWithExpectedSize(props.size() + commonFamilyProps.size());
                combinedFamilyProps.putAll(commonFamilyProps);
                for (Pair<String, Object> prop : props) {
                    // TTL can only be set for the entire table, i.e. it can't be column family specific.
                    if (!familyName.equals(QueryConstants.ALL_FAMILY_PROPERTIES_KEY) && prop.getFirst().equals(TTL)) {
                        throw new SQLExceptionInfo.Builder(SQLExceptionCode.COLUMN_FAMILY_NOT_ALLOWED_FOR_TTL).build().buildException();
                    }
                    combinedFamilyProps.put(prop.getFirst(), prop.getSecond());
                }
                familyPropList.add(new Pair<byte[], Map<String, Object>>(familyName.getBytes(), combinedFamilyProps));
            }
        }
        if (familyNames.isEmpty()) {
            //if there are no family names, use the default column family name. This also takes care of the case when
            //the table ddl has only PK cols present (which means familyNames is empty).
            byte[] cf = defaultFamilyName == null ? (!isLocalIndex ? QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES : QueryConstants.DEFAULT_LOCAL_INDEX_COLUMN_FAMILY_BYTES) : Bytes.toBytes(defaultFamilyName);
            familyPropList.add(new Pair<byte[], Map<String, Object>>(cf, commonFamilyProps));
        }
        // Bootstrapping for our SYSTEM.TABLE that creates itself before it exists
        if (SchemaUtil.isMetaTable(schemaName, tableName)) {
            // TODO: what about stats for system catalog?
            PName newSchemaName = PNameFactory.newName(schemaName);
            // Column names and qualifiers are hardcoded for system tables.
            PTable table = PTableImpl.makePTable(tenantId, newSchemaName, PNameFactory.newName(tableName), tableType, null, MetaDataProtocol.MIN_TABLE_TIMESTAMP, PTable.INITIAL_SEQ_NUM, PNameFactory.newName(QueryConstants.SYSTEM_TABLE_PK_NAME), null, columns.values(), null, null, Collections.<PTable>emptyList(), isImmutableRows, Collections.<PName>emptyList(), defaultFamilyName == null ? null : PNameFactory.newName(defaultFamilyName), null, Boolean.TRUE.equals(disableWAL), false, false, null, null, indexType, true, false, 0, 0L, isNamespaceMapped, autoPartitionSeq, isAppendOnlySchema, ONE_CELL_PER_COLUMN, NON_ENCODED_QUALIFIERS, PTable.EncodedCQCounter.NULL_COUNTER, true);
            connection.addTable(table, MetaDataProtocol.MIN_TABLE_TIMESTAMP);
        }
        // Update column qualifier counters
        if (EncodedColumnsUtil.usesEncodedColumnNames(encodingScheme)) {
            // Store the encoded column counter for phoenix entities that have their own hbase
            // tables i.e. base tables and indexes.
            String schemaNameToUse = tableType == VIEW ? viewPhysicalTable.getSchemaName().getString() : schemaName;
            String tableNameToUse = tableType == VIEW ? viewPhysicalTable.getTableName().getString() : tableName;
            boolean sharedIndex = tableType == PTableType.INDEX && (indexType == IndexType.LOCAL || parent.getType() == PTableType.VIEW);
            // For local indexes and indexes on views, pass on the tenant id since all their metadata rows have
            // tenant ids in them.
            String tenantIdToUse = connection.getTenantId() != null && sharedIndex ? connection.getTenantId().getString() : null;
            // When a view adds its own columns, the base table's sequence number is incremented
            // too since we want clients to get the latest PTable of the base table.
            for (Entry<String, Integer> entry : changedCqCounters.entrySet()) {
                try (PreparedStatement linkStatement = connection.prepareStatement(UPDATE_ENCODED_COLUMN_COUNTER)) {
                    linkStatement.setString(1, tenantIdToUse);
                    linkStatement.setString(2, schemaNameToUse);
                    linkStatement.setString(3, tableNameToUse);
                    linkStatement.setString(4, entry.getKey());
                    linkStatement.setInt(5, entry.getValue());
                    linkStatement.execute();
                }
            }
            if (tableType == VIEW && !changedCqCounters.isEmpty()) {
                PreparedStatement incrementStatement = connection.prepareStatement(INCREMENT_SEQ_NUM);
                incrementStatement.setString(1, null);
                incrementStatement.setString(2, viewPhysicalTable.getSchemaName().getString());
                incrementStatement.setString(3, viewPhysicalTable.getTableName().getString());
                incrementStatement.setLong(4, viewPhysicalTable.getSequenceNumber() + 1);
                incrementStatement.execute();
            }
            if (connection.getMutationState().toMutations(timestamp).hasNext()) {
                tableMetaData.addAll(connection.getMutationState().toMutations(timestamp).next().getSecond());
                connection.rollback();
            }
        }
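        // Upsert one SYSTEM.CATALOG row per column, draining the resulting
        // mutations after each column so their relative order is preserved.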
        short nextKeySeq = 0;
        List<Mutation> columnMetadata = Lists.newArrayListWithExpectedSize(columns.size());
        try (PreparedStatement colUpsert = connection.prepareStatement(INSERT_COLUMN_CREATE_TABLE)) {
            for (Map.Entry<PColumn, PColumn> entry : columns.entrySet()) {
                PColumn column = entry.getValue();
                final int columnPosition = column.getPosition();
                // set the autoPartition column attributes
                if (parent != null && parent.getAutoPartitionSeqName() != null && parent.getPKColumns().get(MetaDataUtil.getAutoPartitionColIndex(parent)).equals(column)) {
                    entry.setValue(column = new DelegateColumn(column) {

                        @Override
                        public byte[] getViewConstant() {
                            // will be set correctly on the server
                            return QueryConstants.EMPTY_COLUMN_VALUE_BYTES;
                        }

                        @Override
                        public boolean isViewReferenced() {
                            return true;
                        }
                    });
                } else if (isViewColumnReferenced != null) {
                    if (viewColumnConstants != null && columnPosition < viewColumnConstants.length) {
                        entry.setValue(column = new DelegateColumn(column) {

                            @Override
                            public byte[] getViewConstant() {
                                return viewColumnConstants[columnPosition];
                            }

                            @Override
                            public boolean isViewReferenced() {
                                return isViewColumnReferenced.get(columnPosition);
                            }
                        });
                    } else {
                        entry.setValue(column = new DelegateColumn(column) {

                            @Override
                            public boolean isViewReferenced() {
                                return isViewColumnReferenced.get(columnPosition);
                            }
                        });
                    }
                }
                Short keySeq = SchemaUtil.isPKColumn(column) ? ++nextKeySeq : null;
                addColumnMutation(schemaName, tableName, column, colUpsert, parentTableName, pkName, keySeq, saltBucketNum != null);
                columnMetadata.addAll(connection.getMutationState().toMutations(timestamp).next().getSecond());
                connection.rollback();
            }
        }
        // add the columns in reverse order since we reverse the list later
        Collections.reverse(columnMetadata);
        tableMetaData.addAll(columnMetadata);
        String dataTableName = parent == null || tableType == PTableType.VIEW ? null : parent.getTableName().getString();
        PIndexState indexState = parent == null || tableType == PTableType.VIEW ? null : PIndexState.BUILDING;
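        // Upsert the table header row itself (CREATE_TABLE is the SYSTEM.CATALOG upsert template).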
        PreparedStatement tableUpsert = connection.prepareStatement(CREATE_TABLE);
        tableUpsert.setString(1, tenantIdStr);
        tableUpsert.setString(2, schemaName);
        tableUpsert.setString(3, tableName);
        tableUpsert.setString(4, tableType.getSerializedValue());
        tableUpsert.setLong(5, PTable.INITIAL_SEQ_NUM);
        tableUpsert.setInt(6, position);
        if (saltBucketNum != null) {
            tableUpsert.setInt(7, saltBucketNum);
        } else {
            tableUpsert.setNull(7, Types.INTEGER);
        }
        tableUpsert.setString(8, pkName);
        tableUpsert.setString(9, dataTableName);
        tableUpsert.setString(10, indexState == null ? null : indexState.getSerializedValue());
        tableUpsert.setBoolean(11, isImmutableRows);
        tableUpsert.setString(12, defaultFamilyName);
        if (parent != null && parent.getAutoPartitionSeqName() != null && viewStatement == null) {
            // set to non-null value so that we will generate a Put that
            // will be set correctly on the server
            tableUpsert.setString(13, QueryConstants.EMPTY_COLUMN_VALUE);
        } else {
            tableUpsert.setString(13, viewStatement);
        }
        tableUpsert.setBoolean(14, disableWAL);
        tableUpsert.setBoolean(15, multiTenant);
        if (viewType == null) {
            tableUpsert.setNull(16, Types.TINYINT);
        } else {
            tableUpsert.setByte(16, viewType.getSerializedValue());
        }
        if (indexType == null) {
            tableUpsert.setNull(17, Types.TINYINT);
        } else {
            tableUpsert.setByte(17, indexType.getSerializedValue());
        }
        tableUpsert.setBoolean(18, storeNulls);
        if (parent != null && tableType == PTableType.VIEW) {
            tableUpsert.setInt(19, parent.getColumns().size());
        } else {
            tableUpsert.setInt(19, BASE_TABLE_BASE_COLUMN_COUNT);
        }
        tableUpsert.setBoolean(20, transactional);
        tableUpsert.setLong(21, updateCacheFrequency);
        tableUpsert.setBoolean(22, isNamespaceMapped);
        if (autoPartitionSeq == null) {
            tableUpsert.setNull(23, Types.VARCHAR);
        } else {
            tableUpsert.setString(23, autoPartitionSeq);
        }
        tableUpsert.setBoolean(24, isAppendOnlySchema);
        if (guidePostsWidth == null) {
            tableUpsert.setNull(25, Types.BIGINT);
        } else {
            tableUpsert.setLong(25, guidePostsWidth);
        }
        tableUpsert.setByte(26, immutableStorageScheme.getSerializedMetadataValue());
        tableUpsert.setByte(27, encodingScheme.getSerializedMetadataValue());
        tableUpsert.setBoolean(28, useStatsForParallelization);
        tableUpsert.execute();
        if (asyncCreatedDate != null) {
            PreparedStatement setAsync = connection.prepareStatement(SET_ASYNC_CREATED_DATE);
            setAsync.setString(1, tenantIdStr);
            setAsync.setString(2, schemaName);
            setAsync.setString(3, tableName);
            setAsync.setDate(4, asyncCreatedDate);
            setAsync.execute();
        }
        tableMetaData.addAll(connection.getMutationState().toMutations(timestamp).next().getSecond());
        connection.rollback();
        /*
         * The table metadata must be in the following order:
         * 1) table header row
         * 2) ordered column rows
         * 3) parent table header row
         */
        Collections.reverse(tableMetaData);
        if (indexType != IndexType.LOCAL) {
            splits = SchemaUtil.processSplits(splits, pkColumns, saltBucketNum, connection.getQueryServices().getProps().getBoolean(QueryServices.FORCE_ROW_KEY_ORDER_ATTRIB, QueryServicesOptions.DEFAULT_FORCE_ROW_KEY_ORDER));
        }
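        // Ship the ordered metadata mutations to the server in a single createTable
        // call, which routes through the MetaData endpoint coprocessor.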
        MetaDataMutationResult result = connection.getQueryServices().createTable(tableMetaData, viewType == ViewType.MAPPED || allocateIndexId ? physicalNames.get(0).getBytes() : null, tableType, tableProps, familyPropList, splits, isNamespaceMapped, allocateIndexId);
        MutationCode code = result.getMutationCode();
        switch(code) {
            case TABLE_ALREADY_EXISTS:
                if (result.getTable() != null) {
                    // Can happen for transactional table that already exists as HBase table
                    addTableToCache(result);
                }
                if (!statement.ifNotExists()) {
                    throw new TableAlreadyExistsException(schemaName, tableName, result.getTable());
                }
                return null;
            case PARENT_TABLE_NOT_FOUND:
                throw new TableNotFoundException(schemaName, parent.getName().getString());
            case NEWER_TABLE_FOUND:
                // Add table to ConnectionQueryServices so it's cached, but don't add
                // it to this connection as we can't see it.
                if (!statement.ifNotExists()) {
                    throw new NewerTableAlreadyExistsException(schemaName, tableName, result.getTable());
                }
            case UNALLOWED_TABLE_MUTATION:
                throw new SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_MUTATE_TABLE).setSchemaName(schemaName).setTableName(tableName).build().buildException();
            case CONCURRENT_TABLE_MUTATION:
                addTableToCache(result);
                throw new ConcurrentTableMutationException(schemaName, tableName);
            case AUTO_PARTITION_SEQUENCE_NOT_FOUND:
                throw new SQLExceptionInfo.Builder(SQLExceptionCode.AUTO_PARTITION_SEQUENCE_UNDEFINED).setSchemaName(schemaName).setTableName(tableName).build().buildException();
            case CANNOT_COERCE_AUTO_PARTITION_ID:
                throw new SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_COERCE_AUTO_PARTITION_ID).setSchemaName(schemaName).setTableName(tableName).build().buildException();
            case TOO_MANY_INDEXES:
                throw new SQLExceptionInfo.Builder(SQLExceptionCode.TOO_MANY_INDEXES).setSchemaName(SchemaUtil.getSchemaNameFromFullName(parent.getPhysicalName().getString())).setTableName(SchemaUtil.getTableNameFromFullName(parent.getPhysicalName().getString())).build().buildException();
            default:
                // set the view statement and relevant partition column attributes correctly
                if (parent != null && parent.getAutoPartitionSeqName() != null) {
                    final PColumn autoPartitionCol = parent.getPKColumns().get(MetaDataUtil.getAutoPartitionColIndex(parent));
                    final Long autoPartitionNum = Long.valueOf(result.getAutoPartitionNum());
                    columns.put(autoPartitionCol, new DelegateColumn(autoPartitionCol) {

                        @Override
                        public byte[] getViewConstant() {
                            PDataType dataType = autoPartitionCol.getDataType();
                            Object val = dataType.toObject(autoPartitionNum, PLong.INSTANCE);
                            byte[] bytes = new byte[dataType.getByteSize() + 1];
                            dataType.toBytes(val, bytes, 0);
                            return bytes;
                        }

                        @Override
                        public boolean isViewReferenced() {
                            return true;
                        }
                    });
                    String viewPartitionClause = QueryUtil.getViewPartitionClause(MetaDataUtil.getAutoPartitionColumnName(parent), autoPartitionNum);
                    if (viewStatement != null) {
                        viewStatement = viewStatement + " AND " + viewPartitionClause;
                    } else {
                        viewStatement = QueryUtil.getViewStatement(parent.getSchemaName().getString(), parent.getTableName().getString(), viewPartitionClause);
                    }
                }
                PName newSchemaName = PNameFactory.newName(schemaName);
                /*
                 * It doesn't hurt for the PTable of views to have the cqCounter. However, views always rely on the
                 * parent table's counter to dole out encoded column qualifiers. So setting the counter as NULL_COUNTER
                 * for extra safety.
                 */
                EncodedCQCounter cqCounterToBe = tableType == PTableType.VIEW ? NULL_COUNTER : cqCounter;
                PTable table = PTableImpl.makePTable(tenantId, newSchemaName, PNameFactory.newName(tableName), tableType, indexState, timestamp != null ? timestamp : result.getMutationTime(), PTable.INITIAL_SEQ_NUM, pkName == null ? null : PNameFactory.newName(pkName), saltBucketNum, columns.values(), parent == null ? null : parent.getSchemaName(), parent == null ? null : parent.getTableName(), Collections.<PTable>emptyList(), isImmutableRows, physicalNames, defaultFamilyName == null ? null : PNameFactory.newName(defaultFamilyName), viewStatement, Boolean.TRUE.equals(disableWAL), multiTenant, storeNulls, viewType, result.getViewIndexId(), indexType, rowKeyOrderOptimizable, transactional, updateCacheFrequency, 0L, isNamespaceMapped, autoPartitionSeq, isAppendOnlySchema, immutableStorageScheme, encodingScheme, cqCounterToBe, useStatsForParallelization);
                result = new MetaDataMutationResult(code, result.getMutationTime(), table, true);
                addTableToCache(result);
                return table;
        }
    } finally {
        connection.setAutoCommit(wasAutoCommit);
    }
}
Also used : LinkedHashMap(java.util.LinkedHashMap) HashMap(java.util.HashMap) PDataType(org.apache.phoenix.schema.types.PDataType) SQLExceptionInfo(org.apache.phoenix.exception.SQLExceptionInfo) ColumnDef(org.apache.phoenix.parse.ColumnDef) PrimaryKeyConstraint(org.apache.phoenix.parse.PrimaryKeyConstraint) TableName(org.apache.phoenix.parse.TableName) PUnsignedLong(org.apache.phoenix.schema.types.PUnsignedLong) PLong(org.apache.phoenix.schema.types.PLong) Mutation(org.apache.hadoop.hbase.client.Mutation) Map(java.util.Map) QualifierOutOfRangeException(org.apache.phoenix.schema.PTable.QualifierEncodingScheme.QualifierOutOfRangeException) QualifierEncodingScheme(org.apache.phoenix.schema.PTable.QualifierEncodingScheme) ImmutableStorageScheme(org.apache.phoenix.schema.PTable.ImmutableStorageScheme) MetaDataMutationResult(org.apache.phoenix.coprocessor.MetaDataProtocol.MetaDataMutationResult) Pair(org.apache.hadoop.hbase.util.Pair) PreparedStatement(java.sql.PreparedStatement) IndexKeyConstraint(org.apache.phoenix.parse.IndexKeyConstraint) ColumnDefInPkConstraint(org.apache.phoenix.parse.ColumnDefInPkConstraint) MutationCode(org.apache.phoenix.coprocessor.MetaDataProtocol.MutationCode) HTableDescriptor(org.apache.hadoop.hbase.HTableDescriptor) PInteger(org.apache.phoenix.schema.types.PInteger) ColumnName(org.apache.phoenix.parse.ColumnName) EncodedCQCounter(org.apache.phoenix.schema.PTable.EncodedCQCounter)
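
One idiom recurs throughout the method above: auto-commit is disabled, metadata UPSERTs are executed, the pending writes are drained with connection.getMutationState().toMutations(timestamp), and the connection is then rolled back, so the rows only reach SYSTEM.CATALOG via the batched createTable call rather than a normal commit. A minimal sketch of that idiom in isolation (the helper name and SQL string are illustrative; PhoenixConnection, MutationState, Pair, and Mutation are the actual types involved):

import java.sql.PreparedStatement;
import java.sql.SQLException;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import org.apache.hadoop.hbase.client.Mutation;
import org.apache.hadoop.hbase.util.Pair;
import org.apache.phoenix.jdbc.PhoenixConnection;

public final class MetadataDrainSketch {
    // Sketch only: run one metadata UPSERT and capture it as raw HBase Mutations.
    static List<Mutation> drainMetadata(PhoenixConnection connection, String upsertSql, Long timestamp) throws SQLException {
        List<Mutation> metadata = new ArrayList<>();
        connection.setAutoCommit(false);
        try (PreparedStatement ps = connection.prepareStatement(upsertSql)) {
            ps.execute();
        }
        // toMutations() exposes the uncommitted rows, grouped per HBase table.
        Iterator<Pair<byte[], List<Mutation>>> it = connection.getMutationState().toMutations(timestamp);
        while (it.hasNext()) {
            metadata.addAll(it.next().getSecond());
        }
        // Discard the pending state so the rows are never committed through the normal write path.
        connection.rollback();
        return metadata;
    }
}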

Example 47 with Mutation

use of org.apache.hadoop.hbase.client.Mutation in project phoenix by apache.

the class MetaDataClient method dropColumn.
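
As above, a brief hypothetical snippet (table and column names are placeholders) of the DDL that is parsed into the DropColumnStatement handled here:

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.SQLException;

public class DropColumnExample {
    public static void main(String[] args) throws SQLException {
        try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost")) {
            // Parsed into a DropColumnStatement and dispatched to MetaDataClient.dropColumn,
            // which also rewrites or drops any indexes that reference the column.
            conn.createStatement().execute("ALTER TABLE EXAMPLE_TABLE DROP COLUMN IF EXISTS NAME");
        }
    }
}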

public MutationState dropColumn(DropColumnStatement statement) throws SQLException {
    connection.rollback();
    boolean wasAutoCommit = connection.getAutoCommit();
    try {
        connection.setAutoCommit(false);
        PName tenantId = connection.getTenantId();
        TableName tableNameNode = statement.getTable().getName();
        String schemaName = tableNameNode.getSchemaName();
        String tableName = tableNameNode.getTableName();
        String fullTableName = SchemaUtil.getTableName(schemaName, tableName);
        boolean retried = false;
        while (true) {
            final ColumnResolver resolver = FromCompiler.getResolver(statement, connection);
            TableRef tableRef = resolver.getTables().get(0);
            PTable table = tableRef.getTable();
            List<ColumnName> columnRefs = statement.getColumnRefs();
            if (columnRefs == null) {
                columnRefs = Lists.newArrayListWithCapacity(0);
            }
            List<ColumnRef> columnsToDrop = Lists.newArrayListWithExpectedSize(columnRefs.size() + table.getIndexes().size());
            List<TableRef> indexesToDrop = Lists.newArrayListWithExpectedSize(table.getIndexes().size());
            List<Mutation> tableMetaData = Lists.newArrayListWithExpectedSize((table.getIndexes().size() + 1) * (1 + table.getColumns().size() - columnRefs.size()));
            List<PColumn> tableColumnsToDrop = Lists.newArrayListWithExpectedSize(columnRefs.size());
            for (ColumnName column : columnRefs) {
                ColumnRef columnRef = null;
                try {
                    columnRef = resolver.resolveColumn(null, column.getFamilyName(), column.getColumnName());
                } catch (ColumnNotFoundException e) {
                    if (statement.ifExists()) {
                        return new MutationState(0, 0, connection);
                    }
                    throw e;
                }
                PColumn columnToDrop = columnRef.getColumn();
                tableColumnsToDrop.add(columnToDrop);
                if (SchemaUtil.isPKColumn(columnToDrop)) {
                    throw new SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_DROP_PK).setColumnName(columnToDrop.getName().getString()).build().buildException();
                } else if (table.isAppendOnlySchema()) {
                    throw new SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_DROP_COL_APPEND_ONLY_SCHEMA).setColumnName(columnToDrop.getName().getString()).build().buildException();
                }
                columnsToDrop.add(new ColumnRef(columnRef.getTableRef(), columnToDrop.getPosition()));
            }
            dropColumnMutations(table, tableColumnsToDrop);
            boolean removedIndexTableOrColumn = false;
            Long timeStamp = table.isTransactional() ? tableRef.getTimeStamp() : null;
            for (PTable index : table.getIndexes()) {
                IndexMaintainer indexMaintainer = index.getIndexMaintainer(table, connection);
                // gather the indexed and covered column info for this index
                List<PColumn> indexColumnsToDrop = Lists.newArrayListWithExpectedSize(columnRefs.size());
                Set<Pair<String, String>> indexedColsInfo = indexMaintainer.getIndexedColumnInfo();
                Set<ColumnReference> coveredCols = indexMaintainer.getCoveredColumns();
                for (PColumn columnToDrop : tableColumnsToDrop) {
                    Pair<String, String> columnToDropInfo = new Pair<>(columnToDrop.getFamilyName().getString(), columnToDrop.getName().getString());
                    ColumnReference colDropRef = new ColumnReference(columnToDrop.getFamilyName() == null ? null : columnToDrop.getFamilyName().getBytes(), columnToDrop.getColumnQualifierBytes());
                    boolean isColumnIndexed = indexedColsInfo.contains(columnToDropInfo);
                    if (isColumnIndexed) {
                        if (index.getViewIndexId() == null) {
                            indexesToDrop.add(new TableRef(index));
                        }
                        connection.removeTable(tenantId, SchemaUtil.getTableName(schemaName, index.getName().getString()), index.getParentName() == null ? null : index.getParentName().getString(), index.getTimeStamp());
                        removedIndexTableOrColumn = true;
                    } else if (coveredCols.contains(colDropRef)) {
                        String indexColumnName = IndexUtil.getIndexColumnName(columnToDrop);
                        PColumn indexColumn = index.getColumnForColumnName(indexColumnName);
                        indexColumnsToDrop.add(indexColumn);
                        // add the index column to be dropped so that we actually delete the column values
                        columnsToDrop.add(new ColumnRef(new TableRef(index), indexColumn.getPosition()));
                        removedIndexTableOrColumn = true;
                    }
                }
                if (!indexColumnsToDrop.isEmpty()) {
                    long indexTableSeqNum = incrementTableSeqNum(index, index.getType(), -indexColumnsToDrop.size(), null, null);
                    dropColumnMutations(index, indexColumnsToDrop);
                    long clientTimestamp = MutationState.getMutationTimestamp(timeStamp, connection.getSCN());
                    connection.removeColumn(tenantId, index.getName().getString(), indexColumnsToDrop, clientTimestamp, indexTableSeqNum, TransactionUtil.getResolvedTimestamp(connection, index.isTransactional(), clientTimestamp));
                }
            }
            tableMetaData.addAll(connection.getMutationState().toMutations(timeStamp).next().getSecond());
            connection.rollback();
            long seqNum = incrementTableSeqNum(table, statement.getTableType(), -tableColumnsToDrop.size(), null, null);
            tableMetaData.addAll(connection.getMutationState().toMutations(timeStamp).next().getSecond());
            connection.rollback();
            // Force table header to be first in list
            Collections.reverse(tableMetaData);
            /*
                 * Ensure our "empty column family to be" exists. Somewhat of an edge case, but can occur if we drop the last column
                 * in a column family that was the empty column family. In that case, we have to pick another one. If there are no other
                 * ones, then we need to create our default empty column family. Note that this may no longer be necessary once we
                 * support declaring what the empty column family is on a table, as:
                 * - If you declare it, we'd just ensure it's created at DDL time and never switch what it is unless you change it
                 * - If you don't declare it, we can just continue to use the old empty column family in this case, dynamically updating
                 *    the empty column family name on the PTable.
                 */
            for (ColumnRef columnRefToDrop : columnsToDrop) {
                PTable tableContainingColumnToDrop = columnRefToDrop.getTable();
                byte[] emptyCF = getNewEmptyColumnFamilyOrNull(tableContainingColumnToDrop, columnRefToDrop.getColumn());
                if (emptyCF != null) {
                    try {
                        tableContainingColumnToDrop.getColumnFamily(emptyCF);
                    } catch (ColumnFamilyNotFoundException e) {
                        // Only if it's not already a column family do we need to ensure it's created
                        Map<String, List<Pair<String, Object>>> family = new HashMap<>(1);
                        family.put(Bytes.toString(emptyCF), Collections.<Pair<String, Object>>emptyList());
                        // Just use a Put without any key values as the Mutation, as addColumn will treat this specially
                        // TODO: pass through schema name and table name instead to these methods as it's cleaner
                        byte[] tenantIdBytes = connection.getTenantId() == null ? null : connection.getTenantId().getBytes();
                        if (tenantIdBytes == null)
                            tenantIdBytes = ByteUtil.EMPTY_BYTE_ARRAY;
                        connection.getQueryServices().addColumn(Collections.<Mutation>singletonList(new Put(SchemaUtil.getTableKey(tenantIdBytes, tableContainingColumnToDrop.getSchemaName().getBytes(), tableContainingColumnToDrop.getTableName().getBytes()))), tableContainingColumnToDrop, family, Sets.newHashSet(Bytes.toString(emptyCF)), Collections.<PColumn>emptyList());
                    }
                }
            }
            MetaDataMutationResult result = connection.getQueryServices().dropColumn(tableMetaData, statement.getTableType());
            try {
                MutationCode code = processMutationResult(schemaName, tableName, result);
                if (code == MutationCode.COLUMN_NOT_FOUND) {
                    addTableToCache(result);
                    if (!statement.ifExists()) {
                        throw new ColumnNotFoundException(schemaName, tableName, Bytes.toString(result.getFamilyName()), Bytes.toString(result.getColumnName()));
                    }
                    return new MutationState(0, 0, connection);
                }
                // If we've made any index metadata updates, don't bother trying to update the
                // client-side cache, as it would be too painful; just let it be pulled over from
                // the server when needed.
                if (tableColumnsToDrop.size() > 0) {
                    if (removedIndexTableOrColumn)
                        connection.removeTable(tenantId, tableName, table.getParentName() == null ? null : table.getParentName().getString(), table.getTimeStamp());
                    else
                        connection.removeColumn(tenantId, SchemaUtil.getTableName(schemaName, tableName), tableColumnsToDrop, result.getMutationTime(), seqNum, TransactionUtil.getResolvedTime(connection, result));
                }
                // If we have a VIEW, then only delete the metadata, and leave the table data alone
                if (table.getType() != PTableType.VIEW) {
                    MutationState state = null;
                    connection.setAutoCommit(true);
                    Long scn = connection.getSCN();
                    // Delete everything in the column. You'll still be able to do queries at earlier timestamps
                    long ts = (scn == null ? result.getMutationTime() : scn);
                    PostDDLCompiler compiler = new PostDDLCompiler(connection);
                    boolean dropMetaData = connection.getQueryServices().getProps().getBoolean(DROP_METADATA_ATTRIB, DEFAULT_DROP_METADATA);
                    // if the index is a local index or view index it uses a shared physical table
                    // so we need to issue delete markers for all the rows of the index
                    final List<TableRef> tableRefsToDrop = Lists.newArrayList();
                    Map<String, List<TableRef>> tenantIdTableRefMap = Maps.newHashMap();
                    if (result.getSharedTablesToDelete() != null) {
                        for (SharedTableState sharedTableState : result.getSharedTablesToDelete()) {
                            PTableImpl viewIndexTable = new PTableImpl(sharedTableState.getTenantId(), sharedTableState.getSchemaName(), sharedTableState.getTableName(), ts, table.getColumnFamilies(), sharedTableState.getColumns(), sharedTableState.getPhysicalNames(), sharedTableState.getViewIndexId(), table.isMultiTenant(), table.isNamespaceMapped(), table.getImmutableStorageScheme(), table.getEncodingScheme(), table.getEncodedCQCounter(), table.useStatsForParallelization());
                            TableRef indexTableRef = new TableRef(viewIndexTable);
                            PName indexTableTenantId = sharedTableState.getTenantId();
                            if (indexTableTenantId == null) {
                                tableRefsToDrop.add(indexTableRef);
                            } else {
                                if (!tenantIdTableRefMap.containsKey(indexTableTenantId.getString())) {
                                    tenantIdTableRefMap.put(indexTableTenantId.getString(), Lists.<TableRef>newArrayList());
                                }
                                tenantIdTableRefMap.get(indexTableTenantId.getString()).add(indexTableRef);
                            }
                        }
                    }
                    // If we're not dropping metadata on the server, issue the deletes for the
                    // dropped index tables ourselves (with dropMetaData enabled,
                    // they would have been dropped in ConnectionQueryServices.dropColumn)
                    if (!dropMetaData) {
                        tableRefsToDrop.addAll(indexesToDrop);
                    }
                    // Drop any index tables that had the dropped column in the PK
                    state = connection.getQueryServices().updateData(compiler.compile(tableRefsToDrop, null, null, Collections.<PColumn>emptyList(), ts));
                    // Drop any tenant-specific indexes
                    if (!tenantIdTableRefMap.isEmpty()) {
                        for (Entry<String, List<TableRef>> entry : tenantIdTableRefMap.entrySet()) {
                            String indexTenantId = entry.getKey();
                            Properties props = new Properties(connection.getClientInfo());
                            props.setProperty(PhoenixRuntime.TENANT_ID_ATTRIB, indexTenantId);
                            try (PhoenixConnection tenantConn = new PhoenixConnection(connection, connection.getQueryServices(), props)) {
                                PostDDLCompiler dropCompiler = new PostDDLCompiler(tenantConn);
                                state = tenantConn.getQueryServices().updateData(dropCompiler.compile(entry.getValue(), null, null, Collections.<PColumn>emptyList(), ts));
                            }
                        }
                    }
                    // See https://issues.apache.org/jira/browse/PHOENIX-3605
                    if (!table.isImmutableRows() || table.getImmutableStorageScheme() == ImmutableStorageScheme.ONE_CELL_PER_COLUMN) {
                        // Update empty key value column if necessary
                        for (ColumnRef droppedColumnRef : columnsToDrop) {
                            // Painful, but we need a TableRef with a pre-set timestamp to prevent attempts
                            // to get any updates from the region server.
                            // TODO: move this into PostDDLCompiler
                            // TODO: consider filtering mutable indexes here, but then the issue is that
                            // we'd need to force an update of the data row empty key value if a mutable
                            // secondary index is changing its empty key value family.
                            droppedColumnRef = droppedColumnRef.cloneAtTimestamp(ts);
                            TableRef droppedColumnTableRef = droppedColumnRef.getTableRef();
                            PColumn droppedColumn = droppedColumnRef.getColumn();
                            MutationPlan plan = compiler.compile(Collections.singletonList(droppedColumnTableRef), getNewEmptyColumnFamilyOrNull(droppedColumnTableRef.getTable(), droppedColumn), null, Collections.singletonList(droppedColumn), ts);
                            state = connection.getQueryServices().updateData(plan);
                        }
                    }
                    // Return the last MutationState
                    return state;
                }
                return new MutationState(0, 0, connection);
            } catch (ConcurrentTableMutationException e) {
                if (retried) {
                    throw e;
                }
                table = connection.getTable(new PTableKey(tenantId, fullTableName));
                retried = true;
            }
        }
    } finally {
        connection.setAutoCommit(wasAutoCommit);
    }
}
Also used : PhoenixConnection(org.apache.phoenix.jdbc.PhoenixConnection) PostDDLCompiler(org.apache.phoenix.compile.PostDDLCompiler) Properties(java.util.Properties) IndexMaintainer(org.apache.phoenix.index.IndexMaintainer) ArrayList(java.util.ArrayList) List(java.util.List) SQLExceptionInfo(org.apache.phoenix.exception.SQLExceptionInfo) MetaDataMutationResult(org.apache.phoenix.coprocessor.MetaDataProtocol.MetaDataMutationResult) ColumnResolver(org.apache.phoenix.compile.ColumnResolver) Pair(org.apache.hadoop.hbase.util.Pair) MutationPlan(org.apache.phoenix.compile.MutationPlan) Put(org.apache.hadoop.hbase.client.Put) MutationCode(org.apache.phoenix.coprocessor.MetaDataProtocol.MutationCode) TableName(org.apache.phoenix.parse.TableName) ColumnName(org.apache.phoenix.parse.ColumnName) SharedTableState(org.apache.phoenix.coprocessor.MetaDataProtocol.SharedTableState) MutationState(org.apache.phoenix.execute.MutationState) PUnsignedLong(org.apache.phoenix.schema.types.PUnsignedLong) PLong(org.apache.phoenix.schema.types.PLong) Mutation(org.apache.hadoop.hbase.client.Mutation) Map(java.util.Map) LinkedHashMap(java.util.LinkedHashMap) HashMap(java.util.HashMap) ColumnReference(org.apache.phoenix.hbase.index.covered.update.ColumnReference)
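A minimal sketch of how this dropColumn path is typically exercised from JDBC; the connection URL and identifiers are illustrative assumptions.

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.Statement;

public class DropColumnExample {
    public static void main(String[] args) throws Exception {
        try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost:2181");
             Statement stmt = conn.createStatement()) {
            // Parsed into a DropColumnStatement and routed to MetaDataClient.dropColumn.
            // With IF EXISTS, a missing column yields an empty MutationState instead of
            // a ColumnNotFoundException, matching the ifExists() checks above.
            stmt.execute("ALTER TABLE MY_SCHEMA.MY_TABLE DROP COLUMN IF EXISTS V2");
        }
    }
}

As the code above shows, dropping a column that an index is keyed on also drops that index, while dropping a merely covered column only removes the corresponding index column.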

Example 48 with Mutation

use of org.apache.hadoop.hbase.client.Mutation in project phoenix by apache.

the class MetaDataClient method createSchema.

public MutationState createSchema(CreateSchemaStatement create) throws SQLException {
    boolean wasAutoCommit = connection.getAutoCommit();
    connection.rollback();
    try {
        if (!SchemaUtil.isNamespaceMappingEnabled(null, connection.getQueryServices().getProps())) {
            throw new SQLExceptionInfo.Builder(SQLExceptionCode.CREATE_SCHEMA_NOT_ALLOWED).setSchemaName(create.getSchemaName()).build().buildException();
        }
        boolean isIfNotExists = create.isIfNotExists();
        validateSchema(create.getSchemaName());
        PSchema schema = new PSchema(create.getSchemaName());
        connection.setAutoCommit(false);
        List<Mutation> schemaMutations;
        try (PreparedStatement schemaUpsert = connection.prepareStatement(CREATE_SCHEMA)) {
            schemaUpsert.setString(1, schema.getSchemaName());
            schemaUpsert.setString(2, MetaDataClient.EMPTY_TABLE);
            schemaUpsert.execute();
            schemaMutations = connection.getMutationState().toMutations(null).next().getSecond();
            connection.rollback();
        }
        MetaDataMutationResult result = connection.getQueryServices().createSchema(schemaMutations, schema.getSchemaName());
        MutationCode code = result.getMutationCode();
        switch(code) {
            case SCHEMA_ALREADY_EXISTS:
                if (result.getSchema() != null) {
                    addSchemaToCache(result);
                }
                if (!isIfNotExists) {
                    throw new SchemaAlreadyExistsException(schema.getSchemaName());
                }
                break;
            case NEWER_SCHEMA_FOUND:
                throw new NewerSchemaAlreadyExistsException(schema.getSchemaName());
            default:
                result = new MetaDataMutationResult(code, schema, result.getMutationTime());
                addSchemaToCache(result);
        }
    } finally {
        connection.setAutoCommit(wasAutoCommit);
    }
    return new MutationState(0, 0, connection);
}
Also used : MutationState(org.apache.phoenix.execute.MutationState) PSchema(org.apache.phoenix.parse.PSchema) PreparedStatement(java.sql.PreparedStatement) Mutation(org.apache.hadoop.hbase.client.Mutation) SQLExceptionInfo(org.apache.phoenix.exception.SQLExceptionInfo) MetaDataMutationResult(org.apache.phoenix.coprocessor.MetaDataProtocol.MetaDataMutationResult) MutationCode(org.apache.phoenix.coprocessor.MetaDataProtocol.MutationCode)
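A minimal usage sketch for this path, assuming namespace mapping is enabled on both client and server; the property name below is the standard Phoenix setting as far as I can tell, and the URL and schema name are illustrative.

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.Statement;
import java.util.Properties;

public class CreateSchemaExample {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        // Without namespace mapping, createSchema throws CREATE_SCHEMA_NOT_ALLOWED;
        // this client-side setting must match the server configuration.
        props.setProperty("phoenix.schema.isNamespaceMappingEnabled", "true");
        try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost:2181", props);
             Statement stmt = conn.createStatement()) {
            // IF NOT EXISTS suppresses SchemaAlreadyExistsException, mirroring the
            // SCHEMA_ALREADY_EXISTS branch above.
            stmt.execute("CREATE SCHEMA IF NOT EXISTS MY_SCHEMA");
        }
    }
}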

Example 49 with Mutation

use of org.apache.hadoop.hbase.client.Mutation in project phoenix by apache.

the class MetaDataClient method alterIndex.

public MutationState alterIndex(AlterIndexStatement statement) throws SQLException {
    connection.rollback();
    boolean wasAutoCommit = connection.getAutoCommit();
    try {
        String dataTableName = statement.getTableName();
        String schemaName = statement.getTable().getName().getSchemaName();
        String indexName = statement.getTable().getName().getTableName();
        boolean isAsync = statement.isAsync();
        PIndexState newIndexState = statement.getIndexState();
        if (isAsync && newIndexState != PIndexState.REBUILD) {
            throw new SQLExceptionInfo.Builder(SQLExceptionCode.ASYNC_NOT_ALLOWED).setMessage(" ASYNC building of index is allowed only with REBUILD index state").setSchemaName(schemaName).setTableName(indexName).build().buildException();
        }
        if (newIndexState == PIndexState.REBUILD) {
            newIndexState = PIndexState.BUILDING;
        }
        connection.setAutoCommit(false);
        // Confirm index table is valid and up-to-date
        TableRef indexRef = FromCompiler.getResolver(statement, connection).getTables().get(0);
        PreparedStatement tableUpsert = null;
        try {
            if (newIndexState == PIndexState.ACTIVE) {
                tableUpsert = connection.prepareStatement(UPDATE_INDEX_STATE_TO_ACTIVE);
            } else {
                tableUpsert = connection.prepareStatement(UPDATE_INDEX_STATE);
            }
            tableUpsert.setString(1, connection.getTenantId() == null ? null : connection.getTenantId().getString());
            tableUpsert.setString(2, schemaName);
            tableUpsert.setString(3, indexName);
            tableUpsert.setString(4, newIndexState.getSerializedValue());
            tableUpsert.setLong(5, 0);
            if (newIndexState == PIndexState.ACTIVE) {
                tableUpsert.setLong(6, 0);
            }
            tableUpsert.execute();
        } finally {
            if (tableUpsert != null) {
                tableUpsert.close();
            }
        }
        Long timeStamp = indexRef.getTable().isTransactional() ? indexRef.getTimeStamp() : null;
        List<Mutation> tableMetadata = connection.getMutationState().toMutations(timeStamp).next().getSecond();
        connection.rollback();
        MetaDataMutationResult result = connection.getQueryServices().updateIndexState(tableMetadata, dataTableName);
        MutationCode code = result.getMutationCode();
        if (code == MutationCode.TABLE_NOT_FOUND) {
            throw new TableNotFoundException(schemaName, indexName);
        }
        if (code == MutationCode.UNALLOWED_TABLE_MUTATION) {
            throw new SQLExceptionInfo.Builder(SQLExceptionCode.INVALID_INDEX_STATE_TRANSITION).setMessage(" currentState=" + indexRef.getTable().getIndexState() + ". requestedState=" + newIndexState).setSchemaName(schemaName).setTableName(indexName).build().buildException();
        }
        if (code == MutationCode.TABLE_ALREADY_EXISTS) {
            if (result.getTable() != null) {
                // To accommodate connection-less update of index state
                addTableToCache(result);
                // Set so that we get the table below with the potentially modified rowKeyOrderOptimizable flag set
                indexRef.setTable(result.getTable());
                if (newIndexState == PIndexState.BUILDING && isAsync) {
                    try {
                        tableUpsert = connection.prepareStatement(UPDATE_INDEX_REBUILD_ASYNC_STATE);
                        tableUpsert.setString(1, connection.getTenantId() == null ? null : connection.getTenantId().getString());
                        tableUpsert.setString(2, schemaName);
                        tableUpsert.setString(3, indexName);
                        tableUpsert.setLong(4, result.getTable().getTimeStamp());
                        tableUpsert.execute();
                        connection.commit();
                    } finally {
                        if (tableUpsert != null) {
                            tableUpsert.close();
                        }
                    }
                }
            }
        }
        if (newIndexState == PIndexState.BUILDING && !isAsync) {
            PTable index = indexRef.getTable();
            // First delete any existing rows of the index
            Long scn = connection.getSCN();
            long ts = scn == null ? HConstants.LATEST_TIMESTAMP : scn;
            MutationPlan plan = new PostDDLCompiler(connection).compile(Collections.singletonList(indexRef), null, null, Collections.<PColumn>emptyList(), ts);
            connection.getQueryServices().updateData(plan);
            NamedTableNode dataTableNode = NamedTableNode.create(null, TableName.create(schemaName, dataTableName), Collections.<ColumnDef>emptyList());
            // Next rebuild the index
            connection.setAutoCommit(true);
            if (connection.getSCN() != null) {
                return buildIndexAtTimeStamp(index, dataTableNode);
            }
            TableRef dataTableRef = FromCompiler.getResolver(dataTableNode, connection).getTables().get(0);
            return buildIndex(index, dataTableRef);
        }
        return new MutationState(1, 1000, connection);
    } catch (TableNotFoundException e) {
        if (!statement.ifExists()) {
            throw e;
        }
        return new MutationState(0, 0, connection);
    } finally {
        connection.setAutoCommit(wasAutoCommit);
    }
}
Also used : PreparedStatement(java.sql.PreparedStatement) MutationPlan(org.apache.phoenix.compile.MutationPlan) PostDDLCompiler(org.apache.phoenix.compile.PostDDLCompiler) MutationCode(org.apache.phoenix.coprocessor.MetaDataProtocol.MutationCode) MutationState(org.apache.phoenix.execute.MutationState) PUnsignedLong(org.apache.phoenix.schema.types.PUnsignedLong) PLong(org.apache.phoenix.schema.types.PLong) NamedTableNode(org.apache.phoenix.parse.NamedTableNode) Mutation(org.apache.hadoop.hbase.client.Mutation) SQLExceptionInfo(org.apache.phoenix.exception.SQLExceptionInfo) MetaDataMutationResult(org.apache.phoenix.coprocessor.MetaDataProtocol.MetaDataMutationResult)
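A minimal sketch of the statements that drive alterIndex; identifiers and the URL are illustrative. The REBUILD state is translated to BUILDING above, and ASYNC is only legal together with REBUILD.

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.Statement;

public class AlterIndexExample {
    public static void main(String[] args) throws Exception {
        try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost:2181");
             Statement stmt = conn.createStatement()) {
            // Synchronous rebuild: the index rows are deleted and repopulated
            // before the statement returns (the !isAsync branch above).
            stmt.execute("ALTER INDEX MY_IDX ON MY_SCHEMA.MY_TABLE REBUILD");
            // Asynchronous rebuild: only the rebuild request is recorded; a separate
            // job (typically the IndexTool MapReduce job) performs the build later.
            stmt.execute("ALTER INDEX MY_IDX ON MY_SCHEMA.MY_TABLE REBUILD ASYNC");
        }
    }
}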

Example 50 with Mutation

use of org.apache.hadoop.hbase.client.Mutation in project phoenix by apache.

the class MetaDataClient method dropTable.

private MutationState dropTable(String schemaName, String tableName, String parentTableName, PTableType tableType, boolean ifExists, boolean cascade) throws SQLException {
    connection.rollback();
    boolean wasAutoCommit = connection.getAutoCommit();
    try {
        PName tenantId = connection.getTenantId();
        String tenantIdStr = tenantId == null ? null : tenantId.getString();
        byte[] key = SchemaUtil.getTableKey(tenantIdStr, schemaName, tableName);
        Long scn = connection.getSCN();
        long clientTimeStamp = scn == null ? HConstants.LATEST_TIMESTAMP : scn;
        List<Mutation> tableMetaData = Lists.newArrayListWithExpectedSize(2);
        Delete tableDelete = new Delete(key, clientTimeStamp);
        tableMetaData.add(tableDelete);
        boolean hasViewIndexTable = false;
        if (parentTableName != null) {
            byte[] linkKey = MetaDataUtil.getParentLinkKey(tenantIdStr, schemaName, parentTableName, tableName);
            Delete linkDelete = new Delete(linkKey, clientTimeStamp);
            tableMetaData.add(linkDelete);
        }
        MetaDataMutationResult result = connection.getQueryServices().dropTable(tableMetaData, tableType, cascade);
        MutationCode code = result.getMutationCode();
        PTable table = result.getTable();
        switch(code) {
            case TABLE_NOT_FOUND:
                if (!ifExists) {
                    throw new TableNotFoundException(schemaName, tableName);
                }
                break;
            case NEWER_TABLE_FOUND:
                throw new NewerTableAlreadyExistsException(schemaName, tableName, result.getTable());
            case UNALLOWED_TABLE_MUTATION:
                throw new SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_MUTATE_TABLE).setSchemaName(schemaName).setTableName(tableName).build().buildException();
            default:
                connection.removeTable(tenantId, SchemaUtil.getTableName(schemaName, tableName), parentTableName, result.getMutationTime());
                if (table != null) {
                    boolean dropMetaData = false;
                    long ts = (scn == null ? result.getMutationTime() : scn);
                    List<TableRef> tableRefs = Lists.newArrayListWithExpectedSize(2 + table.getIndexes().size());
                    connection.setAutoCommit(true);
                    if (tableType == PTableType.VIEW) {
                        for (PTable index : table.getIndexes()) {
                            tableRefs.add(new TableRef(null, index, ts, false));
                        }
                    } else {
                        dropMetaData = result.getTable().getViewIndexId() == null && connection.getQueryServices().getProps().getBoolean(DROP_METADATA_ATTRIB, DEFAULT_DROP_METADATA);
                        // All multi-tenant tables have a view index table, so no need to check in that case
                        if (parentTableName == null) {
                            // keep this true so stats are deleted whether or not a view index table actually exists
                            hasViewIndexTable = true;
                            MetaDataUtil.deleteViewIndexSequences(connection, table.getPhysicalName(), table.isNamespaceMapped());
                            byte[] viewIndexPhysicalName = MetaDataUtil.getViewIndexPhysicalName(table.getPhysicalName().getBytes());
                            if (!dropMetaData) {
                                // we only need to drop rows if a view index table actually exists
                                try (HBaseAdmin admin = connection.getQueryServices().getAdmin()) {
                                    hasViewIndexTable = admin.tableExists(viewIndexPhysicalName);
                                } catch (IOException e1) {
                                // swallow the exception; this existence check is not critical
                                }
                            }
                        }
                        if (tableType == PTableType.TABLE && (table.isMultiTenant() || hasViewIndexTable)) {
                            if (hasViewIndexTable) {
                                byte[] viewIndexPhysicalName = MetaDataUtil.getViewIndexPhysicalName(table.getPhysicalName().getBytes());
                                PTable viewIndexTable = new PTableImpl(null, SchemaUtil.getSchemaNameFromFullName(viewIndexPhysicalName), SchemaUtil.getTableNameFromFullName(viewIndexPhysicalName), ts, table.getColumnFamilies(), table.isNamespaceMapped(), table.getImmutableStorageScheme(), table.getEncodingScheme(), table.useStatsForParallelization());
                                tableRefs.add(new TableRef(null, viewIndexTable, ts, false));
                            }
                        }
                        tableRefs.add(new TableRef(null, table, ts, false));
                        // TODO: Let the standard mutable secondary index maintenance handle this?
                        for (PTable index : table.getIndexes()) {
                            tableRefs.add(new TableRef(null, index, ts, false));
                        }
                        deleteFromStatsTable(tableRefs, ts);
                    }
                    if (!dropMetaData) {
                        MutationPlan plan = new PostDDLCompiler(connection).compile(tableRefs, null, null, Collections.<PColumn>emptyList(), ts);
                        // Delete everything in the column. You'll still be able to do queries at earlier timestamps
                        return connection.getQueryServices().updateData(plan);
                    }
                }
                break;
        }
        return new MutationState(0, 0, connection);
    } finally {
        connection.setAutoCommit(wasAutoCommit);
    }
}
Also used : Delete(org.apache.hadoop.hbase.client.Delete) IOException(java.io.IOException) MutationPlan(org.apache.phoenix.compile.MutationPlan) PostDDLCompiler(org.apache.phoenix.compile.PostDDLCompiler) MutationCode(org.apache.phoenix.coprocessor.MetaDataProtocol.MutationCode) HBaseAdmin(org.apache.hadoop.hbase.client.HBaseAdmin) MutationState(org.apache.phoenix.execute.MutationState) PUnsignedLong(org.apache.phoenix.schema.types.PUnsignedLong) PLong(org.apache.phoenix.schema.types.PLong) Mutation(org.apache.hadoop.hbase.client.Mutation) MetaDataMutationResult(org.apache.phoenix.coprocessor.MetaDataProtocol.MetaDataMutationResult)
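A minimal client-side sketch for the dropTable path; the URL and names are illustrative assumptions. Whether the physical HBase rows are deleted here or only the metadata is removed depends on the setting read above via DROP_METADATA_ATTRIB (assumed to be phoenix.schema.dropMetaData).

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.Statement;

public class DropTableExample {
    public static void main(String[] args) throws Exception {
        try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost:2181");
             Statement stmt = conn.createStatement()) {
            // IF EXISTS maps to the ifExists flag: the TABLE_NOT_FOUND mutation code
            // is swallowed instead of raising TableNotFoundException.
            // CASCADE maps to the cascade flag and additionally drops dependent views.
            stmt.execute("DROP TABLE IF EXISTS MY_SCHEMA.MY_TABLE CASCADE");
        }
    }
}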

Aggregations

Mutation (org.apache.hadoop.hbase.client.Mutation): 139 usages
Put (org.apache.hadoop.hbase.client.Put): 53 usages
ArrayList (java.util.ArrayList): 46 usages
IOException (java.io.IOException): 35 usages
Delete (org.apache.hadoop.hbase.client.Delete): 32 usages
ImmutableBytesPtr (org.apache.phoenix.hbase.index.util.ImmutableBytesPtr): 31 usages
List (java.util.List): 28 usages
Cell (org.apache.hadoop.hbase.Cell): 25 usages
Pair (org.apache.hadoop.hbase.util.Pair): 23 usages
MetaDataMutationResult (org.apache.phoenix.coprocessor.MetaDataProtocol.MetaDataMutationResult): 23 usages
HashMap (java.util.HashMap): 19 usages
PTable (org.apache.phoenix.schema.PTable): 18 usages
DoNotRetryIOException (org.apache.hadoop.hbase.DoNotRetryIOException): 17 usages
MetaDataResponse (org.apache.phoenix.coprocessor.generated.MetaDataProtos.MetaDataResponse): 15 usages
Region (org.apache.hadoop.hbase.regionserver.Region): 14 usages
RowLock (org.apache.hadoop.hbase.regionserver.Region.RowLock): 14 usages
Test (org.junit.Test): 14 usages
MutationCode (org.apache.phoenix.coprocessor.MetaDataProtocol.MutationCode): 13 usages
HTableInterface (org.apache.hadoop.hbase.client.HTableInterface): 12 usages
MutationProto (org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto): 12 usages