
Example 1 with PColumn

Use of org.apache.phoenix.schema.PColumn in project phoenix by apache.

From class DropColumnIT, method testDropCol.

@Test
public void testDropCol() throws Exception {
    String indexTableName = generateUniqueName();
    String dataTableName = generateUniqueName();
    String localIndexTableName = "LOCAL_" + indexTableName;
    try (Connection conn = getConnection()) {
        conn.setAutoCommit(false);
        conn.createStatement().execute("CREATE TABLE " + dataTableName + " (k VARCHAR NOT NULL PRIMARY KEY, v1 VARCHAR, v2 VARCHAR, v3 VARCHAR) " + tableDDLOptions);
        // create one global and one local index
        conn.createStatement().execute("CREATE INDEX " + indexTableName + " ON " + dataTableName + " (v1) INCLUDE (v2, v3)");
        conn.createStatement().execute("CREATE LOCAL INDEX " + localIndexTableName + " ON " + dataTableName + " (v1) INCLUDE (v2, v3)");
        // upsert a single row
        PreparedStatement stmt = conn.prepareStatement("UPSERT INTO " + dataTableName + " VALUES(?,?,?,?)");
        stmt.setString(1, "a");
        stmt.setString(2, "x");
        stmt.setString(3, "1");
        stmt.setString(4, "2");
        stmt.execute();
        conn.commit();
        // verify v2 exists in the data table
        PTable dataTable = conn.unwrap(PhoenixConnection.class).getTable(new PTableKey(null, dataTableName));
        PColumn dataColumn = dataTable.getColumnForColumnName("V2");
        byte[] dataCq = dataColumn.getColumnQualifierBytes();
        // verify v2 exists in the global index table
        PTable globalIndexTable = conn.unwrap(PhoenixConnection.class).getTable(new PTableKey(null, indexTableName));
        PColumn globalIndexCol = globalIndexTable.getColumnForColumnName("0:V2");
        byte[] globalIndexCq = globalIndexCol.getColumnQualifierBytes();
        // verify v2 exists in the local index table
        PTable localIndexTable = conn.unwrap(PhoenixConnection.class).getTable(new PTableKey(null, localIndexTableName));
        PColumn localIndexCol = localIndexTable.getColumnForColumnName("0:V2");
        byte[] localIndexCq = localIndexCol.getColumnQualifierBytes();
        verifyColValue(indexTableName, dataTableName, conn, dataTable, dataColumn, dataCq, globalIndexTable, globalIndexCol, globalIndexCq, localIndexTable, localIndexCol, localIndexCq);
        // drop v2 column
        conn.createStatement().execute("ALTER TABLE " + dataTableName + " DROP COLUMN v2 ");
        conn.createStatement().execute("SELECT * FROM " + dataTableName);
        // verify that the column was dropped from the data table
        dataTable = conn.unwrap(PhoenixConnection.class).getTable(new PTableKey(null, dataTableName));
        try {
            dataTable.getColumnForColumnName("V2");
            fail("Column V2 should have been dropped from data table");
        } catch (ColumnNotFoundException e) {
        }
        // verify that the column was dropped from the global index table
        globalIndexTable = conn.unwrap(PhoenixConnection.class).getTable(new PTableKey(null, indexTableName));
        try {
            globalIndexTable.getColumnForColumnName("V2");
            fail("Column V2 should have been dropped from global index table");
        } catch (ColumnNotFoundException e) {
        }
        // verify that the column was dropped from the local index table
        localIndexTable = conn.unwrap(PhoenixConnection.class).getTable(new PTableKey(null, localIndexTableName));
        try {
            localIndexTable.getColumnForColumnName("V2");
            fail("Column V2 should have been dropped from global index table");
        } catch (ColumnNotFoundException e) {
        }
        if (mutable || !columnEncoded) {
            byte[] key = Bytes.toBytes("a");
            Scan scan = new Scan();
            scan.setRaw(true);
            scan.setStartRow(key);
            scan.setStopRow(key);
            HTable table = (HTable) conn.unwrap(PhoenixConnection.class).getQueryServices().getTable(dataTableName.getBytes());
            ResultScanner results = table.getScanner(scan);
            Result result = results.next();
            assertNotNull(result);
            assertEquals("data table column value should have been deleted", KeyValue.Type.DeleteColumn.getCode(), result.getColumn(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, dataCq).get(0).getTypeByte());
            assertNull(results.next());
            // key value for v2 should have been deleted from the global index table
            scan = new Scan();
            scan.setRaw(true);
            table = (HTable) conn.unwrap(PhoenixConnection.class).getQueryServices().getTable(indexTableName.getBytes());
            results = table.getScanner(scan);
            result = results.next();
            assertNotNull(result);
            assertEquals("data table column value should have been deleted", KeyValue.Type.DeleteColumn.getCode(), result.getColumn(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, globalIndexCq).get(0).getTypeByte());
            assertNull(results.next());
            // key value for v2 should have been deleted from the local index table
            scan = new Scan();
            scan.setRaw(true);
            scan.addFamily(QueryConstants.DEFAULT_LOCAL_INDEX_COLUMN_FAMILY_BYTES);
            table = (HTable) conn.unwrap(PhoenixConnection.class).getQueryServices().getTable(dataTableName.getBytes());
            results = table.getScanner(scan);
            result = results.next();
            assertNotNull(result);
            assertEquals("data table col" + "umn value should have been deleted", KeyValue.Type.DeleteColumn.getCode(), result.getColumn(QueryConstants.DEFAULT_LOCAL_INDEX_COLUMN_FAMILY_BYTES, localIndexCq).get(0).getTypeByte());
            assertNull(results.next());
        } else {
            // verify we don't issue deletes when we drop a column from an immutable encoded table
            verifyColValue(indexTableName, dataTableName, conn, dataTable, dataColumn, dataCq, globalIndexTable, globalIndexCol, globalIndexCq, localIndexTable, localIndexCol, localIndexCq);
        }
    }
}
Also used : PhoenixConnection(org.apache.phoenix.jdbc.PhoenixConnection) ResultScanner(org.apache.hadoop.hbase.client.ResultScanner) Connection(java.sql.Connection) PreparedStatement(java.sql.PreparedStatement) HTable(org.apache.hadoop.hbase.client.HTable) PTable(org.apache.phoenix.schema.PTable) Result(org.apache.hadoop.hbase.client.Result) PColumn(org.apache.phoenix.schema.PColumn) ColumnNotFoundException(org.apache.phoenix.schema.ColumnNotFoundException) Scan(org.apache.hadoop.hbase.client.Scan) PTableKey(org.apache.phoenix.schema.PTableKey) Test(org.junit.Test)
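
As a standalone illustration of the raw-scan checks in this test, here is a minimal sketch assuming the HBase 1.x client API; the helper name isColumnDeleted and its table/key/cf/cq parameters are placeholders, not names from the test:

import java.io.IOException;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;

// Returns true if the newest cell for cf:cq in the given row is a DeleteColumn
// marker, i.e. the column value was deleted rather than merely rewritten.
static boolean isColumnDeleted(HTable table, byte[] key, byte[] cf, byte[] cq) throws IOException {
    Scan scan = new Scan();
    // a raw scan returns delete markers to the client instead of applying them
    scan.setRaw(true);
    scan.setStartRow(key);
    // start == stop is treated as a single-row (get-style) scan
    scan.setStopRow(key);
    try (ResultScanner results = table.getScanner(scan)) {
        Result result = results.next();
        if (result == null) {
            return false;
        }
        Cell cell = result.getColumnLatestCell(cf, cq);
        return cell != null && cell.getTypeByte() == KeyValue.Type.DeleteColumn.getCode();
    }
}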

Example 2 with PColumn

Use of org.apache.phoenix.schema.PColumn in project phoenix by apache.

From class BaseQueryPlan, method iterator.

public final ResultIterator iterator(final List<? extends SQLCloseable> dependencies, ParallelScanGrouper scanGrouper, Scan scan) throws SQLException {
    if (scan == null) {
        scan = context.getScan();
    }
    /*
     * For aggregate queries, we still need to let the AggregationPlan
     * proceed so that we can return proper aggregates even if there are
     * no rows to be scanned.
     */
    if (context.getScanRanges() == ScanRanges.NOTHING && !getStatement().isAggregate()) {
        return ResultIterator.EMPTY_ITERATOR;
    }
    if (tableRef == TableRef.EMPTY_TABLE_REF) {
        return newIterator(scanGrouper, scan);
    }
    // Set miscellaneous scan attributes. This is the last chance to set them before we
    // clone the scan for each parallelized chunk.
    TableRef tableRef = context.getCurrentTable();
    PTable table = tableRef.getTable();
    if (dynamicFilter != null) {
        WhereCompiler.compile(context, statement, null, Collections.singletonList(dynamicFilter), false, null);
    }
    if (OrderBy.REV_ROW_KEY_ORDER_BY.equals(orderBy)) {
        ScanUtil.setReversed(scan);
        // Hack for working around PHOENIX-3121 and HBASE-16296.
        // TODO: remove once PHOENIX-3121 and/or HBASE-16296 are fixed.
        int scannerCacheSize = context.getStatement().getFetchSize();
        if (limit != null && limit % scannerCacheSize == 0) {
            scan.setCaching(scannerCacheSize + 1);
        }
    }
    if (statement.getHint().hasHint(Hint.SMALL)) {
        scan.setSmall(true);
    }
    PhoenixConnection connection = context.getConnection();
    // set read consistency
    if (table.getType() != PTableType.SYSTEM) {
        scan.setConsistency(connection.getConsistency());
    }
    // TODO fix this in PHOENIX-2415 Support ROW_TIMESTAMP with transactional tables
    if (!table.isTransactional()) {
        // Get the time range of row_timestamp column
        TimeRange rowTimestampRange = context.getScanRanges().getRowTimestampRange();
        // Get the already existing time range on the scan.
        TimeRange scanTimeRange = scan.getTimeRange();
        Long scn = connection.getSCN();
        if (scn == null) {
            // If we haven't resolved the time at the beginning of compilation, don't
            // force the lookup on the server, but use HConstants.LATEST_TIMESTAMP instead.
            scn = tableRef.getTimeStamp();
            if (scn == QueryConstants.UNSET_TIMESTAMP) {
                scn = HConstants.LATEST_TIMESTAMP;
            }
        }
        try {
            TimeRange timeRangeToUse = ScanUtil.intersectTimeRange(rowTimestampRange, scanTimeRange, scn);
            if (timeRangeToUse == null) {
                return ResultIterator.EMPTY_ITERATOR;
            }
            scan.setTimeRange(timeRangeToUse.getMin(), timeRangeToUse.getMax());
        } catch (IOException e) {
            throw new RuntimeException(e);
        }
    }
    byte[] tenantIdBytes;
    if (table.isMultiTenant()) {
        tenantIdBytes = connection.getTenantId() == null ? null : ScanUtil.getTenantIdBytes(table.getRowKeySchema(), table.getBucketNum() != null, connection.getTenantId(), table.getViewIndexId() != null);
    } else {
        tenantIdBytes = connection.getTenantId() == null ? null : connection.getTenantId().getBytes();
    }
    ScanUtil.setTenantId(scan, tenantIdBytes);
    String customAnnotations = LogUtil.customAnnotationsToString(connection);
    ScanUtil.setCustomAnnotations(scan, customAnnotations == null ? null : customAnnotations.getBytes());
    // Set local index related scan attributes. 
    if (table.getIndexType() == IndexType.LOCAL) {
        ScanUtil.setLocalIndex(scan);
        Set<PColumn> dataColumns = context.getDataColumns();
        // If any data columns need to be joined back from the data table,
        // set up the scan attributes that drive the join.
        if (!dataColumns.isEmpty()) {
            PTable parentTable = context.getCurrentTable().getTable();
            String parentSchemaName = parentTable.getParentSchemaName().getString();
            String parentTableName = parentTable.getParentTableName().getString();
            final ParseNodeFactory FACTORY = new ParseNodeFactory();
            // TODO: is it necessary to re-resolve the table?
            TableRef dataTableRef = FromCompiler.getResolver(FACTORY.namedTable(null, TableName.create(parentSchemaName, parentTableName)), context.getConnection()).resolveTable(parentSchemaName, parentTableName);
            PTable dataTable = dataTableRef.getTable();
            // Set data columns to be join back from data table.
            serializeDataTableColumnsToJoin(scan, dataColumns, dataTable);
            KeyValueSchema schema = ProjectedColumnExpression.buildSchema(dataColumns);
            // Set key value schema of the data columns.
            serializeSchemaIntoScan(scan, schema);
            // Set index maintainer of the local index.
            serializeIndexMaintainerIntoScan(scan, dataTable);
            // Set view constants if exists.
            serializeViewConstantsIntoScan(scan, dataTable);
        }
    }
    if (LOG.isDebugEnabled()) {
        LOG.debug(LogUtil.addCustomAnnotations("Scan ready for iteration: " + scan, connection));
    }
    ResultIterator iterator = newIterator(scanGrouper, scan);
    iterator = dependencies.isEmpty() ? iterator : new DelegateResultIterator(iterator) {

        @Override
        public void close() throws SQLException {
            try {
                super.close();
            } finally {
                SQLCloseables.closeAll(dependencies);
            }
        }
    };
    if (LOG.isDebugEnabled()) {
        LOG.debug(LogUtil.addCustomAnnotations("Iterator ready: " + iterator, connection));
    }
    // wrap the iterator so we start/end tracing as we expect
    TraceScope scope = Tracing.startNewSpan(context.getConnection(), "Creating basic query for " + getPlanSteps(iterator));
    return (scope.getSpan() != null) ? new TracingIterator(scope, iterator) : iterator;
}
Also used : PhoenixConnection(org.apache.phoenix.jdbc.PhoenixConnection) TracingIterator(org.apache.phoenix.trace.TracingIterator) ResultIterator(org.apache.phoenix.iterate.ResultIterator) DelegateResultIterator(org.apache.phoenix.iterate.DelegateResultIterator) TraceScope(org.apache.htrace.TraceScope) IOException(java.io.IOException) PTable(org.apache.phoenix.schema.PTable) Hint(org.apache.phoenix.parse.HintNode.Hint) PColumn(org.apache.phoenix.schema.PColumn) TimeRange(org.apache.hadoop.hbase.io.TimeRange) KeyValueSchema(org.apache.phoenix.schema.KeyValueSchema) TableRef(org.apache.phoenix.schema.TableRef) ParseNodeFactory(org.apache.phoenix.parse.ParseNodeFactory)
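
The time-range handling above delegates to ScanUtil.intersectTimeRange; the following is a simplified sketch of intersection semantics consistent with its use here (clamping the upper bound at scn), not Phoenix's actual implementation, which also handles unset ranges:

import java.io.IOException;
import org.apache.hadoop.hbase.io.TimeRange;

// Intersect the row_timestamp range with the scan's existing time range,
// bounding the result at scn. A null return means the ranges are disjoint,
// letting iterator() short-circuit to an empty result, as above.
static TimeRange intersect(TimeRange rowTimestampRange, TimeRange scanTimeRange, long scn) throws IOException {
    long min = Math.max(rowTimestampRange.getMin(), scanTimeRange.getMin());
    long max = Math.min(Math.min(rowTimestampRange.getMax(), scanTimeRange.getMax()), scn);
    // TimeRange maximums are exclusive, so the intersection is empty when min >= max
    return min < max ? new TimeRange(min, max) : null;
}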

Example 3 with PColumn

Use of org.apache.phoenix.schema.PColumn in project phoenix by apache.

From class BaseQueryPlan, method serializeViewConstantsIntoScan.

private void serializeViewConstantsIntoScan(Scan scan, PTable dataTable) {
    int dataPosOffset = (dataTable.getBucketNum() != null ? 1 : 0) + (dataTable.isMultiTenant() ? 1 : 0);
    int nViewConstants = 0;
    if (dataTable.getType() == PTableType.VIEW) {
        ImmutableBytesWritable ptr = new ImmutableBytesWritable();
        List<PColumn> dataPkColumns = dataTable.getPKColumns();
        for (int i = dataPosOffset; i < dataPkColumns.size(); i++) {
            PColumn dataPKColumn = dataPkColumns.get(i);
            if (dataPKColumn.getViewConstant() != null) {
                nViewConstants++;
            }
        }
        if (nViewConstants > 0) {
            byte[][] viewConstants = new byte[nViewConstants][];
            int j = 0;
            for (int i = dataPosOffset; i < dataPkColumns.size(); i++) {
                PColumn dataPkColumn = dataPkColumns.get(i);
                if (dataPkColumn.getViewConstant() != null) {
                    if (IndexUtil.getViewConstantValue(dataPkColumn, ptr)) {
                        viewConstants[j++] = ByteUtil.copyKeyBytesIfNecessary(ptr);
                    } else {
                        throw new IllegalStateException();
                    }
                }
            }
            serializeViewConstantsIntoScan(viewConstants, scan);
        }
    }
}
Also used : PColumn(org.apache.phoenix.schema.PColumn) ImmutableBytesWritable(org.apache.hadoop.hbase.io.ImmutableBytesWritable) Hint(org.apache.phoenix.parse.HintNode.Hint)
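
View constants come from equality conditions a view places on PK columns (for example, a view defined with WHERE EVENT_TYPE = 'E1' pins that PK column to a constant; the view and column names here are made up). A minimal sketch of collecting them, omitting the IndexUtil.getViewConstantValue normalization applied above:

import java.util.ArrayList;
import java.util.List;
import org.apache.phoenix.schema.PColumn;
import org.apache.phoenix.schema.PTable;

// Gather the raw view-constant bytes from a view's PK columns; ordinary PK
// columns (no pinned value) return null from getViewConstant().
static List<byte[]> collectViewConstants(PTable view) {
    List<byte[]> constants = new ArrayList<>();
    for (PColumn pkColumn : view.getPKColumns()) {
        byte[] constant = pkColumn.getViewConstant();
        if (constant != null) {
            constants.add(constant);
        }
    }
    return constants;
}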

Example 4 with PColumn

Use of org.apache.phoenix.schema.PColumn in project phoenix by apache.

From class BaseQueryPlan, method serializeDataTableColumnsToJoin.

private void serializeDataTableColumnsToJoin(Scan scan, Set<PColumn> dataColumns, PTable dataTable) {
    ByteArrayOutputStream stream = new ByteArrayOutputStream();
    try {
        DataOutputStream output = new DataOutputStream(stream);
        boolean storeColsInSingleCell = dataTable.getImmutableStorageScheme() == ImmutableStorageScheme.SINGLE_CELL_ARRAY_WITH_OFFSETS;
        if (storeColsInSingleCell) {
            // if storeColsInSingleCell is true all columns of a given column family are stored in a single cell
            scan.setAttribute(BaseScannerRegionObserver.COLUMNS_STORED_IN_SINGLE_CELL, QueryConstants.EMPTY_COLUMN_VALUE_BYTES);
        }
        WritableUtils.writeVInt(output, dataColumns.size());
        for (PColumn column : dataColumns) {
            byte[] cf = column.getFamilyName().getBytes();
            byte[] cq = column.getColumnQualifierBytes();
            Bytes.writeByteArray(output, cf);
            Bytes.writeByteArray(output, cq);
        }
        scan.setAttribute(BaseScannerRegionObserver.DATA_TABLE_COLUMNS_TO_JOIN, stream.toByteArray());
    } catch (IOException e) {
        throw new RuntimeException(e);
    } finally {
        try {
            stream.close();
        } catch (IOException e) {
            throw new RuntimeException(e);
        }
    }
}
Also used : PColumn(org.apache.phoenix.schema.PColumn) DataOutputStream(java.io.DataOutputStream) ByteArrayOutputStream(java.io.ByteArrayOutputStream) IOException(java.io.IOException)
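
The serialized attribute is a vint column count followed by (family, qualifier) byte-array pairs. A hedged sketch of reading it back in the same order; the helper is illustrative, not Phoenix's server-side reader:

import java.io.ByteArrayInputStream;
import java.io.DataInputStream;
import java.io.IOException;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.io.WritableUtils;

// Decode the DATA_TABLE_COLUMNS_TO_JOIN attribute into <family, qualifier>
// pairs, mirroring the write order in serializeDataTableColumnsToJoin.
static byte[][][] readColumnsToJoin(byte[] attribute) throws IOException {
    DataInputStream input = new DataInputStream(new ByteArrayInputStream(attribute));
    int count = WritableUtils.readVInt(input);
    byte[][][] pairs = new byte[count][][];
    for (int i = 0; i < count; i++) {
        byte[] cf = Bytes.readByteArray(input);
        byte[] cq = Bytes.readByteArray(input);
        pairs[i] = new byte[][] { cf, cq };
    }
    return pairs;
}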

Example 5 with PColumn

Use of org.apache.phoenix.schema.PColumn in project phoenix by apache.

From class FormatToBytesWritableMapper, method initColumnIndexes.

/*
 * Map each unique <family, name> pair to an index. The table name is part of
 * the TableRowkey, so we do not care about it here.
 */
private void initColumnIndexes() throws SQLException {
    columnIndexes = new TreeMap<>(Bytes.BYTES_COMPARATOR);
    int columnIndex = 0;
    for (int index = 0; index < logicalNames.size(); index++) {
        PTable table = PhoenixRuntime.getTable(conn, logicalNames.get(index));
        if (!table.getImmutableStorageScheme().equals(ImmutableStorageScheme.ONE_CELL_PER_COLUMN)) {
            List<PColumnFamily> cfs = table.getColumnFamilies();
            for (int i = 0; i < cfs.size(); i++) {
                byte[] family = cfs.get(i).getName().getBytes();
                byte[] cfn = Bytes.add(family, QueryConstants.NAMESPACE_SEPARATOR_BYTES, QueryConstants.SINGLE_KEYVALUE_COLUMN_QUALIFIER_BYTES);
                columnIndexes.put(cfn, columnIndex);
                columnIndex++;
            }
        } else {
            List<PColumn> cls = table.getColumns();
            for (int i = 0; i < cls.size(); i++) {
                PColumn c = cls.get(i);
                byte[] family = new byte[0];
                byte[] cq;
                if (!SchemaUtil.isPKColumn(c)) {
                    family = c.getFamilyName().getBytes();
                    cq = c.getColumnQualifierBytes();
                } else {
                    cq = c.getName().getBytes();
                }
                byte[] cfn = Bytes.add(family, QueryConstants.NAMESPACE_SEPARATOR_BYTES, cq);
                if (!columnIndexes.containsKey(cfn)) {
                    columnIndexes.put(cfn, columnIndex);
                    columnIndex++;
                }
            }
            byte[] emptyColumnFamily = SchemaUtil.getEmptyColumnFamily(table);
            byte[] emptyKeyValue = EncodedColumnsUtil.getEmptyKeyValueInfo(table).getFirst();
            byte[] cfn = Bytes.add(emptyColumnFamily, QueryConstants.NAMESPACE_SEPARATOR_BYTES, emptyKeyValue);
            columnIndexes.put(cfn, columnIndex);
            columnIndex++;
        }
    }
}
Also used : PColumn(org.apache.phoenix.schema.PColumn) PColumnFamily(org.apache.phoenix.schema.PColumnFamily) PTable(org.apache.phoenix.schema.PTable)
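
Every key in columnIndexes is built as family + NAMESPACE_SEPARATOR_BYTES + qualifier, so lookups must rebuild the key the same way. A minimal sketch with a made-up helper name:

import java.util.TreeMap;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.phoenix.query.QueryConstants;

// Resolve the index registered by initColumnIndexes for a <family, qualifier>
// pair; returns null when the pair was never registered.
static Integer lookupColumnIndex(TreeMap<byte[], Integer> columnIndexes, byte[] family, byte[] qualifier) {
    byte[] key = Bytes.add(family, QueryConstants.NAMESPACE_SEPARATOR_BYTES, qualifier);
    return columnIndexes.get(key);
}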

Aggregations

PColumn (org.apache.phoenix.schema.PColumn): 101 usages
PTable (org.apache.phoenix.schema.PTable): 59 usages
PhoenixConnection (org.apache.phoenix.jdbc.PhoenixConnection): 26 usages
Expression (org.apache.phoenix.expression.Expression): 21 usages
TableRef (org.apache.phoenix.schema.TableRef): 20 usages
ArrayList (java.util.ArrayList): 19 usages
PName (org.apache.phoenix.schema.PName): 18 usages
ImmutableBytesWritable (org.apache.hadoop.hbase.io.ImmutableBytesWritable): 17 usages
LiteralExpression (org.apache.phoenix.expression.LiteralExpression): 17 usages
ImmutableBytesPtr (org.apache.phoenix.hbase.index.util.ImmutableBytesPtr): 17 usages
ColumnRef (org.apache.phoenix.schema.ColumnRef): 17 usages
Hint (org.apache.phoenix.parse.HintNode.Hint): 14 usages
PTableKey (org.apache.phoenix.schema.PTableKey): 14 usages
ColumnNotFoundException (org.apache.phoenix.schema.ColumnNotFoundException): 13 usages
PColumnFamily (org.apache.phoenix.schema.PColumnFamily): 13 usages
PSmallint (org.apache.phoenix.schema.types.PSmallint): 13 usages
SQLException (java.sql.SQLException): 12 usages
ProjectedColumnExpression (org.apache.phoenix.expression.ProjectedColumnExpression): 12 usages
PColumnImpl (org.apache.phoenix.schema.PColumnImpl): 12 usages
Map (java.util.Map): 11 usages