
Example 61 with PColumn

use of org.apache.phoenix.schema.PColumn in project phoenix by apache.

the class MutationState method generateMutations.

private void generateMutations(final TableRef tableRef, long timestamp, final Map<ImmutableBytesPtr, RowMutationState> values, final List<Mutation> mutationList, final List<Mutation> mutationsPertainingToIndex) {
    final PTable table = tableRef.getTable();
    boolean tableWithRowTimestampCol = table.getRowTimestampColPos() != -1;
    Iterator<Map.Entry<ImmutableBytesPtr, RowMutationState>> iterator = values.entrySet().iterator();
    long timestampToUse = timestamp;
    Map<ImmutableBytesPtr, RowMutationState> modifiedValues = Maps.newHashMap();
    while (iterator.hasNext()) {
        Map.Entry<ImmutableBytesPtr, RowMutationState> rowEntry = iterator.next();
        byte[] onDupKeyBytes = rowEntry.getValue().getOnDupKeyBytes();
        boolean hasOnDupKey = onDupKeyBytes != null;
        ImmutableBytesPtr key = rowEntry.getKey();
        RowMutationState state = rowEntry.getValue();
        if (tableWithRowTimestampCol) {
            RowTimestampColInfo rowTsColInfo = state.getRowTimestampColInfo();
            if (rowTsColInfo.useServerTimestamp()) {
                // since we are about to modify the byte[] stored in key (which changes its hashcode)
                // we need to remove the entry from the values map and add a new entry with the modified byte[]
                modifiedValues.put(key, state);
                iterator.remove();
                // regenerate the key with this timestamp.
                key = getNewRowKeyWithRowTimestamp(key, timestampToUse, table);
            } else {
                if (rowTsColInfo.getTimestamp() != null) {
                    timestampToUse = rowTsColInfo.getTimestamp();
                }
            }
        }
        PRow row = table.newRow(connection.getKeyValueBuilder(), timestampToUse, key, hasOnDupKey);
        List<Mutation> rowMutations, rowMutationsPertainingToIndex;
        if (rowEntry.getValue().getColumnValues() == PRow.DELETE_MARKER) {
            // A value of DELETE_MARKER means delete the entire row.
            row.delete();
            rowMutations = row.toRowMutations();
            // Row deletes for index tables are processed by running a re-written query
            // against the index table (as this allows for flexibility in being able to
            // delete rows).
            rowMutationsPertainingToIndex = Collections.emptyList();
        } else {
            for (Map.Entry<PColumn, byte[]> valueEntry : rowEntry.getValue().getColumnValues().entrySet()) {
                row.setValue(valueEntry.getKey(), valueEntry.getValue());
            }
            rowMutations = row.toRowMutations();
            // TODO: use our ServerCache
            if (hasOnDupKey) {
                for (Mutation mutation : rowMutations) {
                    mutation.setAttribute(PhoenixIndexBuilder.ATOMIC_OP_ATTRIB, onDupKeyBytes);
                }
            }
            rowMutationsPertainingToIndex = rowMutations;
        }
        mutationList.addAll(rowMutations);
        if (connection.isReplayMutations()) {
            // Correct index rows on replay.
            for (Mutation mutation : rowMutations) {
                mutation.setAttribute(BaseScannerRegionObserver.IGNORE_NEWER_MUTATIONS, PDataType.TRUE_BYTES);
            }
        }
        if (mutationsPertainingToIndex != null) {
            mutationsPertainingToIndex.addAll(rowMutationsPertainingToIndex);
        }
    }
    values.putAll(modifiedValues);
}
Also used : ImmutableBytesPtr(org.apache.phoenix.hbase.index.util.ImmutableBytesPtr) PTable(org.apache.phoenix.schema.PTable) PRow(org.apache.phoenix.schema.PRow) PColumn(org.apache.phoenix.schema.PColumn) Entry(java.util.Map.Entry) Mutation(org.apache.hadoop.hbase.client.Mutation) Map(java.util.Map)
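A note on the pattern above: the identity check against PRow.DELETE_MARKER is what separates deletes from upserts, since a row's column-value map either holds real PColumn-to-bytes entries or is that sentinel. A minimal sketch of the same branch, with toMutations as a hypothetical helper rather than Phoenix API:

import java.util.List;
import java.util.Map;

import org.apache.hadoop.hbase.client.Mutation;
import org.apache.phoenix.schema.PColumn;
import org.apache.phoenix.schema.PRow;

final class RowMutationSketch {
    // Hypothetical helper mirroring the delete-vs-upsert branch above.
    static List<Mutation> toMutations(PRow row, Map<PColumn, byte[]> columnValues) {
        if (columnValues == PRow.DELETE_MARKER) {
            // Identity comparison on purpose: DELETE_MARKER is a sentinel object,
            // not a map that merely happens to be empty.
            row.delete();
        } else {
            for (Map.Entry<PColumn, byte[]> e : columnValues.entrySet()) {
                row.setValue(e.getKey(), e.getValue());
            }
        }
        return row.toRowMutations();
    }
}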

Example 62 with PColumn

use of org.apache.phoenix.schema.PColumn in project phoenix by apache.

the class MutationState method joinMutationState.

private void joinMutationState(TableRef tableRef, Map<ImmutableBytesPtr, RowMutationState> srcRows, Map<TableRef, Map<ImmutableBytesPtr, RowMutationState>> dstMutations) {
    PTable table = tableRef.getTable();
    boolean isIndex = table.getType() == PTableType.INDEX;
    boolean incrementRowCount = dstMutations == this.mutations;
    Map<ImmutableBytesPtr, RowMutationState> existingRows = dstMutations.put(tableRef, srcRows);
    if (existingRows != null) {
        // Loop through new rows and replace existing with new
        for (Map.Entry<ImmutableBytesPtr, RowMutationState> rowEntry : srcRows.entrySet()) {
            // Replace existing row with new row
            RowMutationState existingRowMutationState = existingRows.put(rowEntry.getKey(), rowEntry.getValue());
            if (existingRowMutationState != null) {
                Map<PColumn, byte[]> existingValues = existingRowMutationState.getColumnValues();
                if (existingValues != PRow.DELETE_MARKER) {
                    Map<PColumn, byte[]> newRow = rowEntry.getValue().getColumnValues();
                    // if new row is PRow.DELETE_MARKER, it means delete, and we don't need to merge it with existing row. 
                    if (newRow != PRow.DELETE_MARKER) {
                        // Merge existing column values with new column values
                        existingRowMutationState.join(rowEntry.getValue());
                        // Put the merged state back in place of the new row.
                        existingRows.put(rowEntry.getKey(), existingRowMutationState);
                    }
                }
            } else {
                if (incrementRowCount && !isIndex) {
                    // Don't count index rows in row count
                    numRows++;
                }
            }
        }
        // Put the existing one back now that it's merged
        dstMutations.put(tableRef, existingRows);
    } else {
        // Size new map at batch size as that's what it'll likely grow to.
        Map<ImmutableBytesPtr, RowMutationState> newRows = Maps.newHashMapWithExpectedSize(connection.getMutateBatchSize());
        newRows.putAll(srcRows);
        dstMutations.put(tableRef, newRows);
        if (incrementRowCount && !isIndex) {
            numRows += srcRows.size();
        }
    }
}
Also used : PColumn(org.apache.phoenix.schema.PColumn) ImmutableBytesPtr(org.apache.phoenix.hbase.index.util.ImmutableBytesPtr) Map(java.util.Map) PTable(org.apache.phoenix.schema.PTable)
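The merge rule here is: a DELETE_MARKER on either side short-circuits the merge (the newer state simply replaces the row), otherwise RowMutationState.join folds the new column values over the existing ones. An illustrative sketch of that rule over plain maps, where merge is a hypothetical stand-in and not the RowMutationState.join implementation:

import java.util.HashMap;
import java.util.Map;

import org.apache.phoenix.schema.PColumn;
import org.apache.phoenix.schema.PRow;

final class MergeSketch {
    // Hypothetical illustration only: a pending delete replaces the row
    // wholesale; otherwise newer column values shadow the existing ones.
    static Map<PColumn, byte[]> merge(Map<PColumn, byte[]> existing, Map<PColumn, byte[]> incoming) {
        if (existing == PRow.DELETE_MARKER || incoming == PRow.DELETE_MARKER) {
            return incoming;
        }
        Map<PColumn, byte[]> merged = new HashMap<>(existing);
        merged.putAll(incoming);
        return merged;
    }
}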

Example 63 with PColumn

use of org.apache.phoenix.schema.PColumn in project phoenix by apache.

the class MutationState method validate.

private long validate(TableRef tableRef, Map<ImmutableBytesPtr, RowMutationState> rowKeyToColumnMap) throws SQLException {
    Long scn = connection.getSCN();
    MetaDataClient client = new MetaDataClient(connection);
    long serverTimeStamp = tableRef.getTimeStamp();
    // If we're auto committing, we've already validated the schema when we got the ColumnResolver,
    // so no need to do it again here.
    PTable table = tableRef.getTable();
    MetaDataMutationResult result = client.updateCache(table.getSchemaName().getString(), table.getTableName().getString());
    PTable resolvedTable = result.getTable();
    if (resolvedTable == null) {
        throw new TableNotFoundException(table.getSchemaName().getString(), table.getTableName().getString());
    }
    // Always update the tableRef's table: the cached version may have gone stale
    // since the UPSERT VALUES call resolved it and populated the cache.
    tableRef.setTable(resolvedTable);
    List<PTable> indexes = resolvedTable.getIndexes();
    for (PTable idxTable : indexes) {
        // Our failure mode is to block writes on index failure.
        if (idxTable.getIndexState() == PIndexState.ACTIVE && idxTable.getIndexDisableTimestamp() > 0) {
            throw new SQLExceptionInfo.Builder(SQLExceptionCode.INDEX_FAILURE_BLOCK_WRITE)
                    .setSchemaName(table.getSchemaName().getString())
                    .setTableName(table.getTableName().getString())
                    .build().buildException();
        }
    }
    long timestamp = result.getMutationTime();
    if (timestamp != QueryConstants.UNSET_TIMESTAMP) {
        serverTimeStamp = timestamp;
        if (result.wasUpdated()) {
            List<PColumn> columns = Lists.newArrayListWithExpectedSize(table.getColumns().size());
            for (Map.Entry<ImmutableBytesPtr, RowMutationState> rowEntry : rowKeyToColumnMap.entrySet()) {
                RowMutationState valueEntry = rowEntry.getValue();
                if (valueEntry != null) {
                    Map<PColumn, byte[]> colValues = valueEntry.getColumnValues();
                    if (colValues != PRow.DELETE_MARKER) {
                        for (PColumn column : colValues.keySet()) {
                            if (!column.isDynamic()) {
                                columns.add(column);
                            }
                        }
                    }
                }
            }
            for (PColumn column : columns) {
                if (column != null) {
                    resolvedTable.getColumnFamily(column.getFamilyName().getString())
                            .getPColumnForColumnName(column.getName().getString());
                }
            }
        }
    }
    return scn == null
            ? (serverTimeStamp == QueryConstants.UNSET_TIMESTAMP ? HConstants.LATEST_TIMESTAMP : serverTimeStamp)
            : scn;
}
Also used : MetaDataClient(org.apache.phoenix.schema.MetaDataClient) PhoenixIndexBuilder(org.apache.phoenix.index.PhoenixIndexBuilder) ImmutableBytesPtr(org.apache.phoenix.hbase.index.util.ImmutableBytesPtr) PTable(org.apache.phoenix.schema.PTable) PColumn(org.apache.phoenix.schema.PColumn) TableNotFoundException(org.apache.phoenix.schema.TableNotFoundException) PLong(org.apache.phoenix.schema.types.PLong) MetaDataMutationResult(org.apache.phoenix.coprocessor.MetaDataProtocol.MetaDataMutationResult) Map(java.util.Map)
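The re-resolution loop at the end is what actually exercises PColumn here: each cached column is looked up by family and name in the freshly resolved table, so a concurrently dropped column surfaces as an exception instead of a silent stale write. A condensed sketch of that step, with revalidate as a hypothetical helper:

import java.sql.SQLException;
import java.util.List;

import org.apache.phoenix.schema.PColumn;
import org.apache.phoenix.schema.PTable;

final class ColumnRevalidationSketch {
    // Hypothetical helper: re-resolve each cached PColumn against the refreshed
    // table metadata; the lookups throw (e.g. ColumnNotFoundException, a
    // SQLException subclass) if the column no longer exists.
    static void revalidate(PTable resolvedTable, List<PColumn> cachedColumns) throws SQLException {
        for (PColumn column : cachedColumns) {
            resolvedTable.getColumnFamily(column.getFamilyName().getString())
                    .getPColumnForColumnName(column.getName().getString());
        }
    }
}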

Example 64 with PColumn

use of org.apache.phoenix.schema.PColumn in project phoenix by apache.

the class QueryOptimizerTest method testCharArrayLength.

@Test
public void testCharArrayLength() throws Exception {
    Connection conn = DriverManager.getConnection(getUrl());
    conn.createStatement().execute("CREATE TABLE TEST.TEST (testInt INTEGER, testCharArray CHAR(3)[], testByteArray BINARY(7)[], " + "CONSTRAINT test_pk PRIMARY KEY(testInt)) DEFAULT_COLUMN_FAMILY='T'");
    conn.createStatement().execute("CREATE INDEX TEST_INDEX ON TEST.TEST (testInt) INCLUDE (testCharArray, testByteArray)");
    PhoenixStatement stmt = conn.createStatement().unwrap(PhoenixStatement.class);
    QueryPlan plan = stmt.optimizeQuery("SELECT /*+ INDEX(TEST.TEST TEST_INDEX)*/ testCharArray,testByteArray FROM TEST.TEST");
    List<PColumn> columns = plan.getTableRef().getTable().getColumns();
    assertEquals(3, columns.size());
    assertEquals(3, columns.get(1).getMaxLength().intValue());
    assertEquals(7, columns.get(2).getMaxLength().intValue());
}
Also used : PColumn(org.apache.phoenix.schema.PColumn) Connection(java.sql.Connection) PhoenixStatement(org.apache.phoenix.jdbc.PhoenixStatement) Test(org.junit.Test) BaseConnectionlessQueryTest(org.apache.phoenix.query.BaseConnectionlessQueryTest)
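For reference, the assertions pass because PColumn.getMaxLength() carries the declared size of fixed-width types. A small sketch reading the same metadata from any PTable, where printLengths is illustrative and not Phoenix API:

import org.apache.phoenix.schema.PColumn;
import org.apache.phoenix.schema.PTable;

final class MaxLengthSketch {
    // Illustrative only: getMaxLength() returns the declared size for types
    // such as CHAR(3) or BINARY(7), and null for types with no length
    // (e.g. INTEGER), so guard before unboxing.
    static void printLengths(PTable table) {
        for (PColumn column : table.getColumns()) {
            Integer maxLength = column.getMaxLength();
            System.out.println(column.getName().getString() + " -> "
                    + (maxLength == null ? "n/a" : maxLength.toString()));
        }
    }
}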

Example 65 with PColumn

use of org.apache.phoenix.schema.PColumn in project phoenix by apache.

the class FormatToKeyValueReducer method initColumnsMap.

private void initColumnsMap(PhoenixConnection conn) throws SQLException {
    Map<byte[], Integer> indexMap = new TreeMap<>(Bytes.BYTES_COMPARATOR);
    columnIndexes = new HashMap<>();
    int columnIndex = 0;
    for (int index = 0; index < logicalNames.size(); index++) {
        PTable table = PhoenixRuntime.getTable(conn, logicalNames.get(index));
        if (!table.getImmutableStorageScheme().equals(ImmutableStorageScheme.ONE_CELL_PER_COLUMN)) {
            List<PColumnFamily> cfs = table.getColumnFamilies();
            for (int i = 0; i < cfs.size(); i++) {
                byte[] family = cfs.get(i).getName().getBytes();
                Pair<byte[], byte[]> pair = new Pair<>(family, QueryConstants.SINGLE_KEYVALUE_COLUMN_QUALIFIER_BYTES);
                columnIndexes.put(columnIndex, pair);
                columnIndex++;
            }
        } else {
            List<PColumn> cls = table.getColumns();
            for (int i = 0; i < cls.size(); i++) {
                PColumn c = cls.get(i);
                byte[] family = new byte[0];
                byte[] cq;
                if (!SchemaUtil.isPKColumn(c)) {
                    family = c.getFamilyName().getBytes();
                    cq = c.getColumnQualifierBytes();
                } else {
                    cq = c.getName().getBytes();
                }
                byte[] cfn = Bytes.add(family, QueryConstants.NAMESPACE_SEPARATOR_BYTES, cq);
                Pair<byte[], byte[]> pair = new Pair<>(family, cq);
                if (!indexMap.containsKey(cfn)) {
                    indexMap.put(cfn, columnIndex);
                    columnIndexes.put(columnIndex, pair);
                    columnIndex++;
                }
            }
        }
        byte[] emptyColumnFamily = SchemaUtil.getEmptyColumnFamily(table);
        byte[] emptyKeyValue = EncodedColumnsUtil.getEmptyKeyValueInfo(table).getFirst();
        Pair<byte[], byte[]> pair = new Pair<>(emptyColumnFamily, emptyKeyValue);
        columnIndexes.put(columnIndex, pair);
        columnIndex++;
    }
}
Also used : TreeMap(java.util.TreeMap) PColumnFamily(org.apache.phoenix.schema.PColumnFamily) PTable(org.apache.phoenix.schema.PTable) PColumn(org.apache.phoenix.schema.PColumn) TableRowkeyPair(org.apache.phoenix.mapreduce.bulkload.TableRowkeyPair) Pair(org.apache.hadoop.hbase.util.Pair)
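The dedup key cfn above concatenates family and qualifier with the namespace separator, and PK columns (which have no column family) use their name as the qualifier. A compact sketch of that derivation, with columnMapKey as a hypothetical helper:

import org.apache.hadoop.hbase.util.Bytes;
import org.apache.phoenix.query.QueryConstants;
import org.apache.phoenix.schema.PColumn;
import org.apache.phoenix.util.SchemaUtil;

final class ColumnKeySketch {
    // Hypothetical helper mirroring the key derivation above: PK columns carry
    // no column family, so their name doubles as the qualifier; all other
    // columns use their declared family plus encoded qualifier bytes.
    static byte[] columnMapKey(PColumn c) {
        boolean isPk = SchemaUtil.isPKColumn(c);
        byte[] family = isPk ? new byte[0] : c.getFamilyName().getBytes();
        byte[] qualifier = isPk ? c.getName().getBytes() : c.getColumnQualifierBytes();
        return Bytes.add(family, QueryConstants.NAMESPACE_SEPARATOR_BYTES, qualifier);
    }
}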

Aggregations

PColumn (org.apache.phoenix.schema.PColumn): 101
PTable (org.apache.phoenix.schema.PTable): 59
PhoenixConnection (org.apache.phoenix.jdbc.PhoenixConnection): 26
Expression (org.apache.phoenix.expression.Expression): 21
TableRef (org.apache.phoenix.schema.TableRef): 20
ArrayList (java.util.ArrayList): 19
PName (org.apache.phoenix.schema.PName): 18
ImmutableBytesWritable (org.apache.hadoop.hbase.io.ImmutableBytesWritable): 17
LiteralExpression (org.apache.phoenix.expression.LiteralExpression): 17
ImmutableBytesPtr (org.apache.phoenix.hbase.index.util.ImmutableBytesPtr): 17
ColumnRef (org.apache.phoenix.schema.ColumnRef): 17
Hint (org.apache.phoenix.parse.HintNode.Hint): 14
PTableKey (org.apache.phoenix.schema.PTableKey): 14
ColumnNotFoundException (org.apache.phoenix.schema.ColumnNotFoundException): 13
PColumnFamily (org.apache.phoenix.schema.PColumnFamily): 13
PSmallint (org.apache.phoenix.schema.types.PSmallint): 13
SQLException (java.sql.SQLException): 12
ProjectedColumnExpression (org.apache.phoenix.expression.ProjectedColumnExpression): 12
PColumnImpl (org.apache.phoenix.schema.PColumnImpl): 12
Map (java.util.Map): 11