
Example 41 with PTableKey

Use of org.apache.phoenix.schema.PTableKey in project phoenix by apache.

From the class IndexMaintainerTest, method testIndexRowKeyBuilding.

private void testIndexRowKeyBuilding(String schemaName, String tableName, String dataColumns, String pk, String indexColumns, Object[] values, String includeColumns, String dataProps, String indexProps, KeyValueBuilder builder) throws Exception {
    Connection conn = DriverManager.getConnection(getUrl());
    String fullTableName = SchemaUtil.getTableName(SchemaUtil.normalizeIdentifier(schemaName), SchemaUtil.normalizeIdentifier(tableName));
    String fullIndexName = SchemaUtil.getTableName(SchemaUtil.normalizeIdentifier(schemaName), SchemaUtil.normalizeIdentifier("idx"));
    conn.createStatement().execute("CREATE TABLE " + fullTableName + "(" + dataColumns + " CONSTRAINT pk PRIMARY KEY (" + pk + "))  " + (dataProps.isEmpty() ? "" : dataProps));
    try {
        conn.createStatement().execute("CREATE INDEX idx ON " + fullTableName + "(" + indexColumns + ") " + (includeColumns.isEmpty() ? "" : "INCLUDE (" + includeColumns + ") ") + (indexProps.isEmpty() ? "" : indexProps));
        PhoenixConnection pconn = conn.unwrap(PhoenixConnection.class);
        PTable table = pconn.getTable(new PTableKey(pconn.getTenantId(), fullTableName));
        PTable index = pconn.getTable(new PTableKey(pconn.getTenantId(), fullIndexName));
        ImmutableBytesWritable ptr = new ImmutableBytesWritable();
        table.getIndexMaintainers(ptr, pconn);
        List<IndexMaintainer> c1 = IndexMaintainer.deserialize(ptr, builder, true);
        assertEquals(1, c1.size());
        IndexMaintainer im1 = c1.get(0);
        StringBuilder buf = new StringBuilder("UPSERT INTO " + fullTableName + " VALUES(");
        for (int i = 0; i < values.length; i++) {
            buf.append("?,");
        }
        buf.setCharAt(buf.length() - 1, ')');
        PreparedStatement stmt = conn.prepareStatement(buf.toString());
        for (int i = 0; i < values.length; i++) {
            stmt.setObject(i + 1, values[i]);
        }
        stmt.execute();
        Iterator<Pair<byte[], List<KeyValue>>> iterator = PhoenixRuntime.getUncommittedDataIterator(conn);
        List<KeyValue> dataKeyValues = iterator.next().getSecond();
        Map<ColumnReference, byte[]> valueMap = Maps.newHashMapWithExpectedSize(dataKeyValues.size());
        byte[] row = dataKeyValues.get(0).getRow();
        ImmutableBytesWritable rowKeyPtr = new ImmutableBytesWritable(row);
        Put dataMutation = new Put(rowKeyPtr.copyBytes());
        for (KeyValue kv : dataKeyValues) {
            valueMap.put(new ColumnReference(kv.getFamily(), kv.getQualifier()), kv.getValue());
            dataMutation.add(kv);
        }
        ValueGetter valueGetter = newValueGetter(row, valueMap);
        List<Mutation> indexMutations = IndexTestUtil.generateIndexData(index, table, dataMutation, ptr, builder);
        assertEquals(1, indexMutations.size());
        assertTrue(indexMutations.get(0) instanceof Put);
        Mutation indexMutation = indexMutations.get(0);
        ImmutableBytesWritable indexKeyPtr = new ImmutableBytesWritable(indexMutation.getRow());
        ptr.set(rowKeyPtr.get(), rowKeyPtr.getOffset(), rowKeyPtr.getLength());
        byte[] mutableIndexRowKey = im1.buildRowKey(valueGetter, ptr, null, null, HConstants.LATEST_TIMESTAMP);
        byte[] immutableIndexRowKey = indexKeyPtr.copyBytes();
        assertArrayEquals(immutableIndexRowKey, mutableIndexRowKey);
        for (ColumnReference ref : im1.getCoveredColumns()) {
            // Exercise the covered-column lookups; the returned values are not asserted in this test
            valueMap.get(ref);
        }
        byte[] dataRowKey = im1.buildDataRowKey(indexKeyPtr, null);
        assertArrayEquals(dataRowKey, dataKeyValues.get(0).getRow());
    } finally {
        try {
            conn.createStatement().execute("DROP TABLE " + fullTableName);
        } finally {
            conn.close();
        }
    }
}
Also used: PhoenixConnection (org.apache.phoenix.jdbc.PhoenixConnection), ImmutableBytesWritable (org.apache.hadoop.hbase.io.ImmutableBytesWritable), KeyValue (org.apache.hadoop.hbase.KeyValue), Connection (java.sql.Connection), PreparedStatement (java.sql.PreparedStatement), PTable (org.apache.phoenix.schema.PTable), Put (org.apache.hadoop.hbase.client.Put), ValueGetter (org.apache.phoenix.hbase.index.ValueGetter), Mutation (org.apache.hadoop.hbase.client.Mutation), PTableKey (org.apache.phoenix.schema.PTableKey), Pair (org.apache.hadoop.hbase.util.Pair), ColumnReference (org.apache.phoenix.hbase.index.covered.update.ColumnReference)
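For context, a minimal sketch of how such a helper might be invoked from a test. The schema, column definitions, values, and test method name below are illustrative only (not taken from IndexMaintainerTest), and GenericKeyValueBuilder.INSTANCE is assumed as an available KeyValueBuilder implementation.

// Assumed imports: org.apache.phoenix.hbase.index.util.GenericKeyValueBuilder, org.junit.Test
@Test
public void testRowKeyBuildingForSimpleIndex() throws Exception {
    // Hypothetical schema: two-column VARCHAR primary key plus one covered value column
    testIndexRowKeyBuilding(
        "S",                                   // schema name
        "T",                                   // table name
        "k1 VARCHAR, k2 VARCHAR, v VARCHAR",   // data columns
        "k1, k2",                              // primary key constraint
        "k2, k1",                              // indexed columns (reordered PK columns)
        new Object[] { "a", "b", "c" },        // values for k1, k2, v
        "v",                                   // INCLUDE column
        "",                                    // no table properties
        "",                                    // no index properties
        GenericKeyValueBuilder.INSTANCE);      // assumed KeyValueBuilder implementation
}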

Example 42 with PTableKey

Use of org.apache.phoenix.schema.PTableKey in project phoenix by apache.

From the class BaseIndexTest, method setup.

@Before
public void setup() throws Exception {
    Properties props = PropertiesUtil.deepCopy(TestUtil.TEST_PROPERTIES);
    conn = DriverManager.getConnection(getUrl(), props);
    PhoenixConnection pconn = conn.unwrap(PhoenixConnection.class);
    pDataTable = pconn.getTable(new PTableKey(pconn.getTenantId(), DATA_TABLE_FULL_NAME));
    pIndexTable = pconn.getTable(new PTableKey(pconn.getTenantId(), INDEX_TABLE_FULL_NAME));
}
Also used: PhoenixConnection (org.apache.phoenix.jdbc.PhoenixConnection), Properties (java.util.Properties), PTableKey (org.apache.phoenix.schema.PTableKey), Before (org.junit.Before)
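PTableKey is tenant-aware: when the connection carries a tenant id, the same lookup resolves a tenant-scoped table or view. A minimal sketch, assuming a hypothetical tenant id and view name ("tenant1", "MY_TENANT_VIEW"):

Properties props = PropertiesUtil.deepCopy(TestUtil.TEST_PROPERTIES);
props.setProperty(PhoenixRuntime.TENANT_ID_ATTRIB, "tenant1");   // hypothetical tenant id
try (Connection tenantConn = DriverManager.getConnection(getUrl(), props)) {
    PhoenixConnection pconn = tenantConn.unwrap(PhoenixConnection.class);
    // getTenantId() is now non-null, so the key addresses the tenant-scoped view
    PTable view = pconn.getTable(new PTableKey(pconn.getTenantId(), "MY_TENANT_VIEW"));
}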

Example 43 with PTableKey

Use of org.apache.phoenix.schema.PTableKey in project phoenix by apache.

From the class CreateTableIT, method assertColumnEncodingMetadata.

private void assertColumnEncodingMetadata(QualifierEncodingScheme expectedEncodingScheme, ImmutableStorageScheme expectedStorageScheme, String tableName, Connection conn) throws Exception {
    PhoenixConnection phxConn = conn.unwrap(PhoenixConnection.class);
    PTable table = phxConn.getTable(new PTableKey(null, tableName));
    assertEquals(expectedEncodingScheme, table.getEncodingScheme());
    assertEquals(expectedStorageScheme, table.getImmutableStorageScheme());
}
Also used: PhoenixConnection (org.apache.phoenix.jdbc.PhoenixConnection), PTableKey (org.apache.phoenix.schema.PTableKey), PTable (org.apache.phoenix.schema.PTable)
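An illustrative caller for the helper above, assuming a hypothetical table name and DDL chosen to exercise one-byte column qualifier encoding with the default storage scheme for a mutable table; the exact option values are an assumption, not lifted from CreateTableIT.

String tableName = "T_ENCODING_CHECK";   // hypothetical table name
try (Connection conn = DriverManager.getConnection(getUrl())) {
    conn.createStatement().execute(
        "CREATE TABLE " + tableName + " (k VARCHAR PRIMARY KEY, v VARCHAR) COLUMN_ENCODED_BYTES = 1");
    assertColumnEncodingMetadata(
        QualifierEncodingScheme.ONE_BYTE_QUALIFIERS,
        ImmutableStorageScheme.ONE_CELL_PER_COLUMN,
        tableName, conn);
}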

Example 44 with PTableKey

Use of org.apache.phoenix.schema.PTableKey in project phoenix by apache.

From the class PhoenixRuntime, method getTable.

/**
 * Returns the table if it is found in the connection's metadata cache. If the metadata of the
 * table has changed since it was cached, those changes will not necessarily be reflected in
 * the returned table. If the table is not found in the cache, a call is made to the server to
 * fetch its latest metadata. This is different from how a table is resolved when it is
 * referenced from a query, where the server call that refreshes the metadata is made (or
 * skipped) according to the UPDATE_CACHE_FREQUENCY property.
 * See https://issues.apache.org/jira/browse/PHOENIX-4475
 * @param conn the connection whose metadata cache and services are used for the lookup
 * @param name a pre-normalized table name or a pre-normalized schema and table name
 * @return the resolved table
 * @throws SQLException if the table cannot be resolved
 */
public static PTable getTable(Connection conn, String name) throws SQLException {
    PTable table = null;
    PhoenixConnection pconn = conn.unwrap(PhoenixConnection.class);
    try {
        table = pconn.getTable(new PTableKey(pconn.getTenantId(), name));
    } catch (TableNotFoundException e) {
        String schemaName = SchemaUtil.getSchemaNameFromFullName(name);
        String tableName = SchemaUtil.getTableNameFromFullName(name);
        MetaDataMutationResult result = new MetaDataClient(pconn).updateCache(schemaName, tableName);
        if (result.getMutationCode() != MutationCode.TABLE_ALREADY_EXISTS) {
            throw e;
        }
        table = result.getTable();
    }
    return table;
}
Also used: MetaDataClient (org.apache.phoenix.schema.MetaDataClient), TableNotFoundException (org.apache.phoenix.schema.TableNotFoundException), PhoenixConnection (org.apache.phoenix.jdbc.PhoenixConnection), PTableKey (org.apache.phoenix.schema.PTableKey), MetaDataMutationResult (org.apache.phoenix.coprocessor.MetaDataProtocol.MetaDataMutationResult), PTable (org.apache.phoenix.schema.PTable)
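A minimal usage sketch for the method above; the JDBC URL and table name are placeholders, not values taken from the Phoenix code base.

try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost")) {
    // The name must already be normalized (upper case unless the identifiers were quoted)
    PTable table = PhoenixRuntime.getTable(conn, "MY_SCHEMA.MY_TABLE");
    System.out.println(table.getName().getString() + " has " + table.getColumns().size() + " columns");
}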

Example 45 with PTableKey

Use of org.apache.phoenix.schema.PTableKey in project phoenix by apache.

From the class UpsertCompiler, method upsertSelect.

public static MutationState upsertSelect(StatementContext childContext, TableRef tableRef, RowProjector projector, ResultIterator iterator, int[] columnIndexes, int[] pkSlotIndexes, boolean useServerTimestamp, boolean prefixSysColValues) throws SQLException {
    PhoenixStatement statement = childContext.getStatement();
    PhoenixConnection connection = statement.getConnection();
    ConnectionQueryServices services = connection.getQueryServices();
    int maxSize = services.getProps().getInt(QueryServices.MAX_MUTATION_SIZE_ATTRIB, QueryServicesOptions.DEFAULT_MAX_MUTATION_SIZE);
    int maxSizeBytes = services.getProps().getInt(QueryServices.MAX_MUTATION_SIZE_BYTES_ATTRIB, QueryServicesOptions.DEFAULT_MAX_MUTATION_SIZE_BYTES);
    int batchSize = Math.min(connection.getMutateBatchSize(), maxSize);
    boolean isAutoCommit = connection.getAutoCommit();
    int numSplColumns = (tableRef.getTable().isMultiTenant() ? 1 : 0) + (tableRef.getTable().getViewIndexId() != null ? 1 : 0);
    byte[][] values = new byte[columnIndexes.length + numSplColumns][];
    if (prefixSysColValues) {
        int i = 0;
        if (tableRef.getTable().isMultiTenant()) {
            values[i++] = connection.getTenantId().getBytes();
        }
        if (tableRef.getTable().getViewIndexId() != null) {
            values[i++] = PSmallint.INSTANCE.toBytes(tableRef.getTable().getViewIndexId());
        }
    }
    int rowCount = 0;
    MultiRowMutationState mutation = new MultiRowMutationState(batchSize);
    PTable table = tableRef.getTable();
    IndexMaintainer indexMaintainer = null;
    byte[][] viewConstants = null;
    if (table.getIndexType() == IndexType.LOCAL) {
        PTable parentTable = statement.getConnection().getMetaDataCache().getTableRef(new PTableKey(statement.getConnection().getTenantId(), table.getParentName().getString())).getTable();
        indexMaintainer = table.getIndexMaintainer(parentTable, connection);
        viewConstants = IndexUtil.getViewConstants(parentTable);
    }
    try (ResultSet rs = new PhoenixResultSet(iterator, projector, childContext)) {
        ImmutableBytesWritable ptr = new ImmutableBytesWritable();
        while (rs.next()) {
            for (int i = 0, j = numSplColumns; j < values.length; j++, i++) {
                PColumn column = table.getColumns().get(columnIndexes[i]);
                byte[] bytes = rs.getBytes(i + 1);
                ptr.set(bytes == null ? ByteUtil.EMPTY_BYTE_ARRAY : bytes);
                Object value = rs.getObject(i + 1);
                int rsPrecision = rs.getMetaData().getPrecision(i + 1);
                Integer precision = rsPrecision == 0 ? null : rsPrecision;
                int rsScale = rs.getMetaData().getScale(i + 1);
                Integer scale = rsScale == 0 ? null : rsScale;
                // The column types are guaranteed to be compatible, as we checked that before;
                // only size compatibility still needs to be verified here.
                if (!column.getDataType().isSizeCompatible(ptr, value, column.getDataType(), SortOrder.getDefault(), precision, scale, column.getMaxLength(), column.getScale())) {
                    throw new SQLExceptionInfo.Builder(SQLExceptionCode.DATA_EXCEEDS_MAX_CAPACITY).setColumnName(column.getName().getString()).setMessage("value=" + column.getDataType().toStringLiteral(ptr, null)).build().buildException();
                }
                column.getDataType().coerceBytes(ptr, value, column.getDataType(), precision, scale, SortOrder.getDefault(), column.getMaxLength(), column.getScale(), column.getSortOrder(), table.rowKeyOrderOptimizable());
                values[j] = ByteUtil.copyKeyBytesIfNecessary(ptr);
            }
            setValues(values, pkSlotIndexes, columnIndexes, table, mutation, statement, useServerTimestamp, indexMaintainer, viewConstants, null, numSplColumns);
            rowCount++;
            // Commit a batch if auto commit is true and we're at our batch size
            if (isAutoCommit && rowCount % batchSize == 0) {
                MutationState state = new MutationState(tableRef, mutation, 0, maxSize, maxSizeBytes, connection);
                connection.getMutationState().join(state);
                connection.getMutationState().send();
                mutation.clear();
            }
        }
        // If auto commit is true, this last batch will be committed upon return
        return new MutationState(tableRef, mutation, rowCount / batchSize * batchSize, maxSize, maxSizeBytes, connection);
    }
}
Also used: PhoenixConnection (org.apache.phoenix.jdbc.PhoenixConnection), ImmutableBytesWritable (org.apache.hadoop.hbase.io.ImmutableBytesWritable), PhoenixIndexBuilder (org.apache.phoenix.index.PhoenixIndexBuilder), PhoenixStatement (org.apache.phoenix.jdbc.PhoenixStatement), Hint (org.apache.phoenix.parse.HintNode.Hint), PSmallint (org.apache.phoenix.schema.types.PSmallint), PTable (org.apache.phoenix.schema.PTable), PColumn (org.apache.phoenix.schema.PColumn), IndexMaintainer (org.apache.phoenix.index.IndexMaintainer), MultiRowMutationState (org.apache.phoenix.execute.MutationState.MultiRowMutationState), MutationState (org.apache.phoenix.execute.MutationState), RowMutationState (org.apache.phoenix.execute.MutationState.RowMutationState), PhoenixResultSet (org.apache.phoenix.jdbc.PhoenixResultSet), ResultSet (java.sql.ResultSet), PTableKey (org.apache.phoenix.schema.PTableKey), ConnectionQueryServices (org.apache.phoenix.query.ConnectionQueryServices)
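The PTableKey usage in the local-index branch above can be isolated into a small sketch: resolve the index, then (for local indexes) its parent data table from the client-side metadata cache, and derive the IndexMaintainer from the pair. The helper name and index name are hypothetical; the lookups mirror the calls in upsertSelect.

// Assumed imports: org.apache.phoenix.schema.PTable.IndexType, java.sql.SQLException
private static IndexMaintainer localIndexMaintainer(PhoenixConnection pconn, String fullIndexName) throws SQLException {
    // Both lookups are keyed by (tenant id, full table name)
    PTable index = pconn.getTable(new PTableKey(pconn.getTenantId(), fullIndexName));
    if (index.getIndexType() != IndexType.LOCAL) {
        return null;   // only the local-index branch needs the parent resolved this way
    }
    PTable parent = pconn.getMetaDataCache()
            .getTableRef(new PTableKey(pconn.getTenantId(), index.getParentName().getString()))
            .getTable();
    return index.getIndexMaintainer(parent, pconn);
}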

Aggregations

PTableKey (org.apache.phoenix.schema.PTableKey): 89
PhoenixConnection (org.apache.phoenix.jdbc.PhoenixConnection): 77
PTable (org.apache.phoenix.schema.PTable): 55
Connection (java.sql.Connection): 48
Test (org.junit.Test): 40
Properties (java.util.Properties): 23
ResultSet (java.sql.ResultSet): 14
PColumn (org.apache.phoenix.schema.PColumn): 14
PreparedStatement (java.sql.PreparedStatement): 13
HTableInterface (org.apache.hadoop.hbase.client.HTableInterface): 12
SQLException (java.sql.SQLException): 11
TableNotFoundException (org.apache.phoenix.schema.TableNotFoundException): 11
PMetaData (org.apache.phoenix.schema.PMetaData): 10
ImmutableBytesWritable (org.apache.hadoop.hbase.io.ImmutableBytesWritable): 9
PName (org.apache.phoenix.schema.PName): 9
BaseTest (org.apache.phoenix.query.BaseTest): 8
Result (org.apache.hadoop.hbase.client.Result): 7
ResultScanner (org.apache.hadoop.hbase.client.ResultScanner): 7
Scan (org.apache.hadoop.hbase.client.Scan): 7
MetaDataMutationResult (org.apache.phoenix.coprocessor.MetaDataProtocol.MetaDataMutationResult): 6