
Example 36 with KeyRange

use of org.apache.phoenix.query.KeyRange in project phoenix by apache.

the class PhoenixInputFormat method getSplits.

@Override
public InputSplit[] getSplits(JobConf jobConf, int numSplits) throws IOException {
    String tableName = jobConf.get(PhoenixStorageHandlerConstants.PHOENIX_TABLE_NAME);
    String query;
    String executionEngine = jobConf.get(HiveConf.ConfVars.HIVE_EXECUTION_ENGINE.varname, HiveConf.ConfVars.HIVE_EXECUTION_ENGINE.getDefaultValue());
    if (LOG.isDebugEnabled()) {
        LOG.debug("Target table name at split phase : " + tableName + "with whereCondition :" + jobConf.get(TableScanDesc.FILTER_TEXT_CONF_STR) + " and " + HiveConf.ConfVars.HIVE_EXECUTION_ENGINE.varname + " : " + executionEngine);
    }
    if (PhoenixStorageHandlerConstants.MR.equals(executionEngine)) {
        List<IndexSearchCondition> conditionList = null;
        String filterExprSerialized = jobConf.get(TableScanDesc.FILTER_EXPR_CONF_STR);
        if (filterExprSerialized != null) {
            ExprNodeGenericFuncDesc filterExpr = Utilities.deserializeExpression(filterExprSerialized);
            PhoenixPredicateDecomposer predicateDecomposer = PhoenixPredicateDecomposer.create(Arrays.asList(jobConf.get(serdeConstants.LIST_COLUMNS).split(",")));
            predicateDecomposer.decomposePredicate(filterExpr);
            if (predicateDecomposer.isCalledPPD()) {
                conditionList = predicateDecomposer.getSearchConditionList();
            }
        }
        query = PhoenixQueryBuilder.getInstance().buildQuery(jobConf, tableName, PhoenixStorageHandlerUtil.getReadColumnNames(jobConf), conditionList);
    } else if (PhoenixStorageHandlerConstants.TEZ.equals(executionEngine)) {
        Map<String, TypeInfo> columnTypeMap = PhoenixStorageHandlerUtil.createColumnTypeMap(jobConf);
        if (LOG.isDebugEnabled()) {
            LOG.debug("Column type map for TEZ : " + columnTypeMap);
        }
        String whereClause = jobConf.get(TableScanDesc.FILTER_TEXT_CONF_STR);
        query = PhoenixQueryBuilder.getInstance().buildQuery(jobConf, tableName, PhoenixStorageHandlerUtil.getReadColumnNames(jobConf), whereClause, columnTypeMap);
    } else {
        throw new IOException(executionEngine + " execution engine is not supported yet.");
    }
    final QueryPlan queryPlan = getQueryPlan(jobConf, query);
    final List<KeyRange> allSplits = queryPlan.getSplits();
    final List<InputSplit> splits = generateSplits(jobConf, queryPlan, allSplits, query);
    return splits.toArray(new InputSplit[splits.size()]);
}
Also used : IndexSearchCondition(org.apache.phoenix.hive.ql.index.IndexSearchCondition) KeyRange(org.apache.phoenix.query.KeyRange) ExprNodeGenericFuncDesc(org.apache.hadoop.hive.ql.plan.ExprNodeGenericFuncDesc) IOException(java.io.IOException) QueryPlan(org.apache.phoenix.compile.QueryPlan) Map(java.util.Map) InputSplit(org.apache.hadoop.mapred.InputSplit) PhoenixPredicateDecomposer(org.apache.phoenix.hive.ppd.PhoenixPredicateDecomposer)
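
The splits returned here are plain KeyRange objects; generateSplits (not shown) turns each one into an InputSplit. A minimal sketch of that idea, assuming the standard getLowerRange()/getUpperRange() accessors on KeyRange and ignoring the region lookup, salting, and bound-inclusivity handling the real method performs:

import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.phoenix.query.KeyRange;

// Hypothetical helper, not part of PhoenixInputFormat: build one HBase Scan per KeyRange.
// Assumes lower-inclusive / upper-exclusive ranges, which matches Scan's start/stop semantics;
// a zero-length bound means the range is unbounded on that side, which Scan treats the same way.
static List<Scan> toScans(List<KeyRange> keyRanges) {
    List<Scan> scans = new ArrayList<>(keyRanges.size());
    for (KeyRange range : keyRanges) {
        Scan scan = new Scan();
        scan.setStartRow(range.getLowerRange());
        scan.setStopRow(range.getUpperRange());
        scans.add(scan);
    }
    return scans;
}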

Example 37 with KeyRange

use of org.apache.phoenix.query.KeyRange in project phoenix by apache.

the class SkipScanFilter method intersect.

private boolean intersect(final byte[] lowerInclusiveKey, final byte[] upperExclusiveKey, List<List<KeyRange>> newSlots) {
    resetState();
    boolean lowerUnbound = (lowerInclusiveKey.length == 0);
    int startPos = 0;
    int lastSlot = slots.size() - 1;
    if (!lowerUnbound) {
        // Find the position of the first slot of the lower range
        schema.next(ptr, 0, schema.iterator(lowerInclusiveKey, ptr), slotSpan[0]);
        startPos = ScanUtil.searchClosestKeyRangeWithUpperHigherThanPtr(slots.get(0), ptr, 0, schema.getField(0));
        // Lower range is past last upper range of first slot, so cannot possibly be in range
        if (startPos >= slots.get(0).size()) {
            return false;
        }
    }
    boolean upperUnbound = (upperExclusiveKey.length == 0);
    int endPos = slots.get(0).size() - 1;
    if (!upperUnbound) {
        // Find the position of the first slot of the upper range
        schema.next(ptr, 0, schema.iterator(upperExclusiveKey, ptr), slotSpan[0]);
        endPos = ScanUtil.searchClosestKeyRangeWithUpperHigherThanPtr(slots.get(0), ptr, startPos, schema.getField(0));
        // Past last position, so we can include everything from the start position
        if (endPos >= slots.get(0).size()) {
            upperUnbound = true;
            endPos = slots.get(0).size() - 1;
        } else if (slots.get(0).get(endPos).compareLowerToUpperBound(upperExclusiveKey, ScanUtil.getComparator(schema.getField(0))) >= 0) {
            // We know that the endPos range is higher than the previous range, but we need
            // to test if it ends before the next range starts.
            endPos--;
        }
        if (endPos < startPos) {
            return false;
        }
    }
    // Short circuit out if we only have a single set of keys
    if (slots.size() == 1) {
        if (newSlots != null) {
            List<KeyRange> newRanges = slots.get(0).subList(startPos, endPos + 1);
            newSlots.add(newRanges);
        }
        return true;
    }
    if (!lowerUnbound) {
        position[0] = startPos;
        navigate(lowerInclusiveKey, 0, lowerInclusiveKey.length, Terminate.AFTER);
        if (filterAllRemaining()) {
            return false;
        }
    }
    if (upperUnbound) {
        if (newSlots != null) {
            newSlots.add(slots.get(0).subList(startPos, endPos + 1));
            newSlots.addAll(slots.subList(1, slots.size()));
        }
        return true;
    }
    int[] lowerPosition = Arrays.copyOf(position, position.length);
    // Navigate to the upperExclusiveKey, but not past it
    // TODO: We're including everything between the lowerPosition and end position, which is
    // more than we need. We can optimize this by tracking whether each range in each slot position
    // intersects.
    ReturnCode endCode = navigate(upperExclusiveKey, 0, upperExclusiveKey.length, Terminate.AT);
    if (endCode == ReturnCode.INCLUDE || endCode == ReturnCode.INCLUDE_AND_NEXT_COL) {
        setStartKey();
        // If the new start key equals the upperExclusiveKey, we have gone one position too far, since
        // our upper key is exclusive. In that case, go to the previous key
        if (Bytes.compareTo(startKey, 0, startKeyLength, upperExclusiveKey, 0, upperExclusiveKey.length) == 0 && (previousPosition(lastSlot) < 0 || position[0] < lowerPosition[0])) {
            // If by backing up one position we have an empty range, then return
            return false;
        }
    } else if (endCode == ReturnCode.SEEK_NEXT_USING_HINT) {
        // The navigation for the upper key may have landed on the same single-key slot positions
        // as the slots for lowerInclusive. If so, there is no intersection.
        if (Arrays.equals(lowerPosition, position) && areSlotsSingleKey(0, position.length - 1)) {
            return false;
        }
    } else if (filterAllRemaining()) {
        // We have navigated past the last range, so the last position of each slot will
        // be used below as the end bounds to formulate the list of intersecting slots.
        for (int i = 0; i <= lastSlot; i++) {
            position[i] = slots.get(i).size() - 1;
        }
    }
    int prevRowKeyPos = -1;
    ImmutableBytesWritable lowerPtr = new ImmutableBytesWritable();
    ImmutableBytesWritable upperPtr = new ImmutableBytesWritable();
    schema.iterator(lowerInclusiveKey, lowerPtr);
    schema.iterator(upperExclusiveKey, upperPtr);
    // For each slot, copy the ranges between the lower and upper positions, inclusive
    for (int i = 0; i <= lastSlot; i++) {
        List<KeyRange> newRanges = slots.get(i).subList(lowerPosition[i], Math.min(position[i] + 1, slots.get(i).size()));
        if (newRanges.isEmpty()) {
            return false;
        }
        if (newSlots != null) {
            newSlots.add(newRanges);
        }
        // Include all remaining less-significant slot ranges if:
        // 1) a more-significant slot was incremented
        if (position[i] > lowerPosition[i]) {
            if (newSlots != null) {
                newSlots.addAll(slots.subList(i + 1, slots.size()));
            }
            break;
        }
        // 2) a non single-key slot is traversed and its lower/upper key values differ,
        //    since less-significant slots may be lower after traversal than where they started.
        if (!slots.get(i).get(position[i]).isSingleKey()) {
            int rowKeyPos = ScanUtil.getRowKeyPosition(slotSpan, i);
            // Position lowerPtr/upperPtr within lowerInclusiveKey/upperExclusiveKey at value for slot i
            // The reposition method does this incrementally, starting from prevRowKeyPos = -1 on the first iteration.
            schema.reposition(lowerPtr, prevRowKeyPos, rowKeyPos, 0, lowerInclusiveKey.length, slotSpan[i]);
            schema.reposition(upperPtr, prevRowKeyPos, rowKeyPos, 0, upperExclusiveKey.length, slotSpan[i]);
            // If the lower and upper keys differ at this non single-key slot, the remaining
            // less-significant slot ranges are unconstrained between them, so include them all.
            if (lowerPtr.compareTo(upperPtr) != 0) {
                if (newSlots != null) {
                    newSlots.addAll(slots.subList(i + 1, slots.size()));
                }
                break;
            }
            prevRowKeyPos = rowKeyPos;
        }
    }
    return true;
}
Also used : ImmutableBytesWritable(org.apache.hadoop.hbase.io.ImmutableBytesWritable) KeyRange(org.apache.phoenix.query.KeyRange)
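
The per-slot checks above are, at their core, intersections of half-open byte ranges. A stripped-down illustration of that idea, using only org.apache.hadoop.hbase.util.Bytes and treating a zero-length bound as unbounded; this is a simplification for clarity, not Phoenix's actual slot handling:

import org.apache.hadoop.hbase.util.Bytes;

// Simplified sketch: intersect two half-open byte ranges [aLow, aHigh) and [bLow, bHigh),
// where a zero-length array means "unbounded" on that side. Returns null when they do not overlap.
static byte[][] intersect(byte[] aLow, byte[] aHigh, byte[] bLow, byte[] bHigh) {
    // The lower bound of the intersection is the larger of the two lower bounds.
    byte[] low = (aLow.length == 0) ? bLow
            : (bLow.length == 0) ? aLow
            : (Bytes.compareTo(aLow, bLow) >= 0 ? aLow : bLow);
    // The upper bound of the intersection is the smaller of the two upper bounds.
    byte[] high = (aHigh.length == 0) ? bHigh
            : (bHigh.length == 0) ? aHigh
            : (Bytes.compareTo(aHigh, bHigh) <= 0 ? aHigh : bHigh);
    // Empty intersection when the lower bound reaches or passes the upper bound.
    if (low.length != 0 && high.length != 0 && Bytes.compareTo(low, high) >= 0) {
        return null;
    }
    return new byte[][] { low, high };
}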

Example 38 with KeyRange

use of org.apache.phoenix.query.KeyRange in project phoenix by apache.

the class CreateTableIT method testStartKeyStopKey.

@Test
public void testStartKeyStopKey() throws SQLException {
    long ts = nextTimestamp();
    Properties props = new Properties();
    props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(ts));
    Connection conn = DriverManager.getConnection(getUrl(), props);
    conn.createStatement().execute("CREATE TABLE start_stop_test (pk char(2) not null primary key) SPLIT ON ('EA','EZ')");
    conn.close();
    String query = "select count(*) from start_stop_test where pk >= 'EA' and pk < 'EZ'";
    props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(ts + 2));
    conn = DriverManager.getConnection(getUrl(), props);
    Statement statement = conn.createStatement();
    statement.execute(query);
    PhoenixStatement pstatement = statement.unwrap(PhoenixStatement.class);
    List<KeyRange> splits = pstatement.getQueryPlan().getSplits();
    assertTrue(splits.size() > 0);
}
Also used : PhoenixStatement(org.apache.phoenix.jdbc.PhoenixStatement) Statement(java.sql.Statement) KeyRange(org.apache.phoenix.query.KeyRange) Connection(java.sql.Connection) PhoenixConnection(org.apache.phoenix.jdbc.PhoenixConnection) Properties(java.util.Properties) PhoenixStatement(org.apache.phoenix.jdbc.PhoenixStatement) Test(org.junit.Test)
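
To see how the SPLIT ON points surface in the plan, the KeyRange boundaries can be dumped; a hypothetical debugging helper (not part of the test), assuming the getLowerRange()/getUpperRange() accessors and HBase's Bytes.toStringBinary:

import java.util.List;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.phoenix.query.KeyRange;

// Print each split boundary from the query plan. With the table pre-split on 'EA' and 'EZ'
// and the scan bounded to pk >= 'EA' and pk < 'EZ', the printed ranges are expected to line
// up with those split points.
static void printSplits(List<KeyRange> splits) {
    for (KeyRange split : splits) {
        System.out.println(Bytes.toStringBinary(split.getLowerRange())
                + " ... " + Bytes.toStringBinary(split.getUpperRange()));
    }
}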

Example 39 with KeyRange

use of org.apache.phoenix.query.KeyRange in project phoenix by apache.

the class TransactionalViewIT method testReadOwnWritesWithStats.

@Test
public void testReadOwnWritesWithStats() throws Exception {
    try (Connection conn1 = DriverManager.getConnection(getUrl());
        Connection conn2 = DriverManager.getConnection(getUrl())) {
        String ddl = "CREATE TABLE " + fullTableName + " (k INTEGER NOT NULL PRIMARY KEY, v1 DATE) TRANSACTIONAL=true";
        conn1.createStatement().execute(ddl);
        ddl = "CREATE VIEW " + fullViewName + " (v2 VARCHAR) AS SELECT * FROM " + fullTableName + " where k>5";
        conn1.createStatement().execute(ddl);
        for (int i = 0; i < 10; i++) {
            conn1.createStatement().execute("UPSERT INTO " + fullTableName + " VALUES(" + i + ")");
        }
        // verify you can read your own writes
        int count = 0;
        ResultSet rs = conn1.createStatement().executeQuery("SELECT k FROM " + fullTableName);
        while (rs.next()) {
            assertEquals(count++, rs.getInt(1));
        }
        assertEquals(10, count);
        count = 0;
        rs = conn1.createStatement().executeQuery("SELECT k FROM " + fullViewName);
        while (rs.next()) {
            assertEquals(6 + count++, rs.getInt(1));
        }
        assertEquals(4, count);
        // verify stats can see the read own writes rows
        analyzeTable(conn2, fullViewName, true);
        List<KeyRange> splits = getAllSplits(conn2, fullViewName);
        assertEquals(4, splits.size());
    }
}
Also used : KeyRange(org.apache.phoenix.query.KeyRange) Connection(java.sql.Connection) ResultSet(java.sql.ResultSet) Test(org.junit.Test)
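
The four splits come from statistics guideposts rather than region boundaries, which is why analyzeTable runs first. A hedged sketch of what that helper presumably boils down to, since Phoenix exposes statistics collection as SQL; the helper's real signature and options are assumptions here:

import java.sql.Connection;
import java.sql.SQLException;

// Hypothetical stand-in for the analyzeTable(conn, name, ...) helper used above: running
// UPDATE STATISTICS refreshes the guideposts that getAllSplits then reports as KeyRange splits.
static void updateStatistics(Connection conn, String name) throws SQLException {
    conn.createStatement().execute("UPDATE STATISTICS " + name);
}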

Example 40 with KeyRange

use of org.apache.phoenix.query.KeyRange in project phoenix by apache.

the class TransactionalViewIT method testInvalidRowsWithStats.

@Test
public void testInvalidRowsWithStats() throws Exception {
    try (Connection conn1 = DriverManager.getConnection(getUrl());
        Connection conn2 = DriverManager.getConnection(getUrl())) {
        String ddl = "CREATE TABLE " + fullTableName + " (k INTEGER NOT NULL PRIMARY KEY, v1 DATE) TRANSACTIONAL=true";
        conn1.createStatement().execute(ddl);
        ddl = "CREATE VIEW " + fullViewName + " (v2 VARCHAR) AS SELECT * FROM " + fullTableName + " where k>5";
        conn1.createStatement().execute(ddl);
        for (int i = 0; i < 10; i++) {
            conn1.createStatement().execute("UPSERT INTO " + fullTableName + " VALUES(" + i + ")");
        }
        // verify you can read your own writes
        int count = 0;
        ResultSet rs = conn1.createStatement().executeQuery("SELECT k FROM " + fullTableName);
        while (rs.next()) {
            assertEquals(count++, rs.getInt(1));
        }
        assertEquals(10, count);
        count = 0;
        rs = conn1.createStatement().executeQuery("SELECT k FROM " + fullViewName);
        while (rs.next()) {
            assertEquals(6 + count++, rs.getInt(1));
        }
        assertEquals(4, count);
        // Thread.sleep(DEFAULT_TXN_TIMEOUT_SECONDS*1000+20000);
        // assertEquals("There should be one invalid transaction", 1, txManager.getInvalidSize());
        // verify stats can see the rows from the invalid transaction
        analyzeTable(conn2, fullViewName, true);
        List<KeyRange> splits = getAllSplits(conn2, fullViewName);
        assertEquals(4, splits.size());
    }
}
Also used : KeyRange(org.apache.phoenix.query.KeyRange) Connection(java.sql.Connection) ResultSet(java.sql.ResultSet) Test(org.junit.Test)

Aggregations

KeyRange (org.apache.phoenix.query.KeyRange) 51
Test (org.junit.Test) 23
Connection (java.sql.Connection) 16
ResultSet (java.sql.ResultSet) 14
PreparedStatement (java.sql.PreparedStatement) 9
ArrayList (java.util.ArrayList) 9
List (java.util.List) 8
Properties (java.util.Properties) 7
Scan (org.apache.hadoop.hbase.client.Scan) 7
ScanRanges (org.apache.phoenix.compile.ScanRanges) 6
BigDecimal (java.math.BigDecimal) 5
PhoenixConnection (org.apache.phoenix.jdbc.PhoenixConnection) 5
KeyPart (org.apache.phoenix.compile.KeyPart) 4
QueryPlan (org.apache.phoenix.compile.QueryPlan) 4
PhoenixStatement (org.apache.phoenix.jdbc.PhoenixStatement) 4
Field (org.apache.phoenix.schema.ValueSchema.Field) 4
KeyValue (org.apache.hadoop.hbase.KeyValue) 3
SkipScanFilter (org.apache.phoenix.filter.SkipScanFilter) 3
IOException (java.io.IOException) 2
Statement (java.sql.Statement) 2