Example usage of org.apache.phoenix.query.KeyRange in the Apache Phoenix project:
the write method of the SkipScanFilter class.
/**
 * Serializes this filter: first the row key schema, then the slot count
 * (negated when multiple cell versions must be included), then, per slot,
 * a single packed int followed by each key range in that slot's OR list.
 */
@Override
public void write(DataOutput out) throws IOException {
    assert (slots.size() == slotSpan.length);
    schema.write(out);
    int slotCount = slots.size();
    // A negative slot count doubles as the includeMultipleVersions flag.
    out.writeInt(this.includeMultipleVersions ? -slotCount : slotCount);
    for (int slotIdx = 0; slotIdx < slotCount; slotIdx++) {
        List<KeyRange> ranges = slots.get(slotIdx);
        // Pack the slot span into the high bits and the OR-list length into
        // the low KEY_RANGE_LENGTH_BITS bits, then encode as -(packed + 1)
        // so readers can distinguish this format by its negative sign.
        int packed = (slotSpan[slotIdx] << KEY_RANGE_LENGTH_BITS) | ranges.size();
        out.writeInt(-(packed + 1));
        for (KeyRange keyRange : ranges) {
            keyRange.write(out);
        }
    }
}
Example usage of org.apache.phoenix.query.KeyRange in the Apache Phoenix project:
the readFields method of the SkipScanFilter class.
// Deserializes the state produced by write(DataOutput): the row key schema,
// then the slot count, then per slot a packed length/span int followed by
// that slot's key ranges.
@Override
public void readFields(DataInput in) throws IOException {
RowKeySchema schema = new RowKeySchema();
schema.readFields(in);
// Slot count; write() negates it when multiple cell versions are included.
int andLen = in.readInt();
boolean includeMultipleVersions = false;
if (andLen < 0) {
andLen = -andLen;
includeMultipleVersions = true;
}
int[] slotSpan = new int[andLen];
List<List<KeyRange>> slots = Lists.newArrayListWithExpectedSize(andLen);
for (int i = 0; i < andLen; i++) {
int orLenWithSlotSpan = in.readInt();
// Legacy (pre-4.2) writers emitted a plain non-negative length, in which
// case slotSpan[i] stays at its default of 0.
int orLen = orLenWithSlotSpan;
/*
 * For 4.2+ clients, we serialize the slotSpan array. To maintain backward
 * compatibility, we encode the slotSpan values with the size of the list
 * of key ranges. We reserve 21 bits for the key range list and 10 bits
 * for the slotSpan value (up to 1024 which should be plenty).
 */
if (orLenWithSlotSpan < 0) {
// Undo the -(packed + 1) encoding applied by write() to recover the
// packed value, then split it into its two fields.
orLenWithSlotSpan = -orLenWithSlotSpan - 1;
// High bits: the slot span.
slotSpan[i] = orLenWithSlotSpan >>> KEY_RANGE_LENGTH_BITS;
// Low KEY_RANGE_LENGTH_BITS bits: the key range list length. The
// shift-left/unsigned-shift-right pair masks off the span bits.
orLen = (orLenWithSlotSpan << SLOT_SPAN_BITS) >>> SLOT_SPAN_BITS;
}
List<KeyRange> orClause = Lists.newArrayListWithExpectedSize(orLen);
slots.add(orClause);
for (int j = 0; j < orLen; j++) {
KeyRange range = KeyRange.read(in);
orClause.add(range);
}
}
this.init(slots, slotSpan, schema, includeMultipleVersions);
}
Example usage of org.apache.phoenix.query.KeyRange in the Apache Phoenix project:
the testWithMultiCF method of the StatsCollectorIT class.
// Verifies guidepost statistics collection on a table with four column
// families (A-D), where only some families are populated for every row.
@Test
public void testWithMultiCF() throws Exception {
int nRows = 20;
Connection conn = getConnection(0);
PreparedStatement stmt;
// Four column families: a.v/b.v required, c.v/d.v nullable.
conn.createStatement().execute("CREATE TABLE " + fullTableName + "(k VARCHAR PRIMARY KEY, a.v INTEGER, b.v INTEGER, c.v INTEGER NULL, d.v INTEGER NULL) " + tableDDLOptions);
stmt = conn.prepareStatement("UPSERT INTO " + fullTableName + " VALUES(?,?, ?, ?, ?)");
// Pad each row key with 250 zero bytes so rows are wide enough to force
// multiple guideposts at the configured guidepost width.
byte[] val = new byte[250];
// 20 full rows touching all four column families.
for (int i = 0; i < nRows; i++) {
stmt.setString(1, Character.toString((char) ('a' + i)) + Bytes.toString(val));
stmt.setInt(2, i);
stmt.setInt(3, i);
stmt.setInt(4, i);
stmt.setInt(5, i);
stmt.executeUpdate();
}
conn.commit();
// 5 extra rows (keys sorting after the first batch) that populate only
// families C and D, so per-family stats diverge.
stmt = conn.prepareStatement("UPSERT INTO " + fullTableName + "(k, c.v, d.v) VALUES(?,?,?)");
for (int i = 0; i < 5; i++) {
stmt.setString(1, Character.toString((char) ('a' + 'z' + i)) + Bytes.toString(val));
stmt.setInt(2, i);
stmt.setInt(3, i);
stmt.executeUpdate();
}
conn.commit();
ResultSet rs;
// Collect stats at the default guidepost width and check split count and
// the estimates surfaced in the EXPLAIN plan.
TestUtil.analyzeTable(conn, fullTableName);
List<KeyRange> keyRanges = getAllSplits(conn, fullTableName);
assertEquals(26, keyRanges.size());
rs = conn.createStatement().executeQuery("EXPLAIN SELECT * FROM " + fullTableName);
// Expected byte estimates differ by storage scheme (column encoding /
// mutability), hence the conditional expected values.
assertEquals("CLIENT 26-CHUNK 25 ROWS " + (columnEncoded ? (mutable ? "12530" : "13902") : "12420") + " BYTES PARALLEL 1-WAY FULL SCAN OVER " + physicalTableName, QueryUtil.getExplainPlan(rs));
ConnectionQueryServices services = conn.unwrap(PhoenixConnection.class).getQueryServices();
List<HRegionLocation> regions = services.getAllTableRegions(Bytes.toBytes(physicalTableName));
assertEquals(1, regions.size());
TestUtil.analyzeTable(conn, fullTableName);
// Re-collect with a much smaller guidepost width (1000 bytes) so each
// family produces more guideposts.
String query = "UPDATE STATISTICS " + fullTableName + " SET \"" + QueryServices.STATS_GUIDEPOST_WIDTH_BYTES_ATTRIB + "\"=" + Long.toString(1000);
conn.createStatement().execute(query);
keyRanges = getAllSplits(conn, fullTableName);
boolean oneCellPerColFamliyStorageScheme = !mutable && columnEncoded;
assertEquals(oneCellPerColFamliyStorageScheme ? 13 : 12, keyRanges.size());
// Inspect SYSTEM.STATS directly: per-family row counts, guidepost widths
// and guidepost counts, ordered by column family.
rs = conn.createStatement().executeQuery("SELECT COLUMN_FAMILY,SUM(GUIDE_POSTS_ROW_COUNT),SUM(GUIDE_POSTS_WIDTH),COUNT(*) from \"SYSTEM\".STATS where PHYSICAL_NAME = '" + physicalTableName + "' GROUP BY COLUMN_FAMILY ORDER BY COLUMN_FAMILY");
assertTrue(rs.next());
assertEquals("A", rs.getString(1));
assertEquals(24, rs.getInt(2));
assertEquals(columnEncoded ? (mutable ? 12252 : 13624) : 12144, rs.getInt(3));
assertEquals(oneCellPerColFamliyStorageScheme ? 12 : 11, rs.getInt(4));
assertTrue(rs.next());
assertEquals("B", rs.getString(1));
assertEquals(oneCellPerColFamliyStorageScheme ? 24 : 20, rs.getInt(2));
assertEquals(columnEncoded ? (mutable ? 5600 : 6972) : 5540, rs.getInt(3));
assertEquals(oneCellPerColFamliyStorageScheme ? 6 : 5, rs.getInt(4));
assertTrue(rs.next());
// C and D received all 25 rows (20 full + 5 partial), so their stats match.
assertEquals("C", rs.getString(1));
assertEquals(24, rs.getInt(2));
assertEquals(columnEncoded ? (mutable ? 6724 : 6988) : 6652, rs.getInt(3));
assertEquals(6, rs.getInt(4));
assertTrue(rs.next());
assertEquals("D", rs.getString(1));
assertEquals(24, rs.getInt(2));
assertEquals(columnEncoded ? (mutable ? 6724 : 6988) : 6652, rs.getInt(3));
assertEquals(6, rs.getInt(4));
assertFalse(rs.next());
// Disable stats
conn.createStatement().execute("ALTER TABLE " + fullTableName + " SET " + PhoenixDatabaseMetaData.GUIDE_POSTS_WIDTH + "=0");
TestUtil.analyzeTable(conn, fullTableName);
// Assert that there are no more guideposts
rs = conn.createStatement().executeQuery("SELECT count(1) FROM " + PhoenixDatabaseMetaData.SYSTEM_STATS_NAME + " WHERE " + PhoenixDatabaseMetaData.PHYSICAL_NAME + "='" + physicalTableName + "' AND " + PhoenixDatabaseMetaData.COLUMN_FAMILY + " IS NOT NULL");
assertTrue(rs.next());
assertEquals(0, rs.getLong(1));
assertFalse(rs.next());
rs = conn.createStatement().executeQuery("EXPLAIN SELECT * FROM " + fullTableName);
// With stats disabled the plan falls back to a single chunk.
assertEquals("CLIENT 1-CHUNK PARALLEL 1-WAY FULL SCAN OVER " + physicalTableName, QueryUtil.getExplainPlan(rs));
}
Example usage of org.apache.phoenix.query.KeyRange in the Apache Phoenix project:
the getSplits method of the TestUtil class.
/**
 * Compiles a SELECT over {@code tableName} with an optional PK range and an
 * optional extra predicate, executes it, and returns the query plan's splits.
 *
 * @param conn              connection used to prepare and run the query
 * @param tableName         table to query
 * @param pkCol             primary key column the range bounds apply to
 * @param lowerRange        inclusive lower bound for pkCol, or null for none
 * @param upperRange        exclusive upper bound for pkCol, or null for none
 * @param whereClauseSuffix additional predicate (no leading WHERE/AND), or null
 * @param selectClause      projection to use in the SELECT
 * @return the key ranges (splits) of the compiled query plan
 * @throws SQLException if preparation or execution fails
 */
public static List<KeyRange> getSplits(Connection conn, String tableName, String pkCol, byte[] lowerRange, byte[] upperRange, String whereClauseSuffix, String selectClause) throws SQLException {
    // Range predicate on the PK column; empty when neither bound is given.
    String whereClauseStart = (lowerRange == null && upperRange == null ? "" : " WHERE " + ((lowerRange != null ? (pkCol + " >= ? " + (upperRange != null ? " AND " : "")) : "") + (upperRange != null ? (pkCol + " < ?") : "")));
    // BUG FIX: when both a range predicate and a suffix were supplied, the
    // original expression evaluated to just " AND " + suffix, dropping the
    // range predicate (and its WHERE keyword and '?' placeholders) entirely,
    // which then mismatched the setBytes() parameter bindings below.
    String whereClause;
    if (whereClauseSuffix == null) {
        whereClause = whereClauseStart;
    } else if (whereClauseStart.length() == 0) {
        whereClause = " WHERE " + whereClauseSuffix;
    } else {
        whereClause = whereClauseStart + " AND " + whereClauseSuffix;
    }
    // NO_INDEX hint so the plan (and its splits) comes from the data table.
    String query = "SELECT /*+ NO_INDEX */ " + selectClause + " FROM " + tableName + whereClause;
    PhoenixPreparedStatement pstmt = conn.prepareStatement(query).unwrap(PhoenixPreparedStatement.class);
    // Bind the range bounds in the same order their '?' markers were emitted.
    if (lowerRange != null) {
        pstmt.setBytes(1, lowerRange);
    }
    if (upperRange != null) {
        pstmt.setBytes(lowerRange != null ? 2 : 1, upperRange);
    }
    pstmt.execute();
    List<KeyRange> keyRanges = pstmt.getQueryPlan().getSplits();
    return keyRanges;
}
Example usage of org.apache.phoenix.query.KeyRange in the Apache Phoenix project:
the testUpdatableViewIndex method of the BaseViewIT class.
// Exercises index creation over an updatable view (local or global,
// optionally salted), checking split counts after stats collection and the
// exact EXPLAIN plans for queries served by each view index.
// Returns the physical table name scanned plus the Scan of the final query.
protected Pair<String, Scan> testUpdatableViewIndex(Integer saltBuckets, boolean localIndex, String viewName) throws Exception {
ResultSet rs;
Connection conn = DriverManager.getConnection(getUrl());
String viewIndexName1 = "I_" + generateUniqueName();
String viewIndexPhysicalName = MetaDataUtil.getViewIndexName(schemaName, tableName);
if (localIndex) {
conn.createStatement().execute("CREATE LOCAL INDEX " + viewIndexName1 + " on " + viewName + "(k3)");
} else {
conn.createStatement().execute("CREATE INDEX " + viewIndexName1 + " on " + viewName + "(k3) include (s)");
}
conn.createStatement().execute("UPSERT INTO " + viewName + "(k2,S,k3) VALUES(120,'foo',50.0)");
conn.commit();
analyzeTable(conn, viewName);
List<KeyRange> splits = getAllSplits(conn, viewIndexName1);
// More guideposts with salted, since it's already pre-split at salt buckets
assertEquals(saltBuckets == null ? 6 : 8, splits.size());
// Query on k3 should be served by the first view index.
String query = "SELECT k1, k2, k3, s FROM " + viewName + " WHERE k3 = 51.0";
rs = conn.createStatement().executeQuery(query);
assertTrue(rs.next());
assertEquals(1, rs.getInt(1));
assertEquals(121, rs.getInt(2));
assertTrue(BigDecimal.valueOf(51.0).compareTo(rs.getBigDecimal(3)) == 0);
assertEquals("bar", rs.getString(4));
assertFalse(rs.next());
rs = conn.createStatement().executeQuery("EXPLAIN " + query);
String queryPlan = QueryUtil.getExplainPlan(rs);
// Local indexes scan the data table itself; global view indexes scan the
// separate view-index physical table, keyed by a negative (Short.MIN_VALUE
// based) view index id, with a per-bucket range when salted.
if (localIndex) {
assertEquals("CLIENT PARALLEL " + (saltBuckets == null ? 1 : saltBuckets) + "-WAY RANGE SCAN OVER " + fullTableName + " [1,51]\n" + "    SERVER FILTER BY FIRST KEY ONLY\n" + "CLIENT MERGE SORT", queryPlan);
} else {
assertEquals(saltBuckets == null ? "CLIENT PARALLEL 1-WAY RANGE SCAN OVER " + viewIndexPhysicalName + " [" + Short.MIN_VALUE + ",51]" : "CLIENT PARALLEL " + saltBuckets + "-WAY RANGE SCAN OVER " + viewIndexPhysicalName + " [0," + Short.MIN_VALUE + ",51] - [" + (saltBuckets.intValue() - 1) + "," + Short.MIN_VALUE + ",51]\nCLIENT MERGE SORT", queryPlan);
}
// Second index on s, to verify stats behavior before and after ANALYZE.
String viewIndexName2 = "I_" + generateUniqueName();
if (localIndex) {
conn.createStatement().execute("CREATE LOCAL INDEX " + viewIndexName2 + " on " + viewName + "(s)");
} else {
conn.createStatement().execute("CREATE INDEX " + viewIndexName2 + " on " + viewName + "(s)");
}
// new index hasn't been analyzed yet
splits = getAllSplits(conn, viewIndexName2);
assertEquals(saltBuckets == null ? 1 : 3, splits.size());
// analyze table should analyze all view data
analyzeTable(conn, fullTableName);
splits = getAllSplits(conn, viewIndexName2);
assertEquals(saltBuckets == null ? 6 : 8, splits.size());
// Query on s should be served by the second view index; capture its Scan
// so the caller can make further assertions on it.
query = "SELECT k1, k2, s FROM " + viewName + " WHERE s = 'foo'";
Statement statement = conn.createStatement();
rs = statement.executeQuery(query);
Scan scan = statement.unwrap(PhoenixStatement.class).getQueryPlan().getContext().getScan();
assertTrue(rs.next());
assertEquals(1, rs.getInt(1));
assertEquals(120, rs.getInt(2));
assertEquals("foo", rs.getString(3));
assertFalse(rs.next());
rs = conn.createStatement().executeQuery("EXPLAIN " + query);
String physicalTableName;
// The second view index id is Short.MIN_VALUE + 1 (local: sequence id 2).
if (localIndex) {
physicalTableName = tableName;
assertEquals("CLIENT PARALLEL " + (saltBuckets == null ? 1 : saltBuckets) + "-WAY RANGE SCAN OVER " + fullTableName + " [" + (2) + ",'foo']\n" + "    SERVER FILTER BY FIRST KEY ONLY\n" + "CLIENT MERGE SORT", QueryUtil.getExplainPlan(rs));
} else {
physicalTableName = viewIndexPhysicalName;
assertEquals(saltBuckets == null ? "CLIENT PARALLEL 1-WAY RANGE SCAN OVER " + viewIndexPhysicalName + " [" + (Short.MIN_VALUE + 1) + ",'foo']\n" + "    SERVER FILTER BY FIRST KEY ONLY" : "CLIENT PARALLEL " + saltBuckets + "-WAY RANGE SCAN OVER " + viewIndexPhysicalName + " [0," + (Short.MIN_VALUE + 1) + ",'foo'] - [" + (saltBuckets.intValue() - 1) + "," + (Short.MIN_VALUE + 1) + ",'foo']\n" + "    SERVER FILTER BY FIRST KEY ONLY\n" + "CLIENT MERGE SORT", QueryUtil.getExplainPlan(rs));
}
conn.close();
return new Pair<>(physicalTableName, scan);
}
Aggregations