Use of org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter in project phoenix by apache.
In class PhoenixRuntimeIT, method testGetTenantIdExpression:
private void testGetTenantIdExpression(boolean isSalted) throws Exception {
    Connection conn = DriverManager.getConnection(getUrl());
    conn.setAutoCommit(true);
    String tableName = generateUniqueName();
    String sequenceName = generateUniqueName();
    String t1 = generateUniqueName();
    // make t2 sort after t1
    String t2 = t1 + generateUniqueName();
    // Multi-tenant table: the leading PK column holds the tenant ID
    conn.createStatement().execute("CREATE TABLE " + tableName
            + " (k1 VARCHAR NOT NULL, k2 VARCHAR, CONSTRAINT PK PRIMARY KEY(K1,K2)) MULTI_TENANT=true"
            + (isSalted ? ",SALT_BUCKETS=3" : ""));
    conn.createStatement().execute("CREATE SEQUENCE " + sequenceName);
    conn.createStatement().execute("UPSERT INTO " + tableName + " VALUES('" + t1 + "','x')");
    conn.createStatement().execute("UPSERT INTO " + tableName + " VALUES('" + t2 + "','y')");
    Properties props = PropertiesUtil.deepCopy(TestUtil.TEST_PROPERTIES);
    props.setProperty(PhoenixRuntime.TENANT_ID_ATTRIB, t1);
    Connection tsconn = DriverManager.getConnection(getUrl(), props);
    tsconn.createStatement().execute("CREATE SEQUENCE " + sequenceName);
    Expression e1 = PhoenixRuntime.getTenantIdExpression(tsconn, PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_NAME);
    HTableInterface htable1 = tsconn.unwrap(PhoenixConnection.class).getQueryServices()
            .getTable(PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_NAME_BYTES);
    assertTenantIds(e1, htable1, new FirstKeyOnlyFilter(), new String[] { "", t1 });
    String viewName = generateUniqueName();
    tsconn.createStatement().execute("CREATE VIEW " + viewName + "(V1 VARCHAR) AS SELECT * FROM " + tableName);
    Expression e2 = PhoenixRuntime.getTenantIdExpression(tsconn, PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME);
    HTableInterface htable2 = conn.unwrap(PhoenixConnection.class).getQueryServices()
            .getTable(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME_BYTES);
    assertTenantIds(e2, htable2, getUserTableAndViewsFilter(), new String[] { "", t1 });
    Expression e3 = PhoenixRuntime.getTenantIdExpression(conn, tableName);
    HTableInterface htable3 = conn.unwrap(PhoenixConnection.class).getQueryServices().getTable(Bytes.toBytes(tableName));
    assertTenantIds(e3, htable3, new FirstKeyOnlyFilter(), new String[] { t1, t2 });
    // A table that is not MULTI_TENANT has no tenant ID expression
    String baseTableName = generateUniqueName();
    conn.createStatement().execute("CREATE TABLE " + baseTableName + " (k1 VARCHAR PRIMARY KEY)");
    Expression e4 = PhoenixRuntime.getTenantIdExpression(conn, baseTableName);
    assertNull(e4);
    // An index on a tenant-specific view lives in the shared view-index table
    String indexName1 = generateUniqueName();
    tsconn.createStatement().execute("CREATE INDEX " + indexName1 + " ON " + viewName + "(V1)");
    Expression e5 = PhoenixRuntime.getTenantIdExpression(tsconn, indexName1);
    HTableInterface htable5 = tsconn.unwrap(PhoenixConnection.class).getQueryServices()
            .getTable(Bytes.toBytes(MetaDataUtil.VIEW_INDEX_TABLE_PREFIX + tableName));
    assertTenantIds(e5, htable5, new FirstKeyOnlyFilter(), new String[] { t1 });
    String indexName2 = generateUniqueName();
    conn.createStatement().execute("CREATE INDEX " + indexName2 + " ON " + tableName + "(k2)");
    Expression e6 = PhoenixRuntime.getTenantIdExpression(conn, indexName2);
    HTableInterface htable6 = conn.unwrap(PhoenixConnection.class).getQueryServices().getTable(Bytes.toBytes(indexName2));
    assertTenantIds(e6, htable6, new FirstKeyOnlyFilter(), new String[] { t1, t2 });
    // For a non-multi-tenant table, fall back to the first PK column expression
    tableName = generateUniqueName() + "BAR_" + (isSalted ? "SALTED" : "UNSALTED");
    conn.createStatement().execute("CREATE TABLE " + tableName
            + " (k1 VARCHAR NOT NULL, k2 VARCHAR, CONSTRAINT PK PRIMARY KEY(K1,K2)) "
            + (isSalted ? "SALT_BUCKETS=3" : ""));
    conn.createStatement().execute("UPSERT INTO " + tableName + " VALUES('" + t1 + "','x')");
    conn.createStatement().execute("UPSERT INTO " + tableName + " VALUES('" + t2 + "','y')");
    Expression e7 = PhoenixRuntime.getFirstPKColumnExpression(conn, tableName);
    HTableInterface htable7 = conn.unwrap(PhoenixConnection.class).getQueryServices().getTable(Bytes.toBytes(tableName));
    assertTenantIds(e7, htable7, new FirstKeyOnlyFilter(), new String[] { t1, t2 });
}
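The assertTenantIds helper itself is not included in this excerpt. Here is a minimal sketch of what it plausibly does, inferred only from the call sites above (the signature comes from those calls; the body is an assumption): scan the HBase table with the supplied filter, evaluate the tenant-ID expression against each row, and check that the expected tenant IDs appear. Imports are omitted to match the rest of the listing.

private static void assertTenantIds(Expression e, HTableInterface htable, Filter filter,
        String[] expectedTenantIds) throws IOException {
    // Hypothetical body: collect the tenant ID produced by the expression
    // for every row the filter lets through.
    ImmutableBytesWritable ptr = new ImmutableBytesWritable();
    Scan scan = new Scan();
    scan.setFilter(filter);
    Set<String> actualTenantIds = new HashSet<>();
    try (ResultScanner scanner = htable.getScanner(scan)) {
        for (Result result : scanner) {
            ResultTuple tuple = new ResultTuple(result);
            e.evaluate(tuple, ptr); // writes the tenant ID bytes into ptr
            actualTenantIds.add(Bytes.toString(ptr.get(), ptr.getOffset(), ptr.getLength()));
        }
    }
    assertTrue(actualTenantIds.containsAll(Arrays.asList(expectedTenantIds)));
}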
Use of org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter in project phoenix by apache.
In class QueryCompilerTest, method testMultiCFProjection:
@Test
public void testMultiCFProjection() throws Exception {
    Connection conn = DriverManager.getConnection(getUrl());
    String ddl = "CREATE TABLE multiCF (k integer primary key, a.a varchar, b.b varchar)";
    conn.createStatement().execute(ddl);
    String query = "SELECT COUNT(*) FROM multiCF";
    QueryPlan plan = getQueryPlan(query, Collections.emptyList());
    plan.iterator();
    Scan scan = plan.getContext().getScan();
    // COUNT(*) needs no column values, so the plan applies FirstKeyOnlyFilter
    // and projects only a single column family.
    assertTrue(scan.getFilter() instanceof FirstKeyOnlyFilter);
    assertEquals(1, scan.getFamilyMap().size());
}
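FirstKeyOnlyFilter earns its place in this plan because COUNT(*) only needs to know that a row exists, not what it contains: the filter makes each row contribute a single cell to the scan. The same idea works directly against the raw HBase client API of this era; a minimal sketch, assuming an already-opened HTableInterface named htable:

// Count rows cheaply: with FirstKeyOnlyFilter each Result carries only the
// first cell of its row, so far fewer bytes cross the wire than a full scan.
Scan scan = new Scan();
scan.setFilter(new FirstKeyOnlyFilter());
long rowCount = 0;
try (ResultScanner scanner = htable.getScanner(scan)) {
    for (Result ignored : scanner) {
        rowCount++; // one Result per row
    }
}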
Use of org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter in project phoenix by apache.
In class QueryDatabaseMetaDataIT, method testCreateDropTable:
@Test
public void testCreateDropTable() throws Exception {
    long ts = nextTimestamp();
    String tenantId = getOrganizationId();
    initATableValues(ATABLE_NAME, tenantId, getDefaultSplits(tenantId), null, ts, getUrl(), null);
    ensureTableCreated(getUrl(), BTABLE_NAME, BTABLE_NAME, ts - 2);
    ensureTableCreated(getUrl(), PTSDB_NAME, PTSDB_NAME, ts - 2);
    Properties props = new Properties();
    props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(ts + 5));
    Connection conn5 = DriverManager.getConnection(getUrl(), props);
    String query = "SELECT a_string FROM aTable";
    // Data should still be there b/c we only dropped the schema
    props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(ts + 8));
    assertTrue(conn5.prepareStatement(query).executeQuery().next());
    conn5.createStatement().executeUpdate("DROP TABLE " + ATABLE_NAME);
    // Confirm that the data is no longer there because we dropped the table.
    // This needs to be done natively b/c the metadata is gone.
    HTableInterface htable = conn5.unwrap(PhoenixConnection.class).getQueryServices()
            .getTable(SchemaUtil.getTableNameAsBytes(ATABLE_SCHEMA_NAME, ATABLE_NAME));
    Scan scan = new Scan();
    scan.setFilter(new FirstKeyOnlyFilter());
    scan.setTimeRange(0, ts + 9);
    assertNull(htable.getScanner(scan).next());
    conn5.close();
    // Should still work b/c we're at an earlier timestamp than when the table was deleted
    props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(ts + 2));
    Connection conn2 = DriverManager.getConnection(getUrl(), props);
    assertTrue(conn2.prepareStatement(query).executeQuery().next());
    conn2.close();
    props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(ts + 10));
    Connection conn10 = DriverManager.getConnection(getUrl(), props);
    try {
        conn10.prepareStatement(query).executeQuery().next();
        fail();
    } catch (TableNotFoundException e) {
        // expected: the table no longer exists at ts + 10
    }
}
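Every timestamp in this test flows through Phoenix's CURRENT_SCN_ATTRIB property, which pins a connection to a point in time: reads, writes, and DDL through that connection happen "as of" that timestamp, which is why a connection at ts + 2 still sees the table while one at ts + 10 gets TableNotFoundException. The idiom in isolation, as a sketch (url and scn are placeholders):

// Pin a Phoenix connection to a specific timestamp (SCN): queries through it
// see only data and metadata whose timestamps are <= scn.
Properties props = new Properties();
props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(scn));
try (Connection conn = DriverManager.getConnection(url, props)) {
    ResultSet rs = conn.createStatement().executeQuery("SELECT a_string FROM aTable");
    // rs reflects the table's state as of scn
}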
Use of org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter in project phoenix by apache.
In class MetaDataEndpointImpl, method buildDeletedSchema:
private PSchema buildDeletedSchema(byte[] key, ImmutableBytesPtr cacheKey, Region region, long clientTimeStamp) throws IOException {
    if (clientTimeStamp == HConstants.LATEST_TIMESTAMP) {
        return null;
    }
    Scan scan = MetaDataUtil.newTableRowsScan(key, clientTimeStamp, HConstants.LATEST_TIMESTAMP);
    scan.setFilter(new FirstKeyOnlyFilter());
    scan.setRaw(true); // raw scan so delete markers are returned
    List<Cell> results = Lists.<Cell>newArrayList();
    try (RegionScanner scanner = region.getScanner(scan)) {
        scanner.next(results);
    }
    // HBase ignores the time range on a raw scan (HBASE-7362)
    if (!results.isEmpty() && results.get(0).getTimestamp() > clientTimeStamp) {
        Cell kv = results.get(0);
        if (kv.getTypeByte() == Type.Delete.getCode()) {
            Cache<ImmutableBytesPtr, PMetaDataEntity> metaDataCache =
                    GlobalCache.getInstance(this.env).getMetaDataCache();
            PSchema schema = newDeletedSchemaMarker(kv.getTimestamp());
            metaDataCache.put(cacheKey, schema);
            return schema;
        }
    }
    return null;
}
Use of org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter in project phoenix by apache.
In class MetaDataEndpointImpl, method buildDeletedFunction:
private PFunction buildDeletedFunction(byte[] key, ImmutableBytesPtr cacheKey, Region region, long clientTimeStamp) throws IOException {
    if (clientTimeStamp == HConstants.LATEST_TIMESTAMP) {
        return null;
    }
    Scan scan = MetaDataUtil.newTableRowsScan(key, clientTimeStamp, HConstants.LATEST_TIMESTAMP);
    scan.setFilter(new FirstKeyOnlyFilter());
    scan.setRaw(true);
    List<Cell> results = Lists.<Cell>newArrayList();
    try (RegionScanner scanner = region.getScanner(scan)) {
        scanner.next(results);
    }
    // HBase ignores the time range on a raw scan (HBASE-7362)
    if (!results.isEmpty() && results.get(0).getTimestamp() > clientTimeStamp) {
        Cell kv = results.get(0);
        if (kv.getTypeByte() == Type.Delete.getCode()) {
            Cache<ImmutableBytesPtr, PMetaDataEntity> metaDataCache =
                    GlobalCache.getInstance(this.env).getMetaDataCache();
            PFunction function = newDeletedFunctionMarker(kv.getTimestamp());
            metaDataCache.put(cacheKey, function);
            return function;
        }
    }
    return null;
}
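buildDeletedSchema and buildDeletedFunction rely on the same trick: setRaw(true) makes the scan return delete markers as ordinary cells, and FirstKeyOnlyFilter trims the result to the row's first cell, so inspecting that one cell is enough to tell whether the row was deleted after clientTimeStamp. The pattern condensed into a standalone sketch (the method name and single-row scan bounds are illustrative, not from the source):

// Returns true if the first cell of the row is a delete marker placed after
// clientTimeStamp. Raw scans surface delete markers; FirstKeyOnlyFilter
// keeps only the first cell per row.
private static boolean deletedAfter(Region region, byte[] key, long clientTimeStamp) throws IOException {
    Scan scan = new Scan(key, Bytes.add(key, new byte[] { 0 })); // single-row range
    scan.setFilter(new FirstKeyOnlyFilter());
    scan.setRaw(true);
    List<Cell> results = Lists.newArrayList();
    try (RegionScanner scanner = region.getScanner(scan)) {
        scanner.next(results);
    }
    // Re-check the timestamp by hand: raw scans ignore time ranges (HBASE-7362)
    return !results.isEmpty()
            && results.get(0).getTimestamp() > clientTimeStamp
            && results.get(0).getTypeByte() == KeyValue.Type.Delete.getCode();
}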