Example usage of org.apache.phoenix.schema.ColumnNotFoundException in the Apache Phoenix project: class PostDDLCompiler, method compile.
/**
 * Compiles a post-DDL maintenance plan that is executed as a server-side scan
 * over each of the given tables. The scan attributes set below (DELETE_AGG,
 * DELETE_CF/DELETE_CQ, EMPTY_CF/EMPTY_COLUMN_QUALIFIER) drive the server-side
 * work; this client runs a COUNT(1) aggregate per table and tallies the counts.
 * Per the inline note below, this handles:
 *   1) deleting all rows after DROP TABLE / DROP INDEX (deleteList empty),
 *   2) deleting all values of a dropped column after ALTER TABLE DROP COLUMN,
 *   3) updating rows to carry an empty KV for a new empty column family.
 *
 * @param tableRefs  tables to scan; may be empty, in which case nothing is done
 * @param emptyCF    column family that should receive the empty key value, or null
 * @param projectCFs column families to project into the scan, or null to project all
 * @param deleteList columns being dropped (an EMPTY list means delete whole rows), or null
 * @param timestamp  upper bound for the scan's time range
 * @return a MutationPlan whose execute() performs the work and reports the row count
 * @throws SQLException if plan construction fails
 */
public MutationPlan compile(final List<TableRef> tableRefs, final byte[] emptyCF, final List<byte[]> projectCFs, final List<PColumn> deleteList, final long timestamp) throws SQLException {
PhoenixStatement statement = new PhoenixStatement(connection);
// Stub resolver: the tables are already resolved (tableRefs is passed in), so
// only getTables()/getFunctions() are answerable; column/table/schema lookups
// are deliberately unsupported at this level (a per-table resolver is built
// inside execute()).
final StatementContext context = new StatementContext(statement, new ColumnResolver() {
@Override
public List<TableRef> getTables() {
return tableRefs;
}
@Override
public TableRef resolveTable(String schemaName, String tableName) throws SQLException {
throw new UnsupportedOperationException();
}
@Override
public ColumnRef resolveColumn(String schemaName, String tableName, String colName) throws SQLException {
throw new UnsupportedOperationException();
}
@Override
public List<PFunction> getFunctions() {
return Collections.<PFunction>emptyList();
}
@Override
public PFunction resolveFunction(String functionName) throws SQLException {
throw new FunctionNotFoundException(functionName);
}
@Override
public boolean hasUDFs() {
return false;
}
@Override
public PSchema resolveSchema(String schemaName) throws SQLException {
throw new SchemaNotFoundException(schemaName);
}
@Override
public List<PSchema> getSchemas() {
throw new UnsupportedOperationException();
}
}, scan, new SequenceManager(statement));
return new BaseMutationPlan(context, Operation.UPSERT) {
/* FIXME */
@Override
public MutationState execute() throws SQLException {
// Nothing to scan: report zero mutations.
// NOTE(review): the second constructor argument (1000) appears to be a
// max-size limit — confirm against MutationState's constructor signature.
if (tableRefs.isEmpty()) {
return new MutationState(0, 1000, connection);
}
// Run with auto-commit enabled so each per-table operation is sent
// immediately; the original setting is restored in the finally block below.
boolean wasAutoCommit = connection.getAutoCommit();
try {
connection.setAutoCommit(true);
SQLException sqlE = null;
/*
* Handles:
* 1) deletion of all rows for a DROP TABLE and subsequently deletion of all rows for a DROP INDEX;
* 2) deletion of all column values for a ALTER TABLE DROP COLUMN
* 3) updating the necessary rows to have an empty KV
* 4) updating table stats
*/
long totalMutationCount = 0;
for (final TableRef tableRef : tableRefs) {
// Fresh scan per table, seeded from the shared context's scan.
Scan scan = ScanUtil.newScan(context.getScan());
SelectStatement select = SelectStatement.COUNT_ONE;
// Per-table resolver: resolves columns against this specific tableRef
// (the outer resolver deliberately cannot resolve columns at all).
ColumnResolver resolver = new ColumnResolver() {
@Override
public List<TableRef> getTables() {
return Collections.singletonList(tableRef);
}
@Override
public java.util.List<PFunction> getFunctions() {
return Collections.emptyList();
}
@Override
public TableRef resolveTable(String schemaName, String tableName) throws SQLException {
throw new UnsupportedOperationException();
}
@Override
public ColumnRef resolveColumn(String schemaName, String tableName, String colName) throws SQLException {
// Family-qualified names resolve through the column family;
// bare names resolve directly on the table.
PColumn column = tableName != null ? tableRef.getTable().getColumnFamily(tableName).getPColumnForColumnName(colName) : tableRef.getTable().getColumnForColumnName(colName);
return new ColumnRef(tableRef, column.getPosition());
}
@Override
public PFunction resolveFunction(String functionName) throws SQLException {
throw new UnsupportedOperationException();
}
@Override
public boolean hasUDFs() {
return false;
}
@Override
public List<PSchema> getSchemas() {
throw new UnsupportedOperationException();
}
@Override
public PSchema resolveSchema(String schemaName) throws SQLException {
throw new SchemaNotFoundException(schemaName);
}
};
PhoenixStatement statement = new PhoenixStatement(connection);
StatementContext context = new StatementContext(statement, resolver, scan, new SequenceManager(statement));
long ts = timestamp;
// Transactional tables store cell timestamps in nanoseconds, so a real
// (non-LATEST) timestamp must be converted before bounding the scan.
if (ts != HConstants.LATEST_TIMESTAMP && tableRef.getTable().isTransactional()) {
ts = TransactionUtil.convertToNanoseconds(ts);
}
ScanUtil.setTimeRange(scan, scan.getTimeRange().getMin(), ts);
// Tell the server which family/qualifier should receive the empty KV.
if (emptyCF != null) {
scan.setAttribute(BaseScannerRegionObserver.EMPTY_CF, emptyCF);
scan.setAttribute(BaseScannerRegionObserver.EMPTY_COLUMN_QUALIFIER, EncodedColumnsUtil.getEmptyKeyValueInfo(tableRef.getTable()).getFirst());
}
ServerCache cache = null;
try {
if (deleteList != null) {
// Empty deleteList means "delete whole rows"; a non-empty list
// means "delete the values of these dropped columns".
if (deleteList.isEmpty()) {
scan.setAttribute(BaseScannerRegionObserver.DELETE_AGG, QueryConstants.TRUE);
// In the case of a row deletion, add index metadata so mutable secondary indexing works
/* TODO: we currently manually run a scan to delete the index data here
ImmutableBytesWritable ptr = context.getTempPtr();
tableRef.getTable().getIndexMaintainers(ptr);
if (ptr.getLength() > 0) {
IndexMetaDataCacheClient client = new IndexMetaDataCacheClient(connection, tableRef);
cache = client.addIndexMetadataCache(context.getScanRanges(), ptr);
byte[] uuidValue = cache.getId();
scan.setAttribute(PhoenixIndexCodec.INDEX_UUID, uuidValue);
}
*/
} else {
// In the case of the empty key value column family changing, do not send the index
// metadata, as we're currently managing this from the client. It's possible for the
// data empty column family to stay the same, while the index empty column family
// changes.
PColumn column = deleteList.get(0);
byte[] cq = column.getColumnQualifierBytes();
if (emptyCF == null) {
scan.addColumn(column.getFamilyName().getBytes(), cq);
}
scan.setAttribute(BaseScannerRegionObserver.DELETE_CF, column.getFamilyName().getBytes());
scan.setAttribute(BaseScannerRegionObserver.DELETE_CQ, cq);
}
}
// Decide which column families to project: all of them by default,
// or only the caller-specified projectCFs.
List<byte[]> columnFamilies = Lists.newArrayListWithExpectedSize(tableRef.getTable().getColumnFamilies().size());
if (projectCFs == null) {
for (PColumnFamily family : tableRef.getTable().getColumnFamilies()) {
columnFamilies.add(family.getName().getBytes());
}
} else {
for (byte[] projectCF : projectCFs) {
columnFamilies.add(projectCF);
}
}
// Need to project all column families into the scan, since we haven't yet created our empty key value
RowProjector projector = ProjectionCompiler.compile(context, SelectStatement.COUNT_ONE, GroupBy.EMPTY_GROUP_BY);
context.getAggregationManager().compile(context, GroupBy.EMPTY_GROUP_BY);
// Project whole families (not just the empty KV), since at this point
// the empty key value has not been added everywhere yet.
// NOTE(review): columnFamilies is always non-null here (allocated above),
// so this guard is redundant.
if (columnFamilies != null) {
scan.getFamilyMap().clear();
for (byte[] family : columnFamilies) {
scan.addFamily(family);
}
projector = new RowProjector(projector, false);
}
// Compile the WHERE clause into the scan; if the referenced column or
// family no longer exists (or is ambiguous), the drop has effectively
// already happened for this table, so skip it and continue.
try {
// Since dropping a VIEW does not affect the underlying data, we do
// not need to pass through the view statement here.
// Push where clause into scan
WhereCompiler.compile(context, select);
} catch (ColumnFamilyNotFoundException e) {
continue;
} catch (ColumnNotFoundException e) {
continue;
} catch (AmbiguousColumnException e) {
continue;
}
// COUNT(1) aggregate over the prepared scan; presumably the server-side
// observer reacts to the attributes set above while the client only
// receives the row count — TODO confirm against BaseScannerRegionObserver.
QueryPlan plan = new AggregatePlan(context, select, tableRef, projector, null, null, OrderBy.EMPTY_ORDER_BY, null, GroupBy.EMPTY_GROUP_BY, null, null);
try {
ResultIterator iterator = plan.iterator();
try {
Tuple row = iterator.next();
ImmutableBytesWritable ptr = context.getTempPtr();
totalMutationCount += (Long) projector.getColumnProjector(0).getValue(row, PLong.INSTANCE, ptr);
} catch (SQLException e) {
sqlE = e;
} finally {
// Chain exceptions so a failure in close() never masks the
// original failure from next()/getValue().
try {
iterator.close();
} catch (SQLException e) {
if (sqlE == null) {
sqlE = e;
} else {
sqlE.setNextException(e);
}
} finally {
if (sqlE != null) {
throw sqlE;
}
}
}
} catch (TableNotFoundException e) {
// Ignore and continue, as HBase throws when table hasn't been written to
// FIXME: Remove if this is fixed in 0.96
}
} finally {
if (cache != null) {
// Remove server cache if there is one
cache.close();
}
}
}
// Expose the accumulated row count as this plan's update count.
final long count = totalMutationCount;
return new MutationState(1, 1000, connection) {
@Override
public long getUpdateCount() {
return count;
}
};
} finally {
// Restore the caller's auto-commit setting.
if (!wasAutoCommit)
connection.setAutoCommit(wasAutoCommit);
}
}
};
}
Example usage of org.apache.phoenix.schema.ColumnNotFoundException in the Apache Phoenix project: class AlterMultiTenantTableWithViewsIT, method testCacheInvalidatedAfterDroppingColumnFromBaseTableWithViews.
/**
 * Verifies that dropping a column from a multi-tenant base table through a
 * global connection invalidates the cached metadata of an existing tenant
 * view: the dropped column must no longer be resolvable from either the view
 * or the base table on the tenant connection.
 */
@Test
public void testCacheInvalidatedAfterDroppingColumnFromBaseTableWithViews() throws Exception {
String baseTable = "testCacheInvalidatedAfterDroppingColumnFromBaseTableWithViews";
String viewName = baseTable + "_view";
String tenantId = "tenantId";
try (Connection globalConn = DriverManager.getConnection(getUrl())) {
String tableDDL = "CREATE TABLE " + baseTable + " (TENANT_ID VARCHAR NOT NULL, PK1 VARCHAR NOT NULL, V1 VARCHAR CONSTRAINT NAME_PK PRIMARY KEY(TENANT_ID, PK1)) MULTI_TENANT = true";
globalConn.createStatement().execute(tableDDL);
Properties tenantProps = new Properties();
tenantProps.setProperty(PhoenixRuntime.TENANT_ID_ATTRIB, tenantId);
// Create a tenant-specific view over the base table.
try (Connection tenantConn = DriverManager.getConnection(getUrl(), tenantProps)) {
String viewDDL = "CREATE VIEW " + viewName + " AS SELECT * FROM " + baseTable;
tenantConn.createStatement().execute(viewDDL);
// Drop column V1 from the base table using the global connection.
// (Previous comment incorrectly described this as adding a column.)
globalConn.createStatement().execute("ALTER TABLE " + baseTable + " DROP COLUMN V1");
// The tenant connection's cached metadata must be invalidated:
// V1 is gone from the view...
try {
tenantConn.createStatement().execute("SELECT V1 FROM " + viewName);
fail("V1 should not be selectable from the view after being dropped from the base table");
} catch (ColumnNotFoundException e) {
// expected: column was dropped
}
// ...and from the base table itself.
try {
tenantConn.createStatement().execute("SELECT V1 FROM " + baseTable);
fail("V1 should not be selectable from the base table after being dropped");
} catch (ColumnNotFoundException e) {
// expected: column was dropped
}
}
}
}
Example usage of org.apache.phoenix.schema.ColumnNotFoundException in the Apache Phoenix project: class AutoPartitionViewsIT, method testAddDropColumns.
/**
 * Exercises ALTER TABLE/VIEW ADD and DROP COLUMN on a base table and a view
 * over it: after adding val2 (table) and val3 (view) and upserting one row,
 * the base table sees val1/val2 while the view sees val1/val2/val3; after
 * dropping both added columns, neither resolves through the view any more.
 */
@Test
public void testAddDropColumns() throws SQLException {
try (Connection baseConn = DriverManager.getConnection(getUrl());
Connection viewConn = isMultiTenant ? DriverManager.getConnection(TENANT_SPECIFIC_URL1) : DriverManager.getConnection(getUrl())) {
String tableName = generateUniqueName();
String autoSeqName = generateUniqueName();
String createTableDdl = String.format("CREATE TABLE " + tableName + " (%s metricId INTEGER NOT NULL, val1 DOUBLE, CONSTRAINT PK PRIMARY KEY( %s metricId)) %s", isMultiTenant ? "tenantId VARCHAR NOT NULL, " : "", isMultiTenant ? "tenantId, " : "", String.format(tableDDLOptions, autoSeqName));
baseConn.createStatement().execute(createTableDdl);
baseConn.createStatement().execute("CREATE SEQUENCE " + autoSeqName + " CACHE 1");
String metricView = generateUniqueName() + "_VIEW";
// Create a view over the base table.
viewConn.createStatement().execute("CREATE VIEW " + metricView + " AS SELECT * FROM " + tableName);
// Add val2 to the base table and val3 to the view only.
baseConn.createStatement().execute("ALTER TABLE " + tableName + " add val2 DOUBLE");
viewConn.createStatement().execute("ALTER VIEW " + metricView + " add val3 DOUBLE");
// Upsert a single row through the view.
viewConn.createStatement().execute("UPSERT INTO " + metricView + "(val1,val2,val3) VALUES(1.1,1.2,1.3)");
viewConn.commit();
// The base table exposes val1/val2 (plus tenantId when multi-tenant),
// but not the view-only val3.
ResultSet resultSet = baseConn.createStatement().executeQuery("SELECT * FROM " + tableName);
assertTrue(resultSet.next());
int columnOffset = 0;
if (isMultiTenant) {
assertEquals("tenant1", resultSet.getString(1));
columnOffset = 1;
}
assertEquals(1, resultSet.getInt(1 + columnOffset));
assertEquals(1.1, resultSet.getDouble(2 + columnOffset), 1e-6);
assertEquals(1.2, resultSet.getDouble(3 + columnOffset), 1e-6);
assertFalse(resultSet.next());
// The view exposes all of val1/val2/val3.
resultSet = viewConn.createStatement().executeQuery("SELECT * FROM " + metricView);
assertTrue(resultSet.next());
assertEquals(1, resultSet.getInt(1));
assertEquals(1.1, resultSet.getDouble(2), 1e-6);
assertEquals(1.2, resultSet.getDouble(3), 1e-6);
assertEquals(1.3, resultSet.getDouble(4), 1e-6);
assertFalse(resultSet.next());
// Drop val2 from the base table and val3 from the view.
baseConn.createStatement().execute("ALTER TABLE " + tableName + " DROP COLUMN val2");
viewConn.createStatement().execute("ALTER VIEW " + metricView + " DROP COLUMN val3");
// Neither dropped column may resolve through the view any more.
try {
viewConn.createStatement().executeQuery("SELECT val2 FROM " + metricView);
fail("column should have been dropped");
} catch (ColumnNotFoundException e) {
// expected
}
try {
viewConn.createStatement().executeQuery("SELECT val3 FROM " + metricView);
fail("column should have been dropped");
} catch (ColumnNotFoundException e) {
// expected
}
}
}
Example usage of org.apache.phoenix.schema.ColumnNotFoundException in the Apache Phoenix project: class ParseNodeRewriter, method visit.
/**
 * Rewrites a column reference that may actually be a SELECT-list alias.
 * A bare (table-unqualified) name found in the alias map is replaced by the
 * aliased expression unless the name also resolves to a real column; in that
 * case the alias is only accepted when it refers to the very same column,
 * otherwise the reference is ambiguous.
 *
 * @throws AmbiguousColumnException if the name is both a real column and an
 *         alias for a different expression
 */
@Override
public ParseNode visit(ColumnParseNode node) throws SQLException {
    // Only bare (table-unqualified) names can be aliases, and only when an
    // alias map is in effect.
    if (aliasMap == null || node.getTableName() != null) {
        return node;
    }
    ParseNode aliasedNode = aliasMap.get(node.getName());
    // No alias, or the alias is the node itself: nothing to rewrite.
    if (aliasedNode == null || node.equals(aliasedNode)) {
        return node;
    }
    ColumnRef directRef;
    try {
        directRef = resolver.resolveColumn(node.getSchemaName(), node.getTableName(), node.getName());
    } catch (ColumnNotFoundException e) {
        // The name is not a real column, so it can only mean the alias.
        return aliasedNode;
    }
    // The name is both a column and an alias; accept the alias only if it
    // resolves to the exact same column.
    if (aliasedNode instanceof ColumnParseNode) {
        ColumnParseNode aliasedColumnNode = (ColumnParseNode) aliasedNode;
        ColumnRef aliasedRef = resolver.resolveColumn(aliasedColumnNode.getSchemaName(), aliasedColumnNode.getTableName(), aliasedColumnNode.getName());
        if (aliasedRef.equals(directRef)) {
            return aliasedNode;
        }
    }
    // Otherwise the reference is genuinely ambiguous.
    throw new AmbiguousColumnException(node.getName());
}
Example usage of org.apache.phoenix.schema.ColumnNotFoundException in the Apache Phoenix project: class TenantSpecificTablesDDLIT, method testAddDropColumn.
/**
 * Verifies ALTER VIEW ADD/DROP COLUMN on a tenant-specific view: adding
 * tenant_col2 allows upserts and filtered counts against it; dropping
 * tenant_col keeps the existing rows intact but makes the column unresolvable.
 */
@Test
public void testAddDropColumn() throws Exception {
Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
// try-with-resources replaces the old manual finally/close, which could leak
// the connection if setAutoCommit threw before the try block was entered.
try (Connection conn = DriverManager.getConnection(PHOENIX_JDBC_TENANT_SPECIFIC_URL, props)) {
conn.setAutoCommit(true);
conn.createStatement().execute("upsert into " + TENANT_TABLE_NAME + " (id, tenant_col) values (1, 'Viva Las Vegas')");
conn.createStatement().execute("alter view " + TENANT_TABLE_NAME + " add tenant_col2 char(1) null");
conn.createStatement().execute("upsert into " + TENANT_TABLE_NAME + " (id, tenant_col2) values (2, 'a')");
// Both rows are visible; exactly one has tenant_col2 = 'a'.
ResultSet rs = conn.createStatement().executeQuery("select count(*) from " + TENANT_TABLE_NAME);
rs.next();
assertEquals(2, rs.getInt(1));
rs = conn.createStatement().executeQuery("select count(*) from " + TENANT_TABLE_NAME + " where tenant_col2 = 'a'");
rs.next();
assertEquals(1, rs.getInt(1));
// Dropping tenant_col must not delete any rows.
conn.createStatement().execute("alter view " + TENANT_TABLE_NAME + " drop column tenant_col");
rs = conn.createStatement().executeQuery("select count(*) from " + TENANT_TABLE_NAME);
rs.next();
assertEquals(2, rs.getInt(1));
// The dropped column must no longer resolve. (The previous version also
// assigned the never-read result to rs.)
try {
conn.createStatement().executeQuery("select tenant_col from " + TENANT_TABLE_NAME);
fail("tenant_col should no longer be selectable after being dropped");
} catch (ColumnNotFoundException expected) {
// expected: column was dropped above
}
}
}
Aggregations