Use of org.apache.phoenix.schema.TableNotFoundException in project phoenix by apache.
From the class AlterTableIT, method testDropColumnsWithImutability.
@Test
public void testDropColumnsWithImutability() throws Exception {
    Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
    Connection conn = DriverManager.getConnection(getUrl(), props);
    conn.setAutoCommit(false);
    try {
        conn.createStatement().execute("CREATE TABLE " + dataTableFullName
                + " (a_string varchar not null, col1 integer, cf1.col2 integer, col3 integer, cf2.col4 integer "
                + " CONSTRAINT pk PRIMARY KEY (a_string)) "
                + generateDDLOptions("immutable_rows=true , SALT_BUCKETS=3 "
                        + (!columnEncoded ? ",IMMUTABLE_STORAGE_SCHEME=" + PTable.ImmutableStorageScheme.ONE_CELL_PER_COLUMN : "")));
        String query = "SELECT * FROM " + dataTableFullName;
        ResultSet rs = conn.createStatement().executeQuery(query);
        assertFalse(rs.next());
        conn.createStatement().execute("CREATE INDEX " + indexTableName + " ON " + dataTableFullName
                + " (col1) include (cf1.col2) SALT_BUCKETS=4");
        query = "SELECT * FROM " + indexTableFullName;
        rs = conn.createStatement().executeQuery(query);
        assertFalse(rs.next());
        String dml = "UPSERT INTO " + dataTableFullName + " VALUES(?,?,?,?,?)";
        PreparedStatement stmt = conn.prepareStatement(dml);
        stmt.setString(1, "b");
        stmt.setInt(2, 10);
        stmt.setInt(3, 20);
        stmt.setInt(4, 30);
        stmt.setInt(5, 40);
        stmt.execute();
        stmt.setString(1, "a");
        stmt.setInt(2, 101);
        stmt.setInt(3, 201);
        stmt.setInt(4, 301);
        stmt.setInt(5, 401);
        stmt.execute();
        conn.commit();
        query = "SELECT * FROM " + dataTableFullName + " order by col1";
        rs = conn.createStatement().executeQuery(query);
        assertTrue(rs.next());
        assertEquals("b", rs.getString(1));
        assertTrue(rs.next());
        assertEquals("a", rs.getString(1));
        assertFalse(rs.next());
        String ddl = "ALTER TABLE " + dataTableFullName + " DROP COLUMN IF EXISTS col2,col3";
        conn.createStatement().execute(ddl);
        ddl = "ALTER TABLE " + dataTableFullName + " DROP COLUMN a_string,col1";
        try {
            conn.createStatement().execute(ddl);
            fail();
        } catch (SQLException e) {
            assertEquals(SQLExceptionCode.CANNOT_DROP_PK.getErrorCode(), e.getErrorCode());
        }
        ddl = "ALTER TABLE " + dataTableFullName + " DROP COLUMN col4,col5";
        try {
            conn.createStatement().execute(ddl);
            fail();
        } catch (SQLException e) {
            assertEquals(SQLExceptionCode.COLUMN_NOT_FOUND.getErrorCode(), e.getErrorCode());
            assertTrue(e.getMessage(), e.getMessage().contains(
                    "ERROR 504 (42703): Undefined column. columnName=" + dataTableFullName + ".COL5"));
        }
        ddl = "ALTER TABLE " + dataTableFullName + " DROP COLUMN IF EXISTS col1";
        conn.createStatement().execute(ddl);
        query = "SELECT * FROM " + indexTableFullName;
        try {
            rs = conn.createStatement().executeQuery(query);
            fail();
        } catch (TableNotFoundException e) {
            // Expected: dropping col1 cascades to dropping the index that covers it.
        }
        query = "select col4 FROM " + dataTableFullName;
        rs = conn.createStatement().executeQuery(query);
        assertTrue(rs.next());
        assertTrue(rs.next());
        query = "select col2,col3 FROM " + dataTableFullName;
        try {
            rs = conn.createStatement().executeQuery(query);
            fail();
        } catch (SQLException e) {
            assertEquals(SQLExceptionCode.COLUMN_NOT_FOUND.getErrorCode(), e.getErrorCode());
        }
    } finally {
        conn.close();
    }
}
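The final assertions above hinge on the index being dropped implicitly along with its indexed column. A minimal sketch of probing for that condition, assuming a helper of our own (tableExists is not part of the test): PhoenixRuntime.getTable throws Phoenix's TableNotFoundException once the index is gone.

import java.sql.Connection;
import java.sql.SQLException;
import org.apache.phoenix.schema.TableNotFoundException;
import org.apache.phoenix.util.PhoenixRuntime;

// Hedged helper sketch: does a table or index still resolve on this connection?
static boolean tableExists(Connection conn, String fullName) {
    try {
        PhoenixRuntime.getTable(conn, fullName);
        return true;
    } catch (TableNotFoundException e) {
        // e.g. an index dropped as a side effect of DROP COLUMN
        return false;
    } catch (SQLException e) {
        throw new IllegalStateException(e);
    }
}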
Use of org.apache.phoenix.schema.TableNotFoundException in project phoenix by apache.
From the class TenantSpecificTablesDMLIT, method testTenantTableCannotBeUsedInStatementsInNonMultitenantConnections.
@Test
public void testTenantTableCannotBeUsedInStatementsInNonMultitenantConnections() throws Exception {
    Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
    Connection conn = DriverManager.getConnection(getUrl(), props);
    try {
        try {
            conn.createStatement().execute("select * from " + TENANT_TABLE_NAME);
            fail();
        } catch (TableNotFoundException expected) {
            // Expected: a tenant-specific table is invisible to a global connection.
        }
    } finally {
        conn.close();
    }
}
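For contrast, a minimal sketch of the multitenant side of this test (the tenant id value and the try-with-resources form are illustrative, not from the test): only a connection opened with the TenantId property can resolve the tenant-specific table.

// Open a tenant-specific connection. PhoenixRuntime.TENANT_ID_ATTRIB is the
// "TenantId" connection property; the value "tenant1" is illustrative.
Properties tenantProps = PropertiesUtil.deepCopy(TEST_PROPERTIES);
tenantProps.setProperty(PhoenixRuntime.TENANT_ID_ATTRIB, "tenant1");
try (Connection tenantConn = DriverManager.getConnection(getUrl(), tenantProps)) {
    // Resolves here; the same query on the global connection above throws
    // TableNotFoundException.
    tenantConn.createStatement().executeQuery("select * from " + TENANT_TABLE_NAME);
}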
Use of org.apache.phoenix.schema.TableNotFoundException in project phoenix by apache.
From the class PostDDLCompiler, method compile.
public MutationPlan compile(final List<TableRef> tableRefs, final byte[] emptyCF,
        final List<byte[]> projectCFs, final List<PColumn> deleteList,
        final long timestamp) throws SQLException {
    PhoenixStatement statement = new PhoenixStatement(connection);
    final StatementContext context = new StatementContext(statement, new ColumnResolver() {

        @Override
        public List<TableRef> getTables() {
            return tableRefs;
        }

        @Override
        public TableRef resolveTable(String schemaName, String tableName) throws SQLException {
            throw new UnsupportedOperationException();
        }

        @Override
        public ColumnRef resolveColumn(String schemaName, String tableName, String colName) throws SQLException {
            throw new UnsupportedOperationException();
        }

        @Override
        public List<PFunction> getFunctions() {
            return Collections.<PFunction>emptyList();
        }

        @Override
        public PFunction resolveFunction(String functionName) throws SQLException {
            throw new FunctionNotFoundException(functionName);
        }

        @Override
        public boolean hasUDFs() {
            return false;
        }

        @Override
        public PSchema resolveSchema(String schemaName) throws SQLException {
            throw new SchemaNotFoundException(schemaName);
        }

        @Override
        public List<PSchema> getSchemas() {
            throw new UnsupportedOperationException();
        }
    }, scan, new SequenceManager(statement));
    return new BaseMutationPlan(context, Operation.UPSERT) {

        /* FIXME */
        @Override
        public MutationState execute() throws SQLException {
            if (tableRefs.isEmpty()) {
                return new MutationState(0, 1000, connection);
            }
            boolean wasAutoCommit = connection.getAutoCommit();
            try {
                connection.setAutoCommit(true);
                SQLException sqlE = null;
                /*
                 * Handles:
                 * 1) deletion of all rows for a DROP TABLE and subsequently deletion of all rows for a DROP INDEX;
                 * 2) deletion of all column values for an ALTER TABLE DROP COLUMN
                 * 3) updating the necessary rows to have an empty KV
                 * 4) updating table stats
                 */
                long totalMutationCount = 0;
                for (final TableRef tableRef : tableRefs) {
                    Scan scan = ScanUtil.newScan(context.getScan());
                    SelectStatement select = SelectStatement.COUNT_ONE;
                    // We need to use this tableRef
                    ColumnResolver resolver = new ColumnResolver() {

                        @Override
                        public List<TableRef> getTables() {
                            return Collections.singletonList(tableRef);
                        }

                        @Override
                        public List<PFunction> getFunctions() {
                            return Collections.emptyList();
                        }

                        @Override
                        public TableRef resolveTable(String schemaName, String tableName) throws SQLException {
                            throw new UnsupportedOperationException();
                        }

                        @Override
                        public ColumnRef resolveColumn(String schemaName, String tableName, String colName) throws SQLException {
                            PColumn column = tableName != null
                                    ? tableRef.getTable().getColumnFamily(tableName).getPColumnForColumnName(colName)
                                    : tableRef.getTable().getColumnForColumnName(colName);
                            return new ColumnRef(tableRef, column.getPosition());
                        }

                        @Override
                        public PFunction resolveFunction(String functionName) throws SQLException {
                            throw new UnsupportedOperationException();
                        }

                        @Override
                        public boolean hasUDFs() {
                            return false;
                        }

                        @Override
                        public List<PSchema> getSchemas() {
                            throw new UnsupportedOperationException();
                        }

                        @Override
                        public PSchema resolveSchema(String schemaName) throws SQLException {
                            throw new SchemaNotFoundException(schemaName);
                        }
                    };
                    PhoenixStatement statement = new PhoenixStatement(connection);
                    StatementContext context = new StatementContext(statement, resolver, scan, new SequenceManager(statement));
                    long ts = timestamp;
                    // DDL operations aren't transactional; we don't need conflict detection nor
                    // filtering of invalid transactions in this case, so maybe this is ok.
                    if (ts != HConstants.LATEST_TIMESTAMP && tableRef.getTable().isTransactional()) {
                        ts = TransactionUtil.convertToNanoseconds(ts);
                    }
                    ScanUtil.setTimeRange(scan, scan.getTimeRange().getMin(), ts);
                    if (emptyCF != null) {
                        scan.setAttribute(BaseScannerRegionObserver.EMPTY_CF, emptyCF);
                        scan.setAttribute(BaseScannerRegionObserver.EMPTY_COLUMN_QUALIFIER,
                                EncodedColumnsUtil.getEmptyKeyValueInfo(tableRef.getTable()).getFirst());
                    }
                    ServerCache cache = null;
                    try {
                        if (deleteList != null) {
                            if (deleteList.isEmpty()) {
                                scan.setAttribute(BaseScannerRegionObserver.DELETE_AGG, QueryConstants.TRUE);
                                // In the case of a row deletion, add index metadata so mutable secondary indexing works
                                /* TODO: we currently manually run a scan to delete the index data here
                                ImmutableBytesWritable ptr = context.getTempPtr();
                                tableRef.getTable().getIndexMaintainers(ptr);
                                if (ptr.getLength() > 0) {
                                    IndexMetaDataCacheClient client = new IndexMetaDataCacheClient(connection, tableRef);
                                    cache = client.addIndexMetadataCache(context.getScanRanges(), ptr);
                                    byte[] uuidValue = cache.getId();
                                    scan.setAttribute(PhoenixIndexCodec.INDEX_UUID, uuidValue);
                                }
                                */
                            } else {
                                // In the case of the empty key value column family changing, do not send the index
                                // metadata, as we're currently managing this from the client. It's possible for the
                                // data empty column family to stay the same, while the index empty column family
                                // changes.
                                PColumn column = deleteList.get(0);
                                byte[] cq = column.getColumnQualifierBytes();
                                if (emptyCF == null) {
                                    scan.addColumn(column.getFamilyName().getBytes(), cq);
                                }
                                scan.setAttribute(BaseScannerRegionObserver.DELETE_CF, column.getFamilyName().getBytes());
                                scan.setAttribute(BaseScannerRegionObserver.DELETE_CQ, cq);
                            }
                        }
                        List<byte[]> columnFamilies = Lists.newArrayListWithExpectedSize(tableRef.getTable().getColumnFamilies().size());
                        if (projectCFs == null) {
                            for (PColumnFamily family : tableRef.getTable().getColumnFamilies()) {
                                columnFamilies.add(family.getName().getBytes());
                            }
                        } else {
                            for (byte[] projectCF : projectCFs) {
                                columnFamilies.add(projectCF);
                            }
                        }
                        // Need to project all column families into the scan, since we haven't yet created our empty key value
                        RowProjector projector = ProjectionCompiler.compile(context, SelectStatement.COUNT_ONE, GroupBy.EMPTY_GROUP_BY);
                        context.getAggregationManager().compile(context, GroupBy.EMPTY_GROUP_BY);
                        // Explicitly project these column families and don't project the empty key value,
                        // since at this point we haven't added the empty key value everywhere.
                        if (columnFamilies != null) {
                            scan.getFamilyMap().clear();
                            for (byte[] family : columnFamilies) {
                                scan.addFamily(family);
                            }
                            projector = new RowProjector(projector, false);
                        }
                        // Ignore exceptions due to not being able to resolve any view columns, as this
                        // just means the view is invalid. Continue on and try to perform
                        // any other Post DDL operations.
                        try {
                            // Since dropping a VIEW does not affect the underlying data, we do
                            // not need to pass through the view statement here.
                            // Push where clause into scan
                            WhereCompiler.compile(context, select);
                        } catch (ColumnFamilyNotFoundException e) {
                            continue;
                        } catch (ColumnNotFoundException e) {
                            continue;
                        } catch (AmbiguousColumnException e) {
                            continue;
                        }
                        QueryPlan plan = new AggregatePlan(context, select, tableRef, projector, null, null,
                                OrderBy.EMPTY_ORDER_BY, null, GroupBy.EMPTY_GROUP_BY, null);
                        try {
                            ResultIterator iterator = plan.iterator();
                            try {
                                Tuple row = iterator.next();
                                ImmutableBytesWritable ptr = context.getTempPtr();
                                totalMutationCount += (Long) projector.getColumnProjector(0).getValue(row, PLong.INSTANCE, ptr);
                            } catch (SQLException e) {
                                sqlE = e;
                            } finally {
                                try {
                                    iterator.close();
                                } catch (SQLException e) {
                                    if (sqlE == null) {
                                        sqlE = e;
                                    } else {
                                        sqlE.setNextException(e);
                                    }
                                } finally {
                                    if (sqlE != null) {
                                        throw sqlE;
                                    }
                                }
                            }
                        } catch (TableNotFoundException e) {
                            // Ignore and continue, as HBase throws when table hasn't been written to
                            // FIXME: Remove if this is fixed in 0.96
                        }
                    } finally {
                        if (cache != null) {
                            // Remove server cache if there is one
                            cache.close();
                        }
                    }
                }
                final long count = totalMutationCount;
                return new MutationState(1, 1000, connection) {

                    @Override
                    public long getUpdateCount() {
                        return count;
                    }
                };
            } finally {
                if (!wasAutoCommit) {
                    connection.setAutoCommit(wasAutoCommit);
                }
            }
        }
    };
}
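As a usage sketch (hedged: the variable names and the droppedColumns list are assumptions, but updateData is how Phoenix's ConnectionQueryServices runs a MutationPlan), a caller such as MetaDataClient might drive the compiled plan like this after a DROP COLUMN:

// Hedged sketch: compile and execute a post-DDL plan to delete the values of
// dropped columns. `connection`, `tableRef` and `droppedColumns` are assumed
// to be in scope; droppedColumns is the List<PColumn> being removed.
PostDDLCompiler compiler = new PostDDLCompiler(connection);
MutationPlan plan = compiler.compile(
        Collections.singletonList(tableRef),
        null,                         // emptyCF: empty column family unchanged
        null,                         // projectCFs: project all column families
        droppedColumns,               // deleteList: delete these columns' values
        HConstants.LATEST_TIMESTAMP); // timestamp upper bound for the scan
MutationState state = connection.getQueryServices().updateData(plan);
long rowsUpdated = state.getUpdateCount();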
Use of org.apache.phoenix.schema.TableNotFoundException in project phoenix by apache.
From the class ConnectionQueryServicesImpl, method metaDataMutated.
/**
 * Ensures that metaData mutations are handled in the correct order
 */
private PMetaData metaDataMutated(PName tenantId, String tableName, long tableSeqNum, Mutator mutator) throws SQLException {
    synchronized (latestMetaDataLock) {
        throwConnectionClosedIfNullMetaData();
        PMetaData metaData = latestMetaData;
        PTable table;
        long endTime = System.currentTimeMillis() + DEFAULT_OUT_OF_ORDER_MUTATIONS_WAIT_TIME_MS;
        while (true) {
            try {
                try {
                    table = metaData.getTableRef(new PTableKey(tenantId, tableName)).getTable();
                    /* If the table is at the prior sequence number, then we're good to go.
                     * We know if we've got this far, that the server validated the mutations,
                     * so we'd just need to wait until the other connection that mutated the same
                     * table is processed.
                     */
                    if (table.getSequenceNumber() + 1 == tableSeqNum) {
                        // TODO: assert that timeStamp is bigger than table timeStamp?
                        mutator.mutate(metaData);
                        break;
                    } else if (table.getSequenceNumber() >= tableSeqNum) {
                        logger.warn("Attempt to cache older version of " + tableName + ": current= "
                                + table.getSequenceNumber() + ", new=" + tableSeqNum);
                        break;
                    }
                } catch (TableNotFoundException e) {
                    // Not cached yet; fall through and wait for the mutating
                    // connection's update to show up.
                }
                long waitTime = endTime - System.currentTimeMillis();
                // We waited long enough - just remove the table from the cache
                // and the next time it's used it'll be pulled over from the server.
                if (waitTime <= 0) {
                    logger.warn("Unable to update meta data repo within "
                            + (DEFAULT_OUT_OF_ORDER_MUTATIONS_WAIT_TIME_MS / 1000) + " seconds for " + tableName);
                    // There will never be a parentTableName here, as that would only
                    // be non null for an index and we never add/remove columns from an index.
                    metaData.removeTable(tenantId, tableName, null, HConstants.LATEST_TIMESTAMP);
                    break;
                }
                latestMetaDataLock.wait(waitTime);
            } catch (InterruptedException e) {
                // Restore the interrupt status
                Thread.currentThread().interrupt();
                throw new SQLExceptionInfo.Builder(SQLExceptionCode.INTERRUPTED_EXCEPTION).setRootCause(e).build().buildException();
            }
        }
        latestMetaData = metaData;
        latestMetaDataLock.notifyAll();
        return metaData;
    }
}
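The Mutator parameter carries the actual cache change; metaDataMutated only decides when it is safe to apply it. A sketch of the shape of a call site, with the callback signature inferred from the usage above (mutate takes the cached PMetaData) and the body left illustrative:

// Sketch of a call site; the body is illustrative, not Phoenix's implementation.
metaDataMutated(tenantId, tableName, tableSeqNum, new Mutator() {
    @Override
    public void mutate(PMetaData metaData) throws SQLException {
        // Apply the server-validated change (e.g. add or remove the mutated
        // columns) to the cached PMetaData for this table.
    }
});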
Use of org.apache.phoenix.schema.TableNotFoundException in project phoenix by apache.
From the class ConnectionQueryServicesImpl, method getAllTableRegions.
@Override
public List<HRegionLocation> getAllTableRegions(byte[] tableName) throws SQLException {
    /*
     * Use HConnection.getRegionLocation as it uses the cache in HConnection, while getting
     * all region locations from the HTable doesn't.
     */
    int retryCount = 0, maxRetryCount = 1;
    boolean reload = false;
    while (true) {
        try {
            // We could surface the package-protected HConnectionImplementation.getNumberOfCachedRegionLocations
            // to get the sizing info we need, but this would require a new class in the same package and a cast
            // to this implementation class, so it's probably not worth it.
            List<HRegionLocation> locations = Lists.newArrayList();
            byte[] currentKey = HConstants.EMPTY_START_ROW;
            do {
                HRegionLocation regionLocation = connection.getRegionLocation(TableName.valueOf(tableName), currentKey, reload);
                locations.add(regionLocation);
                currentKey = regionLocation.getRegionInfo().getEndKey();
            } while (!Bytes.equals(currentKey, HConstants.EMPTY_END_ROW));
            return locations;
        } catch (org.apache.hadoop.hbase.TableNotFoundException e) {
            // Translate the HBase exception into Phoenix's TableNotFoundException
            String fullName = Bytes.toString(tableName);
            throw new TableNotFoundException(fullName);
        } catch (IOException e) {
            if (retryCount++ < maxRetryCount) {
                // One retry, in case a split occurs while navigating
                reload = true;
                continue;
            }
            throw new SQLExceptionInfo.Builder(SQLExceptionCode.GET_TABLE_REGIONS_FAIL).setRootCause(e).build().buildException();
        }
    }
}
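A short usage sketch (the table name is illustrative): callers typically walk the returned locations to derive per-region key ranges, and can catch Phoenix's TableNotFoundException rather than the HBase one, since this method translates it.

// Hedged usage sketch: derive per-region key ranges for parallel scanning.
// `services` is assumed to be a ConnectionQueryServices instance.
try {
    List<HRegionLocation> regions = services.getAllTableRegions(Bytes.toBytes("MY_TABLE"));
    for (HRegionLocation location : regions) {
        byte[] startKey = location.getRegionInfo().getStartKey();
        byte[] endKey = location.getRegionInfo().getEndKey();
        // Build a scan bounded by [startKey, endKey) for this region.
    }
} catch (TableNotFoundException e) {
    // The Phoenix exception, translated from org.apache.hadoop.hbase.TableNotFoundException.
}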