Use of org.apache.phoenix.query.ConnectionQueryServices in project phoenix by apache.
The class FromCompiler, method getResolverForCreation:
public static ColumnResolver getResolverForCreation(final CreateTableStatement statement, final PhoenixConnection connection) throws SQLException {
    TableName baseTable = statement.getBaseTableName();
    String schemaName;
    if (baseTable == null) {
        if (SchemaUtil.isSchemaCheckRequired(statement.getTableType(), connection.getQueryServices().getProps())) {
            schemaName = statement.getTableName().getSchemaName();
            if (schemaName != null) {
                new SchemaResolver(connection, statement.getTableName().getSchemaName(), true);
            } else if (connection.getSchema() != null) {
                // To ensure schema set through properties or connection string exists before creating table
                new SchemaResolver(connection, connection.getSchema(), true);
            }
        }
        return EMPTY_TABLE_RESOLVER;
    }
    NamedTableNode tableNode = NamedTableNode.create(null, baseTable, Collections.<ColumnDef>emptyList());
    // Always use non-tenant-specific connection here
    try {
        // We need to always get the latest meta data for the parent table of a create view call to ensure
        // that we're copying the current table meta data as of when the view is created. Once we no longer
        // copy the parent meta data, but store only the local diffs (PHOENIX-3534), we will no longer need
        // to do this.
        SingleTableColumnResolver visitor = new SingleTableColumnResolver(connection, tableNode, true, true);
        return visitor;
    } catch (TableNotFoundException e) {
        // A tenant-specific connection may not create a mapped VIEW.
        if (connection.getTenantId() == null && statement.getTableType() == PTableType.VIEW) {
            ConnectionQueryServices services = connection.getQueryServices();
            byte[] fullTableName = SchemaUtil.getPhysicalName(SchemaUtil.getTableNameAsBytes(baseTable.getSchemaName(), baseTable.getTableName()), connection.getQueryServices().getProps()).getName();
            HTableInterface htable = null;
            try {
                htable = services.getTable(fullTableName);
            } catch (UnsupportedOperationException ignore) {
                // For Connectionless
                throw e;
            } finally {
                if (htable != null) {
                    Closeables.closeQuietly(htable);
                }
            }
            tableNode = NamedTableNode.create(null, baseTable, statement.getColumnDefs());
            return new SingleTableColumnResolver(connection, tableNode, e.getTimeStamp(), new HashMap<String, UDFParseNode>(1), false);
        }
        throw e;
    }
}
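A minimal sketch of how this resolver might be obtained when compiling a CREATE TABLE statement. It is not from the Phoenix source: the JDBC URL, schema, and DDL text are illustrative assumptions, and the result of parseStatement() is cast to CreateTableStatement only because the DDL here is known to be a CREATE TABLE.

import java.sql.DriverManager;
import org.apache.phoenix.compile.ColumnResolver;
import org.apache.phoenix.compile.FromCompiler;
import org.apache.phoenix.jdbc.PhoenixConnection;
import org.apache.phoenix.parse.CreateTableStatement;
import org.apache.phoenix.parse.SQLParser;

public class ResolverForCreationSketch {
    public static void main(String[] args) throws Exception {
        // Assumes a reachable Phoenix/HBase instance; adjust the connection URL as needed.
        try (PhoenixConnection connection = DriverManager
                .getConnection("jdbc:phoenix:localhost:2181").unwrap(PhoenixConnection.class)) {
            // Parse a DDL string into the CreateTableStatement expected by getResolverForCreation().
            CreateTableStatement create = (CreateTableStatement) new SQLParser(
                    "CREATE TABLE IF NOT EXISTS MY_SCHEMA.MY_TABLE (ID BIGINT PRIMARY KEY, NAME VARCHAR)")
                    .parseStatement();
            // For a plain CREATE TABLE (no base table) this returns EMPTY_TABLE_RESOLVER after the
            // optional schema-existence check shown above.
            ColumnResolver resolver = FromCompiler.getResolverForCreation(create, connection);
            System.out.println("Resolved tables: " + resolver.getTables());
        }
    }
}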
Use of org.apache.phoenix.query.ConnectionQueryServices in project phoenix by apache.
The class ServerCacheClient, method removeServerCache:
/**
* Remove the cached table from all region servers
* @param cacheId unique identifier for the hash join (returned from {@link #addHashCache(HTable, Scan, Set)})
* @param servers list of servers upon which table was cached (filled in by {@link #addHashCache(HTable, Scan, Set)})
* @throws SQLException
* @throws IllegalStateException if hashed table cannot be removed on any region server on which it was added
*/
private void removeServerCache(final byte[] cacheId, Set<HRegionLocation> servers) throws SQLException {
    ConnectionQueryServices services = connection.getQueryServices();
    Throwable lastThrowable = null;
    TableRef cacheUsingTableRef = cacheUsingTableRefMap.get(Bytes.mapKey(cacheId));
    final PTable cacheUsingTable = cacheUsingTableRef.getTable();
    byte[] tableName = cacheUsingTableRef.getTable().getPhysicalName().getBytes();
    HTableInterface iterateOverTable = services.getTable(tableName);
    try {
        List<HRegionLocation> locations = services.getAllTableRegions(tableName);
        Set<HRegionLocation> remainingOnServers = new HashSet<HRegionLocation>(servers);
        /**
         * Allow for the possibility that the region we based where to send our cache has split and been
         * relocated to another region server *after* we sent it, but before we removed it. To accommodate
         * this, we iterate through the current metadata boundaries and remove the cache once for each
         * server that we originally sent to.
         */
        if (LOG.isDebugEnabled()) {
            LOG.debug(addCustomAnnotations("Removing Cache " + cacheId + " from servers.", connection));
        }
        for (HRegionLocation entry : locations) {
            if (remainingOnServers.contains(entry)) {
                // Call once per server
                try {
                    byte[] key = getKeyInRegion(entry.getRegionInfo().getStartKey());
                    iterateOverTable.coprocessorService(ServerCachingService.class, key, key, new Batch.Call<ServerCachingService, RemoveServerCacheResponse>() {
                        @Override
                        public RemoveServerCacheResponse call(ServerCachingService instance) throws IOException {
                            ServerRpcController controller = new ServerRpcController();
                            BlockingRpcCallback<RemoveServerCacheResponse> rpcCallback = new BlockingRpcCallback<RemoveServerCacheResponse>();
                            RemoveServerCacheRequest.Builder builder = RemoveServerCacheRequest.newBuilder();
                            final byte[] tenantIdBytes;
                            if (cacheUsingTable.isMultiTenant()) {
                                try {
                                    tenantIdBytes = connection.getTenantId() == null ? null : ScanUtil.getTenantIdBytes(cacheUsingTable.getRowKeySchema(), cacheUsingTable.getBucketNum() != null, connection.getTenantId(), cacheUsingTable.getViewIndexId() != null);
                                } catch (SQLException e) {
                                    throw new IOException(e);
                                }
                            } else {
                                tenantIdBytes = connection.getTenantId() == null ? null : connection.getTenantId().getBytes();
                            }
                            if (tenantIdBytes != null) {
                                builder.setTenantId(ByteStringer.wrap(tenantIdBytes));
                            }
                            builder.setCacheId(ByteStringer.wrap(cacheId));
                            instance.removeServerCache(controller, builder.build(), rpcCallback);
                            if (controller.getFailedOn() != null) {
                                throw controller.getFailedOn();
                            }
                            return rpcCallback.get();
                        }
                    });
                    remainingOnServers.remove(entry);
                } catch (Throwable t) {
                    lastThrowable = t;
                    LOG.error(addCustomAnnotations("Error trying to remove hash cache for " + entry, connection), t);
                }
            }
        }
        if (!remainingOnServers.isEmpty()) {
            LOG.warn(addCustomAnnotations("Unable to remove hash cache for " + remainingOnServers, connection), lastThrowable);
        }
    } finally {
        Closeables.closeQuietly(iterateOverTable);
    }
}
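A minimal sketch of the resource pattern removeServerCache relies on: borrow an HTableInterface from ConnectionQueryServices, use it for coprocessor calls, and always close it quietly. This is an assumption-based illustration, not Phoenix source; the table name is made up and the import paths follow the usual Phoenix/HBase packages.

import org.apache.hadoop.hbase.client.HTableInterface;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.phoenix.jdbc.PhoenixConnection;
import org.apache.phoenix.query.ConnectionQueryServices;
import org.apache.phoenix.util.Closeables;

public class ServerCacheTableAccessSketch {
    // "MY_TABLE" is an illustrative physical table name.
    static void touchPhysicalTable(PhoenixConnection connection) throws Exception {
        ConnectionQueryServices services = connection.getQueryServices();
        HTableInterface htable = null;
        try {
            htable = services.getTable(Bytes.toBytes("MY_TABLE"));
            // ... issue coprocessor calls against htable, as removeServerCache() does ...
        } finally {
            if (htable != null) {
                Closeables.closeQuietly(htable);
            }
        }
    }
}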
Use of org.apache.phoenix.query.ConnectionQueryServices in project phoenix by apache.
The class HashJoinPlan, method iterator:
@Override
public ResultIterator iterator(ParallelScanGrouper scanGrouper, Scan scan) throws SQLException {
    if (scan == null) {
        scan = delegate.getContext().getScan();
    }
    int count = subPlans.length;
    PhoenixConnection connection = getContext().getConnection();
    ConnectionQueryServices services = connection.getQueryServices();
    ExecutorService executor = services.getExecutor();
    List<Future<ServerCache>> futures = Lists.newArrayListWithExpectedSize(count);
    if (joinInfo != null) {
        hashClient = hashClient != null ? hashClient : new HashCacheClient(delegate.getContext().getConnection());
        firstJobEndTime = new AtomicLong(0);
        keyRangeExpressions = new CopyOnWriteArrayList<Expression>();
    }
    for (int i = 0; i < count; i++) {
        final int index = i;
        futures.add(executor.submit(new JobCallable<ServerCache>() {
            @Override
            public ServerCache call() throws Exception {
                ServerCache cache = subPlans[index].execute(HashJoinPlan.this);
                return cache;
            }
            @Override
            public Object getJobId() {
                return HashJoinPlan.this;
            }
            @Override
            public TaskExecutionMetricsHolder getTaskExecutionMetric() {
                return NO_OP_INSTANCE;
            }
        }));
    }
    SQLException firstException = null;
    for (int i = 0; i < count; i++) {
        try {
            ServerCache result = futures.get(i).get();
            if (result != null) {
                dependencies.add(result);
            }
            subPlans[i].postProcess(result, this);
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
            if (firstException == null) {
                firstException = new SQLExceptionInfo.Builder(SQLExceptionCode.INTERRUPTED_EXCEPTION).setRootCause(e).setMessage("Sub plan [" + i + "] execution interrupted.").build().buildException();
            }
        } catch (ExecutionException e) {
            if (firstException == null) {
                firstException = new SQLException("Encountered exception in sub plan [" + i + "] execution.", e.getCause());
            }
        }
    }
    if (firstException != null) {
        SQLCloseables.closeAllQuietly(dependencies);
        throw firstException;
    }
    Expression postFilter = null;
    boolean hasKeyRangeExpressions = keyRangeExpressions != null && !keyRangeExpressions.isEmpty();
    if (recompileWhereClause || hasKeyRangeExpressions) {
        StatementContext context = delegate.getContext();
        PTable table = context.getCurrentTable().getTable();
        ParseNode viewWhere = table.getViewStatement() == null ? null : new SQLParser(table.getViewStatement()).parseQuery().getWhere();
        context.setResolver(FromCompiler.getResolverForQuery((SelectStatement) (delegate.getStatement()), delegate.getContext().getConnection()));
        if (recompileWhereClause) {
            postFilter = WhereCompiler.compile(delegate.getContext(), delegate.getStatement(), viewWhere, null);
        }
        if (hasKeyRangeExpressions) {
            WhereCompiler.compile(delegate.getContext(), delegate.getStatement(), viewWhere, keyRangeExpressions, true, null);
        }
    }
    if (joinInfo != null) {
        HashJoinInfo.serializeHashJoinIntoScan(scan, joinInfo);
    }
    ResultIterator iterator = joinInfo == null ? delegate.iterator(scanGrouper, scan) : ((BaseQueryPlan) delegate).iterator(dependencies, scanGrouper, scan);
    if (statement.getInnerSelectStatement() != null && postFilter != null) {
        iterator = new FilterResultIterator(iterator, postFilter);
    }
    return iterator;
}
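A minimal sketch of the join-and-translate pattern used in the second loop above: wait on every future, remember only the first failure, and surface it as a SQLException once all sub-tasks have been drained. The plain JDK executor here is an illustrative stand-in for services.getExecutor(); this is not Phoenix source.

import java.sql.SQLException;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

public class JoinFuturesSketch {
    static List<String> awaitAll(List<Callable<String>> tasks) throws SQLException {
        ExecutorService executor = Executors.newFixedThreadPool(4);
        try {
            List<Future<String>> futures = new ArrayList<>();
            for (Callable<String> task : tasks) {
                futures.add(executor.submit(task));
            }
            List<String> results = new ArrayList<>();
            SQLException firstException = null;
            for (int i = 0; i < futures.size(); i++) {
                try {
                    results.add(futures.get(i).get());
                } catch (InterruptedException e) {
                    // Restore the interrupt flag, as iterator() does, and keep draining the remaining futures.
                    Thread.currentThread().interrupt();
                    if (firstException == null) {
                        firstException = new SQLException("Sub plan [" + i + "] execution interrupted.", e);
                    }
                } catch (ExecutionException e) {
                    if (firstException == null) {
                        firstException = new SQLException("Encountered exception in sub plan [" + i + "] execution.", e.getCause());
                    }
                }
            }
            if (firstException != null) {
                throw firstException;
            }
            return results;
        } finally {
            executor.shutdown();
        }
    }
}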
Use of org.apache.phoenix.query.ConnectionQueryServices in project phoenix by apache.
The class BaseResultIterators, method getIterators:
/**
* Executes the scan in parallel across all regions, blocking until all scans are complete.
* @return the result iterators for the scan of each region
*/
@Override
public List<PeekingResultIterator> getIterators() throws SQLException {
    if (logger.isDebugEnabled()) {
        logger.debug(LogUtil.addCustomAnnotations("Getting iterators for " + this, ScanUtil.getCustomAnnotations(scan)));
    }
    boolean isReverse = ScanUtil.isReversed(scan);
    boolean isLocalIndex = getTable().getIndexType() == IndexType.LOCAL;
    final ConnectionQueryServices services = context.getConnection().getQueryServices();
    // Get query time out from Statement
    final long startTime = System.currentTimeMillis();
    final long maxQueryEndTime = startTime + context.getStatement().getQueryTimeoutInMillis();
    int numScans = size();
    // Capture all iterators so that if something goes wrong, we close them all
    // The iterators list is based on the submission of work, so it may not
    // contain them all (for example if work was rejected from the queue)
    Queue<PeekingResultIterator> allIterators = new ConcurrentLinkedQueue<>();
    List<PeekingResultIterator> iterators = new ArrayList<PeekingResultIterator>(numScans);
    ScanWrapper previousScan = new ScanWrapper(null);
    return getIterators(scans, services, isLocalIndex, allIterators, iterators, isReverse, maxQueryEndTime, splits.size(), previousScan);
}
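A minimal sketch of the clean-up pattern described by the comment above: each iterator is registered in a concurrent queue as soon as it is created, so a failure part-way through can close everything that was actually opened, not just what made it into the result list. This is not Phoenix source, and IteratorFactory is a hypothetical helper introduced only for the illustration.

import java.sql.SQLException;
import java.util.ArrayList;
import java.util.List;
import java.util.Queue;
import java.util.concurrent.ConcurrentLinkedQueue;
import org.apache.phoenix.iterate.PeekingResultIterator;

public class CloseOnFailureSketch {
    interface IteratorFactory {
        PeekingResultIterator open(int scanIndex) throws SQLException;
    }

    static List<PeekingResultIterator> openAll(int numScans, IteratorFactory factory) throws SQLException {
        Queue<PeekingResultIterator> allIterators = new ConcurrentLinkedQueue<>();
        List<PeekingResultIterator> iterators = new ArrayList<>(numScans);
        boolean success = false;
        try {
            for (int i = 0; i < numScans; i++) {
                PeekingResultIterator iterator = factory.open(i);
                allIterators.add(iterator); // registered immediately, before any further work can fail
                iterators.add(iterator);
            }
            success = true;
            return iterators;
        } finally {
            if (!success) {
                for (PeekingResultIterator iterator : allIterators) {
                    try {
                        iterator.close();
                    } catch (SQLException ignore) {
                        // best-effort clean-up
                    }
                }
            }
        }
    }
}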
Use of org.apache.phoenix.query.ConnectionQueryServices in project phoenix by apache.
The class UpgradeIT, method testConcurrentUpgradeThrowsUprgadeInProgressException:
@Test
public void testConcurrentUpgradeThrowsUprgadeInProgressException() throws Exception {
    final AtomicBoolean mutexStatus1 = new AtomicBoolean(false);
    final AtomicBoolean mutexStatus2 = new AtomicBoolean(false);
    final CountDownLatch latch = new CountDownLatch(2);
    final AtomicInteger numExceptions = new AtomicInteger(0);
    ConnectionQueryServices services = null;
    final byte[] mutexKey = Bytes.toBytes(generateUniqueName());
    try (Connection conn = getConnection(false, null)) {
        services = conn.unwrap(PhoenixConnection.class).getQueryServices();
        putUnlockKVInSysMutex(mutexKey);
        FutureTask<Void> task1 = new FutureTask<>(new AcquireMutexRunnable(mutexStatus1, services, latch, numExceptions, mutexKey));
        FutureTask<Void> task2 = new FutureTask<>(new AcquireMutexRunnable(mutexStatus2, services, latch, numExceptions, mutexKey));
        Thread t1 = new Thread(task1);
        t1.setDaemon(true);
        Thread t2 = new Thread(task2);
        t2.setDaemon(true);
        t1.start();
        t2.start();
        latch.await();
        // make sure tasks didn't fail by calling get()
        task1.get();
        task2.get();
        assertTrue("One of the threads should have acquired the mutex", mutexStatus1.get() || mutexStatus2.get());
        assertNotEquals("One and only one thread should have acquired the mutex ", mutexStatus1.get(), mutexStatus2.get());
        assertEquals("One and only one thread should have caught UpgradeRequiredException ", 1, numExceptions.get());
    }
}
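AcquireMutexRunnable and putUnlockKVInSysMutex are helpers defined elsewhere in UpgradeIT. A rough sketch, under assumptions, of the shape such a task could take: it tries to take the upgrade mutex through the shared ConnectionQueryServices, records success, counts UpgradeInProgressException, and always counts down the latch. tryAcquireUpgradeMutex() below is a hypothetical stand-in for whatever mutex call the real test uses, and the exception's package is assumed.

import java.util.concurrent.Callable;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
import org.apache.phoenix.exception.UpgradeInProgressException;
import org.apache.phoenix.query.ConnectionQueryServices;

class AcquireMutexRunnableSketch implements Callable<Void> {
    private final AtomicBoolean acquired;
    private final ConnectionQueryServices services;
    private final CountDownLatch latch;
    private final AtomicInteger numExceptions;
    private final byte[] mutexKey;

    AcquireMutexRunnableSketch(AtomicBoolean acquired, ConnectionQueryServices services,
            CountDownLatch latch, AtomicInteger numExceptions, byte[] mutexKey) {
        this.acquired = acquired;
        this.services = services;
        this.latch = latch;
        this.numExceptions = numExceptions;
        this.mutexKey = mutexKey;
    }

    @Override
    public Void call() throws Exception {
        try {
            // Hypothetical helper; the real test uses its own mutex-acquisition API.
            tryAcquireUpgradeMutex(services, mutexKey);
            acquired.set(true);
        } catch (UpgradeInProgressException e) {
            // The losing thread is expected to land here exactly once.
            numExceptions.incrementAndGet();
        } finally {
            latch.countDown();
        }
        return null;
    }

    private static void tryAcquireUpgradeMutex(ConnectionQueryServices services, byte[] mutexKey)
            throws UpgradeInProgressException {
        // Placeholder: acquire the SYSTEM.MUTEX row for 'mutexKey' via 'services', throwing
        // UpgradeInProgressException if another client already holds it.
        throw new UnsupportedOperationException("illustrative stub");
    }
}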