Use of org.apache.hadoop.hbase.HRegionLocation in project phoenix by apache.
In class ParallelIteratorsSplitTest, method testGetSplitsWithSkipScanFilter:
@Test
public void testGetSplitsWithSkipScanFilter() throws Exception {
    byte[][] splits = new byte[][] { Ka1A, Ka1B, Ka1E, Ka1G, Ka1I, Ka2A };
    createTestTable(getUrl(), DDL, splits, null);
    Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
    Connection conn = DriverManager.getConnection(getUrl(), props);
    PhoenixConnection pconn = conn.unwrap(PhoenixConnection.class);
    PTable table = pconn.getTable(new PTableKey(pconn.getTenantId(), TABLE_NAME));
    TableRef tableRef = new TableRef(table);
    List<HRegionLocation> regions = pconn.getQueryServices().getAllTableRegions(tableRef.getTable().getPhysicalName().getBytes());
    List<KeyRange> ranges = getSplits(tableRef, scan, regions, scanRanges);
    assertEquals("Unexpected number of splits: " + ranges.size(), expectedSplits.size(), ranges.size());
    for (int i = 0; i < expectedSplits.size(); i++) {
        assertEquals(expectedSplits.get(i), ranges.get(i));
    }
}
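The test above hands the HRegionLocation list returned by getAllTableRegions directly to the split calculation. The following sketch is a hypothetical helper (not part of the test) for inspecting those region boundaries; it relies only on HRegionLocation, HRegionInfo, and Bytes, which already appear in this listing:

private static void dumpRegionBoundaries(List<HRegionLocation> regions) {
    for (HRegionLocation location : regions) {
        // Each region covers [startKey, endKey); the end key of one region is the start key of the next.
        HRegionInfo info = location.getRegionInfo();
        System.out.println("[" + Bytes.toStringBinary(info.getStartKey()) + ", "
                + Bytes.toStringBinary(info.getEndKey()) + ") on " + location.getServerName());
    }
}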
Use of org.apache.hadoop.hbase.HRegionLocation in project phoenix by apache.
In class ServerCacheClient, method addServerCache:
public ServerCache addServerCache(ScanRanges keyRanges, final ImmutableBytesWritable cachePtr, final byte[] txState, final ServerCacheFactory cacheFactory, final TableRef cacheUsingTableRef) throws SQLException {
    ConnectionQueryServices services = connection.getQueryServices();
    MemoryChunk chunk = services.getMemoryManager().allocate(cachePtr.getLength());
    List<Closeable> closeables = new ArrayList<Closeable>();
    closeables.add(chunk);
    ServerCache hashCacheSpec = null;
    SQLException firstException = null;
    final byte[] cacheId = generateId();
    /**
     * Execute EndPoint in parallel on each server to send compressed hash cache
     */
    // TODO: generalize and package as a per region server EndPoint caller
    // (ideally this would be functionality provided by the coprocessor framework)
    boolean success = false;
    ExecutorService executor = services.getExecutor();
    List<Future<Boolean>> futures = Collections.emptyList();
    try {
        final PTable cacheUsingTable = cacheUsingTableRef.getTable();
        List<HRegionLocation> locations = services.getAllTableRegions(cacheUsingTable.getPhysicalName().getBytes());
        int nRegions = locations.size();
        // Size these based on worst case
        futures = new ArrayList<Future<Boolean>>(nRegions);
        Set<HRegionLocation> servers = new HashSet<HRegionLocation>(nRegions);
        for (HRegionLocation entry : locations) {
            // Keep track of servers we've sent to and only send once
            byte[] regionStartKey = entry.getRegionInfo().getStartKey();
            byte[] regionEndKey = entry.getRegionInfo().getEndKey();
            if (!servers.contains(entry) && keyRanges.intersectRegion(regionStartKey, regionEndKey, cacheUsingTable.getIndexType() == IndexType.LOCAL)) {
                // Call RPC once per server
                servers.add(entry);
                if (LOG.isDebugEnabled()) {
                    LOG.debug(addCustomAnnotations("Adding cache entry to be sent for " + entry, connection));
                }
                final byte[] key = getKeyInRegion(entry.getRegionInfo().getStartKey());
                final HTableInterface htable = services.getTable(cacheUsingTableRef.getTable().getPhysicalName().getBytes());
                closeables.add(htable);
                futures.add(executor.submit(new JobCallable<Boolean>() {

                    @Override
                    public Boolean call() throws Exception {
                        final Map<byte[], AddServerCacheResponse> results;
                        try {
                            results = htable.coprocessorService(ServerCachingService.class, key, key, new Batch.Call<ServerCachingService, AddServerCacheResponse>() {

                                @Override
                                public AddServerCacheResponse call(ServerCachingService instance) throws IOException {
                                    ServerRpcController controller = new ServerRpcController();
                                    BlockingRpcCallback<AddServerCacheResponse> rpcCallback = new BlockingRpcCallback<AddServerCacheResponse>();
                                    AddServerCacheRequest.Builder builder = AddServerCacheRequest.newBuilder();
                                    final byte[] tenantIdBytes;
                                    if (cacheUsingTable.isMultiTenant()) {
                                        try {
                                            tenantIdBytes = connection.getTenantId() == null ? null : ScanUtil.getTenantIdBytes(cacheUsingTable.getRowKeySchema(), cacheUsingTable.getBucketNum() != null, connection.getTenantId(), cacheUsingTable.getViewIndexId() != null);
                                        } catch (SQLException e) {
                                            throw new IOException(e);
                                        }
                                    } else {
                                        tenantIdBytes = connection.getTenantId() == null ? null : connection.getTenantId().getBytes();
                                    }
                                    if (tenantIdBytes != null) {
                                        builder.setTenantId(ByteStringer.wrap(tenantIdBytes));
                                    }
                                    builder.setCacheId(ByteStringer.wrap(cacheId));
                                    builder.setCachePtr(org.apache.phoenix.protobuf.ProtobufUtil.toProto(cachePtr));
                                    builder.setHasProtoBufIndexMaintainer(true);
                                    ServerCacheFactoryProtos.ServerCacheFactory.Builder svrCacheFactoryBuider = ServerCacheFactoryProtos.ServerCacheFactory.newBuilder();
                                    svrCacheFactoryBuider.setClassName(cacheFactory.getClass().getName());
                                    builder.setCacheFactory(svrCacheFactoryBuider.build());
                                    builder.setTxState(ByteStringer.wrap(txState));
                                    instance.addServerCache(controller, builder.build(), rpcCallback);
                                    if (controller.getFailedOn() != null) {
                                        throw controller.getFailedOn();
                                    }
                                    return rpcCallback.get();
                                }
                            });
                        } catch (Throwable t) {
                            throw new Exception(t);
                        }
                        if (results != null && results.size() == 1) {
                            return results.values().iterator().next().getReturn();
                        }
                        return false;
                    }

                    /**
                     * Defines the grouping for round robin behavior. All threads spawned to process
                     * this scan will be grouped together and time sliced with other simultaneously
                     * executing parallel scans.
                     */
                    @Override
                    public Object getJobId() {
                        return ServerCacheClient.this;
                    }

                    @Override
                    public TaskExecutionMetricsHolder getTaskExecutionMetric() {
                        return NO_OP_INSTANCE;
                    }
                }));
            } else {
                if (LOG.isDebugEnabled()) {
                    LOG.debug(addCustomAnnotations("NOT adding cache entry to be sent for " + entry + " since one already exists for that entry", connection));
                }
            }
        }
        hashCacheSpec = new ServerCache(cacheId, servers, cachePtr.getLength());
        // Execute in parallel
        int timeoutMs = services.getProps().getInt(QueryServices.THREAD_TIMEOUT_MS_ATTRIB, QueryServicesOptions.DEFAULT_THREAD_TIMEOUT_MS);
        for (Future<Boolean> future : futures) {
            future.get(timeoutMs, TimeUnit.MILLISECONDS);
        }
        cacheUsingTableRefMap.put(Bytes.mapKey(cacheId), cacheUsingTableRef);
        success = true;
    } catch (SQLException e) {
        firstException = e;
    } catch (Exception e) {
        firstException = new SQLException(e);
    } finally {
        try {
            if (!success) {
                SQLCloseables.closeAllQuietly(Collections.singletonList(hashCacheSpec));
                for (Future<Boolean> future : futures) {
                    future.cancel(true);
                }
            }
        } finally {
            try {
                Closeables.closeAll(closeables);
            } catch (IOException e) {
                if (firstException == null) {
                    firstException = new SQLException(e);
                }
            } finally {
                if (firstException != null) {
                    throw firstException;
                }
            }
        }
    }
    if (LOG.isDebugEnabled()) {
        LOG.debug(addCustomAnnotations("Cache " + cacheId + " successfully added to servers.", connection));
    }
    return hashCacheSpec;
}
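A hedged usage sketch for the method above: the returned ServerCache records the cache id and the set of HRegionLocations that received the payload, and the method itself closes it via SQLCloseables on failure, so a caller should likewise close it once the query that reads the cache has finished. Names such as serverCacheClient, myScanRanges, myCachePtr, myTxState, myCacheFactory, and myTableRef are placeholders, and the sketch assumes the enclosing method can propagate SQLException:

ServerCache cache = null;
try {
    cache = serverCacheClient.addServerCache(myScanRanges, myCachePtr, myTxState, myCacheFactory, myTableRef);
    // ... run the join or index query that reads the cache on each region server ...
} finally {
    if (cache != null) {
        // Releases the server-side memory held for this cache id.
        cache.close();
    }
}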
Use of org.apache.hadoop.hbase.HRegionLocation in project phoenix by apache.
In class ConnectionlessQueryServicesImpl, method generateRegionLocations:
private static List<HRegionLocation> generateRegionLocations(byte[] physicalName, byte[][] splits) {
    byte[] startKey = HConstants.EMPTY_START_ROW;
    List<HRegionLocation> regions = Lists.newArrayListWithExpectedSize(splits.length);
    for (byte[] split : splits) {
        regions.add(new HRegionLocation(new HRegionInfo(TableName.valueOf(physicalName), startKey, split), SERVER_NAME, -1));
        startKey = split;
    }
    regions.add(new HRegionLocation(new HRegionInfo(TableName.valueOf(physicalName), startKey, HConstants.EMPTY_END_ROW), SERVER_NAME, -1));
    return regions;
}
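As a worked illustration of the loop above (the table name and split points are placeholders, not code from the project), two split points produce three fabricated regions that tile the entire key space:

byte[][] splits = new byte[][] { Bytes.toBytes("b"), Bytes.toBytes("c") };
List<HRegionLocation> regions = generateRegionLocations(Bytes.toBytes("MY_TABLE"), splits);
// regions.get(0) covers ["", "b"), regions.get(1) covers ["b", "c"),
// and regions.get(2) covers ["c", ""), where "" denotes the empty start/end row.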
Use of org.apache.hadoop.hbase.HRegionLocation in project phoenix by apache.
In class ConnectionQueryServicesImpl, method checkClientServerCompatibility:
private void checkClientServerCompatibility(byte[] metaTable) throws SQLException {
    StringBuilder buf = new StringBuilder("Newer Phoenix clients can't communicate with older Phoenix servers. The following servers require an updated " + QueryConstants.DEFAULT_COPROCESS_JAR_NAME + " to be put in the classpath of HBase: ");
    boolean isIncompatible = false;
    int minHBaseVersion = Integer.MAX_VALUE;
    boolean isTableNamespaceMappingEnabled = false;
    HTableInterface ht = null;
    try {
        List<HRegionLocation> locations = this.getAllTableRegions(metaTable);
        Set<HRegionLocation> serverMap = Sets.newHashSetWithExpectedSize(locations.size());
        TreeMap<byte[], HRegionLocation> regionMap = Maps.newTreeMap(Bytes.BYTES_COMPARATOR);
        List<byte[]> regionKeys = Lists.newArrayListWithExpectedSize(locations.size());
        for (HRegionLocation entry : locations) {
            if (!serverMap.contains(entry)) {
                regionKeys.add(entry.getRegionInfo().getStartKey());
                regionMap.put(entry.getRegionInfo().getRegionName(), entry);
                serverMap.add(entry);
            }
        }
        ht = this.getTable(metaTable);
        final Map<byte[], Long> results = ht.coprocessorService(MetaDataService.class, null, null, new Batch.Call<MetaDataService, Long>() {

            @Override
            public Long call(MetaDataService instance) throws IOException {
                ServerRpcController controller = new ServerRpcController();
                BlockingRpcCallback<GetVersionResponse> rpcCallback = new BlockingRpcCallback<GetVersionResponse>();
                GetVersionRequest.Builder builder = GetVersionRequest.newBuilder();
                builder.setClientVersion(VersionUtil.encodeVersion(PHOENIX_MAJOR_VERSION, PHOENIX_MINOR_VERSION, PHOENIX_PATCH_NUMBER));
                instance.getVersion(controller, builder.build(), rpcCallback);
                if (controller.getFailedOn() != null) {
                    throw controller.getFailedOn();
                }
                return rpcCallback.get().getVersion();
            }
        });
        for (Map.Entry<byte[], Long> result : results.entrySet()) {
            // This is the case where "phoenix.jar" is in place, but the server is out of sync with the client.
            long version = result.getValue();
            isTableNamespaceMappingEnabled |= MetaDataUtil.decodeTableNamespaceMappingEnabled(version);
            if (!isCompatible(result.getValue())) {
                isIncompatible = true;
                HRegionLocation name = regionMap.get(result.getKey());
                buf.append(name);
                buf.append(';');
            }
            hasIndexWALCodec &= hasIndexWALCodec(result.getValue());
            if (minHBaseVersion > MetaDataUtil.decodeHBaseVersion(result.getValue())) {
                minHBaseVersion = MetaDataUtil.decodeHBaseVersion(result.getValue());
            }
        }
        if (isTableNamespaceMappingEnabled != SchemaUtil.isNamespaceMappingEnabled(PTableType.TABLE, getProps())) {
            throw new SQLExceptionInfo.Builder(SQLExceptionCode.INCONSISTENT_NAMESPACE_MAPPING_PROPERTIES).setMessage("Ensure that config " + QueryServices.IS_NAMESPACE_MAPPING_ENABLED + " is consistent on client and server.").build().buildException();
        }
        lowestClusterHBaseVersion = minHBaseVersion;
    } catch (SQLException e) {
        throw e;
    } catch (Throwable t) {
        // This is the case where "phoenix.jar" is not on the classpath of HBase on the region server
        throw new SQLExceptionInfo.Builder(SQLExceptionCode.INCOMPATIBLE_CLIENT_SERVER_JAR).setRootCause(t).setMessage("Ensure that " + QueryConstants.DEFAULT_COPROCESS_JAR_NAME + " is put on the classpath of HBase in every region server: " + t.getMessage()).build().buildException();
    } finally {
        if (ht != null) {
            try {
                ht.close();
            } catch (IOException e) {
                logger.warn("Could not close HTable", e);
            }
        }
    }
    if (isIncompatible) {
        buf.setLength(buf.length() - 1);
        throw new SQLExceptionInfo.Builder(SQLExceptionCode.OUTDATED_JARS).setMessage(buf.toString()).build().buildException();
    }
}
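The check above relies on packing a (major, minor, patch) triple into a single number with VersionUtil.encodeVersion and pulling fields back out of each server's reply with MetaDataUtil. The sketch below shows one possible packing scheme for illustration only; it is a hypothetical layout, not necessarily Phoenix's actual bit arrangement:

static int encodeVersion(int major, int minor, int patch) {
    // Hypothetical layout: 8 bits per component, major in the highest of the three bytes.
    return (major << 16) | (minor << 8) | patch;
}

static int decodeMajorVersion(int encoded) {
    // Inverse of the packing above.
    return (encoded >> 16) & 0xFF;
}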
Use of org.apache.hadoop.hbase.HRegionLocation in project phoenix by apache.
In class ConnectionQueryServicesImpl, method getAllTableRegions:
@Override
public List<HRegionLocation> getAllTableRegions(byte[] tableName) throws SQLException {
    /*
     * Use HConnection.getRegionLocation as it uses the cache in HConnection, while getting
     * all region locations from the HTable doesn't.
     */
    int retryCount = 0, maxRetryCount = 1;
    boolean reload = false;
    while (true) {
        try {
            // We could surface the package-protected HConnectionImplementation.getNumberOfCachedRegionLocations
            // to get the sizing info we need, but this would require a new class in the same package and a cast
            // to this implementation class, so it's probably not worth it.
            List<HRegionLocation> locations = Lists.newArrayList();
            byte[] currentKey = HConstants.EMPTY_START_ROW;
            do {
                HRegionLocation regionLocation = connection.getRegionLocation(TableName.valueOf(tableName), currentKey, reload);
                locations.add(regionLocation);
                currentKey = regionLocation.getRegionInfo().getEndKey();
            } while (!Bytes.equals(currentKey, HConstants.EMPTY_END_ROW));
            return locations;
        } catch (org.apache.hadoop.hbase.TableNotFoundException e) {
            String fullName = Bytes.toString(tableName);
            throw new TableNotFoundException(fullName);
        } catch (IOException e) {
            if (retryCount++ < maxRetryCount) {
                // One retry, in case a split occurs while navigating
                reload = true;
                continue;
            }
            throw new SQLExceptionInfo.Builder(SQLExceptionCode.GET_TABLE_REGIONS_FAIL).setRootCause(e).build().buildException();
        }
    }
}
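A hedged caller sketch for the method above (queryServices and the table name are placeholders, and SQLException handling is assumed to happen in the enclosing method): because each HRegionLocation carries the hosting ServerName, the result can be grouped by server to see how a table's regions are distributed:

Map<ServerName, Integer> regionsPerServer = new HashMap<ServerName, Integer>();
for (HRegionLocation location : queryServices.getAllTableRegions(Bytes.toBytes("MY_TABLE"))) {
    // Count one region per hosting server.
    ServerName server = location.getServerName();
    Integer count = regionsPerServer.get(server);
    regionsPerServer.put(server, count == null ? 1 : count + 1);
}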