
Example 56 with HRegionLocation

Use of org.apache.hadoop.hbase.HRegionLocation in project hbase by apache.

The class TestEndToEndSplitTransaction, method blockUntilRegionIsInMeta.

public static void blockUntilRegionIsInMeta(Connection conn, long timeout, HRegionInfo hri) throws IOException, InterruptedException {
    log("blocking until region is in META: " + hri.getRegionNameAsString());
    long start = System.currentTimeMillis();
    while (System.currentTimeMillis() - start < timeout) {
        HRegionLocation loc = MetaTableAccessor.getRegionLocation(conn, hri);
        if (loc != null && !loc.getRegionInfo().isOffline()) {
            log("found region in META: " + hri.getRegionNameAsString());
            break;
        }
        Threads.sleep(10);
    }
}
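
A minimal sketch of how a test might call this helper after splitting a table, assuming a mini-cluster utility named TEST_UTIL and a daughter-region HRegionInfo named daughterRegion (both hypothetical stand-ins):

// TEST_UTIL (an HBaseTestingUtility) and daughterRegion are hypothetical.
try (Connection conn = ConnectionFactory.createConnection(TEST_UTIL.getConfiguration())) {
    // Wait up to 60 seconds for the daughter region to appear in hbase:meta.
    blockUntilRegionIsInMeta(conn, 60000, daughterRegion);
}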
Also used: HRegionLocation(org.apache.hadoop.hbase.HRegionLocation)

Example 57 with HRegionLocation

Use of org.apache.hadoop.hbase.HRegionLocation in project hbase by apache.

The class TestAccessController, method testRegionOffline.

@Test(timeout = 180000)
public void testRegionOffline() throws Exception {
    List<HRegionLocation> regions;
    try (RegionLocator locator = systemUserConnection.getRegionLocator(TEST_TABLE)) {
        regions = locator.getAllRegionLocations();
    }
    HRegionLocation location = regions.get(0);
    final HRegionInfo hri = location.getRegionInfo();
    AccessTestAction action = new AccessTestAction() {

        @Override
        public Object run() throws Exception {
            ACCESS_CONTROLLER.preRegionOffline(ObserverContext.createAndPrepare(CP_ENV, null), hri);
            return null;
        }
    };
    verifyAllowed(action, SUPERUSER, USER_ADMIN, USER_OWNER, USER_GROUP_ADMIN);
    verifyDenied(action, USER_CREATE, USER_RW, USER_RO, USER_NONE, USER_GROUP_READ, USER_GROUP_WRITE, USER_GROUP_CREATE);
}
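
The RegionLocator lookup at the top of this test is a reusable pattern on its own. A short sketch, assuming an open Connection named connection and a TableName named tableName (both hypothetical):

// List every region of the table together with the server hosting it.
try (RegionLocator locator = connection.getRegionLocator(tableName)) {
    for (HRegionLocation loc : locator.getAllRegionLocations()) {
        System.out.println(loc.getRegionInfo().getRegionNameAsString() + " on " + loc.getServerName());
    }
}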
Also used: HRegionInfo(org.apache.hadoop.hbase.HRegionInfo), RegionLocator(org.apache.hadoop.hbase.client.RegionLocator), HRegionLocation(org.apache.hadoop.hbase.HRegionLocation), Test(org.junit.Test)

Example 58 with HRegionLocation

Use of org.apache.hadoop.hbase.HRegionLocation in project hbase by apache.

The class OfflineMetaRebuildTestCore, method deleteRegion.

protected void deleteRegion(Configuration conf, final Table tbl, byte[] startKey, byte[] endKey) throws IOException {
    LOG.info("Before delete:");
    HTableDescriptor htd = tbl.getTableDescriptor();
    dumpMeta(htd);
    List<HRegionLocation> regions;
    try (RegionLocator rl = connection.getRegionLocator(tbl.getName())) {
        regions = rl.getAllRegionLocations();
    }
    for (HRegionLocation e : regions) {
        HRegionInfo hri = e.getRegionInfo();
        ServerName hsa = e.getServerName();
        if (Bytes.compareTo(hri.getStartKey(), startKey) == 0 && Bytes.compareTo(hri.getEndKey(), endKey) == 0) {
            LOG.info("RegionName: " + hri.getRegionNameAsString());
            byte[] deleteRow = hri.getRegionName();
            TEST_UTIL.getAdmin().unassign(deleteRow, true);
            LOG.info("deleting hdfs data: " + hri.toString() + hsa.toString());
            Path rootDir = FSUtils.getRootDir(conf);
            FileSystem fs = rootDir.getFileSystem(conf);
            Path p = new Path(FSUtils.getTableDir(rootDir, htd.getTableName()), hri.getEncodedName());
            fs.delete(p, true);
            try (Table meta = this.connection.getTable(TableName.META_TABLE_NAME)) {
                Delete delete = new Delete(deleteRow);
                meta.delete(delete);
            }
        }
        LOG.info(hri.toString() + hsa.toString());
    }
    TEST_UTIL.getMetaTableRows(htd.getTableName());
    LOG.info("After delete:");
    dumpMeta(htd);
}
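
The meta-row removal above works because hbase:meta is keyed by the full region name. A minimal sketch of that step in isolation, assuming an open Connection named connection and an HRegionInfo named hri (both hypothetical):

try (Table meta = connection.getTable(TableName.META_TABLE_NAME)) {
    // The row key in hbase:meta is the region name; deleting the row drops the region's entry.
    meta.delete(new Delete(hri.getRegionName()));
}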
Also used: HRegionInfo(org.apache.hadoop.hbase.HRegionInfo), Path(org.apache.hadoop.fs.Path), Delete(org.apache.hadoop.hbase.client.Delete), RegionLocator(org.apache.hadoop.hbase.client.RegionLocator), HRegionLocation(org.apache.hadoop.hbase.HRegionLocation), Table(org.apache.hadoop.hbase.client.Table), ServerName(org.apache.hadoop.hbase.ServerName), FileSystem(org.apache.hadoop.fs.FileSystem), HRegionFileSystem(org.apache.hadoop.hbase.regionserver.HRegionFileSystem), HTableDescriptor(org.apache.hadoop.hbase.HTableDescriptor)

Example 59 with HRegionLocation

Use of org.apache.hadoop.hbase.HRegionLocation in project SpyGlass by ParallelAI.

The class HBaseInputFormatGranular, method getSplits.

@SuppressWarnings("deprecation")
@Override
public HBaseTableSplitGranular[] getSplits(JobConf job, int numSplits) throws IOException {
    if (this.table == null) {
        throw new IOException("No table was provided");
    }
    if (this.inputColumns == null || this.inputColumns.length == 0) {
        throw new IOException("Expecting at least one column");
    }
    Pair<byte[][], byte[][]> keys = table.getStartEndKeys();
    if (keys == null || keys.getFirst() == null || keys.getFirst().length == 0) {
        HRegionLocation regLoc = table.getRegionLocation(HConstants.EMPTY_BYTE_ARRAY, false);
        if (null == regLoc) {
            throw new IOException("Expecting at least one region.");
        }
        List<HBaseTableSplitGranular> splits = new ArrayList<HBaseTableSplitGranular>(1);
        HBaseTableSplitGranular split = new HBaseTableSplitGranular(table.getTableName(), HConstants.EMPTY_BYTE_ARRAY, HConstants.EMPTY_BYTE_ARRAY, regLoc.getHostnamePort().split(Addressing.HOSTNAME_PORT_SEPARATOR)[0], regLoc.getRegionInfo().getRegionNameAsString(), SourceMode.EMPTY, false);
        splits.add(split);
        return splits.toArray(new HBaseTableSplitGranular[splits.size()]);
    }
    if (keys.getSecond() == null || keys.getSecond().length == 0) {
        throw new IOException("Expecting at least one region.");
    }
    if (keys.getFirst().length != keys.getSecond().length) {
        throw new IOException("Regions for start and end key do not match");
    }
    byte[] minKey = keys.getFirst()[keys.getFirst().length - 1];
    byte[] maxKey = keys.getSecond()[0];
    LOG.debug(String.format("SETTING min key (%s) and max key (%s)", Bytes.toString(minKey), Bytes.toString(maxKey)));
    byte[][] regStartKeys = keys.getFirst();
    byte[][] regStopKeys = keys.getSecond();
    String[] regions = new String[regStartKeys.length];
    String[] regionNames = new String[regStartKeys.length];
    for (int i = 0; i < regStartKeys.length; i++) {
        minKey = (regStartKeys[i] != null && regStartKeys[i].length != 0) && (Bytes.compareTo(regStartKeys[i], minKey) < 0) ? regStartKeys[i] : minKey;
        maxKey = (regStopKeys[i] != null && regStopKeys[i].length != 0) && (Bytes.compareTo(regStopKeys[i], maxKey) > 0) ? regStopKeys[i] : maxKey;
        HRegionLocation regionLoc = table.getRegionLocation(keys.getFirst()[i]);
        String regionServerHostnamePort = regionLoc.getHostnamePort();
        InetAddress regionAddress = toInetAddress(regionServerHostnamePort);
        String regionLocation;
        try {
            regionLocation = reverseDNS(regionAddress);
        } catch (NamingException e) {
            LOG.error("Cannot resolve the host name for " + regionAddress + " because of " + e);
            regionLocation = toHostname(regionServerHostnamePort);
        }
        regionNames[i] = regionLoc.getRegionInfo().getRegionNameAsString();
        LOG.debug("***** " + regionLocation);
        if (regionLocation == null || regionLocation.length() == 0)
            throw new IOException("The region location for region " + i + " is null or empty");
        regions[i] = regionLocation;
        LOG.debug(String.format("Region (%s) has start key (%s) and stop key (%s)", regions[i], Bytes.toString(regStartKeys[i]), Bytes.toString(regStopKeys[i])));
    }
    byte[] startRow = HConstants.EMPTY_START_ROW;
    byte[] stopRow = HConstants.EMPTY_END_ROW;
    LOG.debug(String.format("Found min key (%s) and max key (%s)", Bytes.toString(minKey), Bytes.toString(maxKey)));
    LOG.debug("SOURCE MODE is : " + sourceMode);
    switch(sourceMode) {
        case SCAN_ALL:
            startRow = HConstants.EMPTY_START_ROW;
            stopRow = HConstants.EMPTY_END_ROW;
            LOG.debug(String.format("SCAN ALL: Found start key (%s) and stop key (%s)", Bytes.toString(startRow), Bytes.toString(stopRow)));
            break;
        case SCAN_RANGE:
            startRow = (startKey != null && startKey.length() != 0) ? Bytes.toBytes(startKey) : HConstants.EMPTY_START_ROW;
            stopRow = (stopKey != null && stopKey.length() != 0) ? Bytes.toBytes(stopKey) : HConstants.EMPTY_END_ROW;
            LOG.debug(String.format("SCAN RANGE: Found start key (%s) and stop key (%s)", Bytes.toString(startRow), Bytes.toString(stopRow)));
            break;
    }
    switch(sourceMode) {
        case EMPTY:
        case SCAN_ALL:
        case SCAN_RANGE:
            {
                // startRow = (Bytes.compareTo(startRow, minKey) < 0) ? minKey :
                // startRow;
                // stopRow = (Bytes.compareTo(stopRow, maxKey) > 0) ? maxKey :
                // stopRow;
                List<HBaseTableSplitGranular> splits = new ArrayList<HBaseTableSplitGranular>();
                if (!useSalt) {
                    List<HRegionLocation> validRegions = table.getRegionsInRange(startRow, stopRow);
                    int maxRegions = validRegions.size();
                    int currentRegion = 1;
                    for (HRegionLocation cRegion : validRegions) {
                        byte[] rStart = cRegion.getRegionInfo().getStartKey();
                        byte[] rStop = cRegion.getRegionInfo().getEndKey();
                        String regionServerHostnamePort = cRegion.getHostnamePort();
                        InetAddress regionAddress = toInetAddress(regionServerHostnamePort);
                        String regionLocation;
                        try {
                            regionLocation = reverseDNS(regionAddress);
                        } catch (NamingException e) {
                            LOG.error("Cannot resolve the host name for " + regionAddress + " because of " + e);
                            regionLocation = toHostname(regionServerHostnamePort);
                        }
                        String regionName = cRegion.getRegionInfo().getRegionNameAsString();
                        byte[] sStart = (startRow == HConstants.EMPTY_START_ROW || (Bytes.compareTo(startRow, rStart) <= 0) ? rStart : startRow);
                        byte[] sStop = (stopRow == HConstants.EMPTY_END_ROW || (Bytes.compareTo(stopRow, rStop) >= 0 && rStop.length != 0) ? rStop : stopRow);
                        LOG.debug(String.format("BOOL start (%s) stop (%s) length (%d)", (startRow == HConstants.EMPTY_START_ROW || (Bytes.compareTo(startRow, rStart) <= 0)), (stopRow == HConstants.EMPTY_END_ROW || (Bytes.compareTo(stopRow, rStop) >= 0)), rStop.length));
                        HBaseTableSplitGranular split = new HBaseTableSplitGranular(table.getTableName(), sStart, sStop, regionLocation, regionName, SourceMode.SCAN_RANGE, useSalt);
                        split.setEndRowInclusive(currentRegion == maxRegions);
                        currentRegion++;
                        LOG.debug(String.format("START KEY (%s) STOP KEY (%s) rSTART (%s) rSTOP (%s) sSTART (%s) sSTOP (%s) REGION [%s] SPLIT [%s]", Bytes.toString(startRow), Bytes.toString(stopRow), Bytes.toString(rStart), Bytes.toString(rStop), Bytes.toString(sStart), Bytes.toString(sStop), cRegion.getHostnamePort(), split));
                        splits.add(split);
                    }
                } else {
                    LOG.debug("Using SALT : " + useSalt);
                    // Generate one scan interval per salt prefix within each region's key range.
                    for (int i = 0; i < regions.length; i++) {
                        Pair<byte[], byte[]>[] intervals = HBaseSalter.getDistributedIntervals(startRow, stopRow, regStartKeys[i], regStopKeys[i], prefixList);
                        for (Pair<byte[], byte[]> pair : intervals) {
                            LOG.debug("".format("Using SALT, Region (%s) Start (%s) Stop (%s)", regions[i], Bytes.toString(pair.getFirst()), Bytes.toString(pair.getSecond())));
                            HBaseTableSplitGranular split = new HBaseTableSplitGranular(table.getTableName(), pair.getFirst(), pair.getSecond(), regions[i], regionNames[i], SourceMode.SCAN_RANGE, useSalt);
                            split.setEndRowInclusive(true);
                            splits.add(split);
                        }
                    }
                }
                LOG.debug("RETURNED NO OF SPLITS: split -> " + splits.size());
                for (HBaseTableSplitGranular s : splits) {
                    LOG.debug("RETURNED SPLITS: split -> " + s);
                }
                return splits.toArray(new HBaseTableSplitGranular[splits.size()]);
            }
        case GET_LIST:
            {
                // if( keyList == null || keyList.size() == 0 ) {
                if (keyList == null) {
                    throw new IOException("Source Mode is GET_LIST but key list is EMPTY");
                }
                if (useSalt) {
                    TreeSet<String> tempKeyList = new TreeSet<String>();
                    for (String key : keyList) {
                        tempKeyList.add(HBaseSalter.addSaltPrefix(key));
                    }
                    keyList = tempKeyList;
                }
                LOG.debug("".format("Splitting Key List (%s)", keyList));
                List<HBaseTableSplitGranular> splits = new ArrayList<HBaseTableSplitGranular>();
                for (int i = 0; i < keys.getFirst().length; i++) {
                    if (!includeRegionInSplit(keys.getFirst()[i], keys.getSecond()[i])) {
                        continue;
                    }
                    LOG.debug(String.format("Getting region (%s) subset (%s) to (%s)", regions[i], Bytes.toString(regStartKeys[i]), Bytes.toString(regStopKeys[i])));
                    Set<String> regionsSubSet = null;
                    if ((regStartKeys[i] == null || regStartKeys[i].length == 0) && (regStopKeys[i] == null || regStopKeys[i].length == 0)) {
                        LOG.debug("REGION start is empty");
                        LOG.debug("REGION stop is empty");
                        regionsSubSet = keyList;
                    } else if (regStartKeys[i] == null || regStartKeys[i].length == 0) {
                        LOG.debug("REGION start is empty");
                        regionsSubSet = keyList.headSet(Bytes.toString(regStopKeys[i]), true);
                    } else if (regStopKeys[i] == null || regStopKeys[i].length == 0) {
                        LOG.debug("REGION stop is empty");
                        regionsSubSet = keyList.tailSet(Bytes.toString(regStartKeys[i]), true);
                    } else if (Bytes.compareTo(regStartKeys[i], regStopKeys[i]) <= 0) {
                        regionsSubSet = keyList.subSet(Bytes.toString(regStartKeys[i]), true, Bytes.toString(regStopKeys[i]), true);
                    } else {
                        throw new IOException(String.format("For REGION (%s) Start Key (%s) > Stop Key(%s)", regions[i], Bytes.toString(regStartKeys[i]), Bytes.toString(regStopKeys[i])));
                    }
                    if (regionsSubSet == null || regionsSubSet.size() == 0) {
                        LOG.debug("EMPTY: Key is for region " + regions[i] + " is null");
                        continue;
                    }
                    TreeSet<String> regionKeyList = new TreeSet<String>(regionsSubSet);
                    LOG.debug(String.format("Regions [%s] has key list <%s>", regions[i], regionKeyList));
                    HBaseTableSplitGranular split = new HBaseTableSplitGranular(table.getTableName(), regionKeyList, versions, regions[i], regionNames[i], SourceMode.GET_LIST, useSalt);
                    splits.add(split);
                }
                LOG.debug("RETURNED SPLITS: split -> " + splits);
                return splits.toArray(new HBaseTableSplitGranular[splits.size()]);
            }
        default:
            throw new IOException("Unknown source Mode : " + sourceMode);
    }
}
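
A hedged sketch of driving this method from a job, assuming the format has already been configured with a table, input columns, and a source mode; the configuration calls are SpyGlass-specific and omitted here:

HBaseInputFormatGranular format = new HBaseInputFormatGranular();
JobConf job = new JobConf();
// ... SpyGlass-specific table / column / source-mode configuration is assumed here ...
// Produces one split per region (or per salt-prefix interval when salting is enabled).
HBaseTableSplitGranular[] splits = format.getSplits(job, 1);
for (HBaseTableSplitGranular split : splits) {
    System.out.println(split);
}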
Also used: TreeSet(java.util.TreeSet), Set(java.util.Set), ArrayList(java.util.ArrayList), IOException(java.io.IOException), HRegionLocation(org.apache.hadoop.hbase.HRegionLocation), NamingException(javax.naming.NamingException), List(java.util.List), InetAddress(java.net.InetAddress), Pair(org.apache.hadoop.hbase.util.Pair)

Example 60 with HRegionLocation

Use of org.apache.hadoop.hbase.HRegionLocation in project phoenix by apache.

The class ServerCacheClient, method addServerCache.

public ServerCache addServerCache(ScanRanges keyRanges, final ImmutableBytesWritable cachePtr, final byte[] txState, final ServerCacheFactory cacheFactory, final TableRef cacheUsingTableRef) throws SQLException {
    ConnectionQueryServices services = connection.getQueryServices();
    MemoryChunk chunk = services.getMemoryManager().allocate(cachePtr.getLength());
    List<Closeable> closeables = new ArrayList<Closeable>();
    closeables.add(chunk);
    ServerCache hashCacheSpec = null;
    SQLException firstException = null;
    final byte[] cacheId = generateId();
    /*
     * Execute the endpoint in parallel on each server to send the compressed hash cache.
     */
    // TODO: generalize and package as a per region server EndPoint caller
    // (ideally this would be functionality provided by the coprocessor framework)
    boolean success = false;
    ExecutorService executor = services.getExecutor();
    List<Future<Boolean>> futures = Collections.emptyList();
    try {
        final PTable cacheUsingTable = cacheUsingTableRef.getTable();
        List<HRegionLocation> locations = services.getAllTableRegions(cacheUsingTable.getPhysicalName().getBytes());
        int nRegions = locations.size();
        // Size these based on worst case
        futures = new ArrayList<Future<Boolean>>(nRegions);
        Set<HRegionLocation> servers = new HashSet<HRegionLocation>(nRegions);
        for (HRegionLocation entry : locations) {
            // Keep track of servers we've sent to and only send once
            byte[] regionStartKey = entry.getRegionInfo().getStartKey();
            byte[] regionEndKey = entry.getRegionInfo().getEndKey();
            if (!servers.contains(entry) && keyRanges.intersectRegion(regionStartKey, regionEndKey, cacheUsingTable.getIndexType() == IndexType.LOCAL)) {
                // Call RPC once per server
                servers.add(entry);
                if (LOG.isDebugEnabled()) {
                    LOG.debug(addCustomAnnotations("Adding cache entry to be sent for " + entry, connection));
                }
                final byte[] key = getKeyInRegion(entry.getRegionInfo().getStartKey());
                final HTableInterface htable = services.getTable(cacheUsingTableRef.getTable().getPhysicalName().getBytes());
                closeables.add(htable);
                futures.add(executor.submit(new JobCallable<Boolean>() {

                    @Override
                    public Boolean call() throws Exception {
                        final Map<byte[], AddServerCacheResponse> results;
                        try {
                            results = htable.coprocessorService(ServerCachingService.class, key, key, new Batch.Call<ServerCachingService, AddServerCacheResponse>() {

                                @Override
                                public AddServerCacheResponse call(ServerCachingService instance) throws IOException {
                                    ServerRpcController controller = new ServerRpcController();
                                    BlockingRpcCallback<AddServerCacheResponse> rpcCallback = new BlockingRpcCallback<AddServerCacheResponse>();
                                    AddServerCacheRequest.Builder builder = AddServerCacheRequest.newBuilder();
                                    final byte[] tenantIdBytes;
                                    if (cacheUsingTable.isMultiTenant()) {
                                        try {
                                            tenantIdBytes = connection.getTenantId() == null ? null : ScanUtil.getTenantIdBytes(cacheUsingTable.getRowKeySchema(), cacheUsingTable.getBucketNum() != null, connection.getTenantId(), cacheUsingTable.getViewIndexId() != null);
                                        } catch (SQLException e) {
                                            throw new IOException(e);
                                        }
                                    } else {
                                        tenantIdBytes = connection.getTenantId() == null ? null : connection.getTenantId().getBytes();
                                    }
                                    if (tenantIdBytes != null) {
                                        builder.setTenantId(ByteStringer.wrap(tenantIdBytes));
                                    }
                                    builder.setCacheId(ByteStringer.wrap(cacheId));
                                    builder.setCachePtr(org.apache.phoenix.protobuf.ProtobufUtil.toProto(cachePtr));
                                    builder.setHasProtoBufIndexMaintainer(true);
                                    ServerCacheFactoryProtos.ServerCacheFactory.Builder svrCacheFactoryBuilder = ServerCacheFactoryProtos.ServerCacheFactory.newBuilder();
                                    svrCacheFactoryBuilder.setClassName(cacheFactory.getClass().getName());
                                    builder.setCacheFactory(svrCacheFactoryBuilder.build());
                                    builder.setTxState(ByteStringer.wrap(txState));
                                    instance.addServerCache(controller, builder.build(), rpcCallback);
                                    if (controller.getFailedOn() != null) {
                                        throw controller.getFailedOn();
                                    }
                                    return rpcCallback.get();
                                }
                            });
                        } catch (Throwable t) {
                            throw new Exception(t);
                        }
                        if (results != null && results.size() == 1) {
                            return results.values().iterator().next().getReturn();
                        }
                        return false;
                    }

                    /**
                     * Defines the grouping for round robin behavior. All threads spawned to process
                     * this scan will be grouped together and time sliced with other simultaneously
                     * executing parallel scans.
                     */
                    @Override
                    public Object getJobId() {
                        return ServerCacheClient.this;
                    }

                    @Override
                    public TaskExecutionMetricsHolder getTaskExecutionMetric() {
                        return NO_OP_INSTANCE;
                    }
                }));
            } else {
                if (LOG.isDebugEnabled()) {
                    LOG.debug(addCustomAnnotations("NOT adding cache entry to be sent for " + entry + " since one already exists for that entry", connection));
                }
            }
        }
        hashCacheSpec = new ServerCache(cacheId, servers, cachePtr.getLength());
        // Execute in parallel
        int timeoutMs = services.getProps().getInt(QueryServices.THREAD_TIMEOUT_MS_ATTRIB, QueryServicesOptions.DEFAULT_THREAD_TIMEOUT_MS);
        for (Future<Boolean> future : futures) {
            future.get(timeoutMs, TimeUnit.MILLISECONDS);
        }
        cacheUsingTableRefMap.put(Bytes.mapKey(cacheId), cacheUsingTableRef);
        success = true;
    } catch (SQLException e) {
        firstException = e;
    } catch (Exception e) {
        firstException = new SQLException(e);
    } finally {
        try {
            if (!success) {
                SQLCloseables.closeAllQuietly(Collections.singletonList(hashCacheSpec));
                for (Future<Boolean> future : futures) {
                    future.cancel(true);
                }
            }
        } finally {
            try {
                Closeables.closeAll(closeables);
            } catch (IOException e) {
                if (firstException == null) {
                    firstException = new SQLException(e);
                }
            } finally {
                if (firstException != null) {
                    throw firstException;
                }
            }
        }
    }
    if (LOG.isDebugEnabled()) {
        LOG.debug(addCustomAnnotations("Cache " + cacheId + " successfully added to servers.", connection));
    }
    return hashCacheSpec;
}
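
A hedged sketch of the usual lifecycle around addServerCache: the returned ServerCache is closed once the query that referenced it finishes, which releases the client-side memory chunk and removes the cache entry from each region server. Here client and the argument values are hypothetical stand-ins for objects built by the Phoenix query compiler:

ServerCache cache = null;
try {
    cache = client.addServerCache(keyRanges, cachePtr, txState, cacheFactory, cacheUsingTableRef);
    // ... run the join/query that references the cache by its id on the servers ...
} finally {
    if (cache != null) {
        cache.close(); // frees the memory chunk and removes the cache from each server
    }
}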
Also used: SQLException(java.sql.SQLException), SQLCloseable(org.apache.phoenix.util.SQLCloseable), Closeable(java.io.Closeable), ArrayList(java.util.ArrayList), JobCallable(org.apache.phoenix.job.JobManager.JobCallable), HTableInterface(org.apache.hadoop.hbase.client.HTableInterface), ServerRpcController(org.apache.hadoop.hbase.ipc.ServerRpcController), PTable(org.apache.phoenix.schema.PTable), HRegionLocation(org.apache.hadoop.hbase.HRegionLocation), BlockingRpcCallback(org.apache.hadoop.hbase.ipc.BlockingRpcCallback), HashSet(java.util.HashSet), ServerCachingService(org.apache.phoenix.coprocessor.generated.ServerCachingProtos.ServerCachingService), MemoryChunk(org.apache.phoenix.memory.MemoryManager.MemoryChunk), AddServerCacheRequest(org.apache.phoenix.coprocessor.generated.ServerCachingProtos.AddServerCacheRequest), IOException(java.io.IOException), ExecutorService(java.util.concurrent.ExecutorService), Future(java.util.concurrent.Future), ServerCacheFactory(org.apache.phoenix.coprocessor.ServerCachingProtocol.ServerCacheFactory), ConnectionQueryServices(org.apache.phoenix.query.ConnectionQueryServices), AddServerCacheResponse(org.apache.phoenix.coprocessor.generated.ServerCachingProtos.AddServerCacheResponse)

Aggregations

HRegionLocation (org.apache.hadoop.hbase.HRegionLocation): 132
HRegionInfo (org.apache.hadoop.hbase.HRegionInfo): 52
Test (org.junit.Test): 50
ServerName (org.apache.hadoop.hbase.ServerName): 44
TableName (org.apache.hadoop.hbase.TableName): 39
IOException (java.io.IOException): 31
RegionLocator (org.apache.hadoop.hbase.client.RegionLocator): 30
RegionLocations (org.apache.hadoop.hbase.RegionLocations): 29
ArrayList (java.util.ArrayList): 25
HTableDescriptor (org.apache.hadoop.hbase.HTableDescriptor): 18
Table (org.apache.hadoop.hbase.client.Table): 18
HColumnDescriptor (org.apache.hadoop.hbase.HColumnDescriptor): 16
List (java.util.List): 12
HashMap (java.util.HashMap): 11
Map (java.util.Map): 11
Result (org.apache.hadoop.hbase.client.Result): 10
MultiRowMutationEndpoint (org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint): 10
Connection (org.apache.hadoop.hbase.client.Connection): 9
HRegionServer (org.apache.hadoop.hbase.regionserver.HRegionServer): 9
Admin (org.apache.hadoop.hbase.client.Admin): 8