Example 46 with HTableInterface

Use of org.apache.hadoop.hbase.client.HTableInterface in project phoenix by apache.

The class ServerCacheClient, method addServerCache.

public ServerCache addServerCache(ScanRanges keyRanges, final ImmutableBytesWritable cachePtr, final byte[] txState, final ServerCacheFactory cacheFactory, final TableRef cacheUsingTableRef) throws SQLException {
    ConnectionQueryServices services = connection.getQueryServices();
    MemoryChunk chunk = services.getMemoryManager().allocate(cachePtr.getLength());
    List<Closeable> closeables = new ArrayList<Closeable>();
    closeables.add(chunk);
    ServerCache hashCacheSpec = null;
    SQLException firstException = null;
    final byte[] cacheId = generateId();
    /**
     * Execute EndPoint in parallel on each server to send compressed hash cache.
     */
    // TODO: generalize and package as a per region server EndPoint caller
    // (ideally this would be functionality provided by the coprocessor framework)
    boolean success = false;
    ExecutorService executor = services.getExecutor();
    List<Future<Boolean>> futures = Collections.emptyList();
    try {
        final PTable cacheUsingTable = cacheUsingTableRef.getTable();
        List<HRegionLocation> locations = services.getAllTableRegions(cacheUsingTable.getPhysicalName().getBytes());
        int nRegions = locations.size();
        // Size these based on worst case
        futures = new ArrayList<Future<Boolean>>(nRegions);
        Set<HRegionLocation> servers = new HashSet<HRegionLocation>(nRegions);
        for (HRegionLocation entry : locations) {
            // Keep track of servers we've sent to and only send once
            byte[] regionStartKey = entry.getRegionInfo().getStartKey();
            byte[] regionEndKey = entry.getRegionInfo().getEndKey();
            if (!servers.contains(entry) && keyRanges.intersectRegion(regionStartKey, regionEndKey, cacheUsingTable.getIndexType() == IndexType.LOCAL)) {
                // Call RPC once per server
                servers.add(entry);
                if (LOG.isDebugEnabled()) {
                    LOG.debug(addCustomAnnotations("Adding cache entry to be sent for " + entry, connection));
                }
                final byte[] key = getKeyInRegion(entry.getRegionInfo().getStartKey());
                final HTableInterface htable = services.getTable(cacheUsingTableRef.getTable().getPhysicalName().getBytes());
                closeables.add(htable);
                futures.add(executor.submit(new JobCallable<Boolean>() {

                    @Override
                    public Boolean call() throws Exception {
                        final Map<byte[], AddServerCacheResponse> results;
                        try {
                            results = htable.coprocessorService(ServerCachingService.class, key, key, new Batch.Call<ServerCachingService, AddServerCacheResponse>() {

                                @Override
                                public AddServerCacheResponse call(ServerCachingService instance) throws IOException {
                                    ServerRpcController controller = new ServerRpcController();
                                    BlockingRpcCallback<AddServerCacheResponse> rpcCallback = new BlockingRpcCallback<AddServerCacheResponse>();
                                    AddServerCacheRequest.Builder builder = AddServerCacheRequest.newBuilder();
                                    final byte[] tenantIdBytes;
                                    if (cacheUsingTable.isMultiTenant()) {
                                        try {
                                            tenantIdBytes = connection.getTenantId() == null ? null : ScanUtil.getTenantIdBytes(cacheUsingTable.getRowKeySchema(), cacheUsingTable.getBucketNum() != null, connection.getTenantId(), cacheUsingTable.getViewIndexId() != null);
                                        } catch (SQLException e) {
                                            throw new IOException(e);
                                        }
                                    } else {
                                        tenantIdBytes = connection.getTenantId() == null ? null : connection.getTenantId().getBytes();
                                    }
                                    if (tenantIdBytes != null) {
                                        builder.setTenantId(ByteStringer.wrap(tenantIdBytes));
                                    }
                                    builder.setCacheId(ByteStringer.wrap(cacheId));
                                    builder.setCachePtr(org.apache.phoenix.protobuf.ProtobufUtil.toProto(cachePtr));
                                    builder.setHasProtoBufIndexMaintainer(true);
                                    ServerCacheFactoryProtos.ServerCacheFactory.Builder svrCacheFactoryBuilder = ServerCacheFactoryProtos.ServerCacheFactory.newBuilder();
                                    svrCacheFactoryBuilder.setClassName(cacheFactory.getClass().getName());
                                    builder.setCacheFactory(svrCacheFactoryBuilder.build());
                                    builder.setTxState(ByteStringer.wrap(txState));
                                    instance.addServerCache(controller, builder.build(), rpcCallback);
                                    if (controller.getFailedOn() != null) {
                                        throw controller.getFailedOn();
                                    }
                                    return rpcCallback.get();
                                }
                            });
                        } catch (Throwable t) {
                            throw new Exception(t);
                        }
                        if (results != null && results.size() == 1) {
                            return results.values().iterator().next().getReturn();
                        }
                        return false;
                    }

                    /**
                     * Defines the grouping for round robin behavior. All threads spawned to process
                     * this scan will be grouped together and time sliced with other simultaneously
                     * executing parallel scans.
                     */
                    @Override
                    public Object getJobId() {
                        return ServerCacheClient.this;
                    }

                    @Override
                    public TaskExecutionMetricsHolder getTaskExecutionMetric() {
                        return NO_OP_INSTANCE;
                    }
                }));
            } else {
                if (LOG.isDebugEnabled()) {
                    LOG.debug(addCustomAnnotations("NOT adding cache entry to be sent for " + entry + " since one already exists for that entry", connection));
                }
            }
        }
        hashCacheSpec = new ServerCache(cacheId, servers, cachePtr.getLength());
        // Execute in parallel
        int timeoutMs = services.getProps().getInt(QueryServices.THREAD_TIMEOUT_MS_ATTRIB, QueryServicesOptions.DEFAULT_THREAD_TIMEOUT_MS);
        for (Future<Boolean> future : futures) {
            future.get(timeoutMs, TimeUnit.MILLISECONDS);
        }
        cacheUsingTableRefMap.put(Bytes.mapKey(cacheId), cacheUsingTableRef);
        success = true;
    } catch (SQLException e) {
        firstException = e;
    } catch (Exception e) {
        firstException = new SQLException(e);
    } finally {
        try {
            if (!success) {
                SQLCloseables.closeAllQuietly(Collections.singletonList(hashCacheSpec));
                for (Future<Boolean> future : futures) {
                    future.cancel(true);
                }
            }
        } finally {
            try {
                Closeables.closeAll(closeables);
            } catch (IOException e) {
                if (firstException == null) {
                    firstException = new SQLException(e);
                }
            } finally {
                if (firstException != null) {
                    throw firstException;
                }
            }
        }
    }
    if (LOG.isDebugEnabled()) {
        LOG.debug(addCustomAnnotations("Cache " + cacheId + " successfully added to servers.", connection));
    }
    return hashCacheSpec;
}
Also used: SQLException(java.sql.SQLException), SQLCloseable(org.apache.phoenix.util.SQLCloseable), Closeable(java.io.Closeable), ArrayList(java.util.ArrayList), JobCallable(org.apache.phoenix.job.JobManager.JobCallable), HTableInterface(org.apache.hadoop.hbase.client.HTableInterface), ServerRpcController(org.apache.hadoop.hbase.ipc.ServerRpcController), PTable(org.apache.phoenix.schema.PTable), HRegionLocation(org.apache.hadoop.hbase.HRegionLocation), BlockingRpcCallback(org.apache.hadoop.hbase.ipc.BlockingRpcCallback), HashSet(java.util.HashSet), ServerCachingService(org.apache.phoenix.coprocessor.generated.ServerCachingProtos.ServerCachingService), MemoryChunk(org.apache.phoenix.memory.MemoryManager.MemoryChunk), AddServerCacheRequest(org.apache.phoenix.coprocessor.generated.ServerCachingProtos.AddServerCacheRequest), IOException(java.io.IOException), ExecutorService(java.util.concurrent.ExecutorService), Future(java.util.concurrent.Future), ServerCacheFactory(org.apache.phoenix.coprocessor.ServerCachingProtocol.ServerCacheFactory), ConnectionQueryServices(org.apache.phoenix.query.ConnectionQueryServices), AddServerCacheResponse(org.apache.phoenix.coprocessor.generated.ServerCachingProtos.AddServerCacheResponse)
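
The endpoint invocation above follows a reusable shape: choose a key that falls inside the target region, call coprocessorService over the [key, key] range so exactly one region answers, and unwrap the single response. Below is a minimal sketch of that shape, detached from Phoenix's generated ServerCachingService types; the class and method names are illustrative, not part of the Phoenix or HBase API.

import java.util.Map;

import org.apache.hadoop.hbase.client.HTableInterface;
import org.apache.hadoop.hbase.client.coprocessor.Batch;

import com.google.protobuf.Service;

public final class EndpointCalls {

    // Invoke a coprocessor endpoint only on the region containing rowKey and
    // return its response (or null if no region answered). The [rowKey, rowKey]
    // range mirrors the htable.coprocessorService(..., key, key, ...) call above.
    public static <S extends Service, R> R callOnRegionOf(HTableInterface htable,
            byte[] rowKey, Class<S> service, Batch.Call<S, R> call) throws Throwable {
        Map<byte[], R> results = htable.coprocessorService(service, rowKey, rowKey, call);
        return results.isEmpty() ? null : results.values().iterator().next();
    }
}

The Batch.Call supplied by the caller would build the protobuf request and drive a BlockingRpcCallback exactly as the anonymous class in the Phoenix code does.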

Example 47 with HTableInterface

Use of org.apache.hadoop.hbase.client.HTableInterface in project phoenix by apache.

The class TestUtil, method doMajorCompaction.

/**
 * Runs a major compaction, and then waits until the compaction is complete before returning.
 *
 * @param tableName name of the table to be compacted
 */
public static void doMajorCompaction(Connection conn, String tableName) throws Exception {
    tableName = SchemaUtil.normalizeIdentifier(tableName);
    // We simply write a marker row, request a major compaction, and then wait until the marker
    // row is gone
    PhoenixConnection pconn = conn.unwrap(PhoenixConnection.class);
    PTable table = pconn.getTable(new PTableKey(pconn.getTenantId(), tableName));
    ConnectionQueryServices services = conn.unwrap(PhoenixConnection.class).getQueryServices();
    MutationState mutationState = pconn.getMutationState();
    if (table.isTransactional()) {
        mutationState.startTransaction();
    }
    try (HTableInterface htable = mutationState.getHTable(table)) {
        byte[] markerRowKey = Bytes.toBytes("TO_DELETE");
        Put put = new Put(markerRowKey);
        put.add(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, QueryConstants.EMPTY_COLUMN_VALUE_BYTES, QueryConstants.EMPTY_COLUMN_VALUE_BYTES);
        htable.put(put);
        Delete delete = new Delete(markerRowKey);
        delete.deleteColumn(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, QueryConstants.EMPTY_COLUMN_VALUE_BYTES);
        htable.delete(delete);
        htable.close();
        if (table.isTransactional()) {
            mutationState.commit();
        }
        HBaseAdmin hbaseAdmin = services.getAdmin();
        hbaseAdmin.flush(tableName);
        hbaseAdmin.majorCompact(tableName);
        hbaseAdmin.close();
        boolean compactionDone = false;
        while (!compactionDone) {
            Thread.sleep(6000L);
            Scan scan = new Scan();
            scan.setStartRow(markerRowKey);
            scan.setStopRow(Bytes.add(markerRowKey, new byte[] { 0 }));
            scan.setRaw(true);
            try (HTableInterface htableForRawScan = services.getTable(Bytes.toBytes(tableName))) {
                ResultScanner scanner = htableForRawScan.getScanner(scan);
                List<Result> results = Lists.newArrayList(scanner);
                LOG.info("Results: " + results);
                compactionDone = results.isEmpty();
                scanner.close();
            }
            LOG.info("Compaction done: " + compactionDone);
            // need to run compaction after the next txn snapshot has been written so that compaction can remove deleted rows
            if (!compactionDone && table.isTransactional()) {
                hbaseAdmin = services.getAdmin();
                hbaseAdmin.flush(tableName);
                hbaseAdmin.majorCompact(tableName);
                hbaseAdmin.close();
            }
        }
    }
}
Also used: Delete(org.apache.hadoop.hbase.client.Delete), PhoenixConnection(org.apache.phoenix.jdbc.PhoenixConnection), ResultScanner(org.apache.hadoop.hbase.client.ResultScanner), HTableInterface(org.apache.hadoop.hbase.client.HTableInterface), PTable(org.apache.phoenix.schema.PTable), Put(org.apache.hadoop.hbase.client.Put), Result(org.apache.hadoop.hbase.client.Result), HBaseAdmin(org.apache.hadoop.hbase.client.HBaseAdmin), MutationState(org.apache.phoenix.execute.MutationState), Scan(org.apache.hadoop.hbase.client.Scan), PTableKey(org.apache.phoenix.schema.PTableKey), ConnectionQueryServices(org.apache.phoenix.query.ConnectionQueryServices)
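
The wait loop above reduces to a single check: a raw scan over the marker row key. Because Scan.setRaw(true) also returns delete markers, an empty result proves the major compaction has physically removed both the marker Put and its Delete. Here is a small sketch of just that check against any HTableInterface; the class and method names are illustrative.

import java.io.IOException;

import org.apache.hadoop.hbase.client.HTableInterface;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.util.Bytes;

public final class CompactionChecks {

    // Returns true once a raw scan over the marker row sees no cells at all,
    // i.e. the row's delete markers have been compacted away.
    public static boolean markerRowCompacted(HTableInterface htable, byte[] markerRowKey)
            throws IOException {
        Scan scan = new Scan();
        scan.setStartRow(markerRowKey);
        scan.setStopRow(Bytes.add(markerRowKey, new byte[] { 0 }));
        scan.setRaw(true); // include delete markers and deleted cells
        try (ResultScanner scanner = htable.getScanner(scan)) {
            for (Result result : scanner) {
                if (!result.isEmpty()) {
                    return false;
                }
            }
            return true;
        }
    }
}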

Example 48 with HTableInterface

Use of org.apache.hadoop.hbase.client.HTableInterface in project phoenix by apache.

The class TestUtil, method clearMetaDataCache.

public static void clearMetaDataCache(Connection conn) throws Throwable {
    PhoenixConnection pconn = conn.unwrap(PhoenixConnection.class);
    HTableInterface htable = pconn.getQueryServices().getTable(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME_BYTES);
    htable.coprocessorService(MetaDataService.class, HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW, new Batch.Call<MetaDataService, ClearCacheResponse>() {

        @Override
        public ClearCacheResponse call(MetaDataService instance) throws IOException {
            ServerRpcController controller = new ServerRpcController();
            BlockingRpcCallback<ClearCacheResponse> rpcCallback = new BlockingRpcCallback<ClearCacheResponse>();
            ClearCacheRequest.Builder builder = ClearCacheRequest.newBuilder();
            instance.clearCache(controller, builder.build(), rpcCallback);
            if (controller.getFailedOn() != null) {
                throw controller.getFailedOn();
            }
            return rpcCallback.get();
        }
    });
}
Also used: PhoenixConnection(org.apache.phoenix.jdbc.PhoenixConnection), MetaDataService(org.apache.phoenix.coprocessor.generated.MetaDataProtos.MetaDataService), Batch(org.apache.hadoop.hbase.client.coprocessor.Batch), ClearCacheResponse(org.apache.phoenix.coprocessor.generated.MetaDataProtos.ClearCacheResponse), BlockingRpcCallback(org.apache.hadoop.hbase.ipc.BlockingRpcCallback), IOException(java.io.IOException), HTableInterface(org.apache.hadoop.hbase.client.HTableInterface), ServerRpcController(org.apache.hadoop.hbase.ipc.ServerRpcController)
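
A hypothetical way to invoke this helper from a test, assuming the phoenix-core test classes are on the classpath and a cluster is reachable at the illustrative JDBC URL below:

import java.sql.Connection;
import java.sql.DriverManager;

import org.apache.phoenix.util.TestUtil;

public class ClearCacheExample {

    // Forces every region of SYSTEM.CATALOG to drop its server-side metadata
    // cache, so subsequent queries re-read table metadata from SYSTEM.CATALOG.
    public static void main(String[] args) throws Throwable {
        try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost")) {
            TestUtil.clearMetaDataCache(conn);
        }
    }
}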

Example 49 with HTableInterface

Use of org.apache.hadoop.hbase.client.HTableInterface in project phoenix by apache.

The class PhoenixTransactionalIndexer, method getIndexUpdates.

private Collection<Pair<Mutation, byte[]>> getIndexUpdates(RegionCoprocessorEnvironment env, PhoenixIndexMetaData indexMetaData, Iterator<Mutation> mutationIterator, byte[] txRollbackAttribute) throws IOException {
    Transaction tx = indexMetaData.getTransaction();
    if (tx == null) {
        throw new NullPointerException("Expected to find transaction in metadata for " + env.getRegionInfo().getTable().getNameAsString());
    }
    boolean isRollback = txRollbackAttribute != null;
    boolean isImmutable = indexMetaData.isImmutableRows();
    ResultScanner currentScanner = null;
    TransactionAwareHTable txTable = null;
    // Collect up all mutations in batch
    Map<ImmutableBytesPtr, MultiMutation> mutations = new HashMap<ImmutableBytesPtr, MultiMutation>();
    Map<ImmutableBytesPtr, MultiMutation> findPriorValueMutations;
    if (isImmutable && !isRollback) {
        findPriorValueMutations = new HashMap<ImmutableBytesPtr, MultiMutation>();
    } else {
        findPriorValueMutations = mutations;
    }
    while (mutationIterator.hasNext()) {
        Mutation m = mutationIterator.next();
        // add the mutation to the batch set
        ImmutableBytesPtr row = new ImmutableBytesPtr(m.getRow());
        if (mutations != findPriorValueMutations && isDeleteMutation(m)) {
            addMutation(findPriorValueMutations, row, m);
        }
        addMutation(mutations, row, m);
    }
    // Collect the set of mutable ColumnReferences so that we can first
    // run a scan to get the current state. We'll need this to delete
    // the existing index rows.
    List<IndexMaintainer> indexMaintainers = indexMetaData.getIndexMaintainers();
    int estimatedSize = indexMaintainers.size() * 10;
    Set<ColumnReference> mutableColumns = Sets.newHashSetWithExpectedSize(estimatedSize);
    for (IndexMaintainer indexMaintainer : indexMaintainers) {
        // For transactional tables, we use an index maintainer
        // to aid in rollback if there's a KeyValue column in the index. The alternative would be
        // to hold on to all uncommitted index row keys (even ones already sent to HBase) on the
        // client side.
        Set<ColumnReference> allColumns = indexMaintainer.getAllColumns();
        mutableColumns.addAll(allColumns);
    }
    Collection<Pair<Mutation, byte[]>> indexUpdates = new ArrayList<Pair<Mutation, byte[]>>(mutations.size() * 2 * indexMaintainers.size());
    try {
        // If any mutations need the prior row state, run a scan to fetch it first;
        // this logic works for the rollback case as well.
        if (!findPriorValueMutations.isEmpty()) {
            List<KeyRange> keys = Lists.newArrayListWithExpectedSize(mutations.size());
            for (ImmutableBytesPtr ptr : findPriorValueMutations.keySet()) {
                keys.add(PVarbinary.INSTANCE.getKeyRange(ptr.copyBytesIfNecessary()));
            }
            Scan scan = new Scan();
            // Project all mutable columns
            for (ColumnReference ref : mutableColumns) {
                scan.addColumn(ref.getFamily(), ref.getQualifier());
            }
            /*
             * Indexes inherit the storage scheme of the data table which means all the indexes have the same
             * storage scheme and empty key value qualifier. Note that this assumption would be broken if we start
             * supporting new indexes over existing data tables to have a different storage scheme than the data
             * table.
             */
            byte[] emptyKeyValueQualifier = indexMaintainers.get(0).getEmptyKeyValueQualifier();
            // Project empty key value column
            scan.addColumn(indexMaintainers.get(0).getDataEmptyKeyValueCF(), emptyKeyValueQualifier);
            ScanRanges scanRanges = ScanRanges.create(SchemaUtil.VAR_BINARY_SCHEMA, Collections.singletonList(keys), ScanUtil.SINGLE_COLUMN_SLOT_SPAN, KeyRange.EVERYTHING_RANGE, null, true, -1);
            scanRanges.initializeScan(scan);
            TableName tableName = env.getRegion().getRegionInfo().getTable();
            HTableInterface htable = env.getTable(tableName);
            txTable = new TransactionAwareHTable(htable);
            txTable.startTx(tx);
            // For rollback, we need to see all versions, including
            // the last committed version as there may be multiple
            // checkpointed versions.
            SkipScanFilter filter = scanRanges.getSkipScanFilter();
            if (isRollback) {
                filter = new SkipScanFilter(filter, true);
                tx.setVisibility(VisibilityLevel.SNAPSHOT_ALL);
            }
            scan.setFilter(filter);
            currentScanner = txTable.getScanner(scan);
        }
        if (isRollback) {
            processRollback(env, indexMetaData, txRollbackAttribute, currentScanner, tx, mutableColumns, indexUpdates, mutations);
        } else {
            processMutation(env, indexMetaData, txRollbackAttribute, currentScanner, tx, mutableColumns, indexUpdates, mutations, findPriorValueMutations);
        }
    } finally {
        if (txTable != null)
            txTable.close();
    }
    return indexUpdates;
}
Also used: MultiMutation(org.apache.phoenix.hbase.index.MultiMutation), HashMap(java.util.HashMap), KeyRange(org.apache.phoenix.query.KeyRange), ArrayList(java.util.ArrayList), HTableInterface(org.apache.hadoop.hbase.client.HTableInterface), SkipScanFilter(org.apache.phoenix.filter.SkipScanFilter), Pair(org.apache.hadoop.hbase.util.Pair), TransactionAwareHTable(org.apache.tephra.hbase.TransactionAwareHTable), ResultScanner(org.apache.hadoop.hbase.client.ResultScanner), ImmutableBytesPtr(org.apache.phoenix.hbase.index.util.ImmutableBytesPtr), ScanRanges(org.apache.phoenix.compile.ScanRanges), TableName(org.apache.hadoop.hbase.TableName), Transaction(org.apache.tephra.Transaction), Scan(org.apache.hadoop.hbase.client.Scan), Mutation(org.apache.hadoop.hbase.client.Mutation), ColumnReference(org.apache.phoenix.hbase.index.covered.update.ColumnReference)
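
The HTableInterface here is notable because it is obtained server-side from the RegionCoprocessorEnvironment (env.getTable(...)) and then wrapped in Tephra's TransactionAwareHTable, so the scan only returns data visible to the caller's transaction. The following is a stripped-down sketch of just that wiring, with the index-maintenance logic omitted; the class and method names are illustrative.

import java.io.IOException;

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.HTableInterface;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
import org.apache.tephra.Transaction;
import org.apache.tephra.hbase.TransactionAwareHTable;

public final class TxScanSketch {

    // Scan the coprocessor's own table under the supplied transaction and count
    // the rows that are visible to it.
    public static long countVisibleRows(RegionCoprocessorEnvironment env, Transaction tx,
            Scan scan) throws IOException {
        TableName tableName = env.getRegion().getRegionInfo().getTable();
        HTableInterface htable = env.getTable(tableName);
        TransactionAwareHTable txTable = new TransactionAwareHTable(htable);
        txTable.startTx(tx);
        long rows = 0;
        try (ResultScanner scanner = txTable.getScanner(scan)) {
            for (Result ignored : scanner) {
                rows++;
            }
        } finally {
            txTable.close(); // release the table handle obtained from the environment
        }
        return rows;
    }
}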

Example 50 with HTableInterface

Use of org.apache.hadoop.hbase.client.HTableInterface in project phoenix by apache.

The class ConnectionQueryServicesImpl, method clearTableFromCache.

@Override
public void clearTableFromCache(final byte[] tenantId, final byte[] schemaName, final byte[] tableName, final long clientTS) throws SQLException {
    // clear the meta data cache for the table here
    try {
        SQLException sqlE = null;
        HTableInterface htable = this.getTable(SchemaUtil.getPhysicalName(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME_BYTES, this.getProps()).getName());
        try {
            htable.coprocessorService(MetaDataService.class, HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW, new Batch.Call<MetaDataService, ClearTableFromCacheResponse>() {

                @Override
                public ClearTableFromCacheResponse call(MetaDataService instance) throws IOException {
                    ServerRpcController controller = new ServerRpcController();
                    BlockingRpcCallback<ClearTableFromCacheResponse> rpcCallback = new BlockingRpcCallback<ClearTableFromCacheResponse>();
                    ClearTableFromCacheRequest.Builder builder = ClearTableFromCacheRequest.newBuilder();
                    builder.setTenantId(ByteStringer.wrap(tenantId));
                    builder.setTableName(ByteStringer.wrap(tableName));
                    builder.setSchemaName(ByteStringer.wrap(schemaName));
                    builder.setClientTimestamp(clientTS);
                    builder.setClientVersion(VersionUtil.encodeVersion(PHOENIX_MAJOR_VERSION, PHOENIX_MINOR_VERSION, PHOENIX_PATCH_NUMBER));
                    instance.clearTableFromCache(controller, builder.build(), rpcCallback);
                    if (controller.getFailedOn() != null) {
                        throw controller.getFailedOn();
                    }
                    return rpcCallback.get();
                }
            });
        } catch (IOException e) {
            throw ServerUtil.parseServerException(e);
        } catch (Throwable e) {
            sqlE = new SQLException(e);
        } finally {
            try {
                htable.close();
            } catch (IOException e) {
                if (sqlE == null) {
                    sqlE = ServerUtil.parseServerException(e);
                } else {
                    sqlE.setNextException(ServerUtil.parseServerException(e));
                }
            } finally {
                if (sqlE != null) {
                    throw sqlE;
                }
            }
        }
    } catch (Exception e) {
        throw new SQLException(ServerUtil.parseServerException(e));
    }
}
Also used: SQLException(java.sql.SQLException), KeyValueBuilder(org.apache.phoenix.hbase.index.util.KeyValueBuilder), NonTxIndexBuilder(org.apache.phoenix.hbase.index.covered.NonTxIndexBuilder), ThreadFactoryBuilder(com.google.common.util.concurrent.ThreadFactoryBuilder), PhoenixIndexBuilder(org.apache.phoenix.index.PhoenixIndexBuilder), IOException(java.io.IOException), PhoenixIOException(org.apache.phoenix.exception.PhoenixIOException), HTableInterface(org.apache.hadoop.hbase.client.HTableInterface), ServerRpcController(org.apache.hadoop.hbase.ipc.ServerRpcController), TableAlreadyExistsException(org.apache.phoenix.schema.TableAlreadyExistsException), UpgradeInProgressException(org.apache.phoenix.exception.UpgradeInProgressException), AccessDeniedException(org.apache.hadoop.hbase.security.AccessDeniedException), RetriableUpgradeException(org.apache.phoenix.exception.RetriableUpgradeException), ExecutionException(java.util.concurrent.ExecutionException), UpgradeNotRequiredException(org.apache.phoenix.exception.UpgradeNotRequiredException), NewerTableAlreadyExistsException(org.apache.phoenix.schema.NewerTableAlreadyExistsException), NewerSchemaAlreadyExistsException(org.apache.phoenix.schema.NewerSchemaAlreadyExistsException), TableExistsException(org.apache.hadoop.hbase.TableExistsException), ColumnFamilyNotFoundException(org.apache.phoenix.schema.ColumnFamilyNotFoundException), TableNotFoundException(org.apache.phoenix.schema.TableNotFoundException), ColumnAlreadyExistsException(org.apache.phoenix.schema.ColumnAlreadyExistsException), TimeoutException(java.util.concurrent.TimeoutException), FunctionNotFoundException(org.apache.phoenix.schema.FunctionNotFoundException), ReadOnlyTableException(org.apache.phoenix.schema.ReadOnlyTableException), EmptySequenceCacheException(org.apache.phoenix.schema.EmptySequenceCacheException), ClearTableFromCacheResponse(org.apache.phoenix.coprocessor.generated.MetaDataProtos.ClearTableFromCacheResponse), MetaDataService(org.apache.phoenix.coprocessor.generated.MetaDataProtos.MetaDataService), Batch(org.apache.hadoop.hbase.client.coprocessor.Batch), BlockingRpcCallback(org.apache.hadoop.hbase.ipc.BlockingRpcCallback)
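
The try/finally ladder above is a recurring Phoenix idiom: remember the first SQLException, chain any failure from htable.close() onto it with setNextException, and rethrow only after both attempts have run, so a close error never masks the original failure. Here is a compact sketch of that idiom in isolation; the class and method names are illustrative.

import java.io.IOException;
import java.sql.SQLException;

import org.apache.hadoop.hbase.client.HTableInterface;

public final class CloseWithChaining {

    // Close the table, chaining any close failure onto a pending SQLException
    // (if there is one) so the caller sees both errors, then rethrow.
    public static void closeAndRethrow(HTableInterface htable, SQLException pending)
            throws SQLException {
        try {
            htable.close();
        } catch (IOException e) {
            SQLException closeFailure = new SQLException(e);
            if (pending == null) {
                pending = closeFailure;
            } else {
                pending.setNextException(closeFailure);
            }
        }
        if (pending != null) {
            throw pending;
        }
    }
}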

Aggregations

HTableInterface (org.apache.hadoop.hbase.client.HTableInterface): 122
Result (org.apache.hadoop.hbase.client.Result): 43
Put (org.apache.hadoop.hbase.client.Put): 42
IOException (java.io.IOException): 38
ArrayList (java.util.ArrayList): 27
PhoenixConnection (org.apache.phoenix.jdbc.PhoenixConnection): 23
Get (org.apache.hadoop.hbase.client.Get): 21
Scan (org.apache.hadoop.hbase.client.Scan): 21
Test (org.junit.Test): 20
SQLException (java.sql.SQLException): 19
HashMap (java.util.HashMap): 17
ResultScanner (org.apache.hadoop.hbase.client.ResultScanner): 17
Connection (java.sql.Connection): 15
Delete (org.apache.hadoop.hbase.client.Delete): 12
HBaseAdmin (org.apache.hadoop.hbase.client.HBaseAdmin): 12
Mutation (org.apache.hadoop.hbase.client.Mutation): 12
PhoenixIOException (org.apache.phoenix.exception.PhoenixIOException): 11
ResultSet (java.sql.ResultSet): 10
Map (java.util.Map): 9
Configuration (org.apache.hadoop.conf.Configuration): 9