
Example 1 with ClientServiceCallable

use of org.apache.hadoop.hbase.client.ClientServiceCallable in project hbase by apache.

the class LoadIncrementalHFiles method bulkLoadPhase.

/**
   * This takes the LQIs grouped by likely regions and attempts to bulk load
   * them.  Any failures are re-queued for another pass with the
   * groupOrSplitPhase.
   */
protected void bulkLoadPhase(final Table table, final Connection conn, ExecutorService pool,
        Deque<LoadQueueItem> queue, final Multimap<ByteBuffer, LoadQueueItem> regionGroups,
        boolean copyFile, Map<LoadQueueItem, ByteBuffer> item2RegionMap) throws IOException {
    // atomically bulk load the groups.
    Set<Future<List<LoadQueueItem>>> loadingFutures = new HashSet<>();
    for (Entry<ByteBuffer, ? extends Collection<LoadQueueItem>> e : regionGroups.asMap().entrySet()) {
        final byte[] first = e.getKey().array();
        final Collection<LoadQueueItem> lqis = e.getValue();
        final ClientServiceCallable<byte[]> serviceCallable = buildClientServiceCallable(conn, table.getName(), first, lqis, copyFile);
        final Callable<List<LoadQueueItem>> call = new Callable<List<LoadQueueItem>>() {

            @Override
            public List<LoadQueueItem> call() throws Exception {
                List<LoadQueueItem> toRetry = tryAtomicRegionLoad(serviceCallable, table.getName(), first, lqis);
                return toRetry;
            }
        };
        if (item2RegionMap != null) {
            for (LoadQueueItem lqi : lqis) {
                item2RegionMap.put(lqi, e.getKey());
            }
        }
        loadingFutures.add(pool.submit(call));
    }
    // get all the results.
    for (Future<List<LoadQueueItem>> future : loadingFutures) {
        try {
            List<LoadQueueItem> toRetry = future.get();
            if (item2RegionMap != null) {
                for (LoadQueueItem lqi : toRetry) {
                    item2RegionMap.remove(lqi);
                }
            }
            // LQIs that are requeued to be regrouped.
            queue.addAll(toRetry);
        } catch (ExecutionException e1) {
            Throwable t = e1.getCause();
            if (t instanceof IOException) {
                // TODO Implement bulk load recovery
                throw new IOException("BulkLoad encountered an unrecoverable problem", t);
            }
            LOG.error("Unexpected execution exception during bulk load", e1);
            throw new IllegalStateException(t);
        } catch (InterruptedException e1) {
            LOG.error("Unexpected interrupted exception during bulk load", e1);
            throw (InterruptedIOException) new InterruptedIOException().initCause(e1);
        }
    }
}
Also used: InterruptedIOException (java.io.InterruptedIOException), IOException (java.io.IOException), ByteBuffer (java.nio.ByteBuffer), ClientServiceCallable (org.apache.hadoop.hbase.client.ClientServiceCallable), Callable (java.util.concurrent.Callable), Future (java.util.concurrent.Future), List (java.util.List), ArrayList (java.util.ArrayList), LinkedList (java.util.LinkedList), ExecutionException (java.util.concurrent.ExecutionException), HashSet (java.util.HashSet)
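The concurrency pattern above, submitting one task per region group and then draining every future while re-queuing whatever each task reports for another pass, can be reduced to a small self-contained sketch. All names below are illustrative and not part of the HBase API:

import java.util.ArrayList;
import java.util.Collections;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

public class SubmitDrainSketch {

    public static void main(String[] args) throws InterruptedException {
        ExecutorService pool = Executors.newFixedThreadPool(4);
        Set<Future<List<String>>> futures = new HashSet<>();
        // Submit one task per "group"; each task returns the items it wants retried.
        for (int group = 0; group < 8; group++) {
            final int g = group;
            futures.add(pool.submit(new Callable<List<String>>() {

                @Override
                public List<String> call() {
                    // A real task would attempt the load here; we fake a partial failure.
                    return g % 3 == 0 ? Collections.singletonList("group-" + g)
                            : Collections.<String>emptyList();
                }
            }));
        }
        // Drain every future, collecting the items that need another pass.
        List<String> retryQueue = new ArrayList<>();
        for (Future<List<String>> f : futures) {
            try {
                // blocks until this task finishes
                retryQueue.addAll(f.get());
            } catch (ExecutionException e) {
                throw new IllegalStateException(e.getCause());
            }
        }
        pool.shutdown();
        System.out.println("re-queued for another pass: " + retryQueue);
    }
}

bulkLoadPhase follows the same shape, with tryAtomicRegionLoad as the task body and the shared Deque<LoadQueueItem> as the retry queue.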

Example 2 with ClientServiceCallable

use of org.apache.hadoop.hbase.client.ClientServiceCallable in project hbase by apache.

the class TestLoadIncrementalHFilesSplitRecovery method testBulkLoadPhaseFailure.

/**
   * Test that shows that an exception thrown from the RS side will result in an
   * exception on the LoadIncrementalHFiles client.
   */
@Test(expected = IOException.class, timeout = 120000)
public void testBulkLoadPhaseFailure() throws Exception {
    final TableName table = TableName.valueOf(name.getMethodName());
    final AtomicInteger attemptedCalls = new AtomicInteger();
    final AtomicInteger failedCalls = new AtomicInteger();
    util.getConfiguration().setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 2);
    try (Connection connection = ConnectionFactory.createConnection(util.getConfiguration())) {
        setupTable(connection, table, 10);
        LoadIncrementalHFiles lih = new LoadIncrementalHFiles(util.getConfiguration()) {

            @Override
            protected List<LoadQueueItem> tryAtomicRegionLoad(
                    ClientServiceCallable<byte[]> serviceCallable, TableName tableName,
                    final byte[] first, Collection<LoadQueueItem> lqis) throws IOException {
                int i = attemptedCalls.incrementAndGet();
                if (i == 1) {
                    Connection errConn;
                    try {
                        errConn = getMockedConnection(util.getConfiguration());
                        serviceCallable = this.buildClientServiceCallable(errConn, table, first, lqis, true);
                    } catch (Exception e) {
                        LOG.fatal("mocking cruft, should never happen", e);
                        throw new RuntimeException("mocking cruft, should never happen", e);
                    }
                    failedCalls.incrementAndGet();
                    return super.tryAtomicRegionLoad(serviceCallable, tableName, first, lqis);
                }
                return super.tryAtomicRegionLoad(serviceCallable, tableName, first, lqis);
            }
        };
        try {
            // create HFiles for different column families
            Path dir = buildBulkFiles(table, 1);
            try (Table t = connection.getTable(table);
                RegionLocator locator = connection.getRegionLocator(table);
                Admin admin = connection.getAdmin()) {
                lih.doBulkLoad(dir, admin, t, locator);
            }
        } finally {
            util.getConfiguration().setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, HConstants.DEFAULT_HBASE_CLIENT_RETRIES_NUMBER);
        }
        fail("doBulkLoad should have thrown an exception");
    }
}
Also used: Path (org.apache.hadoop.fs.Path), RegionLocator (org.apache.hadoop.hbase.client.RegionLocator), Table (org.apache.hadoop.hbase.client.Table), ClusterConnection (org.apache.hadoop.hbase.client.ClusterConnection), Connection (org.apache.hadoop.hbase.client.Connection), Admin (org.apache.hadoop.hbase.client.Admin), TableExistsException (org.apache.hadoop.hbase.TableExistsException), IOException (java.io.IOException), ServiceException (org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException), TableName (org.apache.hadoop.hbase.TableName), AtomicInteger (java.util.concurrent.atomic.AtomicInteger), Collection (java.util.Collection), ClientServiceCallable (org.apache.hadoop.hbase.client.ClientServiceCallable), Test (org.junit.Test)
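The helper getMockedConnection is not included in this excerpt. One plausible shape for it, assuming Mockito and the ClusterConnection accessors the client path relies on (getRegionLocation, locateRegion, getClient), plus the shaded-protobuf ServiceException listed above, is a connection whose bulk-load RPC always throws. Everything below is a hypothetical sketch, not the test's actual helper:

// Hypothetical sketch of getMockedConnection: a ClusterConnection whose
// bulk-load RPC always fails. The stubbed methods are assumptions based on
// the client code path, not a verbatim copy of the real test helper.
private ClusterConnection getMockedConnection(final Configuration conf)
        throws IOException, ServiceException {
    ClusterConnection c = Mockito.mock(ClusterConnection.class);
    Mockito.when(c.getConfiguration()).thenReturn(conf);
    Mockito.doNothing().when(c).close();
    // Always report the same region location so the callable reaches the RPC.
    final HRegionLocation loc = new HRegionLocation(HRegionInfo.FIRST_META_REGIONINFO,
            ServerName.valueOf("example.org", 1234, 0));
    Mockito.when(c.getRegionLocation((TableName) Mockito.any(), (byte[]) Mockito.any(),
            Mockito.anyBoolean())).thenReturn(loc);
    Mockito.when(c.locateRegion((TableName) Mockito.any(), (byte[]) Mockito.any()))
            .thenReturn(loc);
    // Stub the client service so every bulk load attempt throws an IOException.
    ClientProtos.ClientService.BlockingInterface client =
            Mockito.mock(ClientProtos.ClientService.BlockingInterface.class);
    Mockito.when(client.bulkLoadHFile((RpcController) Mockito.any(),
            (ClientProtos.BulkLoadHFileRequest) Mockito.any()))
            .thenThrow(new ServiceException(new IOException("injected bulk load error")));
    Mockito.when(c.getClient(Mockito.any(ServerName.class))).thenReturn(client);
    return c;
}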

Example 3 with ClientServiceCallable

use of org.apache.hadoop.hbase.client.ClientServiceCallable in project hbase by apache.

the class TestLoadIncrementalHFilesSplitRecovery method testRetryOnIOException.

/**
   * Test that shows that an exception thrown from the RS side will result in the
   * expected number of retries set by {@link HConstants#HBASE_CLIENT_RETRIES_NUMBER}
   * when {@link LoadIncrementalHFiles#RETRY_ON_IO_EXCEPTION} is set
   */
@Test
public void testRetryOnIOException() throws Exception {
    final TableName table = TableName.valueOf(name.getMethodName());
    final AtomicInteger calls = new AtomicInteger(1);
    final Connection conn = ConnectionFactory.createConnection(util.getConfiguration());
    util.getConfiguration().setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 2);
    util.getConfiguration().setBoolean(LoadIncrementalHFiles.RETRY_ON_IO_EXCEPTION, true);
    final LoadIncrementalHFiles lih = new LoadIncrementalHFiles(util.getConfiguration()) {

        @Override
        protected List<LoadQueueItem> tryAtomicRegionLoad(
                ClientServiceCallable<byte[]> serverCallable, TableName tableName,
                final byte[] first, Collection<LoadQueueItem> lqis) throws IOException {
            if (calls.getAndIncrement() < util.getConfiguration().getInt(
                    HConstants.HBASE_CLIENT_RETRIES_NUMBER,
                    HConstants.DEFAULT_HBASE_CLIENT_RETRIES_NUMBER) - 1) {
                ClientServiceCallable<byte[]> newServerCallable =
                        new ClientServiceCallable<byte[]>(conn, tableName, first,
                                new RpcControllerFactory(util.getConfiguration()).newController()) {

                    @Override
                    public byte[] rpcCall() throws Exception {
                        throw new IOException("Error calling something on RegionServer");
                    }
                };
                return super.tryAtomicRegionLoad(newServerCallable, tableName, first, lqis);
            } else {
                return super.tryAtomicRegionLoad(serverCallable, tableName, first, lqis);
            }
        }
    };
    setupTable(conn, table, 10);
    Path dir = buildBulkFiles(table, 1);
    lih.doBulkLoad(dir, conn.getAdmin(), conn.getTable(table), conn.getRegionLocator(table));
    util.getConfiguration().setBoolean(LoadIncrementalHFiles.RETRY_ON_IO_EXCEPTION, false);
}
Also used: Path (org.apache.hadoop.fs.Path), TableName (org.apache.hadoop.hbase.TableName), AtomicInteger (java.util.concurrent.atomic.AtomicInteger), ClusterConnection (org.apache.hadoop.hbase.client.ClusterConnection), Connection (org.apache.hadoop.hbase.client.Connection), Collection (java.util.Collection), IOException (java.io.IOException), RpcControllerFactory (org.apache.hadoop.hbase.ipc.RpcControllerFactory), ClientServiceCallable (org.apache.hadoop.hbase.client.ClientServiceCallable), Test (org.junit.Test)
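For context on where the retry count comes from: tryAtomicRegionLoad hands the callable to an RpcRetryingCaller, which re-invokes rpcCall() up to the limit set by HConstants.HBASE_CLIENT_RETRIES_NUMBER. A minimal sketch of that execution step, using the HBase client's RpcRetryingCallerFactory (the wrapper class and method below are illustrative, not part of the source above):

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.client.ClientServiceCallable;
import org.apache.hadoop.hbase.client.RpcRetryingCaller;
import org.apache.hadoop.hbase.client.RpcRetryingCallerFactory;

public class RetryDriverSketch {

    // Runs the callable through the standard HBase client retry loop.
    // Retries are governed by hbase.client.retries.number in the Configuration;
    // the int argument is the per-operation timeout in milliseconds.
    static byte[] callWithClientRetries(Configuration conf,
            ClientServiceCallable<byte[]> callable) throws IOException {
        RpcRetryingCallerFactory factory = RpcRetryingCallerFactory.instantiate(conf);
        RpcRetryingCaller<byte[]> caller = factory.<byte[]>newCaller();
        return caller.callWithRetries(callable, Integer.MAX_VALUE);
    }
}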

Example 4 with ClientServiceCallable

use of org.apache.hadoop.hbase.client.ClientServiceCallable in project hbase by apache.

the class LoadIncrementalHFiles method buildClientServiceCallable.

protected ClientServiceCallable<byte[]> buildClientServiceCallable(final Connection conn,
        TableName tableName, byte[] first, Collection<LoadQueueItem> lqis, boolean copyFile) {
    final List<Pair<byte[], String>> famPaths = new ArrayList<>(lqis.size());
    for (LoadQueueItem lqi : lqis) {
        famPaths.add(Pair.newPair(lqi.family, lqi.hfilePath.toString()));
    }
    return new ClientServiceCallable<byte[]>(conn, tableName, first, rpcControllerFactory.newController()) {

        @Override
        protected byte[] rpcCall() throws Exception {
            SecureBulkLoadClient secureClient = null;
            boolean success = false;
            try {
                LOG.debug("Going to connect to server " + getLocation() + " for row " + Bytes.toStringBinary(getRow()) + " with hfile group " + LoadIncrementalHFiles.this.toString(famPaths));
                byte[] regionName = getLocation().getRegionInfo().getRegionName();
                try (Table table = conn.getTable(getTableName())) {
                    secureClient = new SecureBulkLoadClient(getConf(), table);
                    success = secureClient.secureBulkLoadHFiles(getStub(), famPaths, regionName, assignSeqIds, fsDelegationToken.getUserToken(), bulkToken, copyFile);
                }
                return success ? regionName : null;
            } finally {
                // Best-effort cleanup: the load failed after files may already have
                // been moved to the staging area, so try to move them back to where
                // they came from in the user directory.
                if (secureClient != null && !success) {
                    FileSystem targetFs = FileSystem.get(getConf());
                    // fs is the source filesystem
                    if (fs == null) {
                        fs = lqis.iterator().next().hfilePath.getFileSystem(getConf());
                    }
                    // If the source and target are on the same HDFS we can try to move
                    // the files back, because previously we moved them to the staging
                    // directory.
                    if (FSHDFSUtils.isSameHdfs(getConf(), fs, targetFs)) {
                        for (Pair<byte[], String> el : famPaths) {
                            Path hfileStagingPath = null;
                            Path hfileOrigPath = new Path(el.getSecond());
                            try {
                                hfileStagingPath = new Path(new Path(bulkToken, Bytes.toString(el.getFirst())), hfileOrigPath.getName());
                                if (targetFs.rename(hfileStagingPath, hfileOrigPath)) {
                                    LOG.debug("Moved back file " + hfileOrigPath + " from " + hfileStagingPath);
                                } else if (targetFs.exists(hfileStagingPath)) {
                                    LOG.debug("Unable to move back file " + hfileOrigPath + " from " + hfileStagingPath);
                                }
                            } catch (Exception ex) {
                                LOG.debug("Unable to move back file " + hfileOrigPath + " from " + hfileStagingPath, ex);
                            }
                        }
                    }
                }
            }
        }
    };
}
Also used: Path (org.apache.hadoop.fs.Path), Table (org.apache.hadoop.hbase.client.Table), ArrayList (java.util.ArrayList), TableNotFoundException (org.apache.hadoop.hbase.TableNotFoundException), FileNotFoundException (java.io.FileNotFoundException), InterruptedIOException (java.io.InterruptedIOException), IOException (java.io.IOException), ExecutionException (java.util.concurrent.ExecutionException), SecureBulkLoadClient (org.apache.hadoop.hbase.client.SecureBulkLoadClient), FileSystem (org.apache.hadoop.fs.FileSystem), Pair (org.apache.hadoop.hbase.util.Pair), ClientServiceCallable (org.apache.hadoop.hbase.client.ClientServiceCallable)
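Putting Examples 3 and 4 together: since buildClientServiceCallable is protected, a subclass (or a same-package test) could build a callable for one region group and drive it through the retrying caller. A hedged sketch, in which the class, method, and parameter names are illustrative placeholders:

import java.io.IOException;
import java.util.Collection;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ClientServiceCallable;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.RpcRetryingCallerFactory;
// Package as of the HBase version in these examples.
import org.apache.hadoop.hbase.mapreduce.LoadIncrementalHFiles;

public class BulkLoadDriverSketch extends LoadIncrementalHFiles {

    public BulkLoadDriverSketch(Configuration conf) throws Exception {
        super(conf);
    }

    // Bulk-loads one group of HFiles fronted by firstRowKey, with retries.
    byte[] loadOneGroup(Connection conn, TableName tableName, byte[] firstRowKey,
            Collection<LoadQueueItem> queueItems) throws IOException {
        ClientServiceCallable<byte[]> callable =
                buildClientServiceCallable(conn, tableName, firstRowKey, queueItems, false);
        return RpcRetryingCallerFactory.instantiate(getConf())
                .<byte[]>newCaller().callWithRetries(callable, Integer.MAX_VALUE);
    }
}

A null return mirrors rpcCall() above: secureBulkLoadHFiles reported failure for the group.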

Aggregations

IOException (java.io.IOException): 4
ClientServiceCallable (org.apache.hadoop.hbase.client.ClientServiceCallable): 4
Path (org.apache.hadoop.fs.Path): 3
InterruptedIOException (java.io.InterruptedIOException): 2
ArrayList (java.util.ArrayList): 2
Collection (java.util.Collection): 2
ExecutionException (java.util.concurrent.ExecutionException): 2
AtomicInteger (java.util.concurrent.atomic.AtomicInteger): 2
TableName (org.apache.hadoop.hbase.TableName): 2
ClusterConnection (org.apache.hadoop.hbase.client.ClusterConnection): 2
Connection (org.apache.hadoop.hbase.client.Connection): 2
Table (org.apache.hadoop.hbase.client.Table): 2
Test (org.junit.Test): 2
FileNotFoundException (java.io.FileNotFoundException): 1
ByteBuffer (java.nio.ByteBuffer): 1
HashSet (java.util.HashSet): 1
LinkedList (java.util.LinkedList): 1
List (java.util.List): 1
Callable (java.util.concurrent.Callable): 1
Future (java.util.concurrent.Future): 1