Search in sources :

Example 66 with RegionLocator

use of org.apache.hadoop.hbase.client.RegionLocator in project hbase by apache.

In the class TestLoadIncrementalHFilesSplitRecovery, the method testBulkLoadPhaseFailure.

/**
 * Test that shows that exception thrown from the RS side will result in an
 * exception on the LIHFile client.
 */
@Test(expected = IOException.class, timeout = 120000)
public void testBulkLoadPhaseFailure() throws Exception {
    final TableName table = TableName.valueOf(name.getMethodName());
    // Counts every invocation of tryAtomicRegionLoad, sabotaged or not.
    final AtomicInteger attemptedCalls = new AtomicInteger();
    // Counts the invocations we deliberately sabotage with a mocked connection.
    final AtomicInteger failedCalls = new AtomicInteger();
    // Keep retries low so the injected failure surfaces quickly.
    util.getConfiguration().setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 2);
    try (Connection connection = ConnectionFactory.createConnection(util.getConfiguration())) {
        setupTable(connection, table, 10);
        LoadIncrementalHFiles lih = new LoadIncrementalHFiles(util.getConfiguration()) {

            @Override
            protected List<LoadQueueItem> tryAtomicRegionLoad(ClientServiceCallable<byte[]> serviceCallable, TableName tableName, final byte[] first, Collection<LoadQueueItem> lqis) throws IOException {
                // On the first attempt only, swap in a callable built on a mocked
                // connection so the RS-side call fails; the resulting exception
                // must propagate out of doBulkLoad.
                if (attemptedCalls.incrementAndGet() == 1) {
                    try {
                        Connection errConn = getMockedConnection(util.getConfiguration());
                        serviceCallable = this.buildClientServiceCallable(errConn, table, first, lqis, true);
                    } catch (Exception e) {
                        LOG.fatal("mocking cruft, should never happen", e);
                        throw new RuntimeException("mocking cruft, should never happen");
                    }
                    failedCalls.incrementAndGet();
                }
                // Single call site for both the sabotaged and normal paths
                // (the original duplicated this call in each branch).
                return super.tryAtomicRegionLoad(serviceCallable, tableName, first, lqis);
            }
        };
        try {
            // create HFiles for different column families
            Path dir = buildBulkFiles(table, 1);
            try (Table t = connection.getTable(table);
                RegionLocator locator = connection.getRegionLocator(table);
                Admin admin = connection.getAdmin()) {
                lih.doBulkLoad(dir, admin, t, locator);
            }
        } finally {
            // Restore the default retry count so subsequent tests are unaffected.
            util.getConfiguration().setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, HConstants.DEFAULT_HBASE_CLIENT_RETRIES_NUMBER);
        }
        fail("doBulkLoad should have thrown an exception");
    }
}
Also used : Path(org.apache.hadoop.fs.Path) RegionLocator(org.apache.hadoop.hbase.client.RegionLocator) Table(org.apache.hadoop.hbase.client.Table) ClusterConnection(org.apache.hadoop.hbase.client.ClusterConnection) Connection(org.apache.hadoop.hbase.client.Connection) Admin(org.apache.hadoop.hbase.client.Admin) TableExistsException(org.apache.hadoop.hbase.TableExistsException) IOException(java.io.IOException) ServiceException(org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException) TableName(org.apache.hadoop.hbase.TableName) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) Collection(java.util.Collection) ClientServiceCallable(org.apache.hadoop.hbase.client.ClientServiceCallable) Test(org.junit.Test)

Example 67 with RegionLocator

use of org.apache.hadoop.hbase.client.RegionLocator in project hbase by apache.

In the class TestLoadIncrementalHFilesSplitRecovery, the method testGroupOrSplitFailure.

/**
 * Simulates a remote exception during groupOrSplit, which should cause
 * LIHF to exit by throwing an exception to the caller.
 */
@Test(expected = IOException.class, timeout = 120000)
public void testGroupOrSplitFailure() throws Exception {
    final TableName tableName = TableName.valueOf(name.getMethodName());
    try (Connection connection = ConnectionFactory.createConnection(util.getConfiguration())) {
        setupTable(connection, tableName, 10);
        LoadIncrementalHFiles lih = new LoadIncrementalHFiles(util.getConfiguration()) {

            // Number of times groupOrSplit has been invoked so far.
            int invocationCount = 0;

            @Override
            protected Pair<List<LoadQueueItem>, String> groupOrSplit(Multimap<ByteBuffer, LoadQueueItem> regionGroups, final LoadQueueItem item, final Table table, final Pair<byte[][], byte[][]> startEndKeys) throws IOException {
                // Fail exactly on the fifth invocation to simulate a remote error.
                if (++invocationCount == 5) {
                    throw new IOException("failure");
                }
                return super.groupOrSplit(regionGroups, item, table, startEndKeys);
            }
        };
        // create HFiles for different column families
        Path dir = buildBulkFiles(tableName, 1);
        try (Table t = connection.getTable(tableName);
            RegionLocator locator = connection.getRegionLocator(tableName);
            Admin admin = connection.getAdmin()) {
            lih.doBulkLoad(dir, admin, t, locator);
        }
    }
    fail("doBulkLoad should have thrown an exception");
}
Also used : Path(org.apache.hadoop.fs.Path) RegionLocator(org.apache.hadoop.hbase.client.RegionLocator) Table(org.apache.hadoop.hbase.client.Table) ClusterConnection(org.apache.hadoop.hbase.client.ClusterConnection) Connection(org.apache.hadoop.hbase.client.Connection) IOException(java.io.IOException) Admin(org.apache.hadoop.hbase.client.Admin) TableName(org.apache.hadoop.hbase.TableName) Multimap(com.google.common.collect.Multimap) List(java.util.List) Pair(org.apache.hadoop.hbase.util.Pair) Test(org.junit.Test)

Example 68 with RegionLocator

use of org.apache.hadoop.hbase.client.RegionLocator in project hbase by apache.

In the class TestLoadIncrementalHFilesSplitRecovery, the method testSplitWhileBulkLoadPhase.

/**
 * This test exercises the path where there is a split after initial
 * validation but before the atomic bulk load call. We cannot use presplitting
 * to test this path, so we actually inject a split just before the atomic
 * region load.
 */
@Test(timeout = 120000)
public void testSplitWhileBulkLoadPhase() throws Exception {
    final TableName table = TableName.valueOf(name.getMethodName());
    try (Connection connection = ConnectionFactory.createConnection(util.getConfiguration())) {
        setupTable(connection, table, 10);
        populateTable(connection, table, 1);
        assertExpectedTable(table, ROWCOUNT, 1);
        // Now let's cause trouble.  This will occur after checks and cause bulk
        // files to fail when attempting to atomically import.  This is recoverable.
        final AtomicInteger attemptedCalls = new AtomicInteger();
        LoadIncrementalHFiles lih2 = new LoadIncrementalHFiles(util.getConfiguration()) {

            @Override
            protected void bulkLoadPhase(final Table htable, final Connection conn, ExecutorService pool, Deque<LoadQueueItem> queue, final Multimap<ByteBuffer, LoadQueueItem> regionGroups, boolean copyFile, Map<LoadQueueItem, ByteBuffer> item2RegionMap) throws IOException {
                int i = attemptedCalls.incrementAndGet();
                if (i == 1) {
                    // On first attempt force a split.
                    forceSplit(table);
                }
                super.bulkLoadPhase(htable, conn, pool, queue, regionGroups, copyFile, item2RegionMap);
            }
        };
        // create HFiles for different column families
        try (Table t = connection.getTable(table);
            RegionLocator locator = connection.getRegionLocator(table);
            Admin admin = connection.getAdmin()) {
            Path bulk = buildBulkFiles(table, 2);
            lih2.doBulkLoad(bulk, admin, t, locator);
        }
        // check that data was loaded
        // The three expected attempts are 1) failure because need to split, 2)
        // load of split top 3) load of split bottom
        // Fixed JUnit argument order: expected value comes first.
        assertEquals(3, attemptedCalls.get());
        assertExpectedTable(table, ROWCOUNT, 2);
    }
}
Also used : Path(org.apache.hadoop.fs.Path) RegionLocator(org.apache.hadoop.hbase.client.RegionLocator) Table(org.apache.hadoop.hbase.client.Table) ClusterConnection(org.apache.hadoop.hbase.client.ClusterConnection) Connection(org.apache.hadoop.hbase.client.Connection) Admin(org.apache.hadoop.hbase.client.Admin) Deque(java.util.Deque) TableName(org.apache.hadoop.hbase.TableName) Multimap(com.google.common.collect.Multimap) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) ExecutorService(java.util.concurrent.ExecutorService) Map(java.util.Map) NavigableMap(java.util.NavigableMap) Test(org.junit.Test)

Example 69 with RegionLocator

use of org.apache.hadoop.hbase.client.RegionLocator in project hbase by apache.

In the class TestLoadIncrementalHFilesSplitRecovery, the method testGroupOrSplitPresplit.

/**
 * Splits a table and then attempts a bulk load. The bulk import files
 * should be split before being atomically imported, and this test counts
 * the resulting load-queue items to confirm the split happened.
 */
@Test(timeout = 120000)
public void testGroupOrSplitPresplit() throws Exception {
    final TableName table = TableName.valueOf(name.getMethodName());
    try (Connection connection = ConnectionFactory.createConnection(util.getConfiguration())) {
        setupTable(connection, table, 10);
        populateTable(connection, table, 1);
        assertExpectedTable(connection, table, ROWCOUNT, 1);
        forceSplit(table);
        // Tallies every LoadQueueItem that groupOrSplit produces.
        final AtomicInteger countedLqis = new AtomicInteger();
        LoadIncrementalHFiles loader = new LoadIncrementalHFiles(util.getConfiguration()) {

            @Override
            protected Pair<List<LoadQueueItem>, String> groupOrSplit(Multimap<ByteBuffer, LoadQueueItem> regionGroups, final LoadQueueItem item, final Table htable, final Pair<byte[][], byte[][]> startEndKeys) throws IOException {
                Pair<List<LoadQueueItem>, String> result = super.groupOrSplit(regionGroups, item, htable, startEndKeys);
                List<LoadQueueItem> produced = (result == null) ? null : result.getFirst();
                if (produced != null) {
                    countedLqis.addAndGet(produced.size());
                }
                return result;
            }
        };
        // create HFiles for different column families
        Path bulkDir = buildBulkFiles(table, 2);
        try (Table t = connection.getTable(table);
            RegionLocator locator = connection.getRegionLocator(table);
            Admin admin = connection.getAdmin()) {
            loader.doBulkLoad(bulkDir, admin, t, locator);
        }
        assertExpectedTable(connection, table, ROWCOUNT, 2);
        assertEquals(20, countedLqis.get());
    }
}
Also used : Path(org.apache.hadoop.fs.Path) RegionLocator(org.apache.hadoop.hbase.client.RegionLocator) Table(org.apache.hadoop.hbase.client.Table) ClusterConnection(org.apache.hadoop.hbase.client.ClusterConnection) Connection(org.apache.hadoop.hbase.client.Connection) Admin(org.apache.hadoop.hbase.client.Admin) TableName(org.apache.hadoop.hbase.TableName) Multimap(com.google.common.collect.Multimap) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) List(java.util.List) Pair(org.apache.hadoop.hbase.util.Pair) Test(org.junit.Test)

Example 70 with RegionLocator

use of org.apache.hadoop.hbase.client.RegionLocator in project hbase by apache.

In the class TestHFileOutputFormat2, the method testJobConfiguration.

@Ignore("Goes zombie too frequently; needs work. See HBASE-14563")
@Test
public void testJobConfiguration() throws Exception {
    Configuration conf = new Configuration(this.util.getConfiguration());
    conf.set(HConstants.TEMPORARY_FS_DIRECTORY_KEY, util.getDataTestDir("testJobConfiguration").toString());
    Job job = new Job(conf);
    job.setWorkingDirectory(util.getDataTestDir("testJobConfiguration"));
    // Mock the table and region locator so only the incremental-load job
    // wiring is exercised, with no live cluster involved.
    Table table = Mockito.mock(Table.class);
    RegionLocator regionLocator = Mockito.mock(RegionLocator.class);
    setupMockStartKeys(regionLocator);
    setupMockTableName(regionLocator);
    HFileOutputFormat2.configureIncrementalLoad(job, table.getTableDescriptor(), regionLocator);
    // Fixed JUnit argument order: expected value comes first. The reducer
    // count presumably derives from the mocked start keys — 4 expected.
    assertEquals(4, job.getNumReduceTasks());
}
Also used : RegionLocator(org.apache.hadoop.hbase.client.RegionLocator) Table(org.apache.hadoop.hbase.client.Table) Configuration(org.apache.hadoop.conf.Configuration) HBaseConfiguration(org.apache.hadoop.hbase.HBaseConfiguration) Job(org.apache.hadoop.mapreduce.Job) Ignore(org.junit.Ignore) Test(org.junit.Test)

Aggregations

RegionLocator (org.apache.hadoop.hbase.client.RegionLocator)84 Table (org.apache.hadoop.hbase.client.Table)59 Test (org.junit.Test)49 TableName (org.apache.hadoop.hbase.TableName)39 Admin (org.apache.hadoop.hbase.client.Admin)33 Path (org.apache.hadoop.fs.Path)31 HRegionLocation (org.apache.hadoop.hbase.HRegionLocation)30 HRegionInfo (org.apache.hadoop.hbase.HRegionInfo)29 Connection (org.apache.hadoop.hbase.client.Connection)25 Configuration (org.apache.hadoop.conf.Configuration)21 IOException (java.io.IOException)19 HTableDescriptor (org.apache.hadoop.hbase.HTableDescriptor)15 FileSystem (org.apache.hadoop.fs.FileSystem)14 HBaseConfiguration (org.apache.hadoop.hbase.HBaseConfiguration)13 ServerName (org.apache.hadoop.hbase.ServerName)13 HColumnDescriptor (org.apache.hadoop.hbase.HColumnDescriptor)12 ClusterConnection (org.apache.hadoop.hbase.client.ClusterConnection)10 Put (org.apache.hadoop.hbase.client.Put)10 ArrayList (java.util.ArrayList)9 Result (org.apache.hadoop.hbase.client.Result)8