Example 6 with AsyncClusterConnection

use of org.apache.hadoop.hbase.client.AsyncClusterConnection in project hbase by apache.

the class SyncReplicationTestBase method verifyReplicationRequestRejection.

protected final void verifyReplicationRequestRejection(HBaseTestingUtil utility, boolean expectedRejection) throws Exception {
    HRegionServer regionServer = utility.getRSForFirstRegionInTable(TABLE_NAME);
    AsyncClusterConnection connection = regionServer.getAsyncClusterConnection();
    Entry[] entries = new Entry[10];
    for (int i = 0; i < entries.length; i++) {
        entries[i] = new Entry(new WALKeyImpl(HConstants.EMPTY_BYTE_ARRAY, TABLE_NAME, 0), new WALEdit());
    }
    if (!expectedRejection) {
        ReplicationProtobufUtil.replicateWALEntry(connection.getRegionServerAdmin(regionServer.getServerName()), entries, null, null, null, HConstants.REPLICATION_SOURCE_SHIPEDITS_TIMEOUT_DFAULT);
    } else {
        try {
            ReplicationProtobufUtil.replicateWALEntry(connection.getRegionServerAdmin(regionServer.getServerName()), entries, null, null, null, HConstants.REPLICATION_SOURCE_SHIPEDITS_TIMEOUT_DFAULT);
            fail("Should throw IOException when sync-replication state is in A or DA");
        } catch (RemoteException e) {
            assertRejection(e.unwrapRemoteException());
        } catch (DoNotRetryIOException e) {
            assertRejection(e);
        }
    }
}
Also used : Entry(org.apache.hadoop.hbase.wal.WAL.Entry) WALEdit(org.apache.hadoop.hbase.wal.WALEdit) DoNotRetryIOException(org.apache.hadoop.hbase.DoNotRetryIOException) AsyncClusterConnection(org.apache.hadoop.hbase.client.AsyncClusterConnection) WALKeyImpl(org.apache.hadoop.hbase.wal.WALKeyImpl) RemoteException(org.apache.hadoop.ipc.RemoteException) HRegionServer(org.apache.hadoop.hbase.regionserver.HRegionServer)
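The assertRejection helper called in the catch blocks is defined elsewhere in SyncReplicationTestBase and is not part of this snippet. A minimal sketch of what such a check could look like is shown below; the exception type and the message fragment it checks are assumptions, not the actual HBase source.

// Hypothetical sketch of the assertRejection helper referenced above; the real
// implementation in SyncReplicationTestBase may check a different message.
// Uses only org.junit.Assert.
private void assertRejection(Throwable error) {
    // A rejected replication request surfaces as a DoNotRetryIOException on the caller side.
    assertTrue(error instanceof DoNotRetryIOException);
    // Assumed message fragment; the exact wording in HBase may differ.
    assertTrue(error.getMessage().contains("Reject"));
}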

Example 7 with AsyncClusterConnection

use of org.apache.hadoop.hbase.client.AsyncClusterConnection in project hbase by apache.

the class TestBulkLoadHFilesSplitRecovery method testSplitWhileBulkLoadPhase.

/**
 * This test exercises the path where there is a split after initial validation but before the
 * atomic bulk load call. We cannot use presplitting to test this path, so we actually inject a
 * split just before the atomic region load.
 */
@Test
public void testSplitWhileBulkLoadPhase() throws Exception {
    final TableName table = TableName.valueOf(name.getMethodName());
    setupTable(util.getConnection(), table, 10);
    populateTable(util.getConnection(), table, 1);
    assertExpectedTable(table, ROWCOUNT, 1);
    // Now let's cause trouble. This will occur after the initial checks and cause the bulk
    // load files to fail when we attempt to atomically import them. This is recoverable.
    final AtomicInteger attemptedCalls = new AtomicInteger();
    BulkLoadHFilesTool loader = new BulkLoadHFilesTool(util.getConfiguration()) {

        @Override
        protected void bulkLoadPhase(AsyncClusterConnection conn, TableName tableName, Deque<LoadQueueItem> queue, Multimap<ByteBuffer, LoadQueueItem> regionGroups, boolean copyFiles, Map<LoadQueueItem, ByteBuffer> item2RegionMap) throws IOException {
            int i = attemptedCalls.incrementAndGet();
            if (i == 1) {
                // On first attempt force a split.
                forceSplit(table);
            }
            super.bulkLoadPhase(conn, tableName, queue, regionGroups, copyFiles, item2RegionMap);
        }
    };
    // create HFiles for different column families
    Path dir = buildBulkFiles(table, 2);
    loader.bulkLoad(table, dir);
    // check that data was loaded
    // The three expected attempts are: 1) failure because the region needs to split,
    // 2) load of the split top half, 3) load of the split bottom half.
    assertEquals(3, attemptedCalls.get());
    assertExpectedTable(table, ROWCOUNT, 2);
}
Also used : Path(org.apache.hadoop.fs.Path) TableName(org.apache.hadoop.hbase.TableName) Multimap(org.apache.hbase.thirdparty.com.google.common.collect.Multimap) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) AsyncClusterConnection(org.apache.hadoop.hbase.client.AsyncClusterConnection) Deque(java.util.Deque) Map(java.util.Map) Test(org.junit.Test)
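The forceSplit helper invoked inside the overridden bulkLoadPhase is also defined in the test class and not shown here. A rough sketch follows, under the assumption that it simply asks the Admin API to split the table and waits for the daughter regions to come online; the real helper in TestBulkLoadHFilesSplitRecovery may pick an explicit split point or wait differently.

// Hypothetical sketch of a forceSplit helper (assumption, not the HBase source).
private void forceSplit(TableName table) throws Exception {
    // Ask the master to split the table's single region.
    util.getAdmin().split(table);
    // Wait until at least two regions are online for the table.
    util.waitFor(60000, () -> util.getAdmin().getRegions(table).size() >= 2);
}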

Example 8 with AsyncClusterConnection

use of org.apache.hadoop.hbase.client.AsyncClusterConnection in project hbase by apache.

the class TestBulkLoadHFilesSplitRecovery method testCorrectSplitPoint.

@Test
public void testCorrectSplitPoint() throws Exception {
    final TableName table = TableName.valueOf(name.getMethodName());
    byte[][] SPLIT_KEYS = new byte[][] { Bytes.toBytes("row_00000010"), Bytes.toBytes("row_00000020"), Bytes.toBytes("row_00000030"), Bytes.toBytes("row_00000040"), Bytes.toBytes("row_00000050"), Bytes.toBytes("row_00000060"), Bytes.toBytes("row_00000070") };
    setupTableWithSplitkeys(table, NUM_CFS, SPLIT_KEYS);
    final AtomicInteger bulkloadRpcTimes = new AtomicInteger();
    BulkLoadHFilesTool loader = new BulkLoadHFilesTool(util.getConfiguration()) {

        @Override
        protected void bulkLoadPhase(AsyncClusterConnection conn, TableName tableName, Deque<LoadQueueItem> queue, Multimap<ByteBuffer, LoadQueueItem> regionGroups, boolean copyFiles, Map<LoadQueueItem, ByteBuffer> item2RegionMap) throws IOException {
            bulkloadRpcTimes.addAndGet(1);
            super.bulkLoadPhase(conn, tableName, queue, regionGroups, copyFiles, item2RegionMap);
        }
    };
    Path dir = buildBulkFiles(table, 1);
    loader.bulkLoad(table, dir);
    // Before HBASE-25281, this required invoking the bulk load RPC 8 times.
    assertEquals(4, bulkloadRpcTimes.get());
}
Also used : Path(org.apache.hadoop.fs.Path) TableName(org.apache.hadoop.hbase.TableName) Multimap(org.apache.hbase.thirdparty.com.google.common.collect.Multimap) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) AsyncClusterConnection(org.apache.hadoop.hbase.client.AsyncClusterConnection) Deque(java.util.Deque) Map(java.util.Map) Test(org.junit.Test)
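The setupTableWithSplitkeys helper is likewise defined elsewhere in the test. A minimal sketch is given below, assuming it creates a table with NUM_CFS column families pre-split at the given keys via the standard Admin API; the family(i) naming helper is an assumption borrowed from the surrounding test code.

// Hypothetical sketch of setupTableWithSplitkeys; the real helper may differ in details.
private void setupTableWithSplitkeys(TableName table, int cfs, byte[][] splitKeys) throws IOException {
    TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(table);
    for (int i = 0; i < cfs; i++) {
        // family(i) is assumed to produce the per-family name also used by buildBulkFiles.
        builder.setColumnFamily(ColumnFamilyDescriptorBuilder.of(family(i)));
    }
    // Create the table pre-split at the supplied keys.
    util.getAdmin().createTable(builder.build(), splitKeys);
}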

Example 9 with AsyncClusterConnection

use of org.apache.hadoop.hbase.client.AsyncClusterConnection in project hbase by apache.

the class TestBulkLoadHFilesSplitRecovery method testSplitWhileBulkLoadPhaseWithoutItemMap.

/**
 * We are testing a split after initial validation but before the atomic bulk load call.
 * We cannot use presplitting to test this path, so we actually inject a
 * split just before the atomic region load. However, we pass a null item2RegionMap,
 * which should not affect the bulk load behavior.
 */
@Test
public void testSplitWhileBulkLoadPhaseWithoutItemMap() throws Exception {
    final TableName table = TableName.valueOf(name.getMethodName());
    setupTable(util.getConnection(), table, 10);
    populateTable(util.getConnection(), table, 1);
    assertExpectedTable(table, ROWCOUNT, 1);
    // Now let's cause trouble. This will occur after the initial checks and cause the bulk
    // load files to fail when we attempt to atomically import them. This is recoverable.
    final AtomicInteger attemptedCalls = new AtomicInteger();
    BulkLoadHFilesTool loader = new BulkLoadHFilesTool(util.getConfiguration()) {

        @Override
        protected void bulkLoadPhase(final AsyncClusterConnection conn, final TableName tableName, final Deque<LoadQueueItem> queue, final Multimap<ByteBuffer, LoadQueueItem> regionGroups, final boolean copyFiles, final Map<LoadQueueItem, ByteBuffer> item2RegionMap) throws IOException {
            int i = attemptedCalls.incrementAndGet();
            if (i == 1) {
                // On first attempt force a split.
                forceSplit(table);
            }
            // Pass a null item2RegionMap; the bulk load should still work without the
            // LoadQueueItem-to-region mapping.
            super.bulkLoadPhase(conn, tableName, queue, regionGroups, copyFiles, null);
        }
    };
    // create HFiles for different column families
    Path dir = buildBulkFiles(table, 2);
    loader.bulkLoad(table, dir);
    // check that data was loaded
    // The three expected attempts are: 1) failure because the region needs to split,
    // 2) load of the split top half, 3) load of the split bottom half.
    assertEquals(3, attemptedCalls.get());
    assertExpectedTable(table, ROWCOUNT, 2);
}
Also used : Path(org.apache.hadoop.fs.Path) TableName(org.apache.hadoop.hbase.TableName) Multimap(org.apache.hbase.thirdparty.com.google.common.collect.Multimap) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) AsyncClusterConnection(org.apache.hadoop.hbase.client.AsyncClusterConnection) Deque(java.util.Deque) Map(java.util.Map) Test(org.junit.Test)

Example 10 with AsyncClusterConnection

use of org.apache.hadoop.hbase.client.AsyncClusterConnection in project hbase by apache.

the class TestBulkLoadHFilesSplitRecovery method mockAndInjectError.

private static AsyncClusterConnection mockAndInjectError(AsyncClusterConnection conn) {
    AsyncClusterConnection errConn = spy(conn);
    doReturn(failedFuture(new IOException("injecting bulk load error"))).when(errConn).bulkLoad(any(), anyList(), any(), anyBoolean(), any(), any(), anyBoolean(), anyList(), anyBoolean());
    return errConn;
}
Also used : AsyncClusterConnection(org.apache.hadoop.hbase.client.AsyncClusterConnection) IOException(java.io.IOException)
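The failedFuture call used when stubbing the spy is not shown in this snippet; it may come from an HBase utility or a local helper. An equivalent stand-alone sketch is simply a CompletableFuture that is already completed exceptionally, so the stubbed bulkLoad call reports the injected IOException to its caller.

// Minimal sketch of a failedFuture helper (assumption; the test may use a different one).
// Requires java.util.concurrent.CompletableFuture.
private static <T> CompletableFuture<T> failedFuture(Throwable error) {
    CompletableFuture<T> future = new CompletableFuture<>();
    // Complete the future exceptionally so callers observe the injected error.
    future.completeExceptionally(error);
    return future;
}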

Aggregations

AsyncClusterConnection (org.apache.hadoop.hbase.client.AsyncClusterConnection): 14
TableName (org.apache.hadoop.hbase.TableName): 9
Path (org.apache.hadoop.fs.Path): 8
Multimap (org.apache.hbase.thirdparty.com.google.common.collect.Multimap): 8
Test (org.junit.Test): 8
AtomicInteger (java.util.concurrent.atomic.AtomicInteger): 7
Deque (java.util.Deque): 6
Map (java.util.Map): 6
IOException (java.io.IOException): 2
List (java.util.List): 2
Configuration (org.apache.hadoop.conf.Configuration): 2
ArgumentMatchers.anyList (org.mockito.ArgumentMatchers.anyList): 2
Collection (java.util.Collection): 1
FileSystem (org.apache.hadoop.fs.FileSystem): 1
DoNotRetryIOException (org.apache.hadoop.hbase.DoNotRetryIOException): 1
Table (org.apache.hadoop.hbase.client.Table): 1
HRegionServer (org.apache.hadoop.hbase.regionserver.HRegionServer): 1
BulkLoadHFilesTool (org.apache.hadoop.hbase.tool.BulkLoadHFilesTool): 1
Entry (org.apache.hadoop.hbase.wal.WAL.Entry): 1
WALEdit (org.apache.hadoop.hbase.wal.WALEdit): 1