use of org.apache.hadoop.hbase.client.AsyncClusterConnection in project hbase by apache.
the class SyncReplicationTestBase method verifyReplicationRequestRejection.
protected final void verifyReplicationRequestRejection(HBaseTestingUtil utility,
    boolean expectedRejection) throws Exception {
  HRegionServer regionServer = utility.getRSForFirstRegionInTable(TABLE_NAME);
  AsyncClusterConnection connection = regionServer.getAsyncClusterConnection();
  Entry[] entries = new Entry[10];
  for (int i = 0; i < entries.length; i++) {
    entries[i] =
      new Entry(new WALKeyImpl(HConstants.EMPTY_BYTE_ARRAY, TABLE_NAME, 0), new WALEdit());
  }
  if (!expectedRejection) {
    ReplicationProtobufUtil.replicateWALEntry(
      connection.getRegionServerAdmin(regionServer.getServerName()), entries, null, null, null,
      HConstants.REPLICATION_SOURCE_SHIPEDITS_TIMEOUT_DFAULT);
  } else {
    try {
      ReplicationProtobufUtil.replicateWALEntry(
        connection.getRegionServerAdmin(regionServer.getServerName()), entries, null, null, null,
        HConstants.REPLICATION_SOURCE_SHIPEDITS_TIMEOUT_DFAULT);
      fail("Should throw IOException when sync-replication state is in A or DA");
    } catch (RemoteException e) {
      assertRejection(e.unwrapRemoteException());
    } catch (DoNotRetryIOException e) {
      assertRejection(e);
    }
  }
}
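In the sync replication tests that extend this base class, a caller typically moves a peer into the state under test before invoking this helper. A minimal usage sketch, assuming the UTIL1 and PEER_ID fields exposed by the test base (treat them as placeholders):

// Usage sketch (hedged): a cluster whose peer is in ACTIVE (or DOWNGRADE_ACTIVE) state
// is expected to reject incoming replicateWALEntry calls from the remote cluster.
UTIL1.getAdmin().transitReplicationPeerSyncReplicationState(PEER_ID, SyncReplicationState.ACTIVE);
verifyReplicationRequestRejection(UTIL1, true);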
use of org.apache.hadoop.hbase.client.AsyncClusterConnection in project hbase by apache.
the class TestBulkLoadHFilesSplitRecovery method testSplitWhileBulkLoadPhase.
/**
* This test exercises the path where there is a split after initial validation but before the
* atomic bulk load call. We cannot use presplitting to test this path, so we actually inject a
* split just before the atomic region load.
*/
@Test
public void testSplitWhileBulkLoadPhase() throws Exception {
  final TableName table = TableName.valueOf(name.getMethodName());
  setupTable(util.getConnection(), table, 10);
  populateTable(util.getConnection(), table, 1);
  assertExpectedTable(table, ROWCOUNT, 1);
  // Now let's cause trouble. This will occur after the checks and cause the bulk
  // files to fail when we attempt to atomically import them. This is recoverable.
  final AtomicInteger attemptedCalls = new AtomicInteger();
  BulkLoadHFilesTool loader = new BulkLoadHFilesTool(util.getConfiguration()) {
    @Override
    protected void bulkLoadPhase(AsyncClusterConnection conn, TableName tableName,
        Deque<LoadQueueItem> queue, Multimap<ByteBuffer, LoadQueueItem> regionGroups,
        boolean copyFiles, Map<LoadQueueItem, ByteBuffer> item2RegionMap) throws IOException {
      int i = attemptedCalls.incrementAndGet();
      if (i == 1) {
        // On the first attempt force a split.
        forceSplit(table);
      }
      super.bulkLoadPhase(conn, tableName, queue, regionGroups, copyFiles, item2RegionMap);
    }
  };
  // create HFiles for different column families
  Path dir = buildBulkFiles(table, 2);
  loader.bulkLoad(table, dir);
  // Check that the data was loaded. The three expected attempts are:
  // 1) failure because we need to split, 2) load of the split top, 3) load of the split bottom.
  assertEquals(3, attemptedCalls.get());
  assertExpectedTable(table, ROWCOUNT, 2);
}
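The forceSplit helper is defined in the test class and is not shown here. A simplified, hypothetical stand-in using only the public Admin API (the timeout and region-count check are assumptions, not the test's actual implementation):

// Request a split through the Admin API and wait until the region count grows.
private void forceSplit(TableName table) throws Exception {
  int before = util.getAdmin().getRegions(table).size();
  util.getAdmin().split(table);
  util.waitFor(60000, () -> util.getAdmin().getRegions(table).size() > before);
}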
use of org.apache.hadoop.hbase.client.AsyncClusterConnection in project hbase by apache.
the class TestBulkLoadHFilesSplitRecovery method testCorrectSplitPoint.
@Test
public void testCorrectSplitPoint() throws Exception {
  final TableName table = TableName.valueOf(name.getMethodName());
  byte[][] SPLIT_KEYS = new byte[][] { Bytes.toBytes("row_00000010"),
    Bytes.toBytes("row_00000020"), Bytes.toBytes("row_00000030"), Bytes.toBytes("row_00000040"),
    Bytes.toBytes("row_00000050"), Bytes.toBytes("row_00000060"),
    Bytes.toBytes("row_00000070") };
  setupTableWithSplitkeys(table, NUM_CFS, SPLIT_KEYS);
  final AtomicInteger bulkloadRpcTimes = new AtomicInteger();
  BulkLoadHFilesTool loader = new BulkLoadHFilesTool(util.getConfiguration()) {
    @Override
    protected void bulkLoadPhase(AsyncClusterConnection conn, TableName tableName,
        Deque<LoadQueueItem> queue, Multimap<ByteBuffer, LoadQueueItem> regionGroups,
        boolean copyFiles, Map<LoadQueueItem, ByteBuffer> item2RegionMap) throws IOException {
      bulkloadRpcTimes.addAndGet(1);
      super.bulkLoadPhase(conn, tableName, queue, regionGroups, copyFiles, item2RegionMap);
    }
  };
  Path dir = buildBulkFiles(table, 1);
  loader.bulkLoad(table, dir);
  // Before HBASE-25281 we needed to invoke the bulkload rpc 8 times.
  assertEquals(4, bulkloadRpcTimes.get());
}
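setupTableWithSplitkeys is another helper of the test class. A rough sketch of creating an equivalent pre-split table through the Admin API (the family naming scheme is an assumption for illustration):

// Hypothetical equivalent of setupTableWithSplitkeys: a table with NUM_CFS column
// families, pre-split at the given keys.
private void createPreSplitTable(TableName table, int cfs, byte[][] splitKeys) throws IOException {
  TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(table);
  for (int i = 0; i < cfs; i++) {
    builder.setColumnFamily(ColumnFamilyDescriptorBuilder.of("family_" + i));
  }
  util.getAdmin().createTable(builder.build(), splitKeys);
}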
use of org.apache.hadoop.hbase.client.AsyncClusterConnection in project hbase by apache.
the class TestBulkLoadHFilesSplitRecovery method testSplitWhileBulkLoadPhaseWithoutItemMap.
/**
* We are testing a split after initial validation but before the atomic bulk load call.
* We cannot use presplitting to test this path, so we actually inject a
* split just before the atomic region load. However, we will pass a null item2RegionMap,
* and that should not affect the bulk load behavior.
*/
@Test
public void testSplitWhileBulkLoadPhaseWithoutItemMap() throws Exception {
  final TableName table = TableName.valueOf(name.getMethodName());
  setupTable(util.getConnection(), table, 10);
  populateTable(util.getConnection(), table, 1);
  assertExpectedTable(table, ROWCOUNT, 1);
  // Now let's cause trouble. This will occur after the checks and cause the bulk
  // files to fail when we attempt to atomically import them. This is recoverable.
  final AtomicInteger attemptedCalls = new AtomicInteger();
  BulkLoadHFilesTool loader = new BulkLoadHFilesTool(util.getConfiguration()) {
    @Override
    protected void bulkLoadPhase(final AsyncClusterConnection conn, final TableName tableName,
        final Deque<LoadQueueItem> queue, final Multimap<ByteBuffer, LoadQueueItem> regionGroups,
        final boolean copyFiles, final Map<LoadQueueItem, ByteBuffer> item2RegionMap)
        throws IOException {
      int i = attemptedCalls.incrementAndGet();
      if (i == 1) {
        // On the first attempt force a split.
        forceSplit(table);
      }
      // Pass a null item2RegionMap; the bulk load should still work as expected.
      super.bulkLoadPhase(conn, tableName, queue, regionGroups, copyFiles, null);
    }
  };
  // create HFiles for different column families
  Path dir = buildBulkFiles(table, 2);
  loader.bulkLoad(table, dir);
  // Check that the data was loaded. The three expected attempts are:
  // 1) failure because we need to split, 2) load of the split top, 3) load of the split bottom.
  assertEquals(3, attemptedCalls.get());
  assertExpectedTable(table, ROWCOUNT, 2);
}
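assertExpectedTable is a helper of the test class that verifies the loaded data. A minimal sketch of a row-count check in the same spirit (the real helper may assert more, such as cell values and versions):

// Simplified stand-in for assertExpectedTable: scan the table and verify the row count.
private void assertRowCount(TableName table, int expectedRows) throws IOException {
  try (Table t = util.getConnection().getTable(table);
      ResultScanner scanner = t.getScanner(new Scan())) {
    int rows = 0;
    for (Result ignored : scanner) {
      rows++;
    }
    assertEquals(expectedRows, rows);
  }
}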
use of org.apache.hadoop.hbase.client.AsyncClusterConnection in project hbase by apache.
the class TestBulkLoadHFilesSplitRecovery method mockAndInjectError.
private static AsyncClusterConnection mockAndInjectError(AsyncClusterConnection conn) {
  AsyncClusterConnection errConn = spy(conn);
  doReturn(failedFuture(new IOException("injecting bulk load error"))).when(errConn).bulkLoad(
    any(), anyList(), any(), anyBoolean(), any(), any(), anyBoolean(), anyList(), anyBoolean());
  return errConn;
}
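The failedFuture call is not shown above; it may be a static import (for example FutureUtils.failedFuture) or a local helper. A generic version simply completes a CompletableFuture exceptionally:

// Generic sketch of a failedFuture helper.
private static <T> CompletableFuture<T> failedFuture(Throwable error) {
  CompletableFuture<T> future = new CompletableFuture<>();
  future.completeExceptionally(error);
  return future;
}

A test can then substitute the spy for the real connection inside a bulkLoadPhase override on a chosen attempt to simulate a server-side bulk load failure and exercise the retry path.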