Use of org.apache.hadoop.hbase.wal.WALEdit in project hbase by apache.
The class TestRecoveredEditsReplayAndAbort, method test:
@Test
public void test() throws Exception {
  // set the flush size to 10MB
  CONF.setInt("hbase.hregion.memstore.flush.size", 1024 * 1024 * 10);
  // set the report interval to a very small value
  CONF.setInt("hbase.hstore.report.interval.edits", 1);
  CONF.setInt("hbase.hstore.report.period", 0);
  // mock a RegionServerServices
  final RegionServerAccounting rsAccounting = new RegionServerAccounting(CONF);
  RegionServerServices rs = Mockito.mock(RegionServerServices.class);
  ChunkCreator.initialize(MemStoreLAB.CHUNK_SIZE_DEFAULT, false, 0, 0, 0, null,
    MemStoreLAB.INDEX_CHUNK_SIZE_PERCENTAGE_DEFAULT);
  Mockito.when(rs.getRegionServerAccounting()).thenReturn(rsAccounting);
  Mockito.when(rs.isAborted()).thenReturn(false);
  Mockito.when(rs.getNonceManager()).thenReturn(null);
  Mockito.when(rs.getServerName()).thenReturn(ServerName.valueOf("test", 0, 111));
  Mockito.when(rs.getConfiguration()).thenReturn(CONF);
  // create a region
  TableName testTable = TableName.valueOf("testRecoveredEidtsReplayAndAbort");
  TableDescriptor htd = TableDescriptorBuilder.newBuilder(testTable)
    .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(fam1).build()).build();
  RegionInfo info = RegionInfoBuilder.newBuilder(htd.getTableName()).build();
  Path logDir = TEST_UTIL.getDataTestDirOnTestFS("TestRecoveredEidtsReplayAndAbort.log");
  final WAL wal = HBaseTestingUtil.createWal(CONF, logDir, info);
  Path rootDir = TEST_UTIL.getDataTestDir();
  Path tableDir = CommonFSUtils.getTableDir(rootDir, info.getTable());
  HRegionFileSystem.createRegionOnFileSystem(CONF, TEST_UTIL.getTestFileSystem(), tableDir, info);
  region = HRegion.newHRegion(tableDir, wal, TEST_UTIL.getTestFileSystem(), CONF, info, htd, rs);
  // create some recovered.edits
  final WALFactory wals = new WALFactory(CONF, method);
  try {
    Path regiondir = region.getRegionFileSystem().getRegionDir();
    FileSystem fs = region.getRegionFileSystem().getFileSystem();
    byte[] regionName = region.getRegionInfo().getEncodedNameAsBytes();
    Path recoveredEditsDir = WALSplitUtil.getRegionDirRecoveredEditsDir(regiondir);
    long maxSeqId = 1200;
    long minSeqId = 1000;
    long totalEdits = maxSeqId - minSeqId;
    for (long i = minSeqId; i <= maxSeqId; i += 100) {
      Path recoveredEdits = new Path(recoveredEditsDir, String.format("%019d", i));
      LOG.info("Begin to write recovered.edits : " + recoveredEdits);
      fs.create(recoveredEdits);
      WALProvider.Writer writer = wals.createRecoveredEditsWriter(fs, recoveredEdits);
      for (long j = i; j < i + 100; j++) {
        long time = System.nanoTime();
        WALEdit edit = new WALEdit();
        // a 200KB KeyValue
        byte[] value = new byte[200 * 1024];
        random.nextBytes(value);
        edit.add(new KeyValue(row, fam1, Bytes.toBytes(j), time, KeyValue.Type.Put, value));
        writer.append(new WAL.Entry(
          new WALKeyImpl(regionName, tableName, j, time, HConstants.DEFAULT_CLUSTER_ID), edit));
      }
      writer.close();
    }
    TaskMonitor.get().createStatus(method);
    // try to replay the edits
    try {
      region.initialize(new CancelableProgressable() {
        private long replayedEdits = 0;

        @Override
        public boolean progress() {
          replayedEdits++;
          // during replay, rsAccounting should stay aligned with the region's
          // memstore, because there is only one memstore here
          Assert.assertEquals(rsAccounting.getGlobalMemStoreDataSize(), region.getMemStoreDataSize());
          Assert.assertEquals(rsAccounting.getGlobalMemStoreHeapSize(), region.getMemStoreHeapSize());
          Assert.assertEquals(rsAccounting.getGlobalMemStoreOffHeapSize(), region.getMemStoreOffHeapSize());
          // abort the replay before it finishes, leaving some edits in memory
          return replayedEdits < totalEdits - 10;
        }
      });
      Assert.fail("Should not reach here");
    } catch (IOException t) {
      LOG.info("Current memstore: " + region.getMemStoreDataSize() + ", "
        + region.getMemStoreHeapSize() + ", " + region.getMemStoreOffHeapSize());
    }
    // after aborting the replay, there should be no data left in memory
    Assert.assertEquals(0, rsAccounting.getGlobalMemStoreDataSize());
    Assert.assertEquals(0, region.getMemStoreDataSize());
    // all chunks in the MSLAB should be recycled; otherwise there might be
    // a memory leak
    Assert.assertEquals(0, ChunkCreator.getInstance().numberOfMappedChunks());
  } finally {
    HBaseTestingUtil.closeRegionAndWAL(this.region);
    this.region = null;
    wals.close();
  }
}
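The test only writes recovered.edits and replays them through HRegion.initialize. For reference, here is a minimal sketch of reading such a file back with the same WALFactory. This is an assumption-laden sketch, not part of the test: it reuses the fs, wals, recoveredEdits, and LOG names from above, and WALFactory.createReader / WAL.Reader reflect the HBase 2.x API, which may differ in other versions.

// Hedged sketch: read back one recovered.edits file written above.
try (WAL.Reader reader = wals.createReader(fs, recoveredEdits)) {
  WAL.Entry entry;
  while ((entry = reader.next()) != null) {
    WALEdit edit = entry.getEdit();
    // each entry carries the sequence id it was written with
    LOG.info("seqId=" + entry.getKey().getSequenceId() + ", cells=" + edit.size());
  }
}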
Use of org.apache.hadoop.hbase.wal.WALEdit in project hbase by apache.
The class TestMiniBatchOperationInProgress, method testMiniBatchOperationInProgressMethods:
@Test
public void testMiniBatchOperationInProgressMethods() {
  Pair<Mutation, Integer>[] operations = new Pair[10];
  OperationStatus[] retCodeDetails = new OperationStatus[10];
  WALEdit[] walEditsFromCoprocessors = new WALEdit[10];
  for (int i = 0; i < 10; i++) {
    operations[i] = new Pair<>(new Put(Bytes.toBytes(i)), null);
  }
  // a mini-batch covering operations[0..5)
  MiniBatchOperationInProgress<Pair<Mutation, Integer>> miniBatch =
    new MiniBatchOperationInProgress<>(operations, retCodeDetails, walEditsFromCoprocessors, 0, 5, 5);
  assertEquals(5, miniBatch.size());
  assertTrue(Bytes.equals(Bytes.toBytes(0), miniBatch.getOperation(0).getFirst().getRow()));
  assertTrue(Bytes.equals(Bytes.toBytes(2), miniBatch.getOperation(2).getFirst().getRow()));
  assertTrue(Bytes.equals(Bytes.toBytes(4), miniBatch.getOperation(4).getFirst().getRow()));
  try {
    miniBatch.getOperation(5);
    fail("Should throw Exception while accessing out of range");
  } catch (ArrayIndexOutOfBoundsException e) {
  }
  miniBatch.setOperationStatus(1, OperationStatus.FAILURE);
  assertEquals(OperationStatus.FAILURE, retCodeDetails[1]);
  try {
    miniBatch.setOperationStatus(6, OperationStatus.FAILURE);
    fail("Should throw Exception while accessing out of range");
  } catch (ArrayIndexOutOfBoundsException e) {
  }
  try {
    miniBatch.setWalEdit(5, new WALEdit());
    fail("Should throw Exception while accessing out of range");
  } catch (ArrayIndexOutOfBoundsException e) {
  }
  // a second mini-batch covering operations[7..10)
  miniBatch =
    new MiniBatchOperationInProgress<>(operations, retCodeDetails, walEditsFromCoprocessors, 7, 10, 3);
  try {
    miniBatch.setWalEdit(-1, new WALEdit());
    fail("Should throw Exception while accessing out of range");
  } catch (ArrayIndexOutOfBoundsException e) {
  }
  try {
    miniBatch.getOperation(-1);
    fail("Should throw Exception while accessing out of range");
  } catch (ArrayIndexOutOfBoundsException e) {
  }
  try {
    miniBatch.getOperation(3);
    fail("Should throw Exception while accessing out of range");
  } catch (ArrayIndexOutOfBoundsException e) {
  }
  try {
    miniBatch.getOperationStatus(9);
    fail("Should throw Exception while accessing out of range");
  } catch (ArrayIndexOutOfBoundsException e) {
  }
  try {
    miniBatch.setOperationStatus(3, OperationStatus.FAILURE);
    fail("Should throw Exception while accessing out of range");
  } catch (ArrayIndexOutOfBoundsException e) {
  }
  assertTrue(Bytes.equals(Bytes.toBytes(7), miniBatch.getOperation(0).getFirst().getRow()));
  assertTrue(Bytes.equals(Bytes.toBytes(9), miniBatch.getOperation(2).getFirst().getRow()));
  miniBatch.setOperationStatus(1, OperationStatus.SUCCESS);
  assertEquals(OperationStatus.SUCCESS, retCodeDetails[8]);
  WALEdit wal = new WALEdit();
  miniBatch.setWalEdit(0, wal);
  assertEquals(wal, walEditsFromCoprocessors[7]);
}
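All the out-of-range assertions above exercise one index translation inside MiniBatchOperationInProgress: a mini-batch is a window [firstIndex, lastIndexExclusive) over the full operations array, addressed with 0-based batch indexes. A simplified sketch of that check, reconstructed here for illustration (the real method may differ in detail):

// Simplified sketch of the index translation the assertions exercise.
private int getAbsoluteIndex(int batchIndex) {
  if (batchIndex < 0 || firstIndex + batchIndex >= lastIndexExclusive) {
    throw new ArrayIndexOutOfBoundsException(batchIndex);
  }
  return firstIndex + batchIndex;
}

With firstIndex = 7 and lastIndexExclusive = 10, getOperation(0) resolves to operations[7] and getOperation(3) falls out of range, which is exactly what the last group of assertions checks.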
Use of org.apache.hadoop.hbase.wal.WALEdit in project hbase by apache.
The class ReplicationSourceWALReader, method updateBatchStats:
private void updateBatchStats(WALEntryBatch batch, Entry entry, long entrySize) {
  WALEdit edit = entry.getEdit();
  batch.incrementHeapSize(entrySize);
  Pair<Integer, Integer> nbRowsAndHFiles = countDistinctRowKeysAndHFiles(edit);
  batch.incrementNbRowKeys(nbRowsAndHFiles.getFirst());
  batch.incrementNbHFiles(nbRowsAndHFiles.getSecond());
}
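countDistinctRowKeysAndHFiles is not shown in this excerpt. As a rough illustration of what it has to compute, here is a simplified sketch that assumes cells within a WALEdit are grouped by row (so a row change shows up between adjacent cells) and that bulk-load markers carry the WALEdit.BULK_LOAD qualifier; the real method in ReplicationSourceWALReader may differ.

// Hedged sketch: count distinct row keys and bulk-loaded HFiles in one edit.
private Pair<Integer, Integer> countDistinctRowKeysAndHFiles(WALEdit edit) throws IOException {
  int distinctRowKeys = 0;
  int totalHFiles = 0;
  Cell lastCell = null;
  for (Cell cell : edit.getCells()) {
    if (CellUtil.matchingQualifier(cell, WALEdit.BULK_LOAD)) {
      // a bulk-load marker: sum the store files it references
      WALProtos.BulkLoadDescriptor bld = WALEdit.getBulkLoadDescriptor(cell);
      for (WALProtos.StoreDescriptor store : bld.getStoresList()) {
        totalHFiles += store.getStoreFileList().size();
      }
    } else if (lastCell == null || !CellUtil.matchingRows(lastCell, cell)) {
      // cells of one row are adjacent, so a row change means a new row key
      distinctRowKeys++;
    }
    lastCell = cell;
  }
  return new Pair<>(distinctRowKeys, totalHFiles);
}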
Use of org.apache.hadoop.hbase.wal.WALEdit in project hbase by apache.
The class SyncReplicationTestBase, method verifyReplicationRequestRejection:
protected final void verifyReplicationRequestRejection(HBaseTestingUtil utility,
  boolean expectedRejection) throws Exception {
  HRegionServer regionServer = utility.getRSForFirstRegionInTable(TABLE_NAME);
  AsyncClusterConnection connection = regionServer.getAsyncClusterConnection();
  Entry[] entries = new Entry[10];
  for (int i = 0; i < entries.length; i++) {
    entries[i] = new Entry(new WALKeyImpl(HConstants.EMPTY_BYTE_ARRAY, TABLE_NAME, 0), new WALEdit());
  }
  if (!expectedRejection) {
    ReplicationProtobufUtil.replicateWALEntry(
      connection.getRegionServerAdmin(regionServer.getServerName()), entries, null, null, null,
      HConstants.REPLICATION_SOURCE_SHIPEDITS_TIMEOUT_DFAULT);
  } else {
    try {
      ReplicationProtobufUtil.replicateWALEntry(
        connection.getRegionServerAdmin(regionServer.getServerName()), entries, null, null, null,
        HConstants.REPLICATION_SOURCE_SHIPEDITS_TIMEOUT_DFAULT);
      fail("Should throw IOException when the sync-replication state is A or DA");
    } catch (RemoteException e) {
      assertRejection(e.unwrapRemoteException());
    } catch (DoNotRetryIOException e) {
      assertRejection(e);
    }
  }
}
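The two catch branches cover the two ways a server-side rejection can surface: over RPC it arrives as a RemoteException wrapping the original type, which unwrapRemoteException() restores, while the connection may also surface it already unwrapped as a DoNotRetryIOException. assertRejection itself is defined elsewhere in the test base; a hypothetical minimal version, for illustration only:

// Hypothetical sketch of an assertRejection-style helper; the real helper in
// SyncReplicationTestBase may also assert on the exception message.
private void assertRejection(Throwable error) {
  assertTrue("Expected DoNotRetryIOException but got " + error,
    error instanceof DoNotRetryIOException);
}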
Use of org.apache.hadoop.hbase.wal.WALEdit in project hbase by apache.
The class TestReplicationSmallTests, method testReplicationInReplay:
/**
 * Test for HBASE-15259: WALEdits under replay will also be replicated
 */
@Test
public void testReplicationInReplay() throws Exception {
  final TableName tableName = htable1.getName();
  HRegion region = UTIL1.getMiniHBaseCluster().getRegions(tableName).get(0);
  RegionInfo hri = region.getRegionInfo();
  NavigableMap<byte[], Integer> scopes = new TreeMap<>(Bytes.BYTES_COMPARATOR);
  for (byte[] fam : htable1.getDescriptor().getColumnFamilyNames()) {
    scopes.put(fam, 1);
  }
  final MultiVersionConcurrencyControl mvcc = new MultiVersionConcurrencyControl();
  int index = UTIL1.getMiniHBaseCluster().getServerWith(hri.getRegionName());
  WAL wal = UTIL1.getMiniHBaseCluster().getRegionServer(index).getWAL(region.getRegionInfo());
  final byte[] rowName = Bytes.toBytes("testReplicationInReplay");
  final byte[] qualifier = Bytes.toBytes("q");
  final byte[] value = Bytes.toBytes("v");
  // WALEdit(true) marks this edit as a replay edit
  WALEdit edit = new WALEdit(true);
  long now = EnvironmentEdgeManager.currentTime();
  edit.add(new KeyValue(rowName, famName, qualifier, now, value));
  WALKeyImpl walKey = new WALKeyImpl(hri.getEncodedNameAsBytes(), tableName, now, mvcc, scopes);
  wal.appendData(hri, walKey, edit);
  wal.sync();
  Get get = new Get(rowName);
  for (int i = 0; i < NB_RETRIES; i++) {
    if (i == NB_RETRIES - 1) {
      break;
    }
    Result res = htable2.get(get);
    if (res.size() >= 1) {
      fail("Not supposed to be replicated for " + Bytes.toString(res.getRow()));
    } else {
      LOG.info("Row not replicated, let's wait a bit more...");
      Thread.sleep(SLEEP_TIME);
    }
  }
}
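The pivotal line is new WALEdit(true), which sets the replay flag on the edit; replication is expected to skip such edits so that a cluster replaying WALs does not re-ship data it did not originate. A minimal hedged sketch of that flag in use (the exact skip site in the real replication code path may differ):

// Hedged sketch: the replay flag set by WALEdit(true) and read via isReplay().
WALEdit replayEdit = new WALEdit(true);
if (replayEdit.isReplay()) {
  // a replication source should skip this edit instead of shipping it,
  // which is what the retry loop above verifies: the row must never
  // appear on the peer cluster
}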