Use of org.apache.hadoop.hbase.monitoring.MonitoredTask in project hbase by apache.
From class TestHRegion, method testSkipRecoveredEditsReplay.
@Test
public void testSkipRecoveredEditsReplay() throws Exception {
  byte[] family = Bytes.toBytes("family");
  this.region = initHRegion(tableName, method, CONF, family);
  final WALFactory wals = new WALFactory(CONF, null, method);
  try {
    Path regiondir = region.getRegionFileSystem().getRegionDir();
    FileSystem fs = region.getRegionFileSystem().getFileSystem();
    byte[] regionName = region.getRegionInfo().getEncodedNameAsBytes();
    Path recoveredEditsDir = WALSplitter.getRegionDirRecoveredEditsDir(regiondir);
    long maxSeqId = 1050;
    long minSeqId = 1000;
    // Write one recovered-edits file per sequence id: 1000, 1010, ..., 1050.
    for (long i = minSeqId; i <= maxSeqId; i += 10) {
      Path recoveredEdits = new Path(recoveredEditsDir, String.format("%019d", i));
      fs.create(recoveredEdits);
      WALProvider.Writer writer = wals.createRecoveredEditsWriter(fs, recoveredEdits);
      long time = System.nanoTime();
      WALEdit edit = new WALEdit();
      edit.add(new KeyValue(row, family, Bytes.toBytes(i), time, KeyValue.Type.Put, Bytes.toBytes(i)));
      writer.append(new WAL.Entry(new WALKey(regionName, tableName, i, time, HConstants.DEFAULT_CLUSTER_ID), edit));
      writer.close();
    }
    MonitoredTask status = TaskMonitor.get().createStatus(method);
    Map<byte[], Long> maxSeqIdInStores = new TreeMap<>(Bytes.BYTES_COMPARATOR);
    // Stores report minSeqId - 1 as already persisted, so every edit is newer
    // and must be replayed.
    for (Store store : region.getStores()) {
      maxSeqIdInStores.put(store.getColumnFamilyName().getBytes(), minSeqId - 1);
    }
    long seqId = region.replayRecoveredEditsIfAny(regiondir, maxSeqIdInStores, null, status);
    assertEquals(maxSeqId, seqId);
    region.getMVCC().advanceTo(seqId);
    // Every replayed edit should now be visible through a Get.
    Get get = new Get(row);
    Result result = region.get(get);
    for (long i = minSeqId; i <= maxSeqId; i += 10) {
      List<Cell> kvs = result.getColumnCells(family, Bytes.toBytes(i));
      assertEquals(1, kvs.size());
      assertArrayEquals(Bytes.toBytes(i), CellUtil.cloneValue(kvs.get(0)));
    }
  } finally {
    HBaseTestingUtility.closeRegionAndWAL(this.region);
    this.region = null;
    wals.close();
  }
}
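For context, MonitoredTask is the handle HBase server code uses to publish the progress of long-running operations to the TaskMonitor singleton (the source of the task lists shown in the master and region server web UIs). A minimal sketch of the lifecycle, separate from the test above; the status messages are illustrative, not taken from HBase:

import org.apache.hadoop.hbase.monitoring.MonitoredTask;
import org.apache.hadoop.hbase.monitoring.TaskMonitor;

// Register a task, update it as work progresses, then mark it finished or aborted.
MonitoredTask status = TaskMonitor.get().createStatus("Replaying recovered edits");
try {
  status.setStatus("Opening recovered-edits files"); // illustrative message
  // ... perform the monitored work here ...
  status.markComplete("Replayed all edits");
} catch (Exception e) {
  status.abort("Replay failed: " + e.getMessage());
}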
Use of org.apache.hadoop.hbase.monitoring.MonitoredTask in project hbase by apache.
From class TestActiveMasterManager, method testRestartMaster.
@Test
public void testRestartMaster() throws IOException, KeeperException {
  ZooKeeperWatcher zk = new ZooKeeperWatcher(TEST_UTIL.getConfiguration(), "testActiveMasterManagerFromZK", null, true);
  try {
    ZKUtil.deleteNode(zk, zk.znodePaths.masterAddressZNode);
    ZKUtil.deleteNode(zk, zk.znodePaths.clusterStateZNode);
  } catch (KeeperException.NoNodeException nne) {
    // The znodes may not exist yet; nothing to clean up.
  }
  // Create the master node with a dummy address
  ServerName master = ServerName.valueOf("localhost", 1, System.currentTimeMillis());
  // Should not have a master yet
  DummyMaster dummyMaster = new DummyMaster(zk, master);
  ClusterStatusTracker clusterStatusTracker = dummyMaster.getClusterStatusTracker();
  ActiveMasterManager activeMasterManager = dummyMaster.getActiveMasterManager();
  assertFalse(activeMasterManager.clusterHasActiveMaster.get());
  // First test becoming the active master uninterrupted
  MonitoredTask status = Mockito.mock(MonitoredTask.class);
  clusterStatusTracker.setClusterUp();
  activeMasterManager.blockUntilBecomingActiveMaster(100, status);
  assertTrue(activeMasterManager.clusterHasActiveMaster.get());
  assertMaster(zk, master);
  // Now pretend master restart
  DummyMaster secondDummyMaster = new DummyMaster(zk, master);
  ActiveMasterManager secondActiveMasterManager = secondDummyMaster.getActiveMasterManager();
  assertFalse(secondActiveMasterManager.clusterHasActiveMaster.get());
  activeMasterManager.blockUntilBecomingActiveMaster(100, status);
  assertTrue(activeMasterManager.clusterHasActiveMaster.get());
  assertMaster(zk, master);
}
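Because status here is a Mockito mock, a test in this style could additionally verify that becoming the active master reported progress. A hedged sketch; the original test makes no such assertion, and the exact status messages are internal to ActiveMasterManager:

// Assumption: blockUntilBecomingActiveMaster updates its MonitoredTask at least once.
Mockito.verify(status, Mockito.atLeastOnce()).setStatus(Mockito.anyString());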
Use of org.apache.hadoop.hbase.monitoring.MonitoredTask in project hbase by apache.
From class TestHRegion, method testSkipRecoveredEditsReplaySomeIgnored.
@Test
public void testSkipRecoveredEditsReplaySomeIgnored() throws Exception {
  byte[] family = Bytes.toBytes("family");
  this.region = initHRegion(tableName, method, CONF, family);
  final WALFactory wals = new WALFactory(CONF, null, method);
  try {
    Path regiondir = region.getRegionFileSystem().getRegionDir();
    FileSystem fs = region.getRegionFileSystem().getFileSystem();
    byte[] regionName = region.getRegionInfo().getEncodedNameAsBytes();
    Path recoveredEditsDir = WALSplitter.getRegionDirRecoveredEditsDir(regiondir);
    long maxSeqId = 1050;
    long minSeqId = 1000;
    // Write one recovered-edits file per sequence id: 1000, 1010, ..., 1050.
    for (long i = minSeqId; i <= maxSeqId; i += 10) {
      Path recoveredEdits = new Path(recoveredEditsDir, String.format("%019d", i));
      fs.create(recoveredEdits);
      WALProvider.Writer writer = wals.createRecoveredEditsWriter(fs, recoveredEdits);
      long time = System.nanoTime();
      WALEdit edit = new WALEdit();
      edit.add(new KeyValue(row, family, Bytes.toBytes(i), time, KeyValue.Type.Put, Bytes.toBytes(i)));
      writer.append(new WAL.Entry(new WALKey(regionName, tableName, i, time, HConstants.DEFAULT_CLUSTER_ID), edit));
      writer.close();
    }
    // Stores claim everything below recoverSeqId is already persisted, so edits
    // with a lower sequence id must be skipped during replay.
    long recoverSeqId = 1030;
    MonitoredTask status = TaskMonitor.get().createStatus(method);
    Map<byte[], Long> maxSeqIdInStores = new TreeMap<>(Bytes.BYTES_COMPARATOR);
    for (Store store : region.getStores()) {
      maxSeqIdInStores.put(store.getColumnFamilyName().getBytes(), recoverSeqId - 1);
    }
    long seqId = region.replayRecoveredEditsIfAny(regiondir, maxSeqIdInStores, null, status);
    assertEquals(maxSeqId, seqId);
    region.getMVCC().advanceTo(seqId);
    Get get = new Get(row);
    Result result = region.get(get);
    for (long i = minSeqId; i <= maxSeqId; i += 10) {
      List<Cell> kvs = result.getColumnCells(family, Bytes.toBytes(i));
      if (i < recoverSeqId) {
        assertEquals(0, kvs.size());
      } else {
        assertEquals(1, kvs.size());
        assertArrayEquals(Bytes.toBytes(i), CellUtil.cloneValue(kvs.get(0)));
      }
    }
  } finally {
    HBaseTestingUtility.closeRegionAndWAL(this.region);
    this.region = null;
    wals.close();
  }
}
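The skip decision above hinges on maxSeqIdInStores: replay only applies an edit whose sequence id is greater than the per-store maximum already persisted. A conceptual sketch of that predicate (not HBase's actual implementation):

// Conceptual: an edit is replayed only if it is newer than what the store has flushed.
static boolean shouldReplay(long editSeqId, long maxSeqIdInStore) {
  return editSeqId > maxSeqIdInStore;
}
// With recoverSeqId = 1030 the stores report 1029, so edits 1000..1020 are skipped
// while 1030..1050 are replayed, matching the assertions in the test.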
Use of org.apache.hadoop.hbase.monitoring.MonitoredTask in project hbase by apache.
From class TestHRegion, method testSkipRecoveredEditsReplayTheLastFileIgnored.
@Test
public void testSkipRecoveredEditsReplayTheLastFileIgnored() throws Exception {
  byte[] family = Bytes.toBytes("family");
  this.region = initHRegion(tableName, method, CONF, family);
  final WALFactory wals = new WALFactory(CONF, null, method);
  try {
    Path regiondir = region.getRegionFileSystem().getRegionDir();
    FileSystem fs = region.getRegionFileSystem().getFileSystem();
    byte[] regionName = region.getRegionInfo().getEncodedNameAsBytes();
    byte[][] columns = region.getTableDesc().getFamiliesKeys().toArray(new byte[0][]);
    assertEquals(0, region.getStoreFileList(columns).size());
    Path recoveredEditsDir = WALSplitter.getRegionDirRecoveredEditsDir(regiondir);
    long maxSeqId = 1050;
    long minSeqId = 1000;
    for (long i = minSeqId; i <= maxSeqId; i += 10) {
      Path recoveredEdits = new Path(recoveredEditsDir, String.format("%019d", i));
      fs.create(recoveredEdits);
      WALProvider.Writer writer = wals.createRecoveredEditsWriter(fs, recoveredEdits);
      long time = System.nanoTime();
      WALEdit edit = null;
      if (i == maxSeqId) {
        // The last file carries a compaction marker instead of a Put.
        edit = WALEdit.createCompaction(region.getRegionInfo(),
          CompactionDescriptor.newBuilder()
            .setTableName(ByteString.copyFrom(tableName.getName()))
            .setFamilyName(ByteString.copyFrom(regionName))
            .setEncodedRegionName(ByteString.copyFrom(regionName))
            .setStoreHomeDirBytes(ByteString.copyFrom(Bytes.toBytes(regiondir.toString())))
            .setRegionName(ByteString.copyFrom(region.getRegionInfo().getRegionName()))
            .build());
      } else {
        edit = new WALEdit();
        edit.add(new KeyValue(row, family, Bytes.toBytes(i), time, KeyValue.Type.Put, Bytes.toBytes(i)));
      }
      writer.append(new WAL.Entry(new WALKey(regionName, tableName, i, time, HConstants.DEFAULT_CLUSTER_ID), edit));
      writer.close();
    }
    long recoverSeqId = 1030;
    Map<byte[], Long> maxSeqIdInStores = new TreeMap<>(Bytes.BYTES_COMPARATOR);
    MonitoredTask status = TaskMonitor.get().createStatus(method);
    for (Store store : region.getStores()) {
      maxSeqIdInStores.put(store.getColumnFamilyName().getBytes(), recoverSeqId - 1);
    }
    long seqId = region.replayRecoveredEditsIfAny(regiondir, maxSeqIdInStores, null, status);
    assertEquals(maxSeqId, seqId);
    // assert that the files are flushed
    assertEquals(1, region.getStoreFileList(columns).size());
  } finally {
    HBaseTestingUtility.closeRegionAndWAL(this.region);
    this.region = null;
    wals.close();
  }
}
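A note on the last file: WALEdit.createCompaction wraps the CompactionDescriptor in a meta edit rather than a user Put, which is why replay adds no cell for sequence id 1050 yet still advances the region's sequence id to maxSeqId. A small sketch of decoding such a marker from its cell, assuming the standard WALEdit API; error handling omitted:

// Returns the descriptor when the cell is a compaction marker.
Cell cell = edit.getCells().get(0);
CompactionDescriptor descriptor = WALEdit.getCompaction(cell);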
Use of org.apache.hadoop.hbase.monitoring.MonitoredTask in project hbase by apache.
From class TestRestoreSnapshotHelper, method getRestoreHelper.
/**
 * Initialize the restore helper, based on the snapshot and table information provided.
 */
private RestoreSnapshotHelper getRestoreHelper(final Path rootDir, final Path snapshotDir, final SnapshotDescription sd, final HTableDescriptor htdClone) throws IOException {
  ForeignExceptionDispatcher monitor = Mockito.mock(ForeignExceptionDispatcher.class);
  MonitoredTask status = Mockito.mock(MonitoredTask.class);
  SnapshotManifest manifest = SnapshotManifest.open(conf, fs, snapshotDir, sd);
  return new RestoreSnapshotHelper(conf, fs, manifest, htdClone, rootDir, monitor, status);
}
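For completeness, a RestoreSnapshotHelper built this way is typically driven by calling restoreHdfsRegions(), which lays the snapshot's regions out under the clone table's directory and reports its progress through the MonitoredTask. A hedged usage sketch around the helper above:

// Hypothetical call site: materialize the snapshot on HDFS via the helper.
RestoreSnapshotHelper helper = getRestoreHelper(rootDir, snapshotDir, sd, htdClone);
RestoreSnapshotHelper.RestoreMetaChanges changes = helper.restoreHdfsRegions();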