
Example 11 with MonitoredTask

Use of org.apache.hadoop.hbase.monitoring.MonitoredTask in project hbase by apache.

From the class TestHRegion, method testSkipRecoveredEditsReplay.

@Test
public void testSkipRecoveredEditsReplay() throws Exception {
    byte[] family = Bytes.toBytes("family");
    this.region = initHRegion(tableName, method, CONF, family);
    final WALFactory wals = new WALFactory(CONF, null, method);
    try {
        Path regiondir = region.getRegionFileSystem().getRegionDir();
        FileSystem fs = region.getRegionFileSystem().getFileSystem();
        byte[] regionName = region.getRegionInfo().getEncodedNameAsBytes();
        Path recoveredEditsDir = WALSplitter.getRegionDirRecoveredEditsDir(regiondir);
        long maxSeqId = 1050;
        long minSeqId = 1000;
        for (long i = minSeqId; i <= maxSeqId; i += 10) {
            Path recoveredEdits = new Path(recoveredEditsDir, String.format("%019d", i));
            fs.create(recoveredEdits);
            WALProvider.Writer writer = wals.createRecoveredEditsWriter(fs, recoveredEdits);
            long time = System.nanoTime();
            WALEdit edit = new WALEdit();
            edit.add(new KeyValue(row, family, Bytes.toBytes(i), time, KeyValue.Type.Put, Bytes.toBytes(i)));
            writer.append(new WAL.Entry(new WALKey(regionName, tableName, i, time, HConstants.DEFAULT_CLUSTER_ID), edit));
            writer.close();
        }
        MonitoredTask status = TaskMonitor.get().createStatus(method);
        Map<byte[], Long> maxSeqIdInStores = new TreeMap<>(Bytes.BYTES_COMPARATOR);
        for (Store store : region.getStores()) {
            maxSeqIdInStores.put(store.getColumnFamilyName().getBytes(), minSeqId - 1);
        }
        long seqId = region.replayRecoveredEditsIfAny(regiondir, maxSeqIdInStores, null, status);
        assertEquals(maxSeqId, seqId);
        region.getMVCC().advanceTo(seqId);
        Get get = new Get(row);
        Result result = region.get(get);
        for (long i = minSeqId; i <= maxSeqId; i += 10) {
            List<Cell> kvs = result.getColumnCells(family, Bytes.toBytes(i));
            assertEquals(1, kvs.size());
            assertArrayEquals(Bytes.toBytes(i), CellUtil.cloneValue(kvs.get(0)));
        }
    } finally {
        HBaseTestingUtility.closeRegionAndWAL(this.region);
        this.region = null;
        wals.close();
    }
}
Also used: Path(org.apache.hadoop.fs.Path) KeyValue(org.apache.hadoop.hbase.KeyValue) WAL(org.apache.hadoop.hbase.wal.WAL) MetricsWAL(org.apache.hadoop.hbase.regionserver.wal.MetricsWAL) TreeMap(java.util.TreeMap) Result(org.apache.hadoop.hbase.client.Result) WALKey(org.apache.hadoop.hbase.wal.WALKey) WALEdit(org.apache.hadoop.hbase.regionserver.wal.WALEdit) FileSystem(org.apache.hadoop.fs.FileSystem) FaultyFileSystem(org.apache.hadoop.hbase.regionserver.TestStore.FaultyFileSystem) Writer(org.apache.hadoop.hbase.wal.WALProvider.Writer) Get(org.apache.hadoop.hbase.client.Get) Matchers.anyLong(org.mockito.Matchers.anyLong) WALFactory(org.apache.hadoop.hbase.wal.WALFactory) Cell(org.apache.hadoop.hbase.Cell) WALProvider(org.apache.hadoop.hbase.wal.WALProvider) AbstractFSWALProvider(org.apache.hadoop.hbase.wal.AbstractFSWALProvider) MonitoredTask(org.apache.hadoop.hbase.monitoring.MonitoredTask) Test(org.junit.Test)
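
The call TaskMonitor.get().createStatus(method) above is the standard way a long-running HBase operation publishes progress. A minimal, self-contained sketch of that lifecycle, assuming only the TaskMonitor and MonitoredTask API already shown in the test (the class and method names below are ours, not HBase's):

import java.io.IOException;

import org.apache.hadoop.hbase.monitoring.MonitoredTask;
import org.apache.hadoop.hbase.monitoring.TaskMonitor;

public class ReplayStatusSketch {

    // Stand-in for the long-running work, e.g. replayRecoveredEditsIfAny(...).
    private void replayWork() throws IOException {
    }

    public void runWithStatus() throws IOException {
        // createStatus registers the task with the server-wide TaskMonitor,
        // which is what makes it visible on the region server status page.
        MonitoredTask status = TaskMonitor.get().createStatus("replaying recovered edits");
        try {
            status.setStatus("opening recovered.edits files");
            replayWork();
            status.markComplete("replay finished");
        } catch (IOException e) {
            status.abort("replay failed: " + e.getMessage());
            throw e;
        }
    }
}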

Example 12 with MonitoredTask

Use of org.apache.hadoop.hbase.monitoring.MonitoredTask in project hbase by apache.

From the class TestActiveMasterManager, method testRestartMaster.

@Test
public void testRestartMaster() throws IOException, KeeperException {
    ZooKeeperWatcher zk = new ZooKeeperWatcher(TEST_UTIL.getConfiguration(), "testActiveMasterManagerFromZK", null, true);
    try {
        ZKUtil.deleteNode(zk, zk.znodePaths.masterAddressZNode);
        ZKUtil.deleteNode(zk, zk.znodePaths.clusterStateZNode);
    } catch (KeeperException.NoNodeException nne) {
        // Expected on a fresh cluster: these znodes may not exist yet.
    }
    // Create the master node with a dummy address
    ServerName master = ServerName.valueOf("localhost", 1, System.currentTimeMillis());
    // Should not have a master yet
    DummyMaster dummyMaster = new DummyMaster(zk, master);
    ClusterStatusTracker clusterStatusTracker = dummyMaster.getClusterStatusTracker();
    ActiveMasterManager activeMasterManager = dummyMaster.getActiveMasterManager();
    assertFalse(activeMasterManager.clusterHasActiveMaster.get());
    // First test becoming the active master uninterrupted
    MonitoredTask status = Mockito.mock(MonitoredTask.class);
    clusterStatusTracker.setClusterUp();
    activeMasterManager.blockUntilBecomingActiveMaster(100, status);
    assertTrue(activeMasterManager.clusterHasActiveMaster.get());
    assertMaster(zk, master);
    // Now pretend master restart
    DummyMaster secondDummyMaster = new DummyMaster(zk, master);
    ActiveMasterManager secondActiveMasterManager = secondDummyMaster.getActiveMasterManager();
    assertFalse(secondActiveMasterManager.clusterHasActiveMaster.get());
    activeMasterManager.blockUntilBecomingActiveMaster(100, status);
    assertTrue(activeMasterManager.clusterHasActiveMaster.get());
    assertMaster(zk, master);
}
Also used: ZooKeeperWatcher(org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher) ServerName(org.apache.hadoop.hbase.ServerName) ClusterStatusTracker(org.apache.hadoop.hbase.zookeeper.ClusterStatusTracker) KeeperException(org.apache.zookeeper.KeeperException) MonitoredTask(org.apache.hadoop.hbase.monitoring.MonitoredTask) Test(org.junit.Test)
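
Here the test passes a Mockito mock instead of a real MonitoredTask, so progress reporting becomes observable and assertable. A minimal sketch of that pattern; the class name and the verification step are illustrative and not performed by the original test:

import static org.mockito.Matchers.anyString;
import static org.mockito.Mockito.atLeastOnce;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.verify;

import org.apache.hadoop.hbase.monitoring.MonitoredTask;
import org.junit.Test;

public class MockedStatusSketch {

    @Test
    public void statusUpdatesAreObservable() {
        MonitoredTask status = mock(MonitoredTask.class);
        // Stand-in for code that reports progress through the mock, e.g.
        // activeMasterManager.blockUntilBecomingActiveMaster(100, status).
        status.setStatus("simulated progress update");
        // The mock records every interaction, so the test can assert on it.
        verify(status, atLeastOnce()).setStatus(anyString());
    }
}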

Example 13 with MonitoredTask

Use of org.apache.hadoop.hbase.monitoring.MonitoredTask in project hbase by apache.

From the class TestHRegion, method testSkipRecoveredEditsReplaySomeIgnored.

@Test
public void testSkipRecoveredEditsReplaySomeIgnored() throws Exception {
    byte[] family = Bytes.toBytes("family");
    this.region = initHRegion(tableName, method, CONF, family);
    final WALFactory wals = new WALFactory(CONF, null, method);
    try {
        Path regiondir = region.getRegionFileSystem().getRegionDir();
        FileSystem fs = region.getRegionFileSystem().getFileSystem();
        byte[] regionName = region.getRegionInfo().getEncodedNameAsBytes();
        Path recoveredEditsDir = WALSplitter.getRegionDirRecoveredEditsDir(regiondir);
        long maxSeqId = 1050;
        long minSeqId = 1000;
        for (long i = minSeqId; i <= maxSeqId; i += 10) {
            Path recoveredEdits = new Path(recoveredEditsDir, String.format("%019d", i));
            fs.create(recoveredEdits);
            WALProvider.Writer writer = wals.createRecoveredEditsWriter(fs, recoveredEdits);
            long time = System.nanoTime();
            WALEdit edit = new WALEdit();
            edit.add(new KeyValue(row, family, Bytes.toBytes(i), time, KeyValue.Type.Put, Bytes.toBytes(i)));
            writer.append(new WAL.Entry(new WALKey(regionName, tableName, i, time, HConstants.DEFAULT_CLUSTER_ID), edit));
            writer.close();
        }
        long recoverSeqId = 1030;
        MonitoredTask status = TaskMonitor.get().createStatus(method);
        Map<byte[], Long> maxSeqIdInStores = new TreeMap<>(Bytes.BYTES_COMPARATOR);
        for (Store store : region.getStores()) {
            maxSeqIdInStores.put(store.getColumnFamilyName().getBytes(), recoverSeqId - 1);
        }
        long seqId = region.replayRecoveredEditsIfAny(regiondir, maxSeqIdInStores, null, status);
        assertEquals(maxSeqId, seqId);
        region.getMVCC().advanceTo(seqId);
        Get get = new Get(row);
        Result result = region.get(get);
        for (long i = minSeqId; i <= maxSeqId; i += 10) {
            List<Cell> kvs = result.getColumnCells(family, Bytes.toBytes(i));
            if (i < recoverSeqId) {
                assertEquals(0, kvs.size());
            } else {
                assertEquals(1, kvs.size());
                assertArrayEquals(Bytes.toBytes(i), CellUtil.cloneValue(kvs.get(0)));
            }
        }
    } finally {
        HBaseTestingUtility.closeRegionAndWAL(this.region);
        this.region = null;
        wals.close();
    }
}
Also used: Path(org.apache.hadoop.fs.Path) KeyValue(org.apache.hadoop.hbase.KeyValue) WAL(org.apache.hadoop.hbase.wal.WAL) MetricsWAL(org.apache.hadoop.hbase.regionserver.wal.MetricsWAL) TreeMap(java.util.TreeMap) Result(org.apache.hadoop.hbase.client.Result) WALKey(org.apache.hadoop.hbase.wal.WALKey) WALEdit(org.apache.hadoop.hbase.regionserver.wal.WALEdit) FileSystem(org.apache.hadoop.fs.FileSystem) FaultyFileSystem(org.apache.hadoop.hbase.regionserver.TestStore.FaultyFileSystem) Writer(org.apache.hadoop.hbase.wal.WALProvider.Writer) Get(org.apache.hadoop.hbase.client.Get) Matchers.anyLong(org.mockito.Matchers.anyLong) WALFactory(org.apache.hadoop.hbase.wal.WALFactory) Cell(org.apache.hadoop.hbase.Cell) WALProvider(org.apache.hadoop.hbase.wal.WALProvider) AbstractFSWALProvider(org.apache.hadoop.hbase.wal.AbstractFSWALProvider) MonitoredTask(org.apache.hadoop.hbase.monitoring.MonitoredTask) Test(org.junit.Test)
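
The skip logic this test exercises boils down to one comparison: an edit is replayed only when its sequence id exceeds the store's recorded maximum (recoverSeqId - 1 = 1029 here). A standalone sketch using the test's numbers; the names are ours, not the HRegion internals:

public class SkipDecisionSketch {

    public static void main(String[] args) {
        // recoverSeqId - 1, the maximum sequence id recorded per store above.
        long storeMaxSeqId = 1030 - 1;
        for (long editSeqId = 1000; editSeqId <= 1050; editSeqId += 10) {
            boolean replayed = editSeqId > storeMaxSeqId;
            // Prints: 1000, 1010, 1020 skipped; 1030, 1040, 1050 replayed.
            System.out.println(editSeqId + (replayed ? " replayed" : " skipped"));
        }
    }
}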

Example 14 with MonitoredTask

Use of org.apache.hadoop.hbase.monitoring.MonitoredTask in project hbase by apache.

From the class TestHRegion, method testSkipRecoveredEditsReplayTheLastFileIgnored.

@Test
public void testSkipRecoveredEditsReplayTheLastFileIgnored() throws Exception {
    byte[] family = Bytes.toBytes("family");
    this.region = initHRegion(tableName, method, CONF, family);
    final WALFactory wals = new WALFactory(CONF, null, method);
    try {
        Path regiondir = region.getRegionFileSystem().getRegionDir();
        FileSystem fs = region.getRegionFileSystem().getFileSystem();
        byte[] regionName = region.getRegionInfo().getEncodedNameAsBytes();
        byte[][] columns = region.getTableDesc().getFamiliesKeys().toArray(new byte[0][]);
        assertEquals(0, region.getStoreFileList(columns).size());
        Path recoveredEditsDir = WALSplitter.getRegionDirRecoveredEditsDir(regiondir);
        long maxSeqId = 1050;
        long minSeqId = 1000;
        for (long i = minSeqId; i <= maxSeqId; i += 10) {
            Path recoveredEdits = new Path(recoveredEditsDir, String.format("%019d", i));
            fs.create(recoveredEdits);
            WALProvider.Writer writer = wals.createRecoveredEditsWriter(fs, recoveredEdits);
            long time = System.nanoTime();
            WALEdit edit = null;
            if (i == maxSeqId) {
                edit = WALEdit.createCompaction(region.getRegionInfo(),
                    CompactionDescriptor.newBuilder()
                        .setTableName(ByteString.copyFrom(tableName.getName()))
                        .setFamilyName(ByteString.copyFrom(regionName))
                        .setEncodedRegionName(ByteString.copyFrom(regionName))
                        .setStoreHomeDirBytes(ByteString.copyFrom(Bytes.toBytes(regiondir.toString())))
                        .setRegionName(ByteString.copyFrom(region.getRegionInfo().getRegionName()))
                        .build());
            } else {
                edit = new WALEdit();
                edit.add(new KeyValue(row, family, Bytes.toBytes(i), time, KeyValue.Type.Put, Bytes.toBytes(i)));
            }
            writer.append(new WAL.Entry(new WALKey(regionName, tableName, i, time, HConstants.DEFAULT_CLUSTER_ID), edit));
            writer.close();
        }
        long recoverSeqId = 1030;
        Map<byte[], Long> maxSeqIdInStores = new TreeMap<>(Bytes.BYTES_COMPARATOR);
        MonitoredTask status = TaskMonitor.get().createStatus(method);
        for (Store store : region.getStores()) {
            maxSeqIdInStores.put(store.getColumnFamilyName().getBytes(), recoverSeqId - 1);
        }
        long seqId = region.replayRecoveredEditsIfAny(regiondir, maxSeqIdInStores, null, status);
        assertEquals(maxSeqId, seqId);
        // assert that the files are flushed
        assertEquals(1, region.getStoreFileList(columns).size());
    } finally {
        HBaseTestingUtility.closeRegionAndWAL(this.region);
        this.region = null;
        wals.close();
    }
}
Also used: Path(org.apache.hadoop.fs.Path) KeyValue(org.apache.hadoop.hbase.KeyValue) WAL(org.apache.hadoop.hbase.wal.WAL) MetricsWAL(org.apache.hadoop.hbase.regionserver.wal.MetricsWAL) TreeMap(java.util.TreeMap) WALKey(org.apache.hadoop.hbase.wal.WALKey) WALEdit(org.apache.hadoop.hbase.regionserver.wal.WALEdit) FileSystem(org.apache.hadoop.fs.FileSystem) FaultyFileSystem(org.apache.hadoop.hbase.regionserver.TestStore.FaultyFileSystem) Writer(org.apache.hadoop.hbase.wal.WALProvider.Writer) Matchers.anyLong(org.mockito.Matchers.anyLong) WALFactory(org.apache.hadoop.hbase.wal.WALFactory) WALProvider(org.apache.hadoop.hbase.wal.WALProvider) AbstractFSWALProvider(org.apache.hadoop.hbase.wal.AbstractFSWALProvider) MonitoredTask(org.apache.hadoop.hbase.monitoring.MonitoredTask) Test(org.junit.Test)
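
All of the TestHRegion examples name their recovered-edits files with String.format("%019d", i). A short sketch of why that width is chosen: zero-padding to 19 digits (enough for any long) makes lexicographic file order agree with numeric sequence-id order:

public class EditsFileNameSketch {

    public static void main(String[] args) {
        // 19 digits is enough for any long (Long.MAX_VALUE has 19 digits).
        String first = String.format("%019d", 1000L);  // 0000000000000001000
        String last = String.format("%019d", 1050L);   // 0000000000000001050
        // Lexicographic order of the names matches numeric seqId order,
        // so replay can simply process the directory listing sorted by name.
        System.out.println(first.compareTo(last) < 0);  // true
    }
}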

Example 15 with MonitoredTask

Use of org.apache.hadoop.hbase.monitoring.MonitoredTask in project hbase by apache.

From the class TestRestoreSnapshotHelper, method getRestoreHelper.

/**
   * Initialize the restore helper, based on the snapshot and table information provided.
   */
private RestoreSnapshotHelper getRestoreHelper(final Path rootDir, final Path snapshotDir, final SnapshotDescription sd, final HTableDescriptor htdClone) throws IOException {
    ForeignExceptionDispatcher monitor = Mockito.mock(ForeignExceptionDispatcher.class);
    MonitoredTask status = Mockito.mock(MonitoredTask.class);
    SnapshotManifest manifest = SnapshotManifest.open(conf, fs, snapshotDir, sd);
    return new RestoreSnapshotHelper(conf, fs, manifest, htdClone, rootDir, monitor, status);
}
Also used: ForeignExceptionDispatcher(org.apache.hadoop.hbase.errorhandling.ForeignExceptionDispatcher) MonitoredTask(org.apache.hadoop.hbase.monitoring.MonitoredTask)
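
For context, this is roughly how a test drives the helper it builds; restoreHdfsRegions() is RestoreSnapshotHelper's public entry point, and rootDir, snapshotDir, sd, and htdClone come from the surrounding test fixture (a hedged sketch of the continuation, not a verbatim excerpt):

    RestoreSnapshotHelper helper = getRestoreHelper(rootDir, snapshotDir, sd, htdClone);
    RestoreSnapshotHelper.RestoreMetaChanges changes = helper.restoreHdfsRegions();
    // Because monitor and status are Mockito mocks, the restore runs without
    // a real task monitor or foreign-exception dispatcher behind it.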

Aggregations

MonitoredTask (org.apache.hadoop.hbase.monitoring.MonitoredTask): 20
Path (org.apache.hadoop.fs.Path): 8
FileSystem (org.apache.hadoop.fs.FileSystem): 7
Test (org.junit.Test): 7
IOException (java.io.IOException): 6
InterruptedIOException (java.io.InterruptedIOException): 5
WAL (org.apache.hadoop.hbase.wal.WAL): 5
WALKey (org.apache.hadoop.hbase.wal.WALKey): 5
TreeMap (java.util.TreeMap): 4
FaultyFileSystem (org.apache.hadoop.hbase.regionserver.TestStore.FaultyFileSystem): 4
WALEdit (org.apache.hadoop.hbase.regionserver.wal.WALEdit): 4
Cell (org.apache.hadoop.hbase.Cell): 3
DoNotRetryIOException (org.apache.hadoop.hbase.DoNotRetryIOException): 3
KeyValue (org.apache.hadoop.hbase.KeyValue): 3
Get (org.apache.hadoop.hbase.client.Get): 3
Result (org.apache.hadoop.hbase.client.Result): 3
VisibleForTesting (com.google.common.annotations.VisibleForTesting): 2
ArrayList (java.util.ArrayList): 2
HashSet (java.util.HashSet): 2
Configuration (org.apache.hadoop.conf.Configuration): 2