Search in sources:

Example 51 with HRegionServer

use of org.apache.hadoop.hbase.regionserver.HRegionServer in project hbase by apache.

From the class TestSerialReplication, method testRegionMove.

@Test
public void testRegionMove() throws Exception {
    TableName tn = createTable();
    // First batch: rows 0..99 written while the region sits on its original server.
    try (Table t = UTIL.getConnection().getTable(tn)) {
        for (int row = 0; row < 100; row++) {
            byte[] b = Bytes.toBytes(row);
            t.put(new Put(b).addColumn(CF, CQ, b));
        }
    }
    RegionInfo firstRegion = UTIL.getAdmin().getRegions(tn).get(0);
    // Pick a server other than the one currently hosting the region, then move it there.
    HRegionServer destination = UTIL.getOtherRegionServer(UTIL.getRSForFirstRegionInTable(tn));
    moveRegion(firstRegion, destination);
    // Second batch: rows 100..199 written after the move, so later edits originate
    // from a different server than the first batch.
    try (Table t = UTIL.getConnection().getTable(tn)) {
        for (int row = 100; row < 200; row++) {
            byte[] b = Bytes.toBytes(row);
            t.put(new Put(b).addColumn(CF, CQ, b));
        }
    }
    // Turn replication back on and verify all 200 edits arrive in order.
    enablePeerAndWaitUntilReplicationDone(200);
    checkOrder(200);
}
Also used : TableName(org.apache.hadoop.hbase.TableName) Table(org.apache.hadoop.hbase.client.Table) RegionInfo(org.apache.hadoop.hbase.client.RegionInfo) Put(org.apache.hadoop.hbase.client.Put) HRegionServer(org.apache.hadoop.hbase.regionserver.HRegionServer) Test(org.junit.Test)

Example 52 with HRegionServer

use of org.apache.hadoop.hbase.regionserver.HRegionServer in project hbase by apache.

From the class TestSyncReplicationMoreLogsInLocalCopyToRemote, method testSplitLog.

@Test
public void testSplitLog() throws Exception {
    // Stop the peer so edits queue up locally, then form a sync-replication pair:
    // cluster2 as STANDBY, cluster1 as ACTIVE.
    UTIL1.getAdmin().disableReplicationPeer(PEER_ID);
    UTIL2.getAdmin().transitReplicationPeerSyncReplicationState(PEER_ID, SyncReplicationState.STANDBY);
    UTIL1.getAdmin().transitReplicationPeerSyncReplicationState(PEER_ID, SyncReplicationState.ACTIVE);
    HRegionServer rs = UTIL1.getRSForFirstRegionInTable(TABLE_NAME);
    DualAsyncFSWALForTest wal = (DualAsyncFSWALForTest) rs.getWAL(RegionInfoBuilder.newBuilder(TABLE_NAME).build());
    // Break the remote side of the dual WAL so the local WAL ends up with more
    // entries than the remote copy.
    wal.setRemoteBroken();
    try (AsyncConnection conn = ConnectionFactory.createAsyncConnection(UTIL1.getConfiguration()).get()) {
        // Single attempt only: the put must fail outright instead of being retried.
        AsyncTable<?> table = conn.getTableBuilder(TABLE_NAME).setMaxAttempts(1).build();
        try {
            table.put(new Put(Bytes.toBytes(0)).addColumn(CF, CQ, Bytes.toBytes(0))).get();
            fail("Should fail since the rs will crash and we will not retry");
        } catch (ExecutionException e) {
            // expected
            LOG.info("Expected error:", e);
        }
    }
    // After the region server crash, wait until the row becomes readable on
    // cluster1 again (region reassigned, local WAL recovered).
    UTIL1.waitFor(60000, new ExplainingPredicate<Exception>() {

        @Override
        public boolean evaluate() throws Exception {
            try (Table table = UTIL1.getConnection().getTable(TABLE_NAME)) {
                return table.exists(new Get(Bytes.toBytes(0)));
            }
        }

        @Override
        public String explainFailure() throws Exception {
            return "The row is still not available";
        }
    });
    // Transit the standby to DOWNGRADE_ACTIVE; the extra local entries should have
    // been copied to the remote cluster by now.
    UTIL2.getAdmin().transitReplicationPeerSyncReplicationState(PEER_ID, SyncReplicationState.DOWNGRADE_ACTIVE);
    // We should have copied the local log to remote, so we should be able to get the value
    try (Table table = UTIL2.getConnection().getTable(TABLE_NAME)) {
        assertEquals(0, Bytes.toInt(table.get(new Get(Bytes.toBytes(0))).getValue(CF, CQ)));
    }
}
Also used : AsyncTable(org.apache.hadoop.hbase.client.AsyncTable) Table(org.apache.hadoop.hbase.client.Table) Get(org.apache.hadoop.hbase.client.Get) AsyncConnection(org.apache.hadoop.hbase.client.AsyncConnection) ExecutionException(java.util.concurrent.ExecutionException) Put(org.apache.hadoop.hbase.client.Put) ExecutionException(java.util.concurrent.ExecutionException) HRegionServer(org.apache.hadoop.hbase.regionserver.HRegionServer) Test(org.junit.Test)

Example 53 with HRegionServer

use of org.apache.hadoop.hbase.regionserver.HRegionServer in project hbase by apache.

From the class TestDrainReplicationQueuesForStandBy, method test.

@Test
public void test() throws Exception {
    // Start with cluster2 as STANDBY and cluster1 as ACTIVE, with the peer
    // disabled so cluster1's edits stay queued in its replication queue.
    UTIL2.getAdmin().transitReplicationPeerSyncReplicationState(PEER_ID, SyncReplicationState.STANDBY);
    UTIL1.getAdmin().transitReplicationPeerSyncReplicationState(PEER_ID, SyncReplicationState.ACTIVE);
    UTIL1.getAdmin().disableReplicationPeer(PEER_ID);
    write(UTIL1, 0, 100);
    HRegionServer rs = UTIL1.getRSForFirstRegionInTable(TABLE_NAME);
    // Remember which WAL group the queued (stale) edits live in, so we can later
    // check that its worker thread goes away without shipping them.
    String walGroupId = AbstractFSWALProvider.getWALPrefixFromWALName(((AbstractFSWAL<?>) rs.getWAL(RegionInfoBuilder.newBuilder(TABLE_NAME).build())).getCurrentFileName().getName());
    UTIL2.getAdmin().transitReplicationPeerSyncReplicationState(PEER_ID, SyncReplicationState.DOWNGRADE_ACTIVE);
    // transit cluster2 to DA and cluster 1 to S
    verify(UTIL2, 0, 100);
    UTIL1.getAdmin().transitReplicationPeerSyncReplicationState(PEER_ID, SyncReplicationState.STANDBY);
    // delete the original value, and then major compact
    try (Table table = UTIL2.getConnection().getTable(TABLE_NAME)) {
        for (int i = 0; i < 100; i++) {
            table.delete(new Delete(Bytes.toBytes(i)));
        }
    }
    UTIL2.flush(TABLE_NAME);
    UTIL2.compact(TABLE_NAME, true);
    // wait until the new values are replicated back to cluster1
    HRegion region = rs.getRegions(TABLE_NAME).get(0);
    UTIL1.waitFor(30000, new ExplainingPredicate<Exception>() {

        @Override
        public boolean evaluate() throws Exception {
            // The deletes have arrived once the last written row reads empty.
            return region.get(new Get(Bytes.toBytes(99))).isEmpty();
        }

        @Override
        public String explainFailure() throws Exception {
            return "Replication has not been catched up yet";
        }
    });
    // transit cluster1 to DA and cluster2 to S, then we will start replicating from cluster1 to
    // cluster2
    UTIL1.getAdmin().transitReplicationPeerSyncReplicationState(PEER_ID, SyncReplicationState.DOWNGRADE_ACTIVE);
    UTIL2.getAdmin().transitReplicationPeerSyncReplicationState(PEER_ID, SyncReplicationState.STANDBY);
    UTIL1.getAdmin().enableReplicationPeer(PEER_ID);
    // confirm that we will not replicate the old data which causes inconsistency
    ReplicationSource source = (ReplicationSource) ((Replication) rs.getReplicationSourceService()).getReplicationManager().getSource(PEER_ID);
    UTIL1.waitFor(30000, new ExplainingPredicate<Exception>() {

        @Override
        public boolean evaluate() throws Exception {
            // The stale WAL group is drained once its worker thread is gone.
            return !source.workerThreads.containsKey(walGroupId);
        }

        @Override
        public String explainFailure() throws Exception {
            return "Replication has not been catched up yet";
        }
    });
    // None of the 100 stale rows should have reappeared on cluster2.
    HRegion region2 = UTIL2.getMiniHBaseCluster().getRegions(TABLE_NAME).get(0);
    for (int i = 0; i < 100; i++) {
        assertTrue(region2.get(new Get(Bytes.toBytes(i))).isEmpty());
    }
}
Also used : Delete(org.apache.hadoop.hbase.client.Delete) AbstractFSWAL(org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL) Table(org.apache.hadoop.hbase.client.Table) HRegionServer(org.apache.hadoop.hbase.regionserver.HRegionServer) HRegion(org.apache.hadoop.hbase.regionserver.HRegion) Get(org.apache.hadoop.hbase.client.Get) Test(org.junit.Test)

Example 54 with HRegionServer

use of org.apache.hadoop.hbase.regionserver.HRegionServer in project hbase by apache.

From the class TestRefreshRecoveredReplication, method testReplicationRefreshSource.

@Test
public void testReplicationRefreshSource() throws Exception {
    // Seed the source table with BATCH rows; each row key doubles as the value.
    for (int row = 0; row < BATCH; row++) {
        byte[] b = Bytes.toBytes(row);
        table1.put(new Put(b).addColumn(famName, famName, b));
    }
    // Kill rs holding table region. There are only TWO servers. We depend on it.
    List<RegionServerThread> rss = UTIL1.getMiniHBaseCluster().getLiveRegionServerThreads();
    assertEquals(2, rss.size());
    // Find the server hosting the table's region (the one we will abort).
    HRegionServer victim = null;
    for (RegionServerThread rst : rss) {
        if (CollectionUtils.isNotEmpty(rst.getRegionServer().getRegions(tablename))) {
            victim = rst.getRegionServer();
            break;
        }
    }
    Assert.assertTrue(victim != null);
    // The other server survives and must pick up the dead server's queues.
    HRegionServer survivor;
    if (rss.get(0).getRegionServer() == victim) {
        survivor = rss.get(1).getRegionServer();
    } else {
        survivor = rss.get(0).getRegionServer();
    }
    victim.abort("stopping for test");
    // waiting for recovered peer to appear.
    Replication replication = (Replication) survivor.getReplicationSourceService();
    UTIL1.waitFor(60000, () -> !replication.getReplicationManager().getOldSources().isEmpty());
    // Wait on only one server being up.
    UTIL1.waitFor(60000, () -> UTIL1.getMiniHBaseCluster().getLiveRegionServerThreads().size() == NUM_SLAVES1 - 1);
    UTIL1.waitTableAvailable(tablename);
    LOG.info("Available {}", tablename);
    // disable peer to trigger refreshSources
    hbaseAdmin.disableReplicationPeer(PEER_ID2);
    LOG.info("has replicated {} rows before disable peer", checkReplicationData());
    hbaseAdmin.enableReplicationPeer(PEER_ID2);
    // waiting to replicate all data to slave
    UTIL2.waitFor(60000, () -> {
        int replicated = checkReplicationData();
        LOG.info("Waiting all logs pushed to slave. Expected {} , actual {}", BATCH, replicated);
        return replicated == BATCH;
    });
}
Also used : BeforeClass(org.junit.BeforeClass) Result(org.apache.hadoop.hbase.client.Result) LoggerFactory(org.slf4j.LoggerFactory) HConstants(org.apache.hadoop.hbase.HConstants) TableDescriptorBuilder(org.apache.hadoop.hbase.client.TableDescriptorBuilder) ColumnFamilyDescriptorBuilder(org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder) TestName(org.junit.rules.TestName) Configuration(org.apache.hadoop.conf.Configuration) After(org.junit.After) ClassRule(org.junit.ClassRule) Bytes(org.apache.hadoop.hbase.util.Bytes) Before(org.junit.Before) TableName(org.apache.hadoop.hbase.TableName) AfterClass(org.junit.AfterClass) Logger(org.slf4j.Logger) MediumTests(org.apache.hadoop.hbase.testclassification.MediumTests) RegionServerThread(org.apache.hadoop.hbase.util.JVMClusterUtil.RegionServerThread) Collection(java.util.Collection) Put(org.apache.hadoop.hbase.client.Put) HBaseClassTestRule(org.apache.hadoop.hbase.HBaseClassTestRule) IOException(java.io.IOException) TestReplicationBase(org.apache.hadoop.hbase.replication.TestReplicationBase) Test(org.junit.Test) Category(org.junit.experimental.categories.Category) Collectors(java.util.stream.Collectors) Scan(org.apache.hadoop.hbase.client.Scan) CollectionUtils(org.apache.hbase.thirdparty.org.apache.commons.collections4.CollectionUtils) HRegionServer(org.apache.hadoop.hbase.regionserver.HRegionServer) List(java.util.List) Rule(org.junit.Rule) ResultScanner(org.apache.hadoop.hbase.client.ResultScanner) Optional(java.util.Optional) Table(org.apache.hadoop.hbase.client.Table) ReplicationTests(org.apache.hadoop.hbase.testclassification.ReplicationTests) Assert(org.junit.Assert) TableDescriptor(org.apache.hadoop.hbase.client.TableDescriptor) Assert.assertEquals(org.junit.Assert.assertEquals) RegionServerThread(org.apache.hadoop.hbase.util.JVMClusterUtil.RegionServerThread) Put(org.apache.hadoop.hbase.client.Put) HRegionServer(org.apache.hadoop.hbase.regionserver.HRegionServer) Test(org.junit.Test)

Example 55 with HRegionServer

use of org.apache.hadoop.hbase.regionserver.HRegionServer in project hbase by apache.

From the class HBaseTestingUtil, method getRSForFirstRegionInTable.

/**
 * Tool to get the reference to the region server object that holds the region of the specified
 * user table.
 * @param tableName user table to lookup in hbase:meta
 * @return region server that holds it, null if the row doesn't exist
 * @throws IOException if no online region exists for the table
 */
public HRegionServer getRSForFirstRegionInTable(TableName tableName) throws IOException, InterruptedException {
    List<RegionInfo> regions = getAdmin().getRegions(tableName);
    if (regions == null || regions.isEmpty()) {
        return null;
    }
    LOG.debug("Found " + regions.size() + " regions for table " + tableName);
    // Use the first online region (skip split/merged-away parents) as the lookup key.
    byte[] firstRegionName = regions.stream().filter(r -> !r.isOffline()).map(RegionInfo::getRegionName).findFirst().orElseThrow(() -> new IOException("online regions not found in table " + tableName));
    LOG.debug("firstRegionName=" + Bytes.toString(firstRegionName));
    long pause = getConfiguration().getLong(HConstants.HBASE_CLIENT_PAUSE, HConstants.DEFAULT_HBASE_CLIENT_PAUSE);
    int numRetries = getConfiguration().getInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, HConstants.DEFAULT_HBASE_CLIENT_RETRIES_NUMBER);
    // HBASE_CLIENT_PAUSE is expressed in milliseconds. The previous code passed
    // TimeUnit.MICROSECONDS, making each retry sleep ~1000x shorter than configured
    // and effectively turning the retry loop into a busy spin.
    RetryCounter retrier = new RetryCounter(numRetries + 1, (int) pause, TimeUnit.MILLISECONDS);
    while (retrier.shouldRetry()) {
        int index = getMiniHBaseCluster().getServerWith(firstRegionName);
        if (index != -1) {
            return getMiniHBaseCluster().getRegionServerThreads().get(index).getRegionServer();
        }
        // Came back -1. Region may not be online yet. Sleep a while.
        retrier.sleepUntilNextRetry();
    }
    // Exhausted all retries without the region coming online anywhere.
    return null;
}
Also used : ZKConfig(org.apache.hadoop.hbase.zookeeper.ZKConfig) Arrays(java.util.Arrays) UserProvider(org.apache.hadoop.hbase.security.UserProvider) SplitAlgorithm(org.apache.hadoop.hbase.util.RegionSplitter.SplitAlgorithm) VisibilityLabelsCache(org.apache.hadoop.hbase.security.visibility.VisibilityLabelsCache) FileSystem(org.apache.hadoop.fs.FileSystem) HFileSystem(org.apache.hadoop.hbase.fs.HFileSystem) BooleanSupplier(java.util.function.BooleanSupplier) RegionState(org.apache.hadoop.hbase.master.RegionState) InetAddress(java.net.InetAddress) ServerSocket(java.net.ServerSocket) Delete(org.apache.hadoop.hbase.client.Delete) MemStoreLAB(org.apache.hadoop.hbase.regionserver.MemStoreLAB) Map(java.util.Map) Configuration(org.apache.hadoop.conf.Configuration) Consistency(org.apache.hadoop.hbase.client.Consistency) BufferedMutator(org.apache.hadoop.hbase.client.BufferedMutator) WAL(org.apache.hadoop.hbase.wal.WAL) Closeables(org.apache.hbase.thirdparty.com.google.common.io.Closeables) Pair(org.apache.hadoop.hbase.util.Pair) ZooKeeper(org.apache.zookeeper.ZooKeeper) ChecksumUtil(org.apache.hadoop.hbase.io.hfile.ChecksumUtil) CommonFSUtils(org.apache.hadoop.hbase.util.CommonFSUtils) Get(org.apache.hadoop.hbase.client.Get) Set(java.util.Set) BloomType(org.apache.hadoop.hbase.regionserver.BloomType) StandardCharsets(java.nio.charset.StandardCharsets) UncheckedIOException(java.io.UncheckedIOException) HRegionServer(org.apache.hadoop.hbase.regionserver.HRegionServer) DFSClient(org.apache.hadoop.hdfs.DFSClient) RpcServerInterface(org.apache.hadoop.hbase.ipc.RpcServerInterface) InternalScanner(org.apache.hadoop.hbase.regionserver.InternalScanner) Region(org.apache.hadoop.hbase.regionserver.Region) JVM(org.apache.hadoop.hbase.util.JVM) FSUtils(org.apache.hadoop.hbase.util.FSUtils) AssignmentManager(org.apache.hadoop.hbase.master.assignment.AssignmentManager) MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster) ChunkCreator(org.apache.hadoop.hbase.regionserver.ChunkCreator) 
HStore(org.apache.hadoop.hbase.regionserver.HStore) EmptyWatcher(org.apache.hadoop.hbase.zookeeper.EmptyWatcher) WALFactory(org.apache.hadoop.hbase.wal.WALFactory) TreeSet(java.util.TreeSet) ArrayList(java.util.ArrayList) DatagramSocket(java.net.DatagramSocket) TableDescriptorBuilder(org.apache.hadoop.hbase.client.TableDescriptorBuilder) ColumnFamilyDescriptorBuilder(org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder) JVMClusterUtil(org.apache.hadoop.hbase.util.JVMClusterUtil) Properties(java.util.Properties) DataBlockEncoding(org.apache.hadoop.hbase.io.encoding.DataBlockEncoding) Predicate(org.apache.hadoop.hbase.Waiter.Predicate) AssignmentTestingUtil(org.apache.hadoop.hbase.master.assignment.AssignmentTestingUtil) HRegion(org.apache.hadoop.hbase.regionserver.HRegion) RetryCounter(org.apache.hadoop.hbase.util.RetryCounter) EditLogFileOutputStream(org.apache.hadoop.hdfs.server.namenode.EditLogFileOutputStream) Assert.assertTrue(org.junit.Assert.assertTrue) IOException(java.io.IOException) FileUtils(org.apache.commons.io.FileUtils) Field(java.lang.reflect.Field) UnknownHostException(java.net.UnknownHostException) RegionServerStoppedException(org.apache.hadoop.hbase.regionserver.RegionServerStoppedException) File(java.io.File) ConnectionFactory(org.apache.hadoop.hbase.client.ConnectionFactory) Scan(org.apache.hadoop.hbase.client.Scan) Admin(org.apache.hadoop.hbase.client.Admin) Connection(org.apache.hadoop.hbase.client.Connection) RegionStateStore(org.apache.hadoop.hbase.master.assignment.RegionStateStore) Nullable(edu.umd.cs.findbugs.annotations.Nullable) Assert.assertEquals(org.junit.Assert.assertEquals) Log4jUtils(org.apache.hadoop.hbase.logging.Log4jUtils) MobFileCache(org.apache.hadoop.hbase.mob.MobFileCache) Result(org.apache.hadoop.hbase.client.Result) Random(java.util.Random) MiniMRCluster(org.apache.hadoop.mapred.MiniMRCluster) ServerManager(org.apache.hadoop.hbase.master.ServerManager) States(org.apache.zookeeper.ZooKeeper.States) 
BlockCache(org.apache.hadoop.hbase.io.hfile.BlockCache) RegionScanner(org.apache.hadoop.hbase.regionserver.RegionScanner) Algorithm(org.apache.hadoop.hbase.io.compress.Compression.Algorithm) Path(org.apache.hadoop.fs.Path) ColumnFamilyDescriptor(org.apache.hadoop.hbase.client.ColumnFamilyDescriptor) Assert.fail(org.junit.Assert.fail) Durability(org.apache.hadoop.hbase.client.Durability) HFile(org.apache.hadoop.hbase.io.hfile.HFile) ProtobufUtil(org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil) RegionSplitter(org.apache.hadoop.hbase.util.RegionSplitter) Collection(java.util.Collection) Compression(org.apache.hadoop.hbase.io.compress.Compression) NavigableSet(java.util.NavigableSet) ZKWatcher(org.apache.hadoop.hbase.zookeeper.ZKWatcher) HBaseKerberosUtils(org.apache.hadoop.hbase.security.HBaseKerberosUtils) MasterThread(org.apache.hadoop.hbase.util.JVMClusterUtil.MasterThread) List(java.util.List) RegionLocator(org.apache.hadoop.hbase.client.RegionLocator) ResultScanner(org.apache.hadoop.hbase.client.ResultScanner) Modifier(java.lang.reflect.Modifier) EnvironmentEdgeManager(org.apache.hadoop.hbase.util.EnvironmentEdgeManager) RandomStringUtils(org.apache.commons.lang3.RandomStringUtils) MasterRegistry(org.apache.hadoop.hbase.client.MasterRegistry) RegionInfoBuilder(org.apache.hadoop.hbase.client.RegionInfoBuilder) TableDescriptor(org.apache.hadoop.hbase.client.TableDescriptor) HMaster(org.apache.hadoop.hbase.master.HMaster) Socket(java.net.Socket) MessageDigest(java.security.MessageDigest) TableState(org.apache.hadoop.hbase.client.TableState) BindException(java.net.BindException) Hbck(org.apache.hadoop.hbase.client.Hbck) AtomicReference(java.util.concurrent.atomic.AtomicReference) User(org.apache.hadoop.hbase.security.User) HashSet(java.util.HashSet) InterfaceStability(org.apache.yetus.audience.InterfaceStability) AsyncClusterConnection(org.apache.hadoop.hbase.client.AsyncClusterConnection) Threads(org.apache.hadoop.hbase.util.Threads) 
TaskLog(org.apache.hadoop.mapred.TaskLog) Bytes(org.apache.hadoop.hbase.util.Bytes) OutputStream(java.io.OutputStream) ReadType(org.apache.hadoop.hbase.client.Scan.ReadType) RegionStates(org.apache.hadoop.hbase.master.assignment.RegionStates) DistributedFileSystem(org.apache.hadoop.hdfs.DistributedFileSystem) Iterator(java.util.Iterator) RegionServerServices(org.apache.hadoop.hbase.regionserver.RegionServerServices) ReflectionUtils(org.apache.hadoop.hbase.util.ReflectionUtils) RegionServerThread(org.apache.hadoop.hbase.util.JVMClusterUtil.RegionServerThread) Put(org.apache.hadoop.hbase.client.Put) MiniKdc(org.apache.hadoop.minikdc.MiniKdc) WatchedEvent(org.apache.zookeeper.WatchedEvent) TimeUnit(java.util.concurrent.TimeUnit) JobConf(org.apache.hadoop.mapred.JobConf) InterfaceAudience(org.apache.yetus.audience.InterfaceAudience) ClusterConnectionFactory(org.apache.hadoop.hbase.client.ClusterConnectionFactory) MapreduceTestingShim(org.apache.hadoop.hbase.mapreduce.MapreduceTestingShim) Table(org.apache.hadoop.hbase.client.Table) ExplainingPredicate(org.apache.hadoop.hbase.Waiter.ExplainingPredicate) Collections(java.util.Collections) RegionInfo(org.apache.hadoop.hbase.client.RegionInfo) RetryCounter(org.apache.hadoop.hbase.util.RetryCounter) RegionInfo(org.apache.hadoop.hbase.client.RegionInfo) UncheckedIOException(java.io.UncheckedIOException) IOException(java.io.IOException)

Aggregations

HRegionServer (org.apache.hadoop.hbase.regionserver.HRegionServer)253 Test (org.junit.Test)188 TableName (org.apache.hadoop.hbase.TableName)70 Table (org.apache.hadoop.hbase.client.Table)67 HRegion (org.apache.hadoop.hbase.regionserver.HRegion)59 IOException (java.io.IOException)53 Region (org.apache.hadoop.hbase.regionserver.Region)49 Configuration (org.apache.hadoop.conf.Configuration)47 ServerName (org.apache.hadoop.hbase.ServerName)46 HRegionInfo (org.apache.hadoop.hbase.HRegionInfo)41 RegionInfo (org.apache.hadoop.hbase.client.RegionInfo)41 Put (org.apache.hadoop.hbase.client.Put)39 SingleProcessHBaseCluster (org.apache.hadoop.hbase.SingleProcessHBaseCluster)32 RegionServerThread (org.apache.hadoop.hbase.util.JVMClusterUtil.RegionServerThread)32 JVMClusterUtil (org.apache.hadoop.hbase.util.JVMClusterUtil)23 List (java.util.List)22 HMaster (org.apache.hadoop.hbase.master.HMaster)22 ArrayList (java.util.ArrayList)21 HBaseClassTestRule (org.apache.hadoop.hbase.HBaseClassTestRule)21 Waiter (org.apache.hadoop.hbase.Waiter)21