
Example 61 with AtomicBoolean

Use of java.util.concurrent.atomic.AtomicBoolean in project hive by apache.

The class TestTxnCommands2, method testInitiatorWithMultipleFailedCompactionsForVariousTblProperties.

void testInitiatorWithMultipleFailedCompactionsForVariousTblProperties(String tblProperties) throws Exception {
    String tblName = "hive12353";
    runStatementOnDriver("drop table if exists " + tblName);
    runStatementOnDriver("CREATE TABLE " + tblName + "(a INT, b STRING) " + //currently ACID requires table to be bucketed
    " CLUSTERED BY(a) INTO 1 BUCKETS" + " STORED AS ORC  TBLPROPERTIES ( " + tblProperties + " )");
    hiveConf.setIntVar(HiveConf.ConfVars.HIVE_COMPACTOR_DELTA_NUM_THRESHOLD, 4);
    for (int i = 0; i < 5; i++) {
        //generate enough delta files so that Initiator can trigger auto compaction
        runStatementOnDriver("insert into " + tblName + " values(" + (i + 1) + ", 'foo'),(" + (i + 2) + ", 'bar'),(" + (i + 3) + ", 'baz')");
    }
    hiveConf.setBoolVar(HiveConf.ConfVars.HIVETESTMODEFAILCOMPACTION, true);
    int numFailedCompactions = hiveConf.getIntVar(HiveConf.ConfVars.COMPACTOR_INITIATOR_FAILED_THRESHOLD);
    TxnStore txnHandler = TxnUtils.getTxnStore(hiveConf);
    AtomicBoolean stop = new AtomicBoolean(true);
    //create failed compactions
    for (int i = 0; i < numFailedCompactions; i++) {
        //each of these should fail
        txnHandler.compact(new CompactionRequest("default", tblName, CompactionType.MINOR));
        runWorker(hiveConf);
    }
    //this should not schedule a new compaction due to prior failures, but will create Attempted entry
    Initiator init = new Initiator();
    init.setThreadId((int) init.getId());
    init.setHiveConf(hiveConf);
    init.init(stop, new AtomicBoolean());
    init.run();
    int numAttemptedCompactions = 1;
    checkCompactionState(new CompactionsByState(numAttemptedCompactions, numFailedCompactions, 0, 0, 0, 0, numFailedCompactions + numAttemptedCompactions), countCompacts(txnHandler));
    hiveConf.setTimeVar(HiveConf.ConfVars.COMPACTOR_HISTORY_REAPER_INTERVAL, 10, TimeUnit.MILLISECONDS);
    AcidCompactionHistoryService compactionHistoryService = new AcidCompactionHistoryService();
    //should not remove anything from history
    runHouseKeeperService(compactionHistoryService, hiveConf);
    checkCompactionState(new CompactionsByState(numAttemptedCompactions, numFailedCompactions, 0, 0, 0, 0, numFailedCompactions + numAttemptedCompactions), countCompacts(txnHandler));
    txnHandler.compact(new CompactionRequest("default", tblName, CompactionType.MAJOR));
    //will fail
    runWorker(hiveConf);
    txnHandler.compact(new CompactionRequest("default", tblName, CompactionType.MINOR));
    //will fail
    runWorker(hiveConf);
    init.run();
    numAttemptedCompactions++;
    init.run();
    numAttemptedCompactions++;
    checkCompactionState(new CompactionsByState(numAttemptedCompactions, numFailedCompactions + 2, 0, 0, 0, 0, numFailedCompactions + 2 + numAttemptedCompactions), countCompacts(txnHandler));
    //should remove history so that we have COMPACTOR_HISTORY_RETENTION_FAILED failed compacts left
    //(and no other, since we only have failed ones here)
    runHouseKeeperService(compactionHistoryService, hiveConf);
    checkCompactionState(new CompactionsByState(hiveConf.getIntVar(HiveConf.ConfVars.COMPACTOR_HISTORY_RETENTION_ATTEMPTED), hiveConf.getIntVar(HiveConf.ConfVars.COMPACTOR_HISTORY_RETENTION_FAILED), 0, 0, 0, 0, hiveConf.getIntVar(HiveConf.ConfVars.COMPACTOR_HISTORY_RETENTION_FAILED) + hiveConf.getIntVar(HiveConf.ConfVars.COMPACTOR_HISTORY_RETENTION_ATTEMPTED)), countCompacts(txnHandler));
    hiveConf.setBoolVar(HiveConf.ConfVars.HIVETESTMODEFAILCOMPACTION, false);
    txnHandler.compact(new CompactionRequest("default", tblName, CompactionType.MINOR));
    //at this point "show compactions" should have (COMPACTOR_HISTORY_RETENTION_FAILED) failed + 1 initiated (explicitly by user)
    checkCompactionState(new CompactionsByState(hiveConf.getIntVar(HiveConf.ConfVars.COMPACTOR_HISTORY_RETENTION_ATTEMPTED), hiveConf.getIntVar(HiveConf.ConfVars.COMPACTOR_HISTORY_RETENTION_FAILED), 1, 0, 0, 0, hiveConf.getIntVar(HiveConf.ConfVars.COMPACTOR_HISTORY_RETENTION_FAILED) + hiveConf.getIntVar(HiveConf.ConfVars.COMPACTOR_HISTORY_RETENTION_ATTEMPTED) + 1), countCompacts(txnHandler));
    //will succeed and transition to Initiated->Working->Ready for Cleaning
    runWorker(hiveConf);
    checkCompactionState(new CompactionsByState(hiveConf.getIntVar(HiveConf.ConfVars.COMPACTOR_HISTORY_RETENTION_ATTEMPTED), hiveConf.getIntVar(HiveConf.ConfVars.COMPACTOR_HISTORY_RETENTION_FAILED), 0, 1, 0, 0, hiveConf.getIntVar(HiveConf.ConfVars.COMPACTOR_HISTORY_RETENTION_FAILED) + hiveConf.getIntVar(HiveConf.ConfVars.COMPACTOR_HISTORY_RETENTION_ATTEMPTED) + 1), countCompacts(txnHandler));
    // transition to Success state
    runCleaner(hiveConf);
    //should not purge anything as all items within retention sizes
    runHouseKeeperService(compactionHistoryService, hiveConf);
    checkCompactionState(new CompactionsByState(hiveConf.getIntVar(HiveConf.ConfVars.COMPACTOR_HISTORY_RETENTION_ATTEMPTED), hiveConf.getIntVar(HiveConf.ConfVars.COMPACTOR_HISTORY_RETENTION_FAILED), 0, 0, 1, 0, hiveConf.getIntVar(HiveConf.ConfVars.COMPACTOR_HISTORY_RETENTION_FAILED) + hiveConf.getIntVar(HiveConf.ConfVars.COMPACTOR_HISTORY_RETENTION_ATTEMPTED) + 1), countCompacts(txnHandler));
}
Also used: AtomicBoolean (java.util.concurrent.atomic.AtomicBoolean), Initiator (org.apache.hadoop.hive.ql.txn.compactor.Initiator), TxnStore (org.apache.hadoop.hive.metastore.txn.TxnStore), CompactionRequest (org.apache.hadoop.hive.metastore.api.CompactionRequest), AcidCompactionHistoryService (org.apache.hadoop.hive.ql.txn.AcidCompactionHistoryService)
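
In this example the AtomicBoolean passed to init() acts as a stop flag: because it is constructed as new AtomicBoolean(true), the Initiator performs exactly one pass when run() is called and then returns, which lets the test drive compaction scheduling one step at a time. Below is a minimal, hypothetical sketch of that loop shape; the class and method names (SinglePassWorker, doOnePass) are invented for illustration and are not Hive's actual compactor code.

import java.util.concurrent.atomic.AtomicBoolean;

// Illustrative sketch only: a worker whose run() honors a "stop" flag.
// Pre-setting the flag to true, as the test does, yields a single pass.
class SinglePassWorker extends Thread {

    private final AtomicBoolean stop;

    SinglePassWorker(AtomicBoolean stop) {
        this.stop = stop;
    }

    @Override
    public void run() {
        do {
            doOnePass();          // one scheduling/compaction cycle per iteration
        } while (!stop.get());    // a flag pre-set to true exits after the first pass
    }

    private void doOnePass() {
        // the work of one cycle would go here
    }
}

Calling run() directly on the current thread, as the test does with init.run(), keeps the whole sequence deterministic: no background thread is actually started.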

Example 62 with AtomicBoolean

Use of java.util.concurrent.atomic.AtomicBoolean in project hive by apache.

The class TestTxnCommands2, method runCleaner.

public static void runCleaner(HiveConf hiveConf) throws MetaException {
    AtomicBoolean stop = new AtomicBoolean(true);
    Cleaner t = new Cleaner();
    t.setThreadId((int) t.getId());
    t.setHiveConf(hiveConf);
    AtomicBoolean looped = new AtomicBoolean();
    t.init(stop, looped);
    t.run();
}
Also used: AtomicBoolean (java.util.concurrent.atomic.AtomicBoolean), Cleaner (org.apache.hadoop.hive.ql.txn.compactor.Cleaner)
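
runCleaner follows the same single-pass convention sketched after the previous example: the stop flag is pre-set to true, so Cleaner.run() performs one cleaning cycle and returns. The second AtomicBoolean (looped) is simply passed fresh to init(); this helper does not inspect it afterwards.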

Example 63 with AtomicBoolean

Use of java.util.concurrent.atomic.AtomicBoolean in project kafka by apache.

The class KafkaBasedLogTest, method testSendAndReadToEnd.

@Test
public void testSendAndReadToEnd() throws Exception {
    expectStart();
    TestFuture<RecordMetadata> tp0Future = new TestFuture<>();
    ProducerRecord<String, String> tp0Record = new ProducerRecord<>(TOPIC, TP0_KEY, TP0_VALUE);
    Capture<org.apache.kafka.clients.producer.Callback> callback0 = EasyMock.newCapture();
    EasyMock.expect(producer.send(EasyMock.eq(tp0Record), EasyMock.capture(callback0))).andReturn(tp0Future);
    TestFuture<RecordMetadata> tp1Future = new TestFuture<>();
    ProducerRecord<String, String> tp1Record = new ProducerRecord<>(TOPIC, TP1_KEY, TP1_VALUE);
    Capture<org.apache.kafka.clients.producer.Callback> callback1 = EasyMock.newCapture();
    EasyMock.expect(producer.send(EasyMock.eq(tp1Record), EasyMock.capture(callback1))).andReturn(tp1Future);
    // Producer flushes when read to log end is called
    producer.flush();
    PowerMock.expectLastCall();
    expectStop();
    PowerMock.replayAll();
    Map<TopicPartition, Long> endOffsets = new HashMap<>();
    endOffsets.put(TP0, 0L);
    endOffsets.put(TP1, 0L);
    consumer.updateEndOffsets(endOffsets);
    store.start();
    assertEquals(CONSUMER_ASSIGNMENT, consumer.assignment());
    assertEquals(0L, consumer.position(TP0));
    assertEquals(0L, consumer.position(TP1));
    // Set some keys
    final AtomicInteger invoked = new AtomicInteger(0);
    org.apache.kafka.clients.producer.Callback producerCallback = new org.apache.kafka.clients.producer.Callback() {

        @Override
        public void onCompletion(RecordMetadata metadata, Exception exception) {
            invoked.incrementAndGet();
        }
    };
    store.send(TP0_KEY, TP0_VALUE, producerCallback);
    store.send(TP1_KEY, TP1_VALUE, producerCallback);
    assertEquals(0, invoked.get());
    // Output not used, so safe to not return a real value for testing
    tp1Future.resolve((RecordMetadata) null);
    callback1.getValue().onCompletion(null, null);
    assertEquals(1, invoked.get());
    tp0Future.resolve((RecordMetadata) null);
    callback0.getValue().onCompletion(null, null);
    assertEquals(2, invoked.get());
    // Now we should have to wait for the records to be read back when we call readToEnd()
    final AtomicBoolean getInvoked = new AtomicBoolean(false);
    final FutureCallback<Void> readEndFutureCallback = new FutureCallback<>(new Callback<Void>() {

        @Override
        public void onCompletion(Throwable error, Void result) {
            getInvoked.set(true);
        }
    });
    consumer.schedulePollTask(new Runnable() {

        @Override
        public void run() {
            // Once we're synchronized in a poll, start the read to end and schedule the exact set of poll events
            // that should follow. This readToEnd call will immediately wakeup this consumer.poll() call without
            // returning any data.
            Map<TopicPartition, Long> newEndOffsets = new HashMap<>();
            newEndOffsets.put(TP0, 2L);
            newEndOffsets.put(TP1, 2L);
            consumer.updateEndOffsets(newEndOffsets);
            store.readToEnd(readEndFutureCallback);
            // Should keep polling until it reaches current log end offset for all partitions
            consumer.scheduleNopPollTask();
            consumer.scheduleNopPollTask();
            consumer.scheduleNopPollTask();
            consumer.schedulePollTask(new Runnable() {

                @Override
                public void run() {
                    consumer.addRecord(new ConsumerRecord<>(TOPIC, 0, 0, 0L, TimestampType.CREATE_TIME, 0L, 0, 0, TP0_KEY, TP0_VALUE));
                    consumer.addRecord(new ConsumerRecord<>(TOPIC, 0, 1, 0L, TimestampType.CREATE_TIME, 0L, 0, 0, TP0_KEY, TP0_VALUE_NEW));
                    consumer.addRecord(new ConsumerRecord<>(TOPIC, 1, 0, 0L, TimestampType.CREATE_TIME, 0L, 0, 0, TP1_KEY, TP1_VALUE));
                }
            });
            consumer.schedulePollTask(new Runnable() {

                @Override
                public void run() {
                    consumer.addRecord(new ConsumerRecord<>(TOPIC, 1, 1, 0L, TimestampType.CREATE_TIME, 0L, 0, 0, TP1_KEY, TP1_VALUE_NEW));
                }
            });
        // Already have FutureCallback that should be invoked/awaited, so no need for follow up finishedLatch
        }
    });
    readEndFutureCallback.get(10000, TimeUnit.MILLISECONDS);
    assertTrue(getInvoked.get());
    assertEquals(2, consumedRecords.size());
    assertEquals(2, consumedRecords.get(TP0).size());
    assertEquals(TP0_VALUE, consumedRecords.get(TP0).get(0).value());
    assertEquals(TP0_VALUE_NEW, consumedRecords.get(TP0).get(1).value());
    assertEquals(2, consumedRecords.get(TP1).size());
    assertEquals(TP1_VALUE, consumedRecords.get(TP1).get(0).value());
    assertEquals(TP1_VALUE_NEW, consumedRecords.get(TP1).get(1).value());
    // Cleanup
    store.stop();
    assertFalse(Whitebox.<Thread>getInternalState(store, "thread").isAlive());
    assertTrue(consumer.closed());
    PowerMock.verifyAll();
}
Also used: HashMap (java.util.HashMap), RecordMetadata (org.apache.kafka.clients.producer.RecordMetadata), KafkaException (org.apache.kafka.common.KafkaException), LeaderNotAvailableException (org.apache.kafka.common.errors.LeaderNotAvailableException), WakeupException (org.apache.kafka.common.errors.WakeupException), AtomicBoolean (java.util.concurrent.atomic.AtomicBoolean), AtomicInteger (java.util.concurrent.atomic.AtomicInteger), TopicPartition (org.apache.kafka.common.TopicPartition), ProducerRecord (org.apache.kafka.clients.producer.ProducerRecord), Map (java.util.Map), PrepareForTest (org.powermock.core.classloader.annotations.PrepareForTest), Test (org.junit.Test)
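
Here the AtomicBoolean getInvoked is a completion flag: the FutureCallback flips it, and after blocking on readEndFutureCallback.get(10000, TimeUnit.MILLISECONDS) the test asserts the flag to prove the callback body actually ran, not merely that the future completed. The following is a self-contained sketch of the same idiom using plain java.util.concurrent types; the class name CompletionFlagSketch is invented for illustration and is not part of the Kafka test.

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;

// Illustrative only: an async operation reports completion through a callback,
// and the caller verifies the callback fired by reading an AtomicBoolean.
public class CompletionFlagSketch {
    public static void main(String[] args) throws Exception {
        AtomicBoolean invoked = new AtomicBoolean(false);

        CompletableFuture<Void> future = CompletableFuture
                .runAsync(() -> { /* simulated work */ })
                .whenComplete((result, error) -> invoked.set(true));

        // Block with a timeout, as the test does with its FutureCallback.get(...).
        future.get(10, TimeUnit.SECONDS);

        if (!invoked.get()) {
            throw new AssertionError("completion callback was not invoked");
        }
    }
}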

Example 64 with AtomicBoolean

Use of java.util.concurrent.atomic.AtomicBoolean in project kafka by apache.

The class KafkaOffsetBackingStoreTest, method testGetSet.

@Test
public void testGetSet() throws Exception {
    expectConfigure();
    expectStart(Collections.EMPTY_LIST);
    expectStop();
    // First get() against an empty store
    final Capture<Callback<Void>> firstGetReadToEndCallback = EasyMock.newCapture();
    storeLog.readToEnd(EasyMock.capture(firstGetReadToEndCallback));
    PowerMock.expectLastCall().andAnswer(new IAnswer<Object>() {

        @Override
        public Object answer() throws Throwable {
            firstGetReadToEndCallback.getValue().onCompletion(null, null);
            return null;
        }
    });
    // Set offsets
    Capture<org.apache.kafka.clients.producer.Callback> callback0 = EasyMock.newCapture();
    storeLog.send(EasyMock.aryEq(TP0_KEY.array()), EasyMock.aryEq(TP0_VALUE.array()), EasyMock.capture(callback0));
    PowerMock.expectLastCall();
    Capture<org.apache.kafka.clients.producer.Callback> callback1 = EasyMock.newCapture();
    storeLog.send(EasyMock.aryEq(TP1_KEY.array()), EasyMock.aryEq(TP1_VALUE.array()), EasyMock.capture(callback1));
    PowerMock.expectLastCall();
    // Second get() should get the produced data and return the new values
    final Capture<Callback<Void>> secondGetReadToEndCallback = EasyMock.newCapture();
    storeLog.readToEnd(EasyMock.capture(secondGetReadToEndCallback));
    PowerMock.expectLastCall().andAnswer(new IAnswer<Object>() {

        @Override
        public Object answer() throws Throwable {
            capturedConsumedCallback.getValue().onCompletion(null, new ConsumerRecord<>(TOPIC, 0, 0, 0L, TimestampType.CREATE_TIME, 0L, 0, 0, TP0_KEY.array(), TP0_VALUE.array()));
            capturedConsumedCallback.getValue().onCompletion(null, new ConsumerRecord<>(TOPIC, 1, 0, 0L, TimestampType.CREATE_TIME, 0L, 0, 0, TP1_KEY.array(), TP1_VALUE.array()));
            secondGetReadToEndCallback.getValue().onCompletion(null, null);
            return null;
        }
    });
    // Third get() should pick up data produced by someone else and return those values
    final Capture<Callback<Void>> thirdGetReadToEndCallback = EasyMock.newCapture();
    storeLog.readToEnd(EasyMock.capture(thirdGetReadToEndCallback));
    PowerMock.expectLastCall().andAnswer(new IAnswer<Object>() {

        @Override
        public Object answer() throws Throwable {
            capturedConsumedCallback.getValue().onCompletion(null, new ConsumerRecord<>(TOPIC, 0, 1, 0L, TimestampType.CREATE_TIME, 0L, 0, 0, TP0_KEY.array(), TP0_VALUE_NEW.array()));
            capturedConsumedCallback.getValue().onCompletion(null, new ConsumerRecord<>(TOPIC, 1, 1, 0L, TimestampType.CREATE_TIME, 0L, 0, 0, TP1_KEY.array(), TP1_VALUE_NEW.array()));
            thirdGetReadToEndCallback.getValue().onCompletion(null, null);
            return null;
        }
    });
    PowerMock.replayAll();
    store.configure(DEFAULT_DISTRIBUTED_CONFIG);
    store.start();
    // Getting from empty store should return nulls
    final AtomicBoolean getInvokedAndPassed = new AtomicBoolean(false);
    store.get(Arrays.asList(TP0_KEY, TP1_KEY), new Callback<Map<ByteBuffer, ByteBuffer>>() {

        @Override
        public void onCompletion(Throwable error, Map<ByteBuffer, ByteBuffer> result) {
            // Since we didn't read them yet, these will be null
            assertEquals(null, result.get(TP0_KEY));
            assertEquals(null, result.get(TP1_KEY));
            getInvokedAndPassed.set(true);
        }
    }).get(10000, TimeUnit.MILLISECONDS);
    assertTrue(getInvokedAndPassed.get());
    // Set some offsets
    Map<ByteBuffer, ByteBuffer> toSet = new HashMap<>();
    toSet.put(TP0_KEY, TP0_VALUE);
    toSet.put(TP1_KEY, TP1_VALUE);
    final AtomicBoolean invoked = new AtomicBoolean(false);
    Future<Void> setFuture = store.set(toSet, new Callback<Void>() {

        @Override
        public void onCompletion(Throwable error, Void result) {
            invoked.set(true);
        }
    });
    assertFalse(setFuture.isDone());
    // Out of order callbacks shouldn't matter, should still require all to be invoked before invoking the callback
    // for the store's set callback
    callback1.getValue().onCompletion(null, null);
    assertFalse(invoked.get());
    callback0.getValue().onCompletion(null, null);
    setFuture.get(10000, TimeUnit.MILLISECONDS);
    assertTrue(invoked.get());
    // Getting data should read to end of our published data and return it
    final AtomicBoolean secondGetInvokedAndPassed = new AtomicBoolean(false);
    store.get(Arrays.asList(TP0_KEY, TP1_KEY), new Callback<Map<ByteBuffer, ByteBuffer>>() {

        @Override
        public void onCompletion(Throwable error, Map<ByteBuffer, ByteBuffer> result) {
            assertEquals(TP0_VALUE, result.get(TP0_KEY));
            assertEquals(TP1_VALUE, result.get(TP1_KEY));
            secondGetInvokedAndPassed.set(true);
        }
    }).get(10000, TimeUnit.MILLISECONDS);
    assertTrue(secondGetInvokedAndPassed.get());
    // Getting data should read to end of our published data and return it
    final AtomicBoolean thirdGetInvokedAndPassed = new AtomicBoolean(false);
    store.get(Arrays.asList(TP0_KEY, TP1_KEY), new Callback<Map<ByteBuffer, ByteBuffer>>() {

        @Override
        public void onCompletion(Throwable error, Map<ByteBuffer, ByteBuffer> result) {
            assertEquals(TP0_VALUE_NEW, result.get(TP0_KEY));
            assertEquals(TP1_VALUE_NEW, result.get(TP1_KEY));
            thirdGetInvokedAndPassed.set(true);
        }
    }).get(10000, TimeUnit.MILLISECONDS);
    assertTrue(thirdGetInvokedAndPassed.get());
    store.stop();
    PowerMock.verifyAll();
}
Also used: HashMap (java.util.HashMap), ByteBuffer (java.nio.ByteBuffer), ConsumerRecord (org.apache.kafka.clients.consumer.ConsumerRecord), AtomicBoolean (java.util.concurrent.atomic.AtomicBoolean), Callback (org.apache.kafka.connect.util.Callback), Map (java.util.Map), PrepareForTest (org.powermock.core.classloader.annotations.PrepareForTest), Test (org.junit.Test)
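
In this test the AtomicBoolean invoked guards the aggregate set() future: the two producer callbacks are completed out of order, and the flag must only flip after both have been acknowledged. A minimal sketch of that counting pattern follows; the names (AllAckedSketch, onSingleAck) are invented for illustration, and the real KafkaOffsetBackingStore tracks the acknowledgements internally.

import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;

// Illustrative only: the aggregate flag flips when the last of N callbacks fires,
// regardless of the order in which they arrive.
public class AllAckedSketch {
    public static void main(String[] args) {
        final int expectedAcks = 2;
        AtomicInteger acks = new AtomicInteger(0);
        AtomicBoolean allAcked = new AtomicBoolean(false);

        Runnable onSingleAck = () -> {
            if (acks.incrementAndGet() == expectedAcks) {
                allAcked.set(true);   // only the final acknowledgement flips the flag
            }
        };

        onSingleAck.run();                                          // "callback1" arrives first
        System.out.println("after first ack: " + allAcked.get());   // false
        onSingleAck.run();                                          // "callback0" arrives second
        System.out.println("after second ack: " + allAcked.get());  // true
    }
}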

Example 65 with AtomicBoolean

Use of java.util.concurrent.atomic.AtomicBoolean in project hbase by apache.

The class TestLogRolling, method testLogRollOnDatanodeDeath.

/**
   * Tests that logs are rolled upon detecting datanode death. Requires an HDFS jar with HDFS-826 &
   * syncFs() support (HDFS-200).
   */
@Test
public void testLogRollOnDatanodeDeath() throws Exception {
    TEST_UTIL.ensureSomeRegionServersAvailable(2);
    assertTrue("This test requires WAL file replication set to 2.", fs.getDefaultReplication(TEST_UTIL.getDataTestDirOnTestFS()) == 2);
    LOG.info("Replication=" + fs.getDefaultReplication(TEST_UTIL.getDataTestDirOnTestFS()));
    this.server = cluster.getRegionServer(0);
    // Create the test table and open it
    HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(getName()));
    desc.addFamily(new HColumnDescriptor(HConstants.CATALOG_FAMILY));
    admin.createTable(desc);
    Table table = TEST_UTIL.getConnection().getTable(desc.getTableName());
    server = TEST_UTIL.getRSForFirstRegionInTable(desc.getTableName());
    HRegionInfo region = server.getOnlineRegions(desc.getTableName()).get(0).getRegionInfo();
    final FSHLog log = (FSHLog) server.getWAL(region);
    final AtomicBoolean lowReplicationHookCalled = new AtomicBoolean(false);
    log.registerWALActionsListener(new WALActionsListener.Base() {

        @Override
        public void logRollRequested(boolean lowReplication) {
            if (lowReplication) {
                lowReplicationHookCalled.lazySet(true);
            }
        }
    });
    // add up the datanode count, to ensure proper replication when we kill 1
    // This function is synchronous; when it returns, the dfs cluster is active
    // We start 3 servers and then stop 2 to avoid a directory naming conflict
    // when we stop/start a namenode later, as mentioned in HBASE-5163
    List<DataNode> existingNodes = dfsCluster.getDataNodes();
    int numDataNodes = 3;
    dfsCluster.startDataNodes(TEST_UTIL.getConfiguration(), numDataNodes, true, null, null);
    List<DataNode> allNodes = dfsCluster.getDataNodes();
    for (int i = allNodes.size() - 1; i >= 0; i--) {
        if (existingNodes.contains(allNodes.get(i))) {
            dfsCluster.stopDataNode(i);
        }
    }
    assertTrue("DataNodes " + dfsCluster.getDataNodes().size() + " default replication " + fs.getDefaultReplication(TEST_UTIL.getDataTestDirOnTestFS()), dfsCluster.getDataNodes().size() >= fs.getDefaultReplication(TEST_UTIL.getDataTestDirOnTestFS()) + 1);
    writeData(table, 2);
    long curTime = System.currentTimeMillis();
    LOG.info("log.getCurrentFileName(): " + log.getCurrentFileName());
    long oldFilenum = AbstractFSWALProvider.extractFileNumFromWAL(log);
    assertTrue("Log should have a timestamp older than now", curTime > oldFilenum && oldFilenum != -1);
    assertTrue("The log shouldn't have rolled yet", oldFilenum == AbstractFSWALProvider.extractFileNumFromWAL(log));
    final DatanodeInfo[] pipeline = log.getPipeline();
    assertTrue(pipeline.length == fs.getDefaultReplication(TEST_UTIL.getDataTestDirOnTestFS()));
    // kill a datanode in the pipeline to force a log roll on the next sync()
    // This function is synchronous; when it returns, the node is killed.
    assertTrue(dfsCluster.stopDataNode(pipeline[0].getName()) != null);
    // this write should succeed, but trigger a log roll
    writeData(table, 2);
    long newFilenum = AbstractFSWALProvider.extractFileNumFromWAL(log);
    assertTrue("Missing datanode should've triggered a log roll", newFilenum > oldFilenum && newFilenum > curTime);
    assertTrue("The log rolling hook should have been called with the low replication flag", lowReplicationHookCalled.get());
    // write some more log data (this should use a new hdfs_out)
    writeData(table, 3);
    assertTrue("The log should not roll again.", AbstractFSWALProvider.extractFileNumFromWAL(log) == newFilenum);
    // kill another datanode in the pipeline, so the replicas will be lower than
    // the configured value 2.
    assertTrue(dfsCluster.stopDataNode(pipeline[1].getName()) != null);
    batchWriteAndWait(table, log, 3, false, 14000);
    int replication = log.getLogReplication();
    assertTrue("LowReplication Roller should've been disabled, current replication=" + replication, !log.isLowReplicationRollEnabled());
    dfsCluster.startDataNodes(TEST_UTIL.getConfiguration(), 1, true, null, null);
    // Force roll writer. The new log file will have the default replications,
    // and the LowReplication Roller will be enabled.
    log.rollWriter(true);
    batchWriteAndWait(table, log, 13, true, 10000);
    replication = log.getLogReplication();
    assertTrue("New log file should have the default replication instead of " + replication, replication == fs.getDefaultReplication(TEST_UTIL.getDataTestDirOnTestFS()));
    assertTrue("LowReplication Roller should've been enabled", log.isLowReplicationRollEnabled());
}
Also used: DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo), Table (org.apache.hadoop.hbase.client.Table), HColumnDescriptor (org.apache.hadoop.hbase.HColumnDescriptor), HTableDescriptor (org.apache.hadoop.hbase.HTableDescriptor), HRegionInfo (org.apache.hadoop.hbase.HRegionInfo), AtomicBoolean (java.util.concurrent.atomic.AtomicBoolean), DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode), Test (org.junit.Test)
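
Note that the WAL listener uses lowReplicationHookCalled.lazySet(true) rather than set(true). lazySet performs an ordered store without the full volatile-write barrier, which is cheaper and is sufficient here because the flag is only read much later, after blocking calls such as writeData() have completed. A small sketch contrasting the two writes (illustrative only, not HBase code):

import java.util.concurrent.atomic.AtomicBoolean;

// Illustrative only: set() is a full volatile write; lazySet() is a cheaper
// ordered write whose visibility to other threads may be slightly delayed.
public class LazySetSketch {
    public static void main(String[] args) {
        AtomicBoolean flag = new AtomicBoolean(false);

        flag.lazySet(true);   // ordered store, no full fence
        System.out.println("after lazySet: " + flag.get());

        flag.set(false);      // volatile store, immediately visible to other threads
        System.out.println("after set: " + flag.get());
    }
}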

Aggregations

AtomicBoolean (java.util.concurrent.atomic.AtomicBoolean): 2412 usages
Test (org.junit.Test): 1002
CountDownLatch (java.util.concurrent.CountDownLatch): 394
IOException (java.io.IOException): 336
AtomicInteger (java.util.concurrent.atomic.AtomicInteger): 301
ArrayList (java.util.ArrayList): 214
AtomicReference (java.util.concurrent.atomic.AtomicReference): 202
ENotificationImpl (org.eclipse.emf.ecore.impl.ENotificationImpl): 108
Test (org.testng.annotations.Test): 106
List (java.util.List): 98
Ignite (org.apache.ignite.Ignite): 98
AtomicLong (java.util.concurrent.atomic.AtomicLong): 94
HashMap (java.util.HashMap): 93
ExecutorService (java.util.concurrent.ExecutorService): 90
Map (java.util.Map): 88
ExecutionException (java.util.concurrent.ExecutionException): 87
File (java.io.File): 68
Random (java.util.Random): 68
CyclicBarrier (java.util.concurrent.CyclicBarrier): 68
HashSet (java.util.HashSet): 63