
Example 1 with LinkedMap

Use of org.apache.commons.collections.map.LinkedMap in project hadoop by apache.

Class TestShortCircuitCache, method testShmBasedStaleness.

@Test(timeout = 60000)
public void testShmBasedStaleness() throws Exception {
    BlockReaderTestUtil.enableShortCircuitShmTracing();
    TemporarySocketDirectory sockDir = new TemporarySocketDirectory();
    Configuration conf = createShortCircuitConf("testShmBasedStaleness", sockDir);
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    cluster.waitActive();
    DistributedFileSystem fs = cluster.getFileSystem();
    final ShortCircuitCache cache = fs.getClient().getClientContext().getShortCircuitCache();
    String TEST_FILE = "/test_file";
    final int TEST_FILE_LEN = 8193;
    final int SEED = 0xFADED;
    DFSTestUtil.createFile(fs, new Path(TEST_FILE), TEST_FILE_LEN, (short) 1, SEED);
    FSDataInputStream fis = fs.open(new Path(TEST_FILE));
    int first = fis.read();
    final ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, new Path(TEST_FILE));
    Assert.assertTrue(first != -1);
    cache.accept(new CacheVisitor() {

        @Override
        public void visit(int numOutstandingMmaps,
                Map<ExtendedBlockId, ShortCircuitReplica> replicas,
                Map<ExtendedBlockId, InvalidToken> failedLoads,
                LinkedMap evictable, LinkedMap evictableMmapped) {
            ShortCircuitReplica replica = replicas.get(ExtendedBlockId.fromExtendedBlock(block));
            Assert.assertNotNull(replica);
            Assert.assertTrue(replica.getSlot().isValid());
        }
    });
    // Stop the DataNode. This closes the socket that keeps the client's
    // shared memory segment alive, making the segment stale.
    cluster.getDataNodes().get(0).shutdown();
    cache.accept(new CacheVisitor() {

        @Override
        public void visit(int numOutstandingMmaps,
                Map<ExtendedBlockId, ShortCircuitReplica> replicas,
                Map<ExtendedBlockId, InvalidToken> failedLoads,
                LinkedMap evictable, LinkedMap evictableMmapped) {
            ShortCircuitReplica replica = replicas.get(ExtendedBlockId.fromExtendedBlock(block));
            Assert.assertNotNull(replica);
            Assert.assertFalse(replica.getSlot().isValid());
        }
    });
    cluster.shutdown();
    sockDir.close();
}
Also used : Path(org.apache.hadoop.fs.Path) MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster) Configuration(org.apache.hadoop.conf.Configuration) ExtendedBlockId(org.apache.hadoop.hdfs.ExtendedBlockId) DatanodeInfoBuilder(org.apache.hadoop.hdfs.protocol.DatanodeInfo.DatanodeInfoBuilder) ExtendedBlock(org.apache.hadoop.hdfs.protocol.ExtendedBlock) DistributedFileSystem(org.apache.hadoop.hdfs.DistributedFileSystem) LinkedMap(org.apache.commons.collections.map.LinkedMap) TemporarySocketDirectory(org.apache.hadoop.net.unix.TemporarySocketDirectory) CacheVisitor(org.apache.hadoop.hdfs.shortcircuit.ShortCircuitCache.CacheVisitor) InvalidToken(org.apache.hadoop.security.token.SecretManager.InvalidToken) FSDataInputStream(org.apache.hadoop.fs.FSDataInputStream) Test(org.junit.Test)
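The two cache.accept(...) calls above use a visitor-style callback: the cache hands its internal LinkedMaps to the visitor while holding its own lock, instead of exposing getters for mutable internals. A minimal sketch of that pattern, with illustrative names rather than Hadoop's actual API:

import org.apache.commons.collections.map.LinkedMap;

public class VisitorSketch {

    // Illustrative stand-in for ShortCircuitCache.CacheVisitor.
    interface Visitor {
        void visit(LinkedMap evictable, LinkedMap evictableMmapped);
    }

    static class Cache {
        private final LinkedMap evictable = new LinkedMap();
        private final LinkedMap evictableMmapped = new LinkedMap();

        // Run the visitor while holding the cache's lock, so it sees a
        // consistent snapshot of the internal state.
        synchronized void accept(Visitor visitor) {
            visitor.visit(evictable, evictableMmapped);
        }
    }

    public static void main(String[] args) {
        Cache cache = new Cache();
        cache.accept((evictable, evictableMmapped) ->
                System.out.println(evictable.size() + " evictable entries"));
    }
}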

Example 2 with LinkedMap

Use of org.apache.commons.collections.map.LinkedMap in project flink by apache.

Class FlinkKafkaConsumerBaseTest, method checkUseFetcherWhenNoCheckpoint.

/**
	 * Tests that the fetcher is used when there is no restored checkpoint state.
	 */
@SuppressWarnings("unchecked")
@Test
public void checkUseFetcherWhenNoCheckpoint() throws Exception {
    FlinkKafkaConsumerBase<String> consumer = getConsumer(null, new LinkedMap(), true);
    List<KafkaTopicPartition> partitionList = new ArrayList<>(1);
    partitionList.add(new KafkaTopicPartition("test", 0));
    consumer.setSubscribedPartitions(partitionList);
    OperatorStateStore operatorStateStore = mock(OperatorStateStore.class);
    TestingListState<Serializable> listState = new TestingListState<>();
    when(operatorStateStore.getSerializableListState(Matchers.any(String.class))).thenReturn(listState);
    StateInitializationContext initializationContext = mock(StateInitializationContext.class);
    when(initializationContext.getOperatorStateStore()).thenReturn(operatorStateStore);
    // make the context signal that there is no restored state, then validate
    // that the consumer still initializes and runs the fetcher from scratch
    when(initializationContext.isRestored()).thenReturn(false);
    consumer.initializeState(initializationContext);
    consumer.run(mock(SourceFunction.SourceContext.class));
}
Also used : OperatorStateStore(org.apache.flink.api.common.state.OperatorStateStore) Serializable(java.io.Serializable) StateInitializationContext(org.apache.flink.runtime.state.StateInitializationContext) ArrayList(java.util.ArrayList) KafkaTopicPartition(org.apache.flink.streaming.connectors.kafka.internals.KafkaTopicPartition) LinkedMap(org.apache.commons.collections.map.LinkedMap) Test(org.junit.Test)
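The test drives the consumer entirely through Mockito stubs. A minimal, self-contained sketch of the mock/when/thenReturn pattern it relies on (the Context interface here is a hypothetical stand-in for StateInitializationContext):

import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

public class StubbingSketch {

    // Hypothetical stand-in for StateInitializationContext.
    interface Context {
        boolean isRestored();
    }

    public static void main(String[] args) {
        Context ctx = mock(Context.class);
        // Stub the method so code under test takes the "no restored state" branch.
        when(ctx.isRestored()).thenReturn(false);
        System.out.println(ctx.isRestored()); // prints: false
    }
}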

Example 3 with LinkedMap

Use of org.apache.commons.collections.map.LinkedMap in project flink by apache.

Class FlinkKafkaConsumerBaseTest, method ignoreCheckpointWhenNotRunning.

/**
	 * Tests that no checkpoints happen when the fetcher is not running.
	 */
@Test
public void ignoreCheckpointWhenNotRunning() throws Exception {
    @SuppressWarnings("unchecked") final AbstractFetcher<String, ?> fetcher = mock(AbstractFetcher.class);
    FlinkKafkaConsumerBase<String> consumer = getConsumer(fetcher, new LinkedMap(), false);
    OperatorStateStore operatorStateStore = mock(OperatorStateStore.class);
    TestingListState<Tuple2<KafkaTopicPartition, Long>> listState = new TestingListState<>();
    when(operatorStateStore.getOperatorState(Matchers.any(ListStateDescriptor.class))).thenReturn(listState);
    consumer.snapshotState(new StateSnapshotContextSynchronousImpl(1, 1));
    assertFalse(listState.get().iterator().hasNext());
    consumer.notifyCheckpointComplete(66L);
}
Also used : OperatorStateStore(org.apache.flink.api.common.state.OperatorStateStore) Tuple2(org.apache.flink.api.java.tuple.Tuple2) StateSnapshotContextSynchronousImpl(org.apache.flink.runtime.state.StateSnapshotContextSynchronousImpl) ListStateDescriptor(org.apache.flink.api.common.state.ListStateDescriptor) LinkedMap(org.apache.commons.collections.map.LinkedMap) Test(org.junit.Test)
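What the test pins down is a guard: with no running fetcher there is nothing to snapshot and nothing to queue for a later Kafka commit, so notifyCheckpointComplete(66L) is a no-op. A simplified sketch of such a guard (an assumption about the shape of the logic, not Flink's actual implementation):

import java.util.Collections;
import java.util.List;
import org.apache.commons.collections.map.LinkedMap;

public class SnapshotGuardSketch {

    private Object fetcher; // stays null until the source actually runs
    private final LinkedMap pendingOffsetsToCommit = new LinkedMap();

    // With no fetcher there are no offsets: snapshot nothing and queue nothing,
    // so a later checkpoint-complete notification finds no pending commit.
    List<Object> snapshotState(long checkpointId) {
        if (fetcher == null) {
            return Collections.emptyList();
        }
        Object offsets = "offsets@" + checkpointId; // placeholder for real offsets
        pendingOffsetsToCommit.put(checkpointId, offsets);
        return Collections.singletonList(offsets);
    }

    public static void main(String[] args) {
        System.out.println(new SnapshotGuardSketch().snapshotState(17L)); // []
    }
}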

Example 4 with LinkedMap

Use of org.apache.commons.collections.map.LinkedMap in project flink by apache.

Class FlinkKafkaConsumerBaseTest, method checkRestoredCheckpointWhenFetcherNotReady.

/**
	 * Tests that restored checkpoint state is snapshotted correctly while the
	 * fetcher is not yet ready.
	 */
@Test
public void checkRestoredCheckpointWhenFetcherNotReady() throws Exception {
    OperatorStateStore operatorStateStore = mock(OperatorStateStore.class);
    TestingListState<Serializable> listState = new TestingListState<>();
    listState.add(Tuple2.of(new KafkaTopicPartition("abc", 13), 16768L));
    listState.add(Tuple2.of(new KafkaTopicPartition("def", 7), 987654321L));
    FlinkKafkaConsumerBase<String> consumer = getConsumer(null, new LinkedMap(), true);
    when(operatorStateStore.getSerializableListState(Matchers.any(String.class))).thenReturn(listState);
    StateInitializationContext initializationContext = mock(StateInitializationContext.class);
    when(initializationContext.getOperatorStateStore()).thenReturn(operatorStateStore);
    when(initializationContext.isRestored()).thenReturn(true);
    consumer.initializeState(initializationContext);
    consumer.open(new Configuration());
    consumer.snapshotState(new StateSnapshotContextSynchronousImpl(17, 17));
    // Ensure that the list was cleared and refilled. While this is an implementation
    // detail, we use it here to verify that snapshotState() actually did something.
    Assert.assertTrue(listState.isClearCalled());
    Set<Serializable> expected = new HashSet<>();
    for (Serializable serializable : listState.get()) {
        expected.add(serializable);
    }
    int counter = 0;
    for (Serializable serializable : listState.get()) {
        assertTrue(expected.contains(serializable));
        counter++;
    }
    assertEquals(expected.size(), counter);
}
Also used : OperatorStateStore(org.apache.flink.api.common.state.OperatorStateStore) Serializable(java.io.Serializable) Configuration(org.apache.flink.configuration.Configuration) StateSnapshotContextSynchronousImpl(org.apache.flink.runtime.state.StateSnapshotContextSynchronousImpl) KafkaTopicPartition(org.apache.flink.streaming.connectors.kafka.internals.KafkaTopicPartition) LinkedMap(org.apache.commons.collections.map.LinkedMap) StateInitializationContext(org.apache.flink.runtime.state.StateInitializationContext) HashSet(java.util.HashSet) Test(org.junit.Test)
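TestingListState is a helper from the Flink test suite; its essential trick is a ListState-like stub that records whether clear() was invoked. A rough sketch of such a stub (illustrative, not the actual helper):

import java.util.ArrayList;
import java.util.List;

public class RecordingListState<T> {

    private final List<T> backing = new ArrayList<>();
    private boolean clearCalled = false;

    public void add(T value) { backing.add(value); }

    public Iterable<T> get() { return backing; }

    // Recording clear() lets a test assert that snapshotState() rebuilt the
    // list from scratch instead of appending to stale entries.
    public void clear() {
        clearCalled = true;
        backing.clear();
    }

    public boolean isClearCalled() { return clearCalled; }

    public static void main(String[] args) {
        RecordingListState<String> state = new RecordingListState<>();
        state.add("a");
        state.clear();
        System.out.println(state.isClearCalled()); // prints: true
    }
}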

Example 5 with LinkedMap

Use of org.apache.commons.collections.map.LinkedMap in project jackrabbit by apache.

Class ChildNodeEntries, method ensureModifiable.

/**
     * Ensures that the {@link #nameMap} and {@link #entries} map are
     * modifiable.
     */
@SuppressWarnings("unchecked")
private void ensureModifiable() {
    if (nameMap == Collections.EMPTY_MAP) {
        nameMap = new HashMap<Name, Object>();
        entries = new LinkedMap();
    } else if (shared) {
        entries = (LinkedMap) entries.clone();
        nameMap = new HashMap<Name, Object>(nameMap);
        for (Map.Entry<Name, Object> entry : nameMap.entrySet()) {
            Object value = entry.getValue();
            if (value instanceof List<?>) {
                entry.setValue(new ArrayList<ChildNodeEntry>((List<ChildNodeEntry>) value));
            }
        }
        shared = false;
    }
}
Also used : HashMap(java.util.HashMap) ArrayList(java.util.ArrayList) LinkedMap(org.apache.commons.collections.map.LinkedMap) EmptyLinkedMap(org.apache.jackrabbit.core.util.EmptyLinkedMap) Name(org.apache.jackrabbit.spi.Name)
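ensureModifiable() is a copy-on-write guard: while the maps are shared with another ChildNodeEntries instance, the first mutation clones them so the other holder never observes the change. A stripped-down sketch of the same idea (hypothetical names, not Jackrabbit's API):

import org.apache.commons.collections.map.LinkedMap;

public class CowEntries {

    private LinkedMap entries = new LinkedMap();
    private boolean shared = false;

    // Hand out a cheap copy that shares the underlying map with this instance.
    public CowEntries createShared() {
        CowEntries copy = new CowEntries();
        copy.entries = this.entries;
        copy.shared = true;
        this.shared = true;
        return copy;
    }

    public void put(Object key, Object value) {
        if (shared) {
            // First write after sharing: clone so the other holder is unaffected.
            entries = (LinkedMap) entries.clone();
            shared = false;
        }
        entries.put(key, value);
    }

    public static void main(String[] args) {
        CowEntries original = new CowEntries();
        original.put("a", 1);
        CowEntries copy = original.createShared();
        copy.put("b", 2); // clones first; 'original' still holds only "a"
    }
}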

Aggregations

LinkedMap (org.apache.commons.collections.map.LinkedMap) 11
Test (org.junit.Test) 8
OperatorStateStore (org.apache.flink.api.common.state.OperatorStateStore) 6
Serializable (java.io.Serializable) 5
StateInitializationContext (org.apache.flink.runtime.state.StateInitializationContext) 5
StateSnapshotContextSynchronousImpl (org.apache.flink.runtime.state.StateSnapshotContextSynchronousImpl) 5
Configuration (org.apache.flink.configuration.Configuration) 4
KafkaTopicPartition (org.apache.flink.streaming.connectors.kafka.internals.KafkaTopicPartition) 4
HashMap (java.util.HashMap) 3
ListStateDescriptor (org.apache.flink.api.common.state.ListStateDescriptor) 3
Tuple2 (org.apache.flink.api.java.tuple.Tuple2) 3
CacheVisitor (org.apache.hadoop.hdfs.shortcircuit.ShortCircuitCache.CacheVisitor) 3
ArrayList (java.util.ArrayList) 2
Map (java.util.Map) 2
MutableBoolean (org.apache.commons.lang.mutable.MutableBoolean) 2
StreamingRuntimeContext (org.apache.flink.streaming.api.operators.StreamingRuntimeContext) 2
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem) 2
ExtendedBlockId (org.apache.hadoop.hdfs.ExtendedBlockId) 2
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster) 2
ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock) 2
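
The common thread across these usages is that LinkedMap keeps insertion order and layers list-like access on top of the Map interface. A minimal sketch of that behavior, assuming commons-collections 3.x on the classpath (the library predates generics, so the raw type is idiomatic):

import org.apache.commons.collections.map.LinkedMap;

public class LinkedMapDemo {

    public static void main(String[] args) {
        LinkedMap map = new LinkedMap();
        map.put("first", 1);
        map.put("second", 2);
        map.put("third", 3);

        // Iteration follows insertion order, unlike HashMap.
        System.out.println(map.firstKey());       // first
        System.out.println(map.nextKey("first")); // second

        // Index-based access is what LinkedMap adds over java.util.LinkedHashMap.
        System.out.println(map.get(1));           // second (the key at index 1)
        System.out.println(map.getValue(1));      // 2
    }
}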