Example 16 with ArrayDeque

Use of java.util.ArrayDeque in project flink by apache.

The class RecordWriterTest, method testBroadcastEventBufferReferenceCounting.

/**
	 * Tests that event buffers are properly recycled when broadcasting events
	 * to multiple channels.
	 *
	 * @throws Exception
	 */
@Test
public void testBroadcastEventBufferReferenceCounting() throws Exception {
    Buffer buffer = EventSerializer.toBuffer(EndOfPartitionEvent.INSTANCE);
    // Partial mocking of static method...
    PowerMockito.stub(PowerMockito.method(EventSerializer.class, "toBuffer")).toReturn(buffer);
    @SuppressWarnings("unchecked")
    ArrayDeque<BufferOrEvent>[] queues = new ArrayDeque[] { new ArrayDeque(), new ArrayDeque() };
    ResultPartitionWriter partition = createCollectingPartitionWriter(queues, new TestInfiniteBufferProvider());
    RecordWriter<?> writer = new RecordWriter<>(partition);
    writer.broadcastEvent(EndOfPartitionEvent.INSTANCE);
    // Verify added to all queues
    assertEquals(1, queues[0].size());
    assertEquals(1, queues[1].size());
    assertTrue(buffer.isRecycled());
}
Also used: Buffer(org.apache.flink.runtime.io.network.buffer.Buffer) TestInfiniteBufferProvider(org.apache.flink.runtime.io.network.util.TestInfiniteBufferProvider) ArrayDeque(java.util.ArrayDeque) PrepareForTest(org.powermock.core.classloader.annotations.PrepareForTest) Test(org.junit.Test)
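
For readers skimming these examples for the ArrayDeque idiom itself, here is a minimal, self-contained sketch of the pattern the test exercises: one ArrayDeque per output channel, and a shared reference-counted payload that is only fully released once every channel has consumed it. This is not Flink's API; RefCountedEvent is a hypothetical stand-in for Buffer.

import java.util.ArrayDeque;

public class BroadcastQueueSketch {

    // Hypothetical stand-in for a reference-counted network buffer.
    static final class RefCountedEvent {
        private int refCount;
        RefCountedEvent(int refCount) { this.refCount = refCount; }
        void recycle() { refCount--; }                 // one release per channel
        boolean isRecycled() { return refCount == 0; } // mirrors buffer.isRecycled()
    }

    public static void main(String[] args) {
        int numChannels = 2;
        @SuppressWarnings("unchecked")
        ArrayDeque<RefCountedEvent>[] queues = new ArrayDeque[numChannels];
        for (int i = 0; i < numChannels; i++) {
            queues[i] = new ArrayDeque<>();
        }

        // "Broadcast": the same instance goes into every channel queue,
        // so its reference count must match the channel count.
        RefCountedEvent event = new RefCountedEvent(numChannels);
        for (ArrayDeque<RefCountedEvent> queue : queues) {
            queue.add(event);
        }

        // Each channel drains its queue and releases its reference.
        for (ArrayDeque<RefCountedEvent> queue : queues) {
            queue.poll().recycle();
        }
        System.out.println("recycled = " + event.isRecycled()); // prints: recycled = true
    }
}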

Example 17 with ArrayDeque

Use of java.util.ArrayDeque in project flink by apache.

The class StreamGraphHasherV1, method traverseStreamGraphAndGenerateHashes.

@Override
public Map<Integer, byte[]> traverseStreamGraphAndGenerateHashes(StreamGraph streamGraph) {
    // The hash function used to generate the hash
    final HashFunction hashFunction = Hashing.murmur3_128(0);
    final Map<Integer, byte[]> hashes = new HashMap<>();
    Set<Integer> visited = new HashSet<>();
    Queue<StreamNode> remaining = new ArrayDeque<>();
    // We need to make the source order deterministic. The source IDs are
    // not returned in the same order, which means that submitting the same
    // program twice might result in different traversal, which breaks the
    // deterministic hash assignment.
    List<Integer> sources = new ArrayList<>();
    for (Integer sourceNodeId : streamGraph.getSourceIDs()) {
        sources.add(sourceNodeId);
    }
    Collections.sort(sources);
    // Start with source nodes
    for (Integer sourceNodeId : sources) {
        remaining.add(streamGraph.getStreamNode(sourceNodeId));
        visited.add(sourceNodeId);
    }
    StreamNode currentNode;
    while ((currentNode = remaining.poll()) != null) {
        // generate the hash code.
        if (generateNodeHash(currentNode, hashFunction, hashes, streamGraph.isChainingEnabled())) {
            // Add the child nodes
            for (StreamEdge outEdge : currentNode.getOutEdges()) {
                StreamNode child = outEdge.getTargetVertex();
                if (!visited.contains(child.getId())) {
                    remaining.add(child);
                    visited.add(child.getId());
                }
            }
        } else {
            // We will revisit this later.
            visited.remove(currentNode.getId());
        }
    }
    return hashes;
}
Also used: HashFunction(com.google.common.hash.HashFunction) HashMap(java.util.HashMap) ArrayList(java.util.ArrayList) StreamEdge(org.apache.flink.streaming.api.graph.StreamEdge) StreamNode(org.apache.flink.streaming.api.graph.StreamNode) ArrayDeque(java.util.ArrayDeque) HashSet(java.util.HashSet)
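
Examples 17 and 18 use ArrayDeque through the Queue interface for a breadth-first traversal whose start order is made deterministic by sorting the source IDs. The following is a minimal sketch of that idiom, detached from Flink (an adjacency map stands in for StreamGraph, integer IDs for StreamNode); it omits the hasher's "revisit later" branch for nodes whose inputs are not yet hashed.

import java.util.ArrayDeque;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Queue;
import java.util.Set;

public class DeterministicBfsSketch {

    static List<Integer> traverse(Map<Integer, List<Integer>> adjacency, Set<Integer> sourceIds) {
        // Sort the start nodes so the visit order is deterministic,
        // exactly the concern the comment in the hasher describes.
        List<Integer> sources = new ArrayList<>(sourceIds);
        Collections.sort(sources);

        Queue<Integer> remaining = new ArrayDeque<>(sources);
        Set<Integer> visited = new HashSet<>(sources);
        List<Integer> order = new ArrayList<>();

        Integer current;
        while ((current = remaining.poll()) != null) {
            order.add(current);
            for (Integer child : adjacency.getOrDefault(current, List.of())) {
                if (visited.add(child)) { // add() returns false if already seen
                    remaining.add(child);
                }
            }
        }
        return order;
    }

    public static void main(String[] args) {
        Map<Integer, List<Integer>> graph = Map.of(
                1, List.of(3),
                2, List.of(3),
                3, List.of(4));
        System.out.println(traverse(graph, Set.of(2, 1))); // [1, 2, 3, 4]
    }
}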

Example 18 with ArrayDeque

Use of java.util.ArrayDeque in project flink by apache.

The class StreamGraphHasherV2, method traverseStreamGraphAndGenerateHashes.

/**
	 * Returns a map with a hash for each {@link StreamNode} of the {@link
	 * StreamGraph}. The hash is used as the {@link JobVertexID} in order to
	 * identify nodes across job submissions if they didn't change.
	 *
	 * <p>The complete {@link StreamGraph} is traversed. The hash is either
	 * computed from the transformation's user-specified id (see
	 * {@link StreamTransformation#getUid()}) or generated in a deterministic way.
	 *
	 * <p>The generated hash is deterministic with respect to:
	 * <ul>
	 * <li>node-local properties (like parallelism, UDF, node ID),
	 * <li>chained output nodes, and
	 * <li>input node hashes.
	 * </ul>
	 *
	 * @return A map from {@link StreamNode#id} to hash as 16-byte array.
	 */
@Override
public Map<Integer, byte[]> traverseStreamGraphAndGenerateHashes(StreamGraph streamGraph) {
    // The hash function used to generate the hash
    final HashFunction hashFunction = Hashing.murmur3_128(0);
    final Map<Integer, byte[]> hashes = new HashMap<>();
    Set<Integer> visited = new HashSet<>();
    Queue<StreamNode> remaining = new ArrayDeque<>();
    // We need to make the source order deterministic. The source IDs are
    // not returned in the same order, which means that submitting the same
    // program twice might result in different traversal, which breaks the
    // deterministic hash assignment.
    List<Integer> sources = new ArrayList<>();
    for (Integer sourceNodeId : streamGraph.getSourceIDs()) {
        sources.add(sourceNodeId);
    }
    Collections.sort(sources);
    // Start with source nodes
    for (Integer sourceNodeId : sources) {
        remaining.add(streamGraph.getStreamNode(sourceNodeId));
        visited.add(sourceNodeId);
    }
    StreamNode currentNode;
    while ((currentNode = remaining.poll()) != null) {
        // generate the hash code.
        if (generateNodeHash(currentNode, hashFunction, hashes, streamGraph.isChainingEnabled())) {
            // Add the child nodes
            for (StreamEdge outEdge : currentNode.getOutEdges()) {
                StreamNode child = outEdge.getTargetVertex();
                if (!visited.contains(child.getId())) {
                    remaining.add(child);
                    visited.add(child.getId());
                }
            }
        } else {
            // We will revisit this later.
            visited.remove(currentNode.getId());
        }
    }
    return hashes;
}
Also used: HashFunction(com.google.common.hash.HashFunction) HashMap(java.util.HashMap) ArrayList(java.util.ArrayList) ArrayDeque(java.util.ArrayDeque) HashSet(java.util.HashSet)
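
The Javadoc above states that each node's hash must be deterministic with respect to node-local properties and the hashes of its inputs. As a rough illustration of the mechanics only, here is a sketch of deriving such a hash with Guava's murmur3_128, the same hash function the method seeds above. This is not Flink's generateNodeHash: what actually gets fed into the hasher there (chained outputs and other node properties) differs, and the integer input here is just a placeholder.

import com.google.common.hash.HashFunction;
import com.google.common.hash.Hasher;
import com.google.common.hash.Hashing;

import java.util.List;

public class NodeHashSketch {

    static byte[] hashNode(int nodeLocalValue, List<byte[]> inputHashes) {
        HashFunction hashFunction = Hashing.murmur3_128(0); // fixed seed, as in the method above
        Hasher hasher = hashFunction.newHasher();
        hasher.putInt(nodeLocalValue);          // node-local contribution (placeholder here)
        for (byte[] inputHash : inputHashes) {  // mix in the already-computed input hashes
            hasher.putBytes(inputHash);
        }
        return hasher.hash().asBytes();         // murmur3_128 yields a 16-byte array
    }

    public static void main(String[] args) {
        byte[] source = hashNode(1, List.of());
        byte[] sink = hashNode(2, List.of(source));
        System.out.println(sink.length); // 16
    }
}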

Example 19 with ArrayDeque

Use of java.util.ArrayDeque in project flink by apache.

The class ContinuousFileProcessingRescalingTest, method testReaderScalingDown.

@Test
public void testReaderScalingDown() throws Exception {
    // simulates the scenario of scaling down from 2 to 1 instances
    final OneShotLatch waitingLatch = new OneShotLatch();
    // create the first instance and let it process the first split till element 5
    final OneShotLatch triggerLatch1 = new OneShotLatch();
    BlockingFileInputFormat format1 = new BlockingFileInputFormat(triggerLatch1, waitingLatch, new Path("test"), 20, 5);
    FileInputSplit[] splits = format1.createInputSplits(2);
    OneInputStreamOperatorTestHarness<TimestampedFileInputSplit, String> testHarness1 = getTestHarness(format1, 2, 0);
    testHarness1.open();
    testHarness1.processElement(new StreamRecord<>(getTimestampedSplit(0, splits[0])));
    // wait until it arrives at element 5
    if (!triggerLatch1.isTriggered()) {
        triggerLatch1.await();
    }
    // create the second instance and let it process the second split till element 15
    final OneShotLatch triggerLatch2 = new OneShotLatch();
    BlockingFileInputFormat format2 = new BlockingFileInputFormat(triggerLatch2, waitingLatch, new Path("test"), 20, 15);
    OneInputStreamOperatorTestHarness<TimestampedFileInputSplit, String> testHarness2 = getTestHarness(format2, 2, 1);
    testHarness2.open();
    testHarness2.processElement(new StreamRecord<>(getTimestampedSplit(0, splits[1])));
    // wait until it arrives at element 15
    if (!triggerLatch2.isTriggered()) {
        triggerLatch2.await();
    }
    // 1) clear the outputs of the two previous instances so that
    // we can compare their newly produced outputs with the merged one
    testHarness1.getOutput().clear();
    testHarness2.getOutput().clear();
    // 2) and take the snapshots from the previous instances and merge them
    // into a new one which will be then used to initialize a third instance
    OperatorStateHandles mergedState = AbstractStreamOperatorTestHarness.repackageState(testHarness2.snapshot(0, 0), testHarness1.snapshot(0, 0));
    // create the third instance
    final OneShotLatch wLatch = new OneShotLatch();
    final OneShotLatch tLatch = new OneShotLatch();
    BlockingFileInputFormat format = new BlockingFileInputFormat(wLatch, tLatch, new Path("test"), 20, 5);
    OneInputStreamOperatorTestHarness<TimestampedFileInputSplit, String> testHarness = getTestHarness(format, 1, 0);
    // initialize the state of the new operator with the state constructed by
    // combining the partial states of the instances above.
    testHarness.initializeState(mergedState);
    testHarness.open();
    // now restart the waiting operators
    wLatch.trigger();
    tLatch.trigger();
    waitingLatch.trigger();
    // and wait for the processing to finish
    synchronized (testHarness1.getCheckpointLock()) {
        testHarness1.close();
    }
    synchronized (testHarness2.getCheckpointLock()) {
        testHarness2.close();
    }
    synchronized (testHarness.getCheckpointLock()) {
        testHarness.close();
    }
    Queue<Object> expectedResult = new ArrayDeque<>();
    putElementsInQ(expectedResult, testHarness1.getOutput());
    putElementsInQ(expectedResult, testHarness2.getOutput());
    Queue<Object> actualResult = new ArrayDeque<>();
    putElementsInQ(actualResult, testHarness.getOutput());
    Assert.assertEquals(20, actualResult.size());
    Assert.assertArrayEquals(expectedResult.toArray(), actualResult.toArray());
}
Also used: Path(org.apache.flink.core.fs.Path) TimestampedFileInputSplit(org.apache.flink.streaming.api.functions.source.TimestampedFileInputSplit) ArrayDeque(java.util.ArrayDeque) FileInputSplit(org.apache.flink.core.fs.FileInputSplit) OperatorStateHandles(org.apache.flink.streaming.runtime.tasks.OperatorStateHandles) OneShotLatch(org.apache.flink.core.testutils.OneShotLatch) Test(org.junit.Test)
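
The helper putElementsInQ is not shown in this excerpt. A plausible minimal sketch, assuming it simply drains one harness output queue into the shared ArrayDeque (the real Flink helper may additionally filter out control elements such as checkpoint barriers):

private static void putElementsInQ(Queue<Object> queue, Queue<Object> output) {
    Object element;
    while ((element = output.poll()) != null) {
        queue.add(element); // FIFO order within each source queue is preserved
    }
}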

Example 20 with ArrayDeque

Use of java.util.ArrayDeque in project camel by apache.

The class DdbStreamConsumer, method createExchanges.

private Queue<Exchange> createExchanges(List<Record> records, String lastSeenSequenceNumber) {
    Queue<Exchange> exchanges = new ArrayDeque<>();
    BigIntComparisons condition = null;
    BigInteger providedSeqNum = null;
    if (lastSeenSequenceNumber != null) {
        providedSeqNum = new BigInteger(lastSeenSequenceNumber);
        condition = BigIntComparisons.Conditions.LT;
    }
    switch (getEndpoint().getIteratorType()) {
        case AFTER_SEQUENCE_NUMBER:
            condition = BigIntComparisons.Conditions.LT;
            providedSeqNum = new BigInteger(getEndpoint().getSequenceNumberProvider().getSequenceNumber());
            break;
        case AT_SEQUENCE_NUMBER:
            condition = BigIntComparisons.Conditions.LTEQ;
            providedSeqNum = new BigInteger(getEndpoint().getSequenceNumberProvider().getSequenceNumber());
            break;
        default:
    }
    for (Record record : records) {
        BigInteger recordSeqNum = new BigInteger(record.getDynamodb().getSequenceNumber());
        if (condition == null || condition.matches(providedSeqNum, recordSeqNum)) {
            exchanges.add(getEndpoint().createExchange(record));
        }
    }
    return exchanges;
}
Also used: Exchange(org.apache.camel.Exchange) BigInteger(java.math.BigInteger) Record(com.amazonaws.services.dynamodbv2.model.Record) ArrayDeque(java.util.ArrayDeque)
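
As a stand-alone illustration of the same idiom, the sketch below filters string sequence numbers through a BigInteger comparison into an ArrayDeque used as a FIFO queue. DynamoDB stream sequence numbers can exceed the long range, which is why the code above parses them as BigInteger. All names here are hypothetical stand-ins, not Camel API.

import java.math.BigInteger;
import java.util.ArrayDeque;
import java.util.List;
import java.util.Queue;

public class SequenceFilterSketch {

    static Queue<String> recordsAfter(List<String> sequenceNumbers, String lastSeen) {
        Queue<String> result = new ArrayDeque<>();
        BigInteger threshold = lastSeen == null ? null : new BigInteger(lastSeen);
        for (String seq : sequenceNumbers) {
            // threshold < seq, mirroring BigIntComparisons.Conditions.LT
            if (threshold == null || threshold.compareTo(new BigInteger(seq)) < 0) {
                result.add(seq);
            }
        }
        return result;
    }

    public static void main(String[] args) {
        System.out.println(recordsAfter(List.of("10", "20", "30"), "15")); // [20, 30]
    }
}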

Aggregations

ArrayDeque (java.util.ArrayDeque): 217
ArrayList (java.util.ArrayList): 36
Test (org.junit.Test): 36
IOException (java.io.IOException): 27
HashMap (java.util.HashMap): 23
List (java.util.List): 20
HashSet (java.util.HashSet): 19
Map (java.util.Map): 17
Deque (java.util.Deque): 11
Iterator (java.util.Iterator): 10
NoSuchElementException (java.util.NoSuchElementException): 8
AtomicLong (java.util.concurrent.atomic.AtomicLong): 8
File (java.io.File): 7
Path (java.nio.file.Path): 7
Random (java.util.Random): 7
ByteBuffer (java.nio.ByteBuffer): 5
AtomicReference (java.util.concurrent.atomic.AtomicReference): 5
HttpFields (org.eclipse.jetty.http.HttpFields): 5
Name (com.github.anba.es6draft.ast.scope.Name): 4
ExecutionContext (com.github.anba.es6draft.runtime.ExecutionContext): 4