
Example 36 with VisibleForTesting

Use of org.apache.flink.annotation.VisibleForTesting in project flink by apache.

The class ConfigOptionsDocGenerator, method processConfigOptions.

@VisibleForTesting
static void processConfigOptions(String rootDir, String module, String packageName, String pathPrefix, ThrowingConsumer<Class<?>, IOException> classConsumer) throws IOException, ClassNotFoundException {
    // resolve the directory that corresponds to the given package within the module
    Path configDir = Paths.get(rootDir, module, pathPrefix, packageName.replaceAll("\\.", "/"));
    try (DirectoryStream<Path> stream = Files.newDirectoryStream(configDir)) {
        for (Path entry : stream) {
            String fileName = entry.getFileName().toString();
            Matcher matcher = CLASS_NAME_PATTERN.matcher(fileName);
            if (matcher.matches()) {
                final String className = packageName + '.' + matcher.group(CLASS_NAME_GROUP);
                if (!EXCLUSIONS.contains(className)) {
                    Class<?> optionsClass = Class.forName(className);
                    classConsumer.accept(optionsClass);
                }
            }
        }
    }
}
Also used : Path(java.nio.file.Path) Matcher(java.util.regex.Matcher) VisibleForTesting(org.apache.flink.annotation.VisibleForTesting)
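
Because the method is package-private and only exposed for tests, a test in the same package can call it directly to collect every discovered options class. A minimal sketch of such a call, assuming it runs inside a test method that declares the checked exceptions and has the usual java.util imports; all path, module, and package values below are placeholders, not values taken from the Flink sources:

// Hypothetical test-side usage; every string argument here is illustrative only.
List<Class<?>> discovered = new ArrayList<>();
ConfigOptionsDocGenerator.processConfigOptions(
        "/path/to/flink-root",                // rootDir (placeholder)
        "flink-core",                         // module (placeholder)
        "org.apache.flink.configuration",     // packageName (placeholder)
        "src/main/java",                      // pathPrefix (placeholder)
        discovered::add);                     // collect each discovered options class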

Example 37 with VisibleForTesting

Use of org.apache.flink.annotation.VisibleForTesting in project flink by apache.

The class FlinkKafkaInternalProducer, method getTransactionCoordinatorId.

@VisibleForTesting
public int getTransactionCoordinatorId() {
    // reflectively reach into the KafkaProducer's internal TransactionManager
    Object transactionManager = getField(kafkaProducer, "transactionManager");
    Node node = (Node) invoke(transactionManager, "coordinator", FindCoordinatorRequest.CoordinatorType.TRANSACTION);
    return node.id();
}
Also used : Node(org.apache.kafka.common.Node) VisibleForTesting(org.apache.flink.annotation.VisibleForTesting)
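
The getField and invoke calls above are reflective helpers defined elsewhere in FlinkKafkaInternalProducer and are not shown on this page. A plausible sketch of such helpers, matching the call sites above; the bodies are illustrative assumptions, not the Flink source:

// Illustrative reflection helpers; requires java.lang.reflect.{Field, Method}.
private static Object getField(Object object, String fieldName) {
    try {
        Field field = object.getClass().getDeclaredField(fieldName);
        field.setAccessible(true);
        return field.get(object);
    } catch (NoSuchFieldException | IllegalAccessException e) {
        throw new RuntimeException("Incompatible KafkaProducer version", e);
    }
}

private static Object invoke(Object object, String methodName, Object... args) {
    // look the method up by the runtime types of the arguments
    Class<?>[] argTypes = new Class<?>[args.length];
    for (int i = 0; i < args.length; i++) {
        argTypes[i] = args[i].getClass();
    }
    try {
        Method method = object.getClass().getDeclaredMethod(methodName, argTypes);
        method.setAccessible(true);
        return method.invoke(object, args);
    } catch (ReflectiveOperationException e) {
        throw new RuntimeException("Incompatible KafkaProducer version", e);
    }
}

Reaching into private Kafka client internals like this is brittle across client versions, which is exactly why the accessor is annotated @VisibleForTesting rather than being part of the public API.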

Example 38 with VisibleForTesting

Use of org.apache.flink.annotation.VisibleForTesting in project flink by apache.

The class KafkaConsumerThread, method reassignPartitions.

// ------------------------------------------------------------------------
/**
 * Reestablishes the assigned partitions for the consumer. The reassigned partitions consist
 * of the provided new partitions and whatever partitions were previously assigned to the
 * consumer.
 *
 * <p>The reassignment process is protected against wakeup calls, so that after this method
 * returns, the consumer is either untouched or completely reassigned with the correct offset
 * positions.
 *
 * <p>If the consumer was already woken up prior to the reassignment, resulting in an
 * interruption at any time during the reassignment, the consumer is guaranteed to roll back
 * as if it was untouched. On the other hand, if there was an attempt to wake up the consumer
 * during the reassignment, the wakeup call is "buffered" until the reassignment completes.
 *
 * <p>This method is exposed for testing purposes.
 */
@VisibleForTesting
void reassignPartitions(List<KafkaTopicPartitionState<T, TopicPartition>> newPartitions) throws Exception {
    if (newPartitions.size() == 0) {
        return;
    }
    hasAssignedPartitions = true;
    boolean reassignmentStarted = false;
    // since the reassignment may introduce several Kafka blocking calls that cannot be
    // interrupted, the consumer needs to be isolated from external wakeup calls in
    // setOffsetsToCommit() and shutdown() until the reassignment is complete.
    final KafkaConsumer<byte[], byte[]> consumerTmp;
    synchronized (consumerReassignmentLock) {
        consumerTmp = this.consumer;
        this.consumer = null;
    }
    final Map<TopicPartition, Long> oldPartitionAssignmentsToPosition = new HashMap<>();
    try {
        for (TopicPartition oldPartition : consumerTmp.assignment()) {
            oldPartitionAssignmentsToPosition.put(oldPartition, consumerTmp.position(oldPartition));
        }
        final List<TopicPartition> newPartitionAssignments = new ArrayList<>(newPartitions.size() + oldPartitionAssignmentsToPosition.size());
        newPartitionAssignments.addAll(oldPartitionAssignmentsToPosition.keySet());
        newPartitionAssignments.addAll(convertKafkaPartitions(newPartitions));
        // reassign with the new partitions
        consumerTmp.assign(newPartitionAssignments);
        reassignmentStarted = true;
        // old partitions should be sought back to their previous positions
        for (Map.Entry<TopicPartition, Long> oldPartitionToPosition : oldPartitionAssignmentsToPosition.entrySet()) {
            consumerTmp.seek(oldPartitionToPosition.getKey(), oldPartitionToPosition.getValue());
        }
        // offsets of the new partitions may still be sentinel placeholder values;
        // replace those with actual offsets, according to what each sentinel value represents.
        for (KafkaTopicPartitionState<T, TopicPartition> newPartitionState : newPartitions) {
            if (newPartitionState.getOffset() == KafkaTopicPartitionStateSentinel.EARLIEST_OFFSET) {
                consumerTmp.seekToBeginning(Collections.singletonList(newPartitionState.getKafkaPartitionHandle()));
                newPartitionState.setOffset(consumerTmp.position(newPartitionState.getKafkaPartitionHandle()) - 1);
            } else if (newPartitionState.getOffset() == KafkaTopicPartitionStateSentinel.LATEST_OFFSET) {
                consumerTmp.seekToEnd(Collections.singletonList(newPartitionState.getKafkaPartitionHandle()));
                newPartitionState.setOffset(consumerTmp.position(newPartitionState.getKafkaPartitionHandle()) - 1);
            } else if (newPartitionState.getOffset() == KafkaTopicPartitionStateSentinel.GROUP_OFFSET) {
                // the KafkaConsumer by default will automatically seek the consumer position
                // to the committed group offset, so we do not need to do it.
                newPartitionState.setOffset(consumerTmp.position(newPartitionState.getKafkaPartitionHandle()) - 1);
            } else {
                consumerTmp.seek(newPartitionState.getKafkaPartitionHandle(), newPartitionState.getOffset() + 1);
            }
        }
    } catch (WakeupException e) {
        synchronized (consumerReassignmentLock) {
            this.consumer = consumerTmp;
            // we do a full rollback so that the consumer is as if it was left untouched
            if (reassignmentStarted) {
                this.consumer.assign(new ArrayList<>(oldPartitionAssignmentsToPosition.keySet()));
                for (Map.Entry<TopicPartition, Long> oldPartitionToPosition : oldPartitionAssignmentsToPosition.entrySet()) {
                    this.consumer.seek(oldPartitionToPosition.getKey(), oldPartitionToPosition.getValue());
                }
            }
            // no need to restore the wakeup state in this case,
            // since only the last wakeup call is effective anyways
            hasBufferedWakeup = false;
            // re-add the new partitions back to the unassigned partitions queue,
            // so that the reassignment will be attempted again
            for (KafkaTopicPartitionState<T, TopicPartition> newPartition : newPartitions) {
                unassignedPartitionsQueue.add(newPartition);
            }
            // this exception signals the main fetch loop to continue with its next iteration
            throw new AbortedReassignmentException();
        }
    }
    // reassignment complete; expose the reassigned consumer
    synchronized (consumerReassignmentLock) {
        this.consumer = consumerTmp;
        // restore wakeup state for the consumer if necessary
        if (hasBufferedWakeup) {
            this.consumer.wakeup();
            hasBufferedWakeup = false;
        }
    }
}
Also used : HashMap(java.util.HashMap) ArrayList(java.util.ArrayList) WakeupException(org.apache.kafka.common.errors.WakeupException) TopicPartition(org.apache.kafka.common.TopicPartition) HashMap(java.util.HashMap) Map(java.util.Map) VisibleForTesting(org.apache.flink.annotation.VisibleForTesting)
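
The buffering described in the Javadoc only works because the external wakeup path takes the same lock and checks whether a reassignment is in flight (the consumer reference is null while one is). A sketch of what that counterpart could look like, using the field names from the snippet above; the method body is illustrative, not the Flink source:

// Illustrative counterpart to the buffering logic in reassignPartitions().
// While a reassignment is in flight, this.consumer is null, so the wakeup is
// recorded and replayed once the reassigned consumer is exposed again.
private void wakeupConsumer() {
    synchronized (consumerReassignmentLock) {
        if (consumer != null) {
            consumer.wakeup();
        } else {
            hasBufferedWakeup = true;
        }
    }
}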

Example 39 with VisibleForTesting

Use of org.apache.flink.annotation.VisibleForTesting in project flink by apache.

The class JarHandlerUtils, method tokenizeArguments.

/**
 * Takes program arguments as a single string and splits them into a list of strings.
 *
 * <pre>
 * tokenizeArguments("--foo bar")            = ["--foo" "bar"]
 * tokenizeArguments("--foo \"bar baz\"")    = ["--foo" "bar baz"]
 * tokenizeArguments("--foo 'bar baz'")      = ["--foo" "bar baz"]
 * tokenizeArguments(null)                   = []
 * </pre>
 *
 * <strong>WARNING: </strong>This method does not respect escaped quotes.
 */
@VisibleForTesting
static List<String> tokenizeArguments(@Nullable final String args) {
    if (args == null) {
        return Collections.emptyList();
    }
    final Matcher matcher = ARGUMENTS_TOKENIZE_PATTERN.matcher(args);
    final List<String> tokens = new ArrayList<>();
    while (matcher.find()) {
        tokens.add(matcher.group().trim().replace("\"", "").replace("\'", ""));
    }
    return tokens;
}
Also used : Matcher(java.util.regex.Matcher) ArrayList(java.util.ArrayList) VisibleForTesting(org.apache.flink.annotation.VisibleForTesting)
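
The ARGUMENTS_TOKENIZE_PATTERN constant is defined elsewhere in JarHandlerUtils and is not shown on this page. A definition consistent with the documented behavior might look as follows; treat it as an assumption, not the verbatim Flink constant:

// Assumed definition: a token is either an unquoted run of non-whitespace,
// a double-quoted run, or a single-quoted run; tokenizeArguments() strips
// the quotes afterwards. Escaped quotes are not handled, per the WARNING.
private static final Pattern ARGUMENTS_TOKENIZE_PATTERN =
        Pattern.compile("([^\"']\\S*|\".+?\"|'.+?')\\s*");

With this pattern, the Javadoc examples above hold: the quoted run "bar baz" is matched as a single token and the surrounding quotes are stripped afterwards by tokenizeArguments().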

Example 40 with VisibleForTesting

Use of org.apache.flink.annotation.VisibleForTesting in project flink by apache.

The class JarRunHandler, method handleRequest.

@Override
@VisibleForTesting
public CompletableFuture<JarRunResponseBody> handleRequest(@Nonnull final HandlerRequest<JarRunRequestBody> request, @Nonnull final DispatcherGateway gateway) throws RestHandlerException {
    final Configuration effectiveConfiguration = new Configuration(configuration);
    effectiveConfiguration.set(DeploymentOptions.ATTACHED, false);
    effectiveConfiguration.set(DeploymentOptions.TARGET, EmbeddedExecutor.NAME);
    final JarHandlerContext context = JarHandlerContext.fromRequest(request, jarDir, log);
    context.applyToConfiguration(effectiveConfiguration);
    SavepointRestoreSettings.toConfiguration(getSavepointRestoreSettings(request), effectiveConfiguration);
    final PackagedProgram program = context.toPackagedProgram(effectiveConfiguration);
    return CompletableFuture.supplyAsync(() -> applicationRunner.run(gateway, program, effectiveConfiguration), executor).handle((jobIds, throwable) -> {
        program.close();
        if (throwable != null) {
            throw new CompletionException(new RestHandlerException("Could not execute application.", HttpResponseStatus.BAD_REQUEST, throwable));
        } else if (jobIds.isEmpty()) {
            throw new CompletionException(new RestHandlerException("No jobs included in application.", HttpResponseStatus.BAD_REQUEST));
        }
        return new JarRunResponseBody(jobIds.get(0));
    });
}
Also used : PackagedProgram(org.apache.flink.client.program.PackagedProgram) Configuration(org.apache.flink.configuration.Configuration) JarHandlerContext(org.apache.flink.runtime.webmonitor.handlers.utils.JarHandlerUtils.JarHandlerContext) CompletionException(java.util.concurrent.CompletionException) RestHandlerException(org.apache.flink.runtime.rest.handler.RestHandlerException) VisibleForTesting(org.apache.flink.annotation.VisibleForTesting)
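
A detail worth noting in handleRequest is the handle() callback: a BiFunction cannot throw checked exceptions, so failures are wrapped in CompletionException, which fails the resulting future and is unwrapped again by downstream consumers. A minimal, self-contained demonstration of that pattern using only plain JDK types (no Flink classes involved):

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CompletionException;

public class HandleWrapDemo {
    public static void main(String[] args) {
        CompletableFuture<String> future = CompletableFuture
                .<String>supplyAsync(() -> { throw new RuntimeException("boom"); })
                .handle((result, throwable) -> {
                    if (throwable != null) {
                        // handle() would otherwise swallow the upstream failure;
                        // throwing CompletionException fails the returned future.
                        throw new CompletionException(
                                new IllegalStateException("Could not execute application.", throwable));
                    }
                    return result;
                });
        try {
            future.join();
        } catch (CompletionException e) {
            // join() surfaces the wrapped cause we attached in handle()
            System.out.println("failed with: " + e.getCause());
        }
    }
}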

Aggregations

VisibleForTesting (org.apache.flink.annotation.VisibleForTesting)64 HashMap (java.util.HashMap)11 IOException (java.io.IOException)8 ArrayList (java.util.ArrayList)7 Configuration (org.apache.flink.configuration.Configuration)7 Map (java.util.Map)6 File (java.io.File)5 URI (java.net.URI)4 List (java.util.List)4 Tuple2 (org.apache.flink.api.java.tuple.Tuple2)4 Field (java.lang.reflect.Field)3 Set (java.util.Set)3 Nullable (javax.annotation.Nullable)3 ByteArrayOutputStream (java.io.ByteArrayOutputStream)2 InputStream (java.io.InputStream)2 Path (java.nio.file.Path)2 ConcurrentHashMap (java.util.concurrent.ConcurrentHashMap)2 Matcher (java.util.regex.Matcher)2 MetricGroup (org.apache.flink.metrics.MetricGroup)2 ExecutionJobVertex (org.apache.flink.runtime.executiongraph.ExecutionJobVertex)2