Search in sources:

Example 21 with Pair

use of org.apache.heron.common.basics.Pair in project heron by twitter.

In class TopologyResource, the method submit:

/**
 * Submits a topology described by a multipart form upload.
 * <p>
 * Validates the required form fields, stages the topology definition, binary,
 * and configuration into a temporary directory, applies submit-time overrides,
 * packages everything into a tar.gz, and executes a submit action. The
 * temporary staging directory is always deleted, even on failure.
 *
 * @param form multipart form carrying the topology definition, binary, and submit parameters
 * @return 201 (Created) with the topology location on success; 422 on validation
 *         failure; the dry-run response when dry-run is requested; 500 on unexpected errors
 * @throws IOException if the temporary staging directory cannot be created
 */
@POST
@Consumes(MediaType.MULTIPART_FORM_DATA)
@Produces(MediaType.APPLICATION_JSON)
@SuppressWarnings({ "IllegalCatch", "JavadocMethod" })
public Response submit(FormDataMultiPart form) throws IOException {
    // Verify that we have all the required params before doing any work.
    final List<String> missingDataKeys = verifyKeys(form.getFields().keySet(), REQUIRED_SUBMIT_TOPOLOGY_PARAMS);
    if (!missingDataKeys.isEmpty()) {
        // Return an error since we are missing required parameters.
        final String message = String.format("Validation failed missing required params: %s", missingDataKeys.toString());
        return Response.status(HTTP_UNPROCESSABLE_ENTITY_CODE).type(MediaType.APPLICATION_JSON).entity(Utils.createValidationError(message, missingDataKeys)).build();
    }
    // The request must target the cluster this API server manages.
    final String cluster = Forms.getString(form, FORM_KEY_CLUSTER);
    if (!doesClusterMatch(cluster)) {
        return Response.status(HTTP_UNPROCESSABLE_ENTITY_CODE).type(MediaType.APPLICATION_JSON).entity(Utils.createMessage(String.format("Unknown cluster %s expecting '%s'", cluster, getCluster()))).build();
    }
    final String topologyName = Forms.getString(form, FORM_KEY_NAME);
    final String role = Forms.getString(form, FORM_KEY_ROLE);
    final String environment = Forms.getString(form, FORM_KEY_ENVIRONMENT, Constants.DEFAULT_HERON_ENVIRONMENT);
    // User defaults to the role when not provided explicitly.
    final String user = Forms.getString(form, FORM_KEY_USER, role);
    // Submit overrides are passed as key=value form fields.
    final Map<String, String> submitOverrides = getSubmitOverrides(form);
    final String topologyDirectory = Files.createTempDirectory(topologyName).toFile().getAbsolutePath();
    try {
        // Upload the topology definition file to the topology directory.
        final FormDataBodyPart definitionFilePart = form.getField(FORM_KEY_DEFINITION);
        final File topologyDefinitionFile = Forms.uploadFile(definitionFilePart, topologyDirectory);
        // Upload the topology binary file to the topology directory.
        final FormDataBodyPart topologyFilePart = form.getField(FORM_KEY_TOPOLOGY);
        final File topologyBinaryFile = Forms.uploadFile(topologyFilePart, topologyDirectory);
        // Note: these flags are enabled by the mere PRESENCE of the form key,
        // not by its value.
        final boolean isDryRun = form.getFields().containsKey(PARAM_DRY_RUN);
        final boolean isVerbose = form.getFields().containsKey("verbose");
        final boolean isVerboseGC = form.getFields().containsKey("verbose_gc");
        // Copy configuration files to the sandbox config location:
        // topology-dir/<default-heron-sandbox-config>
        FileHelper.copyDirectory(Paths.get(getConfigurationDirectory()), Paths.get(topologyDirectory, Constants.DEFAULT_HERON_SANDBOX_CONFIG));
        final java.nio.file.Path overridesPath = Paths.get(topologyDirectory, Constants.DEFAULT_HERON_SANDBOX_CONFIG, Constants.OVERRIDE_FILE);
        // Copy the override file into the topology configuration directory.
        FileHelper.copy(Paths.get(getConfigurationOverridePath()), overridesPath);
        // Apply submit overrides on top of the copied override file.
        ConfigUtils.applyOverrides(overridesPath, submitOverrides);
        // Apply overrides to the state manager config as well.
        ConfigUtils.applyOverridesToStateManagerConfig(overridesPath, Paths.get(topologyDirectory, Constants.DEFAULT_HERON_SANDBOX_CONFIG, Constants.STATE_MANAGER_FILE));
        // Create a tar file from the contents of the topology directory.
        final File topologyPackageFile = Paths.get(topologyDirectory, TOPOLOGY_TAR_GZ_FILENAME).toFile();
        FileHelper.createTarGz(topologyPackageFile, FileHelper.getChildren(topologyDirectory));
        // Build the submit config: topology config entries first, then the
        // request-level keys (cluster, name, role, ...) appended afterwards.
        Config topologyConfig = ConfigUtils.getTopologyConfig(topologyPackageFile.getAbsolutePath(), topologyBinaryFile.getName(), topologyDefinitionFile.getAbsolutePath());
        List<Pair<String, Object>> val = new LinkedList<>();
        for (Map.Entry<String, Object> entry : topologyConfig.getEntrySet()) {
            val.add(Pair.create(entry.getKey(), entry.getValue()));
        }
        val.addAll(Arrays.asList(Pair.create(Key.CLUSTER.value(), cluster), Pair.create(Key.TOPOLOGY_NAME.value(), topologyName), Pair.create(Key.ROLE.value(), role), Pair.create(Key.ENVIRON.value(), environment), Pair.create(Key.SUBMIT_USER.value(), user), Pair.create(Key.DRY_RUN.value(), isDryRun), Pair.create(Key.VERBOSE.value(), isVerbose), Pair.create(Key.VERBOSE_GC.value(), isVerboseGC)));
        final Config config = createConfig(val, submitOverrides);
        // Submit the topology.
        getActionFactory().createSubmitAction(config, topologyPackageFile.getAbsolutePath(), topologyBinaryFile.getName(), topologyDefinitionFile.getAbsolutePath()).execute();
        return Response.created(URI.create(String.format(TOPOLOGY_PATH_FORMAT, cluster, role, environment, topologyName))).type(MediaType.APPLICATION_JSON).entity(createdResponse(cluster, role, environment, topologyName)).build();
    } catch (SubmitDryRunResponse response) {
        // Dry-run results are signaled via this exception type; render them as a response.
        return createDryRunResponse(response, Forms.getString(form, PARAM_DRY_RUN_FORMAT, DEFAULT_DRY_RUN_FORMAT));
    } catch (Exception ex) {
        LOG.error("error submitting topology {}", topologyName, ex);
        return Response.serverError().type(MediaType.APPLICATION_JSON).entity(Utils.createMessage(ex.getMessage())).build();
    } finally {
        // Always remove the temporary staging directory.
        FileUtils.deleteDir(topologyDirectory);
    }
}
Also used : Config(org.apache.heron.spi.common.Config) LinkedList(java.util.LinkedList) IOException(java.io.IOException) SubmitDryRunResponse(org.apache.heron.scheduler.dryrun.SubmitDryRunResponse) FormDataBodyPart(org.glassfish.jersey.media.multipart.FormDataBodyPart) File(java.io.File) HashMap(java.util.HashMap) Map(java.util.Map) MultivaluedMap(javax.ws.rs.core.MultivaluedMap) Pair(org.apache.heron.common.basics.Pair) POST(javax.ws.rs.POST) Consumes(javax.ws.rs.Consumes) Produces(javax.ws.rs.Produces)

Example 22 with Pair

use of org.apache.heron.common.basics.Pair in project heron by twitter.

In class NetworkUtils, the method establishSSHTunnelIfNeeded:

/**
 * Tests if a network location is reachable, establishing an SSH tunnel when it is
 * not directly reachable. This is best effort and may give false not reachable.
 *
 * @param endpoint the endpoint to connect to
 * @param tunnelHost the host used to tunnel
 * @param tunnelType what type of tunnel should be established
 * @param timeout Open connection will wait for this timeout in ms.
 * @param retryCount In case of connection timeout try retryCount times.
 * @param retryInterval the interval in ms to retryCount
 * @param verifyCount In case of longer tunnel setup, try verify times to wait
 * @return a &lt;new_reachable_endpoint, tunnelProcess&gt; pair.
 * If the endpoint already reachable, then new_reachable_endpoint equals to original endpoint, and
 * tunnelProcess is null.
 * If no way to reach even through ssh tunneling,
 * then both new_reachable_endpoint and tunnelProcess are null.
 */
private static Pair<InetSocketAddress, Process> establishSSHTunnelIfNeeded(InetSocketAddress endpoint, String tunnelHost, TunnelType tunnelType, Duration timeout, int retryCount, Duration retryInterval, int verifyCount) {
    if (NetworkUtils.isLocationReachable(endpoint, timeout, retryCount, retryInterval)) {
        // Already reachable; return the original endpoint with no tunnel process.
        return new Pair<InetSocketAddress, Process>(endpoint, null);
    } else {
        // Cannot reach directly; try to set up an ssh tunnel through a local free port.
        int localFreePort = SysUtils.getFreePort();
        InetSocketAddress newEndpoint = new InetSocketAddress(LOCAL_HOST, localFreePort);
        LOG.log(Level.FINE, "Trying to opening up tunnel to {0} from {1}", new Object[] { endpoint.toString(), newEndpoint.toString() });
        // Set up the tunnel process.
        final Process tunnelProcess;
        switch(tunnelType) {
            case PORT_FORWARD:
                tunnelProcess = ShellUtils.establishSSHTunnelProcess(tunnelHost, localFreePort, endpoint.getHostString(), endpoint.getPort());
                break;
            case SOCKS_PROXY:
                tunnelProcess = ShellUtils.establishSocksProxyProcess(tunnelHost, localFreePort);
                break;
            default:
                throw new IllegalArgumentException("Unrecognized TunnelType passed: " + tunnelType);
        }
        // Verify whether the tunnel process is working fine.
        if (tunnelProcess != null && tunnelProcess.isAlive() && NetworkUtils.isLocationReachable(newEndpoint, timeout, verifyCount, retryInterval)) {
            // Ensure the tunnel process is torn down when the JVM exits.
            java.lang.Runtime.getRuntime().addShutdownHook(new Thread() {

                @Override
                public void run() {
                    tunnelProcess.destroy();
                }
            });
            // Can reach the destination via ssh tunnel.
            return new Pair<InetSocketAddress, Process>(newEndpoint, tunnelProcess);
        }
        LOG.log(Level.FINE, "Failed to opening up tunnel to {0} from {1}. Releasing process..", new Object[] { endpoint, newEndpoint });
        // Fix: the shell helpers above may return null (the success branch guards
        // against exactly that), so destroy must be null-checked here too or this
        // failure path throws a NullPointerException instead of returning.
        if (tunnelProcess != null) {
            tunnelProcess.destroy();
        }
    }
    // No way to reach the destination. Return null.
    return new Pair<InetSocketAddress, Process>(null, null);
}
Also used : InetSocketAddress(java.net.InetSocketAddress) Pair(org.apache.heron.common.basics.Pair)

Example 23 with Pair

use of org.apache.heron.common.basics.Pair in project heron by twitter.

In class KubernetesContextTest, the method createVolumeConfigs:

/**
 * Generates <code>Volume</code> configuration test cases.
 * @param testCases Test case container.
 *                  Input: [0] Config, [1] Boolean to indicate Manager/Executor.
 *                  Output: [0] expectedKeys, [1] expectedOptionsKeys, [2] expectedOptionsValues.
 * @param prefix Configuration prefix key to use in lookup.
 */
private void createVolumeConfigs(List<TestTuple<Pair<Config, Boolean>, Object[]>> testCases, String prefix) {
    final String keyTemplate = prefix + "%%s.%%s";
    final String executorKey = String.format(keyTemplate, KubernetesConstants.EXECUTOR_NAME);
    final String managerKey = String.format(keyTemplate, KubernetesConstants.MANAGER_NAME);
    final String firstVolume = "volume-name-one";
    final String secondVolume = "volume-name-two";
    final String claimName = "OnDeMaNd";
    final String storageClassField = VolumeConfigKeys.storageClassName.name();
    final String pathField = VolumeConfigKeys.path.name();
    final String claimNameField = VolumeConfigKeys.claimName.name();
    final String expectedStorageClass = "expected-storage-class";
    final String expectedPath = "/path/for/volume/expected";
    // Emit an Executor case pair first, then a Manager case pair.
    for (boolean isExecutor : new boolean[] {true, false}) {
        final String key = isExecutor ? executorKey : managerKey;
        final String description =
            isExecutor ? KubernetesConstants.EXECUTOR_NAME : KubernetesConstants.MANAGER_NAME;
        final String storageClassKeyOne = String.format(key, firstVolume, storageClassField);
        final String storageClassKeyTwo = String.format(key, secondVolume, storageClassField);
        final String pathKeyOne = String.format(key, firstVolume, pathField);
        final String pathKeyTwo = String.format(key, secondVolume, pathField);
        final String claimNameKeyOne = String.format(key, firstVolume, claimNameField);
        final String claimNameKeyTwo = String.format(key, secondVolume, claimNameField);
        // Fully-populated PVC configuration: expect both volumes with all three options.
        final Config configPVC = Config.newBuilder()
            .put(pathKeyOne, expectedPath)
            .put(pathKeyTwo, expectedPath)
            .put(claimNameKeyOne, claimName)
            .put(claimNameKeyTwo, claimName)
            .put(storageClassKeyOne, expectedStorageClass)
            .put(storageClassKeyTwo, expectedStorageClass)
            .build();
        final List<String> expectedKeys = Arrays.asList(firstVolume, secondVolume);
        final List<VolumeConfigKeys> expectedOptionsKeys =
            Arrays.asList(VolumeConfigKeys.path, VolumeConfigKeys.storageClassName, VolumeConfigKeys.claimName);
        final List<String> expectedOptionsValues =
            Arrays.asList(expectedPath, expectedStorageClass, claimName);
        testCases.add(new TestTuple<>(description,
            new Pair<>(configPVC, isExecutor),
            new Object[] { expectedKeys, expectedOptionsKeys, expectedOptionsValues }));
        // Same configuration with CLI volumes disabled: expect empty outputs, no error.
        final Config configPVCDisabled = Config.newBuilder()
            .put(KubernetesContext.KUBERNETES_VOLUME_FROM_CLI_DISABLED, "true")
            .put(pathKeyOne, expectedPath)
            .put(pathKeyTwo, expectedPath)
            .put(claimNameKeyOne, claimName)
            .put(claimNameKeyTwo, claimName)
            .put(storageClassKeyOne, expectedStorageClass)
            .put(storageClassKeyTwo, expectedStorageClass)
            .build();
        testCases.add(new TestTuple<>(description + " Disabled should not error",
            new Pair<>(configPVCDisabled, !isExecutor),
            new Object[] { new LinkedList<String>(), new LinkedList<VolumeConfigKeys>(), new LinkedList<String>() }));
    }
}
Also used : VolumeConfigKeys(org.apache.heron.scheduler.kubernetes.KubernetesConstants.VolumeConfigKeys) Config(org.apache.heron.spi.common.Config) LinkedList(java.util.LinkedList) Pair(org.apache.heron.common.basics.Pair)

Example 24 with Pair

use of org.apache.heron.common.basics.Pair in project heron by twitter.

In class KubernetesContextTest, the method createVolumeNFS:

/**
 * Create test cases for <code>NFS</code> volumes.
 * @param testCases Test case container.
 *                  Input: [0] Config, [1] Boolean to indicate Manager/Executor.
 *                  Output: {@code Map<String, Map<VolumeConfigKeys, String>>}.
 * @param isExecutor Boolean to indicate Manager/Executor test case generation.
 */
private void createVolumeNFS(List<TestTuple<Pair<Config, Boolean>, Map<String, Map<VolumeConfigKeys, String>>>> testCases, boolean isExecutor) {
    final String volumeNameValid = "volume-name-valid";
    final String passingValue = "should-pass";
    final String processName = isExecutor ? KubernetesConstants.EXECUTOR_NAME : KubernetesConstants.MANAGER_NAME;
    // "%%s" escapes survive the first format, leaving "%s.%s" placeholders for
    // the later per-volume/per-option String.format calls.
    final String keyPattern = String.format(KubernetesContext.KUBERNETES_VOLUME_NFS_PREFIX + "%%s.%%s", processName);
    // With readOnly.
    final Map<String, Map<VolumeConfigKeys, String>> expectedWithReadOnly = ImmutableMap.of(volumeNameValid, new HashMap<VolumeConfigKeys, String>() {

        {
            put(VolumeConfigKeys.server, "nfs-server.default.local");
            put(VolumeConfigKeys.readOnly, "true");
            put(VolumeConfigKeys.pathOnNFS, passingValue);
            put(VolumeConfigKeys.path, passingValue);
            put(VolumeConfigKeys.subPath, passingValue);
            // NOTE(review): this second readOnly put overwrites the "true" entry
            // above, so the expected value ends up as passingValue — confirm the
            // duplicate is intentional (the Config below repeats readOnly the same way).
            put(VolumeConfigKeys.readOnly, passingValue);
        }
    });
    // The builder also puts "readOnly" twice; the last put ("should-pass") wins.
    final Config configWithReadOnly = Config.newBuilder().put(String.format(keyPattern, volumeNameValid, "server"), "nfs-server.default.local").put(String.format(keyPattern, volumeNameValid, "readOnly"), "true").put(String.format(keyPattern, volumeNameValid, "pathOnNFS"), passingValue).put(String.format(keyPattern, volumeNameValid, "path"), passingValue).put(String.format(keyPattern, volumeNameValid, "subPath"), passingValue).put(String.format(keyPattern, volumeNameValid, "readOnly"), passingValue).build();
    testCases.add(new TestTuple<>(processName + ": `NFS` with `readOnly`", new Pair<>(configWithReadOnly, isExecutor), expectedWithReadOnly));
    // Without readOnly (no explicit "true" entry).
    final Map<String, Map<VolumeConfigKeys, String>> expectedWithoutReadOnly = ImmutableMap.of(volumeNameValid, new HashMap<VolumeConfigKeys, String>() {

        {
            put(VolumeConfigKeys.server, "nfs-server.default.local");
            put(VolumeConfigKeys.pathOnNFS, passingValue);
            put(VolumeConfigKeys.path, passingValue);
            put(VolumeConfigKeys.subPath, passingValue);
            put(VolumeConfigKeys.readOnly, passingValue);
        }
    });
    final Config configWithoutReadOnly = Config.newBuilder().put(String.format(keyPattern, volumeNameValid, "server"), "nfs-server.default.local").put(String.format(keyPattern, volumeNameValid, "pathOnNFS"), passingValue).put(String.format(keyPattern, volumeNameValid, "path"), passingValue).put(String.format(keyPattern, volumeNameValid, "subPath"), passingValue).put(String.format(keyPattern, volumeNameValid, "readOnly"), passingValue).build();
    testCases.add(new TestTuple<>(processName + ": `NFS` without `readOnly`", new Pair<>(configWithoutReadOnly, isExecutor), expectedWithoutReadOnly));
    // Ignored: the Config targets the opposite process, so no volumes are expected.
    final Config configIgnored = Config.newBuilder().put(String.format(keyPattern, volumeNameValid, "server"), "nfs-server.default.local").put(String.format(keyPattern, volumeNameValid, "readOnly"), "true").put(String.format(keyPattern, volumeNameValid, "pathOnNFS"), passingValue).put(String.format(keyPattern, volumeNameValid, "path"), passingValue).put(String.format(keyPattern, volumeNameValid, "subPath"), passingValue).put(String.format(keyPattern, volumeNameValid, "readOnly"), passingValue).build();
    testCases.add(new TestTuple<>(processName + ": `NFS` ignored", new Pair<>(configIgnored, !isExecutor), new HashMap<>()));
}
Also used : VolumeConfigKeys(org.apache.heron.scheduler.kubernetes.KubernetesConstants.VolumeConfigKeys) HashMap(java.util.HashMap) Config(org.apache.heron.spi.common.Config) ImmutableMap(com.google.common.collect.ImmutableMap) HashMap(java.util.HashMap) Map(java.util.Map) Pair(org.apache.heron.common.basics.Pair)

Example 25 with Pair

use of org.apache.heron.common.basics.Pair in project heron by twitter.

In class KubernetesContextTest, the method createVolumeEmptyDir:

/**
 * Generates test cases for <code>Empty Directory</code> volumes.
 * @param testCases Test case container.
 *                  Input: [0] Config, [1] Boolean to indicate Manager/Executor.
 *                  Output: {@code Map<String, Map<VolumeConfigKeys, String>>}.
 * @param isExecutor Boolean to indicate Manager/Executor test case generation.
 */
private void createVolumeEmptyDir(List<TestTuple<Pair<Config, Boolean>, Map<String, Map<VolumeConfigKeys, String>>>> testCases, boolean isExecutor) {
    final String volumeName = "volume-name-valid";
    final String okValue = "should-pass";
    final String processName =
        isExecutor ? KubernetesConstants.EXECUTOR_NAME : KubernetesConstants.MANAGER_NAME;
    // "%%s" escapes survive this format, leaving "%s.%s" for per-volume/per-option keys.
    final String keyTemplate =
        String.format(KubernetesContext.KUBERNETES_VOLUME_EMPTYDIR_PREFIX + "%%s.%%s", processName);
    // Case 1: `emptyDir` with an explicit `medium`.
    final Map<VolumeConfigKeys, String> optionsWithMedium = new HashMap<>();
    optionsWithMedium.put(VolumeConfigKeys.sizeLimit, okValue);
    optionsWithMedium.put(VolumeConfigKeys.medium, "Memory");
    optionsWithMedium.put(VolumeConfigKeys.path, okValue);
    optionsWithMedium.put(VolumeConfigKeys.subPath, okValue);
    optionsWithMedium.put(VolumeConfigKeys.readOnly, okValue);
    final Config configWithMedium = Config.newBuilder()
        .put(String.format(keyTemplate, volumeName, "sizeLimit"), okValue)
        .put(String.format(keyTemplate, volumeName, "medium"), "Memory")
        .put(String.format(keyTemplate, volumeName, "path"), okValue)
        .put(String.format(keyTemplate, volumeName, "subPath"), okValue)
        .put(String.format(keyTemplate, volumeName, "readOnly"), okValue)
        .build();
    testCases.add(new TestTuple<>(processName + ": `emptyDir` with `medium`",
        new Pair<>(configWithMedium, isExecutor), ImmutableMap.of(volumeName, optionsWithMedium)));
    // Case 2: `emptyDir` with an empty `medium` value.
    final Map<VolumeConfigKeys, String> optionsEmptyMedium = new HashMap<>();
    optionsEmptyMedium.put(VolumeConfigKeys.sizeLimit, okValue);
    optionsEmptyMedium.put(VolumeConfigKeys.medium, "");
    optionsEmptyMedium.put(VolumeConfigKeys.path, okValue);
    optionsEmptyMedium.put(VolumeConfigKeys.subPath, okValue);
    optionsEmptyMedium.put(VolumeConfigKeys.readOnly, okValue);
    final Config configEmptyMedium = Config.newBuilder()
        .put(String.format(keyTemplate, volumeName, "sizeLimit"), okValue)
        .put(String.format(keyTemplate, volumeName, "medium"), "")
        .put(String.format(keyTemplate, volumeName, "path"), okValue)
        .put(String.format(keyTemplate, volumeName, "subPath"), okValue)
        .put(String.format(keyTemplate, volumeName, "readOnly"), okValue)
        .build();
    testCases.add(new TestTuple<>(processName + ": `emptyDir` with empty `medium`",
        new Pair<>(configEmptyMedium, isExecutor), ImmutableMap.of(volumeName, optionsEmptyMedium)));
    // Case 3: `emptyDir` with no `medium` key at all.
    final Map<VolumeConfigKeys, String> optionsNoMedium = new HashMap<>();
    optionsNoMedium.put(VolumeConfigKeys.sizeLimit, okValue);
    optionsNoMedium.put(VolumeConfigKeys.path, okValue);
    optionsNoMedium.put(VolumeConfigKeys.subPath, okValue);
    optionsNoMedium.put(VolumeConfigKeys.readOnly, okValue);
    final Config configNoMedium = Config.newBuilder()
        .put(String.format(keyTemplate, volumeName, "sizeLimit"), okValue)
        .put(String.format(keyTemplate, volumeName, "path"), okValue)
        .put(String.format(keyTemplate, volumeName, "subPath"), okValue)
        .put(String.format(keyTemplate, volumeName, "readOnly"), okValue)
        .build();
    testCases.add(new TestTuple<>(processName + ": `emptyDir` without `medium`",
        new Pair<>(configNoMedium, isExecutor), ImmutableMap.of(volumeName, optionsNoMedium)));
    // Case 4: ignored — the Config targets the opposite process, so nothing is expected.
    final Config configIgnored = Config.newBuilder()
        .put(String.format(keyTemplate, volumeName, "sizeLimit"), okValue)
        .put(String.format(keyTemplate, volumeName, "medium"), "")
        .put(String.format(keyTemplate, volumeName, "path"), okValue)
        .put(String.format(keyTemplate, volumeName, "subPath"), okValue)
        .put(String.format(keyTemplate, volumeName, "readOnly"), okValue)
        .build();
    testCases.add(new TestTuple<>(processName + ": `emptyDir` ignored",
        new Pair<>(configIgnored, !isExecutor), new HashMap<>()));
}
Also used : VolumeConfigKeys(org.apache.heron.scheduler.kubernetes.KubernetesConstants.VolumeConfigKeys) HashMap(java.util.HashMap) Config(org.apache.heron.spi.common.Config) ImmutableMap(com.google.common.collect.ImmutableMap) HashMap(java.util.HashMap) Map(java.util.Map) Pair(org.apache.heron.common.basics.Pair)

Aggregations

Pair (org.apache.heron.common.basics.Pair)37 Test (org.junit.Test)19 Config (org.apache.heron.spi.common.Config)15 HashMap (java.util.HashMap)13 InstanceId (org.apache.heron.spi.packing.InstanceId)10 LinkedList (java.util.LinkedList)8 Map (java.util.Map)7 VolumeConfigKeys (org.apache.heron.scheduler.kubernetes.KubernetesConstants.VolumeConfigKeys)7 Matchers.anyString (org.mockito.Matchers.anyString)7 ImmutableMap (com.google.common.collect.ImmutableMap)6 TestTuple (org.apache.heron.scheduler.kubernetes.KubernetesUtils.TestTuple)6 PackingPlan (org.apache.heron.spi.packing.PackingPlan)6 V1ConfigMap (io.kubernetes.client.openapi.models.V1ConfigMap)5 ArrayList (java.util.ArrayList)4 V1ConfigMapBuilder (io.kubernetes.client.openapi.models.V1ConfigMapBuilder)3 V1VolumeMount (io.kubernetes.client.openapi.models.V1VolumeMount)3 V1VolumeMountBuilder (io.kubernetes.client.openapi.models.V1VolumeMountBuilder)3 InetSocketAddress (java.net.InetSocketAddress)3 TopologySubmissionException (org.apache.heron.scheduler.TopologySubmissionException)3 V1Volume (io.kubernetes.client.openapi.models.V1Volume)2