use of org.apache.heron.common.basics.Pair in project heron by twitter.
From the class TopologyResource, method submit.
// REST endpoint: accepts a multipart-form submission of a new topology.
// Validates required fields, stages the uploaded definition/binary plus the
// cluster configuration into a temp directory, tars it up, and hands it to a
// SubmitAction. Responses:
//   201 Created - topology submitted
//   422         - missing required params or cluster mismatch
//   dry-run body when the dry-run field is present
//   500         - any other failure
@POST
@Consumes(MediaType.MULTIPART_FORM_DATA)
@Produces(MediaType.APPLICATION_JSON)
@SuppressWarnings({ "IllegalCatch", "JavadocMethod" })
public Response submit(FormDataMultiPart form) throws IOException {
  // verify that we have all the required params; reject early otherwise
  final List<String> missingDataKeys =
      verifyKeys(form.getFields().keySet(), REQUIRED_SUBMIT_TOPOLOGY_PARAMS);
  if (!missingDataKeys.isEmpty()) {
    // return 422 since we are missing required parameters
    final String message = String.format(
        "Validation failed missing required params: %s", missingDataKeys.toString());
    return Response.status(HTTP_UNPROCESSABLE_ENTITY_CODE)
        .type(MediaType.APPLICATION_JSON)
        .entity(Utils.createValidationError(message, missingDataKeys))
        .build();
  }
  final String cluster = Forms.getString(form, FORM_KEY_CLUSTER);
  if (!doesClusterMatch(cluster)) {
    // this API instance serves a single cluster; reject mismatched requests
    return Response.status(HTTP_UNPROCESSABLE_ENTITY_CODE)
        .type(MediaType.APPLICATION_JSON)
        .entity(Utils.createMessage(
            String.format("Unknown cluster %s expecting '%s'", cluster, getCluster())))
        .build();
  }
  final String topologyName = Forms.getString(form, FORM_KEY_NAME);
  final String role = Forms.getString(form, FORM_KEY_ROLE);
  // environment defaults when absent; user defaults to the role
  final String environment =
      Forms.getString(form, FORM_KEY_ENVIRONMENT, Constants.DEFAULT_HERON_ENVIRONMENT);
  final String user = Forms.getString(form, FORM_KEY_USER, role);
  // submit overrides are passed key=value
  final Map<String, String> submitOverrides = getSubmitOverrides(form);
  // stage everything in a temp directory; always removed in the finally block
  final String topologyDirectory =
      Files.createTempDirectory(topologyName).toFile().getAbsolutePath();
  try {
    // upload the topology definition file to the topology directory
    final FormDataBodyPart definitionFilePart = form.getField(FORM_KEY_DEFINITION);
    final File topologyDefinitionFile = Forms.uploadFile(definitionFilePart, topologyDirectory);
    // upload the topology binary file to the topology directory
    final FormDataBodyPart topologyFilePart = form.getField(FORM_KEY_TOPOLOGY);
    final File topologyBinaryFile = Forms.uploadFile(topologyFilePart, topologyDirectory);
    // the mere presence of these form fields (not their values) enables the flags
    final boolean isDryRun = form.getFields().containsKey(PARAM_DRY_RUN);
    final boolean isVerbose = form.getFields().containsKey("verbose");
    final boolean isVerboseGC = form.getFields().containsKey("verbose_gc");
    // copy configuration files to the sandbox config location
    // topology-dir/<default-heron-sandbox-config>
    FileHelper.copyDirectory(
        Paths.get(getConfigurationDirectory()),
        Paths.get(topologyDirectory, Constants.DEFAULT_HERON_SANDBOX_CONFIG));
    final java.nio.file.Path overridesPath = Paths.get(
        topologyDirectory, Constants.DEFAULT_HERON_SANDBOX_CONFIG, Constants.OVERRIDE_FILE);
    // copy override file into topology configuration directory
    FileHelper.copy(Paths.get(getConfigurationOverridePath()), overridesPath);
    // apply submit overrides on top of the copied override file
    ConfigUtils.applyOverrides(overridesPath, submitOverrides);
    // apply overrides to state manager config as well
    ConfigUtils.applyOverridesToStateManagerConfig(
        overridesPath,
        Paths.get(topologyDirectory, Constants.DEFAULT_HERON_SANDBOX_CONFIG,
            Constants.STATE_MANAGER_FILE));
    // create tar file from the contents of the topology directory
    final File topologyPackageFile =
        Paths.get(topologyDirectory, TOPOLOGY_TAR_GZ_FILENAME).toFile();
    FileHelper.createTarGz(topologyPackageFile, FileHelper.getChildren(topologyDirectory));
    // create configs: topology config entries first, then the request params
    Config topologyConfig = ConfigUtils.getTopologyConfig(
        topologyPackageFile.getAbsolutePath(),
        topologyBinaryFile.getName(),
        topologyDefinitionFile.getAbsolutePath());
    List<Pair<String, Object>> val = new LinkedList<>();
    for (Map.Entry<String, Object> entry : topologyConfig.getEntrySet()) {
      val.add(Pair.create(entry.getKey(), entry.getValue()));
    }
    val.addAll(Arrays.asList(
        Pair.create(Key.CLUSTER.value(), cluster),
        Pair.create(Key.TOPOLOGY_NAME.value(), topologyName),
        Pair.create(Key.ROLE.value(), role),
        Pair.create(Key.ENVIRON.value(), environment),
        Pair.create(Key.SUBMIT_USER.value(), user),
        Pair.create(Key.DRY_RUN.value(), isDryRun),
        Pair.create(Key.VERBOSE.value(), isVerbose),
        Pair.create(Key.VERBOSE_GC.value(), isVerboseGC)));
    final Config config = createConfig(val, submitOverrides);
    // submit the topology
    getActionFactory()
        .createSubmitAction(config, topologyPackageFile.getAbsolutePath(),
            topologyBinaryFile.getName(), topologyDefinitionFile.getAbsolutePath())
        .execute();
    return Response.created(
            URI.create(String.format(TOPOLOGY_PATH_FORMAT, cluster, role, environment,
                topologyName)))
        .type(MediaType.APPLICATION_JSON)
        .entity(createdResponse(cluster, role, environment, topologyName))
        .build();
  } catch (SubmitDryRunResponse response) {
    // dry-run results are surfaced via this exception carrying the plan; render it
    return createDryRunResponse(
        response, Forms.getString(form, PARAM_DRY_RUN_FORMAT, DEFAULT_DRY_RUN_FORMAT));
  } catch (Exception ex) {
    LOG.error("error submitting topology {}", topologyName, ex);
    return Response.serverError()
        .type(MediaType.APPLICATION_JSON)
        .entity(Utils.createMessage(ex.getMessage()))
        .build();
  } finally {
    // always clean up the staging directory
    FileUtils.deleteDir(topologyDirectory);
  }
}
use of org.apache.heron.common.basics.Pair in project heron by twitter.
From the class NetworkUtils, method establishSSHTunnelIfNeeded.
/**
 * Establishes an SSH tunnel to an endpoint when it cannot be reached directly.
 * This is best effort and may give a false "not reachable".
 *
 * @param endpoint the endpoint to connect to
 * @param tunnelHost the host used to tunnel through
 * @param tunnelType the type of tunnel to establish (port forward or SOCKS proxy)
 * @param timeout how long an open-connection attempt waits, in ms
 * @param retryCount how many times to retry on connection timeout
 * @param retryInterval the interval in ms between retries
 * @param verifyCount in case of longer tunnel setup, how many verification attempts to allow
 * @return a {@code Pair<newReachableEndpoint, tunnelProcess>}.
 *         If the endpoint is already reachable, newReachableEndpoint equals the original
 *         endpoint and tunnelProcess is null.
 *         If there is no way to reach it even through SSH tunneling, both elements are null.
 */
private static Pair<InetSocketAddress, Process> establishSSHTunnelIfNeeded(
    InetSocketAddress endpoint, String tunnelHost, TunnelType tunnelType,
    Duration timeout, int retryCount, Duration retryInterval, int verifyCount) {
  if (NetworkUtils.isLocationReachable(endpoint, timeout, retryCount, retryInterval)) {
    // Already reachable, return original endpoint directly
    return new Pair<InetSocketAddress, Process>(endpoint, null);
  } else {
    // Can not reach directly, trying to do ssh tunnel via a local free port
    int localFreePort = SysUtils.getFreePort();
    InetSocketAddress newEndpoint = new InetSocketAddress(LOCAL_HOST, localFreePort);
    LOG.log(Level.FINE, "Trying to opening up tunnel to {0} from {1}",
        new Object[] { endpoint.toString(), newEndpoint.toString() });
    // Set up the tunnel process; note ShellUtils may return null on spawn failure
    final Process tunnelProcess;
    switch (tunnelType) {
      case PORT_FORWARD:
        tunnelProcess = ShellUtils.establishSSHTunnelProcess(
            tunnelHost, localFreePort, endpoint.getHostString(), endpoint.getPort());
        break;
      case SOCKS_PROXY:
        tunnelProcess = ShellUtils.establishSocksProxyProcess(tunnelHost, localFreePort);
        break;
      default:
        throw new IllegalArgumentException("Unrecognized TunnelType passed: " + tunnelType);
    }
    // Verify whether the tunnel process is working fine.
    if (tunnelProcess != null && tunnelProcess.isAlive()
        && NetworkUtils.isLocationReachable(newEndpoint, timeout, verifyCount, retryInterval)) {
      // Ensure the tunnel is torn down when the JVM exits.
      java.lang.Runtime.getRuntime().addShutdownHook(new Thread() {
        @Override
        public void run() {
          tunnelProcess.destroy();
        }
      });
      // Can reach the destination via ssh tunnel
      return new Pair<InetSocketAddress, Process>(newEndpoint, tunnelProcess);
    }
    LOG.log(Level.FINE, "Failed to opening up tunnel to {0} from {1}. Releasing process..",
        new Object[] { endpoint, newEndpoint });
    // BUGFIX: tunnelProcess can be null here (the null-check above falls through),
    // so an unconditional destroy() would throw a NullPointerException.
    if (tunnelProcess != null) {
      tunnelProcess.destroy();
    }
  }
  // No way to reach the destination. Return null.
  return new Pair<InetSocketAddress, Process>(null, null);
}
use of org.apache.heron.common.basics.Pair in project heron by twitter.
From the class KubernetesContextTest, method createVolumeConfigs.
/**
 * Generates <code>Volume</code> Configs for testing. Two cases are appended per
 * process type: one where the PVC options should be parsed out, and one where
 * CLI-provided volumes are disabled and the options must be ignored.
 * @param testCases Test case container.
 * Input: [0] Config, [1] Boolean to indicate Manager/Executor.
 * Output: [0] expectedKeys, [1] expectedOptionsKeys, [2] expectedOptionsValues.
 * @param prefix Configuration prefix key to use in lookup.
 */
private void createVolumeConfigs(List<TestTuple<Pair<Config, Boolean>, Object[]>> testCases,
    String prefix) {
  final String keyTemplate = prefix + "%%s.%%s";
  final String firstVolume = "volume-name-one";
  final String secondVolume = "volume-name-two";
  final String claimName = "OnDeMaNd";
  final String storageClassField = VolumeConfigKeys.storageClassName.name();
  final String pathField = VolumeConfigKeys.path.name();
  final String claimNameField = VolumeConfigKeys.claimName.name();
  final String expectedStorageClass = "expected-storage-class";
  final String expectedPath = "/path/for/volume/expected";

  // Generate the Executor test cases first, then the Manager ones.
  for (final boolean forExecutor : new boolean[] {true, false}) {
    final String description =
        forExecutor ? KubernetesConstants.EXECUTOR_NAME : KubernetesConstants.MANAGER_NAME;
    // Bind the process name into the template; <volume>.<option> remain to be filled.
    final String key = String.format(keyTemplate, description);
    final String firstStorageClassKey = String.format(key, firstVolume, storageClassField);
    final String secondStorageClassKey = String.format(key, secondVolume, storageClassField);
    final String firstPathKey = String.format(key, firstVolume, pathField);
    final String secondPathKey = String.format(key, secondVolume, pathField);
    final String firstClaimKey = String.format(key, firstVolume, claimNameField);
    final String secondClaimKey = String.format(key, secondVolume, claimNameField);

    // Both volumes fully configured: path, claim name and storage class expected back.
    final Config configPVC = Config.newBuilder()
        .put(firstPathKey, expectedPath)
        .put(secondPathKey, expectedPath)
        .put(firstClaimKey, claimName)
        .put(secondClaimKey, claimName)
        .put(firstStorageClassKey, expectedStorageClass)
        .put(secondStorageClassKey, expectedStorageClass)
        .build();
    testCases.add(new TestTuple<>(description,
        new Pair<>(configPVC, forExecutor),
        new Object[] {
            Arrays.asList(firstVolume, secondVolume),
            Arrays.asList(VolumeConfigKeys.path, VolumeConfigKeys.storageClassName,
                VolumeConfigKeys.claimName),
            Arrays.asList(expectedPath, expectedStorageClass, claimName)}));

    // Same options with CLI volumes disabled: nothing should be extracted, no error.
    final Config configPVCDisabled = Config.newBuilder()
        .put(KubernetesContext.KUBERNETES_VOLUME_FROM_CLI_DISABLED, "true")
        .put(firstPathKey, expectedPath)
        .put(secondPathKey, expectedPath)
        .put(firstClaimKey, claimName)
        .put(secondClaimKey, claimName)
        .put(firstStorageClassKey, expectedStorageClass)
        .put(secondStorageClassKey, expectedStorageClass)
        .build();
    testCases.add(new TestTuple<>(description + " Disabled should not error",
        new Pair<>(configPVCDisabled, !forExecutor),
        new Object[] {
            new LinkedList<String>(),
            new LinkedList<VolumeConfigKeys>(),
            new LinkedList<String>()}));
  }
}
use of org.apache.heron.common.basics.Pair in project heron by twitter.
From the class KubernetesContextTest, method createVolumeNFS.
/**
 * Create test cases for <code>NFS</code> volumes: options parsed with and without
 * the <code>readOnly</code> flag, plus a case addressed to the other process type
 * that must be ignored.
 * @param testCases Test case container.
 * Input: [0] Config, [1] Boolean to indicate Manager/Executor.
 * Output: <code>Map&lt;String, Map&lt;VolumeConfigKeys, String&gt;&gt;</code>
 * @param isExecutor Boolean to indicate Manager/Executor test case generation.
 */
private void createVolumeNFS(List<TestTuple<Pair<Config, Boolean>, Map<String, Map<VolumeConfigKeys, String>>>> testCases, boolean isExecutor) {
  final String volumeNameValid = "volume-name-valid";
  final String passingValue = "should-pass";
  final String processName = isExecutor ? KubernetesConstants.EXECUTOR_NAME : KubernetesConstants.MANAGER_NAME;
  // The escaped %%s placeholders survive this format and later become <volume>.<option>.
  final String keyPattern = String.format(KubernetesContext.KUBERNETES_VOLUME_NFS_PREFIX + "%%s.%%s", processName);
  // With readOnly.
  // NOTE(review): readOnly is put twice below ("true", then passingValue) in both
  // the expected map and the config; the last write wins in the HashMap — confirm
  // whether the "true" entries were meant to be a distinct key.
  final Map<String, Map<VolumeConfigKeys, String>> expectedWithReadOnly = ImmutableMap.of(volumeNameValid, new HashMap<VolumeConfigKeys, String>() {
    {
      put(VolumeConfigKeys.server, "nfs-server.default.local");
      put(VolumeConfigKeys.readOnly, "true");
      put(VolumeConfigKeys.pathOnNFS, passingValue);
      put(VolumeConfigKeys.path, passingValue);
      put(VolumeConfigKeys.subPath, passingValue);
      put(VolumeConfigKeys.readOnly, passingValue);
    }
  });
  final Config configWithReadOnly = Config.newBuilder().put(String.format(keyPattern, volumeNameValid, "server"), "nfs-server.default.local").put(String.format(keyPattern, volumeNameValid, "readOnly"), "true").put(String.format(keyPattern, volumeNameValid, "pathOnNFS"), passingValue).put(String.format(keyPattern, volumeNameValid, "path"), passingValue).put(String.format(keyPattern, volumeNameValid, "subPath"), passingValue).put(String.format(keyPattern, volumeNameValid, "readOnly"), passingValue).build();
  testCases.add(new TestTuple<>(processName + ": `NFS` with `readOnly`", new Pair<>(configWithReadOnly, isExecutor), expectedWithReadOnly));
  // Without readOnly.
  final Map<String, Map<VolumeConfigKeys, String>> expectedWithoutReadOnly = ImmutableMap.of(volumeNameValid, new HashMap<VolumeConfigKeys, String>() {
    {
      put(VolumeConfigKeys.server, "nfs-server.default.local");
      put(VolumeConfigKeys.pathOnNFS, passingValue);
      put(VolumeConfigKeys.path, passingValue);
      put(VolumeConfigKeys.subPath, passingValue);
      put(VolumeConfigKeys.readOnly, passingValue);
    }
  });
  final Config configWithoutReadOnly = Config.newBuilder().put(String.format(keyPattern, volumeNameValid, "server"), "nfs-server.default.local").put(String.format(keyPattern, volumeNameValid, "pathOnNFS"), passingValue).put(String.format(keyPattern, volumeNameValid, "path"), passingValue).put(String.format(keyPattern, volumeNameValid, "subPath"), passingValue).put(String.format(keyPattern, volumeNameValid, "readOnly"), passingValue).build();
  testCases.add(new TestTuple<>(processName + ": `NFS` without `readOnly`", new Pair<>(configWithoutReadOnly, isExecutor), expectedWithoutReadOnly));
  // Ignored: the pair passes !isExecutor, so the keys target the other process
  // type and the parser is expected to return an empty map.
  final Config configIgnored = Config.newBuilder().put(String.format(keyPattern, volumeNameValid, "server"), "nfs-server.default.local").put(String.format(keyPattern, volumeNameValid, "readOnly"), "true").put(String.format(keyPattern, volumeNameValid, "pathOnNFS"), passingValue).put(String.format(keyPattern, volumeNameValid, "path"), passingValue).put(String.format(keyPattern, volumeNameValid, "subPath"), passingValue).put(String.format(keyPattern, volumeNameValid, "readOnly"), passingValue).build();
  testCases.add(new TestTuple<>(processName + ": `NFS` ignored", new Pair<>(configIgnored, !isExecutor), new HashMap<>()));
}
use of org.apache.heron.common.basics.Pair in project heron by twitter.
From the class KubernetesContextTest, method createVolumeEmptyDir.
/**
 * Create test cases for <code>Empty Directory</code> volumes: the <code>medium</code>
 * option set, set to empty, and absent, plus a case addressed to the other process
 * type that must be ignored.
 * @param testCases Test case container.
 * Input: [0] Config, [1] Boolean to indicate Manager/Executor.
 * Output: <code>Map&lt;String, Map&lt;VolumeConfigKeys, String&gt;&gt;</code>
 * @param isExecutor Boolean to indicate Manager/Executor test case generation.
 */
private void createVolumeEmptyDir(
    List<TestTuple<Pair<Config, Boolean>, Map<String, Map<VolumeConfigKeys, String>>>> testCases,
    boolean isExecutor) {
  final String validVolume = "volume-name-valid";
  final String passValue = "should-pass";
  final String processName =
      isExecutor ? KubernetesConstants.EXECUTOR_NAME : KubernetesConstants.MANAGER_NAME;
  final String keyPattern = String.format(
      KubernetesContext.KUBERNETES_VOLUME_EMPTYDIR_PREFIX + "%%s.%%s", processName);
  // Pre-compute the fully-qualified config keys shared by every case below.
  final String sizeLimitKey = String.format(keyPattern, validVolume, "sizeLimit");
  final String mediumKey = String.format(keyPattern, validVolume, "medium");
  final String pathKey = String.format(keyPattern, validVolume, "path");
  final String subPathKey = String.format(keyPattern, validVolume, "subPath");
  final String readOnlyKey = String.format(keyPattern, validVolume, "readOnly");

  // Case: `medium` explicitly set to "Memory".
  final Map<VolumeConfigKeys, String> optionsWithMedium = new HashMap<>();
  optionsWithMedium.put(VolumeConfigKeys.sizeLimit, passValue);
  optionsWithMedium.put(VolumeConfigKeys.medium, "Memory");
  optionsWithMedium.put(VolumeConfigKeys.path, passValue);
  optionsWithMedium.put(VolumeConfigKeys.subPath, passValue);
  optionsWithMedium.put(VolumeConfigKeys.readOnly, passValue);
  final Config configWithMedium = Config.newBuilder()
      .put(sizeLimitKey, passValue)
      .put(mediumKey, "Memory")
      .put(pathKey, passValue)
      .put(subPathKey, passValue)
      .put(readOnlyKey, passValue)
      .build();
  testCases.add(new TestTuple<>(processName + ": `emptyDir` with `medium`",
      new Pair<>(configWithMedium, isExecutor),
      ImmutableMap.of(validVolume, optionsWithMedium)));

  // Case: `medium` present but set to the empty string.
  final Map<VolumeConfigKeys, String> optionsEmptyMedium = new HashMap<>();
  optionsEmptyMedium.put(VolumeConfigKeys.sizeLimit, passValue);
  optionsEmptyMedium.put(VolumeConfigKeys.medium, "");
  optionsEmptyMedium.put(VolumeConfigKeys.path, passValue);
  optionsEmptyMedium.put(VolumeConfigKeys.subPath, passValue);
  optionsEmptyMedium.put(VolumeConfigKeys.readOnly, passValue);
  final Config configEmptyMedium = Config.newBuilder()
      .put(sizeLimitKey, passValue)
      .put(mediumKey, "")
      .put(pathKey, passValue)
      .put(subPathKey, passValue)
      .put(readOnlyKey, passValue)
      .build();
  testCases.add(new TestTuple<>(processName + ": `emptyDir` with empty `medium`",
      new Pair<>(configEmptyMedium, isExecutor),
      ImmutableMap.of(validVolume, optionsEmptyMedium)));

  // Case: no `medium` key at all.
  final Map<VolumeConfigKeys, String> optionsNoMedium = new HashMap<>();
  optionsNoMedium.put(VolumeConfigKeys.sizeLimit, passValue);
  optionsNoMedium.put(VolumeConfigKeys.path, passValue);
  optionsNoMedium.put(VolumeConfigKeys.subPath, passValue);
  optionsNoMedium.put(VolumeConfigKeys.readOnly, passValue);
  final Config configNoMedium = Config.newBuilder()
      .put(sizeLimitKey, passValue)
      .put(pathKey, passValue)
      .put(subPathKey, passValue)
      .put(readOnlyKey, passValue)
      .build();
  testCases.add(new TestTuple<>(processName + ": `emptyDir` without `medium`",
      new Pair<>(configNoMedium, isExecutor),
      ImmutableMap.of(validVolume, optionsNoMedium)));

  // Case: config addressed to the other process type is ignored entirely.
  final Config configIgnored = Config.newBuilder()
      .put(sizeLimitKey, passValue)
      .put(mediumKey, "")
      .put(pathKey, passValue)
      .put(subPathKey, passValue)
      .put(readOnlyKey, passValue)
      .build();
  testCases.add(new TestTuple<>(processName + ": `emptyDir` ignored",
      new Pair<>(configIgnored, !isExecutor), new HashMap<>()));
}
Aggregations