Use of org.apache.beam.vendor.guava.v26_0_jre.com.google.common.annotations.VisibleForTesting in project beam by apache.
The class FlinkPipelineExecutionEnvironment, method getJobGraph.
/**
* Retrieves the generated JobGraph which can be submitted against the cluster. For testing
* purposes.
*/
@VisibleForTesting
JobGraph getJobGraph(Pipeline p) {
  translate(p);
  StreamGraph streamGraph = flinkStreamEnv.getStreamGraph();
  // Normally the job name is set when we execute the job, and JobGraph is immutable, so we need
  // to set the job name here.
  streamGraph.setJobName(p.getOptions().getJobName());
  return streamGraph.getJobGraph();
}
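As a usage sketch: the method is package-private, so a caller would have to live in org.apache.beam.runners.flink. The option values below are illustrative assumptions, not the configuration of Beam's own tests; streaming mode is forced because getJobGraph reads the stream graph.

import org.apache.beam.runners.flink.FlinkPipelineOptions;
import org.apache.beam.runners.flink.FlinkRunner;
import org.apache.beam.sdk.Pipeline;
import org.apache.beam.sdk.options.PipelineOptionsFactory;
import org.apache.flink.runtime.jobgraph.JobGraph;

// Hypothetical driver; must be in package org.apache.beam.runners.flink to
// reach the package-private class and method.
public class JobGraphSketch {
  public static void main(String[] args) {
    FlinkPipelineOptions options = PipelineOptionsFactory.as(FlinkPipelineOptions.class);
    options.setRunner(FlinkRunner.class);
    options.setStreaming(true); // getJobGraph goes through the stream graph
    options.setJobName("job-graph-sketch"); // illustrative job name
    Pipeline pipeline = Pipeline.create(options);
    FlinkPipelineExecutionEnvironment env = new FlinkPipelineExecutionEnvironment(options);
    JobGraph jobGraph = env.getJobGraph(pipeline);
    // getJobGraph copies the job name from the options onto the graph.
    System.out.println("JobGraph name: " + jobGraph.getName());
  }
}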
Use of org.apache.beam.vendor.guava.v26_0_jre.com.google.common.annotations.VisibleForTesting in project beam by apache.
The class DataflowRunner, method resolveArtifacts.
@VisibleForTesting
protected RunnerApi.Pipeline resolveArtifacts(RunnerApi.Pipeline pipeline) {
  RunnerApi.Pipeline.Builder pipelineBuilder = pipeline.toBuilder();
  RunnerApi.Components.Builder componentsBuilder = pipelineBuilder.getComponentsBuilder();
  componentsBuilder.clearEnvironments();
  for (Map.Entry<String, RunnerApi.Environment> entry :
      pipeline.getComponents().getEnvironmentsMap().entrySet()) {
    RunnerApi.Environment.Builder environmentBuilder = entry.getValue().toBuilder();
    environmentBuilder.clearDependencies();
    for (RunnerApi.ArtifactInformation info : entry.getValue().getDependenciesList()) {
      if (!BeamUrns.getUrn(RunnerApi.StandardArtifacts.Types.FILE).equals(info.getTypeUrn())) {
        throw new RuntimeException(
            String.format("unsupported artifact type %s", info.getTypeUrn()));
      }
      RunnerApi.ArtifactFilePayload filePayload;
      try {
        filePayload = RunnerApi.ArtifactFilePayload.parseFrom(info.getTypePayload());
      } catch (InvalidProtocolBufferException e) {
        throw new RuntimeException("Error parsing artifact file payload.", e);
      }
      String stagedName;
      if (BeamUrns.getUrn(RunnerApi.StandardArtifacts.Roles.STAGING_TO)
          .equals(info.getRoleUrn())) {
        try {
          RunnerApi.ArtifactStagingToRolePayload stagingPayload =
              RunnerApi.ArtifactStagingToRolePayload.parseFrom(info.getRolePayload());
          stagedName = stagingPayload.getStagedName();
        } catch (InvalidProtocolBufferException e) {
          throw new RuntimeException("Error parsing artifact staging_to role payload.", e);
        }
      } else {
        try {
          File source = new File(filePayload.getPath());
          HashCode hashCode = Files.asByteSource(source).hash(Hashing.sha256());
          stagedName = Environments.createStagingFileName(source, hashCode);
        } catch (IOException e) {
          throw new RuntimeException(
              String.format("Error creating staged name for artifact %s", filePayload.getPath()),
              e);
        }
      }
      environmentBuilder.addDependencies(
          info.toBuilder()
              .setTypeUrn(BeamUrns.getUrn(RunnerApi.StandardArtifacts.Types.URL))
              .setTypePayload(
                  RunnerApi.ArtifactUrlPayload.newBuilder()
                      .setUrl(
                          FileSystems.matchNewResource(options.getStagingLocation(), true)
                              .resolve(
                                  stagedName,
                                  ResolveOptions.StandardResolveOptions.RESOLVE_FILE)
                              .toString())
                      .setSha256(filePayload.getSha256())
                      .build()
                      .toByteString()));
    }
    componentsBuilder.putEnvironments(entry.getKey(), environmentBuilder.build());
  }
  return pipelineBuilder.build();
}
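For reference, a sketch of the FILE-typed dependency this method consumes, built with the same proto builders. The path and staged name are placeholders, and the BeamUrns import path is assumed from the runners-core-construction module; resolveArtifacts rewrites such an entry into a URL-typed dependency under the staging location.

import org.apache.beam.model.pipeline.v1.RunnerApi;
import org.apache.beam.runners.core.construction.BeamUrns;

public class ArtifactSketch {
  public static void main(String[] args) {
    RunnerApi.ArtifactInformation fileDep =
        RunnerApi.ArtifactInformation.newBuilder()
            .setTypeUrn(BeamUrns.getUrn(RunnerApi.StandardArtifacts.Types.FILE))
            .setTypePayload(
                RunnerApi.ArtifactFilePayload.newBuilder()
                    .setPath("/tmp/extra-lib.jar") // placeholder local path
                    .build()
                    .toByteString())
            .setRoleUrn(BeamUrns.getUrn(RunnerApi.StandardArtifacts.Roles.STAGING_TO))
            .setRolePayload(
                RunnerApi.ArtifactStagingToRolePayload.newBuilder()
                    .setStagedName("extra-lib.jar") // placeholder staged name
                    .build()
                    .toByteString())
            .build();
    // With a STAGING_TO role, resolveArtifacts keeps this staged name and
    // points the rewritten URL dependency at <stagingLocation>/extra-lib.jar.
    System.out.println(fileDep.getTypeUrn());
  }
}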
Use of org.apache.beam.vendor.guava.v26_0_jre.com.google.common.annotations.VisibleForTesting in project beam by apache.
The class TestDataflowRunner, method checkForPAssertSuccess.
/**
* Check that PAssert expectations were met.
*
* <p>If the pipeline is not in a failed/cancelled state and no PAsserts were used within the
* pipeline, then this method will state that all PAsserts succeeded.
*
* @return Optional.of(false) if we are certain a PAssert failed. Optional.of(true) if we are
* certain all PAsserts passed. Optional.absent() if the evidence is inconclusive, including
* when the pipeline may have failed for other reasons.
*/
@VisibleForTesting
Optional<Boolean> checkForPAssertSuccess(DataflowPipelineJob job) {
  JobMetrics metrics = getJobMetrics(job);
  if (metrics == null || metrics.getMetrics() == null) {
    LOG.warn("Metrics not present for Dataflow job {}.", job.getJobId());
    return Optional.absent();
  }
  int successes = 0;
  int failures = 0;
  for (MetricUpdate metric : metrics.getMetrics()) {
    if (metric.getName() == null
        || metric.getName().getContext() == null
        || !metric.getName().getContext().containsKey(TENTATIVE_COUNTER)) {
      // Don't double count using the non-tentative version of the metric.
      continue;
    }
    if (PAssert.SUCCESS_COUNTER.equals(metric.getName().getName())) {
      successes += ((BigDecimal) metric.getScalar()).intValue();
    } else if (PAssert.FAILURE_COUNTER.equals(metric.getName().getName())) {
      failures += ((BigDecimal) metric.getScalar()).intValue();
    }
  }
  if (failures > 0) {
    LOG.info(
        "Failure result for Dataflow job {}. Found {} success, {} failures out of {} expected assertions.",
        job.getJobId(), successes, failures, expectedNumberOfAssertions);
    return Optional.of(false);
  } else if (successes >= expectedNumberOfAssertions) {
    LOG.info(
        "Success result for Dataflow job {}. Found {} success, {} failures out of {} expected assertions.",
        job.getJobId(), successes, failures, expectedNumberOfAssertions);
    return Optional.of(true);
  }
  // If the job failed, this is a definite failure. We only cancel jobs when they fail.
  State state = job.getState();
  if (state == State.FAILED || state == State.CANCELLED) {
    LOG.info(
        "Dataflow job {} terminated in failure state {} without reporting a failed assertion",
        job.getJobId(), state);
    return Optional.absent();
  }
  LOG.info(
      "Inconclusive results for Dataflow job {}. Found {} success, {} failures out of {} expected assertions.",
      job.getJobId(), successes, failures, expectedNumberOfAssertions);
  return Optional.absent();
}
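A sketch of how a caller might fold the three-way result into a test outcome. The helper name is hypothetical, and since checkForPAssertSuccess is package-private it would have to live alongside TestDataflowRunner; the Optional is Beam's vendored Guava type.

import org.apache.beam.runners.dataflow.DataflowPipelineJob;
import org.apache.beam.vendor.guava.v26_0_jre.com.google.common.base.Optional;

// Hypothetical helper illustrating the three possible outcomes.
static void verifyPAsserts(TestDataflowRunner runner, DataflowPipelineJob job) {
  Optional<Boolean> success = runner.checkForPAssertSuccess(job);
  if (!success.isPresent()) {
    // Inconclusive: metrics were missing, or the job failed for other reasons.
    throw new IllegalStateException("Inconclusive PAssert results for job " + job.getJobId());
  }
  if (!success.get()) {
    // Optional.of(false): at least one PAssert definitely failed.
    throw new AssertionError("PAssert failed in job " + job.getJobId());
  }
  // Optional.of(true): all expected assertions passed.
}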
Use of org.apache.beam.vendor.guava.v26_0_jre.com.google.common.annotations.VisibleForTesting in project beam by apache.
The class MemoryMonitor, method dumpHeap.
/**
* Dump the current heap profile to a file in the given directory and return its name.
*
* <p>NOTE: We deliberately don't salt the heap dump filename so as to minimize disk impact of
* repeated dumps. These files can be of comparable size to the local disk.
*/
@VisibleForTesting
static synchronized File dumpHeap(File directory)
    throws MalformedObjectNameException, InstanceNotFoundException, ReflectionException,
        MBeanException, IOException {
  boolean liveObjectsOnly = false;
  File fileName = new File(directory, "heap_dump.hprof");
  if (fileName.exists() && !fileName.delete()) {
    throw new IOException("heap_dump.hprof already existed and couldn't be deleted!");
  }
  MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
  ObjectName oname = new ObjectName("com.sun.management:type=HotSpotDiagnostic");
  Object[] parameters = {fileName.getPath(), liveObjectsOnly};
  String[] signatures = {String.class.getName(), boolean.class.getName()};
  mbs.invoke(oname, "dumpHeap", parameters, signatures);
  Files.setPosixFilePermissions(
      fileName.toPath(),
      ImmutableSet.of(
          PosixFilePermission.OWNER_READ,
          PosixFilePermission.GROUP_READ,
          PosixFilePermission.OTHERS_READ));
  LOG.warn("Heap dumped to {}", fileName);
  return fileName;
}
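A sketch of exercising dumpHeap from a test. The driver class is hypothetical; it must run on a HotSpot JVM (the com.sun.management:type=HotSpotDiagnostic bean is HotSpot-specific) and sit in the same package as MemoryMonitor to reach the package-private method.

import java.io.File;
import java.nio.file.Files;

// Hypothetical test driver for the package-private dumpHeap.
public class HeapDumpSketch {
  public static void main(String[] args) throws Exception {
    File dir = Files.createTempDirectory("heap-dump-sketch").toFile();
    File dump = MemoryMonitor.dumpHeap(dir);
    System.out.printf("Wrote %d bytes to %s%n", dump.length(), dump.getPath());
    // Because the filename is not salted, re-running overwrites the same
    // heap_dump.hprof rather than accumulating dumps on disk.
  }
}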
Use of org.apache.beam.vendor.guava.v26_0_jre.com.google.common.annotations.VisibleForTesting in project beam by apache.
The class PortablePipelineJarCreator, method copyResourcesFromJar.
/**
* Copy resources from {@code inputJar} to {@link #outputStream}.
*/
@VisibleForTesting
protected void copyResourcesFromJar(JarFile inputJar) throws IOException {
  Enumeration<JarEntry> inputJarEntries = inputJar.entries();
  // The zip spec allows multiple files with the same name; the Java zip libraries do not.
  // Keep track of the files we've already written to filter out duplicates.
  // Also, ignore the old manifest; we want to write our own.
  Set<String> previousEntryNames = new HashSet<>(ImmutableList.of(JarFile.MANIFEST_NAME));
  while (inputJarEntries.hasMoreElements()) {
    JarEntry inputJarEntry = inputJarEntries.nextElement();
    InputStream inputStream = inputJar.getInputStream(inputJarEntry);
    String entryName = inputJarEntry.getName();
    if (previousEntryNames.contains(entryName)) {
      LOG.debug("Skipping duplicated file {}", entryName);
    } else {
      JarEntry outputJarEntry = new JarEntry(inputJarEntry);
      outputStream.putNextEntry(outputJarEntry);
      LOG.trace("Copying jar entry {}", inputJarEntry);
      IOUtils.copy(inputStream, outputStream);
      previousEntryNames.add(entryName);
    }
  }
}
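The same dedup-and-copy pattern as a standalone sketch, using only java.util.jar types instead of the jar creator's internal outputStream field; the class name and file paths are placeholders.

import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.util.Enumeration;
import java.util.HashSet;
import java.util.Set;
import java.util.jar.JarEntry;
import java.util.jar.JarFile;
import java.util.jar.JarOutputStream;
import java.util.jar.Manifest;

public class JarCopySketch {
  public static void main(String[] args) throws IOException {
    try (JarFile inputJar = new JarFile("input.jar"); // placeholder paths
        JarOutputStream out =
            new JarOutputStream(new FileOutputStream("output.jar"), new Manifest())) {
      Set<String> seen = new HashSet<>();
      seen.add(JarFile.MANIFEST_NAME); // drop the old manifest; ours was written above
      Enumeration<JarEntry> entries = inputJar.entries();
      while (entries.hasMoreElements()) {
        JarEntry entry = entries.nextElement();
        if (!seen.add(entry.getName())) {
          continue; // the zip spec allows duplicate names; java.util.zip does not
        }
        out.putNextEntry(new JarEntry(entry.getName()));
        try (InputStream in = inputJar.getInputStream(entry)) {
          in.transferTo(out); // java.io.InputStream#transferTo, Java 9+
        }
        out.closeEntry();
      }
    }
  }
}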