
Example 21 with WorkUnit

use of org.apache.gobblin.source.workunit.WorkUnit in project incubator-gobblin by apache.

the class AbstractJobLauncher method launchJob.

@Override
public void launchJob(JobListener jobListener) throws JobException {
    String jobId = this.jobContext.getJobId();
    final JobState jobState = this.jobContext.getJobState();
    try {
        MDC.put(ConfigurationKeys.JOB_NAME_KEY, this.jobContext.getJobName());
        MDC.put(ConfigurationKeys.JOB_KEY_KEY, this.jobContext.getJobKey());
        TimingEvent launchJobTimer = this.eventSubmitter.getTimingEvent(TimingEvent.LauncherTimings.FULL_JOB_EXECUTION);
        try (Closer closer = Closer.create()) {
            closer.register(this.jobContext);
            notifyListeners(this.jobContext, jobListener, TimingEvent.LauncherTimings.JOB_PREPARE, new JobListenerAction() {

                @Override
                public void apply(JobListener jobListener, JobContext jobContext) throws Exception {
                    jobListener.onJobPrepare(jobContext);
                }
            });
            if (this.jobContext.getSemantics() == DeliverySemantics.EXACTLY_ONCE) {
                // If exactly-once is used, commit sequences of the previous run must be successfully completed
                // before this run can make progress.
                executeUnfinishedCommitSequences(jobState.getJobName());
            }
            TimingEvent workUnitsCreationTimer = this.eventSubmitter.getTimingEvent(TimingEvent.LauncherTimings.WORK_UNITS_CREATION);
            Source<?, ?> source = this.jobContext.getSource();
            WorkUnitStream workUnitStream;
            if (source instanceof WorkUnitStreamSource) {
                workUnitStream = ((WorkUnitStreamSource) source).getWorkunitStream(jobState);
            } else {
                workUnitStream = new BasicWorkUnitStream.Builder(source.getWorkunits(jobState)).build();
            }
            workUnitsCreationTimer.stop(this.eventMetadataGenerator.getMetadata(this.jobContext, EventName.WORK_UNITS_CREATION));
            // A missing work unit stream means something went wrong while getting the work units
            if (workUnitStream == null || workUnitStream.getWorkUnits() == null) {
                this.eventSubmitter.submit(JobEvent.WORK_UNITS_MISSING);
                jobState.setState(JobState.RunningState.FAILED);
                throw new JobException("Failed to get work units for job " + jobId);
            }
            // No work units to run
            if (!workUnitStream.getWorkUnits().hasNext()) {
                this.eventSubmitter.submit(JobEvent.WORK_UNITS_EMPTY);
                LOG.warn("No work units have been created for job " + jobId);
                jobState.setState(JobState.RunningState.COMMITTED);
                notifyListeners(this.jobContext, jobListener, TimingEvent.LauncherTimings.JOB_COMPLETE, new JobListenerAction() {

                    @Override
                    public void apply(JobListener jobListener, JobContext jobContext) throws Exception {
                        jobListener.onJobCompletion(jobContext);
                    }
                });
                return;
            }
            // Initialize writer and converter(s)
            closer.register(WriterInitializerFactory.newInstace(jobState, workUnitStream)).initialize();
            closer.register(ConverterInitializerFactory.newInstance(jobState, workUnitStream)).initialize();
            TimingEvent stagingDataCleanTimer = this.eventSubmitter.getTimingEvent(TimingEvent.RunJobTimings.MR_STAGING_DATA_CLEAN);
            // Clean up left-over staging data possibly from the previous run. This is particularly
            // important if the current batch of WorkUnits includes failed WorkUnits from the previous
            // run, which may still have left-over staging data not cleaned up yet.
            cleanLeftoverStagingData(workUnitStream, jobState);
            stagingDataCleanTimer.stop(this.eventMetadataGenerator.getMetadata(this.jobContext, EventName.MR_STAGING_DATA_CLEAN));
            long startTime = System.currentTimeMillis();
            jobState.setStartTime(startTime);
            jobState.setState(JobState.RunningState.RUNNING);
            try {
                LOG.info("Starting job " + jobId);
                notifyListeners(this.jobContext, jobListener, TimingEvent.LauncherTimings.JOB_START, new JobListenerAction() {

                    @Override
                    public void apply(JobListener jobListener, JobContext jobContext) throws Exception {
                        jobListener.onJobStart(jobContext);
                    }
                });
                TimingEvent workUnitsPreparationTimer = this.eventSubmitter.getTimingEvent(TimingEvent.LauncherTimings.WORK_UNITS_PREPARATION);
                // Add task ids
                workUnitStream = prepareWorkUnits(workUnitStream, jobState);
                // Remove skipped workUnits from the list of work units to execute.
                workUnitStream = workUnitStream.filter(new SkippedWorkUnitsFilter(jobState));
                // Add surviving tasks to jobState
                workUnitStream = workUnitStream.transform(new MultiWorkUnitForEach() {

                    @Override
                    public void forWorkUnit(WorkUnit workUnit) {
                        jobState.incrementTaskCount();
                        jobState.addTaskState(new TaskState(new WorkUnitState(workUnit, jobState)));
                    }
                });
                workUnitsPreparationTimer.stop(this.eventMetadataGenerator.getMetadata(this.jobContext, EventName.WORK_UNITS_PREPARATION));
                // Write job execution info to the job history store before the job starts to run
                this.jobContext.storeJobExecutionInfo();
                TimingEvent jobRunTimer = this.eventSubmitter.getTimingEvent(TimingEvent.LauncherTimings.JOB_RUN);
                // Start the job and wait for it to finish
                runWorkUnitStream(workUnitStream);
                jobRunTimer.stop(this.eventMetadataGenerator.getMetadata(this.jobContext, EventName.JOB_RUN));
                this.eventSubmitter.submit(CaseFormat.UPPER_UNDERSCORE.to(CaseFormat.UPPER_CAMEL, "JOB_" + jobState.getState()));
                // Check and set the final job state upon job completion
                if (jobState.getState() == JobState.RunningState.CANCELLED) {
                    LOG.info(String.format("Job %s has been cancelled, aborting now", jobId));
                    return;
                }
                TimingEvent jobCommitTimer = this.eventSubmitter.getTimingEvent(TimingEvent.LauncherTimings.JOB_COMMIT);
                this.jobContext.finalizeJobStateBeforeCommit();
                this.jobContext.commit();
                postProcessJobState(jobState);
                jobCommitTimer.stop(this.eventMetadataGenerator.getMetadata(this.jobContext, EventName.JOB_COMMIT));
            } finally {
                long endTime = System.currentTimeMillis();
                jobState.setEndTime(endTime);
                jobState.setDuration(endTime - jobState.getStartTime());
            }
        } catch (Throwable t) {
            jobState.setState(JobState.RunningState.FAILED);
            String errMsg = "Failed to launch and run job " + jobId;
            LOG.error(errMsg + ": " + t, t);
        } finally {
            try {
                TimingEvent jobCleanupTimer = this.eventSubmitter.getTimingEvent(TimingEvent.LauncherTimings.JOB_CLEANUP);
                cleanupStagingData(jobState);
                jobCleanupTimer.stop(this.eventMetadataGenerator.getMetadata(this.jobContext, EventName.JOB_CLEANUP));
                // Write job execution info to the job history store upon job termination
                this.jobContext.storeJobExecutionInfo();
            } finally {
                launchJobTimer.stop(this.eventMetadataGenerator.getMetadata(this.jobContext, EventName.FULL_JOB_EXECUTION));
            }
        }
        for (JobState.DatasetState datasetState : this.jobContext.getDatasetStatesByUrns().values()) {
            // Set the overall job state to FAILED if the job failed to process any dataset
            if (datasetState.getState() == JobState.RunningState.FAILED) {
                jobState.setState(JobState.RunningState.FAILED);
                LOG.warn("At least one dataset state is FAILED. Setting job state to FAILED.");
                break;
            }
        }
        notifyListeners(this.jobContext, jobListener, TimingEvent.LauncherTimings.JOB_COMPLETE, new JobListenerAction() {

            @Override
            public void apply(JobListener jobListener, JobContext jobContext) throws Exception {
                jobListener.onJobCompletion(jobContext);
            }
        });
        if (jobState.getState() == JobState.RunningState.FAILED) {
            notifyListeners(this.jobContext, jobListener, TimingEvent.LauncherTimings.JOB_FAILED, new JobListenerAction() {

                @Override
                public void apply(JobListener jobListener, JobContext jobContext) throws Exception {
                    jobListener.onJobFailure(jobContext);
                }
            });
            throw new JobException(String.format("Job %s failed", jobId));
        }
    } finally {
        // Stop metrics reporting
        if (this.jobContext.getJobMetricsOptional().isPresent()) {
            JobMetrics.remove(jobState);
        }
        MDC.remove(ConfigurationKeys.JOB_NAME_KEY);
        MDC.remove(ConfigurationKeys.JOB_KEY_KEY);
    }
}
Also used : Closer(com.google.common.io.Closer) WorkUnitStream(org.apache.gobblin.source.workunit.WorkUnitStream) BasicWorkUnitStream(org.apache.gobblin.source.workunit.BasicWorkUnitStream) WorkUnitState(org.apache.gobblin.configuration.WorkUnitState) TimingEvent(org.apache.gobblin.metrics.event.TimingEvent) CloseableJobListener(org.apache.gobblin.runtime.listeners.CloseableJobListener) JobListener(org.apache.gobblin.runtime.listeners.JobListener) JobLockException(org.apache.gobblin.runtime.locks.JobLockException) IOException(java.io.IOException) WorkUnitStreamSource(org.apache.gobblin.source.WorkUnitStreamSource) MultiWorkUnit(org.apache.gobblin.source.workunit.MultiWorkUnit) WorkUnit(org.apache.gobblin.source.workunit.WorkUnit)
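
For context on what the launcher above consumes from source.getWorkunits(jobState), here is a minimal, hypothetical Source sketch that emits one WorkUnit per logical partition. The class name, the namespace/table names, and the example.partition.id property are illustrative and not part of Gobblin; the Extract constructor and WorkUnit.create factory are used as found in the Gobblin API, but this is a sketch under those assumptions rather than production code.

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

import org.apache.gobblin.configuration.SourceState;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.source.Source;
import org.apache.gobblin.source.extractor.Extractor;
import org.apache.gobblin.source.workunit.Extract;
import org.apache.gobblin.source.workunit.WorkUnit;

// Hypothetical source: emits one WorkUnit per logical partition.
public class ExamplePartitionedSource implements Source<String, String> {

    @Override
    public List<WorkUnit> getWorkunits(SourceState state) {
        List<WorkUnit> workUnits = new ArrayList<>();
        // One Extract shared by all work units of this table snapshot.
        Extract extract = new Extract(Extract.TableType.SNAPSHOT_ONLY, "example.namespace", "example_table");
        for (int partition = 0; partition < 4; partition++) {
            WorkUnit workUnit = WorkUnit.create(extract);
            // Per-work-unit configuration travels with the WorkUnit and is visible to the task.
            workUnit.setProp("example.partition.id", partition);
            workUnits.add(workUnit);
        }
        return workUnits;
    }

    @Override
    public Extractor<String, String> getExtractor(WorkUnitState state) throws IOException {
        // Omitted from this sketch; a real source returns an Extractor bound to the work unit.
        throw new UnsupportedOperationException("Extractor omitted from this sketch");
    }

    @Override
    public void shutdown(SourceState state) {
        // Nothing to clean up in this sketch.
    }
}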

Example 22 with WorkUnit

use of org.apache.gobblin.source.workunit.WorkUnit in project incubator-gobblin by apache.

the class MRTaskStateTracker method onTaskCommitCompletion.

@Override
public void onTaskCommitCompletion(Task task) {
    WorkUnit workUnit = task.getTaskState().getWorkunit();
    if (GobblinMetrics.isEnabled(workUnit)) {
        task.updateRecordMetrics();
        task.updateByteMetrics();
        if (workUnit.getPropAsBoolean(ConfigurationKeys.MR_REPORT_METRICS_AS_COUNTERS_KEY, ConfigurationKeys.DEFAULT_MR_REPORT_METRICS_AS_COUNTERS)) {
            updateCounters(task);
        }
    }
    LOG.info(String.format("Task %s completed running in %dms with state %s", task.getTaskId(), task.getTaskState().getTaskDuration(), task.getTaskState().getWorkingState()));
}
Also used : WorkUnit(org.apache.gobblin.source.workunit.WorkUnit)
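
The tracker above keys its behavior off properties carried on the WorkUnit. As a hedged sketch (WorkUnit.createEmpty() is assumed from the Gobblin API; the keys are the same ConfigurationKeys constants referenced in the method), this shows how such a flag travels with a work unit and is read back:

import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.source.workunit.WorkUnit;

public class WorkUnitMetricsFlagExample {

    public static void main(String[] args) {
        // Illustrative: enable reporting of task metrics as Hadoop counters on one work unit.
        WorkUnit workUnit = WorkUnit.createEmpty();
        workUnit.setProp(ConfigurationKeys.MR_REPORT_METRICS_AS_COUNTERS_KEY, Boolean.TRUE.toString());

        // Mirrors the check in MRTaskStateTracker.onTaskCommitCompletion() above.
        boolean reportAsCounters = workUnit.getPropAsBoolean(
            ConfigurationKeys.MR_REPORT_METRICS_AS_COUNTERS_KEY,
            ConfigurationKeys.DEFAULT_MR_REPORT_METRICS_AS_COUNTERS);
        System.out.println("Report metrics as counters: " + reportAsCounters);
    }
}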

Example 23 with WorkUnit

use of org.apache.gobblin.source.workunit.WorkUnit in project incubator-gobblin by apache.

the class WatermarkTest method testWatermarkWorkUnitSerialization.

@Test
public void testWatermarkWorkUnitSerialization() {
    long lowWatermarkValue = 0;
    long expectedHighWatermarkValue = 100;
    TestWatermark lowWatermark = new TestWatermark();
    lowWatermark.setLongWatermark(lowWatermarkValue);
    TestWatermark expectedHighWatermark = new TestWatermark();
    expectedHighWatermark.setLongWatermark(expectedHighWatermarkValue);
    WatermarkInterval watermarkInterval = new WatermarkInterval(lowWatermark, expectedHighWatermark);
    WorkUnit workUnit = new WorkUnit(null, null, watermarkInterval);
    TestWatermark deserializedLowWatermark = WatermarkSerializerHelper.convertJsonToWatermark(workUnit.getLowWatermark(), TestWatermark.class);
    TestWatermark deserializedExpectedHighWatermark = WatermarkSerializerHelper.convertJsonToWatermark(workUnit.getExpectedHighWatermark(), TestWatermark.class);
    Assert.assertEquals(deserializedLowWatermark.getLongWatermark(), lowWatermarkValue);
    Assert.assertEquals(deserializedExpectedHighWatermark.getLongWatermark(), expectedHighWatermarkValue);
}
Also used : WorkUnit(org.apache.gobblin.source.workunit.WorkUnit) Test(org.testng.annotations.Test)
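
The same round trip works with Gobblin's stock LongWatermark rather than the test-only TestWatermark. The sketch below is illustrative: the import locations for LongWatermark and WatermarkSerializerHelper are assumed, and the three-argument WorkUnit constructor mirrors the one used in the test above.

import org.apache.gobblin.source.extractor.WatermarkInterval;
import org.apache.gobblin.source.extractor.WatermarkSerializerHelper;
import org.apache.gobblin.source.extractor.extract.LongWatermark;
import org.apache.gobblin.source.workunit.WorkUnit;

public class LongWatermarkWorkUnitExample {

    public static void main(String[] args) {
        // Carry a [0, 100] watermark interval on a work unit.
        WatermarkInterval interval = new WatermarkInterval(new LongWatermark(0L), new LongWatermark(100L));
        WorkUnit workUnit = new WorkUnit(null, null, interval);

        // Read the serialized watermarks back, as an extractor would.
        LongWatermark low = WatermarkSerializerHelper.convertJsonToWatermark(workUnit.getLowWatermark(), LongWatermark.class);
        LongWatermark expectedHigh = WatermarkSerializerHelper.convertJsonToWatermark(workUnit.getExpectedHighWatermark(), LongWatermark.class);

        System.out.println("low=" + low.getValue() + ", expectedHigh=" + expectedHigh.getValue());
    }
}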

Example 24 with WorkUnit

use of org.apache.gobblin.source.workunit.WorkUnit in project incubator-gobblin by apache.

the class CopySource method getWorkunits.

/**
 * <ul>
 * Does the following:
 * <li>Instantiate a {@link DatasetsFinder}.
 * <li>Find all {@link Dataset} using {@link DatasetsFinder}.
 * <li>For each {@link CopyableDataset} get all {@link CopyEntity}s.
 * <li>Create a {@link WorkUnit} per {@link CopyEntity}.
 * </ul>
 *
 * <p>
 * In this implementation, one workunit is created for every {@link CopyEntity} found, but the extractors, converters,
 * and writers are built to support multiple {@link CopyEntity}s per workunit.
 * </p>
 *
 * @param state see {@link org.apache.gobblin.configuration.SourceState}
 * @return Work units for copying files.
 */
@Override
public List<WorkUnit> getWorkunits(final SourceState state) {
    this.metricContext = Instrumented.getMetricContext(state, CopySource.class);
    this.lineageInfo = LineageInfo.getLineageInfo(state.getBroker());
    try {
        DeprecationUtils.renameDeprecatedKeys(state, CopyConfiguration.MAX_COPY_PREFIX + "." + CopyResourcePool.ENTITIES_KEY, Lists.newArrayList(MAX_FILES_COPIED_KEY));
        final FileSystem sourceFs = HadoopUtils.getSourceFileSystem(state);
        final FileSystem targetFs = HadoopUtils.getWriterFileSystem(state, 1, 0);
        state.setProp(SlaEventKeys.SOURCE_URI, sourceFs.getUri());
        state.setProp(SlaEventKeys.DESTINATION_URI, targetFs.getUri());
        log.info("Identified source file system at {} and target file system at {}.", sourceFs.getUri(), targetFs.getUri());
        long maxSizePerBin = state.getPropAsLong(MAX_SIZE_MULTI_WORKUNITS, 0);
        long maxWorkUnitsPerMultiWorkUnit = state.getPropAsLong(MAX_WORK_UNITS_PER_BIN, 50);
        final long minWorkUnitWeight = Math.max(1, maxSizePerBin / maxWorkUnitsPerMultiWorkUnit);
        final Optional<CopyableFileWatermarkGenerator> watermarkGenerator = CopyableFileWatermarkHelper.getCopyableFileWatermarkGenerator(state);
        int maxThreads = state.getPropAsInt(MAX_CONCURRENT_LISTING_SERVICES, DEFAULT_MAX_CONCURRENT_LISTING_SERVICES);
        final CopyConfiguration copyConfiguration = CopyConfiguration.builder(targetFs, state.getProperties()).build();
        this.eventSubmitter = new EventSubmitter.Builder(this.metricContext, CopyConfiguration.COPY_PREFIX).build();
        DatasetsFinder<CopyableDatasetBase> datasetFinder = DatasetUtils.instantiateDatasetFinder(state.getProperties(), sourceFs, DEFAULT_DATASET_PROFILE_CLASS_KEY, this.eventSubmitter, state);
        IterableDatasetFinder<CopyableDatasetBase> iterableDatasetFinder = datasetFinder instanceof IterableDatasetFinder ? (IterableDatasetFinder<CopyableDatasetBase>) datasetFinder : new IterableDatasetFinderImpl<>(datasetFinder);
        Iterator<CopyableDatasetRequestor> requestorIteratorWithNulls = Iterators.transform(iterableDatasetFinder.getDatasetsIterator(), new CopyableDatasetRequestor.Factory(targetFs, copyConfiguration, log));
        Iterator<CopyableDatasetRequestor> requestorIterator = Iterators.filter(requestorIteratorWithNulls, Predicates.<CopyableDatasetRequestor>notNull());
        final SetMultimap<FileSet<CopyEntity>, WorkUnit> workUnitsMap = Multimaps.<FileSet<CopyEntity>, WorkUnit>synchronizedSetMultimap(HashMultimap.<FileSet<CopyEntity>, WorkUnit>create());
        RequestAllocator<FileSet<CopyEntity>> allocator = createRequestAllocator(copyConfiguration, maxThreads);
        Iterator<FileSet<CopyEntity>> prioritizedFileSets = allocator.allocateRequests(requestorIterator, copyConfiguration.getMaxToCopy());
        // Submit alertable events for unfulfilled requests
        submitUnfulfilledRequestEvents(allocator);
        Iterator<Callable<Void>> callableIterator = Iterators.transform(prioritizedFileSets, new Function<FileSet<CopyEntity>, Callable<Void>>() {

            @Nullable
            @Override
            public Callable<Void> apply(FileSet<CopyEntity> input) {
                return new FileSetWorkUnitGenerator((CopyableDatasetBase) input.getDataset(), input, state, workUnitsMap, watermarkGenerator, minWorkUnitWeight);
            }
        });
        try {
            List<Future<Void>> futures = new IteratorExecutor<>(callableIterator, maxThreads, ExecutorsUtils.newDaemonThreadFactory(Optional.of(log), Optional.of("Copy-file-listing-pool-%d"))).execute();
            for (Future<Void> future : futures) {
                try {
                    future.get();
                } catch (ExecutionException exc) {
                    log.error("Failed to get work units for dataset.", exc.getCause());
                }
            }
        } catch (InterruptedException ie) {
            log.error("Retrieval of work units was interrupted. Aborting.");
            return Lists.newArrayList();
        }
        log.info(String.format("Created %s workunits ", workUnitsMap.size()));
        copyConfiguration.getCopyContext().logCacheStatistics();
        if (state.contains(SIMULATE) && state.getPropAsBoolean(SIMULATE)) {
            log.info("Simulate mode enabled. Will not execute the copy.");
            for (Map.Entry<FileSet<CopyEntity>, Collection<WorkUnit>> entry : workUnitsMap.asMap().entrySet()) {
                log.info(String.format("Actions for dataset %s file set %s.", entry.getKey().getDataset().datasetURN(), entry.getKey().getName()));
                for (WorkUnit workUnit : entry.getValue()) {
                    CopyEntity copyEntity = deserializeCopyEntity(workUnit);
                    log.info(copyEntity.explain());
                }
            }
            return Lists.newArrayList();
        }
        List<? extends WorkUnit> workUnits = new WorstFitDecreasingBinPacking(maxSizePerBin).pack(Lists.newArrayList(workUnitsMap.values()), this.weighter);
        log.info(String.format("Bin packed work units. Initial work units: %d, packed work units: %d, max weight per bin: %d, " + "max work units per bin: %d.", workUnitsMap.size(), workUnits.size(), maxSizePerBin, maxWorkUnitsPerMultiWorkUnit));
        return ImmutableList.copyOf(workUnits);
    } catch (IOException e) {
        throw new RuntimeException(e);
    }
}
Also used : IterableDatasetFinder(org.apache.gobblin.dataset.IterableDatasetFinder) Callable(java.util.concurrent.Callable) WorstFitDecreasingBinPacking(org.apache.gobblin.util.binpacking.WorstFitDecreasingBinPacking) FileSystem(org.apache.hadoop.fs.FileSystem) ExecutionException(java.util.concurrent.ExecutionException) CopyableDatasetRequestor(org.apache.gobblin.data.management.partition.CopyableDatasetRequestor) FileSet(org.apache.gobblin.data.management.partition.FileSet) IOException(java.io.IOException) CopyableFileWatermarkGenerator(org.apache.gobblin.data.management.copy.watermark.CopyableFileWatermarkGenerator) Future(java.util.concurrent.Future) Collection(java.util.Collection) WorkUnit(org.apache.gobblin.source.workunit.WorkUnit) Map(java.util.Map) ImmutableMap(com.google.common.collect.ImmutableMap) Nullable(javax.annotation.Nullable)
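
After bin packing, several small WorkUnits may be wrapped into a single MultiWorkUnit. As a hedged illustration (MultiWorkUnit.createEmpty(), addWorkUnit(), and getWorkUnits() are assumed from the Gobblin API; the example.file.index property is made up), the bundling shape looks roughly like this:

import java.util.List;

import org.apache.gobblin.source.workunit.MultiWorkUnit;
import org.apache.gobblin.source.workunit.WorkUnit;

public class MultiWorkUnitBundlingExample {

    public static void main(String[] args) {
        MultiWorkUnit bundle = MultiWorkUnit.createEmpty();
        for (int i = 0; i < 3; i++) {
            WorkUnit workUnit = WorkUnit.createEmpty();
            // Illustrative property, not a Gobblin key.
            workUnit.setProp("example.file.index", i);
            bundle.addWorkUnit(workUnit);
        }

        // A MultiWorkUnit is itself a WorkUnit, so it flows through the same launcher path;
        // tasks later unbundle it into its constituent work units.
        List<WorkUnit> unbundled = bundle.getWorkUnits();
        System.out.println("Bundled " + unbundled.size() + " work units into one MultiWorkUnit");
    }
}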

Example 25 with WorkUnit

use of org.apache.gobblin.source.workunit.WorkUnit in project incubator-gobblin by apache.

the class PartitionLevelWatermarkerTest method testStateStoreReadWrite.

@Test
public void testStateStoreReadWrite() throws Exception {
    String dbName = "testStateStoreReadWrite";
    LocalHiveMetastoreTestUtils.getInstance().dropDatabaseIfExists(dbName);
    PartitionLevelWatermarker watermarker0 = new PartitionLevelWatermarker(new SourceState());
    Table mockTable = localTestTable(dbName, "table1", true);
    watermarker0.onTableProcessBegin(mockTable, 0l);
    long now = new DateTime().getMillis();
    watermarker0.onPartitionProcessBegin(localTestPartition(mockTable, ImmutableList.of("2016")), 0, now);
    List<WorkUnit> workunits = Lists.newArrayList();
    watermarker0.onGetWorkunitsEnd(workunits);
    @SuppressWarnings("deprecation") WorkUnitState previousWus = new WorkUnitState(workunits.get(0));
    watermarker0.setActualHighWatermark(previousWus);
    SourceState state = new SourceState(new State(), Lists.newArrayList(previousWus));
    PartitionLevelWatermarker watermarker = new PartitionLevelWatermarker(state);
    Assert.assertEquals(watermarker.getPreviousWatermarks().size(), 1);
    Assert.assertEquals(watermarker.getPreviousWatermarks().get(dbName + "@table1"), ImmutableMap.of("2016", now));
}
Also used : SourceState(org.apache.gobblin.configuration.SourceState) Table(org.apache.hadoop.hive.ql.metadata.Table) WorkUnitState(org.apache.gobblin.configuration.WorkUnitState) State(org.apache.gobblin.configuration.State) WorkUnit(org.apache.gobblin.source.workunit.WorkUnit) DateTime(org.joda.time.DateTime) Test(org.testng.annotations.Test)
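
Related to the watermark round trip above, a task normally records the watermark it actually reached on its WorkUnitState. The sketch below is illustrative: the single-argument WorkUnitState constructor is the deprecated one used in the test, and setActualHighWatermark, getActualHighWatermark, and LongWatermark are assumed from the Gobblin API.

import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.source.extractor.extract.LongWatermark;
import org.apache.gobblin.source.workunit.WorkUnit;

public class ActualHighWatermarkExample {

    @SuppressWarnings("deprecation")
    public static void main(String[] args) {
        WorkUnit workUnit = WorkUnit.createEmpty();
        WorkUnitState workUnitState = new WorkUnitState(workUnit);

        // After extraction finishes, the task records how far it actually got;
        // the next run derives its low watermark from this value.
        workUnitState.setActualHighWatermark(new LongWatermark(100L));
        System.out.println("Recorded actual high watermark: " + workUnitState.getActualHighWatermark());
    }
}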

Aggregations

WorkUnit (org.apache.gobblin.source.workunit.WorkUnit): 133 usages
Test (org.testng.annotations.Test): 59 usages
SourceState (org.apache.gobblin.configuration.SourceState): 40 usages
WorkUnitState (org.apache.gobblin.configuration.WorkUnitState): 40 usages
MultiWorkUnit (org.apache.gobblin.source.workunit.MultiWorkUnit): 35 usages
Extract (org.apache.gobblin.source.workunit.Extract): 24 usages
Path (org.apache.hadoop.fs.Path): 19 usages
State (org.apache.gobblin.configuration.State): 13 usages
IOException (java.io.IOException): 11 usages
ArrayList (java.util.ArrayList): 10 usages
Closer (com.google.common.io.Closer): 9 usages
Properties (java.util.Properties): 9 usages
WatermarkInterval (org.apache.gobblin.source.extractor.WatermarkInterval): 8 usages
List (java.util.List): 7 usages
Table (org.apache.hadoop.hive.ql.metadata.Table): 7 usages
ImmutableMap (com.google.common.collect.ImmutableMap): 6 usages
Config (com.typesafe.config.Config): 6 usages
File (java.io.File): 6 usages
IterableDatasetFinder (org.apache.gobblin.dataset.IterableDatasetFinder): 6 usages
WorkUnitStream (org.apache.gobblin.source.workunit.WorkUnitStream): 6 usages