Usage example of com.google.api.services.dataflow.model.JobMessage in the Apache Beam project.
From the class DataflowPipelineJobTest, method infoMessage:
/**
 * Creates a {@link JobMessage} test fixture with the given text, whose time field holds
 * the cloud-time encoding of {@code timestamp}.
 */
private static JobMessage infoMessage(Instant timestamp, String text) {
  JobMessage result = new JobMessage();
  // The two setters are independent; populate text first, then the encoded timestamp.
  result.setMessageText(text);
  result.setTime(TimeUtil.toCloudTime(timestamp));
  return result;
}
Usage example of com.google.api.services.dataflow.model.JobMessage in the Apache Beam project.
From the class DataflowPipelineJobTest, method testWaitUntilFinishNoRepeatedLogs:
/**
 * Tests that a {@link DataflowPipelineJob} does not duplicate messages.
 *
 * <p>Exercises two consecutive {@code waitUntilFinish} calls against a mocked monitor that
 * always returns the same single message, and verifies that the second call passes the
 * first message's timestamp as the lower bound so already-seen messages are excluded.
 */
@Test
public void testWaitUntilFinishNoRepeatedLogs() throws Exception {
DataflowPipelineJob job = new DataflowPipelineJob(mockDataflowClient, JOB_ID, options, null);
Sleeper sleeper = new ZeroSleeper();
NanoClock nanoClock = mock(NanoClock.class);
// Timestamp carried by the single message; expected to become the dedup cutoff later.
Instant separatingTimestamp = new Instant(42L);
JobMessage theMessage = infoMessage(separatingTimestamp, "nothing");
// Monitor stub: every poll returns the same one-element message list.
MonitoringUtil mockMonitor = mock(MonitoringUtil.class);
when(mockMonitor.getJobMessages(anyString(), anyLong())).thenReturn(ImmutableList.of(theMessage));
// The Job just always reports "running" across all calls
Job fakeJob = new Job();
fakeJob.setCurrentState("JOB_STATE_RUNNING");
when(mockDataflowClient.getJob(anyString())).thenReturn(fakeJob);
// After waitUntilFinish the DataflowPipelineJob should record the latest message timestamp
// Clock returns 0s then 2s (nanos), so the 1-second wait below times out after one poll.
when(nanoClock.nanoTime()).thenReturn(0L).thenReturn(2000000000L);
job.waitUntilFinish(Duration.standardSeconds(1), mockHandler, sleeper, nanoClock, mockMonitor);
// The handler must have been given the message exactly once.
verify(mockHandler).process(ImmutableList.of(theMessage));
// Second waitUntilFinish should request jobs with `separatingTimestamp` so the monitor
// will only return new messages
when(nanoClock.nanoTime()).thenReturn(3000000000L).thenReturn(6000000000L);
job.waitUntilFinish(Duration.standardSeconds(1), mockHandler, sleeper, nanoClock, mockMonitor);
// Verify the recorded timestamp was used as the lower-bound argument on the second pass.
verify(mockMonitor).getJobMessages(anyString(), eq(separatingTimestamp.getMillis()));
}
Usage example of com.google.api.services.dataflow.model.JobMessage in the Apache Beam project.
From the class MonitoringUtil, method getJobMessages:
/**
 * Returns the job's messages sorted in ascending order by timestamp.
 *
 * @param jobId the id of the job to get the messages for
 * @param startTimestampMs only messages with a timestamp strictly greater than this value
 *     (in milliseconds) are returned
 * @return collection of messages, oldest first
 */
public List<JobMessage> getJobMessages(String jobId, long startTimestampMs) throws IOException {
  // TODO: Allow filtering messages by importance
  Instant cutoff = new Instant(startTimestampMs);
  ArrayList<JobMessage> collected = new ArrayList<>();
  String pageToken = null;
  // Walk every page of results until the service stops handing back a continuation token.
  do {
    ListJobMessagesResponse response = dataflowClient.listJobMessages(jobId, pageToken);
    // A missing response or message list ends pagination early with whatever was gathered.
    if (response == null || response.getJobMessages() == null) {
      return collected;
    }
    for (JobMessage candidate : response.getJobMessages()) {
      @Nullable Instant messageTime = fromCloudTime(candidate.getTime());
      // Keep only messages with a parseable time that falls strictly after the cutoff.
      if (messageTime != null && messageTime.isAfter(cutoff)) {
        collected.add(candidate);
      }
    }
    pageToken = response.getNextPageToken();
  } while (pageToken != null);
  Collections.sort(collected, new TimeStampComparator());
  return collected;
}
Aggregations