use of org.apache.flink.runtime.rest.handler.legacy.metrics.MetricFetcher in project flink by apache.
the class DefaultDispatcherResourceManagerComponentFactory method create.
@Override
public DispatcherResourceManagerComponent create(
        Configuration configuration,
        ResourceID resourceId,
        Executor ioExecutor,
        RpcService rpcService,
        HighAvailabilityServices highAvailabilityServices,
        BlobServer blobServer,
        HeartbeatServices heartbeatServices,
        MetricRegistry metricRegistry,
        ExecutionGraphInfoStore executionGraphInfoStore,
        MetricQueryServiceRetriever metricQueryServiceRetriever,
        FatalErrorHandler fatalErrorHandler)
        throws Exception {
LeaderRetrievalService dispatcherLeaderRetrievalService = null;
LeaderRetrievalService resourceManagerRetrievalService = null;
WebMonitorEndpoint<?> webMonitorEndpoint = null;
ResourceManagerService resourceManagerService = null;
DispatcherRunner dispatcherRunner = null;
try {
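// Obtain leader retrieval services for the Dispatcher and the ResourceManager from the HA services.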
dispatcherLeaderRetrievalService = highAvailabilityServices.getDispatcherLeaderRetriever();
resourceManagerRetrievalService = highAvailabilityServices.getResourceManagerLeaderRetriever();
final LeaderGatewayRetriever<DispatcherGateway> dispatcherGatewayRetriever =
        new RpcGatewayRetriever<>(
                rpcService,
                DispatcherGateway.class,
                DispatcherId::fromUuid,
                new ExponentialBackoffRetryStrategy(12, Duration.ofMillis(10), Duration.ofMillis(50)));
final LeaderGatewayRetriever<ResourceManagerGateway> resourceManagerGatewayRetriever =
        new RpcGatewayRetriever<>(
                rpcService,
                ResourceManagerGateway.class,
                ResourceManagerId::fromUuid,
                new ExponentialBackoffRetryStrategy(12, Duration.ofMillis(10), Duration.ofMillis(50)));
final ScheduledExecutorService executor =
        WebMonitorEndpoint.createExecutorService(
                configuration.getInteger(RestOptions.SERVER_NUM_THREADS),
                configuration.getInteger(RestOptions.SERVER_THREAD_PRIORITY),
                "DispatcherRestEndpoint");
final long updateInterval = configuration.getLong(MetricOptions.METRIC_FETCHER_UPDATE_INTERVAL);
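// An update interval of 0 disables metric fetching, so the no-op VoidMetricFetcher is used instead.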
final MetricFetcher metricFetcher =
        updateInterval == 0
                ? VoidMetricFetcher.INSTANCE
                : MetricFetcherImpl.fromConfiguration(
                        configuration,
                        metricQueryServiceRetriever,
                        dispatcherGatewayRetriever,
                        executor);
webMonitorEndpoint =
        restEndpointFactory.createRestEndpoint(
                configuration,
                dispatcherGatewayRetriever,
                resourceManagerGatewayRetriever,
                blobServer,
                executor,
                metricFetcher,
                highAvailabilityServices.getClusterRestEndpointLeaderElectionService(),
                fatalErrorHandler);
log.debug("Starting Dispatcher REST endpoint.");
webMonitorEndpoint.start();
final String hostname = RpcUtils.getHostname(rpcService);
resourceManagerService =
        ResourceManagerServiceImpl.create(
                resourceManagerFactory,
                configuration,
                resourceId,
                rpcService,
                highAvailabilityServices,
                heartbeatServices,
                fatalErrorHandler,
                new ClusterInformation(hostname, blobServer.getPort()),
                webMonitorEndpoint.getRestBaseUrl(),
                metricRegistry,
                hostname,
                ioExecutor);
final HistoryServerArchivist historyServerArchivist = HistoryServerArchivist.createHistoryServerArchivist(configuration, webMonitorEndpoint, ioExecutor);
final DispatcherOperationCaches dispatcherOperationCaches = new DispatcherOperationCaches(configuration.get(RestOptions.ASYNC_OPERATION_STORE_DURATION));
final PartialDispatcherServices partialDispatcherServices =
        new PartialDispatcherServices(
                configuration,
                highAvailabilityServices,
                resourceManagerGatewayRetriever,
                blobServer,
                heartbeatServices,
                () -> JobManagerMetricGroup.createJobManagerMetricGroup(metricRegistry, hostname),
                executionGraphInfoStore,
                fatalErrorHandler,
                historyServerArchivist,
                metricRegistry.getMetricQueryServiceGatewayRpcAddress(),
                ioExecutor,
                dispatcherOperationCaches);
log.debug("Starting Dispatcher.");
dispatcherRunner =
        dispatcherRunnerFactory.createDispatcherRunner(
                highAvailabilityServices.getDispatcherLeaderElectionService(),
                fatalErrorHandler,
                new HaServicesJobPersistenceComponentFactory(highAvailabilityServices),
                ioExecutor,
                rpcService,
                partialDispatcherServices);
log.debug("Starting ResourceManagerService.");
resourceManagerService.start();
resourceManagerRetrievalService.start(resourceManagerGatewayRetriever);
dispatcherLeaderRetrievalService.start(dispatcherGatewayRetriever);
return new DispatcherResourceManagerComponent(
        dispatcherRunner,
        resourceManagerService,
        dispatcherLeaderRetrievalService,
        resourceManagerRetrievalService,
        webMonitorEndpoint,
        fatalErrorHandler,
        dispatcherOperationCaches);
} catch (Exception exception) {
// clean up all started components
if (dispatcherLeaderRetrievalService != null) {
try {
dispatcherLeaderRetrievalService.stop();
} catch (Exception e) {
exception = ExceptionUtils.firstOrSuppressed(e, exception);
}
}
if (resourceManagerRetrievalService != null) {
try {
resourceManagerRetrievalService.stop();
} catch (Exception e) {
exception = ExceptionUtils.firstOrSuppressed(e, exception);
}
}
final Collection<CompletableFuture<Void>> terminationFutures = new ArrayList<>(3);
if (webMonitorEndpoint != null) {
terminationFutures.add(webMonitorEndpoint.closeAsync());
}
if (resourceManagerService != null) {
terminationFutures.add(resourceManagerService.closeAsync());
}
if (dispatcherRunner != null) {
terminationFutures.add(dispatcherRunner.closeAsync());
}
final FutureUtils.ConjunctFuture<Void> terminationFuture = FutureUtils.completeAll(terminationFutures);
try {
terminationFuture.get();
} catch (Exception e) {
exception = ExceptionUtils.firstOrSuppressed(e, exception);
}
throw new FlinkException("Could not create the DispatcherResourceManagerComponent.", exception);
}
}
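For orientation, the REST handlers shown in the following examples consume the fetcher through its MetricStore. Below is a minimal sketch of that lookup pattern, using only the MetricFetcher and MetricStore calls that appear in these excerpts (update, getMetricStore, getTaskMetricStore); the helper name and variables are illustrative, not part of Flink.
// Illustrative helper (not Flink code): refresh the fetcher and read the
// task-level metrics for one job vertex. getTaskMetricStore may return null
// if no metrics have been reported for that job/vertex yet.
static MetricStore.TaskMetricStore lookupTaskMetrics(
        MetricFetcher metricFetcher, JobID jobId, JobVertexID vertexId) {
    metricFetcher.update();
    return metricFetcher.getMetricStore()
            .getTaskMetricStore(jobId.toString(), vertexId.toString());
}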
use of org.apache.flink.runtime.rest.handler.legacy.metrics.MetricFetcher in project flink by apache.
the class JobVertexWatermarksHandlerTest method before.
@Before
public void before() throws Exception {
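// Stub a MetricStore that hands back the task-level store for the test job and vertex.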
taskMetricStore = Mockito.mock(MetricStore.TaskMetricStore.class);
MetricStore metricStore = Mockito.mock(MetricStore.class);
Mockito.when(metricStore.getTaskMetricStore(TEST_JOB_ID.toString(), TEST_VERTEX_ID.toString())).thenReturn(taskMetricStore);
metricFetcher = Mockito.mock(MetricFetcher.class);
Mockito.when(metricFetcher.getMetricStore()).thenReturn(metricStore);
watermarkHandler =
        new JobVertexWatermarksHandler(
                Mockito.mock(LeaderGatewayRetriever.class),
                Time.seconds(1),
                Collections.emptyMap(),
                metricFetcher,
                NoOpExecutionGraphCache.INSTANCE,
                Mockito.mock(Executor.class));
final Map<String, String> pathParameters = new HashMap<>();
pathParameters.put(JobIDPathParameter.KEY, TEST_JOB_ID.toString());
pathParameters.put(JobVertexIdPathParameter.KEY, TEST_VERTEX_ID.toString());
request = HandlerRequest.resolveParametersAndCreate(EmptyRequestBody.getInstance(), new JobVertexMessageParameters(), pathParameters, Collections.emptyMap(), Collections.emptyList());
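// Mock a job vertex with two subtasks whose watermarks the handler will look up.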
vertex = Mockito.mock(AccessExecutionJobVertex.class);
Mockito.when(vertex.getJobVertexId()).thenReturn(TEST_VERTEX_ID);
AccessExecutionVertex firstTask = Mockito.mock(AccessExecutionVertex.class);
AccessExecutionVertex secondTask = Mockito.mock(AccessExecutionVertex.class);
Mockito.when(firstTask.getParallelSubtaskIndex()).thenReturn(0);
Mockito.when(secondTask.getParallelSubtaskIndex()).thenReturn(1);
AccessExecutionVertex[] accessExecutionVertices = { firstTask, secondTask };
Mockito.when(vertex.getTaskVertices()).thenReturn(accessExecutionVertices);
}
use of org.apache.flink.runtime.rest.handler.legacy.metrics.MetricFetcher in project flink by apache.
the class SubtaskCurrentAttemptDetailsHandlerTest method testHandleRequest.
@Test
public void testHandleRequest() throws Exception {
// Prepare the execution graph.
final JobID jobID = new JobID();
final JobVertexID jobVertexID = new JobVertexID();
// The testing subtask.
final long deployingTs = System.currentTimeMillis() - 1024;
final long finishedTs = System.currentTimeMillis();
final long bytesIn = 1L;
final long bytesOut = 10L;
final long recordsIn = 20L;
final long recordsOut = 30L;
final IOMetrics ioMetrics = new IOMetrics(bytesIn, bytesOut, recordsIn, recordsOut);
final long[] timestamps = new long[ExecutionState.values().length];
timestamps[ExecutionState.DEPLOYING.ordinal()] = deployingTs;
final ExecutionState expectedState = ExecutionState.FINISHED;
timestamps[expectedState.ordinal()] = finishedTs;
final LocalTaskManagerLocation assignedResourceLocation = new LocalTaskManagerLocation();
final AllocationID allocationID = new AllocationID();
final int subtaskIndex = 1;
final int attempt = 2;
final ArchivedExecution execution =
        new ArchivedExecution(
                new StringifiedAccumulatorResult[0],
                ioMetrics,
                new ExecutionAttemptID(),
                attempt,
                expectedState,
                null,
                assignedResourceLocation,
                allocationID,
                subtaskIndex,
                timestamps);
final ArchivedExecutionVertex executionVertex =
        new ArchivedExecutionVertex(
                subtaskIndex, "Test archived execution vertex", execution, new EvictingBoundedList<>(0));
// Instance the handler.
final RestHandlerConfiguration restHandlerConfiguration = RestHandlerConfiguration.fromConfiguration(new Configuration());
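// A MetricFetcherImpl whose retrievers return null; it fetches no real metrics, the handler only needs a fetcher instance.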
final MetricFetcher metricFetcher =
        new MetricFetcherImpl<>(
                () -> null,
                address -> null,
                TestingUtils.defaultExecutor(),
                Time.milliseconds(1000L),
                MetricOptions.METRIC_FETCHER_UPDATE_INTERVAL.defaultValue());
final SubtaskCurrentAttemptDetailsHandler handler =
        new SubtaskCurrentAttemptDetailsHandler(
                () -> null,
                Time.milliseconds(100),
                Collections.emptyMap(),
                SubtaskCurrentAttemptDetailsHeaders.getInstance(),
                new DefaultExecutionGraphCache(
                        restHandlerConfiguration.getTimeout(),
                        Time.milliseconds(restHandlerConfiguration.getRefreshInterval())),
                TestingUtils.defaultExecutor(),
                metricFetcher);
final HashMap<String, String> receivedPathParameters = new HashMap<>(2);
receivedPathParameters.put(JobIDPathParameter.KEY, jobID.toString());
receivedPathParameters.put(JobVertexIdPathParameter.KEY, jobVertexID.toString());
final HandlerRequest<EmptyRequestBody> request = HandlerRequest.resolveParametersAndCreate(EmptyRequestBody.getInstance(), new SubtaskMessageParameters(), receivedPathParameters, Collections.emptyMap(), Collections.emptyList());
// Handle request.
final SubtaskExecutionAttemptDetailsInfo detailsInfo = handler.handleRequest(request, executionVertex);
// Verify
final IOMetricsInfo ioMetricsInfo = new IOMetricsInfo(bytesIn, true, bytesOut, true, recordsIn, true, recordsOut, true);
final SubtaskExecutionAttemptDetailsInfo expectedDetailsInfo =
        new SubtaskExecutionAttemptDetailsInfo(
                subtaskIndex,
                expectedState,
                attempt,
                assignedResourceLocation.getHostname(),
                deployingTs,
                finishedTs,
                finishedTs - deployingTs,
                ioMetricsInfo,
                assignedResourceLocation.getResourceID().getResourceIdString());
assertEquals(expectedDetailsInfo, detailsInfo);
}
use of org.apache.flink.runtime.rest.handler.legacy.metrics.MetricFetcher in project flink by apache.
the class JobVertexBackPressureHandlerTest method setUp.
@Before
public void setUp() {
metricStore = new MetricStore();
for (MetricDump metricDump : getMetricDumps()) {
metricStore.add(metricDump);
}
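// Hand-rolled MetricFetcher backed directly by the pre-populated store; update() only counts invocations,
// and getLastUpdateTime() reports that count.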
jobVertexBackPressureHandler =
        new JobVertexBackPressureHandler(
                () -> CompletableFuture.completedFuture(restfulGateway),
                Time.seconds(10),
                Collections.emptyMap(),
                JobVertexBackPressureHeaders.getInstance(),
                new MetricFetcher() {

                    private long updateCount = 0;

                    @Override
                    public MetricStore getMetricStore() {
                        return metricStore;
                    }

                    @Override
                    public void update() {
                        updateCount++;
                    }

                    @Override
                    public long getLastUpdateTime() {
                        return updateCount;
                    }
                });
}
use of org.apache.flink.runtime.rest.handler.legacy.metrics.MetricFetcher in project flink by apache.
the class AggregatingMetricsHandlerTestBase method setUp.
@Before
public void setUp() throws Exception {
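// Real MetricFetcherImpl over mocked retrievers; the test populates its MetricStore directly below.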
MetricFetcher fetcher =
        new MetricFetcherImpl<RestfulGateway>(
                mock(GatewayRetriever.class),
                mock(MetricQueryServiceRetriever.class),
                Executors.directExecutor(),
                TestingUtils.TIMEOUT,
                MetricOptions.METRIC_FETCHER_UPDATE_INTERVAL.defaultValue());
store = fetcher.getMetricStore();
Collection<MetricDump> metricDumps = getMetricDumps();
for (MetricDump dump : metricDumps) {
store.add(dump);
}
handler = getHandler(LEADER_RETRIEVER, TIMEOUT, TEST_HEADERS, EXECUTOR, fetcher);
pathParameters = getPathParameters();
}