Usage of org.apache.samza.job.model.ContainerModel in the Apache Samza project: the setup method of class TestSamzaObjectMapper.
@Before
public void setup() {
// Build a minimal job model: one task consuming a single SSP, hosted in one container.
TaskName task = new TaskName("test");
Set<SystemStreamPartition> partitions = ImmutableSet.of(new SystemStreamPartition("foo", "bar", new Partition(1)));
TaskModel model = new TaskModel(task, partitions, new Partition(2));
Map<TaskName, TaskModel> taskModels = ImmutableMap.of(task, model);
Map<String, ContainerModel> containers = ImmutableMap.of("1", new ContainerModel("1", taskModels));
Config config = new MapConfig(ImmutableMap.of("a", "b"));
this.jobModel = new JobModel(config, containers);
this.samzaObjectMapper = SamzaObjectMapper.getObjectMapper();
}
Usage of org.apache.samza.job.model.ContainerModel in the Apache Samza project: the setup method of class TestBlobStoreBackupManager.
@Before
public void setup() throws Exception {
// Fix the clock so snapshot timestamps are deterministic across runs.
when(clock.currentTimeMillis()).thenReturn(1234567L);
// setup test local and remote snapshots
indexBlobIdAndLocalRemoteSnapshotsPair = setupRemoteAndLocalSnapshots(true);
// setup test store name and SCMs (store checkpoint markers) map
testStoreNameAndSCMMap = setupTestStoreSCMMapAndStoreBackedFactoryConfig(indexBlobIdAndLocalRemoteSnapshotsPair);
// setup: setup task backup manager with expected storeName->storageEngine map
// (null engines are sufficient here — only the key set is consulted by the manager under test)
testStoreNameAndSCMMap.forEach((storeName, scm) -> storeStorageEngineMap.put(storeName, null));
// job.name/job.id are required by the backup manager to compose blob metadata paths
mapConfig.putAll(new MapConfig(ImmutableMap.of("job.name", jobName, "job.id", jobId)));
Config config = new MapConfig(mapConfig);
// Mock - return snapshot index for blob id from test blob store map.
// The captor records the requested blob id; the Answer looks it up in the in-memory testBlobStore.
ArgumentCaptor<String> captor = ArgumentCaptor.forClass(String.class);
when(blobStoreUtil.getSnapshotIndex(captor.capture(), any(Metadata.class))).then((Answer<CompletableFuture<SnapshotIndex>>) invocation -> {
String blobId = invocation.getArgumentAt(0, String.class);
return CompletableFuture.completedFuture(testBlobStore.get(blobId));
});
// doNothing().when(blobStoreManager).init();
// taskModel.getTaskName() returns a mock whose getTaskName() yields the test task name (deep-stub chain)
when(taskModel.getTaskName().getTaskName()).thenReturn(taskName);
when(taskModel.getTaskMode()).thenReturn(TaskMode.Active);
// Stub every metrics-registry factory method so BlobStoreBackupManagerMetrics construction succeeds.
when(metricsRegistry.newCounter(anyString(), anyString())).thenReturn(counter);
when(metricsRegistry.newGauge(anyString(), anyString(), anyLong())).thenReturn(longGauge);
when(metricsRegistry.newGauge(anyString(), anyString(), any(AtomicLong.class))).thenReturn(atomicLongGauge);
when(atomicLongGauge.getValue()).thenReturn(new AtomicLong());
when(metricsRegistry.newTimer(anyString(), anyString())).thenReturn(timer);
blobStoreTaskBackupMetrics = new BlobStoreBackupManagerMetrics(metricsRegistry);
// System under test: a mock-backed backup manager writing to a fresh temp directory.
blobStoreBackupManager = new MockBlobStoreBackupManager(jobModel, containerModel, taskModel, mockExecutor, blobStoreTaskBackupMetrics, config, Files.createTempDirectory("logged-store-").toFile(), storageManagerUtil, blobStoreManager);
}
Usage of org.apache.samza.job.model.ContainerModel in the Apache Samza project: the getStandbyContainerConstraints method of class StandbyTaskUtil.
/**
 * Given a container ID and a job model, returns the container IDs of all containers that either
 * a. host standby tasks corresponding to active tasks on the given container, or
 * b. host active tasks corresponding to standby tasks on the given container.
 * This is used to ensure that an active task and all its corresponding standby tasks are on separate hosts, and
 * that standby tasks corresponding to the same active task are on separate hosts.
 */
public static List<String> getStandbyContainerConstraints(String containerID, JobModel jobModel) {
ContainerModel givenContainer = jobModel.getContainers().get(containerID);
List<String> constrainedContainerIds = new ArrayList<>();
// Scan every other container in the job model and record those whose
// active/standby tasks overlap with the given container's tasks.
jobModel.getContainers().values().forEach(candidate -> {
if (!givenContainer.equals(candidate) && checkTaskOverlap(givenContainer, candidate)) {
constrainedContainerIds.add(candidate.getId());
}
});
return constrainedContainerIds;
}
Usage of org.apache.samza.job.model.ContainerModel in the Apache Samza project: the getGrouperMetadata method of class ZkJobCoordinator.
/**
 * Builds the {@link GrouperMetadataImpl} based upon the provided {@code jobModelVersion}
 * and {@code processorNodes}.
 * @param jobModelVersion the most recent jobModelVersion available in the zookeeper; may be null when no prior model exists.
 * @param processorNodes the list of live processors in the zookeeper.
 * @return the built grouper metadata.
 */
private GrouperMetadataImpl getGrouperMetadata(String jobModelVersion, List<ProcessorNode> processorNodes) {
Map<TaskName, String> taskToProcessorId = new HashMap<>();
Map<TaskName, List<SystemStreamPartition>> taskToSSPs = new HashMap<>();
if (jobModelVersion != null) {
JobModel jobModel = readJobModelFromMetadataStore(jobModelVersion);
// For every task in the previous job model, record which container owned it
// and which SSPs it consumed.
for (ContainerModel containerModel : jobModel.getContainers().values()) {
for (TaskModel taskModel : containerModel.getTasks().values()) {
taskToProcessorId.put(taskModel.getTaskName(), containerModel.getId());
for (SystemStreamPartition partition : taskModel.getSystemStreamPartitions()) {
// computeIfAbsent returns the (possibly freshly created) list, so the
// original's redundant second map lookup via get() is avoided.
taskToSSPs.computeIfAbsent(taskModel.getTaskName(), k -> new ArrayList<>()).add(partition);
}
}
}
}
// Map each live processor to its reported location.
Map<String, LocationId> processorLocality = new HashMap<>();
for (ProcessorNode processorNode : processorNodes) {
ProcessorData processorData = processorNode.getProcessorData();
processorLocality.put(processorData.getProcessorId(), processorData.getLocationId());
}
Map<TaskName, LocationId> taskLocality = zkUtils.readTaskLocality();
return new GrouperMetadataImpl(processorLocality, taskLocality, taskToSSPs, taskToProcessorId);
}
Usage of org.apache.samza.job.model.ContainerModel in the Apache Samza project: the testOnNewJobModelWithChangeInWorkAssignment method of class TestZkJobCoordinator.
@Test
public void testOnNewJobModelWithChangeInWorkAssignment() {
// A new job model assigning a single task to this processor's container.
final TaskName task1 = new TaskName("task1");
final ContainerModel container = mock(ContainerModel.class);
final JobModel newJobModel = mock(JobModel.class);
final JobCoordinatorListener listener = mock(JobCoordinatorListener.class);
when(container.getTasks()).thenReturn(ImmutableMap.of(task1, mock(TaskModel.class)));
when(newJobModel.getContainers()).thenReturn(ImmutableMap.of(PROCESSOR_ID, container));
ZkJobCoordinator coordinator = new ZkJobCoordinator(PROCESSOR_ID, new MapConfig(), new NoOpMetricsRegistry(), zkUtils, zkMetadataStore, coordinatorStreamStore);
coordinator.setListener(listener);
coordinator.setJobModelExpired(true);
coordinator.onNewJobModel(newJobModel);
// Accepting the new model must persist task locality and notify the listener exactly once.
verify(zkUtils, times(1)).writeTaskLocality(eq(task1), any());
verify(listener, times(1)).onNewJobModel(PROCESSOR_ID, newJobModel);
assertEquals("Active job model should be updated with the new job model", newJobModel, coordinator.getActiveJobModel());
}
Aggregations