Use of org.apache.samza.system.SystemStream in project samza by apache.
From class NonTransactionalStateTaskRestoreManager, method restore().
/**
* Restore each store in taskStoresToRestore sequentially
*/
@Override
public CompletableFuture<Void> restore() {
  return CompletableFuture.runAsync(() -> {
    for (String storeName : taskStoresToRestore) {
      LOG.info("Restoring store: {} for task: {}", storeName, taskModel.getTaskName());
      SystemConsumer systemConsumer = storeConsumers.get(storeName);
      SystemStream systemStream = storeChangelogs.get(storeName);
      SystemAdmin systemAdmin = systemAdmins.getSystemAdmin(systemStream.getSystem());
      ChangelogSSPIterator changelogSSPIterator = new ChangelogSSPIterator(systemConsumer,
          new SystemStreamPartition(systemStream, taskModel.getChangelogPartition()), null, systemAdmin, false);
      try {
        taskStores.get(storeName).restore(changelogSSPIterator);
      } catch (InterruptedException e) {
        String msg = String.format("Interrupted while restoring store: %s for task: %s",
            storeName, taskModel.getTaskName().getTaskName());
        // wrap in unchecked exception to throw from lambda
        throw new SamzaException(msg, e);
      }
    }
  }, restoreExecutor);
}
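The SystemStream here identifies each store's changelog; combining it with the task's changelog Partition yields the SystemStreamPartition that the changelog iterator consumes. A minimal sketch of that combination, assuming hypothetical system/stream names and an assumed partition id in place of the real runtime values:
SystemStream changelog = new SystemStream("kafka", "my-store-changelog");          // assumed system and stream names
Partition changelogPartition = new Partition(0);                                   // assumed task changelog partition
SystemStreamPartition changelogSSP = new SystemStreamPartition(changelog, changelogPartition);
// changelogSSP is the single changelog partition this task would restore its store from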
Use of org.apache.samza.system.SystemStream in project samza by apache.
From class NonTransactionalStateTaskRestoreManager, method validateChangelogStreams().
/**
* Validates each changelog system-stream with its respective SystemAdmin.
*/
private void validateChangelogStreams() {
LOG.info("Validating change log streams: " + storeChangelogs);
for (SystemStream changelogSystemStream : storeChangelogs.values()) {
SystemAdmin systemAdmin = systemAdmins.getSystemAdmin(changelogSystemStream.getSystem());
StreamSpec changelogSpec = StreamSpec.createChangeLogStreamSpec(changelogSystemStream.getStream(), changelogSystemStream.getSystem(), maxChangeLogStreamPartitions);
systemAdmin.validateStream(changelogSpec);
}
}
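For a single changelog, the validation amounts to resolving the owning SystemAdmin from the SystemStream's system name and handing it a changelog StreamSpec. A hedged sketch with assumed names and an assumed partition count:
SystemStream changelog = new SystemStream("kafka", "my-store-changelog");           // assumed names
SystemAdmin admin = systemAdmins.getSystemAdmin(changelog.getSystem());
StreamSpec changelogSpec = StreamSpec.createChangeLogStreamSpec(
    changelog.getStream(), changelog.getSystem(), 8);                               // 8: assumed partition count
admin.validateStream(changelogSpec);                                                // expected to throw if validation fails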
Use of org.apache.samza.system.SystemStream in project samza by apache.
From class LocalApplicationRunner, method createUnderlyingCoordinatorStream().
@VisibleForTesting
boolean createUnderlyingCoordinatorStream(Config config) {
  // and will be addressed in the next phase of metadata store abstraction
  if (new JobConfig(config).getCoordinatorSystemNameOrNull() == null) {
    LOG.warn("{} or {} not configured. Coordinator stream not created.",
        JobConfig.JOB_COORDINATOR_SYSTEM, JobConfig.JOB_DEFAULT_SYSTEM);
    return false;
  }
  SystemStream coordinatorSystemStream = CoordinatorStreamUtil.getCoordinatorSystemStream(config);
  SystemAdmins systemAdmins = new SystemAdmins(config, this.getClass().getSimpleName());
  systemAdmins.start();
  try {
    SystemAdmin coordinatorSystemAdmin = systemAdmins.getSystemAdmin(coordinatorSystemStream.getSystem());
    CoordinatorStreamUtil.createCoordinatorStream(coordinatorSystemStream, coordinatorSystemAdmin);
  } finally {
    systemAdmins.stop();
  }
  return true;
}
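Whether the coordinator stream gets created therefore hinges on the coordinator system being resolvable from config. A hedged sketch of a config under which the guard above would pass, using the standard job.coordinator.system key and assumed values; a usable system factory (and reachable brokers) would still be needed for the actual stream creation to succeed:
Map<String, String> cfg = new HashMap<>();
cfg.put("job.name", "my-app");               // hypothetical job name
cfg.put("job.coordinator.system", "kafka");  // assumed coordinator system (JobConfig.JOB_COORDINATOR_SYSTEM)
// runner: a hypothetical LocalApplicationRunner instance; with the coordinator system set, the method
// proceeds past the null check and attempts to resolve and create the coordinator SystemStream.
boolean created = runner.createUnderlyingCoordinatorStream(new MapConfig(cfg));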
Use of org.apache.samza.system.SystemStream in project samza by apache.
From class TestContainerStorageManager, method setUp().
/**
* Method to create a containerStorageManager with mocked dependencies
*/
@Before
public void setUp() throws InterruptedException {
  taskRestoreMetricGauges = new HashMap<>();
  this.tasks = new HashMap<>();
  this.taskInstanceMetrics = new HashMap<>();

  // Add two mocked tasks
  addMockedTask("task 0", 0);
  addMockedTask("task 1", 1);

  // Mock container metrics
  samzaContainerMetrics = mock(SamzaContainerMetrics.class);
  when(samzaContainerMetrics.taskStoreRestorationMetrics()).thenReturn(taskRestoreMetricGauges);

  // Create a map of test changelog SystemStreams
  Map<String, SystemStream> changelogSystemStreams = new HashMap<>();
  changelogSystemStreams.put(STORE_NAME, new SystemStream(SYSTEM_NAME, STREAM_NAME));

  // Create mocked storage engine factories
  Map<String, StorageEngineFactory<Object, Object>> storageEngineFactories = new HashMap<>();
  StorageEngineFactory mockStorageEngineFactory = (StorageEngineFactory<Object, Object>) mock(StorageEngineFactory.class);
  StorageEngine mockStorageEngine = mock(StorageEngine.class);
  when(mockStorageEngine.getStoreProperties())
      .thenReturn(new StoreProperties.StorePropertiesBuilder().setLoggedStore(true).setPersistedToDisk(true).build());
  doAnswer(invocation -> mockStorageEngine)
      .when(mockStorageEngineFactory).getStorageEngine(anyString(), any(), any(), any(), any(), any(), any(), any(), any(), any());
  storageEngineFactories.put(STORE_NAME, mockStorageEngineFactory);

  // Instrument the mocked storage engine to record the number of store.restore() calls
  doAnswer(invocation -> {
    storeRestoreCallCount++;
    return CompletableFuture.completedFuture(null);
  }).when(mockStorageEngine).restore(any());

  // Set the mocked stores' properties to be logged
  doAnswer(invocation -> new StoreProperties.StorePropertiesBuilder().setLoggedStore(true).build())
      .when(mockStorageEngine).getStoreProperties();

  // Mock and set up system consumers
  SystemConsumer mockSystemConsumer = mock(SystemConsumer.class);
  doAnswer(invocation -> {
    systemConsumerStartCount++;
    return null;
  }).when(mockSystemConsumer).start();
  doAnswer(invocation -> {
    systemConsumerStopCount++;
    return null;
  }).when(mockSystemConsumer).stop();

  // Create mocked system factories and count the number of system consumers created
  Map<String, SystemFactory> systemFactories = new HashMap<>();
  SystemFactory mockSystemFactory = mock(SystemFactory.class);
  doAnswer(invocation -> {
    this.systemConsumerCreationCount++;
    return mockSystemConsumer;
  }).when(mockSystemFactory).getConsumer(anyString(), any(), any());
  systemFactories.put(SYSTEM_NAME, mockSystemFactory);

  // Create mocked configs for specifying serdes
  Map<String, String> configMap = new HashMap<>();
  configMap.put("stores." + STORE_NAME + ".key.serde", "stringserde");
  configMap.put("stores." + STORE_NAME + ".msg.serde", "stringserde");
  configMap.put("stores." + STORE_NAME + ".factory", mockStorageEngineFactory.getClass().getName());
  configMap.put("stores." + STORE_NAME + ".changelog", SYSTEM_NAME + "." + STREAM_NAME);
  configMap.put("serializers.registry.stringserde.class", StringSerdeFactory.class.getName());
  configMap.put(TaskConfig.TRANSACTIONAL_STATE_RETAIN_EXISTING_STATE, "true");
  Config config = new MapConfig(configMap);
  Map<String, Serde<Object>> serdes = new HashMap<>();
  serdes.put("stringserde", mock(Serde.class));

  // Create mocked system admins
  SystemAdmin mockSystemAdmin = mock(SystemAdmin.class);
  doAnswer(new Answer<Void>() {
    public Void answer(InvocationOnMock invocation) {
      Object[] args = invocation.getArguments();
      System.out.println("called with arguments: " + Arrays.toString(args));
      return null;
    }
  }).when(mockSystemAdmin).validateStream(any());
  SystemAdmins mockSystemAdmins = mock(SystemAdmins.class);
  when(mockSystemAdmins.getSystemAdmin("kafka")).thenReturn(mockSystemAdmin);

  // Create a mocked StreamMetadataCache
  SystemStreamMetadata.SystemStreamPartitionMetadata sspMetadata =
      new SystemStreamMetadata.SystemStreamPartitionMetadata("0", "50", "51");
  Map<Partition, SystemStreamMetadata.SystemStreamPartitionMetadata> partitionMetadata = new HashMap<>();
  partitionMetadata.put(new Partition(0), sspMetadata);
  partitionMetadata.put(new Partition(1), sspMetadata);
  SystemStreamMetadata systemStreamMetadata = new SystemStreamMetadata(STREAM_NAME, partitionMetadata);
  StreamMetadataCache mockStreamMetadataCache = mock(StreamMetadataCache.class);
  when(mockStreamMetadataCache.getStreamMetadata(
      JavaConverters.asScalaSetConverter(new HashSet<SystemStream>(changelogSystemStreams.values())).asScala().toSet(), false))
      .thenReturn(new scala.collection.immutable.Map.Map1(new SystemStream(SYSTEM_NAME, STREAM_NAME), systemStreamMetadata));

  CheckpointManager checkpointManager = mock(CheckpointManager.class);
  when(checkpointManager.readLastCheckpoint(any(TaskName.class))).thenReturn(new CheckpointV1(new HashMap<>()));
  SSPMetadataCache mockSSPMetadataCache = mock(SSPMetadataCache.class);
  when(mockSSPMetadataCache.getMetadata(any(SystemStreamPartition.class)))
      .thenReturn(new SystemStreamMetadata.SystemStreamPartitionMetadata("0", "10", "11"));

  ContainerContext mockContainerContext = mock(ContainerContext.class);
  ContainerModel mockContainerModel = new ContainerModel("samza-container-test", tasks);
  when(mockContainerContext.getContainerModel()).thenReturn(mockContainerModel);

  // Reset the expected number of system consumer create, start and stop calls, and store.restore() calls
  this.systemConsumerCreationCount = 0;
  this.systemConsumerStartCount = 0;
  this.systemConsumerStopCount = 0;
  this.storeRestoreCallCount = 0;

  StateBackendFactory backendFactory = mock(StateBackendFactory.class);
  TaskRestoreManager restoreManager = mock(TaskRestoreManager.class);
  ArgumentCaptor<ExecutorService> restoreExecutorCaptor = ArgumentCaptor.forClass(ExecutorService.class);
  when(backendFactory.getRestoreManager(any(), any(), any(), restoreExecutorCaptor.capture(),
      any(), any(), any(), any(), any(), any(), any())).thenReturn(restoreManager);
  doAnswer(invocation -> {
    storeRestoreCallCount++;
    return CompletableFuture.completedFuture(null);
  }).when(restoreManager).restore();

  // Create the container storage manager
  this.containerStorageManager = new ContainerStorageManager(
      checkpointManager, mockContainerModel, mockStreamMetadataCache, mockSystemAdmins, changelogSystemStreams,
      new HashMap<>(), storageEngineFactories, systemFactories, serdes, config, taskInstanceMetrics,
      samzaContainerMetrics, mock(JobContext.class), mockContainerContext,
      ImmutableMap.of(StorageConfig.KAFKA_STATE_BACKEND_FACTORY, backendFactory), mock(Map.class),
      DEFAULT_LOGGED_STORE_BASE_DIR, DEFAULT_STORE_BASE_DIR, null, new SystemClock());
}
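The SystemStream-specific part of this setup is the store-to-changelog mapping and the stubbed metadata for that changelog. A hedged, stripped-down sketch of just that wiring, with assumed store/system/stream names and assumed offsets:
Map<String, SystemStream> changelogs = new HashMap<>();
SystemStream changelog = new SystemStream("kafka", "store-1-changelog");            // assumed names
changelogs.put("store-1", changelog);
Map<Partition, SystemStreamMetadata.SystemStreamPartitionMetadata> partitions = new HashMap<>();
partitions.put(new Partition(0),
    new SystemStreamMetadata.SystemStreamPartitionMetadata("0", "50", "51"));       // oldest, newest, upcoming offsets (assumed)
SystemStreamMetadata changelogMetadata = new SystemStreamMetadata(changelog.getStream(), partitions);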
Use of org.apache.samza.system.SystemStream in project samza by apache.
From class KinesisSystemFactory, method validateConfig().
protected void validateConfig(String system, Config config) {
  // Kinesis system does not support groupers other than AllSspToSingleTaskGrouper
  JobConfig jobConfig = new JobConfig(config);
  if (!jobConfig.getSystemStreamPartitionGrouperFactory().equals(AllSspToSingleTaskGrouperFactory.class.getCanonicalName())) {
    String errMsg = String.format("Incorrect Grouper %s used for KinesisSystemConsumer %s. Please set the %s config"
        + " to %s.", jobConfig.getSystemStreamPartitionGrouperFactory(), system, JobConfig.SSP_GROUPER_FACTORY,
        AllSspToSingleTaskGrouperFactory.class.getCanonicalName());
    throw new ConfigException(errMsg);
  }

  // Kinesis streams cannot be configured as broadcast streams
  TaskConfig taskConfig = new TaskConfig(config);
  if (taskConfig.getBroadcastSystemStreams().stream().anyMatch(ss -> system.equals(ss.getSystem()))) {
    throw new ConfigException("Kinesis streams cannot be configured as broadcast streams.");
  }

  // Kinesis streams cannot be configured as bootstrap streams
  KinesisConfig kConfig = new KinesisConfig(config);
  kConfig.getKinesisStreams(system).forEach(stream -> {
    StreamConfig streamConfig = new StreamConfig(kConfig);
    SystemStream ss = new SystemStream(system, stream);
    if (streamConfig.getBootstrapEnabled(ss)) {
      throw new ConfigException("Kinesis streams cannot be configured as bootstrap streams.");
    }
  });
}
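The bootstrap check builds a SystemStream per configured Kinesis stream and asks StreamConfig whether bootstrap is enabled for it. A hedged sketch of that check in isolation, with assumed system and stream names:
SystemStream ss = new SystemStream("kinesis", "my-stream");   // assumed system and stream names
StreamConfig streamConfig = new StreamConfig(config);
if (streamConfig.getBootstrapEnabled(ss)) {
  // this is the case the validation above rejects for Kinesis streams
  throw new ConfigException("Kinesis streams cannot be configured as bootstrap streams.");
}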