use of org.apache.samza.metrics.MetricsRegistryMap in project samza by apache.
the class TestBlobStoreRestoreManager method testRestoreRetainsCheckpointDirsIfValid.
@Test
public void testRestoreRetainsCheckpointDirsIfValid() throws IOException {
  String jobName = "testJobName";
  String jobId = "testJobId";
  TaskName taskName = mock(TaskName.class);
  BlobStoreRestoreManagerMetrics metrics = new BlobStoreRestoreManagerMetrics(new MetricsRegistryMap());
  metrics.initStoreMetrics(ImmutableList.of("storeName"));
  Set<String> storesToRestore = ImmutableSet.of("storeName");
  SnapshotIndex snapshotIndex = mock(SnapshotIndex.class);
  Map<String, Pair<String, SnapshotIndex>> prevStoreSnapshotIndexes =
      ImmutableMap.of("storeName", Pair.of("blobId", snapshotIndex));
  DirIndex dirIndex = BlobStoreTestUtil.createDirIndex("[a]");
  when(snapshotIndex.getDirIndex()).thenReturn(dirIndex);
  CheckpointId checkpointId = CheckpointId.create();
  when(snapshotIndex.getSnapshotMetadata())
      .thenReturn(new SnapshotMetadata(checkpointId, "jobName", "jobId", "taskName", "storeName"));
  Path loggedBaseDir = Files.createTempDirectory(BlobStoreTestUtil.TEMP_DIR_PREFIX);

  // create the store dir to be deleted during restore
  Path storeDir = Files.createTempDirectory(loggedBaseDir, "storeDir-");
  // create the checkpoint dir so that shouldRestore == false (areSameDir == true later)
  Path storeCheckpointDir = Files.createTempDirectory(loggedBaseDir, "storeDir-" + checkpointId + "-");
  // create a dummy file to verify after the dir rename
  Path tempFile = Files.createTempFile(storeCheckpointDir, "tempFile-", null);

  StorageConfig storageConfig = mock(StorageConfig.class);
  StorageManagerUtil storageManagerUtil = mock(StorageManagerUtil.class);
  when(storageManagerUtil.getTaskStoreDir(eq(loggedBaseDir.toFile()), eq("storeName"), eq(taskName), eq(TaskMode.Active)))
      .thenReturn(storeDir.toFile());
  when(storageManagerUtil.getStoreCheckpointDir(any(File.class), eq(checkpointId)))
      .thenReturn(storeCheckpointDir.toString());
  when(storageManagerUtil.getTaskStoreCheckpointDirs(any(File.class), anyString(), any(TaskName.class), any(TaskMode.class)))
      .thenReturn(ImmutableList.of(storeCheckpointDir.toFile()));

  BlobStoreUtil blobStoreUtil = mock(BlobStoreUtil.class);
  DirDiffUtil dirDiffUtil = mock(DirDiffUtil.class);
  // areSameDir returns true, so shouldRestore is false and the blob store restore is skipped
  when(dirDiffUtil.areSameDir(anySet(), anyBoolean())).thenReturn((arg1, arg2) -> true);
  // return immediately without restoring
  when(blobStoreUtil.restoreDir(eq(storeDir.toFile()), eq(dirIndex), any(Metadata.class)))
      .thenReturn(CompletableFuture.completedFuture(null));

  BlobStoreRestoreManager.restoreStores(jobName, jobId, taskName, storesToRestore, prevStoreSnapshotIndexes,
      loggedBaseDir.toFile(), storageConfig, metrics, storageManagerUtil, blobStoreUtil, dirDiffUtil, EXECUTOR);

  // verify that the blob store restore was not called (the store should have been restored from the checkpoint dir)
  verify(blobStoreUtil, times(0)).restoreDir(eq(storeDir.toFile()), eq(dirIndex), any(Metadata.class));
  // verify that the checkpoint dir was renamed to the store dir
  assertFalse(storeCheckpointDir.toFile().exists());
  assertTrue(storeDir.toFile().exists());
  assertTrue(Files.exists(Paths.get(storeDir.toString(), tempFile.getFileName().toString())));
}
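For context: the metrics objects above are all backed by MetricsRegistryMap, Samza's in-memory implementation of MetricsRegistry. Metrics created through it can be read back directly, which is what makes it convenient in tests. A minimal sketch, with illustrative group and metric names:

import org.apache.samza.metrics.Counter;
import org.apache.samza.metrics.Gauge;
import org.apache.samza.metrics.MetricsRegistryMap;

public class MetricsRegistryMapSketch {
  public static void main(String[] args) {
    MetricsRegistryMap registry = new MetricsRegistryMap();

    // Create a counter and a gauge under an arbitrary group name.
    Counter skipped = registry.newCounter("restore-group", "restores-skipped");
    Gauge<Long> restoreTimeNs = registry.newGauge("restore-group", "restore-time-ns", 0L);

    skipped.inc();
    restoreTimeNs.set(42L);

    // The in-memory registry can be read back directly, e.g. in test assertions.
    System.out.println(skipped.getCount());       // 1
    System.out.println(restoreTimeNs.getValue()); // 42
  }
}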
use of org.apache.samza.metrics.MetricsRegistryMap in project beam by apache.
the class SamzaTimerInternalsFactoryTest method createStore.
private KeyValueStore<ByteArray, StateValue<?>> createStore() {
  final Options options = new Options();
  options.setCreateIfMissing(true);
  RocksDbKeyValueStore rocksStore =
      new RocksDbKeyValueStore(
          temporaryFolder.getRoot(),
          options,
          new MapConfig(),
          false,
          "beamStore",
          new WriteOptions(),
          new FlushOptions(),
          new KeyValueStoreMetrics("beamStore", new MetricsRegistryMap()));
  return new SerializedKeyValueStore<>(
      rocksStore,
      new ByteArraySerdeFactory.ByteArraySerde(),
      new StateValueSerdeFactory.StateValueSerde(),
      new SerializedKeyValueStoreMetrics("beamStore", new MetricsRegistryMap()));
}
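The store returned here layers serialization over RocksDB: SerializedKeyValueStore runs keys and values through the supplied serdes before delegating to the raw byte store, and each layer reports to its own metrics object backed by a MetricsRegistryMap. As a hedged sketch of how such a store is driven through Samza's KeyValueStore interface (a generic illustrative helper, not Beam's actual test code):

import org.apache.samza.storage.kv.KeyValueIterator;
import org.apache.samza.storage.kv.KeyValueStore;

// Illustrative helper: exercises the basic operations supported by the
// serialized RocksDB store built in createStore above.
static <K, V> void exercise(KeyValueStore<K, V> store, K key, V value) {
  store.put(key, value);       // serialized by the serde layer, then written to RocksDB
  store.flush();               // persist buffered writes
  V readBack = store.get(key); // deserialized on the way out

  KeyValueIterator<K, V> it = store.all();
  try {
    while (it.hasNext()) {
      System.out.println(it.next().getKey());
    }
  } finally {
    it.close(); // RocksDB iterators hold native resources and must be closed
  }
  store.delete(key);
}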
use of org.apache.samza.metrics.MetricsRegistryMap in project beam by apache.
the class UnboundedSourceSystemTest method createConsumer.
private static UnboundedSourceSystem.Consumer<String, TestCheckpointMark> createConsumer(
    TestUnboundedSource<String> source, int splitNum) {
  SamzaPipelineOptions pipelineOptions = PipelineOptionsFactory.as(SamzaPipelineOptions.class);
  // emit watermarks immediately
  pipelineOptions.setWatermarkInterval(0L);
  pipelineOptions.setMaxSourceParallelism(splitNum);
  return new UnboundedSourceSystem.Consumer<>(
      source, pipelineOptions, new SamzaMetricsContainer(new MetricsRegistryMap()), "test-step");
}
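Both this consumer and the BoundedSourceSystem.Consumer in the next snippet implement Samza's SystemConsumer interface, so tests drive them with the usual register/start/poll/stop lifecycle. A hedged sketch (the system and stream names are assumptions; real tests derive the partitions from the source's splits):

import java.util.Collections;
import java.util.List;
import java.util.Map;
import org.apache.samza.Partition;
import org.apache.samza.system.IncomingMessageEnvelope;
import org.apache.samza.system.SystemConsumer;
import org.apache.samza.system.SystemStreamPartition;

// Illustrative driver for the consumer built by createConsumer above.
static void drive(SystemConsumer consumer) throws InterruptedException {
  // System/stream names are placeholders for the sketch.
  SystemStreamPartition ssp =
      new SystemStreamPartition("test-system", "test-stream", new Partition(0));
  consumer.register(ssp, "0"); // begin from the given offset
  consumer.start();
  Map<SystemStreamPartition, List<IncomingMessageEnvelope>> polled =
      consumer.poll(Collections.singleton(ssp), 1000L);
  polled.getOrDefault(ssp, Collections.emptyList())
      .forEach(envelope -> System.out.println(envelope.getMessage()));
  consumer.stop();
}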
use of org.apache.samza.metrics.MetricsRegistryMap in project beam by apache.
the class BoundedSourceSystemTest method createConsumer.
private static BoundedSourceSystem.Consumer<String> createConsumer(
    BoundedSource<String> source, int splitNum) {
  SamzaPipelineOptions pipelineOptions = PipelineOptionsFactory.as(SamzaPipelineOptions.class);
  pipelineOptions.setMaxSourceParallelism(splitNum);
  return new BoundedSourceSystem.Consumer<>(
      source, pipelineOptions, new SamzaMetricsContainer(new MetricsRegistryMap()), "test-step");
}
use of org.apache.samza.metrics.MetricsRegistryMap in project samza by apache.
the class JobCoordinatorLaunchUtil method run.
/**
 * Run {@link ClusterBasedJobCoordinator} with the full job config.
 *
 * @param app the SamzaApplication to run.
 * @param config the full job config.
 */
@SuppressWarnings("rawtypes")
public static void run(SamzaApplication app, Config config) {
  // Execute planning
  ApplicationDescriptorImpl<? extends ApplicationDescriptor> appDesc =
      ApplicationDescriptorUtil.getAppDescriptor(app, config);
  RemoteJobPlanner planner = new RemoteJobPlanner(appDesc);
  List<JobConfig> jobConfigs = planner.prepareJobs();
  if (jobConfigs.size() != 1) {
    throw new SamzaException("Only a single remote job is supported.");
  }
  Config fullConfig = jobConfigs.get(0);

  // Create the coordinator stream, if it does not already exist, before fetching the launch config from it.
  CoordinatorStreamUtil.createCoordinatorStream(fullConfig);
  MetricsRegistryMap metrics = new MetricsRegistryMap();
  MetadataStore metadataStore =
      new CoordinatorStreamStore(CoordinatorStreamUtil.buildCoordinatorStreamConfig(fullConfig), metrics);
  // The MetadataStore is closed in ClusterBasedJobCoordinator#onShutDown.
  // Its initialization can be moved into ClusterBasedJobCoordinator after
  // ClusterBasedJobCoordinator#createFromMetadataStore is cleaned up.
  metadataStore.init();

  // Read the extra launch config from the metadata store.
  Config launchConfig = CoordinatorStreamUtil.readLaunchConfigFromCoordinatorStream(fullConfig, metadataStore);
  Config finalConfig = new MapConfig(launchConfig, fullConfig);

  // This needs to be consistent with RemoteApplicationRunner#run, where JobRunner#submit
  // is called instead of JobRunner#run.
  CoordinatorStreamUtil.writeConfigToCoordinatorStream(finalConfig, true);
  DiagnosticsUtil.createDiagnosticsStream(finalConfig);

  Optional<String> jobCoordinatorFactoryClassName =
      new JobCoordinatorConfig(config).getOptionalJobCoordinatorFactoryClassName();
  if (jobCoordinatorFactoryClassName.isPresent()) {
    runJobCoordinator(jobCoordinatorFactoryClassName.get(), metrics, metadataStore, finalConfig);
  } else {
    ClusterBasedJobCoordinator jc = new ClusterBasedJobCoordinator(metrics, metadataStore, finalConfig);
    jc.run();
  }
}
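One detail worth calling out is the merge on the finalConfig line: Samza's MapConfig is built by copying the given maps in order, so entries from later maps override earlier ones, meaning fullConfig wins over launchConfig on conflicting keys. A small sketch with made-up keys:

import com.google.common.collect.ImmutableMap;
import org.apache.samza.config.Config;
import org.apache.samza.config.MapConfig;

public class MapConfigPrecedenceSketch {
  public static void main(String[] args) {
    // Hypothetical keys, purely to illustrate merge order.
    Config launchConfig = new MapConfig(ImmutableMap.of("job.container.count", "2", "launch.only.key", "x"));
    Config fullConfig = new MapConfig(ImmutableMap.of("job.container.count", "4"));

    // As in run() above: later arguments take precedence.
    Config finalConfig = new MapConfig(launchConfig, fullConfig);

    System.out.println(finalConfig.get("job.container.count")); // 4, from fullConfig
    System.out.println(finalConfig.get("launch.only.key"));     // x, kept from launchConfig
  }
}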