Use of io.cdap.cdap.spi.metadata.MetadataStorage in project cdap by caskdata.
The class GatewayTestBase, method startGateway.
public static Injector startGateway(final CConfiguration conf) throws Exception {
// Set up our Guice injections
injector = Guice.createInjector(Modules.override(
  new AbstractModule() {
    @Override
    protected void configure() {
    }

    @SuppressWarnings("unused")
    @Provides
    @Named(Constants.Router.ADDRESS)
    public final InetAddress providesHostname(CConfiguration cConf) {
      return Networks.resolve(cConf.get(Constants.Router.ADDRESS),
                              new InetSocketAddress("localhost", 0).getAddress());
    }
  },
  new CoreSecurityRuntimeModule().getInMemoryModules(),
  new ExternalAuthenticationModule(),
  new AppFabricTestModule(conf)
).with(new AbstractModule() {
  @Override
  protected void configure() {
    // It's a bit hacky to add it here. Need to refactor these
    // bindings out as it overlaps with AppFabricServiceModule
    bind(LogReader.class).to(MockLogReader.class).in(Scopes.SINGLETON);
    bind(PermissionManager.class).to(NoOpAccessController.class);
    bind(OwnerAdmin.class).to(DefaultOwnerAdmin.class);
  }
}));
messagingService = injector.getInstance(MessagingService.class);
if (messagingService instanceof Service) {
((Service) messagingService).startAndWait();
}
txService = injector.getInstance(TransactionManager.class);
txService.startAndWait();
// Define all StructuredTable before starting any services that need StructuredTable
StoreDefinition.createAllTables(injector.getInstance(StructuredTableAdmin.class));
metadataStorage = injector.getInstance(MetadataStorage.class);
metadataStorage.createIndex();
metadataService = injector.getInstance(MetadataService.class);
metadataService.startAndWait();
dsOpService = injector.getInstance(DatasetOpExecutorService.class);
dsOpService.startAndWait();
datasetService = injector.getInstance(DatasetService.class);
datasetService.startAndWait();
appFabricServer = injector.getInstance(AppFabricServer.class);
appFabricServer.startAndWait();
logQueryService = injector.getInstance(LogQueryService.class);
logQueryService.startAndWait();
metricsQueryService = injector.getInstance(MetricsQueryService.class);
metricsQueryService.startAndWait();
metricsCollectionService = injector.getInstance(MetricsCollectionService.class);
metricsCollectionService.startAndWait();
namespaceAdmin = injector.getInstance(NamespaceAdmin.class);
namespaceAdmin.create(TEST_NAMESPACE_META1);
namespaceAdmin.create(TEST_NAMESPACE_META2);
// Restart handlers to check if they are resilient across restarts.
router = injector.getInstance(NettyRouter.class);
router.startAndWait();
port = router.getBoundAddress().orElseThrow(IllegalStateException::new).getPort();
return injector;
}
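The essential MetadataStorage bootstrap in this snippet is the ordering: StoreDefinition.createAllTables runs before any service that needs a StructuredTable, and createIndex is called before MetadataService starts. A minimal sketch of just those steps, reusing only the calls shown above (illustrative, not a complete test setup):

// Minimal sketch of the metadata bootstrap order used above; the other
// services started by the full test base are omitted.
StoreDefinition.createAllTables(injector.getInstance(StructuredTableAdmin.class));
MetadataStorage metadataStorage = injector.getInstance(MetadataStorage.class);
// The metadata index must exist before MetadataService starts serving requests.
metadataStorage.createIndex();
MetadataService metadataService = injector.getInstance(MetadataService.class);
metadataService.startAndWait();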
Use of io.cdap.cdap.spi.metadata.MetadataStorage in project cdap by caskdata.
The class MetadataSubscriberServiceTest, method testMetadata.
@Test
public void testMetadata() throws InterruptedException, TimeoutException, ExecutionException, IOException {
ProgramRunId workflowRunId = workflow1.run(RunIds.generate());
MetadataEntity entity = MetadataEntity.ofDataset("myns", "myds");
// Try to read, should have nothing
MetadataStorage metadataStorage = getInjector().getInstance(MetadataStorage.class);
Metadata meta = metadataStorage.read(new Read(entity, MetadataScope.USER));
Assert.assertTrue(meta.getProperties().isEmpty());
Assert.assertTrue(meta.getTags().isEmpty());
MetadataPublisher metadataPublisher = getInjector().getInstance(MessagingMetadataPublisher.class);
final String descriptionKey = MetadataConstants.DESCRIPTION_KEY;
final String creationTimeKey = MetadataConstants.CREATION_TIME_KEY;
// publish a create event
Map<String, String> props = ImmutableMap.of("x", "y", descriptionKey, "desc1", creationTimeKey, "123456");
Set<String> tags = ImmutableSet.of("sometag");
metadataPublisher.publish(NamespaceId.SYSTEM, new MetadataOperation.Create(entity, props, tags));
// wait until metadata is written
waitForSystemMetadata(entity, metadataStorage, 3, 1);
// validate correctness of metadata after create
meta = metadataStorage.read(new Read(entity, MetadataScope.SYSTEM));
Assert.assertEquals(props, meta.getProperties(MetadataScope.SYSTEM));
Assert.assertEquals(tags, meta.getTags(MetadataScope.SYSTEM));
// publish another create event with different create time, no description, different tags
Set<String> tags2 = ImmutableSet.of("another", "two");
metadataPublisher.publish(workflowRunId, new MetadataOperation.Create(entity, ImmutableMap.of(creationTimeKey, "9876543", "new", "prop"), tags2));
// wait until metadata is written
waitForSystemMetadata(entity, metadataStorage, 3, 2);
// validate correctness of metadata: creation time and description unchanged, new property present
meta = metadataStorage.read(new Read(entity, MetadataScope.SYSTEM));
Assert.assertEquals(ImmutableMap.of(creationTimeKey, "123456", descriptionKey, "desc1", "new", "prop"), meta.getProperties(MetadataScope.SYSTEM));
Assert.assertEquals(tags2, meta.getTags(MetadataScope.SYSTEM));
// publish another create event without create time, different description, no tags
metadataPublisher.publish(workflowRunId, new MetadataOperation.Create(entity, ImmutableMap.of(descriptionKey, "some"), Collections.emptySet()));
// wait until metadata is written
waitForSystemMetadata(entity, metadataStorage, 2, 0);
// validate correctness of metadata: same creation time, updated description, other properties and tags removed
meta = metadataStorage.read(new Read(entity, MetadataScope.SYSTEM));
Assert.assertEquals(ImmutableMap.of(creationTimeKey, "123456", descriptionKey, "some"), meta.getProperties(MetadataScope.SYSTEM));
Assert.assertEquals(Collections.emptySet(), meta.getTags(MetadataScope.SYSTEM));
// publish metadata put
Map<String, String> propertiesToAdd = ImmutableMap.of("a", "x", "b", "z");
Set<String> tagsToAdd = ImmutableSet.of("t1", "t2");
metadataPublisher.publish(workflowRunId, new MetadataOperation.Put(entity, propertiesToAdd, tagsToAdd));
// wait until metadata is written
waitForMetadata(entity, metadataStorage, 2, 2);
// validate correctness of the metadata written
meta = metadataStorage.read(new Read(entity, MetadataScope.USER));
Assert.assertEquals(propertiesToAdd, meta.getProperties(MetadataScope.USER));
Assert.assertEquals(tagsToAdd, meta.getTags(MetadataScope.USER));
// publish metadata delete
metadataPublisher.publish(workflowRunId, new MetadataOperation.Delete(entity, Collections.singleton("a"), ImmutableSet.of("t1", "t3")));
// wait until metadata is written
waitForMetadata(entity, metadataStorage, 1, 1);
// validate correctness of metadata after delete
meta = metadataStorage.read(new Read(entity, MetadataScope.USER));
Assert.assertEquals(ImmutableMap.of("b", "z"), meta.getProperties(MetadataScope.USER));
Assert.assertEquals(ImmutableSet.of("t2"), meta.getTags(MetadataScope.USER));
// publish metadata put properties
metadataPublisher.publish(workflowRunId, new MetadataOperation.Put(entity, propertiesToAdd, Collections.emptySet()));
// wait until metadata is written
// one of the property keys already exists, so its value is simply overwritten; hence the size is 2
waitForMetadata(entity, metadataStorage, 2, 1);
// publish metadata put tags
metadataPublisher.publish(workflowRunId, new MetadataOperation.Put(entity, Collections.emptyMap(), tagsToAdd));
// wait until metadata is written
// one of the tags already exists; hence the size is 2
waitForMetadata(entity, metadataStorage, 2, 2);
// publish delete all properties
metadataPublisher.publish(workflowRunId, new MetadataOperation.DeleteAllProperties(entity));
// wait until metadata is written
waitForMetadata(entity, metadataStorage, 0, 2);
// publish delete all tags
metadataPublisher.publish(workflowRunId, new MetadataOperation.DeleteAllTags(entity));
// wait until metadata is written
waitForMetadata(entity, metadataStorage, 0, 0);
// publish metadata put tags
metadataPublisher.publish(workflowRunId, new MetadataOperation.Put(entity, propertiesToAdd, tagsToAdd));
// wait until metadata is written
waitForMetadata(entity, metadataStorage, 2, 2);
// publish delete all
metadataPublisher.publish(workflowRunId, new MetadataOperation.DeleteAll(entity));
// wait until metadata is written
waitForMetadata(entity, metadataStorage, 0, 0);
// publish metadata put tags
metadataPublisher.publish(workflowRunId, new MetadataOperation.Put(entity, propertiesToAdd, tagsToAdd));
// wait until metadata is written
waitForMetadata(entity, metadataStorage, 2, 2);
// publish drop entity
metadataPublisher.publish(workflowRunId, new MetadataOperation.Drop(entity));
// wait until metadata is deleted
waitForSystemMetadata(entity, metadataStorage, 0, 0);
waitForMetadata(entity, metadataStorage, 0, 0);
}
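The waitForMetadata and waitForSystemMetadata helpers belong to this test class and are not shown here. A plausible sketch of such a helper, assuming only the Tasks.waitFor utility and the read API used above (the helper name and signature are illustrative):

// Hypothetical helper: poll USER-scope metadata until the property and tag
// counts match the expected values. Only Tasks.waitFor and MetadataStorage.read
// are taken from the code above.
private void waitForUserMetadata(MetadataEntity entity, MetadataStorage mds,
                                 int expectedProperties, int expectedTags) throws Exception {
  Tasks.waitFor(true, () -> {
    Metadata meta = mds.read(new Read(entity, MetadataScope.USER));
    return meta.getProperties(MetadataScope.USER).size() == expectedProperties
        && meta.getTags(MetadataScope.USER).size() == expectedTags;
  }, 10, TimeUnit.SECONDS, 100, TimeUnit.MILLISECONDS);
}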
Use of io.cdap.cdap.spi.metadata.MetadataStorage in project cdap by caskdata.
The class MetadataSubscriberServiceTest, method testProfileMetadataWithNoProfilePreferences.
@Test
public void testProfileMetadataWithNoProfilePreferences() throws Exception {
Injector injector = getInjector();
// add a new profile in the default namespace
ProfileService profileService = injector.getInstance(ProfileService.class);
ProfileId myProfile = new ProfileId(NamespaceId.DEFAULT.getNamespace(), "MyProfile");
Profile profile1 = new Profile("MyProfile", Profile.NATIVE.getLabel(), Profile.NATIVE.getDescription(), Profile.NATIVE.getScope(), Profile.NATIVE.getProvisioner());
profileService.saveProfile(myProfile, profile1);
// add an app with a workflow to the app meta store
ApplicationSpecification appSpec = Specifications.from(new AppWithWorkflow());
ApplicationId appId = NamespaceId.DEFAULT.app(appSpec.getName());
ProgramId workflowId = appId.workflow("SampleWorkflow");
// get the metadata - should be empty since we haven't deployed the app
MetadataStorage mds = injector.getInstance(MetadataStorage.class);
Assert.assertEquals(Collections.emptyMap(), mds.read(new Read(workflowId.toMetadataEntity())).getProperties());
Store store = injector.getInstance(DefaultStore.class);
store.addApplication(appId, appSpec);
// set the default namespace to use the profile; since MetadataSubscriberService is not started yet,
// it should not affect the mds
PreferencesService preferencesService = injector.getInstance(PreferencesService.class);
preferencesService.setProperties(NamespaceId.DEFAULT, Collections.singletonMap(SystemArguments.PROFILE_NAME, "USER:MyProfile"));
try {
// Verify the workflow profile metadata is updated to my profile
Tasks.waitFor(myProfile.getScopedName(), () -> getProfileProperty(mds, workflowId), 10, TimeUnit.SECONDS, 100, TimeUnit.MILLISECONDS);
// Setting the properties without a profile replaces the preferences, so it is the same as deleting the profile
preferencesService.setProperties(NamespaceId.DEFAULT, Collections.emptyMap());
// Verify the workflow profile metadata is updated to the default profile
Tasks.waitFor(ProfileId.NATIVE.getScopedName(), () -> getProfileProperty(mds, workflowId), 10, TimeUnit.SECONDS, 100, TimeUnit.MILLISECONDS);
} finally {
// stop and clean up the store
preferencesService.deleteProperties(NamespaceId.DEFAULT);
store.removeAll(NamespaceId.DEFAULT);
profileService.disableProfile(myProfile);
profileService.deleteProfile(myProfile);
mds.apply(new MetadataMutation.Drop(workflowId.toMetadataEntity()), MutationOptions.DEFAULT);
}
}
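The getProfileProperty helper is also test-local. A hedged sketch of what it could look like, assuming the profile is recorded as a SYSTEM-scope property under the key "profile" (an assumption; only the read call itself appears above):

// Hypothetical helper: read the SYSTEM-scope "profile" property of a program.
// The property key is an assumption for this sketch.
private String getProfileProperty(MetadataStorage mds, ProgramId workflowId) throws IOException {
  return mds.read(new Read(workflowId.toMetadataEntity(), MetadataScope.SYSTEM))
            .getProperties(MetadataScope.SYSTEM)
            .get("profile");
}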
Use of io.cdap.cdap.spi.metadata.MetadataStorage in project cdap by caskdata.
The class SupportBundleTestBase, method initializeAndStartServices.
protected static void initializeAndStartServices(CConfiguration cConf, Module overrides) throws Exception {
injector = Guice.createInjector(Modules.override(new AppFabricTestModule(cConf, null)).with(overrides));
int connectionTimeout = cConf.getInt(Constants.HTTP_CLIENT_CONNECTION_TIMEOUT_MS);
int readTimeout = cConf.getInt(Constants.HTTP_CLIENT_READ_TIMEOUT_MS);
httpRequestConfig = new HttpRequestConfig(connectionTimeout, readTimeout, false);
messagingService = injector.getInstance(MessagingService.class);
if (messagingService instanceof Service) {
((Service) messagingService).startAndWait();
}
txManager = injector.getInstance(TransactionManager.class);
txManager.startAndWait();
// Define all StructuredTable before starting any services that need StructuredTable
StoreDefinition.createAllTables(injector.getInstance(StructuredTableAdmin.class));
metadataStorage = injector.getInstance(MetadataStorage.class);
metadataStorage.createIndex();
dsOpService = injector.getInstance(DatasetOpExecutorService.class);
dsOpService.startAndWait();
datasetService = injector.getInstance(DatasetService.class);
datasetService.startAndWait();
appFabricServer = injector.getInstance(AppFabricServer.class);
appFabricServer.startAndWait();
DiscoveryServiceClient discoveryClient = injector.getInstance(DiscoveryServiceClient.class);
appFabricEndpointStrategy = new RandomEndpointStrategy(() -> discoveryClient.discover(Constants.Service.APP_FABRIC_HTTP));
metricsCollectionService = injector.getInstance(MetricsCollectionService.class);
metricsCollectionService.startAndWait();
serviceStore = injector.getInstance(ServiceStore.class);
serviceStore.startAndWait();
metadataService = injector.getInstance(MetadataService.class);
metadataService.startAndWait();
metadataSubscriberService = injector.getInstance(MetadataSubscriberService.class);
metadataSubscriberService.startAndWait();
logQueryService = injector.getInstance(LogQueryService.class);
logQueryService.startAndWait();
locationFactory = getInjector().getInstance(LocationFactory.class);
datasetClient = new DatasetClient(getClientConfig(discoveryClient, Constants.Service.DATASET_MANAGER));
remoteClientFactory = new RemoteClientFactory(discoveryClient, new DefaultInternalAuthenticator(new AuthenticationTestContext()));
metadataClient = new MetadataClient(getClientConfig(discoveryClient, Constants.Service.METADATA_SERVICE));
appFabricHealthCheckService = injector.getInstance(HealthCheckService.class);
appFabricHealthCheckService.helper(Constants.AppFabricHealthCheck.APP_FABRIC_HEALTH_CHECK_SERVICE, cConf, Constants.Service.MASTER_SERVICES_BIND_ADDRESS);
appFabricHealthCheckService.startAndWait();
supportBundleInternalService = injector.getInstance(SupportBundleInternalService.class);
supportBundleInternalService.startAndWait();
Scheduler programScheduler = injector.getInstance(Scheduler.class);
// Wait for the scheduler to be functional.
if (programScheduler instanceof CoreSchedulerService) {
try {
((CoreSchedulerService) programScheduler).waitUntilFunctional(10, TimeUnit.SECONDS);
} catch (Exception e) {
throw new RuntimeException(e);
}
}
}
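The snippet shows only startup. A hedged sketch of a matching teardown, assuming each started service is stopped in roughly reverse order and that the MetadataStorage, which is Closeable (see the StorageMain example below), is closed after the services that use it; the actual test base may differ:

// Illustrative teardown sketch; the method name, ordering, and annotation are assumptions.
@AfterClass
public static void stopServices() throws Exception {
  supportBundleInternalService.stopAndWait();
  appFabricHealthCheckService.stopAndWait();
  logQueryService.stopAndWait();
  metadataSubscriberService.stopAndWait();
  metadataService.stopAndWait();
  serviceStore.stopAndWait();
  metricsCollectionService.stopAndWait();
  appFabricServer.stopAndWait();
  datasetService.stopAndWait();
  dsOpService.stopAndWait();
  // Close the metadata storage after the services that read and write it.
  metadataStorage.close();
  txManager.stopAndWait();
  if (messagingService instanceof Service) {
    ((Service) messagingService).stopAndWait();
  }
}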
Use of io.cdap.cdap.spi.metadata.MetadataStorage in project cdap by caskdata.
The class StorageMain, method createStorage.
@VisibleForTesting
void createStorage(CConfiguration cConf) throws IOException {
LOG.info("Creating storages");
CoreSecurityModule coreSecurityModule = CoreSecurityRuntimeModule.getDistributedModule(cConf);
List<Module> modules = new ArrayList<>(Arrays.asList(
  new ConfigModule(cConf),
  RemoteAuthenticatorModules.getDefaultModule(),
  new SystemDatasetRuntimeModule().getStandaloneModules(),
  // But due to the DataSetsModules, we need to pull in more modules.
  new DataSetsModules().getStandaloneModules(),
  new InMemoryDiscoveryModule(),
  new StorageModule(),
  new DFSLocationModule(),
  new IOModule(),
  coreSecurityModule,
  new AuthenticationContextModules().getMasterModule(),
  new AbstractModule() {
    @Override
    protected void configure() {
      bind(AccessEnforcer.class).to(NoOpAccessController.class);
      bind(TransactionSystemClient.class).to(ConstantTransactionSystemClient.class);
      // The metrics collection service might not get started at this moment,
      // so inject a NoopMetricsCollectionService.
      bind(MetricsCollectionService.class).to(NoOpMetricsCollectionService.class).in(Scopes.SINGLETON);
    }
  }));
if (coreSecurityModule.requiresZKClient()) {
modules.add(new ZKClientModule());
}
Injector injector = Guice.createInjector(modules);
// Create store definitions
StoreDefinition.createAllTables(injector.getInstance(StructuredTableAdmin.class));
// Create metadata tables
try (MetadataStorage metadataStorage = injector.getInstance(MetadataStorage.class)) {
metadataStorage.createIndex();
}
injector.getInstance(LevelDBTableService.class).close();
LOG.info("Storage creation completed");
}
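A hedged usage sketch: assuming StorageMain can be instantiated with a no-argument constructor (an assumption, not shown above), a one-shot setup run would create the tables and the metadata index and then return:

// Illustrative only: the no-arg StorageMain constructor and the default
// CConfiguration.create() configuration are assumptions for this sketch.
CConfiguration cConf = CConfiguration.create();
new StorageMain().createStorage(cConf);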