Use of io.cdap.cdap.spi.metadata.MetadataMutation in the project cdap by caskdata: the method getPluginCounts of the class MetadataSubscriberService.
/**
 * Gathers plugin metadata mutations for every application in the given namespace
 * and writes them to metadata storage in one batch.
 *
 * @param namespace the namespace the applications belong to
 * @param apps application metadata records whose specs are scanned for plugins
 * @throws IOException if the batch write to metadata storage fails
 */
private void getPluginCounts(String namespace, List<ApplicationMeta> apps) throws IOException {
  List<MetadataMutation> pluginUpdates = new ArrayList<>();
  for (ApplicationMeta appMeta : apps) {
    collectPluginMetadata(namespace, appMeta.getSpec(), pluginUpdates);
  }
  metadataStorage.batch(pluginUpdates, MutationOptions.DEFAULT);
}
Use of io.cdap.cdap.spi.metadata.MetadataMutation in the project cdap by caskdata: the method inspectArtifact of the class DefaultArtifactInspector.
/**
 * Inspect the given artifact to determine the classes contained in the artifact.
 *
 * @param artifactId the id of the artifact to inspect
 * @param artifactFile the artifact file
 * @param parentDescriptor {@link ArtifactDescriptor} of parent and grandparent (if any) artifacts.
 * @param additionalPlugins Additional plugin classes
 * @return metadata about the classes contained in the artifact
 * @throws IOException if there was an exception opening the jar file
 * @throws InvalidArtifactException if the artifact is invalid. For example, if the application main class is not
 * actually an Application.
 */
@Override
public ArtifactClassesWithMetadata inspectArtifact(Id.Artifact artifactId, File artifactFile, List<ArtifactDescriptor> parentDescriptor, Set<PluginClass> additionalPlugins) throws IOException, InvalidArtifactException {
// Stage all temporary unpacking under the configured local data dir so cleanup is contained.
Path tmpDir = Paths.get(cConf.get(Constants.CFG_LOCAL_DATA_DIR), cConf.get(Constants.AppFabric.TEMP_DIR)).toAbsolutePath();
Files.createDirectories(tmpDir);
Location artifactLocation = Locations.toLocation(artifactFile);
EntityImpersonator entityImpersonator = new EntityImpersonator(artifactId.toEntityId(), impersonator);
// Per-inspection scratch directory; deleted in the finally block below.
Path stageDir = Files.createTempDirectory(tmpDir, artifactFile.getName());
// Resource order matters: the class-loader folder must outlive both class loaders,
// and the plugin instantiator depends on whichever class loader is chosen as parent.
try (ClassLoaderFolder clFolder = BundleJarUtil.prepareClassLoaderFolder(artifactLocation, () -> Files.createTempDirectory(stageDir, "unpacked-").toFile());
CloseableClassLoader parentClassLoader = createParentClassLoader(parentDescriptor, entityImpersonator);
CloseableClassLoader artifactClassLoader = artifactClassLoaderFactory.createClassLoader(clFolder.getDir());
// If there is no parent artifact, the artifact's own class loader serves as the plugin parent.
PluginInstantiator pluginInstantiator = new PluginInstantiator(cConf, parentClassLoader == null ? artifactClassLoader : parentClassLoader, Files.createTempDirectory(stageDir, "plugins-").toFile(), false)) {
pluginInstantiator.addArtifact(artifactLocation, artifactId.toArtifactId());
ArtifactClasses.Builder builder = inspectApplications(artifactId, ArtifactClasses.builder(), artifactLocation, artifactClassLoader);
// Plugin inspection both fills the builder and accumulates metadata mutations.
List<MetadataMutation> mutations = new ArrayList<>();
inspectPlugins(builder, artifactFile, artifactId.toEntityId(), pluginInstantiator, additionalPlugins, mutations);
return new ArtifactClassesWithMetadata(builder.build(), mutations);
} catch (EOFException | ZipException e) {
// A truncated or non-zip file surfaces as EOF/Zip errors when unpacking; report it as an invalid artifact.
throw new InvalidArtifactException("Artifact " + artifactId + " is not a valid zip file.", e);
} finally {
// Best-effort cleanup of the staging area; failure to delete is logged, not propagated.
try {
DirUtils.deleteDirectoryContents(stageDir.toFile());
} catch (IOException e) {
LOG.warn("Exception raised while deleting directory {}", stageDir, e);
}
}
}
Use of io.cdap.cdap.spi.metadata.MetadataMutation in the project cdap by caskdata: the method batch of the class ElasticsearchMetadataStorage.
/**
 * Applies a batch of metadata mutations. Empty and singleton batches are handled
 * directly; otherwise, if any entity appears more than once, the mutations are
 * applied one at a time in order, and if all entities are distinct they are
 * applied together via {@code doBatch}, retried on metadata conflicts.
 *
 * @param mutations the mutations to apply
 * @param options options controlling how the mutations are executed
 * @return the resulting metadata changes, in mutation order
 * @throws IOException if applying the mutations fails
 */
@Override
public List<MetadataChange> batch(List<? extends MetadataMutation> mutations, MutationOptions options) throws IOException {
  if (mutations.isEmpty()) {
    return Collections.emptyList();
  }
  if (mutations.size() == 1) {
    return Collections.singletonList(apply(mutations.get(0), options));
  }
  // Detect duplicate entity ids up front. LinkedHashMap.put returns a non-null
  // previous value exactly when the entity was already seen; the map itself is
  // only used on the no-duplicates path, so an overwritten entry is harmless.
  LinkedHashMap<MetadataEntity, MetadataMutation> mutationMap = new LinkedHashMap<>(mutations.size());
  boolean hasDuplicates = false;
  for (MetadataMutation mutation : mutations) {
    if (mutationMap.put(mutation.getEntity(), mutation) != null) {
      hasDuplicates = true;
      break;
    }
  }
  if (hasDuplicates) {
    // Multiple mutations target the same entity: apply them sequentially, in order.
    List<MetadataChange> sequentialChanges = new ArrayList<>(mutations.size());
    for (MetadataMutation mutation : mutations) {
      sequentialChanges.add(apply(mutation, options));
    }
    return sequentialChanges;
  }
  // Order-preserving accumulator: the first doBatch() call enters all entities;
  // retries may update individual changes but never reorder the map.
  LinkedHashMap<MetadataEntity, MetadataChange> changes = new LinkedHashMap<>(mutations.size());
  try {
    // Repeatedly read current metadata, apply the mutations, and reindex until there is no conflict.
    return Retries.callWithRetries(
        () -> doBatch(mutationMap, changes, options),
        RetryStrategies.limit(50, RetryStrategies.fixDelay(100, TimeUnit.MILLISECONDS)),
        e -> e instanceof MetadataConflictException);
  } catch (MetadataConflictException e) {
    throw new MetadataConflictException("After retries: " + e.getRawMessage(), e.getConflictingEntities());
  }
}
Use of io.cdap.cdap.spi.metadata.MetadataMutation in the project cdap by caskdata: the method removeProfileMetadata of the class ProfileMetadataMessageProcessor.
/**
 * Remove the profile metadata according to the message, currently only meant for application and schedule.
 *
 * @param message the metadata message naming the entity whose profile metadata is removed
 * @throws IOException if the batch delete against metadata storage fails
 */
private void removeProfileMetadata(MetadataMessage message) throws IOException {
  EntityId entity = message.getEntityId();
  List<MetadataMutation> deletes = new ArrayList<>();
  // Only applications and schedules carry profile metadata that needs cleanup.
  switch (entity.getEntityType()) {
    case APPLICATION:
      LOG.trace("Removing profile metadata for {}", entity);
      ApplicationId appId = (ApplicationId) message.getEntityId();
      ApplicationSpecification appSpec = message.getPayload(GSON, ApplicationSpecification.class);
      // Delete profile metadata for every profile-allowed program in the app...
      for (ProgramId programId : getAllProfileAllowedPrograms(appSpec, appId)) {
        addProfileMetadataDelete(programId, deletes);
      }
      // ...and for every schedule declared by the app's spec.
      for (ScheduleId scheduleId : getSchedulesInApp(appId, appSpec.getProgramSchedules())) {
        addProfileMetadataDelete(scheduleId, deletes);
      }
      addPluginMetadataDelete(appId, appSpec, deletes);
      break;
    case SCHEDULE:
      addProfileMetadataDelete((NamespacedEntityId) entity, deletes);
      break;
    default:
      // Other entity types have no profile metadata; nothing to collect.
      break;
  }
  if (!deletes.isEmpty()) {
    metadataStorage.batch(deletes, MutationOptions.DEFAULT);
  }
}
Use of io.cdap.cdap.spi.metadata.MetadataMutation in the project cdap by caskdata: the method deleteArtifact of the class DefaultArtifactRepository.
/**
 * Deletes an artifact from the store and then drops its metadata — both the
 * artifact's own metadata entry and one entry per plugin class it declared.
 *
 * @param artifactId the artifact to delete
 * @throws Exception if the store delete or the metadata batch fails
 */
@Override
public void deleteArtifact(Id.Artifact artifactId) throws Exception {
  ArtifactDetail artifactDetail = artifactStore.getArtifact(artifactId);
  io.cdap.cdap.proto.id.ArtifactId artifact = artifactId.toEntityId();
  // Delete the artifact before its privileges: if the store delete fails, we avoid
  // leaving an orphan artifact with no privileges. See CDAP-6648.
  artifactStore.delete(artifactId);
  List<MetadataMutation> mutations = new ArrayList<>();
  // Drop the artifact's own metadata entry.
  mutations.add(new MetadataMutation.Drop(artifact.toMetadataEntity()));
  // Drop the metadata entry of every plugin class the artifact declared.
  for (PluginClass pluginClass : artifactDetail.getMeta().getClasses().getPlugins()) {
    PluginId pluginId = new PluginId(artifact.getNamespace(), artifact.getArtifact(),
        artifact.getVersion(), pluginClass.getName(), pluginClass.getType());
    mutations.add(new MetadataMutation.Drop(pluginId.toMetadataEntity()));
  }
  metadataServiceClient.batch(mutations);
}
Aggregations