Usage of com.netflix.spinnaker.halyard.config.model.v1.node.Node in the spinnaker/halyard project: class LookupService, method getMatchingNodes.
/**
 * Recursively collects every node in the subtree rooted at {@code node} that
 * matches the filter, including {@code node} itself.
 *
 * @param node is the node whose children we want to find.
 * @param filter is the filter to lookup by.
 * @return the children of the input node matching the filter.
 */
private List<Node> getMatchingNodes(Node node, NodeFilter filter) {
  log.trace("Checking for leaf nodes of node " + node.getNodeName());
  List<Node> matches = new ArrayList<>();
  NodeIterator iterator = node.getChildren();
  // The iterator only hands back children that pass the filter; recurse into each.
  for (Node child = iterator.getNext(filter); child != null; child = iterator.getNext(filter)) {
    matches.addAll(getMatchingNodes(child, filter));
  }
  // If this node was visited at all, it matched the filter, so include it in the result.
  matches.add(node);
  return matches;
}
Usage of com.netflix.spinnaker.halyard.config.model.v1.node.Node in the spinnaker/halyard project: class DCOSAccountValidator, method validate.
/**
 * Validates a DC/OS account: checks its clusters and verifies that every docker
 * registry it references exists in the enclosing deployment configuration.
 */
// TODO(lwander) this is still a little messy - I should use the filters to get the necessary docker account
@Override
public void validate(final ConfigProblemSetBuilder problems, final DCOSAccount account) {
  // Walk up the node tree until we reach the enclosing DeploymentConfiguration.
  // An NPE here means the halconfig representation is corrupted; that indicates a
  // more serious error than anything this validator reports, so crashing is acceptable.
  Node ancestor = account.getParent();
  while (!(ancestor instanceof DeploymentConfiguration)) {
    ancestor = ancestor.getParent();
  }
  final DeploymentConfiguration deploymentConfiguration = (DeploymentConfiguration) ancestor;

  validateClusters(problems, account);
  if (account.getClusters().isEmpty()) {
    problems.addProblem(ERROR, "Account does not have any clusters configured")
        .setRemediation("Edit the account with either --update-user-credential or --update-service-credential");
  }

  final List<String> dockerRegistryNames = account.getDockerRegistries()
      .stream()
      .map(DockerRegistryReference::getAccountName)
      .collect(Collectors.toList());
  validateDockerRegistries(problems, deploymentConfiguration, dockerRegistryNames, Provider.ProviderType.DCOS);
}
Usage of com.netflix.spinnaker.halyard.config.model.v1.node.Node in the spinnaker/halyard project: class ValidatorCollection, method runAllValidators.
/**
 * Runs every registered validator against the given node.
 *
 * @param psBuilder contains the problems encountered during validation so far.
 * @param node is the node being validated.
 *
 * @return # of validators run (for logging purposes).
 */
public int runAllValidators(ConfigProblemSetBuilder psBuilder, Node node) {
  psBuilder.setNode(node);
  int count = 0;
  for (Validator validator : validators) {
    // Only validators whose target type matches this node's class actually run.
    if (runMatchingValidators(psBuilder, validator, node, node.getClass())) {
      count++;
    }
  }
  return count;
}
Usage of com.netflix.spinnaker.halyard.config.model.v1.node.Node in the spinnaker/halyard project: class Node, method serializedNonNodeFields.
/**
 * Reflectively serializes this node's simple fields into a field-name → value map.
 *
 * <p>Fields are skipped when they are themselves {@link Node}s, {@link List}s,
 * {@link Map}s, or annotated with {@link JsonIgnore} — only plain scalar-ish
 * declared fields are captured.
 *
 * @return a map from declared field name to its current value on this instance.
 * @throws RuntimeException if a field cannot be read via reflection; the
 *     underlying {@link IllegalAccessException} is preserved as the cause.
 */
private Map<String, Object> serializedNonNodeFields() {
  List<Field> fields = Arrays.stream(this.getClass().getDeclaredFields())
      .filter(f -> !(Node.class.isAssignableFrom(f.getType())
          || List.class.isAssignableFrom(f.getType())
          || Map.class.isAssignableFrom(f.getType())
          || f.getAnnotation(JsonIgnore.class) != null))
      .collect(Collectors.toList());

  Map<String, Object> res = new HashMap<>();
  for (Field field : fields) {
    field.setAccessible(true);
    try {
      res.put(field.getName(), field.get(this));
    } catch (IllegalAccessException e) {
      // Keep the original exception as the cause rather than discarding it.
      throw new RuntimeException(
          "Failed to read field " + field.getName() + " in node " + getNodeName(), e);
    } finally {
      // Restore accessibility even when the read throws (previously skipped on error).
      field.setAccessible(false);
    }
  }
  return res;
}
Usage of com.netflix.spinnaker.halyard.config.model.v1.node.Node in the spinnaker/halyard project: class HalconfigParser, method cleanLocalFiles.
/**
 * Deletes all files in the staging directory that are not referenced in the hal config.
 *
 * @param stagingDirectoryPath root of the staging directory to clean; every file
 *     under it (recursively) that no hal config node references is force-deleted.
 */
public void cleanLocalFiles(Path stagingDirectoryPath) {
  // Only clean when running as a remote daemon — presumably staged copies of local
  // files only exist in that mode. NOTE(review): confirm this guard's intent.
  if (!GlobalApplicationOptions.getInstance().isUseRemoteDaemon()) {
    return;
  }
  Halconfig halconfig = getHalconfig();
  Set<String> referencedFiles = new HashSet<String>();
  // For each node, reflectively read every field flagged as a local-file path
  // (n.localFiles() returns Fields) and collect the non-null path strings.
  Consumer<Node> fileFinder = n -> referencedFiles.addAll(n.localFiles().stream().map(f -> {
    try {
      f.setAccessible(true);
      return (String) f.get(n);
    } catch (IllegalAccessException e) {
      throw new RuntimeException("Failed to clean staging directory: " + e.getMessage(), e);
    } finally {
      f.setAccessible(false);
    }
  }).filter(Objects::nonNull).collect(Collectors.toSet()));
  // Visit every node in the config tree, accumulating referenced file paths.
  halconfig.recursiveConsume(fileFinder);
  // List every file under the staging directory recursively (both filters accept all).
  Set<String> existingStagingFiles = ((List<File>) FileUtils.listFiles(stagingDirectoryPath.toFile(), TrueFileFilter.INSTANCE, TrueFileFilter.INSTANCE)).stream().map(f -> f.getAbsolutePath()).collect(Collectors.toSet());
  // Whatever remains after removing referenced paths is orphaned and safe to delete.
  existingStagingFiles.removeAll(referencedFiles);
  try {
    for (String f : existingStagingFiles) {
      FileUtils.forceDelete(new File(f));
    }
  } catch (IOException e) {
    throw new HalException(FATAL, "Failed to clean staging directory: " + e.getMessage(), e);
  }
}
Aggregations