Use of com.epam.pipeline.exception.git.GitClientException in project cloud-pipeline by epam — class PipelineFileGenerationManager, method fillTemplateForPipelineVersion.
/**
 * Generates a report file for the given pipeline version by loading the document
 * template for that version and filling the docx template found at
 * {@code templatePath} in the pipeline's repository.
 *
 * @param pipelineId      ID of the pipeline to generate the file for
 * @param pipelineVersion pipeline version whose document template is loaded
 * @param templatePath    repository path of the docx template to fill
 * @param generateFileVO  carries the Luigi workflow graph applied to the template
 * @return the generated file bytes, or {@code null} if the template could not be
 *         loaded from git — callers must null-check
 */
public byte[] fillTemplateForPipelineVersion(Long pipelineId, String pipelineVersion, String templatePath, GenerateFileVO generateFileVO) {
try {
PipelineDocumentTemplate documentTemplate = pipelineDocumentTemplateManager.loadPipelineDocumentTemplateWithSpecificVersion(pipelineId, pipelineVersion);
// Overlay the caller-supplied Luigi workflow graph onto the template data.
documentTemplate.applyLuigiWorkflowGraph(generateFileVO.getLuigiWorkflowGraphVO());
return this.generateFile(templatePath, documentTemplate);
} catch (GitClientException e) {
// NOTE(review): the git failure is silently swallowed and null is returned
// with no logging — consider logging the cause; TODO confirm this is intentional.
return null;
}
}
Use of com.epam.pipeline.exception.git.GitClientException in project cloud-pipeline by epam — class PipelineFileGenerationManager, method generateFile.
/**
 * Loads the docx template at {@code templatePath} from the pipeline's git
 * repository, fills it with the template's data, and returns the resulting
 * document bytes.
 *
 * @param templatePath     repository path of the docx template
 * @param documentTemplate template model holding the pipeline, version and data
 * @return the filled document bytes, or {@code null} when the template cannot
 *         be fetched or processed
 */
private byte[] generateFile(String templatePath, PipelineDocumentTemplate documentTemplate) {
    try {
        byte[] docxTemplateData = gitManager.getPipelineFileContents(
                documentTemplate.getPipeline(), documentTemplate.getVersion().getName(), templatePath);
        // try-with-resources guarantees the POI document (and stream) are closed
        // even if template filling or serialization throws — previously the
        // document was never closed.
        try (ByteArrayInputStream inputStream = new ByteArrayInputStream(docxTemplateData);
             XWPFDocument document = new XWPFDocument(inputStream)) {
            documentTemplate.fillTemplate(document);
            ByteArrayOutputStream outputStream = new ByteArrayOutputStream();
            document.write(outputStream);
            return outputStream.toByteArray();
        }
    } catch (GitClientException | IOException e) {
        // NOTE(review): failures are surfaced as null with no logging — consider
        // logging the cause; TODO confirm this is intentional.
        return null;
    }
}
Use of com.epam.pipeline.exception.git.GitClientException in project cloud-pipeline by epam — class PipelineDocumentTemplateManager, method loadPipelineDocumentTemplateWithSpecificVersion.
/**
 * Builds a {@link PipelineDocumentTemplate} for the requested pipeline version.
 * If a git revision matching {@code version} exists, the template is bound to
 * that revision; otherwise a template without a revision is created. The
 * template is filled before being returned.
 *
 * @param pipelineId ID of the pipeline to load
 * @param version    name of the revision to look up among the pipeline's versions
 * @return the filled document template
 * @throws GitClientException if the pipeline's versions cannot be read from git
 */
public PipelineDocumentTemplate loadPipelineDocumentTemplateWithSpecificVersion(Long pipelineId, String version) throws GitClientException {
    final Pipeline pipeline = pipelineManager.load(pipelineId);
    final PipelineDocumentTemplate template = pipelineVersionManager
            .loadAllVersionFromGit(pipelineId, null)
            .stream()
            .filter(revision -> revision.getName().equals(version))
            .findFirst()
            .map(revision -> new PipelineDocumentTemplate(pipeline, revision))
            .orElseGet(() -> new PipelineDocumentTemplate(pipeline));
    fillTemplate(template);
    return template;
}
Use of com.epam.pipeline.exception.git.GitClientException in project cloud-pipeline by epam — class PipelineVersionManager, method getWorkflowGraph.
/**
 * Reads the workflow task graph for the given pipeline version. The revision is
 * first validated against git, then the pipeline's config file is checked out,
 * parsed by {@link GraphReader}, enriched with tool requirements, and the
 * temporary checkout directory is removed.
 *
 * @param id      pipeline ID
 * @param version pipeline version (git revision name) to read the graph for
 * @return the parsed task graph with merged tool requirements
 * @throws IllegalArgumentException if the revision cannot be loaded from git
 */
public TaskGraphVO getWorkflowGraph(Long id, String version) {
    Pipeline pipeline = pipelineManager.load(id);
    try {
        gitManager.loadRevision(pipeline, version);
    } catch (GitClientException e) {
        LOGGER.error(e.getMessage(), e);
        // Preserve the original exception as the cause so diagnostics are not lost.
        throw new IllegalArgumentException(e.getMessage(), e);
    }
    File config = gitManager.getConfigFile(pipeline, version);
    try {
        TaskGraphVO result = new GraphReader()
                .readGraph(graphScript, config.getParentFile().getAbsolutePath(), CONFIG_FILE_NAME);
        mergeToolsRequirements(result);
        return result;
    } finally {
        // Always clean up the checked-out config directory, even when graph
        // reading fails — previously the directory leaked on exception.
        try {
            FileUtils.deleteDirectory(config.getParentFile());
        } catch (IOException e) {
            // Best-effort cleanup: log and continue rather than masking the result.
            LOGGER.error(e.getMessage(), e);
        }
    }
}
Use of com.epam.pipeline.exception.git.GitClientException in project cloud-pipeline by epam — class AutoscaleManager, method processPod.
/**
 * Decides how to provide a node for a single unscheduled pod: skip it (nodeup
 * already in progress, node already exists, or retries exhausted), reuse an
 * existing cloud instance, reassign an idle node, or schedule a fresh nodeup
 * task — all subject to cluster-size and thread-pool limits.
 *
 * @param pod             the unscheduled pod to place
 * @param client          Kubernetes client used to inspect nodes
 * @param scheduledRuns   run IDs scheduled in this cycle (mutated on success)
 * @param tasks           async nodeup tasks accumulated by the caller (mutated)
 * @param allPods         run IDs of all currently known pods
 * @param nodes           run IDs that already have a node
 * @param reassignedNodes node IDs already consumed this cycle (mutated)
 */
private void processPod(Pod pod, KubernetesClient client, Set<String> scheduledRuns, List<CompletableFuture<Void>> tasks, Set<String> allPods, Set<String> nodes, Set<String> reassignedNodes) {
LOGGER.debug("Found an unscheduled pod: {}.", pod.getMetadata().getName());
Map<String, String> labels = pod.getMetadata().getLabels();
// The pod's run ID label links it to the pipeline run that needs a node.
String runId = labels.get(KubernetesConstants.RUN_ID_LABEL);
long longId = Long.parseLong(runId);
// Skip if a nodeup task for this run is already underway.
if (nodeUpTaskInProgress.contains(longId)) {
LOGGER.debug("Nodeup task for ID {} is already in progress.", runId);
return;
}
// Check whether node with required RunID is available
if (nodes.contains(runId)) {
LOGGER.debug("Node with required ID {} already exists.", runId);
return;
}
// check max nodeup retry count
// TODO: should we lock here?
int retryCount = nodeUpAttempts.getOrDefault(longId, 0);
int nodeUpRetryCount = preferenceManager.getPreference(SystemPreferences.CLUSTER_NODEUP_RETRY_COUNT);
// Retries exhausted: fail the run (unless already final) and drop the task.
if (retryCount >= nodeUpRetryCount) {
LOGGER.debug("Exceeded max nodeup attempts ({}) for run ID {}. Setting run status 'FAILURE'.", retryCount, runId);
pipelineRunManager.updatePipelineStatusIfNotFinal(longId, TaskStatus.FAILURE, new Date());
removeNodeUpTask(longId);
return;
}
try {
RunInstance requiredInstance = getNewRunInstance(runId);
// check whether aws instance already exists
if (awsInstance != null && awsInstance.getNodeId() != null) {
LOGGER.debug("Found {} instance for run ID {}.", awsInstance.getNodeId(), runId);
createNodeForRun(tasks, runId, requiredInstance);
return;
}
// Free nodes: have no pod, not yet reassigned this cycle, and available.
List<String> freeNodes = nodes.stream().filter(nodeId -> !allPods.contains(nodeId) && !reassignedNodes.contains(nodeId) && isNodeAvailable(client, nodeId)).collect(Collectors.toList());
LOGGER.debug("Found {} free nodes.", freeNodes.size());
// Try to reassign one of idle nodes
for (String previousId : freeNodes) {
LOGGER.debug("Found free node ID {}.", previousId);
RunInstance previousInstance = getPreviousRunInstance(previousId);
// Reassign only if the idle node's instance satisfies the run's requirements.
if (clusterManager.requirementsMatch(requiredInstance, previousInstance)) {
LOGGER.debug("Reassigning node ID {} to run {}.", previousId, runId);
boolean successfullyReassigned = clusterManager.reassignNode(previousId, runId);
if (successfullyReassigned) {
scheduledRuns.add(runId);
pipelineRunManager.updateRunInstance(longId, previousInstance);
reassignedNodes.add(previousId);
return;
}
}
}
// Check max cluster capacity
int currentClusterSize = getCurrentClusterSize(client);
NodeList nodeList = getAvailableNodes(client);
Integer maxClusterSize = preferenceManager.getPreference(SystemPreferences.CLUSTER_MAX_SIZE);
// Hard stop: already over capacity, leave the pod pending.
if (currentClusterSize > maxClusterSize) {
LOGGER.debug("Exceeded maximum cluster size {} - current size {}.", maxClusterSize, currentClusterSize);
return;
}
// Exactly at capacity: optionally scale down one non-matching free node to
// make room, otherwise leave the run pending.
if (currentClusterSize == maxClusterSize && preferenceManager.getPreference(SystemPreferences.CLUSTER_KILL_NOT_MATCHING_NODES)) {
LOGGER.debug("Current cluster size {} has reached limit {}. Checking free nodes.", currentClusterSize, maxClusterSize);
List<String> nonMatchingFreeNodes = freeNodes.stream().filter(id -> !reassignedNodes.contains(id)).collect(Collectors.toList());
if (!CollectionUtils.isEmpty(nonMatchingFreeNodes)) {
String nodeId = nonMatchingFreeNodes.get(0);
// to remove node from free
reassignedNodes.add(nodeId);
LOGGER.debug("Scaling down unused node {}.", nodeId);
clusterManager.scaleDown(nodeId);
} else {
LOGGER.debug("Exceeded maximum cluster size {}.", nodeList.getItems().size() + nodeUpTaskInProgress.size());
LOGGER.debug("Leaving pending run {}.", runId);
return;
}
}
// Throttle: cap the number of concurrent nodeup tasks.
int nodeUpTasksSize = nodeUpTaskInProgress.size();
int maxNodeUpThreads = preferenceManager.getPreference(SystemPreferences.CLUSTER_NODEUP_MAX_THREADS);
if (nodeUpTasksSize >= maxNodeUpThreads) {
LOGGER.debug("Exceeded maximum node up tasks queue size {}.", nodeUpTasksSize);
return;
}
scheduledRuns.add(runId);
createNodeForRun(tasks, runId, requiredInstance);
} catch (GitClientException | CmdExecutionException | IllegalArgumentException e) {
// NOTE(review): failure here is logged but not retried immediately — the run
// stays pending and is revisited on the next autoscale cycle; TODO confirm.
LOGGER.error("Failed to create node for run {}.", runId);
LOGGER.error("Failed to get pipeline configuration: " + e.getMessage(), e);
}
}
Aggregations