Use of co.cask.cdap.common.id.Id in the cdap project by caskdata.
Example: method addSystemArtifacts of class DefaultArtifactRepository.
@Override
public void addSystemArtifacts() throws Exception {
  // Scan each configured directory for artifact .jar files (and optional .json config files),
  // then add the artifacts in dependency order so parents are always added before children.
  Map<Id.Artifact, SystemArtifactInfo> systemArtifacts = new HashMap<>();
  for (File systemArtifactDir : systemArtifactDirs) {
    for (File jarFile : DirUtils.listFiles(systemArtifactDir, "jar")) {
      // parse the artifact id from the jar file name
      Id.Artifact artifactId;
      try {
        artifactId = Id.Artifact.parse(Id.Namespace.SYSTEM, jarFile.getName());
      } catch (IllegalArgumentException e) {
        // fix: the original format string had a single '%s' filled with e.getMessage(), so the
        // file name was never logged and the message ended with a dangling ": "
        LOG.warn(String.format("Skipping system artifact '%s' because the name is invalid: %s",
                               jarFile.getName(), e.getMessage()));
        continue;
      }

      // check for a corresponding .json config file next to the jar
      String artifactFileName = jarFile.getName();
      String configFileName =
        artifactFileName.substring(0, artifactFileName.length() - ".jar".length()) + ".json";
      File configFile = new File(systemArtifactDir, configFileName);

      try {
        // read and parse the config file if it exists. Otherwise use an empty config
        ArtifactConfig artifactConfig = configFile.isFile()
          ? configReader.read(artifactId.getNamespace(), configFile)
          : new ArtifactConfig();
        validateParentSet(artifactId, artifactConfig.getParents());
        validatePluginSet(artifactConfig.getPlugins());
        systemArtifacts.put(artifactId, new SystemArtifactInfo(artifactId, jarFile, artifactConfig));
      } catch (InvalidArtifactException e) {
        LOG.warn(String.format("Could not add system artifact '%s' because it is invalid.",
                               artifactFileName), e);
      }
    }
  }

  // child -> parents
  Multimap<Id.Artifact, Id.Artifact> childToParents = HashMultimap.create();
  // parent -> children
  Multimap<Id.Artifact, Id.Artifact> parentToChildren = HashMultimap.create();
  Set<Id.Artifact> remainingArtifacts = new HashSet<>();
  // build mapping from child to parents and from parents to children
  for (SystemArtifactInfo child : systemArtifacts.values()) {
    Id.Artifact childId = child.getArtifactId();
    remainingArtifacts.add(childId);
    for (SystemArtifactInfo potentialParent : systemArtifacts.values()) {
      Id.Artifact potentialParentId = potentialParent.getArtifactId();
      // skip if we're looking at ourselves
      if (childId.equals(potentialParentId)) {
        continue;
      }
      if (child.getConfig().hasParent(potentialParentId)) {
        childToParents.put(childId, potentialParentId);
        parentToChildren.put(potentialParentId, childId);
      }
    }
  }

  // Topological add: on each pass, add every artifact whose parents have all been added,
  // then unblock its children. Loop until a pass makes no progress.
  boolean nochange = false;
  while (!remainingArtifacts.isEmpty() && !nochange) {
    // add all artifacts that don't have any more parents
    Set<Id.Artifact> addedArtifacts = new HashSet<>();
    for (Id.Artifact remainingArtifact : remainingArtifacts) {
      if (!childToParents.containsKey(remainingArtifact)) {
        addSystemArtifact(systemArtifacts.get(remainingArtifact));
        addedArtifacts.add(remainingArtifact);
        // this artifact is no longer a blocking parent for its children
        for (Id.Artifact child : parentToChildren.get(remainingArtifact)) {
          childToParents.remove(child, remainingArtifact);
        }
      }
    }
    remainingArtifacts.removeAll(addedArtifacts);
    nochange = addedArtifacts.isEmpty();
  }

  // anything left over participates in a dependency cycle and can never be added
  if (!remainingArtifacts.isEmpty()) {
    LOG.warn("Unable to add system artifacts {} due to cyclic dependencies",
             Joiner.on(",").join(remainingArtifacts));
  }
}
Use of co.cask.cdap.common.id.Id in the cdap project by caskdata.
Example: method updateApp of class ApplicationLifecycleService.
/**
 * Update an existing application. An application's configuration and artifact version can be updated.
 *
 * @param appId the id of the application to update
 * @param appRequest the request to update the application, including new config and artifact
 * @param programTerminator a program terminator that will stop programs that are removed when updating an app.
 *                          For example, if an update removes a flow, the terminator defines how to stop that flow.
 * @return information about the deployed application
 * @throws ApplicationNotFoundException if the specified application does not exist
 * @throws ArtifactNotFoundException if the requested artifact does not exist
 * @throws InvalidArtifactException if the specified artifact is invalid. For example, if the artifact name changed,
 *                                  if the version is an invalid version, or the artifact contains no app classes
 * @throws Exception if there was an exception during the deployment pipeline. This exception will often wrap
 *                   the actual exception
 */
public ApplicationWithPrograms updateApp(ApplicationId appId, AppRequest appRequest,
                                         ProgramTerminator programTerminator) throws Exception {
  // Check if the current user has admin privileges on it before updating.
  authorizationEnforcer.enforce(appId, authenticationContext.getPrincipal(), Action.ADMIN);

  // check that app exists
  ApplicationSpecification currentSpec = store.getApplication(appId);
  if (currentSpec == null) {
    throw new ApplicationNotFoundException(appId);
  }
  ArtifactId currentArtifact = currentSpec.getArtifactId();

  // if no artifact is given, use the current one.
  ArtifactId newArtifactId = currentArtifact;
  // otherwise, check requested artifact is valid and use it
  ArtifactSummary requestedArtifact = appRequest.getArtifact();
  if (requestedArtifact != null) {
    // cannot change artifact name, only artifact version.
    if (!currentArtifact.getName().equals(requestedArtifact.getName())) {
      // fix: removed the accidental leading space that the original message started with
      throw new InvalidArtifactException(String.format(
        "Only artifact version updates are allowed. Cannot change from artifact '%s' to '%s'.",
        currentArtifact.getName(), requestedArtifact.getName()));
    }
    // cannot move between system and user scope either
    if (!currentArtifact.getScope().equals(requestedArtifact.getScope())) {
      throw new InvalidArtifactException("Only artifact version updates are allowed. "
        + "Cannot change from a non-system artifact to a system artifact or vice versa.");
    }
    // check requested artifact version is valid
    ArtifactVersion requestedVersion = new ArtifactVersion(requestedArtifact.getVersion());
    if (requestedVersion.getVersion() == null) {
      throw new InvalidArtifactException(String.format(
        "Requested artifact version '%s' is invalid", requestedArtifact.getVersion()));
    }
    newArtifactId = new ArtifactId(currentArtifact.getName(), requestedVersion, currentArtifact.getScope());
  }

  // ownerAdmin.getImpersonationPrincipal will give the owner which will be impersonated for the application
  // irrespective of the version
  SecurityUtil.verifyOwnerPrincipal(appId, appRequest.getOwnerPrincipal(), ownerAdmin);

  Object requestedConfigObj = appRequest.getConfig();
  // if config is null, use the previous config. Shouldn't use a static GSON since the request Config object can
  // be a user class, otherwise there will be ClassLoader leakage.
  String requestedConfigStr = requestedConfigObj == null
    ? currentSpec.getConfiguration()
    : new Gson().toJson(requestedConfigObj);

  Id.Artifact artifactId = Id.Artifact.fromEntityId(Artifacts.toArtifactId(appId.getParent(), newArtifactId));
  return deployApp(appId.getParent(), appId.getApplication(), null, artifactId, requestedConfigStr,
                   programTerminator, ownerAdmin.getOwner(appId), appRequest.canUpdateSchedules());
}
Use of co.cask.cdap.common.id.Id in the cdap project by caskdata.
Example: method testWorkflowTokenPut of class WorkflowHttpHandlerTest.
@Test
public void testWorkflowTokenPut() throws Exception {
  // Deploy the app and build the ids of the workflow and the spark program inside it.
  Assert.assertEquals(200, deploy(WorkflowTokenTestPutApp.class).getStatusLine().getStatusCode());
  Id.Application appId = Id.Application.from(Id.Namespace.DEFAULT, WorkflowTokenTestPutApp.NAME);
  Id.Workflow workflowId = Id.Workflow.from(appId, WorkflowTokenTestPutApp.WorkflowTokenTestPut.NAME);
  // fix: removed the unused local 'mapReduceId' the original declared but never referenced
  Id.Program sparkId = Id.Program.from(appId, ProgramType.SPARK, WorkflowTokenTestPutApp.SparkTestApp.NAME);

  // Start program with inputPath and outputPath arguments.
  // This should succeed. The programs inside the workflow will attempt to write to the workflow token
  // from the Mapper's and Reducer's methods as well as from a Spark closure, and they will throw an exception
  // if that succeeds.
  // The MapReduce's initialize will record the workflow run id in the token, and the destroy as well
  // as the mapper and the reducer will validate that they have the same workflow run id.
  String outputPath = new File(tmpFolder.newFolder(), "output").getAbsolutePath();
  startProgram(workflowId,
               ImmutableMap.of("inputPath", createInputForRecordVerification("sixthInput"),
                               "outputPath", outputPath));
  waitState(workflowId, ProgramStatus.RUNNING.name());
  waitState(workflowId, ProgramStatus.STOPPED.name());

  // validate the completed workflow run and validate that it is the same as recorded in the token
  verifyProgramRuns(workflowId, ProgramRunStatus.COMPLETED);
  List<RunRecord> runs = getProgramRuns(workflowId, ProgramRunStatus.COMPLETED);
  Assert.assertEquals(1, runs.size());
  String wfRunId = runs.get(0).getPid();
  WorkflowTokenDetail tokenDetail = getWorkflowToken(workflowId, wfRunId, null, null);
  List<WorkflowTokenDetail.NodeValueDetail> details = tokenDetail.getTokenData().get("wf.runid");
  Assert.assertEquals(1, details.size());
  Assert.assertEquals(wfRunId, details.get(0).getValue());

  // validate that none of the mapper, reducer or spark closure were able to write to the token
  for (String key : new String[] { "mapper.initialize.key", "map.key",
                                   "reducer.initialize.key", "reduce.key", "some.key" }) {
    Assert.assertFalse(tokenDetail.getTokenData().containsKey(key));
  }

  // the spark program inside the workflow must have completed exactly once
  List<RunRecord> sparkProgramRuns = getProgramRuns(sparkId, ProgramRunStatus.COMPLETED);
  Assert.assertEquals(1, sparkProgramRuns.size());
}
Use of co.cask.cdap.common.id.Id in the cdap project by caskdata.
Example: method testWorkflowSchedules of class WorkflowHttpHandlerTest.
@Ignore
@Test
public void testWorkflowSchedules() throws Exception {
  // Steps for the test:
  // 1. Deploy the app
  // 2. Verify the schedules
  // 3. Verify the history after waiting a while
  // 4. Suspend the schedule
  // 5. Verify there are no runs after the suspend by looking at the history
  // 6. Resume the schedule
  // 7. Verify there are runs after the resume by looking at the history
  String appName = AppWithSchedule.NAME;
  String workflowName = AppWithSchedule.WORKFLOW_NAME;
  String sampleSchedule = AppWithSchedule.SCHEDULE;

  // deploy app with schedule in namespace 2
  HttpResponse response = deploy(AppWithSchedule.class, Constants.Gateway.API_VERSION_3_TOKEN, TEST_NAMESPACE2);
  Assert.assertEquals(200, response.getStatusLine().getStatusCode());

  Id.Program programId = Id.Program.from(TEST_NAMESPACE2, appName, ProgramType.WORKFLOW, workflowName);
  Map<String, String> runtimeArguments =
    ImmutableMap.of("someKey", "someWorkflowValue", "workflowKey", "workflowValue");
  setAndTestRuntimeArgs(programId, runtimeArguments);

  // get schedules
  List<ScheduleDetail> schedules = getSchedules(TEST_NAMESPACE2, appName, workflowName);
  Assert.assertEquals(1, schedules.size());
  String scheduleName = schedules.get(0).getName();
  Assert.assertFalse(scheduleName.isEmpty());

  // TODO [CDAP-2327] Sagar Investigate why following check fails sometimes. Mostly test case issue.
  // List<ScheduledRuntime> previousRuntimes = getScheduledRunTime(programId, scheduleName, "previousruntime");
  // Assert.assertTrue(previousRuntimes.size() == 0);
  long current = System.currentTimeMillis();

  // sampleSchedule is initially suspended, so listing schedules with SCHEDULED status will get 0 schedule
  schedules = getSchedules(TEST_NAMESPACE2, appName, ApplicationId.DEFAULT_VERSION,
                           sampleSchedule, ProgramScheduleStatus.SCHEDULED);
  Assert.assertEquals(0, schedules.size());

  // Resume the schedule
  Assert.assertEquals(200, resumeSchedule(TEST_NAMESPACE2, appName, sampleSchedule));
  // Check schedule status
  assertSchedule(programId, scheduleName, true, 30, TimeUnit.SECONDS);

  // sampleSchedule is now resumed in SCHEDULED, so listing schedules with SCHEDULED status
  // should return sampleSchedule
  schedules = getSchedules(TEST_NAMESPACE2, appName, ApplicationId.DEFAULT_VERSION,
                           sampleSchedule, ProgramScheduleStatus.SCHEDULED);
  Assert.assertEquals(1, schedules.size());
  Assert.assertEquals(TEST_NAMESPACE2, schedules.get(0).getNamespace());
  Assert.assertEquals(appName, schedules.get(0).getApplication());
  Assert.assertEquals(ApplicationId.DEFAULT_VERSION, schedules.get(0).getApplicationVersion());
  Assert.assertEquals(sampleSchedule, schedules.get(0).getName());

  List<ScheduledRuntime> runtimes = getScheduledRunTime(programId, true);
  // fix: guard against an empty list so a scheduling failure reports here instead of as an
  // IndexOutOfBoundsException on the next line
  Assert.assertFalse("Expected at least one scheduled runtime", runtimes.isEmpty());
  String id = runtimes.get(0).getId();
  Assert.assertTrue(String.format("Expected schedule id '%s' to contain schedule name '%s'",
                                  id, scheduleName), id.contains(scheduleName));
  Long nextRunTime = runtimes.get(0).getTime();
  Assert.assertTrue(String.format("Expected nextRuntime '%s' to be greater than current runtime '%s'",
                                  nextRunTime, current), nextRunTime > current);

  // Verify that at least one program is completed
  verifyProgramRuns(programId, ProgramRunStatus.COMPLETED);

  // Suspend the schedule
  Assert.assertEquals(200, suspendSchedule(TEST_NAMESPACE2, appName, scheduleName));
  // check paused state
  assertSchedule(programId, scheduleName, false, 30, TimeUnit.SECONDS);

  // check that there were at least 1 previous runs
  List<ScheduledRuntime> previousRuntimes = getScheduledRunTime(programId, false);
  int numRuns = previousRuntimes.size();
  Assert.assertTrue(String.format("After sleeping for two seconds, the schedule should have at least triggered "
                                  + "once, but found %s previous runs", numRuns), numRuns >= 1);

  // Verify no program running
  verifyNoRunWithStatus(programId, ProgramRunStatus.RUNNING);

  // get number of completed runs after schedule is suspended
  int workflowRuns = getProgramRuns(programId, ProgramRunStatus.COMPLETED).size();

  // verify that resuming the suspended schedule again has expected behavior (spawns new runs)
  Assert.assertEquals(200, resumeSchedule(TEST_NAMESPACE2, appName, scheduleName));
  // check scheduled state
  assertSchedule(programId, scheduleName, true, 30, TimeUnit.SECONDS);

  // Verify that the program ran after the schedule was resumed
  verifyProgramRuns(programId, ProgramRunStatus.COMPLETED, workflowRuns);

  // Suspend the schedule
  Assert.assertEquals(200, suspendSchedule(TEST_NAMESPACE2, appName, scheduleName));
  // check paused state
  assertSchedule(programId, scheduleName, false, 30, TimeUnit.SECONDS);

  // Check status of a non existing schedule
  try {
    assertSchedule(programId, "invalid", true, 2, TimeUnit.SECONDS);
    Assert.fail();
  } catch (Exception e) {
    // expected
  }

  // Schedule operations using invalid namespace
  try {
    assertSchedule(Id.Program.from(TEST_NAMESPACE1, appName, ProgramType.WORKFLOW, workflowName),
                   scheduleName, true, 2, TimeUnit.SECONDS);
    Assert.fail();
  } catch (Exception e) {
    // expected
  }
  Assert.assertEquals(404, suspendSchedule(TEST_NAMESPACE1, appName, scheduleName));
  Assert.assertEquals(404, resumeSchedule(TEST_NAMESPACE1, appName, scheduleName));

  verifyNoRunWithStatus(programId, ProgramRunStatus.RUNNING);
  // fix: delete by the same app name used to deploy (AppWithSchedule.NAME); the original used
  // AppWithSchedule.class.getSimpleName(), which breaks cleanup if NAME differs from the class name
  deleteApp(Id.Application.from(TEST_NAMESPACE2, appName), 200);
}
Use of co.cask.cdap.common.id.Id in the cdap project by caskdata.
Example: method clear of class ArtifactStore.
/**
 * Clear all data in the given namespace. Used only in unit tests.
 *
 * @param namespace the namespace to delete data in
 * @throws IOException if there was some problem deleting the data
 */
@VisibleForTesting
void clear(final NamespaceId namespace) throws IOException {
  final Id.Namespace namespaceId = Id.Namespace.fromEntityId(namespace);
  // delete the artifact files themselves before touching the metadata
  namespacedLocationFactory.get(namespace).append(ARTIFACTS_PATH).delete(true);

  Transactionals.execute(transactional, context -> {
    Table metaTable = getMetaTable(context);

    // delete all rows about artifacts in the namespace
    deleteRows(metaTable, scanArtifacts(namespace));

    // delete all rows about artifacts in the namespace and the plugins they have access to
    // (key range ':' .. ';' covers every key with this namespace prefix)
    deleteRows(metaTable, new Scan(
      Bytes.toBytes(String.format("%s:%s:", PLUGIN_PREFIX, namespace.getNamespace())),
      Bytes.toBytes(String.format("%s:%s;", PLUGIN_PREFIX, namespace.getNamespace()))));

    // delete all rows about universal plugins
    deleteRows(metaTable, scanUniversalPlugin(namespace.getNamespace(), null));

    // delete app classes in this namespace
    deleteRows(metaTable, scanAppClasses(namespace));

    // delete plugins in this namespace from system artifacts
    // for example, if there was an artifact in this namespace that extends a system artifact.
    // this loop deletes individual columns rather than whole rows, so it stays inline.
    Scan systemPluginsScan = new Scan(
      Bytes.toBytes(String.format("%s:%s:", PLUGIN_PREFIX, Id.Namespace.SYSTEM.getId())),
      Bytes.toBytes(String.format("%s:%s;", PLUGIN_PREFIX, Id.Namespace.SYSTEM.getId())));
    try (Scanner scanner = metaTable.scan(systemPluginsScan)) {
      Row row;
      while ((row = scanner.next()) != null) {
        for (Map.Entry<byte[], byte[]> columnVal : row.getColumns().entrySet()) {
          // the column is the id of the artifact the plugin is from
          ArtifactColumn column = ArtifactColumn.parse(columnVal.getKey());
          // if the plugin artifact is in the namespace we're deleting, delete this column.
          if (column.artifactId.getNamespace().equals(namespaceId)) {
            metaTable.delete(row.getRow(), column.getColumn());
          }
        }
      }
    }
  }, IOException.class);
}

/**
 * Deletes every row returned by the given scan. Extracted to replace the four identical
 * scan-and-delete loops that were previously copy-pasted inside {@link #clear}.
 */
private void deleteRows(Table metaTable, Scan scan) {
  try (Scanner scanner = metaTable.scan(scan)) {
    Row row;
    while ((row = scanner.next()) != null) {
      metaTable.delete(row.getRow());
    }
  }
}
Aggregations