Usage of org.glassfish.common.util.admin.MapInjectionResolver in the Payara project (payara/Payara): the CLICommand class, method inject().
/**
 * Injects the final values of all the command parameters into this
 * command instance.
 *
 * @throws CommandException if execution of the command fails
 * @throws CommandValidationException if there's something wrong
 * with the options or arguments
 */
protected void inject() throws CommandException {
    // The injector expects the operands to appear in the ParameterMap
    // under the well-known "DEFAULT" key.
    options.set("DEFAULT", operands);

    // Mirror the relevant ProgramOptions flags into the parameter map,
    // but only for options the command model actually declares.
    if (commandModel.getModelFor("terse") != null) {
        options.set("terse", String.valueOf(programOpts.isTerse()));
    }
    if (commandModel.getModelFor("extraterse") != null) {
        options.set("extraterse", String.valueOf(programOpts.isExtraTerse()));
    }
    if (commandModel.getModelFor("autoname") != null) {
        options.set("autoname", String.valueOf(programOpts.isAutoName()));
    }

    // Build the resolver that maps parameter names to values, then
    // perform the actual field/method injection on this instance.
    InjectionResolver<Param> resolver = new MapInjectionResolver(commandModel, options);
    try {
        injectionMgr.inject(this, resolver);
    } catch (UnsatisfiedDependencyException e) {
        // A missing or unmappable parameter is a validation failure,
        // not an execution failure.
        throw new CommandValidationException(e.getMessage(), e);
    }
}
Usage of org.glassfish.common.util.admin.MapInjectionResolver in the Payara project (payara/Payara): the SupplementalCommandExecutorImpl class, method getInjector().
/**
 * Builds a parameter-injection resolver for the given supplemental command.
 *
 * @param command the command to be injected
 * @param parameters the parameter values to inject
 * @param map uploaded-file options, keyed by option name
 * @param context the admin command context to attach to the resolver
 * @return a MapInjectionResolver configured for the command's model
 */
private InjectionResolver<Param> getInjector(AdminCommand command, ParameterMap parameters, MultiMap<String, File> map, AdminCommandContext context) {
    // Prefer the command's self-described model; otherwise derive one
    // reflectively from the command class.
    CommandModel model;
    if (command instanceof CommandModelProvider) {
        model = ((CommandModelProvider) command).getModel();
    } else {
        model = new CommandModelImpl(command.getClass());
    }
    MapInjectionResolver resolver = new MapInjectionResolver(model, parameters, map);
    resolver.setContext(context);
    return resolver;
}
Usage of org.glassfish.common.util.admin.MapInjectionResolver in the Payara project (payara/Payara): the CommandRunnerImpl class, method doCommand().
/**
 * Called from ExecutionContext.execute.
 *
 * Orchestrates the full execution of an admin command: model resolution,
 * parameter extraction and validation, injection, authorization,
 * target/replication validation, lock acquisition, supplemental-command
 * execution (before/after/after-replication), the main command itself,
 * replication to remote instances, and undo on failure. The outcome is
 * reported through the invocation's ActionReport rather than a return value.
 */
private void doCommand(ExecutionContext inv, AdminCommand command, final Subject subject, final Job job) {
// A checkpoint resume (REVERTING or FAILED_RETRYABLE) skips the steps that
// already ran the first time: report init, validation, progress-status
// setup and the pre-supplemental commands.
boolean fromCheckpoint = job != null && (job.getState() == AdminCommandState.State.REVERTING || job.getState() == AdminCommandState.State.FAILED_RETRYABLE);
CommandModel model;
// NOTE(review): ClassCastException is used here as control flow instead of
// an instanceof check (compare getInjector elsewhere in this project);
// behavior is equivalent but the idiom is unusual.
try {
CommandModelProvider c = CommandModelProvider.class.cast(command);
model = c.getModel();
} catch (ClassCastException e) {
model = new CommandModelImpl(command.getClass());
}
UploadedFilesManager ufm = null;
ActionReport report = inv.report();
if (!fromCheckpoint) {
report.setActionDescription(model.getCommandName() + " command");
report.setActionExitCode(ActionReport.ExitCode.SUCCESS);
}
ParameterMap parameters;
// NOTE(review): job is null-checked above but dereferenced unconditionally
// here (job.getEventBroker()/job.getId()) — confirm callers never pass null.
final AdminCommandContext context = new AdminCommandContextImpl(logger, report, inv.inboundPayload(), inv.outboundPayload(), job.getEventBroker(), job.getId());
context.setSubject(subject);
List<RuntimeType> runtimeTypes = new ArrayList<>();
FailurePolicy fp = null;
Set<CommandTarget> targetTypesAllowed = new HashSet<>();
ActionReport.ExitCode preSupplementalReturn = ActionReport.ExitCode.SUCCESS;
ActionReport.ExitCode postSupplementalReturn = ActionReport.ExitCode.SUCCESS;
CommandRunnerProgressHelper progressHelper = new CommandRunnerProgressHelper(command, model.getCommandName(), job, inv.progressStatusChild);
// If this glassfish installation does not have stand alone instances / clusters at all, then
// lets not even look Supplemental command and such. A small optimization
boolean doReplication = false;
if ((domain.getServers().getServer().size() > 1) || (!domain.getClusters().getCluster().isEmpty())) {
doReplication = true;
} else {
logger.fine(adminStrings.getLocalString("dynamicreconfiguration.diagnostics.devmode", "The GlassFish environment does not have any clusters or instances present; Replication is turned off"));
}
try {
// Get list of supplemental commands
Collection<SupplementalCommand> supplementalCommands = supplementalExecutor.listSupplementalCommands(model.getCommandName());
try {
/*
 * Extract any uploaded files and build a map from parameter names
 * to the corresponding extracted, uploaded file.
 */
ufm = new UploadedFilesManager(inv.report, logger, inv.inboundPayload());
// Delegated (typed-parameter) commands bypass the ParameterMap path,
// validation, authorization and replication entirely: inject and run.
if (inv.typedParams() != null) {
logger.fine(adminStrings.getLocalString("dynamicreconfiguration.diagnostics.delegatedcommand", "This command is a delegated command. Dynamic reconfiguration will be bypassed"));
InjectionResolver<Param> injectionTarget = new DelegatedInjectionResolver(model, inv.typedParams(), ufm.optionNameToFileMap());
if (injectParameters(model, command, injectionTarget, report)) {
inv.setReport(doCommand(model, command, context, progressHelper));
}
return;
}
parameters = inv.parameters();
if (parameters == null) {
// no parameters, pass an empty collection
parameters = new ParameterMap();
}
// "help"/"Xhelp" short-circuit: emit the man page (or generated help)
// instead of executing the command.
if (isSet(parameters, "help") || isSet(parameters, "Xhelp")) {
BufferedReader in = getManPage(model.getCommandName(), model);
String manPage = encodeManPage(in);
if (manPage != null && isSet(parameters, "help")) {
inv.report().getTopMessagePart().addProperty("MANPAGE", manPage);
} else {
report.getTopMessagePart().addProperty(AdminCommandResponse.GENERATED_HELP, "true");
getHelp(command, report);
}
return;
}
// Validate parameters against the model (skipped on checkpoint resume
// since they were validated on the original run).
try {
if (!fromCheckpoint && !skipValidation(command)) {
validateParameters(model, parameters);
}
} catch (MultiException e) {
// If the cause is UnacceptableValueException -- we want the message
// from it. It is wrapped with a less useful Exception.
Exception exception = e;
for (Throwable cause : e.getErrors()) {
if (cause != null && (cause instanceof UnacceptableValueException)) {
// throw away the wrapper.
exception = (Exception) cause;
break;
}
}
logger.log(Level.SEVERE, KernelLoggerInfo.invocationException, exception);
report.setActionExitCode(ActionReport.ExitCode.FAILURE);
report.setMessage(exception.getMessage());
report.setFailureCause(exception);
ActionReport.MessagePart childPart = report.getTopMessagePart().addChild();
childPart.setMessage(getUsageText(model));
return;
}
// initialize the injector and inject
MapInjectionResolver injectionMgr = new MapInjectionResolver(model, parameters, ufm.optionNameToFileMap());
injectionMgr.setContext(context);
if (!injectParameters(model, command, injectionMgr, report)) {
return;
}
CommandSupport.init(habitat, command, context, job);
/*
 * Now that parameters have been injected into the command object,
 * decide if the current Subject should be permitted to execute
 * the command. We need to wait until after injection is done
 * because the class might implement its own authorization check
 * and that logic might need the injected values.
 */
final Map<String, Object> env = buildEnvMap(parameters);
try {
if (!commandSecurityChecker.authorize(context.getSubject(), env, command, context)) {
/*
 * If the command class tried to prepare itself but
 * could not then the return is false and the command has
 * set the action report accordingly. Don't process
 * the command further and leave the action report alone.
 */
return;
}
} catch (SecurityException ex) {
report.setFailureCause(ex);
report.setActionExitCode(ActionReport.ExitCode.FAILURE);
report.setMessage(adminStrings.getLocalString("commandrunner.noauth", "User is not authorized for this command"));
return;
} catch (Exception ex) {
report.setFailureCause(ex);
report.setActionExitCode(ActionReport.ExitCode.FAILURE);
report.setMessage(adminStrings.getLocalString("commandrunner.errAuth", "Error during authorization"));
return;
}
logger.fine(adminStrings.getLocalString("dynamicreconfiguration.diagnostics.injectiondone", "Parameter mapping, validation, injection completed successfully; Starting paramater injection"));
// Read cluster annotation attributes
// Absent @ExecuteOn defaults to DAS+INSTANCE with FailurePolicy.Error;
// an empty value() likewise means DAS+INSTANCE.
org.glassfish.api.admin.ExecuteOn clAnnotation = model.getClusteringAttributes();
if (clAnnotation == null) {
runtimeTypes.add(RuntimeType.DAS);
runtimeTypes.add(RuntimeType.INSTANCE);
fp = FailurePolicy.Error;
} else {
if (clAnnotation.value().length == 0) {
runtimeTypes.add(RuntimeType.DAS);
runtimeTypes.add(RuntimeType.INSTANCE);
} else {
runtimeTypes.addAll(Arrays.asList(clAnnotation.value()));
}
if (clAnnotation.ifFailure() == null) {
fp = FailurePolicy.Error;
} else {
fp = clAnnotation.ifFailure();
}
}
TargetType tgtTypeAnnotation = command.getClass().getAnnotation(TargetType.class);
// @TargetType since we do not want to replicate the command
// SINGLE_INSTANCE commands must not declare @TargetType and are never
// replicated.
if (runtimeTypes.contains(RuntimeType.SINGLE_INSTANCE)) {
if (tgtTypeAnnotation != null) {
report.setActionExitCode(ActionReport.ExitCode.FAILURE);
report.setMessage(adminStrings.getLocalString("commandrunner.executor.targettype.unallowed", "Target type is not allowed on single instance command {0} ,", model.getCommandName()));
return;
}
// Do not replicate the command when there is
// @ExecuteOn(RuntimeType.SINGLE_INSTANCE)
doReplication = false;
}
// Resolve the effective target: explicit --target, then the command's
// own DeploymentTargetResolver, then the default "server" (the DAS).
String targetName = parameters.getOne("target");
if (targetName == null || model.getModelFor("target").getParam().obsolete()) {
if (command instanceof DeploymentTargetResolver) {
targetName = ((DeploymentTargetResolver) command).getTarget(parameters);
} else {
targetName = "server";
}
}
logger.fine(adminStrings.getLocalString("dynamicreconfiguration.diagnostics.target", "@ExecuteOn parsing and default settings done; Current target is {0}", targetName));
// Target-type validation only happens on the DAS; instances run the
// command as delivered.
if (serverEnv.isDas()) {
if (tgtTypeAnnotation != null) {
targetTypesAllowed.addAll(Arrays.asList(tgtTypeAnnotation.value()));
}
// If not @TargetType, default it
if (targetTypesAllowed.isEmpty()) {
targetTypesAllowed.add(CommandTarget.DAS);
targetTypesAllowed.add(CommandTarget.STANDALONE_INSTANCE);
targetTypesAllowed.add(CommandTarget.CLUSTER);
targetTypesAllowed.add(CommandTarget.CONFIG);
targetTypesAllowed.add(CommandTarget.DEPLOYMENT_GROUP);
}
// ONLY if the target is "server"
if (CommandTarget.DAS.isValid(habitat, targetName) && !runtimeTypes.contains(RuntimeType.DAS)) {
runtimeTypes.add(RuntimeType.DAS);
}
logger.fine(adminStrings.getLocalString("dynamicreconfiguration.diagnostics.runtimeTypes", "RuntimeTypes are: {0}", runtimeTypes.toString()));
logger.fine(adminStrings.getLocalString("dynamicreconfiguration,diagnostics.targetTypes", "TargetTypes are: {0}", targetTypesAllowed.toString()));
// Is there a server or a cluster or a config with given name ?
if ((!CommandTarget.DOMAIN.isValid(habitat, targetName)) && (domain.getServerNamed(targetName) == null) && (domain.getClusterNamed(targetName) == null) && (domain.getConfigNamed(targetName) == null) && (domain.getDeploymentGroupNamed(targetName) == null)) {
report.setActionExitCode(ActionReport.ExitCode.FAILURE);
report.setMessage(adminStrings.getLocalString("commandrunner.executor.invalidtarget", "Unable to find a valid target with name {0}", targetName));
return;
}
// Does this command allow this target type
boolean isTargetValidType = false;
Iterator<CommandTarget> it = targetTypesAllowed.iterator();
while (it.hasNext()) {
if (it.next().isValid(habitat, targetName)) {
isTargetValidType = true;
break;
}
}
if (!isTargetValidType) {
StringBuilder validTypes = new StringBuilder();
it = targetTypesAllowed.iterator();
while (it.hasNext()) {
validTypes.append(it.next().getDescription()).append(", ");
}
report.setActionExitCode(ActionReport.ExitCode.FAILURE);
report.setMessage(adminStrings.getLocalString("commandrunner.executor.invalidtargettype", "Target {0} is not a supported type. Command {1} supports these types of targets only : {2}", targetName, model.getCommandName(), validTypes.toString()));
return;
}
// instance, return error
// A clustered instance may only be targeted directly when the command
// explicitly allows CLUSTERED_INSTANCE.
if ((CommandTarget.CLUSTERED_INSTANCE.isValid(habitat, targetName)) && (!targetTypesAllowed.contains(CommandTarget.CLUSTERED_INSTANCE))) {
Cluster c = domain.getClusterForInstance(targetName);
report.setActionExitCode(ActionReport.ExitCode.FAILURE);
report.setMessage(adminStrings.getLocalString("commandrunner.executor.instanceopnotallowed", "The {0} command is not allowed on instance {1} because it is part of cluster {2}", model.getCommandName(), targetName, c.getName()));
return;
}
logger.fine(adminStrings.getLocalString("dynamicreconfiguration.diagnostics.replicationvalidationdone", "All @ExecuteOn attribute and type validation completed successfully. Starting replication stages"));
}
/**
 * We're finally ready to actually execute the command instance.
 * Acquire the appropriate lock.
 */
Lock lock = null;
boolean lockTimedOut = false;
try {
// XXX: The owner of the lock should not be hardcoded. The
// value is not used yet.
lock = adminLock.getLock(command, "asadmin");
// Set there progress statuses
if (!fromCheckpoint) {
for (SupplementalCommand supplementalCommand : supplementalCommands) {
progressHelper.addProgressStatusToSupplementalCommand(supplementalCommand);
}
}
// If command is undoable, then invoke prepare method
if (command instanceof UndoableCommand) {
UndoableCommand uCmd = (UndoableCommand) command;
logger.fine(adminStrings.getLocalString("dynamicreconfiguration.diagnostics.prepareunodable", "Command execution stage 1 : Calling prepare for undoable command {0}", inv.name()));
if (!uCmd.prepare(context, parameters).equals(ActionReport.ExitCode.SUCCESS)) {
report.setActionExitCode(ActionReport.ExitCode.FAILURE);
report.setMessage(adminStrings.getLocalString("commandrunner.executor.errorinprepare", "The command {0} cannot be completed because the preparation for the command failed " + "indicating potential issues : {1}", model.getCommandName(), report.getMessage()));
return;
}
}
ClusterOperationUtil.clearInstanceList();
// Run Supplemental commands that have to run before this command on this instance type
if (!fromCheckpoint) {
logger.fine(adminStrings.getLocalString("dynamicreconfiguration.diagnostics.presupplemental", "Command execution stage 2 : Call pre supplemental commands for {0}", inv.name()));
preSupplementalReturn = supplementalExecutor.execute(supplementalCommands, Supplemental.Timing.Before, context, parameters, ufm.optionNameToFileMap());
if (preSupplementalReturn.equals(ActionReport.ExitCode.FAILURE)) {
report.setActionExitCode(preSupplementalReturn);
if (!StringUtils.ok(report.getTopMessagePart().getMessage())) {
report.setMessage(adminStrings.getLocalString("commandrunner.executor.supplementalcmdfailed", "A supplemental command failed; cannot proceed further"));
}
return;
}
}
// Run main command if it is applicable for this instance type
if ((runtimeTypes.contains(RuntimeType.ALL)) || (serverEnv.isDas() && (CommandTarget.DOMAIN.isValid(habitat, targetName) || runtimeTypes.contains(RuntimeType.DAS))) || runtimeTypes.contains(RuntimeType.SINGLE_INSTANCE) || (serverEnv.isInstance() && runtimeTypes.contains(RuntimeType.INSTANCE))) {
logger.fine(adminStrings.getLocalString("dynamicreconfiguration.diagnostics.maincommand", "Command execution stage 3 : Calling main command implementation for {0}", inv.name()));
report = doCommand(model, command, context, progressHelper);
inv.setReport(report);
}
if (!FailurePolicy.applyFailurePolicy(fp, report.getActionExitCode()).equals(ActionReport.ExitCode.FAILURE)) {
// Run Supplemental commands that have to be run after this command on this instance type
logger.fine(adminStrings.getLocalString("dynamicreconfiguration.diagnostics.postsupplemental", "Command execution stage 4 : Call post supplemental commands for {0}", inv.name()));
postSupplementalReturn = supplementalExecutor.execute(supplementalCommands, Supplemental.Timing.After, context, parameters, ufm.optionNameToFileMap());
if (postSupplementalReturn.equals(ActionReport.ExitCode.FAILURE)) {
report.setActionExitCode(postSupplementalReturn);
report.setMessage(adminStrings.getLocalString("commandrunner.executor.supplementalcmdfailed", "A supplemental command failed; cannot proceed further"));
return;
}
}
} catch (AdminCommandLockTimeoutException ex) {
// Lock never acquired: report failure; lockTimedOut also suppresses
// the unlock in the finally block below.
lockTimedOut = true;
String lockTime = formatSuspendDate(ex.getTimeOfAcquisition());
String logMsg = "Command: " + model.getCommandName() + " failed to acquire a command lock. REASON: time out " + "(current lock acquired on " + lockTime + ")";
String msg = adminStrings.getLocalString("lock.timeout", "Command timed out. Unable to acquire a lock to access " + "the domain. Another command acquired exclusive access " + "to the domain on {0}. Retry the command at a later " + "time.", lockTime);
report.setMessage(msg);
report.setActionExitCode(ActionReport.ExitCode.FAILURE);
} catch (AdminCommandLockException ex) {
// Domain suspended by a user; same unlock-suppression as above.
lockTimedOut = true;
String lockTime = formatSuspendDate(ex.getTimeOfAcquisition());
String lockMsg = ex.getMessage();
String logMsg;
logMsg = "Command: " + model.getCommandName() + " was blocked. The domain was suspended by a " + "user on:" + lockTime;
if (lockMsg != null && !lockMsg.isEmpty()) {
logMsg += " Reason: " + lockMsg;
}
String msg = adminStrings.getLocalString("lock.notacquired", "The command was blocked. The domain was suspended by " + "a user on {0}.", lockTime);
if (lockMsg != null && !lockMsg.isEmpty()) {
msg += " " + adminStrings.getLocalString("lock.reason", "Reason:") + " " + lockMsg;
}
report.setMessage(msg);
report.setActionExitCode(ActionReport.ExitCode.FAILURE);
} finally {
// command is done, release the lock
// NOTE(review): `lockTimedOut == false` would read better as
// `!lockTimedOut`; behavior is identical.
if (lock != null && lockTimedOut == false) {
lock.unlock();
}
}
} catch (Exception ex) {
// Any unexpected failure during the stages above: log, fail the
// report, and append the usage text for the operator.
logger.log(Level.SEVERE, KernelLoggerInfo.invocationException, ex);
report.setActionExitCode(ActionReport.ExitCode.FAILURE);
report.setMessage(ex.getMessage());
report.setFailureCause(ex);
ActionReport.MessagePart childPart = report.getTopMessagePart().addChild();
childPart.setMessage(getUsageText(model));
return;
}
// Embedded processes do no replication or post-processing.
if (processEnv.getProcessType().isEmbedded()) {
return;
}
// Propagate supplemental-command warnings to the overall exit code.
if (preSupplementalReturn == ActionReport.ExitCode.WARNING || postSupplementalReturn == ActionReport.ExitCode.WARNING) {
report.setActionExitCode(ActionReport.ExitCode.WARNING);
}
// Stage 5: replicate to remote instances from the DAS when the command's
// runtime types call for it and the failure policy allows continuing.
if (doReplication && (!FailurePolicy.applyFailurePolicy(fp, report.getActionExitCode()).equals(ActionReport.ExitCode.FAILURE)) && (serverEnv.isDas()) && (runtimeTypes.contains(RuntimeType.INSTANCE) || runtimeTypes.contains(RuntimeType.ALL))) {
logger.fine(adminStrings.getLocalString("dynamicreconfiguration.diagnostics.startreplication", "Command execution stages completed on DAS; Starting replication on remote instances"));
ClusterExecutor executor = null;
// This try-catch block is a fix for 13838
try {
if (model.getClusteringAttributes() != null && model.getClusteringAttributes().executor() != null) {
executor = habitat.getService(model.getClusteringAttributes().executor());
} else {
executor = habitat.getService(ClusterExecutor.class, "GlassFishClusterExecutor");
}
} catch (UnsatisfiedDependencyException usdepex) {
logger.log(Level.WARNING, KernelLoggerInfo.cantGetClusterExecutor, usdepex);
}
if (executor != null) {
report.setActionExitCode(executor.execute(model.getCommandName(), command, context, parameters));
if (report.getActionExitCode().equals(ActionReport.ExitCode.FAILURE)) {
report.setMessage(adminStrings.getLocalString("commandrunner.executor.errorwhilereplication", "An error occurred during replication"));
} else {
if (!FailurePolicy.applyFailurePolicy(fp, report.getActionExitCode()).equals(ActionReport.ExitCode.FAILURE)) {
logger.fine(adminStrings.getLocalString("dynamicreconfiguration.diagnostics.afterreplsupplemental", "Command execution stage 5 : Call post-replication supplemental commands for {0}", inv.name()));
ActionReport.ExitCode afterReplicationSupplementalReturn = supplementalExecutor.execute(supplementalCommands, Supplemental.Timing.AfterReplication, context, parameters, ufm.optionNameToFileMap());
if (afterReplicationSupplementalReturn.equals(ActionReport.ExitCode.FAILURE)) {
report.setActionExitCode(afterReplicationSupplementalReturn);
report.setMessage(adminStrings.getLocalString("commandrunner.executor.supplementalcmdfailed", "A supplemental command failed; cannot proceed further"));
return;
}
}
}
}
}
// Final bookkeeping: undo a failed undoable command, or update the
// instance-state service for the (un)register-instance commands.
if (report.getActionExitCode().equals(ActionReport.ExitCode.FAILURE)) {
// If command is undoable, then invoke undo method method
if (command instanceof UndoableCommand) {
UndoableCommand uCmd = (UndoableCommand) command;
logger.fine(adminStrings.getLocalString("dynamicreconfiguration.diagnostics.undo", "Command execution failed; calling undo() for command {0}", inv.name()));
uCmd.undo(context, parameters, ClusterOperationUtil.getCompletedInstances());
}
} else {
// TODO : Is there a better way of doing this ? Got to look into it
if ("_register-instance".equals(model.getCommandName())) {
state.addServerToStateService(parameters.getOne("DEFAULT"));
}
if ("_unregister-instance".equals(model.getCommandName())) {
state.removeInstanceFromStateService(parameters.getOne("DEFAULT"));
}
}
} finally {
// Always clean up any files extracted from the uploaded payload.
if (ufm != null) {
ufm.close();
}
}
}
Aggregations