Use of org.apache.sling.discovery.InstanceDescription in project sling by apache.
The class DummyTopologyView, method removeInstance:
// public SimpleTopologyView addInstance(InstanceDescription artefact) {
// final String slingId = artefact.getSlingId();
// final boolean isLeader = artefact.isLeader();
// final boolean isLocal = artefact.isLocal();
// DefaultClusterViewImpl cluster = (DefaultClusterViewImpl) artefact.getClusterView();
// final DefaultInstanceDescriptionImpl instance = new DefaultInstanceDescriptionImpl(cluster, isLeader, isLocal, slingId, artefact.getProperties());
// instances.add(instance);
// return this;
// }
public DummyTopologyView removeInstance(String slingId) {
    for (Iterator<InstanceDescription> it = instances.iterator(); it.hasNext(); ) {
        InstanceDescription id = (InstanceDescription) it.next();
        if (id.getSlingId().equals(slingId)) {
            it.remove();
            DefaultClusterView cluster = (DefaultClusterView) id.getClusterView();
            if (!cluster.removeInstanceDescription(id)) {
                throw new IllegalStateException("could not remove id: " + id);
            }
            return this;
        }
    }
    throw new IllegalStateException("instance not found: " + slingId);
}
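For illustration, a minimal sketch of the lookup pattern behind removeInstance, written against the public org.apache.sling.discovery API; findBySlingId is a hypothetical helper and not part of the Sling codebase, and only ClusterView.getInstances() and InstanceDescription.getSlingId() as used above are assumed.
// Hypothetical helper (not part of Sling): locate an instance by slingId in a
// cluster view, mirroring the iterator-based lookup used by removeInstance above.
static InstanceDescription findBySlingId(final ClusterView cluster, final String slingId) {
    for (final InstanceDescription id : cluster.getInstances()) {
        if (id.getSlingId().equals(slingId)) {
            return id;
        }
    }
    // no instance with that slingId in this cluster view
    return null;
}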
Use of org.apache.sling.discovery.InstanceDescription in project sling by apache.
The class ClusterViewServiceImpl, method getLocalClusterView:
@Override
public LocalClusterView getLocalClusterView() throws UndefinedClusterViewException {
    if (resourceResolverFactory == null) {
        logger.warn("getClusterView: no resourceResolverFactory set at the moment.");
        throw new UndefinedClusterViewException(Reason.REPOSITORY_EXCEPTION, "no resourceResolverFactory set");
    }
    ResourceResolver resourceResolver = null;
    try {
        resourceResolver = resourceResolverFactory.getServiceResourceResolver(null);
        View view = ViewHelper.getEstablishedView(resourceResolver, config);
        if (view == null) {
            logger.debug("getClusterView: no view established at the moment. isolated mode");
            throw new UndefinedClusterViewException(Reason.NO_ESTABLISHED_VIEW, "no established view at the moment");
        }
        if (failedEstablishedViewId != null && failedEstablishedViewId.equals(view.getResource().getName())) {
            // SLING-5195 : the heartbeat-handler-self-check has declared the currently
            // established view as invalid - hence we should now treat this as
            // an undefined cluster view
            logger.debug("getClusterView: current establishedView is marked as invalid: " + failedEstablishedViewId);
            throw new UndefinedClusterViewException(Reason.NO_ESTABLISHED_VIEW, "current established view was marked as invalid");
        }
        EstablishedClusterView clusterViewImpl = new EstablishedClusterView(config, view, getSlingId());
        InstanceDescription local = clusterViewImpl.getLocalInstance();
        if (local != null) {
            return clusterViewImpl;
        } else {
            logger.info("getClusterView: the local instance (" + getSlingId() + ") is currently not included in the existing established view! "
                    + "This is normal at startup. At other times pseudo-network-partitioning is an indicator for repository/network-delays or clocks-out-of-sync (SLING-3432). "
                    + "(increasing the heartbeatTimeout can help as a workaround too) "
                    + "The local instance will stay in TOPOLOGY_CHANGING or pre _INIT mode until a new vote is successful.");
            throw new UndefinedClusterViewException(Reason.ISOLATED_FROM_TOPOLOGY, "established view does not include local instance - isolated");
        }
    } catch (UndefinedClusterViewException e) {
        // pass through
        throw e;
    } catch (LoginException e) {
        logger.error("getLocalClusterView: could not log in administratively: " + e, e);
        throw new UndefinedClusterViewException(Reason.REPOSITORY_EXCEPTION, "could not log in administratively: " + e);
    } catch (Exception e) {
        logger.error("getLocalClusterView: got an exception: " + e, e);
        throw new UndefinedClusterViewException(Reason.REPOSITORY_EXCEPTION, "got an exception: " + e);
    } finally {
        if (resourceResolver != null) {
            resourceResolver.close();
        }
    }
}
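As a hedged caller sketch (not taken from the Sling sources), the method above might be consumed like this; the ClusterViewService and Logger arguments are assumed to be supplied by the caller, and the types are the ones already used in the snippet.
// Hypothetical caller: log the members of the local cluster view and treat an
// undefined view as "topology still changing" rather than as a hard error.
void logLocalClusterMembers(final ClusterViewService clusterViewService, final Logger logger) {
    try {
        final LocalClusterView cluster = clusterViewService.getLocalClusterView();
        for (final InstanceDescription id : cluster.getInstances()) {
            logger.debug("member {} (leader={}, local={})", id.getSlingId(), id.isLeader(), id.isLocal());
        }
    } catch (final UndefinedClusterViewException e) {
        // no established view, isolated instance, or repository trouble
        logger.info("local cluster view currently undefined: " + e);
    }
}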
Use of org.apache.sling.discovery.InstanceDescription in project sling by apache.
The class UpgradeTask, method moveJobFromPreviousVersion:
/**
 * Move a single job
 */
private void moveJobFromPreviousVersion(final Resource jobResource) throws PersistenceException {
    final ResourceResolver resolver = jobResource.getResourceResolver();
    try {
        final ValueMap vm = ResourceHelper.getValueMap(jobResource);
        // check for binary properties
        Map<String, Object> binaryProperties = new HashMap<>();
        final ObjectInputStream ois = vm.get("slingevent:properties", ObjectInputStream.class);
        if (ois != null) {
            try {
                int length = ois.readInt();
                for (int i = 0; i < length; i++) {
                    final String key = (String) ois.readObject();
                    final Object value = ois.readObject();
                    binaryProperties.put(key, value);
                }
            } catch (final ClassNotFoundException cnfe) {
                throw new PersistenceException("Class not found.", cnfe);
            } catch (final java.io.InvalidClassException ice) {
                throw new PersistenceException("Invalid class.", ice);
            } catch (final IOException ioe) {
                throw new PersistenceException("Unable to deserialize job properties.", ioe);
            } finally {
                try {
                    ois.close();
                } catch (final IOException ioe) {
                    throw new PersistenceException("Unable to deserialize job properties.", ioe);
                }
            }
        }
        final Map<String, Object> properties = ResourceHelper.cloneValueMap(vm);
        final String topic = (String) properties.remove("slingevent:topic");
        properties.put(ResourceHelper.PROPERTY_JOB_TOPIC, topic);
        properties.remove(Job.PROPERTY_JOB_QUEUE_NAME);
        properties.remove(Job.PROPERTY_JOB_TARGET_INSTANCE);
        // and binary properties
        properties.putAll(binaryProperties);
        properties.remove("slingevent:properties");
        if (!properties.containsKey(Job.PROPERTY_JOB_RETRIES)) {
            // we put a dummy value here; this gets updated by the queue
            properties.put(Job.PROPERTY_JOB_RETRIES, 10);
        }
        if (!properties.containsKey(Job.PROPERTY_JOB_RETRY_COUNT)) {
            properties.put(Job.PROPERTY_JOB_RETRY_COUNT, 0);
        }
        final List<InstanceDescription> potentialTargets = caps.getPotentialTargets(topic);
        String targetId = null;
        if (potentialTargets != null && potentialTargets.size() > 0) {
            final QueueConfigurationManager qcm = configuration.getQueueConfigurationManager();
            if (qcm == null) {
                resolver.revert();
                return;
            }
            final QueueInfo info = qcm.getQueueInfo(topic);
            logger.debug("Found queue {} for {}", info.queueConfiguration, topic);
            targetId = caps.detectTarget(topic, vm, info);
            if (targetId != null) {
                properties.put(Job.PROPERTY_JOB_QUEUE_NAME, info.queueName);
                properties.put(Job.PROPERTY_JOB_TARGET_INSTANCE, targetId);
                properties.put(Job.PROPERTY_JOB_RETRIES, info.queueConfiguration.getMaxRetries());
            }
        }
        properties.put(Job.PROPERTY_JOB_CREATED_INSTANCE, "old:" + Environment.APPLICATION_ID);
        properties.put(ResourceResolver.PROPERTY_RESOURCE_TYPE, ResourceHelper.RESOURCE_TYPE_JOB);
        final String jobId = configuration.getUniqueId(topic);
        properties.put(ResourceHelper.PROPERTY_JOB_ID, jobId);
        properties.remove(Job.PROPERTY_JOB_STARTED_TIME);
        final String newPath = configuration.getUniquePath(targetId, topic, jobId, vm);
        this.logger.debug("Moving 'old' job from {} to {}", jobResource.getPath(), newPath);
        ResourceHelper.getOrCreateResource(resolver, newPath, properties);
        resolver.delete(jobResource);
        resolver.commit();
    } catch (final InstantiationException ie) {
        throw new PersistenceException("Exception while reading resource: " + ie.getMessage(), ie.getCause());
    }
}
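For illustration, a small hedged helper (not part of Sling) showing one way the List<InstanceDescription> returned by getPotentialTargets(topic) could be inspected; only the isLocal() and getSlingId() accessors already used above are assumed.
// Hypothetical helper: pick the slingId of the first non-local potential target,
// or null if only the local instance (or nobody) can process the topic.
static String firstRemoteTarget(final List<InstanceDescription> potentialTargets) {
    if (potentialTargets == null) {
        return null;
    }
    for (final InstanceDescription id : potentialTargets) {
        if (!id.isLocal()) {
            return id.getSlingId();
        }
    }
    return null;
}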
Use of org.apache.sling.discovery.InstanceDescription in project sling by apache.
The class InventoryPlugin, method printText:
private void printText(final PrintWriter pw) {
    pw.println("Apache Sling Job Handling");
    pw.println("-------------------------");
    String topics = this.jobConsumerManager.getTopics();
    if (topics == null) {
        topics = "";
    }
    Statistics s = this.jobManager.getStatistics();
    pw.println("Overall Statistics");
    pw.printf("Start Time : %s%n", formatDate(s.getStartTime()));
    pw.printf("Local topic consumers: %s%n", topics);
    pw.printf("Last Activated : %s%n", formatDate(s.getLastActivatedJobTime()));
    pw.printf("Last Finished : %s%n", formatDate(s.getLastFinishedJobTime()));
    pw.printf("Queued Jobs : %s%n", s.getNumberOfQueuedJobs());
    pw.printf("Active Jobs : %s%n", s.getNumberOfActiveJobs());
    pw.printf("Jobs : %s%n", s.getNumberOfJobs());
    pw.printf("Finished Jobs : %s%n", s.getNumberOfFinishedJobs());
    pw.printf("Failed Jobs : %s%n", s.getNumberOfFailedJobs());
    pw.printf("Cancelled Jobs : %s%n", s.getNumberOfCancelledJobs());
    pw.printf("Processed Jobs : %s%n", s.getNumberOfProcessedJobs());
    pw.printf("Average Processing Time : %s%n", formatTime(s.getAverageProcessingTime()));
    pw.printf("Average Waiting Time : %s%n", formatTime(s.getAverageWaitingTime()));
    pw.println();
    pw.println("Topology Capabilities");
    final TopologyCapabilities cap = this.configuration.getTopologyCapabilities();
    if (cap == null) {
        pw.print("No topology information available!");
    } else {
        final Map<String, List<InstanceDescription>> instanceCaps = cap.getInstanceCapabilities();
        for (final Map.Entry<String, List<InstanceDescription>> entry : instanceCaps.entrySet()) {
            final StringBuilder sb = new StringBuilder();
            for (final InstanceDescription id : entry.getValue()) {
                if (sb.length() > 0) {
                    sb.append(", ");
                }
                if (id.isLocal()) {
                    sb.append("local");
                } else {
                    sb.append(id.getSlingId());
                }
            }
            pw.printf("%s : %s%n", entry.getKey(), sb.toString());
        }
    }
    pw.println();
    pw.println("Scheduled Jobs");
    final Collection<ScheduledJobInfo> infos = this.jobManager.getScheduledJobs();
    if (infos.size() == 0) {
        pw.print("No jobs currently scheduled");
    } else {
        for (final ScheduledJobInfo info : infos) {
            pw.println("Schedule");
            pw.printf("Job Topic : %s%n", info.getJobTopic());
            pw.print("Schedules : ");
            boolean first = true;
            for (final ScheduleInfo si : info.getSchedules()) {
                if (!first) {
                    pw.print(", ");
                }
                first = false;
                switch (si.getType()) {
                    case YEARLY:
                        pw.printf("YEARLY %s %s : %s:%s", si.getMonthOfYear(), si.getDayOfMonth(), si.getHourOfDay(), si.getMinuteOfHour());
                        break;
                    case MONTHLY:
                        pw.printf("MONTHLY %s : %s:%s", si.getDayOfMonth(), si.getHourOfDay(), si.getMinuteOfHour());
                        break;
                    case WEEKLY:
                        pw.printf("WEEKLY %s : %s:%s", si.getDayOfWeek(), si.getHourOfDay(), si.getMinuteOfHour());
                        break;
                    case DAILY:
                        pw.printf("DAILY %s:%s", si.getHourOfDay(), si.getMinuteOfHour());
                        break;
                    case HOURLY:
                        pw.printf("HOURLY %s", si.getMinuteOfHour());
                        break;
                    case CRON:
                        pw.printf("CRON %s", si.getExpression());
                        break;
                    default:
                        pw.printf("AT %s", si.getAt());
                }
            }
            pw.println();
            pw.println();
        }
    }
    pw.println();
    boolean isEmpty = true;
    for (final Queue q : this.jobManager.getQueues()) {
        isEmpty = false;
        pw.printf("Active JobQueue: %s %s%n", q.getName(), q.isSuspended() ? "(SUSPENDED)" : "");
        s = q.getStatistics();
        final QueueConfiguration c = q.getConfiguration();
        pw.println("Statistics");
        pw.printf("Start Time : %s%n", formatDate(s.getStartTime()));
        pw.printf("Last Activated : %s%n", formatDate(s.getLastActivatedJobTime()));
        pw.printf("Last Finished : %s%n", formatDate(s.getLastFinishedJobTime()));
        pw.printf("Queued Jobs : %s%n", s.getNumberOfQueuedJobs());
        pw.printf("Active Jobs : %s%n", s.getNumberOfActiveJobs());
        pw.printf("Jobs : %s%n", s.getNumberOfJobs());
        pw.printf("Finished Jobs : %s%n", s.getNumberOfFinishedJobs());
        pw.printf("Failed Jobs : %s%n", s.getNumberOfFailedJobs());
        pw.printf("Cancelled Jobs : %s%n", s.getNumberOfCancelledJobs());
        pw.printf("Processed Jobs : %s%n", s.getNumberOfProcessedJobs());
        pw.printf("Average Processing Time : %s%n", formatTime(s.getAverageProcessingTime()));
        pw.printf("Average Waiting Time : %s%n", formatTime(s.getAverageWaitingTime()));
        pw.printf("Status Info : %s%n", q.getStateInfo());
        pw.println("Configuration");
        pw.printf("Type : %s%n", formatType(c.getType()));
        pw.printf("Topics : %s%n", formatArrayAsText(c.getTopics()));
        pw.printf("Max Parallel : %s%n", c.getMaxParallel());
        pw.printf("Max Retries : %s%n", c.getMaxRetries());
        pw.printf("Retry Delay : %s ms%n", c.getRetryDelayInMs());
        pw.printf("Priority : %s%n", c.getThreadPriority());
        pw.println();
    }
    if (isEmpty) {
        pw.println("No active queues.");
        pw.println();
    }
    for (final TopicStatistics ts : this.jobManager.getTopicStatistics()) {
        pw.printf("Topic Statistics - %s%n", ts.getTopic());
        pw.printf("Last Activated : %s%n", formatDate(ts.getLastActivatedJobTime()));
        pw.printf("Last Finished : %s%n", formatDate(ts.getLastFinishedJobTime()));
        pw.printf("Finished Jobs : %s%n", ts.getNumberOfFinishedJobs());
        pw.printf("Failed Jobs : %s%n", ts.getNumberOfFailedJobs());
        pw.printf("Cancelled Jobs : %s%n", ts.getNumberOfCancelledJobs());
        pw.printf("Processed Jobs : %s%n", ts.getNumberOfProcessedJobs());
        pw.printf("Average Processing Time : %s%n", formatTime(ts.getAverageProcessingTime()));
        pw.printf("Average Waiting Time : %s%n", formatTime(ts.getAverageWaitingTime()));
        pw.println();
    }
    pw.println("Apache Sling Job Handling - Job Queue Configurations");
    pw.println("----------------------------------------------------");
    this.printQueueConfiguration(pw, this.configuration.getQueueConfigurationManager().getMainQueueConfiguration());
    final InternalQueueConfiguration[] configs = this.configuration.getQueueConfigurationManager().getConfigurations();
    for (final InternalQueueConfiguration c : configs) {
        this.printQueueConfiguration(pw, c);
    }
}
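The capabilities loop above can be factored into a small helper; the sketch below is hypothetical (not Sling code) and reproduces the same "local or slingId, comma separated" rendering using only accessors shown in the snippet.
// Hypothetical helper: render a list of instances the way the capabilities
// section above does - "local" for the local instance, otherwise the slingId.
static String formatInstances(final List<InstanceDescription> instances) {
    final StringBuilder sb = new StringBuilder();
    for (final InstanceDescription id : instances) {
        if (sb.length() > 0) {
            sb.append(", ");
        }
        sb.append(id.isLocal() ? "local" : id.getSlingId());
    }
    return sb.toString();
}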
Use of org.apache.sling.discovery.InstanceDescription in project sling by apache.
The class CheckTopologyTask, method reassignStaleJobs:
/**
 * Reassign stale jobs from this instance
 */
private void reassignStaleJobs() {
    if (caps.isActive()) {
        this.logger.debug("Checking for stale jobs...");
        final ResourceResolver resolver = this.configuration.createResourceResolver();
        if (resolver != null) {
            try {
                final Resource jobsRoot = resolver.getResource(this.configuration.getLocalJobsPath());
                // this resource should exist, but we check anyway
                if (jobsRoot != null) {
                    final Iterator<Resource> topicIter = jobsRoot.listChildren();
                    while (caps.isActive() && topicIter.hasNext()) {
                        final Resource topicResource = topicIter.next();
                        final String topicName = topicResource.getName().replace('.', '/');
                        this.logger.debug("Checking topic {}...", topicName);
                        final List<InstanceDescription> potentialTargets = caps.getPotentialTargets(topicName);
                        boolean reassign = true;
                        for (final InstanceDescription desc : potentialTargets) {
                            if (desc.isLocal()) {
                                reassign = false;
                                break;
                            }
                        }
                        if (reassign) {
                            final QueueConfigurationManager qcm = this.configuration.getQueueConfigurationManager();
                            if (qcm == null) {
                                break;
                            }
                            final QueueInfo info = qcm.getQueueInfo(topicName);
                            logger.info("Start reassigning stale jobs");
                            JobTopicTraverser.traverse(this.logger, topicResource, new JobTopicTraverser.ResourceCallback() {
                                @Override
                                public boolean handle(final Resource rsrc) {
                                    try {
                                        final ValueMap vm = ResourceHelper.getValueMap(rsrc);
                                        final String targetId = caps.detectTarget(topicName, vm, info);
                                        final Map<String, Object> props = new HashMap<>(vm);
                                        props.remove(Job.PROPERTY_JOB_STARTED_TIME);
                                        final String newPath;
                                        if (targetId != null) {
                                            newPath = configuration.getAssginedJobsPath() + '/' + targetId + '/' + topicResource.getName() + rsrc.getPath().substring(topicResource.getPath().length());
                                            props.put(Job.PROPERTY_JOB_QUEUE_NAME, info.queueName);
                                            props.put(Job.PROPERTY_JOB_TARGET_INSTANCE, targetId);
                                        } else {
                                            newPath = configuration.getUnassignedJobsPath() + '/' + topicResource.getName() + rsrc.getPath().substring(topicResource.getPath().length());
                                            props.remove(Job.PROPERTY_JOB_QUEUE_NAME);
                                            props.remove(Job.PROPERTY_JOB_TARGET_INSTANCE);
                                        }
                                        try {
                                            ResourceHelper.getOrCreateResource(resolver, newPath, props);
                                            resolver.delete(rsrc);
                                            resolver.commit();
                                            final String jobId = vm.get(ResourceHelper.PROPERTY_JOB_ID, String.class);
                                            if (targetId != null) {
                                                configuration.getAuditLogger().debug("REASSIGN OK {} : {}", targetId, jobId);
                                            } else {
                                                configuration.getAuditLogger().debug("REUNASSIGN OK : {}", jobId);
                                            }
                                        } catch (final PersistenceException pe) {
                                            logger.warn("Unable to move stale job from " + rsrc.getPath() + " to " + newPath, pe);
                                            resolver.refresh();
                                            resolver.revert();
                                        }
                                    } catch (final InstantiationException ie) {
                                        // something happened with the resource in the meantime
                                        logger.warn("Unable to move stale job from " + rsrc.getPath(), ie);
                                        resolver.refresh();
                                        resolver.revert();
                                    }
                                    return caps.isActive();
                                }
                            });
                        }
                    }
                }
            } finally {
                resolver.close();
            }
        }
    }
}
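The decision at the heart of the loop above (reassign a topic's jobs only when no potential target is the local instance) can be read in isolation; the helper below is a hypothetical restatement, not Sling code, using only InstanceDescription.isLocal() as shown above.
// Hypothetical helper: a topic's jobs should be reassigned only when none of
// the potential targets is the local instance.
static boolean needsReassignment(final List<InstanceDescription> potentialTargets) {
    for (final InstanceDescription desc : potentialTargets) {
        if (desc.isLocal()) {
            return false;
        }
    }
    return true;
}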