Use of com.sun.messaging.jmq.jmsserver.management.agent.Agent in project openmq by eclipse-ee4j.
The class TransactionHandler, method doRollback.
/**
* Rollback a transaction. This method is invoked from two places: 1) From TransactionHandler.handle() when handling a
* client ROLLBACK packet. This is the common case. 2) From the admin handler when an admin rollback request has been
* issued on a PREPARED XA transaction.
*
* @param id The TransactionUID to roll back
* @param xid The Xid of the transaction to roll back. Required if the transaction is an XA transaction. Must be null
* if it is not an XA transaction.
* @param xaFlags xaFlags passed on the ROLLBACK operation. Used only if an XA transaction.
* @param ts Current TransactionState of this transaction.
* @param conlist List of transactions on this connection. Will be null if the rollback is triggered by an admin request.
* @param con Connection the client ROLLBACK packet came in on or, for admin, the connection the admin request came in on.
*
* @throws BrokerException on an error. The method will have logged a message to the broker log.
*/
public void doRollback(TransactionList translist, TransactionUID id, JMQXid xid, Integer xaFlags, TransactionState ts, List conlist, IMQConnection con, RollbackReason rbreason) throws BrokerException {
int s;
int oldstate = ts.getState();
PartitionedStore pstore = translist.getPartitionedStore();
// Update transaction state
try {
if (xid == null) {
// Plain JMS transaction.
s = TransactionState.ROLLEDBACK;
} else {
// XA Transaction.
if (rbreason == RollbackReason.ADMIN || rbreason == RollbackReason.CONNECTION_CLEANUP) {
if (ts.getState() == TransactionState.STARTED) {
ts = translist.updateState(id, TransactionState.FAILED, TransactionState.STARTED, true);
String[] args = { rbreason.toString(), id.toString() + "[" + TransactionState.toString(oldstate) + "]XID=", xid.toString() };
if (rbreason != RollbackReason.ADMIN && (DEBUG || DEBUG_CLUSTER_TXN || logger.getLevel() <= Logger.DEBUG)) {
logger.log(logger.WARNING, Globals.getBrokerResources().getKString(BrokerResources.W_FORCE_ENDED_TXN, args));
}
}
}
s = ts.nextState(PacketType.ROLLBACK_TRANSACTION, xaFlags);
}
} catch (BrokerException ex) {
if (ex.getStatusCode() == Status.CONFLICT) {
logger.log(Logger.ERROR, ex.toString());
} else {
logger.log(Logger.ERROR, ex.toString() + ": TUID=" + id + " Xid=" + xid);
}
throw ex;
}
ts = translist.updateState(id, s, true);
if (Globals.isNewTxnLogEnabled() && oldstate == TransactionState.PREPARED) {
int transactionType = BaseTransaction.LOCAL_TRANSACTION_TYPE;
if (translist.isClusterTransaction(id)) {
transactionType = BaseTransaction.CLUSTER_TRANSACTION_TYPE;
}
logTxnCompletion(pstore, id, TransactionState.ROLLEDBACK, transactionType);
}
if (fi.FAULT_INJECTION) {
checkFIAfterDB(PacketType.ROLLBACK_TRANSACTION);
}
boolean processDone = true;
List list = new ArrayList(translist.retrieveSentMessages(id));
for (int i = 0; i < list.size(); i++) {
SysMessageID sysid = (SysMessageID) list.get(i);
if (DEBUG) {
logger.log(Logger.INFO, "Removing " + sysid + " because of rollback");
}
PacketReference ref = DL.get(null, sysid);
if (ref == null) {
continue;
}
DestinationUID duid = ref.getDestinationUID();
Destination[] ds = DL.getDestination(ref.getPartitionedStore(), duid);
Destination d = ds[0];
if (d != null) {
Destination.RemoveMessageReturnInfo ret = d.removeMessageWithReturnInfo(sysid, RemoveReason.ROLLBACK);
if (ret.storermerror) {
processDone = false;
}
}
}
// remove from our active connection list
if (conlist != null) {
conlist.remove(id);
}
// re-queue any orphan messages
// how we handle the orphan messages depends on whether
// the session has closed:
// - if the session has closed, the messages are "orphan"
// - otherwise, the messages are still "in play" and
//   we don't do anything with them
//
Map m = translist.getOrphanAck(id);
if (m != null) {
Iterator itr = m.entrySet().iterator();
while (itr.hasNext()) {
Map.Entry me = (Map.Entry) itr.next();
SysMessageID sysid = (SysMessageID) me.getKey();
PacketReference ref = DL.get(null, sysid, false);
if (ref == null) {
if (DEBUG) {
logger.log(Logger.INFO, "Process transaction rollback " + id + ": orphan message already removed " + sysid);
}
continue;
}
Destination dst = ref.getDestination();
Map sids = (Map) me.getValue();
if (sids == null) {
continue;
}
Iterator siditr = sids.entrySet().iterator();
while (siditr.hasNext()) {
Map.Entry se = (Map.Entry) siditr.next();
ConsumerUID sid = (ConsumerUID) se.getKey();
if (ref.isLocal()) {
if (dst != null) {
dst.forwardOrphanMessage(ref, sid);
} else {
if (DEBUG) {
logger.log(Logger.INFO, "Process transaction rollback " + id + ": orphan consumed message destination already removed " + sysid);
}
}
continue;
}
List cids = (List) se.getValue();
if (cids == null) {
continue;
}
Iterator ciditr = cids.iterator();
while (ciditr.hasNext()) {
ConsumerUID cid = (ConsumerUID) ciditr.next();
try {
ref.acquireDestroyRemoteReadLock();
try {
if (ref.isLastRemoteConsumerUID(sid, cid)) {
if (ref.acknowledged(cid, sid, !(cid.isNoAck() || cid.isDupsOK()), false, id, translist, null, false)) {
try {
if (dst != null) {
dst.removeRemoteMessage(sysid, RemoveReason.ACKNOWLEDGED, ref);
} else {
logger.log(Logger.INFO, "Process transaction rollback " + id + ": orphan consumed remote message destination already removed " + sysid);
}
} finally {
ref.postAcknowledgedRemoval();
}
}
}
} finally {
ref.clearDestroyRemoteReadLock();
}
} catch (Exception ex) {
logger.logStack((DEBUG_CLUSTER_TXN ? Logger.WARNING : Logger.DEBUG), "Unable to cleanup orphaned remote message " + "[" + cid + "," + sid + "," + sysid + "]" + " on rollback transaction " + id, ex);
}
BrokerAddress addr = translist.getAckBrokerAddress(id, sysid, cid);
try {
HashMap prop = new HashMap();
prop.put(ClusterBroadcast.RB_RELEASE_MSG_ORPHAN, id.toString());
Globals.getClusterBroadcast().acknowledgeMessage(addr, sysid, cid, ClusterBroadcast.MSG_IGNORED, prop, false);
} catch (BrokerException e) {
Globals.getLogger().log(Logger.WARNING, "Unable to notify " + addr + " for orphaned remote message " + "[" + cid + ", " + sid + ", " + sysid + "]" + " in rollback transaction " + id);
}
}
}
}
}
// OK .. now remove the acks
translist.removeTransactionAck(id, true);
/*
* Can't really call the JMX notification code at the end of doRollback() because the call to
* translist.removeTransactionID(id) removes the MBean.
*/
Agent agent = Globals.getAgent();
if (agent != null) {
agent.notifyTransactionRollback(id);
}
try {
ts.setState(s);
cacheSetState(id, ts, con);
doRemoteRollback(translist, id, s);
translist.removeTransaction(id, !processDone);
if (rbreason == RollbackReason.ADMIN || rbreason == RollbackReason.CONNECTION_CLEANUP) {
String[] args = { rbreason.toString(), id.toString() + "[" + TransactionState.toString(oldstate) + "]", (xid == null ? "null" : xid.toString()) };
if (rbreason == RollbackReason.CONNECTION_CLEANUP) {
if (DEBUG || DEBUG_CLUSTER_TXN || logger.getLevel() <= Logger.DEBUG) {
logger.log(logger.INFO, Globals.getBrokerResources().getKString(BrokerResources.W_FORCE_ROLLEDBACK_TXN, args));
}
} else {
logger.log(logger.WARNING, Globals.getBrokerResources().getKString(BrokerResources.W_FORCE_ROLLEDBACK_TXN, args));
}
}
} catch (BrokerException ex) {
logger.logStack(logger.ERROR, br.getKString(br.X_REMOVE_TRANSACTION, id, ex.getMessage()), ex);
ex.setStackLogged();
throw ex;
}
}
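The notification hook above (agent.notifyTransactionRollback(id)) is what makes a rollback visible to JMX monitoring clients. Below is a minimal, hypothetical sketch of a remote client that subscribes to such notifications using only the standard javax.management API; the JMX service URL and the ObjectName of the transaction monitor MBean are assumptions and would need to match what the broker actually advertises.
import javax.management.MBeanServerConnection;
import javax.management.Notification;
import javax.management.NotificationListener;
import javax.management.ObjectName;
import javax.management.remote.JMXConnector;
import javax.management.remote.JMXConnectorFactory;
import javax.management.remote.JMXServiceURL;

public class TxnRollbackWatcher {
    public static void main(String[] args) throws Exception {
        // Hypothetical JMX service URL; a running broker logs its actual URL at startup.
        JMXServiceURL url = new JMXServiceURL("service:jmx:rmi:///jndi/rmi://localhost:1099/server");
        try (JMXConnector jmxc = JMXConnectorFactory.connect(url)) {
            MBeanServerConnection mbsc = jmxc.getMBeanServerConnection();
            // Hypothetical ObjectName for the transaction monitor MBean.
            ObjectName txnMonitor = new ObjectName("com.sun.messaging.jms.server:type=TransactionManager,subtype=Monitor");
            NotificationListener listener = new NotificationListener() {
                @Override
                public void handleNotification(Notification n, Object handback) {
                    // Emitted broker-side when doRollback() calls agent.notifyTransactionRollback(id).
                    System.out.println(n.getType() + ": " + n.getMessage());
                }
            };
            mbsc.addNotificationListener(txnMonitor, listener, null, null);
            Thread.sleep(60_000); // keep the connection open while notifications arrive
        }
    }
}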
Use of com.sun.messaging.jmq.jmsserver.management.agent.Agent in project openmq by eclipse-ee4j.
The class DestinationList, method createDestination.
// XXX : The public createDestination methods can be renamed so
// that it is easier to find the right variant (e.g.
// createTempDestination, createAutoDestination,
// createClusterDestination, etc.).
private Destination createDestination(String name, int type, boolean store, boolean autocreated, ConnectionUID uid, boolean notify, boolean localOnly) throws BrokerException, IOException {
DestinationUID duid = new DestinationUID(name, DestType.isQueue(type));
if (!valid) {
if (!DL.isPartitionMode()) {
throw new BrokerException(Globals.getBrokerResources().getKString(BrokerResources.X_SHUTTING_DOWN_BROKER), BrokerResources.X_SHUTTING_DOWN_BROKER, (Throwable) null, Status.ERROR);
} else {
throw new BrokerException(br.getKString(br.I_PARTITION_IS_CLOSING, logsuffix));
}
}
if (destinationList.get(duid) != null) {
throw new ConflictException(Globals.getBrokerResources().getKString(BrokerResources.X_DESTINATION_EXISTS, duid));
}
// OK, check the persistent store (required for HA)
try {
Destination d = pstore.getDestination(duid);
if (d != null) {
d.setDestinationList(this);
addDestination(d, false);
return d;
}
} catch (Exception ex) {
// ignore; we want to create it
}
ClusterBroadcast mbi = Globals.getClusterBroadcast();
boolean clusterNotify = false;
Destination d = null;
try {
if (DestType.isQueue(type)) {
d = new Queue(name, type, store, uid, autocreated, this);
} else {
d = new Topic(name, type, store, uid, autocreated, this);
}
d.setClusterNotifyFlag(notify);
try {
synchronized (destinationList) {
Destination newd = (Destination) destinationList.get(duid);
if (newd != null) {
// updating existing
String emsg = Globals.getBrokerResources().getKString(BrokerResources.X_DESTINATION_EXISTS, duid.getLongString());
throw new BrokerException(emsg, Status.CONFLICT);
}
if (!autocreated) {
d.setIsLocal(localOnly);
}
if (store) {
d.store();
}
destinationList.put(duid, d);
}
} catch (BrokerException ex) {
// Conflict message
if (ex.getStatusCode() != Status.CONFLICT) {
throw new BrokerException(ex.getMessage(), ex, Status.CONFLICT);
}
throw ex;
}
clusterNotify = !d.isAutoCreated() && d.sendClusterUpdate() && notify;
if (mbi != null && clusterNotify) {
// don't worry about locking
if (!mbi.lockDestination(duid, uid)) {
throw new ConflictException("Internal Exception:" + " Destination " + duid + " is in the process" + " of being created");
}
}
if (clusterNotify && mbi != null) {
// we don't care about updating other brokers for
// autocreated, internal or admin destinations;
// we may or may not update local dests (depends on the
// version of the cluster)
mbi.createDestination(d);
}
} finally {
if (mbi != null && clusterNotify) {
// only null in tonga test
mbi.unlockDestination(duid, uid);
}
}
// NOW ATTACH ANY WILDCARD PRODUCERS OR CONSUMERS
Iterator itr = Consumer.getWildcardConsumers();
while (itr.hasNext()) {
ConsumerUID cuid = (ConsumerUID) itr.next();
Consumer c = Consumer.getConsumer(cuid);
if (c == null) {
Globals.getLogger().log(Logger.INFO, "Consumer already destroyed");
continue;
}
DestinationUID wuid = c.getDestinationUID();
// compare the uids
if (DestinationUID.match(d.getDestinationUID(), wuid)) {
try {
// attach the consumer
if (c.getSubscription() != null) {
d.addConsumer(c.getSubscription(), false);
} else {
d.addConsumer(c, false);
}
} catch (SelectorFormatException ex) {
// LKS TBD
}
}
}
itr = Producer.getWildcardProducers();
while (itr.hasNext()) {
ProducerUID puid = (ProducerUID) itr.next();
Producer p = (Producer) Producer.getProducer(puid);
DestinationUID wuid = p.getDestinationUID();
// compare the uids
if (DestinationUID.match(d.getDestinationUID(), wuid)) {
// attach the producer
d.addProducer(p);
}
}
Agent agent = Globals.getAgent();
if (agent != null) {
agent.registerDestination(d);
agent.notifyDestinationCreate(d);
}
return d;
}
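One detail worth noting in createDestination() is that the duplicate check is done twice: once unsynchronized at the top for the common case, and again inside synchronized (destinationList) before store() and put(), which closes the race between two concurrent creators. A minimal sketch of that pattern follows, with purely illustrative names (not OpenMQ APIs):
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import java.util.function.Function;

class Registry<K, V> {
    // synchronized map, so single reads are safe without explicitly holding the lock
    private final Map<K, V> entries = Collections.synchronizedMap(new HashMap<>());

    V create(K key, Function<K, V> factory) {
        if (entries.containsKey(key)) { // fast-path duplicate check
            throw new IllegalStateException("already exists: " + key);
        }
        V candidate = factory.apply(key); // potentially expensive (e.g. store I/O)
        synchronized (entries) { // re-check under the map's own lock before publishing
            if (entries.containsKey(key)) {
                throw new IllegalStateException("already exists: " + key);
            }
            entries.put(key, candidate);
        }
        return candidate;
    }
}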
Use of com.sun.messaging.jmq.jmsserver.management.agent.Agent in project openmq by eclipse-ee4j.
The class DestinationList, method removeDestination.
Destination removeDestination(DestinationUID uid, boolean notify, String reason) throws IOException, BrokerException {
Destination d = null;
boolean noerrnotfound = Globals.getHAEnabled() && !notify;
if (noerrnotfound) {
// Quick check to see if dst is in memory; doesn't load/initialize
d = findDestination(uid);
if (d != null && !d.isTemporary()) {
// Because temp dst can be deleted in HA, do this check to avoid
// getting error during load if it has already been deleted
d = getDestination(uid);
}
} else {
d = getDestination(uid);
}
if (d != null) {
if (d.isDMQ) {
throw new BrokerException(Globals.getBrokerResources().getKString(BrokerResources.X_DMQ_INVAID_DESTROY));
} else if (notify && d.sendClusterUpdate() && !d.isTemporary()) {
Globals.getClusterBroadcast().recordRemoveDestination(d);
}
int level = (DestType.isAdmin(d.getType()) ? Logger.DEBUG : Logger.INFO);
Globals.getLogger().log(level, BrokerResources.I_DESTROYING_DESTINATION, d.getName());
}
try {
d = (Destination) destinationList.get(uid);
// remove from cache
DestinationUID.clearUID(uid);
if (d != null) {
if (d.producers.size() > 0) {
String[] args = { d.getName(), String.valueOf(d.producers.size()), reason };
Globals.getLogger().log(Logger.WARNING, BrokerResources.W_DST_ACTIVE_PRODUCERS, args);
}
if (d.consumers.size() > 0) {
int csize = d.consumers.size();
boolean destroyDurables = false;
Set cons = new HashSet(d.consumers.values());
Iterator itr = cons.iterator();
while (itr.hasNext()) {
Consumer c = (Consumer) itr.next();
if (c instanceof Subscription && ((Subscription) c).isDurable()) {
destroyDurables = true;
Subscription s = (Subscription) c;
if (s.isActive()) {
csize += s.getActiveSubscriberCnt();
}
Subscription.unsubscribeOnDestroy(s.getDurableName(), s.getClientID(), notify);
csize--;
}
}
if (destroyDurables) {
Globals.getLogger().log(Logger.INFO, BrokerResources.I_DST_DURABLE_RM, d.toString(), reason);
}
if (csize > 0) {
String[] args = { d.getName(), String.valueOf(csize), reason };
Globals.getLogger().log(Logger.WARNING, BrokerResources.W_DST_ACTIVE_CONSUMERS, args);
}
}
if (d.size() > 0) {
logger.log(Logger.WARNING, br.getKString(br.W_REMOVING_DST_WITH_MSG, String.valueOf(d.size()), d.toString()) + logsuffix);
}
d.destroy(reason, noerrnotfound);
if (notify && d.sendClusterUpdate()) {
Globals.getClusterBroadcast().destroyDestination(d);
}
Agent agent = Globals.getAgent();
if (agent != null) {
agent.notifyDestinationDestroy(d);
agent.unregisterDestination(d);
}
}
} finally {
d = (Destination) destinationList.remove(uid);
}
return d;
}
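For illustration of the consumer accounting above (a worked example under the assumption that a durable subscription appears as a single entry in d.consumers): with two plain consumers plus one active durable subscription fanning out to three active subscribers, csize starts at 3, grows to 3 + 3 = 6 when the active subscriber count is added, and drops to 5 after the subscription itself is unsubscribed and decremented, so the warning reports five active consumers at removal time.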
Use of com.sun.messaging.jmq.jmsserver.management.agent.Agent in project openmq by eclipse-ee4j.
The class ServiceConfig, method notifyAttrChange.
public void notifyAttrChange(String attrName, Object newVal, Object oldVal) {
sendNotification(new AttributeChangeNotification(this, sequenceNumber++, new Date().getTime(), "Attribute change", attrName, newVal == null ? "" : newVal.getClass().getName(), oldVal, newVal));
Agent agent = Globals.getAgent();
if (agent != null) {
agent.notifyServiceAttrUpdated(getName(), attrName, oldVal, newVal);
}
}
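The method emits a standard AttributeChangeNotification and also forwards the change to the Agent. A minimal sketch of the receiving side, using only the standard javax.management API; the ObjectName of the ServiceConfig MBean is left as a parameter because its exact naming is an assumption here.
import javax.management.AttributeChangeNotification;
import javax.management.MBeanServerConnection;
import javax.management.Notification;
import javax.management.NotificationListener;
import javax.management.ObjectName;

public class ServiceAttrWatcher {
    // Registers a listener for the AttributeChangeNotification emitted above.
    public static void watch(MBeanServerConnection mbsc, ObjectName serviceConfig) throws Exception {
        NotificationListener listener = new NotificationListener() {
            @Override
            public void handleNotification(Notification n, Object handback) {
                if (n instanceof AttributeChangeNotification) {
                    AttributeChangeNotification acn = (AttributeChangeNotification) n;
                    System.out.println("service attribute " + acn.getAttributeName()
                            + ": " + acn.getOldValue() + " -> " + acn.getNewValue());
                }
            }
        };
        mbsc.addNotificationListener(serviceConfig, listener, null, null);
    }
}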
Use of com.sun.messaging.jmq.jmsserver.management.agent.Agent in project openmq by eclipse-ee4j.
The class Broker, method _start.
private int _start(boolean inProcess, Properties propsFromCommandLine, boolean initOnly, Throwable failStartThrowable) throws OutOfMemoryError, IllegalStateException, IllegalArgumentException {
try {
setIsInProcess(inProcess);
// initialize the Global properties if any arguments are passed in
// read properties (including passwords) from standard input (used when the broker is managed by JMSRA)
Properties propsFromStdin = null;
if (propsFromCommandLine != null && propsFromCommandLine.containsKey(Globals.IMQ + ".readstdin.enabled")) {
propsFromStdin = readPropertiesFromStandardInput();
}
// Combine any properties specified using command-line parameters with any properties read from standard input
// The properties loaded from standard input have precedence and so are loaded second
Properties combinedProps = new Properties();
if (propsFromCommandLine != null) {
combinedProps.putAll(propsFromCommandLine);
}
if (propsFromStdin != null) {
combinedProps.putAll(propsFromStdin);
}
Globals.init(combinedProps, clearProps, saveProps);
if (!removeInstance) {
BrokerStateHandler.setShuttingDown(false);
BrokerStateHandler.setShutdownStarted(false);
}
haltLogString = Globals.getBrokerResources().getKString(BrokerResources.W_HALT_BROKER);
logger = Globals.getLogger();
String configdir = Globals.getInstanceDir();
File f = new File(configdir);
//
if (!f.exists()) {
// check parent directory
while (!f.exists()) {
// loop up looking for the parent
f = f.getParentFile();
if (f == null) {
// nowhere else to go
break;
}
if (!f.exists()) {
continue;
}
if (!f.canWrite() || !f.canRead()) {
String emsg = rb.getKString(rb.E_CAN_NOT_WRITE, f, Globals.getConfigName());
if (!silent) {
printErr(emsg);
}
if (failStartThrowable != null) {
failStartThrowable.initCause(new Exception(emsg));
}
return (BrokerExitCode.NO_PERMISSION_ON_INSTANCE);
}
}
} else if (!f.canWrite() || !f.canRead()) {
String emsg = rb.getKString(rb.E_CAN_NOT_WRITE, f, Globals.getConfigName());
if (!silent) {
System.err.println(emsg);
}
if (failStartThrowable != null) {
failStartThrowable.initCause(new Exception(emsg));
}
return (BrokerExitCode.NO_PERMISSION_ON_INSTANCE);
}
// NOTE: do not add code above that has the side effect of creating an instance before this check
if (removeInstance) {
removeInstance();
}
// if a password file is specified, read it
try {
parsePassfile();
} catch (IOException ex) {
logger.log(Logger.FORCE, rb.E_OPTION_VALID_ERROR, ex);
if (failStartThrowable != null) {
failStartThrowable.initCause(ex);
}
return (1);
}
// Initialize any possible debug settings
try {
com.sun.messaging.jmq.util.Debug.setDebug(Globals.getConfig(), Globals.IMQ + ".debug.");
} catch (Exception e) {
logger.log(Logger.WARNING, rb.W_BAD_DEBUG_CLASS, e);
}
// Initialize any diag settings
try {
com.sun.messaging.jmq.util.DiagManager.registerClasses(Globals.getConfig(), Globals.IMQ + ".diag.");
} catch (Exception e) {
logger.log(Logger.WARNING, rb.W_BAD_DIAG_CLASS, e);
}
BrokerConfig conf = Globals.getConfig();
try {
checkBrokerConfig(conf);
} catch (Exception e) {
logger.logToAll(logger.ERROR, e.getMessage());
if (failStartThrowable != null) {
failStartThrowable.initCause(e);
}
return 1;
}
if (!Globals.isJMSRAManagedSpecified() && Globals.isJMSRAManagedBroker()) {
String emsg = Globals.getBrokerResources().getKString(BrokerResources.E_START_JMSRA_MANAGED_BROKER_NONMANAGED);
logger.log(Logger.ERROR, emsg);
if (failStartThrowable != null) {
failStartThrowable.initCause(new Exception(emsg));
}
return (1);
}
if (Globals.isJMSRAManagedBroker()) {
try {
conf.updateBooleanProperty(Globals.JMSRA_MANAGED_PROPERTY, true, true);
} catch (Exception e) {
logger.logStack(Logger.ERROR, Globals.getBrokerResources().getKString(BrokerResources.E_SET_BROKER_CONFIG_PROPERTY, Globals.JMSRA_MANAGED_PROPERTY + "=true", e.getMessage()), e);
if (failStartThrowable != null) {
failStartThrowable.initCause(e);
}
return (1);
}
}
CoreLifecycleSpi coreLifecycle = Globals.getCoreLifecycle();
String banner = version.getBanner(false, Version.MINI_COPYRIGHT) + rb.getString(rb.I_JAVA_VERSION) + System.getProperty("java.version") + " " + System.getProperty("java.vendor") + " " + System.getProperty("java.home");
logger.logToAll(Logger.INFO, rb.NL + banner);
// Check to see if we have a version mismatch
if (!version.isProductValid()) {
// not valid - display an error
//
String emsg = rb.getKString(BrokerResources.E_INVALID_PRODUCT_VERSION);
logger.log(Logger.ERROR, emsg);
if (failStartThrowable != null) {
failStartThrowable.initCause(new Exception(emsg));
}
return (1);
}
try {
initializePasswdFile();
} catch (IOException ex) {
if (failStartThrowable != null) {
failStartThrowable.initCause(ex);
}
return (1);
}
if (initOnly) {
logger.log(Logger.INFO, BrokerResources.I_INIT_DONE);
return 0;
}
try {
AccessController.setSecurityManagerIfneed();
} catch (Exception e) {
logger.logStack(Logger.ERROR, e.getMessage(), e);
if (failStartThrowable != null) {
failStartThrowable.initCause(e);
}
return (1);
}
if (!MQAuthenticator.authenticateCMDUserIfset()) {
logger.log(Logger.INFO, BrokerResources.I_SHUTDOWN_BROKER);
if (failStartThrowable != null) {
failStartThrowable.initCause(new Exception("MQAuthenticator failed"));
}
return (1);
}
// For printing out VM heap info
DiagManager.register(new VMDiagnostics());
if (Version.compareVersions(System.getProperty("java.specification.version"), MIN_JAVA_VERSION, true) < 0) {
String emsg = rb.getKString(rb.E_BAD_JAVA_VERSION, System.getProperty("java.specification.version"), MIN_JAVA_VERSION);
logger.logToAll(Logger.ERROR, emsg);
if (failStartThrowable != null) {
failStartThrowable.initCause(new Exception(emsg));
}
return (1);
}
String hostname = conf.getProperty(Globals.IMQ + ".hostname");
// Save value of imq.hostname. This may be null which is OK
Globals.setHostname(hostname);
/*
* In a variety of places we need to know the name of the host the broker is running on, and its IP address. Typically
* you get this by calling getLocalHost(). But if the broker is running on a multihomed machine, you may want to control
* which interface (and IP address) the broker uses. Therefore we support the imq.hostname property to let the user
* configure this.
*/
if (hostname == null || hostname.equals(Globals.HOSTNAME_ALL)) {
// No hostname specified. Get local host
try {
InetAddress ia = InetAddress.getLocalHost();
Globals.setBrokerInetAddress(ia);
} catch (UnknownHostException e) {
logger.log(Logger.ERROR, rb.E_NO_LOCALHOST, e);
logger.log(Logger.INFO, rb.M_BROKER_EXITING);
if (failStartThrowable != null) {
failStartThrowable.initCause(e);
}
return (1);
}
} else {
// A hostname was specified, possibly one interface of a multihomed
// machine. Look up the address now so we have its IP
try {
InetAddress ia = InetAddress.getByName(hostname);
Globals.setBrokerInetAddress(ia);
} catch (UnknownHostException e) {
logger.log(Logger.ERROR, rb.getString(rb.E_BAD_HOSTNAME_PROP, hostname, Globals.IMQ + ".hostname"), e);
logger.log(Logger.INFO, rb.M_BROKER_EXITING);
if (failStartThrowable != null) {
failStartThrowable.initCause(e);
}
return (1);
}
}
/*
* Do the same thing for JMX hostname. On a multihome system, we may want to designate different IP addresses for the
* broker and for JMX traffic. A new property imq.jmx.hostname is created for this. By default, it will be set to
* whatever value imq.hostname is set to.
*/
String jmxHostname = conf.getProperty(Globals.IMQ + ".jmx.hostname");
// Save value of imq.jmx.hostname. This may be null which is OK
Globals.setJMXHostname(jmxHostname);
/*
* Only check for the case where the JMX hostname is specified. If it is not, it will default to whatever value is
* configured for the broker hostname (imq.hostname).
*/
if (jmxHostname != null && !jmxHostname.equals(Globals.HOSTNAME_ALL)) {
// A JMX hostname was specified, possibly one interface of a multihomed
// machine. Look up the address now so we have its IP
try {
InetAddress ia = InetAddress.getByName(jmxHostname);
Globals.setJMXInetAddress(ia);
} catch (UnknownHostException e) {
logger.log(Logger.ERROR, rb.getString(rb.E_BAD_HOSTNAME_PROP, hostname, Globals.IMQ + ".jmx.hostname"), e);
logger.log(Logger.INFO, rb.M_BROKER_EXITING);
if (failStartThrowable != null) {
failStartThrowable.initCause(e);
}
return (1);
}
}
try {
logger.logToAll(Logger.INFO, " IMQ_HOME=" + new File(Globals.getJMQ_HOME()).getCanonicalPath());
logger.logToAll(Logger.INFO, "IMQ_VARHOME=" + new File(Globals.getJMQ_VAR_HOME()).getCanonicalPath());
} catch (IOException e) {
}
logger.logToAll(Logger.INFO, System.getProperty("os.name") + " " + System.getProperty("os.version") + " " + System.getProperty("os.arch") + " " + Globals.getBrokerHostName() + " " + "(" + Runtime.getRuntime().availableProcessors() + " cpu) " + System.getProperty("user.name"));
try {
// Log ulimit -n values
Rlimit.Limits limits = Rlimit.get(Rlimit.RLIMIT_NOFILE);
logger.logToAll(Logger.INFO, rb.getString(rb.I_NOFILES), ((limits.current == Rlimit.RLIM_INFINITY) ? "unlimited" : String.valueOf(limits.current)), ((limits.maximum == Rlimit.RLIM_INFINITY) ? "unlimited" : String.valueOf(limits.maximum)));
} catch (Exception e) {
// This is OK. It just means we can't log ulimit values on
// this platform.
}
// Log JVM heap size information
logger.logToAll(Logger.INFO, rb.getString(rb.I_JAVA_HEAP), Long.toString(Runtime.getRuntime().maxMemory() / 1024), Long.toString(Runtime.getRuntime().totalMemory() / 1024));
// Start of logging of the various sets of properties that have been supplied in various ways
// log the actual broker command-line arguments
logger.logToAll(Logger.INFO, rb.getString(rb.I_BROKER_ARGS), (propsFromCommandLine == null ? "" : propsFromCommandLine.get("BrokerArgs")));
// if the broker is non-embedded and started by JMSRA
// log any properties read from standard input
logProperties("JMSRA BrokerProps: ", propsFromStdin);
// log any properties supplied programmatically
if (embeddedBrokerStartupMessages != null) {
for (String thisMessage : embeddedBrokerStartupMessages) {
// Log the embeddedBrokerStartupMessages here
// These are typically used to log broker properties configured on an embedded broker
// which is why we perform this logging at this point.
// However they can also be used to log other information passed by the code that started the embedded broker
logger.logToAll(Logger.INFO, thisMessage);
}
}
// log all properties specified on the command line either explicitly or via command-line arguments
logProperties(rb.getString(rb.I_BROKER_PROPERTIES), propsFromCommandLine);
if (inProcess) {
logger.logToAll(Logger.INFO, rb.getString(rb.I_INPROCESS_BROKER));
}
// set up out of memory handler
Globals.setGlobalErrorHandler(this);
// Get admin key from the key file if any. This is only used by
// the NT service to handle shutdown.
String key = getAdminKey(adminKeyFile);
String propname = Globals.IMQ + ".adminkey";
if (key == null || key.length() == 0) {
// Make sure property is not set
conf.remove(propname);
} else {
try {
conf.updateProperty(propname, key);
} catch (Exception e) {
}
}
/*
* Hawk HA : retrieve ha properties and brokerid
*/
boolean isHA = Globals.getHAEnabled();
String brokerid = Globals.getBrokerID();
String clusterid = Globals.getClusterID();
if (Globals.getHAEnabled()) {
if (brokerid == null) {
String emsg = rb.getKString(rb.E_BID_MUST_BE_SET);
logger.logToAll(Logger.ERROR, emsg);
if (failStartThrowable != null) {
failStartThrowable.initCause(new Exception(emsg));
}
return (1);
}
logger.log(Logger.INFO, BrokerResources.I_RUNNING_IN_HA, brokerid, clusterid);
} else if (brokerid != null) {
logger.log(Logger.INFO, BrokerResources.I_STARTING_WITH_BID, brokerid);
}
PortMapper pm = Globals.getPortMapper();
if (pm == null || (pm.getServerSocket() == null && pm.isDoBind() && Globals.getPUService() == null)) {
// PortMapper failed to bind to port. Port is probably already
// in use. An error message has already been logged so just exit
String emsg = rb.getString(rb.E_PORTMAPPER_START);
logger.logToAll(logger.ERROR, emsg);
if (failStartThrowable != null) {
failStartThrowable.initCause(new Exception(emsg));
}
return (1);
}
/*
* Store MQAddress in Globals so it can be accessed when needed One place this is used is the "brokerAddress" property
* of monitoring messages.
*/
MQAddress addr = null;
try {
addr = BrokerMQAddress.createAddress(Globals.getBrokerHostName(), pm.getPort());
} catch (Exception e) {
logger.logStack(Logger.INFO, BrokerResources.E_CANNOT_CREATE_MQADDRESS, "[" + Globals.getBrokerHostName() + "]:" + pm.getPort(), e);
}
boolean NO_CLUSTER;
try {
NO_CLUSTER = Globals.initClusterManager(addr);
if (NO_CLUSTER) {
logger.log(Logger.WARNING, BrokerResources.I_USING_NOCLUSTER);
}
} catch (Exception e) {
logger.logStack(Logger.ERROR, BrokerResources.E_INITING_CLUSTER, e);
if (failStartThrowable != null) {
failStartThrowable.initCause(e);
}
return (1);
}
if (Globals.useMasterBroker() && Globals.dynamicChangeMasterBrokerEnabled() && !Globals.isJMSRAManagedBroker()) {
if (Globals.isMasterBrokerSpecified()) {
String emsg = Globals.getBrokerResources().getKString(BrokerResources.X_CLUSTER_NO_CMDLINE_MASTERBROKER_WHEN_DYNAMIC, ClusterManager.CONFIG_SERVER, Globals.DYNAMIC_CHANGE_MASTERBROKER_ENABLED_PROP + "=true");
logger.log(Logger.ERROR, emsg);
if (failStartThrowable != null) {
failStartThrowable.initCause(new Exception(emsg));
}
return (1);
}
}
pm.updateProperties();
if (pm.isDoBind()) {
if (Globals.getPUService() == null) {
// start the PortMapper thread
Thread portMapperThread = new MQThread(pm, "JMQPortMapper");
portMapperThread.setDaemon(true);
portMapperThread.start();
} else {
try {
pm.startPUService();
} catch (Exception e) {
logger.logStack(logger.ERROR, e.getMessage(), e);
if (failStartThrowable != null) {
failStartThrowable.initCause(e);
}
return (1);
}
}
}
// Try to acquire the lock file. This makes sure no other copy
// of the broker is running in the same instance directory as
// we are.
LockFile lf = null;
try {
lf = LockFile.getLock(conf.getProperty(Globals.JMQ_VAR_HOME_PROPERTY), Globals.getConfigName(), (pm.getHostname() == null || pm.getHostname().equals("") ? Globals.getMQAddress().getHostName() : pm.getMQAddress().getHostName()), pm.getPort(), Globals.getUseFileLockForLockFile());
} catch (Exception e) {
Object[] msgargs = { LockFile.getLockFilePath(conf.getProperty(Globals.JMQ_VAR_HOME_PROPERTY), Globals.getConfigName()), e.toString(), Globals.getConfigName() };
logger.logStack(Logger.ERROR, rb.E_LOCKFILE_EXCEPTION, msgargs, e);
if (failStartThrowable != null) {
failStartThrowable.initCause(e);
}
return (1);
}
// Make sure we got the lock
if (!lf.isMyLock()) {
Object[] msgargs = { lf.getFilePath(), lf.getHost() + ":" + lf.getPort(), Globals.getConfigName() };
String emsg = rb.getKString(rb.E_LOCKFILE_INUSE, msgargs);
logger.log(Logger.ERROR, emsg);
if (failStartThrowable != null) {
failStartThrowable.initCause(new Exception(emsg));
}
return (1);
}
// audit logging for reset store
if (resetStore) {
Globals.getAuditSession().storeOperation(null, null, MQAuditSession.RESET_STORE);
if (isHA) {
logger.log(Logger.WARNING, BrokerResources.W_HA_NO_RESET);
}
}
// open the persistent store (persisted messages and consumer interests are loaded into destinations later)
try {
// logging
Globals.isMinimumPersist();
store = Globals.getStore();
} catch (BrokerException ex) {
logger.logStack(Logger.ERROR, BrokerResources.E_PERSISTENT_OPEN, ex);
if (failStartThrowable != null) {
failStartThrowable.initCause(ex);
}
return (1);
}
if (Globals.useSharedConfigRecord()) {
try {
Globals.getStore().getShareConfigChangeStore();
} catch (BrokerException ex) {
logger.logStack(Logger.ERROR, BrokerResources.E_SHARECC_STORE_OPEN, ex);
if (failStartThrowable != null) {
failStartThrowable.initCause(ex);
}
return (1);
}
}
BridgeServiceManager bridgeManager = null;
if (BridgeBaseContextAdapter.bridgeEnabled() && !Globals.isNucleusManagedBroker()) {
logger.log(Logger.INFO, BrokerResources.I_INIT_BRIDGE_SERVICE_MANAGER);
try {
Class c = Class.forName(BridgeBaseContextAdapter.getManagerClass());
bridgeManager = (BridgeServiceManager) c.getDeclaredConstructor().newInstance();
bridgeManager.init(new BridgeBaseContextAdapter(this, resetStore));
} catch (Throwable t) {
bridgeManager = null;
logger.logStack(Logger.WARNING, Globals.getBrokerResources().getKString(BrokerResources.W_INIT_BRIDGE_SERVICE_MANAGER_FAILED), t);
}
}
HAMonitorService haMonitor = null;
if (Globals.getHAEnabled()) {
try {
String cname = "com.sun.messaging.jmq.jmsserver" + ".cluster.manager.ha.HAMonitorServiceImpl";
if (Globals.getHAEnabled()) {
logger.log(Logger.INFO, BrokerResources.I_STARTING_MONITOR);
if (Globals.isNucleusManagedBroker()) {
haMonitor = Globals.getHabitat().getService(HAMonitorService.class, cname);
if (haMonitor == null) {
throw new BrokerException("Class " + cname + " not found");
}
haMonitor.init(Globals.getClusterID(), Globals.getMQAddress(), resetTakeoverThenExit);
} else {
Class c = Class.forName(cname);
Class[] paramTypes = { String.class, MQAddress.class, Boolean.class };
Object[] paramArgs = { Globals.getClusterID(), Globals.getMQAddress(), Boolean.valueOf(resetTakeoverThenExit) };
Constructor cons = c.getConstructor(paramTypes);
haMonitor = (HAMonitorService) cons.newInstance(paramArgs);
}
if (resetTakeoverThenExit) {
return (0);
}
} else {
if (Globals.isNucleusManagedBroker()) {
haMonitor = Globals.getHabitat().getService(HAMonitorService.class, cname);
if (haMonitor == null) {
throw new BrokerException("Class " + cname + " not found");
}
} else {
Class c = Class.forName(cname);
haMonitor = (HAMonitorService) c.getDeclaredConstructor().newInstance();
}
}
Globals.setHAMonitorService(haMonitor);
} catch (Exception ex) {
logger.logStack(Logger.ERROR, BrokerResources.E_ERROR_STARTING_MONITOR, ex);
if (ex instanceof StoreBeingTakenOverException) {
if (failStartThrowable != null) {
failStartThrowable.initCause(ex);
}
return (BrokerStateHandler.getRestartCode());
}
if (failStartThrowable != null) {
failStartThrowable.initCause(ex);
}
return (1);
}
if (Globals.getHAEnabled()) {
logger.log(Logger.INFO, BrokerResources.I_STARTING_HEARTBEAT);
try {
Class c = Class.forName("com.sun.messaging.jmq.jmsserver" + ".multibroker.heartbeat.HeartbeatService");
Object hbs = c.getDeclaredConstructor().newInstance();
Globals.registerHeartbeatService(hbs);
} catch (Exception e) {
logger.logStack(Logger.ERROR, BrokerResources.E_ERROR_STARTING_HB, e);
if (failStartThrowable != null) {
failStartThrowable.initCause(e);
}
return (1);
}
}
}
try {
store.addPartitionListener(Globals.getClusterManager());
store.partitionsReady();
} catch (Exception e) {
logger.logStack(Logger.ERROR, e.getMessage(), e);
if (failStartThrowable != null) {
failStartThrowable.initCause(e);
}
return (1);
}
if (NO_CLUSTER) {
mbus = new com.sun.messaging.jmq.jmsserver.cluster.api.NoCluster();
logger.log(Logger.FORCE, Globals.getBrokerResources().getKString(BrokerResources.I_FEATURE_UNAVAILABLE, Globals.getBrokerResources().getString(BrokerResources.M_CLUSTER_SERVICE_FEATURE)));
} else {
try {
String cname = CLUSTER_BROADCASTER_SERVICE_NAME;
if (Globals.isNucleusManagedBroker()) {
mbus = Globals.getHabitat().getService(ClusterBroadcast.class, cname);
if (mbus == null) {
String emsg = "Failed to init cluster service because class " + cname + " not found";
if (Globals.getHAEnabled() || Globals.getClusterManager().getConfigBrokerCount() > 0) {
logger.log(Logger.ERROR, emsg);
if (failStartThrowable != null) {
failStartThrowable.initCause(new Exception(emsg));
}
return (1);
} else {
throw new BrokerException(emsg);
}
}
mbus.init(DEFAULT_CLUSTER_VERSION);
} else {
Class c = Class.forName(cname);
Class[] paramTypes = { Integer.class };
Constructor cons = c.getConstructor(paramTypes);
Object[] paramArgs = { Integer.valueOf(DEFAULT_CLUSTER_VERSION) };
mbus = (ClusterBroadcast) cons.newInstance(paramArgs);
}
} catch (ClassNotFoundException cnfe) {
logger.logStack(Logger.DEBUG, BrokerResources.E_INTERNAL_BROKER_ERROR, "unable to use cluster broadcaster", cnfe);
logger.log(Logger.WARNING, BrokerResources.I_USING_NOCLUSTER + ": " + cnfe);
mbus = new com.sun.messaging.jmq.jmsserver.cluster.api.NoCluster();
} catch (InvocationTargetException ite) {
Throwable ex = ite.getCause();
if (ex instanceof InvocationTargetException) {
ex = ex.getCause();
}
if (!(ex instanceof LoopbackAddressException)) {
logger.logStack(Logger.INFO, BrokerResources.X_INTERNAL_EXCEPTION, ex.getMessage(), ex);
}
logger.log(Logger.WARNING, BrokerResources.I_USING_NOCLUSTER);
mbus = new com.sun.messaging.jmq.jmsserver.cluster.api.NoCluster();
} catch (Exception ex) {
logger.logStack(Logger.WARNING, "Unable to use cluster broadcaster", ex);
logger.log(Logger.WARNING, BrokerResources.I_USING_NOCLUSTER);
mbus = new com.sun.messaging.jmq.jmsserver.cluster.api.NoCluster();
}
}
Globals.setClusterBroadcast(mbus);
Globals.setMyAddress(mbus.getMyAddress());
/*
* HANDLE LDAP PROPERTIES XXX - this is not the cleanest way to handle this technically it should be better integrated
* with the authentication interfaces ... but I'm close to code freeze XXX-REVISIT racer 4/10/02
*/
String type = Globals.getConfig().getProperty(AccessController.PROP_AUTHENTICATION_TYPE);
if (type != null) {
String userrep = Globals.getConfig().getProperty(AccessController.PROP_AUTHENTICATION_PREFIX + type + AccessController.PROP_USER_REPOSITORY_SUFFIX);
if (userrep.equals("ldap")) {
String DN = Globals.getConfig().getProperty(AccessController.PROP_USER_REPOSITORY_PREFIX + userrep + ".principal");
String pwd = Globals.getConfig().getProperty(AccessController.PROP_USER_REPOSITORY_PREFIX + userrep + ".password");
if (DN != null && DN.trim().length() > 0) {
// we have a DN
if (pwd == null || pwd.trim().length() == 0) {
int retry = 0;
Password pw = null;
boolean setProp = pwd == null || pwd.equals("");
while ((pwd == null || pwd.trim().equals("")) && retry < 5) {
pw = new Password();
if (pw.echoPassword()) {
System.err.println(Globals.getBrokerResources().getString(BrokerResources.W_ECHO_PASSWORD));
}
System.err.print(Globals.getBrokerResources().getString(BrokerResources.M_ENTER_KEY_LDAP, DN));
System.err.flush();
pwd = pw.getPassword();
// Limit the number of times we try
// reading the passwd.
// If the VM is run in the background
// the readLine()
// will always return null and
// we'd get stuck in the loop
retry++;
}
if (pwd == null || pwd.trim().equals("")) {
logger.log(Logger.WARNING, BrokerResources.W_NO_LDAP_PASSWD, pwd);
Globals.getConfig().put(AccessController.PROP_USER_REPOSITORY_PREFIX + userrep + ".principal", "");
} else if (setProp) {
Globals.getConfig().put(AccessController.PROP_USER_REPOSITORY_PREFIX + userrep + ".password", pwd);
}
}
}
}
}
ConnectionManager cmgr = new ConnectionManager();
Globals.setConnectionManager(cmgr);
// get the persisted data
try {
coreLifecycle.getDestinationList().addPartitionListener(Globals.getClusterManager());
coreLifecycle.initDestinations();
coreLifecycle.initSubscriptions();
BrokerMonitor.init();
} catch (BrokerException ex) {
logger.logStack(Logger.ERROR, BrokerResources.E_UNABLE_TO_RETRIEVE_DATA, ex);
if (failStartThrowable != null) {
failStartThrowable.initCause(ex);
}
return 1;
}
// Initialize the JMX Agent
try {
Class.forName("javax.management.MBeanServer");
Agent agent = new Agent();
Globals.setAgent(agent);
agent.start();
} catch (Exception e) {
logger.log(Logger.WARNING, "JMX classes not present - JMX Agent is not created.");
}
/*
* Check if we should support old (pre 3.0.1SP2) selector type conversions (which violated the JMS spec).
*/
Selector.setConvertTypes(conf.getBooleanProperty(Globals.IMQ + ".selector.convertTypes", false));
/*
* By default the selector code short circuits boolean expression evaluation. This is a back door to disable that in
* case there is a flaw in the implementation.
*/
Selector.setShortCircuit(conf.getBooleanProperty(Globals.IMQ + ".selector.shortCircuit", true));
/*
* When shortCircuit is true, by default at selector compile time do additional test for shortCircuit
*/
Selector.setShortCircuitCompileTimeTest(conf.getBooleanProperty(Globals.IMQ + ".selector.shortCircuitCompileTimeTest", true));
// create the handlers - these handle the message processing
pktrtr = new PacketRouter();
// set up the admin packet router
admin_pktrtr = new PacketRouter();
AdminDataHandler admin_datahdrl = new AdminDataHandler();
Globals.setProtocol(new ProtocolImpl(pktrtr));
try {
coreLifecycle.initHandlers(pktrtr, cmgr, admin_pktrtr, admin_datahdrl);
} catch (Exception e) {
logger.logStack(Logger.ERROR, e.getMessage(), e);
if (failStartThrowable != null) {
failStartThrowable.initCause(e);
}
return 1;
}
try {
PacketRouter pkr = new PacketRouter();
CoreLifecycleSpi clc = Globals.getCorePlugin(CoreLifecycleSpi.CHMP);
if (clc != null) {
Class.forName("com.oracle.coherence.patterns.messaging.DefaultMessagingSession");
clc.initHandlers(pkr, cmgr, null, null);
}
} catch (Exception e) {
logger.logStack(Logger.ERROR, e.getMessage(), e);
if (failStartThrowable != null) {
failStartThrowable.initCause(e);
}
return 1;
}
// The admin message handlers may need to locate standard packet
// handlers, so we give it a reference to the PacketRouter.
admin_datahdrl.setPacketRouter(admin_pktrtr);
PacketRouter[] routers = { pktrtr, admin_pktrtr };
Globals.setPacketRouters(routers);
if (Globals.useSharedConfigRecord()) {
try {
mbus.syncChangeRecordOnStartup();
} catch (Exception e) {
logger.logStack(Logger.ERROR, rb.getKString(rb.E_SHARCC_SYNC_ON_STARTUP_FAILED, Globals.getClusterID(), e.getMessage()), e);
if (failStartThrowable != null) {
failStartThrowable.initCause(e);
}
return (1);
}
}
TLSProtocol.init();
ServiceManager sm = new ServiceManager(cmgr);
Globals.setServiceManager(sm);
sm.updateServiceList(sm.getAllActiveServiceNames(), ServiceType.ADMIN, false);
/*
* Check if we need to pause the normal services until MessageBus syncs with the config server. The services will be
* resumed by the MessageManager when it gets a notification from the MessageBus
*/
if (mbus.waitForConfigSync()) {
sm.updateServiceList(sm.getAllActiveServiceNames(), ServiceType.NORMAL, true);
if (Globals.nowaitForMasterBroker()) {
sm.addServiceRestriction(ServiceType.NORMAL, ServiceRestriction.NO_SYNC_WITH_MASTERBROKER);
logger.log(Logger.WARNING, rb.I_MBUS_LIMITEDJMS);
try {
sm.resumeAllActiveServices(ServiceType.NORMAL, true);
} catch (BrokerException e) {
logger.logStack(Logger.ERROR, e.getMessage(), e);
}
} else {
logger.log(Logger.WARNING, rb.I_MBUS_PAUSING);
}
} else {
sm.updateServiceList(sm.getAllActiveServiceNames(), ServiceType.NORMAL, false, /* don't pause */ true);
}
// OK, create the BrokerStateHandler
Globals.setBrokerStateHandler(new BrokerStateHandler());
// provide an option not to add shutdown hook.
// This makes it easier to test restarts after ungraceful exits
boolean noShutdownHook = Boolean.getBoolean(Globals.IMQ + ".noShutdownHook");
if (inProcess || noShutdownHook || (shutdownHook = addBrokerShutdownHook()) == null) {
// Couldn't add shutdown hook. Probably because running against 1.2
logger.log(Logger.DEBUG, rb.I_NO_SHUTDOWN_HOOK);
} else {
logger.log(Logger.DEBUG, rb.I_SHUTDOWN_HOOK);
}
// start the memory manager
if (!inProcess) {
Globals.getMemManager().startManagement();
} else {
Globals.setMemMgrOn(false);
}
// Initialize the metric manager. This is the module that
// generates performance data reports
MetricManager mm = new MetricManager();
Globals.setMetricManager(mm);
mm.setParameters(Globals.getConfig());
/*
* Set the list of properties that must be matched before accepting connections from other brokers.
*/
Properties matchProps = new Properties();
matchProps.setProperty(Globals.USE_FILELOCK_FOR_LOCKFILE_PROP, String.valueOf(Globals.getUseFileLockForLockFile()));
matchProps.setProperty(Globals.IMQ + ".autocreate.queue", Globals.getConfig().getProperty(Globals.IMQ + ".autocreate.queue", "false"));
matchProps.setProperty(Globals.IMQ + ".autocreate.topic", Globals.getConfig().getProperty(Globals.IMQ + ".autocreate.topic", "false"));
//
// "imq.queue.deliverypolicy" was used as one of the
// "matchProps" in the 3.0.1 clusters. So even if this
// property is now obsolete we still need to pretend that it
// exists for cluster protocol compatibility.
//
int active = Queue.getDefaultMaxActiveConsumers();
int failover = Queue.getDefaultMaxFailoverConsumers();
if (active == 1 && failover == 0) {
matchProps.setProperty(Globals.IMQ + ".queue.deliverypolicy", "single");
}
if (active == 1 && failover != 0) {
matchProps.setProperty(Globals.IMQ + ".queue.deliverypolicy", "failover");
}
if ((active == Queue.UNLIMITED || active > 1) && failover == 0) {
matchProps.setProperty(Globals.IMQ + ".queue.deliverypolicy", "round-robin");
}
if (Globals.getClusterID() != null) {
matchProps.setProperty(Globals.IMQ + ".cluster.clusterid", Globals.getClusterID());
}
if (isHA) {
// must be true
matchProps.setProperty(Globals.IMQ + ".cluster.ha", Globals.getConfig().getProperty(Globals.IMQ + ".cluster.ha"));
matchProps.setProperty(StoreManager.STORE_TYPE_PROP, Globals.getConfig().getProperty(StoreManager.STORE_TYPE_PROP));
if (Globals.getJDBCHAEnabled()) {
matchProps.setProperty(Globals.IMQ + ".cluster.monitor.interval", String.valueOf(haMonitor.getMonitorInterval()));
matchProps.setProperty(Globals.IMQ + ".cluster.heartbeat.class", Globals.getConfig().getProperty(Globals.IMQ + ".cluster.heartbeat.class"));
}
matchProps.setProperty(Globals.IMQ + ".service.activelist", Globals.getConfig().getProperty(Globals.IMQ + ".service.activelist"));
matchProps.setProperty(Globals.IMQ + ".bridge.enabled", Globals.getConfig().getProperty(Globals.IMQ + ".bridge.enabled", "false"));
} else if (Globals.isNewTxnLogEnabled()) {
matchProps.setProperty(StoreManager.NEW_TXNLOG_ENABLED_PROP, "true");
}
if (Globals.getConfig().getProperty(Globals.AUTOCLUSTER_BROKERMAP_CLASS_PROP) != null) {
matchProps.setProperty(Globals.AUTOCLUSTER_BROKERMAP_CLASS_PROP, Globals.getConfig().getProperty(Globals.AUTOCLUSTER_BROKERMAP_CLASS_PROP));
}
if (Globals.getClusterManager().getMasterBroker() != null && Globals.nowaitForMasterBroker()) {
matchProps.setProperty(Globals.NOWAIT_MASTERBROKER_PROP, "true");
}
if (Globals.useMasterBroker()) {
if (Globals.dynamicChangeMasterBrokerEnabled()) {
matchProps.setProperty(Globals.DYNAMIC_CHANGE_MASTERBROKER_ENABLED_PROP, "true");
}
}
if (Globals.useSharedConfigRecord()) {
matchProps.setProperty(Globals.NO_MASTERBROKER_PROP, "true");
}
try {
Map props = Globals.getStore().getClusterMatchProperties();
Map.Entry<String, String> pair = null;
Iterator<Map.Entry<String, String>> itr = props.entrySet().iterator();
while (itr.hasNext()) {
pair = itr.next();
matchProps.setProperty(pair.getKey(), pair.getValue());
}
} catch (Exception e) {
logger.logStack(logger.ERROR, e.getMessage(), e);
if (failStartThrowable != null) {
failStartThrowable.initCause(e);
}
return (1);
}
mbus.setMatchProps(matchProps);
/*
* Start talking to other brokers now that all the handlers are initialized and ready to process callbacks from
* MessageBus
*/
mbus.startClusterIO();
/**
* services are up and running (although we may be paused)
*/
startupComplete = true;
// audit logging of broker startup
Globals.getAuditSession().brokerOperation(null, null, MQAuditSession.BROKER_STARTUP);
Object[] sargs = { Globals.getConfigName() + "@" + (pm.getHostname() == null || pm.getHostname().equals("") ? Globals.getMQAddress().getHostName() : pm.getMQAddress().getHostName()) + ":" + pm.getPort() };
logger.logToAll(Logger.INFO, rb.I_BROKER_READY, sargs);
// Load MQ Mbeans in JMX agent
Agent agent = Globals.getAgent();
if (agent != null) {
agent.loadMBeans();
}
if (BridgeBaseContextAdapter.bridgeEnabled() && bridgeManager != null) {
try {
logger.log(Logger.INFO, Globals.getBrokerResources().I_START_BRIDGE_SERVICE_MANAGER);
bridgeManager.start();
Globals.setBridgeServiceManager(bridgeManager);
logger.log(Logger.INFO, Globals.getBrokerResources().I_STARTED_BRIDGE_SERVICE_MANAGER);
} catch (Throwable t) {
logger.logStack(Logger.WARNING, Globals.getBrokerResources().W_START_BRIDGE_SERVICE_MANAGER_FAILED, t);
}
}
} catch (OutOfMemoryError err) {
Globals.handleGlobalError(err, rb.getKString(rb.M_LOW_MEMORY_STARTUP), Integer.valueOf(1));
if (failStartThrowable != null) {
failStartThrowable.initCause(err);
}
return (1);
}
if (diagInterval > 0) {
MQTimer timer = Globals.getTimer();
int _interval = diagInterval * 1000;
timer.schedule(new BrokerDiagTask(), _interval, _interval);
} else if (diagInterval == 0) {
logger.log(Logger.INFO, DiagManager.allToString());
}
// started OK
return 0;
}
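A pattern that recurs throughout _start() is probing for an optional dependency with Class.forName before constructing anything that needs it: the JMX Agent is only created if javax.management.MBeanServer is loadable, and the Coherence handler chain is only wired if its messaging class is present. A stripped-down sketch of that guard follows; the class name probed is the same one the broker probes, and the rest is illustrative.
public class OptionalJmxProbe {
    public static void main(String[] args) {
        try {
            Class.forName("javax.management.MBeanServer");
            System.out.println("JMX classes present - a management agent could be created and started");
        } catch (ClassNotFoundException e) {
            // Mirrors the broker's behavior: log a warning and continue without the JMX agent.
            System.out.println("JMX classes not present - skipping the management agent");
        }
    }
}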