use of org.apache.activemq.artemis.core.config.ha.ReplicaPolicyConfiguration in project wildfly by wildfly.
the class ReplicationSlaveDefinition method buildConfiguration.
static HAPolicyConfiguration buildConfiguration(OperationContext context, ModelNode model) throws OperationFailedException {
   ReplicaPolicyConfiguration haPolicyConfiguration = new ReplicaPolicyConfiguration()
         .setAllowFailBack(ALLOW_FAILBACK.resolveModelAttribute(context, model).asBoolean())
         .setInitialReplicationSyncTimeout(INITIAL_REPLICATION_SYNC_TIMEOUT.resolveModelAttribute(context, model).asLong())
         .setMaxSavedReplicatedJournalsSize(MAX_SAVED_REPLICATED_JOURNAL_SIZE.resolveModelAttribute(context, model).asInt())
         .setScaleDownConfiguration(addScaleDownConfiguration(context, model))
         .setRestartBackup(RESTART_BACKUP.resolveModelAttribute(context, model).asBoolean());
   ModelNode clusterName = CLUSTER_NAME.resolveModelAttribute(context, model);
   if (clusterName.isDefined()) {
      haPolicyConfiguration.setClusterName(clusterName.asString());
   }
   ModelNode groupName = GROUP_NAME.resolveModelAttribute(context, model);
   if (groupName.isDefined()) {
      haPolicyConfiguration.setGroupName(groupName.asString());
   }
   return haPolicyConfiguration;
}
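For context, the policy built above is the same object an embedded broker attaches via Configuration.setHAPolicyConfiguration, as the Artemis snippets below do. A minimal sketch, assuming ConfigurationImpl as the concrete Configuration; the class name, method name, and literal values are illustrative stand-ins for the resolved management-model attributes, not part of either project:

import org.apache.activemq.artemis.core.config.Configuration;
import org.apache.activemq.artemis.core.config.ha.ReplicaPolicyConfiguration;
import org.apache.activemq.artemis.core.config.impl.ConfigurationImpl;

public class ReplicaPolicySketch {
   public static Configuration backupConfiguration() {
      ReplicaPolicyConfiguration replicaPolicy = new ReplicaPolicyConfiguration()
            .setAllowFailBack(true)                     // step back down when the original live returns
            .setInitialReplicationSyncTimeout(30_000L)  // illustrative: wait up to 30s for the initial sync
            .setMaxSavedReplicatedJournalsSize(2)
            .setRestartBackup(true);
      replicaPolicy.setClusterName("my-cluster");       // optional, as in the snippet above
      replicaPolicy.setGroupName("group-1");            // optional, pairs the backup with a live group
      return new ConfigurationImpl()
            .setPersistenceEnabled(true)
            .setSecurityEnabled(false)
            .setHAPolicyConfiguration(replicaPolicy);
   }
}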
use of org.apache.activemq.artemis.core.config.ha.ReplicaPolicyConfiguration in project activemq-artemis by apache.
the class JMSFailoverTest method startServers.
/**
 * @throws Exception
 */
protected void startServers() throws Exception {
   final boolean sharedStore = true;
   NodeManager nodeManager = new InVMNodeManager(!sharedStore);
   backuptc = new TransportConfiguration(INVM_CONNECTOR_FACTORY, backupParams);
   livetc = new TransportConfiguration(INVM_CONNECTOR_FACTORY);
   liveAcceptortc = new TransportConfiguration(INVM_ACCEPTOR_FACTORY);
   backupAcceptortc = new TransportConfiguration(INVM_ACCEPTOR_FACTORY, backupParams);
   backupParams.put(TransportConstants.SERVER_ID_PROP_NAME, 1);
   backupConf = createBasicConfig()
         .addAcceptorConfiguration(backupAcceptortc)
         .addConnectorConfiguration(livetc.getName(), livetc)
         .addConnectorConfiguration(backuptc.getName(), backuptc)
         .setSecurityEnabled(false)
         .setJournalType(getDefaultJournalType())
         .addAcceptorConfiguration(new TransportConfiguration(INVM_ACCEPTOR_FACTORY, backupParams))
         .setBindingsDirectory(getBindingsDir())
         .setJournalMinFiles(2)
         .setJournalDirectory(getJournalDir())
         .setPagingDirectory(getPageDir())
         .setLargeMessagesDirectory(getLargeMessagesDir())
         .setPersistenceEnabled(true)
         .setHAPolicyConfiguration(sharedStore ? new SharedStoreSlavePolicyConfiguration() : new ReplicaPolicyConfiguration())
         .addClusterConfiguration(basicClusterConnectionConfig(backuptc.getName(), livetc.getName()));
   backupServer = addServer(new InVMNodeManagerServer(backupConf, nodeManager));
   backupJMSServer = new JMSServerManagerImpl(backupServer);
   backupJMSServer.setRegistry(new JndiBindingRegistry(ctx2));
   backupJMSServer.getActiveMQServer().setIdentity("JMSBackup");
   log.info("Starting backup");
   backupJMSServer.start();
   liveConf = createBasicConfig()
         .setJournalDirectory(getJournalDir())
         .setBindingsDirectory(getBindingsDir())
         .setSecurityEnabled(false)
         .addAcceptorConfiguration(liveAcceptortc)
         .setJournalType(getDefaultJournalType())
         .setBindingsDirectory(getBindingsDir())
         .setJournalMinFiles(2)
         .setJournalDirectory(getJournalDir())
         .setPagingDirectory(getPageDir())
         .setLargeMessagesDirectory(getLargeMessagesDir())
         .addConnectorConfiguration(livetc.getName(), livetc)
         .setPersistenceEnabled(true)
         .setHAPolicyConfiguration(sharedStore ? new SharedStoreMasterPolicyConfiguration() : new ReplicatedPolicyConfiguration())
         .addClusterConfiguration(basicClusterConnectionConfig(livetc.getName()));
   liveServer = addServer(new InVMNodeManagerServer(liveConf, nodeManager));
   liveJMSServer = new JMSServerManagerImpl(liveServer);
   liveJMSServer.setRegistry(new JndiBindingRegistry(ctx1));
   liveJMSServer.getActiveMQServer().setIdentity("JMSLive");
   log.info("Starting life");
   liveJMSServer.start();
   JMSUtil.waitForServer(backupServer);
}
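Both configurations above select their HA policy with the same sharedStore ternary; only the policy classes differ between the live and the backup. A condensed sketch of just that selection, written as a hypothetical helper (HaPolicyChoice, forLive, forBackup are not part of the test):

import org.apache.activemq.artemis.core.config.HAPolicyConfiguration;
import org.apache.activemq.artemis.core.config.ha.ReplicaPolicyConfiguration;
import org.apache.activemq.artemis.core.config.ha.ReplicatedPolicyConfiguration;
import org.apache.activemq.artemis.core.config.ha.SharedStoreMasterPolicyConfiguration;
import org.apache.activemq.artemis.core.config.ha.SharedStoreSlavePolicyConfiguration;

final class HaPolicyChoice {
   // Live side: owns the shared journal, or streams its journal to the backup.
   static HAPolicyConfiguration forLive(boolean sharedStore) {
      return sharedStore ? new SharedStoreMasterPolicyConfiguration() : new ReplicatedPolicyConfiguration();
   }

   // Backup side: waits on the shared journal, or receives the replicated journal.
   static HAPolicyConfiguration forBackup(boolean sharedStore) {
      return sharedStore ? new SharedStoreSlavePolicyConfiguration() : new ReplicaPolicyConfiguration();
   }
}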
use of org.apache.activemq.artemis.core.config.ha.ReplicaPolicyConfiguration in project activemq-artemis by apache.
the class ClusterTestBase method setupBackupServer.
/**
 * Server lacks a {@link ClusterConnectionConfiguration} necessary for the remote (replicating)
 * backup case.
 * <br>
 * Use
 * {@link #setupClusterConnectionWithBackups(String, String, org.apache.activemq.artemis.core.server.cluster.impl.MessageLoadBalancingType, int, boolean, int, int[])}
 * to add it.
 *
 * @param node
 * @param liveNode
 * @param fileStorage
 * @param sharedStorage
 * @param netty
 * @throws Exception
 */
protected void setupBackupServer(final int node, final int liveNode, final boolean fileStorage, final boolean sharedStorage, final boolean netty) throws Exception {
   if (servers[node] != null) {
      throw new IllegalArgumentException("Already a server at node " + node);
   }
   TransportConfiguration liveConfig = createTransportConfiguration(netty, false, generateParams(liveNode, netty));
   TransportConfiguration backupConfig = createTransportConfiguration(netty, false, generateParams(node, netty));
   TransportConfiguration acceptorConfig = createTransportConfiguration(netty, true, generateParams(node, netty));
   Configuration configuration = createBasicConfig(sharedStorage ? liveNode : node)
         .clearAcceptorConfigurations()
         .addAcceptorConfiguration(acceptorConfig)
         .addConnectorConfiguration(liveConfig.getName(), liveConfig)
         .addConnectorConfiguration(backupConfig.getName(), backupConfig)
         .setHAPolicyConfiguration(sharedStorage ? new SharedStoreSlavePolicyConfiguration() : new ReplicaPolicyConfiguration());
   ActiveMQServer server;
   if (sharedStorage) {
      server = createInVMFailoverServer(true, configuration, nodeManagers[liveNode], liveNode);
   } else {
      boolean enablePersistency = fileStorage ? true : configuration.isPersistenceEnabled();
      server = addServer(ActiveMQServers.newActiveMQServer(configuration, enablePersistency));
   }
   server.setIdentity(this.getClass().getSimpleName() + "/Backup(" + node + " of live " + liveNode + ")");
   servers[node] = addServer(server);
}
use of org.apache.activemq.artemis.core.config.ha.ReplicaPolicyConfiguration in project activemq-artemis by apache.
the class GroupingFailoverTestBase method testGroupingLocalHandlerFailsMultipleGroups.
@Test
public void testGroupingLocalHandlerFailsMultipleGroups() throws Exception {
   setupBackupServer(2, 0, isFileStorage(), isSharedStore(), isNetty());
   setupLiveServer(0, isFileStorage(), isSharedStore(), isNetty(), false);
   setupLiveServer(1, isFileStorage(), isSharedStore(), isNetty(), false);
   setupClusterConnection("cluster0", "queues", MessageLoadBalancingType.ON_DEMAND, 1, isNetty(), 0, 1);
   setupClusterConnection("cluster1", "queues", MessageLoadBalancingType.ON_DEMAND, 1, isNetty(), 1, 0);
   setupClusterConnection("cluster0", "queues", MessageLoadBalancingType.ON_DEMAND, 1, isNetty(), 2, 1);
   setUpGroupHandler(GroupingHandlerConfiguration.TYPE.LOCAL, 0);
   setUpGroupHandler(GroupingHandlerConfiguration.TYPE.REMOTE, 1);
   setUpGroupHandler(GroupingHandlerConfiguration.TYPE.LOCAL, 2);
   if (!isSharedStore()) {
      ((ReplicatedPolicyConfiguration) servers[0].getConfiguration().getHAPolicyConfiguration()).setGroupName("group1");
      ((ReplicatedPolicyConfiguration) servers[1].getConfiguration().getHAPolicyConfiguration()).setGroupName("group2");
      ((ReplicaPolicyConfiguration) servers[2].getConfiguration().getHAPolicyConfiguration()).setGroupName("group1");
   }
   startServers(0, 1, 2);
   setupSessionFactory(0, isNetty());
   setupSessionFactory(1, isNetty());
   createQueue(0, "queues.testaddress", "queue0", null, true);
   waitForBindings(0, "queues.testaddress", 1, 0, true);
   createQueue(1, "queues.testaddress", "queue0", null, true);
   waitForBindings(1, "queues.testaddress", 1, 0, true);
   addConsumer(0, 0, "queue0", null);
   addConsumer(1, 1, "queue0", null);
   waitForBindings(0, "queues.testaddress", 1, 1, false);
   waitForBindings(1, "queues.testaddress", 1, 1, false);
   waitForBindings(0, "queues.testaddress", 1, 1, true);
   waitForBindings(1, "queues.testaddress", 1, 1, true);
   waitForTopology(servers[1], 2);
   sendWithProperty(0, "queues.testaddress", 10, false, Message.HDR_GROUP_ID, new SimpleString("id1"));
   sendWithProperty(0, "queues.testaddress", 10, false, Message.HDR_GROUP_ID, new SimpleString("id2"));
   sendWithProperty(0, "queues.testaddress", 10, false, Message.HDR_GROUP_ID, new SimpleString("id3"));
   sendWithProperty(0, "queues.testaddress", 10, false, Message.HDR_GROUP_ID, new SimpleString("id4"));
   sendWithProperty(0, "queues.testaddress", 10, false, Message.HDR_GROUP_ID, new SimpleString("id5"));
   sendWithProperty(0, "queues.testaddress", 10, false, Message.HDR_GROUP_ID, new SimpleString("id6"));
   verifyReceiveAllWithGroupIDRoundRobin(0, 30, 0, 1);
   if (!isSharedStore()) {
      SharedNothingBackupActivation backupActivation = (SharedNothingBackupActivation) servers[2].getActivation();
      assertTrue(backupActivation.waitForBackupSync(10, TimeUnit.SECONDS));
   }
   closeSessionFactory(0);
   servers[0].fail(true);
   waitForServerRestart(2);
   setupSessionFactory(2, isNetty());
   addConsumer(2, 2, "queue0", null);
   waitForBindings(2, "queues.testaddress", 1, 1, true);
   waitForBindings(2, "queues.testaddress", 1, 1, false);
   waitForBindings(1, "queues.testaddress", 1, 1, true);
   waitForBindings(1, "queues.testaddress", 1, 1, false);
   sendWithProperty(2, "queues.testaddress", 10, false, Message.HDR_GROUP_ID, new SimpleString("id1"));
   sendWithProperty(2, "queues.testaddress", 10, false, Message.HDR_GROUP_ID, new SimpleString("id2"));
   sendWithProperty(2, "queues.testaddress", 10, false, Message.HDR_GROUP_ID, new SimpleString("id3"));
   sendWithProperty(2, "queues.testaddress", 10, false, Message.HDR_GROUP_ID, new SimpleString("id4"));
   sendWithProperty(2, "queues.testaddress", 10, false, Message.HDR_GROUP_ID, new SimpleString("id5"));
   sendWithProperty(2, "queues.testaddress", 10, false, Message.HDR_GROUP_ID, new SimpleString("id6"));
   verifyReceiveAllWithGroupIDRoundRobin(2, 30, 1, 2);
}
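When a shared store is not used, the test above pins the replication pairing by group name: the backup's ReplicaPolicyConfiguration only syncs with, and takes over for, a live whose ReplicatedPolicyConfiguration declares the same group. A minimal sketch of just that pairing outside the test harness; the class and method names are hypothetical, while the group names match the test:

import org.apache.activemq.artemis.core.config.ha.ReplicaPolicyConfiguration;
import org.apache.activemq.artemis.core.config.ha.ReplicatedPolicyConfiguration;

final class GroupNamePairing {
   static void pair() {
      // Live node 0 replicates under group1, live node 1 under group2.
      ReplicatedPolicyConfiguration live0Policy = new ReplicatedPolicyConfiguration();
      live0Policy.setGroupName("group1");
      ReplicatedPolicyConfiguration live1Policy = new ReplicatedPolicyConfiguration();
      live1Policy.setGroupName("group2");

      // The backup declares group1, so it replicates from live node 0 only,
      // mirroring servers[0] and servers[2] in the test above.
      ReplicaPolicyConfiguration backupPolicy = new ReplicaPolicyConfiguration();
      backupPolicy.setGroupName("group1");
   }
}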
use of org.apache.activemq.artemis.core.config.ha.ReplicaPolicyConfiguration in project activemq-artemis by apache.
the class MultipleLivesMultipleBackupsFailoverTest method createBackupConfig.
protected void createBackupConfig(NodeManager nodeManager, int liveNode, int nodeid, boolean createClusterConnections, int[] otherBackupNodes, int... otherClusterNodes) throws Exception {
   Configuration config1 = super.createDefaultInVMConfig()
         .clearAcceptorConfigurations()
         .addAcceptorConfiguration(createTransportConfiguration(isNetty(), true, generateParams(nodeid, isNetty())))
         .setHAPolicyConfiguration(sharedStore ? new SharedStoreSlavePolicyConfiguration() : new ReplicaPolicyConfiguration())
         .setBindingsDirectory(getBindingsDir() + "_" + liveNode)
         .setJournalDirectory(getJournalDir() + "_" + liveNode)
         .setPagingDirectory(getPageDir() + "_" + liveNode)
         .setLargeMessagesDirectory(getLargeMessagesDir() + "_" + liveNode);
   for (int node : otherBackupNodes) {
      TransportConfiguration liveConnector = createTransportConfiguration(isNetty(), false, generateParams(node, isNetty()));
      config1.addConnectorConfiguration(liveConnector.getName(), liveConnector);
   }
   TransportConfiguration backupConnector = createTransportConfiguration(isNetty(), false, generateParams(nodeid, isNetty()));
   config1.addConnectorConfiguration(backupConnector.getName(), backupConnector);
   String[] clusterNodes = new String[otherClusterNodes.length];
   for (int i = 0; i < otherClusterNodes.length; i++) {
      TransportConfiguration connector = createTransportConfiguration(isNetty(), false, generateParams(otherClusterNodes[i], isNetty()));
      config1.addConnectorConfiguration(connector.getName(), connector);
      clusterNodes[i] = connector.getName();
   }
   config1.addClusterConfiguration(basicClusterConnectionConfig(backupConnector.getName(), clusterNodes));
   servers.put(nodeid, new SameProcessActiveMQServer(createInVMFailoverServer(true, config1, nodeManager, liveNode)));
}