
Example 11 with Pair

Use of org.apache.activemq.artemis.api.core.Pair in project activemq-artemis by apache.

The class NIOJournalCompactTest, method internalCompactTest.

private void internalCompactTest(final boolean preXA, // prepare before compact
final boolean postXA, // prepare after compact
final boolean regularAdd, final boolean performAppend, final boolean performUpdate, boolean performDelete, boolean performNonTransactionalDelete, final boolean pendingTransactions, final boolean deleteTransactRecords, final boolean delayCommit, final boolean createControlFile, final boolean deleteControlFile, final boolean renameFilesAfterCompacting) throws Exception {
    if (performNonTransactionalDelete) {
        performDelete = false;
    }
    if (performDelete) {
        performNonTransactionalDelete = false;
    }
    setup(2, 60 * 4096, false);
    ArrayList<Long> liveIDs = new ArrayList<>();
    ArrayList<Pair<Long, Long>> transactedRecords = new ArrayList<>();
    final CountDownLatch latchDone = new CountDownLatch(1);
    final CountDownLatch latchWait = new CountDownLatch(1);
    journal = new JournalImpl(fileSize, minFiles, minFiles, 0, 0, fileFactory, filePrefix, fileExtension, maxAIO) {

        @Override
        protected SequentialFile createControlFile(final List<JournalFile> files, final List<JournalFile> newFiles, final Pair<String, String> pair) throws Exception {
            if (createControlFile) {
                return super.createControlFile(files, newFiles, pair);
            } else {
                throw new IllegalStateException("Simulating a crash during compact creation");
            }
        }

        @Override
        protected void deleteControlFile(final SequentialFile controlFile) throws Exception {
            if (deleteControlFile) {
                super.deleteControlFile(controlFile);
            }
        }

        @Override
        protected void renameFiles(final List<JournalFile> oldFiles, final List<JournalFile> newFiles) throws Exception {
            if (renameFilesAfterCompacting) {
                super.renameFiles(oldFiles, newFiles);
            }
        }

        @Override
        public void onCompactDone() {
            latchDone.countDown();
            System.out.println("Waiting on Compact");
            try {
                ActiveMQTestBase.waitForLatch(latchWait);
            } catch (InterruptedException e) {
                e.printStackTrace();
            }
            System.out.println("Done");
        }
    };
    journal.setAutoReclaim(false);
    startJournal();
    load();
    long transactionID = 0;
    if (regularAdd) {
        for (int i = 0; i < NIOJournalCompactTest.NUMBER_OF_RECORDS / 2; i++) {
            add(i);
            if (i % 10 == 0 && i > 0) {
                journal.forceMoveNextFile();
            }
            update(i);
        }
        for (int i = NIOJournalCompactTest.NUMBER_OF_RECORDS / 2; i < NIOJournalCompactTest.NUMBER_OF_RECORDS; i++) {
            addTx(transactionID, i);
            updateTx(transactionID, i);
            if (i % 10 == 0) {
                journal.forceMoveNextFile();
            }
            commit(transactionID++);
            update(i);
        }
    }
    if (pendingTransactions) {
        for (long i = 0; i < 100; i++) {
            long recordID = idGenerator.generateID();
            addTx(transactionID, recordID);
            updateTx(transactionID, recordID);
            if (preXA) {
                prepare(transactionID, new SimpleEncoding(10, (byte) 0));
            }
            transactedRecords.add(new Pair<>(transactionID++, recordID));
        }
    }
    if (regularAdd) {
        for (int i = 0; i < NIOJournalCompactTest.NUMBER_OF_RECORDS; i++) {
            if (!(i % 10 == 0)) {
                delete(i);
            } else {
                liveIDs.add((long) i);
            }
        }
    }
    journal.forceMoveNextFile();
    Thread t = new Thread() {

        @Override
        public void run() {
            try {
                journal.testCompact();
            } catch (Exception e) {
                e.printStackTrace();
            }
        }
    };
    t.start();
    ActiveMQTestBase.waitForLatch(latchDone);
    int nextID = NIOJournalCompactTest.NUMBER_OF_RECORDS;
    if (performAppend) {
        for (int i = 0; i < 50; i++) {
            add(nextID++);
            if (i % 10 == 0) {
                journal.forceMoveNextFile();
            }
        }
        for (int i = 0; i < 50; i++) {
            // A completely new transaction (created after the compact started) to add a new record
            // while compacting is still working
            addTx(transactionID, nextID++);
            commit(transactionID++);
            if (i % 10 == 0) {
                journal.forceMoveNextFile();
            }
        }
    }
    if (performUpdate) {
        int count = 0;
        for (Long liveID : liveIDs) {
            if (count++ % 2 == 0) {
                update(liveID);
            } else {
                // A completely new transaction (created after the compact started) to update a record
                // that is being compacted
                updateTx(transactionID, liveID);
                commit(transactionID++);
            }
        }
    }
    if (performDelete) {
        int count = 0;
        for (long liveID : liveIDs) {
            if (count++ % 2 == 0) {
                System.out.println("Deleting no trans " + liveID);
                delete(liveID);
            } else {
                System.out.println("Deleting TX " + liveID);
                // A completely new transaction (created after the compact started) to delete a record
                // that is being compacted
                deleteTx(transactionID, liveID);
                commit(transactionID++);
            }
            System.out.println("Deletes are going into " + ((JournalImpl) journal).getCurrentFile());
        }
    }
    if (performNonTransactionalDelete) {
        for (long liveID : liveIDs) {
            delete(liveID);
        }
    }
    if (pendingTransactions && !delayCommit) {
        for (Pair<Long, Long> tx : transactedRecords) {
            if (postXA) {
                prepare(tx.getA(), new SimpleEncoding(10, (byte) 0));
            }
            if (tx.getA() % 2 == 0) {
                commit(tx.getA());
                if (deleteTransactRecords) {
                    delete(tx.getB());
                }
            } else {
                rollback(tx.getA());
            }
        }
    }
    /**
     * Some independent adds and deletes
     */
    for (int i = 0; i < 1000; i++) {
        long id = idGenerator.generateID();
        add(id);
        delete(id);
        if (i % 100 == 0) {
            journal.forceMoveNextFile();
        }
    }
    journal.forceMoveNextFile();
    latchWait.countDown();
    t.join();
    if (pendingTransactions && delayCommit) {
        for (Pair<Long, Long> tx : transactedRecords) {
            if (postXA) {
                prepare(tx.getA(), new SimpleEncoding(10, (byte) 0));
            }
            if (tx.getA() % 2 == 0) {
                commit(tx.getA());
                if (deleteTransactRecords) {
                    delete(tx.getB());
                }
            } else {
                rollback(tx.getA());
            }
        }
    }
    long lastId = idGenerator.generateID();
    add(lastId);
    if (createControlFile && deleteControlFile && renameFilesAfterCompacting) {
        journal.testCompact();
    }
    journal.flush();
    stopJournal();
    createJournal();
    startJournal();
    loadAndCheck();
    journal.forceMoveNextFile();
    update(lastId);
    stopJournal();
    createJournal();
    startJournal();
    loadAndCheck();
}
Also used : SequentialFile(org.apache.activemq.artemis.core.io.SequentialFile) ArrayList(java.util.ArrayList) SimpleEncoding(org.apache.activemq.artemis.tests.unit.core.journal.impl.fakes.SimpleEncoding) CountDownLatch(java.util.concurrent.CountDownLatch) JournalFile(org.apache.activemq.artemis.core.journal.impl.JournalFile) AtomicLong(java.util.concurrent.atomic.AtomicLong) Pair(org.apache.activemq.artemis.api.core.Pair) JournalImpl(org.apache.activemq.artemis.core.journal.impl.JournalImpl)
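
In this test the Pair instances are simple (transaction ID, record ID) tuples for the pending transactions, later resolved through getA()/getB(). Below is a minimal, self-contained sketch of that pattern; the class name, loop bounds, and printed output are illustrative and not part of the Artemis sources.

import java.util.ArrayList;
import java.util.List;

import org.apache.activemq.artemis.api.core.Pair;

public class PendingTxPairSketch {

    public static void main(String[] args) {
        // Pair is a plain two-slot tuple: getA()/getB() read the slots, setA()/setB() replace them.
        List<Pair<Long, Long>> transactedRecords = new ArrayList<>();

        long transactionID = 0;
        for (long recordID = 100; recordID < 105; recordID++) {
            // remember (transaction ID, record ID) so the pending transaction can be resolved later
            transactedRecords.add(new Pair<>(transactionID++, recordID));
        }

        for (Pair<Long, Long> tx : transactedRecords) {
            // even transaction IDs commit, odd ones roll back -- the same split the test above uses
            String outcome = tx.getA() % 2 == 0 ? "commit" : "rollback";
            System.out.println(outcome + " tx=" + tx.getA() + " record=" + tx.getB());
        }
    }
}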

Example 12 with Pair

Use of org.apache.activemq.artemis.api.core.Pair in project activemq-artemis by apache.

The class ServerSessionImpl, method describeProducersInfo.

@Override
public void describeProducersInfo(JsonArrayBuilder array) throws Exception {
    Map<SimpleString, Pair<Object, AtomicLong>> targetCopy = cloneTargetAddresses();
    for (Map.Entry<SimpleString, Pair<Object, AtomicLong>> entry : targetCopy.entrySet()) {
        String uuid = null;
        if (entry.getValue().getA() != null) {
            uuid = entry.getValue().getA().toString();
        }
        JsonObjectBuilder producerInfo = JsonLoader.createObjectBuilder().add("connectionID", this.getConnectionID().toString()).add("sessionID", this.getName()).add("destination", entry.getKey().toString()).add("lastUUIDSent", nullSafe(uuid)).add("msgSent", entry.getValue().getB().longValue());
        array.add(producerInfo);
    }
}
Also used : SimpleString(org.apache.activemq.artemis.api.core.SimpleString) SimpleString(org.apache.activemq.artemis.api.core.SimpleString) JsonObjectBuilder(javax.json.JsonObjectBuilder) Map(java.util.Map) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) HashMap(java.util.HashMap) Pair(org.apache.activemq.artemis.api.core.Pair)
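
Here each Pair<Object, AtomicLong> couples the last message UUID sent to an address (A, possibly null) with a running message counter (B). A minimal sketch of maintaining such a map follows; the address name and the single recorded send are made up for illustration.

import java.util.HashMap;
import java.util.Map;
import java.util.UUID;
import java.util.concurrent.atomic.AtomicLong;

import org.apache.activemq.artemis.api.core.Pair;
import org.apache.activemq.artemis.api.core.SimpleString;

public class ProducerCountersSketch {

    public static void main(String[] args) {
        // per-address state: A = last message UUID sent (may be null), B = number of messages sent
        Map<SimpleString, Pair<Object, AtomicLong>> targetAddresses = new HashMap<>();

        SimpleString address = new SimpleString("example.address"); // hypothetical address
        targetAddresses.computeIfAbsent(address, a -> new Pair<Object, AtomicLong>(null, new AtomicLong(0)));

        // record one send: replace the UUID slot and bump the counter
        Pair<Object, AtomicLong> state = targetAddresses.get(address);
        state.setA(UUID.randomUUID());
        state.getB().incrementAndGet();

        String uuid = state.getA() == null ? null : state.getA().toString();
        System.out.println(address + " lastUUIDSent=" + uuid + " msgSent=" + state.getB().longValue());
    }
}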

Example 13 with Pair

Use of org.apache.activemq.artemis.api.core.Pair in project activemq-artemis by apache.

The class SharedNothingBackupActivation, method run.

@Override
public void run() {
    try {
        logger.trace("SharedNothingBackupActivation..start");
        synchronized (activeMQServer) {
            activeMQServer.setState(ActiveMQServerImpl.SERVER_STATE.STARTED);
        }
        // move all data away:
        activeMQServer.getNodeManager().stop();
        activeMQServer.moveServerData(replicaPolicy.getMaxSavedReplicatedJournalsSize());
        activeMQServer.getNodeManager().start();
        synchronized (this) {
            if (closed) {
                logger.trace("SharedNothingBackupActivation is closed, ignoring activation!");
                return;
            }
        }
        boolean scalingDown = replicaPolicy.getScaleDownPolicy() != null && replicaPolicy.getScaleDownPolicy().isEnabled();
        if (!activeMQServer.initialisePart1(scalingDown)) {
            if (logger.isTraceEnabled()) {
                logger.trace("could not initialize part1 " + scalingDown);
            }
            return;
        }
        logger.trace("Waiting for a synchronize now...");
        synchronized (this) {
            logger.trace("Entered a synchronized");
            if (closed)
                return;
            backupQuorum = new SharedNothingBackupQuorum(activeMQServer.getStorageManager(), activeMQServer.getNodeManager(), activeMQServer.getScheduledPool(), networkHealthCheck, replicaPolicy.getQuorumSize(), replicaPolicy.getVoteRetries(), replicaPolicy.getVoteRetryWait());
            activeMQServer.getClusterManager().getQuorumManager().registerQuorum(backupQuorum);
            activeMQServer.getClusterManager().getQuorumManager().registerQuorumHandler(new ServerConnectVoteHandler(activeMQServer));
        }
        // use a Node Locator to connect to the cluster
        LiveNodeLocator nodeLocator;
        if (activationParams.get(ActivationParams.REPLICATION_ENDPOINT) != null) {
            TopologyMember member = (TopologyMember) activationParams.get(ActivationParams.REPLICATION_ENDPOINT);
            nodeLocator = new NamedNodeIdNodeLocator(member.getNodeId(), new Pair<>(member.getLive(), member.getBackup()));
        } else {
            nodeLocator = replicaPolicy.getGroupName() == null ? new AnyLiveNodeLocatorForReplication(backupQuorum, activeMQServer) : new NamedLiveNodeLocatorForReplication(replicaPolicy.getGroupName(), backupQuorum);
        }
        ClusterController clusterController = activeMQServer.getClusterManager().getClusterController();
        clusterController.addClusterTopologyListenerForReplication(nodeLocator);
        logger.trace("Waiting on cluster connection");
        clusterController.awaitConnectionToReplicationCluster();
        logger.trace("Cluster Connected");
        clusterController.addIncomingInterceptorForReplication(new ReplicationError(nodeLocator));
        // nodeManager.startBackup();
        if (logger.isTraceEnabled()) {
            logger.trace("Starting backup manager");
        }
        activeMQServer.getBackupManager().start();
        if (logger.isTraceEnabled()) {
            logger.trace("Set backup Quorum");
        }
        replicationEndpoint.setBackupQuorum(backupQuorum);
        replicationEndpoint.setExecutor(activeMQServer.getExecutorFactory().getExecutor());
        EndpointConnector endpointConnector = new EndpointConnector();
        if (logger.isTraceEnabled()) {
            logger.trace("Starting Backup Server");
        }
        ActiveMQServerLogger.LOGGER.backupServerStarted(activeMQServer.getVersion().getFullVersion(), activeMQServer.getNodeManager().getNodeId());
        activeMQServer.setState(ActiveMQServerImpl.SERVER_STATE.STARTED);
        if (logger.isTraceEnabled())
            logger.trace("Setting server state as started");
        SharedNothingBackupQuorum.BACKUP_ACTIVATION signal;
        do {
            if (closed) {
                if (logger.isTraceEnabled()) {
                    logger.trace("Activation is closed, so giving up");
                }
                return;
            }
            if (logger.isTraceEnabled()) {
                logger.trace("looking up the node through nodeLocator.locateNode()");
            }
            // locate the first live server to try to replicate
            nodeLocator.locateNode();
            Pair<TransportConfiguration, TransportConfiguration> possibleLive = nodeLocator.getLiveConfiguration();
            nodeID = nodeLocator.getNodeID();
            if (logger.isTraceEnabled()) {
                logger.trace("nodeID = " + nodeID);
            }
            // in a normal (non failback) scenario if we couldn't find our live server we should fail
            if (!attemptFailBack) {
                if (logger.isTraceEnabled()) {
                    logger.trace("attemptFailback=false, nodeID=" + nodeID);
                }
                // this shouldn't happen
                if (nodeID == null) {
                    logger.debug("Throwing a RuntimeException as nodeID==null ant attemptFailback=false");
                    throw new RuntimeException("Could not establish the connection");
                }
                activeMQServer.getNodeManager().setNodeID(nodeID);
            }
            try {
                if (logger.isTraceEnabled()) {
                    logger.trace("Calling clusterController.connectToNodeInReplicatedCluster(" + possibleLive.getA() + ")");
                }
                clusterControl = clusterController.connectToNodeInReplicatedCluster(possibleLive.getA());
            } catch (Exception e) {
                logger.debug(e.getMessage(), e);
                if (possibleLive.getB() != null) {
                    try {
                        clusterControl = clusterController.connectToNodeInReplicatedCluster(possibleLive.getB());
                    } catch (Exception e1) {
                        clusterControl = null;
                    }
                }
            }
            if (clusterControl == null) {
                if (logger.isTraceEnabled()) {
                    logger.trace("sleeping " + clusterController.getRetryIntervalForReplicatedCluster() + " it should retry");
                }
                // it's ok to retry here since we haven't started replication yet;
                // the server may just have gone away since discovery
                Thread.sleep(clusterController.getRetryIntervalForReplicatedCluster());
                signal = SharedNothingBackupQuorum.BACKUP_ACTIVATION.ALREADY_REPLICATING;
                continue;
            }
            activeMQServer.getThreadPool().execute(endpointConnector);
            /**
             * Wait for a signal from the quorum manager; at this point, if replication has been successful, we can
             * fail over or if there is an error trying to replicate (such as already replicating) we try the
             * process again on the next live server.  All the action happens inside {@link BackupQuorum}
             */
            signal = backupQuorum.waitForStatusChange();
            if (logger.isTraceEnabled()) {
                logger.trace("Got a signal " + signal + " through backupQuorum.waitForStatusChange()");
            }
            /**
             * replicationEndpoint will be holding lots of open files. Make sure they get
             * closed/sync'ed.
             */
            ActiveMQServerImpl.stopComponent(replicationEndpoint);
            // time to give up
            if (!activeMQServer.isStarted() || signal == STOP) {
                if (logger.isTraceEnabled()) {
                    logger.trace("giving up on the activation:: activemqServer.isStarted=" + activeMQServer.isStarted() + " while signal = " + signal);
                }
                return;
            } else if (signal == FAIL_OVER) {
                // time to fail over
                if (logger.isTraceEnabled()) {
                    logger.trace("signal == FAIL_OVER, breaking the loop");
                }
                break;
            } else if (signal == SharedNothingBackupQuorum.BACKUP_ACTIVATION.FAILURE_REPLICATING) {
                // something has gone badly wrong, restart from scratch
                if (logger.isTraceEnabled()) {
                    logger.trace("Starting a new thread to stop the server!");
                }
                Thread startThread = new Thread(new Runnable() {

                    @Override
                    public void run() {
                        try {
                            if (logger.isTraceEnabled()) {
                                logger.trace("Calling activeMQServer.stop() and start() to restart the server");
                            }
                            activeMQServer.stop();
                            activeMQServer.start();
                        } catch (Exception e) {
                            ActiveMQServerLogger.LOGGER.errorRestartingBackupServer(e, activeMQServer);
                        }
                    }
                });
                startThread.start();
                return;
            }
            // ok, this live is no good, let's reset and try again
            // close this session factory, we're done with it
            clusterControl.close();
            backupQuorum.reset();
            if (replicationEndpoint.getChannel() != null) {
                replicationEndpoint.getChannel().close();
                replicationEndpoint.setChannel(null);
            }
        } while (signal == SharedNothingBackupQuorum.BACKUP_ACTIVATION.ALREADY_REPLICATING);
        if (logger.isTraceEnabled()) {
            logger.trace("Activation loop finished, current signal = " + signal);
        }
        activeMQServer.getClusterManager().getQuorumManager().unRegisterQuorum(backupQuorum);
        if (!isRemoteBackupUpToDate()) {
            logger.debug("throwing exception for !isRemoteBackupUptoDate");
            throw ActiveMQMessageBundle.BUNDLE.backupServerNotInSync();
        }
        if (logger.isTraceEnabled()) {
            logger.trace("@@@ setReplicaPolicy::" + replicaPolicy);
        }
        replicaPolicy.getReplicatedPolicy().setReplicaPolicy(replicaPolicy);
        activeMQServer.setHAPolicy(replicaPolicy.getReplicatedPolicy());
        synchronized (activeMQServer) {
            if (!activeMQServer.isStarted()) {
                logger.trace("Server is stopped, giving up right before becomingLive");
                return;
            }
            ActiveMQServerLogger.LOGGER.becomingLive(activeMQServer);
            logger.trace("stop backup");
            activeMQServer.getNodeManager().stopBackup();
            logger.trace("start store manager");
            activeMQServer.getStorageManager().start();
            logger.trace("activated");
            activeMQServer.getBackupManager().activated();
            if (scalingDown) {
                logger.trace("Scalling down...");
                activeMQServer.initialisePart2(true);
            } else {
                logger.trace("Setting up new activation");
                activeMQServer.setActivation(new SharedNothingLiveActivation(activeMQServer, replicaPolicy.getReplicatedPolicy()));
                logger.trace("initialize part 2");
                activeMQServer.initialisePart2(false);
                if (activeMQServer.getIdentity() != null) {
                    ActiveMQServerLogger.LOGGER.serverIsLive(activeMQServer.getIdentity());
                } else {
                    ActiveMQServerLogger.LOGGER.serverIsLive();
                }
            }
            logger.trace("completeActivation at the end");
            activeMQServer.completeActivation();
        }
    } catch (Exception e) {
        if (logger.isTraceEnabled()) {
            logger.trace(e.getMessage() + ", serverStarted=" + activeMQServer.isStarted(), e);
        }
        if ((e instanceof InterruptedException || e instanceof IllegalStateException) && !activeMQServer.isStarted())
            // do not log these errors if the server is being stopped.
            return;
        ActiveMQServerLogger.LOGGER.initializationError(e);
    }
}
Also used : TransportConfiguration(org.apache.activemq.artemis.api.core.TransportConfiguration) ActiveMQException(org.apache.activemq.artemis.api.core.ActiveMQException) ActiveMQInternalErrorException(org.apache.activemq.artemis.api.core.ActiveMQInternalErrorException) ClusterController(org.apache.activemq.artemis.core.server.cluster.ClusterController) SharedNothingBackupQuorum(org.apache.activemq.artemis.core.server.cluster.qourum.SharedNothingBackupQuorum) LiveNodeLocator(org.apache.activemq.artemis.core.server.LiveNodeLocator) TopologyMember(org.apache.activemq.artemis.api.core.client.TopologyMember) Pair(org.apache.activemq.artemis.api.core.Pair)
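
In this activation loop the Pair returned by the node locator carries the live connector in A and a possibly null backup connector in B, tried in that order. Below is a minimal sketch of that fallback shape; the Connector interface and connectWithFallback method are hypothetical stand-ins for clusterController.connectToNodeInReplicatedCluster(...), not Artemis API.

import org.apache.activemq.artemis.api.core.Pair;
import org.apache.activemq.artemis.api.core.TransportConfiguration;

public class LiveBackupFallbackSketch {

    // hypothetical stand-in for clusterController.connectToNodeInReplicatedCluster(...)
    interface Connector {
        AutoCloseable connect(TransportConfiguration config) throws Exception;
    }

    static AutoCloseable connectWithFallback(Pair<TransportConfiguration, TransportConfiguration> possibleLive,
                                             Connector connector) {
        try {
            // A = live connector, tried first
            return connector.connect(possibleLive.getA());
        } catch (Exception e) {
            // B = backup connector, null when the node has no backup
            if (possibleLive.getB() != null) {
                try {
                    return connector.connect(possibleLive.getB());
                } catch (Exception ignored) {
                    // fall through: caller treats null as "retry later"
                }
            }
            return null;
        }
    }
}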

Example 14 with Pair

Use of org.apache.activemq.artemis.api.core.Pair in project activemq-artemis by apache.

The class TransportConfigurationEncodingSupportTest, method testTransportConfigurations.

@Test
public void testTransportConfigurations() throws Exception {
    List<Pair<TransportConfiguration, TransportConfiguration>> connectorConfigs = new ArrayList<>();
    Map<String, Object> liveParams = new HashMap<>();
    liveParams.put(TransportConstants.PORT_PROP_NAME, 5665);
    TransportConfiguration live1 = new TransportConfiguration(NettyConnectorFactory.class.getName(), liveParams);
    Map<String, Object> backupParams = new HashMap<>();
    backupParams.put(TransportConstants.PORT_PROP_NAME, 5775);
    TransportConfiguration backup1 = new TransportConfiguration(NettyConnectorFactory.class.getName(), backupParams);
    Map<String, Object> liveParams2 = new HashMap<>();
    liveParams2.put(TransportConstants.PORT_PROP_NAME, 6665);
    TransportConfiguration live2 = new TransportConfiguration(NettyConnectorFactory.class.getName(), liveParams2);
    connectorConfigs.add(new Pair<>(live1, backup1));
    connectorConfigs.add(new Pair<TransportConfiguration, TransportConfiguration>(live2, null));
    ActiveMQBuffer buffer = ActiveMQBuffers.fixedBuffer(TransportConfigurationEncodingSupport.getEncodeSize(connectorConfigs));
    TransportConfigurationEncodingSupport.encodeConfigs(buffer, connectorConfigs);
    assertEquals(buffer.capacity(), buffer.writerIndex());
    buffer.readerIndex(0);
    List<Pair<TransportConfiguration, TransportConfiguration>> decodedConfigs = TransportConfigurationEncodingSupport.decodeConfigs(buffer);
    assertNotNull(decodedConfigs);
    assertEquals(2, decodedConfigs.size());
    assertEquivalent(connectorConfigs.get(0).getA(), decodedConfigs.get(0).getA());
    assertEquivalent(connectorConfigs.get(0).getB(), decodedConfigs.get(0).getB());
    assertEquivalent(connectorConfigs.get(1).getA(), decodedConfigs.get(1).getA());
    assertNull(decodedConfigs.get(1).getB());
}
Also used : NettyConnectorFactory(org.apache.activemq.artemis.core.remoting.impl.netty.NettyConnectorFactory) HashMap(java.util.HashMap) ArrayList(java.util.ArrayList) TransportConfiguration(org.apache.activemq.artemis.api.core.TransportConfiguration) Pair(org.apache.activemq.artemis.api.core.Pair) ActiveMQBuffer(org.apache.activemq.artemis.api.core.ActiveMQBuffer) Test(org.junit.Test)
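
The test models each cluster node as a Pair whose A slot is the live connector and whose B slot is the backup connector, with null meaning "no backup". A minimal sketch of building such a list, reusing only constructors already shown in the test above; the port value is made up.

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

import org.apache.activemq.artemis.api.core.Pair;
import org.apache.activemq.artemis.api.core.TransportConfiguration;
import org.apache.activemq.artemis.core.remoting.impl.netty.NettyConnectorFactory;
import org.apache.activemq.artemis.core.remoting.impl.netty.TransportConstants;

public class ConnectorPairListSketch {

    public static void main(String[] args) {
        Map<String, Object> liveParams = new HashMap<>();
        liveParams.put(TransportConstants.PORT_PROP_NAME, 61616); // hypothetical port
        TransportConfiguration live = new TransportConfiguration(NettyConnectorFactory.class.getName(), liveParams);

        // a node without a backup is modelled as a Pair whose B slot is null
        List<Pair<TransportConfiguration, TransportConfiguration>> connectorConfigs = new ArrayList<>();
        connectorConfigs.add(new Pair<TransportConfiguration, TransportConfiguration>(live, null));

        for (Pair<TransportConfiguration, TransportConfiguration> pair : connectorConfigs) {
            System.out.println("live=" + pair.getA() + " backup=" + pair.getB());
        }
    }
}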

Example 15 with Pair

Use of org.apache.activemq.artemis.api.core.Pair in project activemq-artemis by apache.

The class JournalStorageManager, method startReplication.

@Override
public void startReplication(ReplicationManager replicationManager, PagingManager pagingManager, String nodeID, final boolean autoFailBack, long initialReplicationSyncTimeout) throws Exception {
    if (!started) {
        throw new IllegalStateException("JournalStorageManager must be started...");
    }
    assert replicationManager != null;
    if (!(messageJournal instanceof JournalImpl) || !(bindingsJournal instanceof JournalImpl)) {
        throw ActiveMQMessageBundle.BUNDLE.notJournalImpl();
    }
    // We first do a compact without any locks, to avoid copying unnecessary data over the network.
    // We do this without holding the storageManager lock, so the journal stays open while compact is being done
    originalMessageJournal.scheduleCompactAndBlock(-1);
    originalBindingsJournal.scheduleCompactAndBlock(-1);
    JournalFile[] messageFiles = null;
    JournalFile[] bindingsFiles = null;
    // We get a picture of the current situation of the large messages
    // and we send the current messages while more state is coming
    Map<Long, Pair<String, Long>> pendingLargeMessages = null;
    try {
        Map<SimpleString, Collection<Integer>> pageFilesToSync;
        storageManagerLock.writeLock().lock();
        try {
            if (isReplicated())
                throw new ActiveMQIllegalStateException("already replicating");
            replicator = replicationManager;
            if (!((JournalImpl) originalMessageJournal).flushAppendExecutor(10, TimeUnit.SECONDS)) {
                throw new Exception("Live message journal is busy");
            }
            if (!((JournalImpl) originalBindingsJournal).flushAppendExecutor(10, TimeUnit.SECONDS)) {
                throw new Exception("Live bindings journal is busy");
            }
            // Establishes lock
            originalMessageJournal.synchronizationLock();
            originalBindingsJournal.synchronizationLock();
            try {
                originalBindingsJournal.replicationSyncPreserveOldFiles();
                originalMessageJournal.replicationSyncPreserveOldFiles();
                pagingManager.lock();
                try {
                    pagingManager.disableCleanup();
                    messageFiles = prepareJournalForCopy(originalMessageJournal, JournalContent.MESSAGES, nodeID, autoFailBack);
                    bindingsFiles = prepareJournalForCopy(originalBindingsJournal, JournalContent.BINDINGS, nodeID, autoFailBack);
                    pageFilesToSync = getPageInformationForSync(pagingManager);
                    pendingLargeMessages = recoverPendingLargeMessages();
                } finally {
                    pagingManager.unlock();
                }
            } finally {
                originalMessageJournal.synchronizationUnlock();
                originalBindingsJournal.synchronizationUnlock();
            }
            bindingsJournal = new ReplicatedJournal(((byte) 0), originalBindingsJournal, replicator);
            messageJournal = new ReplicatedJournal((byte) 1, originalMessageJournal, replicator);
            // We need to send the list while locking otherwise part of the body might get sent too soon
            // it will send a list of IDs that we are allocating
            replicator.sendLargeMessageIdListMessage(pendingLargeMessages);
        } finally {
            storageManagerLock.writeLock().unlock();
        }
        sendJournalFile(messageFiles, JournalContent.MESSAGES);
        sendJournalFile(bindingsFiles, JournalContent.BINDINGS);
        sendLargeMessageFiles(pendingLargeMessages);
        sendPagesToBackup(pageFilesToSync, pagingManager);
        storageManagerLock.writeLock().lock();
        try {
            if (replicator != null) {
                replicator.sendSynchronizationDone(nodeID, initialReplicationSyncTimeout);
                performCachedLargeMessageDeletes();
            }
        } finally {
            storageManagerLock.writeLock().unlock();
        }
    } catch (Exception e) {
        ActiveMQServerLogger.LOGGER.unableToStartReplication(e);
        stopReplication();
        throw e;
    } finally {
        // Re-enable compact and reclaim of journal files
        originalBindingsJournal.replicationSyncFinished();
        originalMessageJournal.replicationSyncFinished();
        pagingManager.resumeCleanup();
    }
}
Also used : ActiveMQIllegalStateException(org.apache.activemq.artemis.api.core.ActiveMQIllegalStateException) ActiveMQIllegalStateException(org.apache.activemq.artemis.api.core.ActiveMQIllegalStateException) SimpleString(org.apache.activemq.artemis.api.core.SimpleString) ReplicatedJournal(org.apache.activemq.artemis.core.replication.ReplicatedJournal) RejectedExecutionException(java.util.concurrent.RejectedExecutionException) ActiveMQException(org.apache.activemq.artemis.api.core.ActiveMQException) ActiveMQIllegalStateException(org.apache.activemq.artemis.api.core.ActiveMQIllegalStateException) ActiveMQInternalErrorException(org.apache.activemq.artemis.api.core.ActiveMQInternalErrorException) JournalFile(org.apache.activemq.artemis.core.journal.impl.JournalFile) Collection(java.util.Collection) JournalImpl(org.apache.activemq.artemis.core.journal.impl.JournalImpl) Pair(org.apache.activemq.artemis.api.core.Pair)
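
Here the pending large messages are shipped as a Map from message ID to a Pair<String, Long>. The sketch below treats the Pair purely as an opaque (String, Long) tuple, since the exact meaning of the two slots is internal to the storage manager; the map entries are invented for illustration.

import java.util.HashMap;
import java.util.Map;

import org.apache.activemq.artemis.api.core.Pair;

public class PendingLargeMessagesSketch {

    public static void main(String[] args) {
        // message ID -> (String, Long) pair; entries here are purely illustrative
        Map<Long, Pair<String, Long>> pendingLargeMessages = new HashMap<>();
        pendingLargeMessages.put(1001L, new Pair<>("1001.msg", 4096L)); // hypothetical entry
        pendingLargeMessages.put(1002L, new Pair<>("1002.msg", 8192L)); // hypothetical entry

        // iterate the map and read both slots of each Pair
        pendingLargeMessages.forEach((id, pair) ->
            System.out.println("id=" + id + " a=" + pair.getA() + " b=" + pair.getB()));
    }
}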

Aggregations

Pair (org.apache.activemq.artemis.api.core.Pair)29 ArrayList (java.util.ArrayList)16 HashMap (java.util.HashMap)12 SimpleString (org.apache.activemq.artemis.api.core.SimpleString)12 List (java.util.List)6 Map (java.util.Map)6 SequentialFile (org.apache.activemq.artemis.core.io.SequentialFile)5 JournalFile (org.apache.activemq.artemis.core.journal.impl.JournalFile)5 HashSet (java.util.HashSet)4 TransportConfiguration (org.apache.activemq.artemis.api.core.TransportConfiguration)4 JournalImpl (org.apache.activemq.artemis.core.journal.impl.JournalImpl)4 Test (org.junit.Test)4 ConcurrentHashMap (java.util.concurrent.ConcurrentHashMap)3 ActiveMQBuffer (org.apache.activemq.artemis.api.core.ActiveMQBuffer)3 Message (org.apache.activemq.artemis.api.core.Message)3 PreparedTransactionInfo (org.apache.activemq.artemis.core.journal.PreparedTransactionInfo)3 RecordInfo (org.apache.activemq.artemis.core.journal.RecordInfo)3 LargeServerMessage (org.apache.activemq.artemis.core.server.LargeServerMessage)3 PersistedBindings (org.apache.activemq.artemis.jms.persistence.config.PersistedBindings)3 PersistedType (org.apache.activemq.artemis.jms.persistence.config.PersistedType)3