Use of org.apache.activemq.artemis.core.postoffice.DuplicateIDCache in project activemq-artemis by apache.
The class AbstractJournalStorageManager, method loadPreparedTransactions:
private void loadPreparedTransactions(final PostOffice postOffice,
                                      final PagingManager pagingManager,
                                      final ResourceManager resourceManager,
                                      final Map<Long, QueueBindingInfo> queueInfos,
                                      final List<PreparedTransactionInfo> preparedTransactions,
                                      final Map<SimpleString, List<Pair<byte[], Long>>> duplicateIDMap,
                                      final Map<Long, PageSubscription> pageSubscriptions,
                                      final Set<Pair<Long, Long>> pendingLargeMessages,
                                      JournalLoader journalLoader) throws Exception {
   // recover prepared transactions
   for (PreparedTransactionInfo preparedTransaction : preparedTransactions) {
      XidEncoding encodingXid = new XidEncoding(preparedTransaction.getExtraData());
      Xid xid = encodingXid.xid;
      Transaction tx = new TransactionImpl(preparedTransaction.getId(), xid, this);
      List<MessageReference> referencesToAck = new ArrayList<>();
      Map<Long, Message> messages = new HashMap<>();
      // first get any sent messages for this tx and recreate
      for (RecordInfo record : preparedTransaction.getRecords()) {
         byte[] data = record.data;
         ActiveMQBuffer buff = ActiveMQBuffers.wrappedBuffer(data);
         byte recordType = record.getUserRecordType();
         switch (recordType) {
            case JournalRecordIds.ADD_LARGE_MESSAGE: {
               messages.put(record.id, parseLargeMessage(messages, buff));
               break;
            }
            case JournalRecordIds.ADD_MESSAGE: {
               // no-op: nothing is restored for this record type here
               break;
            }
            case JournalRecordIds.ADD_MESSAGE_PROTOCOL: {
               Message message = MessagePersister.getInstance().decode(buff, null);
               messages.put(record.id, message);
               break;
            }
            case JournalRecordIds.ADD_REF: {
               long messageID = record.id;
               RefEncoding encoding = new RefEncoding();
               encoding.decode(buff);
               Message message = messages.get(messageID);
               if (message == null) {
                  throw new IllegalStateException("Cannot find message with id " + messageID);
               }
               journalLoader.handlePreparedSendMessage(message, tx, encoding.queueID);
               break;
            }
            case JournalRecordIds.ACKNOWLEDGE_REF: {
               long messageID = record.id;
               RefEncoding encoding = new RefEncoding();
               encoding.decode(buff);
               journalLoader.handlePreparedAcknowledge(messageID, referencesToAck, encoding.queueID);
               break;
            }
            case JournalRecordIds.PAGE_TRANSACTION: {
               PageTransactionInfo pageTransactionInfo = new PageTransactionInfoImpl();
               pageTransactionInfo.decode(buff);
               if (record.isUpdate) {
                  PageTransactionInfo pgTX = pagingManager.getTransaction(pageTransactionInfo.getTransactionID());
                  pgTX.reloadUpdate(this, pagingManager, tx, pageTransactionInfo.getNumberOfMessages());
               } else {
                  pageTransactionInfo.setCommitted(false);
                  tx.putProperty(TransactionPropertyIndexes.PAGE_TRANSACTION, pageTransactionInfo);
                  pagingManager.addTransaction(pageTransactionInfo);
                  tx.addOperation(new FinishPageMessageOperation());
               }
               break;
            }
            case SET_SCHEDULED_DELIVERY_TIME: {
               // nothing to do: the scheduled delivery time is already carried
               // in the header of the message being sent
               break;
            }
            case DUPLICATE_ID: {
               // We need to load the duplicate IDs at prepare time too
               DuplicateIDEncoding encoding = new DuplicateIDEncoding();
               encoding.decode(buff);
               DuplicateIDCache cache = postOffice.getDuplicateIDCache(encoding.address);
               cache.load(tx, encoding.duplID);
               break;
            }
            case ACKNOWLEDGE_CURSOR: {
               CursorAckRecordEncoding encoding = new CursorAckRecordEncoding();
               encoding.decode(buff);
               encoding.position.setRecordID(record.id);
               PageSubscription sub = locateSubscription(encoding.queueID, pageSubscriptions, queueInfos, pagingManager);
               if (sub != null) {
                  sub.reloadPreparedACK(tx, encoding.position);
                  referencesToAck.add(new PagedReferenceImpl(encoding.position, null, sub));
               } else {
                  ActiveMQServerLogger.LOGGER.journalCannotFindQueueReloadingACK(encoding.queueID);
               }
               break;
            }
            case PAGE_CURSOR_COUNTER_VALUE: {
               ActiveMQServerLogger.LOGGER.journalPAGEOnPrepared();
               break;
            }
            case PAGE_CURSOR_COUNTER_INC: {
               PageCountRecordInc encoding = new PageCountRecordInc();
               encoding.decode(buff);
               PageSubscription sub = locateSubscription(encoding.getQueueID(), pageSubscriptions, queueInfos, pagingManager);
               if (sub != null) {
                  sub.getCounter().applyIncrementOnTX(tx, record.id, encoding.getValue(), encoding.getPersistentSize());
                  sub.notEmpty();
               } else {
                  ActiveMQServerLogger.LOGGER.journalCannotFindQueueReloadingACK(encoding.getQueueID());
               }
               break;
            }
            default: {
               ActiveMQServerLogger.LOGGER.journalInvalidRecordType(recordType);
            }
         }
      }
      for (RecordInfo recordDeleted : preparedTransaction.getRecordsToDelete()) {
         byte[] data = recordDeleted.data;
         if (data.length > 0) {
            ActiveMQBuffer buff = ActiveMQBuffers.wrappedBuffer(data);
            byte b = buff.readByte();
            switch (b) {
               case ADD_LARGE_MESSAGE_PENDING: {
                  long messageID = buff.readLong();
                  if (!pendingLargeMessages.remove(new Pair<>(recordDeleted.id, messageID))) {
                     ActiveMQServerLogger.LOGGER.largeMessageNotFound(recordDeleted.id);
                  }
                  installLargeMessageConfirmationOnTX(tx, recordDeleted.id);
                  break;
               }
               default:
                  ActiveMQServerLogger.LOGGER.journalInvalidRecordTypeOnPreparedTX(b);
            }
         }
      }
      journalLoader.handlePreparedTransaction(tx, referencesToAck, xid, resourceManager);
   }
}
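Note that the DUPLICATE_ID branch above calls cache.load(tx, duplID) rather than addToCache: at recovery time the journal record already exists, so the ID only needs to be re-attached to the recovered transaction. A minimal sketch contrasting the two entry points, using only the DuplicateIDCache methods that appear in this section (the class and method names below are hypothetical):

import org.apache.activemq.artemis.api.core.SimpleString;
import org.apache.activemq.artemis.core.postoffice.DuplicateIDCache;
import org.apache.activemq.artemis.core.postoffice.PostOffice;
import org.apache.activemq.artemis.core.transaction.Transaction;

public final class DuplicateIdPathsSketch {

   // Runtime path: record a new duplicate ID as part of an active transaction.
   static void onSend(PostOffice postOffice, SimpleString address, byte[] duplID, Transaction tx) throws Exception {
      DuplicateIDCache cache = postOffice.getDuplicateIDCache(address);
      cache.addToCache(duplID, tx);
   }

   // Recovery path: re-attach an already-journaled ID to a prepared transaction,
   // as the DUPLICATE_ID case above does.
   static void onRecover(PostOffice postOffice, SimpleString address, byte[] duplID, Transaction tx) throws Exception {
      DuplicateIDCache cache = postOffice.getDuplicateIDCache(address);
      cache.load(tx, duplID);
   }
}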
Use of org.apache.activemq.artemis.core.postoffice.DuplicateIDCache in project activemq-artemis by apache.
The class BridgeTest, method testWithDuplicates:
@Test
public void testWithDuplicates() throws Exception {
   Map<String, Object> server0Params = new HashMap<>();
   server0 = createClusteredServerWithParams(isNetty(), 0, true, server0Params);

   Map<String, Object> server1Params = new HashMap<>();
   addTargetParameters(server1Params);
   server1 = createClusteredServerWithParams(isNetty(), 1, true, server1Params);

   final String testAddress = "testAddress";
   final String queueName0 = "queue0";
   final String forwardAddress = "forwardAddress";
   final String queueName1 = "forwardQueue";

   Map<String, TransportConfiguration> connectors = new HashMap<>();
   TransportConfiguration server0tc = new TransportConfiguration(getConnector(), server0Params);
   TransportConfiguration server1tc = new TransportConfiguration(getConnector(), server1Params);
   connectors.put(server1tc.getName(), server1tc);
   server0.getConfiguration().setConnectorConfigurations(connectors);

   ArrayList<String> staticConnectors = new ArrayList<>();
   staticConnectors.add(server1tc.getName());
   BridgeConfiguration bridgeConfiguration = new BridgeConfiguration()
      .setName("bridge1")
      .setQueueName(queueName0)
      .setForwardingAddress(forwardAddress)
      .setRetryInterval(100)
      .setReconnectAttemptsOnSameNode(-1)
      .setConfirmationWindowSize(0)
      .setStaticConnectors(staticConnectors);
   List<BridgeConfiguration> bridgeConfigs = new ArrayList<>();
   bridgeConfigs.add(bridgeConfiguration);
   server0.getConfiguration().setBridgeConfigurations(bridgeConfigs);

   CoreQueueConfiguration queueConfig0 = new CoreQueueConfiguration().setAddress(testAddress).setName(queueName0);
   List<CoreQueueConfiguration> queueConfigs0 = new ArrayList<>();
   queueConfigs0.add(queueConfig0);
   server0.getConfiguration().setQueueConfigurations(queueConfigs0);
   server0.start();

   locator = addServerLocator(ActiveMQClient.createServerLocatorWithoutHA(server0tc, server1tc));
   ClientSessionFactory sf0 = locator.createSessionFactory(server0tc);
   ClientSession session0 = sf0.createSession(false, true, true);
   ClientProducer producer0 = session0.createProducer(new SimpleString(testAddress));

   final int numMessages = 1000;
   final SimpleString propKey = new SimpleString("testkey");
   final SimpleString selectorKey = new SimpleString("animal");
   for (int i = 0; i < numMessages; i++) {
      ClientMessage message = session0.createMessage(true);
      message.getBodyBuffer().writeBytes(new byte[1024]);
      message.putIntProperty(propKey, i);
      message.putStringProperty(selectorKey, new SimpleString("monkey" + i));
      producer0.send(message);
   }
   server1.start();

   // Pre-insert duplicate IDs for the first 100 messages so the bridge's
   // forwards of those messages are rejected as duplicates on server1
   {
      long[] ids = new long[100];
      Queue queue = server0.locateQueue(new SimpleString(queueName0));
      LinkedListIterator<MessageReference> iterator = queue.iterator();
      for (int i = 0; i < 100; i++) {
         iterator.hasNext();
         ids[i] = iterator.next().getMessage().getMessageID();
      }
      iterator.close();
      DuplicateIDCache duplicateTargetCache = server1.getPostOffice().getDuplicateIDCache(PostOfficeImpl.BRIDGE_CACHE_STR.concat(forwardAddress));
      TransactionImpl tx = new TransactionImpl(server1.getStorageManager());
      for (long id : ids) {
         byte[] duplicateArray = BridgeImpl.getDuplicateBytes(server0.getNodeManager().getUUID(), id);
         duplicateTargetCache.addToCache(duplicateArray, tx);
      }
      tx.commit();
   }
   Thread.sleep(1000);

   ClientSessionFactory sf1 = locator.createSessionFactory(server1tc);
   ClientSession session1 = sf1.createSession(false, true, true);
   try {
      session1.createQueue(forwardAddress, RoutingType.ANYCAST, queueName1);
   } catch (Throwable ignored) {
      ignored.printStackTrace();
   }
   ClientConsumer consumer1 = session1.createConsumer(queueName1);
   session1.start();

   // only messages 100..999 should arrive; the first 100 were dropped as duplicates
   for (int i = 100; i < numMessages; i++) {
      ClientMessage message = consumer1.receive(5000);
      assertNotNull(message);
      assertEquals(i, message.getIntProperty(propKey).intValue());
      message.acknowledge();
   }
   session1.commit();
   Assert.assertNull(consumer1.receiveImmediate());

   consumer1.close();
   session1.deleteQueue(queueName1);
   session1.close();
   sf1.close();
   server1.stop();
   session0.close();
   sf0.close();
   closeFields();
   assertEquals(0, loadQueues(server0).size());
}
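As a sanity check, the pre-inserted IDs can be verified against the bridge cache the same way the bridge itself computes them. A hypothetical assertion block that could follow tx.commit() in the test above (it assumes ids, server0, server1, and forwardAddress are still in scope):

DuplicateIDCache bridgeCache = server1.getPostOffice().getDuplicateIDCache(PostOfficeImpl.BRIDGE_CACHE_STR.concat(forwardAddress));
for (long id : ids) {
   byte[] dup = BridgeImpl.getDuplicateBytes(server0.getNodeManager().getUUID(), id);
   // each pre-inserted ID should now be reported as a duplicate
   Assert.assertTrue(bridgeCache.contains(dup));
}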
Use of org.apache.activemq.artemis.core.postoffice.DuplicateIDCache in project activemq-artemis by apache.
The class DuplicateCacheTest, method testDuplicateNonPersistent:
@Test
public void testDuplicateNonPersistent() throws Exception {
   createStorage();
   DuplicateIDCache cache = new DuplicateIDCacheImpl(new SimpleString("test"), 2000, journal, false);
   TransactionImpl tx = new TransactionImpl(journal);
   for (int i = 0; i < 5000; i++) {
      byte[] bytes = RandomUtil.randomBytes();
      cache.addToCache(bytes, tx);
   }
   tx.commit();
   for (int i = 0; i < 5000; i++) {
      byte[] bytes = RandomUtil.randomBytes();
      cache.addToCache(bytes, null);
   }
}
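A point worth noting in the test above: addToCache(bytes, tx) defers the cache update to the transaction's commit, while addToCache(bytes, null) applies it immediately. A sketch of the transactional flavor in isolation, reusing the cache and journal from the test (the variable names are illustrative, and the commit-time visibility is an assumption about DuplicateIDCacheImpl's internals):

TransactionImpl tx2 = new TransactionImpl(journal);
byte[] duplID = RandomUtil.randomBytes();
cache.addToCache(duplID, tx2);
// assumed: the in-memory entry is installed by a transaction operation at commit
tx2.commit();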
Use of org.apache.activemq.artemis.core.postoffice.DuplicateIDCache in project activemq-artemis by apache.
The class DuplicateCacheTest, method testDuplicate:
@Test
public void testDuplicate() throws Exception {
   createStorage();
   DuplicateIDCache cache = new DuplicateIDCacheImpl(new SimpleString("test"), 2000, journal, true);
   TransactionImpl tx = new TransactionImpl(journal);
   for (int i = 0; i < 5000; i++) {
      byte[] bytes = RandomUtil.randomBytes();
      cache.addToCache(bytes, tx);
   }
   tx.commit();

   tx = new TransactionImpl(journal);
   for (int i = 0; i < 5000; i++) {
      byte[] bytes = RandomUtil.randomBytes();
      cache.addToCache(bytes, tx);
   }
   tx.commit();

   byte[] id = RandomUtil.randomBytes();
   Assert.assertFalse(cache.contains(id));
   cache.addToCache(id, null);
   Assert.assertTrue(cache.contains(id));
   cache.deleteFromCache(id);

   // the delete completes asynchronously: wait for the storage operation
   // context to drain before asserting the ID is gone
   final CountDownLatch latch = new CountDownLatch(1);
   OperationContextImpl.getContext().executeOnCompletion(new IOCallback() {
      @Override
      public void done() {
         latch.countDown();
      }

      @Override
      public void onError(int errorCode, String errorMessage) {
      }
   }, true);
   Assert.assertTrue(latch.await(1, TimeUnit.MINUTES));
   Assert.assertFalse(cache.contains(id));
}
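Together, contains() and addToCache() support the usual check-then-add guard a caller would apply to an incoming message. A minimal sketch built only from the methods exercised by this test (the helper name isDuplicate is hypothetical):

static boolean isDuplicate(DuplicateIDCache cache, byte[] duplID) throws Exception {
   if (cache.contains(duplID)) {
      return true;                 // resend detected; the caller should drop the message
   }
   cache.addToCache(duplID, null); // null tx: the entry takes effect immediately
   return false;
}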
Use of org.apache.activemq.artemis.core.postoffice.DuplicateIDCache in project activemq-artemis by apache.
The class PostOfficeImpl, method deleteDuplicateCache:
private void deleteDuplicateCache(SimpleString address) throws Exception {
   DuplicateIDCache cache = duplicateIDCaches.remove(address);
   if (cache != null) {
      cache.clear();
   }
   cache = duplicateIDCaches.remove(BRIDGE_CACHE_STR.concat(address));
   if (cache != null) {
      cache.clear();
   }
}
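For completeness, the caches cleared above are populated on the normal send path when a message carries the standard duplicate-detection header. A minimal client-side sketch, assuming a broker listening on the default URL (the address name and connection details are illustrative):

import org.apache.activemq.artemis.api.core.Message;
import org.apache.activemq.artemis.api.core.SimpleString;
import org.apache.activemq.artemis.api.core.client.ActiveMQClient;
import org.apache.activemq.artemis.api.core.client.ClientMessage;
import org.apache.activemq.artemis.api.core.client.ClientProducer;
import org.apache.activemq.artemis.api.core.client.ClientSession;
import org.apache.activemq.artemis.api.core.client.ClientSessionFactory;
import org.apache.activemq.artemis.api.core.client.ServerLocator;

public final class DuplicateDetectionClientExample {

   public static void main(String[] args) throws Exception {
      try (ServerLocator locator = ActiveMQClient.createServerLocator("tcp://localhost:61616");
           ClientSessionFactory sf = locator.createSessionFactory();
           ClientSession session = sf.createSession()) {
         ClientProducer producer = session.createProducer("someAddress");
         ClientMessage message = session.createMessage(true);
         // the server stores this ID in the address's DuplicateIDCache;
         // any later message with the same ID is ignored
         message.putStringProperty(Message.HDR_DUPLICATE_DETECTION_ID, new SimpleString("unique-id-1"));
         producer.send(message);
         producer.send(message); // resend: dropped by the duplicate-ID cache
      }
   }
}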