use of org.apache.activemq.artemis.api.core.Pair in project activemq-artemis by apache.
the class PostOfficeImpl method redistribute.
/**
* Redistribution can't process the route right away, as we may be dealing with a large message that needs to be processed on a different thread
*/
@Override
public Pair<RoutingContext, Message> redistribute(final Message message, final Queue originatingQueue, final Transaction tx) throws Exception {
// We have to copy the message and store it separately; otherwise we may lose remote bindings if the server restarts before the message
// arrives at the target node,
// as described in https://issues.jboss.org/browse/JBPAPP-6130
Message copyRedistribute = message.copy(storageManager.generateID());
Bindings bindings = addressManager.getBindingsForRoutingAddress(originatingQueue.getAddress());
if (bindings != null) {
RoutingContext context = new RoutingContextImpl(tx);
boolean routed = bindings.redistribute(copyRedistribute, originatingQueue, context);
if (routed) {
return new Pair<>(context, copyRedistribute);
}
}
return null;
}
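The returned Pair couples the routing context with the stored message copy so a caller can finish the route on another thread. Below is a minimal sketch of unpacking it; the caller-side variable names are illustrative assumptions, and only the Pair accessors getA() and getB() are taken from the real API.
Pair<RoutingContext, Message> redistributed = postOffice.redistribute(message, originatingQueue, tx); // hypothetical caller
if (redistributed != null) {
    RoutingContext context = redistributed.getA(); // the context the copy was routed in
    Message storedCopy = redistributed.getB();     // the copy persisted with a new message ID
    // hand both off to another thread to complete the (possibly large-message) route
}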
use of org.apache.activemq.artemis.api.core.Pair in project activemq-artemis by apache.
the class AbstractJournalStorageManager method loadMessageJournal.
@Override
public JournalLoadInformation loadMessageJournal(final PostOffice postOffice, final PagingManager pagingManager, final ResourceManager resourceManager, Map<Long, QueueBindingInfo> queueInfos, final Map<SimpleString, List<Pair<byte[], Long>>> duplicateIDMap, final Set<Pair<Long, Long>> pendingLargeMessages, List<PageCountPending> pendingNonTXPageCounter, final JournalLoader journalLoader) throws Exception {
List<RecordInfo> records = new ArrayList<>();
List<PreparedTransactionInfo> preparedTransactions = new ArrayList<>();
Set<PageTransactionInfo> invalidPageTransactions = null;
Map<Long, Message> messages = new HashMap<>();
readLock();
try {
JournalLoadInformation info = messageJournal.load(records, preparedTransactions, new LargeMessageTXFailureCallback(this, messages));
ArrayList<LargeServerMessage> largeMessages = new ArrayList<>();
Map<Long, Map<Long, AddMessageRecord>> queueMap = new HashMap<>();
Map<Long, PageSubscription> pageSubscriptions = new HashMap<>();
final int totalSize = records.size();
for (int reccount = 0; reccount < totalSize; reccount++) {
// Only log progress (at INFO level) for large journals (more than 1 million records)
if (reccount > 0 && reccount % 1000000 == 0) {
long percent = (long) ((((double) reccount) / ((double) totalSize)) * 100f);
ActiveMQServerLogger.LOGGER.percentLoaded(percent);
}
RecordInfo record = records.get(reccount);
byte[] data = record.data;
ActiveMQBuffer buff = ActiveMQBuffers.wrappedBuffer(data);
byte recordType = record.getUserRecordType();
switch(recordType) {
case JournalRecordIds.ADD_LARGE_MESSAGE_PENDING:
{
PendingLargeMessageEncoding pending = new PendingLargeMessageEncoding();
pending.decode(buff);
if (pendingLargeMessages != null) {
// it can be null in tests, and we don't need anything in that case
pendingLargeMessages.add(new Pair<>(record.id, pending.largeMessageID));
}
break;
}
case JournalRecordIds.ADD_LARGE_MESSAGE:
{
LargeServerMessage largeMessage = parseLargeMessage(messages, buff);
messages.put(record.id, largeMessage);
largeMessages.add(largeMessage);
break;
}
case JournalRecordIds.ADD_MESSAGE:
{
throw new IllegalStateException("This is using old journal data, export your data and import at the correct version");
}
case JournalRecordIds.ADD_MESSAGE_PROTOCOL:
{
Message message = MessagePersister.getInstance().decode(buff, null);
messages.put(record.id, message);
break;
}
case JournalRecordIds.ADD_REF:
{
long messageID = record.id;
RefEncoding encoding = new RefEncoding();
encoding.decode(buff);
Map<Long, AddMessageRecord> queueMessages = queueMap.get(encoding.queueID);
if (queueMessages == null) {
queueMessages = new LinkedHashMap<>();
queueMap.put(encoding.queueID, queueMessages);
}
Message message = messages.get(messageID);
if (message == null) {
ActiveMQServerLogger.LOGGER.cannotFindMessage(record.id);
} else {
queueMessages.put(messageID, new AddMessageRecord(message));
}
break;
}
case JournalRecordIds.ACKNOWLEDGE_REF:
{
long messageID = record.id;
RefEncoding encoding = new RefEncoding();
encoding.decode(buff);
Map<Long, AddMessageRecord> queueMessages = queueMap.get(encoding.queueID);
if (queueMessages == null) {
ActiveMQServerLogger.LOGGER.journalCannotFindQueue(encoding.queueID, messageID);
} else {
AddMessageRecord rec = queueMessages.remove(messageID);
if (rec == null) {
ActiveMQServerLogger.LOGGER.cannotFindMessage(messageID);
}
}
break;
}
case JournalRecordIds.UPDATE_DELIVERY_COUNT:
{
long messageID = record.id;
DeliveryCountUpdateEncoding encoding = new DeliveryCountUpdateEncoding();
encoding.decode(buff);
Map<Long, AddMessageRecord> queueMessages = queueMap.get(encoding.queueID);
if (queueMessages == null) {
ActiveMQServerLogger.LOGGER.journalCannotFindQueueDelCount(encoding.queueID);
} else {
AddMessageRecord rec = queueMessages.get(messageID);
if (rec == null) {
ActiveMQServerLogger.LOGGER.journalCannotFindMessageDelCount(messageID);
} else {
rec.setDeliveryCount(encoding.count);
}
}
break;
}
case JournalRecordIds.PAGE_TRANSACTION:
{
PageTransactionInfo invalidPGTx = null;
if (record.isUpdate) {
PageUpdateTXEncoding pageUpdate = new PageUpdateTXEncoding();
pageUpdate.decode(buff);
PageTransactionInfo pageTX = pagingManager.getTransaction(pageUpdate.pageTX);
if (pageTX == null) {
ActiveMQServerLogger.LOGGER.journalCannotFindPageTX(pageUpdate.pageTX);
} else {
if (!pageTX.onUpdate(pageUpdate.recods, null, null)) {
invalidPGTx = pageTX;
}
}
} else {
PageTransactionInfoImpl pageTransactionInfo = new PageTransactionInfoImpl();
pageTransactionInfo.decode(buff);
pageTransactionInfo.setRecordID(record.id);
pagingManager.addTransaction(pageTransactionInfo);
if (!pageTransactionInfo.checkSize(null, null)) {
invalidPGTx = pageTransactionInfo;
}
}
if (invalidPGTx != null) {
if (invalidPageTransactions == null) {
invalidPageTransactions = new HashSet<>();
}
invalidPageTransactions.add(invalidPGTx);
}
break;
}
case JournalRecordIds.SET_SCHEDULED_DELIVERY_TIME:
{
long messageID = record.id;
ScheduledDeliveryEncoding encoding = new ScheduledDeliveryEncoding();
encoding.decode(buff);
Map<Long, AddMessageRecord> queueMessages = queueMap.get(encoding.queueID);
if (queueMessages == null) {
ActiveMQServerLogger.LOGGER.journalCannotFindQueueScheduled(encoding.queueID, messageID);
} else {
AddMessageRecord rec = queueMessages.get(messageID);
if (rec == null) {
ActiveMQServerLogger.LOGGER.cannotFindMessage(messageID);
} else {
rec.setScheduledDeliveryTime(encoding.scheduledDeliveryTime);
}
}
break;
}
case JournalRecordIds.DUPLICATE_ID:
{
DuplicateIDEncoding encoding = new DuplicateIDEncoding();
encoding.decode(buff);
List<Pair<byte[], Long>> ids = duplicateIDMap.get(encoding.address);
if (ids == null) {
ids = new ArrayList<>();
duplicateIDMap.put(encoding.address, ids);
}
ids.add(new Pair<>(encoding.duplID, record.id));
break;
}
case JournalRecordIds.HEURISTIC_COMPLETION:
{
HeuristicCompletionEncoding encoding = new HeuristicCompletionEncoding();
encoding.decode(buff);
resourceManager.putHeuristicCompletion(record.id, encoding.xid, encoding.isCommit);
break;
}
case JournalRecordIds.ACKNOWLEDGE_CURSOR:
{
CursorAckRecordEncoding encoding = new CursorAckRecordEncoding();
encoding.decode(buff);
encoding.position.setRecordID(record.id);
PageSubscription sub = locateSubscription(encoding.queueID, pageSubscriptions, queueInfos, pagingManager);
if (sub != null) {
sub.reloadACK(encoding.position);
} else {
ActiveMQServerLogger.LOGGER.journalCannotFindQueueReloading(encoding.queueID);
messageJournal.appendDeleteRecord(record.id, false);
}
break;
}
case JournalRecordIds.PAGE_CURSOR_COUNTER_VALUE:
{
PageCountRecord encoding = new PageCountRecord();
encoding.decode(buff);
PageSubscription sub = locateSubscription(encoding.getQueueID(), pageSubscriptions, queueInfos, pagingManager);
if (sub != null) {
sub.getCounter().loadValue(record.id, encoding.getValue(), encoding.getPersistentSize());
} else {
ActiveMQServerLogger.LOGGER.journalCannotFindQueueReloadingPage(encoding.getQueueID());
messageJournal.appendDeleteRecord(record.id, false);
}
break;
}
case JournalRecordIds.PAGE_CURSOR_COUNTER_INC:
{
PageCountRecordInc encoding = new PageCountRecordInc();
encoding.decode(buff);
PageSubscription sub = locateSubscription(encoding.getQueueID(), pageSubscriptions, queueInfos, pagingManager);
if (sub != null) {
sub.getCounter().loadInc(record.id, encoding.getValue(), encoding.getPersistentSize());
} else {
ActiveMQServerLogger.LOGGER.journalCannotFindQueueReloadingPageCursor(encoding.getQueueID());
messageJournal.appendDeleteRecord(record.id, false);
}
break;
}
case JournalRecordIds.PAGE_CURSOR_COMPLETE:
{
CursorAckRecordEncoding encoding = new CursorAckRecordEncoding();
encoding.decode(buff);
encoding.position.setRecordID(record.id);
PageSubscription sub = locateSubscription(encoding.queueID, pageSubscriptions, queueInfos, pagingManager);
if (sub != null) {
if (!sub.reloadPageCompletion(encoding.position)) {
if (logger.isDebugEnabled()) {
logger.debug("Complete page " + encoding.position.getPageNr() + " doesn't exist on page manager " + sub.getPagingStore().getAddress());
}
messageJournal.appendDeleteRecord(record.id, false);
}
} else {
ActiveMQServerLogger.LOGGER.cantFindQueueOnPageComplete(encoding.queueID);
messageJournal.appendDeleteRecord(record.id, false);
}
break;
}
case JournalRecordIds.PAGE_CURSOR_PENDING_COUNTER:
{
PageCountPendingImpl pendingCountEncoding = new PageCountPendingImpl();
pendingCountEncoding.decode(buff);
pendingCountEncoding.setID(record.id);
// This can be null in test cases that are not interested in this outcome
if (pendingNonTXPageCounter != null) {
pendingNonTXPageCounter.add(pendingCountEncoding);
}
break;
}
default:
{
throw new IllegalStateException("Invalid record type " + recordType);
}
}
// Free the record as soon as possible: it is no longer needed, and keeping its byte array
// around would consume memory during the load and slow down the remaining processing
records.set(reccount, null);
}
// Release the memory as soon as not needed any longer
records.clear();
records = null;
journalLoader.handleAddMessage(queueMap);
loadPreparedTransactions(postOffice, pagingManager, resourceManager, queueInfos, preparedTransactions, duplicateIDMap, pageSubscriptions, pendingLargeMessages, journalLoader);
for (PageSubscription sub : pageSubscriptions.values()) {
sub.getCounter().processReload();
}
for (LargeServerMessage msg : largeMessages) {
if (msg.getRefCount() == 0) {
ActiveMQServerLogger.LOGGER.largeMessageWithNoRef(msg.getMessageID());
msg.decrementDelayDeletionCount();
}
}
journalLoader.handleNoMessageReferences(messages);
// To recover positions on Iterators
if (pagingManager != null) {
// it can be null in certain tests that don't deal with paging
// This could also be the case in certain embedded conditions
pagingManager.processReload();
}
journalLoader.postLoad(messageJournal, resourceManager, duplicateIDMap);
checkInvalidPageTransactions(pagingManager, invalidPageTransactions);
journalLoaded = true;
return info;
} finally {
readUnLock();
}
}
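In this loader, Pair appears in two reload structures: each duplicateIDMap entry pairs a duplicate-ID byte[] with the journal record that stored it, and each pendingLargeMessages entry pairs a journal record ID with a large-message ID. A small sketch of reading those structures back, assuming they were populated by a prior loadMessageJournal() call; the local variable names are illustrative.
for (Map.Entry<SimpleString, List<Pair<byte[], Long>>> entry : duplicateIDMap.entrySet()) {
    SimpleString address = entry.getKey();
    for (Pair<byte[], Long> dup : entry.getValue()) {
        byte[] duplicateID = dup.getA();    // the duplicate-detection ID bytes
        long journalRecordID = dup.getB();  // the journal record holding that ID
    }
}
for (Pair<Long, Long> pending : pendingLargeMessages) {
    long pendingRecordID = pending.getA();  // record ID of the ADD_LARGE_MESSAGE_PENDING entry
    long largeMessageID = pending.getB();   // the large message it refers to
}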
use of org.apache.activemq.artemis.api.core.Pair in project activemq-artemis by apache.
the class JournalStorageManager method sendLargeMessageFiles.
private void sendLargeMessageFiles(final Map<Long, Pair<String, Long>> pendingLargeMessages) throws Exception {
Iterator<Map.Entry<Long, Pair<String, Long>>> iter = pendingLargeMessages.entrySet().iterator();
while (started && iter.hasNext()) {
Map.Entry<Long, Pair<String, Long>> entry = iter.next();
String fileName = entry.getValue().getA();
final long id = entry.getKey();
long size = entry.getValue().getB();
SequentialFile seqFile = largeMessagesFactory.createSequentialFile(fileName);
if (!seqFile.exists())
continue;
if (replicator != null) {
replicator.syncLargeMessageFile(seqFile, size, id);
} else {
throw ActiveMQMessageBundle.BUNDLE.replicatorIsNull();
}
}
}
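Here each map entry keys a large-message ID to a Pair of its file name and file size. A hedged sketch of assembling such an entry from within the class before calling the method; the ID, file name, and size are illustrative values, not the actual naming logic used by the storage manager.
Map<Long, Pair<String, Long>> pendingLargeMessages = new HashMap<>();
long messageID = 2001L;                          // illustrative large-message ID
String fileName = messageID + ".msg";            // illustrative on-disk file name
long fileSize = 10L * 1024 * 1024;               // illustrative size in bytes
pendingLargeMessages.put(messageID, new Pair<>(fileName, fileSize));
sendLargeMessageFiles(pendingLargeMessages);     // replicates each existing file, or throws if no replicator is set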
use of org.apache.activemq.artemis.api.core.Pair in project activemq-artemis by apache.
the class FileConfigurationParser method parseAddressSettings.
/**
* @param node the address-setting XML element to parse
* @return a Pair of the match pattern (the "match" attribute) and the parsed AddressSettings
*/
protected Pair<String, AddressSettings> parseAddressSettings(final Node node) {
String match = getAttributeValue(node, "match");
NodeList children = node.getChildNodes();
AddressSettings addressSettings = new AddressSettings();
Pair<String, AddressSettings> setting = new Pair<>(match, addressSettings);
for (int i = 0; i < children.getLength(); i++) {
final Node child = children.item(i);
final String name = child.getNodeName();
if (DEAD_LETTER_ADDRESS_NODE_NAME.equalsIgnoreCase(name)) {
SimpleString queueName = new SimpleString(getTrimmedTextContent(child));
addressSettings.setDeadLetterAddress(queueName);
} else if (EXPIRY_ADDRESS_NODE_NAME.equalsIgnoreCase(name)) {
SimpleString queueName = new SimpleString(getTrimmedTextContent(child));
addressSettings.setExpiryAddress(queueName);
} else if (EXPIRY_DELAY_NODE_NAME.equalsIgnoreCase(name)) {
addressSettings.setExpiryDelay(XMLUtil.parseLong(child));
} else if (REDELIVERY_DELAY_NODE_NAME.equalsIgnoreCase(name)) {
addressSettings.setRedeliveryDelay(XMLUtil.parseLong(child));
} else if (REDELIVERY_DELAY_MULTIPLIER_NODE_NAME.equalsIgnoreCase(name)) {
addressSettings.setRedeliveryMultiplier(XMLUtil.parseDouble(child));
} else if (MAX_REDELIVERY_DELAY_NODE_NAME.equalsIgnoreCase(name)) {
addressSettings.setMaxRedeliveryDelay(XMLUtil.parseLong(child));
} else if (MAX_SIZE_BYTES_NODE_NAME.equalsIgnoreCase(name)) {
addressSettings.setMaxSizeBytes(ByteUtil.convertTextBytes(getTrimmedTextContent(child)));
} else if (PAGE_SIZE_BYTES_NODE_NAME.equalsIgnoreCase(name)) {
addressSettings.setPageSizeBytes(ByteUtil.convertTextBytes(getTrimmedTextContent(child)));
} else if (PAGE_MAX_CACHE_SIZE_NODE_NAME.equalsIgnoreCase(name)) {
addressSettings.setPageCacheMaxSize(XMLUtil.parseInt(child));
} else if (MESSAGE_COUNTER_HISTORY_DAY_LIMIT_NODE_NAME.equalsIgnoreCase(name)) {
addressSettings.setMessageCounterHistoryDayLimit(XMLUtil.parseInt(child));
} else if (ADDRESS_FULL_MESSAGE_POLICY_NODE_NAME.equalsIgnoreCase(name)) {
String value = getTrimmedTextContent(child);
Validators.ADDRESS_FULL_MESSAGE_POLICY_TYPE.validate(ADDRESS_FULL_MESSAGE_POLICY_NODE_NAME, value);
AddressFullMessagePolicy policy = Enum.valueOf(AddressFullMessagePolicy.class, value);
addressSettings.setAddressFullMessagePolicy(policy);
} else if (LVQ_NODE_NAME.equalsIgnoreCase(name) || DEFAULT_LVQ_NODE_NAME.equalsIgnoreCase(name)) {
addressSettings.setDefaultLastValueQueue(XMLUtil.parseBoolean(child));
} else if (DEFAULT_EXCLUSIVE_NODE_NAME.equalsIgnoreCase(name)) {
addressSettings.setDefaultExclusiveQueue(XMLUtil.parseBoolean(child));
} else if (MAX_DELIVERY_ATTEMPTS.equalsIgnoreCase(name)) {
addressSettings.setMaxDeliveryAttempts(XMLUtil.parseInt(child));
} else if (REDISTRIBUTION_DELAY_NODE_NAME.equalsIgnoreCase(name)) {
addressSettings.setRedistributionDelay(XMLUtil.parseLong(child));
} else if (SEND_TO_DLA_ON_NO_ROUTE.equalsIgnoreCase(name)) {
addressSettings.setSendToDLAOnNoRoute(XMLUtil.parseBoolean(child));
} else if (SLOW_CONSUMER_THRESHOLD_NODE_NAME.equalsIgnoreCase(name)) {
long slowConsumerThreshold = XMLUtil.parseLong(child);
Validators.MINUS_ONE_OR_GT_ZERO.validate(SLOW_CONSUMER_THRESHOLD_NODE_NAME, slowConsumerThreshold);
addressSettings.setSlowConsumerThreshold(slowConsumerThreshold);
} else if (SLOW_CONSUMER_CHECK_PERIOD_NODE_NAME.equalsIgnoreCase(name)) {
long slowConsumerCheckPeriod = XMLUtil.parseLong(child);
Validators.GT_ZERO.validate(SLOW_CONSUMER_CHECK_PERIOD_NODE_NAME, slowConsumerCheckPeriod);
addressSettings.setSlowConsumerCheckPeriod(slowConsumerCheckPeriod);
} else if (SLOW_CONSUMER_POLICY_NODE_NAME.equalsIgnoreCase(name)) {
String value = getTrimmedTextContent(child);
Validators.SLOW_CONSUMER_POLICY_TYPE.validate(SLOW_CONSUMER_POLICY_NODE_NAME, value);
SlowConsumerPolicy policy = Enum.valueOf(SlowConsumerPolicy.class, value);
addressSettings.setSlowConsumerPolicy(policy);
} else if (AUTO_CREATE_JMS_QUEUES.equalsIgnoreCase(name)) {
addressSettings.setAutoCreateJmsQueues(XMLUtil.parseBoolean(child));
} else if (AUTO_DELETE_JMS_QUEUES.equalsIgnoreCase(name)) {
addressSettings.setAutoDeleteJmsQueues(XMLUtil.parseBoolean(child));
} else if (AUTO_CREATE_JMS_TOPICS.equalsIgnoreCase(name)) {
addressSettings.setAutoCreateJmsTopics(XMLUtil.parseBoolean(child));
} else if (AUTO_DELETE_JMS_TOPICS.equalsIgnoreCase(name)) {
addressSettings.setAutoDeleteJmsTopics(XMLUtil.parseBoolean(child));
} else if (AUTO_CREATE_QUEUES.equalsIgnoreCase(name)) {
addressSettings.setAutoCreateQueues(XMLUtil.parseBoolean(child));
} else if (AUTO_DELETE_QUEUES.equalsIgnoreCase(name)) {
addressSettings.setAutoDeleteQueues(XMLUtil.parseBoolean(child));
} else if (CONFIG_DELETE_QUEUES.equalsIgnoreCase(name)) {
String value = getTrimmedTextContent(child);
Validators.DELETION_POLICY_TYPE.validate(CONFIG_DELETE_QUEUES, value);
DeletionPolicy policy = Enum.valueOf(DeletionPolicy.class, value);
addressSettings.setConfigDeleteQueues(policy);
} else if (AUTO_CREATE_ADDRESSES.equalsIgnoreCase(name)) {
addressSettings.setAutoCreateAddresses(XMLUtil.parseBoolean(child));
} else if (AUTO_DELETE_ADDRESSES.equalsIgnoreCase(name)) {
addressSettings.setAutoDeleteAddresses(XMLUtil.parseBoolean(child));
} else if (CONFIG_DELETE_ADDRESSES.equalsIgnoreCase(name)) {
String value = getTrimmedTextContent(child);
Validators.DELETION_POLICY_TYPE.validate(CONFIG_DELETE_ADDRESSES, value);
DeletionPolicy policy = Enum.valueOf(DeletionPolicy.class, value);
addressSettings.setConfigDeleteAddresses(policy);
} else if (MANAGEMENT_BROWSE_PAGE_SIZE.equalsIgnoreCase(name)) {
addressSettings.setManagementBrowsePageSize(XMLUtil.parseInt(child));
} else if (DEFAULT_PURGE_ON_NO_CONSUMERS.equalsIgnoreCase(name)) {
addressSettings.setDefaultPurgeOnNoConsumers(XMLUtil.parseBoolean(child));
} else if (DEFAULT_MAX_CONSUMERS.equalsIgnoreCase(name)) {
addressSettings.setDefaultMaxConsumers(XMLUtil.parseInt(child));
} else if (DEFAULT_QUEUE_ROUTING_TYPE.equalsIgnoreCase(name)) {
String value = getTrimmedTextContent(child);
Validators.ROUTING_TYPE.validate(DEFAULT_QUEUE_ROUTING_TYPE, value);
RoutingType routingType = RoutingType.valueOf(value);
addressSettings.setDefaultQueueRoutingType(routingType);
} else if (DEFAULT_ADDRESS_ROUTING_TYPE.equalsIgnoreCase(name)) {
String value = getTrimmedTextContent(child);
Validators.ROUTING_TYPE.validate(DEFAULT_ADDRESS_ROUTING_TYPE, value);
RoutingType routingType = RoutingType.valueOf(value);
addressSettings.setDefaultAddressRoutingType(routingType);
}
}
return setting;
}
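The Pair returned here keeps the match pattern together with its parsed settings so a caller can register them as a unit. A hedged sketch of consuming it; the repository variable and the addMatch() registration are shown as a typical destination, not necessarily the exact caller inside FileConfigurationParser.
Pair<String, AddressSettings> setting = parseAddressSettings(node);
String match = setting.getA();                   // the "match" attribute, e.g. "jms.queue.#"
AddressSettings addressSettings = setting.getB();
addressSettingsRepository.addMatch(match, addressSettings); // assumed HierarchicalRepository<AddressSettings>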
use of org.apache.activemq.artemis.api.core.Pair in project activemq-artemis by apache.
the class FileBasedSecStoreConfig method removeRoles.
private void removeRoles(String username) {
Iterator<String> iterKeys = roleConfig.getKeys();
List<Pair<String, List<String>>> updateMap = new ArrayList<>();
while (iterKeys.hasNext()) {
String theRole = iterKeys.next();
List<String> userList = roleConfig.getList(String.class, theRole);
List<String> newList = new ArrayList<>();
boolean roleChanged = false;
for (String value : userList) {
// each value may be comma separated.
List<String> update = new ArrayList<>();
String[] items = value.split(",");
boolean found = false;
for (String item : items) {
if (!item.equals(username)) {
update.add(item);
} else {
found = true;
roleChanged = true;
}
}
if (found) {
if (update.size() > 0) {
newList.add(StringUtil.joinStringList(update, ","));
}
}
}
if (roleChanged) {
updateMap.add(new Pair<>(theRole, newList));
}
}
// do update
Iterator<Pair<String, List<String>>> iterUpdate = updateMap.iterator();
while (iterUpdate.hasNext()) {
Pair<String, List<String>> entry = iterUpdate.next();
roleConfig.clearProperty(entry.getA());
if (entry.getB().size() > 0) {
roleConfig.addProperty(entry.getA(), entry.getB());
}
}
}
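removeRoles uses Pair<String, List<String>> to stage (role name, remaining users) updates so the configuration isn't modified while its keys are still being iterated. A stripped-down sketch of that staging pattern, with illustrative data only.
List<Pair<String, List<String>>> updates = new ArrayList<>();
updates.add(new Pair<>("admins", Arrays.asList("alice", "bob"))); // illustrative role and remaining users
for (Pair<String, List<String>> update : updates) {
    String role = update.getA();
    List<String> remainingUsers = update.getB();
    // clear the old property, then re-add it only if remainingUsers is non-empty, as above
}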