Use of com.zimbra.cs.redolog.op.StoreIncomingBlob in project zm-mailbox by Zimbra:
the class RedoLogVerify, method scanLog.
/**
 * Scans a redo log file, optionally printing each op (subject to the
 * mailbox-id filter in {@code mParams}) and returning whether the whole file
 * parsed cleanly.
 *
 * On a parse failure — either a genuine I/O problem or a half-written trailing
 * entry left by a server crash — a diagnostic report and a hexdump of the
 * bytes around the failure offset are printed, then the IOException is
 * rethrown.
 *
 * @param logfile redo log file to scan
 * @return true if the entire file was parsed without error
 * @throws IOException on read/parse failure (after printing diagnostics)
 */
public boolean scanLog(File logfile) throws IOException {
    boolean good = false;
    FileLogReader logReader = new FileLogReader(logfile, false);
    logReader.open();
    if (!mParams.quiet) {
        FileHeader header = logReader.getHeader();
        mOut.println("HEADER");
        mOut.println("------");
        mOut.print(header);
        mOut.println("------");
    }
    boolean hasMailboxIdsFilter = !mParams.mboxIds.isEmpty();
    RedoableOp op = null;
    long lastPosition = 0;
    long lastOpStartOffset = 0;
    try {
        while ((op = logReader.getNextOp()) != null) {
            lastOpStartOffset = logReader.getLastOpStartOffset();
            lastPosition = logReader.position();
            if (hasMailboxIdsFilter) {
                int mboxId = op.getMailboxId();
                if (op instanceof StoreIncomingBlob) {
                    // StoreIncomingBlob may target multiple mailboxes; print it
                    // if any of them is in the filter set.
                    List<Integer> list = ((StoreIncomingBlob) op).getMailboxIdList();
                    if (list != null) {
                        boolean match = false;
                        for (Integer mid : list) {
                            if (mParams.mboxIds.contains(mid)) {
                                match = true;
                                break;
                            }
                        }
                        if (!match)
                            continue;
                    }
                    // If list==null, it's a store incoming blob op targeted at unknown set of mailboxes.
                    // It applies to our filtered mailboxes.
                } else if (!mParams.mboxIds.contains(mboxId)) {
                    continue;
                }
            }
            if (!mParams.quiet) {
                printOp(mOut, op, mParams.hideOffset, lastOpStartOffset, lastPosition - lastOpStartOffset);
                if (mParams.showBlob) {
                    InputStream dataStream = op.getAdditionalDataStream();
                    if (dataStream != null) {
                        mOut.println("<START OF BLOB>");
                        ByteUtil.copy(dataStream, true, mOut, false);
                        mOut.println();
                        mOut.println("<END OF BLOB>");
                    }
                }
            }
        }
        good = true;
    } catch (IOException e) {
        // The IOException could be a real I/O problem or it could mean
        // there was a server crash previously and there were half-written
        // log entries.
        mOut.println();
        mOut.printf("Error while parsing data starting at offset 0x%08x", lastPosition);
        mOut.println();
        long size = logReader.getSize();
        long diff = size - lastPosition;
        mOut.printf("%d bytes remaining in the file", diff);
        mOut.println();
        mOut.println();
        if (op != null) {
            mOut.println("Last successfully parsed redo op:");
            printOp(mOut, op, false, lastOpStartOffset, lastPosition - lastOpStartOffset);
            mOut.println();
        }
        // hexdump data around the bad bytes
        int bytesPerLine = 16;
        int linesBefore = 10;
        int linesAfter = 10;
        // Start the dump at a 16-byte line boundary a few lines before the error offset.
        long startPos = Math.max(lastPosition - (lastPosition % bytesPerLine) - linesBefore * bytesPerLine, 0);
        // lastPosition - startPos + diff == size - startPos, so count never exceeds
        // the bytes actually remaining in the file.
        int count = (int) Math.min((linesBefore + linesAfter + 1) * bytesPerLine, lastPosition - startPos + diff);
        // try-with-resources closes the file on all paths; readFully (rather than
        // read, whose return value was previously ignored) guarantees the buffer
        // is completely filled before we hexdump it.
        try (RandomAccessFile raf = new RandomAccessFile(logfile, "r")) {
            raf.seek(startPos);
            byte[] buf = new byte[count];
            raf.readFully(buf, 0, count);
            mOut.printf("Data near error offset %08x:", lastPosition);
            mOut.println();
            hexdump(mOut, buf, 0, count, startPos, lastPosition);
            mOut.println();
        } catch (IOException eh) {
            mOut.println("Error opening log file " + logfile.getAbsolutePath() + " for hexdump");
            eh.printStackTrace(mOut);
        }
        throw e;
    } finally {
        logReader.close();
    }
    return good;
}
Use of com.zimbra.cs.redolog.op.StoreIncomingBlob in project zm-mailbox by Zimbra:
the class RedoPlayer, method processOp.
/**
 * Processes one redo op during log replay.
 *
 * Ops carrying a start marker (change records) are remembered in mOpsMap keyed
 * by transaction id until their commit/abort arrives. Checkpoint ops trigger a
 * sanity check that the currently tracked uncommitted transactions match the
 * checkpoint's list (discrepancies are only logged, never fatal). End-marker
 * ops (commit/abort) remove the corresponding prepared op from the map and,
 * when redoCommitted is set and the op passes the time and mailbox filters,
 * replay it via playOp().
 *
 * @param op                     the redo op just read from the log
 * @param redoCommitted          whether committed ops should actually be re-executed
 * @param mboxIDsMap             map of original mailbox id to replacement mailbox id used
 *                               to filter (and optionally retarget) replay; null means
 *                               replay regardless of mailbox
 * @param startTime              only replay ops whose prepare timestamp is &gt;= this
 *                               (-1 disables the lower bound)
 * @param endTime                only replay commits whose commit timestamp is &lt; this
 * @param ignoreCommitsAtOrAfter commits stamped at or after this time are ignored entirely
 * @throws ServiceException if re-executing an op fails and replay errors are not ignored
 */
private final void processOp(RedoableOp op, boolean redoCommitted, Map<Integer, Integer> mboxIDsMap, long startTime, long endTime, long ignoreCommitsAtOrAfter) throws ServiceException {
if (op.isStartMarker()) {
// Change record: track it as uncommitted until its end marker is seen.
synchronized (mOpsMapGuard) {
mOpsMap.put(op.getTransactionId(), op);
if (mHasOrphanOps) {
// An end marker for this txn was seen before this change record;
// remove the orphan and log the out-of-order condition.
RedoableOp x = mOrphanOps.remove(op.getTransactionId());
if (x != null)
ZimbraLog.redolog.error("Detected out-of-order insertion of change record for orphans commit/abort: change=" + op + ", orphan=" + x);
}
}
} else {
// Checkpoint: verify the set of currently uncommitted ops matches
// those listed in the checkpoint.
if (op instanceof Checkpoint) {
Checkpoint ckpt = (Checkpoint) op;
Set txns = ckpt.getActiveTxns();
if (txns.size() > 0) {
synchronized (mOpsMapGuard) {
if (mOpsMap.size() != txns.size()) {
// Unexpected discrepancy
if (ZimbraLog.redolog.isDebugEnabled()) {
StringBuffer sb1 = new StringBuffer("Current Uncommitted Ops: ");
StringBuffer sb2 = new StringBuffer("Checkpoint Uncommitted Ops: ");
int i = 0;
// Dump the txn ids we are currently tracking as uncommitted.
for (Iterator it = mOpsMap.keySet().iterator(); it.hasNext(); i++) {
TransactionId id = (TransactionId) it.next();
if (i > 0)
sb1.append(", ");
sb1.append(id);
}
i = 0;
// Dump the txn ids the checkpoint claims are uncommitted.
for (Iterator it = txns.iterator(); it.hasNext(); i++) {
TransactionId id = (TransactionId) it.next();
if (i > 0)
sb2.append(", ");
sb2.append(id);
}
// NOTE(review): guarded by isDebugEnabled but logged at info level — confirm intent.
ZimbraLog.redolog.info("Checkpoint discrepancy: # current uncommitted ops = " + mOpsMap.size() + ", # checkpoint uncommitted ops = " + txns.size() + "\nMAP DUMP:\n" + sb1.toString() + "\n" + sb2.toString());
}
}
}
} else {
// Checkpoint with an empty active-txn list: our map should be empty too.
synchronized (mOpsMapGuard) {
if (mOpsMap.size() != 0) {
// Unexpected discrepancy
if (ZimbraLog.redolog.isDebugEnabled()) {
StringBuffer sb1 = new StringBuffer("Current Uncommitted Ops: ");
int i = 0;
for (Iterator it = mOpsMap.keySet().iterator(); it.hasNext(); i++) {
TransactionId id = (TransactionId) it.next();
if (i > 0)
sb1.append(", ");
sb1.append(id);
}
// NOTE(review): guarded by isDebugEnabled but logged at info level — confirm intent.
ZimbraLog.redolog.info("Checkpoint discrepancy: # current uncommitted ops = " + mOpsMap.size() + " instead of 0\nMAP DUMP:\n" + sb1.toString());
}
}
}
}
} else if (op.isEndMarker()) {
// Ignore if op is a commit and its timestamp is at or after ignoreCommitsAtOrAfter.
// In other words, don't ignore if op is a rollback OR its timestamp is before ignoreCommitsAtOrAfter.
boolean isCommitOp = op instanceof CommitTxn;
long opTstamp = op.getTimestamp();
if (!isCommitOp || opTstamp < ignoreCommitsAtOrAfter) {
// Encountered COMMIT or ABORT. Discard the
// corresponding op from map, and optionally execute the committed op.
RedoableOp prepareOp;
synchronized (mOpsMapGuard) {
prepareOp = (RedoableOp) mOpsMap.remove(op.getTransactionId());
if (prepareOp == null) {
// End marker with no matching change record: remember it as an
// orphan so a late-arriving change record can be detected above.
mHasOrphanOps = true;
ZimbraLog.redolog.error("Commit/abort record encountered before corresponding change record (" + op + ")");
TransactionId tid = op.getTransactionId();
RedoableOp x = (RedoableOp) mOrphanOps.get(tid);
if (x != null)
ZimbraLog.redolog.error("Op [" + op + "] is already in orphans map: value=" + x);
mOrphanOps.put(tid, op);
}
}
// Replay only committed ops that fall inside the [startTime, endTime) window.
if (redoCommitted && prepareOp != null && isCommitOp && (startTime == -1 || prepareOp.getTimestamp() >= startTime) && opTstamp < endTime) {
boolean allowRedo = false;
if (mboxIDsMap == null) {
// Caller doesn't care which mailbox(es) the op is for.
allowRedo = true;
} else {
int opMailboxId = prepareOp.getMailboxId();
if (prepareOp instanceof StoreIncomingBlob) {
assert (opMailboxId == RedoableOp.MAILBOX_ID_ALL);
// special case for StoreIncomingBlob op that has
// a list of mailbox IDs.
StoreIncomingBlob storeOp = (StoreIncomingBlob) prepareOp;
List<Integer> list = storeOp.getMailboxIdList();
if (list != null) {
Set<Integer> opMboxIds = new HashSet<Integer>(list);
for (Map.Entry<Integer, Integer> entry : mboxIDsMap.entrySet()) {
if (opMboxIds.contains(entry.getKey())) {
allowRedo = true;
// Replace the mailbox ID list in the op. We're
// replaying it only for the target mailbox ID we're
// interested in.
List<Integer> newList = new ArrayList<Integer>(mboxIDsMap.values());
storeOp.setMailboxIdList(newList);
break;
}
}
} else {
// Prior to redolog version 1.0 StoreIncomingBlob
// didn't keep track of mailbox list. Always recreate
// the blob since we don't know which mailboxes will
// need it.
allowRedo = true;
}
} else if (opMailboxId == RedoableOp.MAILBOX_ID_ALL) {
// This case should be checked after StoreIncomingBlob
// case because StoreIncomingBlob has mailbox ID of
// MAILBOX_ID_ALL.
allowRedo = true;
} else {
// Ordinary single-mailbox op: replay only if its mailbox is in the
// map, optionally retargeting it to the mapped replacement mailbox.
for (Map.Entry<Integer, Integer> entry : mboxIDsMap.entrySet()) {
if (opMailboxId == entry.getKey().intValue()) {
if (entry.getValue() != null) {
// restore to a different mailbox
prepareOp.setMailboxId(entry.getValue().intValue());
}
allowRedo = true;
break;
}
}
}
}
if (allowRedo) {
if (mSkipDeleteOps && prepareOp.isDeleteOp()) {
ZimbraLog.redolog.info("Skipping delete op: " + prepareOp.toString());
} else {
try {
if (ZimbraLog.redolog.isDebugEnabled())
ZimbraLog.redolog.debug("Redoing: " + prepareOp.toString());
prepareOp.setUnloggedReplay(mUnloggedReplay);
playOp(prepareOp);
} catch (Exception e) {
// Replay failures either abort the whole replay or are logged
// and skipped, depending on configuration.
if (!ignoreReplayErrors())
throw ServiceException.FAILURE("Error executing redoOp", e);
else
ZimbraLog.redolog.warn("Ignoring error during redo log replay: " + e.getMessage(), e);
}
}
}
}
}
}
}
Use of com.zimbra.cs.redolog.op.StoreIncomingBlob in project zm-mailbox by Zimbra:
the class Mailbox, method addMessageInternal.
/**
 * Core message-delivery path: adds an already-parsed, already-staged message to
 * this mailbox inside a single transaction. Handles duplicate detection via the
 * Message-ID header, flag normalization, conversation threading (explicit id,
 * reference-based lookup, or virtual conversation promotion), redo logging
 * (a CreateMessage record, plus a shared StoreIncomingBlob record for the first
 * recipient of a shared delivery), blob linking, index queuing, and the lawful
 * intercept notification. Also drives redo replay of a previously logged
 * CreateMessage when octxt carries a redo player.
 *
 * @param octxt          operation context; may carry a CreateMessage redo player
 * @param pm             the parsed message to deliver (must be non-null)
 * @param folderId       destination folder id
 * @param noICal         if true, skip calendar-invite processing
 * @param flags          initial flag bitmask (system flags are filtered below)
 * @param tags           tag names to apply
 * @param conversationId requested conversation id, or ID_AUTO_INCREMENT
 * @param rcptEmail      recipient address recorded in the redo log
 * @param dinfo          draft info (origId used for draft-reply threading), or null
 * @param customData     custom metadata to attach, or null
 * @param dctxt          delivery context carrying the incoming blob and shared-delivery state
 * @param staged         staged blob to be linked into the mailbox store
 * @return the newly created Message
 * @throws IOException      on blob I/O failure
 * @throws ServiceException on validation, quota, or persistence failure
 */
private Message addMessageInternal(OperationContext octxt, ParsedMessage pm, int folderId, boolean noICal, int flags, String[] tags, int conversationId, String rcptEmail, Message.DraftInfo dinfo, CustomMetadata customData, DeliveryContext dctxt, StagedBlob staged) throws IOException, ServiceException {
assert lock.isWriteLockedByCurrentThread();
if (pm == null) {
throw ServiceException.INVALID_REQUEST("null ParsedMessage when adding message to mailbox " + mId, null);
}
// Conversation ids up to HIGHEST_SYSTEM_ID are reserved; fall back to auto-assignment.
if (Math.abs(conversationId) <= HIGHEST_SYSTEM_ID) {
conversationId = ID_AUTO_INCREMENT;
}
// A redo player on the context means we are replaying a previously logged delivery.
CreateMessage redoPlayer = (octxt == null ? null : (CreateMessage) octxt.getPlayer());
boolean needRedo = needRedo(octxt, redoPlayer);
boolean isRedo = redoPlayer != null;
Blob blob = dctxt.getIncomingBlob();
if (blob == null) {
throw ServiceException.FAILURE("Incoming blob not found.", null);
}
// make sure we're parsing headers using the target account's charset
pm.setDefaultCharset(getAccount().getPrefMailDefaultCharset());
// quick check to make sure we don't deliver 5 copies of the same message
String msgidHeader = pm.getMessageID();
boolean isSent = ((flags & Flag.BITMASK_FROM_ME) != 0);
if (!isRedo && msgidHeader != null && !isSent && mSentMessageIDs.containsKey(msgidHeader)) {
// We previously sent a message with this Message-ID; thread the incoming
// copy into the same conversation rather than deduping it away.
Integer sentMsgID = mSentMessageIDs.get(msgidHeader);
if (conversationId == ID_AUTO_INCREMENT) {
conversationId = getConversationIdFromReferent(pm.getMimeMessage(), sentMsgID.intValue());
ZimbraLog.mailbox.debug("duplicate detected but not deduped (%s); will try to slot into conversation %d", msgidHeader, conversationId);
}
}
// caller can't set system flags other than \Draft, \Sent and \Post
// (~ binds tighter than |, so the mask keeps all non-system bits plus those three)
flags &= ~Flag.FLAGS_SYSTEM | Flag.BITMASK_DRAFT | Flag.BITMASK_FROM_ME | Flag.BITMASK_POST;
// caller can't specify non-message flags
flags &= Flag.FLAGS_GENERIC | Flag.FLAGS_MESSAGE;
String digest;
int msgSize;
try {
digest = blob.getDigest();
msgSize = (int) blob.getRawSize();
} catch (IOException e) {
throw ServiceException.FAILURE("Unable to get message properties.", e);
}
// redoRecorder logs this delivery so it can be replayed after a crash.
CreateMessage redoRecorder = new CreateMessage(mId, rcptEmail, pm.getReceivedDate(), dctxt.getShared(), digest, msgSize, folderId, noICal, flags, tags, customData);
StoreIncomingBlob storeRedoRecorder = null;
// strip out unread flag for internal storage (don't do this before redoRecorder initialization)
boolean unread = (flags & Flag.BITMASK_UNREAD) > 0;
flags &= ~Flag.BITMASK_UNREAD;
// "having attachments" is currently tracked via flags
if (pm.hasAttachments()) {
flags |= Flag.BITMASK_ATTACHED;
} else {
flags &= ~Flag.BITMASK_ATTACHED;
}
// priority is calculated from headers
flags &= ~(Flag.BITMASK_HIGH_PRIORITY | Flag.BITMASK_LOW_PRIORITY);
flags |= pm.getPriorityBitmask();
boolean isSpam = folderId == ID_FOLDER_SPAM;
boolean isDraft = (flags & Flag.BITMASK_DRAFT) != 0;
// draft replies get slotted in the same conversation as their parent, if possible
if (isDraft && !isRedo && conversationId == ID_AUTO_INCREMENT && dinfo != null && !Strings.isNullOrEmpty(dinfo.origId)) {
try {
ItemId iid = new ItemId(dinfo.origId, getAccountId());
if (iid.getId() > 0 && iid.belongsTo(this)) {
conversationId = getMessageById(octxt, iid.getId()).getConversationId();
}
} catch (ServiceException e) {
// best-effort: if the parent can't be resolved, keep auto-assignment
}
}
Message msg = null;
boolean success = false;
CustomMetadata.CustomMetadataList extended = MetadataCallback.preDelivery(pm);
if (customData != null) {
if (extended == null) {
extended = customData.asList();
} else {
extended.addSection(customData);
}
}
Threader threader = pm.getThreader(this);
String subject = pm.getNormalizedSubject();
try {
beginTransaction("addMessage", octxt, redoRecorder);
if (isRedo) {
rcptEmail = redoPlayer.getRcptEmail();
}
Tag.NormalizedTags ntags = new Tag.NormalizedTags(this, tags);
Folder folder = getFolderById(folderId);
// step 0: preemptively check for quota issues (actual update is done in Message.create)
if (!getAccount().isMailAllowReceiveButNotSendWhenOverQuota()) {
checkSizeChange(getSize() + staged.getSize());
}
// step 1: get an ID assigned for the new message
int messageId = getNextItemId(!isRedo ? ID_AUTO_INCREMENT : redoPlayer.getMessageId());
List<Conversation> mergeConvs = null;
if (isRedo) {
conversationId = redoPlayer.getConvId();
// fetch the conversations that were merged in as a result of the original delivery...
List<Integer> mergeConvIds = redoPlayer.getMergedConvIds();
mergeConvs = new ArrayList<Conversation>(mergeConvIds.size());
for (int mergeId : mergeConvIds) {
try {
mergeConvs.add(getConversationById(mergeId));
} catch (NoSuchItemException nsie) {
ZimbraLog.mailbox.debug("could not find merge conversation %d", mergeId);
}
}
}
// step 2: figure out where the message belongs
Conversation conv = null;
if (threader.isEnabled()) {
boolean isReply = pm.isReply();
if (conversationId != ID_AUTO_INCREMENT) {
try {
// fetch the requested conversation
// (we'll ensure that it's receiving new mail after the new message is added to it)
conv = getConversationById(conversationId);
ZimbraLog.mailbox.debug("fetched explicitly-specified conversation %d", conv.getId());
} catch (NoSuchItemException nsie) {
if (!isRedo) {
ZimbraLog.mailbox.debug("could not find explicitly-specified conversation %d", conversationId);
conversationId = ID_AUTO_INCREMENT;
}
}
} else if (!isRedo && !isSpam && (isReply || (!isSent && !subject.isEmpty()))) {
List<Conversation> matches = threader.lookupConversation();
if (matches != null && !matches.isEmpty()) {
// file the message into the largest conversation, then later merge any other matching convs
Collections.sort(matches, new MailItem.SortSizeDescending());
conv = matches.remove(0);
mergeConvs = matches;
}
}
}
if (conv != null && conv.isTagged(Flag.FlagInfo.MUTED)) {
// adding a message to a muted conversation marks it muted and read
unread = false;
flags |= Flag.BITMASK_MUTED;
}
// step 3: create the message and update the cache
// and if the message is also an invite, deal with the calendar item
Conversation convTarget = conv instanceof VirtualConversation ? null : conv;
if (convTarget != null) {
ZimbraLog.mailbox.debug("  placing message in existing conversation %d", convTarget.getId());
}
CalendarPartInfo cpi = pm.getCalendarPartInfo();
ZVCalendar iCal = null;
if (cpi != null && CalendarItem.isAcceptableInvite(getAccount(), cpi)) {
iCal = cpi.cal;
}
msg = Message.create(messageId, folder, convTarget, pm, staged, unread, flags, ntags, dinfo, noICal, iCal, extended);
redoRecorder.setMessageId(msg.getId());
// step 4: create a conversation for the message, if necessary
if (threader.isEnabled() && convTarget == null) {
if (conv == null && conversationId == ID_AUTO_INCREMENT) {
// No thread match: wrap the lone message in a virtual conversation.
conv = VirtualConversation.create(this, msg);
ZimbraLog.mailbox.debug("placed message %d in vconv %d", msg.getId(), conv.getId());
redoRecorder.setConvFirstMsgId(-1);
} else {
Message[] contents = null;
VirtualConversation vconv = null;
if (!isRedo) {
vconv = (VirtualConversation) conv;
contents = (vconv == null ? new Message[] { msg } : new Message[] { vconv.getMessage(), msg });
} else {
// Executing redo.
int convFirstMsgId = redoPlayer.getConvFirstMsgId();
Message convFirstMsg = null;
// If there was a virtual conversation, then...
if (convFirstMsgId > 0) {
try {
convFirstMsg = getMessageById(octxt, redoPlayer.getConvFirstMsgId());
} catch (MailServiceException e) {
if (!MailServiceException.NO_SUCH_MSG.equals(e.getCode())) {
throw e;
}
// The first message of conversation may have been deleted
// by user between the time of original operation and redo.
// Handle the case by skipping the updating of its
// conversation ID.
}
// if it is still a standalone message.
if (convFirstMsg != null && convFirstMsg.getConversationId() < 0) {
contents = new Message[] { convFirstMsg, msg };
vconv = new VirtualConversation(this, convFirstMsg);
}
}
if (contents == null) {
contents = new Message[] { msg };
}
}
redoRecorder.setConvFirstMsgId(vconv != null ? vconv.getMessageId() : -1);
conv = createConversation(conversationId, contents);
if (vconv != null) {
ZimbraLog.mailbox.debug("removed vconv %d", vconv.getId());
vconv.removeChild(vconv.getMessage());
}
// associate the first message's reference hashes with the new conversation
if (contents.length == 2) {
threader.changeThreadingTargets(contents[0], conv);
}
}
} else {
// conversation feature turned off
redoRecorder.setConvFirstMsgId(-1);
}
redoRecorder.setConvId(conv != null && !(conv instanceof VirtualConversation) ? conv.getId() : -1);
// if we're threading by references, associate the new message's reference hashes with its conversation
if (!isSpam && !isDraft) {
threader.recordAddedMessage(conv);
}
if (conv != null && mergeConvs != null) {
redoRecorder.setMergedConversations(mergeConvs);
for (Conversation smaller : mergeConvs) {
ZimbraLog.mailbox.info("merging conversation %d for references threading", smaller.getId());
// try {
conv.merge(smaller);
// } catch (ServiceException e) {
// if (!e.getCode().equals(MailServiceException.NO_SUCH_MSG)) {
// throw e;
// }
// }
}
}
// conversations may have shifted, so the threader's cached state is now questionable
threader.reset();
// step 5: write the redolog entries
if (dctxt.getShared()) {
if (dctxt.isFirst() && needRedo) {
// Log entry in redolog for blob save. Blob bytes are logged in the StoreIncoming entry.
// Subsequent CreateMessage ops will reference this blob.
storeRedoRecorder = new StoreIncomingBlob(digest, msgSize, dctxt.getMailboxIdList());
storeRedoRecorder.start(getOperationTimestampMillis());
storeRedoRecorder.setBlobBodyInfo(blob.getFile());
storeRedoRecorder.log();
}
// Link to the file created by StoreIncomingBlob.
redoRecorder.setMessageLinkInfo(blob.getPath());
} else {
// Store the blob data inside the CreateMessage op.
redoRecorder.setMessageBodyInfo(blob.getFile());
}
// step 6: link to existing blob
MailboxBlob mblob = StoreManager.getInstance().link(staged, this, messageId, getOperationChangeID());
markOtherItemDirty(mblob);
// when we created the Message, we used the staged locator/size/digest;
// make sure that data actually matches the final blob in the store
msg.updateBlobData(mblob);
if (dctxt.getMailboxBlob() == null) {
// Set mailbox blob for in case we want to add the message to the
// message cache after delivery.
dctxt.setMailboxBlob(mblob);
}
// step 7: queue new message for indexing
index.add(msg);
success = true;
// step 8: send lawful intercept message
try {
Notification.getInstance().interceptIfNecessary(this, pm.getMimeMessage(), "add message", folder);
} catch (ServiceException e) {
ZimbraLog.mailbox.error("unable to send legal intercept message", e);
}
} finally {
// Commit or abort the StoreIncomingBlob redo record to match the
// outcome of the enclosing transaction.
if (storeRedoRecorder != null) {
if (success) {
storeRedoRecorder.commit();
} else {
storeRedoRecorder.abort();
}
}
endTransaction(success);
if (success) {
// Everything worked. Update the blob field in ParsedMessage
// so the next recipient in the multi-recipient case will link
// to this blob as opposed to saving its own copy.
dctxt.setFirst(false);
}
}
// step 9: remember the Message-ID header so that we can avoid receiving duplicates
if (isSent && !isRedo && msgidHeader != null) {
mSentMessageIDs.put(msgidHeader, msg.getId());
}
return msg;
}
Aggregations