Use of com.github.ambry.messageformat.MessageFormatException in project ambry by LinkedIn.
From the class GetBlobInfoOperation, method handleBody.
/**
 * Handle the body of the response: Deserialize and set the {@link BlobInfo} to return if no decryption is required.
 * If decryption is required, submit a job for decryption.
 * @param payload the body of the response.
 * @param messageMetadata the {@link MessageMetadata} associated with the message.
 * @throws IOException if there is an IOException while deserializing the body.
 * @throws MessageFormatException if there is a MessageFormatException while deserializing the body.
 */
private void handleBody(InputStream payload, MessageMetadata messageMetadata)
    throws IOException, MessageFormatException {
  ByteBuffer encryptionKey = messageMetadata == null ? null : messageMetadata.getEncryptionKey();
  serverBlobProperties = MessageFormatRecord.deserializeBlobProperties(payload);
  ByteBuffer userMetadata = MessageFormatRecord.deserializeUserMetadata(payload);
  if (encryptionKey == null) {
    // if the blob is not encrypted, move the state to Complete
    operationResult = new GetBlobResultInternal(
        new GetBlobResult(new BlobInfo(serverBlobProperties, userMetadata.array()), null), null);
  } else {
    // submit a decrypt job
    progressTracker.initializeDecryptionTracker();
    logger.trace("Submitting decrypt job for {}", blobId);
    decryptJobMetricsTracker.onJobSubmission();
    long startTimeMs = System.currentTimeMillis();
    cryptoJobHandler.submitJob(new DecryptJob(blobId, encryptionKey.duplicate(), null, userMetadata, cryptoService,
        kms, decryptJobMetricsTracker, (DecryptJob.DecryptJobResult result, Exception exception) -> {
          decryptJobMetricsTracker.onJobResultProcessingStart();
          logger.trace("Handling decrypt job callback results for {}", blobId);
          routerMetrics.decryptTimeMs.update(System.currentTimeMillis() - startTimeMs);
          if (exception == null) {
            logger.trace("Successfully updated decrypt job callback results for {}", blobId);
            operationResult = new GetBlobResultInternal(
                new GetBlobResult(new BlobInfo(serverBlobProperties, result.getDecryptedUserMetadata().array()), null),
                null);
            progressTracker.setDecryptionSuccess();
          } else {
            decryptJobMetricsTracker.incrementOperationError();
            logger.trace("Exception {} thrown on decryption for {}", exception, blobId);
            setOperationException(new RouterException("Exception thrown on decrypting the content for " + blobId,
                exception, RouterErrorCode.UnexpectedInternalError));
            progressTracker.setDecryptionFailed();
          }
          decryptJobMetricsTracker.onJobResultProcessingComplete();
          routerCallback.onPollReady();
        }));
  }
}
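The decrypt path above is a submit-and-callback pattern: the job is handed to cryptoJobHandler, and the result arrives later on a callback that records metrics and flips the progress tracker. Below is a minimal, self-contained sketch of that pattern; every name in it (CryptoJobSketch, submitJob, decrypt) is a hypothetical stand-in, not the actual ambry API.

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.function.BiConsumer;

// Minimal sketch of the submit-and-callback decryption pattern used above.
class CryptoJobSketch {
  private final ExecutorService workers = Executors.newFixedThreadPool(2);

  /** Submits work; the callback later receives exactly one non-null argument: result or exception. */
  void submitJob(String blobId, BiConsumer<byte[], Exception> callback) {
    workers.submit(() -> {
      try {
        byte[] decrypted = decrypt(blobId); // placeholder for the real KMS/crypto-service call
        callback.accept(decrypted, null);
      } catch (Exception e) {
        callback.accept(null, e);
      }
    });
  }

  private byte[] decrypt(String blobId) {
    return new byte[0]; // stand-in payload
  }
}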
Use of com.github.ambry.messageformat.MessageFormatException in project ambry by LinkedIn.
From the class AmbryRequests, method handleGetRequest.
public void handleGetRequest(Request request) throws IOException, InterruptedException {
  GetRequest getRequest = GetRequest.readFrom(new DataInputStream(request.getInputStream()), clusterMap);
  Histogram responseQueueTime = null;
  Histogram responseSendTime = null;
  Histogram responseTotalTime = null;
  long requestQueueTime = SystemTime.getInstance().milliseconds() - request.getStartTimeInMs();
  long totalTimeSpent = requestQueueTime;
  if (getRequest.getMessageFormatFlag() == MessageFormatFlags.Blob) {
    metrics.getBlobRequestQueueTimeInMs.update(requestQueueTime);
    metrics.getBlobRequestRate.mark();
    responseQueueTime = metrics.getBlobResponseQueueTimeInMs;
    responseSendTime = metrics.getBlobSendTimeInMs;
    responseTotalTime = metrics.getBlobTotalTimeInMs;
  } else if (getRequest.getMessageFormatFlag() == MessageFormatFlags.BlobProperties) {
    metrics.getBlobPropertiesRequestQueueTimeInMs.update(requestQueueTime);
    metrics.getBlobPropertiesRequestRate.mark();
    responseQueueTime = metrics.getBlobPropertiesResponseQueueTimeInMs;
    responseSendTime = metrics.getBlobPropertiesSendTimeInMs;
    responseTotalTime = metrics.getBlobPropertiesTotalTimeInMs;
  } else if (getRequest.getMessageFormatFlag() == MessageFormatFlags.BlobUserMetadata) {
    metrics.getBlobUserMetadataRequestQueueTimeInMs.update(requestQueueTime);
    metrics.getBlobUserMetadataRequestRate.mark();
    responseQueueTime = metrics.getBlobUserMetadataResponseQueueTimeInMs;
    responseSendTime = metrics.getBlobUserMetadataSendTimeInMs;
    responseTotalTime = metrics.getBlobUserMetadataTotalTimeInMs;
  } else if (getRequest.getMessageFormatFlag() == MessageFormatFlags.BlobInfo) {
    metrics.getBlobInfoRequestQueueTimeInMs.update(requestQueueTime);
    metrics.getBlobInfoRequestRate.mark();
    responseQueueTime = metrics.getBlobInfoResponseQueueTimeInMs;
    responseSendTime = metrics.getBlobInfoSendTimeInMs;
    responseTotalTime = metrics.getBlobInfoTotalTimeInMs;
  } else if (getRequest.getMessageFormatFlag() == MessageFormatFlags.All) {
    metrics.getBlobAllRequestQueueTimeInMs.update(requestQueueTime);
    metrics.getBlobAllRequestRate.mark();
    responseQueueTime = metrics.getBlobAllResponseQueueTimeInMs;
    responseSendTime = metrics.getBlobAllSendTimeInMs;
    responseTotalTime = metrics.getBlobAllTotalTimeInMs;
  }
  long startTime = SystemTime.getInstance().milliseconds();
  GetResponse response = null;
  try {
    List<Send> messagesToSendList = new ArrayList<Send>(getRequest.getPartitionInfoList().size());
    List<PartitionResponseInfo> partitionResponseInfoList =
        new ArrayList<PartitionResponseInfo>(getRequest.getPartitionInfoList().size());
    for (PartitionRequestInfo partitionRequestInfo : getRequest.getPartitionInfoList()) {
      ServerErrorCode error = validateRequest(partitionRequestInfo.getPartition(), RequestOrResponseType.GetRequest);
      if (error != ServerErrorCode.No_Error) {
        logger.error("Validating get request failed for partition {} with error {}",
            partitionRequestInfo.getPartition(), error);
        PartitionResponseInfo partitionResponseInfo =
            new PartitionResponseInfo(partitionRequestInfo.getPartition(), error);
        partitionResponseInfoList.add(partitionResponseInfo);
      } else {
        try {
          Store storeToGet = storageManager.getStore(partitionRequestInfo.getPartition());
          EnumSet<StoreGetOptions> storeGetOptions = EnumSet.noneOf(StoreGetOptions.class);
          // Currently only one option is supported.
          if (getRequest.getGetOption() == GetOption.Include_Expired_Blobs) {
            storeGetOptions = EnumSet.of(StoreGetOptions.Store_Include_Expired);
          }
          if (getRequest.getGetOption() == GetOption.Include_Deleted_Blobs) {
            storeGetOptions = EnumSet.of(StoreGetOptions.Store_Include_Deleted);
          }
          if (getRequest.getGetOption() == GetOption.Include_All) {
            storeGetOptions = EnumSet.of(StoreGetOptions.Store_Include_Deleted, StoreGetOptions.Store_Include_Expired);
          }
          StoreInfo info = storeToGet.get(partitionRequestInfo.getBlobIds(), storeGetOptions);
          MessageFormatSend blobsToSend = new MessageFormatSend(info.getMessageReadSet(),
              getRequest.getMessageFormatFlag(), messageFormatMetrics, storeKeyFactory);
          PartitionResponseInfo partitionResponseInfo = new PartitionResponseInfo(partitionRequestInfo.getPartition(),
              info.getMessageReadSetInfo(), blobsToSend.getMessageMetadataList());
          messagesToSendList.add(blobsToSend);
          partitionResponseInfoList.add(partitionResponseInfo);
        } catch (StoreException e) {
          boolean logInErrorLevel = false;
          if (e.getErrorCode() == StoreErrorCodes.ID_Not_Found) {
            metrics.idNotFoundError.inc();
          } else if (e.getErrorCode() == StoreErrorCodes.TTL_Expired) {
            metrics.ttlExpiredError.inc();
          } else if (e.getErrorCode() == StoreErrorCodes.ID_Deleted) {
            metrics.idDeletedError.inc();
          } else if (e.getErrorCode() == StoreErrorCodes.Authorization_Failure) {
            metrics.getAuthorizationFailure.inc();
          } else {
            metrics.unExpectedStoreGetError.inc();
            logInErrorLevel = true;
          }
          if (logInErrorLevel) {
            logger.error("Store exception on a get with error code {} for partition {}", e.getErrorCode(),
                partitionRequestInfo.getPartition(), e);
          } else {
            logger.trace("Store exception on a get with error code {} for partition {}", e.getErrorCode(),
                partitionRequestInfo.getPartition(), e);
          }
          PartitionResponseInfo partitionResponseInfo = new PartitionResponseInfo(partitionRequestInfo.getPartition(),
              ErrorMapping.getStoreErrorMapping(e.getErrorCode()));
          partitionResponseInfoList.add(partitionResponseInfo);
        } catch (MessageFormatException e) {
          logger.error("Message format exception on a get with error code " + e.getErrorCode()
              + " for partitionRequestInfo " + partitionRequestInfo, e);
          if (e.getErrorCode() == MessageFormatErrorCodes.Data_Corrupt) {
            metrics.dataCorruptError.inc();
          } else if (e.getErrorCode() == MessageFormatErrorCodes.Unknown_Format_Version) {
            metrics.unknownFormatError.inc();
          }
          PartitionResponseInfo partitionResponseInfo = new PartitionResponseInfo(partitionRequestInfo.getPartition(),
              ErrorMapping.getMessageFormatErrorMapping(e.getErrorCode()));
          partitionResponseInfoList.add(partitionResponseInfo);
        }
      }
    }
    CompositeSend compositeSend = new CompositeSend(messagesToSendList);
    response = new GetResponse(getRequest.getCorrelationId(), getRequest.getClientId(), partitionResponseInfoList,
        compositeSend, ServerErrorCode.No_Error);
  } catch (Exception e) {
    logger.error("Unknown exception for request " + getRequest, e);
    response = new GetResponse(getRequest.getCorrelationId(), getRequest.getClientId(), ServerErrorCode.Unknown_Error);
  } finally {
    long processingTime = SystemTime.getInstance().milliseconds() - startTime;
    totalTimeSpent += processingTime;
    publicAccessLogger.info("{} {} processingTime {}", getRequest, response, processingTime);
    if (getRequest.getMessageFormatFlag() == MessageFormatFlags.Blob) {
      metrics.getBlobProcessingTimeInMs.update(processingTime);
      metrics.updateGetBlobProcessingTimeBySize(response.sizeInBytes(), processingTime);
    } else if (getRequest.getMessageFormatFlag() == MessageFormatFlags.BlobProperties) {
      metrics.getBlobPropertiesProcessingTimeInMs.update(processingTime);
    } else if (getRequest.getMessageFormatFlag() == MessageFormatFlags.BlobUserMetadata) {
      metrics.getBlobUserMetadataProcessingTimeInMs.update(processingTime);
    } else if (getRequest.getMessageFormatFlag() == MessageFormatFlags.BlobInfo) {
      metrics.getBlobInfoProcessingTimeInMs.update(processingTime);
    } else if (getRequest.getMessageFormatFlag() == MessageFormatFlags.All) {
      metrics.getBlobAllProcessingTimeInMs.update(processingTime);
      metrics.updateGetBlobProcessingTimeBySize(response.sizeInBytes(), processingTime);
    }
  }
  sendGetResponse(requestResponseChannel, response, request, responseQueueTime, responseSendTime, responseTotalTime,
      totalTimeSpent, response.sizeInBytes(), getRequest.getMessageFormatFlag(), metrics);
}
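The three sequential if statements above that translate GetOption into StoreGetOptions are mutually exclusive (as the "only one option is supported" comment notes), so the translation can also be read as a single switch. The sketch below restates that mapping in isolation; the nested enum definitions copy the constants used above purely to keep the example self-contained.

import java.util.EnumSet;

// Compact restatement of the GetOption -> StoreGetOptions mapping from handleGetRequest above.
class GetOptionMapping {
  enum GetOption { None, Include_Expired_Blobs, Include_Deleted_Blobs, Include_All }
  enum StoreGetOptions { Store_Include_Expired, Store_Include_Deleted }

  static EnumSet<StoreGetOptions> toStoreGetOptions(GetOption option) {
    switch (option) {
      case Include_Expired_Blobs:
        return EnumSet.of(StoreGetOptions.Store_Include_Expired);
      case Include_Deleted_Blobs:
        return EnumSet.of(StoreGetOptions.Store_Include_Deleted);
      case Include_All:
        return EnumSet.of(StoreGetOptions.Store_Include_Deleted, StoreGetOptions.Store_Include_Expired);
      default:
        return EnumSet.noneOf(StoreGetOptions.class); // no option set: plain get
    }
  }
}

Because a request carries at most one GetOption, this behaves identically to the if-chain above.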
Use of com.github.ambry.messageformat.MessageFormatException in project ambry by LinkedIn.
From the class DumpDataTool, method compareIndexEntriesToLogContent.
/**
 * Compares every entry in an index file with those in the log, checking that each blob referenced by the index
 * can be deserialized from the log.
 * @param indexFile the file that represents the index segment.
 * @param checkLogEndOffsetMatch if {@code true}, checks that the end offset of the log matches the end offset of
 *                               the index.
 * @throws Exception
 */
private void compareIndexEntriesToLogContent(File indexFile, boolean checkLogEndOffsetMatch) throws Exception {
  if (!indexFile.exists()) {
    throw new IllegalArgumentException("File does not exist " + indexFile);
  }
  final Timer.Context context = metrics.compareIndexFileToLogTimeMs.time();
  try {
    logger.info("Dumping index {}", indexFile.getAbsolutePath());
    StoreKeyFactory storeKeyFactory = new BlobIdFactory(clusterMap);
    StoreConfig config = new StoreConfig(new VerifiableProperties(new Properties()));
    MetricRegistry metricRegistry = new MetricRegistry();
    StoreMetrics storeMetrics = new StoreMetrics(metricRegistry);
    IndexSegment segment = new IndexSegment(indexFile, false, storeKeyFactory, config, storeMetrics,
        new Journal(indexFile.getParent(), 0, 0), time);
    Offset startOffset = segment.getStartOffset();
    TreeMap<Long, Long> coveredRanges = new TreeMap<>();
    String logFileName = segment.getLogSegmentName().toFilename();
    File logFile = new File(indexFile.getParent(), logFileName);
    if (!logFile.exists()) {
      throw new IllegalStateException("Log file does not exist " + logFile);
    }
    RandomAccessFile randomAccessFile = new RandomAccessFile(logFile, "r");
    long logFileSize = randomAccessFile.getChannel().size();
    List<MessageInfo> entries = new ArrayList<>();
    segment.getEntriesSince(null, new FindEntriesCondition(Long.MAX_VALUE), entries, new AtomicLong(0), false);
    for (MessageInfo entry : entries) {
      StoreKey key = entry.getStoreKey();
      IndexValue value = segment.find(key).last();
      boolean isDeleted = value.isFlagSet(IndexValue.Flags.Delete_Index);
      if (value.getOffset().getOffset() < logFileSize) {
        boolean success = readFromLogAndVerify(randomAccessFile, key.getID(), value, coveredRanges);
        if (success) {
          if (isDeleted) {
            long originalOffset = value.getOriginalMessageOffset();
            if (originalOffset != -1) {
              if (!coveredRanges.containsKey(originalOffset)) {
                if (startOffset.getOffset() > originalOffset) {
                  logger.trace("Put record at {} with delete msg offset {} ignored because it is prior to startOffset {}",
                      originalOffset, value.getOffset(), startOffset);
                } else {
                  try {
                    DumpDataHelper.LogBlobRecordInfo logBlobRecordInfo =
                        DumpDataHelper.readSingleRecordFromLog(randomAccessFile, originalOffset, clusterMap,
                            currentTimeInMs, metrics);
                    coveredRanges.put(originalOffset, originalOffset + logBlobRecordInfo.totalRecordSize);
                    logger.trace("PUT record {} with start offset {} and end offset {} for a delete msg {} at offset {}",
                        logBlobRecordInfo.blobId, originalOffset, (originalOffset + logBlobRecordInfo.totalRecordSize),
                        key.getID(), value.getOffset());
                    if (!logBlobRecordInfo.blobId.getID().equals(key.getID())) {
                      logger.error("BlobId value mismatch between delete record {} and put record {}", key.getID(),
                          logBlobRecordInfo.blobId.getID());
                    }
                  } catch (IllegalArgumentException e) {
                    metrics.logDeserializationError.inc();
                    logger.error("Illegal arg exception thrown at {}, while reading blob starting at offset {} with exception: ",
                        randomAccessFile.getChannel().position(), originalOffset, e);
                  } catch (MessageFormatException e) {
                    metrics.logDeserializationError.inc();
                    logger.error("MessageFormat exception thrown at {} while reading blob starting at offset {} with exception: ",
                        randomAccessFile.getChannel().position(), originalOffset, e);
                  } catch (EOFException e) {
                    metrics.endOfFileOnDumpLogError.inc();
                    logger.error("EOFException thrown at {}", randomAccessFile.getChannel().position(), e);
                  } catch (Exception e) {
                    metrics.unknownErrorOnDumpIndex.inc();
                    logger.error("Unknown exception thrown {}", e.getMessage(), e);
                  }
                }
              }
            }
          }
        } else {
          metrics.indexToLogBlobRecordComparisonFailure.inc();
          logger.error("Failed for key {} with value {}", key, value);
        }
      } else {
        logger.trace("Blob's {} offset {} is outside of log size {}, with a diff of {}", key,
            value.getOffset().getOffset(), logFileSize, (value.getOffset().getOffset() - logFileSize));
      }
    }
    throttler.maybeThrottle(entries.size());
    long indexEndOffset = segment.getEndOffset().getOffset();
    if (checkLogEndOffsetMatch && indexEndOffset != randomAccessFile.length()) {
      metrics.indexLogEndOffsetMisMatchError.inc();
      logger.error("Log end offset {} and index end offset {} do not match", randomAccessFile.length(), indexEndOffset);
    }
    logRangesNotCovered(coveredRanges, indexEndOffset);
  } finally {
    context.stop();
  }
}
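coveredRanges collects each verified record as a start-to-end offset pair, and logRangesNotCovered (whose implementation is not shown on this page) presumably walks that map to report gaps. A minimal sketch of such a gap scan, assuming half-open [start, end) ranges; this is an illustration of the bookkeeping, not the tool's actual code.

import java.util.Map;
import java.util.TreeMap;

// Sketch of a gap scan over covered [start, end) ranges, in the spirit of the
// logRangesNotCovered call above.
class CoverageScan {
  static void logRangesNotCovered(TreeMap<Long, Long> coveredRanges, long endOffset) {
    long cursor = 0;
    for (Map.Entry<Long, Long> range : coveredRanges.entrySet()) {
      if (range.getKey() > cursor) {
        System.out.printf("Uncovered range [%d, %d)%n", cursor, range.getKey());
      }
      cursor = Math.max(cursor, range.getValue()); // ranges may overlap; keep the furthest end
    }
    if (cursor < endOffset) {
      System.out.printf("Uncovered range [%d, %d)%n", cursor, endOffset);
    }
  }
}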
Use of com.github.ambry.messageformat.MessageFormatException in project ambry by LinkedIn.
From the class DumpLogTool, method dumpLog.
/**
 * Dumps all blobs in a given log file.
 * @param file the log file to be parsed.
 * @param startOffset the offset from which records need to be dumped; {@code -1} to start from the beginning of
 *                    the file.
 * @param endOffset the offset until which records need to be dumped; {@code -1} to dump until the end of the file.
 * @param blobs list of blobIds to filter for. {@code null} if no filtering is required.
 * @param blobIdToLogRecord {@link Map} of blobId to {@link LogBlobStatus} that holds information about the blobs
 *                          in the log after parsing.
 * @throws IOException
 */
private void dumpLog(File file, long startOffset, long endOffset, ArrayList<String> blobs,
    Map<String, LogBlobStatus> blobIdToLogRecord) throws IOException {
  logger.info("Dumping log file {}", file.getAbsolutePath());
  long currentOffset = 0;
  RandomAccessFile randomAccessFile = new RandomAccessFile(file, "r");
  long fileSize = file.length();
  boolean lastBlobFailed = false;
  if (startOffset != -1) {
    currentOffset = startOffset;
  }
  if (endOffset == -1) {
    endOffset = fileSize;
  }
  logger.info("Starting dump from offset {}", currentOffset);
  while (currentOffset < endOffset) {
    try {
      DumpDataHelper.LogBlobRecordInfo logBlobRecordInfo =
          DumpDataHelper.readSingleRecordFromLog(randomAccessFile, currentOffset, clusterMap, currentTimeInMs, metrics);
      if (throttler != null) {
        throttler.maybeThrottle(logBlobRecordInfo.totalRecordSize);
      }
      if (lastBlobFailed && !silent) {
        logger.info("Successful record found at {} after some failures", currentOffset);
      }
      lastBlobFailed = false;
      if (!logBlobRecordInfo.isDeleted) {
        if (blobs != null) {
          if (blobs.contains(logBlobRecordInfo.blobId.getID())) {
            logger.info("{}\n{}\n{}\n{}\n{}\n{}", logBlobRecordInfo.messageHeader, logBlobRecordInfo.blobId,
                logBlobRecordInfo.blobEncryptionKey, logBlobRecordInfo.blobProperty, logBlobRecordInfo.userMetadata,
                logBlobRecordInfo.blobDataOutput);
            updateBlobIdToLogRecordMap(blobIdToLogRecord, logBlobRecordInfo.blobId.getID(), currentOffset,
                !logBlobRecordInfo.isDeleted, logBlobRecordInfo.isExpired);
          }
        } else if (!silent) {
          logger.info("{}\n{}\n{}\n{}\n{}\n{} end offset {}", logBlobRecordInfo.messageHeader, logBlobRecordInfo.blobId,
              logBlobRecordInfo.blobEncryptionKey, logBlobRecordInfo.blobProperty, logBlobRecordInfo.userMetadata,
              logBlobRecordInfo.blobDataOutput, (currentOffset + logBlobRecordInfo.totalRecordSize));
          updateBlobIdToLogRecordMap(blobIdToLogRecord, logBlobRecordInfo.blobId.getID(), currentOffset,
              !logBlobRecordInfo.isDeleted, logBlobRecordInfo.isExpired);
        }
      } else {
        if (blobs != null) {
          if (blobs.contains(logBlobRecordInfo.blobId.getID())) {
            logger.info("{}\n{}\n{}", logBlobRecordInfo.messageHeader, logBlobRecordInfo.blobId,
                logBlobRecordInfo.deleteMsg);
            updateBlobIdToLogRecordMap(blobIdToLogRecord, logBlobRecordInfo.blobId.getID(), currentOffset,
                !logBlobRecordInfo.isDeleted, logBlobRecordInfo.isExpired);
          }
        } else if (!silent) {
          logger.info("{}\n{}\n{} end offset {}", logBlobRecordInfo.messageHeader, logBlobRecordInfo.blobId,
              logBlobRecordInfo.deleteMsg, (currentOffset + logBlobRecordInfo.totalRecordSize));
          updateBlobIdToLogRecordMap(blobIdToLogRecord, logBlobRecordInfo.blobId.getID(), currentOffset,
              !logBlobRecordInfo.isDeleted, logBlobRecordInfo.isExpired);
        }
      }
      currentOffset += logBlobRecordInfo.totalRecordSize;
    } catch (IllegalArgumentException e) {
      if (!lastBlobFailed) {
        metrics.logDeserializationError.inc();
        logger.error("Illegal arg exception thrown at {}, while reading blob starting at offset {} with exception: ",
            randomAccessFile.getChannel().position(), currentOffset, e);
      }
      currentOffset++;
      lastBlobFailed = true;
    } catch (MessageFormatException e) {
      if (!lastBlobFailed) {
        metrics.logDeserializationError.inc();
        logger.error("MessageFormat exception thrown at {} while reading blob starting at offset {} with exception: ",
            randomAccessFile.getChannel().position(), currentOffset, e);
      }
      currentOffset++;
      lastBlobFailed = true;
    } catch (EOFException e) {
      metrics.endOfFileOnDumpLogError.inc();
      logger.error("EOFException thrown at {}, Cause: {}, Msg: {}, stacktrace ",
          randomAccessFile.getChannel().position(), e.getCause(), e.getMessage(), e);
      throw e;
    } catch (Exception e) {
      if (!lastBlobFailed) {
        metrics.unknownErrorOnDumpLog.inc();
        logger.error("Unknown exception thrown with Cause {}, Msg: {}, stacktrace ", e.getCause(), e.getMessage(), e);
        if (!silent) {
          logger.info("Trying out next offset {}", currentOffset + 1);
        }
      }
      currentOffset++;
      lastBlobFailed = true;
    }
  }
  logger.info("Dumped until offset {}", currentOffset);
}
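On a failed deserialization, dumpLog advances a single byte and retries, using lastBlobFailed to log only the first failure of a corrupt run and the first success after it. The sketch below isolates that resynchronization loop; RecordReader and readRecord are hypothetical stand-ins for DumpDataHelper.readSingleRecordFromLog.

// Stripped-down sketch of dumpLog's one-byte resynchronization strategy.
class ResyncScanSketch {
  interface RecordReader {
    /** Returns the size of the record at the offset; throws on a corrupt record. */
    long readRecord(long offset) throws Exception;
  }

  static void scan(RecordReader reader, long endOffset) {
    long offset = 0;
    boolean lastFailed = false;
    while (offset < endOffset) {
      try {
        long recordSize = reader.readRecord(offset);
        if (lastFailed) {
          System.out.println("Recovered at offset " + offset);
        }
        lastFailed = false;
        offset += recordSize; // jump over the whole record on success
      } catch (Exception e) {
        if (!lastFailed) {
          System.out.println("Corrupt record at offset " + offset + ": " + e.getMessage());
        }
        offset++; // slide forward one byte until a valid record header is found again
        lastFailed = true;
      }
    }
  }
}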
Use of com.github.ambry.messageformat.MessageFormatException in project ambry by LinkedIn.
From the class HardDeleteVerifier, method deserializeBlobProperties.
boolean deserializeBlobProperties(InputStream streamlog, InputStream oldStreamlog, boolean isDeleted)
    throws ContinueException {
  boolean caughtException = false;
  boolean caughtExceptionInOld = false;
  BlobProperties props = null;
  BlobProperties oldProps = null;
  try {
    props = MessageFormatRecord.deserializeBlobProperties(streamlog);
  } catch (MessageFormatException | IOException e) {
    caughtException = true;
  }
  try {
    oldProps = MessageFormatRecord.deserializeBlobProperties(oldStreamlog);
  } catch (MessageFormatException | IOException e) {
    caughtExceptionInOld = true;
  }
  if (!caughtException && !caughtExceptionInOld) {
    // The comparison is only meaningful (and null-safe) when both streams deserialized.
    if (!props.toString().equals(oldProps.toString())) {
      System.out.println("Blob properties mismatch!");
      return false;
    }
  } else if (!caughtExceptionInOld) {
    // Only the current log failed to deserialize: count the corruption and move on.
    if (isDeleted) {
      corruptDeleted++;
    } else {
      corruptNonDeleted++;
    }
    throw new ContinueException("blob properties could not be deserialized.");
  } else {
    throw new ContinueException("blob properties could not be deserialized from the old log.");
  }
  return true;
}
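All of the usages on this page ultimately guard a call to MessageFormatRecord.deserializeBlobProperties, which, per the signatures shown above, can throw MessageFormatException or IOException. Below is a minimal consumer that folds both failure modes into a null result; the BlobPropertiesReader wrapper itself is illustrative, not part of ambry.

import java.io.IOException;
import java.io.InputStream;

import com.github.ambry.messageformat.BlobProperties;
import com.github.ambry.messageformat.MessageFormatException;
import com.github.ambry.messageformat.MessageFormatRecord;

// Minimal consumer of the deserializeBlobProperties call shared by the usages above.
class BlobPropertiesReader {
  static BlobProperties tryDeserialize(InputStream stream) {
    try {
      return MessageFormatRecord.deserializeBlobProperties(stream);
    } catch (MessageFormatException | IOException e) {
      // A corrupt or truncated record surfaces as one of these two exceptions.
      return null;
    }
  }
}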