Use of com.github.ambry.messageformat.UpdateRecord in the LinkedIn Ambry project.
From the class DumpDataHelper, method readSingleRecordFromLog.
/**
 * Fetches a single blob record from the log.
 * @param randomAccessFile the {@link RandomAccessFile} referring to the log file
 * @param currentOffset the offset in the log at which the record starts
 * @param clusterMap the {@link ClusterMap} object to use to generate the {@link BlobId}
 * @param currentTimeInMs the current time in ms, used to determine expiration
 * @param metrics the {@link StoreToolsMetrics} instance to record timings in
 * @return the {@link LogBlobRecordInfo} containing the blob record info
 * @throws IOException if there is an I/O error while reading from the log file
 * @throws MessageFormatException if the record is not a valid message of a supported version
 */
static LogBlobRecordInfo readSingleRecordFromLog(RandomAccessFile randomAccessFile, long currentOffset,
    ClusterMap clusterMap, long currentTimeInMs, StoreToolsMetrics metrics)
    throws IOException, MessageFormatException {
  String messageHeader = null;
  BlobId blobId = null;
  String encryptionKey = null;
  String blobProperty = null;
  String userMetadata = null;
  String blobDataOutput = null;
  String deleteMsg = null;
  boolean isDeleted = false;
  boolean isExpired = false;
  long expiresAtMs = -1;
  int totalRecordSize = 0;
  final Timer.Context context = metrics.readSingleBlobRecordFromLogTimeMs.time();
  try {
    randomAccessFile.seek(currentOffset);
    short version = randomAccessFile.readShort();
    MessageFormatRecord.MessageHeader_Format header = null;
    if (version == MessageFormatRecord.Message_Header_Version_V1) {
      ByteBuffer buffer = ByteBuffer.allocate(MessageFormatRecord.MessageHeader_Format_V1.getHeaderSize());
      // The version short has already been consumed from the file, so write it back into the
      // buffer and read the remaining header bytes after it. readFully (rather than read)
      // guarantees the whole header is read even if a single read returns fewer bytes.
      buffer.putShort(version);
      randomAccessFile.readFully(buffer.array(), 2, buffer.capacity() - 2);
      buffer.clear();
      header = new MessageFormatRecord.MessageHeader_Format_V1(buffer);
      messageHeader = " Header - version " + header.getVersion() + " messagesize " + header.getMessageSize()
          + " currentOffset " + currentOffset + " blobPropertiesRelativeOffset "
          + header.getBlobPropertiesRecordRelativeOffset() + " userMetadataRelativeOffset "
          + header.getUserMetadataRecordRelativeOffset() + " dataRelativeOffset "
          + header.getBlobRecordRelativeOffset() + " crc " + header.getCrc();
      totalRecordSize += header.getMessageSize() + buffer.capacity();
    } else if (version == MessageFormatRecord.Message_Header_Version_V2) {
      ByteBuffer buffer = ByteBuffer.allocate(MessageFormatRecord.MessageHeader_Format_V2.getHeaderSize());
      buffer.putShort(version);
      randomAccessFile.readFully(buffer.array(), 2, buffer.capacity() - 2);
      buffer.clear();
      header = new MessageFormatRecord.MessageHeader_Format_V2(buffer);
      messageHeader = " Header - version " + header.getVersion() + " messagesize " + header.getMessageSize()
          + " currentOffset " + currentOffset + " blobEncryptionKeyRelativeOffset "
          + header.getBlobEncryptionKeyRecordRelativeOffset() + " blobPropertiesRelativeOffset "
          + header.getBlobPropertiesRecordRelativeOffset() + " userMetadataRelativeOffset "
          + header.getUserMetadataRecordRelativeOffset() + " dataRelativeOffset "
          + header.getBlobRecordRelativeOffset() + " crc " + header.getCrc();
      totalRecordSize += header.getMessageSize() + buffer.capacity();
    } else if (version == MessageFormatRecord.Message_Header_Version_V3) {
      ByteBuffer buffer = ByteBuffer.allocate(MessageFormatRecord.MessageHeader_Format_V3.getHeaderSize());
      buffer.putShort(version);
      randomAccessFile.readFully(buffer.array(), 2, buffer.capacity() - 2);
      buffer.clear();
      header = new MessageFormatRecord.MessageHeader_Format_V3(buffer);
      messageHeader = " Header - version " + header.getVersion() + " messagesize " + header.getMessageSize()
          + " currentOffset " + currentOffset + " blobEncryptionKeyRelativeOffset "
          + header.getBlobEncryptionKeyRecordRelativeOffset() + " blobPropertiesRelativeOffset "
          + header.getBlobPropertiesRecordRelativeOffset() + " userMetadataRelativeOffset "
          + header.getUserMetadataRecordRelativeOffset() + " dataRelativeOffset "
          + header.getBlobRecordRelativeOffset() + " crc " + header.getCrc();
      totalRecordSize += header.getMessageSize() + buffer.capacity();
    } else {
      throw new MessageFormatException("Header version not supported " + version, MessageFormatErrorCodes.IO_Error);
    }
    // Read the blob id that immediately follows the header.
    InputStream streamlog = Channels.newInputStream(randomAccessFile.getChannel());
    blobId = new BlobId(new DataInputStream(streamlog), clusterMap);
    totalRecordSize += blobId.sizeInBytes();
    if (header.getBlobPropertiesRecordRelativeOffset() != MessageFormatRecord.Message_Header_Invalid_Relative_Offset) {
      // A valid blob properties offset marks a PUT record: an optional encryption key,
      // blob properties, user metadata and the blob data follow.
      ByteBuffer blobEncryptionKey = null;
      if (header.hasEncryptionKeyRecord()) {
        blobEncryptionKey = MessageFormatRecord.deserializeBlobEncryptionKey(streamlog);
        encryptionKey = "EncryptionKey found which is of size " + blobEncryptionKey.remaining();
      }
      BlobProperties props = MessageFormatRecord.deserializeBlobProperties(streamlog);
      expiresAtMs = Utils.addSecondsToEpochTime(props.getCreationTimeInMs(), props.getTimeToLiveInSeconds());
      isExpired = isExpired(expiresAtMs, currentTimeInMs);
      blobProperty = " Blob properties - blobSize " + props.getBlobSize() + " serviceId " + props.getServiceId()
          + ", isExpired " + isExpired + " accountId " + props.getAccountId() + " containerId "
          + props.getContainerId();
      ByteBuffer metadata = MessageFormatRecord.deserializeUserMetadata(streamlog);
      userMetadata = " Metadata - size " + metadata.capacity();
      BlobData blobData = MessageFormatRecord.deserializeBlob(streamlog);
      blobDataOutput = "Blob - size " + blobData.getSize();
    } else {
      // No blob properties record means this is an update record.
      UpdateRecord updateRecord = MessageFormatRecord.deserializeUpdateRecord(streamlog);
      switch (updateRecord.getType()) {
        case DELETE:
          isDeleted = true;
          // DELETE records carry the account/container ids and the update time in ms.
          deleteMsg = "delete change : AccountId:" + updateRecord.getAccountId() + ", ContainerId:"
              + updateRecord.getContainerId() + ", DeletionTimeInMs:" + updateRecord.getUpdateTimeInMs();
          break;
        default:
          // TODO (TTL update): handle TTL update
          throw new IllegalStateException("Unrecognized update record type: " + updateRecord.getType());
      }
    }
    return new LogBlobRecordInfo(messageHeader, blobId, encryptionKey, blobProperty, userMetadata, blobDataOutput,
        deleteMsg, isDeleted, isExpired, expiresAtMs, totalRecordSize);
  } finally {
    context.stop();
  }
}
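
Below is a minimal sketch of how this helper might be driven to walk an entire log file, advancing by totalRecordSize after each record. The dumpLog method and the LogBlobRecordInfo field names it reads (messageHeader, deleteMsg, isDeleted, totalRecordSize — assumed to mirror the constructor arguments above) are illustrative assumptions, not part of the Ambry source:

import java.io.File;
import java.io.RandomAccessFile;
import com.github.ambry.clustermap.ClusterMap;

// Hypothetical driver; it would need to live in the same package as DumpDataHelper,
// since readSingleRecordFromLog is package-private.
static void dumpLog(File logFile, ClusterMap clusterMap, StoreToolsMetrics metrics) throws Exception {
  try (RandomAccessFile randomAccessFile = new RandomAccessFile(logFile, "r")) {
    long offset = 0;
    long logEnd = randomAccessFile.length();
    long currentTimeInMs = System.currentTimeMillis();
    while (offset < logEnd) {
      LogBlobRecordInfo info =
          DumpDataHelper.readSingleRecordFromLog(randomAccessFile, offset, clusterMap, currentTimeInMs, metrics);
      System.out.println(info.messageHeader);
      if (info.isDeleted) {
        System.out.println(info.deleteMsg);
      }
      // Each record reports its own total size (header + blob id + payload),
      // so the next record starts immediately after it.
      offset += info.totalRecordSize;
    }
    // Note: trailing unwritten (zeroed) space in a log segment would surface here as a
    // MessageFormatException rather than a clean end-of-log.
  }
}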