Use of com.github.ambry.utils.ByteBufferOutputStream in project ambry by LinkedIn.
Example: the calculateOffsets method, which also makes use of the inner class MessageReadSetIndexInputStream.
/**
 * Calculates the offsets from the MessageReadSet that need to be sent over the network
 * based on the type of data requested, as indicated by the flag
 */
private void calculateOffsets() throws IOException, MessageFormatException {
  try {
    // get the number of messages in the read set
    int messageCount = readSet.count();
    // for each message, determine the offset and size that need to be sent based on the flag
    sendInfoList = new ArrayList<>(messageCount);
    messageMetadataList = new ArrayList<>(messageCount);
    logger.trace("Calculate offsets of messages for one partition, MessageFormatFlag : {} number of messages : {}", flag, messageCount);
    for (int i = 0; i < messageCount; i++) {
      if (flag == MessageFormatFlags.All) {
        // just copy over the total size and use a relative offset of 0.
        // We do not have to check any version in this case as we don't
        // have to read any data to deserialize anything.
        sendInfoList.add(i, new SendInfo(0, readSet.sizeInBytes(i)));
        messageMetadataList.add(i, null);
        totalSizeToWrite += readSet.sizeInBytes(i);
      } else {
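        // for any other flag, the message header has to be read first to locate the individual records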
        // read header version
        long startTime = SystemTime.getInstance().milliseconds();
        ByteBuffer headerVersion = ByteBuffer.allocate(Version_Field_Size_In_Bytes);
        readSet.writeTo(i, Channels.newChannel(new ByteBufferOutputStream(headerVersion)), 0, Version_Field_Size_In_Bytes);
        logger.trace("Calculate offsets, read header version time: {}", SystemTime.getInstance().milliseconds() - startTime);
        headerVersion.flip();
        short version = headerVersion.getShort();
        if (!isValidHeaderVersion(version)) {
          throw new MessageFormatException("Version not known while reading message - version " + version + ", StoreKey " + readSet.getKeyAt(i), MessageFormatErrorCodes.Unknown_Format_Version);
        }
        ByteBuffer header = ByteBuffer.allocate(getHeaderSizeForVersion(version));
        // read the header
        startTime = SystemTime.getInstance().milliseconds();
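        // clear() only resets the position, so getShort() re-reads the version bytes already fetched;
        // they are copied to the start of the header buffer, then the rest of the header is read from the read set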
        headerVersion.clear();
        header.putShort(headerVersion.getShort());
        readSet.writeTo(i, Channels.newChannel(new ByteBufferOutputStream(header)), Version_Field_Size_In_Bytes, header.capacity() - Version_Field_Size_In_Bytes);
        logger.trace("Calculate offsets, read header time: {}", SystemTime.getInstance().milliseconds() - startTime);
        startTime = SystemTime.getInstance().milliseconds();
        header.flip();
        MessageHeader_Format headerFormat = getMessageHeader(version, header);
        headerFormat.verifyHeader();
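        // the store key record immediately follows the header, so read it starting at the header's end
        // and check that it matches the key the read set claims to hold at this index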
        int storeKeyRelativeOffset = header.capacity();
        StoreKey storeKey = storeKeyFactory.getStoreKey(new DataInputStream(new MessageReadSetIndexInputStream(readSet, i, storeKeyRelativeOffset)));
        if (storeKey.compareTo(readSet.getKeyAt(i)) != 0) {
          throw new MessageFormatException("Id mismatch between metadata and store - metadataId " + readSet.getKeyAt(i) + " storeId " + storeKey, MessageFormatErrorCodes.Store_Key_Id_MisMatch);
        }
        logger.trace("Calculate offsets, verify header time: {}", SystemTime.getInstance().milliseconds() - startTime);
        startTime = SystemTime.getInstance().milliseconds();
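        // based on the requested flag, record the relative offset and size of the record(s) to send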
        if (flag == MessageFormatFlags.BlobProperties) {
          sendInfoList.add(i, new SendInfo(headerFormat.getBlobPropertiesRecordRelativeOffset(), headerFormat.getBlobPropertiesRecordSize()));
          messageMetadataList.add(null);
          totalSizeToWrite += headerFormat.getBlobPropertiesRecordSize();
          logger.trace("Calculate offsets, get total size of blob properties time: {}", SystemTime.getInstance().milliseconds() - startTime);
          logger.trace("Sending blob properties for message relativeOffset : {} size : {}", sendInfoList.get(i).relativeOffset(), sendInfoList.get(i).sizetoSend());
        } else if (flag == MessageFormatFlags.BlobUserMetadata) {
          messageMetadataList.add(headerFormat.hasEncryptionKeyRecord() ? new MessageMetadata(extractEncryptionKey(i, headerFormat.getBlobEncryptionKeyRecordRelativeOffset(), headerFormat.getBlobEncryptionKeyRecordSize())) : null);
          sendInfoList.add(i, new SendInfo(headerFormat.getUserMetadataRecordRelativeOffset(), headerFormat.getUserMetadataRecordSize()));
          totalSizeToWrite += headerFormat.getUserMetadataRecordSize();
          logger.trace("Calculate offsets, get total size of user metadata time: {}", SystemTime.getInstance().milliseconds() - startTime);
          logger.trace("Sending user metadata for message relativeOffset : {} size : {}", sendInfoList.get(i).relativeOffset(), sendInfoList.get(i).sizetoSend());
        } else if (flag == MessageFormatFlags.BlobInfo) {
          messageMetadataList.add(headerFormat.hasEncryptionKeyRecord() ? new MessageMetadata(extractEncryptionKey(i, headerFormat.getBlobEncryptionKeyRecordRelativeOffset(), headerFormat.getBlobEncryptionKeyRecordSize())) : null);
          sendInfoList.add(i, new SendInfo(headerFormat.getBlobPropertiesRecordRelativeOffset(), headerFormat.getBlobPropertiesRecordSize() + headerFormat.getUserMetadataRecordSize()));
          totalSizeToWrite += headerFormat.getBlobPropertiesRecordSize() + headerFormat.getUserMetadataRecordSize();
          logger.trace("Calculate offsets, get total size of blob info time: {}", SystemTime.getInstance().milliseconds() - startTime);
          logger.trace("Sending blob info (blob properties + user metadata) for message relativeOffset : {} size : {}", sendInfoList.get(i).relativeOffset(), sendInfoList.get(i).sizetoSend());
        } else if (flag == MessageFormatFlags.Blob) {
          messageMetadataList.add(headerFormat.hasEncryptionKeyRecord() ? new MessageMetadata(extractEncryptionKey(i, headerFormat.getBlobEncryptionKeyRecordRelativeOffset(), headerFormat.getBlobEncryptionKeyRecordSize())) : null);
          sendInfoList.add(i, new SendInfo(headerFormat.getBlobRecordRelativeOffset(), headerFormat.getBlobRecordSize()));
          totalSizeToWrite += headerFormat.getBlobRecordSize();
          logger.trace("Calculate offsets, get total size of blob time: {}", SystemTime.getInstance().milliseconds() - startTime);
          logger.trace("Sending data for message relativeOffset : {} size : {}", sendInfoList.get(i).relativeOffset(), sendInfoList.get(i).sizetoSend());
        } else {
          throw new MessageFormatException("Unknown flag in request " + flag, MessageFormatErrorCodes.IO_Error);
        }
      }
    }
  } catch (IOException e) {
    logger.trace("IOError when calculating offsets");
    throw new MessageFormatException("IOError when calculating offsets ", e, MessageFormatErrorCodes.IO_Error);
  }
}
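
For reference, here is a minimal, self-contained sketch of the ByteBufferOutputStream pattern the method relies on: wrap a fixed-size ByteBuffer in a ByteBufferOutputStream, adapt it to a WritableByteChannel with Channels.newChannel, and write exactly the bytes to be parsed. The class name and the stand-in source buffer below are made up for illustration; only the ByteBuffer constructor already shown in the snippet above is assumed, and in the real code the bytes come from MessageReadSet.writeTo rather than a local buffer.

import com.github.ambry.utils.ByteBufferOutputStream;

import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.Channels;
import java.nio.channels.WritableByteChannel;

public class ByteBufferOutputStreamSketch {
  public static void main(String[] args) throws IOException {
    // fixed-size buffer that will receive exactly one short, mirroring the header-version read above
    ByteBuffer versionBuffer = ByteBuffer.allocate(Short.BYTES);

    // ByteBufferOutputStream adapts the ByteBuffer to an OutputStream; Channels.newChannel adapts that
    // to the WritableByteChannel that MessageReadSet.writeTo(...) expects
    WritableByteChannel channel = Channels.newChannel(new ByteBufferOutputStream(versionBuffer));

    // stand-in for the read set: a plain source buffer holding a hypothetical header version of 1
    ByteBuffer source = ByteBuffer.allocate(Short.BYTES);
    source.putShort((short) 1);
    source.flip();
    channel.write(source);

    // flip and parse, exactly as calculateOffsets does before calling getShort()
    versionBuffer.flip();
    short version = versionBuffer.getShort();
    System.out.println("header version = " + version);
  }
}

Writing through the channel fills the backing ByteBuffer directly, which is what lets calculateOffsets parse fixed-size header fields with getShort() without copying through intermediate byte arrays.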