Use of com.github.ambry.store.MessageInfo in project ambry by linkedin.
The class MessageFormatWriteSetTest, method writeSetTest.
@Test
public void writeSetTest() throws IOException, StoreException {
  byte[] buf = new byte[2000];
  MessageInfo info1 = new MessageInfo(new MockId("id1"), 1000, 123, Utils.getRandomShort(TestUtils.RANDOM),
      Utils.getRandomShort(TestUtils.RANDOM), System.currentTimeMillis() + TestUtils.RANDOM.nextInt());
  MessageInfo info2 = new MessageInfo(new MockId("id2"), 1000, 123, Utils.getRandomShort(TestUtils.RANDOM),
      Utils.getRandomShort(TestUtils.RANDOM), System.currentTimeMillis() + TestUtils.RANDOM.nextInt());
  List<MessageInfo> infoList = new ArrayList<MessageInfo>();
  infoList.add(info1);
  infoList.add(info2);
  // the stream backs two 1000-byte messages, so the write set should write exactly 2000 bytes
  ByteBufferInputStream byteBufferInputStream = new ByteBufferInputStream(ByteBuffer.wrap(buf));
  MessageFormatWriteSet set = new MessageFormatWriteSet(byteBufferInputStream, infoList, false);
  MockWrite write = new MockWrite(2000);
  long written = set.writeTo(write);
  Assert.assertEquals(2000, written);
  Assert.assertEquals(2000, write.getBuffer().limit());
  Assert.assertArrayEquals(buf, write.getBuffer().array());
}
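The MockWrite above is a test helper that buffers whatever is appended to it. Below is a minimal sketch of such a helper, assuming ambry's Write interface exposes appendFrom(ByteBuffer) and appendFrom(ReadableByteChannel, long) as used by writeTo further down; the real test helper and the exact StoreException/StoreErrorCodes signatures may differ.

import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.ReadableByteChannel;

// Sketch of a buffering Write for tests; not ambry's actual MockWrite.
class MockWrite implements Write {
  private final ByteBuffer buffer;

  MockWrite(int size) {
    buffer = ByteBuffer.allocate(size);
  }

  @Override
  public int appendFrom(ByteBuffer buf) {
    int written = buf.remaining();
    buffer.put(buf);
    return written;
  }

  @Override
  public void appendFrom(ReadableByteChannel channel, long size) throws StoreException {
    // Read exactly 'size' bytes from the channel into the backing buffer.
    // Assumes 'size' fits in the remaining capacity of the buffer.
    int savedLimit = buffer.limit();
    buffer.limit(buffer.position() + (int) size);
    try {
      while (buffer.hasRemaining()) {
        if (channel.read(buffer) < 0) {
          // the error code used here is an assumption for illustration
          throw new StoreException("Channel ended before all bytes were read", StoreErrorCodes.IOError);
        }
      }
    } catch (IOException e) {
      throw new StoreException("IO error while appending from channel", StoreErrorCodes.IOError);
    }
    buffer.limit(savedLimit);
  }

  ByteBuffer getBuffer() {
    return buffer;
  }
}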
Use of com.github.ambry.store.MessageInfo in project ambry by linkedin.
The class ReadInputStream, method recover.
@Override
public List<MessageInfo> recover(Read read, long startOffset, long endOffset, StoreKeyFactory factory) throws IOException {
  ArrayList<MessageInfo> messageRecovered = new ArrayList<MessageInfo>();
  try {
    while (startOffset < endOffset) {
      // read the message header version
      ByteBuffer headerVersion = ByteBuffer.allocate(Version_Field_Size_In_Bytes);
      if (startOffset + Version_Field_Size_In_Bytes > endOffset) {
        throw new IndexOutOfBoundsException("Unable to read version. Reached end of stream");
      }
      read.readInto(headerVersion, startOffset);
      startOffset += headerVersion.capacity();
      headerVersion.flip();
      short version = headerVersion.getShort();
      if (!isValidHeaderVersion(version)) {
        throw new MessageFormatException("Version not known while reading message - " + version,
            MessageFormatErrorCodes.Unknown_Format_Version);
      }
      ByteBuffer header = ByteBuffer.allocate(getHeaderSizeForVersion(version));
      header.putShort(version);
      if (startOffset + (header.capacity() - headerVersion.capacity()) > endOffset) {
        throw new IndexOutOfBoundsException("Unable to read the rest of the header. Reached end of stream");
      }
      read.readInto(header, startOffset);
      startOffset += header.capacity() - headerVersion.capacity();
      header.flip();
      MessageHeader_Format headerFormat = getMessageHeader(version, header);
      headerFormat.verifyHeader();
      ReadInputStream stream = new ReadInputStream(read, startOffset, endOffset);
      StoreKey key = factory.getStoreKey(new DataInputStream(stream));
      short lifeVersion = 0;
      if (headerFormat.hasLifeVersion()) {
        lifeVersion = headerFormat.getLifeVersion();
      }
      // read the appropriate type of message based on the relative offset that is set
      if (headerFormat.isPutRecord()) {
        // the put record's constituent parts are not needed here; deserialize them to check for validity
        if (headerFormat.hasEncryptionKeyRecord()) {
          deserializeBlobEncryptionKey(stream);
        }
        BlobProperties properties = deserializeBlobProperties(stream);
        deserializeUserMetadata(stream);
        deserializeBlob(stream);
        MessageInfo info = new MessageInfo(key, header.capacity() + key.sizeInBytes() + headerFormat.getMessageSize(),
            false, false, false,
            Utils.addSecondsToEpochTime(properties.getCreationTimeInMs(), properties.getTimeToLiveInSeconds()), null,
            properties.getAccountId(), properties.getContainerId(), properties.getCreationTimeInMs(), lifeVersion);
        messageRecovered.add(info);
      } else {
        UpdateRecord updateRecord = deserializeUpdateRecord(stream);
        boolean deleted = false, ttlUpdated = false, undeleted = false;
        switch (updateRecord.getType()) {
          case DELETE:
            deleted = true;
            break;
          case TTL_UPDATE:
            ttlUpdated = true;
            break;
          case UNDELETE:
            undeleted = true;
            break;
          default:
            throw new IllegalStateException("Unknown update record type: " + updateRecord.getType());
        }
        MessageInfo info = new MessageInfo(key, header.capacity() + key.sizeInBytes() + headerFormat.getMessageSize(),
            deleted, ttlUpdated, undeleted, updateRecord.getAccountId(), updateRecord.getContainerId(),
            updateRecord.getUpdateTimeInMs(), lifeVersion);
        messageRecovered.add(info);
      }
      startOffset = stream.getCurrentPosition();
    }
  } catch (MessageFormatException e) {
    // log in case we were not able to parse a message. We stop recovery at that point and return the
    // messages that have been recovered so far.
    logger.error("Message format exception while recovering messages", e);
  } catch (IndexOutOfBoundsException e) {
    // log in case we were not able to read a complete message. We stop recovery at that point and return
    // the messages that have been recovered so far.
    logger.error("Trying to read more than the available bytes");
  }
  for (MessageInfo messageInfo : messageRecovered) {
    logger.info("Message recovered key {} size {} expiration {} deleted {} undeleted {}", messageInfo.getStoreKey(),
        messageInfo.getSize(), messageInfo.getExpirationTimeInMs(), messageInfo.isDeleted(), messageInfo.isUndeleted());
  }
  return messageRecovered;
}
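The recovery loop depends only on the Read abstraction, whose readInto(ByteBuffer, long) fills a buffer from a given position. As an illustration of what recover consumes, here is a minimal file-backed Read sketch, assuming readInto must fill the whole buffer; ambry's real log segments implement this differently.

import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;

// Sketch of a file-backed Read for illustration only.
class FileBackedRead implements Read {
  private final FileChannel channel;

  FileBackedRead(FileChannel channel) {
    this.channel = channel;
  }

  @Override
  public void readInto(ByteBuffer buffer, long position) throws IOException {
    long currentPosition = position;
    while (buffer.hasRemaining()) {
      int bytesRead = channel.read(buffer, currentPosition);
      if (bytesRead < 0) {
        throw new IOException("Reached end of file before the buffer was filled");
      }
      currentPosition += bytesRead;
    }
  }
}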
Use of com.github.ambry.store.MessageInfo in project ambry by linkedin.
The class MessageFormatWriteSet, method writeTo.
@Override
public long writeTo(Write writeChannel) throws StoreException {
  ReadableByteChannel readableByteChannel = Channels.newChannel(streamToWrite);
  long sizeWritten = 0;
  for (MessageInfo info : streamInfo) {
    writeChannel.appendFrom(readableByteChannel, info.getSize());
    sizeWritten += info.getSize();
  }
  return sizeWritten;
}
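Note the design choice: the write set never copies message bytes itself. Wrapping the input stream in a ReadableByteChannel lets the store's Write implementation pull each message directly, sized by its MessageInfo. A hypothetical caller, where messageStream, infoList, and log stand in for the real stream, metadata list, and store log:

// Hypothetical wiring; names are illustrative.
MessageFormatWriteSet writeSet = new MessageFormatWriteSet(messageStream, infoList, false);
long bytesWritten = writeSet.writeTo(log);
// bytesWritten should equal the sum of the MessageInfo sizes in infoList.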
Use of com.github.ambry.store.MessageInfo in project ambry by linkedin.
The class ValidatingTransformer, method transform.
@Override
public TransformationOutput transform(Message message) {
  ByteBuffer encryptionKey;
  BlobProperties props;
  ByteBuffer metadata;
  BlobData blobData;
  MessageInfo msgInfo = message.getMessageInfo();
  InputStream msgStream = message.getStream();
  TransformationOutput transformationOutput = null;
  try {
    // read the message header
    ByteBuffer headerVersion = ByteBuffer.allocate(Version_Field_Size_In_Bytes);
    msgStream.read(headerVersion.array());
    short version = headerVersion.getShort();
    if (!isValidHeaderVersion(version)) {
      throw new MessageFormatException("Header version not supported " + version,
          MessageFormatErrorCodes.Data_Corrupt);
    }
    int headerSize = getHeaderSizeForVersion(version);
    ByteBuffer headerBuffer = ByteBuffer.allocate(headerSize);
    headerBuffer.put(headerVersion.array());
    msgStream.read(headerBuffer.array(), Version_Field_Size_In_Bytes, headerSize - Version_Field_Size_In_Bytes);
    headerBuffer.rewind();
    MessageHeader_Format header = getMessageHeader(version, headerBuffer);
    header.verifyHeader();
    StoreKey keyInStream = storeKeyFactory.getStoreKey(new DataInputStream(msgStream));
    if (header.isPutRecord()) {
      if (header.hasLifeVersion() && header.getLifeVersion() != msgInfo.getLifeVersion()) {
        logger.trace("LifeVersion in stream: {} failed to match lifeVersion from index: {} for key {}",
            header.getLifeVersion(), msgInfo.getLifeVersion(), keyInStream);
      }
      encryptionKey = header.hasEncryptionKeyRecord() ? deserializeBlobEncryptionKey(msgStream) : null;
      props = deserializeBlobProperties(msgStream);
      metadata = deserializeUserMetadata(msgStream);
      blobData = deserializeBlob(msgStream);
    } else {
      throw new IllegalStateException("Message cannot be anything other than a put record");
    }
    if (msgInfo.getStoreKey().equals(keyInStream)) {
      // BlobIDTransformer only exists on ambry-server, and replication between servers relies on the blocking
      // channel, which still uses java ByteBuffer. So there is no need to consider releasing anything here.
      // @todo: when netty ByteBuf is adopted for the blocking channel on ambry-server, remember to release this ByteBuf.
      PutMessageFormatInputStream transformedStream =
          new PutMessageFormatInputStream(keyInStream, encryptionKey, props, metadata,
              new ByteBufInputStream(blobData.content(), true), blobData.getSize(), blobData.getBlobType(),
              msgInfo.getLifeVersion());
      MessageInfo transformedMsgInfo =
          new MessageInfo.Builder(msgInfo).size(transformedStream.getSize()).isDeleted(false).isUndeleted(false).build();
      transformationOutput = new TransformationOutput(new Message(transformedMsgInfo, transformedStream));
    } else {
      throw new IllegalStateException(
          "StoreKey in stream: " + keyInStream + " failed to match store key from index: " + msgInfo.getStoreKey());
    }
  } catch (Exception e) {
    transformationOutput = new TransformationOutput(e);
  }
  return transformationOutput;
}
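In replication, transformers like this are applied per message, and failures are reported through the TransformationOutput rather than thrown. A hedged usage sketch; the constructor arguments (storeKeyFactory, storeKeyConverter) and the getException/getMsg accessors are assumptions based on the surrounding ambry code, not confirmed API:

// Sketch: validate one replicated message; names here are assumptions.
Transformer transformer = new ValidatingTransformer(storeKeyFactory, storeKeyConverter);
TransformationOutput output = transformer.transform(message);
if (output.getException() != null) {
  // the message was corrupt or mismatched; skip it or fail replication for this blob
  logger.error("Transformation failed", output.getException());
} else {
  Message validated = output.getMsg();
  // hand 'validated' to the store write path
}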
Use of com.github.ambry.store.MessageInfo in project ambry by linkedin.
The class AmbryRequests, method handlePutRequest.
@Override
public void handlePutRequest(NetworkRequest request) throws IOException, InterruptedException {
  PutRequest receivedRequest;
  if (request instanceof LocalChannelRequest) {
    // This is the case where handlePutRequest is called when frontends are writing to Azure. Here, this method
    // is called by request handler threads running within the frontend router itself, so the request can be directly
    // referenced as java objects without any need for deserialization.
    PutRequest sentRequest = (PutRequest) ((LocalChannelRequest) request).getRequestInfo().getRequest();
    // However, we create a new PutRequest object to represent the received put request, since the blob content
    // 'buffer' in PutRequest is accessed as a 'stream' while writing to the Store. Also, the crc value for this request
    // is null, since it is only calculated (on the fly) when sending the request over the network. It might be okay to
    // use a null crc here, since the scenario for which crc is used (i.e. the possibility of collisions due to fast
    // replication), as described in https://github.com/linkedin/ambry/pull/549, might not apply when frontends
    // are talking to Azure.
    receivedRequest =
        new PutRequest(sentRequest.getCorrelationId(), sentRequest.getClientId(), sentRequest.getBlobId(),
            sentRequest.getBlobProperties(), sentRequest.getUsermetadata(), sentRequest.getBlobSize(),
            sentRequest.getBlobType(), sentRequest.getBlobEncryptionKey(),
            new ByteBufInputStream(sentRequest.getBlob()), null);
  } else {
    InputStream is = request.getInputStream();
    DataInputStream dis = is instanceof DataInputStream ? (DataInputStream) is : new DataInputStream(is);
    receivedRequest = PutRequest.readFrom(dis, clusterMap);
  }
  long requestQueueTime = SystemTime.getInstance().milliseconds() - request.getStartTimeInMs();
  long totalTimeSpent = requestQueueTime;
  metrics.putBlobRequestQueueTimeInMs.update(requestQueueTime);
  metrics.putBlobRequestRate.mark();
  long startTime = SystemTime.getInstance().milliseconds();
  PutResponse response = null;
  try {
    ServerErrorCode error =
        validateRequest(receivedRequest.getBlobId().getPartition(), RequestOrResponseType.PutRequest, false);
    if (error != ServerErrorCode.No_Error) {
      logger.error("Validating put request failed with error {} for request {}", error, receivedRequest);
      response = new PutResponse(receivedRequest.getCorrelationId(), receivedRequest.getClientId(), error);
    } else {
      MessageFormatInputStream stream =
          new PutMessageFormatInputStream(receivedRequest.getBlobId(), receivedRequest.getBlobEncryptionKey(),
              receivedRequest.getBlobProperties(), receivedRequest.getUsermetadata(), receivedRequest.getBlobStream(),
              receivedRequest.getBlobSize(), receivedRequest.getBlobType());
      BlobProperties properties = receivedRequest.getBlobProperties();
      long expirationTime = Utils.addSecondsToEpochTime(receivedRequest.getBlobProperties().getCreationTimeInMs(),
          properties.getTimeToLiveInSeconds());
      MessageInfo info = new MessageInfo.Builder(receivedRequest.getBlobId(), stream.getSize(),
          properties.getAccountId(), properties.getContainerId(), properties.getCreationTimeInMs())
          .expirationTimeInMs(expirationTime)
          .crc(receivedRequest.getCrc())
          .lifeVersion(MessageInfo.LIFE_VERSION_FROM_FRONTEND)
          .build();
      ArrayList<MessageInfo> infoList = new ArrayList<>();
      infoList.add(info);
      MessageFormatWriteSet writeset = new MessageFormatWriteSet(stream, infoList, false);
      Store storeToPut = storeManager.getStore(receivedRequest.getBlobId().getPartition());
      storeToPut.put(writeset);
      response = new PutResponse(receivedRequest.getCorrelationId(), receivedRequest.getClientId(),
          ServerErrorCode.No_Error);
      metrics.blobSizeInBytes.update(receivedRequest.getBlobSize());
      metrics.blobUserMetadataSizeInBytes.update(receivedRequest.getUsermetadata().limit());
      if (notification != null) {
        notification.onBlobReplicaCreated(currentNode.getHostname(), currentNode.getPort(),
            receivedRequest.getBlobId().getID(), BlobReplicaSourceType.PRIMARY);
      }
    }
  } catch (StoreException e) {
    logger.error("Store exception on a put with error code {} for request {}", e.getErrorCode(), receivedRequest, e);
    if (e.getErrorCode() == StoreErrorCodes.Already_Exist) {
      metrics.idAlreadyExistError.inc();
    } else if (e.getErrorCode() == StoreErrorCodes.IOError) {
      metrics.storeIOError.inc();
    } else {
      metrics.unExpectedStorePutError.inc();
    }
    response = new PutResponse(receivedRequest.getCorrelationId(), receivedRequest.getClientId(),
        ErrorMapping.getStoreErrorMapping(e.getErrorCode()));
  } catch (Exception e) {
    logger.error("Unknown exception on a put for request {}", receivedRequest, e);
    response = new PutResponse(receivedRequest.getCorrelationId(), receivedRequest.getClientId(),
        ServerErrorCode.Unknown_Error);
  } finally {
    long processingTime = SystemTime.getInstance().milliseconds() - startTime;
    totalTimeSpent += processingTime;
    publicAccessLogger.info("{} {} processingTime {}", receivedRequest, response, processingTime);
    metrics.putBlobProcessingTimeInMs.update(processingTime);
    metrics.updatePutBlobProcessingTimeBySize(receivedRequest.getBlobSize(), processingTime);
  }
  sendPutResponse(requestResponseChannel, response, request, metrics.putBlobResponseQueueTimeInMs,
      metrics.putBlobSendTimeInMs, metrics.putBlobTotalTimeInMs, totalTimeSpent, receivedRequest.getBlobSize(),
      metrics);
}
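Distilled from the handler above, this is the shape of the MessageInfo that a put produces. The standalone variable names (blobId, streamSize, receivedCrc) are illustrative; in the handler they come from the received PutRequest and the message format stream:

// Illustrative values; see the handler above for where each comes from.
long expirationTime = Utils.addSecondsToEpochTime(properties.getCreationTimeInMs(),
    properties.getTimeToLiveInSeconds());
MessageInfo info = new MessageInfo.Builder(blobId, streamSize, properties.getAccountId(),
    properties.getContainerId(), properties.getCreationTimeInMs())
    .expirationTimeInMs(expirationTime)
    .crc(receivedCrc)  // may be null when the request was never serialized over the network
    .lifeVersion(MessageInfo.LIFE_VERSION_FROM_FRONTEND)
    .build();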