use of com.github.ambry.store.StoreKey in project ambry by linkedin.
the class ReadInputStream method recover.
@Override
public List<MessageInfo> recover(Read read, long startOffset, long endOffset, StoreKeyFactory factory) throws IOException {
  ArrayList<MessageInfo> messageRecovered = new ArrayList<MessageInfo>();
  try {
    while (startOffset < endOffset) {
      // read message header
      ByteBuffer headerVersion = ByteBuffer.allocate(Version_Field_Size_In_Bytes);
      if (startOffset + Version_Field_Size_In_Bytes > endOffset) {
        throw new IndexOutOfBoundsException("Unable to read version. Reached end of stream");
      }
      read.readInto(headerVersion, startOffset);
      startOffset += headerVersion.capacity();
      headerVersion.flip();
      short version = headerVersion.getShort();
      if (!isValidHeaderVersion(version)) {
        throw new MessageFormatException("Version not known while reading message - " + version,
            MessageFormatErrorCodes.Unknown_Format_Version);
      }
      ByteBuffer header = ByteBuffer.allocate(getHeaderSizeForVersion(version));
      header.putShort(version);
      if (startOffset + (header.capacity() - headerVersion.capacity()) > endOffset) {
        throw new IndexOutOfBoundsException("Unable to read header. Reached end of stream");
      }
      read.readInto(header, startOffset);
      startOffset += header.capacity() - headerVersion.capacity();
      header.flip();
      MessageHeader_Format headerFormat = getMessageHeader(version, header);
      headerFormat.verifyHeader();
      ReadInputStream stream = new ReadInputStream(read, startOffset, endOffset);
      StoreKey key = factory.getStoreKey(new DataInputStream(stream));
      short lifeVersion = 0;
      if (headerFormat.hasLifeVersion()) {
        lifeVersion = headerFormat.getLifeVersion();
      }
      // read the appropriate type of message based on the relative offset that is set
      if (headerFormat.isPutRecord()) {
        // deserialize the put record fields only to check them for validity
        if (headerFormat.hasEncryptionKeyRecord()) {
          deserializeBlobEncryptionKey(stream);
        }
        BlobProperties properties = deserializeBlobProperties(stream);
        deserializeUserMetadata(stream);
        deserializeBlob(stream);
        MessageInfo info = new MessageInfo(key, header.capacity() + key.sizeInBytes() + headerFormat.getMessageSize(),
            false, false, false,
            Utils.addSecondsToEpochTime(properties.getCreationTimeInMs(), properties.getTimeToLiveInSeconds()), null,
            properties.getAccountId(), properties.getContainerId(), properties.getCreationTimeInMs(), lifeVersion);
        messageRecovered.add(info);
      } else {
        UpdateRecord updateRecord = deserializeUpdateRecord(stream);
        boolean deleted = false, ttlUpdated = false, undeleted = false;
        switch (updateRecord.getType()) {
          case DELETE:
            deleted = true;
            break;
          case TTL_UPDATE:
            ttlUpdated = true;
            break;
          case UNDELETE:
            undeleted = true;
            break;
          default:
            throw new IllegalStateException("Unknown update record type: " + updateRecord.getType());
        }
        MessageInfo info = new MessageInfo(key, header.capacity() + key.sizeInBytes() + headerFormat.getMessageSize(),
            deleted, ttlUpdated, undeleted, updateRecord.getAccountId(), updateRecord.getContainerId(),
            updateRecord.getUpdateTimeInMs(), lifeVersion);
        messageRecovered.add(info);
      }
      startOffset = stream.getCurrentPosition();
    }
  } catch (MessageFormatException e) {
    // log the case where we were not able to parse a message. We stop recovery at that point and return the
    // messages that have been recovered so far.
    logger.error("Message format exception while recovering messages", e);
  } catch (IndexOutOfBoundsException e) {
    // log the case where we were not able to read a complete message. We stop recovery at that point and return
    // the messages that have been recovered so far.
    logger.error("Trying to read more than the available bytes");
  }
  for (MessageInfo messageInfo : messageRecovered) {
    logger.info("Message Recovered key {} size {} ttl {} deleted {} undelete {}", messageInfo.getStoreKey(),
        messageInfo.getSize(), messageInfo.getExpirationTimeInMs(), messageInfo.isDeleted(),
        messageInfo.isUndeleted());
  }
  return messageRecovered;
}
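For orientation, here is a minimal sketch of how this recovery hook could be driven. It assumes only what the snippet above already shows: Read exposes readInto(ByteBuffer, long), and recover(...) takes a Read plus start/end offsets and a StoreKeyFactory. The FileChannel-backed Read implementation, the recovery instance, and the log file name are hypothetical.

import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
import java.nio.file.Paths;
import java.nio.file.StandardOpenOption;

// Hypothetical Read implementation over a FileChannel; not the store's own wiring.
class FileChannelRead implements Read {
  private final FileChannel channel;

  FileChannelRead(FileChannel channel) {
    this.channel = channel;
  }

  @Override
  public void readInto(ByteBuffer buffer, long position) throws IOException {
    // Fill the buffer completely, starting at the given absolute log position.
    while (buffer.hasRemaining()) {
      int bytesRead = channel.read(buffer, position);
      if (bytesRead < 0) {
        throw new IOException("Reached end of file at position " + position);
      }
      position += bytesRead;
    }
  }
}

// Usage sketch: recover all messages between the start of the log and its current end.
try (FileChannel channel = FileChannel.open(Paths.get("log_current"), StandardOpenOption.READ)) {
  List<MessageInfo> recovered = recovery.recover(new FileChannelRead(channel), 0, channel.size(), storeKeyFactory);
}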
use of com.github.ambry.store.StoreKey in project ambry by linkedin.
the class MessageReadSetIndexInputStream method parseHeaderAndVerifyStoreKey.
/**
 * Parse and verify the header + storeKey from the given input stream.
 */
private MessageHeader_Format parseHeaderAndVerifyStoreKey(InputStream is, int indexInReadSet)
    throws MessageFormatException, IOException {
  // read and verify header version
  byte[] headerVersionBytes = new byte[Version_Field_Size_In_Bytes];
  is.read(headerVersionBytes, 0, Version_Field_Size_In_Bytes);
  short version = ByteBuffer.wrap(headerVersionBytes).getShort();
  if (!isValidHeaderVersion(version)) {
    throw new MessageFormatException(
        "Version not known while reading message - version " + version + ", StoreKey " + readSet.getKeyAt(indexInReadSet),
        MessageFormatErrorCodes.Unknown_Format_Version);
  }
  // read and verify header
  byte[] headerBytes = new byte[getHeaderSizeForVersion(version)];
  is.read(headerBytes, Version_Field_Size_In_Bytes, headerBytes.length - Version_Field_Size_In_Bytes);
  ByteBuffer header = ByteBuffer.wrap(headerBytes);
  header.putShort(version);
  header.rewind();
  MessageHeader_Format headerFormat = getMessageHeader(version, header);
  headerFormat.verifyHeader();
  // read and verify storeKey
  StoreKey storeKey = storeKeyFactory.getStoreKey(new DataInputStream(is));
  if (storeKey.compareTo(readSet.getKeyAt(indexInReadSet)) != 0) {
    throw new MessageFormatException(
        "Id mismatch between metadata and store - metadataId " + readSet.getKeyAt(indexInReadSet) + " storeId " + storeKey,
        MessageFormatErrorCodes.Store_Key_Id_MisMatch);
  }
  return headerFormat;
}
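One caveat about the two is.read(...) calls above: InputStream.read(byte[], int, int) may return fewer bytes than requested, so the parse relies on the underlying stream delivering complete reads. A defensive sketch (not the project's code) would read exactly the requested count, e.g. via DataInputStream.readFully:

import java.io.DataInputStream;
import java.io.IOException;
import java.io.InputStream;

// Hypothetical helper: read exactly len bytes into dest at offset, or fail with EOFException.
static void readExactly(InputStream is, byte[] dest, int offset, int len) throws IOException {
  new DataInputStream(is).readFully(dest, offset, len);
}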
use of com.github.ambry.store.StoreKey in project ambry by linkedin.
the class ValidatingTransformer method transform.
@Override
public TransformationOutput transform(Message message) {
  ByteBuffer encryptionKey;
  BlobProperties props;
  ByteBuffer metadata;
  BlobData blobData;
  MessageInfo msgInfo = message.getMessageInfo();
  InputStream msgStream = message.getStream();
  TransformationOutput transformationOutput = null;
  try {
    // Read header
    ByteBuffer headerVersion = ByteBuffer.allocate(Version_Field_Size_In_Bytes);
    msgStream.read(headerVersion.array());
    short version = headerVersion.getShort();
    if (!isValidHeaderVersion(version)) {
      throw new MessageFormatException("Header version not supported " + version,
          MessageFormatErrorCodes.Data_Corrupt);
    }
    int headerSize = getHeaderSizeForVersion(version);
    ByteBuffer headerBuffer = ByteBuffer.allocate(headerSize);
    headerBuffer.put(headerVersion.array());
    msgStream.read(headerBuffer.array(), Version_Field_Size_In_Bytes, headerSize - Version_Field_Size_In_Bytes);
    headerBuffer.rewind();
    MessageHeader_Format header = getMessageHeader(version, headerBuffer);
    header.verifyHeader();
    StoreKey keyInStream = storeKeyFactory.getStoreKey(new DataInputStream(msgStream));
    if (header.isPutRecord()) {
      if (header.hasLifeVersion() && header.getLifeVersion() != msgInfo.getLifeVersion()) {
        logger.trace("LifeVersion in stream: {} failed to match lifeVersion from Index: {} for key {}",
            header.getLifeVersion(), msgInfo.getLifeVersion(), keyInStream);
      }
      encryptionKey = header.hasEncryptionKeyRecord() ? deserializeBlobEncryptionKey(msgStream) : null;
      props = deserializeBlobProperties(msgStream);
      metadata = deserializeUserMetadata(msgStream);
      blobData = deserializeBlob(msgStream);
    } else {
      throw new IllegalStateException("Message cannot be anything other than a put record");
    }
    if (msgInfo.getStoreKey().equals(keyInStream)) {
      // BlobIDTransformer only exists on ambry-server, and replication between servers relies on the blocking
      // channel, which is still using java ByteBuffer. So there is no need to consider releasing anything here.
      // @todo: when netty ByteBuf is adopted for the blocking channel on ambry-server, remember to release this ByteBuf.
      PutMessageFormatInputStream transformedStream =
          new PutMessageFormatInputStream(keyInStream, encryptionKey, props, metadata,
              new ByteBufInputStream(blobData.content(), true), blobData.getSize(), blobData.getBlobType(),
              msgInfo.getLifeVersion());
      MessageInfo transformedMsgInfo =
          new MessageInfo.Builder(msgInfo).size(transformedStream.getSize()).isDeleted(false).isUndeleted(false).build();
      transformationOutput = new TransformationOutput(new Message(transformedMsgInfo, transformedStream));
    } else {
      throw new IllegalStateException(
          "StoreKey in stream: " + keyInStream + " failed to match store key from Index: " + msgInfo.getStoreKey());
    }
  } catch (Exception e) {
    transformationOutput = new TransformationOutput(e);
  }
  return transformationOutput;
}
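Because transform() funnels every failure into the returned TransformationOutput instead of throwing, callers have to inspect the result. A minimal caller-side sketch follows; the getException()/getMsg() accessors are assumptions inferred from how TransformationOutput is constructed above:

// Hypothetical caller; getException()/getMsg() are assumed accessors on TransformationOutput.
TransformationOutput output = transformer.transform(message);
if (output.getException() != null) {
  // validation failed: bad header version, corrupt record, or key mismatch
  throw new IOException("Validation failed for " + message.getMessageInfo().getStoreKey(), output.getException());
}
Message validated = output.getMsg();
// validated.getStream() yields the re-serialized put record with the index's lifeVersion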
use of com.github.ambry.store.StoreKey in project ambry by linkedin.
the class AmbryRequests method handleDeleteRequest.
@Override
public void handleDeleteRequest(NetworkRequest request) throws IOException, InterruptedException {
  DeleteRequest deleteRequest;
  if (request instanceof LocalChannelRequest) {
    // This is the case where handleDeleteRequest is called when frontends are talking to Azure. Here, the method
    // is called by request handler threads running within the frontend router itself, so the request can be
    // referenced directly as a java object without any need for deserialization.
    deleteRequest = (DeleteRequest) ((LocalChannelRequest) request).getRequestInfo().getRequest();
  } else {
    deleteRequest = DeleteRequest.readFrom(new DataInputStream(request.getInputStream()), clusterMap);
  }
  long requestQueueTime = SystemTime.getInstance().milliseconds() - request.getStartTimeInMs();
  long totalTimeSpent = requestQueueTime;
  metrics.deleteBlobRequestQueueTimeInMs.update(requestQueueTime);
  metrics.deleteBlobRequestRate.mark();
  long startTime = SystemTime.getInstance().milliseconds();
  DeleteResponse response = null;
  try {
    StoreKey convertedStoreKey = getConvertedStoreKeys(Collections.singletonList(deleteRequest.getBlobId())).get(0);
    ServerErrorCode error =
        validateRequest(deleteRequest.getBlobId().getPartition(), RequestOrResponseType.DeleteRequest, false);
    if (error != ServerErrorCode.No_Error) {
      logger.error("Validating delete request failed with error {} for request {}", error, deleteRequest);
      response = new DeleteResponse(deleteRequest.getCorrelationId(), deleteRequest.getClientId(), error);
    } else {
      BlobId convertedBlobId = (BlobId) convertedStoreKey;
      MessageInfo info = new MessageInfo.Builder(convertedBlobId, -1, convertedBlobId.getAccountId(),
          convertedBlobId.getContainerId(), deleteRequest.getDeletionTimeInMs()).isDeleted(true)
          .lifeVersion(MessageInfo.LIFE_VERSION_FROM_FRONTEND)
          .build();
      Store storeToDelete = storeManager.getStore(deleteRequest.getBlobId().getPartition());
      storeToDelete.delete(Collections.singletonList(info));
      response = new DeleteResponse(deleteRequest.getCorrelationId(), deleteRequest.getClientId(),
          ServerErrorCode.No_Error);
      if (notification != null) {
        notification.onBlobReplicaDeleted(currentNode.getHostname(), currentNode.getPort(),
            convertedStoreKey.getID(), BlobReplicaSourceType.PRIMARY);
      }
    }
  } catch (StoreException e) {
    boolean logInErrorLevel = false;
    if (e.getErrorCode() == StoreErrorCodes.ID_Not_Found) {
      metrics.idNotFoundError.inc();
    } else if (e.getErrorCode() == StoreErrorCodes.TTL_Expired) {
      metrics.ttlExpiredError.inc();
    } else if (e.getErrorCode() == StoreErrorCodes.ID_Deleted) {
      metrics.idDeletedError.inc();
    } else if (e.getErrorCode() == StoreErrorCodes.Authorization_Failure) {
      metrics.deleteAuthorizationFailure.inc();
    } else {
      logInErrorLevel = true;
      metrics.unExpectedStoreDeleteError.inc();
    }
    if (logInErrorLevel) {
      logger.error("Store exception on a delete with error code {} for request {}", e.getErrorCode(), deleteRequest, e);
    } else {
      logger.trace("Store exception on a delete with error code {} for request {}", e.getErrorCode(), deleteRequest, e);
    }
    response = new DeleteResponse(deleteRequest.getCorrelationId(), deleteRequest.getClientId(),
        ErrorMapping.getStoreErrorMapping(e.getErrorCode()));
  } catch (Exception e) {
    logger.error("Unknown exception for delete request {}", deleteRequest, e);
    response = new DeleteResponse(deleteRequest.getCorrelationId(), deleteRequest.getClientId(),
        ServerErrorCode.Unknown_Error);
    metrics.unExpectedStoreDeleteError.inc();
  } finally {
    long processingTime = SystemTime.getInstance().milliseconds() - startTime;
    totalTimeSpent += processingTime;
    publicAccessLogger.info("{} {} processingTime {}", deleteRequest, response, processingTime);
    metrics.deleteBlobProcessingTimeInMs.update(processingTime);
  }
  requestResponseChannel.sendResponse(response, request,
      new ServerNetworkResponseMetrics(metrics.deleteBlobResponseQueueTimeInMs, metrics.deleteBlobSendTimeInMs,
          metrics.deleteBlobTotalTimeInMs, null, null, totalTimeSpent));
}
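The StoreException branch above maps each store error code to a metric and decides the log level. Purely as illustration, the same mapping reads a little more compactly as a switch over the codes already named in the snippet (this rewrite is not the project's code):

// Illustrative rewrite of the error-code-to-metric mapping above.
boolean logInErrorLevel = false;
switch (e.getErrorCode()) {
  case ID_Not_Found:
    metrics.idNotFoundError.inc();
    break;
  case TTL_Expired:
    metrics.ttlExpiredError.inc();
    break;
  case ID_Deleted:
    metrics.idDeletedError.inc();
    break;
  case Authorization_Failure:
    metrics.deleteAuthorizationFailure.inc();
    break;
  default:
    logInErrorLevel = true;
    metrics.unExpectedStoreDeleteError.inc();
}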
use of com.github.ambry.store.StoreKey in project ambry by linkedin.
the class ReplicationTest method expiryAfterMetadataExchangeTest.
/**
 * Test the case where a blob expires after a replication metadata exchange completes and identifies the blob as
 * a candidate. The subsequent GetRequest should succeed as Replication makes an Include_All call, and
 * fixMissingStoreKeys() should succeed without exceptions. The blob should not be put locally.
 */
@Test
public void expiryAfterMetadataExchangeTest() throws Exception {
  int batchSize = 400;
  ReplicationTestSetup testSetup = new ReplicationTestSetup(batchSize);
  List<PartitionId> partitionIds = testSetup.partitionIds;
  MockHost remoteHost = testSetup.remoteHost;
  MockHost localHost = testSetup.localHost;
  short blobIdVersion = CommonTestUtils.getCurrentBlobIdVersion();
  Map<PartitionId, Set<StoreKey>> idsToExpectByPartition = new HashMap<>();
  for (int i = 0; i < partitionIds.size(); i++) {
    PartitionId partitionId = partitionIds.get(i);
    // add 5 messages to the remote host only.
    Set<StoreKey> expectedIds =
        new HashSet<>(addPutMessagesToReplicasOfPartition(partitionId, Collections.singletonList(remoteHost), 5));
    short accountId = Utils.getRandomShort(TestUtils.RANDOM);
    short containerId = Utils.getRandomShort(TestUtils.RANDOM);
    boolean toEncrypt = TestUtils.RANDOM.nextBoolean();
    // add an expired message to the remote host only
    StoreKey id = new BlobId(blobIdVersion, BlobId.BlobIdType.NATIVE, ClusterMap.UNKNOWN_DATACENTER_ID, accountId,
        containerId, partitionId, toEncrypt, BlobId.BlobDataType.DATACHUNK);
    PutMsgInfoAndBuffer msgInfoAndBuffer = createPutMessage(id, accountId, containerId, toEncrypt);
    remoteHost.addMessage(partitionId,
        new MessageInfo(id, msgInfoAndBuffer.byteBuffer.remaining(), 1, accountId, containerId,
            msgInfoAndBuffer.messageInfo.getOperationTimeMs()), msgInfoAndBuffer.byteBuffer);
    // add 3 messages to the remote host only
    expectedIds.addAll(addPutMessagesToReplicasOfPartition(partitionId, Collections.singletonList(remoteHost), 3));
    // delete the very first blob in the remote host only (and delete it from the expected list)
    Iterator<StoreKey> iter = expectedIds.iterator();
    addDeleteMessagesToReplicasOfPartition(partitionId, iter.next(), Collections.singletonList(remoteHost));
    iter.remove();
    // PUT and DELETE a blob in the remote host only
    id = addPutMessagesToReplicasOfPartition(partitionId, Collections.singletonList(remoteHost), 1).get(0);
    addDeleteMessagesToReplicasOfPartition(partitionId, id, Collections.singletonList(remoteHost));
    idsToExpectByPartition.put(partitionId, expectedIds);
  }
  // Do the replica metadata exchange.
  List<ReplicaThread.ExchangeMetadataResponse> responses =
      testSetup.replicaThread.exchangeMetadata(new MockConnectionPool.MockConnection(remoteHost, batchSize),
          testSetup.replicasToReplicate.get(remoteHost.dataNodeId));
  Assert.assertEquals("Actual keys in Exchange Metadata Response different from expected",
      idsToExpectByPartition.values().stream().flatMap(Collection::stream).collect(Collectors.toSet()),
      responses.stream().map(k -> k.getMissingStoreKeys()).flatMap(Collection::stream).collect(Collectors.toSet()));
  // Now expire a message in the remote host before doing the Get requests (for every partition). Remove these keys
  // from the expected key set. Even though they are requested, they should not go into the local store. However,
  // this cycle of replication must be successful.
  PartitionId partitionId = idsToExpectByPartition.keySet().iterator().next();
  Iterator<StoreKey> keySet = idsToExpectByPartition.get(partitionId).iterator();
  StoreKey keyToExpire = keySet.next();
  keySet.remove();
  MessageInfo msgInfoToExpire = null;
  for (MessageInfo info : remoteHost.infosByPartition.get(partitionId)) {
    if (info.getStoreKey().equals(keyToExpire)) {
      msgInfoToExpire = info;
      break;
    }
  }
  int i = remoteHost.infosByPartition.get(partitionId).indexOf(msgInfoToExpire);
  remoteHost.infosByPartition.get(partitionId).set(i,
      new MessageInfo(msgInfoToExpire.getStoreKey(), msgInfoToExpire.getSize(), msgInfoToExpire.isDeleted(),
          msgInfoToExpire.isTtlUpdated(), msgInfoToExpire.isUndeleted(), 1, null, msgInfoToExpire.getAccountId(),
          msgInfoToExpire.getContainerId(), msgInfoToExpire.getOperationTimeMs(), msgInfoToExpire.getLifeVersion()));
  testSetup.replicaThread.fixMissingStoreKeys(new MockConnectionPool.MockConnection(remoteHost, batchSize),
      testSetup.replicasToReplicate.get(remoteHost.dataNodeId), responses, false);
  Assert.assertEquals(idsToExpectByPartition.keySet(), localHost.infosByPartition.keySet());
  Assert.assertEquals("Actual keys in local host different from expected",
      idsToExpectByPartition.values().stream().flatMap(Collection::stream).collect(Collectors.toSet()),
      localHost.infosByPartition.values().stream().flatMap(Collection::stream).map(MessageInfo::getStoreKey)
          .collect(Collectors.toSet()));
}
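A hypothetical extra assertion that could close out the test body, making the expiry expectation explicit (keyToExpire and partitionId are the test-local variables from above; this line is not in the original test):

// The expired key was requested during fixMissingStoreKeys but must never have been
// written to the local store.
Assert.assertFalse("Expired key should not be replicated locally",
    localHost.infosByPartition.get(partitionId).stream()
        .map(MessageInfo::getStoreKey)
        .anyMatch(keyToExpire::equals));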