Use of com.github.ambry.store.StoreKey in project ambry by LinkedIn.
In class ReplicaThread, the method getMessagesForMissingKeys:
/**
 * Gets the messages for the keys that are missing from the local store by issuing a {@link GetRequest} to the remote
 * node, if there are any missing keys. If there are no missing keys to be fetched, no request is issued and a
 * null response is returned.
 * @param connectedChannel The connection channel to the remote node
 * @param exchangeMetadataResponseList The list of metadata responses from the remote node
 * @param replicasToReplicatePerNode The list of remote replicas for the remote node
 * @param remoteNode The remote node from which replication needs to happen
 * @return The response that contains the missing messages, or null if no request was issued because there were no
 *         keys missing.
 * @throws ReplicationException if the remote node responds to the get request with an error
 * @throws IOException if there is a network error while exchanging data with the remote node
 */
private GetResponse getMessagesForMissingKeys(ConnectedChannel connectedChannel,
    List<ExchangeMetadataResponse> exchangeMetadataResponseList, List<RemoteReplicaInfo> replicasToReplicatePerNode,
    DataNodeId remoteNode) throws ReplicationException, IOException {
  // collect the missing keys reported for each replica, one PartitionRequestInfo per partition
  List<PartitionRequestInfo> partitionRequestInfoList = new ArrayList<PartitionRequestInfo>();
  for (int i = 0; i < exchangeMetadataResponseList.size(); i++) {
    ExchangeMetadataResponse exchangeMetadataResponse = exchangeMetadataResponseList.get(i);
    RemoteReplicaInfo remoteReplicaInfo = replicasToReplicatePerNode.get(i);
    if (exchangeMetadataResponse.serverErrorCode == ServerErrorCode.No_Error) {
      Set<StoreKey> missingStoreKeys = exchangeMetadataResponse.missingStoreKeys;
      if (missingStoreKeys.size() > 0) {
        ArrayList<BlobId> keysToFetch = new ArrayList<BlobId>();
        for (StoreKey storeKey : missingStoreKeys) {
          keysToFetch.add((BlobId) storeKey);
        }
        PartitionRequestInfo partitionRequestInfo =
            new PartitionRequestInfo(remoteReplicaInfo.getReplicaId().getPartitionId(), keysToFetch);
        partitionRequestInfoList.add(partitionRequestInfo);
      }
    }
  }
  GetResponse getResponse = null;
  // fetch all missing messages across partitions in a single batched get request
  if (!partitionRequestInfoList.isEmpty()) {
    GetRequest getRequest = new GetRequest(correlationIdGenerator.incrementAndGet(),
        "replication-fetch-" + dataNodeId.getHostname(), MessageFormatFlags.All, partitionRequestInfoList,
        GetOption.None);
    long startTime = SystemTime.getInstance().milliseconds();
    try {
      connectedChannel.send(getRequest);
      ChannelOutput channelOutput = connectedChannel.receive();
      getResponse = GetResponse.readFrom(new DataInputStream(channelOutput.getInputStream()), clusterMap);
      long getRequestTime = SystemTime.getInstance().milliseconds() - startTime;
      replicationMetrics.updateGetRequestTime(getRequestTime, replicatingFromRemoteColo, replicatingOverSsl,
          datacenterName);
      if (getResponse.getError() != ServerErrorCode.No_Error) {
        logger.error("Remote node: " + remoteNode + " Thread name: " + threadName + " Remote replicas: "
            + replicasToReplicatePerNode + " GetResponse from replication: " + getResponse.getError());
        throw new ReplicationException(
            "Get request returned error when trying to get missing keys " + getResponse.getError());
      }
    } catch (IOException e) {
      responseHandler.onEvent(replicasToReplicatePerNode.get(0).getReplicaId(), e);
      throw e;
    }
  }
  return getResponse;
}
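For context, here is a minimal sketch, not taken from the Ambry source, of how the same kind of request could be assembled for a single partition using only the classes that appear above. The class and method names, the inputs, and the package imports are assumptions based on Ambry's source layout of this period.

// Package names are assumed, not verified against this exact Ambry revision.
import com.github.ambry.clustermap.PartitionId;
import com.github.ambry.commons.BlobId;
import com.github.ambry.messageformat.MessageFormatFlags;
import com.github.ambry.protocol.GetOption;
import com.github.ambry.protocol.GetRequest;
import com.github.ambry.protocol.PartitionRequestInfo;
import com.github.ambry.store.StoreKey;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Set;

class FetchRequestSketch {
  // Builds the same kind of GetRequest the method above issues, for one partition.
  static GetRequest buildFetchRequest(PartitionId partitionId, Set<StoreKey> missingKeys, int correlationId) {
    ArrayList<BlobId> keysToFetch = new ArrayList<>();
    for (StoreKey storeKey : missingKeys) {
      keysToFetch.add((BlobId) storeKey); // replication works with BlobIds, hence the cast above
    }
    PartitionRequestInfo partitionRequestInfo = new PartitionRequestInfo(partitionId, keysToFetch);
    // MessageFormatFlags.All fetches the complete message: header, user metadata and blob data.
    return new GetRequest(correlationId, "replication-fetch-sketch", MessageFormatFlags.All,
        Collections.singletonList(partitionRequestInfo), GetOption.None);
  }
}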
Use of com.github.ambry.store.StoreKey in project ambry by LinkedIn.
In class ReplicationTest, the method addPutMessagesToReplicasOfPartition:
/**
 * For the given partitionId, constructs put messages and adds them to the given lists.
 * @param partitionId the {@link PartitionId} to use for generating the {@link StoreKey} of the message.
 * @param hosts the list of {@link Host}s, all of which will be populated with the messages.
 * @param count the number of messages to construct and add.
 * @return the list of blob ids that were generated.
 * @throws MessageFormatException
 * @throws IOException
 */
private List<StoreKey> addPutMessagesToReplicasOfPartition(PartitionId partitionId, List<Host> hosts, int count)
    throws MessageFormatException, IOException {
  List<StoreKey> ids = new ArrayList<>();
  for (int i = 0; i < count; i++) {
    short accountId = Utils.getRandomShort(TestUtils.RANDOM);
    short containerId = Utils.getRandomShort(TestUtils.RANDOM);
    short blobIdVersion = CommonTestUtils.getCurrentBlobIdVersion();
    // alternate between encrypted and unencrypted blobs to cover both paths
    boolean toEncrypt = i % 2 == 0;
    BlobId id = new BlobId(blobIdVersion, BlobId.BlobIdType.NATIVE, ClusterMapUtils.UNKNOWN_DATACENTER_ID, accountId,
        containerId, partitionId, toEncrypt);
    ids.add(id);
    Pair<ByteBuffer, MessageInfo> putMsgInfo = getPutMessage(id, accountId, containerId, toEncrypt);
    for (Host host : hosts) {
      host.addMessage(partitionId, putMsgInfo.getSecond(), putMsgInfo.getFirst().duplicate());
    }
  }
  return ids;
}
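A hypothetical usage fragment for this helper, assuming a test body where localHost and remoteHost are Host fixtures and partitionId comes from the mock cluster map (java.util.Arrays supplies asList):

// write 10 put messages for the partition to both hosts, so the replicas start out identical
List<StoreKey> ids = addPutMessagesToReplicasOfPartition(partitionId, Arrays.asList(localHost, remoteHost), 10);
Assert.assertEquals("Expected one generated id per message", 10, ids.size());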
Use of com.github.ambry.store.StoreKey in project ambry by LinkedIn.
In class MockReadableStreamChannel, the method verifyBlob:
/**
 * Verifies that the blob associated with the blob id returned by a successful put operation has exactly the same
 * data as the original object that was put.
 * @param blobId the blob id of the blob that is to be verified.
 * @param properties the {@link BlobProperties} of the blob that is to be verified.
 * @param originalPutContent the original content of the blob.
 * @param originalUserMetadata the original user metadata of the blob.
 * @param serializedRequests the mapping from blob ids to their corresponding serialized {@link PutRequest}s.
 */
private void verifyBlob(String blobId, BlobProperties properties, byte[] originalPutContent,
    byte[] originalUserMetadata, HashMap<String, ByteBuffer> serializedRequests) throws Exception {
  ByteBuffer serializedRequest = serializedRequests.get(blobId);
  PutRequest.ReceivedPutRequest request = deserializePutRequest(serializedRequest);
  NotificationBlobType notificationBlobType;
  if (request.getBlobType() == BlobType.MetadataBlob) {
    notificationBlobType = NotificationBlobType.Composite;
    byte[] data = Utils.readBytesFromStream(request.getBlobStream(), (int) request.getBlobSize());
    CompositeBlobInfo compositeBlobInfo = MetadataContentSerDe.deserializeMetadataContentRecord(ByteBuffer.wrap(data),
        new BlobIdFactory(mockClusterMap));
    Assert.assertEquals("Wrong max chunk size in metadata", chunkSize, compositeBlobInfo.getChunkSize());
    Assert.assertEquals("Wrong total size in metadata", originalPutContent.length, compositeBlobInfo.getTotalSize());
    List<StoreKey> dataBlobIds = compositeBlobInfo.getKeys();
    Assert.assertEquals("Number of chunks is not as expected",
        RouterUtils.getNumChunksForBlobAndChunkSize(originalPutContent.length, chunkSize), dataBlobIds.size());
    // verify user-metadata
    if (properties.isEncrypted()) {
      ByteBuffer userMetadata = request.getUsermetadata();
      BlobId origBlobId = new BlobId(blobId, mockClusterMap);
      // run() is called directly instead of start()ing a thread because exceptions or
      // assertion failures in a non-main thread would not fail the test.
      new DecryptJob(origBlobId, request.getBlobEncryptionKey().duplicate(), null, userMetadata, cryptoService, kms,
          new CryptoJobMetricsTracker(metrics.decryptJobMetrics), (result, exception) -> {
        Assert.assertNull("Exception should not be thrown", exception);
        Assert.assertEquals("BlobId mismatch", origBlobId, result.getBlobId());
        Assert.assertArrayEquals("UserMetadata mismatch", originalUserMetadata,
            result.getDecryptedUserMetadata().array());
      }).run();
    } else {
      Assert.assertArrayEquals("UserMetadata mismatch", originalUserMetadata, request.getUsermetadata().array());
    }
    verifyCompositeBlob(properties, originalPutContent, originalUserMetadata, dataBlobIds, request,
        serializedRequests);
  } else {
    notificationBlobType = NotificationBlobType.Simple;
    byte[] content = Utils.readBytesFromStream(request.getBlobStream(), (int) request.getBlobSize());
    if (!properties.isEncrypted()) {
      Assert.assertArrayEquals("Input blob and written blob should be the same", originalPutContent, content);
      Assert.assertArrayEquals("UserMetadata mismatch for simple blob", originalUserMetadata,
          request.getUsermetadata().array());
      notificationSystem.verifyNotification(blobId, notificationBlobType, request.getBlobProperties());
    } else {
      ByteBuffer userMetadata = request.getUsermetadata();
      BlobId origBlobId = new BlobId(blobId, mockClusterMap);
      // run() is called directly instead of start()ing a thread because exceptions or
      // assertion failures in a non-main thread would not fail the test.
      new DecryptJob(origBlobId, request.getBlobEncryptionKey().duplicate(), ByteBuffer.wrap(content), userMetadata,
          cryptoService, kms, new CryptoJobMetricsTracker(metrics.decryptJobMetrics),
          new Callback<DecryptJob.DecryptJobResult>() {
            @Override
            public void onCompletion(DecryptJob.DecryptJobResult result, Exception exception) {
              Assert.assertNull("Exception should not be thrown", exception);
              Assert.assertEquals("BlobId mismatch", origBlobId, result.getBlobId());
              Assert.assertArrayEquals("Content mismatch", originalPutContent,
                  result.getDecryptedBlobContent().array());
              Assert.assertArrayEquals("UserMetadata mismatch", originalUserMetadata,
                  result.getDecryptedUserMetadata().array());
            }
          }).run();
    }
  }
  notificationSystem.verifyNotification(blobId, notificationBlobType, request.getBlobProperties());
}
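The chunk-count assertion above depends on the number of chunks being the ceiling of the total size divided by the chunk size; presumably that is what RouterUtils.getNumChunksForBlobAndChunkSize computes. A self-contained sketch of that arithmetic:

// Ceiling division without floating point: 10000 bytes at 4096 bytes per chunk -> 3 chunks.
static int numChunks(long totalSize, int chunkSize) {
  return (int) ((totalSize + chunkSize - 1) / chunkSize);
}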
Use of com.github.ambry.store.StoreKey in project ambry by LinkedIn.
In class HardDeleteRecoveryMetadata, the method getMessageInfo:
@Override
public MessageInfo getMessageInfo(Read read, long offset, StoreKeyFactory storeKeyFactory) throws IOException {
  try {
    // read the 2-byte message header version first
    ByteBuffer headerVersion = ByteBuffer.allocate(Version_Field_Size_In_Bytes);
    read.readInto(headerVersion, offset);
    offset += headerVersion.capacity();
    headerVersion.flip();
    short version = headerVersion.getShort();
    MessageHeader_Format headerFormat;
    ReadInputStream stream;
    long endOffset;
    if (!isValidHeaderVersion(version)) {
      throw new MessageFormatException("Version not known while reading message - " + version,
          MessageFormatErrorCodes.Unknown_Format_Version);
    }
    // size the header buffer from the version, splice the version back in and read the rest
    ByteBuffer header = ByteBuffer.allocate(getHeaderSizeForVersion(version));
    header.putShort(version);
    read.readInto(header, offset);
    offset += header.capacity() - headerVersion.capacity();
    header.flip();
    headerFormat = getMessageHeader(version, header);
    headerFormat.verifyHeader();
    endOffset = offset + headerFormat.getPayloadRelativeOffset() + headerFormat.getMessageSize();
    stream = new ReadInputStream(read, offset, endOffset);
    StoreKey key = storeKeyFactory.getStoreKey(new DataInputStream(stream));
    if (headerFormat.hasEncryptionKeyRecord()) {
      deserializeBlobEncryptionKey(stream);
    }
    // read the appropriate type of message based on the relative offset that is set
    if (headerFormat.isPutRecord()) {
      BlobProperties properties = deserializeBlobProperties(stream);
      return new MessageInfo(key, header.capacity() + key.sizeInBytes() + headerFormat.getMessageSize(),
          Utils.addSecondsToEpochTime(properties.getCreationTimeInMs(), properties.getTimeToLiveInSeconds()),
          properties.getAccountId(), properties.getContainerId(), properties.getCreationTimeInMs());
    } else {
      DeleteRecord deleteRecord = deserializeDeleteRecord(stream);
      return new MessageInfo(key, header.capacity() + key.sizeInBytes() + headerFormat.getMessageSize(), true,
          deleteRecord.getAccountId(), deleteRecord.getContainerId(), deleteRecord.getDeletionTimeInMs());
    }
  } catch (MessageFormatException e) {
    // we were not able to parse a message; surface it as an IOException
    throw new IOException("Message format exception while parsing messages ", e);
  } catch (IndexOutOfBoundsException e) {
    // we were not able to read a complete message
    throw new IOException("Trying to read more than the available bytes");
  }
}
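The header read above follows a peek-then-splice pattern: read the 2-byte version, size the full header buffer from it, write the version back into the buffer, then fill in the remainder. A stripped-down illustration over a plain ByteBuffer, where headerSizeFor is a hypothetical stand-in for getHeaderSizeForVersion and the sizes are illustrative only:

import java.nio.ByteBuffer;

class VersionedHeaderSketch {
  // Hypothetical: map a header version to its fixed serialized size (not the real Ambry values).
  static int headerSizeFor(short version) {
    return version == 1 ? 26 : 28;
  }

  static ByteBuffer readVersionedHeader(ByteBuffer source) {
    short version = source.getShort();           // peek the version field
    ByteBuffer header = ByteBuffer.allocate(headerSizeFor(version));
    header.putShort(version);                    // splice the version back in
    while (header.hasRemaining()) {
      header.put(source.get());                  // copy the remainder of the header
    }
    header.flip();                               // ready for parsing from the start
    return header;
  }
}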
Use of com.github.ambry.store.StoreKey in project ambry by LinkedIn.
In class MessageSievingInputStreamTest, the method testDeletedBlobs:
private void testDeletedBlobs(short blobVersion, BlobType blobType) throws IOException, MessageFormatException {
  // The sieving stream under test sees put records for two valid blobs and one delete record:
  // id1 (put record for a valid blob), id2 (delete record) and id3 (put record for a valid blob).
  ArrayList<Short> versions = new ArrayList<>();
  versions.add(Message_Header_Version_V1);
  if (blobVersion != Blob_Version_V1) {
    versions.add(Message_Header_Version_V2);
  }
  try {
    for (short version : versions) {
      headerVersionToUse = version;
      // create message stream for blob 1
      StoreKey key1 = new MockId("id1");
      short accountId = Utils.getRandomShort(TestUtils.RANDOM);
      short containerId = Utils.getRandomShort(TestUtils.RANDOM);
      BlobProperties prop1 = new BlobProperties(10, "servid1", accountId, containerId, false);
      byte[] encryptionKey1 = new byte[100];
      TestUtils.RANDOM.nextBytes(encryptionKey1);
      byte[] usermetadata1 = new byte[1000];
      TestUtils.RANDOM.nextBytes(usermetadata1);
      int blobContentSize = 2000;
      byte[] data1 = new byte[blobContentSize];
      TestUtils.RANDOM.nextBytes(data1);
      if (blobVersion == Blob_Version_V2 && blobType == BlobType.MetadataBlob) {
        ByteBuffer byteBufferBlob = MessageFormatTestUtils.getBlobContentForMetadataBlob(blobContentSize);
        data1 = byteBufferBlob.array();
        blobContentSize = data1.length;
      }
      ByteBufferInputStream stream1 = new ByteBufferInputStream(ByteBuffer.wrap(data1));
      MessageFormatInputStream messageFormatStream1 = (blobVersion == Blob_Version_V2)
          ? new PutMessageFormatInputStream(key1, ByteBuffer.wrap(encryptionKey1), prop1,
              ByteBuffer.wrap(usermetadata1), stream1, blobContentSize, blobType)
          : new PutMessageFormatBlobV1InputStream(key1, prop1, ByteBuffer.wrap(usermetadata1), stream1,
              blobContentSize, blobType);
      MessageInfo msgInfo1 =
          new MessageInfo(key1, messageFormatStream1.getSize(), accountId, containerId, prop1.getCreationTimeInMs());
      // create message stream for blob 2 and mark it as deleted
      StoreKey key2 = new MockId("id2");
      accountId = Utils.getRandomShort(TestUtils.RANDOM);
      containerId = Utils.getRandomShort(TestUtils.RANDOM);
      long deletionTimeMs = SystemTime.getInstance().milliseconds() + TestUtils.RANDOM.nextInt();
      MessageFormatInputStream messageFormatStream2 =
          new DeleteMessageFormatInputStream(key2, accountId, containerId, deletionTimeMs);
      MessageInfo msgInfo2 =
          new MessageInfo(key2, messageFormatStream2.getSize(), accountId, containerId, deletionTimeMs);
      // create message stream for blob 3
      StoreKey key3 = new MockId("id3");
      accountId = Utils.getRandomShort(TestUtils.RANDOM);
      containerId = Utils.getRandomShort(TestUtils.RANDOM);
      BlobProperties prop3 = new BlobProperties(10, "servid3", accountId, containerId, false);
      byte[] encryptionKey3 = new byte[100];
      TestUtils.RANDOM.nextBytes(encryptionKey3);
      byte[] usermetadata3 = new byte[1000];
      TestUtils.RANDOM.nextBytes(usermetadata3);
      blobContentSize = 2000;
      byte[] data3 = new byte[blobContentSize];
      TestUtils.RANDOM.nextBytes(data3);
      if (blobVersion == Blob_Version_V2 && blobType == BlobType.MetadataBlob) {
        ByteBuffer byteBufferBlob = MessageFormatTestUtils.getBlobContentForMetadataBlob(blobContentSize);
        data3 = byteBufferBlob.array();
        blobContentSize = data3.length;
      }
      ByteBufferInputStream stream3 = new ByteBufferInputStream(ByteBuffer.wrap(data3));
      MessageFormatInputStream messageFormatStream3 = (blobVersion == Blob_Version_V2)
          ? new PutMessageFormatInputStream(key3, ByteBuffer.wrap(encryptionKey3), prop3,
              ByteBuffer.wrap(usermetadata3), stream3, blobContentSize, blobType)
          : new PutMessageFormatBlobV1InputStream(key3, prop3, ByteBuffer.wrap(usermetadata3), stream3,
              blobContentSize, blobType);
      MessageInfo msgInfo3 =
          new MessageInfo(key3, messageFormatStream3.getSize(), accountId, containerId, prop3.getCreationTimeInMs());
      // create input stream for all blob messages together
      byte[] totalMessageContent = new byte[(int) messageFormatStream1.getSize()
          + (int) messageFormatStream2.getSize() + (int) messageFormatStream3.getSize()];
      messageFormatStream1.read(totalMessageContent, 0, (int) messageFormatStream1.getSize());
      messageFormatStream2.read(totalMessageContent, (int) messageFormatStream1.getSize(),
          (int) messageFormatStream2.getSize());
      messageFormatStream3.read(totalMessageContent,
          (int) messageFormatStream1.getSize() + (int) messageFormatStream2.getSize(),
          (int) messageFormatStream3.getSize());
      InputStream inputStream = new ByteBufferInputStream(ByteBuffer.wrap(totalMessageContent));
      List<MessageInfo> msgInfoList = new ArrayList<MessageInfo>();
      msgInfoList.add(msgInfo1);
      msgInfoList.add(msgInfo2);
      msgInfoList.add(msgInfo3);
      // construction should throw because the stream contains a delete record
      MessageSievingInputStream validMessageDetectionInputStream =
          new MessageSievingInputStream(inputStream, msgInfoList, new MockIdFactory(), new MetricRegistry());
      Assert.fail("IllegalStateException should have been thrown due to delete record");
    }
  } catch (IllegalStateException e) {
    Assert.assertTrue("IllegalStateException thrown as expected", true);
  }
  headerVersionToUse = Message_Header_Version_V1;
}
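The three fixed-offset copies into totalMessageContent above are easy to get wrong; an equivalent concatenation can be sketched with plain java.io, assuming each MessageFormatInputStream returns its full content from a single read, which is the same behavior the test itself relies on:

ByteArrayOutputStream out = new ByteArrayOutputStream();
for (MessageFormatInputStream messageStream
    : Arrays.asList(messageFormatStream1, messageFormatStream2, messageFormatStream3)) {
  byte[] buf = new byte[(int) messageStream.getSize()];
  messageStream.read(buf, 0, buf.length); // assumes a single read returns the whole message
  out.write(buf, 0, buf.length);
}
InputStream inputStream = new ByteArrayInputStream(out.toByteArray());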