use of com.github.ambry.protocol.PutRequest in project ambry by linkedin.
the class CloudOperationTest method doDirectPut.
/**
* Do a put directly to the mock servers. This allows for blobs with malformed properties to be constructed.
* @param blobProperties the {@link BlobProperties} for the blob.
* @param userMetadata user metadata of the blob.
* @param blobContent the raw content for the blob to upload (i.e. this can be serialized composite blob metadata or
* an encrypted blob).
* @return the blob id
* @throws Exception Any unexpected exception
*/
private BlobId doDirectPut(BlobProperties blobProperties, byte[] userMetadata, ByteBuf blobContent)
    throws Exception {
  List<PartitionId> writablePartitionIds =
      mockClusterMap.getWritablePartitionIds(MockClusterMap.DEFAULT_PARTITION_CLASS);
  PartitionId partitionId = writablePartitionIds.get(random.nextInt(writablePartitionIds.size()));
  BlobId blobId = new BlobId(routerConfig.routerBlobidCurrentVersion, BlobId.BlobIdType.NATIVE,
      mockClusterMap.getLocalDatacenterId(), blobProperties.getAccountId(), blobProperties.getContainerId(),
      partitionId, blobProperties.isEncrypted(), BlobId.BlobDataType.DATACHUNK);
  Iterator<MockServer> servers = partitionId.getReplicaIds()
      .stream()
      .map(ReplicaId::getDataNodeId)
      .map(dataNodeId -> mockServerLayout.getMockServer(dataNodeId.getHostname(), dataNodeId.getPort()))
      .iterator();
  ByteBuffer userMetadataBuf = ByteBuffer.wrap(userMetadata);
  while (servers.hasNext()) {
    MockServer server = servers.next();
    PutRequest request =
        new PutRequest(random.nextInt(), "clientId", blobId, blobProperties, userMetadataBuf.duplicate(),
            blobContent.retainedDuplicate(), blobContent.readableBytes(), BlobType.DataBlob, null);
    // Make sure we release the BoundedNettyByteBufReceive.
    server.send(request).release();
    request.release();
  }
  // send to Cloud destinations.
  PutRequest request =
      new PutRequest(random.nextInt(), "clientId", blobId, blobProperties, userMetadataBuf.duplicate(),
          blobContent.retainedDuplicate(), blobContent.readableBytes(), BlobType.DataBlob, null);
  // Get the cloud replica.
  ReplicaId replica = partitionId.getReplicaIds().get(0);
  Assert.assertEquals("It should be a cloud backed replica.", replica.getReplicaType(), ReplicaType.CLOUD_BACKED);
  String hostname = replica.getDataNodeId().getHostname();
  Port port = new Port(-1, PortType.PLAINTEXT);
  List<RequestInfo> requestList = new ArrayList<>();
  RequestInfo requestInfo = new RequestInfo(hostname, port, request, replica, null);
  requestList.add(requestInfo);
  List<ResponseInfo> responseList = sendAndWaitForResponses(requestList);
  request.release();
  blobContent.release();
  return blobId;
}
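The point of this helper is that it bypasses the router's own validation, so the size declared in the BlobProperties does not have to match the actual content length. A minimal usage sketch follows (hypothetical, not part of the test above; the five-argument BlobProperties convenience constructor is an assumption):

// Hypothetical sketch: put a blob whose declared size (200) deliberately disagrees with its
// real content length (100); a put through the router would never allow this.
byte[] rawContent = new byte[100];
random.nextBytes(rawContent);
// Assumed convenience constructor: BlobProperties(blobSize, serviceId, accountId, containerId, isEncrypted).
BlobProperties malformedProperties = new BlobProperties(200, "test-service", (short) 1, (short) 1, false);
BlobId malformedBlobId = doDirectPut(malformedProperties, new byte[10], Unpooled.wrappedBuffer(rawContent));
// A subsequent GET through the router can now exercise the size-mismatch handling path.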
use of com.github.ambry.protocol.PutRequest in project ambry by linkedin.
the class NonBlockingRouterTestBase method ensureStitchInAllServers.
/**
 * Ensure that the stitch request for the given blob id reaches all the mock servers in the {@link MockServerLayout}.
 * @param blobId The blob id for which the stitch request will be created.
 * @param serverLayout The mock server layout.
 * @param chunksToStitch The list of {@link ChunkInfo} to stitch.
 * @param singleBlobSize The size of each chunk.
 * @throws IOException
 */
protected void ensureStitchInAllServers(String blobId, MockServerLayout serverLayout, List<ChunkInfo> chunksToStitch,
    int singleBlobSize) throws IOException {
  TreeMap<Integer, Pair<StoreKey, Long>> indexToChunkIdsAndChunkSizes = new TreeMap<>();
  int i = 0;
  for (ChunkInfo chunkInfo : chunksToStitch) {
    indexToChunkIdsAndChunkSizes.put(i,
        new Pair<>(new BlobId(chunkInfo.getBlobId(), mockClusterMap), chunkInfo.getChunkSizeInBytes()));
    i++;
  }
  ByteBuffer serializedContent;
  int totalSize = singleBlobSize * chunksToStitch.size();
  if (routerConfig.routerMetadataContentVersion == MessageFormatRecord.Metadata_Content_Version_V2) {
    serializedContent = MetadataContentSerDe.serializeMetadataContentV2(singleBlobSize, totalSize,
        indexToChunkIdsAndChunkSizes.values().stream().map(Pair::getFirst).collect(Collectors.toList()));
  } else {
    List<Pair<StoreKey, Long>> orderedChunkIdList = new ArrayList<>(indexToChunkIdsAndChunkSizes.values());
    serializedContent = MetadataContentSerDe.serializeMetadataContentV3(totalSize, orderedChunkIdList);
  }
  BlobId id = new BlobId(blobId, mockClusterMap);
  for (MockServer server : serverLayout.getMockServers()) {
    if (!server.getBlobs().containsKey(blobId)) {
      PutRequest putRequest =
          new PutRequest(NonBlockingRouter.correlationIdGenerator.incrementAndGet(), routerConfig.routerHostname, id,
              putBlobProperties, ByteBuffer.wrap(putUserMetadata), Unpooled.wrappedBuffer(serializedContent),
              serializedContent.remaining(), BlobType.MetadataBlob, null);
      server.send(putRequest).release();
      putRequest.release();
    }
  }
}
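A hedged sketch of how a test might drive this helper; the stitchBlob call matches the Router interface, but PUT_CONTENT_SIZE, chunkIds, and the three-argument ChunkInfo constructor are assumptions, not taken from this file:

// Hypothetical sketch: stitch previously uploaded data chunks through the router, then force
// the resulting metadata blob onto every mock server so that later GETs do not depend on
// which replicas the stitch operation actually reached.
List<ChunkInfo> chunksToStitch = chunkIds.stream()
    .map(chunkId -> new ChunkInfo(chunkId, PUT_CONTENT_SIZE, Utils.Infinite_Time))
    .collect(Collectors.toList());
String stitchedBlobId = router.stitchBlob(putBlobProperties, putUserMetadata, chunksToStitch).get();
ensureStitchInAllServers(stitchedBlobId, mockServerLayout, chunksToStitch, PUT_CONTENT_SIZE);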
use of com.github.ambry.protocol.PutRequest in project ambry by linkedin.
the class StoredBlob method makeGetResponse.
/**
* Make a {@link GetResponse} for the given {@link GetRequest} for which the given {@link ServerErrorCode} was
* encountered. The request can be for BlobInfo, Blob, or All (the message format flags that the router requests).
* @param getRequest the {@link GetRequest} for which the response is being constructed.
* @param getError the {@link ServerErrorCode} that was encountered.
* @return the constructed {@link GetResponse}
* @throws IOException if there was an error constructing the response.
*/
GetResponse makeGetResponse(GetRequest getRequest, ServerErrorCode getError) throws IOException {
  GetResponse getResponse;
  if (getError == ServerErrorCode.No_Error) {
    List<PartitionRequestInfo> infos = getRequest.getPartitionInfoList();
    if (infos.size() != 1 || infos.get(0).getBlobIds().size() != 1) {
      getError = ServerErrorCode.Unknown_Error;
    }
  }
  ServerErrorCode serverError;
  ServerErrorCode partitionError;
  boolean isDataBlob = false;
  try {
    String id = getRequest.getPartitionInfoList().get(0).getBlobIds().get(0).getID();
    isDataBlob = blobs.get(id).type == BlobType.DataBlob;
  } catch (Exception ignored) {
  }
  if (!getErrorOnDataBlobOnly || isDataBlob) {
    // set it in the partitionResponseInfo
    if (getError == ServerErrorCode.No_Error || getError == ServerErrorCode.Blob_Expired
        || getError == ServerErrorCode.Blob_Deleted || getError == ServerErrorCode.Blob_Not_Found
        || getError == ServerErrorCode.Blob_Authorization_Failure || getError == ServerErrorCode.Disk_Unavailable) {
      partitionError = getError;
      serverError = ServerErrorCode.No_Error;
    } else {
      serverError = getError;
      // does not matter - this will not be checked if serverError is not No_Error.
      partitionError = ServerErrorCode.No_Error;
    }
  } else {
    serverError = ServerErrorCode.No_Error;
    partitionError = ServerErrorCode.No_Error;
  }
  if (serverError == ServerErrorCode.No_Error) {
    int byteBufferSize;
    ByteBuffer byteBuffer;
    StoreKey key = getRequest.getPartitionInfoList().get(0).getBlobIds().get(0);
    short accountId = Account.UNKNOWN_ACCOUNT_ID;
    short containerId = Container.UNKNOWN_CONTAINER_ID;
    long operationTimeMs = Utils.Infinite_Time;
    StoredBlob blob = blobs.get(key.getID());
    ServerErrorCode processedError = errorForGet(key.getID(), blob, getRequest);
    MessageMetadata msgMetadata = null;
    if (processedError == ServerErrorCode.No_Error) {
      ByteBuffer buf = blobs.get(key.getID()).serializedSentPutRequest.duplicate();
      // read off the size
      buf.getLong();
      // read off the type.
      buf.getShort();
      PutRequest originalBlobPutReq =
          PutRequest.readFrom(new DataInputStream(new ByteBufferInputStream(buf)), clusterMap);
      switch (getRequest.getMessageFormatFlag()) {
        case BlobInfo:
          BlobProperties blobProperties = originalBlobPutReq.getBlobProperties();
          accountId = blobProperties.getAccountId();
          containerId = blobProperties.getContainerId();
          operationTimeMs = blobProperties.getCreationTimeInMs();
          ByteBuffer userMetadata = originalBlobPutReq.getUsermetadata();
          byteBufferSize = MessageFormatRecord.BlobProperties_Format_V1.getBlobPropertiesRecordSize(blobProperties)
              + MessageFormatRecord.UserMetadata_Format_V1.getUserMetadataSize(userMetadata);
          byteBuffer = ByteBuffer.allocate(byteBufferSize);
          if (originalBlobPutReq.getBlobEncryptionKey() != null) {
            msgMetadata = new MessageMetadata(originalBlobPutReq.getBlobEncryptionKey().duplicate());
          }
          MessageFormatRecord.BlobProperties_Format_V1.serializeBlobPropertiesRecord(byteBuffer, blobProperties);
          MessageFormatRecord.UserMetadata_Format_V1.serializeUserMetadataRecord(byteBuffer, userMetadata);
          break;
        case Blob:
          switch (blobFormatVersion) {
            case MessageFormatRecord.Blob_Version_V2:
              if (originalBlobPutReq.getBlobEncryptionKey() != null) {
                msgMetadata = new MessageMetadata(originalBlobPutReq.getBlobEncryptionKey().duplicate());
              }
              byteBufferSize =
                  (int) MessageFormatRecord.Blob_Format_V2.getBlobRecordSize((int) originalBlobPutReq.getBlobSize());
              byteBuffer = ByteBuffer.allocate(byteBufferSize);
              MessageFormatRecord.Blob_Format_V2.serializePartialBlobRecord(byteBuffer,
                  (int) originalBlobPutReq.getBlobSize(), originalBlobPutReq.getBlobType());
              break;
            case MessageFormatRecord.Blob_Version_V1:
              byteBufferSize =
                  (int) MessageFormatRecord.Blob_Format_V1.getBlobRecordSize((int) originalBlobPutReq.getBlobSize());
              byteBuffer = ByteBuffer.allocate(byteBufferSize);
              MessageFormatRecord.Blob_Format_V1.serializePartialBlobRecord(byteBuffer,
                  (int) originalBlobPutReq.getBlobSize());
              break;
            default:
              throw new IllegalStateException("Blob format version " + blobFormatVersion + " not supported.");
          }
          byteBuffer.put(
              Utils.readBytesFromStream(originalBlobPutReq.getBlobStream(), (int) originalBlobPutReq.getBlobSize()));
          Crc32 crc = new Crc32();
          crc.update(byteBuffer.array(), 0, byteBuffer.position());
          byteBuffer.putLong(crc.getValue());
          break;
        case All:
          blobProperties = originalBlobPutReq.getBlobProperties();
          accountId = blobProperties.getAccountId();
          containerId = blobProperties.getContainerId();
          userMetadata = originalBlobPutReq.getUsermetadata();
          operationTimeMs = originalBlobPutReq.getBlobProperties().getCreationTimeInMs();
          int blobHeaderSize = MessageFormatRecord.MessageHeader_Format_V2.getHeaderSize();
          int blobEncryptionRecordSize = originalBlobPutReq.getBlobEncryptionKey() != null
              ? MessageFormatRecord.BlobEncryptionKey_Format_V1.getBlobEncryptionKeyRecordSize(
                  originalBlobPutReq.getBlobEncryptionKey().duplicate())
              : 0;
          int blobPropertiesSize =
              MessageFormatRecord.BlobProperties_Format_V1.getBlobPropertiesRecordSize(blobProperties);
          int userMetadataSize = MessageFormatRecord.UserMetadata_Format_V1.getUserMetadataSize(userMetadata);
          int blobInfoSize = blobPropertiesSize + userMetadataSize;
          int blobRecordSize;
          switch (blobFormatVersion) {
            case MessageFormatRecord.Blob_Version_V2:
              blobRecordSize =
                  (int) MessageFormatRecord.Blob_Format_V2.getBlobRecordSize((int) originalBlobPutReq.getBlobSize());
              break;
            case MessageFormatRecord.Blob_Version_V1:
              blobRecordSize =
                  (int) MessageFormatRecord.Blob_Format_V1.getBlobRecordSize((int) originalBlobPutReq.getBlobSize());
              break;
            default:
              throw new IllegalStateException("Blob format version " + blobFormatVersion + " not supported.");
          }
          byteBufferSize = blobHeaderSize + key.sizeInBytes() + blobEncryptionRecordSize + blobInfoSize + blobRecordSize;
          byteBuffer = ByteBuffer.allocate(byteBufferSize);
          try {
            MessageFormatRecord.MessageHeader_Format_V2.serializeHeader(byteBuffer,
                blobEncryptionRecordSize + blobInfoSize + blobRecordSize,
                originalBlobPutReq.getBlobEncryptionKey() == null ? Message_Header_Invalid_Relative_Offset
                    : blobHeaderSize + key.sizeInBytes(),
                blobHeaderSize + key.sizeInBytes() + blobEncryptionRecordSize, Message_Header_Invalid_Relative_Offset,
                blobHeaderSize + key.sizeInBytes() + blobEncryptionRecordSize + blobPropertiesSize,
                blobHeaderSize + key.sizeInBytes() + blobEncryptionRecordSize + blobInfoSize);
          } catch (MessageFormatException e) {
            e.printStackTrace();
          }
          byteBuffer.put(key.toBytes());
          if (originalBlobPutReq.getBlobEncryptionKey() != null) {
            MessageFormatRecord.BlobEncryptionKey_Format_V1.serializeBlobEncryptionKeyRecord(byteBuffer,
                originalBlobPutReq.getBlobEncryptionKey().duplicate());
            msgMetadata = new MessageMetadata(originalBlobPutReq.getBlobEncryptionKey().duplicate());
          }
          MessageFormatRecord.BlobProperties_Format_V1.serializeBlobPropertiesRecord(byteBuffer, blobProperties);
          MessageFormatRecord.UserMetadata_Format_V1.serializeUserMetadataRecord(byteBuffer, userMetadata);
          int blobRecordStart = byteBuffer.position();
          switch (blobFormatVersion) {
            case MessageFormatRecord.Blob_Version_V2:
              MessageFormatRecord.Blob_Format_V2.serializePartialBlobRecord(byteBuffer,
                  (int) originalBlobPutReq.getBlobSize(), originalBlobPutReq.getBlobType());
              break;
            case MessageFormatRecord.Blob_Version_V1:
              MessageFormatRecord.Blob_Format_V1.serializePartialBlobRecord(byteBuffer,
                  (int) originalBlobPutReq.getBlobSize());
              break;
            default:
              throw new IllegalStateException("Blob format version " + blobFormatVersion + " not supported.");
          }
          byteBuffer.put(
              Utils.readBytesFromStream(originalBlobPutReq.getBlobStream(), (int) originalBlobPutReq.getBlobSize()));
          crc = new Crc32();
          crc.update(byteBuffer.array(), blobRecordStart, blobRecordSize - MessageFormatRecord.Crc_Size);
          byteBuffer.putLong(crc.getValue());
          break;
        default:
          throw new IOException("GetRequest flag is not supported: " + getRequest.getMessageFormatFlag());
      }
    } else if (processedError == ServerErrorCode.Blob_Deleted) {
      if (partitionError == ServerErrorCode.No_Error) {
        partitionError = ServerErrorCode.Blob_Deleted;
      }
      byteBuffer = ByteBuffer.allocate(0);
      byteBufferSize = 0;
    } else if (processedError == ServerErrorCode.Blob_Expired) {
      if (partitionError == ServerErrorCode.No_Error) {
        partitionError = ServerErrorCode.Blob_Expired;
      }
      byteBuffer = ByteBuffer.allocate(0);
      byteBufferSize = 0;
    } else if (processedError == ServerErrorCode.Blob_Authorization_Failure) {
      if (partitionError == ServerErrorCode.No_Error) {
        partitionError = ServerErrorCode.Blob_Authorization_Failure;
      }
      byteBuffer = ByteBuffer.allocate(0);
      byteBufferSize = 0;
    } else {
      if (partitionError == ServerErrorCode.No_Error) {
        partitionError = ServerErrorCode.Blob_Not_Found;
      }
      byteBuffer = ByteBuffer.allocate(0);
      byteBufferSize = 0;
    }
    byteBuffer.flip();
    ByteBufferSend responseSend = new ByteBufferSend(byteBuffer);
    List<MessageInfo> messageInfoList = new ArrayList<>();
    List<MessageMetadata> messageMetadataList = new ArrayList<>();
    List<PartitionResponseInfo> partitionResponseInfoList = new ArrayList<PartitionResponseInfo>();
    if (partitionError == ServerErrorCode.No_Error) {
      messageInfoList.add(new MessageInfo(key, byteBufferSize, false, blob.isTtlUpdated(), blob.isUndeleted(),
          blob.expiresAt, null, accountId, containerId, operationTimeMs, blob.lifeVersion));
      messageMetadataList.add(msgMetadata);
    }
    PartitionResponseInfo partitionResponseInfo = partitionError == ServerErrorCode.No_Error
        ? new PartitionResponseInfo(getRequest.getPartitionInfoList().get(0).getPartition(), messageInfoList,
            messageMetadataList)
        : new PartitionResponseInfo(getRequest.getPartitionInfoList().get(0).getPartition(), partitionError);
    partitionResponseInfoList.add(partitionResponseInfo);
    getResponse = new GetResponse(getRequest.getCorrelationId(), getRequest.getClientId(), partitionResponseInfoList,
        responseSend, serverError);
  } else {
    getResponse = new GetResponse(getRequest.getCorrelationId(), getRequest.getClientId(),
        new ArrayList<PartitionResponseInfo>(), new ByteBufferSend(ByteBuffer.allocate(0)), serverError);
  }
  return getResponse;
}
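The error plumbing above encodes a distinction worth noting: blob-level conditions (Blob_Deleted, Blob_Expired, Blob_Not_Found, Blob_Authorization_Failure, Disk_Unavailable) are reported inside the PartitionResponseInfo while the GetResponse itself stays No_Error; any other code becomes a server-level error and the partition response list is left empty. A hedged sketch of how a router test typically exercises the partition-level path (setServerErrorForAllRequests and the GetBlobOptionsBuilder usage are assumptions drawn from the surrounding test utilities, not from this method):

// Hypothetical sketch: make every mock server report Blob_Not_Found at the partition level.
mockServerLayout.getMockServers()
    .forEach(server -> server.setServerErrorForAllRequests(ServerErrorCode.Blob_Not_Found));
try {
  router.getBlob(blobId, new GetBlobOptionsBuilder().build()).get();
  Assert.fail("GET should have failed");
} catch (ExecutionException e) {
  // The router sees No_Error at the server level, Blob_Not_Found per partition, and maps it.
  Assert.assertEquals(RouterErrorCode.BlobDoesNotExist, ((RouterException) e.getCause()).getErrorCode());
}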
use of com.github.ambry.protocol.PutRequest in project ambry by linkedin.
the class NonBlockingRouterTestBase method ensurePutInAllServers.
/**
 * Ensure that a put request for the given blob id reaches all the mock servers in the {@link MockServerLayout}.
 * @param blobId The blob id for which the put request will be created.
 * @param serverLayout The mock server layout.
 * @throws IOException
 */
protected void ensurePutInAllServers(String blobId, MockServerLayout serverLayout) throws IOException {
  // Make sure all the mock servers have this put
  BlobId id = new BlobId(blobId, mockClusterMap);
  for (MockServer server : serverLayout.getMockServers()) {
    if (!server.getBlobs().containsKey(blobId)) {
      PutRequest putRequest =
          new PutRequest(NonBlockingRouter.correlationIdGenerator.incrementAndGet(), routerConfig.routerHostname, id,
              putBlobProperties, ByteBuffer.wrap(putUserMetadata), Unpooled.wrappedBuffer(putContent),
              putContent.length, BlobType.DataBlob, null);
      server.send(putRequest).release();
      putRequest.release();
    }
  }
}
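A hedged usage sketch (hypothetical; the putBlob call matches the Router interface, but the surrounding variable names are assumptions):

// Hypothetical sketch: publish a blob through the router, then pin it on every mock server so
// that subsequent steps of the test do not depend on which replicas the original put reached.
String blobId = router.putBlob(putBlobProperties, putUserMetadata,
    new ByteBufferReadableStreamChannel(ByteBuffer.wrap(putContent)), new PutBlobOptionsBuilder().build()).get();
ensurePutInAllServers(blobId, mockServerLayout);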
use of com.github.ambry.protocol.PutRequest in project ambry by linkedin.
the class MockReadableStreamChannel method verifyCompositeBlob.
/**
 * Verify a composite blob's content, user metadata, and chunk sizes.
 * @param properties {@link BlobProperties} of the blob
 * @param originalPutContent the original put content
 * @param originalUserMetadata the original user metadata
 * @param dataBlobIds {@link List} of {@link StoreKey}s of the composite blob in context
 * @param request {@link com.github.ambry.protocol.PutRequest} to fetch info from
 * @param serializedRequests the mapping from blob ids to their corresponding serialized {@link PutRequest}s
 * @throws Exception
 */
private void verifyCompositeBlob(BlobProperties properties, byte[] originalPutContent, byte[] originalUserMetadata,
    List<StoreKey> dataBlobIds, PutRequest request, HashMap<String, ByteBuffer> serializedRequests) throws Exception {
  StoreKey lastKey = dataBlobIds.get(dataBlobIds.size() - 1);
  byte[] content = new byte[(int) request.getBlobProperties().getBlobSize()];
  AtomicInteger offset = new AtomicInteger(0);
  for (StoreKey key : dataBlobIds) {
    PutRequest dataBlobPutRequest = deserializePutRequest(serializedRequests.get(key.getID()));
    AtomicInteger dataBlobLength = new AtomicInteger((int) dataBlobPutRequest.getBlobSize());
    InputStream dataBlobStream = dataBlobPutRequest.getBlobStream();
    if (!properties.isEncrypted()) {
      Utils.readBytesFromStream(dataBlobStream, content, offset.get(), dataBlobLength.get());
      Assert.assertArrayEquals("UserMetadata mismatch", originalUserMetadata,
          dataBlobPutRequest.getUsermetadata().array());
    } else {
      byte[] dataBlobContent = Utils.readBytesFromStream(dataBlobStream, dataBlobLength.get());
      // run() is called directly rather than starting a thread with start() because exceptions
      // or assertion failures thrown on a non-main thread would not fail the test.
      new DecryptJob(dataBlobPutRequest.getBlobId(), dataBlobPutRequest.getBlobEncryptionKey().duplicate(),
          Unpooled.wrappedBuffer(dataBlobContent), dataBlobPutRequest.getUsermetadata().duplicate(), cryptoService,
          kms, null, new CryptoJobMetricsTracker(metrics.decryptJobMetrics), (result, exception) -> {
        Assert.assertNull("Exception should not be thrown", exception);
        assertEquals("BlobId mismatch", dataBlobPutRequest.getBlobId(), result.getBlobId());
        Assert.assertArrayEquals("UserMetadata mismatch", originalUserMetadata,
            result.getDecryptedUserMetadata().array());
        ByteBuf decryptedBlobContent = result.getDecryptedBlobContent();
        dataBlobLength.set(decryptedBlobContent.readableBytes());
        decryptedBlobContent.readBytes(content, offset.get(), dataBlobLength.get());
        decryptedBlobContent.release();
      }).run();
    }
    if (metadataContentVersion <= MessageFormatRecord.Metadata_Content_Version_V2 && key != lastKey) {
      assertEquals("all chunks except last should be fully filled", chunkSize, dataBlobLength.get());
    } else if (key == lastKey) {
      assertEquals("Last chunk should be of non-zero length and equal to the length of the remaining bytes",
          (originalPutContent.length - 1) % chunkSize + 1, dataBlobLength.get());
    }
    offset.addAndGet(dataBlobLength.get());
    assertEquals("dataBlobStream should have no more data", -1, dataBlobStream.read());
    notificationSystem.verifyNotification(key.getID(), NotificationBlobType.DataChunk,
        dataBlobPutRequest.getBlobProperties());
  }
  Assert.assertArrayEquals("Input blob and written blob should be the same", originalPutContent, content);
}
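One detail worth spelling out is the last-chunk assertion. A plain originalPutContent.length % chunkSize would be wrong whenever the content is an exact multiple of the chunk size, since it would yield 0 even though the last chunk is full. The (length - 1) % chunkSize + 1 form maps a zero remainder to a full chunk, as this small self-contained check illustrates:

// Worked example of the last-chunk length formula used above.
int chunkSize = 4;
assert (10 - 1) % chunkSize + 1 == 2; // 10 bytes -> chunks of 4 + 4 + 2
assert (8 - 1) % chunkSize + 1 == 4;  // 8 bytes -> chunks of 4 + 4; plain 8 % 4 would give 0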