Use of com.github.ambry.server.ServerErrorCode in project ambry by linkedin.
The class CloudOperationTest, method getBlobAndAssertSuccess.
/**
* Construct GetBlob operations with appropriate callbacks, then poll those operations until they complete,
* and ensure that the whole blob data is read out and the contents match.
* @param blobId id of the blob to get
* @param expectedLifeVersion the expected lifeVersion from the get operation.
* @param expectedBlobSize the expected blob size
* @param expectedBlobProperties the expected {@link BlobProperties} for the blob.
* @param expectedUserMetadata the expected user metadata
* @param expectPutContent the expected blob content
* @param options options of the get blob operation
* @throws Exception Any unexpected exception
*/
private void getBlobAndAssertSuccess(final BlobId blobId, final short expectedLifeVersion, final int expectedBlobSize, final BlobProperties expectedBlobProperties, final byte[] expectedUserMetadata, final byte[] expectPutContent, final GetBlobOptionsInternal options) throws Exception {
final CountDownLatch readCompleteLatch = new CountDownLatch(1);
final AtomicLong readCompleteResult = new AtomicLong(0);
// callback to compare the data
Callback<GetBlobResultInternal> callback = (result, exception) -> {
Assert.assertNull("Shouldn't have exception", exception);
try {
BlobInfo blobInfo;
switch(options.getBlobOptions.getOperationType()) {
case All:
Assert.assertFalse("not supposed to be raw mode", options.getBlobOptions.isRawMode());
blobInfo = result.getBlobResult.getBlobInfo();
Assert.assertTrue("Blob properties must be the same", RouterTestHelpers.arePersistedFieldsEquivalent(expectedBlobProperties, blobInfo.getBlobProperties()));
Assert.assertEquals("Blob size should in received blobProperties should be the same as actual", expectedBlobSize, blobInfo.getBlobProperties().getBlobSize());
Assert.assertArrayEquals("User metadata must be the same", expectedUserMetadata, blobInfo.getUserMetadata());
Assert.assertEquals("LifeVersion mismatch", expectedLifeVersion, blobInfo.getLifeVersion());
break;
case Data:
Assert.assertNull("Unexpected blob info in operation result", result.getBlobResult.getBlobInfo());
break;
case BlobInfo:
blobInfo = result.getBlobResult.getBlobInfo();
Assert.assertTrue("Blob properties must be the same", RouterTestHelpers.arePersistedFieldsEquivalent(expectedBlobProperties, blobInfo.getBlobProperties()));
Assert.assertEquals("Blob size should in received blobProperties should be the same as actual", expectedBlobSize, blobInfo.getBlobProperties().getBlobSize());
Assert.assertNull("Unexpected blob data in operation result", result.getBlobResult.getBlobDataChannel());
Assert.assertEquals("LifeVersion mismatch", expectedLifeVersion, blobInfo.getLifeVersion());
}
} catch (Throwable e) {
Assert.fail("Shouldn't receive exception here");
}
if (options.getBlobOptions.getOperationType() != GetBlobOptions.OperationType.BlobInfo) {
final ByteBufferAsyncWritableChannel asyncWritableChannel = new ByteBufferAsyncWritableChannel();
Utils.newThread(() -> {
Future<Long> readIntoFuture = result.getBlobResult.getBlobDataChannel().readInto(asyncWritableChannel, null);
assertBlobReadSuccess(options.getBlobOptions, readIntoFuture, asyncWritableChannel, result.getBlobResult.getBlobDataChannel(), readCompleteLatch, readCompleteResult, expectedBlobSize, expectPutContent);
}, false).start();
} else {
readCompleteLatch.countDown();
}
};
// create GetBlobOperation
final Map<Integer, GetOperation> correlationIdToGetOperation = new HashMap<>();
final RequestRegistrationCallback<GetOperation> requestRegistrationCallback = new RequestRegistrationCallback<>(correlationIdToGetOperation);
NonBlockingRouter.currentOperationsCount.incrementAndGet();
GetBlobOperation op = new GetBlobOperation(routerConfig, routerMetrics, mockClusterMap, responseHandler, blobId, options, callback, routerCallback, blobIdFactory, null, null, null, time, false, null);
requestRegistrationCallback.setRequestsToSend(new ArrayList<>());
// Drive the operation to completion: poll for requests, send them, and hand the responses back to the operation
while (!op.isOperationComplete()) {
op.poll(requestRegistrationCallback);
List<ResponseInfo> responses = sendAndWaitForResponses(requestRegistrationCallback.getRequestsToSend());
for (ResponseInfo responseInfo : responses) {
GetResponse getResponse = RouterUtils.extractResponseAndNotifyResponseHandler(responseHandler, routerMetrics, responseInfo, stream -> GetResponse.readFrom(stream, mockClusterMap), response -> {
ServerErrorCode serverError = response.getError();
if (serverError == ServerErrorCode.No_Error) {
serverError = response.getPartitionResponseInfoList().get(0).getErrorCode();
}
return serverError;
});
op.handleResponse(responseInfo, getResponse);
responseInfo.release();
}
}
readCompleteLatch.await();
Assert.assertTrue("Operation should be complete at this time", op.isOperationComplete());
// Ensure that a ChannelClosed exception is not set when the ReadableStreamChannel is closed correctly.
Assert.assertNull("Callback operation exception should be null", op.getOperationException());
if (options.getBlobOptions.getOperationType() != GetBlobOptions.OperationType.BlobInfo && !options.getBlobOptions.isRawMode() && !options.getChunkIdsOnly) {
int sizeWritten = expectedBlobSize;
if (options.getBlobOptions.getRange() != null) {
ByteRange range = options.getBlobOptions.getRange().toResolvedByteRange(expectedBlobSize, options.getBlobOptions.resolveRangeOnEmptyBlob());
sizeWritten = (int) range.getRangeSize();
}
Assert.assertEquals("Size read must equal size written", sizeWritten, readCompleteResult.get());
}
}
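The synchronization here is the load-bearing part of the test: the completion callback hands the blob data channel to a reader thread, and a CountDownLatch plus an AtomicLong carry the read result back to the test thread, which only asserts after awaiting the latch. A minimal sketch of that pattern in plain Java (the thread body and byte counts are hypothetical stand-ins, not Ambry classes):

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.atomic.AtomicLong;

public class LatchedReadPattern {
    public static void main(String[] args) throws InterruptedException {
        final CountDownLatch readCompleteLatch = new CountDownLatch(1);
        final AtomicLong readCompleteResult = new AtomicLong(0);
        final int expectedBlobSize = 4096; // hypothetical size

        // Stand-in for the reader thread the callback spawns above.
        new Thread(() -> {
            long bytesRead = expectedBlobSize; // a real reader would drain the blob data channel
            readCompleteResult.set(bytesRead);
            readCompleteLatch.countDown();
        }).start();

        // The test thread blocks until the reader signals completion, so the
        // final assertion can never race the asynchronous read.
        readCompleteLatch.await();
        if (readCompleteResult.get() != expectedBlobSize) {
            throw new AssertionError("Size read must equal size written");
        }
    }
}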
Use of com.github.ambry.server.ServerErrorCode in project ambry by linkedin.
The class GetBlobOperationTest, method doTestSuccessInThePresenceOfVariousErrors.
/**
* Helper method to simulate errors from the servers. Only one node in the datacenter where the put happened will
* return success. No matter in which order the servers are contacted, as long as one of them returns success, the whole
* operation should succeed.
* @param dcWherePutHappened the datacenter where the put happened.
*/
private void doTestSuccessInThePresenceOfVariousErrors(String dcWherePutHappened) throws Exception {
ArrayList<MockServer> mockServers = new ArrayList<>(mockServerLayout.getMockServers());
ArrayList<ServerErrorCode> serverErrors = new ArrayList<>(Arrays.asList(ServerErrorCode.values()));
// Set the status to various server-level or partition-level errors (not Blob_Deleted or Blob_Expired, as those
// are final), except for one of the servers in the datacenter where the put happened (puts only go to the
// local DC, whereas gets go cross-colo).
serverErrors.remove(ServerErrorCode.Blob_Deleted);
serverErrors.remove(ServerErrorCode.Blob_Expired);
serverErrors.remove(ServerErrorCode.No_Error);
serverErrors.remove(ServerErrorCode.Blob_Authorization_Failure);
boolean goodServerMarked = false;
boolean notFoundSetInOriginalDC = false;
for (MockServer mockServer : mockServers) {
ServerErrorCode code = serverErrors.get(random.nextInt(serverErrors.size()));
// Make sure we don't set Blob_Not_Found more than once in the original DC.
if (mockServer.getDataCenter().equals(dcWherePutHappened)) {
if (!goodServerMarked) {
mockServer.setServerErrorForAllRequests(ServerErrorCode.No_Error);
goodServerMarked = true;
} else {
if (!notFoundSetInOriginalDC) {
mockServer.setServerErrorForAllRequests(code);
notFoundSetInOriginalDC = code == ServerErrorCode.Blob_Not_Found;
} else {
while (code == ServerErrorCode.Blob_Not_Found) {
code = serverErrors.get(random.nextInt(serverErrors.size()));
}
mockServer.setServerErrorForAllRequests(code);
}
}
} else {
mockServer.setServerErrorForAllRequests(code);
}
}
getAndAssertSuccess();
}
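The loop above maintains two invariants in the origin datacenter: exactly one replica returns No_Error, and at most one returns Blob_Not_Found; presumably a second not-found from the originating DC could let the router conclude the blob never existed and fail the get early. A self-contained sketch of the assignment logic, with a stand-in enum instead of Ambry's ServerErrorCode:

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.Random;

public class OriginDcErrorAssignment {
    // Stand-in for ServerErrorCode with just the values this sketch needs.
    enum Code { No_Error, Blob_Not_Found, IO_Error, Disk_Unavailable }

    public static void main(String[] args) {
        Random random = new Random();
        List<Code> pool = new ArrayList<>(Arrays.asList(Code.Blob_Not_Found, Code.IO_Error, Code.Disk_Unavailable));
        Code[] originDcReplicas = new Code[3]; // hypothetical replica count
        boolean goodServerMarked = false;
        boolean notFoundSet = false;
        for (int i = 0; i < originDcReplicas.length; i++) {
            Code code = pool.get(random.nextInt(pool.size()));
            if (!goodServerMarked) {
                originDcReplicas[i] = Code.No_Error; // exactly one replica succeeds
                goodServerMarked = true;
            } else if (!notFoundSet) {
                originDcReplicas[i] = code;
                notFoundSet = (code == Code.Blob_Not_Found);
            } else {
                while (code == Code.Blob_Not_Found) { // never a second not-found
                    code = pool.get(random.nextInt(pool.size()));
                }
                originDcReplicas[i] = code;
            }
        }
        System.out.println(Arrays.toString(originDcReplicas));
    }
}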
Use of com.github.ambry.server.ServerErrorCode in project ambry by linkedin.
The class GetBlobOperationTest, method testFailureOnServerErrors.
/**
* Tests the case where all servers return the same server-level error code
* @throws Exception
*/
@Test
public void testFailureOnServerErrors() throws Exception {
doPut();
// Set the status to various server-level errors (remove all partition-level errors and non-errors)
EnumSet<ServerErrorCode> serverErrors = EnumSet.complementOf(EnumSet.of(ServerErrorCode.Blob_Deleted, ServerErrorCode.Blob_Expired, ServerErrorCode.No_Error, ServerErrorCode.Blob_Authorization_Failure, ServerErrorCode.Blob_Not_Found));
for (ServerErrorCode serverErrorCode : serverErrors) {
mockServerLayout.getMockServers().forEach(server -> server.setServerErrorForAllRequests(serverErrorCode));
GetBlobOperation op = createOperationAndComplete(null);
RouterErrorCode expectedRouterError;
switch(serverErrorCode) {
case Replica_Unavailable:
expectedRouterError = RouterErrorCode.AmbryUnavailable;
break;
case Disk_Unavailable:
// if all the disks are unavailable (which should be extremely rare), after replacing these disks, the blob is
// definitely not present.
expectedRouterError = RouterErrorCode.BlobDoesNotExist;
break;
default:
expectedRouterError = RouterErrorCode.UnexpectedInternalError;
}
assertFailureAndCheckErrorCode(op, expectedRouterError);
}
}
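The switch in the test doubles as the expected server-to-router error mapping. Restated as a standalone helper (a sketch for illustration, not a method that exists in Ambry):

import com.github.ambry.router.RouterErrorCode;
import com.github.ambry.server.ServerErrorCode;

final class ExpectedErrorMapping {
    // Mirrors the switch in testFailureOnServerErrors above.
    static RouterErrorCode expectedRouterError(ServerErrorCode serverErrorCode) {
        switch (serverErrorCode) {
            case Replica_Unavailable:
                // Every replica reporting unavailable means the service, not the blob, is the problem.
                return RouterErrorCode.AmbryUnavailable;
            case Disk_Unavailable:
                // If all disks are unavailable, the blob will not exist once the disks are replaced.
                return RouterErrorCode.BlobDoesNotExist;
            default:
                return RouterErrorCode.UnexpectedInternalError;
        }
    }
}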
Use of com.github.ambry.server.ServerErrorCode in project ambry by linkedin.
The class NonBlockingRouterTest, method testSuccessfulPutDataChunkDelete.
/**
* Test that even when a composite blob put succeeds, the slipped put data chunks are deleted.
*/
@Test
public void testSuccessfulPutDataChunkDelete() throws Exception {
try {
// This test is somewhat probabilistic. It is not possible to mock slipped puts deterministically, since we
// cannot control the order in which hosts receive requests, and not every request is sent when put requests are
// guaranteed to fail or succeed. Instead, we set the number of chunks and the max attempts high enough that
// slipped puts are all but guaranteed to happen and the operation succeeds.
maxPutChunkSize = PUT_CONTENT_SIZE / 8;
final int NUM_MAX_ATTEMPTS = 100;
Properties props = getNonBlockingRouterProperties("DC1");
props.setProperty("router.max.slipped.put.attempts", Integer.toString(NUM_MAX_ATTEMPTS));
VerifiableProperties verifiableProperties = new VerifiableProperties(props);
RouterConfig routerConfig = new RouterConfig(verifiableProperties);
MockClusterMap mockClusterMap = new MockClusterMap();
MockTime mockTime = new MockTime();
MockServerLayout mockServerLayout = new MockServerLayout(mockClusterMap);
// Since this test wants to ensure that successfully put data chunks are deleted when the overall put operation
// succeeds but some chunks succeed only after a retry, it uses a notification system to track the deletions.
final CountDownLatch deletesDoneLatch = new CountDownLatch(1);
final Map<String, String> blobsThatAreDeleted = new HashMap<>();
LoggingNotificationSystem deleteTrackingNotificationSystem = new LoggingNotificationSystem() {
@Override
public void onBlobDeleted(String blobId, String serviceId, Account account, Container container) {
blobsThatAreDeleted.put(blobId, serviceId);
deletesDoneLatch.countDown();
}
};
router = new NonBlockingRouter(routerConfig, new NonBlockingRouterMetrics(mockClusterMap, routerConfig), new MockNetworkClientFactory(verifiableProperties, mockSelectorState, MAX_PORTS_PLAIN_TEXT, MAX_PORTS_SSL, CHECKOUT_TIMEOUT_MS, mockServerLayout, mockTime), deleteTrackingNotificationSystem, mockClusterMap, kms, cryptoService, cryptoJobHandler, accountService, mockTime, MockClusterMap.DEFAULT_PARTITION_CLASS);
setOperationParams();
// In each DC, set up the servers such that one node always succeeds and the other nodes alternate between
// Unknown_Error and No_Error. With very high probability, at some point a put will succeed on one node while
// failing on the other two.
List<DataNodeId> dataNodeIds = mockClusterMap.getDataNodeIds();
List<ServerErrorCode> serverErrorList = new ArrayList<>();
for (int i = 0; i < NUM_MAX_ATTEMPTS; i++) {
serverErrorList.add(ServerErrorCode.Unknown_Error);
serverErrorList.add(ServerErrorCode.No_Error);
}
Set<String> healthyNodeDC = new HashSet<>();
for (DataNodeId dataNodeId : dataNodeIds) {
MockServer server = mockServerLayout.getMockServer(dataNodeId.getHostname(), dataNodeId.getPort());
if (healthyNodeDC.contains(dataNodeId.getDatacenterName())) {
server.setServerErrors(serverErrorList);
} else {
server.resetServerErrors();
}
healthyNodeDC.add(dataNodeId.getDatacenterName());
}
// Submit the put operation and wait for it to succeed.
String blobId = router.putBlob(putBlobProperties, putUserMetadata, putChannel, new PutBlobOptionsBuilder().build()).get();
// Now, wait until at least one delete happens within AWAIT_TIMEOUT_MS.
Assert.assertTrue("Some blobs should have been deleted within " + AWAIT_TIMEOUT_MS, deletesDoneLatch.await(AWAIT_TIMEOUT_MS, TimeUnit.MILLISECONDS));
// Wait for the rest of the deletes to finish.
long waitStart = SystemTime.getInstance().milliseconds();
while (router.getBackgroundOperationsCount() != 0 && SystemTime.getInstance().milliseconds() < waitStart + AWAIT_TIMEOUT_MS) {
Thread.sleep(1000);
}
for (Map.Entry<String, String> blobIdAndServiceId : blobsThatAreDeleted.entrySet()) {
Assert.assertNotEquals("We should not be deleting the valid blob by mistake", blobId, blobIdAndServiceId.getKey());
Assert.assertEquals("Unexpected service ID for deleted blob", BackgroundDeleteRequest.SERVICE_ID_PREFIX + putBlobProperties.getServiceId(), blobIdAndServiceId.getValue());
}
} finally {
if (router != null) {
router.close();
assertClosed();
}
}
}
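The scripted error list is what makes slipped puts likely: each request to a degraded server consumes the next entry, so failures and successes interleave, and a chunk that fails on two nodes may already be durable on a third. A toy model of that consumption (a sketch; MockServer's real bookkeeping may differ):

import java.util.ArrayDeque;
import java.util.Queue;

public class ScriptedErrorQueue {
    enum Code { Unknown_Error, No_Error } // stand-ins for the ServerErrorCode values used above

    public static void main(String[] args) {
        Queue<Code> scripted = new ArrayDeque<>();
        int maxAttempts = 4; // the test uses 100 to make slipped puts near-certain
        for (int i = 0; i < maxAttempts; i++) {
            scripted.add(Code.Unknown_Error);
            scripted.add(Code.No_Error);
        }
        // Each incoming request pops the next scripted outcome. A chunk put that
        // fails here but already succeeded on another replica is a "slipped"
        // chunk that the router must later delete in the background.
        for (int request = 0; request < 6 && !scripted.isEmpty(); request++) {
            System.out.println("request " + request + " -> " + scripted.poll());
        }
    }
}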
Use of com.github.ambry.server.ServerErrorCode in project ambry by linkedin.
The class StoredBlob, method makeGetResponse.
/**
* Make a {@link GetResponse} for the given {@link GetRequest} for which the given {@link ServerErrorCode} was
* encountered. The request could be for BlobInfo or for Blob (the only two options that the router would
* request).
* @param getRequest the {@link GetRequest} for which the response is being constructed.
* @param getError the {@link ServerErrorCode} that was encountered.
* @return the constructed {@link GetResponse}
* @throws IOException if there was an error constructing the response.
*/
GetResponse makeGetResponse(GetRequest getRequest, ServerErrorCode getError) throws IOException {
GetResponse getResponse;
if (getError == ServerErrorCode.No_Error) {
List<PartitionRequestInfo> infos = getRequest.getPartitionInfoList();
if (infos.size() != 1 || infos.get(0).getBlobIds().size() != 1) {
getError = ServerErrorCode.Unknown_Error;
}
}
ServerErrorCode serverError;
ServerErrorCode partitionError;
boolean isDataBlob = false;
try {
String id = getRequest.getPartitionInfoList().get(0).getBlobIds().get(0).getID();
isDataBlob = blobs.get(id).type == BlobType.DataBlob;
} catch (Exception ignored) {
}
if (!getErrorOnDataBlobOnly || isDataBlob) {
// set it in the partitionResponseInfo
if (getError == ServerErrorCode.No_Error || getError == ServerErrorCode.Blob_Expired || getError == ServerErrorCode.Blob_Deleted || getError == ServerErrorCode.Blob_Not_Found || getError == ServerErrorCode.Blob_Authorization_Failure || getError == ServerErrorCode.Disk_Unavailable) {
partitionError = getError;
serverError = ServerErrorCode.No_Error;
} else {
serverError = getError;
// does not matter - this will not be checked if serverError is not No_Error.
partitionError = ServerErrorCode.No_Error;
}
} else {
serverError = ServerErrorCode.No_Error;
partitionError = ServerErrorCode.No_Error;
}
if (serverError == ServerErrorCode.No_Error) {
int byteBufferSize;
ByteBuffer byteBuffer;
StoreKey key = getRequest.getPartitionInfoList().get(0).getBlobIds().get(0);
short accountId = Account.UNKNOWN_ACCOUNT_ID;
short containerId = Container.UNKNOWN_CONTAINER_ID;
long operationTimeMs = Utils.Infinite_Time;
StoredBlob blob = blobs.get(key.getID());
ServerErrorCode processedError = errorForGet(key.getID(), blob, getRequest);
MessageMetadata msgMetadata = null;
if (processedError == ServerErrorCode.No_Error) {
ByteBuffer buf = blobs.get(key.getID()).serializedSentPutRequest.duplicate();
// read off the size
buf.getLong();
// read off the type.
buf.getShort();
PutRequest originalBlobPutReq = PutRequest.readFrom(new DataInputStream(new ByteBufferInputStream(buf)), clusterMap);
switch(getRequest.getMessageFormatFlag()) {
case BlobInfo:
BlobProperties blobProperties = originalBlobPutReq.getBlobProperties();
accountId = blobProperties.getAccountId();
containerId = blobProperties.getContainerId();
operationTimeMs = blobProperties.getCreationTimeInMs();
ByteBuffer userMetadata = originalBlobPutReq.getUsermetadata();
byteBufferSize = MessageFormatRecord.BlobProperties_Format_V1.getBlobPropertiesRecordSize(blobProperties) + MessageFormatRecord.UserMetadata_Format_V1.getUserMetadataSize(userMetadata);
byteBuffer = ByteBuffer.allocate(byteBufferSize);
if (originalBlobPutReq.getBlobEncryptionKey() != null) {
msgMetadata = new MessageMetadata(originalBlobPutReq.getBlobEncryptionKey().duplicate());
}
MessageFormatRecord.BlobProperties_Format_V1.serializeBlobPropertiesRecord(byteBuffer, blobProperties);
MessageFormatRecord.UserMetadata_Format_V1.serializeUserMetadataRecord(byteBuffer, userMetadata);
break;
case Blob:
switch(blobFormatVersion) {
case MessageFormatRecord.Blob_Version_V2:
if (originalBlobPutReq.getBlobEncryptionKey() != null) {
msgMetadata = new MessageMetadata(originalBlobPutReq.getBlobEncryptionKey().duplicate());
}
byteBufferSize = (int) MessageFormatRecord.Blob_Format_V2.getBlobRecordSize((int) originalBlobPutReq.getBlobSize());
byteBuffer = ByteBuffer.allocate(byteBufferSize);
MessageFormatRecord.Blob_Format_V2.serializePartialBlobRecord(byteBuffer, (int) originalBlobPutReq.getBlobSize(), originalBlobPutReq.getBlobType());
break;
case MessageFormatRecord.Blob_Version_V1:
byteBufferSize = (int) MessageFormatRecord.Blob_Format_V1.getBlobRecordSize((int) originalBlobPutReq.getBlobSize());
byteBuffer = ByteBuffer.allocate(byteBufferSize);
MessageFormatRecord.Blob_Format_V1.serializePartialBlobRecord(byteBuffer, (int) originalBlobPutReq.getBlobSize());
break;
default:
throw new IllegalStateException("Blob format version " + blobFormatVersion + " not supported.");
}
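// serializePartialBlobRecord above wrote only the record's header fields; the raw blob bytes are appended
// next, and the CRC over everything written so far completes the Blob record.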
byteBuffer.put(Utils.readBytesFromStream(originalBlobPutReq.getBlobStream(), (int) originalBlobPutReq.getBlobSize()));
Crc32 crc = new Crc32();
crc.update(byteBuffer.array(), 0, byteBuffer.position());
byteBuffer.putLong(crc.getValue());
break;
case All:
blobProperties = originalBlobPutReq.getBlobProperties();
accountId = blobProperties.getAccountId();
containerId = blobProperties.getContainerId();
userMetadata = originalBlobPutReq.getUsermetadata();
operationTimeMs = originalBlobPutReq.getBlobProperties().getCreationTimeInMs();
int blobHeaderSize = MessageFormatRecord.MessageHeader_Format_V2.getHeaderSize();
int blobEncryptionRecordSize = originalBlobPutReq.getBlobEncryptionKey() != null ? MessageFormatRecord.BlobEncryptionKey_Format_V1.getBlobEncryptionKeyRecordSize(originalBlobPutReq.getBlobEncryptionKey().duplicate()) : 0;
int blobPropertiesSize = MessageFormatRecord.BlobProperties_Format_V1.getBlobPropertiesRecordSize(blobProperties);
int userMetadataSize = MessageFormatRecord.UserMetadata_Format_V1.getUserMetadataSize(userMetadata);
int blobInfoSize = blobPropertiesSize + userMetadataSize;
int blobRecordSize;
switch(blobFormatVersion) {
case MessageFormatRecord.Blob_Version_V2:
blobRecordSize = (int) MessageFormatRecord.Blob_Format_V2.getBlobRecordSize((int) originalBlobPutReq.getBlobSize());
break;
case MessageFormatRecord.Blob_Version_V1:
blobRecordSize = (int) MessageFormatRecord.Blob_Format_V1.getBlobRecordSize((int) originalBlobPutReq.getBlobSize());
break;
default:
throw new IllegalStateException("Blob format version " + blobFormatVersion + " not supported.");
}
byteBufferSize = blobHeaderSize + key.sizeInBytes() + blobEncryptionRecordSize + blobInfoSize + blobRecordSize;
byteBuffer = ByteBuffer.allocate(byteBufferSize);
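// Message layout for the All case: header | store key | (optional) encryption key record | blob properties |
// user metadata | blob record. The offsets passed to serializeHeader below are relative to the start of the
// message; Message_Header_Invalid_Relative_Offset marks records that are absent (apparently the update record,
// which a get response never carries, and the encryption key when the original put had none).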
try {
MessageFormatRecord.MessageHeader_Format_V2.serializeHeader(byteBuffer, blobEncryptionRecordSize + blobInfoSize + blobRecordSize, originalBlobPutReq.getBlobEncryptionKey() == null ? Message_Header_Invalid_Relative_Offset : blobHeaderSize + key.sizeInBytes(), blobHeaderSize + key.sizeInBytes() + blobEncryptionRecordSize, Message_Header_Invalid_Relative_Offset, blobHeaderSize + key.sizeInBytes() + blobEncryptionRecordSize + blobPropertiesSize, blobHeaderSize + key.sizeInBytes() + blobEncryptionRecordSize + blobInfoSize);
} catch (MessageFormatException e) {
e.printStackTrace();
}
byteBuffer.put(key.toBytes());
if (originalBlobPutReq.getBlobEncryptionKey() != null) {
MessageFormatRecord.BlobEncryptionKey_Format_V1.serializeBlobEncryptionKeyRecord(byteBuffer, originalBlobPutReq.getBlobEncryptionKey().duplicate());
msgMetadata = new MessageMetadata(originalBlobPutReq.getBlobEncryptionKey().duplicate());
}
MessageFormatRecord.BlobProperties_Format_V1.serializeBlobPropertiesRecord(byteBuffer, blobProperties);
MessageFormatRecord.UserMetadata_Format_V1.serializeUserMetadataRecord(byteBuffer, userMetadata);
int blobRecordStart = byteBuffer.position();
switch(blobFormatVersion) {
case MessageFormatRecord.Blob_Version_V2:
MessageFormatRecord.Blob_Format_V2.serializePartialBlobRecord(byteBuffer, (int) originalBlobPutReq.getBlobSize(), originalBlobPutReq.getBlobType());
break;
case MessageFormatRecord.Blob_Version_V1:
MessageFormatRecord.Blob_Format_V1.serializePartialBlobRecord(byteBuffer, (int) originalBlobPutReq.getBlobSize());
break;
default:
throw new IllegalStateException("Blob format version " + blobFormatVersion + " not supported.");
}
byteBuffer.put(Utils.readBytesFromStream(originalBlobPutReq.getBlobStream(), (int) originalBlobPutReq.getBlobSize()));
crc = new Crc32();
crc.update(byteBuffer.array(), blobRecordStart, blobRecordSize - MessageFormatRecord.Crc_Size);
byteBuffer.putLong(crc.getValue());
break;
default:
throw new IOException("GetRequest flag is not supported: " + getRequest.getMessageFormatFlag());
}
} else if (processedError == ServerErrorCode.Blob_Deleted) {
if (partitionError == ServerErrorCode.No_Error) {
partitionError = ServerErrorCode.Blob_Deleted;
}
byteBuffer = ByteBuffer.allocate(0);
byteBufferSize = 0;
} else if (processedError == ServerErrorCode.Blob_Expired) {
if (partitionError == ServerErrorCode.No_Error) {
partitionError = ServerErrorCode.Blob_Expired;
}
byteBuffer = ByteBuffer.allocate(0);
byteBufferSize = 0;
} else if (processedError == ServerErrorCode.Blob_Authorization_Failure) {
if (partitionError == ServerErrorCode.No_Error) {
partitionError = ServerErrorCode.Blob_Authorization_Failure;
}
byteBuffer = ByteBuffer.allocate(0);
byteBufferSize = 0;
} else {
if (partitionError == ServerErrorCode.No_Error) {
partitionError = ServerErrorCode.Blob_Not_Found;
}
byteBuffer = ByteBuffer.allocate(0);
byteBufferSize = 0;
}
byteBuffer.flip();
ByteBufferSend responseSend = new ByteBufferSend(byteBuffer);
List<MessageInfo> messageInfoList = new ArrayList<>();
List<MessageMetadata> messageMetadataList = new ArrayList<>();
List<PartitionResponseInfo> partitionResponseInfoList = new ArrayList<PartitionResponseInfo>();
if (partitionError == ServerErrorCode.No_Error) {
messageInfoList.add(new MessageInfo(key, byteBufferSize, false, blob.isTtlUpdated(), blob.isUndeleted(), blob.expiresAt, null, accountId, containerId, operationTimeMs, blob.lifeVersion));
messageMetadataList.add(msgMetadata);
}
PartitionResponseInfo partitionResponseInfo = partitionError == ServerErrorCode.No_Error ? new PartitionResponseInfo(getRequest.getPartitionInfoList().get(0).getPartition(), messageInfoList, messageMetadataList) : new PartitionResponseInfo(getRequest.getPartitionInfoList().get(0).getPartition(), partitionError);
partitionResponseInfoList.add(partitionResponseInfo);
getResponse = new GetResponse(getRequest.getCorrelationId(), getRequest.getClientId(), partitionResponseInfoList, responseSend, serverError);
} else {
getResponse = new GetResponse(getRequest.getCorrelationId(), getRequest.getClientId(), new ArrayList<PartitionResponseInfo>(), new ByteBufferSend(ByteBuffer.allocate(0)), serverError);
}
return getResponse;
}
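One convention in this method is worth restating: blob-scoped failures (expired, deleted, not found, authorization failure, disk unavailable) surface as the partition error while the top-level server error stays No_Error; anything else is a true server failure, and the partition error is then never consulted. Condensed into a helper (a sketch, not an Ambry method):

import com.github.ambry.server.ServerErrorCode;

final class GetErrorClassifier {
    // Returns {serverError, partitionError}; mirrors the branch at the top of makeGetResponse.
    static ServerErrorCode[] classifyGetError(ServerErrorCode getError) {
        switch (getError) {
            case No_Error:
            case Blob_Expired:
            case Blob_Deleted:
            case Blob_Not_Found:
            case Blob_Authorization_Failure:
            case Disk_Unavailable:
                // Blob- or partition-scoped: the server itself answered successfully.
                return new ServerErrorCode[]{ServerErrorCode.No_Error, getError};
            default:
                // Server-scoped: the partition error is never inspected in this case.
                return new ServerErrorCode[]{getError, ServerErrorCode.No_Error};
        }
    }
}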