Use of com.github.ambry.network.RequestInfo in project ambry by linkedin.
The class BackgroundDeleter, method onResponse.
/**
* Handle the response from polling the {@link NetworkClient}.
* @param responseInfoList the list of {@link ResponseInfo} containing the responses.
*/
protected void onResponse(List<ResponseInfo> responseInfoList) {
  for (ResponseInfo responseInfo : responseInfoList) {
    try {
      RequestInfo requestInfo = responseInfo.getRequestInfo();
      if (requestInfo == null) {
        // If requestInfo is null, the request already failed earlier due to a long wait in the pending-requests
        // queue, and that failure was handled by one of the managers (PutManager, GetManager, etc.). The current
        // response comes from the timed-out connection associated with that request, so the router only needs to
        // notify the responseHandler to mark the data node resource as down.
        DataNodeId dataNodeId = responseInfo.getDataNode();
        responseHandler.onConnectionTimeout(dataNodeId);
      } else {
        long responseReceiveTime = requestInfo.getStreamHeaderFrameReceiveTime();
        if (responseReceiveTime != -1) {
          routerMetrics.responseReceiveToHandleLatencyMs.update(System.currentTimeMillis() - responseReceiveTime);
        }
        RequestOrResponseType type = ((RequestOrResponse) requestInfo.getRequest()).getRequestType();
        logger.debug("Handling response of type {} for {}", type, requestInfo.getRequest().getCorrelationId());
        switch (type) {
          case PutRequest:
            putManager.handleResponse(responseInfo);
            break;
          case GetRequest:
            getManager.handleResponse(responseInfo);
            break;
          case DeleteRequest:
            deleteManager.handleResponse(responseInfo);
            break;
          case TtlUpdateRequest:
            ttlUpdateManager.handleResponse(responseInfo);
            break;
          case UndeleteRequest:
            undeleteManager.handleResponse(responseInfo);
            break;
          default:
            logger.error("Unexpected response type: {} received, discarding", type);
        }
      }
    } catch (Exception e) {
      logger.error("Unexpected error received while handling a response: ", e);
      routerMetrics.operationManagerHandleResponseErrorCount.inc();
    }
  }
}
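For context, onResponse is driven by the OperationController's poll loop: requests gathered from the managers are handed to the network client, and the responses returned by sendAndPoll are fed back through onResponse. The sketch below is illustrative only; the field and helper names (networkClient, pollForRequests, POLL_TIMEOUT_MS) are assumptions, not the actual NonBlockingRouter code.
// Hedged sketch of the surrounding poll loop (names are illustrative assumptions).
List<RequestInfo> requestsToSend = new ArrayList<>();
Set<Integer> requestsToDrop = new HashSet<>();
// Ask each manager (PutManager, GetManager, DeleteManager, ...) to contribute requests.
pollForRequests(requestsToSend, requestsToDrop);
// Hand the requests to the network client and collect whatever responses are ready.
List<ResponseInfo> responses = networkClient.sendAndPoll(requestsToSend, requestsToDrop, POLL_TIMEOUT_MS);
// Dispatch each response back to the manager that owns the originating request.
onResponse(responses);
// Release the reference-counted response buffers once they have been handled.
responses.forEach(ResponseInfo::release);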
Use of com.github.ambry.network.RequestInfo in project ambry by linkedin.
The class DeleteManager, method handleResponse.
/**
* Handles responses received for each of the {@link DeleteOperation} within this delete manager.
* @param responseInfo the {@link ResponseInfo} containing the response.
*/
void handleResponse(ResponseInfo responseInfo) {
  long startTime = time.milliseconds();
  DeleteResponse deleteResponse =
      RouterUtils.extractResponseAndNotifyResponseHandler(responseHandler, routerMetrics, responseInfo,
          DeleteResponse::readFrom, DeleteResponse::getError);
  RequestInfo routerRequestInfo = responseInfo.getRequestInfo();
  int correlationId = ((DeleteRequest) routerRequestInfo.getRequest()).getCorrelationId();
  DeleteOperation deleteOperation = correlationIdToDeleteOperation.remove(correlationId);
  // If it is still an active operation, hand over the response. Otherwise, ignore.
  if (deleteOperations.contains(deleteOperation)) {
    boolean exceptionEncountered = false;
    try {
      deleteOperation.handleResponse(responseInfo, deleteResponse);
    } catch (Exception e) {
      exceptionEncountered = true;
      deleteOperation.setOperationException(
          new RouterException("Delete handleResponse encountered unexpected error", e,
              RouterErrorCode.UnexpectedInternalError));
    }
    if (exceptionEncountered || deleteOperation.isOperationComplete()) {
      if (deleteOperations.remove(deleteOperation)) {
        onComplete(deleteOperation);
      }
    }
    routerMetrics.deleteManagerHandleResponseTimeMs.update(time.milliseconds() - startTime);
  } else {
    routerMetrics.ignoredResponseCount.inc();
  }
}
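The dispatch above depends on the correlation id that was recorded when the request was handed out during polling. A hedged sketch of that registration side follows; the variable names (requestsToSend, deleteOperation) are assumptions inferred from this snippet rather than DeleteManager's actual poll code.
// Hedged sketch: remember which DeleteOperation owns each outgoing correlation id so that
// handleResponse can later route the response back via correlationIdToDeleteOperation.remove(...).
for (RequestInfo requestInfo : requestsToSend) {
  int correlationId = ((DeleteRequest) requestInfo.getRequest()).getCorrelationId();
  correlationIdToDeleteOperation.put(correlationId, deleteOperation);
}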
Use of com.github.ambry.network.RequestInfo in project ambry by linkedin.
The class CloudOperationTest, method doDirectPut.
/**
* Do a put directly to the mock servers. This allows for blobs with malformed properties to be constructed.
* @param blobProperties the {@link BlobProperties} for the blob.
* @param userMetadata user metadata of the blob.
* @param blobContent the raw content for the blob to upload (i.e. this can be serialized composite blob metadata or
* an encrypted blob).
* @return the blob id
* @throws Exception Any unexpected exception
*/
private BlobId doDirectPut(BlobProperties blobProperties, byte[] userMetadata, ByteBuf blobContent) throws Exception {
  List<PartitionId> writablePartitionIds =
      mockClusterMap.getWritablePartitionIds(MockClusterMap.DEFAULT_PARTITION_CLASS);
  PartitionId partitionId = writablePartitionIds.get(random.nextInt(writablePartitionIds.size()));
  BlobId blobId = new BlobId(routerConfig.routerBlobidCurrentVersion, BlobId.BlobIdType.NATIVE,
      mockClusterMap.getLocalDatacenterId(), blobProperties.getAccountId(), blobProperties.getContainerId(),
      partitionId, blobProperties.isEncrypted(), BlobId.BlobDataType.DATACHUNK);
  Iterator<MockServer> servers = partitionId.getReplicaIds()
      .stream()
      .map(ReplicaId::getDataNodeId)
      .map(dataNodeId -> mockServerLayout.getMockServer(dataNodeId.getHostname(), dataNodeId.getPort()))
      .iterator();
  ByteBuffer userMetadataBuf = ByteBuffer.wrap(userMetadata);
  while (servers.hasNext()) {
    MockServer server = servers.next();
    PutRequest request = new PutRequest(random.nextInt(), "clientId", blobId, blobProperties,
        userMetadataBuf.duplicate(), blobContent.retainedDuplicate(), blobContent.readableBytes(), BlobType.DataBlob,
        null);
    // Make sure we release the BoundedNettyByteBufReceive.
    server.send(request).release();
    request.release();
  }
  // Send to cloud destinations.
  PutRequest request = new PutRequest(random.nextInt(), "clientId", blobId, blobProperties,
      userMetadataBuf.duplicate(), blobContent.retainedDuplicate(), blobContent.readableBytes(), BlobType.DataBlob,
      null);
  // Get the cloud replica.
  ReplicaId replica = partitionId.getReplicaIds().get(0);
  Assert.assertEquals("It should be a cloud backed replica.", replica.getReplicaType(), ReplicaType.CLOUD_BACKED);
  String hostname = replica.getDataNodeId().getHostname();
  Port port = new Port(-1, PortType.PLAINTEXT);
  List<RequestInfo> requestList = new ArrayList<>();
  RequestInfo requestInfo = new RequestInfo(hostname, port, request, replica, null);
  requestList.add(requestInfo);
  List<ResponseInfo> responseList = sendAndWaitForResponses(requestList);
  request.release();
  blobContent.release();
  return blobId;
}
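The same RequestInfo construction pattern applies to any replica, not just the cloud-backed one. A hedged sketch against a disk-backed replica follows; the replica index and the choice to keep the final constructor argument null (as in the snippet above) are illustrative assumptions.
// Hedged sketch: build a RequestInfo for a disk-backed replica using its real data-node port.
ReplicaId diskReplica = partitionId.getReplicaIds().get(1); // assumes index 1 is a disk-backed replica
Port diskPort = new Port(diskReplica.getDataNodeId().getPort(), PortType.PLAINTEXT);
RequestInfo diskRequestInfo =
    new RequestInfo(diskReplica.getDataNodeId().getHostname(), diskPort, request, diskReplica, null);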
Use of com.github.ambry.network.RequestInfo in project ambry by linkedin.
The class GetBlobOperationTest, method testNetworkClientTimeoutAllFailure.
/**
* Test the case where all requests time out within the SocketNetworkClient.
* @throws Exception
*/
@Test
public void testNetworkClientTimeoutAllFailure() throws Exception {
  doPut();
  GetBlobOperation op = createOperation(routerConfig, null);
  while (!op.isOperationComplete()) {
    op.poll(requestRegistrationCallback);
    for (RequestInfo requestInfo : requestRegistrationCallback.getRequestsToSend()) {
      ResponseInfo fakeResponse = new ResponseInfo(requestInfo, NetworkClientErrorCode.NetworkError, null);
      op.handleResponse(fakeResponse, null);
      fakeResponse.release();
      if (op.isOperationComplete()) {
        break;
      }
    }
    requestRegistrationCallback.getRequestsToSend().clear();
  }
  // At this time requests would have been created for all replicas, as none of them were delivered,
  // and cross-colo proxying is enabled by default.
  Assert.assertEquals("Must have attempted sending requests to all replicas", replicasCount,
      correlationIdToGetOperation.size());
  assertFailureAndCheckErrorCode(op, RouterErrorCode.OperationTimedOut);
}
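The drive-to-completion pattern in this test (poll, fabricate a failed ResponseInfo per request, feed it back) is reusable for other failure scenarios. Below is a hedged sketch of the same logic extracted into a helper; the method name completeOpWithNetworkError is an assumption, not an existing test utility.
// Hedged sketch: answer every polled request with a fabricated network-error response
// until the operation completes, exactly as the test above does inline.
private void completeOpWithNetworkError(GetBlobOperation op) {
  while (!op.isOperationComplete()) {
    op.poll(requestRegistrationCallback);
    for (RequestInfo requestInfo : requestRegistrationCallback.getRequestsToSend()) {
      ResponseInfo fakeResponse = new ResponseInfo(requestInfo, NetworkClientErrorCode.NetworkError, null);
      op.handleResponse(fakeResponse, null);
      fakeResponse.release();
      if (op.isOperationComplete()) {
        break;
      }
    }
    requestRegistrationCallback.getRequestsToSend().clear();
  }
}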
Use of com.github.ambry.network.RequestInfo in project ambry by linkedin.
The class NonBlockingRouterTestBase, method testResponseDeserializationError.
/**
* Test that operations succeed even in the presence of responses that are corrupt and fail to deserialize.
* @param opHelper the {@link OperationHelper}
* @param networkClient the {@link SocketNetworkClient}
* @param blobId the id of the blob to get/delete. For puts, this will be null.
* @throws Exception
*/
protected void testResponseDeserializationError(OperationHelper opHelper, SocketNetworkClient networkClient,
    BlobId blobId) throws Exception {
  mockSelectorState.set(MockSelectorState.Good);
  FutureResult futureResult = opHelper.submitOperation(blobId);
  int requestParallelism = opHelper.requestParallelism;
  List<RequestInfo> allRequests = new ArrayList<>();
  Set<Integer> allDropped = new HashSet<>();
  long loopStartTimeMs = SystemTime.getInstance().milliseconds();
  while (allRequests.size() < requestParallelism) {
    if (loopStartTimeMs + AWAIT_TIMEOUT_MS < SystemTime.getInstance().milliseconds()) {
      Assert.fail("Waited too long for requests.");
    }
    opHelper.pollOpManager(allRequests, allDropped);
  }
  List<ResponseInfo> responseInfoList = new ArrayList<>();
  loopStartTimeMs = SystemTime.getInstance().milliseconds();
  do {
    if (loopStartTimeMs + AWAIT_TIMEOUT_MS < SystemTime.getInstance().milliseconds()) {
      Assert.fail("Waited too long for the response.");
    }
    responseInfoList.addAll(networkClient.sendAndPoll(allRequests, allDropped, 10));
    allRequests.clear();
  } while (responseInfoList.size() < requestParallelism);
  // Corrupt the first response.
  ByteBuf response = responseInfoList.get(0).content();
  byte b = response.getByte(response.writerIndex() - 1);
  response.setByte(response.writerIndex() - 1, (byte) ~b);
  for (ResponseInfo responseInfo : responseInfoList) {
    opHelper.handleResponse(responseInfo);
  }
  responseInfoList.forEach(ResponseInfo::release);
  allRequests.clear();
  if (testEncryption) {
    opHelper.awaitOpCompletionOrTimeOut(futureResult);
  } else {
    opHelper.pollOpManager(allRequests, allDropped);
  }
  try {
    futureResult.get(AWAIT_TIMEOUT_MS, TimeUnit.MILLISECONDS);
  } catch (ExecutionException e) {
    Assert.fail("Operation should have succeeded with one corrupt response");
  }
}
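The corruption step flips the last byte of a single serialized response in place, which is enough to make that one response fail to deserialize while the remaining responses still succeed. A hedged sketch of the same idea as a small utility follows; the method name corruptLastByte is an assumption.
// Hedged sketch: flip the final byte of a response buffer in place, mirroring the inline corruption above.
private static void corruptLastByte(ByteBuf response) {
  int lastIndex = response.writerIndex() - 1;
  byte original = response.getByte(lastIndex);
  response.setByte(lastIndex, (byte) ~original);
}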