Use of com.github.ambry.config.RouterConfig in project ambry by linkedin.
The class CloudOperationTest, method getBlobAndAssertSuccess.
/**
* Construct GetBlob operations with appropriate callbacks, then poll those operations until they complete,
* and ensure that the whole blob data is read out and the contents match.
* @param blobId id of the blob to get
* @param expectedLifeVersion the expected lifeVersion from the get operation.
* @param expectedBlobSize the expected blob size
* @param expectedBlobProperties the expected {@link BlobProperties} for the blob.
* @param expectedUserMetadata the expected user metadata
* @param expectPutContent the expected blob content
* @param options options for the get blob operation
* @throws Exception Any unexpected exception
*/
private void getBlobAndAssertSuccess(final BlobId blobId, final short expectedLifeVersion, final int expectedBlobSize, final BlobProperties expectedBlobProperties, final byte[] expectedUserMetadata, final byte[] expectPutContent, final GetBlobOptionsInternal options) throws Exception {
final CountDownLatch readCompleteLatch = new CountDownLatch(1);
final AtomicLong readCompleteResult = new AtomicLong(0);
// callback to compare the data
Callback<GetBlobResultInternal> callback = (result, exception) -> {
Assert.assertNull("Shouldn't have exception", exception);
try {
BlobInfo blobInfo;
switch(options.getBlobOptions.getOperationType()) {
case All:
Assert.assertFalse("not supposed to be raw mode", options.getBlobOptions.isRawMode());
blobInfo = result.getBlobResult.getBlobInfo();
Assert.assertTrue("Blob properties must be the same", RouterTestHelpers.arePersistedFieldsEquivalent(expectedBlobProperties, blobInfo.getBlobProperties()));
Assert.assertEquals("Blob size should in received blobProperties should be the same as actual", expectedBlobSize, blobInfo.getBlobProperties().getBlobSize());
Assert.assertArrayEquals("User metadata must be the same", expectedUserMetadata, blobInfo.getUserMetadata());
Assert.assertEquals("LifeVersion mismatch", expectedLifeVersion, blobInfo.getLifeVersion());
break;
case Data:
Assert.assertNull("Unexpected blob info in operation result", result.getBlobResult.getBlobInfo());
break;
case BlobInfo:
blobInfo = result.getBlobResult.getBlobInfo();
Assert.assertTrue("Blob properties must be the same", RouterTestHelpers.arePersistedFieldsEquivalent(expectedBlobProperties, blobInfo.getBlobProperties()));
Assert.assertEquals("Blob size should in received blobProperties should be the same as actual", expectedBlobSize, blobInfo.getBlobProperties().getBlobSize());
Assert.assertNull("Unexpected blob data in operation result", result.getBlobResult.getBlobDataChannel());
Assert.assertEquals("LifeVersion mismatch", expectedLifeVersion, blobInfo.getLifeVersion());
break;
}
} catch (Throwable e) {
Assert.fail("Shouldn't receive exception here");
}
if (options.getBlobOptions.getOperationType() != GetBlobOptions.OperationType.BlobInfo) {
final ByteBufferAsyncWritableChannel asyncWritableChannel = new ByteBufferAsyncWritableChannel();
Utils.newThread(() -> {
Future<Long> readIntoFuture = result.getBlobResult.getBlobDataChannel().readInto(asyncWritableChannel, null);
assertBlobReadSuccess(options.getBlobOptions, readIntoFuture, asyncWritableChannel, result.getBlobResult.getBlobDataChannel(), readCompleteLatch, readCompleteResult, expectedBlobSize, expectPutContent);
}, false).start();
} else {
readCompleteLatch.countDown();
}
};
// create GetBlobOperation
final Map<Integer, GetOperation> correlationIdToGetOperation = new HashMap<>();
final RequestRegistrationCallback<GetOperation> requestRegistrationCallback = new RequestRegistrationCallback<>(correlationIdToGetOperation);
NonBlockingRouter.currentOperationsCount.incrementAndGet();
GetBlobOperation op = new GetBlobOperation(routerConfig, routerMetrics, mockClusterMap, responseHandler, blobId, options, callback, routerCallback, blobIdFactory, null, null, null, time, false, null);
requestRegistrationCallback.setRequestsToSend(new ArrayList<>());
// Poll the operation until it completes.
while (!op.isOperationComplete()) {
op.poll(requestRegistrationCallback);
List<ResponseInfo> responses = sendAndWaitForResponses(requestRegistrationCallback.getRequestsToSend());
for (ResponseInfo responseInfo : responses) {
GetResponse getResponse = RouterUtils.extractResponseAndNotifyResponseHandler(responseHandler, routerMetrics, responseInfo, stream -> GetResponse.readFrom(stream, mockClusterMap), response -> {
ServerErrorCode serverError = response.getError();
if (serverError == ServerErrorCode.No_Error) {
serverError = response.getPartitionResponseInfoList().get(0).getErrorCode();
}
return serverError;
});
op.handleResponse(responseInfo, getResponse);
responseInfo.release();
}
}
readCompleteLatch.await();
Assert.assertTrue("Operation should be complete at this time", op.isOperationComplete());
// Ensure that a ChannelClosed exception is not set when the ReadableStreamChannel is closed correctly.
Assert.assertNull("Callback operation exception should be null", op.getOperationException());
if (options.getBlobOptions.getOperationType() != GetBlobOptions.OperationType.BlobInfo && !options.getBlobOptions.isRawMode() && !options.getChunkIdsOnly) {
int sizeWritten = expectedBlobSize;
if (options.getBlobOptions.getRange() != null) {
ByteRange range = options.getBlobOptions.getRange().toResolvedByteRange(expectedBlobSize, options.getBlobOptions.resolveRangeOnEmptyBlob());
sizeWritten = (int) range.getRangeSize();
}
Assert.assertEquals("Size read must equal size written", sizeWritten, readCompleteResult.get());
}
}
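A call site for this helper might look like the following sketch. The blobId, blobSize, blobProperties, userMetadata, and putContent names are assumptions standing in for state the enclosing test class would populate during a put; only GetBlobOptionsBuilder, GetBlobOptionsInternal, and routerMetrics.ageAtGet are taken from the snippet above.
// Hypothetical invocation of the helper; the argument names below are
// placeholders for state the test class would hold after a put.
GetBlobOptionsInternal allOptions = new GetBlobOptionsInternal(new GetBlobOptionsBuilder().operationType(GetBlobOptions.OperationType.All).build(), false, routerMetrics.ageAtGet);
getBlobAndAssertSuccess(blobId, (short) 0, blobSize, blobProperties, userMetadata, putContent, allOptions);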
Use of com.github.ambry.config.RouterConfig in project ambry by linkedin.
The class GetBlobOperationTest, method testInstantiation.
/**
* Test {@link GetBlobOperation} instantiation and validate the get methods.
*/
@Test
public void testInstantiation() {
Callback<GetBlobResultInternal> getRouterCallback = new Callback<GetBlobResultInternal>() {
@Override
public void onCompletion(GetBlobResultInternal result, Exception exception) {
// no op.
}
};
blobId = new BlobId(routerConfig.routerBlobidCurrentVersion, BlobId.BlobIdType.NATIVE, mockClusterMap.getLocalDatacenterId(), Utils.getRandomShort(TestUtils.RANDOM), Utils.getRandomShort(TestUtils.RANDOM), mockClusterMap.getWritablePartitionIds(MockClusterMap.DEFAULT_PARTITION_CLASS).get(0), false, BlobId.BlobDataType.DATACHUNK);
blobIdStr = blobId.getID();
// test a good case
// operationCount is not incremented here as this operation is not taken to completion.
GetBlobOperation op = new GetBlobOperation(routerConfig, routerMetrics, mockClusterMap, responseHandler, blobId, new GetBlobOptionsInternal(new GetBlobOptionsBuilder().build(), false, routerMetrics.ageAtGet), getRouterCallback, routerCallback, blobIdFactory, kms, cryptoService, cryptoJobHandler, time, false, quotaChargeCallback);
Assert.assertEquals("Callbacks must match", getRouterCallback, op.getCallback());
Assert.assertEquals("Blob ids must match", blobIdStr, op.getBlobIdStr());
// test the case where the tracker type is bad
Properties properties = getDefaultNonBlockingRouterProperties(true);
properties.setProperty("router.get.operation.tracker.type", "NonExistentTracker");
RouterConfig badConfig = new RouterConfig(new VerifiableProperties(properties));
try {
new GetBlobOperation(badConfig, routerMetrics, mockClusterMap, responseHandler, blobId, new GetBlobOptionsInternal(new GetBlobOptionsBuilder().build(), false, routerMetrics.ageAtGet), getRouterCallback, routerCallback, blobIdFactory, kms, cryptoService, cryptoJobHandler, time, false, quotaChargeCallback);
Assert.fail("Instantiation of GetBlobOperation with an invalid tracker type must fail");
} catch (IllegalArgumentException e) {
// expected. Nothing to do.
}
}
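For contrast, a passing configuration can be built the same way. A minimal sketch of the counterpart to the failing case above, assuming "SimpleOperationTracker" and "AdaptiveOperationTracker" are the tracker names the operation accepts:
// Sketch of the passing counterpart; "AdaptiveOperationTracker" is assumed to
// be one of the tracker type names GetBlobOperation recognizes.
Properties okProperties = getDefaultNonBlockingRouterProperties(true);
okProperties.setProperty("router.get.operation.tracker.type", "AdaptiveOperationTracker");
RouterConfig goodConfig = new RouterConfig(new VerifiableProperties(okProperties));
new GetBlobOperation(goodConfig, routerMetrics, mockClusterMap, responseHandler, blobId, new GetBlobOptionsInternal(new GetBlobOptionsBuilder().build(), false, routerMetrics.ageAtGet), getRouterCallback, routerCallback, blobIdFactory, kms, cryptoService, cryptoJobHandler, time, false, quotaChargeCallback);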
Use of com.github.ambry.config.RouterConfig in project ambry by linkedin.
The class GetBlobOperationTest, method testSuccessInThePresenceOfVariousErrors.
/**
* Test the case with multiple errors (server level and partition level) from multiple servers,
* with just one server returning a successful response. The operation should succeed.
* @throws Exception Any unexpected exception
*/
@Test
public void testSuccessInThePresenceOfVariousErrors() throws Exception {
doPut();
// Remember the datacenter where the put for the requested blob happened.
String dcWherePutHappened = routerConfig.routerDatacenterName;
// test requests coming in from local dc as well as cross-colo.
Properties props = getDefaultNonBlockingRouterProperties(true);
props.setProperty("router.datacenter.name", "DC1");
routerConfig = new RouterConfig(new VerifiableProperties(props));
doTestSuccessInThePresenceOfVariousErrors(dcWherePutHappened);
props = getDefaultNonBlockingRouterProperties(true);
props.setProperty("router.datacenter.name", "DC2");
routerConfig = new RouterConfig(new VerifiableProperties(props));
doTestSuccessInThePresenceOfVariousErrors(dcWherePutHappened);
props = getDefaultNonBlockingRouterProperties(true);
props.setProperty("router.datacenter.name", "DC3");
routerConfig = new RouterConfig(new VerifiableProperties(props));
doTestSuccessInThePresenceOfVariousErrors(dcWherePutHappened);
}
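The three repeated blocks are the same logic with a different datacenter name; written as a loop (a restatement of the test body above, not code from ambry), they reduce to:
// Equivalent loop form of the per-DC blocks above.
for (String dc : new String[] { "DC1", "DC2", "DC3" }) {
Properties p = getDefaultNonBlockingRouterProperties(true);
p.setProperty("router.datacenter.name", dc);
routerConfig = new RouterConfig(new VerifiableProperties(p));
doTestSuccessInThePresenceOfVariousErrors(dcWherePutHappened);
}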
Use of com.github.ambry.config.RouterConfig in project ambry by linkedin.
The class GetBlobOperationTest, method testTimeoutAndBlobNotFoundInOriginDc.
/**
* Test the case where origin replicas return Blob_Not_found and the rest times out.
* @throws Exception Any unexpected exception
*/
@Test
public void testTimeoutAndBlobNotFoundInOriginDc() throws Exception {
assumeTrue(operationTrackerType.equals(AdaptiveOperationTracker.class.getSimpleName()));
doPut();
// Pick a remote DC as the new local DC.
String newLocal = "DC1";
String oldLocal = localDcName;
Properties props = getDefaultNonBlockingRouterProperties(true);
props.setProperty("router.datacenter.name", newLocal);
props.setProperty("router.get.request.parallelism", "3");
props.setProperty("router.operation.tracker.max.inflight.requests", "3");
routerConfig = new RouterConfig(new VerifiableProperties(props));
GetBlobOperation op = createOperation(routerConfig, null);
correlationIdToGetOperation.clear();
for (MockServer server : mockServerLayout.getMockServers()) {
server.setServerErrorForAllRequests(ServerErrorCode.Blob_Not_Found);
}
op.poll(requestRegistrationCallback);
time.sleep(routerConfig.routerRequestTimeoutMs + 1);
// The operation should now receive responses from all remote replicas.
while (!op.isOperationComplete()) {
op.poll(requestRegistrationCallback);
List<ResponseInfo> responses = sendAndWaitForResponses(requestRegistrationCallback.getRequestsToSend());
for (ResponseInfo responseInfo : responses) {
GetResponse getResponse = responseInfo.getError() == null ? GetResponse.readFrom(new NettyByteBufDataInputStream(responseInfo.content()), mockClusterMap) : null;
op.handleResponse(responseInfo, getResponse);
responseInfo.release();
}
}
RouterException routerException = (RouterException) op.getOperationException();
// The error code should be BlobDoesNotExist: all replicas in the origin DC returned Blob_Not_Found, which takes precedence over OperationTimedOut.
Assert.assertEquals(RouterErrorCode.BlobDoesNotExist, routerException.getErrorCode());
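// Restore the original local DC and request parallelism so subsequent tests run with the usual settings.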
props = getDefaultNonBlockingRouterProperties(true);
props.setProperty("router.datacenter.name", oldLocal);
props.setProperty("router.get.request.parallelism", "2");
props.setProperty("router.operation.tracker.max.inflight.requests", "2");
routerConfig = new RouterConfig(new VerifiableProperties(props));
}
Use of com.github.ambry.config.RouterConfig in project ambry by linkedin.
The class NonBlockingRouterTest, method testSuccessfulPutDataChunkDelete.
/**
* Test that even when a composite blob put succeeds, the slipped put data chunks are deleted.
*/
@Test
public void testSuccessfulPutDataChunkDelete() throws Exception {
try {
// This test is inherently probabilistic. There is no way to mock slipped puts deterministically, because we cannot
// control the order in which requests are sent to the hosts, and not every request is sent when put requests are
// guaranteed to fail or succeed. So we set the number of chunks and the maximum slipped-put attempts high enough
// that slipped puts will almost certainly happen and the operation will succeed.
maxPutChunkSize = PUT_CONTENT_SIZE / 8;
final int NUM_MAX_ATTEMPTS = 100;
Properties props = getNonBlockingRouterProperties("DC1");
props.setProperty("router.max.slipped.put.attempts", Integer.toString(NUM_MAX_ATTEMPTS));
VerifiableProperties verifiableProperties = new VerifiableProperties(props);
RouterConfig routerConfig = new RouterConfig(verifiableProperties);
MockClusterMap mockClusterMap = new MockClusterMap();
MockTime mockTime = new MockTime();
MockServerLayout mockServerLayout = new MockServerLayout(mockClusterMap);
// Since this test wants to ensure that successfully put data chunks are deleted when the overall put operation
// succeeds but some chunks succeed only after a retry, it uses a notification system to track the deletions.
final CountDownLatch deletesDoneLatch = new CountDownLatch(1);
final Map<String, String> blobsThatAreDeleted = new HashMap<>();
LoggingNotificationSystem deleteTrackingNotificationSystem = new LoggingNotificationSystem() {
@Override
public void onBlobDeleted(String blobId, String serviceId, Account account, Container container) {
blobsThatAreDeleted.put(blobId, serviceId);
deletesDoneLatch.countDown();
}
};
router = new NonBlockingRouter(routerConfig, new NonBlockingRouterMetrics(mockClusterMap, routerConfig), new MockNetworkClientFactory(verifiableProperties, mockSelectorState, MAX_PORTS_PLAIN_TEXT, MAX_PORTS_SSL, CHECKOUT_TIMEOUT_MS, mockServerLayout, mockTime), deleteTrackingNotificationSystem, mockClusterMap, kms, cryptoService, cryptoJobHandler, accountService, mockTime, MockClusterMap.DEFAULT_PARTITION_CLASS);
setOperationParams();
// In each DC, set up the servers such that one node always succeeds and the other nodes alternate between
// unknown_error and no_error. With very high probability, at some point a put will succeed on one node but
// fail on the other two.
List<DataNodeId> dataNodeIds = mockClusterMap.getDataNodeIds();
List<ServerErrorCode> serverErrorList = new ArrayList<>();
for (int i = 0; i < NUM_MAX_ATTEMPTS; i++) {
serverErrorList.add(ServerErrorCode.Unknown_Error);
serverErrorList.add(ServerErrorCode.No_Error);
}
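// The assumption here is that MockServer consumes one entry of this list per incoming request, so a node
// configured with it alternates between returning Unknown_Error and No_Error.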
Set<String> healthyNodeDC = new HashSet<>();
for (DataNodeId dataNodeId : dataNodeIds) {
MockServer server = mockServerLayout.getMockServer(dataNodeId.getHostname(), dataNodeId.getPort());
if (healthyNodeDC.contains(dataNodeId.getDatacenterName())) {
server.setServerErrors(serverErrorList);
} else {
server.resetServerErrors();
}
healthyNodeDC.add(dataNodeId.getDatacenterName());
}
// Submit the put operation and wait for it to succeed.
String blobId = router.putBlob(putBlobProperties, putUserMetadata, putChannel, new PutBlobOptionsBuilder().build()).get();
// Now, wait until at least one delete happens within AWAIT_TIMEOUT_MS.
Assert.assertTrue("Some blobs should have been deleted within " + AWAIT_TIMEOUT_MS, deletesDoneLatch.await(AWAIT_TIMEOUT_MS, TimeUnit.MILLISECONDS));
// Wait for the rest of the deletes to finish.
long waitStart = SystemTime.getInstance().milliseconds();
while (router.getBackgroundOperationsCount() != 0 && SystemTime.getInstance().milliseconds() < waitStart + AWAIT_TIMEOUT_MS) {
Thread.sleep(1000);
}
for (Map.Entry<String, String> blobIdAndServiceId : blobsThatAreDeleted.entrySet()) {
Assert.assertNotSame("We should not be deleting the valid blob by mistake", blobId, blobIdAndServiceId.getKey());
Assert.assertEquals("Unexpected service ID for deleted blob", BackgroundDeleteRequest.SERVICE_ID_PREFIX + putBlobProperties.getServiceId(), blobIdAndServiceId.getValue());
}
} finally {
if (router != null) {
router.close();
assertClosed();
}
}
}