use of com.github.ambry.clustermap.DataNodeId in project ambry by linkedin.
the class NonBlockingRouterTest method testSuccessfulPutDataChunkDelete.
/**
* Test that even when a composite blob put succeeds, the slipped put data chunks are deleted.
*/
@Test
public void testSuccessfulPutDataChunkDelete() throws Exception {
  try {
    // This test is inherently probabilistic: slipped puts cannot be forced through mocking,
    // because we cannot control the order in which requests reach the hosts, and not all
    // requests are sent when put requests are guaranteed to fail or succeed. Instead, the
    // number of chunks and the maximum number of attempts are set high enough to guarantee
    // that slipped puts eventually happen and the operation succeeds.
    maxPutChunkSize = PUT_CONTENT_SIZE / 8;
    final int NUM_MAX_ATTEMPTS = 100;
    Properties props = getNonBlockingRouterProperties("DC1");
    props.setProperty("router.max.slipped.put.attempts", Integer.toString(NUM_MAX_ATTEMPTS));
    VerifiableProperties verifiableProperties = new VerifiableProperties(props);
    RouterConfig routerConfig = new RouterConfig(verifiableProperties);
    MockClusterMap mockClusterMap = new MockClusterMap();
    MockTime mockTime = new MockTime();
    MockServerLayout mockServerLayout = new MockServerLayout(mockClusterMap);
    // Since this test wants to ensure that successfully put data chunks are deleted when the
    // overall put operation succeeds but some chunks succeed only after a retry, it uses a
    // notification system to track the deletions.
    final CountDownLatch deletesDoneLatch = new CountDownLatch(1);
    final Map<String, String> blobsThatAreDeleted = new HashMap<>();
    LoggingNotificationSystem deleteTrackingNotificationSystem = new LoggingNotificationSystem() {
      @Override
      public void onBlobDeleted(String blobId, String serviceId, Account account, Container container) {
        blobsThatAreDeleted.put(blobId, serviceId);
        deletesDoneLatch.countDown();
      }
    };
    router = new NonBlockingRouter(routerConfig, new NonBlockingRouterMetrics(mockClusterMap, routerConfig),
        new MockNetworkClientFactory(verifiableProperties, mockSelectorState, MAX_PORTS_PLAIN_TEXT, MAX_PORTS_SSL,
            CHECKOUT_TIMEOUT_MS, mockServerLayout, mockTime),
        deleteTrackingNotificationSystem, mockClusterMap, kms, cryptoService, cryptoJobHandler, accountService,
        mockTime, MockClusterMap.DEFAULT_PARTITION_CLASS);
    setOperationParams();
    // In each DC, set up the servers so that one node always succeeds while the other nodes
    // return unknown_error and no_error alternately. With very high probability, at some
    // point a put will succeed on one node but fail on the other two.
    List<DataNodeId> dataNodeIds = mockClusterMap.getDataNodeIds();
    List<ServerErrorCode> serverErrorList = new ArrayList<>();
    for (int i = 0; i < NUM_MAX_ATTEMPTS; i++) {
      serverErrorList.add(ServerErrorCode.Unknown_Error);
      serverErrorList.add(ServerErrorCode.No_Error);
    }
    Set<String> healthyNodeDC = new HashSet<>();
    for (DataNodeId dataNodeId : dataNodeIds) {
      MockServer server = mockServerLayout.getMockServer(dataNodeId.getHostname(), dataNodeId.getPort());
      if (healthyNodeDC.contains(dataNodeId.getDatacenterName())) {
        server.setServerErrors(serverErrorList);
      } else {
        server.resetServerErrors();
      }
      healthyNodeDC.add(dataNodeId.getDatacenterName());
    }
    // Submit the put operation and wait for it to succeed.
    String blobId =
        router.putBlob(putBlobProperties, putUserMetadata, putChannel, new PutBlobOptionsBuilder().build()).get();
    // Now, wait until at least one delete happens within AWAIT_TIMEOUT_MS.
    Assert.assertTrue("Some blobs should have been deleted within " + AWAIT_TIMEOUT_MS,
        deletesDoneLatch.await(AWAIT_TIMEOUT_MS, TimeUnit.MILLISECONDS));
    // Wait for the rest of the deletes to finish.
    long waitStart = SystemTime.getInstance().milliseconds();
    while (router.getBackgroundOperationsCount() != 0
        && SystemTime.getInstance().milliseconds() < waitStart + AWAIT_TIMEOUT_MS) {
      Thread.sleep(1000);
    }
    for (Map.Entry<String, String> blobIdAndServiceId : blobsThatAreDeleted.entrySet()) {
      Assert.assertNotSame("We should not be deleting the valid blob by mistake", blobId, blobIdAndServiceId.getKey());
      Assert.assertEquals("Unexpected service ID for deleted blob",
          BackgroundDeleteRequest.SERVICE_ID_PREFIX + putBlobProperties.getServiceId(), blobIdAndServiceId.getValue());
    }
  } finally {
    if (router != null) {
      router.close();
      assertClosed();
    }
  }
}
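The drain loop near the end (waiting on getBackgroundOperationsCount) is a recurring idiom in these router tests. A minimal sketch of it factored into a helper, assuming the same NonBlockingRouter and SystemTime used above (the helper name and signature are ours, not part of the test):

// Hypothetical helper: waits until the router's background operations
// (such as the slipped-put deletes above) have drained or the timeout elapses.
private static void awaitBackgroundOperations(NonBlockingRouter router, long timeoutMs)
    throws InterruptedException {
  long deadline = SystemTime.getInstance().milliseconds() + timeoutMs;
  while (router.getBackgroundOperationsCount() != 0 && SystemTime.getInstance().milliseconds() < deadline) {
    Thread.sleep(100);
  }
}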
use of com.github.ambry.clustermap.DataNodeId in project ambry by linkedin.
the class DeleteManagerTest method setServerResponse.
/**
* Sets whether each server in the cluster should respond to requests.
*
* @param shouldRespond {@code true} if the servers should respond, otherwise {@code false}.
*/
private void setServerResponse(boolean shouldRespond) {
  for (DataNodeId dataNodeId : clusterMap.getDataNodeIds()) {
    MockServer server = serverLayout.getMockServer(dataNodeId.getHostname(), dataNodeId.getPort());
    server.setShouldRespond(shouldRespond);
  }
}
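A typical use of this helper, sketched here as a hypothetical fragment rather than code from DeleteManagerTest, is to silence every server so that an operation times out, then restore responses for later cases:

// Hypothetical usage: all servers drop requests, so a delete should time out;
// responses are restored afterwards so subsequent test cases still pass.
setServerResponse(false);
// ... issue a delete here and assert that it fails with a timeout-style router error ...
setServerResponse(true);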
use of com.github.ambry.clustermap.DataNodeId in project ambry by linkedin.
the class MockReadableStreamChannel method testPutWithAllNodesFailure.
/**
* Test ensures failure when all server nodes encounter an error.
*/
@Test
public void testPutWithAllNodesFailure() throws Exception {
  requestAndResultsList.clear();
  requestAndResultsList.add(new RequestAndResult(chunkSize * 5));
  List<DataNodeId> dataNodeIds = mockClusterMap.getDataNodeIds();
  for (DataNodeId dataNodeId : dataNodeIds) {
    String host = dataNodeId.getHostname();
    int port = dataNodeId.getPort();
    MockServer server = mockServerLayout.getMockServer(host, port);
    server.setServerErrorForAllRequests(ServerErrorCode.Unknown_Error);
  }
  Exception expectedException = new RouterException("", RouterErrorCode.AmbryUnavailable);
  submitPutsAndAssertFailure(expectedException, true, false, true);
}
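The loop above is the standard way these tests reach each MockServer through its DataNodeId. A hedged sketch of the same logic as a reusable helper (the helper name is ours, not from the test class):

// Hypothetical helper mirroring the loop above: applies one error code to
// every mock server reachable through the cluster map's data node IDs.
private void setAllServersError(ServerErrorCode errorCode) {
  for (DataNodeId dataNodeId : mockClusterMap.getDataNodeIds()) {
    mockServerLayout.getMockServer(dataNodeId.getHostname(), dataNodeId.getPort())
        .setServerErrorForAllRequests(errorCode);
  }
}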
use of com.github.ambry.clustermap.DataNodeId in project ambry by linkedin.
the class Http2NetworkClientTest method putGetTest.
@Test
public void putGetTest() throws Exception {
  MockClusterMap clusterMap = http2Cluster.getClusterMap();
  DataNodeId dataNodeId = http2Cluster.getGeneralDataNode();
  BlobIdFactory blobIdFactory = new BlobIdFactory(clusterMap);
  SSLFactory sslFactory = new NettySslHttp2Factory(clientSSLConfig);
  Http2NetworkClient networkClient = new Http2NetworkClient(new Http2ClientMetrics(new MetricRegistry()),
      new Http2ClientConfig(new VerifiableProperties(new Properties())), sslFactory, eventLoopGroup);
  // Put a blob
  int blobSize = 1024 * 1024;
  byte[] usermetadata = new byte[1000];
  byte[] data = new byte[blobSize];
  short accountId = Utils.getRandomShort(TestUtils.RANDOM);
  short containerId = Utils.getRandomShort(TestUtils.RANDOM);
  BlobProperties properties = new BlobProperties(blobSize, "serviceid1", accountId, containerId, false);
  TestUtils.RANDOM.nextBytes(usermetadata);
  TestUtils.RANDOM.nextBytes(data);
  List<? extends PartitionId> partitionIds = clusterMap.getWritablePartitionIds(MockClusterMap.DEFAULT_PARTITION_CLASS);
  short blobIdVersion = CommonTestUtils.getCurrentBlobIdVersion();
  BlobId blobId1 = new BlobId(blobIdVersion, BlobId.BlobIdType.NATIVE, clusterMap.getLocalDatacenterId(),
      properties.getAccountId(), properties.getContainerId(), partitionIds.get(0), false, BlobId.BlobDataType.DATACHUNK);
  // put blob 1
  PutRequest putRequest = new PutRequest(1, "client1", blobId1, properties, ByteBuffer.wrap(usermetadata),
      Unpooled.wrappedBuffer(data), properties.getBlobSize(), BlobType.DataBlob, null);
  RequestInfo request = new RequestInfo(dataNodeId.getHostname(), new Port(dataNodeId.getHttp2Port(), PortType.HTTP2),
      putRequest, clusterMap.getReplicaIds(dataNodeId).get(0), null);
  List<ResponseInfo> responseInfos = networkClient.sendAndPoll(Collections.singletonList(request), new HashSet<>(), 300);
  long startTime = SystemTime.getInstance().milliseconds();
  while (responseInfos.size() == 0) {
    responseInfos = networkClient.sendAndPoll(Collections.emptyList(), new HashSet<>(), 300);
    if (SystemTime.getInstance().milliseconds() - startTime >= 6000) {
      fail("Network client received no response before the timeout.");
    }
    Thread.sleep(30);
  }
  assertEquals("Should be only one response", 1, responseInfos.size());
  DataInputStream dis = new NettyByteBufDataInputStream(responseInfos.get(0).content());
  PutResponse putResponse = PutResponse.readFrom(dis);
  assertEquals("No error expected.", ServerErrorCode.No_Error, putResponse.getError());
  // Get the blob
  // get blob properties
  ArrayList<BlobId> ids = new ArrayList<>();
  MockPartitionId partition =
      (MockPartitionId) clusterMap.getWritablePartitionIds(MockClusterMap.DEFAULT_PARTITION_CLASS).get(0);
  ids.add(blobId1);
  ArrayList<PartitionRequestInfo> partitionRequestInfoList = new ArrayList<>();
  PartitionRequestInfo partitionRequestInfo = new PartitionRequestInfo(partition, ids);
  partitionRequestInfoList.add(partitionRequestInfo);
  GetRequest getRequest =
      new GetRequest(1, "http2-clientid", MessageFormatFlags.All, partitionRequestInfoList, GetOption.None);
  request = new RequestInfo(dataNodeId.getHostname(), new Port(dataNodeId.getHttp2Port(), PortType.HTTP2), getRequest,
      clusterMap.getReplicaIds(dataNodeId).get(0), null);
  responseInfos = networkClient.sendAndPoll(Collections.singletonList(request), new HashSet<>(), 300);
  startTime = SystemTime.getInstance().milliseconds();
  while (responseInfos.size() == 0) {
    responseInfos = networkClient.sendAndPoll(Collections.emptyList(), new HashSet<>(), 300);
    if (SystemTime.getInstance().milliseconds() - startTime >= 3000) {
      fail("Network client received no response before the timeout.");
    }
    Thread.sleep(30);
  }
  assertEquals("Should be only one response", 1, responseInfos.size());
  dis = new NettyByteBufDataInputStream(responseInfos.get(0).content());
  GetResponse resp = GetResponse.readFrom(dis, clusterMap);
  BlobAll blobAll = MessageFormatRecord.deserializeBlobAll(resp.getInputStream(), blobIdFactory);
  // verify BlobProperties
  BlobProperties propertyOutput = blobAll.getBlobInfo().getBlobProperties();
  assertEquals(blobSize, propertyOutput.getBlobSize());
  assertEquals("serviceid1", propertyOutput.getServiceId());
  assertEquals("AccountId mismatch", accountId, propertyOutput.getAccountId());
  assertEquals("ContainerId mismatch", containerId, propertyOutput.getContainerId());
  // verify UserMetadata
  byte[] userMetadataOutput = blobAll.getBlobInfo().getUserMetadata();
  assertArrayEquals(usermetadata, userMetadataOutput);
  // verify content
  byte[] actualBlobData = getBlobDataAndRelease(blobAll.getBlobData());
  assertArrayEquals("Content mismatch.", data, actualBlobData);
}
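The two polling loops in this test are identical except for the timeout. A minimal sketch of a shared helper, assuming the same JUnit asserts and Http2NetworkClient as above (the helper name is ours):

// Hypothetical helper: submits one request, then polls until a single
// response arrives or the timeout elapses, failing the test otherwise.
private static ResponseInfo awaitSingleResponse(Http2NetworkClient networkClient, RequestInfo request, long timeoutMs)
    throws InterruptedException {
  List<ResponseInfo> responseInfos = networkClient.sendAndPoll(Collections.singletonList(request), new HashSet<>(), 300);
  long startTime = SystemTime.getInstance().milliseconds();
  while (responseInfos.isEmpty()) {
    if (SystemTime.getInstance().milliseconds() - startTime >= timeoutMs) {
      fail("Network client received no response before the timeout.");
    }
    Thread.sleep(30);
    responseInfos = networkClient.sendAndPoll(Collections.emptyList(), new HashSet<>(), 300);
  }
  assertEquals("Should be only one response", 1, responseInfos.size());
  return responseInfos.get(0);
}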
use of com.github.ambry.clustermap.DataNodeId in project ambry by linkedin.
the class ServerPlaintextTokenTest method endToEndReplicationWithMultiNodeSinglePartitionTest.
@Test
public void endToEndReplicationWithMultiNodeSinglePartitionTest() {
  DataNodeId dataNodeId = plaintextCluster.getClusterMap().getDataNodeIds().get(0);
  ArrayList<String> dataCenterList = Utils.splitString("DC1,DC2,DC3", ",");
  List<DataNodeId> dataNodes = plaintextCluster.getOneDataNodeFromEachDatacenter(dataCenterList);
  ServerTestUtil.endToEndReplicationWithMultiNodeSinglePartitionTest("DC1", dataNodeId.getPort(),
      new Port(dataNodes.get(0).getPort(), PortType.PLAINTEXT), new Port(dataNodes.get(1).getPort(), PortType.PLAINTEXT),
      new Port(dataNodes.get(2).getPort(), PortType.PLAINTEXT), plaintextCluster, null, null, notificationSystem,
      routerProps, testEncryption);
}
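As a side note, the three Port arguments above could be derived from the per-datacenter nodes in one pass; a hypothetical equivalent using streams (the variable name is ours):

// Hypothetical equivalent of the three Port arguments built above.
List<Port> plaintextPorts = dataNodes.stream()
    .map(node -> new Port(node.getPort(), PortType.PLAINTEXT))
    .collect(java.util.stream.Collectors.toList());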