Use of com.github.ambry.utils.Time in the ambry project by LinkedIn.
Class AmbryUrlSigningServiceTest, method signAndVerifyTest.
/**
 * Tests that generate and verify signed URLs.
 * @throws Exception
 */
@Test
public void signAndVerifyTest() throws Exception {
  Time mockTime = new MockTime();
  AmbryUrlSigningService urlSigner = getUrlSignerWithDefaults(mockTime);
  // Exercise signing and verification for each supported REST method.
  for (RestMethod restMethod : new RestMethod[]{RestMethod.POST, RestMethod.GET}) {
    doSignAndVerifyTest(urlSigner, restMethod, mockTime);
  }
  signFailuresTest();
}
Use of com.github.ambry.utils.Time in the ambry project by LinkedIn.
Class MockSelector, method testRequestDropping.
/**
 * Test dropping of requests by closing
 */
@Test
public void testRequestDropping() {
  AtomicInteger nextCorrelationId = new AtomicInteger(1);
  // Generates the requested number of requests, each carrying a fresh correlation id.
  Function<Integer, List<RequestInfo>> createRequests = count -> IntStream.range(0, count)
      .mapToObj(i -> new RequestInfo(sslHost, sslPort, new MockSend(nextCorrelationId.getAndIncrement()),
          replicaOnSslNode, null))
      .collect(Collectors.toList());
  // Drop requests while the requests are waiting for a connection.
  // First poll will require connections to be created, so no responses will be returned.
  List<ResponseInfo> responses =
      networkClient.sendAndPoll(createRequests.apply(3), Collections.emptySet(), POLL_TIMEOUT_MS);
  Assert.assertEquals("No responses expected in first poll.", 0, responses.size());
  // Drop requests on the second poll. The requests should be removed from the pending request list as a result.
  responses = networkClient.sendAndPoll(Collections.emptyList(), new HashSet<>(Arrays.asList(2, 3)), POLL_TIMEOUT_MS);
  Assert.assertEquals("Should receive only as many responses as there were requests", 3, responses.size());
  for (ResponseInfo responseInfo : responses) {
    MockSend mockSend = (MockSend) responseInfo.getRequestInfo().getRequest();
    if (mockSend.getCorrelationId() != 1) {
      // Requests 2 and 3 were dropped before getting a connection.
      Assert.assertEquals("Expected connection unavailable on dropped request",
          NetworkClientErrorCode.ConnectionUnavailable, responseInfo.getError());
      Assert.assertNull("Should not receive a response", responseInfo.content());
    } else {
      // Request 1 was never dropped, so it should complete normally.
      NetworkClientErrorCode errorCode = responseInfo.getError();
      ByteBuf responseContent = responseInfo.content();
      Assert.assertNull("Should not have encountered an error", errorCode);
      Assert.assertNotNull("Should receive a valid response", responseContent);
      int requestCorrelationId = mockSend.getCorrelationId();
      int responseCorrelationId = responseContent.readInt();
      Assert.assertEquals("Received response for the wrong request", requestCorrelationId, responseCorrelationId);
    }
  }
  responses.forEach(ResponseInfo::release);
  // Test dropping of requests while the requests are in flight.
  // Set the selector to idle mode to prevent responses from coming back (even though connections are available at
  // this moment in time).
  selector.setState(MockSelectorState.IdlePoll);
  responses = networkClient.sendAndPoll(createRequests.apply(3), Collections.emptySet(), POLL_TIMEOUT_MS);
  Assert.assertEquals("No responses expected in idle poll.", 0, responses.size());
  // Set the selector back to normal mode and drop a request. It should be dropped by closing the connection.
  selector.setState(MockSelectorState.Good);
  responses = networkClient.sendAndPoll(Collections.emptyList(), Collections.singleton(4), POLL_TIMEOUT_MS);
  Assert.assertEquals("Should receive only as many responses as there were requests", 3, responses.size());
  for (ResponseInfo responseInfo : responses) {
    MockSend mockSend = (MockSend) responseInfo.getRequestInfo().getRequest();
    if (mockSend.getCorrelationId() == 4) {
      // Request 4 was dropped in flight, so its connection was closed.
      Assert.assertEquals("Expected network error (from closed connection for dropped request)",
          NetworkClientErrorCode.NetworkError, responseInfo.getError());
      Assert.assertNull("Should not receive a response", responseInfo.content());
      responseInfo.release();
    } else {
      NetworkClientErrorCode errorCode = responseInfo.getError();
      ByteBuf responseContent = responseInfo.content();
      Assert.assertNull("Should not have encountered an error", errorCode);
      Assert.assertNotNull("Should receive a valid response", responseContent);
      int requestCorrelationId = mockSend.getCorrelationId();
      int responseCorrelationId = responseContent.readInt();
      Assert.assertEquals("Received response for the wrong request", requestCorrelationId, responseCorrelationId);
      responseInfo.release();
    }
  }
  // Dropping a request that is not currently pending or in flight should be a no-op.
  responses = networkClient.sendAndPoll(Collections.emptyList(), Collections.singleton(1), POLL_TIMEOUT_MS);
  Assert.assertEquals("No more responses expected.", 0, responses.size());
  responses.forEach(ResponseInfo::release);
}
Use of com.github.ambry.utils.Time in the ambry project by LinkedIn.
Class ReplicationTest, method replicaTokenTest.
/**
 * Tests that replica tokens are set correctly and go through different stages correctly.
 * @throws InterruptedException
 */
@Test
public void replicaTokenTest() throws InterruptedException {
  final long tokenPersistInterval = 100;
  Time mockTime = new MockTime();
  MockFindToken initialToken = new MockFindToken(0, 0);
  RemoteReplicaInfo replicaInfo =
      new RemoteReplicaInfo(new MockReplicaId(ReplicaType.DISK_BACKED), new MockReplicaId(ReplicaType.DISK_BACKED),
          new InMemoryStore(null, Collections.emptyList(), Collections.emptyList(), null), initialToken,
          tokenPersistInterval, mockTime, new Port(5000, PortType.PLAINTEXT));
  // The equality check is for the reference, which is fine.
  // Right after construction, the current token and the token to persist match the initial token.
  assertEquals(initialToken, replicaInfo.getToken());
  assertEquals(initialToken, replicaInfo.getTokenToPersist());
  MockFindToken reinitToken = new MockFindToken(100, 100);
  replicaInfo.initializeTokens(reinitToken);
  // Re-initialization resets both the current token and the token to persist.
  assertEquals(reinitToken, replicaInfo.getToken());
  assertEquals(reinitToken, replicaInfo.getTokenToPersist());
  replicaInfo.onTokenPersisted();
  MockFindToken newerToken = new MockFindToken(200, 200);
  replicaInfo.setToken(newerToken);
  // A freshly set token must not be persisted yet; the token to persist stays at the older one.
  assertEquals(newerToken, replicaInfo.getToken());
  assertEquals(reinitToken, replicaInfo.getTokenToPersist());
  replicaInfo.onTokenPersisted();
  // Advance the clock, but by less than the persist interval.
  mockTime.sleep(tokenPersistInterval - 1);
  // Still within the interval, so the token to persist remains unchanged.
  assertEquals(newerToken, replicaInfo.getToken());
  assertEquals(reinitToken, replicaInfo.getTokenToPersist());
  replicaInfo.onTokenPersisted();
  MockFindToken newestToken = new MockFindToken(200, 200);
  replicaInfo.setToken(newestToken);
  mockTime.sleep(2);
  // Token to persist should be the most recent token as of currentTime - tokenToPersistInterval,
  // which is the previously set token at this time.
  assertEquals(newestToken, replicaInfo.getToken());
  assertEquals(newerToken, replicaInfo.getTokenToPersist());
  replicaInfo.onTokenPersisted();
  mockTime.sleep(tokenPersistInterval + 1);
  // After a full interval has elapsed, the most recently set token becomes eligible for persistence.
  assertEquals(newestToken, replicaInfo.getToken());
  assertEquals(newestToken, replicaInfo.getTokenToPersist());
  replicaInfo.onTokenPersisted();
}
Use of com.github.ambry.utils.Time in the ambry project by LinkedIn.
Class DumpIndexTool, method main.
/**
 * Entry point for the index dump tool. Reads tool, cluster map, store and server configs from the
 * supplied properties, then dispatches on {@code config.typeOfOperation}:
 * DumpIndex / DumpIndexSegment dump index contents, VerifyIndex checks a single index,
 * VerifyDataNode checks every replica of a data node. Exits with a non-zero code on
 * verification failure.
 * @param args command-line arguments, parsed via {@link ToolUtils#getVerifiableProperties}.
 * @throws Exception if config parsing, cluster map construction or index processing fails.
 */
public static void main(String[] args) throws Exception {
  final AtomicInteger exitCode = new AtomicInteger(0);
  VerifiableProperties verifiableProperties = ToolUtils.getVerifiableProperties(args);
  DumpIndexToolConfig config = new DumpIndexToolConfig(verifiableProperties);
  ClusterMapConfig clusterMapConfig = new ClusterMapConfig(verifiableProperties);
  // try-with-resources ensures the cluster map is closed before exiting.
  try (ClusterMap clusterMap = ((ClusterAgentsFactory) Utils.getObj(clusterMapConfig.clusterMapClusterAgentsFactory,
      clusterMapConfig, config.hardwareLayoutFilePath, config.partitionLayoutFilePath)).getClusterMap()) {
    StoreConfig storeConfig = new StoreConfig(verifiableProperties);
    // this tool supports only blob IDs. It can become generic if StoreKeyFactory provides a deserFromString method.
    BlobIdFactory blobIdFactory = new BlobIdFactory(clusterMap);
    StoreToolsMetrics metrics = new StoreToolsMetrics(clusterMap.getMetricRegistry());
    StoreMetrics storeMetrics = new StoreMetrics("DumpIndexTool", clusterMap.getMetricRegistry());
    ServerConfig serverConfig = new ServerConfig(verifiableProperties);
    Time time = SystemTime.getInstance();
    // Throttle index entry processing to the configured rate (entries per second).
    Throttler throttler = new Throttler(config.indexEntriesToProcessPerSec, 1000, true, time);
    StoreKeyConverterFactory storeKeyConverterFactory =
        Utils.getObj(serverConfig.serverStoreKeyConverterFactory, verifiableProperties,
            clusterMap.getMetricRegistry());
    DumpIndexTool dumpIndexTool =
        new DumpIndexTool(blobIdFactory, storeConfig, time, metrics, storeMetrics, throttler,
            storeKeyConverterFactory.getStoreKeyConverter());
    // Optional filter: restrict processing to these blob IDs (empty set means no filtering).
    Set<StoreKey> filterKeySet = new HashSet<>();
    for (String key : config.filterSet) {
      filterKeySet.add(new BlobId(key, clusterMap));
    }
    switch (config.typeOfOperation) {
      case DumpIndex:
        dumpIndex(dumpIndexTool, config.pathOfInput, filterKeySet);
        break;
      case DumpIndexSegment:
        dumpIndexSegment(dumpIndexTool, config.pathOfInput, filterKeySet);
        break;
      case VerifyIndex:
        IndexProcessingResults results =
            dumpIndexTool.processIndex(config.pathOfInput, filterKeySet, time.milliseconds(),
                config.detectDuplicatesAcrossKeys);
        exitCode.set(reportVerificationResults(config.pathOfInput, results, config.failIfCraftedIdsPresent));
        break;
      case VerifyDataNode:
        DataNodeId dataNodeId = clusterMap.getDataNodeId(config.hostname, config.port);
        if (dataNodeId == null) {
          logger.error("No data node corresponding to {}:{}", config.hostname, config.port);
        } else {
          Set<File> replicaDirs = clusterMap.getReplicaIds(dataNodeId)
              .stream()
              .map(replicaId -> new File(replicaId.getMountPath()))
              .collect(Collectors.toSet());
          Map<File, IndexProcessingResults> resultsByReplica =
              dumpIndexTool.processIndex(replicaDirs, filterKeySet, config.parallelism,
                  config.detectDuplicatesAcrossKeys);
          replicaDirs.removeAll(resultsByReplica.keySet());
          if (!replicaDirs.isEmpty()) {
            // Some replicas produced no results at all; exit code 5 signals an incomplete run.
            logger.error("Results obtained missing {}", replicaDirs);
            exitCode.set(5);
          } else {
            // Exit with the worst (highest) verification result across all replicas.
            resultsByReplica.forEach((replicaDir, result) -> exitCode.set(
                Math.max(exitCode.get(), reportVerificationResults(replicaDir, result,
                    config.failIfCraftedIdsPresent))));
          }
        }
        break;
      default:
        throw new IllegalArgumentException("Unrecognized operation: " + config.typeOfOperation);
    }
  }
  System.exit(exitCode.get());
}
Aggregations