Use of com.github.ambry.replication.FindTokenHelper in project ambry by LinkedIn.
In class AmbryServer, method startup():
public void startup() throws InstantiationException {
try {
logger.info("starting");
clusterParticipants = clusterAgentsFactory.getClusterParticipants();
logger.info("Setting up JMX.");
long startTime = SystemTime.getInstance().milliseconds();
reporter = reporterFactory != null ? reporterFactory.apply(registry) : JmxReporter.forRegistry(registry).build();
reporter.start();
logger.info("creating configs");
NetworkConfig networkConfig = new NetworkConfig(properties);
StoreConfig storeConfig = new StoreConfig(properties);
DiskManagerConfig diskManagerConfig = new DiskManagerConfig(properties);
ServerConfig serverConfig = new ServerConfig(properties);
ReplicationConfig replicationConfig = new ReplicationConfig(properties);
ConnectionPoolConfig connectionPoolConfig = new ConnectionPoolConfig(properties);
SSLConfig sslConfig = new SSLConfig(properties);
ClusterMapConfig clusterMapConfig = new ClusterMapConfig(properties);
StatsManagerConfig statsConfig = new StatsManagerConfig(properties);
CloudConfig cloudConfig = new CloudConfig(properties);
// verify the configs
properties.verify();
scheduler = Utils.newScheduler(serverConfig.serverSchedulerNumOfthreads, false);
// If multiple participants are registered on this node, periodically check for any mismatch in the sealed/stopped replica lists maintained by each participant.
if (clusterParticipants != null && clusterParticipants.size() > 1 && serverConfig.serverParticipantsConsistencyCheckerPeriodSec > 0) {
consistencyChecker = new ParticipantsConsistencyChecker(clusterParticipants, metrics);
logger.info("Scheduling participants consistency checker with a period of {} secs", serverConfig.serverParticipantsConsistencyCheckerPeriodSec);
consistencyCheckerScheduler = Utils.newScheduler(1, "consistency-checker-", false);
consistencyCheckerTask = consistencyCheckerScheduler.scheduleAtFixedRate(consistencyChecker, 0, serverConfig.serverParticipantsConsistencyCheckerPeriodSec, TimeUnit.SECONDS);
}
logger.info("checking if node exists in clustermap host {} port {}", networkConfig.hostName, networkConfig.port);
DataNodeId nodeId = clusterMap.getDataNodeId(networkConfig.hostName, networkConfig.port);
if (nodeId == null) {
throw new IllegalArgumentException("The node " + networkConfig.hostName + ":" + networkConfig.port + " is not present in the clustermap. Failing to start the datanode");
}
AccountServiceFactory accountServiceFactory = Utils.getObj(serverConfig.serverAccountServiceFactory, properties, registry);
AccountService accountService = accountServiceFactory.getAccountService();
StoreKeyFactory storeKeyFactory = Utils.getObj(storeConfig.storeKeyFactory, clusterMap);
// In most cases, there should be only one participant in the clusterParticipants list. If there is more than one
// and some components require a sole participant, the first one in the list is treated as the primary participant.
storageManager = new StorageManager(storeConfig, diskManagerConfig, scheduler, registry, storeKeyFactory, clusterMap, nodeId, new BlobStoreHardDelete(), clusterParticipants, time, new BlobStoreRecovery(), accountService);
storageManager.start();
SSLFactory sslFactory = new NettySslHttp2Factory(sslConfig);
if (clusterMapConfig.clusterMapEnableHttp2Replication) {
connectionPool = new Http2BlockingChannelPool(sslFactory, new Http2ClientConfig(properties), new Http2ClientMetrics(registry));
} else {
connectionPool = new BlockingChannelConnectionPool(connectionPoolConfig, sslConfig, clusterMapConfig, registry);
}
connectionPool.start();
StoreKeyConverterFactory storeKeyConverterFactory = Utils.getObj(serverConfig.serverStoreKeyConverterFactory, properties, registry);
Predicate<MessageInfo> skipPredicate = new ReplicationSkipPredicate(accountService, replicationConfig);
replicationManager = new ReplicationManager(replicationConfig, clusterMapConfig, storeConfig, storageManager, storeKeyFactory, clusterMap, scheduler, nodeId, connectionPool, registry, notificationSystem, storeKeyConverterFactory, serverConfig.serverMessageTransformer, clusterParticipants.get(0), skipPredicate);
replicationManager.start();
if (replicationConfig.replicationEnabledWithVcrCluster) {
logger.info("Creating Helix cluster spectator for cloud to store replication.");
vcrClusterSpectator = _vcrClusterAgentsFactory.getVcrClusterSpectator(cloudConfig, clusterMapConfig);
cloudToStoreReplicationManager = new CloudToStoreReplicationManager(replicationConfig, clusterMapConfig, storeConfig, storageManager, storeKeyFactory, clusterMap, scheduler, nodeId, connectionPool, registry, notificationSystem, storeKeyConverterFactory, serverConfig.serverMessageTransformer, vcrClusterSpectator, clusterParticipants.get(0));
cloudToStoreReplicationManager.start();
}
logger.info("Creating StatsManager to publish stats");
accountStatsMySqlStore = statsConfig.enableMysqlReport ? (AccountStatsMySqlStore) new AccountStatsMySqlStoreFactory(properties, clusterMapConfig, registry).getAccountStatsStore() : null;
statsManager = new StatsManager(storageManager, clusterMap.getReplicaIds(nodeId), registry, statsConfig, time, clusterParticipants.get(0), accountStatsMySqlStore, accountService);
if (serverConfig.serverStatsPublishLocalEnabled) {
statsManager.start();
}
ArrayList<Port> ports = new ArrayList<Port>();
ports.add(new Port(networkConfig.port, PortType.PLAINTEXT));
if (nodeId.hasSSLPort()) {
ports.add(new Port(nodeId.getSSLPort(), PortType.SSL));
}
networkServer = new SocketServer(networkConfig, sslConfig, registry, ports);
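// FindTokenHelper resolves a FindTokenFactory per replica type; the same instance is shared by the plaintext and HTTP/2 request handlers created below.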
FindTokenHelper findTokenHelper = new FindTokenHelper(storeKeyFactory, replicationConfig);
requests = new AmbryServerRequests(storageManager, networkServer.getRequestResponseChannel(), clusterMap, nodeId, registry, metrics, findTokenHelper, notificationSystem, replicationManager, storeKeyFactory, serverConfig, storeKeyConverterFactory, statsManager, clusterParticipants.get(0));
requestHandlerPool = new RequestHandlerPool(serverConfig.serverRequestHandlerNumOfThreads, networkServer.getRequestResponseChannel(), requests);
networkServer.start();
// Start netty http2 server
if (nodeId.hasHttp2Port()) {
NettyConfig nettyConfig = new NettyConfig(properties);
NettyMetrics nettyMetrics = new NettyMetrics(registry);
Http2ServerMetrics http2ServerMetrics = new Http2ServerMetrics(registry);
Http2ClientConfig http2ClientConfig = new Http2ClientConfig(properties);
logger.info("Http2 port {} is enabled. Starting HTTP/2 service.", nodeId.getHttp2Port());
NettyServerRequestResponseChannel requestResponseChannel = new NettyServerRequestResponseChannel(networkConfig.queuedMaxRequests, http2ServerMetrics);
AmbryServerRequests ambryServerRequestsForHttp2 = new AmbryServerRequests(storageManager, requestResponseChannel, clusterMap, nodeId, registry, metrics, findTokenHelper, notificationSystem, replicationManager, storeKeyFactory, serverConfig, storeKeyConverterFactory, statsManager, clusterParticipants.get(0));
requestHandlerPoolForHttp2 = new RequestHandlerPool(serverConfig.serverRequestHandlerNumOfThreads, requestResponseChannel, ambryServerRequestsForHttp2);
NioServerFactory nioServerFactory = new StorageServerNettyFactory(nodeId.getHttp2Port(), requestResponseChannel, sslFactory, nettyConfig, http2ClientConfig, metrics, nettyMetrics, http2ServerMetrics, serverSecurityService);
nettyHttp2Server = nioServerFactory.getNioServer();
nettyHttp2Server.start();
}
// Other code
List<AmbryStatsReport> ambryStatsReports = new ArrayList<>();
Set<String> validStatsTypes = new HashSet<>();
for (StatsReportType type : StatsReportType.values()) {
validStatsTypes.add(type.toString());
}
if (serverConfig.serverStatsPublishReportEnabled) {
serverConfig.serverStatsReportsToPublish.forEach(e -> {
if (validStatsTypes.contains(e)) {
ambryStatsReports.add(new AmbryStatsReportImpl(serverConfig.serverQuotaStatsAggregateIntervalInMinutes, StatsReportType.valueOf(e)));
}
});
}
if (vcrClusterSpectator != null) {
vcrClusterSpectator.spectate();
}
Callback<StatsSnapshot> accountServiceCallback = new AccountServiceCallback(accountService);
for (ClusterParticipant clusterParticipant : clusterParticipants) {
clusterParticipant.participate(ambryStatsReports, accountStatsMySqlStore, accountServiceCallback);
}
if (nettyInternalMetrics != null) {
nettyInternalMetrics.start();
logger.info("NettyInternalMetric starts");
}
logger.info("started");
long processingTime = SystemTime.getInstance().milliseconds() - startTime;
metrics.serverStartTimeInMs.update(processingTime);
logger.info("Server startup time in Ms {}", processingTime);
} catch (Exception e) {
logger.error("Error during startup", e);
throw new InstantiationException("failure during startup " + e);
}
}
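For reference, a minimal standalone sketch of the FindTokenHelper pattern the startup code above relies on: build the helper from a StoreKeyFactory and a ReplicationConfig, then ask it for the FindTokenFactory that matches a replica type. This is not taken from the Ambry source; it reuses only constructors and calls that appear elsewhere on this page, assumes it runs inside a method declared throws Exception, and assumes the default token factory classes referenced by ReplicationConfig are on the classpath.
// Minimal sketch of FindTokenHelper usage, assembled from the calls shown in these examples.
Properties props = new Properties();
VerifiableProperties verifiableProperties = new VerifiableProperties(props);
ReplicationConfig replicationConfig = new ReplicationConfig(verifiableProperties);
ClusterMap clusterMap = new MockClusterMap();
StoreKeyFactory storeKeyFactory = new BlobIdFactory(clusterMap);
FindTokenHelper findTokenHelper = new FindTokenHelper(storeKeyFactory, replicationConfig);
// Ask the helper for the token factory matching the replica type being replicated from.
FindTokenFactory cloudTokenFactory = findTokenHelper.getFindTokenFactoryFromReplicaType(ReplicaType.CLOUD_BACKED);
FindToken startToken = cloudTokenFactory.getNewFindToken(); // a fresh token, i.e. replicate from the beginning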
Use of com.github.ambry.replication.FindTokenHelper in project ambry by LinkedIn.
In class VcrServer, method startup():
/**
* Start the VCR Server.
* @throws InstantiationException if an error was encountered during startup.
*/
public void startup() throws InstantiationException {
try {
logger.info("starting");
ServerConfig serverConfig = new ServerConfig(properties);
ServerSecurityServiceFactory serverSecurityServiceFactory = Utils.getObj(serverConfig.serverSecurityServiceFactory, properties, serverMetrics, registry);
serverSecurityService = serverSecurityServiceFactory.getServerSecurityService();
clusterMap = clusterAgentsFactory.getClusterMap();
logger.info("Initialized clusterMap");
registry = clusterMap.getMetricRegistry();
serverMetrics = new ServerMetrics(registry, VcrServer.class, VcrServer.class);
logger.info("Setting up JMX.");
long startTime = SystemTime.getInstance().milliseconds();
registry = clusterMap.getMetricRegistry();
reporter = reporterFactory != null ? reporterFactory.apply(registry) : JmxReporter.forRegistry(registry).build();
reporter.start();
logger.info("creating configs");
NetworkConfig networkConfig = new NetworkConfig(properties);
StoreConfig storeConfig = new StoreConfig(properties);
ReplicationConfig replicationConfig = new ReplicationConfig(properties);
CloudConfig cloudConfig = new CloudConfig(properties);
ConnectionPoolConfig connectionPoolConfig = new ConnectionPoolConfig(properties);
ClusterMapConfig clusterMapConfig = new ClusterMapConfig(properties);
SSLConfig sslConfig = new SSLConfig(properties);
// verify the configs
properties.verify();
// initialize cloud destination
if (cloudDestinationFactory == null) {
cloudDestinationFactory = Utils.getObj(cloudConfig.cloudDestinationFactoryClass, properties, registry, clusterMap);
}
cloudDestination = cloudDestinationFactory.getCloudDestination();
// TODO Make sure that config.updaterPollingIntervalMs value is large (~one day) for VCR.
AccountServiceFactory accountServiceFactory = Utils.getObj(serverConfig.serverAccountServiceFactory, properties, registry);
AccountService accountService = accountServiceFactory.getAccountService();
vcrClusterParticipant = ((VcrClusterAgentsFactory) Utils.getObj(cloudConfig.vcrClusterAgentsFactoryClass, cloudConfig, clusterMapConfig, clusterMap, accountService, storeConfig, cloudDestination, registry)).getVcrClusterParticipant();
scheduler = Utils.newScheduler(serverConfig.serverSchedulerNumOfthreads, false);
StoreKeyFactory storeKeyFactory = Utils.getObj(storeConfig.storeKeyFactory, clusterMap);
SSLFactory sslFactory = new NettySslHttp2Factory(sslConfig);
if (clusterMapConfig.clusterMapEnableHttp2Replication) {
connectionPool = new Http2BlockingChannelPool(sslFactory, new Http2ClientConfig(properties), new Http2ClientMetrics(registry));
logger.info("Using http2 for VCR replication.");
} else {
connectionPool = new BlockingChannelConnectionPool(connectionPoolConfig, sslConfig, clusterMapConfig, registry);
logger.info("Using blocking channel for VCR replication.");
}
connectionPool.start();
StoreKeyConverterFactory storeKeyConverterFactory = Utils.getObj(serverConfig.serverStoreKeyConverterFactory, properties, registry);
VcrMetrics vcrMetrics = new VcrMetrics(registry);
CloudStorageManager cloudStorageManager = new CloudStorageManager(properties, vcrMetrics, cloudDestination, clusterMap);
vcrReplicationManager = new VcrReplicationManager(cloudConfig, replicationConfig, clusterMapConfig, storeConfig, cloudStorageManager, storeKeyFactory, clusterMap, vcrClusterParticipant, cloudDestination, scheduler, connectionPool, vcrMetrics, notificationSystem, storeKeyConverterFactory, serverConfig.serverMessageTransformer);
vcrReplicationManager.start();
DataNodeId currentNode = vcrClusterParticipant.getCurrentDataNodeId();
ArrayList<Port> ports = new ArrayList<Port>();
ports.add(new Port(networkConfig.port, PortType.PLAINTEXT));
if (currentNode.hasSSLPort()) {
ports.add(new Port(cloudConfig.vcrSslPort, PortType.SSL));
}
networkServer = new SocketServer(networkConfig, sslConfig, registry, ports);
// todo fix enableDataPrefetch
ServerMetrics serverMetrics = new ServerMetrics(registry, VcrRequests.class, VcrServer.class);
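// Each VcrRequests instance below is given its own FindTokenHelper built from the store key factory and replication config.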
VcrRequests requests = new VcrRequests(cloudStorageManager, networkServer.getRequestResponseChannel(), clusterMap, currentNode, registry, serverMetrics, new FindTokenHelper(storeKeyFactory, replicationConfig), notificationSystem, vcrReplicationManager, storeKeyFactory, storeKeyConverterFactory);
requestHandlerPool = new RequestHandlerPool(serverConfig.serverRequestHandlerNumOfThreads, networkServer.getRequestResponseChannel(), requests);
networkServer.start();
// Start netty http2 server
if (currentNode.hasHttp2Port()) {
logger.info("Http2 port {} is enabled. Starting HTTP/2 service.", currentNode.getHttp2Port());
NettyConfig nettyConfig = new NettyConfig(properties);
NettyMetrics nettyMetrics = new NettyMetrics(registry);
Http2ServerMetrics http2ServerMetrics = new Http2ServerMetrics(registry);
Http2ClientConfig http2ClientConfig = new Http2ClientConfig(properties);
NettyServerRequestResponseChannel requestResponseChannel = new NettyServerRequestResponseChannel(networkConfig.queuedMaxRequests, http2ServerMetrics);
VcrRequests vcrRequestsForHttp2 = new VcrRequests(cloudStorageManager, requestResponseChannel, clusterMap, currentNode, registry, serverMetrics, new FindTokenHelper(storeKeyFactory, replicationConfig), notificationSystem, vcrReplicationManager, storeKeyFactory, storeKeyConverterFactory);
requestHandlerPoolForHttp2 = new RequestHandlerPool(serverConfig.serverRequestHandlerNumOfThreads, requestResponseChannel, vcrRequestsForHttp2);
NioServerFactory nioServerFactory = new StorageServerNettyFactory(currentNode.getHttp2Port(), requestResponseChannel, sslFactory, nettyConfig, http2ClientConfig, serverMetrics, nettyMetrics, http2ServerMetrics, serverSecurityService);
nettyHttp2Server = nioServerFactory.getNioServer();
nettyHttp2Server.start();
}
long processingTime = SystemTime.getInstance().milliseconds() - startTime;
logger.info("VCR startup time in Ms {}", processingTime);
} catch (Exception e) {
logger.error("Error during VCR startup", e);
throw new InstantiationException("failure during VCR startup " + e);
}
}
Use of com.github.ambry.replication.FindTokenHelper in project ambry by LinkedIn.
In class AzureIntegrationTest, method testFindEntriesSince():
/**
* Test findEntriesSince with specified cloud token factory.
* @param replicationCloudTokenFactory the factory to use.
* @throws Exception on error
*/
private void testFindEntriesSince(String replicationCloudTokenFactory) throws Exception {
logger.info("Testing findEntriesSince with {}", replicationCloudTokenFactory);
testProperties.setProperty(ReplicationConfig.REPLICATION_CLOUD_TOKEN_FACTORY, replicationCloudTokenFactory);
VerifiableProperties verifiableProperties = new VerifiableProperties(testProperties);
ReplicationConfig replicationConfig = new ReplicationConfig(verifiableProperties);
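// Note: a null StoreKeyFactory is passed here, presumably because only the cloud-backed token factory is needed in this test.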
FindTokenFactory findTokenFactory = new FindTokenHelper(null, replicationConfig).getFindTokenFactoryFromReplicaType(ReplicaType.CLOUD_BACKED);
azureDest = (AzureCloudDestination) new AzureCloudDestinationFactory(verifiableProperties, new MetricRegistry(), clusterMap).getCloudDestination();
cleanup();
PartitionId partitionId = new MockPartitionId(testPartition, MockClusterMap.DEFAULT_PARTITION_CLASS);
String partitionPath = String.valueOf(testPartition);
// Upload some blobs with different upload times
int blobCount = 90;
int chunkSize = 1000;
int maxTotalSize = 20000;
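// With 90 blobs of 1000 bytes each and a 20000-byte cap per query response, integer division gives (90 * 1000) / 20000 + 1 = 5 expected queries.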
int expectedNumQueries = (blobCount * chunkSize) / maxTotalSize + 1;
long now = System.currentTimeMillis();
long startTime = now - TimeUnit.DAYS.toMillis(7);
for (int j = 0; j < blobCount; j++) {
BlobId blobId = new BlobId(BLOB_ID_V6, BlobIdType.NATIVE, dataCenterId, accountId, containerId, partitionId, false, BlobDataType.DATACHUNK);
InputStream inputStream = getBlobInputStream(chunkSize);
CloudBlobMetadata cloudBlobMetadata = new CloudBlobMetadata(blobId, startTime, Utils.Infinite_Time, chunkSize, CloudBlobMetadata.EncryptionOrigin.VCR, vcrKmsContext, cryptoAgentFactory, chunkSize, (short) 0);
cloudBlobMetadata.setUploadTime(startTime + j * 1000);
assertTrue("Expected upload to return true", uploadBlobWithRetry(blobId, chunkSize, cloudBlobMetadata, inputStream, cloudRequestAgent, azureDest));
}
FindToken findToken = findTokenFactory.getNewFindToken();
// Call findEntriesSince in a loop until no new entries are returned
FindResult findResult;
int numQueries = 0;
int totalBlobsReturned = 0;
do {
findResult = findEntriesSinceWithRetry(partitionPath, findToken, maxTotalSize);
findToken = findResult.getUpdatedFindToken();
if (!findResult.getMetadataList().isEmpty()) {
numQueries++;
}
totalBlobsReturned += findResult.getMetadataList().size();
} while (!noMoreFindSinceEntries(findResult, findToken));
assertEquals("Wrong number of queries", expectedNumQueries, numQueries);
assertEquals("Wrong number of blobs", blobCount, totalBlobsReturned);
assertEquals("Wrong byte count", blobCount * chunkSize, findToken.getBytesRead());
cleanup();
}
Use of com.github.ambry.replication.FindTokenHelper in project ambry by LinkedIn.
In class CloudTokenPersistorTest, method basicTest():
@Test
public void basicTest() throws Exception {
Properties props = VcrTestUtil.createVcrProperties("DC1", "vcrClusterName", "zkConnectString", 12310, 12410, 12510, null);
props.setProperty("replication.cloud.token.factory", replicationCloudTokenFactory);
CloudConfig cloudConfig = new CloudConfig(new VerifiableProperties(props));
ClusterMapConfig clusterMapConfig = new ClusterMapConfig(new VerifiableProperties(props));
ClusterMap clusterMap = new MockClusterMap();
DataNodeId dataNodeId = new CloudDataNode(cloudConfig, clusterMapConfig);
Map<String, Set<PartitionInfo>> mountPathToPartitionInfoList = new HashMap<>();
BlobIdFactory blobIdFactory = new BlobIdFactory(clusterMap);
StoreFindTokenFactory factory = new StoreFindTokenFactory(blobIdFactory);
PartitionId partitionId = clusterMap.getAllPartitionIds(null).get(0);
ReplicaId cloudReplicaId = new CloudReplica(partitionId, dataNodeId);
List<? extends ReplicaId> peerReplicas = cloudReplicaId.getPeerReplicaIds();
List<RemoteReplicaInfo> remoteReplicas = new ArrayList<RemoteReplicaInfo>();
List<RemoteReplicaInfo.ReplicaTokenInfo> replicaTokenInfos = new ArrayList<>();
for (ReplicaId remoteReplica : peerReplicas) {
RemoteReplicaInfo remoteReplicaInfo = new RemoteReplicaInfo(remoteReplica, cloudReplicaId, null, factory.getNewFindToken(), 10, SystemTime.getInstance(), remoteReplica.getDataNodeId().getPortToConnectTo());
remoteReplicas.add(remoteReplicaInfo);
replicaTokenInfos.add(new RemoteReplicaInfo.ReplicaTokenInfo(remoteReplicaInfo));
}
PartitionInfo partitionInfo = new PartitionInfo(remoteReplicas, partitionId, null, cloudReplicaId);
mountPathToPartitionInfoList.computeIfAbsent(cloudReplicaId.getMountPath(), key -> ConcurrentHashMap.newKeySet()).add(partitionInfo);
LatchBasedInMemoryCloudDestination cloudDestination = new LatchBasedInMemoryCloudDestination(Collections.emptyList(), AzureCloudDestinationFactory.getReplicationFeedType(new VerifiableProperties(props)), clusterMap);
ReplicationConfig replicationConfig = new ReplicationConfig(new VerifiableProperties(props));
CloudTokenPersistor cloudTokenPersistor = new CloudTokenPersistor("replicaTokens", mountPathToPartitionInfoList, new ReplicationMetrics(new MetricRegistry(), Collections.emptyList()), clusterMap, new FindTokenHelper(blobIdFactory, replicationConfig), cloudDestination);
cloudTokenPersistor.persist(cloudReplicaId.getMountPath(), replicaTokenInfos);
List<RemoteReplicaInfo.ReplicaTokenInfo> retrievedReplicaTokenInfos = cloudTokenPersistor.retrieve(cloudReplicaId.getMountPath());
Assert.assertEquals("Number of tokens doesn't match.", replicaTokenInfos.size(), retrievedReplicaTokenInfos.size());
for (int i = 0; i < replicaTokenInfos.size(); i++) {
Assert.assertArrayEquals("Token is not correct.", replicaTokenInfos.get(i).getReplicaToken().toBytes(), retrievedReplicaTokenInfos.get(i).getReplicaToken().toBytes());
}
}