Use of com.github.ambry.config.Http2ClientConfig in project ambry by linkedin.
The class AmbryServer, method startup().
public void startup() throws InstantiationException {
try {
logger.info("starting");
clusterParticipants = clusterAgentsFactory.getClusterParticipants();
logger.info("Setting up JMX.");
long startTime = SystemTime.getInstance().milliseconds();
reporter = reporterFactory != null ? reporterFactory.apply(registry) : JmxReporter.forRegistry(registry).build();
reporter.start();
logger.info("creating configs");
NetworkConfig networkConfig = new NetworkConfig(properties);
StoreConfig storeConfig = new StoreConfig(properties);
DiskManagerConfig diskManagerConfig = new DiskManagerConfig(properties);
ServerConfig serverConfig = new ServerConfig(properties);
ReplicationConfig replicationConfig = new ReplicationConfig(properties);
ConnectionPoolConfig connectionPoolConfig = new ConnectionPoolConfig(properties);
SSLConfig sslConfig = new SSLConfig(properties);
ClusterMapConfig clusterMapConfig = new ClusterMapConfig(properties);
StatsManagerConfig statsConfig = new StatsManagerConfig(properties);
CloudConfig cloudConfig = new CloudConfig(properties);
// verify the configs
properties.verify();
scheduler = Utils.newScheduler(serverConfig.serverSchedulerNumOfthreads, false);
// If there is more than one participant on the local node, run a consistency checker periodically to detect any mismatch in the sealed/stopped replica lists maintained by each participant.
if (clusterParticipants != null && clusterParticipants.size() > 1 && serverConfig.serverParticipantsConsistencyCheckerPeriodSec > 0) {
consistencyChecker = new ParticipantsConsistencyChecker(clusterParticipants, metrics);
logger.info("Scheduling participants consistency checker with a period of {} secs", serverConfig.serverParticipantsConsistencyCheckerPeriodSec);
consistencyCheckerScheduler = Utils.newScheduler(1, "consistency-checker-", false);
consistencyCheckerTask = consistencyCheckerScheduler.scheduleAtFixedRate(consistencyChecker, 0, serverConfig.serverParticipantsConsistencyCheckerPeriodSec, TimeUnit.SECONDS);
}
logger.info("checking if node exists in clustermap host {} port {}", networkConfig.hostName, networkConfig.port);
DataNodeId nodeId = clusterMap.getDataNodeId(networkConfig.hostName, networkConfig.port);
if (nodeId == null) {
throw new IllegalArgumentException("The node " + networkConfig.hostName + ":" + networkConfig.port + "is not present in the clustermap. Failing to start the datanode");
}
AccountServiceFactory accountServiceFactory = Utils.getObj(serverConfig.serverAccountServiceFactory, properties, registry);
AccountService accountService = accountServiceFactory.getAccountService();
StoreKeyFactory storeKeyFactory = Utils.getObj(storeConfig.storeKeyFactory, clusterMap);
// In most cases, there should be only one participant in the clusterParticipants list. If there is more than one
// and some components require a sole participant, the first one in the list is treated as the primary participant.
storageManager = new StorageManager(storeConfig, diskManagerConfig, scheduler, registry, storeKeyFactory, clusterMap, nodeId, new BlobStoreHardDelete(), clusterParticipants, time, new BlobStoreRecovery(), accountService);
storageManager.start();
SSLFactory sslFactory = new NettySslHttp2Factory(sslConfig);
if (clusterMapConfig.clusterMapEnableHttp2Replication) {
connectionPool = new Http2BlockingChannelPool(sslFactory, new Http2ClientConfig(properties), new Http2ClientMetrics(registry));
} else {
connectionPool = new BlockingChannelConnectionPool(connectionPoolConfig, sslConfig, clusterMapConfig, registry);
}
connectionPool.start();
StoreKeyConverterFactory storeKeyConverterFactory = Utils.getObj(serverConfig.serverStoreKeyConverterFactory, properties, registry);
Predicate<MessageInfo> skipPredicate = new ReplicationSkipPredicate(accountService, replicationConfig);
replicationManager = new ReplicationManager(replicationConfig, clusterMapConfig, storeConfig, storageManager, storeKeyFactory, clusterMap, scheduler, nodeId, connectionPool, registry, notificationSystem, storeKeyConverterFactory, serverConfig.serverMessageTransformer, clusterParticipants.get(0), skipPredicate);
replicationManager.start();
if (replicationConfig.replicationEnabledWithVcrCluster) {
logger.info("Creating Helix cluster spectator for cloud to store replication.");
vcrClusterSpectator = _vcrClusterAgentsFactory.getVcrClusterSpectator(cloudConfig, clusterMapConfig);
cloudToStoreReplicationManager = new CloudToStoreReplicationManager(replicationConfig, clusterMapConfig, storeConfig, storageManager, storeKeyFactory, clusterMap, scheduler, nodeId, connectionPool, registry, notificationSystem, storeKeyConverterFactory, serverConfig.serverMessageTransformer, vcrClusterSpectator, clusterParticipants.get(0));
cloudToStoreReplicationManager.start();
}
logger.info("Creating StatsManager to publish stats");
accountStatsMySqlStore = statsConfig.enableMysqlReport ? (AccountStatsMySqlStore) new AccountStatsMySqlStoreFactory(properties, clusterMapConfig, registry).getAccountStatsStore() : null;
statsManager = new StatsManager(storageManager, clusterMap.getReplicaIds(nodeId), registry, statsConfig, time, clusterParticipants.get(0), accountStatsMySqlStore, accountService);
if (serverConfig.serverStatsPublishLocalEnabled) {
statsManager.start();
}
ArrayList<Port> ports = new ArrayList<Port>();
ports.add(new Port(networkConfig.port, PortType.PLAINTEXT));
if (nodeId.hasSSLPort()) {
ports.add(new Port(nodeId.getSSLPort(), PortType.SSL));
}
networkServer = new SocketServer(networkConfig, sslConfig, registry, ports);
FindTokenHelper findTokenHelper = new FindTokenHelper(storeKeyFactory, replicationConfig);
requests = new AmbryServerRequests(storageManager, networkServer.getRequestResponseChannel(), clusterMap, nodeId, registry, metrics, findTokenHelper, notificationSystem, replicationManager, storeKeyFactory, serverConfig, storeKeyConverterFactory, statsManager, clusterParticipants.get(0));
requestHandlerPool = new RequestHandlerPool(serverConfig.serverRequestHandlerNumOfThreads, networkServer.getRequestResponseChannel(), requests);
networkServer.start();
// Start netty http2 server
if (nodeId.hasHttp2Port()) {
NettyConfig nettyConfig = new NettyConfig(properties);
NettyMetrics nettyMetrics = new NettyMetrics(registry);
Http2ServerMetrics http2ServerMetrics = new Http2ServerMetrics(registry);
Http2ClientConfig http2ClientConfig = new Http2ClientConfig(properties);
logger.info("Http2 port {} is enabled. Starting HTTP/2 service.", nodeId.getHttp2Port());
NettyServerRequestResponseChannel requestResponseChannel = new NettyServerRequestResponseChannel(networkConfig.queuedMaxRequests, http2ServerMetrics);
AmbryServerRequests ambryServerRequestsForHttp2 = new AmbryServerRequests(storageManager, requestResponseChannel, clusterMap, nodeId, registry, metrics, findTokenHelper, notificationSystem, replicationManager, storeKeyFactory, serverConfig, storeKeyConverterFactory, statsManager, clusterParticipants.get(0));
requestHandlerPoolForHttp2 = new RequestHandlerPool(serverConfig.serverRequestHandlerNumOfThreads, requestResponseChannel, ambryServerRequestsForHttp2);
NioServerFactory nioServerFactory = new StorageServerNettyFactory(nodeId.getHttp2Port(), requestResponseChannel, sslFactory, nettyConfig, http2ClientConfig, metrics, nettyMetrics, http2ServerMetrics, serverSecurityService);
nettyHttp2Server = nioServerFactory.getNioServer();
nettyHttp2Server.start();
}
// Other code
List<AmbryStatsReport> ambryStatsReports = new ArrayList<>();
Set<String> validStatsTypes = new HashSet<>();
for (StatsReportType type : StatsReportType.values()) {
validStatsTypes.add(type.toString());
}
if (serverConfig.serverStatsPublishReportEnabled) {
serverConfig.serverStatsReportsToPublish.forEach(e -> {
if (validStatsTypes.contains(e)) {
ambryStatsReports.add(new AmbryStatsReportImpl(serverConfig.serverQuotaStatsAggregateIntervalInMinutes, StatsReportType.valueOf(e)));
}
});
}
if (vcrClusterSpectator != null) {
vcrClusterSpectator.spectate();
}
Callback<StatsSnapshot> accountServiceCallback = new AccountServiceCallback(accountService);
for (ClusterParticipant clusterParticipant : clusterParticipants) {
clusterParticipant.participate(ambryStatsReports, accountStatsMySqlStore, accountServiceCallback);
}
if (nettyInternalMetrics != null) {
nettyInternalMetrics.start();
logger.info("NettyInternalMetric starts");
}
logger.info("started");
long processingTime = SystemTime.getInstance().milliseconds() - startTime;
metrics.serverStartTimeInMs.update(processingTime);
logger.info("Server startup time in Ms {}", processingTime);
} catch (Exception e) {
logger.error("Error during startup", e);
throw new InstantiationException("failure during startup " + e);
}
}
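For orientation, the HTTP/2 replication branch above needs only a few collaborators. The fragment below is a minimal sketch of that wiring (it is not part of AmbryServer); the property values are placeholders, only keys that appear elsewhere on this page are used, and a real deployment would also supply the SSL keystore/truststore settings consumed by SSLConfig.
// Minimal sketch, not AmbryServer code; exception handling omitted (in AmbryServer this runs inside the try block above).
Properties props = new Properties();
// Optional tuning; these keys are the ones exercised by the tests further down this page.
props.setProperty(Http2ClientConfig.HTTP2_BLOCKING_CHANNEL_ACQUIRE_TIMEOUT_MS, "2000");
props.setProperty(Http2ClientConfig.HTTP2_BLOCKING_CHANNEL_SEND_TIMEOUT_MS, "5000");
props.setProperty(Http2ClientConfig.HTTP2_BLOCKING_CHANNEL_RECEIVE_TIMEOUT_MS, "5000");
VerifiableProperties verifiableProperties = new VerifiableProperties(props);
SSLFactory sslFactory = new NettySslHttp2Factory(new SSLConfig(verifiableProperties));
Http2ClientConfig http2ClientConfig = new Http2ClientConfig(verifiableProperties);
Http2BlockingChannelPool http2Pool = new Http2BlockingChannelPool(sslFactory, http2ClientConfig, new Http2ClientMetrics(new MetricRegistry()));
http2Pool.start();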
Use of com.github.ambry.config.Http2ClientConfig in project ambry by linkedin.
The class VcrServer, method startup().
/**
* Start the VCR Server.
* @throws InstantiationException if an error was encountered during startup.
*/
public void startup() throws InstantiationException {
try {
logger.info("starting");
ServerConfig serverConfig = new ServerConfig(properties);
ServerSecurityServiceFactory serverSecurityServiceFactory = Utils.getObj(serverConfig.serverSecurityServiceFactory, properties, serverMetrics, registry);
serverSecurityService = serverSecurityServiceFactory.getServerSecurityService();
clusterMap = clusterAgentsFactory.getClusterMap();
logger.info("Initialized clusterMap");
registry = clusterMap.getMetricRegistry();
serverMetrics = new ServerMetrics(registry, VcrServer.class, VcrServer.class);
logger.info("Setting up JMX.");
long startTime = SystemTime.getInstance().milliseconds();
registry = clusterMap.getMetricRegistry();
reporter = reporterFactory != null ? reporterFactory.apply(registry) : JmxReporter.forRegistry(registry).build();
reporter.start();
logger.info("creating configs");
NetworkConfig networkConfig = new NetworkConfig(properties);
StoreConfig storeConfig = new StoreConfig(properties);
ReplicationConfig replicationConfig = new ReplicationConfig(properties);
CloudConfig cloudConfig = new CloudConfig(properties);
ConnectionPoolConfig connectionPoolConfig = new ConnectionPoolConfig(properties);
ClusterMapConfig clusterMapConfig = new ClusterMapConfig(properties);
SSLConfig sslConfig = new SSLConfig(properties);
// verify the configs
properties.verify();
// initialize cloud destination
if (cloudDestinationFactory == null) {
cloudDestinationFactory = Utils.getObj(cloudConfig.cloudDestinationFactoryClass, properties, registry, clusterMap);
}
cloudDestination = cloudDestinationFactory.getCloudDestination();
// TODO Make sure that config.updaterPollingIntervalMs value is large (~one day) for VCR.
AccountServiceFactory accountServiceFactory = Utils.getObj(serverConfig.serverAccountServiceFactory, properties, registry);
AccountService accountService = accountServiceFactory.getAccountService();
vcrClusterParticipant = ((VcrClusterAgentsFactory) Utils.getObj(cloudConfig.vcrClusterAgentsFactoryClass, cloudConfig, clusterMapConfig, clusterMap, accountService, storeConfig, cloudDestination, registry)).getVcrClusterParticipant();
scheduler = Utils.newScheduler(serverConfig.serverSchedulerNumOfthreads, false);
StoreKeyFactory storeKeyFactory = Utils.getObj(storeConfig.storeKeyFactory, clusterMap);
SSLFactory sslFactory = new NettySslHttp2Factory(sslConfig);
if (clusterMapConfig.clusterMapEnableHttp2Replication) {
connectionPool = new Http2BlockingChannelPool(sslFactory, new Http2ClientConfig(properties), new Http2ClientMetrics(registry));
logger.info("Using http2 for VCR replication.");
} else {
connectionPool = new BlockingChannelConnectionPool(connectionPoolConfig, sslConfig, clusterMapConfig, registry);
logger.info("Using blocking channel for VCR replication.");
}
connectionPool.start();
StoreKeyConverterFactory storeKeyConverterFactory = Utils.getObj(serverConfig.serverStoreKeyConverterFactory, properties, registry);
VcrMetrics vcrMetrics = new VcrMetrics(registry);
CloudStorageManager cloudStorageManager = new CloudStorageManager(properties, vcrMetrics, cloudDestination, clusterMap);
vcrReplicationManager = new VcrReplicationManager(cloudConfig, replicationConfig, clusterMapConfig, storeConfig, cloudStorageManager, storeKeyFactory, clusterMap, vcrClusterParticipant, cloudDestination, scheduler, connectionPool, vcrMetrics, notificationSystem, storeKeyConverterFactory, serverConfig.serverMessageTransformer);
vcrReplicationManager.start();
DataNodeId currentNode = vcrClusterParticipant.getCurrentDataNodeId();
ArrayList<Port> ports = new ArrayList<Port>();
ports.add(new Port(networkConfig.port, PortType.PLAINTEXT));
if (currentNode.hasSSLPort()) {
ports.add(new Port(cloudConfig.vcrSslPort, PortType.SSL));
}
networkServer = new SocketServer(networkConfig, sslConfig, registry, ports);
// todo fix enableDataPrefetch
ServerMetrics serverMetrics = new ServerMetrics(registry, VcrRequests.class, VcrServer.class);
VcrRequests requests = new VcrRequests(cloudStorageManager, networkServer.getRequestResponseChannel(), clusterMap, currentNode, registry, serverMetrics, new FindTokenHelper(storeKeyFactory, replicationConfig), notificationSystem, vcrReplicationManager, storeKeyFactory, storeKeyConverterFactory);
requestHandlerPool = new RequestHandlerPool(serverConfig.serverRequestHandlerNumOfThreads, networkServer.getRequestResponseChannel(), requests);
networkServer.start();
// Start netty http2 server
if (currentNode.hasHttp2Port()) {
logger.info("Http2 port {} is enabled. Starting HTTP/2 service.", currentNode.getHttp2Port());
NettyConfig nettyConfig = new NettyConfig(properties);
NettyMetrics nettyMetrics = new NettyMetrics(registry);
Http2ServerMetrics http2ServerMetrics = new Http2ServerMetrics(registry);
Http2ClientConfig http2ClientConfig = new Http2ClientConfig(properties);
NettyServerRequestResponseChannel requestResponseChannel = new NettyServerRequestResponseChannel(networkConfig.queuedMaxRequests, http2ServerMetrics);
VcrRequests vcrRequestsForHttp2 = new VcrRequests(cloudStorageManager, requestResponseChannel, clusterMap, currentNode, registry, serverMetrics, new FindTokenHelper(storeKeyFactory, replicationConfig), notificationSystem, vcrReplicationManager, storeKeyFactory, storeKeyConverterFactory);
requestHandlerPoolForHttp2 = new RequestHandlerPool(serverConfig.serverRequestHandlerNumOfThreads, requestResponseChannel, vcrRequestsForHttp2);
NioServerFactory nioServerFactory = new StorageServerNettyFactory(currentNode.getHttp2Port(), requestResponseChannel, sslFactory, nettyConfig, http2ClientConfig, serverMetrics, nettyMetrics, http2ServerMetrics, serverSecurityService);
nettyHttp2Server = nioServerFactory.getNioServer();
nettyHttp2Server.start();
}
long processingTime = SystemTime.getInstance().milliseconds() - startTime;
logger.info("VCR startup time in Ms {}", processingTime);
} catch (Exception e) {
logger.error("Error during VCR startup", e);
throw new InstantiationException("failure during VCR startup " + e);
}
}
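Both startup methods above select the replication connection pool the same way, branching on clusterMapConfig.clusterMapEnableHttp2Replication. A hypothetical helper capturing that shared decision could look like the sketch below; it is not present in Ambry, and it assumes ConnectionPool is the common supertype of both pool implementations, which is how the connectionPool field is used above.
private static ConnectionPool createConnectionPool(ClusterMapConfig clusterMapConfig, SSLConfig sslConfig, ConnectionPoolConfig connectionPoolConfig, VerifiableProperties properties, MetricRegistry registry) throws Exception {
  if (clusterMapConfig.clusterMapEnableHttp2Replication) {
    // HTTP/2 path: the pool reads its tuning (stream limits, timeouts) from Http2ClientConfig.
    SSLFactory sslFactory = new NettySslHttp2Factory(sslConfig);
    return new Http2BlockingChannelPool(sslFactory, new Http2ClientConfig(properties), new Http2ClientMetrics(registry));
  }
  // Plaintext/SSL blocking-channel path, driven by ConnectionPoolConfig instead.
  return new BlockingChannelConnectionPool(connectionPoolConfig, sslConfig, clusterMapConfig, registry);
}
The caller remains responsible for invoking start() on the returned pool, as both servers do.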
Use of com.github.ambry.config.Http2ClientConfig in project ambry by linkedin.
The class EmbeddedChannelPool, method createHttp2ClientConfig().
/**
* Create a {@link Http2ClientConfig} with given timeout values.
* @param acquireTimeoutMs The timeout in ms for acquiring a channel.
* @param sendTimeoutMs The timeout in ms for sending a request out.
* @param receiveTimeoutMs The timeout in ms for receiving a response.
* @return the constructed {@link Http2ClientConfig}.
*/
private Http2ClientConfig createHttp2ClientConfig(long acquireTimeoutMs, long sendTimeoutMs, long receiveTimeoutMs) {
Properties properties = new Properties();
properties.setProperty(Http2ClientConfig.HTTP2_BLOCKING_CHANNEL_ACQUIRE_TIMEOUT_MS, String.valueOf(acquireTimeoutMs));
properties.setProperty(Http2ClientConfig.HTTP2_BLOCKING_CHANNEL_SEND_TIMEOUT_MS, String.valueOf(sendTimeoutMs));
properties.setProperty(Http2ClientConfig.HTTP2_BLOCKING_CHANNEL_RECEIVE_TIMEOUT_MS, String.valueOf(receiveTimeoutMs));
return new Http2ClientConfig(new VerifiableProperties(properties));
}
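As a usage illustration (hypothetical, not taken from EmbeddedChannelPool), a test could use the helper to build a config whose blocking-channel calls fail fast, and then hand it to an Http2BlockingChannelPool together with an SSLFactory and Http2ClientMetrics, as the server snippets above do.
// Hypothetical call of the helper above: 10 ms to acquire a channel, 50 ms to send, 100 ms to receive.
Http2ClientConfig fastFailingConfig = createHttp2ClientConfig(10, 50, 100);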
Use of com.github.ambry.config.Http2ClientConfig in project ambry by linkedin.
The class Http2MultiplexedChannelPoolTest, method setup().
@BeforeClass
public static void setup() {
loopGroup = new NioEventLoopGroup(4);
Properties properties = new Properties();
properties.setProperty(Http2ClientConfig.HTTP2_MAX_CONTENT_LENGTH, Integer.toString(maxContentLength));
properties.setProperty(Http2ClientConfig.HTTP2_MAX_CONCURRENT_STREAMS_PER_CONNECTION, Integer.toString(maxConcurrentStreamsPerConnection));
properties.setProperty(Http2ClientConfig.HTTP2_MIN_CONNECTION_PER_PORT, "1");
http2ClientConfigForOneConnection = new Http2ClientConfig(new VerifiableProperties(properties));
properties.setProperty(Http2ClientConfig.HTTP2_MIN_CONNECTION_PER_PORT, "2");
http2ClientConfigForTwoConnection = new Http2ClientConfig(new VerifiableProperties(properties));
}
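Each Http2ClientConfig copies its values out of the VerifiableProperties when it is constructed, which is why the test can keep mutating the same Properties object and re-wrap it to obtain independent configs. A third variant (hypothetical values, shown only to illustrate the pattern) would look like this:
// Same pattern, hypothetical third config allowing four connections per port.
properties.setProperty(Http2ClientConfig.HTTP2_MIN_CONNECTION_PER_PORT, "4");
Http2ClientConfig http2ClientConfigForFourConnections = new Http2ClientConfig(new VerifiableProperties(properties));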
Use of com.github.ambry.config.Http2ClientConfig in project ambry by linkedin.
The class Http2NetworkClientTest, method putGetTest().
@Test
public void putGetTest() throws Exception {
MockClusterMap clusterMap = http2Cluster.getClusterMap();
DataNodeId dataNodeId = http2Cluster.getGeneralDataNode();
BlobIdFactory blobIdFactory = new BlobIdFactory(clusterMap);
SSLFactory sslFactory = new NettySslHttp2Factory(clientSSLConfig);
Http2NetworkClient networkClient = new Http2NetworkClient(new Http2ClientMetrics(new MetricRegistry()), new Http2ClientConfig(new VerifiableProperties(new Properties())), sslFactory, eventLoopGroup);
// Put a blob
int blobSize = 1024 * 1024;
byte[] usermetadata = new byte[1000];
byte[] data = new byte[blobSize];
short accountId = Utils.getRandomShort(TestUtils.RANDOM);
short containerId = Utils.getRandomShort(TestUtils.RANDOM);
BlobProperties properties = new BlobProperties(blobSize, "serviceid1", accountId, containerId, false);
TestUtils.RANDOM.nextBytes(usermetadata);
TestUtils.RANDOM.nextBytes(data);
List<? extends PartitionId> partitionIds = clusterMap.getWritablePartitionIds(MockClusterMap.DEFAULT_PARTITION_CLASS);
short blobIdVersion = CommonTestUtils.getCurrentBlobIdVersion();
BlobId blobId1 = new BlobId(blobIdVersion, BlobId.BlobIdType.NATIVE, clusterMap.getLocalDatacenterId(), properties.getAccountId(), properties.getContainerId(), partitionIds.get(0), false, BlobId.BlobDataType.DATACHUNK);
// put blob 1
PutRequest putRequest = new PutRequest(1, "client1", blobId1, properties, ByteBuffer.wrap(usermetadata), Unpooled.wrappedBuffer(data), properties.getBlobSize(), BlobType.DataBlob, null);
RequestInfo request = new RequestInfo(dataNodeId.getHostname(), new Port(dataNodeId.getHttp2Port(), PortType.HTTP2), putRequest, clusterMap.getReplicaIds(dataNodeId).get(0), null);
List<ResponseInfo> responseInfos = networkClient.sendAndPoll(Collections.singletonList(request), new HashSet<>(), 300);
long startTime = SystemTime.getInstance().milliseconds();
while (responseInfos.size() == 0) {
responseInfos = networkClient.sendAndPoll(Collections.EMPTY_LIST, new HashSet<>(), 300);
if (SystemTime.getInstance().milliseconds() - startTime >= 6000) {
fail("Network Client no reponse and timeout.");
}
Thread.sleep(30);
}
assertEquals("Should be only one response", 1, responseInfos.size());
DataInputStream dis = new NettyByteBufDataInputStream(responseInfos.get(0).content());
PutResponse putResponse = PutResponse.readFrom(dis);
assertEquals("No error expected.", ServerErrorCode.No_Error, putResponse.getError());
// Get the blob
// get blob properties
ArrayList<BlobId> ids = new ArrayList<BlobId>();
MockPartitionId partition = (MockPartitionId) clusterMap.getWritablePartitionIds(MockClusterMap.DEFAULT_PARTITION_CLASS).get(0);
ids.add(blobId1);
ArrayList<PartitionRequestInfo> partitionRequestInfoList = new ArrayList<PartitionRequestInfo>();
PartitionRequestInfo partitionRequestInfo = new PartitionRequestInfo(partition, ids);
partitionRequestInfoList.add(partitionRequestInfo);
GetRequest getRequest = new GetRequest(1, "http2-clientid", MessageFormatFlags.All, partitionRequestInfoList, GetOption.None);
request = new RequestInfo(dataNodeId.getHostname(), new Port(dataNodeId.getHttp2Port(), PortType.HTTP2), getRequest, clusterMap.getReplicaIds(dataNodeId).get(0), null);
responseInfos = networkClient.sendAndPoll(Collections.singletonList(request), new HashSet<>(), 300);
startTime = SystemTime.getInstance().milliseconds();
while (responseInfos.size() == 0) {
responseInfos = networkClient.sendAndPoll(Collections.EMPTY_LIST, new HashSet<>(), 300);
if (SystemTime.getInstance().milliseconds() - startTime >= 3000) {
fail("Network Client no response and timeout.");
}
Thread.sleep(30);
}
assertEquals("Should be only one response", 1, responseInfos.size());
dis = new NettyByteBufDataInputStream(responseInfos.get(0).content());
GetResponse resp = GetResponse.readFrom(dis, clusterMap);
BlobAll blobAll = MessageFormatRecord.deserializeBlobAll(resp.getInputStream(), blobIdFactory);
// verify BlobProperties
BlobProperties propertyOutput = blobAll.getBlobInfo().getBlobProperties();
assertEquals(blobSize, propertyOutput.getBlobSize());
assertEquals("serviceid1", propertyOutput.getServiceId());
assertEquals("AccountId mismatch", accountId, propertyOutput.getAccountId());
assertEquals("ContainerId mismatch", containerId, propertyOutput.getContainerId());
// verify UserMetadata
byte[] userMetadataOutput = blobAll.getBlobInfo().getUserMetadata();
assertArrayEquals(usermetadata, userMetadataOutput);
// verify content
byte[] actualBlobData = getBlobDataAndRelease(blobAll.getBlobData());
assertArrayEquals("Content mismatch.", data, actualBlobData);
}
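The two poll loops in putGetTest follow the same pattern: keep calling sendAndPoll with an empty request list until the first response arrives or an overall deadline passes. A small helper capturing that pattern could look like the sketch below; it is not part of the test class, and the 300 ms poll timeout and 30 ms sleep simply mirror the values used above.
private static List<ResponseInfo> awaitFirstResponse(Http2NetworkClient networkClient, RequestInfo request, long overallTimeoutMs) throws InterruptedException {
  // Send the request once, then poll until at least one response shows up or the deadline passes.
  List<ResponseInfo> responseInfos = networkClient.sendAndPoll(Collections.singletonList(request), new HashSet<>(), 300);
  long startTime = SystemTime.getInstance().milliseconds();
  while (responseInfos.isEmpty()) {
    responseInfos = networkClient.sendAndPoll(Collections.emptyList(), new HashSet<>(), 300);
    if (SystemTime.getInstance().milliseconds() - startTime >= overallTimeoutMs) {
      fail("Network Client no response and timeout.");
    }
    Thread.sleep(30);
  }
  return responseInfos;
}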