use of com.github.ambry.config.NettyConfig in project ambry by linkedin.
the class FrontendIntegrationTest method multipartPostGetHeadTest.
/**
* Tests multipart POST and verifies it via GET, HEAD and DELETE operations.
* @throws Exception
*/
@Test
public void multipartPostGetHeadTest() throws Exception {
Account refAccount = ACCOUNT_SERVICE.createAndAddRandomAccount();
Container refContainer = refAccount.getContainerById(Container.DEFAULT_PUBLIC_CONTAINER_ID);
doPostGetHeadDeleteTest(0, refAccount, refContainer, refAccount.getName(), !refContainer.isCacheable(), refAccount.getName(), refContainer.getName(), true);
doPostGetHeadDeleteTest(FRONTEND_CONFIG.frontendChunkedGetResponseThresholdInBytes * 3, refAccount, refContainer, refAccount.getName(), !refContainer.isCacheable(), refAccount.getName(), refContainer.getName(), true);
// failure case
// size of content being POSTed is higher than what is allowed via multipart/form-data
long maxAllowedSizeBytes = new NettyConfig(FRONTEND_VERIFIABLE_PROPS).nettyMultipartPostMaxSizeBytes;
ByteBuffer content = ByteBuffer.wrap(TestUtils.getRandomBytes((int) maxAllowedSizeBytes + 1));
HttpHeaders headers = new DefaultHttpHeaders();
setAmbryHeadersForPut(headers, 7200, !refContainer.isCacheable(), refAccount.getName(), "application/octet-stream", null, refAccount.getName(), refContainer.getName());
HttpRequest httpRequest = RestTestUtils.createRequest(HttpMethod.POST, "/", headers);
HttpPostRequestEncoder encoder = createEncoder(httpRequest, content, ByteBuffer.allocate(0));
ResponseParts responseParts = nettyClient.sendRequest(encoder.finalizeRequest(), encoder, null).get();
HttpResponse response = getHttpResponse(responseParts);
assertEquals("Unexpected response status", HttpResponseStatus.REQUEST_ENTITY_TOO_LARGE, response.status());
assertTrue("No Date header", response.headers().getTimeMillis(HttpHeaderNames.DATE, -1) != -1);
assertFalse("Channel should not be active", HttpUtil.isKeepAlive(response));
}
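The createEncoder helper used above is not shown in this snippet. As a rough, hypothetical sketch (the part name, content type and charset are assumptions for illustration, not taken from the Ambry source), a multipart/form-data encoder for the blob content could be built with Netty's HttpPostRequestEncoder roughly like this:
// Hypothetical sketch of a createEncoder-style helper; uses io.netty.handler.codec.http.multipart.*
// and io.netty.buffer.Unpooled. The part name "Blob" is an assumption, not the real constant.
private static HttpPostRequestEncoder createMultipartEncoder(HttpRequest request, ByteBuffer blobContent)
    throws HttpPostRequestEncoder.ErrorDataEncoderException, IOException {
  HttpDataFactory httpDataFactory = new DefaultHttpDataFactory(false);
  // The final "true" asks for multipart/form-data encoding.
  HttpPostRequestEncoder encoder = new HttpPostRequestEncoder(httpDataFactory, request, true);
  FileUpload blobPart = new MemoryFileUpload("Blob", "Blob", "application/octet-stream", "",
      StandardCharsets.UTF_8, blobContent.remaining());
  blobPart.setContent(Unpooled.wrappedBuffer(blobContent));
  encoder.addBodyHttpData(blobPart);
  return encoder;
}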
use of com.github.ambry.config.NettyConfig in project ambry by linkedin.
the class FrontendIntegrationTest method multipartPostGetHeadUpdateDeleteUndeleteTest.
/**
* Tests multipart POST and verifies it via GET, HEAD, TTL update, DELETE and UNDELETE operations.
* @throws Exception
*/
@Test
public void multipartPostGetHeadUpdateDeleteUndeleteTest() throws Exception {
Account refAccount = ACCOUNT_SERVICE.createAndAddRandomAccount();
Container refContainer = refAccount.getContainerById(Container.DEFAULT_PUBLIC_CONTAINER_ID);
doPostGetHeadUpdateDeleteUndeleteTest(0, refAccount, refContainer, refAccount.getName(), !refContainer.isCacheable(), refAccount.getName(), refContainer.getName(), true);
doPostGetHeadUpdateDeleteUndeleteTest((int) FRONTEND_CONFIG.chunkedGetResponseThresholdInBytes * 3, refAccount, refContainer, refAccount.getName(), !refContainer.isCacheable(), refAccount.getName(), refContainer.getName(), true);
// failure case
// size of content being POSTed is higher than what is allowed via multipart/form-data
long maxAllowedSizeBytes = new NettyConfig(FRONTEND_VERIFIABLE_PROPS).nettyMultipartPostMaxSizeBytes;
ByteBuffer content = ByteBuffer.wrap(TestUtils.getRandomBytes((int) maxAllowedSizeBytes + 1));
HttpHeaders headers = new DefaultHttpHeaders();
setAmbryHeadersForPut(headers, TTL_SECS, !refContainer.isCacheable(), refAccount.getName(), "application/octet-stream", null, refAccount.getName(), refContainer.getName());
HttpRequest httpRequest = RestTestUtils.createRequest(HttpMethod.POST, "/", headers);
HttpPostRequestEncoder encoder = createEncoder(httpRequest, content, ByteBuffer.allocate(0));
ResponseParts responseParts = nettyClient.sendRequest(encoder.finalizeRequest(), encoder, null).get();
HttpResponse response = getHttpResponse(responseParts);
assertEquals("Unexpected response status", HttpResponseStatus.REQUEST_ENTITY_TOO_LARGE, response.status());
assertTrue("No Date header", response.headers().getTimeMillis(HttpHeaderNames.DATE, -1) != -1);
assertFalse("Channel should not be active", HttpUtil.isKeepAlive(response));
}
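Both failure cases read the size limit straight from NettyConfig. A minimal sketch of setting and reading that limit in isolation, assuming the property key matches the field name (not verified against the Ambry source):
// Illustrative only: the key "netty.multipart.post.max.size.bytes" is an assumption.
Properties props = new Properties();
props.setProperty("netty.multipart.post.max.size.bytes", "1024");
VerifiableProperties verifiableProps = new VerifiableProperties(props);
NettyConfig nettyConfig = new NettyConfig(verifiableProps);
long maxAllowedSizeBytes = nettyConfig.nettyMultipartPostMaxSizeBytes; // 1024 with the property above
// A multipart POST whose content exceeds this limit is expected to be rejected with 413 Request Entity Too Large.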
use of com.github.ambry.config.NettyConfig in project ambry by linkedin.
the class AmbryServer method startup.
public void startup() throws InstantiationException {
try {
logger.info("starting");
clusterParticipants = clusterAgentsFactory.getClusterParticipants();
logger.info("Setting up JMX.");
long startTime = SystemTime.getInstance().milliseconds();
reporter = reporterFactory != null ? reporterFactory.apply(registry) : JmxReporter.forRegistry(registry).build();
reporter.start();
logger.info("creating configs");
NetworkConfig networkConfig = new NetworkConfig(properties);
StoreConfig storeConfig = new StoreConfig(properties);
DiskManagerConfig diskManagerConfig = new DiskManagerConfig(properties);
ServerConfig serverConfig = new ServerConfig(properties);
ReplicationConfig replicationConfig = new ReplicationConfig(properties);
ConnectionPoolConfig connectionPoolConfig = new ConnectionPoolConfig(properties);
SSLConfig sslConfig = new SSLConfig(properties);
ClusterMapConfig clusterMapConfig = new ClusterMapConfig(properties);
StatsManagerConfig statsConfig = new StatsManagerConfig(properties);
CloudConfig cloudConfig = new CloudConfig(properties);
// verify the configs
properties.verify();
scheduler = Utils.newScheduler(serverConfig.serverSchedulerNumOfthreads, false);
// If there is more than one participant on the local node, run a periodic consistency checker to catch any mismatch in the sealed/stopped replica lists maintained by each participant.
if (clusterParticipants != null && clusterParticipants.size() > 1 && serverConfig.serverParticipantsConsistencyCheckerPeriodSec > 0) {
consistencyChecker = new ParticipantsConsistencyChecker(clusterParticipants, metrics);
logger.info("Scheduling participants consistency checker with a period of {} secs", serverConfig.serverParticipantsConsistencyCheckerPeriodSec);
consistencyCheckerScheduler = Utils.newScheduler(1, "consistency-checker-", false);
consistencyCheckerTask = consistencyCheckerScheduler.scheduleAtFixedRate(consistencyChecker, 0, serverConfig.serverParticipantsConsistencyCheckerPeriodSec, TimeUnit.SECONDS);
}
logger.info("checking if node exists in clustermap host {} port {}", networkConfig.hostName, networkConfig.port);
DataNodeId nodeId = clusterMap.getDataNodeId(networkConfig.hostName, networkConfig.port);
if (nodeId == null) {
throw new IllegalArgumentException("The node " + networkConfig.hostName + ":" + networkConfig.port + " is not present in the clustermap. Failing to start the datanode");
}
AccountServiceFactory accountServiceFactory = Utils.getObj(serverConfig.serverAccountServiceFactory, properties, registry);
AccountService accountService = accountServiceFactory.getAccountService();
StoreKeyFactory storeKeyFactory = Utils.getObj(storeConfig.storeKeyFactory, clusterMap);
// In most cases, there should be only one participant in the clusterParticipants list. If there is more than one
// and some components require a sole participant, the first one in the list is treated as the primary participant.
storageManager = new StorageManager(storeConfig, diskManagerConfig, scheduler, registry, storeKeyFactory, clusterMap, nodeId, new BlobStoreHardDelete(), clusterParticipants, time, new BlobStoreRecovery(), accountService);
storageManager.start();
SSLFactory sslFactory = new NettySslHttp2Factory(sslConfig);
if (clusterMapConfig.clusterMapEnableHttp2Replication) {
connectionPool = new Http2BlockingChannelPool(sslFactory, new Http2ClientConfig(properties), new Http2ClientMetrics(registry));
} else {
connectionPool = new BlockingChannelConnectionPool(connectionPoolConfig, sslConfig, clusterMapConfig, registry);
}
connectionPool.start();
StoreKeyConverterFactory storeKeyConverterFactory = Utils.getObj(serverConfig.serverStoreKeyConverterFactory, properties, registry);
Predicate<MessageInfo> skipPredicate = new ReplicationSkipPredicate(accountService, replicationConfig);
replicationManager = new ReplicationManager(replicationConfig, clusterMapConfig, storeConfig, storageManager, storeKeyFactory, clusterMap, scheduler, nodeId, connectionPool, registry, notificationSystem, storeKeyConverterFactory, serverConfig.serverMessageTransformer, clusterParticipants.get(0), skipPredicate);
replicationManager.start();
if (replicationConfig.replicationEnabledWithVcrCluster) {
logger.info("Creating Helix cluster spectator for cloud to store replication.");
vcrClusterSpectator = _vcrClusterAgentsFactory.getVcrClusterSpectator(cloudConfig, clusterMapConfig);
cloudToStoreReplicationManager = new CloudToStoreReplicationManager(replicationConfig, clusterMapConfig, storeConfig, storageManager, storeKeyFactory, clusterMap, scheduler, nodeId, connectionPool, registry, notificationSystem, storeKeyConverterFactory, serverConfig.serverMessageTransformer, vcrClusterSpectator, clusterParticipants.get(0));
cloudToStoreReplicationManager.start();
}
logger.info("Creating StatsManager to publish stats");
accountStatsMySqlStore = statsConfig.enableMysqlReport ? (AccountStatsMySqlStore) new AccountStatsMySqlStoreFactory(properties, clusterMapConfig, registry).getAccountStatsStore() : null;
statsManager = new StatsManager(storageManager, clusterMap.getReplicaIds(nodeId), registry, statsConfig, time, clusterParticipants.get(0), accountStatsMySqlStore, accountService);
if (serverConfig.serverStatsPublishLocalEnabled) {
statsManager.start();
}
ArrayList<Port> ports = new ArrayList<Port>();
ports.add(new Port(networkConfig.port, PortType.PLAINTEXT));
if (nodeId.hasSSLPort()) {
ports.add(new Port(nodeId.getSSLPort(), PortType.SSL));
}
networkServer = new SocketServer(networkConfig, sslConfig, registry, ports);
FindTokenHelper findTokenHelper = new FindTokenHelper(storeKeyFactory, replicationConfig);
requests = new AmbryServerRequests(storageManager, networkServer.getRequestResponseChannel(), clusterMap, nodeId, registry, metrics, findTokenHelper, notificationSystem, replicationManager, storeKeyFactory, serverConfig, storeKeyConverterFactory, statsManager, clusterParticipants.get(0));
requestHandlerPool = new RequestHandlerPool(serverConfig.serverRequestHandlerNumOfThreads, networkServer.getRequestResponseChannel(), requests);
networkServer.start();
// Start netty http2 server
if (nodeId.hasHttp2Port()) {
NettyConfig nettyConfig = new NettyConfig(properties);
NettyMetrics nettyMetrics = new NettyMetrics(registry);
Http2ServerMetrics http2ServerMetrics = new Http2ServerMetrics(registry);
Http2ClientConfig http2ClientConfig = new Http2ClientConfig(properties);
logger.info("Http2 port {} is enabled. Starting HTTP/2 service.", nodeId.getHttp2Port());
NettyServerRequestResponseChannel requestResponseChannel = new NettyServerRequestResponseChannel(networkConfig.queuedMaxRequests, http2ServerMetrics);
AmbryServerRequests ambryServerRequestsForHttp2 = new AmbryServerRequests(storageManager, requestResponseChannel, clusterMap, nodeId, registry, metrics, findTokenHelper, notificationSystem, replicationManager, storeKeyFactory, serverConfig, storeKeyConverterFactory, statsManager, clusterParticipants.get(0));
requestHandlerPoolForHttp2 = new RequestHandlerPool(serverConfig.serverRequestHandlerNumOfThreads, requestResponseChannel, ambryServerRequestsForHttp2);
NioServerFactory nioServerFactory = new StorageServerNettyFactory(nodeId.getHttp2Port(), requestResponseChannel, sslFactory, nettyConfig, http2ClientConfig, metrics, nettyMetrics, http2ServerMetrics, serverSecurityService);
nettyHttp2Server = nioServerFactory.getNioServer();
nettyHttp2Server.start();
}
// Other code
List<AmbryStatsReport> ambryStatsReports = new ArrayList<>();
Set<String> validStatsTypes = new HashSet<>();
for (StatsReportType type : StatsReportType.values()) {
validStatsTypes.add(type.toString());
}
if (serverConfig.serverStatsPublishReportEnabled) {
serverConfig.serverStatsReportsToPublish.forEach(e -> {
if (validStatsTypes.contains(e)) {
ambryStatsReports.add(new AmbryStatsReportImpl(serverConfig.serverQuotaStatsAggregateIntervalInMinutes, StatsReportType.valueOf(e)));
}
});
}
if (vcrClusterSpectator != null) {
vcrClusterSpectator.spectate();
}
Callback<StatsSnapshot> accountServiceCallback = new AccountServiceCallback(accountService);
for (ClusterParticipant clusterParticipant : clusterParticipants) {
clusterParticipant.participate(ambryStatsReports, accountStatsMySqlStore, accountServiceCallback);
}
if (nettyInternalMetrics != null) {
nettyInternalMetrics.start();
logger.info("NettyInternalMetric starts");
}
logger.info("started");
long processingTime = SystemTime.getInstance().milliseconds() - startTime;
metrics.serverStartTimeInMs.update(processingTime);
logger.info("Server startup time in Ms {}", processingTime);
} catch (Exception e) {
logger.error("Error during startup", e);
throw new InstantiationException("failure during startup " + e);
}
}
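startup() builds every config object from the same VerifiableProperties instance and only then calls properties.verify(). A stripped-down sketch of that pattern (the property keys and the exact behavior of verify() are assumptions for illustration):
// Illustrative only: each config class reads its own keys from the shared properties;
// verify() is called once after all configs have been constructed.
Properties props = new Properties();
props.setProperty("host.name", "localhost"); // assumed NetworkConfig key
props.setProperty("port", "6667");           // assumed NetworkConfig key
VerifiableProperties properties = new VerifiableProperties(props);
NetworkConfig networkConfig = new NetworkConfig(properties);
NettyConfig nettyConfig = new NettyConfig(properties); // only used later if the node exposes an HTTP/2 port
Http2ClientConfig http2ClientConfig = new Http2ClientConfig(properties);
properties.verify(); // assumed to flag any property that no config consumed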
use of com.github.ambry.config.NettyConfig in project ambry by linkedin.
the class MockChannelHandlerContext method channelInactiveTest.
/**
* ClosedChannelException is expected when writing to a NettyResponseChannel whose underlying channel is inactive.
*/
@Test
public void channelInactiveTest() {
// request is keep-alive by default.
HttpRequest request = createRequestWithHeaders(HttpMethod.GET, TestingUri.Close.toString());
ChunkedWriteHandler chunkedWriteHandler = new ChunkedWriteHandler();
EmbeddedChannel channel = new EmbeddedChannel(chunkedWriteHandler);
VerifiableProperties verifiableProperties = new VerifiableProperties(new Properties());
NettyResponseChannel nettyResponseChannel = new NettyResponseChannel(new MockChannelHandlerContext(channel), new NettyMetrics(new MetricRegistry()), new PerformanceConfig(verifiableProperties), new NettyConfig(verifiableProperties));
channel.disconnect().awaitUninterruptibly();
assertFalse("Channel should be closed", channel.isOpen());
Future<Long> future = nettyResponseChannel.write(Unpooled.buffer(1), null);
try {
future.get();
fail("Future.get() should throw exception.");
} catch (InterruptedException | ExecutionException e) {
assertTrue("Should be ClosedChannelException", e.getCause() instanceof ClosedChannelException);
}
}
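The ClosedChannelException asserted here ultimately comes from Netty itself: once the underlying channel is closed, any subsequent write fails its future with that exception, and NettyResponseChannel surfaces it through the returned Future. A Netty-only sketch of the same behavior without the Ambry wrapper classes (assumed EmbeddedChannel behavior, not taken from the test):
// Illustrative only: a write issued after the channel is closed fails with ClosedChannelException.
EmbeddedChannel embeddedChannel = new EmbeddedChannel(new ChunkedWriteHandler());
embeddedChannel.close().awaitUninterruptibly();
ChannelFuture writeFuture = embeddedChannel.writeAndFlush(Unpooled.buffer(1));
writeFuture.awaitUninterruptibly();
assertTrue("Expected ClosedChannelException", writeFuture.cause() instanceof ClosedChannelException);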
use of com.github.ambry.config.NettyConfig in project ambry by linkedin.
the class RestServerMain method getSSLFactoryIfRequired.
/**
* Instantiate an {@link SSLFactory} if any components require it.
* @param verifiableProperties The {@link VerifiableProperties} to check if any components require it.
* @return the {@link SSLFactory}, or {@code null} if no components require it.
* @throws GeneralSecurityException
* @throws IOException
*/
private static SSLFactory getSSLFactoryIfRequired(VerifiableProperties verifiableProperties) throws Exception {
ClusterMapConfig clusterMapConfig = new ClusterMapConfig(verifiableProperties);
boolean sslRequired = new NettyConfig(verifiableProperties).nettyServerEnableSSL || clusterMapConfig.clusterMapSslEnabledDatacenters.length() > 0;
return sslRequired ? SSLFactory.getNewInstance(new SSLConfig(verifiableProperties)) : null;
}
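A hypothetical call site for this helper (the property keys below are assumptions derived from the config field names; the set of required ClusterMapConfig keys may differ):
// Illustrative only: enabling SSL on the Netty server should make the helper return a non-null factory.
Properties props = new Properties();
props.setProperty("netty.server.enable.ssl", "true");         // assumed key for nettyServerEnableSSL
props.setProperty("clustermap.cluster.name", "test-cluster"); // assumed required ClusterMapConfig keys
props.setProperty("clustermap.datacenter.name", "dc1");
props.setProperty("clustermap.host.name", "localhost");
VerifiableProperties verifiableProperties = new VerifiableProperties(props);
SSLFactory sslFactory = getSSLFactoryIfRequired(verifiableProperties); // expected non-null when SSL is enabled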