Use of io.seata.common.thread.NamedThreadFactory in project seata by seata.
The class Server, method main.
/**
* The entry point of application.
*
* @param args the input arguments
* @throws IOException if an I/O error occurs
*/
public static void main(String[] args) throws IOException {
// get the port first; it is used in logback.xml
int port = PortHelper.getPort(args);
System.setProperty(ConfigurationKeys.SERVER_PORT, Integer.toString(port));
// create logger
final Logger logger = LoggerFactory.getLogger(Server.class);
if (ContainerHelper.isRunningInContainer()) {
logger.info("The server is running in container.");
}
// initialize the parameter parser
// Note that the parameter parser should always run first,
// because it parses the parameters needed for startup.
ParameterParser parameterParser = new ParameterParser(args);
// initialize the metrics
MetricsManager.get().init();
System.setProperty(ConfigurationKeys.STORE_MODE, parameterParser.getStoreMode());
ThreadPoolExecutor workingThreads = new ThreadPoolExecutor(
    NettyServerConfig.getMinServerPoolSize(),
    NettyServerConfig.getMaxServerPoolSize(),
    NettyServerConfig.getKeepAliveTime(), TimeUnit.SECONDS,
    new LinkedBlockingQueue<>(NettyServerConfig.getMaxTaskQueueSize()),
    new NamedThreadFactory("ServerHandlerThread", NettyServerConfig.getMaxServerPoolSize()),
    new ThreadPoolExecutor.CallerRunsPolicy());
NettyRemotingServer nettyRemotingServer = new NettyRemotingServer(workingThreads);
// server port
nettyRemotingServer.setListenPort(parameterParser.getPort());
UUIDGenerator.init(parameterParser.getServerNode());
// log store mode: file, db, redis
SessionHolder.init(parameterParser.getStoreMode());
DefaultCoordinator coordinator = new DefaultCoordinator(nettyRemotingServer);
coordinator.init();
nettyRemotingServer.setHandler(coordinator);
// register ShutdownHook
ShutdownHook.getInstance().addDisposable(coordinator);
ShutdownHook.getInstance().addDisposable(nettyRemotingServer);
// 127.0.0.1 and 0.0.0.0 are not valid here.
if (NetUtil.isValidIp(parameterParser.getHost(), false)) {
XID.setIpAddress(parameterParser.getHost());
} else {
XID.setIpAddress(NetUtil.getLocalIp());
}
XID.setPort(nettyRemotingServer.getListenPort());
try {
nettyRemotingServer.init();
} catch (Throwable e) {
logger.error("nettyServer init error:{}", e.getMessage(), e);
System.exit(-1);
}
System.exit(0);
}
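For readers who only see call sites on this page: NamedThreadFactory's job is to give pool threads a recognizable name prefix (and, typically, a daemon flag) so they are easy to spot in thread dumps. Below is a minimal sketch of such a factory, assuming the usual prefix-plus-counter naming convention; it is an illustration, not Seata's actual implementation.

import java.util.concurrent.ThreadFactory;
import java.util.concurrent.atomic.AtomicInteger;

// Minimal stand-in for a named thread factory (assumed behavior).
public class SimpleNamedThreadFactory implements ThreadFactory {
    private final String prefix;
    private final int totalSize; // pool size hint embedded in the thread name
    private final boolean daemon;
    private final AtomicInteger counter = new AtomicInteger(0);

    public SimpleNamedThreadFactory(String prefix, int totalSize, boolean daemon) {
        this.prefix = prefix;
        this.totalSize = totalSize;
        this.daemon = daemon;
    }

    @Override
    public Thread newThread(Runnable r) {
        // Produces names like "ServerHandlerThread_3_500"; the exact pattern is an assumption.
        Thread t = new Thread(r, prefix + "_" + counter.incrementAndGet() + "_" + totalSize);
        t.setDaemon(daemon);
        return t;
    }
}

With such a factory, the ServerHandlerThread pool created in main above appears under that prefix in jstack output, which is the main reason to prefer it over the default factory from Executors.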
Use of io.seata.common.thread.NamedThreadFactory in project seata by seata.
The class NettyClientBootstrap, method start.
@Override
public void start() {
if (this.defaultEventExecutorGroup == null) {
this.defaultEventExecutorGroup = new DefaultEventExecutorGroup(
    nettyClientConfig.getClientWorkerThreads(),
    new NamedThreadFactory(getThreadPrefix(nettyClientConfig.getClientWorkerThreadPrefix()),
        nettyClientConfig.getClientWorkerThreads()));
}
this.bootstrap.group(this.eventLoopGroupWorker)
    .channel(nettyClientConfig.getClientChannelClazz())
    .option(ChannelOption.TCP_NODELAY, true)
    .option(ChannelOption.SO_KEEPALIVE, true)
    .option(ChannelOption.CONNECT_TIMEOUT_MILLIS, nettyClientConfig.getConnectTimeoutMillis())
    .option(ChannelOption.SO_SNDBUF, nettyClientConfig.getClientSocketSndBufSize())
    .option(ChannelOption.SO_RCVBUF, nettyClientConfig.getClientSocketRcvBufSize());
if (nettyClientConfig.enableNative()) {
if (PlatformDependent.isOsx()) {
if (LOGGER.isInfoEnabled()) {
LOGGER.info("client run on macOS");
}
} else {
bootstrap.option(EpollChannelOption.EPOLL_MODE, EpollMode.EDGE_TRIGGERED)
    .option(EpollChannelOption.TCP_QUICKACK, true);
}
}
bootstrap.handler(new ChannelInitializer<SocketChannel>() {
@Override
public void initChannel(SocketChannel ch) {
ChannelPipeline pipeline = ch.pipeline();
pipeline.addLast(new IdleStateHandler(
        nettyClientConfig.getChannelMaxReadIdleSeconds(),
        nettyClientConfig.getChannelMaxWriteIdleSeconds(),
        nettyClientConfig.getChannelMaxAllIdleSeconds()))
    .addLast(new ProtocolV1Decoder())
    .addLast(new ProtocolV1Encoder());
if (channelHandlers != null) {
addChannelPipelineLast(ch, channelHandlers);
}
}
});
if (initialized.compareAndSet(false, true) && LOGGER.isInfoEnabled()) {
LOGGER.info("NettyClientBootstrap has started");
}
}
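The factory-plus-executor-group pairing from start() can also be exercised in isolation. The sketch below builds a DefaultEventExecutorGroup the same way; the worker count and thread prefix are placeholders, since Seata derives both from NettyClientConfig.

import io.netty.util.concurrent.DefaultEventExecutorGroup;
import io.seata.common.thread.NamedThreadFactory;

public class ExecutorGroupDemo {
    public static void main(String[] args) {
        int workers = 4; // placeholder; Seata uses nettyClientConfig.getClientWorkerThreads()
        DefaultEventExecutorGroup group = new DefaultEventExecutorGroup(
                workers, new NamedThreadFactory("NettyClientWorkerThread", workers));
        // The task's thread name carries the prefix passed to the factory.
        group.submit(() -> System.out.println("running on " + Thread.currentThread().getName()));
        group.shutdownGracefully();
    }
}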
Use of io.seata.common.thread.NamedThreadFactory in project seata by seata.
The class ProtocolV1SerializerTest, method testAll.
@Test
public void testAll() {
ProtocolV1Server server = new ProtocolV1Server();
ProtocolV1Client client = new ProtocolV1Client();
try {
server.start();
client.connect("127.0.0.1", 8811, 500);
Assertions.assertTrue(client.channel.isActive());
Map<String, String> head = new HashMap<>();
head.put("tracerId", "xxadadadada");
head.put("token", "adadadad");
head.put("hello", null);
BranchCommitRequest body = new BranchCommitRequest();
body.setBranchId(12345L);
body.setApplicationData("application");
body.setBranchType(BranchType.AT);
body.setResourceId("resource-1234");
body.setXid("xid-1234");
// total number of requests to send in the test
int runTimes = 100000;
final int threads = 50;
final CountDownLatch cnt = new CountDownLatch(runTimes);
final AtomicInteger tag = new AtomicInteger(0);
final AtomicInteger success = new AtomicInteger(0);
// direct handoff, no queue
final ThreadPoolExecutor service1 = new ThreadPoolExecutor(threads, threads, 0L, TimeUnit.MILLISECONDS,
    new SynchronousQueue<>(), new NamedThreadFactory("client-", false));
for (int i = 0; i < threads; i++) {
service1.execute(() -> {
while (tag.getAndIncrement() < runTimes) {
try {
Future future = client.sendRpc(head, body);
RpcMessage resp = (RpcMessage) future.get(10, TimeUnit.SECONDS);
if (resp != null) {
success.incrementAndGet();
}
} catch (Exception e) {
LOGGER.error("Client send error", e);
} finally {
cnt.countDown();
}
}
});
}
cnt.await();
// all requests are done; release the non-daemon pool threads
service1.shutdownNow();
LOGGER.info("success {}/{}", success.get(), runTimes);
Assertions.assertEquals(runTimes, success.get());
} catch (InterruptedException e) {
LOGGER.error("Thread interrupted", e);
} finally {
client.close();
server.stop();
}
}
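Note the different constructor here: new NamedThreadFactory("client-", false) takes a boolean instead of a pool size. By the usual convention for this idiom (an assumption, since the factory source is not shown on this page) the flag selects whether the created threads are daemons. A quick check:

import java.util.concurrent.ThreadFactory;
import io.seata.common.thread.NamedThreadFactory;

public class DaemonFlagDemo {
    public static void main(String[] args) throws InterruptedException {
        // false mirrors the test above; true would make the threads daemons (assumed semantics)
        ThreadFactory factory = new NamedThreadFactory("client-", false);
        Thread t = factory.newThread(
                () -> System.out.println("hello from " + Thread.currentThread().getName()));
        System.out.println("daemon? " + t.isDaemon());
        t.start();
        t.join();
    }
}

Non-daemon worker threads are also why the shutdownNow() call after cnt.await() matters: without it, the fifty core pool threads would keep the JVM alive after the test method returns.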