How to modify the value of the NGINX Unit backlog parameter? I want to increase it to meet my performance needs. It's time to change the default value; 511 is no longer applicable #1384
Comments
[Screenshots: benchmark results before and after the adjustment] |
The above is my test. The backlog is very important, but I couldn't find where to configure it in NGINX Unit. |
Just to check, are you asking about NGINX Unit or nginx the web server? |
NGINX Unit |
I take it you're using Unit on Linux? As you can see, we hardcode the listen(2) backlog to 511; also, on Linux the maximum you can set this to is controlled via net.core.somaxconn. Currently there is no way to adjust this other than changing #define NXT_LISTEN_BACKLOG 511 in src/nxt_listen_socket.h. However, I see no real reason why we couldn't make it configurable... |
Are you referring to the inability to configure it through listeners, routes, and applications? So we can only manually edit src/nxt_listen_socket.h and recompile, right? |
We'll discuss it with the team. "backlog" looks more concise.
Btw, how does "backlog" apply to "processes"? |
Thank you for the team's efforts. I am a developer from China; there are 700M people using nginx products here. If this works out, I will promote it. |
Not sure I understand, the listen backlog is the second parameter to the listen(2) system-call.
It's a per-socket thing. |
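For reference, a minimal standalone sketch (not Unit's code; the address and numbers are arbitrary) of what "per-socket" means: the backlog is just the second argument passed to listen(2) on each individual listening socket, so every socket can be given its own value.
/* Hypothetical example, not part of Unit: create one listening socket
 * on 127.0.0.1:8080 with a backlog of 1024. The backlog applies only
 * to this socket. */
#include <netinet/in.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

int
open_listener(void)
{
    int                 fd;
    struct sockaddr_in  sin;

    fd = socket(AF_INET, SOCK_STREAM, 0);
    if (fd == -1) {
        return -1;
    }

    memset(&sin, 0, sizeof(sin));
    sin.sin_family = AF_INET;
    sin.sin_port = htons(8080);
    sin.sin_addr.s_addr = htonl(INADDR_LOOPBACK);

    if (bind(fd, (struct sockaddr *) &sin, sizeof(sin)) == -1
        || listen(fd, 1024) == -1)    /* 1024 is this socket's backlog */
    {
        close(fd);
        return -1;
    }

    return fd;
}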
Feel free to give the following patch a spin... (note: it doesn't affect the control socket)
diff --git ./src/nxt_conf_validation.c ./src/nxt_conf_validation.c
index 04091745..267a897d 100644
--- ./src/nxt_conf_validation.c
+++ ./src/nxt_conf_validation.c
@@ -176,6 +176,8 @@ static nxt_int_t nxt_conf_vldt_app_name(nxt_conf_validation_t *vldt,
nxt_conf_value_t *value, void *data);
static nxt_int_t nxt_conf_vldt_forwarded(nxt_conf_validation_t *vldt,
nxt_conf_value_t *value, void *data);
+static nxt_int_t nxt_conf_vldt_listen_backlog(nxt_conf_validation_t *vldt,
+ nxt_conf_value_t *value, void *data);
static nxt_int_t nxt_conf_vldt_app(nxt_conf_validation_t *vldt,
nxt_str_t *name, nxt_conf_value_t *value);
static nxt_int_t nxt_conf_vldt_object(nxt_conf_validation_t *vldt,
@@ -424,6 +426,10 @@ static nxt_conf_vldt_object_t nxt_conf_vldt_listener_members[] = {
.type = NXT_CONF_VLDT_OBJECT,
.validator = nxt_conf_vldt_object,
.u.members = nxt_conf_vldt_client_ip_members
+ }, {
+ .name = nxt_string("backlog"),
+ .type = NXT_CONF_VLDT_NUMBER,
+ .validator = nxt_conf_vldt_listen_backlog,
},
#if (NXT_TLS)
@@ -2677,6 +2683,28 @@ nxt_conf_vldt_forwarded(nxt_conf_validation_t *vldt, nxt_conf_value_t *value,
}
+static nxt_int_t
+nxt_conf_vldt_listen_backlog(nxt_conf_validation_t *vldt,
+ nxt_conf_value_t *value, void *data)
+{
+ int64_t backlog;
+
+ backlog = nxt_conf_get_number(value);
+
+ if (backlog < 1) {
+ return nxt_conf_vldt_error(vldt, "The \"backlog\" number must be "
+ "equal to or greater than 1.");
+ }
+
+ if (backlog > NXT_INT32_T_MAX) {
+ return nxt_conf_vldt_error(vldt, "The \"backlog\" number must "
+ "not exceed %d.", NXT_INT32_T_MAX);
+ }
+
+ return NXT_OK;
+}
+
+
static nxt_int_t
nxt_conf_vldt_app(nxt_conf_validation_t *vldt, nxt_str_t *name,
nxt_conf_value_t *value)
diff --git ./src/nxt_router.c ./src/nxt_router.c
index 43209451..5842bcff 100644
--- ./src/nxt_router.c
+++ ./src/nxt_router.c
@@ -166,7 +166,7 @@ static void nxt_router_app_prefork_ready(nxt_task_t *task,
static void nxt_router_app_prefork_error(nxt_task_t *task,
nxt_port_recv_msg_t *msg, void *data);
static nxt_socket_conf_t *nxt_router_socket_conf(nxt_task_t *task,
- nxt_router_temp_conf_t *tmcf, nxt_str_t *name);
+ nxt_router_temp_conf_t *tmcf, nxt_str_t *name, int backlog);
static nxt_int_t nxt_router_listen_socket_find(nxt_router_temp_conf_t *tmcf,
nxt_socket_conf_t *nskcf, nxt_sockaddr_t *sa);
@@ -1959,12 +1959,22 @@ nxt_router_conf_create(nxt_task_t *task, nxt_router_temp_conf_t *tmcf,
next = 0;
for ( ;; ) {
+ int listen_backlog = 0;
+ nxt_conf_value_t *backlog;
+
+ static const nxt_str_t backlog_path = nxt_string("backlog");
+
listener = nxt_conf_next_object_member(listeners, &name, &next);
if (listener == NULL) {
break;
}
- skcf = nxt_router_socket_conf(task, tmcf, &name);
+ backlog = nxt_conf_get_object_member(listener, &backlog_path, NULL);
+ if (backlog != NULL) {
+ listen_backlog = nxt_conf_get_number(backlog);
+ }
+
+ skcf = nxt_router_socket_conf(task, tmcf, &name, listen_backlog);
if (skcf == NULL) {
goto fail;
}
@@ -2684,7 +2694,7 @@ nxt_router_application_init(nxt_router_conf_t *rtcf, nxt_str_t *name,
static nxt_socket_conf_t *
nxt_router_socket_conf(nxt_task_t *task, nxt_router_temp_conf_t *tmcf,
- nxt_str_t *name)
+ nxt_str_t *name, int backlog)
{
size_t size;
nxt_int_t ret;
@@ -2728,7 +2738,7 @@ nxt_router_socket_conf(nxt_task_t *task, nxt_router_temp_conf_t *tmcf,
nxt_listen_socket_remote_size(ls);
ls->socket = -1;
- ls->backlog = NXT_LISTEN_BACKLOG;
+ ls->backlog = backlog > 0 ? backlog : NXT_LISTEN_BACKLOG;
ls->flags = NXT_NONBLOCK;
ls->read_after_accept = 1;
}
@@ -2875,7 +2885,7 @@ nxt_router_listen_socket_ready(nxt_task_t *task, nxt_port_recv_msg_t *msg,
nxt_socket_defer_accept(task, s, rpc->socket_conf->listen->sockaddr);
- ret = nxt_listen_socket(task, s, NXT_LISTEN_BACKLOG);
+ ret = nxt_listen_socket(task, s, rpc->socket_conf->listen->backlog);
if (nxt_slow_path(ret != NXT_OK)) {
goto fail;
}
E.g.
"listeners": {
"[::1]:8080": {
"pass": "routes",
"backlog": 1024
},
"127.0.0.1:8080": {
"pass": "routes"
}
},
One socket, "::1", is set to a backlog of 1024; the other, "127.0.0.1", is not set and uses the default; and the control socket just uses the default. |
Great, really amazing, brother, but I don't know C syntax or how to recompile and build NGINX Unit. Can you release the entire new and complete version? I will continue to test it. |
How about providing a video tutorial and guidance so that people who don't know the C language can build and compile their own NGINX Unit? That would definitely be the most perfect. By the way, I am a PHP developer; PHP is very practical, but its entry threshold is low, so many people don't know how to use C at a lower level (including me). However, I believe PHP is the best web language, and 80% of Chinese developers also think so. There are about 2 million PHP developers in China. |
OK, no problem. Hopefully we can get this into the next release which is happening Real Soon Now(tm). |
Hmm, just something interesting... In src/nxt_listen_socket.h we have: #if (NXT_FREEBSD || NXT_MACOSX || NXT_OPENBSD)
/*
* A backlog is limited by system-wide sysctl kern.ipc.somaxconn.
* This is supported by FreeBSD 2.2, OpenBSD 2.0, and MacOSX.
*/
#define NXT_LISTEN_BACKLOG -1
#else
/*
* Linux, Solaris, and NetBSD treat negative value as 0.
* 511 is a safe default.
*/
#define NXT_LISTEN_BACKLOG 511
#endif
However, on at least Linux 2.6.12-rc5 (as far back as the main repo goes, and I very much doubt we care about anything even remotely that old), Linux does not treat a negative value as 0; it does the same as the BSDs and uses the setting of net.core.somaxconn:
int __sys_listen(int fd, int backlog)
{
...
if ((unsigned int)backlog > somaxconn)
backlog = somaxconn;
...
}
We pass in an int. So a first step could be to adjust the above to include Linux in setting the default backlog to -1. Maybe. FreeBSD seems to default to a value of 128; on Linux since 5.4 it defaults to 4096 (previously 128)... this would also change it for the control socket... Then again, https://lore.kernel.org/netdev/[email protected]/ it sounds good! Fun, fun, fun in the sun, sun sun... |
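To make the clamping concrete, here is a small standalone sketch (hypothetical values, not Unit or kernel code) of the comparison quoted above: because the kernel casts the backlog to unsigned int before comparing it with somaxconn, -1 wraps around to UINT_MAX and always gets clamped down to net.core.somaxconn, which is why -1 effectively means "use the OS default".
/* Standalone illustration of the clamp in net/socket.c::__sys_listen().
 * The somaxconn value below is a made-up example; on a real system it
 * comes from the net.core.somaxconn sysctl. */
#include <stdio.h>

static int
clamp_backlog(int backlog, unsigned int somaxconn)
{
    /* same comparison as the kernel snippet above */
    if ((unsigned int) backlog > somaxconn) {
        backlog = somaxconn;
    }

    return backlog;
}

int
main(void)
{
    unsigned int  somaxconn = 4096;    /* e.g. the Linux >= 5.4 default */

    printf("listen(fd, 511)  -> effective backlog %d\n",
           clamp_backlog(511, somaxconn));
    printf("listen(fd, -1)   -> effective backlog %d\n",
           clamp_backlog(-1, somaxconn));
    printf("listen(fd, 8192) -> effective backlog %d\n",
           clamp_backlog(8192, somaxconn));

    return 0;
}
Running this prints 511, 4096 and 4096, matching the behaviour described above.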
Yes, 511 is outdated and limits server capabilities. Modern servers and new versions of PHP are becoming more and more powerful. It is time to adjust the backlog default value to maximize performance. |
@oopsoop2 Out of interest, what Linux Kernel version are you running Unit under? |
Ok, good, if we change the default on Linux to use the OS's default, then you shouldn't actually need to do anything (unless you need to go higher than 4096...) |
ok, i got it |
@oopsoop2 on GitHub reported a performance issue related to the default listen(2) backlog size of 511 on nginx. They found that increasing it helped; nginx has a config option to configure this.

They would like to be able to do the same on Unit (which also defaults to 511 on some systems, incl Linux). This seems reasonable.

This adds a new per-listener 'backlog' config option, e.g

  {
      "listeners": {
          "[::1]:8080": {
              "pass": "routes",
              "backlog": 1024
          },
      }
      ...
  }

This doesn't affect the control socket.

Closes: nginx#1384
Reported-by: <https://github.com/oopsoop2>
Signed-off-by: Andrew Clayton <[email protected]>
RHEL 8 still ships with a 4.x series kernel. Can we handle that by only setting it to |
I actually cover that here
Also
|
Oh yep, you do. Fire away. |
On FreeBSD, OpenBSD & macOS we use a default listen(2) backlog of -1, which means use the OS's default value. On Linux (and others) we used a hard coded value of 511, presumably due to this comment

  /* Linux, Solaris, and NetBSD treat negative value as 0. */

On Linux (at least since 2.4) this is wrong: Linux treats -1 (and so on) as use the OS's default (net.core.somaxconn). See this code in net/socket.c::__sys_listen()

  if ((unsigned int)backlog > somaxconn)
          backlog = somaxconn;

On Linux prior to 5.4 somaxconn defaulted to 128; since 5.4 it defaults to 4096.

We've had complaints that a listen backlog of 511 is too small. This would help in those cases. Unless they are on an old Kernel, in which case it's worse, but then the plan is to also make this configurable.

This would affect RHEL 8, which is based on 4.10, however they seem to set somaxconn to 2048, so that's fine.

Another advantage of using -1 is that we will automatically keep up to date with the kernel's default value.

Before this change

  $ ss -tunxlp | grep unit
  Netid State  Recv-Q Send-Q Local Address:Port                Peer Address:Port Process
  u_str LISTEN 0      511    /opt/unit/control.unit.sock.tmp 4302333  * 0  users:(("unitd",pid=18290,fd=6),("unitd",pid=18289,fd=6),("unitd",pid=18287,fd=6))
  tcp   LISTEN 0      511    127.0.0.1:8080                    0.0.0.0:*       users:(("unitd",pid=18290,fd=12))
  tcp   LISTEN 0      511    [::1]:8080                        [::]:*          users:(("unitd",pid=18290,fd=11))

After

  $ ss -tunxlp | grep unit
  Netid State  Recv-Q Send-Q Local Address:Port                Peer Address:Port Process
  u_str LISTEN 0      4096   /opt/unit/control.unit.sock.tmp 5408464  * 0  users:(("unitd",pid=132442,fd=6),("unitd",pid=132441,fd=6),("unitd",pid=132439,fd=6))
  tcp   LISTEN 0      4096   127.0.0.1:8080                    0.0.0.0:*       users:(("unitd",pid=132442,fd=12))
  tcp   LISTEN 0      4096   [::1]:8080                        [::]:*          users:(("unitd",pid=132442,fd=11))

Link: <nginx#1384>
Link: <https://lore.kernel.org/netdev/[email protected]/>
Signed-off-by: Andrew Clayton <[email protected]>
@oopsoop2 on GitHub reported a performance issue related to the default listen(2) backlog size of 511 on nginx. They found that increasing it helped; nginx has a config option to configure this.

They would like to be able to do the same on Unit (which also defaults to 511 on some systems). This seems reasonable.

NOTE: On Linux before commit 97c15fa ("socket: Use a default listen backlog of -1 on Linux") we defaulted to 511. Since that commit we default to the Kernel's default, which before 5.4 is 128 and after is 4096.

This adds a new per-listener 'backlog' config option, e.g

  {
      "listeners": {
          "[::1]:8080": {
              "pass": "routes",
              "backlog": 1024
          },
      }
      ...
  }

This doesn't affect the control socket.

Closes: nginx#1384
Reported-by: <https://github.com/oopsoop2>
Signed-off-by: Andrew Clayton <[email protected]>
Reasons for asking this question: I designed a web server test in which the server answers as fast as possible, but Windows was still dropping a few requests even though the server answered as fast as possible. That's because clients disconnected when the backlog was full: modern servers fetch more than 511 resources at a time, and the nginx/php-fpm backlog constant (#define NGX_LISTEN_BACKLOG 511) was hard coded to 511. Calling listen(2) in my server with a queue size larger than 511 solved the issue.
And the rate at which new connections can be accepted is equal to the number of entries which can fit on the listen queue divided by the average length of time each entry spends on the queue. Therefore, the larger the queue, the greater the rate at which new connection requests can be accepted. So how do I adjust the connection backlog size? php-fpm (listen.backlog) and nginx (listen ip:80 backlog=xxx) can be set, but I cannot find this setting in the NGINX Unit configuration.
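As a back-of-the-envelope illustration of that rate argument (the 100 ms average wait below is an assumed figure, not a measurement):
/* Hypothetical arithmetic only: maximum sustainable accept rate is
 * roughly the queue capacity divided by the average time each entry
 * spends on the queue. */
#include <stdio.h>

int
main(void)
{
    const double  avg_wait = 0.1;                   /* seconds per entry (assumed) */
    const int     backlogs[] = { 511, 1024, 4096 };
    size_t        i;

    for (i = 0; i < sizeof(backlogs) / sizeof(backlogs[0]); i++) {
        printf("backlog %4d -> ~%.0f new connections/s\n",
               backlogs[i], backlogs[i] / avg_wait);
    }

    return 0;
}
Under that assumption a backlog of 511 caps the accept rate at roughly 5110 connections/s, while 4096 allows roughly 40960 connections/s, which is the asker's point about larger queues.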