Skip to content

Commit

Permalink
feat(module): new method peer_conn.get_last_peer_connection_cached (#…
Browse files Browse the repository at this point in the history
…82)

FTI-5616

---------

Signed-off-by: tzssangglass <[email protected]>
  • Loading branch information
tzssangglass authored Mar 18, 2024
1 parent 71c3232 commit d1f8aaf
Show file tree
Hide file tree
Showing 5 changed files with 233 additions and 0 deletions.
52 changes: 52 additions & 0 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -28,6 +28,7 @@ Table of Contents
* [resty.kong.tag.get](#restykongtagget)
* [resty.kong.log.set\_log\_level](#restykonglogset_log_level)
* [resty.kong.log.get\_log\_level](#restykonglogget_log_level)
* [resty.kong.peer_conn.get\_last\_peer\_connection\_cached](#restykongpeer_connget_last_peer_connection_cached)
* [License](#license)

Description
Expand Down Expand Up @@ -476,6 +477,57 @@ for the possible value of `level`.

[Back to TOC](#table-of-contents)

resty.kong.peer_conn.get\_last\_peer\_connection\_cached
----------------------------------
**syntax:** *res = resty.kong.peer_conn.get_last_peer_connection_cached()*

**context:** *balancer_by_lua*

**subsystems:** *http*

This function retrieves information about whether the connection used in the previous attempt came from the upstream connection pool when the next_upstream retrying mechanism is in action.

The possible results are as follows:
- `false`: Indicates the connection was not reused from the upstream connection pool, meaning a new connection was created with the upstream in the previous attempt.
- `true`: Indicates the connection was reused from the upstream connection pool, meaning no new connection was created with the upstream in the previous attempt.

After applying the [dynamic upstream keepalive patch](https://github.com/Kong/kong/blob/3.6.0/build/openresty/patches/ngx_lua-0.10.26_01-dyn_upstream_keepalive.patch),
Nginx's upstream module attempts to retrieve connections from the upstream connection pool for each retry.
If the obtained connection is deemed unusable, Nginx considers that retry invalid and performs a compensatory retry.

Since each retry triggers the `balancer_by_lua` phase, the number of retries logged in Lua land during this phase may exceed the maximum limit set by `set_more_tries`.

Using this function in the `balancer_by_lua` phase allows for determining if the connection used in the previous retry was taken from the connection pool.
If the return value is `true`, it indicates that the connection from the pool used in the previous retry was unusable, rendering that retry void.

The value returned by this function helps in accurately assessing the actual number of new connections established with the upstream server during the retry process in the `balancer_by_lua` phase.

Example:
```lua
balancer_by_lua_block {
local ctx = ngx.ctx

ctx.tries = ctx.tries or {}
local tries = ctx.tries

-- update the number of tries
local try_count = #tries + 1

    -- fetch whether the connection of the previous attempt was reused
    -- from the upstream connection pool
    local peer_conn = require("resty.kong.peer_conn")

    -- create the entry for the current try, then record the result
    -- of the previous one (there is no previous try on the first attempt)
    tries[try_count] = { cached = false }
    if try_count > 1 then
        tries[try_count - 1].cached = peer_conn.get_last_peer_connection_cached()
    end

...
}
```

[Back to TOC](#table-of-contents)

License
=======
Expand Down
1 change: 1 addition & 0 deletions config
Original file line number Diff line number Diff line change
Expand Up @@ -9,6 +9,7 @@ ngx_module_srcs=" \
$ngx_addon_dir/src/ngx_http_lua_kong_log.c \
$ngx_addon_dir/src/ngx_http_lua_kong_log_handler.c \
$ngx_addon_dir/src/ngx_http_lua_kong_vars.c \
$ngx_addon_dir/src/ngx_http_lua_kong_peer_connection.c \
$ngx_addon_dir/src/ssl/ngx_lua_kong_ssl.c \
"

Expand Down
42 changes: 42 additions & 0 deletions lualib/resty/kong/peer_conn.lua
Original file line number Diff line number Diff line change
@@ -0,0 +1,42 @@
local ffi = require "ffi"
local base = require "resty.core.base"

local get_request = base.get_request
local errmsg = base.get_errmsg_ptr()
local C = ffi.C
local ffi_str = ffi.string
local get_phase = ngx.get_phase
local NGX_ERROR = ngx.ERROR

local error = error


ffi.cdef[[
int
ngx_http_lua_kong_ffi_get_last_peer_connection_cached(ngx_http_request_t *r,
    char **err);
]]


--- Returns whether the peer connection used by the previous balancer
-- attempt was reused from the upstream keepalive connection pool.
--
-- Must be called from the balancer phase; any other phase raises an error.
--
-- @treturn boolean true when the previous attempt's connection came from
--   the pool, false when a new connection was created.
-- @raise when called outside the balancer phase, when no request is
--   available, or when the underlying FFI call fails (e.g. no upstream).
local function get_last_peer_connection_cached()
    if get_phase() ~= "balancer" then
        -- level 2 blames the caller, consistent with the NGX_ERROR branch
        error("get_last_peer_connection_cached() can only be called in balancer phase", 2)
    end

    local r = get_request()
    if not r then
        error("no request found", 2)
    end

    local rc = C.ngx_http_lua_kong_ffi_get_last_peer_connection_cached(r, errmsg)

    if rc == NGX_ERROR then
        -- errmsg was filled in by the C side; propagate it to the caller
        error(ffi_str(errmsg[0]), 2)
    end

    -- rc is the value of r->upstream->peer.cached (0 or 1)
    return rc == 1
end

return {
    get_last_peer_connection_cached = get_last_peer_connection_cached,
}
33 changes: 33 additions & 0 deletions src/ngx_http_lua_kong_peer_connection.c
Original file line number Diff line number Diff line change
@@ -0,0 +1,33 @@
/**
* Copyright 2019-2024 Kong Inc.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/


#include "ngx_http_lua_kong_common.h"


int
ngx_http_lua_kong_ffi_get_last_peer_connection_cached(ngx_http_request_t *r,
    char **err)
{
    /*
     * Report whether the peer connection picked for the previous
     * balancer attempt was taken from the keepalive pool.
     *
     * Returns 0/1 (the value of r->upstream->peer.cached), or
     * NGX_ERROR with *err set when the request has no upstream yet.
     */
    ngx_http_upstream_t  *u = r->upstream;

    if (u == NULL) {
        *err = "no upstream found";
        return NGX_ERROR;
    }

    ngx_log_debug1(NGX_LOG_DEBUG_HTTP, r->connection->log, 0,
                   "last_peer_connection_cached %d", u->peer.cached);

    return u->peer.cached;
}
105 changes: 105 additions & 0 deletions t/011-get_last_peer_connection_cached.t
Original file line number Diff line number Diff line change
@@ -0,0 +1,105 @@
# vim:set ft= ts=4 sw=4 et:

use Test::Nginx::Socket::Lua;
use Cwd qw(cwd);

log_level('debug');
repeat_each(1);

plan tests => repeat_each() * (blocks() * 2);

my $pwd = cwd();

no_long_string();
run_tests();


__DATA__

=== TEST 1: sanity
# NOTE: the original had two `--- http_config` sections; Test::Nginx keeps
# only the last occurrence of a section, silently dropping lua_package_path.
# They are merged into a single section here.
--- http_config
    lua_package_path "../lua-resty-core/lib/?.lua;lualib/?.lua;;";
    lua_shared_dict request_counter 1m;
    upstream my_upstream {
        server 127.0.0.1;
        balancer_by_lua_block {
            local peer_conn = require("resty.kong.peer_conn")
            local last_peer_connection_cached = peer_conn.get_last_peer_connection_cached()

            local balancer = require "ngx.balancer"
            local host = "127.0.0.1"
            local port = 8090;

            local pool = host .. "|" .. port
            local pool_opts = {
                pool = pool,
                pool_size = 512,
            }

            local ok, err = balancer.set_current_peer(host, port, pool_opts)
            if not ok then
                ngx.log(ngx.ERR, "failed to set the current peer: ", err)
                return ngx.exit(500)
            end

            balancer.set_timeouts(60000, 60000, 60000)

            local ok, err = balancer.enable_keepalive(60, 100)
            if not ok then
                ngx.log(ngx.ERR, "failed to enable keepalive: ", err)
                return ngx.exit(500)
            end
        }
    }

    server {
        listen 0.0.0.0:8090;
        location /hello {
            content_by_lua_block{
                local request_counter = ngx.shared.request_counter
                local first_request = request_counter:get("first_request")
                if first_request == nil then
                    request_counter:set("first_request", "yes")
                    ngx.say("hello")
                else
                    ngx.exit(ngx.HTTP_CLOSE)
                end
            }
        }
    }
--- config
    location /hello {
        proxy_pass http://my_upstream;
        proxy_set_header Connection "keep-alive";
    }

    location = /t {
        content_by_lua_block {
            local http = require "resty.http"
            local httpc = http.new()
            local uri = "http://127.0.0.1:" .. ngx.var.server_port
                        .. "/hello"
            local res, err = httpc:request_uri(uri)
            if not res then
                ngx.say(err)
                return
            end
            res, err = httpc:request_uri(uri)
            if not res then
                ngx.say(err)
                return
            end
        }
    }
--- request
GET /t
--- error_code eval
[200, 502]
--- grep_error_log eval
qr/last_peer_connection_cached \d+/
--- grep_error_log_out
last_peer_connection_cached 0
last_peer_connection_cached 0
last_peer_connection_cached 1

0 comments on commit d1f8aaf

Please sign in to comment.