Reputation: 438
Here is a working example of an OpenResty nginx.conf file. In this example I query Redis multiple times: first to check whether the domain is allowed to get an SSL certificate, then again to find out which backend to proxy the request to, and now that I've added an S3 proxy I would need to query Redis yet again. I'm new to OpenResty and Lua, and I wonder whether it is possible to fetch the data from Redis once and reuse it multiple times across the script?
user www-data;
worker_processes auto;
pid /run/openresty.pid;

events {
    worker_connections 1024;
}

error_log /var/log/openresty/error.log debug;

http {
    resolver 127.0.0.53 ipv6=off;

    lua_shared_dict acme 16m;

    init_by_lua_block {
        require("resty.acme.autossl").init({
            tos_accepted = true,
            staging = true,
            account_key_path = "/etc/openresty/account.key",
            account_email = "[email protected]",
            domain_whitelist_callback = function(domain)
                local redis = require "resty.redis"
                local rds = redis:new()
                local ok, err = rds:connect("127.0.0.1", 6379)
                if not ok then
                    ngx.log(ngx.ERR, "failed to connect to redis: ", err)
                    return ngx.exit(500)
                end
                local res, err = rds:exists(domain)
                if res == 1 then
                    return true
                end
                if res == 0 then
                    return false
                end
            end
        })
    }

    init_worker_by_lua_block {
        require("resty.acme.autossl").init_worker()
    }

    server {
        access_log /var/log/openresty/access.log;

        listen 80;
        listen 443 ssl;
        server_name _;
        location / {
            set $backend '';
            set $tenant '';

            access_by_lua '
                local domain = ngx.req.get_headers()["Host"]
                if not domain then
                    ngx.log(ngx.ERR, "message 404 missing")
                    return ngx.exit(404)
                end
                local key = "site:" .. domain
                local redis = require "resty.redis"
                local rds = redis:new()
                local ok, err = rds:connect("127.0.0.1", 6379)
                if not ok then
                    ngx.log(ngx.ERR, "failed to connect to redis: ", err)
                    return ngx.exit(500)
                end
                local all, err = rds:hgetall(key)
                if not all then
                    ngx.log(ngx.ERR, "no komprende: ", err)
                    return ngx.exit(505)
                end
                if all == ngx.null then
                    ngx.log(ngx.ERR, "no host found for key ", key)
                    return ngx.exit(404)
                end
                local result = {}
                for i = 1, #all, 2 do
                    result[all[i]] = all[i+1]
                end
                ngx.var.backend = result["backend"]
                ngx.var.tenant = result["tenantID"]
                ngx.log(ngx.ERR, "uhm: ", ngx.var.backend)
            ';

            add_header X-TenantID $tenant always;
            proxy_pass http://$backend;
        }
        location ~* ^/static/(.*) {
            resolver 127.0.0.53 valid=300s;
            resolver_timeout 10s;

            set $s3_bucket 'drasha.ams3.digitaloceanspaces.com';
            set $url_full '$1';

            proxy_http_version 1.1;
            proxy_set_header Host $s3_bucket;
            proxy_set_header Authorization '';
            proxy_hide_header x-amz-id-2;
            proxy_hide_header x-amz-request-id;
            proxy_hide_header Set-Cookie;
            proxy_ignore_headers "Set-Cookie";
            proxy_buffering off;
            proxy_intercept_errors on;

            proxy_pass http://$s3_bucket/AYAYA/$url_full;
        }

        lua_ssl_trusted_certificate /etc/ssl/certs/ca-certificates.crt;
        lua_ssl_verify_depth 2;

        ssl_certificate /etc/openresty/default.pem;
        ssl_certificate_key /etc/openresty/default.key;

        ssl_certificate_by_lua_block {
            require("resty.acme.autossl").ssl_certificate()
        }

        location /.well-known {
            content_by_lua_block {
                require("resty.acme.autossl").serve_http_challenge()
            }
        }
    }
}
Upvotes: 0
Views: 2699
Reputation: 3064
OpenResty runs Lua hooks in a sandboxed environment, so you cannot use global variables to share data.
You should use Data Sharing within an Nginx Worker. The usual practice is to cache things at the Lua module level, possibly with some reasonable expiration period if the data stored in Redis may change.
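One way to get such an expiration period is the resty.lrucache module that ships with OpenResty. A minimal sketch of its use (the key, the value and the 60-second TTL here are made up purely for illustration):

local lrucache = require "resty.lrucache"

-- one cache per worker, created when the enclosing module is loaded
local cache = assert(lrucache.new(200))  -- keep at most 200 entries

-- illustrative key/value; expire the entry after 60 seconds
cache:set("site:example.com", { backend = "10.0.0.5:8080" }, 60)
local site = cache:get("site:example.com")  -- returns nil once the TTL has passed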
BTW, don't use the XXX_by_lua directives, because with them you have to take care of nginx string escaping rules; use XXX_by_lua_block instead.
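For example, the access handler from the question could be declared with the block form, where the Lua code is passed through verbatim and no quoting or escaping is needed (only a sketch of the directive form, not a complete handler):

access_by_lua_block {
    -- plain Lua here, no escaping required
    local domain = ngx.var.host
    if not domain then
        return ngx.exit(404)
    end
    -- ... the rest of the original Lua code ...
}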
Additional example:
local redis = require "resty.redis"

-- the module
local _M = {}

-- per-worker cache: a required module is loaded only once per worker,
-- so this table lives for the lifetime of the worker process
local hgetall_results = {}

_M.hgetall = function(key)
    if hgetall_results[key] then
        return hgetall_results[key]
    end
    local rds = redis:new()
    local ok, err = rds:connect("127.0.0.1", 6379)
    local all, err = rds:hgetall(key)
    -- convert the flat {field1, value1, field2, value2, ...} reply into a hash table
    local result = {}
    for i = 1, #all, 2 do
        result[all[i]] = all[i+1]
    end
    -- cache
    hgetall_results[key] = result
    return result
end

return _M
The example above just illustrates the usual module-scoped cache pattern; error handling is up to you.
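As a rough sketch of how the module could then be wired into the configuration from the question (the module name siteinfo and its location on lua_package_path are assumptions; any name works as long as require() can find it):

# in the http block, so require() can find the module (path is an assumption)
lua_package_path "/etc/openresty/lua/?.lua;;";

location / {
    set $backend '';
    set $tenant '';

    access_by_lua_block {
        local domain = ngx.var.host
        if not domain then
            return ngx.exit(404)
        end
        -- the first call in a worker hits Redis; later calls reuse the cached table
        local site = require("siteinfo").hgetall("site:" .. domain)
        ngx.var.backend = site["backend"]
        ngx.var.tenant = site["tenantID"]
    }

    add_header X-TenantID $tenant always;
    proxy_pass http://$backend;
}

The domain_whitelist_callback could call the same require("siteinfo").hgetall(...) (or a similar cached helper for the exists check), so both places reuse the data fetched once per worker instead of hitting Redis separately each time.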
Upvotes: 1