# Load the necessary dynamic modules.
# This is FreeBSD-specific, and requires the LUA and LUASTREAM build-time
# options for the www/nginx port. Other OSes will have different
# requirements.
# The 'openresty' bundle, available on various Linux distributions, comes
# with all of these modules, so you probably won't need these load_module
# lines.
load_module "/usr/local/libexec/nginx/ndk_http_module.so";
load_module "/usr/local/libexec/nginx/ngx_stream_module.so";
load_module "/usr/local/libexec/nginx/ngx_http_lua_module.so";
load_module "/usr/local/libexec/nginx/ngx_stream_lua_module.so";

# Adjust if you have thousands of nodes...
worker_processes 2;

events {
    # Ditto; simultaneous connections per worker, and we really don't need
    # many. For reference, several (tens of) thousands is common for a busy
    # webserver; clearly not our case here.
    worker_connections 32;
}

# We use the 'stream' service, which is essentially just a TCP proxy with
# some bells and whistles.
stream {
    # You may want to enable the next few lines in order to get access logs.
    # I've disabled them by default; if you enable logging, make sure you
    # point the log file to a directory that 1) exists and 2) is writable
    # by the nginx worker processes.
    #log_format basic '$remote_addr [$time_local] '
    #                 '$protocol $status $bytes_sent $bytes_received '
    #                 '$session_time';
    #access_log /var/log/nginx/stream.log basic;

    # Make sure TCP_NODELAY is set; this prevents unnecessary waits in order
    # to fill data segments prior to sending. This is an interactive service,
    # so any delay would make it seem like a slower BBS/phone line than it
    # really is :)
    tcp_nodelay on;

    # Define our upstreams - the BBS nodes/machines themselves. In my case,
    # there is only one, since all my BBS nodes live on the same machine. If
    # yours are distributed across several machines, add them here.
    upstream bbs {
        # Using a hostname; IP might be preferable. I limit the connection
        # count to the number of virtual modems on my OS/2 installation ..
        # IMPORTANT! If you have a different number of nodes behind a single
        # IP, change this number AND change the corresponding setting in
        # bbs_math.lua!
        server 192.88.99.15:8023 max_conns=2;
    }

    # Another upstream - the floppy.museum guestbook:
    upstream museum {
        server 192.88.99.42:23 max_conns=1;
        # server 192.88.99.43:23 max_conns=1;
    }

    # Create a shared DICT (an in-memory database) to keep track of active
    # users that have passed the challenge. This will be used to give a
    # sensible response to connections made when all nodes are busy.
    lua_shared_dict users 64k;

    # Define our actual listeners, math quiz and all.
    server {
        # Listen on port 23 (duh); the whole point of this exercise is to be
        # able to run a BBS on old hardware, answering on port 23, without
        # spending all our time dealing with garbage from the internet.
        listen 23;

        # The real magic: a block of Lua code which takes over the
        # connection before it gets proxied to the BBS nodes.
        # See https://anduin.net/~ltning/bbs/bbs_math.lua ..
        # We start by creating an Nginx variable, which will be used from
        # the script:
        set $bbs_challenge_passed 0;
        # Then we call the script:
        preread_by_lua_file bbs_math.lua;

        # Assuming the user has passed the challenge, we now have an active
        # connection, and the counter has presumably been increased (see the
        # bbs_math.lua file above). We need to decrease it again once the
        # session ends.
        # We register a Lua function to be run at the end of the connection
        # (during the "log" phase), which triggers only if the challenge
        # was passed.
        log_by_lua_block {
            if tonumber(ngx.var.bbs_challenge_passed) == 1 then
                local users = ngx.shared.users
                users:incr("active", -1, 1)
                -- For debugging, uncomment the following line. The output
                -- lands in nginx' error log file.
                -- ngx.log(ngx.ERR, "Currently active users: ", users:get("active"))
            end
        }

        # Don't wait too long when we attempt to connect to the BBS. If it
        # doesn't want to talk to us (dead?), we drop the connection.
        proxy_connect_timeout 10s;

        # Set the size of the buffer used for input/output. The default
        # (16k) is much too large for our use case. Experiment with this
        # when using file transfer protocols with large block sizes.
        proxy_buffer_size 1k;

        # If there is *no* activity on an established connection (between
        # the client and the actual BBS), close the connection after this
        # many seconds.
        proxy_timeout 600s;

        # FOR NEWER Synchronet ONLY:
        # Use the PROXY protocol to connect; this will pass the real IP of
        # the connecting client to the upstream (BBS). SBBS needs to be
        # configured to accept PROXY type inbound connections. Set to
        # 'on' to enable this.
        proxy_protocol off;

        # Assuming we didn't bomb out during the math challenge, we pass the
        # connection to "bbs" (defined in the "upstream" section above).
        proxy_pass bbs;
    }

    server {
        listen 8023;

        set $museum_challenge_passed 0;
        preread_by_lua_file museum_math.lua;

        log_by_lua_block {
            if tonumber(ngx.var.museum_challenge_passed) == 1 then
                local users = ngx.shared.users
                users:incr("active", -1, 1)
                -- For debugging, uncomment the following line. The output
                -- lands in nginx' error log file.
                -- ngx.log(ngx.ERR, "Currently active users: ", users:get("active"))
            end
        }

        proxy_connect_timeout 5s;
        proxy_buffer_size 1;
        proxy_timeout 60s;
        proxy_protocol off;
        proxy_pass museum;
    }
}

http {
    include mime.types;
    default_type application/octet-stream;
    sendfile on;

    reset_timedout_connection off;
    client_body_timeout 10s;
    send_timeout 2s;
    lingering_timeout 5s;
    client_header_timeout 5s;
    keepalive_timeout 65;

    server {
        listen 443 ssl;
        server_name bbs.anduin.net;
        ssl_certificate_key /usr/local/etc/ssl/acme/bbs.anduin.net/cert.key;
        ssl_certificate /usr/local/etc/ssl/acme/bbs.anduin.net/cert.fullchain;

        location / {
            allow all;
            proxy_pass http://192.88.99.15:80;
        }
    }
}
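
# Not shown here: bbs_math.lua and museum_math.lua. Grab the real script from
# https://anduin.net/~ltning/bbs/bbs_math.lua. As a rough, untested sketch -
# assuming the stream flavour of the Lua module and its shared-dict API - the
# gatekeeping side of such a preread script boils down to the commented-out
# block below. The math challenge itself is deliberately left out, and the
# 'max_nodes' name is just a stand-in that must match max_conns on the 'bbs'
# upstream above (the museum variant would follow the same pattern with its
# own variable).
#
# preread_by_lua_block {
#     local users = ngx.shared.users      -- declared via lua_shared_dict above
#     local max_nodes = 2                 -- keep in sync with max_conns
#
#     -- Refuse if every node is already taken.
#     local active = users:get("active") or 0
#     if active >= max_nodes then
#         return ngx.exit(ngx.ERROR)      -- drop; the real script says goodbye first
#     end
#
#     -- ...ask the math question and check the answer here (see the real
#     -- bbs_math.lua); bail out with ngx.exit(ngx.ERROR) on a wrong answer...
#
#     -- Challenge passed: count the user in, and flag the session so the
#     -- log_by_lua_block above knows to count them back out later.
#     users:incr("active", 1, 0)
#     ngx.var.bbs_challenge_passed = 1
# }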