NGinx caching proxy for Ubuntu packages

I have configured NGinx as a caching proxy:

server {
        listen  3128;

        access_log      /var/log/nginx/cache-access.log combined_hostname;
        error_log       /var/log/nginx/cache-error.log;

        allow   10.0.0.0/8;
        allow   127.0.0.0/8;
        deny    all;

        resolver        127.0.0.1;

        # Merge /pool/ of all upstreams together
        location ~ /pool/(.*) {
                proxy_cache_valid       1y;
                proxy_store     /srv/cache/pool/$1;
                proxy_pass      $scheme://$host$request_uri;
        }

        # Cache things other than the .deb files themselves per host
        location / {
                proxy_cache_valid       1d;
                proxy_store     /srv/cache/$host/$request_uri;
                proxy_pass      $scheme://$host$request_uri;
        }

}

I have also pointed the apt utilities at the cache:

Acquire::http::Proxy "http://dat.host.example.net:3128";
Acquire::https::Proxy "http://dat.host.example.net:3128";

This works, but only for package repositories reached over plain HTTP. Repositories that want to be reached over HTTPS all fail (with complaints about an "invalid header").

What am I doing wrong? For now I have simply set https::Proxy to "DIRECT", but I would like to cache packages regardless of which method is used to download them...

Answer 1

Well, apparently NGinx cannot proxy plain HTTP to HTTPS; its principal author simply says: "use Squid".

Luckily for us, the upstream package repositories that use SSL automatically redirect HTTP to HTTPS, and NGinx's proxying quietly follows that redirect.

There is also an NGinx patch that fills this rather large gap, but for now we do not need it and can stick with the plain NGinx that Ubuntu ships.
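
If you would rather have NGinx itself chase that redirect instead of handing it back to apt, here is a rough sketch (an assumption on my part, not something the setup above requires): intercept the mirror's 30x response and replay the request against its Location header. It relies on the resolver directive, which the question's server { listen 3128; ... } block already sets, and would replace the existing location / block:

location / {
        proxy_store     /srv/cache/$host/$request_uri;
        proxy_pass      $scheme://$host$request_uri;

        # Hand the mirror's redirect to the named location below
        # instead of returning it to apt.
        proxy_intercept_errors  on;
        error_page      301 302 307 = @follow_redirect;
}

location @follow_redirect {
        # Re-issue the request against the Location: header we were given,
        # i.e. the https:// URL of the same resource.
        set $redirect_target    $upstream_http_location;
        proxy_pass      $redirect_target;
}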

Answer 2

Nginx works just fine as a proxy in front of an HTTPS backend. The problems I ran into when doing this were that the requests Nginx sent to the proxied server did not carry the correct SNI (for SSL) or the HTTP Host header, which is often required, depending on the backend server being proxied.

You may need to add a few lines to your Nginx configuration:

# Send SNI information on the SSL request so that the backend can use the correct keys.
proxy_ssl_server_name on;

# Which SSL SNI domain to request from backend
proxy_ssl_name backend.example.com;

# If you're using host based virtual hosting, you'll need to add the `Host` HTTP header:
proxy_set_header Host backend.example.com;

# Optional. Make sure the Nginx <-> Backend connection is using secure HTTPS
proxy_ssl_verify on;
proxy_ssl_trusted_certificate /etc/ssl/certs/ca-certificates.crt;
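
Put together, a minimal location block using these directives could look like the following sketch (backend.example.com is the same placeholder as above; caching directives are omitted):

location / {
    # Talk to the backend over HTTPS; SNI and Host must match what it expects.
    proxy_pass https://backend.example.com;
    proxy_ssl_server_name on;
    proxy_ssl_name backend.example.com;
    proxy_set_header Host backend.example.com;

    # Verify the backend's certificate against the system CA bundle.
    proxy_ssl_verify on;
    proxy_ssl_trusted_certificate /etc/ssl/certs/ca-certificates.crt;
}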

This is what I use. It seems to work, although it may not be any faster. The cache parameters could probably use some tuning, but the main plumbing to the real backend works correctly.

proxy_cache_path /var/cache/nginx/mirror use_temp_path=off keys_zone=mirror:10m max_size=90g inactive=1y levels=1:1:1;

server {
  server_name mirror.example.com;

  listen 80;
  listen [::]:80;

  # Let's Encrypt
  location ^~ /.well-known/acme-challenge/ {
   default_type "text/plain";
   root /var/lib/letsencrypt/webroot;
  }
  location = /.well-known/acme-challenge/ {
   return 404;
  }

  listen 443 ssl;
  listen [::]:443 ssl;
  ssl_certificate     /etc/letsencrypt/live/mirror.example.com/fullchain.pem;
  ssl_certificate_key /etc/letsencrypt/live/mirror.example.com/privkey.pem;

  # Tell the remote/backend server about the client that is really making the request
  #proxy_set_header X-Real-IP $remote_addr;
  #proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;

  # Enable range. If you're proxying large files, you can split them up into chunks to enable faster arbitrary seeks
  #proxy_cache_key $uri$is_args$args$slice_range;
  #proxy_set_header Range $slice_range;
  #slice 100m;

  # Keep our cache up to date
  #proxy_cache_use_stale updating;
  proxy_ignore_client_abort on;

  # Prevent multiple concurrent cache updates by blocking new clients until the backend request is filled.
  proxy_cache_lock on;
  proxy_cache_lock_age 35m;
  proxy_cache_lock_timeout 35m;

  # Backend host ssl configuration
  proxy_ssl_verify on;
  proxy_ssl_trusted_certificate /etc/ssl/certs/ca-certificates.crt;
  proxy_ssl_server_name on;

  # Must match the name of our proxy cache configuration
  proxy_cache mirror;

  # Automatically clean the cache
  #proxy_cache_purge $purge_method;

  # Proxy raspbian backend with Nginx `upstream` block
  location /raspbian {
    # HTTP is fine, right? 
    proxy_pass http://raspbian;

    # Force an HTTPS connection to the backend, even if clients are requesting from us over HTTP
    #proxy_pass https://raspbian;

    # Use the same scheme as our clients. (Why??)
    #proxy_pass $scheme://raspbian;

    # Use Raspbian source
    proxy_set_header Host raspbian.raspberrypi.org;
    proxy_ssl_name raspbian.raspberrypi.org;
  }

  # Proxy Ubuntu backend with Nginx `upstream` block
  location /ubuntu {
    proxy_pass http://ubuntu;
    proxy_set_header Host us.archive.ubuntu.com;
    proxy_ssl_name us.archive.ubuntu.com;

    # Use DigitalOcean's mirror instead, for instance
    #proxy_pass http://digitalocean;
    #proxy_set_header Host mirrors.digitalocean.com;
    #proxy_ssl_name mirrors.digitalocean.com;
  }

  # One could probably get fancy with more granular lifetime controls for cache entries based on the URL (a sketch follows after the upstream blocks below).
  proxy_cache_valid 200 206 1w;
}

upstream raspbian { server raspbian.raspberrypi.org; }
upstream ubuntu { server us.archive.ubuntu.com; }
upstream digitalocean { server mirrors.digitalocean.com; }
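
On the comment above about more granular cache lifetimes: one possible sketch (my own assumption, not part of the tested config) is to split a backend by URL inside the same server block, so that immutable package files are kept for a long time while the frequently changing index files expire quickly:

# Published .deb files never change once uploaded, so keep them for a long time.
location ~ ^/ubuntu/.+\.(deb|udeb)$ {
    proxy_pass http://ubuntu;
    proxy_set_header Host us.archive.ubuntu.com;
    proxy_ssl_name us.archive.ubuntu.com;
    proxy_cache_valid 200 206 1y;
}

# Release/Packages/InRelease indexes change with every archive update.
location ~ ^/ubuntu/dists/ {
    proxy_pass http://ubuntu;
    proxy_set_header Host us.archive.ubuntu.com;
    proxy_ssl_name us.archive.ubuntu.com;
    proxy_cache_valid 200 206 10m;
}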
