Reputation: 5102
I am trying to use an nginx container as a reverse proxy that distributes load across multiple Flask app instances; I am using docker-compose to do that.
# docker-compose.yml — one scalable Flask "app" service behind an nginx "proxy".
# Indentation restored: as pasted, every key was at column 0, which is invalid
# YAML (indentation is structural).
version: '3'
services:
  app:
    build:
      context: ./app
      dockerfile: Dockerfile
    # expose (unlike ports) publishes 8001 only on the internal network;
    # the proxy reaches each replica there.
    expose:
      - "8001"
  proxy:
    build:
      context: ./nginx
      dockerfile: Dockerfile
    ports:
      # Quoted: unquoted NN:NN port mappings can hit YAML's sexagesimal trap.
      - "80:80"
    # NOTE(review): `links` is a legacy option; services on the same default
    # compose network already resolve each other by service name.
    links:
      - app
and my nginx docker file is
# Proxy image: stock nginx with the custom load-balancing config baked in.
FROM nginx
# Overwrites the main config (not conf.d/) so the custom http/server blocks apply.
COPY nginx.conf /etc/nginx/nginx.conf
and my app dockerfile is
# App image: Python 3 on Alpine, with build deps for C-extension wheels and a
# pinned static Docker CLI copied into the image.
FROM python:3-alpine
RUN apk update && apk upgrade && pip install -U pip
# Toolchain and headers needed to compile common wheels (lxml, cryptography, ...).
RUN apk add --update alpine-sdk make gcc python3-dev python-dev libxslt-dev libxml2-dev libc-dev openssl-dev libffi-dev zlib-dev py-pip \
    && rm -rf /var/cache/apk/*
# Fetch the static Docker client (17.03.0-ce) and clean up the tarball so it
# is not left behind in the layer.
RUN set -x \
    && VER="17.03.0-ce" \
    && curl -L -o /tmp/docker-$VER.tgz https://get.docker.com/builds/Linux/x86_64/docker-$VER.tgz \
    && tar -xz -C /tmp -f /tmp/docker-$VER.tgz \
    && mv /tmp/docker/* /usr/bin \
    && rm -rf /tmp/docker /tmp/docker-$VER.tgz
WORKDIR /app
# COPY is preferred over ADD for plain local copies (ADD adds URL/auto-extract
# magic that is not wanted here).
COPY . /app
RUN pip install --trusted-host pypi.python.org -r requirements.txt
EXPOSE 8001
# Fix: do not hard-code "python3.6" — python:3-alpine tracks the latest 3.x,
# so a /usr/local/bin/python3.6 binary may not exist. The image's default
# `python` is always the bundled interpreter.
CMD ["python", "main.py"]
I am using the following command to build
docker-compose build
and the following command to start containers
docker-compose up -d --scale app=5
which shows
Starting testapp_app_1 ... done
Creating testapp_app_2 ... done
Creating testapp_app_3 ... done
Creating testapp_app_4 ... done
Creating testapp_app_5 ... done
but when I check the docker logs, it seems like all the requests are served by testapp_app_1
my nginx conf file is
#worker_processes 4;

events { worker_connections 1024; }

http {
    # sendfile on;

    # Kept for reference — a static upstream is resolved once at startup:
    # upstream app_servers {
    #     server app:8001;
    # }

    server {
        listen 80;
        server_name localhost;

        # 127.0.0.11 is Docker's embedded DNS; pairing it with a variable in
        # proxy_pass makes nginx re-resolve "app" at request time.
        resolver 127.0.0.11;
        set $backends app;

        location / {
            proxy_pass http://$backends:8001;
            proxy_redirect off;
            proxy_set_header Host $host;
            proxy_set_header X-Real-IP $remote_addr;
            proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
            proxy_set_header X-Forwarded-Host $server_name;
        }
    }
}
Please explain how to dynamically round-robin requests to each of the app containers
Upvotes: 0
Views: 1111
Reputation: 5102
The method you mentioned doesn't work: the default nginx server cannot detect newly created instances. You need the jwilder/docker-gen image to integrate dynamically created app instances into the nginx load balancer.
your docker-compose file in the project root should look something like:
# docker-compose.yml (project root): nginx plus docker-gen, which watches the
# Docker socket and rewrites the nginx config whenever containers start/stop.
# Indentation restored: the pasted snippet had every key at column 0, which is
# invalid YAML.
version: '3'
services:
  nginx:
    image: nginx
    #restart: unless-stopped
    labels:
      com.github.jrcs.letsencrypt_nginx_proxy_companion.nginx_proxy: "true"
    container_name: nginx
    ports:
      - "80:80"
    volumes:
      - ./conf.d:/etc/nginx/conf.d
      - ./vhost.d:/etc/nginx/vhost.d
  nginx-gen:
    image: jwilder/docker-gen
    # Regenerate default.conf from the template, then SIGHUP nginx to reload.
    command: -notify-sighup nginx -watch -wait 5s:30s /etc/docker-gen/templates/nginx.tmpl /etc/nginx/conf.d/default.conf
    container_name: nginx-gen
    #restart: unless-stopped
    volumes:
      - ./conf.d:/etc/nginx/conf.d
      - ./vhost.d:/etc/nginx/vhost.d
      # Read-only socket mount lets docker-gen observe container events.
      - /var/run/docker.sock:/tmp/docker.sock:ro
      - ./nginx.tmpl:/etc/docker-gen/templates/nginx.tmpl:ro

# Pre-created external network shared with the app project so docker-gen can
# see the app containers: `docker network create nginx-proxy`
networks:
  default:
    external:
      name: nginx-proxy
and the docker-compose file in the app directory would look like:
# docker-compose.yml (app directory): the scalable Flask service.
# Indentation restored: the pasted snippet had every key at column 0, which is
# invalid YAML.
version: '3'
services:
  app:
    build:
      context: ./app
      dockerfile: Dockerfile
    expose:
      - "8001"
    environment:
      # docker-gen groups containers by VIRTUAL_HOST; every scaled replica
      # must share the same value to join one round-robin upstream.
      VIRTUAL_HOST: localhost

# Same pre-created external network as the proxy project.
networks:
  default:
    external:
      name: nginx-proxy
It's important to have the same virtual hostname across all the instances; the jwilder/docker-gen container will regenerate the nginx config and balance them in round-robin fashion.
Upvotes: 2