version: "3.8" services: traefik: image: traefik:latest container_name: traefik command: - "--entrypoints.web.address=:80" - "--entrypoints.websecure.address=:443" - "--certificatesresolvers.njalla.acme.email=thierrypouplier@gmail.com" - "--certificatesresolvers.njalla.acme.storage=/letsencrypt/acme.json" - "--certificatesresolvers.njalla.acme.httpchallenge.entrypoint=web" - "--log.level=DEBUG" - "--providers.docker=true" - "--providers.docker.exposedByDefault=false" ports: - "80:80" - "443:443" environment: - NJALLA_TOKEN=${NJALLA_TOKEN} volumes: - /var/run/docker.sock:/var/run/docker.sock:ro - /mnt/HoardingCow_docker_data/Traefik:/letsencrypt restart: unless-stopped networks: - traefik-net ddns-updater: image: qmcgaw/ddns-updater container_name: ddns-updater networks: - traefik-net ports: - 8000:8000/tcp volumes: - /mnt/HoardingCow_docker_data/Ddns_updater:/updater/data environment: # - CONFIG= - PERIOD=5m - UPDATE_COOLDOWN_PERIOD=5m - PUBLICIP_FETCHERS=all - PUBLICIP_HTTP_PROVIDERS=all - PUBLICIPV4_HTTP_PROVIDERS=all - PUBLICIPV6_HTTP_PROVIDERS=all - PUBLICIP_DNS_PROVIDERS=all - PUBLICIP_DNS_TIMEOUT=3s - HTTP_TIMEOUT=10s # Web UI - LISTENING_ADDRESS=:8000 - ROOT_URL=/ # Backup - BACKUP_PERIOD=0 - BACKUP_DIRECTORY=/updater/data # Other - LOG_LEVEL=info - LOG_CALLER=hidden - SHOUTRRR_ADDRESSES= restart: unless-stopped networks: traefik-net: driver: bridge name: traefik-net # duckdns: # environment: # - PUID=1000 # - PGID=1000 # - TZ=America/Toronto # - SUBDOMAINS=aziworkhorse # - TOKEN=$[DUCKDNS_TOKEN] # image: lscr.io/linuxserver/duckdns # labels: # - "traefik.enable=false" # deploy: # placement: # constraints: # - node.role == manager # restart_policy: # condition: on-failure # networks: # - traefik-net # whoami: # image: traefik/whoami # container_name: whoami # labels: # - "traefik.enable=true" # - "traefik.http.routers.whoami.rule=Host(`test.aziworkhorse.duckdns.org`)" # - "traefik.http.routers.whoami.entrypoints=websecure" # - "traefik.http.routers.whoami.tls.certresolver=duckdns" # networks: # - traefik-net # deploy: # placement: # constraints: # - node.role == manager # restart_policy: # condition: on-failure # nginx: # environment: # - TZ=America/Toronto # image: jc21/nginx-proxy-manager:latest # ports: # - 443:443/tcp # - 80:80/tcp # - 81:81/tcp # restart: unless-stopped # volumes: # - /mnt/HoardingCow_docker_data/Nginx/letsencrypt:/etc/letsencrypt:rw # - /mnt/HoardingCow_docker_data/Nginx/data:/data:rw # - /mnt/HoardingCow_docker_data/Nginx/logs:/var/log/ninx:rw # deploy: # placement: # constraints: # - node.hostname == workHorse # pihole: # cap_add: # - NET_ADMIN # container_name: pihole # environment: # - TZ=America/Toronto # image: pihole/pihole:latest # ports: # - 53:53/tcp # - 53:53/udp # - 67:67/udp # - 1010:80/tcp # restart: unless-stopped # volumes: # - /mnt/HoardingCow_docker_data/Pi-Hole/dnsmasq.d:/etc/dnsmasq.d:rw # - /mnt/HoardingCow_docker_data/Pi-Hole/config:/etc/pihole:rw # openvpn: # cap_add: # - NET_ADMIN # container_name: openvpn # environment: # - TZ=America/Toronto # image: kylemanna/openvpn # ports: # - 1194:1194/udp # restart: unless-stopped # volumes: # - /mnt/HoardingCow_docker_data/OpenVPN:/etc/openvpn:rw