---
# updated: 2023-06-25
# stack: hyperpipe
#
# Docker Compose stack for Piped (backend/frontend/proxy/postgres) fronted by
# nginx, plus the Hyperpipe backend/frontend. Shared hardening defaults live in
# the x-common anchor and are merged into every service with `<<:`.
# NOTE: the top-level `version` key is obsolete in the Compose Specification;
# kept for compatibility with older docker-compose v1 binaries.
version: "3.0"

# Ship container logs to a Loki instance; merged into x-common below.
x-logging: &x-logging
  logging:
    driver: loki
    options:
      loki-url: "http://loki:3100/loki/api/v1/push"
      loki-retries: "5"
      loki-batch-size: "400"

# Baseline environment shared by all services; services re-merge this under
# their own `environment:` when they add service-specific variables.
x-environment: &x-environment
  TZ: "Europe/Paris"
  # Env values are quoted so they stay strings (unquoted they parse as ints).
  PUID: "1000"
  PGID: "1000"

# Hardened defaults merged into every service. Merge keys are shallow:
# a service that redefines `cap_add`, `labels`, `environment`, etc. replaces
# the whole mapping/list, it does not append to it.
x-common: &x-common
  <<: *x-logging
  restart: "no"
  stop_grace_period: 5s
  stdin_open: true
  tty: true
  privileged: false
  security_opt:
    # Colon syntax per Docker docs; the `=` form is not applied by all versions.
    - no-new-privileges:true
  cap_drop:
    - ALL
  cap_add:
    - KILL
  dns:
    - 1.1.1.1
    - 8.8.8.8
    - 1.0.0.1
    - 8.8.4.4
  ipc: "shareable"
  extra_hosts:
    # NOTE(review): 192.168.0.0 is a network address, not a host — confirm
    # the intended host IP for template.home.
    - "template.home:192.168.0.0"
  environment: *x-environment
  # Quoted: unquoted digits-and-colon scalars are YAML 1.1 sexagesimal ints.
  user: "1000:1000"
  labels:
    # Label values must be strings, so the boolean is quoted.
    com.centurylinklabs.watchtower.enable: "true"
    logging: "promtail"
    com.stack.name: "common"
    com.stack.service.name: "common"
  devices:
    - /dev/kmsg:/dev/kmsg
  deploy:
    resources:
      limits:
        cpus: "0.50"
        memory: 256M
  ulimits:
    nproc: 65535
    nofile:
      soft: 20000
      hard: 40000
  tmpfs:
    - /tmp:rw,noexec,nosuid,size=64k
  sysctls:
    net.core.somaxconn: 1024
    net.ipv4.tcp_syncookies: 0

# Reusable volume mount strings (some are kept for future use).
x-volume-timezone: &x-volume-timezone "/etc/timezone:/etc/timezone:ro"
x-volume-localtime: &x-volume-localtime "/etc/localtime:/etc/localtime:ro"
x-volume-docker-socket: &x-volume-docker-socket "/var/run/docker.sock:/var/run/docker.sock:rw"
x-volume-cgroups: &x-volume-cgroups "/proc/cgroups:/cgroup:rw"
x-volume-ssl: &x-volume-ssl "/opt/docker/ssl:/ssl:ro"

services:
  # PostgreSQL database for the Piped backend.
  piped-db:
    <<: *x-common
    user: "0:0"
    cap_add:
      - DAC_OVERRIDE
      - CHOWN
      - FOWNER
      - FSETID
      - SETGID
      - SETUID
      - NET_BIND_SERVICE
      - MKNOD
    container_name: piped-db
    hostname: piped-db
    image: postgres:latest
    restart: unless-stopped
    ports:
      - "5433:5432"
    expose:
      - "5432"
    healthcheck:
      test: ["CMD", "pg_isready", "-q", "-d", "piped", "-U", "root"]
      timeout: 45s
      interval: 10s
      retries: 10
    environment:
      <<: *x-environment
      POSTGRES_DB: piped
      # Quoted placeholders: unquoted `[...]` parses as a YAML flow sequence,
      # not a string. Replace with real credentials (ideally via secrets).
      POSTGRES_USER: "[username]"
      POSTGRES_PASSWORD: "[password]"
    labels:
      com.stack.name: "piped"
      com.stack.service.name: "db"
    deploy:
      resources:
        limits:
          memory: 512M
    tmpfs:
      - /tmp:rw,noexec,nosuid,size=512M
    volumes:
      - *x-volume-timezone
      - *x-volume-localtime
      - /opt/docker/hyperpipe/datas/db:/var/lib/postgresql/data:rw

  # Piped proxy; exposes a unix domain socket consumed by hyperpipe-nginx.
  piped-proxy:
    <<: *x-common
    read_only: true
    container_name: piped-proxy
    hostname: piped-proxy
    image: 1337kavin/piped-proxy:latest
    restart: unless-stopped
    environment:
      <<: *x-environment
      # String, matching the other env values; enables unix-socket mode.
      UDS: "1"
    labels:
      com.stack.name: "piped"
      com.stack.service.name: "proxy"
    deploy:
      resources:
        limits:
          memory: 512M
    tmpfs:
      - /tmp:rw,noexec,nosuid,size=512M
    volumes:
      - *x-volume-timezone
      - *x-volume-localtime
      - /opt/docker/hyperpipe/datas/piped-proxy:/app/socket:rw

  # Piped API backend.
  piped-back:
    <<: *x-common
    user: "0:0"
    cap_add:
      - DAC_OVERRIDE
      - CHOWN
      - FOWNER
      - FSETID
      - SETGID
      - SETUID
      - NET_BIND_SERVICE
      - MKNOD
    container_name: piped-back
    hostname: piped-back
    # image: 1337kavin/piped:latest
    image: zogg/piped:latest
    restart: unless-stopped
    ports:
      - "8046:8080"
    expose:
      - "8080"
    depends_on:
      - piped-db
    healthcheck:
      test: curl -f http://localhost:8080/ || exit 1
    environment:
      <<: *x-environment
      DSN: ""
    labels:
      com.stack.name: "piped"
      com.stack.service.name: "back"
    deploy:
      resources:
        limits:
          memory: 2G
    tmpfs:
      # exec is required here (unlike the other services' noexec tmpfs).
      - /tmp:rw,exec,size=512M
    volumes:
      - *x-volume-timezone
      - *x-volume-localtime
      - /opt/docker/hyperpipe/conf/config.properties:/app/config.properties:ro

  # Piped web frontend; entrypoint rewrites the baked-in API host.
  piped-front:
    <<: *x-common
    user: "0:0"
    cap_add:
      - DAC_OVERRIDE
      - CHOWN
      - FOWNER
      - FSETID
      - SETGID
      - SETUID
      - NET_BIND_SERVICE
      - MKNOD
    container_name: piped-front
    hostname: piped-front
    image: 1337kavin/piped-frontend:latest
    restart: unless-stopped
    ports:
      - "8047:80"
    expose:
      - "80"
    depends_on:
      - piped-back
    healthcheck:
      test: wget --no-verbose --tries=1 --spider http://localhost:80
    entrypoint: ash -c 'sed -i s/pipedapi.kavin.rocks/pipedapi.domain.com/g /usr/share/nginx/html/assets/* && /docker-entrypoint.sh && nginx -g "daemon off;"'
    labels:
      com.stack.name: "piped"
      com.stack.service.name: "front"
    deploy:
      resources:
        limits:
          memory: 1G
    tmpfs:
      - /tmp:rw,noexec,nosuid,size=512M
    volumes:
      - *x-volume-timezone
      - *x-volume-localtime

  # Reverse proxy in front of the Piped services (and the ytproxy socket).
  hyperpipe-nginx:
    <<: *x-common
    user: "0:0"
    cap_add:
      - DAC_OVERRIDE
      - CHOWN
      - FOWNER
      - FSETID
      - SETGID
      - SETUID
      - NET_BIND_SERVICE
      - MKNOD
    container_name: hyperpipe-nginx
    hostname: hyperpipe-nginx
    image: nginx:latest
    restart: unless-stopped
    depends_on:
      - piped-back
      - piped-front
      - piped-proxy
    ports:
      - "8045:80"
    expose:
      - "80"
    healthcheck:
      test: curl -f http://localhost:80/ || exit 1
    labels:
      com.stack.name: "hyperpipe"
      com.stack.service.name: "nginx"
    deploy:
      resources:
        limits:
          memory: 512M
    tmpfs:
      - /tmp:rw,noexec,nosuid,size=512M
    volumes:
      - *x-volume-timezone
      - *x-volume-localtime
      - /opt/docker/hyperpipe/conf/nginx.conf:/etc/nginx/nginx.conf:ro
      - /opt/docker/hyperpipe/conf/pipedapi.conf:/etc/nginx/conf.d/pipedapi.conf:ro
      - /opt/docker/hyperpipe/conf/pipedproxy.conf:/etc/nginx/conf.d/pipedproxy.conf:ro
      - /opt/docker/hyperpipe/conf/pipedfrontend.conf:/etc/nginx/conf.d/pipedfrontend.conf:ro
      - /opt/docker/hyperpipe/conf/ytproxy.conf:/etc/nginx/snippets/ytproxy.conf:ro
      # Shares the piped-proxy unix socket directory.
      - /opt/docker/hyperpipe/datas/piped-proxy:/var/run/ytproxy:rw

  # Hyperpipe API backend.
  hyperpipe-back:
    <<: *x-common
    container_name: hyperpipe-back
    hostname: hyperpipe-back
    image: codeberg.org/hyperpipe/hyperpipe-backend:latest
    restart: unless-stopped
    depends_on:
      - hyperpipe-nginx
    ports:
      - "3771:3000"
    expose:
      - "3000"
    environment:
      <<: *x-environment
      HYP_PROXY: "hyperpipe-proxy.onrender.com"
    labels:
      com.stack.name: "hyperpipe"
      com.stack.service.name: "back"
    tmpfs:
      - /tmp:rw,noexec,nosuid,size=512M
    volumes:
      - *x-volume-timezone
      - *x-volume-localtime

  # Hyperpipe web frontend; entrypoint rewrites the baked-in API hosts.
  hyperpipe-front:
    <<: *x-common
    user: "0:0"
    cap_add:
      - DAC_OVERRIDE
      - CHOWN
      - FOWNER
      - FSETID
      - SETGID
      - SETUID
      - NET_BIND_SERVICE
      - MKNOD
    container_name: hyperpipe-front
    hostname: hyperpipe-front
    image: codeberg.org/hyperpipe/hyperpipe:latest
    restart: unless-stopped
    depends_on:
      - hyperpipe-back
    ports:
      - "8745:80"
    expose:
      - "80"
    healthcheck:
      test: wget --no-verbose --tries=1 --spider http://localhost
    entrypoint: sh -c 'find /usr/share/nginx/html -type f -exec sed -i s/pipedapi.kavin.rocks/pipedapi.domain.com/g {} \; -exec sed -i s/hyperpipeapi.onrender.com/hyperpipeapi.domain.com/g {} \; && /docker-entrypoint.sh && nginx -g "daemon off;"'
    labels:
      com.stack.name: "hyperpipe"
      com.stack.service.name: "front"
    deploy:
      resources:
        limits:
          memory: 512M
    tmpfs:
      - /tmp:rw,noexec,nosuid,size=512M
    volumes:
      - *x-volume-timezone
      - *x-volume-localtime