dockeasy

Install

pip install dockeasy

Just import * from the top-level module and you’re good to go:

from dockeasy import *
Dockerfiles in Python
Build a Dockerfile by chaining methods. Every call returns a new Dockerfile — safe, immutable, and composable.
df = (Dockerfile()
.from_('python:3.12-slim')
.workdir('/app')
.copy('requirements.txt', '.')
.run('pip install -r requirements.txt')
.copy('.', '.')
.expose(8000)
.cmd(['python', 'main.py']))
print(df)

FROM python:3.12-slim
WORKDIR /app
COPY requirements.txt .
RUN pip install -r requirements.txt
COPY . .
EXPOSE 8000
CMD ["python", "main.py"]
Multi-stage builds
Call .from_() again to start a new stage. Use from_= in .copy() to pull artifacts across stages.
df = (Dockerfile()
.from_('golang:1.22-alpine', as_='builder')
.workdir('/src')
.copy('.', '.')
.run('go build -o /app .')
.from_('gcr.io/distroless/static')
.copy('/app', '/app', from_='builder')
.cmd(['/app']))
print(df)

FROM golang:1.22-alpine AS builder
WORKDIR /src
COPY . .
RUN go build -o /app .
FROM gcr.io/distroless/static
COPY --from=builder /app /app
CMD ["/app"]
Build cache mounts
.run_mount() adds --mount=type=cache for blazing-fast rebuilds with pip, uv, or apt.
df = (Dockerfile()
.from_('python:3.12-slim')
.workdir('/app')
.copy('requirements.txt', '.')
.run_mount('pip install -r requirements.txt', target='/root/.cache/pip')
.copy('.', '.')
.cmd(['python', 'main.py']))
print(df)

FROM python:3.12-slim
WORKDIR /app
COPY requirements.txt .
RUN --mount=type=cache,target=/root/.cache/pip pip install -r requirements.txt
COPY . .
CMD ["python", "main.py"]
Framework Builders
One-liners that generate production-ready Dockerfiles for common stacks.
Python / uv app
python_app() builds a multistage Dockerfile: uv compiles deps in a build stage, only the .venv is copied to the slim runtime.
python_app()

FROM ghcr.io/astral-sh/uv:python3.13-bookworm AS builder
WORKDIR /app
ENV UV_COMPILE_BYTECODE=1
ENV UV_LINK_MODE=copy
COPY pyproject.toml .
COPY uv.lock .
RUN --mount=type=cache,target=/root/.cache/uv uv sync --frozen --no-dev --no-install-project
COPY . .
RUN --mount=type=cache,target=/root/.cache/uv uv sync --frozen --no-dev
FROM python:3.13-slim
WORKDIR /app
COPY --from=builder /app /app
ENV PATH=/app/.venv/bin:$PATH
EXPOSE 8000
CMD ["python", "main.py"]
# With system packages, persistent volume dir, and a healthcheck
python_app(pkgs=['libpq-dev', 'curl'], vols=['/app/data'], healthcheck='/health')

FROM ghcr.io/astral-sh/uv:python3.13-bookworm AS builder
WORKDIR /app
ENV UV_COMPILE_BYTECODE=1
ENV UV_LINK_MODE=copy
COPY pyproject.toml .
COPY uv.lock .
RUN --mount=type=cache,target=/root/.cache/uv uv sync --frozen --no-dev --no-install-project
COPY . .
RUN --mount=type=cache,target=/root/.cache/uv uv sync --frozen --no-dev
FROM python:3.13-slim
WORKDIR /app
RUN apt-get update && apt-get install -y libpq-dev curl && rm -rf /var/lib/apt/lists/*
COPY --from=builder /app /app
ENV PATH=/app/.venv/bin:$PATH
RUN mkdir -p /app/data
HEALTHCHECK --interval=30s --timeout=5s --retries=3 CMD curl -f http://localhost:8000/health
EXPOSE 8000
CMD ["python", "main.py"]
FastHTML app
fasthtml_app()

FROM ghcr.io/astral-sh/uv:python3.13-bookworm AS builder
WORKDIR /app
ENV UV_COMPILE_BYTECODE=1
ENV UV_LINK_MODE=copy
COPY pyproject.toml .
COPY uv.lock .
RUN --mount=type=cache,target=/root/.cache/uv uv sync --frozen --no-dev --no-install-project
COPY . .
RUN --mount=type=cache,target=/root/.cache/uv uv sync --frozen --no-dev
FROM python:3.13-slim
WORKDIR /app
COPY --from=builder /app /app
ENV PATH=/app/.venv/bin:$PATH
EXPOSE 5001
CMD ["python", "app.py"]
Go app
go_app()

FROM golang:1.22-alpine AS builder
WORKDIR /src
COPY go.mod .
COPY go.sum .
RUN --mount=type=cache,target=/go/pkg/mod go mod download
COPY . .
ENV CGO_ENABLED=0
RUN go build -ldflags="-s -w" -o /app .
FROM gcr.io/distroless/static
COPY --from=builder /app /app
EXPOSE 8080
CMD ["/app"]
Rust app
rust_app()

FROM rust:1-slim-bookworm AS builder
WORKDIR /src
COPY . .
RUN --mount=type=cache,target=/usr/local/cargo/registry cargo build --release
FROM gcr.io/distroless/static
COPY --from=builder /src/target/release/app /app
EXPOSE 8080
CMD ["/app"]
Node.js app
# Two-stage: build → serve static output
node_app(static=True)

FROM node:20-slim AS builder
WORKDIR /app
COPY package*.json .
RUN npm ci
COPY . .
RUN npm run build
FROM node:20-slim
WORKDIR /app
RUN npm install -g serve
COPY --from=builder /app/dist .
EXPOSE 3000
CMD ["serve", "-s", ".", "-l", "3000"]
Auto-detect your project
detect_app() sniffs the project directory and picks the right builder automatically.
def tmp(files):
    """Create a throwaway directory populated with `files` ({name: content}) and return its Path."""
    root = Path(tempfile.mkdtemp())
    for name, content in files.items():
        (root / name).write_text(content)
    return root
cases = [
({'go.mod': 'module x'}, 'go.mod → go_app'),
({'Cargo.toml': '[package]'}, 'Cargo.toml → rust_app'),
({'package.json': '{}', 'pyproject.toml': '[p]'}, 'package.json + pyproject.toml → fastapi_react'),
({'pyproject.toml': '[project]'}, 'pyproject.toml → python_app (port 8000)'),
]
for files, label in cases:
df = detect_app(tmp(files))
print(label)

go.mod → go_app
Cargo.toml → rust_app
package.json + pyproject.toml → fastapi_react
pyproject.toml → python_app (port 8000)
Docker Compose
The Compose builder mirrors the Dockerfile API — chain .svc(), .network(), and .volume() calls, then render or save.
dc = (Compose()
.svc('web', image='nginx', ports={80: 80}, networks=['backend'])
# we are passing a dummy dockerfile. point the python_app builder to your project dir for real use
.svc('api', build=python_app().save(), ports={8000: 8000},
env={'DATABASE_URL': 'postgresql://db/app'},
depends_on=['db'], networks=['backend'])
.svc('db', image='postgres:16',
env={'POSTGRES_PASSWORD': 'secret'},
volumes={'pgdata': '/var/lib/postgresql/data'})
.network('backend')
.volume('pgdata')).save()
dc

services:
web:
image: nginx
ports:
- 80:80
networks:
- backend
api:
depends_on:
- db
ports:
- 8000:8000
build: .
environment:
- DATABASE_URL=postgresql://db/app
networks:
- backend
db:
image: postgres:16
environment:
- POSTGRES_PASSWORD=secret
volumes:
- pgdata:/var/lib/postgresql/data
networks:
backend: null
volumes:
pgdata: null
dc.save('docker-compose.yml') # writes the file
# Save to disk, bring it up, tear it down. This will fail as-is
dc.up() # docker compose up -d
dc.down(v=True) # docker compose down -v

Running containers
Build, run, test — all from Python.
tmp = tempfile.mkdtemp()
df = (Dockerfile()
.from_('alpine')
.cmd(['echo', 'hello from docker']))
tag = df.build(tag='myapp:latest', path=tmp, no_creds=True)
print(f'Built: {tag}')
out = drun(tag, remove=True)
print(out.strip())
rmi(tag, force=True)
print('Cleaned up.')

Built: myapp:latest
hello from docker
Cleaned up.
# Start a named, detached container with port mapping
cid = drun('nginx', detach=True, ports={'80/tcp': 8080}, name='my-nginx', check=True)
print(containers()) # ['my-nginx']
print(logs('my-nginx', n=5)) # last 5 log lines
stop('my-nginx')
rm('my-nginx')

['my-nginx']
2026/03/22 18:20:16 [notice] 1#1: OS: Linux 6.8.0-90-generic
2026/03/22 18:20:16 [notice] 1#1: getrlimit(RLIMIT_NOFILE): 1024:524288
2026/03/22 18:20:16 [notice] 1#1: start worker processes
2026/03/22 18:20:16 [notice] 1#1: start worker process 29
2026/03/22 18:20:16 [notice] 1#1: start worker process 30
Smoke-test an image
test() runs a command inside the image and returns True if it exits 0 — handy for CI.
assert test('python:3.12-slim', ['python', '-c', 'import sys; sys.exit(0)'])
assert not test('python:3.12-slim', ['python', '-c', 'raise SystemExit(1)'])

Config & Secrets
Store plain config (IPs, paths) and sensitive values (tokens, passwords) cleanly.
env_set('VPS_IP', '1.2.3.4')
env_set('APP_NAME', 'my-app')
print(env_get('VPS_IP'))
print(env_get('APP_NAME'))

1.2.3.4
my-app
# Stored in OS keychain on macOS; env var fallback on Linux/CI
secret_set('FOO', 'bar')
print(secret_get('FOO'))

bar
# Read multiple secrets at once — missing keys are silently skipped
secret_set('DATABASE_URL', 'postgresql://...')
cfg = secrets('FOO', 'DATABASE_URL', 'MISSING_KEY')
cfg

{'FOO': 'bar', 'DATABASE_URL': 'postgresql://...'}
Reverse Proxy
caddy() generates a Caddyfile as a Python object — chainable, printable, saveable.
caddy_svc() writes the file and hands back service kwargs you can drop straight into Compose.
# Minimal: auto-TLS via Let's Encrypt
print(caddy('myapp.example.com'))

# DNS-01 wildcard cert — works even when port 80 is closed
print(caddy('myapp.example.com', dns='cloudflare', email='me@example.com'))

Zero open ports with Cloudflare Tunnel
caddy_svc() + cloudflared_svc() → a production stack with no inbound firewall rules at all.
Add crowdsec=True to either call to layer in IP reputation blocking.
import tempfile
tmp = tempfile.mkdtemp()
dc = (Compose()
.svc('app', build='.', networks=['web'], restart='unless-stopped')
.svc('caddy', **caddy_svc('myapp.example.com', cloudflared=True, conf=f'{tmp}/Caddyfile'))
.svc('cloudflared', **cloudflared_svc())
.network('web').volume('caddy_data').volume('caddy_config'))
print(dc)

Next steps
The notebooks are executable specs — worth reading before shipping.
nbs/01_proxy.ipynb — live integration test: boots a FastHTML app, tunnels it via Cloudflare, and asserts it’s reachable over the internet. Shows the full caddy_svc / cloudflared_svc / crowdsec surface area with every option.
nbs/00_core.ipynb — complete Dockerfile and Compose API, including multi-stage builds, framework builders, and container management.