ci: add Drone pipeline, production Docker setup, frontend Dockerfile
Some checks failed
continuous-integration/drone/push Build encountered an error
Some checks failed
continuous-integration/drone/push Build encountered an error
This commit is contained in:
30
.drone.yml
Normal file
30
.drone.yml
Normal file
@@ -0,0 +1,30 @@
|
||||
kind: pipeline
type: docker
name: production-deploy

# Trigger on every push to main
trigger:
  branch:
    - main
  event:
    - push

steps:
  - name: deploy
    # Runs the deploy script over SSH on the production host; all connection
    # details come from Drone secrets so nothing sensitive lives in the repo.
    image: appleboy/drone-ssh
    settings:
      host:
        from_secret: deploy_host
      # NOTE(review): deploys run as root — confirm this is intentional rather
      # than a dedicated deploy user.
      username: root
      port: 22
      key:
        from_secret: deploy_ssh_key
      # Pull latest code, rebuild containers, reload Caddy
      # NOTE(review): this script duplicates CMD.deploy in cli/dev.mjs —
      # keep the two in sync (or have CI invoke the CLI) to avoid drift.
      script:
        - cd /opt/project-hub/src
        - git pull origin main
        - docker compose -f docker-compose.prod.yml up -d --build --force-recreate --remove-orphans
        # Idempotent: fails harmlessly if caddy is already attached.
        - docker network connect pm-network caddy 2>/dev/null || true
        - docker exec caddy caddy reload --config /etc/caddy/Caddyfile --force
        # Reclaim disk from dangling images left behind by --build.
        - docker image prune -f
        - echo "Deploy complete."
|
||||
386
cli/dev.mjs
Executable file
386
cli/dev.mjs
Executable file
@@ -0,0 +1,386 @@
|
||||
#!/usr/bin/env node
|
||||
/**
|
||||
* dev — Project Hub CLI
|
||||
*
|
||||
* Usage: dev <command> [args]
|
||||
*
|
||||
* Install:
|
||||
* chmod +x cli/dev.mjs
|
||||
* ln -sf "$(pwd)/cli/dev.mjs" ~/.local/bin/dev
|
||||
*/
|
||||
import { spawnSync } from 'child_process';
|
||||
import {
|
||||
readFileSync, existsSync,
|
||||
realpathSync, readSync,
|
||||
} from 'fs';
|
||||
import { join, dirname } from 'path';
|
||||
import { fileURLToPath } from 'url';
|
||||
import { homedir } from 'os';
|
||||
|
||||
// ── Resolve project root (follows the symlink back to the real script) ──────
// realpathSync resolves the ~/.local/bin/dev symlink (see install notes in the
// header) so PROJECT_ROOT is the repo checkout, not the symlink's directory.
const SCRIPT_REAL = realpathSync(fileURLToPath(import.meta.url));
const PROJECT_ROOT = dirname(dirname(SCRIPT_REAL)); // cli/dev.mjs → cli/ → root
// NOTE(review): SSH_KEY is never referenced in this file — dead constant, or
// used by tooling not shown here? Confirm before removing.
const SSH_KEY = join(homedir(), '.ssh', 'project-hub');
const REMOTE_HOST = 'project-hub'; // matches ~/.ssh/config entry
const REMOTE_IP = '146.190.56.90'; // display only; connections go via REMOTE_HOST

// ── ANSI colour helpers ──────────────────────────────────────────────────────
const A = {
  reset: '\x1b[0m', bold: '\x1b[1m', dim: '\x1b[2m',
  red: '\x1b[31m', green: '\x1b[32m', yellow: '\x1b[33m',
  blue: '\x1b[34m', magenta: '\x1b[35m', cyan: '\x1b[36m',
};
// c('red', 'msg') → colour-wrapped string; log/ok/inf/wrn are levelled printers.
const c = (col, s) => `${A[col]}${s}${A.reset}`;
const log = (...a) => console.log(...a);
// err() prints to stderr and EXITS the process — callers never return from it.
const err = (msg, code = 1) => { console.error(c('red', `✗ ${msg}`)); process.exit(code); };
const ok = (msg) => log(c('green', `✓ ${msg}`));
const inf = (msg) => log(c('cyan', `→ ${msg}`));
const wrn = (msg) => log(c('yellow', `⚠ ${msg}`));
|
||||
|
||||
// ── .env loader ──────────────────────────────────────────────────────────────

/**
 * Parse PROJECT_ROOT/.env into a plain { KEY: value } object.
 * Returns {} when no .env exists. Full-line comments and trailing comments on
 * unquoted values are stripped; quoted values keep their contents verbatim.
 * (The previous implementation split every line on '#' first, truncating any
 * value that legitimately contained '#' — e.g. tokens or passwords — even
 * inside quotes, and stripped mismatched leading/trailing quotes.)
 * @returns {Record<string, string>}
 */
function loadEnv() {
  const p = join(PROJECT_ROOT, '.env');
  if (!existsSync(p)) return {};
  const env = {};
  for (const raw of readFileSync(p, 'utf8').split('\n')) {
    const line = raw.trim();
    if (line === '' || line.startsWith('#')) continue; // blank / comment line
    const eq = line.indexOf('=');
    if (eq === -1) continue; // not a KEY=VALUE line
    const key = line.slice(0, eq).trim();
    let value = line.slice(eq + 1).trim();
    if (/^(["']).*\1$/.test(value)) {
      value = value.slice(1, -1); // quoted: keep verbatim (may contain # or =)
    } else {
      value = value.split('#')[0].trim(); // unquoted: strip trailing comment
    }
    env[key] = value;
  }
  return env;
}
|
||||
|
||||
// ── Spawn helpers ─────────────────────────────────────────────────────────────

/**
 * Run a command synchronously with stdio inherited from this process.
 * @param {string} cmd executable to run
 * @param {string[]} [args] argv for the command
 * @param {{cwd?: string, allowFail?: boolean, env?: object}} [options]
 *   cwd defaults to PROJECT_ROOT; extra env entries are merged over
 *   process.env. Unless allowFail is true, a non-zero child exit terminates
 *   this CLI with the child's exit code.
 * @returns {object} the spawnSync result record
 */
function run(cmd, args = [], { cwd = PROJECT_ROOT, allowFail = false, env: extraEnv = {} } = {}) {
  const result = spawnSync(cmd, args, {
    stdio: 'inherit',
    cwd,
    env: { ...process.env, ...extraEnv },
  });
  // A spawn error (e.g. ENOENT) is fatal regardless of allowFail.
  if (result.error) err(`spawn error running ${cmd}: ${result.error.message}`);
  const exitedNonZero = result.status !== 0;
  if (exitedNonZero && !allowFail) process.exit(result.status ?? 1);
  return result;
}
|
||||
|
||||
/**
 * Run a single non-interactive command on the deploy server over SSH.
 * @param {string} remoteCmd shell command executed on REMOTE_HOST
 * @returns {object} spawnSync result from run()
 */
function remote(remoteCmd) {
  const sshArgs = [REMOTE_HOST, remoteCmd];
  return run('ssh', sshArgs);
}
|
||||
|
||||
/**
 * Open an interactive SSH session on the deploy server (TTY forced with -t).
 * @param {string} [remoteCmd] optional command; omitted → plain login shell
 * @returns {object} spawnSync result from run()
 */
function remoteInteractive(remoteCmd = '') {
  const sshArgs = remoteCmd ? ['-t', REMOTE_HOST, remoteCmd] : ['-t', REMOTE_HOST];
  return run('ssh', sshArgs);
}
|
||||
|
||||
/**
 * Read one line from stdin synchronously (used for confirmations).
 * Reads fd 0 in chunks until a newline or EOF, so an answer longer than one
 * chunk is no longer truncated (the previous version capped input at 512
 * bytes) and the deprecated Buffer.slice is replaced with subarray.
 * @param {string} question prompt written to stdout (no newline appended)
 * @returns {string} the trimmed response ('' on immediate EOF)
 */
function promptSync(question) {
  process.stdout.write(question);
  const CHUNK = 512;
  const pieces = [];
  for (;;) {
    const buf = Buffer.alloc(CHUNK);
    let n;
    try {
      n = readSync(0, buf, 0, CHUNK, null);
    } catch (e) {
      if (e.code === 'EOF') break; // some platforms report EOF by throwing
      throw e;
    }
    if (n === 0) break; // EOF
    const piece = buf.subarray(0, n);
    pieces.push(piece);
    if (piece.includes(0x0a)) break; // stop once a newline arrives
  }
  return Buffer.concat(pieces).toString().trim();
}
|
||||
|
||||
// ── Divider helper for remote output ─────────────────────────────────────────
// Builds a shell snippet that prints a bold, ANSI-coloured section label
// followed by a horizontal rule — meant to be chained into remote() scripts.
// NOTE(review): not referenced anywhere in this file; either wire it into the
// server:* commands or remove it.
const DIVCMD = (label) =>
`printf '\\n${c('bold', label)}\\n' && echo '─────────────────────────────────────────'`;
|
||||
|
||||
// ═══════════════════════════════════════════════════════════════════════════ //
|
||||
// COMMANDS //
|
||||
// ═══════════════════════════════════════════════════════════════════════════ //
|
||||
const CMD = {
|
||||
|
||||
// ── Remote ─────────────────────────────────────────────────────────────────
|
||||
|
||||
/** Open interactive SSH session. */
|
||||
ssh() {
|
||||
inf(`Connecting to ${REMOTE_IP}…`);
|
||||
remoteInteractive();
|
||||
},
|
||||
|
||||
/**
 * Print server stats: uptime, memory, disk, Docker, open ports.
 * Everything is gathered in ONE ssh round-trip by joining simple commands
 * with '&&' — note that a failing segment therefore short-circuits the rest.
 */
'server:status'() {
  inf('Fetching server status…');
  // Keep remote commands simple — avoid complex shell quoting over SSH.
  const script = [
    'echo ""',
    'echo "=== HOST ==="',
    'hostname && uptime',
    'echo ""',
    'echo "=== MEMORY ==="',
    'free -h',
    'echo ""',
    'echo "=== DISK (/) ==="',
    'df -h /',
    'echo ""',
    'echo "=== DOCKER CONTAINERS ==="',
    // Guarded: suggests server:setup instead of failing when Docker is absent.
    'docker ps 2>/dev/null || echo "(docker not installed — run: dev server:setup)"',
    'echo ""',
    'echo "=== LISTENING PORTS ==="',
    'ss -tlnp 2>/dev/null',
    'echo ""',
  ].join(' && ');
  remote(script);
},
|
||||
|
||||
/** Tail container or journal logs. Usage: dev server:logs [service] */
|
||||
'server:logs'() {
|
||||
const svc = process.argv[3] || 'project-manager-api';
|
||||
inf(`Tailing logs for: ${c('cyan', svc)}`);
|
||||
remote(
|
||||
`docker logs --tail=200 -f ${svc} 2>&1 || ` +
|
||||
`journalctl -u ${svc} -n 200 -f 2>/dev/null || ` +
|
||||
`echo "No logs found for '${svc}'"`
|
||||
);
|
||||
},
|
||||
|
||||
/**
 * Install Docker, UFW, create /opt/project-hub on the remote server.
 * One-time bootstrap; asks for confirmation before touching the server.
 * NOTE(review): every remote command assumes the SSH user is root (no sudo) —
 * confirm the ~/.ssh/config 'project-hub' entry logs in as root.
 */
'server:setup'() {
  inf('Setting up remote server…');
  wrn('This will install Docker, Docker Compose, and configure UFW.');
  const confirm = promptSync('Continue? (yes/no): ');
  if (confirm.toLowerCase() !== 'yes') { log('Aborted.'); return; }

  // Joined with '&&' so the first failure aborts the whole setup.
  const script = [
    'export DEBIAN_FRONTEND=noninteractive',
    'apt-get update -qq',
    'apt-get install -y -qq ca-certificates curl gnupg lsb-release ufw git',
    // Add Docker's official GPG key and apt repo
    'install -m 0755 -d /etc/apt/keyrings',
    'curl -fsSL https://download.docker.com/linux/ubuntu/gpg | gpg --dearmor -o /etc/apt/keyrings/docker.gpg',
    'chmod a+r /etc/apt/keyrings/docker.gpg',
    'echo "deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.gpg] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable" > /etc/apt/sources.list.d/docker.list',
    'apt-get update -qq',
    'apt-get install -y -qq docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin',
    'systemctl enable --now docker',
    // Firewall: SSH + API + HTTP/HTTPS, then force-enable non-interactively.
    'ufw allow OpenSSH',
    // NOTE(review): 4000 is exposed publicly even though Caddy proxies the API —
    // confirm direct API access is intended.
    'ufw allow 4000/tcp comment "project-hub API"',
    'ufw allow 80/tcp',
    'ufw allow 443/tcp',
    'ufw --force enable',
    // Persistent dirs live OUTSIDE the git checkout (see docker-compose.prod.yml).
    'mkdir -p /opt/project-hub/data /opt/project-hub/uploads',
    'echo "SETUP_DONE"',
  ].join(' && ');
  remote(script);
  ok('Server setup complete.');
  log(c('dim', ' Next: run dev deploy to push the backend.'));
},
|
||||
|
||||
/** Reboot the remote server (requires typed confirmation). */
|
||||
'server:reboot'() {
|
||||
wrn(`This will reboot ${REMOTE_IP}.`);
|
||||
const answer = promptSync('Type YES to confirm: ');
|
||||
if (answer !== 'YES') { log('Aborted.'); return; }
|
||||
remote('echo "Rebooting in 1s…" && sleep 1 && reboot');
|
||||
ok('Reboot command sent. Server will be back in ~30s.');
|
||||
},
|
||||
|
||||
/**
 * One-time server app initialisation:
 * - Clone repo to /opt/project-hub/src (or pull if already there)
 * - Configure git credentials so future pulls work without prompts
 * - Add pm.jiosii.com block to Caddyfile (idempotent)
 * - First production build + start via docker-compose.prod.yml
 * - Connect pm-network to Caddy & reload
 *
 * NOTE(review): the Gitea token is embedded in the git remote URL and persisted
 * on the server (visible via `git remote -v` and in .git/config) — consider a
 * credential helper or a read-only deploy token.
 */
'server:init-app'() {
  const env = loadEnv();
  const giteaToken = env.GITEA_API_TOKEN;
  // GITEA_USERNAME may be an email; keep only the local part for the URL.
  const giteaUser = (env.GITEA_USERNAME || 'ryan').split('@')[0];
  if (!giteaToken) err('GITEA_API_TOKEN not found in .env');

  inf('Initialising app on remote server…');
  wrn('This will clone the repo, build Docker images, and update Caddy.');
  const confirm = promptSync('Continue? (yes/no): ');
  if (confirm.toLowerCase() !== 'yes') { log('Aborted.'); return; }

  // Token-authenticated HTTPS remote so future `git pull`s never prompt.
  const repoUrl = `https://${giteaUser}:${giteaToken}@git.jiosii.com/${giteaUser}/Project-Manager.git`;

  // Step 1: Clone or pull the repo
  // Re-runs safely: an existing checkout gets its remote URL refreshed
  // (picks up token rotation) and is pulled instead of re-cloned.
  inf('Step 1/4 Clone / update source…');
  remote(
    `if [ -d /opt/project-hub/src/.git ]; then` +
    ` git -C /opt/project-hub/src remote set-url origin '${repoUrl}' && ` +
    ` git -C /opt/project-hub/src pull origin main; ` +
    `else ` +
    ` git clone '${repoUrl}' /opt/project-hub/src; ` +
    `fi`
  );

  // Step 2: First production build
  inf('Step 2/4 Building + starting containers (this may take a few minutes)…');
  remote(
    'cd /opt/project-hub/src && ' +
    'docker compose -f docker-compose.prod.yml up -d --build --remove-orphans'
  );

  // Step 3: Connect Caddy to the pm-network
  // Idempotent: the connect fails harmlessly if already attached.
  inf('Step 3/4 Connecting Caddy to pm-network…');
  remote('docker network connect pm-network caddy 2>/dev/null || true');

  // Step 4: Add pm.jiosii.com to Caddyfile (idempotent) and reload
  inf('Step 4/4 Updating Caddyfile and reloading…');
  // Use individual echo statements grouped with { } — avoids nested heredoc issues
  // NOTE(review): the block is appended to /opt/gitea-drone/Caddyfile but the
  // reload below reads /etc/caddy/Caddyfile — presumably the former is
  // bind-mounted into the caddy container at the latter path; verify.
  remote(
    'if ! grep -q "pm.jiosii.com" /opt/gitea-drone/Caddyfile; then ' +
    '{ ' +
    'echo ""; ' +
    'echo "# Project Manager - pm.jiosii.com"; ' +
    'echo "pm.jiosii.com {"; ' +
    'echo " @backend path /api/* /health /uploads/*"; ' +
    'echo " handle @backend {"; ' +
    'echo " reverse_proxy pm-backend:4000"; ' +
    'echo " }"; ' +
    'echo " handle {"; ' +
    'echo " reverse_proxy pm-frontend:80"; ' +
    'echo " }"; ' +
    'echo " log {"; ' +
    'echo " output file /var/log/caddy/pm.jiosii.com.log"; ' +
    'echo " }"; ' +
    'echo "}"; ' +
    'echo "www.pm.jiosii.com {"; ' +
    'echo " redir https://pm.jiosii.com{uri} permanent"; ' +
    'echo "}"; ' +
    '} >> /opt/gitea-drone/Caddyfile; ' +
    'echo "Caddyfile updated."; ' +
    'else echo "pm.jiosii.com already in Caddyfile, skipping."; fi'
  );
  remote('docker exec caddy caddy reload --config /etc/caddy/Caddyfile --force');

  ok('Server app init complete.');
  log(c('dim', ' ➜ Add DNS A record: pm.jiosii.com → 146.190.56.90'));
  log(c('dim', ' ➜ Then visit: https://pm.jiosii.com'));
  log(c('dim', ' ➜ Activate Drone: https://drone.jiosii.com (see below)'));
},
|
||||
|
||||
// ── Deploy ─────────────────────────────────────────────────────────────────

/**
 * Pull latest code on server and rebuild production containers.
 * NOTE(review): this command list duplicates the .drone.yml deploy step —
 * keep the two in sync (or have CI invoke this CLI) to avoid drift.
 */
deploy() {
  inf(`Deploying to ${REMOTE_IP}…`);
  remote([
    'cd /opt/project-hub/src',
    'git pull origin main',
    'docker compose -f docker-compose.prod.yml up -d --build --force-recreate --remove-orphans',
    // Idempotent: fails harmlessly if caddy is already on pm-network.
    'docker network connect pm-network caddy 2>/dev/null || true',
    'docker exec caddy caddy reload --config /etc/caddy/Caddyfile --force',
    // Reclaim disk from dangling images left behind by --build.
    'docker image prune -f',
    'echo "Deploy complete."',
  ].join(' && '));
  ok('Deploy complete.');
  log(c('dim', ' Live at https://pm.jiosii.com'));
},
|
||||
|
||||
// ── Local Dev ──────────────────────────────────────────────────────────────
|
||||
|
||||
/** Check local backend health endpoint. */
|
||||
health() {
|
||||
const env = loadEnv();
|
||||
const port = env.PORT_BACKEND || '4000';
|
||||
const url = `http://localhost:${port}/health`;
|
||||
inf(`GET ${url}`);
|
||||
run('curl', ['-sS', '-m', '5', '--fail', '--show-error', url]);
|
||||
log('');
|
||||
},
|
||||
|
||||
/** docker compose up then vite dev server. */
|
||||
start() {
|
||||
inf('Starting local backend (Docker)…');
|
||||
run('docker', ['compose', 'up', '-d']);
|
||||
inf('Starting frontend dev server…');
|
||||
const env = loadEnv();
|
||||
const port = env.PORT_FRONTEND_DEV || '5173';
|
||||
run('npm', ['run', 'dev', '--prefix', 'frontend', '--', '--port', port]);
|
||||
},
|
||||
|
||||
/** Stop Docker containers + Vite. */
|
||||
stop() {
|
||||
inf('Stopping local services…');
|
||||
run('docker', ['compose', 'down'], { allowFail: true });
|
||||
run('bash', ['-c', "lsof -tiTCP:5173 -sTCP:LISTEN | xargs kill 2>/dev/null || true"], { allowFail: true });
|
||||
run('bash', ['-c', "lsof -tiTCP:5174 -sTCP:LISTEN | xargs kill 2>/dev/null || true"], { allowFail: true });
|
||||
ok('All local services stopped.');
|
||||
},
|
||||
|
||||
/** Build frontend and sync to dist/. */
|
||||
build() {
|
||||
inf('Building frontend…');
|
||||
run('npm', ['run', 'build', '--prefix', 'frontend']);
|
||||
run('bash', ['-c', 'rm -rf dist && mkdir -p dist && cp -R frontend/dist/* dist/']);
|
||||
ok('Build complete → dist/');
|
||||
},
|
||||
|
||||
// Thin wrappers around repo scripts; paths are relative to PROJECT_ROOT,
// which is run()'s default cwd.

/** Show project port ownership map. */
ports() {
  run('node', ['scripts/ports-report.cjs']);
},

/** Run MVP health checks. */
test() {
  run('node', ['scripts/mvp-check.cjs']);
},

/** Run wiring smoke test. */
'test:wiring'() {
  run('node', ['scripts/wiring-smoke-test.cjs']);
},

/** Run persistence self-test. */
'test:persistence'() {
  run('node', ['scripts/persistence-self-test.cjs']);
},
|
||||
|
||||
// ── Help ───────────────────────────────────────────────────────────────────
/**
 * Print the command reference. Also the default command when `dev` is run
 * with no arguments (see the dispatch at the bottom of the file).
 */
help() {
  log('');
  log(c('bold', c('cyan', ' ⚡ dev — Project Hub CLI')));
  log(c('dim', ` Project root: ${PROJECT_ROOT}`));
  log(c('dim', ` Remote host: ${REMOTE_IP} (alias: project-hub)`));
  log('');

  // h(): render one titled section; command names padded to align descriptions.
  const h = (label, commands) => {
    log(c('bold', ` ${label}`));
    for (const [cmd, desc] of commands)
      log(` ${c('cyan', ('dev ' + cmd).padEnd(30))} ${c('dim', desc)}`);
    log('');
  };

  // Keep these tables in sync with the CMD keys above.
  h('Remote Server', [
    ['ssh', 'Open interactive SSH session'],
    ['server:status', 'Memory, disk, Docker containers, open ports'],
    ['server:logs [service]', 'Tail container logs (default: project-manager-api)'],
    ['server:setup', 'Install Docker + UFW, create /opt/project-hub'],
    ['server:init-app', 'Clone repo + first build + Caddy config (run once)'],
    ['server:reboot', 'Reboot remote server (requires typed confirmation)'],
    ['deploy', 'git pull + docker compose up --build on server'],
  ]);

  h('Local Dev', [
    ['start', 'docker compose up + vite dev server'],
    ['stop', 'Stop Docker + kill Vite process'],
    ['build', 'Build frontend → dist/'],
    ['health', 'GET /health from local backend'],
    ['ports', 'Show project port ownership map'],
  ]);

  h('Tests', [
    ['test', 'Run MVP health checks'],
    ['test:wiring', 'Run wiring smoke test (tasks, members, invites)'],
    ['test:persistence', 'Run persistence self-test'],
  ]);

  log(c('dim', ' Run any command with --help for details (future)'));
  log('');
},
|
||||
};
|
||||
|
||||
// ── Dispatch ──────────────────────────────────────────────────────────────────
// argv: [node, script, subcmd, ...args]. Extra args are read by individual
// commands straight from process.argv (e.g. server:logs uses argv[3]).
const subcmd = process.argv[2] ?? 'help';

// Object.hasOwn guards against dispatching on inherited Object.prototype
// members: previously `dev toString` (etc.) silently "succeeded" instead of
// reporting an unknown command. The unused `...rest` binding is also gone.
if (Object.hasOwn(CMD, subcmd) && typeof CMD[subcmd] === 'function') {
  CMD[subcmd]();
} else {
  err(`Unknown command: ${c('yellow', subcmd)}\n Run dev help to see available commands.`);
}
|
||||
55
docker-compose.prod.yml
Normal file
55
docker-compose.prod.yml
Normal file
@@ -0,0 +1,55 @@
|
||||
# Production deployment compose file
# Used by: dev server:init-app first run + Drone CI deploy step
# Builds from source checked out at /opt/project-hub/src on the server.
# Persistent data lives in /opt/project-hub/data + /opt/project-hub/uploads
# (those dirs are outside the git checkout so they survive redeployments).

services:

  backend:
    build:
      context: ./backend
      dockerfile: Dockerfile
    image: pm-backend:latest
    container_name: pm-backend
    restart: unless-stopped
    environment:
      - PORT=4000
      - NODE_ENV=production
      # Public origin — must match the Caddy site block and the frontend's
      # VITE_API_URL below.
      - APP_URL=https://pm.jiosii.com
      - CORS_ORIGIN=https://pm.jiosii.com
    volumes:
      - /opt/project-hub/data:/app/data
      - /opt/project-hub/uploads:/app/uploads
    networks:
      - pm-network
    healthcheck:
      # NOTE(review): requires curl inside the backend image — node:alpine
      # bases do not ship it by default; confirm backend/Dockerfile installs it
      # or switch to wget/node -e.
      test: ["CMD", "curl", "-f", "http://localhost:4000/health"]
      interval: 30s
      timeout: 5s
      retries: 3
      start_period: 10s

  frontend:
    build:
      context: ./frontend
      dockerfile: Dockerfile
      args:
        # Baked into the JS bundle at build time (see frontend/Dockerfile).
        - VITE_API_URL=https://pm.jiosii.com/api
    image: pm-frontend:latest
    container_name: pm-frontend
    restart: unless-stopped
    depends_on:
      - backend
    networks:
      - pm-network
    healthcheck:
      # wget is provided by busybox in the nginx:alpine base image.
      test: ["CMD", "wget", "-qO-", "http://localhost/health"]
      interval: 30s
      timeout: 5s
      retries: 3
      start_period: 10s

# Shared external-facing network; the host's Caddy container is attached to it
# by the deploy scripts so it can reach pm-backend / pm-frontend by name.
networks:
  pm-network:
    name: pm-network
||||
24
frontend/Dockerfile
Normal file
24
frontend/Dockerfile
Normal file
@@ -0,0 +1,24 @@
|
||||
# ── Stage 1: Build ────────────────────────────────────────────────────────────
FROM node:20-alpine AS build

# VITE_API_URL is inlined into the bundle at build time by Vite; production
# overrides it via the compose build arg (docker-compose.prod.yml).
ARG VITE_API_URL=http://localhost:4000/api
ENV VITE_API_URL=$VITE_API_URL

WORKDIR /app

# Install deps first so the layer cache survives source-only changes.
COPY package*.json ./
RUN npm ci --ignore-scripts

# NOTE(review): ensure frontend/.dockerignore excludes node_modules and dist,
# otherwise COPY . . defeats the cache split above and can clobber the
# freshly installed deps.
COPY . .
RUN npm run build

# ── Stage 2: Serve ────────────────────────────────────────────────────────────
FROM nginx:1.27-alpine

COPY --from=build /app/dist /usr/share/nginx/html
# nginx.conf provides the /health endpoint probed below and by compose.
COPY nginx.conf /etc/nginx/conf.d/default.conf

EXPOSE 80

# busybox wget ships with the alpine base, so no extra package is needed.
HEALTHCHECK --interval=30s --timeout=5s --start-period=5s --retries=3 \
  CMD wget -qO- http://localhost/health || exit 1
|
||||
28
frontend/nginx.conf
Normal file
28
frontend/nginx.conf
Normal file
@@ -0,0 +1,28 @@
|
||||
server {
    listen 80;
    server_name _;

    root /usr/share/nginx/html;
    index index.html;

    # Health probe (checked by Docker + Caddy)
    location /health {
        return 200 'ok';
        add_header Content-Type text/plain;
    }

    # SPA fallback — any path that isn't a real file serves index.html
    location / {
        try_files $uri $uri/ /index.html;
    }

    # Static asset caching.
    # NOTE(review): this regex location outranks the `/` prefix match, so a
    # missing asset 404s instead of falling back to index.html (desirable for
    # hashed bundles). The 1y immutable policy assumes content-hashed
    # filenames — true for Vite output, but non-hashed files matching these
    # extensions (e.g. favicon.ico) will also be cached for a year.
    location ~* \.(js|css|png|jpg|jpeg|gif|ico|svg|woff2?)$ {
        expires 1y;
        add_header Cache-Control "public, immutable";
    }

    gzip on;
    gzip_types text/plain text/css application/javascript application/json image/svg+xml;
    gzip_min_length 1024;
}
|
||||
Reference in New Issue
Block a user