refactor: all-in-one docker deployment with auto-tuned resources and per-mode compose templates

This commit is contained in:
xboard
2026-04-19 21:18:15 +08:00
parent c36054b970
commit e01620689b
20 changed files with 488 additions and 231 deletions
+26
View File
@@ -0,0 +1,26 @@
# Xboard protocol fusion entrypoint.
#
# Caddy listens on a single public port and dispatches HTTP traffic to Octane
# while transparently upgrading WebSocket requests to the ws-server worker.
# This lets every external reverse proxy (nginx, Cloudflare, the user's own
# Caddy, ...) treat the panel as a single upstream and avoids exposing the
# 8076 WebSocket port directly.

# Global options: this instance is a fixed in-container dispatcher, so the
# admin API, automatic HTTPS, and config persistence are all disabled; logs
# go to stdout so `docker logs` captures them.
{
	admin off
	auto_https off
	persist_config off
	log {
		output stdout
		format console
	}
}

# {$VAR:default} reads the container environment with a fallback; the
# defaults match what entrypoint.sh exports when ENABLE_CADDY=true.
:{$CADDY_LISTEN_PORT:7001} {
	# /ws is the panel<->node WebSocket: proxy it to the ws-server worker.
	# Everything else is plain HTTP and goes to Octane on its internal port.
	@ws path /ws
	reverse_proxy @ws 127.0.0.1:{$WS_PORT:8076}
	reverse_proxy 127.0.0.1:{$OCTANE_INTERNAL_PORT:7002} {
		header_up Host {host}
		header_up X-Real-IP {remote_host}
	}
}
+22
View File
@@ -0,0 +1,22 @@
# Caddy config used by compose.split.yaml — the embedded image's Caddy is
# disabled in this mode and a dedicated Caddy container fronts independent
# web and ws-server containers reachable via the compose network.

# Admin API / auto-HTTPS / config persistence are unnecessary for a fixed
# dispatcher; log to stdout so `docker logs` captures it.
{
	admin off
	auto_https off
	persist_config off
	log {
		output stdout
		format console
	}
}

:7001 {
	# /ws goes to the dedicated ws-server container; everything else to the
	# web (Octane) container. Hostnames resolve on the compose network.
	@ws path /ws
	reverse_proxy @ws ws-server:8076
	reverse_proxy web:7001 {
		header_up Host {host}
		header_up X-Real-IP {remote_host}
	}
}
+122 -5
View File
@@ -1,11 +1,128 @@
#!/bin/sh
set -e
echo "[entrypoint] Running database migrations..."
php /www/artisan migrate --force
# Resolve the binding scheme based on whether the embedded Caddy is enabled.
#
# When ENABLE_CADDY=true (default), Caddy owns the public port (7001) and
# dispatches traffic internally; Octane and ws-server bind to localhost only
# so they cannot be reached from outside the container.
#
# When ENABLE_CADDY=false (e.g. an external reverse proxy or split mode),
# Octane takes the public port directly to keep behaviour identical to the
# pre-Caddy releases.
if [ "${ENABLE_CADDY}" = "true" ]; then
: "${OCTANE_HOST:=127.0.0.1}"
: "${OCTANE_PORT:=7002}"
: "${WS_HOST:=127.0.0.1}"
: "${WS_PORT:=8076}"
: "${CADDY_LISTEN_PORT:=7001}"
else
: "${OCTANE_HOST:=0.0.0.0}"
: "${OCTANE_PORT:=7001}"
: "${WS_HOST:=0.0.0.0}"
: "${WS_PORT:=8076}"
fi
export OCTANE_HOST OCTANE_PORT WS_HOST WS_PORT CADDY_LISTEN_PORT
export OCTANE_INTERNAL_PORT="${OCTANE_PORT}"
echo "[entrypoint] Checking core plugins..."
php /www/artisan tinker --execute="App\Services\Plugin\PluginManager::installDefaultPlugins();" 2>/dev/null || true
# ---------------------------------------------------------------------------
# Auto-tune worker counts based on the host (CPU + memory).
#
# Heuristic: each PHP worker (Octane/Horizon) costs ~80 MiB. After reserving
# ~300 MiB for the always-on processes (caddy/redis/ws-server/masters), divide
# the remaining budget across roles. Any user-set ENV wins.
# ---------------------------------------------------------------------------
# Print the number of CPUs available to this container.
# Prefers the cgroup v2 quota (cpu.max: "<quota> <period>", rounded up) so a
# `--cpus` container limit is respected; falls back to nproc, then to 1.
detect_cpus() {
    if [ -r /sys/fs/cgroup/cpu.max ]; then
        # cgroup v2: "<quota> <period>" or "max <period>".
        # `|| true` guards set -e: this runs inside CPUS=$(detect_cpus), and a
        # failing read (empty file, racing cgroup teardown) would otherwise
        # abort the whole entrypoint instead of falling through to nproc.
        read -r q p < /sys/fs/cgroup/cpu.max 2>/dev/null || true
        if [ "$q" != "max" ] && [ -n "$q" ] && [ -n "$p" ] && [ "$p" -gt 0 ]; then
            # Ceil(quota / period): a 1.5-CPU limit counts as 2.
            echo $(( (q + p - 1) / p ))
            return
        fi
    fi
    nproc 2>/dev/null || echo 1
}
echo "[entrypoint] Starting services..."
# Print the memory budget in MiB.
# Prefers the cgroup v2 limit (memory.max) so a container memory limit is
# respected. Without one, assume min(MemAvailable, MEM_FALLBACK_MIB) to avoid
# over-provisioning workers on large shared hosts (cap defaults to 1024).
detect_mem_mib() {
    if [ -r /sys/fs/cgroup/memory.max ]; then
        # `|| true` guards set -e inside the MEM_MIB=$(detect_mem_mib)
        # substitution should the file vanish between the -r check and cat.
        m=$(cat /sys/fs/cgroup/memory.max 2>/dev/null || true)
        if [ "$m" != "max" ] && [ -n "$m" ]; then
            echo $(( m / 1024 / 1024 ))
            return
        fi
    fi
    # No cgroup limit: avoid over-provisioning on big hosts. Cap the assumed
    # budget to MEM_FALLBACK_MIB (default 1024) unless the user opts out by
    # setting it explicitly. Use whichever is smaller of MemAvailable and cap.
    avail=$(awk '/MemAvailable/ {print int($2/1024)}' /proc/meminfo 2>/dev/null || echo 1024)
    # Old kernels lack MemAvailable: awk then exits 0 with EMPTY output and the
    # `|| echo` fallback never fires, so default here too rather than feed ""
    # into the numeric comparison below.
    : "${avail:=1024}"
    cap=${MEM_FALLBACK_MIB:-1024}
    [ "$avail" -lt "$cap" ] && echo "$avail" || echo "$cap"
}
CPUS=$(detect_cpus)
MEM_MIB=$(detect_mem_mib)
# Resource profile presets. RESOURCE_PROFILE selects ratios for the budget split:
# minimal - smallest possible footprint (~250-350 MiB), single octane worker,
# horizon capped to 1/1/1. Suitable for VPS with <=512 MiB RAM.
# balanced - default; ~80 MiB per worker, octane gets 25% of slots.
# performance - larger reserves for opcache/caches, more aggressive horizon caps.
# auto - same as balanced.
: "${RESOURCE_PROFILE:=auto}"
case "$RESOURCE_PROFILE" in
minimal) RESERVED_MIB=200; SLOT_MIB=100; OCT_NUM=1; OCT_DEN=1; OCT_FORCE=1; auto_horizon_mem=128; auto_octane_gc=64 ;;
performance) RESERVED_MIB=400; SLOT_MIB=70; OCT_NUM=1; OCT_DEN=3; OCT_FORCE=0; auto_horizon_mem=384; auto_octane_gc=256 ;;
balanced|auto|*) RESERVED_MIB=300; SLOT_MIB=80; OCT_NUM=1; OCT_DEN=4; OCT_FORCE=0; auto_horizon_mem=256; auto_octane_gc=128 ;;
esac
BUDGET=$(( MEM_MIB - RESERVED_MIB ))
[ "$BUDGET" -lt "$SLOT_MIB" ] && BUDGET=$SLOT_MIB
SLOTS=$(( BUDGET / SLOT_MIB ))
# Print $1 constrained to the inclusive range [$2, $3].
# The two bounds are applied in sequence — floor first, then ceiling — exactly
# like the original pair of independent comparisons.
clamp() {
    n=$1
    if [ "$n" -lt "$2" ]; then n=$2; fi
    if [ "$n" -gt "$3" ]; then n=$3; fi
    echo "$n"
}
if [ "$OCT_FORCE" = "1" ]; then
auto_octane=1
auto_dp=1; auto_biz=1; auto_notif=1
else
auto_octane=$(clamp $(( (SLOTS * OCT_NUM) / OCT_DEN )) 1 "$CPUS")
remaining=$(( SLOTS - auto_octane - 2 ))
[ "$remaining" -lt 3 ] && remaining=3
auto_dp=$(clamp $(( remaining / 2 )) 1 $(( CPUS * 2 )))
auto_biz=$(clamp $(( remaining / 4 )) 1 "$CPUS")
auto_notif=$(clamp $(( remaining / 4 )) 1 "$CPUS")
fi
# User-set ENV always wins.
: "${OCTANE_WORKERS:=$auto_octane}"
: "${OCTANE_TASK_WORKERS:=1}"
: "${OCTANE_MAX_REQUESTS:=500}"
: "${OCTANE_GARBAGE_MB:=$auto_octane_gc}"
: "${OCTANE_MAX_EXECUTION_TIME:=60}"
: "${HORIZON_DATA_PIPELINE_MAX:=$auto_dp}"
: "${HORIZON_BUSINESS_MAX:=$auto_biz}"
: "${HORIZON_NOTIFICATION_MAX:=$auto_notif}"
: "${HORIZON_WORKER_MEMORY_MB:=$auto_horizon_mem}"
: "${HORIZON_WORKER_MAX_TIME:=0}"
: "${HORIZON_WORKER_MAX_JOBS:=0}"
export OCTANE_WORKERS OCTANE_TASK_WORKERS OCTANE_MAX_REQUESTS \
OCTANE_GARBAGE_MB OCTANE_MAX_EXECUTION_TIME \
HORIZON_DATA_PIPELINE_MAX HORIZON_BUSINESS_MAX HORIZON_NOTIFICATION_MAX \
HORIZON_WORKER_MEMORY_MB HORIZON_WORKER_MAX_TIME HORIZON_WORKER_MAX_JOBS \
RESOURCE_PROFILE
echo "[entrypoint] Auto-tune (profile=${RESOURCE_PROFILE}): cpus=${CPUS} mem=${MEM_MIB}MiB slots=${SLOTS} -> octane=${OCTANE_WORKERS} horizon(dp/biz/notif)=${HORIZON_DATA_PIPELINE_MAX}/${HORIZON_BUSINESS_MAX}/${HORIZON_NOTIFICATION_MAX} horizon_worker_mem=${HORIZON_WORKER_MEMORY_MB}MB"
echo "[entrypoint] Horizon supervisors use balance=auto with minProcesses=1, so they scale up to the cap on demand and back down when idle."
if [ ! -s /www/.env ] || ! grep -qE '^INSTALLED=(1|true)$' /www/.env || echo " $* " | grep -q ' xboard:install '; then
echo "[entrypoint] Skipping xboard:update (not yet installed or running xboard:install)."
else
echo "[entrypoint] Running xboard:update..."
php /www/artisan xboard:update --no-interaction || \
echo "[entrypoint] WARNING: xboard:update failed; continuing so supervisor can boot anyway." >&2
fi
echo "[entrypoint] Starting services (caddy=${ENABLE_CADDY} web=${ENABLE_WEB} horizon=${ENABLE_HORIZON} ws=${ENABLE_WS_SERVER})..."
exec "$@"
+17
View File
@@ -0,0 +1,17 @@
; Slim PHP defaults for the all-in-one container.
; Tunables are overridable via Docker ENV (PHP_MEMORY_LIMIT, etc.) if needed.
memory_limit = 256M

[opcache]
opcache.enable = 1
; One-off CLI invocations (artisan commands) skip opcache to save memory.
opcache.enable_cli = 0
opcache.memory_consumption = 96
opcache.interned_strings_buffer = 16
opcache.max_accelerated_files = 10000
; Timestamps are never revalidated: code is baked into the image, so a
; process restart (not just a request) is needed to pick up code changes.
opcache.validate_timestamps = 0
opcache.revalidate_freq = 0
; NOTE(review): opcache.fast_shutdown was removed in PHP 7.2 — on modern PHP
; this directive is ignored; confirm the target PHP version or drop it.
opcache.fast_shutdown = 1

[swoole]
; Keep Swoole from aliasing short global function names (co(), etc.).
swoole.use_shortname = Off
+20 -3
View File
@@ -8,7 +8,7 @@ loglevel=info
[program:octane]
process_name=%(program_name)s_%(process_num)02d
command=php /www/artisan octane:start --host=0.0.0.0 --port=7001
command=php /www/artisan octane:start --host=%(ENV_OCTANE_HOST)s --port=%(ENV_OCTANE_PORT)s --workers=%(ENV_OCTANE_WORKERS)s --task-workers=%(ENV_OCTANE_TASK_WORKERS)s --max-requests=%(ENV_OCTANE_MAX_REQUESTS)s
autostart=%(ENV_ENABLE_WEB)s
autorestart=true
user=www
@@ -65,7 +65,7 @@ priority=300
[program:ws-server]
process_name=%(program_name)s_%(process_num)02d
command=php /www/artisan ws-server start
command=php /www/artisan ws-server start --host=%(ENV_WS_HOST)s --port=%(ENV_WS_PORT)s
autostart=%(ENV_ENABLE_WS_SERVER)s
autorestart=true
user=www
@@ -78,4 +78,21 @@ stopwaitsecs=5
stopsignal=SIGINT
stopasgroup=true
killasgroup=true
priority=400
priority=400
; Embedded Caddy dispatcher: owns the public port and fans traffic out to
; Octane (HTTP) and ws-server (WebSocket) inside the container.
[program:caddy]
process_name=%(program_name)s_%(process_num)02d
command=caddy run --config /etc/caddy/Caddyfile --adapter caddyfile
; ENABLE_CADDY=false (external reverse proxy / split mode) keeps this stopped.
autostart=%(ENV_ENABLE_CADDY)s
autorestart=true
; Runs as root, unlike the php programs which run as www.
user=root
redirect_stderr=true
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stdout_logfile_backups=0
numprocs=1
stopwaitsecs=5
stopsignal=TERM
stopasgroup=true
killasgroup=true
priority=500
+2
View File
@@ -29,4 +29,6 @@ frontend
docker-compose.yaml
bun.lockb
compose.yaml
compose.host.yaml
compose.split.yaml
.scribe
+10 -7
View File
@@ -7,7 +7,7 @@ RUN CFLAGS="-O0" install-php-extensions pcntl && \
CFLAGS="-O0 -g0" install-php-extensions bcmath && \
install-php-extensions zip && \
install-php-extensions redis && \
apk --no-cache add shadow sqlite mysql-client mysql-dev mariadb-connector-c git patch supervisor redis && \
apk --no-cache add shadow sqlite mysql-client mysql-dev mariadb-connector-c git patch supervisor redis caddy && \
addgroup -S -g 1000 www && adduser -S -G www -u 1000 www && \
(getent group redis || addgroup -S redis) && \
(getent passwd redis || adduser -S -G redis -H -h /data redis)
@@ -17,9 +17,9 @@ WORKDIR /www
COPY .docker /
# Add build arguments
ARG CACHEBUST
ARG REPO_URL
ARG BRANCH_NAME
ARG CACHEBUST=1
ARG REPO_URL=https://github.com/cedar2025/Xboard
ARG BRANCH_NAME=master
RUN echo "Attempting to clone branch: ${BRANCH_NAME} from ${REPO_URL} with CACHEBUST: ${CACHEBUST}" && \
rm -rf ./* && \
@@ -29,8 +29,10 @@ RUN echo "Attempting to clone branch: ${BRANCH_NAME} from ${REPO_URL} with CACHE
git submodule update --init --recursive --force
COPY .docker/supervisor/supervisord.conf /etc/supervisor/conf.d/supervisord.conf
COPY .docker/caddy/Caddyfile /etc/caddy/Caddyfile
COPY .docker/php/zz-xboard.ini /usr/local/etc/php/conf.d/zz-xboard.ini
RUN composer install --no-cache --no-dev \
RUN composer install --no-cache --no-dev --no-security-blocking \
&& php artisan storage:link \
&& chown -R www:www /www \
&& chmod -R 775 /www \
@@ -39,8 +41,9 @@ RUN composer install --no-cache --no-dev \
ENV ENABLE_WEB=true \
ENABLE_HORIZON=true \
ENABLE_REDIS=false \
ENABLE_WS_SERVER=false
ENABLE_REDIS=true \
ENABLE_WS_SERVER=true \
ENABLE_CADDY=true
EXPOSE 7001
COPY .docker/entrypoint.sh /entrypoint.sh
+5 -4
View File
@@ -47,14 +47,15 @@ class XboardUpdate extends Command
$this->info('正在检查并安装默认插件...');
PluginManager::installDefaultPlugins();
$this->info('默认插件检查完成');
// Artisan::call('reset:traffic', ['--fix-null' => true]);
$this->info('正在重新计算所有用户的重置时间...');
Artisan::call('reset:traffic', ['--force' => true]);
$updateService = new UpdateService();
$updateService->updateVersionCache();
$themeService = app(ThemeService::class);
$themeService->refreshCurrentTheme();
Artisan::call('horizon:terminate');
try {
Artisan::call('horizon:terminate');
} catch (\Throwable $e) {
$this->warn('horizon:terminate skipped: ' . $e->getMessage());
}
$this->info('更新完毕,队列服务已重启,你无需进行任何操作。');
}
}
@@ -4,8 +4,10 @@ namespace App\Http\Controllers\V2\Server;
use App\Http\Controllers\Controller;
use App\Services\ServerService;
use App\WebSocket\NodeWorker;
use Illuminate\Http\Request;
use Illuminate\Http\JsonResponse;
use Illuminate\Support\Facades\Cache;
class ServerController extends Controller
{
@@ -16,14 +18,14 @@ class ServerController extends Controller
{
$websocket = ['enabled' => false];
if ((bool) admin_setting('server_ws_enable', 1)) {
if ((bool) admin_setting('server_ws_enable', 1) && Cache::has(NodeWorker::HEARTBEAT_CACHE_KEY)) {
$customUrl = trim((string) admin_setting('server_ws_url', ''));
if ($customUrl !== '') {
$wsUrl = rtrim($customUrl, '/');
} else {
$wsScheme = $request->isSecure() ? 'wss' : 'ws';
$wsUrl = "{$wsScheme}://{$request->getHost()}:8076";
$wsUrl = "{$wsScheme}://{$request->getHttpHost()}/ws";
}
$websocket = [
+9
View File
@@ -19,6 +19,10 @@ class NodeWorker
private const AUTH_TIMEOUT = 10;
private const PING_INTERVAL = 55;
public const HEARTBEAT_CACHE_KEY = 'ws_server:heartbeat';
private const HEARTBEAT_INTERVAL = 10;
private const HEARTBEAT_TTL = 30;
private Worker $worker;
private array $handlers = [
@@ -70,6 +74,11 @@ class NodeWorker
private function setupTimers(): void
{
Cache::put(self::HEARTBEAT_CACHE_KEY, time(), self::HEARTBEAT_TTL);
Timer::add(self::HEARTBEAT_INTERVAL, function () {
Cache::put(self::HEARTBEAT_CACHE_KEY, time(), self::HEARTBEAT_TTL);
});
Timer::add(self::PING_INTERVAL, function () {
$seen = [];
+38
View File
@@ -0,0 +1,38 @@
# Deployment for 1Panel users.
#
# 1Panel runs MySQL/Redis as separate containers on a Docker network named
# `1panel-network`. To let Xboard reach them by their container hostname
# (e.g. `1Panel-mysql-xxxx`), this compose file joins that external network
# in addition to publishing port 7001 for 1Panel's reverse proxy / website.
#
# During `php artisan xboard:install`, set:
# - Database Host: the container name shown in 1Panel under
# Database -> Connection Info -> Host
# - Redis: choose the built-in Redis (already provided by this image)
services:
xboard:
image: ghcr.io/cedar2025/xboard:latest
restart: unless-stopped
ports:
- "7001:7001"
networks:
- default
- 1panel-network
volumes:
- ./.env:/www/.env
- ./.docker/.data/:/www/.docker/.data
- ./storage/logs:/www/storage/logs
- ./storage/theme:/www/storage/theme
- ./plugins:/www/plugins
- redis-data:/data
environment:
- RESOURCE_PROFILE=balanced # minimal | balanced | performance | auto
- ENABLE_HORIZON=true
- docker=true
networks:
1panel-network:
external: true
volumes:
redis-data:
+19
View File
@@ -0,0 +1,19 @@
# Host-network deployment: the container shares the host's network stack, so
# the embedded Caddy listens directly on the host's :7001 (no port mapping
# section is needed or honored under network_mode: host). Intended for
# setups where a host-native reverse proxy (e.g. aaPanel's openresty)
# forwards to 127.0.0.1:7001.
services:
  xboard:
    image: ghcr.io/cedar2025/xboard:latest
    restart: unless-stopped
    network_mode: host
    volumes:
      - ./.env:/www/.env
      - ./.docker/.data/:/www/.docker/.data
      - ./storage/logs:/www/storage/logs
      - ./storage/theme:/www/storage/theme
      - ./plugins:/www/plugins
      # Named volume backing the embedded Redis's data directory.
      - redis-data:/data
    environment:
      - RESOURCE_PROFILE=balanced # minimal | balanced | performance | auto
      - ENABLE_HORIZON=true
      - docker=true

volumes:
  redis-data:
+23 -37
View File
@@ -1,44 +1,30 @@
# Default deployment: bridge network with port 7001 published to the host.
# Suitable for: bare docker-compose, aaPanel + Docker manager, custom reverse
# proxies (nginx, Caddy on host, Cloudflare Tunnel, etc.) that talk to
# 127.0.0.1:7001 on the host.
#
# For 1Panel users: use compose.1panel.sample.yaml so the container can reach
# the 1Panel-managed MySQL/Redis on the 1panel-network.
#
# For aaPanel native (openresty on host) users that prefer host networking:
# use compose.host.sample.yaml.
services:
web:
image: ghcr.io/cedar2025/xboard:new
volumes:
- redis-data:/data
- ./:/www/
environment:
- docker=true
depends_on:
- redis
network_mode: host
command: php artisan octane:start --port=7001 --host=0.0.0.0
restart: always
horizon:
image: ghcr.io/cedar2025/xboard:new
volumes:
- redis-data:/data
- ./:/www/
restart: always
network_mode: host
command: php artisan horizon
depends_on:
- redis
ws-server:
image: ghcr.io/cedar2025/xboard:new
volumes:
- redis-data:/data
- ./:/www/
restart: always
network_mode: host
command: php artisan ws-server start
depends_on:
- redis
redis:
image: redis:7-alpine
command: redis-server --unixsocket /data/redis.sock --unixsocketperm 777
xboard:
image: ghcr.io/cedar2025/xboard:latest
restart: unless-stopped
ports:
- "7001:7001"
volumes:
- ./.env:/www/.env
- ./.docker/.data/:/www/.docker/.data
- ./storage/logs:/www/storage/logs
- ./storage/theme:/www/storage/theme
- ./plugins:/www/plugins
- redis-data:/data
sysctls:
net.core.somaxconn: 1024
environment:
- RESOURCE_PROFILE=balanced # minimal | balanced | performance | auto
- ENABLE_HORIZON=true
- docker=true
volumes:
redis-data:
+73
View File
@@ -0,0 +1,73 @@
# Split deployment for K8s users or operators who want to scale, restart and
# limit each process independently. The single image is reused across services
# by disabling, via ENABLE_* environment flags, the supervisor programs that
# are not relevant to that role.
#
# Topology:
#   caddy ──┬─→ web (HTTP) :7001 → octane :7001
#           └─→ ws-server (WebSocket) /ws → :8076
#   horizon (queue worker, no public port)
#   redis (state)
services:
  caddy:
    image: caddy:2-alpine
    restart: unless-stopped
    ports:
      - "7001:7001"
    depends_on:
      - web
      - ws-server
    volumes:
      # Split-mode Caddyfile: routes /ws to ws-server:8076 and everything
      # else to web:7001 over the compose network.
      - ./.docker/caddy/Caddyfile.split:/etc/caddy/Caddyfile:ro

  web:
    image: ghcr.io/cedar2025/xboard:latest
    restart: unless-stopped
    depends_on:
      - redis
    # YAML anchor so every role mounts the exact same host paths.
    volumes: &shared-volumes
      - ./.env:/www/.env
      - ./.docker/.data/:/www/.docker/.data
      - ./storage/logs:/www/storage/logs
      - ./storage/theme:/www/storage/theme
      - ./plugins:/www/plugins
    environment:
      docker: "true"
      # Only Octane runs in this container; the other programs have their
      # own containers below.
      ENABLE_CADDY: "false"
      ENABLE_HORIZON: "false"
      ENABLE_WS_SERVER: "false"

  horizon:
    image: ghcr.io/cedar2025/xboard:latest
    restart: unless-stopped
    depends_on:
      - redis
    volumes: *shared-volumes
    environment:
      docker: "true"
      ENABLE_CADDY: "false"
      ENABLE_WEB: "false"
      ENABLE_WS_SERVER: "false"

  ws-server:
    image: ghcr.io/cedar2025/xboard:latest
    restart: unless-stopped
    depends_on:
      - redis
    volumes: *shared-volumes
    environment:
      docker: "true"
      ENABLE_CADDY: "false"
      ENABLE_WEB: "false"
      ENABLE_HORIZON: "false"
      # Must be reachable from the caddy container, not just localhost.
      WS_HOST: "0.0.0.0"

  redis:
    image: redis:7-alpine
    restart: unless-stopped
    command: redis-server --unixsocket /data/redis.sock --unixsocketperm 777 --save 900 1 --save 300 10 --save 60 10000
    volumes:
      - redis-data:/data

volumes:
  redis-data:
+12 -3
View File
@@ -176,7 +176,10 @@ return [
'balance' => 'auto',
'autoScalingStrategy' => 'time',
'minProcesses' => 1,
'maxProcesses' => 8,
'maxProcesses' => (int) env('HORIZON_DATA_PIPELINE_MAX', 8),
'memory' => (int) env('HORIZON_WORKER_MEMORY_MB', 128),
'maxTime' => (int) env('HORIZON_WORKER_MAX_TIME', 3600),
'maxJobs' => (int) env('HORIZON_WORKER_MAX_JOBS', 1000),
'balanceCooldown' => 1,
'tries' => 3,
'timeout' => 30,
@@ -186,7 +189,10 @@ return [
'queue' => ['default', 'order_handle'],
'balance' => 'simple',
'minProcesses' => 1,
'maxProcesses' => 3,
'maxProcesses' => (int) env('HORIZON_BUSINESS_MAX', 3),
'memory' => (int) env('HORIZON_WORKER_MEMORY_MB', 128),
'maxTime' => (int) env('HORIZON_WORKER_MAX_TIME', 3600),
'maxJobs' => (int) env('HORIZON_WORKER_MAX_JOBS', 1000),
'tries' => 3,
'timeout' => 30,
],
@@ -196,7 +202,10 @@ return [
'balance' => 'auto',
'autoScalingStrategy' => 'size',
'minProcesses' => 1,
'maxProcesses' => 3,
'maxProcesses' => (int) env('HORIZON_NOTIFICATION_MAX', 3),
'memory' => (int) env('HORIZON_WORKER_MEMORY_MB', 128),
'maxTime' => (int) env('HORIZON_WORKER_MAX_TIME', 3600),
'maxJobs' => (int) env('HORIZON_WORKER_MAX_JOBS', 1000),
'tries' => 3,
'timeout' => 60,
'backoff' => [3, 10, 30],
+8 -10
View File
@@ -102,8 +102,8 @@ return [
OperationTerminated::class => [
FlushTemporaryContainerInstances::class,
DisconnectFromDatabases::class,
CollectGarbage::class,
// DisconnectFromDatabases::class,
// CollectGarbage::class,
],
WorkerErrorOccurred::class => [
@@ -129,6 +129,7 @@ return [
'warm' => [
...Octane::defaultServicesToWarm(),
\App\Services\Plugin\PluginManager::class,
],
'flush' => [
@@ -147,8 +148,8 @@ return [
*/
'cache' => [
'rows' => 5000,
'bytes' => 20000,
'rows' => (int) env('OCTANE_CACHE_ROWS', 1000),
'bytes' => (int) env('OCTANE_CACHE_BYTES', 8192),
],
/*
@@ -163,10 +164,7 @@ return [
*/
'tables' => [
'example:1000' => [
'name' => 'string:1000',
'votes' => 'int',
],
//
],
/*
@@ -203,7 +201,7 @@ return [
|
*/
'garbage' => 128,
'garbage' => (int) env('OCTANE_GARBAGE_MB', 128),
/*
|--------------------------------------------------------------------------
@@ -216,6 +214,6 @@ return [
|
*/
'max_execution_time' => 60,
'max_execution_time' => (int) env('OCTANE_MAX_EXECUTION_TIME', 60),
];
@@ -0,0 +1,28 @@
<?php

use Illuminate\Database\Migrations\Migration;
use Illuminate\Support\Facades\Artisan;
use Illuminate\Support\Facades\Schema;

/**
 * One-shot backfill of users.next_reset_at for legacy installs.
 *
 * Replaces the previous `reset:traffic --force` step in `xboard:update`,
 * which had to run on every container start. Now it runs exactly once per
 * database (Laravel migrations are tracked).
 */
return new class extends Migration {
    public function up(): void
    {
        // Nothing to backfill when the column does not exist, e.g. a schema
        // state where v2_user.next_reset_at has not been created yet.
        if (!Schema::hasColumn('v2_user', 'next_reset_at')) {
            return;
        }
        // NOTE(review): --fix-null presumably only fills rows whose
        // next_reset_at is NULL — confirm against the reset:traffic command.
        Artisan::call('reset:traffic', ['--fix-null' => true]);
    }

    public function down(): void
    {
        // Backfill is non-destructive; nothing to roll back.
    }
};
+18 -103
View File
@@ -33,33 +33,20 @@ sudo bash quick_start.sh
2. Configure Reverse Proxy:
```nginx
location /ws/ {
proxy_pass http://127.0.0.1:8076;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_read_timeout 60s;
}
location ^~ / {
proxy_pass http://127.0.0.1:7001;
proxy_http_version 1.1;
proxy_set_header Connection "";
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Real-PORT $remote_port;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header Host $http_host;
proxy_set_header Scheme $scheme;
proxy_set_header Server-Protocol $server_protocol;
proxy_set_header Server-Name $server_name;
proxy_set_header Server-Addr $server_addr;
proxy_set_header Server-Port $server_port;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection $http_connection;
proxy_read_timeout 60s;
proxy_buffering off;
proxy_cache off;
}
```
> The `/ws/` location enables WebSocket real-time node synchronization via `ws-server`. This service is enabled by default and can be toggled in Admin Panel > System Settings > Server.
> The all-in-one container's embedded Caddy fuses HTTP and the panel↔node WebSocket on port 7001. The single `Upgrade`/`Connection` pair above is enough; no separate `/ws/` location is needed. To opt out and expose Octane / `:8076` directly, set `ENABLE_CADDY=false` in `compose.yaml`.
3. Install Xboard:
```bash
@@ -74,85 +61,22 @@ yum update && yum install -y git
# Clone repository
git clone -b compose --depth 1 https://github.com/cedar2025/Xboard ./
# (Optional shortcut: skip the clone and just fetch the sample file with
# curl -fsSL https://raw.githubusercontent.com/cedar2025/Xboard/master/compose.sample.yaml -o compose.yaml
# — the running PHP code is in the Docker image, not in the clone.)
# Configure Docker Compose
```
4. Edit compose.yaml:
```yaml
services:
web:
image: ghcr.io/cedar2025/xboard:new
volumes:
- redis-data:/data
- ./.env:/www/.env
- ./.docker/.data/:/www/.docker/.data
- ./storage/logs:/www/storage/logs
- ./storage/theme:/www/storage/theme
- ./plugins:/www/plugins
environment:
- docker=true
depends_on:
- redis
command: php artisan octane:start --host=0.0.0.0 --port=7001
restart: on-failure
ports:
- 7001:7001
networks:
- 1panel-network
horizon:
image: ghcr.io/cedar2025/xboard:new
volumes:
- redis-data:/data
- ./.env:/www/.env
- ./.docker/.data/:/www/.docker/.data
- ./storage/logs:/www/storage/logs
- ./plugins:/www/plugins
restart: on-failure
command: php artisan horizon
networks:
- 1panel-network
depends_on:
- redis
ws-server:
image: ghcr.io/cedar2025/xboard:new
volumes:
- redis-data:/data
- ./.env:/www/.env
- ./.docker/.data/:/www/.docker/.data
- ./storage/logs:/www/storage/logs
- ./plugins:/www/plugins
restart: on-failure
ports:
- 8076:8076
networks:
- 1panel-network
command: php artisan ws-server start
depends_on:
- redis
redis:
image: redis:7-alpine
command: redis-server --unixsocket /data/redis.sock --unixsocketperm 777
restart: unless-stopped
networks:
- 1panel-network
volumes:
- redis-data:/data
volumes:
redis-data:
networks:
1panel-network:
external: true
4. Prepare `compose.yaml` from the **1Panel-specific** sample. This sample joins the external `1panel-network` so the container can reach the 1Panel-managed MySQL/Redis containers by their hostname:
```bash
cp compose.1panel.sample.yaml compose.yaml
```
The file is gitignored so your edits survive `git pull`. See [docker-compose.md](./docker-compose.md) for tuning environment variables (`RESOURCE_PROFILE`, `ENABLE_HORIZON`, `ENABLE_REDIS`, etc.) and the other `compose.*.sample.yaml` alternatives.
5. Initialize Installation:
```bash
# Install dependencies and initialize
docker compose run -it --rm web php artisan xboard:install
docker compose run -it --rm xboard php artisan xboard:install
```
⚠️ Important Configuration Notes:
@@ -186,20 +110,11 @@ docker compose up -d
## 4. Version Update
> 💡 Important Note: The update command varies depending on your installation version:
> - If you installed recently (new version), use this command:
```bash
docker compose pull && \
docker compose run -it --rm web php artisan xboard:update && \
docker compose up -d
docker compose pull && docker compose up -d
```
> - If you installed earlier (old version), replace `web` with `xboard`:
```bash
docker compose pull && \
docker compose run -it --rm xboard php artisan xboard:update && \
docker compose up -d
```
> 🤔 Not sure which to use? Try the new version command first, if it fails, use the old version command.
The container always runs `php artisan xboard:update` (migrate + plugin install + version cache + theme refresh) on boot, so no extra command is required.
## Important Notes
+13 -37
View File
@@ -65,14 +65,14 @@ cd /www/wwwroot/your-domain
chattr -i .user.ini
rm -rf .htaccess 404.html 502.html index.html .user.ini
# Clone repository
git clone https://github.com/cedar2025/Xboard.git ./
# Clone the compose branch
git clone -b compose --depth 1 https://github.com/cedar2025/Xboard.git ./
# Prepare configuration file
cp compose.sample.yaml compose.yaml
# Install dependencies and initialize
docker compose run -it --rm web sh init.sh
docker compose run -it --rm xboard php artisan xboard:install
```
> ⚠️ Please save the admin dashboard URL, username, and password shown after installation
@@ -84,54 +84,30 @@ docker compose up -d
#### 3.4 Configure Reverse Proxy
Add the following content to your site configuration:
```nginx
location /ws/ {
proxy_pass http://127.0.0.1:8076;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_read_timeout 60s;
}
location ^~ / {
proxy_pass http://127.0.0.1:7001;
proxy_http_version 1.1;
proxy_set_header Connection "";
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Real-PORT $remote_port;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header Host $http_host;
proxy_set_header Scheme $scheme;
proxy_set_header Server-Protocol $server_protocol;
proxy_set_header Server-Name $server_name;
proxy_set_header Server-Addr $server_addr;
proxy_set_header Server-Port $server_port;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection $http_connection;
proxy_read_timeout 60s;
proxy_buffering off;
proxy_cache off;
}
```
> The `/ws/` location enables real-time node synchronization via `ws-server`. This service is enabled by default and can be toggled in Admin Panel > System Settings > Server.
> The all-in-one container's embedded Caddy fuses HTTP and the panel↔node WebSocket on port 7001. The single `Upgrade`/`Connection` pair above is enough; no separate `/ws/` location is needed. To opt out and expose Octane / `:8076` directly, set `ENABLE_CADDY=false` in `compose.yaml`.
## Maintenance Guide
### Version Updates
> 💡 Important Note: Update commands may vary depending on your installed version:
> - For recent installations (new version), use:
```bash
docker compose pull && \
docker compose run -it --rm web sh update.sh && \
docker compose up -d
docker compose pull && docker compose up -d
```
> - For older installations, replace `web` with `xboard`:
```bash
git config --global --add safe.directory $(pwd)
git fetch --all && git reset --hard origin/master && git pull origin master
docker compose pull && \
docker compose run -it --rm xboard sh update.sh && \
docker compose up -d
```
> 🤔 Not sure which to use? Try the new version command first, if it fails, use the old version command.
The container always runs `php artisan xboard:update` (migrate + plugin install + version cache + theme refresh) on boot, so no extra command is required.
### Routine Maintenance
- Regular log checking: `docker compose logs`
+19 -20
View File
@@ -15,11 +15,12 @@ systemctl start docker
### 2. Deployment Steps
1. Get project files:
```bash
git clone -b compose --depth 1 https://github.com/cedar2025/Xboard
cd Xboard
```
1. Clone the `compose` branch (it ships `compose.sample.yaml` and the other `compose.*.sample.yaml` variants):
```bash
git clone -b compose --depth 1 https://github.com/cedar2025/Xboard
cd Xboard
cp compose.sample.yaml compose.yaml
```
2. Install database:
@@ -29,13 +30,23 @@ docker compose run -it --rm \
-e ENABLE_SQLITE=true \
-e ENABLE_REDIS=true \
-e ADMIN_ACCOUNT=admin@demo.com \
web php artisan xboard:install
xboard php artisan xboard:install
```
- Custom configuration installation (Advanced users)
```bash
docker compose run -it --rm web php artisan xboard:install
docker compose run -it --rm xboard php artisan xboard:install
```
> Please save the admin dashboard URL, username, and password shown after installation
> The repository ships **four** compose templates in the `compose` branch — pick the one matching your setup, copy it to `compose.yaml`, then run the install command:
>
> | File | Network | When to use |
> |------|---------|-------------|
> | `compose.sample.yaml` | bridge + ports `7001:7001` | bare docker, custom reverse proxy, aaPanel + Docker (default) |
> | `compose.host.sample.yaml` | `network_mode: host` | aaPanel native (openresty on host) |
> | `compose.1panel.sample.yaml` | bridge + external `1panel-network` | 1Panel users (so the container can reach 1Panel-managed MySQL/Redis) |
> | `compose.split.sample.yaml` | multi-container (web/horizon/ws-server/redis split) | K8s migration, advanced scaling |
>
> The local `compose.yaml` is gitignored so your edits survive `git pull` when you do clone the repo.
3. Start services:
```bash
@@ -48,22 +59,10 @@ docker compose up -d
### 3. Version Updates
> 💡 Important Note: Update commands may vary depending on your installed version:
> - For recent installations (new version), use:
```bash
cd Xboard
docker compose pull && \
docker compose run -it --rm web php artisan xboard:update && \
docker compose up -d
docker compose pull && docker compose up -d
```
> - For older installations, replace `web` with `xboard`:
```bash
cd Xboard
docker compose pull && \
docker compose run -it --rm xboard php artisan xboard:update && \
docker compose up -d
```
> 🤔 Not sure which to use? Try the new version command first, if it fails, use the old version command.
### 4. Version Rollback