Commits: main ... 0.0.1 (1 commit)

Author: uprightbass360
SHA1: dcb837430a
Message: fix: resolve YAML syntax errors in create-release workflow
Replaced heredocs with echo statements to avoid YAML parsing issues.
The YAML parser was interpreting markdown headings and other content
within heredocs as YAML syntax, causing validation errors.

Using grouped echo statements ({ echo ...; } > file) works correctly
with the GitHub Actions YAML parser while maintaining variable expansion.

Co-Authored-By: Claude Sonnet 4.5 <noreply@anthropic.com>
Date: 2026-01-10 17:19:32 -05:00
18 changed files with 27 additions and 707 deletions
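A minimal sketch of the pattern the commit message describes; the output file name and the `VERSION` variable below are hypothetical and not taken from the actual workflow:

```bash
# Heredoc form the commit replaces; per the commit message, content such as
# markdown headings inside the heredoc was tripping the workflow's YAML validation:
cat > release-notes.md <<EOF
# Release ${VERSION}
EOF

# Grouped-echo form adopted instead: a single redirection for the whole group,
# with shell variable expansion still applied to each echo.
{
  echo "# Release ${VERSION}"
  echo ""
  echo "See CHANGELOG.md for details."
} > release-notes.md
```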

View File

@@ -105,21 +105,21 @@ NETWORK_GATEWAY=172.20.0.1
# =====================
# Change this to your server's public IP or domain name
SERVER_ADDRESS=127.0.0.1
REALM_PORT=8085
REALM_PORT=8215
# =====================
# Ports
# =====================
# Authentication server
AUTH_EXTERNAL_PORT=3724
AUTH_EXTERNAL_PORT=3784
AUTH_PORT=3724
# World server
WORLD_EXTERNAL_PORT=8085
WORLD_EXTERNAL_PORT=8215
WORLD_PORT=8085
# SOAP/Remote access
SOAP_EXTERNAL_PORT=7878
SOAP_EXTERNAL_PORT=7778
SOAP_PORT=7878
# MySQL database (for external access)

View File

@@ -118,11 +118,11 @@ ALPINE_IMAGE=alpine:latest
# =====================
# Ports
# =====================
AUTH_EXTERNAL_PORT=3724
AUTH_EXTERNAL_PORT=3784
AUTH_PORT=3724
WORLD_EXTERNAL_PORT=8085
WORLD_EXTERNAL_PORT=8215
WORLD_PORT=8085
SOAP_EXTERNAL_PORT=7878
SOAP_EXTERNAL_PORT=7778
SOAP_PORT=7878
# =====================
@@ -136,7 +136,7 @@ NETWORK_GATEWAY=172.20.0.1
# Server address / realm
# =====================
SERVER_ADDRESS=127.0.0.1
REALM_PORT=8085
REALM_PORT=8215
# =====================
# MySQL / Database Layer
@@ -559,8 +559,3 @@ MODULE_MOD_PYTHON_ENGINE=0
MODULE_WRATH_OF_THE_VANILLA_V2=0
MODULE_DUELS=0
MODULE_WOW_CORE=0
MODULE_CLANCENTAUR=0
MODULE_DELVES=0
MODULE_MOD_DISABLE_ACHIEVEMENTS=0
MODULE_LUA_BATTLEPASS=0
MODULE_MOD_GM_DISCORD=0

View File

@@ -230,7 +230,7 @@ jobs:
echo "<summary>View enabled modules</summary>" >> $GITHUB_STEP_SUMMARY
echo "" >> $GITHUB_STEP_SUMMARY
echo "\`\`\`" >> $GITHUB_STEP_SUMMARY
grep '^MODULE_.*=1' .env | sed 's/=1//' >> $GITHUB_STEP_SUMMARY || true
grep '^MODULE_.*=1' .env | sed 's/=1//' || true >> $GITHUB_STEP_SUMMARY
echo "\`\`\`" >> $GITHUB_STEP_SUMMARY
echo "</details>" >> $GITHUB_STEP_SUMMARY
echo "" >> $GITHUB_STEP_SUMMARY

View File

@@ -107,7 +107,7 @@ jobs:
echo "1. **Edit .env.prebuilt**:"
echo " \`\`\`bash"
echo " nano .env.prebuilt"
echo " # Set: DOCKERHUB_USERNAME=your-dockerhub-username"
echo " # Set: DOCKERHUB_USERNAME=uprightbass360"
echo " \`\`\`"
echo ""
echo "2. **Rename to .env**:"
@@ -180,7 +180,7 @@ jobs:
echo ""
echo "# Configure Docker Hub username"
echo "nano .env.prebuilt"
echo "# Set: DOCKERHUB_USERNAME=your-dockerhub-username"
echo "# Set: DOCKERHUB_USERNAME=uprightbass360"
echo ""
echo "# Deploy"
echo "mv .env.prebuilt .env"

View File

@@ -66,8 +66,6 @@ cp .env.prebuilt .env
Pre-built images include the **RealmMaster profile** (32 modules) and are automatically built nightly. See **[docs/PREBUILT_IMAGES.md](docs/PREBUILT_IMAGES.md)** for details.
**Note:** Remote deployments require one additional step after migration - see [Remote Deployment Guide](docs/GETTING_STARTED.md#remote-deployment).
See [Getting Started](#getting-started) for detailed walkthrough.
## What You Get

View File

@@ -5517,76 +5517,6 @@
"requires": [],
"post_install_hooks": [],
"config_cleanup": []
},
{
"key": "MODULE_DELVES",
"name": "Delves",
"repo": "https://github.com/araxiaonline/Delves.git",
"description": "List of the Custom Made Single Player Delves for Araxia Online",
"type": "lua",
"category": "scripting",
"notes": "Discovered via GitHub topic 'azerothcore-module'",
"status": "active",
"order": 5000,
"requires": [],
"post_install_hooks": [],
"config_cleanup": []
},
{
"key": "MODULE_CLANCENTAUR",
"name": "ClanCentaur",
"repo": "https://github.com/araxiaonline/ClanCentaur.git",
"description": "Custom SQL modifications and patch notes for new faction rewards, reputation items, and unique vendors on the Araxia WoW 3.3.5a server.",
"type": "sql",
"category": "database",
"notes": "Discovered via GitHub topic 'azerothcore-module'",
"status": "active",
"order": 5000,
"requires": [],
"post_install_hooks": [],
"config_cleanup": []
},
{
"key": "MODULE_MOD_DISABLE_ACHIEVEMENTS",
"name": "mod-disable-achievements",
"repo": "https://github.com/olive-spore-734/mod-disable-achievements.git",
"description": "SQL with a long list of WotLK Achievements and their IDs, which should make it much easier to find and disable some. Made for AzerothCore.",
"type": "sql",
"category": "database",
"notes": "Discovered via GitHub topic 'azerothcore-module'",
"status": "active",
"order": 5000,
"requires": [],
"post_install_hooks": [],
"config_cleanup": []
},
{
"key": "MODULE_LUA_BATTLEPASS",
"name": "lua-battlepass",
"repo": "https://github.com/Shonik/lua-battlepass.git",
"description": "Battle Pass System for AzerothCore",
"type": "lua",
"category": "scripting",
"notes": "Discovered via GitHub topic 'azerothcore-lua'",
"status": "active",
"order": 5000,
"requires": [],
"post_install_hooks": [],
"config_cleanup": []
},
{
"key": "MODULE_MOD_GM_DISCORD",
"name": "mod-gm-discord",
"repo": "https://github.com/Diabloxx/mod-gm-discord.git",
"description": "GM to Discord Tools",
"type": "cpp",
"category": "uncategorized",
"notes": "Discovered via GitHub topic 'azerothcore-module'",
"status": "active",
"order": 5000,
"requires": [],
"post_install_hooks": [],
"config_cleanup": []
}
]
}

View File

@@ -21,10 +21,10 @@
"MODULE_ARAC",
"MODULE_ASSISTANT",
"MODULE_REAGENT_BANK",
"MODULE_BLACK_MARKET_AUCTION_HOUSE",
"MODULE_ELUNA",
"MODULE_AIO",
"MODULE_ELUNA_SCRIPTS",
"MODULE_LUA_AH_BOT",
"MODULE_EVENT_SCRIPTS",
"MODULE_ACTIVE_CHAT",
"MODULE_GUILDHOUSE",

View File

@@ -1,13 +0,0 @@
# AzerothCore RealmMaster - Docker NFS Dependencies
# Ensures Docker waits for NFS mounts before starting to prevent race conditions
# where containers create local directories before NFS mounts are ready
[Unit]
# Wait for NFS mounts to be active before starting Docker
After=nfs-azerothcore.mount nfs-containers.mount
# Require the primary backup NFS mount (critical for data integrity)
Requires=nfs-azerothcore.mount
# Prefer the containers NFS mount but don't fail if unavailable
Wants=nfs-containers.mount

View File

@@ -32,7 +32,6 @@ REMOTE_IDENTITY=""
REMOTE_PROJECT_DIR=""
REMOTE_SKIP_STORAGE=0
REMOTE_COPY_SOURCE=0
REMOTE_SETUP_SOURCE=""
REMOTE_ARGS_PROVIDED=0
REMOTE_AUTO_DEPLOY=0
REMOTE_CLEAN_CONTAINERS=0
@@ -262,8 +261,6 @@ Options:
--remote-project-dir DIR Remote project directory (default: ~/<project-name>)
--remote-skip-storage Skip syncing the storage directory during migration
--remote-copy-source Copy the local project directory to remote instead of relying on git
--remote-setup-source Automatically run setup-source.sh on remote (clone AzerothCore SQL)
--remote-skip-source-setup Skip source repository setup (you'll run manually later)
--remote-auto-deploy Run './deploy.sh --yes --no-watch' on the remote host after migration
--remote-clean-containers Stop/remove remote containers & project images during migration
--remote-storage-path PATH Override STORAGE_PATH/STORAGE_PATH_LOCAL in the remote .env
@@ -297,8 +294,6 @@ while [[ $# -gt 0 ]]; do
--remote-project-dir) REMOTE_PROJECT_DIR="$2"; REMOTE_MODE=1; REMOTE_ARGS_PROVIDED=1; shift 2;;
--remote-skip-storage) REMOTE_SKIP_STORAGE=1; REMOTE_MODE=1; REMOTE_ARGS_PROVIDED=1; shift;;
--remote-copy-source) REMOTE_COPY_SOURCE=1; REMOTE_MODE=1; REMOTE_ARGS_PROVIDED=1; shift;;
--remote-setup-source) REMOTE_SETUP_SOURCE=1; REMOTE_MODE=1; REMOTE_ARGS_PROVIDED=1; shift;;
--remote-skip-source-setup) REMOTE_SETUP_SOURCE=0; REMOTE_MODE=1; REMOTE_ARGS_PROVIDED=1; shift;;
--remote-auto-deploy) REMOTE_AUTO_DEPLOY=1; REMOTE_MODE=1; REMOTE_ARGS_PROVIDED=1; shift;;
--remote-clean-containers) REMOTE_CLEAN_CONTAINERS=1; REMOTE_MODE=1; REMOTE_ARGS_PROVIDED=1; shift;;
--remote-storage-path) REMOTE_STORAGE_OVERRIDE="$2"; REMOTE_MODE=1; REMOTE_ARGS_PROVIDED=1; shift 2;;
@@ -759,10 +754,6 @@ run_remote_migration(){
args+=(--env-file "$REMOTE_ENV_FILE")
fi
if [ -n "$REMOTE_SETUP_SOURCE" ]; then
args+=(--setup-source "$REMOTE_SETUP_SOURCE")
fi
(cd "$ROOT_DIR" && ./scripts/bash/migrate-stack.sh "${args[@]}")
}
@@ -900,24 +891,6 @@ apply_server_config(){
fi
}
update_realmlist(){
info "Updating realmlist in database with current SERVER_ADDRESS and REALM_PORT..."
local update_script="$ROOT_DIR/scripts/bash/update-realmlist.sh"
if [ ! -x "$update_script" ]; then
warn "Realmlist update script not found or not executable: $update_script"
return 0
fi
# Run the update script
if bash "$update_script"; then
ok "Realmlist updated successfully"
else
warn "Could not update realmlist - this is normal if database is still initializing"
info "The realmlist will be updated on next deployment or you can run: ./scripts/bash/update-realmlist.sh"
fi
}
main(){
if [ "$ASSUME_YES" -ne 1 ]; then
if [ -t 0 ]; then
@@ -974,32 +947,29 @@ main(){
fi
fi
show_step 1 7 "Checking build requirements"
show_step 1 4 "Checking build requirements"
if ! prompt_build_if_needed; then
err "Build required but not completed. Deployment cancelled."
exit 1
fi
if [ "$KEEP_RUNNING" -ne 1 ]; then
show_step 2 7 "Stopping runtime stack"
show_step 2 4 "Stopping runtime stack"
stop_runtime_stack
fi
show_step 3 7 "Importing user database files"
show_step 3 5 "Importing user database files"
info "Checking for database files in ./import/db/ and ./database-import/"
bash "$ROOT_DIR/scripts/bash/import-database-files.sh"
show_step 4 7 "Bringing your realm online"
show_step 4 6 "Bringing your realm online"
info "Pulling images and waiting for containers to become healthy; this may take a few minutes on first deploy."
stage_runtime
show_step 5 7 "Applying server configuration"
show_step 5 6 "Applying server configuration"
apply_server_config
show_step 6 7 "Updating realmlist"
update_realmlist
show_step 7 7 "Finalizing deployment"
show_step 6 6 "Finalizing deployment"
mark_deployment_complete
show_realm_ready

View File

@@ -199,32 +199,7 @@ The remote deployment process transfers:
- ✅ Docker images (exported to `local-storage/images/`)
- ✅ Project files (scripts, configs, docker-compose.yml, .env)
- ✅ Storage directory (unless `--remote-skip-storage` is used)
- AzerothCore source repository (must be set up separately - see below)
**IMPORTANT: AzerothCore Source Setup**
The AzerothCore source repository (~2GB) is NOT synced during migration to avoid slow transfers. However, it's **required** for database initialization.
**Option 1: Automatic Setup (Recommended)**
Use the `--remote-setup-source` flag to automatically clone the source on the remote host:
```bash
./deploy.sh --remote-host your-server --remote-user youruser --remote-setup-source
```
**Option 2: Manual Setup**
After migration completes, SSH to the remote host and run:
```bash
ssh your-server
cd ~/AzerothCore-RealmMaster # or your custom project directory
./scripts/bash/setup-source.sh
```
**Without this step, database initialization will fail with:**
`❌ FATAL: SQL source directory not found`
- Build artifacts (source code, compilation files stay local)
### Module Presets

View File

@@ -48,8 +48,11 @@ cp .env.prebuilt .env
Edit `.env` and set your Docker Hub username:
```bash
# Change this line to your Docker Hub username:
# Change this line:
DOCKERHUB_USERNAME=your-dockerhub-username
# To (example):
DOCKERHUB_USERNAME=uprightbass360
```
### 4. Optional: Customize Settings

View File

@@ -145,7 +145,7 @@ cd azerothcore-realmmaster-v1.0.0-realmmaster
# 3. Configure
nano .env.prebuilt
# Set: DOCKERHUB_USERNAME=your-dockerhub-username
# Set: DOCKERHUB_USERNAME=uprightbass360
# 4. Deploy
mv .env.prebuilt .env

View File

@@ -38,21 +38,6 @@ ls storage/config/mod_*.conf*
**Database connection issues**
```bash
# Check if source repository is set up (common issue after remote deployment)
ls -la local-storage/source/azerothcore*/data/sql/base/db_world/
# If empty or missing, set up source:
./scripts/bash/setup-source.sh
# Then restart database import:
docker compose run --rm ac-db-import
# Error: "SQL source directory not found"
# This means the AzerothCore source repository hasn't been cloned.
# Solution: Run ./scripts/bash/setup-source.sh
# See docs/GETTING_STARTED.md for details
# Legacy database issues:
# Verify MySQL is running and responsive
docker exec ac-mysql mysql -u root -p -e "SELECT 1;"

View File

@@ -153,35 +153,10 @@ if [ -f "$RESTORE_SUCCESS_MARKER" ]; then
if verify_databases_populated; then
echo "✅ Backup restoration completed successfully"
cat "$RESTORE_SUCCESS_MARKER" || true
# Check if there are pending module SQL updates to apply
echo "🔍 Checking for pending module SQL updates..."
local has_pending_updates=0
# Check if module SQL staging directory has files
if [ -d "/azerothcore/data/sql/updates/db_world" ] && [ -n "$(find /azerothcore/data/sql/updates/db_world -name 'MODULE_*.sql' -type f 2>/dev/null)" ]; then
echo " ⚠️ Found staged module SQL updates that may need application"
has_pending_updates=1
fi
if [ "$has_pending_updates" -eq 0 ]; then
echo "🚫 Skipping database import - data already restored and no pending updates"
echo "🚫 Skipping database import - data already restored from backup"
exit 0
fi
echo "📦 Running dbimport to apply pending module SQL updates..."
cd /azerothcore/env/dist/bin
seed_dbimport_conf
if ./dbimport; then
echo "✅ Module SQL updates applied successfully!"
exit 0
else
echo "⚠️ dbimport reported issues - check logs for details"
exit 1
fi
fi
echo "⚠️ Restoration marker found, but databases are empty - forcing re-import"
rm -f "$RESTORE_SUCCESS_MARKER" 2>/dev/null || true
rm -f "$RESTORE_SUCCESS_MARKER_TMP" 2>/dev/null || true
@@ -495,65 +470,6 @@ echo "🚀 Running database import..."
cd /azerothcore/env/dist/bin
seed_dbimport_conf
validate_sql_source(){
local sql_base_dir="/azerothcore/data/sql/base"
local required_dirs=("db_auth" "db_world" "db_characters")
local missing_dirs=()
echo "🔍 Validating SQL source availability..."
if [ ! -d "$sql_base_dir" ]; then
cat <<EOF
❌ FATAL: SQL source directory not found at $sql_base_dir
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
The AzerothCore source repository is not mounted or doesn't exist.
This directory should contain SQL schemas for database initialization.
📋 REMEDIATION STEPS:
1. SSH to this host:
ssh $(whoami)@$(hostname)
2. Navigate to project directory and run source setup:
cd $PROJECT_ROOT && ./scripts/bash/setup-source.sh
3. Restart database import:
docker compose run --rm ac-db-import
📦 ALTERNATIVE (Prebuilt Images):
If using Docker images with bundled SQL schemas:
- Set AC_SQL_SOURCE_PATH in .env to point to bundled location
- Example: AC_SQL_SOURCE_PATH=/bundled/sql
📚 Documentation: docs/GETTING_STARTED.md#database-setup
EOF
exit 1
fi
for dir in "${required_dirs[@]}"; do
local full_path="$sql_base_dir/$dir"
if [ ! -d "$full_path" ] || [ -z "$(ls -A "$full_path" 2>/dev/null)" ]; then
missing_dirs+=("$dir")
fi
done
if [ ${#missing_dirs[@]} -gt 0 ]; then
echo ""
echo "❌ FATAL: SQL source directories are empty or missing:"
printf ' - %s\n' "${missing_dirs[@]}"
echo ""
echo "The AzerothCore source directory exists but hasn't been populated with SQL files."
echo "Run './scripts/bash/setup-source.sh' on the host to clone and populate the repository."
echo ""
exit 1
fi
echo "✅ SQL source validation passed - all required schemas present"
}
maybe_run_base_import(){
local mysql_host="${CONTAINER_MYSQL:-ac-mysql}"
local mysql_port="${MYSQL_PORT:-3306}"
@@ -590,8 +506,6 @@ maybe_run_base_import(){
fi
}
# Validate SQL source is available before attempting import
validate_sql_source
maybe_run_base_import
if ./dbimport; then
echo "✅ Database import completed successfully!"

View File

@@ -1,96 +0,0 @@
#!/bin/bash
# AzerothCore RealmMaster - Install Docker NFS Dependencies Fix
# This script installs a systemd drop-in configuration to ensure Docker
# waits for NFS mounts before starting, preventing backup folder deletion issues
set -e
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"
DROP_IN_SOURCE="$PROJECT_ROOT/config/systemd/docker.service.d/nfs-dependencies.conf"
DROP_IN_TARGET="/etc/systemd/system/docker.service.d/nfs-dependencies.conf"
# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color
log_info() { echo -e "${BLUE} $*${NC}"; }
log_ok() { echo -e "${GREEN}$*${NC}"; }
log_warn() { echo -e "${YELLOW}⚠️ $*${NC}"; }
log_err() { echo -e "${RED}$*${NC}"; }
# Check if running as root
if [ "$EUID" -ne 0 ]; then
log_err "This script must be run as root (use sudo)"
exit 1
fi
# Check if source file exists
if [ ! -f "$DROP_IN_SOURCE" ]; then
log_err "Source configuration file not found: $DROP_IN_SOURCE"
exit 1
fi
# Check if NFS mounts exist
log_info "Checking NFS mount configuration..."
if ! systemctl list-units --type=mount | grep -q "nfs-azerothcore.mount"; then
log_warn "nfs-azerothcore.mount not found. This fix requires NFS mounts to be configured."
log_warn "Continue anyway? (y/n)"
read -r response
if [[ ! "$response" =~ ^[Yy]$ ]]; then
log_info "Installation cancelled."
exit 0
fi
fi
# Create drop-in directory
log_info "Creating systemd drop-in directory..."
mkdir -p "$(dirname "$DROP_IN_TARGET")"
log_ok "Drop-in directory ready: $(dirname "$DROP_IN_TARGET")"
# Install configuration file
log_info "Installing NFS dependencies configuration..."
cp "$DROP_IN_SOURCE" "$DROP_IN_TARGET"
chmod 644 "$DROP_IN_TARGET"
log_ok "Configuration installed: $DROP_IN_TARGET"
# Show what was installed
echo ""
log_info "Installed configuration:"
echo "---"
cat "$DROP_IN_TARGET"
echo "---"
echo ""
# Reload systemd
log_info "Reloading systemd daemon..."
systemctl daemon-reload
log_ok "Systemd daemon reloaded"
# Verify configuration
log_info "Verifying Docker service dependencies..."
echo ""
systemctl show -p After,Requires,Wants docker.service | grep -E '^(After|Requires|Wants)='
echo ""
# Check if Docker is running
if systemctl is-active --quiet docker.service; then
log_warn "Docker is currently running"
log_warn "The new configuration will take effect on next Docker restart or system reboot"
echo ""
log_info "To apply immediately, restart Docker (WARNING: will stop all containers):"
echo " sudo systemctl restart docker.service"
echo ""
log_info "Or reboot the system:"
echo " sudo reboot"
else
log_ok "Docker is not running - configuration will apply on next start"
fi
echo ""
log_ok "Docker NFS dependencies fix installed successfully!"
log_info "Docker will now wait for NFS mounts before starting"
log_info "This prevents backup folders from being deleted during server restarts"

View File

@@ -144,7 +144,6 @@ Options:
--port PORT SSH port (default: 22)
--identity PATH SSH private key (passed to scp/ssh)
--project-dir DIR Remote project directory (default: ~/<project-name>)
--setup-source 0|1 Auto-setup AzerothCore source on remote (0=skip, 1=setup, unset=prompt)
--env-file PATH Use this env file for image lookup and upload (default: ./.env)
--tarball PATH Output path for the image tar (default: ./local-storage/images/acore-modules-images.tar)
--storage PATH Remote storage directory (default: <project-dir>/storage)
@@ -169,7 +168,6 @@ SKIP_STORAGE=0
ASSUME_YES=0
COPY_SOURCE=0
SKIP_ENV=0
REMOTE_SETUP_SOURCE=""
PRESERVE_CONTAINERS=0
CLEAN_CONTAINERS=0
@@ -183,7 +181,6 @@ while [[ $# -gt 0 ]]; do
--env-file) ENV_FILE="$2"; shift 2;;
--tarball) TARBALL="$2"; shift 2;;
--storage) REMOTE_STORAGE="$2"; shift 2;;
--setup-source) REMOTE_SETUP_SOURCE="$2"; shift 2;;
--skip-storage) SKIP_STORAGE=1; shift;;
--skip-env) SKIP_ENV=1; shift;;
--preserve-containers) PRESERVE_CONTAINERS=1; shift;;
@@ -256,15 +253,7 @@ STAGE_SQL_PATH_RAW="$(read_env_value STAGE_PATH_MODULE_SQL "${LOCAL_STORAGE_ROOT
if [ -z "${STORAGE_PATH_LOCAL:-}" ]; then
STORAGE_PATH_LOCAL="$LOCAL_STORAGE_ROOT"
fi
# Ensure STORAGE_PATH is defined to avoid set -u failures during expansion
if [ -z "${STORAGE_PATH:-}" ]; then
STORAGE_PATH="$(read_env_value STORAGE_PATH "./storage")"
fi
# Ensure STORAGE_MODULE_SQL_PATH is defined to avoid set -u failures during expansion
if [ -z "${STORAGE_MODULE_SQL_PATH:-}" ]; then
STORAGE_MODULE_SQL_PATH="$(read_env_value STORAGE_MODULE_SQL_PATH "${STORAGE_PATH}/module-sql-updates")"
fi
# Expand any env references (e.g., ${STORAGE_PATH_LOCAL}, ${STORAGE_MODULE_SQL_PATH})
# Expand any env references (e.g., ${STORAGE_PATH_LOCAL})
STAGE_SQL_PATH_RAW="$(eval "echo \"$STAGE_SQL_PATH_RAW\"")"
LOCAL_STAGE_SQL_DIR="$(resolve_path_relative_to_project "$STAGE_SQL_PATH_RAW" "$PROJECT_ROOT")"
REMOTE_STAGE_SQL_DIR="$(resolve_path_relative_to_project "$STAGE_SQL_PATH_RAW" "$PROJECT_DIR")"
@@ -449,76 +438,6 @@ setup_remote_repository(){
echo " • Repository synchronized ✓"
}
setup_source_if_needed(){
local should_setup="${REMOTE_SETUP_SOURCE:-}"
# Check if source already exists and is populated
echo " • Checking for existing AzerothCore source repository..."
if run_ssh "[ -d '$PROJECT_DIR/local-storage/source/azerothcore-playerbots/data/sql/base/db_world' ] && [ -n \"\$(ls -A '$PROJECT_DIR/local-storage/source/azerothcore-playerbots/data/sql/base/db_world' 2>/dev/null)\" ]" 2>/dev/null; then
echo " ✅ Source repository already populated on remote"
return 0
elif run_ssh "[ -d '$PROJECT_DIR/local-storage/source/azerothcore/data/sql/base/db_world' ] && [ -n \"\$(ls -A '$PROJECT_DIR/local-storage/source/azerothcore/data/sql/base/db_world' 2>/dev/null)\" ]" 2>/dev/null; then
echo " ✅ Source repository already populated on remote"
return 0
fi
echo " ⚠️ Source repository not found or empty on remote"
# If not set, ask user (unless --yes)
if [ -z "$should_setup" ]; then
if [ "$ASSUME_YES" = "1" ]; then
# Auto-yes in non-interactive: default to YES for safety
echo " Auto-confirming source setup (--yes flag)"
should_setup=1
else
echo ""
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
echo "📦 AzerothCore Source Repository Setup"
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
echo ""
echo "The remote server needs AzerothCore source code for database schemas."
echo "This will clone ~2GB repository (one-time operation, takes 2-5 minutes)."
echo ""
echo "Without this, database initialization will FAIL."
echo ""
read -rp "Set up source repository now? [Y/n]: " answer
answer="${answer:-Y}"
case "${answer,,}" in
y|yes) should_setup=1 ;;
*) should_setup=0 ;;
esac
fi
fi
if [ "$should_setup" != "1" ]; then
echo ""
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
echo "⚠️ WARNING: Source setup skipped"
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
echo ""
echo "You MUST run this manually on the remote host BEFORE starting services:"
echo ""
echo " ssh $USER@$HOST"
echo " cd $PROJECT_DIR"
echo " ./scripts/bash/setup-source.sh"
echo ""
return 0
fi
echo " 🔧 Setting up AzerothCore source repository on remote..."
echo " ⏳ Cloning AzerothCore (this may take 2-5 minutes)..."
# Run setup-source.sh on remote, capturing output
if run_ssh "cd '$PROJECT_DIR' && ./scripts/bash/setup-source.sh" 2>&1 | sed 's/^/ /'; then
echo " ✅ Source repository setup complete"
return 0
else
echo " ❌ Source setup failed (check output above for details)"
echo " ⚠️ Run manually: ssh $USER@$HOST 'cd $PROJECT_DIR && ./scripts/bash/setup-source.sh'"
return 1
fi
}
cleanup_stale_docker_resources(){
if [ "$PRESERVE_CONTAINERS" -eq 1 ]; then
echo "⋅ Skipping remote container/image cleanup (--preserve-containers)"
@@ -550,9 +469,6 @@ cleanup_stale_docker_resources(){
validate_remote_environment
# Set up source repository if needed (after project files are synced)
setup_source_if_needed || true # Don't fail entire deployment if source setup fails
collect_deploy_image_refs
echo "⋅ Exporting deployment images to $TARBALL"

View File

@@ -1,177 +0,0 @@
#!/bin/bash
# Setup user environment with sudo access and bash completion
set -e
# Colors
GREEN='\033[0;32m'
BLUE='\033[0;34m'
YELLOW='\033[1;33m'
NC='\033[0m' # No Color
log_info() { echo -e "${BLUE} $*${NC}"; }
log_ok() { echo -e "${GREEN}$*${NC}"; }
log_warn() { echo -e "${YELLOW}⚠️ $*${NC}"; }
TARGET_USER="${1:-${USER}}"
# Check if running as root
if [ "$EUID" -ne 0 ]; then
echo "This script must be run as root (use sudo)"
exit 1
fi
echo ""
log_info "Setting up environment for user: $TARGET_USER"
echo ""
# 1. Add user to sudo group
log_info "Step 1/4: Adding $TARGET_USER to sudo group..."
if groups "$TARGET_USER" | grep -q "\bsudo\b"; then
log_ok "User already in sudo group"
else
usermod -aG sudo "$TARGET_USER"
log_ok "Added $TARGET_USER to sudo group"
fi
# 2. Change default shell to bash
log_info "Step 2/4: Setting default shell to bash..."
CURRENT_SHELL=$(getent passwd "$TARGET_USER" | cut -d: -f7)
if [ "$CURRENT_SHELL" = "/bin/bash" ]; then
log_ok "Default shell already set to bash"
else
chsh -s /bin/bash "$TARGET_USER"
log_ok "Changed default shell from $CURRENT_SHELL to /bin/bash"
fi
# 3. Create .bashrc with bash completion
log_info "Step 3/4: Setting up bash completion..."
USER_HOME=$(getent passwd "$TARGET_USER" | cut -d: -f6)
BASHRC="$USER_HOME/.bashrc"
if [ -f "$BASHRC" ]; then
log_warn ".bashrc already exists, checking for bash completion..."
if grep -q "bash_completion" "$BASHRC"; then
log_ok "Bash completion already configured in .bashrc"
else
log_info "Adding bash completion to existing .bashrc..."
cat >> "$BASHRC" << 'EOF'
# Enable bash completion
if ! shopt -oq posix; then
if [ -f /usr/share/bash-completion/bash_completion ]; then
. /usr/share/bash-completion/bash_completion
elif [ -f /etc/bash_completion ]; then
. /etc/bash_completion
fi
fi
EOF
chown "$TARGET_USER:$TARGET_USER" "$BASHRC"
log_ok "Bash completion added to .bashrc"
fi
else
log_info "Creating new .bashrc with bash completion..."
cat > "$BASHRC" << 'EOF'
# ~/.bashrc: executed by bash(1) for non-login shells.
# If not running interactively, don't do anything
case $- in
*i*) ;;
*) return;;
esac
# History settings
HISTCONTROL=ignoreboth
HISTSIZE=10000
HISTFILESIZE=20000
shopt -s histappend
# Check window size after each command
shopt -s checkwinsize
# Make less more friendly for non-text input files
[ -x /usr/bin/lesspipe ] && eval "$(SHELL=/bin/sh lesspipe)"
# Set a fancy prompt
PS1='\[\033[01;32m\]\u@\h\[\033[00m\]:\[\033[01;34m\]\w\[\033[00m\]\$ '
# Enable color support for ls and grep
if [ -x /usr/bin/dircolors ]; then
test -r ~/.dircolors && eval "$(dircolors -b ~/.dircolors)" || eval "$(dircolors -b)"
alias ls='ls --color=auto'
alias grep='grep --color=auto'
alias fgrep='fgrep --color=auto'
alias egrep='egrep --color=auto'
fi
# Some more ls aliases
alias ll='ls -alF'
alias la='ls -A'
alias l='ls -CF'
# Enable bash completion
if ! shopt -oq posix; then
if [ -f /usr/share/bash-completion/bash_completion ]; then
. /usr/share/bash-completion/bash_completion
elif [ -f /etc/bash_completion ]; then
. /etc/bash_completion
fi
fi
# Docker completion (if docker is installed)
if [ -f /usr/share/bash-completion/completions/docker ]; then
. /usr/share/bash-completion/completions/docker
fi
EOF
chown "$TARGET_USER:$TARGET_USER" "$BASHRC"
chmod 644 "$BASHRC"
log_ok "Created .bashrc with bash completion"
fi
# 4. Create .bash_profile to source .bashrc for login shells
log_info "Step 4/4: Setting up bash_profile for login shells..."
BASH_PROFILE="$USER_HOME/.bash_profile"
if [ -f "$BASH_PROFILE" ]; then
if grep -q "\.bashrc" "$BASH_PROFILE"; then
log_ok ".bash_profile already sources .bashrc"
else
log_info "Adding .bashrc sourcing to existing .bash_profile..."
cat >> "$BASH_PROFILE" << 'EOF'
# Source .bashrc if it exists
if [ -f ~/.bashrc ]; then
. ~/.bashrc
fi
EOF
chown "$TARGET_USER:$TARGET_USER" "$BASH_PROFILE"
log_ok ".bash_profile updated to source .bashrc"
fi
else
log_info "Creating .bash_profile..."
cat > "$BASH_PROFILE" << 'EOF'
# ~/.bash_profile: executed by bash(1) for login shells.
# Source .bashrc if it exists
if [ -f ~/.bashrc ]; then
. ~/.bashrc
fi
EOF
chown "$TARGET_USER:$TARGET_USER" "$BASH_PROFILE"
chmod 644 "$BASH_PROFILE"
log_ok "Created .bash_profile"
fi
echo ""
log_ok "Environment setup complete for $TARGET_USER!"
echo ""
echo "Changes applied:"
echo " ✓ Added to sudo group (password required)"
echo " ✓ Default shell changed to /bin/bash"
echo " ✓ Bash completion enabled (.bashrc)"
echo " ✓ Login shell configured (.bash_profile)"
echo ""
log_warn "Important: You need to log out and log back in for shell changes to take effect"
log_info "To test sudo: sudo -v (will prompt for password)"
log_info "To test tab completion: type 'systemctl rest' and press TAB"
log_info "To verify shell: echo \$SHELL (should show /bin/bash)"
echo ""

View File

@@ -1,80 +0,0 @@
#!/bin/bash
# Updates the realmlist table in the database with current SERVER_ADDRESS and REALM_PORT from .env
set -e
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
ROOT_DIR="$(cd "$SCRIPT_DIR/../.." && pwd)"
# Source colors and functions
BLUE='\033[0;34m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
RED='\033[0;31m'
NC='\033[0m'
info() { printf '%b\n' "${BLUE} $*${NC}"; }
ok() { printf '%b\n' "${GREEN}$*${NC}"; }
warn() { printf '%b\n' "${YELLOW}⚠️ $*${NC}"; }
err() { printf '%b\n' "${RED}$*${NC}"; }
# Load environment variables from .env
if [ -f "$ROOT_DIR/.env" ]; then
# shellcheck disable=SC1091
set -a
source "$ROOT_DIR/.env"
set +a
else
err "No .env file found at $ROOT_DIR/.env"
exit 1
fi
# Check required variables
if [ -z "$SERVER_ADDRESS" ]; then
err "SERVER_ADDRESS not set in .env"
exit 1
fi
if [ -z "$REALM_PORT" ]; then
err "REALM_PORT not set in .env"
exit 1
fi
if [ -z "$MYSQL_HOST" ]; then
err "MYSQL_HOST not set in .env"
exit 1
fi
if [ -z "$MYSQL_USER" ]; then
err "MYSQL_USER not set in .env"
exit 1
fi
if [ -z "$MYSQL_ROOT_PASSWORD" ]; then
err "MYSQL_ROOT_PASSWORD not set in .env"
exit 1
fi
if [ -z "$DB_AUTH_NAME" ]; then
err "DB_AUTH_NAME not set in .env"
exit 1
fi
info "Updating realmlist table..."
info " Address: $SERVER_ADDRESS"
info " Port: $REALM_PORT"
# Try to update the database
if mysql -h "${MYSQL_HOST}" -u"${MYSQL_USER}" -p"${MYSQL_ROOT_PASSWORD}" --skip-ssl-verify "${DB_AUTH_NAME}" \
-e "UPDATE realmlist SET address='${SERVER_ADDRESS}', port=${REALM_PORT} WHERE id=1;" 2>/dev/null; then
ok "Realmlist updated successfully"
# Show the current realmlist entry
mysql -h "${MYSQL_HOST}" -u"${MYSQL_USER}" -p"${MYSQL_ROOT_PASSWORD}" --skip-ssl-verify "${DB_AUTH_NAME}" \
-e "SELECT id, name, address, port FROM realmlist WHERE id=1;" 2>/dev/null || true
exit 0
else
warn "Could not update realmlist table"
warn "This is normal if the database is not yet initialized or MySQL is not running"
exit 1
fi