Mirror of https://github.com/uprightbass360/AzerothCore-RealmMaster.git (synced 2026-01-13 00:58:34 +00:00)

Commit: makes import less redundant
Changed files: .gitignore (vendored), 8 lines changed, plus the database import script shown below.
.gitignore:

@@ -1,13 +1,9 @@
-data/
-backups/
-manual-backups/
-V1/
 database-import/*.sql
 database-import/*.sql.gz
+database-import/ImportBackup*/
 source/*
 local-data-tools/
 storage/
-source/
 local-storage/
 .claude/
 images/
@@ -17,6 +13,4 @@ scripts/__pycache__/
 .env
 package-lock.json
 package.json
-.modules_state
-.modules-meta
 todo.md
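The new database-import/ImportBackup*/ rule matches the timestamped batch directories that the updated import script (below) creates when it bundles loose dumps. A sketch of the resulting layout, with illustrative file and directory names that are assumptions rather than paths from the repository:

database-import/
├── acore_auth.sql                    # already ignored via database-import/*.sql
├── acore_world.sql.gz                # already ignored via database-import/*.sql.gz
└── ImportBackup_20260113_010203/     # ignored via the new database-import/ImportBackup*/ rule
    └── acore_characters.sql

Running git check-ignore -v on any of these paths confirms which rule covers it.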
Database import script:

@@ -13,41 +13,27 @@ IMPORT_DIR="./database-import"
 STORAGE_PATH="${STORAGE_PATH:-./storage}"
 STORAGE_PATH_LOCAL="${STORAGE_PATH_LOCAL:-./local-storage}"
 BACKUP_ROOT="${STORAGE_PATH}/backups"
-BACKUP_DIR="${BACKUP_ROOT}/daily"
-TIMESTAMP=$(date +%Y-%m-%d)
 
 shopt -s nullglob
-
 sql_files=("$IMPORT_DIR"/*.sql "$IMPORT_DIR"/*.sql.gz)
-archive_files=("$IMPORT_DIR"/*.tar "$IMPORT_DIR"/*.tar.gz "$IMPORT_DIR"/*.tgz "$IMPORT_DIR"/*.zip)
+shopt -u nullglob
 
-declare -a full_backup_dirs=()
-for dir in "$IMPORT_DIR"/*/; do
-    dir="${dir%/}"
-    # Skip if no dump-like files inside
-    if compgen -G "$dir"/*.sql >/dev/null || compgen -G "$dir"/*.sql.gz >/dev/null; then
-        full_backup_dirs+=("$dir")
-    fi
-done
-
-if [ ! -d "$IMPORT_DIR" ] || { [ ${#sql_files[@]} -eq 0 ] && [ ${#archive_files[@]} -eq 0 ] && [ ${#full_backup_dirs[@]} -eq 0 ]; }; then
-    echo "📁 No database files or full backups found in $IMPORT_DIR - skipping import"
+if [ ! -d "$IMPORT_DIR" ] || [ ${#sql_files[@]} -eq 0 ]; then
+    echo "📁 No loose database files found in $IMPORT_DIR - skipping import"
     exit 0
 fi
 
-shopt -u nullglob
-
 # Exit if backup system already has databases restored
 if [ -f "${STORAGE_PATH_LOCAL}/mysql-data/.restore-completed" ]; then
     echo "✅ Database already restored - skipping import"
     exit 0
 fi
 
-echo "📥 Found database files in $IMPORT_DIR"
-echo "📂 Copying to backup system for import..."
+echo "📥 Found ${#sql_files[@]} database files in $IMPORT_DIR"
+echo "📂 Bundling files for backup import..."
 
 # Ensure backup directory exists
-mkdir -p "$BACKUP_DIR" "$BACKUP_ROOT"
+mkdir -p "$BACKUP_ROOT"
 
 generate_unique_path(){
     local target="$1"
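The first hunk drops the archive globs and the full-backup directory scan, leaving only the loose *.sql / *.sql.gz discovery. The nullglob toggle around the glob is what makes the emptiness check reliable: without it, an unmatched pattern stays in the array as a literal string. A minimal illustration (the directory name is chosen for the example):

shopt -s nullglob
files=(./database-import/*.sql)
shopt -u nullglob
echo "${#files[@]} file(s)"   # prints "0 file(s)" when nothing matches;
                              # without nullglob the array would hold the literal
                              # string './database-import/*.sql' and count 1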
@@ -60,142 +46,52 @@ generate_unique_path(){
     printf '%s\n' "$target"
 }
 
-copied_sql=0
-staged_dirs=0
-staged_archives=0
-
-# Copy files with smart naming
-for file in "${sql_files[@]:-}"; do
-    [ -f "$file" ] || continue
-
-    filename=$(basename "$file")
-
-    # Try to detect database type by filename
-    if echo "$filename" | grep -qi "auth"; then
-        target_name="acore_auth_${TIMESTAMP}.sql"
-    elif echo "$filename" | grep -qi "world"; then
-        target_name="acore_world_${TIMESTAMP}.sql"
-    elif echo "$filename" | grep -qi "char"; then
-        target_name="acore_characters_${TIMESTAMP}.sql"
-    else
-        # Fallback - use original name with timestamp
-        base_name="${filename%.*}"
-        ext="${filename##*.}"
-        target_name="${base_name}_${TIMESTAMP}.${ext}"
-    fi
-
-    # Add .gz extension if source is compressed
-    if [[ "$filename" == *.sql.gz ]]; then
-        target_name="${target_name}.gz"
-    fi
-
-    target_path="$BACKUP_DIR/$target_name"
-
-    echo "📋 Copying $filename → $target_name"
-    cp "$file" "$target_path"
-    copied_sql=$((copied_sql + 1))
-done
-
 stage_backup_directory(){
     local src_dir="$1"
     if [ -z "$src_dir" ] || [ ! -d "$src_dir" ]; then
-        return
+        echo "⚠️ Invalid source directory: $src_dir"
+        return 1
     fi
     local dirname
     dirname="$(basename "$src_dir")"
     local dest="$BACKUP_ROOT/$dirname"
     dest="$(generate_unique_path "$dest")"
-    echo "📦 Staging full backup directory $(basename "$src_dir") → $(basename "$dest")"
-    cp -a "$src_dir" "$dest"
-    staged_dirs=$((staged_dirs + 1))
+    echo "📦 Copying backup directory $(basename "$src_dir") → $(basename "$dest")"
+    if ! cp -a "$src_dir" "$dest"; then
+        echo "❌ Failed to copy backup directory"
+        return 1
+    fi
+    printf '%s\n' "$dest"
 }
 
-extract_archive(){
-    local archive="$1"
-    local base_name
-    base_name="$(basename "$archive")"
-    local tmp_dir
-    tmp_dir="$(mktemp -d)"
-    local extracted=0
+bundle_loose_files(){
+    local batch_timestamp
+    batch_timestamp="$(date +%Y%m%d_%H%M%S)"
+    local batch_name="ImportBackup_${batch_timestamp}"
+    local batch_dir="$IMPORT_DIR/$batch_name"
+    local moved=0
 
-    cleanup_tmp(){
-        rm -rf "$tmp_dir"
-    }
-    case "$archive" in
-        *.tar.gz|*.tgz)
-            if tar -xzf "$archive" -C "$tmp_dir"; then
-                extracted=1
-            fi
-            ;;
-        *.tar)
-            if tar -xf "$archive" -C "$tmp_dir"; then
-                extracted=1
-            fi
-            ;;
-        *.zip)
-            if ! command -v unzip >/dev/null 2>&1; then
-                echo "⚠️ unzip not found; cannot extract $base_name"
-            elif unzip -q "$archive" -d "$tmp_dir"; then
-                extracted=1
-            fi
-            ;;
-        *)
-            echo "⚠️ Unsupported archive format for $base_name"
-            ;;
-    esac
-
-    if [ "$extracted" -ne 1 ]; then
-        cleanup_tmp
-        return
+    batch_dir="$(generate_unique_path "$batch_dir")"
+    if ! mkdir -p "$batch_dir"; then
+        echo "❌ Failed to create batch directory: $batch_dir"
+        exit 1
     fi
 
-    mapfile -d '' entries < <(find "$tmp_dir" -mindepth 1 -maxdepth 1 -print0) || true
-    local dest=""
-    if [ ${#entries[@]} -eq 1 ] && [ -d "${entries[0]}" ]; then
-        local inner_name
-        inner_name="$(basename "${entries[0]}")"
-        dest="$BACKUP_ROOT/$inner_name"
-        dest="$(generate_unique_path "$dest")"
-        mv "${entries[0]}" "$dest"
-    else
-        local base="${base_name%.*}"
-        base="${base%.*}" # handle double extensions like .tar.gz
-        dest="$(generate_unique_path "$BACKUP_ROOT/$base")"
-        mkdir -p "$dest"
-        if [ ${#entries[@]} -gt 0 ]; then
-            mv "${entries[@]}" "$dest"/
+    for file in "${sql_files[@]}"; do
+        [ -f "$file" ] || continue
+        echo "📦 Moving $(basename "$file") → $(basename "$batch_dir")/"
+        if ! mv "$file" "$batch_dir/"; then
+            echo "❌ Failed to move $file"
+            exit 1
         fi
-    fi
-    echo "🗂️ Extracted $base_name → $(basename "$dest")"
-    staged_archives=$((staged_archives + 1))
-    cleanup_tmp
+        moved=$((moved + 1))
+    done
+
+    echo "🗂️ Created import batch $(basename "$batch_dir") with $moved file(s)"
+    local dest_path
+    dest_path="$(stage_backup_directory "$batch_dir")"
+    echo "✅ Backup batch copied to $(basename "$dest_path")"
+    echo "💡 Files will be automatically imported during deployment"
 }
 
-for dir in "${full_backup_dirs[@]:-}"; do
-    stage_backup_directory "$dir"
-done
-
-for archive in "${archive_files[@]:-}"; do
-    extract_archive "$archive"
-done
-
-if [ "$copied_sql" -gt 0 ]; then
-    echo "✅ $copied_sql database file(s) copied to $BACKUP_DIR"
-fi
-if [ "$staged_dirs" -gt 0 ]; then
-    dir_label="directories"
-    [ "$staged_dirs" -eq 1 ] && dir_label="directory"
-    echo "✅ $staged_dirs full backup $dir_label staged in $BACKUP_ROOT"
-fi
-if [ "$staged_archives" -gt 0 ]; then
-    archive_label="archives"
-    [ "$staged_archives" -eq 1 ] && archive_label="archive"
-    echo "✅ $staged_archives backup $archive_label extracted to $BACKUP_ROOT"
-fi
-
-if [ "$copied_sql" -eq 0 ] && [ "$staged_dirs" -eq 0 ] && [ "$staged_archives" -eq 0 ]; then
-    echo "⚠️ No valid files or backups were staged. Ensure your dumps are .sql/.sql.gz or packaged in directories/archives."
-else
-    echo "💡 Files will be automatically imported during deployment"
-fi
+bundle_loose_files
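Both the old and the new code lean on generate_unique_path, whose body sits outside these hunks; only its closing printf '%s\n' "$target" is visible. A plausible stand-in consistent with how the callers use it would append a numeric suffix until the path is unused; this is an assumption for illustration, not the repository's actual implementation:

generate_unique_path(){
    # Hypothetical body: the real function is not part of this diff.
    local target="$1"
    local base="$target"
    local n=1
    while [ -e "$target" ]; do
        target="${base}_${n}"
        n=$((n + 1))
    done
    printf '%s\n' "$target"
}

With that helper, the new flow is a single path: move every loose *.sql/*.sql.gz into database-import/ImportBackup_<timestamp>/, copy that one directory into ${STORAGE_PATH}/backups, and let the deployment-time restore pick it up, instead of classifying, renaming and copying each dump (and extracting archives) individually as before.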