#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import sys
import shutil
import struct
import zlib
import time
import re
import mmap
import hashlib
import random
import multiprocessing
from concurrent.futures import ThreadPoolExecutor, as_completed
from pathlib import Path
from collections import namedtuple

# ====================== 配色 + 逐字彩虹变色核心 ======================
class Color:
    """ANSI SGR escape sequences used for terminal styling."""
    BOLD        = "\033[1m"
    RESET       = "\033[0m"

# ANSI foreground color codes drawn from for per-character rainbow coloring.
COLOR_CODES = ["\033[90m","\033[91m","\033[92m","\033[93m","\033[94m","\033[95m","\033[96m","\033[97m"]

# 一字一随机颜色
def rainbow_text(text):
    """Color each character of `text` with a randomly chosen ANSI code.

    One color code is drawn per character, and the string is terminated
    with the reset sequence so terminal state is restored.
    """
    # join() builds the result in one pass instead of the quadratic `+=` loop.
    return "".join(random.choice(COLOR_CODES) + ch for ch in text) + Color.RESET

# Globally shared rainbow divider strings. Built once at import time, so the
# random per-character coloring is fixed for the lifetime of the process.
BORDER_TOP    = rainbow_text("="*60)
BORDER_MID    = rainbow_text("-"*60)
LINE_DIVIDER  = BORDER_MID
SHORT_DIVIDER = rainbow_text("-"*40)

# ====================== 动画工具函数 ======================
def loading_anim(text, seconds=1.5):
    """Show a braille spinner next to rainbow `text` for `seconds` seconds.

    Redraws in place via carriage returns and clears the line when done.
    """
    spinner = "⠋⠙⠹⠸⠼⠴⠦⠧⠇⠏"
    deadline = time.time() + seconds
    frame = 0
    while time.time() < deadline:
        glyph = spinner[frame % len(spinner)]
        print(f"\r\033[94m{glyph} {rainbow_text(text)}...{Color.RESET}", end="", flush=True)
        frame += 1
        time.sleep(0.08)
    print("\r" + " " * 60 + "\r", end="")

def progress_bar(current, total, prefix="进度", length=30):
    """Render an in-place textual progress bar.

    Prints a filled/empty bar with percentage; emits a final newline once
    `current` reaches `total`.

    Fix: guard `total <= 0`, which previously raised ZeroDivisionError.
    """
    if total <= 0:
        # Nothing to measure; render as complete and terminate the line.
        print(f"\r\033[92m{prefix} |{'█' * length}| 100% ({current}/{total}){Color.RESET}", flush=True)
        return
    percent = int(100 * current / total)
    filled = int(length * current / total)
    bar = "█" * filled + "░" * (length - filled)
    print(f"\r\033[92m{prefix} |{bar}| {percent}% ({current}/{total}){Color.RESET}", end="", flush=True)
    if current >= total:
        print()

# ====================== PAK同步路径配置 ======================
# Source folder that may contain a freshly extracted PAK to mirror from.
SRC_PAK_PATH = Path("/storage/emulated/0/勿辞制作区/提取的pak/载具天线+头像框+击杀反馈水印+载具入场+大厅水印pak")
# Destination folder the tool actually unpacks/repacks from.
DST_PAK_PATH = Path("/storage/emulated/0/勿辞制作区/pak")
# PAK name auto-chosen by auto_sync_pak(); "" means prompt the user instead.
AUTO_SELECT_PAK_NAME = ""

def get_file_md5(file_path):
    """Return the hex MD5 digest of the file at `file_path`.

    Streams the file in 4 KiB blocks so large files are hashed without
    loading them fully into memory.
    """
    digest = hashlib.md5()
    with open(file_path, 'rb') as fh:
        for block in iter(lambda: fh.read(4096), b''):
            digest.update(block)
    return digest.hexdigest()

def auto_sync_pak():
    """Mirror the first *.pak from SRC_PAK_PATH into DST_PAK_PATH.

    Skips the copy when an identical file (by MD5) already exists at the
    destination, and records the chosen PAK name in AUTO_SELECT_PAK_NAME
    so later selection can be automatic.
    """
    global AUTO_SELECT_PAK_NAME
    AUTO_SELECT_PAK_NAME = ""
    DST_PAK_PATH.mkdir(parents=True, exist_ok=True)

    print(f"\n{BORDER_TOP}")
    loading_anim("正在检测源文件夹")
    if not SRC_PAK_PATH.exists():
        print(rainbow_text("[提示] 源文件夹不存在，跳过同步"))
        print(f"{BORDER_TOP}\n")
        return

    candidates = list(SRC_PAK_PATH.glob("*.pak"))
    if not candidates:
        print(rainbow_text("[提示] 源文件夹内无PAK文件"))
        print(f"{BORDER_TOP}\n")
        return

    loading_anim("正在校验PAK文件")
    src = candidates[0]
    pak_name = src.name
    dst = DST_PAK_PATH / pak_name

    if not dst.exists():
        shutil.copy2(src, dst)
        print(rainbow_text(f"[同步] {pak_name} 已复制到pak目录"))
    elif get_file_md5(src) == get_file_md5(dst):
        print(rainbow_text(f"[同步] {pak_name} 内容相同，无需复制"))
    else:
        shutil.copy2(src, dst)
        print(rainbow_text(f"[同步] {pak_name} 内容不同，已覆盖复制"))

    AUTO_SELECT_PAK_NAME = pak_name
    print(f"{BORDER_TOP}\n")

# ====================== 全局配置（修复点：降低文件大小阈值） ======================
PAK_DIR = "/storage/emulated/0/勿辞制作区/pak/"        # folder scanned for candidate PAKs
UNPACK_DIR = "/storage/emulated/0/勿辞制作区/uexp解包"  # extraction output tree
PACK_TEMP_DIR = "/storage/emulated/0/勿辞制作区/uexp打包"  # temp copy used while repacking
CONFIG_DIR = "/storage/emulated/0/勿辞制作区/配置"      # replacement-rule config files
TARGET_FILE_PATTERN = "AvatarFrame.uexp"               # basename of the file to patch
MIN_FILE_SIZE = 100 * 1024  # lowered from 500KB to 100KB so valid files aren't rejected
MIN_PRINT = True            # True suppresses per-file extraction warnings
MAX_WORKERS = min(multiprocessing.cpu_count(), 8)  # thread pool size for extraction
ENCRYPT_KEY = 0x79          # XOR key used by the PAK's obfuscation scheme

def clear_pack_unpack_dirs():
    """Recreate the unpack and pack-temp directories as empty folders.

    Terminates the process if an existing directory cannot be removed.
    """
    for target in (UNPACK_DIR, PACK_TEMP_DIR):
        folder = Path(target)
        if folder.exists():
            try:
                shutil.rmtree(folder)
            except Exception as e:
                print(rainbow_text(f"[错误] 清空目录 {target} 失败: {str(e)}"))
                sys.exit(1)
        folder.mkdir(parents=True, exist_ok=True)

# Per-file location/compression metadata captured at unpack time and reused
# verbatim by FastPakPacker when writing replacement data back into the PAK.
CompressionInfo = namedtuple('CompressionInfo', [
    'offset', 'size', 'zip', 'zsize', 'encrypted',
    'chunks', 'chunk_size'
])

# ====================== 工具类 ======================
class Utils:
    """Stateless helpers for zlib (de)compression, XOR crypto and hex search."""

    @staticmethod
    def decompress_zlib(data):
        # Best-effort inflate; corrupt input yields an empty payload.
        try:
            return zlib.decompress(data)
        except Exception:
            return b''

    @staticmethod
    def encrypt_data(data, encrypt_flag):
        # Symmetric byte-wise XOR with ENCRYPT_KEY when the flag is truthy.
        if not encrypt_flag:
            return data
        return bytes(b ^ ENCRYPT_KEY for b in data)

    @staticmethod
    def decrypt_data(data, encrypt_flag):
        # XOR is its own inverse, so decryption reuses encrypt_data.
        return Utils.encrypt_data(data, encrypt_flag)

    @staticmethod
    def compress_to_max_size(input_data, max_size):
        # Walk compression levels 9..0 looking for output that fits within
        # max_size; if none fits, fall back to the raw bytes zero-padded.
        level = 9
        while level >= 0:
            try:
                candidate = zlib.compress(input_data, level)
            except Exception:
                level -= 1
                continue
            if len(candidate) <= max_size:
                return candidate
            level -= 1
        return input_data.ljust(max_size, b'\x00')

    @staticmethod
    def dec_to_hex(decimal_number):
        # Render as 8 uppercase hex digits, then reverse byte order
        # (little-endian text form). Returns None for non-numeric input.
        try:
            big_endian = format(int(decimal_number), '08X')
        except ValueError:
            print(rainbow_text(f"[错误] 无效的十进制数: {decimal_number}"))
            return None
        pairs = [big_endian[i:i + 2] for i in range(0, 8, 2)]
        pairs.reverse()
        return ''.join(pairs)

    @staticmethod
    def find_hex_reverse(data, hex_to_find, start):
        # Last occurrence of the pattern strictly before `start`.
        return data.rfind(hex_to_find, 0, start)

    @staticmethod
    def find_hex(data, hex_to_find, start, end):
        # First occurrence of the pattern within [start, end).
        return data.find(hex_to_find, start, end)

# ====================== 解包核心（修复点：增加目标文件调试打印） ======================
class FastPakExtractor:
    """Parse a PAK archive's footer index and extract selected files.

    The PAK is memory-mapped read-only. Its footer (mount path, fixed-size
    file records, then a name table / TOC) is located by scanning the last
    few MB of the file for known marker byte patterns; an XOR-0x79
    obfuscated marker switches the parser into "encrypted" mode.
    """

    def __init__(self):
        # 1 when the index appears XOR-obfuscated (see find_magic_offset).
        self.encrypt = 0
        # Read-only mmap of the selected PAK; opened in unpack_pak().
        self.mm = None
        # Path of the PAK chosen by select_pak_file().
        self.selected_pak = None
        # full_path -> CompressionInfo, consumed later by FastPakPacker.
        self.compression_info = {}
        self.base_path = "../../../"
        # Extracted target file that passed the MIN_FILE_SIZE check.
        self.valid_target_file = None
        # Wall-clock seconds taken by the last unpack_pak() call.
        self.unpack_elapsed = 0

    def get_pak_files(self):
        """Return *.pak files in PAK_DIR, largest first ([] if dir missing)."""
        pak_dir = Path(PAK_DIR)
        if not pak_dir.exists():
            print(rainbow_text(f"[错误] PAK目录不存在: {PAK_DIR}"))
            return []
        return sorted(list(pak_dir.glob("*.pak")), key=lambda x: x.stat().st_size, reverse=True)

    def select_pak_file(self, pak_files):
        """Choose the PAK to operate on and store it in self.selected_pak.

        Auto-selects the file named by AUTO_SELECT_PAK_NAME when present in
        `pak_files`; otherwise shows a numbered menu ("0" exits the
        program). Returns True once a file is selected.
        """
        global AUTO_SELECT_PAK_NAME
        loading_anim("正在匹配源PAK文件")
        if AUTO_SELECT_PAK_NAME:
            for pak in pak_files:
                if pak.name == AUTO_SELECT_PAK_NAME:
                    self.selected_pak = pak
                    print(rainbow_text(f"[自动选中] 源文件PAK：{pak.name}"))
                    return True

        print(f"\n{rainbow_text('可用的 PAK 列表：')}")
        print(BORDER_MID)
        for i, pak in enumerate(pak_files, 1):
            size_mb = pak.stat().st_size / 1024 / 1024
            print(rainbow_text(f"{i}. {pak.name} ({size_mb:.0f} MB)"))
        print(BORDER_MID)
        while True:
            try:
                choice = input(f"\r\033[94m{rainbow_text('请输入序号选择')} (1-{len(pak_files)}) : {Color.RESET}").strip()
                if choice == "0":
                    sys.exit(0)
                choice = int(choice)
                if 1 <= choice <= len(pak_files):
                    self.selected_pak = pak_files[choice-1]
                    return True
                print(rainbow_text(f"请输入1~{len(pak_files)}之间的数字"))
            except ValueError:
                print(rainbow_text("请输入正确数字"))

    def find_magic_offset(self):
        """Find the footer's starting offset by scanning the file tail.

        Scans the last <=5 MB for, in priority order: the plain
        "../../../" marker, the same bytes XOR'ed with 0x79 (sets
        self.encrypt = 1), or a bare "../" fallback. Returns the offset of
        the 4-byte name-length field assumed to precede the marker, or a
        fixed file_size - 0x2C guess when nothing matches.
        """
        pattern1 = b"\x2E\x2E\x2F\x2E\x2E\x2F\x2E\x2E\x2F"
        # pattern1 XOR 0x79: the obfuscated form of "../../../".
        pattern2 = b"\x57\x57\x56\x57\x57\x56\x57\x57\x56"
        pattern3 = b"\x2E\x2E\x2F"
        scan_len = min(5 * 1024 * 1024, self.file_size)
        data = self.mm[-scan_len:]
        offset1 = data.find(pattern1)
        offset2 = data.find(pattern2)
        offset3 = data.rfind(pattern3)
        if offset1 != -1:
            self.encrypt = 0
            return self.file_size - scan_len + offset1 - 4
        elif offset2 != -1:
            self.encrypt = 1
            return self.file_size - scan_len + offset2 - 4
        elif offset3 != -1:
            self.encrypt = 0
            return self.file_size - scan_len + offset3 - 4
        else:
            return self.file_size - 0x2C

    def get_base_path(self, offset):
        """Read the length-prefixed mount path at `offset`.

        Returns (base_path, offset_after_path); any bounds or decoding
        problem falls back to "../../../". Short names (< 0xFF bytes and
        not exactly 0x0A) get "../../../" prepended — presumably the
        game's mount-point convention; TODO confirm.
        """
        if offset + 4 > self.file_size:
            return "../../../", offset + 4
        try:
            name_size = struct.unpack('<I', self.mm[offset:offset+4])[0]
        except Exception:
            return "../../../", offset + 4
        if name_size == 0:
            return "../../../", offset + 4
        if name_size > 1024 or offset + 4 + name_size > self.file_size:
            return "../../../", offset + 4 + name_size
        try:
            base_path = self.mm[offset+4:offset+4+name_size].decode('utf-8', errors='ignore').rstrip('\x00')
            if name_size != 0x0A and name_size < 0xFF:
                base_path = "../../../" + base_path
            self.base_path = base_path
            return base_path, offset + 4 + name_size
        except Exception:
            return "../../../", offset + 4 + name_size

    def parse_file_entry(self, offset):
        """Parse one file record (20-byte hash + 49 bytes of fixed fields).

        Returns (entry_dict, next_offset), or (None, offset + 69) when the
        record cannot be read. Compressed entries (zip != 0) carry a chunk
        table of (start, end) offset pairs, followed by a 4-byte chunk
        size and a 1-byte per-entry encrypted flag. On encrypted PAKs the
        numeric fields are re-read from the XOR-decrypted record bytes.
        """
        entry_start = offset
        if offset + 20 + 49 > self.file_size:
            return None, offset + 69
        try:
            hash_data = self.mm[offset:offset+20]
            offset += 20
            entry_data = self.mm[offset:offset+49]
            offset += 49
            entry = {
                'hash': hash_data,
                'offset': struct.unpack('<Q', entry_data[0:8])[0],
                'size': struct.unpack('<Q', entry_data[8:16])[0],
                'zip': struct.unpack('<I', entry_data[16:20])[0],
                'zsize': struct.unpack('<Q', entry_data[20:28])[0],
                'chunks': [],
                'chunk_size': 0x10000,
                'encrypted': 0
            }
            if entry['zip'] != 0:
                if offset + 4 > self.file_size:
                    return entry, offset
                chunk_count = struct.unpack('<I', self.mm[offset:offset+4])[0]
                offset += 4
                # Clamp to a sane bound in case the count field is corrupt.
                chunk_count = min(chunk_count, 1000)
                for _ in range(chunk_count):
                    if offset + 16 > self.file_size:
                        break
                    chunk_data = self.mm[offset:offset+16]
                    offset += 16
                    chunk_offset = struct.unpack('<Q', chunk_data[0:8])[0]
                    chunk_end = struct.unpack('<Q', chunk_data[8:16])[0]
                    entry['chunks'].append((chunk_offset, chunk_end))
            if offset + 5 > self.file_size:
                return entry, offset
            chunk_size_data = self.mm[offset:offset+5]
            offset += 5
            entry['chunk_size'] = struct.unpack('<I', chunk_size_data[0:4])[0]
            entry['encrypted'] = chunk_size_data[4]
            if self.encrypt:
                try:
                    # Re-read numeric fields from the decrypted record; the
                    # field offsets shift by 20 because the hash is included.
                    encrypted_entry = self.mm[entry_start:offset]
                    decrypted_entry = self.decrypt_data(encrypted_entry)
                    if len(decrypted_entry) >= 69:
                        entry['offset'] = struct.unpack('<Q', decrypted_entry[20:28])[0]
                        entry['size'] = struct.unpack('<Q', decrypted_entry[28:36])[0]
                        entry['zip'] = struct.unpack('<I', decrypted_entry[36:40])[0]
                        entry['zsize'] = struct.unpack('<Q', decrypted_entry[40:48])[0]
                except Exception:
                    pass
            return entry, offset
        except Exception:
            return None, offset + 69

    def decrypt_data(self, data):
        """XOR every byte with ENCRYPT_KEY when encryption mode is active."""
        if not self.encrypt:
            return data
        return bytes([b ^ ENCRYPT_KEY for b in data])

    def extract_file(self, entry, output_path):
        """Extract one entry to `output_path`; returns True on success."""
        try:
            if entry['chunks']:
                return self.extract_chunked(entry, output_path)
            else:
                return self.extract_simple(entry, output_path)
        except Exception:
            return False

    def extract_simple(self, entry, output_path):
        """Extract a non-chunked entry (single compressed or raw blob)."""
        try:
            if entry['zip'] != 0:
                compressed_data = self.mm[entry['offset']:entry['offset']+entry['zsize']]
                if self.encrypt and entry['encrypted'] == 1:
                    compressed_data = self.decrypt_data(compressed_data)
                data = Utils.decompress_zlib(compressed_data)
            else:
                data = self.mm[entry['offset']:entry['offset']+entry['size']]
                if self.encrypt and entry['encrypted'] == 1:
                    data = self.decrypt_data(data)
            output_path.parent.mkdir(parents=True, exist_ok=True)
            with open(output_path, 'wb') as f:
                f.write(data)
            # Fix: debug print showing the size of the located target file.
            if output_path.name == TARGET_FILE_PATTERN:
                file_size = output_path.stat().st_size
                print(rainbow_text(f"[调试] 找到目标文件：{output_path.name}，大小：{file_size} 字节"))
                if file_size > MIN_FILE_SIZE:
                    self.valid_target_file = output_path
                    print(rainbow_text(f"[找到目标文件] {output_path.name}（大小符合要求）"))
                else:
                    print(rainbow_text(f"[警告] 文件大小不足 {MIN_FILE_SIZE} 字节，未设为有效目标文件"))
            return True
        except Exception as e:
            if not MIN_PRINT:
                print(rainbow_text(f"[警告] 提取失败 {output_path.name}: {str(e)}"))
            return False

    def extract_chunked(self, entry, output_path):
        """Extract a chunked entry by concatenating its decoded chunks.

        Writes at most entry['size'] bytes; trailing chunk padding is
        truncated.
        """
        try:
            total_size = entry['size']
            remaining = total_size
            output_path.parent.mkdir(parents=True, exist_ok=True)
            with open(output_path, 'wb') as out:
                for chunk_offset, chunk_end in entry['chunks']:
                    if remaining <= 0:
                        break
                    chunk_zsize = chunk_end - chunk_offset
                    chunk_data = self.mm[chunk_offset:chunk_offset+chunk_zsize]
                    if self.encrypt and entry['encrypted'] == 1:
                        chunk_data = self.decrypt_data(chunk_data)
                    if entry['zip'] != 0:
                        chunk_data = Utils.decompress_zlib(chunk_data)
                    write_size = min(len(chunk_data), remaining)
                    out.write(chunk_data[:write_size])
                    remaining -= write_size
            # Fix: debug print showing the size of the located target file.
            if output_path.name == TARGET_FILE_PATTERN:
                file_size = output_path.stat().st_size
                print(rainbow_text(f"[调试] 找到目标文件：{output_path.name}，大小：{file_size} 字节"))
                if file_size > MIN_FILE_SIZE:
                    self.valid_target_file = output_path
                    print(rainbow_text(f"[找到目标文件] {output_path.name}（大小符合要求）"))
                else:
                    print(rainbow_text(f"[警告] 文件大小不足 {MIN_FILE_SIZE} 字节，未设为有效目标文件"))
            return True
        except Exception as e:
            if not MIN_PRINT:
                print(rainbow_text(f"[警告] 提取失败 {output_path.name}: {str(e)}"))
            return False

    def parse_toc(self, toc_offset, toc_size):
        """Walk the name table and collect entries for targeted file names.

        Returns a list of (entry_index, full_path, file_name) for files
        whose lowercase name is in self.target_files. Name fields carry a
        signed length prefix: >= 0 means UTF-8 bytes, negative means |n|
        UTF-16-LE characters. Stack items: (1, _) = "read the directory
        count next", (0, n) = "n directories remain to parse".
        """
        toc_data = self.mm[toc_offset:toc_offset+toc_size]
        toc_len = len(toc_data)
        pos = 0
        entries = []
        stack = [(1, 0)]
        target_set = self.target_files
        while stack:
            if pos + 8 > toc_len:
                break
            flag, count = stack.pop()
            if flag == 1:
                dir_count = struct.unpack('<Q', toc_data[pos:pos+8])[0]
                pos += 8
                stack.append((0, dir_count))
                continue
            if count == 0:
                continue
            count -= 1
            if pos + 4 > toc_len:
                break
            name_size = struct.unpack('<i', toc_data[pos:pos+4])[0]
            pos += 4
            dir_name = ""
            if name_size >= 0:
                if pos + name_size > toc_len:
                    pos += name_size
                    continue
                try:
                    dir_name = toc_data[pos:pos+name_size].decode('utf-8', errors='ignore').rstrip('\x00')
                except Exception:
                    pass
                pos += name_size
            else:
                abs_name_size = abs(name_size) * 2
                if pos + abs_name_size > toc_len:
                    pos += abs_name_size
                    continue
                try:
                    dir_name = toc_data[pos:pos+abs_name_size].decode('utf-16-le', errors='ignore').rstrip('\x00')
                except Exception:
                    pass
                pos += abs_name_size
            if pos + 8 > toc_len:
                break
            file_count = struct.unpack('<Q', toc_data[pos:pos+8])[0]
            pos += 8
            if file_count == 0:
                stack.append((0, count))
                continue
            # Clamp to a sane bound in case the count field is corrupt.
            file_count = min(file_count, 100000)
            for _ in range(file_count):
                if pos + 4 > toc_len:
                    break
                name_size = struct.unpack('<i', toc_data[pos:pos+4])[0]
                pos += 4
                file_name = ""
                if name_size > 0:
                    if pos + name_size > toc_len:
                        pos += name_size
                        continue
                    try:
                        file_name = toc_data[pos:pos+name_size].decode('utf-8', errors='ignore').rstrip('\x00')
                    except Exception:
                        pass
                    pos += name_size
                else:
                    abs_name_size = abs(name_size) * 2
                    if pos + abs_name_size > toc_len:
                        pos += abs_name_size
                        continue
                    try:
                        file_name = toc_data[pos:pos+abs_name_size].decode('utf-16-le', errors='ignore').rstrip('\x00')
                    except Exception:
                        pass
                    pos += abs_name_size
                full_path = f"{dir_name}{file_name}"
                if pos + 4 > toc_len:
                    break
                entry_index = struct.unpack('<I', toc_data[pos:pos+4])[0]
                pos += 4
                if file_name.lower() in target_set:
                    entries.append((entry_index, full_path, file_name))
            if count > 0:
                stack.append((0, count))
        return entries

    def unpack_pak(self, target_set):
        """Unpack every file named in `target_set` (case-insensitive).

        Parses the footer, records a CompressionInfo per matching TOC
        path, extracts the matches concurrently, and returns True only
        when a valid target file was produced. Elapsed time is stored in
        self.unpack_elapsed.
        """
        if not self.selected_pak:
            print(rainbow_text("[错误] 未选择PAK文件"))
            return False
        start_time = time.time()
        self.target_files = {f.lower() for f in target_set}
        self.valid_target_file = None
        self.compression_info.clear()
        self.file_size = self.selected_pak.stat().st_size
        try:
            loading_anim("正在加载并解析PAK结构")
            with open(self.selected_pak, 'rb') as f:
                self.mm = mmap.mmap(f.fileno(), 0, access=mmap.ACCESS_READ)
            magic_offset = self.find_magic_offset()
            base_path, pos = self.get_base_path(magic_offset)
            file_count = struct.unpack('<I', self.mm[pos:pos+4])[0]
            pos += 4
            file_count = min(file_count, 100000)
            entries = []
            for i in range(file_count):
                entry, pos = self.parse_file_entry(pos)
                if entry:
                    entries.append(entry)
                progress_bar(i+1, file_count, "解包扫描")
            # Skip 8 bytes between the records and the TOC (presumably a
            # terminator/size field — TODO confirm); encrypted PAKs carry
            # one extra flag byte here.
            pos += 8
            if self.encrypt:
                pos += 1
            toc_offset = pos
            toc_size = self.file_size - toc_offset
            toc_entries = self.parse_toc(toc_offset, toc_size)
            tasks = []
            for entry_idx, full_path, file_name in toc_entries:
                if entry_idx < len(entries):
                    entry = entries[entry_idx].copy()
                    rel_path = full_path
                    if base_path and base_path != "../../../":
                        rel_path = base_path + full_path
                    # Strip any leading "../" so output stays inside UNPACK_DIR.
                    while rel_path.startswith('../'):
                        rel_path = rel_path[3:]
                    output_path = Path(UNPACK_DIR) / rel_path
                    self.compression_info[full_path] = CompressionInfo(
                        offset=entry['offset'], size=entry['size'], zip=entry['zip'],
                        zsize=entry['zsize'], encrypted=entry['encrypted'],
                        chunks=entry['chunks'], chunk_size=entry['chunk_size']
                    )
                    tasks.append((entry, output_path))
            # Run the extraction tasks concurrently.
            with ThreadPoolExecutor(max_workers=MAX_WORKERS) as executor:
                futures = {executor.submit(self.extract_file, e, o): (e, o) for e, o in tasks}
                for future in as_completed(futures):
                    future.result()
            self.unpack_elapsed = time.time() - start_time
            # Fix: verify a valid target file was found after unpacking and
            # bail out early to avoid errors in later stages.
            if self.valid_target_file is None:
                print(rainbow_text("[错误] 解包后未找到符合条件的AvatarFrame.uexp文件，请检查PAK文件或文件大小阈值"))
                print(rainbow_text("[提示] 可手动检查解包目录是否存在该文件"))
                return False
            return True
        except Exception as e:
            print(f"\n{rainbow_text(f'[错误] 解包失败: {str(e)}')}")
            return False
        finally:
            if self.mm:
                self.mm.close()

# ====================== 打包核心 ======================
class FastPakPacker:
    """Write modified files back into the original PAK in place.

    Reuses the offsets/sizes captured by FastPakExtractor, so each
    replacement must fit the space the original data occupied; payloads
    are re-compressed and zero-padded to match.
    """

    def __init__(self, extractor):
        self.extractor = extractor
        # Snapshot of the encryption mode; refreshed in pack_pak() because
        # the extractor only detects it while unpacking (see fix below).
        self.encrypt = extractor.encrypt
        # Chunk-level write counters.
        self.success_count = 0
        self.fail_count = 0
        # File-level counters.
        self.file_success = 0
        self.file_fail = 0
        # Wall-clock seconds taken by the last pack_pak() call.
        self.pack_elapsed = 0

    def replace_non_chunked(self, pak_file, info, new_data):
        """Overwrite one non-chunked entry with `new_data`.

        Compressed entries are recompressed to fit within info.zsize and
        zero-padded; raw entries are zero-padded to info.size.
        Returns True on success.
        """
        try:
            if info.zip != 0:
                compressed = Utils.compress_to_max_size(new_data, info.zsize)
                compressed = Utils.encrypt_data(compressed, self.encrypt and info.encrypted == 1)
                pak_file.seek(info.offset)
                pak_file.write(compressed.ljust(info.zsize, b'\x00'))
            else:
                # NOTE(review): data longer than info.size is written
                # untruncated — assumes callers supply same-size data; verify.
                data_to_write = new_data.ljust(info.size, b'\x00')
                data_to_write = Utils.encrypt_data(data_to_write, self.encrypt and info.encrypted == 1)
                pak_file.seek(info.offset)
                pak_file.write(data_to_write)
            self.success_count += 1
            return True
        except Exception as e:
            print(rainbow_text(f"[错误] 非分块文件写入失败: {str(e)[:50]}"))
            self.fail_count += 1
            return False

    def replace_chunked(self, pak_file, info, new_data):
        """Overwrite a chunked entry slice by slice, preserving chunk slots.

        Every chunk except the last carries up to info.chunk_size payload
        bytes; each slice is (re)compressed to fit its original span and
        zero-padded. Returns True on success.
        """
        try:
            data_pos = 0
            total_size = info.size
            for i, (chunk_offset, chunk_end) in enumerate(info.chunks):
                max_chunk_size = chunk_end - chunk_offset
                if i < len(info.chunks) - 1:
                    current_chunk_size = min(info.chunk_size, total_size - data_pos)
                else:
                    current_chunk_size = total_size - data_pos
                if current_chunk_size <= 0:
                    break
                chunk_data = new_data[data_pos:data_pos + current_chunk_size]
                data_pos += current_chunk_size
                if info.zip != 0:
                    chunk_data = Utils.compress_to_max_size(chunk_data, max_chunk_size)
                chunk_data = Utils.encrypt_data(chunk_data, self.encrypt and info.encrypted == 1)
                if len(chunk_data) < max_chunk_size:
                    chunk_data = chunk_data.ljust(max_chunk_size, b'\x00')
                pak_file.seek(chunk_offset)
                pak_file.write(chunk_data)
                progress_bar(i+1, len(info.chunks), "打包进度")
            self.success_count += len(info.chunks)
            return True
        except Exception as e:
            print(rainbow_text(f"[错误] 分块文件写入失败: {str(e)[:50]}"))
            self.fail_count += len(info.chunks)
            return False

    def pack_pak(self):
        """Rewrite the selected PAK with the files found under UNPACK_DIR.

        Builds a lowercase-basename -> path map of the unpacked tree,
        patches each recorded entry inside a temp copy of the PAK, then
        replaces the original. Prints a summary and returns True on
        success.
        """
        original_pak = self.extractor.selected_pak
        if not original_pak:
            print(rainbow_text("[错误] 未选择PAK文件"))
            return False
        # Fix: refresh the encryption flag. It was captured in __init__
        # before unpack_pak() detected the PAK's mode (the tool constructs
        # the packer at startup), so the stale value would write back
        # unencrypted data into an encrypted PAK.
        self.encrypt = self.extractor.encrypt
        start_time = time.time()
        self.success_count = 0
        self.fail_count = 0
        self.file_success = 0
        self.file_fail = 0
        file_map = {}
        for root, _, files in os.walk(UNPACK_DIR):
            for file in files:
                file_map[file.lower()] = Path(root) / file
        try:
            temp_pak = Path(PACK_TEMP_DIR) / f"temp_{original_pak.name}"
            Path(PACK_TEMP_DIR).mkdir(parents=True, exist_ok=True)
            shutil.copy2(original_pak, temp_pak)
            with open(temp_pak, 'r+b') as pak_file:
                for idx, (full_path, info) in enumerate(self.extractor.compression_info.items()):
                    target_name = full_path.split('/')[-1].lower()
                    if target_name not in file_map:
                        self.file_fail += 1
                        continue
                    in_path = file_map[target_name]
                    if not in_path.exists():
                        self.file_fail += 1
                        continue
                    with open(in_path, 'rb') as df:
                        new_data = df.read()
                    if info.chunks:
                        if self.replace_chunked(pak_file, info, new_data):
                            self.file_success += 1
                        else:
                            self.file_fail += 1
                    else:
                        if self.replace_non_chunked(pak_file, info, new_data):
                            self.file_success += 1
                        else:
                            self.file_fail += 1
                    progress_bar(idx+1, len(self.extractor.compression_info), "文件处理")
            shutil.move(temp_pak, original_pak)
            self.pack_elapsed = time.time() - start_time

            print(f"\n{BORDER_MID}")
            print(rainbow_text(f"压缩成功: [{self.success_count}/{self.success_count+self.fail_count}]"))
            print(rainbow_text(f"压缩失败: [{self.fail_count}/{self.success_count+self.fail_count}]"))
            print(rainbow_text(f"文件打包成功: [{self.file_success}/{self.file_success+self.file_fail}]"))
            print(rainbow_text(f"文件打包失败: [{self.file_fail}/{self.file_success+self.file_fail}]"))
            print(f"{BORDER_MID}")
            return True
        except Exception as e:
            print(f"\n{rainbow_text(f'[错误] 打包失败: {str(e)}')}")
            # Remove the partially written temp copy on failure.
            temp_pak = Path(PACK_TEMP_DIR) / f"temp_{original_pak.name}"
            if temp_pak.exists():
                temp_pak.unlink()
            return False

# ====================== 头像框核心（修复点：增加None判断） ======================
class AvatarFrameTool:
    """End-to-end workflow: select PAK, unpack, patch AvatarFrame, repack."""

    def __init__(self):
        # Packer shares the extractor so the offsets recorded during
        # unpacking drive the write-back.
        self.extractor = FastPakExtractor()
        self.packer = FastPakPacker(self.extractor)

    def parse_config_file(self, file_path):
        """Parse (old_id, new_id) integer pairs from a config file.

        Strips '#', '//' and ';' comments, then pairs up consecutive runs
        of digits. NOTE(review): the \\d+ pattern drops any minus sign, so
        negative numbers are read as positive.
        """
        try:
            with open(file_path, 'r', encoding='utf-8', errors='ignore') as f:
                content = f.read()
            content = re.sub(r'#.*|//.*|;.*', '', content)
            numbers = re.findall(r'\d+', content)
            array = []
            for i in range(0, len(numbers)-1, 2):
                try:
                    a = int(numbers[i])
                    b = int(numbers[i+1])
                    array.append((a, b))
                except:
                    continue
            return array
        except Exception as e:
            print(rainbow_text(f"[错误] 读取配置文件出错：{str(e)}"))
            return []

    def select_config_file(self):
        """Prompt the user to pick a .py/.txt/.conf file from CONFIG_DIR.

        Returns the chosen Path, or None when the directory/files are
        missing or the user enters "0".
        """
        if not Path(CONFIG_DIR).exists():
            print(rainbow_text(f"[错误] 配置目录不存在: {CONFIG_DIR}"))
            return None
        config_files = list(Path(CONFIG_DIR).glob("*.py")) + list(Path(CONFIG_DIR).glob("*.txt")) + list(Path(CONFIG_DIR).glob("*.conf"))
        if not config_files:
            print(rainbow_text("[错误] 配置目录无有效文件"))
            return None
        print(f"\n{rainbow_text('可用配置文件：')}")
        print(BORDER_MID)
        for i, file in enumerate(config_files, 1):
            print(rainbow_text(f"{i}. {file.name}"))
        print(BORDER_MID)
        while True:
            try:
                choice = input(f"\r\033[94m{rainbow_text('选择配置序号')}: {Color.RESET}").strip()
                if choice == "0":
                    return None
                choice = int(choice)
                if 1 <= choice <= len(config_files):
                    return config_files[choice-1]
                print(rainbow_text("无效选择"))
            except ValueError:
                print(rainbow_text("请输入数字"))

    def auto_detect_feature_codes_avatar(self, file_path):
        """Derive the two delimiter feature codes from the target file.

        Searches for the anchor bytes 538C1E00 and reads two 4-byte fields
        at fixed offsets -188 and -172 before it, returned as uppercase
        hex. The anchor/offsets are presumably fixed landmarks inside
        AvatarFrame.uexp — TODO confirm against other game versions.
        Returns (code1, code2), or (None, None) on any failure.
        """
        # Fix: bail out early when file_path is None.
        if file_path is None:
            print(rainbow_text("[错误] 传入的文件路径为空，无法识别特征码"))
            return None, None
        target_hex = "538C1E00"
        target_bytes = bytes.fromhex(target_hex)
        try:
            loading_anim("正在识别特征码")
            with open(file_path, "rb") as file:
                file_contents = file.read()
        except FileNotFoundError:
            print(rainbow_text(f"[错误] 文件未找到: {file_path.name}"))
            return None, None
        target_pos = file_contents.find(target_bytes)
        if target_pos == -1:
            print(rainbow_text(f"[错误] 未找到目标十六进制值 {target_hex}"))
            return None, None
        start_pos1 = target_pos - 188
        end_pos1 = start_pos1 + 4
        if start_pos1 >= 0 and end_pos1 <= len(file_contents):
            feature_code1 = file_contents[start_pos1:end_pos1].hex().upper()
        else:
            print(rainbow_text("[错误] 无法提取第一个特征码（偏移越界）"))
            return None, None
        start_pos2 = target_pos - 172
        end_pos2 = start_pos2 + 4
        if start_pos2 >= 0 and end_pos2 <= len(file_contents):
            feature_code2 = file_contents[start_pos2:end_pos2].hex().upper()
        else:
            print(rainbow_text("[错误] 无法提取第二个特征码（偏移越界）"))
            return None, None
        return feature_code1, feature_code2

    def modify_file_hex(self, file_path, A, B, hex_start, hex_end):
        """Swap the two byte spans associated with IDs `A` and `B`.

        For each of the hex-string IDs, finds its first occurrence, then
        locates the nearest `hex_start` delimiter before it and the next
        `hex_end` delimiter after that; the two delimited spans are then
        swapped and the file rewritten. Returns True on success, False
        when any sequence is missing or writing fails.

        NOTE(review): if the two spans differ in length, the bytearray
        slice assignments shift subsequent offsets and change the file
        size — presumably the spans are always equal-length; verify.
        """
        # Fix: bail out early when file_path is None.
        if file_path is None:
            print(rainbow_text("[错误] 传入的文件路径为空，无法修改"))
            return False
        search_seq1 = bytes.fromhex(A)
        search_seq2 = bytes.fromhex(B)
        hex_to_find_start = bytes.fromhex(hex_start)
        hex_to_find_end = bytes.fromhex(hex_end)
        try:
            with open(file_path, "rb") as file:
                file_contents = file.read()
        except FileNotFoundError:
            print(rainbow_text(f"[错误] 文件未找到: {file_path.name}"))
            return False
        search_index1 = file_contents.find(search_seq1)
        search_index2 = file_contents.find(search_seq2)
        if search_index1 == -1 or search_index2 == -1:
            return False
        start_index1 = Utils.find_hex_reverse(file_contents, hex_to_find_start, search_index1)
        if start_index1 == -1:
            return False
        end_index1 = Utils.find_hex(file_contents, hex_to_find_end, start_index1 + len(hex_to_find_start), search_index1)
        if end_index1 == -1:
            return False
        start_index2 = Utils.find_hex_reverse(file_contents, hex_to_find_start, search_index2)
        if start_index2 == -1:
            return False
        end_index2 = Utils.find_hex(file_contents, hex_to_find_end, start_index2 + len(hex_to_find_start), search_index2)
        if end_index2 == -1:
            return False
        data_to_replace1 = file_contents[start_index1 + len(hex_to_find_start):end_index1]
        data_to_replace2 = file_contents[start_index2 + len(hex_to_find_start):end_index2]
        new_contents = bytearray(file_contents)
        new_contents[start_index1 + len(hex_to_find_start):end_index1] = data_to_replace2
        new_contents[start_index2 + len(hex_to_find_start):end_index2] = data_to_replace1
        try:
            with open(file_path, "wb") as file:
                file.write(new_contents)
            return True
        except Exception as e:
            print(rainbow_text(f"[错误] 写入文件出错: {str(e)[:50]}"))
            return False

    def batch_process_hex(self):
        """Apply every (old, new) rule from a chosen config to the target.

        Detects the delimiter feature codes once, then swaps the spans for
        each ID pair (IDs converted to little-endian hex). Returns True
        when the batch ran (even with per-rule failures), False on setup
        errors.
        """
        target_file = self.extractor.valid_target_file
        # Fix: bail out early when target_file is None to avoid errors below.
        if target_file is None:
            print(rainbow_text("[错误] 未找到有效的目标文件，无法执行修改操作"))
            return False

        config_path = self.select_config_file()
        if not config_path:
            return False
        code_array = self.parse_config_file(config_path)
        if not code_array:
            print(rainbow_text("[错误] 配置文件无有效规则"))
            return False
        total_rules = len(code_array)
        print(f"\n{rainbow_text(f'解析到: {total_rules} 组替换规则')}")
        feature_code1, feature_code2 = self.auto_detect_feature_codes_avatar(target_file)
        if not feature_code1 or not feature_code2:
            print(rainbow_text("[错误] 特征码识别失败，终止修改"))
            return False
        print(rainbow_text(f"识别到特征码1: {feature_code1}"))
        print(rainbow_text(f"识别到特征码2: {feature_code2}"))
        print(f"\n{rainbow_text('开始批量替换处理')}")
        success_count = 0
        for idx, (old_num, new_num) in enumerate(code_array, 1):
            print(SHORT_DIVIDER)
            print(rainbow_text(f"[第 {idx}/{total_rules} 组] {old_num} → {new_num}"))
            A = Utils.dec_to_hex(old_num)
            B = Utils.dec_to_hex(new_num)
            if A is None or B is None:
                print(rainbow_text("[警告] 跳过无效代码对"))
                continue
            if self.modify_file_hex(target_file, A, B, feature_code1, feature_code2):
                success_count += 1
                print(rainbow_text("[成功] 替换完成"))
            else:
                print(rainbow_text("[警告] 未找到序列，跳过"))
            progress_bar(idx, total_rules, "批量替换")
        print(f"\n{BORDER_MID}")
        print(rainbow_text(f"批量替换完成: 成功 {success_count}/{total_rules} 组"))
        print(f"{BORDER_MID}")
        return True

    def run(self):
        """Drive the full select → unpack → patch → repack pipeline.

        Each stage pauses for Enter and exits on failure.
        """
        print(f"\n{BORDER_TOP}")
        print(f"{rainbow_text('        全自动头像框美化工具        ')}")
        print(f"{BORDER_TOP}")

        print(f"\n{rainbow_text('[1/3] 进入PAK选择环节')}")
        pak_files = self.extractor.get_pak_files()
        if not pak_files:
            input(rainbow_text("按回车退出..."))
            return
        self.extractor.select_pak_file(pak_files)

        print(f"\n{rainbow_text('[2/3] 开始解包解析')}")
        if not self.extractor.unpack_pak({TARGET_FILE_PATTERN}):
            input(rainbow_text("按回车退出..."))
            return
        print(f"\n{rainbow_text(f'解包耗时: {self.extractor.unpack_elapsed:.3f}s')}")

        print(f"\n{rainbow_text('[3/3] 头像框数据修改')}")
        if not self.batch_process_hex():
            input(rainbow_text("按回车退出..."))
            return

        # NOTE(review): the banner says three steps but this is a fourth;
        # the "[4/3]" label is cosmetic only.
        print(f"\n{rainbow_text('[4/3] 重新打包PAK')}")
        print(rainbow_text(f"当前源文件: {self.extractor.selected_pak.name}"))
        self.packer.pack_pak()
        print(rainbow_text(f"打包耗时: {self.packer.pack_elapsed:.3f}s"))

        print(f"\n{BORDER_TOP}")
        print(f"{rainbow_text('        全部任务执行完成！        ')}")
        print(f"{BORDER_TOP}")
        input(f"\r\033[94m{rainbow_text('按 Enter 键退出程序...')}{Color.RESET}")

if __name__ == "__main__":
    # Seed the per-character rainbow coloring.
    random.seed(time.time())
    # NOTE(review): setting PYTHONIOENCODING at runtime only affects child
    # processes; the current interpreter's streams are already configured.
    os.environ['PYTHONIOENCODING'] = 'utf-8'
    # Reset work directories, sync the source PAK into place, then run.
    clear_pack_unpack_dirs()
    auto_sync_pak()
    AvatarFrameTool().run()
