#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Avatar-frame PAK modding tool — setup, terminal UI helpers and byte utilities.

NOTE(review): the original file reached review with all newlines collapsed and
every ``<...>`` span stripped (apparently by an HTML-tag filter).  This section
was recoverable essentially verbatim and has been restored to runnable form.
All user-facing (Chinese) runtime strings are preserved byte-for-byte.
"""
import os
import sys
import shutil
import struct
import zlib
import time
import re
import mmap
import hashlib
import random
import multiprocessing
from concurrent.futures import ThreadPoolExecutor, as_completed
from pathlib import Path
from collections import namedtuple


# ====================== Colors + per-character rainbow text ======================
class Color:
    BOLD = "\033[1m"
    RESET = "\033[0m"


# ANSI color codes usable on the target terminal.
COLOR_CODES = ["\033[90m", "\033[91m", "\033[92m", "\033[93m",
               "\033[94m", "\033[95m", "\033[96m", "\033[97m"]


def rainbow_text(text):
    """Return *text* with a randomly chosen ANSI color before each character."""
    # join() instead of repeated += to avoid quadratic string building.
    return "".join(random.choice(COLOR_CODES) + ch for ch in text) + Color.RESET


# Global colored borders (computed once at import time).
BORDER_TOP = rainbow_text("=" * 60)
BORDER_MID = rainbow_text("-" * 60)
LINE_DIVIDER = BORDER_MID
SHORT_DIVIDER = rainbow_text("-" * 40)


# ====================== Animation helpers ======================
def loading_anim(text, seconds=1.5):
    """Show a braille spinner with *text* for roughly *seconds*, then clear the line."""
    frames = ["⠋", "⠙", "⠹", "⠸", "⠼", "⠴", "⠦", "⠧", "⠇", "⠏"]
    start = time.time()
    idx = 0
    while time.time() - start < seconds:
        print(f"\r\033[94m{frames[idx]} {rainbow_text(text)}...{Color.RESET}",
              end="", flush=True)
        idx = (idx + 1) % len(frames)
        time.sleep(0.08)
    print("\r" + " " * 60 + "\r", end="")


def progress_bar(current, total, prefix="进度", length=30):
    """Render an in-place progress bar; prints a newline once *current* reaches *total*."""
    percent = int(100 * current / total)
    filled = int(length * current / total)
    bar = "█" * filled + "░" * (length - filled)
    print(f"\r\033[92m{prefix} |{bar}| {percent}% ({current}/{total}){Color.RESET}",
          end="", flush=True)
    if current >= total:
        print()


# ====================== PAK sync path configuration ======================
SRC_PAK_PATH = Path("/storage/emulated/0/勿辞制作区/提取的pak/载具天线+头像框+击杀反馈水印+载具入场+大厅水印pak")
DST_PAK_PATH = Path("/storage/emulated/0/勿辞制作区/pak")
AUTO_SELECT_PAK_NAME = ""  # set by auto_sync_pak(); consumed by select_pak_file()


def get_file_md5(file_path):
    """Return the hex MD5 digest of *file_path*, read in 4 KiB chunks."""
    md5 = hashlib.md5()
    with open(file_path, 'rb') as f:
        while chunk := f.read(4096):
            md5.update(chunk)
    return md5.hexdigest()


def auto_sync_pak():
    """Copy the first .pak from SRC_PAK_PATH into DST_PAK_PATH if its MD5 differs.

    Side effect: records the synced file name in the module-global
    AUTO_SELECT_PAK_NAME so the extractor can auto-select it later.
    Best-effort: missing source folder or empty folder just skips the sync.
    """
    global AUTO_SELECT_PAK_NAME
    AUTO_SELECT_PAK_NAME = ""
    DST_PAK_PATH.mkdir(parents=True, exist_ok=True)
    print(f"\n{BORDER_TOP}")
    loading_anim("正在检测源文件夹")
    if not SRC_PAK_PATH.exists():
        print(rainbow_text("[提示] 源文件夹不存在,跳过同步"))
        print(f"{BORDER_TOP}\n")
        return
    pak_list = list(SRC_PAK_PATH.glob("*.pak"))
    if not pak_list:
        print(rainbow_text("[提示] 源文件夹内无PAK文件"))
        print(f"{BORDER_TOP}\n")
        return
    loading_anim("正在校验PAK文件")
    source_pak = pak_list[0]
    pak_name = source_pak.name
    dst_file = DST_PAK_PATH / pak_name
    if dst_file.exists():
        if get_file_md5(source_pak) == get_file_md5(dst_file):
            print(rainbow_text(f"[同步] {pak_name} 内容相同,无需复制"))
        else:
            shutil.copy2(source_pak, dst_file)
            print(rainbow_text(f"[同步] {pak_name} 内容不同,已覆盖复制"))
    else:
        shutil.copy2(source_pak, dst_file)
        print(rainbow_text(f"[同步] {pak_name} 已复制到pak目录"))
    AUTO_SELECT_PAK_NAME = pak_name
    print(f"{BORDER_TOP}\n")


# ====================== Global configuration ======================
PAK_DIR = "/storage/emulated/0/勿辞制作区/pak/"
UNPACK_DIR = "/storage/emulated/0/勿辞制作区/uexp解包"
PACK_TEMP_DIR = "/storage/emulated/0/勿辞制作区/uexp打包"
CONFIG_DIR = "/storage/emulated/0/勿辞制作区/配置"
TARGET_FILE_PATTERN = "AvatarFrame.uexp"
MIN_FILE_SIZE = 100 * 1024  # lowered from 500 KiB to 100 KiB to avoid rejecting valid files
MIN_PRINT = True            # when True, suppress per-file extraction warnings
MAX_WORKERS = min(multiprocessing.cpu_count(), 8)
ENCRYPT_KEY = 0x79          # single-byte XOR key used by encrypted PAK payloads


def clear_pack_unpack_dirs():
    """Delete and recreate the unpack/pack working directories; exit on failure."""
    for dir_path in [UNPACK_DIR, PACK_TEMP_DIR]:
        path = Path(dir_path)
        if path.exists():
            try:
                shutil.rmtree(path)
            except Exception as e:
                print(rainbow_text(f"[错误] 清空目录 {dir_path} 失败: {str(e)}"))
                sys.exit(1)
        path.mkdir(parents=True, exist_ok=True)


CompressionInfo = namedtuple('CompressionInfo', [
    'offset', 'size', 'zip', 'zsize', 'encrypted', 'chunks', 'chunk_size'
])


# ====================== Byte-level utilities ======================
class Utils:
    @staticmethod
    def decompress_zlib(data):
        """Inflate *data*; return b'' on any decompression error."""
        try:
            return zlib.decompress(data)
        except Exception:
            return b''

    @staticmethod
    def encrypt_data(data, encrypt_flag):
        """XOR every byte with ENCRYPT_KEY when *encrypt_flag* is truthy."""
        if encrypt_flag:
            return bytes([b ^ ENCRYPT_KEY for b in data])
        return data

    @staticmethod
    def decrypt_data(data, encrypt_flag):
        """Decrypt is its own inverse for a XOR cipher — delegate to encrypt_data."""
        return Utils.encrypt_data(data, encrypt_flag)

    @staticmethod
    def compress_to_max_size(input_data, max_size):
        """Deflate *input_data* at decreasing levels until it fits *max_size*.

        Falls back to the raw data zero-padded to *max_size* when no level fits.
        """
        for level in range(9, -1, -1):
            try:
                compressed = zlib.compress(input_data, level)
                if len(compressed) <= max_size:
                    return compressed
            except Exception:
                continue
        return input_data.ljust(max_size, b'\x00')

    @staticmethod
    def dec_to_hex(decimal_number):
        """Return the 32-bit little-endian hex string for *decimal_number*.

        E.g. 1 -> '01000000'. Returns None (after printing) for non-numeric input.
        """
        try:
            hex_str = format(int(decimal_number), '08X')
            hex_array = [hex_str[i:i + 2] for i in range(0, 8, 2)]
            return ''.join(reversed(hex_array))
        except ValueError:
            print(rainbow_text(f"[错误] 无效的十进制数: {decimal_number}"))
            return None

    @staticmethod
    def find_hex_reverse(data, hex_to_find, start):
        """Last occurrence of *hex_to_find* fully inside data[0:start], or -1."""
        return data.rfind(hex_to_find, 0, start)

    @staticmethod
    def find_hex(data, hex_to_find, start, end):
        """First occurrence of *hex_to_find* inside data[start:end], or -1."""
        return data.find(hex_to_find, start, end)
# ====================== Unpack core ======================
class FastPakExtractor:
    """Locates, parses and selectively extracts files from a game .pak archive.

    NOTE(review): this class arrived with every ``<...>`` span stripped from the
    source (all ``struct.unpack`` format strings, one whole extract method head,
    and the tail of ``unpack_pak``).  The intact parts (``__init__``,
    ``get_pak_files``, ``select_pak_file``, ``find_magic_offset``,
    ``extract_chunked``) are restored verbatim; everything marked NOTE(review)
    below is a best-effort reconstruction and must be confirmed against a clean
    copy of the file.
    """

    def __init__(self):
        self.encrypt = 0                 # 1 when the index/payloads are XOR-encrypted
        self.mm = None                   # mmap of the selected pak (set in unpack_pak)
        self.selected_pak = None         # Path of the pak being processed
        self.compression_info = {}
        self.base_path = "../../../"
        self.valid_target_file = None    # Path of the extracted target, once validated
        self.unpack_elapsed = 0

    def get_pak_files(self):
        """Return the .pak files in PAK_DIR, largest first; [] when the dir is missing."""
        pak_dir = Path(PAK_DIR)
        if not pak_dir.exists():
            print(rainbow_text(f"[错误] PAK目录不存在: {PAK_DIR}"))
            return []
        return sorted(list(pak_dir.glob("*.pak")),
                      key=lambda x: x.stat().st_size, reverse=True)

    def select_pak_file(self, pak_files):
        """Pick the pak to process: auto-match AUTO_SELECT_PAK_NAME, else prompt."""
        global AUTO_SELECT_PAK_NAME
        loading_anim("正在匹配源PAK文件")
        if AUTO_SELECT_PAK_NAME:
            for pak in pak_files:
                if pak.name == AUTO_SELECT_PAK_NAME:
                    self.selected_pak = pak
                    print(rainbow_text(f"[自动选中] 源文件PAK:{pak.name}"))
                    return True
        print(f"\n{rainbow_text('可用的 PAK 列表:')}")
        print(BORDER_MID)
        for i, pak in enumerate(pak_files, 1):
            size_mb = pak.stat().st_size / 1024 / 1024
            print(rainbow_text(f"{i}. {pak.name} ({size_mb:.0f} MB)"))
        print(BORDER_MID)
        while True:
            try:
                choice = input(f"\r\033[94m{rainbow_text('请输入序号选择')} "
                               f"(1-{len(pak_files)}) : {Color.RESET}").strip()
                if choice == "0":
                    sys.exit(0)
                choice = int(choice)
                if 1 <= choice <= len(pak_files):
                    self.selected_pak = pak_files[choice - 1]
                    return True
                print(rainbow_text(f"请输入1~{len(pak_files)}之间的数字"))
            except ValueError:
                print(rainbow_text("请输入正确数字"))

    def find_magic_offset(self):
        """Scan the last 5 MiB of the pak for the index marker.

        Three markers are tried in order: the plain "../../../" path prefix,
        its XOR-0x79 counterpart (sets self.encrypt), and a bare "../".
        Returns the offset 4 bytes before the marker, or a fixed tail offset
        when nothing matches.  Requires self.mm / self.file_size to be set.
        """
        pattern1 = b"\x2E\x2E\x2F\x2E\x2E\x2F\x2E\x2E\x2F"   # "../../../"
        pattern2 = b"\x57\x57\x56\x57\x57\x56\x57\x57\x56"   # same, XOR 0x79
        pattern3 = b"\x2E\x2E\x2F"                            # "../"
        scan_len = min(5 * 1024 * 1024, self.file_size)
        data = self.mm[-scan_len:]
        offset1 = data.find(pattern1)
        offset2 = data.find(pattern2)
        offset3 = data.rfind(pattern3)
        if offset1 != -1:
            self.encrypt = 0
            return self.file_size - scan_len + offset1 - 4
        elif offset2 != -1:
            self.encrypt = 1
            return self.file_size - scan_len + offset2 - 4
        elif offset3 != -1:
            self.encrypt = 0
            return self.file_size - scan_len + offset3 - 4
        else:
            return self.file_size - 0x2C

    def get_base_path(self, offset):
        """Read the length-prefixed mount-point string at *offset*.

        Returns (base_path, offset_past_string); falls back to "../../../"
        on any bounds/decoding problem.
        NOTE(review): the '<I' unpack format is reconstructed — TODO confirm.
        """
        if offset + 4 > self.file_size:
            return "../../../", offset + 4
        try:
            name_size = struct.unpack('<I', self.mm[offset:offset + 4])[0]
        except Exception:
            return "../../../", offset + 4
        if name_size > 1024 or offset + 4 + name_size > self.file_size:
            return "../../../", offset + 4 + name_size
        try:
            base_path = self.mm[offset + 4:offset + 4 + name_size] \
                .decode('utf-8', errors='ignore').rstrip('\x00')
            if name_size != 0x0A and name_size < 0xFF:
                base_path = "../../../" + base_path
            self.base_path = base_path
            return base_path, offset + 4 + name_size
        except Exception:
            return "../../../", offset + 4 + name_size

    def parse_file_entry(self, offset):
        """Parse one index record (20-byte hash + 49-byte fixed entry [+ chunks]).

        Returns (entry_dict | None, next_offset).
        NOTE(review): the field layout below (three little-endian u64 for
        offset/zsize/size, u32 compression flag, encrypted byte) is a
        reconstruction of stripped unpack calls — TODO verify against a clean
        copy.  The surviving skeleton shows: 20-byte hash, 49-byte record,
        chunk count + 16-byte (start,end) pairs, then a 5-byte chunk-size tail.
        """
        if offset + 20 + 49 > self.file_size:
            return None, offset + 69
        try:
            hash_data = self.mm[offset:offset + 20]
            offset += 20
            entry_data = self.mm[offset:offset + 49]
            offset += 49
            entry = {
                'hash': hash_data,
                'offset': struct.unpack('<Q', entry_data[0:8])[0],
                'zsize': struct.unpack('<Q', entry_data[8:16])[0],
                'size': struct.unpack('<Q', entry_data[16:24])[0],
                'zip': struct.unpack('<I', entry_data[24:28])[0],
                'encrypted': entry_data[28],
                'chunks': [],
                'chunk_size': 0,
            }
            if entry['zip'] != 0:
                if offset + 4 > self.file_size:
                    return entry, offset
                chunk_count = struct.unpack('<I', self.mm[offset:offset + 4])[0]
                offset += 4
                for _ in range(chunk_count):
                    if offset + 16 > self.file_size:
                        break
                    chunk_data = self.mm[offset:offset + 16]
                    offset += 16
                    chunk_offset = struct.unpack('<Q', chunk_data[0:8])[0]
                    chunk_end = struct.unpack('<Q', chunk_data[8:16])[0]
                    entry['chunks'].append((chunk_offset, chunk_end))
                if offset + 5 > self.file_size:
                    return entry, offset
                chunk_size_data = self.mm[offset:offset + 5]
                offset += 5
                entry['chunk_size'] = struct.unpack('<I', chunk_size_data[1:5])[0]
            return entry, offset
        except Exception:
            # Fallback seen in the corrupted source: skip the fixed 69-byte record.
            return None, offset

    def decrypt_data(self, data):
        """XOR-decrypt *data* with ENCRYPT_KEY.

        NOTE(review): the original definition was lost; reconstructed to match
        Utils.encrypt_data, which uses the same single-byte XOR.
        """
        return bytes([b ^ ENCRYPT_KEY for b in data])

    def _note_target_file(self, output_path):
        """Record *output_path* as the valid target when it matches and is big enough."""
        if output_path.name != TARGET_FILE_PATTERN:
            return
        file_size = output_path.stat().st_size
        print(rainbow_text(f"[调试] 找到目标文件:{output_path.name},大小:{file_size} 字节"))
        if file_size > MIN_FILE_SIZE:
            self.valid_target_file = output_path
            print(rainbow_text(f"[找到目标文件] {output_path.name}(大小符合要求)"))
        else:
            print(rainbow_text(f"[警告] 文件大小不足 {MIN_FILE_SIZE} 字节,未设为有效目标文件"))

    def extract_file(self, entry, output_path):
        """Extract a non-chunked entry to *output_path*; True on success.

        NOTE(review): the name and opening lines of this method were destroyed
        in the corrupted source; only the decrypt/decompress/write tail and the
        target-file bookkeeping survived.  Reconstructed symmetrically with
        extract_chunked — TODO confirm.
        """
        try:
            output_path.parent.mkdir(parents=True, exist_ok=True)
            read_size = entry['zsize'] if entry['zip'] != 0 else entry['size']
            data = self.mm[entry['offset']:entry['offset'] + read_size]
            if self.encrypt and entry['encrypted'] == 1:
                data = self.decrypt_data(data)
            if entry['zip'] != 0:
                data = Utils.decompress_zlib(data)
            with open(output_path, 'wb') as out:
                out.write(data)
            self._note_target_file(output_path)
            return True
        except Exception as e:
            if not MIN_PRINT:
                print(rainbow_text(f"[警告] 提取失败 {output_path.name}: {str(e)}"))
            return False

    def extract_chunked(self, entry, output_path):
        """Extract a chunked entry: per-chunk decrypt + inflate, capped at entry['size']."""
        try:
            total_size = entry['size']
            remaining = total_size
            output_path.parent.mkdir(parents=True, exist_ok=True)
            with open(output_path, 'wb') as out:
                for chunk_offset, chunk_end in entry['chunks']:
                    if remaining <= 0:
                        break
                    chunk_zsize = chunk_end - chunk_offset
                    chunk_data = self.mm[chunk_offset:chunk_offset + chunk_zsize]
                    if self.encrypt and entry['encrypted'] == 1:
                        chunk_data = self.decrypt_data(chunk_data)
                    if entry['zip'] != 0:
                        chunk_data = Utils.decompress_zlib(chunk_data)
                    write_size = min(len(chunk_data), remaining)
                    out.write(chunk_data[:write_size])
                    remaining -= write_size
            self._note_target_file(output_path)
            return True
        except Exception as e:
            if not MIN_PRINT:
                print(rainbow_text(f"[警告] 提取失败 {output_path.name}: {str(e)}"))
            return False

    def parse_toc(self, toc_offset, toc_size):
        """Parse the name table and return [(full_path, entry_index), ...] for targets.

        NOTE(review): heavily reconstructed.  The surviving skeleton shows a
        directory count, then per directory a signed 4-byte name length
        (negative => UTF-16-LE, abs(n)*2 bytes; non-negative => UTF-8), a file
        count, per-file names in the same encoding, and a 4-byte entry index
        per file.  Matching is case-insensitive against self.target_files.
        """
        toc_data = self.mm[toc_offset:toc_offset + toc_size]
        toc_len = len(toc_data)
        entries = []
        target_set = self.target_files

        def read_name(pos):
            # Returns (name | None, new_pos); None signals a bounds failure.
            if pos + 4 > toc_len:
                return None, pos
            n = struct.unpack('<i', toc_data[pos:pos + 4])[0]
            pos += 4
            size = n if n >= 0 else abs(n) * 2
            if pos + size > toc_len:
                return None, pos + size
            enc = 'utf-8' if n >= 0 else 'utf-16-le'
            try:
                name = toc_data[pos:pos + size].decode(enc, errors='ignore').rstrip('\x00')
            except Exception:
                name = ""
            return name, pos + size

        pos = 0
        if pos + 4 > toc_len:
            return entries
        dir_count = struct.unpack('<I', toc_data[pos:pos + 4])[0]
        pos += 4
        for _ in range(dir_count):
            dir_name, pos = read_name(pos)
            if dir_name is None or pos + 4 > toc_len:
                break
            file_count = struct.unpack('<I', toc_data[pos:pos + 4])[0]
            pos += 4
            for _ in range(file_count):
                file_name, pos = read_name(pos)
                if file_name is None or pos + 4 > toc_len:
                    return entries
                entry_index = struct.unpack('<I', toc_data[pos:pos + 4])[0]
                pos += 4
                full_path = f"{dir_name}{file_name}"
                if file_name.lower() in target_set or full_path.lower() in target_set:
                    entries.append((full_path, entry_index))
        return entries

    def unpack_pak(self, target_set):
        """mmap the selected pak, parse its index, and extract the target files.

        Returns True when parsing completed (the caller then checks
        self.valid_target_file); False on missing selection or a hard error.
        NOTE(review): everything after the file-count read was destroyed in the
        corrupted source; the entry/TOC walk and extraction loop below are a
        best-effort reconstruction — TODO verify ordering against a clean copy.
        """
        if not self.selected_pak:
            print(rainbow_text("[错误] 未选择PAK文件"))
            return False
        start_time = time.time()
        self.target_files = {f.lower() for f in target_set}
        self.valid_target_file = None
        self.compression_info.clear()
        self.file_size = self.selected_pak.stat().st_size
        try:
            loading_anim("正在加载并解析PAK结构")
            with open(self.selected_pak, 'rb') as f:
                self.mm = mmap.mmap(f.fileno(), 0, access=mmap.ACCESS_READ)
                magic_offset = self.find_magic_offset()
                base_path, pos = self.get_base_path(magic_offset)
                file_count = struct.unpack('<I', self.mm[pos:pos + 4])[0]
                pos += 4
                index_entries = []
                for _ in range(file_count):
                    entry, pos = self.parse_file_entry(pos)
                    if entry is None:
                        break
                    index_entries.append(entry)
                named = self.parse_toc(pos, self.file_size - pos)
                unpack_root = Path(UNPACK_DIR)
                for full_path, idx in named:
                    if not (0 <= idx < len(index_entries)):
                        continue
                    entry = index_entries[idx]
                    output_path = unpack_root / Path(full_path).name
                    if entry['chunks']:
                        self.extract_chunked(entry, output_path)
                    else:
                        self.extract_file(entry, output_path)
            self.unpack_elapsed = time.time() - start_time
            return True
        except Exception as e:
            print(rainbow_text(f"[错误] 解包失败: {str(e)}"))
            return False
class FastPakPacker:
    """Placeholder repacker.

    NOTE(review): the original packer implementation was entirely destroyed in
    the corrupted dump.  This stub preserves the interface run() relies on
    (pack_pak() and pack_elapsed) but performs no repacking — TODO restore the
    real implementation from a clean copy of the file.
    """

    def __init__(self, extractor):
        self.extractor = extractor
        self.pack_elapsed = 0.0

    def pack_pak(self):
        start = time.time()
        print(rainbow_text("[警告] 打包逻辑在损坏的源码中缺失,已跳过"))
        self.pack_elapsed = time.time() - start
        return False


class AvatarFrameTool:
    """Top-level driver: select pak → unpack → patch AvatarFrame.uexp → repack."""

    def __init__(self):
        # NOTE(review): __init__ was lost in the corrupted source; reconstructed
        # from the attributes run() and batch_process_hex() use.
        self.extractor = FastPakExtractor()
        self.packer = FastPakPacker(self.extractor)

    def select_config_file(self):
        """Return a config file Path from CONFIG_DIR, or None.

        NOTE(review): reconstructed — the original was lost.  Auto-picks a lone
        .txt file, otherwise prompts by index — TODO confirm behavior.
        """
        cfg_dir = Path(CONFIG_DIR)
        if not cfg_dir.exists():
            print(rainbow_text(f"[错误] 配置目录不存在: {CONFIG_DIR}"))
            return None
        configs = sorted(cfg_dir.glob("*.txt"))
        if not configs:
            print(rainbow_text("[错误] 配置目录内无配置文件"))
            return None
        if len(configs) == 1:
            return configs[0]
        for i, c in enumerate(configs, 1):
            print(rainbow_text(f"{i}. {c.name}"))
        while True:
            try:
                choice = int(input("请输入序号选择配置: ").strip())
                if 1 <= choice <= len(configs):
                    return configs[choice - 1]
            except ValueError:
                pass
            print(rainbow_text("请输入正确数字"))

    def parse_config_file(self, config_path):
        """Return [(old_id, new_id), ...] parsed from *config_path*.

        NOTE(review): reconstructed — the original was lost.  Takes the first
        two decimal numbers on each line as an (old, new) pair; callers feed
        both values through Utils.dec_to_hex, which accepts numeric strings.
        """
        pairs = []
        try:
            text = Path(config_path).read_text(encoding='utf-8', errors='ignore')
        except Exception:
            return pairs
        for line in text.splitlines():
            nums = re.findall(r'\d+', line)
            if len(nums) >= 2:
                pairs.append((nums[0], nums[1]))
        return pairs

    def auto_detect_feature_codes_avatar(self, file_path):
        """Return two 4-byte feature codes (upper-hex) read near an anchor.

        NOTE(review): the anchor-search half of this method was destroyed in
        the corrupted source; only the tail survived.  The surviving code fixes
        feature code 2 at target_pos - 172; the anchor pattern and the offset
        of feature code 1 below are guesses — TODO recover from a clean copy.
        """
        try:
            with open(file_path, 'rb') as f:
                file_contents = f.read()
        except Exception:
            return None, None
        target_pos = file_contents.find(b"AvatarFrame")  # assumed anchor — TODO confirm
        if target_pos == -1:
            return None, None
        start_pos1 = target_pos - 176  # assumed offset — TODO confirm
        end_pos1 = start_pos1 + 4
        if start_pos1 >= 0 and end_pos1 <= len(file_contents):
            feature_code1 = file_contents[start_pos1:end_pos1].hex().upper()
        else:
            print(rainbow_text("[错误] 无法提取第一个特征码(偏移越界)"))
            return None, None
        start_pos2 = target_pos - 172
        end_pos2 = start_pos2 + 4
        if start_pos2 >= 0 and end_pos2 <= len(file_contents):
            feature_code2 = file_contents[start_pos2:end_pos2].hex().upper()
        else:
            print(rainbow_text("[错误] 无法提取第二个特征码(偏移越界)"))
            return None, None
        return feature_code1, feature_code2

    def modify_file_hex(self, file_path, A, B, hex_start, hex_end):
        """Swap the two byte runs bracketed by hex_start/hex_end before A and B.

        A/B and hex_start/hex_end are hex strings.  For each of A and B, the
        enclosing (hex_start ... hex_end) window immediately preceding its first
        occurrence is located, and the two windows' payloads are exchanged.
        Returns True when the file was rewritten, False on any miss or error.
        """
        # Guard: a missing target file means unpacking never validated one.
        if file_path is None:
            print(rainbow_text("[错误] 传入的文件路径为空,无法修改"))
            return False
        search_seq1 = bytes.fromhex(A)
        search_seq2 = bytes.fromhex(B)
        hex_to_find_start = bytes.fromhex(hex_start)
        hex_to_find_end = bytes.fromhex(hex_end)
        try:
            with open(file_path, "rb") as file:
                file_contents = file.read()
        except FileNotFoundError:
            print(rainbow_text(f"[错误] 文件未找到: {file_path.name}"))
            return False
        search_index1 = file_contents.find(search_seq1)
        search_index2 = file_contents.find(search_seq2)
        if search_index1 == -1 or search_index2 == -1:
            return False
        start_index1 = Utils.find_hex_reverse(file_contents, hex_to_find_start, search_index1)
        if start_index1 == -1:
            return False
        end_index1 = Utils.find_hex(file_contents, hex_to_find_end,
                                    start_index1 + len(hex_to_find_start), search_index1)
        if end_index1 == -1:
            return False
        start_index2 = Utils.find_hex_reverse(file_contents, hex_to_find_start, search_index2)
        if start_index2 == -1:
            return False
        end_index2 = Utils.find_hex(file_contents, hex_to_find_end,
                                    start_index2 + len(hex_to_find_start), search_index2)
        if end_index2 == -1:
            return False
        data_to_replace1 = file_contents[start_index1 + len(hex_to_find_start):end_index1]
        data_to_replace2 = file_contents[start_index2 + len(hex_to_find_start):end_index2]
        # Swap payload 1 and payload 2 in place (bytearray slice assignment
        # handles unequal lengths correctly).
        new_contents = bytearray(file_contents)
        new_contents[start_index1 + len(hex_to_find_start):end_index1] = data_to_replace2
        new_contents[start_index2 + len(hex_to_find_start):end_index2] = data_to_replace1
        try:
            with open(file_path, "wb") as file:
                file.write(new_contents)
            return True
        except Exception as e:
            print(rainbow_text(f"[错误] 写入文件出错: {str(e)[:50]}"))
            return False

    def batch_process_hex(self):
        """Apply every (old, new) rule from the chosen config to the target file."""
        target_file = self.extractor.valid_target_file
        # Bail out early instead of crashing later on a None path.
        if target_file is None:
            print(rainbow_text("[错误] 未找到有效的目标文件,无法执行修改操作"))
            return False
        config_path = self.select_config_file()
        if not config_path:
            return False
        code_array = self.parse_config_file(config_path)
        if not code_array:
            print(rainbow_text("[错误] 配置文件无有效规则"))
            return False
        total_rules = len(code_array)
        print(f"\n{rainbow_text(f'解析到: {total_rules} 组替换规则')}")
        feature_code1, feature_code2 = self.auto_detect_feature_codes_avatar(target_file)
        if not feature_code1 or not feature_code2:
            print(rainbow_text("[错误] 特征码识别失败,终止修改"))
            return False
        print(rainbow_text(f"识别到特征码1: {feature_code1}"))
        print(rainbow_text(f"识别到特征码2: {feature_code2}"))
        print(f"\n{rainbow_text('开始批量替换处理')}")
        success_count = 0
        for idx, (old_num, new_num) in enumerate(code_array, 1):
            print(SHORT_DIVIDER)
            print(rainbow_text(f"[第 {idx}/{total_rules} 组] {old_num} → {new_num}"))
            A = Utils.dec_to_hex(old_num)
            B = Utils.dec_to_hex(new_num)
            if A is None or B is None:
                print(rainbow_text("[警告] 跳过无效代码对"))
                continue
            if self.modify_file_hex(target_file, A, B, feature_code1, feature_code2):
                success_count += 1
                print(rainbow_text("[成功] 替换完成"))
            else:
                print(rainbow_text("[警告] 未找到序列,跳过"))
            progress_bar(idx, total_rules, "批量替换")
        print(f"\n{BORDER_MID}")
        print(rainbow_text(f"批量替换完成: 成功 {success_count}/{total_rules} 组"))
        print(f"{BORDER_MID}")
        return True

    def run(self):
        """Interactive pipeline: select → unpack → patch → repack."""
        print(f"\n{BORDER_TOP}")
        print(f"{rainbow_text(' 全自动头像框美化工具 ')}")
        print(f"{BORDER_TOP}")
        print(f"\n{rainbow_text('[1/3] 进入PAK选择环节')}")
        pak_files = self.extractor.get_pak_files()
        if not pak_files:
            input(rainbow_text("按回车退出..."))
            return
        self.extractor.select_pak_file(pak_files)
        print(f"\n{rainbow_text('[2/3] 开始解包解析')}")
        if not self.extractor.unpack_pak({TARGET_FILE_PATTERN}):
            input(rainbow_text("按回车退出..."))
            return
        print(f"\n{rainbow_text(f'解包耗时: {self.extractor.unpack_elapsed:.3f}s')}")
        print(f"\n{rainbow_text('[3/3] 头像框数据修改')}")
        if not self.batch_process_hex():
            input(rainbow_text("按回车退出..."))
            return
        # "[4/3]" is the original wording — preserved byte-for-byte.
        print(f"\n{rainbow_text('[4/3] 重新打包PAK')}")
        print(rainbow_text(f"当前源文件: {self.extractor.selected_pak.name}"))
        self.packer.pack_pak()
        print(rainbow_text(f"打包耗时: {self.packer.pack_elapsed:.3f}s"))
        print(f"\n{BORDER_TOP}")
        print(f"{rainbow_text(' 全部任务执行完成! ')}")
        print(f"{BORDER_TOP}")
        input(f"\r\033[94m{rainbow_text('按 Enter 键退出程序...')}{Color.RESET}")


if __name__ == "__main__":
    random.seed(time.time())
    os.environ['PYTHONIOENCODING'] = 'utf-8'
    clear_pack_unpack_dirs()
    auto_sync_pak()
    AvatarFrameTool().run()