提交部分代码

dev
ZhuoZhou 2025-04-17 16:08:36 +08:00
parent 1a2c7c45f4
commit 0b8c07118f
6 changed files with 594 additions and 63 deletions

121
.gitignore vendored
View File

@ -1,74 +1,69 @@
# ---> Unity
# This .gitignore file should be placed at the root of your Unity project directory
#
# Get latest from https://github.com/github/gitignore/blob/main/Unity.gitignore
#
/[Ll]ibrary/
/[Tt]emp/
/[Oo]bj/
/[Bb]uild/
/[Bb]uilds/
/[Ll]ogs/
/[Uu]ser[Ss]ettings/
# ---> Python
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class
# MemoryCaptures can get excessive in size.
# They also could contain extremely sensitive data
/[Mm]emoryCaptures/
# C extensions
*.so
# Recordings can get excessive in size
/[Rr]ecordings/
# Distribution / packaging
.Python
env/
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
*.egg-info/
.installed.cfg
*.egg
# Uncomment this line if you wish to ignore the asset store tools plugin
# /[Aa]ssets/AssetStoreTools*
# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec
# Autogenerated Jetbrains Rider plugin
/[Aa]ssets/Plugins/Editor/JetBrains*
# Installer logs
pip-log.txt
pip-delete-this-directory.txt
# Visual Studio cache directory
.vs/
# Unit test / coverage reports
htmlcov/
.tox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
# Gradle cache directory
.gradle/
# Translations
*.mo
*.pot
# Autogenerated VS/MD/Consulo solution and project files
ExportedObj/
.consulo/
*.csproj
*.unityproj
*.sln
*.suo
*.tmp
*.user
*.userprefs
*.pidb
*.booproj
*.svd
*.pdb
*.mdb
*.opendb
*.VC.db
# Django stuff:
*.log
# Unity3D generated meta files
*.pidb.meta
*.pdb.meta
*.mdb.meta
# Sphinx documentation
docs/_build/
# Unity3D generated file on crash reports
sysinfo.txt
# PyBuilder
target/
# Builds
*.apk
*.aab
*.unitypackage
*.app
# Crashlytics generated file
crashlytics-build.properties
# Packed Addressables
/[Aa]ssets/[Aa]ddressable[Aa]ssets[Dd]ata/*/*.bin*
# Temporary auto-generated Android Assets
/[Aa]ssets/[Ss]treamingAssets/aa.meta
/[Aa]ssets/[Ss]treamingAssets/aa/*
.idea
Error.D2.Android
Error.D2.iOS
Log.D2
d2
atlas
atlas_remote_config.json
.DS_Store

346
check_zip_psd.py Normal file
View File

@ -0,0 +1,346 @@
###-----检测psd是否合格通过的psd压缩-----
import os
import time
import config
from psd_tools import PSDImage
from PIL import Image
import json
import utils
import sys
# Make all relative paths below resolve against the script's own directory.
# BUG FIX: the original ran os.system("cd " + curr), which changes directory
# only inside a throwaway subshell and has no effect on this process;
# os.chdir changes the current process's working directory.
curr = os.path.dirname(sys.argv[0])
if curr != "":
    os.chdir(curr)
def check_psd(psd_path):
    """Validate a level PSD against naming, size, color-mode and layer rules.

    Collects every violation into a local error list, records failures in the
    module-global ``error_psd_dic``, writes per-mask occlusion percentages to
    ./OcclusionDegree/<psd_name>.txt when the PSD passes, and returns True
    iff no violation was found.
    """
    psd_name = os.path.split(psd_path)[1]
    names = psd_name.split('_')
    log(f'开始检测:{psd_name}')
    error_log = []
    # The file name must consist of 3 or 4 underscore-separated segments.
    if len(names) != 3 and len(names) != 4:
        error_log.append(config.error_log.psd_name_error.value)
    # elif not names[0][0].islower():
    #     error_log.append(f'{psd_name}:{config.error_log.psd_name_error.value}')
    # elif not names[1][0].islower():
    #     error_log.append(f'{psd_name}:{config.error_log.psd_name_error.value}')
    # elif not names[2][0].isdigit():
    #     error_log.append(f'{psd_name}:{config.error_log.psd_name_error.value}')
    psd = PSDImage.open(psd_path)
    # Reject CMYK documents (color space read from the raw file header).
    color_mode = get_psd_color_mode(psd_path)
    if color_mode == 'CMYK':
        error_log.append(f'{psd_name}:{config.error_log.psd_cmyk.value}')
        print("PSD 文件的色彩空间是 CMYK")
    # elif color_mode == 'RGB':
    #     print("PSD 文件的色彩空间是 RGB")
    psd_width = psd.width
    psd_height = psd.height
    # Canvas must match one of the two standard sizes declared in config.
    if (psd_width != config.psd_standard_width and psd_width != config.psd_standard_width2) or (psd_height != config.psd_standard_height and psd_height != config.psd_standard_height2):
        error_log.append(f'{psd_name}:{config.error_log.psd_size_error.value}')
    all_items = []          # 'titem*' layer names found inside the titem group
    all_full = {}           # 'tfull*' layer name -> (left, top, width, height)
    all_mask = []           # 'tmask*' layer names
    all_color_it_full = []  # 'tfullcolor*' layer names
    all_color_it_item = []  # 'titemcolor*' layer names
    is_exist_base = False
    is_exist_titem_group = False
    all_occlusion_degree = []
    # Walk the top-level layers and classify/validate each one.
    for layer in psd:
        if ' ' in layer.name:
            error_log.append(f'{psd_name}:{layer.name}:{config.error_log.name_contains_spaces.value}')
        if layer.name == 'nouse' and layer.is_group():
            continue
        if layer.name == 'titem' and layer.is_group():
            is_exist_titem_group = True
            for item_layer in layer:
                if ' ' in item_layer.name:
                    error_log.append(f'{psd_name}:{item_layer.name}:{config.error_log.name_contains_spaces.value}')
                # item_names = item_layer.name.split('_')
                if 'titem' in item_layer.name:
                    if item_layer.name in all_items or item_layer.name in all_color_it_item:
                        error_log.append(f'{psd_name}:{item_layer.name}:{config.error_log.exit_repeat_layer.value}')
                    else:
                        if 'titemcolor' in item_layer.name:
                            all_color_it_item.append(item_layer.name)
                        else:
                            all_items.append(item_layer.name)
                else:
                    error_log.append(f'{psd_name}:{item_layer.name}:{config.error_log.psd_item_name_error.value}')
        elif not layer.is_group() and 'tfull' in layer.name:
            if layer.name in all_full.keys() or layer.name in all_color_it_full:
                error_log.append(f'{psd_name}:{layer.name}:{config.error_log.exit_repeat_layer.value}')
            else:
                if 'tfullcolor' in layer.name:
                    all_color_it_full.append(layer.name)
                else:
                    all_full[layer.name] = (layer.left, layer.top, layer.width, layer.height)
        elif not layer.is_group() and 'tmask' in layer.name:
            if layer.name in all_mask:
                error_log.append(f'{psd_name}:{layer.name}:{config.error_log.exit_repeat_layer.value}')
            else:
                all_mask.append(layer.name)
        elif layer.name == 'base':
            if is_exist_base:
                error_log.append(f'{psd_name}:{config.error_log.exit_more_base.value}')
            else:
                is_exist_base = True
        else:
            error_log.append(f'{psd_name}:{layer.name}:{config.error_log.layer_not_need.value}')
    if not is_exist_base:
        error_log.append(f'{psd_name}:{config.error_log.psd_not_exit_base.value}')
    if not is_exist_titem_group:
        error_log.append(f'{psd_name}:{config.error_log.item_group_not_exit.value}')
    # item / full / (optional) color-layer counts must all agree.
    all_item_count = len(all_items)
    if len(all_full) != all_item_count or (
            len(all_color_it_item) != 0 and len(all_color_it_item) != all_item_count) or (
            len(all_color_it_full) != 0 and len(all_color_it_full) != all_item_count):
        error_log.append(f'{psd_name}:{config.error_log.item_or_full_num_error.value}')
    # Every titemX layer must have a matching tfullX layer.
    for item in all_items:
        full_name = item.replace('item', 'full')
        if full_name not in all_full.keys():
            error_log.append(f'{psd_name}:{item}:{config.error_log.psd_not_full.value}')
    # When color variants exist at all, each item needs both color counterparts.
    if len(all_color_it_item) > 0 and len(all_color_it_full) > 0:
        for item in all_items:
            fullcolor_name = item.replace('item', 'fullcolor')
            itemcolor_name = item.replace('item', 'itemcolor')
            if fullcolor_name not in all_color_it_full:
                error_log.append(f'{psd_name}:{item}:{config.error_log.psd_not_fullcolor.value}')
            if itemcolor_name not in all_color_it_item:
                error_log.append(f'{psd_name}:{item}:{config.error_log.psd_not_itemcolor.value}')
    # if not os.path.exists('./test'):
    #     os.mkdir('./test')
    # Compute what percentage of each full layer its mask layer occludes.
    for cur_mask in all_mask:
        img_mask = psd.composite(layer_filter=lambda mask_layer: mask_layer.name == cur_mask)
        full_name = cur_mask.replace('mask', 'full')
        cur_full_rect = None
        if full_name in all_full.keys():
            cur_full_rect = all_full[full_name]
            img_full = psd.composite(layer_filter=lambda full_layer: full_layer.name == full_name)
            # img_mask.save(f'./test/{cur_mask}.png')
            # img_full.save(f'./test/{full_name}.png')
            per = get_item_mask_contact_ratio(img_full, img_mask, cur_full_rect)
            all_occlusion_degree.append(f'{cur_mask}遮挡{full_name}百分比:{per}\n')
            # print(f'{cur_mask}遮挡{full_name}百分比:{per}')
        else:
            error_log.append(f'{psd_name}:{cur_mask}:{config.error_log.psd_mask_not_full.value}')
    is_error = len(error_log) > 0
    # NOTE(review): both branches yield '' — the pass/fail marker characters
    # were probably lost in transit (e.g. emoji); confirm against the original.
    is_error_text = '' if is_error else ''
    if is_error:
        # Record the failure in the module-global dict used by the final report.
        error_psd_dic[psd_name] = error_log
    log(f'{psd_name}:检测完毕,是否检测通过:{is_error_text}')
    for clog in error_log:
        log(clog)
    if not is_error:
        # Persist occlusion percentages once per PSD (existing files are kept).
        occlusion_degree_file_path = f'./OcclusionDegree/{psd_name}.txt'
        if not os.path.exists('./OcclusionDegree/'):
            os.mkdir('./OcclusionDegree/')
        if not os.path.exists(occlusion_degree_file_path):
            with open(occlusion_degree_file_path, 'w') as occlusion_degree_file:
                for text in all_occlusion_degree:
                    occlusion_degree_file.writelines(text)
        pass
    return len(error_log) == 0
def get_item_mask_contact_ratio(full_image, mask_image, full_rect):
    """Return how much of the 'full' layer the 'mask' layer covers.

    Scans ``full_rect`` = (left, top, width, height). A pixel is considered
    "empty" when it equals the fully transparent white (255, 255, 255, 0)
    that the composited layer images use for uncovered areas.

    :param full_image: composited image of the full layer (getpixel -> RGBA)
    :param mask_image: composited image of the mask layer (getpixel -> RGBA)
    :param full_rect: bounding box of the full layer
    :return: ratio of counted mask pixels to visible full pixels, formatted
        like "12.34%".

    Fixed: returns "0.00%" instead of raising ZeroDivisionError when the
    full layer has no visible pixel inside the rect.
    """
    transparent = (255, 255, 255, 0)
    x, y, w, h = full_rect
    full_pix_count = 0
    mask_pix_count = 0
    for cur_x in range(x, x + w):
        for cur_y in range(y, y + h):
            full_pix = full_image.getpixel((cur_x, cur_y))
            mask_pix = mask_image.getpixel((cur_x, cur_y))
            if full_pix != transparent:
                full_pix_count += 1
            # A mask pixel counts when it is actually drawn and differs from
            # the full layer's pixel at the same position.
            if mask_pix != transparent and mask_pix != full_pix:
                mask_pix_count += 1
    if full_pix_count == 0:
        # Degenerate rect / fully transparent full layer: no occlusion.
        return "{:.2%}".format(0.0)
    percentage = "{:.2%}".format(float(mask_pix_count) / float(full_pix_count))
    return percentage
def log(content, new_line=True):
    """Echo *content* to stdout prefixed with a local timestamp.

    When ``new_line`` is False the trailing newline is replaced by a single
    space so that subsequent output continues on the same line.
    """
    stamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
    line = stamp + ' ' + content
    if new_line:
        print(line)
    else:
        print(line, end=' ')
### Create the version bookkeeping file.
def make_version_file(remake):
    """Ensure version.json exists; when *remake* is True, recreate it empty."""
    version_file = 'version.json'
    if remake and os.path.exists(version_file):
        os.remove(version_file)
    if not os.path.exists(version_file):
        with open(version_file, 'w') as handle:
            json.dump({}, handle, sort_keys=True, indent=4)
### Load the version file.
def load_version():
    """Load version.json into the module-global ``versions`` dict.

    Fixed: when the file contained invalid JSON, the original regenerated
    version.json but left ``versions`` unassigned, so later reads raised
    NameError. The global is now always set. The bare ``except`` was also
    narrowed to ValueError (json.JSONDecodeError is a subclass).
    """
    global versions
    make_version_file(False)
    with open('version.json', 'r') as f:
        try:
            versions = json.load(f)
        except ValueError:
            # Corrupt version file: delete it, regenerate, start from empty.
            make_version_file(True)
            versions = {}
def write_version():
    """Persist the module-global ``versions`` dict to version.json."""
    with open('version.json', 'w') as handle:
        json.dump(versions, handle, sort_keys=True, indent=4)
def convert_2_zip_file(path, is_name_end_zip=False):
    """Zip a validated PSD and delete the original .psd file.

    When ``is_name_end_zip`` is True the zip is written next to the source
    file (the PSD came from an existing zip); otherwise it goes under
    ./Level/<psd_type>/. Uses os.chdir, so the process working directory is
    temporarily changed and then walked back up with os.chdir('../').
    """
    file_info_arr = os.path.split(path)
    file_path = file_info_arr[0]
    psd_name = os.path.split(path)[1].replace('.psd', '')
    psd_type = psd_name.split('_')[0]  # first name segment selects the Level subfolder
    if is_name_end_zip:
        psd_path = os.path.join('./', file_info_arr[1])
        os.chdir(file_path)
        # Validation passed: compress the psd into a zip beside it.
        utils.zip_file(psd_path, f'./{psd_name}.zip')
        os.remove(psd_path)
        # NOTE(review): walks up two levels here versus one in the else
        # branch — presumably zip sources sit one directory deeper; confirm.
        os.chdir('../')
        os.chdir('../')
    else:
        zip_out_path = os.path.join('./Level', psd_type)
        if not os.path.exists(zip_out_path):
            os.mkdir(zip_out_path)
        psd_path = os.path.join('./', file_info_arr[1])
        os.chdir(file_path)
        # Validation passed: compress the psd into ./Level/<psd_type>/.
        utils.zip_file(psd_path, f'../{zip_out_path}/{psd_name}.zip')
        os.remove(psd_path)
        os.chdir('../')
def get_psd_color_mode(psd_path):
    """Return the color mode name stored in a PSD file header.

    Per the Adobe PSD specification the file header is 26 bytes: '8BPS'
    signature (4), version (2), reserved (6), channels (2), height (4),
    width (4), depth (2), color mode (2, big-endian uint16).

    Fixed: the original read ``header[10]`` — a byte inside the reserved
    region, which is always 0 — so every file was reported as "Bitmap",
    and it also mapped 2 to RGB although mode 2 is Indexed and 3 is RGB.
    This reads the real color-mode field at offset 24-25 and maps all
    documented modes.
    """
    with open(psd_path, "rb") as f:
        header = f.read(26)
    if len(header) < 26:
        # Truncated / not a valid PSD header.
        return "Unknown"
    color_mode_value = int.from_bytes(header[24:26], "big")
    mode_names = {
        0: "Bitmap",
        1: "Grayscale",
        2: "Indexed",
        3: "RGB",
        4: "CMYK",
        7: "Multichannel",
        8: "Duotone",
        9: "Lab",
    }
    return mode_names.get(color_mode_value, "Unknown")
if __name__ == '__main__':
    # NOTE: 'global' at module level is a no-op; these names are module
    # globals that check_psd() also references.
    global all_psd_count
    all_psd_count = 0        # total .psd files seen during the walk
    global pass_psd_count
    pass_psd_count = 0       # newly validated + zipped .psd files
    global no_change_psd_count
    no_change_psd_count = 0  # unchanged .psd files re-zipped without checking
    global error_psd_dic
    error_psd_dic = {}       # psd name -> list of error messages
    global zip_fail_count
    zip_fail_count = 0       # files that raised during check/zip
    global re_zip_count
    re_zip_count = 0         # existing zips re-validated in force mode
    load_version()
    psd_name_list = []
    # Any first CLI argument (e.g. "force") is a non-empty string and thus
    # truthy, enabling force mode; otherwise False.
    is_force = sys.argv[1] if len(sys.argv) > 1 else False
    # is_force = True
    for root, dirs, files in os.walk('./'):
        for name in files:
            cur_file_path = os.path.join(root, name)
            if name.endswith('.psd'):
                all_psd_count += 1
            if name.endswith('.psd') or name.endswith('.zip'):
                file_info_arr = os.path.split(cur_file_path)
                file_path = file_info_arr[0]
                psd_name = file_info_arr[1]
                # Duplicate level names across folders are only reported.
                if psd_name not in psd_name_list:
                    psd_name_list.append(psd_name)
                else:
                    log("存在重复名称关卡:{}".format(psd_name))
            else:
                continue
            if name.endswith('.psd') or (name.endswith('.zip') and is_force):
                file_info_arr = os.path.split(cur_file_path)
                file_path = file_info_arr[0]
                psd_name = file_info_arr[1]
                hash_old = ""
                if psd_name in versions:
                    hash_old = versions[psd_name]
                is_zip = name.endswith('.zip')
                if os.path.exists(cur_file_path):
                    hash_now = ""
                    if name.endswith('.psd'):
                        hash_now = utils.calc_hash(cur_file_path)
                    # Re-check when the content hash changed, or always in
                    # force mode.
                    if hash_now != hash_old or is_force:
                        psd_file_path = cur_file_path
                        if is_zip:
                            # Unpack the zip and hash the inner psd instead.
                            utils.unzip_file(cur_file_path)
                            psd_file_path = "{}.psd".format(os.path.splitext(psd_file_path)[0])
                            hash_now = utils.calc_hash(psd_file_path)
                        try:
                            is_passed = check_psd(psd_file_path)
                            if is_passed:
                                # Validation passed: compress to zip.
                                convert_2_zip_file(psd_file_path, is_zip)
                                if is_zip:
                                    re_zip_count += 1
                                versions[psd_name] = hash_now
                                write_version()
                                if name.endswith('.psd'):
                                    pass_psd_count += 1
                        except Exception as e:
                            zip_fail_count += 1
                            log(f'检测或者压缩失败::{psd_file_path},{e}')
                            error_psd_dic[psd_name] = [f'{psd_file_path},{e}']
                    elif hash_now == hash_old:
                        # Unchanged psd: re-zip it without re-validating.
                        no_change_psd_count += 1
                        convert_2_zip_file(cur_file_path, is_zip)
    # Final summary report.
    log(f'psd压缩完毕总共psd数量{all_psd_count}')
    log(f'成功压缩新增psd个数{pass_psd_count}')
    log(f'成功压缩无改动psd个数{no_change_psd_count}')
    log(f'未检测通过psd个数{len(error_psd_dic)}')
    log(f'重新压缩psd数量{re_zip_count}')
    log(f'压缩失败个数:{zip_fail_count}')
    if len(error_psd_dic) > 0:
        log(f'以下psd出现问题')
        for error_psd_name, error_psd_log in error_psd_dic.items():
            log(f'psd名称{error_psd_name}')
            for error_log in error_psd_log:
                log(error_log)

24
config.py Normal file
View File

@ -0,0 +1,24 @@
import enum
# Standard PSD canvas dimensions (pixels) checked by check_zip_psd.py;
# the checker accepts width in {3000, 2000} and height in {2000, 3000}.
psd_standard_width = 3000
psd_standard_height = 2000
psd_standard_width2 = 2000
psd_standard_height2 = 3000
class error_log(enum.Enum):
    # Validation error messages used by check_zip_psd.py. The values are
    # user-facing Chinese strings written to logs, so they must not change.
    # NOTE(review): several member names misspell "exist" as "exit"; renaming
    # them would break callers, so they are kept as-is.
    psd_name_error = 'psd名称格式不正确'
    psd_size_error = 'psd尺寸大小不正确'
    exit_repeat_layer = '存在重复图层'
    psd_item_name_error = 'titem分组下存在不是titem的图层'
    item_group_not_exit = 'titem分组不存在'
    psd_not_exit_base = '不存在base底图'
    exit_more_base = '存在多个base图层'
    psd_not_full = '没有item对应的full'
    psd_not_fullcolor = '没有item对应的fullcolor'
    psd_not_itemcolor = '没有item对应的itemcolor'
    item_or_full_num_error = 'itemfull或itemcolor,fullcolor的个数不全部相同'
    name_contains_spaces = '该图层名字中包含空格'
    psd_mask_not_full = '没有mask对应的full'
    layer_not_need = '图层第一级有不属于我们需要的图层'
    psd_cmyk = 'psd是CMYK的色彩空间'

2
psd检测-强制.bat Normal file
View File

@ -0,0 +1,2 @@
REM Run the PSD checker in force mode: "force" makes already-zipped PSDs get
REM re-validated and re-compressed.
python check_zip_psd.py force
pause

2
psd检测.bat Normal file
View File

@ -0,0 +1,2 @@
REM Run the PSD checker over the current directory tree (changed files only).
python check_zip_psd.py
pause

162
utils.py Normal file
View File

@ -0,0 +1,162 @@
#!/usr/bin/env python
# coding:utf-8
import datetime
import os
import zipfile
import subprocess
import json
import hashlib
def zip_dir(dirpath, out_fullname):
    """
    Compress a whole directory tree into a zip archive.
    :param dirpath: directory to compress
    :param out_fullname: output path of the archive (…/xxxx.zip)
    :return: None

    Fixed: the archive is opened with a context manager so the handle is
    closed even when an exception occurs (the original leaked it on error),
    and the local no longer shadows the builtin ``zip``.
    """
    with zipfile.ZipFile(out_fullname, "w", zipfile.ZIP_DEFLATED) as zf:
        for path, dirnames, filenames in os.walk(dirpath):
            # Strip the root prefix so archive paths are relative to dirpath.
            fpath = path.replace(dirpath, '')
            for filename in filenames:
                zf.write(os.path.join(path, filename), os.path.join(fpath, filename))
def unzip_dir(zip_src, dst_dir):
    """
    Extract every entry of an archive into a directory.
    :param zip_src: path of the zip file
    :param dst_dir: extraction target directory (created by extract as needed)
    :return: None; prints a message when zip_src is not a valid zip file

    Fixed: the ZipFile is opened with a context manager so the handle is
    closed even if extraction raises (the original never closed it).
    """
    if zipfile.is_zipfile(zip_src):
        with zipfile.ZipFile(zip_src, 'r') as fz:
            for member in fz.namelist():
                fz.extract(member, dst_dir)
    else:
        print("{}不是zip文件".format(zip_src))
def zip_file(file_path, out_fullname, is_fixedtime=True):
    """
    Compress a single file into a new zip archive.
    :param file_path: path of the file to compress
    :param out_fullname: output path of the archive (…/xxxx.zip)
    :param is_fixedtime: when True, stamp the archive entry with the source
        file's mtime so the archive content is reproducible
    :return: None

    Fixed: uses a context manager (no leaked handle on error), no longer
    shadows this function's own name with a local variable, and hoists the
    loop-invariant os.stat out of the per-entry loop.
    """
    with zipfile.ZipFile(out_fullname, 'w') as zf:
        zf.write(file_path, compress_type=zipfile.ZIP_DEFLATED)
        if is_fixedtime:
            # Overwrite each entry's timestamp with the source file's mtime
            # (applied to the central directory when the archive is closed).
            file_stat = os.stat(file_path)
            fixed_stamp = datetime.datetime.fromtimestamp(file_stat.st_mtime).timetuple()[:6]
            for entry_name in zf.namelist():
                zf.getinfo(entry_name).date_time = fixed_stamp
def unzip_file(zip_file_path):
    """
    Extract an archive next to itself, restoring each entry's stored mtime.
    :param zip_file_path: path of the zip file
    :return: None

    Fixed: the original opened the archive twice — the first ZipFile was
    never used and never closed (a leaked handle); only the context-managed
    open is kept.
    """
    # Extract into the directory that contains the archive.
    extract_dir = "{}".format(os.path.split(zip_file_path)[0])
    # Extract each entry and set its modification time back to the
    # timestamp recorded in the archive.
    with zipfile.ZipFile(zip_file_path, 'r') as zf:
        for zip_info in zf.infolist():
            zf.extract(zip_info.filename, path=extract_dir)
            file_path = os.path.join(extract_dir, zip_info.filename)
            mtime = int(datetime.datetime(*zip_info.date_time).timestamp())
            os.utime(file_path, (mtime, mtime))
def DoCmd(strcmd, logPath=""):
    """Run *strcmd* through the shell, echoing stdout/stderr to the console.

    :param strcmd: shell command line (executed with shell=True — only pass
        trusted strings)
    :param logPath: when non-empty, output is also appended to this file
    :return: (lines_out, lines_error), both lists of bytes

    Fixes over the original:
    - Python 2's ``file()`` builtin replaced with ``open()`` (NameError on Py3).
    - The log checks mixed ``len(logPath) > 1`` and ``> 0``, which could raise
      NameError for a one-character path; unified into a single flag.
    - Process output is bytes; it is decoded before writing to the text-mode
      log (writing bytes would raise TypeError).
    - ``process.wait()`` added so the child is reaped.
    """
    use_log = len(logPath) > 0
    logfile = None
    if use_log:
        logfile = open(logPath, "a")
        logfile.writelines("-----------------------------")
        logfile.writelines(strcmd)
    process = subprocess.Popen(
        strcmd, shell=True, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    print(strcmd)
    lines_out = process.stdout.readlines()
    for l in lines_out:
        print(l)
        if use_log:
            logfile.write(l.decode(errors='replace'))
    lines_error = process.stderr.readlines()
    if use_log and len(lines_error) > 0:
        logfile.writelines("has error:\n\n")
    for l in lines_error:
        print(l)
        if use_log:
            logfile.write(l.decode(errors='replace'))
    process.wait()
    print("end: " + strcmd)
    if use_log:
        logfile.writelines("end: " + strcmd)
        logfile.close()
    return lines_out, lines_error
def get_file_last_line(fname):
    """
    Return the last line of the text file *fname* (first line is also printed).

    Fixed: the original seeked backwards from the end of a TEXT-mode handle
    (``f.seek(-50, 2)``), which raises io.UnsupportedOperation in Python 3 —
    text files only allow zero end-relative seeks. The backwards scan is now
    done on a binary handle, and files shorter than the window (including
    single-line and empty files) no longer loop forever or seek before the
    start of the file.
    """
    print(fname)
    with open(fname, 'r') as f:
        first_line = f.readline()  # read the first line
    with open(fname, 'rb') as f:
        f.seek(0, os.SEEK_END)
        size = f.tell()
        # Read a growing tail window until it contains at least two lines,
        # which guarantees the last one is complete.
        off = min(50, size)
        while True:
            f.seek(-off, os.SEEK_END)
            lines = f.readlines()
            if len(lines) >= 2 or off == size:
                # Empty file: fall back to the (empty) first line.
                last_line = lines[-1].decode() if lines else first_line
                break
            off = min(off * 2, size)
    print('文件' + fname + '第一行为:' + first_line)
    print('文件' + fname + '最后一行为:' + last_line)
    return last_line
def open_json(path):
    """Read the JSON document at *path* and return the parsed object."""
    with open(path, 'r') as handle:
        return json.load(handle)
def write_json(path, content):
    """Serialize *content* as JSON to *path*, overwriting any existing file."""
    with open(path, 'w') as handle:
        json.dump(content, handle)
# Derive a content hash for a file; filepath is the path to hash.
def calc_hash(filepath):
    """Return the SHA-1 hex digest of the file at *filepath*."""
    with open(filepath, 'rb') as handle:
        return hashlib.sha1(handle.read()).hexdigest()