"""Hebei (Shijiazhuang customs) cross-border commodity import/export ETL.

Walks the download directory (year/month folders), reads the monthly
"主出/主进商品" Excel sheets, cleans commodity names, aggregates import and
export values per commodity, and bulk-inserts the result into
``t_yujin_crossborder_prov_commodity_trade``.
"""
import re
from pathlib import Path

import pandas as pd

from hebei import download_dir
from utils import base_country_code, base_mysql
from utils.base_country_code import format_sql_value
from utils.log import log

# Manual post-cleaning name fixes (exact-match replacement, applied after
# clean_commodity_name).
CUSTOM_COMMODITY_REPLACEMENTS = {
    '稻谷及大米': '稻谷、大米及大米粉',
    '有机发光二极管平板显示模组': '有机发光二极管(OLED)平板显示模组',
}

# Names whose full-width parentheses content must NOT be stripped.
PRESERVE_PARENTHESES_KEYWORDS = {
    '汽车(包括底盘)',
}


def clean_commodity_name(name, preserve_keywords=None):
    """Clean a commodity name, optionally preserving full-width parentheses.

    Normalizes half-width parens to full-width, strips ``# * ?`` markers and
    Excel ``_x000D_`` carriage-return artifacts, then removes full-width
    parenthesized content unless the whole name is in *preserve_keywords*.

    :param name: raw commodity name (any type; coerced to str)
    :param preserve_keywords: set of exact names whose parens are kept
    :return: cleaned commodity name
    """
    name = str(name).strip().replace('(', '(').replace(')', ')')
    # Drop footnote/marker symbols and Excel CR artifacts.
    name = re.sub(r'[#*?]', '', name)
    name = re.sub(r'_x000D_', '', name)
    # Exact match against a preserve keyword keeps the parenthesized part.
    if preserve_keywords and name in preserve_keywords:
        return name
    # Default: remove full-width parentheses and their content.
    name = re.sub(r'([^)]*)', '', name)
    return name.strip()


def process_folder(path):
    """Process one month folder: read sheets, aggregate, persist to DB."""
    year, month = base_country_code.extract_year_month_from_path(path)
    # From 2025-03 onward the source sheets carry an extra leading column,
    # shifting the name/value columns by one.
    name_index = 1 if year == 2025 and month >= 3 else 0
    value_index = 5 if year == 2025 and month >= 3 else 4
    res = df_data(path, name_index, value_index)
    if not res:
        log.info(f"{path} 上月目录里文件未找到包含 主出、主进商品 sheet")
        return
    export_df, import_df = res
    import_grouped = (
        import_df.groupby('commodity')['import'].sum().reset_index()
        if not import_df.empty
        else pd.DataFrame(columns=['commodity', 'import'])
    )
    export_grouped = (
        export_df.groupby('commodity')['export'].sum().reset_index()
        if not export_df.empty
        else pd.DataFrame(columns=['commodity', 'export'])
    )
    # Outer merge keeps commodities present on only one side (other side NaN).
    merged_df = pd.merge(
        import_grouped, export_grouped, on='commodity', how='outer'
    ).infer_objects()
    save_to_database(merged_df, year, month)


def _nan_safe_total(monthly_import, monthly_export):
    """Return import + export rounded to 4 places, treating NaN as 0."""
    return round(
        (0 if pd.isna(monthly_import) else monthly_import)
        + (0 if pd.isna(monthly_export) else monthly_export),
        4,
    )


def _build_insert_sql(year, year_month, commodity_code, commodity_name,
                      monthly_total, monthly_export, monthly_import):
    """Build one upsert statement for t_yujin_crossborder_prov_commodity_trade.

    NOTE(review): values are interpolated directly into the SQL string.
    commodity_name originates from Excel files — if those are not trusted,
    this is an injection risk; prefer parameterized queries if
    base_mysql.bulk_insert supports them.
    """
    return (
        f"INSERT INTO t_yujin_crossborder_prov_commodity_trade "
        f"(crossborder_year, crossborder_year_month, prov_code, prov_name, commodity_code, commodity_name, monthly_total, monthly_export, monthly_import, create_time) VALUES "
        f"('{year}', '{year_month}', '130000', '河北省', '{commodity_code}', '{commodity_name}', {format_sql_value(monthly_total)}, {format_sql_value(monthly_export)}, {format_sql_value(monthly_import)}, now()) "
        # Space before ON is required: the original concatenated "now())ON".
        f"ON DUPLICATE KEY UPDATE create_time = now() ;"
    )


def save_to_database(merged_df, year, month):
    """Generate upsert SQL for the merged commodity frame and bulk-insert it.

    Special case: the 2023-02 source files contain cumulative Jan+Feb values,
    so they are halved and additionally written as a synthetic 2023-01 batch.

    :param merged_df: DataFrame with columns commodity / import / export
    :param year: data year (int)
    :param month: data month (int)
    """
    processed_commodities = set()
    sql_arr = []
    sql_arr_copy = []
    year_month = f'{year}-{month:02d}'
    try:
        for _, row in merged_df.iterrows():
            commodity_name = str(row['commodity']).strip()
            commodity_code, commodity_name_fix = base_mysql.get_commodity_id(commodity_name)
            if not commodity_code:
                log.info(f"未找到商品名称 '{commodity_name}' 对应的 ID")
                continue
            # Skip unmapped names and duplicates already emitted this run.
            if not commodity_name_fix or commodity_name_fix in processed_commodities:
                continue

            monthly_import = round(row['import'], 4)
            monthly_export = round(row['export'], 4)
            monthly_total = _nan_safe_total(monthly_import, monthly_export)
            if year_month == '2023-02':
                # 2023-02 values are Jan+Feb cumulative; halve per month.
                monthly_import = round(monthly_import / 2, 4)
                monthly_export = round(monthly_export / 2, 4)
                # BUG FIX: the original recomputed the total here WITHOUT the
                # NaN guard, so a row missing one side produced a NaN total.
                monthly_total = _nan_safe_total(monthly_import, monthly_export)

            # Synthetic 2023-01 batch (source has no standalone January file);
            # built for every row but only inserted when processing 2023-02.
            sql_arr_copy.append(_build_insert_sql(
                '2023', '2023-01', commodity_code, commodity_name_fix,
                monthly_total, monthly_export, monthly_import))
            sql_arr.append(_build_insert_sql(
                year, year_month, commodity_code, commodity_name_fix,
                monthly_total, monthly_export, monthly_import))
            processed_commodities.add(commodity_name_fix)
    except Exception as e:
        log.info(f"{year_month} prov_commodity_trade 生成 SQL 文件时发生异常: {str(e)}")

    log.info(f"√ {year_month} prov_commodity_trade 成功生成 SQL 文件 size {len(sql_arr)} ")
    # 解析完后生成sql文件批量入库
    base_mysql.bulk_insert(sql_arr)
    if year_month == '2023-02':
        log.info(f"√ {year_month} prov_commodity_trade copy 成功生成 SQL 文件 size {len(sql_arr_copy)} ")
        base_mysql.bulk_insert(sql_arr_copy)
    log.info(f"√ {year_month} prov_commodity_trade SQL 存表完成!")


def _read_trade_sheet(file_path, name_index, value_index, value_col):
    """Read one import/export Excel file into a two-column DataFrame.

    :param file_path: path to the Excel file
    :param name_index: column index holding the commodity name
    :param value_index: column index holding the monetary value
    :param value_col: output column name ('import' or 'export')
    :return: DataFrame with columns ['commodity', value_col], deduplicated
    """
    # First 5 rows are headers/titles in the source sheets.
    df = pd.read_excel(file_path, header=None).iloc[5:]
    temp_df = df[[name_index, value_index]].rename(
        columns={name_index: 'commodity', value_index: value_col})
    temp_df['commodity'] = (
        temp_df['commodity']
        .astype(str)
        .apply(lambda x: clean_commodity_name(
            x, preserve_keywords=PRESERVE_PARENTHESES_KEYWORDS))
        .replace(CUSTOM_COMMODITY_REPLACEMENTS, regex=False)
    )
    # '--' marks "no data" in the source; coerce the rest to numeric.
    temp_df[value_col] = pd.to_numeric(
        temp_df[value_col].replace('--', 0), errors='coerce')
    # Keep the first occurrence of each commodity within a file.
    return temp_df.drop_duplicates(subset=['commodity'], keep='first')


def df_data(path, name_index, value_index):
    """Collect export ('出口') and import ('进口') frames from a month folder.

    :return: (export_df, import_df) tuple, or None when the folder is empty
    """
    file_paths = list(Path(path).glob('*'))
    if not file_paths:
        log.info("未找到任何文件")
        return None

    import_df = pd.DataFrame()
    export_df = pd.DataFrame()
    for file_path in file_paths:
        if '出口' in file_path.name:
            export_df = pd.concat([
                export_df,
                _read_trade_sheet(file_path, name_index, value_index, 'export'),
            ])
        if '进口' in file_path.name:
            import_df = pd.concat([
                import_df,
                _read_trade_sheet(file_path, name_index, value_index, 'import'),
            ])
    return export_df, import_df


def hierarchical_traversal(root_path):
    """分层遍历:省份->年份->月目录

    Walks year directories (newest first), then month directories (newest
    first), calling process_folder on each month path.
    """
    root = Path(root_path)
    # Year-level directories matching the project-wide year pattern.
    year_dirs = [
        item for item in root.iterdir()
        if item.is_dir() and base_country_code.YEAR_PATTERN.match(item.name)
    ]
    for year_dir in sorted(year_dirs, key=lambda x: x.name, reverse=True):
        log.info(f"\n年份:{year_dir.name} | 省份:hebei")
        # Month-level subdirectories, validated against the month pattern.
        month_dirs = [
            {"path": item, "month": int(item.name)}
            for item in year_dir.iterdir()
            if item.is_dir() and base_country_code.MONTH_PATTERN.match(item.name)
        ]
        # Process months in descending order.
        if month_dirs:
            for md in sorted(month_dirs, key=lambda x: x["month"], reverse=True):
                log.info(f" 月份:{md['month']:02d} | 路径:{md['path']}")
                process_folder(md['path'])


if __name__ == '__main__':
    hierarchical_traversal(download_dir)
    # Local-debug entry point for a single month folder:
    # root = Path(download_dir)/'2023'/'02'
    # process_folder(root)
    log.info(f"河北石家庄海关出入口商品所有文件处理完成!")