gov_commodity_zhejiang_import_export.py

from pathlib import Path
import re

import pandas as pd

from zhejiang import download_dir
from utils import base_country_code, base_mysql
from utils.base_country_code import format_sql_value
from utils.log import log

# Commodity names that must be mapped to their canonical form before the database lookup.
CUSTOM_COMMODITY_REPLACEMENTS = {
    '稻谷及大米': '稻谷、大米及大米粉',
    '有机发光二极管平板显示模组': '有机发光二极管(OLED)平板显示模组',
}

# Names whose full-width parentheses (and their contents) must be kept as-is.
PRESERVE_PARENTHESES_KEYWORDS = {
    '汽车(包括底盘)',
}


def clean_commodity_name(name, preserve_keywords=None):
    """
    Custom commodity-name cleaning; optionally preserves the content of Chinese (full-width) parentheses.

    :param name: commodity name string
    :param preserve_keywords: set of names whose parentheses should be preserved
    :return: cleaned commodity name
    """
    # Normalize half-width parentheses to full-width.
    name = str(name).strip().replace('(', '(').replace(')', ')')
    # Strip stray symbols and Excel carriage-return artifacts.
    name = re.sub(r'[#*?]', '', name)
    name = re.sub(r'_x000D_', '', name)
    # If the normalized name exactly matches a preserve keyword, keep the parentheses.
    if preserve_keywords and name in preserve_keywords:
        return name
    # By default, drop full-width parentheses and their contents.
    name = re.sub(r'([^)]*)', '', name)
    return name.strip()
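# Illustrative behaviour of clean_commodity_name (hypothetical inputs, not taken from the source workbooks):
#   clean_commodity_name('稻谷及大米#')                                   -> '稻谷及大米'
#   clean_commodity_name('集成电路(未列名)')                              -> '集成电路'
#   clean_commodity_name('汽车(包括底盘)',
#                        preserve_keywords=PRESERVE_PARENTHESES_KEYWORDS) -> '汽车(包括底盘)'
#   (the last input matches a preserve keyword exactly, so its parentheses survive)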
def get_df_import_export(path, year_month):
    """Read the export ('主要出口商品') and import ('主要进口商品') tables for one month directory
    and return (import_df, export_df), or None if the directory contains no files."""
    file_paths = list(Path(path).glob('*'))
    if not file_paths:
        log.info("未找到任何文件")
        return None

    # Locate the export table: a dedicated "主要出口商品" file if present, otherwise sheet 4 of the combined workbook.
    flag = True
    file_path = file_paths[0]
    if len(file_paths) > 1:
        for file_path in file_paths:
            if '主要出口商品' in file_path.name:
                flag = False
                break
    log.info(f"处理文件: {file_path.name}")
    xls = pd.ExcelFile(file_path)

    import_df = pd.DataFrame()
    export_df = pd.DataFrame()

    if flag:
        df = pd.read_excel(xls, sheet_name=4, header=None).iloc[2:]
    else:
        df = pd.read_excel(file_path, header=None).iloc[1:]
    temp_df = df[[0, 1]].rename(columns={0: 'commodity', 1: 'export'})
    temp_df['commodity'] = (
        temp_df['commodity']
        .astype(str)
        .apply(lambda x: clean_commodity_name(x, preserve_keywords=PRESERVE_PARENTHESES_KEYWORDS))
        .replace(CUSTOM_COMMODITY_REPLACEMENTS, regex=False)
    )
    temp_df['export'] = pd.to_numeric(temp_df['export'].replace('--', 0), errors='coerce').astype(float)
    # Scale to align units around the 2024-07 reporting change
    # (year_month is only non-None for previous-month reads; see process_folder).
    if year_month == '2024-07':
        temp_df['export'] = temp_df['export'] / 10000
    export_df = pd.concat([export_df, temp_df])

    # Locate the import table: a dedicated "主要进口商品" file if present, otherwise sheet 5 of the combined workbook.
    flag_2 = True
    if len(file_paths) > 1:
        for file_path in file_paths:
            if '主要进口商品' in file_path.name:
                flag_2 = False
                break
    if flag_2:
        df = pd.read_excel(xls, sheet_name=5, header=None).iloc[2:]
    else:
        df = pd.read_excel(file_path, header=None).iloc[1:]
    temp_df = df[[0, 1]].rename(columns={0: 'commodity', 1: 'import'})
    temp_df['commodity'] = (
        temp_df['commodity']
        .astype(str)
        .apply(lambda x: clean_commodity_name(x, preserve_keywords=PRESERVE_PARENTHESES_KEYWORDS))
        .replace(CUSTOM_COMMODITY_REPLACEMENTS, regex=False)
    )
    temp_df['import'] = pd.to_numeric(temp_df['import'].replace('--', 0), errors='coerce').astype(float)
    if year_month == '2024-07':
        temp_df['import'] = temp_df['import'] / 10000
    import_df = pd.concat([import_df, temp_df])

    return import_df, export_df
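# Hypothetical call, assuming the download_dir/<year>/<month>/ layout traversed below:
#   res = get_df_import_export(Path(download_dir) / '2024' / '08', None)
#   if res:
#       import_df, export_df = res
#   import_df has columns ['commodity', 'import'], export_df has ['commodity', 'export'].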
def process_folder(path):
    res = get_df_import_export(path, None)
    if not res:
        log.info(f"{path} 目录里文件未找到包含 主出、主进商品 sheet")
        return
    import_df, export_df = res

    year, month = base_country_code.extract_year_month_from_path(path)
    year_month = f'{year}-{month:02d}'

    # Aggregate the current month's data by commodity.
    curr_import = import_df.groupby('commodity')['import'].sum().reset_index()
    curr_export = export_df.groupby('commodity')['export'].sum().reset_index()

    if month != 1:
        # The source figures are cumulative, so subtract the previous month to obtain monthly values.
        previous_month_dir = base_country_code.get_previous_month_dir(path)
        res = get_df_import_export(previous_month_dir, year_month)
        if not res:
            log.info(f"{path} 上月目录里文件未找到包含 主出、主进商品 sheet")
            return
        prev_import_df, prev_export_df = res

        # Aggregate the previous month's data by commodity.
        prev_import = prev_import_df.groupby('commodity')['import'].sum().reset_index()
        prev_export = prev_export_df.groupby('commodity')['export'].sum().reset_index()

        # Difference: current cumulative minus previous cumulative.
        curr_import = pd.merge(curr_import, prev_import, on='commodity', how='left')
        curr_import['import'] = round(curr_import['import_x'] - curr_import['import_y'], 4)
        curr_export = pd.merge(curr_export, prev_export, on='commodity', how='left')
        curr_export['export'] = round(curr_export['export_x'] - curr_export['export_y'], 4)
        log.info(f"合并文件: {path}*********{previous_month_dir}")

    # Merge import and export data on commodity.
    merged_df = pd.merge(curr_import, curr_export, on='commodity', how='outer')
    save_to_database(merged_df, year, month)
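# Worked example of the differencing above (hypothetical figures):
#   2024-08 folder reports commodity A with cumulative export 120.0
#   2024-07 folder reports commodity A with cumulative export 95.0
#   => the monthly export written for 2024-08 is round(120.0 - 95.0, 4) = 25.0
# January (month == 1) is written as-is, since there is no earlier month in the year to subtract.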
def save_to_database(merged_df, year, month):
    year_month = f'{year}-{month:02d}'
    processed_commodities = set()
    sql_arr = []
    try:
        for _, row in merged_df.iterrows():
            commodity_name = str(row['commodity']).strip()
            # Skip aggregate categories that are not individual commodities.
            if commodity_name in ('消费品', '劳动密集型产品'):
                log.info(f'{commodity_name} 商品不存在,跳过')
                continue
            commodity_code, commodity_name_fix = base_mysql.get_commodity_id(commodity_name)
            if not commodity_code:
                log.info(f"未找到商品名称 '{commodity_name}' 对应的 ID")
                continue
            if not commodity_name_fix or commodity_name_fix in processed_commodities:
                log.info(f"已处理过 '{commodity_name_fix}',传入name:{commodity_name}")
                continue

            if year == 2025 or (year == 2024 and month in [7, 8, 9, 10, 11, 12]):
                # From 2024-07 onward the source values are already in the target unit.
                monthly_import = round(row['import'], 4)
                monthly_export = round(row['export'], 4)
            else:
                # Earlier months are reported on a 10000x scale; convert before storing.
                monthly_import = round(row['import'] / 10000, 4)
                monthly_export = round(row['export'] / 10000, 4)
            monthly_total = round(
                (0 if pd.isna(monthly_import) else monthly_import) +
                (0 if pd.isna(monthly_export) else monthly_export),
                4
            )

            sql = (f"INSERT INTO t_yujin_crossborder_prov_commodity_trade "
                   f"(crossborder_year, crossborder_year_month, prov_code, prov_name, commodity_code, commodity_name, monthly_total, monthly_export, monthly_import, create_time) VALUES "
                   f"('{year}', '{year_month}', '330000', '浙江省', '{commodity_code}', '{commodity_name_fix}', "
                   f"{format_sql_value(monthly_total)}, {format_sql_value(monthly_export)}, {format_sql_value(monthly_import)}, now()) "
                   f"ON DUPLICATE KEY UPDATE create_time = now();")
            sql_arr.append(sql)
            processed_commodities.add(commodity_name_fix)
            # log.info(f'{commodity_name} -> {commodity_name_fix}')
    except Exception as e:
        log.info(f"{year_month} prov_commodity_trade 生成 SQL 文件时发生异常: {str(e)}")

    log.info(f"√ {year_month} prov_commodity_trade 成功生成 SQL 文件 size {len(sql_arr)} ")
    # After parsing, bulk-insert the generated SQL statements.
    base_mysql.bulk_insert(sql_arr)
    log.info(f"√ {year_month} prov_commodity_trade SQL 存表完成!\n")
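# Illustrative rendered statement (hypothetical commodity code/name and values; assumes format_sql_value
# renders plain numerics here):
#   INSERT INTO t_yujin_crossborder_prov_commodity_trade
#   (crossborder_year, crossborder_year_month, prov_code, prov_name, commodity_code, commodity_name,
#    monthly_total, monthly_export, monthly_import, create_time) VALUES
#   ('2024', '2024-08', '330000', '浙江省', '8542', '集成电路', 35.1234, 25.0, 10.1234, now())
#   ON DUPLICATE KEY UPDATE create_time = now();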
def hierarchical_traversal(root_path):
    """Hierarchical traversal: province -> year -> month directories."""
    root = Path(root_path)
    # Collect the year directories.
    year_dirs = [
        item for item in root.iterdir()
        if item.is_dir() and base_country_code.YEAR_PATTERN.match(item.name)
    ]
    # Years in descending order.
    for year_dir in sorted(year_dirs, key=lambda x: x.name, reverse=True):
        log.info(f"\n年份:{year_dir.name} | 省份:zhejiang")
        # Collect the month directories.
        month_dirs = []
        for item in year_dir.iterdir():
            if item.is_dir() and base_country_code.MONTH_PATTERN.match(item.name):
                month_dirs.append({
                    "path": item,
                    "month": int(item.name)
                })
        # Months in descending order.
        if month_dirs:
            for md in sorted(month_dirs, key=lambda x: x["month"], reverse=True):
                log.info(f" 月份:{md['month']:02d} | 路径:{md['path']}")
                process_folder(md['path'])
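# Assumed directory layout under download_dir (inferred from YEAR_PATTERN / MONTH_PATTERN and the
# commented-out 2023/01 example below):
#   <download_dir>/
#       2024/
#           07/   <- monthly Excel file(s), e.g. 主要出口商品 / 主要进口商品 tables
#           08/
#       2023/
#           01/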
if __name__ == '__main__':
    hierarchical_traversal(download_dir)

    # root = Path(download_dir) / '2023' / '01'
    # process_folder(root)
    log.info("浙江杭州海关类章所有文件处理完成!")