gov_commodity_anhui_import_export.py

import re
from pathlib import Path

import pandas as pd

from anhui import download_dir
from utils import base_country_code, base_mysql
from utils.base_country_code import format_sql_value
from utils.log import log
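
# Parses the Hefei Customs monthly workbooks ("进口商品总值" / "出口商品总值")
# for Anhui province and loads per-commodity monthly rows into
# t_yujin_crossborder_prov_commodity_trade.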

# Map abbreviated sheet names to their canonical commodity names
CUSTOM_COMMODITY_REPLACEMENTS = {
    '家具': '家具及其零件',
    '眼镜': '眼镜及其零件',
}

# Commodity keywords whose fullwidth parentheses (and content) must be kept
PRESERVE_PARENTHESES_KEYWORDS = {
    '汽车(包括底盘)',
}


def clean_commodity_name(name, preserve_keywords=None):
    """
    Custom commodity-name cleaning; optionally keeps fullwidth-parenthesis content.

    :param name: commodity name string
    :param preserve_keywords: set of names whose parentheses should be preserved
    :return: the cleaned commodity name
    """
    name = str(name).strip()
    # Drop decoration characters
    name = re.sub(r'[#*]', '', name)
    # Whitelisted names keep their parenthesised part
    if preserve_keywords:
        for keyword in preserve_keywords:
            if keyword == name:
                return name
    # By default strip fullwidth parentheses (as used in the sheets) and their content
    name = re.sub(r'([^)]*)', '', name)
    return name.strip()
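
# Illustrative behaviour (hypothetical inputs):
#   clean_commodity_name('汽车(包括底盘)', PRESERVE_PARENTHESES_KEYWORDS) -> '汽车(包括底盘)'
#   clean_commodity_name('#家具(其他)') -> '家具'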


def process_folder(path):
    file_paths = list(Path(path).glob('*'))
    if not file_paths:
        log.info("No files found")
        return

    year, month = base_country_code.extract_year_month_from_path(path)
    import_df = pd.DataFrame()
    export_df = pd.DataFrame()
    for file_path in file_paths:
        # Skip the six title/header rows at the top of each sheet
        df = pd.read_excel(file_path, header=None).iloc[6:]
        # February sheets carry the value in column 1 (they only publish the
        # Jan-Feb cumulative figure); other months use column 3
        value_index = 1 if month == 2 else 3
        if "进口商品总值" in file_path.name:
            temp_df = df[[0, value_index]].rename(columns={0: 'commodity', value_index: 'import'})
            temp_df['commodity'] = (
                temp_df['commodity']
                .astype(str)
                .apply(lambda x: clean_commodity_name(x, preserve_keywords=PRESERVE_PARENTHESES_KEYWORDS))
                .replace(CUSTOM_COMMODITY_REPLACEMENTS, regex=False)
            )
            # '--' appears to mark missing values in the sheets
            temp_df['import'] = pd.to_numeric(temp_df['import'].replace('--', 0), errors='coerce')
            # Deduplicate commodities, keeping the first occurrence
            temp_df = temp_df.drop_duplicates(subset=['commodity'], keep='first')
            import_df = pd.concat([import_df, temp_df])
        elif "出口商品总值" in file_path.name:
            temp_df = df[[0, value_index]].rename(columns={0: 'commodity', value_index: 'export'})
            temp_df['commodity'] = (
                temp_df['commodity']
                .astype(str)
                .apply(lambda x: clean_commodity_name(x, preserve_keywords=PRESERVE_PARENTHESES_KEYWORDS))
                .replace(CUSTOM_COMMODITY_REPLACEMENTS, regex=False)
            )
            temp_df['export'] = pd.to_numeric(temp_df['export'].replace('--', 0), errors='coerce')
            temp_df = temp_df.drop_duplicates(subset=['commodity'], keep='first')
            export_df = pd.concat([export_df, temp_df])

    save_to_database(import_df, export_df, year, month)
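
# import_df / export_df each hold ['commodity', 'import'/'export'] columns,
# stacked across every workbook in the folder; save_to_database sums them
# per commodity before writing.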


def save_to_database(import_df, export_df, year, month):
    # Merge the two sides (outer join keeps commodities present on only one side)
    merged_df = pd.merge(
        import_df.groupby('commodity')['import'].sum().reset_index(),
        export_df.groupby('commodity')['export'].sum().reset_index(),
        on='commodity',
        how='outer'
    )
    year_month = f'{year}-{month:02d}'
    processed_commodities = set()
    sql_arr = []
    sql_arr_copy = []
    try:
        for _, row in merged_df.iterrows():
            commodity_name = str(row['commodity'])
            # Categories with no entry in the commodity reference table
            if commodity_name in ('肉类', '其他', '干鲜瓜果', '钟表'):
                log.info(f'{commodity_name} commodity does not exist, skipping')
                continue
            commodity_code, commodity_name_fix = base_mysql.get_commodity_id(commodity_name)
            if not commodity_code:
                log.info(f"No ID found for commodity name '{commodity_name}'")
                continue
            if not commodity_name_fix or commodity_name_fix in processed_commodities:
                continue
            # Scale by 10,000 (sheet figures are presumably 万-denominated)
            monthly_import = round(row['import'] * 10000, 4)
            monthly_export = round(row['export'] * 10000, 4)
            monthly_total = round(
                (0 if pd.isna(monthly_import) else monthly_import) +
                (0 if pd.isna(monthly_export) else monthly_export),
                4
            )
            if month == 2:
                # February figures are the Jan-Feb cumulative: halve them and
                # also queue an extra January row (sql_arr_copy) below
                year_month_2 = f'{year}-01'
                monthly_import = round(monthly_import / 2, 4)
                monthly_export = round(monthly_export / 2, 4)
                monthly_total = round(monthly_total / 2, 4)
                sql = (f"INSERT INTO t_yujin_crossborder_prov_commodity_trade "
                       f"(crossborder_year, crossborder_year_month, prov_code, prov_name, commodity_code, commodity_name, monthly_total, monthly_export, monthly_import, create_time) VALUES "
                       f"('{year}', '{year_month_2}', '340000', '安徽省', '{commodity_code}', '{commodity_name_fix}', {format_sql_value(monthly_total)}, {format_sql_value(monthly_export)}, {format_sql_value(monthly_import)}, now()) "
                       f"ON DUPLICATE KEY UPDATE create_time = now();")
                sql_arr_copy.append(sql)
            sql = (f"INSERT INTO t_yujin_crossborder_prov_commodity_trade "
                   f"(crossborder_year, crossborder_year_month, prov_code, prov_name, commodity_code, commodity_name, monthly_total, monthly_export, monthly_import, create_time) VALUES "
                   f"('{year}', '{year_month}', '340000', '安徽省', '{commodity_code}', '{commodity_name_fix}', {format_sql_value(monthly_total)}, {format_sql_value(monthly_export)}, {format_sql_value(monthly_import)}, now()) "
                   f"ON DUPLICATE KEY UPDATE create_time = now();")
            sql_arr.append(sql)
            processed_commodities.add(commodity_name_fix)
            # log.info(f'{commodity_name} -> {commodity_name_fix}')
    except Exception as e:
        log.info(f"{year_month} prov_commodity_trade: exception while generating SQL: {str(e)}")

    log.info(f"√ {year_month} prov_commodity_trade generated {len(sql_arr)} SQL statements")
    # Bulk-insert the generated statements
    base_mysql.bulk_insert(sql_arr)
    if month == 2:
        log.info(f"√ {year_month} prov_commodity_trade copy generated {len(sql_arr_copy)} SQL statements")
        base_mysql.bulk_insert(sql_arr_copy)
    log.info(f"√ {year_month} prov_commodity_trade SQL load complete!\n")
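
# Note: ON DUPLICATE KEY UPDATE only refreshes create_time, so re-running a
# month never overwrites figures that are already stored.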


def hierarchical_traversal(root_path):
    """Layered traversal: province -> year -> month directories."""
    root = Path(root_path)
    # Collect the year directories
    year_dirs = [
        item for item in root.iterdir()
        if item.is_dir() and base_country_code.YEAR_PATTERN.match(item.name)
    ]
    # Years in descending order
    for year_dir in sorted(year_dirs, key=lambda x: x.name, reverse=True):
        # Full paths look like download/anhui/2025/03
        log.info(f"\nYear: {year_dir.name} | Province: anhui")
        # Collect the month directories
        month_dirs = []
        for item in year_dir.iterdir():
            if item.is_dir() and base_country_code.MONTH_PATTERN.match(item.name):
                month_dirs.append({
                    "path": item,
                    "month": int(item.name)
                })
        # Months in descending order
        if month_dirs:
            for md in sorted(month_dirs, key=lambda x: x["month"], reverse=True):
                log.info(f"  Month: {md['month']:02d} | Path: {md['path']}")
                process_folder(md['path'])
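
# Expected layout under download_dir (illustrative): <download_dir>/<YYYY>/<MM>/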


if __name__ == '__main__':
    hierarchical_traversal(download_dir)
    # root = Path(base_country_code.download_dir) / '2025' / '04'
    # process_folder(root)
    log.info("All Hefei Customs (Anhui) commodity-chapter files processed!")