gov_commodity_hebei_import_export.py

from pathlib import Path
import re

import pandas as pd

from utils import base_country_code, base_mysql
from utils.base_country_code import format_sql_value

# Manual renames applied after generic cleaning, so the cleaned names match
# the canonical commodity names in the database.
CUSTOM_COMMODITY_REPLACEMENTS = {
    '稻谷及大米': '稻谷、大米及大米粉',
    '有机发光二极管平板显示模组': '有机发光二极管(OLED)平板显示模组',
}

# Commodity names whose parenthesised content must be kept verbatim.
PRESERVE_PARENTHESES_KEYWORDS = {
    '汽车（包括底盘）',
}
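
# Note: clean_commodity_name() normalizes parentheses to full-width before
# comparing against PRESERVE_PARENTHESES_KEYWORDS, so entries there must use
# full-width parentheses; CUSTOM_COMMODITY_REPLACEMENTS is applied after
# cleaning, so its keys must be the already-cleaned names.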

def clean_commodity_name(name, preserve_keywords=None):
    """
    Custom commodity-name cleaning; content in Chinese (full-width)
    parentheses can be conditionally preserved.
    :param name: commodity name string
    :param preserve_keywords: set of names whose parentheses are kept
    :return: cleaned commodity name
    """
    # Normalize half-width parentheses to full-width so the checks below
    # only have to handle one variant.
    name = str(name).strip().replace('(', '（').replace(')', '）')
    # Strip noise characters.
    name = re.sub(r'[#*?]', '', name)
    name = re.sub(r'_x000D_', '', name)  # carriage-return artifact from Excel exports
    # Keep the parenthesised content for whitelisted names.
    if preserve_keywords:
        for keyword in preserve_keywords:
            if keyword == name:
                return name
    # By default, remove full-width parentheses and their content.
    name = re.sub(r'（[^）]*）', '', name)
    return name.strip()
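
# Illustrative behavior (a sketch; the second commodity name is made up):
#   clean_commodity_name('汽车(包括底盘)', PRESERVE_PARENTHESES_KEYWORDS)
#       -> '汽车（包括底盘）'   # whitelisted: parentheses kept
#   clean_commodity_name('未锻轧铜及铜材（含合金）')
#       -> '未锻轧铜及铜材'     # default: parenthesised content removed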

def process_folder(path):
    year, month = base_country_code.extract_year_month_from_path(path)
    # From March 2025 onward the source workbooks use a different column layout.
    name_index = 1 if year == 2025 and month >= 3 else 0
    value_index = 5 if year == 2025 and month >= 3 else 4
    res = df_data(path, name_index, value_index)
    if not res:
        print(f"{path}: no file containing the '主出、主进商品' sheets was found in the month directory")
        return
    export_df, import_df = res
    merged_df = pd.merge(
        import_df.groupby('commodity')['import'].sum().reset_index() if not import_df.empty else pd.DataFrame(columns=['commodity', 'import']),
        export_df.groupby('commodity')['export'].sum().reset_index() if not export_df.empty else pd.DataFrame(columns=['commodity', 'export']),
        on='commodity',
        how='outer'
    ).infer_objects()
    save_to_database(merged_df, year, month)
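
# Note: the outer merge above leaves NaN for commodities that appear on only
# one side; format_sql_value is assumed to turn NaN/None into a SQL-safe
# value (e.g. NULL) downstream.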

def save_to_database(merged_df, year, month):
    processed_commodities = set()
    sql_arr = []
    sql_arr_copy = []
    year_month = f'{year}-{month:02d}'
    try:
        for _, row in merged_df.iterrows():
            commodity_name = str(row['commodity']).strip()
            commodity_code, commodity_name_fix = base_mysql.get_commodity_id(commodity_name)
            if not commodity_code:
                print(f"No ID found for commodity name '{commodity_name}'")
                continue
            if not commodity_name_fix or commodity_name_fix in processed_commodities:
                continue
            monthly_import = round(row['import'], 4)
            monthly_export = round(row['export'], 4)
            monthly_total = round(monthly_import + monthly_export, 4)
            if year_month == '2023-02':
                # The February 2023 release covers January and February combined:
                # split it evenly and emit a copy row for 2023-01.
                monthly_import = round(monthly_import / 2, 4)
                monthly_export = round(monthly_export / 2, 4)
                monthly_total = round(monthly_import + monthly_export, 4)
            # 2023-01 copy (only bulk-inserted for the 2023-02 run below).
            sql = (f"INSERT INTO t_yujin_crossborder_prov_commodity_trade "
                   f"(crossborder_year, crossborder_year_month, prov_code, prov_name, commodity_code, commodity_name, monthly_total, monthly_export, monthly_import, create_time) VALUES "
                   f"('2023', '2023-01', '130000', '河北省', '{commodity_code}', '{commodity_name_fix}', {format_sql_value(monthly_total)}, {format_sql_value(monthly_export)}, {format_sql_value(monthly_import)}, now());")
            sql_arr_copy.append(sql)
            sql = (f"INSERT INTO t_yujin_crossborder_prov_commodity_trade "
                   f"(crossborder_year, crossborder_year_month, prov_code, prov_name, commodity_code, commodity_name, monthly_total, monthly_export, monthly_import, create_time) VALUES "
                   f"('{year}', '{year_month}', '130000', '河北省', '{commodity_code}', '{commodity_name_fix}', {format_sql_value(monthly_total)}, {format_sql_value(monthly_export)}, {format_sql_value(monthly_import)}, now());")
            sql_arr.append(sql)
            processed_commodities.add(commodity_name_fix)
    except Exception as e:
        print(f"{year_month} prov_commodity_trade: exception while generating SQL: {str(e)}")
    print(f"√ {year_month} prov_commodity_trade generated SQL batch, size {len(sql_arr)}")
    # After parsing, bulk-insert the generated SQL.
    base_mysql.bulk_insert(sql_arr)
    if year_month == '2023-02':
        print(f"√ {year_month} prov_commodity_trade copy generated SQL batch, size {len(sql_arr_copy)}")
        base_mysql.bulk_insert(sql_arr_copy)
    print(f"√ {year_month} prov_commodity_trade SQL rows saved!")
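
# Shape of one generated statement (a single line in the code, wrapped here
# for readability; the commodity code/name and numbers are made up, and
# format_sql_value is assumed to render plain numeric literals):
#   INSERT INTO t_yujin_crossborder_prov_commodity_trade
#     (crossborder_year, crossborder_year_month, prov_code, prov_name,
#      commodity_code, commodity_name, monthly_total, monthly_export,
#      monthly_import, create_time)
#   VALUES ('2024', '2024-06', '130000', '河北省', '0101', '活马',
#           12.3456, 10.0, 2.3456, now());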

def df_data(path, name_index, value_index):
    file_paths = list(Path(path).glob('*'))
    if not file_paths:
        print("No files found")
        return None
    import_df = pd.DataFrame()
    export_df = pd.DataFrame()
    for file_path in file_paths:
        if '出口' in file_path.name:
            # Data rows start at row index 5; the rows above are titles/headers.
            df = pd.read_excel(file_path, header=None).iloc[5:]
            temp_df = df[[name_index, value_index]].rename(columns={name_index: 'commodity', value_index: 'export'})
            temp_df['commodity'] = (
                temp_df['commodity']
                .astype(str)
                .apply(lambda x: clean_commodity_name(x, preserve_keywords=PRESERVE_PARENTHESES_KEYWORDS))
                .replace(CUSTOM_COMMODITY_REPLACEMENTS, regex=False)
            )
            temp_df['export'] = pd.to_numeric(temp_df['export'].replace('--', 0), errors='coerce')
            temp_df = temp_df.drop_duplicates(subset=['commodity'], keep='first')
            export_df = pd.concat([export_df, temp_df])
        if '进口' in file_path.name:
            df = pd.read_excel(file_path, header=None).iloc[5:]
            temp_df = df[[name_index, value_index]].rename(columns={name_index: 'commodity', value_index: 'import'})
            temp_df['commodity'] = (
                temp_df['commodity']
                .astype(str)
                .apply(lambda x: clean_commodity_name(x, preserve_keywords=PRESERVE_PARENTHESES_KEYWORDS))
                .replace(CUSTOM_COMMODITY_REPLACEMENTS, regex=False)
            )
            temp_df['import'] = pd.to_numeric(temp_df['import'].replace('--', 0), errors='coerce')
            temp_df = temp_df.drop_duplicates(subset=['commodity'], keep='first')
            import_df = pd.concat([import_df, temp_df])
    if export_df.empty and import_df.empty:
        # Neither an export ('出口') nor an import ('进口') file was matched.
        return None
    return export_df, import_df
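
# Assumed workbook layout (inferred from the slicing above): the first five
# rows are title/header rows, so data starts at row index 5; name_index and
# value_index select the commodity-name and value columns, and '--' marks a
# missing value.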

def hierarchical_traversal(root_path):
    """Layered traversal: province -> year -> month directories."""
    root = Path(root_path)
    # Collect the year directories.
    year_dirs = [
        item for item in root.iterdir()
        if item.is_dir() and base_country_code.YEAR_PATTERN.match(item.name)
    ]
    # Years in descending order.
    for year_dir in sorted(year_dirs, key=lambda x: x.name, reverse=True):
        print(f"\nYear: {year_dir.name} | Province: hebei")
        # Collect the month directories.
        month_dirs = []
        for item in year_dir.iterdir():
            if item.is_dir() and base_country_code.MONTH_PATTERN.match(item.name):
                month_dirs.append({
                    "path": item,
                    "month": int(item.name)
                })
        # Months in descending order.
        if month_dirs:
            for md in sorted(month_dirs, key=lambda x: x["month"], reverse=True):
                print(f"  Month: {md['month']:02d} | Path: {md['path']}")
                process_folder(md['path'])
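
# Assumed directory layout under base_country_code.download_dir (inferred
# from YEAR_PATTERN / MONTH_PATTERN):
#   download_dir/
#     2023/
#       01/   <- monthly Excel files with '出口' / '进口' in their names
#       02/
#     2024/
#       ...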

if __name__ == '__main__':
    hierarchical_traversal(base_country_code.download_dir)
    # root = Path(base_country_code.download_dir) / '2023' / '02'
    # process_folder(root)
    print("All Hebei (Shijiazhuang Customs) import/export commodity files processed!")