gov_commodity_jiangsu_import_export.py 7.3 KB

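"""Parse the Jiangsu (Nanjing Customs) commodity category/chapter import/export Excel
files and load the monthly figures into t_yujin_crossborder_prov_commodity_trade.

The download tree is traversed year by year and month by month. A month folder holds
either a single workbook (the relevant sheet is located by keyword) or, for 2023-01
through 2024-10, a set of per-topic files from which the "商品类章" file is picked.
"""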
import re
from pathlib import Path

import pandas as pd

from utils import base_country_code, base_mysql
from utils.base_country_code import format_sql_value
from utils.log import log

YEAR_PATTERN = re.compile(r"^\d{4}$")
MONTH_PATTERN = re.compile(r"^(0[1-9]|1[0-2])$")

all_records = []
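# all_records is filled from base_mysql.get_hs_all() in the __main__ block; each record
# supplies a (commodity_code, commodity_name) pair from the HS category/chapter list.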


def process_folder(path, all_records):
    """Parse the Excel file(s) in one month folder and store the rows via save_to_database."""
    file_paths = list(Path(path).glob('*'))
    if not file_paths:
        log.info("No files found")
        return

    year, month = base_country_code.extract_year_month_from_path(path)
    year_month = f'{year}-{month:02d}'

    if len(file_paths) == 1:
        # Single-workbook layout: all tables live in one file.
        file_path = file_paths[0]
        log.info(f"Processing single file: {file_path.name}")
        xls = pd.ExcelFile(file_path)
        import_df = pd.DataFrame()
        export_df = pd.DataFrame()
        total_df = pd.DataFrame()
        # Locate the sheet whose name contains "类章" (category/chapter).
        sheet_name = base_country_code.find_sheet_by_keyword(file_path, "类章")
        if not sheet_name:
            log.info(f"{file_path}: no sheet containing '类章' found")
            return
        skip_index = 4 if year_month == '2024-11' else 5
        df = pd.read_excel(xls, sheet_name=sheet_name, header=None).iloc[skip_index:]

        # Column 0 = commodity name; '--' marks missing values; figures are scaled by 10,000.
        temp_df = df[[0, 5]].rename(columns={0: 'commodity', 5: 'import'})
        temp_df['import'] = pd.to_numeric(temp_df['import'].replace('--', 0), errors='coerce')
        temp_df['import'] = temp_df['import'] * 10000
        import_df = pd.concat([import_df, temp_df])

        temp_df = df[[0, 3]].rename(columns={0: 'commodity', 3: 'export'})
        temp_df['export'] = pd.to_numeric(temp_df['export'].replace('--', 0), errors='coerce')
        temp_df['export'] = temp_df['export'] * 10000
        export_df = pd.concat([export_df, temp_df])

        temp_df = df[[0, 1]].rename(columns={0: 'commodity', 1: 'total'})
        temp_df['total'] = pd.to_numeric(temp_df['total'].replace('--', 0), errors='coerce')
        temp_df['total'] = temp_df['total'] * 10000
        total_df = pd.concat([total_df, temp_df])

        save_to_database(import_df, export_df, total_df, year, month, all_records)
    else:
        # Multi-file layout (2023-01 through 2024-10): pick the "商品类章" file.
        import_df = pd.DataFrame()
        export_df = pd.DataFrame()
        total_df = pd.DataFrame()
        for file in file_paths:
            if "商品类章" in file.name:
                log.info(f"Processing multi-file folder: {file.name}")
                file_path = file  # glob() already yields the full path
                df = pd.read_excel(file_path, header=None).iloc[6:]

                # Column 1 = commodity name; figures in this layout are scaled by 10.
                temp_df = df[[1, 5]].rename(columns={1: 'commodity', 5: 'import'})
                temp_df['import'] = pd.to_numeric(temp_df['import'].replace('--', 0), errors='coerce')
                temp_df['import'] = temp_df['import'] * 10
                import_df = pd.concat([import_df, temp_df])

                temp_df = df[[1, 3]].rename(columns={1: 'commodity', 3: 'export'})
                temp_df['export'] = pd.to_numeric(temp_df['export'].replace('--', 0), errors='coerce')
                temp_df['export'] = temp_df['export'] * 10
                export_df = pd.concat([export_df, temp_df])

                temp_df = df[[1, 2]].rename(columns={1: 'commodity', 2: 'total'})
                temp_df['total'] = pd.to_numeric(temp_df['total'].replace('--', 0), errors='coerce')
                temp_df['total'] = temp_df['total'] * 10
                total_df = pd.concat([total_df, temp_df])
                break
        save_to_database(import_df, export_df, total_df, year, month, all_records)


def save_to_database(import_df, export_df, total_df, year, month, all_records):
    """Merge the three frames by commodity and bulk-insert one SQL row per commodity."""
    # Merge directly (no groupby) so the original row order of the sheet is preserved.
    merged_df = pd.concat(
        [import_df.set_index('commodity'), export_df.set_index('commodity'), total_df.set_index('commodity')],
        axis=1, join='outer').reset_index()
    merged_df['original_order'] = merged_df.index  # keep the original order
    merged_df = merged_df.sort_values('original_order').reset_index(drop=True)

    sql_arr = []
    processed_commodities = set()
    all_records_index = 0
    year_month = f'{year}-{month:02d}'
    for _, row in merged_df.iterrows():
        commodity_name = str(row['commodity'])
        # Map the sheet row to an entry in the HS reference list.
        result = extract_category_or_chapter(commodity_name, all_records_index)
        if result is None:
            log.info(f"No matching ID found for commodity '{commodity_name}'")
            continue
        if result >= len(all_records):
            log.info(f"all_records index out of range for '{commodity_name}', skipping")
            continue
        all_records_index = result
        commodity_code, category_name = int(all_records[all_records_index][0]), str(all_records[all_records_index][1])
        if commodity_code in processed_commodities:
            continue
        monthly_import = round(row['import'], 4)
        monthly_export = round(row['export'], 4)
        monthly_total = round(row['total'], 4)
        sql = (f"INSERT INTO t_yujin_crossborder_prov_commodity_trade "
               f"(crossborder_year, crossborder_year_month, prov_code, prov_name, commodity_code, commodity_name, "
               f"monthly_total, monthly_export, monthly_import, create_time, commodity_source) VALUES "
               f"('{year}', '{year_month}', '320000', '江苏省', '{commodity_code}', '{category_name}', "
               f"{monthly_total}, {monthly_export}, {monthly_import}, now(), 1);")
        sql_arr.append(sql)
        processed_commodities.add(commodity_code)

    log.info(f"√ {year_month} generated {len(sql_arr)} SQL statements")
    base_mysql.bulk_insert(sql_arr)
    log.info(f"√ {year_month} prov_commodity_trade rows saved")


def extract_category_or_chapter(text, all_records_index):
    """Return the index into all_records for this row: 0 when the row is the first
    category ("第一类"/"第1类"), otherwise the next sequential index."""
    text = text.strip()
    # Match "第一类" or "第1类" (category headings)
    first_class_match = re.match(r'^第(一|\d+)类', text, re.IGNORECASE | re.UNICODE)
    if first_class_match and (first_class_match.group(1) == '1' or first_class_match.group(1) == '一'):
        return 0
    else:
        return all_records_index + 1


def hierarchical_traversal(root_path, all_records):
    """Walk the download tree level by level: province -> year -> month folders."""
    root = Path(root_path)
    # Collect the year directories
    year_dirs = [
        item for item in root.iterdir()
        if item.is_dir() and YEAR_PATTERN.match(item.name)
    ]
    # Years in descending order
    for year_dir in sorted(year_dirs, key=lambda x: x.name, reverse=True):
        # The full path looks like download/jiangsu/2025/03
        log.info(f"\nYear: {year_dir.name} | Province: jiangsu")
        # Collect the month directories
        month_dirs = []
        for item in year_dir.iterdir():
            if item.is_dir() and MONTH_PATTERN.match(item.name):
                month_dirs.append({
                    "path": item,
                    "month": int(item.name)
                })
        # Months in descending order
        if month_dirs:
            for md in sorted(month_dirs, key=lambda x: x["month"], reverse=True):
                log.info(f"  Month: {md['month']:02d} | Path: {md['path']}")
                process_folder(md['path'], all_records)


if __name__ == '__main__':
    all_records = base_mysql.get_hs_all()
    hierarchical_traversal(base_country_code.download_dir, all_records)

    # To reprocess a single month instead, e.g.:
    # root = Path(base_country_code.download_dir) / '2024' / '11'
    # process_folder(root, all_records)
    print("All Jiangsu (Nanjing Customs) category/chapter files processed!")