gov_commodity_jiangsu_import_export.py 7.2 KB
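"""Parse Jiangsu (Nanjing customs) commodity category/chapter import-export
workbooks and load the monthly trade figures into MySQL.

Expected layout under base_country_code.download_dir: <year>/<month>/ folders,
each holding one or more Excel workbooks for that month.
"""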

import re
from pathlib import Path

import pandas as pd

from utils import base_country_code, base_mysql
from utils.log import log

YEAR_PATTERN = re.compile(r"^\d{4}$")
MONTH_PATTERN = re.compile(r"^(0[1-9]|1[0-2])$")

all_records = []


def process_folder(path, all_records):
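    """Parse one <year>/<month> folder and persist its commodity figures.

    A folder with a single workbook (2024-11 and later) is read from a fixed
    sheet/row offset; older folders (2023-01 through 2024-10) contain one file
    per table, of which only the commodity category/chapter ("商品类章") file
    is used. Import, export and total columns are collected into three frames
    and handed to save_to_database().
    """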
    file_paths = list(Path(path).glob('*'))
    if not file_paths:
        log.info("No files found")
        return
    year, month = base_country_code.extract_year_month_from_path(path)
    year_month = f'{year}-{month:02d}'
    if len(file_paths) == 1:
        file_path = file_paths[0]
        log.info(f"Processing single file: {file_path.name}")
        # Open the workbook; sheet index and rows to skip differ for 2024-11
        xls = pd.ExcelFile(file_path)
        import_df = pd.DataFrame()
        export_df = pd.DataFrame()
        total_df = pd.DataFrame()
        skip_index = 4 if year_month == '2024-11' else 5
        sheet_index = 6 if year_month == '2024-11' else 4
        df = pd.read_excel(xls, sheet_name=sheet_index, header=None).iloc[skip_index:]
        # Column 0: commodity name, 5: import, 3: export, 1: total; '--' means no value
        temp_df = df[[0, 5]].rename(columns={0: 'commodity', 5: 'import'})
        temp_df['import'] = pd.to_numeric(temp_df['import'].replace('--', 0), errors='coerce')
        temp_df['import'] = temp_df['import'] * 10000
        import_df = pd.concat([import_df, temp_df])
        temp_df = df[[0, 3]].rename(columns={0: 'commodity', 3: 'export'})
        temp_df['export'] = pd.to_numeric(temp_df['export'].replace('--', 0), errors='coerce')
        temp_df['export'] = temp_df['export'] * 10000
        export_df = pd.concat([export_df, temp_df])
        temp_df = df[[0, 1]].rename(columns={0: 'commodity', 1: 'total'})
        temp_df['total'] = pd.to_numeric(temp_df['total'].replace('--', 0), errors='coerce')
        temp_df['total'] = temp_df['total'] * 10000
        total_df = pd.concat([total_df, temp_df])
        save_to_database(import_df, export_df, total_df, year, month, all_records)
    else:  # 2023-01 through 2024-10: one file per table
        import_df = pd.DataFrame()
        export_df = pd.DataFrame()
        total_df = pd.DataFrame()
        for file in file_paths:
            if "商品类章" in file.name:  # only the commodity category/chapter file
                log.info(f"Processing file from multi-file folder: {file.name}")
                # glob() already yields the full path, so read the file directly
                df = pd.read_excel(file, header=None).iloc[6:]
                # Column 1: commodity name, 5: import, 3: export, 2: total; '--' means no value
                temp_df = df[[1, 5]].rename(columns={1: 'commodity', 5: 'import'})
                temp_df['import'] = pd.to_numeric(temp_df['import'].replace('--', 0), errors='coerce')
                temp_df['import'] = temp_df['import'] * 10
                import_df = pd.concat([import_df, temp_df])
                temp_df = df[[1, 3]].rename(columns={1: 'commodity', 3: 'export'})
                temp_df['export'] = pd.to_numeric(temp_df['export'].replace('--', 0), errors='coerce')
                temp_df['export'] = temp_df['export'] * 10
                export_df = pd.concat([export_df, temp_df])
                temp_df = df[[1, 2]].rename(columns={1: 'commodity', 2: 'total'})
                temp_df['total'] = pd.to_numeric(temp_df['total'].replace('--', 0), errors='coerce')
                temp_df['total'] = temp_df['total'] * 10
                total_df = pd.concat([total_df, temp_df])
                break
        save_to_database(import_df, export_df, total_df, year, month, all_records)


def save_to_database(import_df, export_df, total_df, year, month, all_records):
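    """Outer-join the import/export/total frames on commodity name, map each
    row in order to a (commodity_code, category_name) entry of all_records,
    and upsert the monthly figures into t_yujin_crossborder_prov_commodity_trade
    for Jiangsu (prov_code 320000).
    """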
    # Merge directly (no groupby) so the original row order is preserved
    merged_df = pd.concat(
        [import_df.set_index('commodity'), export_df.set_index('commodity'), total_df.set_index('commodity')],
        axis=1, join='outer').reset_index()
    merged_df['original_order'] = merged_df.index  # remember the original order
    merged_df = merged_df.sort_values('original_order').reset_index(drop=True)
    sql_arr = []
    processed_commodities = set()
    all_records_index = 0
    year_month = f'{year}-{month:02d}'
    for _, row in merged_df.iterrows():
        commodity_name = str(row['commodity'])
        # Find the category/chapter record this row corresponds to
        result = extract_category_or_chapter(commodity_name, all_records_index)
        if result is None:
            log.info(f"No matching ID found for commodity name '{commodity_name}'")
            continue
        if result >= len(all_records):
            log.info(f"all_records index out of range for '{commodity_name}', skipping")
            continue
        all_records_index = result
        commodity_code, category_name = int(all_records[all_records_index][0]), str(all_records[all_records_index][1])
        if commodity_code in processed_commodities:
            continue
        monthly_import = round(row['import'], 4)
        monthly_export = round(row['export'], 4)
        monthly_total = round(row['total'], 4)
        sql = (f"INSERT INTO t_yujin_crossborder_prov_commodity_trade "
               f"(crossborder_year, crossborder_year_month, prov_code, prov_name, commodity_code, commodity_name, "
               f"monthly_total, monthly_export, monthly_import, create_time, commodity_source) VALUES "
               f"('{year}', '{year_month}', '320000', '江苏省', '{commodity_code}', '{category_name}', "
               f"{monthly_total}, {monthly_export}, {monthly_import}, now(), 1) "
               f"ON DUPLICATE KEY UPDATE create_time = now();")
        sql_arr.append(sql)
        processed_commodities.add(commodity_code)
    log.info(f"√ {year_month} generated {len(sql_arr)} SQL statements")
    base_mysql.bulk_insert(sql_arr)
    log.info(f"√ {year_month} prov_commodity_trade saved to database!")


def extract_category_or_chapter(text, all_records_index):
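    """Return the next index into all_records for this commodity row.

    A row labelled "第一类"/"第1类" (Category 1) resets the cursor to 0; any
    other row advances it by one, relying on the workbook listing categories
    and chapters in the same order as all_records.
    """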
    text = text.strip()
    # Match "第一类" or "第1类"
    first_class_match = re.match(r'^第(一|\d+)类', text, re.IGNORECASE | re.UNICODE)
    if first_class_match and (first_class_match.group(1) == '1' or first_class_match.group(1) == '一'):
        return 0
    else:
        return all_records_index + 1


def hierarchical_traversal(root_path, all_records):
    """Walk the tree level by level: province -> year -> month directories."""
    root = Path(root_path)
    # Collect all year directories
    year_dirs = [
        item for item in root.iterdir()
        if item.is_dir() and YEAR_PATTERN.match(item.name)
    ]
    # Years in descending order
    for year_dir in sorted(year_dirs, key=lambda x: x.name, reverse=True):
        # Full paths look like download/jiangsu/2025/03
        log.info(f"\nYear: {year_dir.name} | Province: jiangsu")
        # Collect the month directories
        month_dirs = []
        for item in year_dir.iterdir():
            if item.is_dir() and MONTH_PATTERN.match(item.name):
                month_dirs.append({
                    "path": item,
                    "month": int(item.name)
                })
        # Process months in descending order
        if month_dirs:
            for md in sorted(month_dirs, key=lambda x: x["month"], reverse=True):
                log.info(f"  Month: {md['month']:02d} | Path: {md['path']}")
                process_folder(md['path'], all_records)


if __name__ == '__main__':
    all_records = base_mysql.get_hs_all()
    hierarchical_traversal(base_country_code.download_dir, all_records)
    # To reprocess a single month instead, point process_folder at it directly:
    # root = Path(base_country_code.download_dir) / '2024' / '11'
    # process_folder(root, all_records)
    print("All Jiangsu (Nanjing customs) commodity category/chapter files processed!")