#!/usr/bin/env python
# coding=utf-8
"""
Incremental day-by-day data fetch - generic version.
Supports concurrent fetching and automatically skips dates that already have data.

Usage:
    python fetch_daily.py tasks/xxx/query.sql                             # fetch the last 7 days
    python fetch_daily.py tasks/xxx/query.sql --days 30                   # fetch the last 30 days
    python fetch_daily.py tasks/xxx/query.sql --start 20260101 --end 20260107
    python fetch_daily.py tasks/xxx/query.sql --date 20260105             # a single day
    python fetch_daily.py tasks/xxx/query.sql --force                     # force a re-fetch
    python fetch_daily.py tasks/xxx/query.sql --workers 10                # set day-level concurrency
    python fetch_daily.py tasks/xxx/query.sql --parallel 50               # multithreaded per-day download (default 50, recommended for large data)
    python fetch_daily.py tasks/xxx/query.sql --parallel 0                # disable multithreading, download single-threaded
"""
import argparse
import sys
import threading
from concurrent.futures import ThreadPoolExecutor, as_completed
from datetime import datetime, timedelta
from pathlib import Path

sys.path.insert(0, str(Path(__file__).parent / "lib"))
from odps_module import ODPSClient
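# ODPSClient is provided by the project-local lib/odps_module (not shown
# here). This script only assumes two methods on it:
# execute_sql_result_save_file(sql, path) for a single-threaded download,
# and execute_sql_result_save_file_parallel(sql, path, workers=N) for a
# multithreaded one.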

# Thread-safe counters shared by the worker threads
counter_lock = threading.Lock()
success_count = 0
fail_count = 0


def get_existing_dates(daily_dir):
    """Return the set of dates (YYYYMMDD) already downloaded into daily_dir."""
    existing = set()
    if not daily_dir.exists():
        return existing
    for f in daily_dir.glob("*.csv"):
        dt = f.stem
        if len(dt) == 8 and dt.isdigit():
            existing.add(dt)
    return existing


def get_date_range(start_str, end_str):
    """Return the list of YYYYMMDD dates from start_str to end_str, inclusive."""
    start = datetime.strptime(start_str, "%Y%m%d")
    end = datetime.strptime(end_str, "%Y%m%d")
    dates = []
    current = start
    while current <= end:
        dates.append(current.strftime("%Y%m%d"))
        current += timedelta(days=1)
    return dates
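
# For example (illustrative):
#   get_date_range("20260101", "20260103")
#   -> ["20260101", "20260102", "20260103"]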


def fetch_single_day(dt, sql_template, daily_dir, parallel_threads=0):
    """Fetch one day's data; returns a (dt, status, info) tuple."""
    global success_count, fail_count
    try:
        client = ODPSClient()
        sql = sql_template.replace("${dt}", dt)
        output_file = daily_dir / f"{dt}.csv"
        # Download to file
        if parallel_threads > 0:
            # Multithreaded parallel download (suited to large result sets)
            client.execute_sql_result_save_file_parallel(sql, str(output_file), workers=parallel_threads)
        else:
            # Single-threaded download
            client.execute_sql_result_save_file(sql, str(output_file))
        # Check the result
        if output_file.exists():
            with open(output_file, encoding="utf-8") as fh:
                row_count = sum(1 for _ in fh) - 1  # subtract the header row
            with counter_lock:
                success_count += 1
            if row_count > 0:
                return (dt, "success", row_count)
            else:
                return (dt, "empty", 0)
        else:
            with counter_lock:
                fail_count += 1
            return (dt, "fail", 0)
    except Exception as e:
        with counter_lock:
            fail_count += 1
        return (dt, "error", str(e))
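
# Illustrative return values:
#   ("20260105", "success", 1234)  # rows downloaded
#   ("20260106", "empty", 0)       # query ran but returned no rows
#   ("20260107", "error", "...")   # exception message as a string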


def main():
    global success_count, fail_count
    parser = argparse.ArgumentParser(description="Fetch data incrementally, one day at a time")
    parser.add_argument("sql_file", type=str, help="path to the SQL file")
    parser.add_argument("--days", type=int, default=7, help="fetch the last N days (default 7)")
    parser.add_argument("--start", type=str, help="start date, YYYYMMDD")
    parser.add_argument("--end", type=str, help="end date, YYYYMMDD")
    parser.add_argument("--date", type=str, help="single date, YYYYMMDD")
    parser.add_argument("--force", action="store_true", help="force a re-fetch")
    parser.add_argument("--workers", type=int, default=5, help="day-level concurrency (default 5)")
    parser.add_argument("--parallel", type=int, default=50, help="threads for the per-day download (default 50, recommended for large data; 0 disables)")
    args = parser.parse_args()

    # Resolve the SQL file path
    sql_file = Path(args.sql_file).resolve()
    if not sql_file.exists():
        print(f"Error: {sql_file} not found")
        return
    # Output directory: output/<SQL file stem>/ next to the SQL file
    output_dir = sql_file.parent / "output"
    daily_dir = output_dir / sql_file.stem
    daily_dir.mkdir(parents=True, exist_ok=True)
    print(f"SQL file: {sql_file}")
    print(f"Data directory: {daily_dir}")
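
    # Resulting layout (illustrative), for tasks/xxx/query.sql:
    #   tasks/xxx/output/query/20260101.csv
    #   tasks/xxx/output/query/20260102.csv
    #   ...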

    # Determine the date range
    if args.date:
        target_dates = [args.date]
    elif args.start and args.end:
        target_dates = get_date_range(args.start, args.end)
    else:
        # Default: the last N days, up to and including yesterday
        today = datetime.now()
        end_date = (today - timedelta(days=1)).strftime("%Y%m%d")
        start_date = (today - timedelta(days=args.days)).strftime("%Y%m%d")
        target_dates = get_date_range(start_date, end_date)
    print(f"Target dates: {target_dates[0]} ~ {target_dates[-1]} ({len(target_dates)} days)")

    # Check which dates already have data
    existing_dates = get_existing_dates(daily_dir)
    print(f"Already downloaded: {len(existing_dates)} days")
    # Determine which dates still need fetching
    if args.force:
        missing_dates = target_dates
        print(f"Force mode: re-fetching all {len(missing_dates)} days")
    else:
        missing_dates = [d for d in target_dates if d not in existing_dates]
        print(f"To fetch: {len(missing_dates)} days")
    if not missing_dates:
        print("Nothing to fetch, exiting")
        return

    # Read the SQL template
    sql_template = sql_file.read_text(encoding="utf-8")
    # Check whether the SQL contains the ${dt} variable
    has_dt_var = "${dt}" in sql_template
    # Reset the counters
    success_count = 0
    fail_count = 0

    # If the SQL contains no ${dt}, it only needs to run once
    if not has_dt_var:
        print("\nSQL contains no ${dt} variable, executing once...")
        output_file = output_dir / f"{sql_file.stem}.csv"
        output_file.parent.mkdir(parents=True, exist_ok=True)
        try:
            client = ODPSClient()
            if args.parallel > 0:
                client.execute_sql_result_save_file_parallel(sql_template, str(output_file), workers=args.parallel)
            else:
                client.execute_sql_result_save_file(sql_template, str(output_file))
            print(f"Output file: {output_file}")
        except Exception as e:
            print(f"✗ Execution failed: {e}")
        # One-shot query finished; do not fall through to the per-day fetch
        return

    # Fetch concurrently, one task per day
    workers = min(args.workers, len(missing_dates))
    if args.parallel > 0:
        print(f"\nStarting fetch (day-level concurrency: {workers}, per-day threads: {args.parallel})...")
    else:
        print(f"\nStarting fetch (concurrency: {workers})...")
    with ThreadPoolExecutor(max_workers=workers) as executor:
        futures = {
            executor.submit(fetch_single_day, dt, sql_template, daily_dir, args.parallel): dt
            for dt in missing_dates
        }
        completed = 0
        for future in as_completed(futures):
            completed += 1
            dt, status, info = future.result()
            if status == "success":
                print(f"  [{completed}/{len(missing_dates)}] ✓ {dt}: {info} rows")
            elif status == "empty":
                print(f"  [{completed}/{len(missing_dates)}] ⚠ {dt}: no data")
            elif status == "error":
                print(f"  [{completed}/{len(missing_dates)}] ✗ {dt}: {info}")
            else:
                print(f"  [{completed}/{len(missing_dates)}] ✗ {dt}: failed")

    print(f"\nDone! Succeeded: {success_count}, failed: {fail_count}")
    print(f"Data directory: {daily_dir}")


if __name__ == "__main__":
    main()