# re_search.py
  1. """
  2. @author: luojunhui
  3. """
  4. import time
  5. import json
  6. import pymysql
  7. import requests
  8. trace_id = "search-5b5343dc-b6a8-4f65-9e6b-e04b9961e530-1716955405"
  9. sql = f"""select trace_id, article_title, article_text, gh_id, account_name from long_articles_video where trace_id = '{trace_id}';"""
  10. connection = pymysql.connect(
  11. host="rm-bp1159bu17li9hi94.mysql.rds.aliyuncs.com", # 数据库IP地址,内网地址
  12. port=3306, # 端口号
  13. user="crawler", # mysql用户名
  14. passwd="crawler123456@", # mysql用户登录密码
  15. db="piaoquan-crawler", # 数据库名
  16. charset="utf8mb4" # 如果数据库里面的文本是utf8编码的,charset指定是utf8
  17. )
  18. cursor = connection.cursor()
  19. cursor.execute(sql)
  20. out_video_list = cursor.fetchall()
  21. result = out_video_list[0]
  22. params = {
  23. "trace_id": result[0],
  24. "title": result[1],
  25. "ghId": result[3],
  26. "content": result[2],
  27. "accountName": result[4]
  28. }
  29. # print(params)
  30. url = "http://localhost:8111/re_search_videos"
  31. a = time.time()
  32. header = {
  33. "Content-Type": "application/json",
  34. }
  35. response = requests.post(url, json=params, headers=header, timeout=600)
  36. b = time.time()
  37. print(response.text)
  38. print(b - a)
  39. print(json.dumps(response.json(), ensure_ascii=False, indent=4))