sph_crawling_data.py

import json
import random
import time

import requests

from common import Material, Oss, Common, Feishu
from common.sql_help import sqlCollect
from data_channel.data_help import dataHelp
from data_channel.shipinhao import SPH
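

# Pages through each WeChat Channels (视频号) account's historical feed via the
# FinderGetUpMasterNextPage service, resolves per-video download addresses,
# mirrors the video and cover files to OSS, persists the metadata through
# sqlCollect, and reports progress and failures to a Feishu bot.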
class SphHistory:
    """Fetch all historical content for the configured WeChat Channels (视频号) accounts."""

    @classmethod
    def sph_data_info(cls):
        user_list = cls.get_sph_user()
        if user_list is None:
            return
        for user in user_list:
            Common.logger("sph_crawling").info(f"{user}开始获取数据")
            account_id = SPH.get_account_id(user)
            if account_id is False:
                print(f"{user}:没有获取到视频account_id,无法抓取数据")
                continue
            # Use a dedicated name for the paging endpoint so the per-video
            # download-address request below cannot clobber it between pages.
            page_url = "http://61.48.133.26:30001/FinderGetUpMasterNextPage"
            last_buffer = ""
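            # FinderGetUpMasterNextPage appears to page with a cursor: each response
            # returns a last_buffer value that is echoed back in the next request, and
            # an empty body or empty UpMasterHomePage marks the end of the feed.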
            try:
                while True:
                    headers = {'Content-Type': 'application/json'}
                    payload = json.dumps({
                        "username": account_id,
                        "last_buffer": last_buffer
                    })
                    response = requests.post(page_url, headers=headers, data=payload)
                    time.sleep(random.randint(1, 5))
                    if not response.text:
                        break
                    res_json = response.json()
                    # A present-but-empty DownloadAddress field means there is nothing to fetch.
                    if "DownloadAddress" in res_json and not res_json["DownloadAddress"]:
                        break
                    if "objectId" not in response.text or response.status_code != 200:
                        break
                    if not res_json.get("UpMasterHomePage"):
                        break
                    last_buffer = res_json.get('last_buffer')
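                    # Each item on the page is resolved to a concrete download address
                    # via GetFinderDownloadAddress before its media is mirrored to OSS.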
                    for obj in res_json["UpMasterHomePage"]:
                        Common.logger("sph_crawling").info(f"{user}扫描到一条数据")
                        objectId = obj['objectId']
                        # Skip videos that are already recorded in the database.
                        if sqlCollect.sph_data_info_v_id(objectId, "视频号"):
                            continue
                        objectNonceId = obj['objectNonceId']
                        download_url = "http://61.48.133.26:30001/GetFinderDownloadAddress"
                        payload = json.dumps({
                            "objectId": objectId,
                            "objectNonceId": objectNonceId
                        })
                        headers = {'Content-Type': 'text/plain'}
                        response = requests.post(download_url, headers=headers, data=payload)
                        time.sleep(random.randint(0, 1))
                        video_obj = response.json()
                        video_url = video_obj.get('DownloadAddress')
                        cover = video_obj.get('thumb_url')
                        if not video_url:
                            continue
                        duration = dataHelp.video_duration(video_url)
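                        # OSS object keys follow the pattern sph/<objectId> for the video
                        # and sph/<objectId>.jpg for the cover image.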
                        v_id = f"sph/{objectId}"
                        try:
                            Common.logger("sph_crawling").info(f"{user}视频ID:{objectId},视频链接:{video_url}开始发送oss")
                            oss_video_key = Oss.channel_upload_oss(video_url, v_id)  # upload video to OSS
                            oss_video_key = oss_video_key.get("oss_object_key")
                            Common.logger("sph_crawling").info(f"{user}视频发送oss成功,视频oss地址{oss_video_key}")
                            Common.logger("sph_crawling").info(f"{user}视频ID:{objectId},封面链接:{cover}开始发送oss")
                            oss_cover_key = Oss.channel_upload_oss(cover, f"sph/{objectId}.jpg")  # upload cover to OSS
                            oss_cover_key = oss_cover_key.get("oss_object_key")
                            Common.logger("sph_crawling").info(f"{user}封面发送oss成功,封面oss地址{oss_cover_key}")
                            create_time = obj['createtime']  # publish timestamp
                        except Exception:
                            continue
                        share_cnt = int(obj['forward_count'])  # shares
                        like_cnt = int(obj['like_count'])  # likes
                        video_title = (video_obj.get('title') or "").split("\n")[0].split("#")[0]
                        user_name = obj['username']  # account identifier
                        nick_name = obj['nickname']  # display name
                        comment_count = obj['comment_count']  # comments
                        fav_count = obj['fav_count']  # thumbs-up favourites
                        sqlCollect.sph_data_info(
                            '视频号', objectId, video_url, cover, video_title, str(share_cnt),
                            str(like_cnt), oss_video_key, oss_cover_key, nick_name, user_name,
                            comment_count, fav_count, create_time, duration)
                        Common.logger("sph_crawling").info(f"{nick_name}插入数据成功")
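                # Paging for this user has ended: mark the account as crawled and send
                # a completion summary to the Feishu bot.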
                sqlCollect.update_sph_channel_user_status(user)
                Common.logger("sph_crawling").info(f"{user}用户抓取完成")
                count = sqlCollect.sph_data_info_count(user, "视频号")
                text = f"**{user}抓取完成共抓了{count}条数据**\n"
                Feishu.finish_bot(text,
                                  "https://open.feishu.cn/open-apis/bot/v2/hook/029fa989-9847-4574-8e1b-5c396e665f16",
                                  "【 视频号历史数据抓取通知 】")
            except Exception as e:
                Common.logger("sph_crawling").info(f"{user}异常,异常信息{e}")
                Feishu.finish_bot(str(e),
                                  "https://open.feishu.cn/open-apis/bot/v2/hook/029fa989-9847-4574-8e1b-5c396e665f16",
                                  "【 视频号抓取异常通知 】")
                continue
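
    # Returns a flat list of channel users to crawl; when the table is empty it
    # is first seeded from Material.get_sph_user().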
    @classmethod
    def get_sph_user(cls):
        data = sqlCollect.sph_channel_user_list()
        if data is None:
            user_list = Material.get_sph_user()
            if not user_list:
                return None
            for user in user_list:
                sqlCollect.insert_sph_channel_user("视频号", user)
            # Re-read the table so the freshly seeded rows come back in row form.
            data = sqlCollect.sph_channel_user_list()
        result_list = [item for sublist in data for item in sublist]
        return result_list


if __name__ == '__main__':
    SphHistory.sph_data_info()
    # count = sqlCollect.sph_data_info_count("郑蓝旗", "视频号")
    # print(count)