# sph_crawling_data.py

  1. import configparser
  2. import json
  3. import os
  4. import random
  5. import time
  6. import requests
  7. from common import Material, Oss, Common
  8. from common.sql_help import sqlCollect
  9. from data_channel.piaoquan import PQ
  10. from data_channel.shipinhao import SPH
  11. config = configparser.ConfigParser()
  12. config.read('./config.ini')
  13. class SphHistory:
  14. @classmethod
  15. def remove_files(cls, video_path_url):
  16. """
  17. 删除指定目录下的所有文件和子目录
  18. """
  19. if os.path.exists(video_path_url) and os.path.isdir(video_path_url):
  20. for root, dirs, files in os.walk(video_path_url):
  21. for file in files:
  22. file_path = os.path.join(root, file)
  23. os.remove(file_path)
  24. for dir in dirs:
  25. dir_path = os.path.join(root, dir)
  26. os.rmdir(dir_path)
  27. @classmethod
  28. def create_folders(cls):
  29. """
  30. 根据标示和任务标示创建目录
  31. """
  32. video_path_url = config['PATHS']['VIDEO_PATH']+"/sph_crawling/"
  33. # video_path_url = '/root/video_rewriting/path/sph_crawling/'
  34. if not os.path.exists(video_path_url):
  35. os.makedirs(video_path_url)
  36. return video_path_url
  37. """获取视频号所有内容"""
  38. @classmethod
  39. def sph_data_info(cls):
  40. user_list = cls.get_sph_user()
  41. video_path_url = cls.create_folders()
  42. if user_list == None:
  43. return
  44. for user in user_list:
  45. Common.logger("sph_crawling").info(f"{user}开始获取数据")
  46. account_id = SPH.get_account_id(user)
  47. if account_id == False:
  48. print(f"{account_id}:没有获取到视频account_id,无法抓取数据")
  49. continue
  50. url = "http://61.48.133.26:30001/FinderGetUpMasterNextPage"
  51. last_buffer = ""
  52. try:
  53. while True:
  54. headers = {
  55. 'Content-Type': 'application/json'
  56. }
  57. payload = json.dumps({
  58. "username": account_id,
  59. "last_buffer": last_buffer
  60. })
  61. response = requests.request("POST", url, headers=headers, data=payload)
  62. time.sleep(random.randint(1, 5))
  63. res_json = response.json()
  64. try:
  65. if len(res_json["DownloadAddress"]) == 0 or res_json["DownloadAddress"] == "" or res_json["DownloadAddress"] == None:
  66. break
  67. except:
  68. pass
  69. if "objectId" not in response.text or response.status_code != 200:
  70. break
  71. if len(res_json["UpMasterHomePage"]) == 0:
  72. break
  73. if not res_json["UpMasterHomePage"]:
  74. break
  75. last_buffer = res_json.get('last_buffer')
  76. for obj in res_json["UpMasterHomePage"]:
  77. Common.logger("sph_crawling").info(f"{user}扫描到一条数据")
  78. objectId = obj['objectId']
  79. objectNonceId = obj['objectNonceId']
  80. url = "http://61.48.133.26:30001/GetFinderDownloadAddress"
  81. payload = json.dumps({
  82. "objectId": objectId,
  83. "objectNonceId": objectNonceId
  84. })
  85. headers = {
  86. 'Content-Type': 'text/plain'
  87. }
  88. response = requests.request("POST", url, headers=headers, data=payload)
  89. time.sleep(random.randint(0, 1))
  90. video_obj = response.json()
  91. video_url = video_obj.get('DownloadAddress')
  92. if len(video_url) == 0:
  93. continue
  94. v_id = f"sph/{objectId}"
  95. Common.logger("sph_crawling").info(f"{user}视频ID:{v_id},视频链接:{video_url}开始发送oss")
  96. oss_video_key = Oss.channel_upload_oss(video_url, v_id) # 视频发送OSS
  97. oss_video_key = oss_video_key.get("oss_object_key")
  98. Common.logger("sph_crawling").info(f"{user}视频发送oss成功,视频oss地址{oss_video_key}")
  99. share_cnt = int(obj['forward_count']) # 分享
  100. like_cnt = int(obj['like_count']) # 点赞
  101. video_title = video_obj.get('title').split("\n")[0].split("#")[0]
  102. cover = video_obj.get('thumb_url')
  103. jpg_path = PQ.download_video_jpg(cover, video_path_url, v_id) # 下载视频封面
  104. if os.path.isfile(jpg_path):
  105. oss_jpg_key = Oss.stitching_fm_upload_oss(jpg_path, v_id) # 封面发送OSS
  106. oss_cover_key = oss_jpg_key.get("oss_object_key")
  107. Common.logger("sph_crawling").info(f"{user}封面发送oss成功,封面oss地址{oss_video_key}")
  108. else:
  109. oss_cover_key = ''
  110. Common.logger("sph_crawling").info(f"{user}封面发送oss失败")
  111. create_time = obj['createtime'] # 发布时间
  112. user_name = obj['username'] # 用户名标示
  113. nick_name = obj['nickname'] # 用户名
  114. comment_count = obj['comment_count'] # 评论数
  115. fav_count = obj['fav_count'] # 大拇指点赞数
  116. sqlCollect.sph_data_info('视频号', objectId, video_url, cover, video_title, str(share_cnt), str(like_cnt), oss_video_key, oss_cover_key, nick_name, user_name, comment_count, fav_count, create_time)
  117. Common.logger("sph_crawling").info(f"{nick_name}插入数据成功")
  118. cls.remove_files(video_path_url)
  119. return "完成"
  120. except Exception as e:
  121. Common.logger("sph_crawling").info(f"{user}异常,异常信息{e}")
  122. cls.remove_files(video_path_url)
  123. continue
  124. @classmethod
  125. def get_sph_user(cls):
  126. data = sqlCollect.sph_channel_user_list()
  127. if data == None:
  128. user_list = Material.get_sph_user()
  129. if user_list:
  130. for user in user_list:
  131. sqlCollect.insert_sph_channel_user("视频号", user)
  132. else:
  133. return None
  134. result_list = [item for sublist in data for item in sublist]
  135. return result_list
  136. if __name__ == '__main__':
  137. SphHistory.sph_data_info()