# dy_ls.py
  1. import random
  2. import time
  3. import requests
  4. import json
  5. from common import Common, Feishu, AliyunLogger
  6. from common.sql_help import sqlCollect
  7. class DYLS:
  8. @classmethod
  9. def get_dy_zr_list(cls, task_mark, url_id, number, mark, channel_id, name):
  10. url = "http://47.236.68.175:8889/crawler/dou_yin/blogger"
  11. list = []
  12. next_cursor = ''
  13. for i in range(20):
  14. try:
  15. payload = json.dumps({
  16. "account_id": url_id,
  17. "source": "app",
  18. "sort": "最热",
  19. "cursor": next_cursor
  20. })
  21. headers = {
  22. 'Content-Type': 'application/json'
  23. }
  24. response = requests.request("POST", url, headers=headers, data=payload)
  25. time.sleep(random.randint(1, 5))
  26. response = response.json()
  27. code = response['code']
  28. if code != 0:
  29. Common.logger("dy-ls").info(f"抖音历史数据获取失败,接口为/dou_yin/blogge\n")
  30. return list
  31. data_list = response['data']
  32. next_cursor = str(data_list['next_cursor'])
  33. data = data_list['data']
  34. for i in range(len(data)):
  35. video_id = data[i].get('aweme_id') # 文章id
  36. # status = sqlCollect.is_used(task_mark, video_id, mark, "抖音")
  37. # if status:
  38. status = sqlCollect.is_used(task_mark, video_id, mark, "抖音历史")
  39. video_uri = data[i].get('video', {}).get('play_addr', {}).get('uri')
  40. ratio = f'{data[i].get("video", {}).get("height")}p'
  41. video_url = f'https://www.iesdouyin.com/aweme/v1/play/?video_id={video_uri}&ratio={ratio}&line=0' # 视频链接
  42. digg_count = int(data[i].get('statistics').get('digg_count')) # 点赞
  43. share_count = int(data[i].get('statistics').get('share_count')) # 转发
  44. duration = data[i].get('duration')
  45. duration = duration / 1000
  46. old_title = data[i].get('desc', "").strip().replace("\n", "") \
  47. .replace("/", "").replace("\\", "").replace("\r", "") \
  48. .replace(":", "").replace("*", "").replace("?", "") \
  49. .replace("?", "").replace('"', "").replace("<", "") \
  50. .replace(">", "").replace("|", "").replace(" ", "") \
  51. .replace("&NBSP", "").replace(".", "。").replace(" ", "") \
  52. .replace("'", "").replace("#", "").replace("Merge", "")
  53. log_data = f"user:{url_id},,video_id:{video_id},,video_url:{video_url},,original_title:{old_title},,share_count:{share_count},,digg_count:{digg_count},,duration:{duration}"
  54. AliyunLogger.logging(channel_id, name, url_id, video_id, "扫描到一条视频", "2001", log_data)
  55. Common.logger("dy-ls").info(
  56. f"扫描:{task_mark},用户主页id:{url_id},视频id{video_id} ,分享:{share_count},点赞{digg_count}")
  57. if status:
  58. AliyunLogger.logging(channel_id, name, url_id, video_id, "该视频已改造过", "2002", log_data)
  59. continue
  60. video_percent = '%.2f' % (int(share_count) / int(digg_count))
  61. special = float(0.25)
  62. if int(share_count) < 500:
  63. AliyunLogger.logging(channel_id, name, url_id, video_id, "不符合规则:分享小于500", "2003", log_data)
  64. Common.logger("dy-ls").info(
  65. f"不符合规则:{task_mark},用户主页id:{url_id},视频id{video_id} ,分享:{share_count},点赞{digg_count} ,时长:{int(duration)} ")
  66. continue
  67. if float(video_percent) < special:
  68. AliyunLogger.logging(channel_id, name, url_id, video_id, "不符合规则:分享/点赞小于0.25", "2003", log_data)
  69. Common.logger("dy-ls").info(
  70. f"不符合规则:{task_mark},用户主页id:{url_id},视频id{video_id} ,分享:{share_count},点赞{digg_count} ,时长:{int(duration)} ")
  71. continue
  72. if int(duration) < 30 or int(duration) > 720:
  73. AliyunLogger.logging(channel_id, name, url_id, video_id, "不符合规则:时长不符合规则大于720秒/小于30秒", "2003", log_data)
  74. Common.logger("dy-ls").info(
  75. f"不符合规则:{task_mark},用户主页id:{url_id},视频id{video_id} ,分享:{share_count},点赞{digg_count} ,时长:{int(duration)} ")
  76. continue
  77. cover_url = data[i].get('video').get('cover').get('url_list')[0] # 视频封面
  78. all_data = {"video_id": video_id, "cover": cover_url, "video_url": video_url, "rule": video_percent,
  79. "old_title": old_title}
  80. list.append(all_data)
  81. AliyunLogger.logging(channel_id, name, url_id, video_id, "符合规则等待改造", "2004", log_data)
  82. if len(list) == int(number):
  83. Common.logger("dy-ls").info(f"获取抖音历史视频总数:{len(list)}\n")
  84. return list
  85. if next_cursor == False:
  86. return list
  87. except Exception as exc:
  88. Common.logger("dy-ls").info(f"抖音历史数据获取失败:{exc}\n")
  89. return list
  90. return list
  91. return list
  92. @classmethod
  93. def get_dyls_list(cls, task_mark, url_id, number, mark):
  94. next_cursor = ""
  95. for i in range(10):
  96. list = []
  97. try:
  98. # 抖查查
  99. url = "http://47.236.68.175:8889/crawler/dou_yin/blogger"
  100. payload = json.dumps({
  101. "account_id": url_id,
  102. "source": "抖查查",
  103. "cursor": next_cursor
  104. })
  105. headers = {
  106. 'Content-Type': 'application/json'
  107. }
  108. time.sleep(random.randint(1, 5))
  109. response = requests.request("POST", url, headers=headers, data=payload)
  110. response = response.json()
  111. data_all_list = response["data"]
  112. has_more = data_all_list["has_more"]
  113. next_cursor = str(data_all_list["next_cursor"])
  114. data_list = data_all_list["data"]
  115. for data in data_list:
  116. # comment_count = data["comment_count"]
  117. # download_count = data["download_count"]
  118. share_count = data["share_count"]
  119. good_count = data["good_count"]
  120. # collect_count = data["collect_count"]
  121. duration = data["duration"]
  122. video_id = data["video_id"]
  123. old_title = data["video_desc"]
  124. status = sqlCollect.is_used(video_id, mark, "抖音")
  125. if status:
  126. status = sqlCollect.is_used(video_id, mark, "抖音历史")
  127. if status == False:
  128. continue
  129. video_percent = '%.2f' % (int(share_count) / int(good_count))
  130. special = float(0.25)
  131. duration = duration / 1000
  132. if int(share_count) < 500 or float(video_percent) < special or int(duration) < 30 or int(duration) > 720:
  133. Common.logger("dy-ls").info(
  134. f"不符合规则:{task_mark},用户主页id:{url_id},视频id{video_id} ,分享:{share_count},点赞{good_count} ,时长:{int(duration)} ")
  135. continue
  136. video_url, image_url = cls.get_video(video_id)
  137. if video_url:
  138. all_data = {"video_id": video_id, "cover": image_url, "video_url": video_url, "rule": video_percent,
  139. "old_title": old_title}
  140. list.append(all_data)
  141. if len(list) == int(number):
  142. Common.logger("dy-ls").info(f"获取抖音历史视频总数:{len(list)}\n")
  143. return list
  144. else:
  145. Common.logger("dy-ls").info(f"抖音历史获取url失败")
  146. Feishu.finish_bot("dou_yin/detail接口无法获取到视频链接",
  147. "https://open.feishu.cn/open-apis/bot/v2/hook/575ca6a1-84b4-4a2f-983b-1d178e7b16eb",
  148. "【抖音异常提示 】")
  149. if has_more == False:
  150. return list
  151. except Exception as exc:
  152. Common.logger("dy-ls").info(f"抖音历史数据获取失败:{exc}\n")
  153. return list
  154. @classmethod
  155. def get_video(cls, video_id):
  156. url = "http://47.236.68.175:8889/crawler/dou_yin/detail"
  157. for i in range(3):
  158. payload = json.dumps({
  159. "content_id": str(video_id)
  160. })
  161. headers = {
  162. 'Content-Type': 'application/json'
  163. }
  164. response = requests.request("POST", url, headers=headers, data=payload)
  165. response = response.json()
  166. code = response["code"]
  167. if code == 10000:
  168. time.sleep(60)
  169. data = response["data"]["data"]
  170. video_url = data["video_url_list"][0]["video_url"]
  171. image_url = data["image_url_list"][0]["image_url"]
  172. return video_url, image_url
  173. return None, None
  174. if __name__ == '__main__':
  175. DYLS.get_dy_zr_list(1,2,1,3)
  176. # DYLS.get_dyls_list("1","MS4wLjABAAAA2QEvnEb7cQDAg6vZXq3j8_LlbO_DiturnV7VeybFKY4",1,"1")