commit 05a1416276 · wangkun · 1 year ago

+ 1 - 1
benshanzhufu/benshanzhufu_main/run_bszf_recommend_dev.py → benshanzhufu/benshanzhufu_main/run_bszf_dev.py

@@ -5,7 +5,7 @@ import os
 import sys
 sys.path.append(os.getcwd())
 from common.common import Common
-from benshanzhufu.benshanzhufu_recommend.benshanzhufu_recommend_scheduling import BenshanzhufuRecommend
+from benshanzhufu.benshanzhufu_recommend.benshanzhufu_recommend_dev import BenshanzhufuRecommend
 
 
 def benshanzhufu_recommend_main(log_type, crawler, env):
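
Note: only the import target changes here; the entry point itself is untouched. A minimal invocation sketch for the renamed module (the argument values are assumptions for a dev run, mirroring the filter-config example commented out in the crawler's __main__ block):

    from benshanzhufu.benshanzhufu_main.run_bszf_dev import benshanzhufu_recommend_main
    benshanzhufu_recommend_main(log_type="recommend", crawler="benshanzhufu", env="dev")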

+ 262 - 0
benshanzhufu/benshanzhufu_recommend/benshanzhufu_recommend_dev.py

@@ -0,0 +1,262 @@
+# -*- coding: utf-8 -*-
+# @Author: wangkun
+# @Time: 2023/4/13
+import json
+import os
+import shutil
+import sys
+import time
+from hashlib import md5
+from urllib import parse
+import requests
+import urllib3
+sys.path.append(os.getcwd())
+from common.mq import MQ
+from common.common import Common
+from common.scheduling_db import MysqlHelper
+from common.feishu import Feishu
+from common.publish import Publish
+from common.public import get_config_from_mysql, download_rule
+from common.userAgent import get_random_user_agent
+proxies = {"http": None, "https": None}
+
+
+class BenshanzhufuRecommend:
+    platform = "本山祝福"
+
+    @classmethod
+    def repeat_video(cls, log_type, crawler, video_id, env):
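+        # A non-zero count means this out_video_id has already been crawled for this platform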
+        sql = f""" select * from crawler_video where platform in ("{crawler}","{cls.platform}") and out_video_id="{video_id}"; """
+        repeat_video = MysqlHelper.get_values(log_type, crawler, sql, env)
+        return len(repeat_video)
+
+    # Fetch videos from the recommend feed
+    @classmethod
+    def get_videoList(cls, log_type, crawler, our_uid, rule_dict, env):
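+        # Each accepted video is published to the per-environment ETL topic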
+        mq = MQ(topic_name="topic_crawler_etl_" + env)
+        # Pagination state: the API returns a visitor_key that is echoed back as ini_id
+        visitor_key = ""
+        page = 1
+        while True:
+            now = int(time.time() * 1000)
+            url = "https://bszf.wentingyou.cn/index.php/v111/index/index?parameter="
+            header = {
+                "content-time": str(now),
+                "chatKey": "wx0fb8149da961d3b0",
+                "cache-time": str(now),
+                "User-Agent": get_random_user_agent("pc"),
+                "Referer": "https://servicewechat.com/wx0fb8149da961d3b0/2/page-frame.html"
+            }
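+            # The endpoint takes a single "parameter" query arg: the JSON object below, URL-quoted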
+            parameter = {
+                "page": page,
+                "ini_id": visitor_key
+            }
+            params = parse.quote(json.dumps(parameter))
+            url = url + params
+            urllib3.disable_warnings()
+            proxy = Common.tunnel_proxies()
+            Common.logger(log_type, crawler).info(f"proxy:{proxy}")
+            r = requests.get(headers=header, url=url, proxies=proxy, verify=False)
+            if r.status_code != 200:
+                Common.logger(log_type, crawler).warning(f"get_videoList:{r.status_code}, {r.text}\n")
+                Common.logging(log_type, crawler, env, f"get_videoList:{r.status_code}, {r.text}\n")
+                return
+            # Parse the body once; .get() avoids a KeyError when "message" is absent
+            response = r.json()
+            if response.get("message") != "list success":
+                Common.logger(log_type, crawler).warning(f"get_videoList:{r.status_code}, {response}\n")
+                Common.logging(log_type, crawler, env, f"get_videoList:{r.status_code}, {r.text}\n")
+                return
+            elif "data" not in response:
+                Common.logger(log_type, crawler).warning(f"get_videoList:{r.status_code}, {response}\n")
+                Common.logging(log_type, crawler, env, f"get_videoList:{r.status_code}, {r.text}\n")
+                return
+            elif len(response["data"]["list"]) == 0:
+                Common.logger(log_type, crawler).info(f"没有更多数据了~ {response}\n")
+                Common.logging(log_type, crawler, env, f"没有更多数据了~ {response}\n")
+                return
+            else:
+                # Advance pagination: echo back the visitor_key returned with this page
+                visitor_key = response["data"]["visitor_key"]
+                page += 1
+                feeds = response["data"]["list"]
+                for i in range(len(feeds)):
+                    publish_time_stamp = feeds[i].get("update_time", 0)
+                    publish_time_str = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(publish_time_stamp))
+                    video_url = feeds[i].get("video_url", "")
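+                    # Only direct .mp4 links are kept; anything else is blanked and filtered out below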
+                    if ".mp4" not in video_url:
+                        video_url = ""
+
+                    video_dict = {
+                        'video_title': feeds[i].get("title", "").replace(" ", "").replace("'", "").replace('"', ""),
+                        'video_id': str(feeds[i].get("nid", "")),
+                        'play_cnt': 0,
+                        'comment_cnt': feeds[i].get("commentCount", 0),
+                        'like_cnt': 0,
+                        'share_cnt': 0,
+                        'publish_time_stamp': publish_time_stamp,
+                        'publish_time_str': publish_time_str,
+                        'user_name': "本山祝福",
+                        'user_id': "benshanzhufu",
+                        'avatar_url': feeds[i].get("video_cover", ""),
+                        'cover_url': feeds[i].get("video_cover", ""),
+                        'video_url': video_url,
+                        'session': f"benshanzhufu-{int(time.time())}"
+                    }
+                    for k, v in video_dict.items():
+                        Common.logger(log_type, crawler).info(f"{k}:{v}")
+                    Common.logging(log_type, crawler, env, f"video_dict:{video_dict}")
+
+                    # Skip invalid videos (missing id, cover, or playable URL)
+                    if video_dict["video_id"] == "" or video_dict["cover_url"] == "" or video_dict["video_url"] == "":
+                        Common.logger(log_type, crawler).info("无效视频\n")
+                        Common.logging(log_type, crawler, env, "无效视频\n")
+                    elif download_rule(log_type=log_type, crawler=crawler, video_dict=video_dict, rule_dict=rule_dict) is False:
+                        Common.logger(log_type, crawler).info("不满足抓取规则\n")
+                        Common.logging(log_type, crawler, env, "不满足抓取规则\n")
+                    elif any(str(word) in video_dict["video_title"]
+                             for word in get_config_from_mysql(log_type=log_type,
+                                                               source=crawler,
+                                                               env=env,
+                                                               text="filter",
+                                                               action="")):
+                        Common.logger(log_type, crawler).info('已中过滤词\n')
+                        Common.logging(log_type, crawler, env, '已中过滤词\n')
+                    elif cls.repeat_video(log_type, crawler, video_dict["video_id"], env) != 0:
+                        Common.logger(log_type, crawler).info('视频已下载\n')
+                        Common.logging(log_type, crawler, env, '视频已下载\n')
+                    else:
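+                        # Fill in the fields the ETL consumer expects, then publish to MQ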
+                        video_dict["out_user_id"] = video_dict["user_id"]
+                        video_dict["platform"] = crawler
+                        video_dict["strategy"] = log_type
+                        video_dict["out_video_id"] = video_dict["video_id"]
+                        video_dict["width"] = 0
+                        video_dict["height"] = 0
+                        video_dict["crawler_rule"] = json.dumps(rule_dict)
+                        video_dict["user_id"] = our_uid
+                        video_dict["publish_time"] = video_dict["publish_time_str"]
+                        video_dict["fans_cnt"] = 0
+                        video_dict["videos_cnt"] = 0
+                        mq.send_msg(video_dict)
+
+    # Download / upload
+    @classmethod
+    def download_publish(cls, log_type, crawler, our_uid, video_dict, rule_dict, env):
+        # Download the video file
+        Common.download_method(log_type=log_type, crawler=crawler, text='video', title=video_dict['video_title'], url=video_dict['video_url'])
+        md_title = md5(video_dict['video_title'].encode('utf8')).hexdigest()
+        try:
+            if os.path.getsize(f"./{crawler}/videos/{md_title}/video.mp4") == 0:
+                # Remove the video folder
+                shutil.rmtree(f"./{crawler}/videos/{md_title}")
+                Common.logger(log_type, crawler).info("视频size=0,删除成功\n")
+                return
+        except FileNotFoundError:
+            # Remove the video folder
+            shutil.rmtree(f"./{crawler}/videos/{md_title}")
+            Common.logger(log_type, crawler).info("视频文件不存在,删除文件夹成功\n")
+            return
+
+        ffmpeg_dict = Common.ffmpeg(log_type, crawler, f"./{crawler}/videos/{md_title}/video.mp4")
+        video_dict["duration"] = ffmpeg_dict["duration"]
+        video_dict["video_width"] = ffmpeg_dict["width"]
+        video_dict["video_height"] = ffmpeg_dict["height"]
+
+        # Download the cover image
+        Common.download_method(log_type=log_type, crawler=crawler, text='cover', title=video_dict['video_title'], url=video_dict['cover_url'])
+        # Save video info to a local txt file
+        Common.save_video_info(log_type=log_type, crawler=crawler, video_dict=video_dict)
+
+        # Upload the video; only the OSS endpoint and the admin link differ between dev and prod
+        Common.logger(log_type, crawler).info("开始上传视频...")
+        oss_endpoint = "out" if env == "dev" else "inner"
+        our_video_id = Publish.upload_and_publish(log_type=log_type,
+                                                  crawler=crawler,
+                                                  strategy="推荐榜爬虫策略",
+                                                  our_uid=our_uid,
+                                                  env=env,
+                                                  oss_endpoint=oss_endpoint)
+        if env == "dev":
+            our_video_link = f"https://testadmin.piaoquantv.com/cms/post-detail/{our_video_id}/info"
+        else:
+            our_video_link = f"https://admin.piaoquantv.com/cms/post-detail/{our_video_id}/info"
+
+        if our_video_id is None:
+            try:
+                # Upload failed: remove the video folder
+                shutil.rmtree(f"./{crawler}/videos/{md_title}")
+                return
+            except FileNotFoundError:
+                return
+
+        # Persist the video info to the database
+        # Note: values are interpolated directly into the SQL; quotes are stripped from titles upstream in get_videoList
+        insert_sql = f""" insert into crawler_video(video_id,
+                                                out_user_id,
+                                                platform,
+                                                strategy,
+                                                out_video_id,
+                                                video_title,
+                                                cover_url,
+                                                video_url,
+                                                duration,
+                                                publish_time,
+                                                play_cnt,
+                                                crawler_rule,
+                                                width,
+                                                height)
+                                                values({our_video_id},
+                                                "{video_dict['user_id']}",
+                                                "{cls.platform}",
+                                                "推荐榜爬虫策略",
+                                                "{video_dict['video_id']}",
+                                                "{video_dict['video_title']}",
+                                                "{video_dict['cover_url']}",
+                                                "{video_dict['video_url']}",
+                                                {int(video_dict['duration'])},
+                                                "{video_dict['publish_time_str']}",
+                                                {int(video_dict['play_cnt'])},
+                                                '{json.dumps(rule_dict)}',
+                                                {int(video_dict['video_width'])},
+                                                {int(video_dict['video_height'])}) """
+        Common.logger(log_type, crawler).info(f"insert_sql:{insert_sql}")
+        MysqlHelper.update_values(log_type, crawler, insert_sql, env, action='')
+        Common.logger(log_type, crawler).info('视频信息写入数据库成功')
+
+        # Append the video row to the Feishu sheet
+        Feishu.insert_columns(log_type, crawler, "440018", "ROWS", 1, 2)
+        upload_time = int(time.time())
+        values = [[time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(upload_time)),
+                   "推荐榜爬虫策略",
+                   video_dict['video_id'],
+                   video_dict['video_title'],
+                   our_video_link,
+                   video_dict['play_cnt'],
+                   video_dict['comment_cnt'],
+                   video_dict['like_cnt'],
+                   video_dict['share_cnt'],
+                   video_dict['duration'],
+                   f"{video_dict['video_width']}*{video_dict['video_height']}",
+                   video_dict['publish_time_str'],
+                   video_dict['user_name'],
+                   video_dict['user_id'],
+                   video_dict['avatar_url'],
+                   video_dict['cover_url'],
+                   video_dict['video_url']]]
+        time.sleep(0.5)
+        Feishu.update_values(log_type, crawler, "440018", "E2:Z2", values)
+        Common.logger(log_type, crawler).info("视频信息已保存至云文档\n")
+
+
+if __name__ == "__main__":
+    # print(get_config_from_mysql("recommend", "benshanzhufu", "dev", "filter"))
+    print(get_random_user_agent("pc"))
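
Note: in this dev variant the MQ message is the hand-off point; download_publish is retained but nothing in get_videoList calls it. For an isolated dry run of the list fetcher, a minimal sketch (our_uid and rule_dict are placeholder values):

    from benshanzhufu.benshanzhufu_recommend.benshanzhufu_recommend_dev import BenshanzhufuRecommend
    BenshanzhufuRecommend.get_videoList(log_type="recommend", crawler="benshanzhufu", our_uid=0, rule_dict={}, env="dev")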

+ 30 - 19
dev/mitm/start_selenium.py

@@ -1,29 +1,40 @@
 # -*- coding: utf-8 -*-
 # @Author: wangkun
 # @Time: 2023/9/20
+import os
+import time
+
 from selenium import webdriver
-from common.common import Common  # import the shared logging helper
+from selenium.webdriver import DesiredCapabilities
+from selenium.webdriver.chrome.service import Service
 
 
 def start_selenium():
-    # Create a ChromeOptions object
-    chrome_options = webdriver.ChromeOptions()
-    chrome_options.add_argument('--remote-debugging-port=8888')
-
-    # Start the Selenium WebDriver with the given options
-    driver = webdriver.Chrome(options=chrome_options)
-
-    # Open the Douyin homepage
-    driver.get('https://www.douyin.com/')
-
-    # Grab the page source
-    page_content = driver.page_source
-
-    # Pass the page content to the logging helper
-    Common.logger('mitm', 'dev').info(page_content)
-
-    # Shut down the WebDriver
+    # Kill any running Chrome so it can be relaunched with a debugging port
+    quit_cmd = "ps aux | grep Chrome | grep -v grep | awk '{print $2}' | xargs kill -9"
+    os.system(quit_cmd)
+    time.sleep(1)
+    # Launch Chrome with remote debugging on port 12306
+    cmd = 'open -a "Google Chrome" --args --remote-debugging-port=12306'
+    os.system(cmd)
+    # Enable Chrome performance logging (network events) via capabilities
+    ca = DesiredCapabilities.CHROME
+    ca["goog:loggingPrefs"] = {"performance": "ALL"}
+    # chromedriver build matching the installed Chrome (v114 here)
+    chromedriver = "/Users/wangkun/Downloads/chromedriver/chromedriver_v114/chromedriver"
+
+    # Attach to the already-running Chrome via its debugger address
+    browser = webdriver.ChromeOptions()
+    # browser.add_argument(f'--proxy-server={Common.tunnel_proxies()}')  # proxy IP address and port
+    browser.add_experimental_option("debuggerAddress", "127.0.0.1:12306")
+    # Initialize the driver (desired_capabilities is accepted on Selenium versions before 4.10)
+    driver = webdriver.Chrome(desired_capabilities=ca, options=browser, service=Service(chromedriver))
+    driver.implicitly_wait(10)
+    print("打开聂小雨")
+    driver.get("https://www.kuaishou.com/search/video?searchKey=%E8%81%82%E5%B0%8F%E9%9B%A8")  # Kuaishou video search for "聂小雨"
+    time.sleep(2)
+    driver.close()
     driver.quit()
 
 
-start_selenium()
+if __name__ == "__main__":
+    start_selenium()
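
Note: the goog:loggingPrefs capability enables Chrome's performance log, but nothing here reads it. If the aim is to capture network events alongside mitm, the entries can be drained before quitting; a sketch (the Network.* filter is an assumption about which events matter):

    import json
    for entry in driver.get_log("performance"):
        message = json.loads(entry["message"])["message"]
        if message.get("method", "").startswith("Network."):
            print(message["method"])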