wangkun 2 years ago
parent
commit
344e22e9c1

BIN
ganggangdouchuan/.DS_Store


+ 3 - 0
ganggangdouchuan/__init__.py

@@ -0,0 +1,3 @@
+# -*- coding: utf-8 -*-
+# @Author: wangkun
+# @Time: 2023/4/13

+ 3 - 0
ganggangdouchuan/ganggangdouchuan_main/__init__.py

@@ -0,0 +1,3 @@
+# -*- coding: utf-8 -*-
+# @Author: wangkun
+# @Time: 2023/4/13

+ 3 - 0
ganggangdouchuan/ganggangdouchuan_main/run_ganggangdouchuan_recommend.py

@@ -0,0 +1,3 @@
+# -*- coding: utf-8 -*-
+# @Author: wangkun
+# @Time: 2023/4/13

+ 3 - 0
ganggangdouchuan/ganggangdouchuan_recommend/__init__.py

@@ -0,0 +1,3 @@
+# -*- coding: utf-8 -*-
+# @Author: wangkun
+# @Time: 2023/4/13

+ 265 - 0
ganggangdouchuan/ganggangdouchuan_recommend/ganggangdouchuan_recommend.py

@@ -0,0 +1,265 @@
+# -*- coding: utf-8 -*-
+# @Author: wangkun
+# @Time: 2023/4/13
+import difflib
+import os
+import sys
+import time
+import ffmpeg
+from appium import webdriver
+from appium.webdriver.extensions.android.nativekey import AndroidKey
+from appium.webdriver.webdriver import WebDriver
+from selenium.common import NoSuchElementException
+from selenium.webdriver.common.by import By
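+# Make the repo root importable (the common package lives there) when this file is run as a script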
+sys.path.append(os.getcwd())
+from common.common import Common
+
+
+class GanggangdouchuanRecommend:
+    i = 0
+
+    @classmethod
+    def start_wechat(cls, log_type, crawler, oss_endpoint, env):
+        # try:
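+        # The chromedriver binary must match the Chrome/WebView version on the test device, otherwise webview automation fails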
+        if env == "dev":
+            chromedriverExecutable = '/Users/wangkun/Downloads/chromedriver/chromedriver_v107/chromedriver'
+        else:
+            chromedriverExecutable = '/Users/piaoquan/Downloads/chromedriver'
+
+        Common.logger(log_type, crawler).info('启动微信')
+        caps = {
+            "platformName": "Android",  # 手机操作系统 Android / iOS
+            "deviceName": "a0a65126",  # 连接的设备名(模拟器或真机),安卓可以随便写
+            "platforVersion": "11",  # 手机对应的系统版本(Android 11)
+            "appPackage": "com.tencent.mm",  # 被测APP的包名,乐活圈 Android
+            "appActivity": ".ui.LauncherUI",  # 启动的Activity名
+            "autoGrantPermissions": "true",  # 让 appium 自动授权 base 权限,
+            # 如果 noReset 为 True,则该条不生效(该参数为 Android 独有),对应的值为 True 或 False
+            "unicodekeyboard": True,  # 使用自带输入法,输入中文时填True
+            "resetkeyboard": True,  # 执行完程序恢复原来输入法
+            "noReset": True,  # 不重置APP
+            "printPageSourceOnFailure": True,  # 找不到元素时,appium log 会完整记录当前页面的 pagesource
+            "newCommandTimeout": 6000,  # 初始等待时间
+            "automationName": "UiAutomator2",  # 使用引擎,默认为 Appium,
+            # 其中 Appium、UiAutomator2、Selendroid、Espresso 用于 Android,XCUITest 用于 iOS
+            "showChromedriverLog": True,
+            'enableWebviewDetailsCollection': True,
+            'setWebContentsDebuggingEnabled': True,
+            'recreateChromeDriverSessions': True,
+            'chromedriverExecutable': chromedriverExecutable,
+            "chromeOptions": {"androidProcess": "com.tencent.mm:appbrand0"},
+            'browserName': ''
+        }
+        driver = webdriver.Remote("http://localhost:4723/wd/hub", caps)
+        driver.implicitly_wait(20)
+        # Wait for WeChat to finish launching, then swipe down to reveal the mini-program selection panel
+        for i in range(120):
+            try:
+                # The WeChat "messages" tab being present means WeChat has launched successfully
+                if driver.find_elements(By.ID, 'com.tencent.mm:id/f2s'):
+                    break
+                # Detect and dismiss the system notification shade
+                elif driver.find_element(By.ID, 'com.android.systemui:id/dismiss_view'):
+                    Common.logger(log_type, crawler).info('发现并关闭系统下拉菜单栏')
+                    driver.find_element(By.ID, 'com.android.systemui:id/dismiss_view').click()
+                else:
+                    pass
+            except NoSuchElementException:
+                time.sleep(1)
+        Common.logger(log_type, crawler).info('下滑,展示小程序选择面板')
+        size = driver.get_window_size()
+        driver.swipe(int(size['width'] * 0.5), int(size['height'] * 0.2),
+                     int(size['width'] * 0.5), int(size['height'] * 0.8), 200)
+        # Open the "刚刚都传" mini-program
+        time.sleep(3)
+        Common.logger(log_type, crawler).info('打开小程序"刚刚都传"')
+        driver.find_elements(By.XPATH, '//*[@text="刚刚都传"]')[-1].click()
+        cls.get_videoList(log_type, crawler, oss_endpoint, env, driver)
+        driver.quit()
+        Common.logger(log_type, crawler).info('退出微信成功\n')
+        # except Exception as e:
+        #     Common.logger(log_type, crawler).error(f'start_wechat异常:{e}\n')
+        #     cmd = "cd ~ && source .bash_profile && adb kill-server && adb start-server"
+        #     os.system(cmd)
+
+    @classmethod
+    def search_elements(cls, log_type, crawler, driver: WebDriver, element):
+        try:
+            windowHandles = driver.window_handles
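+            # Iterate over every window handle (one per webview window); return the elements from the first window that contains them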
+            for handle in windowHandles:
+                driver.switch_to.window(handle)
+                time.sleep(1)
+                if len(driver.find_elements(By.XPATH, element)) != 0:
+                    return driver.find_elements(By.XPATH, element)
+            return None
+        except Exception as e:
+            Common.logger(log_type, crawler).error(f'search_elements异常:{e}\n')
+
+    @classmethod
+    def get_video_url(cls, log_type, crawler, driver: WebDriver, video_element):
+        try:
+            time.sleep(1)
+            # Common.logger(log_type, crawler).info('进入视频详情')
+            video_element.click()
+            time.sleep(3)
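+            # The player on the detail page is assumed to be the <wx-video> element with id "v_id"; its src attribute is the playable video URL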
+            video_url_element = cls.search_elements(log_type, crawler, driver, '//wx-video[@id="v_id"]')
+            if video_url_element is None or len(video_url_element) == 0:
+                Common.logger(log_type, crawler).info('未获取到视频 URL')
+                return 0
+            else:
+                return video_url_element[0].get_attribute('src')
+        except Exception as e:
+            Common.logger(log_type, crawler).error(f'get_video_url异常:{e}\n')
+
+    @classmethod
+    def get_videoList(cls, log_type, crawler, oss_endpoint, env, driver: WebDriver):
+        # try:
+        driver.implicitly_wait(15)
+        Common.logger(log_type, crawler).info('切换到小程序\n')
+        time.sleep(5)
+        webviews = driver.contexts
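+        # Assumes contexts[0] is NATIVE_APP and contexts[1] is the mini-program WebView; switch into the WebView so XPath queries hit its DOM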
+        driver.switch_to.context(webviews[1])
+
+        time.sleep(1)
+        cls.search_elements(log_type, crawler, driver, '//wx-view[text()="视频"]')[0].click()
+
+        time.sleep(1)
+        index = 0
+
+        while True:
+            if cls.search_elements(log_type, crawler, driver, '//wx-view[@class="lists"]') is None:
+                Common.logger(log_type, crawler).info('窗口已销毁\n')
+                return
+
+            Common.logger(log_type, crawler).info('获取视频列表\n')
+            video_elements = cls.search_elements(log_type, crawler, driver, '//wx-view[@class="list"]')
+            if video_elements is None:
+                Common.logger(log_type, crawler).warning(f'video_elements:{video_elements}')
+                return
+
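+            # Slice off the items handled in earlier rounds; only the newly loaded list entries remain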
+            video_element_temp = video_elements[index:]
+            if len(video_element_temp) == 0:
+                Common.logger(log_type, crawler).info('到底啦~~~~~~~~~~~~~\n')
+                return
+
+            for i, video_element in enumerate(video_element_temp):
+                if video_element is None:
+                    Common.logger(log_type, crawler).info('到底啦~\n')
+                    return
+                cls.i += 1
+                cls.search_elements(log_type, crawler, driver, '//wx-view[@class="list"]')
+                Common.logger(log_type, crawler).info(f'拖动"视频"列表第{cls.i}个至屏幕中间')
+                time.sleep(3)
+                driver.execute_script("arguments[0].scrollIntoView({block:'center',inline:'center'})", video_element)
+
+                video_title = video_element.find_elements(By.XPATH, '//wx-view[@class="title"]//span[2]')[cls.i - 1].get_attribute('innerHTML')
+                cover_url = video_element.find_elements(By.XPATH, '//wx-image[@class="poster-img"]')[cls.i - 1].get_attribute('src')
+
+                Common.logger(log_type, crawler).info(f'video_title:{video_title}')
+                Common.logger(log_type, crawler).info(f'cover_url:{cover_url}')
+
+                # cls.download_publish(log_type, crawler, oss_endpoint, env, job, driver, video_element, video_title, cover_url)
+                # time.sleep(3)
+
+            Common.logger(log_type, crawler).info('已抓取完一组视频,休眠10秒\n')
+            time.sleep(10)
+            index = index + len(video_element_temp)
+
+        # except Exception as e:
+        #     Common.logger(log_type, crawler).error(f'get_recommend异常,重启APP:{e}\n')
+        #     cls.i = 0
+        #     cls.quit(log_type, driver)
+        #     cls.start_wechat(log_type, crawler, oss_endpoint, env, job)
+
+    # @classmethod
+    # def title_like(cls, log_type, title):
+    #     sheet = Feishu.get_values_batch(log_type, 'ggdc', '070a67')
+    #     for i in range(1, len(sheet)):
+    #         video_title = sheet[i][7]
+    #         if video_title is None:
+    #             pass
+    #         elif difflib.SequenceMatcher(None, title, video_title).quick_ratio() >= 0.8:
+    #             return True
+    #         else:
+    #             pass
+    #
+    # @classmethod
+    # def download_publish(cls, log_type, crawler, oss_endpoint, env, job, driver: WebDriver, video_element, video_title, cover_url):
+    #     try:
+    #         if video_title == 0 or cover_url == 0:
+    #             Common.logger(log_type, crawler).info('无效视频\n')
+    #         elif video_title in [x for y in Feishu.get_values_batch(log_type, 'ggdc', '070a67') for x in y]:
+    #             Common.logger(log_type, crawler).info('视频已下载\n')
+    #         elif any(word if word in video_title else False for word in cls.filter_words(log_type)) is True:
+    #             Common.logger(log_type, crawler).info('已中过滤词\n')
+    #         else:
+    #             video_url = cls.get_video_url(log_type, driver, video_element)
+    #             if video_url == 0:
+    #                 Common.logger(log_type, crawler).info('video_url:未获取到\n')
+    #             elif video_url in [x for y in Feishu.get_values_batch(log_type, 'ggdc', '070a67') for x in y]:
+    #                 Common.logger(log_type, crawler).info('视频已下载\n')
+    #             else:
+    #                 Common.logger(log_type, crawler).info(f'cover_url:{cover_url}')
+    #                 Common.logger(log_type, crawler).info(f'video_url:{video_url}')
+    #
+    #                 # 下载视频
+    #                 Common.download_method(log_type, 'video', video_title, video_url)
+    #                 # # 获取视频时长
+    #                 # video_info = cls.get_video_info_from_local(log_type, "./videos/" + video_title + "/video.mp4")
+    #                 # video_width = str(video_info[0])
+    #                 # video_height = str(video_info[1])
+    #                 # duration = video_info[2]
+    #                 # 下载封面
+    #                 Common.download_method(log_type, 'cover', video_title, cover_url)
+    #                 # 保存视频信息至 "./videos/{download_video_title}/info.txt"
+    #                 with open("./videos/" + video_title
+    #                           + "/" + "info.txt", "a", encoding="UTF-8") as f_a:
+    #                     f_a.write("ggdc" + str(int(time.time())) + "\n" +
+    #                               str(video_title) + "\n" +
+    #                               '100' + "\n" +
+    #                               '100000' + "\n" +
+    #                               '100000' + "\n" +
+    #                               '100000' + "\n" +
+    #                               '100000' + "\n" +
+    #                               '1920*1080' + "\n" +
+    #                               str(int(time.time())) + "\n" +
+    #                               '刚刚都传小程序' + "\n" +
+    #                               str(cover_url) + "\n" +
+    #                               str(video_url) + "\n" +
+    #                               str(cover_url) + "\n" +
+    #                               "ganggangdouchuan" + str(int(time.time())))
+    #                 Common.logger(log_type, crawler).info("==========视频信息已保存至info.txt==========")
+    #
+    #                 # 上传视频
+    #                 Common.logger(log_type, crawler).info(f"开始上传视频:{video_title}")
+    #                 if env == 'dev':
+    #                     our_video_id = Publish.upload_and_publish(log_type, crawler, oss_endpoint, env, job)
+    #                     our_video_link = "https://testadmin.piaoquantv.com/cms/post-detail/" + str(our_video_id) + "/info"
+    #                 else:
+    #                     our_video_id = Publish.upload_and_publish(log_type, crawler, oss_endpoint, env, job)
+    #                     our_video_link = "https://admin.piaoquantv.com/cms/post-detail/" + str(our_video_id) + "/info"
+    #                 Common.logger(log_type, crawler).info("视频上传完成")
+    #
+    #                 # 视频信息保存至飞书
+    #                 Feishu.insert_columns(log_type, "ggdc", "070a67", "ROWS", 1, 2)
+    #                 # 视频ID工作表,首行写入数据
+    #                 upload_time = int(time.time())
+    #                 values = [[time.strftime("%Y/%m/%d %H:%M:%S", time.localtime(upload_time)),
+    #                            "推荐榜",
+    #                            video_title,
+    #                            our_video_link,
+    #                            cover_url,
+    #                            video_url]]
+    #                 time.sleep(1)
+    #                 Feishu.update_values(log_type, "ggdc", "070a67", "F2:V2", values)
+    #                 driver.press_keycode(AndroidKey.BACK)
+    #                 Common.logger(log_type, crawler).info(f"视频:{video_title},下载/上传成功\n")
+    #     except Exception as e:
+    #         Common.logger(log_type, crawler).error(f'download_publish异常:{e}\n')
+
+
+if __name__ == '__main__':
+    GanggangdouchuanRecommend.start_wechat('recommend', 'ganggangdouchuan', 'out', 'dev')
+
+    pass

+ 181 - 0
ganggangdouchuan/ganggangdouchuan_recommend/insert.py

@@ -0,0 +1,181 @@
+# -*- coding: utf-8 -*-
+# @Author: wangkun
+# @Time: 2023/4/13
+import json
+import os
+import sys
+import time
+from datetime import date, timedelta
+sys.path.append(os.getcwd())
+from common.common import Common
+from common.feishu import Feishu
+from common.scheduling_db import MysqlHelper
+
+
+class Insert:
+    @classmethod
+    def get_config(cls, log_type, crawler, text, env):
+        select_sql = f"""select * from crawler_config where source="benshanzhufu" """
+        contents = MysqlHelper.get_values(log_type, crawler, select_sql, env, action='')
+        title_list = []
+        filter_list = []
+        for content in contents:
+            config = content['config']
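+            # The config column stores a Python-dict-style string; eval() parses it (assumes the stored config is trusted)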
+            config_dict = eval(config)
+            for k, v in config_dict.items():
+                if k == "title":
+                    title_list_config = v.split(",")
+                    for title in title_list_config:
+                        title_list.append(title)
+                if k == "filter":
+                    filter_list_config = v.split(",")
+                    for filter_word in filter_list_config:
+                        filter_list.append(filter_word)
+        if text == "title":
+            return title_list
+        elif text == "filter":
+            return filter_list
+
+    @classmethod
+    def before_day(cls):
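+        # Print the date string and Unix timestamp for 30 days ago (a small helper for publish-time rules)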
+        publish_time_str_rule = (date.today() + timedelta(days=-30)).strftime("%Y-%m-%d %H:%M:%S")
+        publish_time_stamp_rule = int(time.mktime(time.strptime(publish_time_str_rule, "%Y-%m-%d %H:%M:%S")))
+        print(publish_time_str_rule)
+        print(publish_time_stamp_rule)
+
+    @classmethod
+    def insert_config(cls, log_type, crawler, env):
+        filter_sheet = Feishu.get_values_batch(log_type, crawler, "DjXfqG")
+        # title_sheet = Feishu.get_values_batch(log_type, crawler, "bHSW1p")
+        filter_list = []
+        # title_list = []
+        for x in filter_sheet:
+            for y in x:
+                if y is None:
+                    pass
+                else:
+                    filter_list.append(y)
+        # for x in title_sheet:
+        #     for y in x:
+        #         if y is None:
+        #             pass
+        #         else:
+        #             title_list.append(y)
+        # str_title = ','.join(title_list)
+        str_filter = ','.join(filter_list)
+        config_dict = {
+            # "title": str_title,
+            "filter": str_filter
+        }
+        str_config_dict = str(config_dict)
+        # print(f"config_dict:{config_dict}")
+        # print(f"str_config_dict:{str_config_dict}")
+        insert_sql = f""" insert into crawler_config(title, source, config) values("本山祝福小程序", "benshanzhufu", "{str_config_dict}") """
+        MysqlHelper.update_values(log_type, crawler, insert_sql, env)
+
+    @classmethod
+    def insert_video_from_feishu_to_mysql(cls, log_type, crawler, env):
+        benshanzhufu_sheetid = ['440018']
+        for sheetid in benshanzhufu_sheetid:
+            xiaoniangao_sheet = Feishu.get_values_batch(log_type, crawler, sheetid)
+            for i in range(1, len(xiaoniangao_sheet)):
+            # for i in range(1, 3):
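+                # Skip incomplete rows, then derive the piaoquan video_id from the admin post-detail URL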
+                if xiaoniangao_sheet[i][5] is None or xiaoniangao_sheet[i][9] is None:
+                    continue
+                video_id = xiaoniangao_sheet[i][8].replace("https://admin.piaoquantv.com/cms/post-detail/", "").replace(
+                    "/info", "")
+                if video_id == "None":
+                    continue
+                video_id = int(video_id)
+                out_user_id = str(xiaoniangao_sheet[i][17])
+                platform = "本山祝福"
+                strategy = "推荐榜爬虫策略"
+                out_video_id = str(xiaoniangao_sheet[i][6])
+                video_title = str(xiaoniangao_sheet[i][7])
+                cover_url = str(xiaoniangao_sheet[i][19])
+                video_url = str(xiaoniangao_sheet[i][20])
+                duration = int(xiaoniangao_sheet[i][13])
+                publish_time = str(xiaoniangao_sheet[i][15]).replace("/", "-")
+                play_cnt = int(xiaoniangao_sheet[i][9])
+                like_cnt = int(xiaoniangao_sheet[i][11])
+                share_cnt = int(xiaoniangao_sheet[i][12])
+                # collection_cnt = 0
+                comment_cnt = int(xiaoniangao_sheet[i][10])
+                user_id = str(xiaoniangao_sheet[i][17])
+                crawler_rule = json.dumps({})
+                width = int(xiaoniangao_sheet[i][14].split("*")[0])
+                height = int(xiaoniangao_sheet[i][14].split("*")[1])
+
+                # print(f"video_id:{video_id}, type:{type(video_id)}")
+                # print(f"user_id:{user_id}, type:{type(user_id)}")
+                # print(f"out_user_id:{out_user_id}, type:{type(out_user_id)}")
+                # print(f"platform:{platform}, type:{type(platform)}")
+                # print(f"strategy:{strategy}, type:{type(strategy)}")
+                # print(f"out_video_id:{out_video_id}, type:{type(out_video_id)}")
+                # print(f"video_title:{video_title}, type:{type(video_title)}")
+                # print(f"cover_url:{cover_url}, type:{type(cover_url)}")
+                # print(f"video_url:{video_url}, type:{type(video_url)}")
+                # print(f"duration:{duration}, type:{type(duration)}")
+                # print(f"publish_time:{publish_time}, type:{type(publish_time)}")
+                # print(f"play_cnt:{play_cnt}, type:{type(play_cnt)}")
+                # print(f"like_cnt:{like_cnt}, type:{type(like_cnt)}")
+                # print(f"share_cnt:{share_cnt}, type:{type(share_cnt)}")
+                # print(f"comment_cnt:{comment_cnt}, type:{type(comment_cnt)}")
+                # print(f"crawler_rule:{crawler_rule}, type:{type(crawler_rule)}")
+                # print(f"width:{width}, type:{type(width)}")
+                # print(f"height:{height}, type:{type(height)}\n")
+
+                select_sql = f""" select * from crawler_video where platform="{platform}" and out_video_id="{out_video_id}" """
+                Common.logger(log_type, crawler).info(f"select_sql:{select_sql}")
+                repeat_video = MysqlHelper.get_values(log_type, crawler, select_sql, env)
+                Common.logger(log_type, crawler).info(f"repeat_video:{repeat_video}")
+
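+                # Dedup: only insert videos whose platform + out_video_id combination is not already in crawler_video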
+                if repeat_video is not None and len(repeat_video) != 0:
+                    Common.logger(log_type, crawler).info(f"{video_title} 已存在数据库中\n")
+                else:
+                    # Save the video info to the database
+                    insert_sql = f""" insert into crawler_video(video_id,
+                                        out_user_id,
+                                        platform,
+                                        strategy,
+                                        out_video_id,
+                                        video_title,
+                                        cover_url,
+                                        video_url,
+                                        duration,
+                                        publish_time,
+                                        play_cnt,
+                                        like_cnt,
+                                        share_cnt,
+                                        comment_cnt,
+                                        crawler_rule,
+                                        width,
+                                        height)
+                                        values({video_id},
+                                        "{out_user_id}",
+                                        "{platform}",
+                                        "{strategy}",
+                                        "{out_video_id}",
+                                        "{video_title}",
+                                        "{cover_url}",
+                                        "{video_url}",
+                                        {duration},
+                                        "{publish_time}",
+                                        {play_cnt},
+                                        {like_cnt},
+                                        {share_cnt},
+                                        {comment_cnt},
+                                        '{crawler_rule}',
+                                        {width},
+                                        {height}) """
+                    Common.logger(log_type, crawler).info(f"insert_sql:{insert_sql}")
+                    MysqlHelper.update_values(log_type, crawler, insert_sql, env, action='')
+                    Common.logger(log_type, crawler).info('视频信息插入数据库成功!\n')
+
+
+
+if __name__ == "__main__":
+    # Insert.insert_config("insert", "benshanzhufu", "dev")
+    # print(Insert.get_config("insert", "benshanzhufu", "filter", "dev"))
+    Insert.insert_video_from_feishu_to_mysql("insert-prod", "benshanzhufu", "prod")
+    pass

BIN
ganggangdouchuan/logs/.DS_Store