publishCategoryArticles.py

  1. """
  2. @author: luojunhui
  3. 品类文章发布到aigc系统的冷启层
  4. """
  5. import datetime
  6. import json
  7. import time
  8. import traceback
  9. from pandas import DataFrame
  10. from applications import aiditApi, log, bot
  11. from config import apolloConfig
  12. apollo = apolloConfig()


class CategoryColdStartTask(object):
    """
    Category cold-start publishing task.
    """
    PUBLISHED_STATUS = 2
    INIT_STATUS = 1
    BAD_STATUS = 0

    def __init__(self, db_client):
        """
        :param db_client: database client used for reads and updates
        """
        self.db_client = db_client
        self.category_map = json.loads(apollo.getConfigValue("category_cold_start_map"))
        self.category_cold_start_threshold = json.loads(apollo.getConfigValue("category_cold_start_threshold"))
        self.READ_THRESHOLD = self.category_cold_start_threshold.get("READ_THRESHOLD", 5000)
        self.READ_TIMES_THRESHOLD = self.category_cold_start_threshold.get("READ_TIMES_THRESHOLD", 1.3)
        self.LIMIT_TITLE_LENGTH = self.category_cold_start_threshold.get("LIMIT_TITLE_LENGTH", 15)
        log(
            task="category_publish_task",
            function="__init__",
            message="数据库初始化连接完成,apollo配置获取完成",
            data={
                "category": self.category_map,
                "threshold": self.category_cold_start_threshold
            }
        )
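
    # Expected Apollo config shape, a sketch inferred from the keys read above
    # (the real values live in the Apollo configuration center):
    #   category_cold_start_map:        {"<category>": "<generate_plan_id>", ...}
    #   category_cold_start_threshold:  {"READ_THRESHOLD": 5000,
    #                                    "READ_TIMES_THRESHOLD": 1.3,
    #                                    "LIMIT_TITLE_LENGTH": 15}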

    def insert_into_db(self, crawler_plan_id, crawler_plan_name, create_timestamp):
        """
        Record a created crawler plan in the database.
        :param create_timestamp: creation time in milliseconds
        :param crawler_plan_id:
        :param crawler_plan_name:
        :return:
        """
        insert_sql = """
            INSERT INTO article_crawler_plan
                (crawler_plan_id, name, create_timestamp)
            VALUES
                (%s, %s, %s)
        """
        try:
            self.db_client.update(
                sql=insert_sql,
                params=(crawler_plan_id, crawler_plan_name, create_timestamp)
            )
        except Exception as e:
            bot(
                title="品类冷启任务,记录抓取计划id失败",
                detail={
                    "error": str(e),
                    "error_msg": traceback.format_exc(),
                    "crawler_plan_id": crawler_plan_id,
                    "crawler_plan_name": crawler_plan_name
                }
            )

    def get_articles_from_meta_table(self, category, article_source):
        """
        Fetch cold-start candidate articles from the long-article meta table.
        :param category: article category
        :param article_source: article platform, e.g. "weixin" or "toutiao"
        :return: DataFrame of candidate articles
        """
        sql = f"""
            SELECT
                article_id, out_account_id, article_index, title, link, read_cnt, status
            FROM
                crawler_meta_article
            WHERE
                category = "{category}" and platform = "{article_source}";
        """
        article_list = self.db_client.select(sql)
        log(
            task="category_publish_task",
            function="get_articles_from_meta_table",
            message="获取品类文章总数",
            data={
                "total_articles": len(article_list),
                "category": category
            }
        )
        article_df = DataFrame(
            article_list,
            columns=['article_id', 'gh_id', 'position', 'title', 'link', 'read_cnt', 'status']
        )
        return article_df

    def change_article_status(self, category):
        """
        Mark articles that have already entered the category's generate plan as published.
        :param category: article category
        :return:
        """
        plan_id = self.category_map.get(category)
        if plan_id:
            article_list = aiditApi.get_generated_article_list(plan_id)
            title_list = [i[1] for i in article_list]
            if title_list:
                # update all matched titles in one statement
                update_sql = """
                    UPDATE
                        crawler_meta_article
                    SET
                        status = %s
                    WHERE
                        title in %s and status = %s;
                """
                self.db_client.update(
                    sql=update_sql,
                    params=(self.PUBLISHED_STATUS, tuple(title_list), self.INIT_STATUS)
                )
        else:
            return

    def change_article_status_while_publishing(self, article_id_list):
        """
        Mark the given articles as published once they are bound to a generate plan.
        :param article_id_list: unique article ids
        :return:
        """
        update_sql = """
            UPDATE
                crawler_meta_article
            SET
                status = %s
            WHERE
                article_id in %s and status = %s;
        """
        affect_rows = self.db_client.update(
            sql=update_sql,
            params=(self.PUBLISHED_STATUS, tuple(article_id_list), self.INIT_STATUS)
        )
        if affect_rows != len(article_id_list):
            bot(
                title="品类冷启任务中,出现更新文章状态失败异常",
                detail={
                    "affected_rows": affect_rows,
                    "task_rows": len(article_id_list)
                }
            )

    def filter_weixin_articles(self, articles_df, category):
        """
        Filtering funnel for articles crawled from WeChat.
        """
        articles_df['average_read'] = articles_df.groupby(['gh_id', 'position'])['read_cnt'].transform('mean')
        articles_df['read_times'] = articles_df['read_cnt'] / articles_df['average_read']
        total_length = articles_df.shape[0]
        # level 0: drop articles that have already been published
        zero_level_funnel_df = articles_df[articles_df['status'] == self.INIT_STATUS]
        zero_level_funnel_length = zero_level_funnel_df.shape[0]
        # level 1: filter by read count relative to the account/position average
        first_level_funnel_df = zero_level_funnel_df[zero_level_funnel_df['read_times'] >= self.READ_TIMES_THRESHOLD]
        first_level_funnel_length = first_level_funnel_df.shape[0]
        # level 2: filter by absolute read count
        second_level_funnel_df = first_level_funnel_df[
            first_level_funnel_df['read_cnt'] >= self.READ_THRESHOLD
        ]
        second_level_funnel_length = second_level_funnel_df.shape[0]
        # level 3: filter by title length
        third_level_funnel_df = second_level_funnel_df[
            second_level_funnel_df['title'].str.len() >= self.LIMIT_TITLE_LENGTH
        ]
        third_level_funnel_length = third_level_funnel_df.shape[0]
        # final level: filter out titles containing sensitive keywords
        filter_df = third_level_funnel_df[
            (~third_level_funnel_df['title'].str.contains('农历'))
            & (~third_level_funnel_df['title'].str.contains('太极'))
            & (~third_level_funnel_df['title'].str.contains('节'))
            & (~third_level_funnel_df['title'].str.contains('早上好'))
            & (~third_level_funnel_df['title'].str.contains('赖清德'))
            & (~third_level_funnel_df['title'].str.contains('普京'))
            & (~third_level_funnel_df['title'].str.contains('俄'))
            & (~third_level_funnel_df['title'].str.contains('南海'))
            & (~third_level_funnel_df['title'].str.contains('台海'))
            & (~third_level_funnel_df['title'].str.contains('解放军'))
            & (~third_level_funnel_df['title'].str.contains('蔡英文'))
            & (~third_level_funnel_df['title'].str.contains('中国'))
        ]
        final_length = filter_df.shape[0]
        log(
            task="category_publish_task",
            function="publish_filter_articles",
            message="过滤后文章总数",
            data={
                "total_articles": final_length,
                "category": category
            }
        )
        bot(
            title="冷启任务发布通知",
            detail={
                "总文章数量": total_length,
                "通过已经发布状态过滤": "过滤数量: {} 剩余数量: {}".format(
                    total_length - zero_level_funnel_length, zero_level_funnel_length),
                "通过阅读均值倍数过滤": "过滤数量: {} 剩余数量: {}".format(
                    zero_level_funnel_length - first_level_funnel_length, first_level_funnel_length),
                "通过阅读量过滤": "过滤数量: {} 剩余数量: {}".format(
                    first_level_funnel_length - second_level_funnel_length, second_level_funnel_length),
                "通过标题长度过滤": "过滤数量: {} 剩余数量: {}".format(
                    second_level_funnel_length - third_level_funnel_length, third_level_funnel_length),
                "通过敏感词过滤": "过滤数量: {} 剩余数量: {}".format(
                    third_level_funnel_length - final_length, final_length),
                "品类": category,
                "阅读均值倍数阈值": self.READ_TIMES_THRESHOLD,
                "阅读量阈值": self.READ_THRESHOLD,
                "标题长度阈值": self.LIMIT_TITLE_LENGTH
            },
            mention=False
        )
        return filter_df

    def filter_toutiao_articles(self, articles_df, category):
        """
        Filtering funnel for articles crawled from Toutiao.
        """
        total_length = articles_df.shape[0]
        # level 0: drop articles that have already been published
        zero_level_funnel_df = articles_df[articles_df['status'] == self.INIT_STATUS]
        zero_level_funnel_length = zero_level_funnel_df.shape[0]
        bot(
            title="账号冷启动---头条推荐流发布",
            detail={
                "category": category,
                "总文章数量": total_length,
                "通过已经发布状态过滤": "过滤数量: {} 剩余数量: {}".format(
                    total_length - zero_level_funnel_length, zero_level_funnel_length),
            },
            mention=False
        )
        return zero_level_funnel_df

    def publish_filter_articles(self, category, articles_df, article_source):
        """
        Filter the candidate articles and publish them as a crawler plan.
        :param category: article category
        :param articles_df: DataFrame of articles in that category
        :param article_source: article source platform
        :return:
        """
        # structural pattern matching requires Python 3.10+
        match article_source:
            case "weixin":
                filtered_articles_df = self.filter_weixin_articles(articles_df, category)
                input_source_channel = 5
            case "toutiao":
                filtered_articles_df = self.filter_toutiao_articles(articles_df, category)
                input_source_channel = 6
            case _:
                return

        url_list = filtered_articles_df['link'].values.tolist()
        if url_list:
            # create a crawler plan in the aigc system
            crawler_plan_response = aiditApi.auto_create_crawler_task(
                plan_id=None,
                plan_name="自动绑定-{}--{}--{}".format(category, str(datetime.date.today()), len(url_list)),
                plan_tag="品类冷启动",
                article_source=article_source,
                url_list=url_list
            )
            log(
                task="category_publish_task",
                function="publish_filter_articles",
                message="成功创建抓取计划",
                data=crawler_plan_response
            )
            # record the crawler plan in the database (timestamp in milliseconds)
            create_timestamp = int(time.time()) * 1000
            crawler_plan_id = crawler_plan_response['data']['id']
            crawler_plan_name = crawler_plan_response['data']['name']
            self.insert_into_db(crawler_plan_id, crawler_plan_name, create_timestamp)
            # auto-bind the crawler plan to the category's generate plan
            new_crawler_task_list = [
                {
                    "contentType": 1,
                    "inputSourceType": 2,
                    "inputSourceSubType": None,
                    "fieldName": None,
                    "inputSourceValue": crawler_plan_id,
                    "inputSourceLabel": crawler_plan_name,
                    "inputSourceModal": 3,
                    "inputSourceChannel": input_source_channel
                }
            ]
            generate_plan_response = aiditApi.bind_crawler_task_to_generate_task(
                crawler_task_list=new_crawler_task_list,
                generate_task_id=self.category_map[category]
            )
            log(
                task="category_publish_task",
                function="publish_filter_articles",
                message="成功绑定到生成计划",
                data=generate_plan_response
            )
            # mark the bound articles as published
            article_id_list = filtered_articles_df['article_id'].values.tolist()
            self.change_article_status_while_publishing(article_id_list=article_id_list)

    def do_job(self, article_source, category_list=None):
        """
        Run the cold-start publishing task.
        :param article_source: article source platform, e.g. "weixin" or "toutiao"
        :param category_list: categories to process; defaults to all configured categories
        :return:
        """
        if not category_list:
            category_list = self.category_map.keys()
        log(
            task="category_publish_task",
            function="do_job",
            message="开始自动创建品类文章抓取计划",
            data={
                "category_list": list(category_list)
            }
        )
        for category in category_list:
            try:
                category_df = self.get_articles_from_meta_table(category=category, article_source=article_source)
                self.publish_filter_articles(
                    category=category,
                    articles_df=category_df,
                    article_source=article_source
                )
            except Exception as e:
                bot(
                    title="品类冷启任务报错",
                    detail={
                        "category": category,
                        "error": str(e),
                        "function": "do_job",
                        "traceback": traceback.format_exc()
                    }
                )
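

# Minimal usage sketch, not part of the original module. It assumes `db_client`
# is an object exposing the `select(sql)` and `update(sql, params)` methods used
# above; the concrete client class lives elsewhere in the repository.
if __name__ == "__main__":
    db_client = ...  # assumption: construct the project's MySQL client here
    task = CategoryColdStartTask(db_client=db_client)
    # publish WeChat candidates for every category configured in Apollo
    task.do_job(article_source="weixin")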