aiditApi.py

  1. """
  2. @author: luojunhui
  3. 通过抓包 aigc 平台,自动化一些操作
  4. """
  5. import requests
  6. import json
  7. from applications.decoratorApi import retryOnTimeout
  8. from applications.denetMysql import DeNetMysql
  9. HEADERS = {
  10. 'Accept': 'application/json',
  11. 'Accept-Language': 'zh,zh-CN;q=0.9',
  12. 'Content-Type': 'application/json',
  13. 'Origin': 'http://admin.cybertogether.net',
  14. 'Proxy-Connection': 'keep-alive',
  15. 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36'
  16. }
  17. PERSON_COOKIE = {
  18. "token": "af54cdc404c3464d896745df389b2dce",
  19. "appType": 9,
  20. "platform": "pc",
  21. "appVersionCode": 1000,
  22. "clientTimestamp": 1,
  23. "fid": 1,
  24. "loginUid": 1,
  25. "pageSource": 1,
  26. "requestId": 1,
  27. "rid": 1,
  28. "uid": 1
  29. }


def get_generated_article_list(plan_id):
    """
    Given an auto-generate plan id, fetch the list of articles already collected for that plan.
    :param plan_id:
    :return:
    """
    db = DeNetMysql()
    sql = f"""
        SELECT
            account.wx_gh,
            content.title,
            content.content_link,
            content.view_count,
            content.like_count,
            from_unixtime(cprr.create_timestamp / 1000) AS 抓取时间,
            from_unixtime(content.publish_timestamp / 1000) AS 发布时间
        FROM crawler_plan_result_rel cprr
        JOIN crawler_plan plan ON cprr.plan_id = plan.id
        JOIN crawler_content content ON cprr.channel_source_id = content.channel_content_id
        JOIN crawler_account account ON content.channel_account_id = account.channel_account_id
        WHERE plan_id IN (
            SELECT input_source_value
            FROM produce_plan_input_source
            WHERE plan_id = '{plan_id}'
        );
    """
    article_list = db.select(sql)
    return article_list


def get_generated_article_title(generate_task_id):
    """
    Given a generate plan id, fetch the titles of articles already generated by that plan.
    :param generate_task_id:
    :return: title_set
    """
    db = DeNetMysql()
    sql = f"""
        SELECT DISTINCT output.output
        FROM produce_plan_exe_record planExeRecord
        JOIN produce_plan_module_output output
            ON output.plan_exe_id = planExeRecord.plan_exe_id AND output.produce_module_type = 3
        WHERE planExeRecord.plan_id = '{generate_task_id}';
    """
    title_tuple = db.select(sql)
    title_set = set([i[0] for i in title_tuple])
    return title_set


def get_publish_account_from_aigc():
    """
    Fetch the accounts that are currently publishing from the aigc system.
    :return:
        name: WeChat official account name
        gh_id: official account gh_id
        follower_count: follower count
        service_type_info: account type: 0 - subscription account, 1 - subscription account upgraded from a legacy account, 2 - service account
        verify_type_info: verification type: -1 - unverified, 0 - WeChat verified, 1 - Sina Weibo verified, 3 - qualification verified but name not yet verified, 4 - qualification verified, name not yet verified, but Sina Weibo verified
    """
    db = DeNetMysql()
    sql = """
        SELECT DISTINCT
            t3.`name`,
            t3.gh_id,
            t3.follower_count,
            t3.create_timestamp,
            t4.service_type_info,
            t4.verify_type_info,
            t3.id,
            group_concat(DISTINCT t5.remark) AS account_remark
        FROM
            publish_plan t1
            JOIN publish_plan_account t2 ON t1.id = t2.plan_id
            JOIN publish_account t3 ON t2.account_id = t3.id
            LEFT JOIN publish_account_wx_type t4 ON t3.id = t4.account_id
            LEFT JOIN publish_account_remark t5 ON t3.id = t5.publish_account_id
        WHERE
            t1.plan_status = 1
            AND t3.channel = 5
        GROUP BY t3.id;
    """
    info_tuple = db.select(sql)
    info_list = [
        {
            "name": line[0],
            "ghId": line[1],
            "follower_count": line[2],
            "account_init_timestamp": int(line[3]),
            "account_type": line[4],
            "account_auth": line[5],
            "account_id": line[6]
        }
        for line in info_tuple
        if '自动回复' not in str(line[7])  # skip accounts whose remark marks them as auto-reply ("自动回复") accounts
    ]
    return info_list
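

# Usage sketch (assumption: this helper is only an illustration of how the dicts returned by
# get_publish_account_from_aigc can be consumed; it is not part of the captured platform API).
def _example_publish_account_index():
    accounts = get_publish_account_from_aigc()
    # Map each account's ghId to its display name for quick lookups.
    return {account["ghId"]: account["name"] for account in accounts}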


def auto_create_crawler_task(plan_id, plan_name, plan_tag, url_list, article_source):
    """
    Create a crawler plan from a list of URLs.
    :param plan_id: plan id; pass it to append articles to an existing plan, otherwise a new id is generated
    :param plan_name: plan name
    :param plan_tag: plan tag
    :param url_list: list of input URLs
    :param article_source: source of the articles
    :return:
    """
    match article_source:
        case "toutiao":
            channel = 6
        case "weixin":
            channel = 5
        case _:
            return
    url = "http://aigc-api.cybertogether.net/aigc/crawler/plan/save"
    payload = json.dumps({
        "params": {
            "contentFilters": [],
            "accountFilters": [],
            "filterAccountMatchMode": 1,
            "filterContentMatchMode": 1,
            "selectModeValues": [],
            "searchModeValues": [],
            "contentModal": 3,
            "analyze": {},
            "crawlerComment": 0,
            "inputGroup": None,
            "inputSourceGroups": [],
            "modePublishTime": [],
            "planType": 2,
            "frequencyType": 2,
            "planTag": plan_tag,
            "tagPenetrateFlag": 0,
            "id": plan_id,
            "name": plan_name,
            "channel": channel,
            "crawlerMode": 5,
            "inputModeValues": url_list,
            "modePublishTimeStart": None,
            "modePublishTimeEnd": None,
            "executeRate": None,
            "executeDate": None,
            "executeWindowStart": None,
            "executeWindowEnd": None,
            "executeTimeInterval": None,
            "executeNum": None,
            "addModal": None,
            "addChannel": None,
            "fileUpload": None,
            "prompt": None,
            "acelFlag": None,
            "tasks": []
        },
        "baseInfo": PERSON_COOKIE
    })
    response = requests.request("POST", url, headers=HEADERS, data=payload)
    return response.json()
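

# Usage sketch (assumption: the plan name, tag and URL below are placeholders, not a real plan;
# the return value is whatever JSON the /aigc/crawler/plan/save endpoint responds with).
def _example_create_weixin_crawler_task():
    return auto_create_crawler_task(
        plan_id=None,  # None lets the platform allocate a new plan id
        plan_name="example crawler plan",
        plan_tag="example tag",
        url_list=["https://mp.weixin.qq.com/s/placeholder"],
        article_source="weixin"
    )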


def bind_crawler_task_to_generate_task(crawler_task_list, generate_task_id):
    """
    Bind crawler plans to a generate plan. The generate plan must already exist.
    :param crawler_task_list: list of crawler plans to add as input sources
    :param generate_task_id: id of the target generate plan
    :return: response
    """
    url = "http://aigc-api.cybertogether.net/aigc/produce/plan/save"
    plan_info = get_generate_task_detail(generate_task_id)
    input_source_groups = plan_info.get("inputSourceGroups")
    existed_crawler_task = input_source_groups[0].get("inputSources")
    new_task_list = existed_crawler_task + crawler_task_list
    input_source_group_0 = input_source_groups[0]
    input_source_group_0['inputSources'] = new_task_list
    payload = json.dumps({
        "params": {
            "contentFilters": [],
            "produceModal": plan_info.get("produceModal"),
            "inputModal": plan_info.get("inputModal"),
            "tasks": plan_info.get("tasks", []),
            "modules": [],
            "moduleGroups": plan_info.get("moduleGroups"),
            "inputSourceGroups": [input_source_group_0],
            "layoutType": plan_info.get("layoutType"),
            "activeManualReview": plan_info.get("activeManualReview"),
            "totalProduceNum": plan_info.get("totalProduceNum"),
            "dailyProduceNum": plan_info.get("dailyProduceNum"),
            "maxConcurrentNum": plan_info.get("maxConcurrentNum"),
            "id": generate_task_id,
            "name": plan_info.get("name"),
            "planTag": plan_info.get("planTag"),
            "tagPenetrateFlag": plan_info.get("tagPenetrateFlag"),
            "inputType": plan_info.get("inputType"),
            "inputChannel": plan_info.get("inputChannel"),
            "activeManualReviewCount": plan_info.get("activeManualReviewCount"),
            "autoComposite": plan_info.get("autoComposite")
        },
        "baseInfo": PERSON_COOKIE
    })
    response = requests.request("POST", url, headers=HEADERS, data=payload)
    return response.json()
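

# Usage sketch (assumption: the ids and label below are placeholders; the inputSource dict shape
# mirrors the one used in bind_crawler_task_to_publish_task and is assumed to apply here as well).
def _example_bind_crawler_task_to_generate_task():
    crawler_task_list = [
        {
            "sourceCategory": 1,
            "inputSourceValueType": 1,
            "inputSourceValue": "placeholder-crawler-plan-id",
            "inputSourceLabel": "example crawler plan"
        }
    ]
    return bind_crawler_task_to_generate_task(crawler_task_list, "placeholder-generate-plan-id")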


@retryOnTimeout()
def get_generate_task_detail(generate_task_id):
    """
    Given a generate plan id, fetch that plan's detail, including its existing crawler plans.
    :param generate_task_id:
    :return:
    """
    url = "http://aigc-api.cybertogether.net/aigc/produce/plan/detail"
    payload = json.dumps({
        "params": {
            "id": generate_task_id
        },
        "baseInfo": PERSON_COOKIE
    })
    response = requests.request("POST", url, headers=HEADERS, data=payload, timeout=10)
    result = response.json()
    if result['msg'] == 'success':
        return result['data']
    else:
        return {}


@retryOnTimeout()
def get_publish_task_detail(publish_task_id):
    """
    Given a publish plan id, fetch that plan's detail, including its existing crawler plans.
    :param publish_task_id:
    :return:
    """
    url = "http://aigc-api.cybertogether.net/aigc/publish/plan/detail"
    payload = json.dumps({
        "params": {
            "id": publish_task_id
        },
        "baseInfo": PERSON_COOKIE
    })
    response = requests.request("POST", url, headers=HEADERS, data=payload)
    return response.json()


def bind_crawler_task_to_publish_task(target_publish_task_id, crawler_task_name, crawler_task_id):
    """
    Bind a crawler plan to a publish plan. The publish plan must already exist.
    :param crawler_task_id: crawler plan id
    :param crawler_task_name: crawler plan name
    :param target_publish_task_id: id of the target publish plan
    :return: response
    """
    publish_task_detail = get_publish_task_detail(target_publish_task_id)
    publish_task_detail_data = publish_task_detail.get("data")
    # Bail out early if the publish plan detail could not be fetched.
    if not publish_task_detail_data:
        return
    already_exist_crawler_task_list = publish_task_detail_data.get("inputGroups")[0].get("inputSources")
    new_crawler_task_list = [
        {
            "sourceCategory": 1,
            "inputSourceValueType": 1,
            "inputSourceValue": crawler_task_id,
            "inputSourceLabel": crawler_task_name
        }
    ]
    new_input_source_group = already_exist_crawler_task_list + new_crawler_task_list
    url = "http://aigc-api.cybertogether.net/aigc/publish/plan/save"
    payload = json.dumps({
        "params": {
            "accountIds": [i['id'] for i in publish_task_detail_data.get("accountIds")],
            "inputGroups": [
                {
                    "groupId": "e40cd06daeb5345ed26256c8744f7a33",
                    "groupName": None,
                    "channel": None,
                    "contentModal": None,
                    "groupIndex": 1,
                    "filterMatchMode": 2,
                    "inputSources": new_input_source_group,
                    "inputFilters": [],
                    "inputOrders": [],
                    "label": "input1"
                }
            ],
            "inputSources": [],
            "inputFilters": [],
            "activeManualReview": publish_task_detail_data.get("activeManualReview"),
            "channel": publish_task_detail_data.get("channel"),
            "contentAllocationRules": publish_task_detail_data.get("contentAllocationRules"),
            "contentModal": publish_task_detail_data.get("contentModal"),
            "contentSortingRules": publish_task_detail_data.get("contentSortingRules"),
            "douyinPublishAccoutSetting": publish_task_detail_data.get("douyinPublishAccoutSetting"),
            "filterMatchMode": 1,
            "name": publish_task_detail_data.get("name"),
            "publishAccoutJson": "",
            "publishBgmType": publish_task_detail_data.get("publishBgmType"),
            "publishDate": publish_task_detail_data.get("publishDate"),
            "publishLocation": publish_task_detail_data.get("publishLocation"),
            "publishNum": publish_task_detail_data.get("publishNum"),
            "publishPushTime": publish_task_detail_data.get("publishPushTime"),
            "publishRate": publish_task_detail_data.get("publishRate"),
            "publishTimeInterval": publish_task_detail_data.get("publishTimeInterval"),
            "publishWindowEnd": publish_task_detail_data.get("publishWindowEnd"),
            "publishWindowStart": publish_task_detail_data.get("publishWindowStart"),
            "wxContentInsert": publish_task_detail_data.get("wxContentInsert"),
            "wxVideoPublishAccountSetting": publish_task_detail_data.get("wxVideoPublishAccountSetting"),
            "scoreJudgeFlag": publish_task_detail_data.get("scoreJudgeFlag"),
            "scoreJudgeTasks": publish_task_detail_data.get("scoreJudgeTasks"),
            "machineReviewMatchMode": publish_task_detail_data.get("machineReviewMatchMode"),
            "id": publish_task_detail_data.get("id"),
            "planType": publish_task_detail_data.get("planType"),
            "planTag": publish_task_detail_data.get("planTag"),
            "tagPenetrateFlag": publish_task_detail_data.get("tagPenetrateFlag"),
            "actionObjects": publish_task_detail_data.get("actionObjects"),
            "actionContents": publish_task_detail_data.get("actionContents"),
            "accountFrom": publish_task_detail_data.get("accountFrom"),
            "actionContentAllocationRule": publish_task_detail_data.get("actionContentAllocationRule"),
            "publishPerNum": publish_task_detail_data.get("publishPerNum"),
            "publishPerMinNum": publish_task_detail_data.get("publishPerMinNum"),
            "pushType": publish_task_detail_data.get("pushType"),
            "triggerEvent": publish_task_detail_data.get("triggerEvent"),
            "pushContentSortingRules": publish_task_detail_data.get("pushContentSortingRules"),
            "biliDistrict": publish_task_detail_data.get("biliDistrict"),
            "firstItemScoreJudgeTaskId": publish_task_detail_data.get("firstItemScoreJudgeTaskId"),
            "secondItemScoreJudgeTaskId": publish_task_detail_data.get("secondItemScoreJudgeTaskId"),
            "otherItemScoreJudgeTaskId": publish_task_detail_data.get("otherItemScoreJudgeTaskId"),
            "gzhArticleSortFlag": publish_task_detail_data.get("gzhArticleSortFlag"),
            "gzhArticleSortTask": publish_task_detail_data.get("gzhArticleSortTask"),
            "miniprogramInsertFlag": publish_task_detail_data.get("miniprogramInsertFlag"),
            "miniprogramInsertTasks": publish_task_detail_data.get("miniprogramInsertTasks"),
            "machineReviewConditions": publish_task_detail_data.get("machineReviewConditions"),
            "gzhTriggerSyncFrequency": publish_task_detail_data.get("gzhTriggerSyncFrequency"),
            "gzhTriggerSendContentType": publish_task_detail_data.get("gzhTriggerSendContentType"),
            "longArticleSystemHost": publish_task_detail_data.get("longArticleSystemHost"),
        },
        "baseInfo": PERSON_COOKIE
    })
    response = requests.request("POST", url, headers=HEADERS, data=payload)
    result = response.json()
    print(result)
    return result


def delete_articles(gh_id, title):
    """
    Delete an official account article by gh_id and title.
    :param gh_id:
    :param title:
    :return:
    """
    url = "http://101.37.174.139:80/articleAudit/titleDangerFindDelete"
    payload = {
        "ghId": gh_id,
        'title': title
    }
    headers = {
        'Content-Type': 'application/json;charset=UTF-8'
    }
    response = requests.request("POST", url, headers=headers, json=payload, timeout=600)
    return response
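

# Usage sketch (assumption: the gh_id and title below are placeholders; delete_articles returns
# the raw requests.Response, so the caller inspects status_code / json() itself).
def _example_delete_article():
    response = delete_articles(gh_id="gh_placeholder", title="placeholder title")
    return response.status_code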


def get_only_auto_reply_accounts():
    """
    Fetch the accounts whose remark contains "即转".
    """
    sql = "select publish_account_id from publish_account_remark where remark like '%即转%';"
    denet = DeNetMysql()
    result = denet.select(sql)
    account_id_list = [i[0] for i in result]
    return set(account_id_list)


def auto_create_single_video_crawler_task(plan_name, plan_tag, video_id_list):
    """
    Create a crawler plan for single videos from a list of video ids.
    :param plan_name: plan name
    :param plan_tag: plan tag
    :param video_id_list: list of input video ids
    :return:
    """
    url = "http://aigc-api.cybertogether.net/aigc/crawler/plan/save"
    payload = json.dumps({
        "params": {
            "contentFilters": [],
            "accountFilters": [],
            "filterAccountMatchMode": 1,
            "filterContentMatchMode": 1,
            "selectModeValues": [],
            "searchModeValues": [],
            "contentModal": 4,
            "analyze": {},
            "crawlerComment": 0,
            "inputGroup": [],
            "inputSourceGroups": [],
            "modePublishTime": [],
            "name": plan_name,
            "frequencyType": 2,
            "channel": 10,
            "crawlerMode": 5,
            "planTag": plan_tag,
            "voiceExtractFlag": 1,
            "srtExtractFlag": 1,
            "videoKeyFrameType": 1,
            "inputModeValues": video_id_list,
            "planType": 2
        },
        "baseInfo": PERSON_COOKIE
    })
    response = requests.request("POST", url, headers=HEADERS, data=payload)
    return response.json()
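

# Usage sketch (assumption: the plan name, tag and video id below are placeholders; running the
# module directly would hit the live aigc API, so the call stays behind the __main__ guard).
if __name__ == "__main__":
    demo_result = auto_create_single_video_crawler_task(
        plan_name="example video crawler plan",
        plan_tag="example tag",
        video_id_list=["placeholder-video-id"]
    )
    print(demo_result)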