  1. """
  2. @author: luojunhui
  3. 通过抓包 aigc 平台,自动化一些操作
  4. """
  5. import requests
  6. import json
  7. from applications.decoratorApi import retryOnTimeout
  8. from applications.denetMysql import DeNetMysql

HEADERS = {
    'Accept': 'application/json',
    'Accept-Language': 'zh,zh-CN;q=0.9',
    'Content-Type': 'application/json',
    'Origin': 'http://admin.cybertogether.net',
    'Proxy-Connection': 'keep-alive',
    'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36'
}

# Hard-coded session identity sent as "baseInfo" with every API request.
PERSON_COOKIE = {
    "token": "af54cdc404c3464d896745df389b2dce",
    "appType": 9,
    "platform": "pc",
    "appVersionCode": 1000,
    "clientTimestamp": 1,
    "fid": 1,
    "loginUid": 1,
    "pageSource": 1,
    "requestId": 1,
    "rid": 1,
    "uid": 1
}


def get_generated_article_list(plan_id):
    """
    Given an auto-generation plan id, fetch the articles that plan has already generated.
    :param plan_id:
    :return:
    """
    db = DeNetMysql()
    # NOTE: plan_id is interpolated directly into the SQL, so only pass trusted values.
    sql = f"""
        SELECT
            account.wx_gh,
            content.title,
            content.content_link,
            content.view_count,
            content.like_count,
            from_unixtime(cprr.create_timestamp / 1000) AS crawl_time,
            from_unixtime(content.publish_timestamp / 1000) AS publish_time
        FROM crawler_plan_result_rel cprr
        JOIN crawler_plan plan ON cprr.plan_id = plan.id
        JOIN crawler_content content ON cprr.channel_source_id = content.channel_content_id
        JOIN crawler_account account ON content.channel_account_id = account.channel_account_id
        WHERE plan_id IN (
            SELECT input_source_value
            FROM produce_plan_input_source
            WHERE plan_id = '{plan_id}'
        );
    """
    article_list = db.select(sql)
    return article_list
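
# Usage sketch (the plan id is a made-up placeholder); each row is a tuple in
# the column order of the SELECT above:
#     articles = get_generated_article_list("produce-plan-id")
#     for wx_gh, title, link, views, likes, crawl_time, publish_time in articles:
#         print(title, link, views)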


def get_generated_article_title(generate_task_id):
    """
    Given a generation plan id, fetch the titles of the articles that plan has already generated.
    :param generate_task_id:
    :return: title_set
    """
    db = DeNetMysql()
    sql = f"""
        SELECT DISTINCT output.output
        FROM produce_plan_exe_record planExeRecord
        JOIN produce_plan_module_output output
            ON output.plan_exe_id = planExeRecord.plan_exe_id
            AND output.produce_module_type = 3
        WHERE planExeRecord.plan_id = '{generate_task_id}';
    """
    title_tuple = db.select(sql)
    title_set = {i[0] for i in title_tuple}
    return title_set
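
# Usage sketch: the returned set makes "was this title already generated?" a
# cheap membership test (the id and candidate list are placeholders):
#     known_titles = get_generated_article_title("produce-plan-id")
#     fresh = [t for t in candidate_titles if t not in known_titles]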


def get_publish_account_from_aigc():
    """
    Fetch the accounts currently publishing through the aigc system.
    :return:
        name: official account name
        gh_id: official account gh_id
        follower_count: follower count
        service_type_info: account type: 0 = subscription account,
            1 = subscription account upgraded from a legacy account, 2 = service account
        verify_type_info: verification type: -1 = unverified, 0 = WeChat verified,
            1 = Sina Weibo verified, 3 = qualification verified but name not yet verified,
            4 = qualification verified and name not yet verified, but Sina Weibo verified
    """
    db = DeNetMysql()
    sql = """
        SELECT DISTINCT
            t3.`name`,
            t3.gh_id,
            t3.follower_count,
            t3.create_timestamp,
            t4.service_type_info,
            t4.verify_type_info
        FROM publish_plan t1
        JOIN publish_plan_account t2 ON t1.id = t2.plan_id
        JOIN publish_account t3 ON t2.account_id = t3.id
        LEFT JOIN publish_account_wx_type t4 ON t3.id = t4.account_id
        WHERE t1.plan_status = 1
          AND t3.channel = 5
        GROUP BY t3.id
        ORDER BY t3.create_timestamp DESC
    """
    info_tuple = db.select(sql)
    info_list = [
        {
            "name": line[0],
            "ghId": line[1],
            "follower_count": line[2],
            "account_init_timestamp": int(line[3] / 1000),
            "account_type": line[4],
            "account_auth": line[5]
        } for line in info_tuple
    ]
    return info_list
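
# Usage sketch: keys follow the dicts built above; account_auth == 0 means
# WeChat-verified per the docstring mapping:
#     accounts = get_publish_account_from_aigc()
#     verified = [a for a in accounts if a["account_auth"] == 0]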


def auto_create_crawler_task(plan_id, plan_name, plan_tag, url_list):
    """
    Create a crawler plan from a list of urls.
    :param plan_id: plan id; pass an existing id to append articles to that plan,
        otherwise pass None and a new id is generated
    :param plan_name: plan name
    :param plan_tag: plan tag
    :param url_list: the input url list
    :return:
    """
    url = "http://aigc-api.cybertogether.net/aigc/crawler/plan/save"
    payload = json.dumps({
        "params": {
            "contentFilters": [],
            "accountFilters": [],
            "filterAccountMatchMode": 1,
            "filterContentMatchMode": 1,
            "selectModeValues": [],
            "searchModeValues": [],
            "contentModal": 3,
            "analyze": {},
            "crawlerComment": 0,
            "inputGroup": None,
            "inputSourceGroups": [],
            "modePublishTime": [],
            "planType": 2,
            "frequencyType": 2,
            "planTag": plan_tag,
            "tagPenetrateFlag": 0,
            "id": plan_id,
            "name": plan_name,
            "channel": 5,
            "crawlerMode": 5,
            "inputModeValues": url_list,
            "modePublishTimeStart": None,
            "modePublishTimeEnd": None,
            "executeRate": None,
            "executeDate": None,
            "executeWindowStart": None,
            "executeWindowEnd": None,
            "executeTimeInterval": None,
            "executeNum": None,
            "addModal": None,
            "addChannel": None,
            "fileUpload": None,
            "prompt": None,
            "acelFlag": None,
            "tasks": []
        },
        "baseInfo": PERSON_COOKIE
    })
    response = requests.post(url, headers=HEADERS, data=payload)
    return response.json()


def bind_crawler_task_to_generate_task(crawler_task_list, generate_task_id):
    """
    Bind crawler plans to an existing generation plan.
    :param crawler_task_list: crawler plans (input sources) to append
    :param generate_task_id: target generation plan id
    :return: response
    """
    url = "http://aigc-api.cybertogether.net/aigc/produce/plan/save"
    plan_info = get_generate_task_detail(generate_task_id)
    input_source_groups = plan_info.get("inputSourceGroups")
    existed_crawler_task = input_source_groups[0].get("inputSources")
    new_task_list = existed_crawler_task + crawler_task_list
    input_source_group_0 = input_source_groups[0]
    input_source_group_0['inputSources'] = new_task_list
    payload = json.dumps({
        "params": {
            "contentFilters": [],
            "produceModal": plan_info.get("produceModal"),
            "inputModal": plan_info.get("inputModal"),
            "tasks": plan_info.get("tasks", []),
            "modules": [],
            "moduleGroups": plan_info.get("moduleGroups"),
            "inputSourceGroups": [input_source_group_0],
            "layoutType": plan_info.get("layoutType"),
            "activeManualReview": plan_info.get("activeManualReview"),
            "totalProduceNum": plan_info.get("totalProduceNum"),
            "dailyProduceNum": plan_info.get("dailyProduceNum"),
            "maxConcurrentNum": plan_info.get("maxConcurrentNum"),
            "id": generate_task_id,
            "name": plan_info.get("name"),
            "planTag": plan_info.get("planTag"),
            "tagPenetrateFlag": plan_info.get("tagPenetrateFlag"),
            "inputType": plan_info.get("inputType"),
            "inputChannel": plan_info.get("inputChannel"),
            "activeManualReviewCount": plan_info.get("activeManualReviewCount"),
            "autoComposite": plan_info.get("autoComposite")
        },
        "baseInfo": PERSON_COOKIE
    })
    response = requests.post(url, headers=HEADERS, data=payload)
    return response.json()
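
# Each crawler_task_list element mirrors the inputSources shape used by
# bind_crawler_task_to_publish_task below (ids and labels are placeholders):
#     bind_crawler_task_to_generate_task(
#         crawler_task_list=[{
#             "sourceCategory": 1,
#             "inputSourceValueType": 1,
#             "inputSourceValue": "crawler-plan-id",
#             "inputSourceLabel": "crawler-plan-name",
#         }],
#         generate_task_id="produce-plan-id",
#     )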


@retryOnTimeout()
def get_generate_task_detail(generate_task_id):
    """
    Given a generation plan id, fetch that plan's detail, including its bound crawler plans.
    :param generate_task_id:
    :return:
    """
    url = "http://aigc-api.cybertogether.net/aigc/produce/plan/detail"
    payload = json.dumps({
        "params": {
            "id": generate_task_id
        },
        "baseInfo": PERSON_COOKIE
    })
    response = requests.post(url, headers=HEADERS, data=payload, timeout=10)
    result = response.json()
    if result['msg'] == 'success':
        return result['data']
    else:
        return {}


@retryOnTimeout()
def get_publish_task_detail(publish_task_id):
    """
    Given a publish plan id, fetch that plan's detail, including its bound crawler plans.
    :param publish_task_id:
    :return:
    """
    url = "http://aigc-api.cybertogether.net/aigc/publish/plan/detail"
    payload = json.dumps({
        "params": {
            "id": publish_task_id
        },
        "baseInfo": PERSON_COOKIE
    })
    # An explicit timeout is required for @retryOnTimeout to ever observe a timeout.
    response = requests.post(url, headers=HEADERS, data=payload, timeout=10)
    return response.json()


def bind_crawler_task_to_publish_task(target_publish_task_id, crawler_task_name, crawler_task_id):
    """
    Bind a crawler plan to an existing publish plan.
    :param target_publish_task_id: target publish plan id
    :param crawler_task_name: crawler plan name
    :param crawler_task_id: crawler plan id
    :return: response
    """
    publish_task_detail = get_publish_task_detail(target_publish_task_id)
    publish_task_detail_data = publish_task_detail.get("data")
    # Bail out before touching the detail payload if the request returned no data.
    if not publish_task_detail_data:
        return {}
    already_exist_crawler_task_list = publish_task_detail_data.get("inputGroups")[0].get("inputSources")
    new_crawler_task_list = [
        {
            "sourceCategory": 1,
            "inputSourceValueType": 1,
            "inputSourceValue": crawler_task_id,
            "inputSourceLabel": crawler_task_name
        }
    ]
    new_input_source_group = already_exist_crawler_task_list + new_crawler_task_list
    url = "http://aigc-api.cybertogether.net/aigc/publish/plan/save"
    payload = json.dumps({
        "params": {
            "accountIds": [i['id'] for i in publish_task_detail_data.get("accountIds")],
            "inputGroups": [
                {
                    "groupId": "e40cd06daeb5345ed26256c8744f7a33",
                    "groupName": None,
                    "channel": None,
                    "contentModal": None,
                    "groupIndex": 1,
                    "filterMatchMode": 2,
                    "inputSources": new_input_source_group,
                    "inputFilters": [],
                    "inputOrders": [],
                    "label": "input1"
                }
            ],
            "inputSources": [],
            "inputFilters": [],
            "activeManualReview": publish_task_detail_data.get("activeManualReview"),
            "channel": publish_task_detail_data.get("channel"),
            "contentAllocationRules": publish_task_detail_data.get("contentAllocationRules"),
            "contentModal": publish_task_detail_data.get("contentModal"),
            "contentSortingRules": publish_task_detail_data.get("contentSortingRules"),
            "douyinPublishAccoutSetting": publish_task_detail_data.get("douyinPublishAccoutSetting"),
            "filterMatchMode": 1,
            "name": publish_task_detail_data.get("name"),
            "publishAccoutJson": "",
            "publishBgmType": publish_task_detail_data.get("publishBgmType"),
            "publishDate": publish_task_detail_data.get("publishDate"),
            "publishLocation": publish_task_detail_data.get("publishLocation"),
            "publishNum": publish_task_detail_data.get("publishNum"),
            "publishPushTime": publish_task_detail_data.get("publishPushTime"),
            "publishRate": publish_task_detail_data.get("publishRate"),
            "publishTimeInterval": publish_task_detail_data.get("publishTimeInterval"),
            "publishWindowEnd": publish_task_detail_data.get("publishWindowEnd"),
            "publishWindowStart": publish_task_detail_data.get("publishWindowStart"),
            "wxContentInsert": publish_task_detail_data.get("wxContentInsert"),
            "wxVideoPublishAccountSetting": publish_task_detail_data.get("wxVideoPublishAccountSetting"),
            "scoreJudgeFlag": publish_task_detail_data.get("scoreJudgeFlag"),
            "scoreJudgeTasks": publish_task_detail_data.get("scoreJudgeTasks"),
            "machineReviewMatchMode": publish_task_detail_data.get("machineReviewMatchMode"),
            "id": publish_task_detail_data.get("id"),
            "planType": publish_task_detail_data.get("planType"),
            "planTag": publish_task_detail_data.get("planTag"),
            "tagPenetrateFlag": publish_task_detail_data.get("tagPenetrateFlag"),
            "actionObjects": publish_task_detail_data.get("actionObjects"),
            "actionContents": publish_task_detail_data.get("actionContents"),
            "accountFrom": publish_task_detail_data.get("accountFrom"),
            "actionContentAllocationRule": publish_task_detail_data.get("actionContentAllocationRule"),
            "publishPerNum": publish_task_detail_data.get("publishPerNum"),
            "publishPerMinNum": publish_task_detail_data.get("publishPerMinNum"),
            "pushType": publish_task_detail_data.get("pushType"),
            "triggerEvent": publish_task_detail_data.get("triggerEvent"),
            "pushContentSortingRules": publish_task_detail_data.get("pushContentSortingRules"),
            "biliDistrict": publish_task_detail_data.get("biliDistrict"),
            "firstItemScoreJudgeTaskId": publish_task_detail_data.get("firstItemScoreJudgeTaskId"),
            "secondItemScoreJudgeTaskId": publish_task_detail_data.get("secondItemScoreJudgeTaskId"),
            "otherItemScoreJudgeTaskId": publish_task_detail_data.get("otherItemScoreJudgeTaskId"),
            "gzhArticleSortFlag": publish_task_detail_data.get("gzhArticleSortFlag"),
            "gzhArticleSortTask": publish_task_detail_data.get("gzhArticleSortTask"),
            "miniprogramInsertFlag": publish_task_detail_data.get("miniprogramInsertFlag"),
            "miniprogramInsertTasks": publish_task_detail_data.get("miniprogramInsertTasks"),
            "machineReviewConditions": publish_task_detail_data.get("machineReviewConditions"),
            "gzhTriggerSyncFrequency": publish_task_detail_data.get("gzhTriggerSyncFrequency"),
            "gzhTriggerSendContentType": publish_task_detail_data.get("gzhTriggerSendContentType"),
            "longArticleSystemHost": publish_task_detail_data.get("longArticleSystemHost"),
        },
        "baseInfo": PERSON_COOKIE
    })
    response = requests.post(url, headers=HEADERS, data=payload)
    return response.json()
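

if __name__ == "__main__":
    # Minimal end-to-end sketch; every id, name, and url below is a made-up
    # placeholder to be replaced with real values from the AIGC admin console.
    crawler_plan_response = auto_create_crawler_task(
        plan_id=None,
        plan_name="demo-crawler-plan",
        plan_tag="demo",
        url_list=["https://mp.weixin.qq.com/s/xxxx"],
    )
    print(crawler_plan_response)
    # With the crawler plan id from the response above, the plan can then be
    # bound to an existing publish plan:
    # print(bind_crawler_task_to_publish_task(
    #     target_publish_task_id="publish-plan-id",
    #     crawler_task_name="demo-crawler-plan",
    #     crawler_task_id="crawler-plan-id",
    # ))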