# aidit_api.py
  1. """
  2. @author: luojunhui
  3. """
  4. import requests
  5. import json
  6. class AIDTApi(object):
  7. """
  8. 自动操作
  9. """
  10. headers = {
  11. 'Accept': 'application/json',
  12. 'Accept-Language': 'zh,zh-CN;q=0.9',
  13. 'Content-Type': 'application/json',
  14. 'Origin': 'http://admin.cybertogether.net',
  15. 'Proxy-Connection': 'keep-alive',
  16. 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36'
  17. }
  18. person_info = {
  19. "token": "af54cdc404c3464d896745df389b2dce",
  20. "appType": 9,
  21. "platform": "pc",
  22. "appVersionCode": 1000,
  23. "clientTimestamp": 1,
  24. "fid": 1,
  25. "loginUid": 1,
  26. "pageSource": 1,
  27. "requestId": 1,
  28. "rid": 1,
  29. "uid": 1
  30. }
  31. @classmethod
  32. def getPlanArticleList(cls, page_index, plan_id):
  33. """
  34. 获取抓取计划下的文章list
  35. :param plan_id:
  36. :param page_index:
  37. :return:
  38. """
  39. url = "http://aigc-api.cybertogether.net/aigc/crawler/content/list"
  40. payload = json.dumps({
  41. "params": {
  42. "filterItems": [
  43. {
  44. "itemName": "sourceCrawlerPlans",
  45. "selectValues": [plan_id]
  46. }
  47. ],
  48. "listFieldFormula": [],
  49. "pageNum": page_index,
  50. "pageSize": 50,
  51. "contentModal": 3
  52. },
  53. "baseInfo": cls.person_info
  54. })
  55. response = requests.request("POST", url, headers=cls.headers, data=payload)
  56. return response.json()
  57. @classmethod
  58. def updateArticleIntoCrawlerPlan(cls, plan_id, plan_name, plan_tag, url_list):
  59. """
  60. 往抓取计划加文章
  61. :return:
  62. """
  63. url = "http://aigc-api.cybertogether.net/aigc/crawler/plan/save"
  64. payload = json.dumps({
  65. "params": {
  66. "contentFilters": [],
  67. "accountFilters": [],
  68. "filterAccountMatchMode": 1,
  69. "filterContentMatchMode": 1,
  70. "selectModeValues": [],
  71. "searchModeValues": [],
  72. "contentModal": 3,
  73. "analyze": {},
  74. "crawlerComment": 0,
  75. "inputGroup": None,
  76. "inputSourceGroups": [],
  77. "modePublishTime": [],
  78. "planType": 2,
  79. "frequencyType": 2,
  80. "planTag": plan_tag,
  81. "tagPenetrateFlag": 0,
  82. "id": plan_id,
  83. "name": plan_name,
  84. "channel": 5,
  85. "crawlerMode": 5,
  86. "inputModeValues": url_list,
  87. "modePublishTimeStart": None,
  88. "modePublishTimeEnd": None,
  89. "executeRate": None,
  90. "executeDate": None,
  91. "executeWindowStart": None,
  92. "executeWindowEnd": None,
  93. "executeTimeInterval": None,
  94. "executeNum": None,
  95. "addModal": None,
  96. "addChannel": None,
  97. "fileUpload": None,
  98. "prompt": None,
  99. "acelFlag": None,
  100. "tasks": []
  101. },
  102. "baseInfo": cls.person_info
  103. })
  104. response = requests.request("POST", url, headers=cls.headers, data=payload)
  105. print(json.dumps(response.json(), ensure_ascii=False, indent=4))