@@ -0,0 +1,69 @@
+#! /usr/bin/env python
+# -*- coding: utf-8 -*-
+# vim:fenc=utf-8
+
+"""
+A simple tool that asks the remote scoring service to score a batch of titles and warm up its cache.
+"""
+
+import json
+import time
+
+import requests
+from concurrent.futures import ThreadPoolExecutor  # currently unused
+
+PLAN_ID = "20240813095121995118754"
+ACCOUNT_MAP = {
+ "gh_6d205db62f04": "20231214075906052349516",
+ "gh_0c89e11f8bf3": "20231214075715819462085"
+}
+SERVER_URL = "http://47.98.136.48:6060/score_list"
+
+def get_articles(plan_id, account_id):
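+    # Fetch the titles of articles waiting to be published for the given plan and account.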
+    url = 'http://aigc-api.cybertogether.net/aigc/publish/content/gzhWaitingPublishContent'
+    headers = {
+ "Content-Type": "application/json;charset=UTF-8"
+ }
+    payload = {
+ "params": {
+ "accountId": account_id,
+ "planId": plan_id
+ }
+ }
+    resp = requests.post(url, headers=headers, json=payload)
+ json_data = resp.json()
+    content_titles = [x['title'].replace("'", "") for x in json_data['data']]  # strip single quotes from titles
+ return content_titles
+
+def score_list(server_url, titles, account_gh_id):
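+    # Ask the scoring service to score these titles for the account, which also warms its cache.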
+    account_id = ACCOUNT_MAP[account_gh_id]  # currently unused
+    predefined_titles = [  # hardcoded example titles; currently unused
+ "在俄罗斯买好地了,却发现没有公路、码头、仓储、燃气管道……”",
+ "被霸占15年后成功收回,岛礁资源超100万吨,曾遭到美菲联手抢夺",
+ "感人!河南姐弟被父母遗弃,7岁弟弟带着姐姐看病:别怕,以后我养",
+ "山东26岁女子产下罕见“4胞胎”,丈夫却突然消失,婆婆:养不起",
+ "突然,中国资产大爆发!A50指数期货直线拉升超4.5%,港股大涨!人民币也涨了"
+ ]
+
+ t1 = time.time()
+ body = {
+ "gh_id_list": [account_gh_id],
+ "text_list": titles,
+ "max_time": None,
+ "min_time": None,
+ "interest_type": "avg",
+ "sim_type": "avg",
+ "rate": 0.1
+ }
+ response = requests.post(url=server_url, headers={}, json=body).json()
+ t2 = time.time()
+ print(json.dumps(response, ensure_ascii=False, indent=4))
+ print(f"time: {t2 - t1:.4f}")
+ return response
+
+
+if __name__ == '__main__':
+ titles = get_articles(PLAN_ID, ACCOUNT_MAP['gh_6d205db62f04'])
+ score_list(SERVER_URL, titles, 'gh_6d205db62f04')