feishu_form.py

# -*- coding: utf-8 -*-
import json
import os
import random
import sys
import datetime
import itertools
from collections import defaultdict

from common.sql_help import sqlCollect

sys.path.append(os.getcwd())
from common.feishu_utils import Feishu


class Material:
    @classmethod
    def feishu_list(cls, channel_id):
        """Return the day_count configured for channel_id in the summary sheet, or None."""
        summary = Feishu.get_values_batch("KsoMsyP2ghleM9tzBfmcEEXBnXg", "dQriSJ")
        for row in summary[1:]:
            channel = row[0]
            day_count = row[1]
            if channel:
                if channel == channel_id:
                    return day_count
            else:
                return None
        return None
    @classmethod
    def get_count_restrict(cls, channel):
        count_channel = Feishu.get_values_batch("KsoMsyP2ghleM9tzBfmcEEXBnXg", "187FZ7")
        for row in count_channel[1:]:
            sheet_channel = row[0]
            if sheet_channel == channel:
                return row[1]
  32. """
  33. 获取汇总表所有负责人列表
  34. """
  35. @classmethod
  36. def feishu_list(cls):
  37. summary = Feishu.get_values_batch("summary", "bc154d")
  38. list = []
  39. for row in summary[1:]:
  40. mark = row[0]
  41. name = row[1]
  42. feishu_id = row[3]
  43. feishu_sheet = row[4]
  44. cookie_sheet = row[5]
  45. number = {"mark": mark, "name": name, "feishu_id": feishu_id, "feishu_sheet": feishu_sheet, "cookie_sheet": cookie_sheet}
  46. if mark:
  47. list.append(number)
  48. else:
  49. return list
  50. return list
    @classmethod
    def get_sph_user(cls):
        data = Feishu.get_values_batch("GPbhsb5vchAN3qtzot6cu1f0n1c", "cc7ef0")
        user_data_list = []
        try:
            for row in data[1:]:
                users = str(row[2])
                if users and users != 'None':
                    if ',' in users:
                        user_list = users.split(',')
                    else:
                        user_list = [users]
                    for user in user_list:
                        status = sqlCollect.sph_channel_user(user)
                        if status:
                            user_data_list.append(user)
                else:
                    return user_data_list
            return user_data_list
        except Exception:
            return user_data_list
  72. """
  73. list 重新排序
  74. """
  75. @classmethod
  76. def sort_keyword_data(cls, data):
  77. # 解析 JSON 数据
  78. data = [json.loads(item) for item in data]
  79. # 根据 keyword_name 进行分组
  80. groups = defaultdict(list)
  81. for item in data:
  82. groups[item['keyword_name']].append(item)
  83. # 获取所有唯一的 keyword_name
  84. priority_names = list(groups.keys())
  85. # 对每个分组内的数据按 first_category 进行随机打乱
  86. for name in priority_names:
  87. random.shuffle(groups[name]) # 打乱每个分组内的数据顺序
  88. # 轮流排序每个分组的数据,保持 keyword_name 的顺序
  89. result = []
  90. max_length = max(len(groups[name]) for name in priority_names)
  91. for i in range(max_length):
  92. for name in priority_names:
  93. if i < len(groups[name]):
  94. result.append(groups[name][i])
  95. # 将结果转回 JSON 字符串列表
  96. sorted_list = [json.dumps(item, ensure_ascii=False) for item in result]
  97. return sorted_list
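
    # Illustration (hypothetical values, not read from any sheet): given three task
    # strings whose keyword_name is "owner_a" and two whose keyword_name is "owner_b",
    # sort_keyword_data returns them interleaved as a, b, a, b, a; the order inside
    # each owner group is randomized by the shuffle above.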
  98. """
  99. 获取搜索任务
  100. """
  101. @classmethod
  102. def get_keyword_data(cls, feishu_id, feishu_sheet):
  103. data = Feishu.get_values_batch(feishu_id, feishu_sheet)
  104. processed_list = []
  105. try:
  106. for row in data[1:]:
  107. channel_id = row[1]
  108. channel_url = str(row[2])
  109. tags = row[3]
  110. piaoquan_id = row[4]
  111. number = row[5]
  112. limit_number = row[6]
  113. video_share = row[7]
  114. video_ending = row[8]
  115. voice = row[9]
  116. crop_tool = row[10]
  117. gg_duration = row[11]
  118. title = row[12]
  119. if channel_url == None or channel_url == "" or len(channel_url) == 0:
  120. continue
  121. first_category = row[14] # 一级品类
  122. secondary_category = row[15] # 二级品类
  123. def count_items(item, separator):
  124. if item and item not in {'None', ''}:
  125. return len(item.split(separator))
  126. return 0
  127. video_id_total = count_items(str(channel_url), ',')
  128. title_total = count_items(str(title), '/')
  129. video_ending_total = count_items(str(video_ending), ',')
  130. values = [channel_id, video_id_total, piaoquan_id, video_share, video_ending_total, crop_tool, gg_duration, title_total, first_category]
  131. filtered_values = [str(value) for value in values if value is not None and value != "None"]
  132. task_mark = "_".join(map(str, filtered_values))
  133. keyword_sort = row[16] # 排序条件
  134. keyword_time = row[17] # 发布时间
  135. keyword_duration = row[18] # 视频时长
  136. keyword_name = row[19] # 负责人
  137. keyword_sort_list = keyword_sort.split(',')
  138. keyword_duration_list = keyword_duration.split(',')
  139. keyword_time_list = keyword_time.split(',')
  140. combinations = list(itertools.product(keyword_sort_list, keyword_time_list, keyword_duration_list))
  141. if ',' in channel_url:
  142. channel_url = channel_url.split(',')
  143. else:
  144. channel_url = [channel_url]
  145. for user in channel_url:
  146. for combo in combinations:
  147. number_dict = {
  148. "task_mark": task_mark,
  149. "channel_id": channel_id,
  150. "channel_url": user,
  151. "piaoquan_id": piaoquan_id,
  152. "number": number,
  153. "title": title,
  154. "video_share": video_share,
  155. "video_ending": video_ending,
  156. "crop_total": crop_tool,
  157. "gg_duration_total": gg_duration,
  158. "voice": voice,
  159. "first_category": first_category, # 一级品类
  160. "secondary_category": secondary_category, # 二级品类
  161. "combo": combo, # 搜索条件
  162. "keyword_name": keyword_name, # 品类负责人
  163. "tags": tags,
  164. "limit_number":limit_number
  165. }
  166. processed_list.append(json.dumps(number_dict, ensure_ascii=False))
  167. except:
  168. processed_list = cls.sort_keyword_data(processed_list)
  169. return processed_list
  170. processed_list = cls.sort_keyword_data(processed_list)
  171. return processed_list
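
    # Illustration (hypothetical sheet values, not from the original): a row with
    # keyword_sort "hot,new", keyword_time "week" and keyword_duration "1-3min,3-5min"
    # yields 2 x 1 x 2 = 4 combos from itertools.product, so every channel_url in that
    # row produces four task dicts that differ only in their "combo" field.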
  172. """
  173. 获取品类对应负责人任务明细
  174. """
  175. @classmethod
  176. def get_pl_task_data(cls, feishu_id, feishu_sheet):
  177. data = Feishu.get_values_batch( feishu_id, feishu_sheet )
  178. processed_list = []
  179. try:
  180. for row in data[1:]:
  181. channel_id = row[1]
  182. channel_url = str( row[2] )
  183. tags = row[3]
  184. piaoquan_id = row[4]
  185. number = row[5]
  186. limit_number = row[6]
  187. video_share = row[7]
  188. video_ending = row[8]
  189. voice = row[9]
  190. crop_tool = row[10]
  191. gg_duration = row[11]
  192. title = row[12]
  193. if channel_url == None or channel_url == "" or len( channel_url ) == 0:
  194. continue
  195. try:
  196. ls_number = int( row[13] )
  197. except:
  198. ls_number = None
  199. first_category = row[14]
  200. name = row[15]
  201. def count_items(item, separator):
  202. if item and item not in {'None', ''}:
  203. return len( item.split( separator ) )
  204. return 0
  205. video_id_total = count_items( str( channel_url ), ',' )
  206. title_total = count_items( str( title ), '/' )
  207. video_ending_total = count_items( str( video_ending ), ',' )
  208. values = [channel_id, video_id_total, piaoquan_id, video_share, video_ending_total, crop_tool,
  209. gg_duration, title_total]
  210. filtered_values = [str( value ) for value in values if value is not None and value != "None"]
  211. task_mark = "_".join( map( str, filtered_values ) )
  212. if piaoquan_id and piaoquan_id not in {'None', ''}:
  213. if ',' in channel_url:
  214. channel_url = channel_url.split( ',' )
  215. else:
  216. channel_url = [channel_url]
  217. for user in channel_url:
  218. number_dict = {
  219. "task_mark": task_mark,
  220. "channel_id": channel_id,
  221. "channel_url": user,
  222. "piaoquan_id": piaoquan_id,
  223. "number": number,
  224. "title": title,
  225. "video_share": video_share,
  226. "video_ending": video_ending,
  227. "crop_total": crop_tool,
  228. "gg_duration_total": gg_duration,
  229. "voice": voice,
  230. "first_category": first_category, # 一级品类
  231. "keyword_name":name,
  232. "tags": tags,
  233. "limit_number":limit_number
  234. }
  235. processed_list.append( json.dumps( number_dict, ensure_ascii=False ) )
  236. if channel_id == "抖音" or channel_id == "快手" or channel_id == "视频号":
  237. if ls_number and ls_number not in {'None', ''}:
  238. if channel_id == "抖音":
  239. new_channel_id = "抖音历史"
  240. if channel_id == "快手":
  241. new_channel_id = "快手历史"
  242. if channel_id == "视频号":
  243. new_channel_id = "视频号历史"
  244. # values1 = [new_channel_id, video_id_total, piaoquan_id, video_share, video_ending_total,
  245. # crop_tool,
  246. # gg_duration, title_total]
  247. # filtered_values1 = [str( value ) for value in values1 if
  248. # value is not None and value != "None"]
  249. # task_mark1 = "_".join( map( str, filtered_values1 ) )
  250. number_dict = {
  251. "task_mark": task_mark,
  252. "channel_id": new_channel_id,
  253. "channel_url": user,
  254. "piaoquan_id": piaoquan_id,
  255. "number": ls_number,
  256. "title": title,
  257. "video_share": video_share,
  258. "video_ending": video_ending,
  259. "crop_total": crop_tool,
  260. "gg_duration_total": gg_duration,
  261. "voice": voice,
  262. "first_category": first_category, # 一级品类
  263. "keyword_name": name,
  264. "tags": tags,
  265. "limit_number":limit_number
  266. }
  267. processed_list.append( json.dumps( number_dict, ensure_ascii=False ) )
  268. else:
  269. processed_list = cls.sort_keyword_data(processed_list)
  270. return processed_list
  271. processed_list = cls.sort_keyword_data(processed_list)
  272. return processed_list
  273. except:
  274. processed_list = cls.sort_keyword_data(processed_list)
  275. return processed_list
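
    # Illustration (hypothetical row, not from the original sheet): for channel_id
    # "抖音" with number 5 and a history count of 3 in column 13, the loop above emits
    # one task with channel_id "抖音" / number 5 plus a second task with channel_id
    # "抖音历史" / number 3; both share the same task_mark and channel_url.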
  276. """
  277. 获取对应负责人任务明细
  278. """
  279. @classmethod
  280. def get_task_data(cls, feishu_id, feishu_sheet):
  281. data = Feishu.get_values_batch(feishu_id, feishu_sheet)
  282. processed_list = []
  283. try:
  284. for row in data[1:]:
  285. channel_id = row[1]
  286. channel_url = str(row[2])
  287. tags = row[3]
  288. piaoquan_id = row[4]
  289. number = row[5]
  290. limit_number = row[6]
  291. video_share = row[7]
  292. video_ending = row[8]
  293. voice = row[9]
  294. crop_tool = row[10]
  295. gg_duration = row[11]
  296. title = row[12]
  297. if channel_url == None or channel_url == "" or len(channel_url) == 0:
  298. continue
  299. try:
  300. ls_number = int(row[13])
  301. except:
  302. ls_number = None
  303. def count_items(item, separator):
  304. if item and item not in {'None', ''}:
  305. return len(item.split(separator))
  306. return 0
  307. video_id_total = count_items(str(channel_url), ',')
  308. title_total = count_items(str(title), '/')
  309. video_ending_total = count_items(str(video_ending), ',')
  310. values = [channel_id, video_id_total, piaoquan_id, video_share, video_ending_total, crop_tool, gg_duration, title_total]
  311. filtered_values = [str(value) for value in values if value is not None and value != "None"]
  312. task_mark = "_".join(map(str, filtered_values))
  313. if piaoquan_id and piaoquan_id not in {'None', ''}:
  314. if ',' in channel_url:
  315. channel_url = channel_url.split(',')
  316. else:
  317. channel_url = [channel_url]
  318. for user in channel_url:
  319. number_dict = {
  320. "task_mark": task_mark,
  321. "channel_id": channel_id,
  322. "channel_url": user,
  323. "piaoquan_id": piaoquan_id,
  324. "number": number,
  325. "title": title,
  326. "video_share": video_share,
  327. "video_ending": video_ending,
  328. "crop_total": crop_tool,
  329. "gg_duration_total": gg_duration,
  330. "voice": voice,
  331. "tags":tags,
  332. "limit_number":limit_number
  333. }
  334. processed_list.append(json.dumps(number_dict, ensure_ascii=False))
  335. if channel_id == "抖音" or channel_id == "快手" or channel_id == "视频号":
  336. if ls_number and ls_number not in {'None', ''}:
  337. if channel_id == "抖音":
  338. new_channel_id = "抖音历史"
  339. if channel_id == "快手":
  340. new_channel_id = "快手历史"
  341. if channel_id == "视频号":
  342. new_channel_id = "视频号历史"
  343. # values1 = [new_channel_id, video_id_total, piaoquan_id, video_share, video_ending_total, crop_tool,
  344. # gg_duration, title_total]
  345. # filtered_values1 = [str(value) for value in values1 if value is not None and value != "None"]
  346. # task_mark1 = "_".join(map(str, filtered_values1))
  347. number_dict = {
  348. "task_mark": task_mark,
  349. "channel_id": new_channel_id,
  350. "channel_url": user,
  351. "piaoquan_id": piaoquan_id,
  352. "number": ls_number,
  353. "title": title,
  354. "video_share": video_share,
  355. "video_ending": video_ending,
  356. "crop_total": crop_tool,
  357. "gg_duration_total": gg_duration,
  358. "voice": voice,
  359. "tags": tags,
  360. "limit_number":limit_number
  361. }
  362. processed_list.append(json.dumps(number_dict, ensure_ascii=False))
  363. else:
  364. return processed_list
  365. return processed_list
  366. except:
  367. return processed_list
  368. """
  369. 获取对应片尾+srt
  370. """
  371. @classmethod
  372. def get_pwsrt_data(cls, feishu_id, feishu_sheet, video_ending):
  373. data = Feishu.get_values_batch(feishu_id, feishu_sheet)
  374. for row in data[1:]:
  375. pw_mark = row[0]
  376. pw_id = row[1]
  377. pw_srt = row[2]
  378. if pw_id != 'None' and pw_id != '' and pw_id != None:
  379. if pw_mark == video_ending:
  380. number = {"pw_id": pw_id, "pw_srt": pw_srt}
  381. return number
  382. return ''
  383. """
  384. 获取对应固定字幕
  385. """
  386. @classmethod
  387. def get_pzsrt_data(cls, feishu_id, feishu_sheet, video_share_name):
  388. data = Feishu.get_values_batch(feishu_id, feishu_sheet)
  389. for row in data[1:]:
  390. pz_mark = row[0]
  391. pz_zm = row[1]
  392. if pz_zm != 'None' and pz_zm != '' and pz_zm != None:
  393. if pz_mark == video_share_name:
  394. return pz_zm
  395. return ''
  396. """
  397. 获取 cookie 信息
  398. """
  399. @classmethod
  400. def get_cookie_data(cls, feishu_id, cookie_sheet, channel):
  401. data = Feishu.get_values_batch(feishu_id, cookie_sheet)
  402. for row in data[1:]:
  403. channel_mask = row[0]
  404. cookie = row[1]
  405. if channel_mask == channel:
  406. return cookie
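

# Minimal offline sketch (hypothetical data, not part of the production flow): it only
# exercises sort_keyword_data, the one method above that needs no Feishu or DB access.
if __name__ == "__main__":
    sample_tasks = [
        json.dumps({"keyword_name": owner, "task_mark": "demo_{}_{}".format(owner, i)}, ensure_ascii=False)
        for owner, count in (("owner_a", 3), ("owner_b", 2))
        for i in range(count)
    ]
    # Expected shape: the two owner groups interleaved round-robin (a, b, a, b, a);
    # the order within each group is random.
    for task in Material.sort_keyword_data(sample_tasks):
        print(task)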