chunk_task.py

import asyncio
from typing import List

from tqdm import tqdm

from applications.api import get_basic_embedding
from applications.utils.async_utils import run_tasks_with_asyncio_task_group
from applications.utils.chunks import LLMClassifier, TopicAwarePackerV2
from applications.utils.milvus import async_insert_chunk
from applications.utils.mysql import ContentChunks, Contents
from applications.utils.nlp import num_tokens
from applications.config import Chunk, DEFAULT_MODEL, ELASTIC_SEARCH_INDEX
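

# ChunkEmbeddingTask drives the full ingestion pipeline for one document:
# chunk the text, classify each chunk with an LLM, write embeddings to
# Milvus, and index chunk metadata into Elasticsearch. MySQL status columns
# act as optimistic locks between the stages.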
class ChunkEmbeddingTask(TopicAwarePackerV2):
    def __init__(self, doc_id, resource):
        super().__init__(doc_id)
        self.chunk_manager = None
        self.content_manager = None
        self.mysql_client = resource.mysql_client
        self.milvus_client = resource.milvus_client
        self.es_client = resource.es_client
        self.classifier = LLMClassifier()

    @staticmethod
    async def get_embedding_list(text: str) -> List:
        return await get_basic_embedding(text=text, model=DEFAULT_MODEL)

    def init_processer(self):
        self.content_manager = Contents(self.mysql_client)
        self.chunk_manager = ContentChunks(self.mysql_client)
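
    # Status lifecycle used throughout this class (constants inherited from
    # the parent class): INIT -> PROCESSING -> FINISHED or FAILED. Each
    # update_*_status(ori_status=..., new_status=...) call is a conditional
    # transition, so succeeding at it doubles as acquiring a lock on the row.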
    async def _chunk_each_content(self, doc_id: str, data: dict) -> List[Chunk]:
        title, text = data.get("title", "").strip(), data["text"].strip()
        text_type = data.get("text_type", 1)
        dataset_id = data.get("dataset_id", 0)  # default knowledge-base id is 0
        re_chunk = data.get("re_chunk", False)
        dont_chunk = data.get("dont_chunk", False)
        if re_chunk:
            await self.content_manager.update_content_info(
                doc_id=doc_id,
                text=text,
                text_type=text_type,
                title=title,
                dataset_id=dataset_id,
            )
            flag = True
        else:
            flag = await self.content_manager.insert_content(
                doc_id, text, text_type, title, dataset_id
            )
        if not flag:
            return []
        raw_chunks = await self.chunk(text, text_type, dataset_id, dont_chunk)
        if not raw_chunks:
            await self.content_manager.update_content_status(
                doc_id=doc_id,
                ori_status=self.INIT_STATUS,
                new_status=self.FAILED_STATUS,
            )
            return []
        await self.content_manager.update_content_status(
            doc_id=doc_id,
            ori_status=self.INIT_STATUS,
            new_status=self.PROCESSING_STATUS,
        )
        return raw_chunks
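
    # The Elasticsearch document reuses the Milvus primary key as its _id and
    # stores it again in _source, so a keyword hit in ES can be joined back to
    # the corresponding vector row in Milvus.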
    async def insert_into_es(self, milvus_id, chunk: Chunk) -> int:
        docs = [
            {
                "_index": ELASTIC_SEARCH_INDEX,
                "_id": milvus_id,
                "_source": {
                    "milvus_id": milvus_id,
                    "doc_id": chunk.doc_id,
                    "dataset_id": chunk.dataset_id,
                    "chunk_id": chunk.chunk_id,
                    "topic": chunk.topic,
                    "domain": chunk.domain,
                    "task_type": chunk.task_type,
                    "text_type": chunk.text_type,
                    "keywords": chunk.keywords,
                    "concepts": chunk.concepts,
                    "entities": chunk.entities,
                    "status": chunk.status,
                },
            }
        ]
        resp = await self.es_client.bulk_insert(docs)
        return resp["success"]
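
    # Per-chunk pipeline: MySQL insert -> LLM classification -> Milvus
    # embedding -> Elasticsearch indexing. Each stage bails out early on
    # failure, rolling the relevant status column to FAILED wherever a
    # lock was already held.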
    async def save_each_chunk(self, chunk: Chunk):
        # Persist the raw chunk row.
        flag = await self.chunk_manager.insert_chunk(chunk)
        if not flag:
            print("Failed to insert chunk")
            return
        acquire_lock = await self.chunk_manager.update_chunk_status(
            doc_id=chunk.doc_id,
            chunk_id=chunk.chunk_id,
            ori_status=self.INIT_STATUS,
            new_status=self.PROCESSING_STATUS,
        )
        if not acquire_lock:
            print("Failed to acquire the chunk-processing lock")
            return
        completion = await self.classifier.classify_chunk(chunk)
        if not completion:
            await self.chunk_manager.update_chunk_status(
                doc_id=chunk.doc_id,
                chunk_id=chunk.chunk_id,
                ori_status=self.PROCESSING_STATUS,
                new_status=self.FAILED_STATUS,
            )
            print("Failed to get a classification from DeepSeek")
            return
        update_flag = await self.chunk_manager.set_chunk_result(
            chunk=completion,
            ori_status=self.PROCESSING_STATUS,
            new_status=self.FINISHED_STATUS,
        )
        if not update_flag:
            await self.chunk_manager.update_chunk_status(
                doc_id=chunk.doc_id,
                chunk_id=chunk.chunk_id,
                ori_status=self.PROCESSING_STATUS,
                new_status=self.FAILED_STATUS,
            )
            return
        milvus_id = await self.save_to_milvus(completion)
        if not milvus_id:
            return
        # Index into Elasticsearch, guarded by its own status lock.
        acquire_es_lock = await self.chunk_manager.update_es_status(
            doc_id=chunk.doc_id,
            chunk_id=chunk.chunk_id,
            ori_status=self.INIT_STATUS,
            new_status=self.PROCESSING_STATUS,
        )
        if not acquire_es_lock:
            print(f"Failed to acquire the ES lock: {chunk.doc_id}--{chunk.chunk_id}")
            return
        insert_rows = await self.insert_into_es(milvus_id, completion)
        final_status = self.FINISHED_STATUS if insert_rows else self.FAILED_STATUS
        await self.chunk_manager.update_es_status(
            doc_id=chunk.doc_id,
            chunk_id=chunk.chunk_id,
            ori_status=self.PROCESSING_STATUS,
            new_status=final_status,
        )

    async def save_to_milvus(self, chunk: Chunk):
        """
        :param chunk: a single classified chunk
        :return: the Milvus primary key of the inserted row, or None on failure
        """
        # Acquire the embedding status lock.
        acquire_lock = await self.chunk_manager.update_embedding_status(
            doc_id=chunk.doc_id,
            chunk_id=chunk.chunk_id,
            new_status=self.PROCESSING_STATUS,
            ori_status=self.INIT_STATUS,
        )
        if not acquire_lock:
            print(
                f"Failed to acquire the embedding lock for chunk "
                f"{chunk.doc_id}-{chunk.chunk_id}"
            )
            return None
        try:
            data = {
                "doc_id": chunk.doc_id,
                "chunk_id": chunk.chunk_id,
                "vector_text": await self.get_embedding_list(chunk.text),
                "vector_summary": await self.get_embedding_list(chunk.summary),
                "vector_questions": await self.get_embedding_list(
                    ",".join(chunk.questions)
                ),
            }
            resp = await async_insert_chunk(self.milvus_client, data)
            if not resp:
                await self.chunk_manager.update_embedding_status(
                    doc_id=chunk.doc_id,
                    chunk_id=chunk.chunk_id,
                    ori_status=self.PROCESSING_STATUS,
                    new_status=self.FAILED_STATUS,
                )
                return None
            await self.chunk_manager.update_embedding_status(
                doc_id=chunk.doc_id,
                chunk_id=chunk.chunk_id,
                ori_status=self.PROCESSING_STATUS,
                new_status=self.FINISHED_STATUS,
            )
            milvus_id = resp[0]
            return milvus_id
        except Exception as e:
            await self.chunk_manager.update_embedding_status(
                doc_id=chunk.doc_id,
                chunk_id=chunk.chunk_id,
                ori_status=self.PROCESSING_STATUS,
                new_status=self.FAILED_STATUS,
            )
            print("Failed to write to the vector database:", e)
            return None
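
    # deal() validates the payload, kicks the per-chunk pipeline off as a
    # background asyncio task, and returns the doc_id immediately, so callers
    # get an acknowledgement while processing continues asynchronously.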
    async def deal(self, data):
        text = data.get("text", "")
        dont_chunk = data.get("dont_chunk", False)
        # If chunking is disabled, the whole text must fit within the model's limit.
        if dont_chunk and num_tokens(text) >= self.max_tokens:
            return {"error": "Document exceeds the maximum number of tokens the model supports"}
        self.init_processer()

        async def _process():
            chunks = await self._chunk_each_content(self.doc_id, data)
            if not chunks:
                return
            # dev: process chunks sequentially; the concurrent version is
            # kept below for reference.
            for chunk in tqdm(chunks):
                await self.save_each_chunk(chunk)
            # await run_tasks_with_asyncio_task_group(
            #     task_list=chunks,
            #     handler=self.save_each_chunk,
            #     description="process chunks of a single article",
            #     unit="chunk",
            #     max_concurrency=10,
            # )
            await self.content_manager.update_content_status(
                doc_id=self.doc_id,
                ori_status=self.PROCESSING_STATUS,
                new_status=self.FINISHED_STATUS,
            )

        # Keep a reference to the task so it is not garbage-collected before
        # it finishes (see the asyncio.create_task documentation).
        self._background_task = asyncio.create_task(_process())
        return self.doc_id
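

# --- Usage sketch (added for illustration; not part of the original module) ---
# A minimal example of driving this task, assuming a hypothetical `resource`
# object that exposes .mysql_client, .milvus_client, and .es_client (the three
# attributes the constructor reads). `build_resource` and the payload below
# are assumptions; only ChunkEmbeddingTask(doc_id, resource) and deal(data)
# come from this module.
#
#     async def main():
#         resource = await build_resource()  # hypothetical factory
#         task = ChunkEmbeddingTask(doc_id="doc-001", resource=resource)
#         payload = {
#             "title": "Example",
#             "text": "Some document text ...",
#             "text_type": 1,
#             "dataset_id": 0,
#             "re_chunk": False,
#             "dont_chunk": False,
#         }
#         doc_id = await task.deal(payload)  # returns at once; the pipeline
#                                            # keeps running in the background
#
#     asyncio.run(main())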