chunk_task.py

import asyncio
from typing import List

from applications.api import get_basic_embedding
from applications.utils.async_utils import run_tasks_with_asyncio_task_group
from applications.utils.chunks import LLMClassifier, TopicAwarePackerV2
from applications.utils.milvus import async_insert_chunk
from applications.utils.mysql import ContentChunks, Contents
from applications.utils.nlp import num_tokens
from applications.config import Chunk, DEFAULT_MODEL, ELASTIC_SEARCH_INDEX


class ChunkEmbeddingTask(TopicAwarePackerV2):
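    """
    Turn a single document into classified, embedded, searchable chunks.

    For each document the task:
      1. splits the text into chunks (via TopicAwarePackerV2),
      2. classifies each chunk with an LLM,
      3. writes chunk embeddings to Milvus,
      4. indexes chunk metadata into Elasticsearch,
    with MySQL status fields tracking (and locking) every step.
    """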

    def __init__(self, doc_id, resource):
        super().__init__(doc_id)
        self.chunk_manager = None
        self.content_manager = None
        self.mysql_client = resource.mysql_client
        self.milvus_client = resource.milvus_client
        self.es_client = resource.es_client
        self.classifier = LLMClassifier()

    @staticmethod
    async def get_embedding_list(text: str) -> List:
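        """Fetch the embedding vector for *text* from the default model."""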
        return await get_basic_embedding(text=text, model=DEFAULT_MODEL)

    def init_processer(self):
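        """Create the MySQL-backed content and chunk managers."""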
        self.content_manager = Contents(self.mysql_client)
        self.chunk_manager = ContentChunks(self.mysql_client)

    async def _chunk_each_content(self, doc_id: str, data: dict) -> List[Chunk]:
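        """Persist the content record, split it into chunks, and mark it as processing."""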
        title, text = data.get("title", "").strip(), data["text"].strip()
        text_type = data.get("text_type", 1)
        dataset_id = data.get("dataset_id", 0)  # the default dataset (knowledge base) id is 0
        re_chunk = data.get("re_chunk", False)
        dont_chunk = data.get("dont_chunk", False)
        if re_chunk:
            await self.content_manager.update_content_info(
                doc_id=doc_id,
                text=text,
                text_type=text_type,
                title=title,
                dataset_id=dataset_id,
            )
            flag = True
        else:
            flag = await self.content_manager.insert_content(
                doc_id, text, text_type, title, dataset_id
            )
        if not flag:
            return []
        raw_chunks = await self.chunk(text, text_type, dataset_id, dont_chunk)
        if not raw_chunks:
            await self.content_manager.update_content_status(
                doc_id=doc_id,
                ori_status=self.INIT_STATUS,
                new_status=self.FAILED_STATUS,
            )
            return []
        await self.content_manager.update_content_status(
            doc_id=doc_id,
            ori_status=self.INIT_STATUS,
            new_status=self.PROCESSING_STATUS,
        )
        return raw_chunks

    async def insert_into_es(self, milvus_id, chunk: Chunk) -> int:
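        """Index one chunk's metadata into Elasticsearch, keyed by its Milvus id."""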
        docs = [
            {
                "_index": ELASTIC_SEARCH_INDEX,
                "_id": milvus_id,
                "_source": {
                    "milvus_id": milvus_id,
                    "doc_id": chunk.doc_id,
                    "dataset_id": chunk.dataset_id,
                    "chunk_id": chunk.chunk_id,
                    "topic": chunk.topic,
                    "domain": chunk.domain,
                    "task_type": chunk.task_type,
                    "text_type": chunk.text_type,
                    "keywords": chunk.keywords,
                    "concepts": chunk.concepts,
                    "entities": chunk.entities,
                    "status": chunk.status,
                },
            }
        ]
        resp = await self.es_client.bulk_insert(docs)
        return resp["success"]

    async def save_each_chunk(self, chunk: Chunk):
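        """Run the full pipeline for one chunk: insert, classify, embed, index."""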
        # insert the raw chunk record
        flag = await self.chunk_manager.insert_chunk(chunk)
        if not flag:
            print("Failed to insert chunk")
            return
        acquire_lock = await self.chunk_manager.update_chunk_status(
            doc_id=chunk.doc_id,
            chunk_id=chunk.chunk_id,
            ori_status=self.INIT_STATUS,
            new_status=self.PROCESSING_STATUS,
        )
        if not acquire_lock:
            print("Failed to acquire the chunk processing lock")
            return
        completion = await self.classifier.classify_chunk(chunk)
        if not completion:
            await self.chunk_manager.update_chunk_status(
                doc_id=chunk.doc_id,
                chunk_id=chunk.chunk_id,
                ori_status=self.PROCESSING_STATUS,
                new_status=self.FAILED_STATUS,
            )
            print("Failed to get classification result from DeepSeek")
            return
        update_flag = await self.chunk_manager.set_chunk_result(
            chunk=completion,
            ori_status=self.PROCESSING_STATUS,
            new_status=self.FINISHED_STATUS,
        )
        if not update_flag:
            await self.chunk_manager.update_chunk_status(
                doc_id=chunk.doc_id,
                chunk_id=chunk.chunk_id,
                ori_status=self.PROCESSING_STATUS,
                new_status=self.FAILED_STATUS,
            )
            return
        milvus_id = await self.save_to_milvus(completion)
        if not milvus_id:
            return
        # store in Elasticsearch; the conditional status update doubles as a lock
        acquire_es_lock = await self.chunk_manager.update_es_status(
            doc_id=chunk.doc_id,
            chunk_id=chunk.chunk_id,
            ori_status=self.INIT_STATUS,
            new_status=self.PROCESSING_STATUS,
        )
        if not acquire_es_lock:
            print(f"Failed to acquire the ES lock: {chunk.doc_id}--{chunk.chunk_id}")
            return
        insert_rows = await self.insert_into_es(milvus_id, completion)
        final_status = self.FINISHED_STATUS if insert_rows else self.FAILED_STATUS
        await self.chunk_manager.update_es_status(
            doc_id=chunk.doc_id,
            chunk_id=chunk.chunk_id,
            ori_status=self.PROCESSING_STATUS,
            new_status=final_status,
        )

    async def save_to_milvus(self, chunk: Chunk):
        """
        Embed one classified chunk and insert it into Milvus.

        :param chunk: a single classified chunk
        :return: the Milvus id on success, otherwise None
        """
        # acquire the embedding lock via a conditional status update
        acquire_lock = await self.chunk_manager.update_embedding_status(
            doc_id=chunk.doc_id,
            chunk_id=chunk.chunk_id,
            new_status=self.PROCESSING_STATUS,
            ori_status=self.INIT_STATUS,
        )
        if not acquire_lock:
            print(f"Failed to acquire the embedding lock for chunk {chunk.doc_id}-{chunk.chunk_id}")
            return None
        try:
            data = {
                "doc_id": chunk.doc_id,
                "chunk_id": chunk.chunk_id,
                "vector_text": await self.get_embedding_list(chunk.text),
                "vector_summary": await self.get_embedding_list(chunk.summary),
                "vector_questions": await self.get_embedding_list(
                    ",".join(chunk.questions)
                ),
            }
            resp = await async_insert_chunk(self.milvus_client, data)
            if not resp:
                await self.chunk_manager.update_embedding_status(
                    doc_id=chunk.doc_id,
                    chunk_id=chunk.chunk_id,
                    ori_status=self.PROCESSING_STATUS,
                    new_status=self.FAILED_STATUS,
                )
                return None
            await self.chunk_manager.update_embedding_status(
                doc_id=chunk.doc_id,
                chunk_id=chunk.chunk_id,
                ori_status=self.PROCESSING_STATUS,
                new_status=self.FINISHED_STATUS,
            )
            milvus_id = resp[0]
            return milvus_id
        except Exception as e:
            await self.chunk_manager.update_embedding_status(
                doc_id=chunk.doc_id,
                chunk_id=chunk.chunk_id,
                ori_status=self.PROCESSING_STATUS,
                new_status=self.FAILED_STATUS,
            )
            print("Failed to insert into the vector database:", e)
            return None

    async def deal(self, data):
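        """Validate the payload, then chunk and process the document in the background."""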
        text = data.get("text", "")
        dont_chunk = data.get("dont_chunk", False)
        # if chunking is disabled, the whole text must still fit within the model limit
        if dont_chunk and num_tokens(text) >= self.max_tokens:
            return {"error": "Document exceeds the maximum number of tokens the model supports"}
        self.init_processer()

        async def _process():
            chunks = await self._chunk_each_content(self.doc_id, data)
            if not chunks:
                return
            # # dev: process sequentially for easier debugging
            # for chunk in tqdm(chunks):
            #     await self.save_each_chunk(chunk)
            await run_tasks_with_asyncio_task_group(
                task_list=chunks,
                handler=self.save_each_chunk,
                description="process the chunks of a single document",
                unit="chunk",
                max_concurrency=10,
            )
            await self.content_manager.update_content_status(
                doc_id=self.doc_id,
                ori_status=self.PROCESSING_STATUS,
                new_status=self.FINISHED_STATUS,
            )

        # fire-and-forget: schedule processing in the background and return immediately
        asyncio.create_task(_process())
        return self.doc_id
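

# A minimal usage sketch (hypothetical wiring; `resource` is whatever application
# container provides mysql_client / milvus_client / es_client, and the payload keys
# mirror the ones read in `deal` and `_chunk_each_content`):
#
#     task = ChunkEmbeddingTask(doc_id="doc-001", resource=resource)
#     result = await task.deal(
#         {
#             "title": "Example document",
#             "text": "...",
#             "text_type": 1,
#             "dataset_id": 0,
#             "re_chunk": False,
#             "dont_chunk": False,
#         }
#     )
#     # `deal` returns the doc_id immediately; chunk processing continues in the background.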