# chinese.py — Chinese text-to-phoneme (pinyin-based) front end.
  1. import os
  2. import re
  3. import cn2an
  4. import jieba.posseg as psg
  5. from pypinyin import Style, lazy_pinyin
  6. from fish_speech.text.symbols import punctuation
  7. from fish_speech.text.tone_sandhi import ToneSandhi
  8. current_file_path = os.path.dirname(__file__)
  9. OPENCPOP_DICT_PATH = os.path.join(current_file_path, "opencpop-strict.txt")
  10. pinyin_to_symbol_map = {
  11. line.split("\t")[0]: line.strip().split("\t")[1]
  12. for line in open(OPENCPOP_DICT_PATH).readlines()
  13. }
  14. tone_modifier = ToneSandhi()
  15. def replace_punctuation(text):
  16. text = text.replace("嗯", "恩").replace("呣", "母")
  17. replaced_text = re.sub(r"[^\u4e00-\u9fa5" + "".join(punctuation) + r"]+", "", text)
  18. return replaced_text
  19. def g2p(text):
  20. text = text_normalize(text)
  21. text = replace_punctuation(text)
  22. pattern = r"(?<=[{0}])\s*".format("".join(punctuation))
  23. sentences = [i for i in re.split(pattern, text) if i.strip() != ""]
  24. phones = _g2p(sentences)
  25. return phones
  26. def _get_initials_finals(word):
  27. initials = []
  28. finals = []
  29. orig_initials = lazy_pinyin(word, neutral_tone_with_five=True, style=Style.INITIALS)
  30. orig_finals = lazy_pinyin(
  31. word, neutral_tone_with_five=True, style=Style.FINALS_TONE3
  32. )
  33. for c, v in zip(orig_initials, orig_finals):
  34. initials.append(c)
  35. finals.append(v)
  36. return initials, finals
  37. def _g2p(segments):
  38. phones_list = []
  39. for seg in segments:
  40. pinyins = []
  41. # Replace all English words in the sentence
  42. seg = re.sub("[a-zA-Z]+", "", seg)
  43. seg_cut = psg.lcut(seg)
  44. initials = []
  45. finals = []
  46. seg_cut = tone_modifier.pre_merge_for_modify(seg_cut)
  47. for word, pos in seg_cut:
  48. if pos == "eng":
  49. continue
  50. sub_initials, sub_finals = _get_initials_finals(word)
  51. sub_finals = tone_modifier.modified_tone(word, pos, sub_finals)
  52. initials.append(sub_initials)
  53. finals.append(sub_finals)
  54. # assert len(sub_initials) == len(sub_finals) == len(word)
  55. initials = sum(initials, [])
  56. finals = sum(finals, [])
  57. #
  58. for c, v in zip(initials, finals):
  59. raw_pinyin = c + v
  60. # NOTE: post process for pypinyin outputs
  61. # we discriminate i, ii and iii
  62. if c == v:
  63. assert c in punctuation
  64. phone = [c]
  65. else:
  66. v_without_tone = v[:-1]
  67. tone = v[-1]
  68. pinyin = c + v_without_tone
  69. assert tone in "12345"
  70. if c:
  71. # 多音节
  72. v_rep_map = {
  73. "uei": "ui",
  74. "iou": "iu",
  75. "uen": "un",
  76. }
  77. if v_without_tone in v_rep_map.keys():
  78. pinyin = c + v_rep_map[v_without_tone]
  79. else:
  80. # 单音节
  81. pinyin_rep_map = {
  82. "ing": "ying",
  83. "i": "yi",
  84. "in": "yin",
  85. "u": "wu",
  86. }
  87. if pinyin in pinyin_rep_map.keys():
  88. pinyin = pinyin_rep_map[pinyin]
  89. else:
  90. single_rep_map = {
  91. "v": "yu",
  92. "e": "e",
  93. "i": "y",
  94. "u": "w",
  95. }
  96. if pinyin[0] in single_rep_map.keys():
  97. pinyin = single_rep_map[pinyin[0]] + pinyin[1:]
  98. assert pinyin in pinyin_to_symbol_map.keys(), (pinyin, seg, raw_pinyin)
  99. new_c, new_v = pinyin_to_symbol_map[pinyin].split(" ")
  100. new_v = new_v + tone
  101. phone = [new_c, new_v]
  102. phones_list += phone
  103. return phones_list
  104. def text_normalize(text):
  105. return cn2an.transform(text, "an2cn")
  106. if __name__ == "__main__":
  107. text = "啊——但是《原神》是由,米哈\游自主,研发的一款全.新开放世界.冒险游戏"
  108. text = "呣呣呣~就是…大人的鼹鼠党吧?"
  109. # text = "你好"
  110. text = text_normalize(text)
  111. print(g2p(text))