relay-palm.go

package controller

import (
	"encoding/json"
	"fmt"
	"github.com/gin-gonic/gin"
	"io"
	"net/http"
	"one-api/common"
)
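
// Note: the PaLM* types below mirror the generateMessage request and response
// bodies documented at the links below; the OpenAI-side types they reference
// (Message, OpenAITextResponse, etc.) are assumed to be defined elsewhere in
// this package.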
// https://developers.generativeai.google/api/rest/generativelanguage/models/generateMessage#request-body
// https://developers.generativeai.google/api/rest/generativelanguage/models/generateMessage#response-body

type PaLMChatMessage struct {
	Author  string `json:"author"`
	Content string `json:"content"`
}

type PaLMFilter struct {
	Reason  string `json:"reason"`
	Message string `json:"message"`
}

type PaLMPrompt struct {
	Messages []PaLMChatMessage `json:"messages"`
}

type PaLMChatRequest struct {
	Prompt         PaLMPrompt `json:"prompt"`
	Temperature    float64    `json:"temperature,omitempty"`
	CandidateCount int        `json:"candidateCount,omitempty"`
	TopP           float64    `json:"topP,omitempty"`
	TopK           int        `json:"topK,omitempty"`
}

type PaLMError struct {
	Code    int    `json:"code"`
	Message string `json:"message"`
	Status  string `json:"status"`
}

type PaLMChatResponse struct {
	Candidates []PaLMChatMessage `json:"candidates"`
	Messages   []Message         `json:"messages"`
	Filters    []PaLMFilter      `json:"filters"`
	Error      PaLMError         `json:"error"`
}
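
// requestOpenAI2PaLM converts an OpenAI-style chat completion request into a
// PaLM generateMessage request. Temperature, N (candidate count), and TopP are
// carried over directly, and the OpenAI MaxTokens value is mapped onto PaLM's
// TopK field. User messages get author "0"; all other roles get author "1".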
func requestOpenAI2PaLM(textRequest GeneralOpenAIRequest) *PaLMChatRequest {
	palmRequest := PaLMChatRequest{
		Prompt: PaLMPrompt{
			Messages: make([]PaLMChatMessage, 0, len(textRequest.Messages)),
		},
		Temperature:    textRequest.Temperature,
		CandidateCount: textRequest.N,
		TopP:           textRequest.TopP,
		TopK:           textRequest.MaxTokens,
	}
	for _, message := range textRequest.Messages {
		palmMessage := PaLMChatMessage{
			Content: string(message.Content),
		}
		if message.Role == "user" {
			palmMessage.Author = "0"
		} else {
			palmMessage.Author = "1"
		}
		palmRequest.Prompt.Messages = append(palmRequest.Prompt.Messages, palmMessage)
	}
	return &palmRequest
}
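
// responsePaLM2OpenAI converts a non-streaming PaLM chat response into an
// OpenAI-style text response: every candidate becomes one choice with the
// assistant role, JSON-encoded content, and a "stop" finish reason.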
func responsePaLM2OpenAI(response *PaLMChatResponse) *OpenAITextResponse {
	fullTextResponse := OpenAITextResponse{
		Choices: make([]OpenAITextResponseChoice, 0, len(response.Candidates)),
	}
	for i, candidate := range response.Candidates {
		content, _ := json.Marshal(candidate.Content)
		choice := OpenAITextResponseChoice{
			Index: i,
			Message: Message{
				Role:    "assistant",
				Content: content,
			},
			FinishReason: "stop",
		}
		fullTextResponse.Choices = append(fullTextResponse.Choices, choice)
	}
	return &fullTextResponse
}
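
// streamResponsePaLM2OpenAI wraps the first PaLM candidate (if any) in a
// single OpenAI chat.completion.chunk with model "palm2" and the stop finish
// reason, so it can be sent to the client over an SSE stream.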
func streamResponsePaLM2OpenAI(palmResponse *PaLMChatResponse) *ChatCompletionsStreamResponse {
	var choice ChatCompletionsStreamResponseChoice
	if len(palmResponse.Candidates) > 0 {
		choice.Delta.Content = palmResponse.Candidates[0].Content
	}
	choice.FinishReason = &stopFinishReason
	var response ChatCompletionsStreamResponse
	response.Object = "chat.completion.chunk"
	response.Model = "palm2"
	response.Choices = []ChatCompletionsStreamResponseChoice{choice}
	return &response
}
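
// palmStreamHandler reads the entire upstream PaLM response in a goroutine,
// converts it into a single OpenAI-style chunk, and replays it to the client
// as server-sent events followed by a terminating "data: [DONE]" message. It
// returns the candidate text together with any wrapped error.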
func palmStreamHandler(c *gin.Context, resp *http.Response) (*OpenAIErrorWithStatusCode, string) {
	responseText := ""
	responseId := fmt.Sprintf("chatcmpl-%s", common.GetUUID())
	createdTime := common.GetTimestamp()
	dataChan := make(chan string)
	stopChan := make(chan bool)
	go func() {
		responseBody, err := io.ReadAll(resp.Body)
		if err != nil {
			common.SysError("error reading stream response: " + err.Error())
			stopChan <- true
			return
		}
		err = resp.Body.Close()
		if err != nil {
			common.SysError("error closing stream response: " + err.Error())
			stopChan <- true
			return
		}
		var palmResponse PaLMChatResponse
		err = json.Unmarshal(responseBody, &palmResponse)
		if err != nil {
			common.SysError("error unmarshalling stream response: " + err.Error())
			stopChan <- true
			return
		}
		fullTextResponse := streamResponsePaLM2OpenAI(&palmResponse)
		fullTextResponse.Id = responseId
		fullTextResponse.Created = createdTime
		if len(palmResponse.Candidates) > 0 {
			responseText = palmResponse.Candidates[0].Content
		}
		jsonResponse, err := json.Marshal(fullTextResponse)
		if err != nil {
			common.SysError("error marshalling stream response: " + err.Error())
			stopChan <- true
			return
		}
		dataChan <- string(jsonResponse)
		stopChan <- true
	}()
	setEventStreamHeaders(c)
	c.Stream(func(w io.Writer) bool {
		select {
		case data := <-dataChan:
			c.Render(-1, common.CustomEvent{Data: "data: " + data})
			return true
		case <-stopChan:
			c.Render(-1, common.CustomEvent{Data: "data: [DONE]"})
			return false
		}
	})
	err := resp.Body.Close()
	if err != nil {
		return errorWrapper(err, "close_response_body_failed", http.StatusInternalServerError), ""
	}
	return nil, responseText
}
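
// palmHandler relays a non-streaming PaLM response: it parses the upstream
// body, surfaces PaLM errors (or an empty candidate list) as OpenAI-style
// errors, computes token usage from the first candidate, and writes the
// converted JSON response back to the client.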
func palmHandler(c *gin.Context, resp *http.Response, promptTokens int, model string) (*OpenAIErrorWithStatusCode, *Usage) {
	responseBody, err := io.ReadAll(resp.Body)
	if err != nil {
		return errorWrapper(err, "read_response_body_failed", http.StatusInternalServerError), nil
	}
	err = resp.Body.Close()
	if err != nil {
		return errorWrapper(err, "close_response_body_failed", http.StatusInternalServerError), nil
	}
	var palmResponse PaLMChatResponse
	err = json.Unmarshal(responseBody, &palmResponse)
	if err != nil {
		return errorWrapper(err, "unmarshal_response_body_failed", http.StatusInternalServerError), nil
	}
	if palmResponse.Error.Code != 0 || len(palmResponse.Candidates) == 0 {
		return &OpenAIErrorWithStatusCode{
			OpenAIError: OpenAIError{
				Message: palmResponse.Error.Message,
				Type:    palmResponse.Error.Status,
				Param:   "",
				Code:    palmResponse.Error.Code,
			},
			StatusCode: resp.StatusCode,
		}, nil
	}
	fullTextResponse := responsePaLM2OpenAI(&palmResponse)
	completionTokens := countTokenText(palmResponse.Candidates[0].Content, model)
	usage := Usage{
		PromptTokens:     promptTokens,
		CompletionTokens: completionTokens,
		TotalTokens:      promptTokens + completionTokens,
	}
	fullTextResponse.Usage = usage
	jsonResponse, err := json.Marshal(fullTextResponse)
	if err != nil {
		return errorWrapper(err, "marshal_response_body_failed", http.StatusInternalServerError), nil
	}
	c.Writer.Header().Set("Content-Type", "application/json")
	c.Writer.WriteHeader(resp.StatusCode)
	_, err = c.Writer.Write(jsonResponse)
	return nil, &usage
}