diff --git a/applogic/chatglm.go b/applogic/chatglm.go
new file mode 100644
index 0000000..6373a6e
--- /dev/null
+++ b/applogic/chatglm.go
@@ -0,0 +1,483 @@
+package applogic
+
+import (
+	"bufio"
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"io"
+	"net/http"
+	"strings"
+	"sync"
+
+	"github.com/google/uuid"
+	"github.com/hoshinonyaruko/gensokyo-llm/config"
+	"github.com/hoshinonyaruko/gensokyo-llm/fmtf"
+	"github.com/hoshinonyaruko/gensokyo-llm/prompt"
+	"github.com/hoshinonyaruko/gensokyo-llm/structs"
+	"github.com/hoshinonyaruko/gensokyo-llm/utils"
+)
+
+// Tracks the reply text for each conversationId
+var (
+	// lastCompleteResponsesGlm stores the full accumulated reply per conversationId
+	lastCompleteResponsesGlm sync.Map
+)
+
+func (app *App) ChatHandlerGlm(w http.ResponseWriter, r *http.Request) {
+	if r.Method != "POST" {
+		http.Error(w, "Only POST method is allowed", http.StatusMethodNotAllowed)
+		return
+	}
+
+	// Get the visitor's IP address
+	ip := r.RemoteAddr             // note: this may include a port
+	ip = strings.Split(ip, ":")[0] // strip the port, keep only the IP
+
+	// Get the IP whitelist
+	whiteList := config.IPWhiteList()
+
+	// Check whether the IP is whitelisted
+	if !utils.Contains(whiteList, ip) {
+		http.Error(w, "Access denied", http.StatusForbidden)
+		return
+	}
+
+	var msg structs.Message
+	err := json.NewDecoder(r.Body).Decode(&msg)
+	if err != nil {
+		http.Error(w, err.Error(), http.StatusBadRequest)
+		return
+	}
+
+	// Read the "prompt" URL parameter
+	promptstr := r.URL.Query().Get("prompt")
+	if promptstr != "" {
+		// the prompt parameter is present; log it
+		fmtf.Printf("Received prompt parameter: %s\n", promptstr)
+	}
+
+	// Read the "userid" URL parameter
+	useridstr := r.URL.Query().Get("userid")
+	if useridstr != "" {
+		// the userid parameter is present; log it
+		fmtf.Printf("Received userid parameter: %s\n", useridstr)
+	}
+
+	msg.Role = "user"
+	// Reverse the user input if configured
+	if config.GetReverseUserPrompt() {
+		msg.Text = utils.ReverseString(msg.Text)
+	}
+
+	if msg.ConversationID == "" {
+		msg.ConversationID = utils.GenerateUUID()
+		app.createConversation(msg.ConversationID)
+	}
+
+	userMessageID, err := app.addMessage(msg)
+	if err != nil {
+		http.Error(w, err.Error(), http.StatusInternalServerError)
+		return
+	}
+
+	var history []structs.Message
+
+	// Depending on the prompt parameter, load the prompt from config.yml or from the prompts folder
+	if promptstr == "" {
+		// Get the system prompt
+		systemPromptContent := config.SystemPrompt()
+		if systemPromptContent != "0" {
+			systemPrompt := structs.Message{
+				Text: systemPromptContent,
+				Role: "system",
+			}
+			// Prepend the system prompt to the history
+			history = append([]structs.Message{systemPrompt}, history...)
+		}
+
+		// Fetch the FirstQ&A, SecondQ&A and ThirdQ&A pairs
+		pairs := []struct {
+			Q     string
+			A     string
+			RoleQ string // role of the question
+			RoleA string // role of the answer
+		}{
+			{config.GetFirstQ(), config.GetFirstA(), "user", "assistant"},
+			{config.GetSecondQ(), config.GetSecondA(), "user", "assistant"},
+			{config.GetThirdQ(), config.GetThirdA(), "user", "assistant"},
+		}
+
+		// Append each Q&A pair to the history when both sides are non-empty
+		for _, pair := range pairs {
+			if pair.Q != "" && pair.A != "" {
+				qMessage := structs.Message{
+					Text: pair.Q,
+					Role: pair.RoleQ,
+				}
+				aMessage := structs.Message{
+					Text: pair.A,
+					Role: pair.RoleA,
+				}
+
+				// mind the order: the question must precede the answer
+				history = append(history, qMessage, aMessage)
+			}
+		}
+	} else {
+		// Default path: normal prompt order
+		if !config.GetEnhancedQA(promptstr) {
+			history, err = prompt.GetMessagesFromFilename(promptstr)
+			if err != nil {
+				fmtf.Printf("prompt.GetMessagesFromFilename error: %v\n", err)
+			}
+		} else {
+			// Fetch only the system prompt
+			systemMessage, err := prompt.FindFirstSystemMessage(history)
+			if err != nil {
+				fmt.Println("Error:", err)
+			} else {
+				// a system message was found; add it back to the history
+				history = append(history, systemMessage)
+				fmt.Println("Added system message back to history.")
+			}
+		}
+	}
+
+	// Fetch the stored conversation history
+	if msg.ParentMessageID != "" {
+		userhistory, err := app.getHistory(msg.ConversationID, msg.ParentMessageID)
+		if err != nil {
+			http.Error(w, err.Error(), http.StatusInternalServerError)
+			return
+		}
+
+		// Truncate the history to the token budget
+		userHistory := truncateHistoryGlm(userhistory, msg.Text, promptstr)
+
+		if promptstr != "" {
+			// mind the order: questions must come after the system prompt
+			// Fetch the system-level preset QA pairs for this prompt
+			systemHistory, err := prompt.GetMessagesExcludingSystem(promptstr)
+			if err != nil {
+				fmtf.Printf("Error getting system history: %v, promptstr[%v]\n", err, promptstr)
+				return
+			}
+
+			// Enhanced QA logic
+			if config.GetEnhancedQA(promptstr) {
+				// Keep the system history aligned with the user/assistant history,
+				// padding with empty records when short; the trailing pair is
+				// reserved for the current QA
+				if len(systemHistory)-2 > len(userHistory) {
+					difference := len(systemHistory) - len(userHistory)
+					for i := 0; i < difference; i++ {
+						userHistory = append(userHistory, structs.Message{Text: "", Role: "user"})
+						userHistory = append(userHistory, structs.Message{Text: "", Role: "assistant"})
+					}
+				}
+
+				// With a single-member system history, skip the overlay and leave it for later
+				if len(systemHistory) > 1 {
+					// Overlay the system history (all but the last two members) onto the
+					// matching user/assistant entries, starting from the most recent records
+					for i := 0; i < len(systemHistory)-2; i++ {
+						sysMsg := systemHistory[i]
+						index := len(userHistory) - len(systemHistory) + i
+						if index >= 0 && index < len(userHistory) && (userHistory[index].Role == "user" || userHistory[index].Role == "assistant") {
+							userHistory[index].Text += fmt.Sprintf(" (%s)", sysMsg.Text)
+						}
+					}
+				}
+			} else {
+				// Simply place the system-level QA ahead of the user conversation
+				// (the AI is aware of it but will not actively lead with it)
+				history = append(history, systemHistory...)
+			}
+
+			// The last systemHistory member is left for later processing
+		}
+
+		// Append the user history to the overall history
+		history = append(history, userHistory...)
+	}
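+
+	// Note: history now holds, in order, the system prompt (from config.yml or the
+	// prompt file), any preset QA pairs, and the user's stored history; the current
+	// msg.Text is appended separately when the request messages are assembled below.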
+	fmtf.Printf("GLM context history: %v\n", history)
+
+	// Build the request to the GLM API
+	apiURL := config.GetGlmApiPath()
+
+	// Assemble the message history plus the current message
+	messages := []map[string]interface{}{}
+	for _, hMsg := range history {
+		messages = append(messages, map[string]interface{}{
+			"role":    hMsg.Role,
+			"content": hMsg.Text,
+		})
+	}
+
+	// Append the current user message
+	messages = append(messages, map[string]interface{}{
+		"role":    "user",
+		"content": msg.Text,
+	})
+
+	if useridstr == "" {
+		useridstr = "123"
+	}
+
+	// Read the configuration
+	apiKey := config.GetGlmApiKey()
+	// Build the request body
+	requestBody := map[string]interface{}{
+		"model":       config.GetGlmModel(),
+		"messages":    messages,
+		"do_sample":   config.GetGlmDoSample(),
+		"stream":      config.GetuseSse(promptstr),
+		"temperature": config.GetGlmTemperature(),
+		"top_p":       config.GetGlmTopP(),
+		"max_tokens":  config.GetGlmMaxTokens(),
+		"stop":        config.GetGlmStop(),
+		//"tools": config.GetGlmTools(), // parameter format still unclear
+		"tool_choice": config.GetGlmToolChoice(),
+		"user_id":     useridstr,
+	}
+
+	fmtf.Printf("glm requestBody: %v\n", requestBody)
+	requestBodyJSON, _ := json.Marshal(requestBody)
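+
+	// For reference, the marshaled body is shaped roughly like this (values illustrative):
+	//   {"model":"glm-4","messages":[{"role":"user","content":"..."}],"do_sample":true,
+	//    "stream":false,"temperature":0.95,"top_p":0.7,"max_tokens":1024,
+	//    "stop":["stop_token"],"tool_choice":"auto","user_id":"123"}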
+
+	// Prepare the HTTP request
+	client := &http.Client{}
+	req, err := http.NewRequest("POST", apiURL, bytes.NewBuffer(requestBodyJSON))
+	if err != nil {
+		http.Error(w, fmtf.Sprintf("Failed to create request: %v", err), http.StatusInternalServerError)
+		return
+	}
+
+	// Set Content-Type and Authorization
+	req.Header.Set("Content-Type", "application/json")
+	req.Header.Set("Authorization", "Bearer "+apiKey)
+
+	// Send the request
+	resp, err := client.Do(req)
+	if err != nil {
+		http.Error(w, fmtf.Sprintf("Error sending request to glm API: %v", err), http.StatusInternalServerError)
+		return
+	}
+	defer resp.Body.Close()
+
+	if !config.GetuseSse(promptstr) {
+		// Handle the non-streaming response
+		responseBody, err := io.ReadAll(resp.Body)
+		if err != nil {
+			http.Error(w, fmt.Sprintf("Failed to read response body: %v", err), http.StatusInternalServerError)
+			return
+		}
+		fmt.Printf("glm response: %v\n", string(responseBody))
+
+		var glmApiResponse struct {
+			Choices []struct {
+				FinishReason string `json:"finish_reason"`
+				Message      struct {
+					Role    string `json:"role"`
+					Content string `json:"content"`
+				} `json:"message"`
+			} `json:"choices"`
+			Created   int64  `json:"created"`
+			ID        string `json:"id"`
+			Model     string `json:"model"`
+			RequestID string `json:"request_id"`
+			Usage     struct {
+				CompletionTokens int `json:"completion_tokens"`
+				PromptTokens     int `json:"prompt_tokens"`
+				TotalTokens      int `json:"total_tokens"`
+			} `json:"usage"`
+		}
+
+		if err := json.Unmarshal(responseBody, &glmApiResponse); err != nil {
+			http.Error(w, fmt.Sprintf("Error unmarshaling response: %v", err), http.StatusInternalServerError)
+			return
+		}
+
+		// Extract the reply text from the API response
+		if len(glmApiResponse.Choices) > 0 {
+			responseText := glmApiResponse.Choices[0].Message.Content
+
+			// Store the assistant message
+			assistantMessageID, err := app.addMessage(structs.Message{
+				ConversationID:  msg.ConversationID,
+				ParentMessageID: userMessageID,
+				Text:            responseText,
+				Role:            "assistant",
+			})
+			if err != nil {
+				http.Error(w, err.Error(), http.StatusInternalServerError)
+				return
+			}
+
+			// Build the response payload: reply text, conversation ID, message ID and usage
+			responseMap := map[string]interface{}{
+				"response":       responseText,
+				"conversationId": msg.ConversationID,
+				"messageId":      assistantMessageID,
+				"details": map[string]interface{}{
+					"usage": structs.UsageInfo{
+						PromptTokens:     glmApiResponse.Usage.PromptTokens,
+						CompletionTokens: glmApiResponse.Usage.CompletionTokens,
+					},
+				},
+			}
+
+			// Set the response Content-Type to JSON
+			w.Header().Set("Content-Type", "application/json")
+			// Encode the payload as JSON and send it
+			if err := json.NewEncoder(w).Encode(responseMap); err != nil {
+				http.Error(w, fmt.Sprintf("Error encoding response: %v", err), http.StatusInternalServerError)
+				return
+			}
+		} else {
+			http.Error(w, "No response data available from glm API", http.StatusInternalServerError)
+		}
+	} else {
+		// Set the SSE response headers
+		w.Header().Set("Content-Type", "text/event-stream")
+		w.Header().Set("Cache-Control", "no-cache")
+		w.Header().Set("Connection", "keep-alive")
+
+		flusher, ok := w.(http.Flusher)
+		if !ok {
+			http.Error(w, "Streaming unsupported!", http.StatusInternalServerError)
+			return
+		}
+
+		// Generate a random UUID for this request's accumulator key
+		randomUUID, err := uuid.NewRandom()
+		if err != nil {
+			http.Error(w, "Failed to generate UUID", http.StatusInternalServerError)
+			return
+		}
+
+		reader := bufio.NewReader(resp.Body)
+		var totalUsage structs.GPTUsageInfo
+
+		for {
+			line, err := reader.ReadString('\n')
+			if err != nil {
+				if err == io.EOF {
+					break // end of stream
+				}
+				// report the error over the stream
+				fmt.Fprintf(w, "data: %s\n\n", fmt.Sprintf("error reading stream data: %v", err))
+				flusher.Flush()
+				continue
+			}
+			if strings.HasPrefix(line, "data:") {
+				eventDataJSON := line[5:] // strip the "data:" prefix; the JSON decoder tolerates the leading space
+				// some OpenAI-style deployments terminate the stream with "data: [DONE]"
+				if strings.TrimSpace(eventDataJSON) == "[DONE]" {
+					break
+				}
+				// Parse the JSON payload
+				var eventData structs.GlmSSEData
+				if err := json.Unmarshal([]byte(eventDataJSON), &eventData); err != nil {
+					fmt.Fprintf(w, "data: %s\n\n", fmt.Sprintf("error parsing event data: %v", err))
+					flusher.Flush()
+					continue
+				}
+				// Record usage when the stream reports it (typically on the final frame)
+				if eventData.Usage.TotalTokens > 0 {
+					totalUsage.PromptTokens = eventData.Usage.PromptTokens
+					totalUsage.CompletionTokens = eventData.Usage.CompletionTokens
+				}
+				// Skip frames without choices to avoid an index-out-of-range panic
+				if len(eventData.Choices) == 0 {
+					continue
+				}
+
+				// Forward the intermediate delta to the client
+				tempResponseMap := map[string]interface{}{
+					"response":       eventData.Choices[0].Delta.Content,
+					"conversationId": msg.ConversationID, // initialized above
+					// the "details" field is left for further processing if needed
+				}
+				tempResponseJSON, _ := json.Marshal(tempResponseMap)
+				fmt.Fprintf(w, "data: %s\n\n", string(tempResponseJSON))
+				flusher.Flush()
+
+				// Maintain the accumulated reply for the final event
+				conversationId := msg.ConversationID + randomUUID.String()
+				// load the accumulated text so far
+				completeResponse, _ := lastCompleteResponsesGlm.LoadOrStore(conversationId, "")
+				// store the updated accumulated text
+				updatedCompleteResponse := completeResponse.(string) + eventData.Choices[0].Delta.Content
+				lastCompleteResponsesGlm.Store(conversationId, updatedCompleteResponse)
+			}
+		}
+
+		// The accumulator key is the real conversation ID plus a per-request UUID
+		conversationId := msg.ConversationID + randomUUID.String()
+		completeResponse, _ := lastCompleteResponsesGlm.LoadOrStore(conversationId, "")
+		// Persist the full reply once all events are processed
+		assistantMessageID, err := app.addMessage(structs.Message{
+			ConversationID:  msg.ConversationID,
+			ParentMessageID: userMessageID,
+			Text:            completeResponse.(string),
+			Role:            "assistant",
+		})
+
+		if err != nil {
+			http.Error(w, err.Error(), http.StatusInternalServerError)
+			return
+		}
+
+		// Send the final event, reporting the real conversationId (the composite key is internal)
+		if finalContent, ok := lastCompleteResponsesGlm.Load(conversationId); ok {
+			finalResponseMap := map[string]interface{}{
+				"response":       finalContent,
+				"conversationId": msg.ConversationID,
+				"messageId":      assistantMessageID,
+				"details": map[string]interface{}{
+					"usage": totalUsage,
+				},
+			}
+			finalResponseJSON, _ := json.Marshal(finalResponseMap)
+			fmtf.Fprintf(w, "data: %s\n\n", string(finalResponseJSON))
+			flusher.Flush()
+		}
+		// Drop the per-request accumulator so the map does not grow without bound
+		lastCompleteResponsesGlm.Delete(conversationId)
+	}
+
+}
+
+func truncateHistoryGlm(history []structs.Message, prompt string, promptstr string) []structs.Message {
+	MAX_TOKENS := config.GetGlmMaxTokens(promptstr)
+
+	tokenCount := len(prompt)
+	for _, msg := range history {
+		tokenCount += len(msg.Text)
+	}
+
+	if tokenCount >= MAX_TOKENS {
+		// Step 1: drop messages from the front until the token budget is met
+		for tokenCount > MAX_TOKENS && len(history) > 0 {
+			tokenCount -= len(history[0].Text)
+			history = history[1:]
+
+			// after a removal, make sure the history still starts with a user message
+			if len(history) > 0 && history[0].Role == "assistant" {
+				tokenCount -= len(history[0].Text)
+				history = history[1:]
+			}
+		}
+	}
+
+	// Step 2: drop QA pairs that contain empty text
+	for i := 0; i < len(history)-1; i++ { // len(history)-1 because messages are inspected in pairs
+		q := history[i]
+		a := history[i+1]
+
+		// only drop genuine QA pairs (Q is "user", A is "assistant") to avoid removing other messages
+		if q.Role == "user" && a.Role == "assistant" && (len(q.Text) == 0 || len(a.Text) == 0) {
+			fmtf.Println("found an empty QA pair: ", q, a)
+			// remove this QA pair
+			history = append(history[:i], history[i+2:]...)
+			i-- // adjust the index after the removal so the next element is checked correctly
+		}
+	}
+
+	// Make sure the history ends with a user message; trim until it does
+	if len(history) > 0 && history[len(history)-1].Role != "user" {
+		for len(history) > 0 && history[len(history)-1].Role != "user" {
+			history = history[:len(history)-1]
+		}
+	}
+
+	return history
+}
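
Review note: a minimal client-side sketch for consuming the SSE stream ChatHandlerGlm emits. The frame keys ("response", "conversationId", "messageId") come from the handler above; the request field names ("message", "conversationId") and the default port are assumptions based on the project's README conventions, so adjust them to your deployment.

    package main

    import (
    	"bufio"
    	"bytes"
    	"encoding/json"
    	"fmt"
    	"net/http"
    	"strings"
    )

    func main() {
    	payload := []byte(`{"message": "你好", "conversationId": ""}`) // field names assumed
    	resp, err := http.Post("http://127.0.0.1:46230/conversation_glm", "application/json", bytes.NewReader(payload))
    	if err != nil {
    		panic(err)
    	}
    	defer resp.Body.Close()

    	scanner := bufio.NewScanner(resp.Body)
    	for scanner.Scan() {
    		line := scanner.Text()
    		if !strings.HasPrefix(line, "data:") {
    			continue // skip the blank separators between events
    		}
    		var frame map[string]interface{}
    		if err := json.Unmarshal([]byte(strings.TrimPrefix(line, "data:")), &frame); err != nil {
    			continue // plain-text error frames are not JSON
    		}
    		if id, ok := frame["messageId"]; ok {
    			fmt.Println("\nfinal frame, messageId:", id) // the final frame carries the full text
    			break
    		}
    		fmt.Print(frame["response"]) // incremental delta
    	}
    }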
diff --git a/applogic/chatgpt.go b/applogic/chatgpt.go
index e36f0ef..2a242a4 100644
--- a/applogic/chatgpt.go
+++ b/applogic/chatgpt.go
@@ -470,7 +470,7 @@ func (app *App) ChatHandlerChatgpt(w http.ResponseWriter, r *http.Request) {
 			}
 			tempResponseJSON, _ := json.Marshal(tempResponseMap)
 			fmtf.Fprintf(w, "data: %s\n\n", string(tempResponseJSON))
-			fmt.Printf("测试返回:%v\n", string(tempResponseJSON))
+			//fmt.Printf("测试返回:%v\n", string(tempResponseJSON))
 			flusher.Flush()
 		}
 	}
diff --git a/applogic/gensokyo.go b/applogic/gensokyo.go
index 292fd4e..246c80c 100644
--- a/applogic/gensokyo.go
+++ b/applogic/gensokyo.go
@@ -54,6 +54,24 @@ func ResetIndex(s string) {
 	stringToIndexMap[s] = 0
 }
 
+// checkMessageForHints reports whether the message contains any configured hint word
+func checkMessageForHints(message string) bool {
+	// read the hint-word list from the config
+	hintWords := config.GetGroupHintWords()
+	if len(hintWords) == 0 {
+		return true // not configured: every message passes
+	}
+	// check each hint word against the message
+	for _, hint := range hintWords {
+		if strings.Contains(message, hint) {
+			return true // the message contains at least one hint word
+		}
+	}
+	// no hint word matched: log it and return false
+	fmtf.Println("No hint words found in the message:", message)
+	return false
+}
+
 func (app *App) GensokyoHandler(w http.ResponseWriter, r *http.Request) {
 	// 只处理POST请求
 	if r.Method != http.MethodPost {
@@ -92,6 +110,15 @@ func (app *App) GensokyoHandler(w http.ResponseWriter, r *http.Request) {
 		return
 	}
 
+	// For group chats, require one of the trigger (hint) words
+	if message.RealMessageType != "group_private" && message.MessageType != "private" {
+		if !checkMessageForHints(message.RawMessage) {
+			w.WriteHeader(http.StatusOK)
+			w.Write([]byte("Group message does not contain hint words."))
+			return
+		}
+	}
+
 	// 从数据库读取用户的剧情存档
 	CustomRecord, err := app.FetchCustomRecord(message.UserID)
 	if err != nil {
@@ -188,6 +215,13 @@ func (app *App) GensokyoHandler(w http.ResponseWriter, r *http.Request) {
 	fmtf.Printf("Received message: %v\n", message.Message)
 	fmtf.Printf("Full message details: %+v\n", message)
 
+	// Convert array-form messages
+	// check the message type and normalize it
+	if _, ok := message.Message.(string); !ok {
+		// not a string: parse the message and force-convert it to a string
+		message.Message = ParseMessageContent(message.Message)
+	}
+
 	// 判断message.Message的类型
 	switch msg := message.Message.(type) {
 	case string:
@@ -515,6 +549,11 @@ func (app *App) GensokyoHandler(w http.ResponseWriter, r *http.Request) {
 		urlParams.Add("prompt", promptstr)
 	}
 
+	// glm bans users by the userid parameter, so pass it along
+	if config.GetApiType() == 5 {
+		urlParams.Add("userid", strconv.FormatInt(message.UserID, 10))
+	}
+
 	// 将查询参数编码后附加到基本URL上
 	fullURL := baseURL
 	if len(urlParams) > 0 {
@@ -822,11 +861,13 @@ func (app *App) GensokyoHandler(w http.ResponseWriter, r *http.Request) {
 		}
 	case map[string]interface{}:
 		// message.Message是一个map[string]interface{}
+		// in theory unreachable: the message was already converted to a string above
 		fmtf.Println("Received map message, handling not implemented yet")
 		// 处理map类型消息的逻辑(TODO)
 
 	default:
 		// message.Message是一个未知类型
+		// in theory unreachable: the message was already converted to a string above
 		fmtf.Printf("Received message of unexpected type: %T\n", msg)
 		return
 	}
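
Review note: the group gating above is plain substring matching. A standalone sketch of the same rule, with the hint list passed in rather than read from config:

    // containsHint mirrors checkMessageForHints without the config dependency
    // (assumes the usual "strings" import).
    func containsHint(message string, hintWords []string) bool {
    	if len(hintWords) == 0 {
    		return true // nothing configured: let every group message through
    	}
    	for _, hint := range hintWords {
    		if strings.Contains(message, hint) {
    			return true
    		}
    	}
    	return false
    }

    // containsHint("[CQ:at,qq=2] 你好", []string{"[CQ:at,qq=2]"}) == true
    // containsHint("hello", []string{"[CQ:at,qq=2]"})            == false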
diff --git a/applogic/msgpraser.go b/applogic/msgpraser.go
new file mode 100644
index 0000000..7d80e18
--- /dev/null
+++ b/applogic/msgpraser.go
@@ -0,0 +1,154 @@
+package applogic
+
+import (
+	"encoding/base64"
+	"encoding/json"
+	"strings"
+
+	"github.com/hoshinonyaruko/gensokyo-llm/fmtf"
+)
+
+func ParseMessageContent(message interface{}) string {
+	messageText := ""
+
+	switch message := message.(type) {
+	case string:
+		fmtf.Printf("params.message is a string\n")
+		messageText = message
+	case []interface{}:
+		// a slice of segment maps
+		fmtf.Printf("params.message is a slice (segment_type_koishi)\n")
+		for _, segment := range message {
+			segmentMap, ok := segment.(map[string]interface{})
+			if !ok {
+				continue
+			}
+
+			segmentType, ok := segmentMap["type"].(string)
+			if !ok {
+				continue
+			}
+
+			segmentContent := ""
+			switch segmentType {
+			case "text":
+				segmentContent, _ = segmentMap["data"].(map[string]interface{})["text"].(string)
+			case "image":
+				fileContent, _ := segmentMap["data"].(map[string]interface{})["file"].(string)
+				segmentContent = "[CQ:image,file=" + fileContent + "]"
+			case "voice":
+				fileContent, _ := segmentMap["data"].(map[string]interface{})["file"].(string)
+				segmentContent = "[CQ:record,file=" + fileContent + "]"
+			case "record":
+				fileContent, _ := segmentMap["data"].(map[string]interface{})["file"].(string)
+				segmentContent = "[CQ:record,file=" + fileContent + "]"
+			case "at":
+				qqNumber, _ := segmentMap["data"].(map[string]interface{})["qq"].(string)
+				segmentContent = "[CQ:at,qq=" + qqNumber + "]"
+			case "markdown":
+				mdContent, ok := segmentMap["data"].(map[string]interface{})["data"]
+				if ok {
+					if mdContentMap, isMap := mdContent.(map[string]interface{}); isMap {
+						// mdContent is a map[string]interface{}; marshal it directly
+						mdContentBytes, err := json.Marshal(mdContentMap)
+						if err != nil {
+							fmtf.Printf("Error marshaling mdContentMap to JSON:%v", err)
+						}
+						encoded := base64.StdEncoding.EncodeToString(mdContentBytes)
+						segmentContent = "[CQ:markdown,data=" + encoded + "]"
+					} else if mdContentStr, isString := mdContent.(string); isString {
+						// mdContent is a string
+						if strings.HasPrefix(mdContentStr, "base64://") {
+							// already base64-encoded; use it as-is
+							segmentContent = "[CQ:markdown,data=" + mdContentStr + "]"
+						} else {
+							// unescape entity-escaped JSON text
+							mdContentStr = strings.ReplaceAll(mdContentStr, "&amp;", "&")
+							mdContentStr = strings.ReplaceAll(mdContentStr, "&#91;", "[")
+							mdContentStr = strings.ReplaceAll(mdContentStr, "&#93;", "]")
+							mdContentStr = strings.ReplaceAll(mdContentStr, "&#44;", ",")
+
+							// treat the unescaped string as a JSON object: re-serialize and encode it
+							var jsonMap map[string]interface{}
+							if err := json.Unmarshal([]byte(mdContentStr), &jsonMap); err != nil {
+								fmtf.Printf("Error unmarshaling string to JSON:%v", err)
+							}
+							mdContentBytes, err := json.Marshal(jsonMap)
+							if err != nil {
+								fmtf.Printf("Error marshaling jsonMap to JSON:%v", err)
+							}
+							encoded := base64.StdEncoding.EncodeToString(mdContentBytes)
+							segmentContent = "[CQ:markdown,data=" + encoded + "]"
+						}
+					}
+				} else {
+					fmtf.Printf("Error parsing markdown segment: it has a type but data is nil.")
+				}
+			}
+
+			messageText += segmentContent
+		}
+	case map[string]interface{}:
+		// a single segment map
+		fmtf.Printf("params.message is a map (segment_type_trss)\n")
+		messageType, _ := message["type"].(string)
+		switch messageType {
+		case "text":
+			messageText, _ = message["data"].(map[string]interface{})["text"].(string)
+		case "image":
+			fileContent, _ := message["data"].(map[string]interface{})["file"].(string)
+			messageText = "[CQ:image,file=" + fileContent + "]"
+		case "voice":
+			fileContent, _ := message["data"].(map[string]interface{})["file"].(string)
+			messageText = "[CQ:record,file=" + fileContent + "]"
+		case "record":
+			fileContent, _ := message["data"].(map[string]interface{})["file"].(string)
+			messageText = "[CQ:record,file=" + fileContent + "]"
+		case "at":
+			qqNumber, _ := message["data"].(map[string]interface{})["qq"].(string)
+			messageText = "[CQ:at,qq=" + qqNumber + "]"
+		case "markdown":
+			mdContent, ok := message["data"].(map[string]interface{})["data"]
+			if ok {
+				if mdContentMap, isMap := mdContent.(map[string]interface{}); isMap {
+					// mdContent is a map[string]interface{}; marshal it directly
+					mdContentBytes, err := json.Marshal(mdContentMap)
+					if err != nil {
+						fmtf.Printf("Error marshaling mdContentMap to JSON:%v", err)
+					}
+					encoded := base64.StdEncoding.EncodeToString(mdContentBytes)
+					messageText = "[CQ:markdown,data=" + encoded + "]"
+				} else if mdContentStr, isString := mdContent.(string); isString {
+					// mdContent is a string
+					if strings.HasPrefix(mdContentStr, "base64://") {
+						// already base64-encoded; use it as-is
+						messageText = "[CQ:markdown,data=" + mdContentStr + "]"
+					} else {
+						// unescape entity-escaped JSON text
+						mdContentStr = strings.ReplaceAll(mdContentStr, "&amp;", "&")
+						mdContentStr = strings.ReplaceAll(mdContentStr, "&#91;", "[")
+						mdContentStr = strings.ReplaceAll(mdContentStr, "&#93;", "]")
+						mdContentStr = strings.ReplaceAll(mdContentStr, "&#44;", ",")
+
+						// treat the unescaped string as a JSON object: re-serialize and encode it
+						var jsonMap map[string]interface{}
+						if err := json.Unmarshal([]byte(mdContentStr), &jsonMap); err != nil {
+							fmtf.Printf("Error unmarshaling string to JSON:%v", err)
+						}
+						mdContentBytes, err := json.Marshal(jsonMap)
+						if err != nil {
+							fmtf.Printf("Error marshaling jsonMap to JSON:%v", err)
+						}
+						encoded := base64.StdEncoding.EncodeToString(mdContentBytes)
+						messageText = "[CQ:markdown,data=" + encoded + "]"
+					}
+				}
+			} else {
+				fmtf.Printf("Error parsing markdown segment: it has a type but data is nil.")
+			}
+		}
+	default:
+		fmtf.Println("Unsupported message format: params.message field is not a string, map or slice")
+	}
+	return messageText
+}
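
Review note: the replacements above are the OneBot/CQ escape set (&amp;, &#91;, &#93;, &#44;). One ordering caveat, as a sketch: unescaping the numeric entities before "&amp;" avoids double-unescaping inputs such as "&amp;#91;", which the order used in the file would turn into "[".

    // unescapeCQ reverses CQ-code escaping; "&amp;" must be handled last.
    func unescapeCQ(s string) string {
    	s = strings.ReplaceAll(s, "&#91;", "[")
    	s = strings.ReplaceAll(s, "&#93;", "]")
    	s = strings.ReplaceAll(s, "&#44;", ",")
    	s = strings.ReplaceAll(s, "&amp;", "&")
    	return s
    }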
diff --git a/applogic/tongyiqianwen.go b/applogic/tongyiqianwen.go
index 5853109..e067e90 100644
--- a/applogic/tongyiqianwen.go
+++ b/applogic/tongyiqianwen.go
@@ -296,11 +296,9 @@ func (app *App) ChatHandlerTyqw(w http.ResponseWriter, r *http.Request) {
 	}
 
 	// 设置工作区
-	workspace, err := config.GetTyqworkspace() // 假设这是获取workspace的函数
-	if err != nil {
-		fmt.Println("Error getting workspace:", err)
-	}
+	workspace, _ := config.GetTyqworkspace() // assuming this is the workspace getter
 	if workspace != "" {
+		fmt.Println("X-DashScope-WorkSpace:", workspace)
 		req.Header.Set("X-DashScope-WorkSpace", workspace)
 	}
 
diff --git a/config/config.go b/config/config.go
index 771e37d..d3d2704 100644
--- a/config/config.go
+++ b/config/config.go
@@ -2228,3 +2228,157 @@ func GetTyqworkspace() (string, error) {
 	}
 	return "", fmt.Errorf("configuration instance is not initialized") // 错误处理,当配置实例未初始化时
 }
+
+// GetGlmApiPath gets the GLM API path
+func GetGlmApiPath() string {
+	mu.Lock()
+	defer mu.Unlock()
+	if instance != nil {
+		return instance.Settings.GlmApiPath
+	}
+	return "" // default / error fallback
+}
+
+// GetGlmModel gets the model code
+func GetGlmModel() string {
+	mu.Lock()
+	defer mu.Unlock()
+	if instance != nil {
+		return instance.Settings.GlmModel
+	}
+	return ""
+}
+
+// GetGlmApiKey gets the GLM API key
+func GetGlmApiKey() string {
+	mu.Lock()
+	defer mu.Unlock()
+	if instance != nil {
+		return instance.Settings.GlmApiKey
+	}
+	return ""
+}
+
+// GetGlmMaxTokens gets the maximum number of output tokens; accepts an optional prompt basename
+func GetGlmMaxTokens(options ...string) int {
+	mu.Lock()
+	defer mu.Unlock()
+	return getGlmMaxTokensInternal(options...)
+}
+
+// Internal worker that does not touch the lock, so it can recurse safely
+func getGlmMaxTokensInternal(options ...string) int {
+	// if no basename was passed (or it is empty), use the global setting
+	if len(options) == 0 || options[0] == "" {
+		if instance != nil {
+			return instance.Settings.GlmMaxTokens
+		}
+		return 1024 // default / error fallback
+	}
+
+	// look up the prompt-specific configuration by basename
+	basename := options[0]
+	maxTokensInterface, err := prompt.GetSettingFromFilename(basename, "GlmMaxTokens")
+	if err != nil {
+		log.Println("Error retrieving GlmMaxTokens:", err)
+		return getGlmMaxTokensInternal() // recurse without arguments to fetch the default
+	}
+
+	maxTokens, ok := maxTokensInterface.(int)
+	if !ok { // the type assertion failed
+		fmt.Println("Type assertion failed for GlmMaxTokens, fetching default")
+		return getGlmMaxTokensInternal() // recurse without arguments to fetch the default
+	}
+
+	return maxTokens
+}
+
+// GetGlmTemperature gets the sampling temperature
+func GetGlmTemperature() float64 {
+	mu.Lock()
+	defer mu.Unlock()
+	if instance != nil {
+		return instance.Settings.GlmTemperature
+	}
+	return 0.95 // default
+}
+
+// GetGlmDoSample reports whether the sampling strategy is enabled
+func GetGlmDoSample() bool {
+	mu.Lock()
+	defer mu.Unlock()
+	if instance != nil {
+		return instance.Settings.GlmDoSample
+	}
+	return true // default
+}
+
+// GetGlmToolChoice gets the tool selection strategy
+func GetGlmToolChoice() string {
+	mu.Lock()
+	defer mu.Unlock()
+	if instance != nil {
+		return instance.Settings.GlmToolChoice
+	}
+	return "auto" // default
+}
+
+// GetGlmUserID gets the end user's unique ID
+func GetGlmUserID() string {
+	mu.Lock()
+	defer mu.Unlock()
+	if instance != nil {
+		return instance.Settings.GlmUserID
+	}
+	return "" // empty when not configured
+}
+
+// GetGlmRequestID gets the unique request identifier
+func GetGlmRequestID() string {
+	mu.Lock()
+	defer mu.Unlock()
+	if instance != nil {
+		return instance.Settings.GlmRequestID
+	}
+	return "" // empty when not set
+}
+
+// GetGlmTopP gets the nucleus sampling probability
+func GetGlmTopP() float64 {
+	mu.Lock()
+	defer mu.Unlock()
+	if instance != nil {
+		return instance.Settings.GlmTopP
+	}
+	return 0.7 // default
+}
+
+// GetGlmStop gets the list of stop words
+func GetGlmStop() []string {
+	mu.Lock()
+	defer mu.Unlock()
+	if instance != nil {
+		return instance.Settings.GlmStop
+	}
+	return nil // nil when no stop words are set
+}
+
+// GetGlmTools gets the list of callable tools
+func GetGlmTools() []string {
+	mu.Lock()
+	defer mu.Unlock()
+	if instance != nil {
+		return instance.Settings.GlmTools
+	}
+	return []string{} // empty slice when no tools are set
+}
+
+// GetGroupHintWords gets the group hint-word list
+func GetGroupHintWords() []string {
+	mu.Lock()
+	defer mu.Unlock()
+	if instance != nil {
+		return instance.Settings.GroupHintWords
+	}
+	return nil
+}
diff --git a/main.go b/main.go
index 1a04e04..c950854 100644
--- a/main.go
+++ b/main.go
@@ -152,6 +152,9 @@ func main() {
 	case 4:
 		// 如果API类型是4,使用app.chatHandlerTyqw
 		http.HandleFunc("/conversation", app.ChatHandlerTyqw)
+	case 5:
+		// API type 5: use app.ChatHandlerGlm
+		http.HandleFunc("/conversation", app.ChatHandlerGlm)
 	default:
 		// 如果是其他值,可以选择一个默认的处理器或者记录一个错误
 		log.Printf("Unknown API type: %d", apiType)
@@ -163,6 +166,7 @@ func main() {
 		http.HandleFunc("/conversation_ernie", app.ChatHandlerErnie)
 		http.HandleFunc("/conversation_rwkv", app.ChatHandlerRwkv)
 		http.HandleFunc("/conversation_tyqw", app.ChatHandlerTyqw)
+		http.HandleFunc("/conversation_glm", app.ChatHandlerGlm)
 	}
 	if config.GetSelfPath() != "" {
 		rateLimiter := server.NewRateLimiter()
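
Review note: usage of the variadic getter pattern above. With no argument it returns the global config.yml value; with a prompt basename it prefers that prompt file's override and falls back to the global value on a lookup error or failed type assertion:

    limit := config.GetGlmMaxTokens()       // global glmMaxTokens from config.yml
    limit = config.GetGlmMaxTokens("story") // "GlmMaxTokens" from the "story" prompt file, else the global value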
diff --git a/readme.md b/readme.md
index 22cc6a5..6df07f8 100644
--- a/readme.md
+++ b/readme.md
@@ -62,6 +62,22 @@ Rwkv runner
 
 ---
 
+## Supported chat-bot platforms
+
+[Gensokyo framework - QQ Open Platform](https://github.com/Hoshinonyaruko/Gensokyo)
+
+[Gensokyo framework - Discord](https://github.com/Hoshinonyaruko/Gensokyo-discord)
+
+[Gensokyo framework - Kook](https://github.com/Hoshinonyaruko/Gensokyo-kook)
+
+[Gensokyo framework - WeChat subscription/official accounts](https://github.com/Hoshinonyaruko/Gensokyo-wxmp)
+
+[Gensokyo framework - Telegram](https://github.com/Hoshinonyaruko/Gensokyo-Telegram)
+
+[All OneBot v11 implementations](https://onebot.dev/ecosystem.html#%E5%BA%94%E7%94%A8%E6%A1%88%E4%BE%8B)
+
+---
+
 ## 安全性
 
 多重完备安全措施,尽可能保证开发者和llm应用安全.
diff --git a/server/server.go b/server/server.go
index aabf569..eaa1706 100644
--- a/server/server.go
+++ b/server/server.go
@@ -164,7 +164,7 @@ func processWSMessage(msg []byte, selfid string) {
 		}
 
 	} else {
-		log.Printf("Unknown message type or missing post type\n")
+		log.Printf("Unknown message type or missing post type:[%v]\n", string(msg))
 	}
 }
diff --git a/structs/struct.go b/structs/struct.go
index 41e3c59..9e01676 100644
--- a/structs/struct.go
+++ b/structs/struct.go
@@ -171,6 +171,26 @@ type TyqwSSEData struct {
 	RequestID string `json:"request_id"`
 }
 
+// GlmSSEData parses the GLM model's streaming output
+type GlmSSEData struct {
+	ID      string `json:"id"`
+	Created int    `json:"created"`
+	Model   string `json:"model"`
+	Choices []struct {
+		Index int `json:"index"`
+		Delta struct {
+			Role    string `json:"role"`
+			Content string `json:"content"`
+		} `json:"delta"`
+		FinishReason string `json:"finish_reason,omitempty"` // omitempty keeps the field out of the JSON when empty
+	} `json:"choices"`
+	Usage struct {
+		PromptTokens     int `json:"prompt_tokens"`
+		CompletionTokens int `json:"completion_tokens"`
+		TotalTokens      int `json:"total_tokens"`
+	} `json:"usage,omitempty"` // omitempty drops the field from the JSON when empty
+}
+
 // 定义用于累积使用情况的结构(如果API提供此信息)
 type GPTUsageInfo struct {
 	PromptTokens int `json:"prompt_tokens"`
@@ -242,6 +262,7 @@ type Settings struct {
 	Proxy                   string `yaml:"proxy"`
 	UrlSendPics             bool   `yaml:"urlSendPics"`             // 自己构造图床加速图片发送
 	MdPromptKeyboardAtGroup bool   `yaml:"mdPromptKeyboardAtGroup"` // 群内使用md能力模拟PromptKeyboard
+	GroupHintWords          []string `yaml:"groupHintWords"`
 
 	HunyuanType      int `yaml:"hunyuanType"`
 	MaxTokensHunyuan int `yaml:"maxTokensHunyuan"`
@@ -356,6 +377,20 @@ type Settings struct {
 	TyqwApiKey    string `yaml:"tyqwApiKey"`
 	TyqwWorkspace string `yaml:"tyqwWorkspace"`
 
+	GlmApiPath     string   `yaml:"glmApiPath"`     // model endpoint address
+	GlmModel       string   `yaml:"glmModel"`       // model code
+	GlmApiKey      string   `yaml:"glmApiKey"`      // model API key
+	GlmRequestID   string   `yaml:"glmRequestID"`   // optional unique request ID
+	GlmDoSample    bool     `yaml:"glmDoSample"`    // whether to enable the sampling strategy
+	GlmStream      bool     `yaml:"glmStream"`      // whether to enable streaming responses
+	GlmTemperature float64  `yaml:"glmTemperature"` // sampling temperature
+	GlmTopP        float64  `yaml:"glmTopP"`        // nucleus sampling probability
+	GlmMaxTokens   int      `yaml:"glmMaxTokens"`   // maximum number of output tokens
+	GlmStop        []string `yaml:"glmStop"`        // stop words
+	GlmTools       []string `yaml:"glmTools"`       // list of callable tools
+	GlmToolChoice  string   `yaml:"glmToolChoice"`  // tool selection strategy
+	GlmUserID      string   `yaml:"glmUserID"`      // end-user ID
+
 	WSServerToken string `yaml:"wsServerToken"`
 	WSPath        string `yaml:"wsPath"`
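
Review note: a quick sanity check that GlmSSEData matches a typical stream frame (values illustrative; extra fields in real frames are simply ignored by encoding/json):

    raw := `{"id":"8866","created":1714000000,"model":"glm-4",
     "choices":[{"index":0,"delta":{"role":"assistant","content":"你好"}}]}`
    var frame structs.GlmSSEData
    if err := json.Unmarshal([]byte(raw), &frame); err == nil {
    	fmt.Println(frame.Choices[0].Delta.Content) // 你好
    }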
diff --git a/template/config_template.go b/template/config_template.go
index efe2667..f6913f1 100644
--- a/template/config_template.go
+++ b/template/config_template.go
@@ -43,6 +43,7 @@ settings:
   withdrawCommand : ["撤回"] #撤回指令
   hideExtraLogs : false #忽略流信息的log,提高性能
   urlSendPics : false #自己构造图床加速图片发送.需配置公网ip+放通port+设置正确的selfPath
+  groupHintWords : [] # when the bot is in a group, a message must contain one of these entries, e.g. [CQ:at,qq=2] or the bot's name
 
   #Ws服务器配置
   wsServerToken : "" #ws密钥 可以由onebotv11反向ws接入
@@ -164,6 +165,21 @@ settings:
   tyqwPreSystem: false # 是否在系统层面进行预处理
   tyqwEnableSearch : false # 是否使用网络搜索
 
+  # GLM model settings; make sure your account meets the corresponding API requirements
+  glmApiPath: "https://open.bigmodel.cn/api/paas/v4/chat/completions" # GLM API endpoint used for text generation
+  glmApiKey : "" # GLM API key
+  glmModel: "" # model code to call; pick what fits your needs, e.g. glm-3-turbo or glm-4
+  glmRequestID: "" # unique request ID for tracing and debugging
+  glmDoSample: true # whether to enable the sampling strategy; defaults to true (sampling on)
+  glmTemperature: 0.95 # sampling temperature; larger values give more random output
+  glmTopP: 0.9 # nucleus sampling: choose from the tokens within the top-P probability mass
+  glmMaxTokens: 1024 # maximum number of output tokens, controls the reply length
+  glmStop: # generation stops when the model emits one of these markers
+    - "stop_token" # multiple stop markers may be listed
+  glmTools: # tools the model is allowed to call
+    - "web_search" # web search is enabled by default
+  glmToolChoice: "auto" # tool selection strategy; currently only auto is supported
+  glmUserID: "" # unique end-user ID for tracking and analysis
 `

const Logo = `
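
Review note: with apiType set to 5 and the template above filled in, a non-streaming call to the new route returns a single JSON object (streaming is governed by the useSse setting rather than glmStream in this handler). A sketch, again assuming the request field names and default port from the project README:

    package main

    import (
    	"bytes"
    	"encoding/json"
    	"fmt"
    	"net/http"
    )

    func main() {
    	payload := []byte(`{"message": "介绍一下GLM-4", "conversationId": ""}`) // field names assumed
    	resp, err := http.Post("http://127.0.0.1:46230/conversation_glm", "application/json", bytes.NewReader(payload))
    	if err != nil {
    		panic(err)
    	}
    	defer resp.Body.Close()

    	var out struct {
    		Response       string `json:"response"`
    		ConversationID string `json:"conversationId"`
    	}
    	if err := json.NewDecoder(resp.Body).Decode(&out); err != nil {
    		panic(err)
    	}
    	fmt.Println(out.ConversationID, out.Response)
    }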