UPDATE: Fix chatbot
All checks were successful
Build and Release / release (push) Successful in 1m34s

This commit is contained in:
2026-05-05 10:16:13 +07:00
parent a8f0597e59
commit de2e1cddf3
3 changed files with 69 additions and 7 deletions

View File

@@ -34,7 +34,7 @@ func NewChatbotController(chatbotService services.ChatbotService) *ChatbotContro
// @Failure 500 {object} response.CommonResponse "Internal server error"
// @Router /chatbot/chat [post]
func (cx *ChatbotController) Chat(c fiber.Ctx) error {
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
ctx, cancel := context.WithTimeout(context.Background(), 120*time.Second)
defer cancel()
dto := &request.ChatbotDto{}

View File

@@ -38,14 +38,27 @@ func (s *chatbotService) Chat(ctx context.Context, projectID *string, question s
contextStr += fmt.Sprintf("[Document %d]: %s\n", i+1, res.Content)
}
prompt := fmt.Sprintf(`You are a helpful history assistant. Answer the question based ONLY on the provided context.
If the answer is not in the context, say "I don't have enough historical context to answer that."
var prompt string
if contextStr == "" {
prompt = fmt.Sprintf(`You are a friendly history assistant chatbot. The user said: "%s"
Rules:
- If it is a greeting (like "hello", "hi", "xin chào"), respond with a friendly greeting and briefly introduce yourself as a history assistant.
- If it is a history question, say that you don't have relevant documents to answer and suggest they ask about topics available in the system.
- Do NOT show your reasoning or thinking process. Output ONLY your final response.`, question)
} else {
prompt = fmt.Sprintf(`You are a helpful history assistant. Answer the question using ONLY the provided context.
Rules:
- If the answer is not in the context, say "I don't have enough historical context to answer that."
- Do NOT show your reasoning, thinking process, or analysis. Output ONLY your final answer.
- Be concise and direct.
Context:
%s
Question: %s
Answer:`, contextStr, question)
Question: %s`, contextStr, question)
}
return s.ragUtils.GenerateResponse(ctx, prompt)
}

View File

@@ -6,6 +6,7 @@ import (
"history-api/pkg/config"
"html"
"regexp"
"strings"
"github.com/tmc/langchaingo/embeddings"
"github.com/tmc/langchaingo/llms"
@@ -24,9 +25,20 @@ func NewRagUtils() (*RagUtils, error) {
return nil, err
}
googleModal, err := config.GetConfig("GOOGLE_AI_MODEL")
if err != nil {
googleModal = "gemma-4-26b-a4b-it"
}
googleEmbeddingModel, err := config.GetConfig("GOOGLE_AI_EMBEDDING_MODEL")
if err != nil {
googleEmbeddingModel = "gemini-embedding-001"
}
llm, err := googleai.New(context.Background(),
googleai.WithAPIKey(googleAIApiKey),
googleai.WithDefaultEmbeddingModel("gemini-embedding-001"),
googleai.WithDefaultModel(googleModal),
googleai.WithDefaultEmbeddingModel(googleEmbeddingModel),
)
if err != nil {
return nil, fmt.Errorf("failed to init google ai: %w", err)
@@ -77,5 +89,42 @@ func (u *RagUtils) EmbedQuery(ctx context.Context, query string) ([]float32, err
}
// GenerateResponse sends prompt to the configured LLM and returns the model's
// reply with any leading "thinking"/reasoning section removed via stripThinking.
// Errors from the underlying LLM call are returned unwrapped to the caller.
func (u *RagUtils) GenerateResponse(ctx context.Context, prompt string) (string, error) {
	raw, err := llms.GenerateFromSinglePrompt(ctx, u.llm, prompt)
	if err != nil {
		return "", err
	}
	return stripThinking(raw), nil
}
// stripThinking heuristically removes a leading "thinking"/reasoning section
// from an LLM response and returns only the final answer. Reasoning output is
// detected by bullet markers ("* " or "- "); the answer is taken to be the
// trailing run of non-blank, non-bullet lines. If no answer can be isolated,
// the whole (whitespace-trimmed) response is returned unchanged.
func stripThinking(raw string) string {
	// Fast path: no bullet markers at all means there is nothing to strip.
	// Check both marker forms so the gate matches the scan loop below.
	if !strings.Contains(raw, "* ") && !strings.Contains(raw, "- ") {
		return strings.TrimSpace(raw)
	}
	// Trim outer whitespace before splitting: a trailing newline would
	// otherwise make the backward scan bail out on an empty last line and
	// leave the reasoning section in place.
	lines := strings.Split(strings.TrimSpace(raw), "\n")
	answerStart := len(lines)
	for i := len(lines) - 1; i >= 0; i-- {
		trimmed := strings.TrimSpace(lines[i])
		// Stop at the first blank line or bullet line; everything above it
		// is considered reasoning output.
		if trimmed == "" || strings.HasPrefix(trimmed, "*") || strings.HasPrefix(trimmed, "- ") {
			break
		}
		answerStart = i
	}
	if answerStart < len(lines) {
		if answer := strings.TrimSpace(strings.Join(lines[answerStart:], "\n")); answer != "" {
			return answer
		}
	}
	// Fallback: some responses end with the answer trailing a closing quote
	// on the final line (e.g. restating the question in quotes first).
	lastLine := lines[len(lines)-1]
	if idx := strings.LastIndex(lastLine, `"`); idx >= 0 && idx < len(lastLine)-1 {
		if answer := strings.TrimSpace(lastLine[idx+1:]); answer != "" {
			return answer
		}
	}
	return strings.TrimSpace(raw)
}