diff --git a/backend/pkg/messaging/service.go b/backend/pkg/messaging/service.go
index de33f00..e0fc830 100644
--- a/backend/pkg/messaging/service.go
+++ b/backend/pkg/messaging/service.go
@@ -4,14 +4,11 @@ import (
"context"
"encoding/json"
"fmt"
- "regexp"
"sef/app/entities"
"sef/internal/validation"
"sef/pkg/providers"
"sef/pkg/toolrunners"
"strconv"
- "strings"
- "time"
"github.com/gofiber/fiber/v3/log"
"gorm.io/gorm"
@@ -42,7 +39,6 @@ type MessagingServiceInterface interface {
ExecuteToolCall(ctx context.Context, toolCall providers.ToolCall) (string, error)
}
-// ValidateAndParseSessionID validates and parses session ID from string
func (s *MessagingService) ValidateAndParseSessionID(sessionIDStr string) (uint, error) {
sessionID, err := strconv.ParseUint(sessionIDStr, 10, 32)
if err != nil {
@@ -51,7 +47,6 @@ func (s *MessagingService) ValidateAndParseSessionID(sessionIDStr string) (uint,
return uint(sessionID), nil
}
-// GetSessionByIDAndUser retrieves a session by ID and user ID
func (s *MessagingService) GetSessionByIDAndUser(sessionID, userID uint) (*entities.Session, error) {
var session entities.Session
if err := s.DB.
@@ -66,7 +61,6 @@ func (s *MessagingService) GetSessionByIDAndUser(sessionID, userID uint) (*entit
return &session, nil
}
-// ParseSendMessageRequest parses and validates the send message request from body bytes
func (s *MessagingService) ParseSendMessageRequest(body []byte) (*SendMessageRequest, error) {
var req SendMessageRequest
if err := json.Unmarshal(body, &req); err != nil {
@@ -80,7 +74,6 @@ func (s *MessagingService) ParseSendMessageRequest(body []byte) (*SendMessageReq
return &req, nil
}
-// LoadSessionWithChatbotAndMessages loads session with chatbot and messages
func (s *MessagingService) LoadSessionWithChatbotAndMessages(sessionID, userID uint) (*entities.Session, error) {
var session entities.Session
if err := s.DB.
@@ -98,7 +91,6 @@ func (s *MessagingService) LoadSessionWithChatbotAndMessages(sessionID, userID u
return &session, nil
}
-// LoadSessionWithChatbotToolsAndMessages loads session with chatbot, tools, and messages
func (s *MessagingService) LoadSessionWithChatbotToolsAndMessages(sessionID, userID uint) (*entities.Session, error) {
var session entities.Session
if err := s.DB.
@@ -117,11 +109,9 @@ func (s *MessagingService) LoadSessionWithChatbotToolsAndMessages(sessionID, use
return &session, nil
}
-// ConvertToolsToDefinitions converts entity tools to provider tool definitions
func (s *MessagingService) ConvertToolsToDefinitions(tools []entities.Tool) []providers.ToolDefinition {
var definitions []providers.ToolDefinition
for _, tool := range tools {
- // Convert JSONB parameters to OpenAPI JSON Schema format
parameters := map[string]interface{}{
"type": "object",
"properties": make(map[string]interface{}),
@@ -132,7 +122,6 @@ func (s *MessagingService) ConvertToolsToDefinitions(tools []entities.Tool) []pr
properties := make(map[string]interface{})
var required []string
- // Process each parameter in the array
for _, param := range tool.Parameters {
if paramMap, ok := param.(map[string]interface{}); ok {
name, hasName := paramMap["name"].(string)
@@ -141,7 +130,6 @@ func (s *MessagingService) ConvertToolsToDefinitions(tools []entities.Tool) []pr
isRequired, hasRequired := paramMap["required"].(bool)
if hasName && hasType {
- // Create property definition
property := map[string]interface{}{
"type": paramType,
}
@@ -150,7 +138,6 @@ func (s *MessagingService) ConvertToolsToDefinitions(tools []entities.Tool) []pr
}
properties[name] = property
- // Add to required array if marked as required
if hasRequired && isRequired {
required = append(required, name)
}
@@ -175,34 +162,27 @@ func (s *MessagingService) ConvertToolsToDefinitions(tools []entities.Tool) []pr
return definitions
}
-// ExecuteToolCall executes a tool call and returns the result
func (s *MessagingService) ExecuteToolCall(ctx context.Context, toolCall providers.ToolCall) (string, error) {
- // Find the tool by name (this would need to be optimized in production)
var tool entities.Tool
if err := s.DB.Where("name = ?", toolCall.Function.Name).First(&tool).Error; err != nil {
return "", fmt.Errorf("tool not found: %s", toolCall.Function.Name)
}
- // Handle arguments - they might be raw JSON string or parsed map
var args map[string]interface{}
if rawArgs, ok := toolCall.Function.Arguments["raw"].(string); ok {
- // Parse the raw JSON string
if err := json.Unmarshal([]byte(rawArgs), &args); err != nil {
return "", fmt.Errorf("failed to parse tool arguments: %w", err)
}
} else {
- // Already parsed
args = toolCall.Function.Arguments
}
- // Create tool runner
factory := &toolrunners.ToolRunnerFactory{}
runner, err := factory.NewToolRunner(tool.Type, tool.Config, tool.Parameters)
if err != nil {
return "", fmt.Errorf("failed to create tool runner: %w", err)
}
- // Create tool call context
toolContext := &toolrunners.ToolCallContext{
ToolCallID: toolCall.ID,
FunctionName: toolCall.Function.Name,
@@ -214,13 +194,11 @@ func (s *MessagingService) ExecuteToolCall(ctx context.Context, toolCall provide
},
}
- // Execute tool with context
result, err := runner.ExecuteWithContext(ctx, args, toolContext)
if err != nil {
return "", fmt.Errorf("tool execution failed: %w", err)
}
- // Convert result to string
resultJSON, err := json.Marshal(result)
if err != nil {
return "", fmt.Errorf("failed to marshal tool result: %w", err)
@@ -229,7 +207,6 @@ func (s *MessagingService) ExecuteToolCall(ctx context.Context, toolCall provide
return string(resultJSON), nil
}
-// SaveUserMessage saves the user message to database
func (s *MessagingService) SaveUserMessage(sessionID uint, content string) error {
userMessage := entities.Message{
SessionID: sessionID,
@@ -245,28 +222,9 @@ func (s *MessagingService) SaveUserMessage(sessionID uint, content string) error
return nil
}
-// cleanAssistantContent removes internal tags from assistant content before saving
-func cleanAssistantContent(content string) string {
- // Remove tags and content
- re := regexp.MustCompile(`.*?`)
- content = re.ReplaceAllString(content, "")
-
- // Remove tags and content
- re = regexp.MustCompile(`.*?`)
- content = re.ReplaceAllString(content, "")
-
- // Remove tags and content
- re = regexp.MustCompile(`.*?`)
- content = re.ReplaceAllString(content, "")
-
- return content
-}
-
-// PrepareChatMessages prepares the messages array for the chat API
func (s *MessagingService) PrepareChatMessages(session *entities.Session, userContent string) []providers.ChatMessage {
var messages []providers.ChatMessage
- // Add system message if system prompt exists
if session.Chatbot.SystemPrompt != "" {
messages = append(messages, providers.ChatMessage{
Role: "system",
@@ -274,7 +232,6 @@ func (s *MessagingService) PrepareChatMessages(session *entities.Session, userCo
})
}
- // Add current chat session messages
for _, msg := range session.Messages {
messages = append(messages, providers.ChatMessage{
Role: msg.Role,
@@ -282,7 +239,6 @@ func (s *MessagingService) PrepareChatMessages(session *entities.Session, userCo
})
}
- // Add current user message
messages = append(messages, providers.ChatMessage{
Role: "user",
Content: userContent,
@@ -291,7 +247,6 @@ func (s *MessagingService) PrepareChatMessages(session *entities.Session, userCo
return messages
}
-// CreateAssistantMessage creates an empty assistant message record
func (s *MessagingService) CreateAssistantMessage(sessionID uint) (*entities.Message, error) {
assistantMessage := entities.Message{
SessionID: sessionID,
@@ -307,7 +262,6 @@ func (s *MessagingService) CreateAssistantMessage(sessionID uint) (*entities.Mes
return &assistantMessage, nil
}
-// CreateToolMessage creates a tool message record
func (s *MessagingService) CreateToolMessage(sessionID uint, content string) (*entities.Message, error) {
toolMessage := entities.Message{
SessionID: sessionID,
@@ -323,296 +277,6 @@ func (s *MessagingService) CreateToolMessage(sessionID uint, content string) (*e
return &toolMessage, nil
}
-// processToolCalls handles the execution of tool calls and returns updated messages
-// Returns: updated messages, shouldStop flag, stop reason
-func (s *MessagingService) processToolCalls(session *entities.Session, toolCalls []providers.ToolCall, messages []providers.ChatMessage, outputCh chan<- string, assistantContent *strings.Builder, toolCallCounter map[string]int) ([]providers.ChatMessage, bool, string) {
- for _, toolCall := range toolCalls {
- displayName := toolCall.Function.Name
- // Extract tool display name from session.Chatbot.Tools
- for _, t := range session.Chatbot.Tools {
- if t.Name == toolCall.Function.Name {
- displayName = t.DisplayName
- break
- }
- }
-
- // Ensure we have a valid display name, fallback to function name if empty
- if displayName == "" {
- if toolCall.Function.Name != "" {
- displayName = toolCall.Function.Name
- } else {
- displayName = "Unknown Tool"
- }
- }
-
- // Check if this tool has been called too many times
- toolCallCounter[toolCall.Function.Name]++
- if toolCallCounter[toolCall.Function.Name] > 2 {
- log.Warn("Tool", toolCall.Function.Name, "has been called more than 2 times, stopping execution")
- errorMsg := fmt.Sprintf("Özür dilerim, '%s' aracını kullanarak istediğiniz bilgiyi alamadım. Lütfen sorunuzu farklı bir şekilde sorun veya daha spesifik bilgi verin.", displayName)
- outputCh <- errorMsg
- assistantContent.WriteString(errorMsg)
- return messages, true, "tool_call_limit_exceeded"
- }
-
- log.Info("Calling tool", toolCall.Function.Name, "- attempt", toolCallCounter[toolCall.Function.Name], "of 2")
-
- // Send tool executing indicator
- executingStr := fmt.Sprintf("%s", displayName)
- outputCh <- executingStr
- assistantContent.WriteString(executingStr)
-
- toolResult, err := s.ExecuteToolCall(context.Background(), toolCall)
- if err != nil {
- log.Error("Tool execution failed:", err)
- // Provide more user-friendly tool error messages
- if strings.Contains(err.Error(), "not found") {
- toolResult = fmt.Sprintf("The tool '%s' is not available or has been removed.", displayName)
- } else if strings.Contains(err.Error(), "timeout") {
- toolResult = fmt.Sprintf("The tool '%s' took too long to respond. Please try again.", displayName)
- } else if strings.Contains(err.Error(), "arguments") {
- toolResult = fmt.Sprintf("There was an issue with the parameters provided to '%s'. Please try rephrasing your request.", displayName)
- } else {
- toolResult = fmt.Sprintf("Tool '%s' encountered an error: %v", displayName, err)
- }
- }
-
- // Send tool executed indicator
- executedStr := fmt.Sprintf("%s", displayName)
- outputCh <- executedStr
- assistantContent.WriteString(executedStr)
-
- // Save tool message
- _, err = s.CreateToolMessage(session.ID, toolResult)
- if err != nil {
- log.Error("Failed to save tool message:", err)
- }
-
- // Add to messages for followup
- toolMessage := providers.ChatMessage{
- Role: "tool",
- Content: toolResult,
- }
- messages = append(messages, toolMessage)
- }
- return messages, false, ""
-}
-
-// GenerateChatResponse generates the chat response stream with infinite tool call chain support
-func (s *MessagingService) GenerateChatResponse(session *entities.Session, messages []providers.ChatMessage) (<-chan string, *entities.Message, error) {
- // Create provider instance
- factory := &providers.ProviderFactory{}
- providerConfig := map[string]interface{}{
- "base_url": session.Chatbot.Provider.BaseURL,
- }
-
- // Validate provider configuration
- if session.Chatbot.Provider.Type == "" {
- log.Error("Provider type is empty for chatbot:", session.Chatbot.Name)
- return nil, nil, fmt.Errorf("provider type is not configured for chatbot: %s", session.Chatbot.Name)
- }
-
- if session.Chatbot.Provider.BaseURL == "" {
- log.Error("Provider base URL is empty for chatbot:", session.Chatbot.Name)
- return nil, nil, fmt.Errorf("provider base URL is not configured for chatbot: %s", session.Chatbot.Name)
- }
-
- log.Info("Creating provider with config:", map[string]interface{}{
- "type": session.Chatbot.Provider.Type,
- "base_url": session.Chatbot.Provider.BaseURL,
- "chatbot_id": session.Chatbot.ID,
- "chatbot_name": session.Chatbot.Name,
- })
-
- provider, err := factory.NewProvider(session.Chatbot.Provider.Type, providerConfig)
- if err != nil {
- log.Error("Failed to create provider:", err, "Provider type:", session.Chatbot.Provider.Type, "Config:", providerConfig)
- return nil, nil, fmt.Errorf("failed to initialize provider: %w", err)
- }
-
- log.Info("Provider created successfully for chatbot:", session.Chatbot.Name)
-
- // Prepare options
- options := make(map[string]interface{})
- if session.Chatbot.ModelName != "" {
- options["model"] = session.Chatbot.ModelName
- log.Info("Using model from chatbot:", session.Chatbot.ModelName, "for chatbot:", session.Chatbot.Name)
- } else {
- log.Info("Using default model for chatbot:", session.Chatbot.Name)
- }
-
- // Add additional logging for debugging
- log.Info("Chat generation parameters:", map[string]interface{}{
- "session_id": session.ID,
- "chatbot_id": session.Chatbot.ID,
- "chatbot_name": session.Chatbot.Name,
- "provider_type": session.Chatbot.Provider.Type,
- "model_name": session.Chatbot.ModelName,
- "tools_count": len(session.Chatbot.Tools),
- "messages_count": len(messages),
- })
-
- // Convert tools to definitions
- toolDefinitions := s.ConvertToolsToDefinitions(session.Chatbot.Tools)
-
- // Create output channel
- outputCh := make(chan string)
-
- // Create first assistant message synchronously
- firstAssistant, err := s.CreateAssistantMessage(session.ID)
- if err != nil {
- log.Error("Failed to create assistant message:", err)
- return nil, nil, fmt.Errorf("failed to create assistant message: %w", err)
- }
-
- go func() {
- defer close(outputCh)
-
- var assistantContent strings.Builder
- thinkingStarted := false
- currentMessages := messages
-
- // Keep-alive ticker to prevent timeouts
- keepAliveTicker := time.NewTicker(30 * time.Second)
- defer keepAliveTicker.Stop()
-
- // Maximum iterations to prevent infinite loops
- const maxIterations = 10
- iteration := 0
-
- // Track tool call counts - each tool can be called at most 2 times
- toolCallCounter := make(map[string]int)
-
- // Continuous loop to handle infinite tool call chains
- for {
- iteration++
- if iteration > maxIterations {
- log.Warn("Maximum tool call iterations reached for session:", session.ID)
- errorMsg := "Özür dilerim, çok fazla araç çağrısı yapıldı. Lütfen sorunuzu daha basit bir şekilde sorun."
- outputCh <- errorMsg
- assistantContent.WriteString(errorMsg)
- s.UpdateAssistantMessage(firstAssistant, assistantContent.String())
- return
- }
-
- log.Info("Tool call iteration", iteration, "of", maxIterations, "for session:", session.ID)
-
- // Generate chat response
- log.Info("Calling GenerateChatWithTools for session:", session.ID, "with", len(currentMessages), "messages")
- chatStream, err := provider.GenerateChatWithTools(context.Background(), currentMessages, toolDefinitions, options)
- if err != nil {
- log.Error("Failed to generate response:", err)
- // Kullanıcı dostu hata mesajı gönder
- errorMsg := "Özür dilerim, şu anda yanıt oluşturmakta zorlanıyorum. "
- if strings.Contains(err.Error(), "connection") || strings.Contains(err.Error(), "timeout") {
- errorMsg += "AI servisi ile bağlantı sorunu yaşanıyor gibi görünüyor. Lütfen bir süre sonra tekrar deneyin."
- } else if strings.Contains(err.Error(), "authentication") || strings.Contains(err.Error(), "auth") {
- errorMsg += "AI servisi ile kimlik doğrulama sorunu yaşanıyor. Lütfen bir yönetici ile iletişime geçin."
- } else if strings.Contains(err.Error(), "model") || strings.Contains(err.Error(), "does not support") {
- errorMsg += "Seçilen AI modeli kullanılamıyor veya araçları desteklemiyor. Lütfen farklı bir chatbot deneyin veya bir yönetici ile iletişime geçin."
- } else {
- errorMsg += fmt.Sprintf("Hata detayları: %v", err)
- }
-
- log.Info("Sending error message to client:", errorMsg)
- outputCh <- errorMsg
-
- // Assistant mesajını hata içeriği ile güncelle
- s.UpdateAssistantMessage(firstAssistant, errorMsg)
- return
- }
-
- log.Info("GenerateChatWithTools call successful, processing stream...")
-
- hasToolCalls := false
- var pendingToolCalls []providers.ToolCall
-
- // Process the stream
- responseCount := 0
- for response := range chatStream {
- responseCount++
-
- // Handle thinking tokens
- if response.Thinking != "" {
- if !thinkingStarted {
- outputCh <- ""
- thinkingStarted = true
- }
- outputCh <- response.Thinking
- assistantContent.WriteString("" + response.Thinking)
- } else if thinkingStarted {
- outputCh <- ""
- thinkingStarted = false
- assistantContent.WriteString("")
- }
-
- // Handle content
- if response.Content != "" {
- outputCh <- response.Content
- assistantContent.WriteString(response.Content)
- }
-
- // Collect tool calls
- if len(response.ToolCalls) > 0 {
- log.Info("Agent tool calls: ", response.ToolCalls)
- hasToolCalls = true
- pendingToolCalls = append(pendingToolCalls, response.ToolCalls...)
- }
-
- // If response is done, process any collected tool calls
- if response.Done {
- log.Info("Response marked as done after", responseCount, "responses")
- if thinkingStarted {
- outputCh <- ""
- thinkingStarted = false
- assistantContent.WriteString("")
- }
- break
- }
- }
-
- log.Info("Stream processing finished. Total responses:", responseCount, "HasToolCalls:", hasToolCalls)
-
- // If no tool calls were made, we're done
- if !hasToolCalls {
- // Update the assistant message with full content
- s.UpdateAssistantMessage(firstAssistant, assistantContent.String())
- return
- }
-
- // Close thinking if open before processing tools
- if thinkingStarted {
- outputCh <- ""
- thinkingStarted = false
- assistantContent.WriteString("")
- }
-
- // IMPORTANT: Add the assistant's response to the message history
- // This helps the LLM understand what it has already said and prevents re-calling tools
- assistantMessage := providers.ChatMessage{
- Role: "assistant",
- Content: cleanAssistantContent(assistantContent.String()),
- }
- currentMessages = append(currentMessages, assistantMessage)
-
- // Process tool calls and update messages for next iteration
- var shouldStop bool
- var stopReason string
- currentMessages, shouldStop, stopReason = s.processToolCalls(session, pendingToolCalls, currentMessages, outputCh, &assistantContent, toolCallCounter)
-
- // If we should stop (e.g., tool call limit exceeded), save message and exit
- if shouldStop {
- log.Info("Stopping tool execution loop. Reason:", stopReason)
- s.UpdateAssistantMessage(firstAssistant, assistantContent.String())
- return
- }
- }
- }()
-
- return outputCh, firstAssistant, nil
-}
-
-// UpdateAssistantMessage updates the assistant message with the full response
func (s *MessagingService) UpdateAssistantMessage(assistantMessage *entities.Message, content string) {
if assistantMessage == nil {
log.Error("Attempted to update nil assistant message")
@@ -624,10 +288,22 @@ func (s *MessagingService) UpdateAssistantMessage(assistantMessage *entities.Mes
}
}
-// UpdateAssistantMessageWithCallback updates the assistant message and executes a callback
// UpdateAssistantMessageWithCallback persists the assistant message content
// via UpdateAssistantMessage and then invokes callback, if one was supplied,
// once the update has completed.
func (s *MessagingService) UpdateAssistantMessageWithCallback(assistantMessage *entities.Message, content string, callback func()) {
	s.UpdateAssistantMessage(assistantMessage, content)
	if callback == nil {
		return
	}
	callback()
}
+
+// GenerateChatResponse - MAIN ROUTER
+func (s *MessagingService) GenerateChatResponse(session *entities.Session, messages []providers.ChatMessage) (<-chan string, *entities.Message, error) {
+ isGemma := isGemmaModel(session.Chatbot.ModelName)
+
+ if isGemma {
+ log.Info("🔍 Using Gemma response generation for:", session.Chatbot.ModelName)
+ return s.generateChatResponseGemma(session, messages)
+ } else {
+ log.Info("🔍 Using OpenAI response generation for:", session.Chatbot.ModelName)
+ return s.generateChatResponseOpenAI(session, messages)
+ }
+}
diff --git a/backend/pkg/messaging/service_gemma.go b/backend/pkg/messaging/service_gemma.go
new file mode 100644
index 0000000..b0ef827
--- /dev/null
+++ b/backend/pkg/messaging/service_gemma.go
@@ -0,0 +1,554 @@
+package messaging
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "regexp"
+ "sef/app/entities"
+ "sef/pkg/providers"
+ "strings"
+ "time"
+
+ "github.com/gofiber/fiber/v3/log"
+)
+
+// ============================================================================
+// GEMMA HELPER FUNCTIONS
+// ============================================================================
+
+// extractThinkingFromContent extracts tags and returns thinking + regular content
+func extractThinkingFromContent(content string) (thinking string, regular string) {
+ thinkRegex := regexp.MustCompile(`(?s)(.*?)`)
+ matches := thinkRegex.FindAllStringSubmatch(content, -1)
+
+ var thinkingParts []string
+ for _, match := range matches {
+ if len(match) > 1 {
+ thinkingParts = append(thinkingParts, strings.TrimSpace(match[1]))
+ }
+ }
+
+ regularContent := thinkRegex.ReplaceAllString(content, "")
+ return strings.Join(thinkingParts, "\n"), strings.TrimSpace(regularContent)
+}
+
+// balanceJSON attempts to balance JSON braces if they're unmatched
+func balanceJSON(jsonStr string) string {
+ openBraces := strings.Count(jsonStr, "{")
+ closeBraces := strings.Count(jsonStr, "}")
+
+ if openBraces > closeBraces {
+ jsonStr += strings.Repeat("}", openBraces-closeBraces)
+ log.Info("Balanced JSON: added", openBraces-closeBraces, "closing braces")
+ }
+ return jsonStr
+}
+
+// detectAndParseToolCallsFromContent analyzes content for Gemma-style tool calls
+func (s *MessagingService) detectAndParseToolCallsFromContent(content string) ([]providers.ToolCall, string, string) {
+ if strings.Contains(content, "") || strings.Contains(content, "") {
+ toolCalls := s.parseGemmaToolCalls(content)
+ if len(toolCalls) > 0 {
+ log.Info("Detected Gemma3 format tool calls in content:", len(toolCalls), "calls")
+ cleanedContent := s.stripToolCallTags(content)
+ return toolCalls, "gemma", cleanedContent
+ }
+ }
+ return nil, "none", content
+}
+
+// stripToolCallTags removes tool call XML tags from content
+func (s *MessagingService) stripToolCallTags(content string) string {
+ re := regexp.MustCompile(`(?s).*?`)
+ content = re.ReplaceAllString(content, "")
+
+ re = regexp.MustCompile(`(?s).*?`)
+ content = re.ReplaceAllString(content, "")
+
+ return strings.TrimSpace(content)
+}
+
+// parseGemmaToolCalls extracts tool calls from Gemma3 XML format
+func (s *MessagingService) parseGemmaToolCalls(content string) []providers.ToolCall {
+ var toolCalls []providers.ToolCall
+
+ toolCallRegex := regexp.MustCompile(`\s*([\s\S]*?)\s*`)
+ matches := toolCallRegex.FindAllStringSubmatch(content, -1)
+
+ if len(matches) == 0 {
+ log.Info("No Gemma tool call patterns found in content")
+ return toolCalls
+ }
+
+ log.Info("Found", len(matches), "Gemma tool call patterns in content")
+
+ for i, match := range matches {
+ if len(match) < 2 {
+ continue
+ }
+
+ jsonStr := strings.TrimSpace(match[1])
+ jsonStr = balanceJSON(jsonStr)
+
+ log.Info(fmt.Sprintf("Parsing Gemma tool call #%d, JSON length: %d chars", i+1, len(jsonStr)))
+
+ var toolCallData map[string]interface{}
+ if err := json.Unmarshal([]byte(jsonStr), &toolCallData); err != nil {
+ log.Error("Failed to parse Gemma tool call JSON:", err)
+ preview := jsonStr
+ if len(preview) > 200 {
+ preview = preview[:200] + "..."
+ }
+ log.Error("JSON preview:", preview)
+ continue
+ }
+
+ name, nameOk := toolCallData["name"].(string)
+ if !nameOk {
+ log.Error("Invalid Gemma tool call - missing 'name' field:", toolCallData)
+ continue
+ }
+
+ var params map[string]interface{}
+ if p, ok := toolCallData["parameters"].(map[string]interface{}); ok {
+ params = p
+ } else if p, ok := toolCallData["arguments"].(map[string]interface{}); ok {
+ params = p
+ } else {
+ params = make(map[string]interface{})
+ }
+
+ toolCall := providers.ToolCall{
+ ID: fmt.Sprintf("gemma_call_%d_%d", time.Now().UnixNano(), i),
+ Type: "function",
+ Function: providers.ToolCallFunction{
+ Name: name,
+ Arguments: params,
+ },
+ }
+
+ log.Info("✅ Successfully parsed Gemma tool call:", name, "with", len(params), "parameters")
+ toolCalls = append(toolCalls, toolCall)
+ }
+
+ return toolCalls
+}
+
+// formatToolOutputForModel formats tool output for Gemma
+func formatToolOutputForModel(result string, detectedFormat string) string {
+ if detectedFormat == "gemma" {
+ return fmt.Sprintf("\n\n%s\n\n", result)
+ }
+ return result
+}
+
+// ============================================================================
+// TOOL CALL PROCESSING FOR GEMMA
+// ============================================================================
+
// processToolCallsGemma executes each pending tool call for a Gemma session:
// it resolves a user-facing display name, enforces a per-tool retry budget
// (at most 2 calls per tool per turn, tracked in toolCallCounter), streams
// progress markers to outputCh, persists the tool output as a tool message,
// and appends it to the chat history for the follow-up model call.
//
// Returns the updated message slice, a shouldStop flag, and a stop reason
// (non-empty only when shouldStop is true). User-facing limit messages are in
// Turkish by design.
func (s *MessagingService) processToolCallsGemma(session *entities.Session, toolCalls []providers.ToolCall, messages []providers.ChatMessage, outputCh chan<- string, assistantContent *strings.Builder, toolCallCounter map[string]int, detectedFormat string) ([]providers.ChatMessage, bool, string) {
	for _, toolCall := range toolCalls {
		// Prefer the chatbot-configured display name over the raw function name.
		displayName := toolCall.Function.Name
		for _, t := range session.Chatbot.Tools {
			if t.Name == toolCall.Function.Name {
				displayName = t.DisplayName
				break
			}
		}

		// Fall back when the configured display name is empty.
		if displayName == "" {
			if toolCall.Function.Name != "" {
				displayName = toolCall.Function.Name
			} else {
				displayName = "Unknown Tool"
			}
		}

		// Budget check: a tool exceeding 2 calls in one turn aborts the whole
		// loop so the model cannot retry forever.
		toolCallCounter[toolCall.Function.Name]++
		if toolCallCounter[toolCall.Function.Name] > 2 {
			log.Warn("Tool", toolCall.Function.Name, "has been called more than 2 times, stopping execution")
			errorMsg := fmt.Sprintf("Özür dilerim, '%s' aracını kullanarak istediğiniz bilgiyi alamadım. Lütfen sorunuzu farklı bir şekilde sorun veya daha spesifik bilgi verin.", displayName)
			outputCh <- errorMsg
			assistantContent.WriteString(errorMsg)
			return messages, true, "tool_call_limit_exceeded"
		}

		log.Info("Calling tool", toolCall.Function.Name, "- attempt", toolCallCounter[toolCall.Function.Name], "of 2")

		// Stream an "executing" marker to the client.
		// NOTE(review): Sprintf with a bare "%s" suggests marker tags around the
		// display name were stripped from this source — confirm intended format.
		executingStr := fmt.Sprintf("%s", displayName)
		outputCh <- executingStr
		assistantContent.WriteString(executingStr)

		toolResult, err := s.ExecuteToolCall(context.Background(), toolCall)
		if err != nil {
			log.Error("Tool execution failed:", err)
			// Map raw errors onto user-friendly messages by substring class.
			if strings.Contains(err.Error(), "not found") {
				toolResult = fmt.Sprintf("The tool '%s' is not available or has been removed.", displayName)
			} else if strings.Contains(err.Error(), "timeout") {
				toolResult = fmt.Sprintf("The tool '%s' took too long to respond. Please try again.", displayName)
			} else if strings.Contains(err.Error(), "arguments") {
				toolResult = fmt.Sprintf("There was an issue with the parameters provided to '%s'. Please try rephrasing your request.", displayName)
			} else {
				toolResult = fmt.Sprintf("Tool '%s' encountered an error: %v", displayName, err)
			}
		}

		// Stream an "executed" marker (same stripped-tag caveat as above).
		executedStr := fmt.Sprintf("%s", displayName)
		outputCh <- executedStr
		assistantContent.WriteString(executedStr)

		// Wrap the result in the format the detected model variant expects.
		formattedResult := formatToolOutputForModel(toolResult, detectedFormat)
		//guidanceMessage := "\n\nÖnemli: Yukarıdaki veriyi analiz et ve kullanıcıya Türkçe, özetlenmiş, anlaşılır bir şekilde sun. Ham JSON'u veya teknik çıktıyı gösterme."

		//_, err = s.CreateToolMessage(session.ID, formattedResult + guidanceMessage)
		// Persist the tool output; a save failure is logged but does not abort
		// the turn — the in-memory history below still carries the result.
		_, err = s.CreateToolMessage(session.ID, formattedResult)

		if err != nil {
			log.Error("Failed to save tool message:", err)
		}

		// Feed the result back to the model on the next iteration.
		toolMessage := providers.ChatMessage{
			Role:    "tool",
			Content: formattedResult,
		}
		messages = append(messages, toolMessage)
	}
	return messages, false, ""
}
+
+// ============================================================================
+// MAIN GEMMA CHAT RESPONSE GENERATOR
+// ============================================================================
+
+// generateChatResponseGemma generates chat response for Gemma models with enhanced features
+func (s *MessagingService) generateChatResponseGemma(session *entities.Session, messages []providers.ChatMessage) (<-chan string, *entities.Message, error) {
+ // Create provider instance
+ factory := &providers.ProviderFactory{}
+ providerConfig := map[string]interface{}{
+ "base_url": session.Chatbot.Provider.BaseURL,
+ }
+
+ // Validate provider configuration
+ if session.Chatbot.Provider.Type == "" {
+ log.Error("Provider type is empty for chatbot:", session.Chatbot.Name)
+ return nil, nil, fmt.Errorf("provider type is not configured for chatbot: %s", session.Chatbot.Name)
+ }
+
+ if session.Chatbot.Provider.BaseURL == "" {
+ log.Error("Provider base URL is empty for chatbot:", session.Chatbot.Name)
+ return nil, nil, fmt.Errorf("provider base URL is not configured for chatbot: %s", session.Chatbot.Name)
+ }
+
+ log.Info("Creating provider with config:", map[string]interface{}{
+ "type": session.Chatbot.Provider.Type,
+ "base_url": session.Chatbot.Provider.BaseURL,
+ "chatbot_id": session.Chatbot.ID,
+ "chatbot_name": session.Chatbot.Name,
+ })
+
+ provider, err := factory.NewProvider(session.Chatbot.Provider.Type, providerConfig)
+ if err != nil {
+ log.Error("Failed to create provider:", err, "Provider type:", session.Chatbot.Provider.Type, "Config:", providerConfig)
+ return nil, nil, fmt.Errorf("failed to initialize provider: %w", err)
+ }
+
+ log.Info("Provider created successfully for chatbot:", session.Chatbot.Name)
+
+ // Prepare options
+ options := make(map[string]interface{})
+ if session.Chatbot.ModelName != "" {
+ options["model"] = session.Chatbot.ModelName
+ log.Info("Using model from chatbot:", session.Chatbot.ModelName, "for chatbot:", session.Chatbot.Name)
+ } else {
+ log.Info("Using default model for chatbot:", session.Chatbot.Name)
+ }
+
+ // Add additional logging for debugging
+ log.Info("Chat generation parameters (Gemma mode):", map[string]interface{}{
+ "session_id": session.ID,
+ "chatbot_id": session.Chatbot.ID,
+ "chatbot_name": session.Chatbot.Name,
+ "provider_type": session.Chatbot.Provider.Type,
+ "model_name": session.Chatbot.ModelName,
+ "tools_count": len(session.Chatbot.Tools),
+ "messages_count": len(messages),
+ })
+
+ // Convert tools to definitions
+ toolDefinitions := s.ConvertToolsToDefinitions(session.Chatbot.Tools)
+
+ // Create output channel
+ outputCh := make(chan string)
+
+ // Create first assistant message synchronously
+ firstAssistant, err := s.CreateAssistantMessage(session.ID)
+ if err != nil {
+ log.Error("Failed to create assistant message:", err)
+ return nil, nil, fmt.Errorf("failed to create assistant message: %w", err)
+ }
+
+ go func() {
+ defer close(outputCh)
+
+ var assistantContent strings.Builder
+ thinkingStarted := false
+ currentMessages := messages
+ detectedFormat := "none"
+
+ // Keep-alive ticker to prevent timeouts
+ keepAliveTicker := time.NewTicker(30 * time.Second)
+ defer keepAliveTicker.Stop()
+
+ // Maximum iterations to prevent infinite loops
+ const maxIterations = 10
+ iteration := 0
+
+ // Track tool call counts
+ toolCallCounter := make(map[string]int)
+
+ // Continuous loop to handle infinite tool call chains
+ for {
+ iteration++
+ if iteration > maxIterations {
+ log.Warn("Maximum tool call iterations reached for session:", session.ID)
+ errorMsg := "Özür dilerim, çok fazla araç çağrısı yapıldı. Lütfen sorunuzu daha basit bir şekilde sorun."
+ outputCh <- errorMsg
+ assistantContent.WriteString(errorMsg)
+ s.UpdateAssistantMessage(firstAssistant, assistantContent.String())
+ return
+ }
+
+ log.Info("🔄 Tool call iteration", iteration, "of", maxIterations, "for session:", session.ID)
+
+ // Generate chat response
+ log.Info("Calling GenerateChatWithTools for session:", session.ID, "with", len(currentMessages), "messages")
+
+ // DEBUG: Log the last few messages being sent to model
+ if len(currentMessages) > 0 {
+ log.Info("DEBUG - Last 3 messages sent to model:")
+ startIdx := len(currentMessages) - 3
+ if startIdx < 0 {
+ startIdx = 0
+ }
+ for i := startIdx; i < len(currentMessages); i++ {
+ msg := currentMessages[i]
+ preview := msg.Content
+ if len(preview) > 200 {
+ preview = preview[:200] + "..."
+ }
+ log.Info(fmt.Sprintf(" Msg[%d] Role:%s Content:%s", i, msg.Role, preview))
+ }
+ }
+
+ chatStream, err := provider.GenerateChatWithTools(context.Background(), currentMessages, toolDefinitions, options)
+ if err != nil {
+ log.Error("Failed to generate response:", err)
+ // User-friendly error messages
+ errorMsg := "Özür dilerim, şu anda yanıt oluşturmakta zorlanıyorum. "
+ if strings.Contains(err.Error(), "connection") || strings.Contains(err.Error(), "timeout") {
+ errorMsg += "AI servisi ile bağlantı sorunu yaşanıyor gibi görünüyor. Lütfen bir süre sonra tekrar deneyin."
+ } else if strings.Contains(err.Error(), "authentication") || strings.Contains(err.Error(), "auth") {
+ errorMsg += "AI servisi ile kimlik doğrulama sorunu yaşanıyor. Lütfen bir yönetici ile iletişime geçin."
+ } else if strings.Contains(err.Error(), "model") || strings.Contains(err.Error(), "does not support") {
+ errorMsg += "Seçilen AI modeli kullanılamıyor veya araçları desteklemiyor. Lütfen farklı bir chatbot deneyin veya bir yönetici ile iletişime geçin."
+ } else {
+ errorMsg += fmt.Sprintf("Hata detayları: %v", err)
+ }
+
+ log.Info("Sending error message to client:", errorMsg)
+ outputCh <- errorMsg
+ s.UpdateAssistantMessage(firstAssistant, errorMsg)
+ return
+ }
+
+ log.Info("✅ GenerateChatWithTools call successful, processing stream...")
+
+ hasToolCalls := false
+ var pendingToolCalls []providers.ToolCall
+ var streamContent strings.Builder
+ var bufferMode bool = false
+
+ // Process the stream
+ responseCount := 0
+ for response := range chatStream {
+ responseCount++
+
+ // Handle thinking tokens - both from provider and inline in content
+ if response.Thinking != "" {
+ if !thinkingStarted {
+ outputCh <- ""
+ thinkingStarted = true
+ }
+ outputCh <- response.Thinking
+ assistantContent.WriteString("" + response.Thinking)
+ }
+
+ // Handle content
+ if response.Content != "" {
+ // Check for inline tags in content (Gemma-style)
+ if strings.Contains(response.Content, "") {
+ thinkContent, regularContent := extractThinkingFromContent(response.Content)
+
+ if thinkContent != "" {
+ if !thinkingStarted {
+ outputCh <- ""
+ thinkingStarted = true
+ }
+ outputCh <- thinkContent
+ assistantContent.WriteString("" + thinkContent)
+ }
+
+ if strings.Contains(response.Content, "") && thinkingStarted {
+ outputCh <- ""
+ thinkingStarted = false
+ assistantContent.WriteString("")
+ }
+
+ if regularContent != "" {
+ streamContent.WriteString(regularContent)
+ assistantContent.WriteString(regularContent)
+
+ // Early detection: Check if this chunk contains tool call tags
+ if !bufferMode && (strings.Contains(regularContent, "") || strings.Contains(regularContent, "")) {
+ bufferMode = true
+ log.Info("⚡ Early detected tool call tag in stream, entering buffer mode")
+ }
+
+ if !bufferMode {
+ outputCh <- regularContent
+ }
+ }
+ } else {
+ // Normal content without thinking tags
+ streamContent.WriteString(response.Content)
+ assistantContent.WriteString(response.Content)
+
+ // Early detection
+ if !bufferMode && (strings.Contains(response.Content, "") || strings.Contains(response.Content, "")) {
+ bufferMode = true
+ log.Info("⚡ Early detected tool call tag in stream, entering buffer mode")
+ }
+
+ if !bufferMode {
+ outputCh <- response.Content
+ }
+ }
+ }
+
+ // Collect tool calls from provider (OpenAI format)
+ if len(response.ToolCalls) > 0 {
+ log.Info("📞 Provider returned tool calls (OpenAI format):", len(response.ToolCalls), "calls")
+ hasToolCalls = true
+ pendingToolCalls = append(pendingToolCalls, response.ToolCalls...)
+ if detectedFormat == "none" {
+ detectedFormat = "openai"
+ log.Info("🔍 Auto-detected format: OpenAI (tool calls from provider)")
+ }
+ }
+
+ if response.Done {
+ log.Info("✅ Response marked as done after", responseCount, "responses")
+ if thinkingStarted {
+ outputCh <- ""
+ thinkingStarted = false
+ assistantContent.WriteString("")
+ }
+ break
+ }
+ }
+
+ log.Info("📊 Stream processing finished. Total responses:", responseCount, "Buffer mode:", bufferMode)
+
+ // AUTO-DETECT: Check content for tool calls if provider didn't return any
+ if !hasToolCalls && streamContent.Len() > 0 {
+ // DEBUG: Log content preview
+ contentPreview := streamContent.String()
+ if len(contentPreview) > 500 {
+ contentPreview = contentPreview[:500] + "..."
+ }
+ log.Info("🔍 DEBUG - Model output preview (first 500 chars):", contentPreview)
+ log.Info("📏 DEBUG - Full content length:", streamContent.Len(), "bytes")
+
+ contentToolCalls, format, cleanedContent := s.detectAndParseToolCallsFromContent(streamContent.String())
+ if len(contentToolCalls) > 0 {
+ log.Info("🎯 Auto-detected", len(contentToolCalls), "tool calls from content. Format:", format)
+ hasToolCalls = true
+ pendingToolCalls = append(pendingToolCalls, contentToolCalls...)
+ detectedFormat = format
+
+ // Send cleaned content to frontend
+ if bufferMode && len(cleanedContent) > 0 {
+ log.Info("✨ Sending cleaned content to frontend (removed tool tags)")
+ outputCh <- cleanedContent
+ }
+
+ // Update assistantContent with cleaned version
+ originalLen := assistantContent.Len()
+ streamLen := streamContent.Len()
+ if streamLen > 0 && originalLen >= streamLen {
+ prefix := assistantContent.String()[:originalLen-streamLen]
+ assistantContent.Reset()
+ assistantContent.WriteString(prefix)
+ assistantContent.WriteString(cleanedContent)
+ }
+ } else if bufferMode {
+ log.Info("📤 Buffer mode was active but no tool calls found, sending buffered content")
+ outputCh <- streamContent.String()
+ }
+ }
+
+ log.Info("📊 Tool calls status: HasToolCalls:", hasToolCalls, "DetectedFormat:", detectedFormat, "PendingCount:", len(pendingToolCalls))
+
+ // Check for thinking-only response
+ if !hasToolCalls && streamContent.Len() == 0 && assistantContent.Len() > 0 {
+ log.Warn("⚠️ Model only produced thinking content, no actual response!")
+ log.Warn(" This might indicate:")
+ log.Warn(" 1. Temperature too low")
+ log.Warn(" 2. System prompt needs adjustment")
+ log.Warn(" 3. Model stopping too early")
+
+ fallbackMsg := "Özür dilerim, yanıt oluştururken bir sorun yaşadım. Lütfen sorunuzu tekrar sorar mısınız?"
+ outputCh <- fallbackMsg
+ assistantContent.WriteString(fallbackMsg)
+ }
+
+ // If no tool calls were made, we're done
+ if !hasToolCalls {
+ log.Info("✅ No tool calls detected, finishing response")
+ s.UpdateAssistantMessage(firstAssistant, assistantContent.String())
+ return
+ }
+
+ // Close thinking if open before processing tools
+ if thinkingStarted {
+ outputCh <- ""
+ thinkingStarted = false
+ assistantContent.WriteString("")
+ }
+
+ // Add the assistant's response to the message history
+ assistantMessage := providers.ChatMessage{
+ Role: "assistant",
+ Content: cleanAssistantContent(assistantContent.String()),
+ }
+ currentMessages = append(currentMessages, assistantMessage)
+
+ // Process tool calls
+ log.Info("🔧 Processing", len(pendingToolCalls), "tool calls...")
+ var shouldStop bool
+ var stopReason string
+ currentMessages, shouldStop, stopReason = s.processToolCallsGemma(session, pendingToolCalls, currentMessages, outputCh, &assistantContent, toolCallCounter, detectedFormat)
+
+ if shouldStop {
+ log.Info("🛑 Stopping tool execution loop. Reason:", stopReason)
+ s.UpdateAssistantMessage(firstAssistant, assistantContent.String())
+ return
+ }
+
+ log.Info("🔄 Tool calls processed, continuing to next iteration...")
+ }
+ }()
+
+ return outputCh, firstAssistant, nil
+}
diff --git a/backend/pkg/messaging/service_helpers.go b/backend/pkg/messaging/service_helpers.go
new file mode 100644
index 0000000..8e0b98c
--- /dev/null
+++ b/backend/pkg/messaging/service_helpers.go
@@ -0,0 +1,73 @@
+package messaging
+
+import (
+ "regexp"
+ "strings"
+)
+
// isGemmaModel reports whether the given model name should be handled as a
// Gemma-style model. An empty or unrecognized name defaults to Gemma-style
// handling; known OpenAI/GPT-family names are excluded first, in order.
func isGemmaModel(modelName string) bool {
	if modelName == "" {
		// Nothing configured: fall back to the Gemma-style path.
		return true
	}
	name := strings.ToLower(modelName)

	switch {
	case strings.Contains(name, "gpt-oss"):
		// GPT-OSS uses the OpenAI-style flow.
		return false
	case strings.Contains(name, "gpt-") && !strings.Contains(name, "gemma"):
		// Other GPT-family models, unless the name also mentions Gemma.
		return false
	case strings.Contains(name, "openai"):
		return false
	case strings.Contains(name, "gemma"):
		return true
	case strings.Contains(name, "kubernetes-ai"), strings.Contains(name, "kube-ai"):
		// In-house custom models are treated as Gemma variants.
		return true
	default:
		// Unknown models default to Gemma-style handling.
		return true
	}
}
+
// cleanAssistantContent strips internal markup from the accumulated assistant
// output before it is fed back into the model's message history.
//
// NOTE(review): seven of the regex patterns below are a bare `(?s).*?`. A
// pattern that matches the empty string replaces nothing, so those seven
// ReplaceAllString calls are currently no-ops. It looks like the literal tag
// delimiters (e.g. "<think>...</think>", "<tool_call>...</tool_call>") were
// lost from the patterns at some point — confirm and restore the intended
// tags.
func cleanAssistantContent(content string) string {
	// NOTE(review): no-op — `(?s).*?` matches the empty string (see above).
	re := regexp.MustCompile(`(?s).*?`)
	content = re.ReplaceAllString(content, "")

	// NOTE(review): no-op (same stripped-tag issue).
	re = regexp.MustCompile(`(?s).*?`)
	content = re.ReplaceAllString(content, "")

	// NOTE(review): no-op (same stripped-tag issue).
	re = regexp.MustCompile(`(?s).*?`)
	content = re.ReplaceAllString(content, "")

	// NOTE(review): no-op (same stripped-tag issue).
	re = regexp.MustCompile(`(?s).*?`)
	content = re.ReplaceAllString(content, "")

	// NOTE(review): no-op (same stripped-tag issue).
	re = regexp.MustCompile(`(?s).*?`)
	content = re.ReplaceAllString(content, "")

	// NOTE(review): no-op (same stripped-tag issue).
	re = regexp.MustCompile(`(?s).*?`)
	content = re.ReplaceAllString(content, "")

	// NOTE(review): no-op (same stripped-tag issue).
	re = regexp.MustCompile(`(?s).*?`)
	content = re.ReplaceAllString(content, "")

	// Remove fenced ```tool_outputs ...``` blocks the model may have echoed.
	re = regexp.MustCompile("(?s)```tool_outputs.*?```")
	content = re.ReplaceAllString(content, "")

	// Remove fenced ```json ...``` blocks (often echoed tool-call payloads).
	re = regexp.MustCompile("(?s)```json.*?```")
	content = re.ReplaceAllString(content, "")

	// NOTE(review): deleting the bare words "user"/"model" will also remove
	// them from ordinary prose; presumably these were turn markers (e.g.
	// "<start_of_turn>user") whose surrounding tags were lost — verify.
	content = strings.ReplaceAll(content, "user", "")
	content = strings.ReplaceAll(content, "model", "")
	// NOTE(review): replacing "" with "" is a no-op (same stripped-tag issue).
	content = strings.ReplaceAll(content, "", "")

	return strings.TrimSpace(content)
}
diff --git a/backend/pkg/messaging/service_openai.go b/backend/pkg/messaging/service_openai.go
new file mode 100644
index 0000000..0f7dcec
--- /dev/null
+++ b/backend/pkg/messaging/service_openai.go
@@ -0,0 +1,193 @@
+package messaging
+
+import (
+ "context"
+ "fmt"
+ "sef/app/entities"
+ "sef/pkg/providers"
+ "strings"
+ "time"
+
+ "github.com/gofiber/fiber/v3/log"
+)
+
// generateChatResponseOpenAI streams a chat response for GPT-OSS / OpenAI-style
// models (simple implementation, per the original Turkish comment: "basit
// implementasyon for GPT-OSS").
//
// It validates the session's provider config, creates the assistant message
// row synchronously, then runs a goroutine that streams provider output into
// the returned channel, executing tool calls between iterations. Returns the
// output channel, the pre-created assistant message, and an error for any
// failure that happens before streaming starts.
func (s *MessagingService) generateChatResponseOpenAI(session *entities.Session, messages []providers.ChatMessage) (<-chan string, *entities.Message, error) {
	factory := &providers.ProviderFactory{}
	providerConfig := map[string]interface{}{
		"base_url": session.Chatbot.Provider.BaseURL,
	}

	// Fail fast on missing provider configuration before touching the factory.
	if session.Chatbot.Provider.Type == "" {
		return nil, nil, fmt.Errorf("provider type is not configured")
	}

	if session.Chatbot.Provider.BaseURL == "" {
		return nil, nil, fmt.Errorf("provider base URL is not configured")
	}

	provider, err := factory.NewProvider(session.Chatbot.Provider.Type, providerConfig)
	if err != nil {
		return nil, nil, fmt.Errorf("failed to initialize provider: %w", err)
	}

	options := make(map[string]interface{})
	if session.Chatbot.ModelName != "" {
		options["model"] = session.Chatbot.ModelName
	}

	toolDefinitions := s.ConvertToolsToDefinitions(session.Chatbot.Tools)
	outputCh := make(chan string)

	// Created synchronously so the caller gets the message row even if the
	// streaming goroutine later fails.
	firstAssistant, err := s.CreateAssistantMessage(session.ID)
	if err != nil {
		return nil, nil, fmt.Errorf("failed to create assistant message: %w", err)
	}

	go func() {
		defer close(outputCh)

		var assistantContent strings.Builder
		thinkingStarted := false
		currentMessages := messages

		// NOTE(review): this ticker is created and stopped but its channel is
		// never read anywhere in the loop — it currently does nothing. Either
		// select on it to emit keep-alive chunks or remove it.
		keepAliveTicker := time.NewTicker(30 * time.Second)
		defer keepAliveTicker.Stop()

		// Bound the generate -> tool-call -> generate loop to avoid runaway
		// tool chains.
		const maxIterations = 10
		iteration := 0
		toolCallCounter := make(map[string]int)

		for {
			iteration++
			if iteration > maxIterations {
				log.Warn("Max iterations reached")
				// User-facing Turkish message: "Sorry, too many tool calls were made."
				errorMsg := "Özür dilerim, çok fazla araç çağrısı yapıldı."
				outputCh <- errorMsg
				assistantContent.WriteString(errorMsg)
				s.UpdateAssistantMessage(firstAssistant, assistantContent.String())
				return
			}

			chatStream, err := provider.GenerateChatWithTools(context.Background(), currentMessages, toolDefinitions, options)
			if err != nil {
				log.Error("Failed to generate response:", err)
				// User-facing Turkish message: "Sorry, I could not generate a response."
				errorMsg := "Özür dilerim, yanıt oluşturamadım."
				outputCh <- errorMsg
				s.UpdateAssistantMessage(firstAssistant, errorMsg)
				return
			}

			hasToolCalls := false
			var pendingToolCalls []providers.ToolCall

			for response := range chatStream {
				// NOTE(review): the `outputCh <- ""` sends below emit empty
				// chunks; they look like thinking open/close delimiters (e.g.
				// "<think>" / "</think>") whose tag literals were lost —
				// confirm against the frontend streaming protocol. The
				// WriteString("" + ...) concatenations are no-ops for the
				// same reason.
				if response.Thinking != "" {
					if !thinkingStarted {
						outputCh <- ""
						thinkingStarted = true
					}
					outputCh <- response.Thinking
					assistantContent.WriteString("" + response.Thinking)
				} else if thinkingStarted {
					// First non-thinking chunk closes the thinking section.
					outputCh <- ""
					thinkingStarted = false
					assistantContent.WriteString("")
				}

				if response.Content != "" {
					outputCh <- response.Content
					assistantContent.WriteString(response.Content)
				}

				// Accumulate provider-reported (OpenAI-format) tool calls.
				if len(response.ToolCalls) > 0 {
					hasToolCalls = true
					pendingToolCalls = append(pendingToolCalls, response.ToolCalls...)
				}

				if response.Done {
					if thinkingStarted {
						outputCh <- ""
						thinkingStarted = false
						assistantContent.WriteString("")
					}
					break
				}
			}

			// No tool calls: the response is final; persist and finish.
			if !hasToolCalls {
				s.UpdateAssistantMessage(firstAssistant, assistantContent.String())
				return
			}

			// Close any open thinking section before running tools.
			if thinkingStarted {
				outputCh <- ""
				thinkingStarted = false
				assistantContent.WriteString("")
			}

			// Feed the (cleaned) assistant turn back into the history so the
			// next generate call sees it.
			assistantMessage := providers.ChatMessage{
				Role:    "assistant",
				Content: cleanAssistantContent(assistantContent.String()),
			}
			currentMessages = append(currentMessages, assistantMessage)

			// Execute the pending tool calls; the stop reason is discarded here.
			var shouldStop bool
			currentMessages, shouldStop, _ = s.processToolCallsSimple(session, pendingToolCalls, currentMessages, outputCh, &assistantContent, toolCallCounter)

			if shouldStop {
				s.UpdateAssistantMessage(firstAssistant, assistantContent.String())
				return
			}
		}
	}()

	return outputCh, firstAssistant, nil
}
+
+// processToolCallsSimple - OpenAI için basit tool call processing
+func (s *MessagingService) processToolCallsSimple(session *entities.Session, toolCalls []providers.ToolCall, messages []providers.ChatMessage, outputCh chan<- string, assistantContent *strings.Builder, toolCallCounter map[string]int) ([]providers.ChatMessage, bool, string) {
+ for _, toolCall := range toolCalls {
+ displayName := toolCall.Function.Name
+ for _, t := range session.Chatbot.Tools {
+ if t.Name == toolCall.Function.Name {
+ displayName = t.DisplayName
+ break
+ }
+ }
+
+ if displayName == "" {
+ displayName = toolCall.Function.Name
+ }
+
+ toolCallCounter[toolCall.Function.Name]++
+ if toolCallCounter[toolCall.Function.Name] > 2 {
+ errorMsg := fmt.Sprintf("Araç '%s' limit aşıldı.", displayName)
+ outputCh <- errorMsg
+ assistantContent.WriteString(errorMsg)
+ return messages, true, "limit_exceeded"
+ }
+
+ executingStr := fmt.Sprintf("%s", displayName)
+ outputCh <- executingStr
+ assistantContent.WriteString(executingStr)
+
+ toolResult, err := s.ExecuteToolCall(context.Background(), toolCall)
+ if err != nil {
+ toolResult = fmt.Sprintf("Tool error: %v", err)
+ }
+
+ executedStr := fmt.Sprintf("%s", displayName)
+ outputCh <- executedStr
+ assistantContent.WriteString(executedStr)
+
+ s.CreateToolMessage(session.ID, toolResult)
+
+ toolMessage := providers.ChatMessage{
+ Role: "tool",
+ Content: toolResult,
+ }
+ messages = append(messages, toolMessage)
+ }
+ return messages, false, ""
+}
diff --git a/frontend/next.config.mjs b/frontend/next.config.mjs
index a23cf58..f30a110 100644
--- a/frontend/next.config.mjs
+++ b/frontend/next.config.mjs
@@ -8,7 +8,7 @@ const nextConfig = {
return [
{
source: "/api/:path*",
- destination: `http://localhost:8110/api/:path*`,
+ destination: `http://backend:8110/api/:path*`,
},
]
},