diff --git a/config/locales/client.en.yml b/config/locales/client.en.yml index a9ed3fed4..3b050fb58 100644 --- a/config/locales/client.en.yml +++ b/config/locales/client.en.yml @@ -99,6 +99,15 @@ en: label: "Tool" description: "Tool to use for triage (tool must have no parameters defined)" + + llm_persona_triage: + fields: + persona: + label: "Persona" + description: "AI Persona to use for triage (must have default LLM and User set)" + whisper: + label: "Reply as Whisper" + description: "Whether the persona's response should be a whisper" llm_triage: fields: system_prompt: diff --git a/config/locales/server.en.yml b/config/locales/server.en.yml index 9f7069c6c..3644ea274 100644 --- a/config/locales/server.en.yml +++ b/config/locales/server.en.yml @@ -9,6 +9,9 @@ en: llm_tool_triage: title: Triage posts using AI Tool description: "Triage posts using custom logic in an AI tool" + llm_persona_triage: + title: Triage posts using AI Persona + description: "Respond to posts using a specific AI persona" llm_triage: title: Triage posts using AI description: "Triage posts using a large language model" diff --git a/discourse_automation/llm_persona_triage.rb b/discourse_automation/llm_persona_triage.rb new file mode 100644 index 000000000..b78e418cc --- /dev/null +++ b/discourse_automation/llm_persona_triage.rb @@ -0,0 +1,55 @@ +# frozen_string_literal: true + +if defined?(DiscourseAutomation) + DiscourseAutomation::Scriptable.add("llm_persona_triage") do + version 1 + run_in_background + + triggerables %i[post_created_edited] + + field :persona, + component: :choices, + required: true, + extra: { + content: DiscourseAi::Automation.available_persona_choices, + } + field :whisper, component: :boolean + + script do |context, fields| + post = context["post"] + next if post&.user&.bot? + + persona_id = fields["persona"]["value"] + whisper = fields["whisper"]["value"] + + begin + RateLimiter.new( + Discourse.system_user, + "llm_persona_triage_#{post.id}", + SiteSetting.ai_automation_max_triage_per_post_per_minute, + 1.minute, + ).performed! + + RateLimiter.new( + Discourse.system_user, + "llm_persona_triage", + SiteSetting.ai_automation_max_triage_per_minute, + 1.minute, + ).performed! + + DiscourseAi::Automation::LlmPersonaTriage.handle( + post: post, + persona_id: persona_id, + whisper: whisper, + automation: self.automation, + ) + rescue => e + Discourse.warn_exception( + e, + message: "llm_persona_triage: skipped triage on post #{post.id}", + ) + raise e if Rails.env.tests? + end + end + end +end diff --git a/discourse_automation/llm_tool_triage.rb b/discourse_automation/llm_tool_triage.rb index f6106c8f7..807112a36 100644 --- a/discourse_automation/llm_tool_triage.rb +++ b/discourse_automation/llm_tool_triage.rb @@ -17,6 +17,7 @@ script do |context, fields| tool_id = fields["tool"]["value"] post = context["post"] + return if post&.user&.bot? begin RateLimiter.new( diff --git a/lib/ai_bot/playground.rb b/lib/ai_bot/playground.rb index 9e0c02295..2c9fee2c1 100644 --- a/lib/ai_bot/playground.rb +++ b/lib/ai_bot/playground.rb @@ -170,7 +170,7 @@ def update_playground_with(post) schedule_bot_reply(post) if can_attach?(post) end - def conversation_context(post) + def conversation_context(post, style: nil) # Pay attention to the `post_number <= ?` here. # We want to inject the last post as context because they are translated differently. 
@@ -205,6 +205,7 @@ def conversation_context(post) ) builder = DiscourseAi::Completions::PromptMessagesBuilder.new + builder.topic = post.topic context.reverse_each do |raw, username, custom_prompt, upload_ids| custom_prompt_translation = @@ -245,7 +246,7 @@ def conversation_context(post) end end - builder.to_a + builder.to_a(style: style || (post.topic.private_message? ? :bot : :topic)) end def title_playground(post, user) @@ -418,7 +419,7 @@ def get_context(participants:, conversation_context:, user:, skip_tool_details: result end - def reply_to(post, custom_instructions: nil, whisper: nil, &blk) + def reply_to(post, custom_instructions: nil, whisper: nil, context_style: nil, &blk) # this is a multithreading issue # post custom prompt is needed and it may not # be properly loaded, ensure it is loaded @@ -439,7 +440,7 @@ def reply_to(post, custom_instructions: nil, whisper: nil, &blk) context = get_context( participants: post.topic.allowed_users.map(&:username).join(", "), - conversation_context: conversation_context(post), + conversation_context: conversation_context(post, style: context_style), user: post.user, ) context[:post_id] = post.id diff --git a/lib/automation.rb b/lib/automation.rb index 4325fbd12..f080234ed 100644 --- a/lib/automation.rb +++ b/lib/automation.rb @@ -37,5 +37,19 @@ def self.available_models values end + + def self.available_persona_choices + AiPersona + .joins(:user) + .where.not(user_id: nil) + .where.not(default_llm: nil) + .map do |persona| + { + id: persona.id, + translated_name: persona.name, + description: "#{persona.name} (#{persona.user.username})", + } + end + end end end diff --git a/lib/automation/llm_persona_triage.rb b/lib/automation/llm_persona_triage.rb new file mode 100644 index 000000000..753d2632e --- /dev/null +++ b/lib/automation/llm_persona_triage.rb @@ -0,0 +1,26 @@ +# frozen_string_literal: true +module DiscourseAi + module Automation + module LlmPersonaTriage + def self.handle(post:, persona_id:, whisper: false, automation: nil) + ai_persona = AiPersona.find_by(id: persona_id) + return if ai_persona.nil? + + persona_class = ai_persona.class_instance + persona = persona_class.new + + bot_user = ai_persona.user + return if bot_user.nil? + + bot = DiscourseAi::AiBot::Bot.as(bot_user, persona: persona) + playground = DiscourseAi::AiBot::Playground.new(bot) + + playground.reply_to(post, whisper: whisper, context_style: :topic) + rescue => e + Rails.logger.error("Error in LlmPersonaTriage: #{e.message}\n#{e.backtrace.join("\n")}") + raise e if Rails.env.test? 
+        nil
+      end
+    end
+  end
+end
diff --git a/lib/completions/prompt_messages_builder.rb b/lib/completions/prompt_messages_builder.rb
index 045e0e89e..eab624a3f 100644
--- a/lib/completions/prompt_messages_builder.rb
+++ b/lib/completions/prompt_messages_builder.rb
@@ -4,8 +4,10 @@ module DiscourseAi
   module Completions
     class PromptMessagesBuilder
       MAX_CHAT_UPLOADS = 5
+      MAX_TOPIC_UPLOADS = 5
       attr_reader :chat_context_posts
       attr_reader :chat_context_post_upload_ids
+      attr_accessor :topic
 
       def initialize
         @raw_messages = []
@@ -41,6 +43,7 @@ def set_chat_context_posts(post_ids, guardian, include_uploads:)
       def to_a(limit: nil, style: nil)
         return chat_array(limit: limit) if style == :chat
+        return topic_array if style == :topic
 
         result = []
 
         # this will create a "valid" messages array
@@ -127,6 +130,58 @@ def push(type:, content:, name: nil, upload_ids: nil, id: nil, thinking: nil)
 
       private
 
+      def topic_array
+        raw_messages = @raw_messages.dup
+
+        user_content = +"You are operating in a Discourse forum.\n\n"
+
+        if @topic
+          if @topic.private_message?
+            user_content << "Private message info.\n"
+          else
+            user_content << "Topic information:\n"
+          end
+
+          user_content << "- URL: #{@topic.url}\n"
+          user_content << "- Title: #{@topic.title}\n"
+          if SiteSetting.tagging_enabled
+            tags = @topic.tags.pluck(:name)
+            tags -= DiscourseTagging.hidden_tag_names if tags.present?
+            user_content << "- Tags: #{tags.join(", ")}\n" if tags.present?
+          end
+          if !@topic.private_message?
+            user_content << "- Category: #{@topic.category.name}\n" if @topic.category
+          end
+          user_content << "- Number of replies: #{@topic.posts_count - 1}\n\n"
+        end
+
+        last_user_message = raw_messages.pop
+
+        upload_ids = []
+        if raw_messages.present?
+          user_content << "Here is the conversation so far:\n"
+          raw_messages.each do |message|
+            user_content << "#{message[:name] || "User"}: #{message[:content]}\n"
+            upload_ids.concat(message[:upload_ids]) if message[:upload_ids].present?
+          end
+        end
+
+        if last_user_message
+          user_content << "You are responding to #{last_user_message[:name] || "User"} who just said:\n #{last_user_message[:content]}"
+          if last_user_message[:upload_ids].present?
+            upload_ids.concat(last_user_message[:upload_ids])
+          end
+        end
+
+        user_message = { type: :user, content: user_content }
+
+        if upload_ids.present?
+          user_message[:upload_ids] = upload_ids[-MAX_TOPIC_UPLOADS..-1] || upload_ids
+        end
+
+        [user_message]
+      end
+
       def chat_array(limit:)
         if @raw_messages.length > 1
           buffer =
@@ -155,7 +210,7 @@ def chat_array(limit:)
         end
 
         last_message = @raw_messages[-1]
-        buffer << "#{last_message[:name] || "User"} said #{last_message[:content]} "
+        buffer << "#{last_message[:name] || "User"}: #{last_message[:content]} "
 
         message = { type: :user, content: buffer }
         upload_ids.concat(last_message[:upload_ids]) if last_message[:upload_ids].present?
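A minimal usage sketch of the new :topic style, assuming an existing Topic record (topic) and illustrative message values that mirror the spec added below:

    # Produces a single consolidated :user message that leads with topic metadata
    # (URL, title, tags, category, reply count), followed by the conversation so far,
    # ending with the post being replied to.
    builder = DiscourseAi::Completions::PromptMessagesBuilder.new
    builder.topic = topic # any Topic record; enables the metadata header
    builder.push(type: :user, content: "I like frogs", name: "Bob")
    builder.push(type: :user, content: "How do I solve this?", name: "Alice")
    builder.to_a(style: :topic)
    # => [{ type: :user, content: "You are operating in a Discourse forum.\n\n..." }]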
diff --git a/plugin.rb b/plugin.rb index bebd2b72b..e3218d930 100644 --- a/plugin.rb +++ b/plugin.rb @@ -78,6 +78,7 @@ def self.public_asset_path(name) require_relative "discourse_automation/llm_triage" require_relative "discourse_automation/llm_report" require_relative "discourse_automation/llm_tool_triage" + require_relative "discourse_automation/llm_persona_triage" add_admin_route("discourse_ai.title", "discourse-ai", { use_new_show_route: true }) diff --git a/spec/lib/completions/prompt_messages_builder_spec.rb b/spec/lib/completions/prompt_messages_builder_spec.rb index 7e758e7ae..b162e39cf 100644 --- a/spec/lib/completions/prompt_messages_builder_spec.rb +++ b/spec/lib/completions/prompt_messages_builder_spec.rb @@ -40,4 +40,28 @@ expected = [{ type: :user, content: "Alice: Echo 123 please\nJames: OK" }] expect(builder.to_a).to eq(expected) end + + it "should format messages for topic style" do + # Create a topic with tags + topic = Fabricate(:topic, title: "This is an Example Topic") + + # Add tags to the topic + topic.tags = [Fabricate(:tag, name: "tag1"), Fabricate(:tag, name: "tag2")] + topic.save! + + builder.topic = topic + builder.push(type: :user, content: "I like frogs", name: "Bob") + builder.push(type: :user, content: "How do I solve this?", name: "Alice") + + result = builder.to_a(style: :topic) + + content = result[0][:content] + + expect(content).to include("This is an Example Topic") + expect(content).to include("tag1") + expect(content).to include("tag2") + expect(content).to include("Bob: I like frogs") + expect(content).to include("Alice") + expect(content).to include("How do I solve this") + end end diff --git a/spec/lib/discourse_automation/llm_persona_triage_spec.rb b/spec/lib/discourse_automation/llm_persona_triage_spec.rb new file mode 100644 index 000000000..37e40193a --- /dev/null +++ b/spec/lib/discourse_automation/llm_persona_triage_spec.rb @@ -0,0 +1,208 @@ +# frozen_string_literal: true + +return if !defined?(DiscourseAutomation) + +describe DiscourseAi::Automation::LlmPersonaTriage do + fab!(:user) + fab!(:bot_user) { Fabricate(:user) } + + fab!(:llm_model) do + Fabricate(:llm_model, provider: "anthropic", name: "claude-3-opus", enabled_chat_bot: true) + end + + fab!(:ai_persona) do + persona = + Fabricate( + :ai_persona, + name: "Triage Helper", + description: "A persona that helps with triaging posts", + system_prompt: "You are a helpful assistant that triages posts", + default_llm: llm_model, + ) + + # Create the user for this persona + persona.update!(user_id: bot_user.id) + persona + end + + let(:automation) { Fabricate(:automation, script: "llm_persona_triage", enabled: true) } + + def add_automation_field(name, value, type: "text") + automation.fields.create!( + component: type, + name: name, + metadata: { + value: value, + }, + target: "script", + ) + end + + before do + SiteSetting.ai_bot_enabled = true + SiteSetting.ai_bot_allowed_groups = "#{Group::AUTO_GROUPS[:trust_level_0]}" + + add_automation_field("persona", ai_persona.id, type: "choices") + add_automation_field("whisper", false, type: "boolean") + end + + it "can respond to a post using the specified persona" do + post = Fabricate(:post, raw: "This is a test post that needs triage") + + response_text = "I've analyzed your post and can help with that." + + DiscourseAi::Completions::Llm.with_prepared_responses([response_text]) do + automation.running_in_background! 
+ automation.trigger!({ "post" => post }) + end + + topic = post.topic.reload + last_post = topic.posts.order(:post_number).last + + expect(topic.posts.count).to eq(2) + + # Verify that the response was posted by the persona's user + expect(last_post.user_id).to eq(bot_user.id) + expect(last_post.raw).to eq(response_text) + expect(last_post.post_type).to eq(Post.types[:regular]) # Not a whisper + end + + it "can respond with a whisper when configured to do so" do + add_automation_field("whisper", true, type: "boolean") + post = Fabricate(:post, raw: "This is another test post for triage") + + response_text = "Staff-only response to your post." + + DiscourseAi::Completions::Llm.with_prepared_responses([response_text]) do + automation.running_in_background! + automation.trigger!({ "post" => post }) + end + + topic = post.topic.reload + last_post = topic.posts.order(:post_number).last + + # Verify that the response is a whisper + expect(last_post.user_id).to eq(bot_user.id) + expect(last_post.raw).to eq(response_text) + expect(last_post.post_type).to eq(Post.types[:whisper]) # This should be a whisper + end + + it "does not respond to posts made by bots" do + bot = Fabricate(:bot) + bot_post = Fabricate(:post, user: bot, raw: "This is a bot post") + + # The automation should not trigger for bot posts + DiscourseAi::Completions::Llm.with_prepared_responses(["Response"]) do + automation.running_in_background! + automation.trigger!({ "post" => bot_post }) + end + + # Verify no new post was created + expect(bot_post.topic.reload.posts.count).to eq(1) + end + + it "handles errors gracefully" do + post = Fabricate(:post, raw: "Error-triggering post") + + # Set up to cause an error + ai_persona.update!(user_id: nil) + + # Should not raise an error + expect { + automation.running_in_background! + automation.trigger!({ "post" => post }) + }.not_to raise_error + + # Verify no new post was created + expect(post.topic.reload.posts.count).to eq(1) + end + + it "passes topic metadata in context when responding to topic" do + # Create a category and tags for the test + category = Fabricate(:category, name: "Test Category") + tag1 = Fabricate(:tag, name: "test-tag") + tag2 = Fabricate(:tag, name: "support") + + # Create a topic with category and tags + topic = + Fabricate( + :topic, + title: "Important Question About Feature", + category: category, + tags: [tag1, tag2], + user: user, + ) + + # Create a post in that topic + _post = + Fabricate( + :post, + topic: topic, + user: user, + raw: "This is a test post in a categorized and tagged topic", + ) + + post2 = + Fabricate(:post, topic: topic, user: user, raw: "This is another post in the same topic") + + # Capture the prompt sent to the LLM to verify it contains metadata + prompt = nil + + DiscourseAi::Completions::Llm.with_prepared_responses( + ["I've analyzed your question"], + ) do |_, _, _prompts| + automation.running_in_background! 
+ automation.trigger!({ "post" => post2 }) + prompt = _prompts.first + end + + context = prompt.messages[1][:content] # The second message should be the triage prompt + + # Verify that topic metadata is included in the context + expect(context).to include("Important Question About Feature") + expect(context).to include("Test Category") + expect(context).to include("test-tag") + expect(context).to include("support") + end + + it "passes private message metadata in context when responding to PM" do + # Create a private message topic + pm_topic = Fabricate(:private_message_topic, user: user, title: "Important PM") + + # Create initial PM post + pm_post = + Fabricate( + :post, + topic: pm_topic, + user: user, + raw: "This is a private message that needs triage", + ) + + # Create a follow-up post + pm_post2 = + Fabricate( + :post, + topic: pm_topic, + user: user, + raw: "Adding more context to my private message", + ) + + # Capture the prompt sent to the LLM + prompt = nil + + DiscourseAi::Completions::Llm.with_prepared_responses( + ["I've received your private message"], + ) do |_, _, _prompts| + automation.running_in_background! + automation.trigger!({ "post" => pm_post2 }) + prompt = _prompts.first + end + + context = prompt.messages[1][:content] + + # Verify that PM metadata is included in the context + expect(context).to include("Important PM") + expect(context).to include(pm_post.raw) + expect(context).to include(pm_post2.raw) + end +end
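A rough sketch of exercising the new handler directly, outside the automation trigger; the persona name and post id here are hypothetical, and the persona must have a default LLM and user set:

    # Replies to the post as the persona's bot user; posts a whisper when whisper: true.
    persona = AiPersona.find_by(name: "Triage Helper")
    post = Post.find(1234) # hypothetical post id
    DiscourseAi::Automation::LlmPersonaTriage.handle(
      post: post,
      persona_id: persona.id,
      whisper: false,
    )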