From 242297a25c94f61be56c76d9ab5871a0c5eb59e1 Mon Sep 17 00:00:00 2001
From: Eden Zimbelman
Date: Wed, 17 Dec 2025 15:45:56 -0800
Subject: [PATCH 1/8] feat: showcase text generation and thinking steps from suggested prompts

---
 ai/llm_caller.py                           |  18 +--
 .../assistant/assistant_thread_started.py  |  32 ++---
 listeners/assistant/message.py             | 119 +++++++++++++-----
 listeners/events/app_mentioned.py          |   3 +-
 4 files changed, 104 insertions(+), 68 deletions(-)

diff --git a/ai/llm_caller.py b/ai/llm_caller.py
index d0a0591..c4ea7cb 100644
--- a/ai/llm_caller.py
+++ b/ai/llm_caller.py
@@ -1,27 +1,17 @@
 import os
-from typing import Dict, List
 
 import openai
 from openai import Stream
 from openai.types.responses import ResponseStreamEvent
 
-DEFAULT_SYSTEM_CONTENT = """
-You're an assistant in a Slack workspace.
-Users in the workspace will ask you to help them write something or to think better about a specific topic.
-You'll respond to those questions in a professional way.
-When you include markdown text, convert them to Slack compatible ones.
-When a prompt has Slack's special syntax like <@USER_ID> or <#CHANNEL_ID>, you must keep them as-is in your response.
-"""
-
 
 def call_llm(
-    messages_in_thread: List[Dict[str, str]],
-    system_content: str = DEFAULT_SYSTEM_CONTENT,
+    prompt: str,
 ) -> Stream[ResponseStreamEvent]:
     openai_client = openai.OpenAI(api_key=os.getenv("OPENAI_API_KEY"))
-    messages = [{"role": "system", "content": system_content}]
-    messages.extend(messages_in_thread)
     response = openai_client.responses.create(
-        model="gpt-4o-mini", input=messages, stream=True
+        model="gpt-4o-mini",
+        input=prompt,
+        stream=True,
     )
     return response
diff --git a/listeners/assistant/assistant_thread_started.py b/listeners/assistant/assistant_thread_started.py
index 9d2990f..2420a51 100644
--- a/listeners/assistant/assistant_thread_started.py
+++ b/listeners/assistant/assistant_thread_started.py
@@ -1,5 +1,4 @@
 from logging import Logger
-from typing import Dict, List
 
 from slack_bolt import Say, SetSuggestedPrompts
 
@@ -18,24 +17,19 @@ def assistant_thread_started(
         logger: Logger instance for error tracking
     """
     try:
-        say("How can I help you?")
-
-        prompts: List[Dict[str, str]] = [
-            {
-                "title": "What does Slack stand for?",
-                "message": "Slack, a business communication service, was named after an acronym. Can you guess what it stands for?",
-            },
-            {
-                "title": "Write a draft announcement",
-                "message": "Can you write a draft announcement about a new feature my team just released? It must include how impactful it is.",
-            },
-            {
-                "title": "Suggest names for my Slack app",
-                "message": "Can you suggest a few names for my Slack app? The app helps my teammates better organize information and plan priorities and action items.",
-            },
-        ]
-
-        set_suggested_prompts(prompts=prompts)
+        say("What would you like to do today?")
+        set_suggested_prompts(
+            prompts=[
+                {
+                    "title": "Prompt a task with thinking steps",
+                    "message": "Wonder a few deep thoughts.",
+                },
+                {
+                    "title": "Generate a release announcement",
+                    "message": "Please write detailed changelog notes for a feature that almost seems to be magic.",
+                },
+            ]
+        )
     except Exception as e:
         logger.exception(f"Failed to handle an assistant_thread_started event: {e}", e)
         say(f":warning: Something went wrong! ({e})")
diff --git a/listeners/assistant/message.py b/listeners/assistant/message.py
index b61de90..1a88630 100644
--- a/listeners/assistant/message.py
+++ b/listeners/assistant/message.py
@@ -1,8 +1,9 @@
+import time
 from logging import Logger
-from typing import Dict, List
 
 from slack_bolt import BoltContext, Say, SetStatus
 from slack_sdk import WebClient
+from slack_sdk.models.messages.chunk import MarkdownTextChunk, TaskUpdateChunk
 
 from ai.llm_caller import call_llm
 
@@ -13,6 +14,7 @@ def message(
     client: WebClient,
     context: BoltContext,
     logger: Logger,
+    message: dict,
     payload: dict,
     say: Say,
     set_status: SetStatus,
@@ -34,30 +36,6 @@ def message(
         thread_ts = payload["thread_ts"]
         user_id = context.user_id
 
-        set_status(
-            status="thinking...",
-            loading_messages=[
-                "Teaching the hamsters to type faster…",
-                "Untangling the internet cables…",
-                "Consulting the office goldfish…",
-                "Polishing up the response just for you…",
-                "Convincing the AI to stop overthinking…",
-            ],
-        )
-
-        replies = client.conversations_replies(
-            channel=context.channel_id,
-            ts=context.thread_ts,
-            oldest=context.thread_ts,
-            limit=10,
-        )
-        messages_in_thread: List[Dict[str, str]] = []
-        for message in replies["messages"]:
-            role = "user" if message.get("bot_id") is None else "assistant"
-            messages_in_thread.append({"role": role, "content": message["text"]})
-
-        returned_message = call_llm(messages_in_thread)
-
         streamer = client.chat_stream(
             channel=channel_id,
             recipient_team_id=team_id,
@@ -65,16 +43,89 @@ def message(
             thread_ts=thread_ts,
         )
 
-        # Loop over OpenAI response stream
-        # https://platform.openai.com/docs/api-reference/responses/create
-        for event in returned_message:
-            if event.type == "response.output_text.delta":
-                streamer.append(markdown_text=f"{event.delta}")
-            else:
-                continue
+        # This first example shows a generated text response for the provided prompt
+        if message["text"] != "Wonder a few deep thoughts.":
+            set_status(
+                status="thinking...",
+                loading_messages=[
+                    "Teaching the hamsters to type faster…",
+                    "Untangling the internet cables…",
+                    "Consulting the office goldfish…",
+                    "Polishing up the response just for you…",
+                    "Convincing the AI to stop overthinking…",
+                ],
+            )
+
+            # Loop over OpenAI response stream
+            # https://platform.openai.com/docs/api-reference/responses/create
+            for event in call_llm(message["text"]):
+                if event.type == "response.output_text.delta":
+                    streamer.append(markdown_text=f"{event.delta}")
+                else:
+                    continue
+
+            feedback_block = create_feedback_block()
+            streamer.stop(
+                blocks=feedback_block,
+            )
+
+        # The second example shows detailed thinking steps similar to tool calls
+        else:
+            streamer.append(
+                chunks=[
+                    MarkdownTextChunk(
+                        text="Hello.\nI have received the task. ",
+                    ),
+                    MarkdownTextChunk(
+                        text="This task appears manageable.\nThat is good.",
+                    ),
+                    TaskUpdateChunk(
+                        id="001",
+                        title="Understanding the task...",
+                        status="in_progress",
+                        details="- Identify the goal\n- Identify constraints\n- Pretending this is obvious",
+                    ),
+                    TaskUpdateChunk(
+                        id="002",
+                        title="Performing acrobatics...",
+                        status="pending",
+                    ),
+                ],
+            )
+            time.sleep(4)
+
+            streamer.append(
+                chunks=[
+                    TaskUpdateChunk(
+                        id="001",
+                        title="Understanding the task...",
+                        status="complete",
+                        details="- Identified the goal\n- Identified constraints\n- Pretended this was obvious",
+                        output="We'll continue to ramble now",
+                    ),
+                    TaskUpdateChunk(
+                        id="002",
+                        title="Performing acrobatics...",
+                        status="in_progress",
+                        details="- Jumping atop ropes\n- Juggling bowling pins\n- Riding a single wheel too",
+                    ),
+                ],
+            )
+            time.sleep(4)
 
-        feedback_block = create_feedback_block()
-        streamer.stop(blocks=feedback_block)
+            streamer.stop(
+                chunks=[
+                    TaskUpdateChunk(
+                        id="002",
+                        title="Performing acrobatics...",
+                        status="complete",
+                        details="- Jumped atop ropes\n- Juggled bowling pins\n- Rode a single wheel too",
+                    ),
+                    MarkdownTextChunk(
+                        text="The crowd appears to be astouned and applauds :popcorn:"
+                    ),
+                ],
+            )
 
     except Exception as e:
         logger.exception(f"Failed to handle a user message event: {e}")
diff --git a/listeners/events/app_mentioned.py b/listeners/events/app_mentioned.py
index 6ec8d25..9ea8c13 100644
--- a/listeners/events/app_mentioned.py
+++ b/listeners/events/app_mentioned.py
@@ -4,6 +4,7 @@
 from slack_sdk import WebClient
 
 from ai.llm_caller import call_llm
+
 from ..views.feedback_block import create_feedback_block
 
 
@@ -38,7 +39,7 @@ def app_mentioned_callback(client: WebClient, event: dict, logger: Logger, say:
             ],
         )
 
-        returned_message = call_llm([{"role": "user", "content": text}])
+        returned_message = call_llm(text)
 
         streamer = client.chat_stream(
             channel=channel_id,

From ea6c722b7d94b20885a9da94d430bc313ccdea41 Mon Sep 17 00:00:00 2001
From: Eden Zimbelman
Date: Wed, 17 Dec 2025 16:02:57 -0800
Subject: [PATCH 2/8] fix: append to details instead of overwriting past arguments

---
 listeners/assistant/message.py | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)

diff --git a/listeners/assistant/message.py b/listeners/assistant/message.py
index 1a88630..bdc259f 100644
--- a/listeners/assistant/message.py
+++ b/listeners/assistant/message.py
@@ -83,7 +83,7 @@ def message(
                         id="001",
                         title="Understanding the task...",
                         status="in_progress",
-                        details="- Identify the goal\n- Identify constraints\n- Pretending this is obvious",
+                        details="- Identifying the goal\n- Identifying constraints",
                     ),
                     TaskUpdateChunk(
                         id="002",
@@ -100,14 +100,13 @@ def message(
                         id="001",
                         title="Understanding the task...",
                         status="complete",
-                        details="- Identified the goal\n- Identified constraints\n- Pretended this was obvious",
+                        details="\n- Pretending this was obvious",
                         output="We'll continue to ramble now",
                     ),
                     TaskUpdateChunk(
                         id="002",
                         title="Performing acrobatics...",
                         status="in_progress",
-                        details="- Jumping atop ropes\n- Juggling bowling pins\n- Riding a single wheel too",
                     ),
                 ],
             )

From 4a5de842de30bfcaaadde618e4bcbad26e3dc780 Mon Sep 17 00:00:00 2001
From: Eden Zimbelman
Date: Sun, 18 Jan 2026 22:27:51 -0800
Subject: [PATCH 3/8] feat: roll dice

---
 README.md                                  |   4 +-
 ai/llm_caller.py                           | 100 ++++++++++++++++--
 ai/tools/__init__.py                       |   0
 ai/tools/dice.py                           |  58 ++++++++++
 app.py                                     |   2 +-
 .../assistant/assistant_thread_started.py  |   4 +-
 listeners/assistant/message.py             |  51 ++++++---
 listeners/events/app_mentioned.py          |  25 +++--
 requirements.txt                           |   2 +-
 9 files changed, 203 insertions(+), 43 deletions(-)
 create mode 100644 ai/tools/__init__.py
 create mode 100644 ai/tools/dice.py

diff --git a/README.md b/README.md
index 4216662..0f777cd 100644
--- a/README.md
+++ b/README.md
@@ -152,7 +152,9 @@ Configures the new Slack Assistant features, providing a dedicated side panel UI
 
 ### `/ai`
 
-The `llm_caller.py` file, which handles OpenAI API integration and message formatting. It includes the `call_llm()` function that sends conversation threads to OpenAI's models.
+The `llm_caller.py` file calls the OpenAI API and streams the generated response into a Slack conversation.
+
+The `tools` directory contains app-specific functions for the LLM to call.
 
 ## App Distribution / OAuth
diff --git a/ai/llm_caller.py b/ai/llm_caller.py
index c4ea7cb..9fe22b1 100644
--- a/ai/llm_caller.py
+++ b/ai/llm_caller.py
@@ -1,17 +1,101 @@
+import json
 import os
 
 import openai
-from openai import Stream
-from openai.types.responses import ResponseStreamEvent
+from openai.types.responses import ResponseInputParam
+from slack_sdk.models.messages.chunk import TaskUpdateChunk
+from slack_sdk.web.chat_stream import ChatStream
+
+from ai.tools.dice import roll_dice, roll_dice_definition
 
 
 def call_llm(
-    prompt: str,
-) -> Stream[ResponseStreamEvent]:
-    openai_client = openai.OpenAI(api_key=os.getenv("OPENAI_API_KEY"))
-    response = openai_client.responses.create(
+    streamer: ChatStream,
+    prompts: ResponseInputParam,
+):
+    """
+    Stream an LLM response to prompts with an example dice rolling function
+
+    https://docs.slack.dev/tools/python-slack-sdk/web#sending-streaming-messages
+    https://platform.openai.com/docs/guides/text
+    https://platform.openai.com/docs/guides/streaming-responses
+    https://platform.openai.com/docs/guides/function-calling
+    """
+    llm = openai.OpenAI(
+        api_key=os.getenv("OPENAI_API_KEY"),
+    )
+    tool_calls = []
+    response = llm.responses.create(
         model="gpt-4o-mini",
-        input=prompt,
+        input=prompts,
+        tools=[
+            roll_dice_definition,
+        ],
         stream=True,
     )
-    return response
+
+    for event in response:
+        # Markdown text from the LLM response is streamed in chat as it arrives
+        if event.type == "response.output_text.delta":
+            streamer.append(markdown_text=f"{event.delta}")
+
+        # Function calls are saved for later computation and a new task is shown
+        if event.type == "response.output_item.done":
+            if event.item.type == "function_call":
+                tool_calls.append(event.item)
+                if event.item.name == "roll_dice":
+                    args = json.loads(event.item.arguments)
+                    streamer.append(
+                        chunks=[
+                            TaskUpdateChunk(
+                                id=f"{event.item.call_id}",
+                                title=f"Rolling a {args['count']}d{args['sides']}...",
+                                status="in_progress",
+                            ),
+                        ],
+                    )
+
+    # Tool calls are performed and tasks are marked as completed in Slack
+    if tool_calls:
+        for call in tool_calls:
+            if call.name == "roll_dice":
+                args = json.loads(call.arguments)
+                prompts.append(
+                    {
+                        "id": call.id,
+                        "call_id": call.call_id,
+                        "type": "function_call",
+                        "name": "roll_dice",
+                        "arguments": call.arguments,
+                    }
+                )
+                result = roll_dice(**args)
+                prompts.append(
+                    {
+                        "type": "function_call_output",
+                        "call_id": call.call_id,
+                        "output": json.dumps(result),
+                    }
+                )
+                if result.get("error") is not None:
+                    streamer.append(
+                        chunks=[
+                            TaskUpdateChunk(
+                                id=f"{call.call_id}",
+                                title=f"{result['error']}",
+                                status="error",
+                            ),
+                        ],
+                    )
+                else:
+                    streamer.append(
+                        chunks=[
+                            TaskUpdateChunk(
+                                id=f"{call.call_id}",
+                                title=f"{result['description']}",
+                                status="complete",
+                            ),
+                        ],
+                    )
+
+        # Complete the LLM response after making tool calls
+        call_llm(streamer, prompts)
diff --git a/ai/tools/__init__.py b/ai/tools/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/ai/tools/dice.py b/ai/tools/dice.py
new file mode 100644
index 0000000..fa98f5e
--- /dev/null
+++ b/ai/tools/dice.py
@@ -0,0 +1,58 @@
+import random
+import time
+
+from openai.types.responses import FunctionToolParam
+
+
+def roll_dice(sides: int = 6, count: int = 1) -> dict:
+    if sides < 2:
+        return {
+            "error": "A die must have at least 2 sides",
+            "rolls": [],
+            "total": 0,
+        }
+
+    if count < 1:
+        return {
+            "error": "Must roll at least 1 die",
+            "rolls": [],
+            "total": 0,
+        }
+    rolls = [random.randint(1, sides) for _ in range(count)]
+    total = sum(rolls)
+
+    # Add a pause between rolls to demonstrate loading states
+    time.sleep(2)
+
+    return {
+        "rolls": rolls,
+        "total": total,
+        "description": f"Rolled a {count}d{sides} to total {total}",
+    }
+
+
+# Tool definition for OpenAI API
+#
+# https://platform.openai.com/docs/guides/function-calling
+roll_dice_definition: FunctionToolParam = {
+    "type": "function",
+    "name": "roll_dice",
+    "description": "Roll one or more dice with a specified number of sides. Use this when the user wants to roll dice or generate random numbers within a range.",
+    "parameters": {
+        "type": "object",
+        "properties": {
+            "sides": {
+                "type": "integer",
+                "description": "The number of sides on the die (e.g., 6 for a standard die, 20 for a d20)",
+                "default": 6,
+            },
+            "count": {
+                "type": "integer",
+                "description": "The number of dice to roll",
+                "default": 1,
+            },
+        },
+        "required": ["sides", "count"],
+    },
+    "strict": False,
+}
diff --git a/app.py b/app.py
index 44ae23b..fbdac66 100644
--- a/app.py
+++ b/app.py
@@ -2,7 +2,6 @@ import os
 
 from dotenv import load_dotenv
-
 from slack_bolt import App
 from slack_bolt.adapter.socket_mode import SocketModeHandler
 from slack_sdk import WebClient
@@ -22,6 +21,7 @@
         token=os.environ.get("SLACK_BOT_TOKEN"),
     ),
 )
+
 # Register Listeners
 register_listeners(app)
diff --git a/listeners/assistant/assistant_thread_started.py b/listeners/assistant/assistant_thread_started.py
index 2420a51..a5fa3f1 100644
--- a/listeners/assistant/assistant_thread_started.py
+++ b/listeners/assistant/assistant_thread_started.py
@@ -25,8 +25,8 @@ def assistant_thread_started(
                     "message": "Wonder a few deep thoughts.",
                 },
                 {
-                    "title": "Generate a release announcement",
-                    "message": "Please write detailed changelog notes for a feature that almost seems to be magic.",
+                    "title": "Roll dice for a random number",
+                    "message": "Roll two 12-sided dice and three 6-sided dice for a pseudo-random score.",
                 },
             ]
         )
diff --git a/listeners/assistant/message.py b/listeners/assistant/message.py
index bdc259f..c7d693c 100644
--- a/listeners/assistant/message.py
+++ b/listeners/assistant/message.py
@@ -1,13 +1,17 @@
 import time
 from logging import Logger
 
+from openai.types.responses import ResponseInputParam
 from slack_bolt import BoltContext, Say, SetStatus
 from slack_sdk import WebClient
-from slack_sdk.models.messages.chunk import MarkdownTextChunk, TaskUpdateChunk
+from slack_sdk.models.messages.chunk import (
+    MarkdownTextChunk,
+    PlanUpdateChunk,
+    TaskUpdateChunk,
+)
 
 from ai.llm_caller import call_llm
-
-from ..views.feedback_block import create_feedback_block
+from listeners.views.feedback_block import create_feedback_block
 
 
 def message(
@@ -36,13 +40,6 @@ def message(
         thread_ts = payload["thread_ts"]
         user_id = context.user_id
 
-        streamer = client.chat_stream(
-            channel=channel_id,
-            recipient_team_id=team_id,
-            recipient_user_id=user_id,
-            thread_ts=thread_ts,
-        )
-
         # This first example shows a generated text response for the provided prompt
         if message["text"] != "Wonder a few deep thoughts.":
             set_status(
@@ -56,13 +53,20 @@ def message(
                 ],
             )
 
-            # Loop over OpenAI response stream
-            # https://platform.openai.com/docs/api-reference/responses/create
-            for event in call_llm(message["text"]):
-                if event.type == "response.output_text.delta":
-                    streamer.append(markdown_text=f"{event.delta}")
-                else:
-                    continue
+            streamer = client.chat_stream(
+                channel=channel_id,
+                recipient_team_id=team_id,
+                recipient_user_id=user_id,
+                thread_ts=thread_ts,
+                task_display_mode="timeline",
+            )
+            prompts: ResponseInputParam = [
+                {
+                    "role": "user",
+                    "content": message["text"],
+                },
+            ]
+            call_llm(streamer, prompts)
 
             feedback_block = create_feedback_block()
             streamer.stop(
@@ -71,6 +75,13 @@ def message(
 
         # The second example shows detailed thinking steps similar to tool calls
         else:
+            streamer = client.chat_stream(
+                channel=channel_id,
+                recipient_team_id=team_id,
+                recipient_user_id=user_id,
+                thread_ts=thread_ts,
+                task_display_mode="plan",
+            )
             streamer.append(
                 chunks=[
                     MarkdownTextChunk(
@@ -96,6 +107,9 @@ def message(
 
             streamer.append(
                 chunks=[
+                    PlanUpdateChunk(
+                        title="Adding the final pieces...",
+                    ),
                     TaskUpdateChunk(
                         id="001",
                         title="Understanding the task...",
@@ -114,6 +128,9 @@ def message(
 
             streamer.stop(
                 chunks=[
+                    PlanUpdateChunk(
+                        title="Decided to put on a show",
+                    ),
                     TaskUpdateChunk(
                         id="002",
                         title="Performing acrobatics...",
diff --git a/listeners/events/app_mentioned.py b/listeners/events/app_mentioned.py
index 9ea8c13..fe18d61 100644
--- a/listeners/events/app_mentioned.py
+++ b/listeners/events/app_mentioned.py
@@ -1,11 +1,11 @@
 from logging import Logger
 
+from openai.types.responses import ResponseInputParam
 from slack_bolt import Say
 from slack_sdk import WebClient
 
 from ai.llm_caller import call_llm
-
-from ..views.feedback_block import create_feedback_block
+from listeners.views.feedback_block import create_feedback_block
 
 
 def app_mentioned_callback(client: WebClient, event: dict, logger: Logger, say: Say):
@@ -39,25 +39,24 @@ def app_mentioned_callback(client: WebClient, event: dict, logger: Logger, say:
             ],
         )
 
-        returned_message = call_llm(text)
-
         streamer = client.chat_stream(
             channel=channel_id,
             recipient_team_id=team_id,
             recipient_user_id=user_id,
             thread_ts=thread_ts,
         )
-
-        # Loop over OpenAI response stream
-        # https://platform.openai.com/docs/api-reference/responses/create
-        for event in returned_message:
-            if event.type == "response.output_text.delta":
-                streamer.append(markdown_text=f"{event.delta}")
-            else:
-                continue
+        prompts: ResponseInputParam = [
+            {
+                "role": "user",
+                "content": text,
+            },
+        ]
+        call_llm(streamer, prompts)
 
         feedback_block = create_feedback_block()
-        streamer.stop(blocks=feedback_block)
+        streamer.stop(
+            blocks=feedback_block,
+        )
 
     except Exception as e:
         logger.exception(f"Failed to handle a user message event: {e}")
         say(f":warning: Something went wrong! ({e})")
diff --git a/requirements.txt b/requirements.txt
index 2a18225..00a0ced 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,4 +1,4 @@
-slack-sdk==3.39.0
+slack-sdk==3.40.0.dev0
 slack-bolt==1.27.0
 
 # If you use a different LLM vendor, replace this dependency

From ac2816f0f582f3fad6dbc7918d726f99526e3d5a Mon Sep 17 00:00:00 2001
From: Eden Zimbelman
Date: Tue, 20 Jan 2026 15:17:42 -0800
Subject: [PATCH 4/8] docs: improve comments to reason about sections of code

Co-authored-by: Michael Brooks
---
 ai/tools/dice.py               | 2 ++
 listeners/assistant/message.py | 2 ++
 2 files changed, 4 insertions(+)

diff --git a/ai/tools/dice.py b/ai/tools/dice.py
index fa98f5e..7ce5080 100644
--- a/ai/tools/dice.py
+++ b/ai/tools/dice.py
@@ -18,6 +18,8 @@ def roll_dice(sides: int = 6, count: int = 1) -> dict:
             "rolls": [],
             "total": 0,
         }
+
+    # Roll the dice and calculate the total
     rolls = [random.randint(1, sides) for _ in range(count)]
     total = sum(rolls)
 
diff --git a/listeners/assistant/message.py b/listeners/assistant/message.py
index c7d693c..04d6675 100644
--- a/listeners/assistant/message.py
+++ b/listeners/assistant/message.py
@@ -41,6 +41,7 @@ def message(
         user_id = context.user_id
 
         # This first example shows a generated text response for the provided prompt
+        # displayed as a timeline.
         if message["text"] != "Wonder a few deep thoughts.":
             set_status(
                 status="thinking...",
@@ -74,6 +75,7 @@ def message(
             )
 
         # The second example shows detailed thinking steps similar to tool calls
+        # displayed as a plan.
         else:
             streamer = client.chat_stream(

From 6ab87957c0696e4cf242c600929300274ab50e8b Mon Sep 17 00:00:00 2001
From: Eden Zimbelman
Date: Tue, 20 Jan 2026 15:22:24 -0800
Subject: [PATCH 5/8] refactor: move 'ai' features to an 'agent' directory

---
 README.md                         | 2 +-
 {ai => agent}/llm_caller.py       | 2 +-
 {ai => agent}/tools/__init__.py   | 0
 {ai => agent}/tools/dice.py       | 0
 listeners/assistant/message.py    | 2 +-
 listeners/events/app_mentioned.py | 2 +-
 6 files changed, 4 insertions(+), 4 deletions(-)
 rename {ai => agent}/llm_caller.py (98%)
 rename {ai => agent}/tools/__init__.py (100%)
 rename {ai => agent}/tools/dice.py (100%)

diff --git a/README.md b/README.md
index 0f777cd..0d16fcc 100644
--- a/README.md
+++ b/README.md
@@ -150,7 +150,7 @@ Configures the new Slack Assistant features, providing a dedicated side panel UI
 - The `assistant_thread_started.py` file, which responds to new app threads with a list of suggested prompts.
 - The `message.py` file, which responds to user messages sent to app threads or from the **Chat** and **History** tab with an LLM generated response.
 
-### `/ai`
+### `/agent`
 
 The `llm_caller.py` file calls the OpenAI API and streams the generated response into a Slack conversation.
diff --git a/ai/llm_caller.py b/agent/llm_caller.py
similarity index 98%
rename from ai/llm_caller.py
rename to agent/llm_caller.py
index 9fe22b1..4bc08ee 100644
--- a/ai/llm_caller.py
+++ b/agent/llm_caller.py
@@ -6,7 +6,7 @@ from openai.types.responses import ResponseInputParam
 from slack_sdk.models.messages.chunk import TaskUpdateChunk
 from slack_sdk.web.chat_stream import ChatStream
 
-from ai.tools.dice import roll_dice, roll_dice_definition
+from agent.tools.dice import roll_dice, roll_dice_definition
 
 
 def call_llm(
diff --git a/ai/tools/__init__.py b/agent/tools/__init__.py
similarity index 100%
rename from ai/tools/__init__.py
rename to agent/tools/__init__.py
diff --git a/ai/tools/dice.py b/agent/tools/dice.py
similarity index 100%
rename from ai/tools/dice.py
rename to agent/tools/dice.py
diff --git a/listeners/assistant/message.py b/listeners/assistant/message.py
index 04d6675..2378737 100644
--- a/listeners/assistant/message.py
+++ b/listeners/assistant/message.py
@@ -10,7 +10,7 @@ from slack_sdk.models.messages.chunk import (
     TaskUpdateChunk,
 )
 
-from ai.llm_caller import call_llm
+from agent.llm_caller import call_llm
 from listeners.views.feedback_block import create_feedback_block
 
 
diff --git a/listeners/events/app_mentioned.py b/listeners/events/app_mentioned.py
index fe18d61..89001b7 100644
--- a/listeners/events/app_mentioned.py
+++ b/listeners/events/app_mentioned.py
@@ -4,7 +4,7 @@ from openai.types.responses import ResponseInputParam
 from slack_bolt import Say
 from slack_sdk import WebClient
 
-from ai.llm_caller import call_llm
+from agent.llm_caller import call_llm
 from listeners.views.feedback_block import create_feedback_block
 
 

From e2f6fa2343e91c431f4f5b6162b5e08d5010dc84 Mon Sep 17 00:00:00 2001
From: Eden Zimbelman
Date: Tue, 20 Jan 2026 16:21:55 -0800
Subject: [PATCH 6/8] refactor: reverse the order of expected prompts to demo the longform example first

---
 listeners/assistant/message.py | 72 +++++++++++++++++-----------------
 1 file changed, 36 insertions(+), 36 deletions(-)

diff --git a/listeners/assistant/message.py b/listeners/assistant/message.py
index 2378737..5bcd6eb 100644
--- a/listeners/assistant/message.py
+++ b/listeners/assistant/message.py
@@ -40,43 +40,9 @@ def message(
         thread_ts = payload["thread_ts"]
         user_id = context.user_id
 
-        # This first example shows a generated text response for the provided prompt
-        # displayed as a timeline.
-        if message["text"] != "Wonder a few deep thoughts.":
-            set_status(
-                status="thinking...",
-                loading_messages=[
-                    "Teaching the hamsters to type faster…",
-                    "Untangling the internet cables…",
-                    "Consulting the office goldfish…",
-                    "Polishing up the response just for you…",
-                    "Convincing the AI to stop overthinking…",
-                ],
-            )
-
-            streamer = client.chat_stream(
-                channel=channel_id,
-                recipient_team_id=team_id,
-                recipient_user_id=user_id,
-                thread_ts=thread_ts,
-                task_display_mode="timeline",
-            )
-            prompts: ResponseInputParam = [
-                {
-                    "role": "user",
-                    "content": message["text"],
-                },
-            ]
-            call_llm(streamer, prompts)
-
-            feedback_block = create_feedback_block()
-            streamer.stop(
-                blocks=feedback_block,
-            )
-
-        # The second example shows detailed thinking steps similar to tool calls
+        # The first example shows detailed thinking steps similar to tool calls
         # displayed as a plan.
-        else:
+        if message["text"] == "Wonder a few deep thoughts.":
             streamer = client.chat_stream(
                 channel=channel_id,
                 recipient_team_id=team_id,
                 recipient_user_id=user_id,
                 thread_ts=thread_ts,
@@ -145,6 +111,40 @@ def message(
                 ],
             )
 
+        # This second example shows a generated text response for a provided prompt
+        # displayed as a timeline.
+        else:
+            set_status(
+                status="thinking...",
+                loading_messages=[
+                    "Teaching the hamsters to type faster…",
+                    "Untangling the internet cables…",
+                    "Consulting the office goldfish…",
+                    "Polishing up the response just for you…",
+                    "Convincing the AI to stop overthinking…",
+                ],
+            )
+
+            streamer = client.chat_stream(
+                channel=channel_id,
+                recipient_team_id=team_id,
+                recipient_user_id=user_id,
+                thread_ts=thread_ts,
+                task_display_mode="timeline",
+            )
+            prompts: ResponseInputParam = [
+                {
+                    "role": "user",
+                    "content": message["text"],
+                },
+            ]
+            call_llm(streamer, prompts)
+
+            feedback_block = create_feedback_block()
+            streamer.stop(
+                blocks=feedback_block,
+            )
+
     except Exception as e:
         logger.exception(f"Failed to handle a user message event: {e}")
         say(f":warning: Something went wrong! ({e})")

From 7ddcac009658c6eb097cd1f2de0187896d67e779 Mon Sep 17 00:00:00 2001
From: Eden Zimbelman
Date: Tue, 20 Jan 2026 16:23:30 -0800
Subject: [PATCH 7/8] fix: match verb tenses of actions and details in steps

---
 listeners/assistant/message.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/listeners/assistant/message.py b/listeners/assistant/message.py
index 5bcd6eb..902aa1c 100644
--- a/listeners/assistant/message.py
+++ b/listeners/assistant/message.py
@@ -89,6 +89,7 @@ def message(
                         id="002",
                         title="Performing acrobatics...",
                         status="in_progress",
+                        details="- Jumping atop ropes\n- Juggling bowling pins\n- Riding a single wheel too",
                     ),
                 ],
             )
@@ -104,7 +104,6 @@ def message(
                         title="Performing acrobatics...",
                         status="complete",
-                        details="- Jumped atop ropes\n- Juggled bowling pins\n- Rode a single wheel too",
                     ),
                     MarkdownTextChunk(
                         text="The crowd appears to be astouned and applauds :popcorn:"

From d6af288d1d41263a06282c3aa93a63b7c974fd82 Mon Sep 17 00:00:00 2001
From: Eden Zimbelman
Date: Tue, 20 Jan 2026 16:54:18 -0800
Subject: [PATCH 8/8] docs: correct spellings of shocked wordage in example

Co-authored-by: Michael Brooks
---
 listeners/assistant/message.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/listeners/assistant/message.py b/listeners/assistant/message.py
index 902aa1c..9967020 100644
--- a/listeners/assistant/message.py
+++ b/listeners/assistant/message.py
@@ -106,7 +106,7 @@ def message(
                         status="complete",
                     ),
                     MarkdownTextChunk(
-                        text="The crowd appears to be astouned and applauds :popcorn:"
+                        text="The crowd appears to be astounded and applauds :popcorn:"
                     ),
                 ],
             )