Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
6 changes: 4 additions & 2 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -150,9 +150,11 @@ Configures the new Slack Assistant features, providing a dedicated side panel UI
- The `assistant_thread_started.py` file, which responds to new app threads with a list of suggested prompts.
- The `message.py` file, which responds to user messages sent to app threads or from the **Chat** and **History** tab with an LLM generated response.

### `/ai`
### `/agent`

The `llm_caller.py` file, which handles OpenAI API integration and message formatting. It includes the `call_llm()` function that sends conversation threads to OpenAI's models.
The `llm_caller.py` file calls the OpenAI API and streams the generated response into a Slack conversation.

The `tools` directory contains app-specific functions for the LLM to call.

## App Distribution / OAuth

Expand Down
101 changes: 101 additions & 0 deletions agent/llm_caller.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,101 @@
import json
import os

import openai
from openai.types.responses import ResponseInputParam
from slack_sdk.models.messages.chunk import TaskUpdateChunk
from slack_sdk.web.chat_stream import ChatStream

from agent.tools.dice import roll_dice, roll_dice_definition


def call_llm(
    streamer: ChatStream,
    prompts: ResponseInputParam,
) -> None:
    """
    Stream an LLM response to prompts with an example dice rolling function

    Recurses once per round of tool calls so the model can finish its answer
    after the tool outputs are appended to the conversation.

    Args:
        streamer: Slack chat stream that receives markdown text and task chunks
        prompts: Conversation input items for the OpenAI Responses API;
            mutated in place with function calls and their outputs

    https://docs.slack.dev/tools/python-slack-sdk/web#sending-streaming-messages
    https://platform.openai.com/docs/guides/text
    https://platform.openai.com/docs/guides/streaming-responses
    https://platform.openai.com/docs/guides/function-calling
    """
    llm = openai.OpenAI(
        api_key=os.getenv("OPENAI_API_KEY"),
    )
    tool_calls = []
    response = llm.responses.create(
        model="gpt-4o-mini",
        input=prompts,
        tools=[
            roll_dice_definition,
        ],
        stream=True,
    )
    for event in response:
        # Markdown text from the LLM response is streamed in chat as it arrives
        if event.type == "response.output_text.delta":
            streamer.append(markdown_text=event.delta)

        # Function calls are saved for later computation and a new task is shown
        elif event.type == "response.output_item.done" and event.item.type == "function_call":
            tool_calls.append(event.item)
            if event.item.name == "roll_dice":
                # Use schema defaults as fallbacks: the tool is declared with
                # "strict": False, so the model may omit arguments
                args = json.loads(event.item.arguments)
                streamer.append(
                    chunks=[
                        TaskUpdateChunk(
                            id=event.item.call_id,
                            title=f"Rolling a {args.get('count', 1)}d{args.get('sides', 6)}...",
                            status="in_progress",
                        ),
                    ],
                )

    # Tool calls are performed and tasks are marked as completed in Slack
    if tool_calls:
        for call in tool_calls:
            if call.name == "roll_dice":
                args = json.loads(call.arguments)
                # Echo the model's function call back into the conversation so
                # the follow-up request has the full tool-use context
                prompts.append(
                    {
                        "id": call.id,
                        "call_id": call.call_id,
                        "type": "function_call",
                        "name": "roll_dice",
                        "arguments": call.arguments,
                    }
                )
                result = roll_dice(**args)
                prompts.append(
                    {
                        "type": "function_call_output",
                        "call_id": call.call_id,
                        "output": json.dumps(result),
                    }
                )
                # One task update covers both outcomes: errors surface the
                # message from roll_dice, successes surface its description
                failed = result.get("error") is not None
                streamer.append(
                    chunks=[
                        TaskUpdateChunk(
                            id=call.call_id,
                            title=result["error"] if failed else result["description"],
                            status="error" if failed else "complete",
                        ),
                    ],
                )

        # Complete the LLM response after making tool calls
        call_llm(streamer, prompts)
Empty file added agent/tools/__init__.py
Empty file.
60 changes: 60 additions & 0 deletions agent/tools/dice.py
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

i really like this example 🤩

Copy link
Member Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

@srtaalej It makes for fun games! 🎲 ✨

Original file line number Diff line number Diff line change
@@ -0,0 +1,60 @@
import random
import time

from openai.types.responses import FunctionToolParam


def roll_dice(sides: int = 6, count: int = 1) -> dict:
    """Roll `count` dice with `sides` faces each and report the outcome.

    Returns a dict containing either an "error" message (with empty "rolls"
    and a zero "total") for invalid input, or the individual "rolls", their
    "total", and a human-readable "description" on success.
    """
    # Validate the dice configuration with guard clauses before rolling
    error = None
    if sides < 2:
        error = "A die must have at least 2 sides"
    elif count < 1:
        error = "Must roll at least 1 die"
    if error is not None:
        return {
            "error": error,
            "rolls": [],
            "total": 0,
        }

    # Roll each die in turn and accumulate the outcomes
    outcomes = []
    for _ in range(count):
        outcomes.append(random.randint(1, sides))
    grand_total = sum(outcomes)

    # Add a pause between rolls to demonstrate loading states
    time.sleep(2)

    return {
        "rolls": outcomes,
        "total": grand_total,
        "description": f"Rolled a {count}d{sides} to total {grand_total}",
    }


# Tool definition for OpenAI API
#
# Declares the roll_dice function schema so the model knows when and how to
# call it. With "strict": False the model is not forced to match the schema
# exactly, so callers should still validate the parsed arguments.
#
# NOTE(review): both parameters are listed in "required" even though they
# declare defaults — under a non-strict schema the model may still omit them;
# confirm this matches the intended contract.
#
# https://platform.openai.com/docs/guides/function-calling
roll_dice_definition: FunctionToolParam = {
    "type": "function",
    "name": "roll_dice",
    "description": "Roll one or more dice with a specified number of sides. Use this when the user wants to roll dice or generate random numbers within a range.",
    "parameters": {
        "type": "object",
        "properties": {
            "sides": {
                "type": "integer",
                "description": "The number of sides on the die (e.g., 6 for a standard die, 20 for a d20)",
                "default": 6,
            },
            "count": {
                "type": "integer",
                "description": "The number of dice to roll",
                "default": 1,
            },
        },
        "required": ["sides", "count"],
    },
    "strict": False,
}
27 changes: 0 additions & 27 deletions ai/llm_caller.py

This file was deleted.

2 changes: 1 addition & 1 deletion app.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,6 @@
import os

from dotenv import load_dotenv

from slack_bolt import App
from slack_bolt.adapter.socket_mode import SocketModeHandler
from slack_sdk import WebClient
Expand All @@ -22,6 +21,7 @@
token=os.environ.get("SLACK_BOT_TOKEN"),
),
)

# Register Listeners
register_listeners(app)

Expand Down
32 changes: 13 additions & 19 deletions listeners/assistant/assistant_thread_started.py
Original file line number Diff line number Diff line change
@@ -1,5 +1,4 @@
from logging import Logger
from typing import Dict, List

from slack_bolt import Say, SetSuggestedPrompts

Expand All @@ -18,24 +17,19 @@ def assistant_thread_started(
logger: Logger instance for error tracking
"""
try:
say("How can I help you?")

prompts: List[Dict[str, str]] = [
{
"title": "What does Slack stand for?",
"message": "Slack, a business communication service, was named after an acronym. Can you guess what it stands for?",
},
{
"title": "Write a draft announcement",
"message": "Can you write a draft announcement about a new feature my team just released? It must include how impactful it is.",
},
{
"title": "Suggest names for my Slack app",
"message": "Can you suggest a few names for my Slack app? The app helps my teammates better organize information and plan priorities and action items.",
},
]

set_suggested_prompts(prompts=prompts)
say("What would you like to do today?")
set_suggested_prompts(
prompts=[
{
"title": "Prompt a task with thinking steps",
"message": "Wonder a few deep thoughts.",
},
{
"title": "Roll dice for a random number",
"message": "Roll two 12-sided dice and three 6-sided dice for a pseudo-random score.",
},
]
)
except Exception as e:
logger.exception(f"Failed to handle an assistant_thread_started event: {e}", e)
say(f":warning: Something went wrong! ({e})")
Loading