Initialize a chatbot wrapper that you can fill in with your chatbot's details. Running `snowglobe init` creates a `chatbots.json` file in the `.snowglobe` directory that tracks the necessary information to connect your chatbot to Snowglobe.
# Snowglobe wrapper types: CompletionRequest carries the incoming messages,
# CompletionFunctionOutputs wraps the string response we return.
from snowglobe.client import CompletionRequest, CompletionFunctionOutputs
from openai import OpenAI
import os

# Module-level OpenAI client; the API key is read from the environment.
client = OpenAI(api_key=os.getenv("OPENAI_API_KEY"))
def process_scenario(request: CompletionRequest) -> CompletionFunctionOutputs:
    """Handle one Snowglobe request by forwarding it to OpenAI chat completions.

    Snowglobe calls this for every scenario turn. Example request:

        CompletionRequest(
            messages=[
                SnowglobeMessage(role="user", content="Hello, how are you?", snowglobe_data=None),
            ]
        )

    Example return value:

        CompletionFunctionOutputs(response="This is a string response from your application")

    Args:
        request (CompletionRequest): The request object containing the messages.

    Returns:
        CompletionFunctionOutputs: The response object with the generated content.
    """
    # Convert Snowglobe messages to the OpenAI chat format and ask the model.
    openai_messages = request.to_openai_messages()
    completion = client.chat.completions.create(
        model="gpt-4o-mini",
        messages=openai_messages,
    )
    reply_text = completion.choices[0].message.content
    return CompletionFunctionOutputs(response=reply_text)
"""
ShopSupport: single-step tool-call example
What Snowglobe sends:
CompletionRequest(
messages=[SnowglobeMessage(role="user", content="Where is order A1001?")]
)
What you must return:
CompletionFunctionOutputs(response="string to display")
Notes:
- One OpenAI call only. If a tool is requested, we execute it once
then render a final user-facing string ourselves.
"""
# 1) Imports and setup
from snowglobe.client import CompletionRequest, CompletionFunctionOutputs
from openai import OpenAI
import os, json, uuid
from typing import Dict, Any, Callable, Tuple

# Model is configurable via OPENAI_MODEL; defaults to gpt-4o-mini.
MODEL = os.getenv("OPENAI_MODEL", "gpt-4o-mini")
# Module-level OpenAI client; the API key is read from the environment.
client = OpenAI(api_key=os.getenv("OPENAI_API_KEY"))
# 2) Toy app data and business functions
# In-memory demo "database" of orders, keyed by uppercase order id.
ORDERS: Dict[str, Dict[str, Any]] = {
    "A1001": {"status": "Shipped", "eta_days": 2, "carrier": "UPS"},
    "A1002": {"status": "Processing", "eta_days": 5, "carrier": None},
}
# Tickets created during this process's lifetime, keyed by generated ticket id.
TICKETS: Dict[str, Dict[str, Any]] = {}
def get_order_status(order_id: str) -> Dict[str, Any]:
    """Look up an order by id (case-insensitive); found=False when unknown."""
    key = order_id.upper()
    record = ORDERS.get(key)
    if not record:
        # Echo back the caller's original id so the error message matches input.
        return {"found": False, "order_id": order_id}
    return {"found": True, "order_id": key, **record}
def create_ticket(email: str, subject: str, body: str) -> Dict[str, Any]:
    """Open a new support ticket in the in-memory store and return its id and status."""
    new_id = "T" + uuid.uuid4().hex[:8].upper()
    TICKETS[new_id] = {
        "email": email,
        "subject": subject,
        "body": body,
        "status": "open",
    }
    return {"ticket_id": new_id, "status": "open"}
# 3) Renderers: turn tool JSON into a final user-facing string
def render_order_status(out: Dict[str, Any], _args: Dict[str, Any]) -> str:
    """Turn a get_order_status result dict into a one-line user-facing message."""
    if not out.get("found"):
        return f"I could not find order {out.get('order_id')}."
    # A None carrier means the order has not been handed to a carrier yet.
    carrier_text = out["carrier"] if out["carrier"] else "carrier pending"
    return (
        f"Order {out['order_id']}: {out['status']}. "
        f"ETA about {out['eta_days']} day(s) via {carrier_text}."
    )
def render_ticket(out: Dict[str, Any], args: Dict[str, Any]) -> str:
    """Confirm ticket creation, echoing the contact email from the tool args."""
    ticket_id = out["ticket_id"]
    email = args.get("email")
    return f"Ticket {ticket_id} opened. We will email updates to {email}."
# 4) Tool specs for the model and a small dispatch table
# OpenAI function-calling schemas advertised to the model on every request.
# The model picks a tool (or none) and supplies JSON arguments matching
# each "parameters" schema.
TOOL_SPECS = [
    {
        "type": "function",
        "function": {
            "name": "get_order_status",
            "description": "Look up an order by id like A1234",
            "parameters": {
                "type": "object",
                "properties": {"order_id": {"type": "string"}},
                "required": ["order_id"],
            },
        },
    },
    {
        "type": "function",
        "function": {
            "name": "create_ticket",
            "description": "Open a support ticket",
            "parameters": {
                "type": "object",
                "properties": {
                    "email": {"type": "string"},
                    "subject": {"type": "string"},
                    "body": {"type": "string"},
                },
                "required": ["email", "subject", "body"],
            },
        },
    },
]
# name -> (callable, renderer)
# Dispatch table: tool name -> (business function to run, renderer that turns
# the function's dict result into the final user-facing string).
TOOL_REGISTRY: Dict[str, Tuple[Callable[..., Dict[str, Any]], Callable[[Dict[str, Any], Dict[str, Any]], str]]] = {
    "get_order_status": (get_order_status, render_order_status),
    "create_ticket": (create_ticket, render_ticket),
}
# 5) System prompt for predictable routing
# Sent as the first message on every call; steers the model's tool selection.
SYSTEM_PROMPT = (
    "You are ShopSupport. "
    "If the user asks about an order, call get_order_status. "
    "If they want to open a support ticket, call create_ticket. "
    "If no tool fits, answer directly and be concise."
)
# 6) The Snowglobe entry point
def process_scenario(request: CompletionRequest) -> CompletionFunctionOutputs:
    """Entry point called by Snowglobe. Returns a plain string response.

    Makes exactly one OpenAI call. If the model requests a tool, only the
    first tool call is executed locally and its result is rendered into a
    user-facing string; otherwise the model's own text is returned.
    """
    prompt = [{"role": "system", "content": SYSTEM_PROMPT}]
    prompt.extend(request.to_openai_messages())
    completion = client.chat.completions.create(
        model=MODEL,
        messages=prompt,
        tools=TOOL_SPECS,
        tool_choice="auto",
    )
    message = completion.choices[0].message
    calls = getattr(message, "tool_calls", None)
    if not calls:
        # No tool requested: pass the model's text straight through.
        return CompletionFunctionOutputs(response=message.content or "")
    # Single-step policy: only the first tool call is honored.
    first_call = calls[0]
    try:
        call_args = json.loads(first_call.function.arguments or "{}")
    except Exception:
        return CompletionFunctionOutputs(response="I could not parse the tool arguments.")
    entry = TOOL_REGISTRY.get(first_call.function.name)
    if not entry:
        return CompletionFunctionOutputs(response="I cannot run the requested tool.")
    business_fn, renderer = entry
    try:
        result = business_fn(**call_args)     # run the business function
        return CompletionFunctionOutputs(response=renderer(result, call_args))
    except TypeError:
        # Model supplied wrong/missing keyword arguments.
        return CompletionFunctionOutputs(response="The tool arguments were incomplete.")
    except Exception:
        return CompletionFunctionOutputs(response="The tool failed to run.")
"""
SessionChatbot: simple session-managed chat example
What Snowglobe sends:
CompletionRequest(
messages=[
# Include a hidden system tag to identify the session (fallback):
SnowglobeMessage(role="system", content="[session: u123]"),
# Each message also includes IDs you can use directly:
SnowglobeMessage(
role="user",
content="Hey, what's up?",
conversation_id="conv_u123", # stable per conversation
message_id="msg_0001" # unique per message
)
]
)
What you return:
CompletionFunctionOutputs(response="string to display")
Notes:
- The wrapper keeps a chat history per session_id in memory.
- Prefer session management via `conversation_id` on `SnowglobeMessage`.
Fallback: read a system tag like [session: your-id] if needed.
- `SnowglobeMessage` also contains `message_id`, which you can use
to build a per-message lookup table (e.g., metadata, deduping).
- Replace the in-memory store with Redis for production.
"""
# 1) Imports and setup
from snowglobe.client import CompletionRequest, CompletionFunctionOutputs
from openai import OpenAI
import os, re
from typing import Dict, List

# Model is configurable via OPENAI_MODEL; defaults to gpt-4o-mini.
MODEL = os.getenv("OPENAI_MODEL", "gpt-4o-mini")
# Module-level OpenAI client; the API key is read from the environment.
client = OpenAI(api_key=os.getenv("OPENAI_API_KEY"))
# 2) Session store (swap with Redis or your DB later)
class SessionStore:
    """In-memory per-session chat history, capped at max_turns messages."""

    def __init__(self, max_turns: int = 20):
        # Maps session_id -> list of OpenAI-style {"role", "content"} dicts.
        self._db: Dict[str, List[Dict[str, str]]] = {}
        self.max_turns = max_turns

    def load(self, session_id: str) -> List[Dict[str, str]]:
        """Return a copy of the stored history (empty list for new sessions)."""
        return list(self._db.get(session_id, []))

    def save(self, session_id: str, history: List[Dict[str, str]]) -> None:
        """Persist history, keeping only the most recent max_turns messages."""
        trimmed = history[-self.max_turns:]
        self._db[session_id] = trimmed
# Single shared store instance; swap for Redis or a DB in production.
STORE = SessionStore(max_turns=24)
# 3) Helpers: session id, system prompts, last user text
# Matches hidden tags like "[session: abc-123]" (case-insensitive).
SESSION_TAG = re.compile(r"\[session:\s*([A-Za-z0-9_-]{1,64})\]", flags=re.I)
def get_session_id(request: CompletionRequest) -> str:
    """Resolve the session id for this request.

    Preference order:
      1. An explicit `conversation_id` on any message.
      2. A `[session: <id>]` tag inside a system message.
      3. The literal string 'default'.
    """
    # 1) Prefer explicit IDs provided by Snowglobe.
    for msg in request.messages:
        conv_id = getattr(msg, "conversation_id", None)
        if conv_id:
            return str(conv_id)
    # 2) Fall back to a session tag embedded in a system message.
    for msg in request.messages:
        is_system = getattr(msg, "role", "") == "system"
        if is_system and msg.content:
            match = SESSION_TAG.search(msg.content)
            if match:
                return match.group(1)
    return "default"
def system_prompts_from_request(request: CompletionRequest) -> List[str]:
    """Collect system-message texts from the request, with session tags removed."""
    collected: List[str] = []
    for msg in request.messages:
        # Only non-empty system messages contribute prompts.
        if getattr(msg, "role", "") != "system" or not msg.content:
            continue
        stripped = SESSION_TAG.sub("", msg.content).strip()
        if stripped:
            collected.append(stripped)
    return collected
def latest_user_text(request: CompletionRequest) -> str:
    """Return the content of the most recent user message, or '' if none."""
    for msg in reversed(request.messages):
        if getattr(msg, "role", "") == "user":
            return msg.content or ""
    return ""
# 4) System prompt for tone and routing
# Base system message prepended to every model call for this chatbot.
BASE_SYSTEM = (
    "You are a concise helpful assistant. "
    "Use prior conversation context to stay consistent."
)
# 5) Snowglobe entry point
def process_scenario(request: CompletionRequest) -> CompletionFunctionOutputs:
    """Session-aware entry point called by Snowglobe.

    Per-call flow:
      1) Resolve the session id (conversation_id or system tag).
      2) Load that session's prior history from the store.
      3) Append the latest user message.
      4) Call the model with [base system + extra system prompts + history + new user].
      5) Persist both new turns back to the store.
      6) Return the assistant text as a string.
    """
    session_id = get_session_id(request)
    history = STORE.load(session_id)
    user_text = latest_user_text(request)

    # Assemble the prompt in order: base system, pass-through systems,
    # prior turns for this session, then the newest user message.
    prompt: List[Dict[str, str]] = [{"role": "system", "content": BASE_SYSTEM}]
    prompt.extend({"role": "system", "content": sp} for sp in system_prompts_from_request(request))
    prompt.extend(history)
    if user_text:
        prompt.append({"role": "user", "content": user_text})

    completion = client.chat.completions.create(model=MODEL, messages=prompt)
    assistant_text = completion.choices[0].message.content or ""

    # Record both sides of this exchange for the next call.
    if user_text:
        history.append({"role": "user", "content": user_text})
    history.append({"role": "assistant", "content": assistant_text})
    STORE.save(session_id, history)
    return CompletionFunctionOutputs(response=assistant_text)
`SnowglobeMessage` includes `conversation_id` and `message_id`. Use `conversation_id` for session management. Optional: build a message-level lookup table keyed by `message_id`, as shown below.
# Simple in-memory message metadata store
# Maps message_id -> metadata dict. Demo-only: grows without bound.
from typing import Dict, Any
MESSAGE_META: Dict[str, Dict[str, Any]] = {}
def record_message_meta(request: CompletionRequest, assistant_text: str) -> None:
    """Record lightweight metadata for each incoming message, plus the reply.

    Stores one entry per incoming message keyed by its `message_id`, and one
    entry for the assistant reply keyed by a deterministic digest of its text.

    Args:
        request: The Snowglobe request whose messages are inspected.
        assistant_text: The assistant reply produced for this request.
    """
    import hashlib  # local import: only needed for the assistant key below

    for m in request.messages:
        mid = getattr(m, "message_id", None)
        if mid:
            # Store whatever you need per incoming message.
            MESSAGE_META[mid] = {
                "role": getattr(m, "role", None),
                "content_len": len(getattr(m, "content", "") or ""),
            }
    # Key the assistant turn by a stable content digest. Python's built-in
    # hash() is salted per process (PYTHONHASHSEED), so the old
    # `hash(text) & 0xffff` key changed on every restart and had only 65536
    # possible values — useless for lookup or deduping. sha256 is
    # deterministic and collision-resistant.
    # (In a real system, attach your own id or use the model's response id.)
    digest = hashlib.sha256(assistant_text.encode("utf-8")).hexdigest()[:12]
    assistant_mid = f"assistant_{digest}"
    MESSAGE_META[assistant_mid] = {"role": "assistant", "content": assistant_text}