Skip to content

Commit 3b17522

Browse files
committed
feat(llmo): create text type object
1 parent 216b8ee commit 3b17522

4 files changed

Lines changed: 57 additions & 28 deletions

File tree

posthog/ai/utils.py

Lines changed: 8 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -126,7 +126,7 @@ def format_response_anthropic(response):
126126
and hasattr(choice, "text")
127127
and choice.text
128128
):
129-
content.append(choice.text)
129+
content.append({"type": "text", "text": choice.text})
130130
elif (
131131
hasattr(choice, "type")
132132
and choice.type == "tool_use"
@@ -167,7 +167,7 @@ def format_response_openai(response):
167167
role = choice.message.role
168168

169169
if choice.message.content:
170-
content.append(choice.message.content)
170+
content.append({"type": "text", "text": choice.message.content})
171171

172172
if hasattr(choice.message, "tool_calls") and choice.message.tool_calls:
173173
for tool_call in choice.message.tool_calls:
@@ -205,9 +205,9 @@ def format_response_openai(response):
205205
and content_item.type == "output_text"
206206
and hasattr(content_item, "text")
207207
):
208-
content.append(content_item.text)
208+
content.append({"type": "text", "text": content_item.text})
209209
elif hasattr(content_item, "text"):
210-
content.append(content_item.text)
210+
content.append({"type": "text", "text": content_item.text})
211211
elif (
212212
hasattr(content_item, "type")
213213
and content_item.type == "input_image"
@@ -220,7 +220,7 @@ def format_response_openai(response):
220220
}
221221
)
222222
elif hasattr(item, "content"):
223-
content.append(str(item.content))
223+
content.append({"type": "text", "text": str(item.content)})
224224

225225
elif hasattr(item, "type") and item.type == "function_call":
226226
content.append(
@@ -255,7 +255,7 @@ def format_response_gemini(response):
255255
if hasattr(candidate.content, "parts") and candidate.content.parts:
256256
for part in candidate.content.parts:
257257
if hasattr(part, "text") and part.text:
258-
content.append(part.text)
258+
content.append({"type": "text", "text": part.text})
259259
elif hasattr(part, "function_call") and part.function_call:
260260
function_call = part.function_call
261261
content.append(
@@ -279,14 +279,14 @@ def format_response_gemini(response):
279279
output.append(
280280
{
281281
"role": "assistant",
282-
"content": [candidate.text],
282+
"content": [{"type": "text", "text": candidate.text}],
283283
}
284284
)
285285
elif hasattr(response, "text") and response.text:
286286
output.append(
287287
{
288288
"role": "assistant",
289-
"content": [response.text],
289+
"content": [{"type": "text", "text": response.text}],
290290
}
291291
)
292292

posthog/test/ai/anthropic/test_anthropic.py

Lines changed: 19 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -162,7 +162,10 @@ def test_basic_completion(mock_client, mock_anthropic_response):
162162
assert props["$ai_model"] == "claude-3-opus-20240229"
163163
assert props["$ai_input"] == [{"role": "user", "content": "Hello"}]
164164
assert props["$ai_output_choices"] == [
165-
{"role": "assistant", "content": ["Test response"]}
165+
{
166+
"role": "assistant",
167+
"content": [{"type": "text", "text": "Test response"}],
168+
}
166169
]
167170
assert props["$ai_input_tokens"] == 20
168171
assert props["$ai_output_tokens"] == 10
@@ -336,7 +339,9 @@ def test_basic_integration(mock_client):
336339
{"role": "user", "content": "Foo"},
337340
]
338341
assert props["$ai_output_choices"][0]["role"] == "assistant"
339-
assert props["$ai_output_choices"][0]["content"] == "Bar"
342+
assert props["$ai_output_choices"][0]["content"] == [
343+
{"type": "text", "text": "Bar"}
344+
]
340345
assert props["$ai_input_tokens"] == 18
341346
assert props["$ai_output_tokens"] == 1
342347
assert props["$ai_http_status"] == 200
@@ -475,7 +480,10 @@ def test_cached_tokens(mock_client, mock_anthropic_response_with_cached_tokens):
475480
assert props["$ai_model"] == "claude-3-opus-20240229"
476481
assert props["$ai_input"] == [{"role": "user", "content": "Hello"}]
477482
assert props["$ai_output_choices"] == [
478-
{"role": "assistant", "content": ["Test response"]}
483+
{
484+
"role": "assistant",
485+
"content": [{"type": "text", "text": "Test response"}],
486+
}
479487
]
480488
assert props["$ai_input_tokens"] == 20
481489
assert props["$ai_output_tokens"] == 10
@@ -532,7 +540,10 @@ def test_tool_definition(mock_client, mock_anthropic_response):
532540
assert props["$ai_model"] == "claude-3-5-sonnet-20241022"
533541
assert props["$ai_input"] == [{"role": "user", "content": "hey"}]
534542
assert props["$ai_output_choices"] == [
535-
{"role": "assistant", "content": ["Test response"]}
543+
{
544+
"role": "assistant",
545+
"content": [{"type": "text", "text": "Test response"}],
546+
}
536547
]
537548
assert props["$ai_input_tokens"] == 20
538549
assert props["$ai_output_tokens"] == 10
@@ -585,8 +596,8 @@ def test_tool_calls_in_output_choices(
585596
{
586597
"role": "assistant",
587598
"content": [
588-
"I'll help you check the weather.",
589-
" Let me look that up.",
599+
{"type": "text", "text": "I'll help you check the weather."},
600+
{"type": "text", "text": " Let me look that up."},
590601
{
591602
"type": "function",
592603
"id": "toolu_abc123",
@@ -717,8 +728,8 @@ async def run_test():
717728
{
718729
"role": "assistant",
719730
"content": [
720-
"I'll help you check the weather.",
721-
" Let me look that up.",
731+
{"type": "text", "text": "I'll help you check the weather."},
732+
{"type": "text", "text": " Let me look that up."},
722733
{
723734
"type": "function",
724735
"id": "toolu_abc123",

posthog/test/ai/gemini/test_gemini.py

Lines changed: 6 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -449,7 +449,10 @@ def test_tool_use_response(mock_client, mock_google_genai_client, mock_gemini_re
449449
assert props["$ai_model"] == "gemini-2.5-flash"
450450
assert props["$ai_input"] == [{"role": "user", "content": "hey"}]
451451
assert props["$ai_output_choices"] == [
452-
{"role": "assistant", "content": ["Test response from Gemini"]}
452+
{
453+
"role": "assistant",
454+
"content": [{"type": "text", "text": "Test response from Gemini"}],
455+
}
453456
]
454457
assert props["$ai_input_tokens"] == 20
455458
assert props["$ai_output_tokens"] == 10
@@ -490,8 +493,8 @@ def test_function_calls_in_output_choices(
490493
{
491494
"role": "assistant",
492495
"content": [
493-
"I'll check the weather for you.",
494-
" Let me look that up.",
496+
{"type": "text", "text": "I'll check the weather for you."},
497+
{"type": "text", "text": " Let me look that up."},
495498
{
496499
"type": "function",
497500
"function": {

posthog/test/ai/openai/test_openai.py

Lines changed: 24 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -385,7 +385,10 @@ def test_basic_completion(mock_client, mock_openai_response):
385385
assert props["$ai_model"] == "gpt-4"
386386
assert props["$ai_input"] == [{"role": "user", "content": "Hello"}]
387387
assert props["$ai_output_choices"] == [
388-
{"role": "assistant", "content": ["Test response"]}
388+
{
389+
"role": "assistant",
390+
"content": [{"type": "text", "text": "Test response"}],
391+
}
389392
]
390393
assert props["$ai_input_tokens"] == 20
391394
assert props["$ai_output_tokens"] == 10
@@ -534,7 +537,10 @@ def test_cached_tokens(mock_client, mock_openai_response_with_cached_tokens):
534537
assert props["$ai_model"] == "gpt-4"
535538
assert props["$ai_input"] == [{"role": "user", "content": "Hello"}]
536539
assert props["$ai_output_choices"] == [
537-
{"role": "assistant", "content": ["Test response"]}
540+
{
541+
"role": "assistant",
542+
"content": [{"type": "text", "text": "Test response"}],
543+
}
538544
]
539545
assert props["$ai_input_tokens"] == 20
540546
assert props["$ai_output_tokens"] == 10
@@ -585,8 +591,8 @@ def test_tool_calls(mock_client, mock_openai_response_with_tool_calls):
585591
{
586592
"role": "assistant",
587593
"content": [
588-
"I'll check the weather for you.",
589-
" Let me look that up.",
594+
{"type": "text", "text": "I'll check the weather for you."},
595+
{"type": "text", "text": " Let me look that up."},
590596
{
591597
"type": "function",
592598
"id": "call_abc123",
@@ -708,8 +714,8 @@ def test_responses_api_tool_calls(mock_client, mock_responses_api_with_tool_call
708714
{
709715
"role": "assistant",
710716
"content": [
711-
"I'll help you with the weather.",
712-
" Let me check that for you.",
717+
{"type": "text", "text": "I'll help you with the weather."},
718+
{"type": "text", "text": " Let me check that for you."},
713719
{
714720
"type": "function",
715721
"id": "call_xyz789",
@@ -920,7 +926,10 @@ def test_responses_api(mock_client, mock_openai_response_with_responses_api):
920926
assert props["$ai_model"] == "gpt-4o-mini"
921927
assert props["$ai_input"] == [{"role": "user", "content": "Hello"}]
922928
assert props["$ai_output_choices"] == [
923-
{"role": "assistant", "content": ["Test response"]}
929+
{
930+
"role": "assistant",
931+
"content": [{"type": "text", "text": "Test response"}],
932+
}
924933
]
925934
assert props["$ai_input_tokens"] == 10
926935
assert props["$ai_output_tokens"] == 10
@@ -990,7 +999,10 @@ def test_responses_parse(mock_client, mock_parsed_response):
990999
{
9911000
"role": "assistant",
9921001
"content": [
993-
'{"name": "Science Fair", "date": "Friday", "participants": ["Alice", "Bob"]}'
1002+
{
1003+
"type": "text",
1004+
"text": '{"name": "Science Fair", "date": "Friday", "participants": ["Alice", "Bob"]}',
1005+
}
9941006
],
9951007
}
9961008
]
@@ -1051,7 +1063,10 @@ def test_tool_definition(mock_client, mock_openai_response):
10511063
assert props["$ai_model"] == "gpt-4o-mini"
10521064
assert props["$ai_input"] == [{"role": "user", "content": "hey"}]
10531065
assert props["$ai_output_choices"] == [
1054-
{"role": "assistant", "content": ["Test response"]}
1066+
{
1067+
"role": "assistant",
1068+
"content": [{"type": "text", "text": "Test response"}],
1069+
}
10551070
]
10561071
assert props["$ai_input_tokens"] == 20
10571072
assert props["$ai_output_tokens"] == 10

0 commit comments

Comments (0)