@@ -46,9 +46,11 @@ def log_prompt(system_prompt="", user_prompt)
4646 end
4747
4848 def log_response ( response )
49+ if response . respond_to? ( :body )
50+ log_bedrock_response ( response )
4951 # This is Gemini specific, see -
5052 # https://github.com/gbaptista/gemini-ai?tab=readme-ov-file#generate_content
51- if response . has_key? ( "candidates" )
53+ elsif response . has_key? ( "candidates" )
5254 log_gemini_response ( response )
5355 else
5456 log_openai_response ( response )
@@ -66,6 +68,32 @@ def log_gemini_response(response)
6668 } )
6769 end
6870
# Records LLM attributes from an AWS Bedrock invoke-model response on the
# current span: the response model, token usage, and the first completion.
#
# Assumes the Anthropic/Claude message schema inside the Bedrock body
# ("model", "usage.input_tokens"/"usage.output_tokens", "role", "content")
# — TODO confirm against other Bedrock model providers.
#
# @param response [#body] Bedrock response whose #body is an IO-like
#   stream containing a JSON document.
def log_bedrock_response(response)
  body = JSON.parse(response.body.read)

  @span.add_attributes({
    OpenTelemetry::SemanticConventionsAi::SpanAttributes::LLM_RESPONSE_MODEL => body.dig("model"),
  })

  if body.has_key?("usage")
    input_tokens = body.dig("usage", "input_tokens")
    output_tokens = body.dig("usage", "output_tokens")

    @span.add_attributes({
      OpenTelemetry::SemanticConventionsAi::SpanAttributes::LLM_USAGE_TOTAL_TOKENS => input_tokens + output_tokens,
      OpenTelemetry::SemanticConventionsAi::SpanAttributes::LLM_USAGE_COMPLETION_TOKENS => output_tokens,
      OpenTelemetry::SemanticConventionsAi::SpanAttributes::LLM_USAGE_PROMPT_TOKENS => input_tokens,
    })
  end

  if body.has_key?("content")
    @span.add_attributes({
      "#{OpenTelemetry::SemanticConventionsAi::SpanAttributes::LLM_COMPLETIONS}.0.role" => body.dig("role"),
      # dig("content", 0, "text") returns nil for an empty "content" array,
      # where the original body.dig("content").first.dig("text") raised
      # NoMethodError on nil.
      "#{OpenTelemetry::SemanticConventionsAi::SpanAttributes::LLM_COMPLETIONS}.0.content" => body.dig("content", 0, "text")
    })
  end
ensure
  # Rewind in `ensure` so downstream consumers can re-read the body stream
  # even when JSON parsing or attribute recording raises (the original only
  # rewound on the success path).
  response.body.rewind if response.body.respond_to?(:rewind)
end
96+
6997 def log_openai_response ( response )
7098 @span . add_attributes ( {
7199 OpenTelemetry ::SemanticConventionsAi ::SpanAttributes ::LLM_RESPONSE_MODEL => response . dig ( "model" ) ,
0 commit comments