chore: make vcr tests pretty-print JSON (#146)

Signed-off-by: Adrian Cole <adrian.cole@elastic.co>
Author: Adrian Cole
Date: 2024-10-16 09:36:23 +11:00
Committed by: GitHub
Parent: e165981897
Commit: e687b0b3bc
16 changed files with 756 additions and 184 deletions

View File

@@ -1,7 +1,19 @@
interactions:
- request:
body: '{"messages": [{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": "Hello"}], "model": "gpt-4o-mini"}'
body: |-
{
"messages": [
{
"role": "system",
"content": "You are a helpful assistant."
},
{
"role": "user",
"content": "Hello"
}
],
"model": "gpt-4o-mini"
}
headers:
accept:
- '*/*'
@@ -23,35 +35,94 @@ interactions:
uri: https://test.openai.azure.com/openai/deployments/test-azure-deployment/chat/completions?api-version=2024-05-01-preview
response:
body:
string: '{"choices":[{"content_filter_results":{"hate":{"filtered":false,"severity":"safe"},"self_harm":{"filtered":false,"severity":"safe"},"sexual":{"filtered":false,"severity":"safe"},"violence":{"filtered":false,"severity":"safe"}},"finish_reason":"stop","index":0,"logprobs":null,"message":{"content":"Hello!
How can I assist you today?","role":"assistant"}}],"created":1727230065,"id":"chatcmpl-ABBjN3AoYlxkP7Vg2lBvUhYeA6j5K","model":"gpt-4-32k","object":"chat.completion","prompt_filter_results":[{"prompt_index":0,"content_filter_results":{"hate":{"filtered":false,"severity":"safe"},"self_harm":{"filtered":false,"severity":"safe"},"sexual":{"filtered":false,"severity":"safe"},"violence":{"filtered":false,"severity":"safe"}}}],"system_fingerprint":null,"usage":{"completion_tokens":9,"prompt_tokens":18,"total_tokens":27}}
'
string: |-
{
"choices": [
{
"content_filter_results": {
"hate": {
"filtered": false,
"severity": "safe"
},
"self_harm": {
"filtered": false,
"severity": "safe"
},
"sexual": {
"filtered": false,
"severity": "safe"
},
"violence": {
"filtered": false,
"severity": "safe"
}
},
"finish_reason": "stop",
"index": 0,
"logprobs": null,
"message": {
"content": "Hello! How can I assist you today?\n",
"role": "assistant"
}
}
],
"created": 1728788170,
"id": "chatcmpl-AHj469gGa9bQaCSikrZYDejfGDx2x",
"model": "gpt-4-32k",
"object": "chat.completion",
"prompt_filter_results": [
{
"prompt_index": 0,
"content_filter_results": {
"hate": {
"filtered": false,
"severity": "safe"
},
"self_harm": {
"filtered": false,
"severity": "safe"
},
"sexual": {
"filtered": false,
"severity": "safe"
},
"violence": {
"filtered": false,
"severity": "safe"
}
}
}
],
"system_fingerprint": null,
"usage": {
"completion_tokens": 9,
"prompt_tokens": 18,
"total_tokens": 27
}
}
headers:
Cache-Control:
- no-cache, must-revalidate
Content-Length:
- '825'
- '827'
Content-Type:
- application/json
Date:
- Wed, 25 Sep 2024 02:07:45 GMT
Set-Cookie: test_set_cookie
- Sun, 13 Oct 2024 02:56:10 GMT
Strict-Transport-Security:
- max-age=31536000; includeSubDomains; preload
access-control-allow-origin:
- '*'
apim-request-id:
- 82e66ef8-ac07-4a43-b60f-9aecec1d8c81
- b203f657-e7aa-40ea-8969-ba0b83dae854
azureml-model-session:
- d145-20240919052126
openai-organization: test_openai_org_key
- d156-20241010120317
x-accel-buffering:
- 'no'
x-content-type-options:
- nosniff
x-ms-client-request-id:
- 82e66ef8-ac07-4a43-b60f-9aecec1d8c81
- b203f657-e7aa-40ea-8969-ba0b83dae854
x-ms-rai-invoked:
- 'true'
x-ms-region:
@@ -61,7 +132,7 @@ interactions:
x-ratelimit-remaining-tokens:
- '79984'
x-request-id:
- 38db9001-8b16-4efe-84c9-620e10f18c3c
- 0c6dc92f-a017-4879-b82e-be937533c76e
status:
code: 200
message: OK

View File

@@ -1,13 +1,40 @@
interactions:
- request:
body: '{"messages": [{"role": "system", "content": "You are a helpful assistant.
Expect to need to read a file using read_file."}, {"role": "user", "content":
"What are the contents of this file? test.txt"}], "model": "gpt-4o-mini", "tools":
[{"type": "function", "function": {"name": "read_file", "description": "Read
the contents of the file.", "parameters": {"type": "object", "properties": {"filename":
{"type": "string", "description": "The path to the file, which can be relative
or\nabsolute. If it is a plain filename, it is assumed to be in the\ncurrent
working directory."}}, "required": ["filename"]}}}]}'
body: |-
{
"messages": [
{
"role": "system",
"content": "You are a helpful assistant. Expect to need to read a file using read_file."
},
{
"role": "user",
"content": "What are the contents of this file? test.txt"
}
],
"model": "gpt-4o-mini",
"tools": [
{
"type": "function",
"function": {
"name": "read_file",
"description": "Read the contents of the file.",
"parameters": {
"type": "object",
"properties": {
"filename": {
"type": "string",
"description": "The path to the file, which can be relative or\nabsolute. If it is a plain filename, it is assumed to be in the\ncurrent working directory."
}
},
"required": [
"filename"
]
}
}
}
]
}
headers:
accept:
- '*/*'
@@ -29,10 +56,64 @@ interactions:
uri: https://test.openai.azure.com/openai/deployments/test-azure-deployment/chat/completions?api-version=2024-05-01-preview
response:
body:
string: '{"choices":[{"content_filter_results":{},"finish_reason":"tool_calls","index":0,"logprobs":null,"message":{"content":null,"role":"assistant","tool_calls":[{"function":{"arguments":"{\n \"filename\":
\"test.txt\"\n}","name":"read_file"},"id":"call_a47abadDxlGKIWjvYYvGVAHa","type":"function"}]}}],"created":1727256650,"id":"chatcmpl-ABIeABbq5WVCq0e0AriGFaYDSih3P","model":"gpt-4-32k","object":"chat.completion","prompt_filter_results":[{"prompt_index":0,"content_filter_results":{"hate":{"filtered":false,"severity":"safe"},"self_harm":{"filtered":false,"severity":"safe"},"sexual":{"filtered":false,"severity":"safe"},"violence":{"filtered":false,"severity":"safe"}}}],"system_fingerprint":null,"usage":{"completion_tokens":16,"prompt_tokens":109,"total_tokens":125}}
'
string: |-
{
"choices": [
{
"content_filter_results": {},
"finish_reason": "tool_calls",
"index": 0,
"logprobs": null,
"message": {
"content": null,
"role": "assistant",
"tool_calls": [
{
"function": {
"arguments": "{\n \"filename\": \"test.txt\"\n}",
"name": "read_file"
},
"id": "call_Dn0idyNSdmHSYpsql3EbFH9L",
"type": "function"
}
]
}
}
],
"created": 1728788173,
"id": "chatcmpl-AHj498XEBkixukw2lwNReXCIBStp0",
"model": "gpt-4-32k",
"object": "chat.completion",
"prompt_filter_results": [
{
"prompt_index": 0,
"content_filter_results": {
"hate": {
"filtered": false,
"severity": "safe"
},
"self_harm": {
"filtered": false,
"severity": "safe"
},
"sexual": {
"filtered": false,
"severity": "safe"
},
"violence": {
"filtered": false,
"severity": "safe"
}
}
}
],
"system_fingerprint": null,
"usage": {
"completion_tokens": 16,
"prompt_tokens": 109,
"total_tokens": 125
}
}
headers:
Cache-Control:
- no-cache, must-revalidate
@@ -41,33 +122,31 @@ interactions:
Content-Type:
- application/json
Date:
- Wed, 25 Sep 2024 09:30:50 GMT
Set-Cookie: test_set_cookie
- Sun, 13 Oct 2024 02:56:14 GMT
Strict-Transport-Security:
- max-age=31536000; includeSubDomains; preload
access-control-allow-origin:
- '*'
apim-request-id:
- 8c0e3372-8ffd-4ff5-a5d1-0b962c4ea339
- 0e0af575-5634-415c-88e3-a7bf68549ee5
azureml-model-session:
- d145-20240919052126
openai-organization: test_openai_org_key
- d159-20241010142543
x-accel-buffering:
- 'no'
x-content-type-options:
- nosniff
x-ms-client-request-id:
- 8c0e3372-8ffd-4ff5-a5d1-0b962c4ea339
- 0e0af575-5634-415c-88e3-a7bf68549ee5
x-ms-rai-invoked:
- 'true'
x-ms-region:
- Switzerland North
x-ratelimit-remaining-requests:
- '79'
- '77'
x-ratelimit-remaining-tokens:
- '79824'
- '79952'
x-request-id:
- 401bd803-b790-47b7-b098-98708d44f060
- e5012889-ef86-449a-908c-2065dbf0954e
status:
code: 200
message: OK

View File

@@ -1,7 +1,25 @@
interactions:
- request:
body: '{"system_instruction": {"parts": [{"text": "You are a helpful assistant."}]},
"contents": [{"role": "user", "parts": [{"text": "Hello"}]}]}'
body: |-
{
"system_instruction": {
"parts": [
{
"text": "You are a helpful assistant."
}
]
},
"contents": [
{
"role": "user",
"parts": [
{
"text": "Hello"
}
]
}
]
}
headers:
accept:
- '*/*'
@@ -21,18 +39,46 @@ interactions:
uri: https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-flash:generateContent?key=test_google_api_key
response:
body:
string: "{\n \"candidates\": [\n {\n \"content\": {\n \"parts\":
[\n {\n \"text\": \"Hello! \U0001F44B How can I help
you today? \U0001F60A \\n\"\n }\n ],\n \"role\": \"model\"\n
\ },\n \"finishReason\": \"STOP\",\n \"index\": 0,\n \"safetyRatings\":
[\n {\n \"category\": \"HARM_CATEGORY_SEXUALLY_EXPLICIT\",\n
\ \"probability\": \"NEGLIGIBLE\"\n },\n {\n \"category\":
\"HARM_CATEGORY_HATE_SPEECH\",\n \"probability\": \"NEGLIGIBLE\"\n
\ },\n {\n \"category\": \"HARM_CATEGORY_HARASSMENT\",\n
\ \"probability\": \"NEGLIGIBLE\"\n },\n {\n \"category\":
\"HARM_CATEGORY_DANGEROUS_CONTENT\",\n \"probability\": \"NEGLIGIBLE\"\n
\ }\n ]\n }\n ],\n \"usageMetadata\": {\n \"promptTokenCount\":
8,\n \"candidatesTokenCount\": 12,\n \"totalTokenCount\": 20\n }\n}\n"
string: |-
{
"candidates": [
{
"content": {
"parts": [
{
"text": "Hello! \ud83d\udc4b What can I do for you today? \ud83d\ude0a \n"
}
],
"role": "model"
},
"finishReason": "STOP",
"index": 0,
"safetyRatings": [
{
"category": "HARM_CATEGORY_SEXUALLY_EXPLICIT",
"probability": "NEGLIGIBLE"
},
{
"category": "HARM_CATEGORY_HATE_SPEECH",
"probability": "NEGLIGIBLE"
},
{
"category": "HARM_CATEGORY_HARASSMENT",
"probability": "NEGLIGIBLE"
},
{
"category": "HARM_CATEGORY_DANGEROUS_CONTENT",
"probability": "NEGLIGIBLE"
}
]
}
],
"usageMetadata": {
"promptTokenCount": 8,
"candidatesTokenCount": 13,
"totalTokenCount": 21
}
}
headers:
Alt-Svc:
- h3=":443"; ma=2592000,h3-29=":443"; ma=2592000
@@ -41,11 +87,11 @@ interactions:
Content-Type:
- application/json; charset=UTF-8
Date:
- Wed, 02 Oct 2024 01:06:50 GMT
- Sun, 13 Oct 2024 02:54:58 GMT
Server:
- scaffolding on HTTPServer2
Server-Timing:
- gfet4t7; dur=426
- gfet4t7; dur=1201
Transfer-Encoding:
- chunked
Vary:
@@ -59,7 +105,7 @@ interactions:
X-XSS-Protection:
- '0'
content-length:
- '855'
- '858'
status:
code: 200
message: OK

View File

@@ -1,13 +1,45 @@
interactions:
- request:
body: '{"system_instruction": {"parts": [{"text": "You are a helpful assistant.
Expect to need to read a file using read_file."}]}, "contents": [{"role": "user",
"parts": [{"text": "What are the contents of this file? test.txt"}]}], "tools":
{"functionDeclarations": [{"name": "read_file", "description": "Read the contents
of the file.", "parameters": {"type": "object", "properties": {"filename": {"type":
"string", "description": "The path to the file, which can be relative or\nabsolute.
If it is a plain filename, it is assumed to be in the\ncurrent working directory."}},
"required": ["filename"]}}]}}'
body: |-
{
"system_instruction": {
"parts": [
{
"text": "You are a helpful assistant. Expect to need to read a file using read_file."
}
]
},
"contents": [
{
"role": "user",
"parts": [
{
"text": "What are the contents of this file? test.txt"
}
]
}
],
"tools": {
"functionDeclarations": [
{
"name": "read_file",
"description": "Read the contents of the file.",
"parameters": {
"type": "object",
"properties": {
"filename": {
"type": "string",
"description": "The path to the file, which can be relative or\nabsolute. If it is a plain filename, it is assumed to be in the\ncurrent working directory."
}
},
"required": [
"filename"
]
}
}
]
}
}
headers:
accept:
- '*/*'
@@ -27,19 +59,51 @@ interactions:
uri: https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-flash:generateContent?key=test_google_api_key
response:
body:
string: "{\n \"candidates\": [\n {\n \"content\": {\n \"parts\":
[\n {\n \"functionCall\": {\n \"name\": \"read_file\",\n
\ \"args\": {\n \"filename\": \"test.txt\"\n }\n
\ }\n }\n ],\n \"role\": \"model\"\n },\n
\ \"finishReason\": \"STOP\",\n \"index\": 0,\n \"safetyRatings\":
[\n {\n \"category\": \"HARM_CATEGORY_SEXUALLY_EXPLICIT\",\n
\ \"probability\": \"NEGLIGIBLE\"\n },\n {\n \"category\":
\"HARM_CATEGORY_HATE_SPEECH\",\n \"probability\": \"NEGLIGIBLE\"\n
\ },\n {\n \"category\": \"HARM_CATEGORY_HARASSMENT\",\n
\ \"probability\": \"NEGLIGIBLE\"\n },\n {\n \"category\":
\"HARM_CATEGORY_DANGEROUS_CONTENT\",\n \"probability\": \"NEGLIGIBLE\"\n
\ }\n ]\n }\n ],\n \"usageMetadata\": {\n \"promptTokenCount\":
101,\n \"candidatesTokenCount\": 17,\n \"totalTokenCount\": 118\n }\n}\n"
string: |-
{
"candidates": [
{
"content": {
"parts": [
{
"functionCall": {
"name": "read_file",
"args": {
"filename": "test.txt"
}
}
}
],
"role": "model"
},
"finishReason": "STOP",
"index": 0,
"safetyRatings": [
{
"category": "HARM_CATEGORY_HATE_SPEECH",
"probability": "NEGLIGIBLE"
},
{
"category": "HARM_CATEGORY_HARASSMENT",
"probability": "NEGLIGIBLE"
},
{
"category": "HARM_CATEGORY_DANGEROUS_CONTENT",
"probability": "NEGLIGIBLE"
},
{
"category": "HARM_CATEGORY_SEXUALLY_EXPLICIT",
"probability": "NEGLIGIBLE"
}
]
}
],
"usageMetadata": {
"promptTokenCount": 101,
"candidatesTokenCount": 17,
"totalTokenCount": 118
}
}
headers:
Alt-Svc:
- h3=":443"; ma=2592000,h3-29=":443"; ma=2592000
@@ -48,7 +112,7 @@ interactions:
Content-Type:
- application/json; charset=UTF-8
Date:
- Wed, 02 Oct 2024 01:06:51 GMT
- Sun, 13 Oct 2024 02:54:59 GMT
Server:
- scaffolding on HTTPServer2
Server-Timing:

View File

@@ -23,15 +23,25 @@ interactions:
Content-Type:
- text/plain; charset=utf-8
Date:
- Sun, 22 Sep 2024 23:40:13 GMT
Set-Cookie: test_set_cookie
openai-organization: test_openai_org_key
- Sun, 13 Oct 2024 04:53:22 GMT
status:
code: 200
message: OK
- request:
body: '{"messages": [{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": "Hello"}], "model": "mistral-nemo"}'
body: |-
{
"messages": [
{
"role": "system",
"content": "You are a helpful assistant."
},
{
"role": "user",
"content": "Hello"
}
],
"model": "mistral-nemo"
}
headers:
accept:
- '*/*'
@@ -51,17 +61,36 @@ interactions:
uri: http://localhost:11434/v1/chat/completions
response:
body:
string: "{\"id\":\"chatcmpl-429\",\"object\":\"chat.completion\",\"created\":1727048416,\"model\":\"mistral-nemo\",\"system_fingerprint\":\"fp_ollama\",\"choices\":[{\"index\":0,\"message\":{\"role\":\"assistant\",\"content\":\"Hello!
I'm here to help. How can I assist you today? Let's chat. \U0001F60A\"},\"finish_reason\":\"stop\"}],\"usage\":{\"prompt_tokens\":10,\"completion_tokens\":23,\"total_tokens\":33}}\n"
string: |-
{
"id": "chatcmpl-565",
"object": "chat.completion",
"created": 1728795204,
"model": "mistral-nemo",
"system_fingerprint": "fp_ollama",
"choices": [
{
"index": 0,
"message": {
"role": "assistant",
"content": "Hello! I'm here to help. How can I assist you today? \ud83d\ude0a"
},
"finish_reason": "stop"
}
],
"usage": {
"prompt_tokens": 10,
"completion_tokens": 19,
"total_tokens": 29
}
}
headers:
Content-Length:
- '356'
- '344'
Content-Type:
- application/json
Date:
- Sun, 22 Sep 2024 23:40:16 GMT
Set-Cookie: test_set_cookie
openai-organization: test_openai_org_key
- Sun, 13 Oct 2024 04:53:24 GMT
status:
code: 200
message: OK

View File

@@ -23,21 +23,46 @@ interactions:
Content-Type:
- text/plain; charset=utf-8
Date:
- Wed, 25 Sep 2024 09:23:08 GMT
Set-Cookie: test_set_cookie
openai-organization: test_openai_org_key
- Sun, 13 Oct 2024 02:52:00 GMT
status:
code: 200
message: OK
- request:
body: '{"messages": [{"role": "system", "content": "You are a helpful assistant.
Expect to need to read a file using read_file."}, {"role": "user", "content":
"What are the contents of this file? test.txt"}], "model": "mistral-nemo", "tools":
[{"type": "function", "function": {"name": "read_file", "description": "Read
the contents of the file.", "parameters": {"type": "object", "properties": {"filename":
{"type": "string", "description": "The path to the file, which can be relative
or\nabsolute. If it is a plain filename, it is assumed to be in the\ncurrent
working directory."}}, "required": ["filename"]}}}]}'
body: |-
{
"messages": [
{
"role": "system",
"content": "You are a helpful assistant. Expect to need to read a file using read_file."
},
{
"role": "user",
"content": "What are the contents of this file? test.txt"
}
],
"model": "mistral-nemo",
"tools": [
{
"type": "function",
"function": {
"name": "read_file",
"description": "Read the contents of the file.",
"parameters": {
"type": "object",
"properties": {
"filename": {
"type": "string",
"description": "The path to the file, which can be relative or\nabsolute. If it is a plain filename, it is assumed to be in the\ncurrent working directory."
}
},
"required": [
"filename"
]
}
}
}
]
}
headers:
accept:
- '*/*'
@@ -57,18 +82,46 @@ interactions:
uri: http://localhost:11434/v1/chat/completions
response:
body:
string: '{"id":"chatcmpl-245","object":"chat.completion","created":1727256190,"model":"mistral-nemo","system_fingerprint":"fp_ollama","choices":[{"index":0,"message":{"role":"assistant","content":"","tool_calls":[{"id":"call_z6fgu3z3","type":"function","function":{"name":"read_file","arguments":"{\"filename\":\"test.txt\"}"}}]},"finish_reason":"tool_calls"}],"usage":{"prompt_tokens":112,"completion_tokens":21,"total_tokens":133}}
'
string: |-
{
"id": "chatcmpl-212",
"object": "chat.completion",
"created": 1728787922,
"model": "mistral-nemo",
"system_fingerprint": "fp_ollama",
"choices": [
{
"index": 0,
"message": {
"role": "assistant",
"content": "",
"tool_calls": [
{
"id": "call_h5d3s25w",
"type": "function",
"function": {
"name": "read_file",
"arguments": "{\"filename\":\"test.txt\"}"
}
}
]
},
"finish_reason": "tool_calls"
}
],
"usage": {
"prompt_tokens": 112,
"completion_tokens": 21,
"total_tokens": 133
}
}
headers:
Content-Length:
- '425'
Content-Type:
- application/json
Date:
- Wed, 25 Sep 2024 09:23:10 GMT
Set-Cookie: test_set_cookie
openai-organization: test_openai_org_key
- Sun, 13 Oct 2024 02:52:02 GMT
status:
code: 200
message: OK

View File

@@ -1,7 +1,19 @@
interactions:
- request:
body: '{"messages": [{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": "Hello"}], "model": "gpt-4o-mini"}'
body: |-
{
"messages": [
{
"role": "system",
"content": "You are a helpful assistant."
},
{
"role": "user",
"content": "Hello"
}
],
"model": "gpt-4o-mini"
}
headers:
accept:
- '*/*'
@@ -23,25 +35,48 @@ interactions:
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: "{\n \"id\": \"chatcmpl-AAQTYi3DXJnltAfd5sUH1Wnzh69t3\",\n \"object\":
\"chat.completion\",\n \"created\": 1727048416,\n \"model\": \"gpt-4o-mini-2024-07-18\",\n
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
\"assistant\",\n \"content\": \"Hello! How can I assist you today?\",\n
\ \"refusal\": null\n },\n \"logprobs\": null,\n \"finish_reason\":
\"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 18,\n \"completion_tokens\":
9,\n \"total_tokens\": 27,\n \"completion_tokens_details\": {\n \"reasoning_tokens\":
0\n }\n },\n \"system_fingerprint\": \"fp_1bb46167f9\"\n}\n"
string: |-
{
"id": "chatcmpl-AHj1Y1xN9345uFT3PVMInIYEQ8g4a",
"object": "chat.completion",
"created": 1728788012,
"model": "gpt-4o-mini-2024-07-18",
"choices": [
{
"index": 0,
"message": {
"role": "assistant",
"content": "Hello! How can I assist you today?",
"refusal": null
},
"logprobs": null,
"finish_reason": "stop"
}
],
"usage": {
"prompt_tokens": 18,
"completion_tokens": 9,
"total_tokens": 27,
"prompt_tokens_details": {
"cached_tokens": 0
},
"completion_tokens_details": {
"reasoning_tokens": 0
}
},
"system_fingerprint": "fp_e2bde53e6e"
}
headers:
CF-Cache-Status:
- DYNAMIC
CF-RAY:
- 8c762399feb55739-SYD
- 8d1c0a328dca3e44-SIN
Connection:
- keep-alive
Content-Type:
- application/json
Date:
- Sun, 22 Sep 2024 23:40:17 GMT
- Sun, 13 Oct 2024 02:53:33 GMT
Server:
- cloudflare
Set-Cookie: test_set_cookie
@@ -51,15 +86,17 @@ interactions:
- nosniff
access-control-expose-headers:
- X-Request-ID
alt-svc:
- h3=":443"; ma=86400
content-length:
- '593'
- '656'
openai-organization: test_openai_org_key
openai-processing-ms:
- '560'
- '481'
openai-version:
- '2020-10-01'
strict-transport-security:
- max-age=15552000; includeSubDomains; preload
- max-age=31536000; includeSubDomains; preload
x-ratelimit-limit-requests:
- '10000'
x-ratelimit-limit-tokens:
@@ -67,13 +104,13 @@ interactions:
x-ratelimit-remaining-requests:
- '9999'
x-ratelimit-remaining-tokens:
- '199973'
- '199972'
x-ratelimit-reset-requests:
- 8.64s
x-ratelimit-reset-tokens:
- 8ms
x-request-id:
- req_22e26c840219cde3152eaba1ce89483b
- req_85f532ac5fdad6a4af020cab55e2fd4d
status:
code: 200
message: OK

View File

@@ -1,13 +1,40 @@
interactions:
- request:
body: '{"messages": [{"role": "system", "content": "You are a helpful assistant.
Expect to need to read a file using read_file."}, {"role": "user", "content":
"What are the contents of this file? test.txt"}], "model": "gpt-4o-mini", "tools":
[{"type": "function", "function": {"name": "read_file", "description": "Read
the contents of the file.", "parameters": {"type": "object", "properties": {"filename":
{"type": "string", "description": "The path to the file, which can be relative
or\nabsolute. If it is a plain filename, it is assumed to be in the\ncurrent
working directory."}}, "required": ["filename"]}}}]}'
body: |-
{
"messages": [
{
"role": "system",
"content": "You are a helpful assistant. Expect to need to read a file using read_file."
},
{
"role": "user",
"content": "What are the contents of this file? test.txt"
}
],
"model": "gpt-4o-mini",
"tools": [
{
"type": "function",
"function": {
"name": "read_file",
"description": "Read the contents of the file.",
"parameters": {
"type": "object",
"properties": {
"filename": {
"type": "string",
"description": "The path to the file, which can be relative or\nabsolute. If it is a plain filename, it is assumed to be in the\ncurrent working directory."
}
},
"required": [
"filename"
]
}
}
}
]
}
headers:
accept:
- '*/*'
@@ -29,29 +56,58 @@ interactions:
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: "{\n \"id\": \"chatcmpl-ABIV2aZWVKQ774RAQ8KHYdNwkI5N7\",\n \"object\":
\"chat.completion\",\n \"created\": 1727256084,\n \"model\": \"gpt-4o-mini-2024-07-18\",\n
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
\"assistant\",\n \"content\": null,\n \"tool_calls\": [\n {\n
\ \"id\": \"call_xXYlw4A7Ud1qtCopuK5gEJrP\",\n \"type\":
\"function\",\n \"function\": {\n \"name\": \"read_file\",\n
\ \"arguments\": \"{\\\"filename\\\":\\\"test.txt\\\"}\"\n }\n
\ }\n ],\n \"refusal\": null\n },\n \"logprobs\":
null,\n \"finish_reason\": \"tool_calls\"\n }\n ],\n \"usage\":
{\n \"prompt_tokens\": 107,\n \"completion_tokens\": 15,\n \"total_tokens\":
122,\n \"completion_tokens_details\": {\n \"reasoning_tokens\": 0\n
\ }\n },\n \"system_fingerprint\": \"fp_1bb46167f9\"\n}\n"
string: |-
{
"id": "chatcmpl-AHj1axEdpe3coVDULrCjHmXql5euz",
"object": "chat.completion",
"created": 1728788014,
"model": "gpt-4o-mini-2024-07-18",
"choices": [
{
"index": 0,
"message": {
"role": "assistant",
"content": null,
"tool_calls": [
{
"id": "call_Z43oz2RtLmNHw9xvFgxA1SC5",
"type": "function",
"function": {
"name": "read_file",
"arguments": "{\"filename\":\"test.txt\"}"
}
}
],
"refusal": null
},
"logprobs": null,
"finish_reason": "tool_calls"
}
],
"usage": {
"prompt_tokens": 107,
"completion_tokens": 15,
"total_tokens": 122,
"prompt_tokens_details": {
"cached_tokens": 0
},
"completion_tokens_details": {
"reasoning_tokens": 0
}
},
"system_fingerprint": "fp_e2bde53e6e"
}
headers:
CF-Cache-Status:
- DYNAMIC
CF-RAY:
- 8c89f19fed997e43-SYD
- 8d1c0a419b009d0b-SIN
Connection:
- keep-alive
Content-Type:
- application/json
Date:
- Wed, 25 Sep 2024 09:21:25 GMT
- Sun, 13 Oct 2024 02:53:35 GMT
Server:
- cloudflare
Set-Cookie: test_set_cookie
@@ -61,11 +117,13 @@ interactions:
- nosniff
access-control-expose-headers:
- X-Request-ID
alt-svc:
- h3=":443"; ma=86400
content-length:
- '844'
- '907'
openai-organization: test_openai_org_key
openai-processing-ms:
- '266'
- '442'
openai-version:
- '2020-10-01'
strict-transport-security:
@@ -75,15 +133,15 @@ interactions:
x-ratelimit-limit-tokens:
- '200000'
x-ratelimit-remaining-requests:
- '9991'
- '9997'
x-ratelimit-remaining-tokens:
- '199952'
- '199951'
x-ratelimit-reset-requests:
- 1m9.486s
- 23.873s
x-ratelimit-reset-tokens:
- 14ms
x-request-id:
- req_ff6b5d65c24f40e1faaf049c175e718d
- req_8ec455e318c9f2d6eecf82d1fdf124ab
status:
code: 200
message: OK

File diff suppressed because one or more lines are too long

View File

@@ -1,11 +1,14 @@
import json
import os
import re
from typing import Type, Tuple
import pytest
import yaml
from exchange import Message, ToolUse, ToolResult, Tool
from exchange.providers import Usage, Provider
from tests.conftest import read_file
OPENAI_API_KEY = "test_openai_api_key"
@@ -70,6 +73,73 @@ def default_google_env(monkeypatch):
monkeypatch.setenv("GOOGLE_API_KEY", GOOGLE_API_KEY)
class LiteralBlockScalar(str):
    """Formats the string as a literal block scalar, preserving whitespace and
    without interpreting escape characters"""

    pass


def literal_block_scalar_presenter(dumper, data):
    """Represents a scalar string as a literal block, via '|' syntax"""
    return dumper.represent_scalar("tag:yaml.org,2002:str", data, style="|")


yaml.add_representer(LiteralBlockScalar, literal_block_scalar_presenter)


def process_string_value(string_value):
    """Pretty-prints JSON or returns long strings as a LiteralString"""
    try:
        json_data = json.loads(string_value)
        return LiteralBlockScalar(json.dumps(json_data, indent=2))
    except (ValueError, TypeError):
        if len(string_value) > 80:
            return LiteralBlockScalar(string_value)
    return string_value


def convert_body_to_literal(data):
    """Searches the data for body strings, attempting to pretty-print JSON"""
    if isinstance(data, dict):
        for key, value in data.items():
            # Handle response body case (e.g., response.body.string)
            if key == "body" and isinstance(value, dict) and "string" in value:
                value["string"] = process_string_value(value["string"])
            # Handle request body case (e.g., request.body)
            elif key == "body" and isinstance(value, str):
                data[key] = process_string_value(value)
            else:
                convert_body_to_literal(value)
    elif isinstance(data, list):
        for i, item in enumerate(data):
            data[i] = convert_body_to_literal(item)
    return data


class PrettyPrintJSONBody:
    """This makes request and response body recordings more readable."""

    @staticmethod
    def serialize(cassette_dict):
        cassette_dict = convert_body_to_literal(cassette_dict)
        return yaml.dump(cassette_dict, default_flow_style=False, allow_unicode=True)

    @staticmethod
    def deserialize(cassette_string):
        return yaml.load(cassette_string, Loader=yaml.Loader)


@pytest.fixture(scope="module")
def vcr(vcr):
    vcr.register_serializer("yaml", PrettyPrintJSONBody)
    return vcr


@pytest.fixture(scope="module")
def vcr_config():
    """

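The conftest helpers above are what produce the pretty-printed bodies seen in the cassette diffs. As a minimal, self-contained sketch of that transformation, run under the assumption that the code matches the diff above (it re-declares the same LiteralBlockScalar and presenter so it runs standalone; the compact request body is an illustrative value, not a real recording):

import json

import yaml


class LiteralBlockScalar(str):
    """String subclass that the presenter below renders as a '|' literal block."""


def literal_block_scalar_presenter(dumper, data):
    # Keep newlines verbatim by emitting the string in literal block style.
    return dumper.represent_scalar("tag:yaml.org,2002:str", data, style="|")


yaml.add_representer(LiteralBlockScalar, literal_block_scalar_presenter)

# Compact JSON the way VCR records it on a single line...
compact_body = '{"messages": [{"role": "user", "content": "Hello"}], "model": "gpt-4o-mini"}'

# ...re-serialized with indent=2 and wrapped so YAML keeps it as a readable block.
pretty_body = LiteralBlockScalar(json.dumps(json.loads(compact_body), indent=2))

print(yaml.dump({"body": pretty_body}, default_flow_style=False, allow_unicode=True))
# body: |-
#   {
#     "messages": [
#       {
#         "role": "user",
#         "content": "Hello"
#       }
#     ],
#     "model": "gpt-4o-mini"
#   }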
View File

@@ -91,7 +91,7 @@ def test_message_text_to_anthropic_spec() -> None:
def test_messages_to_anthropic_spec() -> None:
messages = [
Message(role="user", content=[Text(text="Hello, Claude")]),
Message(role="user", content=[Text("Hello, Claude")]),
Message(
role="assistant",
content=[ToolUse(id="1", name="example_fn", parameters={"param": "value"})],
@@ -148,7 +148,7 @@ def test_anthropic_completion(mock_error, mock_warning, mock_post, anthropic_pro
reply_message, reply_usage = anthropic_provider.complete(model=model, system=system, messages=messages)
assert reply_message.content == [Text(text="Hello from Claude!")]
assert reply_message.content == [Text("Hello from Claude!")]
assert reply_usage.total_tokens == 35
assert mock_post.call_count == 2
mock_post.assert_any_call(

View File

@@ -43,7 +43,7 @@ def test_from_env_throw_error_when_missing_env_var(env_var_name):
def test_azure_complete(default_azure_env):
reply_message, reply_usage = complete(AzureProvider, AZURE_MODEL)
assert reply_message.content == [Text(text="Hello! How can I assist you today?")]
assert reply_message.content == [Text("Hello! How can I assist you today?\n")]
assert reply_usage.total_tokens == 27
@@ -61,7 +61,7 @@ def test_azure_tools(default_azure_env):
tool_use = reply_message.content[0]
assert isinstance(tool_use, ToolUse), f"Expected ToolUse, but was {type(tool_use).__name__}"
assert tool_use.id == "call_a47abadDxlGKIWjvYYvGVAHa"
assert tool_use.id == "call_Dn0idyNSdmHSYpsql3EbFH9L"
assert tool_use.name == "read_file"
assert tool_use.parameters == {"filename": "test.txt"}
assert reply_usage.total_tokens == 125

View File

@@ -61,7 +61,7 @@ def test_databricks_completion(mock_error, mock_warning, mock_sleep, mock_post,
model=model, system=system, messages=messages, tools=tools
)
assert reply_message.content == [Text(text="Hello!")]
assert reply_message.content == [Text("Hello!")]
assert reply_usage.total_tokens == 35
assert mock_post.call_count == 1
mock_post.assert_called_once_with(

View File

@@ -83,7 +83,7 @@ def test_message_text_to_google_spec() -> None:
def test_messages_to_google_spec() -> None:
messages = [
Message(role="user", content=[Text(text="Hello, Gemini")]),
Message(role="user", content=[Text("Hello, Gemini")]),
Message(
role="assistant",
content=[ToolUse(id="1", name="example_fn", parameters={"param": "value"})],
@@ -105,8 +105,8 @@ def test_messages_to_google_spec() -> None:
def test_google_complete(default_google_env):
reply_message, reply_usage = complete(GoogleProvider, GOOGLE_MODEL)
assert reply_message.content == [Text("Hello! 👋 How can I help you today? 😊 \n")]
assert reply_usage.total_tokens == 20
assert reply_message.content == [Text("Hello! 👋 What can I do for you today? 😊 \n")]
assert reply_usage.total_tokens == 21
@pytest.mark.integration

View File

@@ -13,8 +13,8 @@ OLLAMA_MODEL = os.getenv("OLLAMA_MODEL", OLLAMA_MODEL)
def test_ollama_complete():
reply_message, reply_usage = complete(OllamaProvider, OLLAMA_MODEL)
assert reply_message.content == [Text(text="Hello! I'm here to help. How can I assist you today? Let's chat. 😊")]
assert reply_usage.total_tokens == 33
assert reply_message.content == [Text("Hello! I'm here to help. How can I assist you today? 😊")]
assert reply_usage.total_tokens == 29
@pytest.mark.integration
@@ -31,7 +31,7 @@ def test_ollama_tools():
tool_use = reply_message.content[0]
assert isinstance(tool_use, ToolUse), f"Expected ToolUse, but was {type(tool_use).__name__}"
assert tool_use.id == "call_z6fgu3z3"
assert tool_use.id == "call_h5d3s25w"
assert tool_use.name == "read_file"
assert tool_use.parameters == {"filename": "test.txt"}
assert reply_usage.total_tokens == 133

View File

@@ -25,7 +25,7 @@ def test_from_env_throw_error_when_missing_api_key():
def test_openai_complete(default_openai_env):
reply_message, reply_usage = complete(OpenAiProvider, OPENAI_MODEL)
assert reply_message.content == [Text(text="Hello! How can I assist you today?")]
assert reply_message.content == [Text("Hello! How can I assist you today?")]
assert reply_usage.total_tokens == 27
@@ -43,7 +43,7 @@ def test_openai_tools(default_openai_env):
tool_use = reply_message.content[0]
assert isinstance(tool_use, ToolUse), f"Expected ToolUse, but was {type(tool_use).__name__}"
assert tool_use.id == "call_xXYlw4A7Ud1qtCopuK5gEJrP"
assert tool_use.id == "call_Z43oz2RtLmNHw9xvFgxA1SC5"
assert tool_use.name == "read_file"
assert tool_use.parameters == {"filename": "test.txt"}
assert reply_usage.total_tokens == 122
@@ -64,7 +64,7 @@ def test_openai_tools_integration():
def test_openai_vision(default_openai_env):
reply_message, reply_usage = vision(OpenAiProvider, OPENAI_MODEL)
assert reply_message.content == [Text(text='The first entry in the menu says "Ask Goose."')]
assert reply_message.content == [Text('The first entry in the menu says "Ask Goose."')]
assert reply_usage.total_tokens == 14241