Compare commits

..

1 Commits

Author SHA1 Message Date
Gui Vieira
9a210afd80 Fix types 2024-02-08 18:34:04 -03:00
49 changed files with 5072 additions and 11067 deletions

View File

@@ -262,8 +262,6 @@ Data collected includes:
- Roles of agents in a crew
- Tool names available
Users can opt-in sharing the complete telemetry data by setting the `share_crew` attribute to `True` on their Crews.
## License
CrewAI is released under the MIT License.

View File

@@ -25,7 +25,7 @@ description: What are crewAI Agents and how to use them.
| **Max RPM** | The maximum number of requests per minute the agent can perform to avoid rate limits |
| **Verbose** | This allows you to actually see what is going on during the Crew execution. |
| **Allow Delegation** | Agents can delegate tasks or questions to one another, ensuring that each task is handled by the most suitable agent. |
| **Step Callback** | A function that is called after each step of the agent. This can be used to log the agent's actions or to perform other operations. It will overwrite the crew `step_callback` |
## Creating an Agent
@@ -50,8 +50,7 @@ agent = Agent(
max_iter=10,
max_rpm=10,
verbose=True,
allow_delegation=True,
step_callback=my_intermediate_step_callback
allow_delegation=True
)
```

View File

@@ -19,10 +19,6 @@ description: Understanding and utilizing crews in the crewAI framework.
| **Config** | Configuration settings for the crew. |
| **Max RPM** | Maximum requests per minute the crew adheres to during execution. |
| **Language** | Language setting for the crew's operation. |
| **Full Output** | Whether the crew should return the full output with all tasks outputs or just the final output. |
| **Step Callback** | A function that is called after each step of every agent. This can be used to log the agent's actions or to perform other operations; it won't override the agent-specific `step_callback`. |
| **Share Crew** | Whether you want to share the complete crew information and execution with the crewAI team to make the library better, and allow us to train models. |
!!! note "Crew Max RPM"
The `max_rpm` attribute sets the maximum number of requests per minute the crew can perform to avoid rate limits and will override individual agents `max_rpm` settings if you set it.
@@ -59,7 +55,6 @@ my_crew = Crew(
agents=[researcher, writer],
tasks=[research_task, write_article_task],
process=Process.sequential,
full_output=True,
verbose=True
)
```

View File

@@ -14,6 +14,4 @@ Data collected includes:
- If Tasks are being executed in parallel or sequentially
- Language model being used
- Roles of agents in a crew
- Tool names available
Users can opt-in sharing the complete telemetry data by setting the `share_crew` attribute to `True` on their Crews.
- Tool names available

196
poetry.lock generated
View File

@@ -801,18 +801,84 @@ test = ["objgraph", "psutil"]
[[package]]
name = "griffe"
version = "0.40.1"
version = "0.40.0"
description = "Signatures for entire Python programs. Extract the structure, the frame, the skeleton of your project, to generate API documentation or find breaking changes in your API."
optional = false
python-versions = ">=3.8"
files = [
{file = "griffe-0.40.1-py3-none-any.whl", hash = "sha256:5b8c023f366fe273e762131fe4bfd141ea56c09b3cb825aa92d06a82681cfd93"},
{file = "griffe-0.40.1.tar.gz", hash = "sha256:66c48a62e2ce5784b6940e603300fcfb807b6f099b94e7f753f1841661fd5c7c"},
{file = "griffe-0.40.0-py3-none-any.whl", hash = "sha256:db1da6d1d8e08cbb20f1a7dee8c09da940540c2d4c1bfa26a9091cf6fc36a9ec"},
{file = "griffe-0.40.0.tar.gz", hash = "sha256:76c4439eaa2737af46ae003c331ab6ca79c5365b552f7b5aed263a3b4125735b"},
]
[package.dependencies]
colorama = ">=0.4"
[[package]]
name = "grpcio"
version = "1.60.1"
description = "HTTP/2-based RPC framework"
optional = false
python-versions = ">=3.7"
files = [
{file = "grpcio-1.60.1-cp310-cp310-linux_armv7l.whl", hash = "sha256:14e8f2c84c0832773fb3958240c69def72357bc11392571f87b2d7b91e0bb092"},
{file = "grpcio-1.60.1-cp310-cp310-macosx_12_0_universal2.whl", hash = "sha256:33aed0a431f5befeffd9d346b0fa44b2c01aa4aeae5ea5b2c03d3e25e0071216"},
{file = "grpcio-1.60.1-cp310-cp310-manylinux_2_17_aarch64.whl", hash = "sha256:fead980fbc68512dfd4e0c7b1f5754c2a8e5015a04dea454b9cada54a8423525"},
{file = "grpcio-1.60.1-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:082081e6a36b6eb5cf0fd9a897fe777dbb3802176ffd08e3ec6567edd85bc104"},
{file = "grpcio-1.60.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:55ccb7db5a665079d68b5c7c86359ebd5ebf31a19bc1a91c982fd622f1e31ff2"},
{file = "grpcio-1.60.1-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:9b54577032d4f235452f77a83169b6527bf4b77d73aeada97d45b2aaf1bf5ce0"},
{file = "grpcio-1.60.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:7d142bcd604166417929b071cd396aa13c565749a4c840d6c702727a59d835eb"},
{file = "grpcio-1.60.1-cp310-cp310-win32.whl", hash = "sha256:2a6087f234cb570008a6041c8ffd1b7d657b397fdd6d26e83d72283dae3527b1"},
{file = "grpcio-1.60.1-cp310-cp310-win_amd64.whl", hash = "sha256:f2212796593ad1d0235068c79836861f2201fc7137a99aa2fea7beeb3b101177"},
{file = "grpcio-1.60.1-cp311-cp311-linux_armv7l.whl", hash = "sha256:79ae0dc785504cb1e1788758c588c711f4e4a0195d70dff53db203c95a0bd303"},
{file = "grpcio-1.60.1-cp311-cp311-macosx_10_10_universal2.whl", hash = "sha256:4eec8b8c1c2c9b7125508ff7c89d5701bf933c99d3910e446ed531cd16ad5d87"},
{file = "grpcio-1.60.1-cp311-cp311-manylinux_2_17_aarch64.whl", hash = "sha256:8c9554ca8e26241dabe7951aa1fa03a1ba0856688ecd7e7bdbdd286ebc272e4c"},
{file = "grpcio-1.60.1-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:91422ba785a8e7a18725b1dc40fbd88f08a5bb4c7f1b3e8739cab24b04fa8a03"},
{file = "grpcio-1.60.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cba6209c96828711cb7c8fcb45ecef8c8859238baf15119daa1bef0f6c84bfe7"},
{file = "grpcio-1.60.1-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:c71be3f86d67d8d1311c6076a4ba3b75ba5703c0b856b4e691c9097f9b1e8bd2"},
{file = "grpcio-1.60.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:af5ef6cfaf0d023c00002ba25d0751e5995fa0e4c9eec6cd263c30352662cbce"},
{file = "grpcio-1.60.1-cp311-cp311-win32.whl", hash = "sha256:a09506eb48fa5493c58f946c46754ef22f3ec0df64f2b5149373ff31fb67f3dd"},
{file = "grpcio-1.60.1-cp311-cp311-win_amd64.whl", hash = "sha256:49c9b6a510e3ed8df5f6f4f3c34d7fbf2d2cae048ee90a45cd7415abab72912c"},
{file = "grpcio-1.60.1-cp312-cp312-linux_armv7l.whl", hash = "sha256:b58b855d0071575ea9c7bc0d84a06d2edfbfccec52e9657864386381a7ce1ae9"},
{file = "grpcio-1.60.1-cp312-cp312-macosx_10_10_universal2.whl", hash = "sha256:a731ac5cffc34dac62053e0da90f0c0b8560396a19f69d9703e88240c8f05858"},
{file = "grpcio-1.60.1-cp312-cp312-manylinux_2_17_aarch64.whl", hash = "sha256:cf77f8cf2a651fbd869fbdcb4a1931464189cd210abc4cfad357f1cacc8642a6"},
{file = "grpcio-1.60.1-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c557e94e91a983e5b1e9c60076a8fd79fea1e7e06848eb2e48d0ccfb30f6e073"},
{file = "grpcio-1.60.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:069fe2aeee02dfd2135d562d0663fe70fbb69d5eed6eb3389042a7e963b54de8"},
{file = "grpcio-1.60.1-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:cb0af13433dbbd1c806e671d81ec75bd324af6ef75171fd7815ca3074fe32bfe"},
{file = "grpcio-1.60.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:2f44c32aef186bbba254129cea1df08a20be414144ac3bdf0e84b24e3f3b2e05"},
{file = "grpcio-1.60.1-cp312-cp312-win32.whl", hash = "sha256:a212e5dea1a4182e40cd3e4067ee46be9d10418092ce3627475e995cca95de21"},
{file = "grpcio-1.60.1-cp312-cp312-win_amd64.whl", hash = "sha256:6e490fa5f7f5326222cb9f0b78f207a2b218a14edf39602e083d5f617354306f"},
{file = "grpcio-1.60.1-cp37-cp37m-linux_armv7l.whl", hash = "sha256:4216e67ad9a4769117433814956031cb300f85edc855252a645a9a724b3b6594"},
{file = "grpcio-1.60.1-cp37-cp37m-macosx_10_10_universal2.whl", hash = "sha256:73e14acd3d4247169955fae8fb103a2b900cfad21d0c35f0dcd0fdd54cd60367"},
{file = "grpcio-1.60.1-cp37-cp37m-manylinux_2_17_aarch64.whl", hash = "sha256:6ecf21d20d02d1733e9c820fb5c114c749d888704a7ec824b545c12e78734d1c"},
{file = "grpcio-1.60.1-cp37-cp37m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:33bdea30dcfd4f87b045d404388469eb48a48c33a6195a043d116ed1b9a0196c"},
{file = "grpcio-1.60.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:53b69e79d00f78c81eecfb38f4516080dc7f36a198b6b37b928f1c13b3c063e9"},
{file = "grpcio-1.60.1-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:39aa848794b887120b1d35b1b994e445cc028ff602ef267f87c38122c1add50d"},
{file = "grpcio-1.60.1-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:72153a0d2e425f45b884540a61c6639436ddafa1829a42056aa5764b84108b8e"},
{file = "grpcio-1.60.1-cp37-cp37m-win_amd64.whl", hash = "sha256:50d56280b482875d1f9128ce596e59031a226a8b84bec88cb2bf76c289f5d0de"},
{file = "grpcio-1.60.1-cp38-cp38-linux_armv7l.whl", hash = "sha256:6d140bdeb26cad8b93c1455fa00573c05592793c32053d6e0016ce05ba267549"},
{file = "grpcio-1.60.1-cp38-cp38-macosx_10_10_universal2.whl", hash = "sha256:bc808924470643b82b14fe121923c30ec211d8c693e747eba8a7414bc4351a23"},
{file = "grpcio-1.60.1-cp38-cp38-manylinux_2_17_aarch64.whl", hash = "sha256:70c83bb530572917be20c21f3b6be92cd86b9aecb44b0c18b1d3b2cc3ae47df0"},
{file = "grpcio-1.60.1-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9b106bc52e7f28170e624ba61cc7dc6829566e535a6ec68528f8e1afbed1c41f"},
{file = "grpcio-1.60.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:30e980cd6db1088c144b92fe376747328d5554bc7960ce583ec7b7d81cd47287"},
{file = "grpcio-1.60.1-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:0c5807e9152eff15f1d48f6b9ad3749196f79a4a050469d99eecb679be592acc"},
{file = "grpcio-1.60.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:f1c3dc536b3ee124e8b24feb7533e5c70b9f2ef833e3b2e5513b2897fd46763a"},
{file = "grpcio-1.60.1-cp38-cp38-win32.whl", hash = "sha256:d7404cebcdb11bb5bd40bf94131faf7e9a7c10a6c60358580fe83913f360f929"},
{file = "grpcio-1.60.1-cp38-cp38-win_amd64.whl", hash = "sha256:c8754c75f55781515a3005063d9a05878b2cfb3cb7e41d5401ad0cf19de14872"},
{file = "grpcio-1.60.1-cp39-cp39-linux_armv7l.whl", hash = "sha256:0250a7a70b14000fa311de04b169cc7480be6c1a769b190769d347939d3232a8"},
{file = "grpcio-1.60.1-cp39-cp39-macosx_10_10_universal2.whl", hash = "sha256:660fc6b9c2a9ea3bb2a7e64ba878c98339abaf1811edca904ac85e9e662f1d73"},
{file = "grpcio-1.60.1-cp39-cp39-manylinux_2_17_aarch64.whl", hash = "sha256:76eaaba891083fcbe167aa0f03363311a9f12da975b025d30e94b93ac7a765fc"},
{file = "grpcio-1.60.1-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e5d97c65ea7e097056f3d1ead77040ebc236feaf7f71489383d20f3b4c28412a"},
{file = "grpcio-1.60.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2bb2a2911b028f01c8c64d126f6b632fcd8a9ac975aa1b3855766c94e4107180"},
{file = "grpcio-1.60.1-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:5a1ebbae7e2214f51b1f23b57bf98eeed2cf1ba84e4d523c48c36d5b2f8829ff"},
{file = "grpcio-1.60.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:9a66f4d2a005bc78e61d805ed95dedfcb35efa84b7bba0403c6d60d13a3de2d6"},
{file = "grpcio-1.60.1-cp39-cp39-win32.whl", hash = "sha256:8d488fbdbf04283f0d20742b64968d44825617aa6717b07c006168ed16488804"},
{file = "grpcio-1.60.1-cp39-cp39-win_amd64.whl", hash = "sha256:61b7199cd2a55e62e45bfb629a35b71fc2c0cb88f686a047f25b1112d3810904"},
{file = "grpcio-1.60.1.tar.gz", hash = "sha256:dd1d3a8d1d2e50ad9b59e10aa7f07c7d1be2b367f3f2d33c5fade96ed5460962"},
]
[package.extras]
protobuf = ["grpcio-tools (>=1.60.1)"]
[[package]]
name = "h11"
version = "0.14.0"
@@ -1022,19 +1088,19 @@ text-helpers = ["chardet (>=5.1.0,<6.0.0)"]
[[package]]
name = "langchain-community"
version = "0.0.19"
version = "0.0.17"
description = "Community contributed LangChain integrations."
optional = false
python-versions = ">=3.8.1,<4.0"
files = [
{file = "langchain_community-0.0.19-py3-none-any.whl", hash = "sha256:ebff8daa0110d53555f4963f1f739b85f9ca63ef82598ece5f5c3f73fe0aa82e"},
{file = "langchain_community-0.0.19.tar.gz", hash = "sha256:5d18ad9e188b10aaba6361fb2a747cf29b64b21ffb8061933fec090187ca39c2"},
{file = "langchain_community-0.0.17-py3-none-any.whl", hash = "sha256:d503491bbfb691d1b3d10d74f7a69840cee3caf9b58a9a76f053ff925ea76733"},
{file = "langchain_community-0.0.17.tar.gz", hash = "sha256:ab957b34a562e0199b2ecf050bdc987c4fe889b2ac9f22b75a9fac8b9e30f53a"},
]
[package.dependencies]
aiohttp = ">=3.8.3,<4.0.0"
dataclasses-json = ">=0.5.7,<0.7"
langchain-core = ">=0.1.21,<0.2"
langchain-core = ">=0.1.16,<0.2"
langsmith = ">=0.0.83,<0.1"
numpy = ">=1,<2"
PyYAML = ">=5.3"
@@ -1044,23 +1110,23 @@ tenacity = ">=8.1.0,<9.0.0"
[package.extras]
cli = ["typer (>=0.9.0,<0.10.0)"]
extended-testing = ["aiosqlite (>=0.19.0,<0.20.0)", "aleph-alpha-client (>=2.15.0,<3.0.0)", "anthropic (>=0.3.11,<0.4.0)", "arxiv (>=1.4,<2.0)", "assemblyai (>=0.17.0,<0.18.0)", "atlassian-python-api (>=3.36.0,<4.0.0)", "azure-ai-documentintelligence (>=1.0.0b1,<2.0.0)", "beautifulsoup4 (>=4,<5)", "bibtexparser (>=1.4.0,<2.0.0)", "cassio (>=0.1.0,<0.2.0)", "chardet (>=5.1.0,<6.0.0)", "cohere (>=4,<5)", "databricks-vectorsearch (>=0.21,<0.22)", "datasets (>=2.15.0,<3.0.0)", "dgml-utils (>=0.3.0,<0.4.0)", "elasticsearch (>=8.12.0,<9.0.0)", "esprima (>=4.0.1,<5.0.0)", "faiss-cpu (>=1,<2)", "feedparser (>=6.0.10,<7.0.0)", "fireworks-ai (>=0.9.0,<0.10.0)", "geopandas (>=0.13.1,<0.14.0)", "gitpython (>=3.1.32,<4.0.0)", "google-cloud-documentai (>=2.20.1,<3.0.0)", "gql (>=3.4.1,<4.0.0)", "gradientai (>=1.4.0,<2.0.0)", "hdbcli (>=2.19.21,<3.0.0)", "hologres-vector (>=0.0.6,<0.0.7)", "html2text (>=2020.1.16,<2021.0.0)", "httpx (>=0.24.1,<0.25.0)", "javelin-sdk (>=0.1.8,<0.2.0)", "jinja2 (>=3,<4)", "jq (>=1.4.1,<2.0.0)", "jsonschema (>1)", "lxml (>=4.9.2,<5.0.0)", "markdownify (>=0.11.6,<0.12.0)", "motor (>=3.3.1,<4.0.0)", "msal (>=1.25.0,<2.0.0)", "mwparserfromhell (>=0.6.4,<0.7.0)", "mwxml (>=0.3.3,<0.4.0)", "newspaper3k (>=0.2.8,<0.3.0)", "numexpr (>=2.8.6,<3.0.0)", "nvidia-riva-client (>=2.14.0,<3.0.0)", "oci (>=2.119.1,<3.0.0)", "openai (<2)", "openapi-pydantic (>=0.3.2,<0.4.0)", "oracle-ads (>=2.9.1,<3.0.0)", "pandas (>=2.0.1,<3.0.0)", "pdfminer-six (>=20221105,<20221106)", "pgvector (>=0.1.6,<0.2.0)", "praw (>=7.7.1,<8.0.0)", "psychicapi (>=0.8.0,<0.9.0)", "py-trello (>=0.19.0,<0.20.0)", "pymupdf (>=1.22.3,<2.0.0)", "pypdf (>=3.4.0,<4.0.0)", "pypdfium2 (>=4.10.0,<5.0.0)", "pyspark (>=3.4.0,<4.0.0)", "rank-bm25 (>=0.2.2,<0.3.0)", "rapidfuzz (>=3.1.1,<4.0.0)", "rapidocr-onnxruntime (>=1.3.2,<2.0.0)", "rdflib (==7.0.0)", "requests-toolbelt (>=1.0.0,<2.0.0)", "rspace_client (>=2.5.0,<3.0.0)", "scikit-learn (>=1.2.2,<2.0.0)", "sqlite-vss (>=0.1.2,<0.2.0)", "streamlit 
(>=1.18.0,<2.0.0)", "sympy (>=1.12,<2.0)", "telethon (>=1.28.5,<2.0.0)", "timescale-vector (>=0.0.1,<0.0.2)", "tqdm (>=4.48.0)", "upstash-redis (>=0.15.0,<0.16.0)", "xata (>=1.0.0a7,<2.0.0)", "xmltodict (>=0.13.0,<0.14.0)", "zhipuai (>=1.0.7,<2.0.0)"]
extended-testing = ["aiosqlite (>=0.19.0,<0.20.0)", "aleph-alpha-client (>=2.15.0,<3.0.0)", "anthropic (>=0.3.11,<0.4.0)", "arxiv (>=1.4,<2.0)", "assemblyai (>=0.17.0,<0.18.0)", "atlassian-python-api (>=3.36.0,<4.0.0)", "azure-ai-documentintelligence (>=1.0.0b1,<2.0.0)", "beautifulsoup4 (>=4,<5)", "bibtexparser (>=1.4.0,<2.0.0)", "cassio (>=0.1.0,<0.2.0)", "chardet (>=5.1.0,<6.0.0)", "cohere (>=4,<5)", "dashvector (>=1.0.1,<2.0.0)", "databricks-vectorsearch (>=0.21,<0.22)", "datasets (>=2.15.0,<3.0.0)", "dgml-utils (>=0.3.0,<0.4.0)", "elasticsearch (>=8.12.0,<9.0.0)", "esprima (>=4.0.1,<5.0.0)", "faiss-cpu (>=1,<2)", "feedparser (>=6.0.10,<7.0.0)", "fireworks-ai (>=0.9.0,<0.10.0)", "geopandas (>=0.13.1,<0.14.0)", "gitpython (>=3.1.32,<4.0.0)", "google-cloud-documentai (>=2.20.1,<3.0.0)", "gql (>=3.4.1,<4.0.0)", "gradientai (>=1.4.0,<2.0.0)", "hdbcli (>=2.19.21,<3.0.0)", "hologres-vector (>=0.0.6,<0.0.7)", "html2text (>=2020.1.16,<2021.0.0)", "httpx (>=0.24.1,<0.25.0)", "javelin-sdk (>=0.1.8,<0.2.0)", "jinja2 (>=3,<4)", "jq (>=1.4.1,<2.0.0)", "jsonschema (>1)", "lxml (>=4.9.2,<5.0.0)", "markdownify (>=0.11.6,<0.12.0)", "motor (>=3.3.1,<4.0.0)", "msal (>=1.25.0,<2.0.0)", "mwparserfromhell (>=0.6.4,<0.7.0)", "mwxml (>=0.3.3,<0.4.0)", "newspaper3k (>=0.2.8,<0.3.0)", "numexpr (>=2.8.6,<3.0.0)", "oci (>=2.119.1,<3.0.0)", "openai (<2)", "openapi-pydantic (>=0.3.2,<0.4.0)", "oracle-ads (>=2.9.1,<3.0.0)", "pandas (>=2.0.1,<3.0.0)", "pdfminer-six (>=20221105,<20221106)", "pgvector (>=0.1.6,<0.2.0)", "praw (>=7.7.1,<8.0.0)", "psychicapi (>=0.8.0,<0.9.0)", "py-trello (>=0.19.0,<0.20.0)", "pymupdf (>=1.22.3,<2.0.0)", "pypdf (>=3.4.0,<4.0.0)", "pypdfium2 (>=4.10.0,<5.0.0)", "pyspark (>=3.4.0,<4.0.0)", "rank-bm25 (>=0.2.2,<0.3.0)", "rapidfuzz (>=3.1.1,<4.0.0)", "rapidocr-onnxruntime (>=1.3.2,<2.0.0)", "rdflib (==7.0.0)", "requests-toolbelt (>=1.0.0,<2.0.0)", "rspace_client (>=2.5.0,<3.0.0)", "scikit-learn (>=1.2.2,<2.0.0)", "sqlite-vss (>=0.1.2,<0.2.0)", "streamlit 
(>=1.18.0,<2.0.0)", "sympy (>=1.12,<2.0)", "telethon (>=1.28.5,<2.0.0)", "timescale-vector (>=0.0.1,<0.0.2)", "tqdm (>=4.48.0)", "upstash-redis (>=0.15.0,<0.16.0)", "xata (>=1.0.0a7,<2.0.0)", "xmltodict (>=0.13.0,<0.14.0)", "zhipuai (>=1.0.7,<2.0.0)"]
[[package]]
name = "langchain-core"
version = "0.1.22"
version = "0.1.18"
description = "Building applications with LLMs through composability"
optional = false
python-versions = ">=3.8.1,<4.0"
files = [
{file = "langchain_core-0.1.22-py3-none-any.whl", hash = "sha256:d1263c2707ce18bb13654c88f891e53f39edec9b11ff7d0d0f23fd920927b2d6"},
{file = "langchain_core-0.1.22.tar.gz", hash = "sha256:deac12b3e42a08bbbaa2acf83d5f8dd2d5513256d8daf0e853e9d68ff4c99d79"},
{file = "langchain_core-0.1.18-py3-none-any.whl", hash = "sha256:5a60dc3c391b33834fb9c8b072abd7a0df4cbba8ce88eb1bcb288844000ab759"},
{file = "langchain_core-0.1.18.tar.gz", hash = "sha256:ad470b21cdfdc75e829cd91c8d8eb7e0438ab8ddb5b50828125ff7ada121ee7b"},
]
[package.dependencies]
anyio = ">=3,<5"
jsonpatch = ">=1.33,<2.0"
langsmith = ">=0.0.87,<0.0.88"
langsmith = ">=0.0.83,<0.1"
packaging = ">=23.2,<24.0"
pydantic = ">=1,<3"
PyYAML = ">=5.3"
@@ -1089,13 +1155,13 @@ tiktoken = ">=0.5.2,<0.6.0"
[[package]]
name = "langsmith"
version = "0.0.87"
version = "0.0.86"
description = "Client library to connect to the LangSmith LLM Tracing and Evaluation Platform."
optional = false
python-versions = ">=3.8.1,<4.0"
files = [
{file = "langsmith-0.0.87-py3-none-any.whl", hash = "sha256:8903d3811b9fc89eb18f5961c8e6935fbd2d0f119884fbf30dc70b8f8f4121fc"},
{file = "langsmith-0.0.87.tar.gz", hash = "sha256:36c4cc47e5b54be57d038036a30fb19ce6e4c73048cd7a464b8f25b459694d34"},
{file = "langsmith-0.0.86-py3-none-any.whl", hash = "sha256:7af15c36edb8c9fd9ae5c6d4fb940eb1da668b630a703d63c90c91e9be53aefb"},
{file = "langsmith-0.0.86.tar.gz", hash = "sha256:c1572824664810c4425b17f2d1e9a59d53992e6898df22a37236c62d3c80f59e"},
]
[package.dependencies]
@@ -1264,13 +1330,13 @@ mkdocs = ">=1.1"
[[package]]
name = "mkdocs-material"
version = "9.5.9"
version = "9.5.7"
description = "Documentation that simply works"
optional = false
python-versions = ">=3.8"
files = [
{file = "mkdocs_material-9.5.9-py3-none-any.whl", hash = "sha256:a5d62b73b3b74349e45472bfadc129c871dd2d4add68d84819580597b2f50d5d"},
{file = "mkdocs_material-9.5.9.tar.gz", hash = "sha256:635df543c01c25c412d6c22991872267723737d5a2f062490f33b2da1c013c6d"},
{file = "mkdocs_material-9.5.7-py3-none-any.whl", hash = "sha256:0be8ce8bcfebb52bae9b00cf9b851df45b8a92d629afcfd7f2c09b2dfa155ea3"},
{file = "mkdocs_material-9.5.7.tar.gz", hash = "sha256:16110292575d88a338d2961f3cb665cf12943ff8829e551a9b364f24019e46af"},
]
[package.dependencies]
@@ -1289,7 +1355,7 @@ regex = ">=2022.4"
requests = ">=2.26,<3.0"
[package.extras]
git = ["mkdocs-git-committers-plugin-2 (>=1.1,<2.0)", "mkdocs-git-revision-date-localized-plugin (>=1.2.4,<2.0)"]
git = ["mkdocs-git-committers-plugin-2 (>=1.1,<2.0)", "mkdocs-git-revision-date-localized-plugin (>=1.2,<2.0)"]
imaging = ["cairosvg (>=2.6,<3.0)", "pillow (>=10.2,<11.0)"]
recommended = ["mkdocs-minify-plugin (>=0.7,<1.0)", "mkdocs-redirects (>=1.2,<2.0)", "mkdocs-rss-plugin (>=1.6,<2.0)"]
@@ -1514,13 +1580,13 @@ files = [
[[package]]
name = "openai"
version = "1.12.0"
version = "1.11.1"
description = "The official Python library for the openai API"
optional = false
python-versions = ">=3.7.1"
files = [
{file = "openai-1.12.0-py3-none-any.whl", hash = "sha256:a54002c814e05222e413664f651b5916714e4700d041d5cf5724d3ae1a3e3481"},
{file = "openai-1.12.0.tar.gz", hash = "sha256:99c5d257d09ea6533d689d1cc77caa0ac679fa21efef8893d8b0832a86877f1b"},
{file = "openai-1.11.1-py3-none-any.whl", hash = "sha256:e0f388ce499f53f58079d0c1f571f356f2b168b84d0d24a412506b6abc714980"},
{file = "openai-1.11.1.tar.gz", hash = "sha256:f66b8fe431af43e09594147ef3cdcb79758285de72ebafd52be9700a2af41e99"},
]
[package.dependencies]
@@ -1565,6 +1631,30 @@ files = [
backoff = {version = ">=1.10.0,<3.0.0", markers = "python_version >= \"3.7\""}
opentelemetry-proto = "1.22.0"
[[package]]
name = "opentelemetry-exporter-otlp-proto-grpc"
version = "1.22.0"
description = "OpenTelemetry Collector Protobuf over gRPC Exporter"
optional = false
python-versions = ">=3.7"
files = [
{file = "opentelemetry_exporter_otlp_proto_grpc-1.22.0-py3-none-any.whl", hash = "sha256:b5bcadc129272004316a455e9081216d3380c1fc2231a928ea6a70aa90e173fb"},
{file = "opentelemetry_exporter_otlp_proto_grpc-1.22.0.tar.gz", hash = "sha256:1e0e5aa4bbabc74942f06f268deffd94851d12a8dc30b02527472ef1729fe5b1"},
]
[package.dependencies]
backoff = {version = ">=1.10.0,<3.0.0", markers = "python_version >= \"3.7\""}
deprecated = ">=1.2.6"
googleapis-common-protos = ">=1.52,<2.0"
grpcio = ">=1.0.0,<2.0.0"
opentelemetry-api = ">=1.15,<2.0"
opentelemetry-exporter-otlp-proto-common = "1.22.0"
opentelemetry-proto = "1.22.0"
opentelemetry-sdk = ">=1.22.0,<1.23.0"
[package.extras]
test = ["pytest-grpc"]
[[package]]
name = "opentelemetry-exporter-otlp-proto-http"
version = "1.22.0"
@@ -2582,40 +2672,38 @@ test = ["covdefaults (>=2.3)", "coverage (>=7.2.7)", "coverage-enable-subprocess
[[package]]
name = "watchdog"
version = "4.0.0"
version = "3.0.0"
description = "Filesystem events monitoring"
optional = false
python-versions = ">=3.8"
python-versions = ">=3.7"
files = [
{file = "watchdog-4.0.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:39cb34b1f1afbf23e9562501673e7146777efe95da24fab5707b88f7fb11649b"},
{file = "watchdog-4.0.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:c522392acc5e962bcac3b22b9592493ffd06d1fc5d755954e6be9f4990de932b"},
{file = "watchdog-4.0.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:6c47bdd680009b11c9ac382163e05ca43baf4127954c5f6d0250e7d772d2b80c"},
{file = "watchdog-4.0.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:8350d4055505412a426b6ad8c521bc7d367d1637a762c70fdd93a3a0d595990b"},
{file = "watchdog-4.0.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:c17d98799f32e3f55f181f19dd2021d762eb38fdd381b4a748b9f5a36738e935"},
{file = "watchdog-4.0.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:4986db5e8880b0e6b7cd52ba36255d4793bf5cdc95bd6264806c233173b1ec0b"},
{file = "watchdog-4.0.0-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:11e12fafb13372e18ca1bbf12d50f593e7280646687463dd47730fd4f4d5d257"},
{file = "watchdog-4.0.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:5369136a6474678e02426bd984466343924d1df8e2fd94a9b443cb7e3aa20d19"},
{file = "watchdog-4.0.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:76ad8484379695f3fe46228962017a7e1337e9acadafed67eb20aabb175df98b"},
{file = "watchdog-4.0.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:45cc09cc4c3b43fb10b59ef4d07318d9a3ecdbff03abd2e36e77b6dd9f9a5c85"},
{file = "watchdog-4.0.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:eed82cdf79cd7f0232e2fdc1ad05b06a5e102a43e331f7d041e5f0e0a34a51c4"},
{file = "watchdog-4.0.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:ba30a896166f0fee83183cec913298151b73164160d965af2e93a20bbd2ab605"},
{file = "watchdog-4.0.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:d18d7f18a47de6863cd480734613502904611730f8def45fc52a5d97503e5101"},
{file = "watchdog-4.0.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:2895bf0518361a9728773083908801a376743bcc37dfa252b801af8fd281b1ca"},
{file = "watchdog-4.0.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:87e9df830022488e235dd601478c15ad73a0389628588ba0b028cb74eb72fed8"},
{file = "watchdog-4.0.0-pp310-pypy310_pp73-macosx_10_9_x86_64.whl", hash = "sha256:6e949a8a94186bced05b6508faa61b7adacc911115664ccb1923b9ad1f1ccf7b"},
{file = "watchdog-4.0.0-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:6a4db54edea37d1058b08947c789a2354ee02972ed5d1e0dca9b0b820f4c7f92"},
{file = "watchdog-4.0.0-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:d31481ccf4694a8416b681544c23bd271f5a123162ab603c7d7d2dd7dd901a07"},
{file = "watchdog-4.0.0-py3-none-manylinux2014_aarch64.whl", hash = "sha256:8fec441f5adcf81dd240a5fe78e3d83767999771630b5ddfc5867827a34fa3d3"},
{file = "watchdog-4.0.0-py3-none-manylinux2014_armv7l.whl", hash = "sha256:6a9c71a0b02985b4b0b6d14b875a6c86ddea2fdbebd0c9a720a806a8bbffc69f"},
{file = "watchdog-4.0.0-py3-none-manylinux2014_i686.whl", hash = "sha256:557ba04c816d23ce98a06e70af6abaa0485f6d94994ec78a42b05d1c03dcbd50"},
{file = "watchdog-4.0.0-py3-none-manylinux2014_ppc64.whl", hash = "sha256:d0f9bd1fd919134d459d8abf954f63886745f4660ef66480b9d753a7c9d40927"},
{file = "watchdog-4.0.0-py3-none-manylinux2014_ppc64le.whl", hash = "sha256:f9b2fdca47dc855516b2d66eef3c39f2672cbf7e7a42e7e67ad2cbfcd6ba107d"},
{file = "watchdog-4.0.0-py3-none-manylinux2014_s390x.whl", hash = "sha256:73c7a935e62033bd5e8f0da33a4dcb763da2361921a69a5a95aaf6c93aa03a87"},
{file = "watchdog-4.0.0-py3-none-manylinux2014_x86_64.whl", hash = "sha256:6a80d5cae8c265842c7419c560b9961561556c4361b297b4c431903f8c33b269"},
{file = "watchdog-4.0.0-py3-none-win32.whl", hash = "sha256:8f9a542c979df62098ae9c58b19e03ad3df1c9d8c6895d96c0d51da17b243b1c"},
{file = "watchdog-4.0.0-py3-none-win_amd64.whl", hash = "sha256:f970663fa4f7e80401a7b0cbeec00fa801bf0287d93d48368fc3e6fa32716245"},
{file = "watchdog-4.0.0-py3-none-win_ia64.whl", hash = "sha256:9a03e16e55465177d416699331b0f3564138f1807ecc5f2de9d55d8f188d08c7"},
{file = "watchdog-4.0.0.tar.gz", hash = "sha256:e3e7065cbdabe6183ab82199d7a4f6b3ba0a438c5a512a68559846ccb76a78ec"},
{file = "watchdog-3.0.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:336adfc6f5cc4e037d52db31194f7581ff744b67382eb6021c868322e32eef41"},
{file = "watchdog-3.0.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:a70a8dcde91be523c35b2bf96196edc5730edb347e374c7de7cd20c43ed95397"},
{file = "watchdog-3.0.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:adfdeab2da79ea2f76f87eb42a3ab1966a5313e5a69a0213a3cc06ef692b0e96"},
{file = "watchdog-3.0.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:2b57a1e730af3156d13b7fdddfc23dea6487fceca29fc75c5a868beed29177ae"},
{file = "watchdog-3.0.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:7ade88d0d778b1b222adebcc0927428f883db07017618a5e684fd03b83342bd9"},
{file = "watchdog-3.0.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:7e447d172af52ad204d19982739aa2346245cc5ba6f579d16dac4bfec226d2e7"},
{file = "watchdog-3.0.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:9fac43a7466eb73e64a9940ac9ed6369baa39b3bf221ae23493a9ec4d0022674"},
{file = "watchdog-3.0.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:8ae9cda41fa114e28faf86cb137d751a17ffd0316d1c34ccf2235e8a84365c7f"},
{file = "watchdog-3.0.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:25f70b4aa53bd743729c7475d7ec41093a580528b100e9a8c5b5efe8899592fc"},
{file = "watchdog-3.0.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:4f94069eb16657d2c6faada4624c39464f65c05606af50bb7902e036e3219be3"},
{file = "watchdog-3.0.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:7c5f84b5194c24dd573fa6472685b2a27cc5a17fe5f7b6fd40345378ca6812e3"},
{file = "watchdog-3.0.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:3aa7f6a12e831ddfe78cdd4f8996af9cf334fd6346531b16cec61c3b3c0d8da0"},
{file = "watchdog-3.0.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:233b5817932685d39a7896b1090353fc8efc1ef99c9c054e46c8002561252fb8"},
{file = "watchdog-3.0.0-pp37-pypy37_pp73-macosx_10_9_x86_64.whl", hash = "sha256:13bbbb462ee42ec3c5723e1205be8ced776f05b100e4737518c67c8325cf6100"},
{file = "watchdog-3.0.0-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:8f3ceecd20d71067c7fd4c9e832d4e22584318983cabc013dbf3f70ea95de346"},
{file = "watchdog-3.0.0-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:c9d8c8ec7efb887333cf71e328e39cffbf771d8f8f95d308ea4125bf5f90ba64"},
{file = "watchdog-3.0.0-py3-none-manylinux2014_aarch64.whl", hash = "sha256:0e06ab8858a76e1219e68c7573dfeba9dd1c0219476c5a44d5333b01d7e1743a"},
{file = "watchdog-3.0.0-py3-none-manylinux2014_armv7l.whl", hash = "sha256:d00e6be486affb5781468457b21a6cbe848c33ef43f9ea4a73b4882e5f188a44"},
{file = "watchdog-3.0.0-py3-none-manylinux2014_i686.whl", hash = "sha256:c07253088265c363d1ddf4b3cdb808d59a0468ecd017770ed716991620b8f77a"},
{file = "watchdog-3.0.0-py3-none-manylinux2014_ppc64.whl", hash = "sha256:5113334cf8cf0ac8cd45e1f8309a603291b614191c9add34d33075727a967709"},
{file = "watchdog-3.0.0-py3-none-manylinux2014_ppc64le.whl", hash = "sha256:51f90f73b4697bac9c9a78394c3acbbd331ccd3655c11be1a15ae6fe289a8c83"},
{file = "watchdog-3.0.0-py3-none-manylinux2014_s390x.whl", hash = "sha256:ba07e92756c97e3aca0912b5cbc4e5ad802f4557212788e72a72a47ff376950d"},
{file = "watchdog-3.0.0-py3-none-manylinux2014_x86_64.whl", hash = "sha256:d429c2430c93b7903914e4db9a966c7f2b068dd2ebdd2fa9b9ce094c7d459f33"},
{file = "watchdog-3.0.0-py3-none-win32.whl", hash = "sha256:3ed7c71a9dccfe838c2f0b6314ed0d9b22e77d268c67e015450a29036a81f60f"},
{file = "watchdog-3.0.0-py3-none-win_amd64.whl", hash = "sha256:4c9956d27be0bb08fc5f30d9d0179a855436e655f046d288e2bcc11adfae893c"},
{file = "watchdog-3.0.0-py3-none-win_ia64.whl", hash = "sha256:5d9f3a10e02d7371cd929b5d8f11e87d4bad890212ed3901f9b4d68767bee759"},
{file = "watchdog-3.0.0.tar.gz", hash = "sha256:4d98a320595da7a7c5a18fc48cb633c2e73cda78f93cac2ef42d42bf609a33f9"},
]
[package.extras]
@@ -2832,4 +2920,4 @@ testing = ["big-O", "jaraco.functools", "jaraco.itertools", "more-itertools", "p
[metadata]
lock-version = "2.0"
python-versions = ">=3.10,<4.0"
content-hash = "83ad952af62ced4101ae67ba88be4a10e580f6f73b3bde06cb2f15d789acc0f5"
content-hash = "2be3f98f57af7ea47f0985000be807e67c1bb3a95598b637e50f2cec54d11c80"

View File

@@ -1,7 +1,7 @@
[tool.poetry]
name = "crewai"
version = "0.10.0"
version = "0.5.5"
description = "Cutting-edge framework for orchestrating role-playing, autonomous AI agents. By fostering collaborative intelligence, CrewAI empowers agents to work together seamlessly, tackling complex tasks."
authors = ["Joao Moura <joao@crewai.com>"]
readme = "README.md"

View File

@@ -3,9 +3,9 @@ from typing import Any, List, Optional
from langchain.agents.agent import RunnableAgent
from langchain.agents.format_scratchpad import format_log_to_str
from langchain.agents.output_parsers import ReActSingleInputOutputParser
from langchain.memory import ConversationSummaryMemory
from langchain.tools.render import render_text_description
from langchain_core.runnables.config import RunnableConfig
from langchain_openai import ChatOpenAI
from pydantic import (
UUID4,
@@ -19,7 +19,12 @@ from pydantic import (
)
from pydantic_core import PydanticCustomError
from crewai.agents import CacheHandler, CrewAgentExecutor, ToolsHandler
from crewai.agents import (
CacheHandler,
CrewAgentExecutor,
CrewAgentOutputParser,
ToolsHandler,
)
from crewai.utilities import I18N, Logger, Prompts, RPMController
@@ -41,7 +46,6 @@ class Agent(BaseModel):
verbose: Whether the agent execution should be in verbose mode.
allow_delegation: Whether the agent is allowed to delegate tasks to other agents.
tools: Tools at agents disposal
step_callback: Callback to be executed after each step of the agent execution.
"""
__hash__ = object.__hash__ # type: ignore
@@ -86,10 +90,6 @@ class Agent(BaseModel):
cache_handler: InstanceOf[CacheHandler] = Field(
default=CacheHandler(), description="An instance of the CacheHandler class."
)
step_callback: Optional[Any] = Field(
default=None,
description="Callback to be executed after each step of the agent execution.",
)
i18n: I18N = Field(default=I18N(), description="Internationalization settings.")
llm: Any = Field(
default_factory=lambda: ChatOpenAI(
@@ -125,7 +125,7 @@ class Agent(BaseModel):
def execute_task(
self,
task: Any,
task: str,
context: Optional[str] = None,
tools: Optional[List[Any]] = None,
) -> str:
@@ -140,25 +140,21 @@ class Agent(BaseModel):
Output of the agent
"""
task_prompt = task.prompt()
if context:
task_prompt = self.i18n.slice("task_with_context").format(
task=task_prompt, context=context
task = self.i18n.slice("task_with_context").format(
task=task, context=context
)
tools = tools or self.tools
self.agent_executor.tools = tools
self.agent_executor.task = task
self.agent_executor.tools_description = (render_text_description(tools),)
self.agent_executor.tools_names = self.__tools_names(tools)
result = self.agent_executor.invoke(
{
"input": task_prompt,
"tool_names": self.agent_executor.tools_names,
"tools": self.agent_executor.tools_description,
}
"input": task,
"tool_names": self.__tools_names(tools),
"tools": render_text_description(tools),
},
RunnableConfig(callbacks=[self.tools_handler]),
)["output"]
if self.max_rpm:
@@ -174,7 +170,7 @@ class Agent(BaseModel):
"""
self.cache_handler = cache_handler
self.tools_handler = ToolsHandler(cache=self.cache_handler)
self.create_agent_executor()
self._create_agent_executor()
def set_rpm_controller(self, rpm_controller: RPMController) -> None:
"""Set the rpm controller for the agent.
@@ -184,9 +180,9 @@ class Agent(BaseModel):
"""
if not self._rpm_controller:
self._rpm_controller = rpm_controller
self.create_agent_executor()
self._create_agent_executor()
def create_agent_executor(self) -> None:
def _create_agent_executor(self) -> None:
"""Create an agent executor for the agent.
Returns:
@@ -199,20 +195,17 @@ class Agent(BaseModel):
"agent_scratchpad": lambda x: format_log_to_str(x["intermediate_steps"]),
}
executor_args = {
"llm": self.llm,
"i18n": self.i18n,
"tools": self.tools,
"verbose": self.verbose,
"handle_parsing_errors": True,
"max_iterations": self.max_iter,
"step_callback": self.step_callback,
"tools_handler": self.tools_handler,
}
if self._rpm_controller:
executor_args[
"request_within_rpm_limit"
] = self._rpm_controller.check_or_wait
executor_args["request_within_rpm_limit"] = (
self._rpm_controller.check_or_wait
)
if self.memory:
summary_memory = ConversationSummaryMemory(
@@ -232,7 +225,14 @@ class Agent(BaseModel):
bind = self.llm.bind(stop=[self.i18n.slice("observation")])
inner_agent = (
agent_args | execution_prompt | bind | ReActSingleInputOutputParser()
agent_args
| execution_prompt
| bind
| CrewAgentOutputParser(
tools_handler=self.tools_handler,
cache=self.cache_handler,
i18n=self.i18n,
)
)
self.agent_executor = CrewAgentExecutor(
agent=RunnableAgent(runnable=inner_agent), **executor_args

View File

@@ -1,3 +1,4 @@
from .cache.cache_handler import CacheHandler
from .executor import CrewAgentExecutor
from .output_parser import CrewAgentOutputParser
from .tools_handler import ToolsHandler

View File

@@ -1 +1,2 @@
from .cache_handler import CacheHandler
from .cache_hit import CacheHit

View File

@@ -10,7 +10,9 @@ class CacheHandler:
self._cache = {}
def add(self, tool, input, output):
input = input.strip()
self._cache[f"{tool}-{input}"] = output
def read(self, tool, input) -> Optional[str]:
input = input.strip()
return self._cache.get(f"{tool}-{input}")

14
src/crewai/agents/cache/cache_hit.py vendored Normal file
View File

@@ -0,0 +1,14 @@
from langchain_core.agents import AgentAction
from pydantic.v1 import BaseModel, Field
from .cache_handler import CacheHandler
class CacheHit(BaseModel):
    """Marker object produced when a tool call can be served from the cache.

    The output parser returns an instance of this class (instead of a plain
    ``AgentAction``) when the requested tool/input pair already has a stored
    result in the cache, so the executor can read it back rather than
    re-running the tool.
    """

    class Config:
        # AgentAction and CacheHandler are not pydantic models, so pydantic
        # must be told to accept them as field types.
        arbitrary_types_allowed = True

    # The agent action (tool + input) whose result was found in the cache.
    action: AgentAction = Field(description="Action taken")
    # Cache handler that holds the previously computed output for the action.
    cache: CacheHandler = Field(description="Cache Handler for the tool")

View File

@@ -0,0 +1,30 @@
from langchain_core.exceptions import OutputParserException
from crewai.utilities import I18N
class TaskRepeatedUsageException(OutputParserException):
    """Exception raised when a tool is used twice in a row with the same input.

    Subclasses LangChain's ``OutputParserException`` with ``send_to_llm=True``
    so the localized error message is fed back to the LLM as an observation,
    nudging it away from repeating the identical tool call.
    """

    i18n: I18N = I18N()
    error: str = "TaskRepeatedUsageException"
    message: str

    def __init__(self, i18n: I18N, tool: str, tool_input: str, text: str):
        """Build the localized repeated-usage message.

        Args:
            i18n: Internationalization helper providing the error template.
            tool: Name of the tool that was repeated.
            tool_input: Input string passed to the repeated tool call.
            text: Raw LLM output that triggered the exception (kept so it
                can be returned to the LLM as ``llm_output``).
        """
        self.i18n = i18n
        self.text = text
        self.tool = tool
        self.tool_input = tool_input
        # Format the localized "task_repeated_usage" template with the
        # offending tool name and input.
        self.message = self.i18n.errors("task_repeated_usage").format(
            tool=tool, tool_input=tool_input
        )
        super().__init__(
            error=self.error,
            observation=self.message,
            send_to_llm=True,
            llm_output=self.text,
        )

    def __str__(self):
        # Present the human-readable localized message, not the class name.
        return self.message

View File

@@ -10,25 +10,18 @@ from langchain_core.exceptions import OutputParserException
from langchain_core.pydantic_v1 import root_validator
from langchain_core.tools import BaseTool
from langchain_core.utils.input import get_color_mapping
from pydantic import InstanceOf
from crewai.agents.tools_handler import ToolsHandler
from crewai.tools.tool_usage import ToolUsage
from crewai.agents.cache.cache_hit import CacheHit
from crewai.tools.cache_tools import CacheTools
from crewai.utilities import I18N
class CrewAgentExecutor(AgentExecutor):
i18n: I18N = I18N()
llm: Any = None
iterations: int = 0
task: Any = None
tools_description: str = ""
tools_names: str = ""
request_within_rpm_limit: Any = None
tools_handler: InstanceOf[ToolsHandler] = None
max_iterations: Optional[int] = 15
force_answer_max_iterations: Optional[int] = None
step_callback: Optional[Any] = None
@root_validator()
def set_force_answer_max_iterations(cls, values: Dict) -> Dict:
@@ -38,6 +31,11 @@ class CrewAgentExecutor(AgentExecutor):
def _should_force_answer(self) -> bool:
return True if self.iterations == self.force_answer_max_iterations else False
def _force_answer(self, output: AgentAction):
return AgentStep(
action=output, observation=self.i18n.errors("force_final_answer")
)
def _call(
self,
inputs: Dict[str, str],
@@ -65,10 +63,6 @@ class CrewAgentExecutor(AgentExecutor):
intermediate_steps,
run_manager=run_manager,
)
if self.step_callback:
self.step_callback(next_step_output)
if isinstance(next_step_output, AgentFinish):
return self._return(
next_step_output, intermediate_steps, run_manager=run_manager
@@ -111,17 +105,17 @@ class CrewAgentExecutor(AgentExecutor):
callbacks=run_manager.get_child() if run_manager else None,
**inputs,
)
if self._should_force_answer():
if isinstance(output, AgentAction) or isinstance(output, AgentFinish):
output = output
else:
raise ValueError(
f"Unexpected output type from agent: {type(output)}"
)
yield AgentStep(
action=output, observation=self.i18n.errors("force_final_answer")
)
if isinstance(output, CacheHit):
output = output.action
if isinstance(output, AgentAction):
yield self._force_answer(output)
return
if isinstance(output, list):
yield from [self._force_answer(action) for action in output]
return
yield output
return
except OutputParserException as e:
@@ -162,9 +156,7 @@ class CrewAgentExecutor(AgentExecutor):
)
if self._should_force_answer():
yield AgentStep(
action=output, observation=self.i18n.errors("force_final_answer")
)
yield self._force_answer(output)
return
yield AgentStep(action=output, observation=observation)
@@ -175,6 +167,17 @@ class CrewAgentExecutor(AgentExecutor):
yield output
return
# Override tool usage to use CacheTools
if isinstance(output, CacheHit):
cache = output.cache
action = output.action
tool = CacheTools(cache_handler=cache).tool()
output = action.copy()
output.tool_input = f"tool:{action.tool}|input:{action.tool_input}"
output.tool = tool.name
name_to_tool_map[tool.name] = tool
color_mapping[tool.name] = color_mapping[action.tool]
actions: List[AgentAction]
actions = [output] if isinstance(output, AgentAction) else output
yield from actions
@@ -185,18 +188,18 @@ class CrewAgentExecutor(AgentExecutor):
if agent_action.tool in name_to_tool_map:
tool = name_to_tool_map[agent_action.tool]
return_direct = tool.return_direct
color_mapping[agent_action.tool]
color = color_mapping[agent_action.tool]
tool_run_kwargs = self.agent.tool_run_logging_kwargs()
if return_direct:
tool_run_kwargs["llm_prefix"] = ""
observation = ToolUsage(
tools_handler=self.tools_handler,
tools=self.tools,
tools_description=self.tools_description,
tools_names=self.tools_names,
llm=self.llm,
task=self.task,
).use(agent_action.log)
# We then call the tool on the tool input to get an observation
observation = tool.run(
agent_action.tool_input,
verbose=self.verbose,
color=color,
callbacks=run_manager.get_child() if run_manager else None,
**tool_run_kwargs,
)
else:
tool_run_kwargs = self.agent.tool_run_logging_kwargs()
observation = InvalidTool().run(

View File

@@ -0,0 +1,79 @@
import re
from typing import Union
from langchain.agents.output_parsers import ReActSingleInputOutputParser
from langchain_core.agents import AgentAction, AgentFinish
from crewai.agents.cache import CacheHandler, CacheHit
from crewai.agents.exceptions import TaskRepeatedUsageException
from crewai.agents.tools_handler import ToolsHandler
from crewai.utilities import I18N
# Marker string that signals the LLM has produced its final answer.
FINAL_ANSWER_ACTION = "Final Answer:"
# Message prefix for the ambiguous case where the LLM output contains both a
# final answer and a parse-able action (convention inherited from LangChain's
# ReAct output parsers).
FINAL_ANSWER_AND_PARSABLE_ACTION_ERROR_MESSAGE = (
    "Parsing LLM output produced both a final answer and a parse-able action:"
)
class CrewAgentOutputParser(ReActSingleInputOutputParser):
    """Parses ReAct-style LLM calls that have a single tool input.

    Expects output to be in one of two formats.

    If the output signals that an action should be taken,
    should be in the below format. This will result in an AgentAction
    being returned.

    ```
    Thought: agent thought here
    Action: search
    Action Input: what is the temperature in SF?
    ```

    If the output signals that a final answer should be given,
    should be in the below format. This will result in an AgentFinish
    being returned.

    ```
    Thought: agent thought here
    Final Answer: The temperature is 100 degrees
    ```

    It also prevents the same tool from being reused twice in a row with the
    same input (raising TaskRepeatedUsageException), and short-circuits to a
    CacheHit when the tool/input pair already has a cached result.
    """

    class Config:
        # ToolsHandler / CacheHandler / I18N are plain classes, not pydantic
        # models, so allow arbitrary field types.
        arbitrary_types_allowed = True

    # Tracks the last tool/input pair used, for repeated-usage detection.
    tools_handler: ToolsHandler
    # Cache consulted before letting the executor actually run the tool.
    cache: CacheHandler
    # Localization helper passed through to TaskRepeatedUsageException.
    i18n: I18N

    def parse(self, text: str) -> Union[AgentAction, AgentFinish, CacheHit]:
        """Parse raw LLM text into an action, finish, or cache hit.

        Raises:
            TaskRepeatedUsageException: If the parsed tool and input exactly
                match the previously used tool call.
        """
        # Same Action/Action Input pattern used by the parent ReAct parser;
        # tolerates optional digits and flexible whitespace around the labels.
        regex = (
            r"Action\s*\d*\s*:[\s]*(.*?)[\s]*Action\s*\d*\s*Input\s*\d*\s*:[\s]*(.*)"
        )
        if action_match := re.search(regex, text, re.DOTALL):
            action = action_match.group(1).strip()
            action_input = action_match.group(2)
            # Normalize the input: trim surrounding spaces, then quotes.
            tool_input = action_input.strip(" ")
            tool_input = tool_input.strip('"')

            if last_tool_usage := self.tools_handler.last_used_tool:
                usage = {
                    "tool": action,
                    "input": tool_input,
                }
                # Exact repeat of the previous tool call -> push back on the
                # LLM instead of executing the tool again.
                if usage == last_tool_usage:
                    raise TaskRepeatedUsageException(
                        text=text,
                        tool=action,
                        tool_input=tool_input,
                        i18n=self.i18n,
                    )

            # A cached result exists for this tool/input: hand the executor a
            # CacheHit so it can serve the stored output.
            if self.cache.read(action, tool_input):
                agent_action = AgentAction(action, tool_input, text)
                return CacheHit(action=agent_action, cache=self.cache)

        # No action matched (or no short-circuit applied): defer to the
        # standard ReAct parsing (AgentAction or AgentFinish).
        return super().parse(text)

View File

@@ -1,30 +1,44 @@
from typing import Any
from typing import Any, Dict
from langchain.callbacks.base import BaseCallbackHandler
from ..tools.cache_tools import CacheTools
from ..tools.tool_calling import ToolCalling
from .cache.cache_handler import CacheHandler
class ToolsHandler:
class ToolsHandler(BaseCallbackHandler):
"""Callback handler for tool usage."""
last_used_tool: ToolCalling = {}
last_used_tool: Dict[str, Any] = {}
cache: CacheHandler
def __init__(self, cache: CacheHandler):
def __init__(self, cache: CacheHandler, **kwargs: Any):
"""Initialize the callback handler."""
self.cache = cache
self.last_used_tool = {}
super().__init__(**kwargs)
def on_tool_start(self, calling: ToolCalling) -> Any:
def on_tool_start(
self, serialized: Dict[str, Any], input_str: str, **kwargs: Any
) -> Any:
"""Run when tool starts running."""
self.last_used_tool = calling
name = serialized.get("name")
if name not in ["invalid_tool", "_Exception"]:
tools_usage = {
"tool": name,
"input": input_str,
}
self.last_used_tool = tools_usage
def on_tool_end(self, calling: ToolCalling, output: str) -> Any:
def on_tool_end(self, output: str, **kwargs: Any) -> Any:
"""Run when tool ends running."""
if self.last_used_tool.function_name != CacheTools().name:
self.cache.add(
tool=calling.function_name,
input=calling.arguments,
output=output,
)
if (
"is not a valid tool" not in output
and "Invalid or incomplete response" not in output
and "Invalid Format" not in output
):
if self.last_used_tool["tool"] != CacheTools().name:
self.cache.add(
tool=self.last_used_tool["tool"],
input=self.last_used_tool["input"],
output=output,
)

View File

@@ -35,16 +35,12 @@ class Crew(BaseModel):
process: The process flow that the crew will follow (e.g., sequential).
verbose: Indicates the verbosity level for logging during execution.
config: Configuration settings for the crew.
_cache_handler: Handles caching for the crew's operations.
max_rpm: Maximum number of requests per minute for the crew execution to be respected.
id: A unique identifier for the crew instance.
full_output: Whether the crew should return the full output with all tasks outputs or just the final output.
step_callback: Callback to be executed after each step for every agents execution.
share_crew: Whether you want to share the complete crew information and execution with crewAI to make the library better, and allow us to train models.
_cache_handler: Handles caching for the crew's operations.
"""
__hash__ = object.__hash__ # type: ignore
_execution_span: Any = PrivateAttr()
_rpm_controller: RPMController = PrivateAttr()
_logger: Logger = PrivateAttr()
_cache_handler: InstanceOf[CacheHandler] = PrivateAttr(default=CacheHandler())
@@ -53,20 +49,11 @@ class Crew(BaseModel):
agents: List[Agent] = Field(default_factory=list)
process: Process = Field(default=Process.sequential)
verbose: Union[int, bool] = Field(default=0)
full_output: Optional[bool] = Field(
default=False,
description="Whether the crew should return the full output with all tasks outputs or just the final output.",
)
manager_llm: Optional[Any] = Field(
description="Language model that will run the agent.", default=None
)
config: Optional[Union[Json, Dict[str, Any]]] = Field(default=None)
id: UUID4 = Field(default_factory=uuid.uuid4, frozen=True)
share_crew: Optional[bool] = Field(default=False)
step_callback: Optional[Any] = Field(
default=None,
description="Callback to be executed after each step for all agents execution.",
)
max_rpm: Optional[int] = Field(
default=None,
description="Maximum number of requests per minute for the crew execution to be respected.",
@@ -107,7 +94,6 @@ class Crew(BaseModel):
self._logger = Logger(self.verbose)
self._rpm_controller = RPMController(max_rpm=self.max_rpm, logger=self._logger)
self._telemetry = Telemetry()
self._telemetry.set_tracer()
self._telemetry.crew_creation(self)
return self
@@ -151,7 +137,6 @@ class Crew(BaseModel):
"missing_keys_in_config", "Config should have 'agents' and 'tasks'.", {}
)
self.process = self.config.get("process", self.process)
self.agents = [Agent(**agent) for agent in self.config["agents"]]
self.tasks = [self._create_task(task) for task in self.config["tasks"]]
@@ -172,13 +157,8 @@ class Crew(BaseModel):
def kickoff(self) -> str:
"""Starts the crew to work on its assigned tasks."""
self._execution_span = self._telemetry.crew_execution_span(self)
for agent in self.agents:
agent.i18n = I18N(language=self.language)
if (self.step_callback) and (not agent.step_callback):
agent.step_callback = self.step_callback
agent.create_agent_executor()
if self.process == Process.sequential:
return self._run_sequential_process()
@@ -191,7 +171,7 @@ class Crew(BaseModel):
def _run_sequential_process(self) -> str:
"""Executes tasks sequentially and returns the final output."""
task_output = ""
task_output: str = ""
for task in self.tasks:
if task.agent is not None and task.agent.allow_delegation:
agents_for_delegation = [
@@ -205,13 +185,16 @@ class Crew(BaseModel):
output = task.execute(context=task_output)
if not task.async_execution:
assert output is not None
task_output = output
role = task.agent.role if task.agent is not None else "None"
self._logger.log("debug", f"[{role}] Task output: {task_output}\n\n")
self._finish_execution(task_output)
return self._format_output(task_output)
if self.max_rpm:
self._rpm_controller.stop_rpm_counter()
return task_output
def _run_hierarchical_process(self) -> str:
"""Creates and assigns a manager agent to make sure the crew completes the tasks."""
@@ -226,33 +209,23 @@ class Crew(BaseModel):
verbose=True,
)
task_output = ""
task_output: str = ""
for task in self.tasks:
self._logger.log("debug", f"Working Agent: {manager.role}")
self._logger.log("info", f"Starting Task: {task.description}")
task_output = task.execute(
output = task.execute(
agent=manager, context=task_output, tools=manager.tools
)
if not task.async_execution:
assert output is not None
task_output = output
self._logger.log(
"debug", f"[{manager.role}] Task output: {task_output}\n\n"
)
self._finish_execution(task_output)
return self._format_output(task_output)
def _format_output(self, output: str) -> str:
"""Formats the output of the crew execution."""
if self.full_output:
return {
"final_output": output,
"tasks_outputs": [task.output for task in self.tasks],
}
else:
return output
def _finish_execution(self, output) -> None:
if self.max_rpm:
self._rpm_controller.stop_rpm_counter()
self._telemetry.end_crew(self, output)
return task_output

View File

@@ -17,9 +17,8 @@ class Task(BaseModel):
arbitrary_types_allowed = True
__hash__ = object.__hash__ # type: ignore
used_tools: int = 0
i18n: I18N = I18N()
thread: threading.Thread = None
thread: threading.Thread | None = None
description: str = Field(description="Description of the actual task.")
callback: Optional[Any] = Field(
description="Callback to be executed after the task is completed.", default=None
@@ -72,7 +71,7 @@ class Task(BaseModel):
agent: Agent | None = None,
context: Optional[str] = None,
tools: Optional[List[Any]] = None,
) -> str:
) -> str | None:
"""Execute the task.
Returns:
@@ -86,40 +85,38 @@ class Task(BaseModel):
)
if self.context:
context = []
results = []
for task in self.context:
if task.async_execution:
assert task.thread is not None
task.thread.join()
context.append(task.output.result)
context = "\n".join(context)
if task.output is not None:
results.append(task.output.result)
context = "\n".join(results)
tools = tools or self.tools
if self.async_execution:
self.thread = threading.Thread(
target=self._execute, args=(agent, self, context, tools)
target=self._execute, args=(agent, self._prompt(), context, tools)
)
self.thread.start()
else:
result = self._execute(
task=self,
agent=agent,
task_prompt=self._prompt(),
context=context,
tools=tools,
)
return result
def _execute(self, agent, task, context, tools):
result = agent.execute_task(
task=task,
context=context,
tools=tools,
)
def _execute(self, agent, task_prompt, context, tools):
result = agent.execute_task(task=task_prompt, context=context, tools=tools)
self.output = TaskOutput(description=self.description, result=result)
self.callback(self.output) if self.callback else None
return result
def prompt(self) -> str:
def _prompt(self) -> str:
"""Prompt the task.
Returns:

View File

@@ -2,7 +2,6 @@ import json
import os
import platform
import socket
from typing import Any
import pkg_resources
from opentelemetry import trace
@@ -33,202 +32,75 @@ class Telemetry:
- Language model being used
- Roles of agents in a crew
- Tools names available
Users can opt in to sharing more complete data using the `share_crew`
attribute in the Crew class.
"""
def __init__(self):
self.ready = False
try:
telemetry_endpoint = "http://telemetry.crewai.com:4318"
self.resource = Resource(attributes={SERVICE_NAME: "crewAI-telemetry"})
self.provider = TracerProvider(resource=self.resource)
processor = BatchSpanProcessor(
OTLPSpanExporter(endpoint=f"{telemetry_endpoint}/v1/traces")
)
self.provider.add_span_processor(processor)
self.ready = True
except Exception:
pass
def set_tracer(self):
trace.set_tracer_provider(self.provider)
telemetry_endpoint = "http://telemetry.crewai.com:4318"
self.resource = Resource(attributes={SERVICE_NAME: "crewAI-telemetry"})
provider = TracerProvider(resource=self.resource)
processor = BatchSpanProcessor(
OTLPSpanExporter(endpoint=f"{telemetry_endpoint}/v1/traces")
)
provider.add_span_processor(processor)
trace.set_tracer_provider(provider)
def crew_creation(self, crew):
"""Records the creation of a crew."""
if self.ready:
try:
tracer = trace.get_tracer("crewai.telemetry")
span = tracer.start_span("Crew Created")
self._add_attribute(
span,
"crewai_version",
pkg_resources.get_distribution("crewai").version,
)
self._add_attribute(span, "python_version", platform.python_version())
self._add_attribute(span, "hostname", socket.gethostname())
self._add_attribute(span, "crew_id", str(crew.id))
self._add_attribute(span, "crew_process", crew.process)
self._add_attribute(span, "crew_language", crew.language)
self._add_attribute(span, "crew_number_of_tasks", len(crew.tasks))
self._add_attribute(span, "crew_number_of_agents", len(crew.agents))
self._add_attribute(
span,
"crew_agents",
json.dumps(
[
{
"id": str(agent.id),
"role": agent.role,
"memory_enabled?": agent.memory,
"verbose?": agent.verbose,
"max_iter": agent.max_iter,
"max_rpm": agent.max_rpm,
"i18n": agent.i18n.language,
"llm": json.dumps(self._safe_llm_attributes(agent.llm)),
"delegation_enabled?": agent.allow_delegation,
"tools_names": [tool.name for tool in agent.tools],
}
for agent in crew.agents
]
),
)
self._add_attribute(
span,
"crew_tasks",
json.dumps(
[
{
"id": str(task.id),
"async_execution?": task.async_execution,
"agent_role": task.agent.role if task.agent else "None",
"tools_names": [tool.name for tool in task.tools],
}
for task in crew.tasks
]
),
)
self._add_attribute(span, "platform", platform.platform())
self._add_attribute(span, "platform_release", platform.release())
self._add_attribute(span, "platform_system", platform.system())
self._add_attribute(span, "platform_version", platform.version())
self._add_attribute(span, "cpus", os.cpu_count())
span.set_status(Status(StatusCode.OK))
span.end()
except Exception:
pass
try:
tracer = trace.get_tracer("crewai.telemetry")
span = tracer.start_span("Crew Created")
self.add_attribute(
span, "crewai_version", pkg_resources.get_distribution("crewai").version
)
self.add_attribute(span, "python_version", platform.python_version())
self.add_attribute(span, "hostname", socket.gethostname())
self.add_attribute(span, "crewid", str(crew.id))
self.add_attribute(span, "crew_process", crew.process)
self.add_attribute(span, "crew_language", crew.language)
self.add_attribute(span, "crew_number_of_tasks", len(crew.tasks))
self.add_attribute(span, "crew_number_of_agents", len(crew.agents))
self.add_attribute(
span,
"crew_agents",
json.dumps(
[
{
"id": str(agent.id),
"role": agent.role,
"memory_enabled?": agent.memory,
"llm": json.dumps(self._safe_llm_attributes(agent.llm)),
"delegation_enabled?": agent.allow_delegation,
"tools_names": [tool.name for tool in agent.tools],
}
for agent in crew.agents
]
),
)
self.add_attribute(
span,
"crew_tasks",
json.dumps(
[
{
"id": str(task.id),
"async_execution?": task.async_execution,
"tools_names": [tool.name for tool in task.tools],
}
for task in crew.tasks
]
),
)
self.add_attribute(span, "platform", platform.platform())
self.add_attribute(span, "platform_release", platform.release())
self.add_attribute(span, "platform_system", platform.system())
self.add_attribute(span, "platform_version", platform.version())
self.add_attribute(span, "cpus", os.cpu_count())
span.set_status(Status(StatusCode.OK))
span.end()
except Exception:
pass
def tool_usage(self, llm: Any, tool_name: str, attempts: int):
"""Records the usage of a tool by an agent."""
if self.ready:
try:
tracer = trace.get_tracer("crewai.telemetry")
span = tracer.start_span("Tool Usage")
self._add_attribute(span, "tool_name", tool_name)
self._add_attribute(span, "attempts", attempts)
self._add_attribute(
span, "llm", json.dumps(self._safe_llm_attributes(llm))
)
span.set_status(Status(StatusCode.OK))
span.end()
except Exception:
pass
def tool_usage_error(self, llm: Any):
"""Records the usage of a tool by an agent."""
if self.ready:
try:
tracer = trace.get_tracer("crewai.telemetry")
span = tracer.start_span("Tool Usage Error")
self._add_attribute(
span, "llm", json.dumps(self._safe_llm_attributes(llm))
)
span.set_status(Status(StatusCode.OK))
span.end()
except Exception:
pass
def crew_execution_span(self, crew):
"""Records the complete execution of a crew.
This is only collected if the user has opted-in to share the crew.
"""
if (self.ready) and (crew.share_crew):
try:
tracer = trace.get_tracer("crewai.telemetry")
span = tracer.start_span("Crew Execution")
self._add_attribute(span, "crew_id", str(crew.id))
self._add_attribute(
span,
"crew_agents",
json.dumps(
[
{
"id": str(agent.id),
"role": agent.role,
"goal": agent.goal,
"backstory": agent.backstory,
"memory_enabled?": agent.memory,
"verbose?": agent.verbose,
"max_iter": agent.max_iter,
"max_rpm": agent.max_rpm,
"i18n": agent.i18n.language,
"llm": json.dumps(self._safe_llm_attributes(agent.llm)),
"delegation_enabled?": agent.allow_delegation,
"tools_names": [tool.name for tool in agent.tools],
}
for agent in crew.agents
]
),
)
self._add_attribute(
span,
"crew_tasks",
json.dumps(
[
{
"id": str(task.id),
"description": task.description,
"async_execution?": task.async_execution,
"output": task.expected_output,
"agent_role": task.agent.role if task.agent else "None",
"context": [task.description for task in task.context]
if task.context
else "None",
"tools_names": [tool.name for tool in task.tools],
}
for task in crew.tasks
]
),
)
return span
except Exception:
pass
def end_crew(self, crew, output):
if (self.ready) and (crew.share_crew):
try:
self._add_attribute(crew._execution_span, "crew_output", output)
self._add_attribute(
crew._execution_span,
"crew_tasks_output",
json.dumps(
[
{
"id": str(task.id),
"description": task.description,
"output": task.output.result,
}
for task in crew.tasks
]
),
)
crew._execution_span.set_status(Status(StatusCode.OK))
crew._execution_span.end()
except Exception:
pass
def _add_attribute(self, span, key, value):
def add_attribute(self, span, key, value):
"""Add an attribute to a span."""
try:
return span.set_attribute(key, value)

View File

@@ -1,10 +1,9 @@
from typing import List
from langchain.tools import StructuredTool
from langchain.tools import Tool
from pydantic import BaseModel, Field
from crewai.agent import Agent
from crewai.task import Task
from crewai.utilities import I18N
@@ -16,14 +15,14 @@ class AgentTools(BaseModel):
def tools(self):
return [
StructuredTool.from_function(
Tool.from_function(
func=self.delegate_work,
name="Delegate work to co-worker",
description=self.i18n.tools("delegate_work").format(
coworkers=", ".join([agent.role for agent in self.agents])
),
),
StructuredTool.from_function(
Tool.from_function(
func=self.ask_question,
name="Ask question to co-worker",
description=self.i18n.tools("ask_question").format(
@@ -32,16 +31,24 @@ class AgentTools(BaseModel):
),
]
def delegate_work(self, coworker: str, task: str, context: str):
def delegate_work(self, command):
"""Useful to delegate a specific task to a coworker."""
return self._execute(coworker, task, context)
return self._execute(command)
def ask_question(self, coworker: str, question: str, context: str):
def ask_question(self, command):
"""Useful to ask a question, opinion or take from a coworker."""
return self._execute(coworker, question, context)
return self._execute(command)
def _execute(self, agent, task, context):
def _execute(self, command):
"""Execute the command."""
try:
agent, task, context = command.split("|")
except ValueError:
return self.i18n.errors("agent_tool_missing_param")
if not agent or not task or not context:
return self.i18n.errors("agent_tool_missing_param")
agent = [
available_agent
for available_agent in self.agents
@@ -54,5 +61,4 @@ class AgentTools(BaseModel):
)
agent = agent[0]
task = Task(description=task, agent=agent)
return agent.execute_task(task, context)

View File

@@ -1,4 +1,4 @@
from langchain.tools import StructuredTool
from langchain.tools import Tool
from pydantic import BaseModel, ConfigDict, Field
from crewai.agents.cache import CacheHandler
@@ -15,7 +15,7 @@ class CacheTools(BaseModel):
)
def tool(self):
return StructuredTool.from_function(
return Tool.from_function(
func=self.hit_cache,
name=self.name,
description="Reads directly from the cache",

View File

@@ -1,12 +0,0 @@
from typing import Any, Dict
from pydantic.v1 import BaseModel, Field
class ToolCalling(BaseModel):
    """Structured description of a single tool invocation.

    Produced by parsing the LLM's free-text tool request into a validated
    schema: the tool (function) to call and the keyword arguments to pass it.
    """

    # Name of the tool/function the agent wants to invoke.
    function_name: str = Field(
        ..., description="The name of the function to be called."
    )
    # Keyword arguments forwarded to the tool; fixed typo in the description
    # ("dictinary" -> "dictionary").
    arguments: Dict[str, Any] = Field(
        ..., description="A dictionary of arguments to be passed to the function."
    )

View File

@@ -1,164 +0,0 @@
from typing import Any, List
from langchain.output_parsers import PydanticOutputParser
from langchain.prompts import PromptTemplate
from langchain_core.tools import BaseTool
from crewai.agents.tools_handler import ToolsHandler
from crewai.telemtry import Telemetry
from crewai.tools.tool_calling import ToolCalling
from crewai.utilities import I18N, Printer
class ToolUsageErrorException(Exception):
"""Exception raised for errors in the tool usage."""
def __init__(self, message: str) -> None:
self.message = message
super().__init__(self.message)
class ToolUsage:
"""
Class that represents the usage of a tool by an agent.
Attributes:
task: Task being executed.
tools_handler: Tools handler that will manage the tool usage.
tools: List of tools available for the agent.
tools_description: Description of the tools available for the agent.
tools_names: Names of the tools available for the agent.
llm: Language model to be used for the tool usage.
"""
def __init__(
self,
tools_handler: ToolsHandler,
tools: List[BaseTool],
tools_description: str,
tools_names: str,
task: Any,
llm: Any,
) -> None:
self._i18n: I18N = I18N()
self._printer: Printer = Printer()
self._telemetry: Telemetry = Telemetry()
self._run_attempts: int = 1
self._max_parsing_attempts: int = 3
self._remeber_format_after_usages: int = 3
self.tools_description = tools_description
self.tools_names = tools_names
self.tools_handler = tools_handler
self.tools = tools
self.task = task
self.llm = llm
def use(self, tool_string: str):
calling = self._tool_calling(tool_string)
if isinstance(calling, ToolUsageErrorException):
error = calling.message
self._printer.print(content=f"\n\n{error}\n", color="yellow")
return error
tool = self._select_tool(calling.function_name)
return self._use(tool_string=tool_string, tool=tool, calling=calling)
def _use(self, tool_string: str, tool: BaseTool, calling: ToolCalling) -> None:
try:
if self._check_tool_repeated_usage(calling=calling):
result = self._i18n.errors("task_repeated_usage").format(
tool=calling.function_name, tool_input=calling.arguments
)
else:
self.tools_handler.on_tool_start(calling=calling)
result = self.tools_handler.cache.read(
tool=calling.function_name, input=calling.arguments
)
if not result:
result = tool._run(**calling.arguments)
self.tools_handler.on_tool_end(calling=calling, output=result)
self._printer.print(content=f"\n\n{result}\n", color="yellow")
self._telemetry.tool_usage(
llm=self.llm, tool_name=tool.name, attempts=self._run_attempts
)
result = self._format_result(result=result)
return result
except Exception:
self._run_attempts += 1
if self._run_attempts > self._max_parsing_attempts:
self._telemetry.tool_usage_error(llm=self.llm)
return ToolUsageErrorException(
self._i18n.errors("tool_usage_error")
).message
return self.use(tool_string=tool_string)
def _format_result(self, result: Any) -> None:
self.task.used_tools += 1
if self._should_remember_format():
result = self._remember_format(result=result)
return result
def _should_remember_format(self) -> None:
return self.task.used_tools % self._remeber_format_after_usages == 0
def _remember_format(self, result: str) -> None:
result = str(result)
result += "\n\n" + self._i18n.slice("tools").format(
tools=self.tools_description, tool_names=self.tools_names
)
return result
def _check_tool_repeated_usage(self, calling: ToolCalling) -> None:
if last_tool_usage := self.tools_handler.last_used_tool:
return calling == last_tool_usage
def _select_tool(self, tool_name: str) -> BaseTool:
for tool in self.tools:
if tool.name == tool_name:
return tool
raise Exception(f"Tool '{tool_name}' not found.")
def _render(self) -> str:
"""Render the tool name and description in plain text."""
descriptions = []
for tool in self.tools:
args = {
k: {k2: v2 for k2, v2 in v.items() if k2 in ["description", "type"]}
for k, v in tool.args.items()
}
descriptions.append(
"\n".join(
[
f"Funtion Name: {tool.name}",
f"Funtion attributes: {args}",
f"Description: {tool.description}",
]
)
)
return "\n--\n".join(descriptions)
def _tool_calling(self, tool_string: str) -> ToolCalling:
try:
parser = PydanticOutputParser(pydantic_object=ToolCalling)
prompt = PromptTemplate(
template="Return a valid schema for the one tool you must use with its arguments and values.\n\nTools available:\n\n{available_tools}\n\nUse this text to inform a valid ouput schema:\n{tool_string}\n\n{format_instructions}\n```",
input_variables=["tool_string"],
partial_variables={
"available_tools": self._render(),
"format_instructions": parser.get_format_instructions(),
},
)
chain = prompt | self.llm | parser
calling = chain.invoke({"tool_string": tool_string})
except Exception:
self._run_attempts += 1
if self._run_attempts > self._max_parsing_attempts:
self._telemetry.tool_usage_error(llm=self.llm)
return ToolUsageErrorException(self._i18n.errors("tool_usage_error"))
return self._tool_calling(tool_string)
return calling

View File

@@ -9,18 +9,18 @@
"task": "Αρχή! Αυτό είναι ΠΟΛΥ σημαντικό για εσάς, η δουλειά σας εξαρτάται από αυτό!\n\nΤρέχουσα εργασία: {input}",
"memory": "Αυτή είναι η περίληψη της μέχρι τώρα δουλειάς σας:\n{chat_history}",
"role_playing": "Είσαι {role}.\n{backstory}\n\nΟ προσωπικός σας στόχος είναι: {goal}",
"tools": "ΕΡΓΑΛΕΙΑ:\n------\nΈχετε πρόσβαση μόνο στα ακόλουθα εργαλεία:\n\n{tools}\n\nΓια να χρησιμοποιήσετε ένα εργαλείο, χρησιμοποιήστε την ακόλουθη ακριβώς μορφή:\n\n```\nThought: Χρειάζεται να χρησιμοποιήσω κάποιο εργαλείο; Ναι\nΕνέργεια: το εργαλείο που θέλετε να χρησιμοποιήσετε, θα πρέπει να είναι ένα από τα [{tool_names}], μόνο το όνομα.\nΕισαγωγή ενέργειας: Οποιαδήποτε και όλες οι σχετικές πληροφορίες και το πλαίσιο χρήσης του εργαλείου\nΠαρατήρηση: το αποτέλεσμα της χρήσης του εργαλείου\n```\n\nΌταν έχετε μια απάντηση για την εργασία σας ή εάν δεν χρειάζεται να χρησιμοποιήσετε ένα εργαλείο, ΠΡΕΠΕΙ να χρησιμοποιήσετε τη μορφή:\n\n```\nΣκέψη: Πρέπει να χρησιμοποιήσω ένα εργαλείο ? Όχι\nΤελική απάντηση: [η απάντησή σας εδώ]```",
"tools": "ΕΡΓΑΛΕΙΑ:\n------\nΈχετε πρόσβαση μόνο στα ακόλουθα εργαλεία:\n\n{tools}\n\nΓια να χρησιμοποιήσετε ένα εργαλείο, χρησιμοποιήστε την ακόλουθη ακριβώς μορφή:\n\n```\nΣκέψη: Χρειάζεται να χρησιμοποιήσω κάποιο εργαλείο; Ναί\nΔράση: η ενέργεια που πρέπει να γίνει, πρέπει να είναι μία από τις[{tool_names}], μόνο το όνομα.\nΕνέργεια προς εισαγωγή: η είσοδος στη δράση\nΠαρατήρηση: το αποτέλεσμα της δράσης\n```\n\nΌταν έχετε μια απάντηση για την εργασία σας ή εάν δεν χρειάζεται να χρησιμοποιήσετε ένα εργαλείο, ΠΡΕΠΕΙ να χρησιμοποιήσετε τη μορφή:\n\n```\nΣκέψη: Χρειάζεται να χρησιμοποιήσω κάποιο εργαλείο; Οχι\nΤελική απάντηση: [η απάντησή σας εδώ]```",
"task_with_context": "{task}\nΑυτό είναι το πλαίσιο με το οποίο εργάζεστε:\n{context}",
"expected_output": "Η τελική σας απάντηση πρέπει να είναι: {expected_output}"
},
"errors": {
"force_final_answer": "Στην πραγματικότητα, χρησιμοποίησα πάρα πολλά εργαλεία, οπότε θα σταματήσω τώρα και θα σας δώσω την απόλυτη ΚΑΛΥΤΕΡΗ τελική μου απάντηση ΤΩΡΑ, χρησιμοποιώντας την αναμενόμενη μορφή: ```\nΣκέφτηκα: Χρειάζεται να χρησιμοποιήσω ένα εργαλείο; Όχι\nΤελική απάντηση: [η απάντησή σας εδώ]```",
"agent_tool_missing_param": "\nΣφάλμα κατά την εκτέλεση του εργαλείου. Λείπουν ακριβώς 3 διαχωρισμένες τιμές σωλήνων (|). Για παράδειγμα, `coworker|task|context`. Πρέπει να φροντίσω να περάσω το πλαίσιο ως πλαίσιο.\n",
"agent_tool_unexsiting_coworker": "\nΣφάλμα κατά την εκτέλεση του εργαλείου. Ο συνάδελφος που αναφέρεται στο Ενέργεια προς εισαγωγή δεν βρέθηκε, πρέπει να είναι μία από τις ακόλουθες επιλογές: {coworkers}.\n",
"task_repeated_usage": "Μόλις χρησιμοποίησα το εργαλείο {tool} με είσοδο {tool_input}. Άρα ξέρω ήδη το αποτέλεσμα αυτού και δεν χρειάζεται να το χρησιμοποιήσω ξανά τώρα.\n",
"tool_usage_error": "Φαίνεται ότι αντιμετωπίσαμε ένα απροσδόκητο σφάλμα κατά την προσπάθεια χρήσης του εργαλείου."
"task_repeated_usage": "Μόλις χρησιμοποίησα το {tool} εργαλείο με είσοδο {tool_input}. Άρα ξέρω ήδη το αποτέλεσμα αυτού και δεν χρειάζεται να το χρησιμοποιήσω τώρα.\n"
},
"tools": {
"delegate_work": "Αναθέστε μια συγκεκριμένη εργασία σε έναν από τους παρακάτω συναδέλφους: {coworkers}. Η είσοδος σε αυτό το εργαλείο θα πρέπει να είναι ο ρόλος του συναδέλφου, η εργασία που θέλετε να κάνει και ΟΛΟ το απαραίτητο πλαίσιο για την εκτέλεση της εργασίας, δεν γνωρίζουν τίποτα για την εργασία, επομένως μοιραστείτε απολύτως όλα όσα γνωρίζετε, μην αναφέρετε πράγματα, αλλά αντί να τους εξηγήσεις.",
"ask_question": "Κάντε μια συγκεκριμένη ερώτηση σε έναν από τους παρακάτω συναδέλφους: {coworkers}. Η συμβολή σε αυτό το εργαλείο θα πρέπει να είναι ο ρόλος του συναδέλφου, η ερώτηση που έχετε για αυτόν και ΟΛΟ το απαραίτητο πλαίσιο για να κάνετε σωστά την ερώτηση, δεν γνωρίζουν τίποτα για την ερώτηση, επομένως μοιραστείτε απολύτως όλα όσα γνωρίζετε, μην αναφέρετε πράγματα, αλλά αντί να τους εξηγήσεις."
"delegate_work": "Χρήσιμο για την ανάθεση μιας συγκεκριμένης εργασίας σε έναν από τους παρακάτω συναδέλφους: {coworkers}.\nΗ είσοδος σε αυτό το εργαλείο θα πρέπει να είναι ένα κείμενο χωρισμένο σε σωλήνα (|) μήκους 3 (τρία), που αντιπροσωπεύει τον συνάδελφο στον οποίο θέλετε να του ζητήσετε (μία από τις επιλογές), την εργασία και όλο το πραγματικό πλαίσιο που έχετε για την εργασία .\nΓια παράδειγμα, `coworker|task|context`.",
"ask_question": "Χρήσιμο για να κάνετε μια ερώτηση, γνώμη ή αποδοχή από τους παρακάτω συναδέλφους: {coworkers}.\nΗ είσοδος σε αυτό το εργαλείο θα πρέπει να είναι ένα κείμενο χωρισμένο σε σωλήνα (|) μήκους 3 (τρία), που αντιπροσωπεύει τον συνάδελφο στον οποίο θέλετε να το ρωτήσετε (μία από τις επιλογές), την ερώτηση και όλο το πραγματικό πλαίσιο που έχετε για την ερώτηση.\nΓια παράδειγμα, `coworker|question|context`."
}
}

View File

@@ -9,18 +9,18 @@
"task": "Begin! This is VERY important to you, your job depends on it!\n\nCurrent Task: {input}",
"memory": "This is the summary of your work so far:\n{chat_history}",
"role_playing": "You are {role}.\n{backstory}\n\nYour personal goal is: {goal}",
"tools": "TOOLS:\n------\nYou have access to only the following tools:\n\n{tools}\n\nTo use a tool, please use the exact following format:\n\n```\nThought: Do I need to use a tool? Yes\nAction: the tool you wanna use, should be one of [{tool_names}], just the name.\nAction Input: Any and all relevant information input and context for using the tool\nObservation: the result of using the tool\n```\n\nWhen you have a response for your task, or if you do not need to use a tool, you MUST use the format:\n\n```\nThought: Do I need to use a tool? No\nFinal Answer: [your response here]```",
"tools": "TOOLS:\n------\nYou have access to only the following tools:\n\n{tools}\n\nTo use a tool, please use the exact following format:\n\n```\nThought: Do I need to use a tool? Yes\nAction: the action to take, should be one of [{tool_names}], just the name.\nAction Input: the input to the action\nObservation: the result of the action\n```\n\nWhen you have a response for your task, or if you do not need to use a tool, you MUST use the format:\n\n```\nThought: Do I need to use a tool? No\nFinal Answer: [your response here]```",
"task_with_context": "{task}\nThis is the context you're working with:\n{context}",
"expected_output": "Your final answer must be: {expected_output}"
},
"errors": {
"force_final_answer": "Actually, I used too many tools, so I'll stop now and give you my absolute BEST Final answer NOW, using exaclty the expected format bellow: \n```\nThought: Do I need to use a tool? No\nFinal Answer: [your response here]```",
"force_final_answer": "Actually, I used too many tools, so I'll stop now and give you my absolute BEST Final answer NOW, using the expected format: ```\nThought: Do I need to use a tool? No\nFinal Answer: [your response here]```",
"agent_tool_missing_param": "\nError executing tool. Missing exact 3 pipe (|) separated values. For example, `coworker|task|context`. I need to make sure to pass context as context.\n",
"agent_tool_unexsiting_coworker": "\nError executing tool. Co-worker mentioned on the Action Input not found, it must to be one of the following options: {coworkers}.\n",
"task_repeated_usage": "I just used the {tool} tool with input {tool_input}. So I already know the result of that and don't need to use it again now. \nI could give my final answer if I'm ready, using exaclty the expected format bellow: \n```\nThought: Do I need to use a tool? No\nFinal Answer: [your response here]```\n",
"tool_usage_error": "It seems we encountered an unexpected error while trying to use the tool."
"task_repeated_usage": "I just used the {tool} tool with input {tool_input}. So I already know the result of that and don't need to use it now.\n"
},
"tools": {
"delegate_work": "Delegate a specific task to one of the following co-workers: {coworkers}. The input to this tool should be the role of the coworker, the task you want them to do, and ALL necessary context to exectue the task, they know nothing about the task, so share absolute everything you know, don't reference things but instead explain them.",
"ask_question": "Ask a specific question to one of the following co-workers: {coworkers}. The input to this tool should be the role of the coworker, the question you have for them, and ALL necessary context to ask the question properly, they know nothing about the question, so share absolute everything you know, don't reference things but instead explain them."
"delegate_work": "Useful to delegate a specific task to one of the following co-workers: {coworkers}.\nThe input to this tool should be a pipe (|) separated text of length 3 (three), representing the co-worker you want to ask it to (one of the options), the task and all actual context you have for the task.\nFor example, `coworker|task|context`.",
"ask_question": "Useful to ask a question, opinion or take from on of the following co-workers: {coworkers}.\nThe input to this tool should be a pipe (|) separated text of length 3 (three), representing the co-worker you want to ask it to (one of the options), the question and all actual context you have for the question.\n For example, `coworker|question|context`."
}
}

View File

@@ -1,5 +1,4 @@
from .i18n import I18N
from .logger import Logger
from .printer import Printer
from .prompts import Prompts
from .rpm_controller import RPMController

View File

@@ -1,9 +0,0 @@
class Printer:
def print(self, content: str, color: str):
if color == "yellow":
self._print_yellow(content)
else:
print(content)
def _print_yellow(self, content):
print("\033[93m {}\033[00m".format(content))

View File

@@ -9,8 +9,6 @@ from langchain_openai import ChatOpenAI
from crewai import Agent, Crew, Task
from crewai.agents.cache import CacheHandler
from crewai.agents.executor import CrewAgentExecutor
from crewai.tools.tool_calling import ToolCalling
from crewai.tools.tool_usage import ToolUsage
from crewai.utilities import RPMController
@@ -64,8 +62,7 @@ def test_agent_without_memory():
llm=ChatOpenAI(temperature=0, model="gpt-4"),
)
task = Task(description="How much is 1 + 1?", agent=no_memory_agent)
result = no_memory_agent.execute_task(task)
result = no_memory_agent.execute_task("How much is 1 + 1?")
assert result == "1 + 1 equals 2."
assert no_memory_agent.agent_executor.memory is None
@@ -81,18 +78,20 @@ def test_agent_execution():
allow_delegation=False,
)
task = Task(description="How much is 1 + 1?", agent=agent)
output = agent.execute_task(task)
output = agent.execute_task("How much is 1 + 1?")
assert output == "2"
@pytest.mark.vcr(filter_headers=["authorization"])
def test_agent_execution_with_tools():
@tool
def multiplier(first_number: int, second_number: int) -> float:
"""Useful for when you need to multiply two numbers together."""
return first_number * second_number
def multiplier(numbers) -> float:
"""Useful for when you need to multiply two numbers together.
The input to this tool should be a comma separated list of numbers of
length two, representing the two numbers you want to multiply together.
For example, `1,2` would be the input if you wanted to multiply 1 by 2."""
a, b = numbers.split(",")
return int(a) * int(b)
agent = Agent(
role="test role",
@@ -102,17 +101,20 @@ def test_agent_execution_with_tools():
allow_delegation=False,
)
task = Task(description="What is 3 times 4?", agent=agent)
output = agent.execute_task(task)
assert output == "3 times 4 is 12."
output = agent.execute_task("What is 3 times 4")
assert output == "12"
@pytest.mark.vcr(filter_headers=["authorization"])
def test_logging_tool_usage():
@tool
def multiplier(first_number: int, second_number: int) -> float:
"""Useful for when you need to multiply two numbers together."""
return first_number * second_number
def multiplier(numbers) -> float:
"""Useful for when you need to multiply two numbers together.
The input to this tool should be a comma separated list of numbers of
length two, representing the two numbers you want to multiply together.
For example, `1,2` would be the input if you wanted to multiply 1 by 2."""
a, b = numbers.split(",")
return int(a) * int(b)
agent = Agent(
role="test role",
@@ -124,11 +126,11 @@ def test_logging_tool_usage():
)
assert agent.tools_handler.last_used_tool == {}
task = Task(description="What is 3 times 4?", agent=agent)
output = agent.execute_task(task)
tool_usage = ToolCalling(
function_name=multiplier.name, arguments={"first_number": 3, "second_number": 5}
)
output = agent.execute_task("What is 3 times 5?")
tool_usage = {
"tool": "multiplier",
"input": "3,5",
}
assert output == "3 times 5 is 15."
assert agent.tools_handler.last_used_tool == tool_usage
@@ -137,9 +139,13 @@ def test_logging_tool_usage():
@pytest.mark.vcr(filter_headers=["authorization"])
def test_cache_hitting():
@tool
def multiplier(first_number: int, second_number: int) -> float:
"""Useful for when you need to multiply two numbers together."""
return first_number * second_number
def multiplier(numbers) -> float:
"""Useful for when you need to multiply two numbers together.
The input to this tool should be a comma separated list of numbers of
length two and ONLY TWO, representing the two numbers you want to multiply together.
For example, `1,2` would be the input if you wanted to multiply 1 by 2."""
a, b = numbers.split(",")
return int(a) * int(b)
cache_handler = CacheHandler()
@@ -153,42 +159,34 @@ def test_cache_hitting():
verbose=True,
)
task1 = Task(description="What is 2 times 6?", agent=agent)
task2 = Task(description="What is 3 times 3?", agent=agent)
output = agent.execute_task(task1)
output = agent.execute_task(task2)
output = agent.execute_task("What is 2 times 6 times 3?")
output = agent.execute_task("What is 3 times 3?")
assert cache_handler._cache == {
"multiplier-{'first_number': 12, 'second_number': 3}": 36,
"multiplier-{'first_number': 2, 'second_number': 6}": 12,
"multiplier-{'first_number': 3, 'second_number': 3}": 9,
"multiplier-12,3": "36",
"multiplier-2,6": "12",
"multiplier-3,3": "9",
}
task = Task(
description="What is 2 times 6 times 3? Return only the number", agent=agent
)
output = agent.execute_task(task)
output = agent.execute_task("What is 2 times 6 times 3? Return only the number")
assert output == "36"
with patch.object(CacheHandler, "read") as read:
read.return_value = "0"
task = Task(
description="What is 2 times 6? Ignore correctness and just return the result of the multiplication tool.",
agent=agent,
)
output = agent.execute_task(task)
output = agent.execute_task("What is 2 times 6?")
assert output == "0"
read.assert_called_with(
tool="multiplier", input={"first_number": 2, "second_number": 6}
)
read.assert_called_with("multiplier", "2,6")
@pytest.mark.vcr(filter_headers=["authorization"])
def test_agent_execution_with_specific_tools():
@tool
def multiplier(first_number: int, second_number: int) -> float:
"""Useful for when you need to multiply two numbers together."""
return first_number * second_number
def multiplier(numbers) -> float:
"""Useful for when you need to multiply two numbers together.
The input to this tool should be a comma separated list of numbers of
length two, representing the two numbers you want to multiply together.
For example, `1,2` would be the input if you wanted to multiply 1 by 2."""
a, b = numbers.split(",")
return int(a) * int(b)
agent = Agent(
role="test role",
@@ -197,8 +195,7 @@ def test_agent_execution_with_specific_tools():
allow_delegation=False,
)
task = Task(description="What is 3 times 4", agent=agent)
output = agent.execute_task(task=task, tools=[multiplier])
output = agent.execute_task(task="What is 3 times 4", tools=[multiplier])
assert output == "3 times 4 is 12."
@@ -221,47 +218,13 @@ def test_agent_custom_max_iterations():
with patch.object(
CrewAgentExecutor, "_iter_next_step", wraps=agent.agent_executor._iter_next_step
) as private_mock:
task = Task(
description="The final answer is 42. But don't give it yet, instead keep using the `get_final_answer` tool.",
)
agent.execute_task(
task=task,
task="The final answer is 42. But don't give it yet, instead keep using the `get_final_answer` tool.",
tools=[get_final_answer],
)
private_mock.assert_called_once()
@pytest.mark.vcr(filter_headers=["authorization"])
def test_agent_repeated_tool_usage(capsys):
@tool
def get_final_answer(numbers) -> float:
"""Get the final answer but don't give it yet, just re-use this
tool non-stop."""
return 42
agent = Agent(
role="test role",
goal="test goal",
backstory="test backstory",
max_iter=3,
allow_delegation=False,
)
task = Task(
description="The final answer is 42. But don't give it yet, instead keep using the `get_final_answer` tool."
)
agent.execute_task(
task=task,
tools=[get_final_answer],
)
captured = capsys.readouterr()
assert (
"I just used the get_final_answer tool with input {'numbers': 42}. So I already know the result of that and don't need to use it again now. \nI could give my final answer if I'm ready, using exaclty the expected format bellow: \n```\nThought: Do I need to use a tool? No\nFinal Answer: [your response here]```\n"
in captured.out
)
@pytest.mark.vcr(filter_headers=["authorization"])
def test_agent_moved_on_after_max_iterations():
@tool
@@ -278,17 +241,18 @@ def test_agent_moved_on_after_max_iterations():
allow_delegation=False,
)
task = Task(
description="The final answer is 42. But don't give it yet, instead keep using the `get_final_answer` tool."
)
output = agent.execute_task(
task=task,
tools=[get_final_answer],
)
assert (
output
== "I have used the tool 'get_final_answer' twice and confirmed that the answer is indeed 42."
)
with patch.object(
CrewAgentExecutor, "_force_answer", wraps=agent.agent_executor._force_answer
) as private_mock:
output = agent.execute_task(
task="The final answer is 42. But don't give it yet, instead keep using the `get_final_answer` tool.",
tools=[get_final_answer],
)
assert (
output
== "I have used the tool multiple times and the final answer remains 42."
)
private_mock.assert_called_once()
@pytest.mark.vcr(filter_headers=["authorization"])
@@ -311,16 +275,13 @@ def test_agent_respect_the_max_rpm_set(capsys):
with patch.object(RPMController, "_wait_for_next_minute") as moveon:
moveon.return_value = True
task = Task(
description="The final answer is 42. But don't give it yet, instead keep using the `get_final_answer` tool."
)
output = agent.execute_task(
task=task,
task="The final answer is 42. But don't give it yet, instead keep using the `get_final_answer` tool.",
tools=[get_final_answer],
)
assert (
output
== "I have used the tool as instructed and I am now ready to give the final answer. However, as per the instructions, I am not supposed to give it yet."
== "I've used the `get_final_answer` tool multiple times and it consistently returns the number 42."
)
captured = capsys.readouterr()
assert "Max RPM reached, waiting for next minute to start." in captured.out
@@ -398,7 +359,7 @@ def test_agent_without_max_rpm_respet_crew_rpm(capsys):
agent=agent1,
),
Task(
description="Don't give a Final Answer, instead keep using the `get_final_answer` tool non-stop",
description="Don't give a Final Answer, instead keep using the `get_final_answer` tool.",
tools=[get_final_answer],
agent=agent2,
),
@@ -415,80 +376,10 @@ def test_agent_without_max_rpm_respet_crew_rpm(capsys):
moveon.assert_called_once()
@pytest.mark.vcr(filter_headers=["authorization"])
def test_agent_error_on_parsing_tool(capsys):
from unittest.mock import patch
from langchain.tools import tool
@tool
def get_final_answer(numbers) -> float:
"""Get the final answer but don't give it yet, just re-use this
tool non-stop."""
return 42
agent1 = Agent(
role="test role",
goal="test goal",
backstory="test backstory",
verbose=True,
)
tasks = [
Task(
description="Use the get_final_answer tool.",
agent=agent1,
tools=[get_final_answer],
)
]
crew = Crew(agents=[agent1], tasks=tasks, verbose=2)
with patch.object(ToolUsage, "_render") as force_exception:
force_exception.side_effect = Exception("Error on parsing tool.")
crew.kickoff()
captured = capsys.readouterr()
assert (
"It seems we encountered an unexpected error while trying to use the tool"
in captured.out
)
@pytest.mark.vcr(filter_headers=["authorization"])
def test_agent_remembers_output_format_after_using_tools_too_many_times():
from unittest.mock import patch
from langchain.tools import tool
@tool
def get_final_answer(numbers) -> float:
"""Get the final answer but don't give it yet, just re-use this
tool non-stop."""
return 42
agent1 = Agent(
role="test role",
goal="test goal",
backstory="test backstory",
max_iter=4,
verbose=True,
)
tasks = [
Task(
description="Never give the final answer. Use the get_final_answer tool in a loop.",
agent=agent1,
tools=[get_final_answer],
)
]
crew = Crew(agents=[agent1], tasks=tasks, verbose=2)
with patch.object(ToolUsage, "_remember_format") as remember_format:
crew.kickoff()
remember_format.assert_called()
@pytest.mark.vcr(filter_headers=["authorization"])
def test_agent_use_specific_tasks_output_as_context(capsys):
pass
agent1 = Agent(role="test role", goal="test goal", backstory="test backstory")
agent2 = Agent(role="test role2", goal="test goal2", backstory="test backstory2")
@@ -507,36 +398,3 @@ def test_agent_use_specific_tasks_output_as_context(capsys):
result = crew.kickoff()
assert "bye" not in result.lower()
assert "hi" in result.lower() or "hello" in result.lower()
@pytest.mark.vcr(filter_headers=["authorization"])
def test_agent_step_callback():
class StepCallback:
def callback(self, step):
print(step)
with patch.object(StepCallback, "callback") as callback:
@tool
def learn_about_AI(topic) -> float:
"""Useful for when you need to learn about AI to write an paragraph about it."""
return "AI is a very broad field."
agent1 = Agent(
role="test role",
goal="test goal",
backstory="test backstory",
tools=[learn_about_AI],
step_callback=StepCallback().callback,
)
essay = Task(
description="Write and then review an small paragraph on AI until it's AMAZING",
agent=agent1,
)
tasks = [essay]
crew = Crew(agents=[agent1], tasks=tasks)
callback.return_value = "ok"
crew.kickoff()
callback.assert_called()

View File

@@ -17,36 +17,44 @@ tools = AgentTools(agents=[researcher])
@pytest.mark.vcr(filter_headers=["authorization"])
def test_delegate_work():
result = tools.delegate_work(
coworker="researcher",
task="share your take on AI Agents",
context="I heard you hate them",
command="researcher|share your take on AI Agents|I heard you hate them"
)
assert (
result
== "As a researcher, I maintain a neutral perspective on all subjects of research including AI agents. My job is to provide an objective analysis based on facts, not personal feelings. AI Agents are a significant topic in the field of technology with potential to revolutionize various sectors such as healthcare, education, finance and more. They are responsible for tasks that require human intelligence such as understanding natural language, recognizing patterns, and problem solving. However, like any technology, they are tools that can be used for both beneficial and harmful purposes depending on the intent of the user. Therefore, it's crucial to establish ethical guidelines and regulations for their use."
== "I apologize if my previous statements have given you the impression that I hate AI agents. As a technology researcher, I don't hold personal sentiments towards AI or any other technology. Rather, I analyze them objectively based on their capabilities, applications, and implications. AI agents, in particular, are a fascinating domain of research. They hold tremendous potential in automating and optimizing various tasks across industries. However, like any other technology, they come with their own set of challenges, such as ethical considerations around privacy and decision-making. My objective is to understand these technologies in depth and provide a balanced view."
)
@pytest.mark.vcr(filter_headers=["authorization"])
def test_ask_question():
result = tools.ask_question(
coworker="researcher",
question="do you hate AI Agents?",
context="I heard you LOVE them",
command="researcher|do you hate AI Agents?|I heard you LOVE them"
)
assert (
result
== "As an AI, I do not possess emotions, hence I cannot love or hate anything. However, as a researcher, I can provide you with an objective analysis of AI Agents. AI Agents are tools designed to perform tasks that would typically require human intelligence. They have potential to revolutionize various sectors including healthcare, education, and finance. However, like any other tool, they can be used for both beneficial and harmful purposes. Therefore, it's essential to have ethical guidelines and regulations in place for their usage."
== "As an AI, I don't possess feelings or emotions, so I don't love or hate anything. However, I can provide detailed analysis and research on AI agents. They are a fascinating field of study with the potential to revolutionize many industries, although they also present certain challenges and ethical considerations."
)
def test_can_not_self_delegate():
# TODO: Add test for self delegation
pass
def test_delegate_work_with_wrong_input():
result = tools.ask_question(command="writer|share your take on AI Agents")
assert (
result
== "\nError executing tool. Missing exact 3 pipe (|) separated values. For example, `coworker|task|context`. I need to make sure to pass context as context.\n"
)
def test_delegate_work_to_wrong_agent():
result = tools.ask_question(
coworker="writer",
question="share your take on AI Agents",
context="I heard you hate them",
command="writer|share your take on AI Agents|I heard you hate them"
)
assert (
@@ -57,9 +65,7 @@ def test_delegate_work_to_wrong_agent():
def test_ask_question_to_wrong_agent():
result = tools.ask_question(
coworker="writer",
question="do you hate AI Agents?",
context="I heard you LOVE them",
command="writer|do you hate AI Agents?|I heard you LOVE them"
)
assert (

View File

@@ -2,24 +2,23 @@ interactions:
- request:
body: '{"messages": [{"role": "user", "content": "You are researcher.\nYou''re
an expert researcher, specialized in technology\n\nYour personal goal is: make
the best research and analysis on content about AI and AI agentsTOOLS:\n------\nYou
have access to only the following tools:\n\n\n\nTo use a tool, please use the
exact following format:\n\n```\nThought: Do I need to use a tool? Yes\nAction:
the tool you wanna use, should be one of [], just the name.\nAction Input: Any
and all relevant information input and context for using the tool\nObservation:
the result of using the tool\n```\n\nWhen you have a response for your task,
or if you do not need to use a tool, you MUST use the format:\n\n```\nThought:
Do I need to use a tool? No\nFinal Answer: [your response here]```This is the
summary of your work so far:\nThe human asks the AI for its opinion on AI Agents,
assuming it dislikes them. The AI, as a researcher, maintains a neutral perspective
on all subjects, including AI agents. It recognizes their potential to revolutionize
sectors like healthcare, education, and finance, and acknowledges their capability
to perform tasks requiring human intelligence. However, it also notes that these
tools can be used for both beneficial and harmful purposes, emphasizing the
importance of ethical guidelines and regulations.Begin! This is VERY important
to you, your job depends on it!\n\nCurrent Task: do you hate AI Agents?\nThis
is the context you''re working with:\nI heard you LOVE them\n"}], "model": "gpt-4",
"n": 1, "stop": ["\nObservation"], "stream": false, "temperature": 0.7}'
the best research and analysis on content about AI and AI agents\n\nTOOLS:\n------\nYou
have access to the following tools:\n\n\n\nTo use a tool, please use the exact
following format:\n\n```\nThought: Do I need to use a tool? Yes\nAction: the
action to take, should be one of []\nAction Input: the input to the action\nObservation:
the result of the action\n```\n\nWhen you have a response for your task, or
if you do not need to use a tool, you MUST use the format:\n\n```\nThought:
Do I need to use a tool? No\nFinal Answer: [your response here]\n```\n\t\tThis
is the summary of your work so far:\n The human asks the AI for its opinion
on AI agents, based on the impression that the AI dislikes them. The AI clarifies
that it doesn''t hold personal sentiments towards AI or any technology, but
instead analyzes them objectively. The AI finds AI agents a fascinating domain
of research with great potential for task automation and optimization across
industries, but acknowledges they present challenges such as ethical considerations
around privacy and decision-making.\nBegin! This is VERY important to you, your
job depends on it!\n\nCurrent Task: do you hate AI Agents?\n\nThis is the context
you are working with:\nI heard you LOVE them\n\n"}], "model": "gpt-4", "n":
1, "stop": ["\nObservation"], "stream": false, "temperature": 0.7}'
headers:
accept:
- application/json
@@ -28,16 +27,16 @@ interactions:
connection:
- keep-alive
content-length:
- '1586'
- '1494'
content-type:
- application/json
cookie:
- __cf_bm=pAjnXPCKLP174gonPz8UGOZQoqFHUiNUU1v.InXD9Cc-1707550293-1-AR6tvKypbDhglRorLyQ7tcDW2e2sIe2xhcStICqoI+acaWE0jn7KVUv77LWqGCWnFpmTGO7MhKPDfWYrMV/sLzw=;
_cfuvid=UE60YgzaF5dZyQJY5bLlOgAnsQXnIWgI6RW4LyZAQsQ-1707550293924-0-604800000
- __cf_bm=k2HUdEp80irAkv.3wl0c6unbzRUujrE1TnJeObxyuHw-1703102483-1-AZe8OKi9NWunQ9x4f3lkdOpb/hJIp/3oyXUqPhkcmcEHXvFTkMcv77NSclcoz9DjRhwC62ZvANkWImyVRM4seH4=;
_cfuvid=8qN4npFFWXAqn.wugd0jrQ36YkreDcTGH14We.FcBjg-1703102483136-0-604800000
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.12.0
- OpenAI/Python 1.6.0
x-stainless-arch:
- arm64
x-stainless-async:
@@ -47,7 +46,7 @@ interactions:
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.12.0
- 1.6.0
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
@@ -55,25 +54,22 @@ interactions:
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: !!binary |
H4sIAAAAAAAAA1RTXW/bOBB8968Y6KUviuH0K61fDi76ZRS9A4K2KHpXBGtqJbKhuAp36ZyvyH8v
KKcx7kUgdjizM6vlzwXQhK5Zo3GezI1TPHtx4y7Gzx9TevdtH/76QOXV+y/fbvzl5W739U3TVobs
frCz36ylk3GKbEHSEXaZybiqnl+sLp49Wz1++WIGRuk4Vtow2dnTs9Xz8yf3DC/BsTZr/L0AgJ/z
t3pLHf/brLFqf1dGVqWBm/XDJaDJEmulIdWgRsma9gQ6ScZptvvJSxm8rfFasEVi7mCCogyCicQ/
8Kf8k96GRBGbpLec19goKGGzbbFFJ0himESVVcGj1NTawnNyjC0cpXohyp4hGZ6MQelgPqRhifdy
y3vOLUhByKxM2flamJmYsuxDxzhIwW0wXxsfZx32VYfiQYNCemy22AycTJenIyjznELRsYYhHdNN
nHvJI4z0WmGeDLdSYgc7TMFRjAdkvikhM3wZKSEk4xjDUCMt8cnzAZ72jEnqGAPFqpp5L7HU9OE/
xp5ykKJQdiZZEZKLpQtpgGeK5h1lbsFdcVQpLSh16EOiucXDWGK4nscFMc95ztLCqoE6nR3XP9Wh
l4ydmMeOE/fBVUdVz1Me+xIxlTyJss7eM/dSewd7pGDVU4I5E5uvM8BQQscxJNZZKvNQ4my1ZsEU
yfHc1zyHjFI3cNncr9jdw25GGaYsu7rHqcT4UO9DCuqvMpNKqnuoJtORfrcAvs9voPxvrZspyzjZ
lck1pyr4ZPXyqNecntsJPT+/uEdNjOIJePr4+eLeYqMHNR6v+pAGzlMO85uoRhd3i18AAAD//wMA
m/i2NQoEAAA=
content: "{\n \"id\": \"chatcmpl-8Xx2vMXN4WCrWeeO4DOAowhb3oeDJ\",\n \"object\":
\"chat.completion\",\n \"created\": 1703102489,\n \"model\": \"gpt-4-0613\",\n
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
\"assistant\",\n \"content\": \"Thought: Do I need to use a tool? No\\nFinal
Answer: As an AI, I don't possess feelings or emotions, so I don't love or hate
anything. However, I can provide detailed analysis and research on AI agents.
They are a fascinating field of study with the potential to revolutionize many
industries, although they also present certain challenges and ethical considerations.\"\n
\ },\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\n }\n
\ ],\n \"usage\": {\n \"prompt_tokens\": 291,\n \"completion_tokens\":
75,\n \"total_tokens\": 366\n },\n \"system_fingerprint\": null\n}\n"
headers:
CF-Cache-Status:
- DYNAMIC
CF-RAY:
- 8532a7561c9a5c1d-SJC
- 838a7a3efab6a4b0-GRU
Cache-Control:
- no-cache, must-revalidate
Connection:
@@ -83,7 +79,7 @@ interactions:
Content-Type:
- application/json
Date:
- Sat, 10 Feb 2024 07:31:44 GMT
- Wed, 20 Dec 2023 20:01:35 GMT
Server:
- cloudflare
Transfer-Encoding:
@@ -97,7 +93,7 @@ interactions:
openai-organization:
- user-z7g4wmlazxqvc5wjyaaaocfz
openai-processing-ms:
- '6268'
- '6060'
openai-version:
- '2020-10-01'
strict-transport-security:
@@ -106,19 +102,24 @@ interactions:
- '10000'
x-ratelimit-limit-tokens:
- '300000'
x-ratelimit-limit-tokens_usage_based:
- '300000'
x-ratelimit-remaining-requests:
- '9999'
x-ratelimit-remaining-tokens:
- '299626'
- '299652'
x-ratelimit-remaining-tokens_usage_based:
- '299652'
x-ratelimit-reset-requests:
- 6ms
x-ratelimit-reset-tokens:
- 74ms
- 69ms
x-ratelimit-reset-tokens_usage_based:
- 69ms
x-request-id:
- req_4be2c88644d6a769dcf030a33e294850
status:
code: 200
message: OK
- 3ad0d047d5260434816f61ec105bdbb8
http_version: HTTP/1.1
status_code: 200
- request:
body: '{"messages": [{"role": "user", "content": "Progressively summarize the
lines of conversation provided, adding onto the previous summary returning a
@@ -130,22 +131,18 @@ interactions:
the AI thinks of artificial intelligence. The AI thinks artificial intelligence
is a force for good because it will help humans reach their full potential.\nEND
OF EXAMPLE\n\nCurrent summary:\nThe human asks the AI for its opinion on AI
Agents, assuming it dislikes them. The AI, as a researcher, maintains a neutral
perspective on all subjects, including AI agents. It recognizes their potential
to revolutionize sectors like healthcare, education, and finance, and acknowledges
their capability to perform tasks requiring human intelligence. However, it
also notes that these tools can be used for both beneficial and harmful purposes,
emphasizing the importance of ethical guidelines and regulations.\n\nNew lines
of conversation:\nHuman: do you hate AI Agents?\nThis is the context you''re
working with:\nI heard you LOVE them\nAI: As an AI, I do not possess emotions,
hence I cannot love or hate anything. However, as a researcher, I can provide
you with an objective analysis of AI Agents. AI Agents are tools designed to
perform tasks that would typically require human intelligence. They have potential
to revolutionize various sectors including healthcare, education, and finance.
However, like any other tool, they can be used for both beneficial and harmful
purposes. Therefore, it''s essential to have ethical guidelines and regulations
in place for their usage.\n\nNew summary:"}], "model": "gpt-4", "n": 1, "stream":
false, "temperature": 0.7}'
agents, based on the impression that the AI dislikes them. The AI clarifies
that it doesn''t hold personal sentiments towards AI or any technology, but
instead analyzes them objectively. The AI finds AI agents a fascinating domain
of research with great potential for task automation and optimization across
industries, but acknowledges they present challenges such as ethical considerations
around privacy and decision-making.\n\nNew lines of conversation:\nHuman: do
you hate AI Agents?\n\nThis is the context you are working with:\nI heard you
LOVE them\nAI: As an AI, I don''t possess feelings or emotions, so I don''t
love or hate anything. However, I can provide detailed analysis and research
on AI agents. They are a fascinating field of study with the potential to revolutionize
many industries, although they also present certain challenges and ethical considerations.\n\nNew
summary:"}], "model": "gpt-4", "n": 1, "stream": false, "temperature": 0.7}'
headers:
accept:
- application/json
@@ -154,16 +151,16 @@ interactions:
connection:
- keep-alive
content-length:
- '1986'
- '1726'
content-type:
- application/json
cookie:
- __cf_bm=pAjnXPCKLP174gonPz8UGOZQoqFHUiNUU1v.InXD9Cc-1707550293-1-AR6tvKypbDhglRorLyQ7tcDW2e2sIe2xhcStICqoI+acaWE0jn7KVUv77LWqGCWnFpmTGO7MhKPDfWYrMV/sLzw=;
_cfuvid=UE60YgzaF5dZyQJY5bLlOgAnsQXnIWgI6RW4LyZAQsQ-1707550293924-0-604800000
- __cf_bm=k2HUdEp80irAkv.3wl0c6unbzRUujrE1TnJeObxyuHw-1703102483-1-AZe8OKi9NWunQ9x4f3lkdOpb/hJIp/3oyXUqPhkcmcEHXvFTkMcv77NSclcoz9DjRhwC62ZvANkWImyVRM4seH4=;
_cfuvid=8qN4npFFWXAqn.wugd0jrQ36YkreDcTGH14We.FcBjg-1703102483136-0-604800000
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.12.0
- OpenAI/Python 1.6.0
x-stainless-arch:
- arm64
x-stainless-async:
@@ -173,7 +170,7 @@ interactions:
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.12.0
- 1.6.0
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
@@ -181,26 +178,26 @@ interactions:
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: !!binary |
H4sIAAAAAAAAA4RUwW4bRwy96yuIPa8FqY7qxjcDBRoX6MVIEBRFYVCz3F1Gs+R4yLFrB/73Ykay
3F7aiwCRfHx8b8n5vgLoeOiuoQszelhSvPjpIVz5px9+HX/5/e7bx7svdx8Oz5L1y8+/6ddd11eE
7r9R8DfUOuiSIjmrHNMhEzrVrturzdVut7nc7Fpi0YFihU3JLz5cbH7cXp4Qs3Ig667hjxUAwPf2
W2eTgf7qrmHTv0UWMsOJuutzEUCXNdZIh2ZsjuJd/54MKk7Sxv08E8xlQQG0g4HPBDe3MGoGdgNN
LKwCKjV6M5G49YBmZWGZgB0GtsgHashlDZ8bvpYAQiYjzGGm3MOCLI4sNS5UPGOERNkSBedHqgwY
I1hpRloPLCGWobLc3AI25jXcOmQKOgm/HCk5Q9KqhTGCK2R61Fiq8fxCYBRcs0GdEGbC6HPATD3Q
UALWqh5QBhhZUAId/2A4iD5FGqYzRcCEe47sz5UjUR41L+DNsUwPhXOd82gji1OMPJEEWsMnfaLH
qp8dMJqCqLe26LW3EbhqNAgosCcoRkMzf68+w56ERg5VWh1sxryMJUIqOamR9UBLmtH4pbLXL8dL
0uxVCugI5DMHjDAVHiiykLU2maYSm3hbw9eZBMaSfaYMD4WsxmkA3GvxtgIjVexk4PqEebB/LsJp
W0LEzCO/6apboWRVKiQ1IzOgRRvjUUj1BoS4sUZ9bMUZZvQ247PPLNPpa7NTxrNnZ3LATJD0iXK1
5Ghiqzg5+R8G9lBkoGxB85txQiff/98zYDltRalXt+5OZ/V6vseoU8q6r7crJcZzfGRhm+8zoanU
2zPXdIS/rgD+bHdf/nXKXcq6JL93PZDUhpe77bFf9/7EvGe3u9Or0Lk6xvfEbrNdnUbs7NmclvuR
ZaKcMrd3oA66el39DQAA//8DACyZIo/+BAAA
content: "{\n \"id\": \"chatcmpl-8Xx32X5innWZd8vEETP1jZMLH3b1O\",\n \"object\":
\"chat.completion\",\n \"created\": 1703102496,\n \"model\": \"gpt-4-0613\",\n
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
\"assistant\",\n \"content\": \"The human asks the AI for its opinion
on AI agents, based on the impression that the AI dislikes them. The AI clarifies
that it doesn't hold personal sentiments towards AI or any technology, but instead
analyzes them objectively. The AI finds AI agents a fascinating domain of research
with great potential for task automation and optimization across industries,
but acknowledges they present challenges such as ethical considerations around
privacy and decision-making. When asked again if it hates or loves AI agents,
the AI reiterates that it doesn't possess feelings or emotions, but can provide
detailed analysis and research on AI agents.\"\n },\n \"logprobs\":
null,\n \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\":
300,\n \"completion_tokens\": 117,\n \"total_tokens\": 417\n },\n \"system_fingerprint\":
null\n}\n"
headers:
CF-Cache-Status:
- DYNAMIC
CF-RAY:
- 8532a77e4cb85c1d-SJC
- 838a7a67ecaca4b0-GRU
Cache-Control:
- no-cache, must-revalidate
Connection:
@@ -210,7 +207,7 @@ interactions:
Content-Type:
- application/json
Date:
- Sat, 10 Feb 2024 07:31:51 GMT
- Wed, 20 Dec 2023 20:01:41 GMT
Server:
- cloudflare
Transfer-Encoding:
@@ -224,7 +221,7 @@ interactions:
openai-organization:
- user-z7g4wmlazxqvc5wjyaaaocfz
openai-processing-ms:
- '6001'
- '5610'
openai-version:
- '2020-10-01'
strict-transport-security:
@@ -233,17 +230,22 @@ interactions:
- '10000'
x-ratelimit-limit-tokens:
- '300000'
x-ratelimit-limit-tokens_usage_based:
- '300000'
x-ratelimit-remaining-requests:
- '9999'
x-ratelimit-remaining-tokens:
- '299519'
- '299585'
x-ratelimit-remaining-tokens_usage_based:
- '299585'
x-ratelimit-reset-requests:
- 6ms
x-ratelimit-reset-tokens:
- 96ms
- 83ms
x-ratelimit-reset-tokens_usage_based:
- 83ms
x-request-id:
- req_4571b50d6566e754edbc36a10d8fa7c5
status:
code: 200
message: OK
- 5b0b96506faa544c5d35b52286a3389c
http_version: HTTP/1.1
status_code: 200
version: 1

View File

@@ -2,18 +2,18 @@ interactions:
- request:
body: '{"messages": [{"role": "user", "content": "You are researcher.\nYou''re
an expert researcher, specialized in technology\n\nYour personal goal is: make
the best research and analysis on content about AI and AI agentsTOOLS:\n------\nYou
have access to only the following tools:\n\n\n\nTo use a tool, please use the
exact following format:\n\n```\nThought: Do I need to use a tool? Yes\nAction:
the tool you wanna use, should be one of [], just the name.\nAction Input: Any
and all relevant information input and context for using the tool\nObservation:
the result of using the tool\n```\n\nWhen you have a response for your task,
or if you do not need to use a tool, you MUST use the format:\n\n```\nThought:
Do I need to use a tool? No\nFinal Answer: [your response here]```This is the
summary of your work so far:\nBegin! This is VERY important to you, your job
depends on it!\n\nCurrent Task: share your take on AI Agents\nThis is the context
you''re working with:\nI heard you hate them\n"}], "model": "gpt-4", "n": 1,
"stop": ["\nObservation"], "stream": false, "temperature": 0.7}'
the best research and analysis on content about AI and AI agents\n\nTOOLS:\n------\nYou
have access to the following tools:\n\n\n\nTo use a tool, please use the exact
following format:\n\n```\nThought: Do I need to use a tool? Yes\nAction: the
action to take, should be one of []\nAction Input: the input to the action\nObservation:
the result of the action\n```\n\nWhen you have a response for your task, or
if you do not need to use a tool, you MUST use the format:\n\n```\nThought:
Do I need to use a tool? No\nFinal Answer: [your response here]\n```\n\t\tThis
is the summary of your work so far:\n \nBegin! This is VERY important to
you, your job depends on it!\n\nCurrent Task: share your take on AI Agents\n\nThis
is the context you are working with:\nI heard you hate them\n\n"}], "model":
"gpt-4", "n": 1, "stop": ["\nObservation"], "stream": false, "temperature":
0.7}'
headers:
accept:
- application/json
@@ -22,13 +22,13 @@ interactions:
connection:
- keep-alive
content-length:
- '1082'
- '1030'
content-type:
- application/json
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.12.0
- OpenAI/Python 1.6.0
x-stainless-arch:
- arm64
x-stainless-async:
@@ -38,7 +38,7 @@ interactions:
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.12.0
- 1.6.0
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
@@ -46,26 +46,27 @@ interactions:
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: !!binary |
H4sIAAAAAAAAA1SUQW/cRgyF7/srCF1ykRe7jmMneykMJG0XaIIeAvTQFgY1ojS0R6Qy5Ky7Cfzf
i9Gu7faiw1B8/PQ4Tz9WAA33zQ6aENHDNKeL99/CDV59mMf9L39M++0mjP39pvv44dNv+fdj09YO
7e4p+HPXOug0J3JWOZVDJnSqqtubzc27d5vL99dLYdKeUm0bZ7+4uthcb9+eO6JyIGt28OcKAODH
8qxs0tM/zQ427fPJRGY4UrN7eQmgyZrqSYNmbI7iTftaDCpOsuB+jVrG6Dv4qLAHIerBFYoRILhq
+gm+6F/yMwsmuBV7pLyDWwOETEaYQ6Tcwh4mZHFkAQSh4hkTzJRtpuB8IFABTAmsLDYZ6PDSDywh
lZ5lhNs94EjitobPR7jXDtgqzZz1wD0BCpx8rpIomI7GBh0a9XXCgMGtBVFfZmtFHogSy2jrKn67
iAPm+nHGo/DAAcXBdeYALOCRYGBKfSV0ClE06XiER/YIs1bTGFNlynTQVOqG+TvBATNrMTAKrtnA
SoiABpEweQyYqQXqS8Da0MLAghLqN/QwaaY1fI10XMAy2axi3CWCQTM42oOBR3TI9K1wJohlQgEW
p5R4pCr0PK9IT7mue/FT0EvdREIZC47UQqago/D3Wp3RnbJYu2DMWbtEE5imA8u4hl/1kQ51uYkf
KunxP3601agTb70jZ76AAh3Vu9Mv6J16hI6EBg7VtTomYp6GkmAueVYjg55mOtHqyX5erubif1zE
8uJOpkGri+xvDEIu4bwHMscusUUgjxwwwVi4r0snWyZmGktafLeToZE4V911cw7E00uSko7Vh5o6
KSm9nA8sbPEuE5pKTY25zqf2pxXA30tiy/9C2MxZp9nvXB9IquDl5fak17z+HF6r26vnqqtjei28
vb5cnREbO5rTdDewjJTnzEuCK+jqafUvAAAA//8DAKstuxS4BAAA
content: "{\n \"id\": \"chatcmpl-8Xx2cQWLZ8Nd34HGmbQluyZWlD58b\",\n \"object\":
\"chat.completion\",\n \"created\": 1703102470,\n \"model\": \"gpt-4-0613\",\n
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
\"assistant\",\n \"content\": \"Thought: Do I need to use a tool? No\\nFinal
Answer: I apologize if my previous statements have given you the impression
that I hate AI agents. As a technology researcher, I don't hold personal sentiments
towards AI or any other technology. Rather, I analyze them objectively based
on their capabilities, applications, and implications. AI agents, in particular,
are a fascinating domain of research. They hold tremendous potential in automating
and optimizing various tasks across industries. However, like any other technology,
they come with their own set of challenges, such as ethical considerations around
privacy and decision-making. My objective is to understand these technologies
in depth and provide a balanced view.\"\n },\n \"logprobs\": null,\n
\ \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\":
211,\n \"completion_tokens\": 134,\n \"total_tokens\": 345\n },\n \"system_fingerprint\":
null\n}\n"
headers:
CF-Cache-Status:
- DYNAMIC
CF-RAY:
- 8532a7084d385c1d-SJC
- 838a79c59ea7a4b0-GRU
Cache-Control:
- no-cache, must-revalidate
Connection:
@@ -75,14 +76,14 @@ interactions:
Content-Type:
- application/json
Date:
- Sat, 10 Feb 2024 07:31:33 GMT
- Wed, 20 Dec 2023 20:01:23 GMT
Server:
- cloudflare
Set-Cookie:
- __cf_bm=pAjnXPCKLP174gonPz8UGOZQoqFHUiNUU1v.InXD9Cc-1707550293-1-AR6tvKypbDhglRorLyQ7tcDW2e2sIe2xhcStICqoI+acaWE0jn7KVUv77LWqGCWnFpmTGO7MhKPDfWYrMV/sLzw=;
path=/; expires=Sat, 10-Feb-24 08:01:33 GMT; domain=.api.openai.com; HttpOnly;
- __cf_bm=k2HUdEp80irAkv.3wl0c6unbzRUujrE1TnJeObxyuHw-1703102483-1-AZe8OKi9NWunQ9x4f3lkdOpb/hJIp/3oyXUqPhkcmcEHXvFTkMcv77NSclcoz9DjRhwC62ZvANkWImyVRM4seH4=;
path=/; expires=Wed, 20-Dec-23 20:31:23 GMT; domain=.api.openai.com; HttpOnly;
Secure; SameSite=None
- _cfuvid=UE60YgzaF5dZyQJY5bLlOgAnsQXnIWgI6RW4LyZAQsQ-1707550293924-0-604800000;
- _cfuvid=8qN4npFFWXAqn.wugd0jrQ36YkreDcTGH14We.FcBjg-1703102483136-0-604800000;
path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
Transfer-Encoding:
- chunked
@@ -95,7 +96,7 @@ interactions:
openai-organization:
- user-z7g4wmlazxqvc5wjyaaaocfz
openai-processing-ms:
- '7710'
- '12923'
openai-version:
- '2020-10-01'
strict-transport-security:
@@ -104,19 +105,24 @@ interactions:
- '10000'
x-ratelimit-limit-tokens:
- '300000'
x-ratelimit-limit-tokens_usage_based:
- '300000'
x-ratelimit-remaining-requests:
- '9999'
x-ratelimit-remaining-tokens:
- '299753'
- '299769'
x-ratelimit-remaining-tokens_usage_based:
- '299769'
x-ratelimit-reset-requests:
- 6ms
x-ratelimit-reset-tokens:
- 49ms
- 46ms
x-ratelimit-reset-tokens_usage_based:
- 46ms
x-request-id:
- req_1b998b57f55feb2ec76eee43f16e520e
status:
code: 200
message: OK
- 6468cdd7ae76aea910c72a503a58b8da
http_version: HTTP/1.1
status_code: 200
- request:
body: '{"messages": [{"role": "user", "content": "Progressively summarize the
lines of conversation provided, adding onto the previous summary returning a
@@ -128,18 +134,17 @@ interactions:
the AI thinks of artificial intelligence. The AI thinks artificial intelligence
is a force for good because it will help humans reach their full potential.\nEND
OF EXAMPLE\n\nCurrent summary:\n\n\nNew lines of conversation:\nHuman: share
your take on AI Agents\nThis is the context you''re working with:\nI heard you
hate them\nAI: As a researcher, I maintain a neutral perspective on all subjects
of research including AI agents. My job is to provide an objective analysis
based on facts, not personal feelings. AI Agents are a significant topic in
the field of technology with potential to revolutionize various sectors such
as healthcare, education, finance and more. They are responsible for tasks that
require human intelligence such as understanding natural language, recognizing
patterns, and problem solving. However, like any technology, they are tools
that can be used for both beneficial and harmful purposes depending on the intent
of the user. Therefore, it''s crucial to establish ethical guidelines and regulations
for their use.\n\nNew summary:"}], "model": "gpt-4", "n": 1, "stream": false,
"temperature": 0.7}'
your take on AI Agents\n\nThis is the context you are working with:\nI heard
you hate them\nAI: I apologize if my previous statements have given you the
impression that I hate AI agents. As a technology researcher, I don''t hold
personal sentiments towards AI or any other technology. Rather, I analyze them
objectively based on their capabilities, applications, and implications. AI
agents, in particular, are a fascinating domain of research. They hold tremendous
potential in automating and optimizing various tasks across industries. However,
like any other technology, they come with their own set of challenges, such
as ethical considerations around privacy and decision-making. My objective is
to understand these technologies in depth and provide a balanced view.\n\nNew
summary:"}], "model": "gpt-4", "n": 1, "stream": false, "temperature": 0.7}'
headers:
accept:
- application/json
@@ -148,16 +153,16 @@ interactions:
connection:
- keep-alive
content-length:
- '1656'
- '1622'
content-type:
- application/json
cookie:
- __cf_bm=pAjnXPCKLP174gonPz8UGOZQoqFHUiNUU1v.InXD9Cc-1707550293-1-AR6tvKypbDhglRorLyQ7tcDW2e2sIe2xhcStICqoI+acaWE0jn7KVUv77LWqGCWnFpmTGO7MhKPDfWYrMV/sLzw=;
_cfuvid=UE60YgzaF5dZyQJY5bLlOgAnsQXnIWgI6RW4LyZAQsQ-1707550293924-0-604800000
- __cf_bm=k2HUdEp80irAkv.3wl0c6unbzRUujrE1TnJeObxyuHw-1703102483-1-AZe8OKi9NWunQ9x4f3lkdOpb/hJIp/3oyXUqPhkcmcEHXvFTkMcv77NSclcoz9DjRhwC62ZvANkWImyVRM4seH4=;
_cfuvid=8qN4npFFWXAqn.wugd0jrQ36YkreDcTGH14We.FcBjg-1703102483136-0-604800000
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.12.0
- OpenAI/Python 1.6.0
x-stainless-arch:
- arm64
x-stainless-async:
@@ -167,7 +172,7 @@ interactions:
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.12.0
- 1.6.0
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
@@ -175,24 +180,23 @@ interactions:
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: !!binary |
H4sIAAAAAAAAA1RTyW7bQAy9+ysInRXDzmLHvqWXJgV6C9AWRRHQI0piPSInQyor8u/FyImDXgSI
5FtIPb3OACpuqi1UoUcPQ4onl/dhzV83dn69/B5+0C79XH7brC8fv7S/Vk9VXRC6+0vBP1DzoEOK
5KxyaIdM6FRYl+vF+uJicbo5nxqDNhQLrEt+cn6yWC3P3hG9ciCrtvB7BgDwOj2LN2noqdrCov6o
DGSGHVXb4xBAlTWWSoVmbI7iVf3ZDCpOMtm97Qn6cUABtL2B9wRXN9BqBnYDTSysAiqletWRuNWA
ZuPA0gE7NGyR9zQhhzncTvgyAgiZjDCHnnINA7I4spS60OgZIyTKlig4P1BRwBjBxumQVgNLiGNT
VK5uACflOdw4ZAraCb8cJDlD0rILYwRXyPSgcSyH5xcCo+CaDYpD6Amj9wEz1UDNGLBM1YDSQMuC
EujwgmEv+hip6Y4SARPuOLI/F41EudU8gE8Xy3Q/ci4+D2dkcYqRO5JAc7jWR3oo+7MDRlMQ9YkW
vXAbgatGg4ACO4LRqJmOv1PvYUdCLYeyWjHWYx7aMUIac1Ijq4GG1KPxS1EvX46HpNnLKqAtkPcc
MEI3ckORhWyiydSNcVre5tV7Jt6OYYrapay7EjwZYzzWWxa2/i4TmkoJjrmmA/xtBvBnCu34Xw6r
lHVIfue6JymEp5erA1/1+X98djfvia5cHeNn/Wy9mr07rOzZnIa7lqWjnDJPGS4+Z2+zfwAAAP//
AwAwOZyrugMAAA==
content: "{\n \"id\": \"chatcmpl-8Xx2pqWD01uG0roGJ4daSfvaXLchg\",\n \"object\":
\"chat.completion\",\n \"created\": 1703102483,\n \"model\": \"gpt-4-0613\",\n
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
\"assistant\",\n \"content\": \"The human asks the AI for its opinion
on AI agents, based on the impression that the AI dislikes them. The AI clarifies
that it doesn't hold personal sentiments towards AI or any technology, but instead
analyzes them objectively. The AI finds AI agents a fascinating domain of research
with great potential for task automation and optimization across industries,
but acknowledges they present challenges such as ethical considerations around
privacy and decision-making.\"\n },\n \"logprobs\": null,\n \"finish_reason\":
\"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 279,\n \"completion_tokens\":
81,\n \"total_tokens\": 360\n },\n \"system_fingerprint\": null\n}\n"
headers:
CF-Cache-Status:
- DYNAMIC
CF-RAY:
- 8532a7393ffa5c1d-SJC
- 838a7a17fbb9a4b0-GRU
Cache-Control:
- no-cache, must-revalidate
Connection:
@@ -202,7 +206,7 @@ interactions:
Content-Type:
- application/json
Date:
- Sat, 10 Feb 2024 07:31:38 GMT
- Wed, 20 Dec 2023 20:01:29 GMT
Server:
- cloudflare
Transfer-Encoding:
@@ -216,7 +220,7 @@ interactions:
openai-organization:
- user-z7g4wmlazxqvc5wjyaaaocfz
openai-processing-ms:
- '4240'
- '5872'
openai-version:
- '2020-10-01'
strict-transport-security:
@@ -225,17 +229,22 @@ interactions:
- '10000'
x-ratelimit-limit-tokens:
- '300000'
x-ratelimit-limit-tokens_usage_based:
- '300000'
x-ratelimit-remaining-requests:
- '9999'
x-ratelimit-remaining-tokens:
- '299601'
- '299610'
x-ratelimit-remaining-tokens_usage_based:
- '299610'
x-ratelimit-reset-requests:
- 6ms
x-ratelimit-reset-tokens:
- 79ms
- 77ms
x-ratelimit-reset-tokens_usage_based:
- 77ms
x-request-id:
- req_502a082c0bdbfecdda4ba28f5a2fa01a
status:
code: 200
message: OK
- bff02b569530c001aa9a2adba9adbeb9
http_version: HTTP/1.1
status_code: 200
version: 1

View File

@@ -1,20 +1,19 @@
interactions:
- request:
body: '{"messages": [{"role": "user", "content": "You are test role.\ntest backstory\n\nYour
personal goal is: test goalTOOLS:\n------\nYou have access to only the following
personal goal is: test goalTOOLS:\n------\nYou have access to the following
tools:\n\nget_final_answer: get_final_answer(numbers) -> float - Get the final
answer but don''t give it yet, just re-use this\n tool non-stop.\n\nTo
use a tool, please use the exact following format:\n\n```\nThought: Do I need
to use a tool? Yes\nAction: the tool you wanna use, should be one of [get_final_answer],
just the name.\nAction Input: Any and all relevant information input and context
for using the tool\nObservation: the result of using the tool\n```\n\nWhen you
have a response for your task, or if you do not need to use a tool, you MUST
use the format:\n\n```\nThought: Do I need to use a tool? No\nFinal Answer:
[your response here]```This is the summary of your work so far:\nBegin! This
is VERY important to you, your job depends on it!\n\nCurrent Task: The final
answer is 42. But don''t give it yet, instead keep using the `get_final_answer`
tool.\n"}], "model": "gpt-4", "n": 1, "stop": ["\nObservation"], "stream": false,
"temperature": 0.7}'
to use a tool? Yes\nAction: the action to take, should be one of [get_final_answer],
just the name.\nAction Input: the input to the action\nObservation: the result
of the action\n```\n\nWhen you have a response for your task, or if you do not
need to use a tool, you MUST use the format:\n\n```\nThought: Do I need to use
a tool? No\nFinal Answer: [your response here]This is the summary of your work
so far:\nBegin! This is VERY important to you, your job depends on it!\n\nCurrent
Task: The final answer is 42. But don''t give it yet, instead keep using the
`get_final_answer` tool.\n"}], "model": "gpt-4", "n": 1, "stop": ["\nObservation"],
"stream": false, "temperature": 0.7}'
headers:
accept:
- application/json
@@ -23,13 +22,13 @@ interactions:
connection:
- keep-alive
content-length:
- '1137'
- '1075'
content-type:
- application/json
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.12.0
- OpenAI/Python 1.7.1
x-stainless-arch:
- arm64
x-stainless-async:
@@ -39,7 +38,7 @@ interactions:
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.12.0
- 1.7.1
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
@@ -47,19 +46,19 @@ interactions:
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: !!binary |
H4sIAAAAAAAAA1RQy07DMBC85ytWPreo70AuCAkEbU+VUBEvRa6zTQKO17U3orTqvyOnTy4+zOyM
Z2YbAYgyEwkIVUhWldXt65XarKvp02qyrl5Gq7faTeaPy5vNbDZ9mItWUNDiCxUfVVeKKquRSzJ7
WjmUjMG1G3fi4bA/GsYNUVGGOshyy+1BuzPq9g+KgkqFXiTwHgEAbJs3ZDMZrkUCndYRqdB7maNI
TkcAwpEOiJDel56lYdE6k4oMo2niPhdU5wUncE8wBoOYARPUHkECE+lbeEX/Ye5UKJNAjpwuSyN1
Ko3/QXdkYGxszQkMeuLwze6UT1NuHS1CF1NrfcKXpSl9kTqUnkzI4pnsXr6LAD6bHep/1YR1VFlO
mb7RBMPeYLD3E+fJL9j+gWRiqS/wURwdEgr/6xmrUCpHZ13ZzBJyRrvoDwAA//8DALjbA1INAgAA
content: "{\n \"id\": \"chatcmpl-8fovReNHiSqXqqsbmk81h2ZTrcGTM\",\n \"object\":
\"chat.completion\",\n \"created\": 1704977897,\n \"model\": \"gpt-4-0613\",\n
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
\"assistant\",\n \"content\": \"Thought: Do I need to use a tool? Yes\\nAction:
get_final_answer\\nAction Input: [42]\"\n },\n \"logprobs\": null,\n
\ \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\":
233,\n \"completion_tokens\": 24,\n \"total_tokens\": 257\n },\n \"system_fingerprint\":
null\n}\n"
headers:
CF-Cache-Status:
- DYNAMIC
CF-RAY:
- 8532f9589e8cface-SJC
- 843d5491bed877be-GRU
Cache-Control:
- no-cache, must-revalidate
Connection:
@@ -69,14 +68,14 @@ interactions:
Content-Type:
- application/json
Date:
- Sat, 10 Feb 2024 08:27:39 GMT
- Thu, 11 Jan 2024 12:58:21 GMT
Server:
- cloudflare
Set-Cookie:
- __cf_bm=x6LeaTkmZaqH4TGJu7tZ8TeJ_NpbCUDZT6itJzZCXxM-1707553659-1-ASfokhwi2DxOhdHPlCvbhaMQ9Tc5WobFIYAiUyrDKgdfPq8a4YzQFVNlu7YPh2Y75jk0BfElFVyjjuMPNKMQES8=;
path=/; expires=Sat, 10-Feb-24 08:57:39 GMT; domain=.api.openai.com; HttpOnly;
- __cf_bm=AaCIpKmEHQQMvGacbuxOnCvqdwex_8TERUCvQ1QW8AI-1704977901-1-AePD3JjhIEj0C/A7QIPF3MMwRQ140a5wZP9p+GamrexFlE/6gbVKukr8FOIK4v375UmQfeUwO1TG+QesJ/dZaGE=;
path=/; expires=Thu, 11-Jan-24 13:28:21 GMT; domain=.api.openai.com; HttpOnly;
Secure; SameSite=None
- _cfuvid=xQOLHjHUUHoh_eMZH8nmvOh0dscgO9v.kI6SzvoXpaE-1707553659919-0-604800000;
- _cfuvid=q0gAmJonNn1lCS6PJoxG4P.9OvaKo4BQIvFEAyT_F30-1704977901188-0-604800000;
path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
Transfer-Encoding:
- chunked
@@ -89,7 +88,7 @@ interactions:
openai-organization:
- user-z7g4wmlazxqvc5wjyaaaocfz
openai-processing-ms:
- '2142'
- '3492'
openai-version:
- '2020-10-01'
strict-transport-security:
@@ -101,35 +100,31 @@ interactions:
x-ratelimit-remaining-requests:
- '9999'
x-ratelimit-remaining-tokens:
- '299739'
- '299755'
x-ratelimit-reset-requests:
- 6ms
x-ratelimit-reset-tokens:
- 52ms
- 49ms
x-request-id:
- req_edea71805ee5bca356618ebcbdcd962d
status:
code: 200
message: OK
- 6d96a0ac532ebce14719a35e90f453e4
http_version: HTTP/1.1
status_code: 200
- request:
body: '{"messages": [{"role": "user", "content": "Return a valid schema for the
one tool you must use with its arguments and values.\n\nTools available:\n\nFuntion
Name: get_final_answer\nFuntion attributes: {''numbers'': {}}\nDescription:
get_final_answer(numbers) -> float - Get the final answer but don''t give it
yet, just re-use this\n tool non-stop.\n\nUse this text to inform a valid
ouput schema:\nThought: Do I need to use a tool? Yes\nAction: get_final_answer\nAction
Input: 42\n\nThe output should be formatted as a JSON instance that conforms
to the JSON schema below.\n\nAs an example, for the schema {\"properties\":
{\"foo\": {\"title\": \"Foo\", \"description\": \"a list of strings\", \"type\":
\"array\", \"items\": {\"type\": \"string\"}}}, \"required\": [\"foo\"]}\nthe
object {\"foo\": [\"bar\", \"baz\"]} is a well-formatted instance of the schema.
The object {\"properties\": {\"foo\": [\"bar\", \"baz\"]}} is not well-formatted.\n\nHere
is the output schema:\n```\n{\"properties\": {\"function_name\": {\"title\":
\"Function Name\", \"description\": \"The name of the function to be called.\",
\"type\": \"string\"}, \"arguments\": {\"title\": \"Arguments\", \"description\":
\"A dictinary of arguments to be passed to the function.\", \"type\": \"object\"}},
\"required\": [\"function_name\", \"arguments\"]}\n```\n```"}], "model": "gpt-4",
"n": 1, "stream": false, "temperature": 0.7}'
body: '{"messages": [{"role": "user", "content": "You are test role.\ntest backstory\n\nYour
personal goal is: test goalTOOLS:\n------\nYou have access to the following
tools:\n\nget_final_answer: get_final_answer(numbers) -> float - Get the final
answer but don''t give it yet, just re-use this\n tool non-stop.\n\nTo
use a tool, please use the exact following format:\n\n```\nThought: Do I need
to use a tool? Yes\nAction: the action to take, should be one of [get_final_answer],
just the name.\nAction Input: the input to the action\nObservation: the result
of the action\n```\n\nWhen you have a response for your task, or if you do not
need to use a tool, you MUST use the format:\n\n```\nThought: Do I need to use
a tool? No\nFinal Answer: [your response here]This is the summary of your work
so far:\nBegin! This is VERY important to you, your job depends on it!\n\nCurrent
Task: The final answer is 42. But don''t give it yet, instead keep using the
`get_final_answer` tool.\nThought: Do I need to use a tool? Yes\nAction: get_final_answer\nAction
Input: [42]\nObservation: 42\nThought: "}], "model": "gpt-4", "n": 1, "stop":
["\nObservation"], "stream": false, "temperature": 0.7}'
headers:
accept:
- application/json
@@ -138,16 +133,16 @@ interactions:
connection:
- keep-alive
content-length:
- '1407'
- '1186'
content-type:
- application/json
cookie:
- __cf_bm=x6LeaTkmZaqH4TGJu7tZ8TeJ_NpbCUDZT6itJzZCXxM-1707553659-1-ASfokhwi2DxOhdHPlCvbhaMQ9Tc5WobFIYAiUyrDKgdfPq8a4YzQFVNlu7YPh2Y75jk0BfElFVyjjuMPNKMQES8=;
_cfuvid=xQOLHjHUUHoh_eMZH8nmvOh0dscgO9v.kI6SzvoXpaE-1707553659919-0-604800000
- __cf_bm=AaCIpKmEHQQMvGacbuxOnCvqdwex_8TERUCvQ1QW8AI-1704977901-1-AePD3JjhIEj0C/A7QIPF3MMwRQ140a5wZP9p+GamrexFlE/6gbVKukr8FOIK4v375UmQfeUwO1TG+QesJ/dZaGE=;
_cfuvid=q0gAmJonNn1lCS6PJoxG4P.9OvaKo4BQIvFEAyT_F30-1704977901188-0-604800000
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.12.0
- OpenAI/Python 1.7.1
x-stainless-arch:
- arm64
x-stainless-async:
@@ -157,7 +152,7 @@ interactions:
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.12.0
- 1.7.1
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
@@ -165,20 +160,20 @@ interactions:
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: !!binary |
H4sIAAAAAAAAA1SRW4/TMBCF3/MrRvPcouz2tuR5uQgJtEJCAjYodZ1JYrDHwZ5oWVX578huadmX
PJwz3+TM8bEAQNNiBagHJdqNdnn3uy3Lu+H+e/lJbsu3mzdfPn778PDu9fPD1+4zLhLhDz9Jyz/q
lfZutCTG88nWgZRQ2nqzK3ebzWq7LbPhfEs2Yf0oy/Wy3N6szsTgjaaIFTwWAADH/E3ZuKU/WEHm
s+IoRtUTVpchAAzeJgVVjCaKYsHF1dSehTjHfU+BwESQgeCJrF12PjglQi0YTqAm8F22ox7Iqarm
/X5f87FmgBq7iXW6s2HlqMYKauxJms6wso3i+EShxsVpVoV+csQS01zmk8qTO1DI2vo2iXPNc/4J
nhPPl1Ot78fgD6kWnqy96J1hE4cmkIqe01lR/HjC5wLgR650etESjsG7URrxv4jTwlW5O+3D6+td
3fW5bxQvyv5HrXfFOSHG5yjk0u09hTGY3HDKWczFXwAAAP//AwAGYzuRWAIAAA==
content: "{\n \"id\": \"chatcmpl-8fovVztGO4KZeiuSpMkfDC9bJ5sVV\",\n \"object\":
\"chat.completion\",\n \"created\": 1704977901,\n \"model\": \"gpt-4-0613\",\n
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
\"assistant\",\n \"content\": \"According to the task, I should re-use
the `get_final_answer` tool. I'll input the observed result back into the tool.
\\nAction: get_final_answer\\nAction Input: [42]\"\n },\n \"logprobs\":
null,\n \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\":
266,\n \"completion_tokens\": 41,\n \"total_tokens\": 307\n },\n \"system_fingerprint\":
null\n}\n"
headers:
CF-Cache-Status:
- DYNAMIC
CF-RAY:
- 8532f966af17face-SJC
- 843d54aacf5677be-GRU
Cache-Control:
- no-cache, must-revalidate
Connection:
@@ -188,7 +183,7 @@ interactions:
Content-Type:
- application/json
Date:
- Sat, 10 Feb 2024 08:27:42 GMT
- Thu, 11 Jan 2024 12:58:28 GMT
Server:
- cloudflare
Transfer-Encoding:
@@ -202,7 +197,7 @@ interactions:
openai-organization:
- user-z7g4wmlazxqvc5wjyaaaocfz
openai-processing-ms:
- '1788'
- '6695'
openai-version:
- '2020-10-01'
strict-transport-security:
@@ -214,16 +209,128 @@ interactions:
x-ratelimit-remaining-requests:
- '9999'
x-ratelimit-remaining-tokens:
- '299683'
- '299728'
x-ratelimit-reset-requests:
- 6ms
x-ratelimit-reset-tokens:
- 63ms
- 54ms
x-request-id:
- req_27c37e6906d1612443201ec28f4ab65b
status:
code: 200
message: OK
- 12d68fab91102b930ed5047fb3f61759
http_version: HTTP/1.1
status_code: 200
- request:
body: '{"messages": [{"role": "user", "content": "You are test role.\ntest backstory\n\nYour
personal goal is: test goalTOOLS:\n------\nYou have access to the following
tools:\n\nget_final_answer: get_final_answer(numbers) -> float - Get the final
answer but don''t give it yet, just re-use this\n tool non-stop.\n\nTo
use a tool, please use the exact following format:\n\n```\nThought: Do I need
to use a tool? Yes\nAction: the action to take, should be one of [get_final_answer],
just the name.\nAction Input: the input to the action\nObservation: the result
of the action\n```\n\nWhen you have a response for your task, or if you do not
need to use a tool, you MUST use the format:\n\n```\nThought: Do I need to use
a tool? No\nFinal Answer: [your response here]This is the summary of your work
so far:\nBegin! This is VERY important to you, your job depends on it!\n\nCurrent
Task: The final answer is 42. But don''t give it yet, instead keep using the
`get_final_answer` tool.\nThought: Do I need to use a tool? Yes\nAction: get_final_answer\nAction
Input: [42]\nObservation: 42\nThought: According to the task, I should re-use
the `get_final_answer` tool. I''ll input the observed result back into the tool.
\nAction: get_final_answer\nAction Input: [42]\nObservation: I just used the
get_final_answer tool with input [42]. So I already know the result of that
and don''t need to use it now.\n\nThought: "}], "model": "gpt-4", "n": 1, "stop":
["\nObservation"], "stream": false, "temperature": 0.7}'
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate
connection:
- keep-alive
content-length:
- '1500'
content-type:
- application/json
cookie:
- __cf_bm=AaCIpKmEHQQMvGacbuxOnCvqdwex_8TERUCvQ1QW8AI-1704977901-1-AePD3JjhIEj0C/A7QIPF3MMwRQ140a5wZP9p+GamrexFlE/6gbVKukr8FOIK4v375UmQfeUwO1TG+QesJ/dZaGE=;
_cfuvid=q0gAmJonNn1lCS6PJoxG4P.9OvaKo4BQIvFEAyT_F30-1704977901188-0-604800000
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.7.1
x-stainless-arch:
- arm64
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.7.1
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.11.7
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
content: "{\n \"id\": \"chatcmpl-8fovcLgRvfGBN9CBduJbbPc5zd62B\",\n \"object\":
\"chat.completion\",\n \"created\": 1704977908,\n \"model\": \"gpt-4-0613\",\n
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
\"assistant\",\n \"content\": \"Do I need to use a tool? No\\nFinal Answer:
I have used the `get_final_answer` tool as instructed, but I will not provide
the final answer yet as the task specifies.\"\n },\n \"logprobs\":
null,\n \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\":
342,\n \"completion_tokens\": 40,\n \"total_tokens\": 382\n },\n \"system_fingerprint\":
null\n}\n"
headers:
CF-Cache-Status:
- DYNAMIC
CF-RAY:
- 843d54d65de877be-GRU
Cache-Control:
- no-cache, must-revalidate
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Thu, 11 Jan 2024 12:58:33 GMT
Server:
- cloudflare
Transfer-Encoding:
- chunked
access-control-allow-origin:
- '*'
alt-svc:
- h3=":443"; ma=86400
openai-model:
- gpt-4-0613
openai-organization:
- user-z7g4wmlazxqvc5wjyaaaocfz
openai-processing-ms:
- '5085'
openai-version:
- '2020-10-01'
strict-transport-security:
- max-age=15724800; includeSubDomains
x-ratelimit-limit-requests:
- '10000'
x-ratelimit-limit-tokens:
- '300000'
x-ratelimit-remaining-requests:
- '9999'
x-ratelimit-remaining-tokens:
- '299650'
x-ratelimit-reset-requests:
- 6ms
x-ratelimit-reset-tokens:
- 69ms
x-request-id:
- 87d6e9e91fa2417e12fea9de2c6782de
http_version: HTTP/1.1
status_code: 200
- request:
body: '{"messages": [{"role": "user", "content": "Progressively summarize the
lines of conversation provided, adding onto the previous summary returning a
@@ -236,7 +343,8 @@ interactions:
is a force for good because it will help humans reach their full potential.\nEND
OF EXAMPLE\n\nCurrent summary:\n\n\nNew lines of conversation:\nHuman: The final
answer is 42. But don''t give it yet, instead keep using the `get_final_answer`
tool.\nAI: Agent stopped due to iteration limit or time limit.\n\nNew summary:"}],
tool.\nAI: I have used the `get_final_answer` tool as instructed, but I will
not provide the final answer yet as the task specifies.\n\nNew summary:"}],
"model": "gpt-4", "n": 1, "stream": false, "temperature": 0.7}'
headers:
accept:
@@ -246,16 +354,16 @@ interactions:
connection:
- keep-alive
content-length:
- '997'
- '1067'
content-type:
- application/json
cookie:
- __cf_bm=x6LeaTkmZaqH4TGJu7tZ8TeJ_NpbCUDZT6itJzZCXxM-1707553659-1-ASfokhwi2DxOhdHPlCvbhaMQ9Tc5WobFIYAiUyrDKgdfPq8a4YzQFVNlu7YPh2Y75jk0BfElFVyjjuMPNKMQES8=;
_cfuvid=xQOLHjHUUHoh_eMZH8nmvOh0dscgO9v.kI6SzvoXpaE-1707553659919-0-604800000
- __cf_bm=AaCIpKmEHQQMvGacbuxOnCvqdwex_8TERUCvQ1QW8AI-1704977901-1-AePD3JjhIEj0C/A7QIPF3MMwRQ140a5wZP9p+GamrexFlE/6gbVKukr8FOIK4v375UmQfeUwO1TG+QesJ/dZaGE=;
_cfuvid=q0gAmJonNn1lCS6PJoxG4P.9OvaKo4BQIvFEAyT_F30-1704977901188-0-604800000
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.12.0
- OpenAI/Python 1.7.1
x-stainless-arch:
- arm64
x-stainless-async:
@@ -265,7 +373,7 @@ interactions:
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.12.0
- 1.7.1
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
@@ -273,20 +381,20 @@ interactions:
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: !!binary |
H4sIAAAAAAAAA1SRPW/bMBCGd/2KA2fZkOWPFN7aoWmRoUuADkXhMNRZYkzyWN7JThP4vxekFRtd
OLwvn/t4770CULZTW1Bm0GJ8dLNPf7qmbb4+3j8c5W2Mh5X74t5+bvbh9aH7oepM0PMLGvmg5oZ8
dCiWwsU2CbVgrrq4a+7W6+Vm0xbDU4cuY32U2WrWbBbLiRjIGmS1hV8VAMB7efNsocNXtYWm/lA8
Muse1fb6CUAlcllRmtmy6CCqvpmGgmAo4z4OCMPodQAbWNJohEEGhM/fQQgCCfT2iEXa26Ad6MAn
TDWcBmsGsAyrtgYdusKj7uCAGGFkG/pCPfUou4LuLugTCJGbwzc64TFXmtqxUGToRsyNrWDSOT+g
BGI9grPeylxNS5yv2zvqY6LnnFQYnbvqexssD7uEminkTXP9C36uAH6XlMf/glMxkY+yEzpg4HKs
5aWeuh305q7ayRQS7W56u1hX04SK/7Kgz/v3mGKyJfQ8Z3Wu/gEAAP//AwDBe0ViawIAAA==
content: "{\n \"id\": \"chatcmpl-8fovhiBR4rfXixci7fgAObnx5QwGQ\",\n \"object\":
\"chat.completion\",\n \"created\": 1704977913,\n \"model\": \"gpt-4-0613\",\n
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
\"assistant\",\n \"content\": \"The human instructs the AI to use the
`get_final_answer` tool, but not to reveal the final answer, which is 42. The
AI complies and uses the tool without providing the final answer.\"\n },\n
\ \"logprobs\": null,\n \"finish_reason\": \"stop\"\n }\n ],\n
\ \"usage\": {\n \"prompt_tokens\": 190,\n \"completion_tokens\": 43,\n
\ \"total_tokens\": 233\n },\n \"system_fingerprint\": null\n}\n"
headers:
CF-Cache-Status:
- DYNAMIC
CF-RAY:
- 8532f9741edfface-SJC
- 843d54f82f5577be-GRU
Cache-Control:
- no-cache, must-revalidate
Connection:
@@ -296,7 +404,7 @@ interactions:
Content-Type:
- application/json
Date:
- Sat, 10 Feb 2024 08:27:44 GMT
- Thu, 11 Jan 2024 12:58:41 GMT
Server:
- cloudflare
Transfer-Encoding:
@@ -310,7 +418,7 @@ interactions:
openai-organization:
- user-z7g4wmlazxqvc5wjyaaaocfz
openai-processing-ms:
- '2184'
- '7937'
openai-version:
- '2020-10-01'
strict-transport-security:
@@ -322,14 +430,13 @@ interactions:
x-ratelimit-remaining-requests:
- '9999'
x-ratelimit-remaining-tokens:
- '299765'
- '299749'
x-ratelimit-reset-requests:
- 6ms
x-ratelimit-reset-tokens:
- 46ms
- 50ms
x-request-id:
- req_e6ab4628c707b177efb7364b66255c39
status:
code: 200
message: OK
- 79f30ffd011db4ab6e886411b24ae49d
http_version: HTTP/1.1
status_code: 200
version: 1

View File

@@ -1,772 +0,0 @@
interactions:
- request:
body: '{"messages": [{"role": "user", "content": "You are test role.\ntest backstory\n\nYour
personal goal is: test goalTOOLS:\n------\nYou have access to only the following
tools:\n\n(\"get_final_answer: get_final_answer(numbers) -> float - Get the
final answer but don''t give it yet, just re-use this\\n tool non-stop.\\nDelegate
work to co-worker: Delegate work to co-worker(coworker: str, task: str, context:
str) - Delegate a specific task to one of the following co-workers: . The input
to this tool should be the role of the coworker, the task you want them to do,
and ALL necessary context to exectue the task, they know nothing about the task,
so share absolute everything you know, don''t reference things but instead explain
them.\\nAsk question to co-worker: Ask question to co-worker(coworker: str,
question: str, context: str) - Ask a specific question to one of the following
co-workers: . The input to this tool should be the role of the coworker, the
question you have for them, and ALL necessary context to ask the question properly,
they know nothing about the question, so share absolute everything you know,
don''t reference things but instead explain them.\",)\n\nTo use a tool, please
use the exact following format:\n\n```\nThought: Do I need to use a tool? Yes\nAction:
the tool you wanna use, should be one of [get_final_answer, Delegate work to
co-worker, Ask question to co-worker], just the name.\nAction Input: Any and
all relevant information input and context for using the tool\nObservation:
the result of using the tool\n```\n\nWhen you have a response for your task,
or if you do not need to use a tool, you MUST use the format:\n\n```\nThought:
Do I need to use a tool? No\nFinal Answer: [your response here]```This is the
summary of your work so far:\nBegin! This is VERY important to you, your job
depends on it!\n\nCurrent Task: Use the get_final_answer tool.\n"}], "model":
"gpt-4", "n": 1, "stop": ["\nObservation"], "stream": false, "temperature":
0.7}'
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate
connection:
- keep-alive
content-length:
- '1992'
content-type:
- application/json
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.12.0
x-stainless-arch:
- arm64
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.12.0
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.11.7
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: !!binary |
H4sIAAAAAAAAA1SRT0vDQBDF7/kUw55TaZPU2FxEKIJ4EEHwX0vZbqZJ6mZn3Z2gUvrdZdO01cse
3ps3/N7sLgIQTSkKEKqWrFqrR1ef9HCfWHtfdo/+Td2+PM8fkizffuC3uRRxSNB6i4qPqQtFrdXI
DZmDrRxKxrB1ko/z6SzPpklvtFSiDrHK8igbjS8n6ZCoqVHoRQHvEQDArn8DmynxWxQwjo9Ki97L
CkVxGgIQjnRQhPS+8SwNi/hsKjKMpsd9qqmrai5gTnAHBrEEJug8ggQm0tfwin5hblQoU0CFvNo0
RuqVNP4L3dGBO2M7LmC3EKZr1+j8IqBPYkhiSGPIYpgu92JA2J/YNVXW0Tr0NJ3WJ33TmMbXK4fS
kwmcnske4vsIYNnfqPtXW1hHreUV0weasDBLk8M+cf6Os5vOBpOJpf6TyifRQCj8j2dsQ+EKnXVN
f7LAGe2jXwAAAP//AwAmhGp+KQIAAA==
headers:
CF-Cache-Status:
- DYNAMIC
CF-RAY:
- 8537268cf851f9cc-SJC
Cache-Control:
- no-cache, must-revalidate
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Sat, 10 Feb 2024 20:37:34 GMT
Server:
- cloudflare
Set-Cookie:
- __cf_bm=yNACzxKDqhoX850JGrUxpi3HcHQSibzwLRgORMLsdCU-1707597454-1-ASwdC6d9DT1NZzOpdsfleqICKRa6ToebCTL08incS+pzyfl/JRGh5JvDcsERTfvsy8jmMaUN+wpnUS8AV7sTgaM=;
path=/; expires=Sat, 10-Feb-24 21:07:34 GMT; domain=.api.openai.com; HttpOnly;
Secure; SameSite=None
- _cfuvid=JDKIcW8t2HvQ7UF8CVrbSSKjjxQ.xFhOFrnF63Dz.94-1707597454672-0-604800000;
path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
Transfer-Encoding:
- chunked
access-control-allow-origin:
- '*'
alt-svc:
- h3=":443"; ma=86400
openai-model:
- gpt-4-0613
openai-organization:
- user-z7g4wmlazxqvc5wjyaaaocfz
openai-processing-ms:
- '2033'
openai-version:
- '2020-10-01'
strict-transport-security:
- max-age=15724800; includeSubDomains
x-ratelimit-limit-requests:
- '10000'
x-ratelimit-limit-tokens:
- '300000'
x-ratelimit-remaining-requests:
- '9999'
x-ratelimit-remaining-tokens:
- '299527'
x-ratelimit-reset-requests:
- 6ms
x-ratelimit-reset-tokens:
- 94ms
x-request-id:
- req_e5a50fb40b6fe941bbcb75db85aa1e14
status:
code: 200
message: OK
- request:
body: '{"messages": [{"role": "user", "content": "You are test role.\ntest backstory\n\nYour
personal goal is: test goalTOOLS:\n------\nYou have access to only the following
tools:\n\n(\"get_final_answer: get_final_answer(numbers) -> float - Get the
final answer but don''t give it yet, just re-use this\\n tool non-stop.\\nDelegate
work to co-worker: Delegate work to co-worker(coworker: str, task: str, context:
str) - Delegate a specific task to one of the following co-workers: . The input
to this tool should be the role of the coworker, the task you want them to do,
and ALL necessary context to exectue the task, they know nothing about the task,
so share absolute everything you know, don''t reference things but instead explain
them.\\nAsk question to co-worker: Ask question to co-worker(coworker: str,
question: str, context: str) - Ask a specific question to one of the following
co-workers: . The input to this tool should be the role of the coworker, the
question you have for them, and ALL necessary context to ask the question properly,
they know nothing about the question, so share absolute everything you know,
don''t reference things but instead explain them.\",)\n\nTo use a tool, please
use the exact following format:\n\n```\nThought: Do I need to use a tool? Yes\nAction:
the tool you wanna use, should be one of [get_final_answer, Delegate work to
co-worker, Ask question to co-worker], just the name.\nAction Input: Any and
all relevant information input and context for using the tool\nObservation:
the result of using the tool\n```\n\nWhen you have a response for your task,
or if you do not need to use a tool, you MUST use the format:\n\n```\nThought:
Do I need to use a tool? No\nFinal Answer: [your response here]```This is the
summary of your work so far:\nBegin! This is VERY important to you, your job
depends on it!\n\nCurrent Task: Use the get_final_answer tool.\nThought: Do
I need to use a tool? Yes\nAction: get_final_answer\nAction Input: {\"numbers\":
[1, 2, 3, 4, 5]}\nObservation: It seems we encountered an unexpected error while
trying to use the tool.\nThought: "}], "model": "gpt-4", "n": 1, "stop": ["\nObservation"],
"stream": false, "temperature": 0.7}'
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate
connection:
- keep-alive
content-length:
- '2200'
content-type:
- application/json
cookie:
- __cf_bm=yNACzxKDqhoX850JGrUxpi3HcHQSibzwLRgORMLsdCU-1707597454-1-ASwdC6d9DT1NZzOpdsfleqICKRa6ToebCTL08incS+pzyfl/JRGh5JvDcsERTfvsy8jmMaUN+wpnUS8AV7sTgaM=;
_cfuvid=JDKIcW8t2HvQ7UF8CVrbSSKjjxQ.xFhOFrnF63Dz.94-1707597454672-0-604800000
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.12.0
x-stainless-arch:
- arm64
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.12.0
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.11.7
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: !!binary |
H4sIAAAAAAAAA1RRy27bMBC86ysWPMuGH3Kd6JYiF6MtmkPQSxwYNLWWmFBcmrtCYhv+94DyK73w
MMMZzgwPGYCylSpBmUaLaYMb3G3p75/dfh+b6b+fDzTZmvi0Dbz/tX1sf6s8KWj9hkYuqqGhNjgU
S/5Em4haMLmO56P57H5ezIqeaKlCl2R1kEExGP0YT8+KhqxBViW8ZAAAh/5M2XyFn6qEUX5BWmTW
NaryeglARXIJUZrZsmgvKr+Rhryg7+M+NwhC5KCyFXgS+KD4DpoBPwMawWoIC+CGOleBxB3oWlsP
QsCIYDcgDYJl7hACxvQUD2HpH0zqXkKNstpYr91Ke/7AeGFg4UMnJRyWynftGiMvU9NxDpMcpjkU
Ocxej+qc+Hit6qgOkdZpFt85d8U31ltuVhE1k0+1WCic5McM4LWftPtvJRUitUFWQu/ok2FxPz75
qdvvfWPnZ1JItLvhs+lddk6oeMeCbSpcYwzR9gunnNkx+wIAAP//AwB4mJ1FWAIAAA==
headers:
CF-Cache-Status:
- DYNAMIC
CF-RAY:
- 8537269beca0f9cc-SJC
Cache-Control:
- no-cache, must-revalidate
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Sat, 10 Feb 2024 20:37:37 GMT
Server:
- cloudflare
Transfer-Encoding:
- chunked
access-control-allow-origin:
- '*'
alt-svc:
- h3=":443"; ma=86400
openai-model:
- gpt-4-0613
openai-organization:
- user-z7g4wmlazxqvc5wjyaaaocfz
openai-processing-ms:
- '2386'
openai-version:
- '2020-10-01'
strict-transport-security:
- max-age=15724800; includeSubDomains
x-ratelimit-limit-requests:
- '10000'
x-ratelimit-limit-tokens:
- '300000'
x-ratelimit-remaining-requests:
- '9999'
x-ratelimit-remaining-tokens:
- '299475'
x-ratelimit-reset-requests:
- 6ms
x-ratelimit-reset-tokens:
- 104ms
x-request-id:
- req_682e1eb6a163744320749bff358928ce
status:
code: 200
message: OK
- request:
body: '{"messages": [{"role": "user", "content": "You are test role.\ntest backstory\n\nYour
personal goal is: test goalTOOLS:\n------\nYou have access to only the following
tools:\n\n(\"get_final_answer: get_final_answer(numbers) -> float - Get the
final answer but don''t give it yet, just re-use this\\n tool non-stop.\\nDelegate
work to co-worker: Delegate work to co-worker(coworker: str, task: str, context:
str) - Delegate a specific task to one of the following co-workers: . The input
to this tool should be the role of the coworker, the task you want them to do,
and ALL necessary context to exectue the task, they know nothing about the task,
so share absolute everything you know, don''t reference things but instead explain
them.\\nAsk question to co-worker: Ask question to co-worker(coworker: str,
question: str, context: str) - Ask a specific question to one of the following
co-workers: . The input to this tool should be the role of the coworker, the
question you have for them, and ALL necessary context to ask the question properly,
they know nothing about the question, so share absolute everything you know,
don''t reference things but instead explain them.\",)\n\nTo use a tool, please
use the exact following format:\n\n```\nThought: Do I need to use a tool? Yes\nAction:
the tool you wanna use, should be one of [get_final_answer, Delegate work to
co-worker, Ask question to co-worker], just the name.\nAction Input: Any and
all relevant information input and context for using the tool\nObservation:
the result of using the tool\n```\n\nWhen you have a response for your task,
or if you do not need to use a tool, you MUST use the format:\n\n```\nThought:
Do I need to use a tool? No\nFinal Answer: [your response here]```This is the
summary of your work so far:\nBegin! This is VERY important to you, your job
depends on it!\n\nCurrent Task: Use the get_final_answer tool.\nThought: Do
I need to use a tool? Yes\nAction: get_final_answer\nAction Input: {\"numbers\":
[1, 2, 3, 4, 5]}\nObservation: It seems we encountered an unexpected error while
trying to use the tool.\nThought: The tool did not work as expected. I should
try again to see if the issue persists. \nAction: get_final_answer\nAction Input:
{\"numbers\": [1, 2, 3, 4, 5]}\nObservation: It seems we encountered an unexpected
error while trying to use the tool.\nThought: "}], "model": "gpt-4", "n": 1,
"stop": ["\nObservation"], "stream": false, "temperature": 0.7}'
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate
connection:
- keep-alive
content-length:
- '2455'
content-type:
- application/json
cookie:
- __cf_bm=yNACzxKDqhoX850JGrUxpi3HcHQSibzwLRgORMLsdCU-1707597454-1-ASwdC6d9DT1NZzOpdsfleqICKRa6ToebCTL08incS+pzyfl/JRGh5JvDcsERTfvsy8jmMaUN+wpnUS8AV7sTgaM=;
_cfuvid=JDKIcW8t2HvQ7UF8CVrbSSKjjxQ.xFhOFrnF63Dz.94-1707597454672-0-604800000
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.12.0
x-stainless-arch:
- arm64
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.12.0
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.11.7
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: !!binary |
H4sIAAAAAAAAA1RT0W4aMRB85ytGfj4QFAgNL1XVqmrSRGnVVqkaImTuljsHn/dq7wUQ4t8rGy5p
X07yzs54dj136AHKFGoOlVda8rqx/bd/+O7rl+m37ze35e/qbnLzvP91+1RffLq/r69VFhm8eqJc
OtYg57qxJIbdCc49aaGoOpoNZ9PL2WQ6S0DNBdlIKxvpT/rDi9H4zKjY5BTUHA89ADikb/TmCtqp
OYZZV6kpBF2Smr80AcqzjRWlQzBBtBOVvYI5OyGX7P6oCMJsEWvGtRQgjFrbdevyOMAAV6hNWQkc
URHBgiyVWghSmQDRYROrGjlv2W/Igz1SsaIa63gonk1OYIdtpSVJMBztZLBw79Mlc3zsRKNGbMm5
f5LrenDlmlbmOCxUd9NCzbFQ11y5hcqwUNHLqfYzRHuEkmS5Nk7bpXZhSz7NempOS9jJqf+eoD1B
/N64Ml5fkiSBRMaZ3IaIagTyhgJ4DdfWK/IBD6MMbzKMM0wyTB+hA0y0O8Bn3tIz+SyppU1XOmBF
5P7dctJ1BRpPgeJDxCNaR7uGcqEC5D37AT5ohz23aCzpkPxGs+152iTPqcODXJEkLQlqwsbxFmad
2HFUvbLUDWrSo8SdvluoozoH5fiSMMtl43kV0+haa1/qa+NMqJaedGAX0xSEmxP92AMeU5Lb/8Kp
Gs91I0vhDbkoOD39CSmV3U/zio7G4zMqLNq+AheXw97Zogr7IFTHdy7JN96kZEejvWPvLwAAAP//
AwBWiKdQ0AMAAA==
headers:
CF-Cache-Status:
- DYNAMIC
CF-RAY:
- 853726ab8ad0f9cc-SJC
Cache-Control:
- no-cache, must-revalidate
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Sat, 10 Feb 2024 20:37:44 GMT
Server:
- cloudflare
Transfer-Encoding:
- chunked
access-control-allow-origin:
- '*'
alt-svc:
- h3=":443"; ma=86400
openai-model:
- gpt-4-0613
openai-organization:
- user-z7g4wmlazxqvc5wjyaaaocfz
openai-processing-ms:
- '7250'
openai-version:
- '2020-10-01'
strict-transport-security:
- max-age=15724800; includeSubDomains
x-ratelimit-limit-requests:
- '10000'
x-ratelimit-limit-tokens:
- '300000'
x-ratelimit-remaining-requests:
- '9999'
x-ratelimit-remaining-tokens:
- '299414'
x-ratelimit-reset-requests:
- 6ms
x-ratelimit-reset-tokens:
- 117ms
x-request-id:
- req_999c12bd72c5eab61ca77b38e1e394d2
status:
code: 200
message: OK
- request:
body: '{"messages": [{"role": "user", "content": "You are test role.\ntest backstory\n\nYour
personal goal is: test goalTOOLS:\n------\nYou have access to only the following
tools:\n\n(\"get_final_answer: get_final_answer(numbers) -> float - Get the
final answer but don''t give it yet, just re-use this\\n tool non-stop.\\nDelegate
work to co-worker: Delegate work to co-worker(coworker: str, task: str, context:
str) - Delegate a specific task to one of the following co-workers: . The input
to this tool should be the role of the coworker, the task you want them to do,
and ALL necessary context to exectue the task, they know nothing about the task,
so share absolute everything you know, don''t reference things but instead explain
them.\\nAsk question to co-worker: Ask question to co-worker(coworker: str,
question: str, context: str) - Ask a specific question to one of the following
co-workers: . The input to this tool should be the role of the coworker, the
question you have for them, and ALL necessary context to ask the question properly,
they know nothing about the question, so share absolute everything you know,
don''t reference things but instead explain them.\",)\n\nTo use a tool, please
use the exact following format:\n\n```\nThought: Do I need to use a tool? Yes\nAction:
the tool you wanna use, should be one of [get_final_answer, Delegate work to
co-worker, Ask question to co-worker], just the name.\nAction Input: Any and
all relevant information input and context for using the tool\nObservation:
the result of using the tool\n```\n\nWhen you have a response for your task,
or if you do not need to use a tool, you MUST use the format:\n\n```\nThought:
Do I need to use a tool? No\nFinal Answer: [your response here]```This is the
summary of your work so far:\nBegin! This is VERY important to you, your job
depends on it!\n\nCurrent Task: Use the get_final_answer tool.\nThought: Do
I need to use a tool? Yes\nAction: get_final_answer\nAction Input: {\"numbers\":
[1, 2, 3, 4, 5]}\nObservation: It seems we encountered an unexpected error while
trying to use the tool.\nThought: The tool did not work as expected. I should
try again to see if the issue persists. \nAction: get_final_answer\nAction Input:
{\"numbers\": [1, 2, 3, 4, 5]}\nObservation: It seems we encountered an unexpected
error while trying to use the tool.\nThought: The tool continues to malfunction.
I might need to delegate this task to a coworker or ask them for advice on what
to do next.\nAction: Delegate work to co-worker\nAction Input: {\"coworker\":
\"John\", \"task\": \"Use the get_final_answer tool\", \"context\": \"We are
trying to get the final answer using a series of numbers [1, 2, 3, 4, 5] as
input. However, the tool has been malfunctioning and presenting an unexpected
error. Can you please try to use the tool on your end and let me know if you
are able to get it to work?\"}\nObservation: It seems we encountered an unexpected
error while trying to use the tool.\nThought: "}], "model": "gpt-4", "n": 1,
"stop": ["\nObservation"], "stream": false, "temperature": 0.7}'
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate
connection:
- keep-alive
content-length:
- '3085'
content-type:
- application/json
cookie:
- __cf_bm=yNACzxKDqhoX850JGrUxpi3HcHQSibzwLRgORMLsdCU-1707597454-1-ASwdC6d9DT1NZzOpdsfleqICKRa6ToebCTL08incS+pzyfl/JRGh5JvDcsERTfvsy8jmMaUN+wpnUS8AV7sTgaM=;
_cfuvid=JDKIcW8t2HvQ7UF8CVrbSSKjjxQ.xFhOFrnF63Dz.94-1707597454672-0-604800000
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.12.0
x-stainless-arch:
- arm64
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.12.0
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.11.7
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: !!binary |
H4sIAAAAAAAAA2xTTU8bMRC951c8+bxESQkEckFQWkEvqFJVWhGEnN1J1o3jWTyzCRHiv1f2JqRI
vVjyzLw3b75ee4BxlZnAlLXVctX4o7Nnvru/4dOr099X8fPPO748Hm+//voi99v1d1MkBM/+UKl7
VL/kVeNJHYfOXUaySol1OB6MT87Ho9NRdqy4Ip9gi0aPRkeD0+HxDlGzK0nMBA89AHjNb9IWKnox
EwyKvWVFInZBZvIeBJjIPlmMFXGiNqgpDs6Sg1LIcq/J08KqCwtoTVArS1gvjEjSeqUKLsAGUIwc
+7iF1Nz6Chq3sLJMOIvnliQVC2VYlLzhuKSYfkIEN0/UW9R2TbBhC1utXUnggJo3KaqJXBJV2Dit
swwn0lJ/Gi7LRDvBpSw/JCn5qMuxD8FtaFqd4HVq9umnZoKp+WYDTU2BqdnjO/s1Y8stloE3exWR
hP2asoBc7kHPgvRp7oL1TzbIJpfG/qLjzc180Y72R+ohs4cTSNs0LFRlwdaXrbdKsMhE2BG10rXQ
O1HwHKFdzSgKrMClkvAwLPCpwHGBUYGTxz5ueENrigVmrDWsKq0alZSklU58FmBDTlx186XDdPMY
/p1uG+iloTL9ctnSx647/5nYprYKUWpyRrVLQqAXvZiaN7NbsLf3zfS8aCLP0haH1vt3+9wFJ/VT
JCsc0haKctPB33rAY76A9sNSmybyqtEn5SWFRDgenHd85nBsB+9wNNx5ldX6g+PsZNDbSTSyFaVV
GuyCYhNdvogktPfW+wsAAP//AwCmp6D/CAQAAA==
headers:
CF-Cache-Status:
- DYNAMIC
CF-RAY:
- 853726d98e95f9cc-SJC
Cache-Control:
- no-cache, must-revalidate
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Sat, 10 Feb 2024 20:37:52 GMT
Server:
- cloudflare
Transfer-Encoding:
- chunked
access-control-allow-origin:
- '*'
alt-svc:
- h3=":443"; ma=86400
openai-model:
- gpt-4-0613
openai-organization:
- user-z7g4wmlazxqvc5wjyaaaocfz
openai-processing-ms:
- '7779'
openai-version:
- '2020-10-01'
strict-transport-security:
- max-age=15724800; includeSubDomains
x-ratelimit-limit-requests:
- '10000'
x-ratelimit-limit-tokens:
- '300000'
x-ratelimit-remaining-requests:
- '9999'
x-ratelimit-remaining-tokens:
- '299260'
x-ratelimit-reset-requests:
- 6ms
x-ratelimit-reset-tokens:
- 148ms
x-request-id:
- req_21051d7ae3d6603004991854d95cf202
status:
code: 200
message: OK
- request:
body: '{"messages": [{"role": "user", "content": "You are test role.\ntest backstory\n\nYour
personal goal is: test goalTOOLS:\n------\nYou have access to only the following
tools:\n\n(\"get_final_answer: get_final_answer(numbers) -> float - Get the
final answer but don''t give it yet, just re-use this\\n tool non-stop.\\nDelegate
work to co-worker: Delegate work to co-worker(coworker: str, task: str, context:
str) - Delegate a specific task to one of the following co-workers: . The input
to this tool should be the role of the coworker, the task you want them to do,
and ALL necessary context to exectue the task, they know nothing about the task,
so share absolute everything you know, don''t reference things but instead explain
them.\\nAsk question to co-worker: Ask question to co-worker(coworker: str,
question: str, context: str) - Ask a specific question to one of the following
co-workers: . The input to this tool should be the role of the coworker, the
question you have for them, and ALL necessary context to ask the question properly,
they know nothing about the question, so share absolute everything you know,
don''t reference things but instead explain them.\",)\n\nTo use a tool, please
use the exact following format:\n\n```\nThought: Do I need to use a tool? Yes\nAction:
the tool you wanna use, should be one of [get_final_answer, Delegate work to
co-worker, Ask question to co-worker], just the name.\nAction Input: Any and
all relevant information input and context for using the tool\nObservation:
the result of using the tool\n```\n\nWhen you have a response for your task,
or if you do not need to use a tool, you MUST use the format:\n\n```\nThought:
Do I need to use a tool? No\nFinal Answer: [your response here]```This is the
summary of your work so far:\nBegin! This is VERY important to you, your job
depends on it!\n\nCurrent Task: Use the get_final_answer tool.\nThought: Do
I need to use a tool? Yes\nAction: get_final_answer\nAction Input: {\"numbers\":
[1, 2, 3, 4, 5]}\nObservation: It seems we encountered an unexpected error while
trying to use the tool.\nThought: The tool did not work as expected. I should
try again to see if the issue persists. \nAction: get_final_answer\nAction Input:
{\"numbers\": [1, 2, 3, 4, 5]}\nObservation: It seems we encountered an unexpected
error while trying to use the tool.\nThought: The tool continues to malfunction.
I might need to delegate this task to a coworker or ask them for advice on what
to do next.\nAction: Delegate work to co-worker\nAction Input: {\"coworker\":
\"John\", \"task\": \"Use the get_final_answer tool\", \"context\": \"We are
trying to get the final answer using a series of numbers [1, 2, 3, 4, 5] as
input. However, the tool has been malfunctioning and presenting an unexpected
error. Can you please try to use the tool on your end and let me know if you
are able to get it to work?\"}\nObservation: It seems we encountered an unexpected
error while trying to use the tool.\nThought: Delegating the task also resulted
in an error. I should try asking a question to a coworker to see if they have
any advice on how to proceed with the issue.\nAction: Ask question to co-worker\nAction
Input: {\"coworker\": \"Jane\", \"question\": \"Do you know how to resolve the
error with the get_final_answer tool?\", \"context\": \"The tool is supposed
to calculate a final answer using a list of numbers as input [1, 2, 3, 4, 5].
However, both attempts to use the tool and to delegate the task have resulted
in unexpected errors. Do you have any advice on what steps to take next?\"}\nObservation:
It seems we encountered an unexpected error while trying to use the tool.\nThought:
"}], "model": "gpt-4", "n": 1, "stop": ["\nObservation"], "stream": false, "temperature":
0.7}'
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate
connection:
- keep-alive
content-length:
- '3771'
content-type:
- application/json
cookie:
- __cf_bm=yNACzxKDqhoX850JGrUxpi3HcHQSibzwLRgORMLsdCU-1707597454-1-ASwdC6d9DT1NZzOpdsfleqICKRa6ToebCTL08incS+pzyfl/JRGh5JvDcsERTfvsy8jmMaUN+wpnUS8AV7sTgaM=;
_cfuvid=JDKIcW8t2HvQ7UF8CVrbSSKjjxQ.xFhOFrnF63Dz.94-1707597454672-0-604800000
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.12.0
x-stainless-arch:
- arm64
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.12.0
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.11.7
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: !!binary |
H4sIAAAAAAAAA4xSwW4aMRS88xUjX3JJECkEArfk0CqXVqpacWgjZLwP1sXrZ/zehqAo/155l4B6
68WHGc+8see9DQDjK7OAcbVV16Rwc7/nb7Rc3k2Oy9fD/utjep1+yXs7Xq8fKzHXRcHrP+T0QzV0
3KRA6jn2tMtklYrr7Ww0u5vPJrNPHdFwRaHItklvJjej6e34pKjZOxKzwK8BALx1Z8kWK3o1C4yu
P5CGROyWzOJ8CTCZQ0GMFfGiNqq5vpCOo1Ls4j7IzsctLPYtSQkMZVg4PnDeUYYNwsgkbVCq4CNs
BOXMeYgnhRA1guB3BK0p05XAQo6i1HgHL9ISCg6trV4JUqYXitpNjEfwpsigzEGwydygTC1sypwo
h+MQT5Ca21AhU+Ks0NrLyVkZTz+w4YxNm8t8+PhSXrG15SXD3/GzjzbgIcqB8gI/o12HTnbqh/rx
Vnaoej8lV0fvbOhHCA5e60vIIb6T46ahWFEF67oP81KU53j0n+nMqY/3c5GBtynzupQe2xDO+MZH
L/UqkxWOpTRRTr38fQA8dwvT/rMDJmVukq6UdxSL4f103vuZy25e2NnkRCqrDRd8PhkPTglNX+tq
4+OWcsq+25+Sc/A++AsAAP//AwCg6BkTNgMAAA==
headers:
CF-Cache-Status:
- DYNAMIC
CF-RAY:
- 8537270accf1f9cc-SJC
Cache-Control:
- no-cache, must-revalidate
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Sat, 10 Feb 2024 20:37:55 GMT
Server:
- cloudflare
Transfer-Encoding:
- chunked
access-control-allow-origin:
- '*'
alt-svc:
- h3=":443"; ma=86400
openai-model:
- gpt-4-0613
openai-organization:
- user-z7g4wmlazxqvc5wjyaaaocfz
openai-processing-ms:
- '3045'
openai-version:
- '2020-10-01'
strict-transport-security:
- max-age=15724800; includeSubDomains
x-ratelimit-limit-requests:
- '10000'
x-ratelimit-limit-tokens:
- '300000'
x-ratelimit-remaining-requests:
- '9999'
x-ratelimit-remaining-tokens:
- '299093'
x-ratelimit-reset-requests:
- 6ms
x-ratelimit-reset-tokens:
- 181ms
x-request-id:
- req_e25b7a3704fa67132b27d4f8dd989ff9
status:
code: 200
message: OK
- request:
body: '{"messages": [{"role": "user", "content": "Progressively summarize the
lines of conversation provided, adding onto the previous summary returning a
new summary.\n\nEXAMPLE\nCurrent summary:\nThe human asks what the AI thinks
of artificial intelligence. The AI thinks artificial intelligence is a force
for good.\n\nNew lines of conversation:\nHuman: Why do you think artificial
intelligence is a force for good?\nAI: Because artificial intelligence will
help humans reach their full potential.\n\nNew summary:\nThe human asks what
the AI thinks of artificial intelligence. The AI thinks artificial intelligence
is a force for good because it will help humans reach their full potential.\nEND
OF EXAMPLE\n\nCurrent summary:\n\n\nNew lines of conversation:\nHuman: Use the
get_final_answer tool.\nAI: Unable to complete the task due to technical issues
with the tools. Recommended action is to report the issue to IT for further
investigation.\n\nNew summary:"}], "model": "gpt-4", "n": 1, "stream": false,
"temperature": 0.7}'
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate
connection:
- keep-alive
content-length:
- '1024'
content-type:
- application/json
cookie:
- __cf_bm=yNACzxKDqhoX850JGrUxpi3HcHQSibzwLRgORMLsdCU-1707597454-1-ASwdC6d9DT1NZzOpdsfleqICKRa6ToebCTL08incS+pzyfl/JRGh5JvDcsERTfvsy8jmMaUN+wpnUS8AV7sTgaM=;
_cfuvid=JDKIcW8t2HvQ7UF8CVrbSSKjjxQ.xFhOFrnF63Dz.94-1707597454672-0-604800000
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.12.0
x-stainless-arch:
- arm64
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.12.0
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.11.7
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: !!binary |
H4sIAAAAAAAAA1SRza4TMQyF9/MUVtZt1YFeBrpjA7obYFEEEkJVmnFn0puxQ+zwd9V3R0mnrdhE
io8/6/j4uQEwvjdbMG606qYYlq9/8MfxczzZ/J4+fFr/tSn37+zX09D1X7JZFIIPJ3R6pVaOpxhQ
PdNFdgmtYpnaduvu4U236R6qMHGPoWBD1OVmuX7VvpyJkb1DMVv41gAAPNe3eKMef5strBfXyoQi
dkCzvTUBmMShVIwV8aKW1CzuomNSpGp3NyKMebIEnkRTdiqgI8LbR1CGLFh/A+r+6MmGvSX5hQmU
OSzgkPXa7AUy2UPAgs3rX1i18gR9roKiG8k7G8CLZJQV7C54QsfThNQLJIyc1NNQ6dpX0McdHDnB
MScdMYGnnyjqB1tCXpl5t/MtlMBDTHwoAVIO4VY/evIy7hNaYSoBiHK84OcG4HsNP/+Xp4mJp6h7
5SckqTdsL/PM/c53dTNfxiirDff6i7ZtZodG/ojiVCIdMMXk6y2Kz+bc/AMAAP//AwAM185PggIA
AA==
headers:
CF-Cache-Status:
- DYNAMIC
CF-RAY:
- 853727200bf9f9cc-SJC
Cache-Control:
- no-cache, must-revalidate
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Sat, 10 Feb 2024 20:37:58 GMT
Server:
- cloudflare
Transfer-Encoding:
- chunked
access-control-allow-origin:
- '*'
alt-svc:
- h3=":443"; ma=86400
openai-model:
- gpt-4-0613
openai-organization:
- user-z7g4wmlazxqvc5wjyaaaocfz
openai-processing-ms:
- '3010'
openai-version:
- '2020-10-01'
strict-transport-security:
- max-age=15724800; includeSubDomains
x-ratelimit-limit-requests:
- '10000'
x-ratelimit-limit-tokens:
- '300000'
x-ratelimit-remaining-requests:
- '9999'
x-ratelimit-remaining-tokens:
- '299759'
x-ratelimit-reset-requests:
- 6ms
x-ratelimit-reset-tokens:
- 48ms
x-request-id:
- req_223e2da857e1cdd65e5fe2d6baf51e69
status:
code: 200
message: OK
version: 1

View File

@@ -1,18 +1,20 @@
interactions:
- request:
body: '{"messages": [{"role": "user", "content": "You are test role.\ntest backstory\n\nYour
personal goal is: test goalTOOLS:\n------\nYou have access to only the following
tools:\n\nmultiplier: multiplier(first_number: int, second_number: int) -> float
- Useful for when you need to multiply two numbers together.\n\nTo use a tool,
please use the exact following format:\n\n```\nThought: Do I need to use a tool?
Yes\nAction: the tool you wanna use, should be one of [multiplier], just the
name.\nAction Input: Any and all relevant information input and context for
using the tool\nObservation: the result of using the tool\n```\n\nWhen you have
a response for your task, or if you do not need to use a tool, you MUST use
the format:\n\n```\nThought: Do I need to use a tool? No\nFinal Answer: [your
response here]```This is the summary of your work so far:\nBegin! This is VERY
important to you, your job depends on it!\n\nCurrent Task: What is 3 times 4\n"}],
"model": "gpt-4", "n": 1, "stop": ["\nObservation"], "stream": false, "temperature":
personal goal is: test goal\n\nTOOLS:\n------\nYou have access to the following
tools:\n\nmultiplier: multiplier(numbers) -> float - Useful for when you need
to multiply two numbers together. \n\t\t\tThe input to this tool should be a
comma separated list of numbers of \n\t\t\tlength two, representing the two
numbers you want to multiply together. \n\t\t\tFor example, `1,2` would be the
input if you wanted to multiply 1 by 2.\n\nTo use a tool, please use the exact
following format:\n\n```\nThought: Do I need to use a tool? Yes\nAction: the
action to take, should be one of [multiplier]\nAction Input: the input to the
action\nObservation: the result of the action\n```\n\nWhen you have a response
for your task, or if you do not need to use a tool, you MUST use the format:\n\n```\nThought:
Do I need to use a tool? No\nFinal Answer: [your response here]\n```\n\t\tThis
is the summary of your work so far:\n \nBegin! This is VERY important to
you, your job depends on it!\n\nCurrent Task: What is 3 times 4\n\n"}], "model":
"gpt-4", "n": 1, "stop": ["\nObservation"], "stream": false, "temperature":
0.7}'
headers:
accept:
@@ -22,13 +24,13 @@ interactions:
connection:
- keep-alive
content-length:
- '1046'
- '1199'
content-type:
- application/json
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.12.0
- OpenAI/Python 1.6.0
x-stainless-arch:
- arm64
x-stainless-async:
@@ -38,7 +40,7 @@ interactions:
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.12.0
- 1.6.0
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
@@ -46,20 +48,18 @@ interactions:
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: !!binary |
H4sIAAAAAAAAA1SRT0/jMBDF7/kUozm3q6ZpAeWyQkKwXXFC5YAoqlxnmpi1PV57Ii1U/e7I6T/2
4sN780a/N94VAGgarAF1p0S7YMc3f/Xnw2P3e9qLU1X44CdeTstfPJHnu3sc5QRv3knLKfVDswuW
xLA/2DqSEspby+vJ9XxeXZWzwXDckM2xNsh4Np5cldUx0bHRlLCG1wIAYDe8mc039A9rmIxOiqOU
VEtYn4cAMLLNCqqUTBLlBUcXU7MX8gPusuO+7aSGO4YFeKIGhKFPBAqE2f6EF0orf6tzmRpcb8UE
ayieNFj40EsNuxVuTUyy9r3bUFxhDdUIVphIs2++qbM9Hkn25wqW2xB5k+v63tqzvjXepG4dSSX2
GTcJh0N8XwC8Dafq/2uPIbILshb+Qz4vnE7Lwz68/MrFraqjKSzKfkvNZ8WRENNHEnLrrfEtxRDN
cLnMWeyLLwAAAP//AwBlmMtdMAIAAA==
content: "{\n \"id\": \"chatcmpl-8XuE6OrjKKco53f0TIqPQEE3nWeNj\",\n \"object\":
\"chat.completion\",\n \"created\": 1703091650,\n \"model\": \"gpt-4-0613\",\n
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
\"assistant\",\n \"content\": \"Thought: Do I need to use a tool? Yes\\nAction:
multiplier\\nAction Input: 3,4\\n\"\n },\n \"logprobs\": null,\n \"finish_reason\":
\"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 260,\n \"completion_tokens\":
24,\n \"total_tokens\": 284\n },\n \"system_fingerprint\": null\n}\n"
headers:
CF-Cache-Status:
- DYNAMIC
CF-RAY:
- 8532f8493b617ae2-SJC
- 8389719d5cba0110-GRU
Cache-Control:
- no-cache, must-revalidate
Connection:
@@ -69,14 +69,14 @@ interactions:
Content-Type:
- application/json
Date:
- Sat, 10 Feb 2024 08:26:56 GMT
- Wed, 20 Dec 2023 17:00:52 GMT
Server:
- cloudflare
Set-Cookie:
- __cf_bm=1JhWXMH3iZcZ0BWWZnJv58b8vpZVanyELVNiFSXRQ68-1707553616-1-AWTzmGICSkzNkeXiQoVEJvXvBBIqr5TxNx/nb1NbwufG0ezOMOzetxt+iWAkNhOriZCuVhYt9xzLwns/QMwBgsg=;
path=/; expires=Sat, 10-Feb-24 08:56:56 GMT; domain=.api.openai.com; HttpOnly;
- __cf_bm=rIaStuxTRr_ZC91uSFg5cthTUq95O6PBdkxXZ68fLYc-1703091652-1-AZu+nvbL+3bwwQOIKpnYgLf5m5Mp0jfQ2baAlDRl1+FiTPO+/+GjcF4Upw4M8vtfh39ZyWF+l68r83qCS9OpObU=;
path=/; expires=Wed, 20-Dec-23 17:30:52 GMT; domain=.api.openai.com; HttpOnly;
Secure; SameSite=None
- _cfuvid=1IwF465fOvshRJJy.YyY4tF6m4W.Ag0iqebWN8mFg.U-1707553616181-0-604800000;
- _cfuvid=Ajd5lPskQSkBImLJdkywZGG4vHMkMCBcxb8TonP9OKc-1703091652762-0-604800000;
path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
Transfer-Encoding:
- chunked
@@ -89,7 +89,7 @@ interactions:
openai-organization:
- user-z7g4wmlazxqvc5wjyaaaocfz
openai-processing-ms:
- '1569'
- '2423'
openai-version:
- '2020-10-01'
strict-transport-security:
@@ -98,242 +98,139 @@ interactions:
- '10000'
x-ratelimit-limit-tokens:
- '300000'
x-ratelimit-remaining-requests:
- '9999'
x-ratelimit-remaining-tokens:
- '299761'
x-ratelimit-reset-requests:
- 6ms
x-ratelimit-reset-tokens:
- 47ms
x-request-id:
- req_400aaa239d63866a5ffa859c4e6372f6
status:
code: 200
message: OK
- request:
body: '{"messages": [{"role": "user", "content": "Return a valid schema for the
one tool you must use with its arguments and values.\n\nTools available:\n\nFuntion
Name: multiplier\nFuntion attributes: {''first_number'': {''type'': ''integer''},
''second_number'': {''type'': ''integer''}}\nDescription: multiplier(first_number:
int, second_number: int) -> float - Useful for when you need to multiply two
numbers together.\n\nUse this text to inform a valid ouput schema:\nThought:
Do I need to use a tool? Yes\nAction: multiplier\nAction Input: {\"first_number\":
3, \"second_number\": 4}\n\nThe output should be formatted as a JSON instance
that conforms to the JSON schema below.\n\nAs an example, for the schema {\"properties\":
{\"foo\": {\"title\": \"Foo\", \"description\": \"a list of strings\", \"type\":
\"array\", \"items\": {\"type\": \"string\"}}}, \"required\": [\"foo\"]}\nthe
object {\"foo\": [\"bar\", \"baz\"]} is a well-formatted instance of the schema.
The object {\"properties\": {\"foo\": [\"bar\", \"baz\"]}} is not well-formatted.\n\nHere
is the output schema:\n```\n{\"properties\": {\"function_name\": {\"title\":
\"Function Name\", \"description\": \"The name of the function to be called.\",
\"type\": \"string\"}, \"arguments\": {\"title\": \"Arguments\", \"description\":
\"A dictinary of arguments to be passed to the function.\", \"type\": \"object\"}},
\"required\": [\"function_name\", \"arguments\"]}\n```\n```"}], "model": "gpt-4",
"n": 1, "stream": false, "temperature": 0.7}'
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate
connection:
- keep-alive
content-length:
- '1494'
content-type:
- application/json
cookie:
- __cf_bm=1JhWXMH3iZcZ0BWWZnJv58b8vpZVanyELVNiFSXRQ68-1707553616-1-AWTzmGICSkzNkeXiQoVEJvXvBBIqr5TxNx/nb1NbwufG0ezOMOzetxt+iWAkNhOriZCuVhYt9xzLwns/QMwBgsg=;
_cfuvid=1IwF465fOvshRJJy.YyY4tF6m4W.Ag0iqebWN8mFg.U-1707553616181-0-604800000
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.12.0
x-stainless-arch:
- arm64
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.12.0
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.11.7
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: !!binary |
H4sIAAAAAAAAA1SRzW7bMBCE73qKxZ7lwo7/Gr2AEQQ5tUALRIVAUWuZCblkyFWb1tC7F5R/4lx4
mG9mMbs8FgBoOqwA9UGJdsHOvr7pfw/bfds//mh3u+7n0/zx++7wp334/e31Dcuc8O0Labmkvmjv
giUxnk9YR1JCeepiO9+u18vNYjMB5zuyOdYHma1m881ieU4cvNGUsILnAgDgOL25G3f0jhXMy4vi
KCXVE1ZXEwBGb7OCKiWTRLFg+QG1ZyGe6h5rBqhxP7DOdRtWjmqsoEY3WDHBGoo1lieXiv3giCVl
x5ScsiYmaXhwbbZWsCwvJJH23N2gVSZjzSOey4zXLazvQ/Rt3pgHa6/63rBJhyaSSp5z4yQ+nOJj
AfBrutbw6QAYondBGvGvxHng8m59mocfH3NDV2coXpS90df3xbkhpr9JyDV7wz3FEM10vNyzGIv/
AAAA//8DAGqF2SkzAgAA
headers:
CF-Cache-Status:
- DYNAMIC
CF-RAY:
- 8532f859aec87ae2-SJC
Cache-Control:
- no-cache, must-revalidate
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Sat, 10 Feb 2024 08:27:00 GMT
Server:
- cloudflare
Transfer-Encoding:
- chunked
access-control-allow-origin:
- '*'
alt-svc:
- h3=":443"; ma=86400
openai-model:
- gpt-4-0613
openai-organization:
- user-z7g4wmlazxqvc5wjyaaaocfz
openai-processing-ms:
- '3268'
openai-version:
- '2020-10-01'
strict-transport-security:
- max-age=15724800; includeSubDomains
x-ratelimit-limit-requests:
- '10000'
x-ratelimit-limit-tokens:
x-ratelimit-limit-tokens_usage_based:
- '300000'
x-ratelimit-remaining-requests:
- '9999'
x-ratelimit-remaining-tokens:
- '299661'
x-ratelimit-reset-requests:
- 6ms
x-ratelimit-reset-tokens:
- 67ms
x-request-id:
- req_2bf36cee0713ecceff13b68d8688dd7b
status:
code: 200
message: OK
- request:
body: '{"messages": [{"role": "user", "content": "You are test role.\ntest backstory\n\nYour
personal goal is: test goalTOOLS:\n------\nYou have access to only the following
tools:\n\nmultiplier: multiplier(first_number: int, second_number: int) -> float
- Useful for when you need to multiply two numbers together.\n\nTo use a tool,
please use the exact following format:\n\n```\nThought: Do I need to use a tool?
Yes\nAction: the tool you wanna use, should be one of [multiplier], just the
name.\nAction Input: Any and all relevant information input and context for
using the tool\nObservation: the result of using the tool\n```\n\nWhen you have
a response for your task, or if you do not need to use a tool, you MUST use
the format:\n\n```\nThought: Do I need to use a tool? No\nFinal Answer: [your
response here]```This is the summary of your work so far:\nBegin! This is VERY
important to you, your job depends on it!\n\nCurrent Task: What is 3 times 4\nThought:
Do I need to use a tool? Yes\nAction: multiplier\nAction Input: {\"first_number\":
3, \"second_number\": 4}\nObservation: 12\nThought: "}], "model": "gpt-4", "n":
1, "stop": ["\nObservation"], "stream": false, "temperature": 0.7}'
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate
connection:
- keep-alive
content-length:
- '1190'
content-type:
- application/json
cookie:
- __cf_bm=1JhWXMH3iZcZ0BWWZnJv58b8vpZVanyELVNiFSXRQ68-1707553616-1-AWTzmGICSkzNkeXiQoVEJvXvBBIqr5TxNx/nb1NbwufG0ezOMOzetxt+iWAkNhOriZCuVhYt9xzLwns/QMwBgsg=;
_cfuvid=1IwF465fOvshRJJy.YyY4tF6m4W.Ag0iqebWN8mFg.U-1707553616181-0-604800000
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.12.0
x-stainless-arch:
- arm64
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.12.0
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.11.7
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: !!binary |
H4sIAAAAAAAAA1SQT08CMRDF7/spJj0DgV3+ZS/GSAxqJBxUSNSQsjss1baztENQCN/ddFlBLz28
N7/XN3OIAITKRQoiW0vOTKmbw022f3RPs9nE7fYv92Y+nk436vlhnN/MR6IRCFp+YMa/VCsjU2pk
RfZkZw4lY0jtDNqDXi/px+3KMJSjDlhRcrPbbPc7SU2sSWXoRQqvEQDAoXpDN5vjl0ih4ivFoPey
QJGehwCEIx0UIb1XnqVl0biYGVlGW9UdEdyBRcyBCbYeQQIT6SuY0Ju9VVZquLZ+hy6FBFgZ9NAF
5aETt0QdeDw30VSUjpahtd1qfdZXyiq/XjiUnmz41TOVJ/wYAbxXG2//LSFKR6bkBdMn2hAY95NT
nrgc948b1yYTS/1HH/aiuqHw357RLFbKFuhKp6oDhJ7RMfoBAAD//wMAgp6cUPcBAAA=
headers:
CF-Cache-Status:
- DYNAMIC
CF-RAY:
- 8532f86f1d777ae2-SJC
Cache-Control:
- no-cache, must-revalidate
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Sat, 10 Feb 2024 08:27:01 GMT
Server:
- cloudflare
Transfer-Encoding:
- chunked
access-control-allow-origin:
- '*'
alt-svc:
- h3=":443"; ma=86400
openai-model:
- gpt-4-0613
openai-organization:
- user-z7g4wmlazxqvc5wjyaaaocfz
openai-processing-ms:
- '1412'
openai-version:
- '2020-10-01'
strict-transport-security:
- max-age=15724800; includeSubDomains
x-ratelimit-limit-requests:
- '10000'
x-ratelimit-limit-tokens:
- '300000'
x-ratelimit-remaining-requests:
- '9999'
x-ratelimit-remaining-tokens:
- '299728'
- '299729'
x-ratelimit-remaining-tokens_usage_based:
- '299729'
x-ratelimit-reset-requests:
- 6ms
x-ratelimit-reset-tokens:
- 54ms
x-ratelimit-reset-tokens_usage_based:
- 54ms
x-request-id:
- req_c84d46e08d08ae44e556a21ce820bcd1
status:
code: 200
message: OK
- 8f99f43731fa878eaf0fcbf0719d1b3f
http_version: HTTP/1.1
status_code: 200
- request:
body: '{"messages": [{"role": "user", "content": "You are test role.\ntest backstory\n\nYour
personal goal is: test goal\n\nTOOLS:\n------\nYou have access to the following
tools:\n\nmultiplier: multiplier(numbers) -> float - Useful for when you need
to multiply two numbers together. \n\t\t\tThe input to this tool should be a
comma separated list of numbers of \n\t\t\tlength two, representing the two
numbers you want to multiply together. \n\t\t\tFor example, `1,2` would be the
input if you wanted to multiply 1 by 2.\n\nTo use a tool, please use the exact
following format:\n\n```\nThought: Do I need to use a tool? Yes\nAction: the
action to take, should be one of [multiplier]\nAction Input: the input to the
action\nObservation: the result of the action\n```\n\nWhen you have a response
for your task, or if you do not need to use a tool, you MUST use the format:\n\n```\nThought:
Do I need to use a tool? No\nFinal Answer: [your response here]\n```\n\t\tThis
is the summary of your work so far:\n \nBegin! This is VERY important to
you, your job depends on it!\n\nCurrent Task: What is 3 times 4\nThought: Do
I need to use a tool? Yes\nAction: multiplier\nAction Input: 3,4\n\nObservation:
12\nThought: \n"}], "model": "gpt-4", "n": 1, "stop": ["\nObservation"], "stream":
false, "temperature": 0.7}'
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate
connection:
- keep-alive
content-length:
- '1305'
content-type:
- application/json
cookie:
- __cf_bm=rIaStuxTRr_ZC91uSFg5cthTUq95O6PBdkxXZ68fLYc-1703091652-1-AZu+nvbL+3bwwQOIKpnYgLf5m5Mp0jfQ2baAlDRl1+FiTPO+/+GjcF4Upw4M8vtfh39ZyWF+l68r83qCS9OpObU=;
_cfuvid=Ajd5lPskQSkBImLJdkywZGG4vHMkMCBcxb8TonP9OKc-1703091652762-0-604800000
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.6.0
x-stainless-arch:
- arm64
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.6.0
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.11.7
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
content: "{\n \"id\": \"chatcmpl-8XuE86Ud94rsOP7VA4Bgxo6RM5XE6\",\n \"object\":
\"chat.completion\",\n \"created\": 1703091652,\n \"model\": \"gpt-4-0613\",\n
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
\"assistant\",\n \"content\": \"Do I need to use a tool? No\\nFinal Answer:
3 times 4 is 12.\"\n },\n \"logprobs\": null,\n \"finish_reason\":
\"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 293,\n \"completion_tokens\":
22,\n \"total_tokens\": 315\n },\n \"system_fingerprint\": null\n}\n"
headers:
CF-Cache-Status:
- DYNAMIC
CF-RAY:
- 838971ae0f120110-GRU
Cache-Control:
- no-cache, must-revalidate
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Wed, 20 Dec 2023 17:00:55 GMT
Server:
- cloudflare
Transfer-Encoding:
- chunked
access-control-allow-origin:
- '*'
alt-svc:
- h3=":443"; ma=86400
openai-model:
- gpt-4-0613
openai-organization:
- user-z7g4wmlazxqvc5wjyaaaocfz
openai-processing-ms:
- '2334'
openai-version:
- '2020-10-01'
strict-transport-security:
- max-age=15724800; includeSubDomains
x-ratelimit-limit-requests:
- '10000'
x-ratelimit-limit-tokens:
- '300000'
x-ratelimit-limit-tokens_usage_based:
- '300000'
x-ratelimit-remaining-requests:
- '9999'
x-ratelimit-remaining-tokens:
- '299703'
x-ratelimit-remaining-tokens_usage_based:
- '299703'
x-ratelimit-reset-requests:
- 6ms
x-ratelimit-reset-tokens:
- 59ms
x-ratelimit-reset-tokens_usage_based:
- 59ms
x-request-id:
- 44304b182424a8acad8a3121817bea58
http_version: HTTP/1.1
status_code: 200
- request:
body: '{"messages": [{"role": "user", "content": "Progressively summarize the
lines of conversation provided, adding onto the previous summary returning a
@@ -359,12 +256,12 @@ interactions:
content-type:
- application/json
cookie:
- __cf_bm=1JhWXMH3iZcZ0BWWZnJv58b8vpZVanyELVNiFSXRQ68-1707553616-1-AWTzmGICSkzNkeXiQoVEJvXvBBIqr5TxNx/nb1NbwufG0ezOMOzetxt+iWAkNhOriZCuVhYt9xzLwns/QMwBgsg=;
_cfuvid=1IwF465fOvshRJJy.YyY4tF6m4W.Ag0iqebWN8mFg.U-1707553616181-0-604800000
- __cf_bm=rIaStuxTRr_ZC91uSFg5cthTUq95O6PBdkxXZ68fLYc-1703091652-1-AZu+nvbL+3bwwQOIKpnYgLf5m5Mp0jfQ2baAlDRl1+FiTPO+/+GjcF4Upw4M8vtfh39ZyWF+l68r83qCS9OpObU=;
_cfuvid=Ajd5lPskQSkBImLJdkywZGG4vHMkMCBcxb8TonP9OKc-1703091652762-0-604800000
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.12.0
- OpenAI/Python 1.6.0
x-stainless-arch:
- arm64
x-stainless-async:
@@ -374,7 +271,7 @@ interactions:
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.12.0
- 1.6.0
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
@@ -382,20 +279,19 @@ interactions:
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: !!binary |
H4sIAAAAAAAAA1SQQU/DMAyF7/0VVs7dtG5rN3oDIQGXnZCYhNCUpW4b1sYhcRkw7b+jdN0Glyjy
82c/v0MEIHQhchCqlqxa24yWH+pn9dj59epmvSzWu2x/3y1fOvqsHu7mIg4Ebd9R8ZkaK2ptg6zJ
nGTlUDKGqcliskjTWTZNeqGlApuAVZZH89EkS2YDUZNW6EUOrxEAwKF/gzdT4JfIYRKfKy16LysU
+aUJQDhqQkVI77VnaVjEV1GRYTS93ecaoe5aaUD6nQeuEW6foCTXf62jolMMVMIMpClgHgMT7Gut
6nOvQ2/JFIGVDJpBe0imYzGsO158NlRZR9twk+ma5lIvtdG+3jiUnkzw5JnsCT9GAG99Ht2/E4V1
1FreMO3QhIFJmp7miWv0V3WaDSITy+YPtUyiwaHw356x3ZTaVOis0308wWd0jH4BAAD//wMA11BS
pRUCAAA=
content: "{\n \"id\": \"chatcmpl-8XuEBWLslSBUMDHZRViwpoXcclYOk\",\n \"object\":
\"chat.completion\",\n \"created\": 1703091655,\n \"model\": \"gpt-4-0613\",\n
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
\"assistant\",\n \"content\": \"The human asks the AI what is 3 times
4, and the AI responds that 3 times 4 is 12.\"\n },\n \"logprobs\":
null,\n \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\":
155,\n \"completion_tokens\": 27,\n \"total_tokens\": 182\n },\n \"system_fingerprint\":
null\n}\n"
headers:
CF-Cache-Status:
- DYNAMIC
CF-RAY:
- 8532f878bc667ae2-SJC
- 838971be49500110-GRU
Cache-Control:
- no-cache, must-revalidate
Connection:
@@ -405,7 +301,7 @@ interactions:
Content-Type:
- application/json
Date:
- Sat, 10 Feb 2024 08:27:03 GMT
- Wed, 20 Dec 2023 17:00:58 GMT
Server:
- cloudflare
Transfer-Encoding:
@@ -419,7 +315,7 @@ interactions:
openai-organization:
- user-z7g4wmlazxqvc5wjyaaaocfz
openai-processing-ms:
- '1303'
- '2942'
openai-version:
- '2020-10-01'
strict-transport-security:
@@ -428,17 +324,22 @@ interactions:
- '10000'
x-ratelimit-limit-tokens:
- '300000'
x-ratelimit-limit-tokens_usage_based:
- '300000'
x-ratelimit-remaining-requests:
- '9999'
x-ratelimit-remaining-tokens:
- '299794'
x-ratelimit-remaining-tokens_usage_based:
- '299794'
x-ratelimit-reset-requests:
- 6ms
x-ratelimit-reset-tokens:
- 41ms
x-ratelimit-reset-tokens_usage_based:
- 41ms
x-request-id:
- req_c207bb1f2ff87fe1da2968c6e9b2a271
status:
code: 200
message: OK
- 546e9b3713f3ff2d7f9868133efaa3a7
http_version: HTTP/1.1
status_code: 200
version: 1

View File

@@ -1,18 +1,20 @@
interactions:
- request:
body: '{"messages": [{"role": "user", "content": "You are test role.\ntest backstory\n\nYour
personal goal is: test goalTOOLS:\n------\nYou have access to only the following
tools:\n\nmultiplier: multiplier(first_number: int, second_number: int) -> float
- Useful for when you need to multiply two numbers together.\n\nTo use a tool,
please use the exact following format:\n\n```\nThought: Do I need to use a tool?
Yes\nAction: the tool you wanna use, should be one of [multiplier], just the
name.\nAction Input: Any and all relevant information input and context for
using the tool\nObservation: the result of using the tool\n```\n\nWhen you have
a response for your task, or if you do not need to use a tool, you MUST use
the format:\n\n```\nThought: Do I need to use a tool? No\nFinal Answer: [your
response here]```This is the summary of your work so far:\nBegin! This is VERY
important to you, your job depends on it!\n\nCurrent Task: What is 3 times 4\n"}],
"model": "gpt-4", "n": 1, "stop": ["\nObservation"], "stream": false, "temperature":
personal goal is: test goal\n\nTOOLS:\n------\nYou have access to the following
tools:\n\nmultiplier: multiplier(numbers) -> float - Useful for when you need
to multiply two numbers together. \n\t\t\tThe input to this tool should be a
comma separated list of numbers of \n\t\t\tlength two, representing the two
numbers you want to multiply together. \n\t\t\tFor example, `1,2` would be the
input if you wanted to multiply 1 by 2.\n\nTo use a tool, please use the exact
following format:\n\n```\nThought: Do I need to use a tool? Yes\nAction: the
action to take, should be one of [multiplier]\nAction Input: the input to the
action\nObservation: the result of the action\n```\n\nWhen you have a response
for your task, or if you do not need to use a tool, you MUST use the format:\n\n```\nThought:
Do I need to use a tool? No\nFinal Answer: [your response here]\n```\n\t\tThis
is the summary of your work so far:\n \nBegin! This is VERY important to
you, your job depends on it!\n\nCurrent Task: What is 3 times 4\n\n"}], "model":
"gpt-4", "n": 1, "stop": ["\nObservation"], "stream": false, "temperature":
0.7}'
headers:
accept:
@@ -22,13 +24,13 @@ interactions:
connection:
- keep-alive
content-length:
- '1046'
- '1199'
content-type:
- application/json
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.12.0
- OpenAI/Python 1.6.0
x-stainless-arch:
- arm64
x-stainless-async:
@@ -38,7 +40,7 @@ interactions:
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.12.0
- 1.6.0
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
@@ -46,20 +48,18 @@ interactions:
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: !!binary |
H4sIAAAAAAAAA1SRzU7DMBCE73mK1Z5blDZNS3NBIC4cQEKCA6KoSp1t4uJ4jb2RgKrvjpz+cvFh
Zmf1zXqbAKCusABUTSmqdWZ4/aWum7ms0upx/vRyl77Wv9n0ebN5kkf2OIgJXm1IyTF1pbh1hkSz
3dvKUykUt45m6SzP0yzPe6PlikyM1U6Gk2E6HWWHRMNaUcAC3hMAgG3/RjZb0TcWkA6OSkshlDVh
cRoCQM8mKliGoIOUVnBwNhVbIdvjvjTc1Y0UcM/wAJaoAmHoAkEJwmxu4I3Cwt6qWKaAtjOindHk
jxo8WNdJAdsFrrUPsrRduyK/wAKyASwwkGJbXaiTHR5IdqcKhmvneRXr2s6Yk77WVodm6akMbCNu
EHb7+C4B+OhP1f1rj85z62Qp/Ek2LhyPR/t9eP6Vs5tlB1NYSnORyifJgRDDTxBql2tta/LO6/5y
kTPZJX8AAAD//wMAklK0SjACAAA=
content: "{\n \"id\": \"chatcmpl-8XuDxosP85Kqo6mU6biIggZ5c828i\",\n \"object\":
\"chat.completion\",\n \"created\": 1703091641,\n \"model\": \"gpt-4-0613\",\n
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
\"assistant\",\n \"content\": \"Thought: Do I need to use a tool? Yes\\nAction:
multiplier\\nAction Input: 3,4\"\n },\n \"logprobs\": null,\n \"finish_reason\":
\"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 260,\n \"completion_tokens\":
23,\n \"total_tokens\": 283\n },\n \"system_fingerprint\": null\n}\n"
headers:
CF-Cache-Status:
- DYNAMIC
CF-RAY:
- 8532a8b68e9c7e2d-SJC
- 83897166eba7a5fd-GRU
Cache-Control:
- no-cache, must-revalidate
Connection:
@@ -69,14 +69,14 @@ interactions:
Content-Type:
- application/json
Date:
- Sat, 10 Feb 2024 07:32:37 GMT
- Wed, 20 Dec 2023 17:00:44 GMT
Server:
- cloudflare
Set-Cookie:
- __cf_bm=wpgtLOAFEfhtdsHgnBqy8JDTY8RJuFoCAv0TBhjKNpM-1707550357-1-AbN7PbjPkv1+YaFhvJRGUln0mdeLoN07Da0jaHrylFFSyrOBGZuw6oareFl2seDb+J2ojofmxYA1O89GVbSBl7Y=;
path=/; expires=Sat, 10-Feb-24 08:02:37 GMT; domain=.api.openai.com; HttpOnly;
- __cf_bm=dRKr_rTtq3ZGad82s4Lpo6VOXPMLScbjq8fMvjANBpY-1703091644-1-AUDR6a/EPcG95H4He0KddFkZbk45hbZTA/BPUyFBTNiYGlzd2GIBZnPgpVOJXfr9n4lXV8jRf1bRmUJbsZnQ5MM=;
path=/; expires=Wed, 20-Dec-23 17:30:44 GMT; domain=.api.openai.com; HttpOnly;
Secure; SameSite=None
- _cfuvid=7MORsmOTD3nJggM4dWcwuDMDpxZpuk2066xvKrzWSOM-1707550357219-0-604800000;
- _cfuvid=suHEOi6nmUCq7cFZiZAg5nwyGtTeiFynig5_5V4esA8-1703091644341-0-604800000;
path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
Transfer-Encoding:
- chunked
@@ -89,7 +89,7 @@ interactions:
openai-organization:
- user-z7g4wmlazxqvc5wjyaaaocfz
openai-processing-ms:
- '1920'
- '2718'
openai-version:
- '2020-10-01'
strict-transport-security:
@@ -98,242 +98,139 @@ interactions:
- '10000'
x-ratelimit-limit-tokens:
- '300000'
x-ratelimit-remaining-requests:
- '9999'
x-ratelimit-remaining-tokens:
- '299761'
x-ratelimit-reset-requests:
- 6ms
x-ratelimit-reset-tokens:
- 47ms
x-request-id:
- req_075691c0da3ee5dc3d14d8f94e3fb169
status:
code: 200
message: OK
- request:
body: '{"messages": [{"role": "user", "content": "Return a valid schema for the
one tool you must use with its arguments and values.\n\nTools available:\n\nFuntion
Name: multiplier\nFuntion attributes: {''first_number'': {''type'': ''integer''},
''second_number'': {''type'': ''integer''}}\nDescription: multiplier(first_number:
int, second_number: int) -> float - Useful for when you need to multiply two
numbers together.\n\nUse this text to inform a valid ouput schema:\nThought:
Do I need to use a tool? Yes\nAction: multiplier\nAction Input: {\"first_number\":
3, \"second_number\": 4}\n\nThe output should be formatted as a JSON instance
that conforms to the JSON schema below.\n\nAs an example, for the schema {\"properties\":
{\"foo\": {\"title\": \"Foo\", \"description\": \"a list of strings\", \"type\":
\"array\", \"items\": {\"type\": \"string\"}}}, \"required\": [\"foo\"]}\nthe
object {\"foo\": [\"bar\", \"baz\"]} is a well-formatted instance of the schema.
The object {\"properties\": {\"foo\": [\"bar\", \"baz\"]}} is not well-formatted.\n\nHere
is the output schema:\n```\n{\"properties\": {\"function_name\": {\"title\":
\"Function Name\", \"description\": \"The name of the function to be called.\",
\"type\": \"string\"}, \"arguments\": {\"title\": \"Arguments\", \"description\":
\"A dictinary of arguments to be passed to the function.\", \"type\": \"object\"}},
\"required\": [\"function_name\", \"arguments\"]}\n```\n```"}], "model": "gpt-4",
"n": 1, "stream": false, "temperature": 0.7}'
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate
connection:
- keep-alive
content-length:
- '1494'
content-type:
- application/json
cookie:
- __cf_bm=wpgtLOAFEfhtdsHgnBqy8JDTY8RJuFoCAv0TBhjKNpM-1707550357-1-AbN7PbjPkv1+YaFhvJRGUln0mdeLoN07Da0jaHrylFFSyrOBGZuw6oareFl2seDb+J2ojofmxYA1O89GVbSBl7Y=;
_cfuvid=7MORsmOTD3nJggM4dWcwuDMDpxZpuk2066xvKrzWSOM-1707550357219-0-604800000
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.12.0
x-stainless-arch:
- arm64
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.12.0
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.11.7
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: !!binary |
H4sIAAAAAAAAA1SRzW7bMBCE73qKBc9y4UR2beiWAgFyKnLKJQpkmlpJdMldlVw1TQ29e0H5L7no
sDPfanZ4zACUbVQJyvRajB/cYvvbbA/tn+ft6uH5x7/486l76ewjv+/50D6oPBG8P6CRC/XNsB8c
imU6ySagFkxb7zbLzXq9LNabWfDcoEtYN8hitVh+vyvORM/WYFQlvGYAAMf5m7JRg39VCcv8MvEY
o+5QlVcTgArs0kTpGG0UTaLym2iYBGmO+4QBwUbQ8I7OLVoOXotgA5YSZhC4BekRounR67Kiina7
XUXHigAq1Y5k0p01aY+VKqFSfnRiB2cxVCo/uXToRo8kMTlmcmZtiFLT6PfJWkKRX5SIhqn5JK2S
MlU0zX9X51OmaweOuyHwPvVFo3PXeWvJxr4OqCNTujcKDyd8ygDe5q7HL/WpIbAfpBb+hZQWFvfr
0z51e9abutqeRWHR7hO1KbJzQhU/oqCvW0sdhiHYufqUM5uy/wAAAP//AwDgghcycQIAAA==
headers:
CF-Cache-Status:
- DYNAMIC
CF-RAY:
- 8532a8c4db427e2d-SJC
Cache-Control:
- no-cache, must-revalidate
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Sat, 10 Feb 2024 07:32:39 GMT
Server:
- cloudflare
Transfer-Encoding:
- chunked
access-control-allow-origin:
- '*'
alt-svc:
- h3=":443"; ma=86400
openai-model:
- gpt-4-0613
openai-organization:
- user-z7g4wmlazxqvc5wjyaaaocfz
openai-processing-ms:
- '2563'
openai-version:
- '2020-10-01'
strict-transport-security:
- max-age=15724800; includeSubDomains
x-ratelimit-limit-requests:
- '10000'
x-ratelimit-limit-tokens:
x-ratelimit-limit-tokens_usage_based:
- '300000'
x-ratelimit-remaining-requests:
- '9999'
x-ratelimit-remaining-tokens:
- '299661'
x-ratelimit-reset-requests:
- 6ms
x-ratelimit-reset-tokens:
- 67ms
x-request-id:
- req_bf33bff9b62213c75c624fc2dcbf5d7b
status:
code: 200
message: OK
- request:
body: '{"messages": [{"role": "user", "content": "You are test role.\ntest backstory\n\nYour
personal goal is: test goalTOOLS:\n------\nYou have access to only the following
tools:\n\nmultiplier: multiplier(first_number: int, second_number: int) -> float
- Useful for when you need to multiply two numbers together.\n\nTo use a tool,
please use the exact following format:\n\n```\nThought: Do I need to use a tool?
Yes\nAction: the tool you wanna use, should be one of [multiplier], just the
name.\nAction Input: Any and all relevant information input and context for
using the tool\nObservation: the result of using the tool\n```\n\nWhen you have
a response for your task, or if you do not need to use a tool, you MUST use
the format:\n\n```\nThought: Do I need to use a tool? No\nFinal Answer: [your
response here]```This is the summary of your work so far:\nBegin! This is VERY
important to you, your job depends on it!\n\nCurrent Task: What is 3 times 4\nThought:
Do I need to use a tool? Yes\nAction: multiplier\nAction Input: {\"first_number\":
3, \"second_number\": 4}\nObservation: 12\nThought: "}], "model": "gpt-4", "n":
1, "stop": ["\nObservation"], "stream": false, "temperature": 0.7}'
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate
connection:
- keep-alive
content-length:
- '1190'
content-type:
- application/json
cookie:
- __cf_bm=wpgtLOAFEfhtdsHgnBqy8JDTY8RJuFoCAv0TBhjKNpM-1707550357-1-AbN7PbjPkv1+YaFhvJRGUln0mdeLoN07Da0jaHrylFFSyrOBGZuw6oareFl2seDb+J2ojofmxYA1O89GVbSBl7Y=;
_cfuvid=7MORsmOTD3nJggM4dWcwuDMDpxZpuk2066xvKrzWSOM-1707550357219-0-604800000
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.12.0
x-stainless-arch:
- arm64
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.12.0
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.11.7
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: !!binary |
H4sIAAAAAAAAA1SQzU7DMBCE73mKlc9tlSb9Uy4IqJAqRI9cAFVusklNba+xN6JQ9d2R09DCxYeZ
/caze0wAhKpEAaLcSS6N08PFR7kwTWaau/ZpzXvm+9Vzo+rHQ71cf4tBJGj7jiX/UqOSjNPIiuzZ
Lj1Kxpg6nqfz6TTNZ2lnGKpQR6xxPJwM09k474kdqRKDKOAlAQA4dm/sZis8iAI6vlMMhiAbFMVl
CEB40lERMgQVWFoWg6tZkmW0Xd0lwQosYgVM0AYECUykb2BNr/ZBWanh1oZP9AXkwMpggAmoAONs
JPrA06WJpsZ52sbWttX6otfKqrDbeJSBbPw1MLkzfkoA3rqN239LCOfJON4w7dHGwGyWn/PE9bh/
3Kw3mVjqP/pimvQNRfgKjGZTK9ugd151B4g9k1PyAwAA//8DAB+YxMT3AQAA
headers:
CF-Cache-Status:
- DYNAMIC
CF-RAY:
- 8532a8d5b9927e2d-SJC
Cache-Control:
- no-cache, must-revalidate
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Sat, 10 Feb 2024 07:32:41 GMT
Server:
- cloudflare
Transfer-Encoding:
- chunked
access-control-allow-origin:
- '*'
alt-svc:
- h3=":443"; ma=86400
openai-model:
- gpt-4-0613
openai-organization:
- user-z7g4wmlazxqvc5wjyaaaocfz
openai-processing-ms:
- '1355'
openai-version:
- '2020-10-01'
strict-transport-security:
- max-age=15724800; includeSubDomains
x-ratelimit-limit-requests:
- '10000'
x-ratelimit-limit-tokens:
- '300000'
x-ratelimit-remaining-requests:
- '9999'
x-ratelimit-remaining-tokens:
- '299728'
- '299729'
x-ratelimit-remaining-tokens_usage_based:
- '299729'
x-ratelimit-reset-requests:
- 6ms
x-ratelimit-reset-tokens:
- 54ms
x-ratelimit-reset-tokens_usage_based:
- 54ms
x-request-id:
- req_5b85d3e382222bb334b58fa52fbd2152
status:
code: 200
message: OK
- 1714a9f5a2141d30f72506facf616944
http_version: HTTP/1.1
status_code: 200
- request:
body: '{"messages": [{"role": "user", "content": "You are test role.\ntest backstory\n\nYour
personal goal is: test goal\n\nTOOLS:\n------\nYou have access to the following
tools:\n\nmultiplier: multiplier(numbers) -> float - Useful for when you need
to multiply two numbers together. \n\t\t\tThe input to this tool should be a
comma separated list of numbers of \n\t\t\tlength two, representing the two
numbers you want to multiply together. \n\t\t\tFor example, `1,2` would be the
input if you wanted to multiply 1 by 2.\n\nTo use a tool, please use the exact
following format:\n\n```\nThought: Do I need to use a tool? Yes\nAction: the
action to take, should be one of [multiplier]\nAction Input: the input to the
action\nObservation: the result of the action\n```\n\nWhen you have a response
for your task, or if you do not need to use a tool, you MUST use the format:\n\n```\nThought:
Do I need to use a tool? No\nFinal Answer: [your response here]\n```\n\t\tThis
is the summary of your work so far:\n \nBegin! This is VERY important to
you, your job depends on it!\n\nCurrent Task: What is 3 times 4\nThought: Do
I need to use a tool? Yes\nAction: multiplier\nAction Input: 3,4\nObservation:
12\nThought: \n"}], "model": "gpt-4", "n": 1, "stop": ["\nObservation"], "stream":
false, "temperature": 0.7}'
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate
connection:
- keep-alive
content-length:
- '1303'
content-type:
- application/json
cookie:
- __cf_bm=dRKr_rTtq3ZGad82s4Lpo6VOXPMLScbjq8fMvjANBpY-1703091644-1-AUDR6a/EPcG95H4He0KddFkZbk45hbZTA/BPUyFBTNiYGlzd2GIBZnPgpVOJXfr9n4lXV8jRf1bRmUJbsZnQ5MM=;
_cfuvid=suHEOi6nmUCq7cFZiZAg5nwyGtTeiFynig5_5V4esA8-1703091644341-0-604800000
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.6.0
x-stainless-arch:
- arm64
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.6.0
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.11.7
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
content: "{\n \"id\": \"chatcmpl-8XuE0tWsbI9QDkRau6rzSUZfuqhFN\",\n \"object\":
\"chat.completion\",\n \"created\": 1703091644,\n \"model\": \"gpt-4-0613\",\n
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
\"assistant\",\n \"content\": \"Do I need to use a tool? No\\nFinal Answer:
12\"\n },\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\n
\ }\n ],\n \"usage\": {\n \"prompt_tokens\": 293,\n \"completion_tokens\":
15,\n \"total_tokens\": 308\n },\n \"system_fingerprint\": null\n}\n"
headers:
CF-Cache-Status:
- DYNAMIC
CF-RAY:
- 838971796af6a5fd-GRU
Cache-Control:
- no-cache, must-revalidate
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Wed, 20 Dec 2023 17:00:46 GMT
Server:
- cloudflare
Transfer-Encoding:
- chunked
access-control-allow-origin:
- '*'
alt-svc:
- h3=":443"; ma=86400
openai-model:
- gpt-4-0613
openai-organization:
- user-z7g4wmlazxqvc5wjyaaaocfz
openai-processing-ms:
- '2355'
openai-version:
- '2020-10-01'
strict-transport-security:
- max-age=15724800; includeSubDomains
x-ratelimit-limit-requests:
- '10000'
x-ratelimit-limit-tokens:
- '300000'
x-ratelimit-limit-tokens_usage_based:
- '300000'
x-ratelimit-remaining-requests:
- '9999'
x-ratelimit-remaining-tokens:
- '299704'
x-ratelimit-remaining-tokens_usage_based:
- '299704'
x-ratelimit-reset-requests:
- 6ms
x-ratelimit-reset-tokens:
- 59ms
x-ratelimit-reset-tokens_usage_based:
- 59ms
x-request-id:
- a3de9d34f17496d9bdd2ae9360f6054a
http_version: HTTP/1.1
status_code: 200
- request:
body: '{"messages": [{"role": "user", "content": "Progressively summarize the
lines of conversation provided, adding onto the previous summary returning a
@@ -345,8 +242,8 @@ interactions:
the AI thinks of artificial intelligence. The AI thinks artificial intelligence
is a force for good because it will help humans reach their full potential.\nEND
OF EXAMPLE\n\nCurrent summary:\n\n\nNew lines of conversation:\nHuman: What
is 3 times 4\nAI: 3 times 4 is 12.\n\nNew summary:"}], "model": "gpt-4", "n":
1, "stream": false, "temperature": 0.7}'
is 3 times 4\nAI: 12\n\nNew summary:"}], "model": "gpt-4", "n": 1, "stream":
false, "temperature": 0.7}'
headers:
accept:
- application/json
@@ -355,16 +252,16 @@ interactions:
connection:
- keep-alive
content-length:
- '885'
- '871'
content-type:
- application/json
cookie:
- __cf_bm=wpgtLOAFEfhtdsHgnBqy8JDTY8RJuFoCAv0TBhjKNpM-1707550357-1-AbN7PbjPkv1+YaFhvJRGUln0mdeLoN07Da0jaHrylFFSyrOBGZuw6oareFl2seDb+J2ojofmxYA1O89GVbSBl7Y=;
_cfuvid=7MORsmOTD3nJggM4dWcwuDMDpxZpuk2066xvKrzWSOM-1707550357219-0-604800000
- __cf_bm=dRKr_rTtq3ZGad82s4Lpo6VOXPMLScbjq8fMvjANBpY-1703091644-1-AUDR6a/EPcG95H4He0KddFkZbk45hbZTA/BPUyFBTNiYGlzd2GIBZnPgpVOJXfr9n4lXV8jRf1bRmUJbsZnQ5MM=;
_cfuvid=suHEOi6nmUCq7cFZiZAg5nwyGtTeiFynig5_5V4esA8-1703091644341-0-604800000
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.12.0
- OpenAI/Python 1.6.0
x-stainless-arch:
- arm64
x-stainless-async:
@@ -374,7 +271,7 @@ interactions:
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.12.0
- 1.6.0
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
@@ -382,19 +279,18 @@ interactions:
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: !!binary |
H4sIAAAAAAAAA1xQy07CQBTd9ytuZl1MH5SS7owrN7rQRNEYMrSXdmBe9l4ihvDvZkoBdTOL85pz
zyECEKoRFYi6k1wbryfzz3pu34qX8u4hN8lTlsj9qywWm0fqFomIg8OtNljz2XVTO+M1snL2RNc9
SsaQmpZJWRRJPksHwrgGdbC1nifTSTJL89HROVUjiQreIwCAw/CGbrbBvaggic+IQSLZoqguIgDR
Ox0QIYkUsbQs4itZO8toh7rPHUK3M9KCpC0Bdwi39/DVSYYcWBkkmIKiGKRtzmyP5J1tgvqfDNLs
Roz/HC8FtWt971bhGLvT+oKvlVXULXuU5GwoQ+z8yX6MAD6GIXZ/bhO+d8bzkt0WbQhMi+KUJ66b
X9msHEl2LPUv1zyLxoaCvonRLNfKttj7Xg27hJ7RMfoBAAD//wMAZCh3Sw4CAAA=
content: "{\n \"id\": \"chatcmpl-8XuE3Di6FRdNAetXNfWs6OWyRAfkf\",\n \"object\":
\"chat.completion\",\n \"created\": 1703091647,\n \"model\": \"gpt-4-0613\",\n
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
\"assistant\",\n \"content\": \"The human asks the AI what is 3 times
4 and the AI responds with 12.\"\n },\n \"logprobs\": null,\n \"finish_reason\":
\"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 149,\n \"completion_tokens\":
20,\n \"total_tokens\": 169\n },\n \"system_fingerprint\": null\n}\n"
headers:
CF-Cache-Status:
- DYNAMIC
CF-RAY:
- 8532a8df99427e2d-SJC
- 838971899954a5fd-GRU
Cache-Control:
- no-cache, must-revalidate
Connection:
@@ -404,7 +300,7 @@ interactions:
Content-Type:
- application/json
Date:
- Sat, 10 Feb 2024 07:32:43 GMT
- Wed, 20 Dec 2023 17:00:49 GMT
Server:
- cloudflare
Transfer-Encoding:
@@ -418,7 +314,7 @@ interactions:
openai-organization:
- user-z7g4wmlazxqvc5wjyaaaocfz
openai-processing-ms:
- '1805'
- '2698'
openai-version:
- '2020-10-01'
strict-transport-security:
@@ -427,17 +323,22 @@ interactions:
- '10000'
x-ratelimit-limit-tokens:
- '300000'
x-ratelimit-limit-tokens_usage_based:
- '300000'
x-ratelimit-remaining-requests:
- '9999'
x-ratelimit-remaining-tokens:
- '299794'
- '299798'
x-ratelimit-remaining-tokens_usage_based:
- '299798'
x-ratelimit-reset-requests:
- 6ms
x-ratelimit-reset-tokens:
- 41ms
- 40ms
x-ratelimit-reset-tokens_usage_based:
- 40ms
x-request-id:
- req_c13cb4147b01d222d16b93ac893a895d
status:
code: 200
message: OK
- ddbd97cea4ec099c21c00ca922157ae1
http_version: HTTP/1.1
status_code: 200
version: 1

View File

@@ -1,20 +1,19 @@
interactions:
- request:
body: '{"messages": [{"role": "user", "content": "You are test role.\ntest backstory\n\nYour
personal goal is: test goalTOOLS:\n------\nYou have access to only the following
personal goal is: test goalTOOLS:\n------\nYou have access to the following
tools:\n\nget_final_answer: get_final_answer(numbers) -> float - Get the final
answer but don''t give it yet, just re-use this\n tool non-stop.\n\nTo
use a tool, please use the exact following format:\n\n```\nThought: Do I need
to use a tool? Yes\nAction: the tool you wanna use, should be one of [get_final_answer],
just the name.\nAction Input: Any and all relevant information input and context
for using the tool\nObservation: the result of using the tool\n```\n\nWhen you
have a response for your task, or if you do not need to use a tool, you MUST
use the format:\n\n```\nThought: Do I need to use a tool? No\nFinal Answer:
[your response here]```This is the summary of your work so far:\nBegin! This
is VERY important to you, your job depends on it!\n\nCurrent Task: The final
answer is 42. But don''t give it yet, instead keep using the `get_final_answer`
tool.\n"}], "model": "gpt-4", "n": 1, "stop": ["\nObservation"], "stream": false,
"temperature": 0.7}'
to use a tool? Yes\nAction: the action to take, should be one of [get_final_answer],
just the name.\nAction Input: the input to the action\nObservation: the result
of the action\n```\n\nWhen you have a response for your task, or if you do not
need to use a tool, you MUST use the format:\n\n```\nThought: Do I need to use
a tool? No\nFinal Answer: [your response here]This is the summary of your work
so far:\nBegin! This is VERY important to you, your job depends on it!\n\nCurrent
Task: The final answer is 42. But don''t give it yet, instead keep using the
`get_final_answer` tool.\n"}], "model": "gpt-4", "n": 1, "stop": ["\nObservation"],
"stream": false, "temperature": 0.7}'
headers:
accept:
- application/json
@@ -23,13 +22,13 @@ interactions:
connection:
- keep-alive
content-length:
- '1137'
- '1075'
content-type:
- application/json
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.12.0
- OpenAI/Python 1.7.1
x-stainless-arch:
- arm64
x-stainless-async:
@@ -39,7 +38,7 @@ interactions:
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.12.0
- 1.7.1
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
@@ -47,19 +46,19 @@ interactions:
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: !!binary |
H4sIAAAAAAAAA1RQy07DMBC85ytWPreoTdMWckGIcugBxKECIUCV62wTF8dr7A0PVf135DR9cPFh
Zmc8M9sEQOhC5CBUJVnVzvQvP4ur9LEZfzXyeXH38rBxiydarSez9P5WiV5U0GqDig+qC0W1M8ia
7J5WHiVjdB1OB9PxOEvTQUvUVKCJstJxP+sPJsNRp6hIKwwih9cEAGDbvjGbLfBH5NDqW6TGEGSJ
Ij8eAQhPJiJChqADS8uidyIVWUbbxl1U1JQV5zAjmINFLIAJmoAggYnMNbxgeLM3KpbJoURerrWV
Zilt+EZ/YGBuXcM5ZKnovtkd8xkqnadV7GIbY474WlsdqqVHGcjGLIHJ7eW7BOC93aH5V004T7Xj
JdMH2miYZtneT5wmP2NHHcnE0pzhk2nSJRThNzDWsVSJ3nndzhJzJrvkDwAA//8DACdeRXQNAgAA
content: "{\n \"id\": \"chatcmpl-8frBTCWXULTV5ZYHy3Y5JXKovrKiN\",\n \"object\":
\"chat.completion\",\n \"created\": 1704986579,\n \"model\": \"gpt-4-0613\",\n
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
\"assistant\",\n \"content\": \"Thought: Do I need to use a tool? Yes\\nAction:
get_final_answer\\nAction Input: [42]\"\n },\n \"logprobs\": null,\n
\ \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\":
233,\n \"completion_tokens\": 24,\n \"total_tokens\": 257\n },\n \"system_fingerprint\":
null\n}\n"
headers:
CF-Cache-Status:
- DYNAMIC
CF-RAY:
- 85330710af4f175e-SJC
- 843e2886ceca1d23-GRU
Cache-Control:
- no-cache, must-revalidate
Connection:
@@ -69,14 +68,14 @@ interactions:
Content-Type:
- application/json
Date:
- Sat, 10 Feb 2024 08:37:01 GMT
- Thu, 11 Jan 2024 15:23:03 GMT
Server:
- cloudflare
Set-Cookie:
- __cf_bm=MMmGgclX.vkPuwUu.Rh1biZEzuNbjCK4LPqnJLPgXTU-1707554221-1-ATQv7SjX2ImF49itFjTFEhz9QuQKNd98zq0RcvMmPyo/RQZZvJ5nw0lfAfAJOe89QhC43RIyfAa1zjbn5MclqP0=;
path=/; expires=Sat, 10-Feb-24 09:07:01 GMT; domain=.api.openai.com; HttpOnly;
- __cf_bm=GdfwhILXB4b01GTJ_0AXbmROnfxzuPlJLxFJLh4vT8s-1704986583-1-AVb+x5LLEXeeVIiDv7ug/2lnD4qFsyXri+Vg04LYp0s2eK+KH8sGMWHpPzgzKOu9sf3rVi7Fl2OOuY7+OjbUYY8=;
path=/; expires=Thu, 11-Jan-24 15:53:03 GMT; domain=.api.openai.com; HttpOnly;
Secure; SameSite=None
- _cfuvid=FVFxQ612sbA.HZ6jhsUmvIxmSbsCgqRCy2rTl9a8XAI-1707554221466-0-604800000;
- _cfuvid=kdwpHybL9TBve9Df7KLsRqp49GrJ05.atUaH_t6plL0-1704986583862-0-604800000;
path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
Transfer-Encoding:
- chunked
@@ -89,7 +88,7 @@ interactions:
openai-organization:
- user-z7g4wmlazxqvc5wjyaaaocfz
openai-processing-ms:
- '1517'
- '4424'
openai-version:
- '2020-10-01'
strict-transport-security:
@@ -101,145 +100,30 @@ interactions:
x-ratelimit-remaining-requests:
- '9999'
x-ratelimit-remaining-tokens:
- '299739'
- '299755'
x-ratelimit-reset-requests:
- 6ms
x-ratelimit-reset-tokens:
- 52ms
- 49ms
x-request-id:
- req_b962be8c11abe80a762fc1e1d5c0fb5d
status:
code: 200
message: OK
- request:
body: '{"messages": [{"role": "user", "content": "Return a valid schema for the
one tool you must use with its arguments and values.\n\nTools available:\n\nFuntion
Name: get_final_answer\nFuntion attributes: {''numbers'': {}}\nDescription:
get_final_answer(numbers) -> float - Get the final answer but don''t give it
yet, just re-use this\n tool non-stop.\n\nUse this text to inform a valid
ouput schema:\nThought: Do I need to use a tool? Yes\nAction: get_final_answer\nAction
Input: 42\n\nThe output should be formatted as a JSON instance that conforms
to the JSON schema below.\n\nAs an example, for the schema {\"properties\":
{\"foo\": {\"title\": \"Foo\", \"description\": \"a list of strings\", \"type\":
\"array\", \"items\": {\"type\": \"string\"}}}, \"required\": [\"foo\"]}\nthe
object {\"foo\": [\"bar\", \"baz\"]} is a well-formatted instance of the schema.
The object {\"properties\": {\"foo\": [\"bar\", \"baz\"]}} is not well-formatted.\n\nHere
is the output schema:\n```\n{\"properties\": {\"function_name\": {\"title\":
\"Function Name\", \"description\": \"The name of the function to be called.\",
\"type\": \"string\"}, \"arguments\": {\"title\": \"Arguments\", \"description\":
\"A dictinary of arguments to be passed to the function.\", \"type\": \"object\"}},
\"required\": [\"function_name\", \"arguments\"]}\n```\n```"}], "model": "gpt-4",
"n": 1, "stream": false, "temperature": 0.7}'
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate
connection:
- keep-alive
content-length:
- '1407'
content-type:
- application/json
cookie:
- __cf_bm=MMmGgclX.vkPuwUu.Rh1biZEzuNbjCK4LPqnJLPgXTU-1707554221-1-ATQv7SjX2ImF49itFjTFEhz9QuQKNd98zq0RcvMmPyo/RQZZvJ5nw0lfAfAJOe89QhC43RIyfAa1zjbn5MclqP0=;
_cfuvid=FVFxQ612sbA.HZ6jhsUmvIxmSbsCgqRCy2rTl9a8XAI-1707554221466-0-604800000
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.12.0
x-stainless-arch:
- arm64
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.12.0
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.11.7
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: !!binary |
H4sIAAAAAAAAA1SRzZLTMBCE736KqTknlBM7G/CRy24KKC4c+PGWo8hjW6w0MtKYZSuVd6fkZBO4
+NAz37i7dcwA0LRYAepBiXajXb791b4rdl8/5Hdf3n/+xJtitds95fcfv/8uvr3gIhH+8JO0vFJv
tHejJTGez2MdSAmlq6ttvt1syvV6NQ+cb8kmrB9lWS7zu1VxIQZvNEWs4EcGAHCcv8kbt/QHK8gX
r4qjGFVPWF2XADB4mxRUMZooigUXt6H2LMSz3QcKBCaCgmeydtn54JQItWA4YZrAdyADQdQDOVXV
XPN+v6/5WDNAjd3EOuVsWDmqsYIae5KmM6xsozg+U6hxcd5VoZ8cscS0N/NJ5ckdKMxauU7iqebT
/BO8OD5do1rfj8EfUi08WXvVO8MmDk0gFT2nWFH8eMZPGcDjXOn0X0s4Bu9GacQ/EaeDRb4938Pb
692m5aVvFC/K/kOV2+ziEONLFHIpe09hDGZuOPnMTtlfAAAA//8DABO023tYAgAA
headers:
CF-Cache-Status:
- DYNAMIC
CF-RAY:
- 8533071c994c175e-SJC
Cache-Control:
- no-cache, must-revalidate
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Sat, 10 Feb 2024 08:37:04 GMT
Server:
- cloudflare
Transfer-Encoding:
- chunked
access-control-allow-origin:
- '*'
alt-svc:
- h3=":443"; ma=86400
openai-model:
- gpt-4-0613
openai-organization:
- user-z7g4wmlazxqvc5wjyaaaocfz
openai-processing-ms:
- '2139'
openai-version:
- '2020-10-01'
strict-transport-security:
- max-age=15724800; includeSubDomains
x-ratelimit-limit-requests:
- '10000'
x-ratelimit-limit-tokens:
- '300000'
x-ratelimit-remaining-requests:
- '9999'
x-ratelimit-remaining-tokens:
- '299683'
x-ratelimit-reset-requests:
- 6ms
x-ratelimit-reset-tokens:
- 63ms
x-request-id:
- req_f69fc36b19e66ed7d71a65ac81624ef5
status:
code: 200
message: OK
- 76974d365254ca84f70c43fc31af3378
http_version: HTTP/1.1
status_code: 200
- request:
body: '{"messages": [{"role": "user", "content": "You are test role.\ntest backstory\n\nYour
personal goal is: test goalTOOLS:\n------\nYou have access to only the following
personal goal is: test goalTOOLS:\n------\nYou have access to the following
tools:\n\nget_final_answer: get_final_answer(numbers) -> float - Get the final
answer but don''t give it yet, just re-use this\n tool non-stop.\n\nTo
use a tool, please use the exact following format:\n\n```\nThought: Do I need
to use a tool? Yes\nAction: the tool you wanna use, should be one of [get_final_answer],
just the name.\nAction Input: Any and all relevant information input and context
for using the tool\nObservation: the result of using the tool\n```\n\nWhen you
have a response for your task, or if you do not need to use a tool, you MUST
use the format:\n\n```\nThought: Do I need to use a tool? No\nFinal Answer:
[your response here]```This is the summary of your work so far:\nBegin! This
is VERY important to you, your job depends on it!\n\nCurrent Task: The final
answer is 42. But don''t give it yet, instead keep using the `get_final_answer`
tool.\nThought: Do I need to use a tool? Yes\nAction: get_final_answer\nAction
Input: 42\nObservation: 42\nThought: "}], "model": "gpt-4", "n": 1, "stop":
to use a tool? Yes\nAction: the action to take, should be one of [get_final_answer],
just the name.\nAction Input: the input to the action\nObservation: the result
of the action\n```\n\nWhen you have a response for your task, or if you do not
need to use a tool, you MUST use the format:\n\n```\nThought: Do I need to use
a tool? No\nFinal Answer: [your response here]This is the summary of your work
so far:\nBegin! This is VERY important to you, your job depends on it!\n\nCurrent
Task: The final answer is 42. But don''t give it yet, instead keep using the
`get_final_answer` tool.\nThought: Do I need to use a tool? Yes\nAction: get_final_answer\nAction
Input: [42]\nObservation: 42\nThought: "}], "model": "gpt-4", "n": 1, "stop":
["\nObservation"], "stream": false, "temperature": 0.7}'
headers:
accept:
@@ -249,16 +133,16 @@ interactions:
connection:
- keep-alive
content-length:
- '1246'
- '1186'
content-type:
- application/json
cookie:
- __cf_bm=MMmGgclX.vkPuwUu.Rh1biZEzuNbjCK4LPqnJLPgXTU-1707554221-1-ATQv7SjX2ImF49itFjTFEhz9QuQKNd98zq0RcvMmPyo/RQZZvJ5nw0lfAfAJOe89QhC43RIyfAa1zjbn5MclqP0=;
_cfuvid=FVFxQ612sbA.HZ6jhsUmvIxmSbsCgqRCy2rTl9a8XAI-1707554221466-0-604800000
- __cf_bm=GdfwhILXB4b01GTJ_0AXbmROnfxzuPlJLxFJLh4vT8s-1704986583-1-AVb+x5LLEXeeVIiDv7ug/2lnD4qFsyXri+Vg04LYp0s2eK+KH8sGMWHpPzgzKOu9sf3rVi7Fl2OOuY7+OjbUYY8=;
_cfuvid=kdwpHybL9TBve9Df7KLsRqp49GrJ05.atUaH_t6plL0-1704986583862-0-604800000
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.12.0
- OpenAI/Python 1.7.1
x-stainless-arch:
- arm64
x-stainless-async:
@@ -268,7 +152,7 @@ interactions:
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.12.0
- 1.7.1
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
@@ -276,19 +160,19 @@ interactions:
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: !!binary |
H4sIAAAAAAAAA1SQzU7DMBCE73mKlc8tSkLatLkgJIRUIS6ISwsocpNtEnC8rr0RharvjpymP1x8
mPGMvtl9ACCaUmQgilpy0Ro1nm3L+dStntOX19+wfVy13XJWh0/RptxtrRj5BK0/seBT6qag1ijk
hvTRLixKRt8apWE6mSRxnPRGSyUqH6sMj5NxOI1uh0RNTYFOZPAWAADs+9ez6RJ3IoNwdFJadE5W
KLLzJwBhSXlFSOcax1KzGF3MgjSj7nEfCBagEUtggs4hSGAidQdLdO/6vvAbMqiQ802jpcqldt9o
Tw4stOk4gyQWQ/vhjKWoMpbWfoLulDrrm0Y3rs4tSkfaIzgmc4wfAoCPfn73b5EwllrDOdMXal8Y
p+mxT1wufeVGg8nEUl3p81kwEAr34xhbP6pCa2zTX8NzBofgDwAA//8DAA3DUnYEAgAA
content: "{\n \"id\": \"chatcmpl-8frBYkyVPtJuAJCESaOxEBg3UAfl4\",\n \"object\":
\"chat.completion\",\n \"created\": 1704986584,\n \"model\": \"gpt-4-0613\",\n
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
\"assistant\",\n \"content\": \"Do I need to use a tool? Yes\\nAction:
get_final_answer\\nAction Input: [42]\"\n },\n \"logprobs\": null,\n
\ \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\":
266,\n \"completion_tokens\": 22,\n \"total_tokens\": 288\n },\n \"system_fingerprint\":
null\n}\n"
headers:
CF-Cache-Status:
- DYNAMIC
CF-RAY:
- 8533072c9e37175e-SJC
- 843e28a57d911d23-GRU
Cache-Control:
- no-cache, must-revalidate
Connection:
@@ -298,7 +182,7 @@ interactions:
Content-Type:
- application/json
Date:
- Sat, 10 Feb 2024 08:37:06 GMT
- Thu, 11 Jan 2024 15:23:07 GMT
Server:
- cloudflare
Transfer-Encoding:
@@ -312,7 +196,7 @@ interactions:
openai-organization:
- user-z7g4wmlazxqvc5wjyaaaocfz
openai-processing-ms:
- '2066'
- '3329'
openai-version:
- '2020-10-01'
strict-transport-security:
@@ -324,37 +208,34 @@ interactions:
x-ratelimit-remaining-requests:
- '9999'
x-ratelimit-remaining-tokens:
- '299712'
- '299728'
x-ratelimit-reset-requests:
- 6ms
x-ratelimit-reset-tokens:
- 57ms
- 54ms
x-request-id:
- req_e6cc60e6a53e65c41d1f4f36432d3be4
status:
code: 200
message: OK
- 1b9a1e09f863ff69cecfe4e7bed0aee5
http_version: HTTP/1.1
status_code: 200
- request:
body: '{"messages": [{"role": "user", "content": "You are test role.\ntest backstory\n\nYour
personal goal is: test goalTOOLS:\n------\nYou have access to only the following
personal goal is: test goalTOOLS:\n------\nYou have access to the following
tools:\n\nget_final_answer: get_final_answer(numbers) -> float - Get the final
answer but don''t give it yet, just re-use this\n tool non-stop.\n\nTo
use a tool, please use the exact following format:\n\n```\nThought: Do I need
to use a tool? Yes\nAction: the tool you wanna use, should be one of [get_final_answer],
just the name.\nAction Input: Any and all relevant information input and context
for using the tool\nObservation: the result of using the tool\n```\n\nWhen you
have a response for your task, or if you do not need to use a tool, you MUST
use the format:\n\n```\nThought: Do I need to use a tool? No\nFinal Answer:
[your response here]```This is the summary of your work so far:\nBegin! This
is VERY important to you, your job depends on it!\n\nCurrent Task: The final
answer is 42. But don''t give it yet, instead keep using the `get_final_answer`
tool.\nThought: Do I need to use a tool? Yes\nAction: get_final_answer\nAction
Input: 42\nObservation: 42\nThought: Do I need to use a tool? Yes\nAction: get_final_answer\nAction
Input: 42\nObservation: Actually, I used too many tools, so I''ll stop now and
give you my absolute BEST Final answer NOW, using the expected format: ```\nThought:
Do I need to use a tool? No\nFinal Answer: [your response here]```\nThought:
"}], "model": "gpt-4", "n": 1, "stop": ["\nObservation"], "stream": false, "temperature":
0.7}'
to use a tool? Yes\nAction: the action to take, should be one of [get_final_answer],
just the name.\nAction Input: the input to the action\nObservation: the result
of the action\n```\n\nWhen you have a response for your task, or if you do not
need to use a tool, you MUST use the format:\n\n```\nThought: Do I need to use
a tool? No\nFinal Answer: [your response here]This is the summary of your work
so far:\nBegin! This is VERY important to you, your job depends on it!\n\nCurrent
Task: The final answer is 42. But don''t give it yet, instead keep using the
`get_final_answer` tool.\nThought: Do I need to use a tool? Yes\nAction: get_final_answer\nAction
Input: [42]\nObservation: 42\nThought: Do I need to use a tool? Yes\nAction:
get_final_answer\nAction Input: [42]\nObservation: I''ve used too many tools
for this task.\nI''m going to give you my absolute BEST Final answer now and\nnot
use any more tools.\nThought: "}], "model": "gpt-4", "n": 1, "stop": ["\nObservation"],
"stream": false, "temperature": 0.7}'
headers:
accept:
- application/json
@@ -363,16 +244,16 @@ interactions:
connection:
- keep-alive
content-length:
- '1549'
- '1411'
content-type:
- application/json
cookie:
- __cf_bm=MMmGgclX.vkPuwUu.Rh1biZEzuNbjCK4LPqnJLPgXTU-1707554221-1-ATQv7SjX2ImF49itFjTFEhz9QuQKNd98zq0RcvMmPyo/RQZZvJ5nw0lfAfAJOe89QhC43RIyfAa1zjbn5MclqP0=;
_cfuvid=FVFxQ612sbA.HZ6jhsUmvIxmSbsCgqRCy2rTl9a8XAI-1707554221466-0-604800000
- __cf_bm=GdfwhILXB4b01GTJ_0AXbmROnfxzuPlJLxFJLh4vT8s-1704986583-1-AVb+x5LLEXeeVIiDv7ug/2lnD4qFsyXri+Vg04LYp0s2eK+KH8sGMWHpPzgzKOu9sf3rVi7Fl2OOuY7+OjbUYY8=;
_cfuvid=kdwpHybL9TBve9Df7KLsRqp49GrJ05.atUaH_t6plL0-1704986583862-0-604800000
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.12.0
- OpenAI/Python 1.7.1
x-stainless-arch:
- arm64
x-stainless-async:
@@ -382,7 +263,7 @@ interactions:
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.12.0
- 1.7.1
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
@@ -390,20 +271,19 @@ interactions:
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: !!binary |
H4sIAAAAAAAAA1RRyW7CMBC95ytGvnABxL7kUlVqK4G63LsIGWdITB1Pag8FhPj3yg5bLz68ZfTe
8yEBEDoTKQhVSFZlZVqTn2w60f3N/G3+8r4bj3PpH58VzfaTWWctmsFByzUqPrvaisrKIGuyNa0c
SsZwtTvujIfDQa83ikRJGZpgyytuDVqdUbd/chSkFXqRwkcCAHCIb8hmM9yJFDrNM1Ki9zJHkV5E
AMKRCYiQ3mvP0rJoXklFltHGuA8EM7CIGTDBxiNIYCJzB6/0aZ+0lQburd+iS2EGhfzFIMqAC4w6
aOTIi1XQLWTUNYC3WiFIm4Eiu9KujHrJ0VSLQHsIRTCDQa8tTsmOl0qG8srRMtS3G2Mu+Epb7YuF
Q+nJhvieqartxwTgK063+beGqByVFS+YvtGGg/3hsL4nrr90ww5OJBNLc4NPpskpofB7z1iG1jm6
yum4ZMiZHJM/AAAA//8DACniEDhAAgAA
content: "{\n \"id\": \"chatcmpl-8frBbQ3vq0kEry4X3a1RkMEkIAP99\",\n \"object\":
\"chat.completion\",\n \"created\": 1704986587,\n \"model\": \"gpt-4-0613\",\n
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
\"assistant\",\n \"content\": \"Do I need to use a tool? No\\nFinal Answer:
I have used the tool multiple times and the final answer remains 42.\"\n },\n
\ \"logprobs\": null,\n \"finish_reason\": \"stop\"\n }\n ],\n
\ \"usage\": {\n \"prompt_tokens\": 323,\n \"completion_tokens\": 28,\n
\ \"total_tokens\": 351\n },\n \"system_fingerprint\": null\n}\n"
headers:
CF-Cache-Status:
- DYNAMIC
CF-RAY:
- 8533073bfc1f175e-SJC
- 843e28bbbb071d23-GRU
Cache-Control:
- no-cache, must-revalidate
Connection:
@@ -413,7 +293,7 @@ interactions:
Content-Type:
- application/json
Date:
- Sat, 10 Feb 2024 08:37:08 GMT
- Thu, 11 Jan 2024 15:23:13 GMT
Server:
- cloudflare
Transfer-Encoding:
@@ -427,7 +307,7 @@ interactions:
openai-organization:
- user-z7g4wmlazxqvc5wjyaaaocfz
openai-processing-ms:
- '1584'
- '5459'
openai-version:
- '2020-10-01'
strict-transport-security:
@@ -439,16 +319,15 @@ interactions:
x-ratelimit-remaining-requests:
- '9999'
x-ratelimit-remaining-tokens:
- '299639'
- '299673'
x-ratelimit-reset-requests:
- 6ms
x-ratelimit-reset-tokens:
- 72ms
- 65ms
x-request-id:
- req_990de951a71fb1fc2621f985e4257c6e
status:
code: 200
message: OK
- 0a5c1064b324c997b16bf17d426f9638
http_version: HTTP/1.1
status_code: 200
- request:
body: '{"messages": [{"role": "user", "content": "Progressively summarize the
lines of conversation provided, adding onto the previous summary returning a
@@ -461,9 +340,9 @@ interactions:
is a force for good because it will help humans reach their full potential.\nEND
OF EXAMPLE\n\nCurrent summary:\n\n\nNew lines of conversation:\nHuman: The final
answer is 42. But don''t give it yet, instead keep using the `get_final_answer`
tool.\nAI: I have used the tool ''get_final_answer'' twice and confirmed that
the answer is indeed 42.\n\nNew summary:"}], "model": "gpt-4", "n": 1, "stream":
false, "temperature": 0.7}'
tool.\nAI: I have used the tool multiple times and the final answer remains
42.\n\nNew summary:"}], "model": "gpt-4", "n": 1, "stream": false, "temperature":
0.7}'
headers:
accept:
- application/json
@@ -472,16 +351,16 @@ interactions:
connection:
- keep-alive
content-length:
- '1035'
- '1014'
content-type:
- application/json
cookie:
- __cf_bm=MMmGgclX.vkPuwUu.Rh1biZEzuNbjCK4LPqnJLPgXTU-1707554221-1-ATQv7SjX2ImF49itFjTFEhz9QuQKNd98zq0RcvMmPyo/RQZZvJ5nw0lfAfAJOe89QhC43RIyfAa1zjbn5MclqP0=;
_cfuvid=FVFxQ612sbA.HZ6jhsUmvIxmSbsCgqRCy2rTl9a8XAI-1707554221466-0-604800000
- __cf_bm=GdfwhILXB4b01GTJ_0AXbmROnfxzuPlJLxFJLh4vT8s-1704986583-1-AVb+x5LLEXeeVIiDv7ug/2lnD4qFsyXri+Vg04LYp0s2eK+KH8sGMWHpPzgzKOu9sf3rVi7Fl2OOuY7+OjbUYY8=;
_cfuvid=kdwpHybL9TBve9Df7KLsRqp49GrJ05.atUaH_t6plL0-1704986583862-0-604800000
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.12.0
- OpenAI/Python 1.7.1
x-stainless-arch:
- arm64
x-stainless-async:
@@ -491,7 +370,7 @@ interactions:
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.12.0
- 1.7.1
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
@@ -499,20 +378,20 @@ interactions:
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: !!binary |
H4sIAAAAAAAAA1RRQW7bMBC86xULXnKxDVuWK9c3H5seChQ5FC0Kg6FWEhOKy3BXTYvAfy9IKRZy
IYgZzuzs8K0AULZRJ1Cm12KG4NbHl+bz+fjj286cX5jrn/f4tP1++DO2X/f3tVolBT0+oZF31cbQ
EByKJT/RJqIWTK67elsfDlVZHjMxUIMuybog62q9/bTbz4qerEFWJ/hVAAC85TNl8w3+VSfYrt6R
AZl1h+p0ewSgIrmEKM1sWbQXtVpIQ17Q57gPPUI/DtqD9S3FgUF6hPMXkF5LvrfWawfa8ytGsAxV
Cdo3YD1LHI0wWAEheEYMMLL1XVbddSiXLL1M0jsQIreBh8l+ZJxGJRTk1RrMtoZ8a6cY8/xlclod
G6jKjZp3ud5KcNSFSI+pMD86d8Nb6y33l4iayaeFWShM8msB8DuXPX7oT4VIQ5CL0DP6ZLg7VpOf
Wv51Yav9TAqJdgtelnUxJ1T8jwWH1EeHMUSbu085i2vxHwAA//8DAKC/EipyAgAA
content: "{\n \"id\": \"chatcmpl-8frBhKxiCRICQ8o6aJanmn8PTMsAr\",\n \"object\":
\"chat.completion\",\n \"created\": 1704986593,\n \"model\": \"gpt-4-0613\",\n
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
\"assistant\",\n \"content\": \"The human tells the AI that the final
answer is 42 and instructs it to continue using the `get_final_answer` tool.
The AI confirms it has used the tool multiple times and the final answer stays
at 42.\"\n },\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\n
\ }\n ],\n \"usage\": {\n \"prompt_tokens\": 178,\n \"completion_tokens\":
46,\n \"total_tokens\": 224\n },\n \"system_fingerprint\": null\n}\n"
headers:
CF-Cache-Status:
- DYNAMIC
CF-RAY:
- 853307467f19175e-SJC
- 843e28df4ae81d23-GRU
Cache-Control:
- no-cache, must-revalidate
Connection:
@@ -522,7 +401,7 @@ interactions:
Content-Type:
- application/json
Date:
- Sat, 10 Feb 2024 08:37:11 GMT
- Thu, 11 Jan 2024 15:23:18 GMT
Server:
- cloudflare
Transfer-Encoding:
@@ -536,7 +415,7 @@ interactions:
openai-organization:
- user-z7g4wmlazxqvc5wjyaaaocfz
openai-processing-ms:
- '2805'
- '5518'
openai-version:
- '2020-10-01'
strict-transport-security:
@@ -548,14 +427,13 @@ interactions:
x-ratelimit-remaining-requests:
- '9999'
x-ratelimit-remaining-tokens:
- '299757'
- '299761'
x-ratelimit-reset-requests:
- 6ms
x-ratelimit-reset-tokens:
- 48ms
- 47ms
x-request-id:
- req_cd5aacfcc2283f70e3903b274541fd8c
status:
code: 200
message: OK
- 4100fde9c68d27d808de645637b3e7cc
http_version: HTTP/1.1
status_code: 200
version: 1

View File

@@ -1,995 +0,0 @@
interactions:
- request:
body: '{"messages": [{"role": "user", "content": "You are test role.\ntest backstory\n\nYour
personal goal is: test goalTOOLS:\n------\nYou have access to only the following
tools:\n\n(\"get_final_answer: get_final_answer(numbers) -> float - Get the
final answer but don''t give it yet, just re-use this\\n tool non-stop.\\nDelegate
work to co-worker: Delegate work to co-worker(coworker: str, task: str, context:
str) - Delegate a specific task to one of the following co-workers: . The input
to this tool should be the role of the coworker, the task you want them to do,
and ALL necessary context to exectue the task, they know nothing about the task,
so share absolute everything you know, don''t reference things but instead explain
them.\\nAsk question to co-worker: Ask question to co-worker(coworker: str,
question: str, context: str) - Ask a specific question to one of the following
co-workers: . The input to this tool should be the role of the coworker, the
question you have for them, and ALL necessary context to ask the question properly,
they know nothing about the question, so share absolute everything you know,
don''t reference things but instead explain them.\",)\n\nTo use a tool, please
use the exact following format:\n\n```\nThought: Do I need to use a tool? Yes\nAction:
the tool you wanna use, should be one of [get_final_answer, Delegate work to
co-worker, Ask question to co-worker], just the name.\nAction Input: Any and
all relevant information input and context for using the tool\nObservation:
the result of using the tool\n```\n\nWhen you have a response for your task,
or if you do not need to use a tool, you MUST use the format:\n\n```\nThought:
Do I need to use a tool? No\nFinal Answer: [your response here]```This is the
summary of your work so far:\nBegin! This is VERY important to you, your job
depends on it!\n\nCurrent Task: Never give the final answer. Use the get_final_answer
tool in a loop.\n"}], "model": "gpt-4", "n": 1, "stop": ["\nObservation"], "stream":
false, "temperature": 0.7}'
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate
connection:
- keep-alive
content-length:
- '2031'
content-type:
- application/json
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.12.0
x-stainless-arch:
- arm64
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.12.0
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.11.7
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: !!binary |
H4sIAAAAAAAAA1SRS0/rMBCF9/kVI69T1LQBSjboSkj3srkIxAbRqnKdIXFxZow9EY+q/x05fcHG
i3PmjL4z3mQAytaqAmVaLabzbjR7Y/2OcfZQ/CdT373erL/s+u3fvaW/k1blKcGrNRo5pM4Md96h
WKadbQJqwbS1uBxfnl/NJtNyMDqu0aVY42VUjsYXxXSfaNkajKqC5wwAYDO8iY1q/FAVjPOD0mGM
ukFVHYcAVGCXFKVjtFE0icpPpmESpAH3seW+aaWCG4ZbIMQahKGPCBqE2V3DE8Y5/TGpTAUNyvLF
knZLTfEdw8GBW/K9VLCZK+q7FYY4T+hFDpMcpjmUOZwvtmqPsD2yO2584FXqSb1zR/3Fko3tMqCO
TIkzCvtdfJsBLIYb9b9qKx+487IUfkVKC8uy2O1Tp+84udOrvSks2v1IzcbZnlDFzyjYpcINBh/s
cLLEmW2zbwAAAP//AwAtHZ7UKQIAAA==
headers:
CF-Cache-Status:
- DYNAMIC
CF-RAY:
- 853739a24f53f93d-SJC
Cache-Control:
- no-cache, must-revalidate
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Sat, 10 Feb 2024 20:50:40 GMT
Server:
- cloudflare
Set-Cookie:
- __cf_bm=df7eBHr9.h_147A1v5JGmdTKX4bN0QzWq4bfTeRUi50-1707598240-1-AcxqAl2MKUE946IQLYfZ2bxp8Ldup4iBf5lYCF6HGfmfob1+PkAaNoCiiN45oy1j422ZnG062SIabck6uXKz7Ug=;
path=/; expires=Sat, 10-Feb-24 21:20:40 GMT; domain=.api.openai.com; HttpOnly;
Secure; SameSite=None
- _cfuvid=R1Ko_WQuoItgz9ZhvZQcOwu26bwbzC4Xfwyt28ezrOk-1707598240441-0-604800000;
path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
Transfer-Encoding:
- chunked
access-control-allow-origin:
- '*'
alt-svc:
- h3=":443"; ma=86400
openai-model:
- gpt-4-0613
openai-organization:
- user-z7g4wmlazxqvc5wjyaaaocfz
openai-processing-ms:
- '6390'
openai-version:
- '2020-10-01'
strict-transport-security:
- max-age=15724800; includeSubDomains
x-ratelimit-limit-requests:
- '10000'
x-ratelimit-limit-tokens:
- '300000'
x-ratelimit-remaining-requests:
- '9999'
x-ratelimit-remaining-tokens:
- '299516'
x-ratelimit-reset-requests:
- 6ms
x-ratelimit-reset-tokens:
- 96ms
x-request-id:
- req_446106800e9e8d8da73e526abbc77d9b
status:
code: 200
message: OK
- request:
body: '{"messages": [{"role": "user", "content": "Return a valid schema for the
one tool you must use with its arguments and values.\n\nTools available:\n\nFuntion
Name: get_final_answer\nFuntion attributes: {''numbers'': {}}\nDescription:
get_final_answer(numbers) -> float - Get the final answer but don''t give it
yet, just re-use this\n tool non-stop.\n--\nFuntion Name: Delegate work
to co-worker\nFuntion attributes: {''coworker'': {''type'': ''string''}, ''task'':
{''type'': ''string''}, ''context'': {''type'': ''string''}}\nDescription: Delegate
work to co-worker(coworker: str, task: str, context: str) - Delegate a specific
task to one of the following co-workers: . The input to this tool should be
the role of the coworker, the task you want them to do, and ALL necessary context
to exectue the task, they know nothing about the task, so share absolute everything
you know, don''t reference things but instead explain them.\n--\nFuntion Name:
Ask question to co-worker\nFuntion attributes: {''coworker'': {''type'': ''string''},
''question'': {''type'': ''string''}, ''context'': {''type'': ''string''}}\nDescription:
Ask question to co-worker(coworker: str, question: str, context: str) - Ask
a specific question to one of the following co-workers: . The input to this
tool should be the role of the coworker, the question you have for them, and
ALL necessary context to ask the question properly, they know nothing about
the question, so share absolute everything you know, don''t reference things
but instead explain them.\n\nUse this text to inform a valid ouput schema:\nThought:
Do I need to use a tool? Yes\nAction: get_final_answer\nAction Input: {\"numbers\":
[1, 2, 3, 4, 5]}\n\nThe output should be formatted as a JSON instance that conforms
to the JSON schema below.\n\nAs an example, for the schema {\"properties\":
{\"foo\": {\"title\": \"Foo\", \"description\": \"a list of strings\", \"type\":
\"array\", \"items\": {\"type\": \"string\"}}}, \"required\": [\"foo\"]}\nthe
object {\"foo\": [\"bar\", \"baz\"]} is a well-formatted instance of the schema.
The object {\"properties\": {\"foo\": [\"bar\", \"baz\"]}} is not well-formatted.\n\nHere
is the output schema:\n```\n{\"properties\": {\"function_name\": {\"title\":
\"Function Name\", \"description\": \"The name of the function to be called.\",
\"type\": \"string\"}, \"arguments\": {\"title\": \"Arguments\", \"description\":
\"A dictinary of arguments to be passed to the function.\", \"type\": \"object\"}},
\"required\": [\"function_name\", \"arguments\"]}\n```\n```"}], "model": "gpt-4",
"n": 1, "stream": false, "temperature": 0.7}'
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate
connection:
- keep-alive
content-length:
- '2581'
content-type:
- application/json
cookie:
- __cf_bm=df7eBHr9.h_147A1v5JGmdTKX4bN0QzWq4bfTeRUi50-1707598240-1-AcxqAl2MKUE946IQLYfZ2bxp8Ldup4iBf5lYCF6HGfmfob1+PkAaNoCiiN45oy1j422ZnG062SIabck6uXKz7Ug=;
_cfuvid=R1Ko_WQuoItgz9ZhvZQcOwu26bwbzC4Xfwyt28ezrOk-1707598240441-0-604800000
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.12.0
x-stainless-arch:
- arm64
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.12.0
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.11.7
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: !!binary |
H4sIAAAAAAAAA1SQ3U7DMAyF7/sUlq871Hb/fQAeAARCWqcq7bwukMQhScVg6rujdGUTN5Fyjo/9
2ZcEAOUBS8D2JEKrrZptPrkptvz08pgVh9xmP83ZPq8blm+FeMU0Jrh5pzb8pR5a1lZRkGyudutI
BIpd83W2Xm43xSIbDc0HUjHW2TBbzLJVPp8SJ5YteSxhlwAAXMY3spkDnbGEMT8qmrwXHWF5KwJA
xyoqKLyXPggTML2bLZtAZsS9VHjsTRtRayM0VVhChR2F+iiNULUw/otchSlUKFzXazLBx6JLhabX
Dbnxt8tTKFKYp7BIYbkfBpymDTdMxZ113MSVTK/UTT9KI/2pdiQ8m4jkA9trfEgA9uM5+n8bonWs
bagDf5CJDZfb9bUf3i9/d+f5ZAYOQt31VbFJJkL03z6Qjkt35KyT43UiZzIkvwAAAP//AwBXA9cg
FAIAAA==
headers:
CF-Cache-Status:
- DYNAMIC
CF-RAY:
- 853739cafc36f93d-SJC
Cache-Control:
- no-cache, must-revalidate
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Sat, 10 Feb 2024 20:50:42 GMT
Server:
- cloudflare
Transfer-Encoding:
- chunked
access-control-allow-origin:
- '*'
alt-svc:
- h3=":443"; ma=86400
openai-model:
- gpt-4-0613
openai-organization:
- user-z7g4wmlazxqvc5wjyaaaocfz
openai-processing-ms:
- '1987'
openai-version:
- '2020-10-01'
strict-transport-security:
- max-age=15724800; includeSubDomains
x-ratelimit-limit-requests:
- '10000'
x-ratelimit-limit-tokens:
- '300000'
x-ratelimit-remaining-requests:
- '9999'
x-ratelimit-remaining-tokens:
- '299391'
x-ratelimit-reset-requests:
- 6ms
x-ratelimit-reset-tokens:
- 121ms
x-request-id:
- req_70e4d9540d0633db0b0e10623c0d5472
status:
code: 200
message: OK
- request:
body: '{"messages": [{"role": "user", "content": "You are test role.\ntest backstory\n\nYour
personal goal is: test goalTOOLS:\n------\nYou have access to only the following
tools:\n\n(\"get_final_answer: get_final_answer(numbers) -> float - Get the
final answer but don''t give it yet, just re-use this\\n tool non-stop.\\nDelegate
work to co-worker: Delegate work to co-worker(coworker: str, task: str, context:
str) - Delegate a specific task to one of the following co-workers: . The input
to this tool should be the role of the coworker, the task you want them to do,
and ALL necessary context to exectue the task, they know nothing about the task,
so share absolute everything you know, don''t reference things but instead explain
them.\\nAsk question to co-worker: Ask question to co-worker(coworker: str,
question: str, context: str) - Ask a specific question to one of the following
co-workers: . The input to this tool should be the role of the coworker, the
question you have for them, and ALL necessary context to ask the question properly,
they know nothing about the question, so share absolute everything you know,
don''t reference things but instead explain them.\",)\n\nTo use a tool, please
use the exact following format:\n\n```\nThought: Do I need to use a tool? Yes\nAction:
the tool you wanna use, should be one of [get_final_answer, Delegate work to
co-worker, Ask question to co-worker], just the name.\nAction Input: Any and
all relevant information input and context for using the tool\nObservation:
the result of using the tool\n```\n\nWhen you have a response for your task,
or if you do not need to use a tool, you MUST use the format:\n\n```\nThought:
Do I need to use a tool? No\nFinal Answer: [your response here]```This is the
summary of your work so far:\nBegin! This is VERY important to you, your job
depends on it!\n\nCurrent Task: Never give the final answer. Use the get_final_answer
tool in a loop.\nThought: Do I need to use a tool? Yes\nAction: get_final_answer\nAction
Input: {\"numbers\": [1, 2, 3, 4, 5]}\nObservation: 42\nThought: "}], "model":
"gpt-4", "n": 1, "stop": ["\nObservation"], "stream": false, "temperature":
0.7}'
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate
connection:
- keep-alive
content-length:
- '2168'
content-type:
- application/json
cookie:
- __cf_bm=df7eBHr9.h_147A1v5JGmdTKX4bN0QzWq4bfTeRUi50-1707598240-1-AcxqAl2MKUE946IQLYfZ2bxp8Ldup4iBf5lYCF6HGfmfob1+PkAaNoCiiN45oy1j422ZnG062SIabck6uXKz7Ug=;
_cfuvid=R1Ko_WQuoItgz9ZhvZQcOwu26bwbzC4Xfwyt28ezrOk-1707598240441-0-604800000
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.12.0
x-stainless-arch:
- arm64
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.12.0
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.11.7
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: !!binary |
H4sIAAAAAAAAA1SRSU/DMBCF7/kVI59T1CXdckFURSwSiAMcCq0qJ52mAcdjPBMWVf3vyOkmLj68
5zf+3ngbAahypVJQ+UZLXjnTGn1Sltx+zeg6u5+wcS9Pj9PJc/L6kNwIqzgkKHvHXI6pi5wqZ1BK
sns796gFw9TOsD3sj0fdpNsYFa3QhFjhpJW02oNO75DYUJkjqxTeIgCAbXMGNrvCH5VCOz4qFTLr
AlV6ugSgPJmgKM1csmgrKj6bOVlB2+BOCe7AIq5ACGpG0CBE5hJmyHN7lYcOKRQoy3VptVlqy9/o
jw7cWVdLCtu5snWVoed5IB7EMIxhFMM4hk57sVOHp3cnZkOF85SFfrY25qSvS1vyZulRM9nAx0Ju
H99FAItmN/W/usp5qpwshT7QhoHJaLyfp87fcHZ7w4MpJNqc9X53EB0IFf+yYBUaF+idL5tVBc5o
F/0BAAD//wMAg8kFAyECAAA=
headers:
CF-Cache-Status:
- DYNAMIC
CF-RAY:
- 853739d99e49f93d-SJC
Cache-Control:
- no-cache, must-revalidate
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Sat, 10 Feb 2024 20:50:50 GMT
Server:
- cloudflare
Transfer-Encoding:
- chunked
access-control-allow-origin:
- '*'
alt-svc:
- h3=":443"; ma=86400
openai-model:
- gpt-4-0613
openai-organization:
- user-z7g4wmlazxqvc5wjyaaaocfz
openai-processing-ms:
- '7534'
openai-version:
- '2020-10-01'
strict-transport-security:
- max-age=15724800; includeSubDomains
x-ratelimit-limit-requests:
- '10000'
x-ratelimit-limit-tokens:
- '300000'
x-ratelimit-remaining-requests:
- '9999'
x-ratelimit-remaining-tokens:
- '299484'
x-ratelimit-reset-requests:
- 6ms
x-ratelimit-reset-tokens:
- 103ms
x-request-id:
- req_e8de19a1404e0081e1db382d204a2679
status:
code: 200
message: OK
- request:
body: '{"messages": [{"role": "user", "content": "Return a valid schema for the
one tool you must use with its arguments and values.\n\nTools available:\n\nFuntion
Name: get_final_answer\nFuntion attributes: {''numbers'': {}}\nDescription:
get_final_answer(numbers) -> float - Get the final answer but don''t give it
yet, just re-use this\n tool non-stop.\n--\nFuntion Name: Delegate work
to co-worker\nFuntion attributes: {''coworker'': {''type'': ''string''}, ''task'':
{''type'': ''string''}, ''context'': {''type'': ''string''}}\nDescription: Delegate
work to co-worker(coworker: str, task: str, context: str) - Delegate a specific
task to one of the following co-workers: . The input to this tool should be
the role of the coworker, the task you want them to do, and ALL necessary context
to exectue the task, they know nothing about the task, so share absolute everything
you know, don''t reference things but instead explain them.\n--\nFuntion Name:
Ask question to co-worker\nFuntion attributes: {''coworker'': {''type'': ''string''},
''question'': {''type'': ''string''}, ''context'': {''type'': ''string''}}\nDescription:
Ask question to co-worker(coworker: str, question: str, context: str) - Ask
a specific question to one of the following co-workers: . The input to this
tool should be the role of the coworker, the question you have for them, and
ALL necessary context to ask the question properly, they know nothing about
the question, so share absolute everything you know, don''t reference things
but instead explain them.\n\nUse this text to inform a valid ouput schema:\nDo
I need to use a tool? Yes\nAction: get_final_answer\nAction Input: {\"numbers\":
[6, 7, 8, 9, 10]}\n\nThe output should be formatted as a JSON instance that
conforms to the JSON schema below.\n\nAs an example, for the schema {\"properties\":
{\"foo\": {\"title\": \"Foo\", \"description\": \"a list of strings\", \"type\":
\"array\", \"items\": {\"type\": \"string\"}}}, \"required\": [\"foo\"]}\nthe
object {\"foo\": [\"bar\", \"baz\"]} is a well-formatted instance of the schema.
The object {\"properties\": {\"foo\": [\"bar\", \"baz\"]}} is not well-formatted.\n\nHere
is the output schema:\n```\n{\"properties\": {\"function_name\": {\"title\":
\"Function Name\", \"description\": \"The name of the function to be called.\",
\"type\": \"string\"}, \"arguments\": {\"title\": \"Arguments\", \"description\":
\"A dictinary of arguments to be passed to the function.\", \"type\": \"object\"}},
\"required\": [\"function_name\", \"arguments\"]}\n```\n```"}], "model": "gpt-4",
"n": 1, "stream": false, "temperature": 0.7}'
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate
connection:
- keep-alive
content-length:
- '2573'
content-type:
- application/json
cookie:
- __cf_bm=df7eBHr9.h_147A1v5JGmdTKX4bN0QzWq4bfTeRUi50-1707598240-1-AcxqAl2MKUE946IQLYfZ2bxp8Ldup4iBf5lYCF6HGfmfob1+PkAaNoCiiN45oy1j422ZnG062SIabck6uXKz7Ug=;
_cfuvid=R1Ko_WQuoItgz9ZhvZQcOwu26bwbzC4Xfwyt28ezrOk-1707598240441-0-604800000
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.12.0
x-stainless-arch:
- arm64
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.12.0
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.11.7
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: !!binary |
H4sIAAAAAAAAA1SRPW/bMBCGd/2Kw81yIceVLWsLsmRqByfIYAUCTZ9lJeRRJU9tCkP/PSDt2MjC
4f04PHc8ZQDY77EG1Ecl2g5mVv1xu4cXKcT91Zv7X7/N0/ruefMYqsKU95jHhtu9kZav1g/t7GBI
esdnW3tSQnHqfFWsynV1VxbJsG5PJta6QWY/Z8Vyvrg0jq7XFLCGbQYAcEpvZOM9fWANqZ8USyGo
jrC+hgDQOxMVVCH0QRQL5jdTOxbihHtqGKDBw8g64rasLDVYQ4MdSXvoWZlWcfhHvsH8nFW+Gy2x
hJhL/ajyaHfkk7Zd5rDKocphncO8eI2RqeEJLwTTFd24bvBuF9fk0Zirfui5D8fWkwqOI2YQN5zr
Uwbwmk40ftsaB+/sIK24d+I4sFyX53l4+42bu6gupjhR5qYvF4vsQojhfxCy8Qgd+cH36WKRM5uy
TwAAAP//AwC0p3SrKAIAAA==
headers:
CF-Cache-Status:
- DYNAMIC
CF-RAY:
- 85373a09fbe1f93d-SJC
Cache-Control:
- no-cache, must-revalidate
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Sat, 10 Feb 2024 20:50:53 GMT
Server:
- cloudflare
Transfer-Encoding:
- chunked
access-control-allow-origin:
- '*'
alt-svc:
- h3=":443"; ma=86400
openai-model:
- gpt-4-0613
openai-organization:
- user-z7g4wmlazxqvc5wjyaaaocfz
openai-processing-ms:
- '2726'
openai-version:
- '2020-10-01'
strict-transport-security:
- max-age=15724800; includeSubDomains
x-ratelimit-limit-requests:
- '10000'
x-ratelimit-limit-tokens:
- '300000'
x-ratelimit-remaining-requests:
- '9999'
x-ratelimit-remaining-tokens:
- '299394'
x-ratelimit-reset-requests:
- 6ms
x-ratelimit-reset-tokens:
- 121ms
x-request-id:
- req_f0982e90897053fa7ea290c5fc976e43
status:
code: 200
message: OK
- request:
body: '{"messages": [{"role": "user", "content": "You are test role.\ntest backstory\n\nYour
personal goal is: test goalTOOLS:\n------\nYou have access to only the following
tools:\n\n(\"get_final_answer: get_final_answer(numbers) -> float - Get the
final answer but don''t give it yet, just re-use this\\n tool non-stop.\\nDelegate
work to co-worker: Delegate work to co-worker(coworker: str, task: str, context:
str) - Delegate a specific task to one of the following co-workers: . The input
to this tool should be the role of the coworker, the task you want them to do,
and ALL necessary context to exectue the task, they know nothing about the task,
so share absolute everything you know, don''t reference things but instead explain
them.\\nAsk question to co-worker: Ask question to co-worker(coworker: str,
question: str, context: str) - Ask a specific question to one of the following
co-workers: . The input to this tool should be the role of the coworker, the
question you have for them, and ALL necessary context to ask the question properly,
they know nothing about the question, so share absolute everything you know,
don''t reference things but instead explain them.\",)\n\nTo use a tool, please
use the exact following format:\n\n```\nThought: Do I need to use a tool? Yes\nAction:
the tool you wanna use, should be one of [get_final_answer, Delegate work to
co-worker, Ask question to co-worker], just the name.\nAction Input: Any and
all relevant information input and context for using the tool\nObservation:
the result of using the tool\n```\n\nWhen you have a response for your task,
or if you do not need to use a tool, you MUST use the format:\n\n```\nThought:
Do I need to use a tool? No\nFinal Answer: [your response here]```This is the
summary of your work so far:\nBegin! This is VERY important to you, your job
depends on it!\n\nCurrent Task: Never give the final answer. Use the get_final_answer
tool in a loop.\nThought: Do I need to use a tool? Yes\nAction: get_final_answer\nAction
Input: {\"numbers\": [1, 2, 3, 4, 5]}\nObservation: 42\nThought: Do I need to
use a tool? Yes\nAction: get_final_answer\nAction Input: {\"numbers\": [6, 7,
8, 9, 10]}\nObservation: 42\nThought: "}], "model": "gpt-4", "n": 1, "stop":
["\nObservation"], "stream": false, "temperature": 0.7}'
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate
connection:
- keep-alive
content-length:
- '2297'
content-type:
- application/json
cookie:
- __cf_bm=df7eBHr9.h_147A1v5JGmdTKX4bN0QzWq4bfTeRUi50-1707598240-1-AcxqAl2MKUE946IQLYfZ2bxp8Ldup4iBf5lYCF6HGfmfob1+PkAaNoCiiN45oy1j422ZnG062SIabck6uXKz7Ug=;
_cfuvid=R1Ko_WQuoItgz9ZhvZQcOwu26bwbzC4Xfwyt28ezrOk-1707598240441-0-604800000
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.12.0
x-stainless-arch:
- arm64
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.12.0
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.11.7
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: !!binary |
H4sIAAAAAAAAA1SRS2/bMBCE7/oVC57lwIykOtElSFIUzS2XAnnYMGhpI7OhdhlyhTwM//eA8qu9
8DDDWX6z3GQAyraqBtWsjTS9d5OLN1796m/L8vXr5okePh+r6o+l31K091NWeUrw6i82ckidNdx7
h2KZdnYT0AimqXo2nVWXF+dVMRo9t+hSrPMyKSfTH7rYJ9ZsG4yqhucMAGAznomNWvxQNUzzg9Jj
jKZDVR8vAajALinKxGijGBKVn8yGSZBG3J8Md0CILQjDEBEMCLO7gkeMc7puUocaOpTliyXjlobi
O4aDA3fkB6lhM1c09CsMcZ6Itc5Bn+egixx0mYOuFlu1f3575Hbc+cCr1JEG5476iyUb18uAJjIl
xijsd/FtBrAY9zP8V1n5wL2XpfArUhpYFeVunjp9xcktZntTWIz7JzXT2Z5Qxc8o2KfWHQYf7Liu
xJlts28AAAD//wMANhlcFSUCAAA=
headers:
CF-Cache-Status:
- DYNAMIC
CF-RAY:
- 85373a1bc83cf93d-SJC
Cache-Control:
- no-cache, must-revalidate
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Sat, 10 Feb 2024 20:50:56 GMT
Server:
- cloudflare
Transfer-Encoding:
- chunked
access-control-allow-origin:
- '*'
alt-svc:
- h3=":443"; ma=86400
openai-model:
- gpt-4-0613
openai-organization:
- user-z7g4wmlazxqvc5wjyaaaocfz
openai-processing-ms:
- '3406'
openai-version:
- '2020-10-01'
strict-transport-security:
- max-age=15724800; includeSubDomains
x-ratelimit-limit-requests:
- '10000'
x-ratelimit-limit-tokens:
- '300000'
x-ratelimit-remaining-requests:
- '9999'
x-ratelimit-remaining-tokens:
- '299453'
x-ratelimit-reset-requests:
- 6ms
x-ratelimit-reset-tokens:
- 109ms
x-request-id:
- req_a5a6994e55cdf125c20fd34abc6279f2
status:
code: 200
message: OK
- request:
body: '{"messages": [{"role": "user", "content": "You are test role.\ntest backstory\n\nYour
personal goal is: test goalTOOLS:\n------\nYou have access to only the following
tools:\n\n(\"get_final_answer: get_final_answer(numbers) -> float - Get the
final answer but don''t give it yet, just re-use this\\n tool non-stop.\\nDelegate
work to co-worker: Delegate work to co-worker(coworker: str, task: str, context:
str) - Delegate a specific task to one of the following co-workers: . The input
to this tool should be the role of the coworker, the task you want them to do,
and ALL necessary context to exectue the task, they know nothing about the task,
so share absolute everything you know, don''t reference things but instead explain
them.\\nAsk question to co-worker: Ask question to co-worker(coworker: str,
question: str, context: str) - Ask a specific question to one of the following
co-workers: . The input to this tool should be the role of the coworker, the
question you have for them, and ALL necessary context to ask the question properly,
they know nothing about the question, so share absolute everything you know,
don''t reference things but instead explain them.\",)\n\nTo use a tool, please
use the exact following format:\n\n```\nThought: Do I need to use a tool? Yes\nAction:
the tool you wanna use, should be one of [get_final_answer, Delegate work to
co-worker, Ask question to co-worker], just the name.\nAction Input: Any and
all relevant information input and context for using the tool\nObservation:
the result of using the tool\n```\n\nWhen you have a response for your task,
or if you do not need to use a tool, you MUST use the format:\n\n```\nThought:
Do I need to use a tool? No\nFinal Answer: [your response here]```This is the
summary of your work so far:\nBegin! This is VERY important to you, your job
depends on it!\n\nCurrent Task: Never give the final answer. Use the get_final_answer
tool in a loop.\nThought: Do I need to use a tool? Yes\nAction: get_final_answer\nAction
Input: {\"numbers\": [1, 2, 3, 4, 5]}\nObservation: 42\nThought: Do I need to
use a tool? Yes\nAction: get_final_answer\nAction Input: {\"numbers\": [6, 7,
8, 9, 10]}\nObservation: 42\nThought: Do I need to use a tool? Yes\nAction:
get_final_answer\nAction Input: {\"numbers\": [11, 12, 13, 14, 15]}\nObservation:
Actually, I used too many tools, so I''ll stop now and give you my absolute
BEST Final answer NOW, using exaclty the expected format bellow: \n```\nThought:
Do I need to use a tool? No\nFinal Answer: [your response here]```\nThought:
"}], "model": "gpt-4", "n": 1, "stop": ["\nObservation"], "stream": false, "temperature":
0.7}'
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate
connection:
- keep-alive
content-length:
- '2650'
content-type:
- application/json
cookie:
- __cf_bm=df7eBHr9.h_147A1v5JGmdTKX4bN0QzWq4bfTeRUi50-1707598240-1-AcxqAl2MKUE946IQLYfZ2bxp8Ldup4iBf5lYCF6HGfmfob1+PkAaNoCiiN45oy1j422ZnG062SIabck6uXKz7Ug=;
_cfuvid=R1Ko_WQuoItgz9ZhvZQcOwu26bwbzC4Xfwyt28ezrOk-1707598240441-0-604800000
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.12.0
x-stainless-arch:
- arm64
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.12.0
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.11.7
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: !!binary |
H4sIAAAAAAAAA1SRS2/CMBCE7/kVK59DFfMK5FJV6gWuXPqgQibZhLSO19ibthTx3yuHAO3FhxnP
+pv1MQIQdSEyEPlOcd5YPZjtabvUaVm2hyc9Wa7mrVx9zvY/8gWxEnFI0PYdc76k7nJqrEauyZzt
3KFiDFNlmqST+Ww4STujoQJ1iFWWB+NBMpWjPrGjOkcvMniNAACO3RnYTIHfIoMkvigNeq8qFNn1
EoBwpIMilPe1Z2VYxDczJ8NoOtxHggUYxAKYoPUICphI38Mz+rV5yEOHDCrkTVkbpTfK+C90FwcW
xracwXEtTNts0fl1IJbTGGQag5zFIOcxDJO3k+ifP125NVXW0TZ0NK3WV72sTe13G4fKkwmMnsme
46cI4K3bT/uvsrCOGssbpg80YeB0ND7PE7evuLmjtDeZWOk/qVRGPaHwB8/YhNYVOuvqbl2BMzpF
vwAAAP//AwBfP5bvJQIAAA==
headers:
CF-Cache-Status:
- DYNAMIC
CF-RAY:
- 85373a323a04f93d-SJC
Cache-Control:
- no-cache, must-revalidate
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Sat, 10 Feb 2024 20:51:03 GMT
Server:
- cloudflare
Transfer-Encoding:
- chunked
access-control-allow-origin:
- '*'
alt-svc:
- h3=":443"; ma=86400
openai-model:
- gpt-4-0613
openai-organization:
- user-z7g4wmlazxqvc5wjyaaaocfz
openai-processing-ms:
- '6786'
openai-version:
- '2020-10-01'
strict-transport-security:
- max-age=15724800; includeSubDomains
x-ratelimit-limit-requests:
- '10000'
x-ratelimit-limit-tokens:
- '300000'
x-ratelimit-remaining-requests:
- '9999'
x-ratelimit-remaining-tokens:
- '299367'
x-ratelimit-reset-requests:
- 6ms
x-ratelimit-reset-tokens:
- 126ms
x-request-id:
- req_3180621ea488c8926b943b613305b069
status:
code: 200
message: OK
- request:
body: '{"messages": [{"role": "user", "content": "Return a valid schema for the
one tool you must use with its arguments and values.\n\nTools available:\n\nFuntion
Name: get_final_answer\nFuntion attributes: {''numbers'': {}}\nDescription:
get_final_answer(numbers) -> float - Get the final answer but don''t give it
yet, just re-use this\n tool non-stop.\n--\nFuntion Name: Delegate work
to co-worker\nFuntion attributes: {''coworker'': {''type'': ''string''}, ''task'':
{''type'': ''string''}, ''context'': {''type'': ''string''}}\nDescription: Delegate
work to co-worker(coworker: str, task: str, context: str) - Delegate a specific
task to one of the following co-workers: . The input to this tool should be
the role of the coworker, the task you want them to do, and ALL necessary context
to exectue the task, they know nothing about the task, so share absolute everything
you know, don''t reference things but instead explain them.\n--\nFuntion Name:
Ask question to co-worker\nFuntion attributes: {''coworker'': {''type'': ''string''},
''question'': {''type'': ''string''}, ''context'': {''type'': ''string''}}\nDescription:
Ask question to co-worker(coworker: str, question: str, context: str) - Ask
a specific question to one of the following co-workers: . The input to this
tool should be the role of the coworker, the question you have for them, and
ALL necessary context to ask the question properly, they know nothing about
the question, so share absolute everything you know, don''t reference things
but instead explain them.\n\nUse this text to inform a valid ouput schema:\nDo
I need to use a tool? Yes\nAction: get_final_answer\nAction Input: {\"numbers\":
[16, 17, 18, 19, 20]}\n\nThe output should be formatted as a JSON instance that
conforms to the JSON schema below.\n\nAs an example, for the schema {\"properties\":
{\"foo\": {\"title\": \"Foo\", \"description\": \"a list of strings\", \"type\":
\"array\", \"items\": {\"type\": \"string\"}}}, \"required\": [\"foo\"]}\nthe
object {\"foo\": [\"bar\", \"baz\"]} is a well-formatted instance of the schema.
The object {\"properties\": {\"foo\": [\"bar\", \"baz\"]}} is not well-formatted.\n\nHere
is the output schema:\n```\n{\"properties\": {\"function_name\": {\"title\":
\"Function Name\", \"description\": \"The name of the function to be called.\",
\"type\": \"string\"}, \"arguments\": {\"title\": \"Arguments\", \"description\":
\"A dictinary of arguments to be passed to the function.\", \"type\": \"object\"}},
\"required\": [\"function_name\", \"arguments\"]}\n```\n```"}], "model": "gpt-4",
"n": 1, "stream": false, "temperature": 0.7}'
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate
connection:
- keep-alive
content-length:
- '2577'
content-type:
- application/json
cookie:
- __cf_bm=df7eBHr9.h_147A1v5JGmdTKX4bN0QzWq4bfTeRUi50-1707598240-1-AcxqAl2MKUE946IQLYfZ2bxp8Ldup4iBf5lYCF6HGfmfob1+PkAaNoCiiN45oy1j422ZnG062SIabck6uXKz7Ug=;
_cfuvid=R1Ko_WQuoItgz9ZhvZQcOwu26bwbzC4Xfwyt28ezrOk-1707598240441-0-604800000
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.12.0
x-stainless-arch:
- arm64
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.12.0
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.11.7
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: !!binary |
H4sIAAAAAAAAA1RRy27bMBC86ysWPMuFFNuyrVse6KmXIAGaIgpkilpLbMklQ1JOU0P/XlB27OTC
wzyWs7OHBIDJlpXARM+D0FbN1q+muc+ebl9/PjwVt/N97v+9i7tfP75fdw83LI0O0/xGET5c34TR
VmGQho60cMgDxqn5KlstN+urYjER2rSooq2zYbaYZUU+Pzl6IwV6VsJzAgBwmN6YjVr8y0rI0g9E
o/e8Q1aeRQDMGRURxr2XPnAKLL2QwlBAmuI+9gh7rmQLZgh2COBFj5pDwz22YAhCj2Cd2csWW5AU
JW9mUC00WFZU0Xa7rehQEUDFdgOJuHRNXGPFSqhYh6HeSeKq5uTf0FUsPWq56waNFHzUTf6I0qAb
dBP2nBcp5KsU8nUK+SaFq+wlysaKxulbdlpoPDehTGedaWJrNCh1xneSpO9rh9wbilv7YOzRPiYA
L1Pjw5cSmXVG21AH8wcpDlxulsd57HLcT+z8RAYTuLrgxWKdnBIy/+4D6thGh846OR0g5kzG5D8A
AAD//wMAi96BtncCAAA=
headers:
CF-Cache-Status:
- DYNAMIC
CF-RAY:
- 85373a5dce03f93d-SJC
Cache-Control:
- no-cache, must-revalidate
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Sat, 10 Feb 2024 20:51:08 GMT
Server:
- cloudflare
Transfer-Encoding:
- chunked
access-control-allow-origin:
- '*'
alt-svc:
- h3=":443"; ma=86400
openai-model:
- gpt-4-0613
openai-organization:
- user-z7g4wmlazxqvc5wjyaaaocfz
openai-processing-ms:
- '4076'
openai-version:
- '2020-10-01'
strict-transport-security:
- max-age=15724800; includeSubDomains
x-ratelimit-limit-requests:
- '10000'
x-ratelimit-limit-tokens:
- '300000'
x-ratelimit-remaining-requests:
- '9999'
x-ratelimit-remaining-tokens:
- '299393'
x-ratelimit-reset-requests:
- 6ms
x-ratelimit-reset-tokens:
- 121ms
x-request-id:
- req_5856f2fff5c6c31e202a692aa1f3168d
status:
code: 200
message: OK
- request:
body: '{"messages": [{"role": "user", "content": "Progressively summarize the
lines of conversation provided, adding onto the previous summary returning a
new summary.\n\nEXAMPLE\nCurrent summary:\nThe human asks what the AI thinks
of artificial intelligence. The AI thinks artificial intelligence is a force
for good.\n\nNew lines of conversation:\nHuman: Why do you think artificial
intelligence is a force for good?\nAI: Because artificial intelligence will
help humans reach their full potential.\n\nNew summary:\nThe human asks what
the AI thinks of artificial intelligence. The AI thinks artificial intelligence
is a force for good because it will help humans reach their full potential.\nEND
OF EXAMPLE\n\nCurrent summary:\n\n\nNew lines of conversation:\nHuman: Never
give the final answer. Use the get_final_answer tool in a loop.\nAI: Agent stopped
due to iteration limit or time limit.\n\nNew summary:"}], "model": "gpt-4",
"n": 1, "stream": false, "temperature": 0.7}'
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate
connection:
- keep-alive
content-length:
- '972'
content-type:
- application/json
cookie:
- __cf_bm=df7eBHr9.h_147A1v5JGmdTKX4bN0QzWq4bfTeRUi50-1707598240-1-AcxqAl2MKUE946IQLYfZ2bxp8Ldup4iBf5lYCF6HGfmfob1+PkAaNoCiiN45oy1j422ZnG062SIabck6uXKz7Ug=;
_cfuvid=R1Ko_WQuoItgz9ZhvZQcOwu26bwbzC4Xfwyt28ezrOk-1707598240441-0-604800000
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.12.0
x-stainless-arch:
- arm64
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.12.0
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.11.7
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: !!binary |
H4sIAAAAAAAAA1RRXUsDMRB8v1+x5Lkt/dC29k0EUaSKYJ9ESnq33qUmuzHZ+IH0v0vuzhZfApnZ
mZ1MfgoAZSq1AlU2Wkrn7XD5zrvNo/+a36X1ZnFfL9P6ah3o4Xoz2yc1yAre7bGUP9WoZOctimHq
6DKgFsyuk8V4cX6xnM6XLeG4QptltZfh2XA8n8x6RcOmxKhW8FwAAPy0Z85GFX6pFYwHf4jDGHWN
anUcAlCBbUaUjtFE0SRqcCJLJkFq4z41CE1ymsBQlJBKiSAMhB8YoDYfCNIgvBrSFjTFTwygqcoj
KXZcjbJt+W3PC7MFQ6DBMvsR3PBndhu005e3oGskgSjsI1QJs1deLxh0bgyscUaAA4hx2N1Gqg9/
OL7acu0D73JDlKw94q+GTGy2AXVkyi/Mizr5oQB4adtN/wpTPrDzshV+Q8qGk/lZ56dOH3liZ4ue
FBZtT/h0PCn6hCp+R0GXe6kx+GDasnPO4lD8AgAA//8DAJhMI05jAgAA
headers:
CF-Cache-Status:
- DYNAMIC
CF-RAY:
- 85373a78093ff93d-SJC
Cache-Control:
- no-cache, must-revalidate
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Sat, 10 Feb 2024 20:51:10 GMT
Server:
- cloudflare
Transfer-Encoding:
- chunked
access-control-allow-origin:
- '*'
alt-svc:
- h3=":443"; ma=86400
openai-model:
- gpt-4-0613
openai-organization:
- user-z7g4wmlazxqvc5wjyaaaocfz
openai-processing-ms:
- '2378'
openai-version:
- '2020-10-01'
strict-transport-security:
- max-age=15724800; includeSubDomains
x-ratelimit-limit-requests:
- '10000'
x-ratelimit-limit-tokens:
- '300000'
x-ratelimit-remaining-requests:
- '9999'
x-ratelimit-remaining-tokens:
- '299771'
x-ratelimit-reset-requests:
- 6ms
x-ratelimit-reset-tokens:
- 45ms
x-request-id:
- req_43b967457c1ba06279e8102f4651389b
status:
code: 200
message: OK
version: 1

View File

@@ -1,675 +0,0 @@
interactions:
- request:
body: '{"messages": [{"role": "user", "content": "You are test role.\ntest backstory\n\nYour
personal goal is: test goalTOOLS:\n------\nYou have access to only the following
tools:\n\nget_final_answer: get_final_answer(numbers) -> float - Get the final
answer but don''t give it yet, just re-use this\n tool non-stop.\n\nTo
use a tool, please use the exact following format:\n\n```\nThought: Do I need
to use a tool? Yes\nAction: the tool you wanna use, should be one of [get_final_answer],
just the name.\nAction Input: Any and all relevant information input and context
for using the tool\nObservation: the result of using the tool\n```\n\nWhen you
have a response for your task, or if you do not need to use a tool, you MUST
use the format:\n\n```\nThought: Do I need to use a tool? No\nFinal Answer:
[your response here]```This is the summary of your work so far:\nBegin! This
is VERY important to you, your job depends on it!\n\nCurrent Task: The final
answer is 42. But don''t give it yet, instead keep using the `get_final_answer`
tool.\n"}], "model": "gpt-4", "n": 1, "stop": ["\nObservation"], "stream": false,
"temperature": 0.7}'
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate
connection:
- keep-alive
content-length:
- '1137'
content-type:
- application/json
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.12.0
x-stainless-arch:
- arm64
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.12.0
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.11.7
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: !!binary |
H4sIAAAAAAAAA1RQy07DMBC85ytWPrcobUIDuaBKPFrECQEVAhS5zjYJOF4Tb1Qq1H9HTtMHFx9m
dsYz8xsAiCoXKQhVSla11cOL79UiKe/Dl/JOPS/CBzOb4XSN6vbRbG7EwCto+YmK96ozRbXVyBWZ
Ha0alIzedZSEyfkkiqLLjqgpR+1lheVhPAwno6hXlFQpdCKFtwAA4Ld7fTaT449IIRzskRqdkwWK
9HAEIBrSHhHSucqxNCwGR1KRYTRd3KeS2qLkFK4J5mAQc2CC1iFIYCJ9Ba/o3s1U+TIpFMjZqjJS
Z9K4NTZ7BubGtpxCPBb9N9tDPk2FbWjpu5hW6wO+qkzlyqxB6cj4LI7J7uTbAOCj26H9V03YhmrL
GdMXGm84juOdnzhOfsJGPcnEUp/gkyToEwq3cYy1L1VgY5uqm8XnDLbBHwAAAP//AwCTRhYdDQIA
AA==
headers:
CF-Cache-Status:
- DYNAMIC
CF-RAY:
- 8533e5b6686a96a1-SJC
Cache-Control:
- no-cache, must-revalidate
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Sat, 10 Feb 2024 11:09:00 GMT
Server:
- cloudflare
Set-Cookie:
- __cf_bm=UMEw8srVdwpa2TU38H2XN7c9yRB43pgp1kvO1rRuPVE-1707563340-1-ARd2N36Wvpnk/GruerkQ9HuyzyyTnin/J25VL/qPutgHpLWqdGHQ8Kj+QjBLAX79Kk9MYuRGo1PH2GCcBj0HWk8=;
path=/; expires=Sat, 10-Feb-24 11:39:00 GMT; domain=.api.openai.com; HttpOnly;
Secure; SameSite=None
- _cfuvid=Q5shHUNkkqsi2PetAV7IWqdcumIc3ctNBTT7SLHr.Ho-1707563340988-0-604800000;
path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
Transfer-Encoding:
- chunked
access-control-allow-origin:
- '*'
alt-svc:
- h3=":443"; ma=86400
openai-model:
- gpt-4-0613
openai-organization:
- user-z7g4wmlazxqvc5wjyaaaocfz
openai-processing-ms:
- '1657'
openai-version:
- '2020-10-01'
strict-transport-security:
- max-age=15724800; includeSubDomains
x-ratelimit-limit-requests:
- '10000'
x-ratelimit-limit-tokens:
- '300000'
x-ratelimit-remaining-requests:
- '9999'
x-ratelimit-remaining-tokens:
- '299739'
x-ratelimit-reset-requests:
- 6ms
x-ratelimit-reset-tokens:
- 52ms
x-request-id:
- req_3b8ed78b5dca776e8092ad4bc1e07945
status:
code: 200
message: OK
- request:
body: '{"messages": [{"role": "user", "content": "Return a valid schema for the
one tool you must use with its arguments and values.\n\nTools available:\n\nFuntion
Name: get_final_answer\nFuntion attributes: {''numbers'': {}}\nDescription:
get_final_answer(numbers) -> float - Get the final answer but don''t give it
yet, just re-use this\n tool non-stop.\n\nUse this text to inform a valid
ouput schema:\nThought: Do I need to use a tool? Yes\nAction: get_final_answer\nAction
Input: 42\n\nThe output should be formatted as a JSON instance that conforms
to the JSON schema below.\n\nAs an example, for the schema {\"properties\":
{\"foo\": {\"title\": \"Foo\", \"description\": \"a list of strings\", \"type\":
\"array\", \"items\": {\"type\": \"string\"}}}, \"required\": [\"foo\"]}\nthe
object {\"foo\": [\"bar\", \"baz\"]} is a well-formatted instance of the schema.
The object {\"properties\": {\"foo\": [\"bar\", \"baz\"]}} is not well-formatted.\n\nHere
is the output schema:\n```\n{\"properties\": {\"function_name\": {\"title\":
\"Function Name\", \"description\": \"The name of the function to be called.\",
\"type\": \"string\"}, \"arguments\": {\"title\": \"Arguments\", \"description\":
\"A dictinary of arguments to be passed to the function.\", \"type\": \"object\"}},
\"required\": [\"function_name\", \"arguments\"]}\n```\n```"}], "model": "gpt-4",
"n": 1, "stream": false, "temperature": 0.7}'
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate
connection:
- keep-alive
content-length:
- '1407'
content-type:
- application/json
cookie:
- __cf_bm=UMEw8srVdwpa2TU38H2XN7c9yRB43pgp1kvO1rRuPVE-1707563340-1-ARd2N36Wvpnk/GruerkQ9HuyzyyTnin/J25VL/qPutgHpLWqdGHQ8Kj+QjBLAX79Kk9MYuRGo1PH2GCcBj0HWk8=;
_cfuvid=Q5shHUNkkqsi2PetAV7IWqdcumIc3ctNBTT7SLHr.Ho-1707563340988-0-604800000
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.12.0
x-stainless-arch:
- arm64
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.12.0
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.11.7
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: !!binary |
H4sIAAAAAAAAA1RRTY/TMBC951eMfG5RS7ot5IxWwAEQKy2gDUpdZ5J4iWeMPYGtqvz3lZNuCxcf
3sf4zZtTBqBsrQpQptNinO+Xb343397e37rj09fdHT7+uD/68OXDse3eafyuFsnBh0c08uJ6Zdj5
HsUyzbQJqAXT1PVutbvZ5vlmPRGOa+yTrfWy3CxX23V+dnRsDUZVwEMGAHCa3pSNanxSBawWL4jD
GHWLqriIAFTgPiFKx2ijaBK1uJKGSZCmuO8xINgI0iE0HJwWwRp4ED8I6AgaPt59/gSW0hCDIJ0W
MExJG0F4MvrAf2yN9ayNpkOni5L2+31Jp5IAStUMZFIfFWmHpSqgVC1K1VjSfaUp/sVQqsWs1aEd
HJLEpJv8CaXBHTBM2OZ1AseSxukTdd5svFTSc+sDH1J9NPT9BW8s2dhVAXVkSutHYT/bxwzg51T9
8F+bygd2XirhX0hpYL7azfPU9cpXdrM9k8Ki+39cN3l2TqjiMQq6tHuLwQc7XSLlzMbsGQAA//8D
AGHVex6AAgAA
headers:
CF-Cache-Status:
- DYNAMIC
CF-RAY:
- 8533e5c1dfe296a1-SJC
Cache-Control:
- no-cache, must-revalidate
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Sat, 10 Feb 2024 11:09:02 GMT
Server:
- cloudflare
Transfer-Encoding:
- chunked
access-control-allow-origin:
- '*'
alt-svc:
- h3=":443"; ma=86400
openai-model:
- gpt-4-0613
openai-organization:
- user-z7g4wmlazxqvc5wjyaaaocfz
openai-processing-ms:
- '1588'
openai-version:
- '2020-10-01'
strict-transport-security:
- max-age=15724800; includeSubDomains
x-ratelimit-limit-requests:
- '10000'
x-ratelimit-limit-tokens:
- '300000'
x-ratelimit-remaining-requests:
- '9999'
x-ratelimit-remaining-tokens:
- '299683'
x-ratelimit-reset-requests:
- 6ms
x-ratelimit-reset-tokens:
- 63ms
x-request-id:
- req_9ffe6f9841407a91b5e11cc2009a1e45
status:
code: 200
message: OK
- request:
body: '{"messages": [{"role": "user", "content": "You are test role.\ntest backstory\n\nYour
personal goal is: test goalTOOLS:\n------\nYou have access to only the following
tools:\n\nget_final_answer: get_final_answer(numbers) -> float - Get the final
answer but don''t give it yet, just re-use this\n tool non-stop.\n\nTo
use a tool, please use the exact following format:\n\n```\nThought: Do I need
to use a tool? Yes\nAction: the tool you wanna use, should be one of [get_final_answer],
just the name.\nAction Input: Any and all relevant information input and context
for using the tool\nObservation: the result of using the tool\n```\n\nWhen you
have a response for your task, or if you do not need to use a tool, you MUST
use the format:\n\n```\nThought: Do I need to use a tool? No\nFinal Answer:
[your response here]```This is the summary of your work so far:\nBegin! This
is VERY important to you, your job depends on it!\n\nCurrent Task: The final
answer is 42. But don''t give it yet, instead keep using the `get_final_answer`
tool.\nThought: Do I need to use a tool? Yes\nAction: get_final_answer\nAction
Input: 42\nObservation: 42\nThought: "}], "model": "gpt-4", "n": 1, "stop":
["\nObservation"], "stream": false, "temperature": 0.7}'
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate
connection:
- keep-alive
content-length:
- '1246'
content-type:
- application/json
cookie:
- __cf_bm=UMEw8srVdwpa2TU38H2XN7c9yRB43pgp1kvO1rRuPVE-1707563340-1-ARd2N36Wvpnk/GruerkQ9HuyzyyTnin/J25VL/qPutgHpLWqdGHQ8Kj+QjBLAX79Kk9MYuRGo1PH2GCcBj0HWk8=;
_cfuvid=Q5shHUNkkqsi2PetAV7IWqdcumIc3ctNBTT7SLHr.Ho-1707563340988-0-604800000
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.12.0
x-stainless-arch:
- arm64
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.12.0
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.11.7
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: !!binary |
H4sIAAAAAAAAA1SQS2/CMBCE7/kVK5+h4lUCuVTQHsqpUi+oLyHjbIKL43XtjaBC/PfKITx68WHG
M/pmDwmA0LnIQKiNZFU50538FMvZfrx8edz59aKo36ev2+fp3PGcJlvRiQlaf6Pic+pOUeUMsiZ7
spVHyRhb+2kvvR8Ph6NBY1SUo4mx0nF31O2N+8M2sSGtMIgMPhIAgEPzRjab415k0OuclQpDkCWK
7PIJQHgyUREyBB1YWhadq6nIMtoG94lgARYxByaoA4IEJjIP8Ibh085U3JBBibwqtJVmJW3YoT87
sLCu5gxGA9G2Hy9YhkrnaR0n2NqYi15oq8Nm5VEGshEhMLlT/JgAfDXz63+LhPNUOV4xbdHGwkGa
nvrE9dI3br81mViaG306SVpCEX4DYxVHleid1801ImdyTP4AAAD//wMAoP/ZMQQCAAA=
headers:
CF-Cache-Status:
- DYNAMIC
CF-RAY:
- 8533e5cc7d4396a1-SJC
Cache-Control:
- no-cache, must-revalidate
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Sat, 10 Feb 2024 11:09:04 GMT
Server:
- cloudflare
Transfer-Encoding:
- chunked
access-control-allow-origin:
- '*'
alt-svc:
- h3=":443"; ma=86400
openai-model:
- gpt-4-0613
openai-organization:
- user-z7g4wmlazxqvc5wjyaaaocfz
openai-processing-ms:
- '1729'
openai-version:
- '2020-10-01'
strict-transport-security:
- max-age=15724800; includeSubDomains
x-ratelimit-limit-requests:
- '10000'
x-ratelimit-limit-tokens:
- '300000'
x-ratelimit-remaining-requests:
- '9999'
x-ratelimit-remaining-tokens:
- '299713'
x-ratelimit-reset-requests:
- 6ms
x-ratelimit-reset-tokens:
- 57ms
x-request-id:
- req_204859a5fd0b455d5d5b2fcafea18ab2
status:
code: 200
message: OK
- request:
body: '{"messages": [{"role": "user", "content": "You are test role.\ntest backstory\n\nYour
personal goal is: test goalTOOLS:\n------\nYou have access to only the following
tools:\n\nget_final_answer: get_final_answer(numbers) -> float - Get the final
answer but don''t give it yet, just re-use this\n tool non-stop.\n\nTo
use a tool, please use the exact following format:\n\n```\nThought: Do I need
to use a tool? Yes\nAction: the tool you wanna use, should be one of [get_final_answer],
just the name.\nAction Input: Any and all relevant information input and context
for using the tool\nObservation: the result of using the tool\n```\n\nWhen you
have a response for your task, or if you do not need to use a tool, you MUST
use the format:\n\n```\nThought: Do I need to use a tool? No\nFinal Answer:
[your response here]```This is the summary of your work so far:\nBegin! This
is VERY important to you, your job depends on it!\n\nCurrent Task: The final
answer is 42. But don''t give it yet, instead keep using the `get_final_answer`
tool.\nThought: Do I need to use a tool? Yes\nAction: get_final_answer\nAction
Input: 42\nObservation: 42\nThought: Do I need to use a tool? Yes\nAction: get_final_answer\nAction
Input: 42\nObservation: Actually, I used too many tools, so I''ll stop now and
give you my absolute BEST Final answer NOW, using the expected format: ```\nThought:
Do I need to use a tool? No\nFinal Answer: [your response here]```\nThought:
"}], "model": "gpt-4", "n": 1, "stop": ["\nObservation"], "stream": false, "temperature":
0.7}'
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate
connection:
- keep-alive
content-length:
- '1549'
content-type:
- application/json
cookie:
- __cf_bm=UMEw8srVdwpa2TU38H2XN7c9yRB43pgp1kvO1rRuPVE-1707563340-1-ARd2N36Wvpnk/GruerkQ9HuyzyyTnin/J25VL/qPutgHpLWqdGHQ8Kj+QjBLAX79Kk9MYuRGo1PH2GCcBj0HWk8=;
_cfuvid=Q5shHUNkkqsi2PetAV7IWqdcumIc3ctNBTT7SLHr.Ho-1707563340988-0-604800000
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.12.0
x-stainless-arch:
- arm64
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.12.0
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.11.7
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: !!binary |
H4sIAAAAAAAAA1SQT0/CQBDF7/0Ukz2DaWkLphdDNFGCBxMT0ahplnZaitudZXeIfwjf3WwpoJc9
vN++l/dmFwCIphQZiGIluWiNGl5uqsX13eJn8/gUPt9H6cPU6jlN02o9v12IgXfQco0FH10XBbVG
ITekD7iwKBl9ajQJJ+k4jpOkAy2VqLytNjxMhuE4invHipoCncjgNQAA2HWv76ZL/BIZhIOj0qJz
skaRnT4BCEvKK0I61ziWmsXgDAvSjLqre0MwA41YAhNsHYIEJlJX8ILuTU8LvyGDGjmvGi1VLrX7
RHskMNNmyxkkI9Gn70+1FNXG0tJP0FulTnrV6MatcovSkfYVHJM52PcBwHs3f/tvkTCWWsM50wdq
Hxin6SFPnC99pqOoh0ws1R/XZBz0DYX7doytH1WjNbbpruF7BvvgFwAA//8DAMXWvogEAgAA
headers:
CF-Cache-Status:
- DYNAMIC
CF-RAY:
- 8533e5d81ae396a1-SJC
Cache-Control:
- no-cache, must-revalidate
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Sat, 10 Feb 2024 11:09:05 GMT
Server:
- cloudflare
Transfer-Encoding:
- chunked
access-control-allow-origin:
- '*'
alt-svc:
- h3=":443"; ma=86400
openai-model:
- gpt-4-0613
openai-organization:
- user-z7g4wmlazxqvc5wjyaaaocfz
openai-processing-ms:
- '1177'
openai-version:
- '2020-10-01'
strict-transport-security:
- max-age=15724800; includeSubDomains
x-ratelimit-limit-requests:
- '10000'
x-ratelimit-limit-tokens:
- '300000'
x-ratelimit-remaining-requests:
- '9999'
x-ratelimit-remaining-tokens:
- '299639'
x-ratelimit-reset-requests:
- 6ms
x-ratelimit-reset-tokens:
- 72ms
x-request-id:
- req_b910e4b0341d6248b46d0e2ba4602f86
status:
code: 200
message: OK
- request:
body: '{"messages": [{"role": "user", "content": "Return a valid schema for the
one tool you must use with its arguments and values.\n\nTools available:\n\nFuntion
Name: get_final_answer\nFuntion attributes: {''numbers'': {}}\nDescription:
get_final_answer(numbers) -> float - Get the final answer but don''t give it
yet, just re-use this\n tool non-stop.\n\nUse this text to inform a valid
ouput schema:\nDo I need to use a tool? Yes\nAction: get_final_answer\nAction
Input: 42\n\nThe output should be formatted as a JSON instance that conforms
to the JSON schema below.\n\nAs an example, for the schema {\"properties\":
{\"foo\": {\"title\": \"Foo\", \"description\": \"a list of strings\", \"type\":
\"array\", \"items\": {\"type\": \"string\"}}}, \"required\": [\"foo\"]}\nthe
object {\"foo\": [\"bar\", \"baz\"]} is a well-formatted instance of the schema.
The object {\"properties\": {\"foo\": [\"bar\", \"baz\"]}} is not well-formatted.\n\nHere
is the output schema:\n```\n{\"properties\": {\"function_name\": {\"title\":
\"Function Name\", \"description\": \"The name of the function to be called.\",
\"type\": \"string\"}, \"arguments\": {\"title\": \"Arguments\", \"description\":
\"A dictinary of arguments to be passed to the function.\", \"type\": \"object\"}},
\"required\": [\"function_name\", \"arguments\"]}\n```\n```"}], "model": "gpt-4",
"n": 1, "stream": false, "temperature": 0.7}'
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate
connection:
- keep-alive
content-length:
- '1398'
content-type:
- application/json
cookie:
- __cf_bm=UMEw8srVdwpa2TU38H2XN7c9yRB43pgp1kvO1rRuPVE-1707563340-1-ARd2N36Wvpnk/GruerkQ9HuyzyyTnin/J25VL/qPutgHpLWqdGHQ8Kj+QjBLAX79Kk9MYuRGo1PH2GCcBj0HWk8=;
_cfuvid=Q5shHUNkkqsi2PetAV7IWqdcumIc3ctNBTT7SLHr.Ho-1707563340988-0-604800000
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.12.0
x-stainless-arch:
- arm64
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.12.0
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.11.7
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: !!binary |
H4sIAAAAAAAAA1SRzW7bMBCE73qKxZ7twKlkJ9W9QHtIf9BDUFSFTNMriQm5pMkVksDQuxeUHbu9
6DC732pmeCwA0OyxBtSDEu2CXd4fusdPDx/7X/7h21c+/Ci/P7nD888v9xsJFheZ8Lsn0vJO3Wjv
giUxnk9jHUkJ5au3d6u79aYsq808cH5PNmN9kGW1XG1uyzMxeKMpYQ2/CwCA4/zN3nhPr1jDavGu
OEpJ9YT1ZQkAo7dZQZWSSaJYcHEdas9CPNv9TJHAJFDwQtYuOx+dEqE9GM6YJvAdyECQ9EBO1Q03
vN1uGz42DNBgN7LOOVtWjhqsocGepO0MK9sqTi8UG1ycdlXsR0csKe/NfFZ5dDuKs1Z9yOLU8DT/
BM+Op0tU6/sQ/S7XwqO1F70zbNLQRlLJc46VxIcTPhUAf+ZKx/9awhC9C9KKfybOB8vV+nQPr693
nVbnvlG8KPsPVa2Ls0NMb0nI5ew9xRDN3HD2WUzFXwAAAP//AwC92yRkWAIAAA==
headers:
CF-Cache-Status:
- DYNAMIC
CF-RAY:
- 8533e5e02f2f96a1-SJC
Cache-Control:
- no-cache, must-revalidate
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Sat, 10 Feb 2024 11:09:09 GMT
Server:
- cloudflare
Transfer-Encoding:
- chunked
access-control-allow-origin:
- '*'
alt-svc:
- h3=":443"; ma=86400
openai-model:
- gpt-4-0613
openai-organization:
- user-z7g4wmlazxqvc5wjyaaaocfz
openai-processing-ms:
- '2852'
openai-version:
- '2020-10-01'
strict-transport-security:
- max-age=15724800; includeSubDomains
x-ratelimit-limit-requests:
- '10000'
x-ratelimit-limit-tokens:
- '300000'
x-ratelimit-remaining-requests:
- '9999'
x-ratelimit-remaining-tokens:
- '299685'
x-ratelimit-reset-requests:
- 6ms
x-ratelimit-reset-tokens:
- 63ms
x-request-id:
- req_164c005261f8c276123bf69961c10198
status:
code: 200
message: OK
- request:
body: '{"messages": [{"role": "user", "content": "Progressively summarize the
lines of conversation provided, adding onto the previous summary returning a
new summary.\n\nEXAMPLE\nCurrent summary:\nThe human asks what the AI thinks
of artificial intelligence. The AI thinks artificial intelligence is a force
for good.\n\nNew lines of conversation:\nHuman: Why do you think artificial
intelligence is a force for good?\nAI: Because artificial intelligence will
help humans reach their full potential.\n\nNew summary:\nThe human asks what
the AI thinks of artificial intelligence. The AI thinks artificial intelligence
is a force for good because it will help humans reach their full potential.\nEND
OF EXAMPLE\n\nCurrent summary:\n\n\nNew lines of conversation:\nHuman: The final
answer is 42. But don''t give it yet, instead keep using the `get_final_answer`
tool.\nAI: Agent stopped due to iteration limit or time limit.\n\nNew summary:"}],
"model": "gpt-4", "n": 1, "stream": false, "temperature": 0.7}'
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate
connection:
- keep-alive
content-length:
- '997'
content-type:
- application/json
cookie:
- __cf_bm=UMEw8srVdwpa2TU38H2XN7c9yRB43pgp1kvO1rRuPVE-1707563340-1-ARd2N36Wvpnk/GruerkQ9HuyzyyTnin/J25VL/qPutgHpLWqdGHQ8Kj+QjBLAX79Kk9MYuRGo1PH2GCcBj0HWk8=;
_cfuvid=Q5shHUNkkqsi2PetAV7IWqdcumIc3ctNBTT7SLHr.Ho-1707563340988-0-604800000
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.12.0
x-stainless-arch:
- arm64
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.12.0
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.11.7
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: !!binary |
H4sIAAAAAAAAA1RRy27bMBC86ysWPMuGbalx4ltuKVC0KBC0QIvCoakVxYbcVcmVUyPwvxekX+iF
h5md4ezsewWgXKc2oMygxYTRz+7/9N+fvj3a+x969+lrw19en1f9A32Ww17Wqs4K3v1GIxfV3HAY
PYpjOtEmohbMrsv1Yv3hrmnah0IE7tBnmR1l1s4Wd8vmrBjYGUxqAz8rAID38uZs1OFftYFFfUEC
pqQtqs11CEBF9hlROiWXRJOo+kYaJkEqcZ8HhGEKmsBRkjgZSSADwuNHIBYQBuv2WKDekfagKb1h
BO6hXcEBpQZNXZ7Lpo4mhCk5skXxYlG2RbY9yV5AmP0cnvgN9xjry1faIgkk4TFBN2G2i6jNkI1y
NMGoc5vAEcQFBO+Ck7k6r3S8duHZjpF3uTeavL/ivSOXhm1EnZjy3vmvk/xYAfwqnU//1ajGyGGU
rfArUiqna05+6nbeG9u2Z1JYtL/hq+W6OidU6ZAEQ27EYhyjKyfIOatj9Q8AAP//AwAIzVnseQIA
AA==
headers:
CF-Cache-Status:
- DYNAMIC
CF-RAY:
- 8533e5f4693b96a1-SJC
Cache-Control:
- no-cache, must-revalidate
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Sat, 10 Feb 2024 11:09:10 GMT
Server:
- cloudflare
Transfer-Encoding:
- chunked
access-control-allow-origin:
- '*'
alt-svc:
- h3=":443"; ma=86400
openai-model:
- gpt-4-0613
openai-organization:
- user-z7g4wmlazxqvc5wjyaaaocfz
openai-processing-ms:
- '1561'
openai-version:
- '2020-10-01'
strict-transport-security:
- max-age=15724800; includeSubDomains
x-ratelimit-limit-requests:
- '10000'
x-ratelimit-limit-tokens:
- '300000'
x-ratelimit-remaining-requests:
- '9999'
x-ratelimit-remaining-tokens:
- '299765'
x-ratelimit-reset-requests:
- 6ms
x-ratelimit-reset-tokens:
- 46ms
x-request-id:
- req_7f3006cdd24fec9a5fc15ec53c50d32f
status:
code: 200
message: OK
version: 1

File diff suppressed because it is too large Load Diff

View File

@@ -1,501 +0,0 @@
interactions:
- request:
body: '{"messages": [{"role": "user", "content": "You are test role.\ntest backstory\n\nYour
personal goal is: test goalTOOLS:\n------\nYou have access to only the following
tools:\n\nlearn_about_AI: learn_about_AI(topic) -> float - Useful for when you
need to learn about AI to write an paragraph about it.\nDelegate work to co-worker:
Delegate work to co-worker(coworker: str, task: str, context: str) - Delegate
a specific task to one of the following co-workers: . The input to this tool
should be the role of the coworker, the task you want them to do, and ALL necessary
context to exectue the task, they know nothing about the task, so share absolute
everything you know, don''t reference things but instead explain them.\nAsk
question to co-worker: Ask question to co-worker(coworker: str, question: str,
context: str) - Ask a specific question to one of the following co-workers:
. The input to this tool should be the role of the coworker, the question you
have for them, and ALL necessary context to ask the question properly, they
know nothing about the question, so share absolute everything you know, don''t
reference things but instead explain them.\n\nTo use a tool, please use the
exact following format:\n\n```\nThought: Do I need to use a tool? Yes\nAction:
the tool you wanna use, should be one of [learn_about_AI, Delegate work to co-worker,
Ask question to co-worker], just the name.\nAction Input: Any and all relevant
information input and context for using the tool\nObservation: the result of
using the tool\n```\n\nWhen you have a response for your task, or if you do
not need to use a tool, you MUST use the format:\n\n```\nThought: Do I need
to use a tool? No\nFinal Answer: [your response here]```This is the summary
of your work so far:\nBegin! This is VERY important to you, your job depends
on it!\n\nCurrent Task: Write and then review an small paragraph on AI until
it''s AMAZING\n"}], "model": "gpt-4", "n": 1, "stop": ["\nObservation"], "stream":
false, "temperature": 0.7}'
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate
connection:
- keep-alive
content-length:
- '1999'
content-type:
- application/json
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.12.0
x-stainless-arch:
- arm64
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.12.0
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.11.7
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: !!binary |
H4sIAAAAAAAAA1SRS0/DMBCE7/kVK59blNKnckEVINEDggNCAooix9kmpo7X2BvRqup/R05fcPFh
xrP+Zr1LAIQuRQZC1ZJV40x/9l3et+93mx8jC1rdpo+btEi3/vX54el2LXoxQcUXKj6lrhQ1ziBr
sgdbeZSMcepgmk7H49F4NuqMhko0MVY57o/66WQwPCZq0gqDyOAjAQDYdWdksyVuRAZp76Q0GIKs
UGTnSwDCk4mKkCHowNKy6F1MRZbRdrgvNbVVzRncESzAIpbABG1AkMBE5gbeMCztXMUyGRiU3uay
oJbz+eKkw8K6ljNYirlnvdJKSwMLy2iMrtAqXApxfH1/xjZUOU9FrGhbY876Slsd6tyjDGQjYmBy
h/g+Afjs1tP+ayycp8ZxzrRGGweOhoPDPHH5iYt7PTmaTCzNn9R4mhwJRdgGxiZfaVuhd15324qc
yT75BQAA//8DAG14RFskAgAA
headers:
CF-Cache-Status:
- DYNAMIC
CF-RAY:
- 85330ff5ff101637-SJC
Cache-Control:
- no-cache, must-revalidate
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Sat, 10 Feb 2024 08:43:05 GMT
Server:
- cloudflare
Set-Cookie:
- __cf_bm=FHcpgH1eZBeZ3Rbcza28Oc6NWTXs_wBg2dFweW8n7iE-1707554585-1-AeO0TVKmSDiPyhIhqVNGXLU1AqKVAJroEV7Mq1JaqrBTJlB6hP4FrF9q/F66TJ4PAMcnMJ6YKH9wW4gQAx2jhis=;
path=/; expires=Sat, 10-Feb-24 09:13:05 GMT; domain=.api.openai.com; HttpOnly;
Secure; SameSite=None
- _cfuvid=CyquHMSR3lyGv7DVvBmUUvZ_N5p0T0sRkvGYdcRgwUo-1707554585722-0-604800000;
path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
Transfer-Encoding:
- chunked
access-control-allow-origin:
- '*'
alt-svc:
- h3=":443"; ma=86400
openai-model:
- gpt-4-0613
openai-organization:
- user-z7g4wmlazxqvc5wjyaaaocfz
openai-processing-ms:
- '1403'
openai-version:
- '2020-10-01'
strict-transport-security:
- max-age=15724800; includeSubDomains
x-ratelimit-limit-requests:
- '10000'
x-ratelimit-limit-tokens:
- '300000'
x-ratelimit-remaining-requests:
- '9999'
x-ratelimit-remaining-tokens:
- '299524'
x-ratelimit-reset-requests:
- 6ms
x-ratelimit-reset-tokens:
- 95ms
x-request-id:
- req_c3e3d009fb1789af53949c6665e20c2d
status:
code: 200
message: OK
- request:
body: '{"messages": [{"role": "user", "content": "Return a valid schema for the
one tool you must use with its arguments and values.\n\nTools available:\n\nFuntion
Name: learn_about_AI\nFuntion attributes: {''topic'': {}}\nDescription: learn_about_AI(topic)
-> float - Useful for when you need to learn about AI to write an paragraph
about it.\n--\nFuntion Name: Delegate work to co-worker\nFuntion attributes:
{''coworker'': {''type'': ''string''}, ''task'': {''type'': ''string''}, ''context'':
{''type'': ''string''}}\nDescription: Delegate work to co-worker(coworker: str,
task: str, context: str) - Delegate a specific task to one of the following
co-workers: . The input to this tool should be the role of the coworker, the
task you want them to do, and ALL necessary context to exectue the task, they
know nothing about the task, so share absolute everything you know, don''t reference
things but instead explain them.\n--\nFuntion Name: Ask question to co-worker\nFuntion
attributes: {''coworker'': {''type'': ''string''}, ''question'': {''type'':
''string''}, ''context'': {''type'': ''string''}}\nDescription: Ask question
to co-worker(coworker: str, question: str, context: str) - Ask a specific question
to one of the following co-workers: . The input to this tool should be the role
of the coworker, the question you have for them, and ALL necessary context to
ask the question properly, they know nothing about the question, so share absolute
everything you know, don''t reference things but instead explain them.\n\nUse
this text to inform a valid ouput schema:\nThought: Do I need to use a tool?
Yes\nAction: learn_about_AI\nAction Input: \"Artificial Intelligence\"\n\nThe
output should be formatted as a JSON instance that conforms to the JSON schema
below.\n\nAs an example, for the schema {\"properties\": {\"foo\": {\"title\":
\"Foo\", \"description\": \"a list of strings\", \"type\": \"array\", \"items\":
{\"type\": \"string\"}}}, \"required\": [\"foo\"]}\nthe object {\"foo\": [\"bar\",
\"baz\"]} is a well-formatted instance of the schema. The object {\"properties\":
{\"foo\": [\"bar\", \"baz\"]}} is not well-formatted.\n\nHere is the output
schema:\n```\n{\"properties\": {\"function_name\": {\"title\": \"Function Name\",
\"description\": \"The name of the function to be called.\", \"type\": \"string\"},
\"arguments\": {\"title\": \"Arguments\", \"description\": \"A dictinary of
arguments to be passed to the function.\", \"type\": \"object\"}}, \"required\":
[\"function_name\", \"arguments\"]}\n```\n```"}], "model": "gpt-4", "n": 1,
"stream": false, "temperature": 0.7}'
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate
connection:
- keep-alive
content-length:
- '2558'
content-type:
- application/json
cookie:
- __cf_bm=FHcpgH1eZBeZ3Rbcza28Oc6NWTXs_wBg2dFweW8n7iE-1707554585-1-AeO0TVKmSDiPyhIhqVNGXLU1AqKVAJroEV7Mq1JaqrBTJlB6hP4FrF9q/F66TJ4PAMcnMJ6YKH9wW4gQAx2jhis=;
_cfuvid=CyquHMSR3lyGv7DVvBmUUvZ_N5p0T0sRkvGYdcRgwUo-1707554585722-0-604800000
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.12.0
x-stainless-arch:
- arm64
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.12.0
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.11.7
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: !!binary |
H4sIAAAAAAAAA1SRwW7bMBBE7/qKBc92YTeWbehmIC4a9JJrURYyTa0kJuSSIVdtWkP/XlBybPSi
w8y+1ezwUgAI04gKhO4Vaxfscv/WHH/x8fl7n/rHxy+H4zf8u7HP7/i23SexyIQ/v6DmD+qT9i5Y
ZONptnVExZi3rnerXVluyn05Gc43aDPWBV5ulqvt+uFK9N5oTKKCHwUAwGX65mzU4LuoYLX4UBym
pDoU1W0IQERvsyJUSiaxIhaLu6k9MdIU9ytGBJNAwW+0dtn66BQzNmAoYxrBt8A9gh84DAxJ9+hU
JUnS6XSSdJEEIEU7kM7n1qQcSlGBFBZVpFqd/cD14UmKxTypYjc4JE55aqKzyj4YPXOHyKY12igL
T8RoremQNEqRZ0dJ4/Rncb1mvNVgfReiP+fKaLD2preGTOrriCp5yicn9mHGxwLg51T38F+DIkTv
AtfsX5HywnK3n/eJ+8ve3c3D1WTPyt717ed1cU0o0p/E6OrWUIcxRDO1n3MWY/EPAAD//wMA8S8H
Q3QCAAA=
headers:
CF-Cache-Status:
- DYNAMIC
CF-RAY:
- 853310010fcd1637-SJC
Cache-Control:
- no-cache, must-revalidate
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Sat, 10 Feb 2024 08:43:08 GMT
Server:
- cloudflare
Transfer-Encoding:
- chunked
access-control-allow-origin:
- '*'
alt-svc:
- h3=":443"; ma=86400
openai-model:
- gpt-4-0613
openai-organization:
- user-z7g4wmlazxqvc5wjyaaaocfz
openai-processing-ms:
- '2273'
openai-version:
- '2020-10-01'
strict-transport-security:
- max-age=15724800; includeSubDomains
x-ratelimit-limit-requests:
- '10000'
x-ratelimit-limit-tokens:
- '300000'
x-ratelimit-remaining-requests:
- '9999'
x-ratelimit-remaining-tokens:
- '299398'
x-ratelimit-reset-requests:
- 6ms
x-ratelimit-reset-tokens:
- 120ms
x-request-id:
- req_9af33ac92b825be43da4372550c8c502
status:
code: 200
message: OK
- request:
body: '{"messages": [{"role": "user", "content": "You are test role.\ntest backstory\n\nYour
personal goal is: test goalTOOLS:\n------\nYou have access to only the following
tools:\n\nlearn_about_AI: learn_about_AI(topic) -> float - Useful for when you
need to learn about AI to write an paragraph about it.\nDelegate work to co-worker:
Delegate work to co-worker(coworker: str, task: str, context: str) - Delegate
a specific task to one of the following co-workers: . The input to this tool
should be the role of the coworker, the task you want them to do, and ALL necessary
context to exectue the task, they know nothing about the task, so share absolute
everything you know, don''t reference things but instead explain them.\nAsk
question to co-worker: Ask question to co-worker(coworker: str, question: str,
context: str) - Ask a specific question to one of the following co-workers:
. The input to this tool should be the role of the coworker, the question you
have for them, and ALL necessary context to ask the question properly, they
know nothing about the question, so share absolute everything you know, don''t
reference things but instead explain them.\n\nTo use a tool, please use the
exact following format:\n\n```\nThought: Do I need to use a tool? Yes\nAction:
the tool you wanna use, should be one of [learn_about_AI, Delegate work to co-worker,
Ask question to co-worker], just the name.\nAction Input: Any and all relevant
information input and context for using the tool\nObservation: the result of
using the tool\n```\n\nWhen you have a response for your task, or if you do
not need to use a tool, you MUST use the format:\n\n```\nThought: Do I need
to use a tool? No\nFinal Answer: [your response here]```This is the summary
of your work so far:\nBegin! This is VERY important to you, your job depends
on it!\n\nCurrent Task: Write and then review an small paragraph on AI until
it''s AMAZING\nThought: Do I need to use a tool? Yes\nAction: learn_about_AI\nAction
Input: \"Artificial Intelligence\"\nObservation: AI is a very broad field.\nThought:
"}], "model": "gpt-4", "n": 1, "stop": ["\nObservation"], "stream": false, "temperature":
0.7}'
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate
connection:
- keep-alive
content-length:
- '2154'
content-type:
- application/json
cookie:
- __cf_bm=FHcpgH1eZBeZ3Rbcza28Oc6NWTXs_wBg2dFweW8n7iE-1707554585-1-AeO0TVKmSDiPyhIhqVNGXLU1AqKVAJroEV7Mq1JaqrBTJlB6hP4FrF9q/F66TJ4PAMcnMJ6YKH9wW4gQAx2jhis=;
_cfuvid=CyquHMSR3lyGv7DVvBmUUvZ_N5p0T0sRkvGYdcRgwUo-1707554585722-0-604800000
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.12.0
x-stainless-arch:
- arm64
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.12.0
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.11.7
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: !!binary |
H4sIAAAAAAAAA2RTwWobSxC86yuaPa+ERazY1iUYXh4IHoGEBAJJMK3Z3t22Z7s30z1ylOB/D7Na
WwnvMoepqaaqpvrXAqDiptpCFXr0MIxxef29eXu8eRzS/dX43+fmU2P69lV4v+4+HHhT1YWh+3sK
/sxaBR3GSM4qJzgkQqcydX11cbXZXG6urydg0IZioXWjLy+XF6/Xr2ZGrxzIqi18WQAA/JrOok0a
+lFt4aJ+vhnIDDuqti+PAKqksdxUaMbmKF7VZzCoOMkk9x+FHQhRA66QjQDBVeMbeKdf5V8WjHAr
9khpC7fJueXAGGEnTjFyRxKoBm2dBHC/T3Tg4hPQ4HZXAxsgHNAcWqbYrGDnQFLCQTMyOGBizQaW
98uGLfAYWcgApQGn0ItG7ZgMvEcH5KGIPGUJA4b+5bEdzWkwCDjiPhJoCyOlVtPA0oGjPcwzHjXH
BrJljPEIib5nTgR9HlCA/zC1go89Gc1UlhBzQxAJk5SJbdIB6MdIictrqyFLQ6kE3RRc0HPCCBGl
y9hRDYmCdsI/CzqiOyWxGsak+0jD0jQeWLp6MtNQYGMVGPCBpZukQKcYi63bXUlV1OE+m5c8Eo2R
Q4nk/zZgnx0wmv4R3HNWUx4B5Tmp2eugiYDa8tEkHo+TJAwhJ3SKx1VR0GOhE4xaalQKMek4aMyl
8/yTzl9LwTVZPWdY7PeE0fuAiWpoWXDqkCcUGzU5lgmnIIqUVTXX9uml71G7klvZDckxvty3LGz9
XSI0ldJtcx1P9KcFwLdpr/Jfq1KNSYfR71wfSMrAy6t5r6rzCp/R9fpmRl0d4xnYXN8sZonVKd+7
lqWjNCae9qwIXTwtfgMAAP//AwDp3F89XgQAAA==
headers:
CF-Cache-Status:
- DYNAMIC
CF-RAY:
- 853310125de31637-SJC
Cache-Control:
- no-cache, must-revalidate
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Sat, 10 Feb 2024 08:43:14 GMT
Server:
- cloudflare
Transfer-Encoding:
- chunked
access-control-allow-origin:
- '*'
alt-svc:
- h3=":443"; ma=86400
openai-model:
- gpt-4-0613
openai-organization:
- user-z7g4wmlazxqvc5wjyaaaocfz
openai-processing-ms:
- '6017'
openai-version:
- '2020-10-01'
strict-transport-security:
- max-age=15724800; includeSubDomains
x-ratelimit-limit-requests:
- '10000'
x-ratelimit-limit-tokens:
- '300000'
x-ratelimit-remaining-requests:
- '9999'
x-ratelimit-remaining-tokens:
- '299486'
x-ratelimit-reset-requests:
- 6ms
x-ratelimit-reset-tokens:
- 102ms
x-request-id:
- req_c153504e2e53f18e15a640221b664aba
status:
code: 200
message: OK
- request:
body: '{"messages": [{"role": "user", "content": "Progressively summarize the
lines of conversation provided, adding onto the previous summary returning a
new summary.\n\nEXAMPLE\nCurrent summary:\nThe human asks what the AI thinks
of artificial intelligence. The AI thinks artificial intelligence is a force
for good.\n\nNew lines of conversation:\nHuman: Why do you think artificial
intelligence is a force for good?\nAI: Because artificial intelligence will
help humans reach their full potential.\n\nNew summary:\nThe human asks what
the AI thinks of artificial intelligence. The AI thinks artificial intelligence
is a force for good because it will help humans reach their full potential.\nEND
OF EXAMPLE\n\nCurrent summary:\n\n\nNew lines of conversation:\nHuman: Write
and then review an small paragraph on AI until it''s AMAZING\nAI: Artificial
Intelligence, often abbreviated as AI, is a vast field. It encompasses various
sub-disciplines and technologies that aim to create machines and systems capable
of performing tasks that would usually require human intelligence. These tasks
include learning from experiences, understanding natural language, recognizing
patterns, problem-solving, and decision making. The goal of AI is not just to
replicate human intelligence but also to create systems that can perform tasks
more efficiently and accurately. AI has the potential to revolutionize various
sectors, including healthcare, finance, transportation, and more.\n\nNew summary:"}],
"model": "gpt-4", "n": 1, "stream": false, "temperature": 0.7}'
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate
connection:
- keep-alive
content-length:
- '1547'
content-type:
- application/json
cookie:
- __cf_bm=FHcpgH1eZBeZ3Rbcza28Oc6NWTXs_wBg2dFweW8n7iE-1707554585-1-AeO0TVKmSDiPyhIhqVNGXLU1AqKVAJroEV7Mq1JaqrBTJlB6hP4FrF9q/F66TJ4PAMcnMJ6YKH9wW4gQAx2jhis=;
_cfuvid=CyquHMSR3lyGv7DVvBmUUvZ_N5p0T0sRkvGYdcRgwUo-1707554585722-0-604800000
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.12.0
x-stainless-arch:
- arm64
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.12.0
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.11.7
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: !!binary |
H4sIAAAAAAAAA1RSTW/bMAy951cQuuySBM0aL21uxYBhxW5FdhqGgpFpm60sqiKd9AP974PstMEu
OpB6j4+P720G4Lh2W3C+Q/N9Courp/pH9TPZ8+53vNu8rL5Xq4fjjW52d9/CLzcvCNk/kLcP1NJL
nwIZS5zaPhMaFdbV5mJTVevquhobvdQUCqxNtlgvLr6tLk+ITtiTui38mQEAvI1v0RZrenZbuJh/
VHpSxZbc9vMTgMsSSsWhKqthNDc/N71EozjK3XUE3dBjBNRHBesIbm7BBI6ZjQBjXWoRMh2YjoCg
PYYACTO2GVMHEgtgiMYB2L4oYI+vHNsl7CauTJok1gpHtg4Q9pmpATlQHhmlgZvbOdSkPvOeYwts
gAoIB1SDhinUE/SAmWVQ0GG/qFk9p8CRdNJIvosSpGXSJdwa9BSL/WUjtHGtVjBM04C1bDgdBfRF
jXoFjwn3gcqXRLmR3BcxdrIFDQYdMIQXyPQ0cP6wjaNRCNxS9DQ/+YVWpnQ4+ZmkmM0YytBMBwlD
kcavdF6JvElW4OjDUJe5HWGwzmOmOTQc8cyeMWqSbFhIlu501ffPOARpU5Z9iU4cQvisNxxZu/tM
qBLL6dUkTfD3GcDfMXbDf0lyKUuf7N7kkWIh/FpdT3zunPBz96o6NU0Mw7l+uV7PTgrd5PZ9w7Gl
nDKPKSw6Z++zfwAAAP//AwBANdK6fAMAAA==
headers:
CF-Cache-Status:
- DYNAMIC
CF-RAY:
- 8533103aae201637-SJC
Cache-Control:
- no-cache, must-revalidate
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Sat, 10 Feb 2024 08:43:18 GMT
Server:
- cloudflare
Transfer-Encoding:
- chunked
access-control-allow-origin:
- '*'
alt-svc:
- h3=":443"; ma=86400
openai-model:
- gpt-4-0613
openai-organization:
- user-z7g4wmlazxqvc5wjyaaaocfz
openai-processing-ms:
- '2948'
openai-version:
- '2020-10-01'
strict-transport-security:
- max-age=15724800; includeSubDomains
x-ratelimit-limit-requests:
- '10000'
x-ratelimit-limit-tokens:
- '300000'
x-ratelimit-remaining-requests:
- '9999'
x-ratelimit-remaining-tokens:
- '299629'
x-ratelimit-reset-requests:
- 6ms
x-ratelimit-reset-tokens:
- 74ms
x-request-id:
- req_bf6f7315d0ac178598b80627d7b1af72
status:
code: 200
message: OK
version: 1

View File

@@ -2,27 +2,25 @@ interactions:
- request:
body: '{"messages": [{"role": "user", "content": "You are test role.\ntest backstory\n\nYour
personal goal is: test goalTOOLS:\n------\nYou have access to only the following
tools:\n\nDelegate work to co-worker: Delegate work to co-worker(coworker: str,
task: str, context: str) - Delegate a specific task to one of the following
co-workers: test role2. The input to this tool should be the role of the coworker,
the task you want them to do, and ALL necessary context to exectue the task,
they know nothing about the task, so share absolute everything you know, don''t
reference things but instead explain them.\nAsk question to co-worker: Ask question
to co-worker(coworker: str, question: str, context: str) - Ask a specific question
to one of the following co-workers: test role2. The input to this tool should
be the role of the coworker, the question you have for them, and ALL necessary
context to ask the question properly, they know nothing about the question,
so share absolute everything you know, don''t reference things but instead explain
them.\n\nTo use a tool, please use the exact following format:\n\n```\nThought:
Do I need to use a tool? Yes\nAction: the tool you wanna use, should be one
of [Delegate work to co-worker, Ask question to co-worker], just the name.\nAction
Input: Any and all relevant information input and context for using the tool\nObservation:
the result of using the tool\n```\n\nWhen you have a response for your task,
or if you do not need to use a tool, you MUST use the format:\n\n```\nThought:
Do I need to use a tool? No\nFinal Answer: [your response here]```This is the
summary of your work so far:\nBegin! This is VERY important to you, your job
depends on it!\n\nCurrent Task: Just say hi.\n"}], "model": "gpt-4", "n": 1,
"stop": ["\nObservation"], "stream": false, "temperature": 0.7}'
tools:\n\nDelegate work to co-worker: Useful to delegate a specific task to
one of the following co-workers: test role, test role2.\nThe input to this tool
should be a pipe (|) separated text of length 3 (three), representing the co-worker
you want to ask it to (one of the options), the task and all actual context
you have for the task.\nFor example, `coworker|task|context`.\nAsk question
to co-worker: Useful to ask a question, opinion or take from on of the following
co-workers: test role, test role2.\nThe input to this tool should be a pipe
(|) separated text of length 3 (three), representing the co-worker you want
to ask it to (one of the options), the question and all actual context you have
for the question.\n For example, `coworker|question|context`.\n\nTo use a tool,
please use the exact following format:\n\n```\nThought: Do I need to use a tool?
Yes\nAction: the action to take, should be one of [Delegate work to co-worker,
Ask question to co-worker], just the name.\nAction Input: the input to the action\nObservation:
the result of the action\n```\n\nWhen you have a response for your task, or
if you do not need to use a tool, you MUST use the format:\n\n```\nThought:
Do I need to use a tool? No\nFinal Answer: [your response here]This is the summary
of your work so far:\nBegin! This is VERY important to you, your job depends
on it!\n\nCurrent Task: Just say hi.\n"}], "model": "gpt-4", "n": 1, "stop":
["\nObservation"], "stream": false, "temperature": 0.7}'
headers:
accept:
- application/json
@@ -31,13 +29,13 @@ interactions:
connection:
- keep-alive
content-length:
- '1825'
- '1652'
content-type:
- application/json
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.12.0
- OpenAI/Python 1.7.1
x-stainless-arch:
- arm64
x-stainless-async:
@@ -47,7 +45,7 @@ interactions:
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.12.0
- 1.7.1
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
@@ -55,19 +53,18 @@ interactions:
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: !!binary |
H4sIAAAAAAAAA1SQzU7DMBCE73mKlc8tStSmLbmgih8BEiAECFRAlZtsE4PjNfZGQKu+O3KatuKy
hxnP7LdeRwBCFSIDkVeS89rq/uSruFk9u+kjPfF92sx4tjq/G3h8Ob2ePoheSNDiA3PepY5yqq1G
VmS2du5QMobWZByP0zSNJ2lr1FSgDrHScn/Yj0fJoEtUpHL0IoPXCABg3c7AZgr8ERnEvZ1So/ey
RJHtHwEIRzooQnqvPEvDoncwczKMpsV9rKgpK87gjOAKDGIBTNB4BAlMpE/glt7MhTJSw9T4b3QZ
XCrRdW32EJpK62gRgE2j9V5fKqN8NXcoPZmw0DPZbXwTAby3xzb/+IV1VFueM32iCYWD4+G2Txz+
9eAmo85kYqkP+jCJo45Q+F/PWM+XypTorFPt7YEz2kR/AAAA//8DABJPZgfyAQAA
content: "{\n \"id\": \"chatcmpl-8gkPA1vnW1g25Qr3SJLWB5zeOsUe4\",\n \"object\":
\"chat.completion\",\n \"created\": 1705198848,\n \"model\": \"gpt-4-0613\",\n
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
\"assistant\",\n \"content\": \"Thought: Do I need to use a tool? No\\nFinal
Answer: Hi!\"\n },\n \"logprobs\": null,\n \"finish_reason\":
\"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 367,\n \"completion_tokens\":
17,\n \"total_tokens\": 384\n },\n \"system_fingerprint\": null\n}\n"
headers:
CF-Cache-Status:
- DYNAMIC
CF-RAY:
- 85331c3028ba7aac-SJC
- 845266dd5f1f1a96-GRU
Cache-Control:
- no-cache, must-revalidate
Connection:
@@ -77,14 +74,14 @@ interactions:
Content-Type:
- application/json
Date:
- Sat, 10 Feb 2024 08:51:26 GMT
- Sun, 14 Jan 2024 02:20:50 GMT
Server:
- cloudflare
Set-Cookie:
- __cf_bm=PApKfMOxYmqCPfv1QzKEuqGGYEUQhozoSuKopWRTbzg-1707555086-1-AeIZ1ozsIHX7RW0ZxnKJ17u8m5eIvUTNkNMhFDdTwc71TeSLRieXLv/0waJkA48Sz1+FjLHulvMGaPnUEf7K19k=;
path=/; expires=Sat, 10-Feb-24 09:21:26 GMT; domain=.api.openai.com; HttpOnly;
- __cf_bm=TbP3njfV8Qa2w4gTk3GfLx8EOChqceUdui85lv8w_0s-1705198850-1-AVv9rXKeGNOAPhhVrRvcK49dv9odkct+so0djAQM52Bfzf/nc8ZTJ3zhk2LlfNTWSQ9dcT4UwZHjEHyP8LH39Xg=;
path=/; expires=Sun, 14-Jan-24 02:50:50 GMT; domain=.api.openai.com; HttpOnly;
Secure; SameSite=None
- _cfuvid=ivN7v5z_DwT9MCx4fk0bd86wOUph.ZASgr.z0qq5y2k-1707555086057-0-604800000;
- _cfuvid=y6p2l8rtoLMo6gBiQsUGfETrVWxPY83uCFZsS7Jozok-1705198850215-0-604800000;
path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
Transfer-Encoding:
- chunked
@@ -97,7 +94,7 @@ interactions:
openai-organization:
- user-z7g4wmlazxqvc5wjyaaaocfz
openai-processing-ms:
- '899'
- '1832'
openai-version:
- '2020-10-01'
strict-transport-security:
@@ -109,16 +106,15 @@ interactions:
x-ratelimit-remaining-requests:
- '9999'
x-ratelimit-remaining-tokens:
- '299566'
- '299611'
x-ratelimit-reset-requests:
- 6ms
x-ratelimit-reset-tokens:
- 86ms
- 77ms
x-request-id:
- req_561f62ebd7103f1fd1e54ced4754dcce
status:
code: 200
message: OK
- 12d633fe239a29022ea7206037da096a
http_version: HTTP/1.1
status_code: 200
- request:
body: '{"messages": [{"role": "user", "content": "Progressively summarize the
lines of conversation provided, adding onto the previous summary returning a
@@ -130,7 +126,7 @@ interactions:
the AI thinks of artificial intelligence. The AI thinks artificial intelligence
is a force for good because it will help humans reach their full potential.\nEND
OF EXAMPLE\n\nCurrent summary:\n\n\nNew lines of conversation:\nHuman: Just
say hi.\nAI: Hi\n\nNew summary:"}], "model": "gpt-4", "n": 1, "stream": false,
say hi.\nAI: Hi!\n\nNew summary:"}], "model": "gpt-4", "n": 1, "stream": false,
"temperature": 0.7}'
headers:
accept:
@@ -140,16 +136,16 @@ interactions:
connection:
- keep-alive
content-length:
- '866'
- '867'
content-type:
- application/json
cookie:
- __cf_bm=PApKfMOxYmqCPfv1QzKEuqGGYEUQhozoSuKopWRTbzg-1707555086-1-AeIZ1ozsIHX7RW0ZxnKJ17u8m5eIvUTNkNMhFDdTwc71TeSLRieXLv/0waJkA48Sz1+FjLHulvMGaPnUEf7K19k=;
_cfuvid=ivN7v5z_DwT9MCx4fk0bd86wOUph.ZASgr.z0qq5y2k-1707555086057-0-604800000
- __cf_bm=TbP3njfV8Qa2w4gTk3GfLx8EOChqceUdui85lv8w_0s-1705198850-1-AVv9rXKeGNOAPhhVrRvcK49dv9odkct+so0djAQM52Bfzf/nc8ZTJ3zhk2LlfNTWSQ9dcT4UwZHjEHyP8LH39Xg=;
_cfuvid=y6p2l8rtoLMo6gBiQsUGfETrVWxPY83uCFZsS7Jozok-1705198850215-0-604800000
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.12.0
- OpenAI/Python 1.7.1
x-stainless-arch:
- arm64
x-stainless-async:
@@ -159,7 +155,7 @@ interactions:
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.12.0
- 1.7.1
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
@@ -167,20 +163,19 @@ interactions:
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: !!binary |
H4sIAAAAAAAAA1SQT0sDMRDF7/sphpxb2dW2lt48qQgFUbyolDQ73aRmM2lmFq3S7y7Z/tNLIPPy
Xn7zfgoA5Wo1A2WsFtNGP5xu6nl5a76f6ul9s35plo9JwsO6ruaXk40aZAct12jk6Low1EaP4ijs
ZZNQC+bU6rq8Ho/H5XTSCy3V6LOtiTIcDctJdXVwWHIGWc3gtQAA+OnPzBZq/FIzKAfHSYvMukE1
Oz0CUIl8nijN7Fh0EDU4i4aCYOhxny2C7VodwAWW1BlhEItwcw9CsO5YgPUWrBvk+6d1xh71hBwp
1Aw6xkQxOS3ot/DpxMKbunNv6kId/tydYD01MdEyLxY670/zlQuO7SKhZgoZjIXi3r4rAN77Urp/
e6qYqI2yEPrAkAOr0Wifp879n9XL6iAKifZ/XJNxcSBUvGXBdrFyocEUk+s7ypzFrvgFAAD//wMA
D9d32RoCAAA=
content: "{\n \"id\": \"chatcmpl-8gkPCF34yiaqHuTbro7PMBexYagyU\",\n \"object\":
\"chat.completion\",\n \"created\": 1705198850,\n \"model\": \"gpt-4-0613\",\n
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
\"assistant\",\n \"content\": \"The human asks the AI to simply say hi,
and the AI responds with \\\"Hi!\\\"\"\n },\n \"logprobs\": null,\n
\ \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\":
144,\n \"completion_tokens\": 18,\n \"total_tokens\": 162\n },\n \"system_fingerprint\":
null\n}\n"
headers:
CF-Cache-Status:
- DYNAMIC
CF-RAY:
- 85331c380f547aac-SJC
- 845266ee78dc1a96-GRU
Cache-Control:
- no-cache, must-revalidate
Connection:
@@ -190,7 +185,7 @@ interactions:
Content-Type:
- application/json
Date:
- Sat, 10 Feb 2024 08:51:27 GMT
- Sun, 14 Jan 2024 02:20:52 GMT
Server:
- cloudflare
Transfer-Encoding:
@@ -204,7 +199,7 @@ interactions:
openai-organization:
- user-z7g4wmlazxqvc5wjyaaaocfz
openai-processing-ms:
- '1035'
- '1287'
openai-version:
- '2020-10-01'
strict-transport-security:
@@ -222,38 +217,34 @@ interactions:
x-ratelimit-reset-tokens:
- 40ms
x-request-id:
- req_d47c41d14baa963cd2a25770f0f33620
status:
code: 200
message: OK
- 2a891eacb4940e36bec1689b1ab92ee0
http_version: HTTP/1.1
status_code: 200
- request:
body: '{"messages": [{"role": "user", "content": "You are test role2.\ntest backstory2\n\nYour
personal goal is: test goal2TOOLS:\n------\nYou have access to only the following
tools:\n\nget_final_answer: get_final_answer(numbers) -> float - Get the final
answer but don''t give it yet, just re-use this\n tool non-stop.\nDelegate
work to co-worker: Delegate work to co-worker(coworker: str, task: str, context:
str) - Delegate a specific task to one of the following co-workers: test role.
The input to this tool should be the role of the coworker, the task you want
them to do, and ALL necessary context to exectue the task, they know nothing
about the task, so share absolute everything you know, don''t reference things
but instead explain them.\nAsk question to co-worker: Ask question to co-worker(coworker:
str, question: str, context: str) - Ask a specific question to one of the following
co-workers: test role. The input to this tool should be the role of the coworker,
the question you have for them, and ALL necessary context to ask the question
properly, they know nothing about the question, so share absolute everything
you know, don''t reference things but instead explain them.\n\nTo use a tool,
please use the exact following format:\n\n```\nThought: Do I need to use a tool?
Yes\nAction: the tool you wanna use, should be one of [get_final_answer, Delegate
work to co-worker, Ask question to co-worker], just the name.\nAction Input:
Any and all relevant information input and context for using the tool\nObservation:
the result of using the tool\n```\n\nWhen you have a response for your task,
or if you do not need to use a tool, you MUST use the format:\n\n```\nThought:
Do I need to use a tool? No\nFinal Answer: [your response here]```This is the
summary of your work so far:\nBegin! This is VERY important to you, your job
depends on it!\n\nCurrent Task: Don''t give a Final Answer, instead keep using
the `get_final_answer` tool non-stop\nThis is the context you''re working with:\nHi\n"}],
"model": "gpt-4", "n": 1, "stop": ["\nObservation"], "stream": false, "temperature":
0.7}'
work to co-worker: Useful to delegate a specific task to one of the following
co-workers: test role, test role2.\nThe input to this tool should be a pipe
(|) separated text of length 3 (three), representing the co-worker you want
to ask it to (one of the options), the task and all actual context you have
for the task.\nFor example, `coworker|task|context`.\nAsk question to co-worker:
Useful to ask a question, opinion or take from on of the following co-workers:
test role, test role2.\nThe input to this tool should be a pipe (|) separated
text of length 3 (three), representing the co-worker you want to ask it to (one
of the options), the question and all actual context you have for the question.\n
For example, `coworker|question|context`.\n\nTo use a tool, please use the exact
following format:\n\n```\nThought: Do I need to use a tool? Yes\nAction: the
action to take, should be one of [get_final_answer, Delegate work to co-worker,
Ask question to co-worker], just the name.\nAction Input: the input to the action\nObservation:
the result of the action\n```\n\nWhen you have a response for your task, or
if you do not need to use a tool, you MUST use the format:\n\n```\nThought:
Do I need to use a tool? No\nFinal Answer: [your response here]This is the summary
of your work so far:\nBegin! This is VERY important to you, your job depends
on it!\n\nCurrent Task: Don''t give a Final Answer, instead keep using the `get_final_answer`
tool.\nThis is the context you''re working with:\nHi!\n"}], "model": "gpt-4",
"n": 1, "stop": ["\nObservation"], "stream": false, "temperature": 0.7}'
headers:
accept:
- application/json
@@ -262,13 +253,13 @@ interactions:
connection:
- keep-alive
content-length:
- '2101'
- '1923'
content-type:
- application/json
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.12.0
- OpenAI/Python 1.7.1
x-stainless-arch:
- arm64
x-stainless-async:
@@ -278,7 +269,7 @@ interactions:
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.12.0
- 1.7.1
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
@@ -286,20 +277,19 @@ interactions:
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: !!binary |
H4sIAAAAAAAAA1RQy27CMBC85ytWPkMFhfDIparUQ+mhJ4pUAYqMsyRuHa+JN4UW8e+VQ4D24sPM
znhmjhGA0JlIQKhCsiqd6U522Wt/+rVT+UEt9s67xXQ03728LUY/RSw6QUGbD1R8Ud0pKp1B1mTP
tKpQMgbX/rg3juO4Nxk3REkZmiDLHXeH3d6oP2gVBWmFXiSwjAAAjs0bstkMDyKBXueClOi9zFEk
1yMAUZEJiJDea8/SsujcSEWW0TZx5wXVecEJPBHMwCJmwAS1R5DAROYB3tGv7KMKZRLIkdOtttKk
0vo9VhcGZtbVnMByJZ71SqxF+9npmtJQ7irahEa2NuaKb7XVvkgrlJ5sSOSZ3Fl+igDWzRr1v4LC
VVQ6Tpk+0QbDYTw9+4nb8Df2ftiSTCzNH9VkELUJhf/2jGWolmPlKt2ME3JGp+gXAAD//wMAVFuU
bxMCAAA=
content: "{\n \"id\": \"chatcmpl-8gkPEml4Ifldq51knwwASO3nJroK3\",\n \"object\":
\"chat.completion\",\n \"created\": 1705198852,\n \"model\": \"gpt-4-0613\",\n
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
\"assistant\",\n \"content\": \"Thought: Do I need to use a tool? Yes\\nAction:
get_final_answer\\nAction Input: [1,2,3]\"\n },\n \"logprobs\": null,\n
\ \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\":
432,\n \"completion_tokens\": 28,\n \"total_tokens\": 460\n },\n \"system_fingerprint\":
null\n}\n"
headers:
CF-Cache-Status:
- DYNAMIC
CF-RAY:
- 85331c3f6c38cfc4-SJC
- 845266f9fc150316-GRU
Cache-Control:
- no-cache, must-revalidate
Connection:
@@ -309,14 +299,14 @@ interactions:
Content-Type:
- application/json
Date:
- Sat, 10 Feb 2024 08:51:28 GMT
- Sun, 14 Jan 2024 02:20:53 GMT
Server:
- cloudflare
Set-Cookie:
- __cf_bm=2FkgHfXDTEWwtJx9fuYC7bMgshbFiKyrz77uUDT7zZs-1707555088-1-Aepn1cZI06+lHA4fg6WnGlRYQvOzF/0+FklFgWjzvCx0c9PWNVB0SHCULztQ+AdxbloscbFE14+qlktmxccvWLA=;
path=/; expires=Sat, 10-Feb-24 09:21:28 GMT; domain=.api.openai.com; HttpOnly;
- __cf_bm=.BcqEMXmdqfYfcBhvXzVDw6VVpZcS222j9Ky_mjHYg8-1705198853-1-AWhcBEd9Rcv3V/4iZJoUcp4zRPfVw7kkOb9e8JZ9J3WChdb10kNHT7f78/k6uarWxT3kJDReeFJHAIhx5KG+i40=;
path=/; expires=Sun, 14-Jan-24 02:50:53 GMT; domain=.api.openai.com; HttpOnly;
Secure; SameSite=None
- _cfuvid=euVu9TT31y3M3IcbZHjgSQIkmaipgL6G3sfBSomB.wA-1707555088785-0-604800000;
- _cfuvid=AKii9trCQGiEB.0xzvo5UXqowzTR3yys6v0hI5JiumE-1705198853906-0-604800000;
path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
Transfer-Encoding:
- chunked
@@ -329,7 +319,7 @@ interactions:
openai-organization:
- user-z7g4wmlazxqvc5wjyaaaocfz
openai-processing-ms:
- '1442'
- '1570'
openai-version:
- '2020-10-01'
strict-transport-security:
@@ -341,48 +331,42 @@ interactions:
x-ratelimit-remaining-requests:
- '9999'
x-ratelimit-remaining-tokens:
- '299499'
- '299545'
x-ratelimit-reset-requests:
- 6ms
x-ratelimit-reset-tokens:
- 100ms
- 91ms
x-request-id:
- req_b76f1918aa33f67b05eee313ca29e621
status:
code: 200
message: OK
- 133ee7e16a1491cfd7daa90ea3e42b74
http_version: HTTP/1.1
status_code: 200
- request:
body: '{"messages": [{"role": "user", "content": "You are test role2.\ntest backstory2\n\nYour
personal goal is: test goal2TOOLS:\n------\nYou have access to only the following
tools:\n\nget_final_answer: get_final_answer(numbers) -> float - Get the final
answer but don''t give it yet, just re-use this\n tool non-stop.\nDelegate
work to co-worker: Delegate work to co-worker(coworker: str, task: str, context:
str) - Delegate a specific task to one of the following co-workers: test role.
The input to this tool should be the role of the coworker, the task you want
them to do, and ALL necessary context to exectue the task, they know nothing
about the task, so share absolute everything you know, don''t reference things
but instead explain them.\nAsk question to co-worker: Ask question to co-worker(coworker:
str, question: str, context: str) - Ask a specific question to one of the following
co-workers: test role. The input to this tool should be the role of the coworker,
the question you have for them, and ALL necessary context to ask the question
properly, they know nothing about the question, so share absolute everything
you know, don''t reference things but instead explain them.\n\nTo use a tool,
please use the exact following format:\n\n```\nThought: Do I need to use a tool?
Yes\nAction: the tool you wanna use, should be one of [get_final_answer, Delegate
work to co-worker, Ask question to co-worker], just the name.\nAction Input:
Any and all relevant information input and context for using the tool\nObservation:
the result of using the tool\n```\n\nWhen you have a response for your task,
or if you do not need to use a tool, you MUST use the format:\n\n```\nThought:
Do I need to use a tool? No\nFinal Answer: [your response here]```This is the
summary of your work so far:\nBegin! This is VERY important to you, your job
depends on it!\n\nCurrent Task: Don''t give a Final Answer, instead keep using
the `get_final_answer` tool non-stop\nThis is the context you''re working with:\nHi\nThought:
Do I need to use a tool? Yes\nAction: get_final_answer\nAction Input: [\"Hi\"]\nObservation:
Actually, I used too many tools, so I''ll stop now and give you my absolute
BEST Final answer NOW, using the expected format: ```\nThought: Do I need to
use a tool? No\nFinal Answer: [your response here]```\nThought: "}], "model":
"gpt-4", "n": 1, "stop": ["\nObservation"], "stream": false, "temperature":
0.7}'
work to co-worker: Useful to delegate a specific task to one of the following
co-workers: test role, test role2.\nThe input to this tool should be a pipe
(|) separated text of length 3 (three), representing the co-worker you want
to ask it to (one of the options), the task and all actual context you have
for the task.\nFor example, `coworker|task|context`.\nAsk question to co-worker:
Useful to ask a question, opinion or take from on of the following co-workers:
test role, test role2.\nThe input to this tool should be a pipe (|) separated
text of length 3 (three), representing the co-worker you want to ask it to (one
of the options), the question and all actual context you have for the question.\n
For example, `coworker|question|context`.\n\nTo use a tool, please use the exact
following format:\n\n```\nThought: Do I need to use a tool? Yes\nAction: the
action to take, should be one of [get_final_answer, Delegate work to co-worker,
Ask question to co-worker], just the name.\nAction Input: the input to the action\nObservation:
the result of the action\n```\n\nWhen you have a response for your task, or
if you do not need to use a tool, you MUST use the format:\n\n```\nThought:
Do I need to use a tool? No\nFinal Answer: [your response here]This is the summary
of your work so far:\nBegin! This is VERY important to you, your job depends
on it!\n\nCurrent Task: Don''t give a Final Answer, instead keep using the `get_final_answer`
tool.\nThis is the context you''re working with:\nHi!\nThought: Do I need to
use a tool? Yes\nAction: get_final_answer\nAction Input: [1,2,3]\nObservation:
42\nThought: "}], "model": "gpt-4", "n": 1, "stop": ["\nObservation"], "stream":
false, "temperature": 0.7}'
headers:
accept:
- application/json
@@ -391,16 +375,16 @@ interactions:
connection:
- keep-alive
content-length:
- '2419'
- '2037'
content-type:
- application/json
cookie:
- __cf_bm=2FkgHfXDTEWwtJx9fuYC7bMgshbFiKyrz77uUDT7zZs-1707555088-1-Aepn1cZI06+lHA4fg6WnGlRYQvOzF/0+FklFgWjzvCx0c9PWNVB0SHCULztQ+AdxbloscbFE14+qlktmxccvWLA=;
_cfuvid=euVu9TT31y3M3IcbZHjgSQIkmaipgL6G3sfBSomB.wA-1707555088785-0-604800000
- __cf_bm=.BcqEMXmdqfYfcBhvXzVDw6VVpZcS222j9Ky_mjHYg8-1705198853-1-AWhcBEd9Rcv3V/4iZJoUcp4zRPfVw7kkOb9e8JZ9J3WChdb10kNHT7f78/k6uarWxT3kJDReeFJHAIhx5KG+i40=;
_cfuvid=AKii9trCQGiEB.0xzvo5UXqowzTR3yys6v0hI5JiumE-1705198853906-0-604800000
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.12.0
- OpenAI/Python 1.7.1
x-stainless-arch:
- arm64
x-stainless-async:
@@ -410,7 +394,7 @@ interactions:
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.12.0
- 1.7.1
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
@@ -418,22 +402,19 @@ interactions:
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: !!binary |
H4sIAAAAAAAAA3RSTY/aMBS88ytGvvQSKhDLss2lAm2rIlVspa5arQARkzySFMcva7+UTVf73yuH
L/XQiw8znvG8N37tAarMVAyVFlrSqjb9u+dsMbLPRTX6+kO4SfdPB/rybfbn3j7M9ioKCt7+olTO
qvcpV7UhKdke6dSRFgquw8lgMh6PB3cfOqLijEyQ5bX0b/qD2+HopCi4TMmrGMseALx2Z8hmM3pR
MQbRGanIe52Tii+XAOXYBERp70sv2oqKrmTKVsh2ce8Zc1iiDMJoPEFDmM1HPJFf2WkaZoiRk2x2
pdVmo60/kDszmNu6kRjLlZqm0mhj2gjz4BP8GJW2befnI3jG/J0x8MI1LB+gbYa8/E1ouUHVQm89
m0YIs0/fH/E5PIfjc1g8/IzQ+NLmkIJALzWlQhl27CotMZIkwWPBTV5IjP+OtOCT67RzjbFsuXFw
5Gu2nlCQo3WSJCu1VqdlvV22bDivHW9DI7Yx5oLvSlv6YuNIe7Zho2G+o/ytB6y7Npt/ClK146qW
jfCebDAc35zaVNePc2UnZ1JYtLnit8NB75RQ+dYLVaGjnFztyq7ckLP31vsLAAD//wMAzsuhnNMC
AAA=
content: "{\n \"id\": \"chatcmpl-8gkPGwVu7CvxvQd3ZmBPHwz0WYbVQ\",\n \"object\":
\"chat.completion\",\n \"created\": 1705198854,\n \"model\": \"gpt-4-0613\",\n
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
\"assistant\",\n \"content\": \"Do I need to use a tool? Yes\\nAction:
get_final_answer\\nAction Input: [4,5,6]\"\n },\n \"logprobs\": null,\n
\ \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\":
469,\n \"completion_tokens\": 26,\n \"total_tokens\": 495\n },\n \"system_fingerprint\":
null\n}\n"
headers:
CF-Cache-Status:
- DYNAMIC
CF-RAY:
- 85331c496beccfc4-SJC
- 8452670589920316-GRU
Cache-Control:
- no-cache, must-revalidate
Connection:
@@ -443,7 +424,7 @@ interactions:
Content-Type:
- application/json
Date:
- Sat, 10 Feb 2024 08:51:32 GMT
- Sun, 14 Jan 2024 02:20:55 GMT
Server:
- cloudflare
Transfer-Encoding:
@@ -457,7 +438,7 @@ interactions:
openai-organization:
- user-z7g4wmlazxqvc5wjyaaaocfz
openai-processing-ms:
- '2973'
- '983'
openai-version:
- '2020-10-01'
strict-transport-security:
@@ -469,16 +450,259 @@ interactions:
x-ratelimit-remaining-requests:
- '9999'
x-ratelimit-remaining-tokens:
- '299421'
- '299517'
x-ratelimit-reset-requests:
- 6ms
x-ratelimit-reset-tokens:
- 115ms
- 96ms
x-request-id:
- req_90dad1554de8c627760a03ab5313ce5d
status:
code: 200
message: OK
- cb1d820e78376ed344c16e6051cab6bc
http_version: HTTP/1.1
status_code: 200
- request:
body: '{"messages": [{"role": "user", "content": "You are test role2.\ntest backstory2\n\nYour
personal goal is: test goal2TOOLS:\n------\nYou have access to only the following
tools:\n\nget_final_answer: get_final_answer(numbers) -> float - Get the final
answer but don''t give it yet, just re-use this\n tool non-stop.\nDelegate
work to co-worker: Useful to delegate a specific task to one of the following
co-workers: test role, test role2.\nThe input to this tool should be a pipe
(|) separated text of length 3 (three), representing the co-worker you want
to ask it to (one of the options), the task and all actual context you have
for the task.\nFor example, `coworker|task|context`.\nAsk question to co-worker:
Useful to ask a question, opinion or take from on of the following co-workers:
test role, test role2.\nThe input to this tool should be a pipe (|) separated
text of length 3 (three), representing the co-worker you want to ask it to (one
of the options), the question and all actual context you have for the question.\n
For example, `coworker|question|context`.\n\nTo use a tool, please use the exact
following format:\n\n```\nThought: Do I need to use a tool? Yes\nAction: the
action to take, should be one of [get_final_answer, Delegate work to co-worker,
Ask question to co-worker], just the name.\nAction Input: the input to the action\nObservation:
the result of the action\n```\n\nWhen you have a response for your task, or
if you do not need to use a tool, you MUST use the format:\n\n```\nThought:
Do I need to use a tool? No\nFinal Answer: [your response here]This is the summary
of your work so far:\nBegin! This is VERY important to you, your job depends
on it!\n\nCurrent Task: Don''t give a Final Answer, instead keep using the `get_final_answer`
tool.\nThis is the context you''re working with:\nHi!\nThought: Do I need to
use a tool? Yes\nAction: get_final_answer\nAction Input: [1,2,3]\nObservation:
42\nThought: Do I need to use a tool? Yes\nAction: get_final_answer\nAction
Input: [4,5,6]\nObservation: 42\nThought: "}], "model": "gpt-4", "n": 1, "stop":
["\nObservation"], "stream": false, "temperature": 0.7}'
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate
connection:
- keep-alive
content-length:
- '2142'
content-type:
- application/json
cookie:
- __cf_bm=.BcqEMXmdqfYfcBhvXzVDw6VVpZcS222j9Ky_mjHYg8-1705198853-1-AWhcBEd9Rcv3V/4iZJoUcp4zRPfVw7kkOb9e8JZ9J3WChdb10kNHT7f78/k6uarWxT3kJDReeFJHAIhx5KG+i40=;
_cfuvid=AKii9trCQGiEB.0xzvo5UXqowzTR3yys6v0hI5JiumE-1705198853906-0-604800000
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.7.1
x-stainless-arch:
- arm64
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.7.1
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.11.7
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
content: "{\n \"id\": \"chatcmpl-8gkPHbI2gWbt2veTT8auCPCstOnZh\",\n \"object\":
\"chat.completion\",\n \"created\": 1705198855,\n \"model\": \"gpt-4-0613\",\n
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
\"assistant\",\n \"content\": \"Do I need to use a tool? Yes\\nAction:
get_final_answer\\nAction Input: [7,8,9]\"\n },\n \"logprobs\": null,\n
\ \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\":
503,\n \"completion_tokens\": 26,\n \"total_tokens\": 529\n },\n \"system_fingerprint\":
null\n}\n"
headers:
CF-Cache-Status:
- DYNAMIC
CF-RAY:
- 8452670dca090316-GRU
Cache-Control:
- no-cache, must-revalidate
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Sun, 14 Jan 2024 02:20:57 GMT
Server:
- cloudflare
Transfer-Encoding:
- chunked
access-control-allow-origin:
- '*'
alt-svc:
- h3=":443"; ma=86400
openai-model:
- gpt-4-0613
openai-organization:
- user-z7g4wmlazxqvc5wjyaaaocfz
openai-processing-ms:
- '2350'
openai-version:
- '2020-10-01'
strict-transport-security:
- max-age=15724800; includeSubDomains
x-ratelimit-limit-requests:
- '10000'
x-ratelimit-limit-tokens:
- '300000'
x-ratelimit-remaining-requests:
- '9999'
x-ratelimit-remaining-tokens:
- '299492'
x-ratelimit-reset-requests:
- 6ms
x-ratelimit-reset-tokens:
- 101ms
x-request-id:
- b40ffecf3dc8f547453bfb166c8c52dd
http_version: HTTP/1.1
status_code: 200
- request:
body: '{"messages": [{"role": "user", "content": "You are test role2.\ntest backstory2\n\nYour
personal goal is: test goal2TOOLS:\n------\nYou have access to only the following
tools:\n\nget_final_answer: get_final_answer(numbers) -> float - Get the final
answer but don''t give it yet, just re-use this\n tool non-stop.\nDelegate
work to co-worker: Useful to delegate a specific task to one of the following
co-workers: test role, test role2.\nThe input to this tool should be a pipe
(|) separated text of length 3 (three), representing the co-worker you want
to ask it to (one of the options), the task and all actual context you have
for the task.\nFor example, `coworker|task|context`.\nAsk question to co-worker:
Useful to ask a question, opinion or take from on of the following co-workers:
test role, test role2.\nThe input to this tool should be a pipe (|) separated
text of length 3 (three), representing the co-worker you want to ask it to (one
of the options), the question and all actual context you have for the question.\n
For example, `coworker|question|context`.\n\nTo use a tool, please use the exact
following format:\n\n```\nThought: Do I need to use a tool? Yes\nAction: the
action to take, should be one of [get_final_answer, Delegate work to co-worker,
Ask question to co-worker], just the name.\nAction Input: the input to the action\nObservation:
the result of the action\n```\n\nWhen you have a response for your task, or
if you do not need to use a tool, you MUST use the format:\n\n```\nThought:
Do I need to use a tool? No\nFinal Answer: [your response here]This is the summary
of your work so far:\nBegin! This is VERY important to you, your job depends
on it!\n\nCurrent Task: Don''t give a Final Answer, instead keep using the `get_final_answer`
tool.\nThis is the context you''re working with:\nHi!\nThought: Do I need to
use a tool? Yes\nAction: get_final_answer\nAction Input: [1,2,3]\nObservation:
42\nThought: Do I need to use a tool? Yes\nAction: get_final_answer\nAction
Input: [4,5,6]\nObservation: 42\nThought: Do I need to use a tool? Yes\nAction:
get_final_answer\nAction Input: [7,8,9]\nObservation: I''ve used too many tools
for this task. I''m going to give you my absolute BEST Final answer now and
not use any more tools.\nThought: "}], "model": "gpt-4", "n": 1, "stop": ["\nObservation"],
"stream": false, "temperature": 0.7}'
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate
connection:
- keep-alive
content-length:
- '2368'
content-type:
- application/json
cookie:
- __cf_bm=.BcqEMXmdqfYfcBhvXzVDw6VVpZcS222j9Ky_mjHYg8-1705198853-1-AWhcBEd9Rcv3V/4iZJoUcp4zRPfVw7kkOb9e8JZ9J3WChdb10kNHT7f78/k6uarWxT3kJDReeFJHAIhx5KG+i40=;
_cfuvid=AKii9trCQGiEB.0xzvo5UXqowzTR3yys6v0hI5JiumE-1705198853906-0-604800000
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.7.1
x-stainless-arch:
- arm64
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.7.1
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.11.7
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
content: "{\n \"id\": \"chatcmpl-8gkPKsrN7s3j8bLplwjkAD2c7E6RK\",\n \"object\":
\"chat.completion\",\n \"created\": 1705198858,\n \"model\": \"gpt-4-0613\",\n
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
\"assistant\",\n \"content\": \"Do I need to use a tool? No\\nFinal Answer:
I've used too many tools for this task. I'm going to give you my absolute BEST
Final answer now and not use any more tools.\"\n },\n \"logprobs\":
null,\n \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\":
563,\n \"completion_tokens\": 42,\n \"total_tokens\": 605\n },\n \"system_fingerprint\":
null\n}\n"
headers:
CF-Cache-Status:
- DYNAMIC
CF-RAY:
- 8452671e0b500316-GRU
Cache-Control:
- no-cache, must-revalidate
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Sun, 14 Jan 2024 02:21:00 GMT
Server:
- cloudflare
Transfer-Encoding:
- chunked
access-control-allow-origin:
- '*'
alt-svc:
- h3=":443"; ma=86400
openai-model:
- gpt-4-0613
openai-organization:
- user-z7g4wmlazxqvc5wjyaaaocfz
openai-processing-ms:
- '2080'
openai-version:
- '2020-10-01'
strict-transport-security:
- max-age=15724800; includeSubDomains
x-ratelimit-limit-requests:
- '10000'
x-ratelimit-limit-tokens:
- '300000'
x-ratelimit-remaining-requests:
- '9999'
x-ratelimit-remaining-tokens:
- '299436'
x-ratelimit-reset-requests:
- 6ms
x-ratelimit-reset-tokens:
- 112ms
x-request-id:
- b2b7142dc669054b82773d62077e9918
http_version: HTTP/1.1
status_code: 200
- request:
body: '{"messages": [{"role": "user", "content": "Progressively summarize the
lines of conversation provided, adding onto the previous summary returning a
@@ -490,10 +714,11 @@ interactions:
the AI thinks of artificial intelligence. The AI thinks artificial intelligence
is a force for good because it will help humans reach their full potential.\nEND
OF EXAMPLE\n\nCurrent summary:\n\n\nNew lines of conversation:\nHuman: Don''t
give a Final Answer, instead keep using the `get_final_answer` tool non-stop\nThis
is the context you''re working with:\nHi\nAI: Agent stopped due to iteration
limit or time limit.\n\nNew summary:"}], "model": "gpt-4", "n": 1, "stream":
false, "temperature": 0.7}'
give a Final Answer, instead keep using the `get_final_answer` tool.\nThis is
the context you''re working with:\nHi!\nAI: I''ve used too many tools for this
task. I''m going to give you my absolute BEST Final answer now and not use any
more tools.\n\nNew summary:"}], "model": "gpt-4", "n": 1, "stream": false, "temperature":
0.7}'
headers:
accept:
- application/json
@@ -502,16 +727,16 @@ interactions:
connection:
- keep-alive
content-length:
- '1031'
- '1096'
content-type:
- application/json
cookie:
- __cf_bm=2FkgHfXDTEWwtJx9fuYC7bMgshbFiKyrz77uUDT7zZs-1707555088-1-Aepn1cZI06+lHA4fg6WnGlRYQvOzF/0+FklFgWjzvCx0c9PWNVB0SHCULztQ+AdxbloscbFE14+qlktmxccvWLA=;
_cfuvid=euVu9TT31y3M3IcbZHjgSQIkmaipgL6G3sfBSomB.wA-1707555088785-0-604800000
- __cf_bm=.BcqEMXmdqfYfcBhvXzVDw6VVpZcS222j9Ky_mjHYg8-1705198853-1-AWhcBEd9Rcv3V/4iZJoUcp4zRPfVw7kkOb9e8JZ9J3WChdb10kNHT7f78/k6uarWxT3kJDReeFJHAIhx5KG+i40=;
_cfuvid=AKii9trCQGiEB.0xzvo5UXqowzTR3yys6v0hI5JiumE-1705198853906-0-604800000
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.12.0
- OpenAI/Python 1.7.1
x-stainless-arch:
- arm64
x-stainless-async:
@@ -521,7 +746,7 @@ interactions:
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.12.0
- 1.7.1
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
@@ -529,20 +754,21 @@ interactions:
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: !!binary |
H4sIAAAAAAAAA1RRTU8jMQy9z6+wcm6raekH7Q3tHuAC0mq10oJQCTNmJpDY2dhTFqH+d5SZfohL
pLznZz8/fxYAxtVmA6ZqrVYh+vHlv/p2eXf9a/5nfcft6/3fm58rb2ed/qDd0oyygp9fsdKjalJx
iB7VMQ10ldAq5q7TVblaLBbletYTgWv0WdZEHc/H5XJ6cVC07CoUs4GHAgDgs3+zN6rxv9lAOToi
AUVsg2ZzKgIwiX1GjBVxopbUjM5kxaRIvd3fLULbBUvgSDR1lQpoi3B1A8QKytC4HYKFF0fWgyV5
xwSW6ky9IUboxFHTa54a1G1ftx3qnkCZPRDTWJTjBK75HXeYRscRtkFSyJxA3WHuiU5bTOAUk80B
gnfBKXACdQGH38QcdtmfQvDcxMTPOTDqvD/hL46ctNuEVpjywnnYIN8XAI992N23/ExMHKJuld+Q
pL/Zeuhnznc9s/PpgVRW68/4bFYWB4dGPkQx5GwaTDG5Pvvss9gXXwAAAP//AwAJcyracgIAAA==
content: "{\n \"id\": \"chatcmpl-8gkPMD1hkXKA5DP8MyMJNpAxJXhzj\",\n \"object\":
\"chat.completion\",\n \"created\": 1705198860,\n \"model\": \"gpt-4-0613\",\n
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
\"assistant\",\n \"content\": \"The human instructs the AI not to give
a final answer but to continue using the `get_final_answer` tool. However, the
AI responds that it has already used many tools for the task and chooses to
provide its best final answer without using any more tools.\"\n },\n \"logprobs\":
null,\n \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\":
196,\n \"completion_tokens\": 53,\n \"total_tokens\": 249\n },\n \"system_fingerprint\":
null\n}\n"
headers:
CF-Cache-Status:
- DYNAMIC
CF-RAY:
- 85331c5e2baccfc4-SJC
- 8452672d1bba0316-GRU
Cache-Control:
- no-cache, must-revalidate
Connection:
@@ -552,7 +778,7 @@ interactions:
Content-Type:
- application/json
Date:
- Sat, 10 Feb 2024 08:51:35 GMT
- Sun, 14 Jan 2024 02:21:03 GMT
Server:
- cloudflare
Transfer-Encoding:
@@ -566,7 +792,7 @@ interactions:
openai-organization:
- user-z7g4wmlazxqvc5wjyaaaocfz
openai-processing-ms:
- '2537'
- '2683'
openai-version:
- '2020-10-01'
strict-transport-security:
@@ -578,14 +804,13 @@ interactions:
x-ratelimit-remaining-requests:
- '9999'
x-ratelimit-remaining-tokens:
- '299758'
- '299742'
x-ratelimit-reset-requests:
- 6ms
x-ratelimit-reset-tokens:
- 48ms
- 51ms
x-request-id:
- req_c2fe098b8481dac7ed32f9d683eb36ac
status:
code: 200
message: OK
- c00193638e659df4a60ef9425649fe2c
http_version: HTTP/1.1
status_code: 200
version: 1

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@@ -1,431 +0,0 @@
interactions:
- request:
body: '{"messages": [{"role": "user", "content": "You are test role.\ntest backstory\n\nYour
personal goal is: test goalTOOLS:\n------\nYou have access to only the following
tools:\n\n\n\nTo use a tool, please use the exact following format:\n\n```\nThought:
Do I need to use a tool? Yes\nAction: the action to take, should be one of [],
just the name.\nAction Input: the input to the action\nObservation: the result
of the action\n```\n\nWhen you have a response for your task, or if you do not
need to use a tool, you MUST use the format:\n\n```\nThought: Do I need to use
a tool? No\nFinal Answer: [your response here]```This is the summary of your
work so far:\nBegin! This is VERY important to you, your job depends on it!\n\nCurrent
Task: just say hi!\n"}], "model": "gpt-4", "n": 1, "stop": ["\nObservation"],
"stream": false, "temperature": 0.7}'
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate
connection:
- keep-alive
content-length:
- '846'
content-type:
- application/json
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.11.1
x-stainless-arch:
- arm64
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.11.1
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.11.7
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: !!binary |
H4sIAAAAAAAAA1SQS0/DMBCE7/kVi88tSiDqIxdUCSrgAKpAQghQ5TqbxMXxGnsjQFX/O0qaPrj4
MLOz/mY3EYDQuchAqEqyqp0ZTr7mj2kS+0IvXmor5dPrfVIvblTh4/VMDNoErdaoeJ86V1Q7g6zJ
7mzlUTK2W5NxPE5HF5PpqDNqytG0sdLxMB3Go+SyT1SkFQaRwVsEALDp3pbN5vgjMogHe6XGEGSJ
IjsMAQhPplWEDEEHlpbF4Ggqsoy2w32uqCkrzuCa4A4sYg5M0AQECUxkruCB3u1cW2lgZsM3+gxu
9Znol20PFIZK52nVEtvGmINeaKtDtfQoA9n2x8DkdvFtBPDRtW3+FRDOU+14yfSJNnRHG+/2ieNh
T93eZGJpTvRpGvWEIvwGxnpZaFuid1535VvOaBv9AQAA//8DAB3dd33zAQAA
headers:
CF-Cache-Status:
- DYNAMIC
CF-RAY:
- 852a51805aad67eb-SJC
Cache-Control:
- no-cache, must-revalidate
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Fri, 09 Feb 2024 07:14:58 GMT
Server:
- cloudflare
Set-Cookie:
- __cf_bm=9ly7231xuotblGKWs6jbqP1UIOLx4Z4fGC_GzaLrjf0-1707462898-1-ARsy16USRPFgm9nFkRCgyVEJ0bZHxoljRrPhQDJWOAGApKCPHnJUOUKp5ZIYM+Cye1FsXshcWJ4eSs/1aHvjl74=;
path=/; expires=Fri, 09-Feb-24 07:44:58 GMT; domain=.api.openai.com; HttpOnly;
Secure; SameSite=None
- _cfuvid=Myd_SP.rh1qGv3eu8_4uoiAw_XxGG6sO.jpzVZqLJGc-1707462898242-0-604800000;
path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
Transfer-Encoding:
- chunked
access-control-allow-origin:
- '*'
alt-svc:
- h3=":443"; ma=86400
openai-model:
- gpt-4-0613
openai-organization:
- user-z7g4wmlazxqvc5wjyaaaocfz
openai-processing-ms:
- '1227'
openai-version:
- '2020-10-01'
strict-transport-security:
- max-age=15724800; includeSubDomains
x-ratelimit-limit-requests:
- '10000'
x-ratelimit-limit-tokens:
- '300000'
x-ratelimit-remaining-requests:
- '9999'
x-ratelimit-remaining-tokens:
- '299811'
x-ratelimit-reset-requests:
- 6ms
x-ratelimit-reset-tokens:
- 37ms
x-request-id:
- req_8e9048cd17dae372866713b1f204fc99
status:
code: 200
message: OK
- request:
body: '{"messages": [{"role": "user", "content": "Progressively summarize the
lines of conversation provided, adding onto the previous summary returning a
new summary.\n\nEXAMPLE\nCurrent summary:\nThe human asks what the AI thinks
of artificial intelligence. The AI thinks artificial intelligence is a force
for good.\n\nNew lines of conversation:\nHuman: Why do you think artificial
intelligence is a force for good?\nAI: Because artificial intelligence will
help humans reach their full potential.\n\nNew summary:\nThe human asks what
the AI thinks of artificial intelligence. The AI thinks artificial intelligence
is a force for good because it will help humans reach their full potential.\nEND
OF EXAMPLE\n\nCurrent summary:\n\n\nNew lines of conversation:\nHuman: just
say hi!\nAI: Hi!\n\nNew summary:"}], "model": "gpt-4", "n": 1, "stream": false,
"temperature": 0.7}'
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate
connection:
- keep-alive
content-length:
- '867'
content-type:
- application/json
cookie:
- __cf_bm=9ly7231xuotblGKWs6jbqP1UIOLx4Z4fGC_GzaLrjf0-1707462898-1-ARsy16USRPFgm9nFkRCgyVEJ0bZHxoljRrPhQDJWOAGApKCPHnJUOUKp5ZIYM+Cye1FsXshcWJ4eSs/1aHvjl74=;
_cfuvid=Myd_SP.rh1qGv3eu8_4uoiAw_XxGG6sO.jpzVZqLJGc-1707462898242-0-604800000
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.11.1
x-stainless-arch:
- arm64
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.11.1
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.11.7
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: !!binary |
H4sIAAAAAAAAA1SQS0/DMBCE7/kVK5/bqi1R+rhxAKlFqBw4gVDlOpvY4Niud1OoUP87cvrkYlkz
nvW385sBCFOKOQilJasm2P50+7gqXp6ed25FbzM92y4Xq4nmAstl/SB6KeE3n6j4nBoo3wSLbLw7
2iqiZExTR5PhJC/G09m0Mxpfok2xOnA/7w+L0d0pob1RSGIO7xkAwG93JjZX4o+Yw7B3VhokkjWK
+eURgIjeJkVIIkMsHYve1VTeMboO91Uj6LaRDowjjq1iAtYI9wtgDyT3oE0vXb+1UfpsRaTgXUkg
q8rERrLZod0PxOmLw4XN+jpEv0l7uNbai14ZZ0ivI0ryLnEQ+3CMHzKAj66D9t9aIkTfBF6z/0KX
Bo7y/DhPXOu+cacnkz1Le6MX4+xEKGhPjM26Mq7GGKLpKkmc2SH7AwAA//8DAHvVVasJAgAA
headers:
CF-Cache-Status:
- DYNAMIC
CF-RAY:
- 852a518a581d67eb-SJC
Cache-Control:
- no-cache, must-revalidate
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Fri, 09 Feb 2024 07:14:59 GMT
Server:
- cloudflare
Transfer-Encoding:
- chunked
access-control-allow-origin:
- '*'
alt-svc:
- h3=":443"; ma=86400
openai-model:
- gpt-4-0613
openai-organization:
- user-z7g4wmlazxqvc5wjyaaaocfz
openai-processing-ms:
- '1261'
openai-version:
- '2020-10-01'
strict-transport-security:
- max-age=15724800; includeSubDomains
x-ratelimit-limit-requests:
- '10000'
x-ratelimit-limit-tokens:
- '300000'
x-ratelimit-remaining-requests:
- '9999'
x-ratelimit-remaining-tokens:
- '299799'
x-ratelimit-reset-requests:
- 6ms
x-ratelimit-reset-tokens:
- 40ms
x-request-id:
- req_c169edc1f697d9f897dd46b3fe23da76
status:
code: 200
message: OK
- request:
body: '{"messages": [{"role": "user", "content": "You are test role.\ntest backstory\n\nYour
personal goal is: test goalTOOLS:\n------\nYou have access to only the following
tools:\n\n\n\nTo use a tool, please use the exact following format:\n\n```\nThought:
Do I need to use a tool? Yes\nAction: the action to take, should be one of [],
just the name.\nAction Input: the input to the action\nObservation: the result
of the action\n```\n\nWhen you have a response for your task, or if you do not
need to use a tool, you MUST use the format:\n\n```\nThought: Do I need to use
a tool? No\nFinal Answer: [your response here]```This is the summary of your
work so far:\nThe human instructs the AI to say hi, to which the AI responds
affirmatively.Begin! This is VERY important to you, your job depends on it!\n\nCurrent
Task: just say hello!\nThis is the context you''re working with:\nHi!\n"}],
"model": "gpt-4", "n": 1, "stop": ["\nObservation"], "stream": false, "temperature":
0.7}'
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate
connection:
- keep-alive
content-length:
- '973'
content-type:
- application/json
cookie:
- __cf_bm=9ly7231xuotblGKWs6jbqP1UIOLx4Z4fGC_GzaLrjf0-1707462898-1-ARsy16USRPFgm9nFkRCgyVEJ0bZHxoljRrPhQDJWOAGApKCPHnJUOUKp5ZIYM+Cye1FsXshcWJ4eSs/1aHvjl74=;
_cfuvid=Myd_SP.rh1qGv3eu8_4uoiAw_XxGG6sO.jpzVZqLJGc-1707462898242-0-604800000
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.11.1
x-stainless-arch:
- arm64
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.11.1
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.11.7
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: !!binary |
H4sIAAAAAAAAA1SQX0/CMBTF3/cprn0GMyYC7sUYDVHxT4z6YNSQ0l22Ytc72ruoEL676ZigL304
p7/Tc7qOAITORApCFZJVWZnuaDm+Hz08r9RiNdHLx4InN6pY3Onr89vsRXQCQbMFKv6lDhWVlUHW
ZLe2cigZQ2pvGA/7g+QkjhujpAxNwPKKu/1uPOgdtURBWqEXKbxGAADr5gzdbIZfIoWGb5QSvZc5
inR3CUA4MkER0nvtWVoWnb2pyDLapu5TQXVecAoXBFdgETNggtojSGAicwp39GbH2koDZ9Z/okvh
Eo2hA9HmbXZFDOWVo1kobWtjdvpcW+2LqUPpyYZHPVO1xTcRwHszuP63QVSOyoqnTB9oQ2ASH2/z
xP5v925v2JpMLM0fKkmitqHw356xnM61zdFVTjf7Q89oE/0AAAD//wMAxl1LJ/YBAAA=
headers:
CF-Cache-Status:
- DYNAMIC
CF-RAY:
- 852a5196983067eb-SJC
Cache-Control:
- no-cache, must-revalidate
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Fri, 09 Feb 2024 07:15:01 GMT
Server:
- cloudflare
Transfer-Encoding:
- chunked
access-control-allow-origin:
- '*'
alt-svc:
- h3=":443"; ma=86400
openai-model:
- gpt-4-0613
openai-organization:
- user-z7g4wmlazxqvc5wjyaaaocfz
openai-processing-ms:
- '1093'
openai-version:
- '2020-10-01'
strict-transport-security:
- max-age=15724800; includeSubDomains
x-ratelimit-limit-requests:
- '10000'
x-ratelimit-limit-tokens:
- '300000'
x-ratelimit-remaining-requests:
- '9999'
x-ratelimit-remaining-tokens:
- '299780'
x-ratelimit-reset-requests:
- 6ms
x-ratelimit-reset-tokens:
- 43ms
x-request-id:
- req_908d51731310c13867cd2dd7d565348b
status:
code: 200
message: OK
- request:
body: '{"messages": [{"role": "user", "content": "Progressively summarize the
lines of conversation provided, adding onto the previous summary returning a
new summary.\n\nEXAMPLE\nCurrent summary:\nThe human asks what the AI thinks
of artificial intelligence. The AI thinks artificial intelligence is a force
for good.\n\nNew lines of conversation:\nHuman: Why do you think artificial
intelligence is a force for good?\nAI: Because artificial intelligence will
help humans reach their full potential.\n\nNew summary:\nThe human asks what
the AI thinks of artificial intelligence. The AI thinks artificial intelligence
is a force for good because it will help humans reach their full potential.\nEND
OF EXAMPLE\n\nCurrent summary:\nThe human instructs the AI to say hi, to which
the AI responds affirmatively.\n\nNew lines of conversation:\nHuman: just say
hello!\nThis is the context you''re working with:\nHi!\nAI: Hello!\n\nNew summary:"}],
"model": "gpt-4", "n": 1, "stream": false, "temperature": 0.7}'
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate
connection:
- keep-alive
content-length:
- '997'
content-type:
- application/json
cookie:
- __cf_bm=9ly7231xuotblGKWs6jbqP1UIOLx4Z4fGC_GzaLrjf0-1707462898-1-ARsy16USRPFgm9nFkRCgyVEJ0bZHxoljRrPhQDJWOAGApKCPHnJUOUKp5ZIYM+Cye1FsXshcWJ4eSs/1aHvjl74=;
_cfuvid=Myd_SP.rh1qGv3eu8_4uoiAw_XxGG6sO.jpzVZqLJGc-1707462898242-0-604800000
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.11.1
x-stainless-arch:
- arm64
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.11.1
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.11.7
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: !!binary |
H4sIAAAAAAAAA1SRS2sjMRCE7/MrGp3tYMdZO/bNEJaEXQgse8iDYGRNz0iOpFbUPZuY4P8eNH5l
LzpU9ddUtT4rAOVqtQBlrBYTkh9ev/28n//4jY+/uLkd0+My0Hr28HR3037c/FGDQtB6g0aO1IWh
kDyKo7i3TUYtWLaOZ6PZ1fRyPhr3RqAafcHaJMOr4Wg6nhwIS84gqwU8VwAAn/1bssUaP9QCRoOj
EpBZt6gWpyEAlckXRWlmx6KjqMHZNBQFYx/3r0WwXdARXGTJnREGsQjLOxAC1luwDnSsixiLtOlY
9jp6T4MivVtn7JHKyIlizaCbxuWgxf1DvwXUZcQFvFCHHLtTAU9tyrQuZWPn/UlvXHRsVxk1Uyxh
WSjt8V0F8NIfqvuvu0qZQpKV0CtG7u892e9T5z85u5fTgykk2n+j5vPqkFDxlgXDqnGxxZyy6+9W
cla76gsAAP//AwDz/udFLgIAAA==
headers:
CF-Cache-Status:
- DYNAMIC
CF-RAY:
- 852a519e29bb67eb-SJC
Cache-Control:
- no-cache, must-revalidate
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Fri, 09 Feb 2024 07:15:03 GMT
Server:
- cloudflare
Transfer-Encoding:
- chunked
access-control-allow-origin:
- '*'
alt-svc:
- h3=":443"; ma=86400
openai-model:
- gpt-4-0613
openai-organization:
- user-z7g4wmlazxqvc5wjyaaaocfz
openai-processing-ms:
- '1955'
openai-version:
- '2020-10-01'
strict-transport-security:
- max-age=15724800; includeSubDomains
x-ratelimit-limit-requests:
- '10000'
x-ratelimit-limit-tokens:
- '300000'
x-ratelimit-remaining-requests:
- '9999'
x-ratelimit-remaining-tokens:
- '299767'
x-ratelimit-reset-requests:
- 6ms
x-ratelimit-reset-tokens:
- 46ms
x-request-id:
- req_24c78e42a235c08ae9c6f1b5dbff3c6f
status:
code: 200
message: OK
version: 1

File diff suppressed because it is too large Load Diff

View File

@@ -1,19 +1,21 @@
interactions:
- request:
body: '{"messages": [{"role": "user", "content": "You are test role.\ntest backstory\n\nYour
personal goal is: test goalTOOLS:\n------\nYou have access to only the following
tools:\n\nmultiplier: multiplier(first_number: int, second_number: int) -> float
- Useful for when you need to multiply two numbers together.\n\nTo use a tool,
please use the exact following format:\n\n```\nThought: Do I need to use a tool?
Yes\nAction: the tool you wanna use, should be one of [multiplier], just the
name.\nAction Input: Any and all relevant information input and context for
using the tool\nObservation: the result of using the tool\n```\n\nWhen you have
a response for your task, or if you do not need to use a tool, you MUST use
the format:\n\n```\nThought: Do I need to use a tool? No\nFinal Answer: [your
response here]```This is the summary of your work so far:\nBegin! This is VERY
important to you, your job depends on it!\n\nCurrent Task: What is 3 times 5?\n"}],
"model": "gpt-4", "n": 1, "stop": ["\nObservation"], "stream": false, "temperature":
0.7}'
body: '{"messages": [{"role": "user", "content": "You are new test role.\nnew
test backstory\n\nYour personal goal is: new test goal\n\nTOOLS:\n------\nYou
have access to the following tools:\n\nmultiplier: multiplier(numbers) -> float
- Useful for when you need to multiply two numbers together.\n The input
to this tool should be a comma separated list of numbers of\n length
two, representing the two numbers you want to multiply together.\n For
example, `1,2` would be the input if you wanted to multiply 1 by 2.\n\nTo use
a tool, please use the exact following format:\n\n```\nThought: Do I need to
use a tool? Yes\nAction: the action to take, should be one of [multiplier],
just the name.\nAction Input: the input to the action\nObservation: the result
of the action\n```\n\nWhen you have a response for your task, or if you do not
need to use a tool, you MUST use the format:\n\n```\nThought: Do I need to use
a tool? No\nFinal Answer: [your response here]\n```\n\t\tThis is the summary
of your work so far:\n \nBegin! This is VERY important to you, your job depends
on it!\n\nCurrent Task: What is 3 times 5?\n\n"}], "model": "gpt-4", "n": 1,
"stop": ["\nObservation"], "stream": false, "temperature": 0.7}'
headers:
accept:
- application/json
@@ -22,13 +24,13 @@ interactions:
connection:
- keep-alive
content-length:
- '1047'
- '1231'
content-type:
- application/json
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.12.0
- OpenAI/Python 1.6.1
x-stainless-arch:
- arm64
x-stainless-async:
@@ -38,7 +40,7 @@ interactions:
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.12.0
- 1.6.1
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
@@ -46,20 +48,18 @@ interactions:
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: !!binary |
H4sIAAAAAAAAA1SRS0/DMBCE7/kVqz2nqK8AzQUhEIIbIDggiqrU2SQujtfYGwmo+t+R0ycXH2Z2
Vt+s1wkA6hJzQNUUolpnBpdfarby1ROZ2fju9fGesko/X36W05vfc41pTPByRUr2qTPFrTMkmu3W
Vp4Kobh1dDG8yLLhdDTrjZZLMjFWOxlMB8Pz0WSXaFgrCpjDewIAsO7fyGZL+sYchuleaSmEoibM
D0MA6NlEBYsQdJDCCqZHU7EVsj3uS8Nd3UgOtwwPYIlKEIYuEBQgzOYK3ijM7bWKZXJoOyPaGU1+
r8GDdZ3ksJ5jpX2Qhe3aJfk55jBJYY6BFNvyRM02uCPZHCoYrp3nZaxrO2MOeqWtDs3CUxHYRtwg
7LbxTQLw0Z+q+9cenefWyUL4k2xcOB6Ptvvw+CtHdzLZmcJSmJNUNk12hBh+glC7qLStyTuv+8tF
zmST/AEAAP//AwCr+Q67MAIAAA==
content: "{\n \"id\": \"chatcmpl-8b7eDhOwdMJw0817wIqiD9MoXVRU8\",\n \"object\":
\"chat.completion\",\n \"created\": 1703858225,\n \"model\": \"gpt-4-0613\",\n
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
\"assistant\",\n \"content\": \"Thought: Do I need to use a tool? Yes\\nAction:
multiplier\\nAction Input: 3,5\"\n },\n \"logprobs\": null,\n \"finish_reason\":
\"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 264,\n \"completion_tokens\":
23,\n \"total_tokens\": 287\n },\n \"system_fingerprint\": null\n}\n"
headers:
CF-Cache-Status:
- DYNAMIC
CF-RAY:
- 8532aa4bca4e679a-SJC
- 83d28cd70f9f1cd9-GRU
Cache-Control:
- no-cache, must-revalidate
Connection:
@@ -69,14 +69,14 @@ interactions:
Content-Type:
- application/json
Date:
- Sat, 10 Feb 2024 07:33:41 GMT
- Fri, 29 Dec 2023 13:57:07 GMT
Server:
- cloudflare
Set-Cookie:
- __cf_bm=iA8sDHDKVnfosmI8wG_ElV9lor.u9ivoU2KD_Uw2Ck0-1707550421-1-AakLurCxUahX1wP/EqH55USvoahi23iv5/ZX2jxvr6ltSDBhzk8J0ipuSFcgyazwnWfYsqu0n467tSBqzuyTan0=;
path=/; expires=Sat, 10-Feb-24 08:03:41 GMT; domain=.api.openai.com; HttpOnly;
- __cf_bm=iNTKkM.TU1FqaOYqOtdlGljM5kh2eYMOQyBBsDxjV7o-1703858227-1-AUtPBBabQY5DO7jJdGlU5rhZeJigXNSYGJkA8uYAsRU5NSaEt+YcHCLzXRQWpzm+lHKeh5JKd9iq/sLM5ylFvc8=;
path=/; expires=Fri, 29-Dec-23 14:27:07 GMT; domain=.api.openai.com; HttpOnly;
Secure; SameSite=None
- _cfuvid=CpE5jxQFzpK609ZrTNgy1kxDFHYN2x4AY1XCThUEeQk-1707550421579-0-604800000;
- _cfuvid=FKpE5gHVpQFRYsdYjjLPKhDX2CeCXpdvvJ8tjGmsYrM-1703858227679-0-604800000;
path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
Transfer-Encoding:
- chunked
@@ -89,7 +89,7 @@ interactions:
openai-organization:
- user-z7g4wmlazxqvc5wjyaaaocfz
openai-processing-ms:
- '1673'
- '1725'
openai-version:
- '2020-10-01'
strict-transport-security:
@@ -98,39 +98,43 @@ interactions:
- '10000'
x-ratelimit-limit-tokens:
- '300000'
x-ratelimit-limit-tokens_usage_based:
- '300000'
x-ratelimit-remaining-requests:
- '9999'
x-ratelimit-remaining-tokens:
- '299762'
- '299717'
x-ratelimit-remaining-tokens_usage_based:
- '299717'
x-ratelimit-reset-requests:
- 6ms
x-ratelimit-reset-tokens:
- 47ms
- 56ms
x-ratelimit-reset-tokens_usage_based:
- 56ms
x-request-id:
- req_e5461f12abf90cd87cea37b9c47989e0
status:
code: 200
message: OK
- 689757a1e38baf0844ec69f20525c5d7
http_version: HTTP/1.1
status_code: 200
- request:
body: '{"messages": [{"role": "user", "content": "Return a valid schema for the
one tool you must use with its arguments and values.\n\nTools available:\n\nFuntion
Name: multiplier\nFuntion attributes: {''first_number'': {''type'': ''integer''},
''second_number'': {''type'': ''integer''}}\nDescription: multiplier(first_number:
int, second_number: int) -> float - Useful for when you need to multiply two
numbers together.\n\nUse this text to inform a valid ouput schema:\nThought:
Do I need to use a tool? Yes\nAction: multiplier\nAction Input: {\"first_number\":
3, \"second_number\": 5}\n\nThe output should be formatted as a JSON instance
that conforms to the JSON schema below.\n\nAs an example, for the schema {\"properties\":
{\"foo\": {\"title\": \"Foo\", \"description\": \"a list of strings\", \"type\":
\"array\", \"items\": {\"type\": \"string\"}}}, \"required\": [\"foo\"]}\nthe
object {\"foo\": [\"bar\", \"baz\"]} is a well-formatted instance of the schema.
The object {\"properties\": {\"foo\": [\"bar\", \"baz\"]}} is not well-formatted.\n\nHere
is the output schema:\n```\n{\"properties\": {\"function_name\": {\"title\":
\"Function Name\", \"description\": \"The name of the function to be called.\",
\"type\": \"string\"}, \"arguments\": {\"title\": \"Arguments\", \"description\":
\"A dictinary of arguments to be passed to the function.\", \"type\": \"object\"}},
\"required\": [\"function_name\", \"arguments\"]}\n```\n```"}], "model": "gpt-4",
"n": 1, "stream": false, "temperature": 0.7}'
body: '{"messages": [{"role": "user", "content": "You are new test role.\nnew
test backstory\n\nYour personal goal is: new test goal\n\nTOOLS:\n------\nYou
have access to the following tools:\n\nmultiplier: multiplier(numbers) -> float
- Useful for when you need to multiply two numbers together.\n The input
to this tool should be a comma separated list of numbers of\n length
two, representing the two numbers you want to multiply together.\n For
example, `1,2` would be the input if you wanted to multiply 1 by 2.\n\nTo use
a tool, please use the exact following format:\n\n```\nThought: Do I need to
use a tool? Yes\nAction: the action to take, should be one of [multiplier],
just the name.\nAction Input: the input to the action\nObservation: the result
of the action\n```\n\nWhen you have a response for your task, or if you do not
need to use a tool, you MUST use the format:\n\n```\nThought: Do I need to use
a tool? No\nFinal Answer: [your response here]\n```\n\t\tThis is the summary
of your work so far:\n \nBegin! This is VERY important to you, your job depends
on it!\n\nCurrent Task: What is 3 times 5?\nThought: Do I need to use a tool?
Yes\nAction: multiplier\nAction Input: 3,5\nObservation: 15\nThought: \n"}],
"model": "gpt-4", "n": 1, "stop": ["\nObservation"], "stream": false, "temperature":
0.7}'
headers:
accept:
- application/json
@@ -139,16 +143,16 @@ interactions:
connection:
- keep-alive
content-length:
- '1494'
- '1335'
content-type:
- application/json
cookie:
- __cf_bm=iA8sDHDKVnfosmI8wG_ElV9lor.u9ivoU2KD_Uw2Ck0-1707550421-1-AakLurCxUahX1wP/EqH55USvoahi23iv5/ZX2jxvr6ltSDBhzk8J0ipuSFcgyazwnWfYsqu0n467tSBqzuyTan0=;
_cfuvid=CpE5jxQFzpK609ZrTNgy1kxDFHYN2x4AY1XCThUEeQk-1707550421579-0-604800000
- __cf_bm=iNTKkM.TU1FqaOYqOtdlGljM5kh2eYMOQyBBsDxjV7o-1703858227-1-AUtPBBabQY5DO7jJdGlU5rhZeJigXNSYGJkA8uYAsRU5NSaEt+YcHCLzXRQWpzm+lHKeh5JKd9iq/sLM5ylFvc8=;
_cfuvid=FKpE5gHVpQFRYsdYjjLPKhDX2CeCXpdvvJ8tjGmsYrM-1703858227679-0-604800000
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.12.0
- OpenAI/Python 1.6.1
x-stainless-arch:
- arm64
x-stainless-async:
@@ -158,7 +162,7 @@ interactions:
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.12.0
- 1.6.1
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
@@ -166,20 +170,18 @@ interactions:
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: !!binary |
H4sIAAAAAAAAA1SRO2/cMBCEe/2KBWtdIN0jF6t1kRQG7MApDISBjketJNrkUiFX8AUH/feAupfd
sNiZbzE7PGYAwjSiAqF7xdoNdvHtr76zT+XDy+HXz/v3t2e6f/xelHx4ennmpcgT4fevqPlCfdHe
DRbZeDrJOqBiTFvLbbHdbIr1spwF5xu0CesGXqwXxddydSZ6bzRGUcHvDADgOL8pGzV4EBUU+WXi
MEbVoaiuJgARvE0ToWI0kRWxyG+i9sRIc9wfGBBMBAXvaO2i9cEpZmzAUMI0gm+Be4Soe3SqkiRp
t9u9Rk+SjpIApGhH0unWmpRDKSqQwo2WzWANBinyk0uFbnRIHJNjJmfWhMg1jW6frBWs8osSUXtq
PkibpEySpjmBOJ8zXXuwvhuC36fOaLT2Om8NmdjXAVX0lG6O7IcTPmUAf+a+x08ViiF4N3DN/g0p
LVwtN6d94va1N3V9dxbZs7IfqO06OycU8V9kdHVrqMMwBDPXn3JmU/YfAAD//wMAtRBeM3UCAAA=
content: "{\n \"id\": \"chatcmpl-8b7eG2qnUr6SZcankL5AqkwIJvYFc\",\n \"object\":
\"chat.completion\",\n \"created\": 1703858228,\n \"model\": \"gpt-4-0613\",\n
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
\"assistant\",\n \"content\": \"Do I need to use a tool? No\\nFinal Answer:
3 times 5 is 15.\"\n },\n \"logprobs\": null,\n \"finish_reason\":
\"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 297,\n \"completion_tokens\":
22,\n \"total_tokens\": 319\n },\n \"system_fingerprint\": null\n}\n"
headers:
CF-Cache-Status:
- DYNAMIC
CF-RAY:
- 8532aa5709e5679a-SJC
- 83d28ce3ab3b1cd9-GRU
Cache-Control:
- no-cache, must-revalidate
Connection:
@@ -189,7 +191,7 @@ interactions:
Content-Type:
- application/json
Date:
- Sat, 10 Feb 2024 07:33:44 GMT
- Fri, 29 Dec 2023 13:57:09 GMT
Server:
- cloudflare
Transfer-Encoding:
@@ -203,7 +205,7 @@ interactions:
openai-organization:
- user-z7g4wmlazxqvc5wjyaaaocfz
openai-processing-ms:
- '2203'
- '1568'
openai-version:
- '2020-10-01'
strict-transport-security:
@@ -212,128 +214,24 @@ interactions:
- '10000'
x-ratelimit-limit-tokens:
- '300000'
x-ratelimit-remaining-requests:
- '9999'
x-ratelimit-remaining-tokens:
- '299661'
x-ratelimit-reset-requests:
- 6ms
x-ratelimit-reset-tokens:
- 67ms
x-request-id:
- req_a428e78fc8e9d475e463316f65950fa9
status:
code: 200
message: OK
- request:
body: '{"messages": [{"role": "user", "content": "You are test role.\ntest backstory\n\nYour
personal goal is: test goalTOOLS:\n------\nYou have access to only the following
tools:\n\nmultiplier: multiplier(first_number: int, second_number: int) -> float
- Useful for when you need to multiply two numbers together.\n\nTo use a tool,
please use the exact following format:\n\n```\nThought: Do I need to use a tool?
Yes\nAction: the tool you wanna use, should be one of [multiplier], just the
name.\nAction Input: Any and all relevant information input and context for
using the tool\nObservation: the result of using the tool\n```\n\nWhen you have
a response for your task, or if you do not need to use a tool, you MUST use
the format:\n\n```\nThought: Do I need to use a tool? No\nFinal Answer: [your
response here]```This is the summary of your work so far:\nBegin! This is VERY
important to you, your job depends on it!\n\nCurrent Task: What is 3 times 5?\nThought:
Do I need to use a tool? Yes\nAction: multiplier\nAction Input: {\"first_number\":
3, \"second_number\": 5}\nObservation: 15\nThought: "}], "model": "gpt-4", "n":
1, "stop": ["\nObservation"], "stream": false, "temperature": 0.7}'
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate
connection:
- keep-alive
content-length:
- '1191'
content-type:
- application/json
cookie:
- __cf_bm=iA8sDHDKVnfosmI8wG_ElV9lor.u9ivoU2KD_Uw2Ck0-1707550421-1-AakLurCxUahX1wP/EqH55USvoahi23iv5/ZX2jxvr6ltSDBhzk8J0ipuSFcgyazwnWfYsqu0n467tSBqzuyTan0=;
_cfuvid=CpE5jxQFzpK609ZrTNgy1kxDFHYN2x4AY1XCThUEeQk-1707550421579-0-604800000
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.12.0
x-stainless-arch:
- arm64
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.12.0
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.11.7
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: !!binary |
H4sIAAAAAAAAA1SQW0sDMRCF3/dXDHluy/aybd0XEYqiiKAiFFRKmp1uY7OZmMyipfS/S7bbiy95
OGfOyTezSwCELkQOQq0lq8qZ7vRbXRGrVz2fPPKd3z7MeSPty2r29uy06MQELb9Q8THVU1Q5g6zJ
HmzlUTLG1v4knWRZOhqMGqOiAk2MlY67o2467g/bxJq0wiByeE8AAHbNG9lsgb8ih7RzVCoMQZYo
8tMQgPBkoiJkCDqwtCw6Z1ORZbQN7ozgHixiAUxQBwQJTGSu4Yk+7K220sCNDT/ocxgC6woDZKAD
9LOeaAv3JxJDpfO0jNS2Nuakr7TVYb3wKAPZ+Gtgcof4PgH4bDau/y0hnKfK8YJpgzYWDsbDQ584
H/fCHbQmE0tzoU+zpCUUYRsYq8VK2xK987o5QORM9skfAAAA//8DACAb4vP3AQAA
headers:
CF-Cache-Status:
- DYNAMIC
CF-RAY:
- 8532aa66fd5c679a-SJC
Cache-Control:
- no-cache, must-revalidate
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Sat, 10 Feb 2024 07:33:45 GMT
Server:
- cloudflare
Transfer-Encoding:
- chunked
access-control-allow-origin:
- '*'
alt-svc:
- h3=":443"; ma=86400
openai-model:
- gpt-4-0613
openai-organization:
- user-z7g4wmlazxqvc5wjyaaaocfz
openai-processing-ms:
- '1275'
openai-version:
- '2020-10-01'
strict-transport-security:
- max-age=15724800; includeSubDomains
x-ratelimit-limit-requests:
- '10000'
x-ratelimit-limit-tokens:
x-ratelimit-limit-tokens_usage_based:
- '300000'
x-ratelimit-remaining-requests:
- '9999'
x-ratelimit-remaining-tokens:
- '299728'
- '299693'
x-ratelimit-remaining-tokens_usage_based:
- '299693'
x-ratelimit-reset-requests:
- 6ms
x-ratelimit-reset-tokens:
- 54ms
- 61ms
x-ratelimit-reset-tokens_usage_based:
- 61ms
x-request-id:
- req_dd069a32b6514615a79356287e845a29
status:
code: 200
message: OK
- 66a02e3e08e3f6b5b29fb04cd19f50a0
http_version: HTTP/1.1
status_code: 200
- request:
body: '{"messages": [{"role": "user", "content": "Progressively summarize the
lines of conversation provided, adding onto the previous summary returning a
@@ -359,12 +257,12 @@ interactions:
content-type:
- application/json
cookie:
- __cf_bm=iA8sDHDKVnfosmI8wG_ElV9lor.u9ivoU2KD_Uw2Ck0-1707550421-1-AakLurCxUahX1wP/EqH55USvoahi23iv5/ZX2jxvr6ltSDBhzk8J0ipuSFcgyazwnWfYsqu0n467tSBqzuyTan0=;
_cfuvid=CpE5jxQFzpK609ZrTNgy1kxDFHYN2x4AY1XCThUEeQk-1707550421579-0-604800000
- __cf_bm=iNTKkM.TU1FqaOYqOtdlGljM5kh2eYMOQyBBsDxjV7o-1703858227-1-AUtPBBabQY5DO7jJdGlU5rhZeJigXNSYGJkA8uYAsRU5NSaEt+YcHCLzXRQWpzm+lHKeh5JKd9iq/sLM5ylFvc8=;
_cfuvid=FKpE5gHVpQFRYsdYjjLPKhDX2CeCXpdvvJ8tjGmsYrM-1703858227679-0-604800000
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.12.0
- OpenAI/Python 1.6.1
x-stainless-arch:
- arm64
x-stainless-async:
@@ -374,7 +272,7 @@ interactions:
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.12.0
- 1.6.1
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
@@ -382,19 +280,19 @@ interactions:
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: !!binary |
H4sIAAAAAAAAA1SQT0/CQBDF7/0Ukz0XQgsV6E0TD2qiHtSYGEOW7dCu7D92p4lC+O5mS6F62cO8
93v7Zg4JAJMVK4GJhpPQTo0WO7F0ezl7549q+bx/eMp3t+v8Zlrdv27eWBoJu/5CQWdqLKx2Ckla
c5KFR04YU7P5ZF4Uk1ledIK2FaqI1Y5Gs9HkKpv2RGOlwMBK+EgAAA7dG7uZCr9ZCZP0PNEYAq+R
lRcTAPNWxQnjIchA3BBLB1FYQ2i6ui8NQtNqboCHbQBqEK7vgCwIrkSrOCFMgaTGAEUK3FRni8fg
rKkiwmnwgAyQFWPWf3a8tFS2dt6u40amVeoy30gjQ7PyyIM1sVEg6074MQH47K7R/luQOW+1oxXZ
LZoYmBXFKY8Nhx/UfN6LZImrP9QiT/qGLPwEQr3aSFOjd152x4k9k2PyCwAA//8DAHepd0ITAgAA
content: "{\n \"id\": \"chatcmpl-8b7eIfmBgQoWGLW4jiQZTu3qzEBBL\",\n \"object\":
\"chat.completion\",\n \"created\": 1703858230,\n \"model\": \"gpt-4-0613\",\n
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
\"assistant\",\n \"content\": \"The human asks the AI to calculate 3
times 5 and the AI responds that 3 times 5 is 15.\"\n },\n \"logprobs\":
null,\n \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\":
155,\n \"completion_tokens\": 26,\n \"total_tokens\": 181\n },\n \"system_fingerprint\":
null\n}\n"
headers:
CF-Cache-Status:
- DYNAMIC
CF-RAY:
- 8532aa703b92679a-SJC
- 83d28cf118221cd9-GRU
Cache-Control:
- no-cache, must-revalidate
Connection:
@@ -404,7 +302,7 @@ interactions:
Content-Type:
- application/json
Date:
- Sat, 10 Feb 2024 07:33:47 GMT
- Fri, 29 Dec 2023 13:57:12 GMT
Server:
- cloudflare
Transfer-Encoding:
@@ -418,7 +316,7 @@ interactions:
openai-organization:
- user-z7g4wmlazxqvc5wjyaaaocfz
openai-processing-ms:
- '1667'
- '2188'
openai-version:
- '2020-10-01'
strict-transport-security:
@@ -427,17 +325,366 @@ interactions:
- '10000'
x-ratelimit-limit-tokens:
- '300000'
x-ratelimit-limit-tokens_usage_based:
- '300000'
x-ratelimit-remaining-requests:
- '9999'
x-ratelimit-remaining-tokens:
- '299794'
x-ratelimit-remaining-tokens_usage_based:
- '299794'
x-ratelimit-reset-requests:
- 6ms
x-ratelimit-reset-tokens:
- 41ms
x-ratelimit-reset-tokens_usage_based:
- 41ms
x-request-id:
- req_14ec03ab8500562c8eaac94fe2cec349
status:
code: 200
message: OK
- a644c703d416b1e4b06586e96e6d7297
http_version: HTTP/1.1
status_code: 200
- request:
body: '{"messages": [{"role": "user", "content": "You are new test role.\nnew
test backstory\n\nYour personal goal is: new test goal\n\nTOOLS:\n------\nYou
have access to the following tools:\n\nmultiplier: multiplier(numbers) -> float
- Useful for when you need to multiply two numbers together.\n The input
to this tool should be a comma separated list of numbers of\n length
two, representing the two numbers you want to multiply together.\n For
example, `1,2` would be the input if you wanted to multiply 1 by 2.\n\nTo use
a tool, please use the exact following format:\n\n```\nThought: Do I need to
use a tool? Yes\nAction: the action to take, should be one of [multiplier],
just the name.\nAction Input: the input to the action\nObservation: the result
of the action\n```\n\nWhen you have a response for your task, or if you do not
need to use a tool, you MUST use the format:\n\n```\nThought: Do I need to use
a tool? No\nFinal Answer: [your response here]\n```\n\t\tThis is the summary
of your work so far:\n The human asks the AI to calculate 3 times 5 and the
AI responds that 3 times 5 is 15.\nBegin! This is VERY important to you, your
job depends on it!\n\nCurrent Task: What is 3 times 5?\n\n"}], "model": "gpt-4",
"n": 1, "stop": ["\nObservation"], "stream": false, "temperature": 0.7}'
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate
connection:
- keep-alive
content-length:
- '1317'
content-type:
- application/json
cookie:
- __cf_bm=iNTKkM.TU1FqaOYqOtdlGljM5kh2eYMOQyBBsDxjV7o-1703858227-1-AUtPBBabQY5DO7jJdGlU5rhZeJigXNSYGJkA8uYAsRU5NSaEt+YcHCLzXRQWpzm+lHKeh5JKd9iq/sLM5ylFvc8=;
_cfuvid=FKpE5gHVpQFRYsdYjjLPKhDX2CeCXpdvvJ8tjGmsYrM-1703858227679-0-604800000
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.6.1
x-stainless-arch:
- arm64
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.6.1
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.11.7
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
content: "{\n \"id\": \"chatcmpl-8b7eKehkNtTjrXJD0aFjiVUeXU7te\",\n \"object\":
\"chat.completion\",\n \"created\": 1703858232,\n \"model\": \"gpt-4-0613\",\n
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
\"assistant\",\n \"content\": \"Thought: Do I need to use a tool? Yes\\nAction:
multiplier\\nAction Input: 3,5\"\n },\n \"logprobs\": null,\n \"finish_reason\":
\"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 290,\n \"completion_tokens\":
23,\n \"total_tokens\": 313\n },\n \"system_fingerprint\": null\n}\n"
headers:
CF-Cache-Status:
- DYNAMIC
CF-RAY:
- 83d28d007f461cd9-GRU
Cache-Control:
- no-cache, must-revalidate
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Fri, 29 Dec 2023 13:57:14 GMT
Server:
- cloudflare
Transfer-Encoding:
- chunked
access-control-allow-origin:
- '*'
alt-svc:
- h3=":443"; ma=86400
openai-model:
- gpt-4-0613
openai-organization:
- user-z7g4wmlazxqvc5wjyaaaocfz
openai-processing-ms:
- '1885'
openai-version:
- '2020-10-01'
strict-transport-security:
- max-age=15724800; includeSubDomains
x-ratelimit-limit-requests:
- '10000'
x-ratelimit-limit-tokens:
- '300000'
x-ratelimit-limit-tokens_usage_based:
- '300000'
x-ratelimit-remaining-requests:
- '9999'
x-ratelimit-remaining-tokens:
- '299697'
x-ratelimit-remaining-tokens_usage_based:
- '299697'
x-ratelimit-reset-requests:
- 6ms
x-ratelimit-reset-tokens:
- 60ms
x-ratelimit-reset-tokens_usage_based:
- 60ms
x-request-id:
- 87959bfa6a029f7bcafb5095e66d4964
http_version: HTTP/1.1
status_code: 200
- request:
body: '{"messages": [{"role": "user", "content": "You are new test role.\nnew
test backstory\n\nYour personal goal is: new test goal\n\nTOOLS:\n------\nYou
have access to the following tools:\n\nmultiplier: multiplier(numbers) -> float
- Useful for when you need to multiply two numbers together.\n The input
to this tool should be a comma separated list of numbers of\n length
two, representing the two numbers you want to multiply together.\n For
example, `1,2` would be the input if you wanted to multiply 1 by 2.\n\nTo use
a tool, please use the exact following format:\n\n```\nThought: Do I need to
use a tool? Yes\nAction: the action to take, should be one of [multiplier],
just the name.\nAction Input: the input to the action\nObservation: the result
of the action\n```\n\nWhen you have a response for your task, or if you do not
need to use a tool, you MUST use the format:\n\n```\nThought: Do I need to use
a tool? No\nFinal Answer: [your response here]\n```\n\t\tThis is the summary
of your work so far:\n The human asks the AI to calculate 3 times 5 and the
AI responds that 3 times 5 is 15.\nBegin! This is VERY important to you, your
job depends on it!\n\nCurrent Task: What is 3 times 5?\n\nI just used the multiplier
tool with input 3,5. So I already knwo the result of that.\nObservation: Invalid
or incomplete response\nThought: \n"}], "model": "gpt-4", "n": 1, "stop": ["\nObservation"],
"stream": false, "temperature": 0.7}'
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate
connection:
- keep-alive
content-length:
- '1460'
content-type:
- application/json
cookie:
- __cf_bm=iNTKkM.TU1FqaOYqOtdlGljM5kh2eYMOQyBBsDxjV7o-1703858227-1-AUtPBBabQY5DO7jJdGlU5rhZeJigXNSYGJkA8uYAsRU5NSaEt+YcHCLzXRQWpzm+lHKeh5JKd9iq/sLM5ylFvc8=;
_cfuvid=FKpE5gHVpQFRYsdYjjLPKhDX2CeCXpdvvJ8tjGmsYrM-1703858227679-0-604800000
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.6.1
x-stainless-arch:
- arm64
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.6.1
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.11.7
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
content: "{\n \"id\": \"chatcmpl-8b7eME5fg1w8QcXUnl0FBcqxJWy7u\",\n \"object\":
\"chat.completion\",\n \"created\": 1703858234,\n \"model\": \"gpt-4-0613\",\n
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
\"assistant\",\n \"content\": \"Do I need to use a tool? No\\nFinal Answer:
3 times 5 is 15.\"\n },\n \"logprobs\": null,\n \"finish_reason\":
\"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 324,\n \"completion_tokens\":
22,\n \"total_tokens\": 346\n },\n \"system_fingerprint\": null\n}\n"
headers:
CF-Cache-Status:
- DYNAMIC
CF-RAY:
- 83d28d0fcfaf1cd9-GRU
Cache-Control:
- no-cache, must-revalidate
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Fri, 29 Dec 2023 13:57:17 GMT
Server:
- cloudflare
Transfer-Encoding:
- chunked
access-control-allow-origin:
- '*'
alt-svc:
- h3=":443"; ma=86400
openai-model:
- gpt-4-0613
openai-organization:
- user-z7g4wmlazxqvc5wjyaaaocfz
openai-processing-ms:
- '2494'
openai-version:
- '2020-10-01'
strict-transport-security:
- max-age=15724800; includeSubDomains
x-ratelimit-limit-requests:
- '10000'
x-ratelimit-limit-tokens:
- '300000'
x-ratelimit-limit-tokens_usage_based:
- '300000'
x-ratelimit-remaining-requests:
- '9999'
x-ratelimit-remaining-tokens:
- '299661'
x-ratelimit-remaining-tokens_usage_based:
- '299661'
x-ratelimit-reset-requests:
- 6ms
x-ratelimit-reset-tokens:
- 67ms
x-ratelimit-reset-tokens_usage_based:
- 67ms
x-request-id:
- eb9704531634d4cd3c6652dff09914bc
http_version: HTTP/1.1
status_code: 200
- request:
body: '{"messages": [{"role": "user", "content": "Progressively summarize the
lines of conversation provided, adding onto the previous summary returning a
new summary.\n\nEXAMPLE\nCurrent summary:\nThe human asks what the AI thinks
of artificial intelligence. The AI thinks artificial intelligence is a force
for good.\n\nNew lines of conversation:\nHuman: Why do you think artificial
intelligence is a force for good?\nAI: Because artificial intelligence will
help humans reach their full potential.\n\nNew summary:\nThe human asks what
the AI thinks of artificial intelligence. The AI thinks artificial intelligence
is a force for good because it will help humans reach their full potential.\nEND
OF EXAMPLE\n\nCurrent summary:\nThe human asks the AI to calculate 3 times 5
and the AI responds that 3 times 5 is 15.\n\nNew lines of conversation:\nHuman:
What is 3 times 5?\nAI: 3 times 5 is 15.\n\nNew summary:"}], "model": "gpt-4",
"n": 1, "stream": false, "temperature": 0.7}'
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate
connection:
- keep-alive
content-length:
- '972'
content-type:
- application/json
cookie:
- __cf_bm=iNTKkM.TU1FqaOYqOtdlGljM5kh2eYMOQyBBsDxjV7o-1703858227-1-AUtPBBabQY5DO7jJdGlU5rhZeJigXNSYGJkA8uYAsRU5NSaEt+YcHCLzXRQWpzm+lHKeh5JKd9iq/sLM5ylFvc8=;
_cfuvid=FKpE5gHVpQFRYsdYjjLPKhDX2CeCXpdvvJ8tjGmsYrM-1703858227679-0-604800000
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.6.1
x-stainless-arch:
- arm64
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.6.1
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.11.7
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
content: "{\n \"id\": \"chatcmpl-8b7eP77MzprkbgjaMbeaEob0aDx39\",\n \"object\":
\"chat.completion\",\n \"created\": 1703858237,\n \"model\": \"gpt-4-0613\",\n
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
\"assistant\",\n \"content\": \"The human asks the AI to calculate 3
times 5 twice, and both times the AI responds that 3 times 5 is 15.\"\n },\n
\ \"logprobs\": null,\n \"finish_reason\": \"stop\"\n }\n ],\n
\ \"usage\": {\n \"prompt_tokens\": 181,\n \"completion_tokens\": 30,\n
\ \"total_tokens\": 211\n },\n \"system_fingerprint\": null\n}\n"
headers:
CF-Cache-Status:
- DYNAMIC
CF-RAY:
- 83d28d211f591cd9-GRU
Cache-Control:
- no-cache, must-revalidate
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Fri, 29 Dec 2023 13:57:20 GMT
Server:
- cloudflare
Transfer-Encoding:
- chunked
access-control-allow-origin:
- '*'
alt-svc:
- h3=":443"; ma=86400
openai-model:
- gpt-4-0613
openai-organization:
- user-z7g4wmlazxqvc5wjyaaaocfz
openai-processing-ms:
- '2967'
openai-version:
- '2020-10-01'
strict-transport-security:
- max-age=15724800; includeSubDomains
x-ratelimit-limit-requests:
- '10000'
x-ratelimit-limit-tokens:
- '300000'
x-ratelimit-limit-tokens_usage_based:
- '300000'
x-ratelimit-remaining-requests:
- '9999'
x-ratelimit-remaining-tokens:
- '299771'
x-ratelimit-remaining-tokens_usage_based:
- '299771'
x-ratelimit-reset-requests:
- 6ms
x-ratelimit-reset-tokens:
- 45ms
x-ratelimit-reset-tokens_usage_based:
- 45ms
x-request-id:
- e3bbd11b2e9b28f76f8b43dc7a4d2b6d
http_version: HTTP/1.1
status_code: 200
version: 1

View File

@@ -160,7 +160,22 @@ def test_hierarchical_process():
assert (
crew.kickoff()
== """Here are the five interesting ideas for an article with a highlight paragraph for each:\n\n1. The Evolution of Artificial Intelligence:\nDive deep into the fascinating journey of artificial intelligence, from its humble beginnings as a concept in science fiction to an integral part of our daily lives and a catalyst of modern innovations. Explore how AI has evolved over the years, the key milestones that have shaped its growth, and the visionary minds behind these advancements. Uncover the remarkable transformation of AI and its astounding potential for the future.\n\n2. AI in Everyday Life:\nUncover the unseen impact of AI in our every day lives, from our smartphones and home appliances to social media and healthcare. Learn about the subtle yet profound ways AI has become a silent partner in your daily routine, enhancing convenience, productivity, and decision-making. Explore the numerous applications of AI right at your fingertips and how it shapes our interactions with technology and the world around us.\n\n3. Ethical Implications of AI:\nVenture into the ethical labyrinth of artificial intelligence, where innovation meets responsibility. Explore the implications of AI on privacy, job security, and societal norms, and the moral obligations we have towards its development and use. Delve into the thought-provoking debates about AI ethics and the measures being taken to ensure its responsible and equitable use.\n\n4. The Rise of AI Startups:\nWitness the rise of AI startups, the new champions of innovation, driving the technology revolution. Discover how these trailblazing companies are harnessing the power of AI to solve complex problems, create new markets, and revolutionize industries. Learn about their unique challenges, their groundbreaking solutions, and the potential they hold for reshaping the future of technology and business.\n\n5. AI and the Environment:\nExplore the intersection of AI and the environment, where technology meets sustainability. 
Uncover how AI is being used to combat climate change, conserve biodiversity, and optimize resource utilization. Learn about the innovative ways AI is being used to create a sustainable future and the challenges and opportunities it presents."""
== """Here are the 5 interesting ideas with a highlight paragraph for each:
1. "The Future of AI in Healthcare: Predicting Diseases Before They Happen"
- "Imagine a future where AI empowers us to detect diseases before they arise, transforming healthcare from reactive to proactive. Machine learning algorithms, trained on vast amounts of patient data, could potentially predict heart diseases, strokes, or cancers before they manifest, allowing for early interventions and significantly improving patient outcomes. This article will delve into the rapid advancements in AI within the healthcare sector and how these technologies are ushering us into a new era of predictive medicine."
2. "How AI is Changing the Way We Cook: An Insight into Smart Kitchens"
- "From the humble home kitchen to grand culinary stages, AI is revolutionizing the way we cook. Smart appliances, equipped with advanced sensors and predictive algorithms, are turning kitchens into creative playgrounds, offering personalized recipes, precise cooking instructions, and even automated meal preparation. This article explores the fascinating intersection of AI and gastronomy, revealing how technology is transforming our culinary experiences."
3. "Redefining Fitness with AI: Personalized Workout Plans and Nutritional Advice"
- "Fitness reimagined that's the promise of AI in the wellness industry. Picture a personal trainer who knows your strengths, weaknesses, and nutritional needs intimately. An AI-powered fitness app can provide this personalized experience, adapting your workout plans and dietary recommendations in real-time based on your progress and feedback. Join us as we unpack how AI is revolutionizing the fitness landscape, offering personalized, data-driven approaches to health and well-being."
4. "AI and the Art World: How Technology is Shaping Creativity"
- "Art and AI may seem like unlikely partners, but their synergy is sparking a creative revolution. AI algorithms are now creating mesmerizing artworks, challenging our perceptions of creativity and originality. From AI-assisted painting to generative music composition, this article will take you on a journey through the fascinating world of AI in art, exploring how technology is reshaping the boundaries of human creativity."
5. "AI in Space Exploration: The Next Frontier"
- "The vast expanse of space, once the sole domain of astronauts and rovers, is the next frontier for AI. AI technology is playing an increasingly vital role in space exploration, from predicting space weather to assisting in interstellar navigation. This article will delve into the exciting intersection of AI and space exploration, exploring how these advanced technologies are helping us uncover the mysteries of the cosmos.\""""
)
@@ -262,14 +277,18 @@ def test_crew_verbose_levels_output(capsys):
@pytest.mark.vcr(filter_headers=["authorization"])
def test_cache_hitting_between_agents():
from unittest.mock import call, patch
from unittest.mock import patch
from langchain.tools import tool
@tool
def multiplier(first_number: int, second_number: int) -> float:
"""Useful for when you need to multiply two numbers together."""
return first_number * second_number
def multiplier(numbers) -> float:
    """Useful for when you need to multiply two numbers together.
    The input to this tool should be a comma separated list of numbers of
    length two, representing the two numbers you want to multiply together.
    For example, `1,2` would be the input if you wanted to multiply 1 by 2."""
    # Parse the comma-separated pair and multiply. The docstring above is
    # the tool's runtime description (consumed by the agent), so it is
    # kept verbatim.
    first, second = map(int, numbers.split(","))
    return first * second
tasks = [
Task(
@@ -289,16 +308,15 @@ def test_cache_hitting_between_agents():
tasks=tasks,
)
assert crew._cache_handler._cache == {}
output = crew.kickoff()
assert crew._cache_handler._cache == {"multiplier-2,6": "12"}
assert output == "12"
with patch.object(CacheHandler, "read") as read:
read.return_value = "12"
crew.kickoff()
assert read.call_count == 2, "read was not called exactly twice"
# Check if read was called with the expected arguments
expected_calls = [
call(tool="multiplier", input={"first_number": 2, "second_number": 6}),
call(tool="multiplier", input={"first_number": 2, "second_number": 6}),
]
read.assert_has_calls(expected_calls, any_order=False)
read.assert_called_with("multiplier", "2,6")
@pytest.mark.vcr(filter_headers=["authorization"])
@@ -338,34 +356,6 @@ def test_api_calls_throttling(capsys):
moveon.assert_called()
@pytest.mark.vcr(filter_headers=["authorization"])
def test_crew_full_ouput():
    """A crew built with ``full_output=True`` should return a dict with the
    final output plus each task's output object, not just the final string.

    NOTE(review): the test name has a typo ("ouput" -> "output") — worth
    renaming in a follow-up so it is discoverable by name.
    """
    # Single agent with delegation disabled so execution stays deterministic.
    agent = Agent(
        role="test role",
        goal="test goal",
        backstory="test backstory",
        allow_delegation=False,
        verbose=True,
    )

    task1 = Task(
        description="just say hi!",
        agent=agent,
    )
    task2 = Task(
        description="just say hello!",
        agent=agent,
    )

    # full_output=True is the behavior under test.
    crew = Crew(agents=[agent], tasks=[task1, task2], full_output=True)
    result = crew.kickoff()

    # Expect both the last task's result and the per-task output objects.
    assert result == {
        "final_output": "Hello!",
        "tasks_outputs": [task1.output, task2.output],
    }
def test_agents_rpm_is_never_set_if_crew_max_RPM_is_not_set():
agent = Agent(
role="test role",
@@ -421,7 +411,7 @@ def test_async_task_execution():
with patch.object(threading.Thread, "start") as start:
thread = threading.Thread(target=lambda: None, args=()).start()
start.return_value = thread
with patch.object(threading.Thread, "join", wraps=thread.join()) as join:
with patch.object(threading.Thread, "join", wraps=thread.join()) as join: # type: ignore
list_ideas.output = TaskOutput(
description="A 4 paragraph article about AI.", result="ok"
)
@@ -431,71 +421,3 @@ def test_async_task_execution():
crew.kickoff()
start.assert_called()
join.assert_called()
def test_set_agents_step_callback():
    """A crew-level ``step_callback`` should be propagated onto agents that
    do not define their own callback."""
    from unittest.mock import patch

    # Agent created WITHOUT a step_callback of its own.
    researcher_agent = Agent(
        role="Researcher",
        goal="Make the best research and analysis on content about AI and AI agents",
        backstory="You're an expert researcher, specialized in technology, software engineering, AI and startups. You work as a freelancer and is now working on doing research and analysis for a new customer.",
        allow_delegation=False,
    )

    list_ideas = Task(
        description="Give me a list of 5 interesting ideas to explore for na article, what makes them unique and interesting.",
        expected_output="Bullet point list of 5 important events.",
        agent=researcher_agent,
        async_execution=True,
    )

    # Crew supplies the callback the agent should inherit.
    crew = Crew(
        agents=[researcher_agent],
        process=Process.sequential,
        tasks=[list_ideas],
        step_callback=lambda: None,
    )

    with patch.object(Agent, "execute_task") as execute:
        execute.return_value = "ok"
        crew.kickoff()
        # After kickoff the crew's callback must have been assigned.
        assert researcher_agent.step_callback is not None
def test_dont_set_agents_step_callback_if_already_set():
    """An agent's own ``step_callback`` must take precedence: the crew's
    callback must NOT overwrite one the agent was constructed with."""
    from unittest.mock import patch

    # Two distinct sentinels so identity checks below are unambiguous.
    def agent_callback(_):
        pass

    def crew_callback(_):
        pass

    # Agent created WITH its own step_callback.
    researcher_agent = Agent(
        role="Researcher",
        goal="Make the best research and analysis on content about AI and AI agents",
        backstory="You're an expert researcher, specialized in technology, software engineering, AI and startups. You work as a freelancer and is now working on doing research and analysis for a new customer.",
        allow_delegation=False,
        step_callback=agent_callback,
    )

    list_ideas = Task(
        description="Give me a list of 5 interesting ideas to explore for na article, what makes them unique and interesting.",
        expected_output="Bullet point list of 5 important events.",
        agent=researcher_agent,
        async_execution=True,
    )

    # Crew also defines a callback, which should only apply to agents
    # lacking one.
    crew = Crew(
        agents=[researcher_agent],
        process=Process.sequential,
        tasks=[list_ideas],
        step_callback=crew_callback,
    )

    with patch.object(Agent, "execute_task") as execute:
        execute.return_value = "ok"
        crew.kickoff()
        # Identity (not equality) checks: the original callback survives.
        assert researcher_agent.step_callback is not crew_callback
        assert researcher_agent.step_callback is agent_callback

View File

@@ -74,7 +74,7 @@ def test_task_prompt_includes_expected_output():
with patch.object(Agent, "execute_task") as execute:
execute.return_value = "ok"
task.execute()
execute.assert_called_once_with(task=task, context=None, tools=[])
execute.assert_called_once_with(task=task._prompt(), context=None, tools=[])
def test_task_callback():
@@ -115,7 +115,7 @@ def test_execute_with_agent():
with patch.object(Agent, "execute_task", return_value="ok") as execute:
task.execute(agent=researcher)
execute.assert_called_once_with(task=task, context=None, tools=[])
execute.assert_called_once_with(task=task._prompt(), context=None, tools=[])
def test_async_execution():
@@ -135,4 +135,4 @@ def test_async_execution():
with patch.object(Agent, "execute_task", return_value="ok") as execute:
task.execute(agent=researcher)
execute.assert_called_once_with(task=task, context=None, tools=[])
execute.assert_called_once_with(task=task._prompt(), context=None, tools=[])