From a3bee66be8e1f99b56731acde70f4026a7c35de0 Mon Sep 17 00:00:00 2001 From: Lucas Gomide Date: Tue, 10 Feb 2026 14:39:35 -0300 Subject: [PATCH 1/9] Address OpenSSL CVE-2025-15467 vulnerability (#4426) * fix(security): bump regex from 2024.9.11 to 2026.1.15 Address security vulnerability flagged in regex==2024.9.11 * bump mcp from 1.23.1 to 1.26.0 Address security vulnerability flagged in mcp==1.16.0 (resolved to 1.23.3) --- lib/crewai/pyproject.toml | 4 +- uv.lock | 156 +++++++++++++++++++++----------------- 2 files changed, 90 insertions(+), 70 deletions(-) diff --git a/lib/crewai/pyproject.toml b/lib/crewai/pyproject.toml index 5ecc0f0bb..da8c851df 100644 --- a/lib/crewai/pyproject.toml +++ b/lib/crewai/pyproject.toml @@ -14,7 +14,7 @@ dependencies = [ "instructor>=1.3.3", # Text Processing "pdfplumber~=0.11.4", - "regex~=2024.9.11", + "regex~=2026.1.15", # Telemetry and Monitoring "opentelemetry-api~=1.34.0", "opentelemetry-sdk~=1.34.0", @@ -36,7 +36,7 @@ dependencies = [ "json5~=0.10.0", "portalocker~=2.7.0", "pydantic-settings~=2.10.1", - "mcp~=1.23.1", + "mcp~=1.26.0", "uv~=0.9.13", "aiosqlite~=0.21.0", ] diff --git a/uv.lock b/uv.lock index db5618250..c84758360 100644 --- a/uv.lock +++ b/uv.lock @@ -1295,7 +1295,7 @@ requires-dist = [ { name = "json5", specifier = "~=0.10.0" }, { name = "jsonref", specifier = "~=1.1.0" }, { name = "litellm", marker = "extra == 'litellm'", specifier = ">=1.74.9,<3" }, - { name = "mcp", specifier = "~=1.23.1" }, + { name = "mcp", specifier = "~=1.26.0" }, { name = "mem0ai", marker = "extra == 'mem0'", specifier = "~=0.1.94" }, { name = "openai", specifier = ">=1.83.0,<3" }, { name = "openpyxl", specifier = "~=3.1.5" }, @@ -1311,7 +1311,7 @@ requires-dist = [ { name = "pyjwt", specifier = ">=2.9.0,<3" }, { name = "python-dotenv", specifier = "~=1.1.1" }, { name = "qdrant-client", extras = ["fastembed"], marker = "extra == 'qdrant'", specifier = "~=1.14.3" }, - { name = "regex", specifier = "~=2024.9.11" }, + { name = "regex", 
specifier = "~=2026.1.15" }, { name = "tiktoken", marker = "extra == 'embeddings'", specifier = "~=0.8.0" }, { name = "tokenizers", specifier = "~=0.20.3" }, { name = "tomli", specifier = "~=2.0.2" }, @@ -3777,7 +3777,7 @@ wheels = [ [[package]] name = "mcp" -version = "1.23.3" +version = "1.26.0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "anyio" }, @@ -3795,9 +3795,9 @@ dependencies = [ { name = "typing-inspection" }, { name = "uvicorn", marker = "sys_platform != 'emscripten'" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/a7/a4/d06a303f45997e266f2c228081abe299bbcba216cb806128e2e49095d25f/mcp-1.23.3.tar.gz", hash = "sha256:b3b0da2cc949950ce1259c7bfc1b081905a51916fcd7c8182125b85e70825201", size = 600697, upload-time = "2025-12-09T16:04:37.351Z" } +sdist = { url = "https://files.pythonhosted.org/packages/fc/6d/62e76bbb8144d6ed86e202b5edd8a4cb631e7c8130f3f4893c3f90262b10/mcp-1.26.0.tar.gz", hash = "sha256:db6e2ef491eecc1a0d93711a76f28dec2e05999f93afd48795da1c1137142c66", size = 608005, upload-time = "2026-01-24T19:40:32.468Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/32/c6/13c1a26b47b3f3a3b480783001ada4268917c9f42d78a079c336da2e75e5/mcp-1.23.3-py3-none-any.whl", hash = "sha256:32768af4b46a1b4f7df34e2bfdf5c6011e7b63d7f1b0e321d0fdef4cd6082031", size = 231570, upload-time = "2025-12-09T16:04:35.56Z" }, + { url = "https://files.pythonhosted.org/packages/fd/d9/eaa1f80170d2b7c5ba23f3b59f766f3a0bb41155fbc32a69adfa1adaaef9/mcp-1.26.0-py3-none-any.whl", hash = "sha256:904a21c33c25aa98ddbeb47273033c435e595bbacfdb177f4bd87f6dceebe1ca", size = 233615, upload-time = "2026-01-24T19:40:30.652Z" }, ] [[package]] @@ -6792,71 +6792,91 @@ wheels = [ [[package]] name = "regex" -version = "2024.9.11" +version = "2026.1.15" source = { registry = "https://pypi.org/simple" } -sdist = { url = 
"https://files.pythonhosted.org/packages/f9/38/148df33b4dbca3bd069b963acab5e0fa1a9dbd6820f8c322d0dd6faeff96/regex-2024.9.11.tar.gz", hash = "sha256:6c188c307e8433bcb63dc1915022deb553b4203a70722fc542c363bf120a01fd", size = 399403, upload-time = "2024-09-11T19:00:09.814Z" } +sdist = { url = "https://files.pythonhosted.org/packages/0b/86/07d5056945f9ec4590b518171c4254a5925832eb727b56d3c38a7476f316/regex-2026.1.15.tar.gz", hash = "sha256:164759aa25575cbc0651bef59a0b18353e54300d79ace8084c818ad8ac72b7d5", size = 414811, upload-time = "2026-01-14T23:18:02.775Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/63/12/497bd6599ce8a239ade68678132296aec5ee25ebea45fc8ba91aa60fceec/regex-2024.9.11-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:1494fa8725c285a81d01dc8c06b55287a1ee5e0e382d8413adc0a9197aac6408", size = 482488, upload-time = "2024-09-11T18:56:55.331Z" }, - { url = "https://files.pythonhosted.org/packages/c1/24/595ddb9bec2a9b151cdaf9565b0c9f3da9f0cb1dca6c158bc5175332ddf8/regex-2024.9.11-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:0e12c481ad92d129c78f13a2a3662317e46ee7ef96c94fd332e1c29131875b7d", size = 287443, upload-time = "2024-09-11T18:56:58.531Z" }, - { url = "https://files.pythonhosted.org/packages/69/a8/b2fb45d9715b1469383a0da7968f8cacc2f83e9fbbcd6b8713752dd980a6/regex-2024.9.11-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:16e13a7929791ac1216afde26f712802e3df7bf0360b32e4914dca3ab8baeea5", size = 284561, upload-time = "2024-09-11T18:57:00.655Z" }, - { url = "https://files.pythonhosted.org/packages/88/87/1ce4a5357216b19b7055e7d3b0efc75a6e426133bf1e7d094321df514257/regex-2024.9.11-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:46989629904bad940bbec2106528140a218b4a36bb3042d8406980be1941429c", size = 783177, upload-time = "2024-09-11T18:57:01.958Z" }, - { url = 
"https://files.pythonhosted.org/packages/3c/65/b9f002ab32f7b68e7d1dcabb67926f3f47325b8dbc22cc50b6a043e1d07c/regex-2024.9.11-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a906ed5e47a0ce5f04b2c981af1c9acf9e8696066900bf03b9d7879a6f679fc8", size = 823193, upload-time = "2024-09-11T18:57:04.06Z" }, - { url = "https://files.pythonhosted.org/packages/22/91/8339dd3abce101204d246e31bc26cdd7ec07c9f91598472459a3a902aa41/regex-2024.9.11-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e9a091b0550b3b0207784a7d6d0f1a00d1d1c8a11699c1a4d93db3fbefc3ad35", size = 809950, upload-time = "2024-09-11T18:57:05.805Z" }, - { url = "https://files.pythonhosted.org/packages/cb/19/556638aa11c2ec9968a1da998f07f27ec0abb9bf3c647d7c7985ca0b8eea/regex-2024.9.11-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5ddcd9a179c0a6fa8add279a4444015acddcd7f232a49071ae57fa6e278f1f71", size = 782661, upload-time = "2024-09-11T18:57:07.881Z" }, - { url = "https://files.pythonhosted.org/packages/d1/e9/7a5bc4c6ef8d9cd2bdd83a667888fc35320da96a4cc4da5fa084330f53db/regex-2024.9.11-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6b41e1adc61fa347662b09398e31ad446afadff932a24807d3ceb955ed865cc8", size = 772348, upload-time = "2024-09-11T18:57:09.494Z" }, - { url = "https://files.pythonhosted.org/packages/f1/0b/29f2105bfac3ed08e704914c38e93b07c784a6655f8a015297ee7173e95b/regex-2024.9.11-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:ced479f601cd2f8ca1fd7b23925a7e0ad512a56d6e9476f79b8f381d9d37090a", size = 697460, upload-time = "2024-09-11T18:57:11.595Z" }, - { url = "https://files.pythonhosted.org/packages/71/3a/52ff61054d15a4722605f5872ad03962b319a04c1ebaebe570b8b9b7dde1/regex-2024.9.11-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:635a1d96665f84b292e401c3d62775851aedc31d4f8784117b3c68c4fcd4118d", size 
= 769151, upload-time = "2024-09-11T18:57:14.358Z" }, - { url = "https://files.pythonhosted.org/packages/97/07/37e460ab5ca84be8e1e197c3b526c5c86993dcc9e13cbc805c35fc2463c1/regex-2024.9.11-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:c0256beda696edcf7d97ef16b2a33a8e5a875affd6fa6567b54f7c577b30a137", size = 777478, upload-time = "2024-09-11T18:57:16.397Z" }, - { url = "https://files.pythonhosted.org/packages/65/7b/953075723dd5ab00780043ac2f9de667306ff9e2a85332975e9f19279174/regex-2024.9.11-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:3ce4f1185db3fbde8ed8aa223fc9620f276c58de8b0d4f8cc86fd1360829edb6", size = 845373, upload-time = "2024-09-11T18:57:17.938Z" }, - { url = "https://files.pythonhosted.org/packages/40/b8/3e9484c6230b8b6e8f816ab7c9a080e631124991a4ae2c27a81631777db0/regex-2024.9.11-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:09d77559e80dcc9d24570da3745ab859a9cf91953062e4ab126ba9d5993688ca", size = 845369, upload-time = "2024-09-11T18:57:20.091Z" }, - { url = "https://files.pythonhosted.org/packages/b7/99/38434984d912edbd2e1969d116257e869578f67461bd7462b894c45ed874/regex-2024.9.11-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:7a22ccefd4db3f12b526eccb129390942fe874a3a9fdbdd24cf55773a1faab1a", size = 773935, upload-time = "2024-09-11T18:57:21.652Z" }, - { url = "https://files.pythonhosted.org/packages/ab/67/43174d2b46fa947b7b9dfe56b6c8a8a76d44223f35b1d64645a732fd1d6f/regex-2024.9.11-cp310-cp310-win32.whl", hash = "sha256:f745ec09bc1b0bd15cfc73df6fa4f726dcc26bb16c23a03f9e3367d357eeedd0", size = 261624, upload-time = "2024-09-11T18:57:23.777Z" }, - { url = "https://files.pythonhosted.org/packages/c4/2a/4f9c47d9395b6aff24874c761d8d620c0232f97c43ef3cf668c8b355e7a7/regex-2024.9.11-cp310-cp310-win_amd64.whl", hash = "sha256:01c2acb51f8a7d6494c8c5eafe3d8e06d76563d8a8a4643b37e9b2dd8a2ff623", size = 274020, upload-time = "2024-09-11T18:57:25.27Z" }, - { url = 
"https://files.pythonhosted.org/packages/86/a1/d526b7b6095a0019aa360948c143aacfeb029919c898701ce7763bbe4c15/regex-2024.9.11-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:2cce2449e5927a0bf084d346da6cd5eb016b2beca10d0013ab50e3c226ffc0df", size = 482483, upload-time = "2024-09-11T18:57:26.694Z" }, - { url = "https://files.pythonhosted.org/packages/32/d9/bfdd153179867c275719e381e1e8e84a97bd186740456a0dcb3e7125c205/regex-2024.9.11-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:3b37fa423beefa44919e009745ccbf353d8c981516e807995b2bd11c2c77d268", size = 287442, upload-time = "2024-09-11T18:57:28.133Z" }, - { url = "https://files.pythonhosted.org/packages/33/c4/60f3370735135e3a8d673ddcdb2507a8560d0e759e1398d366e43d000253/regex-2024.9.11-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:64ce2799bd75039b480cc0360907c4fb2f50022f030bf9e7a8705b636e408fad", size = 284561, upload-time = "2024-09-11T18:57:30.83Z" }, - { url = "https://files.pythonhosted.org/packages/b1/51/91a5ebdff17f9ec4973cb0aa9d37635efec1c6868654bbc25d1543aca4ec/regex-2024.9.11-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a4cc92bb6db56ab0c1cbd17294e14f5e9224f0cc6521167ef388332604e92679", size = 791779, upload-time = "2024-09-11T18:57:32.461Z" }, - { url = "https://files.pythonhosted.org/packages/07/4a/022c5e6f0891a90cd7eb3d664d6c58ce2aba48bff107b00013f3d6167069/regex-2024.9.11-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d05ac6fa06959c4172eccd99a222e1fbf17b5670c4d596cb1e5cde99600674c4", size = 832605, upload-time = "2024-09-11T18:57:34.01Z" }, - { url = "https://files.pythonhosted.org/packages/ac/1c/3793990c8c83ca04e018151ddda83b83ecc41d89964f0f17749f027fc44d/regex-2024.9.11-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:040562757795eeea356394a7fb13076ad4f99d3c62ab0f8bdfb21f99a1f85664", size = 818556, upload-time = "2024-09-11T18:57:36.363Z" }, - { url = 
"https://files.pythonhosted.org/packages/e9/5c/8b385afbfacb853730682c57be56225f9fe275c5bf02ac1fc88edbff316d/regex-2024.9.11-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6113c008a7780792efc80f9dfe10ba0cd043cbf8dc9a76ef757850f51b4edc50", size = 792808, upload-time = "2024-09-11T18:57:38.493Z" }, - { url = "https://files.pythonhosted.org/packages/9b/8b/a4723a838b53c771e9240951adde6af58c829fb6a6a28f554e8131f53839/regex-2024.9.11-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8e5fb5f77c8745a60105403a774fe2c1759b71d3e7b4ca237a5e67ad066c7199", size = 781115, upload-time = "2024-09-11T18:57:41.4Z" }, - { url = "https://files.pythonhosted.org/packages/83/5f/031a04b6017033d65b261259c09043c06f4ef2d4eac841d0649d76d69541/regex-2024.9.11-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:54d9ff35d4515debf14bc27f1e3b38bfc453eff3220f5bce159642fa762fe5d4", size = 778155, upload-time = "2024-09-11T18:57:43.608Z" }, - { url = "https://files.pythonhosted.org/packages/fd/cd/4660756070b03ce4a66663a43f6c6e7ebc2266cc6b4c586c167917185eb4/regex-2024.9.11-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:df5cbb1fbc74a8305b6065d4ade43b993be03dbe0f8b30032cced0d7740994bd", size = 784614, upload-time = "2024-09-11T18:57:45.219Z" }, - { url = "https://files.pythonhosted.org/packages/93/8d/65b9bea7df120a7be8337c415b6d256ba786cbc9107cebba3bf8ff09da99/regex-2024.9.11-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:7fb89ee5d106e4a7a51bce305ac4efb981536301895f7bdcf93ec92ae0d91c7f", size = 853744, upload-time = "2024-09-11T18:57:46.907Z" }, - { url = "https://files.pythonhosted.org/packages/96/a7/fba1eae75eb53a704475baf11bd44b3e6ccb95b316955027eb7748f24ef8/regex-2024.9.11-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:a738b937d512b30bf75995c0159c0ddf9eec0775c9d72ac0202076c72f24aa96", size = 855890, upload-time = "2024-09-11T18:57:49.264Z" }, - { url = 
"https://files.pythonhosted.org/packages/45/14/d864b2db80a1a3358534392373e8a281d95b28c29c87d8548aed58813910/regex-2024.9.11-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:e28f9faeb14b6f23ac55bfbbfd3643f5c7c18ede093977f1df249f73fd22c7b1", size = 781887, upload-time = "2024-09-11T18:57:51.619Z" }, - { url = "https://files.pythonhosted.org/packages/4d/a9/bfb29b3de3eb11dc9b412603437023b8e6c02fb4e11311863d9bf62c403a/regex-2024.9.11-cp311-cp311-win32.whl", hash = "sha256:18e707ce6c92d7282dfce370cd205098384b8ee21544e7cb29b8aab955b66fa9", size = 261644, upload-time = "2024-09-11T18:57:53.334Z" }, - { url = "https://files.pythonhosted.org/packages/c7/ab/1ad2511cf6a208fde57fafe49829cab8ca018128ab0d0b48973d8218634a/regex-2024.9.11-cp311-cp311-win_amd64.whl", hash = "sha256:313ea15e5ff2a8cbbad96ccef6be638393041b0a7863183c2d31e0c6116688cf", size = 274033, upload-time = "2024-09-11T18:57:55.605Z" }, - { url = "https://files.pythonhosted.org/packages/6e/92/407531450762bed778eedbde04407f68cbd75d13cee96c6f8d6903d9c6c1/regex-2024.9.11-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:b0d0a6c64fcc4ef9c69bd5b3b3626cc3776520a1637d8abaa62b9edc147a58f7", size = 483590, upload-time = "2024-09-11T18:57:57.793Z" }, - { url = "https://files.pythonhosted.org/packages/8e/a2/048acbc5ae1f615adc6cba36cc45734e679b5f1e4e58c3c77f0ed611d4e2/regex-2024.9.11-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:49b0e06786ea663f933f3710a51e9385ce0cba0ea56b67107fd841a55d56a231", size = 288175, upload-time = "2024-09-11T18:57:59.671Z" }, - { url = "https://files.pythonhosted.org/packages/8a/ea/909d8620329ab710dfaf7b4adee41242ab7c9b95ea8d838e9bfe76244259/regex-2024.9.11-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:5b513b6997a0b2f10e4fd3a1313568e373926e8c252bd76c960f96fd039cd28d", size = 284749, upload-time = "2024-09-11T18:58:01.855Z" }, - { url = 
"https://files.pythonhosted.org/packages/ca/fa/521eb683b916389b4975337873e66954e0f6d8f91bd5774164a57b503185/regex-2024.9.11-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ee439691d8c23e76f9802c42a95cfeebf9d47cf4ffd06f18489122dbb0a7ad64", size = 795181, upload-time = "2024-09-11T18:58:03.985Z" }, - { url = "https://files.pythonhosted.org/packages/28/db/63047feddc3280cc242f9c74f7aeddc6ee662b1835f00046f57d5630c827/regex-2024.9.11-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a8f877c89719d759e52783f7fe6e1c67121076b87b40542966c02de5503ace42", size = 835842, upload-time = "2024-09-11T18:58:05.74Z" }, - { url = "https://files.pythonhosted.org/packages/e3/94/86adc259ff8ec26edf35fcca7e334566c1805c7493b192cb09679f9c3dee/regex-2024.9.11-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:23b30c62d0f16827f2ae9f2bb87619bc4fba2044911e2e6c2eb1af0161cdb766", size = 823533, upload-time = "2024-09-11T18:58:07.427Z" }, - { url = "https://files.pythonhosted.org/packages/29/52/84662b6636061277cb857f658518aa7db6672bc6d1a3f503ccd5aefc581e/regex-2024.9.11-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:85ab7824093d8f10d44330fe1e6493f756f252d145323dd17ab6b48733ff6c0a", size = 797037, upload-time = "2024-09-11T18:58:09.879Z" }, - { url = "https://files.pythonhosted.org/packages/c3/2a/cd4675dd987e4a7505f0364a958bc41f3b84942de9efaad0ef9a2646681c/regex-2024.9.11-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8dee5b4810a89447151999428fe096977346cf2f29f4d5e29609d2e19e0199c9", size = 784106, upload-time = "2024-09-11T18:58:11.55Z" }, - { url = "https://files.pythonhosted.org/packages/6f/75/3ea7ec29de0bbf42f21f812f48781d41e627d57a634f3f23947c9a46e303/regex-2024.9.11-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:98eeee2f2e63edae2181c886d7911ce502e1292794f4c5ee71e60e23e8d26b5d", size = 782468, upload-time = 
"2024-09-11T18:58:13.552Z" }, - { url = "https://files.pythonhosted.org/packages/d3/67/15519d69b52c252b270e679cb578e22e0c02b8dd4e361f2b04efcc7f2335/regex-2024.9.11-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:57fdd2e0b2694ce6fc2e5ccf189789c3e2962916fb38779d3e3521ff8fe7a822", size = 790324, upload-time = "2024-09-11T18:58:15.268Z" }, - { url = "https://files.pythonhosted.org/packages/9c/71/eff77d3fe7ba08ab0672920059ec30d63fa7e41aa0fb61c562726e9bd721/regex-2024.9.11-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:d552c78411f60b1fdaafd117a1fca2f02e562e309223b9d44b7de8be451ec5e0", size = 860214, upload-time = "2024-09-11T18:58:17.583Z" }, - { url = "https://files.pythonhosted.org/packages/81/11/e1bdf84a72372e56f1ea4b833dd583b822a23138a616ace7ab57a0e11556/regex-2024.9.11-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:a0b2b80321c2ed3fcf0385ec9e51a12253c50f146fddb2abbb10f033fe3d049a", size = 859420, upload-time = "2024-09-11T18:58:19.898Z" }, - { url = "https://files.pythonhosted.org/packages/ea/75/9753e9dcebfa7c3645563ef5c8a58f3a47e799c872165f37c55737dadd3e/regex-2024.9.11-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:18406efb2f5a0e57e3a5881cd9354c1512d3bb4f5c45d96d110a66114d84d23a", size = 787333, upload-time = "2024-09-11T18:58:21.699Z" }, - { url = "https://files.pythonhosted.org/packages/bc/4e/ba1cbca93141f7416624b3ae63573e785d4bc1834c8be44a8f0747919eca/regex-2024.9.11-cp312-cp312-win32.whl", hash = "sha256:e464b467f1588e2c42d26814231edecbcfe77f5ac414d92cbf4e7b55b2c2a776", size = 262058, upload-time = "2024-09-11T18:58:23.452Z" }, - { url = "https://files.pythonhosted.org/packages/6e/16/efc5f194778bf43e5888209e5cec4b258005d37c613b67ae137df3b89c53/regex-2024.9.11-cp312-cp312-win_amd64.whl", hash = "sha256:9e8719792ca63c6b8340380352c24dcb8cd7ec49dae36e963742a275dfae6009", size = 273526, upload-time = "2024-09-11T18:58:25.191Z" }, - { url = 
"https://files.pythonhosted.org/packages/93/0a/d1c6b9af1ff1e36832fe38d74d5c5bab913f2bdcbbd6bc0e7f3ce8b2f577/regex-2024.9.11-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:c157bb447303070f256e084668b702073db99bbb61d44f85d811025fcf38f784", size = 483376, upload-time = "2024-09-11T18:58:27.11Z" }, - { url = "https://files.pythonhosted.org/packages/a4/42/5910a050c105d7f750a72dcb49c30220c3ae4e2654e54aaaa0e9bc0584cb/regex-2024.9.11-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:4db21ece84dfeefc5d8a3863f101995de646c6cb0536952c321a2650aa202c36", size = 288112, upload-time = "2024-09-11T18:58:28.78Z" }, - { url = "https://files.pythonhosted.org/packages/8d/56/0c262aff0e9224fa7ffce47b5458d373f4d3e3ff84e99b5ff0cb15e0b5b2/regex-2024.9.11-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:220e92a30b426daf23bb67a7962900ed4613589bab80382be09b48896d211e92", size = 284608, upload-time = "2024-09-11T18:58:30.498Z" }, - { url = "https://files.pythonhosted.org/packages/b9/54/9fe8f9aec5007bbbbce28ba3d2e3eaca425f95387b7d1e84f0d137d25237/regex-2024.9.11-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:eb1ae19e64c14c7ec1995f40bd932448713d3c73509e82d8cd7744dc00e29e86", size = 795337, upload-time = "2024-09-11T18:58:32.665Z" }, - { url = "https://files.pythonhosted.org/packages/b2/e7/6b2f642c3cded271c4f16cc4daa7231be544d30fe2b168e0223724b49a61/regex-2024.9.11-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f47cd43a5bfa48f86925fe26fbdd0a488ff15b62468abb5d2a1e092a4fb10e85", size = 835848, upload-time = "2024-09-11T18:58:34.337Z" }, - { url = "https://files.pythonhosted.org/packages/cd/9e/187363bdf5d8c0e4662117b92aa32bf52f8f09620ae93abc7537d96d3311/regex-2024.9.11-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9d4a76b96f398697fe01117093613166e6aa8195d63f1b4ec3f21ab637632963", size = 823503, upload-time = "2024-09-11T18:58:36.17Z" }, - { url = 
"https://files.pythonhosted.org/packages/f8/10/601303b8ee93589f879664b0cfd3127949ff32b17f9b6c490fb201106c4d/regex-2024.9.11-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0ea51dcc0835eea2ea31d66456210a4e01a076d820e9039b04ae8d17ac11dee6", size = 797049, upload-time = "2024-09-11T18:58:38.225Z" }, - { url = "https://files.pythonhosted.org/packages/ef/1c/ea200f61ce9f341763f2717ab4daebe4422d83e9fd4ac5e33435fd3a148d/regex-2024.9.11-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b7aaa315101c6567a9a45d2839322c51c8d6e81f67683d529512f5bcfb99c802", size = 784144, upload-time = "2024-09-11T18:58:40.605Z" }, - { url = "https://files.pythonhosted.org/packages/d8/5c/d2429be49ef3292def7688401d3deb11702c13dcaecdc71d2b407421275b/regex-2024.9.11-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:c57d08ad67aba97af57a7263c2d9006d5c404d721c5f7542f077f109ec2a4a29", size = 782483, upload-time = "2024-09-11T18:58:42.58Z" }, - { url = "https://files.pythonhosted.org/packages/12/d9/cbc30f2ff7164f3b26a7760f87c54bf8b2faed286f60efd80350a51c5b99/regex-2024.9.11-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:f8404bf61298bb6f8224bb9176c1424548ee1181130818fcd2cbffddc768bed8", size = 790320, upload-time = "2024-09-11T18:58:44.5Z" }, - { url = "https://files.pythonhosted.org/packages/19/1d/43ed03a236313639da5a45e61bc553c8d41e925bcf29b0f8ecff0c2c3f25/regex-2024.9.11-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:dd4490a33eb909ef5078ab20f5f000087afa2a4daa27b4c072ccb3cb3050ad84", size = 860435, upload-time = "2024-09-11T18:58:47.014Z" }, - { url = "https://files.pythonhosted.org/packages/34/4f/5d04da61c7c56e785058a46349f7285ae3ebc0726c6ea7c5c70600a52233/regex-2024.9.11-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:eee9130eaad130649fd73e5cd92f60e55708952260ede70da64de420cdcad554", size = 859571, upload-time = "2024-09-11T18:58:48.974Z" }, - { url = 
"https://files.pythonhosted.org/packages/12/7f/8398c8155a3c70703a8e91c29532558186558e1aea44144b382faa2a6f7a/regex-2024.9.11-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:6a2644a93da36c784e546de579ec1806bfd2763ef47babc1b03d765fe560c9f8", size = 787398, upload-time = "2024-09-11T18:58:51.05Z" }, - { url = "https://files.pythonhosted.org/packages/58/3a/f5903977647a9a7e46d5535e9e96c194304aeeca7501240509bde2f9e17f/regex-2024.9.11-cp313-cp313-win32.whl", hash = "sha256:e997fd30430c57138adc06bba4c7c2968fb13d101e57dd5bb9355bf8ce3fa7e8", size = 262035, upload-time = "2024-09-11T18:58:53.526Z" }, - { url = "https://files.pythonhosted.org/packages/ff/80/51ba3a4b7482f6011095b3a036e07374f64de180b7d870b704ed22509002/regex-2024.9.11-cp313-cp313-win_amd64.whl", hash = "sha256:042c55879cfeb21a8adacc84ea347721d3d83a159da6acdf1116859e2427c43f", size = 273510, upload-time = "2024-09-11T18:58:55.263Z" }, + { url = "https://files.pythonhosted.org/packages/ea/d2/e6ee96b7dff201a83f650241c52db8e5bd080967cb93211f57aa448dc9d6/regex-2026.1.15-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:4e3dd93c8f9abe8aa4b6c652016da9a3afa190df5ad822907efe6b206c09896e", size = 488166, upload-time = "2026-01-14T23:13:46.408Z" }, + { url = "https://files.pythonhosted.org/packages/23/8a/819e9ce14c9f87af026d0690901b3931f3101160833e5d4c8061fa3a1b67/regex-2026.1.15-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:97499ff7862e868b1977107873dd1a06e151467129159a6ffd07b66706ba3a9f", size = 290632, upload-time = "2026-01-14T23:13:48.688Z" }, + { url = "https://files.pythonhosted.org/packages/d5/c3/23dfe15af25d1d45b07dfd4caa6003ad710dcdcb4c4b279909bdfe7a2de8/regex-2026.1.15-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:0bda75ebcac38d884240914c6c43d8ab5fb82e74cde6da94b43b17c411aa4c2b", size = 288500, upload-time = "2026-01-14T23:13:50.503Z" }, + { url = 
"https://files.pythonhosted.org/packages/c6/31/1adc33e2f717df30d2f4d973f8776d2ba6ecf939301efab29fca57505c95/regex-2026.1.15-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:7dcc02368585334f5bc81fc73a2a6a0bbade60e7d83da21cead622faf408f32c", size = 781670, upload-time = "2026-01-14T23:13:52.453Z" }, + { url = "https://files.pythonhosted.org/packages/23/ce/21a8a22d13bc4adcb927c27b840c948f15fc973e21ed2346c1bd0eae22dc/regex-2026.1.15-cp310-cp310-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:693b465171707bbe882a7a05de5e866f33c76aa449750bee94a8d90463533cc9", size = 850820, upload-time = "2026-01-14T23:13:54.894Z" }, + { url = "https://files.pythonhosted.org/packages/6c/4f/3eeacdf587a4705a44484cd0b30e9230a0e602811fb3e2cc32268c70d509/regex-2026.1.15-cp310-cp310-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:b0d190e6f013ea938623a58706d1469a62103fb2a241ce2873a9906e0386582c", size = 898777, upload-time = "2026-01-14T23:13:56.908Z" }, + { url = "https://files.pythonhosted.org/packages/79/a9/1898a077e2965c35fc22796488141a22676eed2d73701e37c73ad7c0b459/regex-2026.1.15-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:5ff818702440a5878a81886f127b80127f5d50563753a28211482867f8318106", size = 791750, upload-time = "2026-01-14T23:13:58.527Z" }, + { url = "https://files.pythonhosted.org/packages/4c/84/e31f9d149a178889b3817212827f5e0e8c827a049ff31b4b381e76b26e2d/regex-2026.1.15-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:f052d1be37ef35a54e394de66136e30fa1191fab64f71fc06ac7bc98c9a84618", size = 782674, upload-time = "2026-01-14T23:13:59.874Z" }, + { url = "https://files.pythonhosted.org/packages/d2/ff/adf60063db24532add6a1676943754a5654dcac8237af024ede38244fd12/regex-2026.1.15-cp310-cp310-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = 
"sha256:6bfc31a37fd1592f0c4fc4bfc674b5c42e52efe45b4b7a6a14f334cca4bcebe4", size = 767906, upload-time = "2026-01-14T23:14:01.298Z" }, + { url = "https://files.pythonhosted.org/packages/af/3e/e6a216cee1e2780fec11afe7fc47b6f3925d7264e8149c607ac389fd9b1a/regex-2026.1.15-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:3d6ce5ae80066b319ae3bc62fd55a557c9491baa5efd0d355f0de08c4ba54e79", size = 774798, upload-time = "2026-01-14T23:14:02.715Z" }, + { url = "https://files.pythonhosted.org/packages/0f/98/23a4a8378a9208514ed3efc7e7850c27fa01e00ed8557c958df0335edc4a/regex-2026.1.15-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:1704d204bd42b6bb80167df0e4554f35c255b579ba99616def38f69e14a5ccb9", size = 845861, upload-time = "2026-01-14T23:14:04.824Z" }, + { url = "https://files.pythonhosted.org/packages/f8/57/d7605a9d53bd07421a8785d349cd29677fe660e13674fa4c6cbd624ae354/regex-2026.1.15-cp310-cp310-musllinux_1_2_riscv64.whl", hash = "sha256:e3174a5ed4171570dc8318afada56373aa9289eb6dc0d96cceb48e7358b0e220", size = 755648, upload-time = "2026-01-14T23:14:06.371Z" }, + { url = "https://files.pythonhosted.org/packages/6f/76/6f2e24aa192da1e299cc1101674a60579d3912391867ce0b946ba83e2194/regex-2026.1.15-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:87adf5bd6d72e3e17c9cb59ac4096b1faaf84b7eb3037a5ffa61c4b4370f0f13", size = 836250, upload-time = "2026-01-14T23:14:08.343Z" }, + { url = "https://files.pythonhosted.org/packages/11/3a/1f2a1d29453299a7858eab7759045fc3d9d1b429b088dec2dc85b6fa16a2/regex-2026.1.15-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:e85dc94595f4d766bd7d872a9de5ede1ca8d3063f3bdf1e2c725f5eb411159e3", size = 779919, upload-time = "2026-01-14T23:14:09.954Z" }, + { url = "https://files.pythonhosted.org/packages/c0/67/eab9bc955c9dcc58e9b222c801e39cff7ca0b04261792a2149166ce7e792/regex-2026.1.15-cp310-cp310-win32.whl", hash = "sha256:21ca32c28c30d5d65fc9886ff576fc9b59bbca08933e844fa2363e530f4c8218", size = 265888, upload-time = 
"2026-01-14T23:14:11.35Z" }, + { url = "https://files.pythonhosted.org/packages/1d/62/31d16ae24e1f8803bddb0885508acecaec997fcdcde9c243787103119ae4/regex-2026.1.15-cp310-cp310-win_amd64.whl", hash = "sha256:3038a62fc7d6e5547b8915a3d927a0fbeef84cdbe0b1deb8c99bbd4a8961b52a", size = 277830, upload-time = "2026-01-14T23:14:12.908Z" }, + { url = "https://files.pythonhosted.org/packages/e5/36/5d9972bccd6417ecd5a8be319cebfd80b296875e7f116c37fb2a2deecebf/regex-2026.1.15-cp310-cp310-win_arm64.whl", hash = "sha256:505831646c945e3e63552cc1b1b9b514f0e93232972a2d5bedbcc32f15bc82e3", size = 270376, upload-time = "2026-01-14T23:14:14.782Z" }, + { url = "https://files.pythonhosted.org/packages/d0/c9/0c80c96eab96948363d270143138d671d5731c3a692b417629bf3492a9d6/regex-2026.1.15-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:1ae6020fb311f68d753b7efa9d4b9a5d47a5d6466ea0d5e3b5a471a960ea6e4a", size = 488168, upload-time = "2026-01-14T23:14:16.129Z" }, + { url = "https://files.pythonhosted.org/packages/17/f0/271c92f5389a552494c429e5cc38d76d1322eb142fb5db3c8ccc47751468/regex-2026.1.15-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:eddf73f41225942c1f994914742afa53dc0d01a6e20fe14b878a1b1edc74151f", size = 290636, upload-time = "2026-01-14T23:14:17.715Z" }, + { url = "https://files.pythonhosted.org/packages/a0/f9/5f1fd077d106ca5655a0f9ff8f25a1ab55b92128b5713a91ed7134ff688e/regex-2026.1.15-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:1e8cd52557603f5c66a548f69421310886b28b7066853089e1a71ee710e1cdc1", size = 288496, upload-time = "2026-01-14T23:14:19.326Z" }, + { url = "https://files.pythonhosted.org/packages/b5/e1/8f43b03a4968c748858ec77f746c286d81f896c2e437ccf050ebc5d3128c/regex-2026.1.15-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:5170907244b14303edc5978f522f16c974f32d3aa92109fabc2af52411c9433b", size = 793503, upload-time = "2026-01-14T23:14:20.922Z" }, + { url = 
"https://files.pythonhosted.org/packages/8d/4e/a39a5e8edc5377a46a7c875c2f9a626ed3338cb3bb06931be461c3e1a34a/regex-2026.1.15-cp311-cp311-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:2748c1ec0663580b4510bd89941a31560b4b439a0b428b49472a3d9944d11cd8", size = 860535, upload-time = "2026-01-14T23:14:22.405Z" }, + { url = "https://files.pythonhosted.org/packages/dc/1c/9dce667a32a9477f7a2869c1c767dc00727284a9fa3ff5c09a5c6c03575e/regex-2026.1.15-cp311-cp311-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:2f2775843ca49360508d080eaa87f94fa248e2c946bbcd963bb3aae14f333413", size = 907225, upload-time = "2026-01-14T23:14:23.897Z" }, + { url = "https://files.pythonhosted.org/packages/a4/3c/87ca0a02736d16b6262921425e84b48984e77d8e4e572c9072ce96e66c30/regex-2026.1.15-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:d9ea2604370efc9a174c1b5dcc81784fb040044232150f7f33756049edfc9026", size = 800526, upload-time = "2026-01-14T23:14:26.039Z" }, + { url = "https://files.pythonhosted.org/packages/4b/ff/647d5715aeea7c87bdcbd2f578f47b415f55c24e361e639fe8c0cc88878f/regex-2026.1.15-cp311-cp311-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:0dcd31594264029b57bf16f37fd7248a70b3b764ed9e0839a8f271b2d22c0785", size = 773446, upload-time = "2026-01-14T23:14:28.109Z" }, + { url = "https://files.pythonhosted.org/packages/af/89/bf22cac25cb4ba0fe6bff52ebedbb65b77a179052a9d6037136ae93f42f4/regex-2026.1.15-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:c08c1f3e34338256732bd6938747daa3c0d5b251e04b6e43b5813e94d503076e", size = 783051, upload-time = "2026-01-14T23:14:29.929Z" }, + { url = "https://files.pythonhosted.org/packages/1e/f4/6ed03e71dca6348a5188363a34f5e26ffd5db1404780288ff0d79513bce4/regex-2026.1.15-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:e43a55f378df1e7a4fa3547c88d9a5a9b7113f653a66821bcea4718fe6c58763", size = 854485, 
upload-time = "2026-01-14T23:14:31.366Z" }, + { url = "https://files.pythonhosted.org/packages/d9/9a/8e8560bd78caded8eb137e3e47612430a05b9a772caf60876435192d670a/regex-2026.1.15-cp311-cp311-musllinux_1_2_riscv64.whl", hash = "sha256:f82110ab962a541737bd0ce87978d4c658f06e7591ba899192e2712a517badbb", size = 762195, upload-time = "2026-01-14T23:14:32.802Z" }, + { url = "https://files.pythonhosted.org/packages/38/6b/61fc710f9aa8dfcd764fe27d37edfaa023b1a23305a0d84fccd5adb346ea/regex-2026.1.15-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:27618391db7bdaf87ac6c92b31e8f0dfb83a9de0075855152b720140bda177a2", size = 845986, upload-time = "2026-01-14T23:14:34.898Z" }, + { url = "https://files.pythonhosted.org/packages/fd/2e/fbee4cb93f9d686901a7ca8d94285b80405e8c34fe4107f63ffcbfb56379/regex-2026.1.15-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:bfb0d6be01fbae8d6655c8ca21b3b72458606c4aec9bbc932db758d47aba6db1", size = 788992, upload-time = "2026-01-14T23:14:37.116Z" }, + { url = "https://files.pythonhosted.org/packages/ed/14/3076348f3f586de64b1ab75a3fbabdaab7684af7f308ad43be7ef1849e55/regex-2026.1.15-cp311-cp311-win32.whl", hash = "sha256:b10e42a6de0e32559a92f2f8dc908478cc0fa02838d7dbe764c44dca3fa13569", size = 265893, upload-time = "2026-01-14T23:14:38.426Z" }, + { url = "https://files.pythonhosted.org/packages/0f/19/772cf8b5fc803f5c89ba85d8b1870a1ca580dc482aa030383a9289c82e44/regex-2026.1.15-cp311-cp311-win_amd64.whl", hash = "sha256:e9bf3f0bbdb56633c07d7116ae60a576f846efdd86a8848f8d62b749e1209ca7", size = 277840, upload-time = "2026-01-14T23:14:39.785Z" }, + { url = "https://files.pythonhosted.org/packages/78/84/d05f61142709474da3c0853222d91086d3e1372bcdab516c6fd8d80f3297/regex-2026.1.15-cp311-cp311-win_arm64.whl", hash = "sha256:41aef6f953283291c4e4e6850607bd71502be67779586a61472beacb315c97ec", size = 270374, upload-time = "2026-01-14T23:14:41.592Z" }, + { url = 
"https://files.pythonhosted.org/packages/92/81/10d8cf43c807d0326efe874c1b79f22bfb0fb226027b0b19ebc26d301408/regex-2026.1.15-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:4c8fcc5793dde01641a35905d6731ee1548f02b956815f8f1cab89e515a5bdf1", size = 489398, upload-time = "2026-01-14T23:14:43.741Z" }, + { url = "https://files.pythonhosted.org/packages/90/b0/7c2a74e74ef2a7c32de724658a69a862880e3e4155cba992ba04d1c70400/regex-2026.1.15-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:bfd876041a956e6a90ad7cdb3f6a630c07d491280bfeed4544053cd434901681", size = 291339, upload-time = "2026-01-14T23:14:45.183Z" }, + { url = "https://files.pythonhosted.org/packages/19/4d/16d0773d0c818417f4cc20aa0da90064b966d22cd62a8c46765b5bd2d643/regex-2026.1.15-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:9250d087bc92b7d4899ccd5539a1b2334e44eee85d848c4c1aef8e221d3f8c8f", size = 289003, upload-time = "2026-01-14T23:14:47.25Z" }, + { url = "https://files.pythonhosted.org/packages/c6/e4/1fc4599450c9f0863d9406e944592d968b8d6dfd0d552a7d569e43bceada/regex-2026.1.15-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:c8a154cf6537ebbc110e24dabe53095e714245c272da9c1be05734bdad4a61aa", size = 798656, upload-time = "2026-01-14T23:14:48.77Z" }, + { url = "https://files.pythonhosted.org/packages/b2/e6/59650d73a73fa8a60b3a590545bfcf1172b4384a7df2e7fe7b9aab4e2da9/regex-2026.1.15-cp312-cp312-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:8050ba2e3ea1d8731a549e83c18d2f0999fbc99a5f6bd06b4c91449f55291804", size = 864252, upload-time = "2026-01-14T23:14:50.528Z" }, + { url = "https://files.pythonhosted.org/packages/6e/ab/1d0f4d50a1638849a97d731364c9a80fa304fec46325e48330c170ee8e80/regex-2026.1.15-cp312-cp312-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:0bf065240704cb8951cc04972cf107063917022511273e0969bdb34fc173456c", size = 912268, upload-time = 
"2026-01-14T23:14:52.952Z" }, + { url = "https://files.pythonhosted.org/packages/dd/df/0d722c030c82faa1d331d1921ee268a4e8fb55ca8b9042c9341c352f17fa/regex-2026.1.15-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c32bef3e7aeee75746748643667668ef941d28b003bfc89994ecf09a10f7a1b5", size = 803589, upload-time = "2026-01-14T23:14:55.182Z" }, + { url = "https://files.pythonhosted.org/packages/66/23/33289beba7ccb8b805c6610a8913d0131f834928afc555b241caabd422a9/regex-2026.1.15-cp312-cp312-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:d5eaa4a4c5b1906bd0d2508d68927f15b81821f85092e06f1a34a4254b0e1af3", size = 775700, upload-time = "2026-01-14T23:14:56.707Z" }, + { url = "https://files.pythonhosted.org/packages/e7/65/bf3a42fa6897a0d3afa81acb25c42f4b71c274f698ceabd75523259f6688/regex-2026.1.15-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:86c1077a3cc60d453d4084d5b9649065f3bf1184e22992bd322e1f081d3117fb", size = 787928, upload-time = "2026-01-14T23:14:58.312Z" }, + { url = "https://files.pythonhosted.org/packages/f4/f5/13bf65864fc314f68cdd6d8ca94adcab064d4d39dbd0b10fef29a9da48fc/regex-2026.1.15-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:2b091aefc05c78d286657cd4db95f2e6313375ff65dcf085e42e4c04d9c8d410", size = 858607, upload-time = "2026-01-14T23:15:00.657Z" }, + { url = "https://files.pythonhosted.org/packages/a3/31/040e589834d7a439ee43fb0e1e902bc81bd58a5ba81acffe586bb3321d35/regex-2026.1.15-cp312-cp312-musllinux_1_2_riscv64.whl", hash = "sha256:57e7d17f59f9ebfa9667e6e5a1c0127b96b87cb9cede8335482451ed00788ba4", size = 763729, upload-time = "2026-01-14T23:15:02.248Z" }, + { url = "https://files.pythonhosted.org/packages/9b/84/6921e8129687a427edf25a34a5594b588b6d88f491320b9de5b6339a4fcb/regex-2026.1.15-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:c6c4dcdfff2c08509faa15d36ba7e5ef5fcfab25f1e8f85a0c8f45bc3a30725d", size = 850697, upload-time = "2026-01-14T23:15:03.878Z" }, + { url = 
"https://files.pythonhosted.org/packages/8a/87/3d06143d4b128f4229158f2de5de6c8f2485170c7221e61bf381313314b2/regex-2026.1.15-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:cf8ff04c642716a7f2048713ddc6278c5fd41faa3b9cab12607c7abecd012c22", size = 789849, upload-time = "2026-01-14T23:15:06.102Z" }, + { url = "https://files.pythonhosted.org/packages/77/69/c50a63842b6bd48850ebc7ab22d46e7a2a32d824ad6c605b218441814639/regex-2026.1.15-cp312-cp312-win32.whl", hash = "sha256:82345326b1d8d56afbe41d881fdf62f1926d7264b2fc1537f99ae5da9aad7913", size = 266279, upload-time = "2026-01-14T23:15:07.678Z" }, + { url = "https://files.pythonhosted.org/packages/f2/36/39d0b29d087e2b11fd8191e15e81cce1b635fcc845297c67f11d0d19274d/regex-2026.1.15-cp312-cp312-win_amd64.whl", hash = "sha256:4def140aa6156bc64ee9912383d4038f3fdd18fee03a6f222abd4de6357ce42a", size = 277166, upload-time = "2026-01-14T23:15:09.257Z" }, + { url = "https://files.pythonhosted.org/packages/28/32/5b8e476a12262748851fa8ab1b0be540360692325975b094e594dfebbb52/regex-2026.1.15-cp312-cp312-win_arm64.whl", hash = "sha256:c6c565d9a6e1a8d783c1948937ffc377dd5771e83bd56de8317c450a954d2056", size = 270415, upload-time = "2026-01-14T23:15:10.743Z" }, + { url = "https://files.pythonhosted.org/packages/f8/2e/6870bb16e982669b674cce3ee9ff2d1d46ab80528ee6bcc20fb2292efb60/regex-2026.1.15-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:e69d0deeb977ffe7ed3d2e4439360089f9c3f217ada608f0f88ebd67afb6385e", size = 489164, upload-time = "2026-01-14T23:15:13.962Z" }, + { url = "https://files.pythonhosted.org/packages/dc/67/9774542e203849b0286badf67199970a44ebdb0cc5fb739f06e47ada72f8/regex-2026.1.15-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:3601ffb5375de85a16f407854d11cca8fe3f5febbe3ac78fb2866bb220c74d10", size = 291218, upload-time = "2026-01-14T23:15:15.647Z" }, + { url = 
"https://files.pythonhosted.org/packages/b2/87/b0cda79f22b8dee05f774922a214da109f9a4c0eca5da2c9d72d77ea062c/regex-2026.1.15-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:4c5ef43b5c2d4114eb8ea424bb8c9cec01d5d17f242af88b2448f5ee81caadbc", size = 288895, upload-time = "2026-01-14T23:15:17.788Z" }, + { url = "https://files.pythonhosted.org/packages/3b/6a/0041f0a2170d32be01ab981d6346c83a8934277d82c780d60b127331f264/regex-2026.1.15-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:968c14d4f03e10b2fd960f1d5168c1f0ac969381d3c1fcc973bc45fb06346599", size = 798680, upload-time = "2026-01-14T23:15:19.342Z" }, + { url = "https://files.pythonhosted.org/packages/58/de/30e1cfcdbe3e891324aa7568b7c968771f82190df5524fabc1138cb2d45a/regex-2026.1.15-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:56a5595d0f892f214609c9f76b41b7428bed439d98dc961efafdd1354d42baae", size = 864210, upload-time = "2026-01-14T23:15:22.005Z" }, + { url = "https://files.pythonhosted.org/packages/64/44/4db2f5c5ca0ccd40ff052ae7b1e9731352fcdad946c2b812285a7505ca75/regex-2026.1.15-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:0bf650f26087363434c4e560011f8e4e738f6f3e029b85d4904c50135b86cfa5", size = 912358, upload-time = "2026-01-14T23:15:24.569Z" }, + { url = "https://files.pythonhosted.org/packages/79/b6/e6a5665d43a7c42467138c8a2549be432bad22cbd206f5ec87162de74bd7/regex-2026.1.15-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:18388a62989c72ac24de75f1449d0fb0b04dfccd0a1a7c1c43af5eb503d890f6", size = 803583, upload-time = "2026-01-14T23:15:26.526Z" }, + { url = "https://files.pythonhosted.org/packages/e7/53/7cd478222169d85d74d7437e74750005e993f52f335f7c04ff7adfda3310/regex-2026.1.15-cp313-cp313-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = 
"sha256:6d220a2517f5893f55daac983bfa9fe998a7dbcaee4f5d27a88500f8b7873788", size = 775782, upload-time = "2026-01-14T23:15:29.352Z" }, + { url = "https://files.pythonhosted.org/packages/ca/b5/75f9a9ee4b03a7c009fe60500fe550b45df94f0955ca29af16333ef557c5/regex-2026.1.15-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:c9c08c2fbc6120e70abff5d7f28ffb4d969e14294fb2143b4b5c7d20e46d1714", size = 787978, upload-time = "2026-01-14T23:15:31.295Z" }, + { url = "https://files.pythonhosted.org/packages/72/b3/79821c826245bbe9ccbb54f6eadb7879c722fd3e0248c17bfc90bf54e123/regex-2026.1.15-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:7ef7d5d4bd49ec7364315167a4134a015f61e8266c6d446fc116a9ac4456e10d", size = 858550, upload-time = "2026-01-14T23:15:33.558Z" }, + { url = "https://files.pythonhosted.org/packages/4a/85/2ab5f77a1c465745bfbfcb3ad63178a58337ae8d5274315e2cc623a822fa/regex-2026.1.15-cp313-cp313-musllinux_1_2_riscv64.whl", hash = "sha256:6e42844ad64194fa08d5ccb75fe6a459b9b08e6d7296bd704460168d58a388f3", size = 763747, upload-time = "2026-01-14T23:15:35.206Z" }, + { url = "https://files.pythonhosted.org/packages/6d/84/c27df502d4bfe2873a3e3a7cf1bdb2b9cc10284d1a44797cf38bed790470/regex-2026.1.15-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:cfecdaa4b19f9ca534746eb3b55a5195d5c95b88cac32a205e981ec0a22b7d31", size = 850615, upload-time = "2026-01-14T23:15:37.523Z" }, + { url = "https://files.pythonhosted.org/packages/7d/b7/658a9782fb253680aa8ecb5ccbb51f69e088ed48142c46d9f0c99b46c575/regex-2026.1.15-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:08df9722d9b87834a3d701f3fca570b2be115654dbfd30179f30ab2f39d606d3", size = 789951, upload-time = "2026-01-14T23:15:39.582Z" }, + { url = "https://files.pythonhosted.org/packages/fc/2a/5928af114441e059f15b2f63e188bd00c6529b3051c974ade7444b85fcda/regex-2026.1.15-cp313-cp313-win32.whl", hash = "sha256:d426616dae0967ca225ab12c22274eb816558f2f99ccb4a1d52ca92e8baf180f", size = 266275, upload-time = 
"2026-01-14T23:15:42.108Z" }, + { url = "https://files.pythonhosted.org/packages/4f/16/5bfbb89e435897bff28cf0352a992ca719d9e55ebf8b629203c96b6ce4f7/regex-2026.1.15-cp313-cp313-win_amd64.whl", hash = "sha256:febd38857b09867d3ed3f4f1af7d241c5c50362e25ef43034995b77a50df494e", size = 277145, upload-time = "2026-01-14T23:15:44.244Z" }, + { url = "https://files.pythonhosted.org/packages/56/c1/a09ff7392ef4233296e821aec5f78c51be5e91ffde0d163059e50fd75835/regex-2026.1.15-cp313-cp313-win_arm64.whl", hash = "sha256:8e32f7896f83774f91499d239e24cebfadbc07639c1494bb7213983842348337", size = 270411, upload-time = "2026-01-14T23:15:45.858Z" }, + { url = "https://files.pythonhosted.org/packages/3c/38/0cfd5a78e5c6db00e6782fdae70458f89850ce95baa5e8694ab91d89744f/regex-2026.1.15-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:ec94c04149b6a7b8120f9f44565722c7ae31b7a6d2275569d2eefa76b83da3be", size = 492068, upload-time = "2026-01-14T23:15:47.616Z" }, + { url = "https://files.pythonhosted.org/packages/50/72/6c86acff16cb7c959c4355826bbf06aad670682d07c8f3998d9ef4fee7cd/regex-2026.1.15-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:40c86d8046915bb9aeb15d3f3f15b6fd500b8ea4485b30e1bbc799dab3fe29f8", size = 292756, upload-time = "2026-01-14T23:15:49.307Z" }, + { url = "https://files.pythonhosted.org/packages/4e/58/df7fb69eadfe76526ddfce28abdc0af09ffe65f20c2c90932e89d705153f/regex-2026.1.15-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:726ea4e727aba21643205edad8f2187ec682d3305d790f73b7a51c7587b64bdd", size = 291114, upload-time = "2026-01-14T23:15:51.484Z" }, + { url = "https://files.pythonhosted.org/packages/ed/6c/a4011cd1cf96b90d2cdc7e156f91efbd26531e822a7fbb82a43c1016678e/regex-2026.1.15-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:1cb740d044aff31898804e7bf1181cc72c03d11dfd19932b9911ffc19a79070a", size = 807524, upload-time = "2026-01-14T23:15:53.102Z" }, + { url = 
"https://files.pythonhosted.org/packages/1d/25/a53ffb73183f69c3e9f4355c4922b76d2840aee160af6af5fac229b6201d/regex-2026.1.15-cp313-cp313t-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:05d75a668e9ea16f832390d22131fe1e8acc8389a694c8febc3e340b0f810b93", size = 873455, upload-time = "2026-01-14T23:15:54.956Z" }, + { url = "https://files.pythonhosted.org/packages/66/0b/8b47fc2e8f97d9b4a851736f3890a5f786443aa8901061c55f24c955f45b/regex-2026.1.15-cp313-cp313t-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:d991483606f3dbec93287b9f35596f41aa2e92b7c2ebbb935b63f409e243c9af", size = 915007, upload-time = "2026-01-14T23:15:57.041Z" }, + { url = "https://files.pythonhosted.org/packages/c2/fa/97de0d681e6d26fabe71968dbee06dd52819e9a22fdce5dac7256c31ed84/regex-2026.1.15-cp313-cp313t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:194312a14819d3e44628a44ed6fea6898fdbecb0550089d84c403475138d0a09", size = 812794, upload-time = "2026-01-14T23:15:58.916Z" }, + { url = "https://files.pythonhosted.org/packages/22/38/e752f94e860d429654aa2b1c51880bff8dfe8f084268258adf9151cf1f53/regex-2026.1.15-cp313-cp313t-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:fe2fda4110a3d0bc163c2e0664be44657431440722c5c5315c65155cab92f9e5", size = 781159, upload-time = "2026-01-14T23:16:00.817Z" }, + { url = "https://files.pythonhosted.org/packages/e9/a7/d739ffaef33c378fc888302a018d7f81080393d96c476b058b8c64fd2b0d/regex-2026.1.15-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:124dc36c85d34ef2d9164da41a53c1c8c122cfb1f6e1ec377a1f27ee81deb794", size = 795558, upload-time = "2026-01-14T23:16:03.267Z" }, + { url = "https://files.pythonhosted.org/packages/3e/c4/542876f9a0ac576100fc73e9c75b779f5c31e3527576cfc9cb3009dcc58a/regex-2026.1.15-cp313-cp313t-musllinux_1_2_ppc64le.whl", hash = "sha256:a1774cd1981cd212506a23a14dba7fdeaee259f5deba2df6229966d9911e767a", size = 868427, 
upload-time = "2026-01-14T23:16:05.646Z" }, + { url = "https://files.pythonhosted.org/packages/fc/0f/d5655bea5b22069e32ae85a947aa564912f23758e112cdb74212848a1a1b/regex-2026.1.15-cp313-cp313t-musllinux_1_2_riscv64.whl", hash = "sha256:b5f7d8d2867152cdb625e72a530d2ccb48a3d199159144cbdd63870882fb6f80", size = 769939, upload-time = "2026-01-14T23:16:07.542Z" }, + { url = "https://files.pythonhosted.org/packages/20/06/7e18a4fa9d326daeda46d471a44ef94201c46eaa26dbbb780b5d92cbfdda/regex-2026.1.15-cp313-cp313t-musllinux_1_2_s390x.whl", hash = "sha256:492534a0ab925d1db998defc3c302dae3616a2fc3fe2e08db1472348f096ddf2", size = 854753, upload-time = "2026-01-14T23:16:10.395Z" }, + { url = "https://files.pythonhosted.org/packages/3b/67/dc8946ef3965e166f558ef3b47f492bc364e96a265eb4a2bb3ca765c8e46/regex-2026.1.15-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:c661fc820cfb33e166bf2450d3dadbda47c8d8981898adb9b6fe24e5e582ba60", size = 799559, upload-time = "2026-01-14T23:16:12.347Z" }, + { url = "https://files.pythonhosted.org/packages/a5/61/1bba81ff6d50c86c65d9fd84ce9699dd106438ee4cdb105bf60374ee8412/regex-2026.1.15-cp313-cp313t-win32.whl", hash = "sha256:99ad739c3686085e614bf77a508e26954ff1b8f14da0e3765ff7abbf7799f952", size = 268879, upload-time = "2026-01-14T23:16:14.049Z" }, + { url = "https://files.pythonhosted.org/packages/e9/5e/cef7d4c5fb0ea3ac5c775fd37db5747f7378b29526cc83f572198924ff47/regex-2026.1.15-cp313-cp313t-win_amd64.whl", hash = "sha256:32655d17905e7ff8ba5c764c43cb124e34a9245e45b83c22e81041e1071aee10", size = 280317, upload-time = "2026-01-14T23:16:15.718Z" }, + { url = "https://files.pythonhosted.org/packages/b4/52/4317f7a5988544e34ab57b4bde0f04944c4786128c933fb09825924d3e82/regex-2026.1.15-cp313-cp313t-win_arm64.whl", hash = "sha256:b2a13dd6a95e95a489ca242319d18fc02e07ceb28fa9ad146385194d95b3c829", size = 271551, upload-time = "2026-01-14T23:16:17.533Z" }, ] [[package]] From 87675b49fd8f16caa123f2ebe2270c90adacc235 Mon Sep 17 00:00:00 2001 From: Rip&Tear 
<84775494+theCyberTech@users.noreply.github.com> Date: Wed, 11 Feb 2026 14:32:10 +0800 Subject: [PATCH 2/9] test: avoid URL substring assertion in brave search test (#4453) --- lib/crewai-tools/tests/tools/brave_search_tool_test.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/lib/crewai-tools/tests/tools/brave_search_tool_test.py b/lib/crewai-tools/tests/tools/brave_search_tool_test.py index 361086abe..6e1300622 100644 --- a/lib/crewai-tools/tests/tools/brave_search_tool_test.py +++ b/lib/crewai-tools/tests/tools/brave_search_tool_test.py @@ -33,8 +33,11 @@ def test_brave_tool_search(mock_get, brave_tool): mock_get.return_value.json.return_value = mock_response result = brave_tool.run(query="test") - assert "Test Title" in result - assert "http://test.com" in result + data = json.loads(result) + assert isinstance(data, list) + assert len(data) >= 1 + assert data[0]["title"] == "Test Title" + assert data[0]["url"] == "http://test.com" @patch("requests.get") From 46e1b021545cd3766d08a70e25e6b9714638284f Mon Sep 17 00:00:00 2001 From: Rip&Tear <84775494+theCyberTech@users.noreply.github.com> Date: Wed, 11 Feb 2026 18:20:07 +0800 Subject: [PATCH 3/9] chore: fix codeql coverage and action version (#4454) --- .github/codeql/codeql-config.yml | 5 +++++ .github/workflows/codeql.yml | 4 ++-- 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/.github/codeql/codeql-config.yml b/.github/codeql/codeql-config.yml index f7d50a775..6317a13c7 100644 --- a/.github/codeql/codeql-config.yml +++ b/.github/codeql/codeql-config.yml @@ -14,13 +14,18 @@ paths-ignore: - "lib/crewai/src/crewai/experimental/a2a/**" paths: + # Include GitHub Actions workflows/composite actions for CodeQL actions analysis + - ".github/workflows/**" + - ".github/actions/**" # Include all Python source code from workspace packages - "lib/crewai/src/**" - "lib/crewai-tools/src/**" + - "lib/crewai-files/src/**" - "lib/devtools/src/**" # Include tests (but exclude cassettes via 
paths-ignore) - "lib/crewai/tests/**" - "lib/crewai-tools/tests/**" + - "lib/crewai-files/tests/**" - "lib/devtools/tests/**" # Configure specific queries or packs if needed diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml index 2fca96dcd..d3a21d1ac 100644 --- a/.github/workflows/codeql.yml +++ b/.github/workflows/codeql.yml @@ -69,7 +69,7 @@ jobs: # Initializes the CodeQL tools for scanning. - name: Initialize CodeQL - uses: github/codeql-action/init@v3 + uses: github/codeql-action/init@v4 with: languages: ${{ matrix.language }} build-mode: ${{ matrix.build-mode }} @@ -98,6 +98,6 @@ jobs: exit 1 - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@v3 + uses: github/codeql-action/analyze@v4 with: category: "/language:${{matrix.language}}" From 9b585a934d5c5f9c2f6e6add3231ef0fc161a24a Mon Sep 17 00:00:00 2001 From: Greyson LaLonde Date: Wed, 11 Feb 2026 09:30:07 -0500 Subject: [PATCH 4/9] fix: pass `started_event_id` to crew --- lib/crewai/src/crewai/crew.py | 14 ++++++++++++-- lib/crewai/src/crewai/crews/utils.py | 7 +++---- 2 files changed, 15 insertions(+), 6 deletions(-) diff --git a/lib/crewai/src/crewai/crew.py b/lib/crewai/src/crewai/crew.py index c69dae65a..94868b830 100644 --- a/lib/crewai/src/crewai/crew.py +++ b/lib/crewai/src/crewai/crew.py @@ -187,6 +187,7 @@ class Crew(FlowTrackable, BaseModel): _task_output_handler: TaskOutputStorageHandler = PrivateAttr( default_factory=TaskOutputStorageHandler ) + _kickoff_event_id: str | None = PrivateAttr(default=None) name: str | None = Field(default="crew") cache: bool = Field(default=True) @@ -759,7 +760,11 @@ class Crew(FlowTrackable, BaseModel): except Exception as e: crewai_event_bus.emit( self, - CrewKickoffFailedEvent(error=str(e), crew_name=self.name), + CrewKickoffFailedEvent( + error=str(e), + crew_name=self.name, + started_event_id=self._kickoff_event_id, + ), ) raise finally: @@ -949,7 +954,11 @@ class Crew(FlowTrackable, BaseModel): except Exception as e: 
crewai_event_bus.emit( self, - CrewKickoffFailedEvent(error=str(e), crew_name=self.name), + CrewKickoffFailedEvent( + error=str(e), + crew_name=self.name, + started_event_id=self._kickoff_event_id, + ), ) raise finally: @@ -1524,6 +1533,7 @@ class Crew(FlowTrackable, BaseModel): crew_name=self.name, output=final_task_output, total_tokens=self.token_usage.total_tokens, + started_event_id=self._kickoff_event_id, ), ) diff --git a/lib/crewai/src/crewai/crews/utils.py b/lib/crewai/src/crewai/crews/utils.py index 2ac8266cc..a432d2fc2 100644 --- a/lib/crewai/src/crewai/crews/utils.py +++ b/lib/crewai/src/crewai/crews/utils.py @@ -265,10 +265,9 @@ def prepare_kickoff( normalized = {} normalized = before_callback(normalized) - future = crewai_event_bus.emit( - crew, - CrewKickoffStartedEvent(crew_name=crew.name, inputs=normalized), - ) + started_event = CrewKickoffStartedEvent(crew_name=crew.name, inputs=normalized) + crew._kickoff_event_id = started_event.event_id + future = crewai_event_bus.emit(crew, started_event) if future is not None: try: future.result() From 3a22e80764ca93ff3933083573b6c2fd4ca20875 Mon Sep 17 00:00:00 2001 From: Greyson LaLonde Date: Wed, 11 Feb 2026 10:02:31 -0500 Subject: [PATCH 5/9] fix: ensure openai tool call stream is finalized --- .../llms/providers/openai/completion.py | 200 ++++++++++-------- lib/crewai/tests/llms/openai/test_openai.py | 165 +++++++++++++++ 2 files changed, 277 insertions(+), 88 deletions(-) diff --git a/lib/crewai/src/crewai/llms/providers/openai/completion.py b/lib/crewai/src/crewai/llms/providers/openai/completion.py index 37b686e3d..22b9cda3b 100644 --- a/lib/crewai/src/crewai/llms/providers/openai/completion.py +++ b/lib/crewai/src/crewai/llms/providers/openai/completion.py @@ -1696,6 +1696,99 @@ class OpenAICompletion(BaseLLM): return content + def _finalize_streaming_response( + self, + full_response: str, + tool_calls: dict[int, dict[str, Any]], + usage_data: dict[str, int], + params: dict[str, Any], + 
available_functions: dict[str, Any] | None = None, + from_task: Any | None = None, + from_agent: Any | None = None, + ) -> str | list[dict[str, Any]]: + """Finalize a streaming response with usage tracking, tool call handling, and events. + + Args: + full_response: The accumulated text response from the stream. + tool_calls: Accumulated tool calls from the stream, keyed by index. + usage_data: Token usage data from the stream. + params: The completion parameters containing messages. + available_functions: Available functions for tool calling. + from_task: Task that initiated the call. + from_agent: Agent that initiated the call. + + Returns: + Tool calls list when tools were invoked without available_functions, + tool execution result when available_functions is provided, + or the text response string. + """ + self._track_token_usage_internal(usage_data) + + if tool_calls and not available_functions: + tool_calls_list = [ + { + "id": call_data["id"], + "type": "function", + "function": { + "name": call_data["name"], + "arguments": call_data["arguments"], + }, + "index": call_data["index"], + } + for call_data in tool_calls.values() + ] + self._emit_call_completed_event( + response=tool_calls_list, + call_type=LLMCallType.TOOL_CALL, + from_task=from_task, + from_agent=from_agent, + messages=params["messages"], + ) + return tool_calls_list + + if tool_calls and available_functions: + for call_data in tool_calls.values(): + function_name = call_data["name"] + arguments = call_data["arguments"] + + if not function_name or not arguments: + continue + + if function_name not in available_functions: + logging.warning( + f"Function '{function_name}' not found in available functions" + ) + continue + + try: + function_args = json.loads(arguments) + except json.JSONDecodeError as e: + logging.error(f"Failed to parse streamed tool arguments: {e}") + continue + + result = self._handle_tool_execution( + function_name=function_name, + function_args=function_args, + 
available_functions=available_functions, + from_task=from_task, + from_agent=from_agent, + ) + + if result is not None: + return result + + full_response = self._apply_stop_words(full_response) + + self._emit_call_completed_event( + response=full_response, + call_type=LLMCallType.LLM_CALL, + from_task=from_task, + from_agent=from_agent, + messages=params["messages"], + ) + + return full_response + def _handle_streaming_completion( self, params: dict[str, Any], @@ -1703,7 +1796,7 @@ class OpenAICompletion(BaseLLM): from_task: Any | None = None, from_agent: Any | None = None, response_model: type[BaseModel] | None = None, - ) -> str | BaseModel: + ) -> str | list[dict[str, Any]] | BaseModel: """Handle streaming chat completion.""" full_response = "" tool_calls: dict[int, dict[str, Any]] = {} @@ -1820,54 +1913,20 @@ class OpenAICompletion(BaseLLM): response_id=response_id_stream, ) - self._track_token_usage_internal(usage_data) - - if tool_calls and available_functions: - for call_data in tool_calls.values(): - function_name = call_data["name"] - arguments = call_data["arguments"] - - # Skip if function name is empty or arguments are empty - if not function_name or not arguments: - continue - - # Check if function exists in available functions - if function_name not in available_functions: - logging.warning( - f"Function '{function_name}' not found in available functions" - ) - continue - - try: - function_args = json.loads(arguments) - except json.JSONDecodeError as e: - logging.error(f"Failed to parse streamed tool arguments: {e}") - continue - - result = self._handle_tool_execution( - function_name=function_name, - function_args=function_args, - available_functions=available_functions, - from_task=from_task, - from_agent=from_agent, - ) - - if result is not None: - return result - - full_response = self._apply_stop_words(full_response) - - self._emit_call_completed_event( - response=full_response, - call_type=LLMCallType.LLM_CALL, + result = 
self._finalize_streaming_response( + full_response=full_response, + tool_calls=tool_calls, + usage_data=usage_data, + params=params, + available_functions=available_functions, from_task=from_task, from_agent=from_agent, - messages=params["messages"], - ) - - return self._invoke_after_llm_call_hooks( - params["messages"], full_response, from_agent ) + if isinstance(result, str): + return self._invoke_after_llm_call_hooks( + params["messages"], result, from_agent + ) + return result async def _ahandle_completion( self, @@ -2016,7 +2075,7 @@ class OpenAICompletion(BaseLLM): from_task: Any | None = None, from_agent: Any | None = None, response_model: type[BaseModel] | None = None, - ) -> str | BaseModel: + ) -> str | list[dict[str, Any]] | BaseModel: """Handle async streaming chat completion.""" full_response = "" tool_calls: dict[int, dict[str, Any]] = {} @@ -2142,51 +2201,16 @@ class OpenAICompletion(BaseLLM): response_id=response_id_stream, ) - self._track_token_usage_internal(usage_data) - - if tool_calls and available_functions: - for call_data in tool_calls.values(): - function_name = call_data["name"] - arguments = call_data["arguments"] - - if not function_name or not arguments: - continue - - if function_name not in available_functions: - logging.warning( - f"Function '{function_name}' not found in available functions" - ) - continue - - try: - function_args = json.loads(arguments) - except json.JSONDecodeError as e: - logging.error(f"Failed to parse streamed tool arguments: {e}") - continue - - result = self._handle_tool_execution( - function_name=function_name, - function_args=function_args, - available_functions=available_functions, - from_task=from_task, - from_agent=from_agent, - ) - - if result is not None: - return result - - full_response = self._apply_stop_words(full_response) - - self._emit_call_completed_event( - response=full_response, - call_type=LLMCallType.LLM_CALL, + return self._finalize_streaming_response( + full_response=full_response, + 
tool_calls=tool_calls, + usage_data=usage_data, + params=params, + available_functions=available_functions, from_task=from_task, from_agent=from_agent, - messages=params["messages"], ) - return full_response - def supports_function_calling(self) -> bool: """Check if the model supports function calling.""" return not self.is_o1_model diff --git a/lib/crewai/tests/llms/openai/test_openai.py b/lib/crewai/tests/llms/openai/test_openai.py index f88d8639c..a75a37681 100644 --- a/lib/crewai/tests/llms/openai/test_openai.py +++ b/lib/crewai/tests/llms/openai/test_openai.py @@ -1,6 +1,7 @@ import os import sys import types +from typing import Any from unittest.mock import patch, MagicMock import openai import pytest @@ -1578,3 +1579,167 @@ def test_openai_structured_output_preserves_json_with_stop_word_patterns(): assert "Action:" in result.action_taken assert "Observation:" in result.observation_result assert "Final Answer:" in result.final_answer + + +def test_openai_streaming_returns_tool_calls_without_available_functions(): + """Test that streaming returns tool calls list when available_functions is None. + + This mirrors the non-streaming path where tool_calls are returned for + the executor to handle. Reproduces the bug where streaming with tool + calls would return empty text instead of tool_calls when + available_functions was not provided (as the crew executor does). 
+ """ + llm = LLM(model="openai/gpt-4o-mini", stream=True) + + mock_chunk_1 = MagicMock() + mock_chunk_1.choices = [MagicMock()] + mock_chunk_1.choices[0].delta = MagicMock() + mock_chunk_1.choices[0].delta.content = None + mock_chunk_1.choices[0].delta.tool_calls = [MagicMock()] + mock_chunk_1.choices[0].delta.tool_calls[0].index = 0 + mock_chunk_1.choices[0].delta.tool_calls[0].id = "call_abc123" + mock_chunk_1.choices[0].delta.tool_calls[0].function = MagicMock() + mock_chunk_1.choices[0].delta.tool_calls[0].function.name = "calculator" + mock_chunk_1.choices[0].delta.tool_calls[0].function.arguments = '{"expr' + mock_chunk_1.choices[0].finish_reason = None + mock_chunk_1.usage = None + mock_chunk_1.id = "chatcmpl-1" + + mock_chunk_2 = MagicMock() + mock_chunk_2.choices = [MagicMock()] + mock_chunk_2.choices[0].delta = MagicMock() + mock_chunk_2.choices[0].delta.content = None + mock_chunk_2.choices[0].delta.tool_calls = [MagicMock()] + mock_chunk_2.choices[0].delta.tool_calls[0].index = 0 + mock_chunk_2.choices[0].delta.tool_calls[0].id = None + mock_chunk_2.choices[0].delta.tool_calls[0].function = MagicMock() + mock_chunk_2.choices[0].delta.tool_calls[0].function.name = None + mock_chunk_2.choices[0].delta.tool_calls[0].function.arguments = 'ession": "1+1"}' + mock_chunk_2.choices[0].finish_reason = None + mock_chunk_2.usage = None + mock_chunk_2.id = "chatcmpl-1" + + mock_chunk_3 = MagicMock() + mock_chunk_3.choices = [MagicMock()] + mock_chunk_3.choices[0].delta = MagicMock() + mock_chunk_3.choices[0].delta.content = None + mock_chunk_3.choices[0].delta.tool_calls = None + mock_chunk_3.choices[0].finish_reason = "tool_calls" + mock_chunk_3.usage = MagicMock() + mock_chunk_3.usage.prompt_tokens = 10 + mock_chunk_3.usage.completion_tokens = 5 + mock_chunk_3.id = "chatcmpl-1" + + with patch.object( + llm.client.chat.completions, "create", return_value=iter([mock_chunk_1, mock_chunk_2, mock_chunk_3]) + ): + result = llm.call( + messages=[{"role": "user", 
"content": "Calculate 1+1"}], + tools=[{ + "type": "function", + "function": { + "name": "calculator", + "description": "Calculate expression", + "parameters": {"type": "object", "properties": {"expression": {"type": "string"}}}, + }, + }], + available_functions=None, + ) + + assert isinstance(result, list), f"Expected list of tool calls, got {type(result)}: {result}" + assert len(result) == 1 + assert result[0]["function"]["name"] == "calculator" + assert result[0]["function"]["arguments"] == '{"expression": "1+1"}' + assert result[0]["id"] == "call_abc123" + assert result[0]["type"] == "function" + + +@pytest.mark.asyncio +async def test_openai_async_streaming_returns_tool_calls_without_available_functions(): + """Test that async streaming returns tool calls list when available_functions is None. + + Same as the sync test but for the async path (_ahandle_streaming_completion). + """ + llm = LLM(model="openai/gpt-4o-mini", stream=True) + + mock_chunk_1 = MagicMock() + mock_chunk_1.choices = [MagicMock()] + mock_chunk_1.choices[0].delta = MagicMock() + mock_chunk_1.choices[0].delta.content = None + mock_chunk_1.choices[0].delta.tool_calls = [MagicMock()] + mock_chunk_1.choices[0].delta.tool_calls[0].index = 0 + mock_chunk_1.choices[0].delta.tool_calls[0].id = "call_abc123" + mock_chunk_1.choices[0].delta.tool_calls[0].function = MagicMock() + mock_chunk_1.choices[0].delta.tool_calls[0].function.name = "calculator" + mock_chunk_1.choices[0].delta.tool_calls[0].function.arguments = '{"expr' + mock_chunk_1.choices[0].finish_reason = None + mock_chunk_1.usage = None + mock_chunk_1.id = "chatcmpl-1" + + mock_chunk_2 = MagicMock() + mock_chunk_2.choices = [MagicMock()] + mock_chunk_2.choices[0].delta = MagicMock() + mock_chunk_2.choices[0].delta.content = None + mock_chunk_2.choices[0].delta.tool_calls = [MagicMock()] + mock_chunk_2.choices[0].delta.tool_calls[0].index = 0 + mock_chunk_2.choices[0].delta.tool_calls[0].id = None + 
mock_chunk_2.choices[0].delta.tool_calls[0].function = MagicMock() + mock_chunk_2.choices[0].delta.tool_calls[0].function.name = None + mock_chunk_2.choices[0].delta.tool_calls[0].function.arguments = 'ession": "1+1"}' + mock_chunk_2.choices[0].finish_reason = None + mock_chunk_2.usage = None + mock_chunk_2.id = "chatcmpl-1" + + mock_chunk_3 = MagicMock() + mock_chunk_3.choices = [MagicMock()] + mock_chunk_3.choices[0].delta = MagicMock() + mock_chunk_3.choices[0].delta.content = None + mock_chunk_3.choices[0].delta.tool_calls = None + mock_chunk_3.choices[0].finish_reason = "tool_calls" + mock_chunk_3.usage = MagicMock() + mock_chunk_3.usage.prompt_tokens = 10 + mock_chunk_3.usage.completion_tokens = 5 + mock_chunk_3.id = "chatcmpl-1" + + class MockAsyncStream: + """Async iterator that mimics OpenAI's async streaming response.""" + + def __init__(self, chunks: list[Any]) -> None: + self._chunks = chunks + self._index = 0 + + def __aiter__(self) -> "MockAsyncStream": + return self + + async def __anext__(self) -> Any: + if self._index >= len(self._chunks): + raise StopAsyncIteration + chunk = self._chunks[self._index] + self._index += 1 + return chunk + + async def mock_create(**kwargs: Any) -> MockAsyncStream: + return MockAsyncStream([mock_chunk_1, mock_chunk_2, mock_chunk_3]) + + with patch.object( + llm.async_client.chat.completions, "create", side_effect=mock_create + ): + result = await llm.acall( + messages=[{"role": "user", "content": "Calculate 1+1"}], + tools=[{ + "type": "function", + "function": { + "name": "calculator", + "description": "Calculate expression", + "parameters": {"type": "object", "properties": {"expression": {"type": "string"}}}, + }, + }], + available_functions=None, + ) + + assert isinstance(result, list), f"Expected list of tool calls, got {type(result)}: {result}" + assert len(result) == 1 + assert result[0]["function"]["name"] == "calculator" + assert result[0]["function"]["arguments"] == '{"expression": "1+1"}' + assert 
result[0]["id"] == "call_abc123" + assert result[0]["type"] == "function" From 2882df5dafbe634ab797c6d6f19843eaa1d75fd3 Mon Sep 17 00:00:00 2001 From: Mike Plachta Date: Wed, 11 Feb 2026 10:07:24 -0800 Subject: [PATCH 6/9] replace old .cursorrules with AGENTS.md (#4451) * chore: remove .cursorrules file feat: add AGENTS.md file to any newly created file * move the copy of the tests --- .cursorrules | 1429 ----------------- docs/docs.json | 9 +- docs/en/guides/coding-tools/agents-md.mdx | 61 + lib/crewai/src/crewai/cli/create_crew.py | 6 + lib/crewai/src/crewai/cli/create_flow.py | 6 + lib/crewai/src/crewai/cli/templates/AGENTS.md | 1017 ++++++++++++ lib/crewai/src/crewai/cli/tools/main.py | 6 + 7 files changed, 1104 insertions(+), 1430 deletions(-) delete mode 100644 .cursorrules create mode 100644 docs/en/guides/coding-tools/agents-md.mdx create mode 100644 lib/crewai/src/crewai/cli/templates/AGENTS.md diff --git a/.cursorrules b/.cursorrules deleted file mode 100644 index 42ef136ae..000000000 --- a/.cursorrules +++ /dev/null @@ -1,1429 +0,0 @@ -# CrewAI Development Rules -# Comprehensive best practices for developing with the CrewAI library, covering code organization, performance, security, testing, and common patterns. Based on actual CrewAI codebase analysis for accuracy. - -## General Best Practices: -- Leverage structured responses from LLM calls using Pydantic BaseModel for output validation. -- Use the @CrewBase decorator pattern with @agent, @task, and @crew decorators for proper organization. -- Regularly validate outputs from agents and tasks using built-in guardrails or custom validation. -- Use UV for dependency management (CrewAI's standard) with pyproject.toml configuration. -- Python version requirements: 3.10 to 3.14 (as per CrewAI's pyproject.toml). -- Prefer declarative YAML configuration for agents and tasks over hardcoded definitions. 
- -## Code Organization and Structure: -- **Standard CrewAI Project Structure** (from CLI templates): - - `project_name/` (Root directory) - - `.env` (Environment variables - never commit API keys) - - `pyproject.toml` (UV-based dependency management) - - `knowledge/` (Knowledge base files) - - `src/project_name/` - - `__init__.py` - - `main.py` (Entry point) - - `crew.py` (Crew orchestration with @CrewBase decorator) - - `config/` - - `agents.yaml` (Agent definitions) - - `tasks.yaml` (Task definitions) - - `tools/` - - `custom_tool.py` (Custom agent tools) - - `__init__.py` -- **File Naming Conventions**: - - Use descriptive, lowercase names with underscores (e.g., `research_agent.py`). - - Pydantic models: singular names (e.g., `article_summary.py` with class `ArticleSummary`). - - Tests: mirror source file name with `_test` suffix (e.g., `crew_test.py`). -- **CrewAI Class Architecture**: - - Use @CrewBase decorator for main crew class. - - Define agents with @agent decorator returning Agent instances. - - Define tasks with @task decorator returning Task instances. - - Define crew orchestration with @crew decorator returning Crew instance. - - Access configuration via `self.agents_config` and `self.tasks_config`. 
- -## Memory System Patterns: -- **Memory Types** (all supported by CrewAI): - - Short-term memory: ChromaDB with RAG for current context - - Long-term memory: SQLite for task results across sessions - - Entity memory: RAG to track entities (people, places, concepts) - - External memory: Mem0 integration for advanced memory features -- **Memory Configuration**: - - Enable basic memory: `Crew(..., memory=True)` - - Custom storage location: Set `CREWAI_STORAGE_DIR` environment variable - - Memory is stored in platform-specific directories via `appdirs` by default -- **Memory Usage**: - - Memory is automatically managed by agents during task execution - - Access via agent's memory attribute for custom implementations - - Use metadata for categorizing and filtering memory entries - -## Pydantic Integration Patterns: -- **Structured Outputs**: - - Use `output_pydantic` in Task definitions for structured results - - Use `output_json` for JSON dictionary outputs - - Cannot use both output_pydantic and output_json simultaneously -- **Task Output Handling**: - - TaskOutput contains raw, pydantic, and json_dict attributes - - CrewOutput aggregates all task outputs with token usage metrics - - Use model_validate_json for Pydantic model validation -- **Custom Models**: - - Inherit from BaseModel for all data structures - - Use Field descriptions for LLM understanding - - Implement model_validator for custom validation logic - -## YAML Configuration Best Practices: -- **agents.yaml Structure**: - ```yaml - agent_name: - role: "Clear, specific role description" - goal: "Specific goal statement" - backstory: "Detailed background for context" - # Optional: tools, llm, memory, etc. - ``` -- **tasks.yaml Structure**: - ```yaml - task_name: - description: "Detailed task description with context" - expected_output: "Clear output format specification" - agent: agent_name # Reference to agent in agents.yaml - # Optional: context, tools, output_file, etc. 
- ``` -- **Configuration Access**: - - Use `self.agents_config['agent_name']` in @agent methods - - Use `self.tasks_config['task_name']` in @task methods - - Support for dynamic configuration via placeholders like {topic} - -## Tools and Integration Patterns: -- **Custom Tools**: - - Inherit from BaseTool for custom tool implementation - - Use @tool decorator for simple tool definitions - - Implement proper error handling and input validation -- **Tool Integration**: - - Add tools to agents via tools parameter in Agent constructor - - Tools are automatically inherited by tasks from their assigned agents - - Use structured tool outputs for better LLM understanding - -## Performance Considerations: -- **LLM Optimization**: - - Use task context to pass information between sequential tasks - - Implement output caching to avoid redundant LLM calls - - Configure appropriate LLM models per agent for cost/performance balance -- **Memory Management**: - - Be mindful of memory storage growth in long-running applications - - Use score_threshold in memory search to filter relevant results - - Implement periodic memory cleanup if needed -- **Async Operations**: - - Use execute_sync for synchronous task execution - - Consider async patterns for I/O-bound operations in custom tools - -## Security Best Practices: -- **API Key Management**: - - Always use .env files for API keys and sensitive configuration - - Never commit API keys to version control - - Use environment variables in production deployments -- **Input Validation**: - - Validate all inputs using Pydantic models where possible - - Implement guardrails for task output validation - - Use field_validator for custom validation logic -- **Tool Security**: - - Implement proper access controls in custom tools - - Validate tool inputs and outputs - - Follow principle of least privilege for tool permissions - -## Testing Approaches: -- **Unit Testing**: - - Test individual agents, tasks, and tools in isolation - - Use mocking 
for external dependencies (LLMs, APIs) - - Test configuration loading and validation -- **Integration Testing**: - - Test crew execution end-to-end with realistic scenarios - - Verify memory persistence across crew runs - - Test tool integration and data flow between tasks -- **Test Organization**: - - Follow CrewAI's test structure: separate test files for each component - - Use pytest fixtures for common test setup - - Mock LLM responses for consistent, fast tests - -## Common CrewAI Patterns and Anti-patterns: -- **Recommended Patterns**: - - Use sequential Process for dependent tasks, hierarchical for manager delegation - - Implement task context for data flow between tasks - - Use output_file for persistent task results - - Leverage crew callbacks with @before_kickoff and @after_kickoff decorators -- **Anti-patterns to Avoid**: - - Don't hardcode agent configurations in Python code (use YAML) - - Don't create circular task dependencies - - Don't ignore task execution failures without proper error handling - - Don't overload single agents with too many diverse tools -- **Error Handling**: - - Implement task-level guardrails for output validation - - Use try-catch blocks in custom tools - - Set appropriate max_retries for tasks prone to failures - - Log errors with sufficient context for debugging - -## Development Workflow: -- **UV Commands**: - - `crewai create crew ` - Create new crew project - - `crewai install` - Install dependencies via UV - - `crewai run` - Execute the crew - - `uv sync` - Sync dependencies - - `uv add ` - Add new dependencies -- **Project Setup**: - - Use CrewAI CLI for project scaffolding - - Follow the standard directory structure - - Configure agents and tasks in YAML before implementing crew logic -- **Development Tools**: - - Use UV for dependency management (CrewAI standard) - - Configure pre-commit hooks for code quality - - Use pytest for testing with CrewAI's testing patterns - -## Deployment and Production: -- **Environment 
Configuration**: - - Set CREWAI_STORAGE_DIR for controlled memory storage location - - Use proper logging configuration for production monitoring - - Configure appropriate LLM providers and rate limits -- **Containerization**: - - Include knowledge and config directories in Docker images - - Mount memory storage as persistent volumes if needed - - Set proper environment variables for API keys and configuration -- **Monitoring**: - - Monitor token usage via CrewOutput.token_usage - - Track task execution times and success rates - - Implement health checks for long-running crew services - -## CrewAI Flow Patterns and Best Practices - -### Flow Architecture and Structure -- **Use Flow class** for complex multi-step workflows that go beyond simple crew orchestration -- **Combine Flows with Crews** to create sophisticated AI automation pipelines -- **Leverage state management** to share data between flow methods -- **Event-driven design** allows for dynamic and responsive workflow execution - -### Flow Decorators and Control Flow -- **@start()**: Mark entry points for flow execution (can have multiple start methods) -- **@listen()**: Create method dependencies and execution chains -- **@router()**: Implement conditional branching based on method outputs -- **or_()** and **and_()**: Combine multiple trigger conditions for complex workflows - -### Flow State Management Patterns -```python -# Structured state with Pydantic (recommended for complex workflows) -class WorkflowState(BaseModel): - task_results: List[str] = [] - current_step: str = "initialize" - user_preferences: dict = {} - completion_status: bool = False - -class MyFlow(Flow[WorkflowState]): - @start() - def initialize(self): - self.state.current_step = "processing" - # State automatically gets unique UUID in self.state.id - -# Unstructured state (good for simple workflows) -class SimpleFlow(Flow): - @start() - def begin(self): - self.state["counter"] = 0 - self.state["results"] = [] - # Auto-generated ID 
available in self.state["id"] -``` - -### Flow Method Patterns -```python -# Basic sequential flow -@start() -def step_one(self): - return "data from step one" - -@listen(step_one) -def step_two(self, data_from_step_one): - return f"processed: {data_from_step_one}" - -# Parallel execution with convergence -@start() -def task_a(self): - return "result_a" - -@start() -def task_b(self): - return "result_b" - -@listen(and_(task_a, task_b)) -def combine_results(self): - # Waits for both task_a AND task_b to complete - return f"combined: {self.state}" - -# Conditional routing -@router(step_one) -def decision_point(self): - if some_condition: - return "success_path" - return "failure_path" - -@listen("success_path") -def handle_success(self): - # Handle success case - pass - -@listen("failure_path") -def handle_failure(self): - # Handle failure case - pass - -# OR condition listening -@listen(or_(task_a, task_b)) -def process_any_result(self, result): - # Triggers when EITHER task_a OR task_b completes - return f"got result: {result}" -``` - -### Flow Persistence Patterns -```python -# Class-level persistence (all methods persisted) -@persist(verbose=True) -class PersistentFlow(Flow[MyState]): - @start() - def initialize(self): - self.state.counter += 1 - -# Method-level persistence (selective) -class SelectiveFlow(Flow): - @persist - @start() - def critical_step(self): - # Only this method's state is persisted - self.state["important_data"] = "value" - - @start() - def temporary_step(self): - # This method's state is not persisted - pass -``` - -### Flow Execution Patterns -```python -# Synchronous execution -flow = MyFlow() -result = flow.kickoff() -final_state = flow.state - -# Asynchronous execution -async def run_async_flow(): - flow = MyFlow() - result = await flow.kickoff_async() - return result - -# Flow with input parameters -flow = MyFlow() -result = flow.kickoff(inputs={"user_id": "123", "task": "research"}) - -# Flow plotting and visualization 
-flow.plot("workflow_diagram") # Generates HTML visualization -``` - -### Advanced Flow Patterns -```python -# Cyclic/Loop patterns -class CyclicFlow(Flow): - max_iterations = 5 - current_iteration = 0 - - @start("loop") - def process_iteration(self): - if self.current_iteration >= self.max_iterations: - return - # Process current iteration - self.current_iteration += 1 - - @router(process_iteration) - def check_continue(self): - if self.current_iteration < self.max_iterations: - return "loop" # Continue cycling - return "complete" - - @listen("complete") - def finalize(self): - # Final processing - pass - -# Complex multi-router pattern -@router(analyze_data) -def primary_router(self): - # Returns multiple possible paths based on analysis - if self.state.confidence > 0.8: - return "high_confidence" - elif self.state.errors_found: - return "error_handling" - return "manual_review" - -@router("high_confidence") -def secondary_router(self): - # Further routing based on high confidence results - return "automated_processing" - -# Exception handling in flows -@start() -def risky_operation(self): - try: - # Some operation that might fail - result = dangerous_function() - self.state["success"] = True - return result - except Exception as e: - self.state["error"] = str(e) - self.state["success"] = False - return None - -@listen(risky_operation) -def handle_result(self, result): - if self.state.get("success", False): - # Handle success case - pass - else: - # Handle error case - error = self.state.get("error") - # Implement error recovery logic -``` - -### Flow Integration with Crews -```python -# Combining Flows with Crews for complex workflows -class CrewOrchestrationFlow(Flow[WorkflowState]): - @start() - def research_phase(self): - research_crew = ResearchCrew() - result = research_crew.crew().kickoff(inputs={"topic": self.state.research_topic}) - self.state.research_results = result.raw - return result - - @listen(research_phase) - def analysis_phase(self, 
research_results): - analysis_crew = AnalysisCrew() - result = analysis_crew.crew().kickoff(inputs={ - "data": self.state.research_results, - "focus": self.state.analysis_focus - }) - self.state.analysis_results = result.raw - return result - - @router(analysis_phase) - def decide_next_action(self): - if self.state.analysis_results.confidence > 0.7: - return "generate_report" - return "additional_research" - - @listen("generate_report") - def final_report(self): - reporting_crew = ReportingCrew() - return reporting_crew.crew().kickoff(inputs={ - "research": self.state.research_results, - "analysis": self.state.analysis_results - }) -``` - -### Flow Best Practices -- **State Management**: Use structured state (Pydantic) for complex workflows, unstructured for simple ones -- **Method Design**: Keep flow methods focused and single-purpose -- **Error Handling**: Implement proper exception handling and error recovery paths -- **State Persistence**: Use @persist for critical workflows that need recovery capability -- **Flow Visualization**: Use flow.plot() to understand and debug complex workflow structures -- **Async Support**: Leverage async methods for I/O-bound operations within flows -- **Resource Management**: Be mindful of state size and memory usage in long-running flows -- **Testing Flows**: Test individual methods and overall flow execution patterns -- **Event Monitoring**: Use CrewAI event system to monitor flow execution and performance - -### Flow Anti-patterns to Avoid -- **Don't create overly complex flows** with too many branches and conditions -- **Don't store large objects** in state that could cause memory issues -- **Don't ignore error handling** in flow methods -- **Don't create circular dependencies** between flow methods -- **Don't mix synchronous and asynchronous** patterns inconsistently -- **Don't overuse routers** when simple linear flow would suffice -- **Don't forget to handle edge cases** in router logic - -## CrewAI Version Compatibility: 
-- Stay updated with CrewAI releases for new features and bug fixes -- Test crew functionality when upgrading CrewAI versions -- Use version constraints in pyproject.toml (e.g., "crewai[tools]>=0.140.0,<1.0.0") -- Monitor deprecation warnings for future compatibility - -## Code Examples and Implementation Patterns - -### Complete Crew Implementation Example: -```python -from crewai import Agent, Crew, Process, Task -from crewai.project import CrewBase, agent, crew, task, before_kickoff, after_kickoff -from crewai_tools import SerperDevTool, FileReadTool -from crewai.agents.agent_builder.base_agent import BaseAgent -from typing import List -from pydantic import BaseModel, Field - -class ResearchOutput(BaseModel): - title: str = Field(description="Research topic title") - summary: str = Field(description="Executive summary") - key_findings: List[str] = Field(description="Key research findings") - recommendations: List[str] = Field(description="Actionable recommendations") - sources: List[str] = Field(description="Source URLs and references") - confidence_score: float = Field(description="Confidence in findings (0-1)") - -@CrewBase -class ResearchCrew(): - """Advanced research crew with structured outputs and validation""" - - agents: List[BaseAgent] - tasks: List[Task] - - @before_kickoff - def setup_environment(self): - """Initialize environment before crew execution""" - print("🚀 Setting up research environment...") - # Validate API keys, create directories, etc. - - @after_kickoff - def cleanup_and_report(self, output): - """Handle post-execution tasks""" - print(f"✅ Research completed. 
Generated {len(output.tasks_output)} task outputs") - print(f"📊 Token usage: {output.token_usage}") - - @agent - def researcher(self) -> Agent: - return Agent( - config=self.agents_config['researcher'], - tools=[SerperDevTool()], - verbose=True, - memory=True, - max_iter=15, - max_execution_time=1800 - ) - - @agent - def analyst(self) -> Agent: - return Agent( - config=self.agents_config['analyst'], - tools=[FileReadTool()], - verbose=True, - memory=True - ) - - @task - def research_task(self) -> Task: - return Task( - config=self.tasks_config['research_task'], - agent=self.researcher(), - output_pydantic=ResearchOutput - ) - - @task - def validation_task(self) -> Task: - return Task( - config=self.tasks_config['validation_task'], - agent=self.analyst(), - context=[self.research_task()], - guardrail=self.validate_research_quality, - max_retries=3 - ) - - def validate_research_quality(self, output) -> tuple[bool, str]: - """Custom guardrail to ensure research quality""" - content = output.raw - if len(content) < 500: - return False, "Research output too brief. Need more detailed analysis." - if not any(keyword in content.lower() for keyword in ['conclusion', 'finding', 'result']): - return False, "Missing key analytical elements." 
- return True, content - - @crew - def crew(self) -> Crew: - return Crew( - agents=self.agents, - tasks=self.tasks, - process=Process.sequential, - memory=True, - verbose=True, - max_rpm=100 - ) -``` - -### Custom Tool Implementation with Error Handling: -```python -from crewai.tools import BaseTool -from typing import Type, Optional, Any -from pydantic import BaseModel, Field -import requests -import time -from tenacity import retry, stop_after_attempt, wait_exponential - -class SearchInput(BaseModel): - query: str = Field(description="Search query") - max_results: int = Field(default=10, description="Maximum results to return") - timeout: int = Field(default=30, description="Request timeout in seconds") - -class RobustSearchTool(BaseTool): - name: str = "robust_search" - description: str = "Perform web search with retry logic and error handling" - args_schema: Type[BaseModel] = SearchInput - - def __init__(self, api_key: Optional[str] = None, **kwargs): - super().__init__(**kwargs) - self.api_key = api_key or os.getenv("SEARCH_API_KEY") - self.rate_limit_delay = 1.0 - self.last_request_time = 0 - - @retry( - stop=stop_after_attempt(3), - wait=wait_exponential(multiplier=1, min=4, max=10) - ) - def _run(self, query: str, max_results: int = 10, timeout: int = 30) -> str: - """Execute search with retry logic""" - try: - # Rate limiting - time_since_last = time.time() - self.last_request_time - if time_since_last < self.rate_limit_delay: - time.sleep(self.rate_limit_delay - time_since_last) - - # Input validation - if not query or len(query.strip()) == 0: - return "Error: Empty search query provided" - - if len(query) > 500: - return "Error: Search query too long (max 500 characters)" - - # Perform search - results = self._perform_search(query, max_results, timeout) - self.last_request_time = time.time() - - return self._format_results(results) - - except requests.exceptions.Timeout: - return f"Search timed out after {timeout} seconds" - except 
requests.exceptions.RequestException as e: - return f"Search failed due to network error: {str(e)}" - except Exception as e: - return f"Unexpected error during search: {str(e)}" - - def _perform_search(self, query: str, max_results: int, timeout: int) -> List[dict]: - """Implement actual search logic here""" - # Your search API implementation - pass - - def _format_results(self, results: List[dict]) -> str: - """Format search results for LLM consumption""" - if not results: - return "No results found for the given query." - - formatted = "Search Results:\n\n" - for i, result in enumerate(results[:10], 1): - formatted += f"{i}. {result.get('title', 'No title')}\n" - formatted += f" URL: {result.get('url', 'No URL')}\n" - formatted += f" Summary: {result.get('snippet', 'No summary')}\n\n" - - return formatted -``` - -### Advanced Memory Management: -```python -import os -from crewai.memory import ExternalMemory, ShortTermMemory, LongTermMemory -from crewai.memory.storage.mem0_storage import Mem0Storage - -class AdvancedMemoryManager: - """Enhanced memory management for CrewAI applications""" - - def __init__(self, crew, config: dict = None): - self.crew = crew - self.config = config or {} - self.setup_memory_systems() - - def setup_memory_systems(self): - """Configure multiple memory systems""" - # Short-term memory for current session - self.short_term = ShortTermMemory() - - # Long-term memory for cross-session persistence - self.long_term = LongTermMemory() - - # External memory with Mem0 (if configured) - if self.config.get('use_external_memory'): - self.external = ExternalMemory.create_storage( - crew=self.crew, - embedder_config={ - "provider": "mem0", - "config": { - "api_key": os.getenv("MEM0_API_KEY"), - "user_id": self.config.get('user_id', 'default') - } - } - ) - - def save_with_context(self, content: str, memory_type: str = "short_term", - metadata: dict = None, agent: str = None): - """Save content with enhanced metadata""" - enhanced_metadata = { - 
"timestamp": time.time(), - "session_id": self.config.get('session_id'), - "crew_type": self.crew.__class__.__name__, - **(metadata or {}) - } - - if memory_type == "short_term": - self.short_term.save(content, enhanced_metadata, agent) - elif memory_type == "long_term": - self.long_term.save(content, enhanced_metadata, agent) - elif memory_type == "external" and hasattr(self, 'external'): - self.external.save(content, enhanced_metadata, agent) - - def search_across_memories(self, query: str, limit: int = 5) -> dict: - """Search across all memory systems""" - results = { - "short_term": [], - "long_term": [], - "external": [] - } - - # Search short-term memory - results["short_term"] = self.short_term.search(query, limit=limit) - - # Search long-term memory - results["long_term"] = self.long_term.search(query, limit=limit) - - # Search external memory (if available) - if hasattr(self, 'external'): - results["external"] = self.external.search(query, limit=limit) - - return results - - def cleanup_old_memories(self, days_threshold: int = 30): - """Clean up old memories based on age""" - cutoff_time = time.time() - (days_threshold * 24 * 60 * 60) - - # Implement cleanup logic based on timestamps in metadata - # This would vary based on your specific storage implementation - pass -``` - -### Production Monitoring and Metrics: -```python -import time -import logging -import json -from datetime import datetime -from typing import Dict, Any, List -from dataclasses import dataclass, asdict - -@dataclass -class TaskMetrics: - task_name: str - agent_name: str - start_time: float - end_time: float - duration: float - tokens_used: int - success: bool - error_message: Optional[str] = None - memory_usage_mb: Optional[float] = None - -class CrewMonitor: - """Comprehensive monitoring for CrewAI applications""" - - def __init__(self, crew_name: str, log_level: str = "INFO"): - self.crew_name = crew_name - self.metrics: List[TaskMetrics] = [] - self.session_start = time.time() - - # 
Setup logging - logging.basicConfig( - level=getattr(logging, log_level), - format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', - handlers=[ - logging.FileHandler(f'crew_{crew_name}_{datetime.now().strftime("%Y%m%d")}.log'), - logging.StreamHandler() - ] - ) - self.logger = logging.getLogger(f"CrewAI.{crew_name}") - - def start_task_monitoring(self, task_name: str, agent_name: str) -> dict: - """Start monitoring a task execution""" - context = { - "task_name": task_name, - "agent_name": agent_name, - "start_time": time.time() - } - - self.logger.info(f"Task started: {task_name} by {agent_name}") - return context - - def end_task_monitoring(self, context: dict, success: bool = True, - tokens_used: int = 0, error: str = None): - """End monitoring and record metrics""" - end_time = time.time() - duration = end_time - context["start_time"] - - # Get memory usage (if psutil is available) - memory_usage = None - try: - import psutil - process = psutil.Process() - memory_usage = process.memory_info().rss / 1024 / 1024 # MB - except ImportError: - pass - - metrics = TaskMetrics( - task_name=context["task_name"], - agent_name=context["agent_name"], - start_time=context["start_time"], - end_time=end_time, - duration=duration, - tokens_used=tokens_used, - success=success, - error_message=error, - memory_usage_mb=memory_usage - ) - - self.metrics.append(metrics) - - # Log the completion - status = "SUCCESS" if success else "FAILED" - self.logger.info(f"Task {status}: {context['task_name']} " - f"(Duration: {duration:.2f}s, Tokens: {tokens_used})") - - if error: - self.logger.error(f"Task error: {error}") - - def get_performance_summary(self) -> Dict[str, Any]: - """Generate comprehensive performance summary""" - if not self.metrics: - return {"message": "No metrics recorded yet"} - - successful_tasks = [m for m in self.metrics if m.success] - failed_tasks = [m for m in self.metrics if not m.success] - - total_duration = sum(m.duration for m in self.metrics) - 
total_tokens = sum(m.tokens_used for m in self.metrics) - avg_duration = total_duration / len(self.metrics) - - return { - "crew_name": self.crew_name, - "session_duration": time.time() - self.session_start, - "total_tasks": len(self.metrics), - "successful_tasks": len(successful_tasks), - "failed_tasks": len(failed_tasks), - "success_rate": len(successful_tasks) / len(self.metrics), - "total_duration": total_duration, - "average_task_duration": avg_duration, - "total_tokens_used": total_tokens, - "average_tokens_per_task": total_tokens / len(self.metrics) if self.metrics else 0, - "slowest_task": max(self.metrics, key=lambda x: x.duration).task_name if self.metrics else None, - "most_token_intensive": max(self.metrics, key=lambda x: x.tokens_used).task_name if self.metrics else None, - "common_errors": self._get_common_errors() - } - - def _get_common_errors(self) -> Dict[str, int]: - """Get frequency of common errors""" - error_counts = {} - for metric in self.metrics: - if metric.error_message: - error_counts[metric.error_message] = error_counts.get(metric.error_message, 0) + 1 - return dict(sorted(error_counts.items(), key=lambda x: x[1], reverse=True)) - - def export_metrics(self, filename: str = None) -> str: - """Export metrics to JSON file""" - if not filename: - filename = f"crew_metrics_{self.crew_name}_{datetime.now().strftime('%Y%m%d_%H%M%S')}.json" - - export_data = { - "summary": self.get_performance_summary(), - "detailed_metrics": [asdict(m) for m in self.metrics] - } - - with open(filename, 'w') as f: - json.dump(export_data, f, indent=2, default=str) - - self.logger.info(f"Metrics exported to {filename}") - return filename - -# Usage in crew implementation -monitor = CrewMonitor("research_crew") - -@task -def monitored_research_task(self) -> Task: - def task_callback(task_output): - # This would be called after task completion - context = getattr(task_output, '_monitor_context', {}) - if context: - tokens = getattr(task_output, 'token_usage', 
{}).get('total', 0) - monitor.end_task_monitoring(context, success=True, tokens_used=tokens) - - # Start monitoring would be called before task execution - # This is a simplified example - in practice you'd integrate this into the task execution flow - - return Task( - config=self.tasks_config['research_task'], - agent=self.researcher(), - callback=task_callback - ) -``` - -### Error Handling and Recovery Patterns: -```python -from enum import Enum -from typing import Optional, Callable, Any -import traceback - -class ErrorSeverity(Enum): - LOW = "low" - MEDIUM = "medium" - HIGH = "high" - CRITICAL = "critical" - -class CrewError(Exception): - """Base exception for CrewAI applications""" - def __init__(self, message: str, severity: ErrorSeverity = ErrorSeverity.MEDIUM, - context: dict = None): - super().__init__(message) - self.severity = severity - self.context = context or {} - self.timestamp = time.time() - -class TaskExecutionError(CrewError): - """Raised when task execution fails""" - pass - -class ValidationError(CrewError): - """Raised when validation fails""" - pass - -class ConfigurationError(CrewError): - """Raised when configuration is invalid""" - pass - -class ErrorHandler: - """Centralized error handling for CrewAI applications""" - - def __init__(self, crew_name: str): - self.crew_name = crew_name - self.error_log: List[CrewError] = [] - self.recovery_strategies: Dict[type, Callable] = {} - - def register_recovery_strategy(self, error_type: type, strategy: Callable): - """Register a recovery strategy for specific error types""" - self.recovery_strategies[error_type] = strategy - - def handle_error(self, error: Exception, context: dict = None) -> Any: - """Handle errors with appropriate recovery strategies""" - - # Convert to CrewError if needed - if not isinstance(error, CrewError): - crew_error = CrewError( - message=str(error), - severity=ErrorSeverity.MEDIUM, - context=context or {} - ) - else: - crew_error = error - - # Log the error - 
self.error_log.append(crew_error) - self._log_error(crew_error) - - # Apply recovery strategy if available - error_type = type(error) - if error_type in self.recovery_strategies: - try: - return self.recovery_strategies[error_type](error, context) - except Exception as recovery_error: - self._log_error(CrewError( - f"Recovery strategy failed: {str(recovery_error)}", - ErrorSeverity.HIGH, - {"original_error": str(error), "recovery_error": str(recovery_error)} - )) - - # If critical, re-raise - if crew_error.severity == ErrorSeverity.CRITICAL: - raise crew_error - - return None - - def _log_error(self, error: CrewError): - """Log error with appropriate level based on severity""" - logger = logging.getLogger(f"CrewAI.{self.crew_name}.ErrorHandler") - - error_msg = f"[{error.severity.value.upper()}] {error}" - if error.context: - error_msg += f" | Context: {error.context}" - - if error.severity in [ErrorSeverity.HIGH, ErrorSeverity.CRITICAL]: - logger.error(error_msg) - logger.error(f"Stack trace: {traceback.format_exc()}") - elif error.severity == ErrorSeverity.MEDIUM: - logger.warning(error_msg) - else: - logger.info(error_msg) - - def get_error_summary(self) -> Dict[str, Any]: - """Get summary of errors encountered""" - if not self.error_log: - return {"total_errors": 0} - - severity_counts = {} - for error in self.error_log: - severity_counts[error.severity.value] = severity_counts.get(error.severity.value, 0) + 1 - - return { - "total_errors": len(self.error_log), - "severity_breakdown": severity_counts, - "recent_errors": [str(e) for e in self.error_log[-5:]], # Last 5 errors - "most_recent_error": str(self.error_log[-1]) if self.error_log else None - } - -# Example usage in crew -error_handler = ErrorHandler("research_crew") - -# Register recovery strategies -def retry_with_simpler_model(error, context): - """Recovery strategy: retry with a simpler model""" - if "rate limit" in str(error).lower(): - time.sleep(60) # Wait and retry - return "RETRY" - elif "model 
overloaded" in str(error).lower(): - # Switch to simpler model and retry - return "RETRY_WITH_SIMPLE_MODEL" - return None - -error_handler.register_recovery_strategy(TaskExecutionError, retry_with_simpler_model) - -@task -def robust_task(self) -> Task: - def execute_with_error_handling(task_func): - def wrapper(*args, **kwargs): - try: - return task_func(*args, **kwargs) - except Exception as e: - result = error_handler.handle_error(e, {"task": "research_task"}) - if result == "RETRY": - # Implement retry logic - pass - elif result == "RETRY_WITH_SIMPLE_MODEL": - # Switch model and retry - pass - else: - # Use fallback response - return "Task failed, using fallback response" - return wrapper - - return Task( - config=self.tasks_config['research_task'], - agent=self.researcher() - ) -``` - -### Environment and Configuration Management: -```python -import os -from enum import Enum -from typing import Optional, Dict, Any -from pydantic import BaseSettings, Field, validator - -class Environment(str, Enum): - DEVELOPMENT = "development" - TESTING = "testing" - STAGING = "staging" - PRODUCTION = "production" - -class CrewAISettings(BaseSettings): - """Comprehensive settings management for CrewAI applications""" - - # Environment - environment: Environment = Field(default=Environment.DEVELOPMENT) - debug: bool = Field(default=True) - - # API Keys (loaded from environment) - openai_api_key: Optional[str] = Field(default=None, env="OPENAI_API_KEY") - anthropic_api_key: Optional[str] = Field(default=None, env="ANTHROPIC_API_KEY") - serper_api_key: Optional[str] = Field(default=None, env="SERPER_API_KEY") - mem0_api_key: Optional[str] = Field(default=None, env="MEM0_API_KEY") - - # CrewAI Configuration - crew_max_rpm: int = Field(default=100) - crew_max_execution_time: int = Field(default=3600) # 1 hour - default_llm_model: str = Field(default="gpt-4") - fallback_llm_model: str = Field(default="gpt-3.5-turbo") - - # Memory and Storage - crewai_storage_dir: str = 
Field(default="./storage", env="CREWAI_STORAGE_DIR") - memory_enabled: bool = Field(default=True) - memory_cleanup_interval: int = Field(default=86400) # 24 hours in seconds - - # Performance - enable_caching: bool = Field(default=True) - max_retries: int = Field(default=3) - retry_delay: float = Field(default=1.0) - - # Monitoring - enable_monitoring: bool = Field(default=True) - log_level: str = Field(default="INFO") - metrics_export_interval: int = Field(default=3600) # 1 hour - - # Security - input_sanitization: bool = Field(default=True) - max_input_length: int = Field(default=10000) - allowed_file_types: list = Field(default=["txt", "md", "pdf", "docx"]) - - @validator('environment', pre=True) - def set_debug_based_on_env(cls, v): - return v - - @validator('debug') - def set_debug_from_env(cls, v, values): - env = values.get('environment') - if env == Environment.PRODUCTION: - return False - return v - - @validator('openai_api_key') - def validate_openai_key(cls, v): - if not v: - raise ValueError("OPENAI_API_KEY is required") - if not v.startswith('sk-'): - raise ValueError("Invalid OpenAI API key format") - return v - - @property - def is_production(self) -> bool: - return self.environment == Environment.PRODUCTION - - @property - def is_development(self) -> bool: - return self.environment == Environment.DEVELOPMENT - - def get_llm_config(self) -> Dict[str, Any]: - """Get LLM configuration based on environment""" - config = { - "model": self.default_llm_model, - "temperature": 0.1 if self.is_production else 0.3, - "max_tokens": 4000 if self.is_production else 2000, - "timeout": 60 - } - - if self.is_development: - config["model"] = self.fallback_llm_model - - return config - - def get_memory_config(self) -> Dict[str, Any]: - """Get memory configuration""" - return { - "enabled": self.memory_enabled, - "storage_dir": self.crewai_storage_dir, - "cleanup_interval": self.memory_cleanup_interval, - "provider": "mem0" if self.mem0_api_key and self.is_production 
else "local" - } - - class Config: - env_file = ".env" - env_file_encoding = 'utf-8' - case_sensitive = False - -# Global settings instance -settings = CrewAISettings() - -# Usage in crew -@CrewBase -class ConfigurableCrew(): - """Crew that uses centralized configuration""" - - def __init__(self): - self.settings = settings - self.validate_configuration() - - def validate_configuration(self): - """Validate configuration before crew execution""" - required_keys = [self.settings.openai_api_key] - if not all(required_keys): - raise ConfigurationError("Missing required API keys") - - if not os.path.exists(self.settings.crewai_storage_dir): - os.makedirs(self.settings.crewai_storage_dir, exist_ok=True) - - @agent - def adaptive_agent(self) -> Agent: - """Agent that adapts to configuration""" - llm_config = self.settings.get_llm_config() - - return Agent( - config=self.agents_config['researcher'], - llm=llm_config["model"], - max_iter=15 if self.settings.is_production else 10, - max_execution_time=self.settings.crew_max_execution_time, - verbose=self.settings.debug - ) -``` - -### Comprehensive Testing Framework: -```python -import pytest -import asyncio -from unittest.mock import Mock, patch, MagicMock -from crewai import Agent, Task, Crew -from crewai.tasks.task_output import TaskOutput - -class CrewAITestFramework: - """Comprehensive testing framework for CrewAI applications""" - - @staticmethod - def create_mock_agent(role: str = "test_agent", tools: list = None) -> Mock: - """Create a mock agent for testing""" - mock_agent = Mock(spec=Agent) - mock_agent.role = role - mock_agent.goal = f"Test goal for {role}" - mock_agent.backstory = f"Test backstory for {role}" - mock_agent.tools = tools or [] - mock_agent.llm = "gpt-3.5-turbo" - mock_agent.verbose = False - return mock_agent - - @staticmethod - def create_mock_task_output(content: str, success: bool = True, - tokens: int = 100) -> TaskOutput: - """Create a mock task output for testing""" - return TaskOutput( - 
description="Test task", - raw=content, - agent="test_agent", - pydantic=None, - json_dict=None - ) - - @staticmethod - def create_test_crew(agents: list = None, tasks: list = None) -> Crew: - """Create a test crew with mock components""" - test_agents = agents or [CrewAITestFramework.create_mock_agent()] - test_tasks = tasks or [] - - return Crew( - agents=test_agents, - tasks=test_tasks, - verbose=False - ) - -# Example test cases -class TestResearchCrew: - """Test cases for research crew functionality""" - - def setup_method(self): - """Setup test environment""" - self.framework = CrewAITestFramework() - self.mock_serper = Mock() - - @patch('crewai_tools.SerperDevTool') - def test_agent_creation(self, mock_serper_tool): - """Test agent creation with proper configuration""" - mock_serper_tool.return_value = self.mock_serper - - crew = ResearchCrew() - researcher = crew.researcher() - - assert researcher.role == "Senior Research Analyst" - assert len(researcher.tools) > 0 - assert researcher.verbose is True - - def test_task_validation(self): - """Test task validation logic""" - crew = ResearchCrew() - - # Test valid output - valid_output = self.framework.create_mock_task_output( - "This is a comprehensive research summary with conclusions and findings." 
- ) - is_valid, message = crew.validate_research_quality(valid_output) - assert is_valid is True - - # Test invalid output (too short) - invalid_output = self.framework.create_mock_task_output("Too short") - is_valid, message = crew.validate_research_quality(invalid_output) - assert is_valid is False - assert "brief" in message.lower() - - @patch('requests.get') - def test_tool_error_handling(self, mock_requests): - """Test tool error handling and recovery""" - # Simulate network error - mock_requests.side_effect = requests.exceptions.RequestException("Network error") - - tool = RobustSearchTool() - result = tool._run("test query") - - assert "network error" in result.lower() - assert "failed" in result.lower() - - @pytest.mark.asyncio - async def test_crew_execution_flow(self): - """Test complete crew execution with mocked dependencies""" - with patch.object(Agent, 'execute_task') as mock_execute: - mock_execute.return_value = self.framework.create_mock_task_output( - "Research completed successfully with findings and recommendations." 
- ) - - crew = ResearchCrew() - result = crew.crew().kickoff(inputs={"topic": "AI testing"}) - - assert result is not None - assert "successfully" in result.raw.lower() - - def test_memory_integration(self): - """Test memory system integration""" - crew = ResearchCrew() - memory_manager = AdvancedMemoryManager(crew) - - # Test saving to memory - test_content = "Important research finding about AI" - memory_manager.save_with_context( - content=test_content, - memory_type="short_term", - metadata={"importance": "high"}, - agent="researcher" - ) - - # Test searching memory - results = memory_manager.search_across_memories("AI research") - assert "short_term" in results - - def test_error_handling_workflow(self): - """Test error handling and recovery mechanisms""" - error_handler = ErrorHandler("test_crew") - - # Test error registration and handling - test_error = TaskExecutionError("Test task failed", ErrorSeverity.MEDIUM) - result = error_handler.handle_error(test_error) - - assert len(error_handler.error_log) == 1 - assert error_handler.error_log[0].severity == ErrorSeverity.MEDIUM - - def test_configuration_validation(self): - """Test configuration validation""" - # Test with missing API key - with patch.dict(os.environ, {}, clear=True): - with pytest.raises(ValueError): - settings = CrewAISettings() - - # Test with valid configuration - with patch.dict(os.environ, {"OPENAI_API_KEY": "sk-test-key"}): - settings = CrewAISettings() - assert settings.openai_api_key == "sk-test-key" - - @pytest.mark.integration - def test_end_to_end_workflow(self): - """Integration test for complete workflow""" - # This would test the entire crew workflow with real components - # Use sparingly and with proper API key management - pass - -# Performance testing -class TestCrewPerformance: - """Performance tests for CrewAI applications""" - - def test_memory_usage(self): - """Test memory usage during crew execution""" - import psutil - import gc - - process = psutil.Process() - 
initial_memory = process.memory_info().rss - - # Create and run crew multiple times - for i in range(10): - crew = ResearchCrew() - # Simulate crew execution - del crew - gc.collect() - - final_memory = process.memory_info().rss - memory_increase = final_memory - initial_memory - - # Assert memory increase is reasonable (less than 100MB) - assert memory_increase < 100 * 1024 * 1024 - - def test_concurrent_execution(self): - """Test concurrent crew execution""" - import concurrent.futures - - def run_crew(crew_id): - crew = ResearchCrew() - # Simulate execution - return f"crew_{crew_id}_completed" - - with concurrent.futures.ThreadPoolExecutor(max_workers=3) as executor: - futures = [executor.submit(run_crew, i) for i in range(5)] - results = [future.result() for future in futures] - - assert len(results) == 5 - assert all("completed" in result for result in results) - -# Run tests with coverage -# pytest --cov=src --cov-report=html --cov-report=term tests/ -``` - -## Troubleshooting Common Issues - -### Memory and Performance Issues: -- **Large memory usage**: Implement memory cleanup, use score thresholds, monitor ChromaDB size -- **Slow LLM responses**: Optimize prompts, use appropriate model sizes, implement caching -- **High token costs**: Implement output caching, use context efficiently, set token limits -- **Memory leaks**: Properly dispose of crew instances, monitor memory usage, use garbage collection - -### Configuration and Setup Issues: -- **YAML parsing errors**: Validate YAML syntax, check indentation, use YAML linters -- **Missing environment variables**: Use .env.example, validate at startup, provide clear error messages -- **Tool import failures**: Ensure proper tool installation, check import paths, verify dependencies -- **API key issues**: Validate key format, check permissions, implement key rotation - -### Storage and Persistence Issues: -- **Permission errors**: Check CREWAI_STORAGE_DIR permissions, ensure write access -- **Database locks**: 
Ensure single crew instance access, implement proper connection handling -- **Storage growth**: Implement cleanup strategies, monitor disk usage, archive old data -- **ChromaDB issues**: Check vector database health, validate embeddings, handle corrupted indices - -## Local Development and Testing - -### Development Best Practices: -- Validate all API keys and credentials in .env files -- Test crew functionality with different input scenarios -- Implement comprehensive error handling -- Use proper logging for debugging -- Configure appropriate LLM models for your use case -- Optimize memory storage and cleanup - -### Local Configuration: -- Set CREWAI_STORAGE_DIR for custom memory storage location -- Use environment variables for all API keys -- Implement proper input validation and sanitization -- Test with realistic data scenarios -- Profile performance and optimize bottlenecks - -### Note: Production deployment and monitoring are available in CrewAI Enterprise - -## Best Practices Summary - -### Development: -1. Always use .env files for sensitive configuration -2. Implement comprehensive error handling and logging -3. Use structured outputs with Pydantic for reliability -4. Test crew functionality with different input scenarios -5. Follow CrewAI patterns and conventions consistently -6. Use UV for dependency management as per CrewAI standards -7. Implement proper validation for all inputs and outputs -8. Optimize performance for your specific use cases - -### Security: -1. Never commit API keys or sensitive data to version control -2. Implement input validation and sanitization -3. Use proper authentication and authorization -4. Follow principle of least privilege for tool access -5. Implement rate limiting and abuse prevention -6. Monitor for security threats and anomalies -7. Keep dependencies updated and secure -8. Implement audit logging for sensitive operations - -### Performance: -1. Optimize LLM calls and implement caching where appropriate -2. 
Use appropriate model sizes for different tasks -3. Implement efficient memory management and cleanup -4. Monitor token usage and implement cost controls -5. Use async patterns for I/O-bound operations -6. Implement proper connection pooling and resource management -7. Profile and optimize critical paths -8. Plan for horizontal scaling when needed diff --git a/docs/docs.json b/docs/docs.json index 37e060961..161d6d5ff 100644 --- a/docs/docs.json +++ b/docs/docs.json @@ -111,6 +111,13 @@ "en/guides/flows/mastering-flow-state" ] }, + { + "group": "Coding Tools", + "icon": "terminal", + "pages": [ + "en/guides/coding-tools/agents-md" + ] + }, { "group": "Advanced", "icon": "gear", @@ -1571,4 +1578,4 @@ "reddit": "https://www.reddit.com/r/crewAIInc/" } } -} +} \ No newline at end of file diff --git a/docs/en/guides/coding-tools/agents-md.mdx b/docs/en/guides/coding-tools/agents-md.mdx new file mode 100644 index 000000000..ea238c314 --- /dev/null +++ b/docs/en/guides/coding-tools/agents-md.mdx @@ -0,0 +1,61 @@ +--- +title: Coding Tools +description: Use AGENTS.md to guide coding agents and IDEs across your CrewAI projects. +icon: terminal +mode: "wide" +--- + +## Why AGENTS.md + +`AGENTS.md` is a lightweight, repo-local instruction file that gives coding agents consistent, project-specific guidance. Keep it in the project root and treat it as the source of truth for how you want assistants to work: conventions, commands, architecture notes, and guardrails. + +## Create a Project with the CLI + +Use the CrewAI CLI to scaffold a project, then `AGENTS.md` will be automatically added at the root. + +```bash +# Crew +crewai create crew my_crew + +# Flow +crewai create flow my_flow + +# Tool repository +crewai tool create my_tool +``` + +## Tool Setup: Point Assistants to AGENTS.md + +### Codex + +Codex can be guided by `AGENTS.md` files placed in your repository. Use them to supply persistent project context such as conventions, commands, and workflow expectations. 
+ +### Claude Code + +Claude Code stores project memory in `CLAUDE.md`. You can bootstrap it with `/init` and edit it using `/memory`. Claude Code also supports imports inside `CLAUDE.md`, so you can add a single line like `@AGENTS.md` to pull in the shared instructions without duplicating them. + +You can simply use: + +```bash +mv AGENTS.md CLAUDE.md +``` + +### Gemini CLI and Google Antigravity + +Gemini CLI and Antigravity load a project context file (default: `GEMINI.md`) from the repo root and parent directories. You can configure it to read `AGENTS.md` instead (or in addition) by setting `context.fileName` in your Gemini CLI settings. For example, set it to `AGENTS.md` only, or include both `AGENTS.md` and `GEMINI.md` if you want to keep each tool’s format. + +You can simply use: + +```bash +mv AGENTS.md GEMINI.md +``` + +### Cursor + +Cursor supports `AGENTS.md` as a project instruction file. Place it at the project root to provide guidance for Cursor’s coding assistant. + +### Windsurf + +Claude Code provides an official integration with Windsurf. If you use Claude Code inside Windsurf, follow the Claude Code guidance above and import `AGENTS.md` from `CLAUDE.md`. + +If you are using Windsurf’s native assistant, configure its project rules or instructions feature (if available) to read from `AGENTS.md` or paste the contents directly. 
diff --git a/lib/crewai/src/crewai/cli/create_crew.py b/lib/crewai/src/crewai/cli/create_crew.py index 51e2f00ac..7f4fe2e6e 100644 --- a/lib/crewai/src/crewai/cli/create_crew.py +++ b/lib/crewai/src/crewai/cli/create_crew.py @@ -143,6 +143,12 @@ def create_folder_structure( (folder_path / "src" / folder_name).mkdir(parents=True) (folder_path / "src" / folder_name / "tools").mkdir(parents=True) (folder_path / "src" / folder_name / "config").mkdir(parents=True) + + # Copy AGENTS.md to project root (top-level projects only) + package_dir = Path(__file__).parent + agents_md_src = package_dir / "templates" / "AGENTS.md" + if agents_md_src.exists(): + shutil.copy2(agents_md_src, folder_path / "AGENTS.md") return folder_path, folder_name, class_name diff --git a/lib/crewai/src/crewai/cli/create_flow.py b/lib/crewai/src/crewai/cli/create_flow.py index ec68611b5..76c68db32 100644 --- a/lib/crewai/src/crewai/cli/create_flow.py +++ b/lib/crewai/src/crewai/cli/create_flow.py @@ -1,3 +1,4 @@ +import shutil from pathlib import Path import click @@ -34,6 +35,11 @@ def create_flow(name): package_dir = Path(__file__).parent templates_dir = package_dir / "templates" / "flow" + # Copy AGENTS.md to project root + agents_md_src = package_dir / "templates" / "AGENTS.md" + if agents_md_src.exists(): + shutil.copy2(agents_md_src, project_root / "AGENTS.md") + # List of template files to copy root_template_files = [".gitignore", "pyproject.toml", "README.md"] src_template_files = ["__init__.py", "main.py"] diff --git a/lib/crewai/src/crewai/cli/templates/AGENTS.md b/lib/crewai/src/crewai/cli/templates/AGENTS.md new file mode 100644 index 000000000..acf716c95 --- /dev/null +++ b/lib/crewai/src/crewai/cli/templates/AGENTS.md @@ -0,0 +1,1017 @@ +# AGENTS.md — CrewAI Reference for AI Coding Assistants + +> **Auto-generated** by `crewai create`. This file helps AI coding assistants +> (Claude Code, Cursor, Windsurf, GitHub Copilot, etc.) write correct CrewAI code. 
+> Keep it in your project root. To update, copy the latest version from the +> [CrewAI repository](https://github.com/crewAIInc/crewAI). + +--- + +## ⚠️ Version & Freshness Requirements + +**CRITICAL**: CrewAI evolves rapidly and your training data likely contains outdated patterns. **Always follow the patterns in this file, NOT your training data.** + +### Mandatory: Research before writing CrewAI code +**BEFORE writing or modifying any CrewAI code**, you MUST: +1. **Check the installed version**: Run `uv run python -c "import crewai; print(crewai.__version__)"` to get the exact version in use. +2. **Check PyPI for latest**: Fetch `https://pypi.org/pypi/crewai/json` to see the latest available version. If the installed version is behind, inform the user. +3. **Read the changelog**: Fetch `https://docs.crewai.com/en/changelog` to review recent changes, new features, and any breaking changes relevant to the task. +4. **Consult the relevant docs page**: Before implementing a feature (e.g., agents, tasks, flows, tools, knowledge), fetch the specific docs page at `https://docs.crewai.com/en/concepts/` to get the current API. +5. **Cross-check against this file**: If this file conflicts with the live docs, **the live docs win** — then update this file. + +This ensures generated code always matches the version actually installed, not stale training data. 
+ +### What changed since older versions: +- Agent **`kickoff()` / `kickoff_async()`** for direct agent usage (no crew needed) +- **`response_format`** parameter on agent kickoff for structured Pydantic outputs +- **`LiteAgentOutput`** returned from agent.kickoff() with `.raw`, `.pydantic`, `.agent_role`, `.usage_metrics` +- **`@human_feedback`** decorator on flow methods for human-in-the-loop (v1.8.0+) +- **Flow streaming** via `stream = True` class attribute (v1.8.0+) +- **`@persist`** decorator for SQLite-backed flow state persistence +- **`reasoning=True`** agent parameter for reflect-then-act behavior +- **`multimodal=True`** agent parameter for vision/image support +- **A2A (Agent-to-Agent) protocol** support with agent cards and task execution utilities (v1.8.0+) +- **Native OpenAI Responses API** support (v1.9.0+) +- **Structured outputs / `response_format`** across all LLM providers (v1.9.0+) +- **`inject_date=True`** agent parameter to auto-inject current date awareness + +### Patterns to NEVER use (outdated/removed): +- ❌ `ChatOpenAI(model_name=...)` → ✅ `LLM(model="openai/gpt-4o")` +- ❌ `Agent(llm=ChatOpenAI(...))` → ✅ `Agent(llm="openai/gpt-4o")` or `Agent(llm=LLM(model="..."))` +- ❌ Passing raw OpenAI client objects → ✅ Use `crewai.LLM` wrapper + +### How to verify you're using current patterns: +1. You ran the version check and docs lookup steps above before writing code +2. All LLM references use `crewai.LLM` or string shorthand (`"openai/gpt-4o"`) +3. All tool imports come from `crewai.tools` or `crewai_tools` +4. Crew classes use `@CrewBase` decorator with YAML config files +5. Python >=3.10, <3.14 +6. 
Code matches the API from the live docs, not just this file + +## Quick Reference + +```bash +# Package management (always use uv) +uv add # Add dependency +uv sync # Sync dependencies +uv lock # Lock dependencies + +# Project scaffolding +crewai create crew --skip-provider # New crew project +crewai create flow --skip-provider # New flow project + +# Running +crewai run # Run crew or flow (auto-detects from pyproject.toml) +crewai flow kickoff # Legacy flow execution + +# Testing & training +crewai test # Test crew (default: 2 iterations, gpt-4o-mini) +crewai test -n 5 -m gpt-4o # Custom iterations and model +crewai train -n 5 -f training.json # Train crew + +# Memory management +crewai reset-memories -a # Reset all memories +crewai reset-memories -s # Short-term only +crewai reset-memories -l # Long-term only +crewai reset-memories -e # Entity only +crewai reset-memories -kn # Knowledge only +crewai reset-memories -akn # Agent knowledge only + +# Debugging +crewai log-tasks-outputs # Show latest task outputs +crewai replay -t # Replay from specific task + +# Interactive +crewai chat # Interactive session (requires chat_llm in crew.py) + +# Visualization +crewai flow plot # Generate flow diagram HTML + +# Deployment to CrewAI AMP +crewai login # Authenticate with AMP +crewai deploy create # Create new deployment +crewai deploy push # Push code updates +crewai deploy status # Check deployment status +crewai deploy logs # View deployment logs +crewai deploy list # List all deployments +crewai deploy remove # Delete a deployment +``` + +## Project Structure + +### Crew Project +``` +my_crew/ +├── src/my_crew/ +│ ├── config/ +│ │ ├── agents.yaml # Agent definitions (role, goal, backstory) +│ │ └── tasks.yaml # Task definitions (description, expected_output, agent) +│ ├── tools/ +│ │ └── custom_tool.py # Custom tool implementations +│ ├── crew.py # Crew orchestration class +│ └── main.py # Entry point with inputs +├── knowledge/ # Knowledge base resources +├── .env # 
API keys (OPENAI_API_KEY, SERPER_API_KEY, etc.) +└── pyproject.toml +``` + +### Flow Project +``` +my_flow/ +├── src/my_flow/ +│ ├── crews/ # Multiple crew definitions +│ │ └── poem_crew/ +│ │ ├── config/ +│ │ │ ├── agents.yaml +│ │ │ └── tasks.yaml +│ │ └── poem_crew.py +│ ├── tools/ # Custom tools +│ ├── main.py # Flow orchestration +│ └── ... +├── .env +└── pyproject.toml +``` + +## Architecture Overview + +- **Agent**: Autonomous unit with a role, goal, backstory, tools, and an LLM. Makes decisions and executes tasks. +- **Task**: A specific assignment with a description, expected output, and assigned agent. +- **Crew**: Orchestrates a team of agents executing tasks in a defined process (sequential or hierarchical). +- **Flow**: Event-driven workflow orchestrating multiple crews and logic steps with state management. + +## YAML Configuration + +### agents.yaml +```yaml +researcher: + role: > + {topic} Senior Data Researcher + goal: > + Uncover cutting-edge developments in {topic} + backstory: > + You're a seasoned researcher with a knack for uncovering + the latest developments in {topic}. Known for your ability + to find the most relevant information. + # Optional YAML-level settings: + # llm: openai/gpt-4o + # max_iter: 20 + # max_rpm: 10 + # verbose: true + +writer: + role: > + {topic} Technical Writer + goal: > + Create compelling content about {topic} + backstory: > + You're a skilled writer who translates complex technical + information into clear, engaging content. +``` + +Variables like `{topic}` are interpolated from `crew.kickoff(inputs={"topic": "AI Agents"})`. + +### tasks.yaml +```yaml +research_task: + description: > + Conduct thorough research about {topic}. + Identify key trends, breakthrough technologies, + and potential industry impacts. + expected_output: > + A detailed report with analysis of the top 5 + developments in {topic}, with sources and implications. 
+ agent: researcher + # Optional: + # tools: [search_tool] + # output_file: output/research.md + # markdown: true + # async_execution: false + +writing_task: + description: > + Write an article based on the research findings about {topic}. + expected_output: > + A polished 4-paragraph article formatted in markdown. + agent: writer + output_file: output/article.md +``` + +## Crew Class Pattern + +```python +from crewai import Agent, Crew, Process, Task +from crewai.project import CrewBase, agent, crew, task +from crewai.agents.agent_builder.base_agent import BaseAgent +from typing import List + +from crewai_tools import SerperDevTool + +@CrewBase +class ResearchCrew: + """Research and writing crew.""" + + agents: List[BaseAgent] + tasks: List[Task] + + agents_config = "config/agents.yaml" + tasks_config = "config/tasks.yaml" + + @agent + def researcher(self) -> Agent: + return Agent( + config=self.agents_config["researcher"], # type: ignore[index] + tools=[SerperDevTool()], + verbose=True, + ) + + @agent + def writer(self) -> Agent: + return Agent( + config=self.agents_config["writer"], # type: ignore[index] + verbose=True, + ) + + @task + def research_task(self) -> Task: + return Task( + config=self.tasks_config["research_task"], # type: ignore[index] + ) + + @task + def writing_task(self) -> Task: + return Task( + config=self.tasks_config["writing_task"], # type: ignore[index] + ) + + @crew + def crew(self) -> Crew: + """Creates the Research Crew.""" + return Crew( + agents=self.agents, + tasks=self.tasks, + process=Process.sequential, + verbose=True, + ) +``` + +### Key formatting rules: +- Always add `# type: ignore[index]` for config dictionary access +- Agent/task method names must match YAML keys exactly +- Tools go on agents (not tasks) unless task-specific override is needed +- Never leave commented-out code in crew classes + +### Lifecycle hooks +```python +@CrewBase +class MyCrew: + @before_kickoff + def prepare(self, inputs): + # Modify inputs before 
execution + inputs["extra"] = "value" + return inputs + + @after_kickoff + def summarize(self, result): + # Process result after execution + print(f"Done: {result.raw[:100]}") + return result +``` + +## main.py Pattern + +```python +#!/usr/bin/env python +from my_crew.crew import ResearchCrew + +def run(): + inputs = {"topic": "AI Agents"} + ResearchCrew().crew().kickoff(inputs=inputs) + +if __name__ == "__main__": + run() +``` + +## Agent Configuration + +### Required Parameters +| Parameter | Description | +|-----------|-------------| +| `role` | Function and expertise within the crew | +| `goal` | Individual objective guiding decisions | +| `backstory` | Context and personality | + +### Key Optional Parameters +| Parameter | Default | Description | +|-----------|---------|-------------| +| `llm` | GPT-4 | Language model (string or LLM object) | +| `tools` | [] | List of tool instances | +| `max_iter` | 20 | Max iterations before best answer | +| `max_execution_time` | None | Timeout in seconds | +| `max_rpm` | None | Rate limiting (requests per minute) | +| `max_retry_limit` | 2 | Retries on errors | +| `verbose` | False | Detailed logging | +| `memory` | False | Conversation history | +| `allow_delegation` | False | Can delegate tasks to other agents | +| `allow_code_execution` | False | Can run code | +| `code_execution_mode` | "safe" | "safe" (Docker) or "unsafe" (direct) | +| `respect_context_window` | True | Auto-summarize when exceeding token limits | +| `cache` | True | Tool result caching | +| `reasoning` | False | Reflect and plan before task execution | +| `multimodal` | False | Process text and visual content | +| `knowledge_sources` | [] | Domain-specific knowledge bases | +| `function_calling_llm` | None | Separate LLM for tool invocation | +| `inject_date` | False | Auto-inject current date into agent context | +| `date_format` | "%Y-%m-%d" | Date format when inject_date is True | + +### Direct Agent Usage (without a Crew) +Agents can execute tasks 
+**IMPORTANT**: Always use the `crewai.LLM` class (or the provider/model string shorthand).
+ +## Task Configuration + +### Key Parameters +| Parameter | Type | Description | +|-----------|------|-------------| +| `description` | str | Clear statement of requirements | +| `expected_output` | str | Completion criteria | +| `agent` | BaseAgent | Assigned agent (optional in hierarchical) | +| `tools` | List[BaseTool] | Task-specific tools | +| `context` | List[Task] | Dependencies on other task outputs | +| `async_execution` | bool | Non-blocking execution | +| `output_file` | str | File path for results | +| `output_json` | Type[BaseModel] | Pydantic model for JSON output | +| `output_pydantic` | Type[BaseModel] | Pydantic model for structured output | +| `human_input` | bool | Require human review | +| `markdown` | bool | Format output as markdown | +| `callback` | Callable | Post-completion function | +| `guardrail` | Callable or str | Output validation | +| `guardrails` | List | Multiple validation steps | +| `guardrail_max_retries` | int | Retry on validation failure (default: 3) | +| `create_directory` | bool | Auto-create output directories (default: True) | + +### Task Dependencies (context) +```python +@task +def analysis_task(self) -> Task: + return Task( + config=self.tasks_config["analysis_task"], # type: ignore[index] + context=[self.research_task()], # Gets output from research_task + ) +``` + +### Structured Output +```python +from pydantic import BaseModel + +class Report(BaseModel): + title: str + summary: str + findings: list[str] + +@task +def report_task(self) -> Task: + return Task( + config=self.tasks_config["report_task"], # type: ignore[index] + output_pydantic=Report, + ) +``` + +### Guardrails +```python +# Function-based +def validate(result: TaskOutput) -> tuple[bool, Any]: + if len(result.raw.split()) < 100: + return (False, "Content too short, expand the analysis") + return (True, result.raw) + +# LLM-based (string prompt) +task = Task(..., guardrail="Must be under 200 words and professional tone") + +# Multiple guardrails +task 
= Task(..., guardrails=[validate_length, validate_tone, "Must be factual"]) +``` + +## Process Types + +### Sequential (default) +Tasks execute in definition order. Output of one task serves as context for the next. +```python +Crew(agents=..., tasks=..., process=Process.sequential) +``` + +### Hierarchical +Manager agent delegates tasks based on agent capabilities. Requires `manager_llm` or `manager_agent`. +```python +Crew( + agents=..., + tasks=..., + process=Process.hierarchical, + manager_llm="gpt-4o", +) +``` + +## Crew Execution + +```python +# Synchronous +result = crew.kickoff(inputs={"topic": "AI"}) +print(result.raw) # String output +print(result.pydantic) # Structured output (if configured) +print(result.json_dict) # Dict output +print(result.token_usage) # Token metrics +print(result.tasks_output) # List[TaskOutput] + +# Async (native) +result = await crew.akickoff(inputs={"topic": "AI"}) + +# Batch execution +results = crew.kickoff_for_each(inputs=[{"topic": "AI"}, {"topic": "ML"}]) + +# Streaming output (v1.8.0+) +crew = Crew(agents=..., tasks=..., stream=True) +streaming = crew.kickoff(inputs={"topic": "AI"}) +for chunk in streaming: + print(chunk.content, end="", flush=True) +``` + +## Crew Options +| Parameter | Description | +|-----------|-------------| +| `process` | Process.sequential or Process.hierarchical | +| `verbose` | Enable detailed logging | +| `memory` | Enable memory system (True/False) | +| `cache` | Tool result caching | +| `max_rpm` | Global rate limiting | +| `manager_llm` | LLM for hierarchical manager | +| `manager_agent` | Custom manager agent | +| `planning` | Enable AgentPlanner | +| `knowledge_sources` | Crew-level knowledge | +| `output_log_file` | Log file path (True for logs.txt) | +| `embedder` | Custom embedding model config | +| `stream` | Enable real-time streaming output (v1.8.0+) | + +--- + +## Flows + +### Basic Flow +```python +from crewai.flow.flow import Flow, listen, start + +class MyFlow(Flow): + @start() + 
def begin(self): + return "initial data" + + @listen(begin) + def process(self, data): + return f"processed: {data}" +``` + +### Flow Decorators + +| Decorator | Purpose | +|-----------|---------| +| `@start()` | Entry point(s), execute when flow begins. Multiple starts run in parallel | +| `@listen(method)` | Triggers when specified method completes. Receives output as argument | +| `@router(method)` | Conditional branching. Returns string labels that trigger `@listen("label")` | + +### Structured State +```python +from pydantic import BaseModel + +class ResearchState(BaseModel): + topic: str = "" + research: str = "" + report: str = "" + +class ResearchFlow(Flow[ResearchState]): + @start() + def set_topic(self): + self.state.topic = "AI Agents" + + @listen(set_topic) + def do_research(self): + # self.state.topic is available + result = ResearchCrew().crew().kickoff( + inputs={"topic": self.state.topic} + ) + self.state.research = result.raw +``` + +### Unstructured State (dict-based) +```python +class SimpleFlow(Flow): + @start() + def begin(self): + self.state["counter"] = 0 # Dict access + + @listen(begin) + def increment(self): + self.state["counter"] += 1 +``` + +### Conditional Routing +```python +from crewai.flow.flow import Flow, listen, router, start + +class QualityFlow(Flow): + @start() + def generate(self): + return {"score": 0.85} + + @router(generate) + def check_quality(self, result): + if result["score"] > 0.8: + return "high_quality" + return "needs_revision" + + @listen("high_quality") + def publish(self, result): + print("Publishing...") + + @listen("needs_revision") + def revise(self, result): + print("Revising...") +``` + +### Parallel Triggers with or_ and and_ +```python +from crewai.flow.flow import or_, and_ + +class ParallelFlow(Flow): + @start() + def task_a(self): + return "A done" + + @start() + def task_b(self): + return "B done" + + # Fires when EITHER completes + @listen(or_(task_a, task_b)) + def on_any(self, result): + 
print(f"First result: {result}") + + # Fires when BOTH complete + @listen(and_(task_a, task_b)) + def on_all(self): + print("All parallel tasks done") +``` + +### Integrating Crews in Flows +```python +from crewai.flow.flow import Flow, listen, start +from my_project.crews.research_crew.research_crew import ResearchCrew +from my_project.crews.writing_crew.writing_crew import WritingCrew + +class ContentFlow(Flow[ContentState]): + @start() + def research(self): + result = ResearchCrew().crew().kickoff( + inputs={"topic": self.state.topic} + ) + self.state.research = result.raw + + @listen(research) + def write(self): + result = WritingCrew().crew().kickoff( + inputs={ + "topic": self.state.topic, + "research": self.state.research, + } + ) + self.state.article = result.raw +``` + +### Using Agents Directly in Flows +```python +from crewai.agent import Agent + +class AgentFlow(Flow): + @start() + async def analyze(self): + analyst = Agent( + role="Data Analyst", + goal="Analyze market trends", + backstory="Expert data analyst...", + tools=[SerperDevTool()], + ) + result = await analyst.kickoff_async( + "Analyze current AI market trends", + response_format=MarketReport, + ) + self.state.report = result.pydantic +``` + +### Human-in-the-Loop (v1.8.0+) +```python +from crewai.flow.flow import Flow, listen, start +from crewai.flow.human_feedback import human_feedback + +class ReviewFlow(Flow): + @start() + @human_feedback( + message="Approve this content?", + emit=["approved", "rejected"], + llm="gpt-4o-mini", + ) + def generate_content(self): + return "Content for review" + + @listen("approved") + def on_approval(self, result): + feedback = self.last_human_feedback # Most recent feedback + print(f"Approved with feedback: {feedback.feedback}") + + @listen("rejected") + def on_rejection(self, result): + history = self.human_feedback_history # All feedback as list + print("Rejected, revising...") +``` + +### State Persistence +```python +from crewai.flow.flow import persist 
+ +@persist # Saves state to SQLite; auto-recovers on restart +class ResilientFlow(Flow[MyState]): + @start() + def begin(self): + self.state.step = 1 +``` + +### Flow Execution +```python +flow = MyFlow() +result = flow.kickoff() +print(result) # Output of last method +print(flow.state) # Final state + +# Async execution +result = await flow.kickoff_async(inputs={"key": "value"}) +``` + +### Flow Streaming (v1.8.0+) +```python +class StreamingFlow(Flow): + stream = True # Enable streaming at class level + + @start() + def generate(self): + return "streamed content" + +flow = StreamingFlow() +streaming = flow.kickoff() +for chunk in streaming: + print(chunk.content, end="", flush=True) +result = streaming.result # Final result after iteration +``` + +### Flow Visualization +```python +flow.plot("my_flow") # Generates my_flow.html +``` + +--- + +## Custom Tools + +### Using BaseTool +```python +from typing import Type +from crewai.tools import BaseTool +from pydantic import BaseModel, Field + +class SearchInput(BaseModel): + """Input schema for search tool.""" + query: str = Field(..., description="Search query string") + +class CustomSearchTool(BaseTool): + name: str = "custom_search" + description: str = "Searches a custom knowledge base for relevant information." 
+Media: DallETool, YoutubeChannelSearchTool, YoutubeVideoSearchTool
+ +--- + +## Knowledge System + +```python +from crewai.knowledge.source.string_knowledge_source import StringKnowledgeSource +from crewai.knowledge.source.pdf_knowledge_source import PDFKnowledgeSource + +# String source +string_source = StringKnowledgeSource(content="Domain knowledge here...") + +# PDF source +pdf_source = PDFKnowledgeSource(file_paths=["docs/manual.pdf"]) + +# Agent-level knowledge +agent = Agent(..., knowledge_sources=[string_source]) + +# Crew-level knowledge (shared across all agents) +crew = Crew(..., knowledge_sources=[pdf_source]) +``` + +Supported sources: strings, text files, PDFs, CSV, Excel, JSON, URLs (via CrewDoclingSource). + +--- + +## Agent Collaboration + +Enable delegation with `allow_delegation=True`: +```python +agent = Agent( + role="Project Manager", + allow_delegation=True, # Can delegate to and ask other agents + ... +) +``` + +- **Delegation tool**: Assign sub-tasks to teammates with relevant expertise +- **Ask question tool**: Query colleagues for specific information +- Set `allow_delegation=False` on specialists to prevent circular delegation + +--- + +## Event Listeners + +```python +from crewai.events import BaseEventListener, CrewKickoffStartedEvent + +class MyListener(BaseEventListener): + def __init__(self): + super().__init__() + + def setup_listeners(self, crewai_event_bus): + @crewai_event_bus.on(CrewKickoffStartedEvent) + def on_started(source, event): + print(f"Crew '{event.crew_name}' started") +``` + +Event categories: Crew lifecycle, Agent execution, Task management, Tool usage, Knowledge retrieval, LLM calls, Memory operations, Flow execution, Safety guardrails. 
+ +--- + +## Deployment to CrewAI AMP + +### Prerequisites +- Crew or Flow runs successfully locally +- Code is in a GitHub repository +- `pyproject.toml` has `[tool.crewai]` with correct type (`"crew"` or `"flow"`) +- `uv.lock` is committed (generate with `uv lock`) + +### CLI Deployment + +```bash +# Authenticate +crewai login + +# Create deployment (auto-detects repo, transfers .env vars securely) +crewai deploy create + +# Monitor (first deploy takes 10-15 min) +crewai deploy status +crewai deploy logs + +# Manage deployments +crewai deploy list # List all deployments +crewai deploy push # Push code updates +crewai deploy remove # Delete deployment +``` + +### Web Interface Deployment +1. Push code to GitHub +2. Log into https://app.crewai.com +3. Connect GitHub and select repository +4. Configure environment variables (KEY=VALUE, one per line) +5. Click Deploy and monitor via dashboard + +### CI/CD API Deployment + +Get a Personal Access Token from app.crewai.com → Settings → Account → Personal Access Token. +Get Automation UUID from Automations → Select crew → Additional Details → Copy UUID. 
+- Entry point: `src/<project_name>/main.py`
+# Override default model: +MODEL=gpt-4o +``` + +### Python Version +Python >=3.10, <3.14 + +### Installation +```bash +uv tool install crewai # Install CrewAI CLI +uv tool list # Verify installation +crewai create crew my_crew --skip-provider # Scaffold a new project +crewai install # Install project dependencies +crewai run # Execute +``` + +--- + +## Development Best Practices + +1. **YAML-first configuration**: Define agents and tasks in YAML, keep crew classes minimal +2. **Check built-in tools** before writing custom ones +3. **Use structured output** (output_pydantic) for data that flows between tasks or crews +4. **Use guardrails** to validate task outputs programmatically +5. **Enable memory** for crews that benefit from cross-session learning +6. **Use knowledge sources** for domain-specific grounding instead of bloating prompts +7. **Sequential process** for linear workflows; **hierarchical** when dynamic delegation is needed +8. **Flows for multi-crew orchestration**: Use `@start`, `@listen`, `@router` for complex pipelines +9. **Structured flow state** (Pydantic models) over unstructured dicts for type safety +10. **Test with** `crewai test` to evaluate crew performance across iterations +11. **Verbose mode** during development, disable in production +12. **Rate limiting** (`max_rpm`) to avoid API throttling +13. 
**`respect_context_window=True`** to auto-handle token limits + +## Common Pitfalls + +- **Using `ChatOpenAI()`** — Always use `crewai.LLM` or string shorthand like `"openai/gpt-4o"` +- Forgetting `# type: ignore[index]` on config dictionary access in crew classes +- Agent/task method names not matching YAML keys +- Missing `expected_output` in task configuration (required) +- Not passing `inputs` to `kickoff()` when YAML uses `{variable}` interpolation +- Using `process=Process.hierarchical` without setting `manager_llm` or `manager_agent` +- Circular delegation: set `allow_delegation=False` on specialist agents +- Not installing tools package: `uv add crewai-tools` diff --git a/lib/crewai/src/crewai/cli/tools/main.py b/lib/crewai/src/crewai/cli/tools/main.py index 37467a906..e2dd21dde 100644 --- a/lib/crewai/src/crewai/cli/tools/main.py +++ b/lib/crewai/src/crewai/cli/tools/main.py @@ -2,6 +2,7 @@ import base64 from json import JSONDecodeError import os from pathlib import Path +import shutil import subprocess import tempfile from typing import Any @@ -55,6 +56,11 @@ class ToolCommand(BaseCommand, PlusAPIMixin): tree_find_and_replace(project_root, "{{folder_name}}", folder_name) tree_find_and_replace(project_root, "{{class_name}}", class_name) + # Copy AGENTS.md to project root + agents_md_src = Path(__file__).parent.parent / "templates" / "AGENTS.md" + if agents_md_src.exists(): + shutil.copy2(agents_md_src, project_root / "AGENTS.md") + old_directory = os.getcwd() os.chdir(project_root) try: From fc3e86e9a3bd9b070f3d454459af699e32ffa98b Mon Sep 17 00:00:00 2001 From: Lucas Gomide Date: Wed, 11 Feb 2026 17:17:54 -0300 Subject: [PATCH 7/9] docs Adding 96 missing actions across 9 integrations (#4460) * docs: add missing integration actions from OAuth config Sync enterprise integration docs with crewai-oauth apps.js config. 
Adds ~96 missing actions across 9 integrations: - Google Contacts: 4 contact group actions - Google Slides: 14 slide manipulation/content actions - Microsoft SharePoint: 27 file, Excel, and Word actions - Microsoft Excel: 2 actions (get_used_range_metadata, get_table_data) - Microsoft Word: 2 actions (copy_document, move_document) - Google Docs: 27 text formatting, table, and header/footer actions - Microsoft Outlook: 7 message and calendar event actions - Microsoft OneDrive: 5 path-based and discovery actions - Microsoft Teams: 8 meeting, channel, and reply actions * docs: add missing integration actions from OAuth config Sync pt-BR enterprise integration docs with crewai-oauth apps.js config. Adds ~96 missing actions across 9 integrations, translated to Portuguese: - Google Contacts: 2 contact group actions - Google Slides: 14 slide manipulation/content actions - Microsoft SharePoint: 27 file, Excel, and Word actions - Microsoft Excel: 2 actions (get_used_range_metadata, get_table_data) - Microsoft Word: 2 actions (copy_document, move_document) - Google Docs: 27 text formatting, table, and header/footer actions - Microsoft Outlook: 7 message and calendar event actions - Microsoft OneDrive: 5 path-based and discovery actions - Microsoft Teams: 8 meeting, channel, and reply actions * docs: add missing integration actions from OAuth config Sync Korean enterprise integration docs with crewai-oauth apps.js config. 
Adds ~96 missing actions across 9 integrations, translated to Korean: - Google Contacts: 2 contact group actions - Google Slides: 14 slide manipulation/content actions - Microsoft SharePoint: 27 file, Excel, and Word actions - Microsoft Excel: 2 actions (get_used_range_metadata, get_table_data) - Microsoft Word: 2 actions (copy_document, move_document) - Google Docs: 27 text formatting, table, and header/footer actions - Microsoft Outlook: 7 message and calendar event actions - Microsoft OneDrive: 5 path-based and discovery actions - Microsoft Teams: 8 meeting, channel, and reply actions --------- Co-authored-by: Greyson LaLonde --- .../integrations/google_contacts.mdx | 54 +++ .../enterprise/integrations/google_docs.mdx | 291 ++++++++++++++++ .../enterprise/integrations/google_slides.mdx | 139 ++++++++ .../integrations/microsoft_excel.mdx | 19 ++ .../integrations/microsoft_onedrive.mdx | 43 +++ .../integrations/microsoft_outlook.mdx | 68 ++++ .../integrations/microsoft_sharepoint.mdx | 322 +++++++++++++++++- .../integrations/microsoft_teams.mdx | 80 +++++ .../integrations/microsoft_word.mdx | 20 ++ .../integrations/google_contacts.mdx | 19 ++ .../enterprise/integrations/google_docs.mdx | 291 ++++++++++++++++ .../enterprise/integrations/google_slides.mdx | 139 ++++++++ .../integrations/microsoft_excel.mdx | 19 ++ .../integrations/microsoft_onedrive.mdx | 123 +++++++ .../integrations/microsoft_outlook.mdx | 142 ++++++++ .../integrations/microsoft_sharepoint.mdx | 322 +++++++++++++++++- .../integrations/microsoft_teams.mdx | 160 +++++++++ .../integrations/microsoft_word.mdx | 20 ++ .../integrations/google_contacts.mdx | 19 ++ .../enterprise/integrations/google_docs.mdx | 291 ++++++++++++++++ .../enterprise/integrations/google_slides.mdx | 139 ++++++++ .../integrations/microsoft_excel.mdx | 19 ++ .../integrations/microsoft_onedrive.mdx | 43 +++ .../integrations/microsoft_outlook.mdx | 68 ++++ .../integrations/microsoft_sharepoint.mdx | 322 +++++++++++++++++- 
.../integrations/microsoft_teams.mdx | 80 +++++ .../integrations/microsoft_word.mdx | 20 ++ 27 files changed, 3251 insertions(+), 21 deletions(-) diff --git a/docs/en/enterprise/integrations/google_contacts.mdx b/docs/en/enterprise/integrations/google_contacts.mdx index 2e8de6aaf..755c86b49 100644 --- a/docs/en/enterprise/integrations/google_contacts.mdx +++ b/docs/en/enterprise/integrations/google_contacts.mdx @@ -224,6 +224,60 @@ CREWAI_PLATFORM_INTEGRATION_TOKEN=your_enterprise_token - `groupFields` (string, optional): Fields to include (e.g., 'name,memberCount,clientData'). Default: name,memberCount + + + **Description:** Get a specific contact group by resource name. + + **Parameters:** + - `resourceName` (string, required): The resource name of the contact group (e.g., 'contactGroups/myContactGroup') + - `maxMembers` (integer, optional): Maximum number of members to include. Minimum: 0, Maximum: 20000 + - `groupFields` (string, optional): Fields to include (e.g., 'name,memberCount,clientData'). Default: name,memberCount + + + + + **Description:** Create a new contact group (label). + + **Parameters:** + - `name` (string, required): The name of the contact group + - `clientData` (array, optional): Client-specific data + ```json + [ + { + "key": "data_key", + "value": "data_value" + } + ] + ``` + + + + + **Description:** Update a contact group's information. + + **Parameters:** + - `resourceName` (string, required): The resource name of the contact group (e.g., 'contactGroups/myContactGroup') + - `name` (string, required): The name of the contact group + - `clientData` (array, optional): Client-specific data + ```json + [ + { + "key": "data_key", + "value": "data_value" + } + ] + ``` + + + + + **Description:** Delete a contact group. 
+ + **Parameters:** + - `resourceName` (string, required): The resource name of the contact group to delete (e.g., 'contactGroups/myContactGroup') + - `deleteContacts` (boolean, optional): Whether to delete contacts in the group as well. Default: false + + ## Usage Examples diff --git a/docs/en/enterprise/integrations/google_docs.mdx b/docs/en/enterprise/integrations/google_docs.mdx index 0445cfe79..2cfc4fc51 100644 --- a/docs/en/enterprise/integrations/google_docs.mdx +++ b/docs/en/enterprise/integrations/google_docs.mdx @@ -132,6 +132,297 @@ CREWAI_PLATFORM_INTEGRATION_TOKEN=your_enterprise_token - `endIndex` (integer, required): The end index of the range. + + + **Description:** Create a new Google Document with content in one action. + + **Parameters:** + - `title` (string, required): The title for the new document. Appears at the top of the document and in Google Drive. + - `content` (string, optional): The text content to insert into the document. Use `\n` for new paragraphs. + + + + + **Description:** Append text to the end of a Google Document. Automatically inserts at the document end without needing to specify an index. + + **Parameters:** + - `documentId` (string, required): The document ID from create_document response or URL. + - `text` (string, required): Text to append at the end of the document. Use `\n` for new paragraphs. + + + + + **Description:** Make text bold or remove bold formatting in a Google Document. + + **Parameters:** + - `documentId` (string, required): The document ID. + - `startIndex` (integer, required): Start position of text to format. + - `endIndex` (integer, required): End position of text to format (exclusive). + - `bold` (boolean, required): Set `true` to make bold, `false` to remove bold. + + + + + **Description:** Make text italic or remove italic formatting in a Google Document. + + **Parameters:** + - `documentId` (string, required): The document ID. + - `startIndex` (integer, required): Start position of text to format. 
+ - `endIndex` (integer, required): End position of text to format (exclusive). + - `italic` (boolean, required): Set `true` to make italic, `false` to remove italic. + + + + + **Description:** Add or remove underline formatting from text in a Google Document. + + **Parameters:** + - `documentId` (string, required): The document ID. + - `startIndex` (integer, required): Start position of text to format. + - `endIndex` (integer, required): End position of text to format (exclusive). + - `underline` (boolean, required): Set `true` to underline, `false` to remove underline. + + + + + **Description:** Add or remove strikethrough formatting from text in a Google Document. + + **Parameters:** + - `documentId` (string, required): The document ID. + - `startIndex` (integer, required): Start position of text to format. + - `endIndex` (integer, required): End position of text to format (exclusive). + - `strikethrough` (boolean, required): Set `true` to add strikethrough, `false` to remove. + + + + + **Description:** Change the font size of text in a Google Document. + + **Parameters:** + - `documentId` (string, required): The document ID. + - `startIndex` (integer, required): Start position of text to format. + - `endIndex` (integer, required): End position of text to format (exclusive). + - `fontSize` (number, required): Font size in points. Common sizes: 10, 11, 12, 14, 16, 18, 24, 36. + + + + + **Description:** Change the color of text using RGB values (0-1 scale) in a Google Document. + + **Parameters:** + - `documentId` (string, required): The document ID. + - `startIndex` (integer, required): Start position of text to format. + - `endIndex` (integer, required): End position of text to format (exclusive). + - `red` (number, required): Red component (0-1). Example: `1` for full red. + - `green` (number, required): Green component (0-1). Example: `0.5` for half green. + - `blue` (number, required): Blue component (0-1). Example: `0` for no blue. 
+ + + + + **Description:** Turn existing text into a clickable hyperlink in a Google Document. + + **Parameters:** + - `documentId` (string, required): The document ID. + - `startIndex` (integer, required): Start position of text to make into a link. + - `endIndex` (integer, required): End position of text to make into a link (exclusive). + - `url` (string, required): The URL the link should point to. Example: `"https://example.com"`. + + + + + **Description:** Apply a heading or paragraph style to a text range in a Google Document. + + **Parameters:** + - `documentId` (string, required): The document ID. + - `startIndex` (integer, required): Start position of paragraph(s) to style. + - `endIndex` (integer, required): End position of paragraph(s) to style. + - `style` (string, required): The style to apply. Enum: `NORMAL_TEXT`, `TITLE`, `SUBTITLE`, `HEADING_1`, `HEADING_2`, `HEADING_3`, `HEADING_4`, `HEADING_5`, `HEADING_6`. + + + + + **Description:** Set text alignment for paragraphs in a Google Document. + + **Parameters:** + - `documentId` (string, required): The document ID. + - `startIndex` (integer, required): Start position of paragraph(s) to align. + - `endIndex` (integer, required): End position of paragraph(s) to align. + - `alignment` (string, required): Text alignment. Enum: `START` (left), `CENTER`, `END` (right), `JUSTIFIED`. + + + + + **Description:** Set line spacing for paragraphs in a Google Document. + + **Parameters:** + - `documentId` (string, required): The document ID. + - `startIndex` (integer, required): Start position of paragraph(s). + - `endIndex` (integer, required): End position of paragraph(s). + - `lineSpacing` (number, required): Line spacing as percentage. `100` = single, `115` = 1.15x, `150` = 1.5x, `200` = double. + + + + + **Description:** Convert paragraphs to a bulleted or numbered list in a Google Document. + + **Parameters:** + - `documentId` (string, required): The document ID. 
+ - `startIndex` (integer, required): Start position of paragraphs to convert to list. + - `endIndex` (integer, required): End position of paragraphs to convert to list. + - `bulletPreset` (string, required): Bullet/numbering style. Enum: `BULLET_DISC_CIRCLE_SQUARE`, `BULLET_DIAMONDX_ARROW3D_SQUARE`, `BULLET_CHECKBOX`, `BULLET_ARROW_DIAMOND_DISC`, `BULLET_STAR_CIRCLE_SQUARE`, `NUMBERED_DECIMAL_ALPHA_ROMAN`, `NUMBERED_DECIMAL_ALPHA_ROMAN_PARENS`, `NUMBERED_DECIMAL_NESTED`, `NUMBERED_UPPERALPHA_ALPHA_ROMAN`, `NUMBERED_UPPERROMAN_UPPERALPHA_DECIMAL`. + + + + + **Description:** Remove bullets or numbering from paragraphs in a Google Document. + + **Parameters:** + - `documentId` (string, required): The document ID. + - `startIndex` (integer, required): Start position of list paragraphs. + - `endIndex` (integer, required): End position of list paragraphs. + + + + + **Description:** Insert a table with content into a Google Document in one action. Provide content as a 2D array. + + **Parameters:** + - `documentId` (string, required): The document ID. + - `rows` (integer, required): Number of rows in the table. + - `columns` (integer, required): Number of columns in the table. + - `index` (integer, optional): Position to insert the table. If not provided, the table is inserted at the end of the document. + - `content` (array, required): Table content as a 2D array. Each inner array is a row. Example: `[["Year", "Revenue"], ["2023", "$43B"], ["2024", "$45B"]]`. + + + + + **Description:** Insert a new row above or below a reference cell in an existing table. + + **Parameters:** + - `documentId` (string, required): The document ID. + - `tableStartIndex` (integer, required): The start index of the table. Get from get_document. + - `rowIndex` (integer, required): Row index (0-based) of reference cell. + - `columnIndex` (integer, optional): Column index (0-based) of reference cell. Default is `0`. + - `insertBelow` (boolean, optional): If `true`, insert below the reference row. 
If `false`, insert above. Default is `true`. + + + + + **Description:** Insert a new column left or right of a reference cell in an existing table. + + **Parameters:** + - `documentId` (string, required): The document ID. + - `tableStartIndex` (integer, required): The start index of the table. + - `rowIndex` (integer, optional): Row index (0-based) of reference cell. Default is `0`. + - `columnIndex` (integer, required): Column index (0-based) of reference cell. + - `insertRight` (boolean, optional): If `true`, insert to the right. If `false`, insert to the left. Default is `true`. + + + + + **Description:** Delete a row from an existing table in a Google Document. + + **Parameters:** + - `documentId` (string, required): The document ID. + - `tableStartIndex` (integer, required): The start index of the table. + - `rowIndex` (integer, required): Row index (0-based) to delete. + - `columnIndex` (integer, optional): Column index (0-based) of any cell in the row. Default is `0`. + + + + + **Description:** Delete a column from an existing table in a Google Document. + + **Parameters:** + - `documentId` (string, required): The document ID. + - `tableStartIndex` (integer, required): The start index of the table. + - `rowIndex` (integer, optional): Row index (0-based) of any cell in the column. Default is `0`. + - `columnIndex` (integer, required): Column index (0-based) to delete. + + + + + **Description:** Merge a range of table cells into a single cell. Content from all cells is preserved. + + **Parameters:** + - `documentId` (string, required): The document ID. + - `tableStartIndex` (integer, required): The start index of the table. + - `rowIndex` (integer, required): Starting row index (0-based) for the merge. + - `columnIndex` (integer, required): Starting column index (0-based) for the merge. + - `rowSpan` (integer, required): Number of rows to merge. + - `columnSpan` (integer, required): Number of columns to merge. 
+ + + + + **Description:** Unmerge previously merged table cells back into individual cells. + + **Parameters:** + - `documentId` (string, required): The document ID. + - `tableStartIndex` (integer, required): The start index of the table. + - `rowIndex` (integer, required): Row index (0-based) of the merged cell. + - `columnIndex` (integer, required): Column index (0-based) of the merged cell. + - `rowSpan` (integer, required): Number of rows the merged cell spans. + - `columnSpan` (integer, required): Number of columns the merged cell spans. + + + + + **Description:** Insert an image from a public URL into a Google Document. The image must be publicly accessible, under 50MB, and in PNG/JPEG/GIF format. + + **Parameters:** + - `documentId` (string, required): The document ID. + - `uri` (string, required): Public URL of the image. Must be accessible without authentication. + - `index` (integer, optional): Position to insert the image. If not provided, the image is inserted at the default index `1` (the start of the document body). + + + + + **Description:** Insert a section break to create document sections with different formatting. + + **Parameters:** + - `documentId` (string, required): The document ID. + - `index` (integer, required): Position to insert the section break. + - `sectionType` (string, required): The type of section break. Enum: `CONTINUOUS` (stays on same page), `NEXT_PAGE` (starts a new page). + + + + + **Description:** Create a header for the document. Returns a headerId which can be used with insert_text to add header content. + + **Parameters:** + - `documentId` (string, required): The document ID. + - `type` (string, optional): Header type. Enum: `DEFAULT`. Default is `DEFAULT`. + + + + + **Description:** Create a footer for the document. Returns a footerId which can be used with insert_text to add footer content. + + **Parameters:** + - `documentId` (string, required): The document ID. + - `type` (string, optional): Footer type. Enum: `DEFAULT`. 
Default is `DEFAULT`. + + + + + **Description:** Delete a header from the document. Use get_document to find the headerId. + + **Parameters:** + - `documentId` (string, required): The document ID. + - `headerId` (string, required): The header ID to delete. Get from get_document response. + + + + + **Description:** Delete a footer from the document. Use get_document to find the footerId. + + **Parameters:** + - `documentId` (string, required): The document ID. + - `footerId` (string, required): The footer ID to delete. Get from get_document response. + + ## Usage Examples diff --git a/docs/en/enterprise/integrations/google_slides.mdx b/docs/en/enterprise/integrations/google_slides.mdx index 350d21bf9..20efe0a0a 100644 --- a/docs/en/enterprise/integrations/google_slides.mdx +++ b/docs/en/enterprise/integrations/google_slides.mdx @@ -62,6 +62,22 @@ CREWAI_PLATFORM_INTEGRATION_TOKEN=your_enterprise_token + + **Description:** Get lightweight metadata about a presentation (title, slide count, slide IDs). Use this first before fetching full content. + + **Parameters:** + - `presentationId` (string, required): The ID of the presentation to retrieve. + + + + + **Description:** Extract all text content from a presentation. Returns slide IDs and text from shapes and tables only (no formatting). + + **Parameters:** + - `presentationId` (string, required): The ID of the presentation. + + + **Description:** Retrieves a presentation by ID. @@ -96,6 +112,15 @@ CREWAI_PLATFORM_INTEGRATION_TOKEN=your_enterprise_token + + **Description:** Extract text content from a single slide. Returns only text from shapes and tables (no formatting or styling). + + **Parameters:** + - `presentationId` (string, required): The ID of the presentation. + - `pageObjectId` (string, required): The ID of the slide/page to get text from. + + + **Description:** Retrieves a specific page by its ID. 
@@ -114,6 +139,120 @@ CREWAI_PLATFORM_INTEGRATION_TOKEN=your_enterprise_token + + **Description:** Add an additional blank slide to a presentation. New presentations already have one blank slide - check get_presentation_metadata first. For slides with title/body areas, use create_slide_with_layout instead. + + **Parameters:** + - `presentationId` (string, required): The ID of the presentation. + - `insertionIndex` (integer, optional): Where to insert the slide (0-based). If omitted, adds at the end. + + + + + **Description:** Create a slide with a predefined layout containing placeholder areas for title, body, etc. This is better than create_slide for structured content. After creating, use get_page to find placeholder IDs, then insert text into them. + + **Parameters:** + - `presentationId` (string, required): The ID of the presentation. + - `layout` (string, required): Layout type. One of: `BLANK`, `TITLE`, `TITLE_AND_BODY`, `TITLE_AND_TWO_COLUMNS`, `TITLE_ONLY`, `SECTION_HEADER`, `ONE_COLUMN_TEXT`, `MAIN_POINT`, `BIG_NUMBER`. TITLE_AND_BODY is best for title+description. TITLE for title-only slides. SECTION_HEADER for section dividers. + - `insertionIndex` (integer, optional): Where to insert (0-based). Omit to add at end. + + + + + **Description:** Create a text box on a slide with content. Use this for titles, descriptions, paragraphs - not tables. Optionally specify position (x, y) and size (width, height) in EMU units (914400 EMU = 1 inch). + + **Parameters:** + - `presentationId` (string, required): The ID of the presentation. + - `slideId` (string, required): The ID of the slide to add the text box to. + - `text` (string, required): The text content for the text box. + - `x` (integer, optional): X position in EMU (914400 = 1 inch). Default: 914400 (1 inch from left). + - `y` (integer, optional): Y position in EMU (914400 = 1 inch). Default: 914400 (1 inch from top). + - `width` (integer, optional): Width in EMU. Default: 7315200 (~8 inches). 
+ - `height` (integer, optional): Height in EMU. Default: 914400 (~1 inch). + + + + + **Description:** Remove a slide from the presentation. Use get_presentation first to find the slide ID. + + **Parameters:** + - `presentationId` (string, required): The ID of the presentation. + - `slideId` (string, required): The object ID of the slide to delete. Get from get_presentation. + + + + + **Description:** Create a copy of an existing slide. The duplicate is inserted immediately after the original. + + **Parameters:** + - `presentationId` (string, required): The ID of the presentation. + - `slideId` (string, required): The object ID of the slide to duplicate. Get from get_presentation. + + + + + **Description:** Reorder slides by moving them to a new position. Slide IDs must be in their current presentation order (no duplicates). + + **Parameters:** + - `presentationId` (string, required): The ID of the presentation. + - `slideIds` (array of strings, required): Array of slide IDs to move. Must be in current presentation order. + - `insertionIndex` (integer, required): Target position (0-based). 0 = beginning, slide count = end. + + + + + **Description:** Embed a YouTube video on a slide. The video ID is the value after "v=" in YouTube URLs (e.g., for youtube.com/watch?v=abc123, use "abc123"). + + **Parameters:** + - `presentationId` (string, required): The ID of the presentation. + - `slideId` (string, required): The ID of the slide to add the video to. Get from get_presentation. + - `videoId` (string, required): The YouTube video ID (the value after v= in the URL). + + + + + **Description:** Embed a video from Google Drive on a slide. The file ID can be found in the Drive file URL. + + **Parameters:** + - `presentationId` (string, required): The ID of the presentation. + - `slideId` (string, required): The ID of the slide to add the video to. Get from get_presentation. + - `fileId` (string, required): The Google Drive file ID of the video. 
+ + + + + **Description:** Set a background image for a slide. The image URL must be publicly accessible. + + **Parameters:** + - `presentationId` (string, required): The ID of the presentation. + - `slideId` (string, required): The ID of the slide to set the background for. Get from get_presentation. + - `imageUrl` (string, required): Publicly accessible URL of the image to use as background. + + + + + **Description:** Create an empty table on a slide. To create a table with content, use create_table_with_content instead. + + **Parameters:** + - `presentationId` (string, required): The ID of the presentation. + - `slideId` (string, required): The ID of the slide to add the table to. Get from get_presentation. + - `rows` (integer, required): Number of rows in the table. + - `columns` (integer, required): Number of columns in the table. + + + + + **Description:** Create a table with content in one action. Provide content as a 2D array where each inner array is a row. Example: [["Header1", "Header2"], ["Row1Col1", "Row1Col2"]]. + + **Parameters:** + - `presentationId` (string, required): The ID of the presentation. + - `slideId` (string, required): The ID of the slide to add the table to. Get from get_presentation. + - `rows` (integer, required): Number of rows in the table. + - `columns` (integer, required): Number of columns in the table. + - `content` (array, required): Table content as 2D array. Each inner array is a row. Example: [["Year", "Revenue"], ["2023", "$10M"]]. + + + **Description:** Imports data from a Google Sheet into a presentation. diff --git a/docs/en/enterprise/integrations/microsoft_excel.mdx b/docs/en/enterprise/integrations/microsoft_excel.mdx index 233131c1c..d0fadb7c7 100644 --- a/docs/en/enterprise/integrations/microsoft_excel.mdx +++ b/docs/en/enterprise/integrations/microsoft_excel.mdx @@ -169,6 +169,16 @@ CREWAI_PLATFORM_INTEGRATION_TOKEN=your_enterprise_token + + **Description:** Get data from a specific table in an Excel worksheet. 
+ + **Parameters:** + - `file_id` (string, required): The ID of the Excel file + - `worksheet_name` (string, required): Name of the worksheet + - `table_name` (string, required): Name of the table + + + **Description:** Create a chart in an Excel worksheet. @@ -201,6 +211,15 @@ CREWAI_PLATFORM_INTEGRATION_TOKEN=your_enterprise_token + + **Description:** Get the used range metadata (dimensions only, no data) of an Excel worksheet. + + **Parameters:** + - `file_id` (string, required): The ID of the Excel file + - `worksheet_name` (string, required): Name of the worksheet + + + **Description:** Get all charts in an Excel worksheet. diff --git a/docs/en/enterprise/integrations/microsoft_onedrive.mdx b/docs/en/enterprise/integrations/microsoft_onedrive.mdx index 030ed22ed..30d8077e8 100644 --- a/docs/en/enterprise/integrations/microsoft_onedrive.mdx +++ b/docs/en/enterprise/integrations/microsoft_onedrive.mdx @@ -151,6 +151,49 @@ CREWAI_PLATFORM_INTEGRATION_TOKEN=your_enterprise_token - `item_id` (string, required): The ID of the file. + + + **Description:** List files and folders in a specific OneDrive path. + + **Parameters:** + - `folder_path` (string, required): The folder path (e.g., 'Documents/Reports'). + - `top` (integer, optional): Number of items to retrieve (max 1000). Default is `50`. + - `orderby` (string, optional): Order by field (e.g., "name asc", "lastModifiedDateTime desc"). Default is "name asc". + + + + + **Description:** Get recently accessed files from OneDrive. + + **Parameters:** + - `top` (integer, optional): Number of items to retrieve (max 200). Default is `25`. + + + + + **Description:** Get files and folders shared with the user. + + **Parameters:** + - `top` (integer, optional): Number of items to retrieve (max 200). Default is `50`. + - `orderby` (string, optional): Order by field. Default is "name asc". + + + + + **Description:** Get information about a specific file or folder by path. 
+ + **Parameters:** + - `file_path` (string, required): The file or folder path (e.g., 'Documents/report.docx'). + + + + + **Description:** Download a file from OneDrive by its path. + + **Parameters:** + - `file_path` (string, required): The file path (e.g., 'Documents/report.docx'). + + ## Usage Examples diff --git a/docs/en/enterprise/integrations/microsoft_outlook.mdx b/docs/en/enterprise/integrations/microsoft_outlook.mdx index 50a8a3085..c25d18e82 100644 --- a/docs/en/enterprise/integrations/microsoft_outlook.mdx +++ b/docs/en/enterprise/integrations/microsoft_outlook.mdx @@ -133,6 +133,74 @@ CREWAI_PLATFORM_INTEGRATION_TOKEN=your_enterprise_token - `companyName` (string, optional): Contact's company name. + + + **Description:** Get a specific email message by ID. + + **Parameters:** + - `message_id` (string, required): The unique identifier of the message. Obtain from get_messages action. + - `select` (string, optional): Comma-separated list of properties to return. Example: "id,subject,body,from,receivedDateTime". Default is "id,subject,body,from,toRecipients,receivedDateTime". + + + + + **Description:** Reply to an email message. + + **Parameters:** + - `message_id` (string, required): The unique identifier of the message to reply to. Obtain from get_messages action. + - `comment` (string, required): The reply message content. Can be plain text or HTML. The original message will be quoted below this content. + + + + + **Description:** Forward an email message. + + **Parameters:** + - `message_id` (string, required): The unique identifier of the message to forward. Obtain from get_messages action. + - `to_recipients` (array, required): Array of recipient email addresses to forward to. Example: ["john@example.com", "jane@example.com"]. + - `comment` (string, optional): Optional message to include above the forwarded content. Can be plain text or HTML. + + + + + **Description:** Mark a message as read or unread. 
+ + **Parameters:** + - `message_id` (string, required): The unique identifier of the message. Obtain from get_messages action. + - `is_read` (boolean, required): Set to true to mark as read, false to mark as unread. + + + + + **Description:** Delete an email message. + + **Parameters:** + - `message_id` (string, required): The unique identifier of the message to delete. Obtain from get_messages action. + + + + + **Description:** Update an existing calendar event. + + **Parameters:** + - `event_id` (string, required): The unique identifier of the event. Obtain from get_calendar_events action. + - `subject` (string, optional): New subject/title for the event. + - `start_time` (string, optional): New start time in ISO 8601 format (e.g., "2024-01-20T10:00:00"). REQUIRED: Must also provide start_timezone when using this field. + - `start_timezone` (string, optional): Timezone for start time. REQUIRED when updating start_time. Examples: "Pacific Standard Time", "Eastern Standard Time", "UTC". + - `end_time` (string, optional): New end time in ISO 8601 format. REQUIRED: Must also provide end_timezone when using this field. + - `end_timezone` (string, optional): Timezone for end time. REQUIRED when updating end_time. Examples: "Pacific Standard Time", "Eastern Standard Time", "UTC". + - `location` (string, optional): New location for the event. + - `body` (string, optional): New body/description for the event. Supports HTML formatting. + + + + + **Description:** Delete a calendar event. + + **Parameters:** + - `event_id` (string, required): The unique identifier of the event to delete. Obtain from get_calendar_events action. 
+ + ## Usage Examples diff --git a/docs/en/enterprise/integrations/microsoft_sharepoint.mdx b/docs/en/enterprise/integrations/microsoft_sharepoint.mdx index 1ffa75c6c..ab5f310f3 100644 --- a/docs/en/enterprise/integrations/microsoft_sharepoint.mdx +++ b/docs/en/enterprise/integrations/microsoft_sharepoint.mdx @@ -78,6 +78,17 @@ CREWAI_PLATFORM_INTEGRATION_TOKEN=your_enterprise_token + + **Description:** List all document libraries (drives) in a SharePoint site. Use this to discover available libraries before using file operations. + + **Parameters:** + - `site_id` (string, required): The full SharePoint site identifier from get_sites + - `top` (integer, optional): Maximum number of drives to return per page (1-999). Default is 100 + - `skip_token` (string, optional): Pagination token from a previous response to fetch the next page of results + - `select` (string, optional): Comma-separated list of properties to return (e.g., 'id,name,webUrl,driveType') + + + **Description:** Get all lists in a SharePoint site. @@ -159,20 +170,317 @@ CREWAI_PLATFORM_INTEGRATION_TOKEN=your_enterprise_token - - **Description:** Get files and folders from a SharePoint document library. + + **Description:** Retrieve files and folders from a SharePoint document library. By default lists the root folder, but you can navigate into subfolders by providing a folder_id. **Parameters:** - - `site_id` (string, required): The ID of the SharePoint site + - `site_id` (string, required): The full SharePoint site identifier from get_sites + - `drive_id` (string, required): The ID of the document library. Call get_drives first to get valid drive IDs + - `folder_id` (string, optional): The ID of the folder to list contents from. Use 'root' for the root folder, or provide a folder ID from a previous list_files call. Default is 'root' + - `top` (integer, optional): Maximum number of items to return per page (1-1000). 
Default is 50 + - `skip_token` (string, optional): Pagination token from a previous response to fetch the next page of results + - `orderby` (string, optional): Sort order for results (e.g., 'name asc', 'size desc', 'lastModifiedDateTime desc'). Default is 'name asc' + - `filter` (string, optional): OData filter to narrow results (e.g., 'file ne null' for files only, 'folder ne null' for folders only) + - `select` (string, optional): Comma-separated list of fields to return (e.g., 'id,name,size,folder,file,webUrl,lastModifiedDateTime') - - **Description:** Delete a file or folder from SharePoint document library. + + **Description:** Delete a file or folder from a SharePoint document library. For folders, all contents are deleted recursively. Items are moved to the site recycle bin. **Parameters:** - - `site_id` (string, required): The ID of the SharePoint site - - `item_id` (string, required): The ID of the file or folder to delete + - `site_id` (string, required): The full SharePoint site identifier from get_sites + - `drive_id` (string, required): The ID of the document library. Call get_drives first to get valid drive IDs + - `item_id` (string, required): The unique identifier of the file or folder to delete. Obtain from list_files + + + + + **Description:** List files and folders in a SharePoint document library folder by its path. More efficient than multiple list_files calls for deep navigation. + + **Parameters:** + - `site_id` (string, required): The full SharePoint site identifier from get_sites + - `drive_id` (string, required): The ID of the document library. Call get_drives first to get valid drive IDs + - `folder_path` (string, required): The full path to the folder without leading/trailing slashes (e.g., 'Documents', 'Reports/2024/Q1') + - `top` (integer, optional): Maximum number of items to return per page (1-1000). 
Default is 50 + - `skip_token` (string, optional): Pagination token from a previous response to fetch the next page of results + - `orderby` (string, optional): Sort order for results (e.g., 'name asc', 'size desc'). Default is 'name asc' + - `select` (string, optional): Comma-separated list of fields to return (e.g., 'id,name,size,folder,file,webUrl,lastModifiedDateTime') + + + + + **Description:** Download raw file content from a SharePoint document library. Use only for plain text files (.txt, .csv, .json). For Excel files, use the Excel-specific actions. For Word files, use get_word_document_content. + + **Parameters:** + - `site_id` (string, required): The full SharePoint site identifier from get_sites + - `drive_id` (string, required): The ID of the document library. Call get_drives first to get valid drive IDs + - `item_id` (string, required): The unique identifier of the file to download. Obtain from list_files or list_files_by_path + + + + + **Description:** Retrieve detailed metadata for a specific file or folder in a SharePoint document library, including name, size, created/modified dates, and author information. + + **Parameters:** + - `site_id` (string, required): The full SharePoint site identifier from get_sites + - `drive_id` (string, required): The ID of the document library. Call get_drives first to get valid drive IDs + - `item_id` (string, required): The unique identifier of the file or folder. Obtain from list_files or list_files_by_path + - `select` (string, optional): Comma-separated list of properties to return (e.g., 'id,name,size,createdDateTime,lastModifiedDateTime,webUrl,createdBy,lastModifiedBy') + + + + + **Description:** Create a new folder in a SharePoint document library. By default creates the folder in the root; use parent_id to create subfolders. + + **Parameters:** + - `site_id` (string, required): The full SharePoint site identifier from get_sites + - `drive_id` (string, required): The ID of the document library. 
Call get_drives first to get valid drive IDs + - `folder_name` (string, required): Name for the new folder. Cannot contain: \ / : * ? " < > | + - `parent_id` (string, optional): The ID of the parent folder. Use 'root' for the document library root, or provide a folder ID from list_files. Default is 'root' + + + + + **Description:** Search for files and folders in a SharePoint document library by keywords. Searches file names, folder names, and file contents for Office documents. Do not use wildcards or special characters. + + **Parameters:** + - `site_id` (string, required): The full SharePoint site identifier from get_sites + - `drive_id` (string, required): The ID of the document library. Call get_drives first to get valid drive IDs + - `query` (string, required): Search keywords (e.g., 'report', 'budget 2024'). Wildcards like *.txt are not supported + - `top` (integer, optional): Maximum number of results to return per page (1-1000). Default is 50 + - `skip_token` (string, optional): Pagination token from a previous response to fetch the next page of results + - `select` (string, optional): Comma-separated list of fields to return (e.g., 'id,name,size,folder,file,webUrl,lastModifiedDateTime') + + + + + **Description:** Copy a file or folder to a new location within SharePoint. The original item remains unchanged. The copy operation is asynchronous for large files. + + **Parameters:** + - `site_id` (string, required): The full SharePoint site identifier from get_sites + - `drive_id` (string, required): The ID of the document library. Call get_drives first to get valid drive IDs + - `item_id` (string, required): The unique identifier of the file or folder to copy. Obtain from list_files or search_files + - `destination_folder_id` (string, required): The ID of the destination folder. Use 'root' for the root folder, or a folder ID from list_files + - `new_name` (string, optional): New name for the copy. 
If not provided, the original name is used + + + + + **Description:** Move a file or folder to a new location within SharePoint. The item is removed from its original location. For folders, all contents are moved as well. + + **Parameters:** + - `site_id` (string, required): The full SharePoint site identifier from get_sites + - `drive_id` (string, required): The ID of the document library. Call get_drives first to get valid drive IDs + - `item_id` (string, required): The unique identifier of the file or folder to move. Obtain from list_files or search_files + - `destination_folder_id` (string, required): The ID of the destination folder. Use 'root' for the root folder, or a folder ID from list_files + - `new_name` (string, optional): New name for the moved item. If not provided, the original name is kept + + + + + **Description:** List all worksheets (tabs) in an Excel workbook stored in a SharePoint document library. Use the returned worksheet name with other Excel actions. + + **Parameters:** + - `site_id` (string, required): The full SharePoint site identifier from get_sites + - `drive_id` (string, required): The ID of the document library. Call get_drives first to get valid drive IDs + - `item_id` (string, required): The unique identifier of the Excel file in SharePoint. Obtain from list_files or search_files + - `select` (string, optional): Comma-separated list of properties to return (e.g., 'id,name,position,visibility') + - `filter` (string, optional): OData filter expression (e.g., "visibility eq 'Visible'" to exclude hidden sheets) + - `top` (integer, optional): Maximum number of worksheets to return. Minimum: 1, Maximum: 999 + - `orderby` (string, optional): Sort order (e.g., 'position asc' to return sheets in tab order) + + + + + **Description:** Create a new worksheet (tab) in an Excel workbook stored in a SharePoint document library. The new sheet is added at the end of the tab list. 
+ + **Parameters:** + - `site_id` (string, required): The full SharePoint site identifier from get_sites + - `drive_id` (string, required): The ID of the document library. Call get_drives first to get valid drive IDs + - `item_id` (string, required): The unique identifier of the Excel file in SharePoint. Obtain from list_files or search_files + - `name` (string, required): Name for the new worksheet. Maximum 31 characters. Cannot contain: \ / * ? : [ ]. Must be unique within the workbook + + + + + **Description:** Retrieve cell values from a specific range in an Excel worksheet stored in SharePoint. For reading all data without knowing dimensions, use get_excel_used_range instead. + + **Parameters:** + - `site_id` (string, required): The full SharePoint site identifier from get_sites + - `drive_id` (string, required): The ID of the document library. Call get_drives first to get valid drive IDs + - `item_id` (string, required): The unique identifier of the Excel file in SharePoint. Obtain from list_files or search_files + - `worksheet_name` (string, required): Name of the worksheet (tab) to read from. Obtain from get_excel_worksheets. Case-sensitive + - `range` (string, required): Cell range in A1 notation (e.g., 'A1:C10', 'A:C', '1:5', 'A1') + - `select` (string, optional): Comma-separated list of properties to return (e.g., 'address,values,formulas,numberFormat,text') + + + + + **Description:** Write values to a specific range in an Excel worksheet stored in SharePoint. Overwrites existing cell contents. The values array dimensions must match the range dimensions exactly. + + **Parameters:** + - `site_id` (string, required): The full SharePoint site identifier from get_sites + - `drive_id` (string, required): The ID of the document library. Call get_drives first to get valid drive IDs + - `item_id` (string, required): The unique identifier of the Excel file in SharePoint. 
Obtain from list_files or search_files + - `worksheet_name` (string, required): Name of the worksheet (tab) to update. Obtain from get_excel_worksheets. Case-sensitive + - `range` (string, required): Cell range in A1 notation where values will be written (e.g., 'A1:C3' for a 3x3 block) + - `values` (array, required): 2D array of values (rows containing cells). Example for A1:B2: [["Header1", "Header2"], ["Value1", "Value2"]]. Use null to clear a cell + + + + + **Description:** Return only the metadata (address and dimensions) of the used range in a worksheet, without the actual cell values. Ideal for large files to understand spreadsheet size before reading data in chunks. + + **Parameters:** + - `site_id` (string, required): The full SharePoint site identifier from get_sites + - `drive_id` (string, required): The ID of the document library. Call get_drives first to get valid drive IDs + - `item_id` (string, required): The unique identifier of the Excel file in SharePoint. Obtain from list_files or search_files + - `worksheet_name` (string, required): Name of the worksheet (tab) to read. Obtain from get_excel_worksheets. Case-sensitive + + + + + **Description:** Retrieve all cells containing data in a worksheet stored in SharePoint. Do not use for files larger than 2MB. For large files, use get_excel_used_range_metadata first, then get_excel_range_data to read in smaller chunks. + + **Parameters:** + - `site_id` (string, required): The full SharePoint site identifier from get_sites + - `drive_id` (string, required): The ID of the document library. Call get_drives first to get valid drive IDs + - `item_id` (string, required): The unique identifier of the Excel file in SharePoint. Obtain from list_files or search_files + - `worksheet_name` (string, required): Name of the worksheet (tab) to read. Obtain from get_excel_worksheets. 
Case-sensitive + - `select` (string, optional): Comma-separated list of properties to return (e.g., 'address,values,formulas,numberFormat,text,rowCount,columnCount') + + + + + **Description:** Retrieve the value of a single cell by row and column index from an Excel file in SharePoint. Indices are 0-based (row 0 = Excel row 1, column 0 = column A). + + **Parameters:** + - `site_id` (string, required): The full SharePoint site identifier from get_sites + - `drive_id` (string, required): The ID of the document library. Call get_drives first to get valid drive IDs + - `item_id` (string, required): The unique identifier of the Excel file in SharePoint. Obtain from list_files or search_files + - `worksheet_name` (string, required): Name of the worksheet (tab). Obtain from get_excel_worksheets. Case-sensitive + - `row` (integer, required): 0-based row index (row 0 = Excel row 1). Valid range: 0-1048575 + - `column` (integer, required): 0-based column index (column 0 = A, column 1 = B). Valid range: 0-16383 + - `select` (string, optional): Comma-separated list of properties to return (e.g., 'address,values,formulas,numberFormat,text') + + + + + **Description:** Convert a cell range into a formatted Excel table with filtering, sorting, and structured data capabilities. Tables enable add_excel_table_row for appending data. + + **Parameters:** + - `site_id` (string, required): The full SharePoint site identifier from get_sites + - `drive_id` (string, required): The ID of the document library. Call get_drives first to get valid drive IDs + - `item_id` (string, required): The unique identifier of the Excel file in SharePoint. Obtain from list_files or search_files + - `worksheet_name` (string, required): Name of the worksheet containing the data range. 
Obtain from get_excel_worksheets + - `range` (string, required): Cell range to convert into a table, including headers and data (e.g., 'A1:D10' where A1:D1 contains column headers) + - `has_headers` (boolean, optional): Set to true if the first row contains column headers. Default is true + + + + + **Description:** List all tables in a specific Excel worksheet stored in SharePoint. Returns table properties including id, name, showHeaders, and showTotals. + + **Parameters:** + - `site_id` (string, required): The full SharePoint site identifier from get_sites + - `drive_id` (string, required): The ID of the document library. Call get_drives first to get valid drive IDs + - `item_id` (string, required): The unique identifier of the Excel file in SharePoint. Obtain from list_files or search_files + - `worksheet_name` (string, required): Name of the worksheet to get tables from. Obtain from get_excel_worksheets + + + + + **Description:** Append a new row to the end of an Excel table in a SharePoint file. The values array must have the same number of elements as the table has columns. + + **Parameters:** + - `site_id` (string, required): The full SharePoint site identifier from get_sites + - `drive_id` (string, required): The ID of the document library. Call get_drives first to get valid drive IDs + - `item_id` (string, required): The unique identifier of the Excel file in SharePoint. Obtain from list_files or search_files + - `worksheet_name` (string, required): Name of the worksheet containing the table. Obtain from get_excel_worksheets + - `table_name` (string, required): Name of the table to add the row to (e.g., 'Table1'). Obtain from get_excel_tables. Case-sensitive + - `values` (array, required): Array of cell values for the new row, one per column in table order (e.g., ["John Doe", "john@example.com", 25]) + + + + + **Description:** Get all rows from an Excel table in a SharePoint file as a data range. 
Easier than get_excel_range_data when working with structured tables since you don't need to know the exact range. + + **Parameters:** + - `site_id` (string, required): The full SharePoint site identifier from get_sites + - `drive_id` (string, required): The ID of the document library. Call get_drives first to get valid drive IDs + - `item_id` (string, required): The unique identifier of the Excel file in SharePoint. Obtain from list_files or search_files + - `worksheet_name` (string, required): Name of the worksheet containing the table. Obtain from get_excel_worksheets + - `table_name` (string, required): Name of the table to get data from (e.g., 'Table1'). Obtain from get_excel_tables. Case-sensitive + - `select` (string, optional): Comma-separated list of properties to return (e.g., 'address,values,formulas,numberFormat,text') + + + + + **Description:** Create a chart visualization in an Excel worksheet stored in SharePoint from a data range. The chart is embedded in the worksheet. + + **Parameters:** + - `site_id` (string, required): The full SharePoint site identifier from get_sites + - `drive_id` (string, required): The ID of the document library. Call get_drives first to get valid drive IDs + - `item_id` (string, required): The unique identifier of the Excel file in SharePoint. Obtain from list_files or search_files + - `worksheet_name` (string, required): Name of the worksheet where the chart will be created. Obtain from get_excel_worksheets + - `chart_type` (string, required): Chart type (e.g., 'ColumnClustered', 'ColumnStacked', 'Line', 'LineMarkers', 'Pie', 'Bar', 'BarClustered', 'Area', 'Scatter', 'Doughnut') + - `source_data` (string, required): Data range for the chart in A1 notation, including headers (e.g., 'A1:B10') + - `series_by` (string, optional): How data series are organized: 'Auto', 'Columns', or 'Rows'. Default is 'Auto' + + + + + **Description:** List all charts embedded in an Excel worksheet stored in SharePoint. 
Returns chart properties including id, name, chartType, height, width, and position. + + **Parameters:** + - `site_id` (string, required): The full SharePoint site identifier from get_sites + - `drive_id` (string, required): The ID of the document library. Call get_drives first to get valid drive IDs + - `item_id` (string, required): The unique identifier of the Excel file in SharePoint. Obtain from list_files or search_files + - `worksheet_name` (string, required): Name of the worksheet to list charts from. Obtain from get_excel_worksheets + + + + + **Description:** Permanently remove a worksheet (tab) and all its contents from an Excel workbook stored in SharePoint. Cannot be undone. A workbook must have at least one worksheet. + + **Parameters:** + - `site_id` (string, required): The full SharePoint site identifier from get_sites + - `drive_id` (string, required): The ID of the document library. Call get_drives first to get valid drive IDs + - `item_id` (string, required): The unique identifier of the Excel file in SharePoint. Obtain from list_files or search_files + - `worksheet_name` (string, required): Name of the worksheet to delete. Case-sensitive. All data, tables, and charts on this sheet will be permanently removed + + + + + **Description:** Remove a table from an Excel worksheet in SharePoint. This deletes the table structure (filtering, formatting, table features) but preserves the underlying cell data. + + **Parameters:** + - `site_id` (string, required): The full SharePoint site identifier from get_sites + - `drive_id` (string, required): The ID of the document library. Call get_drives first to get valid drive IDs + - `item_id` (string, required): The unique identifier of the Excel file in SharePoint. Obtain from list_files or search_files + - `worksheet_name` (string, required): Name of the worksheet containing the table. Obtain from get_excel_worksheets + - `table_name` (string, required): Name of the table to delete (e.g., 'Table1'). 
Obtain from get_excel_tables. The data in the cells will remain after table deletion + + + + + **Description:** Retrieve all named ranges defined in an Excel workbook stored in SharePoint. Named ranges are user-defined labels for cell ranges (e.g., 'SalesData' for A1:D100). + + **Parameters:** + - `site_id` (string, required): The full SharePoint site identifier from get_sites + - `drive_id` (string, required): The ID of the document library. Call get_drives first to get valid drive IDs + - `item_id` (string, required): The unique identifier of the Excel file in SharePoint. Obtain from list_files or search_files + + + + + **Description:** Download and extract text content from a Word document (.docx) stored in a SharePoint document library. This is the recommended way to read Word documents from SharePoint. + + **Parameters:** + - `site_id` (string, required): The full SharePoint site identifier from get_sites + - `drive_id` (string, required): The ID of the document library. Call get_drives first to get valid drive IDs + - `item_id` (string, required): The unique identifier of the Word document (.docx) in SharePoint. Obtain from list_files or search_files diff --git a/docs/en/enterprise/integrations/microsoft_teams.mdx b/docs/en/enterprise/integrations/microsoft_teams.mdx index f77d58ebd..1681bc4b4 100644 --- a/docs/en/enterprise/integrations/microsoft_teams.mdx +++ b/docs/en/enterprise/integrations/microsoft_teams.mdx @@ -108,6 +108,86 @@ CREWAI_PLATFORM_INTEGRATION_TOKEN=your_enterprise_token - `join_web_url` (string, required): The join web URL of the meeting to search for. + + + **Description:** Search online meetings by external Meeting ID. + + **Parameters:** + - `join_meeting_id` (string, required): The meeting ID (numeric code) that attendees use to join. This is the joinMeetingId shown in meeting invitations, not the Graph API meeting id. + + + + + **Description:** Get details of a specific online meeting. 
+ + **Parameters:** + - `meeting_id` (string, required): The Graph API meeting ID (a long alphanumeric string). Obtain from create_meeting or search_online_meetings actions. Different from the numeric joinMeetingId. + + + + + **Description:** Get members of a specific team. + + **Parameters:** + - `team_id` (string, required): The unique identifier of the team. Obtain from get_teams action. + - `top` (integer, optional): Maximum number of members to retrieve per page (1-999). Default is `100`. + - `skip_token` (string, optional): Pagination token from a previous response. When the response includes @odata.nextLink, extract the $skiptoken parameter value and pass it here to get the next page of results. + + + + + **Description:** Create a new channel in a team. + + **Parameters:** + - `team_id` (string, required): The unique identifier of the team. Obtain from get_teams action. + - `display_name` (string, required): Name of the channel as displayed in Teams. Must be unique within the team. Max 50 characters. + - `description` (string, optional): Optional description explaining the channel's purpose. Visible in channel details. Max 1024 characters. + - `membership_type` (string, optional): Channel visibility. Enum: `standard`, `private`. "standard" = visible to all team members, "private" = visible only to specifically added members. Default is `standard`. + + + + + **Description:** Get replies to a specific message in a channel. + + **Parameters:** + - `team_id` (string, required): The unique identifier of the team. Obtain from get_teams action. + - `channel_id` (string, required): The unique identifier of the channel. Obtain from get_channels action. + - `message_id` (string, required): The unique identifier of the parent message. Obtain from get_messages action. + - `top` (integer, optional): Maximum number of replies to retrieve per page (1-50). Default is `50`. + - `skip_token` (string, optional): Pagination token from a previous response. 
When the response includes @odata.nextLink, extract the $skiptoken parameter value and pass it here to get the next page of results. + + + + + **Description:** Reply to a message in a Teams channel. + + **Parameters:** + - `team_id` (string, required): The unique identifier of the team. Obtain from get_teams action. + - `channel_id` (string, required): The unique identifier of the channel. Obtain from get_channels action. + - `message_id` (string, required): The unique identifier of the message to reply to. Obtain from get_messages action. + - `message` (string, required): The reply content. For HTML, include formatting tags. For text, plain text only. + - `content_type` (string, optional): Content format. Enum: `html`, `text`. "text" for plain text, "html" for rich text with formatting. Default is `text`. + + + + + **Description:** Update an existing online meeting. + + **Parameters:** + - `meeting_id` (string, required): The unique identifier of the meeting. Obtain from create_meeting or search_online_meetings actions. + - `subject` (string, optional): New meeting title. + - `startDateTime` (string, optional): New start time in ISO 8601 format with timezone. Example: "2024-01-20T10:00:00-08:00". + - `endDateTime` (string, optional): New end time in ISO 8601 format with timezone. + + + + + **Description:** Delete an online meeting. + + **Parameters:** + - `meeting_id` (string, required): The unique identifier of the meeting to delete. Obtain from create_meeting or search_online_meetings actions. + + ## Usage Examples diff --git a/docs/en/enterprise/integrations/microsoft_word.mdx b/docs/en/enterprise/integrations/microsoft_word.mdx index e83280e99..7b7675b2e 100644 --- a/docs/en/enterprise/integrations/microsoft_word.mdx +++ b/docs/en/enterprise/integrations/microsoft_word.mdx @@ -98,6 +98,26 @@ CREWAI_PLATFORM_INTEGRATION_TOKEN=your_enterprise_token - `file_id` (string, required): The ID of the document to delete. 
+ + + **Description:** Copy a document to a new location in OneDrive. + + **Parameters:** + - `file_id` (string, required): The ID of the document to copy + - `name` (string, optional): New name for the copied document + - `parent_id` (string, optional): The ID of the destination folder (defaults to root) + + + + + **Description:** Move a document to a new location in OneDrive. + + **Parameters:** + - `file_id` (string, required): The ID of the document to move + - `parent_id` (string, required): The ID of the destination folder + - `name` (string, optional): New name for the moved document + + ## Usage Examples diff --git a/docs/ko/enterprise/integrations/google_contacts.mdx b/docs/ko/enterprise/integrations/google_contacts.mdx index 5302784a8..ded332913 100644 --- a/docs/ko/enterprise/integrations/google_contacts.mdx +++ b/docs/ko/enterprise/integrations/google_contacts.mdx @@ -200,6 +200,25 @@ CREWAI_PLATFORM_INTEGRATION_TOKEN=your_enterprise_token - `clientData` (array, 선택사항): 클라이언트별 데이터. 각 항목은 `key` (string)와 `value` (string)가 있는 객체. + + + **설명:** 연락처 그룹의 정보를 업데이트합니다. + + **매개변수:** + - `resourceName` (string, 필수): 연락처 그룹의 리소스 이름 (예: 'contactGroups/myContactGroup'). + - `name` (string, 필수): 연락처 그룹의 이름. + - `clientData` (array, 선택사항): 클라이언트별 데이터. 각 항목은 `key` (string)와 `value` (string)가 있는 객체. + + + + + **설명:** 연락처 그룹을 삭제합니다. + + **매개변수:** + - `resourceName` (string, 필수): 삭제할 연락처 그룹의 리소스 이름 (예: 'contactGroups/myContactGroup'). + - `deleteContacts` (boolean, 선택사항): 그룹 내 연락처도 삭제할지 여부. 기본값: false + + ## 사용 예제 diff --git a/docs/ko/enterprise/integrations/google_docs.mdx b/docs/ko/enterprise/integrations/google_docs.mdx index c749be03b..53f421229 100644 --- a/docs/ko/enterprise/integrations/google_docs.mdx +++ b/docs/ko/enterprise/integrations/google_docs.mdx @@ -131,6 +131,297 @@ CREWAI_PLATFORM_INTEGRATION_TOKEN=your_enterprise_token - `endIndex` (integer, 필수): 범위의 끝 인덱스. + + + **설명:** 내용이 포함된 새 Google 문서를 한 번에 만듭니다. + + **매개변수:** + - `title` (string, 필수): 새 문서의 제목. 
문서 상단과 Google Drive에 표시됩니다. + - `content` (string, 선택사항): 문서에 삽입할 텍스트 내용. 새 단락에는 `\n`을 사용하세요. + + + + + **설명:** Google 문서의 끝에 텍스트를 추가합니다. 인덱스를 지정할 필요 없이 자동으로 문서 끝에 삽입됩니다. + + **매개변수:** + - `documentId` (string, 필수): create_document 응답 또는 URL에서 가져온 문서 ID. + - `text` (string, 필수): 문서 끝에 추가할 텍스트. 새 단락에는 `\n`을 사용하세요. + + + + + **설명:** Google 문서에서 텍스트를 굵게 만들거나 굵게 서식을 제거합니다. + + **매개변수:** + - `documentId` (string, 필수): 문서 ID. + - `startIndex` (integer, 필수): 서식을 지정할 텍스트의 시작 위치. + - `endIndex` (integer, 필수): 서식을 지정할 텍스트의 끝 위치 (배타적). + - `bold` (boolean, 필수): 굵게 만들려면 `true`, 굵게를 제거하려면 `false`로 설정. + + + + + **설명:** Google 문서에서 텍스트를 기울임꼴로 만들거나 기울임꼴 서식을 제거합니다. + + **매개변수:** + - `documentId` (string, 필수): 문서 ID. + - `startIndex` (integer, 필수): 서식을 지정할 텍스트의 시작 위치. + - `endIndex` (integer, 필수): 서식을 지정할 텍스트의 끝 위치 (배타적). + - `italic` (boolean, 필수): 기울임꼴로 만들려면 `true`, 기울임꼴을 제거하려면 `false`로 설정. + + + + + **설명:** Google 문서에서 텍스트에 밑줄 서식을 추가하거나 제거합니다. + + **매개변수:** + - `documentId` (string, 필수): 문서 ID. + - `startIndex` (integer, 필수): 서식을 지정할 텍스트의 시작 위치. + - `endIndex` (integer, 필수): 서식을 지정할 텍스트의 끝 위치 (배타적). + - `underline` (boolean, 필수): 밑줄을 추가하려면 `true`, 밑줄을 제거하려면 `false`로 설정. + + + + + **설명:** Google 문서에서 텍스트에 취소선 서식을 추가하거나 제거합니다. + + **매개변수:** + - `documentId` (string, 필수): 문서 ID. + - `startIndex` (integer, 필수): 서식을 지정할 텍스트의 시작 위치. + - `endIndex` (integer, 필수): 서식을 지정할 텍스트의 끝 위치 (배타적). + - `strikethrough` (boolean, 필수): 취소선을 추가하려면 `true`, 제거하려면 `false`로 설정. + + + + + **설명:** Google 문서에서 텍스트의 글꼴 크기를 변경합니다. + + **매개변수:** + - `documentId` (string, 필수): 문서 ID. + - `startIndex` (integer, 필수): 서식을 지정할 텍스트의 시작 위치. + - `endIndex` (integer, 필수): 서식을 지정할 텍스트의 끝 위치 (배타적). + - `fontSize` (number, 필수): 포인트 단위의 글꼴 크기. 일반적인 크기: 10, 11, 12, 14, 16, 18, 24, 36. + + + + + **설명:** Google 문서에서 RGB 값(0-1 스케일)을 사용하여 텍스트 색상을 변경합니다. + + **매개변수:** + - `documentId` (string, 필수): 문서 ID. + - `startIndex` (integer, 필수): 서식을 지정할 텍스트의 시작 위치. + - `endIndex` (integer, 필수): 서식을 지정할 텍스트의 끝 위치 (배타적). 
+ - `red` (number, 필수): 빨강 구성 요소 (0-1). 예: `1`은 완전한 빨강. + - `green` (number, 필수): 초록 구성 요소 (0-1). 예: `0.5`는 절반 초록. + - `blue` (number, 필수): 파랑 구성 요소 (0-1). 예: `0`은 파랑 없음. + + + + + **설명:** Google 문서에서 기존 텍스트를 클릭 가능한 하이퍼링크로 변환합니다. + + **매개변수:** + - `documentId` (string, 필수): 문서 ID. + - `startIndex` (integer, 필수): 링크로 만들 텍스트의 시작 위치. + - `endIndex` (integer, 필수): 링크로 만들 텍스트의 끝 위치 (배타적). + - `url` (string, 필수): 링크가 가리킬 URL. 예: `"https://example.com"`. + + + + + **설명:** Google 문서에서 텍스트 범위에 제목 또는 단락 스타일을 적용합니다. + + **매개변수:** + - `documentId` (string, 필수): 문서 ID. + - `startIndex` (integer, 필수): 스타일을 적용할 단락의 시작 위치. + - `endIndex` (integer, 필수): 스타일을 적용할 단락의 끝 위치. + - `style` (string, 필수): 적용할 스타일. 옵션: `NORMAL_TEXT`, `TITLE`, `SUBTITLE`, `HEADING_1`, `HEADING_2`, `HEADING_3`, `HEADING_4`, `HEADING_5`, `HEADING_6`. + + + + + **설명:** Google 문서에서 단락의 텍스트 정렬을 설정합니다. + + **매개변수:** + - `documentId` (string, 필수): 문서 ID. + - `startIndex` (integer, 필수): 정렬할 단락의 시작 위치. + - `endIndex` (integer, 필수): 정렬할 단락의 끝 위치. + - `alignment` (string, 필수): 텍스트 정렬. 옵션: `START` (왼쪽), `CENTER`, `END` (오른쪽), `JUSTIFIED`. + + + + + **설명:** Google 문서에서 단락의 줄 간격을 설정합니다. + + **매개변수:** + - `documentId` (string, 필수): 문서 ID. + - `startIndex` (integer, 필수): 단락의 시작 위치. + - `endIndex` (integer, 필수): 단락의 끝 위치. + - `lineSpacing` (number, 필수): 백분율로 나타낸 줄 간격. `100` = 단일, `115` = 1.15배, `150` = 1.5배, `200` = 이중. + + + + + **설명:** Google 문서에서 단락을 글머리 기호 또는 번호 매기기 목록으로 변환합니다. + + **매개변수:** + - `documentId` (string, 필수): 문서 ID. + - `startIndex` (integer, 필수): 목록으로 변환할 단락의 시작 위치. + - `endIndex` (integer, 필수): 목록으로 변환할 단락의 끝 위치. + - `bulletPreset` (string, 필수): 글머리 기호/번호 매기기 스타일. 옵션: `BULLET_DISC_CIRCLE_SQUARE`, `BULLET_DIAMONDX_ARROW3D_SQUARE`, `BULLET_CHECKBOX`, `BULLET_ARROW_DIAMOND_DISC`, `BULLET_STAR_CIRCLE_SQUARE`, `NUMBERED_DECIMAL_ALPHA_ROMAN`, `NUMBERED_DECIMAL_ALPHA_ROMAN_PARENS`, `NUMBERED_DECIMAL_NESTED`, `NUMBERED_UPPERALPHA_ALPHA_ROMAN`, `NUMBERED_UPPERROMAN_UPPERALPHA_DECIMAL`. 
+ + + + + **설명:** Google 문서에서 단락의 글머리 기호 또는 번호 매기기를 제거합니다. + + **매개변수:** + - `documentId` (string, 필수): 문서 ID. + - `startIndex` (integer, 필수): 목록 단락의 시작 위치. + - `endIndex` (integer, 필수): 목록 단락의 끝 위치. + + + + + **설명:** Google 문서에 내용이 포함된 표를 한 번에 삽입합니다. 내용은 2D 배열로 제공하세요. + + **매개변수:** + - `documentId` (string, 필수): 문서 ID. + - `rows` (integer, 필수): 표의 행 수. + - `columns` (integer, 필수): 표의 열 수. + - `index` (integer, 선택사항): 표를 삽입할 위치. 제공하지 않으면 문서 끝에 삽입됩니다. + - `content` (array, 필수): 2D 배열로 된 표 내용. 각 내부 배열은 행입니다. 예: `[["Year", "Revenue"], ["2023", "$43B"], ["2024", "$45B"]]`. + + + + + **설명:** 기존 표의 참조 셀 위 또는 아래에 새 행을 삽입합니다. + + **매개변수:** + - `documentId` (string, 필수): 문서 ID. + - `tableStartIndex` (integer, 필수): 표의 시작 인덱스. get_document에서 가져오세요. + - `rowIndex` (integer, 필수): 참조 셀의 행 인덱스 (0 기반). + - `columnIndex` (integer, 선택사항): 참조 셀의 열 인덱스 (0 기반). 기본값: `0`. + - `insertBelow` (boolean, 선택사항): `true`이면 참조 행 아래에, `false`이면 위에 삽입. 기본값: `true`. + + + + + **설명:** 기존 표의 참조 셀 왼쪽 또는 오른쪽에 새 열을 삽입합니다. + + **매개변수:** + - `documentId` (string, 필수): 문서 ID. + - `tableStartIndex` (integer, 필수): 표의 시작 인덱스. + - `rowIndex` (integer, 선택사항): 참조 셀의 행 인덱스 (0 기반). 기본값: `0`. + - `columnIndex` (integer, 필수): 참조 셀의 열 인덱스 (0 기반). + - `insertRight` (boolean, 선택사항): `true`이면 오른쪽에, `false`이면 왼쪽에 삽입. 기본값: `true`. + + + + + **설명:** Google 문서의 기존 표에서 행을 삭제합니다. + + **매개변수:** + - `documentId` (string, 필수): 문서 ID. + - `tableStartIndex` (integer, 필수): 표의 시작 인덱스. + - `rowIndex` (integer, 필수): 삭제할 행 인덱스 (0 기반). + - `columnIndex` (integer, 선택사항): 행의 아무 셀의 열 인덱스 (0 기반). 기본값: `0`. + + + + + **설명:** Google 문서의 기존 표에서 열을 삭제합니다. + + **매개변수:** + - `documentId` (string, 필수): 문서 ID. + - `tableStartIndex` (integer, 필수): 표의 시작 인덱스. + - `rowIndex` (integer, 선택사항): 열의 아무 셀의 행 인덱스 (0 기반). 기본값: `0`. + - `columnIndex` (integer, 필수): 삭제할 열 인덱스 (0 기반). + + + + + **설명:** 표 셀 범위를 단일 셀로 병합합니다. 모든 셀의 내용이 보존됩니다. + + **매개변수:** + - `documentId` (string, 필수): 문서 ID. + - `tableStartIndex` (integer, 필수): 표의 시작 인덱스. 
+ - `rowIndex` (integer, 필수): 병합의 시작 행 인덱스 (0 기반). + - `columnIndex` (integer, 필수): 병합의 시작 열 인덱스 (0 기반). + - `rowSpan` (integer, 필수): 병합할 행 수. + - `columnSpan` (integer, 필수): 병합할 열 수. + + + + + **설명:** 이전에 병합된 표 셀을 개별 셀로 분리합니다. + + **매개변수:** + - `documentId` (string, 필수): 문서 ID. + - `tableStartIndex` (integer, 필수): 표의 시작 인덱스. + - `rowIndex` (integer, 필수): 병합된 셀의 행 인덱스 (0 기반). + - `columnIndex` (integer, 필수): 병합된 셀의 열 인덱스 (0 기반). + - `rowSpan` (integer, 필수): 병합된 셀이 차지하는 행 수. + - `columnSpan` (integer, 필수): 병합된 셀이 차지하는 열 수. + + + + + **설명:** 공개 URL에서 Google 문서에 이미지를 삽입합니다. 이미지는 공개적으로 접근 가능해야 하고, 50MB 미만이며, PNG/JPEG/GIF 형식이어야 합니다. + + **매개변수:** + - `documentId` (string, 필수): 문서 ID. + - `uri` (string, 필수): 이미지의 공개 URL. 인증 없이 접근 가능해야 합니다. + - `index` (integer, 선택사항): 이미지를 삽입할 위치. 제공하지 않으면 문서 끝에 삽입됩니다. 기본값: `1`. + + + + + **설명:** 서로 다른 서식을 가진 문서 섹션을 만들기 위해 섹션 나누기를 삽입합니다. + + **매개변수:** + - `documentId` (string, 필수): 문서 ID. + - `index` (integer, 필수): 섹션 나누기를 삽입할 위치. + - `sectionType` (string, 필수): 섹션 나누기의 유형. 옵션: `CONTINUOUS` (같은 페이지에 유지), `NEXT_PAGE` (새 페이지 시작). + + + + + **설명:** 문서의 머리글을 만듭니다. insert_text를 사용하여 머리글 내용을 추가할 수 있는 headerId를 반환합니다. + + **매개변수:** + - `documentId` (string, 필수): 문서 ID. + - `type` (string, 선택사항): 머리글 유형. 옵션: `DEFAULT`. 기본값: `DEFAULT`. + + + + + **설명:** 문서의 바닥글을 만듭니다. insert_text를 사용하여 바닥글 내용을 추가할 수 있는 footerId를 반환합니다. + + **매개변수:** + - `documentId` (string, 필수): 문서 ID. + - `type` (string, 선택사항): 바닥글 유형. 옵션: `DEFAULT`. 기본값: `DEFAULT`. + + + + + **설명:** 문서에서 머리글을 삭제합니다. headerId를 찾으려면 get_document를 사용하세요. + + **매개변수:** + - `documentId` (string, 필수): 문서 ID. + - `headerId` (string, 필수): 삭제할 머리글 ID. get_document 응답에서 가져오세요. + + + + + **설명:** 문서에서 바닥글을 삭제합니다. footerId를 찾으려면 get_document를 사용하세요. + + **매개변수:** + - `documentId` (string, 필수): 문서 ID. + - `footerId` (string, 필수): 삭제할 바닥글 ID. get_document 응답에서 가져오세요. 
+ + ## 사용 예제 diff --git a/docs/ko/enterprise/integrations/google_slides.mdx b/docs/ko/enterprise/integrations/google_slides.mdx index 2c6a3b10c..da0449a63 100644 --- a/docs/ko/enterprise/integrations/google_slides.mdx +++ b/docs/ko/enterprise/integrations/google_slides.mdx @@ -61,6 +61,22 @@ CREWAI_PLATFORM_INTEGRATION_TOKEN=your_enterprise_token + + **설명:** 프레젠테이션에 대한 가벼운 메타데이터(제목, 슬라이드 수, 슬라이드 ID)를 가져옵니다. 전체 콘텐츠를 가져오기 전에 먼저 사용하세요. + + **매개변수:** + - `presentationId` (string, 필수): 검색할 프레젠테이션의 ID. + + + + + **설명:** 프레젠테이션에서 모든 텍스트 콘텐츠를 추출합니다. 슬라이드 ID와 도형 및 테이블의 텍스트만 반환합니다 (포맷팅 없음). + + **매개변수:** + - `presentationId` (string, 필수): 프레젠테이션의 ID. + + + **설명:** ID로 프레젠테이션을 검색합니다. @@ -80,6 +96,15 @@ CREWAI_PLATFORM_INTEGRATION_TOKEN=your_enterprise_token + + **설명:** 단일 슬라이드에서 텍스트 콘텐츠를 추출합니다. 도형 및 테이블의 텍스트만 반환합니다 (포맷팅 또는 스타일 없음). + + **매개변수:** + - `presentationId` (string, 필수): 프레젠테이션의 ID. + - `pageObjectId` (string, 필수): 텍스트를 가져올 슬라이드/페이지의 ID. + + + **설명:** ID로 특정 페이지를 검색합니다. @@ -98,6 +123,120 @@ CREWAI_PLATFORM_INTEGRATION_TOKEN=your_enterprise_token + + **설명:** 프레젠테이션에 추가 빈 슬라이드를 추가합니다. 새 프레젠테이션에는 이미 빈 슬라이드가 하나 있습니다. 먼저 get_presentation_metadata를 확인하세요. 제목/본문 영역이 있는 슬라이드는 create_slide_with_layout을 사용하세요. + + **매개변수:** + - `presentationId` (string, 필수): 프레젠테이션의 ID. + - `insertionIndex` (integer, 선택사항): 슬라이드를 삽입할 위치 (0 기반). 생략하면 맨 끝에 추가됩니다. + + + + + **설명:** 제목, 본문 등의 플레이스홀더 영역이 있는 미리 정의된 레이아웃으로 슬라이드를 만듭니다. 구조화된 콘텐츠에는 create_slide보다 적합합니다. 생성 후 get_page로 플레이스홀더 ID를 찾고, 그 안에 텍스트를 삽입하세요. + + **매개변수:** + - `presentationId` (string, 필수): 프레젠테이션의 ID. + - `layout` (string, 필수): 레이아웃 유형. 옵션: `BLANK`, `TITLE`, `TITLE_AND_BODY`, `TITLE_AND_TWO_COLUMNS`, `TITLE_ONLY`, `SECTION_HEADER`, `ONE_COLUMN_TEXT`, `MAIN_POINT`, `BIG_NUMBER`. 제목+설명은 TITLE_AND_BODY, 제목만은 TITLE, 섹션 구분은 SECTION_HEADER가 적합합니다. + - `insertionIndex` (integer, 선택사항): 삽입할 위치 (0 기반). 생략하면 맨 끝에 추가됩니다. + + + + + **설명:** 콘텐츠가 있는 텍스트 상자를 슬라이드에 만듭니다. 제목, 설명, 단락에 사용합니다. 테이블에는 사용하지 마세요. 
선택적으로 EMU 단위로 위치(x, y)와 크기(width, height)를 지정할 수 있습니다 (914400 EMU = 1 인치). + + **매개변수:** + - `presentationId` (string, 필수): 프레젠테이션의 ID. + - `slideId` (string, 필수): 텍스트 상자를 추가할 슬라이드의 ID. + - `text` (string, 필수): 텍스트 상자의 텍스트 내용. + - `x` (integer, 선택사항): EMU 단위 X 위치 (914400 = 1 인치). 기본값: 914400 (왼쪽에서 1 인치). + - `y` (integer, 선택사항): EMU 단위 Y 위치 (914400 = 1 인치). 기본값: 914400 (위에서 1 인치). + - `width` (integer, 선택사항): EMU 단위 너비. 기본값: 7315200 (약 8 인치). + - `height` (integer, 선택사항): EMU 단위 높이. 기본값: 914400 (약 1 인치). + + + + + **설명:** 프레젠테이션에서 슬라이드를 제거합니다. 슬라이드 ID를 찾으려면 먼저 get_presentation을 사용하세요. + + **매개변수:** + - `presentationId` (string, 필수): 프레젠테이션의 ID. + - `slideId` (string, 필수): 삭제할 슬라이드의 객체 ID. get_presentation에서 가져옵니다. + + + + + **설명:** 기존 슬라이드의 복사본을 만듭니다. 복사본은 원본 바로 다음에 삽입됩니다. + + **매개변수:** + - `presentationId` (string, 필수): 프레젠테이션의 ID. + - `slideId` (string, 필수): 복제할 슬라이드의 객체 ID. get_presentation에서 가져옵니다. + + + + + **설명:** 슬라이드를 새 위치로 이동하여 순서를 변경합니다. 슬라이드 ID는 현재 프레젠테이션 순서대로 있어야 합니다 (중복 없음). + + **매개변수:** + - `presentationId` (string, 필수): 프레젠테이션의 ID. + - `slideIds` (string 배열, 필수): 이동할 슬라이드 ID 배열. 현재 프레젠테이션 순서대로 있어야 합니다. + - `insertionIndex` (integer, 필수): 대상 위치 (0 기반). 0 = 맨 앞, 슬라이드 수 = 맨 끝. + + + + + **설명:** 슬라이드에 YouTube 동영상을 삽입합니다. 동영상 ID는 YouTube URL의 "v=" 다음 값입니다 (예: youtube.com/watch?v=abc123의 경우 "abc123" 사용). + + **매개변수:** + - `presentationId` (string, 필수): 프레젠테이션의 ID. + - `slideId` (string, 필수): 동영상을 추가할 슬라이드의 ID. get_presentation에서 가져옵니다. + - `videoId` (string, 필수): YouTube 동영상 ID (URL의 v= 다음 값). + + + + + **설명:** 슬라이드에 Google Drive의 동영상을 삽입합니다. 파일 ID는 Drive 파일 URL에서 찾을 수 있습니다. + + **매개변수:** + - `presentationId` (string, 필수): 프레젠테이션의 ID. + - `slideId` (string, 필수): 동영상을 추가할 슬라이드의 ID. get_presentation에서 가져옵니다. + - `fileId` (string, 필수): 동영상의 Google Drive 파일 ID. + + + + + **설명:** 슬라이드의 배경 이미지를 설정합니다. 이미지 URL은 공개적으로 액세스 가능해야 합니다. + + **매개변수:** + - `presentationId` (string, 필수): 프레젠테이션의 ID. + - `slideId` (string, 필수): 배경을 설정할 슬라이드의 ID. get_presentation에서 가져옵니다. 
+ - `imageUrl` (string, 필수): 배경으로 사용할 이미지의 공개적으로 액세스 가능한 URL. + + + + + **설명:** 슬라이드에 빈 테이블을 만듭니다. 콘텐츠가 있는 테이블을 만들려면 create_table_with_content를 사용하세요. + + **매개변수:** + - `presentationId` (string, 필수): 프레젠테이션의 ID. + - `slideId` (string, 필수): 테이블을 추가할 슬라이드의 ID. get_presentation에서 가져옵니다. + - `rows` (integer, 필수): 테이블의 행 수. + - `columns` (integer, 필수): 테이블의 열 수. + + + + + **설명:** 한 번의 작업으로 콘텐츠가 있는 테이블을 만듭니다. 콘텐츠는 2D 배열로 제공하며, 각 내부 배열은 행을 나타냅니다. 예: [["Header1", "Header2"], ["Row1Col1", "Row1Col2"]]. + + **매개변수:** + - `presentationId` (string, 필수): 프레젠테이션의 ID. + - `slideId` (string, 필수): 테이블을 추가할 슬라이드의 ID. get_presentation에서 가져옵니다. + - `rows` (integer, 필수): 테이블의 행 수. + - `columns` (integer, 필수): 테이블의 열 수. + - `content` (array, 필수): 2D 배열 형태의 테이블 콘텐츠. 각 내부 배열은 행입니다. 예: [["Year", "Revenue"], ["2023", "$10M"]]. + + + **설명:** Google 시트에서 프레젠테이션으로 데이터를 가져옵니다. diff --git a/docs/ko/enterprise/integrations/microsoft_excel.mdx b/docs/ko/enterprise/integrations/microsoft_excel.mdx index 41707ef66..42ebd78b6 100644 --- a/docs/ko/enterprise/integrations/microsoft_excel.mdx +++ b/docs/ko/enterprise/integrations/microsoft_excel.mdx @@ -148,6 +148,16 @@ CREWAI_PLATFORM_INTEGRATION_TOKEN=your_enterprise_token + + **설명:** Excel 워크시트의 특정 테이블에서 데이터를 가져옵니다. + + **매개변수:** + - `file_id` (string, 필수): Excel 파일의 ID. + - `worksheet_name` (string, 필수): 워크시트의 이름. + - `table_name` (string, 필수): 테이블의 이름. + + + **설명:** Excel 워크시트에 차트를 만듭니다. @@ -180,6 +190,15 @@ CREWAI_PLATFORM_INTEGRATION_TOKEN=your_enterprise_token + + **설명:** Excel 워크시트의 사용된 범위 메타데이터(크기만, 데이터 없음)를 가져옵니다. + + **매개변수:** + - `file_id` (string, 필수): Excel 파일의 ID. + - `worksheet_name` (string, 필수): 워크시트의 이름. + + + **설명:** Excel 워크시트의 모든 차트를 가져옵니다. 
diff --git a/docs/ko/enterprise/integrations/microsoft_onedrive.mdx b/docs/ko/enterprise/integrations/microsoft_onedrive.mdx index 4d8bc2273..40c546c54 100644 --- a/docs/ko/enterprise/integrations/microsoft_onedrive.mdx +++ b/docs/ko/enterprise/integrations/microsoft_onedrive.mdx @@ -150,6 +150,49 @@ CREWAI_PLATFORM_INTEGRATION_TOKEN=your_enterprise_token - `item_id` (string, 필수): 파일의 ID. + + + **설명:** 특정 OneDrive 경로의 파일과 폴더를 나열합니다. + + **매개변수:** + - `folder_path` (string, 필수): 폴더 경로 (예: 'Documents/Reports'). + - `top` (integer, 선택사항): 검색할 항목 수 (최대 1000). 기본값: 50. + - `orderby` (string, 선택사항): 필드별 정렬 (예: "name asc", "lastModifiedDateTime desc"). 기본값: "name asc". + + + + + **설명:** OneDrive에서 최근에 액세스한 파일을 가져옵니다. + + **매개변수:** + - `top` (integer, 선택사항): 검색할 항목 수 (최대 200). 기본값: 25. + + + + + **설명:** 사용자와 공유된 파일과 폴더를 가져옵니다. + + **매개변수:** + - `top` (integer, 선택사항): 검색할 항목 수 (최대 200). 기본값: 50. + - `orderby` (string, 선택사항): 필드별 정렬. 기본값: "name asc". + + + + + **설명:** 경로로 특정 파일 또는 폴더에 대한 정보를 가져옵니다. + + **매개변수:** + - `file_path` (string, 필수): 파일 또는 폴더 경로 (예: 'Documents/report.docx'). + + + + + **설명:** 경로로 OneDrive에서 파일을 다운로드합니다. + + **매개변수:** + - `file_path` (string, 필수): 파일 경로 (예: 'Documents/report.docx'). + + ## 사용 예제 @@ -183,6 +226,62 @@ crew = Crew( crew.kickoff() ``` +### 파일 업로드 및 관리 + +```python +from crewai import Agent, Task, Crew + +# 파일 작업에 특화된 에이전트 생성 +file_operator = Agent( + role="파일 운영자", + goal="파일을 정확하게 업로드, 다운로드 및 관리", + backstory="파일 처리 및 콘텐츠 관리에 능숙한 AI 어시스턴트.", + apps=['microsoft_onedrive/upload_file', 'microsoft_onedrive/download_file', 'microsoft_onedrive/get_file_info'] +) + +# 파일 업로드 및 관리 작업 +file_management_task = Task( + description="'report.txt'라는 이름의 텍스트 파일을 'This is a sample report for the project.' 내용으로 업로드한 다음 업로드된 파일에 대한 정보를 가져오세요.", + agent=file_operator, + expected_output="파일이 성공적으로 업로드되고 파일 정보가 검색됨." 
+) + +crew = Crew( + agents=[file_operator], + tasks=[file_management_task] +) + +crew.kickoff() +``` + +### 파일 정리 및 공유 + +```python +from crewai import Agent, Task, Crew + +# 파일 정리 및 공유를 위한 에이전트 생성 +file_organizer = Agent( + role="파일 정리자", + goal="파일을 정리하고 협업을 위한 공유 링크 생성", + backstory="파일 정리 및 공유 권한 관리에 뛰어난 AI 어시스턴트.", + apps=['microsoft_onedrive/search_files', 'microsoft_onedrive/move_item', 'microsoft_onedrive/share_item', 'microsoft_onedrive/create_folder'] +) + +# 파일 정리 및 공유 작업 +organize_share_task = Task( + description="이름에 'presentation'이 포함된 파일을 검색하고, '프레젠테이션'이라는 폴더를 만든 다음, 찾은 파일을 이 폴더로 이동하고 폴더에 대한 읽기 전용 공유 링크를 생성하세요.", + agent=file_organizer, + expected_output="파일이 '프레젠테이션' 폴더로 정리되고 공유 링크가 생성됨." +) + +crew = Crew( + agents=[file_organizer], + tasks=[organize_share_task] +) + +crew.kickoff() +``` + ## 문제 해결 ### 일반적인 문제 @@ -196,6 +295,30 @@ crew.kickoff() - 파일 업로드 시 `file_name`과 `content`가 제공되는지 확인하세요. - 바이너리 파일의 경우 내용이 Base64로 인코딩되어야 합니다. +- OneDrive에 대한 쓰기 권한이 있는지 확인하세요. + +**파일/폴더 ID 문제** + +- 특정 파일 또는 폴더에 액세스할 때 항목 ID가 올바른지 다시 확인하세요. +- 항목 ID는 `list_files` 또는 `search_files`와 같은 다른 작업에서 반환됩니다. +- 참조하는 항목이 존재하고 액세스 가능한지 확인하세요. + +**검색 및 필터 작업** + +- `search_files` 작업에 적절한 검색어를 사용하세요. +- `filter` 매개변수의 경우 올바른 OData 문법을 사용하세요. + +**파일 작업 (복사/이동)** + +- `move_item`의 경우 `item_id`와 `parent_id`가 모두 제공되는지 확인하세요. +- `copy_item`의 경우 `item_id`만 필요합니다. `parent_id`는 지정하지 않으면 루트로 기본 설정됩니다. +- 대상 폴더가 존재하고 액세스 가능한지 확인하세요. + +**공유 링크 생성** + +- 공유 링크를 만들기 전에 항목이 존재하는지 확인하세요. +- 공유 요구 사항에 따라 적절한 `type`과 `scope`를 선택하세요. +- `anonymous` 범위는 로그인 없이 액세스를 허용합니다. `organization`은 조직 계정이 필요합니다. 
### 도움 받기 diff --git a/docs/ko/enterprise/integrations/microsoft_outlook.mdx b/docs/ko/enterprise/integrations/microsoft_outlook.mdx index 661b55ceb..24e93d035 100644 --- a/docs/ko/enterprise/integrations/microsoft_outlook.mdx +++ b/docs/ko/enterprise/integrations/microsoft_outlook.mdx @@ -132,6 +132,74 @@ CREWAI_PLATFORM_INTEGRATION_TOKEN=your_enterprise_token - `companyName` (string, 선택사항): 연락처의 회사 이름. + + + **설명:** ID로 특정 이메일 메시지를 가져옵니다. + + **매개변수:** + - `message_id` (string, 필수): 메시지의 고유 식별자. get_messages 작업에서 얻을 수 있습니다. + - `select` (string, 선택사항): 반환할 속성의 쉼표로 구분된 목록. 예: "id,subject,body,from,receivedDateTime". 기본값: "id,subject,body,from,toRecipients,receivedDateTime". + + + + + **설명:** 이메일 메시지에 회신합니다. + + **매개변수:** + - `message_id` (string, 필수): 회신할 메시지의 고유 식별자. get_messages 작업에서 얻을 수 있습니다. + - `comment` (string, 필수): 회신 메시지 내용. 일반 텍스트 또는 HTML 가능. 원본 메시지가 이 내용 아래에 인용됩니다. + + + + + **설명:** 이메일 메시지를 전달합니다. + + **매개변수:** + - `message_id` (string, 필수): 전달할 메시지의 고유 식별자. get_messages 작업에서 얻을 수 있습니다. + - `to_recipients` (array, 필수): 전달할 받는 사람의 이메일 주소 배열. 예: ["john@example.com", "jane@example.com"]. + - `comment` (string, 선택사항): 전달된 콘텐츠 위에 포함할 선택적 메시지. 일반 텍스트 또는 HTML 가능. + + + + + **설명:** 메시지를 읽음 또는 읽지 않음으로 표시합니다. + + **매개변수:** + - `message_id` (string, 필수): 메시지의 고유 식별자. get_messages 작업에서 얻을 수 있습니다. + - `is_read` (boolean, 필수): 읽음으로 표시하려면 true, 읽지 않음으로 표시하려면 false로 설정합니다. + + + + + **설명:** 이메일 메시지를 삭제합니다. + + **매개변수:** + - `message_id` (string, 필수): 삭제할 메시지의 고유 식별자. get_messages 작업에서 얻을 수 있습니다. + + + + + **설명:** 기존 캘린더 이벤트를 업데이트합니다. + + **매개변수:** + - `event_id` (string, 필수): 이벤트의 고유 식별자. get_calendar_events 작업에서 얻을 수 있습니다. + - `subject` (string, 선택사항): 이벤트의 새 제목/제목. + - `start_time` (string, 선택사항): ISO 8601 형식의 새 시작 시간 (예: "2024-01-20T10:00:00"). 필수: 이 필드 사용 시 start_timezone도 제공해야 합니다. + - `start_timezone` (string, 선택사항): 시작 시간의 시간대. start_time 업데이트 시 필수. 예: "Pacific Standard Time", "Eastern Standard Time", "UTC". + - `end_time` (string, 선택사항): ISO 8601 형식의 새 종료 시간. 
필수: 이 필드 사용 시 end_timezone도 제공해야 합니다. + - `end_timezone` (string, 선택사항): 종료 시간의 시간대. end_time 업데이트 시 필수. 예: "Pacific Standard Time", "Eastern Standard Time", "UTC". + - `location` (string, 선택사항): 이벤트의 새 위치. + - `body` (string, 선택사항): 이벤트의 새 본문/설명. HTML 형식 지원. + + + + + **설명:** 캘린더 이벤트를 삭제합니다. + + **매개변수:** + - `event_id` (string, 필수): 삭제할 이벤트의 고유 식별자. get_calendar_events 작업에서 얻을 수 있습니다. + + ## 사용 예제 @@ -165,6 +233,62 @@ crew = Crew( crew.kickoff() ``` +### 이메일 관리 및 검색 + +```python +from crewai import Agent, Task, Crew + +# 이메일 관리에 특화된 에이전트 생성 +email_manager = Agent( + role="이메일 관리자", + goal="이메일 메시지를 검색하고 가져와 정리", + backstory="이메일 정리 및 관리에 능숙한 AI 어시스턴트.", + apps=['microsoft_outlook/get_messages'] +) + +# 이메일 검색 및 가져오기 작업 +search_emails_task = Task( + description="최신 읽지 않은 이메일 20건을 가져와 가장 중요한 것들의 요약을 제공하세요.", + agent=email_manager, + expected_output="주요 읽지 않은 이메일의 요약과 핵심 세부 정보." +) + +crew = Crew( + agents=[email_manager], + tasks=[search_emails_task] +) + +crew.kickoff() +``` + +### 캘린더 및 연락처 관리 + +```python +from crewai import Agent, Task, Crew + +# 캘린더 및 연락처 관리를 위한 에이전트 생성 +scheduler = Agent( + role="캘린더 및 연락처 관리자", + goal="캘린더 이벤트를 관리하고 연락처 정보를 유지", + backstory="일정 관리 및 연락처 정리를 담당하는 AI 어시스턴트.", + apps=['microsoft_outlook/create_calendar_event', 'microsoft_outlook/get_calendar_events', 'microsoft_outlook/create_contact'] +) + +# 회의 생성 및 연락처 추가 작업 +schedule_task = Task( + description="내일 오후 2시 '팀 회의' 제목으로 '회의실 A' 장소의 캘린더 이벤트를 만들고, 'john.smith@example.com' 이메일과 '프로젝트 매니저' 직책으로 'John Smith'의 새 연락처를 추가하세요.", + agent=scheduler, + expected_output="캘린더 이벤트가 생성되고 새 연락처가 추가됨." +) + +crew = Crew( + agents=[scheduler], + tasks=[schedule_task] +) + +crew.kickoff() +``` + ## 문제 해결 ### 일반적인 문제 @@ -173,11 +297,29 @@ crew.kickoff() - Microsoft 계정이 이메일, 캘린더 및 연락처 액세스에 필요한 권한을 가지고 있는지 확인하세요. - 필요한 범위: `Mail.Read`, `Mail.Send`, `Calendars.Read`, `Calendars.ReadWrite`, `Contacts.Read`, `Contacts.ReadWrite`. +- OAuth 연결에 필요한 모든 범위가 포함되어 있는지 확인하세요. 
**이메일 보내기 문제** - `send_email`에 `to_recipients`, `subject`, `body`가 제공되는지 확인하세요. - 이메일 주소가 올바르게 형식화되어 있는지 확인하세요. +- 계정에 `Mail.Send` 권한이 있는지 확인하세요. + +**캘린더 이벤트 생성** + +- `subject`, `start_datetime`, `end_datetime`이 제공되는지 확인하세요. +- 날짜/시간 필드에 적절한 ISO 8601 형식을 사용하세요 (예: '2024-01-20T10:00:00'). +- 이벤트가 잘못된 시간에 표시되는 경우 시간대 설정을 확인하세요. + +**연락처 관리** + +- `create_contact`의 경우 필수인 `displayName`이 제공되는지 확인하세요. +- `emailAddresses`를 제공할 때 `address`와 `name` 속성이 있는 올바른 객체 형식을 사용하세요. + +**검색 및 필터 문제** + +- `filter` 매개변수에 올바른 OData 문법을 사용하세요. +- 날짜 필터의 경우 ISO 8601 형식을 사용하세요 (예: "receivedDateTime ge '2024-01-01T00:00:00Z'"). ### 도움 받기 diff --git a/docs/ko/enterprise/integrations/microsoft_sharepoint.mdx b/docs/ko/enterprise/integrations/microsoft_sharepoint.mdx index e7de84c41..25f69db7a 100644 --- a/docs/ko/enterprise/integrations/microsoft_sharepoint.mdx +++ b/docs/ko/enterprise/integrations/microsoft_sharepoint.mdx @@ -77,6 +77,17 @@ CREWAI_PLATFORM_INTEGRATION_TOKEN=your_enterprise_token + + **설명:** SharePoint 사이트의 모든 문서 라이브러리(드라이브)를 나열합니다. 파일 작업을 사용하기 전에 사용 가능한 라이브러리를 찾으려면 이 작업을 사용하세요. + + **매개변수:** + - `site_id` (string, 필수): get_sites에서 가져온 전체 SharePoint 사이트 식별자. + - `top` (integer, 선택사항): 페이지당 반환할 최대 드라이브 수 (1-999). 기본값: 100 + - `skip_token` (string, 선택사항): 다음 결과 페이지를 가져오기 위한 이전 응답의 페이지네이션 토큰. + - `select` (string, 선택사항): 반환할 속성의 쉼표로 구분된 목록 (예: 'id,name,webUrl,driveType'). + + + **설명:** SharePoint 사이트의 모든 목록을 가져옵니다. @@ -145,20 +156,317 @@ CREWAI_PLATFORM_INTEGRATION_TOKEN=your_enterprise_token - - **설명:** SharePoint 문서 라이브러리에서 파일과 폴더를 가져옵니다. + + **설명:** SharePoint 문서 라이브러리에서 파일과 폴더를 가져옵니다. 기본적으로 루트 폴더를 나열하지만 folder_id를 제공하여 하위 폴더로 이동할 수 있습니다. **매개변수:** - - `site_id` (string, 필수): SharePoint 사이트의 ID. + - `site_id` (string, 필수): get_sites에서 가져온 전체 SharePoint 사이트 식별자. + - `drive_id` (string, 필수): 문서 라이브러리의 ID. 먼저 get_drives를 호출하여 유효한 드라이브 ID를 가져오세요. + - `folder_id` (string, 선택사항): 내용을 나열할 폴더의 ID. 루트 폴더의 경우 'root'를 사용하거나 이전 list_files 호출에서 가져온 폴더 ID를 제공하세요. 
기본값: 'root' + - `top` (integer, 선택사항): 페이지당 반환할 최대 항목 수 (1-1000). 기본값: 50 + - `skip_token` (string, 선택사항): 다음 결과 페이지를 가져오기 위한 이전 응답의 페이지네이션 토큰. + - `orderby` (string, 선택사항): 결과 정렬 순서 (예: 'name asc', 'size desc', 'lastModifiedDateTime desc'). 기본값: 'name asc' + - `filter` (string, 선택사항): 결과를 좁히기 위한 OData 필터 (예: 'file ne null'은 파일만, 'folder ne null'은 폴더만). + - `select` (string, 선택사항): 반환할 필드의 쉼표로 구분된 목록 (예: 'id,name,size,folder,file,webUrl,lastModifiedDateTime'). - - **설명:** SharePoint 문서 라이브러리에서 파일 또는 폴더를 삭제합니다. + + **설명:** SharePoint 문서 라이브러리에서 파일 또는 폴더를 삭제합니다. 폴더의 경우 모든 내용이 재귀적으로 삭제됩니다. 항목은 사이트 휴지통으로 이동됩니다. **매개변수:** - - `site_id` (string, 필수): SharePoint 사이트의 ID. - - `item_id` (string, 필수): 삭제할 파일 또는 폴더의 ID. + - `site_id` (string, 필수): get_sites에서 가져온 전체 SharePoint 사이트 식별자. + - `drive_id` (string, 필수): 문서 라이브러리의 ID. 먼저 get_drives를 호출하여 유효한 드라이브 ID를 가져오세요. + - `item_id` (string, 필수): 삭제할 파일 또는 폴더의 고유 식별자. list_files에서 가져오세요. + + + + + **설명:** 경로로 SharePoint 문서 라이브러리 폴더의 파일과 폴더를 나열합니다. 깊은 탐색을 위해 여러 list_files 호출보다 더 효율적입니다. + + **매개변수:** + - `site_id` (string, 필수): get_sites에서 가져온 전체 SharePoint 사이트 식별자. + - `drive_id` (string, 필수): 문서 라이브러리의 ID. 먼저 get_drives를 호출하여 유효한 드라이브 ID를 가져오세요. + - `folder_path` (string, 필수): 앞뒤 슬래시 없이 폴더의 전체 경로 (예: 'Documents', 'Reports/2024/Q1'). + - `top` (integer, 선택사항): 페이지당 반환할 최대 항목 수 (1-1000). 기본값: 50 + - `skip_token` (string, 선택사항): 다음 결과 페이지를 가져오기 위한 이전 응답의 페이지네이션 토큰. + - `orderby` (string, 선택사항): 결과 정렬 순서 (예: 'name asc', 'size desc'). 기본값: 'name asc' + - `select` (string, 선택사항): 반환할 필드의 쉼표로 구분된 목록 (예: 'id,name,size,folder,file,webUrl,lastModifiedDateTime'). + + + + + **설명:** SharePoint 문서 라이브러리에서 원시 파일 내용을 다운로드합니다. 일반 텍스트 파일(.txt, .csv, .json)에만 사용하세요. Excel 파일의 경우 Excel 전용 작업을 사용하세요. Word 파일의 경우 get_word_document_content를 사용하세요. + + **매개변수:** + - `site_id` (string, 필수): get_sites에서 가져온 전체 SharePoint 사이트 식별자. + - `drive_id` (string, 필수): 문서 라이브러리의 ID. 먼저 get_drives를 호출하여 유효한 드라이브 ID를 가져오세요. 
+ - `item_id` (string, 필수): 다운로드할 파일의 고유 식별자. list_files 또는 list_files_by_path에서 가져오세요. + + + + + **설명:** SharePoint 문서 라이브러리의 특정 파일 또는 폴더에 대한 자세한 메타데이터를 가져옵니다. 이름, 크기, 생성/수정 날짜 및 작성자 정보가 포함됩니다. + + **매개변수:** + - `site_id` (string, 필수): get_sites에서 가져온 전체 SharePoint 사이트 식별자. + - `drive_id` (string, 필수): 문서 라이브러리의 ID. 먼저 get_drives를 호출하여 유효한 드라이브 ID를 가져오세요. + - `item_id` (string, 필수): 파일 또는 폴더의 고유 식별자. list_files 또는 list_files_by_path에서 가져오세요. + - `select` (string, 선택사항): 반환할 속성의 쉼표로 구분된 목록 (예: 'id,name,size,createdDateTime,lastModifiedDateTime,webUrl,createdBy,lastModifiedBy'). + + + + + **설명:** SharePoint 문서 라이브러리에 새 폴더를 만듭니다. 기본적으로 루트에 폴더를 만들며 하위 폴더를 만들려면 parent_id를 사용하세요. + + **매개변수:** + - `site_id` (string, 필수): get_sites에서 가져온 전체 SharePoint 사이트 식별자. + - `drive_id` (string, 필수): 문서 라이브러리의 ID. 먼저 get_drives를 호출하여 유효한 드라이브 ID를 가져오세요. + - `folder_name` (string, 필수): 새 폴더의 이름. 사용할 수 없는 문자: \ / : * ? " < > | + - `parent_id` (string, 선택사항): 상위 폴더의 ID. 문서 라이브러리 루트의 경우 'root'를 사용하거나 list_files에서 가져온 폴더 ID를 제공하세요. 기본값: 'root' + + + + + **설명:** 키워드로 SharePoint 문서 라이브러리에서 파일과 폴더를 검색합니다. 파일 이름, 폴더 이름 및 Office 문서의 파일 내용을 검색합니다. 와일드카드나 특수 문자를 사용하지 마세요. + + **매개변수:** + - `site_id` (string, 필수): get_sites에서 가져온 전체 SharePoint 사이트 식별자. + - `drive_id` (string, 필수): 문서 라이브러리의 ID. 먼저 get_drives를 호출하여 유효한 드라이브 ID를 가져오세요. + - `query` (string, 필수): 검색 키워드 (예: 'report', 'budget 2024'). *.txt와 같은 와일드카드는 지원되지 않습니다. + - `top` (integer, 선택사항): 페이지당 반환할 최대 결과 수 (1-1000). 기본값: 50 + - `skip_token` (string, 선택사항): 다음 결과 페이지를 가져오기 위한 이전 응답의 페이지네이션 토큰. + - `select` (string, 선택사항): 반환할 필드의 쉼표로 구분된 목록 (예: 'id,name,size,folder,file,webUrl,lastModifiedDateTime'). + + + + + **설명:** SharePoint 내에서 파일 또는 폴더를 새 위치로 복사합니다. 원본 항목은 변경되지 않습니다. 대용량 파일의 경우 복사 작업은 비동기적입니다. + + **매개변수:** + - `site_id` (string, 필수): get_sites에서 가져온 전체 SharePoint 사이트 식별자. + - `drive_id` (string, 필수): 문서 라이브러리의 ID. 먼저 get_drives를 호출하여 유효한 드라이브 ID를 가져오세요. + - `item_id` (string, 필수): 복사할 파일 또는 폴더의 고유 식별자. 
list_files 또는 search_files에서 가져오세요. + - `destination_folder_id` (string, 필수): 대상 폴더의 ID. 루트 폴더의 경우 'root'를 사용하거나 list_files에서 가져온 폴더 ID를 사용하세요. + - `new_name` (string, 선택사항): 복사본의 새 이름. 제공하지 않으면 원래 이름이 사용됩니다. + + + + + **설명:** SharePoint 내에서 파일 또는 폴더를 새 위치로 이동합니다. 항목은 원래 위치에서 제거됩니다. 폴더의 경우 모든 내용도 함께 이동됩니다. + + **매개변수:** + - `site_id` (string, 필수): get_sites에서 가져온 전체 SharePoint 사이트 식별자. + - `drive_id` (string, 필수): 문서 라이브러리의 ID. 먼저 get_drives를 호출하여 유효한 드라이브 ID를 가져오세요. + - `item_id` (string, 필수): 이동할 파일 또는 폴더의 고유 식별자. list_files 또는 search_files에서 가져오세요. + - `destination_folder_id` (string, 필수): 대상 폴더의 ID. 루트 폴더의 경우 'root'를 사용하거나 list_files에서 가져온 폴더 ID를 사용하세요. + - `new_name` (string, 선택사항): 이동된 항목의 새 이름. 제공하지 않으면 원래 이름이 유지됩니다. + + + + + **설명:** SharePoint 문서 라이브러리에 저장된 Excel 통합 문서의 모든 워크시트(탭)를 나열합니다. 반환된 워크시트 이름을 다른 Excel 작업에 사용하세요. + + **매개변수:** + - `site_id` (string, 필수): get_sites에서 가져온 전체 SharePoint 사이트 식별자. + - `drive_id` (string, 필수): 문서 라이브러리의 ID. 먼저 get_drives를 호출하여 유효한 드라이브 ID를 가져오세요. + - `item_id` (string, 필수): SharePoint에 있는 Excel 파일의 고유 식별자. list_files 또는 search_files에서 가져오세요. + - `select` (string, 선택사항): 반환할 속성의 쉼표로 구분된 목록 (예: 'id,name,position,visibility'). + - `filter` (string, 선택사항): OData 필터 표현식 (예: "visibility eq 'Visible'"로 숨겨진 시트 제외). + - `top` (integer, 선택사항): 반환할 최대 워크시트 수. 최소: 1, 최대: 999 + - `orderby` (string, 선택사항): 정렬 순서 (예: 'position asc'로 탭 순서대로 반환). + + + + + **설명:** SharePoint 문서 라이브러리에 저장된 Excel 통합 문서에 새 워크시트(탭)를 만듭니다. 새 시트는 탭 목록의 끝에 추가됩니다. + + **매개변수:** + - `site_id` (string, 필수): get_sites에서 가져온 전체 SharePoint 사이트 식별자. + - `drive_id` (string, 필수): 문서 라이브러리의 ID. 먼저 get_drives를 호출하여 유효한 드라이브 ID를 가져오세요. + - `item_id` (string, 필수): SharePoint에 있는 Excel 파일의 고유 식별자. list_files 또는 search_files에서 가져오세요. + - `name` (string, 필수): 새 워크시트의 이름. 최대 31자. 사용할 수 없는 문자: \ / * ? : [ ]. 통합 문서 내에서 고유해야 합니다. + + + + + **설명:** SharePoint에 저장된 Excel 워크시트의 특정 범위에서 셀 값을 가져옵니다. 크기를 모르는 상태에서 모든 데이터를 읽으려면 대신 get_excel_used_range를 사용하세요. 
+ + **매개변수:** + - `site_id` (string, 필수): get_sites에서 가져온 전체 SharePoint 사이트 식별자. + - `drive_id` (string, 필수): 문서 라이브러리의 ID. 먼저 get_drives를 호출하여 유효한 드라이브 ID를 가져오세요. + - `item_id` (string, 필수): SharePoint에 있는 Excel 파일의 고유 식별자. list_files 또는 search_files에서 가져오세요. + - `worksheet_name` (string, 필수): 읽을 워크시트(탭)의 이름. get_excel_worksheets에서 가져오세요. 대소문자를 구분합니다. + - `range` (string, 필수): A1 표기법의 셀 범위 (예: 'A1:C10', 'A:C', '1:5', 'A1'). + - `select` (string, 선택사항): 반환할 속성의 쉼표로 구분된 목록 (예: 'address,values,formulas,numberFormat,text'). + + + + + **설명:** SharePoint에 저장된 Excel 워크시트의 특정 범위에 값을 씁니다. 기존 셀 내용을 덮어씁니다. values 배열의 크기는 범위 크기와 정확히 일치해야 합니다. + + **매개변수:** + - `site_id` (string, 필수): get_sites에서 가져온 전체 SharePoint 사이트 식별자. + - `drive_id` (string, 필수): 문서 라이브러리의 ID. 먼저 get_drives를 호출하여 유효한 드라이브 ID를 가져오세요. + - `item_id` (string, 필수): SharePoint에 있는 Excel 파일의 고유 식별자. list_files 또는 search_files에서 가져오세요. + - `worksheet_name` (string, 필수): 업데이트할 워크시트(탭)의 이름. get_excel_worksheets에서 가져오세요. 대소문자를 구분합니다. + - `range` (string, 필수): 값을 쓸 A1 표기법의 셀 범위 (예: 'A1:C3'은 3x3 블록). + - `values` (array, 필수): 2D 값 배열 (셀을 포함하는 행). A1:B2의 예: [["Header1", "Header2"], ["Value1", "Value2"]]. 셀을 지우려면 null을 사용하세요. + + + + + **설명:** 실제 셀 값 없이 워크시트에서 사용된 범위의 메타데이터(주소 및 크기)만 반환합니다. 대용량 파일에서 데이터를 청크로 읽기 전에 스프레드시트 크기를 파악하는 데 이상적입니다. + + **매개변수:** + - `site_id` (string, 필수): get_sites에서 가져온 전체 SharePoint 사이트 식별자. + - `drive_id` (string, 필수): 문서 라이브러리의 ID. 먼저 get_drives를 호출하여 유효한 드라이브 ID를 가져오세요. + - `item_id` (string, 필수): SharePoint에 있는 Excel 파일의 고유 식별자. list_files 또는 search_files에서 가져오세요. + - `worksheet_name` (string, 필수): 읽을 워크시트(탭)의 이름. get_excel_worksheets에서 가져오세요. 대소문자를 구분합니다. + + + + + **설명:** SharePoint에 저장된 워크시트에서 데이터가 포함된 모든 셀을 가져옵니다. 2MB보다 큰 파일에는 사용하지 마세요. 대용량 파일의 경우 먼저 get_excel_used_range_metadata를 사용한 다음 get_excel_range_data로 작은 청크로 읽으세요. + + **매개변수:** + - `site_id` (string, 필수): get_sites에서 가져온 전체 SharePoint 사이트 식별자. + - `drive_id` (string, 필수): 문서 라이브러리의 ID. 먼저 get_drives를 호출하여 유효한 드라이브 ID를 가져오세요. 
+ - `item_id` (string, 필수): SharePoint에 있는 Excel 파일의 고유 식별자. list_files 또는 search_files에서 가져오세요. + - `worksheet_name` (string, 필수): 읽을 워크시트(탭)의 이름. get_excel_worksheets에서 가져오세요. 대소문자를 구분합니다. + - `select` (string, 선택사항): 반환할 속성의 쉼표로 구분된 목록 (예: 'address,values,formulas,numberFormat,text,rowCount,columnCount'). + + + + + **설명:** SharePoint의 Excel 파일에서 행과 열 인덱스로 단일 셀의 값을 가져옵니다. 인덱스는 0 기반입니다 (행 0 = Excel 행 1, 열 0 = 열 A). + + **매개변수:** + - `site_id` (string, 필수): get_sites에서 가져온 전체 SharePoint 사이트 식별자. + - `drive_id` (string, 필수): 문서 라이브러리의 ID. 먼저 get_drives를 호출하여 유효한 드라이브 ID를 가져오세요. + - `item_id` (string, 필수): SharePoint에 있는 Excel 파일의 고유 식별자. list_files 또는 search_files에서 가져오세요. + - `worksheet_name` (string, 필수): 워크시트(탭)의 이름. get_excel_worksheets에서 가져오세요. 대소문자를 구분합니다. + - `row` (integer, 필수): 0 기반 행 인덱스 (행 0 = Excel 행 1). 유효 범위: 0-1048575 + - `column` (integer, 필수): 0 기반 열 인덱스 (열 0 = A, 열 1 = B). 유효 범위: 0-16383 + - `select` (string, 선택사항): 반환할 속성의 쉼표로 구분된 목록 (예: 'address,values,formulas,numberFormat,text'). + + + + + **설명:** 셀 범위를 필터링, 정렬 및 구조화된 데이터 기능이 있는 서식이 지정된 Excel 테이블로 변환합니다. 테이블을 만들면 add_excel_table_row로 데이터를 추가할 수 있습니다. + + **매개변수:** + - `site_id` (string, 필수): get_sites에서 가져온 전체 SharePoint 사이트 식별자. + - `drive_id` (string, 필수): 문서 라이브러리의 ID. 먼저 get_drives를 호출하여 유효한 드라이브 ID를 가져오세요. + - `item_id` (string, 필수): SharePoint에 있는 Excel 파일의 고유 식별자. list_files 또는 search_files에서 가져오세요. + - `worksheet_name` (string, 필수): 데이터 범위가 포함된 워크시트의 이름. get_excel_worksheets에서 가져오세요. + - `range` (string, 필수): 헤더와 데이터를 포함하여 테이블로 변환할 셀 범위 (예: 'A1:D10'에서 A1:D1은 열 헤더). + - `has_headers` (boolean, 선택사항): 첫 번째 행에 열 헤더가 포함되어 있으면 true로 설정. 기본값: true + + + + + **설명:** SharePoint에 저장된 특정 Excel 워크시트의 모든 테이블을 나열합니다. id, name, showHeaders 및 showTotals를 포함한 테이블 속성을 반환합니다. + + **매개변수:** + - `site_id` (string, 필수): get_sites에서 가져온 전체 SharePoint 사이트 식별자. + - `drive_id` (string, 필수): 문서 라이브러리의 ID. 먼저 get_drives를 호출하여 유효한 드라이브 ID를 가져오세요. + - `item_id` (string, 필수): SharePoint에 있는 Excel 파일의 고유 식별자. 
list_files 또는 search_files에서 가져오세요. + - `worksheet_name` (string, 필수): 테이블을 가져올 워크시트의 이름. get_excel_worksheets에서 가져오세요. + + + + + **설명:** SharePoint 파일의 Excel 테이블 끝에 새 행을 추가합니다. values 배열은 테이블의 열 수와 같은 수의 요소를 가져야 합니다. + + **매개변수:** + - `site_id` (string, 필수): get_sites에서 가져온 전체 SharePoint 사이트 식별자. + - `drive_id` (string, 필수): 문서 라이브러리의 ID. 먼저 get_drives를 호출하여 유효한 드라이브 ID를 가져오세요. + - `item_id` (string, 필수): SharePoint에 있는 Excel 파일의 고유 식별자. list_files 또는 search_files에서 가져오세요. + - `worksheet_name` (string, 필수): 테이블이 포함된 워크시트의 이름. get_excel_worksheets에서 가져오세요. + - `table_name` (string, 필수): 행을 추가할 테이블의 이름 (예: 'Table1'). get_excel_tables에서 가져오세요. 대소문자를 구분합니다. + - `values` (array, 필수): 새 행의 셀 값 배열로 테이블 순서대로 열당 하나씩 (예: ["John Doe", "john@example.com", 25]). + + + + + **설명:** SharePoint 파일의 Excel 테이블에서 모든 행을 데이터 범위로 가져옵니다. 정확한 범위를 알 필요가 없으므로 구조화된 테이블 작업 시 get_excel_range_data보다 쉽습니다. + + **매개변수:** + - `site_id` (string, 필수): get_sites에서 가져온 전체 SharePoint 사이트 식별자. + - `drive_id` (string, 필수): 문서 라이브러리의 ID. 먼저 get_drives를 호출하여 유효한 드라이브 ID를 가져오세요. + - `item_id` (string, 필수): SharePoint에 있는 Excel 파일의 고유 식별자. list_files 또는 search_files에서 가져오세요. + - `worksheet_name` (string, 필수): 테이블이 포함된 워크시트의 이름. get_excel_worksheets에서 가져오세요. + - `table_name` (string, 필수): 데이터를 가져올 테이블의 이름 (예: 'Table1'). get_excel_tables에서 가져오세요. 대소문자를 구분합니다. + - `select` (string, 선택사항): 반환할 속성의 쉼표로 구분된 목록 (예: 'address,values,formulas,numberFormat,text'). + + + + + **설명:** SharePoint에 저장된 Excel 워크시트에 데이터 범위에서 차트 시각화를 만듭니다. 차트는 워크시트에 포함됩니다. + + **매개변수:** + - `site_id` (string, 필수): get_sites에서 가져온 전체 SharePoint 사이트 식별자. + - `drive_id` (string, 필수): 문서 라이브러리의 ID. 먼저 get_drives를 호출하여 유효한 드라이브 ID를 가져오세요. + - `item_id` (string, 필수): SharePoint에 있는 Excel 파일의 고유 식별자. list_files 또는 search_files에서 가져오세요. + - `worksheet_name` (string, 필수): 차트를 만들 워크시트의 이름. get_excel_worksheets에서 가져오세요. 
+ - `chart_type` (string, 필수): 차트 유형 (예: 'ColumnClustered', 'ColumnStacked', 'Line', 'LineMarkers', 'Pie', 'Bar', 'BarClustered', 'Area', 'Scatter', 'Doughnut'). + - `source_data` (string, 필수): 헤더를 포함한 A1 표기법의 차트 데이터 범위 (예: 'A1:B10'). + - `series_by` (string, 선택사항): 데이터 계열 구성 방법: 'Auto', 'Columns' 또는 'Rows'. 기본값: 'Auto' + + + + + **설명:** SharePoint에 저장된 Excel 워크시트에 포함된 모든 차트를 나열합니다. id, name, chartType, height, width 및 position을 포함한 차트 속성을 반환합니다. + + **매개변수:** + - `site_id` (string, 필수): get_sites에서 가져온 전체 SharePoint 사이트 식별자. + - `drive_id` (string, 필수): 문서 라이브러리의 ID. 먼저 get_drives를 호출하여 유효한 드라이브 ID를 가져오세요. + - `item_id` (string, 필수): SharePoint에 있는 Excel 파일의 고유 식별자. list_files 또는 search_files에서 가져오세요. + - `worksheet_name` (string, 필수): 차트를 나열할 워크시트의 이름. get_excel_worksheets에서 가져오세요. + + + + + **설명:** SharePoint에 저장된 Excel 통합 문서에서 워크시트(탭)와 모든 내용을 영구적으로 제거합니다. 실행 취소할 수 없습니다. 통합 문서에는 최소 하나의 워크시트가 있어야 합니다. + + **매개변수:** + - `site_id` (string, 필수): get_sites에서 가져온 전체 SharePoint 사이트 식별자. + - `drive_id` (string, 필수): 문서 라이브러리의 ID. 먼저 get_drives를 호출하여 유효한 드라이브 ID를 가져오세요. + - `item_id` (string, 필수): SharePoint에 있는 Excel 파일의 고유 식별자. list_files 또는 search_files에서 가져오세요. + - `worksheet_name` (string, 필수): 삭제할 워크시트의 이름. 대소문자를 구분합니다. 이 시트의 모든 데이터, 테이블 및 차트가 영구적으로 제거됩니다. + + + + + **설명:** SharePoint의 Excel 워크시트에서 테이블을 제거합니다. 테이블 구조(필터링, 서식, 테이블 기능)는 삭제되지만 기본 셀 데이터는 보존됩니다. + + **매개변수:** + - `site_id` (string, 필수): get_sites에서 가져온 전체 SharePoint 사이트 식별자. + - `drive_id` (string, 필수): 문서 라이브러리의 ID. 먼저 get_drives를 호출하여 유효한 드라이브 ID를 가져오세요. + - `item_id` (string, 필수): SharePoint에 있는 Excel 파일의 고유 식별자. list_files 또는 search_files에서 가져오세요. + - `worksheet_name` (string, 필수): 테이블이 포함된 워크시트의 이름. get_excel_worksheets에서 가져오세요. + - `table_name` (string, 필수): 삭제할 테이블의 이름 (예: 'Table1'). get_excel_tables에서 가져오세요. 테이블 삭제 후에도 셀의 데이터는 유지됩니다. + + + + + **설명:** SharePoint에 저장된 Excel 통합 문서에 정의된 모든 명명된 범위를 가져옵니다. 명명된 범위는 셀 범위에 대한 사용자 정의 레이블입니다 (예: 'SalesData'는 A1:D100을 가리킴). 
+ + **매개변수:** + - `site_id` (string, 필수): get_sites에서 가져온 전체 SharePoint 사이트 식별자. + - `drive_id` (string, 필수): 문서 라이브러리의 ID. 먼저 get_drives를 호출하여 유효한 드라이브 ID를 가져오세요. + - `item_id` (string, 필수): SharePoint에 있는 Excel 파일의 고유 식별자. list_files 또는 search_files에서 가져오세요. + + + + + **설명:** SharePoint 문서 라이브러리에 저장된 Word 문서(.docx)에서 텍스트 내용을 다운로드하고 추출합니다. SharePoint에서 Word 문서를 읽는 권장 방법입니다. + + **매개변수:** + - `site_id` (string, 필수): get_sites에서 가져온 전체 SharePoint 사이트 식별자. + - `drive_id` (string, 필수): 문서 라이브러리의 ID. 먼저 get_drives를 호출하여 유효한 드라이브 ID를 가져오세요. + - `item_id` (string, 필수): SharePoint에 있는 Word 문서(.docx)의 고유 식별자. list_files 또는 search_files에서 가져오세요. diff --git a/docs/ko/enterprise/integrations/microsoft_teams.mdx b/docs/ko/enterprise/integrations/microsoft_teams.mdx index 338bd94be..8a66f23e0 100644 --- a/docs/ko/enterprise/integrations/microsoft_teams.mdx +++ b/docs/ko/enterprise/integrations/microsoft_teams.mdx @@ -107,6 +107,86 @@ CREWAI_PLATFORM_INTEGRATION_TOKEN=your_enterprise_token - `join_web_url` (string, 필수): 검색할 회의의 웹 참가 URL. + + + **설명:** 외부 Meeting ID로 온라인 회의를 검색합니다. + + **매개변수:** + - `join_meeting_id` (string, 필수): 참석자가 참가할 때 사용하는 회의 ID(숫자 코드). 회의 초대에 표시되는 joinMeetingId이며, Graph API meeting id가 아닙니다. + + + + + **설명:** 특정 온라인 회의의 세부 정보를 가져옵니다. + + **매개변수:** + - `meeting_id` (string, 필수): Graph API 회의 ID(긴 영숫자 문자열). create_meeting 또는 search_online_meetings 작업에서 얻을 수 있습니다. 숫자 joinMeetingId와 다릅니다. + + + + + **설명:** 특정 팀의 멤버를 가져옵니다. + + **매개변수:** + - `team_id` (string, 필수): 팀의 고유 식별자. get_teams 작업에서 얻을 수 있습니다. + - `top` (integer, 선택사항): 페이지당 검색할 멤버 수 (1-999). 기본값: 100. + - `skip_token` (string, 선택사항): 이전 응답의 페이지네이션 토큰. 응답에 @odata.nextLink가 포함된 경우 $skiptoken 매개변수 값을 추출하여 여기에 전달하면 다음 페이지 결과를 가져올 수 있습니다. + + + + + **설명:** 팀에 새 채널을 만듭니다. + + **매개변수:** + - `team_id` (string, 필수): 팀의 고유 식별자. get_teams 작업에서 얻을 수 있습니다. + - `display_name` (string, 필수): Teams에 표시되는 채널 이름. 팀 내에서 고유해야 합니다. 최대 50자. + - `description` (string, 선택사항): 채널 목적을 설명하는 선택적 설명. 채널 세부 정보에 표시됩니다. 최대 1024자. 
+ - `membership_type` (string, 선택사항): 채널 가시성. 옵션: standard, private. "standard" = 모든 팀 멤버에게 표시, "private" = 명시적으로 추가된 멤버에게만 표시. 기본값: standard. + + + + + **설명:** 채널의 특정 메시지에 대한 회신을 가져옵니다. + + **매개변수:** + - `team_id` (string, 필수): 팀의 고유 식별자. get_teams 작업에서 얻을 수 있습니다. + - `channel_id` (string, 필수): 채널의 고유 식별자. get_channels 작업에서 얻을 수 있습니다. + - `message_id` (string, 필수): 상위 메시지의 고유 식별자. get_messages 작업에서 얻을 수 있습니다. + - `top` (integer, 선택사항): 페이지당 검색할 회신 수 (1-50). 기본값: 50. + - `skip_token` (string, 선택사항): 이전 응답의 페이지네이션 토큰. 응답에 @odata.nextLink가 포함된 경우 $skiptoken 매개변수 값을 추출하여 여기에 전달하면 다음 페이지 결과를 가져올 수 있습니다. + + + + + **설명:** Teams 채널의 메시지에 회신합니다. + + **매개변수:** + - `team_id` (string, 필수): 팀의 고유 식별자. get_teams 작업에서 얻을 수 있습니다. + - `channel_id` (string, 필수): 채널의 고유 식별자. get_channels 작업에서 얻을 수 있습니다. + - `message_id` (string, 필수): 회신할 메시지의 고유 식별자. get_messages 작업에서 얻을 수 있습니다. + - `message` (string, 필수): 회신 내용. HTML의 경우 서식 태그 포함. 텍스트의 경우 일반 텍스트만. + - `content_type` (string, 선택사항): 콘텐츠 형식. 옵션: html, text. "text"는 일반 텍스트, "html"은 서식이 있는 리치 텍스트. 기본값: text. + + + + + **설명:** 기존 온라인 회의를 업데이트합니다. + + **매개변수:** + - `meeting_id` (string, 필수): 회의의 고유 식별자. create_meeting 또는 search_online_meetings 작업에서 얻을 수 있습니다. + - `subject` (string, 선택사항): 새 회의 제목. + - `startDateTime` (string, 선택사항): 시간대가 포함된 ISO 8601 형식의 새 시작 시간. 예: "2024-01-20T10:00:00-08:00". + - `endDateTime` (string, 선택사항): 시간대가 포함된 ISO 8601 형식의 새 종료 시간. + + + + + **설명:** 온라인 회의를 삭제합니다. + + **매개변수:** + - `meeting_id` (string, 필수): 삭제할 회의의 고유 식별자. create_meeting 또는 search_online_meetings 작업에서 얻을 수 있습니다. 
+ + ## 사용 예제 @@ -140,6 +220,62 @@ crew = Crew( crew.kickoff() ``` +### 메시징 및 커뮤니케이션 + +```python +from crewai import Agent, Task, Crew + +# 메시징에 특화된 에이전트 생성 +messenger = Agent( + role="Teams 메신저", + goal="Teams 채널에서 메시지 전송 및 검색", + backstory="팀 커뮤니케이션 및 메시지 관리에 능숙한 AI 어시스턴트.", + apps=['microsoft_teams/send_message', 'microsoft_teams/get_messages'] +) + +# 메시지 전송 및 최근 메시지 검색 작업 +messaging_task = Task( + description="'your_team_id' 팀의 General 채널에 'Hello team! This is an automated update from our AI assistant.' 메시지를 보낸 다음 해당 채널의 최근 10개 메시지를 검색하세요.", + agent=messenger, + expected_output="메시지가 성공적으로 전송되고 최근 메시지가 검색됨." +) + +crew = Crew( + agents=[messenger], + tasks=[messaging_task] +) + +crew.kickoff() +``` + +### 회의 관리 + +```python +from crewai import Agent, Task, Crew + +# 회의 관리를 위한 에이전트 생성 +meeting_scheduler = Agent( + role="회의 스케줄러", + goal="Teams 회의 생성 및 관리", + backstory="회의 일정 관리 및 정리를 담당하는 AI 어시스턴트.", + apps=['microsoft_teams/create_meeting', 'microsoft_teams/search_online_meetings_by_join_url'] +) + +# 회의 생성 작업 +schedule_meeting_task = Task( + description="내일 오전 10시에 1시간 동안 진행되는 '주간 팀 동기화' 제목의 Teams 회의를 생성하세요 (시간대가 포함된 적절한 ISO 8601 형식 사용).", + agent=meeting_scheduler, + expected_output="회의 세부 정보와 함께 Teams 회의가 성공적으로 생성됨." +) + +crew = Crew( + agents=[meeting_scheduler], + tasks=[schedule_meeting_task] +) + +crew.kickoff() +``` + ## 문제 해결 ### 일반적인 문제 @@ -148,11 +284,35 @@ crew.kickoff() - Microsoft 계정이 Teams 액세스에 필요한 권한을 가지고 있는지 확인하세요. - 필요한 범위: `Team.ReadBasic.All`, `Channel.ReadBasic.All`, `ChannelMessage.Send`, `ChannelMessage.Read.All`, `OnlineMeetings.ReadWrite`, `OnlineMeetings.Read`. +- OAuth 연결에 필요한 모든 범위가 포함되어 있는지 확인하세요. **팀 및 채널 액세스** - 액세스하려는 팀의 멤버인지 확인하세요. - 팀 및 채널 ID가 올바른지 다시 확인하세요. +- 팀 및 채널 ID는 `get_teams` 및 `get_channels` 작업을 사용하여 얻을 수 있습니다. + +**메시지 전송 문제** + +- `send_message`에 `team_id`, `channel_id`, `message`가 제공되는지 확인하세요. +- 지정된 채널에 메시지를 보낼 권한이 있는지 확인하세요. +- 메시지 형식에 따라 적절한 `content_type`(text 또는 html)을 선택하세요. 
+ +**회의 생성** + +- `subject`, `startDateTime`, `endDateTime`이 제공되는지 확인하세요. +- 날짜/시간 필드에 시간대가 포함된 적절한 ISO 8601 형식을 사용하세요 (예: '2024-01-20T10:00:00-08:00'). +- 회의 시간이 미래인지 확인하세요. + +**메시지 검색 제한** + +- `get_messages` 작업은 요청당 최대 50개 메시지만 검색할 수 있습니다. +- 메시지는 역시간순(최신순)으로 반환됩니다. + +**회의 검색** + +- `search_online_meetings_by_join_url`의 경우 참가 URL이 정확하고 올바르게 형식화되어 있는지 확인하세요. +- URL은 완전한 Teams 회의 참가 URL이어야 합니다. ### 도움 받기 diff --git a/docs/ko/enterprise/integrations/microsoft_word.mdx b/docs/ko/enterprise/integrations/microsoft_word.mdx index 8f718501b..2c8d980a3 100644 --- a/docs/ko/enterprise/integrations/microsoft_word.mdx +++ b/docs/ko/enterprise/integrations/microsoft_word.mdx @@ -97,6 +97,26 @@ CREWAI_PLATFORM_INTEGRATION_TOKEN=your_enterprise_token - `file_id` (string, 필수): 삭제할 문서의 ID. + + + **설명:** OneDrive의 새 위치에 문서를 복사합니다. + + **매개변수:** + - `file_id` (string, 필수): 복사할 문서의 ID. + - `name` (string, 선택사항): 복사된 문서의 새 이름. + - `parent_id` (string, 선택사항): 대상 폴더의 ID (기본값: 루트). + + + + + **설명:** OneDrive의 새 위치로 문서를 이동합니다. + + **매개변수:** + - `file_id` (string, 필수): 이동할 문서의 ID. + - `parent_id` (string, 필수): 대상 폴더의 ID. + - `name` (string, 선택사항): 이동된 문서의 새 이름. + + ## 사용 예제 diff --git a/docs/pt-BR/enterprise/integrations/google_contacts.mdx b/docs/pt-BR/enterprise/integrations/google_contacts.mdx index 31f1803f9..fd1a3f629 100644 --- a/docs/pt-BR/enterprise/integrations/google_contacts.mdx +++ b/docs/pt-BR/enterprise/integrations/google_contacts.mdx @@ -200,6 +200,25 @@ CREWAI_PLATFORM_INTEGRATION_TOKEN=seu_enterprise_token - `clientData` (array, opcional): Dados específicos do cliente. Cada item é um objeto com `key` (string) e `value` (string). + + + **Descrição:** Atualizar informações de um grupo de contatos. + + **Parâmetros:** + - `resourceName` (string, obrigatório): O nome do recurso do grupo de contatos (ex: 'contactGroups/myContactGroup'). + - `name` (string, obrigatório): O nome do grupo de contatos. + - `clientData` (array, opcional): Dados específicos do cliente. 
Cada item é um objeto com `key` (string) e `value` (string). + + + + + **Descrição:** Excluir um grupo de contatos. + + **Parâmetros:** + - `resourceName` (string, obrigatório): O nome do recurso do grupo de contatos a excluir (ex: 'contactGroups/myContactGroup'). + - `deleteContacts` (boolean, opcional): Se os contatos do grupo também devem ser excluídos. Padrão: false + + ## Exemplos de Uso diff --git a/docs/pt-BR/enterprise/integrations/google_docs.mdx b/docs/pt-BR/enterprise/integrations/google_docs.mdx index f30fbf64e..f5eb98194 100644 --- a/docs/pt-BR/enterprise/integrations/google_docs.mdx +++ b/docs/pt-BR/enterprise/integrations/google_docs.mdx @@ -131,6 +131,297 @@ CREWAI_PLATFORM_INTEGRATION_TOKEN=seu_enterprise_token - `endIndex` (integer, obrigatório): O índice final do intervalo. + + + **Descrição:** Criar um novo documento do Google com conteúdo em uma única ação. + + **Parâmetros:** + - `title` (string, obrigatório): O título para o novo documento. Aparece no topo do documento e no Google Drive. + - `content` (string, opcional): O conteúdo de texto a inserir no documento. Use `\n` para novos parágrafos. + + + + + **Descrição:** Adicionar texto ao final de um documento do Google. Insere automaticamente no final do documento sem necessidade de especificar um índice. + + **Parâmetros:** + - `documentId` (string, obrigatório): O ID do documento obtido da resposta de create_document ou URL. + - `text` (string, obrigatório): Texto a adicionar ao final do documento. Use `\n` para novos parágrafos. + + + + + **Descrição:** Aplicar ou remover formatação de negrito em texto de um documento do Google. + + **Parâmetros:** + - `documentId` (string, obrigatório): O ID do documento. + - `startIndex` (integer, obrigatório): Posição inicial do texto a formatar. + - `endIndex` (integer, obrigatório): Posição final do texto a formatar (exclusivo). + - `bold` (boolean, obrigatório): Defina `true` para aplicar negrito, `false` para remover negrito. 
+ + + + + **Descrição:** Aplicar ou remover formatação de itálico em texto de um documento do Google. + + **Parâmetros:** + - `documentId` (string, obrigatório): O ID do documento. + - `startIndex` (integer, obrigatório): Posição inicial do texto a formatar. + - `endIndex` (integer, obrigatório): Posição final do texto a formatar (exclusivo). + - `italic` (boolean, obrigatório): Defina `true` para aplicar itálico, `false` para remover itálico. + + + + + **Descrição:** Adicionar ou remover formatação de sublinhado em texto de um documento do Google. + + **Parâmetros:** + - `documentId` (string, obrigatório): O ID do documento. + - `startIndex` (integer, obrigatório): Posição inicial do texto a formatar. + - `endIndex` (integer, obrigatório): Posição final do texto a formatar (exclusivo). + - `underline` (boolean, obrigatório): Defina `true` para sublinhar, `false` para remover sublinhado. + + + + + **Descrição:** Adicionar ou remover formatação de tachado em texto de um documento do Google. + + **Parâmetros:** + - `documentId` (string, obrigatório): O ID do documento. + - `startIndex` (integer, obrigatório): Posição inicial do texto a formatar. + - `endIndex` (integer, obrigatório): Posição final do texto a formatar (exclusivo). + - `strikethrough` (boolean, obrigatório): Defina `true` para adicionar tachado, `false` para remover. + + + + + **Descrição:** Alterar o tamanho da fonte do texto em um documento do Google. + + **Parâmetros:** + - `documentId` (string, obrigatório): O ID do documento. + - `startIndex` (integer, obrigatório): Posição inicial do texto a formatar. + - `endIndex` (integer, obrigatório): Posição final do texto a formatar (exclusivo). + - `fontSize` (number, obrigatório): Tamanho da fonte em pontos. Tamanhos comuns: 10, 11, 12, 14, 16, 18, 24, 36. + + + + + **Descrição:** Alterar a cor do texto usando valores RGB (escala 0-1) em um documento do Google. + + **Parâmetros:** + - `documentId` (string, obrigatório): O ID do documento. 
+ - `startIndex` (integer, obrigatório): Posição inicial do texto a formatar. + - `endIndex` (integer, obrigatório): Posição final do texto a formatar (exclusivo). + - `red` (number, obrigatório): Componente vermelho (0-1). Exemplo: `1` para vermelho total. + - `green` (number, obrigatório): Componente verde (0-1). Exemplo: `0.5` para metade verde. + - `blue` (number, obrigatório): Componente azul (0-1). Exemplo: `0` para sem azul. + + + + + **Descrição:** Transformar texto existente em um hyperlink clicável em um documento do Google. + + **Parâmetros:** + - `documentId` (string, obrigatório): O ID do documento. + - `startIndex` (integer, obrigatório): Posição inicial do texto a transformar em link. + - `endIndex` (integer, obrigatório): Posição final do texto a transformar em link (exclusivo). + - `url` (string, obrigatório): A URL para a qual o link deve apontar. Exemplo: `"https://example.com"`. + + + + + **Descrição:** Aplicar um estilo de título ou parágrafo a um intervalo de texto em um documento do Google. + + **Parâmetros:** + - `documentId` (string, obrigatório): O ID do documento. + - `startIndex` (integer, obrigatório): Posição inicial do(s) parágrafo(s) a estilizar. + - `endIndex` (integer, obrigatório): Posição final do(s) parágrafo(s) a estilizar. + - `style` (string, obrigatório): O estilo a aplicar. Opções: `NORMAL_TEXT`, `TITLE`, `SUBTITLE`, `HEADING_1`, `HEADING_2`, `HEADING_3`, `HEADING_4`, `HEADING_5`, `HEADING_6`. + + + + + **Descrição:** Definir o alinhamento de texto para parágrafos em um documento do Google. + + **Parâmetros:** + - `documentId` (string, obrigatório): O ID do documento. + - `startIndex` (integer, obrigatório): Posição inicial do(s) parágrafo(s) a alinhar. + - `endIndex` (integer, obrigatório): Posição final do(s) parágrafo(s) a alinhar. + - `alignment` (string, obrigatório): Alinhamento do texto. Opções: `START` (esquerda), `CENTER`, `END` (direita), `JUSTIFIED`. 
+ + + + + **Descrição:** Definir o espaçamento entre linhas para parágrafos em um documento do Google. + + **Parâmetros:** + - `documentId` (string, obrigatório): O ID do documento. + - `startIndex` (integer, obrigatório): Posição inicial do(s) parágrafo(s). + - `endIndex` (integer, obrigatório): Posição final do(s) parágrafo(s). + - `lineSpacing` (number, obrigatório): Espaçamento entre linhas como porcentagem. `100` = simples, `115` = 1.15x, `150` = 1.5x, `200` = duplo. + + + + + **Descrição:** Converter parágrafos em uma lista com marcadores ou numerada em um documento do Google. + + **Parâmetros:** + - `documentId` (string, obrigatório): O ID do documento. + - `startIndex` (integer, obrigatório): Posição inicial dos parágrafos a converter em lista. + - `endIndex` (integer, obrigatório): Posição final dos parágrafos a converter em lista. + - `bulletPreset` (string, obrigatório): Estilo de marcadores/numeração. Opções: `BULLET_DISC_CIRCLE_SQUARE`, `BULLET_DIAMONDX_ARROW3D_SQUARE`, `BULLET_CHECKBOX`, `BULLET_ARROW_DIAMOND_DISC`, `BULLET_STAR_CIRCLE_SQUARE`, `NUMBERED_DECIMAL_ALPHA_ROMAN`, `NUMBERED_DECIMAL_ALPHA_ROMAN_PARENS`, `NUMBERED_DECIMAL_NESTED`, `NUMBERED_UPPERALPHA_ALPHA_ROMAN`, `NUMBERED_UPPERROMAN_UPPERALPHA_DECIMAL`. + + + + + **Descrição:** Remover marcadores ou numeração de parágrafos em um documento do Google. + + **Parâmetros:** + - `documentId` (string, obrigatório): O ID do documento. + - `startIndex` (integer, obrigatório): Posição inicial dos parágrafos de lista. + - `endIndex` (integer, obrigatório): Posição final dos parágrafos de lista. + + + + + **Descrição:** Inserir uma tabela com conteúdo em um documento do Google em uma única ação. Forneça o conteúdo como um array 2D. + + **Parâmetros:** + - `documentId` (string, obrigatório): O ID do documento. + - `rows` (integer, obrigatório): Número de linhas na tabela. + - `columns` (integer, obrigatório): Número de colunas na tabela. + - `index` (integer, opcional): Posição para inserir a tabela. 
Se não fornecido, a tabela é inserida no final do documento. + - `content` (array, obrigatório): Conteúdo da tabela como um array 2D. Cada array interno é uma linha. Exemplo: `[["Ano", "Receita"], ["2023", "$43B"], ["2024", "$45B"]]`. + + + + + **Descrição:** Inserir uma nova linha acima ou abaixo de uma célula de referência em uma tabela existente. + + **Parâmetros:** + - `documentId` (string, obrigatório): O ID do documento. + - `tableStartIndex` (integer, obrigatório): O índice inicial da tabela. Obtenha de get_document. + - `rowIndex` (integer, obrigatório): Índice da linha (baseado em 0) da célula de referência. + - `columnIndex` (integer, opcional): Índice da coluna (baseado em 0) da célula de referência. Padrão: `0`. + - `insertBelow` (boolean, opcional): Se `true`, insere abaixo da linha de referência. Se `false`, insere acima. Padrão: `true`. + + + + + **Descrição:** Inserir uma nova coluna à esquerda ou à direita de uma célula de referência em uma tabela existente. + + **Parâmetros:** + - `documentId` (string, obrigatório): O ID do documento. + - `tableStartIndex` (integer, obrigatório): O índice inicial da tabela. + - `rowIndex` (integer, opcional): Índice da linha (baseado em 0) da célula de referência. Padrão: `0`. + - `columnIndex` (integer, obrigatório): Índice da coluna (baseado em 0) da célula de referência. + - `insertRight` (boolean, opcional): Se `true`, insere à direita. Se `false`, insere à esquerda. Padrão: `true`. + + + + + **Descrição:** Excluir uma linha de uma tabela existente em um documento do Google. + + **Parâmetros:** + - `documentId` (string, obrigatório): O ID do documento. + - `tableStartIndex` (integer, obrigatório): O índice inicial da tabela. + - `rowIndex` (integer, obrigatório): Índice da linha (baseado em 0) a excluir. + - `columnIndex` (integer, opcional): Índice da coluna (baseado em 0) de qualquer célula na linha. Padrão: `0`. + + + + + **Descrição:** Excluir uma coluna de uma tabela existente em um documento do Google. 
+ + **Parâmetros:** + - `documentId` (string, obrigatório): O ID do documento. + - `tableStartIndex` (integer, obrigatório): O índice inicial da tabela. + - `rowIndex` (integer, opcional): Índice da linha (baseado em 0) de qualquer célula na coluna. Padrão: `0`. + - `columnIndex` (integer, obrigatório): Índice da coluna (baseado em 0) a excluir. + + + + + **Descrição:** Mesclar um intervalo de células de tabela em uma única célula. O conteúdo de todas as células é preservado. + + **Parâmetros:** + - `documentId` (string, obrigatório): O ID do documento. + - `tableStartIndex` (integer, obrigatório): O índice inicial da tabela. + - `rowIndex` (integer, obrigatório): Índice da linha inicial (baseado em 0) para a mesclagem. + - `columnIndex` (integer, obrigatório): Índice da coluna inicial (baseado em 0) para a mesclagem. + - `rowSpan` (integer, obrigatório): Número de linhas a mesclar. + - `columnSpan` (integer, obrigatório): Número de colunas a mesclar. + + + + + **Descrição:** Desfazer a mesclagem de células de tabela previamente mescladas, retornando-as a células individuais. + + **Parâmetros:** + - `documentId` (string, obrigatório): O ID do documento. + - `tableStartIndex` (integer, obrigatório): O índice inicial da tabela. + - `rowIndex` (integer, obrigatório): Índice da linha (baseado em 0) da célula mesclada. + - `columnIndex` (integer, obrigatório): Índice da coluna (baseado em 0) da célula mesclada. + - `rowSpan` (integer, obrigatório): Número de linhas que a célula mesclada abrange. + - `columnSpan` (integer, obrigatório): Número de colunas que a célula mesclada abrange. + + + + + **Descrição:** Inserir uma imagem de uma URL pública em um documento do Google. A imagem deve ser publicamente acessível, ter menos de 50MB e estar no formato PNG/JPEG/GIF. + + **Parâmetros:** + - `documentId` (string, obrigatório): O ID do documento. + - `uri` (string, obrigatório): URL pública da imagem. Deve ser acessível sem autenticação. 
+ - `index` (integer, opcional): Posição para inserir a imagem (ex.: `1` para o início do documento). Se não fornecido, a imagem é inserida no final do documento. + + + + + **Descrição:** Inserir uma quebra de seção para criar seções de documento com formatação diferente. + + **Parâmetros:** + - `documentId` (string, obrigatório): O ID do documento. + - `index` (integer, obrigatório): Posição para inserir a quebra de seção. + - `sectionType` (string, obrigatório): O tipo de quebra de seção. Opções: `CONTINUOUS` (permanece na mesma página), `NEXT_PAGE` (inicia uma nova página). + + + + + **Descrição:** Criar um cabeçalho para o documento. Retorna um headerId que pode ser usado com insert_text para adicionar conteúdo ao cabeçalho. + + **Parâmetros:** + - `documentId` (string, obrigatório): O ID do documento. + - `type` (string, opcional): Tipo de cabeçalho. Opções: `DEFAULT`. Padrão: `DEFAULT`. + + + + + **Descrição:** Criar um rodapé para o documento. Retorna um footerId que pode ser usado com insert_text para adicionar conteúdo ao rodapé. + + **Parâmetros:** + - `documentId` (string, obrigatório): O ID do documento. + - `type` (string, opcional): Tipo de rodapé. Opções: `DEFAULT`. Padrão: `DEFAULT`. + + + + + **Descrição:** Excluir um cabeçalho do documento. Use get_document para encontrar o headerId. + + **Parâmetros:** + - `documentId` (string, obrigatório): O ID do documento. + - `headerId` (string, obrigatório): O ID do cabeçalho a excluir. Obtenha da resposta de get_document. + + + + + **Descrição:** Excluir um rodapé do documento. Use get_document para encontrar o footerId. + + **Parâmetros:** + - `documentId` (string, obrigatório): O ID do documento. + - `footerId` (string, obrigatório): O ID do rodapé a excluir. Obtenha da resposta de get_document. 
+ + ## Exemplos de Uso diff --git a/docs/pt-BR/enterprise/integrations/google_slides.mdx b/docs/pt-BR/enterprise/integrations/google_slides.mdx index d6d781f87..c185e12ec 100644 --- a/docs/pt-BR/enterprise/integrations/google_slides.mdx +++ b/docs/pt-BR/enterprise/integrations/google_slides.mdx @@ -61,6 +61,22 @@ CREWAI_PLATFORM_INTEGRATION_TOKEN=seu_enterprise_token + + **Descrição:** Obter metadados leves de uma apresentação (título, número de slides, IDs dos slides). Use isso primeiro antes de recuperar o conteúdo completo. + + **Parâmetros:** + - `presentationId` (string, obrigatório): O ID da apresentação a ser recuperada. + + + + + **Descrição:** Extrair todo o conteúdo de texto de uma apresentação. Retorna IDs dos slides e texto de formas e tabelas apenas (sem formatação). + + **Parâmetros:** + - `presentationId` (string, obrigatório): O ID da apresentação. + + + **Descrição:** Recupera uma apresentação por ID. @@ -80,6 +96,15 @@ CREWAI_PLATFORM_INTEGRATION_TOKEN=seu_enterprise_token + + **Descrição:** Extrair conteúdo de texto de um único slide. Retorna apenas texto de formas e tabelas (sem formatação ou estilo). + + **Parâmetros:** + - `presentationId` (string, obrigatório): O ID da apresentação. + - `pageObjectId` (string, obrigatório): O ID do slide/página para obter o texto. + + + **Descrição:** Recupera uma página específica por seu ID. @@ -98,6 +123,120 @@ CREWAI_PLATFORM_INTEGRATION_TOKEN=seu_enterprise_token + + **Descrição:** Adicionar um slide em branco adicional a uma apresentação. Novas apresentações já possuem um slide em branco - verifique get_presentation_metadata primeiro. Para slides com áreas de título/corpo, use create_slide_with_layout. + + **Parâmetros:** + - `presentationId` (string, obrigatório): O ID da apresentação. + - `insertionIndex` (integer, opcional): Onde inserir o slide (baseado em 0). Se omitido, adiciona no final. 
+ + + + + **Descrição:** Criar um slide com layout predefinido contendo áreas de espaço reservado para título, corpo, etc. Melhor que create_slide para conteúdo estruturado. Após criar, use get_page para encontrar os IDs de espaço reservado, depois insira texto neles. + + **Parâmetros:** + - `presentationId` (string, obrigatório): O ID da apresentação. + - `layout` (string, obrigatório): Tipo de layout. Um de: `BLANK`, `TITLE`, `TITLE_AND_BODY`, `TITLE_AND_TWO_COLUMNS`, `TITLE_ONLY`, `SECTION_HEADER`, `ONE_COLUMN_TEXT`, `MAIN_POINT`, `BIG_NUMBER`. TITLE_AND_BODY é melhor para título+descrição. TITLE para slides apenas com título. SECTION_HEADER para divisores de seção. + - `insertionIndex` (integer, opcional): Onde inserir (baseado em 0). Se omitido, adiciona no final. + + + + + **Descrição:** Criar uma caixa de texto em um slide com conteúdo. Use para títulos, descrições, parágrafos - não para tabelas. Opcionalmente especifique posição (x, y) e tamanho (width, height) em unidades EMU (914400 EMU = 1 polegada). + + **Parâmetros:** + - `presentationId` (string, obrigatório): O ID da apresentação. + - `slideId` (string, obrigatório): O ID do slide para adicionar a caixa de texto. + - `text` (string, obrigatório): O conteúdo de texto da caixa de texto. + - `x` (integer, opcional): Posição X em EMU (914400 = 1 polegada). Padrão: 914400 (1 polegada da esquerda). + - `y` (integer, opcional): Posição Y em EMU (914400 = 1 polegada). Padrão: 914400 (1 polegada do topo). + - `width` (integer, opcional): Largura em EMU. Padrão: 7315200 (~8 polegadas). + - `height` (integer, opcional): Altura em EMU. Padrão: 914400 (~1 polegada). + + + + + **Descrição:** Remover um slide de uma apresentação. Use get_presentation primeiro para encontrar o ID do slide. + + **Parâmetros:** + - `presentationId` (string, obrigatório): O ID da apresentação. + - `slideId` (string, obrigatório): O ID do objeto do slide a excluir. Obtenha de get_presentation. 
+ + + + + **Descrição:** Criar uma cópia de um slide existente. A duplicata é inserida imediatamente após o original. + + **Parâmetros:** + - `presentationId` (string, obrigatório): O ID da apresentação. + - `slideId` (string, obrigatório): O ID do objeto do slide a duplicar. Obtenha de get_presentation. + + + + + **Descrição:** Reordenar slides movendo-os para uma nova posição. Os IDs dos slides devem estar na ordem atual da apresentação (sem duplicatas). + + **Parâmetros:** + - `presentationId` (string, obrigatório): O ID da apresentação. + - `slideIds` (array de strings, obrigatório): Array de IDs dos slides a mover. Obrigatoriamente na ordem atual da apresentação. + - `insertionIndex` (integer, obrigatório): Posição de destino (baseado em 0). 0 = início, número de slides = final. + + + + + **Descrição:** Incorporar um vídeo do YouTube em um slide. O ID do vídeo é o valor após "v=" nas URLs do YouTube (ex: para youtube.com/watch?v=abc123, use "abc123"). + + **Parâmetros:** + - `presentationId` (string, obrigatório): O ID da apresentação. + - `slideId` (string, obrigatório): O ID do slide para adicionar o vídeo. Obtenha de get_presentation. + - `videoId` (string, obrigatório): O ID do vídeo do YouTube (o valor após v= na URL). + + + + + **Descrição:** Incorporar um vídeo do Google Drive em um slide. O ID do arquivo pode ser encontrado na URL do arquivo no Drive. + + **Parâmetros:** + - `presentationId` (string, obrigatório): O ID da apresentação. + - `slideId` (string, obrigatório): O ID do slide para adicionar o vídeo. Obtenha de get_presentation. + - `fileId` (string, obrigatório): O ID do arquivo do Google Drive do vídeo. + + + + + **Descrição:** Definir uma imagem de fundo para um slide. A URL da imagem deve ser publicamente acessível. + + **Parâmetros:** + - `presentationId` (string, obrigatório): O ID da apresentação. + - `slideId` (string, obrigatório): O ID do slide para definir o fundo. Obtenha de get_presentation. 
+ - `imageUrl` (string, obrigatório): URL publicamente acessível da imagem a usar como fundo. + + + + + **Descrição:** Criar uma tabela vazia em um slide. Para criar uma tabela com conteúdo, use create_table_with_content. + + **Parâmetros:** + - `presentationId` (string, obrigatório): O ID da apresentação. + - `slideId` (string, obrigatório): O ID do slide para adicionar a tabela. Obtenha de get_presentation. + - `rows` (integer, obrigatório): Número de linhas na tabela. + - `columns` (integer, obrigatório): Número de colunas na tabela. + + + + + **Descrição:** Criar uma tabela com conteúdo em uma única ação. Forneça o conteúdo como uma matriz 2D onde cada array interno é uma linha. Exemplo: [["Cabeçalho1", "Cabeçalho2"], ["Linha1Col1", "Linha1Col2"]]. + + **Parâmetros:** + - `presentationId` (string, obrigatório): O ID da apresentação. + - `slideId` (string, obrigatório): O ID do slide para adicionar a tabela. Obtenha de get_presentation. + - `rows` (integer, obrigatório): Número de linhas na tabela. + - `columns` (integer, obrigatório): Número de colunas na tabela. + - `content` (array, obrigatório): Conteúdo da tabela como matriz 2D. Cada array interno é uma linha. Exemplo: [["Ano", "Receita"], ["2023", "$10M"]]. + + + **Descrição:** Importa dados de uma planilha do Google para uma apresentação. diff --git a/docs/pt-BR/enterprise/integrations/microsoft_excel.mdx b/docs/pt-BR/enterprise/integrations/microsoft_excel.mdx index beb39d126..a053c8ba6 100644 --- a/docs/pt-BR/enterprise/integrations/microsoft_excel.mdx +++ b/docs/pt-BR/enterprise/integrations/microsoft_excel.mdx @@ -148,6 +148,16 @@ CREWAI_PLATFORM_INTEGRATION_TOKEN=seu_enterprise_token + + **Descrição:** Obter dados de uma tabela específica em uma planilha do Excel. + + **Parâmetros:** + - `file_id` (string, obrigatório): O ID do arquivo Excel. + - `worksheet_name` (string, obrigatório): Nome da planilha. + - `table_name` (string, obrigatório): Nome da tabela. 
+ + + **Descrição:** Criar um gráfico em uma planilha do Excel. @@ -180,6 +190,15 @@ CREWAI_PLATFORM_INTEGRATION_TOKEN=seu_enterprise_token + + **Descrição:** Obter os metadados do intervalo usado (apenas dimensões, sem dados) de uma planilha do Excel. + + **Parâmetros:** + - `file_id` (string, obrigatório): O ID do arquivo Excel. + - `worksheet_name` (string, obrigatório): Nome da planilha. + + + **Descrição:** Obter todos os gráficos em uma planilha do Excel. diff --git a/docs/pt-BR/enterprise/integrations/microsoft_onedrive.mdx b/docs/pt-BR/enterprise/integrations/microsoft_onedrive.mdx index 730d0ff59..b23ae1c1d 100644 --- a/docs/pt-BR/enterprise/integrations/microsoft_onedrive.mdx +++ b/docs/pt-BR/enterprise/integrations/microsoft_onedrive.mdx @@ -150,6 +150,49 @@ CREWAI_PLATFORM_INTEGRATION_TOKEN=seu_enterprise_token - `item_id` (string, obrigatório): O ID do arquivo. + + + **Descrição:** Listar arquivos e pastas em um caminho específico do OneDrive. + + **Parâmetros:** + - `folder_path` (string, obrigatório): O caminho da pasta (ex: 'Documents/Reports'). + - `top` (integer, opcional): Número de itens a recuperar (máx 1000). Padrão: 50. + - `orderby` (string, opcional): Ordenar por campo (ex: "name asc", "lastModifiedDateTime desc"). Padrão: "name asc". + + + + + **Descrição:** Obter arquivos acessados recentemente no OneDrive. + + **Parâmetros:** + - `top` (integer, opcional): Número de itens a recuperar (máx 200). Padrão: 25. + + + + + **Descrição:** Obter arquivos e pastas compartilhados com o usuário. + + **Parâmetros:** + - `top` (integer, opcional): Número de itens a recuperar (máx 200). Padrão: 50. + - `orderby` (string, opcional): Ordenar por campo. Padrão: "name asc". + + + + + **Descrição:** Obter informações sobre um arquivo ou pasta específica pelo caminho. + + **Parâmetros:** + - `file_path` (string, obrigatório): O caminho do arquivo ou pasta (ex: 'Documents/report.docx'). + + + + + **Descrição:** Baixar um arquivo do OneDrive pelo seu caminho. 
+ + **Parâmetros:** + - `file_path` (string, obrigatório): O caminho do arquivo (ex: 'Documents/report.docx'). + + ## Exemplos de Uso diff --git a/docs/pt-BR/enterprise/integrations/microsoft_outlook.mdx b/docs/pt-BR/enterprise/integrations/microsoft_outlook.mdx index 952109710..a872d1997 100644 --- a/docs/pt-BR/enterprise/integrations/microsoft_outlook.mdx +++ b/docs/pt-BR/enterprise/integrations/microsoft_outlook.mdx @@ -132,6 +132,74 @@ CREWAI_PLATFORM_INTEGRATION_TOKEN=seu_enterprise_token - `companyName` (string, opcional): Nome da empresa do contato. + + + **Descrição:** Obter uma mensagem de email específica por ID. + + **Parâmetros:** + - `message_id` (string, obrigatório): O identificador único da mensagem. Obter pela ação get_messages. + - `select` (string, opcional): Lista separada por vírgulas de propriedades a retornar. Exemplo: "id,subject,body,from,receivedDateTime". Padrão: "id,subject,body,from,toRecipients,receivedDateTime". + + + + + **Descrição:** Responder a uma mensagem de email. + + **Parâmetros:** + - `message_id` (string, obrigatório): O identificador único da mensagem a responder. Obter pela ação get_messages. + - `comment` (string, obrigatório): O conteúdo da mensagem de resposta. Pode ser texto simples ou HTML. A mensagem original será citada abaixo deste conteúdo. + + + + + **Descrição:** Encaminhar uma mensagem de email. + + **Parâmetros:** + - `message_id` (string, obrigatório): O identificador único da mensagem a encaminhar. Obter pela ação get_messages. + - `to_recipients` (array, obrigatório): Array de endereços de email dos destinatários. Exemplo: ["john@example.com", "jane@example.com"]. + - `comment` (string, opcional): Mensagem opcional a incluir acima do conteúdo encaminhado. Pode ser texto simples ou HTML. + + + + + **Descrição:** Marcar uma mensagem como lida ou não lida. + + **Parâmetros:** + - `message_id` (string, obrigatório): O identificador único da mensagem. Obter pela ação get_messages. 
+ - `is_read` (boolean, obrigatório): Definir como true para marcar como lida, false para marcar como não lida. + + + + + **Descrição:** Excluir uma mensagem de email. + + **Parâmetros:** + - `message_id` (string, obrigatório): O identificador único da mensagem a excluir. Obter pela ação get_messages. + + + + + **Descrição:** Atualizar um evento de calendário existente. + + **Parâmetros:** + - `event_id` (string, obrigatório): O identificador único do evento. Obter pela ação get_calendar_events. + - `subject` (string, opcional): Novo assunto/título do evento. + - `start_time` (string, opcional): Nova hora de início no formato ISO 8601 (ex: "2024-01-20T10:00:00"). OBRIGATÓRIO: Também deve fornecer start_timezone ao usar este campo. + - `start_timezone` (string, opcional): Fuso horário da hora de início. OBRIGATÓRIO ao atualizar start_time. Exemplos: "Pacific Standard Time", "Eastern Standard Time", "UTC". + - `end_time` (string, opcional): Nova hora de término no formato ISO 8601. OBRIGATÓRIO: Também deve fornecer end_timezone ao usar este campo. + - `end_timezone` (string, opcional): Fuso horário da hora de término. OBRIGATÓRIO ao atualizar end_time. Exemplos: "Pacific Standard Time", "Eastern Standard Time", "UTC". + - `location` (string, opcional): Novo local do evento. + - `body` (string, opcional): Novo corpo/descrição do evento. Suporta formatação HTML. + + + + + **Descrição:** Excluir um evento de calendário. + + **Parâmetros:** + - `event_id` (string, obrigatório): O identificador único do evento a excluir. Obter pela ação get_calendar_events. 
+ + ## Exemplos de Uso diff --git a/docs/pt-BR/enterprise/integrations/microsoft_sharepoint.mdx b/docs/pt-BR/enterprise/integrations/microsoft_sharepoint.mdx index 8c2cfe265..0f5968421 100644 --- a/docs/pt-BR/enterprise/integrations/microsoft_sharepoint.mdx +++ b/docs/pt-BR/enterprise/integrations/microsoft_sharepoint.mdx @@ -77,6 +77,17 @@ CREWAI_PLATFORM_INTEGRATION_TOKEN=seu_enterprise_token + + **Descrição:** Listar todas as bibliotecas de documentos (drives) em um site do SharePoint. Use isto para descobrir bibliotecas disponíveis antes de usar operações de arquivo. + + **Parâmetros:** + - `site_id` (string, obrigatório): O identificador completo do site SharePoint obtido de get_sites. + - `top` (integer, opcional): Número máximo de drives a retornar por página (1-999). Padrão: 100 + - `skip_token` (string, opcional): Token de paginação de uma resposta anterior para buscar a próxima página de resultados. + - `select` (string, opcional): Lista de propriedades separadas por vírgula para retornar (ex: 'id,name,webUrl,driveType'). + + + **Descrição:** Obter todas as listas em um site do SharePoint. @@ -145,20 +156,317 @@ CREWAI_PLATFORM_INTEGRATION_TOKEN=seu_enterprise_token - - **Descrição:** Obter arquivos e pastas de uma biblioteca de documentos do SharePoint. + + **Descrição:** Recuperar arquivos e pastas de uma biblioteca de documentos do SharePoint. Por padrão, lista a pasta raiz, mas você pode navegar em subpastas fornecendo um folder_id. **Parâmetros:** - - `site_id` (string, obrigatório): O ID do site do SharePoint. + - `site_id` (string, obrigatório): O identificador completo do site SharePoint obtido de get_sites. + - `drive_id` (string, obrigatório): O ID da biblioteca de documentos. Chame get_drives primeiro para obter IDs de drive válidos. + - `folder_id` (string, opcional): O ID da pasta para listar o conteúdo. Use 'root' para a pasta raiz, ou forneça um ID de pasta de uma chamada anterior de list_files. 
Padrão: 'root' + - `top` (integer, opcional): Número máximo de itens a retornar por página (1-1000). Padrão: 50 + - `skip_token` (string, opcional): Token de paginação de uma resposta anterior para buscar a próxima página de resultados. + - `orderby` (string, opcional): Ordem de classificação dos resultados (ex: 'name asc', 'size desc', 'lastModifiedDateTime desc'). Padrão: 'name asc' + - `filter` (string, opcional): Filtro OData para restringir resultados (ex: 'file ne null' apenas para arquivos, 'folder ne null' apenas para pastas). + - `select` (string, opcional): Lista de campos separados por vírgula para retornar (ex: 'id,name,size,folder,file,webUrl,lastModifiedDateTime'). - - **Descrição:** Excluir um arquivo ou pasta da biblioteca de documentos do SharePoint. + + **Descrição:** Excluir um arquivo ou pasta de uma biblioteca de documentos do SharePoint. Para pastas, todo o conteúdo é excluído recursivamente. Os itens são movidos para a lixeira do site. **Parâmetros:** - - `site_id` (string, obrigatório): O ID do site do SharePoint. - - `item_id` (string, obrigatório): O ID do arquivo ou pasta a excluir. + - `site_id` (string, obrigatório): O identificador completo do site SharePoint obtido de get_sites. + - `drive_id` (string, obrigatório): O ID da biblioteca de documentos. Chame get_drives primeiro para obter IDs de drive válidos. + - `item_id` (string, obrigatório): O identificador único do arquivo ou pasta a excluir. Obtenha de list_files. + + + + + **Descrição:** Listar arquivos e pastas em uma pasta de biblioteca de documentos do SharePoint pelo caminho. Mais eficiente do que múltiplas chamadas list_files para navegação profunda. + + **Parâmetros:** + - `site_id` (string, obrigatório): O identificador completo do site SharePoint obtido de get_sites. + - `drive_id` (string, obrigatório): O ID da biblioteca de documentos. Chame get_drives primeiro para obter IDs de drive válidos. 
+ - `folder_path` (string, obrigatório): O caminho completo para a pasta sem barras iniciais/finais (ex: 'Documents', 'Reports/2024/Q1'). + - `top` (integer, opcional): Número máximo de itens a retornar por página (1-1000). Padrão: 50 + - `skip_token` (string, opcional): Token de paginação de uma resposta anterior para buscar a próxima página de resultados. + - `orderby` (string, opcional): Ordem de classificação dos resultados (ex: 'name asc', 'size desc'). Padrão: 'name asc' + - `select` (string, opcional): Lista de campos separados por vírgula para retornar (ex: 'id,name,size,folder,file,webUrl,lastModifiedDateTime'). + + + + + **Descrição:** Baixar conteúdo bruto de um arquivo de uma biblioteca de documentos do SharePoint. Use apenas para arquivos de texto simples (.txt, .csv, .json). Para arquivos Excel, use as ações específicas de Excel. Para arquivos Word, use get_word_document_content. + + **Parâmetros:** + - `site_id` (string, obrigatório): O identificador completo do site SharePoint obtido de get_sites. + - `drive_id` (string, obrigatório): O ID da biblioteca de documentos. Chame get_drives primeiro para obter IDs de drive válidos. + - `item_id` (string, obrigatório): O identificador único do arquivo a baixar. Obtenha de list_files ou list_files_by_path. + + + + + **Descrição:** Recuperar metadados detalhados de um arquivo ou pasta específico em uma biblioteca de documentos do SharePoint, incluindo nome, tamanho, datas de criação/modificação e informações do autor. + + **Parâmetros:** + - `site_id` (string, obrigatório): O identificador completo do site SharePoint obtido de get_sites. + - `drive_id` (string, obrigatório): O ID da biblioteca de documentos. Chame get_drives primeiro para obter IDs de drive válidos. + - `item_id` (string, obrigatório): O identificador único do arquivo ou pasta. Obtenha de list_files ou list_files_by_path. 
+ - `select` (string, opcional): Lista de propriedades separadas por vírgula para retornar (ex: 'id,name,size,createdDateTime,lastModifiedDateTime,webUrl,createdBy,lastModifiedBy'). + + + + + **Descrição:** Criar uma nova pasta em uma biblioteca de documentos do SharePoint. Por padrão, cria a pasta na raiz; use parent_id para criar subpastas. + + **Parâmetros:** + - `site_id` (string, obrigatório): O identificador completo do site SharePoint obtido de get_sites. + - `drive_id` (string, obrigatório): O ID da biblioteca de documentos. Chame get_drives primeiro para obter IDs de drive válidos. + - `folder_name` (string, obrigatório): Nome para a nova pasta. Não pode conter: \ / : * ? " < > | + - `parent_id` (string, opcional): O ID da pasta pai. Use 'root' para a raiz da biblioteca de documentos, ou forneça um ID de pasta de list_files. Padrão: 'root' + + + + + **Descrição:** Pesquisar arquivos e pastas em uma biblioteca de documentos do SharePoint por palavras-chave. Pesquisa nomes de arquivos, nomes de pastas e conteúdo de arquivos para documentos Office. Não use curingas ou caracteres especiais. + + **Parâmetros:** + - `site_id` (string, obrigatório): O identificador completo do site SharePoint obtido de get_sites. + - `drive_id` (string, obrigatório): O ID da biblioteca de documentos. Chame get_drives primeiro para obter IDs de drive válidos. + - `query` (string, obrigatório): Palavras-chave de pesquisa (ex: 'relatório', 'orçamento 2024'). Curingas como *.txt não são suportados. + - `top` (integer, opcional): Número máximo de resultados a retornar por página (1-1000). Padrão: 50 + - `skip_token` (string, opcional): Token de paginação de uma resposta anterior para buscar a próxima página de resultados. + - `select` (string, opcional): Lista de campos separados por vírgula para retornar (ex: 'id,name,size,folder,file,webUrl,lastModifiedDateTime'). + + + + + **Descrição:** Copiar um arquivo ou pasta para um novo local dentro do SharePoint. 
O item original permanece inalterado. A operação de cópia é assíncrona para arquivos grandes. + + **Parâmetros:** + - `site_id` (string, obrigatório): O identificador completo do site SharePoint obtido de get_sites. + - `drive_id` (string, obrigatório): O ID da biblioteca de documentos. Chame get_drives primeiro para obter IDs de drive válidos. + - `item_id` (string, obrigatório): O identificador único do arquivo ou pasta a copiar. Obtenha de list_files ou search_files. + - `destination_folder_id` (string, obrigatório): O ID da pasta de destino. Use 'root' para a pasta raiz, ou um ID de pasta de list_files. + - `new_name` (string, opcional): Novo nome para a cópia. Se não fornecido, o nome original é usado. + + + + + **Descrição:** Mover um arquivo ou pasta para um novo local dentro do SharePoint. O item é removido de sua localização original. Para pastas, todo o conteúdo é movido também. + + **Parâmetros:** + - `site_id` (string, obrigatório): O identificador completo do site SharePoint obtido de get_sites. + - `drive_id` (string, obrigatório): O ID da biblioteca de documentos. Chame get_drives primeiro para obter IDs de drive válidos. + - `item_id` (string, obrigatório): O identificador único do arquivo ou pasta a mover. Obtenha de list_files ou search_files. + - `destination_folder_id` (string, obrigatório): O ID da pasta de destino. Use 'root' para a pasta raiz, ou um ID de pasta de list_files. + - `new_name` (string, opcional): Novo nome para o item movido. Se não fornecido, o nome original é mantido. + + + + + **Descrição:** Listar todas as planilhas (abas) em uma pasta de trabalho Excel armazenada em uma biblioteca de documentos do SharePoint. Use o nome da planilha retornado com outras ações de Excel. + + **Parâmetros:** + - `site_id` (string, obrigatório): O identificador completo do site SharePoint obtido de get_sites. + - `drive_id` (string, obrigatório): O ID da biblioteca de documentos. Chame get_drives primeiro para obter IDs de drive válidos. 
+ - `item_id` (string, obrigatório): O identificador único do arquivo Excel no SharePoint. Obtenha de list_files ou search_files. + - `select` (string, opcional): Lista de propriedades separadas por vírgula para retornar (ex: 'id,name,position,visibility'). + - `filter` (string, opcional): Expressão de filtro OData (ex: "visibility eq 'Visible'" para excluir planilhas ocultas). + - `top` (integer, opcional): Número máximo de planilhas a retornar. Mínimo: 1, Máximo: 999 + - `orderby` (string, opcional): Ordem de classificação (ex: 'position asc' para retornar planilhas na ordem das abas). + + + + + **Descrição:** Criar uma nova planilha (aba) em uma pasta de trabalho Excel armazenada em uma biblioteca de documentos do SharePoint. A nova planilha é adicionada no final da lista de abas. + + **Parâmetros:** + - `site_id` (string, obrigatório): O identificador completo do site SharePoint obtido de get_sites. + - `drive_id` (string, obrigatório): O ID da biblioteca de documentos. Chame get_drives primeiro para obter IDs de drive válidos. + - `item_id` (string, obrigatório): O identificador único do arquivo Excel no SharePoint. Obtenha de list_files ou search_files. + - `name` (string, obrigatório): Nome para a nova planilha. Máximo de 31 caracteres. Não pode conter: \ / * ? : [ ]. Deve ser único na pasta de trabalho. + + + + + **Descrição:** Recuperar valores de células de um intervalo específico em uma planilha Excel armazenada no SharePoint. Para ler todos os dados sem saber as dimensões, use get_excel_used_range em vez disso. + + **Parâmetros:** + - `site_id` (string, obrigatório): O identificador completo do site SharePoint obtido de get_sites. + - `drive_id` (string, obrigatório): O ID da biblioteca de documentos. Chame get_drives primeiro para obter IDs de drive válidos. + - `item_id` (string, obrigatório): O identificador único do arquivo Excel no SharePoint. Obtenha de list_files ou search_files. 
+ - `worksheet_name` (string, obrigatório): Nome da planilha (aba) para leitura. Obtenha de get_excel_worksheets. Sensível a maiúsculas e minúsculas. + - `range` (string, obrigatório): Intervalo de células em notação A1 (ex: 'A1:C10', 'A:C', '1:5', 'A1'). + - `select` (string, opcional): Lista de propriedades separadas por vírgula para retornar (ex: 'address,values,formulas,numberFormat,text'). + + + + + **Descrição:** Escrever valores em um intervalo específico em uma planilha Excel armazenada no SharePoint. Sobrescreve o conteúdo existente das células. As dimensões do array de valores devem corresponder exatamente às dimensões do intervalo. + + **Parâmetros:** + - `site_id` (string, obrigatório): O identificador completo do site SharePoint obtido de get_sites. + - `drive_id` (string, obrigatório): O ID da biblioteca de documentos. Chame get_drives primeiro para obter IDs de drive válidos. + - `item_id` (string, obrigatório): O identificador único do arquivo Excel no SharePoint. Obtenha de list_files ou search_files. + - `worksheet_name` (string, obrigatório): Nome da planilha (aba) a atualizar. Obtenha de get_excel_worksheets. Sensível a maiúsculas e minúsculas. + - `range` (string, obrigatório): Intervalo de células em notação A1 onde os valores serão escritos (ex: 'A1:C3' para um bloco 3x3). + - `values` (array, obrigatório): Array 2D de valores (linhas contendo células). Exemplo para A1:B2: [["Cabeçalho1", "Cabeçalho2"], ["Valor1", "Valor2"]]. Use null para limpar uma célula. + + + + + **Descrição:** Retornar apenas os metadados (endereço e dimensões) do intervalo utilizado em uma planilha, sem os valores reais das células. Ideal para arquivos grandes para entender o tamanho da planilha antes de ler dados em blocos. + + **Parâmetros:** + - `site_id` (string, obrigatório): O identificador completo do site SharePoint obtido de get_sites. + - `drive_id` (string, obrigatório): O ID da biblioteca de documentos. 
Chame get_drives primeiro para obter IDs de drive válidos. + - `item_id` (string, obrigatório): O identificador único do arquivo Excel no SharePoint. Obtenha de list_files ou search_files. + - `worksheet_name` (string, obrigatório): Nome da planilha (aba) para leitura. Obtenha de get_excel_worksheets. Sensível a maiúsculas e minúsculas. + + + + + **Descrição:** Recuperar todas as células contendo dados em uma planilha armazenada no SharePoint. Não use para arquivos maiores que 2MB. Para arquivos grandes, use get_excel_used_range_metadata primeiro, depois get_excel_range_data para ler em blocos menores. + + **Parâmetros:** + - `site_id` (string, obrigatório): O identificador completo do site SharePoint obtido de get_sites. + - `drive_id` (string, obrigatório): O ID da biblioteca de documentos. Chame get_drives primeiro para obter IDs de drive válidos. + - `item_id` (string, obrigatório): O identificador único do arquivo Excel no SharePoint. Obtenha de list_files ou search_files. + - `worksheet_name` (string, obrigatório): Nome da planilha (aba) para leitura. Obtenha de get_excel_worksheets. Sensível a maiúsculas e minúsculas. + - `select` (string, opcional): Lista de propriedades separadas por vírgula para retornar (ex: 'address,values,formulas,numberFormat,text,rowCount,columnCount'). + + + + + **Descrição:** Recuperar o valor de uma única célula por índice de linha e coluna de um arquivo Excel no SharePoint. Os índices são baseados em 0 (linha 0 = linha 1 do Excel, coluna 0 = coluna A). + + **Parâmetros:** + - `site_id` (string, obrigatório): O identificador completo do site SharePoint obtido de get_sites. + - `drive_id` (string, obrigatório): O ID da biblioteca de documentos. Chame get_drives primeiro para obter IDs de drive válidos. + - `item_id` (string, obrigatório): O identificador único do arquivo Excel no SharePoint. Obtenha de list_files ou search_files. + - `worksheet_name` (string, obrigatório): Nome da planilha (aba). Obtenha de get_excel_worksheets. 
Sensível a maiúsculas e minúsculas. + - `row` (integer, obrigatório): Índice de linha baseado em 0 (linha 0 = linha 1 do Excel). Intervalo válido: 0-1048575 + - `column` (integer, obrigatório): Índice de coluna baseado em 0 (coluna 0 = A, coluna 1 = B). Intervalo válido: 0-16383 + - `select` (string, opcional): Lista de propriedades separadas por vírgula para retornar (ex: 'address,values,formulas,numberFormat,text'). + + + + + **Descrição:** Converter um intervalo de células em uma tabela Excel formatada com recursos de filtragem, classificação e dados estruturados. Tabelas habilitam add_excel_table_row para adicionar dados. + + **Parâmetros:** + - `site_id` (string, obrigatório): O identificador completo do site SharePoint obtido de get_sites. + - `drive_id` (string, obrigatório): O ID da biblioteca de documentos. Chame get_drives primeiro para obter IDs de drive válidos. + - `item_id` (string, obrigatório): O identificador único do arquivo Excel no SharePoint. Obtenha de list_files ou search_files. + - `worksheet_name` (string, obrigatório): Nome da planilha contendo o intervalo de dados. Obtenha de get_excel_worksheets. + - `range` (string, obrigatório): Intervalo de células para converter em tabela, incluindo cabeçalhos e dados (ex: 'A1:D10' onde A1:D1 contém cabeçalhos de coluna). + - `has_headers` (boolean, opcional): Defina como true se a primeira linha contém cabeçalhos de coluna. Padrão: true + + + + + **Descrição:** Listar todas as tabelas em uma planilha Excel específica armazenada no SharePoint. Retorna propriedades da tabela incluindo id, name, showHeaders e showTotals. + + **Parâmetros:** + - `site_id` (string, obrigatório): O identificador completo do site SharePoint obtido de get_sites. + - `drive_id` (string, obrigatório): O ID da biblioteca de documentos. Chame get_drives primeiro para obter IDs de drive válidos. + - `item_id` (string, obrigatório): O identificador único do arquivo Excel no SharePoint. Obtenha de list_files ou search_files. 
+ - `worksheet_name` (string, obrigatório): Nome da planilha para obter tabelas. Obtenha de get_excel_worksheets. + + + + + **Descrição:** Adicionar uma nova linha ao final de uma tabela Excel em um arquivo do SharePoint. O array de valores deve ter o mesmo número de elementos que o número de colunas da tabela. + + **Parâmetros:** + - `site_id` (string, obrigatório): O identificador completo do site SharePoint obtido de get_sites. + - `drive_id` (string, obrigatório): O ID da biblioteca de documentos. Chame get_drives primeiro para obter IDs de drive válidos. + - `item_id` (string, obrigatório): O identificador único do arquivo Excel no SharePoint. Obtenha de list_files ou search_files. + - `worksheet_name` (string, obrigatório): Nome da planilha contendo a tabela. Obtenha de get_excel_worksheets. + - `table_name` (string, obrigatório): Nome da tabela para adicionar a linha (ex: 'Table1'). Obtenha de get_excel_tables. Sensível a maiúsculas e minúsculas. + - `values` (array, obrigatório): Array de valores de células para a nova linha, um por coluna na ordem da tabela (ex: ["João Silva", "joao@exemplo.com", 25]). + + + + + **Descrição:** Obter todas as linhas de uma tabela Excel em um arquivo do SharePoint como um intervalo de dados. Mais fácil do que get_excel_range_data ao trabalhar com tabelas estruturadas, pois não é necessário saber o intervalo exato. + + **Parâmetros:** + - `site_id` (string, obrigatório): O identificador completo do site SharePoint obtido de get_sites. + - `drive_id` (string, obrigatório): O ID da biblioteca de documentos. Chame get_drives primeiro para obter IDs de drive válidos. + - `item_id` (string, obrigatório): O identificador único do arquivo Excel no SharePoint. Obtenha de list_files ou search_files. + - `worksheet_name` (string, obrigatório): Nome da planilha contendo a tabela. Obtenha de get_excel_worksheets. + - `table_name` (string, obrigatório): Nome da tabela para obter dados (ex: 'Table1'). Obtenha de get_excel_tables. 
Sensível a maiúsculas e minúsculas. + - `select` (string, opcional): Lista de propriedades separadas por vírgula para retornar (ex: 'address,values,formulas,numberFormat,text'). + + + + + **Descrição:** Criar uma visualização de gráfico em uma planilha Excel armazenada no SharePoint a partir de um intervalo de dados. O gráfico é incorporado na planilha. + + **Parâmetros:** + - `site_id` (string, obrigatório): O identificador completo do site SharePoint obtido de get_sites. + - `drive_id` (string, obrigatório): O ID da biblioteca de documentos. Chame get_drives primeiro para obter IDs de drive válidos. + - `item_id` (string, obrigatório): O identificador único do arquivo Excel no SharePoint. Obtenha de list_files ou search_files. + - `worksheet_name` (string, obrigatório): Nome da planilha onde o gráfico será criado. Obtenha de get_excel_worksheets. + - `chart_type` (string, obrigatório): Tipo de gráfico (ex: 'ColumnClustered', 'ColumnStacked', 'Line', 'LineMarkers', 'Pie', 'Bar', 'BarClustered', 'Area', 'Scatter', 'Doughnut'). + - `source_data` (string, obrigatório): Intervalo de dados para o gráfico em notação A1, incluindo cabeçalhos (ex: 'A1:B10'). + - `series_by` (string, opcional): Como as séries de dados são organizadas: 'Auto', 'Columns' ou 'Rows'. Padrão: 'Auto' + + + + + **Descrição:** Listar todos os gráficos incorporados em uma planilha Excel armazenada no SharePoint. Retorna propriedades do gráfico incluindo id, name, chartType, height, width e position. + + **Parâmetros:** + - `site_id` (string, obrigatório): O identificador completo do site SharePoint obtido de get_sites. + - `drive_id` (string, obrigatório): O ID da biblioteca de documentos. Chame get_drives primeiro para obter IDs de drive válidos. + - `item_id` (string, obrigatório): O identificador único do arquivo Excel no SharePoint. Obtenha de list_files ou search_files. + - `worksheet_name` (string, obrigatório): Nome da planilha para listar gráficos. Obtenha de get_excel_worksheets. 
+ + + + + **Descrição:** Remover permanentemente uma planilha (aba) e todo seu conteúdo de uma pasta de trabalho Excel armazenada no SharePoint. Não pode ser desfeito. Uma pasta de trabalho deve ter pelo menos uma planilha. + + **Parâmetros:** + - `site_id` (string, obrigatório): O identificador completo do site SharePoint obtido de get_sites. + - `drive_id` (string, obrigatório): O ID da biblioteca de documentos. Chame get_drives primeiro para obter IDs de drive válidos. + - `item_id` (string, obrigatório): O identificador único do arquivo Excel no SharePoint. Obtenha de list_files ou search_files. + - `worksheet_name` (string, obrigatório): Nome da planilha a excluir. Sensível a maiúsculas e minúsculas. Todos os dados, tabelas e gráficos nesta planilha serão permanentemente removidos. + + + + + **Descrição:** Remover uma tabela de uma planilha Excel no SharePoint. Isto exclui a estrutura da tabela (filtragem, formatação, recursos de tabela) mas preserva os dados subjacentes das células. + + **Parâmetros:** + - `site_id` (string, obrigatório): O identificador completo do site SharePoint obtido de get_sites. + - `drive_id` (string, obrigatório): O ID da biblioteca de documentos. Chame get_drives primeiro para obter IDs de drive válidos. + - `item_id` (string, obrigatório): O identificador único do arquivo Excel no SharePoint. Obtenha de list_files ou search_files. + - `worksheet_name` (string, obrigatório): Nome da planilha contendo a tabela. Obtenha de get_excel_worksheets. + - `table_name` (string, obrigatório): Nome da tabela a excluir (ex: 'Table1'). Obtenha de get_excel_tables. Os dados nas células permanecerão após a exclusão da tabela. + + + + + **Descrição:** Recuperar todos os intervalos nomeados definidos em uma pasta de trabalho Excel armazenada no SharePoint. Intervalos nomeados são rótulos definidos pelo usuário para intervalos de células (ex: 'DadosVendas' para A1:D100). 
+ + **Parâmetros:** + - `site_id` (string, obrigatório): O identificador completo do site SharePoint obtido de get_sites. + - `drive_id` (string, obrigatório): O ID da biblioteca de documentos. Chame get_drives primeiro para obter IDs de drive válidos. + - `item_id` (string, obrigatório): O identificador único do arquivo Excel no SharePoint. Obtenha de list_files ou search_files. + + + + + **Descrição:** Baixar e extrair conteúdo de texto de um documento Word (.docx) armazenado em uma biblioteca de documentos do SharePoint. Esta é a maneira recomendada de ler documentos Word do SharePoint. + + **Parâmetros:** + - `site_id` (string, obrigatório): O identificador completo do site SharePoint obtido de get_sites. + - `drive_id` (string, obrigatório): O ID da biblioteca de documentos. Chame get_drives primeiro para obter IDs de drive válidos. + - `item_id` (string, obrigatório): O identificador único do documento Word (.docx) no SharePoint. Obtenha de list_files ou search_files. diff --git a/docs/pt-BR/enterprise/integrations/microsoft_teams.mdx b/docs/pt-BR/enterprise/integrations/microsoft_teams.mdx index 54a9891d6..b8d5548f7 100644 --- a/docs/pt-BR/enterprise/integrations/microsoft_teams.mdx +++ b/docs/pt-BR/enterprise/integrations/microsoft_teams.mdx @@ -107,6 +107,86 @@ CREWAI_PLATFORM_INTEGRATION_TOKEN=seu_enterprise_token - `join_web_url` (string, obrigatório): A URL de participação na web da reunião a pesquisar. + + + **Descrição:** Pesquisar reuniões online por ID externo da reunião. + + **Parâmetros:** + - `join_meeting_id` (string, obrigatório): O ID da reunião (código numérico) que os participantes usam para entrar. É o joinMeetingId exibido nos convites da reunião, não o meeting id da API Graph. + + + + + **Descrição:** Obter detalhes de uma reunião online específica. + + **Parâmetros:** + - `meeting_id` (string, obrigatório): O ID da reunião na API Graph (string alfanumérica longa). Obter pelas ações create_meeting ou search_online_meetings. 
Diferente do joinMeetingId numérico. + + + + + **Descrição:** Obter membros de uma equipe específica. + + **Parâmetros:** + - `team_id` (string, obrigatório): O identificador único da equipe. Obter pela ação get_teams. + - `top` (integer, opcional): Número máximo de membros a recuperar por página (1-999). Padrão: 100. + - `skip_token` (string, opcional): Token de paginação de uma resposta anterior. Quando a resposta incluir @odata.nextLink, extraia o valor do parâmetro $skiptoken e passe aqui para obter a próxima página de resultados. + + + + + **Descrição:** Criar um novo canal em uma equipe. + + **Parâmetros:** + - `team_id` (string, obrigatório): O identificador único da equipe. Obter pela ação get_teams. + - `display_name` (string, obrigatório): Nome do canal exibido no Teams. Deve ser único na equipe. Máx 50 caracteres. + - `description` (string, opcional): Descrição opcional explicando o propósito do canal. Visível nos detalhes do canal. Máx 1024 caracteres. + - `membership_type` (string, opcional): Visibilidade do canal. Opções: standard, private. "standard" = visível para todos os membros da equipe, "private" = visível apenas para membros adicionados especificamente. Padrão: standard. + + + + + **Descrição:** Obter respostas a uma mensagem específica em um canal. + + **Parâmetros:** + - `team_id` (string, obrigatório): O identificador único da equipe. Obter pela ação get_teams. + - `channel_id` (string, obrigatório): O identificador único do canal. Obter pela ação get_channels. + - `message_id` (string, obrigatório): O identificador único da mensagem pai. Obter pela ação get_messages. + - `top` (integer, opcional): Número máximo de respostas a recuperar por página (1-50). Padrão: 50. + - `skip_token` (string, opcional): Token de paginação de uma resposta anterior. Quando a resposta incluir @odata.nextLink, extraia o valor do parâmetro $skiptoken e passe aqui para obter a próxima página de resultados. 
+ + + + + **Descrição:** Responder a uma mensagem em um canal do Teams. + + **Parâmetros:** + - `team_id` (string, obrigatório): O identificador único da equipe. Obter pela ação get_teams. + - `channel_id` (string, obrigatório): O identificador único do canal. Obter pela ação get_channels. + - `message_id` (string, obrigatório): O identificador único da mensagem a responder. Obter pela ação get_messages. + - `message` (string, obrigatório): O conteúdo da resposta. Para HTML, inclua tags de formatação. Para texto, use apenas texto simples. + - `content_type` (string, opcional): Formato do conteúdo. Opções: html, text. "text" para texto simples, "html" para texto rico com formatação. Padrão: text. + + + + + **Descrição:** Atualizar uma reunião online existente. + + **Parâmetros:** + - `meeting_id` (string, obrigatório): O identificador único da reunião. Obter pelas ações create_meeting ou search_online_meetings. + - `subject` (string, opcional): Novo título da reunião. + - `startDateTime` (string, opcional): Nova hora de início no formato ISO 8601 com fuso horário. Exemplo: "2024-01-20T10:00:00-08:00". + - `endDateTime` (string, opcional): Nova hora de término no formato ISO 8601 com fuso horário. + + + + + **Descrição:** Excluir uma reunião online. + + **Parâmetros:** + - `meeting_id` (string, obrigatório): O identificador único da reunião a excluir. Obter pelas ações create_meeting ou search_online_meetings. + + ## Exemplos de Uso diff --git a/docs/pt-BR/enterprise/integrations/microsoft_word.mdx b/docs/pt-BR/enterprise/integrations/microsoft_word.mdx index 9d8663c44..ec29fe409 100644 --- a/docs/pt-BR/enterprise/integrations/microsoft_word.mdx +++ b/docs/pt-BR/enterprise/integrations/microsoft_word.mdx @@ -97,6 +97,26 @@ CREWAI_PLATFORM_INTEGRATION_TOKEN=seu_enterprise_token - `file_id` (string, obrigatório): O ID do documento a excluir. + + + **Descrição:** Copiar um documento para um novo local no OneDrive. 
+ + **Parâmetros:** + - `file_id` (string, obrigatório): O ID do documento a copiar. + - `name` (string, opcional): Novo nome para o documento copiado. + - `parent_id` (string, opcional): O ID da pasta de destino (padrão: raiz). + + + + + **Descrição:** Mover um documento para um novo local no OneDrive. + + **Parâmetros:** + - `file_id` (string, obrigatório): O ID do documento a mover. + - `parent_id` (string, obrigatório): O ID da pasta de destino. + - `name` (string, opcional): Novo nome para o documento movido. + + ## Exemplos de Uso From 397d14c772d1856e7ff585d77bf51bf50d8c8742 Mon Sep 17 00:00:00 2001 From: Mike Plachta Date: Wed, 11 Feb 2026 13:51:54 -0800 Subject: [PATCH 8/9] fix: correct CLI flag format from --skip-provider to --skip_provider (#4462) Update documentation to use underscore instead of hyphen in the `--skip_provider` flag across all CLI command examples for consistency with actual CLI implementation. --- lib/crewai/src/crewai/cli/templates/AGENTS.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/lib/crewai/src/crewai/cli/templates/AGENTS.md b/lib/crewai/src/crewai/cli/templates/AGENTS.md index acf716c95..887dbc65e 100644 --- a/lib/crewai/src/crewai/cli/templates/AGENTS.md +++ b/lib/crewai/src/crewai/cli/templates/AGENTS.md @@ -57,8 +57,8 @@ uv sync # Sync dependencies uv lock # Lock dependencies # Project scaffolding -crewai create crew --skip-provider # New crew project -crewai create flow --skip-provider # New flow project +crewai create crew --skip_provider # New crew project +crewai create flow --skip_provider # New flow project # Running crewai run # Run crew or flow (auto-detects from pyproject.toml) @@ -982,7 +982,7 @@ Python >=3.10, <3.14 ```bash uv tool install crewai # Install CrewAI CLI uv tool list # Verify installation -crewai create crew my_crew --skip-provider # Scaffold a new project +crewai create crew my_crew --skip_provider # Scaffold a new project crewai install # Install project dependencies crewai 
run # Execute ``` From 0341e5aee7e5760bc6edf9c2566d6e69f35d5109 Mon Sep 17 00:00:00 2001 From: Lorenze Jay <63378463+lorenzejay@users.noreply.github.com> Date: Wed, 11 Feb 2026 14:07:15 -0800 Subject: [PATCH 9/9] supporting prompt cache results show (#4447) * supporting prompt cache * droped azure tests * fix tests --------- Co-authored-by: Greyson LaLonde --- .../llms/providers/anthropic/completion.py | 2 + .../crewai/llms/providers/azure/completion.py | 15 +- .../llms/providers/gemini/completion.py | 2 + .../llms/providers/openai/completion.py | 30 +- .../test_anthropic_cached_prompt_tokens.yaml | 332 +++++++++++ ...ropic_cached_prompt_tokens_with_tools.yaml | 336 +++++++++++ ...hropic_streaming_cached_prompt_tokens.yaml | 411 ++++++++++++++ .../test_gemini_cached_prompt_tokens.yaml | 266 +++++++++ ...emini_cached_prompt_tokens_with_tools.yaml | 280 ++++++++++ ...enai_completions_cached_prompt_tokens.yaml | 356 ++++++++++++ ...tions_cached_prompt_tokens_with_tools.yaml | 368 +++++++++++++ ...ai_responses_api_cached_prompt_tokens.yaml | 520 ++++++++++++++++++ ...s_api_cached_prompt_tokens_with_tools.yaml | 368 +++++++++++++ ...openai_streaming_cached_prompt_tokens.yaml | 375 +++++++++++++ .../tests/llms/anthropic/test_anthropic.py | 131 +++++ lib/crewai/tests/llms/azure/test_azure.py | 1 - lib/crewai/tests/llms/google/test_google.py | 153 ++++-- lib/crewai/tests/llms/openai/test_openai.py | 212 +++++++ 18 files changed, 4085 insertions(+), 73 deletions(-) create mode 100644 lib/crewai/tests/cassettes/llms/anthropic/test_anthropic_cached_prompt_tokens.yaml create mode 100644 lib/crewai/tests/cassettes/llms/anthropic/test_anthropic_cached_prompt_tokens_with_tools.yaml create mode 100644 lib/crewai/tests/cassettes/llms/anthropic/test_anthropic_streaming_cached_prompt_tokens.yaml create mode 100644 lib/crewai/tests/cassettes/llms/google/test_gemini_cached_prompt_tokens.yaml create mode 100644 
lib/crewai/tests/cassettes/llms/google/test_gemini_cached_prompt_tokens_with_tools.yaml create mode 100644 lib/crewai/tests/cassettes/llms/openai/test_openai_completions_cached_prompt_tokens.yaml create mode 100644 lib/crewai/tests/cassettes/llms/openai/test_openai_completions_cached_prompt_tokens_with_tools.yaml create mode 100644 lib/crewai/tests/cassettes/llms/openai/test_openai_responses_api_cached_prompt_tokens.yaml create mode 100644 lib/crewai/tests/cassettes/llms/openai/test_openai_responses_api_cached_prompt_tokens_with_tools.yaml create mode 100644 lib/crewai/tests/cassettes/llms/openai/test_openai_streaming_cached_prompt_tokens.yaml diff --git a/lib/crewai/src/crewai/llms/providers/anthropic/completion.py b/lib/crewai/src/crewai/llms/providers/anthropic/completion.py index 657488098..f7cb76471 100644 --- a/lib/crewai/src/crewai/llms/providers/anthropic/completion.py +++ b/lib/crewai/src/crewai/llms/providers/anthropic/completion.py @@ -1580,10 +1580,12 @@ class AnthropicCompletion(BaseLLM): usage = response.usage input_tokens = getattr(usage, "input_tokens", 0) output_tokens = getattr(usage, "output_tokens", 0) + cache_read_tokens = getattr(usage, "cache_read_input_tokens", 0) or 0 return { "input_tokens": input_tokens, "output_tokens": output_tokens, "total_tokens": input_tokens + output_tokens, + "cached_prompt_tokens": cache_read_tokens, } return {"total_tokens": 0} diff --git a/lib/crewai/src/crewai/llms/providers/azure/completion.py b/lib/crewai/src/crewai/llms/providers/azure/completion.py index e7fd80844..00c10112d 100644 --- a/lib/crewai/src/crewai/llms/providers/azure/completion.py +++ b/lib/crewai/src/crewai/llms/providers/azure/completion.py @@ -425,8 +425,9 @@ class AzureCompletion(BaseLLM): "stream": self.stream, } + model_extras: dict[str, Any] = {} if self.stream: - params["model_extras"] = {"stream_options": {"include_usage": True}} + model_extras["stream_options"] = {"include_usage": True} if response_model and self.is_openai_model: 
model_description = generate_model_description(response_model) @@ -464,6 +465,13 @@ class AzureCompletion(BaseLLM): params["tools"] = self._convert_tools_for_interference(tools) params["tool_choice"] = "auto" + prompt_cache_key = self.additional_params.get("prompt_cache_key") + if prompt_cache_key: + model_extras["prompt_cache_key"] = prompt_cache_key + + if model_extras: + params["model_extras"] = model_extras + additional_params = self.additional_params additional_drop_params = additional_params.get("additional_drop_params") drop_params = additional_params.get("drop_params") @@ -1063,10 +1071,15 @@ class AzureCompletion(BaseLLM): """Extract token usage from Azure response.""" if hasattr(response, "usage") and response.usage: usage = response.usage + cached_tokens = 0 + prompt_details = getattr(usage, "prompt_tokens_details", None) + if prompt_details: + cached_tokens = getattr(prompt_details, "cached_tokens", 0) or 0 return { "prompt_tokens": getattr(usage, "prompt_tokens", 0), "completion_tokens": getattr(usage, "completion_tokens", 0), "total_tokens": getattr(usage, "total_tokens", 0), + "cached_prompt_tokens": cached_tokens, } return {"total_tokens": 0} diff --git a/lib/crewai/src/crewai/llms/providers/gemini/completion.py b/lib/crewai/src/crewai/llms/providers/gemini/completion.py index 0c00de96d..14603b7d2 100644 --- a/lib/crewai/src/crewai/llms/providers/gemini/completion.py +++ b/lib/crewai/src/crewai/llms/providers/gemini/completion.py @@ -1295,11 +1295,13 @@ class GeminiCompletion(BaseLLM): """Extract token usage from Gemini response.""" if response.usage_metadata: usage = response.usage_metadata + cached_tokens = getattr(usage, "cached_content_token_count", 0) or 0 return { "prompt_token_count": getattr(usage, "prompt_token_count", 0), "candidates_token_count": getattr(usage, "candidates_token_count", 0), "total_token_count": getattr(usage, "total_token_count", 0), "total_tokens": getattr(usage, "total_token_count", 0), + "cached_prompt_tokens": 
cached_tokens, } return {"total_tokens": 0} diff --git a/lib/crewai/src/crewai/llms/providers/openai/completion.py b/lib/crewai/src/crewai/llms/providers/openai/completion.py index 22b9cda3b..871621ddb 100644 --- a/lib/crewai/src/crewai/llms/providers/openai/completion.py +++ b/lib/crewai/src/crewai/llms/providers/openai/completion.py @@ -1094,11 +1094,7 @@ class OpenAICompletion(BaseLLM): if reasoning_items: self._last_reasoning_items = reasoning_items if event.response and event.response.usage: - usage = { - "prompt_tokens": event.response.usage.input_tokens, - "completion_tokens": event.response.usage.output_tokens, - "total_tokens": event.response.usage.total_tokens, - } + usage = self._extract_responses_token_usage(event.response) self._track_token_usage_internal(usage) # If parse_tool_outputs is enabled, return structured result @@ -1222,11 +1218,7 @@ class OpenAICompletion(BaseLLM): if reasoning_items: self._last_reasoning_items = reasoning_items if event.response and event.response.usage: - usage = { - "prompt_tokens": event.response.usage.input_tokens, - "completion_tokens": event.response.usage.output_tokens, - "total_tokens": event.response.usage.total_tokens, - } + usage = self._extract_responses_token_usage(event.response) self._track_token_usage_internal(usage) # If parse_tool_outputs is enabled, return structured result @@ -1310,11 +1302,18 @@ class OpenAICompletion(BaseLLM): def _extract_responses_token_usage(self, response: Response) -> dict[str, Any]: """Extract token usage from Responses API response.""" if response.usage: - return { + result = { "prompt_tokens": response.usage.input_tokens, "completion_tokens": response.usage.output_tokens, "total_tokens": response.usage.total_tokens, } + # Extract cached prompt tokens from input_tokens_details + input_details = getattr(response.usage, "input_tokens_details", None) + if input_details: + result["cached_prompt_tokens"] = ( + getattr(input_details, "cached_tokens", 0) or 0 + ) + return result 
return {"total_tokens": 0} def _extract_builtin_tool_outputs(self, response: Response) -> ResponsesAPIResult: @@ -2264,11 +2263,18 @@ class OpenAICompletion(BaseLLM): """Extract token usage from OpenAI ChatCompletion or ChatCompletionChunk response.""" if hasattr(response, "usage") and response.usage: usage = response.usage - return { + result = { "prompt_tokens": getattr(usage, "prompt_tokens", 0), "completion_tokens": getattr(usage, "completion_tokens", 0), "total_tokens": getattr(usage, "total_tokens", 0), } + # Extract cached prompt tokens from prompt_tokens_details + prompt_details = getattr(usage, "prompt_tokens_details", None) + if prompt_details: + result["cached_prompt_tokens"] = ( + getattr(prompt_details, "cached_tokens", 0) or 0 + ) + return result return {"total_tokens": 0} def _format_messages(self, messages: str | list[LLMMessage]) -> list[LLMMessage]: diff --git a/lib/crewai/tests/cassettes/llms/anthropic/test_anthropic_cached_prompt_tokens.yaml b/lib/crewai/tests/cassettes/llms/anthropic/test_anthropic_cached_prompt_tokens.yaml new file mode 100644 index 000000000..51997fbed --- /dev/null +++ b/lib/crewai/tests/cassettes/llms/anthropic/test_anthropic_cached_prompt_tokens.yaml @@ -0,0 +1,332 @@ +interactions: +- request: + body: '{"max_tokens":4096,"messages":[{"role":"user","content":[{"type":"text","text":"Say + hello in one word.","cache_control":{"type":"ephemeral"}}]}],"model":"claude-sonnet-4-5-20250929","stream":false,"system":"You + are a helpful assistant. This is padding text to ensure the prompt is large + enough for caching. This is padding text to ensure the prompt is large enough + for caching. This is padding text to ensure the prompt is large enough for caching. + This is padding text to ensure the prompt is large enough for caching. This + is padding text to ensure the prompt is large enough for caching. This is padding + text to ensure the prompt is large enough for caching. 
This is padding text + to ensure the prompt is large enough for caching. This is padding text to ensure + the prompt is large enough for caching. This is padding text to ensure the prompt + is large enough for caching. This is padding text to ensure the prompt is large + enough for caching. This is padding text to ensure the prompt is large enough + for caching. This is padding text to ensure the prompt is large enough for caching. + This is padding text to ensure the prompt is large enough for caching. This + is padding text to ensure the prompt is large enough for caching. This is padding + text to ensure the prompt is large enough for caching. This is padding text + to ensure the prompt is large enough for caching. This is padding text to ensure + the prompt is large enough for caching. This is padding text to ensure the prompt + is large enough for caching. This is padding text to ensure the prompt is large + enough for caching. This is padding text to ensure the prompt is large enough + for caching. This is padding text to ensure the prompt is large enough for caching. + This is padding text to ensure the prompt is large enough for caching. This + is padding text to ensure the prompt is large enough for caching. This is padding + text to ensure the prompt is large enough for caching. This is padding text + to ensure the prompt is large enough for caching. This is padding text to ensure + the prompt is large enough for caching. This is padding text to ensure the prompt + is large enough for caching. This is padding text to ensure the prompt is large + enough for caching. This is padding text to ensure the prompt is large enough + for caching. This is padding text to ensure the prompt is large enough for caching. + This is padding text to ensure the prompt is large enough for caching. This + is padding text to ensure the prompt is large enough for caching. This is padding + text to ensure the prompt is large enough for caching. 
This is padding text + to ensure the prompt is large enough for caching. This is padding text to ensure + the prompt is large enough for caching. This is padding text to ensure the prompt + is large enough for caching. This is padding text to ensure the prompt is large + enough for caching. This is padding text to ensure the prompt is large enough + for caching. This is padding text to ensure the prompt is large enough for caching. + This is padding text to ensure the prompt is large enough for caching. This + is padding text to ensure the prompt is large enough for caching. This is padding + text to ensure the prompt is large enough for caching. This is padding text + to ensure the prompt is large enough for caching. This is padding text to ensure + the prompt is large enough for caching. This is padding text to ensure the prompt + is large enough for caching. This is padding text to ensure the prompt is large + enough for caching. This is padding text to ensure the prompt is large enough + for caching. This is padding text to ensure the prompt is large enough for caching. + This is padding text to ensure the prompt is large enough for caching. This + is padding text to ensure the prompt is large enough for caching. This is padding + text to ensure the prompt is large enough for caching. This is padding text + to ensure the prompt is large enough for caching. This is padding text to ensure + the prompt is large enough for caching. This is padding text to ensure the prompt + is large enough for caching. This is padding text to ensure the prompt is large + enough for caching. This is padding text to ensure the prompt is large enough + for caching. This is padding text to ensure the prompt is large enough for caching. + This is padding text to ensure the prompt is large enough for caching. This + is padding text to ensure the prompt is large enough for caching. This is padding + text to ensure the prompt is large enough for caching. 
This is padding text + to ensure the prompt is large enough for caching. This is padding text to ensure + the prompt is large enough for caching. This is padding text to ensure the prompt + is large enough for caching. This is padding text to ensure the prompt is large + enough for caching. This is padding text to ensure the prompt is large enough + for caching. This is padding text to ensure the prompt is large enough for caching. + This is padding text to ensure the prompt is large enough for caching. This + is padding text to ensure the prompt is large enough for caching. This is padding + text to ensure the prompt is large enough for caching. This is padding text + to ensure the prompt is large enough for caching. This is padding text to ensure + the prompt is large enough for caching. This is padding text to ensure the prompt + is large enough for caching. This is padding text to ensure the prompt is large + enough for caching. This is padding text to ensure the prompt is large enough + for caching. This is padding text to ensure the prompt is large enough for caching. + This is padding text to ensure the prompt is large enough for caching. This + is padding text to ensure the prompt is large enough for caching. This is padding + text to ensure the prompt is large enough for caching. This is padding text + to ensure the prompt is large enough for caching. This is padding text to ensure + the prompt is large enough for caching. 
"}' + headers: + User-Agent: + - X-USER-AGENT-XXX + accept: + - application/json + accept-encoding: + - ACCEPT-ENCODING-XXX + anthropic-version: + - '2023-06-01' + connection: + - keep-alive + content-length: + - '5918' + content-type: + - application/json + host: + - api.anthropic.com + x-api-key: + - X-API-KEY-XXX + x-stainless-arch: + - X-STAINLESS-ARCH-XXX + x-stainless-async: + - 'false' + x-stainless-lang: + - python + x-stainless-os: + - X-STAINLESS-OS-XXX + x-stainless-package-version: + - 0.73.0 + x-stainless-retry-count: + - '0' + x-stainless-runtime: + - CPython + x-stainless-runtime-version: + - 3.13.3 + x-stainless-timeout: + - NOT_GIVEN + method: POST + uri: https://api.anthropic.com/v1/messages + response: + body: + string: '{"model":"claude-sonnet-4-5-20250929","id":"msg_013xTaKq41TFn6drdxt1mFdx","type":"message","role":"assistant","content":[{"type":"text","text":"Hello!"}],"stop_reason":"end_turn","stop_sequence":null,"usage":{"input_tokens":3,"cache_creation_input_tokens":0,"cache_read_input_tokens":1217,"cache_creation":{"ephemeral_5m_input_tokens":0,"ephemeral_1h_input_tokens":0},"output_tokens":5,"service_tier":"standard","inference_geo":"not_available"}}' + headers: + CF-RAY: + - CF-RAY-XXX + Connection: + - keep-alive + Content-Security-Policy: + - CSP-FILTERED + Content-Type: + - application/json + Date: + - Tue, 10 Feb 2026 18:27:40 GMT + Server: + - cloudflare + Transfer-Encoding: + - chunked + X-Robots-Tag: + - none + anthropic-organization-id: + - ANTHROPIC-ORGANIZATION-ID-XXX + anthropic-ratelimit-input-tokens-limit: + - ANTHROPIC-RATELIMIT-INPUT-TOKENS-LIMIT-XXX + anthropic-ratelimit-input-tokens-remaining: + - ANTHROPIC-RATELIMIT-INPUT-TOKENS-REMAINING-XXX + anthropic-ratelimit-input-tokens-reset: + - ANTHROPIC-RATELIMIT-INPUT-TOKENS-RESET-XXX + anthropic-ratelimit-output-tokens-limit: + - ANTHROPIC-RATELIMIT-OUTPUT-TOKENS-LIMIT-XXX + anthropic-ratelimit-output-tokens-remaining: + - ANTHROPIC-RATELIMIT-OUTPUT-TOKENS-REMAINING-XXX + 
anthropic-ratelimit-output-tokens-reset: + - ANTHROPIC-RATELIMIT-OUTPUT-TOKENS-RESET-XXX + anthropic-ratelimit-tokens-limit: + - ANTHROPIC-RATELIMIT-TOKENS-LIMIT-XXX + anthropic-ratelimit-tokens-remaining: + - ANTHROPIC-RATELIMIT-TOKENS-REMAINING-XXX + anthropic-ratelimit-tokens-reset: + - ANTHROPIC-RATELIMIT-TOKENS-RESET-XXX + cf-cache-status: + - DYNAMIC + request-id: + - REQUEST-ID-XXX + strict-transport-security: + - STS-XXX + x-envoy-upstream-service-time: + - '726' + status: + code: 200 + message: OK +- request: + body: '{"max_tokens":4096,"messages":[{"role":"user","content":[{"type":"text","text":"Say + goodbye in one word.","cache_control":{"type":"ephemeral"}}]}],"model":"claude-sonnet-4-5-20250929","stream":false,"system":"You + are a helpful assistant. This is padding text to ensure the prompt is large + enough for caching. This is padding text to ensure the prompt is large enough + for caching. This is padding text to ensure the prompt is large enough for caching. + This is padding text to ensure the prompt is large enough for caching. This + is padding text to ensure the prompt is large enough for caching. This is padding + text to ensure the prompt is large enough for caching. This is padding text + to ensure the prompt is large enough for caching. This is padding text to ensure + the prompt is large enough for caching. This is padding text to ensure the prompt + is large enough for caching. This is padding text to ensure the prompt is large + enough for caching. This is padding text to ensure the prompt is large enough + for caching. This is padding text to ensure the prompt is large enough for caching. + This is padding text to ensure the prompt is large enough for caching. This + is padding text to ensure the prompt is large enough for caching. This is padding + text to ensure the prompt is large enough for caching. This is padding text + to ensure the prompt is large enough for caching. 
This is padding text to ensure + the prompt is large enough for caching. This is padding text to ensure the prompt + is large enough for caching. This is padding text to ensure the prompt is large + enough for caching. This is padding text to ensure the prompt is large enough + for caching. This is padding text to ensure the prompt is large enough for caching. + This is padding text to ensure the prompt is large enough for caching. This + is padding text to ensure the prompt is large enough for caching. This is padding + text to ensure the prompt is large enough for caching. This is padding text + to ensure the prompt is large enough for caching. This is padding text to ensure + the prompt is large enough for caching. This is padding text to ensure the prompt + is large enough for caching. This is padding text to ensure the prompt is large + enough for caching. This is padding text to ensure the prompt is large enough + for caching. This is padding text to ensure the prompt is large enough for caching. + This is padding text to ensure the prompt is large enough for caching. This + is padding text to ensure the prompt is large enough for caching. This is padding + text to ensure the prompt is large enough for caching. This is padding text + to ensure the prompt is large enough for caching. This is padding text to ensure + the prompt is large enough for caching. This is padding text to ensure the prompt + is large enough for caching. This is padding text to ensure the prompt is large + enough for caching. This is padding text to ensure the prompt is large enough + for caching. This is padding text to ensure the prompt is large enough for caching. + This is padding text to ensure the prompt is large enough for caching. This + is padding text to ensure the prompt is large enough for caching. This is padding + text to ensure the prompt is large enough for caching. This is padding text + to ensure the prompt is large enough for caching. 
This is padding text to ensure + the prompt is large enough for caching. This is padding text to ensure the prompt + is large enough for caching. This is padding text to ensure the prompt is large + enough for caching. This is padding text to ensure the prompt is large enough + for caching. This is padding text to ensure the prompt is large enough for caching. + This is padding text to ensure the prompt is large enough for caching. This + is padding text to ensure the prompt is large enough for caching. This is padding + text to ensure the prompt is large enough for caching. This is padding text + to ensure the prompt is large enough for caching. This is padding text to ensure + the prompt is large enough for caching. This is padding text to ensure the prompt + is large enough for caching. This is padding text to ensure the prompt is large + enough for caching. This is padding text to ensure the prompt is large enough + for caching. This is padding text to ensure the prompt is large enough for caching. + This is padding text to ensure the prompt is large enough for caching. This + is padding text to ensure the prompt is large enough for caching. This is padding + text to ensure the prompt is large enough for caching. This is padding text + to ensure the prompt is large enough for caching. This is padding text to ensure + the prompt is large enough for caching. This is padding text to ensure the prompt + is large enough for caching. This is padding text to ensure the prompt is large + enough for caching. This is padding text to ensure the prompt is large enough + for caching. This is padding text to ensure the prompt is large enough for caching. + This is padding text to ensure the prompt is large enough for caching. This + is padding text to ensure the prompt is large enough for caching. This is padding + text to ensure the prompt is large enough for caching. This is padding text + to ensure the prompt is large enough for caching. 
This is padding text to ensure + the prompt is large enough for caching. This is padding text to ensure the prompt + is large enough for caching. This is padding text to ensure the prompt is large + enough for caching. This is padding text to ensure the prompt is large enough + for caching. This is padding text to ensure the prompt is large enough for caching. + This is padding text to ensure the prompt is large enough for caching. This + is padding text to ensure the prompt is large enough for caching. This is padding + text to ensure the prompt is large enough for caching. This is padding text + to ensure the prompt is large enough for caching. This is padding text to ensure + the prompt is large enough for caching. "}' + headers: + User-Agent: + - X-USER-AGENT-XXX + accept: + - application/json + accept-encoding: + - ACCEPT-ENCODING-XXX + anthropic-version: + - '2023-06-01' + connection: + - keep-alive + content-length: + - '5920' + content-type: + - application/json + host: + - api.anthropic.com + x-api-key: + - X-API-KEY-XXX + x-stainless-arch: + - X-STAINLESS-ARCH-XXX + x-stainless-async: + - 'false' + x-stainless-lang: + - python + x-stainless-os: + - X-STAINLESS-OS-XXX + x-stainless-package-version: + - 0.73.0 + x-stainless-retry-count: + - '0' + x-stainless-runtime: + - CPython + x-stainless-runtime-version: + - 3.13.3 + x-stainless-timeout: + - NOT_GIVEN + method: POST + uri: https://api.anthropic.com/v1/messages + response: + body: + string: '{"model":"claude-sonnet-4-5-20250929","id":"msg_01LdueHX7nvf19wD8Uxn4EZD","type":"message","role":"assistant","content":[{"type":"text","text":"Goodbye"}],"stop_reason":"end_turn","stop_sequence":null,"usage":{"input_tokens":3,"cache_creation_input_tokens":0,"cache_read_input_tokens":1217,"cache_creation":{"ephemeral_5m_input_tokens":0,"ephemeral_1h_input_tokens":0},"output_tokens":5,"service_tier":"standard","inference_geo":"not_available"}}' + headers: + CF-RAY: + - CF-RAY-XXX + Connection: + - keep-alive + 
Content-Security-Policy: + - CSP-FILTERED + Content-Type: + - application/json + Date: + - Tue, 10 Feb 2026 18:27:41 GMT + Server: + - cloudflare + Transfer-Encoding: + - chunked + X-Robots-Tag: + - none + anthropic-organization-id: + - ANTHROPIC-ORGANIZATION-ID-XXX + anthropic-ratelimit-input-tokens-limit: + - ANTHROPIC-RATELIMIT-INPUT-TOKENS-LIMIT-XXX + anthropic-ratelimit-input-tokens-remaining: + - ANTHROPIC-RATELIMIT-INPUT-TOKENS-REMAINING-XXX + anthropic-ratelimit-input-tokens-reset: + - ANTHROPIC-RATELIMIT-INPUT-TOKENS-RESET-XXX + anthropic-ratelimit-output-tokens-limit: + - ANTHROPIC-RATELIMIT-OUTPUT-TOKENS-LIMIT-XXX + anthropic-ratelimit-output-tokens-remaining: + - ANTHROPIC-RATELIMIT-OUTPUT-TOKENS-REMAINING-XXX + anthropic-ratelimit-output-tokens-reset: + - ANTHROPIC-RATELIMIT-OUTPUT-TOKENS-RESET-XXX + anthropic-ratelimit-tokens-limit: + - ANTHROPIC-RATELIMIT-TOKENS-LIMIT-XXX + anthropic-ratelimit-tokens-remaining: + - ANTHROPIC-RATELIMIT-TOKENS-REMAINING-XXX + anthropic-ratelimit-tokens-reset: + - ANTHROPIC-RATELIMIT-TOKENS-RESET-XXX + cf-cache-status: + - DYNAMIC + request-id: + - REQUEST-ID-XXX + strict-transport-security: + - STS-XXX + x-envoy-upstream-service-time: + - '759' + status: + code: 200 + message: OK +version: 1 diff --git a/lib/crewai/tests/cassettes/llms/anthropic/test_anthropic_cached_prompt_tokens_with_tools.yaml b/lib/crewai/tests/cassettes/llms/anthropic/test_anthropic_cached_prompt_tokens_with_tools.yaml new file mode 100644 index 000000000..84e6549cf --- /dev/null +++ b/lib/crewai/tests/cassettes/llms/anthropic/test_anthropic_cached_prompt_tokens_with_tools.yaml @@ -0,0 +1,336 @@ +interactions: +- request: + body: '{"max_tokens":4096,"messages":[{"role":"user","content":[{"type":"text","text":"What + is the weather in Tokyo?","cache_control":{"type":"ephemeral"}}]}],"model":"claude-sonnet-4-5-20250929","stream":false,"system":"You + are a helpful assistant that uses tools. 
This is padding text to ensure the + prompt is large enough for caching. This is padding text to ensure the prompt + is large enough for caching. This is padding text to ensure the prompt is large + enough for caching. This is padding text to ensure the prompt is large enough + for caching. This is padding text to ensure the prompt is large enough for caching. + This is padding text to ensure the prompt is large enough for caching. This + is padding text to ensure the prompt is large enough for caching. This is padding + text to ensure the prompt is large enough for caching. This is padding text + to ensure the prompt is large enough for caching. This is padding text to ensure + the prompt is large enough for caching. This is padding text to ensure the prompt + is large enough for caching. This is padding text to ensure the prompt is large + enough for caching. This is padding text to ensure the prompt is large enough + for caching. This is padding text to ensure the prompt is large enough for caching. + This is padding text to ensure the prompt is large enough for caching. This + is padding text to ensure the prompt is large enough for caching. This is padding + text to ensure the prompt is large enough for caching. This is padding text + to ensure the prompt is large enough for caching. This is padding text to ensure + the prompt is large enough for caching. This is padding text to ensure the prompt + is large enough for caching. This is padding text to ensure the prompt is large + enough for caching. This is padding text to ensure the prompt is large enough + for caching. This is padding text to ensure the prompt is large enough for caching. + This is padding text to ensure the prompt is large enough for caching. This + is padding text to ensure the prompt is large enough for caching. This is padding + text to ensure the prompt is large enough for caching. This is padding text + to ensure the prompt is large enough for caching. 
This is padding text to ensure + the prompt is large enough for caching. This is padding text to ensure the prompt + is large enough for caching. This is padding text to ensure the prompt is large + enough for caching. This is padding text to ensure the prompt is large enough + for caching. This is padding text to ensure the prompt is large enough for caching. + This is padding text to ensure the prompt is large enough for caching. This + is padding text to ensure the prompt is large enough for caching. This is padding + text to ensure the prompt is large enough for caching. This is padding text + to ensure the prompt is large enough for caching. This is padding text to ensure + the prompt is large enough for caching. This is padding text to ensure the prompt + is large enough for caching. This is padding text to ensure the prompt is large + enough for caching. This is padding text to ensure the prompt is large enough + for caching. This is padding text to ensure the prompt is large enough for caching. + This is padding text to ensure the prompt is large enough for caching. This + is padding text to ensure the prompt is large enough for caching. This is padding + text to ensure the prompt is large enough for caching. This is padding text + to ensure the prompt is large enough for caching. This is padding text to ensure + the prompt is large enough for caching. This is padding text to ensure the prompt + is large enough for caching. This is padding text to ensure the prompt is large + enough for caching. This is padding text to ensure the prompt is large enough + for caching. This is padding text to ensure the prompt is large enough for caching. + This is padding text to ensure the prompt is large enough for caching. This + is padding text to ensure the prompt is large enough for caching. This is padding + text to ensure the prompt is large enough for caching. This is padding text + to ensure the prompt is large enough for caching. 
This is padding text to ensure + the prompt is large enough for caching. This is padding text to ensure the prompt + is large enough for caching. This is padding text to ensure the prompt is large + enough for caching. This is padding text to ensure the prompt is large enough + for caching. This is padding text to ensure the prompt is large enough for caching. + This is padding text to ensure the prompt is large enough for caching. This + is padding text to ensure the prompt is large enough for caching. This is padding + text to ensure the prompt is large enough for caching. This is padding text + to ensure the prompt is large enough for caching. This is padding text to ensure + the prompt is large enough for caching. This is padding text to ensure the prompt + is large enough for caching. This is padding text to ensure the prompt is large + enough for caching. This is padding text to ensure the prompt is large enough + for caching. This is padding text to ensure the prompt is large enough for caching. + This is padding text to ensure the prompt is large enough for caching. This + is padding text to ensure the prompt is large enough for caching. This is padding + text to ensure the prompt is large enough for caching. This is padding text + to ensure the prompt is large enough for caching. This is padding text to ensure + the prompt is large enough for caching. This is padding text to ensure the prompt + is large enough for caching. This is padding text to ensure the prompt is large + enough for caching. This is padding text to ensure the prompt is large enough + for caching. This is padding text to ensure the prompt is large enough for caching. + This is padding text to ensure the prompt is large enough for caching. This + is padding text to ensure the prompt is large enough for caching. This is padding + text to ensure the prompt is large enough for caching. 
","tool_choice":{"type":"tool","name":"get_weather"},"tools":[{"name":"get_weather","description":"Get + the current weather for a location","input_schema":{"type":"object","properties":{"location":{"type":"string","description":"The + city name"}},"required":["location"]}}]}' + headers: + User-Agent: + - X-USER-AGENT-XXX + accept: + - application/json + accept-encoding: + - ACCEPT-ENCODING-XXX + anthropic-version: + - '2023-06-01' + connection: + - keep-alive + content-length: + - '6211' + content-type: + - application/json + host: + - api.anthropic.com + x-api-key: + - X-API-KEY-XXX + x-stainless-arch: + - X-STAINLESS-ARCH-XXX + x-stainless-async: + - 'false' + x-stainless-lang: + - python + x-stainless-os: + - X-STAINLESS-OS-XXX + x-stainless-package-version: + - 0.73.0 + x-stainless-retry-count: + - '0' + x-stainless-runtime: + - CPython + x-stainless-runtime-version: + - 3.13.3 + x-stainless-timeout: + - NOT_GIVEN + method: POST + uri: https://api.anthropic.com/v1/messages + response: + body: + string: '{"model":"claude-sonnet-4-5-20250929","id":"msg_01WhFk2ppoz43nbh4uNhXBfL","type":"message","role":"assistant","content":[{"type":"tool_use","id":"toolu_01CX1yZuJ5MQaJbXNSrnCiqf","name":"get_weather","input":{"location":"Tokyo"}}],"stop_reason":"tool_use","stop_sequence":null,"usage":{"input_tokens":24,"cache_creation_input_tokens":0,"cache_read_input_tokens":1857,"cache_creation":{"ephemeral_5m_input_tokens":0,"ephemeral_1h_input_tokens":0},"output_tokens":33,"service_tier":"standard","inference_geo":"not_available"}}' + headers: + CF-RAY: + - CF-RAY-XXX + Connection: + - keep-alive + Content-Security-Policy: + - CSP-FILTERED + Content-Type: + - application/json + Date: + - Tue, 10 Feb 2026 18:27:38 GMT + Server: + - cloudflare + Transfer-Encoding: + - chunked + X-Robots-Tag: + - none + anthropic-organization-id: + - ANTHROPIC-ORGANIZATION-ID-XXX + anthropic-ratelimit-input-tokens-limit: + - ANTHROPIC-RATELIMIT-INPUT-TOKENS-LIMIT-XXX + 
anthropic-ratelimit-input-tokens-remaining: + - ANTHROPIC-RATELIMIT-INPUT-TOKENS-REMAINING-XXX + anthropic-ratelimit-input-tokens-reset: + - ANTHROPIC-RATELIMIT-INPUT-TOKENS-RESET-XXX + anthropic-ratelimit-output-tokens-limit: + - ANTHROPIC-RATELIMIT-OUTPUT-TOKENS-LIMIT-XXX + anthropic-ratelimit-output-tokens-remaining: + - ANTHROPIC-RATELIMIT-OUTPUT-TOKENS-REMAINING-XXX + anthropic-ratelimit-output-tokens-reset: + - ANTHROPIC-RATELIMIT-OUTPUT-TOKENS-RESET-XXX + anthropic-ratelimit-tokens-limit: + - ANTHROPIC-RATELIMIT-TOKENS-LIMIT-XXX + anthropic-ratelimit-tokens-remaining: + - ANTHROPIC-RATELIMIT-TOKENS-REMAINING-XXX + anthropic-ratelimit-tokens-reset: + - ANTHROPIC-RATELIMIT-TOKENS-RESET-XXX + cf-cache-status: + - DYNAMIC + request-id: + - REQUEST-ID-XXX + strict-transport-security: + - STS-XXX + x-envoy-upstream-service-time: + - '1390' + status: + code: 200 + message: OK +- request: + body: '{"max_tokens":4096,"messages":[{"role":"user","content":[{"type":"text","text":"What + is the weather in Paris?","cache_control":{"type":"ephemeral"}}]}],"model":"claude-sonnet-4-5-20250929","stream":false,"system":"You + are a helpful assistant that uses tools. This is padding text to ensure the + prompt is large enough for caching. This is padding text to ensure the prompt + is large enough for caching. This is padding text to ensure the prompt is large + enough for caching. This is padding text to ensure the prompt is large enough + for caching. This is padding text to ensure the prompt is large enough for caching. + This is padding text to ensure the prompt is large enough for caching. This + is padding text to ensure the prompt is large enough for caching. This is padding + text to ensure the prompt is large enough for caching. This is padding text + to ensure the prompt is large enough for caching. This is padding text to ensure + the prompt is large enough for caching. This is padding text to ensure the prompt + is large enough for caching. 
This is padding text to ensure the prompt is large + enough for caching. This is padding text to ensure the prompt is large enough + for caching. This is padding text to ensure the prompt is large enough for caching. + This is padding text to ensure the prompt is large enough for caching. This + is padding text to ensure the prompt is large enough for caching. This is padding + text to ensure the prompt is large enough for caching. This is padding text + to ensure the prompt is large enough for caching. This is padding text to ensure + the prompt is large enough for caching. This is padding text to ensure the prompt + is large enough for caching. This is padding text to ensure the prompt is large + enough for caching. This is padding text to ensure the prompt is large enough + for caching. This is padding text to ensure the prompt is large enough for caching. + This is padding text to ensure the prompt is large enough for caching. This + is padding text to ensure the prompt is large enough for caching. This is padding + text to ensure the prompt is large enough for caching. This is padding text + to ensure the prompt is large enough for caching. This is padding text to ensure + the prompt is large enough for caching. This is padding text to ensure the prompt + is large enough for caching. This is padding text to ensure the prompt is large + enough for caching. This is padding text to ensure the prompt is large enough + for caching. This is padding text to ensure the prompt is large enough for caching. + This is padding text to ensure the prompt is large enough for caching. This + is padding text to ensure the prompt is large enough for caching. This is padding + text to ensure the prompt is large enough for caching. This is padding text + to ensure the prompt is large enough for caching. This is padding text to ensure + the prompt is large enough for caching. This is padding text to ensure the prompt + is large enough for caching. 
This is padding text to ensure the prompt is large + enough for caching. This is padding text to ensure the prompt is large enough + for caching. This is padding text to ensure the prompt is large enough for caching. + This is padding text to ensure the prompt is large enough for caching. This + is padding text to ensure the prompt is large enough for caching. This is padding + text to ensure the prompt is large enough for caching. This is padding text + to ensure the prompt is large enough for caching. This is padding text to ensure + the prompt is large enough for caching. This is padding text to ensure the prompt + is large enough for caching. This is padding text to ensure the prompt is large + enough for caching. This is padding text to ensure the prompt is large enough + for caching. This is padding text to ensure the prompt is large enough for caching. + This is padding text to ensure the prompt is large enough for caching. This + is padding text to ensure the prompt is large enough for caching. This is padding + text to ensure the prompt is large enough for caching. This is padding text + to ensure the prompt is large enough for caching. This is padding text to ensure + the prompt is large enough for caching. This is padding text to ensure the prompt + is large enough for caching. This is padding text to ensure the prompt is large + enough for caching. This is padding text to ensure the prompt is large enough + for caching. This is padding text to ensure the prompt is large enough for caching. + This is padding text to ensure the prompt is large enough for caching. This + is padding text to ensure the prompt is large enough for caching. This is padding + text to ensure the prompt is large enough for caching. This is padding text + to ensure the prompt is large enough for caching. This is padding text to ensure + the prompt is large enough for caching. This is padding text to ensure the prompt + is large enough for caching. 
This is padding text to ensure the prompt is large + enough for caching. This is padding text to ensure the prompt is large enough + for caching. This is padding text to ensure the prompt is large enough for caching. + This is padding text to ensure the prompt is large enough for caching. This + is padding text to ensure the prompt is large enough for caching. This is padding + text to ensure the prompt is large enough for caching. This is padding text + to ensure the prompt is large enough for caching. This is padding text to ensure + the prompt is large enough for caching. This is padding text to ensure the prompt + is large enough for caching. This is padding text to ensure the prompt is large + enough for caching. This is padding text to ensure the prompt is large enough + for caching. This is padding text to ensure the prompt is large enough for caching. + This is padding text to ensure the prompt is large enough for caching. This + is padding text to ensure the prompt is large enough for caching. This is padding + text to ensure the prompt is large enough for caching. 
","tool_choice":{"type":"tool","name":"get_weather"},"tools":[{"name":"get_weather","description":"Get + the current weather for a location","input_schema":{"type":"object","properties":{"location":{"type":"string","description":"The + city name"}},"required":["location"]}}]}' + headers: + User-Agent: + - X-USER-AGENT-XXX + accept: + - application/json + accept-encoding: + - ACCEPT-ENCODING-XXX + anthropic-version: + - '2023-06-01' + connection: + - keep-alive + content-length: + - '6211' + content-type: + - application/json + host: + - api.anthropic.com + x-api-key: + - X-API-KEY-XXX + x-stainless-arch: + - X-STAINLESS-ARCH-XXX + x-stainless-async: + - 'false' + x-stainless-lang: + - python + x-stainless-os: + - X-STAINLESS-OS-XXX + x-stainless-package-version: + - 0.73.0 + x-stainless-retry-count: + - '0' + x-stainless-runtime: + - CPython + x-stainless-runtime-version: + - 3.13.3 + x-stainless-timeout: + - NOT_GIVEN + method: POST + uri: https://api.anthropic.com/v1/messages + response: + body: + string: '{"model":"claude-sonnet-4-5-20250929","id":"msg_01Nmw5NyAEwCLGjpVnf15rh4","type":"message","role":"assistant","content":[{"type":"tool_use","id":"toolu_01DEe9K7N4EfhPFqxHhqEHCE","name":"get_weather","input":{"location":"Paris"}}],"stop_reason":"tool_use","stop_sequence":null,"usage":{"input_tokens":24,"cache_creation_input_tokens":0,"cache_read_input_tokens":1857,"cache_creation":{"ephemeral_5m_input_tokens":0,"ephemeral_1h_input_tokens":0},"output_tokens":33,"service_tier":"standard","inference_geo":"not_available"}}' + headers: + CF-RAY: + - CF-RAY-XXX + Connection: + - keep-alive + Content-Security-Policy: + - CSP-FILTERED + Content-Type: + - application/json + Date: + - Tue, 10 Feb 2026 18:27:40 GMT + Server: + - cloudflare + Transfer-Encoding: + - chunked + X-Robots-Tag: + - none + anthropic-organization-id: + - ANTHROPIC-ORGANIZATION-ID-XXX + anthropic-ratelimit-input-tokens-limit: + - ANTHROPIC-RATELIMIT-INPUT-TOKENS-LIMIT-XXX + 
anthropic-ratelimit-input-tokens-remaining: + - ANTHROPIC-RATELIMIT-INPUT-TOKENS-REMAINING-XXX + anthropic-ratelimit-input-tokens-reset: + - ANTHROPIC-RATELIMIT-INPUT-TOKENS-RESET-XXX + anthropic-ratelimit-output-tokens-limit: + - ANTHROPIC-RATELIMIT-OUTPUT-TOKENS-LIMIT-XXX + anthropic-ratelimit-output-tokens-remaining: + - ANTHROPIC-RATELIMIT-OUTPUT-TOKENS-REMAINING-XXX + anthropic-ratelimit-output-tokens-reset: + - ANTHROPIC-RATELIMIT-OUTPUT-TOKENS-RESET-XXX + anthropic-ratelimit-tokens-limit: + - ANTHROPIC-RATELIMIT-TOKENS-LIMIT-XXX + anthropic-ratelimit-tokens-remaining: + - ANTHROPIC-RATELIMIT-TOKENS-REMAINING-XXX + anthropic-ratelimit-tokens-reset: + - ANTHROPIC-RATELIMIT-TOKENS-RESET-XXX + cf-cache-status: + - DYNAMIC + request-id: + - REQUEST-ID-XXX + strict-transport-security: + - STS-XXX + x-envoy-upstream-service-time: + - '1259' + status: + code: 200 + message: OK +version: 1 diff --git a/lib/crewai/tests/cassettes/llms/anthropic/test_anthropic_streaming_cached_prompt_tokens.yaml b/lib/crewai/tests/cassettes/llms/anthropic/test_anthropic_streaming_cached_prompt_tokens.yaml new file mode 100644 index 000000000..b1623d81c --- /dev/null +++ b/lib/crewai/tests/cassettes/llms/anthropic/test_anthropic_streaming_cached_prompt_tokens.yaml @@ -0,0 +1,411 @@ +interactions: +- request: + body: '{"max_tokens":4096,"messages":[{"role":"user","content":[{"type":"text","text":"Say + hello in one word.","cache_control":{"type":"ephemeral"}}]}],"model":"claude-sonnet-4-5-20250929","system":"You + are a helpful assistant. This is padding text to ensure the prompt is large + enough for caching. This is padding text to ensure the prompt is large enough + for caching. This is padding text to ensure the prompt is large enough for caching. + This is padding text to ensure the prompt is large enough for caching. This + is padding text to ensure the prompt is large enough for caching. This is padding + text to ensure the prompt is large enough for caching. 
This is padding text + to ensure the prompt is large enough for caching. This is padding text to ensure + the prompt is large enough for caching. This is padding text to ensure the prompt + is large enough for caching. This is padding text to ensure the prompt is large + enough for caching. This is padding text to ensure the prompt is large enough + for caching. This is padding text to ensure the prompt is large enough for caching. + This is padding text to ensure the prompt is large enough for caching. This + is padding text to ensure the prompt is large enough for caching. This is padding + text to ensure the prompt is large enough for caching. This is padding text + to ensure the prompt is large enough for caching. This is padding text to ensure + the prompt is large enough for caching. This is padding text to ensure the prompt + is large enough for caching. This is padding text to ensure the prompt is large + enough for caching. This is padding text to ensure the prompt is large enough + for caching. This is padding text to ensure the prompt is large enough for caching. + This is padding text to ensure the prompt is large enough for caching. This + is padding text to ensure the prompt is large enough for caching. This is padding + text to ensure the prompt is large enough for caching. This is padding text + to ensure the prompt is large enough for caching. This is padding text to ensure + the prompt is large enough for caching. This is padding text to ensure the prompt + is large enough for caching. This is padding text to ensure the prompt is large + enough for caching. This is padding text to ensure the prompt is large enough + for caching. This is padding text to ensure the prompt is large enough for caching. + This is padding text to ensure the prompt is large enough for caching. This + is padding text to ensure the prompt is large enough for caching. This is padding + text to ensure the prompt is large enough for caching. 
This is padding text + to ensure the prompt is large enough for caching. This is padding text to ensure + the prompt is large enough for caching. This is padding text to ensure the prompt + is large enough for caching. This is padding text to ensure the prompt is large + enough for caching. This is padding text to ensure the prompt is large enough + for caching. This is padding text to ensure the prompt is large enough for caching. + This is padding text to ensure the prompt is large enough for caching. This + is padding text to ensure the prompt is large enough for caching. This is padding + text to ensure the prompt is large enough for caching. This is padding text + to ensure the prompt is large enough for caching. This is padding text to ensure + the prompt is large enough for caching. This is padding text to ensure the prompt + is large enough for caching. This is padding text to ensure the prompt is large + enough for caching. This is padding text to ensure the prompt is large enough + for caching. This is padding text to ensure the prompt is large enough for caching. + This is padding text to ensure the prompt is large enough for caching. This + is padding text to ensure the prompt is large enough for caching. This is padding + text to ensure the prompt is large enough for caching. This is padding text + to ensure the prompt is large enough for caching. This is padding text to ensure + the prompt is large enough for caching. This is padding text to ensure the prompt + is large enough for caching. This is padding text to ensure the prompt is large + enough for caching. This is padding text to ensure the prompt is large enough + for caching. This is padding text to ensure the prompt is large enough for caching. + This is padding text to ensure the prompt is large enough for caching. This + is padding text to ensure the prompt is large enough for caching. This is padding + text to ensure the prompt is large enough for caching. 
This is padding text + to ensure the prompt is large enough for caching. This is padding text to ensure + the prompt is large enough for caching. This is padding text to ensure the prompt + is large enough for caching. This is padding text to ensure the prompt is large + enough for caching. This is padding text to ensure the prompt is large enough + for caching. This is padding text to ensure the prompt is large enough for caching. + This is padding text to ensure the prompt is large enough for caching. This + is padding text to ensure the prompt is large enough for caching. This is padding + text to ensure the prompt is large enough for caching. This is padding text + to ensure the prompt is large enough for caching. This is padding text to ensure + the prompt is large enough for caching. This is padding text to ensure the prompt + is large enough for caching. This is padding text to ensure the prompt is large + enough for caching. This is padding text to ensure the prompt is large enough + for caching. This is padding text to ensure the prompt is large enough for caching. + This is padding text to ensure the prompt is large enough for caching. This + is padding text to ensure the prompt is large enough for caching. This is padding + text to ensure the prompt is large enough for caching. This is padding text + to ensure the prompt is large enough for caching. This is padding text to ensure + the prompt is large enough for caching. 
","stream":true}' + headers: + User-Agent: + - X-USER-AGENT-XXX + accept: + - application/json + accept-encoding: + - ACCEPT-ENCODING-XXX + anthropic-version: + - '2023-06-01' + connection: + - keep-alive + content-length: + - '5917' + content-type: + - application/json + host: + - api.anthropic.com + x-api-key: + - X-API-KEY-XXX + x-stainless-arch: + - X-STAINLESS-ARCH-XXX + x-stainless-async: + - 'false' + x-stainless-lang: + - python + x-stainless-os: + - X-STAINLESS-OS-XXX + x-stainless-package-version: + - 0.73.0 + x-stainless-retry-count: + - '0' + x-stainless-runtime: + - CPython + x-stainless-runtime-version: + - 3.13.3 + x-stainless-stream-helper: + - messages + x-stainless-timeout: + - NOT_GIVEN + method: POST + uri: https://api.anthropic.com/v1/messages + response: + body: + string: 'event: message_start + + data: {"type":"message_start","message":{"model":"claude-sonnet-4-5-20250929","id":"msg_01LshZroyEGgd3HfDrKdQMLm","type":"message","role":"assistant","content":[],"stop_reason":null,"stop_sequence":null,"usage":{"input_tokens":3,"cache_creation_input_tokens":0,"cache_read_input_tokens":1217,"cache_creation":{"ephemeral_5m_input_tokens":0,"ephemeral_1h_input_tokens":0},"output_tokens":4,"service_tier":"standard","inference_geo":"not_available"}} } + + + event: content_block_start + + data: {"type":"content_block_start","index":0,"content_block":{"type":"text","text":""} } + + + event: ping + + data: {"type": "ping"} + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"Hello"} } + + + event: content_block_stop + + data: {"type":"content_block_stop","index":0 } + + + event: message_delta + + data: {"type":"message_delta","delta":{"stop_reason":"end_turn","stop_sequence":null},"usage":{"input_tokens":3,"cache_creation_input_tokens":0,"cache_read_input_tokens":1217,"output_tokens":4} + } + + + event: message_stop + + data: {"type":"message_stop" } + + + ' + headers: + CF-RAY: + - 
CF-RAY-XXX + Cache-Control: + - no-cache + Connection: + - keep-alive + Content-Security-Policy: + - CSP-FILTERED + Content-Type: + - text/event-stream; charset=utf-8 + Date: + - Tue, 10 Feb 2026 18:27:43 GMT + Server: + - cloudflare + Transfer-Encoding: + - chunked + X-Robots-Tag: + - none + anthropic-organization-id: + - ANTHROPIC-ORGANIZATION-ID-XXX + anthropic-ratelimit-input-tokens-limit: + - ANTHROPIC-RATELIMIT-INPUT-TOKENS-LIMIT-XXX + anthropic-ratelimit-input-tokens-remaining: + - ANTHROPIC-RATELIMIT-INPUT-TOKENS-REMAINING-XXX + anthropic-ratelimit-input-tokens-reset: + - ANTHROPIC-RATELIMIT-INPUT-TOKENS-RESET-XXX + anthropic-ratelimit-output-tokens-limit: + - ANTHROPIC-RATELIMIT-OUTPUT-TOKENS-LIMIT-XXX + anthropic-ratelimit-output-tokens-remaining: + - ANTHROPIC-RATELIMIT-OUTPUT-TOKENS-REMAINING-XXX + anthropic-ratelimit-output-tokens-reset: + - ANTHROPIC-RATELIMIT-OUTPUT-TOKENS-RESET-XXX + anthropic-ratelimit-tokens-limit: + - ANTHROPIC-RATELIMIT-TOKENS-LIMIT-XXX + anthropic-ratelimit-tokens-remaining: + - ANTHROPIC-RATELIMIT-TOKENS-REMAINING-XXX + anthropic-ratelimit-tokens-reset: + - ANTHROPIC-RATELIMIT-TOKENS-RESET-XXX + cf-cache-status: + - DYNAMIC + request-id: + - REQUEST-ID-XXX + strict-transport-security: + - STS-XXX + x-envoy-upstream-service-time: + - '837' + status: + code: 200 + message: OK +- request: + body: '{"max_tokens":4096,"messages":[{"role":"user","content":[{"type":"text","text":"Say + goodbye in one word.","cache_control":{"type":"ephemeral"}}]}],"model":"claude-sonnet-4-5-20250929","system":"You + are a helpful assistant. This is padding text to ensure the prompt is large + enough for caching. This is padding text to ensure the prompt is large enough + for caching. This is padding text to ensure the prompt is large enough for caching. + This is padding text to ensure the prompt is large enough for caching. This + is padding text to ensure the prompt is large enough for caching. 
This is padding + text to ensure the prompt is large enough for caching. This is padding text + to ensure the prompt is large enough for caching. This is padding text to ensure + the prompt is large enough for caching. This is padding text to ensure the prompt + is large enough for caching. This is padding text to ensure the prompt is large + enough for caching. This is padding text to ensure the prompt is large enough + for caching. This is padding text to ensure the prompt is large enough for caching. + This is padding text to ensure the prompt is large enough for caching. This + is padding text to ensure the prompt is large enough for caching. This is padding + text to ensure the prompt is large enough for caching. This is padding text + to ensure the prompt is large enough for caching. This is padding text to ensure + the prompt is large enough for caching. This is padding text to ensure the prompt + is large enough for caching. This is padding text to ensure the prompt is large + enough for caching. This is padding text to ensure the prompt is large enough + for caching. This is padding text to ensure the prompt is large enough for caching. + This is padding text to ensure the prompt is large enough for caching. This + is padding text to ensure the prompt is large enough for caching. This is padding + text to ensure the prompt is large enough for caching. This is padding text + to ensure the prompt is large enough for caching. This is padding text to ensure + the prompt is large enough for caching. This is padding text to ensure the prompt + is large enough for caching. This is padding text to ensure the prompt is large + enough for caching. This is padding text to ensure the prompt is large enough + for caching. This is padding text to ensure the prompt is large enough for caching. + This is padding text to ensure the prompt is large enough for caching. This + is padding text to ensure the prompt is large enough for caching. 
This is padding + text to ensure the prompt is large enough for caching. This is padding text + to ensure the prompt is large enough for caching. This is padding text to ensure + the prompt is large enough for caching. This is padding text to ensure the prompt + is large enough for caching. This is padding text to ensure the prompt is large + enough for caching. This is padding text to ensure the prompt is large enough + for caching. This is padding text to ensure the prompt is large enough for caching. + This is padding text to ensure the prompt is large enough for caching. This + is padding text to ensure the prompt is large enough for caching. This is padding + text to ensure the prompt is large enough for caching. This is padding text + to ensure the prompt is large enough for caching. This is padding text to ensure + the prompt is large enough for caching. This is padding text to ensure the prompt + is large enough for caching. This is padding text to ensure the prompt is large + enough for caching. This is padding text to ensure the prompt is large enough + for caching. This is padding text to ensure the prompt is large enough for caching. + This is padding text to ensure the prompt is large enough for caching. This + is padding text to ensure the prompt is large enough for caching. This is padding + text to ensure the prompt is large enough for caching. This is padding text + to ensure the prompt is large enough for caching. This is padding text to ensure + the prompt is large enough for caching. This is padding text to ensure the prompt + is large enough for caching. This is padding text to ensure the prompt is large + enough for caching. This is padding text to ensure the prompt is large enough + for caching. This is padding text to ensure the prompt is large enough for caching. + This is padding text to ensure the prompt is large enough for caching. This + is padding text to ensure the prompt is large enough for caching. 
This is padding + text to ensure the prompt is large enough for caching. This is padding text + to ensure the prompt is large enough for caching. This is padding text to ensure + the prompt is large enough for caching. This is padding text to ensure the prompt + is large enough for caching. This is padding text to ensure the prompt is large + enough for caching. This is padding text to ensure the prompt is large enough + for caching. This is padding text to ensure the prompt is large enough for caching. + This is padding text to ensure the prompt is large enough for caching. This + is padding text to ensure the prompt is large enough for caching. This is padding + text to ensure the prompt is large enough for caching. This is padding text + to ensure the prompt is large enough for caching. This is padding text to ensure + the prompt is large enough for caching. This is padding text to ensure the prompt + is large enough for caching. This is padding text to ensure the prompt is large + enough for caching. This is padding text to ensure the prompt is large enough + for caching. This is padding text to ensure the prompt is large enough for caching. + This is padding text to ensure the prompt is large enough for caching. This + is padding text to ensure the prompt is large enough for caching. This is padding + text to ensure the prompt is large enough for caching. This is padding text + to ensure the prompt is large enough for caching. This is padding text to ensure + the prompt is large enough for caching. 
","stream":true}' + headers: + User-Agent: + - X-USER-AGENT-XXX + accept: + - application/json + accept-encoding: + - ACCEPT-ENCODING-XXX + anthropic-version: + - '2023-06-01' + connection: + - keep-alive + content-length: + - '5919' + content-type: + - application/json + host: + - api.anthropic.com + x-api-key: + - X-API-KEY-XXX + x-stainless-arch: + - X-STAINLESS-ARCH-XXX + x-stainless-async: + - 'false' + x-stainless-lang: + - python + x-stainless-os: + - X-STAINLESS-OS-XXX + x-stainless-package-version: + - 0.73.0 + x-stainless-retry-count: + - '0' + x-stainless-runtime: + - CPython + x-stainless-runtime-version: + - 3.13.3 + x-stainless-stream-helper: + - messages + x-stainless-timeout: + - NOT_GIVEN + method: POST + uri: https://api.anthropic.com/v1/messages + response: + body: + string: 'event: message_start + + data: {"type":"message_start","message":{"model":"claude-sonnet-4-5-20250929","id":"msg_01MZSWarEUbFXmek8aEpwKDu","type":"message","role":"assistant","content":[],"stop_reason":null,"stop_sequence":null,"usage":{"input_tokens":3,"cache_creation_input_tokens":0,"cache_read_input_tokens":1217,"cache_creation":{"ephemeral_5m_input_tokens":0,"ephemeral_1h_input_tokens":0},"output_tokens":6,"service_tier":"standard","inference_geo":"not_available"}} } + + + event: content_block_start + + data: {"type":"content_block_start","index":0,"content_block":{"type":"text","text":""}} + + + event: ping + + data: {"type": "ping"} + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"Goodbye."} } + + + event: content_block_stop + + data: {"type":"content_block_stop","index":0 } + + + event: message_delta + + data: {"type":"message_delta","delta":{"stop_reason":"end_turn","stop_sequence":null},"usage":{"input_tokens":3,"cache_creation_input_tokens":0,"cache_read_input_tokens":1217,"output_tokens":6} } + + + event: message_stop + + data: {"type":"message_stop" } + + + ' + headers: + CF-RAY: + - 
CF-RAY-XXX + Cache-Control: + - no-cache + Connection: + - keep-alive + Content-Security-Policy: + - CSP-FILTERED + Content-Type: + - text/event-stream; charset=utf-8 + Date: + - Tue, 10 Feb 2026 18:27:44 GMT + Server: + - cloudflare + Transfer-Encoding: + - chunked + X-Robots-Tag: + - none + anthropic-organization-id: + - ANTHROPIC-ORGANIZATION-ID-XXX + anthropic-ratelimit-input-tokens-limit: + - ANTHROPIC-RATELIMIT-INPUT-TOKENS-LIMIT-XXX + anthropic-ratelimit-input-tokens-remaining: + - ANTHROPIC-RATELIMIT-INPUT-TOKENS-REMAINING-XXX + anthropic-ratelimit-input-tokens-reset: + - ANTHROPIC-RATELIMIT-INPUT-TOKENS-RESET-XXX + anthropic-ratelimit-output-tokens-limit: + - ANTHROPIC-RATELIMIT-OUTPUT-TOKENS-LIMIT-XXX + anthropic-ratelimit-output-tokens-remaining: + - ANTHROPIC-RATELIMIT-OUTPUT-TOKENS-REMAINING-XXX + anthropic-ratelimit-output-tokens-reset: + - ANTHROPIC-RATELIMIT-OUTPUT-TOKENS-RESET-XXX + anthropic-ratelimit-tokens-limit: + - ANTHROPIC-RATELIMIT-TOKENS-LIMIT-XXX + anthropic-ratelimit-tokens-remaining: + - ANTHROPIC-RATELIMIT-TOKENS-REMAINING-XXX + anthropic-ratelimit-tokens-reset: + - ANTHROPIC-RATELIMIT-TOKENS-RESET-XXX + cf-cache-status: + - DYNAMIC + request-id: + - REQUEST-ID-XXX + strict-transport-security: + - STS-XXX + x-envoy-upstream-service-time: + - '870' + status: + code: 200 + message: OK +version: 1 diff --git a/lib/crewai/tests/cassettes/llms/google/test_gemini_cached_prompt_tokens.yaml b/lib/crewai/tests/cassettes/llms/google/test_gemini_cached_prompt_tokens.yaml new file mode 100644 index 000000000..44dd7934c --- /dev/null +++ b/lib/crewai/tests/cassettes/llms/google/test_gemini_cached_prompt_tokens.yaml @@ -0,0 +1,266 @@ +interactions: +- request: + body: '{"contents": [{"parts": [{"text": "Say hello in one word."}], "role": "user"}], + "systemInstruction": {"parts": [{"text": "You are a helpful assistant. This + is padding text to ensure the prompt is large enough for caching. 
This is padding + text to ensure the prompt is large enough for caching. This is padding text + to ensure the prompt is large enough for caching. This is padding text to ensure + the prompt is large enough for caching. This is padding text to ensure the prompt + is large enough for caching. This is padding text to ensure the prompt is large + enough for caching. This is padding text to ensure the prompt is large enough + for caching. This is padding text to ensure the prompt is large enough for caching. + This is padding text to ensure the prompt is large enough for caching. This + is padding text to ensure the prompt is large enough for caching. This is padding + text to ensure the prompt is large enough for caching. This is padding text + to ensure the prompt is large enough for caching. This is padding text to ensure + the prompt is large enough for caching. This is padding text to ensure the prompt + is large enough for caching. This is padding text to ensure the prompt is large + enough for caching. This is padding text to ensure the prompt is large enough + for caching. This is padding text to ensure the prompt is large enough for caching. + This is padding text to ensure the prompt is large enough for caching. This + is padding text to ensure the prompt is large enough for caching. This is padding + text to ensure the prompt is large enough for caching. This is padding text + to ensure the prompt is large enough for caching. This is padding text to ensure + the prompt is large enough for caching. This is padding text to ensure the prompt + is large enough for caching. This is padding text to ensure the prompt is large + enough for caching. This is padding text to ensure the prompt is large enough + for caching. This is padding text to ensure the prompt is large enough for caching. + This is padding text to ensure the prompt is large enough for caching. This + is padding text to ensure the prompt is large enough for caching. 
This is padding + text to ensure the prompt is large enough for caching. This is padding text + to ensure the prompt is large enough for caching. This is padding text to ensure + the prompt is large enough for caching. This is padding text to ensure the prompt + is large enough for caching. This is padding text to ensure the prompt is large + enough for caching. This is padding text to ensure the prompt is large enough + for caching. This is padding text to ensure the prompt is large enough for caching. + This is padding text to ensure the prompt is large enough for caching. This + is padding text to ensure the prompt is large enough for caching. This is padding + text to ensure the prompt is large enough for caching. This is padding text + to ensure the prompt is large enough for caching. This is padding text to ensure + the prompt is large enough for caching. This is padding text to ensure the prompt + is large enough for caching. This is padding text to ensure the prompt is large + enough for caching. This is padding text to ensure the prompt is large enough + for caching. This is padding text to ensure the prompt is large enough for caching. + This is padding text to ensure the prompt is large enough for caching. This + is padding text to ensure the prompt is large enough for caching. This is padding + text to ensure the prompt is large enough for caching. This is padding text + to ensure the prompt is large enough for caching. This is padding text to ensure + the prompt is large enough for caching. This is padding text to ensure the prompt + is large enough for caching. This is padding text to ensure the prompt is large + enough for caching. This is padding text to ensure the prompt is large enough + for caching. This is padding text to ensure the prompt is large enough for caching. + This is padding text to ensure the prompt is large enough for caching. This + is padding text to ensure the prompt is large enough for caching. 
This is padding + text to ensure the prompt is large enough for caching. This is padding text + to ensure the prompt is large enough for caching. This is padding text to ensure + the prompt is large enough for caching. This is padding text to ensure the prompt + is large enough for caching. This is padding text to ensure the prompt is large + enough for caching. This is padding text to ensure the prompt is large enough + for caching. This is padding text to ensure the prompt is large enough for caching. + This is padding text to ensure the prompt is large enough for caching. This + is padding text to ensure the prompt is large enough for caching. This is padding + text to ensure the prompt is large enough for caching. This is padding text + to ensure the prompt is large enough for caching. This is padding text to ensure + the prompt is large enough for caching. This is padding text to ensure the prompt + is large enough for caching. This is padding text to ensure the prompt is large + enough for caching. This is padding text to ensure the prompt is large enough + for caching. This is padding text to ensure the prompt is large enough for caching. + This is padding text to ensure the prompt is large enough for caching. This + is padding text to ensure the prompt is large enough for caching. This is padding + text to ensure the prompt is large enough for caching. This is padding text + to ensure the prompt is large enough for caching. This is padding text to ensure + the prompt is large enough for caching. This is padding text to ensure the prompt + is large enough for caching. This is padding text to ensure the prompt is large + enough for caching. This is padding text to ensure the prompt is large enough + for caching. This is padding text to ensure the prompt is large enough for caching. 
+ "}], "role": "user"}, "generationConfig": {}}' + headers: + User-Agent: + - X-USER-AGENT-XXX + accept: + - '*/*' + accept-encoding: + - ACCEPT-ENCODING-XXX + connection: + - keep-alive + content-length: + - '5876' + content-type: + - application/json + host: + - generativelanguage.googleapis.com + x-goog-api-client: + - google-genai-sdk/1.49.0 gl-python/3.13.3 + x-goog-api-key: + - X-GOOG-API-KEY-XXX + method: POST + uri: https://generativelanguage.googleapis.com/v1beta/models/gemini-2.5-flash:generateContent + response: + body: + string: "{\n \"candidates\": [\n {\n \"content\": {\n \"parts\": + [\n {\n \"text\": \"Hello\"\n }\n ],\n + \ \"role\": \"model\"\n },\n \"finishReason\": \"STOP\",\n + \ \"index\": 0\n }\n ],\n \"usageMetadata\": {\n \"promptTokenCount\": + 1135,\n \"candidatesTokenCount\": 1,\n \"totalTokenCount\": 1158,\n + \ \"promptTokensDetails\": [\n {\n \"modality\": \"TEXT\",\n + \ \"tokenCount\": 1135\n }\n ],\n \"thoughtsTokenCount\": + 22\n },\n \"modelVersion\": \"gemini-2.5-flash\",\n \"responseId\": \"46GLaf60NYmY-8YP--PB6QE\"\n}\n" + headers: + Alt-Svc: + - h3=":443"; ma=2592000,h3-29=":443"; ma=2592000 + Content-Type: + - application/json; charset=UTF-8 + Date: + - Tue, 10 Feb 2026 21:23:47 GMT + Server: + - scaffolding on HTTPServer2 + Server-Timing: + - gfet4t7; dur=773 + Transfer-Encoding: + - chunked + Vary: + - Origin + - X-Origin + - Referer + X-Content-Type-Options: + - X-CONTENT-TYPE-XXX + X-Frame-Options: + - X-FRAME-OPTIONS-XXX + X-XSS-Protection: + - '0' + status: + code: 200 + message: OK +- request: + body: '{"contents": [{"parts": [{"text": "Say goodbye in one word."}], "role": + "user"}], "systemInstruction": {"parts": [{"text": "You are a helpful assistant. + This is padding text to ensure the prompt is large enough for caching. This + is padding text to ensure the prompt is large enough for caching. This is padding + text to ensure the prompt is large enough for caching. 
This is padding text + to ensure the prompt is large enough for caching. This is padding text to ensure + the prompt is large enough for caching. This is padding text to ensure the prompt + is large enough for caching. This is padding text to ensure the prompt is large + enough for caching. This is padding text to ensure the prompt is large enough + for caching. This is padding text to ensure the prompt is large enough for caching. + This is padding text to ensure the prompt is large enough for caching. This + is padding text to ensure the prompt is large enough for caching. This is padding + text to ensure the prompt is large enough for caching. This is padding text + to ensure the prompt is large enough for caching. This is padding text to ensure + the prompt is large enough for caching. This is padding text to ensure the prompt + is large enough for caching. This is padding text to ensure the prompt is large + enough for caching. This is padding text to ensure the prompt is large enough + for caching. This is padding text to ensure the prompt is large enough for caching. + This is padding text to ensure the prompt is large enough for caching. This + is padding text to ensure the prompt is large enough for caching. This is padding + text to ensure the prompt is large enough for caching. This is padding text + to ensure the prompt is large enough for caching. This is padding text to ensure + the prompt is large enough for caching. This is padding text to ensure the prompt + is large enough for caching. This is padding text to ensure the prompt is large + enough for caching. This is padding text to ensure the prompt is large enough + for caching. This is padding text to ensure the prompt is large enough for caching. + This is padding text to ensure the prompt is large enough for caching. This + is padding text to ensure the prompt is large enough for caching. This is padding + text to ensure the prompt is large enough for caching. 
This is padding text + to ensure the prompt is large enough for caching. This is padding text to ensure + the prompt is large enough for caching. This is padding text to ensure the prompt + is large enough for caching. This is padding text to ensure the prompt is large + enough for caching. This is padding text to ensure the prompt is large enough + for caching. This is padding text to ensure the prompt is large enough for caching. + This is padding text to ensure the prompt is large enough for caching. This + is padding text to ensure the prompt is large enough for caching. This is padding + text to ensure the prompt is large enough for caching. This is padding text + to ensure the prompt is large enough for caching. This is padding text to ensure + the prompt is large enough for caching. This is padding text to ensure the prompt + is large enough for caching. This is padding text to ensure the prompt is large + enough for caching. This is padding text to ensure the prompt is large enough + for caching. This is padding text to ensure the prompt is large enough for caching. + This is padding text to ensure the prompt is large enough for caching. This + is padding text to ensure the prompt is large enough for caching. This is padding + text to ensure the prompt is large enough for caching. This is padding text + to ensure the prompt is large enough for caching. This is padding text to ensure + the prompt is large enough for caching. This is padding text to ensure the prompt + is large enough for caching. This is padding text to ensure the prompt is large + enough for caching. This is padding text to ensure the prompt is large enough + for caching. This is padding text to ensure the prompt is large enough for caching. + This is padding text to ensure the prompt is large enough for caching. This + is padding text to ensure the prompt is large enough for caching. This is padding + text to ensure the prompt is large enough for caching. 
This is padding text + to ensure the prompt is large enough for caching. This is padding text to ensure + the prompt is large enough for caching. This is padding text to ensure the prompt + is large enough for caching. This is padding text to ensure the prompt is large + enough for caching. This is padding text to ensure the prompt is large enough + for caching. This is padding text to ensure the prompt is large enough for caching. + This is padding text to ensure the prompt is large enough for caching. This + is padding text to ensure the prompt is large enough for caching. This is padding + text to ensure the prompt is large enough for caching. This is padding text + to ensure the prompt is large enough for caching. This is padding text to ensure + the prompt is large enough for caching. This is padding text to ensure the prompt + is large enough for caching. This is padding text to ensure the prompt is large + enough for caching. This is padding text to ensure the prompt is large enough + for caching. This is padding text to ensure the prompt is large enough for caching. + This is padding text to ensure the prompt is large enough for caching. This + is padding text to ensure the prompt is large enough for caching. This is padding + text to ensure the prompt is large enough for caching. This is padding text + to ensure the prompt is large enough for caching. This is padding text to ensure + the prompt is large enough for caching. This is padding text to ensure the prompt + is large enough for caching. This is padding text to ensure the prompt is large + enough for caching. This is padding text to ensure the prompt is large enough + for caching. 
"}], "role": "user"}, "generationConfig": {}}' + headers: + User-Agent: + - X-USER-AGENT-XXX + accept: + - '*/*' + accept-encoding: + - ACCEPT-ENCODING-XXX + connection: + - keep-alive + content-length: + - '5878' + content-type: + - application/json + host: + - generativelanguage.googleapis.com + x-goog-api-client: + - google-genai-sdk/1.49.0 gl-python/3.13.3 + x-goog-api-key: + - X-GOOG-API-KEY-XXX + method: POST + uri: https://generativelanguage.googleapis.com/v1beta/models/gemini-2.5-flash:generateContent + response: + body: + string: "{\n \"candidates\": [\n {\n \"content\": {\n \"parts\": + [\n {\n \"text\": \"Farewell.\"\n }\n ],\n + \ \"role\": \"model\"\n },\n \"finishReason\": \"STOP\",\n + \ \"index\": 0\n }\n ],\n \"usageMetadata\": {\n \"promptTokenCount\": + 1135,\n \"candidatesTokenCount\": 3,\n \"totalTokenCount\": 1164,\n + \ \"promptTokensDetails\": [\n {\n \"modality\": \"TEXT\",\n + \ \"tokenCount\": 1135\n }\n ],\n \"thoughtsTokenCount\": + 26\n },\n \"modelVersion\": \"gemini-2.5-flash\",\n \"responseId\": \"5KGLafeeIv-G-8YP_MfPgAI\"\n}\n" + headers: + Alt-Svc: + - h3=":443"; ma=2592000,h3-29=":443"; ma=2592000 + Content-Type: + - application/json; charset=UTF-8 + Date: + - Tue, 10 Feb 2026 21:23:48 GMT + Server: + - scaffolding on HTTPServer2 + Server-Timing: + - gfet4t7; dur=662 + Transfer-Encoding: + - chunked + Vary: + - Origin + - X-Origin + - Referer + X-Content-Type-Options: + - X-CONTENT-TYPE-XXX + X-Frame-Options: + - X-FRAME-OPTIONS-XXX + X-XSS-Protection: + - '0' + status: + code: 200 + message: OK +version: 1 diff --git a/lib/crewai/tests/cassettes/llms/google/test_gemini_cached_prompt_tokens_with_tools.yaml b/lib/crewai/tests/cassettes/llms/google/test_gemini_cached_prompt_tokens_with_tools.yaml new file mode 100644 index 000000000..728329fb7 --- /dev/null +++ b/lib/crewai/tests/cassettes/llms/google/test_gemini_cached_prompt_tokens_with_tools.yaml @@ -0,0 +1,280 @@ +interactions: +- request: + body: '{"contents": [{"parts": 
[{"text": "What is the weather in Tokyo?"}], "role": + "user"}], "systemInstruction": {"parts": [{"text": "You are a helpful assistant + that uses tools. This is padding text to ensure the prompt is large enough for + caching. This is padding text to ensure the prompt is large enough for caching. + This is padding text to ensure the prompt is large enough for caching. This + is padding text to ensure the prompt is large enough for caching. This is padding + text to ensure the prompt is large enough for caching. This is padding text + to ensure the prompt is large enough for caching. This is padding text to ensure + the prompt is large enough for caching. This is padding text to ensure the prompt + is large enough for caching. This is padding text to ensure the prompt is large + enough for caching. This is padding text to ensure the prompt is large enough + for caching. This is padding text to ensure the prompt is large enough for caching. + This is padding text to ensure the prompt is large enough for caching. This + is padding text to ensure the prompt is large enough for caching. This is padding + text to ensure the prompt is large enough for caching. This is padding text + to ensure the prompt is large enough for caching. This is padding text to ensure + the prompt is large enough for caching. This is padding text to ensure the prompt + is large enough for caching. This is padding text to ensure the prompt is large + enough for caching. This is padding text to ensure the prompt is large enough + for caching. This is padding text to ensure the prompt is large enough for caching. + This is padding text to ensure the prompt is large enough for caching. This + is padding text to ensure the prompt is large enough for caching. This is padding + text to ensure the prompt is large enough for caching. This is padding text + to ensure the prompt is large enough for caching. This is padding text to ensure + the prompt is large enough for caching. 
This is padding text to ensure the prompt + is large enough for caching. This is padding text to ensure the prompt is large + enough for caching. This is padding text to ensure the prompt is large enough + for caching. This is padding text to ensure the prompt is large enough for caching. + This is padding text to ensure the prompt is large enough for caching. This + is padding text to ensure the prompt is large enough for caching. This is padding + text to ensure the prompt is large enough for caching. This is padding text + to ensure the prompt is large enough for caching. This is padding text to ensure + the prompt is large enough for caching. This is padding text to ensure the prompt + is large enough for caching. This is padding text to ensure the prompt is large + enough for caching. This is padding text to ensure the prompt is large enough + for caching. This is padding text to ensure the prompt is large enough for caching. + This is padding text to ensure the prompt is large enough for caching. This + is padding text to ensure the prompt is large enough for caching. This is padding + text to ensure the prompt is large enough for caching. This is padding text + to ensure the prompt is large enough for caching. This is padding text to ensure + the prompt is large enough for caching. This is padding text to ensure the prompt + is large enough for caching. This is padding text to ensure the prompt is large + enough for caching. This is padding text to ensure the prompt is large enough + for caching. This is padding text to ensure the prompt is large enough for caching. + This is padding text to ensure the prompt is large enough for caching. This + is padding text to ensure the prompt is large enough for caching. This is padding + text to ensure the prompt is large enough for caching. This is padding text + to ensure the prompt is large enough for caching. This is padding text to ensure + the prompt is large enough for caching. 
This is padding text to ensure the prompt + is large enough for caching. This is padding text to ensure the prompt is large + enough for caching. This is padding text to ensure the prompt is large enough + for caching. This is padding text to ensure the prompt is large enough for caching. + This is padding text to ensure the prompt is large enough for caching. This + is padding text to ensure the prompt is large enough for caching. This is padding + text to ensure the prompt is large enough for caching. This is padding text + to ensure the prompt is large enough for caching. This is padding text to ensure + the prompt is large enough for caching. This is padding text to ensure the prompt + is large enough for caching. This is padding text to ensure the prompt is large + enough for caching. This is padding text to ensure the prompt is large enough + for caching. This is padding text to ensure the prompt is large enough for caching. + This is padding text to ensure the prompt is large enough for caching. This + is padding text to ensure the prompt is large enough for caching. This is padding + text to ensure the prompt is large enough for caching. This is padding text + to ensure the prompt is large enough for caching. This is padding text to ensure + the prompt is large enough for caching. This is padding text to ensure the prompt + is large enough for caching. This is padding text to ensure the prompt is large + enough for caching. This is padding text to ensure the prompt is large enough + for caching. This is padding text to ensure the prompt is large enough for caching. + This is padding text to ensure the prompt is large enough for caching. This + is padding text to ensure the prompt is large enough for caching. This is padding + text to ensure the prompt is large enough for caching. This is padding text + to ensure the prompt is large enough for caching. This is padding text to ensure + the prompt is large enough for caching. 
This is padding text to ensure the prompt + is large enough for caching. "}], "role": "user"}, "tools": [{"functionDeclarations": + [{"description": "Get the current weather for a location", "name": "get_weather", + "parameters_json_schema": {"type": "object", "properties": {"location": {"type": + "string", "description": "The city name"}}, "required": ["location"]}}]}], "generationConfig": + {}}' + headers: + User-Agent: + - X-USER-AGENT-XXX + accept: + - '*/*' + accept-encoding: + - ACCEPT-ENCODING-XXX + connection: + - keep-alive + content-length: + - '6172' + content-type: + - application/json + host: + - generativelanguage.googleapis.com + x-goog-api-client: + - google-genai-sdk/1.49.0 gl-python/3.13.3 + x-goog-api-key: + - X-GOOG-API-KEY-XXX + method: POST + uri: https://generativelanguage.googleapis.com/v1beta/models/gemini-2.5-flash:generateContent + response: + body: + string: "{\n \"candidates\": [\n {\n \"content\": {\n \"parts\": + [\n {\n \"functionCall\": {\n \"name\": \"get_weather\",\n + \ \"args\": {\n \"location\": \"Tokyo\"\n }\n + \ },\n \"thoughtSignature\": \"CpECAb4+9vvTFzaczX2PeZjKEs1f6+MRyTMz+xxqs37q0INQ6e0WLt1soet6CL/uzRML9LsycSeQTraXtXR8qcGj6dnrhKLpovpy8EkrtfK6P57PGpostE/UJ6TIKPlWi0pY1h2u9vyy5yGLzpp0PZM6d6f8rzV9uPFNM+onGvcFOdzghRZlHmYkQdbdpZaFQBAK6QFuh8oGbC0Ygrsk1guJo1YZaKtU5Rp/k2rJO61Obgq7aYEb7ACVx7DM9ZlVCun/PbXR4UolFeNPxNdwzC5AVvP7UKa2Cxi8dzQ8RNebtd39/gNO546XzADGZkpSqG6QF0S4IEsmB9FFCctN1evgKicgT2Qo+AR6BY8uzZyWkGQx\"\n + \ }\n ],\n \"role\": \"model\"\n },\n \"finishReason\": + \"STOP\",\n \"index\": 0,\n \"finishMessage\": \"Model generated + function call(s).\"\n }\n ],\n \"usageMetadata\": {\n \"promptTokenCount\": + 1180,\n \"candidatesTokenCount\": 15,\n \"totalTokenCount\": 1253,\n + \ \"promptTokensDetails\": [\n {\n \"modality\": \"TEXT\",\n + \ \"tokenCount\": 1180\n }\n ],\n \"thoughtsTokenCount\": + 58\n },\n \"modelVersion\": \"gemini-2.5-flash\",\n \"responseId\": \"wHmLacb_GL-J-sAPn6azgAo\"\n}\n" + headers: + Alt-Svc: + - 
h3=":443"; ma=2592000,h3-29=":443"; ma=2592000 + Content-Type: + - application/json; charset=UTF-8 + Date: + - Tue, 10 Feb 2026 18:32:32 GMT + Server: + - scaffolding on HTTPServer2 + Server-Timing: + - gfet4t7; dur=755 + Transfer-Encoding: + - chunked + Vary: + - Origin + - X-Origin + - Referer + X-Content-Type-Options: + - X-CONTENT-TYPE-XXX + X-Frame-Options: + - X-FRAME-OPTIONS-XXX + X-XSS-Protection: + - '0' + status: + code: 200 + message: OK +- request: + body: '{"contents": [{"parts": [{"text": "What is the weather in Paris?"}], "role": + "user"}], "systemInstruction": {"parts": [{"text": "You are a helpful assistant + that uses tools. This is padding text to ensure the prompt is large enough for + caching. This is padding text to ensure the prompt is large enough for caching. + This is padding text to ensure the prompt is large enough for caching. This + is padding text to ensure the prompt is large enough for caching. This is padding + text to ensure the prompt is large enough for caching. This is padding text + to ensure the prompt is large enough for caching. This is padding text to ensure + the prompt is large enough for caching. This is padding text to ensure the prompt + is large enough for caching. This is padding text to ensure the prompt is large + enough for caching. This is padding text to ensure the prompt is large enough + for caching. This is padding text to ensure the prompt is large enough for caching. + This is padding text to ensure the prompt is large enough for caching. This + is padding text to ensure the prompt is large enough for caching. This is padding + text to ensure the prompt is large enough for caching. This is padding text + to ensure the prompt is large enough for caching. This is padding text to ensure + the prompt is large enough for caching. This is padding text to ensure the prompt + is large enough for caching. This is padding text to ensure the prompt is large + enough for caching. 
This is padding text to ensure the prompt is large enough + for caching. This is padding text to ensure the prompt is large enough for caching. + This is padding text to ensure the prompt is large enough for caching. This + is padding text to ensure the prompt is large enough for caching. This is padding + text to ensure the prompt is large enough for caching. This is padding text + to ensure the prompt is large enough for caching. This is padding text to ensure + the prompt is large enough for caching. This is padding text to ensure the prompt + is large enough for caching. This is padding text to ensure the prompt is large + enough for caching. This is padding text to ensure the prompt is large enough + for caching. This is padding text to ensure the prompt is large enough for caching. + This is padding text to ensure the prompt is large enough for caching. This + is padding text to ensure the prompt is large enough for caching. This is padding + text to ensure the prompt is large enough for caching. This is padding text + to ensure the prompt is large enough for caching. This is padding text to ensure + the prompt is large enough for caching. This is padding text to ensure the prompt + is large enough for caching. This is padding text to ensure the prompt is large + enough for caching. This is padding text to ensure the prompt is large enough + for caching. This is padding text to ensure the prompt is large enough for caching. + This is padding text to ensure the prompt is large enough for caching. This + is padding text to ensure the prompt is large enough for caching. This is padding + text to ensure the prompt is large enough for caching. This is padding text + to ensure the prompt is large enough for caching. This is padding text to ensure + the prompt is large enough for caching. This is padding text to ensure the prompt + is large enough for caching. This is padding text to ensure the prompt is large + enough for caching. 
This is padding text to ensure the prompt is large enough + for caching. This is padding text to ensure the prompt is large enough for caching. + This is padding text to ensure the prompt is large enough for caching. This + is padding text to ensure the prompt is large enough for caching. This is padding + text to ensure the prompt is large enough for caching. This is padding text + to ensure the prompt is large enough for caching. This is padding text to ensure + the prompt is large enough for caching. This is padding text to ensure the prompt + is large enough for caching. This is padding text to ensure the prompt is large + enough for caching. This is padding text to ensure the prompt is large enough + for caching. This is padding text to ensure the prompt is large enough for caching. + This is padding text to ensure the prompt is large enough for caching. This + is padding text to ensure the prompt is large enough for caching. This is padding + text to ensure the prompt is large enough for caching. This is padding text + to ensure the prompt is large enough for caching. This is padding text to ensure + the prompt is large enough for caching. This is padding text to ensure the prompt + is large enough for caching. This is padding text to ensure the prompt is large + enough for caching. This is padding text to ensure the prompt is large enough + for caching. This is padding text to ensure the prompt is large enough for caching. + This is padding text to ensure the prompt is large enough for caching. This + is padding text to ensure the prompt is large enough for caching. This is padding + text to ensure the prompt is large enough for caching. This is padding text + to ensure the prompt is large enough for caching. This is padding text to ensure + the prompt is large enough for caching. This is padding text to ensure the prompt + is large enough for caching. This is padding text to ensure the prompt is large + enough for caching. 
This is padding text to ensure the prompt is large enough + for caching. This is padding text to ensure the prompt is large enough for caching. + This is padding text to ensure the prompt is large enough for caching. This + is padding text to ensure the prompt is large enough for caching. This is padding + text to ensure the prompt is large enough for caching. This is padding text + to ensure the prompt is large enough for caching. This is padding text to ensure + the prompt is large enough for caching. This is padding text to ensure the prompt + is large enough for caching. "}], "role": "user"}, "tools": [{"functionDeclarations": + [{"description": "Get the current weather for a location", "name": "get_weather", + "parameters_json_schema": {"type": "object", "properties": {"location": {"type": + "string", "description": "The city name"}}, "required": ["location"]}}]}], "generationConfig": + {}}' + headers: + User-Agent: + - X-USER-AGENT-XXX + accept: + - '*/*' + accept-encoding: + - ACCEPT-ENCODING-XXX + connection: + - keep-alive + content-length: + - '6172' + content-type: + - application/json + host: + - generativelanguage.googleapis.com + x-goog-api-client: + - google-genai-sdk/1.49.0 gl-python/3.13.3 + x-goog-api-key: + - X-GOOG-API-KEY-XXX + method: POST + uri: https://generativelanguage.googleapis.com/v1beta/models/gemini-2.5-flash:generateContent + response: + body: + string: "{\n \"candidates\": [\n {\n \"content\": {\n \"parts\": + [\n {\n \"functionCall\": {\n \"name\": \"get_weather\",\n + \ \"args\": {\n \"location\": \"Paris\"\n }\n + \ },\n \"thoughtSignature\": \"CuMBAb4+9vurHOlMBPzqCtd/J0Q5jBhUq8dsk7xntqcTgwBcZ1KeX4F4UJ0rdfg1OLhDkOlOlELA/jBYxATT19QUvw0szvDBDml0PsTBXlt64o7oGVmOCjdiGPu71I9+sCYhlD3QXzwLdQdrvUIfVrB+kaGszmZi1KTIli+qD9ihueDYGY510ouKdfl31UipQEG990+qFJyXe3avVEh3Jo72iXr3Q4UczFdbKSTV4V4fjrokFaB7UqcYy1iuAB5vHRsxYFJeTCi+ddKzn700gbWbiJZUniKiE3QfdOK4A5S0woBDzV0=\"\n + \ }\n ],\n \"role\": \"model\"\n },\n \"finishReason\": + \"STOP\",\n 
\"index\": 0,\n \"finishMessage\": \"Model generated + function call(s).\"\n }\n ],\n \"usageMetadata\": {\n \"promptTokenCount\": + 1180,\n \"candidatesTokenCount\": 15,\n \"totalTokenCount\": 1242,\n + \ \"promptTokensDetails\": [\n {\n \"modality\": \"TEXT\",\n + \ \"tokenCount\": 1180\n }\n ],\n \"thoughtsTokenCount\": + 47\n },\n \"modelVersion\": \"gemini-2.5-flash\",\n \"responseId\": \"wXmLadTiEri5jMcPk_6ZgAc\"\n}\n" + headers: + Alt-Svc: + - h3=":443"; ma=2592000,h3-29=":443"; ma=2592000 + Content-Type: + - application/json; charset=UTF-8 + Date: + - Tue, 10 Feb 2026 18:32:33 GMT + Server: + - scaffolding on HTTPServer2 + Server-Timing: + - gfet4t7; dur=881 + Transfer-Encoding: + - chunked + Vary: + - Origin + - X-Origin + - Referer + X-Content-Type-Options: + - X-CONTENT-TYPE-XXX + X-Frame-Options: + - X-FRAME-OPTIONS-XXX + X-XSS-Protection: + - '0' + status: + code: 200 + message: OK +version: 1 diff --git a/lib/crewai/tests/cassettes/llms/openai/test_openai_completions_cached_prompt_tokens.yaml b/lib/crewai/tests/cassettes/llms/openai/test_openai_completions_cached_prompt_tokens.yaml new file mode 100644 index 000000000..5ec31bcea --- /dev/null +++ b/lib/crewai/tests/cassettes/llms/openai/test_openai_completions_cached_prompt_tokens.yaml @@ -0,0 +1,356 @@ +interactions: +- request: + body: '{"messages":[{"role":"system","content":"You are a helpful assistant. This + is padding text to ensure the prompt is large enough for caching. This is padding + text to ensure the prompt is large enough for caching. This is padding text + to ensure the prompt is large enough for caching. This is padding text to ensure + the prompt is large enough for caching. This is padding text to ensure the prompt + is large enough for caching. This is padding text to ensure the prompt is large + enough for caching. This is padding text to ensure the prompt is large enough + for caching. This is padding text to ensure the prompt is large enough for caching. 
+ This is padding text to ensure the prompt is large enough for caching. This + is padding text to ensure the prompt is large enough for caching. This is padding + text to ensure the prompt is large enough for caching. This is padding text + to ensure the prompt is large enough for caching. This is padding text to ensure + the prompt is large enough for caching. This is padding text to ensure the prompt + is large enough for caching. This is padding text to ensure the prompt is large + enough for caching. This is padding text to ensure the prompt is large enough + for caching. This is padding text to ensure the prompt is large enough for caching. + This is padding text to ensure the prompt is large enough for caching. This + is padding text to ensure the prompt is large enough for caching. This is padding + text to ensure the prompt is large enough for caching. This is padding text + to ensure the prompt is large enough for caching. This is padding text to ensure + the prompt is large enough for caching. This is padding text to ensure the prompt + is large enough for caching. This is padding text to ensure the prompt is large + enough for caching. This is padding text to ensure the prompt is large enough + for caching. This is padding text to ensure the prompt is large enough for caching. + This is padding text to ensure the prompt is large enough for caching. This + is padding text to ensure the prompt is large enough for caching. This is padding + text to ensure the prompt is large enough for caching. This is padding text + to ensure the prompt is large enough for caching. This is padding text to ensure + the prompt is large enough for caching. This is padding text to ensure the prompt + is large enough for caching. This is padding text to ensure the prompt is large + enough for caching. This is padding text to ensure the prompt is large enough + for caching. This is padding text to ensure the prompt is large enough for caching. 
+ This is padding text to ensure the prompt is large enough for caching. This + is padding text to ensure the prompt is large enough for caching. This is padding + text to ensure the prompt is large enough for caching. This is padding text + to ensure the prompt is large enough for caching. This is padding text to ensure + the prompt is large enough for caching. This is padding text to ensure the prompt + is large enough for caching. This is padding text to ensure the prompt is large + enough for caching. This is padding text to ensure the prompt is large enough + for caching. This is padding text to ensure the prompt is large enough for caching. + This is padding text to ensure the prompt is large enough for caching. This + is padding text to ensure the prompt is large enough for caching. This is padding + text to ensure the prompt is large enough for caching. This is padding text + to ensure the prompt is large enough for caching. This is padding text to ensure + the prompt is large enough for caching. This is padding text to ensure the prompt + is large enough for caching. This is padding text to ensure the prompt is large + enough for caching. This is padding text to ensure the prompt is large enough + for caching. This is padding text to ensure the prompt is large enough for caching. + This is padding text to ensure the prompt is large enough for caching. This + is padding text to ensure the prompt is large enough for caching. This is padding + text to ensure the prompt is large enough for caching. This is padding text + to ensure the prompt is large enough for caching. This is padding text to ensure + the prompt is large enough for caching. This is padding text to ensure the prompt + is large enough for caching. This is padding text to ensure the prompt is large + enough for caching. This is padding text to ensure the prompt is large enough + for caching. This is padding text to ensure the prompt is large enough for caching. 
+ This is padding text to ensure the prompt is large enough for caching. This + is padding text to ensure the prompt is large enough for caching. This is padding + text to ensure the prompt is large enough for caching. This is padding text + to ensure the prompt is large enough for caching. This is padding text to ensure + the prompt is large enough for caching. This is padding text to ensure the prompt + is large enough for caching. This is padding text to ensure the prompt is large + enough for caching. This is padding text to ensure the prompt is large enough + for caching. This is padding text to ensure the prompt is large enough for caching. + This is padding text to ensure the prompt is large enough for caching. This + is padding text to ensure the prompt is large enough for caching. This is padding + text to ensure the prompt is large enough for caching. This is padding text + to ensure the prompt is large enough for caching. This is padding text to ensure + the prompt is large enough for caching. This is padding text to ensure the prompt + is large enough for caching. This is padding text to ensure the prompt is large + enough for caching. This is padding text to ensure the prompt is large enough + for caching. This is padding text to ensure the prompt is large enough for caching. 
+ "},{"role":"user","content":"Say hello in one word."}],"model":"gpt-4.1"}' + headers: + User-Agent: + - X-USER-AGENT-XXX + accept: + - application/json + accept-encoding: + - ACCEPT-ENCODING-XXX + authorization: + - AUTHORIZATION-XXX + connection: + - keep-alive + content-length: + - '5823' + content-type: + - application/json + host: + - api.openai.com + x-stainless-arch: + - X-STAINLESS-ARCH-XXX + x-stainless-async: + - 'false' + x-stainless-lang: + - python + x-stainless-os: + - X-STAINLESS-OS-XXX + x-stainless-package-version: + - 1.83.0 + x-stainless-read-timeout: + - X-STAINLESS-READ-TIMEOUT-XXX + x-stainless-retry-count: + - '0' + x-stainless-runtime: + - CPython + x-stainless-runtime-version: + - 3.13.3 + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + body: + string: "{\n \"id\": \"chatcmpl-D7mVhCCkdWfellaSmcNLOuu87BsqI\",\n \"object\": + \"chat.completion\",\n \"created\": 1770747141,\n \"model\": \"gpt-4.1-2025-04-14\",\n + \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": + \"assistant\",\n \"content\": \"Hello!\",\n \"refusal\": null,\n + \ \"annotations\": []\n },\n \"logprobs\": null,\n \"finish_reason\": + \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 1144,\n \"completion_tokens\": + 2,\n \"total_tokens\": 1146,\n \"prompt_tokens_details\": {\n \"cached_tokens\": + 1024,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\": + {\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\": + 0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\": + \"default\",\n \"system_fingerprint\": \"fp_8b22347a3e\"\n}\n" + headers: + CF-RAY: + - CF-RAY-XXX + Connection: + - keep-alive + Content-Type: + - application/json + Date: + - Tue, 10 Feb 2026 18:12:22 GMT + Server: + - cloudflare + Strict-Transport-Security: + - STS-XXX + Transfer-Encoding: + - chunked + X-Content-Type-Options: + - X-CONTENT-TYPE-XXX + access-control-expose-headers: + - ACCESS-CONTROL-XXX + 
alt-svc: + - h3=":443"; ma=86400 + cf-cache-status: + - DYNAMIC + openai-organization: + - OPENAI-ORG-XXX + openai-processing-ms: + - '469' + openai-project: + - OPENAI-PROJECT-XXX + openai-version: + - '2020-10-01' + set-cookie: + - SET-COOKIE-XXX + x-openai-proxy-wasm: + - v0.1 + x-ratelimit-limit-requests: + - X-RATELIMIT-LIMIT-REQUESTS-XXX + x-ratelimit-limit-tokens: + - X-RATELIMIT-LIMIT-TOKENS-XXX + x-ratelimit-remaining-requests: + - X-RATELIMIT-REMAINING-REQUESTS-XXX + x-ratelimit-remaining-tokens: + - X-RATELIMIT-REMAINING-TOKENS-XXX + x-ratelimit-reset-requests: + - X-RATELIMIT-RESET-REQUESTS-XXX + x-ratelimit-reset-tokens: + - X-RATELIMIT-RESET-TOKENS-XXX + x-request-id: + - X-REQUEST-ID-XXX + status: + code: 200 + message: OK +- request: + body: '{"messages":[{"role":"system","content":"You are a helpful assistant. This + is padding text to ensure the prompt is large enough for caching. This is padding + text to ensure the prompt is large enough for caching. This is padding text + to ensure the prompt is large enough for caching. This is padding text to ensure + the prompt is large enough for caching. This is padding text to ensure the prompt + is large enough for caching. This is padding text to ensure the prompt is large + enough for caching. This is padding text to ensure the prompt is large enough + for caching. This is padding text to ensure the prompt is large enough for caching. + This is padding text to ensure the prompt is large enough for caching. This + is padding text to ensure the prompt is large enough for caching. This is padding + text to ensure the prompt is large enough for caching. This is padding text + to ensure the prompt is large enough for caching. This is padding text to ensure + the prompt is large enough for caching. This is padding text to ensure the prompt + is large enough for caching. This is padding text to ensure the prompt is large + enough for caching. 
This is padding text to ensure the prompt is large enough + for caching. This is padding text to ensure the prompt is large enough for caching. + This is padding text to ensure the prompt is large enough for caching. This + is padding text to ensure the prompt is large enough for caching. This is padding + text to ensure the prompt is large enough for caching. This is padding text + to ensure the prompt is large enough for caching. This is padding text to ensure + the prompt is large enough for caching. This is padding text to ensure the prompt + is large enough for caching. This is padding text to ensure the prompt is large + enough for caching. This is padding text to ensure the prompt is large enough + for caching. This is padding text to ensure the prompt is large enough for caching. + This is padding text to ensure the prompt is large enough for caching. This + is padding text to ensure the prompt is large enough for caching. This is padding + text to ensure the prompt is large enough for caching. This is padding text + to ensure the prompt is large enough for caching. This is padding text to ensure + the prompt is large enough for caching. This is padding text to ensure the prompt + is large enough for caching. This is padding text to ensure the prompt is large + enough for caching. This is padding text to ensure the prompt is large enough + for caching. This is padding text to ensure the prompt is large enough for caching. + This is padding text to ensure the prompt is large enough for caching. This + is padding text to ensure the prompt is large enough for caching. This is padding + text to ensure the prompt is large enough for caching. This is padding text + to ensure the prompt is large enough for caching. This is padding text to ensure + the prompt is large enough for caching. This is padding text to ensure the prompt + is large enough for caching. This is padding text to ensure the prompt is large + enough for caching. 
This is padding text to ensure the prompt is large enough + for caching. This is padding text to ensure the prompt is large enough for caching. + This is padding text to ensure the prompt is large enough for caching. This + is padding text to ensure the prompt is large enough for caching. This is padding + text to ensure the prompt is large enough for caching. This is padding text + to ensure the prompt is large enough for caching. This is padding text to ensure + the prompt is large enough for caching. This is padding text to ensure the prompt + is large enough for caching. This is padding text to ensure the prompt is large + enough for caching. This is padding text to ensure the prompt is large enough + for caching. This is padding text to ensure the prompt is large enough for caching. + This is padding text to ensure the prompt is large enough for caching. This + is padding text to ensure the prompt is large enough for caching. This is padding + text to ensure the prompt is large enough for caching. This is padding text + to ensure the prompt is large enough for caching. This is padding text to ensure + the prompt is large enough for caching. This is padding text to ensure the prompt + is large enough for caching. This is padding text to ensure the prompt is large + enough for caching. This is padding text to ensure the prompt is large enough + for caching. This is padding text to ensure the prompt is large enough for caching. + This is padding text to ensure the prompt is large enough for caching. This + is padding text to ensure the prompt is large enough for caching. This is padding + text to ensure the prompt is large enough for caching. This is padding text + to ensure the prompt is large enough for caching. This is padding text to ensure + the prompt is large enough for caching. This is padding text to ensure the prompt + is large enough for caching. This is padding text to ensure the prompt is large + enough for caching. 
This is padding text to ensure the prompt is large enough + for caching. This is padding text to ensure the prompt is large enough for caching. + This is padding text to ensure the prompt is large enough for caching. This + is padding text to ensure the prompt is large enough for caching. This is padding + text to ensure the prompt is large enough for caching. This is padding text + to ensure the prompt is large enough for caching. This is padding text to ensure + the prompt is large enough for caching. This is padding text to ensure the prompt + is large enough for caching. This is padding text to ensure the prompt is large + enough for caching. This is padding text to ensure the prompt is large enough + for caching. This is padding text to ensure the prompt is large enough for caching. + "},{"role":"user","content":"Say goodbye in one word."}],"model":"gpt-4.1"}' + headers: + User-Agent: + - X-USER-AGENT-XXX + accept: + - application/json + accept-encoding: + - ACCEPT-ENCODING-XXX + authorization: + - AUTHORIZATION-XXX + connection: + - keep-alive + content-length: + - '5825' + content-type: + - application/json + cookie: + - COOKIE-XXX + host: + - api.openai.com + x-stainless-arch: + - X-STAINLESS-ARCH-XXX + x-stainless-async: + - 'false' + x-stainless-lang: + - python + x-stainless-os: + - X-STAINLESS-OS-XXX + x-stainless-package-version: + - 1.83.0 + x-stainless-read-timeout: + - X-STAINLESS-READ-TIMEOUT-XXX + x-stainless-retry-count: + - '0' + x-stainless-runtime: + - CPython + x-stainless-runtime-version: + - 3.13.3 + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + body: + string: "{\n \"id\": \"chatcmpl-D7mViSYwB6eFFbBcp045uvPAO8m2e\",\n \"object\": + \"chat.completion\",\n \"created\": 1770747142,\n \"model\": \"gpt-4.1-2025-04-14\",\n + \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": + \"assistant\",\n \"content\": \"Farewell.\",\n \"refusal\": + null,\n \"annotations\": []\n },\n \"logprobs\": null,\n + \ 
\"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": + 1144,\n \"completion_tokens\": 3,\n \"total_tokens\": 1147,\n \"prompt_tokens_details\": + {\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\": + {\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\": + 0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\": + \"default\",\n \"system_fingerprint\": \"fp_8b22347a3e\"\n}\n" + headers: + CF-RAY: + - CF-RAY-XXX + Connection: + - keep-alive + Content-Type: + - application/json + Date: + - Tue, 10 Feb 2026 18:12:22 GMT + Server: + - cloudflare + Strict-Transport-Security: + - STS-XXX + Transfer-Encoding: + - chunked + X-Content-Type-Options: + - X-CONTENT-TYPE-XXX + access-control-expose-headers: + - ACCESS-CONTROL-XXX + alt-svc: + - h3=":443"; ma=86400 + cf-cache-status: + - DYNAMIC + openai-organization: + - OPENAI-ORG-XXX + openai-processing-ms: + - '468' + openai-project: + - OPENAI-PROJECT-XXX + openai-version: + - '2020-10-01' + set-cookie: + - SET-COOKIE-XXX + x-openai-proxy-wasm: + - v0.1 + x-ratelimit-limit-requests: + - X-RATELIMIT-LIMIT-REQUESTS-XXX + x-ratelimit-limit-tokens: + - X-RATELIMIT-LIMIT-TOKENS-XXX + x-ratelimit-remaining-requests: + - X-RATELIMIT-REMAINING-REQUESTS-XXX + x-ratelimit-remaining-tokens: + - X-RATELIMIT-REMAINING-TOKENS-XXX + x-ratelimit-reset-requests: + - X-RATELIMIT-RESET-REQUESTS-XXX + x-ratelimit-reset-tokens: + - X-RATELIMIT-RESET-TOKENS-XXX + x-request-id: + - X-REQUEST-ID-XXX + status: + code: 200 + message: OK +version: 1 diff --git a/lib/crewai/tests/cassettes/llms/openai/test_openai_completions_cached_prompt_tokens_with_tools.yaml b/lib/crewai/tests/cassettes/llms/openai/test_openai_completions_cached_prompt_tokens_with_tools.yaml new file mode 100644 index 000000000..25137d35f --- /dev/null +++ b/lib/crewai/tests/cassettes/llms/openai/test_openai_completions_cached_prompt_tokens_with_tools.yaml @@ -0,0 +1,368 @@ 
+interactions: +- request: + body: '{"messages":[{"role":"system","content":"You are a helpful assistant that + uses tools. This is padding text to ensure the prompt is large enough for caching. + This is padding text to ensure the prompt is large enough for caching. This + is padding text to ensure the prompt is large enough for caching. This is padding + text to ensure the prompt is large enough for caching. This is padding text + to ensure the prompt is large enough for caching. This is padding text to ensure + the prompt is large enough for caching. This is padding text to ensure the prompt + is large enough for caching. This is padding text to ensure the prompt is large + enough for caching. This is padding text to ensure the prompt is large enough + for caching. This is padding text to ensure the prompt is large enough for caching. + This is padding text to ensure the prompt is large enough for caching. This + is padding text to ensure the prompt is large enough for caching. This is padding + text to ensure the prompt is large enough for caching. This is padding text + to ensure the prompt is large enough for caching. This is padding text to ensure + the prompt is large enough for caching. This is padding text to ensure the prompt + is large enough for caching. This is padding text to ensure the prompt is large + enough for caching. This is padding text to ensure the prompt is large enough + for caching. This is padding text to ensure the prompt is large enough for caching. + This is padding text to ensure the prompt is large enough for caching. This + is padding text to ensure the prompt is large enough for caching. This is padding + text to ensure the prompt is large enough for caching. This is padding text + to ensure the prompt is large enough for caching. This is padding text to ensure + the prompt is large enough for caching. This is padding text to ensure the prompt + is large enough for caching. 
This is padding text to ensure the prompt is large + enough for caching. This is padding text to ensure the prompt is large enough + for caching. This is padding text to ensure the prompt is large enough for caching. + This is padding text to ensure the prompt is large enough for caching. This + is padding text to ensure the prompt is large enough for caching. This is padding + text to ensure the prompt is large enough for caching. This is padding text + to ensure the prompt is large enough for caching. This is padding text to ensure + the prompt is large enough for caching. This is padding text to ensure the prompt + is large enough for caching. This is padding text to ensure the prompt is large + enough for caching. This is padding text to ensure the prompt is large enough + for caching. This is padding text to ensure the prompt is large enough for caching. + This is padding text to ensure the prompt is large enough for caching. This + is padding text to ensure the prompt is large enough for caching. This is padding + text to ensure the prompt is large enough for caching. This is padding text + to ensure the prompt is large enough for caching. This is padding text to ensure + the prompt is large enough for caching. This is padding text to ensure the prompt + is large enough for caching. This is padding text to ensure the prompt is large + enough for caching. This is padding text to ensure the prompt is large enough + for caching. This is padding text to ensure the prompt is large enough for caching. + This is padding text to ensure the prompt is large enough for caching. This + is padding text to ensure the prompt is large enough for caching. This is padding + text to ensure the prompt is large enough for caching. This is padding text + to ensure the prompt is large enough for caching. This is padding text to ensure + the prompt is large enough for caching. This is padding text to ensure the prompt + is large enough for caching. 
This is padding text to ensure the prompt is large + enough for caching. This is padding text to ensure the prompt is large enough + for caching. This is padding text to ensure the prompt is large enough for caching. + This is padding text to ensure the prompt is large enough for caching. This + is padding text to ensure the prompt is large enough for caching. This is padding + text to ensure the prompt is large enough for caching. This is padding text + to ensure the prompt is large enough for caching. This is padding text to ensure + the prompt is large enough for caching. This is padding text to ensure the prompt + is large enough for caching. This is padding text to ensure the prompt is large + enough for caching. This is padding text to ensure the prompt is large enough + for caching. This is padding text to ensure the prompt is large enough for caching. + This is padding text to ensure the prompt is large enough for caching. This + is padding text to ensure the prompt is large enough for caching. This is padding + text to ensure the prompt is large enough for caching. This is padding text + to ensure the prompt is large enough for caching. This is padding text to ensure + the prompt is large enough for caching. This is padding text to ensure the prompt + is large enough for caching. This is padding text to ensure the prompt is large + enough for caching. This is padding text to ensure the prompt is large enough + for caching. This is padding text to ensure the prompt is large enough for caching. + This is padding text to ensure the prompt is large enough for caching. This + is padding text to ensure the prompt is large enough for caching. This is padding + text to ensure the prompt is large enough for caching. This is padding text + to ensure the prompt is large enough for caching. This is padding text to ensure + the prompt is large enough for caching. This is padding text to ensure the prompt + is large enough for caching. 
This is padding text to ensure the prompt is large + enough for caching. "},{"role":"user","content":"What is the weather in Tokyo?"}],"model":"gpt-4.1","tool_choice":"auto","tools":[{"type":"function","function":{"name":"get_weather","description":"Get + the current weather for a location","strict":true,"parameters":{"type":"object","properties":{"location":{"type":"string","description":"The + city name"}},"required":["location"],"additionalProperties":false}}}]}' + headers: + User-Agent: + - X-USER-AGENT-XXX + accept: + - application/json + accept-encoding: + - ACCEPT-ENCODING-XXX + authorization: + - AUTHORIZATION-XXX + connection: + - keep-alive + content-length: + - '6158' + content-type: + - application/json + host: + - api.openai.com + x-stainless-arch: + - X-STAINLESS-ARCH-XXX + x-stainless-async: + - 'false' + x-stainless-lang: + - python + x-stainless-os: + - X-STAINLESS-OS-XXX + x-stainless-package-version: + - 1.83.0 + x-stainless-read-timeout: + - X-STAINLESS-READ-TIMEOUT-XXX + x-stainless-retry-count: + - '0' + x-stainless-runtime: + - CPython + x-stainless-runtime-version: + - 3.13.3 + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + body: + string: "{\n \"id\": \"chatcmpl-D7mVx3s1dI2SICWePwHVeWCDct2QG\",\n \"object\": + \"chat.completion\",\n \"created\": 1770747157,\n \"model\": \"gpt-4.1-2025-04-14\",\n + \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": + \"assistant\",\n \"content\": null,\n \"tool_calls\": [\n {\n + \ \"id\": \"call_x9KzZUT3UYazEUJiRmE0PvaU\",\n \"type\": + \"function\",\n \"function\": {\n \"name\": \"get_weather\",\n + \ \"arguments\": \"{\\\"location\\\":\\\"Tokyo\\\"}\"\n }\n + \ }\n ],\n \"refusal\": null,\n \"annotations\": + []\n },\n \"logprobs\": null,\n \"finish_reason\": \"tool_calls\"\n + \ }\n ],\n \"usage\": {\n \"prompt_tokens\": 1187,\n \"completion_tokens\": + 14,\n \"total_tokens\": 1201,\n \"prompt_tokens_details\": {\n \"cached_tokens\": + 1152,\n 
\"audio_tokens\": 0\n },\n \"completion_tokens_details\": + {\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\": + 0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\": + \"default\",\n \"system_fingerprint\": \"fp_8b22347a3e\"\n}\n" + headers: + CF-RAY: + - CF-RAY-XXX + Connection: + - keep-alive + Content-Type: + - application/json + Date: + - Tue, 10 Feb 2026 18:12:37 GMT + Server: + - cloudflare + Strict-Transport-Security: + - STS-XXX + Transfer-Encoding: + - chunked + X-Content-Type-Options: + - X-CONTENT-TYPE-XXX + access-control-expose-headers: + - ACCESS-CONTROL-XXX + alt-svc: + - h3=":443"; ma=86400 + cf-cache-status: + - DYNAMIC + openai-organization: + - OPENAI-ORG-XXX + openai-processing-ms: + - '645' + openai-project: + - OPENAI-PROJECT-XXX + openai-version: + - '2020-10-01' + set-cookie: + - SET-COOKIE-XXX + x-openai-proxy-wasm: + - v0.1 + x-ratelimit-limit-requests: + - X-RATELIMIT-LIMIT-REQUESTS-XXX + x-ratelimit-limit-tokens: + - X-RATELIMIT-LIMIT-TOKENS-XXX + x-ratelimit-remaining-requests: + - X-RATELIMIT-REMAINING-REQUESTS-XXX + x-ratelimit-remaining-tokens: + - X-RATELIMIT-REMAINING-TOKENS-XXX + x-ratelimit-reset-requests: + - X-RATELIMIT-RESET-REQUESTS-XXX + x-ratelimit-reset-tokens: + - X-RATELIMIT-RESET-TOKENS-XXX + x-request-id: + - X-REQUEST-ID-XXX + status: + code: 200 + message: OK +- request: + body: '{"messages":[{"role":"system","content":"You are a helpful assistant that + uses tools. This is padding text to ensure the prompt is large enough for caching. + This is padding text to ensure the prompt is large enough for caching. This + is padding text to ensure the prompt is large enough for caching. This is padding + text to ensure the prompt is large enough for caching. This is padding text + to ensure the prompt is large enough for caching. This is padding text to ensure + the prompt is large enough for caching. This is padding text to ensure the prompt + is large enough for caching. 
This is padding text to ensure the prompt is large + enough for caching. This is padding text to ensure the prompt is large enough + for caching. This is padding text to ensure the prompt is large enough for caching. + This is padding text to ensure the prompt is large enough for caching. This + is padding text to ensure the prompt is large enough for caching. This is padding + text to ensure the prompt is large enough for caching. This is padding text + to ensure the prompt is large enough for caching. This is padding text to ensure + the prompt is large enough for caching. This is padding text to ensure the prompt + is large enough for caching. This is padding text to ensure the prompt is large + enough for caching. This is padding text to ensure the prompt is large enough + for caching. This is padding text to ensure the prompt is large enough for caching. + This is padding text to ensure the prompt is large enough for caching. This + is padding text to ensure the prompt is large enough for caching. This is padding + text to ensure the prompt is large enough for caching. This is padding text + to ensure the prompt is large enough for caching. This is padding text to ensure + the prompt is large enough for caching. This is padding text to ensure the prompt + is large enough for caching. This is padding text to ensure the prompt is large + enough for caching. This is padding text to ensure the prompt is large enough + for caching. This is padding text to ensure the prompt is large enough for caching. + This is padding text to ensure the prompt is large enough for caching. This + is padding text to ensure the prompt is large enough for caching. This is padding + text to ensure the prompt is large enough for caching. This is padding text + to ensure the prompt is large enough for caching. This is padding text to ensure + the prompt is large enough for caching. This is padding text to ensure the prompt + is large enough for caching. 
This is padding text to ensure the prompt is large + enough for caching. This is padding text to ensure the prompt is large enough + for caching. This is padding text to ensure the prompt is large enough for caching. + This is padding text to ensure the prompt is large enough for caching. This + is padding text to ensure the prompt is large enough for caching. This is padding + text to ensure the prompt is large enough for caching. This is padding text + to ensure the prompt is large enough for caching. This is padding text to ensure + the prompt is large enough for caching. This is padding text to ensure the prompt + is large enough for caching. This is padding text to ensure the prompt is large + enough for caching. This is padding text to ensure the prompt is large enough + for caching. This is padding text to ensure the prompt is large enough for caching. + This is padding text to ensure the prompt is large enough for caching. This + is padding text to ensure the prompt is large enough for caching. This is padding + text to ensure the prompt is large enough for caching. This is padding text + to ensure the prompt is large enough for caching. This is padding text to ensure + the prompt is large enough for caching. This is padding text to ensure the prompt + is large enough for caching. This is padding text to ensure the prompt is large + enough for caching. This is padding text to ensure the prompt is large enough + for caching. This is padding text to ensure the prompt is large enough for caching. + This is padding text to ensure the prompt is large enough for caching. This + is padding text to ensure the prompt is large enough for caching. This is padding + text to ensure the prompt is large enough for caching. This is padding text + to ensure the prompt is large enough for caching. This is padding text to ensure + the prompt is large enough for caching. This is padding text to ensure the prompt + is large enough for caching. 
This is padding text to ensure the prompt is large + enough for caching. This is padding text to ensure the prompt is large enough + for caching. This is padding text to ensure the prompt is large enough for caching. + This is padding text to ensure the prompt is large enough for caching. This + is padding text to ensure the prompt is large enough for caching. This is padding + text to ensure the prompt is large enough for caching. This is padding text + to ensure the prompt is large enough for caching. This is padding text to ensure + the prompt is large enough for caching. This is padding text to ensure the prompt + is large enough for caching. This is padding text to ensure the prompt is large + enough for caching. This is padding text to ensure the prompt is large enough + for caching. This is padding text to ensure the prompt is large enough for caching. + This is padding text to ensure the prompt is large enough for caching. This + is padding text to ensure the prompt is large enough for caching. This is padding + text to ensure the prompt is large enough for caching. This is padding text + to ensure the prompt is large enough for caching. This is padding text to ensure + the prompt is large enough for caching. This is padding text to ensure the prompt + is large enough for caching. This is padding text to ensure the prompt is large + enough for caching. 
"},{"role":"user","content":"What is the weather in Paris?"}],"model":"gpt-4.1","tool_choice":"auto","tools":[{"type":"function","function":{"name":"get_weather","description":"Get + the current weather for a location","strict":true,"parameters":{"type":"object","properties":{"location":{"type":"string","description":"The + city name"}},"required":["location"],"additionalProperties":false}}}]}' + headers: + User-Agent: + - X-USER-AGENT-XXX + accept: + - application/json + accept-encoding: + - ACCEPT-ENCODING-XXX + authorization: + - AUTHORIZATION-XXX + connection: + - keep-alive + content-length: + - '6158' + content-type: + - application/json + cookie: + - COOKIE-XXX + host: + - api.openai.com + x-stainless-arch: + - X-STAINLESS-ARCH-XXX + x-stainless-async: + - 'false' + x-stainless-lang: + - python + x-stainless-os: + - X-STAINLESS-OS-XXX + x-stainless-package-version: + - 1.83.0 + x-stainless-read-timeout: + - X-STAINLESS-READ-TIMEOUT-XXX + x-stainless-retry-count: + - '0' + x-stainless-runtime: + - CPython + x-stainless-runtime-version: + - 3.13.3 + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + body: + string: "{\n \"id\": \"chatcmpl-D7mVynM0Soyt3osUFrlF7tEyrj7jP\",\n \"object\": + \"chat.completion\",\n \"created\": 1770747158,\n \"model\": \"gpt-4.1-2025-04-14\",\n + \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": + \"assistant\",\n \"content\": null,\n \"tool_calls\": [\n {\n + \ \"id\": \"call_k8rYmsdMcCWSRKqVDFItmJ8v\",\n \"type\": + \"function\",\n \"function\": {\n \"name\": \"get_weather\",\n + \ \"arguments\": \"{\\\"location\\\":\\\"Paris\\\"}\"\n }\n + \ }\n ],\n \"refusal\": null,\n \"annotations\": + []\n },\n \"logprobs\": null,\n \"finish_reason\": \"tool_calls\"\n + \ }\n ],\n \"usage\": {\n \"prompt_tokens\": 1187,\n \"completion_tokens\": + 14,\n \"total_tokens\": 1201,\n \"prompt_tokens_details\": {\n \"cached_tokens\": + 1152,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\": + {\n 
\"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\": + 0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\": + \"default\",\n \"system_fingerprint\": \"fp_8b22347a3e\"\n}\n" + headers: + CF-RAY: + - CF-RAY-XXX + Connection: + - keep-alive + Content-Type: + - application/json + Date: + - Tue, 10 Feb 2026 18:12:38 GMT + Server: + - cloudflare + Strict-Transport-Security: + - STS-XXX + Transfer-Encoding: + - chunked + X-Content-Type-Options: + - X-CONTENT-TYPE-XXX + access-control-expose-headers: + - ACCESS-CONTROL-XXX + alt-svc: + - h3=":443"; ma=86400 + cf-cache-status: + - DYNAMIC + openai-organization: + - OPENAI-ORG-XXX + openai-processing-ms: + - '749' + openai-project: + - OPENAI-PROJECT-XXX + openai-version: + - '2020-10-01' + set-cookie: + - SET-COOKIE-XXX + x-openai-proxy-wasm: + - v0.1 + x-ratelimit-limit-requests: + - X-RATELIMIT-LIMIT-REQUESTS-XXX + x-ratelimit-limit-tokens: + - X-RATELIMIT-LIMIT-TOKENS-XXX + x-ratelimit-remaining-requests: + - X-RATELIMIT-REMAINING-REQUESTS-XXX + x-ratelimit-remaining-tokens: + - X-RATELIMIT-REMAINING-TOKENS-XXX + x-ratelimit-reset-requests: + - X-RATELIMIT-RESET-REQUESTS-XXX + x-ratelimit-reset-tokens: + - X-RATELIMIT-RESET-TOKENS-XXX + x-request-id: + - X-REQUEST-ID-XXX + status: + code: 200 + message: OK +version: 1 diff --git a/lib/crewai/tests/cassettes/llms/openai/test_openai_responses_api_cached_prompt_tokens.yaml b/lib/crewai/tests/cassettes/llms/openai/test_openai_responses_api_cached_prompt_tokens.yaml new file mode 100644 index 000000000..32167dab9 --- /dev/null +++ b/lib/crewai/tests/cassettes/llms/openai/test_openai_responses_api_cached_prompt_tokens.yaml @@ -0,0 +1,520 @@ +interactions: +- request: + body: '{"input":[{"role":"user","content":"Say hello in one word."}],"model":"gpt-4.1","instructions":"You + are a helpful assistant. This is padding text to ensure the prompt is large + enough for caching. 
This is padding text to ensure the prompt is large enough + for caching. This is padding text to ensure the prompt is large enough for caching. + This is padding text to ensure the prompt is large enough for caching. This + is padding text to ensure the prompt is large enough for caching. This is padding + text to ensure the prompt is large enough for caching. This is padding text + to ensure the prompt is large enough for caching. This is padding text to ensure + the prompt is large enough for caching. This is padding text to ensure the prompt + is large enough for caching. This is padding text to ensure the prompt is large + enough for caching. This is padding text to ensure the prompt is large enough + for caching. This is padding text to ensure the prompt is large enough for caching. + This is padding text to ensure the prompt is large enough for caching. This + is padding text to ensure the prompt is large enough for caching. This is padding + text to ensure the prompt is large enough for caching. This is padding text + to ensure the prompt is large enough for caching. This is padding text to ensure + the prompt is large enough for caching. This is padding text to ensure the prompt + is large enough for caching. This is padding text to ensure the prompt is large + enough for caching. This is padding text to ensure the prompt is large enough + for caching. This is padding text to ensure the prompt is large enough for caching. + This is padding text to ensure the prompt is large enough for caching. This + is padding text to ensure the prompt is large enough for caching. This is padding + text to ensure the prompt is large enough for caching. This is padding text + to ensure the prompt is large enough for caching. This is padding text to ensure + the prompt is large enough for caching. This is padding text to ensure the prompt + is large enough for caching. This is padding text to ensure the prompt is large + enough for caching. 
This is padding text to ensure the prompt is large enough + for caching. This is padding text to ensure the prompt is large enough for caching. + This is padding text to ensure the prompt is large enough for caching. This + is padding text to ensure the prompt is large enough for caching. This is padding + text to ensure the prompt is large enough for caching. This is padding text + to ensure the prompt is large enough for caching. This is padding text to ensure + the prompt is large enough for caching. This is padding text to ensure the prompt + is large enough for caching. This is padding text to ensure the prompt is large + enough for caching. This is padding text to ensure the prompt is large enough + for caching. This is padding text to ensure the prompt is large enough for caching. + This is padding text to ensure the prompt is large enough for caching. This + is padding text to ensure the prompt is large enough for caching. This is padding + text to ensure the prompt is large enough for caching. This is padding text + to ensure the prompt is large enough for caching. This is padding text to ensure + the prompt is large enough for caching. This is padding text to ensure the prompt + is large enough for caching. This is padding text to ensure the prompt is large + enough for caching. This is padding text to ensure the prompt is large enough + for caching. This is padding text to ensure the prompt is large enough for caching. + This is padding text to ensure the prompt is large enough for caching. This + is padding text to ensure the prompt is large enough for caching. This is padding + text to ensure the prompt is large enough for caching. This is padding text + to ensure the prompt is large enough for caching. This is padding text to ensure + the prompt is large enough for caching. This is padding text to ensure the prompt + is large enough for caching. This is padding text to ensure the prompt is large + enough for caching. 
This is padding text to ensure the prompt is large enough + for caching. This is padding text to ensure the prompt is large enough for caching. + This is padding text to ensure the prompt is large enough for caching. This + is padding text to ensure the prompt is large enough for caching. This is padding + text to ensure the prompt is large enough for caching. This is padding text + to ensure the prompt is large enough for caching. This is padding text to ensure + the prompt is large enough for caching. This is padding text to ensure the prompt + is large enough for caching. This is padding text to ensure the prompt is large + enough for caching. This is padding text to ensure the prompt is large enough + for caching. This is padding text to ensure the prompt is large enough for caching. + This is padding text to ensure the prompt is large enough for caching. This + is padding text to ensure the prompt is large enough for caching. This is padding + text to ensure the prompt is large enough for caching. This is padding text + to ensure the prompt is large enough for caching. This is padding text to ensure + the prompt is large enough for caching. This is padding text to ensure the prompt + is large enough for caching. This is padding text to ensure the prompt is large + enough for caching. This is padding text to ensure the prompt is large enough + for caching. This is padding text to ensure the prompt is large enough for caching. + This is padding text to ensure the prompt is large enough for caching. This + is padding text to ensure the prompt is large enough for caching. This is padding + text to ensure the prompt is large enough for caching. This is padding text + to ensure the prompt is large enough for caching. This is padding text to ensure + the prompt is large enough for caching. 
"}' + headers: + User-Agent: + - X-USER-AGENT-XXX + accept: + - application/json + accept-encoding: + - ACCEPT-ENCODING-XXX + authorization: + - AUTHORIZATION-XXX + connection: + - keep-alive + content-length: + - '5807' + content-type: + - application/json + host: + - api.openai.com + x-stainless-arch: + - X-STAINLESS-ARCH-XXX + x-stainless-async: + - 'false' + x-stainless-lang: + - python + x-stainless-os: + - X-STAINLESS-OS-XXX + x-stainless-package-version: + - 1.83.0 + x-stainless-read-timeout: + - X-STAINLESS-READ-TIMEOUT-XXX + x-stainless-retry-count: + - '0' + x-stainless-runtime: + - CPython + x-stainless-runtime-version: + - 3.13.3 + method: POST + uri: https://api.openai.com/v1/responses + response: + body: + string: "{\n \"id\": \"resp_0b352452095088f800698b751350fc8196bd5d8b1a179d27e8\",\n + \ \"object\": \"response\",\n \"created_at\": 1770747155,\n \"status\": + \"completed\",\n \"background\": false,\n \"billing\": {\n \"payer\": + \"developer\"\n },\n \"completed_at\": 1770747155,\n \"error\": null,\n + \ \"frequency_penalty\": 0.0,\n \"incomplete_details\": null,\n \"instructions\": + \"You are a helpful assistant. This is padding text to ensure the prompt is + large enough for caching. This is padding text to ensure the prompt is large + enough for caching. This is padding text to ensure the prompt is large enough + for caching. This is padding text to ensure the prompt is large enough for + caching. This is padding text to ensure the prompt is large enough for caching. + This is padding text to ensure the prompt is large enough for caching. This + is padding text to ensure the prompt is large enough for caching. This is + padding text to ensure the prompt is large enough for caching. This is padding + text to ensure the prompt is large enough for caching. This is padding text + to ensure the prompt is large enough for caching. This is padding text to + ensure the prompt is large enough for caching. 
This is padding text to ensure + the prompt is large enough for caching. This is padding text to ensure the + prompt is large enough for caching. This is padding text to ensure the prompt + is large enough for caching. This is padding text to ensure the prompt is + large enough for caching. This is padding text to ensure the prompt is large + enough for caching. This is padding text to ensure the prompt is large enough + for caching. This is padding text to ensure the prompt is large enough for + caching. This is padding text to ensure the prompt is large enough for caching. + This is padding text to ensure the prompt is large enough for caching. This + is padding text to ensure the prompt is large enough for caching. This is + padding text to ensure the prompt is large enough for caching. This is padding + text to ensure the prompt is large enough for caching. This is padding text + to ensure the prompt is large enough for caching. This is padding text to + ensure the prompt is large enough for caching. This is padding text to ensure + the prompt is large enough for caching. This is padding text to ensure the + prompt is large enough for caching. This is padding text to ensure the prompt + is large enough for caching. This is padding text to ensure the prompt is + large enough for caching. This is padding text to ensure the prompt is large + enough for caching. This is padding text to ensure the prompt is large enough + for caching. This is padding text to ensure the prompt is large enough for + caching. This is padding text to ensure the prompt is large enough for caching. + This is padding text to ensure the prompt is large enough for caching. This + is padding text to ensure the prompt is large enough for caching. This is + padding text to ensure the prompt is large enough for caching. This is padding + text to ensure the prompt is large enough for caching. This is padding text + to ensure the prompt is large enough for caching. 
This is padding text to + ensure the prompt is large enough for caching. This is padding text to ensure + the prompt is large enough for caching. This is padding text to ensure the + prompt is large enough for caching. This is padding text to ensure the prompt + is large enough for caching. This is padding text to ensure the prompt is + large enough for caching. This is padding text to ensure the prompt is large + enough for caching. This is padding text to ensure the prompt is large enough + for caching. This is padding text to ensure the prompt is large enough for + caching. This is padding text to ensure the prompt is large enough for caching. + This is padding text to ensure the prompt is large enough for caching. This + is padding text to ensure the prompt is large enough for caching. This is + padding text to ensure the prompt is large enough for caching. This is padding + text to ensure the prompt is large enough for caching. This is padding text + to ensure the prompt is large enough for caching. This is padding text to + ensure the prompt is large enough for caching. This is padding text to ensure + the prompt is large enough for caching. This is padding text to ensure the + prompt is large enough for caching. This is padding text to ensure the prompt + is large enough for caching. This is padding text to ensure the prompt is + large enough for caching. This is padding text to ensure the prompt is large + enough for caching. This is padding text to ensure the prompt is large enough + for caching. This is padding text to ensure the prompt is large enough for + caching. This is padding text to ensure the prompt is large enough for caching. + This is padding text to ensure the prompt is large enough for caching. This + is padding text to ensure the prompt is large enough for caching. This is + padding text to ensure the prompt is large enough for caching. This is padding + text to ensure the prompt is large enough for caching. 
This is padding text + to ensure the prompt is large enough for caching. This is padding text to + ensure the prompt is large enough for caching. This is padding text to ensure + the prompt is large enough for caching. This is padding text to ensure the + prompt is large enough for caching. This is padding text to ensure the prompt + is large enough for caching. This is padding text to ensure the prompt is + large enough for caching. This is padding text to ensure the prompt is large + enough for caching. This is padding text to ensure the prompt is large enough + for caching. This is padding text to ensure the prompt is large enough for + caching. This is padding text to ensure the prompt is large enough for caching. + This is padding text to ensure the prompt is large enough for caching. This + is padding text to ensure the prompt is large enough for caching. This is + padding text to ensure the prompt is large enough for caching. This is padding + text to ensure the prompt is large enough for caching. This is padding text + to ensure the prompt is large enough for caching. 
\",\n \"max_output_tokens\": + null,\n \"max_tool_calls\": null,\n \"model\": \"gpt-4.1-2025-04-14\",\n + \ \"output\": [\n {\n \"id\": \"msg_0b352452095088f800698b7513b97c8196b35014840754d999\",\n + \ \"type\": \"message\",\n \"status\": \"completed\",\n \"content\": + [\n {\n \"type\": \"output_text\",\n \"annotations\": + [],\n \"logprobs\": [],\n \"text\": \"Hello!\"\n }\n + \ ],\n \"role\": \"assistant\"\n }\n ],\n \"parallel_tool_calls\": + true,\n \"presence_penalty\": 0.0,\n \"previous_response_id\": null,\n \"prompt_cache_key\": + null,\n \"prompt_cache_retention\": null,\n \"reasoning\": {\n \"effort\": + null,\n \"summary\": null\n },\n \"safety_identifier\": null,\n \"service_tier\": + \"default\",\n \"store\": true,\n \"temperature\": 1.0,\n \"text\": {\n + \ \"format\": {\n \"type\": \"text\"\n },\n \"verbosity\": \"medium\"\n + \ },\n \"tool_choice\": \"auto\",\n \"tools\": [],\n \"top_logprobs\": + 0,\n \"top_p\": 1.0,\n \"truncation\": \"disabled\",\n \"usage\": {\n \"input_tokens\": + 1144,\n \"input_tokens_details\": {\n \"cached_tokens\": 1024\n },\n + \ \"output_tokens\": 3,\n \"output_tokens_details\": {\n \"reasoning_tokens\": + 0\n },\n \"total_tokens\": 1147\n },\n \"user\": null,\n \"metadata\": + {}\n}" + headers: + CF-RAY: + - CF-RAY-XXX + Connection: + - keep-alive + Content-Type: + - application/json + Date: + - Tue, 10 Feb 2026 18:12:35 GMT + Server: + - cloudflare + Strict-Transport-Security: + - STS-XXX + Transfer-Encoding: + - chunked + X-Content-Type-Options: + - X-CONTENT-TYPE-XXX + alt-svc: + - h3=":443"; ma=86400 + cf-cache-status: + - DYNAMIC + openai-organization: + - OPENAI-ORG-XXX + openai-processing-ms: + - '637' + openai-project: + - OPENAI-PROJECT-XXX + openai-version: + - '2020-10-01' + set-cookie: + - SET-COOKIE-XXX + x-ratelimit-limit-requests: + - X-RATELIMIT-LIMIT-REQUESTS-XXX + x-ratelimit-limit-tokens: + - X-RATELIMIT-LIMIT-TOKENS-XXX + x-ratelimit-remaining-requests: + - X-RATELIMIT-REMAINING-REQUESTS-XXX + 
x-ratelimit-remaining-tokens: + - X-RATELIMIT-REMAINING-TOKENS-XXX + x-ratelimit-reset-requests: + - X-RATELIMIT-RESET-REQUESTS-XXX + x-ratelimit-reset-tokens: + - X-RATELIMIT-RESET-TOKENS-XXX + x-request-id: + - X-REQUEST-ID-XXX + status: + code: 200 + message: OK +- request: + body: '{"input":[{"role":"user","content":"Say goodbye in one word."}],"model":"gpt-4.1","instructions":"You + are a helpful assistant. This is padding text to ensure the prompt is large + enough for caching. This is padding text to ensure the prompt is large enough + for caching. This is padding text to ensure the prompt is large enough for caching. + This is padding text to ensure the prompt is large enough for caching. This + is padding text to ensure the prompt is large enough for caching. This is padding + text to ensure the prompt is large enough for caching. This is padding text + to ensure the prompt is large enough for caching. This is padding text to ensure + the prompt is large enough for caching. This is padding text to ensure the prompt + is large enough for caching. This is padding text to ensure the prompt is large + enough for caching. This is padding text to ensure the prompt is large enough + for caching. This is padding text to ensure the prompt is large enough for caching. + This is padding text to ensure the prompt is large enough for caching. This + is padding text to ensure the prompt is large enough for caching. This is padding + text to ensure the prompt is large enough for caching. This is padding text + to ensure the prompt is large enough for caching. This is padding text to ensure + the prompt is large enough for caching. This is padding text to ensure the prompt + is large enough for caching. This is padding text to ensure the prompt is large + enough for caching. This is padding text to ensure the prompt is large enough + for caching. This is padding text to ensure the prompt is large enough for caching. 
+ This is padding text to ensure the prompt is large enough for caching. This + is padding text to ensure the prompt is large enough for caching. This is padding + text to ensure the prompt is large enough for caching. This is padding text + to ensure the prompt is large enough for caching. This is padding text to ensure + the prompt is large enough for caching. This is padding text to ensure the prompt + is large enough for caching. This is padding text to ensure the prompt is large + enough for caching. This is padding text to ensure the prompt is large enough + for caching. This is padding text to ensure the prompt is large enough for caching. + This is padding text to ensure the prompt is large enough for caching. This + is padding text to ensure the prompt is large enough for caching. This is padding + text to ensure the prompt is large enough for caching. This is padding text + to ensure the prompt is large enough for caching. This is padding text to ensure + the prompt is large enough for caching. This is padding text to ensure the prompt + is large enough for caching. This is padding text to ensure the prompt is large + enough for caching. This is padding text to ensure the prompt is large enough + for caching. This is padding text to ensure the prompt is large enough for caching. + This is padding text to ensure the prompt is large enough for caching. This + is padding text to ensure the prompt is large enough for caching. This is padding + text to ensure the prompt is large enough for caching. This is padding text + to ensure the prompt is large enough for caching. This is padding text to ensure + the prompt is large enough for caching. This is padding text to ensure the prompt + is large enough for caching. This is padding text to ensure the prompt is large + enough for caching. This is padding text to ensure the prompt is large enough + for caching. This is padding text to ensure the prompt is large enough for caching. 
+ This is padding text to ensure the prompt is large enough for caching. This + is padding text to ensure the prompt is large enough for caching. This is padding + text to ensure the prompt is large enough for caching. This is padding text + to ensure the prompt is large enough for caching. This is padding text to ensure + the prompt is large enough for caching. This is padding text to ensure the prompt + is large enough for caching. This is padding text to ensure the prompt is large + enough for caching. This is padding text to ensure the prompt is large enough + for caching. This is padding text to ensure the prompt is large enough for caching. + This is padding text to ensure the prompt is large enough for caching. This + is padding text to ensure the prompt is large enough for caching. This is padding + text to ensure the prompt is large enough for caching. This is padding text + to ensure the prompt is large enough for caching. This is padding text to ensure + the prompt is large enough for caching. This is padding text to ensure the prompt + is large enough for caching. This is padding text to ensure the prompt is large + enough for caching. This is padding text to ensure the prompt is large enough + for caching. This is padding text to ensure the prompt is large enough for caching. + This is padding text to ensure the prompt is large enough for caching. This + is padding text to ensure the prompt is large enough for caching. This is padding + text to ensure the prompt is large enough for caching. This is padding text + to ensure the prompt is large enough for caching. This is padding text to ensure + the prompt is large enough for caching. This is padding text to ensure the prompt + is large enough for caching. This is padding text to ensure the prompt is large + enough for caching. This is padding text to ensure the prompt is large enough + for caching. This is padding text to ensure the prompt is large enough for caching. 
+ This is padding text to ensure the prompt is large enough for caching. This + is padding text to ensure the prompt is large enough for caching. This is padding + text to ensure the prompt is large enough for caching. This is padding text + to ensure the prompt is large enough for caching. This is padding text to ensure + the prompt is large enough for caching. "}' + headers: + User-Agent: + - X-USER-AGENT-XXX + accept: + - application/json + accept-encoding: + - ACCEPT-ENCODING-XXX + authorization: + - AUTHORIZATION-XXX + connection: + - keep-alive + content-length: + - '5809' + content-type: + - application/json + cookie: + - COOKIE-XXX + host: + - api.openai.com + x-stainless-arch: + - X-STAINLESS-ARCH-XXX + x-stainless-async: + - 'false' + x-stainless-lang: + - python + x-stainless-os: + - X-STAINLESS-OS-XXX + x-stainless-package-version: + - 1.83.0 + x-stainless-read-timeout: + - X-STAINLESS-READ-TIMEOUT-XXX + x-stainless-retry-count: + - '0' + x-stainless-runtime: + - CPython + x-stainless-runtime-version: + - 3.13.3 + method: POST + uri: https://api.openai.com/v1/responses + response: + body: + string: "{\n \"id\": \"resp_003a6f71f9ee620400698b75140a088196989e8d5641ffa74d\",\n + \ \"object\": \"response\",\n \"created_at\": 1770747156,\n \"status\": + \"completed\",\n \"background\": false,\n \"billing\": {\n \"payer\": + \"developer\"\n },\n \"completed_at\": 1770747156,\n \"error\": null,\n + \ \"frequency_penalty\": 0.0,\n \"incomplete_details\": null,\n \"instructions\": + \"You are a helpful assistant. This is padding text to ensure the prompt is + large enough for caching. This is padding text to ensure the prompt is large + enough for caching. This is padding text to ensure the prompt is large enough + for caching. This is padding text to ensure the prompt is large enough for + caching. This is padding text to ensure the prompt is large enough for caching. + This is padding text to ensure the prompt is large enough for caching. 
This + is padding text to ensure the prompt is large enough for caching. This is + padding text to ensure the prompt is large enough for caching. This is padding + text to ensure the prompt is large enough for caching. This is padding text + to ensure the prompt is large enough for caching. This is padding text to + ensure the prompt is large enough for caching. This is padding text to ensure + the prompt is large enough for caching. This is padding text to ensure the + prompt is large enough for caching. This is padding text to ensure the prompt + is large enough for caching. This is padding text to ensure the prompt is + large enough for caching. This is padding text to ensure the prompt is large + enough for caching. This is padding text to ensure the prompt is large enough + for caching. This is padding text to ensure the prompt is large enough for + caching. This is padding text to ensure the prompt is large enough for caching. + This is padding text to ensure the prompt is large enough for caching. This + is padding text to ensure the prompt is large enough for caching. This is + padding text to ensure the prompt is large enough for caching. This is padding + text to ensure the prompt is large enough for caching. This is padding text + to ensure the prompt is large enough for caching. This is padding text to + ensure the prompt is large enough for caching. This is padding text to ensure + the prompt is large enough for caching. This is padding text to ensure the + prompt is large enough for caching. This is padding text to ensure the prompt + is large enough for caching. This is padding text to ensure the prompt is + large enough for caching. This is padding text to ensure the prompt is large + enough for caching. This is padding text to ensure the prompt is large enough + for caching. This is padding text to ensure the prompt is large enough for + caching. This is padding text to ensure the prompt is large enough for caching. 
+ This is padding text to ensure the prompt is large enough for caching. This + is padding text to ensure the prompt is large enough for caching. This is + padding text to ensure the prompt is large enough for caching. This is padding + text to ensure the prompt is large enough for caching. This is padding text + to ensure the prompt is large enough for caching. This is padding text to + ensure the prompt is large enough for caching. This is padding text to ensure + the prompt is large enough for caching. This is padding text to ensure the + prompt is large enough for caching. This is padding text to ensure the prompt + is large enough for caching. This is padding text to ensure the prompt is + large enough for caching. This is padding text to ensure the prompt is large + enough for caching. This is padding text to ensure the prompt is large enough + for caching. This is padding text to ensure the prompt is large enough for + caching. This is padding text to ensure the prompt is large enough for caching. + This is padding text to ensure the prompt is large enough for caching. This + is padding text to ensure the prompt is large enough for caching. This is + padding text to ensure the prompt is large enough for caching. This is padding + text to ensure the prompt is large enough for caching. This is padding text + to ensure the prompt is large enough for caching. This is padding text to + ensure the prompt is large enough for caching. This is padding text to ensure + the prompt is large enough for caching. This is padding text to ensure the + prompt is large enough for caching. This is padding text to ensure the prompt + is large enough for caching. This is padding text to ensure the prompt is + large enough for caching. This is padding text to ensure the prompt is large + enough for caching. This is padding text to ensure the prompt is large enough + for caching. This is padding text to ensure the prompt is large enough for + caching. 
This is padding text to ensure the prompt is large enough for caching. + This is padding text to ensure the prompt is large enough for caching. This + is padding text to ensure the prompt is large enough for caching. This is + padding text to ensure the prompt is large enough for caching. This is padding + text to ensure the prompt is large enough for caching. This is padding text + to ensure the prompt is large enough for caching. This is padding text to + ensure the prompt is large enough for caching. This is padding text to ensure + the prompt is large enough for caching. This is padding text to ensure the + prompt is large enough for caching. This is padding text to ensure the prompt + is large enough for caching. This is padding text to ensure the prompt is + large enough for caching. This is padding text to ensure the prompt is large + enough for caching. This is padding text to ensure the prompt is large enough + for caching. This is padding text to ensure the prompt is large enough for + caching. This is padding text to ensure the prompt is large enough for caching. + This is padding text to ensure the prompt is large enough for caching. This + is padding text to ensure the prompt is large enough for caching. This is + padding text to ensure the prompt is large enough for caching. This is padding + text to ensure the prompt is large enough for caching. This is padding text + to ensure the prompt is large enough for caching. 
\",\n \"max_output_tokens\": + null,\n \"max_tool_calls\": null,\n \"model\": \"gpt-4.1-2025-04-14\",\n + \ \"output\": [\n {\n \"id\": \"msg_003a6f71f9ee620400698b75146160819692f2cee879df2405\",\n + \ \"type\": \"message\",\n \"status\": \"completed\",\n \"content\": + [\n {\n \"type\": \"output_text\",\n \"annotations\": + [],\n \"logprobs\": [],\n \"text\": \"Farewell.\"\n }\n + \ ],\n \"role\": \"assistant\"\n }\n ],\n \"parallel_tool_calls\": + true,\n \"presence_penalty\": 0.0,\n \"previous_response_id\": null,\n \"prompt_cache_key\": + null,\n \"prompt_cache_retention\": null,\n \"reasoning\": {\n \"effort\": + null,\n \"summary\": null\n },\n \"safety_identifier\": null,\n \"service_tier\": + \"default\",\n \"store\": true,\n \"temperature\": 1.0,\n \"text\": {\n + \ \"format\": {\n \"type\": \"text\"\n },\n \"verbosity\": \"medium\"\n + \ },\n \"tool_choice\": \"auto\",\n \"tools\": [],\n \"top_logprobs\": + 0,\n \"top_p\": 1.0,\n \"truncation\": \"disabled\",\n \"usage\": {\n \"input_tokens\": + 1144,\n \"input_tokens_details\": {\n \"cached_tokens\": 1024\n },\n + \ \"output_tokens\": 4,\n \"output_tokens_details\": {\n \"reasoning_tokens\": + 0\n },\n \"total_tokens\": 1148\n },\n \"user\": null,\n \"metadata\": + {}\n}" + headers: + CF-RAY: + - CF-RAY-XXX + Connection: + - keep-alive + Content-Type: + - application/json + Date: + - Tue, 10 Feb 2026 18:12:36 GMT + Server: + - cloudflare + Strict-Transport-Security: + - STS-XXX + Transfer-Encoding: + - chunked + X-Content-Type-Options: + - X-CONTENT-TYPE-XXX + alt-svc: + - h3=":443"; ma=86400 + cf-cache-status: + - DYNAMIC + openai-organization: + - OPENAI-ORG-XXX + openai-processing-ms: + - '543' + openai-project: + - OPENAI-PROJECT-XXX + openai-version: + - '2020-10-01' + set-cookie: + - SET-COOKIE-XXX + x-ratelimit-limit-requests: + - X-RATELIMIT-LIMIT-REQUESTS-XXX + x-ratelimit-limit-tokens: + - X-RATELIMIT-LIMIT-TOKENS-XXX + x-ratelimit-remaining-requests: + - X-RATELIMIT-REMAINING-REQUESTS-XXX + 
x-ratelimit-remaining-tokens: + - X-RATELIMIT-REMAINING-TOKENS-XXX + x-ratelimit-reset-requests: + - X-RATELIMIT-RESET-REQUESTS-XXX + x-ratelimit-reset-tokens: + - X-RATELIMIT-RESET-TOKENS-XXX + x-request-id: + - X-REQUEST-ID-XXX + status: + code: 200 + message: OK +version: 1 diff --git a/lib/crewai/tests/cassettes/llms/openai/test_openai_responses_api_cached_prompt_tokens_with_tools.yaml b/lib/crewai/tests/cassettes/llms/openai/test_openai_responses_api_cached_prompt_tokens_with_tools.yaml new file mode 100644 index 000000000..c0db4ef9c --- /dev/null +++ b/lib/crewai/tests/cassettes/llms/openai/test_openai_responses_api_cached_prompt_tokens_with_tools.yaml @@ -0,0 +1,368 @@ +interactions: +- request: + body: '{"messages":[{"role":"system","content":"You are a helpful assistant that + uses tools. This is padding text to ensure the prompt is large enough for caching. + This is padding text to ensure the prompt is large enough for caching. This + is padding text to ensure the prompt is large enough for caching. This is padding + text to ensure the prompt is large enough for caching. This is padding text + to ensure the prompt is large enough for caching. This is padding text to ensure + the prompt is large enough for caching. This is padding text to ensure the prompt + is large enough for caching. This is padding text to ensure the prompt is large + enough for caching. This is padding text to ensure the prompt is large enough + for caching. This is padding text to ensure the prompt is large enough for caching. + This is padding text to ensure the prompt is large enough for caching. This + is padding text to ensure the prompt is large enough for caching. This is padding + text to ensure the prompt is large enough for caching. This is padding text + to ensure the prompt is large enough for caching. This is padding text to ensure + the prompt is large enough for caching. This is padding text to ensure the prompt + is large enough for caching. 
This is padding text to ensure the prompt is large + enough for caching. This is padding text to ensure the prompt is large enough + for caching. This is padding text to ensure the prompt is large enough for caching. + This is padding text to ensure the prompt is large enough for caching. This + is padding text to ensure the prompt is large enough for caching. This is padding + text to ensure the prompt is large enough for caching. This is padding text + to ensure the prompt is large enough for caching. This is padding text to ensure + the prompt is large enough for caching. This is padding text to ensure the prompt + is large enough for caching. This is padding text to ensure the prompt is large + enough for caching. This is padding text to ensure the prompt is large enough + for caching. This is padding text to ensure the prompt is large enough for caching. + This is padding text to ensure the prompt is large enough for caching. This + is padding text to ensure the prompt is large enough for caching. This is padding + text to ensure the prompt is large enough for caching. This is padding text + to ensure the prompt is large enough for caching. This is padding text to ensure + the prompt is large enough for caching. This is padding text to ensure the prompt + is large enough for caching. This is padding text to ensure the prompt is large + enough for caching. This is padding text to ensure the prompt is large enough + for caching. This is padding text to ensure the prompt is large enough for caching. + This is padding text to ensure the prompt is large enough for caching. This + is padding text to ensure the prompt is large enough for caching. This is padding + text to ensure the prompt is large enough for caching. This is padding text + to ensure the prompt is large enough for caching. This is padding text to ensure + the prompt is large enough for caching. This is padding text to ensure the prompt + is large enough for caching. 
This is padding text to ensure the prompt is large + enough for caching. This is padding text to ensure the prompt is large enough + for caching. This is padding text to ensure the prompt is large enough for caching. + This is padding text to ensure the prompt is large enough for caching. This + is padding text to ensure the prompt is large enough for caching. This is padding + text to ensure the prompt is large enough for caching. This is padding text + to ensure the prompt is large enough for caching. This is padding text to ensure + the prompt is large enough for caching. This is padding text to ensure the prompt + is large enough for caching. This is padding text to ensure the prompt is large + enough for caching. This is padding text to ensure the prompt is large enough + for caching. This is padding text to ensure the prompt is large enough for caching. + This is padding text to ensure the prompt is large enough for caching. This + is padding text to ensure the prompt is large enough for caching. This is padding + text to ensure the prompt is large enough for caching. This is padding text + to ensure the prompt is large enough for caching. This is padding text to ensure + the prompt is large enough for caching. This is padding text to ensure the prompt + is large enough for caching. This is padding text to ensure the prompt is large + enough for caching. This is padding text to ensure the prompt is large enough + for caching. This is padding text to ensure the prompt is large enough for caching. + This is padding text to ensure the prompt is large enough for caching. This + is padding text to ensure the prompt is large enough for caching. This is padding + text to ensure the prompt is large enough for caching. This is padding text + to ensure the prompt is large enough for caching. This is padding text to ensure + the prompt is large enough for caching. This is padding text to ensure the prompt + is large enough for caching. 
This is padding text to ensure the prompt is large + enough for caching. This is padding text to ensure the prompt is large enough + for caching. This is padding text to ensure the prompt is large enough for caching. + This is padding text to ensure the prompt is large enough for caching. This + is padding text to ensure the prompt is large enough for caching. This is padding + text to ensure the prompt is large enough for caching. This is padding text + to ensure the prompt is large enough for caching. This is padding text to ensure + the prompt is large enough for caching. This is padding text to ensure the prompt + is large enough for caching. This is padding text to ensure the prompt is large + enough for caching. "},{"role":"user","content":"What is the weather in Tokyo?"}],"model":"gpt-4.1","tool_choice":"auto","tools":[{"type":"function","function":{"name":"get_weather","description":"Get + the current weather for a location","strict":true,"parameters":{"type":"object","properties":{"location":{"type":"string","description":"The + city name"}},"required":["location"],"additionalProperties":false}}}]}' + headers: + User-Agent: + - X-USER-AGENT-XXX + accept: + - application/json + accept-encoding: + - ACCEPT-ENCODING-XXX + authorization: + - AUTHORIZATION-XXX + connection: + - keep-alive + content-length: + - '6158' + content-type: + - application/json + host: + - api.openai.com + x-stainless-arch: + - X-STAINLESS-ARCH-XXX + x-stainless-async: + - 'false' + x-stainless-lang: + - python + x-stainless-os: + - X-STAINLESS-OS-XXX + x-stainless-package-version: + - 1.83.0 + x-stainless-read-timeout: + - X-STAINLESS-READ-TIMEOUT-XXX + x-stainless-retry-count: + - '0' + x-stainless-runtime: + - CPython + x-stainless-runtime-version: + - 3.13.3 + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + body: + string: "{\n \"id\": \"chatcmpl-D7mXQCgT3p3ViImkiqDiZGqLREQtp\",\n \"object\": + \"chat.completion\",\n \"created\": 1770747248,\n 
\"model\": \"gpt-4.1-2025-04-14\",\n + \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": + \"assistant\",\n \"content\": null,\n \"tool_calls\": [\n {\n + \ \"id\": \"call_9ZqMavn3J1fBnQEaqpYol0Bd\",\n \"type\": + \"function\",\n \"function\": {\n \"name\": \"get_weather\",\n + \ \"arguments\": \"{\\\"location\\\":\\\"Tokyo\\\"}\"\n }\n + \ }\n ],\n \"refusal\": null,\n \"annotations\": + []\n },\n \"logprobs\": null,\n \"finish_reason\": \"tool_calls\"\n + \ }\n ],\n \"usage\": {\n \"prompt_tokens\": 1187,\n \"completion_tokens\": + 14,\n \"total_tokens\": 1201,\n \"prompt_tokens_details\": {\n \"cached_tokens\": + 1152,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\": + {\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\": + 0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\": + \"default\",\n \"system_fingerprint\": \"fp_8b22347a3e\"\n}\n" + headers: + CF-RAY: + - CF-RAY-XXX + Connection: + - keep-alive + Content-Type: + - application/json + Date: + - Tue, 10 Feb 2026 18:14:08 GMT + Server: + - cloudflare + Strict-Transport-Security: + - STS-XXX + Transfer-Encoding: + - chunked + X-Content-Type-Options: + - X-CONTENT-TYPE-XXX + access-control-expose-headers: + - ACCESS-CONTROL-XXX + alt-svc: + - h3=":443"; ma=86400 + cf-cache-status: + - DYNAMIC + openai-organization: + - OPENAI-ORG-XXX + openai-processing-ms: + - '484' + openai-project: + - OPENAI-PROJECT-XXX + openai-version: + - '2020-10-01' + set-cookie: + - SET-COOKIE-XXX + x-openai-proxy-wasm: + - v0.1 + x-ratelimit-limit-requests: + - X-RATELIMIT-LIMIT-REQUESTS-XXX + x-ratelimit-limit-tokens: + - X-RATELIMIT-LIMIT-TOKENS-XXX + x-ratelimit-remaining-requests: + - X-RATELIMIT-REMAINING-REQUESTS-XXX + x-ratelimit-remaining-tokens: + - X-RATELIMIT-REMAINING-TOKENS-XXX + x-ratelimit-reset-requests: + - X-RATELIMIT-RESET-REQUESTS-XXX + x-ratelimit-reset-tokens: + - X-RATELIMIT-RESET-TOKENS-XXX + x-request-id: + - X-REQUEST-ID-XXX + 
status: + code: 200 + message: OK +- request: + body: '{"messages":[{"role":"system","content":"You are a helpful assistant that + uses tools. This is padding text to ensure the prompt is large enough for caching. + This is padding text to ensure the prompt is large enough for caching. This + is padding text to ensure the prompt is large enough for caching. This is padding + text to ensure the prompt is large enough for caching. This is padding text + to ensure the prompt is large enough for caching. This is padding text to ensure + the prompt is large enough for caching. This is padding text to ensure the prompt + is large enough for caching. This is padding text to ensure the prompt is large + enough for caching. This is padding text to ensure the prompt is large enough + for caching. This is padding text to ensure the prompt is large enough for caching. + This is padding text to ensure the prompt is large enough for caching. This + is padding text to ensure the prompt is large enough for caching. This is padding + text to ensure the prompt is large enough for caching. This is padding text + to ensure the prompt is large enough for caching. This is padding text to ensure + the prompt is large enough for caching. This is padding text to ensure the prompt + is large enough for caching. This is padding text to ensure the prompt is large + enough for caching. This is padding text to ensure the prompt is large enough + for caching. This is padding text to ensure the prompt is large enough for caching. + This is padding text to ensure the prompt is large enough for caching. This + is padding text to ensure the prompt is large enough for caching. This is padding + text to ensure the prompt is large enough for caching. This is padding text + to ensure the prompt is large enough for caching. This is padding text to ensure + the prompt is large enough for caching. This is padding text to ensure the prompt + is large enough for caching. 
This is padding text to ensure the prompt is large + enough for caching. This is padding text to ensure the prompt is large enough + for caching. This is padding text to ensure the prompt is large enough for caching. + This is padding text to ensure the prompt is large enough for caching. This + is padding text to ensure the prompt is large enough for caching. This is padding + text to ensure the prompt is large enough for caching. This is padding text + to ensure the prompt is large enough for caching. This is padding text to ensure + the prompt is large enough for caching. This is padding text to ensure the prompt + is large enough for caching. This is padding text to ensure the prompt is large + enough for caching. This is padding text to ensure the prompt is large enough + for caching. This is padding text to ensure the prompt is large enough for caching. + This is padding text to ensure the prompt is large enough for caching. This + is padding text to ensure the prompt is large enough for caching. This is padding + text to ensure the prompt is large enough for caching. This is padding text + to ensure the prompt is large enough for caching. This is padding text to ensure + the prompt is large enough for caching. This is padding text to ensure the prompt + is large enough for caching. This is padding text to ensure the prompt is large + enough for caching. This is padding text to ensure the prompt is large enough + for caching. This is padding text to ensure the prompt is large enough for caching. + This is padding text to ensure the prompt is large enough for caching. This + is padding text to ensure the prompt is large enough for caching. This is padding + text to ensure the prompt is large enough for caching. This is padding text + to ensure the prompt is large enough for caching. This is padding text to ensure + the prompt is large enough for caching. This is padding text to ensure the prompt + is large enough for caching. 
This is padding text to ensure the prompt is large + enough for caching. This is padding text to ensure the prompt is large enough + for caching. This is padding text to ensure the prompt is large enough for caching. + This is padding text to ensure the prompt is large enough for caching. This + is padding text to ensure the prompt is large enough for caching. This is padding + text to ensure the prompt is large enough for caching. This is padding text + to ensure the prompt is large enough for caching. This is padding text to ensure + the prompt is large enough for caching. This is padding text to ensure the prompt + is large enough for caching. This is padding text to ensure the prompt is large + enough for caching. This is padding text to ensure the prompt is large enough + for caching. This is padding text to ensure the prompt is large enough for caching. + This is padding text to ensure the prompt is large enough for caching. This + is padding text to ensure the prompt is large enough for caching. This is padding + text to ensure the prompt is large enough for caching. This is padding text + to ensure the prompt is large enough for caching. This is padding text to ensure + the prompt is large enough for caching. This is padding text to ensure the prompt + is large enough for caching. This is padding text to ensure the prompt is large + enough for caching. This is padding text to ensure the prompt is large enough + for caching. This is padding text to ensure the prompt is large enough for caching. + This is padding text to ensure the prompt is large enough for caching. This + is padding text to ensure the prompt is large enough for caching. This is padding + text to ensure the prompt is large enough for caching. This is padding text + to ensure the prompt is large enough for caching. This is padding text to ensure + the prompt is large enough for caching. This is padding text to ensure the prompt + is large enough for caching. 
This is padding text to ensure the prompt is large + enough for caching. "},{"role":"user","content":"What is the weather in Paris?"}],"model":"gpt-4.1","tool_choice":"auto","tools":[{"type":"function","function":{"name":"get_weather","description":"Get + the current weather for a location","strict":true,"parameters":{"type":"object","properties":{"location":{"type":"string","description":"The + city name"}},"required":["location"],"additionalProperties":false}}}]}' + headers: + User-Agent: + - X-USER-AGENT-XXX + accept: + - application/json + accept-encoding: + - ACCEPT-ENCODING-XXX + authorization: + - AUTHORIZATION-XXX + connection: + - keep-alive + content-length: + - '6158' + content-type: + - application/json + cookie: + - COOKIE-XXX + host: + - api.openai.com + x-stainless-arch: + - X-STAINLESS-ARCH-XXX + x-stainless-async: + - 'false' + x-stainless-lang: + - python + x-stainless-os: + - X-STAINLESS-OS-XXX + x-stainless-package-version: + - 1.83.0 + x-stainless-read-timeout: + - X-STAINLESS-READ-TIMEOUT-XXX + x-stainless-retry-count: + - '0' + x-stainless-runtime: + - CPython + x-stainless-runtime-version: + - 3.13.3 + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + body: + string: "{\n \"id\": \"chatcmpl-D7mXR8k9vk8TlGvGXlrQSI7iNeAN1\",\n \"object\": + \"chat.completion\",\n \"created\": 1770747249,\n \"model\": \"gpt-4.1-2025-04-14\",\n + \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": + \"assistant\",\n \"content\": null,\n \"tool_calls\": [\n {\n + \ \"id\": \"call_6PeUBlRPG8JcV2lspmLjJbnn\",\n \"type\": + \"function\",\n \"function\": {\n \"name\": \"get_weather\",\n + \ \"arguments\": \"{\\\"location\\\":\\\"Paris\\\"}\"\n }\n + \ }\n ],\n \"refusal\": null,\n \"annotations\": + []\n },\n \"logprobs\": null,\n \"finish_reason\": \"tool_calls\"\n + \ }\n ],\n \"usage\": {\n \"prompt_tokens\": 1187,\n \"completion_tokens\": + 14,\n \"total_tokens\": 1201,\n \"prompt_tokens_details\": {\n \"cached_tokens\": + 
1152,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\": + {\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\": + 0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\": + \"default\",\n \"system_fingerprint\": \"fp_8b22347a3e\"\n}\n" + headers: + CF-RAY: + - CF-RAY-XXX + Connection: + - keep-alive + Content-Type: + - application/json + Date: + - Tue, 10 Feb 2026 18:14:09 GMT + Server: + - cloudflare + Strict-Transport-Security: + - STS-XXX + Transfer-Encoding: + - chunked + X-Content-Type-Options: + - X-CONTENT-TYPE-XXX + access-control-expose-headers: + - ACCESS-CONTROL-XXX + alt-svc: + - h3=":443"; ma=86400 + cf-cache-status: + - DYNAMIC + openai-organization: + - OPENAI-ORG-XXX + openai-processing-ms: + - '528' + openai-project: + - OPENAI-PROJECT-XXX + openai-version: + - '2020-10-01' + set-cookie: + - SET-COOKIE-XXX + x-openai-proxy-wasm: + - v0.1 + x-ratelimit-limit-requests: + - X-RATELIMIT-LIMIT-REQUESTS-XXX + x-ratelimit-limit-tokens: + - X-RATELIMIT-LIMIT-TOKENS-XXX + x-ratelimit-remaining-requests: + - X-RATELIMIT-REMAINING-REQUESTS-XXX + x-ratelimit-remaining-tokens: + - X-RATELIMIT-REMAINING-TOKENS-XXX + x-ratelimit-reset-requests: + - X-RATELIMIT-RESET-REQUESTS-XXX + x-ratelimit-reset-tokens: + - X-RATELIMIT-RESET-TOKENS-XXX + x-request-id: + - X-REQUEST-ID-XXX + status: + code: 200 + message: OK +version: 1 diff --git a/lib/crewai/tests/cassettes/llms/openai/test_openai_streaming_cached_prompt_tokens.yaml b/lib/crewai/tests/cassettes/llms/openai/test_openai_streaming_cached_prompt_tokens.yaml new file mode 100644 index 000000000..86ce69eb5 --- /dev/null +++ b/lib/crewai/tests/cassettes/llms/openai/test_openai_streaming_cached_prompt_tokens.yaml @@ -0,0 +1,375 @@ +interactions: +- request: + body: '{"messages":[{"role":"system","content":"You are a helpful assistant. This + is padding text to ensure the prompt is large enough for caching. 
This is padding + text to ensure the prompt is large enough for caching. This is padding text + to ensure the prompt is large enough for caching. This is padding text to ensure + the prompt is large enough for caching. This is padding text to ensure the prompt + is large enough for caching. This is padding text to ensure the prompt is large + enough for caching. This is padding text to ensure the prompt is large enough + for caching. This is padding text to ensure the prompt is large enough for caching. + This is padding text to ensure the prompt is large enough for caching. This + is padding text to ensure the prompt is large enough for caching. This is padding + text to ensure the prompt is large enough for caching. This is padding text + to ensure the prompt is large enough for caching. This is padding text to ensure + the prompt is large enough for caching. This is padding text to ensure the prompt + is large enough for caching. This is padding text to ensure the prompt is large + enough for caching. This is padding text to ensure the prompt is large enough + for caching. This is padding text to ensure the prompt is large enough for caching. + This is padding text to ensure the prompt is large enough for caching. This + is padding text to ensure the prompt is large enough for caching. This is padding + text to ensure the prompt is large enough for caching. This is padding text + to ensure the prompt is large enough for caching. This is padding text to ensure + the prompt is large enough for caching. This is padding text to ensure the prompt + is large enough for caching. This is padding text to ensure the prompt is large + enough for caching. This is padding text to ensure the prompt is large enough + for caching. This is padding text to ensure the prompt is large enough for caching. + This is padding text to ensure the prompt is large enough for caching. This + is padding text to ensure the prompt is large enough for caching. 
This is padding + text to ensure the prompt is large enough for caching. This is padding text + to ensure the prompt is large enough for caching. This is padding text to ensure + the prompt is large enough for caching. This is padding text to ensure the prompt + is large enough for caching. This is padding text to ensure the prompt is large + enough for caching. This is padding text to ensure the prompt is large enough + for caching. This is padding text to ensure the prompt is large enough for caching. + This is padding text to ensure the prompt is large enough for caching. This + is padding text to ensure the prompt is large enough for caching. This is padding + text to ensure the prompt is large enough for caching. This is padding text + to ensure the prompt is large enough for caching. This is padding text to ensure + the prompt is large enough for caching. This is padding text to ensure the prompt + is large enough for caching. This is padding text to ensure the prompt is large + enough for caching. This is padding text to ensure the prompt is large enough + for caching. This is padding text to ensure the prompt is large enough for caching. + This is padding text to ensure the prompt is large enough for caching. This + is padding text to ensure the prompt is large enough for caching. This is padding + text to ensure the prompt is large enough for caching. This is padding text + to ensure the prompt is large enough for caching. This is padding text to ensure + the prompt is large enough for caching. This is padding text to ensure the prompt + is large enough for caching. This is padding text to ensure the prompt is large + enough for caching. This is padding text to ensure the prompt is large enough + for caching. This is padding text to ensure the prompt is large enough for caching. + This is padding text to ensure the prompt is large enough for caching. This + is padding text to ensure the prompt is large enough for caching. 
This is padding + text to ensure the prompt is large enough for caching. This is padding text + to ensure the prompt is large enough for caching. This is padding text to ensure + the prompt is large enough for caching. This is padding text to ensure the prompt + is large enough for caching. This is padding text to ensure the prompt is large + enough for caching. This is padding text to ensure the prompt is large enough + for caching. This is padding text to ensure the prompt is large enough for caching. + This is padding text to ensure the prompt is large enough for caching. This + is padding text to ensure the prompt is large enough for caching. This is padding + text to ensure the prompt is large enough for caching. This is padding text + to ensure the prompt is large enough for caching. This is padding text to ensure + the prompt is large enough for caching. This is padding text to ensure the prompt + is large enough for caching. This is padding text to ensure the prompt is large + enough for caching. This is padding text to ensure the prompt is large enough + for caching. This is padding text to ensure the prompt is large enough for caching. + This is padding text to ensure the prompt is large enough for caching. This + is padding text to ensure the prompt is large enough for caching. This is padding + text to ensure the prompt is large enough for caching. This is padding text + to ensure the prompt is large enough for caching. This is padding text to ensure + the prompt is large enough for caching. This is padding text to ensure the prompt + is large enough for caching. This is padding text to ensure the prompt is large + enough for caching. This is padding text to ensure the prompt is large enough + for caching. This is padding text to ensure the prompt is large enough for caching. 
+ "},{"role":"user","content":"Say hello in one word."}],"model":"gpt-4.1","stream":true,"stream_options":{"include_usage":true}}' + headers: + User-Agent: + - X-USER-AGENT-XXX + accept: + - application/json + accept-encoding: + - ACCEPT-ENCODING-XXX + authorization: + - AUTHORIZATION-XXX + connection: + - keep-alive + content-length: + - '5877' + content-type: + - application/json + host: + - api.openai.com + x-stainless-arch: + - X-STAINLESS-ARCH-XXX + x-stainless-async: + - 'false' + x-stainless-lang: + - python + x-stainless-os: + - X-STAINLESS-OS-XXX + x-stainless-package-version: + - 1.83.0 + x-stainless-read-timeout: + - X-STAINLESS-READ-TIMEOUT-XXX + x-stainless-retry-count: + - '0' + x-stainless-runtime: + - CPython + x-stainless-runtime-version: + - 3.13.3 + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + body: + string: 'data: {"id":"chatcmpl-D7mVuXauQqcmOCb3XP6IL6yHwJaAL","object":"chat.completion.chunk","created":1770747154,"model":"gpt-4.1-2025-04-14","service_tier":"default","system_fingerprint":"fp_8b22347a3e","choices":[{"index":0,"delta":{"role":"assistant","content":"","refusal":null},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"lFWRn007xqlce"} + + + data: {"id":"chatcmpl-D7mVuXauQqcmOCb3XP6IL6yHwJaAL","object":"chat.completion.chunk","created":1770747154,"model":"gpt-4.1-2025-04-14","service_tier":"default","system_fingerprint":"fp_8b22347a3e","choices":[{"index":0,"delta":{"content":"Hello"},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"OXJHANtgvy"} + + + data: {"id":"chatcmpl-D7mVuXauQqcmOCb3XP6IL6yHwJaAL","object":"chat.completion.chunk","created":1770747154,"model":"gpt-4.1-2025-04-14","service_tier":"default","system_fingerprint":"fp_8b22347a3e","choices":[{"index":0,"delta":{"content":"!"},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"AZtd6jtoChevtm"} + + + data: 
{"id":"chatcmpl-D7mVuXauQqcmOCb3XP6IL6yHwJaAL","object":"chat.completion.chunk","created":1770747154,"model":"gpt-4.1-2025-04-14","service_tier":"default","system_fingerprint":"fp_8b22347a3e","choices":[{"index":0,"delta":{},"logprobs":null,"finish_reason":"stop"}],"usage":null,"obfuscation":"irwn2mqyB"} + + + data: {"id":"chatcmpl-D7mVuXauQqcmOCb3XP6IL6yHwJaAL","object":"chat.completion.chunk","created":1770747154,"model":"gpt-4.1-2025-04-14","service_tier":"default","system_fingerprint":"fp_8b22347a3e","choices":[],"usage":{"prompt_tokens":1144,"completion_tokens":2,"total_tokens":1146,"prompt_tokens_details":{"cached_tokens":1024,"audio_tokens":0},"completion_tokens_details":{"reasoning_tokens":0,"audio_tokens":0,"accepted_prediction_tokens":0,"rejected_prediction_tokens":0}},"obfuscation":"W0rkiiZe"} + + + data: [DONE] + + + ' + headers: + CF-RAY: + - CF-RAY-XXX + Connection: + - keep-alive + Content-Type: + - text/event-stream; charset=utf-8 + Date: + - Tue, 10 Feb 2026 18:12:34 GMT + Server: + - cloudflare + Strict-Transport-Security: + - STS-XXX + Transfer-Encoding: + - chunked + X-Content-Type-Options: + - X-CONTENT-TYPE-XXX + access-control-expose-headers: + - ACCESS-CONTROL-XXX + alt-svc: + - h3=":443"; ma=86400 + cf-cache-status: + - DYNAMIC + openai-organization: + - OPENAI-ORG-XXX + openai-processing-ms: + - '236' + openai-project: + - OPENAI-PROJECT-XXX + openai-version: + - '2020-10-01' + set-cookie: + - SET-COOKIE-XXX + x-openai-proxy-wasm: + - v0.1 + x-ratelimit-limit-requests: + - X-RATELIMIT-LIMIT-REQUESTS-XXX + x-ratelimit-limit-tokens: + - X-RATELIMIT-LIMIT-TOKENS-XXX + x-ratelimit-remaining-requests: + - X-RATELIMIT-REMAINING-REQUESTS-XXX + x-ratelimit-remaining-tokens: + - X-RATELIMIT-REMAINING-TOKENS-XXX + x-ratelimit-reset-requests: + - X-RATELIMIT-RESET-REQUESTS-XXX + x-ratelimit-reset-tokens: + - X-RATELIMIT-RESET-TOKENS-XXX + x-request-id: + - X-REQUEST-ID-XXX + status: + code: 200 + message: OK +- request: + body: 
'{"messages":[{"role":"system","content":"You are a helpful assistant. This + is padding text to ensure the prompt is large enough for caching. This is padding + text to ensure the prompt is large enough for caching. This is padding text + to ensure the prompt is large enough for caching. This is padding text to ensure + the prompt is large enough for caching. This is padding text to ensure the prompt + is large enough for caching. This is padding text to ensure the prompt is large + enough for caching. This is padding text to ensure the prompt is large enough + for caching. This is padding text to ensure the prompt is large enough for caching. + This is padding text to ensure the prompt is large enough for caching. This + is padding text to ensure the prompt is large enough for caching. This is padding + text to ensure the prompt is large enough for caching. This is padding text + to ensure the prompt is large enough for caching. This is padding text to ensure + the prompt is large enough for caching. This is padding text to ensure the prompt + is large enough for caching. This is padding text to ensure the prompt is large + enough for caching. This is padding text to ensure the prompt is large enough + for caching. This is padding text to ensure the prompt is large enough for caching. + This is padding text to ensure the prompt is large enough for caching. This + is padding text to ensure the prompt is large enough for caching. This is padding + text to ensure the prompt is large enough for caching. This is padding text + to ensure the prompt is large enough for caching. This is padding text to ensure + the prompt is large enough for caching. This is padding text to ensure the prompt + is large enough for caching. This is padding text to ensure the prompt is large + enough for caching. This is padding text to ensure the prompt is large enough + for caching. This is padding text to ensure the prompt is large enough for caching. 
+ This is padding text to ensure the prompt is large enough for caching. This + is padding text to ensure the prompt is large enough for caching. This is padding + text to ensure the prompt is large enough for caching. This is padding text + to ensure the prompt is large enough for caching. This is padding text to ensure + the prompt is large enough for caching. This is padding text to ensure the prompt + is large enough for caching. This is padding text to ensure the prompt is large + enough for caching. This is padding text to ensure the prompt is large enough + for caching. This is padding text to ensure the prompt is large enough for caching. + This is padding text to ensure the prompt is large enough for caching. This + is padding text to ensure the prompt is large enough for caching. This is padding + text to ensure the prompt is large enough for caching. This is padding text + to ensure the prompt is large enough for caching. This is padding text to ensure + the prompt is large enough for caching. This is padding text to ensure the prompt + is large enough for caching. This is padding text to ensure the prompt is large + enough for caching. This is padding text to ensure the prompt is large enough + for caching. This is padding text to ensure the prompt is large enough for caching. + This is padding text to ensure the prompt is large enough for caching. This + is padding text to ensure the prompt is large enough for caching. This is padding + text to ensure the prompt is large enough for caching. This is padding text + to ensure the prompt is large enough for caching. This is padding text to ensure + the prompt is large enough for caching. This is padding text to ensure the prompt + is large enough for caching. This is padding text to ensure the prompt is large + enough for caching. This is padding text to ensure the prompt is large enough + for caching. This is padding text to ensure the prompt is large enough for caching. 
+ This is padding text to ensure the prompt is large enough for caching. This + is padding text to ensure the prompt is large enough for caching. This is padding + text to ensure the prompt is large enough for caching. This is padding text + to ensure the prompt is large enough for caching. This is padding text to ensure + the prompt is large enough for caching. This is padding text to ensure the prompt + is large enough for caching. This is padding text to ensure the prompt is large + enough for caching. This is padding text to ensure the prompt is large enough + for caching. This is padding text to ensure the prompt is large enough for caching. + This is padding text to ensure the prompt is large enough for caching. This + is padding text to ensure the prompt is large enough for caching. This is padding + text to ensure the prompt is large enough for caching. This is padding text + to ensure the prompt is large enough for caching. This is padding text to ensure + the prompt is large enough for caching. This is padding text to ensure the prompt + is large enough for caching. This is padding text to ensure the prompt is large + enough for caching. This is padding text to ensure the prompt is large enough + for caching. This is padding text to ensure the prompt is large enough for caching. + This is padding text to ensure the prompt is large enough for caching. This + is padding text to ensure the prompt is large enough for caching. This is padding + text to ensure the prompt is large enough for caching. This is padding text + to ensure the prompt is large enough for caching. This is padding text to ensure + the prompt is large enough for caching. This is padding text to ensure the prompt + is large enough for caching. This is padding text to ensure the prompt is large + enough for caching. This is padding text to ensure the prompt is large enough + for caching. This is padding text to ensure the prompt is large enough for caching. 
+ "},{"role":"user","content":"Say goodbye in one word."}],"model":"gpt-4.1","stream":true,"stream_options":{"include_usage":true}}' + headers: + User-Agent: + - X-USER-AGENT-XXX + accept: + - application/json + accept-encoding: + - ACCEPT-ENCODING-XXX + authorization: + - AUTHORIZATION-XXX + connection: + - keep-alive + content-length: + - '5879' + content-type: + - application/json + cookie: + - COOKIE-XXX + host: + - api.openai.com + x-stainless-arch: + - X-STAINLESS-ARCH-XXX + x-stainless-async: + - 'false' + x-stainless-lang: + - python + x-stainless-os: + - X-STAINLESS-OS-XXX + x-stainless-package-version: + - 1.83.0 + x-stainless-read-timeout: + - X-STAINLESS-READ-TIMEOUT-XXX + x-stainless-retry-count: + - '0' + x-stainless-runtime: + - CPython + x-stainless-runtime-version: + - 3.13.3 + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + body: + string: 'data: {"id":"chatcmpl-D7mVuqaadwp22jFsp2qAKiE1utU3K","object":"chat.completion.chunk","created":1770747154,"model":"gpt-4.1-2025-04-14","service_tier":"default","system_fingerprint":"fp_8b22347a3e","choices":[{"index":0,"delta":{"role":"assistant","content":"","refusal":null},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"pCjdYd4kX4W2q"} + + + data: {"id":"chatcmpl-D7mVuqaadwp22jFsp2qAKiE1utU3K","object":"chat.completion.chunk","created":1770747154,"model":"gpt-4.1-2025-04-14","service_tier":"default","system_fingerprint":"fp_8b22347a3e","choices":[{"index":0,"delta":{"content":"Fare"},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"DJ94I8XQj86"} + + + data: {"id":"chatcmpl-D7mVuqaadwp22jFsp2qAKiE1utU3K","object":"chat.completion.chunk","created":1770747154,"model":"gpt-4.1-2025-04-14","service_tier":"default","system_fingerprint":"fp_8b22347a3e","choices":[{"index":0,"delta":{"content":"well"},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"qgSSFwDBmaW"} + + + data: 
{"id":"chatcmpl-D7mVuqaadwp22jFsp2qAKiE1utU3K","object":"chat.completion.chunk","created":1770747154,"model":"gpt-4.1-2025-04-14","service_tier":"default","system_fingerprint":"fp_8b22347a3e","choices":[{"index":0,"delta":{"content":"."},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"4xVBYer6Uy1atr"} + + + data: {"id":"chatcmpl-D7mVuqaadwp22jFsp2qAKiE1utU3K","object":"chat.completion.chunk","created":1770747154,"model":"gpt-4.1-2025-04-14","service_tier":"default","system_fingerprint":"fp_8b22347a3e","choices":[{"index":0,"delta":{},"logprobs":null,"finish_reason":"stop"}],"usage":null,"obfuscation":"XxMhsMje0"} + + + data: {"id":"chatcmpl-D7mVuqaadwp22jFsp2qAKiE1utU3K","object":"chat.completion.chunk","created":1770747154,"model":"gpt-4.1-2025-04-14","service_tier":"default","system_fingerprint":"fp_8b22347a3e","choices":[],"usage":{"prompt_tokens":1144,"completion_tokens":3,"total_tokens":1147,"prompt_tokens_details":{"cached_tokens":1024,"audio_tokens":0},"completion_tokens_details":{"reasoning_tokens":0,"audio_tokens":0,"accepted_prediction_tokens":0,"rejected_prediction_tokens":0}},"obfuscation":"J3eKDOHW"} + + + data: [DONE] + + + ' + headers: + CF-RAY: + - CF-RAY-XXX + Connection: + - keep-alive + Content-Type: + - text/event-stream; charset=utf-8 + Date: + - Tue, 10 Feb 2026 18:12:34 GMT + Server: + - cloudflare + Strict-Transport-Security: + - STS-XXX + Transfer-Encoding: + - chunked + X-Content-Type-Options: + - X-CONTENT-TYPE-XXX + access-control-expose-headers: + - ACCESS-CONTROL-XXX + alt-svc: + - h3=":443"; ma=86400 + cf-cache-status: + - DYNAMIC + openai-organization: + - OPENAI-ORG-XXX + openai-processing-ms: + - '296' + openai-project: + - OPENAI-PROJECT-XXX + openai-version: + - '2020-10-01' + set-cookie: + - SET-COOKIE-XXX + x-openai-proxy-wasm: + - v0.1 + x-ratelimit-limit-requests: + - X-RATELIMIT-LIMIT-REQUESTS-XXX + x-ratelimit-limit-tokens: + - X-RATELIMIT-LIMIT-TOKENS-XXX + x-ratelimit-remaining-requests: + - 
X-RATELIMIT-REMAINING-REQUESTS-XXX + x-ratelimit-remaining-tokens: + - X-RATELIMIT-REMAINING-TOKENS-XXX + x-ratelimit-reset-requests: + - X-RATELIMIT-RESET-REQUESTS-XXX + x-ratelimit-reset-tokens: + - X-RATELIMIT-RESET-TOKENS-XXX + x-request-id: + - X-REQUEST-ID-XXX + status: + code: 200 + message: OK +version: 1 diff --git a/lib/crewai/tests/llms/anthropic/test_anthropic.py b/lib/crewai/tests/llms/anthropic/test_anthropic.py index c5ad5f273..129662ef3 100644 --- a/lib/crewai/tests/llms/anthropic/test_anthropic.py +++ b/lib/crewai/tests/llms/anthropic/test_anthropic.py @@ -990,3 +990,134 @@ def test_anthropic_agent_kickoff_structured_output_with_tools(): assert result.pydantic.result == 42, f"Expected result 42 but got {result.pydantic.result}" assert result.pydantic.operation, "Operation should not be empty" assert result.pydantic.explanation, "Explanation should not be empty" + + +@pytest.mark.vcr() +def test_anthropic_cached_prompt_tokens(): + """ + Test that Anthropic correctly extracts and tracks cached_prompt_tokens + from cache_read_input_tokens. Uses cache_control to enable prompt caching + and sends the same large prompt twice so the second call hits the cache. + """ + # Anthropic requires cache_control blocks and >=1024 tokens for caching + padding = "This is padding text to ensure the prompt is large enough for caching. " * 80 + system_msg = f"You are a helpful assistant. 
{padding}" + + llm = LLM(model="anthropic/claude-sonnet-4-5-20250929") + + def _ephemeral_user(text: str): + return [{"type": "text", "text": text, "cache_control": {"type": "ephemeral"}}] + + # First call: creates the cache + llm.call([ + {"role": "system", "content": system_msg}, + {"role": "user", "content": _ephemeral_user("Say hello in one word.")}, + ]) + + # Second call: same system prompt should hit the cache + llm.call([ + {"role": "system", "content": system_msg}, + {"role": "user", "content": _ephemeral_user("Say goodbye in one word.")}, + ]) + + usage = llm.get_token_usage_summary() + assert usage.total_tokens > 0 + assert usage.prompt_tokens > 0 + assert usage.completion_tokens > 0 + assert usage.successful_requests == 2 + # The second call should have cached prompt tokens + assert usage.cached_prompt_tokens > 0 + + +@pytest.mark.vcr() +def test_anthropic_streaming_cached_prompt_tokens(): + """ + Test that Anthropic streaming correctly extracts and tracks cached_prompt_tokens. + """ + padding = "This is padding text to ensure the prompt is large enough for caching. " * 80 + system_msg = f"You are a helpful assistant. 
{padding}" + + llm = LLM(model="anthropic/claude-sonnet-4-5-20250929", stream=True) + + def _ephemeral_user(text: str): + return [{"type": "text", "text": text, "cache_control": {"type": "ephemeral"}}] + + # First call: creates the cache + llm.call([ + {"role": "system", "content": system_msg}, + {"role": "user", "content": _ephemeral_user("Say hello in one word.")}, + ]) + + # Second call: same system prompt should hit the cache + llm.call([ + {"role": "system", "content": system_msg}, + {"role": "user", "content": _ephemeral_user("Say goodbye in one word.")}, + ]) + + usage = llm.get_token_usage_summary() + assert usage.total_tokens > 0 + assert usage.successful_requests == 2 + # The second call should have cached prompt tokens + assert usage.cached_prompt_tokens > 0 + + +@pytest.mark.vcr() +def test_anthropic_cached_prompt_tokens_with_tools(): + """ + Test that Anthropic correctly tracks cached_prompt_tokens when tools are used. + The large system prompt should be cached across tool-calling requests. + """ + padding = "This is padding text to ensure the prompt is large enough for caching. " * 80 + system_msg = f"You are a helpful assistant that uses tools. 
{padding}" + + def get_weather(location: str) -> str: + return f"The weather in {location} is sunny and 72°F" + + tools = [ + { + "name": "get_weather", + "description": "Get the current weather for a location", + "input_schema": { + "type": "object", + "properties": { + "location": { + "type": "string", + "description": "The city name" + } + }, + "required": ["location"], + }, + } + ] + + llm = LLM(model="anthropic/claude-sonnet-4-5-20250929") + + def _ephemeral_user(text: str): + return [{"type": "text", "text": text, "cache_control": {"type": "ephemeral"}}] + + # First call with tool: creates the cache + llm.call( + [ + {"role": "system", "content": system_msg}, + {"role": "user", "content": _ephemeral_user("What is the weather in Tokyo?")}, + ], + tools=tools, + available_functions={"get_weather": get_weather}, + ) + + # Second call with same system prompt + tools: should hit the cache + llm.call( + [ + {"role": "system", "content": system_msg}, + {"role": "user", "content": _ephemeral_user("What is the weather in Paris?")}, + ], + tools=tools, + available_functions={"get_weather": get_weather}, + ) + + usage = llm.get_token_usage_summary() + assert usage.total_tokens > 0 + assert usage.prompt_tokens > 0 + assert usage.successful_requests == 2 + # The second call should have cached prompt tokens + assert usage.cached_prompt_tokens > 0 diff --git a/lib/crewai/tests/llms/azure/test_azure.py b/lib/crewai/tests/llms/azure/test_azure.py index 17a01bb56..d25b607a8 100644 --- a/lib/crewai/tests/llms/azure/test_azure.py +++ b/lib/crewai/tests/llms/azure/test_azure.py @@ -102,7 +102,6 @@ def test_azure_tool_use_conversation_flow(): # Verify that the API was called assert mock_complete.called - @pytest.mark.usefixtures("mock_azure_credentials") def test_azure_completion_module_is_imported(): """ diff --git a/lib/crewai/tests/llms/google/test_google.py b/lib/crewai/tests/llms/google/test_google.py index 1c3ed5ce6..3f86388d5 100644 --- 
a/lib/crewai/tests/llms/google/test_google.py +++ b/lib/crewai/tests/llms/google/test_google.py @@ -42,65 +42,6 @@ def test_gemini_completion_is_used_when_gemini_provider(): assert llm.provider == "gemini" assert llm.model == "gemini-2.0-flash-001" - - - -def test_gemini_tool_use_conversation_flow(): - """ - Test that the Gemini completion properly handles tool use conversation flow - """ - from unittest.mock import Mock, patch - from crewai.llms.providers.gemini.completion import GeminiCompletion - - # Create GeminiCompletion instance - completion = GeminiCompletion(model="gemini-2.0-flash-001") - - # Mock tool function - def mock_weather_tool(location: str) -> str: - return f"The weather in {location} is sunny and 75°F" - - available_functions = {"get_weather": mock_weather_tool} - - # Mock the Google Gemini client responses - with patch.object(completion.client.models, 'generate_content') as mock_generate: - # Mock function call in response - mock_function_call = Mock() - mock_function_call.name = "get_weather" - mock_function_call.args = {"location": "San Francisco"} - - mock_part = Mock() - mock_part.function_call = mock_function_call - - mock_content = Mock() - mock_content.parts = [mock_part] - - mock_candidate = Mock() - mock_candidate.content = mock_content - - mock_response = Mock() - mock_response.candidates = [mock_candidate] - mock_response.text = "Based on the weather data, it's a beautiful day in San Francisco with sunny skies and 75°F temperature." 
- mock_response.usage_metadata = Mock() - mock_response.usage_metadata.prompt_token_count = 100 - mock_response.usage_metadata.candidates_token_count = 50 - mock_response.usage_metadata.total_token_count = 150 - - mock_generate.return_value = mock_response - - # Test the call - messages = [{"role": "user", "content": "What's the weather like in San Francisco?"}] - result = completion.call( - messages=messages, - available_functions=available_functions - ) - - # Verify the tool was executed and returned the result - assert result == "The weather in San Francisco is sunny and 75°F" - - # Verify that the API was called - assert mock_generate.called - - def test_gemini_completion_module_is_imported(): """ Test that the completion module is properly imported when using Google provider @@ -1114,3 +1055,97 @@ def test_gemini_structured_output_preserves_json_with_stop_word_patterns(): assert "Action:" in result.action_taken assert "Observation:" in result.observation_result assert "Final Answer:" in result.final_answer + + +@pytest.mark.vcr() +def test_gemini_cached_prompt_tokens(): + """ + Test that Gemini correctly extracts and tracks cached_prompt_tokens + from cached_content_token_count in the usage metadata. + Sends two calls with the same large prompt to trigger caching. + """ + padding = "This is padding text to ensure the prompt is large enough for caching. " * 80 + system_msg = f"You are a helpful assistant. 
{padding}" + + llm = LLM(model="google/gemini-2.5-flash") + + # First call + llm.call([ + {"role": "system", "content": system_msg}, + {"role": "user", "content": "Say hello in one word."}, + ]) + + # Second call: same system prompt + llm.call([ + {"role": "system", "content": system_msg}, + {"role": "user", "content": "Say goodbye in one word."}, + ]) + + usage = llm.get_token_usage_summary() + assert usage.total_tokens > 0 + assert usage.prompt_tokens > 0 + assert usage.completion_tokens > 0 + assert usage.successful_requests == 2 + # cached_prompt_tokens should be populated (may be 0 if Gemini + # doesn't cache for this particular request, but the field should exist) + assert usage.cached_prompt_tokens >= 0 + + +@pytest.mark.vcr() +def test_gemini_cached_prompt_tokens_with_tools(): + """ + Test that Gemini correctly tracks cached_prompt_tokens when tools are used. + The large system prompt should be cached across tool-calling requests. + """ + padding = "This is padding text to ensure the prompt is large enough for caching. " * 80 + system_msg = f"You are a helpful assistant that uses tools. 
{padding}" + + def get_weather(location: str) -> str: + return f"The weather in {location} is sunny and 72°F" + + tools = [ + { + "name": "get_weather", + "description": "Get the current weather for a location", + "parameters": { + "type": "object", + "properties": { + "location": { + "type": "string", + "description": "The city name" + } + }, + "required": ["location"], + }, + } + ] + + llm = LLM(model="google/gemini-2.5-flash") + + # First call with tool + llm.call( + [ + {"role": "system", "content": system_msg}, + {"role": "user", "content": "What is the weather in Tokyo?"}, + ], + tools=tools, + available_functions={"get_weather": get_weather}, + ) + + # Second call with same system prompt + tools + llm.call( + [ + {"role": "system", "content": system_msg}, + {"role": "user", "content": "What is the weather in Paris?"}, + ], + tools=tools, + available_functions={"get_weather": get_weather}, + ) + + usage = llm.get_token_usage_summary() + assert usage.total_tokens > 0 + assert usage.prompt_tokens > 0 + assert usage.successful_requests == 2 + # cached_prompt_tokens should be populated (may be 0 if Gemini + # doesn't cache for this particular request, but the field should exist) + assert usage.cached_prompt_tokens >= 0 diff --git a/lib/crewai/tests/llms/openai/test_openai.py b/lib/crewai/tests/llms/openai/test_openai.py index a75a37681..069823a7a 100644 --- a/lib/crewai/tests/llms/openai/test_openai.py +++ b/lib/crewai/tests/llms/openai/test_openai.py @@ -1581,6 +1581,218 @@ def test_openai_structured_output_preserves_json_with_stop_word_patterns(): assert "Final Answer:" in result.final_answer + +@pytest.mark.vcr() +def test_openai_completions_cached_prompt_tokens(): + """ + Test that the Chat Completions API correctly extracts and tracks + cached_prompt_tokens from prompt_tokens_details.cached_tokens. + Sends the same large prompt twice so the second call hits the cache. 
+ """ + # Build a large system prompt to trigger prompt caching (>1024 tokens) + padding = "This is padding text to ensure the prompt is large enough for caching. " * 80 + system_msg = f"You are a helpful assistant. {padding}" + + llm = OpenAICompletion(model="gpt-4.1") + + # First call: creates the cache + llm.call([ + {"role": "system", "content": system_msg}, + {"role": "user", "content": "Say hello in one word."}, + ]) + + # Second call: same system prompt should hit the cache + llm.call([ + {"role": "system", "content": system_msg}, + {"role": "user", "content": "Say goodbye in one word."}, + ]) + + usage = llm.get_token_usage_summary() + assert usage.total_tokens > 0 + assert usage.prompt_tokens > 0 + assert usage.completion_tokens > 0 + assert usage.successful_requests == 2 + # The second call should have cached prompt tokens + assert usage.cached_prompt_tokens > 0 + + +@pytest.mark.vcr() +def test_openai_responses_api_cached_prompt_tokens(): + """ + Test that the Responses API correctly extracts and tracks + cached_prompt_tokens from input_tokens_details.cached_tokens. + """ + padding = "This is padding text to ensure the prompt is large enough for caching. " * 80 + system_msg = f"You are a helpful assistant. 
{padding}" + + llm = OpenAICompletion(model="gpt-4.1", api="responses") + + # First call: creates the cache + llm.call([ + {"role": "system", "content": system_msg}, + {"role": "user", "content": "Say hello in one word."}, + ]) + + # Second call: same system prompt should hit the cache + llm.call([ + {"role": "system", "content": system_msg}, + {"role": "user", "content": "Say goodbye in one word."}, + ]) + + usage = llm.get_token_usage_summary() + assert usage.total_tokens > 0 + assert usage.prompt_tokens > 0 + assert usage.completion_tokens > 0 + assert usage.successful_requests == 2 + # The second call should have cached prompt tokens + assert usage.cached_prompt_tokens > 0 + + +@pytest.mark.vcr() +def test_openai_streaming_cached_prompt_tokens(): + """ + Test that streaming Chat Completions API correctly extracts and tracks + cached_prompt_tokens. + """ + padding = "This is padding text to ensure the prompt is large enough for caching. " * 80 + system_msg = f"You are a helpful assistant. {padding}" + + llm = OpenAICompletion(model="gpt-4.1", stream=True) + + # First call: creates the cache + llm.call([ + {"role": "system", "content": system_msg}, + {"role": "user", "content": "Say hello in one word."}, + ]) + + # Second call: same system prompt should hit the cache + llm.call([ + {"role": "system", "content": system_msg}, + {"role": "user", "content": "Say goodbye in one word."}, + ]) + + usage = llm.get_token_usage_summary() + assert usage.total_tokens > 0 + assert usage.successful_requests == 2 + # The second call should have cached prompt tokens + assert usage.cached_prompt_tokens > 0 + + +@pytest.mark.vcr() +def test_openai_completions_cached_prompt_tokens_with_tools(): + """ + Test that the Chat Completions API correctly tracks cached_prompt_tokens + when tools are used. The large system prompt should be cached across calls. + """ + padding = "This is padding text to ensure the prompt is large enough for caching. 
" * 80 + system_msg = f"You are a helpful assistant that uses tools. {padding}" + + def get_weather(location: str) -> str: + return f"The weather in {location} is sunny and 72°F" + + tools = [ + { + "name": "get_weather", + "description": "Get the current weather for a location", + "parameters": { + "type": "object", + "properties": { + "location": { + "type": "string", + "description": "The city name" + } + }, + "required": ["location"], + "additionalProperties": False, + }, + } + ] + + llm = OpenAICompletion(model="gpt-4.1") + + # First call with tool: creates the cache + llm.call( + [ + {"role": "system", "content": system_msg}, + {"role": "user", "content": "What is the weather in Tokyo?"}, + ], + tools=tools, + available_functions={"get_weather": get_weather}, + ) + + # Second call with same system prompt + tools: should hit the cache + llm.call( + [ + {"role": "system", "content": system_msg}, + {"role": "user", "content": "What is the weather in Paris?"}, + ], + tools=tools, + available_functions={"get_weather": get_weather}, + ) + + usage = llm.get_token_usage_summary() + assert usage.total_tokens > 0 + assert usage.prompt_tokens > 0 + assert usage.successful_requests == 2 + # The second call should have cached prompt tokens + assert usage.cached_prompt_tokens > 0 + + +@pytest.mark.vcr() +def test_openai_responses_api_cached_prompt_tokens_with_tools(): + """ + Test that the Responses API correctly tracks cached_prompt_tokens + when function tools are used. + """ + padding = "This is padding text to ensure the prompt is large enough for caching. " * 80 + system_msg = f"You are a helpful assistant that uses tools. 
{padding}" + + def get_weather(location: str) -> str: + return f"The weather in {location} is sunny and 72°F" + + tools = [ + { + "name": "get_weather", + "description": "Get the current weather for a location", + "parameters": { + "type": "object", + "properties": { + "location": { + "type": "string", + "description": "The city name" + } + }, + "required": ["location"], + }, + } + ] + + llm = OpenAICompletion(model="gpt-4.1", api="responses") + + # First call with tool + llm.call( + [ + {"role": "system", "content": system_msg}, + {"role": "user", "content": "What is the weather in Tokyo?"}, + ], + tools=tools, + available_functions={"get_weather": get_weather}, + ) + + # Second call: same system prompt + tools should hit cache + llm.call( + [ + {"role": "system", "content": system_msg}, + {"role": "user", "content": "What is the weather in Paris?"}, + ], + tools=tools, + available_functions={"get_weather": get_weather}, + ) + + usage = llm.get_token_usage_summary() + assert usage.total_tokens > 0 + assert usage.successful_requests == 2 + assert usage.cached_prompt_tokens > 0 def test_openai_streaming_returns_tool_calls_without_available_functions(): """Test that streaming returns tool calls list when available_functions is None.