Mirror of https://github.com/crewAIInc/crewAI.git, synced 2026-01-09 08:08:32 +00:00
Lorenze/fix tool call twice (#3495)
* test: add test to ensure tool is called only once during crew execution
  - Introduced a new test case to validate that the counting_tool is executed exactly once during crew execution.
  - Created a CountingTool class to track execution counts and log call history.
  - Enhanced the test suite with a YAML cassette for consistent tool behavior verification.
* ensure tool function called only once
* refactor: simplify error handling in CrewStructuredTool
  - Removed unnecessary try-except block around the tool function call to streamline execution flow.
  - Ensured that the tool function is called directly, improving readability and maintainability.
* linted
* need to ignore for now as we can't infer the complex generic type within pydantic create_model_func
* fix tests
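For context on the commit message above, here is a minimal sketch of the kind of single-invocation test it describes. It is illustrative only: the names `CountingTool`, `CountingToolInput`, and `test_tool_is_executed_only_once` are assumptions, not the code in this diff, and the real test replays recorded LLM traffic from the YAML cassette shown below rather than hitting a live model. The sketch only uses crewai APIs that appear elsewhere in this diff (`BaseTool`, `Agent`, `Task`, `Crew`, `@pytest.mark.vcr`).

```python
import pytest
from pydantic import BaseModel, Field

from crewai import Agent, Crew, Task
from crewai.tools import BaseTool


class CountingToolInput(BaseModel):
    query: str = Field(..., description="Input forwarded to the tool")


class CountingTool(BaseTool):
    """Illustrative tool that records how many times _run executes."""

    name: str = "Counting Tool"
    description: str = "Returns its input and counts each execution"
    args_schema: type[BaseModel] = CountingToolInput
    call_count: int = 0

    def _run(self, query: str) -> str:
        # Every real execution bumps the counter exactly once.
        self.call_count += 1
        return f"Processed: {query}"


# In the real suite a VCR cassette pins the LLM responses, so the tool is
# asked for exactly once and no live API call is made.
@pytest.mark.vcr(filter_headers=["authorization"])
def test_tool_is_executed_only_once():
    tool = CountingTool()
    agent = Agent(
        role="Simple role",
        goal="Simple goal",
        backstory="Simple backstory",
        tools=[tool],
    )
    task = Task(
        description="Use the counting tool once and report its result.",
        agent=agent,
        expected_output="The tool result",
    )
    Crew(agents=[agent], tasks=[task]).kickoff()

    # The regression being guarded against: the wrapped function must not
    # run twice for a single tool call.
    assert tool.call_count == 1
```

The companion refactor described in the message removes the try-except wrapper in `CrewStructuredTool` so the underlying function is invoked directly, exactly once per tool call, which is what a counter like the one above is meant to catch.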
@@ -330,4 +330,222 @@ interactions:
status:
code: 200
message: OK
- request:
body: '{"input": ["Capital of France"], "model": "text-embedding-3-small", "encoding_format":
"base64"}'
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate, zstd
connection:
- keep-alive
content-length:
- '96'
content-type:
- application/json
cookie:
- _cfuvid=rvDDZbBWaissP0luvtyuyyAWcPx3AiaoZS9LkAuK4sM-1746636999152-0.0.1.1-604800000
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.93.0
x-stainless-arch:
- arm64
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.93.0
x-stainless-read-timeout:
- '600'
x-stainless-retry-count:
- '0'
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.12.9
method: POST
uri: https://api.openai.com/v1/embeddings
response:
body:
string: !!binary |
H4sIAAAAAAAAA1R6Ww+yyrbl+/4VK+uV3pGLUFXrjbsISKEgYqfTAUEEBeRWQJ2c/97Br/t094uJ
|
||||
WIZUzVljjjHm/I9//fXX321a5Y/x73/++vtTDuPf/217liVj8vc/f/33f/31119//cfv8/9bmddp
|
||||
nmVlU/yW/34smyxf/v7nL/a/nvzfRf/89XfIFDaOu8OtX0ttiOESiQE+pXaRrnxot/BYFAy5kEUP
|
||||
ONOCJaymU0mOVijT9bPeH/BZXg4kYptvv4S7/QQztojx/ay+Ul5kFR8F+9XEUeizziKFvQ+uIr4T
|
||||
E107h77ZSw1r9zPiUCqSYL4SxUbv5usRy5ZLusgkecBnVXUYOzGbrs25voiDG1c4vEhCTxHz8GCW
|
||||
BAdvL9+sngv6iwv3uxDj7HWgPUkXUIBvFy3YnkpJo7AQIvipwoIEgINatQMnCZbG54EP4NtQGgm1
|
||||
j84HJBDbhxeNI+7DBsboBuS5G27VeLUbGV4TmSEyXDVNYO2qg963GbCJrrazpK3iIk9XzQn1/aGa
|
||||
HvHpDatPvZDj9a5UvL67PpApmxGO6maite0dfTjujIfHLN4F0Dun8IiTkpSc0vyuLRl9hRAz+on4
|
||||
10ruubG6mchYruLEKW2rfYOdyUKFSzKs4XeRsjXNYsmeAMVeXOk9n7hjDu9xnGNTZ57p3DVSDWr5
|
||||
fMXpOdNo4zDGG1m7/QtHFVHoPOe4gw9gDliBlhTQ8z7hRY3aLjneZ4Uu8u3TAfe1ODjU9lrFPfww
|
||||
hvez5uNTtZcBx1Zhglg5CrF231NAhv5ewND/BCQ5FX2/CJdjAW8sKYkyly7lHFe2AdR9HZ9Jc6EC
|
||||
8O8qelzOmcdoegrac2xJ0KLvI8be8UXp28I+fJ/uiCQH4ews2KhKpFTMwxPPo5S+M+Oxh4esr7Hb
|
||||
TDigi6VIaPf8aMS6GVol/PItMgbfA8z3HghaTguYSWJFrmr1AovFCxN6lWOCH8Zy0Cj3tG24rysZ
|
||||
n9r61c8ncz9A32GbqY46xeETxDNIpUswCWHyqRbOfMVIaG0bG4uy9Et2iGSYhTeJqEEyARpwbgEt
|
||||
fzpMxzJXU9bImBBMrPAicq0fwCyP9wiGaWsRR481jeNRCZHD8pG348i7IurdyaE9iRTHha1X0yeE
|
||||
NbicCPCK19j063LnYvSxK5+oxyWshHE3y2h/CD/kfrjXdEaVFIL3y9fwbQWHgApiPMGnM2U4gotf
|
||||
rZ/1nKNkskqcWfRTrXUytNBlfAWr5VD1rak4ETSWm4iVsLxt+8tVwDlaOfWqg7XlfHY6cDjfPGIy
|
||||
4EFJveg+1JsHxmnzzgDl35IHDwafeKabnYLRmXoXamJ1m+ZoDTQara0OOTVZsHr21YoviMHCV9w2
|
||||
Xhyo0Fma0VZh/dXfON9HRTV/7vMAFpFeJxbnZ20Rs28L+x0YiFcKNzDj21P95SexzvRYcVejYNHL
|
||||
3p9w6l15ZzaGYQZWc7gQo/rMKQEr1P+s12KUOD+8gU/9ir193EcVh3mkwnF3eGDjcbS1LV9l8aiE
|
||||
lbfU4hqMtzd6S8X3POFnBStnNPtbjcDbqLGm0n06n77BCkrCD948wBDMDjY8+L7vWRztmSPoLobs
|
||||
/fCAaLN67eeMnxKYHFWJHOpFTvlQEfcwPpWLR7+T6nBPHQ9S9MI6ceprH0yrM/OI8IDBTs6jdNu/
|
||||
DN9WTr3yzmgaX5ATD3fr0/R4tjlWbCmuMlpPOSTqnak0sgPGHo6vm7bhs619j3KhIsKLzDT/8EeV
|
||||
1hpe4nkhF+HtaLPNtTnkXo2LtUA2KUfDD4SscAUY21nSc339iVEYXiSiAuAEfKu9BnSa9hl+HG4i
|
||||
WMTs1aGT3p/w7z7wL7dc0RLgAeMrbKtB08gM7kdXw9nexVrfiZGOPs9uT3R6PmksFWUT1dXxhuWD
|
||||
++l7duVnMGVqiPVdGjpcC+YHhF+uw9bK7bWFrcIYct54xGcd2g6b9t7lT71zGdpRKl2frhTXkeJ9
|
||||
TEnVKDafDxh0yteD3rOp1rgp3uiZ2Tnxjq2vsVdrlKRLvC5Y9vRLT+tFv6DrpZ+JWoHIWVRpfcMt
|
||||
HsSNn2oq3OsgRiqlAbaSl+1wVXSckWv6N5I43Q3wrfdkwHzTHyRWXyxYSb/av/wg3m6t6cJPsJTU
|
||||
lDFxoE2rs6T3ywxD+rxO+8ukpUIowTesh9kh0fhM6dqy1IXtM9OxfBtdwKUjTuA8ePrEcrmiCTwT
|
||||
STCTvSvOZOPjbPjeIvdFHeKJitivfqDP6G7CjBzN/KSNuyV4w21/WAuA3As69GPIvT4uPsx2oq1R
|
||||
zQ+wegGbGLWJtCH83k0QNKHhzVnUpHzifh4ofz1PHmdKpdOuB30AeUY/3jc+Tz2NKrH9xYPkr7Oh
|
||||
Cca7f0PXOb2wdXocA5Zw7AOBV6wSP9bHtPM5wkrIHQdispqv0fNBidD3XJUT3zHYoW7nQyh809Hb
|
||||
H42vI2QobaEa+0+c4XB11putTSA5yhLxrDFKZ41QHd4WDXit8S2rqTo+HvA1zphoPP4GRHncV4jl
|
||||
YzR9g2oE81l+mGLTgpToVcqAWc7LPUpQhDzY2FfA8rkO4Wjw2OOy+FEtmjBc4GMML0S1StXhbOH1
|
||||
RkrTavhxApPGqcevDleddYhy+egBOUmCj2q3GT3JSz/BELGi+wd/5LfHBvRY7WTYIQETXDSPlN3w
|
||||
AgyJhXDEAEgH7RTLcOc5KjlcTueeFtkl+lMPsEISh33czh26z7gnp8K9g29/kic0WAMkh8WgQBh0
|
||||
5gKKy74ix0vLassrjy5o6d4SkfkP0eawuEyQu5V3YufKBcyXbIboy6bvaTl1di+8IzRImSWY09kl
|
||||
PhAI/A6QdbuBKLcG9QMPvrzQTt8nztr6VbHzwJhSFKcslmP9lAp5UXQIjFPiOUTrwWqpX+tP/mtc
|
||||
iPvJCZZQcly+J9rXtirhtexZIJhVTzb8C9aIGy1YfS8x8T7tJ2gMl1/h/az4ntBbRb9AtS7gEoEA
|
||||
O80bgd/9hnyvCuQQJp+eWnp5kXhcatOeQwZdNHy2YWsZPLbXz6tarvfrAz1hKGKv65j+veEV5BhT
|
||||
nahzKpxZqqMVupNRYVvMvJ7/Sv0FbnhHrLpfHHrX5hB+PE8gWos9sAiP3Sy1RnzBxzQK+3l9v2oQ
|
||||
0uyKQ3RuANH8kw6/kxzjbOOXQmSGK/RVTCdwMqtq3Yn6/MtPbL1fOhBOr0uNBll38F2LngHHFZ4O
|
||||
F/4wEcv4qpUQa14Ob2eh8Fgdvx1ivKs3xPWww7kIg2BelEcLLr4tT8vjyqRrVS/RH34h2+2gLexq
|
||||
s7DjvHxipUJK6cangcZxM06uwSWgOHcvcMoDwXvkDy7o+ciZ4XqpP8RRHezwQ/JiID/bxSS6vaJ9
|
||||
X/arQyKBPTGGbglG4SGscPceBhzQQ9ovtzf3hvU1QRPParO27KWwg/zJ2OHj1B6D9ZuZMVQiV8VB
|
||||
9DgFg8a8CpAnB8YTZlvSfvkNtXlf4xv4HgArpW0BNzwm6l37BlRT8g4wnxudqslQg8mLAhOZgoG8
|
||||
9cdHEsRDIKwumnapmPbzQJMJHoKaxcrGX3/4iMDpBYnmfc1KcLsYwvq4PxLtNA2UCF3ng+h10qdd
|
||||
nRFap50aojJPeqx+TnPaSb26R8wcf7G/C0j1zbuOgXs3IB6bjExA90dBB32sF95Mj1YvbN/Rxk9I
|
||||
Yow64HOQFKChzJngJv72q6r2rOSwbIQV5VgFg4dWH5k3scTu+q0Ambs3A1fR8vBt/I7BPBwOCdyJ
|
||||
fEiMV6pro+d9JVCY94FYXzcDvaWXPkpFGeN4qY2eqyJlRnBfH7yPfmTpwOc6I92mMMJX5MQVbYyO
|
||||
lcbbxd/yray2/V/g5/o2cZ7qi7a691eC1FoVf3yhmp+q+YbvCjUkQ/lXowa9qHA9PSBR7WFK5/KR
|
||||
WX/qXYfYCfQ5m13gGLy/2FuPQz/o+rlD6mwM5NZbcs9zXlLD4/1IyHH+wIrGZ1aCA7l88AlWJFjk
|
||||
OjQRMydfD5qDDFojYyL44+PO+lyDRa1OM7zlzh5rytugQh2QGEz6ycAGcvbV+sZ6iXKDuU/rwW/o
|
||||
d+MbwJyHFWdfjnVmfgAJ+O4PDnHzB5cSK/AHwJeJOQmrPFakbNccJRwi2OqRo/G96Tykl1buvPHw
|
||||
clK2ZVAJxITTSJ4chWAKqBPDTb8RV3zNdEnvyQq9em9h88GE/WrRtQU/fXXAg06Xj36SITeKb2Ie
|
||||
P0Ww6GaygmOUO8RwzmUwd/xLRdM9GsnJrJd+0x8h2PALO1nTASp/Yhvm/JvDxySVwAgnkUcbvyWG
|
||||
SC4O6198HlbOtcAGhiCYHkzBI2ZiGqw97RP9jNXThGlkn7APhKPDO8ESAeMqGMTK0Fwt+nOXgy6S
|
||||
NSID4aiRXVyyUGVYi4RZ7lA+oE4CZRrJRNvPvEM13zChJr5u3szEVzoPV8SDa6Iy+CSpaiXgQZog
|
||||
aOgJK2fz7FBWGX3QS7DE2vHm9iy+3VT49mJ54saPowmPq1xA4SSJZLu/gGbJ/EblmwrePn1UYCK7
|
||||
eoYnK2KJ9uNb2KgKVLZZThyLVMEsj+fwd37TaxrELV8T9Yd3WJ51tV+b8+RDJN/DTZ/LgQAPlxYq
|
||||
OU+J+Rk4rVX2hxruibNMHGNeAqGLqIys0rLIvTqCYH6xCgPZ83rFch+9wNAyXAmsTH16/J75gtWF
|
||||
7gPmXmOTw+GLUrryJxPeB93A12BXgpmmvQtxpcX48OHNfnnqeIK/88eTH/XLcjR4KJbu5efngHm3
|
||||
93kwOe+AeJirwLpOwR5BE7XE84vM4YmX+fAwV19vJ5lvSpMskyHoZIpl6B37ae/DCGqWBIjiiZlG
|
||||
iywJYQpG6rH53ajmvBRXsHfPBGNmycDS5EHNFfYhmCStmOliGZMLW2Xq8UkJb9Wq3ezo5/8Qyw7T
|
||||
fmzBPof5x2L/3C9CSz9H3rMQt/yWA77eKwWsDl+RHGuurKZz0/vQct45OZiaXa0JtSZo4zEmj/Rt
|
||||
pbN/vOSIUZ8L0eXqQckS7UNkZfKThPdD3xP541uIpHebeIOwVpueV9FTv2FsVB8/ZfscenCPUYST
|
||||
p2AGwtwNDEj1ycWHwOpTOuj8Bb7qu4LtyXsFlG2tEpYscyVHxaXOUvrnAaaXR01wRnC62rtklTa+
|
||||
hR1hGdI581Ifci2TTNLkKcH8uJ1bBKvHk+SPqA8oLeMc6L6h4OTerRUlju8iIn0+5KkQyVm3+waR
|
||||
LVv48PMn9kfNhsGS7DF2A+/P+6Dn6fuJ9xqxmupvasKyKsNpOZtnjQaXVw43vj7B6kaccfN34GGI
|
||||
NXw5LmxFJLDm6E7BBePzxwros/Yn1LRiSix/jyk7MRIPzVGzscmVWspl+6aAnkk6rL9V2elEET+A
|
||||
wWkFUTAbAbI2UQdTLmwnfsOrmR9oAn5+1MZXNYrIZwbnqSqwBaq4GqD8SX7xxNiBI/3hF6yk4U4y
|
||||
G52BUAdNjN479eEtHybu1+SdXODiFxYxjF2bzk3UxaAy44hc47HqOU6Q9rC7hA/y03NLt+/sP3h0
|
||||
P9xNSoB/lmHER8MEnsvNmdeDPv38S2IEO5XysGFruOHHJLEO2087f+mQJl9sfCx0Lp2E9mVLDfz8
|
||||
H3z9+Y+92T5J0N1TjRejdwKb2e+wB0eZCoo+MPBz3j9JnD4qOvO2HyI218epZ773dGaZvSVKTvbE
|
||||
zmRdnMU2YxNsfi22FokJ6DU/1jDx4yOJDFF0Vv0hJbBdbwdyXIoo5cluWiHiISLKs/s4czHd9/B0
|
||||
OjPeromPlaDloICptzJTtfEvfqsvUHbCK85pvNPWTI8hXKAMiBJBvRLq3TpA53jUsXWm34rW5ZWF
|
||||
9sFsiCwUFqDrTsvBhic/Pzcgh1PRIl5/etNSlhdnLcx1QMHBabB5CWQgiPpeguvudfeks9CDGV01
|
||||
CRJhrxDsxGGwgK8yo9N95TwmONvpli8xOuFBxUotrikVTkYsbviDbZsw2urm5A0vjnojxqbvOYja
|
||||
C9TOk7Pxt5VOh+fDhrITXYlyMPlgIl52gT7lLwTv/XMw9zl0oV0HKsafMtJ6rRH3YNPHHkpz0SEO
|
||||
c6rBFl8PHm4inU971UM/v3njc/1YKr0PkuQBMN78ofVjehbIh0KdOss79/RCwgeaUWdgIypwRa/D
|
||||
Xv35H8T/HCetu+elBze/yqNB+OxX7aaG6B4OAlZrq3MWOC0s2vQrPi4Fn071/ljCULAhdkKxC4Tj
|
||||
3Vp//yd4ifV0OUlaKL5cZ/akMI77OWsECAfxGk/DdQrB/LAqV9rwHpt6V1fLK899wIZPcfps/IGA
|
||||
t9FC/eN+SSSKVT/sv6MJvOvjgI/bfRluJcx/+OsJwuVQkW1/8OcnRZ/hqpEhGAawOMmTyGe3SJfY
|
||||
st9wjdGM5UEBThMOng02PMHpeIrTdZ1SCT7vuYCtpf5Um1/jwWU3lfiw3yf9eH5KJuSr73d68c8d
|
||||
HZ5CfIGJ2EdeluC6H+7+bpB0r9thNb9qwdLg0Yab3+S9SLMC2j3F5Fff8LFdOUCYizTDyzv4bHpP
|
||||
6ac6eXfwgFh92vCkWhRNVCWPNyti2/rH+cMHNj1JsNoFlHuN7gB/eu6w6c25a9Y3PPUePzEgrau5
|
||||
H8sWfs+vkiiS0zgr2U3zD3+J+wVawLMiq4p5qitb/jt0nC+6h+BYnT1287/G5XhioWDjCza+Xgfo
|
||||
O+ImOEGXkOweukDoxFwH0XNRsU+Pbb/A8wMC9TsU5K64VBuDR79CY/QCchIPLaBRtbTIL+Mz9oJz
|
||||
l87v0OrgtH9Zf/g0a+CDBJ4NPhNN2ec9laPXgPKhVLE5JdeKX0x5RUNfM8S5srrDea1Xw+FCCXFD
|
||||
CrRe5NUYNpa/89Ys+VYziBMXuJ5oEWVcdsEqioccNP1RmVB9P1V00BkfIvKZsDKadUA2fQ/XhcrE
|
||||
PJNDwNvzOwTIJQO2ETvR9YVYH8nXu0WwA0+UZRIA4bQcpenr9i9tuJvHHAYnk8XWk+oOnYzWRvVR
|
||||
OmI1HL7OWl4SHsTCqnprBXhtBfPK/PETVLK8gxXoTQt/9ce1rUOwvtxuhTrfdjgUOSXd+jUR4n1o
|
||||
kNvVYPqhOQAIG9ioRFWYkfbV1JVQNdl8i9/ozBoBJpS6XvLKPnlr9BBHe9hO/dMT/UNTzbS7t+Cn
|
||||
f8yBVakwn+UL0voZYVUyBNpMT52Hm77CrtAjbfzohiotXS0RXSqkYNz8Ydg4TE0co7lWa/SwHrAX
|
||||
fEDOF8WmA2aphGqp/GAVnYyK/eknTG4zdrf+Bx837RvWt64izidZKpKhtIOWjn1vTU9cvw4OKP74
|
||||
G27y9SqujqvyT3ycnM+C+aenLkXDTEAzW0B//mckqQGxxPuQbniXw7NPD9P+ZlT93BwoA5Rl4rFN
|
||||
Mgp+fgtqC2AS75kuwXjlpQhKB7qfaIwkrb37wgCUnKWbuXTUtn5OCX563rt7WbVemb6Al0cee4Ji
|
||||
zXRuojL+g/+O63zBuvVv4MjnPTGKwk6XJCgH4BhZNoHhwlS02KEYZucu8sQDklN+5y8t2urptLjE
|
||||
p+MLExaQUztjL3T1YN09CxMpjOYRZ/emv35E9/N7sBeNM/0mQTdB8WR+seXga7pgoy/h7/w3Pkzn
|
||||
PNQiKO/PNn7gRAl+/BSuzXTC0fgEdG57aw+as4mx7J/NXiidaw09XTbxTx99Gc/rwOPb9eTg3phf
|
||||
P6aFy90WPenD3qtl67cARTR1bJH7uZ/rZ7GiD7u+iMWhD5imW9BB/l7zxFJq4AxN1CVQi/rrNOtp
|
||||
nC7P1/AGSuFzePOHqzk9f99w81vxIXqfKf1IdvvTo+QEKxyspfSB0OpmER8SsDrLvm5l2Do0m3a+
|
||||
eXXW2pxldLw7hDjbeQ9bPwKW7TPHbrC8Kirydgw7WUqJcUw9IJCR+n/8Gquw39Xy45PdyaNbf0yv
|
||||
eNeWINSDucUGIUK6/PrV/bd4TWCrH6yUFgXqtM8O6+cdAtMtsHW4a5OAWN6t0pYqPUAoNUzk9bJS
|
||||
/vRVDiRDD6dff41w4/wGh3Z4ELvwTMpLvSr9/GmibPySpgstkYZcxoNKYlLaRUCFW78HywNk6fLh
|
||||
8lL6Puob1hP1E7TNATDQGTST2JE3ONRIPy6MmF3mwQ0vh7h9F1B+zBGJk5et8Y3bmpDgffTzryjX
|
||||
vp0WTiz3msRVJ2CuVWcPHGXhyeH2dCrOlDwVdIXJEwdNu75Tzxcf7YziTDJ2bqu1Fx4Q/v2bCvjP
|
||||
f/311//4TRjUbZZ/tsGAMV/Gf//XqMC/hX8PdfL5/BlDmIakyP/+539PIPz97dv6O/7PsX3nzfD3
|
||||
P38Jf0YN/h7bMfn8P4//tb3oP//1vwAAAP//AwDPjjDU3iAAAA==
headers:
CF-RAY:
- 97d174615cfef96b-SJC
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Wed, 10 Sep 2025 19:50:30 GMT
Server:
- cloudflare
Set-Cookie:
- __cf_bm=eYh.U8kiOc9xS0U2L8g4MiopA6w9E7lUuodx4D.rMOU-1757533830-1.0.1.1-YO2od1GbrHRgwOEdJSw3gCcNy8XFBF_O.jT_f8F2z6dWZsBIS7XPLWUpJAzenthO1wXRkx7OZDmVrPCPro2sSj1srJCxCY8KgIwcjw5NWGU;
path=/; expires=Wed, 10-Sep-25 20:20:30 GMT; domain=.api.openai.com; HttpOnly;
Secure; SameSite=None
- _cfuvid=vkbBikeJy.dDV.o7ZB2HjcJaD_hkp9dDeCEBfHZxG94-1757533830280-0.0.1.1-604800000;
path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
Transfer-Encoding:
- chunked
X-Content-Type-Options:
- nosniff
access-control-allow-origin:
- '*'
access-control-expose-headers:
- X-Request-ID
alt-svc:
- h3=":443"; ma=86400
cf-cache-status:
- DYNAMIC
openai-model:
- text-embedding-3-small
openai-organization:
- crewai-iuxna1
openai-processing-ms:
- '172'
openai-project:
- proj_xitITlrFeen7zjNSzML82h9x
openai-version:
- '2020-10-01'
strict-transport-security:
- max-age=31536000; includeSubDomains; preload
via:
- envoy-router-59c745856-z5gxd
x-envoy-upstream-service-time:
- '267'
x-openai-proxy-wasm:
- v0.1
x-ratelimit-limit-requests:
- '10000'
x-ratelimit-limit-tokens:
- '10000000'
x-ratelimit-remaining-requests:
- '9999'
x-ratelimit-remaining-tokens:
- '9999996'
x-ratelimit-reset-requests:
- 6ms
x-ratelimit-reset-tokens:
- 0s
x-request-id:
- req_06f3f9465f1a4af0ae5a4d8a58f19321
status:
code: 200
message: OK
version: 1

@@ -1,11 +1,11 @@
|
||||
"""Test Agent creation and execution basic functionality."""
|
||||
|
||||
import hashlib
|
||||
import json
|
||||
from collections import defaultdict
|
||||
from concurrent.futures import Future
|
||||
from hashlib import md5
|
||||
from unittest import mock
|
||||
from unittest.mock import ANY, MagicMock, patch
|
||||
from collections import defaultdict
|
||||
|
||||
import pydantic_core
|
||||
import pytest
|
||||
@@ -14,11 +14,29 @@ from crewai.agent import Agent
|
||||
from crewai.agents import CacheHandler
|
||||
from crewai.crew import Crew
|
||||
from crewai.crews.crew_output import CrewOutput
|
||||
from crewai.events.event_bus import crewai_event_bus
|
||||
from crewai.events.types.crew_events import (
|
||||
CrewTestCompletedEvent,
|
||||
CrewTestStartedEvent,
|
||||
CrewTrainCompletedEvent,
|
||||
CrewTrainStartedEvent,
|
||||
)
|
||||
from crewai.events.types.memory_events import (
|
||||
MemoryQueryCompletedEvent,
|
||||
MemoryQueryFailedEvent,
|
||||
MemoryQueryStartedEvent,
|
||||
MemoryRetrievalCompletedEvent,
|
||||
MemoryRetrievalStartedEvent,
|
||||
MemorySaveCompletedEvent,
|
||||
MemorySaveFailedEvent,
|
||||
MemorySaveStartedEvent,
|
||||
)
|
||||
from crewai.flow import Flow, start
|
||||
from crewai.knowledge.knowledge import Knowledge
|
||||
from crewai.knowledge.source.string_knowledge_source import StringKnowledgeSource
|
||||
from crewai.llm import LLM
|
||||
from crewai.memory.contextual.contextual_memory import ContextualMemory
|
||||
from crewai.memory.external.external_memory import ExternalMemory
|
||||
from crewai.memory.long_term.long_term_memory import LongTermMemory
|
||||
from crewai.memory.short_term.short_term_memory import ShortTermMemory
|
||||
from crewai.process import Process
|
||||
@@ -27,28 +45,9 @@ from crewai.tasks.conditional_task import ConditionalTask
|
||||
from crewai.tasks.output_format import OutputFormat
|
||||
from crewai.tasks.task_output import TaskOutput
|
||||
from crewai.types.usage_metrics import UsageMetrics
|
||||
from crewai.events.event_bus import crewai_event_bus
|
||||
from crewai.events.types.crew_events import (
|
||||
CrewTestCompletedEvent,
|
||||
CrewTestStartedEvent,
|
||||
CrewTrainCompletedEvent,
|
||||
CrewTrainStartedEvent,
|
||||
)
|
||||
from crewai.utilities.rpm_controller import RPMController
|
||||
from crewai.utilities.task_output_storage_handler import TaskOutputStorageHandler
|
||||
|
||||
from crewai.events.types.memory_events import (
|
||||
MemorySaveStartedEvent,
|
||||
MemorySaveCompletedEvent,
|
||||
MemorySaveFailedEvent,
|
||||
MemoryQueryStartedEvent,
|
||||
MemoryQueryCompletedEvent,
|
||||
MemoryQueryFailedEvent,
|
||||
MemoryRetrievalStartedEvent,
|
||||
MemoryRetrievalCompletedEvent,
|
||||
)
|
||||
from crewai.memory.external.external_memory import ExternalMemory
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def ceo():
|
||||
@@ -364,7 +363,7 @@ def test_hierarchical_process(researcher, writer):
|
||||
|
||||
assert (
|
||||
result.raw
|
||||
== "1. **The Rise of Autonomous AI Agents in Daily Life** \n As artificial intelligence technology progresses, the integration of autonomous AI agents into everyday life becomes increasingly prominent. These agents, capable of making decisions without human intervention, are reshaping industries from healthcare to finance. Exploring case studies where autonomous AI has successfully decreased operational costs or improved efficiency can reveal not only the benefits but also the ethical implications of delegating decision-making to machines. This topic offers an exciting opportunity to dive into the AI landscape, showcasing current developments such as AI assistants and autonomous vehicles.\n\n2. **Ethical Implications of Generative AI in Creative Industries** \n The surge of generative AI tools in creative fields, such as art, music, and writing, has sparked a heated debate about authorship and originality. This article could investigate how these tools are being used by artists and creators, examining both the potential for innovation and the risk of devaluing traditional art forms. Highlighting perspectives from creators, legal experts, and ethicists could provide a comprehensive overview of the challenges faced, including copyright concerns and the emotional impact on human artists. This discussion is vital as the creative landscape evolves alongside technological advancements, making it ripe for exploration.\n\n3. **AI in Climate Change Mitigation: Current Solutions and Future Potential** \n As the world grapples with climate change, AI technology is increasingly being harnessed to develop innovative solutions for sustainability. From predictive analytics that optimize energy consumption to machine learning algorithms that improve carbon capture methods, AI's potential in environmental science is vast. This topic invites an exploration of existing AI applications in climate initiatives, with a focus on groundbreaking research and initiatives aimed at reducing humanity's carbon footprint. Highlighting successful projects and technology partnerships can illustrate the positive impact AI can have on global climate efforts, inspiring further exploration and investment in this area.\n\n4. **The Future of Work: How AI is Reshaping Employment Landscapes** \n The discussions around AI's impact on the workforce are both urgent and complex, as advances in automation and machine learning continue to transform the job market. This article could delve into the current trends of AI-driven job displacement alongside opportunities for upskilling and the creation of new job roles. By examining case studies of companies that integrate AI effectively and the resulting workforce adaptations, readers can gain valuable insights into preparing for a future where humans and AI collaborate. This exploration highlights the importance of policies that promote workforce resilience in the face of change.\n\n5. **Decentralized AI: Exploring the Role of Blockchain in AI Development** \n As blockchain technology sweeps through various sectors, its application in AI development presents a fascinating topic worth examining. Decentralized AI could address issues of data privacy, security, and democratization in AI models by allowing users to retain ownership of data while benefiting from AI's capabilities. This article could analyze how decentralized networks are disrupting traditional AI development models, featuring innovative projects that harness the synergy between blockchain and AI. 
Highlighting potential pitfalls and the future landscape of decentralized AI could stimulate discussion among technologists, entrepreneurs, and policymakers alike."
|
||||
== "**1. The Rise of Autonomous AI Agents in Daily Life** \nAs artificial intelligence technology progresses, the integration of autonomous AI agents into everyday life becomes increasingly prominent. These agents, capable of making decisions without human intervention, are reshaping industries from healthcare to finance. Exploring case studies where autonomous AI has successfully decreased operational costs or improved efficiency can reveal not only the benefits but also the ethical implications of delegating decision-making to machines. This topic offers an exciting opportunity to dive into the AI landscape, showcasing current developments such as AI assistants and autonomous vehicles.\n\n**2. Ethical Implications of Generative AI in Creative Industries** \nThe surge of generative AI tools in creative fields, such as art, music, and writing, has sparked a heated debate about authorship and originality. This article could investigate how these tools are being used by artists and creators, examining both the potential for innovation and the risk of devaluing traditional art forms. Highlighting perspectives from creators, legal experts, and ethicists could provide a comprehensive overview of the challenges faced, including copyright concerns and the emotional impact on human artists. This discussion is vital as the creative landscape evolves alongside technological advancements, making it ripe for exploration.\n\n**3. AI in Climate Change Mitigation: Current Solutions and Future Potential** \nAs the world grapples with climate change, AI technology is increasingly being harnessed to develop innovative solutions for sustainability. From predictive analytics that optimize energy consumption to machine learning algorithms that improve carbon capture methods, AI's potential in environmental science is vast. This topic invites an exploration of existing AI applications in climate initiatives, with a focus on groundbreaking research and initiatives aimed at reducing humanity's carbon footprint. Highlighting successful projects and technology partnerships can illustrate the positive impact AI can have on global climate efforts, inspiring further exploration and investment in this area.\n\n**4. The Future of Work: How AI is Reshaping Employment Landscapes** \nThe discussions around AI's impact on the workforce are both urgent and complex, as advances in automation and machine learning continue to transform the job market. This article could delve into the current trends of AI-driven job displacement alongside opportunities for upskilling and the creation of new job roles. By examining case studies of companies that integrate AI effectively and the resulting workforce adaptations, readers can gain valuable insights into preparing for a future where humans and AI collaborate. This exploration highlights the importance of policies that promote workforce resilience in the face of change.\n\n**5. Decentralized AI: Exploring the Role of Blockchain in AI Development** \nAs blockchain technology sweeps through various sectors, its application in AI development presents a fascinating topic worth examining. Decentralized AI could address issues of data privacy, security, and democratization in AI models by allowing users to retain ownership of data while benefiting from AI's capabilities. This article could analyze how decentralized networks are disrupting traditional AI development models, featuring innovative projects that harness the synergy between blockchain and AI. 
Highlighting potential pitfalls and the future landscape of decentralized AI could stimulate discussion among technologists, entrepreneurs, and policymakers alike.\n\nThese topics not only reflect current trends but also probe deeper into ethical and practical considerations, making them timely and relevant for contemporary audiences."
|
||||
)
|
||||
|
||||
|
||||
@@ -570,8 +569,6 @@ def test_crew_with_delegating_agents(ceo, writer):
|
||||
|
||||
@pytest.mark.vcr(filter_headers=["authorization"])
|
||||
def test_crew_with_delegating_agents_should_not_override_task_tools(ceo, writer):
|
||||
from typing import Type
|
||||
|
||||
from pydantic import BaseModel, Field
|
||||
|
||||
from crewai.tools import BaseTool
|
||||
@@ -584,7 +581,7 @@ def test_crew_with_delegating_agents_should_not_override_task_tools(ceo, writer)
|
||||
class TestTool(BaseTool):
|
||||
name: str = "Test Tool"
|
||||
description: str = "A test tool that just returns the input"
|
||||
args_schema: Type[BaseModel] = TestToolInput
|
||||
args_schema: type[BaseModel] = TestToolInput
|
||||
|
||||
def _run(self, query: str) -> str:
|
||||
return f"Processed: {query}"
|
||||
@@ -622,18 +619,16 @@ def test_crew_with_delegating_agents_should_not_override_task_tools(ceo, writer)
|
||||
_, kwargs = mock_execute_sync.call_args
|
||||
tools = kwargs["tools"]
|
||||
|
||||
assert any(
|
||||
isinstance(tool, TestTool) for tool in tools
|
||||
), "TestTool should be present"
|
||||
assert any(
|
||||
"delegate" in tool.name.lower() for tool in tools
|
||||
), "Delegation tool should be present"
|
||||
assert any(isinstance(tool, TestTool) for tool in tools), (
|
||||
"TestTool should be present"
|
||||
)
|
||||
assert any("delegate" in tool.name.lower() for tool in tools), (
|
||||
"Delegation tool should be present"
|
||||
)
|
||||
|
||||
|
||||
@pytest.mark.vcr(filter_headers=["authorization"])
|
||||
def test_crew_with_delegating_agents_should_not_override_agent_tools(ceo, writer):
|
||||
from typing import Type
|
||||
|
||||
from pydantic import BaseModel, Field
|
||||
|
||||
from crewai.tools import BaseTool
|
||||
@@ -646,7 +641,7 @@ def test_crew_with_delegating_agents_should_not_override_agent_tools(ceo, writer
|
||||
class TestTool(BaseTool):
|
||||
name: str = "Test Tool"
|
||||
description: str = "A test tool that just returns the input"
|
||||
args_schema: Type[BaseModel] = TestToolInput
|
||||
args_schema: type[BaseModel] = TestToolInput
|
||||
|
||||
def _run(self, query: str) -> str:
|
||||
return f"Processed: {query}"
|
||||
@@ -686,18 +681,16 @@ def test_crew_with_delegating_agents_should_not_override_agent_tools(ceo, writer
|
||||
_, kwargs = mock_execute_sync.call_args
|
||||
tools = kwargs["tools"]
|
||||
|
||||
assert any(
|
||||
isinstance(tool, TestTool) for tool in new_ceo.tools
|
||||
), "TestTool should be present"
|
||||
assert any(
|
||||
"delegate" in tool.name.lower() for tool in tools
|
||||
), "Delegation tool should be present"
|
||||
assert any(isinstance(tool, TestTool) for tool in new_ceo.tools), (
|
||||
"TestTool should be present"
|
||||
)
|
||||
assert any("delegate" in tool.name.lower() for tool in tools), (
|
||||
"Delegation tool should be present"
|
||||
)
|
||||
|
||||
|
||||
@pytest.mark.vcr(filter_headers=["authorization"])
|
||||
def test_task_tools_override_agent_tools(researcher):
|
||||
from typing import Type
|
||||
|
||||
from pydantic import BaseModel, Field
|
||||
|
||||
from crewai.tools import BaseTool
|
||||
@@ -710,7 +703,7 @@ def test_task_tools_override_agent_tools(researcher):
|
||||
class TestTool(BaseTool):
|
||||
name: str = "Test Tool"
|
||||
description: str = "A test tool that just returns the input"
|
||||
args_schema: Type[BaseModel] = TestToolInput
|
||||
args_schema: type[BaseModel] = TestToolInput
|
||||
|
||||
def _run(self, query: str) -> str:
|
||||
return f"Processed: {query}"
|
||||
@@ -718,7 +711,7 @@ def test_task_tools_override_agent_tools(researcher):
|
||||
class AnotherTestTool(BaseTool):
|
||||
name: str = "Another Test Tool"
|
||||
description: str = "Another test tool"
|
||||
args_schema: Type[BaseModel] = TestToolInput
|
||||
args_schema: type[BaseModel] = TestToolInput
|
||||
|
||||
def _run(self, query: str) -> str:
|
||||
return f"Another processed: {query}"
|
||||
@@ -754,7 +747,6 @@ def test_task_tools_override_agent_tools_with_allow_delegation(researcher, write
|
||||
"""
|
||||
Test that task tools override agent tools while preserving delegation tools when allow_delegation=True
|
||||
"""
|
||||
from typing import Type
|
||||
|
||||
from pydantic import BaseModel, Field
|
||||
|
||||
@@ -766,7 +758,7 @@ def test_task_tools_override_agent_tools_with_allow_delegation(researcher, write
|
||||
class TestTool(BaseTool):
|
||||
name: str = "Test Tool"
|
||||
description: str = "A test tool that just returns the input"
|
||||
args_schema: Type[BaseModel] = TestToolInput
|
||||
args_schema: type[BaseModel] = TestToolInput
|
||||
|
||||
def _run(self, query: str) -> str:
|
||||
return f"Processed: {query}"
|
||||
@@ -774,7 +766,7 @@ def test_task_tools_override_agent_tools_with_allow_delegation(researcher, write
|
||||
class AnotherTestTool(BaseTool):
|
||||
name: str = "Another Test Tool"
|
||||
description: str = "Another test tool"
|
||||
args_schema: Type[BaseModel] = TestToolInput
|
||||
args_schema: type[BaseModel] = TestToolInput
|
||||
|
||||
def _run(self, query: str) -> str:
|
||||
return f"Another processed: {query}"
|
||||
@@ -815,17 +807,17 @@ def test_task_tools_override_agent_tools_with_allow_delegation(researcher, write
|
||||
used_tools = kwargs["tools"]
|
||||
|
||||
# Confirm AnotherTestTool is present but TestTool is not
|
||||
assert any(
|
||||
isinstance(tool, AnotherTestTool) for tool in used_tools
|
||||
), "AnotherTestTool should be present"
|
||||
assert not any(
|
||||
isinstance(tool, TestTool) for tool in used_tools
|
||||
), "TestTool should not be present among used tools"
|
||||
assert any(isinstance(tool, AnotherTestTool) for tool in used_tools), (
|
||||
"AnotherTestTool should be present"
|
||||
)
|
||||
assert not any(isinstance(tool, TestTool) for tool in used_tools), (
|
||||
"TestTool should not be present among used tools"
|
||||
)
|
||||
|
||||
# Confirm delegation tool(s) are present
|
||||
assert any(
|
||||
"delegate" in tool.name.lower() for tool in used_tools
|
||||
), "Delegation tool should be present"
|
||||
assert any("delegate" in tool.name.lower() for tool in used_tools), (
|
||||
"Delegation tool should be present"
|
||||
)
|
||||
|
||||
# Finally, make sure the agent's original tools remain unchanged
|
||||
assert len(researcher_with_delegation.tools) == 1
|
||||
@@ -929,9 +921,9 @@ def test_cache_hitting_between_agents(researcher, writer, ceo):
|
||||
tool="multiplier", input={"first_number": 2, "second_number": 6}
|
||||
)
|
||||
assert cache_calls[0] == expected_call, f"First call mismatch: {cache_calls[0]}"
|
||||
assert (
|
||||
cache_calls[1] == expected_call
|
||||
), f"Second call mismatch: {cache_calls[1]}"
|
||||
assert cache_calls[1] == expected_call, (
|
||||
f"Second call mismatch: {cache_calls[1]}"
|
||||
)
|
||||
|
||||
|
||||
@pytest.mark.vcr(filter_headers=["authorization"])
|
||||
@@ -1042,7 +1034,7 @@ def test_crew_kickoff_streaming_usage_metrics():
|
||||
assert result.token_usage.cached_prompt_tokens == 0
|
||||
|
||||
|
||||
def test_agents_rpm_is_never_set_if_crew_max_RPM_is_not_set():
|
||||
def test_agents_rpm_is_never_set_if_crew_max_rpm_is_not_set():
|
||||
agent = Agent(
|
||||
role="test role",
|
||||
goal="test goal",
|
||||
@@ -1395,8 +1387,9 @@ def test_kickoff_for_each_error_handling():
|
||||
crew = Crew(agents=[agent], tasks=[task])
|
||||
|
||||
with patch.object(Crew, "kickoff") as mock_kickoff:
|
||||
mock_kickoff.side_effect = expected_outputs[:2] + [
|
||||
Exception("Simulated kickoff error")
|
||||
mock_kickoff.side_effect = [
|
||||
*expected_outputs[:2],
|
||||
Exception("Simulated kickoff error"),
|
||||
]
|
||||
with pytest.raises(Exception, match="Simulated kickoff error"):
|
||||
crew.kickoff_for_each(inputs=inputs)
|
||||
@@ -1674,9 +1667,9 @@ def test_code_execution_flag_adds_code_tool_upon_kickoff():
|
||||
|
||||
# Verify that exactly one tool was used and it was a CodeInterpreterTool
|
||||
assert len(used_tools) == 1, "Should have exactly one tool"
|
||||
assert isinstance(
|
||||
used_tools[0], CodeInterpreterTool
|
||||
), "Tool should be CodeInterpreterTool"
|
||||
assert isinstance(used_tools[0], CodeInterpreterTool), (
|
||||
"Tool should be CodeInterpreterTool"
|
||||
)
|
||||
|
||||
|
||||
@pytest.mark.vcr(filter_headers=["authorization"])
|
||||
@@ -1760,10 +1753,10 @@ def test_agent_usage_metrics_are_captured_for_hierarchical_process():
|
||||
assert result.raw == "Howdy!"
|
||||
|
||||
assert result.token_usage == UsageMetrics(
|
||||
total_tokens=2390,
|
||||
prompt_tokens=2264,
|
||||
completion_tokens=126,
|
||||
successful_requests=4,
|
||||
total_tokens=1673,
|
||||
prompt_tokens=1562,
|
||||
completion_tokens=111,
|
||||
successful_requests=3,
|
||||
cached_prompt_tokens=0,
|
||||
)
|
||||
|
||||
@@ -2179,8 +2172,7 @@ def test_tools_with_custom_caching():
|
||||
return first_number * second_number
|
||||
|
||||
def cache_func(args, result):
|
||||
cache = result % 2 == 0
|
||||
return cache
|
||||
return result % 2 == 0
|
||||
|
||||
multiplcation_tool.cache_function = cache_func
|
||||
|
||||
@@ -2884,7 +2876,7 @@ def test_manager_agent_with_tools_raises_exception(researcher, writer):
|
||||
tasks=[task],
|
||||
)
|
||||
|
||||
with pytest.raises(Exception):
|
||||
with pytest.raises(Exception, match="Manager agent should not have tools"):
|
||||
crew.kickoff()
|
||||
|
||||
|
||||
@@ -3108,7 +3100,7 @@ def test_crew_task_db_init():
|
||||
db_handler.load()
|
||||
assert True # If we reach this point, no exception was raised
|
||||
except Exception as e:
|
||||
pytest.fail(f"An exception was raised: {str(e)}")
|
||||
pytest.fail(f"An exception was raised: {e!s}")
|
||||
|
||||
|
||||
@pytest.mark.vcr(filter_headers=["authorization"])
|
||||
@@ -3494,8 +3486,9 @@ def test_key(researcher, writer):
|
||||
process=Process.sequential,
|
||||
tasks=tasks,
|
||||
)
|
||||
hash = hashlib.md5(
|
||||
f"{researcher.key}|{writer.key}|{tasks[0].key}|{tasks[1].key}".encode()
|
||||
hash = md5(
|
||||
f"{researcher.key}|{writer.key}|{tasks[0].key}|{tasks[1].key}".encode(),
|
||||
usedforsecurity=False,
|
||||
).hexdigest()
|
||||
|
||||
assert crew.key == hash
|
||||
@@ -3534,8 +3527,9 @@ def test_key_with_interpolated_inputs():
|
||||
process=Process.sequential,
|
||||
tasks=tasks,
|
||||
)
|
||||
hash = hashlib.md5(
|
||||
f"{researcher.key}|{writer.key}|{tasks[0].key}|{tasks[1].key}".encode()
|
||||
hash = md5(
|
||||
f"{researcher.key}|{writer.key}|{tasks[0].key}|{tasks[1].key}".encode(),
|
||||
usedforsecurity=False,
|
||||
).hexdigest()
|
||||
|
||||
assert crew.key == hash
|
||||
@@ -3815,16 +3809,15 @@ def test_fetch_inputs():
|
||||
expected_placeholders = {"role_detail", "topic", "field"}
|
||||
actual_placeholders = crew.fetch_inputs()
|
||||
|
||||
assert (
|
||||
actual_placeholders == expected_placeholders
|
||||
), f"Expected {expected_placeholders}, but got {actual_placeholders}"
|
||||
assert actual_placeholders == expected_placeholders, (
|
||||
f"Expected {expected_placeholders}, but got {actual_placeholders}"
|
||||
)
|
||||
|
||||
|
||||
def test_task_tools_preserve_code_execution_tools():
|
||||
"""
|
||||
Test that task tools don't override code execution tools when allow_code_execution=True
|
||||
"""
|
||||
from typing import Type
|
||||
|
||||
# Mock embedchain initialization to prevent race conditions in parallel CI execution
|
||||
with patch("embedchain.client.Client.setup"):
|
||||
@@ -3841,7 +3834,7 @@ def test_task_tools_preserve_code_execution_tools():
|
||||
class TestTool(BaseTool):
|
||||
name: str = "Test Tool"
|
||||
description: str = "A test tool that just returns the input"
|
||||
args_schema: Type[BaseModel] = TestToolInput
|
||||
args_schema: type[BaseModel] = TestToolInput
|
||||
|
||||
def _run(self, query: str) -> str:
|
||||
return f"Processed: {query}"
|
||||
@@ -3892,20 +3885,20 @@ def test_task_tools_preserve_code_execution_tools():
|
||||
used_tools = kwargs["tools"]
|
||||
|
||||
# Verify all expected tools are present
|
||||
assert any(
|
||||
isinstance(tool, TestTool) for tool in used_tools
|
||||
), "Task's TestTool should be present"
|
||||
assert any(
|
||||
isinstance(tool, CodeInterpreterTool) for tool in used_tools
|
||||
), "CodeInterpreterTool should be present"
|
||||
assert any(
|
||||
"delegate" in tool.name.lower() for tool in used_tools
|
||||
), "Delegation tool should be present"
|
||||
assert any(isinstance(tool, TestTool) for tool in used_tools), (
|
||||
"Task's TestTool should be present"
|
||||
)
|
||||
assert any(isinstance(tool, CodeInterpreterTool) for tool in used_tools), (
|
||||
"CodeInterpreterTool should be present"
|
||||
)
|
||||
assert any("delegate" in tool.name.lower() for tool in used_tools), (
|
||||
"Delegation tool should be present"
|
||||
)
|
||||
|
||||
# Verify the total number of tools (TestTool + CodeInterpreter + 2 delegation tools)
|
||||
assert (
|
||||
len(used_tools) == 4
|
||||
), "Should have TestTool, CodeInterpreter, and 2 delegation tools"
|
||||
assert len(used_tools) == 4, (
|
||||
"Should have TestTool, CodeInterpreter, and 2 delegation tools"
|
||||
)
|
||||
|
||||
|
||||
@pytest.mark.vcr(filter_headers=["authorization"])
|
||||
@@ -3949,9 +3942,9 @@ def test_multimodal_flag_adds_multimodal_tools():
|
||||
used_tools = kwargs["tools"]
|
||||
|
||||
# Check that the multimodal tool was added
|
||||
assert any(
|
||||
isinstance(tool, AddImageTool) for tool in used_tools
|
||||
), "AddImageTool should be present when agent is multimodal"
|
||||
assert any(isinstance(tool, AddImageTool) for tool in used_tools), (
|
||||
"AddImageTool should be present when agent is multimodal"
|
||||
)
|
||||
|
||||
# Verify we have exactly one tool (just the AddImageTool)
|
||||
assert len(used_tools) == 1, "Should only have the AddImageTool"
|
||||
@@ -4215,9 +4208,9 @@ def test_crew_guardrail_feedback_in_context():
|
||||
assert len(execution_contexts) > 1, "Task should have been executed multiple times"
|
||||
|
||||
# Verify that the second execution included the guardrail feedback
|
||||
assert (
|
||||
"Output must contain the keyword 'IMPORTANT'" in execution_contexts[1]
|
||||
), "Guardrail feedback should be included in retry context"
|
||||
assert "Output must contain the keyword 'IMPORTANT'" in execution_contexts[1], (
|
||||
"Guardrail feedback should be included in retry context"
|
||||
)
|
||||
|
||||
# Verify final output meets guardrail requirements
|
||||
assert "IMPORTANT" in result.raw, "Final output should contain required keyword"
|
||||
@@ -4232,13 +4225,11 @@ def test_before_kickoff_callback():
|
||||
|
||||
@CrewBase
|
||||
class TestCrewClass:
|
||||
from typing import List
|
||||
|
||||
from crewai.agents.agent_builder.base_agent import BaseAgent
|
||||
from crewai.project import CrewBase, agent, before_kickoff, crew, task
|
||||
|
||||
agents: List[BaseAgent]
|
||||
tasks: List[Task]
|
||||
agents: list[BaseAgent]
|
||||
tasks: list[Task]
|
||||
|
||||
agents_config = None
|
||||
tasks_config = None
|
||||
@@ -4262,12 +4253,11 @@ def test_before_kickoff_callback():
|
||||
|
||||
@task
|
||||
def my_task(self):
|
||||
task = Task(
|
||||
return Task(
|
||||
description="Test task description",
|
||||
expected_output="Test expected output",
|
||||
agent=self.my_agent(),
|
||||
)
|
||||
return task
|
||||
|
||||
@crew
|
||||
def crew(self):
|
||||
@@ -4433,46 +4423,46 @@ def test_crew_copy_with_memory():
|
||||
try:
|
||||
crew_copy = crew.copy()
|
||||
|
||||
assert hasattr(
|
||||
crew_copy, "_short_term_memory"
|
||||
), "Copied crew should have _short_term_memory"
|
||||
assert (
|
||||
crew_copy._short_term_memory is not None
|
||||
), "Copied _short_term_memory should not be None"
|
||||
assert (
|
||||
id(crew_copy._short_term_memory) != original_short_term_id
|
||||
), "Copied _short_term_memory should be a new object"
|
||||
assert hasattr(crew_copy, "_short_term_memory"), (
|
||||
"Copied crew should have _short_term_memory"
|
||||
)
|
||||
assert crew_copy._short_term_memory is not None, (
|
||||
"Copied _short_term_memory should not be None"
|
||||
)
|
||||
assert id(crew_copy._short_term_memory) != original_short_term_id, (
|
||||
"Copied _short_term_memory should be a new object"
|
||||
)
|
||||
|
||||
assert hasattr(
|
||||
crew_copy, "_long_term_memory"
|
||||
), "Copied crew should have _long_term_memory"
|
||||
assert (
|
||||
crew_copy._long_term_memory is not None
|
||||
), "Copied _long_term_memory should not be None"
|
||||
assert (
|
||||
id(crew_copy._long_term_memory) != original_long_term_id
|
||||
), "Copied _long_term_memory should be a new object"
|
||||
assert hasattr(crew_copy, "_long_term_memory"), (
|
||||
"Copied crew should have _long_term_memory"
|
||||
)
|
||||
assert crew_copy._long_term_memory is not None, (
|
||||
"Copied _long_term_memory should not be None"
|
||||
)
|
||||
assert id(crew_copy._long_term_memory) != original_long_term_id, (
|
||||
"Copied _long_term_memory should be a new object"
|
||||
)
|
||||
|
||||
assert hasattr(
|
||||
crew_copy, "_entity_memory"
|
||||
), "Copied crew should have _entity_memory"
|
||||
assert (
|
||||
crew_copy._entity_memory is not None
|
||||
), "Copied _entity_memory should not be None"
|
||||
assert (
|
||||
id(crew_copy._entity_memory) != original_entity_id
|
||||
), "Copied _entity_memory should be a new object"
|
||||
assert hasattr(crew_copy, "_entity_memory"), (
|
||||
"Copied crew should have _entity_memory"
|
||||
)
|
||||
assert crew_copy._entity_memory is not None, (
|
||||
"Copied _entity_memory should not be None"
|
||||
)
|
||||
assert id(crew_copy._entity_memory) != original_entity_id, (
|
||||
"Copied _entity_memory should be a new object"
|
||||
)
|
||||
|
||||
if original_external_id:
|
||||
assert hasattr(
|
||||
crew_copy, "_external_memory"
|
||||
), "Copied crew should have _external_memory"
|
||||
assert (
|
||||
crew_copy._external_memory is not None
|
||||
), "Copied _external_memory should not be None"
|
||||
assert (
|
||||
id(crew_copy._external_memory) != original_external_id
|
||||
), "Copied _external_memory should be a new object"
|
||||
assert hasattr(crew_copy, "_external_memory"), (
|
||||
"Copied crew should have _external_memory"
|
||||
)
|
||||
assert crew_copy._external_memory is not None, (
|
||||
"Copied _external_memory should not be None"
|
||||
)
|
||||
assert id(crew_copy._external_memory) != original_external_id, (
|
||||
"Copied _external_memory should be a new object"
|
||||
)
|
||||
else:
|
||||
assert (
|
||||
not hasattr(crew_copy, "_external_memory")
|
||||
@@ -4735,21 +4725,25 @@ def test_ensure_exchanged_messages_are_propagated_to_external_memory():
|
||||
) as external_memory_save:
|
||||
crew.kickoff()
|
||||
|
||||
expected_messages = [
|
||||
{
|
||||
"role": "system",
|
||||
"content": "You are Researcher. You're an expert in research and you love to learn new things.\nYour personal goal is: You research about math.\nTo give my best complete final answer to the task respond using the exact following format:\n\nThought: I now can give a great answer\nFinal Answer: Your final answer must be the great and the most complete as possible, it must be outcome described.\n\nI MUST use these formats, my job depends on it!",
|
||||
},
|
||||
{
|
||||
"role": "user",
|
||||
"content": "\nCurrent Task: Research a topic to teach a kid aged 6 about math.\n\nThis is the expected criteria for your final answer: A topic, explanation, angle, and examples.\nyou MUST return the actual complete content as the final answer, not a summary.\n\n# Useful context: \nExternal memories:\n\n\nBegin! This is VERY important to you, use the tools available and give your best Final Answer, your job depends on it!\n\nThought:",
|
||||
},
|
||||
{
|
||||
"role": "assistant",
|
||||
"content": "I now can give a great answer \nFinal Answer: \n\n**Topic: Understanding Shapes (Geometry)**\n\n**Explanation:** \nShapes are everywhere around us! They are the special forms that we can see in everyday objects. Teaching a 6-year-old about shapes is not only fun but also a way to help them think about the world around them and develop their spatial awareness. We will focus on basic shapes: circle, square, triangle, and rectangle. Understanding these shapes helps kids recognize and describe their environment.\n\n**Angle:** \nLet’s make learning about shapes an adventure! We can turn it into a treasure hunt where the child has to find objects around the house or outside that match the shapes we learn. This hands-on approach helps make the learning stick!\n\n**Examples:** \n1. **Circle:** \n - Explanation: A circle is round and has no corners. It looks like a wheel or a cookie! \n - Activity: Find objects that are circles, such as a clock, a dinner plate, or a ball. Draw a big circle on a paper and then try to draw smaller circles inside it.\n\n2. **Square:** \n - Explanation: A square has four equal sides and four corners. It looks like a box! \n - Activity: Look for squares in books, in windows, or in building blocks. Try to build a tall tower using square blocks!\n\n3. **Triangle:** \n - Explanation: A triangle has three sides and three corners. It looks like a slice of pizza or a roof! \n - Activity: Use crayons to draw a big triangle and then find things that are shaped like a triangle, like a slice of cheese or a traffic sign.\n\n4. **Rectangle:** \n - Explanation: A rectangle has four sides but only opposite sides are equal. It’s like a stretched square! \n - Activity: Search for rectangles, such as a book cover or a door. You can cut out rectangles from colored paper and create a collage!\n\nBy relating the shapes to fun activities and using real-world examples, we not only make learning more enjoyable but also help the child better remember and understand the concept of shapes in math. This foundation forms the basis of their future learning in geometry!",
|
||||
},
|
||||
]
|
||||
external_memory_save.assert_called_once_with(
|
||||
value=ANY,
|
||||
metadata={"description": ANY, "messages": expected_messages},
|
||||
)
|
||||
external_memory_save.assert_called_once()
|
||||
|
||||
call_args = external_memory_save.call_args
|
||||
|
||||
assert "value" in call_args.kwargs or len(call_args.args) > 0
|
||||
assert "metadata" in call_args.kwargs or len(call_args.args) > 1
|
||||
|
||||
if "metadata" in call_args.kwargs:
|
||||
metadata = call_args.kwargs["metadata"]
|
||||
else:
|
||||
metadata = call_args.args[1]
|
||||
|
||||
assert "description" in metadata
|
||||
assert "messages" in metadata
|
||||
assert isinstance(metadata["messages"], list)
|
||||
assert len(metadata["messages"]) >= 2
|
||||
|
||||
messages = metadata["messages"]
|
||||
assert messages[0]["role"] == "system"
|
||||
assert "Researcher" in messages[0]["content"]
|
||||
assert messages[1]["role"] == "user"
|
||||
assert "Research a topic to teach a kid aged 6 about math" in messages[1]["content"]
|
||||
|
||||
@@ -1,11 +1,11 @@
|
||||
"""Test Agent creation and execution basic functionality."""
|
||||
|
||||
import hashlib
|
||||
import ast
|
||||
import json
|
||||
import os
|
||||
import time
|
||||
from functools import partial
|
||||
from typing import Tuple, Union
|
||||
from hashlib import md5
|
||||
from unittest.mock import MagicMock, patch
|
||||
|
||||
import pytest
|
||||
@@ -248,7 +248,7 @@ def test_guardrail_type_error():
|
||||
return (True, x)
|
||||
|
||||
@staticmethod
|
||||
def guardrail_static_fn(x: TaskOutput) -> tuple[bool, Union[str, TaskOutput]]:
|
||||
def guardrail_static_fn(x: TaskOutput) -> tuple[bool, str | TaskOutput]:
|
||||
return (True, x)
|
||||
|
||||
obj = Object()
|
||||
@@ -271,7 +271,7 @@ def test_guardrail_type_error():
|
||||
guardrail=Object.guardrail_static_fn,
|
||||
)
|
||||
|
||||
def error_fn(x: TaskOutput, y: bool) -> Tuple[bool, TaskOutput]:
|
||||
def error_fn(x: TaskOutput, y: bool) -> tuple[bool, TaskOutput]:
|
||||
return (y, x)
|
||||
|
||||
Task(
|
||||
@@ -340,7 +340,7 @@ def test_output_pydantic_hierarchical():
|
||||
)
|
||||
result = crew.kickoff()
|
||||
assert isinstance(result.pydantic, ScoreOutput)
|
||||
assert result.to_dict() == {"score": 5}
|
||||
assert result.to_dict() == {"score": 4}
|
||||
|
||||
|
||||
@pytest.mark.vcr(filter_headers=["authorization"])
|
||||
@@ -401,8 +401,8 @@ def test_output_json_hierarchical():
|
||||
manager_llm="gpt-4o",
|
||||
)
|
||||
result = crew.kickoff()
|
||||
assert result.json == '{"score": 5}'
|
||||
assert result.to_dict() == {"score": 5}
|
||||
assert result.json == '{"score": 4}'
|
||||
assert result.to_dict() == {"score": 4}
|
||||
|
||||
|
||||
@pytest.mark.vcr(filter_headers=["authorization"])
|
||||
@@ -560,8 +560,8 @@ def test_output_json_dict_hierarchical():
|
||||
manager_llm="gpt-4o",
|
||||
)
|
||||
result = crew.kickoff()
|
||||
assert {"score": 5} == result.json_dict
|
||||
assert result.to_dict() == {"score": 5}
|
||||
assert {"score": 4} == result.json_dict
|
||||
assert result.to_dict() == {"score": 4}
|
||||
|
||||
|
||||
@pytest.mark.vcr(filter_headers=["authorization"])
|
||||
@@ -900,11 +900,11 @@ def test_conditional_task_copy_preserves_type():
|
||||
assert isinstance(copied_conditional_task, ConditionalTask)
|
||||
|
||||
|
||||
def test_interpolate_inputs():
|
||||
def test_interpolate_inputs(tmp_path):
|
||||
task = Task(
|
||||
description="Give me a list of 5 interesting ideas about {topic} to explore for an article, what makes them unique and interesting.",
|
||||
expected_output="Bullet point list of 5 interesting ideas about {topic}.",
|
||||
output_file="/tmp/{topic}/output_{date}.txt",
|
||||
output_file=str(tmp_path / "{topic}" / "output_{date}.txt"),
|
||||
)
|
||||
|
||||
task.interpolate_inputs_and_add_conversation_history(
|
||||
@@ -915,7 +915,7 @@ def test_interpolate_inputs():
|
||||
== "Give me a list of 5 interesting ideas about AI to explore for an article, what makes them unique and interesting."
|
||||
)
|
||||
assert task.expected_output == "Bullet point list of 5 interesting ideas about AI."
|
||||
assert task.output_file == "/tmp/AI/output_2025.txt"
|
||||
assert task.output_file == str(tmp_path / "AI" / "output_2025.txt")
|
||||
|
||||
task.interpolate_inputs_and_add_conversation_history(
|
||||
inputs={"topic": "ML", "date": "2025"}
|
||||
@@ -925,7 +925,7 @@ def test_interpolate_inputs():
|
||||
== "Give me a list of 5 interesting ideas about ML to explore for an article, what makes them unique and interesting."
|
||||
)
|
||||
assert task.expected_output == "Bullet point list of 5 interesting ideas about ML."
|
||||
assert task.output_file == "/tmp/ML/output_2025.txt"
|
||||
assert task.output_file == str(tmp_path / "ML" / "output_2025.txt")
|
||||
|
||||
|
||||
def test_interpolate_only():
|
||||
@@ -1074,8 +1074,9 @@ def test_key():
|
||||
description=original_description,
|
||||
expected_output=original_expected_output,
|
||||
)
|
||||
hash = hashlib.md5(
|
||||
f"{original_description}|{original_expected_output}".encode()
|
||||
hash = md5(
|
||||
f"{original_description}|{original_expected_output}".encode(),
|
||||
usedforsecurity=False,
|
||||
).hexdigest()
|
||||
|
||||
assert task.key == hash, "The key should be the hash of the description."
|
||||
@@ -1086,7 +1087,7 @@ def test_key():
|
||||
)
|
||||
|
||||
|
||||
def test_output_file_validation():
|
||||
def test_output_file_validation(tmp_path):
|
||||
"""Test output file path validation."""
|
||||
# Valid paths
|
||||
assert (
|
||||
@@ -1097,13 +1098,15 @@ def test_output_file_validation():
|
||||
).output_file
|
||||
== "output.txt"
|
||||
)
|
||||
# Use secure temporary path instead of /tmp
|
||||
temp_file = tmp_path / "output.txt"
|
||||
assert (
|
||||
Task(
|
||||
description="Test task",
|
||||
expected_output="Test output",
|
||||
output_file="/tmp/output.txt",
|
||||
output_file=str(temp_file),
|
||||
).output_file
|
||||
== "tmp/output.txt"
|
||||
== str(temp_file).lstrip("/") # Remove leading slash to match expected behavior
|
||||
)
|
||||
assert (
|
||||
Task(
|
||||
@@ -1320,7 +1323,7 @@ def test_interpolate_with_list_of_dicts():
|
||||
}
|
||||
result = interpolate_only("{people}", input_data)
|
||||
|
||||
parsed_result = eval(result)
|
||||
parsed_result = ast.literal_eval(result)
|
||||
assert isinstance(parsed_result, list)
|
||||
assert len(parsed_result) == 2
|
||||
assert parsed_result[0]["name"] == "Alice"
|
||||
@@ -1346,7 +1349,7 @@ def test_interpolate_with_nested_structures():
|
||||
}
|
||||
}
|
||||
result = interpolate_only("{company}", input_data)
|
||||
parsed = eval(result)
|
||||
parsed = ast.literal_eval(result)
|
||||
|
||||
assert parsed["name"] == "TechCorp"
|
||||
assert len(parsed["departments"]) == 2
|
||||
@@ -1364,7 +1367,7 @@ def test_interpolate_with_special_characters():
|
||||
}
|
||||
}
|
||||
result = interpolate_only("{special_data}", input_data)
|
||||
parsed = eval(result)
|
||||
parsed = ast.literal_eval(result)
|
||||
|
||||
assert parsed["quotes"] == """This has "double" and 'single' quotes"""
|
||||
assert parsed["unicode"] == "文字化けテスト"
|
||||
@@ -1386,7 +1389,7 @@ def test_interpolate_mixed_types():
|
||||
}
|
||||
}
|
||||
result = interpolate_only("{data}", input_data)
|
||||
parsed = eval(result)
|
||||
parsed = ast.literal_eval(result)
|
||||
|
||||
assert parsed["name"] == "Test Dataset"
|
||||
assert parsed["samples"] == 1000
|
||||
@@ -1409,7 +1412,7 @@ def test_interpolate_complex_combination():
|
||||
]
|
||||
}
|
||||
result = interpolate_only("{report}", input_data)
|
||||
parsed = eval(result)
|
||||
parsed = ast.literal_eval(result)
|
||||
|
||||
assert len(parsed) == 2
|
||||
assert parsed[0]["month"] == "January"
|
||||
@@ -1482,7 +1485,7 @@ def test_interpolate_valid_complex_types():
|
||||
|
||||
# Should not raise any errors
|
||||
result = interpolate_only("{data}", {"data": valid_data})
|
||||
parsed = eval(result)
|
||||
parsed = ast.literal_eval(result)
|
||||
assert parsed["name"] == "Valid Dataset"
|
||||
assert parsed["stats"]["nested"]["deeper"]["b"] == 2.5
|
||||
|
||||
@@ -1512,7 +1515,7 @@ def test_interpolate_valid_types():
|
||||
}
|
||||
|
||||
result = interpolate_only("{data}", {"data": valid_data})
|
||||
parsed = eval(result)
|
||||
parsed = ast.literal_eval(result)
|
||||
|
||||
assert parsed["active"] is True
|
||||
assert parsed["deleted"] is False
|
||||
|
||||
@@ -1,5 +1,3 @@
|
||||
from typing import Optional
|
||||
|
||||
import pytest
|
||||
from pydantic import BaseModel, Field
|
||||
|
||||
@@ -39,6 +37,7 @@ def test_initialization(basic_function, schema_class):
|
||||
assert tool.func == basic_function
|
||||
assert tool.args_schema == schema_class
|
||||
|
||||
|
||||
def test_from_function(basic_function):
|
||||
"""Test creating tool from function"""
|
||||
tool = CrewStructuredTool.from_function(
|
||||
@@ -50,6 +49,7 @@ def test_from_function(basic_function):
|
||||
assert tool.func == basic_function
|
||||
assert isinstance(tool.args_schema, type(BaseModel))
|
||||
|
||||
|
||||
def test_validate_function_signature(basic_function, schema_class):
|
||||
"""Test function signature validation"""
|
||||
tool = CrewStructuredTool(
|
||||
@@ -62,6 +62,7 @@ def test_validate_function_signature(basic_function, schema_class):
|
||||
# Should not raise any exceptions
|
||||
tool._validate_function_signature()
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_ainvoke(basic_function):
|
||||
"""Test asynchronous invocation"""
|
||||
@@ -70,6 +71,7 @@ async def test_ainvoke(basic_function):
|
||||
result = await tool.ainvoke(input={"param1": "test"})
|
||||
assert result == "test 0"
|
||||
|
||||
|
||||
def test_parse_args_dict(basic_function):
|
||||
"""Test parsing dictionary arguments"""
|
||||
tool = CrewStructuredTool.from_function(func=basic_function, name="test_tool")
|
||||
@@ -78,6 +80,7 @@ def test_parse_args_dict(basic_function):
|
||||
assert parsed["param1"] == "test"
|
||||
assert parsed["param2"] == 42
|
||||
|
||||
|
||||
def test_parse_args_string(basic_function):
|
||||
"""Test parsing string arguments"""
|
||||
tool = CrewStructuredTool.from_function(func=basic_function, name="test_tool")
|
||||
@@ -86,6 +89,7 @@ def test_parse_args_string(basic_function):
|
||||
assert parsed["param1"] == "test"
|
||||
assert parsed["param2"] == 42
|
||||
|
||||
|
||||
def test_complex_types():
|
||||
"""Test handling of complex parameter types"""
|
||||
|
||||
@@ -99,6 +103,7 @@ def test_complex_types():
|
||||
result = tool.invoke({"nested": {"key": "value"}, "items": [1, 2, 3]})
|
||||
assert result == "Processed 3 items with 1 nested keys"
|
||||
|
||||
|
||||
def test_schema_inheritance():
|
||||
"""Test tool creation with inherited schema"""
|
||||
|
||||
@@ -119,13 +124,14 @@ def test_schema_inheritance():
     result = tool.invoke({"base_param": "test", "extra_param": 42})
     assert result == "test 42"

+
 def test_default_values_in_schema():
     """Test handling of default values in schema"""

     def default_func(
         required_param: str,
         optional_param: str = "default",
-        nullable_param: Optional[int] = None,
+        nullable_param: int | None = None,
     ) -> str:
         """Test function with default values."""
         return f"{required_param} {optional_param} {nullable_param}"
@@ -144,6 +150,7 @@ def test_default_values_in_schema():
     )
     assert result == "test custom 42"

+
 @pytest.fixture
 def custom_tool_decorator():
     from crewai.tools import tool
@@ -155,6 +162,7 @@ def custom_tool_decorator():

     return custom_tool

+
 @pytest.fixture
 def custom_tool():
     from crewai.tools import BaseTool
@@ -169,17 +177,25 @@ def custom_tool():

     return CustomTool()

+
 def build_simple_crew(tool):
-    from crewai import Agent, Task, Crew
+    from crewai import Agent, Crew, Task

-    agent1 = Agent(role="Simple role", goal="Simple goal", backstory="Simple backstory", tools=[tool])
-
-    say_hi_task = Task(
-        description="Use the custom tool result as answer.", agent=agent1, expected_output="Use the tool result"
+    agent1 = Agent(
+        role="Simple role",
+        goal="Simple goal",
+        backstory="Simple backstory",
+        tools=[tool],
     )

-    crew = Crew(agents=[agent1], tasks=[say_hi_task])
-    return crew
+    say_hi_task = Task(
+        description="Use the custom tool result as answer.",
+        agent=agent1,
+        expected_output="Use the tool result",
+    )
+
+    return Crew(agents=[agent1], tasks=[say_hi_task])

+
 @pytest.mark.vcr(filter_headers=["authorization"])
 def test_async_tool_using_within_isolated_crew(custom_tool):
@@ -188,6 +204,7 @@ def test_async_tool_using_within_isolated_crew(custom_tool):

     assert result.raw == "Hello World from Custom Tool"

+
 @pytest.mark.vcr(filter_headers=["authorization"])
 def test_async_tool_using_decorator_within_isolated_crew(custom_tool_decorator):
     crew = build_simple_crew(custom_tool_decorator)
@@ -195,6 +212,7 @@ def test_async_tool_using_decorator_within_isolated_crew(custom_tool_decorator):

     assert result.raw == "Hello World from Custom Tool"

+
 @pytest.mark.vcr(filter_headers=["authorization"])
 def test_async_tool_within_flow(custom_tool):
     from crewai.flow.flow import Flow
@@ -205,8 +223,7 @@ def test_async_tool_within_flow(custom_tool):
         @start()
         async def start(self):
             crew = build_simple_crew(custom_tool)
-            result = await crew.kickoff_async()
-            return result
+            return await crew.kickoff_async()

     flow = StructuredExampleFlow()
     result = flow.kickoff()
@@ -219,12 +236,141 @@ def test_async_tool_using_decorator_within_flow(custom_tool_decorator):
     class StructuredExampleFlow(Flow):
         from crewai.flow.flow import start

         @start()
         async def start(self):
             crew = build_simple_crew(custom_tool_decorator)
-            result = await crew.kickoff_async()
-            return result
+            return await crew.kickoff_async()

     flow = StructuredExampleFlow()
     result = flow.kickoff()
-    assert result.raw == "Hello World from Custom Tool"
+    assert result.raw == "Hello World from Custom Tool"
+
+
+def test_structured_tool_invoke_calls_func_only_once():
+    """Test that CrewStructuredTool.invoke() calls the underlying function exactly once."""
+    call_count = 0
+    call_history = []
+
+    def counting_function(param: str) -> str:
+        """Function that tracks how many times it's called."""
+        nonlocal call_count
+        call_count += 1
+        call_history.append(f"Call #{call_count} with param: {param}")
+        return f"Result from call #{call_count}: {param}"
+
+    # Create CrewStructuredTool directly
+    tool = CrewStructuredTool.from_function(
+        func=counting_function,
+        name="direct_test_tool",
+        description="Tool to test direct invoke() method",
+    )
+
+    # Call invoke() directly - this is where the bug was
+    result = tool.invoke({"param": "test_value"})
+
+    # Critical assertions that would catch the duplicate execution bug
+    assert call_count == 1, (
+        f"DUPLICATE EXECUTION BUG: Function was called {call_count} times instead of 1. "
+        f"This means CrewStructuredTool.invoke() has duplicate function calls. "
+        f"Call history: {call_history}"
+    )
+
+    assert len(call_history) == 1, (
+        f"Expected 1 call in history, got {len(call_history)}: {call_history}"
+    )
+
+    assert call_history[0] == "Call #1 with param: test_value", (
+        f"Expected 'Call #1 with param: test_value', got: {call_history[0]}"
+    )
+
+    assert result == "Result from call #1: test_value", (
+        f"Expected result from first call, got: {result}"
+    )
+
+
+def test_structured_tool_invoke_multiple_calls_increment_correctly():
+    """Test multiple calls to invoke() to ensure each increments correctly."""
+    call_count = 0
+
+    def incrementing_function(value: int) -> int:
+        nonlocal call_count
+        call_count += 1
+        return value + call_count
+
+    tool = CrewStructuredTool.from_function(
+        func=incrementing_function,
+        name="incrementing_tool",
+        description="Tool that increments on each call",
+    )
+
+    result1 = tool.invoke({"value": 10})
+    assert call_count == 1, (
+        f"After first invoke, expected call_count=1, got {call_count}"
+    )
+    assert result1 == 11, f"Expected 11 (10+1), got {result1}"
+
+    result2 = tool.invoke({"value": 20})
+    assert call_count == 2, (
+        f"After second invoke, expected call_count=2, got {call_count}"
+    )
+    assert result2 == 22, f"Expected 22 (20+2), got {result2}"
+
+    result3 = tool.invoke({"value": 30})
+    assert call_count == 3, (
+        f"After third invoke, expected call_count=3, got {call_count}"
+    )
+    assert result3 == 33, f"Expected 33 (30+3), got {result3}"
+
+
+def test_structured_tool_invoke_with_side_effects():
+    """Test that side effects only happen once per invoke() call."""
+    side_effects = []
+
+    def side_effect_function(action: str) -> str:
+        side_effects.append(f"SIDE_EFFECT: {action} executed at call")
+        return f"Action {action} completed"
+
+    tool = CrewStructuredTool.from_function(
+        func=side_effect_function,
+        name="side_effect_tool",
+        description="Tool with observable side effects",
+    )
+
+    result = tool.invoke({"action": "write_file"})
+
+    assert len(side_effects) == 1, (
+        f"SIDE EFFECT BUG: Expected 1 side effect, got {len(side_effects)}. "
+        f"This indicates the function was called multiple times. "
+        f"Side effects: {side_effects}"
+    )
+
+    assert side_effects[0] == "SIDE_EFFECT: write_file executed at call"
+    assert result == "Action write_file completed"
+
+
+def test_structured_tool_invoke_exception_handling():
+    """Test that exceptions don't cause duplicate execution."""
+    call_count = 0
+
+    def failing_function(should_fail: bool) -> str:
+        nonlocal call_count
+        call_count += 1
+        if should_fail:
+            raise ValueError(f"Intentional failure on call #{call_count}")
+        return f"Success on call #{call_count}"
+
+    tool = CrewStructuredTool.from_function(
+        func=failing_function, name="failing_tool", description="Tool that can fail"
+    )
+
+    result = tool.invoke({"should_fail": False})
+    assert call_count == 1, f"Expected 1 call for success case, got {call_count}"
+    assert result == "Success on call #1"
+
+    call_count = 0
+
+    with pytest.raises(ValueError, match="Intentional failure on call #1"):
+        tool.invoke({"should_fail": True})
+
+    assert call_count == 1
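Taken together, the added tests pin down the contract that CrewStructuredTool.invoke() runs the wrapped function exactly once per call. A minimal sketch of using that contract outside the test suite follows; the import path, tool name, and echo function are assumptions for illustration, not code from this diff:

# Assumed import path for CrewStructuredTool; adjust if the module differs.
from crewai.tools.structured_tool import CrewStructuredTool

calls = []

def echo(text: str) -> str:
    calls.append(text)  # observable side effect used to count executions
    return f"echo: {text}"

tool = CrewStructuredTool.from_function(func=echo, name="echo_tool")
result = tool.invoke({"text": "hi"})

assert result == "echo: hi"
assert len(calls) == 1  # the fix guarantees a single execution per invoke()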