Compare commits
665 commits: devin/1741 ... main
[Commit list: 665 commits, 75ff7dce0c through 52e0a84829; the compare view captured only bare SHA1 hashes, with the Author and Date columns empty.]
[50 binary image files deleted in this diff (docs screenshots, 17–55 KiB each); image previews not shown.]
.cursorrules (new file, 1429 lines; contents not shown in this view)

.env.test (new file, 161 lines)
@@ -0,0 +1,161 @@
# =============================================================================
# Test Environment Variables
# =============================================================================
# This file contains all environment variables needed to run tests locally
# in a way that mimics the GitHub Actions CI environment.

# =============================================================================

# -----------------------------------------------------------------------------
# LLM Provider API Keys
# -----------------------------------------------------------------------------
OPENAI_API_KEY=fake-api-key
ANTHROPIC_API_KEY=fake-anthropic-key
GEMINI_API_KEY=fake-gemini-key
AZURE_API_KEY=fake-azure-key
OPENROUTER_API_KEY=fake-openrouter-key

# -----------------------------------------------------------------------------
# AWS Credentials
# -----------------------------------------------------------------------------
AWS_ACCESS_KEY_ID=fake-aws-access-key
AWS_SECRET_ACCESS_KEY=fake-aws-secret-key
AWS_DEFAULT_REGION=us-east-1
AWS_REGION_NAME=us-east-1

# -----------------------------------------------------------------------------
# Azure OpenAI Configuration
# -----------------------------------------------------------------------------
AZURE_ENDPOINT=https://fake-azure-endpoint.openai.azure.com
AZURE_OPENAI_ENDPOINT=https://fake-azure-endpoint.openai.azure.com
AZURE_OPENAI_API_KEY=fake-azure-openai-key
AZURE_API_VERSION=2024-02-15-preview
OPENAI_API_VERSION=2024-02-15-preview

# -----------------------------------------------------------------------------
# Google Cloud Configuration
# -----------------------------------------------------------------------------
#GOOGLE_CLOUD_PROJECT=fake-gcp-project
#GOOGLE_CLOUD_LOCATION=us-central1

# -----------------------------------------------------------------------------
# OpenAI Configuration
# -----------------------------------------------------------------------------
OPENAI_BASE_URL=https://api.openai.com/v1
OPENAI_API_BASE=https://api.openai.com/v1

# -----------------------------------------------------------------------------
# Search & Scraping Tool API Keys
# -----------------------------------------------------------------------------
SERPER_API_KEY=fake-serper-key
EXA_API_KEY=fake-exa-key
BRAVE_API_KEY=fake-brave-key
FIRECRAWL_API_KEY=fake-firecrawl-key
TAVILY_API_KEY=fake-tavily-key
SERPAPI_API_KEY=fake-serpapi-key
SERPLY_API_KEY=fake-serply-key
LINKUP_API_KEY=fake-linkup-key
PARALLEL_API_KEY=fake-parallel-key

# -----------------------------------------------------------------------------
# Exa Configuration
# -----------------------------------------------------------------------------
EXA_BASE_URL=https://api.exa.ai

# -----------------------------------------------------------------------------
# Web Scraping & Automation
# -----------------------------------------------------------------------------
BRIGHT_DATA_API_KEY=fake-brightdata-key
BRIGHT_DATA_ZONE=fake-zone
BRIGHTDATA_API_URL=https://api.brightdata.com
BRIGHTDATA_DEFAULT_TIMEOUT=600
BRIGHTDATA_DEFAULT_POLLING_INTERVAL=1

OXYLABS_USERNAME=fake-oxylabs-user
OXYLABS_PASSWORD=fake-oxylabs-pass

SCRAPFLY_API_KEY=fake-scrapfly-key
SCRAPEGRAPH_API_KEY=fake-scrapegraph-key

BROWSERBASE_API_KEY=fake-browserbase-key
BROWSERBASE_PROJECT_ID=fake-browserbase-project

HYPERBROWSER_API_KEY=fake-hyperbrowser-key
MULTION_API_KEY=fake-multion-key
APIFY_API_TOKEN=fake-apify-token

# -----------------------------------------------------------------------------
# Database & Vector Store Credentials
# -----------------------------------------------------------------------------
SINGLESTOREDB_URL=mysql://fake:fake@localhost:3306/fake
SINGLESTOREDB_HOST=localhost
SINGLESTOREDB_PORT=3306
SINGLESTOREDB_USER=fake-user
SINGLESTOREDB_PASSWORD=fake-password
SINGLESTOREDB_DATABASE=fake-database
SINGLESTOREDB_CONNECT_TIMEOUT=30

SNOWFLAKE_USER=fake-snowflake-user
SNOWFLAKE_PASSWORD=fake-snowflake-password
SNOWFLAKE_ACCOUNT=fake-snowflake-account
SNOWFLAKE_WAREHOUSE=fake-snowflake-warehouse
SNOWFLAKE_DATABASE=fake-snowflake-database
SNOWFLAKE_SCHEMA=fake-snowflake-schema

WEAVIATE_URL=http://localhost:8080
WEAVIATE_API_KEY=fake-weaviate-key

EMBEDCHAIN_DB_URI=sqlite:///test.db

# Databricks Credentials
DATABRICKS_HOST=https://fake-databricks.cloud.databricks.com
DATABRICKS_TOKEN=fake-databricks-token
DATABRICKS_CONFIG_PROFILE=fake-profile

# MongoDB Credentials
MONGODB_URI=mongodb://fake:fake@localhost:27017/fake

# -----------------------------------------------------------------------------
# CrewAI Platform & Enterprise
# -----------------------------------------------------------------------------
# Setting CREWAI_PLATFORM_INTEGRATION_TOKEN causes these tests to fail:
# =========================== short test summary info ============================
# FAILED tests/test_context.py::TestPlatformIntegrationToken::test_platform_context_manager_basic_usage - AssertionError: assert 'fake-platform-token' is None
#   + where 'fake-platform-token' = get_platform_integration_token()
# FAILED tests/test_context.py::TestPlatformIntegrationToken::test_context_var_isolation_between_tests - AssertionError: assert 'fake-platform-token' is None
#   + where 'fake-platform-token' = get_platform_integration_token()
# FAILED tests/test_context.py::TestPlatformIntegrationToken::test_multiple_sequential_context_managers - AssertionError: assert 'fake-platform-token' is None
#   + where 'fake-platform-token' = get_platform_integration_token()
#CREWAI_PLATFORM_INTEGRATION_TOKEN=fake-platform-token
CREWAI_PERSONAL_ACCESS_TOKEN=fake-personal-token
CREWAI_PLUS_URL=https://fake.crewai.com

# -----------------------------------------------------------------------------
# Other Service API Keys
# -----------------------------------------------------------------------------
ZAPIER_API_KEY=fake-zapier-key
PATRONUS_API_KEY=fake-patronus-key
MINDS_API_KEY=fake-minds-key
HF_TOKEN=fake-hf-token

# -----------------------------------------------------------------------------
# Feature Flags/Testing Modes
# -----------------------------------------------------------------------------
CREWAI_DISABLE_TELEMETRY=true
OTEL_SDK_DISABLED=true
CREWAI_TESTING=true
CREWAI_TRACING_ENABLED=false

# -----------------------------------------------------------------------------
# Testing/CI Configuration
# -----------------------------------------------------------------------------
# VCR recording mode: "none" (default), "new_episodes", "all", "once"
PYTEST_VCR_RECORD_MODE=none

# Set to "true" by GitHub when running in GitHub Actions
# GITHUB_ACTIONS=false

# -----------------------------------------------------------------------------
# Python Configuration
# -----------------------------------------------------------------------------
PYTHONUNBUFFERED=1
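Since every value above is a fake, the file is safe to load wholesale when reproducing CI locally. A minimal sketch, assuming a POSIX-compatible shell and that the suite is run via `uv run pytest` as in the workflows below:

```bash
# Export everything assigned while `set -a` is active, then run the tests.
set -a
source .env.test
set +a
uv run pytest
```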
.github/codeql/codeql-config.yml (new file, 28 lines)
@@ -0,0 +1,28 @@
name: "CodeQL Config"

paths-ignore:
  # Ignore template files - these are boilerplate code that shouldn't be analyzed
  - "lib/crewai/src/crewai/cli/templates/**"
  # Ignore test cassettes - these are test fixtures/recordings
  - "lib/crewai/tests/cassettes/**"
  - "lib/crewai-tools/tests/cassettes/**"
  # Ignore cache and build artifacts
  - ".cache/**"
  # Ignore documentation build artifacts
  - "docs/.cache/**"
  # Ignore experimental code
  - "lib/crewai/src/crewai/experimental/a2a/**"

paths:
  # Include all Python source code from workspace packages
  - "lib/crewai/src/**"
  - "lib/crewai-tools/src/**"
  - "lib/devtools/src/**"
  # Include tests (but exclude cassettes via paths-ignore)
  - "lib/crewai/tests/**"
  - "lib/crewai-tools/tests/**"
  - "lib/devtools/tests/**"

# Configure specific queries or packs if needed
# queries:
#   - uses: security-and-quality
.github/dependabot.yml (new file, 11 lines)
@@ -0,0 +1,11 @@
# To get started with Dependabot version updates, you'll need to specify which
# package ecosystems to update and where the package manifests are located.
# Please see the documentation for all configuration options:
# https://docs.github.com/code-security/dependabot/dependabot-version-updates/configuration-options-for-the-dependabot.yml-file

version: 2
updates:
  - package-ecosystem: uv # See documentation for possible values
    directory: "/" # Location of package manifests
    schedule:
      interval: "weekly"
.github/security.md (modified)
@@ -1,19 +1,50 @@
- CrewAI takes the security of our software products and services seriously, which includes all source code repositories managed through our GitHub organization.
- If you believe you have found a security vulnerability in any CrewAI product or service, please report it to us as described below.
+ ## CrewAI Security Policy
+
- ## Reporting a Vulnerability
- Please do not report security vulnerabilities through public GitHub issues.
- To report a vulnerability, please email us at security@crewai.com.
- Please include the requested information listed below so that we can triage your report more quickly
+ We are committed to protecting the confidentiality, integrity, and availability of the CrewAI ecosystem. This policy explains how to report potential vulnerabilities and what you can expect from us when you do.
+
- - Type of issue (e.g. SQL injection, cross-site scripting, etc.)
- - Full paths of source file(s) related to the manifestation of the issue
- - The location of the affected source code (tag/branch/commit or direct URL)
- - Any special configuration required to reproduce the issue
- - Step-by-step instructions to reproduce the issue (please include screenshots if needed)
- - Proof-of-concept or exploit code (if possible)
- - Impact of the issue, including how an attacker might exploit the issue
+ ### Scope
+
- Once we have received your report, we will respond to you at the email address you provide. If the issue is confirmed, we will release a patch as soon as possible depending on the complexity of the issue.
+ We welcome reports for vulnerabilities that could impact:
+
- At this time, we are not offering a bug bounty program. Any rewards will be at our discretion.
+ - CrewAI-maintained source code and repositories
+ - CrewAI-operated infrastructure and services
+ - Official CrewAI releases, packages, and distributions
+
+ Issues affecting clearly unaffiliated third-party services or user-generated content are out of scope, unless you can demonstrate a direct impact on CrewAI systems or customers.
+
+ ### How to Report
+
+ - **Please do not** disclose vulnerabilities via public GitHub issues, pull requests, or social media.
+ - Email detailed reports to **security@crewai.com** with the subject line `Security Report`.
+ - If you need to share large files or sensitive artifacts, mention it in your email and we will coordinate a secure transfer method.
+
+ ### What to Include
+
+ Providing comprehensive information enables us to validate the issue quickly:
+
+ - **Vulnerability overview** — a concise description and classification (e.g., RCE, privilege escalation)
+ - **Affected components** — repository, branch, tag, or deployed service along with relevant file paths or endpoints
+ - **Reproduction steps** — detailed, step-by-step instructions; include logs, screenshots, or screen recordings when helpful
+ - **Proof-of-concept** — exploit details or code that demonstrates the impact (if available)
+ - **Impact analysis** — severity assessment, potential exploitation scenarios, and any prerequisites or special configurations
+
+ ### Our Commitment
+
+ - **Acknowledgement:** We aim to acknowledge your report within two business days.
+ - **Communication:** We will keep you informed about triage results, remediation progress, and planned release timelines.
+ - **Resolution:** Confirmed vulnerabilities will be prioritized based on severity and fixed as quickly as possible.
+ - **Recognition:** We currently do not run a bug bounty program; any rewards or recognition are issued at CrewAI's discretion.
+
+ ### Coordinated Disclosure
+
+ We ask that you allow us a reasonable window to investigate and remediate confirmed issues before any public disclosure. We will coordinate publication timelines with you whenever possible.
+
+ ### Safe Harbor
+
+ We will not pursue or support legal action against individuals who, in good faith:
+
+ - Follow this policy and refrain from violating any applicable laws
+ - Avoid privacy violations, data destruction, or service disruption
+ - Limit testing to systems in scope and respect rate limits and terms of service
+
+ If you are unsure whether your testing is covered, please contact us at **security@crewai.com** before proceeding.
.github/workflows/build-uv-cache.yml (new file, 48 lines)
@@ -0,0 +1,48 @@
name: Build uv cache

on:
  push:
    branches:
      - main
    paths:
      - "uv.lock"
      - "pyproject.toml"
  schedule:
    - cron: "0 0 */5 * *" # Run every 5 days at midnight UTC to prevent cache expiration
  workflow_dispatch:

permissions:
  contents: read

jobs:
  build-cache:
    runs-on: ubuntu-latest
    strategy:
      matrix:
        python-version: ["3.10", "3.11", "3.12", "3.13"]

    steps:
      - name: Checkout repository
        uses: actions/checkout@v4

      - name: Install uv
        uses: astral-sh/setup-uv@v6
        with:
          version: "0.8.4"
          python-version: ${{ matrix.python-version }}
          enable-cache: false

      - name: Install dependencies and populate cache
        run: |
          echo "Building global UV cache for Python ${{ matrix.python-version }}..."
          uv sync --all-groups --all-extras --no-install-project
          echo "Cache populated successfully"

      - name: Save uv caches
        uses: actions/cache/save@v4
        with:
          path: |
            ~/.cache/uv
            ~/.local/share/uv
            .venv
          key: uv-main-py${{ matrix.python-version }}-${{ hashFiles('uv.lock') }}
.github/workflows/codeql.yml (new file, 103 lines)
@@ -0,0 +1,103 @@
# For most projects, this workflow file will not need changing; you simply need
# to commit it to your repository.
#
# You may wish to alter this file to override the set of languages analyzed,
# or to provide custom queries or build logic.
#
# ******** NOTE ********
# We have attempted to detect the languages in your repository. Please check
# the `language` matrix defined below to confirm you have the correct set of
# supported CodeQL languages.
#
name: "CodeQL Advanced"

on:
  push:
    branches: [ "main" ]
    paths-ignore:
      - "lib/crewai/src/crewai/cli/templates/**"
  pull_request:
    branches: [ "main" ]
    paths-ignore:
      - "lib/crewai/src/crewai/cli/templates/**"

jobs:
  analyze:
    name: Analyze (${{ matrix.language }})
    # Runner size impacts CodeQL analysis time. To learn more, please see:
    #   - https://gh.io/recommended-hardware-resources-for-running-codeql
    #   - https://gh.io/supported-runners-and-hardware-resources
    #   - https://gh.io/using-larger-runners (GitHub.com only)
    # Consider using larger runners or machines with greater resources for possible analysis time improvements.
    runs-on: ${{ (matrix.language == 'swift' && 'macos-latest') || 'ubuntu-latest' }}
    permissions:
      # required for all workflows
      security-events: write

      # required to fetch internal or private CodeQL packs
      packages: read

      # only required for workflows in private repositories
      actions: read
      contents: read

    strategy:
      fail-fast: false
      matrix:
        include:
          - language: actions
            build-mode: none
          - language: python
            build-mode: none
        # CodeQL supports the following values keywords for 'language': 'actions', 'c-cpp', 'csharp', 'go', 'java-kotlin', 'javascript-typescript', 'python', 'ruby', 'rust', 'swift'
        # Use `c-cpp` to analyze code written in C, C++ or both
        # Use 'java-kotlin' to analyze code written in Java, Kotlin or both
        # Use 'javascript-typescript' to analyze code written in JavaScript, TypeScript or both
        # To learn more about changing the languages that are analyzed or customizing the build mode for your analysis,
        # see https://docs.github.com/en/code-security/code-scanning/creating-an-advanced-setup-for-code-scanning/customizing-your-advanced-setup-for-code-scanning.
        # If you are analyzing a compiled language, you can modify the 'build-mode' for that language to customize how
        # your codebase is analyzed, see https://docs.github.com/en/code-security/code-scanning/creating-an-advanced-setup-for-code-scanning/codeql-code-scanning-for-compiled-languages
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4

      # Add any setup steps before running the `github/codeql-action/init` action.
      # This includes steps like installing compilers or runtimes (`actions/setup-node`
      # or others). This is typically only required for manual builds.
      # - name: Setup runtime (example)
      #   uses: actions/setup-example@v1

      # Initializes the CodeQL tools for scanning.
      - name: Initialize CodeQL
        uses: github/codeql-action/init@v3
        with:
          languages: ${{ matrix.language }}
          build-mode: ${{ matrix.build-mode }}
          config-file: ./.github/codeql/codeql-config.yml
          # If you wish to specify custom queries, you can do so here or in a config file.
          # By default, queries listed here will override any specified in a config file.
          # Prefix the list here with "+" to use these queries and those in the config file.

          # For more details on CodeQL's query packs, refer to: https://docs.github.com/en/code-security/code-scanning/automatically-scanning-your-code-for-vulnerabilities-and-errors/configuring-code-scanning#using-queries-in-ql-packs
          # queries: security-extended,security-and-quality

      # If the analyze step fails for one of the languages you are analyzing with
      # "We were unable to automatically build your code", modify the matrix above
      # to set the build mode to "manual" for that language. Then modify this step
      # to build your code.
      # ℹ️ Command-line programs to run using the OS shell.
      # 📚 See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsrun
      - if: matrix.build-mode == 'manual'
        shell: bash
        run: |
          echo 'If you are using a "manual" build mode for one or more of the' \
            'languages you are analyzing, replace this with the commands to build' \
            'your code, for example:'
          echo '  make bootstrap'
          echo '  make release'
          exit 1

      - name: Perform CodeQL Analysis
        uses: github/codeql-action/analyze@v3
        with:
          category: "/language:${{matrix.language}}"
.github/workflows/docs-broken-links.yml (new file, 35 lines)
@@ -0,0 +1,35 @@
name: Check Documentation Broken Links

on:
  pull_request:
    paths:
      - "docs/**"
      - "docs.json"
  push:
    branches:
      - main
    paths:
      - "docs/**"
      - "docs.json"
  workflow_dispatch:

jobs:
  check-links:
    name: Check broken links
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4

      - name: Set up Node
        uses: actions/setup-node@v4
        with:
          node-version: "latest"

      - name: Install Mintlify CLI
        run: npm i -g mintlify

      - name: Run broken link checker
        run: |
          # Auto-answer the prompt with yes command
          yes "" | mintlify broken-links || test $? -eq 141
        working-directory: ./docs
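The `|| test $? -eq 141` guard tolerates the pipeline being torn down early: a process killed by signal N exits with status 128 + N, and SIGPIPE is signal 13, so 141 is the conventional "killed by SIGPIPE" status. A quick illustration in plain bash (not part of the workflow):

```bash
yes "" | head -n 1        # head exits after one line and closes the pipe
echo "${PIPESTATUS[0]}"   # prints 141: `yes` was killed by SIGPIPE (128 + 13)
```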
.github/workflows/linter.yml (modified)
@@ -2,15 +2,68 @@ name: Lint

on: [pull_request]

permissions:
  contents: read

jobs:
  lint:
    runs-on: ubuntu-latest
    env:
      TARGET_BRANCH: ${{ github.event.pull_request.base.ref }}
    steps:
      - uses: actions/checkout@v4
        with:
          fetch-depth: 0

-     - name: Install Requirements
+     - name: Fetch Target Branch
+       run: git fetch origin $TARGET_BRANCH --depth=1
+
+     - name: Restore global uv cache
+       id: cache-restore
+       uses: actions/cache/restore@v4
+       with:
+         path: |
+           ~/.cache/uv
+           ~/.local/share/uv
+           .venv
+         key: uv-main-py3.11-${{ hashFiles('uv.lock') }}
+         restore-keys: |
+           uv-main-py3.11-
+
+     - name: Install uv
+       uses: astral-sh/setup-uv@v6
+       with:
+         version: "0.8.4"
+         python-version: "3.11"
+         enable-cache: false
+
+     - name: Install dependencies
+       run: uv sync --all-groups --all-extras --no-install-project
+
+     - name: Get Changed Python Files
+       id: changed-files
+       run: |
-       run: pip install ruff
+         merge_base=$(git merge-base origin/"$TARGET_BRANCH" HEAD)
+         changed_files=$(git diff --name-only --diff-filter=ACMRTUB "$merge_base" | grep '\.py$' || true)
+         echo "files<<EOF" >> $GITHUB_OUTPUT
+         echo "$changed_files" >> $GITHUB_OUTPUT
+         echo "EOF" >> $GITHUB_OUTPUT
+
-     - name: Run Ruff Linter
-       run: ruff check
+     - name: Run Ruff on Changed Files
+       if: ${{ steps.changed-files.outputs.files != '' }}
+       run: |
+         echo "${{ steps.changed-files.outputs.files }}" \
+           | tr ' ' '\n' \
+           | grep -v 'src/crewai/cli/templates/' \
+           | grep -v '/tests/' \
+           | xargs -I{} uv run ruff check "{}"
+
+     - name: Save uv caches
+       if: steps.cache-restore.outputs.cache-hit != 'true'
+       uses: actions/cache/save@v4
+       with:
+         path: |
+           ~/.cache/uv
+           ~/.local/share/uv
+           .venv
+         key: uv-main-py3.11-${{ hashFiles('uv.lock') }}
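The same changed-files lint can be reproduced from a local checkout. A sketch, assuming the branch targets `origin/main`, uv is installed, and GNU xargs is available (for `-r`):

```bash
# Lint only the Python files this branch touches, skipping templates and tests.
merge_base=$(git merge-base origin/main HEAD)
git diff --name-only --diff-filter=ACMRTUB "$merge_base" -- '*.py' \
  | grep -v 'src/crewai/cli/templates/' \
  | grep -v '/tests/' \
  | xargs -r -I{} uv run ruff check "{}"
```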
.github/workflows/mkdocs.yml (deleted, 45 lines)
@@ -1,45 +0,0 @@
name: Deploy MkDocs

on:
  release:
    types: [published]

permissions:
  contents: write

jobs:
  deploy:
    runs-on: ubuntu-latest

    steps:
      - name: Checkout code
        uses: actions/checkout@v4

      - name: Setup Python
        uses: actions/setup-python@v5
        with:
          python-version: '3.10'

      - name: Calculate requirements hash
        id: req-hash
        run: echo "::set-output name=hash::$(sha256sum requirements-doc.txt | awk '{print $1}')"

      - name: Setup cache
        uses: actions/cache@v4
        with:
          key: mkdocs-material-${{ steps.req-hash.outputs.hash }}
          path: .cache
          restore-keys: |
            mkdocs-material-

      - name: Install Requirements
        run: |
          sudo apt-get update &&
          sudo apt-get install pngquant &&
          pip install mkdocs-material mkdocs-material-extensions pillow cairosvg

        env:
          GH_TOKEN: ${{ secrets.GH_TOKEN }}

      - name: Build and deploy MkDocs
        run: mkdocs gh-deploy --force
.github/workflows/notify-downstream.yml (new file, 33 lines)
@@ -0,0 +1,33 @@
name: Notify Downstream

on:
  push:
    branches:
      - main

permissions:
  contents: read

jobs:
  notify-downstream:
    runs-on: ubuntu-latest

    steps:
      - name: Generate GitHub App token
        id: app-token
        uses: tibdex/github-app-token@v2
        with:
          app_id: ${{ secrets.OSS_SYNC_APP_ID }}
          private_key: ${{ secrets.OSS_SYNC_APP_PRIVATE_KEY }}

      - name: Notify Repo B
        uses: peter-evans/repository-dispatch@v3
        with:
          token: ${{ steps.app-token.outputs.token }}
          repository: ${{ secrets.OSS_SYNC_DOWNSTREAM_REPO }}
          event-type: upstream-commit
          client-payload: |
            {
              "commit_sha": "${{ github.sha }}"
            }
.github/workflows/publish.yml (new file, 81 lines)
@@ -0,0 +1,81 @@
name: Publish to PyPI

on:
  release:
    types: [ published ]
  workflow_dispatch:

jobs:
  build:
    name: Build packages
    runs-on: ubuntu-latest
    permissions:
      contents: read
    steps:
      - uses: actions/checkout@v4

      - name: Set up Python
        uses: actions/setup-python@v5
        with:
          python-version: "3.12"

      - name: Install uv
        uses: astral-sh/setup-uv@v4

      - name: Build packages
        run: |
          uv build --all-packages
          rm dist/.gitignore

      - name: Upload artifacts
        uses: actions/upload-artifact@v4
        with:
          name: dist
          path: dist/

  publish:
    name: Publish to PyPI
    needs: build
    runs-on: ubuntu-latest
    environment:
      name: pypi
      url: https://pypi.org/p/crewai
    permissions:
      id-token: write
      contents: read
    steps:
      - uses: actions/checkout@v4

      - name: Install uv
        uses: astral-sh/setup-uv@v6
        with:
          version: "0.8.4"
          python-version: "3.12"
          enable-cache: false

      - name: Download artifacts
        uses: actions/download-artifact@v4
        with:
          name: dist
          path: dist

      - name: Publish to PyPI
        env:
          UV_PUBLISH_TOKEN: ${{ secrets.PYPI_API_TOKEN }}
        run: |
          failed=0
          for package in dist/*; do
            if [[ "$package" == *"crewai_devtools"* ]]; then
              echo "Skipping private package: $package"
              continue
            fi
            echo "Publishing $package"
            if ! uv publish "$package"; then
              echo "Failed to publish $package"
              failed=1
            fi
          done
          if [ $failed -eq 1 ]; then
            echo "Some packages failed to publish"
            exit 1
          fi
.github/workflows/security-checker.yml (deleted, 23 lines)
@@ -1,23 +0,0 @@
name: Security Checker

on: [pull_request]

jobs:
  security-check:
    runs-on: ubuntu-latest

    steps:
      - name: Checkout code
        uses: actions/checkout@v4

      - name: Set up Python
        uses: actions/setup-python@v5
        with:
          python-version: "3.11.9"

      - name: Install dependencies
        run: pip install bandit

      - name: Run Bandit
        run: bandit -c pyproject.toml -r src/ -ll
.github/workflows/tests.yml (modified)
@@ -3,30 +3,98 @@ name: Run Tests

on: [pull_request]

permissions:
-   contents: write
+   contents: read

- env:
-   OPENAI_API_KEY: fake-api-key

jobs:
  tests:
+   name: tests (${{ matrix.python-version }})
    runs-on: ubuntu-latest
+   timeout-minutes: 15
+   strategy:
+     fail-fast: true
+     matrix:
+       python-version: ['3.10', '3.11', '3.12', '3.13']
+       group: [1, 2, 3, 4, 5, 6, 7, 8]
    steps:
      - name: Checkout code
        uses: actions/checkout@v4
+       with:
+         fetch-depth: 0 # Fetch all history for proper diff
+
+     - name: Restore global uv cache
+       id: cache-restore
+       uses: actions/cache/restore@v4
+       with:
+         path: |
+           ~/.cache/uv
+           ~/.local/share/uv
+           .venv
+         key: uv-main-py${{ matrix.python-version }}-${{ hashFiles('uv.lock') }}
+         restore-keys: |
+           uv-main-py${{ matrix.python-version }}-

      - name: Install uv
-       uses: astral-sh/setup-uv@v3
+       uses: astral-sh/setup-uv@v6
        with:
-         enable-cache: true
-
-     - name: Set up Python
-       run: uv python install 3.12.8
+         version: "0.8.4"
+         python-version: ${{ matrix.python-version }}
+         enable-cache: false

      - name: Install the project
-       run: uv sync --dev --all-extras
+       run: uv sync --all-groups --all-extras

-     - name: Run tests
-       run: uv run pytest tests -vv
+     - name: Restore test durations
+       uses: actions/cache/restore@v4
+       with:
+         path: .test_durations_py*
+         key: test-durations-py${{ matrix.python-version }}
+
+     - name: Run tests (group ${{ matrix.group }} of 8)
+       run: |
+         PYTHON_VERSION_SAFE=$(echo "${{ matrix.python-version }}" | tr '.' '_')
+         DURATION_FILE="../../.test_durations_py${PYTHON_VERSION_SAFE}"
+
+         # Temporarily always skip cached durations to fix test splitting
+         # When durations don't match, pytest-split runs duplicate tests instead of splitting
+         echo "Using even test splitting (duration cache disabled until fix merged)"
+         DURATIONS_ARG=""
+
+         # Original logic (disabled temporarily):
+         # if [ ! -f "$DURATION_FILE" ]; then
+         #   echo "No cached durations found, tests will be split evenly"
+         #   DURATIONS_ARG=""
+         # elif git diff origin/${{ github.base_ref }}...HEAD --name-only 2>/dev/null | grep -q "^tests/.*\.py$"; then
+         #   echo "Test files have changed, skipping cached durations to avoid mismatches"
+         #   DURATIONS_ARG=""
+         # else
+         #   echo "No test changes detected, using cached test durations for optimal splitting"
+         #   DURATIONS_ARG="--durations-path=${DURATION_FILE}"
+         # fi
+
+         cd lib/crewai && uv run pytest \
+           -vv \
+           --splits 8 \
+           --group ${{ matrix.group }} \
+           $DURATIONS_ARG \
+           --durations=10 \
+           --maxfail=3
+
+     - name: Run tool tests (group ${{ matrix.group }} of 8)
+       run: |
+         cd lib/crewai-tools && uv run pytest \
+           -vv \
+           --splits 8 \
+           --group ${{ matrix.group }} \
+           --durations=10 \
+           --maxfail=3
+
+     - name: Save uv caches
+       if: steps.cache-restore.outputs.cache-hit != 'true'
+       uses: actions/cache/save@v4
+       with:
+         path: |
+           ~/.cache/uv
+           ~/.local/share/uv
+           .venv
+         key: uv-main-py${{ matrix.python-version }}-${{ hashFiles('uv.lock') }}
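The `--splits`/`--group` flags come from the pytest-split plugin: each matrix job runs one of eight slices of the suite, and the duration cache (written by the update-test-durations workflow below) lets slices be balanced by recorded runtime rather than by test count. A local sketch, assuming pytest-split is installed in the environment:

```bash
# Record per-test timings once, then run only the first of eight slices.
uv run pytest --store-durations --durations-path=.test_durations
uv run pytest --splits 8 --group 1 --durations-path=.test_durations
```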
.github/workflows/type-checker.yml (modified)
@@ -3,24 +3,99 @@ name: Run Type Checks

on: [pull_request]

permissions:
-   contents: write
+   contents: read

jobs:
-  type-checker:
+  type-checker-matrix:
+    name: type-checker (${{ matrix.python-version }})
    runs-on: ubuntu-latest
+    strategy:
+      fail-fast: false
+      matrix:
+        python-version: ["3.10", "3.11", "3.12", "3.13"]

    steps:
      - name: Checkout code
        uses: actions/checkout@v4
-
-     - name: Setup Python
-       uses: actions/setup-python@v5
        with:
-         python-version: "3.11.9"
+         fetch-depth: 0 # Fetch all history for proper diff

-     - name: Install Requirements
+     - name: Restore global uv cache
+       id: cache-restore
+       uses: actions/cache/restore@v4
+       with:
+         path: |
+           ~/.cache/uv
+           ~/.local/share/uv
+           .venv
+         key: uv-main-py${{ matrix.python-version }}-${{ hashFiles('uv.lock') }}
+         restore-keys: |
+           uv-main-py${{ matrix.python-version }}-
+
+     - name: Install uv
+       uses: astral-sh/setup-uv@v6
+       with:
+         version: "0.8.4"
+         python-version: ${{ matrix.python-version }}
+         enable-cache: false
+
+     - name: Install dependencies
+       run: uv sync --all-groups --all-extras
+
+     - name: Get changed Python files
+       id: changed-files
+       run: |
-       run: pip install mypy
+         # Get the list of changed Python files compared to the base branch
+         echo "Fetching changed files..."
+         git diff --name-only --diff-filter=ACMRT origin/${{ github.base_ref }}...HEAD -- '*.py' > changed_files.txt

-     - name: Run type checks
-       run: mypy src
+         # Filter for files in src/ directory only (excluding tests/)
+         grep -E "^src/" changed_files.txt > filtered_changed_files.txt || true
+
+         # Check if there are any changed files
+         if [ -s filtered_changed_files.txt ]; then
+           echo "Changed Python files in src/:"
+           cat filtered_changed_files.txt
+           echo "has_changes=true" >> $GITHUB_OUTPUT
+           # Convert newlines to spaces for mypy command
+           echo "files=$(cat filtered_changed_files.txt | tr '\n' ' ')" >> $GITHUB_OUTPUT
+         else
+           echo "No Python files changed in src/"
+           echo "has_changes=false" >> $GITHUB_OUTPUT
+         fi
+
+     - name: Run type checks on changed files
+       if: steps.changed-files.outputs.has_changes == 'true'
+       run: |
+         echo "Running mypy on changed files with Python ${{ matrix.python-version }}..."
+         uv run mypy ${{ steps.changed-files.outputs.files }}
+
+     - name: No files to check
+       if: steps.changed-files.outputs.has_changes == 'false'
+       run: echo "No Python files in src/ were modified - skipping type checks"
+
+     - name: Save uv caches
+       if: steps.cache-restore.outputs.cache-hit != 'true'
+       uses: actions/cache/save@v4
+       with:
+         path: |
+           ~/.cache/uv
+           ~/.local/share/uv
+           .venv
+         key: uv-main-py${{ matrix.python-version }}-${{ hashFiles('uv.lock') }}

+  # Summary job to provide single status for branch protection
+  type-checker:
+    name: type-checker
+    runs-on: ubuntu-latest
+    needs: type-checker-matrix
+    if: always()
+    steps:
+      - name: Check matrix results
+        run: |
+          if [ "${{ needs.type-checker-matrix.result }}" == "success" ] || [ "${{ needs.type-checker-matrix.result }}" == "skipped" ]; then
+            echo "✅ All type checks passed"
+          else
+            echo "❌ Type checks failed"
+            exit 1
+          fi
.github/workflows/update-test-durations.yml (new file, 71 lines)
@@ -0,0 +1,71 @@
name: Update Test Durations

on:
  push:
    branches:
      - main
    paths:
      - 'tests/**/*.py'
  workflow_dispatch:

permissions:
  contents: read

jobs:
  update-durations:
    runs-on: ubuntu-latest
    strategy:
      matrix:
        python-version: ['3.10', '3.11', '3.12', '3.13']
    env:
      OPENAI_API_KEY: fake-api-key
      PYTHONUNBUFFERED: 1

    steps:
      - name: Checkout repository
        uses: actions/checkout@v4

      - name: Restore global uv cache
        id: cache-restore
        uses: actions/cache/restore@v4
        with:
          path: |
            ~/.cache/uv
            ~/.local/share/uv
            .venv
          key: uv-main-py${{ matrix.python-version }}-${{ hashFiles('uv.lock') }}
          restore-keys: |
            uv-main-py${{ matrix.python-version }}-

      - name: Install uv
        uses: astral-sh/setup-uv@v6
        with:
          version: "0.8.4"
          python-version: ${{ matrix.python-version }}
          enable-cache: false

      - name: Install the project
        run: uv sync --all-groups --all-extras

      - name: Run all tests and store durations
        run: |
          PYTHON_VERSION_SAFE=$(echo "${{ matrix.python-version }}" | tr '.' '_')
          uv run pytest --store-durations --durations-path=.test_durations_py${PYTHON_VERSION_SAFE} -n auto
        continue-on-error: true

      - name: Save durations to cache
        if: always()
        uses: actions/cache/save@v4
        with:
          path: .test_durations_py*
          key: test-durations-py${{ matrix.python-version }}

      - name: Save uv caches
        if: steps.cache-restore.outputs.cache-hit != 'true'
        uses: actions/cache/save@v4
        with:
          path: |
            ~/.cache/uv
            ~/.local/share/uv
            .venv
          key: uv-main-py${{ matrix.python-version }}-${{ hashFiles('uv.lock') }}
.gitignore (modified)
@@ -2,7 +2,6 @@
  .pytest_cache
  __pycache__
  dist/
- lib/
  .env
  assets/*
  .idea
@@ -21,5 +20,9 @@ crew_tasks_output.json
  .mypy_cache
  .ruff_cache
  .venv
  agentops.log
  test_flow.html
- test_flow.html
+ crewairules.mdc
+ plan.md
+ conceptual_plan.md
+ build_image
+ chromadb-*.lock
.pre-commit-config.yaml (modified)
@@ -1,7 +1,27 @@
  repos:
-   - repo: https://github.com/astral-sh/ruff-pre-commit
-     rev: v0.8.2
+   - repo: local
      hooks:
        - id: ruff
-         args: ["--fix"]
+         name: ruff
+         entry: bash -c 'source .venv/bin/activate && uv run ruff check --config pyproject.toml "$@"' --
+         language: system
+         pass_filenames: true
+         types: [python]
+       - id: ruff-format
+         name: ruff-format
+         entry: bash -c 'source .venv/bin/activate && uv run ruff format --config pyproject.toml "$@"' --
+         language: system
+         pass_filenames: true
+         types: [python]
+       - id: mypy
+         name: mypy
+         entry: bash -c 'source .venv/bin/activate && uv run mypy --config-file pyproject.toml "$@"' --
+         language: system
+         pass_filenames: true
+         types: [python]
+         exclude: ^(lib/crewai/src/crewai/cli/templates/|lib/crewai/tests/|lib/crewai-tools/tests/)
+   - repo: https://github.com/astral-sh/uv-pre-commit
+     rev: 0.9.3
+     hooks:
+       - id: uv-lock
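Because the hooks use `repo: local` with `language: system`, pre-commit invokes the project's own `.venv` tools rather than building isolated hook environments, so the ruff and mypy versions always match the lockfile. Standard pre-commit usage applies:

```bash
pre-commit install          # register the hooks in .git/hooks
pre-commit run --all-files  # run ruff, ruff-format, mypy and uv-lock across the repo
```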
Ruff configuration (deleted file, 9 lines; file name not captured in this view)
@@ -1,9 +0,0 @@
exclude = [
    "templates",
    "__init__.py",
]

[lint]
select = [
    "I", # isort rules
]
LICENSE (modified)
@@ -1,4 +1,4 @@
- Copyright (c) 2018 The Python Packaging Authority
+ Copyright (c) 2025 crewAI, Inc.

  Permission is hereby granted, free of charge, to any person obtaining a copy
  of this software and associated documentation files (the "Software"), to deal
296
README.md
@@ -1,29 +1,85 @@
<div align="center">
<p align="center">
<a href="https://github.com/crewAIInc/crewAI">
<img src="docs/images/crewai_logo.png" width="600px" alt="Open source Multi-AI Agent orchestration framework">
</a>
</p>
<p align="center" style="display: flex; justify-content: center; gap: 20px; align-items: center;">
<a href="https://trendshift.io/repositories/11239" target="_blank">
<img src="https://trendshift.io/api/badge/repositories/11239" alt="crewAIInc%2FcrewAI | Trendshift" style="width: 250px; height: 55px;" width="250" height="55"/>
</a>
</p>


<p align="center">
<a href="https://crewai.com">Homepage</a>
·
<a href="https://docs.crewai.com">Docs</a>
·
<a href="https://app.crewai.com">Start Cloud Trial</a>
·
<a href="https://blog.crewai.com">Blog</a>
·
<a href="https://community.crewai.com">Forum</a>
</p>

# **CrewAI**
<p align="center">
<a href="https://github.com/crewAIInc/crewAI">
<img src="https://img.shields.io/github/stars/crewAIInc/crewAI" alt="GitHub Repo stars">
</a>
<a href="https://github.com/crewAIInc/crewAI/network/members">
<img src="https://img.shields.io/github/forks/crewAIInc/crewAI" alt="GitHub forks">
</a>
<a href="https://github.com/crewAIInc/crewAI/issues">
<img src="https://img.shields.io/github/issues/crewAIInc/crewAI" alt="GitHub issues">
</a>
<a href="https://github.com/crewAIInc/crewAI/pulls">
<img src="https://img.shields.io/github/issues-pr/crewAIInc/crewAI" alt="GitHub pull requests">
</a>
<a href="https://opensource.org/licenses/MIT">
<img src="https://img.shields.io/badge/License-MIT-green.svg" alt="License: MIT">
</a>
</p>

**CrewAI**: Production-grade framework for orchestrating sophisticated AI agent systems. From simple automations to complex real-world applications, CrewAI provides precise control and deep customization. By fostering collaborative intelligence through flexible, production-ready architecture, CrewAI empowers agents to work together seamlessly, tackling complex business challenges with predictable, consistent results.
<p align="center">
<a href="https://pypi.org/project/crewai/">
<img src="https://img.shields.io/pypi/v/crewai" alt="PyPI version">
</a>
<a href="https://pypi.org/project/crewai/">
<img src="https://img.shields.io/pypi/dm/crewai" alt="PyPI downloads">
</a>
<a href="https://twitter.com/crewAIInc">
<img src="https://img.shields.io/twitter/follow/crewAIInc?style=social" alt="Twitter Follow">
</a>
</p>

**CrewAI Enterprise**
Want to plan, build (+ no code), deploy, monitor and iterate on your agents: [CrewAI Enterprise](https://www.crewai.com/enterprise). Designed for complex, real-world applications, our enterprise solution offers:
### Fast and Flexible Multi-Agent Automation Framework

- **Seamless Integrations**
- **Scalable & Secure Deployment**
- **Actionable Insights**
- **24/7 Support**
> CrewAI is a lean, lightning-fast Python framework built entirely from scratch—completely **independent of LangChain or other agent frameworks**.
> It empowers developers with both high-level simplicity and precise low-level control, ideal for creating autonomous AI agents tailored to any scenario.

<h3>
- **CrewAI Crews**: Optimize for autonomy and collaborative intelligence.
- **CrewAI Flows**: Enable granular, event-driven control and single LLM calls for precise task orchestration, with native support for Crews.

[Homepage](https://www.crewai.com/) | [Documentation](https://docs.crewai.com/) | [Chat with Docs](https://chatg.pt/DWjSBZn) | [Examples](https://github.com/crewAIInc/crewAI-examples) | [Discourse](https://community.crewai.com)
With over 100,000 developers certified through our community courses at [learn.crewai.com](https://learn.crewai.com), CrewAI is rapidly becoming the
standard for enterprise-ready AI automation.

</h3>
# CrewAI AOP Suite

[![GitHub Repo stars](https://img.shields.io/github/stars/crewAIInc/crewAI)](https://github.com/crewAIInc/crewAI)
[![License: MIT](https://img.shields.io/badge/License-MIT-green.svg)](https://opensource.org/licenses/MIT)
CrewAI AOP Suite is a comprehensive bundle tailored for organizations that require secure, scalable, and easy-to-manage agent-driven automation.

</div>
You can try one part of the suite, the [Crew Control Plane](https://app.crewai.com), for free.

## Crew Control Plane Key Features:

- **Tracing & Observability**: Monitor and track your AI agents and workflows in real-time, including metrics, logs, and traces.
- **Unified Control Plane**: A centralized platform for managing, monitoring, and scaling your AI agents and workflows.
- **Seamless Integrations**: Easily connect with existing enterprise systems, data sources, and cloud infrastructure.
- **Advanced Security**: Built-in robust security and compliance measures ensuring safe deployment and management.
- **Actionable Insights**: Real-time analytics and reporting to optimize performance and decision-making.
- **24/7 Support**: Dedicated enterprise support to ensure uninterrupted operation and quick resolution of issues.
- **On-premise and Cloud Deployment Options**: Deploy CrewAI AOP on-premise or in the cloud, depending on your security and compliance requirements.

CrewAI AOP is designed for enterprises seeking a powerful, reliable solution to transform complex business processes into efficient,
intelligent automations.

## Table of contents

@@ -47,14 +103,31 @@ Want to plan, build (+ no code), deploy, monitor and iterate on your agents: [Crew

## Why CrewAI?

The power of AI collaboration has too much to offer.
CrewAI is a standalone framework, built from the ground up without dependencies on Langchain or other agent frameworks. It's designed to enable AI agents to assume roles, share goals, and operate in a cohesive unit - much like a well-oiled crew. Whether you're building a smart assistant platform, an automated customer service ensemble, or a multi-agent research team, CrewAI provides the backbone for sophisticated multi-agent interactions.
<div align="center" style="margin-bottom: 30px;">
<img src="docs/images/asset.png" alt="CrewAI Logo" width="100%">
</div>

CrewAI unlocks the true potential of multi-agent automation, delivering the best-in-class combination of speed, flexibility, and control with either Crews of AI Agents or Flows of Events:

- **Standalone Framework**: Built from scratch, independent of LangChain or any other agent framework.
- **High Performance**: Optimized for speed and minimal resource usage, enabling faster execution.
- **Flexible Low-Level Customization**: Complete freedom to customize at both high and low levels - from overall workflows and system architecture to granular agent behaviors, internal prompts, and execution logic.
- **Ideal for Every Use Case**: Proven effective for both simple tasks and highly complex, real-world, enterprise-grade scenarios.
- **Robust Community**: Backed by a rapidly growing community of over **100,000 certified** developers offering comprehensive support and resources.

CrewAI empowers developers and enterprises to confidently build intelligent automations, bridging the gap between simplicity, flexibility, and performance.

## Getting Started

### Learning Resources
Set up and run your first CrewAI agents by following this tutorial.

[![CrewAI Getting Started Tutorial](https://img.youtube.com/vi/-kSOTtYzgEw/hqdefault.jpg)](https://www.youtube.com/watch?v=-kSOTtYzgEw "CrewAI Getting Started Tutorial")

### Learning Resources

Learn CrewAI through our comprehensive courses:

- [Multi AI Agent Systems with CrewAI](https://www.deeplearning.ai/short-courses/multi-ai-agent-systems-with-crewai/) - Master the fundamentals of multi-agent systems
- [Practical Multi AI Agents and Advanced Use Cases](https://www.deeplearning.ai/short-courses/practical-multi-ai-agents-and-advanced-use-cases-with-crewai/) - Deep dive into advanced implementations

@@ -63,18 +136,20 @@ Learn CrewAI through our comprehensive courses:

CrewAI offers two powerful, complementary approaches that work seamlessly together to build sophisticated AI applications:

1. **Crews**: Teams of AI agents with true autonomy and agency, working together to accomplish complex tasks through role-based collaboration. Crews enable:

   - Natural, autonomous decision-making between agents
   - Dynamic task delegation and collaboration
   - Specialized roles with defined goals and expertise
   - Flexible problem-solving approaches

2. **Flows**: Production-ready, event-driven workflows that deliver precise control over complex automations. Flows provide:

   - Fine-grained control over execution paths for real-world scenarios
   - Secure, consistent state management between tasks
   - Clean integration of AI agents with production Python code
   - Conditional branching for complex business logic

The true power of CrewAI emerges when combining Crews and Flows (see the sketch after this list). This synergy allows you to:

- Build complex, production-grade applications
- Balance autonomy with precise control
- Handle sophisticated real-world scenarios
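
Below is a minimal, self-contained sketch (not taken from this README) of that combination: a Flow step runs a Crew, and plain Python post-processes its output. All names, prompts, and the post-processing step are illustrative.

```python
from crewai import Agent, Crew, Task
from crewai.flow.flow import Flow, listen, start


class ResearchFlow(Flow):
    @start()
    def gather(self):
        # A one-agent Crew executed as a single Flow step.
        analyst = Agent(
            role="Analyst",
            goal="Summarize a topic",
            backstory="Concise and factual.",
        )
        summarize = Task(
            description="Summarize why multi-agent systems are useful.",
            expected_output="Three bullet points.",
            agent=analyst,
        )
        return Crew(agents=[analyst], tasks=[summarize]).kickoff()

    @listen(gather)
    def post_process(self, crew_output):
        # Ordinary Python runs alongside the Crew inside the same Flow.
        return str(crew_output).upper()


result = ResearchFlow().kickoff()
```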
@@ -86,18 +161,20 @@ To get started with CrewAI, follow these simple steps:

### 1. Installation

Ensure you have Python >=3.10 <3.13 installed on your system. CrewAI uses [UV](https://docs.astral.sh/uv/) for dependency management and package handling, offering a seamless setup and execution experience.
Ensure you have Python >=3.10 <3.14 installed on your system. CrewAI uses [UV](https://docs.astral.sh/uv/) for dependency management and package handling, offering a seamless setup and execution experience.

First, install CrewAI:

```shell
pip install crewai
```

If you want to install the 'crewai' package along with its optional features that include additional tools for agents, you can do so by using the following command:

```shell
pip install 'crewai[tools]'
```

The command above installs the basic package and also adds extra components which require more dependencies to function.

### Troubleshooting Dependencies
@@ -107,10 +184,11 @@ If you encounter issues during installation or usage, here are some common solutions:

#### Common Issues

1. **ModuleNotFoundError: No module named 'tiktoken'**

   - Install tiktoken explicitly: `pip install 'crewai[embeddings]'`
   - If using embedchain or other tools: `pip install 'crewai[tools]'`

2. **Failed building wheel for tiktoken**

   - Ensure Rust compiler is installed (see installation steps above)
   - For Windows: Verify Visual C++ Build Tools are installed
   - Try upgrading pip: `pip install --upgrade pip`
@@ -221,10 +299,14 @@ reporting_task:
from crewai import Agent, Crew, Process, Task
from crewai.project import CrewBase, agent, crew, task
from crewai_tools import SerperDevTool
from crewai.agents.agent_builder.base_agent import BaseAgent
from typing import List

@CrewBase
class LatestAiDevelopmentCrew():
    """LatestAiDevelopment crew"""
    agents: List[BaseAgent]
    tasks: List[Task]

    @agent
    def researcher(self) -> Agent:
@@ -321,27 +403,25 @@ In addition to the sequential process, you can use the hierarchical process, which

## Key Features

**Note**: CrewAI is a standalone framework built from the ground up, without dependencies on Langchain or other agent frameworks.
CrewAI stands apart as a lean, standalone, high-performance multi-AI Agent framework delivering simplicity, flexibility, and precise control—free from the complexity and limitations found in other agent frameworks.

- **Deep Customization**: Build sophisticated agents with full control over the system - from overriding inner prompts to accessing low-level APIs. Customize roles, goals, tools, and behaviors while maintaining clean abstractions.
- **Autonomous Inter-Agent Delegation**: Agents can autonomously delegate tasks and inquire amongst themselves, enabling complex problem-solving in real-world scenarios.
- **Flexible Task Management**: Define and customize tasks with granular control, from simple operations to complex multi-step processes.
- **Production-Grade Architecture**: Support for both high-level abstractions and low-level customization, with robust error handling and state management.
- **Predictable Results**: Ensure consistent, accurate outputs through programmatic guardrails, agent training capabilities, and flow-based execution control. See our [documentation on guardrails](https://docs.crewai.com/how-to/guardrails/) for implementation details.
- **Model Flexibility**: Run your crew using OpenAI or open source models with production-ready integrations. See [Connect CrewAI to LLMs](https://docs.crewai.com/how-to/LLM-Connections/) for detailed configuration options.
- **Event-Driven Flows**: Build complex, real-world workflows with precise control over execution paths, state management, and conditional logic.
- **Process Orchestration**: Achieve any workflow pattern through flows - from simple sequential and hierarchical processes to complex, custom orchestration patterns with conditional branching and parallel execution.
- **Standalone & Lean**: Completely independent from other frameworks like LangChain, offering faster execution and lighter resource demands.
- **Flexible & Precise**: Easily orchestrate autonomous agents through intuitive [Crews](https://docs.crewai.com/concepts/crews) or precise [Flows](https://docs.crewai.com/concepts/flows), achieving perfect balance for your needs.
- **Seamless Integration**: Effortlessly combine Crews (autonomy) and Flows (precision) to create complex, real-world automations.
- **Deep Customization**: Tailor every aspect—from high-level workflows down to low-level internal prompts and agent behaviors.
- **Reliable Performance**: Consistent results across simple tasks and complex, enterprise-level automations.
- **Thriving Community**: Backed by robust documentation and over 100,000 certified developers, providing exceptional support and guidance.

![Degree of autonomy towards goals](docs/images/asset.png "CrewAI - Autonomy Spectrum")
Choose CrewAI to easily build powerful, adaptable, and production-ready AI automations.
## Examples

You can test different real life examples of AI crews in the [CrewAI-examples repo](https://github.com/crewAIInc/crewAI-examples?tab=readme-ov-file):

- [Landing Page Generator](https://github.com/crewAIInc/crewAI-examples/tree/main/landing_page_generator)
- [Landing Page Generator](https://github.com/crewAIInc/crewAI-examples/tree/main/crews/landing_page_generator)
- [Having Human input on the execution](https://docs.crewai.com/how-to/Human-Input-on-Execution)
- [Trip Planner](https://github.com/crewAIInc/crewAI-examples/tree/main/trip_planner)
- [Stock Analysis](https://github.com/crewAIInc/crewAI-examples/tree/main/stock_analysis)
- [Trip Planner](https://github.com/crewAIInc/crewAI-examples/tree/main/crews/trip_planner)
- [Stock Analysis](https://github.com/crewAIInc/crewAI-examples/tree/main/crews/stock_analysis)
### Quick Tutorial

@@ -349,29 +429,35 @@ You can test different real life examples of AI crews in the [CrewAI-examples re

### Write Job Descriptions

[Check out code for this example](https://github.com/crewAIInc/crewAI-examples/tree/main/job-posting) or watch a video below:
[Check out code for this example](https://github.com/crewAIInc/crewAI-examples/tree/main/crews/job-posting) or watch a video below:

[![Jobs postings](https://img.youtube.com/vi/u98wEMz-9to/hqdefault.jpg)](https://www.youtube.com/watch?v=u98wEMz-9to "Jobs postings")

### Trip Planner

[Check out code for this example](https://github.com/crewAIInc/crewAI-examples/tree/main/trip_planner) or watch a video below:
[Check out code for this example](https://github.com/crewAIInc/crewAI-examples/tree/main/crews/trip_planner) or watch a video below:

[![Trip Planner](https://img.youtube.com/vi/xis7rWp-hjs/hqdefault.jpg)](https://www.youtube.com/watch?v=xis7rWp-hjs "Trip Planner")

### Stock Analysis

[Check out code for this example](https://github.com/crewAIInc/crewAI-examples/tree/main/stock_analysis) or watch a video below:
[Check out code for this example](https://github.com/crewAIInc/crewAI-examples/tree/main/crews/stock_analysis) or watch a video below:

[![Stock Analysis](https://img.youtube.com/vi/e0Uj4yWdaAg/hqdefault.jpg)](https://www.youtube.com/watch?v=e0Uj4yWdaAg "Stock Analysis")

### Using Crews and Flows Together

CrewAI's power truly shines when combining Crews with Flows to create sophisticated automation pipelines. Here's how you can orchestrate multiple Crews within a Flow:
CrewAI's power truly shines when combining Crews with Flows to create sophisticated automation pipelines.
CrewAI flows support logical operators like `or_` and `and_` to combine multiple conditions. These can be used with the `@start`, `@listen`, or `@router` decorators to create complex triggering conditions.

- `or_`: Triggers when any of the specified conditions are met.
- `and_`: Triggers when all of the specified conditions are met.

Here's how you can orchestrate multiple Crews within a Flow:
```python
from crewai.flow.flow import Flow, listen, start, router
from crewai import Crew, Agent, Task
from crewai.flow.flow import Flow, listen, start, router, or_
from crewai import Crew, Agent, Task, Process
from pydantic import BaseModel

# Define structured state for precise control
@@ -445,13 +531,14 @@ class AdvancedAnalysisFlow(Flow[MarketState]):
        )
        return strategy_crew.kickoff()

    @listen("medium_confidence", "low_confidence")
    @listen(or_("medium_confidence", "low_confidence"))
    def request_additional_analysis(self):
        self.state.recommendations.append("Gather more data")
        return "Additional analysis required"
```

This example demonstrates how to:

1. Use Python code for basic data operations
2. Create and execute Crews as steps in your workflow
3. Use Flow decorators to manage the sequence of operations
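
For reference, here is a minimal, self-contained sketch (not from this README) of how `or_` and `and_` combine triggers; the method names, return values, and printed messages are illustrative.

```python
from crewai.flow.flow import Flow, and_, listen, or_, start


class SignalFlow(Flow):
    @start()
    def fetch_prices(self):
        return "prices"

    @start()
    def fetch_news(self):
        return "news"

    @listen(or_(fetch_prices, fetch_news))
    def on_first_signal(self, result):
        # Fires as soon as either start method completes.
        print(f"first signal: {result}")

    @listen(and_(fetch_prices, fetch_news))
    def on_all_signals(self, result):
        # Fires only after both start methods have completed.
        print(f"all signals in: {result}")


SignalFlow().kickoff()
```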
@@ -461,7 +548,7 @@ This example demonstrates how to:

CrewAI supports using various LLMs through a variety of connection options. By default your agents will use the OpenAI API when querying the model. However, there are several other ways to allow your agents to connect to models. For example, you can configure your agents to use a local model via the Ollama tool.

Please refer to the [Connect CrewAI to LLMs](https://docs.crewai.com/how-to/LLM-Connections/) page for details on configuring you agents' connections to models.
Please refer to the [Connect CrewAI to LLMs](https://docs.crewai.com/how-to/LLM-Connections/) page for details on configuring your agents' connections to models.
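
As a quick illustration, here is a minimal sketch of pointing an agent at a local model, assuming a local Ollama server with a pulled `llama3.1` model; the model name, URL, and agent fields are illustrative, not from this page.

```python
from crewai import Agent, LLM

local_llm = LLM(
    model="ollama/llama3.1",            # provider-prefixed model identifier
    base_url="http://localhost:11434",  # default Ollama endpoint
)

researcher = Agent(
    role="Researcher",
    goal="Summarize recent AI news",
    backstory="A concise, factual analyst.",
    llm=local_llm,  # this agent's calls now go to the local model
)
```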
## How CrewAI Compares

@@ -472,7 +559,6 @@ Please refer to the [Connect CrewAI to LLMs](https://docs.crewai.com/how-to/LLM-

*P.S. CrewAI demonstrates significant performance advantages over LangGraph, executing 5.76x faster in certain cases like this QA task example ([see comparison](https://github.com/crewAIInc/crewAI-examples/tree/main/Notebooks/CrewAI%20Flows%20%26%20Langgraph/QA%20Agent)) while achieving higher evaluation scores with faster completion times in certain coding tasks, like in this example ([detailed analysis](https://github.com/crewAIInc/crewAI-examples/blob/main/Notebooks/CrewAI%20Flows%20%26%20Langgraph/Coding%20Assistant/coding_assistant_eval.ipynb)).*

- **Autogen**: While Autogen excels at creating conversational agents capable of working together, it lacks an inherent concept of process. In Autogen, orchestrating agents' interactions requires additional programming, which can become complex and cumbersome as the scale of tasks grows.

- **ChatDev**: ChatDev introduced the idea of processes into the realm of AI agents, but its implementation is quite rigid. Customizations in ChatDev are limited and not geared towards production environments, which can hinder scalability and flexibility in real-world applications.

## Contribution
@@ -565,39 +651,127 @@ CrewAI is released under the [MIT License](https://github.com/crewAIInc/crewAI/b

## Frequently Asked Questions (FAQ)

### Q: What is CrewAI?
A: CrewAI is a cutting-edge framework for orchestrating role-playing, autonomous AI agents. It enables agents to work together seamlessly, tackling complex tasks through collaborative intelligence.
### General

- [What exactly is CrewAI?](#q-what-exactly-is-crewai)
- [How do I install CrewAI?](#q-how-do-i-install-crewai)
- [Does CrewAI depend on LangChain?](#q-does-crewai-depend-on-langchain)
- [Is CrewAI open-source?](#q-is-crewai-open-source)
- [Does CrewAI collect data from users?](#q-does-crewai-collect-data-from-users)

### Features and Capabilities

- [Can CrewAI handle complex use cases?](#q-can-crewai-handle-complex-use-cases)
- [Can I use CrewAI with local AI models?](#q-can-i-use-crewai-with-local-ai-models)
- [What makes Crews different from Flows?](#q-what-makes-crews-different-from-flows)
- [How is CrewAI better than LangChain?](#q-how-is-crewai-better-than-langchain)
- [Does CrewAI support fine-tuning or training custom models?](#q-does-crewai-support-fine-tuning-or-training-custom-models)

### Resources and Community

- [Where can I find real-world CrewAI examples?](#q-where-can-i-find-real-world-crewai-examples)
- [How can I contribute to CrewAI?](#q-how-can-i-contribute-to-crewai)

### Enterprise Features

- [What additional features does CrewAI AOP offer?](#q-what-additional-features-does-crewai-aop-offer)
- [Is CrewAI AOP available for cloud and on-premise deployments?](#q-is-crewai-aop-available-for-cloud-and-on-premise-deployments)
- [Can I try CrewAI AOP for free?](#q-can-i-try-crewai-aop-for-free)
### Q: What exactly is CrewAI?

A: CrewAI is a standalone, lean, and fast Python framework built specifically for orchestrating autonomous AI agents. Unlike frameworks like LangChain, CrewAI does not rely on external dependencies, making it leaner, faster, and simpler.

### Q: How do I install CrewAI?
A: You can install CrewAI using pip:

A: Install CrewAI using pip:

```shell
pip install crewai
```

For additional tools, use:

```shell
pip install 'crewai[tools]'
```

### Q: Can I use CrewAI with local models?
A: Yes, CrewAI supports various LLMs, including local models. You can configure your agents to use local models via tools like Ollama & LM Studio. Check the [LLM Connections documentation](https://docs.crewai.com/how-to/LLM-Connections/) for more details.
### Q: Does CrewAI depend on LangChain?

### Q: What are the key features of CrewAI?
A: Key features include role-based agent design, autonomous inter-agent delegation, flexible task management, process-driven execution, output saving as files, and compatibility with both open-source and proprietary models.
A: No. CrewAI is built entirely from the ground up, with no dependencies on LangChain or other agent frameworks. This ensures a lean, fast, and flexible experience.

### Q: How does CrewAI compare to other AI orchestration tools?
A: CrewAI is designed with production in mind, offering flexibility similar to Autogen's conversational agents and structured processes like ChatDev, but with more adaptability for real-world applications.
### Q: Can CrewAI handle complex use cases?

A: Yes. CrewAI excels at both simple and highly complex real-world scenarios, offering deep customization options at both high and low levels, from internal prompts to sophisticated workflow orchestration.

### Q: Can I use CrewAI with local AI models?

A: Absolutely! CrewAI supports various language models, including local ones. Tools like Ollama and LM Studio allow seamless integration. Check the [LLM Connections documentation](https://docs.crewai.com/how-to/LLM-Connections/) for more details.
### Q: What makes Crews different from Flows?

A: Crews provide autonomous agent collaboration, ideal for tasks requiring flexible decision-making and dynamic interaction. Flows offer precise, event-driven control, ideal for managing detailed execution paths and secure state management. You can seamlessly combine both for maximum effectiveness.

### Q: How is CrewAI better than LangChain?

A: CrewAI provides simpler, more intuitive APIs, faster execution speeds, more reliable and consistent results, robust documentation, and an active community—addressing common criticisms and limitations associated with LangChain.

### Q: Is CrewAI open-source?
A: Yes, CrewAI is open-source and welcomes contributions from the community.

### Q: Does CrewAI collect any data?
A: CrewAI uses anonymous telemetry to collect usage data for improvement purposes. No sensitive data (like prompts, task descriptions, or API calls) is collected. Users can opt in to share more detailed data by setting `share_crew=True` on their Crews.
A: Yes, CrewAI is open-source and actively encourages community contributions and collaboration.

### Q: Where can I find examples of CrewAI in action?
A: You can find various real-life examples in the [CrewAI-examples repository](https://github.com/crewAIInc/crewAI-examples), including trip planners, stock analysis tools, and more.
### Q: Does CrewAI collect data from users?

### Q: What is the difference between Crews and Flows?
A: Crews and Flows serve different but complementary purposes in CrewAI. Crews are teams of AI agents working together to accomplish specific tasks through role-based collaboration, delivering accurate and predictable results. Flows, on the other hand, are event-driven workflows that can orchestrate both Crews and regular Python code, allowing you to build complex automation pipelines with secure state management and conditional execution paths.
A: CrewAI collects anonymous telemetry data strictly for improvement purposes. Sensitive data such as prompts, tasks, or API responses is never collected unless explicitly enabled by the user.

### Q: Where can I find real-world CrewAI examples?

A: Check out practical examples in the [CrewAI-examples repository](https://github.com/crewAIInc/crewAI-examples), covering use cases like trip planners, stock analysis, and job postings.

### Q: How can I contribute to CrewAI?
A: Contributions are welcome! You can fork the repository, create a new branch for your feature, add your improvement, and send a pull request. Check the Contribution section in the README for more details.

A: Contributions are warmly welcomed! Fork the repository, create your branch, implement your changes, and submit a pull request. See the Contribution section of the README for detailed guidelines.

### Q: What additional features does CrewAI AOP offer?

A: CrewAI AOP provides advanced features such as a unified control plane, real-time observability, secure integrations, advanced security, actionable insights, and dedicated 24/7 enterprise support.

### Q: Is CrewAI AOP available for cloud and on-premise deployments?

A: Yes, CrewAI AOP supports both cloud-based and on-premise deployment options, allowing enterprises to meet their specific security and compliance requirements.

### Q: Can I try CrewAI AOP for free?

A: Yes, you can explore part of the CrewAI AOP Suite by accessing the [Crew Control Plane](https://app.crewai.com) for free.

### Q: Does CrewAI support fine-tuning or training custom models?

A: Yes, CrewAI can integrate with custom-trained or fine-tuned models, allowing you to enhance your agents with domain-specific knowledge and accuracy.

### Q: Can CrewAI agents interact with external tools and APIs?

A: Absolutely! CrewAI agents can easily integrate with external tools, APIs, and databases, empowering them to leverage real-world data and resources.
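As a small, hedged sketch of this: the example below assumes the `tool` decorator exported by `crewai.tools`, and the word-counter tool itself is a made-up illustration, not an official integration.

```python
from crewai import Agent
from crewai.tools import tool


@tool("Word counter")
def word_counter(text: str) -> str:
    """Count the words in a piece of text."""
    return f"{len(text.split())} words"


analyst = Agent(
    role="Analyst",
    goal="Analyze documents",
    backstory="Detail-oriented and precise.",
    tools=[word_counter],  # the agent can now invoke this tool during tasks
)
```
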
### Q: Is CrewAI suitable for production environments?

A: Yes, CrewAI is explicitly designed with production-grade standards, ensuring reliability, stability, and scalability for enterprise deployments.

### Q: How scalable is CrewAI?

A: CrewAI is highly scalable, supporting simple automations and large-scale enterprise workflows involving numerous agents and complex tasks simultaneously.

### Q: Does CrewAI offer debugging and monitoring tools?

A: Yes, CrewAI AOP includes advanced debugging, tracing, and real-time observability features, simplifying the management and troubleshooting of your automations.

### Q: What programming languages does CrewAI support?

A: CrewAI is primarily Python-based but easily integrates with services and APIs written in any programming language through its flexible API integration capabilities.

### Q: Does CrewAI offer educational resources for beginners?

A: Yes, CrewAI provides extensive beginner-friendly tutorials, courses, and documentation through learn.crewai.com, supporting developers at all skill levels.

### Q: Can CrewAI automate human-in-the-loop workflows?

A: Yes, CrewAI fully supports human-in-the-loop workflows, allowing seamless collaboration between human experts and AI agents for enhanced decision-making.

197
conftest.py
Normal file
@@ -0,0 +1,197 @@
"""Pytest configuration for crewAI workspace."""

from collections.abc import Generator
import os
from pathlib import Path
import tempfile
from typing import Any

from dotenv import load_dotenv
import pytest
from vcr.request import Request  # type: ignore[import-untyped]


env_test_path = Path(__file__).parent / ".env.test"
load_dotenv(env_test_path, override=True)
load_dotenv(override=True)


@pytest.fixture(autouse=True, scope="function")
def cleanup_event_handlers() -> Generator[None, Any, None]:
    """Clean up event bus handlers after each test to prevent test pollution."""
    yield

    try:
        from crewai.events.event_bus import crewai_event_bus

        with crewai_event_bus._rwlock.w_locked():
            crewai_event_bus._sync_handlers.clear()
            crewai_event_bus._async_handlers.clear()
    except Exception:  # noqa: S110
        pass


@pytest.fixture(autouse=True, scope="function")
def setup_test_environment() -> Generator[None, Any, None]:
    """Setup test environment for crewAI workspace."""
    with tempfile.TemporaryDirectory() as temp_dir:
        storage_dir = Path(temp_dir) / "crewai_test_storage"
        storage_dir.mkdir(parents=True, exist_ok=True)

        if not storage_dir.exists() or not storage_dir.is_dir():
            raise RuntimeError(
                f"Failed to create test storage directory: {storage_dir}"
            )

        try:
            test_file = storage_dir / ".permissions_test"
            test_file.touch()
            test_file.unlink()
        except (OSError, IOError) as e:
            raise RuntimeError(
                f"Test storage directory {storage_dir} is not writable: {e}"
            ) from e

        os.environ["CREWAI_STORAGE_DIR"] = str(storage_dir)
        os.environ["CREWAI_TESTING"] = "true"

        try:
            yield
        finally:
            os.environ.pop("CREWAI_TESTING", "true")
            os.environ.pop("CREWAI_STORAGE_DIR", None)
            os.environ.pop("CREWAI_DISABLE_TELEMETRY", "true")
            os.environ.pop("OTEL_SDK_DISABLED", "true")
            os.environ.pop("OPENAI_BASE_URL", "https://api.openai.com/v1")
            os.environ.pop("OPENAI_API_BASE", "https://api.openai.com/v1")


HEADERS_TO_FILTER = {
    "authorization": "AUTHORIZATION-XXX",
    "content-security-policy": "CSP-FILTERED",
    "cookie": "COOKIE-XXX",
    "set-cookie": "SET-COOKIE-XXX",
    "permissions-policy": "PERMISSIONS-POLICY-XXX",
    "referrer-policy": "REFERRER-POLICY-XXX",
    "strict-transport-security": "STS-XXX",
    "x-content-type-options": "X-CONTENT-TYPE-XXX",
    "x-frame-options": "X-FRAME-OPTIONS-XXX",
    "x-permitted-cross-domain-policies": "X-PERMITTED-XXX",
    "x-request-id": "X-REQUEST-ID-XXX",
    "x-runtime": "X-RUNTIME-XXX",
    "x-xss-protection": "X-XSS-PROTECTION-XXX",
    "x-stainless-arch": "X-STAINLESS-ARCH-XXX",
    "x-stainless-os": "X-STAINLESS-OS-XXX",
    "x-stainless-read-timeout": "X-STAINLESS-READ-TIMEOUT-XXX",
    "cf-ray": "CF-RAY-XXX",
    "etag": "ETAG-XXX",
    "Strict-Transport-Security": "STS-XXX",
    "access-control-expose-headers": "ACCESS-CONTROL-XXX",
    "openai-organization": "OPENAI-ORG-XXX",
    "openai-project": "OPENAI-PROJECT-XXX",
    "x-ratelimit-limit-requests": "X-RATELIMIT-LIMIT-REQUESTS-XXX",
    "x-ratelimit-limit-tokens": "X-RATELIMIT-LIMIT-TOKENS-XXX",
    "x-ratelimit-remaining-requests": "X-RATELIMIT-REMAINING-REQUESTS-XXX",
    "x-ratelimit-remaining-tokens": "X-RATELIMIT-REMAINING-TOKENS-XXX",
    "x-ratelimit-reset-requests": "X-RATELIMIT-RESET-REQUESTS-XXX",
    "x-ratelimit-reset-tokens": "X-RATELIMIT-RESET-TOKENS-XXX",
    "x-goog-api-key": "X-GOOG-API-KEY-XXX",
    "api-key": "X-API-KEY-XXX",
    "User-Agent": "X-USER-AGENT-XXX",
    "apim-request-id:": "X-API-CLIENT-REQUEST-ID-XXX",
    "azureml-model-session": "AZUREML-MODEL-SESSION-XXX",
    "x-ms-client-request-id": "X-MS-CLIENT-REQUEST-ID-XXX",
    "x-ms-region": "X-MS-REGION-XXX",
    "apim-request-id": "APIM-REQUEST-ID-XXX",
    "x-api-key": "X-API-KEY-XXX",
    "anthropic-organization-id": "ANTHROPIC-ORGANIZATION-ID-XXX",
    "request-id": "REQUEST-ID-XXX",
    "anthropic-ratelimit-input-tokens-limit": "ANTHROPIC-RATELIMIT-INPUT-TOKENS-LIMIT-XXX",
    "anthropic-ratelimit-input-tokens-remaining": "ANTHROPIC-RATELIMIT-INPUT-TOKENS-REMAINING-XXX",
    "anthropic-ratelimit-input-tokens-reset": "ANTHROPIC-RATELIMIT-INPUT-TOKENS-RESET-XXX",
    "anthropic-ratelimit-output-tokens-limit": "ANTHROPIC-RATELIMIT-OUTPUT-TOKENS-LIMIT-XXX",
    "anthropic-ratelimit-output-tokens-remaining": "ANTHROPIC-RATELIMIT-OUTPUT-TOKENS-REMAINING-XXX",
    "anthropic-ratelimit-output-tokens-reset": "ANTHROPIC-RATELIMIT-OUTPUT-TOKENS-RESET-XXX",
    "anthropic-ratelimit-tokens-limit": "ANTHROPIC-RATELIMIT-TOKENS-LIMIT-XXX",
    "anthropic-ratelimit-tokens-remaining": "ANTHROPIC-RATELIMIT-TOKENS-REMAINING-XXX",
    "anthropic-ratelimit-tokens-reset": "ANTHROPIC-RATELIMIT-TOKENS-RESET-XXX",
    "x-amz-date": "X-AMZ-DATE-XXX",
    "amz-sdk-invocation-id": "AMZ-SDK-INVOCATION-ID-XXX",
    "accept-encoding": "ACCEPT-ENCODING-XXX",
    "x-amzn-requestid": "X-AMZN-REQUESTID-XXX",
    "x-amzn-RequestId": "X-AMZN-REQUESTID-XXX",
}


def _filter_request_headers(request: Request) -> Request:  # type: ignore[no-any-unimported]
    """Filter sensitive headers from request before recording."""
    for header_name, replacement in HEADERS_TO_FILTER.items():
        for variant in [header_name, header_name.upper(), header_name.title()]:
            if variant in request.headers:
                request.headers[variant] = [replacement]

    request.method = request.method.upper()
    return request


def _filter_response_headers(response: dict[str, Any]) -> dict[str, Any]:
    """Filter sensitive headers from response before recording."""
    # Remove Content-Encoding to prevent decompression issues on replay
    for encoding_header in ["Content-Encoding", "content-encoding"]:
        response["headers"].pop(encoding_header, None)

    for header_name, replacement in HEADERS_TO_FILTER.items():
        for variant in [header_name, header_name.upper(), header_name.title()]:
            if variant in response["headers"]:
                response["headers"][variant] = [replacement]
    return response


@pytest.fixture(scope="module")
|
||||
def vcr_cassette_dir(request: Any) -> str:
|
||||
"""Generate cassette directory path based on test module location.
|
||||
|
||||
Organizes cassettes to mirror test directory structure within each package:
|
||||
lib/crewai/tests/llms/google/test_google.py -> lib/crewai/tests/cassettes/llms/google/
|
||||
lib/crewai-tools/tests/tools/test_search.py -> lib/crewai-tools/tests/cassettes/tools/
|
||||
"""
|
||||
test_file = Path(request.fspath)
|
||||
|
||||
for parent in test_file.parents:
|
||||
if parent.name in ("crewai", "crewai-tools") and parent.parent.name == "lib":
|
||||
package_root = parent
|
||||
break
|
||||
else:
|
||||
package_root = test_file.parent
|
||||
|
||||
tests_root = package_root / "tests"
|
||||
test_dir = test_file.parent
|
||||
|
||||
if test_dir != tests_root:
|
||||
relative_path = test_dir.relative_to(tests_root)
|
||||
cassette_dir = tests_root / "cassettes" / relative_path
|
||||
else:
|
||||
cassette_dir = tests_root / "cassettes"
|
||||
|
||||
cassette_dir.mkdir(parents=True, exist_ok=True)
|
||||
|
||||
return str(cassette_dir)
|
||||
|
||||
|
||||
@pytest.fixture(scope="module")
|
||||
def vcr_config(vcr_cassette_dir: str) -> dict[str, Any]:
|
||||
"""Configure VCR with organized cassette storage."""
|
||||
config = {
|
||||
"cassette_library_dir": vcr_cassette_dir,
|
||||
"record_mode": os.getenv("PYTEST_VCR_RECORD_MODE", "once"),
|
||||
"filter_headers": [(k, v) for k, v in HEADERS_TO_FILTER.items()],
|
||||
"before_record_request": _filter_request_headers,
|
||||
"before_record_response": _filter_response_headers,
|
||||
"filter_query_parameters": ["key"],
|
||||
"match_on": ["method", "scheme", "host", "port", "path"],
|
||||
}
|
||||
|
||||
if os.getenv("GITHUB_ACTIONS") == "true":
|
||||
config["record_mode"] = "none"
|
||||
|
||||
return config
|
||||
1737
crewAI.excalidraw
18
docs/common-room-tracking.js
Normal file
@@ -0,0 +1,18 @@
(function() {
  if (typeof window === 'undefined') return;
  if (typeof window.signals !== 'undefined') return;
  var script = document.createElement('script');
  script.src = 'https://cdn.cr-relay.com/v1/site/883520f4-c431-44be-80e7-e123a1ee7a2b/signals.js';
  script.async = true;
  window.signals = Object.assign(
    [],
    ['page', 'identify', 'form'].reduce(function (acc, method) {
      acc[method] = function () {
        signals.push([method, arguments]);
        return signals;
      };
      return acc;
    }, {})
  );
  document.head.appendChild(script);
})();
@@ -1,211 +0,0 @@
---
title: CLI
description: Learn how to use the CrewAI CLI to interact with CrewAI.
icon: terminal
---

# CrewAI CLI Documentation

The CrewAI CLI provides a set of commands to interact with CrewAI, allowing you to create, train, run, and manage crews & flows.

## Installation

To use the CrewAI CLI, make sure you have CrewAI installed:

```shell Terminal
pip install crewai
```

## Basic Usage

The basic structure of a CrewAI CLI command is:

```shell Terminal
crewai [COMMAND] [OPTIONS] [ARGUMENTS]
```

## Available Commands

### 1. Create

Create a new crew or flow.

```shell Terminal
crewai create [OPTIONS] TYPE NAME
```

- `TYPE`: Choose between "crew" or "flow"
- `NAME`: Name of the crew or flow

Example:
```shell Terminal
crewai create crew my_new_crew
crewai create flow my_new_flow
```

### 2. Version

Show the installed version of CrewAI.

```shell Terminal
crewai version [OPTIONS]
```

- `--tools`: (Optional) Show the installed version of CrewAI tools

Example:
```shell Terminal
crewai version
crewai version --tools
```

### 3. Train

Train the crew for a specified number of iterations.

```shell Terminal
crewai train [OPTIONS]
```

- `-n, --n_iterations INTEGER`: Number of iterations to train the crew (default: 5)
- `-f, --filename TEXT`: Path to a custom file for training (default: "trained_agents_data.pkl")

Example:
```shell Terminal
crewai train -n 10 -f my_training_data.pkl
```

### 4. Replay

Replay the crew execution from a specific task.

```shell Terminal
crewai replay [OPTIONS]
```

- `-t, --task_id TEXT`: Replay the crew from this task ID, including all subsequent tasks

Example:
```shell Terminal
crewai replay -t task_123456
```

### 5. Log-tasks-outputs

Retrieve your latest crew.kickoff() task outputs.

```shell Terminal
crewai log-tasks-outputs
```

### 6. Reset-memories

Reset the crew memories (long, short, entity, latest_crew_kickoff_outputs).

```shell Terminal
crewai reset-memories [OPTIONS]
```

- `-l, --long`: Reset LONG TERM memory
- `-s, --short`: Reset SHORT TERM memory
- `-e, --entities`: Reset ENTITIES memory
- `-k, --kickoff-outputs`: Reset LATEST KICKOFF TASK OUTPUTS
- `-a, --all`: Reset ALL memories

Example:
```shell Terminal
crewai reset-memories --long --short
crewai reset-memories --all
```

### 7. Test

Test the crew and evaluate the results.

```shell Terminal
crewai test [OPTIONS]
```

- `-n, --n_iterations INTEGER`: Number of iterations to test the crew (default: 3)
- `-m, --model TEXT`: LLM Model to run the tests on the Crew (default: "gpt-4o-mini")

Example:
```shell Terminal
crewai test -n 5 -m gpt-3.5-turbo
```

### 8. Run

Run the crew or flow.

```shell Terminal
crewai run
```

<Note>
Starting from version 0.103.0, the `crewai run` command can be used to run both standard crews and flows. For flows, it automatically detects the type from pyproject.toml and runs the appropriate command. This is now the recommended way to run both crews and flows.
</Note>

<Note>
Make sure to run these commands from the directory where your CrewAI project is set up.
Some commands may require additional configuration or setup within your project structure.
</Note>

### 9. Chat

Starting in version `0.98.0`, when you run the `crewai chat` command, you start an interactive session with your crew. The AI assistant will guide you by asking for necessary inputs to execute the crew. Once all inputs are provided, the crew will execute its tasks.

After receiving the results, you can continue interacting with the assistant for further instructions or questions.

```shell Terminal
crewai chat
```
<Note>
Ensure you execute these commands from your CrewAI project's root directory.
</Note>
<Note>
IMPORTANT: Set the `chat_llm` property in your `crew.py` file to enable this command.

```python
@crew
def crew(self) -> Crew:
    return Crew(
        agents=self.agents,
        tasks=self.tasks,
        process=Process.sequential,
        verbose=True,
        chat_llm="gpt-4o",  # LLM for chat orchestration
    )
```
</Note>

### 10. API Keys

When running the `crewai create crew` command, the CLI will first show you the top 5 most common LLM providers and ask you to select one.

Once you've selected an LLM provider, you will be prompted for API keys.

#### Initial API key providers

The CLI will initially prompt for API keys for the following services:

* OpenAI
* Groq
* Anthropic
* Google Gemini
* SambaNova

When you select a provider, the CLI will prompt you to enter your API key.

#### Other Options

If you select option 6, you will be able to select from a list of LiteLLM supported providers.

When you select a provider, the CLI will prompt you to enter the key name and the API key.

See the following link for each provider's key name:

* [LiteLLM Providers](https://docs.litellm.ai/docs/providers)

@@ -1,52 +0,0 @@
---
title: Collaboration
description: Exploring the dynamics of agent collaboration within the CrewAI framework, focusing on the newly integrated features for enhanced functionality.
icon: screen-users
---

## Collaboration Fundamentals

Collaboration in CrewAI is fundamental, enabling agents to combine their skills, share information, and assist each other in task execution, embodying a truly cooperative ecosystem.

- **Information Sharing**: Ensures all agents are well-informed and can contribute effectively by sharing data and findings.
- **Task Assistance**: Allows agents to seek help from peers with the required expertise for specific tasks.
- **Resource Allocation**: Optimizes task execution through the efficient distribution and sharing of resources among agents.

## Enhanced Attributes for Improved Collaboration

The `Crew` class has been enriched with several attributes to support advanced functionalities:

| Feature | Description |
|:--------|:------------|
| **Language Model Management** (`manager_llm`, `function_calling_llm`) | Manages language models for executing tasks and tools. `manager_llm` is required for hierarchical processes, while `function_calling_llm` is optional with a default value for streamlined interactions. |
| **Custom Manager Agent** (`manager_agent`) | Specifies a custom agent as the manager, replacing the default CrewAI manager. |
| **Process Flow** (`process`) | Defines execution logic (e.g., sequential, hierarchical) for task distribution. |
| **Verbose Logging** (`verbose`) | Provides detailed logging for monitoring and debugging. Accepts integer and boolean values to control verbosity level. |
| **Rate Limiting** (`max_rpm`) | Limits requests per minute to optimize resource usage. Setting guidelines depend on task complexity and load. |
| **Internationalization / Customization** (`language`, `prompt_file`) | Supports prompt customization for global usability. [Example of file](https://github.com/joaomdmoura/crewAI/blob/main/src/crewai/translations/en.json) |
| **Execution and Output Handling** (`full_output`) | Controls output granularity, distinguishing between full and final outputs. |
| **Callback and Telemetry** (`step_callback`, `task_callback`) | Enables step-wise and task-level execution monitoring and telemetry for performance analytics. |
| **Crew Sharing** (`share_crew`) | Allows sharing crew data with CrewAI for model improvement. Privacy implications and benefits should be considered. |
| **Usage Metrics** (`usage_metrics`) | Logs all LLM usage metrics during task execution for performance insights. |
| **Memory Usage** (`memory`) | Enables memory for storing execution history, aiding in agent learning and task efficiency. |
| **Embedder Configuration** (`embedder`) | Configures the embedder for language understanding and generation, with support for provider customization. |
| **Cache Management** (`cache`) | Specifies whether to cache tool execution results, enhancing performance. |
| **Output Logging** (`output_log_file`) | Defines the file path for logging crew execution output. |
| **Planning Mode** (`planning`) | Enables action planning before task execution. Set `planning=True` to activate. |
| **Replay Feature** (`replay`) | Provides CLI for listing tasks from the last run and replaying from specific tasks, aiding in task management and troubleshooting. |

## Delegation (Dividing to Conquer)

Delegation enhances functionality by allowing agents to intelligently assign tasks or seek help, thereby amplifying the crew's overall capability.

## Implementing Collaboration and Delegation

Setting up a crew involves defining the roles and capabilities of each agent. CrewAI seamlessly manages their interactions, ensuring efficient collaboration and delegation, with enhanced customization and monitoring features to adapt to various operational needs.
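
A minimal sketch of this setup (not from this page; the agent roles, prompts, and task are illustrative) showing how enabling delegation lets the writer hand research questions to the researcher:

```python Code
from crewai import Agent, Crew, Process, Task

researcher = Agent(
    role="Researcher",
    goal="Gather accurate data on a topic",
    backstory="Thorough and methodical.",
    allow_delegation=False,
)

writer = Agent(
    role="Writer",
    goal="Compile findings into a report",
    backstory="A clear, concise technical writer.",
    allow_delegation=True,  # may delegate research questions to the researcher
)

report = Task(
    description="Write a short report on CrewAI collaboration features.",
    expected_output="A one-page report.",
    agent=writer,
)

crew = Crew(
    agents=[researcher, writer],
    tasks=[report],
    process=Process.sequential,
    verbose=True,
)
result = crew.kickoff()
```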
## Example Scenario

Consider a crew with a researcher agent tasked with data gathering and a writer agent responsible for compiling reports. The integration of advanced language model management and process flow attributes allows for more sophisticated interactions, such as the writer delegating complex research tasks to the researcher or querying specific information, thereby facilitating a seamless workflow.

## Conclusion

The integration of advanced attributes and functionalities into the CrewAI framework significantly enriches the agent collaboration ecosystem. These enhancements not only simplify interactions but also offer unprecedented flexibility and control, paving the way for sophisticated AI-driven solutions capable of tackling complex tasks through intelligent collaboration and delegation.
@@ -1,624 +0,0 @@
---
title: Knowledge
description: What is knowledge in CrewAI and how to use it.
icon: book
---

## What is Knowledge?

Knowledge in CrewAI is a powerful system that allows AI agents to access and utilize external information sources during their tasks.
Think of it as giving your agents a reference library they can consult while working.

<Info>
Key benefits of using Knowledge:
- Enhance agents with domain-specific information
- Support decisions with real-world data
- Maintain context across conversations
- Ground responses in factual information
</Info>

## Supported Knowledge Sources

CrewAI supports various types of knowledge sources out of the box:

<CardGroup cols={2}>
  <Card title="Text Sources" icon="text">
    - Raw strings
    - Text files (.txt)
    - PDF documents
  </Card>
  <Card title="Structured Data" icon="table">
    - CSV files
    - Excel spreadsheets
    - JSON documents
  </Card>
</CardGroup>

## Supported Knowledge Parameters

| Parameter | Type | Required | Description |
|:----------|:-----|:---------|:------------|
| `sources` | **List[BaseKnowledgeSource]** | Yes | List of knowledge sources that provide content to be stored and queried. Can include PDF, CSV, Excel, JSON, text files, or string content. |
| `collection_name` | **str** | No | Name of the collection where the knowledge will be stored. Used to identify different sets of knowledge. Defaults to "knowledge" if not provided. |
| `storage` | **Optional[KnowledgeStorage]** | No | Custom storage configuration for managing how the knowledge is stored and retrieved. If not provided, a default storage will be created. |
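
A minimal sketch of these parameters in use, hedged: it assumes the `Knowledge` class lives at `crewai.knowledge.knowledge`, and the collection name and content string are illustrative.

```python Code
from crewai.knowledge.knowledge import Knowledge
from crewai.knowledge.source.string_knowledge_source import StringKnowledgeSource

source = StringKnowledgeSource(
    content="CrewAI supports string, file, and structured knowledge sources."
)
knowledge = Knowledge(
    collection_name="demo_knowledge",  # optional; defaults to "knowledge"
    sources=[source],                  # required list of knowledge sources
)
```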
## Quickstart Example

<Tip>
For file-based knowledge sources, make sure to place your files in a `knowledge` directory at the root of your project.
Also, use relative paths from the `knowledge` directory when creating the source.
</Tip>

Here's an example using string-based knowledge:

```python Code
from crewai import Agent, Task, Crew, Process, LLM
from crewai.knowledge.source.string_knowledge_source import StringKnowledgeSource

# Create a knowledge source
content = "Users name is John. He is 30 years old and lives in San Francisco."
string_source = StringKnowledgeSource(
    content=content,
)

# Create an LLM with a temperature of 0 to ensure deterministic outputs
llm = LLM(model="gpt-4o-mini", temperature=0)

# Create an agent with the knowledge store
agent = Agent(
    role="About User",
    goal="You know everything about the user.",
    backstory="""You are a master at understanding people and their preferences.""",
    verbose=True,
    allow_delegation=False,
    llm=llm,
)
task = Task(
    description="Answer the following questions about the user: {question}",
    expected_output="An answer to the question.",
    agent=agent,
)

crew = Crew(
    agents=[agent],
    tasks=[task],
    verbose=True,
    process=Process.sequential,
    knowledge_sources=[string_source],  # Enable knowledge by adding the sources here. You can also add more sources to the sources list.
)

result = crew.kickoff(inputs={"question": "What city does John live in and how old is he?"})
```

Here's another example with the `CrewDoclingSource`. The CrewDoclingSource is actually quite versatile and can handle multiple file formats including MD, PDF, DOCX, HTML, and more.

<Note>
You need to install `docling` for the following example to work: `uv add docling`
</Note>

```python Code
from crewai import LLM, Agent, Crew, Process, Task
from crewai.knowledge.source.crew_docling_source import CrewDoclingSource

# Create a knowledge source
content_source = CrewDoclingSource(
    file_paths=[
        "https://lilianweng.github.io/posts/2024-11-28-reward-hacking",
        "https://lilianweng.github.io/posts/2024-07-07-hallucination",
    ],
)

# Create an LLM with a temperature of 0 to ensure deterministic outputs
llm = LLM(model="gpt-4o-mini", temperature=0)

# Create an agent with the knowledge store
agent = Agent(
    role="About papers",
    goal="You know everything about the papers.",
    backstory="""You are a master at understanding papers and their content.""",
    verbose=True,
    allow_delegation=False,
    llm=llm,
)
task = Task(
    description="Answer the following questions about the papers: {question}",
    expected_output="An answer to the question.",
    agent=agent,
)

crew = Crew(
    agents=[agent],
    tasks=[task],
    verbose=True,
    process=Process.sequential,
    knowledge_sources=[
        content_source
    ],  # Enable knowledge by adding the sources here. You can also add more sources to the sources list.
)

result = crew.kickoff(
    inputs={
        "question": "What is the reward hacking paper about? Be sure to provide sources."
    }
)
```
## More Examples
|
||||
|
||||
Here are examples of how to use different types of knowledge sources:
|
||||
|
||||
### Text File Knowledge Source
|
||||
```python
|
||||
from crewai.knowledge.source.text_file_knowledge_source import TextFileKnowledgeSource
|
||||
|
||||
# Create a text file knowledge source
|
||||
text_source = TextFileKnowledgeSource(
|
||||
file_paths=["document.txt", "another.txt"]
|
||||
)
|
||||
|
||||
# Create crew with text file source on agents or crew level
|
||||
agent = Agent(
|
||||
...
|
||||
knowledge_sources=[text_source]
|
||||
)
|
||||
|
||||
crew = Crew(
|
||||
...
|
||||
knowledge_sources=[text_source]
|
||||
)
|
||||
```
|
||||
|
||||
### PDF Knowledge Source
```python
from crewai.knowledge.source.pdf_knowledge_source import PDFKnowledgeSource

# Create a PDF knowledge source
pdf_source = PDFKnowledgeSource(
    file_paths=["document.pdf", "another.pdf"]
)

# Create crew with PDF knowledge source on agents or crew level
agent = Agent(
    ...
    knowledge_sources=[pdf_source]
)

crew = Crew(
    ...
    knowledge_sources=[pdf_source]
)
```

### CSV Knowledge Source
```python
from crewai.knowledge.source.csv_knowledge_source import CSVKnowledgeSource

# Create a CSV knowledge source
csv_source = CSVKnowledgeSource(
    file_paths=["data.csv"]
)

# Create crew with CSV knowledge source on agents or crew level
agent = Agent(
    ...
    knowledge_sources=[csv_source]
)

crew = Crew(
    ...
    knowledge_sources=[csv_source]
)
```

### Excel Knowledge Source
```python
from crewai.knowledge.source.excel_knowledge_source import ExcelKnowledgeSource

# Create an Excel knowledge source
excel_source = ExcelKnowledgeSource(
    file_paths=["spreadsheet.xlsx"]
)

# Create crew with Excel knowledge source on agents or crew level
agent = Agent(
    ...
    knowledge_sources=[excel_source]
)

crew = Crew(
    ...
    knowledge_sources=[excel_source]
)
```

### JSON Knowledge Source
```python
from crewai.knowledge.source.json_knowledge_source import JSONKnowledgeSource

# Create a JSON knowledge source
json_source = JSONKnowledgeSource(
    file_paths=["data.json"]
)

# Create crew with JSON knowledge source on agents or crew level
agent = Agent(
    ...
    knowledge_sources=[json_source]
)

crew = Crew(
    ...
    knowledge_sources=[json_source]
)
```

## Knowledge Configuration

### Chunking Configuration

Knowledge sources automatically chunk content for better processing.
You can configure chunking behavior in your knowledge sources:

```python
from crewai.knowledge.source.string_knowledge_source import StringKnowledgeSource

source = StringKnowledgeSource(
    content="Your content here",
    chunk_size=4000,   # Maximum size of each chunk (default: 4000)
    chunk_overlap=200  # Overlap between chunks (default: 200)
)
```

The chunking configuration helps in:
- Breaking down large documents into manageable pieces
- Maintaining context through chunk overlap
- Optimizing retrieval accuracy

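The same chunking parameters apply to the file-based sources shown above; a minimal sketch, reusing the hypothetical `document.pdf` from the earlier example and assuming the defaults are too coarse for dense content:

```python
from crewai.knowledge.source.pdf_knowledge_source import PDFKnowledgeSource

# Smaller chunks with modest overlap for dense, technical content
pdf_source = PDFKnowledgeSource(
    file_paths=["document.pdf"],
    chunk_size=2000,
    chunk_overlap=100
)
```
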
### Embeddings Configuration

You can also configure the embedder for the knowledge store.
This is useful if you want to use a different embedder for the knowledge store than the one used for the agents.
The `embedder` parameter supports various embedding model providers, including:
- `openai`: OpenAI's embedding models
- `google`: Google's text embedding models
- `azure`: Azure OpenAI embeddings
- `ollama`: Local embeddings with Ollama
- `vertexai`: Google Cloud VertexAI embeddings
- `cohere`: Cohere's embedding models
- `voyageai`: VoyageAI's embedding models
- `bedrock`: AWS Bedrock embeddings
- `huggingface`: Hugging Face models
- `watson`: IBM Watson embeddings

Here's an example of how to configure the embedder for the knowledge store using Google's `text-embedding-004` model:
<CodeGroup>
```python Example
from crewai import Agent, Task, Crew, Process, LLM
from crewai.knowledge.source.string_knowledge_source import StringKnowledgeSource
import os

# Get the GEMINI API key
GEMINI_API_KEY = os.environ.get("GEMINI_API_KEY")

# Create a knowledge source
content = "User's name is John. He is 30 years old and lives in San Francisco."
string_source = StringKnowledgeSource(
    content=content,
)

# Create an LLM with a temperature of 0 to ensure deterministic outputs
gemini_llm = LLM(
    model="gemini/gemini-1.5-pro-002",
    api_key=GEMINI_API_KEY,
    temperature=0,
)

# Create an agent with the knowledge store
agent = Agent(
    role="About User",
    goal="You know everything about the user.",
    backstory="""You are a master at understanding people and their preferences.""",
    verbose=True,
    allow_delegation=False,
    llm=gemini_llm,
    embedder={
        "provider": "google",
        "config": {
            "model": "models/text-embedding-004",
            "api_key": GEMINI_API_KEY,
        }
    }
)

task = Task(
    description="Answer the following questions about the user: {question}",
    expected_output="An answer to the question.",
    agent=agent,
)

crew = Crew(
    agents=[agent],
    tasks=[task],
    verbose=True,
    process=Process.sequential,
    knowledge_sources=[string_source],
    embedder={
        "provider": "google",
        "config": {
            "model": "models/text-embedding-004",
            "api_key": GEMINI_API_KEY,
        }
    }
)

result = crew.kickoff(inputs={"question": "What city does John live in and how old is he?"})
```
```text Output
# Agent: About User
## Task: Answer the following questions about the user: What city does John live in and how old is he?

# Agent: About User
## Final Answer:
John is 30 years old and lives in San Francisco.
```
</CodeGroup>
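
If you would rather keep embeddings local, the same `embedder` parameter accepts the `ollama` provider listed above. A minimal sketch, assuming an Ollama server is running locally and the `nomic-embed-text` embedding model has been pulled:

```python Code
crew = Crew(
    agents=[agent],
    tasks=[task],
    knowledge_sources=[string_source],
    embedder={
        "provider": "ollama",
        "config": {
            "model": "nomic-embed-text",
        },
    },
)
```
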
## Clearing Knowledge

If you need to clear the knowledge stored in CrewAI, you can use the `crewai reset-memories` command with the `--knowledge` option.

```bash Command
crewai reset-memories --knowledge
```

This is useful when you've updated your knowledge sources and want to ensure that the agents are using the most recent information.

## Agent-Specific Knowledge

While knowledge can be provided at the crew level using `crew.knowledge_sources`, individual agents can also have their own knowledge sources using the `knowledge_sources` parameter:

```python Code
from crewai import Agent, Task, Crew
from crewai.knowledge.source.string_knowledge_source import StringKnowledgeSource

# Create agent-specific knowledge about a product
product_specs = StringKnowledgeSource(
    content="""The XPS 13 laptop features:
    - 13.4-inch 4K display
    - Intel Core i7 processor
    - 16GB RAM
    - 512GB SSD storage
    - 12-hour battery life""",
    metadata={"category": "product_specs"}
)

# Create a support agent with product knowledge
support_agent = Agent(
    role="Technical Support Specialist",
    goal="Provide accurate product information and support.",
    backstory="You are an expert on our laptop products and specifications.",
    knowledge_sources=[product_specs]  # Agent-specific knowledge
)

# Create a task that requires product knowledge
support_task = Task(
    description="Answer this customer question: {question}",
    expected_output="A clear, accurate answer to the customer's question.",  # Task requires an expected output
    agent=support_agent
)

# Create and run the crew
crew = Crew(
    agents=[support_agent],
    tasks=[support_task]
)

# Get answer about the laptop's specifications
result = crew.kickoff(
    inputs={"question": "What is the storage capacity of the XPS 13?"}
)
```

<Info>
Benefits of agent-specific knowledge:
- Give agents specialized information for their roles
- Maintain separation of concerns between agents
- Combine with crew-level knowledge for layered information access
</Info>

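Agent-level and crew-level knowledge can also be layered; a minimal sketch that adds a hypothetical crew-wide policy source on top of the agent-specific `product_specs` above:

```python Code
# Hypothetical crew-wide knowledge shared by every agent in the crew
company_policies = StringKnowledgeSource(
    content="Returns are accepted within 30 days with proof of purchase."
)

crew = Crew(
    agents=[support_agent],               # keeps its agent-level product_specs
    tasks=[support_task],
    knowledge_sources=[company_policies]  # crew-level knowledge
)
```
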
## Custom Knowledge Sources

CrewAI allows you to create custom knowledge sources for any type of data by extending the `BaseKnowledgeSource` class. Let's create a practical example that fetches and processes space news articles.

#### Space News Knowledge Source Example

<CodeGroup>

```python Code
from crewai import Agent, Task, Crew, Process, LLM
from crewai.knowledge.source.base_knowledge_source import BaseKnowledgeSource
import requests
from typing import Dict, Any
from pydantic import Field

class SpaceNewsKnowledgeSource(BaseKnowledgeSource):
    """Knowledge source that fetches data from Space News API."""

    api_endpoint: str = Field(description="API endpoint URL")
    limit: int = Field(default=10, description="Number of articles to fetch")

    def load_content(self) -> Dict[Any, str]:
        """Fetch and format space news articles."""
        try:
            response = requests.get(
                f"{self.api_endpoint}?limit={self.limit}"
            )
            response.raise_for_status()

            data = response.json()
            articles = data.get('results', [])

            formatted_data = self._format_articles(articles)
            return {self.api_endpoint: formatted_data}
        except Exception as e:
            raise ValueError(f"Failed to fetch space news: {str(e)}")

    def _format_articles(self, articles: list) -> str:
        """Format articles into readable text."""
        formatted = "Space News Articles:\n\n"
        for article in articles:
            formatted += f"""
                Title: {article['title']}
                Published: {article['published_at']}
                Summary: {article['summary']}
                News Site: {article['news_site']}
                URL: {article['url']}
                -------------------"""
        return formatted

    def add(self) -> None:
        """Process and store the articles."""
        content = self.load_content()
        for _, text in content.items():
            chunks = self._chunk_text(text)
            self.chunks.extend(chunks)

        self._save_documents()

# Create knowledge source
recent_news = SpaceNewsKnowledgeSource(
    api_endpoint="https://api.spaceflightnewsapi.net/v4/articles",
    limit=10,
)

# Create specialized agent
space_analyst = Agent(
    role="Space News Analyst",
    goal="Answer questions about space news accurately and comprehensively",
    backstory="""You are a space industry analyst with expertise in space exploration,
    satellite technology, and space industry trends. You excel at answering questions
    about space news and providing detailed, accurate information.""",
    knowledge_sources=[recent_news],
    llm=LLM(model="gpt-4", temperature=0.0)
)

# Create task that handles user questions
analysis_task = Task(
    description="Answer this question about space news: {user_question}",
    expected_output="A detailed answer based on the recent space news articles",
    agent=space_analyst
)

# Create and run the crew
crew = Crew(
    agents=[space_analyst],
    tasks=[analysis_task],
    verbose=True,
    process=Process.sequential
)

# Example usage
result = crew.kickoff(
    inputs={"user_question": "What are the latest developments in space exploration?"}
)
```

```output Output
# Agent: Space News Analyst
## Task: Answer this question about space news: What are the latest developments in space exploration?

# Agent: Space News Analyst
## Final Answer:
The latest developments in space exploration, based on recent space news articles, include the following:

1. SpaceX has received the final regulatory approvals to proceed with the second integrated Starship/Super Heavy launch, scheduled for as soon as the morning of Nov. 17, 2023. This is a significant step in SpaceX's ambitious plans for space exploration and colonization. [Source: SpaceNews](https://spacenews.com/starship-cleared-for-nov-17-launch/)

2. SpaceX has also informed the US Federal Communications Commission (FCC) that it plans to begin launching its first next-generation Starlink Gen2 satellites. This represents a major upgrade to the Starlink satellite internet service, which aims to provide high-speed internet access worldwide. [Source: Teslarati](https://www.teslarati.com/spacex-first-starlink-gen2-satellite-launch-2022/)

3. AI startup Synthetaic has raised $15 million in Series B funding. The company uses artificial intelligence to analyze data from space and air sensors, which could have significant applications in space exploration and satellite technology. [Source: SpaceNews](https://spacenews.com/ai-startup-synthetaic-raises-15-million-in-series-b-funding/)

4. The Space Force has formally established a unit within the U.S. Indo-Pacific Command, marking a permanent presence in the Indo-Pacific region. This could have significant implications for space security and geopolitics. [Source: SpaceNews](https://spacenews.com/space-force-establishes-permanent-presence-in-indo-pacific-region/)

5. Slingshot Aerospace, a space tracking and data analytics company, is expanding its network of ground-based optical telescopes to increase coverage of low Earth orbit. This could improve our ability to track and analyze objects in low Earth orbit, including satellites and space debris. [Source: SpaceNews](https://spacenews.com/slingshots-space-tracking-network-to-extend-coverage-of-low-earth-orbit/)

6. The National Natural Science Foundation of China has outlined a five-year project for researchers to study the assembly of ultra-large spacecraft. This could lead to significant advancements in spacecraft technology and space exploration capabilities. [Source: SpaceNews](https://spacenews.com/china-researching-challenges-of-kilometer-scale-ultra-large-spacecraft/)

7. The Center for AEroSpace Autonomy Research (CAESAR) at Stanford University is focusing on spacecraft autonomy. The center held a kickoff event on May 22, 2024, to highlight the industry, academia, and government collaboration it seeks to foster. This could lead to significant advancements in autonomous spacecraft technology. [Source: SpaceNews](https://spacenews.com/stanford-center-focuses-on-spacecraft-autonomy/)
```

</CodeGroup>
#### Key Components Explained

1. **Custom Knowledge Source (`SpaceNewsKnowledgeSource`)**:
   - Extends `BaseKnowledgeSource` for integration with CrewAI
   - Configurable API endpoint and article limit
   - Implements three key methods:
     - `load_content()`: Fetches articles from the API
     - `_format_articles()`: Structures the articles into readable text
     - `add()`: Processes and stores the content

2. **Agent Configuration**:
   - Specialized role as a Space News Analyst
   - Uses the knowledge source to access space news

3. **Task Setup**:
   - Takes a user question as input through `{user_question}`
   - Designed to provide detailed answers based on the knowledge source

4. **Crew Orchestration**:
   - Manages the workflow between agent and task
   - Handles input/output through the kickoff method

This example demonstrates how to:

- Create a custom knowledge source that fetches real-time data
- Process and format external data for AI consumption
- Use the knowledge source to answer specific user questions
- Integrate everything seamlessly with CrewAI's agent system

#### About the Spaceflight News API

The example uses the [Spaceflight News API](https://api.spaceflightnewsapi.net/v4/docs/), which:

- Provides free access to space-related news articles
- Requires no authentication
- Returns structured data about space news
- Supports pagination and filtering

You can customize the API query by modifying the endpoint URL:

```python
# Fetch more articles
recent_news = SpaceNewsKnowledgeSource(
    api_endpoint="https://api.spaceflightnewsapi.net/v4/articles",
    limit=20,  # Increase the number of articles
)

# Add search parameters
recent_news = SpaceNewsKnowledgeSource(
    api_endpoint="https://api.spaceflightnewsapi.net/v4/articles?search=NASA",  # Search for NASA news
    limit=10,
)
```

## Best Practices

<AccordionGroup>
<Accordion title="Content Organization">
- Keep chunk sizes appropriate for your content type
- Consider content overlap for context preservation
- Organize related information into separate knowledge sources
</Accordion>

<Accordion title="Performance Tips">
- Adjust chunk sizes based on content complexity
- Configure appropriate embedding models
- Consider using local embedding providers for faster processing
</Accordion>
</AccordionGroup>

---
title: Using LlamaIndex Tools
description: Learn how to integrate LlamaIndex tools with CrewAI agents to enhance search-based queries and more.
icon: toolbox
---

## Using LlamaIndex Tools

<Info>
CrewAI seamlessly integrates with LlamaIndex’s comprehensive toolkit for RAG (Retrieval-Augmented Generation) and agentic pipelines, enabling advanced search-based queries and more.
</Info>

Here are a few ways to initialize and wrap the built-in tools offered by LlamaIndex:

```python Code
from crewai import Agent
from crewai_tools import LlamaIndexTool

# Example 1: Initialize from FunctionTool
from llama_index.core.tools import FunctionTool

# Replace this placeholder with your own function logic
def your_python_function(query: str) -> str:
    """Example placeholder that simply echoes the query."""
    return f"Results for: {query}"

og_tool = FunctionTool.from_defaults(
    your_python_function,
    name="<name>",
    description='<description>'
)
tool = LlamaIndexTool.from_tool(og_tool)

# Example 2: Initialize from LlamaHub Tools
from llama_index.tools.wolfram_alpha import WolframAlphaToolSpec
wolfram_spec = WolframAlphaToolSpec(app_id="<app_id>")
wolfram_tools = wolfram_spec.to_tool_list()
tools = [LlamaIndexTool.from_tool(t) for t in wolfram_tools]

# Example 3: Initialize Tool from a LlamaIndex Query Engine
# (`index` is assumed to be an existing LlamaIndex index, e.g. a VectorStoreIndex;
# see "Steps to Get Started" below for how to build one)
query_engine = index.as_query_engine()
query_tool = LlamaIndexTool.from_query_engine(
    query_engine,
    name="Uber 2019 10K Query Tool",
    description="Use this tool to lookup the 2019 Uber 10K Annual Report"
)

# Create and assign the tools to an agent
agent = Agent(
    role='Research Analyst',
    goal='Provide up-to-date market analysis',
    backstory='An expert analyst with a keen eye for market trends.',
    tools=[tool, *tools, query_tool]
)

# rest of the code ...
```

## Steps to Get Started

To effectively use the LlamaIndexTool, follow these steps:

<Steps>
<Step title="Package Installation">
Make sure the `crewai[tools]` package is installed in your Python environment:
<CodeGroup>
```shell Terminal
pip install 'crewai[tools]'
```
</CodeGroup>
</Step>
<Step title="Install and Use LlamaIndex">
|
||||
Follow the LlamaIndex documentation [LlamaIndex Documentation](https://docs.llamaindex.ai/) to set up a RAG/agent pipeline.
|
||||
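
A minimal sketch of such a pipeline, assuming `llama-index` is installed and `./data` contains your documents (the tool name and description are illustrative):

```python Code
from llama_index.core import SimpleDirectoryReader, VectorStoreIndex
from crewai_tools import LlamaIndexTool

# Load local documents and build an in-memory vector index
documents = SimpleDirectoryReader("./data").load_data()
index = VectorStoreIndex.from_documents(documents)

# Wrap the query engine as a CrewAI-compatible tool
query_tool = LlamaIndexTool.from_query_engine(
    index.as_query_engine(),
    name="Local Docs Query Tool",
    description="Use this tool to look up information in the local document set"
)
```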
</Step>
</Steps>

---
title: 'LLMs'
description: 'A comprehensive guide to configuring and using Large Language Models (LLMs) in your CrewAI projects'
icon: 'microchip-ai'
---

<Note>
CrewAI integrates with multiple LLM providers through LiteLLM, giving you the flexibility to choose the right model for your specific use case. This guide will help you understand how to configure and use different LLM providers in your CrewAI projects.
</Note>

## What are LLMs?

Large Language Models (LLMs) are the core intelligence behind CrewAI agents. They enable agents to understand context, make decisions, and generate human-like responses. Here's what you need to know:

<CardGroup cols={2}>
<Card title="LLM Basics" icon="brain">
Large Language Models are AI systems trained on vast amounts of text data. They power the intelligence of your CrewAI agents, enabling them to understand and generate human-like text.
</Card>
<Card title="Context Window" icon="window">
The context window determines how much text an LLM can process at once. Larger windows (e.g., 128K tokens) allow for more context but may be more expensive and slower.
</Card>
<Card title="Temperature" icon="temperature-three-quarters">
Temperature (0.0 to 1.0) controls response randomness. Lower values (e.g., 0.2) produce more focused, deterministic outputs, while higher values (e.g., 0.8) increase creativity and variability.
</Card>
<Card title="Provider Selection" icon="server">
Each LLM provider (e.g., OpenAI, Anthropic, Google) offers different models with varying capabilities, pricing, and features. Choose based on your needs for accuracy, speed, and cost.
</Card>
</CardGroup>

## Setting Up Your LLM

There are three ways to configure LLMs in CrewAI. Choose the method that best fits your workflow:

<Tabs>
<Tab title="1. Environment Variables">
The simplest way to get started. Set these variables in your environment:

```bash
# Required: Your API key for authentication
OPENAI_API_KEY=<your-api-key>

# Optional: Default model selection
OPENAI_MODEL_NAME=gpt-4o-mini  # Default if not set

# Optional: Organization ID (if applicable)
OPENAI_ORGANIZATION_ID=<your-org-id>
```

<Warning>
Never commit API keys to version control. Use environment files (.env) or your system's secret management.
</Warning>
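
For local development, a common pattern is to keep these variables in a `.env` file and load it at startup; a minimal sketch using the `python-dotenv` package (an assumption, not a CrewAI requirement):

```python
from dotenv import load_dotenv

load_dotenv()  # reads variables such as OPENAI_API_KEY from a local .env file
```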
</Tab>
<Tab title="2. YAML Configuration">
Create a YAML file to define your agent configurations. This method is great for version control and team collaboration:

```yaml
researcher:
  role: Research Specialist
  goal: Conduct comprehensive research and analysis
  backstory: A dedicated research professional with years of experience
  verbose: true
  llm: openai/gpt-4o-mini  # your model here
  # (see provider configuration examples below for more)
```

<Info>
The YAML configuration allows you to:
- Version control your agent settings
- Easily switch between different models
- Share configurations across team members
- Document model choices and their purposes
</Info>
</Tab>
<Tab title="3. Direct Code">
For maximum flexibility, configure LLMs directly in your Python code:

```python
from crewai import LLM

# Basic configuration
llm = LLM(model="gpt-4")

# Advanced configuration with detailed parameters
llm = LLM(
    model="gpt-4o-mini",
    temperature=0.7,        # Higher for more creative outputs
    timeout=120,            # Seconds to wait for response
    max_tokens=4000,        # Maximum length of response
    top_p=0.9,              # Nucleus sampling parameter
    frequency_penalty=0.1,  # Reduce repetition
    presence_penalty=0.1,   # Encourage topic diversity
    response_format={"type": "json"},  # For structured outputs
    seed=42                 # For reproducible results
)
```

<Info>
Parameter explanations:
- `temperature`: Controls randomness (0.0-1.0)
- `timeout`: Maximum wait time for response
- `max_tokens`: Limits response length
- `top_p`: Alternative to temperature for sampling
- `frequency_penalty`: Reduces word repetition
- `presence_penalty`: Encourages new topics
- `response_format`: Specifies output structure
- `seed`: Ensures consistent outputs
</Info>
</Tab>
</Tabs>

## Provider Configuration Examples

CrewAI supports a multitude of LLM providers, each offering unique features, authentication methods, and model capabilities.
In this section, you'll find detailed examples that help you select, configure, and optimize the LLM that best fits your project's needs.

<AccordionGroup>
<Accordion title="OpenAI">
Set the following environment variables in your `.env` file:

```toml Code
# Required
OPENAI_API_KEY=sk-...

# Optional
OPENAI_API_BASE=<custom-base-url>
OPENAI_ORGANIZATION=<your-org-id>
```

Example usage in your CrewAI project:
```python Code
from crewai import LLM

llm = LLM(
    model="openai/gpt-4",  # call model by provider/model_name
    temperature=0.8,
    max_tokens=150,
    top_p=0.9,
    frequency_penalty=0.1,
    presence_penalty=0.1,
    stop=["END"],
    seed=42
)
```

OpenAI is one of the leading providers of LLMs with a wide range of models and features.

| Model | Context Window | Best For |
|----------------------|------------------|-----------------------------------------------|
| GPT-4 | 8,192 tokens | High-accuracy tasks, complex reasoning |
| GPT-4 Turbo | 128,000 tokens | Long-form content, document analysis |
| GPT-4o & GPT-4o-mini | 128,000 tokens | Cost-effective large context processing |
| o3-mini | 200,000 tokens | Fast reasoning, complex reasoning |
| o1-mini | 128,000 tokens | Fast reasoning, complex reasoning |
| o1-preview | 128,000 tokens | Fast reasoning, complex reasoning |
| o1 | 200,000 tokens | Fast reasoning, complex reasoning |
</Accordion>

<Accordion title="Anthropic">
|
||||
```toml Code
|
||||
ANTHROPIC_API_KEY=sk-ant-...
|
||||
```
|
||||
|
||||
Example usage in your CrewAI project:
|
||||
```python Code
|
||||
llm = LLM(
|
||||
model="anthropic/claude-3-sonnet-20240229-v1:0",
|
||||
temperature=0.7
|
||||
)
|
||||
```
|
||||
</Accordion>
|
||||
|
||||
<Accordion title="Google">
|
||||
Set the following environment variables in your `.env` file:
|
||||
|
||||
```toml Code
|
||||
# Option 1: Gemini accessed with an API key.
|
||||
# https://ai.google.dev/gemini-api/docs/api-key
|
||||
GEMINI_API_KEY=<your-api-key>
|
||||
|
||||
# Option 2: Vertex AI IAM credentials for Gemini, Anthropic, and Model Garden.
|
||||
# https://cloud.google.com/vertex-ai/generative-ai/docs/overview
|
||||
```
|
||||
|
||||
Get credentials from your Google Cloud Console and save it to a JSON file with the following code:
|
||||
```python Code
|
||||
import json
|
||||
|
||||
file_path = 'path/to/vertex_ai_service_account.json'
|
||||
|
||||
# Load the JSON file
|
||||
with open(file_path, 'r') as file:
|
||||
vertex_credentials = json.load(file)
|
||||
|
||||
# Convert the credentials to a JSON string
|
||||
vertex_credentials_json = json.dumps(vertex_credentials)
|
||||
```
|
||||
|
||||
Example usage in your CrewAI project:
|
||||
```python Code
|
||||
from crewai import LLM
|
||||
|
||||
llm = LLM(
|
||||
model="gemini/gemini-1.5-pro-latest",
|
||||
temperature=0.7,
|
||||
vertex_credentials=vertex_credentials_json
|
||||
)
|
||||
```
|
||||
Google offers a range of powerful models optimized for different use cases:
|
||||
|
||||
| Model | Context Window | Best For |
|
||||
|-----------------------|----------------|------------------------------------------------------------------|
|
||||
| gemini-2.0-flash-exp | 1M tokens | Higher quality at faster speed, multimodal model, good for most tasks |
|
||||
| gemini-1.5-flash | 1M tokens | Balanced multimodal model, good for most tasks |
|
||||
| gemini-1.5-flash-8B | 1M tokens | Fastest, most cost-efficient, good for high-frequency tasks |
|
||||
| gemini-1.5-pro | 2M tokens | Best performing, wide variety of reasoning tasks including logical reasoning, coding, and creative collaboration |
|
||||
</Accordion>
|
||||
|
||||
<Accordion title="Azure">
|
||||
```toml Code
|
||||
# Required
|
||||
AZURE_API_KEY=<your-api-key>
|
||||
AZURE_API_BASE=<your-resource-url>
|
||||
AZURE_API_VERSION=<api-version>
|
||||
|
||||
# Optional
|
||||
AZURE_AD_TOKEN=<your-azure-ad-token>
|
||||
AZURE_API_TYPE=<your-azure-api-type>
|
||||
```
|
||||
|
||||
Example usage in your CrewAI project:
|
||||
```python Code
|
||||
llm = LLM(
|
||||
model="azure/gpt-4",
|
||||
api_version="2023-05-15"
|
||||
)
|
||||
```
|
||||
</Accordion>
|
||||
|
||||
<Accordion title="AWS Bedrock">
|
||||
```toml Code
|
||||
AWS_ACCESS_KEY_ID=<your-access-key>
|
||||
AWS_SECRET_ACCESS_KEY=<your-secret-key>
|
||||
AWS_DEFAULT_REGION=<your-region>
|
||||
```
|
||||
|
||||
Example usage in your CrewAI project:
|
||||
```python Code
|
||||
llm = LLM(
|
||||
model="bedrock/anthropic.claude-3-sonnet-20240229-v1:0"
|
||||
)
|
||||
```
|
||||
</Accordion>
|
||||
|
||||
<Accordion title="Amazon SageMaker">
|
||||
```toml Code
|
||||
AWS_ACCESS_KEY_ID=<your-access-key>
|
||||
AWS_SECRET_ACCESS_KEY=<your-secret-key>
|
||||
AWS_DEFAULT_REGION=<your-region>
|
||||
```
|
||||
|
||||
Example usage in your CrewAI project:
|
||||
```python Code
|
||||
llm = LLM(
|
||||
model="sagemaker/<my-endpoint>"
|
||||
)
|
||||
```
|
||||
</Accordion>
|
||||
|
||||
<Accordion title="Mistral">
|
||||
Set the following environment variables in your `.env` file:
|
||||
```toml Code
|
||||
MISTRAL_API_KEY=<your-api-key>
|
||||
```
|
||||
|
||||
Example usage in your CrewAI project:
|
||||
```python Code
|
||||
llm = LLM(
|
||||
model="mistral/mistral-large-latest",
|
||||
temperature=0.7
|
||||
)
|
||||
```
|
||||
</Accordion>
|
||||
|
||||
<Accordion title="Nvidia NIM">
|
||||
Set the following environment variables in your `.env` file:
|
||||
```toml Code
|
||||
NVIDIA_API_KEY=<your-api-key>
|
||||
```
|
||||
|
||||
Example usage in your CrewAI project:
|
||||
```python Code
|
||||
llm = LLM(
|
||||
model="nvidia_nim/meta/llama3-70b-instruct",
|
||||
temperature=0.7
|
||||
)
|
||||
```
|
||||
|
||||
Nvidia NIM provides a comprehensive suite of models for various use cases, from general-purpose tasks to specialized applications.
|
||||
|
||||
| Model | Context Window | Best For |
|
||||
|-------------------------------------------------------------------------|----------------|-------------------------------------------------------------------|
|
||||
| nvidia/mistral-nemo-minitron-8b-8k-instruct | 8,192 tokens | State-of-the-art small language model delivering superior accuracy for chatbot, virtual assistants, and content generation. |
|
||||
| nvidia/nemotron-4-mini-hindi-4b-instruct | 4,096 tokens | A bilingual Hindi-English SLM for on-device inference, tailored specifically for Hindi Language. |
|
||||
| nvidia/llama-3.1-nemotron-70b-instruct | 128k tokens | Customized for enhanced helpfulness in responses |
|
||||
| nvidia/llama3-chatqa-1.5-8b | 128k tokens | Advanced LLM to generate high-quality, context-aware responses for chatbots and search engines. |
|
||||
| nvidia/llama3-chatqa-1.5-70b | 128k tokens | Advanced LLM to generate high-quality, context-aware responses for chatbots and search engines. |
|
||||
| nvidia/vila | 128k tokens | Multi-modal vision-language model that understands text/img/video and creates informative responses |
|
||||
| nvidia/neva-22 | 4,096 tokens | Multi-modal vision-language model that understands text/images and generates informative responses |
|
||||
| nvidia/nemotron-mini-4b-instruct | 8,192 tokens | General-purpose tasks |
|
||||
| nvidia/usdcode-llama3-70b-instruct | 128k tokens | State-of-the-art LLM that answers OpenUSD knowledge queries and generates USD-Python code. |
|
||||
| nvidia/nemotron-4-340b-instruct | 4,096 tokens | Creates diverse synthetic data that mimics the characteristics of real-world data. |
|
||||
| meta/codellama-70b | 100k tokens | LLM capable of generating code from natural language and vice versa. |
|
||||
| meta/llama2-70b | 4,096 tokens | Cutting-edge large language AI model capable of generating text and code in response to prompts. |
|
||||
| meta/llama3-8b-instruct | 8,192 tokens | Advanced state-of-the-art LLM with language understanding, superior reasoning, and text generation. |
|
||||
| meta/llama3-70b-instruct | 8,192 tokens | Powers complex conversations with superior contextual understanding, reasoning and text generation. |
|
||||
| meta/llama-3.1-8b-instruct | 128k tokens | Advanced state-of-the-art model with language understanding, superior reasoning, and text generation. |
|
||||
| meta/llama-3.1-70b-instruct | 128k tokens | Powers complex conversations with superior contextual understanding, reasoning and text generation. |
|
||||
| meta/llama-3.1-405b-instruct | 128k tokens | Advanced LLM for synthetic data generation, distillation, and inference for chatbots, coding, and domain-specific tasks. |
|
||||
| meta/llama-3.2-1b-instruct | 128k tokens | Advanced state-of-the-art small language model with language understanding, superior reasoning, and text generation. |
|
||||
| meta/llama-3.2-3b-instruct | 128k tokens | Advanced state-of-the-art small language model with language understanding, superior reasoning, and text generation. |
|
||||
| meta/llama-3.2-11b-vision-instruct | 128k tokens | Advanced state-of-the-art small language model with language understanding, superior reasoning, and text generation. |
|
||||
| meta/llama-3.2-90b-vision-instruct | 128k tokens | Advanced state-of-the-art small language model with language understanding, superior reasoning, and text generation. |
|
||||
| google/gemma-7b | 8,192 tokens | Cutting-edge text generation model text understanding, transformation, and code generation. |
|
||||
| google/gemma-2b | 8,192 tokens | Cutting-edge text generation model text understanding, transformation, and code generation. |
|
||||
| google/codegemma-7b | 8,192 tokens | Cutting-edge model built on Google's Gemma-7B specialized for code generation and code completion. |
|
||||
| google/codegemma-1.1-7b | 8,192 tokens | Advanced programming model for code generation, completion, reasoning, and instruction following. |
|
||||
| google/recurrentgemma-2b | 8,192 tokens | Novel recurrent architecture based language model for faster inference when generating long sequences. |
|
||||
| google/gemma-2-9b-it | 8,192 tokens | Cutting-edge text generation model text understanding, transformation, and code generation. |
|
||||
| google/gemma-2-27b-it | 8,192 tokens | Cutting-edge text generation model text understanding, transformation, and code generation. |
|
||||
| google/gemma-2-2b-it | 8,192 tokens | Cutting-edge text generation model text understanding, transformation, and code generation. |
|
||||
| google/deplot | 512 tokens | One-shot visual language understanding model that translates images of plots into tables. |
|
||||
| google/paligemma | 8,192 tokens | Vision language model adept at comprehending text and visual inputs to produce informative responses. |
|
||||
| mistralai/mistral-7b-instruct-v0.2 | 32k tokens | This LLM follows instructions, completes requests, and generates creative text. |
|
||||
| mistralai/mixtral-8x7b-instruct-v0.1 | 8,192 tokens | An MOE LLM that follows instructions, completes requests, and generates creative text. |
|
||||
| mistralai/mistral-large | 4,096 tokens | Creates diverse synthetic data that mimics the characteristics of real-world data. |
|
||||
| mistralai/mixtral-8x22b-instruct-v0.1 | 8,192 tokens | Creates diverse synthetic data that mimics the characteristics of real-world data. |
|
||||
| mistralai/mistral-7b-instruct-v0.3 | 32k tokens | This LLM follows instructions, completes requests, and generates creative text. |
|
||||
| nv-mistralai/mistral-nemo-12b-instruct | 128k tokens | Most advanced language model for reasoning, code, multilingual tasks; runs on a single GPU. |
|
||||
| mistralai/mamba-codestral-7b-v0.1 | 256k tokens | Model for writing and interacting with code across a wide range of programming languages and tasks. |
|
||||
| microsoft/phi-3-mini-128k-instruct | 128K tokens | Lightweight, state-of-the-art open LLM with strong math and logical reasoning skills. |
|
||||
| microsoft/phi-3-mini-4k-instruct | 4,096 tokens | Lightweight, state-of-the-art open LLM with strong math and logical reasoning skills. |
|
||||
| microsoft/phi-3-small-8k-instruct | 8,192 tokens | Lightweight, state-of-the-art open LLM with strong math and logical reasoning skills. |
|
||||
| microsoft/phi-3-small-128k-instruct | 128K tokens | Lightweight, state-of-the-art open LLM with strong math and logical reasoning skills. |
|
||||
| microsoft/phi-3-medium-4k-instruct | 4,096 tokens | Lightweight, state-of-the-art open LLM with strong math and logical reasoning skills. |
|
||||
| microsoft/phi-3-medium-128k-instruct | 128K tokens | Lightweight, state-of-the-art open LLM with strong math and logical reasoning skills. |
|
||||
| microsoft/phi-3.5-mini-instruct | 128K tokens | Lightweight multilingual LLM powering AI applications in latency bound, memory/compute constrained environments |
|
||||
| microsoft/phi-3.5-moe-instruct | 128K tokens | Advanced LLM based on Mixture of Experts architecure to deliver compute efficient content generation |
|
||||
| microsoft/kosmos-2 | 1,024 tokens | Groundbreaking multimodal model designed to understand and reason about visual elements in images. |
|
||||
| microsoft/phi-3-vision-128k-instruct | 128k tokens | Cutting-edge open multimodal model exceling in high-quality reasoning from images. |
|
||||
| microsoft/phi-3.5-vision-instruct | 128k tokens | Cutting-edge open multimodal model exceling in high-quality reasoning from images. |
|
||||
| databricks/dbrx-instruct | 12k tokens | A general-purpose LLM with state-of-the-art performance in language understanding, coding, and RAG. |
|
||||
| snowflake/arctic | 1,024 tokens | Delivers high efficiency inference for enterprise applications focused on SQL generation and coding. |
|
||||
| aisingapore/sea-lion-7b-instruct | 4,096 tokens | LLM to represent and serve the linguistic and cultural diversity of Southeast Asia |
|
||||
| ibm/granite-8b-code-instruct | 4,096 tokens | Software programming LLM for code generation, completion, explanation, and multi-turn conversion. |
|
||||
| ibm/granite-34b-code-instruct | 8,192 tokens | Software programming LLM for code generation, completion, explanation, and multi-turn conversion. |
|
||||
| ibm/granite-3.0-8b-instruct | 4,096 tokens | Advanced Small Language Model supporting RAG, summarization, classification, code, and agentic AI |
|
||||
| ibm/granite-3.0-3b-a800m-instruct | 4,096 tokens | Highly efficient Mixture of Experts model for RAG, summarization, entity extraction, and classification |
|
||||
| mediatek/breeze-7b-instruct | 4,096 tokens | Creates diverse synthetic data that mimics the characteristics of real-world data. |
|
||||
| upstage/solar-10.7b-instruct | 4,096 tokens | Excels in NLP tasks, particularly in instruction-following, reasoning, and mathematics. |
|
||||
| writer/palmyra-med-70b-32k | 32k tokens | Leading LLM for accurate, contextually relevant responses in the medical domain. |
|
||||
| writer/palmyra-med-70b | 32k tokens | Leading LLM for accurate, contextually relevant responses in the medical domain. |
|
||||
| writer/palmyra-fin-70b-32k | 32k tokens | Specialized LLM for financial analysis, reporting, and data processing |
|
||||
| 01-ai/yi-large | 32k tokens | Powerful model trained on English and Chinese for diverse tasks including chatbot and creative writing. |
|
||||
| deepseek-ai/deepseek-coder-6.7b-instruct | 2k tokens | Powerful coding model offering advanced capabilities in code generation, completion, and infilling |
|
||||
| rakuten/rakutenai-7b-instruct | 1,024 tokens | Advanced state-of-the-art LLM with language understanding, superior reasoning, and text generation. |
|
||||
| rakuten/rakutenai-7b-chat | 1,024 tokens | Advanced state-of-the-art LLM with language understanding, superior reasoning, and text generation. |
|
||||
| baichuan-inc/baichuan2-13b-chat | 4,096 tokens | Support Chinese and English chat, coding, math, instruction following, solving quizzes |
|
||||
</Accordion>
|
||||
|
||||
<Accordion title="Groq">
|
||||
Set the following environment variables in your `.env` file:
|
||||
|
||||
```toml Code
|
||||
GROQ_API_KEY=<your-api-key>
|
||||
```
|
||||
|
||||
Example usage in your CrewAI project:
|
||||
```python Code
|
||||
llm = LLM(
|
||||
model="groq/llama-3.2-90b-text-preview",
|
||||
temperature=0.7
|
||||
)
|
||||
```
|
||||
| Model | Context Window | Best For |
|
||||
|-------------------|------------------|--------------------------------------------|
|
||||
| Llama 3.1 70B/8B | 131,072 tokens | High-performance, large context tasks |
|
||||
| Llama 3.2 Series | 8,192 tokens | General-purpose tasks |
|
||||
| Mixtral 8x7B | 32,768 tokens | Balanced performance and context |
|
||||
</Accordion>
|
||||
|
||||
<Accordion title="IBM watsonx.ai">
|
||||
Set the following environment variables in your `.env` file:
|
||||
```toml Code
|
||||
# Required
|
||||
WATSONX_URL=<your-url>
|
||||
WATSONX_APIKEY=<your-apikey>
|
||||
WATSONX_PROJECT_ID=<your-project-id>
|
||||
|
||||
# Optional
|
||||
WATSONX_TOKEN=<your-token>
|
||||
WATSONX_DEPLOYMENT_SPACE_ID=<your-space-id>
|
||||
```
|
||||
|
||||
Example usage in your CrewAI project:
|
||||
```python Code
|
||||
llm = LLM(
|
||||
model="watsonx/meta-llama/llama-3-1-70b-instruct",
|
||||
base_url="https://api.watsonx.ai/v1"
|
||||
)
|
||||
```
|
||||
</Accordion>
|
||||
|
||||
<Accordion title="Ollama (Local LLMs)">
|
||||
1. Install Ollama: [ollama.ai](https://ollama.ai/)
|
||||
2. Run a model: `ollama run llama2`
|
||||
3. Configure:
|
||||
|
||||
```python Code
|
||||
llm = LLM(
|
||||
model="ollama/llama3:70b",
|
||||
base_url="http://localhost:11434"
|
||||
)
|
||||
```
|
||||
</Accordion>
|
||||
|
||||
<Accordion title="Fireworks AI">
|
||||
Set the following environment variables in your `.env` file:
|
||||
```toml Code
|
||||
FIREWORKS_API_KEY=<your-api-key>
|
||||
```
|
||||
|
||||
Example usage in your CrewAI project:
|
||||
```python Code
|
||||
llm = LLM(
|
||||
model="fireworks_ai/accounts/fireworks/models/llama-v3-70b-instruct",
|
||||
temperature=0.7
|
||||
)
|
||||
```
|
||||
</Accordion>
|
||||
|
||||
<Accordion title="Perplexity AI">
|
||||
Set the following environment variables in your `.env` file:
|
||||
```toml Code
|
||||
PERPLEXITY_API_KEY=<your-api-key>
|
||||
```
|
||||
|
||||
Example usage in your CrewAI project:
|
||||
```python Code
|
||||
llm = LLM(
|
||||
model="llama-3.1-sonar-large-128k-online",
|
||||
base_url="https://api.perplexity.ai/"
|
||||
)
|
||||
```
|
||||
</Accordion>
|
||||
|
||||
<Accordion title="Hugging Face">
|
||||
Set the following environment variables in your `.env` file:
|
||||
```toml Code
|
||||
HUGGINGFACE_API_KEY=<your-api-key>
|
||||
```
|
||||
|
||||
Example usage in your CrewAI project:
|
||||
```python Code
|
||||
llm = LLM(
|
||||
model="huggingface/meta-llama/Meta-Llama-3.1-8B-Instruct",
|
||||
base_url="your_api_endpoint"
|
||||
)
|
||||
```
|
||||
</Accordion>
|
||||
|
||||
<Accordion title="SambaNova">
|
||||
Set the following environment variables in your `.env` file:
|
||||
|
||||
```toml Code
|
||||
SAMBANOVA_API_KEY=<your-api-key>
|
||||
```
|
||||
|
||||
Example usage in your CrewAI project:
|
||||
```python Code
|
||||
llm = LLM(
|
||||
model="sambanova/Meta-Llama-3.1-8B-Instruct",
|
||||
temperature=0.7
|
||||
)
|
||||
```
|
||||
| Model | Context Window | Best For |
|
||||
|--------------------|------------------------|----------------------------------------------|
|
||||
| Llama 3.1 70B/8B | Up to 131,072 tokens | High-performance, large context tasks |
|
||||
| Llama 3.1 405B | 8,192 tokens | High-performance and output quality |
|
||||
| Llama 3.2 Series | 8,192 tokens | General-purpose, multimodal tasks |
|
||||
| Llama 3.3 70B | Up to 131,072 tokens | High-performance and output quality |
|
||||
| Qwen2 familly | 8,192 tokens | High-performance and output quality |
|
||||
</Accordion>
|
||||
|
||||
<Accordion title="Cerebras">
|
||||
Set the following environment variables in your `.env` file:
|
||||
```toml Code
|
||||
# Required
|
||||
CEREBRAS_API_KEY=<your-api-key>
|
||||
```
|
||||
|
||||
Example usage in your CrewAI project:
|
||||
```python Code
|
||||
llm = LLM(
|
||||
model="cerebras/llama3.1-70b",
|
||||
temperature=0.7,
|
||||
max_tokens=8192
|
||||
)
|
||||
```
|
||||
|
||||
<Info>
|
||||
Cerebras features:
|
||||
- Fast inference speeds
|
||||
- Competitive pricing
|
||||
- Good balance of speed and quality
|
||||
- Support for long context windows
|
||||
</Info>
|
||||
</Accordion>
|
||||
|
||||
<Accordion title="Open Router">
|
||||
Set the following environment variables in your `.env` file:
|
||||
```toml Code
|
||||
OPENROUTER_API_KEY=<your-api-key>
|
||||
```
|
||||
|
||||
Example usage in your CrewAI project:
|
||||
```python Code
|
||||
llm = LLM(
|
||||
model="openrouter/deepseek/deepseek-r1",
|
||||
base_url="https://openrouter.ai/api/v1",
|
||||
api_key=OPENROUTER_API_KEY
|
||||
)
|
||||
```
|
||||
|
||||
<Info>
|
||||
Open Router models:
|
||||
- openrouter/deepseek/deepseek-r1
|
||||
- openrouter/deepseek/deepseek-chat
|
||||
</Info>
|
||||
</Accordion>
|
||||
</AccordionGroup>
|
||||
|
||||
## Structured LLM Calls

CrewAI supports structured responses from LLM calls by allowing you to define a `response_format` using a Pydantic model. This enables the framework to automatically parse and validate the output, making it easier to integrate the response into your application without manual post-processing.

For example, you can define a Pydantic model to represent the expected response structure and pass it as the `response_format` when instantiating the LLM. The model will then be used to convert the LLM output into a structured Python object.

```python Code
from pydantic import BaseModel

from crewai import LLM

class Dog(BaseModel):
    name: str
    age: int
    breed: str


llm = LLM(model="gpt-4o", response_format=Dog)

response = llm.call(
    "Analyze the following messages and return the name, age, and breed. "
    "Meet Kona! She is 3 years old and is a black german shepherd."
)
print(response)

# Output:
# Dog(name='Kona', age=3, breed='black german shepherd')
```

## Advanced Features and Optimization

Learn how to get the most out of your LLM configuration:

<AccordionGroup>
<Accordion title="Context Window Management">
CrewAI includes smart context management features:

```python
from crewai import LLM

# CrewAI automatically handles:
# 1. Token counting and tracking
# 2. Content summarization when needed
# 3. Task splitting for large contexts

llm = LLM(
    model="gpt-4",
    max_tokens=4000,  # Limit response length
)
```

<Info>
Best practices for context management:
1. Choose models with appropriate context windows
2. Pre-process long inputs when possible
3. Use chunking for large documents
4. Monitor token usage to optimize costs
</Info>
</Accordion>

<Accordion title="Performance Optimization">
|
||||
<Steps>
|
||||
<Step title="Token Usage Optimization">
|
||||
Choose the right context window for your task:
|
||||
- Small tasks (up to 4K tokens): Standard models
|
||||
- Medium tasks (between 4K-32K): Enhanced models
|
||||
- Large tasks (over 32K): Large context models
|
||||
|
||||
```python
|
||||
# Configure model with appropriate settings
|
||||
llm = LLM(
|
||||
model="openai/gpt-4-turbo-preview",
|
||||
temperature=0.7, # Adjust based on task
|
||||
max_tokens=4096, # Set based on output needs
|
||||
timeout=300 # Longer timeout for complex tasks
|
||||
)
|
||||
```
|
||||
<Tip>
|
||||
- Lower temperature (0.1 to 0.3) for factual responses
|
||||
- Higher temperature (0.7 to 0.9) for creative tasks
|
||||
</Tip>
|
||||
</Step>
|
||||
|
||||
<Step title="Best Practices">
|
||||
1. Monitor token usage
|
||||
2. Implement rate limiting
|
||||
3. Use caching when possible
|
||||
4. Set appropriate max_tokens limits
|
||||
</Step>
|
||||
</Steps>
|
||||
|
||||
<Info>
|
||||
Remember to regularly monitor your token usage and adjust your configuration as needed to optimize costs and performance.
|
||||
</Info>
|
||||
</Accordion>
|
||||
</AccordionGroup>
|
||||
|
||||
## Common Issues and Solutions

<Tabs>
<Tab title="Authentication">
<Warning>
Most authentication issues can be resolved by checking API key format and environment variable names.
</Warning>

```bash
# OpenAI
OPENAI_API_KEY=sk-...

# Anthropic
ANTHROPIC_API_KEY=sk-ant-...
```
</Tab>
<Tab title="Model Names">
<Check>
Always include the provider prefix in model names
</Check>

```python
# Correct
llm = LLM(model="openai/gpt-4")

# Incorrect
llm = LLM(model="gpt-4")
```
</Tab>
<Tab title="Context Length">
<Tip>
Use larger context models for extensive tasks
</Tip>

```python
# Large context model
llm = LLM(model="openai/gpt-4o")  # 128K tokens
```
</Tab>
</Tabs>

## Getting Help

If you need assistance, these resources are available:

<CardGroup cols={3}>
<Card
    title="LiteLLM Documentation"
    href="https://docs.litellm.ai/docs/"
    icon="book"
>
    Comprehensive documentation for LiteLLM integration and troubleshooting common issues.
</Card>
<Card
    title="GitHub Issues"
    href="https://github.com/joaomdmoura/crewAI/issues"
    icon="bug"
>
    Report bugs, request features, or browse existing issues for solutions.
</Card>
<Card
    title="Community Forum"
    href="https://community.crewai.com"
    icon="comment-question"
>
    Connect with other CrewAI users, share experiences, and get help from the community.
</Card>
</CardGroup>

<Note>
Best Practices for API Key Security:
- Use environment variables or secure vaults
- Never commit keys to version control
- Rotate keys regularly
- Use separate keys for development and production
- Monitor key usage for unusual patterns
</Note>

---
title: Memory
description: Leveraging memory systems in the CrewAI framework to enhance agent capabilities.
icon: database
---

## Introduction to Memory Systems in CrewAI

The CrewAI framework introduces a sophisticated memory system designed to significantly enhance the capabilities of AI agents.
This system comprises `short-term memory`, `long-term memory`, `entity memory`, and `contextual memory`, each serving a unique purpose in helping agents remember, reason, and learn from past interactions.

## Memory System Components

| Component | Description |
| :------------------- | :---------------------------------------------------------------------------------------------------------------------- |
| **Short-Term Memory**| Temporarily stores recent interactions and outcomes using `RAG`, enabling agents to recall and utilize information relevant to their current context during the current execution.|
| **Long-Term Memory** | Preserves valuable insights and learnings from past executions, allowing agents to build and refine their knowledge over time. |
| **Entity Memory** | Captures and organizes information about entities (people, places, concepts) encountered during tasks, facilitating deeper understanding and relationship mapping. Uses `RAG` for storing entity information. |
| **Contextual Memory**| Maintains the context of interactions by combining `ShortTermMemory`, `LongTermMemory`, and `EntityMemory`, aiding in the coherence and relevance of agent responses over a sequence of tasks or a conversation. |
| **User Memory** | Stores user-specific information and preferences, enhancing personalization and user experience. |

## How Memory Systems Empower Agents

1. **Contextual Awareness**: With short-term and contextual memory, agents gain the ability to maintain context over a conversation or task sequence, leading to more coherent and relevant responses.

2. **Experience Accumulation**: Long-term memory allows agents to accumulate experiences, learning from past actions to improve future decision-making and problem-solving.

3. **Entity Understanding**: By maintaining entity memory, agents can recognize and remember key entities, enhancing their ability to process and interact with complex information.

## Implementing Memory in Your Crew

When configuring a crew, you can enable and customize each memory component to suit the crew's objectives and the nature of the tasks it will perform.
By default, the memory system is disabled; you can activate it by setting `memory=True` in the crew configuration.
The memory will use OpenAI embeddings by default, but you can change this by setting `embedder` to a different model.
It's also possible to supply your own pre-configured memory instances.

The `embedder` only applies to **Short-Term Memory**, which uses Chroma for RAG.
**Long-Term Memory** uses SQLite3 to store task results. Currently, there is no way to override these storage implementations.
The data storage files are saved into a platform-specific location found using the appdirs package,
and the default storage location can be overridden using the **CREWAI_STORAGE_DIR** environment variable.

### Example: Configuring Memory for a Crew

```python Code
from crewai import Crew, Agent, Task, Process

# Assemble your crew with memory capabilities
my_crew = Crew(
    agents=[...],
    tasks=[...],
    process=Process.sequential,
    memory=True,
    verbose=True
)
```
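
To swap out the default OpenAI embeddings, pass the `embedder` parameter mentioned above; a minimal sketch, assuming a local Ollama server with the `mxbai-embed-large` embedding model available:

```python Code
from crewai import Crew, Process

my_crew = Crew(
    agents=[...],
    tasks=[...],
    process=Process.sequential,
    memory=True,
    embedder={
        "provider": "ollama",
        "config": {"model": "mxbai-embed-large"},
    },
    verbose=True,
)
```
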
### Example: Using Custom Memory Instances (e.g., FAISS as the vector DB)

```python Code
from crewai import Crew, Process
from crewai.memory import LongTermMemory, ShortTermMemory, EntityMemory
from crewai.memory.storage import LTMSQLiteStorage, RAGStorage

# Assemble your crew with memory capabilities
my_crew: Crew = Crew(
    agents=[...],
    tasks=[...],
    process=Process.sequential,
    memory=True,
    # Long-term memory for persistent storage across sessions
    long_term_memory=LongTermMemory(
        storage=LTMSQLiteStorage(
            db_path="/my_crew1/long_term_memory_storage.db"
        )
    ),
    # Short-term memory for current context using RAG
    short_term_memory=ShortTermMemory(
        storage=RAGStorage(
            embedder_config={
                "provider": "openai",
                "config": {
                    "model": 'text-embedding-3-small'
                }
            },
            type="short_term",
            path="/my_crew1/"
        )
    ),
    # Entity memory for tracking key information about entities
    entity_memory=EntityMemory(
        storage=RAGStorage(
            embedder_config={
                "provider": "openai",
                "config": {
                    "model": 'text-embedding-3-small'
                }
            },
            type="short_term",
            path="/my_crew1/"
        )
    ),
    verbose=True,
)
```

## Security Considerations

When configuring memory storage:
- Use environment variables for storage paths (e.g., `CREWAI_STORAGE_DIR`)
- Never hardcode sensitive information like database credentials
- Consider access permissions for storage directories
- Use relative paths when possible to maintain portability

Example using environment variables:

```python
import os
from crewai import Crew
from crewai.memory import LongTermMemory
from crewai.memory.storage import LTMSQLiteStorage

# Configure the storage path using an environment variable
storage_path = os.getenv("CREWAI_STORAGE_DIR", "./storage")
crew = Crew(
    memory=True,
    long_term_memory=LongTermMemory(
        storage=LTMSQLiteStorage(
            db_path=f"{storage_path}/memory.db"
        )
    )
)
```

## Configuration Examples

### Basic Memory Configuration

```python
from crewai import Crew

# Simple memory configuration
crew = Crew(memory=True)  # Uses default storage locations
```

### Custom Storage Configuration

```python
from crewai import Crew
from crewai.memory import LongTermMemory
from crewai.memory.storage import LTMSQLiteStorage

# Configure custom storage paths
crew = Crew(
    memory=True,
    long_term_memory=LongTermMemory(
        storage=LTMSQLiteStorage(db_path="./memory.db")
    )
)
```

## Integrating Mem0 for Enhanced User Memory

[Mem0](https://mem0.ai/) is a self-improving memory layer for LLM applications, enabling personalized AI experiences.

To include user-specific memory, get your API key [here](https://app.mem0.ai/dashboard/api-keys) and refer to the [docs](https://docs.mem0.ai/platform/quickstart#4-1-create-memories) for adding user preferences.

```python Code
import os
from crewai import Crew, Process
from mem0 import MemoryClient

# Set environment variables for Mem0
os.environ["MEM0_API_KEY"] = "m0-xx"

# Step 1: Record preferences based on past conversation or user input
client = MemoryClient()
messages = [
    {"role": "user", "content": "Hi there! I'm planning a vacation and could use some advice."},
    {"role": "assistant", "content": "Hello! I'd be happy to help with your vacation planning. What kind of destination do you prefer?"},
    {"role": "user", "content": "I am more of a beach person than a mountain person."},
    {"role": "assistant", "content": "That's interesting. Do you like hotels or Airbnb?"},
    {"role": "user", "content": "I like Airbnb more."},
]
client.add(messages, user_id="john")

# Step 2: Create a Crew with User Memory
crew = Crew(
    agents=[...],
    tasks=[...],
    verbose=True,
    process=Process.sequential,
    memory=True,
    memory_config={
        "provider": "mem0",
        "config": {"user_id": "john"},
    },
)
```

## Memory Configuration Options

If you want to access a specific organization and project, you can set the `org_id` and `project_id` parameters in the memory configuration.

```python Code
from crewai import Crew

crew = Crew(
    agents=[...],
    tasks=[...],
    verbose=True,
    memory=True,
    memory_config={
        "provider": "mem0",
        "config": {"user_id": "john", "org_id": "my_org_id", "project_id": "my_project_id"},
    },
)
```

## Additional Embedding Providers

### Using OpenAI embeddings (already default)

```python Code
from crewai import Crew, Agent, Task, Process

my_crew = Crew(
    agents=[...],
    tasks=[...],
    process=Process.sequential,
    memory=True,
    verbose=True,
    embedder={
        "provider": "openai",
        "config": {
            "model": "text-embedding-3-small"
        }
    }
)
```

Alternatively, you can directly pass the `OpenAIEmbeddingFunction` to the `embedder` parameter.

Example:

```python Code
from crewai import Crew, Agent, Task, Process
from chromadb.utils.embedding_functions import OpenAIEmbeddingFunction

my_crew = Crew(
    agents=[...],
    tasks=[...],
    process=Process.sequential,
    memory=True,
    verbose=True,
    embedder=OpenAIEmbeddingFunction(
        api_key="YOUR_API_KEY",
        model_name="text-embedding-3-small"
    )
)
```

### Using Ollama embeddings

```python Code
from crewai import Crew, Agent, Task, Process

my_crew = Crew(
    agents=[...],
    tasks=[...],
    process=Process.sequential,
    memory=True,
    verbose=True,
    embedder={
        "provider": "ollama",
        "config": {
            "model": "mxbai-embed-large"
        }
    }
)
```

### Using Google AI embeddings

#### Prerequisites
Before using Google AI embeddings, ensure you have:
- Access to the Gemini API
- The necessary API keys and permissions

You will need to update your *pyproject.toml* dependencies:

```toml
dependencies = [
    "google-generativeai>=0.8.4", # main version in January/2025 - crewai v0.100.0 and crewai-tools v0.33.0
    "crewai[tools]>=0.100.0,<1.0.0"
]
```

```python Code
from crewai import Crew, Agent, Task, Process

my_crew = Crew(
    agents=[...],
    tasks=[...],
    process=Process.sequential,
    memory=True,
    verbose=True,
    embedder={
        "provider": "google",
        "config": {
            "api_key": "<YOUR_API_KEY>",
            "model": "<model_name>"
        }
    }
)
```

### Using Azure OpenAI embeddings

```python Code
from crewai import Crew, Agent, Task, Process

my_crew = Crew(
    agents=[...],
    tasks=[...],
    process=Process.sequential,
    memory=True,
    verbose=True,
    embedder={
        "provider": "openai",
        "config": {
            "api_key": "YOUR_API_KEY",
            "api_base": "YOUR_API_BASE_PATH",
            "api_version": "YOUR_API_VERSION",
            "model_name": "text-embedding-3-small"
        }
    }
)
```

### Using Vertex AI embeddings

```python Code
from crewai import Crew, Agent, Task, Process

my_crew = Crew(
    agents=[...],
    tasks=[...],
    process=Process.sequential,
    memory=True,
    verbose=True,
    embedder={
        "provider": "vertexai",
        "config": {
            "project_id": "YOUR_PROJECT_ID",
            "region": "YOUR_REGION",
            "api_key": "YOUR_API_KEY",
            "model_name": "textembedding-gecko"
        }
    }
)
```

### Using Cohere embeddings

```python Code
from crewai import Crew, Agent, Task, Process

my_crew = Crew(
    agents=[...],
    tasks=[...],
    process=Process.sequential,
    memory=True,
    verbose=True,
    embedder={
        "provider": "cohere",
        "config": {
            "api_key": "YOUR_API_KEY",
            "model": "<model_name>"
        }
    }
)
```

### Using VoyageAI embeddings

```python Code
from crewai import Crew, Agent, Task, Process

my_crew = Crew(
    agents=[...],
    tasks=[...],
    process=Process.sequential,
    memory=True,
    verbose=True,
    embedder={
        "provider": "voyageai",
        "config": {
            "api_key": "YOUR_API_KEY",
            "model": "<model_name>"
        }
    }
)
```

### Using HuggingFace embeddings

```python Code
from crewai import Crew, Agent, Task, Process

my_crew = Crew(
    agents=[...],
    tasks=[...],
    process=Process.sequential,
    memory=True,
    verbose=True,
    embedder={
        "provider": "huggingface",
        "config": {
            "api_url": "<api_url>"
        }
    }
)
```

### Using Watson embeddings

```python Code
# Note: Ensure you have installed and imported `ibm_watsonx_ai` for Watson embeddings to work.
from crewai import Crew, Agent, Task, Process

my_crew = Crew(
    agents=[...],
    tasks=[...],
    process=Process.sequential,
    memory=True,
    verbose=True,
    embedder={
        "provider": "watson",
        "config": {
            "model": "<model_name>",
            "api_url": "<api_url>",
            "api_key": "<YOUR_API_KEY>",
            "project_id": "<YOUR_PROJECT_ID>"
        }
    }
)
```

### Using Amazon Bedrock embeddings

```python Code
# Note: Ensure you have installed `boto3` for Bedrock embeddings to work.
import os
import boto3
from crewai import Crew, Agent, Task, Process

boto3_session = boto3.Session(
    region_name=os.environ.get("AWS_REGION_NAME"),
    aws_access_key_id=os.environ.get("AWS_ACCESS_KEY_ID"),
    aws_secret_access_key=os.environ.get("AWS_SECRET_ACCESS_KEY")
)

my_crew = Crew(
    agents=[...],
    tasks=[...],
    process=Process.sequential,
    memory=True,
    embedder={
        "provider": "bedrock",
        "config": {
            "session": boto3_session,
            "model": "amazon.titan-embed-text-v2:0",
            "vector_dimension": 1024
        }
    },
    verbose=True
)
```

### Adding Custom Embedding Function

```python Code
from crewai import Crew, Agent, Task, Process
from chromadb import Documents, EmbeddingFunction, Embeddings

# Create a custom embedding function
class CustomEmbedder(EmbeddingFunction):
    def __call__(self, input: Documents) -> Embeddings:
        # generate one (dummy) embedding per input document
        return [[1.0, 2.0, 3.0] for _ in input]

my_crew = Crew(
    agents=[...],
    tasks=[...],
    process=Process.sequential,
    memory=True,
    verbose=True,
    embedder={
        "provider": "custom",
        "config": {
            "embedder": CustomEmbedder()
        }
    }
)
```

### Resetting Memory via the CLI

```shell
crewai reset-memories [OPTIONS]
```

#### Resetting Memory Options

| Option | Description | Type | Default |
| :----------------- | :------------------------------- | :------------- | :------ |
| `-l`, `--long` | Reset LONG TERM memory. | Flag (boolean) | False |
| `-s`, `--short` | Reset SHORT TERM memory. | Flag (boolean) | False |
| `-e`, `--entities` | Reset ENTITIES memory. | Flag (boolean) | False |
| `-k`, `--kickoff-outputs` | Reset LATEST KICKOFF TASK OUTPUTS. | Flag (boolean) | False |
| `-kn`, `--knowledge` | Reset KNOWLEDGE storage. | Flag (boolean) | False |
| `-a`, `--all` | Reset ALL memories. | Flag (boolean) | False |

Note: To use the CLI command, your crew must be defined in a file called crew.py in the same directory.
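
For example, to clear only the short-term and entity stores (using the flags from the table above):

```shell
crewai reset-memories --short --entities
```
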
### Resetting Memory via the Crew Object

```python
my_crew = Crew(
    agents=[...],
    tasks=[...],
    process=Process.sequential,
    memory=True,
    verbose=True,
    embedder={
        "provider": "custom",
        "config": {
            "embedder": CustomEmbedder()
        }
    }
)

my_crew.reset_memories(command_type='all')  # Resets all the memory
```

#### Resetting Memory Options

| Command Type | Description |
| :----------------- | :------------------------------- |
| `long` | Reset LONG TERM memory. |
| `short` | Reset SHORT TERM memory. |
| `entities` | Reset ENTITIES memory. |
| `kickoff_outputs` | Reset LATEST KICKOFF TASK OUTPUTS. |
| `knowledge` | Reset KNOWLEDGE memory. |
| `all` | Reset ALL memories. |

## Benefits of Using CrewAI's Memory System

- 🦾 **Adaptive Learning:** Crews become more efficient over time, adapting to new information and refining their approach to tasks.
- 🫡 **Enhanced Personalization:** Memory enables agents to remember user preferences and historical interactions, leading to personalized experiences.
- 🧠 **Improved Problem Solving:** Access to a rich memory store aids agents in making more informed decisions, drawing on past learnings and contextual insights.

## Conclusion

Integrating CrewAI's memory system into your projects is straightforward. By leveraging the provided memory components and configurations,
you can quickly empower your agents with the ability to remember, reason, and learn from their interactions, unlocking new levels of intelligence and capability.

---
title: Training
description: Learn how to train your CrewAI agents by giving them feedback early on and get consistent results.
icon: dumbbell
---

## Introduction

The training feature in CrewAI allows you to train your AI agents using the command-line interface (CLI).
By running the command `crewai train -n <n_iterations>`, you can specify the number of iterations for the training process.

During training, CrewAI utilizes techniques to optimize the performance of your agents along with human feedback.
This helps the agents improve their understanding, decision-making, and problem-solving abilities.

### Training Your Crew Using the CLI

To use the training feature, follow these steps:

1. Open your terminal or command prompt.
2. Navigate to the directory where your CrewAI project is located.
3. Run the following command:

```shell
crewai train -n <n_iterations> [filename]
```
<Tip>
Replace `<n_iterations>` with the desired number of training iterations, and the optional `[filename]` with an appropriate filename ending with `.pkl`.
</Tip>
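
For instance, a five-iteration run that saves the result to a hypothetical `trained_model.pkl` might look like:

```shell
crewai train -n 5 trained_model.pkl
```
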
### Training Your Crew Programmatically

To train your crew programmatically, use the following steps:

1. Define the number of iterations for training.
2. Specify the input parameters for the training process.
3. Execute the training command within a try-except block to handle potential errors.

```python Code
n_iterations = 2
inputs = {"topic": "CrewAI Training"}
filename = "your_model.pkl"

try:
    YourCrewName_Crew().crew().train(
        n_iterations=n_iterations,
        inputs=inputs,
        filename=filename
    )
except Exception as e:
    raise Exception(f"An error occurred while training the crew: {e}")
```

### Key Points to Note

- **Positive Integer Requirement:** Ensure that the number of iterations (`n_iterations`) is a positive integer. The code will raise a `ValueError` if this condition is not met.
- **Filename Requirement:** Ensure that the filename ends with `.pkl`. The code will raise a `ValueError` if this condition is not met.
- **Error Handling:** The code handles subprocess errors and unexpected exceptions, providing error messages to the user.

It is important to note that the training process may take some time, depending on the complexity of your agents, and will also require your feedback on each iteration.

Once the training is complete, your agents will be equipped with enhanced capabilities and knowledge, ready to tackle complex tasks and provide more consistent and valuable insights.

Remember to regularly update and retrain your agents to ensure they stay up-to-date with the latest information and advancements in the field.

Happy training with CrewAI! 🚀

---
title: "GET /inputs"
description: "Get required inputs for your crew"
openapi: "/enterprise-api.en.yaml GET /inputs"
mode: "wide"
---

---
title: "Introduction"
description: "Complete reference for the CrewAI AOP REST API"
icon: "code"
mode: "wide"
---

# CrewAI AOP API

Welcome to the CrewAI AOP API reference. This API allows you to programmatically interact with your deployed crews, enabling integration with your applications, workflows, and services.

## Quick Start

<Steps>
  <Step title="Get Your API Credentials">
    Navigate to your crew's detail page in the CrewAI AOP dashboard and copy your Bearer Token from the Status tab.
  </Step>

  <Step title="Discover Required Inputs">
    Use the `GET /inputs` endpoint to see what parameters your crew expects.
  </Step>

  <Step title="Start a Crew Execution">
    Call `POST /kickoff` with your inputs to start the crew execution and receive a `kickoff_id`.
  </Step>

  <Step title="Monitor Progress">
    Use `GET /status/{kickoff_id}` to check execution status and retrieve results.
  </Step>
</Steps>

## Authentication

All API requests require authentication using a Bearer token. Include your token in the `Authorization` header:

```bash
curl -H "Authorization: Bearer YOUR_CREW_TOKEN" \
  https://your-crew-url.crewai.com/inputs
```

### Token Types

| Token Type | Scope | Use Case |
|:-----------|:------|:---------|
| **Bearer Token** | Organization-level access | Full crew operations, ideal for server-to-server integration |
| **User Bearer Token** | User-scoped access | Limited permissions, suitable for user-specific operations |

<Tip>
You can find both token types in the Status tab of your crew's detail page in the CrewAI AOP dashboard.
</Tip>

## Base URL

Each deployed crew has its own unique API endpoint:

```
https://your-crew-name.crewai.com
```

Replace `your-crew-name` with your actual crew's URL from the dashboard.

## Typical Workflow

1. **Discovery**: Call `GET /inputs` to understand what your crew needs
2. **Execution**: Submit inputs via `POST /kickoff` to start processing
3. **Monitoring**: Poll `GET /status/{kickoff_id}` until completion
4. **Results**: Extract the final output from the completed response (see the sketch after this list)
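
The sketch below strings these steps together in Python. It is a minimal illustration, not the definitive client: the `kickoff_id` and `state` field names and the `SUCCESS`/`FAILED` values are assumptions for illustration, so check your crew's actual response payloads, and replace the URL, token, and inputs with your own values.

```python
import time

import requests  # third-party HTTP client: pip install requests

BASE_URL = "https://your-crew-name.crewai.com"         # your crew's URL
HEADERS = {"Authorization": "Bearer YOUR_CREW_TOKEN"}  # your Bearer token

# 1. Discovery: ask the crew which inputs it expects
print(requests.get(f"{BASE_URL}/inputs", headers=HEADERS).json())

# 2. Execution: start a run and capture its id
# (the "kickoff_id" field name is an assumption for illustration)
kickoff = requests.post(
    f"{BASE_URL}/kickoff",
    headers=HEADERS,
    json={"inputs": {"topic": "AI"}},  # hypothetical inputs
)
kickoff_id = kickoff.json()["kickoff_id"]

# 3. Monitoring: poll until the run reaches a terminal state
# (the "state" field and its values are assumptions; check your payloads)
while True:
    status = requests.get(f"{BASE_URL}/status/{kickoff_id}", headers=HEADERS).json()
    if status.get("state") in ("SUCCESS", "FAILED"):
        break
    time.sleep(5)

# 4. Results: the completed response carries the final output
print(status)
```
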
## Error Handling

The API uses standard HTTP status codes:

| Code | Meaning |
|:-----|:--------|
| `200` | Success |
| `400` | Bad Request - Invalid input format |
| `401` | Unauthorized - Invalid bearer token |
| `404` | Not Found - Resource doesn't exist |
| `422` | Validation Error - Missing required inputs |
| `500` | Server Error - Contact support |
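
As a brief sketch of handling these codes with Python's requests library (the URL, token, and deliberately empty inputs payload are placeholders):

```python
import requests

BASE_URL = "https://your-crew-name.crewai.com"         # your crew's URL
HEADERS = {"Authorization": "Bearer YOUR_CREW_TOKEN"}  # your Bearer token

resp = requests.post(f"{BASE_URL}/kickoff", headers=HEADERS, json={"inputs": {}})
if resp.status_code == 422:
    # Validation error: the crew expected inputs that were not supplied
    print("Missing required inputs:", resp.json())
elif resp.status_code == 401:
    print("Unauthorized: check your Bearer token")
else:
    resp.raise_for_status()  # raises on any other 4xx/5xx
    print(resp.json())
```
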
## Interactive Testing

<Info>
**Why no "Send" button?** Since each CrewAI AOP user has their own unique crew URL, we use **reference mode** instead of an interactive playground to avoid confusion. This shows you exactly what the requests should look like without non-functional send buttons.
</Info>

Each endpoint page shows you:
- ✅ **Exact request format** with all parameters
- ✅ **Response examples** for success and error cases
- ✅ **Code samples** in multiple languages (cURL, Python, JavaScript, etc.)
- ✅ **Authentication examples** with proper Bearer token format

### To Test Your Actual API

<CardGroup cols={2}>
  <Card title="Copy cURL Examples" icon="terminal">
    Copy the cURL examples and replace the URL + token with your real values
  </Card>
  <Card title="Use Postman/Insomnia" icon="play">
    Import the examples into your preferred API testing tool
  </Card>
</CardGroup>

**Example workflow:**

1. **Copy the cURL example** from any endpoint page
2. **Replace `your-actual-crew-name.crewai.com`** with your real crew URL
3. **Replace the Bearer token** with your real token from the dashboard
4. **Run the request** in your terminal or API client

## Need Help?

<CardGroup cols={2}>
  <Card title="Enterprise Support" icon="headset" href="mailto:support@crewai.com">
    Get help with API integration and troubleshooting
  </Card>
  <Card title="Enterprise Dashboard" icon="chart-line" href="https://app.crewai.com">
    Manage your crews and view execution logs
  </Card>
</CardGroup>

---
title: "POST /kickoff"
description: "Start a crew execution"
openapi: "/enterprise-api.en.yaml POST /kickoff"
mode: "wide"
---

---
title: "POST /resume"
description: "Resume crew execution with human feedback"
openapi: "/enterprise-api.en.yaml POST /resume"
mode: "wide"
---

---
title: "GET /status/{kickoff_id}"
description: "Get execution status"
openapi: "/enterprise-api.en.yaml GET /status/{kickoff_id}"
mode: "wide"
---