From 8e5bface29865e1defe70379743ed88e9b6a8f72 Mon Sep 17 00:00:00 2001
From: Mike Heavers
Date: Thu, 23 May 2024 20:05:32 -0700
Subject: [PATCH] Update README.md (#652)

Rework example so that if you use a custom LLM it doesn't throw code
errors by uncommenting.
---
 README.md | 24 +++++++++++++-----------
 1 file changed, 13 insertions(+), 11 deletions(-)

diff --git a/README.md b/README.md
index f18fc417b..a4e2433db 100644
--- a/README.md
+++ b/README.md
@@ -70,6 +70,17 @@ os.environ["SERPER_API_KEY"] = "Your Key" # serper.dev API key
 # os.environ["OPENAI_MODEL_NAME"] ='openhermes' # Adjust based on available model
 # os.environ["OPENAI_API_KEY"] ='sk-111111111111111111111111111111111111111111111111'
 
+# You can pass an optional llm attribute specifying what model you wanna use.
+# It can be a local model through Ollama / LM Studio or a remote
+# model like OpenAI, Mistral, Antrophic or others (https://docs.crewai.com/how-to/LLM-Connections/)
+#
+# import os
+# os.environ['OPENAI_MODEL_NAME'] = 'gpt-3.5-turbo'
+#
+# OR
+#
+# from langchain_openai import ChatOpenAI
+
 search_tool = SerperDevTool()
 
 # Define your agents with roles and goals
@@ -81,18 +92,9 @@ researcher = Agent(
   You have a knack for dissecting complex data and presenting actionable insights.""",
   verbose=True,
   allow_delegation=False,
-  tools=[search_tool]
   # You can pass an optional llm attribute specifying what model you wanna use.
-  # It can be a local model through Ollama / LM Studio or a remote
-  # model like OpenAI, Mistral, Antrophic or others (https://docs.crewai.com/how-to/LLM-Connections/)
-  #
-  # import os
-  # os.environ['OPENAI_MODEL_NAME'] = 'gpt-3.5-turbo'
-  #
-  # OR
-  #
-  # from langchain_openai import ChatOpenAI
-  # llm=ChatOpenAI(model_name="gpt-3.5", temperature=0.7)
+  # llm=ChatOpenAI(model_name="gpt-3.5", temperature=0.7),
+  tools=[search_tool]
 )
 writer = Agent(
   role='Tech Content Strategist',
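
Note: below is a minimal sketch of what this rework enables once the optional llm line is actually
uncommented. It assumes the crewai, crewai_tools and langchain_openai packages are installed and the
API keys are set; the role, goal and backstory strings are illustrative placeholders, not text taken
from this patch, and "gpt-3.5-turbo" mirrors the model name used in the commented env-var example.

    import os

    from crewai import Agent
    from crewai_tools import SerperDevTool
    from langchain_openai import ChatOpenAI

    os.environ["SERPER_API_KEY"] = "Your Key"  # serper.dev API key
    os.environ["OPENAI_API_KEY"] = "Your Key"

    search_tool = SerperDevTool()

    # Because the llm argument now ends with a trailing comma and precedes
    # tools=[...], uncommenting it no longer produces a syntax error.
    researcher = Agent(
      role='Research Analyst',                    # placeholder
      goal='Summarize recent AI developments',    # placeholder
      backstory="""You have a knack for dissecting complex data
      and presenting actionable insights.""",     # placeholder
      verbose=True,
      allow_delegation=False,
      llm=ChatOpenAI(model_name="gpt-3.5-turbo", temperature=0.7),
      tools=[search_tool]
    )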