fixed issue for smaller models due to instructions prompt

Lorenze Jay
2024-07-24 21:56:15 -07:00
parent c20e628de7
commit 1d5d8228cf
3 changed files with 5 additions and 3 deletions

View File

@@ -23,7 +23,7 @@ class Converter(OutputConverter):
             if self.is_gpt:
                 return self._create_instructor().to_pydantic()
             else:
-                return self._create_chain().invoke({"query": self.text})
+                return self._create_chain().invoke({})
         except Exception as e:
             if current_attempt < self.max_attempts:
                 return self.to_pydantic(current_attempt + 1)
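
For context, a minimal sketch of why invoking the chain with an empty dict can work, assuming _create_chain builds a LangChain-style prompt | llm | parser pipeline whose prompt already embeds the instructions and the raw text (an assumption; the chain construction is not shown in this diff):

from langchain_core.prompts import ChatPromptTemplate

text = "name: Alice, age: 30"  # hypothetical raw output to convert
instructions = "Convert this raw text into valid JSON."

# Both the instructions and the text are baked into the prompt up front,
# so the template has no input variables left to fill at invoke time.
prompt = ChatPromptTemplate.from_messages(
    [("system", instructions), ("human", text)]
)
print(prompt.input_variables)  # [] -- nothing to supply
print(prompt.invoke({}))       # an empty dict satisfies the template
# In a full chain this prompt would be piped into the llm and parser,
# e.g. (prompt | llm | parser).invoke({}).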

View File

@@ -20,6 +20,8 @@ class CrewPydanticOutputParser(PydanticOutputParser):
        # Treating edge case of function calling llm returning the name instead of tool_name
        json_object = json.loads(result[0].text)
        if "tool_name" not in json_object:
            json_object["tool_name"] = json_object.get("name", "")
            result[0].text = json.dumps(json_object)
        json_object = super().parse_result(result)
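
The edge case handled above can be illustrated in isolation. A hedged sketch (the helper name ensure_tool_name is hypothetical, not part of crewAI) of normalizing a function-calling response that returns "name" instead of "tool_name" before validation:

import json

def ensure_tool_name(raw: str) -> str:
    # Hypothetical helper mirroring the parser logic above: some
    # function-calling models emit {"name": ...} instead of
    # {"tool_name": ...}, so copy the value over before the pydantic
    # parser validates the payload.
    obj = json.loads(raw)
    if "tool_name" not in obj:
        obj["tool_name"] = obj.get("name", "")
    return json.dumps(obj)

print(ensure_tool_name('{"name": "search", "arguments": {}}'))
# {"name": "search", "arguments": {}, "tool_name": "search"}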

View File

@@ -66,11 +66,11 @@ class TaskEvaluator:
"- Entities extracted from the task output, if any, their type, description, and relationships"
)
instructions = "I'm gonna convert this raw text into valid JSON."
instructions = "Convert this raw text into valid JSON."
if not self._is_gpt(self.llm):
model_schema = PydanticSchemaParser(model=TaskEvaluation).get_schema()
instructions = f"{instructions}\n\nThe json should have the following structure, with the following keys:\n{model_schema}"
instructions = f"{instructions}\n\nYou must return json with the following schema:\n{model_schema}"
converter = Converter(
llm=self.llm,
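
To see what the reworded prompt ends up looking like for a non-GPT model, a small sketch (the schema string here is a made-up stand-in for whatever PydanticSchemaParser(model=TaskEvaluation).get_schema() actually returns):

model_schema = '{"suggestions": ["..."], "quality": 0.0, "entities": ["..."]}'  # placeholder schema

instructions = "Convert this raw text into valid JSON."
# Mirrors the non-GPT branch above: append an explicit schema requirement
# so smaller models know exactly what JSON shape to return.
instructions = (
    f"{instructions}\n\n"
    f"You must return json with the following schema:\n{model_schema}"
)
print(instructions)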