fixing tasks order
@@ -1093,7 +1093,7 @@ def test_agent_training_handler(crew_training_handler):
     result = agent._training_handler(task_prompt=task_prompt)

-    assert result == "What is 1 + 1?You MUST follow these feedbacks: \n good"
+    assert result == "What is 1 + 1?\n\nYou MUST follow these instructions: \n good"

     crew_training_handler.assert_has_calls(
         [mock.call(), mock.call("training_data.pkl"), mock.call().load()]
     )
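The corrected assertion implies _training_handler now joins the prompt and the stored human feedback with a blank line and labels the feedback as instructions. A minimal sketch of that prompt shape, with a hypothetical function name and the feedback assumed to have been loaded from training_data.pkl:

def apply_training_feedback(task_prompt: str, feedback: str | None) -> str:
    # Hypothetical sketch, not the actual implementation: it only reproduces
    # the string shape the updated assertion pins down.
    if feedback:
        task_prompt += f"\n\nYou MUST follow these instructions: \n {feedback}"
    return task_prompt

With task_prompt="What is 1 + 1?" and feedback="good", this yields exactly the expected string asserted above.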
@@ -1121,8 +1121,8 @@ def test_agent_use_trained_data(crew_training_handler):
     result = agent._use_trained_data(task_prompt=task_prompt)

     assert (
-        result == "What is 1 + 1?You MUST follow these feedbacks: \n "
-        "The result of the math operation must be right.\n - Result must be better than 1."
+        result == "What is 1 + 1?\n\nYou MUST follow these instructions: \n"
+        " - The result of the math operation must be right.\n - Result must be better than 1."
     )
     crew_training_handler.assert_has_calls(
         [mock.call(), mock.call("trained_agents_data.pkl"), mock.call().load()]
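Here the corrected expectation additionally renders each stored suggestion as its own " - " bullet. Under the same caveats (names are illustrative, suggestions assumed loaded from trained_agents_data.pkl):

def apply_trained_suggestions(task_prompt: str, suggestions: list[str]) -> str:
    # Sketch only: format every suggestion as a " - " bullet on its own line,
    # then append the block under the same instruction header as above.
    if suggestions:
        bullets = "\n".join(f" - {s}" for s in suggestions)
        task_prompt += f"\n\nYou MUST follow these instructions: \n{bullets}"
    return task_prompt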
@@ -1205,7 +1205,7 @@ def test_agent_with_custom_stop_words():
     )

     assert isinstance(agent.llm, LLM)
-    assert agent.llm.stop == stop_words
+    assert agent.llm.stop == stop_words + ["\nObservation:"]


 def test_agent_with_callbacks():
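This hunk and the next both pin down the same behavior: the agent appends the ReAct sentinel "\nObservation:" to any user-supplied stop words, so generation halts before the model can fabricate a tool observation. A hedged sketch of that merge (the function name is an assumption):

def merge_stop_words(user_stop: list[str] | None) -> list[str]:
    # Preserve the user's stop words and add the ReAct sentinel once.
    stop = list(user_stop or [])
    if "\nObservation:" not in stop:
        stop.append("\nObservation:")
    return stop

merge_stop_words(["STOP", "END"]) returns ["STOP", "END", "\nObservation:"], matching the assertion in the following hunk.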
@@ -1368,7 +1368,7 @@ def test_agent_with_all_llm_attributes():
     assert agent.llm.temperature == 0.7
     assert agent.llm.top_p == 0.9
     assert agent.llm.n == 1
-    assert agent.llm.stop == ["STOP", "END"]
+    assert agent.llm.stop == ["STOP", "END", "\nObservation:"]
     assert agent.llm.max_tokens == 100
     assert agent.llm.presence_penalty == 0.1
     assert agent.llm.frequency_penalty == 0.1
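For reference, a usage sketch of the configuration these assertions describe; the import path and model name are assumptions, and the "\nObservation:" entry is added during agent setup rather than by the caller:

from crewai import LLM  # import path assumed

llm = LLM(
    model="gpt-4o-mini",  # illustrative model name
    temperature=0.7,
    top_p=0.9,
    n=1,
    stop=["STOP", "END"],
    max_tokens=100,
    presence_penalty=0.1,
    frequency_penalty=0.1,
)
# After the agent wires in its stop-word handling, the test expects:
# llm.stop == ["STOP", "END", "\nObservation:"]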
@@ -50,11 +50,12 @@ def test_evaluate_training_data(converter_mock):
         text="Assess the quality of the training data based on the llm output, human feedback , and llm "
         "output improved result.\n\nInitial Output:\nInitial output 1\n\nHuman Feedback:\nHuman feedback "
         "1\n\nImproved Output:\nImproved output 1\n\nInitial Output:\nInitial output 2\n\nHuman "
-        "Feedback:\nHuman feedback 2\n\nImproved Output:\nImproved output 2\n\nPlease provide:\n- "
-        "Based on the Human Feedbacks and the comparison between Initial Outputs and Improved outputs "
-        "provide action items based on human_feedback for future tasks\n- A score from 0 to 10 evaluating "
-        "on completion, quality, and overall performance from the improved output to the initial output "
-        "based on the human feedback\n",
+        "Feedback:\nHuman feedback 2\n\nImproved Output:\nImproved output 2\n\nPlease provide:\n- Provide "
+        "a list of clear, actionable instructions derived from the Human Feedbacks to enhance the Agent's "
+        "performance. Analyze the differences between Initial Outputs and Improved Outputs to generate specific "
+        "action items for future tasks. Ensure all key and specificpoints from the human feedback are "
+        "incorporated into these instructions.\n- A score from 0 to 10 evaluating on completion, quality, and "
+        "overall performance from the improved output to the initial output based on the human feedback\n",
         model=TrainingTaskEvaluation,
         instructions="I'm gonna convert this raw text into valid JSON.\n\nThe json should have the "
         "following structure, with the following keys:\n{\n suggestions: List[str],\n quality: float,\n final_summary: str\n}",
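The instructions string above spells out the JSON shape the converter must emit, so TrainingTaskEvaluation presumably looks like this sketch (field names and types copied verbatim from that string; the pydantic base is an assumption):

from typing import List

from pydantic import BaseModel


class TrainingTaskEvaluation(BaseModel):
    suggestions: List[str]  # actionable instructions derived from the human feedback
    quality: float          # the 0-10 score requested by the evaluation prompt
    final_summary: str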