crewAI-examples
GitHub demo
Reads an article, generates a matching image, and posts a tweet.
https://github.com/wassim249/xgrow/blob/master/main.py#L23
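The xgrow demo chains those three steps with CrewAI agents. The sketch below is only a rough outline of that pattern, not the repo's code; `fetch_article`, `generate_image`, and `post_tweet` are hypothetical placeholder tools.

```python
# Minimal sketch of a "read article -> generate image -> post tweet" crew.
# The three tools are hypothetical placeholders; the linked repo has its own implementations.
from crewai import Agent, Task, Crew, Process
from langchain.tools import tool


@tool("fetch_article")
def fetch_article(url: str) -> str:
    """Download the article text at the given URL."""
    import requests
    return requests.get(url, timeout=30).text


@tool("generate_image")
def generate_image(prompt: str) -> str:
    """Generate an illustration for the prompt and return a local file path."""
    # Placeholder: call your image backend (Stable Diffusion, DALL-E, ...) here.
    return "/tmp/illustration.png"


@tool("post_tweet")
def post_tweet(text: str) -> str:
    """Post the tweet text (plus the previously generated image) via the Twitter/X API."""
    # Placeholder: call tweepy or the X API here.
    return "posted"


article_url = "https://example.com/article"  # hypothetical input

writer = Agent(
    role="Social Media Writer",
    goal="Summarize an article into a catchy tweet with a matching illustration",
    backstory="You turn long articles into short, engaging posts.",
    tools=[fetch_article, generate_image, post_tweet],
    verbose=True,
)

tweet_task = Task(
    description=f"Read the article at {article_url}, create an illustration, and post a tweet about it.",
    expected_output="The text of the posted tweet.",
    agent=writer,
)

crew = Crew(agents=[writer], tasks=[tweet_task], process=Process.sequential, verbose=True)
# result = crew.kickoff()
```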
Text-to-image generation
https://github.com/coderphonui/crewai-stable-diffusion-pipeline/tree/master
https://github.com/abdelhadi-eddiraa/image-generator-crewai/blob/main/agents.py
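Both projects expose an image model to an agent as a tool. A minimal sketch of that pattern, assuming a local Stable Diffusion model via Hugging Face `diffusers` (the linked repos may use a different backend or pipeline):

```python
# Sketch: expose Stable Diffusion as a tool an agent can call.
# Assumes the `diffusers` and `torch` packages; the linked repos may differ.
from langchain.tools import tool


@tool("text_to_image")
def text_to_image(prompt: str) -> str:
    """Render `prompt` with Stable Diffusion and return the saved image path."""
    import torch
    from diffusers import StableDiffusionPipeline

    # Loading the pipeline per call keeps the sketch self-contained; cache it in real code.
    pipe = StableDiffusionPipeline.from_pretrained(
        "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16
    ).to("cuda")
    image = pipe(prompt).images[0]
    out_path = "generated.png"
    image.save(out_path)
    return out_path
```

The resulting tool is handed to an illustrator-style agent via `tools=[text_to_image]`, the same way `markdown_validation_tool` is wired up in the markdown_validator example below.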
Image captioning (writing text for an image)
https://github.com/Suv4o/custom_tool_crewAi/blob/main/main.py
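This example is about registering your own tool with an agent. Besides LangChain's `@tool` decorator (used elsewhere on this page), crewAI also supports subclassing `crewai_tools.BaseTool`; the sketch below uses that pattern with a hypothetical `DescribeImageTool`, which is not necessarily how the linked repo does it.

```python
# Sketch of a custom captioning tool using the crewai_tools BaseTool pattern.
# The vision call is a placeholder, not the linked repo's implementation.
from crewai import Agent
from crewai_tools import BaseTool


class DescribeImageTool(BaseTool):
    name: str = "describe_image"
    description: str = "Return a short textual description of the image at the given path."

    def _run(self, image_path: str) -> str:
        # Placeholder: call a vision model (GPT-4o, BLIP, LLaVA, ...) here.
        return f"A placeholder description of {image_path}"


caption_writer = Agent(
    role="Caption Writer",
    goal="Write an engaging caption for a given image",
    backstory="You turn image descriptions into short social-media captions.",
    tools=[DescribeImageTool()],
    allow_delegation=False,
    verbose=True,
)
```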
crewAI-examples
https://github.com/crewAIInc/crewAI-examples/tree/main
https://docs.crewai.com/getting-started/Start-a-New-CrewAI-Project-Template-Method/#annotations-include
markdown_validator
https://github.com/fanqingsong/crewAI-examples/tree/main/markdown_validator
```python
import sys
import os

from crewai import Agent, Task
from dotenv import load_dotenv
from langchain.tools import tool
from langchain.chat_models.openai import ChatOpenAI
from pymarkdown.api import PyMarkdownApi, PyMarkdownApiException

from MarkdownTools import markdown_validation_tool

load_dotenv()

default_llm = ChatOpenAI(
    openai_api_base=os.environ.get("OPENAI_API_BASE_URL", "https://api.openai.com/v1"),
    openai_api_key=os.environ.get("OPENAI_API_KEY"),
    temperature=0.1,
    model_name=os.environ.get("MODEL_NAME", "gpt-3.5-turbo"),
    top_p=0.3,
)


def process_markdown_document(filename):
    """
    Processes a markdown document by reviewing its syntax validation
    results and providing feedback on necessary changes.

    Args:
        filename (str): The path to the markdown file to be processed.

    Returns:
        str: The list of recommended changes to make to the document.
    """

    # Define general agent
    general_agent = Agent(
        role="Requirements Manager",
        goal="""Provide a detailed list of the markdown linting results. Give a summary with actionable
                tasks to address the validation results. Write your response as if you were handing it
                to a developer to fix the issues.
                DO NOT provide examples of how to fix the issues or recommend other tools to use.""",
        backstory="""You are an expert business analyst and software QA specialist. You provide high quality,
                thorough, insightful and actionable feedback via detailed list of changes and actionable tasks.""",
        allow_delegation=False,
        verbose=True,
        tools=[markdown_validation_tool],
        llm=default_llm,
    )

    # Define Tasks Using Crew Tools
    syntax_review_task = Task(
        description=f"""
            Use the markdown_validation_tool to review the file(s) at this path: {filename}

            Be sure to pass only the file path to the markdown_validation_tool.
            Use the following format to call the markdown_validation_tool:
            Do I need to use a tool? Yes
            Action: markdown_validation_tool
            Action Input: {filename}

            Get the validation results from the tool and then summarize it into a list of changes
            the developer should make to the document.
            DO NOT recommend ways to update the document.
            DO NOT change any of the content of the document or add content to it.
            It is critical to your task to only respond with a list of changes.

            If you already know the answer or if you do not need to use a tool, return it as your Final Answer.""",
        agent=general_agent,
    )

    updated_markdown = syntax_review_task.execute()
    return updated_markdown


# If called directly from the command line, take the first argument as the filename
if __name__ == "__main__":
    if len(sys.argv) > 1:
        filename = sys.argv[1]
        processed_document = process_markdown_document(filename)
        print(processed_document)
```
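main.py imports `markdown_validation_tool` from a local `MarkdownTools` module that is not shown above. A minimal sketch of what that module might contain, assuming pymarkdownlnt's `PyMarkdownApi.scan_path`:

```python
# MarkdownTools.py (sketch): wrap pymarkdownlnt as a LangChain tool so the
# agent can lint a markdown file given its path.
import sys

from langchain.tools import tool
from pymarkdown.api import PyMarkdownApi, PyMarkdownApiException


@tool("markdown_validation_tool")
def markdown_validation_tool(file_path: str) -> str:
    """Lint the markdown file at `file_path` and return the raw scan results."""
    try:
        scan_result = PyMarkdownApi().scan_path(file_path.strip())
        return str(scan_result)
    except PyMarkdownApiException as exc:
        print(f"API Exception: {exc}", file=sys.stderr)
        return f"API Exception: {exc}"
```

With a `.env` providing `OPENAI_API_KEY`, the script is run as `python main.py path/to/file.md` and prints the agent's list of required fixes.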
screenplay_writer
https://github.com/fanqingsong/crewAI-examples/tree/main/screenplay_writer
```python
import re
import os
import json  # needed for json.dumps on the task output below
import yaml
from pathlib import Path

from crewai import Agent, Task, Crew, Process
from dotenv import load_dotenv

# from langchain.chat_models.openai import ChatOpenAI
from langchain_community.chat_models.openai import ChatOpenAI

load_dotenv()

default_llm = ChatOpenAI(
    openai_api_base=os.environ.get("OPENAI_API_BASE_URL", "https://api.openai.com/v1"),
    openai_api_key=os.environ.get("OPENAI_API_KEY"),
    temperature=0.1,
    model_name=os.environ.get("MODEL_NAME", "gpt-3.5-turbo"),
    top_p=0.3,
)

# Use Path for file locations
current_dir = Path.cwd()
agents_config_path = current_dir / "config" / "agents.yaml"
tasks_config_path = current_dir / "config" / "tasks.yaml"

# Load YAML configuration files
with open(agents_config_path, "r") as file:
    agents_config = yaml.safe_load(file)

with open(tasks_config_path, "r") as file:
    tasks_config = yaml.safe_load(file)

## Define Agents
spamfilter = Agent(
    role=agents_config["spamfilter"]["role"],
    goal=agents_config["spamfilter"]["goal"],
    backstory=agents_config["spamfilter"]["backstory"],
    allow_delegation=False,
    verbose=True,
    llm=default_llm,
)

analyst = Agent(
    role=agents_config["analyst"]["role"],
    goal=agents_config["analyst"]["goal"],
    backstory=agents_config["analyst"]["backstory"],
    allow_delegation=False,
    verbose=True,
    llm=default_llm,
)

scriptwriter = Agent(
    role=agents_config["scriptwriter"]["role"],
    goal=agents_config["scriptwriter"]["goal"],
    backstory=agents_config["scriptwriter"]["backstory"],
    allow_delegation=False,
    verbose=True,
    llm=default_llm,
)

formatter = Agent(
    role=agents_config["formatter"]["role"],
    goal=agents_config["formatter"]["goal"],
    backstory=agents_config["formatter"]["backstory"],
    allow_delegation=False,
    verbose=True,
    llm=default_llm,
)

scorer = Agent(
    role=agents_config["scorer"]["role"],
    goal=agents_config["scorer"]["goal"],
    backstory=agents_config["scorer"]["backstory"],
    allow_delegation=False,
    verbose=True,
    llm=default_llm,
)

# This is one example of a public post in the newsgroup alt.atheism.
# Try it out yourself by replacing this with your own email thread or text.
discussion = """From: keith@cco.caltech.edu (Keith Allan Schneider)
Subject: Re: <Political Atheists?
Organization: California Institute of Technology, Pasadena
Lines: 50
NNTP-Posting-Host: punisher.caltech.edu

bobbe@vice.ICO.TEK.COM (Robert Beauchaine) writes:

>>I think that about 70% (or so) people approve of the
>>death penalty, even realizing all of its shortcomings. Doesn't this make
>>it reasonable? Or are *you* the sole judge of reasonability?
>Aside from revenge, what merits do you find in capital punishment?

Are we talking about me, or the majority of the people that support it?
Anyway, I think that "revenge" or "fairness" is why most people are in favor
of the punishment. If a murderer is going to be punished, people that think
that he should "get what he deserves." Most people wouldn't think it would be
fair for the murderer to live, while his victim died.

>Revenge? Petty and pathetic.

Perhaps you think that it is petty and pathetic, but your views are in the
minority.

keith
"""

oo_discussion = """From: keith@cco.caltech.edu (Keith Allan Schneider)
Subject: Re: <Political Atheists?
Organization: California Institute of Technology, Pasadena
Lines: 50
NNTP-Posting-Host: punisher.caltech.edu

bobbe@vice.ICO.TEK.COM (Robert Beauchaine) writes:

>>I think that about 70% (or so) people approve of the
>>death penalty, even realizing all of its shortcomings. Doesn't this make
>>it reasonable? Or are *you* the sole judge of reasonability?
>Aside from revenge, what merits do you find in capital punishment?

Are we talking about me, or the majority of the people that support it?
Anyway, I think that "revenge" or "fairness" is why most people are in favor
of the punishment. If a murderer is going to be punished, people that think
that he should "get what he deserves." Most people wouldn't think it would be
fair for the murderer to live, while his victim died.

>Revenge? Petty and pathetic.

Perhaps you think that it is petty and pathetic, but your views are in the
minority.

>We have a local televised hot topic talk show that very recently
>did a segment on capital punishment. Each and every advocate of
>the use of this portion of our system of "jurisprudence" cited the
>main reason for supporting it: "That bastard deserved it". True
>human compassion, forgiveness, and sympathy.

Where are we required to have compassion, forgiveness, and sympathy? If
someone wrongs me, I will take great lengths to make sure that his advantage
is removed, or a similar situation is forced upon him. If someone kills
another, then we can apply the golden rule and kill this person in turn. Is
not our entire moral system based on such a concept? Or, are you stating that
human life is sacred, somehow, and that it should never be violated? This
would sound like some sort of religious view.

>>I mean, how reasonable is imprisonment, really, when you think about it?
>>Sure, the person could be released if found innocent, but you still
>>can't undo the imiprisonment that was served. Perhaps we shouldn't
>>imprision people if we could watch them closely instead. The cost would
>>probably be similar, especially if we just implanted some sort of
>>electronic device.
>Would you rather be alive in prison or dead in the chair?

Once a criminal has committed a murder, his desires are irrelevant. And, you
still have not answered my question. If you are concerned about the death
penalty due to the possibility of the execution of an innocent, then why
isn't this same concern shared with imprisonment. Shouldn't we, by your
logic, administer as minimum as punishment as possible, to avoid violating
the liberty or happiness of an innocent person?

keith
"""

# Filter out spam and vulgar posts
task0 = Task(
    description=tasks_config["task0"]["description"].format(discussion=discussion),
    expected_output=tasks_config["task0"]["expected_output"],
    agent=spamfilter,
)

crew = Crew(
    agents=[spamfilter],
    tasks=[task0],
    verbose=True,  # Crew verbose mode lets you see which tasks are being worked on; set it to 1 or 2 for different logging levels
    process=Process.sequential,  # Sequential process executes tasks one after the other, passing each outcome as extra context to the next
)

inputs = {'discussion': discussion}
result = crew.kickoff(inputs)
# result = crew.kickoff()

print("===================== end result from crew ===================================")
print(result)

# Accessing the task output
task_output = task0.output
print(f"Task Description: {task_output.description}")
print(f"Task Summary: {task_output.summary}")
print(f"Raw Output: {task_output.raw}")
if task_output.json_dict:
    print(f"JSON Output: {json.dumps(task_output.json_dict, indent=2)}")
if task_output.pydantic:
    print(f"Pydantic Output: {task_output.pydantic}")

# Process the post with a crew of agents, ultimately delivering a well formatted dialogue
task1 = Task(
    description=tasks_config["task1"]["description"].format(discussion=discussion),
    expected_output=tasks_config["task1"]["expected_output"],
    agent=analyst,
)

task2 = Task(
    description=tasks_config["task2"]["description"],
    expected_output=tasks_config["task2"]["expected_output"],
    agent=scriptwriter,
)

task3 = Task(
    description=tasks_config["task3"]["description"],
    expected_output=tasks_config["task3"]["expected_output"],
    agent=formatter,
)

crew = Crew(
    agents=[analyst, scriptwriter, formatter],
    tasks=[task1, task2, task3],
    verbose=True,  # Crew verbose mode lets you see which tasks are being worked on; set it to 1 or 2 for different logging levels
    process=Process.sequential,  # Sequential process executes tasks one after the other, passing each outcome as extra context to the next
)

inputs = {'discussion': discussion}
result = crew.kickoff(inputs)

print("===================== end result from crew ===================================")
print(result)

# print("===================== score ==================================================")
# task4 = Task(
#     description=tasks_config["task4"]["description"],
#     expected_output=tasks_config["task4"]["expected_output"],
#     agent=scorer,
# )
# score = task4.execute()
# score = score.split("\n")[0]  # sometimes an explanation comes after the score; ignore it
# print(f"Scoring the dialogue as: {score}/10")
```
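The script expects `config/agents.yaml` and `config/tasks.yaml` with one entry per agent and task. The exact prompts live in the repo; the sketch below only illustrates the shape that `yaml.safe_load` must produce, shown as the equivalent Python dicts with placeholder values.

```python
# Shape that main.py expects after yaml.safe_load() on the two config files.
# Placeholder values; the real wording is in the repo's config/ directory.
agents_config = {
    "spamfilter": {
        "role": "Spam filter",
        "goal": "Decide whether a post is spam or offensive",
        "backstory": "You are a strict but fair content moderator.",
    },
    # "analyst", "scriptwriter", "formatter" and "scorer" follow the same shape.
}

tasks_config = {
    "task0": {
        # {discussion} is filled in by .format(discussion=discussion) in main.py
        "description": "Read the following post and flag it if it is spam:\n{discussion}",
        "expected_output": "A verdict (keep or reject) with a one-line reason.",
    },
    # "task1" .. "task4" follow the same shape.
}
```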
LLM Agents (Part 11) | The multi-agent framework CrewAI compared with AutoGen
https://zhuanlan.zhihu.com/p/681218725
CrewAI can be used in production environments. It gives up a little flexibility and randomness in how speakers respond and are orchestrated, but gains more determinism in agents' capabilities, tasks, and turn-taking. So far the only orchestration strategy is "sequential"; "consensual" and "hierarchical" are planned for future releases.
When we dig into the framework and its code in the next chapter, we will see that it is very easy to ensure tasks are handled by the relevant agents and in the defined order. You will not see the kind of lively interaction between agents in CrewAI, such as one agent correcting another or one agent speaking several times in a row. Those interactions are useful for experiments or demos, but they are of little value for real LLM products that need efficient, deterministic, and cost-effective task completion. CrewAI therefore favors a streamlined, reliable approach: a robust group chat in which every AI agent knows exactly what to do and what its goal is.
In my view, another, and the most critical, advantage is its thriving ecosystem of tools and rich supporting resources for building agents and tasks, which comes from its agents being designed on top of LangChain. LangChain is a mature LLM framework that already gives LLM application developers a wealth of tools and peripherals to extend what language models can do.
CrewAI is a good fit for LLM application developers who are familiar with LangChain, or who have already built applications on top of it. For them, integrating existing standalone agents into the CrewAI framework is relatively easy. By contrast, AutoGen may have a steeper learning curve and require more time to understand its usage and integrate agents effectively.
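Concretely, because the CrewAI versions used in these examples are built on LangChain, an off-the-shelf LangChain tool can be handed straight to an agent. A small sketch, assuming `langchain-community`'s DuckDuckGo search tool and a LangChain-based CrewAI release:

```python
# Sketch: reuse an off-the-shelf LangChain tool inside a CrewAI agent.
# Assumes the langchain-community and duckduckgo-search packages, and a
# LangChain-based CrewAI version that accepts LangChain tools directly.
from crewai import Agent, Task, Crew, Process
from langchain_community.tools import DuckDuckGoSearchRun

search = DuckDuckGoSearchRun()

researcher = Agent(
    role="Researcher",
    goal="Collect up-to-date facts on a topic before writing about it",
    backstory="You verify claims with quick web searches.",
    tools=[search],  # LangChain tool plugged straight into the agent
    allow_delegation=False,
    verbose=True,
)

research_task = Task(
    description="Find three recent facts about multi-agent LLM frameworks.",
    expected_output="A bullet list of three facts with sources.",
    agent=researcher,
)

crew = Crew(agents=[researcher], tasks=[research_task], process=Process.sequential)
# result = crew.kickoff()
```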