CookBook: https://docs.agno.com/introduction
GitHub Repo: https://github.com/agno-agi/agno
Migrated from Phidata.
--0 Multi-model support
OpenAI
from agno.agent import Agent, RunResponse # noqa
from agno.models.openai import OpenAIChat
agent = Agent(model=OpenAIChat(id="gpt-4o"), markdown=True)
# Get the response in a variable
# run: RunResponse = agent.run("Share a 2 sentence horror story")
# print(run.content)
# Print the response in the terminal
agent.print_response("Share a 2 sentence horror story")
# Inspect metrics (token usage, timings) for the most recent run
print(agent.run_response.metrics)
DeepSeek
from agno.agent import Agent, RunResponse # noqa
from agno.models.deepseek import DeepSeek
agent = Agent(model=DeepSeek(id="deepseek-chat"), markdown=True)
# Get the response in a variable
# run: RunResponse = agent.run("Share a 2 sentence horror story")
# print(run.content)
# Print the response in the terminal
agent.print_response("Share a 2 sentence horror story")
Ollama
from agno.agent import Agent, RunResponse # noqa
from agno.models.ollama import Ollama
agent = Agent(model=Ollama(id="llama3.1:8b"), markdown=True)
# Get the response in a variable
# run: RunResponse = agent.run("Share a 2 sentence horror story")
# print(run.content)
# Print the response in the terminal
agent.print_response("Share a 2 sentence horror story")
...
--1 Structured output
Structured output is built on Pydantic: pass a BaseModel subclass to the Agent via the response_model parameter.
from typing import List
from pydantic import BaseModel, Field
from agno.agent import Agent
from agno.models.openai import OpenAIChat
class MovieScript(BaseModel):
setting: str = Field(..., description="Provide a nice setting for a blockbuster movie.")
ending: str = Field(..., description="Ending of the movie. If not available, provide a happy ending.")
genre: str = Field(
..., description="Genre of the movie. If not available, select action, thriller or romantic comedy."
)
name: str = Field(..., description="Give a name to this movie")
characters: List[str] = Field(..., description="Name of characters for this movie.")
storyline: str = Field(..., description="3 sentence storyline for the movie. Make it exciting!")
# Agent that uses JSON mode
json_mode_agent = Agent(
model=OpenAIChat(id="gpt-4o"),
description="You write movie scripts.",
response_model=MovieScript,
)
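With response_model set, run() returns the parsed object in the response content. A quick usage sketch (the prompt is just an example):
# run: RunResponse = json_mode_agent.run("New York")
# print(run.content)  # a MovieScript instance
json_mode_agent.print_response("New York")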
--2 Knowledge augmentation
AgentKnowledge is the base class for knowledge bases (itself a Pydantic BaseModel).
Main components, with example implementations (a wiring sketch follows the list):
vector_db: PgVector | Chroma
chunking: FixedSizeChunking
reader: JSONReader | PDFReader
embedder: OpenAIEmbedder | OllamaEmbedder
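A minimal sketch of wiring these together, assuming agno's PDF-URL knowledge base and PgVector integration (the PDF URL and db_url are placeholders):
from agno.agent import Agent
from agno.embedder.openai import OpenAIEmbedder
from agno.knowledge.pdf_url import PDFUrlKnowledgeBase
from agno.models.openai import OpenAIChat
from agno.vectordb.pgvector import PgVector
knowledge_base = PDFUrlKnowledgeBase(
    urls=["https://example.com/some.pdf"],  # placeholder document URL
    vector_db=PgVector(
        table_name="documents",
        db_url="postgresql+psycopg://ai:ai@localhost:5532/ai",  # assumes a local pgvector instance
        embedder=OpenAIEmbedder(),
    ),
)
agent = Agent(
    model=OpenAIChat(id="gpt-4o"),
    knowledge=knowledge_base,
    search_knowledge=True,  # expose knowledge search to the agent as a tool
)
knowledge_base.load(recreate=False)  # embed and index the documents once
agent.print_response("Summarize the document", markdown=True)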
--3 Conversation memory
--3.1 Built-in Memory
from agno.agent import Agent
from agno.models.openai import OpenAIChat
agent = Agent(
model=OpenAIChat(id="gpt-4o"),
# Set add_history_to_messages=True to add the previous chat history to the messages sent to the Model.
add_history_to_messages=True,
# Number of historical responses to add to the messages.
num_history_responses=3,
description="You are a helpful assistant that always responds in a polite, upbeat and positive manner.",
)
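A quick way to verify that history is carried across turns (prompts are illustrative):
agent.print_response("Hi, my name is Ava and I love hiking.")
agent.print_response("What do you know about me?")  # answered from the chat history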
--3.2 Persistent Memory, keyed by session_id in storage
from agno.agent import Agent
from agno.models.openai import OpenAIChat
from agno.storage.agent.sqlite import SqliteAgentStorage  # storage import path may differ across agno versions
agent = Agent(
model=OpenAIChat(id="gpt-4o"),
# Store agent sessions in a database
storage=SqliteAgentStorage(table_name="agent_sessions", db_file="tmp/agent_storage.db"),
# Set add_history_to_messages=True to add the previous chat history to the messages sent to the Model.
add_history_to_messages=True,
# Number of historical responses to add to the messages.
num_history_responses=3,
# The session_id is used to identify the session in the database
# You can resume any session by providing a session_id
# session_id="xxxx-xxxx-xxxx-xxxx",
# Description creates a system prompt for the agent
description="You are a helpful assistant that always responds in a polite, upbeat and positive manner.",
)
--4 Tools
Agno ships with many built-in toolkits, and you can also register plain Python functions as tools; a sketch follows.
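A minimal sketch combining a built-in toolkit with a custom function tool (get_weather and its return value are made up for illustration):
import json
from agno.agent import Agent
from agno.models.openai import OpenAIChat
from agno.tools.duckduckgo import DuckDuckGoTools

def get_weather(city: str) -> str:
    """Return a (fake) weather report for the given city."""
    # A real tool would call a weather API; hardcoded here for illustration.
    return json.dumps({"city": city, "forecast": "sunny", "temp_c": 23})

agent = Agent(
    model=OpenAIChat(id="gpt-4o"),
    tools=[DuckDuckGoTools(), get_weather],  # toolkits and plain functions can be mixed
    show_tool_calls=True,
    markdown=True,
)
agent.print_response("What's the weather in Paris, and one related news headline?")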
--5 Team
Multi-agent collaboration when a strict workflow is not required (a Route Mode sketch follows the mode list).
--5.1 Route Mode: the Team routes the task to the most suitable member agent
--5.2 Coordinate Mode: the Team assigns different sub-tasks to multiple agents, then integrates their results
--5.3 Collaborate Mode: the Team gives every agent the same task and synthesizes their combined output
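A minimal Route Mode sketch, assuming agno's Team class with a mode parameter (member roles and the prompt are illustrative):
from agno.agent import Agent
from agno.models.openai import OpenAIChat
from agno.team import Team

english_agent = Agent(
    name="English Agent",
    role="You only answer in English.",
    model=OpenAIChat(id="gpt-4o-mini"),
)
chinese_agent = Agent(
    name="Chinese Agent",
    role="You only answer in Chinese.",
    model=OpenAIChat(id="gpt-4o-mini"),
)
team = Team(
    mode="route",  # or "coordinate" / "collaborate"
    members=[english_agent, chinese_agent],
    model=OpenAIChat(id="gpt-4o"),  # the team-leader model that does the routing
    markdown=True,
)
team.print_response("How are you?")  # expected to be routed to the English agent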
--6 Workflow
Multi-agent collaboration with a strictly defined workflow; the flow between agents is written out in the Workflow's run() method.
import json
from textwrap import dedent
from typing import Dict, Iterator, Optional
from agno.agent import Agent
from agno.models.openai import OpenAIChat
from agno.storage.sqlite import SqliteStorage
from agno.tools.duckduckgo import DuckDuckGoTools
from agno.tools.newspaper4k import Newspaper4kTools
from agno.utils.log import logger
from agno.utils.pprint import pprint_run_response
from agno.workflow import RunEvent, RunResponse, Workflow
from pydantic import BaseModel, Field
class NewsArticle(BaseModel):
title: str = Field(..., description="Title of the article.")
url: str = Field(..., description="Link to the article.")
summary: Optional[str] = Field(
..., description="Summary of the article if available."
)
class SearchResults(BaseModel):
articles: list[NewsArticle]
class ScrapedArticle(BaseModel):
title: str = Field(..., description="Title of the article.")
url: str = Field(..., description="Link to the article.")
summary: Optional[str] = Field(
..., description="Summary of the article if available."
)
content: Optional[str] = Field(
...,
description="Full article content in markdown format. None if content is unavailable.",
)
class BlogPostGenerator(Workflow):
"""Advanced workflow for generating professional blog posts with proper research and citations."""
description: str = dedent("""\
An intelligent blog post generator that creates engaging, well-researched content.
This workflow orchestrates multiple AI agents to research, analyze, and craft
compelling blog posts that combine journalistic rigor with engaging storytelling.
The system excels at creating content that is both informative and optimized for
digital consumption.
""")
# Search Agent: Handles intelligent web searching and source gathering
searcher: Agent = Agent(
model=OpenAIChat(id="gpt-4o-mini"),
tools=[DuckDuckGoTools()],
description=dedent("""\
You are BlogResearch-X, an elite research assistant specializing in discovering
high-quality sources for compelling blog content. Your expertise includes:
- Finding authoritative and trending sources
- Evaluating content credibility and relevance
- Identifying diverse perspectives and expert opinions
- Discovering unique angles and insights
- Ensuring comprehensive topic coverage\
"""),
instructions=dedent("""\
1. Search Strategy
- Find 10-15 relevant sources and select the 5-7 best ones
- Prioritize recent, authoritative content
- Look for unique angles and expert insights
2. Source Evaluation
- Verify source credibility and expertise
- Check publication dates for timeliness
- Assess content depth and uniqueness
3. Diversity of Perspectives
- Include different viewpoints
- Gather both mainstream and expert opinions
- Find supporting data and statistics\
"""),
response_model=SearchResults,
)
# Content Scraper: Extracts and processes article content
article_scraper: Agent = Agent(
model=OpenAIChat(id="gpt-4o-mini"),
tools=[Newspaper4kTools()],
description=dedent("""\
You are ContentBot-X, a specialist in extracting and processing digital content
for blog creation. Your expertise includes:
- Efficient content extraction
- Smart formatting and structuring
- Key information identification
- Quote and statistic preservation
- Maintaining source attribution\
"""),
instructions=dedent("""\
1. Content Extraction
- Extract content from the article
- Preserve important quotes and statistics
- Maintain proper attribution
- Handle paywalls gracefully
2. Content Processing
- Format text in clean markdown
- Preserve key information
- Structure content logically
3. Quality Control
- Verify content relevance
- Ensure accurate extraction
- Maintain readability\
"""),
response_model=ScrapedArticle,
structured_outputs=True,
)
# Content Writer Agent: Crafts engaging blog posts from research
writer: Agent = Agent(
model=OpenAIChat(id="gpt-4o"),
description=dedent("""\
You are BlogMaster-X, an elite content creator combining journalistic excellence
with digital marketing expertise. Your strengths include:
- Crafting viral-worthy headlines
- Writing engaging introductions
- Structuring content for digital consumption
- Incorporating research seamlessly
- Optimizing for SEO while maintaining quality
- Creating shareable conclusions\
"""),
instructions=dedent("""\
1. Content Strategy
- Craft attention-grabbing headlines
- Write compelling introductions
- Structure content for engagement
- Include relevant subheadings
2. Writing Excellence
- Balance expertise with accessibility
- Use clear, engaging language
- Include relevant examples
- Incorporate statistics naturally
3. Source Integration
- Cite sources properly
- Include expert quotes
- Maintain factual accuracy
4. Digital Optimization
- Structure for scanability
- Include shareable takeaways
- Optimize for SEO
- Add engaging subheadings\
"""),
expected_output=dedent("""\
# {Viral-Worthy Headline}
## Introduction
{Engaging hook and context}
## {Compelling Section 1}
{Key insights and analysis}
{Expert quotes and statistics}
## {Engaging Section 2}
{Deeper exploration}
{Real-world examples}
## {Practical Section 3}
{Actionable insights}
{Expert recommendations}
## Key Takeaways
- {Shareable insight 1}
- {Practical takeaway 2}
- {Notable finding 3}
## Sources
{Properly attributed sources with links}\
"""),
markdown=True,
)
def run(
self,
topic: str,
use_search_cache: bool = True,
use_scrape_cache: bool = True,
use_cached_report: bool = True,
) -> Iterator[RunResponse]:
logger.info(f"Generating a blog post on: {topic}")
# Use the cached blog post if use_cache is True
if use_cached_report:
cached_blog_post = self.get_cached_blog_post(topic)
if cached_blog_post:
yield RunResponse(
content=cached_blog_post, event=RunEvent.workflow_completed
)
return
# Search the web for articles on the topic
search_results: Optional[SearchResults] = self.get_search_results(
topic, use_search_cache
)
# If no search_results are found for the topic, end the workflow
if search_results is None or len(search_results.articles) == 0:
yield RunResponse(
event=RunEvent.workflow_completed,
content=f"Sorry, could not find any articles on the topic: {topic}",
)
return
# Scrape the search results
scraped_articles: Dict[str, ScrapedArticle] = self.scrape_articles(
topic, search_results, use_scrape_cache
)
# Prepare the input for the writer
writer_input = {
"topic": topic,
"articles": [v.model_dump() for v in scraped_articles.values()],
}
# Run the writer and yield the response
yield from self.writer.run(json.dumps(writer_input, indent=4), stream=True)
# Save the blog post in the cache
self.add_blog_post_to_cache(topic, self.writer.run_response.content)
def get_cached_blog_post(self, topic: str) -> Optional[str]:
logger.info("Checking if cached blog post exists")
return self.session_state.get("blog_posts", {}).get(topic)
def add_blog_post_to_cache(self, topic: str, blog_post: str):
logger.info(f"Saving blog post for topic: {topic}")
self.session_state.setdefault("blog_posts", {})
self.session_state["blog_posts"][topic] = blog_post
def get_cached_search_results(self, topic: str) -> Optional[SearchResults]:
logger.info("Checking if cached search results exist")
search_results = self.session_state.get("search_results", {}).get(topic)
return (
SearchResults.model_validate(search_results)
if search_results and isinstance(search_results, dict)
else search_results
)
def add_search_results_to_cache(self, topic: str, search_results: SearchResults):
logger.info(f"Saving search results for topic: {topic}")
self.session_state.setdefault("search_results", {})
self.session_state["search_results"][topic] = search_results
def get_cached_scraped_articles(
self, topic: str
) -> Optional[Dict[str, ScrapedArticle]]:
logger.info("Checking if cached scraped articles exist")
scraped_articles = self.session_state.get("scraped_articles", {}).get(topic)
return (
{url: ScrapedArticle.model_validate(article) for url, article in scraped_articles.items()}
if scraped_articles and isinstance(scraped_articles, dict)
else scraped_articles
)
def add_scraped_articles_to_cache(
self, topic: str, scraped_articles: Dict[str, ScrapedArticle]
):
logger.info(f"Saving scraped articles for topic: {topic}")
self.session_state.setdefault("scraped_articles", {})
self.session_state["scraped_articles"][topic] = scraped_articles
def get_search_results(
self, topic: str, use_search_cache: bool, num_attempts: int = 3
) -> Optional[SearchResults]:
# Get cached search_results from the session state if use_search_cache is True
if use_search_cache:
try:
search_results_from_cache = self.get_cached_search_results(topic)
if search_results_from_cache is not None:
search_results = SearchResults.model_validate(
search_results_from_cache
)
logger.info(
f"Found {len(search_results.articles)} articles in cache."
)
return search_results
except Exception as e:
logger.warning(f"Could not read search results from cache: {e}")
# If there are no cached search_results, use the searcher to find the latest articles
for attempt in range(num_attempts):
try:
searcher_response: RunResponse = self.searcher.run(topic)
if (
searcher_response is not None
and searcher_response.content is not None
and isinstance(searcher_response.content, SearchResults)
):
article_count = len(searcher_response.content.articles)
logger.info(
f"Found {article_count} articles on attempt {attempt + 1}"
)
# Cache the search results
self.add_search_results_to_cache(topic, searcher_response.content)
return searcher_response.content
else:
logger.warning(
f"Attempt {attempt + 1}/{num_attempts} failed: Invalid response type"
)
except Exception as e:
logger.warning(f"Attempt {attempt + 1}/{num_attempts} failed: {str(e)}")
logger.error(f"Failed to get search results after {num_attempts} attempts")
return None
def scrape_articles(
self, topic: str, search_results: SearchResults, use_scrape_cache: bool
) -> Dict[str, ScrapedArticle]:
scraped_articles: Dict[str, ScrapedArticle] = {}
# Get cached scraped_articles from the session state if use_scrape_cache is True
if use_scrape_cache:
try:
scraped_articles_from_cache = self.get_cached_scraped_articles(topic)
if scraped_articles_from_cache is not None:
scraped_articles = scraped_articles_from_cache
logger.info(
f"Found {len(scraped_articles)} scraped articles in cache."
)
return scraped_articles
except Exception as e:
logger.warning(f"Could not read scraped articles from cache: {e}")
# Scrape the articles that are not in the cache
for article in search_results.articles:
if article.url in scraped_articles:
logger.info(f"Found scraped article in cache: {article.url}")
continue
article_scraper_response: RunResponse = self.article_scraper.run(
article.url
)
if (
article_scraper_response is not None
and article_scraper_response.content is not None
and isinstance(article_scraper_response.content, ScrapedArticle)
):
scraped_articles[article_scraper_response.content.url] = (
article_scraper_response.content
)
logger.info(f"Scraped article: {article_scraper_response.content.url}")
# Save the scraped articles in the session state
self.add_scraped_articles_to_cache(topic, scraped_articles)
return scraped_articles
# Run the workflow if the script is executed directly
if __name__ == "__main__":
import random
from rich.prompt import Prompt
# Fun example prompts to showcase the generator's versatility
example_prompts = [
"Why Cats Secretly Run the Internet",
"The Science Behind Why Pizza Tastes Better at 2 AM",
"Time Travelers' Guide to Modern Social Media",
"How Rubber Ducks Revolutionized Software Development",
"The Secret Society of Office Plants: A Survival Guide",
"Why Dogs Think We're Bad at Smelling Things",
"The Underground Economy of Coffee Shop WiFi Passwords",
"A Historical Analysis of Dad Jokes Through the Ages",
]
# Get topic from user
topic = Prompt.ask(
"[bold]Enter a blog post topic[/bold] (or press Enter for a random example)\n",
default=random.choice(example_prompts),
)
# Convert the topic to a URL-safe string for use in session_id
url_safe_topic = topic.lower().replace(" ", "-")
# Initialize the blog post generator workflow
# - Creates a unique session ID based on the topic
# - Sets up SQLite storage for caching results
generate_blog_post = BlogPostGenerator(
session_id=f"generate-blog-post-on-{url_safe_topic}",
storage=SqliteStorage(
table_name="generate_blog_post_workflows",
db_file="tmp/agno_workflows.db",
),
debug_mode=True,
)
# Execute the workflow with caching enabled
# Returns an iterator of RunResponse objects containing the generated content
blog_post: Iterator[RunResponse] = generate_blog_post.run(
topic=topic,
use_search_cache=True,
use_scrape_cache=True,
use_cached_report=True,
)
# Print the response
pprint_run_response(blog_post, markdown=True)
--7 Open questions:
--7.1 How does an agent actually invoke tools?
--7.2 How do we ensure a tool's output matches the required format?
--7.3 Is there a retry mechanism when an LLM call fails?