Ceci est l'implémentation pédagogique complète de l'exemple de l'article « Mémoire de l'agent : ce qu'il retient et pourquoi ».
Si vous n'avez pas encore lu l'article, commencez par là. Ici, le focus est uniquement sur le code et la différence pratique entre short-term et long-term memory.
Ce Que Cet Exemple Demontre
- Comment la memoire court terme peut perdre des instructions initiales a cause d un contexte limite
- Comment la memoire long terme conserve les reglages entre les taches
- Pourquoi la meme requete peut donner un resultat different selon le type de memoire
- Comment construire un agent memory-aware simple sans complexite inutile
Structure du Projet
foundations/
└── agent-memory/
└── python/
├── main.py # lance deux scenarios et compare le resultat
├── agent.py # logique agent avec short/long memory
├── memory.py # stores short-term et long-term
├── tools.py # sources de donnees simples et rendu du rapport
└── requirements.txt
Comment Executer
1. Clone le depot et va dans le dossier :
git clone https://github.com/AgentPatterns-tech/agentpatterns.git
cd foundations/agent-memory/python
2. Installe les dependances (cet exemple n a pas de packages externes) :
pip install -r requirements.txt
3. Lance la demo :
python main.py
Ce Que Nous Construisons Dans Le Code
Nous construisons un agent minimal qui genere un rapport hebdomadaire en deux modes.
SCENARIO 1 : short-term memory uniquement (le contexte est limité et peut « oublier » les prefs initiales)
SCENARIO 2 : short-term + long-term memory (les prefs sont conservées entre les tâches)
Idee cle : le contexte de tache et la memoire entre taches ont des roles differents.
Code
memory.py - memoire short-term et long-term
from dataclasses import dataclass, field
from typing import Any
@dataclass
class ShortMemory:
    """Bounded in-task message buffer: the agent's short-term memory.

    Only the most recent `max_items` entries survive; older ones are
    silently dropped, which is exactly how early instructions get lost.
    """

    max_items: int = 6
    items: list[dict[str, Any]] = field(default_factory=list)

    def add(self, role: str, content: str) -> None:
        """Append one message and trim the buffer from the front if needed."""
        self.items.append({"role": role, "content": content})
        overflow = len(self.items) - self.max_items
        if overflow > 0:
            # Drop the oldest entries in place.
            del self.items[:overflow]

    def snapshot(self) -> list[dict[str, Any]]:
        """Return a shallow copy of the buffer (callers may mutate freely)."""
        return self.items[:]

    def clear(self) -> None:
        """Forget the whole task context, e.g. when a new task starts."""
        self.items.clear()
@dataclass
class LongMemoryStore:
    """Cross-task preference store: the agent's long-term memory.

    Keyed by a stable user key; survives short-memory trimming/clearing.
    """

    _prefs: dict[str, dict[str, str]] = field(default_factory=dict)

    def save_prefs(self, user_key: str, prefs: dict[str, str]) -> None:
        """Persist a defensive copy of `prefs` under `user_key`."""
        self._prefs[user_key] = {**prefs}

    def load_prefs(self, user_key: str) -> dict[str, str]:
        """Return a copy of the stored prefs; unknown keys yield an empty dict."""
        stored = self._prefs.get(user_key)
        return {} if stored is None else dict(stored)
tools.py - donnees et formatage du rapport
def get_sales_total(user_id: int) -> float:
    """Stub data source: return a fixed weekly sales total.

    `user_id` is deliberately ignored in this learning example.
    """
    del user_id  # unused by the stub
    return 12400.0
def get_orders_count(user_id: int) -> int:
    """Stub data source: return a fixed weekly order count.

    `user_id` is deliberately ignored in this learning example.
    """
    del user_id  # unused by the stub
    return 31
def render_report(*, total: float, orders: int, currency: str, report_format: str) -> str:
    """Format the weekly report.

    Supported formats: "short-bullets" (three-line bullet list); anything
    else falls back to a compact one-line summary.
    """
    if report_format != "short-bullets":
        return (
            f"Sales report: total={total:.2f} {currency}, "
            f"orders={orders}, status=stable"
        )
    bullets = [
        f"- Total sales: {total:.2f} {currency}",
        f"- Orders: {orders}",
        "- Status: stable",
    ]
    return "\n".join(bullets)
agent.py - logique de resolution des prefs
from memory import LongMemoryStore, ShortMemory
from tools import get_orders_count, get_sales_total, render_report
# Fallback preferences used when neither memory layer supplies a value
# (merged lowest-priority in build_weekly_report).
DEFAULT_PREFS = {
    "report_format": "default",
    "currency": "USD",
}
def save_user_preferences(
    *,
    user_key: str,
    prefs: dict[str, str],
    short_memory: ShortMemory,
    long_memory: LongMemoryStore,
) -> None:
    """Record `prefs` in the current task context and persist them across tasks.

    The short-term entries mirror a chat exchange; the long-term write is the
    one that survives context trimming and `ShortMemory.clear()`.
    """
    # Log the user's request into the in-task context first.
    short_memory.add("user", f"Save prefs: {prefs}")
    # Durable write: this is what scenario 2 relies on.
    long_memory.save_prefs(user_key, prefs)
    # Acknowledge, again inside the in-task context.
    short_memory.add("assistant", "Preferences saved to long-term memory")
def parse_prefs_from_short_memory(short_memory: ShortMemory) -> dict[str, str]:
    """Extract preferences from the short-term buffer.

    Simplified parser for learning: recognizes "pref:key=value" markers
    anywhere in a message; later occurrences of a key win, malformed
    markers (no "=") are skipped.
    """
    found: dict[str, str] = {}
    for message in short_memory.snapshot():
        _, marker, rest = message["content"].partition("pref:")
        if not marker:
            # No "pref:" marker in this message.
            continue
        key, equals, value = rest.partition("=")
        if not equals:
            # Marker present but malformed (missing "=").
            continue
        found[key.strip()] = value.strip()
    return found
def build_weekly_report(
    *,
    user_id: int,
    user_key: str,
    request: str,
    short_memory: ShortMemory,
    long_memory: LongMemoryStore,
    use_long_memory: bool,
) -> dict:
    """Generate the weekly sales report, resolving prefs from memory.

    Preference precedence, lowest to highest: DEFAULT_PREFS, then prefs
    persisted in long-term memory (if `use_long_memory`), then prefs stated
    in the current task's short-term context.

    Returns a dict with keys: "prefs" (resolved preferences), "report"
    (rendered text), "trace" (resolution steps for the demo output), and
    "short_memory_snapshot" (buffer state after the run).
    """
    trace: list[str] = []

    # The incoming request becomes part of the task context.
    short_memory.add("user", request)
    trace.append(f"request={request}")

    short_prefs = parse_prefs_from_short_memory(short_memory)
    trace.append(f"short_prefs={short_prefs}")

    long_prefs = long_memory.load_prefs(user_key) if use_long_memory else {}
    trace.append(f"long_prefs={long_prefs}")

    # Fix: explicit in-task (short-term) prefs must override persisted
    # long-term prefs; the previous merge order let stale long-term values
    # silently win over instructions given in the current task.
    prefs = {**DEFAULT_PREFS, **long_prefs, **short_prefs}
    trace.append(f"resolved_prefs={prefs}")

    total = get_sales_total(user_id)
    orders = get_orders_count(user_id)
    report = render_report(
        total=total,
        orders=orders,
        currency=prefs["currency"],
        report_format=prefs["report_format"],
    )

    short_memory.add("assistant", f"Report generated with prefs={prefs}")
    return {
        "prefs": prefs,
        "report": report,
        "trace": trace,
        "short_memory_snapshot": short_memory.snapshot(),
    }
main.py - comparaison de deux scenarios
from agent import build_weekly_report, save_user_preferences
from memory import LongMemoryStore, ShortMemory
USER_ID = 42
USER_KEY = "user:anna"
def print_result(title: str, result: dict) -> None:
    """Render one scenario's outcome to stdout: prefs, report body, trace."""
    lines = [f"\n=== {title} ===", f"Resolved prefs: {result['prefs']}"]
    lines.append("\nReport:")
    lines.append(result["report"])
    lines.append("\nTrace:")
    lines.extend(f"  {step}" for step in result["trace"])
    print("\n".join(lines))
def main() -> None:
    """Run both scenarios against one shared long-term store and compare."""
    store = LongMemoryStore()

    # Scenario 1: short-term memory only. The prefs are stated early, then
    # enough chatter follows that they fall out of the bounded context.
    ctx_short_only = ShortMemory(max_items=4)
    seed_messages = [
        ("user", "pref:report_format=short-bullets"),
        ("user", "pref:currency=EUR"),
        ("assistant", "working..."),
        ("assistant", "still working..."),
        ("assistant", "collecting data..."),  # pushes out old prefs
    ]
    for role, content in seed_messages:
        ctx_short_only.add(role, content)
    outcome_short_only = build_weekly_report(
        user_id=USER_ID,
        user_key=USER_KEY,
        request="Build weekly sales report",
        short_memory=ctx_short_only,
        long_memory=store,
        use_long_memory=False,
    )
    print_result("SCENARIO 1: SHORT MEMORY ONLY", outcome_short_only)

    # Scenario 2: persist the prefs in long-term memory, then start a brand
    # new task with an empty short-term context.
    ctx_with_long = ShortMemory(max_items=4)
    save_user_preferences(
        user_key=USER_KEY,
        prefs={"report_format": "short-bullets", "currency": "EUR"},
        short_memory=ctx_with_long,
        long_memory=store,
    )
    ctx_with_long.clear()  # new task, short memory resets
    outcome_with_long = build_weekly_report(
        user_id=USER_ID,
        user_key=USER_KEY,
        request="Build weekly sales report like last time",
        short_memory=ctx_with_long,
        long_memory=store,
        use_long_memory=True,
    )
    print_result("SCENARIO 2: WITH LONG MEMORY", outcome_with_long)


if __name__ == "__main__":
    main()
requirements.txt
# No external dependencies for this learning example.
Exemple de Sortie
python main.py
=== SCENARIO 1: SHORT MEMORY ONLY ===
Resolved prefs: {'report_format': 'default', 'currency': 'USD'}
Report:
Sales report: total=12400.00 USD, orders=31, status=stable
Trace:
request=Build weekly sales report
short_prefs={}
long_prefs={}
resolved_prefs={'report_format': 'default', 'currency': 'USD'}
=== SCENARIO 2: WITH LONG MEMORY ===
Resolved prefs: {'report_format': 'short-bullets', 'currency': 'EUR'}
Report:
- Total sales: 12400.00 EUR
- Orders: 31
- Status: stable
Trace:
request=Build weekly sales report like last time
short_prefs={}
long_prefs={'report_format': 'short-bullets', 'currency': 'EUR'}
resolved_prefs={'report_format': 'short-bullets', 'currency': 'EUR'}
Ce Que L on Voit En Pratique
| | Short memory uniquement | Short + long memory |
|---|---|---|
| Conserve les prefs entre les taches | ❌ | ✅ |
| Resistant a la perte du contexte initial | ❌ | ✅ |
| Format/devise reproduits "comme la derniere fois" | ❌ | ✅ |
Ce Qu'Il Faut Changer Dans Cet Exemple
- Réduis `max_items` à 2 et regarde à quelle vitesse les prefs se perdent en short memory
- Ajoute une troisième préférence utilisateur (par exemple `timezone`) et propage-la dans le rapport
- Sauvegarde aussi `last_report_total` en long memory et compare la dynamique dans la nouvelle tâche
- Ajoute une règle : ne pas autoriser de prefs vides dans `save_user_preferences`
Code Complet sur GitHub
Le depot contient la version complete de cette demo : short-term/long-term memory, comparaison de scenarios et trace des etapes.
Voir le code complet sur GitHub ↗