from lyzr import Cognis, CognisMessage

cog = Cognis()

# Store a conversation: hand Cognis the raw chat turns and let it mine
# them for memories, keyed to a single owner.
turns = [
    ("user", "My name is Alice and I love hiking."),
    ("assistant", "Nice to meet you, Alice!"),
    ("user", "I work at Google as a data scientist."),
    ("assistant", "That's a great role!"),
]
cog.add(
    messages=[CognisMessage(role=role, content=text) for role, text in turns],
    owner_id="user_alice",
)
from cognis import Cognis

m = Cognis(owner_id="user_alice")

# Store a conversation as plain role/content dicts; the client reports
# how many memories it extracted.
turns = [
    ("user", "My name is Alice and I love hiking."),
    ("assistant", "Nice to meet you, Alice!"),
    ("user", "I work at Google as a data scientist."),
    ("assistant", "That's a great role!"),
]
result = m.add([{"role": role, "content": text} for role, text in turns])
print(result["message"])
# "Extracted 3 memories from 4 messages"
Cognis automatically extracts discrete facts from the conversation and stores them as searchable memory records.
# Semantic search over the stored memories, scoped to one owner.
results = cog.search(
    query="What does Alice do for work?",
    owner_id="user_alice",
    limit=5,
)
for hit in results:
    print(f" {hit.content} (score: {hit.score})")
# Output:
# Alice works at Google as a data scientist (score: 0.89)
# Search returns a dict; each entry under "results" carries the memory
# text plus a similarity score.
resp = m.search("What does Alice do for work?", limit=5)
for item in resp["results"]:
    print(f" {item['content']} (score: {item['score']})")
# Output:
# Alice works at Google as a data scientist (score: 0.8712)
# List every memory held for this owner, showing a short id prefix.
memories = cog.get(owner_id="user_alice")
for entry in memories.memories:
    print(f" [{entry.id[:8]}] {entry.content}")
# Output:
# [a1b2c3d4] Alice's name is Alice
# [e5f6g7h8] Alice works at Google as a data scientist
# [i9j0k1l2] Alice loves hiking
# Dump all memories, labelled with the category Cognis assigned to each.
resp = m.get_all()
for record in resp["memories"]:
    cat = record["metadata"]["category"]
    print(f" [{cat}] {record['content']}")
# Output:
# [identity] Alice's name is Alice
# [work_career] Alice works at Google as a data scientist
# [interests] Alice loves hiking
# Build a memory-aware context block for the next model turn.
context = cog.context(
    current_messages=[
        CognisMessage(role="user", content="What should I do this weekend?"),
    ],
    owner_id="user_alice",
)
# Use context in your LLM system prompt
print(context)
# Retrieve memories relevant to the incoming message as a ready-made
# context string, then release the client.
ctx = m.get_context(
    messages=[{"role": "user", "content": "What should I do this weekend?"}],
)
# Use context_string in your LLM system prompt
print(ctx["context_string"])
# "Relevant memories:\n- Alice loves hiking\n- ..."
m.close()  # Don't forget to close!