Examples


AgentNeo: Getting Started


!pip install agentneo -U -q

Setting up AgentNeo


from agentneo import AgentNeo, Project, Dataset, Experiment, Tracer

# Log in with an email ID; this creates a new account if one does not exist, otherwise it retrieves the existing account
BASE_URL = "http://74.249.60.46:5000"

agent_session = AgentNeo(email="user1@example.com", base_url=BASE_URL)


# Create Project


project_created = Project(session=agent_session, project_name="Project_2", description="A test project").create()
project_id = project_created['id']

Tracing

import os
from google.colab import userdata

os.environ["OPENAI_API_KEY"] = userdata.get('OPENAI_API_KEY')

!pip install langchain_openai langchain_community langgraph arxiv -q


# Initialise the tracer with the tools used and their descriptions
tracer = Tracer(session=agent_session, metadata={'tools': [
    {'name': 'arxiv_tool', 'description': "Tool to search Arxiv for available research papers."},
    {'name': 'agent', 'description': "Identify the search terms for arxiv from the user description and call the arxiv_tool based on the search term"},
]})

# LangGraph workflow code
from langchain_openai import ChatOpenAI
from langchain_community.utilities.arxiv import ArxivAPIWrapper
from typing import TypedDict, Annotated, Sequence
import operator
from langgraph.graph import StateGraph, END
import os

openai_llm = ChatOpenAI(temperature=0.4, callbacks=[tracer.get_callback_handler()])
arxiv = ArxivAPIWrapper(
    top_k_results=3,
    ARXIV_MAX_QUERY_LENGTH=300,
    load_max_docs=3,
    load_all_available_meta=False,
    doc_content_chars_max=40000
)
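
# (Optional sanity check, not in the original notebook.) ArxivAPIWrapper.run()
# takes a plain search string and returns a text digest of the top matching
# papers (title, authors, summary) -- the same text arxiv_tool below feeds
# back into the graph state. Uncomment to preview the raw output:
# print(arxiv.run("laser propagation in brownian fluids")[:500])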

class AgentState(TypedDict):
    messages: Annotated[Sequence[str], operator.add]
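
# AgentState uses operator.add as the reducer for "messages": each node returns
# {"messages": [<one new string>]} and LangGraph concatenates it onto the existing
# list instead of replacing it. After a full run the list is, in order, the user
# query, the extracted search term, the arxiv results, and the final answer.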

@tracer.trace_node
def agent(state):
    query = state["messages"]
    res = openai_llm.invoke(f"""
    You are given a text summary of the research topics that the user is working on.
    You need to extract the search term from this summary.
    Here is the Text summary: {query[0]}
    """)
    return {"messages": [res.content]}

@tracer.trace_node
def arxiv_tool(state):
    context = state["messages"]
    search_query = context[1]
    data = arxiv.run(search_query)
    return {"messages": [data]}

@tracer.trace_node
def responder(state):
    res = openai_llm.invoke(f"""
    You are given search results on the research topics that the user is working on.

    Here is the user query:
    ---
    {state["messages"][0]}
    ---

    Here are the search results:
    ---
    {state["messages"][2]}
    ---
    """)
    return {"messages": [res.content]}

@tracer.trace_node
def where_to_go(state):
    ctx = state["messages"][0]
    if ctx == "no_response":
        return "end"
    else:
        return "continue"

@tracer.trace_graph
def workflow():
    graph = StateGraph(AgentState)
    graph.add_node("agent", agent)
    graph.add_node("arxiv", arxiv_tool)
    graph.add_node("responder", responder)
    # Route on where_to_go: end early if the agent returned "no_response",
    # otherwise continue to the arxiv search node.
    graph.add_conditional_edges("agent", where_to_go, {
        "end": END,
        "continue": "arxiv"
    })
    graph.add_edge("arxiv", "responder")
    graph.set_entry_point("agent")
    graph.set_finish_point("responder")
    return graph.compile()

compiled_workflow = workflow()
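
# End-to-end flow of the compiled graph: agent (extracts a search term from the
# user's description) -> arxiv (fetches matching papers) -> responder (summarises
# them against the original query), with an early exit to END if the agent
# returns "no_response".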

@tracer.trace_node
def get_output(message):
    inputs = {"messages": [message]}
    response = compiled_workflow.invoke(inputs)
    print(response["messages"][-1])

get_output("Is someone working on the propagation of LASER through brownian fluids")


trace_id = tracer.upload_trace()

Dataset Creation

# Create a dataset from a trace
dataset = Dataset(
    session=agent_session,
    project_id=project_id,
    dataset_name="Dataset_1",
    description="A test dataset"
)

dataset_traced = dataset.from_trace(trace_id=tracer.id, trace_filter=None)

Experiment

# Create Experiment
experiment = Experiment(
    session=agent_session,
    experiment_name="Experiment_1",
    description="A test experiment",
    dataset_id=dataset_traced['id'],
    project_id=project_id
)

experiment_created = experiment.create()

# Execute Experiment
exp = experiment.execute(metrics=[
    {"name": "summarise", "config": {}},
    {"name": "tool_selection_accuracy", "config": {"model": "gpt-4o-mini", "OPENAI_API_KEY": os.environ.get("OPENAI_API_KEY")}},
    {"name": "tool_usage_efficiency", "config": {"model": "gpt-4o-mini", "OPENAI_API_KEY": os.environ.get("OPENAI_API_KEY")}},
    {"name": "goal_decomposition_efficiency", "config": {"model": "gpt-4o-mini", "OPENAI_API_KEY": os.environ.get("OPENAI_API_KEY")}},
    {"name": "plan_adaptibility", "config": {"model": "gpt-4o-mini", "OPENAI_API_KEY": os.environ.get("OPENAI_API_KEY")}},
])

# Experiment Results
exp = experiment.get_results(experiment_id=exp['id'])

for i in exp['results']:
    print(f"Name: {i['metric_name']}")
    print(f"Result:")
    for key, value in i['result'].items():
        print(f"{key}: {value}")
    print(f"{'*'*100}\n")
