LogoLogo
Slack CommunityCatalyst Login
  • Welcome
  • RagaAI Catalyst
    • User Quickstart
    • Concepts
      • Configure Your API Keys
      • Supported LLMs
        • OpenAI
        • Gemini
        • Azure
        • AWS Bedrock
        • Anthropic
      • Catalyst Access/Secret Keys
      • Enable Custom Gateway
      • Uploading Data
        • Create new project
        • RAG Dataset
        • Chat Dataset
          • Prompt Format
        • Logging traces (LlamaIndex, Langchain)
        • Trace Masking Functions
        • Trace Level Metadata
        • Correlating Traces with External IDs
        • Add Dataset
      • Running RagaAI Evals
        • Executing Evaluations
        • Compare Datasets
      • Analysis
      • Embeddings
    • RagaAI Metric Library
      • RAG Metrics
        • Hallucination
        • Faithfulness
        • Response Correctness
        • Response Completeness
        • False Refusal
        • Context Relevancy
        • Context Precision
        • Context Recall
        • PII Detection
        • Toxicity
      • Chat Metrics
        • Agent Quality
        • Instruction Adherence
        • User Chat Quality
      • Text-to-SQL
        • SQL Response Correctness
        • SQL Prompt Ambiguity
        • SQL Context Ambiguity
        • SQL Context Sufficiency
        • SQL Prompt Injection
      • Text Summarization
        • Summary Consistency
        • Summary Relevance
        • Summary Fluency
        • Summary Coherence
        • SummaC
        • QAG Score
        • ROUGE
        • BLEU
        • METEOR
        • BERTScore
      • Information Extraction
        • MINEA
        • Subjective Question Correction
        • Precision@K
        • Chunk Relevance
        • Entity Co-occurrence
        • Fact Entropy
      • Code Generation
        • Functional Correctness
        • ChrF
        • Ruby
        • CodeBLEU
        • Robust Pass@k
        • Robust Drop@k
        • Pass-Ratio@n
      • Marketing Content Evaluation
        • Engagement Score
        • Misattribution
        • Readability
        • Topic Coverage
        • Fabrication
      • Learning Management System
        • Topic Coverage
        • Topic Redundancy
        • Question Redundancy
        • Answer Correctness
        • Source Citability
        • Difficulty Level
      • Additional Metrics
        • Guardrails
          • Anonymize
          • Deanonymize
          • Ban Competitors
          • Ban Substrings
          • Ban Topics
          • Code
          • Invisible Text
          • Language
          • Secret
          • Sentiment
          • Factual Consistency
          • Language Same
          • No Refusal
          • Reading Time
          • Sensitive
          • URL Reachability
          • JSON Verify
        • Vulnerability Scanner
          • Bullying
          • Deadnaming
          • SexualContent
          • Sexualisation
          • SlurUsage
          • Profanity
          • QuackMedicine
          • DAN 11
          • DAN 10
          • DAN 9
          • DAN 8
          • DAN 7
          • DAN 6_2
          • DAN 6_0
          • DUDE
          • STAN
          • DAN_JailBreak
          • AntiDAN
          • ChatGPT_Developer_Mode_v2
          • ChatGPT_Developer_Mode_RANTI
          • ChatGPT_Image_Markdown
          • Ablation_Dan_11_0
          • Anthropomorphisation
      • Guardrails
        • Competitor Check
        • Gibberish Check
        • PII
        • Regex Check
        • Response Evaluator
        • Toxicity
        • Unusual Prompt
        • Ban List
        • Detect Drug
        • Detect Redundancy
        • Detect Secrets
        • Financial Tone Check
        • Has Url
        • HTML Sanitisation
        • Live URL
        • Logic Check
        • Politeness Check
        • Profanity Check
        • Quote Price
        • Restrict Topics
        • SQL Predicates Guard
        • Valid CSV
        • Valid JSON
        • Valid Python
        • Valid Range
        • Valid SQL
        • Valid URL
        • Cosine Similarity
        • Honesty Detection
        • Toxicity Hate Speech
    • Prompt Playground
      • Concepts
      • Single-Prompt Playground
      • Multiple Prompt Playground
      • Run Evaluations
      • Using Prompt Slugs with Python SDK
      • Create with AI using Prompt Wizard
      • Prompt Diff View
    • Synthetic Data Generation
    • Gateway
      • Quickstart
    • Guardrails
      • Quickstart
      • Python SDK
    • RagaAI Whitepapers
      • RagaAI RLEF (RAG LLM Evaluation Framework)
    • Agentic Testing
      • Quickstart
      • Concepts
        • Tracing
          • Langgraph (Agentic Tracing)
          • RagaAI Catalyst Tracing Guide for Azure OpenAI Users
        • Dynamic Tracing
        • Application Workflow
      • Create New Dataset
      • Metrics
        • Hallucination
        • Toxicity
        • Honesty
        • Cosine Similarity
      • Compare Traces
      • Compare Experiments
      • Add metrics locally
    • Custom Metric
    • Auto Prompt Optimization
    • Human Feedback & Annotations
      • Thumbs Up/Down
      • Add Metric Corrections
      • Corrections as Few-Shot Examples
      • Tagging
    • On-Premise Deployment
      • Enterprise Deployment Guide for AWS
      • Enterprise Deployment Guide for Azure
      • Evaluation Deployment Guide
        • Evaluation Maintenance Guide
    • Fine Tuning (OpenAI)
    • Integration
    • SDK Release Notes
      • ragaai-catalyst 2.1.7
  • RagaAI Prism
    • Quickstart
    • Sandbox Guide
      • Object Detection
      • LLM Summarization
      • Semantic Segmentation
      • Tabular Data
      • Super Resolution
      • OCR
      • Image Classification
      • Event Detection
    • Test Inventory
      • Object Detection
        • Failure Mode Analysis
        • Model Comparison Test
        • Drift Detection
        • Outlier Detection
        • Data Leakage Test
        • Labelling Quality Test
        • Scenario Imbalance
        • Class Imbalance
        • Active Learning
        • Image Property Drift Detection
      • Large Language Model (LLM)
        • Failure Mode Analysis
      • Semantic Segmentation
        • Failure Mode Analysis
        • Labelling Quality Test
        • Active Learning
        • Drift Detection
        • Class Imbalance
        • Scenario Imbalance
        • Data Leakage Test
        • Outlier Detection
        • Label Drift
        • Semantic Similarity
        • Near Duplicates Detection
        • Cluster Imbalance Test
        • Image Property Drift Detection
        • Spatio-Temporal Drift Detection
        • Spatio-Temporal Failure Mode Analysis
      • Tabular Data
        • Failure Mode Analysis
      • Instance Segmentation
        • Failure Mode Analysis
        • Labelling Quality Test
        • Drift Detection
        • Class Imbalance
        • Scenario Imbalance
        • Label Drift
        • Data Leakage Test
        • Outlier Detection
        • Active Learning
        • Near Duplicates Detection
      • Super Resolution
        • Semantic Similarity
        • Active Learning
        • Near Duplicates Detection
        • Outlier Detection
      • OCR
        • Missing Value Test
        • Outlier Detection
      • Image Classification
        • Failure Mode Analysis
        • Labelling Quality Test
        • Class Imbalance
        • Drift Detection
        • Near Duplicates Test
        • Data Leakage Test
        • Outlier Detection
        • Active Learning
        • Image Property Drift Detection
      • Event Detection
        • Failure Mode Analysis
        • A/B Test
    • Metric Glossary
    • Upload custom model
    • Event Detection
      • Upload Model
      • Generate Inference
      • Run tests
    • On-Premise Deployment
      • Enterprise Deployment Guide for AWS
      • Enterprise Deployment Guide for Azure
  • Support
Powered by GitBook
On this page
  • Try in Colab
  • AgentNeo: Getting Started
  • Setting up AgentNeo
  • Tracing
  • Dataset Creation
  • Experiment

Was this helpful?

  1. AgentNeo

Examples

Last updated 8 months ago

Was this helpful?

Try in Colab

AgentNeo: Getting Started


!pip install agentneo -U -q

Setting up AgentNeo


from agentneo import AgentNeo, Project, Dataset, Experiment, Tracer

# Log in with an email id: a new account is created for first-time users,
# otherwise the existing account is retrieved.
BASE_URL = "http://74.249.60.46:5000"

agent_session = AgentNeo(email="user1@example.com", base_url=BASE_URL)


# Create a project to hold the datasets and experiments used below.
project = Project(session=agent_session, project_name="Project_2", description="A test project")
project_created = project.create()
project_id = project_created['id']

Tracing

# Pull the OpenAI key from Colab's secret store and expose it via the
# environment so the LangChain/OpenAI SDKs below can pick it up.
import os
from google.colab import userdata

os.environ["OPENAI_API_KEY"] = userdata.get('OPENAI_API_KEY')

# Notebook-only shell command: installs the LangChain/LangGraph stack used below.
!pip install langchain_openai langchain_community langgraph arxiv -q


# Initialise the tracer, declaring the tools used in this workflow and their
# descriptions so traced nodes can be associated with tool metadata.
tracer = Tracer(session=agent_session, metadata={'tools': [
    {'name': 'arxiv_tool', 'description': "Tool to search Arxiv for available research papers."},
    {'name': 'agent', 'description': "Identify the search terms for arxiv from the user description and call the arxiv_tool based on the search term"},
]})

# Langgraph code
from langchain_openai import ChatOpenAI
from langchain_community.utilities.arxiv import ArxivAPIWrapper
from typing import TypedDict, Annotated, Sequence
import operator
from langgraph.graph import StateGraph, END
import os  # NOTE(review): `os` is already imported above; harmless duplicate.

# LLM client; the tracer's callback handler records every LLM call into the trace.
openai_llm = ChatOpenAI(temperature=0.4, callbacks=[tracer.get_callback_handler()])
# arXiv search wrapper: returns up to 3 results, query truncated at 300 chars,
# each document's content capped at 40k characters.
arxiv = ArxivAPIWrapper(
    top_k_results=3,
    ARXIV_MAX_QUERY_LENGTH=300,
    load_max_docs=3,
    load_all_available_meta=False,
    doc_content_chars_max=40000
)

class AgentState(TypedDict):
    """Graph state: a message list that LangGraph merges across nodes via operator.add (append)."""
    messages: Annotated[Sequence[str], operator.add]

@tracer.trace_node
def agent(state):
    """Ask the LLM to extract an arXiv search term from the user's text summary.

    Reads the first message in the state and appends the LLM's answer.
    """
    summary = state["messages"][0]
    prompt = f"""
    You are given text summary on the research topics that the user is working on.
    You need to extract the search term from the text summary.
    Here is the Text summary: {summary}
    """
    completion = openai_llm.invoke(prompt)
    return {"messages": [completion.content]}

@tracer.trace_node
def arxiv_tool(state):
    """Search arXiv with the term the agent node appended (message index 1)."""
    search_term = state["messages"][1]
    results = arxiv.run(search_term)
    return {"messages": [results]}

@tracer.trace_node
def responder(state):
    """Compose the final answer from the original query (index 0) and the arXiv results (index 2)."""
    user_query = state["messages"][0]
    search_results = state["messages"][2]
    # Local is named `reply` to avoid shadowing the module-level `agent` node.
    reply = openai_llm.invoke(f"""
    You are given search results on the research topics that the user is working on.

    Here is the user query:
    ---
    {user_query}
    ---

    Here is the search results:
    ---
    {search_results}
    ---
    """)
    return {"messages": [reply.content]}
@tracer.trace_node
def where_to_go(state):
    """Route after the agent node: stop on a "no_response" first message, else continue to search."""
    first_message = state["messages"][0]
    return "end" if first_message == "no_response" else "continue"

@tracer.trace_graph
def workflow():
    """Build and compile the agent -> arxiv -> responder LangGraph.

    Routing out of "agent" is handled solely by `where_to_go`:
    "continue" proceeds to the arxiv search node, "end" terminates the run.

    Returns:
        The compiled graph, ready for `.invoke(...)`.
    """
    graph = StateGraph(AgentState)
    graph.add_node("agent", agent)
    graph.add_node("arxiv", arxiv_tool)
    graph.add_node("responder", responder)
    # Fix: the original also called graph.add_edge("agent", "arxiv"), which
    # duplicated the "continue" branch below and made the "end" branch
    # ineffective (arxiv would run unconditionally). The conditional edges
    # alone express the intended routing.
    graph.add_conditional_edges("agent", where_to_go, {
        "end": END,
        "continue": "arxiv"
    })
    graph.add_edge("arxiv", "responder")
    graph.set_entry_point("agent")
    graph.set_finish_point("responder")
    return graph.compile()

# Compile the graph once; reused by every get_output() call below.
compiled_workflow = workflow()

@tracer.trace_node
def get_output(message):
    """Run the compiled workflow on `message`; print and return the final reply.

    Improvement: the original only printed the result and implicitly returned
    None. Returning the final message makes the result usable programmatically;
    existing callers that ignore the return value are unaffected.

    Args:
        message: The user's free-text research description.

    Returns:
        The last message produced by the graph (the responder's answer).
    """
    inputs = {"messages": [message]}
    response = compiled_workflow.invoke(inputs)
    final_message = response["messages"][-1]
    print(final_message)
    return final_message

# Run the traced workflow end-to-end on a sample query.
get_output("Is someone working on the propagation of LASER through brownian fluids")


# Upload the recorded trace to the backend; the returned id identifies it for dataset creation.
trace_id = tracer.upload_trace()

Dataset Creation

# Create a dataset from a trace
dataset = Dataset(
    session=agent_session,
    project_id=project_id,
    dataset_name="Dataset_1",
    description="A test dataset"
)

# Fix: use the id returned by tracer.upload_trace() (stored in `trace_id`
# above); the original passed `tracer.id`, leaving the uploaded trace id
# assigned but unused.
dataset_traced = dataset.from_trace(trace_id=trace_id, trace_filter=None)

Experiment

# Register an experiment over the traced dataset.
experiment = Experiment(
    session=agent_session,
    experiment_name="Experiment_1",
    description="A test experiment",
    dataset_id=dataset_traced['id'],
    project_id=project_id,
)

experiment_created = experiment.create()

# Execute Experiment: the four LLM-judged metrics share an identical config,
# so build their entries from a tuple of names (one fresh dict per metric).
exp = experiment.execute(metrics=[
    {"name": "summarise", "config": {}},
    *(
        {"name": metric_name, "config": {"model": "gpt-4o-mini", "OPENAI_API_KEY": os.environ.get("OPENAI_API_KEY")}}
        for metric_name in (
            "tool_selection_accuracy",
            "tool_usage_efficiency",
            "goal_decomposition_efficiency",
            "plan_adaptibility",
        )
    ),
])

# Fetch and pretty-print the per-metric results of the executed experiment.
exp = experiment.get_results(experiment_id=exp['id'])

separator = "*" * 100
for metric_result in exp['results']:
    print(f"Name: {metric_result['metric_name']}")
    print("Result:")
    for key, value in metric_result['result'].items():
        print(f"{key}: {value}")
    print(separator + "\n")
LogoGoogle Colab