"""V2 Evaluation Interface."""from__future__importannotationsimportastimportcollectionsimportconcurrent.futuresascfimportdatetimeimportfunctoolsimportinspectimportitertoolsimportloggingimportpathlibimportqueueimportrandomimporttextwrapimportthreadingimportuuidfromcontextvarsimportcopy_contextfromtypingimport(TYPE_CHECKING,Any,Awaitable,Callable,DefaultDict,Dict,Generator,Iterable,Iterator,List,Optional,Sequence,Tuple,TypeVar,Union,cast,)fromtyping_extensionsimportTypedDictimportlangsmithfromlangsmithimportenvasls_envfromlangsmithimportrun_helpersasrhfromlangsmithimportrun_treesasrtfromlangsmithimportschemasfromlangsmithimportutilsasls_utilsfromlangsmith._internal._beta_decoratorimport_warn_oncefromlangsmith.evaluation.evaluatorimport(SUMMARY_EVALUATOR_T,ComparisonEvaluationResult,DynamicComparisonRunEvaluator,DynamicRunEvaluator,EvaluationResult,EvaluationResults,RunEvaluator,_normalize_summary_evaluator,comparison_evaluator,run_evaluator,)fromlangsmith.evaluation.integrationsimportLangChainStringEvaluatorifTYPE_CHECKING:importpandasaspdfromlangchain_core.runnablesimportRunnableDataFrame=pd.DataFrameelse:DataFrame=Anylogger=logging.getLogger(__name__)TARGET_T=Callable[[dict],dict]# Data format: dataset-name, dataset_id, or examplesDATA_T=Union[str,uuid.UUID,Iterable[schemas.Example],schemas.Dataset]# Summary evaluator runs over the whole dataset# and reports aggregate metric(s)# Row-level evaluatorEVALUATOR_T=Union[RunEvaluator,Callable[[schemas.Run,Optional[schemas.Example]],Union[EvaluationResult,EvaluationResults],],Callable[...,Union[dict,EvaluationResults,EvaluationResult]],]AEVALUATOR_T=Union[Callable[[schemas.Run,Optional[schemas.Example]],Awaitable[Union[EvaluationResult,EvaluationResults]],],]
[docs]
def evaluate(
    target: Union[TARGET_T, Runnable],
    /,
    data: DATA_T,
    evaluators: Optional[Sequence[EVALUATOR_T]] = None,
    summary_evaluators: Optional[Sequence[SUMMARY_EVALUATOR_T]] = None,
    metadata: Optional[dict] = None,
    experiment_prefix: Optional[str] = None,
    description: Optional[str] = None,
    max_concurrency: Optional[int] = None,
    num_repetitions: int = 1,
    client: Optional[langsmith.Client] = None,
    blocking: bool = True,
    experiment: Optional[Union[schemas.TracerSession, str, uuid.UUID]] = None,
    upload_results: bool = True,
) -> ExperimentResults:
    r"""Evaluate an application on a given dataset.

    Args:
        target (TARGET_T): The target system or function to evaluate.
        data (DATA_T): The dataset to evaluate on. Can be a dataset name, a list of
            examples, or a generator of examples.
        evaluators (Optional[Sequence[EVALUATOR_T]]): A list of evaluators to run
            on each example. Defaults to None.
        summary_evaluators (Optional[Sequence[SUMMARY_EVALUATOR_T]]): A list of summary
            evaluators to run on the entire dataset. Defaults to None.
        metadata (Optional[dict]): Metadata to attach to the experiment.
            Defaults to None.
        experiment_prefix (Optional[str]): A prefix to provide for your experiment name.
            Defaults to None.
        description (Optional[str]): A free-form text description for the experiment.
        max_concurrency (Optional[int]): The maximum number of concurrent
            evaluations to run. Defaults to None (max number of workers).
        client (Optional[langsmith.Client]): The LangSmith client to use.
            Defaults to None.
        blocking (bool): Whether to block until the evaluation is complete.
            Defaults to True.
        num_repetitions (int): The number of times to run the evaluation.
            Each item in the dataset will be run and evaluated this many times.
            Defaults to 1.
        experiment (Optional[Union[schemas.TracerSession, str, uuid.UUID]]): An
            existing experiment to extend. If provided, experiment_prefix is
            ignored. For advanced usage only.

    Returns:
        ExperimentResults: The results of the evaluation.

    Examples:
        Prepare the dataset:

        >>> from typing import Sequence
        >>> from langsmith import Client
        >>> from langsmith.evaluation import evaluate
        >>> from langsmith.schemas import Example, Run
        >>> client = Client()
        >>> dataset = client.clone_public_dataset(
        ...     "https://smith.langchain.com/public/419dcab2-1d66-4b94-8901-0357ead390df/d"
        ... )
        >>> dataset_name = "Evaluate Examples"

        Basic usage:

        >>> def accuracy(run: Run, example: Example):
        ...     # Row-level evaluator for accuracy.
        ...     pred = run.outputs["output"]
        ...     expected = example.outputs["answer"]
        ...     return {"score": expected.lower() == pred.lower()}
        >>> def precision(runs: Sequence[Run], examples: Sequence[Example]):
        ...     # Experiment-level evaluator for precision.
        ...     # TP / (TP + FP)
        ...     predictions = [run.outputs["output"].lower() for run in runs]
        ...     expected = [example.outputs["answer"].lower() for example in examples]
        ...     # yes and no are the only possible answers
        ...     tp = sum([p == e for p, e in zip(predictions, expected) if p == "yes"])
        ...     fp = sum([p == "yes" and e == "no" for p, e in zip(predictions, expected)])
        ...     return {"score": tp / (tp + fp)}
        >>> def predict(inputs: dict) -> dict:
        ...     # This can be any function or just an API call to your app.
        ...     return {"output": "Yes"}
        >>> results = evaluate(
        ...     predict,
        ...     data=dataset_name,
        ...     evaluators=[accuracy],
        ...     summary_evaluators=[precision],
        ...     experiment_prefix="My Experiment",
        ...     description="Evaluating the accuracy of a simple prediction model.",
        ...     metadata={
        ...         "my-prompt-version": "abcd-1234",
        ...     },
        ... )  # doctest: +ELLIPSIS
        View the evaluation results for experiment:...

        Evaluating over only a subset of the examples:

        >>> experiment_name = results.experiment_name
        >>> examples = client.list_examples(dataset_name=dataset_name, limit=5)
        >>> results = evaluate(
        ...     predict,
        ...     data=examples,
        ...     evaluators=[accuracy],
        ...     summary_evaluators=[precision],
        ...     experiment_prefix="My Experiment",
        ...     description="Just testing a subset synchronously.",
        ... )  # doctest: +ELLIPSIS
        View the evaluation results for experiment:...

        Streaming each prediction to more easily + eagerly debug.

        >>> results = evaluate(
        ...     predict,
        ...     data=dataset_name,
        ...     evaluators=[accuracy],
        ...     summary_evaluators=[precision],
        ...     description="I don't even have to block!",
        ...     blocking=False,
        ... )  # doctest: +ELLIPSIS
        View the evaluation results for experiment:...
        >>> for i, result in enumerate(results):  # doctest: +ELLIPSIS
        ...     pass

        Using the `evaluate` API with an off-the-shelf LangChain evaluator:

        >>> from langsmith.evaluation import LangChainStringEvaluator
        >>> from langchain_openai import ChatOpenAI
        >>> def prepare_criteria_data(run: Run, example: Example):
        ...     return {
        ...         "prediction": run.outputs["output"],
        ...         "reference": example.outputs["answer"],
        ...         "input": str(example.inputs),
        ...     }
        >>> results = evaluate(
        ...     predict,
        ...     data=dataset_name,
        ...     evaluators=[
        ...         accuracy,
        ...         LangChainStringEvaluator("embedding_distance"),
        ...         LangChainStringEvaluator(
        ...             "labeled_criteria",
        ...             config={
        ...                 "criteria": {
        ...                     "usefulness": "The prediction is useful if it is correct"
        ...                     " and/or asks a useful followup question."
        ...                 },
        ...                 "llm": ChatOpenAI(model="gpt-4o"),
        ...             },
        ...             prepare_data=prepare_criteria_data,
        ...         ),
        ...     ],
        ...     description="Evaluating with off-the-shelf LangChain evaluators.",
        ...     summary_evaluators=[precision],
        ... )  # doctest: +ELLIPSIS
        View the evaluation results for experiment:...

        Evaluating a LangChain object:

        >>> from langchain_core.runnables import chain as as_runnable
        >>> @as_runnable
        ... def nested_predict(inputs):
        ...     return {"output": "Yes"}
        >>> @as_runnable
        ... def lc_predict(inputs):
        ...     return nested_predict.invoke(inputs)
        >>> results = evaluate(
        ...     lc_predict.invoke,
        ...     data=dataset_name,
        ...     evaluators=[accuracy],
        ...     description="This time we're evaluating a LangChain object.",
        ...     summary_evaluators=[precision],
        ... )  # doctest: +ELLIPSIS
        View the evaluation results for experiment:...
    """  # noqa: E501
    if not upload_results:
        _warn_once("'upload_results' parameter is in beta.")
    if callable(target) and rh.is_async(target):
        raise ValueError(
            "Async functions are not supported by `evaluate`. "
            "Please use `aevaluate` instead:\n\n"
            "from langsmith import aevaluate\n\n"
            "await aevaluate(\n"
            " async_target_function,\n"
            " data=data,\n"
            " evaluators=evaluators,\n"
            " # ... other parameters\n"
            ")"
        )
    if experiment and experiment_prefix:
        raise ValueError(
            "Expected at most one of 'experiment' or 'experiment_prefix',"
            " but both were provided. "
            f"Got: experiment={experiment}, experiment_prefix={experiment_prefix}"
        )
    return _evaluate(
        target,
        data=data,
        evaluators=evaluators,
        summary_evaluators=summary_evaluators,
        metadata=metadata,
        experiment_prefix=experiment_prefix,
        description=description,
        max_concurrency=max_concurrency,
        num_repetitions=num_repetitions,
        client=client,
        blocking=blocking,
        experiment=experiment,
        upload_results=upload_results,
    )
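# --- Illustrative usage sketch (editor's addition, not part of the library) ---
# A compact example of calling evaluate() with repetitions and bounded concurrency,
# then consuming the streamed result rows. The dataset name and target function are
# placeholders; any evaluator matching EVALUATOR_T works here.
def _example_evaluate_usage() -> None:
    def target(inputs: dict) -> dict:
        return {"output": "Yes"}

    results = evaluate(
        target,
        data="Evaluate Examples",  # assumed dataset name
        evaluators=[_example_exact_match_evaluator],
        num_repetitions=3,  # run and evaluate each example three times
        max_concurrency=4,  # cap concurrent target/evaluator calls
    )
    for row in results:  # rows stream in as predictions and feedback complete
        print(row["run"].id, row["evaluation_results"]["results"])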
[docs]
def evaluate_existing(
    experiment: Union[str, uuid.UUID, schemas.TracerSession],
    /,
    evaluators: Optional[Sequence[EVALUATOR_T]] = None,
    summary_evaluators: Optional[Sequence[SUMMARY_EVALUATOR_T]] = None,
    metadata: Optional[dict] = None,
    max_concurrency: Optional[int] = None,
    client: Optional[langsmith.Client] = None,
    load_nested: bool = False,
    blocking: bool = True,
) -> ExperimentResults:
    r"""Evaluate existing experiment runs.

    Args:
        experiment (Union[str, uuid.UUID, schemas.TracerSession]): The identifier of
            the experiment to evaluate.
        evaluators (Optional[Sequence[EVALUATOR_T]]): Optional sequence of evaluators
            to use for individual run evaluation.
        summary_evaluators (Optional[Sequence[SUMMARY_EVALUATOR_T]]): Optional sequence
            of evaluators to apply over the entire dataset.
        metadata (Optional[dict]): Optional metadata to include in the evaluation
            results.
        max_concurrency (Optional[int]): Optional maximum number of concurrent
            evaluations.
        client (Optional[langsmith.Client]): Optional Langsmith client to use for
            evaluation.
        load_nested: Whether to load all child runs for the experiment.
            Default is to only load the top-level root runs.
        blocking (bool): Whether to block until evaluation is complete.

    Returns:
        ExperimentResults: The evaluation results.

    Environment:
        - LANGSMITH_TEST_CACHE: If set, API calls will be cached to disk to save time
            and cost during testing. Recommended to commit the cache files to your
            repository for faster CI/CD runs.
            Requires the 'langsmith[vcr]' package to be installed.

    Examples:
        >>> from langsmith.evaluation import evaluate, evaluate_existing
        >>> dataset_name = "Evaluate Examples"
        >>> def predict(inputs: dict) -> dict:
        ...     # This can be any function or just an API call to your app.
        ...     return {"output": "Yes"}
        >>> # First run inference on the dataset
        ... results = evaluate(
        ...     predict,
        ...     data=dataset_name,
        ... )  # doctest: +ELLIPSIS
        View the evaluation results for experiment:...
        >>> # Then apply evaluators to the experiment
        ... def accuracy(run: Run, example: Example):
        ...     # Row-level evaluator for accuracy.
        ...     pred = run.outputs["output"]
        ...     expected = example.outputs["answer"]
        ...     return {"score": expected.lower() == pred.lower()}
        >>> def precision(runs: Sequence[Run], examples: Sequence[Example]):
        ...     # Experiment-level evaluator for precision.
        ...     # TP / (TP + FP)
        ...     predictions = [run.outputs["output"].lower() for run in runs]
        ...     expected = [example.outputs["answer"].lower() for example in examples]
        ...     # yes and no are the only possible answers
        ...     tp = sum([p == e for p, e in zip(predictions, expected) if p == "yes"])
        ...     fp = sum([p == "yes" and e == "no" for p, e in zip(predictions, expected)])
        ...     return {"score": tp / (tp + fp)}
        >>> experiment_name = (
        ...     results.experiment_name
        ... )  # Can use the returned experiment name
        >>> experiment_name = "My Experiment:64e6e91"  # Or manually specify
        >>> results = evaluate_existing(
        ...     experiment_name,
        ...     summary_evaluators=[precision],
        ... )  # doctest: +ELLIPSIS
        View the evaluation results for experiment:...
    """  # noqa: E501
    client = client or rt.get_cached_client(timeout_ms=(20_000, 90_001))
    project = (
        experiment
        if isinstance(experiment, schemas.TracerSession)
        else _load_experiment(experiment, client)
    )
    runs = _load_traces(experiment, client, load_nested=load_nested)
    data_map = _load_examples_map(client, project)
    data = [data_map[cast(uuid.UUID, run.reference_example_id)] for run in runs]
    return _evaluate(
        runs,
        data=data,
        evaluators=evaluators,
        summary_evaluators=summary_evaluators,
        metadata=metadata,
        max_concurrency=max_concurrency,
        client=client,
        blocking=blocking,
        experiment=project,
    )
[docs]
class ExperimentResults:
    """Represents the results of an evaluate() call.

    This class provides an iterator interface to iterate over the experiment results
    as they become available. It also provides methods to access the experiment name,
    the number of results, and to wait for the results to be processed.

    Methods:
        experiment_name() -> str: Returns the name of the experiment.
        wait() -> None: Waits for the experiment data to be processed.
    """
[docs]
    def wait(self) -> None:
        """Wait for the evaluation runner to complete.

        This method blocks the current thread until the evaluation runner has
        finished its execution.
        """
        if self._thread:
            self._thread.join()
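# --- Illustrative usage sketch (editor's addition, not part of the library) ---
# With blocking=False, evaluate() returns immediately and the runner continues on a
# background thread; iterating streams rows as they finish, and wait() joins that
# thread. The dataset name and target are placeholders.
def _example_nonblocking_usage() -> None:
    def target(inputs: dict) -> dict:
        return {"output": "Yes"}

    results = evaluate(target, data="Evaluate Examples", blocking=False)
    for row in results:  # consume rows as they become available
        _ = row["evaluation_results"]
    results.wait()  # block until the background runner has finished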
## Public API for Comparison Experiments

# Row-level evaluator
COMPARATIVE_EVALUATOR_T = Callable[
    [Sequence[schemas.Run], Optional[schemas.Example]],
    Union[
        Union[ComparisonEvaluationResult, dict],
        Awaitable[Union[ComparisonEvaluationResult, dict]],
    ],
]
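# --- Illustrative sketch (editor's addition, not part of the library) ---
# A pairwise evaluator matching COMPARATIVE_EVALUATOR_T: it receives one run per
# compared experiment plus the shared reference Example and returns per-run scores
# under a single feedback key. The "prefer the longer output" rule is a placeholder.
def _example_prefer_longer(
    runs: Sequence[schemas.Run], example: Optional[schemas.Example]
) -> dict:
    run_a, run_b = runs[0], runs[1]
    len_a = len(str((run_a.outputs or {}).get("output", "")))
    len_b = len(str((run_b.outputs or {}).get("output", "")))
    scores = {run_a.id: 1, run_b.id: 0} if len_a >= len_b else {run_a.id: 0, run_b.id: 1}
    return {"key": "prefer_longer", "scores": scores}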
[docs]defevaluate_comparative(experiments:Tuple[Union[str,uuid.UUID],Union[str,uuid.UUID]],/,evaluators:Sequence[COMPARATIVE_EVALUATOR_T],experiment_prefix:Optional[str]=None,description:Optional[str]=None,max_concurrency:int=5,client:Optional[langsmith.Client]=None,metadata:Optional[dict]=None,load_nested:bool=False,randomize_order:bool=False,)->ComparativeExperimentResults:r"""Evaluate existing experiment runs against each other. This lets you use pairwise preference scoring to generate more reliable feedback in your experiments. Args: experiments (Tuple[Union[str, uuid.UUID], Union[str, uuid.UUID]]): The identifiers of the experiments to compare. evaluators (Sequence[COMPARATIVE_EVALUATOR_T]): A list of evaluators to run on each example. experiment_prefix (Optional[str]): A prefix to provide for your experiment name. Defaults to None. description (Optional[str]): A free-form text description for the experiment. max_concurrency (int): The maximum number of concurrent evaluations to run. Defaults to 5. client (Optional[langsmith.Client]): The LangSmith client to use. Defaults to None. metadata (Optional[dict]): Metadata to attach to the experiment. Defaults to None. load_nested (bool): Whether to load all child runs for the experiment. Default is to only load the top-level root runs. randomize_order (bool): Whether to randomize the order of the outputs for each evaluation. Default is False. Returns: ComparativeExperimentResults: The results of the comparative evaluation. Examples: Suppose you want to compare two prompts to see which one is more effective. You would first prepare your dataset: >>> from typing import Sequence >>> from langsmith import Client >>> from langsmith.evaluation import evaluate >>> from langsmith.schemas import Example, Run >>> client = Client() >>> dataset = client.clone_public_dataset( ... "https://smith.langchain.com/public/419dcab2-1d66-4b94-8901-0357ead390df/d" ... ) >>> dataset_name = "Evaluate Examples" Then you would run your different prompts: >>> import functools >>> import openai >>> from langsmith.evaluation import evaluate >>> from langsmith.wrappers import wrap_openai >>> oai_client = openai.Client() >>> wrapped_client = wrap_openai(oai_client) >>> prompt_1 = "You are a helpful assistant." >>> prompt_2 = "You are an exceedingly helpful assistant." >>> def predict(inputs: dict, prompt: str) -> dict: ... completion = wrapped_client.chat.completions.create( ... model="gpt-3.5-turbo", ... messages=[ ... {"role": "system", "content": prompt}, ... { ... "role": "user", ... "content": f"Context: {inputs['context']}" ... f"\n\ninputs['question']", ... }, ... ], ... ) ... return {"output": completion.choices[0].message.content} >>> results_1 = evaluate( ... functools.partial(predict, prompt=prompt_1), ... data=dataset_name, ... description="Evaluating our basic system prompt.", ... blocking=False, # Run these experiments in parallel ... ) # doctest: +ELLIPSIS View the evaluation results for experiment:... >>> results_2 = evaluate( ... functools.partial(predict, prompt=prompt_2), ... data=dataset_name, ... description="Evaluating our advanced system prompt.", ... blocking=False, ... ) # doctest: +ELLIPSIS View the evaluation results for experiment:... 
>>> results_1.wait() >>> results_2.wait() >>> import time >>> time.sleep(10) # Wait for the traces to be fully processed Finally, you would compare the two prompts directly: >>> import json >>> from langsmith.evaluation import evaluate_comparative >>> def score_preferences(runs: list, example: schemas.Example): ... assert len(runs) == 2 # Comparing 2 systems ... assert isinstance(example, schemas.Example) ... assert all(run.reference_example_id == example.id for run in runs) ... pred_a = runs[0].outputs["output"] ... pred_b = runs[1].outputs["output"] ... ground_truth = example.outputs["answer"] ... tools = [ ... { ... "type": "function", ... "function": { ... "name": "rank_preferences", ... "description": "Saves the prefered response ('A' or 'B')", ... "parameters": { ... "type": "object", ... "properties": { ... "reasoning": { ... "type": "string", ... "description": "The reasoning behind the choice.", ... }, ... "preferred_option": { ... "type": "string", ... "enum": ["A", "B"], ... "description": "The preferred option, either 'A' or 'B'", ... }, ... }, ... "required": ["preferred_option"], ... }, ... }, ... } ... ] ... completion = openai.Client().chat.completions.create( ... model="gpt-3.5-turbo", ... messages=[ ... {"role": "system", "content": "Select the better response."}, ... { ... "role": "user", ... "content": f"Option A: {pred_a}" ... f"\n\nOption B: {pred_b}" ... f"\n\nGround Truth: {ground_truth}", ... }, ... ], ... tools=tools, ... tool_choice={ ... "type": "function", ... "function": {"name": "rank_preferences"}, ... }, ... ) ... tool_args = completion.choices[0].message.tool_calls[0].function.arguments ... loaded_args = json.loads(tool_args) ... preference = loaded_args["preferred_option"] ... comment = loaded_args["reasoning"] ... if preference == "A": ... return { ... "key": "ranked_preference", ... "scores": {runs[0].id: 1, runs[1].id: 0}, ... "comment": comment, ... } ... else: ... return { ... "key": "ranked_preference", ... "scores": {runs[0].id: 0, runs[1].id: 1}, ... "comment": comment, ... } >>> def score_length_difference(runs: list, example: schemas.Example): ... # Just return whichever response is longer. ... # Just an example, not actually useful in real life. ... assert len(runs) == 2 # Comparing 2 systems ... assert isinstance(example, schemas.Example) ... assert all(run.reference_example_id == example.id for run in runs) ... pred_a = runs[0].outputs["output"] ... pred_b = runs[1].outputs["output"] ... if len(pred_a) > len(pred_b): ... return { ... "key": "length_difference", ... "scores": {runs[0].id: 1, runs[1].id: 0}, ... } ... else: ... return { ... "key": "length_difference", ... "scores": {runs[0].id: 0, runs[1].id: 1}, ... } >>> results = evaluate_comparative( ... [results_1.experiment_name, results_2.experiment_name], ... evaluators=[score_preferences, score_length_difference], ... client=client, ... ) # doctest: +ELLIPSIS View the pairwise evaluation results at:... >>> eval_results = list(results) >>> assert len(eval_results) >= 10 # doctest: +SKIP >>> assert all( ... "feedback.ranked_preference" in r["evaluation_results"] ... for r in eval_results ... ) # doctest: +SKIP >>> assert all( ... "feedback.length_difference" in r["evaluation_results"] ... for r in eval_results ... 
)  # doctest: +SKIP
    """  # noqa: E501
    if len(experiments) < 2:
        raise ValueError("Comparative evaluation requires at least 2 experiments.")
    if not evaluators:
        raise ValueError(
            "At least one evaluator is required for comparative evaluation."
        )
    if max_concurrency < 0:
        raise ValueError("max_concurrency must be a positive integer.")
    client = client or rt.get_cached_client()

    # TODO: Add information about comparison experiments
    projects = [_load_experiment(experiment, client) for experiment in experiments]
    ref_datasets_ = [str(p.reference_dataset_id) for p in projects]
    if not len(set(ref_datasets_)) == 1:
        raise ValueError("All experiments must have the same reference dataset.")
    experiment_ids = [p.id for p in projects]

    if experiment_prefix is None:
        experiment_names = [p.name for p in projects if p.name is not None]
        experiment_name = (
            " vs. ".join(experiment_names) + "-" + str(uuid.uuid4().hex[:4])
        )
    else:
        experiment_name = experiment_prefix + "-" + str(uuid.uuid4().hex[:8])
    comparative_experiment_id = uuid.uuid4()
    comparative_experiment = client.create_comparative_experiment(
        experiment_name,
        experiments=experiment_ids,
        description=description,
        metadata=metadata,
        id=comparative_experiment_id,
    )
    _print_comparative_experiment_start(
        cast(
            Tuple[schemas.TracerSessionResult, schemas.TracerSessionResult],
            tuple(projects),
        ),
        comparative_experiment,
    )
    runs = [
        _load_traces(experiment, client, load_nested=load_nested)
        for experiment in experiments
    ]
    # Only check intersections for the experiments
    examples_intersection = None
    for runs_list in runs:
        example_ids_set = {run.reference_example_id for run in runs_list}
        if examples_intersection is None:
            examples_intersection = example_ids_set
        else:
            examples_intersection &= example_ids_set
    example_ids_nullable = (
        list(examples_intersection) if examples_intersection is not None else []
    )
    example_ids = [eid for eid in example_ids_nullable if eid is not None]
    # TODO: Warn if different dataset versions, etc. are used in the different
    # experiments. We aren't providing any training wheels here.
    batch_size = 99
    data = {}
    for i in range(0, len(example_ids), batch_size):
        example_ids_batch = example_ids[i : i + batch_size]
        for e in client.list_examples(
            dataset_id=projects[0].reference_dataset_id,
            as_of=projects[0].metadata.get("dataset_version"),
            example_ids=example_ids_batch,
        ):
            data[e.id] = e
    runs_dict: Dict[uuid.UUID, List[schemas.Run]] = collections.defaultdict(list)
    for runs_list in runs:
        for run in runs_list:
            if run.reference_example_id in data:
                runs_dict[cast(uuid.UUID, run.reference_example_id)].append(run)
    comparators = [comparison_evaluator(evaluator) for evaluator in evaluators or []]
    results: dict = {}

    def evaluate_and_submit_feedback(
        runs_list: list[schemas.Run],
        example: schemas.Example,
        comparator: DynamicComparisonRunEvaluator,
        executor: cf.Executor,
    ) -> ComparisonEvaluationResult:
        feedback_group_id = uuid.uuid4()
        if randomize_order:
            random.shuffle(runs_list)
        with rh.tracing_context(project_name="evaluators", client=client):
            result = comparator.compare_runs(runs_list, example)
            if client is None:
                raise ValueError("Client is required to submit feedback.")
        comments = (
            {str(rid): result.comment for rid in result.scores}
            if isinstance(result.comment, str)
            else (result.comment or {})
        )
        for run_id, score in result.scores.items():
            executor.submit(
                client.create_feedback,
                run_id=run_id,
                key=result.key,
                score=score,
                comment=comments.get(str(run_id)),
                comparative_experiment_id=comparative_experiment.id,
                source_run_id=result.source_run_id,
                feedback_group_id=feedback_group_id,
            )
        return result

    tqdm = _load_tqdm()
    with ls_utils.ContextThreadPoolExecutor(
        max_workers=max_concurrency or 1
    ) as executor:
        futures = []
        for example_id, runs_list in tqdm(runs_dict.items()):
            results[example_id] = {"runs": runs_list}
            for comparator in comparators:
                if max_concurrency > 1:
                    future = executor.submit(
                        evaluate_and_submit_feedback,
                        runs_list,
                        data[example_id],
                        comparator,
                        executor,
                    )
                    futures.append(future)
                else:
                    result = evaluate_and_submit_feedback(
                        runs_list, data[example_id], comparator, executor
                    )
                    results[example_id][f"feedback.{result.key}"] = result
        if futures:
            cf.wait(futures)
            for future in futures:
                result = future.result()
                results[example_id][f"feedback.{result.key}"] = result
    return ComparativeExperimentResults(results, data)
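# --- Illustrative usage sketch (editor's addition, not part of the library) ---
# Comparing two finished experiments with the pairwise evaluator sketched above and
# reading back the per-example results. The experiment names are placeholders.
def _example_evaluate_comparative_usage() -> None:
    results = evaluate_comparative(
        ("My Experiment-abc123", "My Experiment-def456"),
        evaluators=[_example_prefer_longer],
    )
    for item in results:  # each item pairs an Example with its pairwise feedback
        example = item["example"]
        feedback = item["evaluation_results"]["feedback.prefer_longer"]
        print(example.id if example else None, feedback.scores)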
[docs]
class ComparativeExperimentResults:
    """Represents the results of an evaluate_comparative() call.

    This class provides an iterator interface to iterate over the experiment results
    as they become available. It also provides methods to access the experiment name,
    the number of results, and to wait for the results to be processed.

    Methods:
        experiment_name() -> str: Returns the name of the experiment.
        wait() -> None: Waits for the experiment data to be processed.
    """
    def __getitem__(self, key):
        """Return the result associated with the given key."""
        return self._results[key]

    def __iter__(self):
        for key, value in self._results.items():
            yield {
                "example": self._examples[key] if self._examples else None,
                "evaluation_results": value,
            }
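# --- Illustrative sketch (editor's addition, not part of the library) ---
# __getitem__ above also allows direct lookup of the raw per-example entry (the
# compared runs plus any "feedback.<key>" results) by example id.
def _example_lookup_by_example_id(
    results: ComparativeExperimentResults, example_id: uuid.UUID
) -> dict:
    entry = results[example_id]
    return {
        "n_runs": len(entry["runs"]),
        "feedback_keys": [k for k in entry if k.startswith("feedback.")],
    }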
## Private APIdef_print_comparative_experiment_start(experiments:Tuple[schemas.TracerSession,schemas.TracerSession],comparative_experiment:schemas.ComparativeExperiment,)->None:url=experiments[0].urlorexperiments[1].urlifurl:project_url=url.split("?")[0]dataset_id=comparative_experiment.reference_dataset_idbase_url=project_url.split("/projects/p/")[0]comparison_url=(f"{base_url}/datasets/{dataset_id}/compare?"f"selectedSessions={'%2C'.join([str(e.id)foreinexperiments])}"f"&comparativeExperiment={comparative_experiment.id}")print(# noqa: T201f"View the pairwise evaluation results at:\n{comparison_url}\n\n")def_is_callable(target:Union[TARGET_T,Iterable[schemas.Run],Runnable])->bool:returncallable(target)or_is_langchain_runnable(target)def_evaluate(target:Union[TARGET_T,Iterable[schemas.Run],Runnable],/,data:DATA_T,evaluators:Optional[Sequence[EVALUATOR_T]]=None,summary_evaluators:Optional[Sequence[SUMMARY_EVALUATOR_T]]=None,metadata:Optional[dict]=None,experiment_prefix:Optional[str]=None,description:Optional[str]=None,max_concurrency:Optional[int]=None,num_repetitions:int=1,client:Optional[langsmith.Client]=None,blocking:bool=True,experiment:Optional[Union[schemas.TracerSession,str,uuid.UUID]]=None,upload_results:bool=True,)->ExperimentResults:# Initialize the experiment manager.client=clientorrt.get_cached_client()runs=Noneif_is_callable(target)elsecast(Iterable[schemas.Run],target)experiment_,runs=_resolve_experiment(experiment,runs,client,)manager=_ExperimentManager(data,client=client,metadata=metadata,experiment=experiment_orexperiment_prefix,description=description,num_repetitions=num_repetitions,# If provided, we don't need to create a new experiment.runs=runs,# Create or resolve the experiment.upload_results=upload_results,).start()cache_dir=ls_utils.get_cache_dir(None)cache_path=(pathlib.Path(cache_dir)/f"{manager.dataset_id}.yaml"ifcache_direlseNone)withls_utils.with_optional_cache(cache_path,ignore_hosts=[client.api_url]):if_is_callable(target):# Add predictions to the experiment.manager=manager.with_predictions(cast(TARGET_T,target),max_concurrency=max_concurrency)ifevaluators:# Apply evaluators to the predictions.manager=manager.with_evaluators(evaluators,max_concurrency=max_concurrency)ifsummary_evaluators:# Apply the experiment-level summary evaluators.manager=manager.with_summary_evaluators(summary_evaluators)# Start consuming the results.results=ExperimentResults(manager,blocking=blocking)returnresultsdef_is_uuid(value:str)->bool:try:uuid.UUID(value)returnTrueexceptValueError:returnFalsedef_load_experiment(project:Union[str,uuid.UUID],client:langsmith.Client)->schemas.TracerSessionResult:ifisinstance(project,uuid.UUID)or_is_uuid(project):returnclient.read_project(project_id=project)returnclient.read_project(project_name=project)def_load_traces(project:Union[str,uuid.UUID,schemas.TracerSession],client:langsmith.Client,load_nested:bool=False,)->List[schemas.Run]:"""Load nested traces for a given 
project."""is_root=Noneifload_nestedelseTrueifisinstance(project,schemas.TracerSession):runs=client.list_runs(project_id=project.id,is_root=is_root)elifisinstance(project,uuid.UUID)or_is_uuid(project):runs=client.list_runs(project_id=project,is_root=is_root)else:runs=client.list_runs(project_name=project,is_root=is_root)ifnotload_nested:returnlist(runs)treemap:DefaultDict[uuid.UUID,List[schemas.Run]]=collections.defaultdict(list)results=[]all_runs={}forruninruns:ifrun.parent_run_idisnotNone:treemap[run.parent_run_id].append(run)else:results.append(run)all_runs[run.id]=runforrun_id,child_runsintreemap.items():all_runs[run_id].child_runs=sorted(child_runs,key=lambdar:r.dotted_order)returnresultsdef_load_examples_map(client:langsmith.Client,project:schemas.TracerSession)->Dict[uuid.UUID,schemas.Example]:return{e.id:eforeinclient.list_examples(dataset_id=project.reference_dataset_id,as_of=project.metadata.get("dataset_version"),)}IT=TypeVar("IT")def_load_tqdm()->Callable[[IT],IT]:try:fromtqdm.autoimporttqdmexceptImportError:returnlambdax:xreturntqdm# type: ignore[return-value]ET=TypeVar("ET",bound="_ExperimentManagerMixin")class_ExperimentManagerMixin:def__init__(self,/,experiment:Optional[Union[schemas.TracerSession,str]],metadata:Optional[dict]=None,client:Optional[langsmith.Client]=None,description:Optional[str]=None,):self.client=clientorrt.get_cached_client()self._experiment:Optional[schemas.TracerSession]=NoneifexperimentisNone:self._experiment_name=_get_random_name()elifisinstance(experiment,str):self._experiment_name=experiment+"-"+str(uuid.uuid4().hex[:8])else:self._experiment_name=cast(str,experiment.name)self._experiment=experimentmetadata=metadataor{}ifnotmetadata.get("revision_id"):metadata={"revision_id":ls_env.get_langchain_env_var_metadata().get("revision_id"),**metadata,}self._metadata=metadataor{}self._description=description@propertydefexperiment_name(self)->str:ifself._experiment_nameisnotNone:returnself._experiment_nameraiseValueError("Experiment name not provided, and experiment not yet started.")def_get_experiment(self)->schemas.TracerSession:ifself._experimentisNone:raiseValueError("Experiment not started yet.")returnself._experimentdef_get_experiment_metadata(self):project_metadata=self._metadataor{}git_info=ls_env.get_git_info()ifgit_info:project_metadata={**project_metadata,"git":git_info,}ifself._experiment:project_metadata={**self._experiment.metadata,**project_metadata,}returnproject_metadatadef_create_experiment(self,dataset_id:uuid.UUID,metadata:dict)->schemas.TracerSession:# There is a chance of name collision, so we'll retrystarting_name=self._experiment_namenum_attempts=10for_inrange(num_attempts):try:returnself.client.create_project(self._experiment_name,description=self._description,reference_dataset_id=dataset_id,metadata=metadata,)exceptls_utils.LangSmithConflictError:self._experiment_name=f"{starting_name}-{str(uuid.uuid4().hex[:6])}"raiseValueError(f"Could not find a unique experiment name in {num_attempts} attempts."" Please try again with a different experiment name.")def_get_project(self,first_example:schemas.Example)->schemas.TracerSession:ifself._experimentisNone:project_metadata=self._get_experiment_metadata()project=self._create_experiment(first_example.dataset_id,project_metadata)else:project=self._experimentreturnprojectdef_print_experiment_start(self,project:Optional[schemas.TracerSession],first_example:schemas.Example)->None:ifprojectandproject.url:# TODO: Make this a public 
APIproject_url=project.url.split("?")[0]dataset_id=first_example.dataset_idbase_url=project_url.split("/projects/p/")[0]comparison_url=(f"{base_url}/datasets/{dataset_id}/compare?"f"selectedSessions={project.id}")print(# noqa: T201f"View the evaluation results for experiment: '{self.experiment_name}'"f" at:\n{comparison_url}\n\n")else:# HACKHACKprint(# noqa: T201"Starting evaluation of experiment: %s",self.experiment_name)class_ExperimentManager(_ExperimentManagerMixin):"""Manage the execution of experiments. Supports lazily running predictions and evaluations in parallel to facilitate result streaming and early debugging. Args: data (DATA_T): The data used for the experiment. Can be a dataset name or ID OR a generator of examples. num_repetitions (int): The number of times to run over the data. runs (Optional[Iterable[schemas.Run]]): The runs associated with the experiment predictions. experiment (Optional[schemas.TracerSession]): The tracer session associated with the experiment. experiment_prefix (Optional[str]): The prefix for the experiment name. metadata (Optional[dict]): Additional metadata for the experiment. client (Optional[langsmith.Client]): The Langsmith client used for the experiment. evaluation_results (Optional[Iterable[EvaluationResults]]): The evaluation sresults for the experiment. summary_results (Optional[Iterable[EvaluationResults]]): The aggregate results for the experiment. """def__init__(self,data:DATA_T,/,experiment:Optional[Union[schemas.TracerSession,str]],metadata:Optional[dict]=None,client:Optional[langsmith.Client]=None,runs:Optional[Iterable[schemas.Run]]=None,evaluation_results:Optional[Iterable[EvaluationResults]]=None,summary_results:Optional[Iterable[EvaluationResults]]=None,description:Optional[str]=None,num_repetitions:int=1,upload_results:bool=True,):super().__init__(experiment=experiment,metadata=metadata,client=client,description=description,)self._data=dataself._examples:Optional[Iterable[schemas.Example]]=Noneself._runs=runsself._evaluation_results=evaluation_resultsself._summary_results=summary_resultsself._num_repetitions=num_repetitionsself._upload_results=upload_results@propertydefexamples(self)->Iterable[schemas.Example]:ifself._examplesisNone:self._examples=_resolve_data(self._data,client=self.client)ifself._num_repetitions>1:self._examples=itertools.chain.from_iterable(itertools.tee(self._examples,self._num_repetitions))self._examples,examples_iter=itertools.tee(self._examples)returnexamples_iter@propertydefdataset_id(self)->str:ifself._experimentisNoneornotgetattr(self._experiment,"reference_dataset_id",None):example=next(iter(self.examples))returnstr(example.dataset_id)returnstr(cast(schemas.TracerSessionResult,self._experiment).reference_dataset_id)@propertydefevaluation_results(self)->Iterable[EvaluationResults]:ifself._evaluation_resultsisNone:return({"results":[]}for_inself.examples)returnself._evaluation_results@propertydefruns(self)->Iterable[schemas.Run]:ifself._runsisNone:raiseValueError("Runs not provided in this experiment."" Please predict 
first.")self._runs,runs_iter=itertools.tee(self._runs)returnruns_iterdefstart(self)->_ExperimentManager:first_example=next(itertools.islice(self.examples,1))project=self._get_project(first_example)ifself._upload_resultselseNoneself._print_experiment_start(project,first_example)self._metadata["num_repetitions"]=self._num_repetitionsreturnself.__class__(self.examples,experiment=project,metadata=self._metadata,client=self.client,runs=self._runs,evaluation_results=self._evaluation_results,upload_results=self._upload_results,)defwith_predictions(self,target:TARGET_T,/,max_concurrency:Optional[int]=None,)->_ExperimentManager:"""Lazily apply the target function to the experiment."""context=copy_context()_experiment_results=context.run(self._predict,target,max_concurrency=max_concurrency)r1,r2=itertools.tee(_experiment_results,2)return_ExperimentManager((pred["example"]forpredinr1),experiment=self._experiment,metadata=self._metadata,client=self.client,runs=(pred["run"]forpredinr2),upload_results=self._upload_results,# TODO: Can't do multiple prediction rounds rn.)defwith_evaluators(self,evaluators:Sequence[Union[EVALUATOR_T,RunEvaluator,]],*,max_concurrency:Optional[int]=None,)->_ExperimentManager:"""Lazily apply the provided evaluators to the experiment."""evaluators=_resolve_evaluators(evaluators)context=copy_context()experiment_results=context.run(self._score,evaluators,max_concurrency=max_concurrency)# Split the generator into three so the manager# can consume each value individually.r1,r2,r3=itertools.tee(experiment_results,3)return_ExperimentManager((result["example"]forresultinr1),experiment=self._experiment,metadata=self._metadata,client=self.client,runs=(result["run"]forresultinr2),evaluation_results=(result["evaluation_results"]forresultinr3),summary_results=self._summary_results,upload_results=self._upload_results,)defwith_summary_evaluators(self,summary_evaluators:Sequence[SUMMARY_EVALUATOR_T],)->_ExperimentManager:"""Lazily apply the provided summary evaluators to the experiment."""wrapped_evaluators=_wrap_summary_evaluators(summary_evaluators)context=copy_context()aggregate_feedback_gen=context.run(self._apply_summary_evaluators,wrapped_evaluators)return_ExperimentManager(self.examples,experiment=self._experiment,metadata=self._metadata,client=self.client,runs=self.runs,evaluation_results=self._evaluation_results,summary_results=aggregate_feedback_gen,upload_results=self._upload_results,)defget_results(self)->Iterable[ExperimentResultRow]:"""Return the traces, evaluation results, and associated examples."""forrun,example,evaluation_resultsinzip(self.runs,self.examples,self.evaluation_results):yieldExperimentResultRow(run=run,example=example,evaluation_results=evaluation_results,)defget_summary_scores(self)->Dict[str,List[dict]]:"""If summary_evaluators were applied, consume and return the results."""ifself._summary_resultsisNone:return{"results":[]}# Consume the generatorreturn{"results":[res# type: ignore[misc]forresultsinself._summary_resultsforresinresults["results"]]}# Private methodsdef_predict(self,target:TARGET_T,/,max_concurrency:Optional[int]=None)->Generator[_ForwardResults,None,None]:"""Run the target function on the 
examples."""fn=_ensure_traceable(target)ifmax_concurrency==0:forexampleinself.examples:yield_forward(fn,example,self.experiment_name,self._metadata,self.client,self._upload_results,)else:withls_utils.ContextThreadPoolExecutor(max_concurrency)asexecutor:futures=[executor.submit(_forward,fn,example,self.experiment_name,self._metadata,self.client,self._upload_results,)forexampleinself.examples]forfutureincf.as_completed(futures):yieldfuture.result()# Close out the project.self._end()def_run_evaluators(self,evaluators:Sequence[RunEvaluator],current_results:ExperimentResultRow,executor:cf.ThreadPoolExecutor,)->ExperimentResultRow:current_context=rh.get_tracing_context()metadata={**(current_context["metadata"]or{}),**{"experiment":self.experiment_name,"reference_example_id":current_results["example"].id,"reference_run_id":current_results["run"].id,},}withrh.tracing_context(**{**current_context,"project_name":"evaluators","metadata":metadata,"enabled":"local"ifnotself._upload_resultselseTrue,"client":self.client,}):run=current_results["run"]example=current_results["example"]eval_results=current_results["evaluation_results"]forevaluatorinevaluators:try:evaluator_response=evaluator.evaluate_run(run=run,example=example,)eval_results["results"].extend(self.client._select_eval_results(evaluator_response))ifself._upload_results:# TODO: This is a hackself.client._log_evaluation_feedback(evaluator_response,run=run,_executor=executor)exceptExceptionase:try:feedback_keys=_extract_feedback_keys(evaluator)error_response=EvaluationResults(results=[EvaluationResult(key=key,source_run_id=run.id,comment=repr(e),extra={"error":True},)forkeyinfeedback_keys])eval_results["results"].extend(self.client._select_eval_results(error_response))ifself._upload_results:# TODO: This is a hackself.client._log_evaluation_feedback(error_response,run=run,_executor=executor)exceptExceptionase2:logger.debug(f"Error parsing feedback keys: {e2}")passlogger.error(f"Error running evaluator {repr(evaluator)} on"f" run {run.idifrunelse''}: {repr(e)}",exc_info=True,)returnExperimentResultRow(run=run,example=example,evaluation_results=eval_results,)def_score(self,evaluators:Sequence[RunEvaluator],max_concurrency:Optional[int]=None,)->Iterable[ExperimentResultRow]:"""Run the evaluators on the prediction stream. Expects runs to be available in the manager. (e.g. 
from a previous prediction step) """withls_utils.ContextThreadPoolExecutor(max_workers=max_concurrency)asexecutor:ifmax_concurrency==0:context=copy_context()forcurrent_resultsinself.get_results():yieldcontext.run(self._run_evaluators,evaluators,current_results,executor,)else:futures=set()forcurrent_resultsinself.get_results():futures.add(executor.submit(self._run_evaluators,evaluators,current_results,executor,))try:# Since prediction may be slow, yield (with a timeout) to# allow for early results to be emitted.forfutureincf.as_completed(futures,timeout=0.001):yieldfuture.result()futures.remove(future)except(cf.TimeoutError,TimeoutError):passforfutureincf.as_completed(futures):result=future.result()yieldresultdef_apply_summary_evaluators(self,summary_evaluators:Sequence[SUMMARY_EVALUATOR_T])->Generator[EvaluationResults,None,None]:runs,examples=[],[]forrun,exampleinzip(self.runs,self.examples):runs.append(run)examples.append(example)aggregate_feedback=[]withls_utils.ContextThreadPoolExecutor()asexecutor:project_id=self._get_experiment().idifself._upload_resultselseNonecurrent_context=rh.get_tracing_context()metadata={**(current_context["metadata"]or{}),**{"experiment":self.experiment_name,"experiment_id":project_id,},}withrh.tracing_context(**{**current_context,"project_name":"evaluators","metadata":metadata,"client":self.client,"enabled":"local"ifnotself._upload_resultselseTrue,}):forevaluatorinsummary_evaluators:try:summary_eval_result=evaluator(runs,examples)# TODO: Expose public API for this.flattened_results=self.client._select_eval_results(summary_eval_result,fn_name=evaluator.__name__,)aggregate_feedback.extend(flattened_results)ifself._upload_results:forresultinflattened_results:feedback=result.dict(exclude={"target_run_id"})evaluator_info=feedback.pop("evaluator_info",None)executor.submit(self.client.create_feedback,**feedback,run_id=None,project_id=project_id,source_info=evaluator_info,)exceptExceptionase:logger.error(f"Error running summary evaluator {repr(evaluator)}: {e}",exc_info=True,)yield{"results":aggregate_feedback}def_get_dataset_version(self)->Optional[str]:examples=list(self.examples)modified_at=[ex.modified_atforexinexamplesifex.modified_at]# Should always be defined in practice when fetched,# but the typing permits Nonemax_modified_at=max(modified_at)ifmodified_atelseNonereturnmax_modified_at.isoformat()ifmax_modified_atelseNonedef_get_dataset_splits(self)->Optional[list[str]]:examples=list(self.examples)splits=set()forexampleinexamples:if(example.metadataandexample.metadata.get("dataset_split")andisinstance(example.metadata["dataset_split"],list)):forsplitinexample.metadata["dataset_split"]:ifisinstance(split,str):splits.add(split)else:splits.add("base")returnlist(splits)def_end(self)->None:ifnotself._upload_results:returnexperiment=self._experimentifexperimentisNone:raiseValueError("Experiment not started 
yet.")project_metadata=self._get_experiment_metadata()project_metadata["dataset_version"]=self._get_dataset_version()project_metadata["dataset_splits"]=self._get_dataset_splits()self.client.update_project(experiment.id,end_time=experiment.end_timeordatetime.datetime.now(datetime.timezone.utc),metadata={**experiment.metadata,**project_metadata,},)def_resolve_evaluators(evaluators:Sequence[Union[EVALUATOR_T,RunEvaluator,AEVALUATOR_T]],)->Sequence[RunEvaluator]:results=[]forevaluatorinevaluators:ifisinstance(evaluator,RunEvaluator):results.append(evaluator)elifisinstance(evaluator,LangChainStringEvaluator):results.append(evaluator.as_run_evaluator())else:results.append(run_evaluator(evaluator))returnresultsdef_wrap_summary_evaluators(evaluators:Sequence[SUMMARY_EVALUATOR_T],)->List[SUMMARY_EVALUATOR_T]:def_wrap(evaluator:SUMMARY_EVALUATOR_T)->SUMMARY_EVALUATOR_T:eval_name=getattr(evaluator,"__name__","BatchEvaluator")evaluator=_normalize_summary_evaluator(evaluator)@functools.wraps(evaluator)def_wrapper_inner(runs:Sequence[schemas.Run],examples:Sequence[schemas.Example])->Union[EvaluationResult,EvaluationResults]:@rh.traceable(name=eval_name)def_wrapper_super_inner(runs_:str,examples_:str)->Union[EvaluationResult,EvaluationResults]:returnevaluator(list(runs),list(examples))return_wrapper_super_inner(f"Runs[] (Length={len(runs)})",f"Examples[] (Length={len(examples)})")return_wrapper_innerresults=[]forevaluatorinevaluators:results.append(_wrap(evaluator))returnresultsclass_ForwardResults(TypedDict):run:schemas.Runexample:schemas.Exampledef_forward(fn:rh.SupportsLangsmithExtra,example:schemas.Example,experiment_name:str,metadata:dict,client:langsmith.Client,upload_results:bool,)->_ForwardResults:run:Optional[schemas.RunBase]=Nonedef_get_run(r:rt.RunTree)->None:nonlocalrunrun=rwithrh.tracing_context(enabled="local"ifnotupload_resultselseTrue):example_version=(example.modified_at.isoformat()ifexample.modified_atelseexample.created_at.isoformat())langsmith_extra=rh.LangSmithExtra(reference_example_id=example.id,on_end=_get_run,project_name=experiment_name,metadata={**metadata,"example_version":example_version},client=client,)try:fn(example.inputs,langsmith_extra=langsmith_extra)exceptExceptionase:logger.error(f"Error running target function: {e}",exc_info=True,stacklevel=1)return_ForwardResults(run=cast(schemas.Run,run),example=example)def_resolve_data(data:DATA_T,*,client:langsmith.Client)->Iterable[schemas.Example]:"""Return the examples for the given dataset."""ifisinstance(data,str):returnclient.list_examples(dataset_name=data)elifisinstance(data,uuid.UUID):returnclient.list_examples(dataset_id=data)elifisinstance(data,schemas.Dataset):returnclient.list_examples(dataset_id=data.id)returndatadef_ensure_traceable(target:TARGET_T|rh.SupportsLangsmithExtra[[dict],dict]|Runnable,)->rh.SupportsLangsmithExtra[[dict],dict]:"""Ensure the target function is traceable."""ifnot_is_callable(target):raiseValueError("Target must be a callable function or a langchain/langgraph object. 
For ""example:\n\n""def predict(inputs: dict) -> dict:\n"" # do work, like chain.invoke(inputs)\n"" return {...}\n\n""evaluate(\n"" predict,\n"" ...\n"")")ifrh.is_traceable_function(target):fn:rh.SupportsLangsmithExtra[[dict],dict]=targetelse:if_is_langchain_runnable(target):target=target.invoke# type: ignore[union-attr]fn=rh.traceable(name="Target")(cast(Callable,target))returnfndef_resolve_experiment(experiment:Optional[Union[schemas.TracerSession,str,uuid.UUID]],runs:Optional[Iterable[schemas.Run]],client:langsmith.Client,)->Tuple[Optional[Union[schemas.TracerSession,str]],Optional[Iterable[schemas.Run]]]:# TODO: Remove this, handle outside the managerifexperimentisnotNone:ifisinstance(experiment,schemas.TracerSession):experiment_=experimentelse:experiment_=_load_experiment(experiment,client)ifnotexperiment_.name:raiseValueError("Experiment name must be defined if provided.")ifnotexperiment_.reference_dataset_id:raiseValueError("Experiment must have an associated reference_dataset_id, ""but none was provided.")returnexperiment_,runs# If we have runs, that means the experiment was already started.ifrunsisnotNone:runs_,runs=itertools.tee(runs)first_run=next(runs_)experiment_=client.read_project(project_id=first_run.session_id)ifnotexperiment_.name:raiseValueError("Experiment name not found for provided runs.")returnexperiment_,runsreturnNone,Nonedef_get_random_name()->str:fromlangsmith.evaluation._name_generationimportrandom_name# noqa: F401returnrandom_name()def_extract_feedback_keys(evaluator:RunEvaluator):ifisinstance(evaluator,DynamicRunEvaluator):ifgetattr(evaluator,"func",None):return_extract_code_evaluator_feedback_keys(evaluator.func)elifgetattr(evaluator,"afunc",None):return_extract_code_evaluator_feedback_keys(evaluator.afunc)# TODO: Support for DynamicComparisonRunEvaluatorifhasattr(evaluator,"evaluator"):# 
LangChainStringEvaluatorifgetattr(getattr(evaluator,"evaluator"),"evaluation_name",None):return[evaluator.evaluator.evaluation_name]return[]def_extract_code_evaluator_feedback_keys(func:Callable)->list[str]:python_code=inspect.getsource(func)defextract_dict_keys(node):ifisinstance(node,ast.Dict):keys=[]key_value=Noneforkey,valueinzip(node.keys,node.values):ifisinstance(key,(ast.Str,ast.Constant)):key_str=key.sifisinstance(key,ast.Str)elsekey.valueifkey_str=="key"andisinstance(value,(ast.Str,ast.Constant)):key_value=(value.sifisinstance(value,ast.Str)elsevalue.value)return[key_value]ifkey_valueelsekeyselif(isinstance(node,ast.Call)andisinstance(node.func,ast.Name)andnode.func.id=="dict"):forkeywordinnode.keywords:ifkeyword.arg=="key"andisinstance(keyword.value,(ast.Str,ast.Constant)):return[(keyword.value.sifisinstance(keyword.value,ast.Str)elsekeyword.value.value)]return[]defextract_evaluation_result_key(node):if(isinstance(node,ast.Call)andisinstance(node.func,ast.Name)andnode.func.id=="EvaluationResult"):forkeywordinnode.keywords:ifkeyword.arg=="key"andisinstance(keyword.value,(ast.Str,ast.Constant)):return[(keyword.value.sifisinstance(keyword.value,ast.Str)elsekeyword.value.value)]return[]defextract_evaluation_results_keys(node,variables):if(isinstance(node,ast.Call)andisinstance(node.func,ast.Name)andnode.func.id=="EvaluationResults"):forkeywordinnode.keywords:ifkeyword.arg=="results":ifisinstance(keyword.value,ast.Name):returnvariables.get(keyword.value.id,[])elifisinstance(keyword.value,ast.List):keys=[]foreltinkeyword.value.elts:keys.extend(extract_evaluation_result_key(elt))returnkeyselifisinstance(node,ast.Dict):forkey,valueinzip(node.keys,node.values):ifisinstance(key,(ast.Str,ast.Constant))andkey.s=="results":ifisinstance(value,ast.List):keys=[]foreltinvalue.elts:ifisinstance(elt,ast.Dict):forelt_key,elt_valueinzip(elt.keys,elt.values):if(isinstance(elt_key,(ast.Str,ast.Constant))andelt_key.s=="key"):ifisinstance(elt_value,(ast.Str,ast.Constant)):keys.append(elt_value.s)elif(isinstance(elt,ast.Call)andisinstance(elt.func,ast.Name)andelt.func.idin("EvaluationResult","dict")):forkeywordinelt.keywords:ifkeyword.arg=="key"andisinstance(keyword.value,(ast.Str,ast.Constant)):keys.append(keyword.value.sifisinstance(keyword.value,ast.Str)elsekeyword.value.value)returnkeysreturn[]python_code=textwrap.dedent(python_code)try:tree=ast.parse(python_code)function_def=tree.body[0]ifnotisinstance(function_def,(ast.FunctionDef,ast.AsyncFunctionDef)):return[]variables={}keys=[]fornodeinast.walk(function_def):ifisinstance(node,ast.Assign):ifisinstance(node.value,ast.List):list_keys=[]foreltinnode.value.elts:list_keys.extend(extract_evaluation_result_key(elt))ifisinstance(node.targets[0],ast.Name):variables[node.targets[0].id]=list_keyselifisinstance(node,ast.Return)andnode.valueisnotNone:dict_keys=extract_dict_keys(node.value)eval_result_key=extract_evaluation_result_key(node.value)eval_results_keys=extract_evaluation_results_keys(node.value,variables)keys.extend(dict_keys)keys.extend(eval_result_key)keys.extend(eval_results_keys)# If no keys found, return the function namereturnkeysifkeyselse[function_def.name]exceptSyntaxError:return[]def_to_pandas(results:list[ExperimentResultRow],start:Optional[int]=0,end:Optional[int]=None,):try:importpandasaspdexceptImportErrorase:raiseImportError("The 'pandas' library is required to use the 'to_pandas' function. 
""Please install it using 'pip install pandas' or ""'conda install pandas' before calling this method.")fromereturnpd.DataFrame(_flatten_experiment_results(results,start=start,end=end))def_flatten_experiment_results(results:list[ExperimentResultRow],start:Optional[int]=0,end:Optional[int]=None,):return[{**{f"inputs.{k}":vfork,vinx["example"].inputs.items()},**{f"outputs.{k}":vfork,vin(x["run"].outputsor{}).items()},"error":x["run"].error,**({f"reference.{k}":vfork,vinx["example"].outputs.items()}ifx["example"].outputsisnotNoneelse{}),**{f"feedback.{r.key}":r.scoreifr.scoreisnotNoneelser.valueforrinx["evaluation_results"]["results"]},"execution_time":((x["run"].end_time-x["run"].start_time).total_seconds()ifx["run"].end_timeelseNone),"example_id":x["run"].reference_example_id,"id":x["run"].id,}forxinresults[start:end]]@functools.lru_cache(maxsize=1)def_import_langchain_runnable()->Optional[type]:try:fromlangchain_core.runnablesimportRunnablereturnRunnableexceptImportError:returnNonedef_is_langchain_runnable(o:Any)->bool:returnbool((Runnable:=_import_langchain_runnable())andisinstance(o,Runnable))