From 4eef3336f5df6ceb6added137cd1f695a01dc0d7 Mon Sep 17 00:00:00 2001
From: Apurva Koti
Date: Mon, 13 May 2024 13:20:57 -0500
Subject: [PATCH] rename

Signed-off-by: Apurva Koti
---
 mlflow/metrics/genai/__init__.py          | 4 ++--
 mlflow/metrics/genai/genai_metric.py      | 6 +++---
 tests/metrics/genai/test_genai_metrics.py | 6 +++---
 3 files changed, 8 insertions(+), 8 deletions(-)

diff --git a/mlflow/metrics/genai/__init__.py b/mlflow/metrics/genai/__init__.py
index 2b8c93f388588..b35fa3cd19569 100644
--- a/mlflow/metrics/genai/__init__.py
+++ b/mlflow/metrics/genai/__init__.py
@@ -1,5 +1,5 @@
 from mlflow.metrics.genai.base import EvaluationExample
-from mlflow.metrics.genai.genai_metric import make_custom_genai_metric, make_genai_metric
+from mlflow.metrics.genai.genai_metric import make_genai_metric_from_prompt, make_genai_metric
 from mlflow.metrics.genai.metric_definitions import (
     answer_correctness,
     answer_relevance,
@@ -11,7 +11,7 @@
 __all__ = [
     "EvaluationExample",
     "make_genai_metric",
-    "make_custom_genai_metric",
+    "make_genai_metric_from_prompt",
     "answer_similarity",
     "answer_correctness",
     "faithfulness",
diff --git a/mlflow/metrics/genai/genai_metric.py b/mlflow/metrics/genai/genai_metric.py
index 530388f74061c..563fb10a182c0 100644
--- a/mlflow/metrics/genai/genai_metric.py
+++ b/mlflow/metrics/genai/genai_metric.py
@@ -180,7 +180,7 @@ def aggregate_function(aggregate_option, scores):
 
 
 @experimental
-def make_custom_genai_metric(
+def make_genai_metric_from_prompt(
     name: str,
     judge_prompt: Optional[str] = None,
     model: Optional[str] = _get_default_model(),
@@ -228,9 +228,9 @@
         :test:
         :caption: Example for creating a genai metric
 
-        from mlflow.metrics.genai import make_custom_genai_metric
+        from mlflow.metrics.genai import make_genai_metric_from_prompt
 
-        metric = make_custom_genai_metric(
+        metric = make_genai_metric_from_prompt(
             name="ease_of_understanding",
             judge_prompt=(
                 "You must evaluate the output of a bot based on how easy it is to "
diff --git a/tests/metrics/genai/test_genai_metrics.py b/tests/metrics/genai/test_genai_metrics.py
index a1b7587c3c965..e43dd83b74c1b 100644
--- a/tests/metrics/genai/test_genai_metrics.py
+++ b/tests/metrics/genai/test_genai_metrics.py
@@ -11,7 +11,7 @@
 from mlflow.metrics.genai.genai_metric import (
     _extract_score_and_justification,
     _format_args_string,
-    make_custom_genai_metric,
+    make_genai_metric_from_prompt,
     make_genai_metric,
 )
 from mlflow.metrics.genai.metric_definitions import (
@@ -1066,7 +1066,7 @@ def test_make_genai_metric_metric_metadata():
 
 def test_make_custom_judge_prompt_genai_metric():
     custom_judge_prompt = "This is a custom judge prompt that uses {input} and {output}"
-    custom_judge_prompt_metric = make_custom_genai_metric(
+    custom_judge_prompt_metric = make_genai_metric_from_prompt(
         name="custom",
         judge_prompt=custom_judge_prompt,
         metric_metadata={"metadata_field": "metadata_value"},
@@ -1120,7 +1120,7 @@ def test_make_custom_judge_prompt_genai_metric():
 
 def test_make_custom_prompt_genai_metric_validates_input_kwargs():
     custom_judge_prompt = "This is a custom judge prompt that uses {input} and {output}"
-    custom_judge_prompt_metric = make_custom_genai_metric(
+    custom_judge_prompt_metric = make_genai_metric_from_prompt(
         name="custom",
         judge_prompt=custom_judge_prompt,
     )
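
Usage note (not part of the patch): the sketch below shows how the renamed entry point is called, assembled from the test changes above. The metric name, judge prompt, and metadata values are simply the ones exercised in the updated tests; treat it as an illustrative example rather than the project's canonical usage.

    from mlflow.metrics.genai import make_genai_metric_from_prompt

    # Build a judge-prompt-based metric; the {input} and {output} placeholders
    # are expected to be supplied at evaluation time (the second test above
    # validates those kwargs).
    custom_metric = make_genai_metric_from_prompt(
        name="custom",
        judge_prompt="This is a custom judge prompt that uses {input} and {output}",
        metric_metadata={"metadata_field": "metadata_value"},
    )

The returned metric object is consumed the same way as one built with make_genai_metric, for example via the extra_metrics argument of mlflow.evaluate, though that wiring is outside the scope of this patch.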