diff --git a/tests/flow/rater/test_rater_flow.py b/tests/flow/rater/test_rater_flow.py new file mode 100644 index 00000000..a3a65ecf --- /dev/null +++ b/tests/flow/rater/test_rater_flow.py @@ -0,0 +1,55 @@ +import unittest +from unittest.mock import MagicMock, patch +from uniflow.flow.rater.rater_flow import RaterFlow +from uniflow.node import Node +from uniflow.op.prompt import PromptTemplate + +class TestRaterFlow(unittest.TestCase): + @patch('uniflow.flow.rater.rater_flow.HuggingfaceJsonFormattedLLMRater') + @patch('uniflow.flow.rater.rater_flow.OpenAIJsonFormattedLLMRater') + @patch('uniflow.flow.rater.rater_flow.LmRaterModel') + @patch('uniflow.flow.rater.rater_flow.ModelOp') + def setUp(self, mock_model_op, mock_lm_rater_model, mock_openai_rater_model, mock_huggingface_rater_model): + self.mock_model_op = mock_model_op + self.mock_lm_rater_model = mock_lm_rater_model + self.mock_openai_rater_model = mock_openai_rater_model + self.mock_huggingface_rater_model = mock_huggingface_rater_model + self.mock_lm_rater_model.return_value = MagicMock() + self.prompt_template = PromptTemplate(instruction="instruction", few_shot_prompt=[{}]) + self.model_config_openai = {"response_format": {"type": "json_object"}, "model_server": "OpenAI"} + self.model_config_huggingface = {"response_format": {"type": "json_object"}, "model_server": "HuggingFace"} + self.model_config_rater = {"response_format": {"type": "other"}, "model_server": "open_ai"} + self.label2score = {"label1": 1.0, "label2": 2.0} + self.rater_flow_openai = RaterFlow(self.prompt_template, self.model_config_openai, self.label2score) + self.rater_flow_huggingface = RaterFlow(self.prompt_template, self.model_config_huggingface, self.label2score) + self.rater_flow_rater = RaterFlow(self.prompt_template, self.model_config_rater, self.label2score) + + def test_init_json_openAI(self): + self.mock_model_op.assert_called() + self.mock_openai_rater_model.assert_called_once_with(prompt_template=self.prompt_template, 
model_config=self.model_config_openai, label2score=self.label2score) + + def test_init_json_huggingface(self): + self.mock_model_op.assert_called() + self.mock_huggingface_rater_model.assert_called_once_with(prompt_template=self.prompt_template, model_config=self.model_config_huggingface, label2score=self.label2score) + + def test_init_not_json(self): + self.mock_model_op.assert_called() + self.mock_lm_rater_model.assert_called_once_with(prompt_template=self.prompt_template, model_config=self.model_config_rater, label2score=self.label2score) + + def test_run_openai(self): + nodes = [Node(name="node1", value_dict={"a": 1}), Node(name="node2", value_dict={"b": 2})] + self.rater_flow_openai.run(nodes) + self.mock_model_op.return_value.assert_called_once_with(nodes) + + def test_run_huggingface(self): + nodes = [Node(name="node1", value_dict={"a": 1}), Node(name="node2", value_dict={"b": 2})] + self.rater_flow_huggingface.run(nodes) + self.mock_model_op.return_value.assert_called_once_with(nodes) + + def test_run_not_json(self): + nodes = [Node(name="node1", value_dict={"a": 1}), Node(name="node2", value_dict={"b": 2})] + self.rater_flow_rater.run(nodes) + self.mock_model_op.return_value.assert_called_once_with(nodes) + +if __name__ == '__main__': + unittest.main() \ No newline at end of file diff --git a/tests/flow/transform/test_azure_openai.py b/tests/flow/transform/test_azure_openai.py new file mode 100644 index 00000000..b2bf7cf2 --- /dev/null +++ b/tests/flow/transform/test_azure_openai.py @@ -0,0 +1,85 @@ +import unittest +from unittest.mock import patch + +from uniflow.flow.transform.transform_azure_openai_flow import AzureOpenAIModelFlow +from uniflow.node import Node +from uniflow.op.prompt import PromptTemplate + + +class TestAzureOpenAIModelFlow(unittest.TestCase): + @patch("uniflow.flow.transform.transform_azure_openai_flow.ModelOp") + @patch("uniflow.flow.transform.transform_azure_openai_flow.LmModel") + 
@patch("uniflow.flow.transform.transform_azure_openai_flow.JsonLmModel") + def setUp(self, mock_json_model, mock_lm_model, mock_model_op): + self.mock_json_model = mock_json_model + self.mock_lm_model = mock_lm_model + self.mock_model_op = mock_model_op + + self.prompt_template0 = None + self.prompt_template = PromptTemplate( + instruction="instruction", few_shot_prompt=[{}] + ) + + self.model_config0 = None + self.model_config1 = {"response_format": {"type": "json_object"}} + self.model_config2 = {"response_format": {"type": "other"}} + self.azure_flow1 = AzureOpenAIModelFlow( + self.prompt_template, self.model_config1 + ) + self.azure_flow2 = AzureOpenAIModelFlow( + self.prompt_template, self.model_config2 + ) + + def test_prompt_template_none(self): + """Test AzureOpenAIModelFlow initialization with prompt_template=None.""" + model_config = self.model_config1 # Mocked model config + + with self.assertRaises(ValueError): + AzureOpenAIModelFlow(prompt_template=None, model_config=model_config) + + def test_model_config_none(self): + """Test AzureOpenAIModelFlow initialization with model_config=None.""" + prompt_template = self.prompt_template # Mocked or real prompt template + + with self.assertRaises(ValueError): + AzureOpenAIModelFlow(prompt_template=prompt_template, model_config=None) + + def test_init_success(self): + self.mock_json_model.assert_called_once_with( + prompt_template=self.prompt_template, model_config=self.model_config1 + ) + self.mock_model_op.assert_called() + + def test_not_json_init(self): + self.mock_lm_model.assert_called_once_with( + prompt_template=self.prompt_template, model_config=self.model_config2 + ) + self.mock_model_op.assert_called() + + # def test_call_with_empty_node(self, mock_read_file): + # # arrange + # nodes = [] + + # # act + # output_nodes = self.extract_txt_op(nodes) + + # # assert + # mock_read_file.assert_not_called() + # self.assertEqual(len(output_nodes), 0) + def test_run(self): + node1 = Node(name="node1", 
value_dict={"a": 1}) + result = self.azure_flow1.run(node1) + self.mock_model_op.return_value.assert_called_once_with(node1) + expected_result = self.mock_model_op.return_value(node1) + self.assertEqual(result, expected_result) + # self.assertEqual(result, self.mock_model_op.return_value(node1)) + + def test_not_json_run(self): + node1 = Node(name="node1", value_dict={"a": 1}) + result = self.azure_flow2.run(node1) + self.mock_model_op.return_value.assert_called_once_with(node1) + self.assertEqual(result, self.mock_model_op.return_value(node1)) + + +if __name__ == "__main__": + unittest.main() diff --git a/tests/flow/transform/test_copy_flow.py b/tests/flow/transform/test_copy_flow.py new file mode 100644 index 00000000..5a1ee209 --- /dev/null +++ b/tests/flow/transform/test_copy_flow.py @@ -0,0 +1,31 @@ +import unittest +from unittest.mock import patch + +from uniflow.flow.transform.transform_copy_flow import TransformCopyFlow +from uniflow.node import Node +from uniflow.op.prompt import PromptTemplate + + +class TestTransformCopyFlow(unittest.TestCase): + @patch("uniflow.flow.transform.transform_copy_flow.CopyOp") + def setUp(self, mock_copy_op): + self.mock_copy_op = mock_copy_op + self.prompt_template = PromptTemplate( + instruction="instruction", few_shot_prompt=[{}] + ) + self.model_config = {"response_format": {"type": "json_object"}} + self.copy_flow = TransformCopyFlow(self.prompt_template, self.model_config) + + def test_init(self): + self.mock_copy_op.assert_called_once_with(name="copy_op") + + def test_run(self): + node1 = Node(name="node1", value_dict={"a": 1}) + result = self.copy_flow.run(node1) + self.mock_copy_op.return_value.assert_called_once_with(node1) + expected_result = self.mock_copy_op.return_value(node1) + self.assertEqual(result, expected_result) + + +if __name__ == "__main__": + unittest.main() diff --git a/tests/flow/transform/test_google_flow.py b/tests/flow/transform/test_google_flow.py new file mode 100644 index 00000000..e7cba670 
--- /dev/null +++ b/tests/flow/transform/test_google_flow.py @@ -0,0 +1,46 @@ +import unittest +from unittest.mock import MagicMock, patch + +from uniflow.flow.transform.transform_google_flow import GoogleModelFlow +from uniflow.node import Node +from uniflow.op.prompt import PromptTemplate + + +class TestGoogleModelFlow(unittest.TestCase): + @patch("uniflow.flow.transform.transform_google_flow.LmModel") + @patch("uniflow.flow.transform.transform_google_flow.ModelOp") + def setUp(self, mock_model_op, mock_lm_model): + self.mock_model_op = mock_model_op + self.mock_lm_model = mock_lm_model + self.mock_lm_model.return_value = MagicMock() + self.prompt_template = PromptTemplate( + instruction="instruction", few_shot_prompt=[{}] + ) + self.model_config = {"response_format": {"type": "json_object"}} + self.google_flow = GoogleModelFlow(self.prompt_template, self.model_config) + # self.lm_model = LmModel(prompt_template=self.prompt_template, model_config=self.model_config) + + def test_init(self): + self.mock_model_op.assert_called_once_with( + name="google_model_op", + model=self.mock_lm_model.return_value, # This represents the LmModel instance + ) + + def test_run(self): + nodes = [ + Node(name="node1", value_dict={"a": 1}), + Node(name="node2", value_dict={"b": 2}), + ] + result = self.google_flow.run(nodes) + self.mock_model_op.return_value.assert_called_once_with(nodes) + expected_result = self.mock_model_op.return_value(nodes) + self.assertEqual(result, expected_result) + + +# class TestTransformGoogleFlow(TestGoogleModelFlow): +# def setUp(self): +# super().setUp() +# self.transform_google_flow = TransformGoogleFlow(self.prompt_template, self.model_config) + +if __name__ == "__main__": + unittest.main() diff --git a/tests/flow/transform/test_google_multimodal_flow.py b/tests/flow/transform/test_google_multimodal_flow.py new file mode 100644 index 00000000..91064af1 --- /dev/null +++ b/tests/flow/transform/test_google_multimodal_flow.py @@ -0,0 +1,45 @@ +import 
unittest +from unittest.mock import MagicMock, patch + +from uniflow.flow.transform.transform_google_multimodal_flow import ( + GoogleMultiModalModelFlow, +) +from uniflow.node import Node +from uniflow.op.prompt import PromptTemplate + + +class TestGoogleMultiModalModelFlow(unittest.TestCase): + @patch("uniflow.flow.transform.transform_google_multimodal_flow.MmModel") + @patch("uniflow.flow.transform.transform_google_multimodal_flow.ModelOp") + def setUp(self, mock_model_op, mock_mm_model): + self.mock_model_op = mock_model_op + self.mock_mm_model = mock_mm_model + self.mock_mm_model.return_value = MagicMock() + self.prompt_template = PromptTemplate( + instruction="instruction", few_shot_prompt=[{}] + ) + self.model_config = {"response_format": {"type": "json_object"}} + self.google_mm_flow = GoogleMultiModalModelFlow( + self.prompt_template, self.model_config + ) + + def test_init(self): + self.mock_model_op.assert_called_once_with( + name="google_mm_model_op", + model=self.mock_mm_model.return_value, # This represents the MmModel instance + ) + + def test_run(self): + nodes = [ + Node(name="node1", value_dict={"a": 1}), + Node(name="node2", value_dict={"b": 2}), + ] + # Mock the __call__ method of the ModelOp instance to return the nodes directly for simplicity + self.mock_model_op.return_value.return_value = nodes + result = self.google_mm_flow.run(nodes) + self.mock_model_op.return_value.assert_called_once_with(nodes) + self.assertEqual(result, nodes) + + +if __name__ == "__main__": + unittest.main() diff --git a/tests/flow/transform/test_huggingface_flow.py b/tests/flow/transform/test_huggingface_flow.py new file mode 100644 index 00000000..55bb6b7b --- /dev/null +++ b/tests/flow/transform/test_huggingface_flow.py @@ -0,0 +1,54 @@ +import unittest +from unittest.mock import MagicMock, patch + +from uniflow.flow.transform.transform_huggingface_flow import ( + HuggingFaceModelFlow, # Update the import path as necessary +) +from uniflow.node import Node +from 
uniflow.op.prompt import PromptTemplate + + +class TestHuggingFaceModelFlow(unittest.TestCase): + @patch( + "uniflow.flow.transform.transform_huggingface_flow.ModelOp" + ) # Update the import path as necessary + @patch( + "uniflow.flow.transform.transform_huggingface_flow.LmModel" + ) # Update the import path as necessary + def setUp(self, mock_lm_model, mock_model_op): + self.mock_model_op = mock_model_op + self.mock_lm_model = mock_lm_model + # Mock the return value of LmModel to simulate its behavior without actual instantiation + self.mock_lm_model.return_value = MagicMock() + self.prompt_template = PromptTemplate( + instruction="instruction", few_shot_prompt=[{}] + ) + self.model_config = {"response_format": {"type": "json_object"}} + self.huggingface_flow = HuggingFaceModelFlow( + self.prompt_template, self.model_config + ) + + def test_init(self): + # Verify that ModelOp is correctly instantiated with the expected arguments + self.mock_model_op.assert_called_once_with( + name="huggingface_model_op", model=self.mock_lm_model.return_value + ) + + def test_run(self): + # Prepare some test nodes to run through the flow + nodes = [ + Node(name="node1", value_dict={"a": 1}), + Node(name="node2", value_dict={"b": 2}), + ] + # Assume the ModelOp operation simply returns the nodes it receives + self.mock_model_op.return_value.return_value = nodes + # Run the flow with the test nodes + result = self.huggingface_flow.run(nodes) + # Verify that the ModelOp mock was called with the test nodes + self.mock_model_op.return_value.assert_called_once_with(nodes) + # Verify the result matches the expected outcome + self.assertEqual(result, nodes) + + +if __name__ == "__main__": + unittest.main() diff --git a/tests/flow/transform/test_lmqg_flow.py b/tests/flow/transform/test_lmqg_flow.py new file mode 100644 index 00000000..5c801e79 --- /dev/null +++ b/tests/flow/transform/test_lmqg_flow.py @@ -0,0 +1,46 @@ +import unittest +from unittest.mock import MagicMock, patch + +from 
uniflow.flow.transform.transform_lmqg_flow import TransformLMQGFlow +from uniflow.node import Node +from uniflow.op.prompt import PromptTemplate + + +class TestTransformLMQGFlow(unittest.TestCase): + @patch("uniflow.flow.transform.transform_lmqg_flow.ModelOp") + @patch("uniflow.flow.transform.transform_lmqg_flow.LmModel") + def setUp(self, mock_lm_model, mock_model_op): + self.mock_model_op = mock_model_op + self.mock_lm_model = mock_lm_model + # Setup the mock for LmModel to simulate its behavior without actual instantiation + self.mock_lm_model.return_value = MagicMock() + self.prompt_template = PromptTemplate( + instruction="instruction", few_shot_prompt=[{}] + ) + self.model_config = {"response_format": {"type": "json_object"}} + self.lmqg_flow = TransformLMQGFlow(self.prompt_template, self.model_config) + + def test_init(self): + # Verify that ModelOp is instantiated with the correct arguments + self.mock_model_op.assert_called_once_with( + name="lmqg_model_op", model=self.mock_lm_model.return_value + ) + + def test_run(self): + # Prepare some test nodes to process + nodes = [ + Node(name="node1", value_dict={"a": 1}), + Node(name="node2", value_dict={"b": 2}), + ] + # Assume the ModelOp operation simply returns the nodes it receives for simplicity + self.mock_model_op.return_value.return_value = nodes + # Execute the flow with the test nodes + result = self.lmqg_flow.run(nodes) + # Verify that the ModelOp mock was invoked with the test nodes + self.mock_model_op.return_value.assert_called_once_with(nodes) + # Check that the result matches the expected output + self.assertEqual(result, nodes) + + +if __name__ == "__main__": + unittest.main() diff --git a/tests/flow/transform/test_openai_flow.py b/tests/flow/transform/test_openai_flow.py new file mode 100644 index 00000000..7f13caaa --- /dev/null +++ b/tests/flow/transform/test_openai_flow.py @@ -0,0 +1,60 @@ +import unittest +from unittest.mock import MagicMock, patch + +from 
uniflow.flow.transform.transform_openai_flow import OpenAIModelFlow +from uniflow.node import Node +from uniflow.op.prompt import PromptTemplate + + +class TestOpenAIModelFlow(unittest.TestCase): + @patch("uniflow.flow.transform.transform_openai_flow.JsonLmModel") + @patch("uniflow.flow.transform.transform_openai_flow.ModelOp") + @patch("uniflow.flow.transform.transform_openai_flow.LmModel") + def setUp(self, mock_lm_model, mock_model_op, mock_json_model): + self.mock_model_op = mock_model_op + self.mock_lm_model = mock_lm_model + self.mock_json_model = mock_json_model + self.mock_json_model.return_value = MagicMock() + self.prompt_template = PromptTemplate( + instruction="instruction", few_shot_prompt=[{}] + ) + self.model_config1 = {"response_format": {"type": "json_object"}} + self.model_config2 = {"response_format": {"type": "other"}} + self.azure_flow1 = OpenAIModelFlow(self.prompt_template, self.model_config1) + self.azure_flow2 = OpenAIModelFlow(self.prompt_template, self.model_config2) + + def test_init(self): + self.mock_json_model.assert_called_once_with( + prompt_template=self.prompt_template, model_config=self.model_config1 + ) + self.mock_model_op.assert_called() + + def test_not_json_init(self): + self.mock_lm_model.assert_called_once_with( + prompt_template=self.prompt_template, model_config=self.model_config2 + ) + self.mock_model_op.assert_called() + + def test_run(self): + nodes1 = [ + Node(name="node1", value_dict={"a": 1}), + Node(name="node2", value_dict={"b": 2}), + ] + result = self.azure_flow1.run(nodes1) + self.mock_model_op.return_value.assert_called_once_with(nodes1) + expected_result = self.mock_model_op.return_value(nodes1) + self.assertEqual(result, expected_result) + # self.assertEqual(result, self.mock_model_op.return_value(node1)) + + def test_not_json_run(self): + nodes2 = [ + Node(name="node1", value_dict={"a": 1}), + Node(name="node2", value_dict={"b": 2}), + ] + result = self.azure_flow2.run(nodes2) + 
self.mock_model_op.return_value.assert_called_once_with(nodes2) +        self.assertEqual(result, self.mock_model_op.return_value.return_value) + + +if __name__ == "__main__": +    unittest.main()