Skip to content
Merged
Show file tree
Hide file tree
Changes from 7 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
14 changes: 12 additions & 2 deletions backend/api_v2/api_deployment_views.py
Original file line number Diff line number Diff line change
Expand Up @@ -45,16 +45,25 @@ def initialize_request(self, request: Request, *args: Any, **kwargs: Any) -> Req

@DeploymentHelper.validate_api_key
def post(
self, request: Request, org_name: str, api_name: str, api: APIDeployment
self,
request: Request,
org_name: str,
api_name: str,
api: APIDeployment,
api_key: str,
) -> Response:
serializer = ExecutionRequestSerializer(data=request.data)
serializer = ExecutionRequestSerializer(
data=request.data, context={"api": api, "api_key": api_key}
)
serializer.is_valid(raise_exception=True)
file_objs = serializer.validated_data.get(ApiExecution.FILES_FORM_DATA)
timeout = serializer.validated_data.get(ApiExecution.TIMEOUT_FORM_DATA)
include_metadata = serializer.validated_data.get(ApiExecution.INCLUDE_METADATA)
include_metrics = serializer.validated_data.get(ApiExecution.INCLUDE_METRICS)
use_file_history = serializer.validated_data.get(ApiExecution.USE_FILE_HISTORY)
tag_names = serializer.validated_data.get(ApiExecution.TAGS)
llm_profile_id = serializer.validated_data.get(ApiExecution.LLM_PROFILE_ID)

response = DeploymentHelper.execute_workflow(
organization_name=org_name,
api=api,
Expand All @@ -64,6 +73,7 @@ def post(
include_metrics=include_metrics,
use_file_history=use_file_history,
tag_names=tag_names,
llm_profile_id=llm_profile_id,
)
if "error" in response and response["error"]:
return Response(
Expand Down
1 change: 1 addition & 0 deletions backend/api_v2/constants.py
Original file line number Diff line number Diff line change
Expand Up @@ -8,3 +8,4 @@ class ApiExecution:
USE_FILE_HISTORY: str = "use_file_history" # Undocumented parameter
EXECUTION_ID: str = "execution_id"
TAGS: str = "tags"
LLM_PROFILE_ID: str = "llm_profile_id"
4 changes: 4 additions & 0 deletions backend/api_v2/deployment_helper.py
Original file line number Diff line number Diff line change
Expand Up @@ -53,6 +53,7 @@ def validate_and_process(
api_deployment = DeploymentHelper.get_deployment_by_api_name(api_name=api_name)
DeploymentHelper.validate_api(api_deployment=api_deployment, api_key=api_key)
kwargs["api"] = api_deployment
kwargs["api_key"] = api_key
return func(self, request, *args, **kwargs)

@staticmethod
Expand Down Expand Up @@ -143,6 +144,7 @@ def execute_workflow(
include_metrics: bool = False,
use_file_history: bool = False,
tag_names: list[str] = [],
llm_profile_id: str | None = None,
) -> ReturnDict:
"""Execute workflow by api.

Expand All @@ -153,6 +155,7 @@ def execute_workflow(
use_file_history (bool): Use FileHistory table to return results on already
processed files. Defaults to False
tag_names (list(str)): list of tag names
llm_profile_id (str, optional): LLM profile ID for overriding tool settings

Returns:
ReturnDict: execution status/ result
Expand Down Expand Up @@ -185,6 +188,7 @@ def execute_workflow(
execution_id=execution_id,
queue=CeleryQueue.CELERY_API_DEPLOYMENTS,
use_file_history=use_file_history,
llm_profile_id=llm_profile_id,
)
result.status_api = DeploymentHelper.construct_status_endpoint(
api_endpoint=api.api_endpoint, execution_id=execution_id
Expand Down
6 changes: 6 additions & 0 deletions backend/api_v2/postman_collection/dto.py
Original file line number Diff line number Diff line change
Expand Up @@ -119,6 +119,12 @@ def get_form_data_items(self) -> list[FormDataItem]:
),
FormDataItem(key=ApiExecution.INCLUDE_METADATA, type="text", value="False"),
FormDataItem(key=ApiExecution.INCLUDE_METRICS, type="text", value="False"),
FormDataItem(
key=ApiExecution.LLM_PROFILE_ID,
type="text",
value="",
description="Optional: UUID of the LLM profile to override default settings",
),
]

def get_api_key(self) -> str:
Expand Down
34 changes: 34 additions & 0 deletions backend/api_v2/serializers.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,6 +4,7 @@

from django.core.validators import RegexValidator
from pipeline_v2.models import Pipeline
from prompt_studio.prompt_profile_manager_v2.models import ProfileManager
from rest_framework.serializers import (
BooleanField,
CharField,
Expand Down Expand Up @@ -114,6 +115,8 @@ class ExecutionRequestSerializer(TagParamsSerializer):
helpful for demos.
tags (str): Comma-separated List of tags to associate with the execution.
e.g:'tag1,tag2-name,tag3_name'
llm_profile_id (str): UUID of the LLM profile to override the default profile.
If not provided, the default profile will be used.
"""

MAX_FILES_ALLOWED = 32
Expand All @@ -124,6 +127,7 @@ class ExecutionRequestSerializer(TagParamsSerializer):
include_metadata = BooleanField(default=False)
include_metrics = BooleanField(default=False)
use_file_history = BooleanField(default=False)
llm_profile_id = CharField(required=False, allow_null=True, allow_blank=True)
files = ListField(
child=FileField(),
required=True,
Expand All @@ -142,6 +146,36 @@ def validate_files(self, value):
raise ValidationError(f"Maximum '{self.MAX_FILES_ALLOWED}' files allowed.")
return value

def validate_llm_profile_id(self, value):
    """Validate that the llm_profile_id belongs to the API key owner.

    DRF field-level validator (``validate_<field_name>``) for the optional
    ``llm_profile_id`` field. Enforces that the requested LLM profile is
    owned by the same user that owns the API key used for this deployment
    call, preventing cross-tenant profile use.

    Args:
        value: The submitted ``llm_profile_id`` (may be None/empty, since the
            field allows null and blank).

    Returns:
        The validated ``llm_profile_id`` unchanged, or the falsy value as-is
        when no profile was requested.

    Raises:
        ValidationError: If serializer context is missing, the profile does
            not exist, the API key is not found/active for this deployment,
            or the profile owner does not match the API key owner.
    """
    # Empty/None means "use the default profile" — nothing to validate.
    if not value:
        return value

    # Get context from serializer; both are injected by the view
    # (see ExecutionRequestSerializer construction in the API view).
    api = self.context.get("api")
    api_key = self.context.get("api_key")

    if not api or not api_key:
        raise ValidationError("Unable to validate LLM profile ownership")

    # Check if profile exists
    try:
        profile = ProfileManager.objects.get(profile_id=value)
    except ProfileManager.DoesNotExist:
        raise ValidationError("Profile not found")

    # Get the specific API key being used. NOTE(review): assumes `api`
    # exposes a reverse relation `api_keys` with `api_key`/`is_active`
    # fields — confirm against the APIDeployment model.
    try:
        active_api_key = api.api_keys.get(api_key=api_key, is_active=True)
    except api.api_keys.model.DoesNotExist:
        raise ValidationError("API key not found or not active for this deployment")

    # Check if the profile owner matches the API key owner
    if profile.created_by != active_api_key.created_by:
        raise ValidationError("You can only use profiles that you own")

    return value


class ExecutionQuerySerializer(Serializer):
execution_id = CharField(required=True)
Expand Down
3 changes: 3 additions & 0 deletions backend/workflow_manager/endpoint_v2/source.py
Original file line number Diff line number Diff line change
Expand Up @@ -810,13 +810,15 @@ def add_file_to_volume(
workflow_file_execution: WorkflowFileExecution,
tags: list[str],
file_hash: FileHash,
llm_profile_id: str | None = None,
) -> str:
"""Add input file to execution directory.

Args:
workflow_file_execution: WorkflowFileExecution model
tags (list[str]): Tag names associated with the workflow execution.
file_hash: FileHash model
llm_profile_id (str, optional): LLM profile ID for overriding tool settings.

Raises:
InvalidSource: _description_
Expand Down Expand Up @@ -849,6 +851,7 @@ def add_file_to_volume(
file_execution_id=workflow_file_execution.id,
source_hash=file_content_hash,
tags=tags,
llm_profile_id=llm_profile_id,
)
return file_content_hash

Expand Down
1 change: 1 addition & 0 deletions backend/workflow_manager/workflow_v2/dto.py
Original file line number Diff line number Diff line change
Expand Up @@ -155,6 +155,7 @@ class FileData:
execution_mode: str
use_file_history: bool
q_file_no_list: list[int]
llm_profile_id: str | None = None

@classmethod
def from_dict(cls, data: dict[str, Any]) -> FileData:
Expand Down
3 changes: 3 additions & 0 deletions backend/workflow_manager/workflow_v2/file_execution_tasks.py
Original file line number Diff line number Diff line change
Expand Up @@ -433,6 +433,7 @@ def _process_file(
workflow_file_execution,
file_hash,
workflow_execution,
file_data,
)

# Update file execution tracker with updated file hash
Expand Down Expand Up @@ -671,6 +672,7 @@ def _prepare_file_for_processing(
workflow_file_exec: WorkflowFileExecution,
file_hash: FileHash,
workflow_execution: WorkflowExecution,
file_data: FileData,
) -> str:
"""Handle file preparation and volume storage."""
workflow_file_exec.update_status(ExecutionStatus.EXECUTING)
Expand All @@ -687,6 +689,7 @@ def _prepare_file_for_processing(
workflow_file_execution=workflow_file_exec,
tags=workflow_execution.tag_names,
file_hash=file_hash,
llm_profile_id=file_data.llm_profile_id,
)
file_hash.file_hash = content_hash
workflow_file_exec.update(file_hash=content_hash)
Expand Down
12 changes: 11 additions & 1 deletion backend/workflow_manager/workflow_v2/workflow_helper.py
Original file line number Diff line number Diff line change
Expand Up @@ -146,6 +146,7 @@ def process_input_files(
scheduled: bool,
execution_mode: tuple[str, str],
use_file_history: bool,
llm_profile_id: str | None,
) -> str | None:
total_files = len(input_files)
workflow_log.publish_initial_workflow_logs(total_files=total_files)
Expand Down Expand Up @@ -196,6 +197,7 @@ def process_input_files(
execution_mode=mode,
use_file_history=use_file_history,
q_file_no_list=list(q_file_no_list) if q_file_no_list else [],
llm_profile_id=llm_profile_id,
)
batch_data = FileBatchData(files=batch, file_data=file_data)

Expand Down Expand Up @@ -255,6 +257,7 @@ def run_workflow(
single_step: bool = False,
execution_mode: tuple[str, str] | None = None,
use_file_history: bool = True,
llm_profile_id: str | None = None,
) -> ExecutionResponse:
tool_instances: list[ToolInstance] = (
ToolInstanceHelper.get_tool_instances_by_workflow(
Expand Down Expand Up @@ -304,6 +307,7 @@ def run_workflow(
scheduled=scheduled,
use_file_history=use_file_history,
execution_mode=execution_mode,
llm_profile_id=llm_profile_id,
)
api_results = []
return ExecutionResponse(
Expand Down Expand Up @@ -424,6 +428,7 @@ def execute_workflow_async(
pipeline_id: str | None = None,
queue: str | None = None,
use_file_history: bool = True,
llm_profile_id: str | None = None,
) -> ExecutionResponse:
"""Adding a workflow to the queue for execution.

Expand All @@ -435,6 +440,7 @@ def execute_workflow_async(
queue (Optional[str]): Name of the celery queue to push into
use_file_history (bool): Use FileHistory table to return results on already
processed files. Defaults to True
llm_profile_id (str, optional): LLM profile ID for overriding tool settings

Returns:
ExecutionResponse: Existing status of execution
Expand All @@ -459,6 +465,7 @@ def execute_workflow_async(
"pipeline_id": pipeline_id,
"log_events_id": log_events_id,
"use_file_history": use_file_history,
"llm_profile_id": llm_profile_id,
},
queue=queue,
)
Expand Down Expand Up @@ -625,14 +632,16 @@ def execute_workflow(
workflow = Workflow.objects.get(id=workflow_id)
# TODO: Make use of WorkflowExecution.get_or_create()
try:
# Filter out llm_profile_id from kwargs as create_workflow_execution doesn't accept it
filtered_kwargs = {k: v for k, v in kwargs.items() if k != "llm_profile_id"}
workflow_execution = WorkflowExecutionServiceHelper.create_workflow_execution(
workflow_id=workflow_id,
single_step=False,
pipeline_id=pipeline_id,
mode=WorkflowExecution.Mode.QUEUE,
execution_id=execution_id,
total_files=len(hash_values),
**kwargs, # type: ignore
**filtered_kwargs, # type: ignore
)
except IntegrityError:
# Use existing instance on retry attempt
Expand All @@ -650,6 +659,7 @@ def execute_workflow(
execution_mode=execution_mode,
hash_values_of_files=hash_values,
use_file_history=use_file_history,
llm_profile_id=kwargs.get("llm_profile_id"),
)
except Exception as error:
error_message = traceback.format_exc()
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -3,3 +3,30 @@
.manage-llm-pro-icon {
font-size: 10px;
}

/* Horizontal row of action buttons with uniform spacing. */
.action-buttons-container {
    display: flex;
    align-items: center;
    gap: 8px;
}

/* Inline row showing the profile ID next to its copy control. */
.profile-id-container {
    display: flex;
    align-items: center;
    gap: 8px;
    margin-top: 4px;
}

/* Small text for the profile ID value. */
.profile-id-text {
    font-size: 12px;
}

/* Strip default button chrome so the copy button hugs its icon. */
.profile-copy-button {
    padding: 0;
    min-width: auto;
    height: auto;
}

/* Icon sized to match the profile ID text. */
.profile-copy-icon {
    font-size: 12px;
}
Loading