id int64 0 190k | prompt stringlengths 21 13.4M | docstring stringlengths 1 12k ⌀ |
|---|---|---|
4,375 | import functools
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from msrest import Serializer
from .. import models as _models
from .._vendor import _convert_request, _format_url_section
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def _format_url_section(template, **kwargs):
components = template.split("/")
while components:
try:
return template.format(**kwargs)
except KeyError as key:
formatted_components = template.split("/")
components = [
c for c in formatted_components if "{}".format(key.args[0]) not in c
]
template = "/".join(components)
def build_delete_flow_session_request(
    subscription_id,  # type: str
    resource_group_name,  # type: str
    workspace_name,  # type: str
    session_id,  # type: str
    **kwargs  # type: Any
):
    # type: (...) -> HttpRequest
    """Build the HTTP DELETE request that removes a flow session.

    Path parameters are URL-serialized into the route template; callers may
    override the template via ``template_url`` and pass extra ``headers``.
    """
    accept = "application/json"
    # Resolve the route template and substitute serialized path parameters.
    _url = kwargs.pop("template_url", '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/FlowSessions/{sessionId}')
    _path_args = {
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
        "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
        "workspaceName": _SERIALIZER.url("workspace_name", workspace_name, 'str'),
        "sessionId": _SERIALIZER.url("session_id", session_id, 'str'),
    }
    _url = _format_url_section(_url, **_path_args)
    # Merge the Accept header into any caller-provided headers.
    _headers = kwargs.pop("headers", {})  # type: Dict[str, Any]
    _headers['Accept'] = _SERIALIZER.header("accept", accept, 'str')
    return HttpRequest(method="DELETE", url=_url, headers=_headers, **kwargs)
4,376 | import functools
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from msrest import Serializer
from .. import models as _models
from .._vendor import _convert_request, _format_url_section
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def _format_url_section(template, **kwargs):
components = template.split("/")
while components:
try:
return template.format(**kwargs)
except KeyError as key:
formatted_components = template.split("/")
components = [
c for c in formatted_components if "{}".format(key.args[0]) not in c
]
template = "/".join(components)
def build_list_flow_session_pip_packages_request(
    subscription_id,  # type: str
    resource_group_name,  # type: str
    workspace_name,  # type: str
    session_id,  # type: str
    **kwargs  # type: Any
):
    # type: (...) -> HttpRequest
    """Build the HTTP GET request that lists pip packages of a flow session.

    Path parameters are URL-serialized into the route template; callers may
    override the template via ``template_url`` and pass extra ``headers``.
    """
    accept = "application/json"
    # Resolve the route template and substitute serialized path parameters.
    _url = kwargs.pop("template_url", '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/FlowSessions/{sessionId}/pipPackages')
    _path_args = {
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
        "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
        "workspaceName": _SERIALIZER.url("workspace_name", workspace_name, 'str'),
        "sessionId": _SERIALIZER.url("session_id", session_id, 'str'),
    }
    _url = _format_url_section(_url, **_path_args)
    # Merge the Accept header into any caller-provided headers.
    _headers = kwargs.pop("headers", {})  # type: Dict[str, Any]
    _headers['Accept'] = _SERIALIZER.header("accept", accept, 'str')
    return HttpRequest(method="GET", url=_url, headers=_headers, **kwargs)
4,377 | import functools
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from msrest import Serializer
from .. import models as _models
from .._vendor import _convert_request, _format_url_section
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def _format_url_section(template, **kwargs):
components = template.split("/")
while components:
try:
return template.format(**kwargs)
except KeyError as key:
formatted_components = template.split("/")
components = [
c for c in formatted_components if "{}".format(key.args[0]) not in c
]
template = "/".join(components)
def build_poll_operation_status_request(
    subscription_id,  # type: str
    resource_group_name,  # type: str
    workspace_name,  # type: str
    session_id,  # type: str
    action_type,  # type: Union[str, "_models.SetupFlowSessionAction"]
    location,  # type: str
    operation_id,  # type: str
    **kwargs  # type: Any
):
    # type: (...) -> HttpRequest
    """Build the HTTP GET request that polls a flow-session operation's status.

    Optional ``api_version`` (default ``"1.0.0"``) and ``type`` query values
    are read from ``kwargs``; callers may also override ``template_url``.
    """
    api_version = kwargs.pop('api_version', "1.0.0")  # type: Optional[str]
    type_ = kwargs.pop('type', None)  # type: Optional[str]
    accept = "application/json"
    # Resolve the route template and substitute serialized path parameters.
    _url = kwargs.pop("template_url", '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/FlowSessions/{sessionId}/{actionType}/locations/{location}/operations/{operationId}')
    _path_args = {
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
        "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
        "workspaceName": _SERIALIZER.url("workspace_name", workspace_name, 'str'),
        "sessionId": _SERIALIZER.url("session_id", session_id, 'str'),
        "actionType": _SERIALIZER.url("action_type", action_type, 'str'),
        "location": _SERIALIZER.url("location", location, 'str'),
        "operationId": _SERIALIZER.url("operation_id", operation_id, 'str'),
    }
    _url = _format_url_section(_url, **_path_args)
    # Only include query values that were actually supplied.
    _params = kwargs.pop("params", {})  # type: Dict[str, Any]
    if api_version is not None:
        _params['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
    if type_ is not None:
        _params['type'] = _SERIALIZER.query("type", type_, 'str')
    # Merge the Accept header into any caller-provided headers.
    _headers = kwargs.pop("headers", {})  # type: Dict[str, Any]
    _headers['Accept'] = _SERIALIZER.header("accept", accept, 'str')
    return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
4,378 | import functools
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from msrest import Serializer
from .. import models as _models
from .._vendor import _convert_request, _format_url_section
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def _format_url_section(template, **kwargs):
components = template.split("/")
while components:
try:
return template.format(**kwargs)
except KeyError as key:
formatted_components = template.split("/")
components = [
c for c in formatted_components if "{}".format(key.args[0]) not in c
]
template = "/".join(components)
def build_get_standby_pools_request(
    subscription_id,  # type: str
    resource_group_name,  # type: str
    workspace_name,  # type: str
    **kwargs  # type: Any
):
    # type: (...) -> HttpRequest
    """Build the HTTP GET request that lists flow-session standby pools.

    Path parameters are URL-serialized into the route template; callers may
    override the template via ``template_url`` and pass extra ``headers``.
    """
    accept = "application/json"
    # Resolve the route template and substitute serialized path parameters.
    _url = kwargs.pop("template_url", '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/FlowSessions/standbypools')
    _path_args = {
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
        "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
        "workspaceName": _SERIALIZER.url("workspace_name", workspace_name, 'str'),
    }
    _url = _format_url_section(_url, **_path_args)
    # Merge the Accept header into any caller-provided headers.
    _headers = kwargs.pop("headers", {})  # type: Dict[str, Any]
    _headers['Accept'] = _SERIALIZER.header("accept", accept, 'str')
    return HttpRequest(method="GET", url=_url, headers=_headers, **kwargs)
4,379 | import functools
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from msrest import Serializer
from .. import models as _models
from .._vendor import _convert_request, _format_url_section
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def _format_url_section(template, **kwargs):
components = template.split("/")
while components:
try:
return template.format(**kwargs)
except KeyError as key:
formatted_components = template.split("/")
components = [
c for c in formatted_components if "{}".format(key.args[0]) not in c
]
template = "/".join(components)
def build_submit_bulk_run_request(
    subscription_id,  # type: str
    resource_group_name,  # type: str
    workspace_name,  # type: str
    **kwargs  # type: Any
):
    # type: (...) -> HttpRequest
    """Build the HTTP POST request that submits a bulk run.

    An optional ``content_type`` keyword sets the Content-Type header;
    callers may override ``template_url`` and pass extra ``headers``.
    """
    content_type = kwargs.pop('content_type', None)  # type: Optional[str]
    accept = "application/json"
    # Resolve the route template and substitute serialized path parameters.
    _url = kwargs.pop("template_url", '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/BulkRuns/submit')
    _path_args = {
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
        "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
        "workspaceName": _SERIALIZER.url("workspace_name", workspace_name, 'str'),
    }
    _url = _format_url_section(_url, **_path_args)
    # Merge Content-Type (when supplied) and Accept into caller headers.
    _headers = kwargs.pop("headers", {})  # type: Dict[str, Any]
    if content_type is not None:
        _headers['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
    _headers['Accept'] = _SERIALIZER.header("accept", accept, 'str')
    return HttpRequest(method="POST", url=_url, headers=_headers, **kwargs)
4,380 | import functools
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from msrest import Serializer
from .. import models as _models
from .._vendor import _convert_request, _format_url_section
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def _format_url_section(template, **kwargs):
components = template.split("/")
while components:
try:
return template.format(**kwargs)
except KeyError as key:
formatted_components = template.split("/")
components = [
c for c in formatted_components if "{}".format(key.args[0]) not in c
]
template = "/".join(components)
def build_resume_bulk_run_request(
    subscription_id,  # type: str
    resource_group_name,  # type: str
    workspace_name,  # type: str
    **kwargs  # type: Any
):
    # type: (...) -> HttpRequest
    """Build the HTTP POST request that resumes a bulk run.

    An optional ``content_type`` keyword sets the Content-Type header;
    callers may override ``template_url`` and pass extra ``headers``.
    """
    content_type = kwargs.pop('content_type', None)  # type: Optional[str]
    accept = "application/json"
    # Resolve the route template and substitute serialized path parameters.
    _url = kwargs.pop("template_url", '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/BulkRuns/resume')
    _path_args = {
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
        "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
        "workspaceName": _SERIALIZER.url("workspace_name", workspace_name, 'str'),
    }
    _url = _format_url_section(_url, **_path_args)
    # Merge Content-Type (when supplied) and Accept into caller headers.
    _headers = kwargs.pop("headers", {})  # type: Dict[str, Any]
    if content_type is not None:
        _headers['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
    _headers['Accept'] = _SERIALIZER.header("accept", accept, 'str')
    return HttpRequest(method="POST", url=_url, headers=_headers, **kwargs)
4,381 | import functools
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from msrest import Serializer
from .. import models as _models
from .._vendor import _convert_request, _format_url_section
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def _format_url_section(template, **kwargs):
components = template.split("/")
while components:
try:
return template.format(**kwargs)
except KeyError as key:
formatted_components = template.split("/")
components = [
c for c in formatted_components if "{}".format(key.args[0]) not in c
]
template = "/".join(components)
def build_cancel_flow_run_request(
    subscription_id,  # type: str
    resource_group_name,  # type: str
    workspace_name,  # type: str
    flow_run_id,  # type: str
    **kwargs  # type: Any
):
    # type: (...) -> HttpRequest
    """Build the HTTP POST request that cancels a bulk flow run.

    Path parameters are URL-serialized into the route template; callers may
    override the template via ``template_url`` and pass extra ``headers``.
    """
    accept = "text/plain, application/json"
    # Resolve the route template and substitute serialized path parameters.
    _url = kwargs.pop("template_url", '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/BulkRuns/{flowRunId}/cancel')
    _path_args = {
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
        "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
        "workspaceName": _SERIALIZER.url("workspace_name", workspace_name, 'str'),
        "flowRunId": _SERIALIZER.url("flow_run_id", flow_run_id, 'str'),
    }
    _url = _format_url_section(_url, **_path_args)
    # Merge the Accept header into any caller-provided headers.
    _headers = kwargs.pop("headers", {})  # type: Dict[str, Any]
    _headers['Accept'] = _SERIALIZER.header("accept", accept, 'str')
    return HttpRequest(method="POST", url=_url, headers=_headers, **kwargs)
4,382 | import functools
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from msrest import Serializer
from .. import models as _models
from .._vendor import _convert_request, _format_url_section
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def _format_url_section(template, **kwargs):
components = template.split("/")
while components:
try:
return template.format(**kwargs)
except KeyError as key:
formatted_components = template.split("/")
components = [
c for c in formatted_components if "{}".format(key.args[0]) not in c
]
template = "/".join(components)
def build_get_flow_run_info_request(
    subscription_id,  # type: str
    resource_group_name,  # type: str
    workspace_name,  # type: str
    flow_run_id,  # type: str
    **kwargs  # type: Any
):
    # type: (...) -> HttpRequest
    """Build the HTTP GET request that fetches a bulk flow run's info.

    Path parameters are URL-serialized into the route template; callers may
    override the template via ``template_url`` and pass extra ``headers``.
    """
    accept = "application/json"
    # Resolve the route template and substitute serialized path parameters.
    _url = kwargs.pop("template_url", '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/BulkRuns/{flowRunId}')
    _path_args = {
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
        "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
        "workspaceName": _SERIALIZER.url("workspace_name", workspace_name, 'str'),
        "flowRunId": _SERIALIZER.url("flow_run_id", flow_run_id, 'str'),
    }
    _url = _format_url_section(_url, **_path_args)
    # Merge the Accept header into any caller-provided headers.
    _headers = kwargs.pop("headers", {})  # type: Dict[str, Any]
    _headers['Accept'] = _SERIALIZER.header("accept", accept, 'str')
    return HttpRequest(method="GET", url=_url, headers=_headers, **kwargs)
4,383 | import functools
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from msrest import Serializer
from .. import models as _models
from .._vendor import _convert_request, _format_url_section
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def _format_url_section(template, **kwargs):
components = template.split("/")
while components:
try:
return template.format(**kwargs)
except KeyError as key:
formatted_components = template.split("/")
components = [
c for c in formatted_components if "{}".format(key.args[0]) not in c
]
template = "/".join(components)
def build_get_flow_child_runs_request(
    subscription_id,  # type: str
    resource_group_name,  # type: str
    workspace_name,  # type: str
    flow_run_id,  # type: str
    **kwargs  # type: Any
):
    # type: (...) -> HttpRequest
    """Build the HTTP GET request that lists child runs of a bulk flow run.

    Optional ``index``, ``start_index`` and ``end_index`` keywords become
    query-string parameters when supplied.
    """
    index = kwargs.pop('index', None)  # type: Optional[int]
    start_index = kwargs.pop('start_index', None)  # type: Optional[int]
    end_index = kwargs.pop('end_index', None)  # type: Optional[int]
    accept = "application/json"
    # Resolve the route template and substitute serialized path parameters.
    _url = kwargs.pop("template_url", '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/BulkRuns/{flowRunId}/childRuns')
    _path_args = {
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
        "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
        "workspaceName": _SERIALIZER.url("workspace_name", workspace_name, 'str'),
        "flowRunId": _SERIALIZER.url("flow_run_id", flow_run_id, 'str'),
    }
    _url = _format_url_section(_url, **_path_args)
    # Only include query values that were actually supplied.
    _params = kwargs.pop("params", {})  # type: Dict[str, Any]
    if index is not None:
        _params['index'] = _SERIALIZER.query("index", index, 'int')
    if start_index is not None:
        _params['startIndex'] = _SERIALIZER.query("start_index", start_index, 'int')
    if end_index is not None:
        _params['endIndex'] = _SERIALIZER.query("end_index", end_index, 'int')
    # Merge the Accept header into any caller-provided headers.
    _headers = kwargs.pop("headers", {})  # type: Dict[str, Any]
    _headers['Accept'] = _SERIALIZER.header("accept", accept, 'str')
    return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
4,384 | import functools
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from msrest import Serializer
from .. import models as _models
from .._vendor import _convert_request, _format_url_section
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def _format_url_section(template, **kwargs):
components = template.split("/")
while components:
try:
return template.format(**kwargs)
except KeyError as key:
formatted_components = template.split("/")
components = [
c for c in formatted_components if "{}".format(key.args[0]) not in c
]
template = "/".join(components)
def build_get_flow_node_runs_request(
    subscription_id,  # type: str
    resource_group_name,  # type: str
    workspace_name,  # type: str
    flow_run_id,  # type: str
    node_name,  # type: str
    **kwargs  # type: Any
):
    # type: (...) -> HttpRequest
    """Build the HTTP GET request that lists node runs of a bulk flow run.

    Optional ``index``, ``start_index``, ``end_index`` and ``aggregation``
    keywords become query-string parameters. ``aggregation`` defaults to
    ``False`` (not ``None``), so it is sent unless explicitly set to None.
    """
    index = kwargs.pop('index', None)  # type: Optional[int]
    start_index = kwargs.pop('start_index', None)  # type: Optional[int]
    end_index = kwargs.pop('end_index', None)  # type: Optional[int]
    aggregation = kwargs.pop('aggregation', False)  # type: Optional[bool]
    accept = "application/json"
    # Resolve the route template and substitute serialized path parameters.
    _url = kwargs.pop("template_url", '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/BulkRuns/{flowRunId}/nodeRuns/{nodeName}')
    _path_args = {
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
        "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
        "workspaceName": _SERIALIZER.url("workspace_name", workspace_name, 'str'),
        "flowRunId": _SERIALIZER.url("flow_run_id", flow_run_id, 'str'),
        "nodeName": _SERIALIZER.url("node_name", node_name, 'str'),
    }
    _url = _format_url_section(_url, **_path_args)
    # Only include query values that were actually supplied.
    _params = kwargs.pop("params", {})  # type: Dict[str, Any]
    if index is not None:
        _params['index'] = _SERIALIZER.query("index", index, 'int')
    if start_index is not None:
        _params['startIndex'] = _SERIALIZER.query("start_index", start_index, 'int')
    if end_index is not None:
        _params['endIndex'] = _SERIALIZER.query("end_index", end_index, 'int')
    if aggregation is not None:
        _params['aggregation'] = _SERIALIZER.query("aggregation", aggregation, 'bool')
    # Merge the Accept header into any caller-provided headers.
    _headers = kwargs.pop("headers", {})  # type: Dict[str, Any]
    _headers['Accept'] = _SERIALIZER.header("accept", accept, 'str')
    return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
4,385 | import functools
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from msrest import Serializer
from .. import models as _models
from .._vendor import _convert_request, _format_url_section
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def _format_url_section(template, **kwargs):
components = template.split("/")
while components:
try:
return template.format(**kwargs)
except KeyError as key:
formatted_components = template.split("/")
components = [
c for c in formatted_components if "{}".format(key.args[0]) not in c
]
template = "/".join(components)
def build_get_flow_node_run_base_path_request(
    subscription_id,  # type: str
    resource_group_name,  # type: str
    workspace_name,  # type: str
    flow_run_id,  # type: str
    node_name,  # type: str
    **kwargs  # type: Any
):
    # type: (...) -> HttpRequest
    """Build the HTTP GET request that fetches a node run's base path.

    Path parameters are URL-serialized into the route template; callers may
    override the template via ``template_url`` and pass extra ``headers``.
    """
    accept = "application/json"
    # Resolve the route template and substitute serialized path parameters.
    _url = kwargs.pop("template_url", '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/BulkRuns/{flowRunId}/nodeRuns/{nodeName}/basePath')
    _path_args = {
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
        "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
        "workspaceName": _SERIALIZER.url("workspace_name", workspace_name, 'str'),
        "flowRunId": _SERIALIZER.url("flow_run_id", flow_run_id, 'str'),
        "nodeName": _SERIALIZER.url("node_name", node_name, 'str'),
    }
    _url = _format_url_section(_url, **_path_args)
    # Merge the Accept header into any caller-provided headers.
    _headers = kwargs.pop("headers", {})  # type: Dict[str, Any]
    _headers['Accept'] = _SERIALIZER.header("accept", accept, 'str')
    return HttpRequest(method="GET", url=_url, headers=_headers, **kwargs)
4,386 | import functools
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from msrest import Serializer
from .. import models as _models
from .._vendor import _convert_request, _format_url_section
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def _format_url_section(template, **kwargs):
components = template.split("/")
while components:
try:
return template.format(**kwargs)
except KeyError as key:
formatted_components = template.split("/")
components = [
c for c in formatted_components if "{}".format(key.args[0]) not in c
]
template = "/".join(components)
def build_get_flow_run_log_content_request(
    subscription_id,  # type: str
    resource_group_name,  # type: str
    workspace_name,  # type: str
    flow_run_id,  # type: str
    **kwargs  # type: Any
):
    # type: (...) -> HttpRequest
    """Build the HTTP GET request that fetches a bulk flow run's log content.

    Path parameters are URL-serialized into the route template; callers may
    override the template via ``template_url`` and pass extra ``headers``.
    """
    accept = "application/json"
    # Resolve the route template and substitute serialized path parameters.
    _url = kwargs.pop("template_url", '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/BulkRuns/{flowRunId}/logContent')
    _path_args = {
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
        "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
        "workspaceName": _SERIALIZER.url("workspace_name", workspace_name, 'str'),
        "flowRunId": _SERIALIZER.url("flow_run_id", flow_run_id, 'str'),
    }
    _url = _format_url_section(_url, **_path_args)
    # Merge the Accept header into any caller-provided headers.
    _headers = kwargs.pop("headers", {})  # type: Dict[str, Any]
    _headers['Accept'] = _SERIALIZER.header("accept", accept, 'str')
    return HttpRequest(method="GET", url=_url, headers=_headers, **kwargs)
4,387 |
def patch_sdk():
    """Hook for hand-written customizations of the generated SDK; no-op by default."""
    pass
4,389 | from azure.core.pipeline.transport import HttpRequest
def _convert_request(request, files=None):
    """Convert an ``azure.core.rest`` request into a pipeline-transport
    ``HttpRequest``, attaching multipart form data when *files* is given.
    """
    # When form files are supplied, the body is carried by the form data
    # rather than by the raw content.
    body = None if files else request.content
    converted = HttpRequest(method=request.method, url=request.url, headers=request.headers, data=body)
    if files:
        converted.set_formdata_body(files)
    return converted
4,390 | from os import PathLike
from pathlib import Path
from typing import IO, AnyStr, Optional, Union
from ._utils import is_arm_id
The provided code snippet includes necessary dependencies for implementing the `load_flow` function. Write a Python function `def load_flow( source: Union[str, PathLike, IO[AnyStr]], *, relative_origin: Optional[str] = None, **kwargs, )` to solve the following problem:
Construct a flow object from a yaml file. :param source: The local yaml source of a compute. Must be either a path to a local file, or an already-open file. If the source is a path, it will be open and read. An exception is raised if the file does not exist. If the source is an open file, the file will be read directly, and an exception is raised if the file is not readable. :type source: Union[PathLike, str, io.TextIOWrapper] :param relative_origin: The origin to be used when deducing the relative locations of files referenced in the parsed yaml. Defaults to the inputted source's directory if it is a file or file path input. Defaults to "./" if the source is a stream input with no name value. :type relative_origin: str :param params_override: Fields to overwrite on top of the yaml file. Format is [{"field1": "value1"}, {"field2": "value2"}] :type params_override: List[Dict] :return: Loaded flow object. :rtype: promptflow.azure.Flow
Here is the function:
def load_flow(
    source: Union[str, PathLike, IO[AnyStr]],
    *,
    relative_origin: Optional[str] = None,
    **kwargs,
):
    """Construct a flow object from a yaml file.

    :param source: The local yaml source of a compute. Must be either a
        path to a local file, or an already-open file.
        If the source is a path, it will be open and read.
        An exception is raised if the file does not exist.
        If the source is an open file, the file will be read directly,
        and an exception is raised if the file is not readable.
    :type source: Union[PathLike, str, io.TextIOWrapper]
    :param relative_origin: The origin to be used when deducing
        the relative locations of files referenced in the parsed yaml.
        Defaults to the inputted source's directory if it is a file or file path input.
        Defaults to "./" if the source is a stream input with no name value.
    :type relative_origin: str
    :param params_override: Fields to overwrite on top of the yaml file.
        Format is [{"field1": "value1"}, {"field2": "value2"}]
    :type params_override: List[Dict]
    :return: Loaded flow object.
    :rtype: promptflow.azure.Flow
    """
    # Local import — presumably to avoid a circular import at module load
    # time; confirm before hoisting to module level.
    from promptflow.azure._entities._flow import Flow
    # ARM ids are already server-side references; return them unchanged.
    if is_arm_id(source):
        return source
    # NOTE(review): relative_origin and **kwargs (e.g. params_override) are
    # accepted but not passed to Flow(...) here — confirm whether they
    # should be applied.
    return Flow(path=Path(source))
4,391 | import ast
import datetime
import threading
client_map = {}
_token_timeout = 60 * 4
def _get_client_from_map(client_key: str):
    """Return the cached container client for *client_key*, or ``None`` if
    absent or past its expiry timestamp."""
    entry = client_map.get(client_key)
    if entry is not None and entry["expire_at"] > datetime.datetime.now():
        return entry["client"]
    return None
def _get_container_lock(client_key: str):
    """Return the per-container lock for *client_key*, creating it lazily.

    NOTE(review): relies on module globals ``_thread_lock`` and
    ``_container_lock_dict`` defined elsewhere in this module.
    """
    with _thread_lock:
        lock = _container_lock_dict.get(client_key)
        if lock is None:
            # First request for this key: allocate and register its lock.
            lock = threading.Lock()
            _container_lock_dict[client_key] = lock
        return lock
def _get_resource_token(
    container_name: str, subscription_id: str, resource_group_name: str, workspace_name: str
) -> object:
    """Fetch a Cosmos DB resource token for *container_name* via PFClient.

    Returns the parsed token payload (a dict with account endpoint,
    database/container names, resource URL and resource token).
    """
    # Local imports — presumably to keep azure/promptflow optional at module
    # load time; confirm before hoisting to module level.
    from azure.identity import DefaultAzureCredential
    from promptflow.azure import PFClient
    pf_client = PFClient(
        credential=DefaultAzureCredential(),
        subscription_id=subscription_id,
        resource_group_name=resource_group_name,
        workspace_name=workspace_name,
    )
    # Private API: acquires a write-capable Cosmos token for the container.
    token_resp = pf_client._traces._get_cosmos_db_token(container_name=container_name, acquire_write=True)
    # Support json with single quotes — ast.literal_eval safely parses the
    # dict literal without executing arbitrary code.
    return ast.literal_eval(token_resp)
def _init_container_client(endpoint: str, database_name: str, container_name: str, resource_url: str, token: str):
    """Create a Cosmos DB container client authenticated with a resource token."""
    from azure.cosmos.cosmos_client import CosmosClient

    # CosmosClient accepts a {resource_url: token} mapping as credential.
    client = CosmosClient(endpoint, {resource_url: token})
    database = client.get_database_client(database_name)
    return database.get_container_client(container_name)
def _get_db_client_key(container_name: str, subscription_id: str, resource_group_name: str, workspace_name: str) -> str:
return f"{subscription_id}_{resource_group_name}_{workspace_name}_{container_name}"
def get_client(container_name: str, subscription_id: str, resource_group_name: str, workspace_name: str):
    """Get (or lazily create) a Cosmos DB container client for a workspace.

    Clients are cached per container for ``_token_timeout`` seconds.  A
    per-container lock ensures that only one thread requests a fresh resource
    token when the cached entry is missing or expired.
    """
    cache_key = _get_db_client_key(container_name, subscription_id, resource_group_name, workspace_name)
    cached = _get_client_from_map(cache_key)
    if cached is not None:
        return cached
    # Use lock to avoid too many requests for same container token
    with _get_container_lock(cache_key):
        # Re-check after acquiring the lock: another thread may have
        # refreshed the cache while we were waiting.
        cached = _get_client_from_map(cache_key)
        if cached is not None:
            return cached
        token = _get_resource_token(container_name, subscription_id, resource_group_name, workspace_name)
        client = _init_container_client(
            endpoint=token["accountEndpoint"],
            database_name=token["databaseName"],
            container_name=token["containerName"],
            resource_url=token["resourceUrl"],
            token=token["resourceToken"],
        )
        client_map[cache_key] = {
            "expire_at": datetime.datetime.now() + datetime.timedelta(0, _token_timeout),
            "client": client,
        }
    return client
import json
import logging
from dataclasses import asdict, dataclass
from enum import Enum
from typing import Any, Dict, List, Optional, Type, TypeVar
from promptflow._constants import CONNECTION_NAME_PROPERTY
from .multimedia import Image
from .types import AssistantDefinition, FilePath, PromptTemplate, Secret
T = TypeVar("T", bound="Enum")
def _deserialize_enum(cls: Type[T], val) -> T:
if not all(isinstance(i.value, str) for i in cls):
return val
typ = next((i for i in cls if val.lower() == i.value.lower()), None)
# Keep string value for unknown type, as they may be resolved later after some requisites imported.
# Type resolve will be ensured in 'ensure_node_inputs_type' before execution.
return typ if typ else val | null |
import os
import typing
from opentelemetry import trace
from opentelemetry.sdk.resources import Resource
from opentelemetry.sdk.trace import TracerProvider
from ._constants import (
PF_TRACING_SKIP_LOCAL_SETUP_ENVIRON,
RESOURCE_ATTRIBUTES_SERVICE_NAME,
ResourceAttributesFieldName,
)
from ._openai_injector import inject_openai_api
def _skip_tracing_local_setup() -> bool:
    """True when the skip-local-setup environment variable is set to "true" (any case)."""
    flag = os.getenv(PF_TRACING_SKIP_LOCAL_SETUP_ENVIRON, "false")
    return str(flag).lower() == "true"
def _set_tracer_provider(res_attrs: typing.Dict[str, str]) -> None:
    """Install an OpenTelemetry TracerProvider carrying the given resource attributes.

    :param res_attrs: Resource attributes to attach to all spans from this provider.
    """
    res = Resource(attributes=res_attrs)
    tracer_provider = TracerProvider(resource=res)
    # `trace.set_tracer_provider` is a no-op once a real provider exists, so an
    # already-configured process must go through the force-set helper instead
    # (both helpers are defined elsewhere in this module).
    if _is_tracer_provider_set():
        _force_set_tracer_provider(tracer_provider)
    else:
        trace.set_tracer_provider(tracer_provider)
def _is_devkit_installed() -> bool:
try:
from promptflow._sdk._tracing import setup_exporter_to_pfs, start_trace_with_devkit # noqa: F401
return True
except ImportError:
return False
class ResourceAttributesFieldName:
    """Resource-attribute key names used by promptflow tracing."""
    # Standard OpenTelemetry service identifier key.
    SERVICE_NAME = "service.name"
    # Key used to correlate spans belonging to one tracing session.
    SESSION_ID = "session.id"
# Default value recorded under the "service.name" resource attribute.
RESOURCE_ATTRIBUTES_SERVICE_NAME = "promptflow"
The provided code snippet includes necessary dependencies for implementing the `start_trace` function. Write a Python function `def start_trace( *, resource_attributes: typing.Optional[dict] = None, session: typing.Optional[str] = None, **kwargs, )` to solve the following problem:
Start a tracing session. Instrument `openai`, and set tracer provider for current tracing session. :param resource_attributes: Specify the resource attributes for current tracing session. :type resource_attributes: typing.Optional[dict] :param session: Specify the session id for current tracing session. :type session: typing.Optional[str]
Here is the function:
def start_trace(
    *,
    resource_attributes: typing.Optional[dict] = None,
    session: typing.Optional[str] = None,
    **kwargs,
):
    """Start a tracing session.

    Builds the resource attributes for the session, installs a tracer
    provider, and — unless local setup is skipped via environment variable —
    hands off to the promptflow devkit when it is installed.

    :param resource_attributes: Specify the resource attributes for current tracing session.
    :type resource_attributes: typing.Optional[dict]
    :param session: Specify the session id for current tracing session.
    :type session: typing.Optional[str]
    """
    # Service name first, then session id, then caller-supplied attributes
    # (which therefore take precedence on key collisions).
    attributes = {ResourceAttributesFieldName.SERVICE_NAME: RESOURCE_ATTRIBUTES_SERVICE_NAME}
    if session is not None:
        attributes[ResourceAttributesFieldName.SESSION_ID] = session
    if isinstance(resource_attributes, dict):
        attributes.update(resource_attributes)
    _set_tracer_provider(attributes)

    if _skip_tracing_local_setup():
        return
    if not _is_devkit_installed():
        return
    from promptflow._sdk._tracing import start_trace_with_devkit

    start_trace_with_devkit(
        session_id=session,
        attrs=kwargs.get("attributes", None),
        run=kwargs.get("run", None),
    )
import os
import typing
from opentelemetry import trace
from opentelemetry.sdk.resources import Resource
from opentelemetry.sdk.trace import TracerProvider
from ._constants import (
PF_TRACING_SKIP_LOCAL_SETUP_ENVIRON,
RESOURCE_ATTRIBUTES_SERVICE_NAME,
ResourceAttributesFieldName,
)
from ._openai_injector import inject_openai_api
def _is_devkit_installed() -> bool:
def inject_openai_api():
def setup_exporter_from_environ() -> None:
    """Instrument the OpenAI client and, when the devkit is present, configure span export."""
    # openai instrumentation
    inject_openai_api()
    if not _is_devkit_installed():
        return
    from promptflow._sdk._tracing import setup_exporter_to_pfs

    setup_exporter_to_pfs()
import asyncio
import functools
import importlib
import json
import logging
import os
from importlib.metadata import version
import openai
from promptflow._core.operation_context import OperationContext
from ._trace import _traced_async, _traced_sync
from .contracts.trace import TraceType
def available_openai_apis_and_injectors():
    """
    Generates a sequence of tuples containing OpenAI API classes, method names, and
    corresponding injector functions based on whether the legacy OpenAI interface is used.
    This function handles the discrepancy reported in https://github.com/openai/openai-python/issues/996,
    where async interfaces were not recognized as coroutines. It ensures that decorators
    are applied correctly to both synchronous and asynchronous methods.
    Yields:
        Tuples of (api_class, method_name, injector_function)

    NOTE(review): consumers such as ``recover_openai_api`` unpack FOUR items per
    tuple, so ``_generate_api_and_injector`` (defined elsewhere) appears to yield
    one more element than the three documented above — confirm and reconcile.
    """
    yield from _generate_api_and_injector(_openai_api_list())
The provided code snippet includes necessary dependencies for implementing the `recover_openai_api` function. Write a Python function `def recover_openai_api()` to solve the following problem:
This function restores the original create methods of the OpenAI API classes by assigning them back from the _original attributes of the modified methods.
Here is the function:
def recover_openai_api():
    """Undo API injection by restoring each patched OpenAI method from its
    ``_original`` attribute, where one was recorded by the injector.
    """
    for api, method, _, _ in available_openai_apis_and_injectors():
        patched = getattr(api, method)
        if hasattr(patched, "_original"):
            setattr(api, method, patched._original)
import re
from promptflow._core._errors import NotSupported
from promptflow.contracts.flow import InputAssignment, InputValueType
from promptflow.executor._errors import (
InputNotFound,
InputNotFoundFromAncestorNodeOutput,
InvalidReferenceProperty,
UnsupportedReference,
)
def parse_node_property(node_name, node_val, property=""):
    """Resolve a property path (e.g. ``.a['b'][0]``) against a node's output value.

    :param node_name: Name of the node, used only in error messages.
    :param node_val: The node's output value to drill into.
    :param property: Property path; split into segments with the module-level
        ``property_pattern`` regex (defined elsewhere in this module).
    :return: The value found at the end of the property path.
    :raises InvalidReferenceProperty: If any path segment cannot be resolved.
    """
    val = node_val
    property_parts = re.findall(property_pattern, property)
    try:
        for part in property_parts:
            # Each regex match is a tuple of alternative groups; take the
            # (single) non-empty group as the current path segment.
            part = [p for p in part if p][0]
            if part.startswith("[") and part.endswith("]"):
                # Subscript segment: quoted string key or integer index.
                index = part[1:-1]
                if index.startswith("'") and index.endswith("'") or index.startswith('"') and index.endswith('"'):
                    index = index[1:-1]
                elif index.isdigit():
                    index = int(index)
                else:
                    raise InvalidReferenceProperty(
                        message_format=(
                            "Flow execution failed. "
                            "Invalid index '{index}' when accessing property '{property}' of the node '{node_name}'. "
                            "Please check the index and try again."
                        ),
                        index=index,
                        property=property,
                        node_name=node_name,
                    )
                val = val[index]
            else:
                # Attribute-style segment: dict lookup for dicts, getattr otherwise.
                if isinstance(val, dict):
                    val = val[part]
                else:
                    val = getattr(val, part)
    except (KeyError, IndexError, AttributeError) as e:
        # Any resolution failure is surfaced uniformly as InvalidReferenceProperty,
        # chained to the underlying lookup error.
        message_format = (
            "Flow execution failed. "
            "Invalid property '{property}' when accessing the node '{node_name}'. "
            "Please check the property and try again."
        )
        raise InvalidReferenceProperty(message_format=message_format, property=property, node_name=node_name) from e
    return val
def parse_value(i: InputAssignment, nodes_outputs: dict, flow_inputs: dict):
    """Resolve an input assignment to its concrete runtime value.

    Literal assignments are returned as-is; flow-input references are looked
    up in ``flow_inputs``; node references are looked up in ``nodes_outputs``
    and drilled into with ``parse_node_property``.

    :param i: The input assignment to resolve.
    :param nodes_outputs: Outputs of completed ancestor nodes, keyed by node name.
    :param flow_inputs: Flow-level input values, keyed by input name.
    :return: The resolved value.
    :raises InputNotFound: If a referenced flow input is absent.
    :raises UnsupportedReference: If a node reference targets a non-"output" section.
    :raises InputNotFoundFromAncestorNodeOutput: If a referenced node has no output yet.
    :raises NotSupported: If the assignment's value type is unrecognized.
    """
    if i.value_type == InputValueType.LITERAL:
        return i.value
    if i.value_type == InputValueType.FLOW_INPUT:
        if i.value not in flow_inputs:
            flow_input_keys = ", ".join(flow_inputs.keys()) if flow_inputs is not None else None
            raise InputNotFound(
                message_format=(
                    "Flow execution failed. "
                    "The input '{input_name}' is not found from flow inputs '{flow_input_keys}'. "
                    "Please check the input name and try again."
                ),
                input_name=i.value,
                flow_input_keys=flow_input_keys,
            )
        return flow_inputs[i.value]
    if i.value_type == InputValueType.NODE_REFERENCE:
        if i.section != "output":
            raise UnsupportedReference(
                message_format=(
                    "Flow execution failed. "
                    "The section '{reference_section}' of reference is currently unsupported. "
                    "Please specify the output part of the node '{reference_node_name}'."
                ),
                reference_section=i.section,
                reference_node_name=i.value,
            )
        if i.value not in nodes_outputs:
            # List every available node-output key for the error message.
            # (The original comprehension filtered on `if nodes_outputs`, i.e.
            # the dict's truthiness, which is equivalent but misleading.)
            node_output_keys = list(nodes_outputs.keys())
            raise InputNotFoundFromAncestorNodeOutput(
                message_format=(
                    "Flow execution failed. "
                    "The input '{input_name}' is not found from ancestor node outputs {node_output_keys}. "
                    "Please check the node name and try again."
                ),
                input_name=i.value,
                node_output_keys=node_output_keys,
            )
        return parse_node_property(i.value, nodes_outputs[i.value], i.property)
    raise NotSupported(
        message_format=(
            "Flow execution failed. "
            "The type '{input_type}' is currently unsupported. "
            "Please choose from available types: {supported_output_type} and try again."
        ),
        input_type=i.value_type.value if hasattr(i.value_type, "value") else i.value_type,
        supported_output_type=[value_type.value for value_type in InputValueType],
    )
import asyncio
import contextvars
import inspect
import os
import signal
import threading
import time
import traceback
from asyncio import Task
from concurrent.futures import ThreadPoolExecutor
from typing import Any, Dict, List, Tuple
from promptflow._core.flow_execution_context import FlowExecutionContext
from promptflow._core.tools_manager import ToolsManager
from promptflow._utils.logger_utils import flow_logger
from promptflow._utils.thread_utils import ThreadWithContextVars
from promptflow._utils.utils import extract_user_frame_summaries, set_context
from promptflow.contracts.flow import Node
from promptflow.executor._dag_manager import DAGManager
from promptflow.executor._errors import NoNodeExecutedError
def monitor_coroutine_after_cancellation(loop: asyncio.AbstractEventLoop):
    """Exit the process when all coroutines are done.
    We add this function because if a sync tool is running in async mode,
    the task will be cancelled after receiving SIGINT,
    but the thread will not be terminated and blocks the program from exiting.
    :param loop: event loop of main thread
    :type loop: asyncio.AbstractEventLoop
    """
    # TODO: Use environment variable to ensure it is flow test scenario to avoid unexpected exit.
    # E.g. Customer is integrating Promptflow in their own code, and they want to handle SIGINT by themselves.
    # BUGFIX: os.environ values are strings; convert before the numeric
    # comparison below, otherwise setting PF_WAIT_SECONDS_AFTER_CANCELLATION
    # would make `time.time() - thread_start_time > max_wait_seconds` raise
    # TypeError (float > str).
    max_wait_seconds = int(os.environ.get("PF_WAIT_SECONDS_AFTER_CANCELLATION", 30))
    all_tasks_are_done = False
    exceeded_wait_seconds = False
    thread_start_time = time.time()
    flow_logger.info(f"Start to monitor coroutines after cancellation, max wait seconds: {max_wait_seconds}s")
    while not all_tasks_are_done and not exceeded_wait_seconds:
        # For sync tool running in async mode, the task will be cancelled,
        # but the thread will not be terminated, we exit the program despite of it.
        # TODO: Detect whether there is any sync tool running in async mode,
        # if there is none, avoid sys.exit and let the program exit gracefully.
        all_tasks_are_done = all(task.done() for task in asyncio.all_tasks(loop))
        if all_tasks_are_done:
            flow_logger.info("All coroutines are done. Exiting.")
            # We cannot ensure persist_flow_run is called before the process exits in the case that there is
            # non-daemon thread running, sleep for 3 seconds as a best effort.
            # If the caller wants to ensure flow status is cancelled in storage, it should check the flow status
            # after timeout and set the flow status to Cancelled.
            time.sleep(3)
            # Use os._exit instead of sys.exit, so that the process can stop without
            # waiting for the thread created by run_in_executor to finish.
            # sys.exit: https://docs.python.org/3/library/sys.html#sys.exit
            # Raise a SystemExit exception, signaling an intention to exit the interpreter.
            # Specifically, it does not exit non-daemon thread
            # os._exit https://docs.python.org/3/library/os.html#os._exit
            # Exit the process with status n, without calling cleanup handlers, flushing stdio buffers, etc.
            # Specifically, it stops process without waiting for non-daemon thread.
            os._exit(0)
        exceeded_wait_seconds = time.time() - thread_start_time > max_wait_seconds
        time.sleep(1)
    if exceeded_wait_seconds:
        if not all_tasks_are_done:
            flow_logger.info(
                f"Not all coroutines are done within {max_wait_seconds}s"
                " after cancellation. Exiting the process despite of them."
                " Please config the environment variable"
                " PF_WAIT_SECONDS_AFTER_CANCELLATION if your tool needs"
                " more time to clean up after cancellation."
            )
            remaining_tasks = [task for task in asyncio.all_tasks(loop) if not task.done()]
            flow_logger.info(f"Remaining tasks: {[task.get_name() for task in remaining_tasks]}")
        time.sleep(3)
        os._exit(0)
The provided code snippet includes necessary dependencies for implementing the `signal_handler` function. Write a Python function `def signal_handler(sig, frame)` to solve the following problem:
Start a thread to monitor coroutines after receiving signal.
Here is the function:
def signal_handler(sig, frame):
    """Handle SIGINT/SIGTERM: spawn a coroutine-monitor thread, then raise KeyboardInterrupt."""
    flow_logger.info(f"Received signal {sig}({signal.Signals(sig).name}), start coroutine monitor thread.")
    # The monitor watches the running loop and force-exits the process once all
    # coroutines finish (or a timeout elapses) after the cancellation below.
    running_loop = asyncio.get_running_loop()
    ThreadWithContextVars(target=monitor_coroutine_after_cancellation, args=(running_loop,)).start()
    raise KeyboardInterrupt
import asyncio
import contextvars
import inspect
import os
import signal
import threading
import time
import traceback
from asyncio import Task
from concurrent.futures import ThreadPoolExecutor
from typing import Any, Dict, List, Tuple
from promptflow._core.flow_execution_context import FlowExecutionContext
from promptflow._core.tools_manager import ToolsManager
from promptflow._utils.logger_utils import flow_logger
from promptflow._utils.thread_utils import ThreadWithContextVars
from promptflow._utils.utils import extract_user_frame_summaries, set_context
from promptflow.contracts.flow import Node
from promptflow.executor._dag_manager import DAGManager
from promptflow.executor._errors import NoNodeExecutedError
PF_ASYNC_NODE_SCHEDULER_EXECUTE_TASK_NAME = "_pf_async_nodes_scheduler.execute"
DEFAULT_TASK_LOGGING_INTERVAL = 60
class AsyncNodesScheduler:
    """Schedules flow nodes for execution on the current asyncio event loop.

    Async tools run as native coroutines; sync tools are dispatched to a
    thread pool.  Concurrency is bounded by a semaphore of ``node_concurrency``
    and long-running tasks are watched by a daemon monitor thread.
    """

    def __init__(
        self,
        tools_manager: ToolsManager,
        node_concurrency: int,
    ) -> None:
        self._tools_manager = tools_manager
        self._node_concurrency = node_concurrency
        # Bookkeeping shared with the long-running-coroutine monitor thread.
        self._task_start_time = {}
        self._task_last_log_time = {}
        self._dag_manager_completed_event = threading.Event()

    async def execute(
        self,
        nodes: List[Node],
        inputs: Dict[str, Any],
        context: FlowExecutionContext,
    ) -> Tuple[dict, dict]:
        """Run ``nodes`` with ``inputs``; return (completed_nodes_outputs, bypassed_nodes)."""
        # TODO: Provide cancel API
        if threading.current_thread() is threading.main_thread():
            signal.signal(signal.SIGINT, signal_handler)
            signal.signal(signal.SIGTERM, signal_handler)
        else:
            flow_logger.info(
                "Current thread is not main thread, skip signal handler registration in AsyncNodesScheduler."
            )
        # Semaphore should be created in the loop, otherwise it will not work.
        loop = asyncio.get_running_loop()
        self._semaphore = asyncio.Semaphore(self._node_concurrency)
        monitor = ThreadWithContextVars(
            target=monitor_long_running_coroutine,
            args=(loop, self._task_start_time, self._task_last_log_time, self._dag_manager_completed_event),
            daemon=True,
        )
        monitor.start()
        # Set the name of scheduler tasks to avoid monitoring its duration
        task = asyncio.current_task()
        task.set_name(PF_ASYNC_NODE_SCHEDULER_EXECUTE_TASK_NAME)
        parent_context = contextvars.copy_context()
        executor = ThreadPoolExecutor(
            max_workers=self._node_concurrency, initializer=set_context, initargs=(parent_context,)
        )
        # Note that we must not use `with` statement to manage the executor.
        # This is because it will always call `executor.shutdown()` when exiting the `with` block.
        # Then the event loop will wait for all tasks to be completed before raising the cancellation error.
        # See reference: https://docs.python.org/3/library/concurrent.futures.html#concurrent.futures.Executor
        outputs = await self._execute_with_thread_pool(executor, nodes, inputs, context)
        executor.shutdown()
        return outputs

    async def _execute_with_thread_pool(
        self,
        executor: ThreadPoolExecutor,
        nodes: List[Node],
        inputs: Dict[str, Any],
        context: FlowExecutionContext,
    ) -> Tuple[dict, dict]:
        """Drive the DAG to completion, submitting ready nodes as tasks."""
        flow_logger.info(f"Start to run {len(nodes)} nodes with the current event loop.")
        dag_manager = DAGManager(nodes, inputs)
        task2nodes = self._execute_nodes(dag_manager, context, executor)
        while not dag_manager.completed():
            task2nodes = await self._wait_and_complete_nodes(task2nodes, dag_manager)
            submitted_tasks2nodes = self._execute_nodes(dag_manager, context, executor)
            task2nodes.update(submitted_tasks2nodes)
        # Set the event to notify the monitor thread to exit
        # Ref: https://docs.python.org/3/library/threading.html#event-objects
        self._dag_manager_completed_event.set()
        for node in dag_manager.bypassed_nodes:
            dag_manager.completed_nodes_outputs[node] = None
        return dag_manager.completed_nodes_outputs, dag_manager.bypassed_nodes

    async def _wait_and_complete_nodes(self, task2nodes: Dict[Task, Node], dag_manager: Dict) -> Dict[Task, Node]:
        """Wait for at least one task to finish and record its output; return pending tasks."""
        if not task2nodes:
            raise NoNodeExecutedError("No nodes are ready for execution, but the flow is not completed.")
        tasks = [task for task in task2nodes]
        for task in tasks:
            self._task_start_time[task] = time.time()
        done, _ = await asyncio.wait(tasks, return_when=asyncio.FIRST_COMPLETED)
        dag_manager.complete_nodes({task2nodes[task].name: task.result() for task in done})
        for task in done:
            del task2nodes[task]
        return task2nodes

    def _execute_nodes(
        self,
        dag_manager: DAGManager,
        context: FlowExecutionContext,
        executor: ThreadPoolExecutor,
    ) -> Dict[Task, Node]:
        """Bypass skippable nodes, then submit every ready node as an asyncio task."""
        # Bypass nodes and update node run info until there are no nodes to bypass
        nodes_to_bypass = dag_manager.pop_bypassable_nodes()
        while nodes_to_bypass:
            for node in nodes_to_bypass:
                context.bypass_node(node)
            nodes_to_bypass = dag_manager.pop_bypassable_nodes()
        # Create tasks for ready nodes
        return {
            self._create_node_task(node, dag_manager, context, executor): node for node in dag_manager.pop_ready_nodes()
        }

    async def run_task_with_semaphore(self, coroutine):
        """Await ``coroutine`` while holding the concurrency semaphore."""
        async with self._semaphore:
            return await coroutine

    def _create_node_task(
        self,
        node: Node,
        dag_manager: DAGManager,
        context: FlowExecutionContext,
        executor: ThreadPoolExecutor,
    ) -> Task:
        """Wrap a node invocation (async or sync-in-thread) in a named asyncio task."""
        f = self._tools_manager.get_tool(node.name)
        kwargs = dag_manager.get_node_valid_inputs(node, f)
        if inspect.iscoroutinefunction(f):
            # For async task, it will not be executed before calling create_task.
            task = context.invoke_tool_async(node, f, kwargs)
        else:
            # For sync task, convert it to async task and run it in executor thread.
            # Even though the task is put to the thread pool, thread.start will only be triggered after create_task.
            task = self._sync_function_to_async_task(executor, context, node, f, kwargs)
        # Set the name of the task to the node name for debugging purpose
        # It does not need to be unique by design.
        # Wrap the coroutine in a task with asyncio.create_task to schedule it for event loop execution
        # The task is created and added to the event loop, but the exact execution depends on loop's scheduling
        return asyncio.create_task(self.run_task_with_semaphore(task), name=node.name)

    # BUGFIX: must be a staticmethod. It is invoked as
    # `self._sync_function_to_async_task(executor, context, node, f, kwargs)`
    # with all five parameters supplied explicitly; without the decorator the
    # bound call would shift `self` into `executor` (6 args for 5 params).
    # It is also referenced by name in `monitor_long_running_coroutine` via
    # `AsyncNodesScheduler._sync_function_to_async_task.__name__`.
    @staticmethod
    async def _sync_function_to_async_task(
        executor: ThreadPoolExecutor,
        context: FlowExecutionContext,
        node,
        f,
        kwargs,
    ):
        """Run a sync tool in the thread pool, awaited from the event loop."""
        # The task will not be executed before calling create_task.
        return await asyncio.get_running_loop().run_in_executor(executor, context.invoke_tool, node, f, kwargs)
def log_stack_recursively(task: asyncio.Task, elapse_time: float):
    """Recursively log the frame of a task or coroutine.
    Traditional stacktrace would stop at the first awaited nested inside the coroutine.
    :param task: Task to log
    :type task_or_coroutine: asyncio.Task
    :param elapse_time: Seconds elapsed since the task started
    :type elapse_time: float
    """
    # We cannot use task.get_stack() to get the stack, because only one stack frame is
    # returned for a suspended coroutine because of the implementation of CPython
    # Ref: https://github.com/python/cpython/blob/main/Lib/asyncio/tasks.py
    # "only one stack frame is returned for a suspended coroutine."
    task_or_coroutine = task
    frame_summaries = []
    # Collect frame_summaries along async call chain
    while True:
        if isinstance(task_or_coroutine, asyncio.Task):
            # For a task, get the coroutine it's running
            # NOTE(review): `asyncio.coroutine` was removed in Python 3.11; this local
            # annotation is never evaluated at runtime, but consider typing.Coroutine.
            coroutine: asyncio.coroutine = task_or_coroutine.get_coro()
        elif asyncio.iscoroutine(task_or_coroutine):
            coroutine = task_or_coroutine
        else:
            # Reached the innermost awaited object (not a task or coroutine): stop.
            break
        # NOTE(review): cr_frame is None once a coroutine has finished; then
        # extract_stack(None) would capture the *current* thread's stack — this
        # assumes the task is still suspended when logged; confirm with callers.
        frame = coroutine.cr_frame
        stack_summary: traceback.StackSummary = traceback.extract_stack(frame)
        frame_summaries.extend(stack_summary)
        # Follow the await chain one level deeper.
        task_or_coroutine = coroutine.cr_await
    # Format the frame summaries to warning message
    if frame_summaries:
        # extract_user_frame_summaries presumably filters to user-code frames;
        # see promptflow._utils.utils for the exact rule.
        user_frame_summaries = extract_user_frame_summaries(frame_summaries)
        stack_messages = traceback.format_list(user_frame_summaries)
        all_stack_message = "".join(stack_messages)
        task_msg = (
            f"Task {task.get_name()} has been running for {elapse_time:.0f} seconds,"
            f" stacktrace:\n{all_stack_message}"
        )
        flow_logger.warning(task_msg)
def monitor_long_running_coroutine(
    loop: asyncio.AbstractEventLoop,
    task_start_time: dict,
    task_last_log_time: dict,
    dag_manager_completed_event: threading.Event,
):
    """Periodically log stacktraces of node tasks that exceed the peeking interval.

    Runs until ``dag_manager_completed_event`` is set.  The interval defaults
    to DEFAULT_TASK_LOGGING_INTERVAL and may be overridden by the
    positive-integer environment variable PF_TASK_PEEKING_INTERVAL.
    """
    flow_logger.info("monitor_long_running_coroutine started")

    interval = DEFAULT_TASK_LOGGING_INTERVAL
    configured = os.environ.get("PF_TASK_PEEKING_INTERVAL")
    if configured:
        try:
            parsed = int(configured)
            if parsed <= 0:
                raise ValueError
            interval = parsed
            flow_logger.info(
                f"Using value of PF_TASK_PEEKING_INTERVAL in environment variable as "
                f"logging interval: {configured}"
            )
        except ValueError:
            flow_logger.warning(
                f"Value of PF_TASK_PEEKING_INTERVAL in environment variable ('{configured}') "
                f"is invalid, use default value {DEFAULT_TASK_LOGGING_INTERVAL}"
            )

    while not dag_manager_completed_event.is_set():
        for task in (t for t in asyncio.all_tasks(loop) if not t.done()):
            # Never monitor the scheduler's own task.
            if task.get_name() == PF_ASYNC_NODE_SCHEDULER_EXECUTE_TASK_NAME:
                continue
            # Sync tools run in executor threads and are monitored by RepeatLogTimer,
            # so skip tasks whose top frame is the sync-to-async wrapper.
            stack = task.get_stack()
            if (
                stack
                and stack[-1].f_code
                and stack[-1].f_code.co_name == AsyncNodesScheduler._sync_function_to_async_task.__name__
            ):
                continue
            started = task_start_time.get(task)
            if started is None:
                flow_logger.warning(f"task {task.get_name()} has no start time, which should not happen")
                continue
            duration = time.time() - started
            if duration <= interval:
                continue
            last_logged = task_last_log_time.get(task)
            if last_logged is None or time.time() - last_logged > interval:
                log_stack_recursively(task, duration)
                task_last_log_time[task] = time.time()
        time.sleep(1)
import asyncio
import contextlib
import copy
import functools
import inspect
import os
import uuid
from pathlib import Path
from threading import current_thread
from types import GeneratorType
from typing import Any, Callable, Dict, List, Mapping, Optional, Tuple
from opentelemetry.trace.status import StatusCode
from promptflow._constants import LINE_NUMBER_KEY
from promptflow._core._errors import NotSupported, UnexpectedError
from promptflow._core.cache_manager import AbstractCacheManager
from promptflow._core.flow_execution_context import FlowExecutionContext
from promptflow._core.metric_logger import add_metric_logger, remove_metric_logger
from promptflow._core.operation_context import OperationContext
from promptflow._core.run_tracker import RunTracker
from promptflow._core.tool import STREAMING_OPTION_PARAMETER_ATTR
from promptflow._core.tools_manager import ToolsManager
from promptflow._utils.context_utils import _change_working_dir
from promptflow._utils.execution_utils import (
apply_default_value_for_input,
collect_lines,
extract_aggregation_inputs,
get_aggregation_inputs_properties,
)
from promptflow._utils.logger_utils import flow_logger, logger
from promptflow._utils.multimedia_utils import (
load_multimedia_data,
load_multimedia_data_recursively,
persist_multimedia_data,
)
from promptflow._utils.utils import get_int_env_var, transpose
from promptflow._utils.yaml_utils import load_yaml
from promptflow.contracts.flow import Flow, FlowInputDefinition, InputAssignment, InputValueType, Node
from promptflow.contracts.run_info import FlowRunInfo, Status
from promptflow.contracts.run_mode import RunMode
from promptflow.exceptions import PromptflowException
from promptflow.executor import _input_assignment_parser
from promptflow.executor._async_nodes_scheduler import AsyncNodesScheduler
from promptflow.executor._errors import NodeOutputNotFound, OutputReferenceNotExist, SingleNodeValidationError
from promptflow.executor._flow_nodes_scheduler import (
DEFAULT_CONCURRENCY_BULK,
DEFAULT_CONCURRENCY_FLOW,
FlowNodesScheduler,
)
from promptflow.executor._result import AggregationResult, LineResult
from promptflow.executor._tool_resolver import ToolResolver
from promptflow.executor.flow_validator import FlowValidator
from promptflow.storage import AbstractRunStorage
from promptflow.storage._run_storage import DefaultRunStorage
from promptflow.tracing._openai_injector import inject_openai_api
from promptflow.tracing._trace import (
enrich_span_with_context,
enrich_span_with_input,
enrich_span_with_trace_type,
open_telemetry_tracer,
)
from promptflow.tracing.contracts.trace import TraceType
The provided code snippet includes necessary dependencies for implementing the `_inject_stream_options` function. Write a Python function `def _inject_stream_options(should_stream: Callable[[], bool], streaming_option_parameter="stream")` to solve the following problem:
Inject the stream options to the decorated function. AzureOpenAI.completion and AzureOpenAI.chat tools support both stream and non-stream mode. The stream mode is controlled by the "stream" parameter.
Here is the function:
def _inject_stream_options(should_stream: Callable[[], bool], streaming_option_parameter="stream"):
"""Inject the stream options to the decorated function.
AzureOpenAI.completion and AzureOpenAI.chat tools support both stream and non-stream mode.
The stream mode is controlled by the "stream" parameter.
"""
def stream_option_decorator(f):
# We only wrap the function if it has a "stream" parameter
signature = inspect.signature(f)
if streaming_option_parameter not in signature.parameters:
return f
@functools.wraps(f)
def wrapper(*args, **kwargs):
kwargs = kwargs or {}
kwargs.update({streaming_option_parameter: should_stream()})
return f(*args, **kwargs)
return wrapper
return stream_option_decorator | Inject the stream options to the decorated function. AzureOpenAI.completion and AzureOpenAI.chat tools support both stream and non-stream mode. The stream mode is controlled by the "stream" parameter. |
import asyncio
import contextlib
import copy
import functools
import inspect
import os
import uuid
from pathlib import Path
from threading import current_thread
from types import GeneratorType
from typing import Any, Callable, Dict, List, Mapping, Optional, Tuple
from opentelemetry.trace.status import StatusCode
from promptflow._constants import LINE_NUMBER_KEY
from promptflow._core._errors import NotSupported, UnexpectedError
from promptflow._core.cache_manager import AbstractCacheManager
from promptflow._core.flow_execution_context import FlowExecutionContext
from promptflow._core.metric_logger import add_metric_logger, remove_metric_logger
from promptflow._core.operation_context import OperationContext
from promptflow._core.run_tracker import RunTracker
from promptflow._core.tool import STREAMING_OPTION_PARAMETER_ATTR
from promptflow._core.tools_manager import ToolsManager
from promptflow._utils.context_utils import _change_working_dir
from promptflow._utils.execution_utils import (
apply_default_value_for_input,
collect_lines,
extract_aggregation_inputs,
get_aggregation_inputs_properties,
)
from promptflow._utils.logger_utils import flow_logger, logger
from promptflow._utils.multimedia_utils import (
load_multimedia_data,
load_multimedia_data_recursively,
persist_multimedia_data,
)
from promptflow._utils.utils import get_int_env_var, transpose
from promptflow._utils.yaml_utils import load_yaml
from promptflow.contracts.flow import Flow, FlowInputDefinition, InputAssignment, InputValueType, Node
from promptflow.contracts.run_info import FlowRunInfo, Status
from promptflow.contracts.run_mode import RunMode
from promptflow.exceptions import PromptflowException
from promptflow.executor import _input_assignment_parser
from promptflow.executor._async_nodes_scheduler import AsyncNodesScheduler
from promptflow.executor._errors import NodeOutputNotFound, OutputReferenceNotExist, SingleNodeValidationError
from promptflow.executor._flow_nodes_scheduler import (
DEFAULT_CONCURRENCY_BULK,
DEFAULT_CONCURRENCY_FLOW,
FlowNodesScheduler,
)
from promptflow.executor._result import AggregationResult, LineResult
from promptflow.executor._tool_resolver import ToolResolver
from promptflow.executor.flow_validator import FlowValidator
from promptflow.storage import AbstractRunStorage
from promptflow.storage._run_storage import DefaultRunStorage
from promptflow.tracing._openai_injector import inject_openai_api
from promptflow.tracing._trace import (
enrich_span_with_context,
enrich_span_with_input,
enrich_span_with_trace_type,
open_telemetry_tracer,
)
from promptflow.tracing.contracts.trace import TraceType
The provided code snippet includes necessary dependencies for implementing the `enable_streaming_for_llm_tool` function. Write a Python function `def enable_streaming_for_llm_tool(f)` to solve the following problem:
Enable the stream mode for LLM tools that support it. :param f: The function to wrap. :type f: function :return: The wrapped function. :rtype: function AzureOpenAI.completion and AzureOpenAI.chat tools support both stream and non-stream mode. The stream mode is turned off by default. Use this wrapper to turn it on.
Here is the function:
def enable_streaming_for_llm_tool(f):
    """Enable the stream mode for LLM tools that support it.

    :param f: The function to wrap.
    :type f: function
    :return: The wrapped function, or ``f`` unchanged when it has no ``stream`` parameter.
    :rtype: function

    AzureOpenAI.completion and AzureOpenAI.chat tools support both stream and non-stream mode.
    The stream mode is turned off by default. Use this wrapper to turn it on.
    """
    # We only wrap the function if it has a "stream" parameter
    signature = inspect.signature(f)
    if "stream" not in signature.parameters:
        return f

    @functools.wraps(f)
    def wrapper(*args, **kwargs):
        # ``kwargs`` is always a dict here, so a defensive ``kwargs or {}``
        # guard is unnecessary; just force streaming on.
        kwargs.update(stream=True)
        return f(*args, **kwargs)

    return wrapper
4,401 | import asyncio
import contextlib
import copy
import functools
import inspect
import os
import uuid
from pathlib import Path
from threading import current_thread
from types import GeneratorType
from typing import Any, Callable, Dict, List, Mapping, Optional, Tuple
from opentelemetry.trace.status import StatusCode
from promptflow._constants import LINE_NUMBER_KEY
from promptflow._core._errors import NotSupported, UnexpectedError
from promptflow._core.cache_manager import AbstractCacheManager
from promptflow._core.flow_execution_context import FlowExecutionContext
from promptflow._core.metric_logger import add_metric_logger, remove_metric_logger
from promptflow._core.operation_context import OperationContext
from promptflow._core.run_tracker import RunTracker
from promptflow._core.tool import STREAMING_OPTION_PARAMETER_ATTR
from promptflow._core.tools_manager import ToolsManager
from promptflow._utils.context_utils import _change_working_dir
from promptflow._utils.execution_utils import (
apply_default_value_for_input,
collect_lines,
extract_aggregation_inputs,
get_aggregation_inputs_properties,
)
from promptflow._utils.logger_utils import flow_logger, logger
from promptflow._utils.multimedia_utils import (
load_multimedia_data,
load_multimedia_data_recursively,
persist_multimedia_data,
)
from promptflow._utils.utils import get_int_env_var, transpose
from promptflow._utils.yaml_utils import load_yaml
from promptflow.contracts.flow import Flow, FlowInputDefinition, InputAssignment, InputValueType, Node
from promptflow.contracts.run_info import FlowRunInfo, Status
from promptflow.contracts.run_mode import RunMode
from promptflow.exceptions import PromptflowException
from promptflow.executor import _input_assignment_parser
from promptflow.executor._async_nodes_scheduler import AsyncNodesScheduler
from promptflow.executor._errors import NodeOutputNotFound, OutputReferenceNotExist, SingleNodeValidationError
from promptflow.executor._flow_nodes_scheduler import (
DEFAULT_CONCURRENCY_BULK,
DEFAULT_CONCURRENCY_FLOW,
FlowNodesScheduler,
)
from promptflow.executor._result import AggregationResult, LineResult
from promptflow.executor._tool_resolver import ToolResolver
from promptflow.executor.flow_validator import FlowValidator
from promptflow.storage import AbstractRunStorage
from promptflow.storage._run_storage import DefaultRunStorage
from promptflow.tracing._openai_injector import inject_openai_api
from promptflow.tracing._trace import (
enrich_span_with_context,
enrich_span_with_input,
enrich_span_with_trace_type,
open_telemetry_tracer,
)
from promptflow.tracing.contracts.trace import TraceType
The provided code snippet includes necessary dependencies for implementing the `_ensure_node_result_is_serializable` function. Write a Python function `def _ensure_node_result_is_serializable(f)` to solve the following problem:
Ensure the node result is serializable. Some of the nodes may return a generator of strings to create streaming outputs. This is useful when the flow is deployed as a web service. However, in the interactive mode, the executor assumes that the node result is JSON serializable. This wrapper ensures the node result is serializable by consuming the data from the generator and merging them into a string.
Here is the function:
def _ensure_node_result_is_serializable(f):
"""Ensure the node result is serializable.
Some of the nodes may return a generator of strings to create streaming outputs.
This is useful when the flow is deployed as a web service.
However, in the interactive mode, the executor assumes that the node result is JSON serializable.
This wrapper ensures the node result is serializable
by consuming the data from the generator and merging them into a string.
"""
@functools.wraps(f)
def wrapper(*args, **kwargs):
result = f(*args, **kwargs)
if isinstance(result, GeneratorType):
result = "".join(str(trunk) for trunk in result)
return result
return wrapper | Ensure the node result is serializable. Some of the nodes may return a generator of strings to create streaming outputs. This is useful when the flow is deployed as a web service. However, in the interactive mode, the executor assumes that the node result is JSON serializable. This wrapper ensures the node result is serializable by consuming the data from the generator and merging them into a string. |
4,402 | import asyncio
import contextlib
import copy
import functools
import inspect
import os
import uuid
from pathlib import Path
from threading import current_thread
from types import GeneratorType
from typing import Any, Callable, Dict, List, Mapping, Optional, Tuple
from opentelemetry.trace.status import StatusCode
from promptflow._constants import LINE_NUMBER_KEY
from promptflow._core._errors import NotSupported, UnexpectedError
from promptflow._core.cache_manager import AbstractCacheManager
from promptflow._core.flow_execution_context import FlowExecutionContext
from promptflow._core.metric_logger import add_metric_logger, remove_metric_logger
from promptflow._core.operation_context import OperationContext
from promptflow._core.run_tracker import RunTracker
from promptflow._core.tool import STREAMING_OPTION_PARAMETER_ATTR
from promptflow._core.tools_manager import ToolsManager
from promptflow._utils.context_utils import _change_working_dir
from promptflow._utils.execution_utils import (
apply_default_value_for_input,
collect_lines,
extract_aggregation_inputs,
get_aggregation_inputs_properties,
)
from promptflow._utils.logger_utils import flow_logger, logger
from promptflow._utils.multimedia_utils import (
load_multimedia_data,
load_multimedia_data_recursively,
persist_multimedia_data,
)
from promptflow._utils.utils import get_int_env_var, transpose
from promptflow._utils.yaml_utils import load_yaml
from promptflow.contracts.flow import Flow, FlowInputDefinition, InputAssignment, InputValueType, Node
from promptflow.contracts.run_info import FlowRunInfo, Status
from promptflow.contracts.run_mode import RunMode
from promptflow.exceptions import PromptflowException
from promptflow.executor import _input_assignment_parser
from promptflow.executor._async_nodes_scheduler import AsyncNodesScheduler
from promptflow.executor._errors import NodeOutputNotFound, OutputReferenceNotExist, SingleNodeValidationError
from promptflow.executor._flow_nodes_scheduler import (
DEFAULT_CONCURRENCY_BULK,
DEFAULT_CONCURRENCY_FLOW,
FlowNodesScheduler,
)
from promptflow.executor._result import AggregationResult, LineResult
from promptflow.executor._tool_resolver import ToolResolver
from promptflow.executor.flow_validator import FlowValidator
from promptflow.storage import AbstractRunStorage
from promptflow.storage._run_storage import DefaultRunStorage
from promptflow.tracing._openai_injector import inject_openai_api
from promptflow.tracing._trace import (
enrich_span_with_context,
enrich_span_with_input,
enrich_span_with_trace_type,
open_telemetry_tracer,
)
from promptflow.tracing.contracts.trace import TraceType
class FlowExecutor:
"""This class is used to execute a single flow for different inputs.
:param flow: The flow to be executed.
:type flow: ~promptflow.contracts.flow.Flow
:param connections: The connections to be used for the flow.
:type connections: dict
:param run_tracker: The run tracker to be used for the flow.
:type run_tracker: ~promptflow._core.run_tracker.RunTracker
:param cache_manager: The cache manager to be used for the flow.
:type cache_manager: ~promptflow._core.cache_manager.AbstractCacheManager
:param loaded_tools: The loaded tools to be used for the flow.
:type loaded_tools: Mapping[str, Callable]
:param worker_count: The number of workers to be used for the flow. Default is 16.
:type worker_count: Optional[int]
:param raise_ex: Whether to raise exceptions or not. Default is False.
:type raise_ex: Optional[bool]
:param working_dir: The working directory to be used for the flow. Default is None.
:type working_dir: Optional[str]
:param line_timeout_sec: The line timeout in seconds to be used for the flow. Default is LINE_TIMEOUT_SEC.
:type line_timeout_sec: Optional[int]
:param flow_file: The flow file to be used for the flow. Default is None.
:type flow_file: Optional[Path]
"""
def __init__(
    self,
    flow: Flow,
    connections: dict,
    run_tracker: RunTracker,
    cache_manager: AbstractCacheManager,
    loaded_tools: Mapping[str, Callable],
    *,
    raise_ex: bool = False,
    working_dir=None,
    line_timeout_sec=None,
    flow_file=None,
):
    """Initialize a FlowExecutor object.

    :param flow: The Flow object to execute.
    :type flow: ~promptflow.contracts.flow.Flow
    :param connections: The connections between nodes in the Flow.
    :type connections: dict
    :param run_tracker: The RunTracker object to track the execution of the Flow.
    :type run_tracker: ~promptflow._core.run_tracker.RunTracker
    :param cache_manager: The AbstractCacheManager object to manage caching of results.
    :type cache_manager: ~promptflow._core.cache_manager.AbstractCacheManager
    :param loaded_tools: A mapping of tool names to their corresponding functions.
    :type loaded_tools: Mapping[str, Callable]
    :param raise_ex: Whether to raise an exception if an error occurs during execution.
    :type raise_ex: bool
    :param working_dir: The working directory to use for execution.
    :type working_dir: str or None
    :param line_timeout_sec: The maximum time to wait for a line of output from a node.
    :type line_timeout_sec: int or None
    :param flow_file: The path to the file containing the Flow definition.
    :type flow_file: str or None
    :raises ValueError: If loading a custom tool fails with a non-Promptflow exception.
    """
    # Inject OpenAI API to make sure traces and headers injection works and
    # update OpenAI API configs from environment variables.
    inject_openai_api()
    self._flow = flow
    # Fall back to a random UUID so run tracking always has a usable flow id.
    self._flow_id = flow.id or str(uuid.uuid4())
    self._connections = connections
    self._aggregation_inputs_references = get_aggregation_inputs_properties(flow)
    self._aggregation_nodes = {node.name for node in self._flow.nodes if node.aggregation}
    self._run_tracker = run_tracker
    self._cache_manager = cache_manager
    self._loaded_tools = loaded_tools
    self._working_dir = working_dir
    # The environment variable acts as a fallback when no explicit timeout is given.
    self._line_timeout_sec = line_timeout_sec or get_int_env_var("PF_LINE_TIMEOUT_SEC")
    self._flow_file = flow_file
    try:
        self._tools_manager = ToolsManager(loaded_tools)
        tool_to_meta = {tool.name: tool for tool in flow.tools}
        # Load custom tools only for nodes not covered by the pre-loaded tool set.
        custom_tools = {
            node.name: self._tools_manager._load_custom_tool(tool_to_meta[node.tool], node.name)
            for node in flow.nodes
            if not self._tools_manager.loaded(node.name)
        }
        self._tools_manager.load_tools(custom_tools)
    except PromptflowException as e:
        # For PromptflowException, we don't wrap it, because need generate ErrorResponse by inner exception.
        # Will try to find one common way to handle this case.
        raise e
    except Exception as e:
        raise ValueError(f"Failed to load custom tools for flow due to exception:\n {e}.") from e
    # Fail fast if any node still has no loaded tool after the steps above.
    for node in flow.nodes:
        self._tools_manager.assert_loaded(node.name)
    self._raise_ex = raise_ex
    self._log_interval = 60
    # Batch-progress bookkeeping (line index -> worker thread name); populated during batch runs.
    self._processing_idx = None
    self._completed_idx = None
    # TODO: Improve the experience about configuring node concurrency.
    self._node_concurrency = DEFAULT_CONCURRENCY_BULK
def create(
    cls,
    flow_file: Path,
    connections: dict,
    working_dir: Optional[Path] = None,
    *,
    entry: Optional[str] = None,
    storage: Optional[AbstractRunStorage] = None,
    raise_ex: bool = True,
    node_override: Optional[Dict[str, Dict[str, Any]]] = None,
    line_timeout_sec: Optional[int] = None,
) -> "FlowExecutor":
    """Create a new instance of FlowExecutor.

    NOTE(review): the first parameter is ``cls`` — presumably a ``@classmethod``
    whose decorator was lost when this snippet was extracted; confirm upstream.

    :param flow_file: The path to the flow file.
    :type flow_file: Path
    :param connections: The connections to be used for the flow.
    :type connections: dict
    :param working_dir: The working directory to be used for the flow. Default is None.
    :type working_dir: Optional[str]
    :param entry: The entry function to be used for the flow if a .py file is provided. Default is None.
    :type entry: Optional[str]
    :param storage: The storage to be used for the flow. Default is None.
    :type storage: Optional[~promptflow.storage.AbstractRunStorage]
    :param raise_ex: Whether to raise exceptions or not. Default is True.
    :type raise_ex: Optional[bool]
    :param node_override: The node overrides to be used for the flow. Default is None.
    :type node_override: Optional[Dict[str, Dict[str, Any]]]
    :param line_timeout_sec: The line timeout in seconds to be used for the flow. Default is LINE_TIMEOUT_SEC.
    :type line_timeout_sec: Optional[int]
    :return: A new instance of FlowExecutor.
    :rtype: ~promptflow.executor.flow_executor.FlowExecutor
    """
    # Eager ("entry"-style) flows are executed by a dedicated script executor.
    if cls._is_eager_flow_yaml(flow_file, working_dir):
        from ._script_executor import ScriptExecutor

        return ScriptExecutor(
            flow_file=Path(flow_file),
            working_dir=working_dir,
            storage=storage,
        )
    else:
        flow = Flow.from_yaml(flow_file, working_dir=working_dir)
        return cls._create_from_flow(
            flow_file=flow_file,
            flow=flow,
            connections=connections,
            working_dir=working_dir,
            storage=storage,
            raise_ex=raise_ex,
            node_override=node_override,
            line_timeout_sec=line_timeout_sec,
        )
def _create_from_flow(
    cls,
    flow: Flow,
    connections: dict,
    working_dir: Optional[Path],
    *,
    flow_file: Optional[Path] = None,
    storage: Optional[AbstractRunStorage] = None,
    raise_ex: bool = True,
    node_override: Optional[Dict[str, Dict[str, Any]]] = None,
    line_timeout_sec: Optional[int] = None,
):
    """Build a FlowExecutor from an already-parsed Flow object.

    Applies node overrides and default variants, resolves each node's tool,
    validates the node topology, then constructs the executor.

    NOTE(review): the first parameter is ``cls`` — presumably a ``@classmethod``
    whose decorator was lost when this snippet was extracted; confirm upstream.

    :param flow: The parsed Flow to execute.
    :param connections: The connections to be used for the flow.
    :param working_dir: The working directory; resolved against *flow_file* when None.
    :param flow_file: The source flow file, if any.
    :param storage: Run storage; a DefaultRunStorage is created when None.
    :param raise_ex: Whether execution errors are raised to the caller.
    :param node_override: Per-node property overrides to apply before resolution.
    :param line_timeout_sec: Per-line execution timeout in seconds.
    :return: A configured FlowExecutor.
    """
    logger.debug("Start initializing the flow executor.")
    working_dir = Flow._resolve_working_dir(flow_file, working_dir)
    if node_override:
        flow = flow._apply_node_overrides(node_override)
    flow = flow._apply_default_node_variants()
    package_tool_keys = [node.source.tool for node in flow.nodes if node.source and node.source.tool]
    tool_resolver = ToolResolver(working_dir, connections, package_tool_keys)

    # Tool resolution may load user code, so run it from the flow's working dir.
    with _change_working_dir(working_dir):
        resolved_tools = [tool_resolver.resolve_tool_by_node(node) for node in flow.nodes]
    # Rebuild the flow with the resolved nodes; tools metadata is no longer needed.
    flow = Flow(
        id=flow.id,
        name=flow.name,
        nodes=[r.node for r in resolved_tools],
        inputs=flow.inputs,
        outputs=flow.outputs,
        tools=[],
    )
    # ensure_flow_valid including validation + resolve
    # Todo: 1) split pure validation + resolve from below method 2) provide completed validation()
    flow = FlowValidator._validate_nodes_topology(flow)
    flow.outputs = FlowValidator._ensure_outputs_valid(flow)

    if storage is None:
        storage = DefaultRunStorage()
    run_tracker = RunTracker(storage)

    cache_manager = AbstractCacheManager.init_from_env()

    executor = FlowExecutor(
        flow=flow,
        connections=connections,
        run_tracker=run_tracker,
        cache_manager=cache_manager,
        loaded_tools={r.node.name: r.callable for r in resolved_tools},
        raise_ex=raise_ex,
        working_dir=working_dir,
        line_timeout_sec=line_timeout_sec,
        flow_file=flow_file,
    )
    logger.debug("The flow executor is initialized successfully.")
    return executor
def _is_eager_flow_yaml(cls, flow_file: Path, working_dir: Optional[Path] = None):
    """Return True when the YAML at *flow_file* declares an eager ("entry"-style) flow.

    NOTE(review): the first parameter is ``cls`` — presumably a ``@classmethod``
    whose decorator was lost when this snippet was extracted; confirm upstream.
    """
    # Only .yaml/.yml files can carry an eager-flow definition.
    if Path(flow_file).suffix.lower() not in (".yaml", ".yml"):
        return False
    resolved_path = working_dir / flow_file if working_dir else flow_file
    with open(resolved_path, "r", encoding="utf-8") as fin:
        flow_dag = load_yaml(fin)
    return "entry" in flow_dag
def load_and_exec_node(
    cls,
    flow_file: Path,
    node_name: str,
    *,
    storage: AbstractRunStorage = None,
    output_sub_dir: Optional[str] = None,
    flow_inputs: Optional[Mapping[str, Any]] = None,
    dependency_nodes_outputs: Optional[Mapping[str, Any]] = None,
    connections: Optional[dict] = None,
    working_dir: Optional[Path] = None,
    raise_ex: bool = False,
):
    """Load and execute a single node from the flow.

    NOTE(review): the first parameter is ``cls`` — presumably a ``@classmethod``
    whose decorator was lost when this snippet was extracted; confirm upstream.

    :param flow_file: The path to the flow file.
    :type flow_file: Path
    :param node_name: The name of the node to be executed.
    :type node_name: str
    :param storage: The storage to be used for the flow.
    :type storage: Optional[~promptflow.storage.AbstractRunStorage]
    :param output_sub_dir: The directory to persist image for the flow. Keep it only for backward compatibility.
    :type output_sub_dir: Optional[str]
    :param flow_inputs: The inputs to be used for the flow. Default is None.
    :type flow_inputs: Optional[Mapping[str, Any]]
    :param dependency_nodes_outputs: The outputs of the dependency nodes. Default is None.
    :type dependency_nodes_outputs: Optional[Mapping[str, Any]]
    :param connections: The connections to be used for the flow. Default is None.
    :type connections: Optional[dict]
    :param working_dir: The working directory to be used for the flow. Default is None.
    :type working_dir: Optional[str]
    :param raise_ex: Whether to raise exceptions or not. Default is False.
    :type raise_ex: Optional[bool]
    :return: The run info of the executed node.
    :raises SingleNodeValidationError: If the node is missing or lacks source/type.
    :raises UnexpectedError: If the run tracker collects other than exactly one node run.
    """
    # Inject OpenAI API to make sure traces and headers injection works and
    # update OpenAI API configs from environment variables.
    inject_openai_api()
    OperationContext.get_instance().run_mode = RunMode.SingleNode.name
    dependency_nodes_outputs = dependency_nodes_outputs or {}
    # Load the node from the flow file
    working_dir = Flow._resolve_working_dir(flow_file, working_dir)
    with open(working_dir / flow_file, "r") as fin:
        flow = Flow.deserialize(load_yaml(fin))
    node = flow.get_node(node_name)
    if node is None:
        raise SingleNodeValidationError(
            message_format=(
                "Validation failed when attempting to execute the node. "
                "Node '{node_name}' is not found in flow '{flow_file}'. "
                "Please change node name or correct the flow file."
            ),
            node_name=node_name,
            flow_file=flow_file,
        )
    if not node.source or not node.type:
        raise SingleNodeValidationError(
            message_format=(
                "Validation failed when attempting to execute the node. "
                "Properties 'source' or 'type' are not specified for Node '{node_name}' in flow '{flow_file}'. "
                "Please make sure these properties are in place and try again."
            ),
            node_name=node_name,
            flow_file=flow_file,
        )

    # Only load the node's referenced flow inputs
    node_referenced_flow_inputs = FlowExecutor._get_node_referenced_flow_inputs(node, flow.inputs)
    inputs_with_default_value = apply_default_value_for_input(node_referenced_flow_inputs, flow_inputs)
    converted_flow_inputs_for_node = FlowValidator.convert_flow_inputs_for_node(
        flow, node, inputs_with_default_value
    )
    inputs = load_multimedia_data(node_referenced_flow_inputs, converted_flow_inputs_for_node)
    dependency_nodes_outputs = load_multimedia_data_recursively(dependency_nodes_outputs)
    package_tool_keys = [node.source.tool] if node.source and node.source.tool else []
    tool_resolver = ToolResolver(working_dir, connections, package_tool_keys)
    resolved_node = tool_resolver.resolve_tool_by_node(node)

    # Prepare callable and real inputs here
    resolved_inputs = {}
    for k, v in resolved_node.node.inputs.items():
        value = _input_assignment_parser.parse_value(v, dependency_nodes_outputs, inputs)
        resolved_inputs[k] = value
        if resolved_node.node.aggregation:
            # For aggregation node, we need to convert value to list.
            # Note: ``and`` binds tighter than ``or`` in Python, so the condition is
            # FLOW_INPUT, or (NODE_REFERENCE and a normal-node reference).
            if (
                v.value_type == InputValueType.FLOW_INPUT
                or v.value_type == InputValueType.NODE_REFERENCE
                and flow.is_normal_node(v.value)
            ):
                resolved_inputs[k] = [value]

    # Note that the init args are only used when resolving the tool,
    # so we need to remove them from the inputs before invoking.
    resolved_inputs = {k: v for k, v in resolved_inputs.items() if k not in resolved_node.init_args}

    if storage is None:
        sub_dir = "." if output_sub_dir is None else output_sub_dir
        storage = DefaultRunStorage(base_dir=working_dir, sub_dir=Path(sub_dir))
    run_tracker = RunTracker(storage)

    with run_tracker.node_log_manager:
        # Will generate node run in context
        context = FlowExecutionContext(
            name=flow.name,
            run_tracker=run_tracker,
            cache_manager=AbstractCacheManager.init_from_env(),
        )

        try:
            # Async tools are driven to completion with their own event loop.
            if inspect.iscoroutinefunction(resolved_node.callable):
                asyncio.run(
                    context.invoke_tool_async(resolved_node.node, resolved_node.callable, kwargs=resolved_inputs),
                )
            else:
                context.invoke_tool(resolved_node.node, resolved_node.callable, kwargs=resolved_inputs)
        except Exception:
            if raise_ex:  # Only raise exception when raise_ex is True
                raise

        node_runs = run_tracker.collect_node_runs()
        if len(node_runs) != 1:
            # Should not happen except there is bug in run_tracker or thread control.
            raise UnexpectedError(
                message_format=(
                    "Single node execution failed. Expected one node result, "
                    "but received {node_result_num}. Please contact support for further assistance."
                ),
                node_result_num=len(node_runs),
            )
        return node_runs[0]
def update_environment_variables_with_connections(connections: dict):
    """Update environment variables with connections.

    :param connections: A dictionary containing connection information.
    :type connections: dict
    :return: A dictionary containing updated environment variables.
    :rtype: dict
    """
    # Imported lazily so the SDK is only pulled in when this helper is used.
    from promptflow._sdk._utils import update_environment_variables_with_connections as _update

    return _update(connections)
def convert_flow_input_types(self, inputs: dict) -> Mapping[str, Any]:
    """Convert the input types of the given inputs dictionary to match the expected types of the flow.

    Delegates to :class:`FlowValidator`, which resolves each value against the
    flow's declared input definitions.

    :param inputs: A dictionary containing the inputs to the flow.
    :type inputs: dict
    :return: A dictionary containing the converted inputs.
    :rtype: Mapping[str, Any]
    """
    return FlowValidator.resolve_flow_inputs_type(self._flow, inputs)
def _default_inputs_mapping(self):
    """Return the default inputs mapping: every flow input maps to ``${data.<name>}``."""
    mapping = {}
    for input_name in self._flow.inputs:
        mapping[input_name] = "${data." + input_name + "}"
    return mapping
def has_aggregation_node(self) -> bool:
    """Check if the flow executor has any aggregation nodes.

    :return: True if the flow executor has at least one aggregation node, False otherwise.
    :rtype: bool
    """
    # Non-empty set of aggregation node names means at least one exists.
    return bool(self._aggregation_nodes)
def aggregation_nodes(self):
    """Get the aggregation nodes of the flow executor.

    NOTE(review): other methods read ``self.aggregation_nodes`` without calling
    it (e.g. ``_exec_aggregation_with_bulk_results``) — presumably this is a
    ``@property`` whose decorator was lost in extraction; confirm upstream.

    :return: A list of aggregation nodes.
    :rtype: list
    """
    return self._aggregation_nodes
def _fill_lines(self, indexes, values, nlines):
    """Fill the values into the result list according to the indexes.

    Positions with no matching index remain ``None``.
    """
    filled = [None for _ in range(nlines)]
    for position, line_value in zip(indexes, values):
        filled[position] = line_value
    return filled
def _exec_aggregation_with_bulk_results(
    self,
    batch_inputs: List[dict],
    results: List[LineResult],
    run_id=None,
) -> AggregationResult:
    """Run aggregation nodes over the successful lines of a batch run.

    Only lines whose run status is ``Completed`` contribute inputs to the
    aggregation; failed lines are filtered out before transposing the data
    into per-input columns.

    NOTE(review): ``self.aggregation_nodes`` is read without calling it here —
    presumably a ``@property`` whose decorator was lost in extraction; confirm
    upstream.

    :param batch_inputs: The raw inputs of every line in the batch.
    :param results: The per-line results of the batch run.
    :param run_id: The ID of the current run, if any.
    :return: The aggregation result (empty when the flow has no aggregation nodes).
    :raises UnexpectedError: If a non-Promptflow error occurs during aggregation.
    """
    if not self.aggregation_nodes:
        return AggregationResult({}, {}, {})

    logger.info("Executing aggregation nodes...")

    run_infos = [r.run_info for r in results]
    # Keep only the line indexes that completed successfully.
    succeeded = [i for i, r in enumerate(run_infos) if r.status == Status.Completed]

    succeeded_batch_inputs = [batch_inputs[i] for i in succeeded]
    resolved_succeeded_batch_inputs = [
        FlowValidator.ensure_flow_inputs_type(flow=self._flow, inputs=input) for input in succeeded_batch_inputs
    ]

    # Transpose row-wise data into column-wise lists keyed by input name.
    succeeded_inputs = transpose(resolved_succeeded_batch_inputs, keys=list(self._flow.inputs.keys()))

    aggregation_inputs = transpose(
        [result.aggregation_inputs for result in results],
        keys=self._aggregation_inputs_references,
    )
    succeeded_aggregation_inputs = collect_lines(succeeded, aggregation_inputs)
    try:
        aggr_results = self._exec_aggregation(succeeded_inputs, succeeded_aggregation_inputs, run_id)
        logger.info("Finish executing aggregation nodes.")
        return aggr_results
    except PromptflowException as e:
        # For PromptflowException, we already do classification, so throw directly.
        raise e
    except Exception as e:
        error_type_and_message = f"({e.__class__.__name__}) {e}"
        raise UnexpectedError(
            message_format=(
                "Unexpected error occurred while executing the aggregated nodes. "
                "Please fix or contact support for assistance. The error details: {error_type_and_message}."
            ),
            error_type_and_message=error_type_and_message,
        ) from e
def _try_get_aggregation_input(val: InputAssignment, aggregation_inputs: dict):
    """Swap a node-reference assignment for its pre-collected aggregation value.

    Returns *val* unchanged when it is not a node reference or has no entry in
    *aggregation_inputs*.

    NOTE(review): no ``self``/``cls`` parameter — presumably a ``@staticmethod``
    whose decorator was lost in extraction; confirm upstream.
    """
    if val.value_type != InputValueType.NODE_REFERENCE:
        return val
    lookup_key = val.serialize()
    if lookup_key in aggregation_inputs:
        return InputAssignment(value=aggregation_inputs[lookup_key])
    return val
def get_status_summary(self, run_id: str):
    """Get a summary of the status of a given run.

    Delegates to the run tracker, which owns all run-state bookkeeping.

    :param run_id: The ID of the run to get the status summary for.
    :type run_id: str
    :return: A summary of the status of the given run.
    :rtype: str
    """
    return self._run_tracker.get_status_summary(run_id)
def exec_aggregation(
    self,
    inputs: Mapping[str, Any],
    aggregation_inputs: Mapping[str, Any],
    run_id=None,
    node_concurrency=DEFAULT_CONCURRENCY_FLOW,
) -> AggregationResult:
    """Execute the aggregation node of the flow.

    :param inputs: A mapping of input names to their values.
    :type inputs: Mapping[str, Any]
    :param aggregation_inputs: A mapping of aggregation input names to their values.
    :type aggregation_inputs: Mapping[str, Any]
    :param run_id: The ID of the current run, if any.
    :type run_id: Optional[str]
    :param node_concurrency: The maximum number of nodes that can be executed concurrently.
    :type node_concurrency: int
    :return: The result of the aggregation node.
    :rtype: ~promptflow.executor._result.AggregationResult
    :raises: FlowError if the inputs or aggregation_inputs are invalid.
    """
    self._node_concurrency = node_concurrency
    # Defensive copies: downstream helpers mutate these mappings in place.
    aggregated_flow_inputs = dict(inputs or {})
    aggregation_inputs = dict(aggregation_inputs or {})
    FlowValidator._validate_aggregation_inputs(aggregated_flow_inputs, aggregation_inputs)
    aggregated_flow_inputs = self._apply_default_value_for_aggregation_input(
        self._flow.inputs, aggregated_flow_inputs, aggregation_inputs
    )

    # Resolve aggregated_flow_inputs from list of strings to list of objects, whose type is specified in yaml file.
    # TODO: For now, we resolve type for batch run's aggregation input in _exec_aggregation_with_bulk_results.
    # If we decide to merge the resolve logic into one place, remember to take care of index for batch run.
    resolved_aggregated_flow_inputs = FlowValidator.resolve_aggregated_flow_inputs_type(
        self._flow, aggregated_flow_inputs
    )
    with self._run_tracker.node_log_manager:
        return self._exec_aggregation(resolved_aggregated_flow_inputs, aggregation_inputs, run_id)
def _apply_default_value_for_aggregation_input(
    inputs: Dict[str, FlowInputDefinition],
    aggregated_flow_inputs: Mapping[str, Any],
    aggregation_inputs: Mapping[str, Any],
):
    """Fill missing aggregation-scope flow inputs with their declared defaults.

    The default is replicated once per aggregated line; the line count is
    inferred from the first available input column.

    NOTE(review): no ``self``/``cls`` parameter — presumably a ``@staticmethod``
    whose decorator was lost in extraction; confirm upstream.
    """
    # Infer the number of lines from any existing column, preferring the
    # aggregated flow inputs and falling back to the aggregation inputs.
    aggregation_lines = 1
    sample_columns = list(aggregated_flow_inputs.values()) or list(aggregation_inputs.values())
    if sample_columns:
        aggregation_lines = len(sample_columns[0])
    for input_name, definition in inputs.items():
        if input_name not in aggregated_flow_inputs and (definition and definition.default is not None):
            aggregated_flow_inputs[input_name] = [definition.default] * aggregation_lines
    return aggregated_flow_inputs
def _exec_aggregation(
    self,
    inputs: Mapping[str, Any],
    aggregation_inputs: Mapping[str, Any],
    run_id=None,
) -> AggregationResult:
    """Run all aggregation nodes and collect their run infos and metrics.

    :param inputs: Column-wise flow inputs for the aggregation nodes.
    :param aggregation_inputs: Serialized node-reference values collected per line.
    :param run_id: The ID of the current run; a random UUID is used when None.
    :return: The aggregation result; its output dict is intentionally empty.
    """
    if not self._flow.has_aggregation_node:
        return AggregationResult({}, {}, {})
    run_id = run_id or str(uuid.uuid4())
    # Deep-copy nodes so the per-run input rewrite does not mutate the flow.
    nodes = [copy.deepcopy(node) for node in self._flow.nodes if node.aggregation]
    # Update the inputs of the aggregation nodes with the aggregation inputs.
    for node in nodes:
        node.inputs = {
            k: FlowExecutor._try_get_aggregation_input(v, aggregation_inputs) for k, v in node.inputs.items()
        }
    # Load multimedia data for the flow inputs of aggregation nodes.
    inputs = load_multimedia_data(self._flow.inputs, inputs)

    # TODO: Use a new run tracker to avoid memory increase infinitely.
    run_tracker = self._run_tracker
    context = FlowExecutionContext(
        name=self._flow.name,
        run_tracker=run_tracker,
        cache_manager=self._cache_manager,
        run_id=run_id,
        flow_id=self._flow_id,
    )
    metrics = {}

    def _log_metric(key, value):
        # Collect metrics emitted by aggregation nodes during this run only.
        metrics[key] = value

    add_metric_logger(_log_metric)
    try:
        self._submit_to_scheduler(context, inputs, nodes)
        node_run_infos = run_tracker.collect_child_node_runs(run_id)
        # Output is set as an empty dict, because the aggregation outputs story is not finalized.
        return AggregationResult({}, metrics, {run.node: run for run in node_run_infos})
    except Exception:
        if self._raise_ex:
            raise
        # Best-effort: still return whatever node runs were tracked.
        node_run_infos = run_tracker.collect_child_node_runs(run_id)
        return AggregationResult({}, metrics, {run.node: run for run in node_run_infos})
    finally:
        # Always detach the metric logger so it cannot leak into later runs.
        remove_metric_logger(_log_metric)
def exec(self, inputs: dict, node_concurrency=DEFAULT_CONCURRENCY_FLOW) -> dict:
    """Executes the flow with the given inputs and returns the output.

    :param inputs: A dictionary containing the input values for the flow.
    :type inputs: dict
    :param node_concurrency: The maximum number of nodes that can be executed concurrently.
    :type node_concurrency: int
    :return: A dictionary containing the output values of the flow.
    :rtype: dict
    """
    self._node_concurrency = node_concurrency
    line_result = self._exec(apply_default_value_for_input(self._flow.inputs, inputs))
    # TODO: remove this line once serving directly calling self.exec_line
    self._add_line_results([line_result])
    if line_result.output:
        return line_result.output
    return {}
def _exec_in_thread(self, args) -> LineResult:
    """Worker-thread entry point: execute one line and record which thread handled it."""
    inputs, run_id, line_number, validate_inputs = args
    worker_name = current_thread().name
    # Mark this line as in-progress under the current worker for progress reporting.
    self._processing_idx[line_number] = worker_name
    self._run_tracker._activate_in_context()
    line_result = self._exec(
        inputs, run_id=run_id, line_number=line_number, validate_inputs=validate_inputs
    )
    self._run_tracker._deactivate_in_context()
    # Move the line from the processing map to the completed map.
    self._processing_idx.pop(line_number)
    self._completed_idx[line_number] = worker_name
    return line_result
def exec_line(
    self,
    inputs: Mapping[str, Any],
    index: Optional[int] = None,
    run_id: Optional[str] = None,
    validate_inputs: bool = True,
    node_concurrency=DEFAULT_CONCURRENCY_FLOW,
    allow_generator_output: bool = False,
    line_timeout_sec: Optional[int] = None,
) -> LineResult:
    """Execute a single line of the flow.

    :param inputs: The input values for the line.
    :type inputs: Mapping[str, Any]
    :param index: The index of the line to execute.
    :type index: Optional[int]
    :param run_id: The ID of the flow run.
    :type run_id: Optional[str]
    :param validate_inputs: Whether to validate the input values.
    :type validate_inputs: bool
    :param node_concurrency: The maximum number of nodes that can be executed concurrently.
    :type node_concurrency: int
    :param allow_generator_output: Whether to allow generator output.
    :type allow_generator_output: bool
    :param line_timeout_sec: The maximum time to wait for a line of output.
    :type line_timeout_sec: Optional[int]
    :return: The result of executing the line.
    :rtype: ~promptflow.executor._result.LineResult
    """
    # TODO: Call exec_line_async in exec_line when async is mature.
    self._node_concurrency = node_concurrency
    # TODO: Pass line_timeout_sec to flow node scheduler instead of updating self._line_timeout_sec
    self._line_timeout_sec = line_timeout_sec or self._line_timeout_sec
    inputs = apply_default_value_for_input(self._flow.inputs, inputs)
    # For flow run, validate inputs as default
    with self._run_tracker.node_log_manager:
        # exec_line interface may be called when executing a batch run, so we only set run_mode as flow run when
        # it is not set.
        run_id = run_id or str(uuid.uuid4())
        with self._update_operation_context(run_id, index):
            line_result = self._exec(
                inputs,
                run_id=run_id,
                line_number=index,
                validate_inputs=validate_inputs,
                allow_generator_output=allow_generator_output,
            )
    # Return line result with index
    if index is not None and isinstance(line_result.output, dict):
        line_result.output[LINE_NUMBER_KEY] = index
    return line_result
async def exec_line_async(
    self,
    inputs: Mapping[str, Any],
    index: Optional[int] = None,
    run_id: Optional[str] = None,
    validate_inputs: bool = True,
    node_concurrency=DEFAULT_CONCURRENCY_FLOW,
    allow_generator_output: bool = False,
) -> LineResult:
    """Execute one line of the flow asynchronously.

    :param inputs: The input values for the line.
    :type inputs: Mapping[str, Any]
    :param index: The index of the line to execute.
    :type index: Optional[int]
    :param run_id: The ID of the flow run.
    :type run_id: Optional[str]
    :param validate_inputs: Whether to validate the input values.
    :type validate_inputs: bool
    :param node_concurrency: The maximum number of nodes that can be executed concurrently.
    :type node_concurrency: int
    :param allow_generator_output: Whether to allow generator output.
    :type allow_generator_output: bool
    :return: The result of executing the line.
    :rtype: ~promptflow.executor._result.LineResult
    """
    self._node_concurrency = node_concurrency
    inputs = apply_default_value_for_input(self._flow.inputs, inputs)
    # Capture node logs for the duration of this line's execution.
    with self._run_tracker.node_log_manager:
        # This may be called while executing a batch run, so only default the
        # run mode to Test when the caller has not already set one.
        operation_context = OperationContext.get_instance()
        operation_context.run_mode = operation_context.get("run_mode", None) or RunMode.Test.name
        result = await self._exec_async(
            inputs,
            run_id=run_id,
            line_number=index,
            validate_inputs=validate_inputs,
            allow_generator_output=allow_generator_output,
        )
    # Stamp the line number on dict outputs so batch callers can correlate results.
    if index is not None and isinstance(result.output, dict):
        result.output[LINE_NUMBER_KEY] = index
    return result
def _update_operation_context(self, run_id: str, line_number: int):
    """Return a context manager that scopes run metadata onto the OperationContext.

    While active, the operation context carries this flow's id, the root run id
    and the run mode, plus OTel attributes: ``batch_run_id``/``line_number`` for
    batch runs, ``line_run_id`` otherwise. The previous context is restored on
    exit, even if the body raises.

    :param run_id: The root run id of the current execution.
    :param line_number: The line number being executed (used for batch runs).
    :return: A context manager to wrap a single line execution.
    """
    # BUG FIX: this function is used as ``with self._update_operation_context(...)``
    # (see exec_line), but a bare generator has no __enter__/__exit__. Wrap the
    # generator with contextlib.contextmanager so the ``with`` statement works.
    from contextlib import contextmanager

    @contextmanager
    def _scoped_context():
        operation_context = OperationContext.get_instance()
        original_context = operation_context.copy()
        original_mode = operation_context.get("run_mode", None)
        values_for_context = {"flow_id": self._flow_id, "root_run_id": run_id}
        if original_mode == RunMode.Batch.name:
            values_for_otel = {
                "batch_run_id": run_id,
                "line_number": line_number,
            }
        else:
            values_for_otel = {"line_run_id": run_id}
        try:
            operation_context.run_mode = original_mode or RunMode.Test.name
            operation_context.update(values_for_context)
            for key, value in values_for_otel.items():
                operation_context._add_otel_attributes(key, value)
            yield
        finally:
            # Restore whatever context was active before this line ran.
            OperationContext.set_instance(original_context)

    return _scoped_context()
def _add_line_results(self, line_results: List[LineResult], run_tracker: Optional[RunTracker] = None):
    """Merge the flow-run and node-run infos of *line_results* into a tracker.

    Falls back to this executor's own run tracker when none is supplied.

    :param line_results: Line results whose run infos should be registered.
    :param run_tracker: Optional tracker to receive the run infos.
    """
    tracker = run_tracker or self._run_tracker
    for result in line_results:
        tracker._flow_runs[result.run_info.run_id] = result.run_info
        for node_run_info in result.node_run_infos.values():
            tracker._node_runs[node_run_info.run_id] = node_run_info
@staticmethod
def _get_node_referenced_flow_inputs(
    node, flow_inputs: Dict[str, FlowInputDefinition]
) -> Dict[str, FlowInputDefinition]:
    """Return the subset of *flow_inputs* actually referenced by *node*.

    :param node: The node whose input assignments are inspected.
    :param flow_inputs: All flow-level input definitions, keyed by input name.
    :return: Mapping of input name -> definition for inputs the node references.
    """
    # BUG FIX: the first parameter is ``node``, not ``self``, so this must be a
    # staticmethod; without the decorator, an instance call would bind the
    # executor instance as ``node``.
    node_referenced_flow_inputs = {}
    for _, value in node.inputs.items():
        # Only add a flow input to node_referenced_flow_inputs when it exists and is referenced
        # by the node. If the flow input does not exist, an exception is raised in
        # FlowValidator.convert_flow_inputs_for_node.
        if value.value_type == InputValueType.FLOW_INPUT and value.value in flow_inputs:
            node_referenced_flow_inputs[value.value] = flow_inputs[value.value]
    return node_referenced_flow_inputs
def _exec_inner_with_trace(
    self,
    inputs: Mapping[str, Any],
    run_info: FlowRunInfo,
    run_tracker: RunTracker,
    context: FlowExecutionContext,
    validate_inputs=False,
    allow_generator_output=False,
):
    """Run one line of the flow inside an OpenTelemetry span.

    Wraps :meth:`_exec_inner`: the span is enriched with the operation context,
    the flow inputs and the output trace, and marked OK on success. Exceptions
    from the inner execution propagate to the caller (handled in ``_exec``).

    :param inputs: The input values for this line.
    :param run_info: Flow run info record to populate.
    :param run_tracker: Tracker used to persist run state.
    :param context: Execution context shared by the nodes.
    :param validate_inputs: Whether to type-check/convert inputs first.
    :param allow_generator_output: Whether generator outputs may be returned as-is.
    :return: Tuple of (flow output, aggregation inputs).
    """
    with open_telemetry_tracer.start_as_current_span(self._flow.name) as span:
        # initialize span
        span.set_attributes(
            {
                "framework": "promptflow",
                "span_type": TraceType.FLOW.value,
            }
        )
        enrich_span_with_context(span)
        # enrich span with input
        enrich_span_with_input(span, inputs)
        # invoke
        output, aggregation_inputs = self._exec_inner(
            inputs,
            run_info,
            run_tracker,
            context,
            validate_inputs,
            allow_generator_output,
        )
        # enrich span with trace type
        enrich_span_with_trace_type(span, inputs, output, trace_type=TraceType.FLOW)
        # set status
        span.set_status(StatusCode.OK)
        return output, aggregation_inputs
def _exec_inner(
    self,
    inputs: Mapping[str, Any],
    run_info: FlowRunInfo,
    run_tracker: RunTracker,
    context: FlowExecutionContext,
    validate_inputs=False,
    allow_generator_output=False,
):
    """Core single-line execution: validate inputs, run the nodes, finalize the run.

    :param inputs: The input values for this line.
    :param run_info: Flow run info record; its ``inputs`` field is populated here.
    :param run_tracker: Tracker that records and persists node/flow run state.
    :param context: Execution context shared by the nodes.
    :param validate_inputs: Whether to type-check/convert inputs before running.
    :param allow_generator_output: When False, generator outputs are drained into strings.
    :return: Tuple of (flow output, aggregation inputs).
    """
    if validate_inputs:
        inputs = FlowValidator.ensure_flow_inputs_type(flow=self._flow, inputs=inputs, idx=run_info.index)
    inputs = load_multimedia_data(self._flow.inputs, inputs)
    # Inputs are assigned after validation and multimedia data loading, instead of at the start of the flow run.
    # This way, if validation or multimedia data loading fails, we avoid persisting invalid inputs.
    run_info.inputs = inputs
    output, nodes_outputs = self._traverse_nodes(inputs, context)
    # Drain generator outputs into plain strings unless the caller opted into streaming.
    output = self._stringify_generator_output(output) if not allow_generator_output else output
    # Persist the node runs for the nodes that have a generator output
    generator_output_nodes = [
        nodename for nodename, output in nodes_outputs.items() if isinstance(output, GeneratorType)
    ]
    run_tracker.persist_selected_node_runs(run_info, generator_output_nodes)
    run_tracker.allow_generator_types = allow_generator_output
    run_tracker.end_run(run_info.run_id, result=output)
    aggregation_inputs = extract_aggregation_inputs(self._flow, nodes_outputs)
    return output, aggregation_inputs
def _exec(
    self,
    inputs: Mapping[str, Any],
    run_id: Optional[str] = None,
    line_number: Optional[int] = None,
    validate_inputs: bool = False,
    allow_generator_output: bool = False,
) -> LineResult:
    """execute line run

    Args:
        inputs (Mapping): flow inputs
        run_id: the id to identify the flow run; expected to be set by the caller (see exec_line)
        line_number: line number for batch inputs
        validate_inputs:
            Flag to indicate if input validation needed. It is used along with "_raise_ex" to
            define if exception shall be raised if inputs validation (type check, etc) failed
            The flag is True for Flow Run, False for bulk run as default
        allow_generator_output:
            Flag to indicate if generator output is allowed.
    Returns:
        LineResult: Line run result
    """
    # Suffix the line number so each line in a batch gets a unique run id.
    line_run_id = run_id if line_number is None else f"{run_id}_{line_number}"
    # Use a dedicated tracker for this line (sharing the parent's storage, run mode
    # and node log manager) so this line's runs can be collected in isolation below.
    run_tracker = RunTracker(
        self._run_tracker._storage, self._run_tracker._run_mode, self._run_tracker.node_log_manager
    )
    # We need to copy the allow_generator_types from the original run_tracker.
    run_tracker.allow_generator_types = self._run_tracker.allow_generator_types
    run_info: FlowRunInfo = run_tracker.start_flow_run(
        flow_id=self._flow_id,
        root_run_id=run_id,
        run_id=line_run_id,
        parent_run_id=run_id,
        index=line_number,
    )
    context = FlowExecutionContext(
        name=self._flow.name,
        run_tracker=run_tracker,
        cache_manager=self._cache_manager,
        run_id=run_id,
        flow_id=self._flow_id,
        line_number=line_number,
    )
    # Defaults returned if execution fails and _raise_ex is False.
    output = {}
    aggregation_inputs = {}
    try:
        output, aggregation_inputs = self._exec_inner_with_trace(
            inputs,
            run_info,
            run_tracker,
            context,
            validate_inputs,
            allow_generator_output,
        )
    except KeyboardInterrupt as ex:
        # Run will be cancelled when the process receives a SIGINT signal.
        # KeyboardInterrupt will be raised after asyncio finishes its signal handling
        # End run with the KeyboardInterrupt exception, so that its status will be Canceled
        flow_logger.info("Received KeyboardInterrupt, cancel the run.")
        run_tracker.end_run(line_run_id, ex=ex)
        raise
    except Exception as e:
        run_tracker.end_run(line_run_id, ex=e)
        if self._raise_ex:
            raise
    finally:
        # Always roll node-run results up into the flow run info and persist it,
        # whether the line succeeded, failed, or was cancelled.
        run_tracker._update_flow_run_info_with_node_runs(run_info)
        run_tracker.persist_flow_run(run_info)
    node_run_infos = run_tracker.collect_child_node_runs(line_run_id)
    node_runs = {node_run.node: node_run for node_run in node_run_infos}
    return LineResult(output, aggregation_inputs, run_info, node_runs)
async def _exec_async(
    self,
    inputs: Mapping[str, Any],
    run_id: Optional[str] = None,
    line_number: Optional[int] = None,
    validate_inputs: bool = False,
    allow_generator_output: bool = False,
) -> LineResult:
    """execute line run

    Async counterpart of ``_exec``. The inner logic is currently inlined here
    (instead of calling ``_exec_inner_with_trace``) because several helpers do
    not yet have async implementations — see the TODOs below.

    Args:
        inputs (Mapping): flow inputs
        run_id: the id to identify the flow run
        line_number: line number for batch inputs
        validate_inputs:
            Flag to indicate if input validation needed. It is used along with "_raise_ex" to
            define if exception shall be raised if inputs validation (type check, etc) failed
            The flag is True for Flow Run, False for bulk run as default
        allow_generator_output:
            Flag to indicate if generator output is allowed.
    Returns:
        LineResult: Line run result
    """
    # Unlike _exec, a missing run_id is defaulted here rather than by the caller.
    run_id = run_id or str(uuid.uuid4())
    line_run_id = run_id if line_number is None else f"{run_id}_{line_number}"
    # Dedicated tracker for this line, sharing the parent's storage/run mode/log manager.
    run_tracker = RunTracker(
        self._run_tracker._storage, self._run_tracker._run_mode, self._run_tracker.node_log_manager
    )
    # We need to copy the allow_generator_types from the original run_tracker.
    run_tracker.allow_generator_types = self._run_tracker.allow_generator_types
    # NOTE(review): unlike _exec, the raw inputs are persisted at run start here
    # (before validation/multimedia conversion) — confirm this difference is intended.
    run_info: FlowRunInfo = run_tracker.start_flow_run(
        flow_id=self._flow_id,
        root_run_id=run_id,
        run_id=line_run_id,
        parent_run_id=run_id,
        inputs={k: inputs[k] for k in self._flow.inputs if k in inputs},
        index=line_number,
    )
    context = FlowExecutionContext(
        name=self._flow.name,
        run_tracker=run_tracker,
        cache_manager=self._cache_manager,
        run_id=run_id,
        flow_id=self._flow_id,
        line_number=line_number,
    )
    # Defaults returned if execution fails and _raise_ex is False.
    output = {}
    aggregation_inputs = {}
    try:
        if validate_inputs:
            inputs = FlowValidator.ensure_flow_inputs_type(flow=self._flow, inputs=inputs, idx=line_number)
        # TODO: Consider async implementation for load_multimedia_data
        inputs = load_multimedia_data(self._flow.inputs, inputs)
        # Make sure the run_info with converted inputs results rather than original inputs
        run_info.inputs = inputs
        output, nodes_outputs = await self._traverse_nodes_async(inputs, context)
        # TODO: Consider async implementation for _stringify_generator_output
        output = self._stringify_generator_output(output) if not allow_generator_output else output
        # Persist the node runs for the nodes that have a generator output
        generator_output_nodes = [
            nodename for nodename, output in nodes_outputs.items() if isinstance(output, GeneratorType)
        ]
        # TODO: Consider async implementation for persist_selected_node_runs
        run_tracker.persist_selected_node_runs(run_info, generator_output_nodes)
        run_tracker.allow_generator_types = allow_generator_output
        # TODO: Consider async implementation for end_run
        run_tracker.end_run(line_run_id, result=output)
        aggregation_inputs = extract_aggregation_inputs(self._flow, nodes_outputs)
    except KeyboardInterrupt as ex:
        # Run will be cancelled when the process receives a SIGINT signal.
        # KeyboardInterrupt will be raised after asyncio finishes its signal handling
        # End run with the KeyboardInterrupt exception, so that its status will be Canceled
        flow_logger.info("Received KeyboardInterrupt, cancel the run.")
        run_tracker.end_run(line_run_id, ex=ex)
        raise
    except Exception as e:
        run_tracker.end_run(line_run_id, ex=e)
        if self._raise_ex:
            raise
    finally:
        # Always roll node-run results up into the flow run info and persist it.
        run_tracker._update_flow_run_info_with_node_runs(run_info)
        run_tracker.persist_flow_run(run_info)
    node_run_infos = run_tracker.collect_child_node_runs(line_run_id)
    node_runs = {node_run.node: node_run for node_run in node_run_infos}
    return LineResult(output, aggregation_inputs, run_info, node_runs)
def _extract_outputs(self, nodes_outputs, bypassed_nodes, flow_inputs):
    """Assemble the flow's output dict from node results and flow inputs.

    :param nodes_outputs: Mapping of node name -> node output value.
    :param bypassed_nodes: Collection of node names that were bypassed; references
        to them only produce a warning.
    :param flow_inputs: The (converted) flow input values.
    :return: Mapping of flow output name -> value.
    :raises NotSupported: If an output references an unsupported value type.
    :raises OutputReferenceNotExist: If an output references a node not in the flow.
    :raises NodeOutputNotFound: If the referenced node produced no output.
    """
    outputs = {}
    for name, output in self._flow.outputs.items():
        # Literal outputs are copied through unchanged.
        if output.reference.value_type == InputValueType.LITERAL:
            outputs[name] = output.reference.value
            continue
        # An output may forward a flow input directly.
        if output.reference.value_type == InputValueType.FLOW_INPUT:
            outputs[name] = flow_inputs[output.reference.value]
            continue
        # Anything other than a node reference is unsupported.
        if output.reference.value_type != InputValueType.NODE_REFERENCE:
            raise NotSupported(
                message_format=(
                    "The output type '{output_type}' is currently unsupported. "
                    "Please choose from available types: '{supported_output_type}' and try again."
                ),
                output_type=output.reference.value_type.value
                if hasattr(output.reference.value_type, "value")
                else output.reference.value_type,
                supported_output_type=[output_type.value for output_type in InputValueType],
            )
        node = next((n for n in self._flow.nodes if n.name == output.reference.value), None)
        if not node:
            raise OutputReferenceNotExist(
                message_format=(
                    "The output '{output_name}' for flow is incorrect. The node '{node_name}' "
                    "referenced by the output '{output_name}' can not found in flow. "
                    "Please rectify the error in your flow and try again."
                ),
                node_name=output.reference.value,
                output_name=name,
            )
        if node.aggregation:
            # Note that the reduce node referenced in the output is not supported.
            continue
        if node.name not in nodes_outputs:
            raise NodeOutputNotFound(
                message_format=(
                    "The output '{output_name}' for flow is incorrect. "
                    "No outputs found for node '{node_name}'. Please review the problematic "
                    "output and rectify the error."
                ),
                output_name=name,
                node_name=node.name,
            )
        if output.reference.value in bypassed_nodes:
            flow_logger.warning(
                f"The node referenced by output:'{output.reference.value}' is bypassed, which is not recommended."
            )
        node_result = nodes_outputs[output.reference.value]
        # Resolve an optional sub-property path on the node result (e.g. output.x).
        outputs[name] = _input_assignment_parser.parse_node_property(
            output.reference.value, node_result, output.reference.property
        )
    return outputs
def _should_use_async(self):
return (
all(inspect.iscoroutinefunction(f) for f in self._tools_manager._tools.values())
or os.environ.get("PF_USE_ASYNC", "false").lower() == "true"
)
def _traverse_nodes(self, inputs, context: FlowExecutionContext) -> Tuple[dict, dict]:
    """Run all non-aggregation nodes and assemble the flow outputs.

    :param inputs: The (converted) flow input values.
    :param context: Execution context shared by the nodes.
    :return: Tuple of (flow outputs, per-node outputs).
    """
    batch_nodes = [node for node in self._flow.nodes if not node.aggregation]
    # TODO: Use a mixed scheduler to support both async and thread pool mode.
    if self._should_use_async():
        flow_logger.info("Start executing nodes in async mode.")
        scheduler = AsyncNodesScheduler(self._tools_manager, self._node_concurrency)
        nodes_outputs, bypassed_nodes = asyncio.run(scheduler.execute(batch_nodes, inputs, context))
    else:
        flow_logger.info("Start executing nodes in thread pool mode.")
        nodes_outputs, bypassed_nodes = self._submit_to_scheduler(context, inputs, batch_nodes)
    outputs = self._extract_outputs(nodes_outputs, bypassed_nodes, inputs)
    return outputs, nodes_outputs
async def _traverse_nodes_async(self, inputs, context: FlowExecutionContext) -> Tuple[dict, dict]:
    """Run all non-aggregation nodes on the async scheduler and assemble the outputs.

    :param inputs: The (converted) flow input values.
    :param context: Execution context shared by the nodes.
    :return: Tuple of (flow outputs, per-node outputs).
    """
    # Always use async scheduler when calling from async function.
    flow_logger.info("Start executing nodes in async mode.")
    batch_nodes = [node for node in self._flow.nodes if not node.aggregation]
    scheduler = AsyncNodesScheduler(self._tools_manager, self._node_concurrency)
    nodes_outputs, bypassed_nodes = await scheduler.execute(batch_nodes, inputs, context)
    outputs = self._extract_outputs(nodes_outputs, bypassed_nodes, inputs)
    return outputs, nodes_outputs
def _stringify_generator_output(self, outputs: dict):
for k, v in outputs.items():
if isinstance(v, GeneratorType):
outputs[k] = "".join(str(chuck) for chuck in v)
return outputs
def _submit_to_scheduler(self, context: FlowExecutionContext, inputs, nodes: List[Node]) -> Tuple[dict, dict]:
    """Run *nodes* on the thread-pool scheduler.

    :param context: Execution context shared by the nodes.
    :param inputs: The (converted) flow input values.
    :param nodes: Nodes to execute.
    :return: Tuple of (per-node outputs, bypassed nodes).
    :raises UnexpectedError: If the configured node concurrency is not an int.
    """
    concurrency = self._node_concurrency
    if not isinstance(concurrency, int):
        raise UnexpectedError(
            message_format=(
                "Flow execution failed. To proceed, ensure that a valid node concurrency value is set. "
                "The current value is {current_value}. Please contact support for further assistance."
            ),
            current_value=concurrency,
        )
    scheduler = FlowNodesScheduler(
        self._tools_manager,
        inputs,
        nodes,
        concurrency,
        context,
    )
    return scheduler.execute(self._line_timeout_sec)
def apply_inputs_mapping(
    inputs: Mapping[str, Mapping[str, Any]],
    inputs_mapping: Mapping[str, str],
) -> Dict[str, Any]:
    """Deprecated shim that delegates to the shared inputs-mapping utility.

    :param inputs: Per-source input values.
    :param inputs_mapping: Mapping expressions from source values to flow inputs.
    :return: The mapped flow inputs.
    """
    # TODO: This function will be removed after the batch engine refactoring is completed.
    from promptflow._utils.inputs_mapping_utils import apply_inputs_mapping as _apply_inputs_mapping

    return _apply_inputs_mapping(inputs, inputs_mapping)
def enable_streaming_for_llm_flow(self, stream_required: Callable[[], bool]):
    """Turn on streaming for LLM nodes whose result feeds the flow output.

    A node is eligible when it has a streaming option parameter, is referenced
    by a flow output, and is not consumed by any other node. Each eligible
    node's tool is wrapped so that it returns a generator of strings whenever
    ``stream_required()`` evaluates to True, and a plain string otherwise.

    :param stream_required: A callback that takes no arguments and returns a boolean value indicating whether \
        streaming results should be enabled for the LLM node.
    :type stream_required: Callable[[], bool]
    :return: None
    """
    for node in self._flow.nodes:
        option_parameter = self._parse_streaming_option_parameter(node)
        if option_parameter is None:
            continue
        if not self._flow.is_referenced_by_flow_output(node):
            continue
        if self._flow.is_referenced_by_other_node(node):
            continue
        wrapper = _inject_stream_options(stream_required, option_parameter)
        self._tools_manager.wrap_tool(node.name, wrapper=wrapper)
def _parse_streaming_option_parameter(self, node: Node) -> Optional[str]:
    """Return the name of the node tool's streaming-option parameter, if any.

    LLM nodes always use "stream"; other tools declare it via the
    STREAMING_OPTION_PARAMETER_ATTR attribute on the tool function.
    """
    if self._flow.is_llm_node(node):
        return "stream"
    tool = self._tools_manager.get_tool(node.name)
    return getattr(tool, STREAMING_OPTION_PARAMETER_ATTR, None)
def ensure_flow_is_serializable(self):
    """Wrap every node tool so its result is JSON serializable.

    Nodes may return a generator of strings to create streaming outputs, which
    is useful when the flow is deployed as a web service; in interactive mode,
    however, the executor assumes node results are JSON serializable. The
    wrapper added here consumes any streaming output and merges it into a
    single string for executor usage.

    :return: None
    """
    for node in self._flow.nodes:
        self._tools_manager.wrap_tool(node.name, wrapper=_ensure_node_result_is_serializable)
The provided code snippet includes necessary dependencies for implementing the `execute_flow` function. Write a Python function `def execute_flow( flow_file: Path, working_dir: Path, output_dir: Path, connections: dict, inputs: Mapping[str, Any], *, run_id: str = None, run_aggregation: bool = True, enable_stream_output: bool = False, allow_generator_output: bool = False, # TODO: remove this **kwargs, ) -> LineResult` to solve the following problem:
Execute the flow, including aggregation nodes. :param flow_file: The path to the flow file. :type flow_file: Path :param working_dir: The working directory of the flow. :type working_dir: Path :param output_dir: Relative path relative to working_dir. :type output_dir: Path :param connections: A dictionary containing connection information. :type connections: dict :param inputs: A dictionary containing the input values for the flow. :type inputs: Mapping[str, Any] :param enable_stream_output: Whether to allow stream (generator) output for flow output. Default is False. :type enable_stream_output: Optional[bool] :param run_id: Run id will be set in operation context and used for session. :type run_id: Optional[str] :param kwargs: Other keyword arguments to create flow executor. :type kwargs: Any :return: The line result of executing the flow. :rtype: ~promptflow.executor._result.LineResult
Here is the function:
def execute_flow(
    flow_file: Path,
    working_dir: Path,
    output_dir: Path,
    connections: dict,
    inputs: Mapping[str, Any],
    *,
    run_id: str = None,
    run_aggregation: bool = True,
    enable_stream_output: bool = False,
    allow_generator_output: bool = False,  # TODO: remove this
    **kwargs,
) -> LineResult:
    """Execute the flow, including aggregation nodes.

    :param flow_file: The path to the flow file.
    :type flow_file: Path
    :param working_dir: The working directory of the flow.
    :type working_dir: Path
    :param output_dir: Relative path relative to working_dir.
    :type output_dir: Path
    :param connections: A dictionary containing connection information.
    :type connections: dict
    :param inputs: A dictionary containing the input values for the flow.
    :type inputs: Mapping[str, Any]
    :param run_id: Run id will be set in operation context and used for session.
    :type run_id: Optional[str]
    :param run_aggregation: Whether to also run aggregation nodes when present.
    :type run_aggregation: bool
    :param enable_stream_output: Whether to allow stream (generator) output for flow output. Default is False.
    :type enable_stream_output: Optional[bool]
    :param kwargs: Other keyword arguments to create flow executor.
    :type kwargs: Any
    :return: The line result of executing the flow.
    :rtype: ~promptflow.executor._result.LineResult
    """
    executor = FlowExecutor.create(flow_file, connections, working_dir, raise_ex=False, **kwargs)
    executor.enable_streaming_for_llm_flow(lambda: enable_stream_output)
    with _change_working_dir(working_dir):
        # Execute all nodes in the flow except the aggregation nodes.
        # TODO: remove index=0 after UX no longer requires a run id similar to batch runs
        # (run_id_index, eg. xxx_0) for displaying the interface
        line_result = executor.exec_line(
            inputs, index=0, allow_generator_output=allow_generator_output, run_id=run_id
        )
        # Persist any multimedia output under the requested output directory.
        line_result.output = persist_multimedia_data(line_result.output, base_dir=working_dir, sub_dir=output_dir)
        if run_aggregation and line_result.aggregation_inputs:
            # Aggregation expects list-valued inputs, so wrap each single value.
            flow_inputs = {key: [value] for key, value in inputs.items()}
            aggregation_inputs = {key: [value] for key, value in line_result.aggregation_inputs.items()}
            aggregation_results = executor.exec_aggregation(
                flow_inputs, aggregation_inputs=aggregation_inputs, run_id=run_id
            )
            line_result.node_run_infos = {**line_result.node_run_infos, **aggregation_results.node_run_infos}
            line_result.run_info.metrics = aggregation_results.metrics
        if isinstance(line_result.output, dict):
            # remove line_number from output
            line_result.output.pop(LINE_NUMBER_KEY, None)
        return line_result
4,403 | import asyncio
import contextvars
import multiprocessing
import os
import queue
import signal
import sys
import threading
from datetime import datetime
from functools import partial
from logging import INFO
from multiprocessing import Manager, Queue
from multiprocessing.pool import ThreadPool
from pathlib import Path
from typing import Dict, List, Optional, Union
import psutil
from promptflow._constants import LINE_NUMBER_KEY, LINE_TIMEOUT_SEC
from promptflow._core._errors import ProcessPoolError, UnexpectedError
from promptflow._core.operation_context import OperationContext
from promptflow._core.run_tracker import RunTracker
from promptflow._utils.dataclass_serializer import convert_eager_flow_output_to_dict
from promptflow._utils.exception_utils import ExceptionPresenter
from promptflow._utils.logger_utils import bulk_logger
from promptflow._utils.multimedia_utils import convert_multimedia_data_to_string, persist_multimedia_data
from promptflow._utils.process_utils import get_available_max_worker_count
from promptflow._utils.thread_utils import RepeatLogTimer
from promptflow._utils.utils import log_progress, set_context
from promptflow.contracts.run_info import FlowRunInfo
from promptflow.contracts.run_info import RunInfo as NodeRunInfo
from promptflow.contracts.run_info import Status
from promptflow.exceptions import ErrorTarget, PromptflowException
from promptflow.executor._errors import (
BatchExecutionTimeoutError,
LineExecutionTimeoutError,
ProcessCrashError,
ThreadCrashError,
)
from promptflow.executor._process_manager import ForkProcessManager, ProcessInfo, SpawnProcessManager
from promptflow.executor._result import LineResult
from promptflow.executor._script_executor import ScriptExecutor
from promptflow.executor.flow_executor import DEFAULT_CONCURRENCY_BULK, FlowExecutor
from promptflow.storage._queue_run_storage import QueueRunStorage
def signal_handler(signum, frame):
    """Signal handler registered for SIGINT in batch worker processes (implementation not shown in this chunk — TODO confirm behavior against the full source)."""
def _exec_line_for_queue(executor_creation_func, input_queue: Queue, output_queue: Queue):
    """Worker-side line-execution loop fed by input_queue and reporting via output_queue; called from _process_wrapper (implementation not shown in this chunk — TODO confirm against the full source)."""
def _process_wrapper(
    executor_creation_func,
    input_queue: Queue,
    output_queue: Queue,
    log_context_initialization_func,
    operation_contexts_dict: dict,
):
    """Entry point for a worker process in the batch execution pool.

    Registers the SIGINT handler (main thread only), propagates the parent's
    operation context into this process, sets up the OpenTelemetry exporter,
    then runs the line-execution loop, optionally inside the parent's log
    context.

    :param executor_creation_func: Factory used to build a flow executor in this process.
    :param input_queue: Queue of line inputs to execute.
    :param output_queue: Queue used to report line results.
    :param log_context_initialization_func: Optional callable returning a log context manager.
    :param operation_contexts_dict: Operation-context values copied from the parent process.
    """
    if threading.current_thread() is threading.main_thread():
        signal.signal(signal.SIGINT, signal_handler)
    else:
        # signal.signal is only allowed from the main thread of a process.
        bulk_logger.info("Current thread is not main thread, skip signal handler registration in batch process pool.")
    OperationContext.get_instance().update(operation_contexts_dict)  # Update the operation context for the new process.
    # set up OpenTelemetry exporter in process who executes the line
    from promptflow.tracing._start_trace import setup_exporter_from_environ
    setup_exporter_from_environ()
    if log_context_initialization_func:
        with log_context_initialization_func():
            _exec_line_for_queue(executor_creation_func, input_queue, output_queue)
    else:
        _exec_line_for_queue(executor_creation_func, input_queue, output_queue)
4,404 | import asyncio
import contextvars
import multiprocessing
import os
import queue
import signal
import sys
import threading
from datetime import datetime
from functools import partial
from logging import INFO
from multiprocessing import Manager, Queue
from multiprocessing.pool import ThreadPool
from pathlib import Path
from typing import Dict, List, Optional, Union
import psutil
from promptflow._constants import LINE_NUMBER_KEY, LINE_TIMEOUT_SEC
from promptflow._core._errors import ProcessPoolError, UnexpectedError
from promptflow._core.operation_context import OperationContext
from promptflow._core.run_tracker import RunTracker
from promptflow._utils.dataclass_serializer import convert_eager_flow_output_to_dict
from promptflow._utils.exception_utils import ExceptionPresenter
from promptflow._utils.logger_utils import bulk_logger
from promptflow._utils.multimedia_utils import convert_multimedia_data_to_string, persist_multimedia_data
from promptflow._utils.process_utils import get_available_max_worker_count
from promptflow._utils.thread_utils import RepeatLogTimer
from promptflow._utils.utils import log_progress, set_context
from promptflow.contracts.run_info import FlowRunInfo
from promptflow.contracts.run_info import RunInfo as NodeRunInfo
from promptflow.contracts.run_info import Status
from promptflow.exceptions import ErrorTarget, PromptflowException
from promptflow.executor._errors import (
BatchExecutionTimeoutError,
LineExecutionTimeoutError,
ProcessCrashError,
ThreadCrashError,
)
from promptflow.executor._process_manager import ForkProcessManager, ProcessInfo, SpawnProcessManager
from promptflow.executor._result import LineResult
from promptflow.executor._script_executor import ScriptExecutor
from promptflow.executor.flow_executor import DEFAULT_CONCURRENCY_BULK, FlowExecutor
from promptflow.storage._queue_run_storage import QueueRunStorage
def format_current_process_info(process_name, pid, line_number: int):
    """Build a human-readable identifier for the worker process handling one line."""
    return "Process name({})-Process id({})-Line number({})".format(process_name, pid, line_number)
def log_process_status(process_name, pid, line_number: int, is_completed=False, is_failed=False):
    """Log start/completion/failure status for a line-execution worker process.

    When neither flag is set, the process is reported as starting execution.
    """
    process_info = format_current_process_info(process_name, pid, line_number)
    if is_completed:
        suffix = "completed."
    elif is_failed:
        suffix = "failed."
    else:
        suffix = "start execution."
    bulk_logger.info(f"{process_info} {suffix}")
4,405 | import multiprocessing
import queue
import signal
import time
from dataclasses import dataclass
from enum import Enum
from functools import partial
from multiprocessing import Process, Queue
from typing import Dict, List
import psutil
from promptflow._core.operation_context import OperationContext
from promptflow._core.run_tracker import RunTracker
from promptflow._utils.logger_utils import LogContext, bulk_logger
from promptflow.executor._errors import (
ProcessInfoObtainedTimeout,
ProcessTerminatedTimeout,
SpawnedForkProcessManagerStartFailure,
)
from promptflow.executor._script_executor import ScriptExecutor
from promptflow.executor.flow_executor import FlowExecutor
from promptflow.storage import AbstractRunStorage
class SpawnedForkProcessManager(AbstractProcessManager):
    """
    SpawnedForkProcessManager extends AbstractProcessManager to manage processes using 'fork' method
    in a spawned process.

    :param control_signal_queue: A queue for controlling signals to manage process operations.
    :type control_signal_queue: multiprocessing.Queue
    :param executor_creation_func: Function to create an executor for each process.
    :type executor_creation_func: Callable
    :param args: Additional positional arguments for the AbstractProcessManager.
    :param kwargs: Additional keyword arguments for the AbstractProcessManager.
    """

    def __init__(
        self,
        log_context_initialization_func,
        current_operation_context,
        control_signal_queue,
        executor_creation_func,
        *args,
        **kwargs,
    ):
        super().__init__(*args, **kwargs)
        # Stored so each forked child can re-enter the parent's log and operation context.
        self._log_context_initialization_func = log_context_initialization_func
        self._current_operation_context = current_operation_context
        self._control_signal_queue = control_signal_queue
        self._executor_creation_func = executor_creation_func
        # All workers are forked from this (already spawned) process.
        self.context = multiprocessing.get_context("fork")

    def new_process(self, i):
        """
        Create and start a new process using the 'fork' context.

        :param i: Index of the input and output queue for the new process.
        :type i: int
        :return: The started process object.
        """
        process: Process = self.context.Process(
            target=self._process_target_func,
            args=(
                self._executor_creation_func,
                self._input_queues[i],
                self._output_queues[i],
                self._log_context_initialization_func,
                self._current_operation_context,
            ),
            # Daemonic workers are terminated automatically if this manager process dies.
            daemon=True,
        )
        process.start()
        try:
            self._process_info[i] = ProcessInfo(
                index=i,
                process_id=process.pid,
                process_name=process.name,
            )
        except Exception as e:
            # Recording process info is best-effort; a failure here must not kill the pool.
            bulk_logger.warning(
                f"Unexpected error occurred while creating ProcessInfo for index {i} and process id {process.pid}. "
                f"Exception: {e}"
            )
        return process

    def handle_signals(self, control_signal, i):
        """
        Handles control signals for processes, performing actions such as starting, ending,
        or restarting them based on the received signal.

        :param control_signal: The control signal indicating the desired action. It can be 'start', 'end', or 'restart'.
        :type control_signal: str
        :param i: Index of the process to control.
        :type i: int
        """
        if control_signal == ProcessControlSignal.END:
            self.end_process(i)
        elif control_signal == ProcessControlSignal.RESTART:
            self.restart_process(i)
        elif control_signal == ProcessControlSignal.START:
            self.new_process(i)
def _create_executor_fork(*, flow_executor: FlowExecutor, storage: AbstractRunStorage):
    """Clone an executor for a forked worker, rebinding it to the given run storage.

    Reuses the already-loaded flow, connections and tools from *flow_executor*
    instead of reloading them in the child process.

    :param flow_executor: The executor created in the parent process.
    :param storage: Run storage dedicated to the worker process.
    :return: A new ScriptExecutor or FlowExecutor bound to *storage*.
    """
    if isinstance(flow_executor, ScriptExecutor):
        return ScriptExecutor(
            flow_file=flow_executor._flow_file,
            connections=flow_executor._connections,
            working_dir=flow_executor._working_dir,
            storage=storage,
        )
    return FlowExecutor(
        flow=flow_executor._flow,
        connections=flow_executor._connections,
        run_tracker=RunTracker(run_storage=storage),
        cache_manager=flow_executor._cache_manager,
        loaded_tools=flow_executor._loaded_tools,
        raise_ex=False,
        line_timeout_sec=flow_executor._line_timeout_sec,
    )
The provided code snippet includes necessary dependencies for implementing the `create_spawned_fork_process_manager` function. Write a Python function `def create_spawned_fork_process_manager( log_context_initialization_func, current_operation_context, control_signal_queue, flow_create_kwargs, **kwargs, )` to solve the following problem:
Manages the creation, termination, and signaling of processes using the 'fork' context.
Here is the function:
def create_spawned_fork_process_manager(
    log_context_initialization_func,
    current_operation_context,
    control_signal_queue,
    flow_create_kwargs,
    **kwargs,
):
    """
    Manages the creation, termination, and signaling of processes using the 'fork' context.

    Runs inside a dedicated spawned process: it forks one worker per input
    queue, then loops handling control signals (start/end/restart) until every
    forked child process has exited.

    :param log_context_initialization_func: Optional callable returning a log context for workers.
    :param current_operation_context: Operation-context values to propagate to workers.
    :param control_signal_queue: Queue carrying (signal, index) tuples from the parent.
    :param flow_create_kwargs: Keyword arguments for FlowExecutor.create.
    :param kwargs: Additional keyword arguments forwarded to SpawnedForkProcessManager.
    """
    # Set up signal handling for process interruption.
    from promptflow.executor._line_execution_process_pool import signal_handler

    signal.signal(signal.SIGINT, signal_handler)

    # Create flow executor.
    executor = FlowExecutor.create(**flow_create_kwargs)

    # When using fork, we use this method to create the executor to avoid reloading the flow
    # which will introduce a lot more memory.
    executor_creation_func = partial(_create_executor_fork, flow_executor=executor)

    manager = SpawnedForkProcessManager(
        log_context_initialization_func,
        current_operation_context,
        control_signal_queue,
        executor_creation_func,
        **kwargs,
    )

    # Initialize processes.
    for i in range(len(manager._input_queues)):
        manager.new_process(i)

    # Main loop to handle control signals and manage process lifecycle.
    while True:
        all_processes_stopped = True

        try:
            process_info_list = manager._process_info.items()
        except Exception as e:
            bulk_logger.warning(f"Unexpected error occurred while get process info list. Exception: {e}")
            break

        for _, info in list(process_info_list):
            pid = info.process_id
            # Check if at least one process is alive.
            if psutil.pid_exists(pid):
                process = psutil.Process(pid)
                if process.status() != "zombie":
                    all_processes_stopped = False
                else:
                    # If we do not call wait(), the child process may become a zombie process,
                    # and psutil.pid_exists(pid) stays true forever, which would prevent this
                    # spawned process from ever exiting.
                    process.wait()

        # If all fork child processes exit, exit the loop.
        if all_processes_stopped:
            break

        try:
            control_signal, i = control_signal_queue.get(timeout=1)
            manager.handle_signals(control_signal, i)
        except queue.Empty:
            # No control signal within the timeout; keep polling worker liveness.
            pass
4,406 | from fastapi import APIRouter
from fastapi.responses import JSONResponse
from promptflow._utils.context_utils import _change_working_dir
from promptflow._utils.logger_utils import service_logger
from promptflow.executor._service.contracts.execution_request import (
CancelExecutionRequest,
FlowExecutionRequest,
NodeExecutionRequest,
)
from promptflow.executor._service.utils.process_manager import ProcessManager
from promptflow.executor._service.utils.process_utils import invoke_sync_function_in_process
from promptflow.executor._service.utils.service_utils import (
get_log_context,
set_environment_variables,
update_and_get_operation_context,
)
from promptflow.executor.flow_executor import FlowExecutor, execute_flow
from promptflow.storage._run_storage import DefaultRunStorage
def flow_test(request: FlowExecutionRequest):
    """Validate the request, apply its environment variables, and execute the flow.

    Returns the result of ``execute_flow`` run under the request's log context.
    """
    request.validate_request()
    set_environment_variables(request)
    # Persist run artifacts under the request's working/output directories.
    storage = DefaultRunStorage(base_dir=request.working_dir, sub_dir=request.output_dir)
    with get_log_context(request):
        return execute_flow(
            request.flow_file,
            request.working_dir,
            request.output_dir,
            request.connections,
            request.inputs,
            run_id=request.run_id,
            storage=storage,
        )
async def flow_execution(request: FlowExecutionRequest):
    """Handle a flow execution request by running ``flow_test`` in a child process.

    :param request: flow execution request carrying flow file, inputs, run id and context.
    :return: the flow execution result produced by ``flow_test``.
    :raises Exception: re-raises any failure from the child process after logging it.
    """
    with get_log_context(request, enable_service_logger=True):
        operation_context = update_and_get_operation_context(request.operation_context)
        service_logger.info(
            f"Received flow execution request, flow run id: {request.run_id}, "
            f"request id: {operation_context.get_request_id()}, executor version: {operation_context.get_user_agent()}."
        )
        try:
            result = await invoke_sync_function_in_process(
                flow_test, args=(request,), run_id=request.run_id, context_dict=request.operation_context
            )
            service_logger.info(f"Completed flow execution request, flow run id: {request.run_id}.")
            return result
        except Exception as ex:
            # Fix: the original assigned a 1-tuple here (stray trailing comma),
            # so the log line rendered as "('(…) …',)" instead of the message.
            error_type_and_message = f"({ex.__class__.__name__}) {ex}"
            service_logger.error(
                f"Failed to execute flow, flow run id: {request.run_id}. Error: {error_type_and_message}"
            )
            # Bare raise preserves the original traceback.
            raise
4,407 | from fastapi import APIRouter
from fastapi.responses import JSONResponse
from promptflow._utils.context_utils import _change_working_dir
from promptflow._utils.logger_utils import service_logger
from promptflow.executor._service.contracts.execution_request import (
CancelExecutionRequest,
FlowExecutionRequest,
NodeExecutionRequest,
)
from promptflow.executor._service.utils.process_manager import ProcessManager
from promptflow.executor._service.utils.process_utils import invoke_sync_function_in_process
from promptflow.executor._service.utils.service_utils import (
get_log_context,
set_environment_variables,
update_and_get_operation_context,
)
from promptflow.executor.flow_executor import FlowExecutor, execute_flow
from promptflow.storage._run_storage import DefaultRunStorage
def single_node_run(request: NodeExecutionRequest):
    """Validate the request, apply its environment variables, and run a single node.

    Returns the node result from ``FlowExecutor.load_and_exec_node`` executed
    inside the request's working directory and log context.
    """
    request.validate_request()
    set_environment_variables(request)
    storage = DefaultRunStorage(base_dir=request.working_dir, sub_dir=request.output_dir)
    # Node execution resolves relative paths against the flow's working dir.
    with _change_working_dir(request.working_dir), get_log_context(request):
        return FlowExecutor.load_and_exec_node(
            request.flow_file,
            request.node_name,
            flow_inputs=request.flow_inputs,
            dependency_nodes_outputs=request.dependency_nodes_outputs,
            connections=request.connections,
            working_dir=request.working_dir,
            storage=storage,
        )
async def node_execution(request: NodeExecutionRequest):
    """Handle a single-node execution request by running ``single_node_run`` in a child process.

    :param request: node execution request carrying flow file, node name, inputs and context.
    :return: the node run result produced by ``single_node_run``.
    :raises Exception: re-raises any failure from the child process after logging it.
    """
    with get_log_context(request, enable_service_logger=True):
        operation_context = update_and_get_operation_context(request.operation_context)
        service_logger.info(
            f"Received node execution request, node name: {request.node_name}, "
            f"request id: {operation_context.get_request_id()}, executor version: {operation_context.get_user_agent()}."
        )
        try:
            result = await invoke_sync_function_in_process(
                single_node_run, args=(request,), run_id=request.run_id, context_dict=request.operation_context
            )
            service_logger.info(f"Completed node execution request, node name: {request.node_name}.")
            return result
        except Exception as ex:
            # Fix: the original assigned a 1-tuple here (stray trailing comma),
            # so the log line rendered as "('(…) …',)" instead of the message.
            error_type_and_message = f"({ex.__class__.__name__}) {ex}"
            service_logger.error(
                f"Failed to execute node, node name: {request.node_name}. Error: {error_type_and_message}"
            )
            # Bare raise preserves the original traceback.
            raise
4,408 | from fastapi import APIRouter
from fastapi.responses import JSONResponse
from promptflow._utils.context_utils import _change_working_dir
from promptflow._utils.logger_utils import service_logger
from promptflow.executor._service.contracts.execution_request import (
CancelExecutionRequest,
FlowExecutionRequest,
NodeExecutionRequest,
)
from promptflow.executor._service.utils.process_manager import ProcessManager
from promptflow.executor._service.utils.process_utils import invoke_sync_function_in_process
from promptflow.executor._service.utils.service_utils import (
get_log_context,
set_environment_variables,
update_and_get_operation_context,
)
from promptflow.executor.flow_executor import FlowExecutor, execute_flow
from promptflow.storage._run_storage import DefaultRunStorage
def cancel_execution(request: CancelExecutionRequest):
    """Terminate the execution process registered under the request's run id."""
    ProcessManager().end_process(request.run_id)
    return JSONResponse({"status": "canceled"})
4,409 | from fastapi import APIRouter
from promptflow._core.tool_meta_generator import generate_tool_meta_in_subprocess
from promptflow._core.tools_manager import collect_package_tools
from promptflow._utils.logger_utils import service_logger
from promptflow.executor._service.contracts.tool_request import RetrieveToolFuncResultRequest, ToolMetaRequest
from promptflow.executor._service.utils.process_utils import SHORT_WAIT_TIMEOUT, invoke_sync_function_in_process
from promptflow.executor._service.utils.service_utils import generate_error_response
def list_package_tools():
    """Return the metadata of all tools discovered in installed packages."""
    package_tools = collect_package_tools()
    return package_tools
4,410 | from fastapi import APIRouter
from promptflow._core.tool_meta_generator import generate_tool_meta_in_subprocess
from promptflow._core.tools_manager import collect_package_tools
from promptflow._utils.logger_utils import service_logger
from promptflow.executor._service.contracts.tool_request import RetrieveToolFuncResultRequest, ToolMetaRequest
from promptflow.executor._service.utils.process_utils import SHORT_WAIT_TIMEOUT, invoke_sync_function_in_process
from promptflow.executor._service.utils.service_utils import generate_error_response
async def retrieve_tool_func_result(request: RetrieveToolFuncResultRequest):
    """Invoke the tool-function-result retrieval in an isolated process with a short timeout."""
    # Imported lazily so the heavy tools manager is only loaded when this route is hit.
    from promptflow._core.tools_manager import retrieve_tool_func_result

    call_args = (request.func_call_scenario, request.func_path, request.func_kwargs, request.ws_triple)
    return await invoke_sync_function_in_process(
        retrieve_tool_func_result, args=call_args, wait_timeout=SHORT_WAIT_TIMEOUT
    )
4,411 | from fastapi import APIRouter
from promptflow._core.tool_meta_generator import generate_tool_meta_in_subprocess
from promptflow._core.tools_manager import collect_package_tools
from promptflow._utils.logger_utils import service_logger
from promptflow.executor._service.contracts.tool_request import RetrieveToolFuncResultRequest, ToolMetaRequest
from promptflow.executor._service.utils.process_utils import SHORT_WAIT_TIMEOUT, invoke_sync_function_in_process
from promptflow.executor._service.utils.service_utils import generate_error_response
def gen_tool_meta(request: ToolMetaRequest):
    """Generate tool metadata in a subprocess and format any per-source errors.

    Returns a dict with the generated ``tools`` and an ``errors`` mapping of
    source -> serialized error response.
    """
    tools, raw_errors = generate_tool_meta_in_subprocess(
        request.working_dir, request.tools, service_logger, prevent_terminate_signal_propagation=True
    )
    formatted_errors = {}
    for source, error_dict in raw_errors.items():
        formatted_errors[source] = generate_error_response(error_dict).to_dict()
    return {"tools": tools, "errors": formatted_errors}
4,412 | from fastapi import APIRouter
from fastapi.responses import PlainTextResponse
from promptflow._utils.feature_utils import get_feature_list
from promptflow.executor._service.utils.service_utils import get_executor_version
def health_check():
    """Liveness probe: always responds with the plain-text body 'healthy'."""
    response = PlainTextResponse("healthy")
    return response
4,413 | from fastapi import APIRouter
from fastapi.responses import PlainTextResponse
from promptflow._utils.feature_utils import get_feature_list
from promptflow.executor._service.utils.service_utils import get_executor_version
def version():
    """Report service health together with the executor version and feature list."""
    payload = {"status": "healthy"}
    payload["version"] = get_executor_version()
    payload["feature_list"] = get_feature_list()
    return payload
4,414 | from fastapi import FastAPI
from fastapi.responses import JSONResponse
from promptflow.executor._service.apis.common import router as common_router
from promptflow.executor._service.apis.execution import router as execution_router
from promptflow.executor._service.apis.tool import router as tool_router
from promptflow.executor._service.utils.service_utils import generate_error_response
async def exception_handler(request, exc):
    """Convert any unhandled exception into a structured JSON error response."""
    error_response = generate_error_response(exc)
    return JSONResponse(
        status_code=int(error_response.response_code),
        content=error_response.to_dict(),
    )
4,415 | import asyncio
import contextlib
import json
import multiprocessing
import os
from datetime import datetime, timedelta
from typing import Callable
import psutil
from promptflow._core._errors import UnexpectedError
from promptflow._core.operation_context import OperationContext
from promptflow._utils.exception_utils import ExceptionPresenter, JsonSerializedPromptflowException
from promptflow._utils.logger_utils import service_logger
from promptflow._utils.process_utils import block_terminate_signal_to_parent
from promptflow.exceptions import ErrorTarget
from promptflow.executor._service._errors import ExecutionCanceledError, ExecutionTimeoutError
from promptflow.executor._service.utils.process_manager import ProcessManager
# Default wait budget for a child process, in seconds (one day).
LONG_WAIT_TIMEOUT = timedelta(days=1).total_seconds()
def _is_process_alive(p: multiprocessing.Process):
    """Return True if *p* is running and not a zombie; otherwise reap it and return False."""
    running = psutil.pid_exists(p.pid) and psutil.Process(p.pid).status() != psutil.STATUS_ZOMBIE
    if running:
        return True
    # join() reaps the child so a zombie entry does not linger in the process table.
    p.join()
    return False
def _execute_target_function(
    target_function: Callable,
    args: tuple,
    kwargs: dict,
    return_dict: dict,
    error_dict: dict,
    context_dict: dict,
):
    """Child-process entry point: run *target_function*, capturing its result or error.

    The result lands in ``return_dict['result']``; any exception is recorded
    into ``error_dict`` by ``exception_wrapper``.
    """
    # Keep termination signals in this child from propagating to the parent.
    block_terminate_signal_to_parent()
    with exception_wrapper(error_dict):
        if context_dict:
            OperationContext.get_instance().update(context_dict)
        service_logger.info("Start processing request in executor service...")
        return_dict["result"] = target_function(*args, **kwargs)
async def invoke_sync_function_in_process(
    target_function: Callable,
    *,
    args: tuple = (),
    kwargs: dict = None,
    run_id: str = None,
    context_dict: dict = None,
    wait_timeout: int = LONG_WAIT_TIMEOUT,
):
    """Run *target_function* in a separate process and await its completion.

    :param target_function: synchronous callable to execute in the child process.
    :param args: positional arguments for the callable.
    :param kwargs: keyword arguments for the callable.
    :param run_id: optional run id used to register the process for cancellation.
    :param context_dict: operation context to propagate into the child process.
    :param wait_timeout: seconds to wait before terminating the child.
    :raises ExecutionCanceledError: if the process was cancelled via ProcessManager.
    :raises ExecutionTimeoutError: if the child exceeds *wait_timeout*.
    :raises UnexpectedError: if the child died without recording an error.
    """
    # Fix: the original used a mutable default argument (kwargs: dict = {}).
    # Normalize None to a fresh dict per call instead.
    kwargs = kwargs if kwargs is not None else {}
    with multiprocessing.Manager() as manager:
        return_dict = manager.dict()
        error_dict = manager.dict()
        p = multiprocessing.Process(
            target=_execute_target_function,
            args=(target_function, args, kwargs, return_dict, error_dict, context_dict),
        )
        p.start()
        service_logger.info(f"[{os.getpid()}--{p.pid}] Start process to execute the request.")
        if run_id:
            ProcessManager().start_process(run_id, p.pid)
        try:
            # Wait for the process to finish or timeout asynchronously.
            start_time = datetime.utcnow()
            while (datetime.utcnow() - start_time).total_seconds() < wait_timeout and _is_process_alive(p):
                await asyncio.sleep(1)
            # If process_id is None, it indicates that the process has been terminated by a cancel request.
            if run_id and not ProcessManager().get_process(run_id):
                raise ExecutionCanceledError(run_id)
            # Terminate the process if it is still alive after timeout.
            if p.is_alive():
                service_logger.error(f"[{p.pid}] Stop process for exceeding {wait_timeout} seconds.")
                p.terminate()
                p.join()
                raise ExecutionTimeoutError(wait_timeout)
            # Raise exception if the process exit code is not 0.
            if p.exitcode != 0:
                exception = error_dict.get("error", None)
                if exception is None:
                    raise UnexpectedError(
                        message="Unexpected error occurred while executing the request",
                        target=ErrorTarget.EXECUTOR,
                    )
                # A JsonSerializedPromptflowException is raised here; no need to convert
                # to PromptflowException since app.exception_handler will format it.
                raise exception
            service_logger.info(f"[{p.pid}--{os.getpid()}] Process finished.")
            return return_dict.get("result", {})
        finally:
            if run_id:
                ProcessManager().remove_process(run_id)
4,416 | import json
import os
from typing import Any, Mapping, Union
from promptflow._core.connection_manager import ConnectionManager
from promptflow._core.operation_context import OperationContext
from promptflow._utils.exception_utils import ErrorResponse, ExceptionPresenter, JsonSerializedPromptflowException
from promptflow._utils.logger_utils import LogContext, service_logger
from promptflow._version import VERSION
from promptflow.executor._service.contracts.execution_request import BaseExecutionRequest
def get_log_context(request: BaseExecutionRequest, enable_service_logger: bool = False) -> LogContext:
    """Build a LogContext for *request*, optionally wiring in the service logger.

    Secrets from the request's connections are registered for credential scrubbing.
    """
    context = LogContext(
        file_path=request.log_path,
        run_mode=request.get_run_mode(),
        credential_list=ConnectionManager(request.connections).get_secret_list(),
    )
    if enable_service_logger:
        context.input_logger = service_logger
    return context
4,417 | import json
import os
from typing import Any, Mapping, Union
from promptflow._core.connection_manager import ConnectionManager
from promptflow._core.operation_context import OperationContext
from promptflow._utils.exception_utils import ErrorResponse, ExceptionPresenter, JsonSerializedPromptflowException
from promptflow._utils.logger_utils import LogContext, service_logger
from promptflow._version import VERSION
from promptflow.executor._service.contracts.execution_request import BaseExecutionRequest
def get_executor_version():
    """Return the executor's user-agent/version string.

    NOTE(review): the implementation appears to be missing from this snippet;
    it presumably builds a user-agent string from ``VERSION`` — restore and
    confirm the body before use.
    """


def update_and_get_operation_context(context_dict: Mapping[str, Any]) -> OperationContext:
    # Merge the request-supplied context into the singleton OperationContext
    # and tag it with this executor's user agent.
    operation_context = OperationContext.get_instance()
    if not context_dict:
        # Nothing to merge; return the singleton unchanged.
        return operation_context
    # update operation context with context_dict
    operation_context.update(context_dict)
    # update user agent to operation context
    executor_user_agent = get_executor_version()
    operation_context.append_user_agent(executor_user_agent)
    return operation_context
4,418 | import json
import os
from typing import Any, Mapping, Union
from promptflow._core.connection_manager import ConnectionManager
from promptflow._core.operation_context import OperationContext
from promptflow._utils.exception_utils import ErrorResponse, ExceptionPresenter, JsonSerializedPromptflowException
from promptflow._utils.logger_utils import LogContext, service_logger
from promptflow._version import VERSION
from promptflow.executor._service.contracts.execution_request import BaseExecutionRequest
def generate_error_response(ex: Union[dict, Exception]):
    """Normalize a raw error dict or an exception into an ErrorResponse."""
    if isinstance(ex, dict):
        return ErrorResponse.from_error_dict(ex)
    if isinstance(ex, JsonSerializedPromptflowException):
        # The exception message is itself a JSON-serialized error dict.
        return ErrorResponse.from_error_dict(json.loads(ex.message))
    return ErrorResponse.from_error_dict(ExceptionPresenter.create(ex).to_dict())
4,419 | import json
import os
from typing import Any, Mapping, Union
from promptflow._core.connection_manager import ConnectionManager
from promptflow._core.operation_context import OperationContext
from promptflow._utils.exception_utils import ErrorResponse, ExceptionPresenter, JsonSerializedPromptflowException
from promptflow._utils.logger_utils import LogContext, service_logger
from promptflow._version import VERSION
from promptflow.executor._service.contracts.execution_request import BaseExecutionRequest
def set_environment_variables(request: BaseExecutionRequest):
    """Merge the request's environment variables into the process environment."""
    env_vars = request.environment_variables
    if env_vars and isinstance(env_vars, dict):
        os.environ.update(env_vars)
4,420 | import base64
import json
import os
import re
import streamlit as st
from pathlib import Path
from streamlit_quill import st_quill
from bs4 import BeautifulSoup, NavigableString, Tag
from promptflow._sdk._utils import print_yellow_warning
from promptflow._sdk._serving.flow_invoker import FlowInvoker
from promptflow._utils.multimedia_utils import is_multimedia_dict, MIME_PATTERN
# Lazily-initialized FlowInvoker, created on the first request in run_flow().
invoker = None
def start():
    """Render the Streamlit chat UI for the web-classification flow.

    Defines the rendering helpers, draws the conversation so far, and shows a
    form whose submission runs the flow via ``run_flow``.
    """

    def clear_chat() -> None:
        # Drop all rendered messages (history list is left untouched here).
        st.session_state.messages = []

    def show_image(image, key=None):
        # NOTE(review): when *image* lacks the data-URI prefix, *key* is assumed
        # to hold the "data:<mime>;base64" prefix and is prepended — confirm.
        if not image.startswith("data:image"):
            st.image(key + ',' + image)
        else:
            st.image(image)

    def json_dumps(value):
        # Best-effort serialization: fall back to the raw value when not JSON-serializable.
        try:
            return json.dumps(value)
        except Exception:
            return value

    def is_list_contains_rich_text(rich_text):
        # True if any (nested) element is a data-URI image string or a rich dict.
        result = False
        for item in rich_text:
            if isinstance(item, list):
                result |= is_list_contains_rich_text(item)
            elif isinstance(item, dict):
                result |= is_dict_contains_rich_text(item)
            else:
                if isinstance(item, str) and item.startswith("data:image"):
                    result = True
        return result

    def is_dict_contains_rich_text(rich_text):
        # True if any (nested) key matches MIME_PATTERN or any value is a data-URI image.
        result = False
        for rich_text_key, rich_text_value in rich_text.items():
            if isinstance(rich_text_value, list):
                result |= is_list_contains_rich_text(rich_text_value)
            elif isinstance(rich_text_value, dict):
                result |= is_dict_contains_rich_text(rich_text_value)
            elif re.match(MIME_PATTERN, rich_text_key) or (
                    isinstance(rich_text_value, str) and rich_text_value.startswith("data:image")):
                result = True
        return result

    def render_message(role, message_items):
        """Render one chat message (dict of items) inside a st.chat_message block."""

        def item_render_message(value, key=None):
            # Leaf renderer: images are shown inline, everything else as inline code.
            if key and re.match(MIME_PATTERN, key):
                show_image(value, key)
            elif isinstance(value, str) and value.startswith("data:image"):
                show_image(value)
            else:
                if key is None:
                    st.markdown(f"`{json_dumps(value)},`")
                else:
                    st.markdown(f"`{key}: {json_dumps(value)},`")

        def list_iter_render_message(message_items):
            if is_list_contains_rich_text(message_items):
                st.markdown("`[ `")
                for item in message_items:
                    if isinstance(item, list):
                        list_iter_render_message(item)
                    # NOTE(review): this second `if` is not `elif`, so a list item
                    # also falls through to the `else` branch below and is rendered
                    # twice — looks like a bug; confirm intended behavior.
                    if isinstance(item, dict):
                        dict_iter_render_message(item)
                    else:
                        item_render_message(item)
                st.markdown("`], `")
            else:
                st.markdown(f"`{json_dumps(message_items)},`")

        def dict_iter_render_message(message_items):
            if is_multimedia_dict(message_items):
                # Single {mime_prefix: base64} entry: render as an image.
                key = list(message_items.keys())[0]
                value = message_items[key]
                show_image(value, key)
            elif is_dict_contains_rich_text(message_items):
                st.markdown("`{ `")
                for key, value in message_items.items():
                    if re.match(MIME_PATTERN, key):
                        show_image(value, key)
                    else:
                        if isinstance(value, list):
                            st.markdown(f"`{key}: `")
                            list_iter_render_message(value)
                        elif isinstance(value, dict):
                            st.markdown(f"`{key}: `")
                            dict_iter_render_message(value)
                        else:
                            item_render_message(value, key)
                st.markdown("`}, `")
            else:
                st.markdown(f"`{json_dumps(message_items)},`")

        with st.chat_message(role):
            dict_iter_render_message(message_items)

    def show_conversation() -> None:
        # Initialize session state on first run, then replay stored messages.
        if "messages" not in st.session_state:
            st.session_state.messages = []
            st.session_state.history = []
        if st.session_state.messages:
            for role, message_items in st.session_state.messages:
                render_message(role, message_items)

    def get_chat_history_from_session():
        if "history" in st.session_state:
            return st.session_state.history
        return []

    def submit(**kwargs) -> None:
        # Record and render the user turn, run the flow, then record and
        # render the assistant turn.
        st.session_state.messages.append(("user", kwargs))
        session_state_history = dict()
        session_state_history.update({"inputs": kwargs})
        with container:
            render_message("user", kwargs)
        # Force append chat history to kwargs
        response = run_flow(kwargs)
        st.session_state.messages.append(("assistant", response))
        session_state_history.update({"outputs": response})
        st.session_state.history.append(session_state_history)
        with container:
            render_message("assistant", response)

    def run_flow(data: dict) -> dict:
        """Invoke the flow, lazily creating the module-level FlowInvoker."""
        global invoker
        if not invoker:
            flow = Path(__file__).parent / "flow"
            dump_path = flow.parent
            # chdir so relative paths inside the flow resolve correctly.
            if flow.is_dir():
                os.chdir(flow)
            else:
                os.chdir(flow.parent)
            invoker = FlowInvoker(flow, connection_provider="local", dump_to=dump_path)
        result, result_output = invoker.invoke(data)
        print_yellow_warning(f"Result: {result_output}")
        return result

    def extract_content(node):
        # Flatten an HTML node into a list of text strings and {mime_prefix: base64} dicts.
        if isinstance(node, NavigableString):
            text = node.strip()
            if text:
                return [text]
        elif isinstance(node, Tag):
            if node.name == 'img':
                prefix, base64_str = node['src'].split(',', 1)
                return [{prefix: base64_str}]
            else:
                result = []
                for child in node.contents:
                    result.extend(extract_content(child))
                return result
        return []

    def parse_html_content(html_content):
        soup = BeautifulSoup(html_content, 'html.parser')
        result = []
        for p in soup.find_all('p'):
            result.extend(extract_content(p))
        return result

    def parse_image_content(image_content, image_type):
        # Returns None when no image was uploaded.
        if image_content is not None:
            file_contents = image_content.read()
            image_content = base64.b64encode(file_contents).decode('utf-8')
            prefix = f"data:{image_type};base64"
            return {prefix: image_content}

    st.title("web-classification APP")
    st.chat_message("assistant").write("Hello, please input following flow inputs.")
    container = st.container()
    with container:
        show_conversation()

    with st.form(key='input_form', clear_on_submit=True):
        # Optional secrets defined in settings.json are collected as password inputs.
        settings_path = os.path.join(os.path.dirname(__file__), "settings.json")
        if os.path.exists(settings_path):
            with open(settings_path, "r") as file:
                json_data = json.load(file)
            environment_variables = list(json_data.keys())
            for environment_variable in environment_variables:
                secret_input = st.text_input(
                    label=environment_variable,
                    type="password",
                    placeholder=f"Please input {environment_variable} here. If you input before, you can leave it "
                                f"blank.")
                if secret_input != "":
                    os.environ[environment_variable] = secret_input

        url = st.text_input(label='url',
                            placeholder='https://play.google.com/store/apps/details?id=com.twitter.android')

        cols = st.columns(7)
        submit_bt = cols[0].form_submit_button(label='Submit')
        clear_bt = cols[1].form_submit_button(label='Clear')

    if submit_bt:
        submit(url=url)

    if clear_bt:
        clear_chat()
4,421 | import os
import sys
from promptflow._cli._pf._connection import create_connection
from streamlit.web import cli as st_cli
from streamlit.runtime import exists
from main import start
def is_yaml_file(file_path):
    """Return True when *file_path* has a .yaml or .yml extension (case-insensitive)."""
    extension = os.path.splitext(file_path)[1]
    return extension.lower() in ('.yaml', '.yml')
def create_connections(directory_path) -> None:
    """Create a promptflow connection from every YAML file found under *directory_path*."""
    for dirpath, _dirnames, filenames in os.walk(directory_path):
        for filename in filenames:
            candidate = os.path.join(dirpath, filename)
            if not is_yaml_file(candidate):
                continue
            create_connection(candidate)
4,422 | import json
import logging
from flask import Flask, jsonify, request
from promptflow import load_flow
from promptflow.connections import AzureOpenAIConnection
from promptflow.entities import FlowContext
from promptflow.exceptions import SystemErrorException, UserErrorException
def handle_error(e):
    """Map known promptflow exceptions to JSON error responses.

    User errors map to 400, system errors to 500; anything else is formatted
    via promptflow's internal presenter helpers.
    """
    if isinstance(e, UserErrorException):
        return jsonify({"message": e.message, "additional_info": e.additional_info}), 400
    if isinstance(e, SystemErrorException):
        return jsonify({"message": e.message, "additional_info": e.additional_info}), 500
    from promptflow._internal import ErrorResponse, ExceptionPresenter

    # Handle other unexpected errors with internal classes to format them;
    # note this interface may change in the future.
    presenter = ExceptionPresenter.create(e)
    resp = ErrorResponse(presenter.to_dict(include_debug_info=False))
    result = resp.to_simplified_dict()
    result.update({"trace_back": presenter.formatted_traceback})
    return jsonify(result), resp.response_code
4,423 | import json
import logging
from flask import Flask, jsonify, request
from promptflow import load_flow
from promptflow.connections import AzureOpenAIConnection
from promptflow.entities import FlowContext
from promptflow.exceptions import SystemErrorException, UserErrorException
The provided code snippet includes necessary dependencies for implementing the `health` function. Write a Python function `def health()` to solve the following problem:
Check if the runtime is alive.
Here is the function:
def health():
"""Check if the runtime is alive."""
return {"status": "Healthy"} | Check if the runtime is alive. |
4,424 | import json
import logging
from flask import Flask, jsonify, request
from promptflow import load_flow
from promptflow.connections import AzureOpenAIConnection
from promptflow.entities import FlowContext
from promptflow.exceptions import SystemErrorException, UserErrorException
# Module-level logger for this scoring app.
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
# Load the flow once at import time; per-request context is set inside score().
f = load_flow("./echo_connection_flow/")
The provided code snippet includes necessary dependencies for implementing the `score` function. Write a Python function `def score()` to solve the following problem:
process a flow request in the runtime.
Here is the function:
def score():
    """Process a flow request in the runtime."""
    raw_data = request.get_data()
    logger.info(f"Start loading request data '{raw_data}'.")
    data = json.loads(raw_data)
    # Dummy in-memory connection object; it is never stored in the local db.
    llm_connection = AzureOpenAIConnection(
        name="llm_connection",
        api_key="[determined by request]",
        api_base="[determined by request]",
    )
    # Optional per-request override: node "echo_connection" takes node_input
    # from the request payload when provided.
    overrides = {}
    if "node_input" in data:
        overrides["nodes.echo_connection.inputs.node_input"] = data["node_input"]
    # A fresh FlowContext per request keeps concurrent requests thread safe.
    f.context = FlowContext(
        connections={"echo_connection": {"connection": llm_connection}},
        overrides=overrides,
    )
    # Request payload is forwarded to the flow as keyword arguments.
    # If streaming=True were set on the context, the result would be a generator.
    result_dict = f(**data)
    return jsonify(result_dict)
4,425 | import json
import logging
from flask import Flask, jsonify, request
from promptflow import load_flow
from promptflow.connections import AzureOpenAIConnection
from promptflow.entities import FlowContext
from promptflow.exceptions import SystemErrorException, UserErrorException
# NOTE(review): SimpleScoreApp is not defined in this snippet — presumably a
# Flask subclass that registers handle_error/health/score; confirm.
app = SimpleScoreApp(__name__)
def create_app(**kwargs):
    """Application factory: return the module-level app instance (kwargs are ignored)."""
    return app
4,426 | from promptflow import tool
from promptflow.connections import AzureOpenAIConnection
def echo_connection(flow_input: str, node_input: str, connection: AzureOpenAIConnection):
    """Print the received inputs and connection, then echo the flow input back."""
    for label, value in (
        ("Flow input", flow_input),
        ("Node input", node_input),
        ("Flow connection", connection._to_dict()),
    ):
        print(f"{label}: {value}")
    # get from env var
    return {"value": flow_input}
4,427 | from enum import Enum
from promptflow import tool
class ConversationModality(str, Enum):
    """Modality of a conversation input: typed text or a speech transcript."""

    TEXT = "text"
    TRANSCRIPT = "transcript"
def create_conversation_item(line: str, id: int) -> dict:
    """Build one conversation item from a "<speaker>: <text>" line.

    :param line: a single conversation line of the form "<speaker>: <text>".
    :param id: item id to assign.
    """
    # Split only on the first colon so speaker text may contain colons.
    parts = line.split(":", 1)
    speaker = parts[0].strip()
    utterance = parts[1].strip()
    # Known participants keep their name as the role; everyone else is "generic".
    role = speaker if speaker.lower() in {"customer", "agent"} else "generic"
    return {
        "id": id,
        "participantId": speaker,
        "role": role,
        "text": utterance,
    }
The provided code snippet includes necessary dependencies for implementing the `create_conversation` function. Write a Python function `def create_conversation(text: str, modality: ConversationModality, language: str, id: int) -> dict` to solve the following problem:
This tool creates a conversation input for conversation-based language skills. Conversation text is assumed to be of the following form: <speaker id>: <speaker text> <speaker id>: <speaker text> ... :param text: conversation text. :param modality: conversation modality. :param language: conversation language. :param id: conversation id.
Here is the function:
def create_conversation(text: str,
                        modality: ConversationModality,
                        language: str,
                        id: int) -> dict:
    """
    This tool creates a conversation input for conversation-based
    language skills.

    Conversation text is assumed to be of the following form:
        <speaker id>: <speaker text>
        <speaker id>: <speaker text>
        ...

    :param text: conversation text.
    :param modality: conversation modality.
    :param language: conversation language.
    :param id: conversation id.
    """
    conv_items = []
    # Fix: the original rebound the `id` parameter as the per-item counter, so
    # the documented conversation id was discarded and the final counter value
    # was returned in the "id" field. Use a dedicated counter for item ids.
    item_id = 1
    # NOTE(review): replacing every space with a newline splits lines whose text
    # contains spaces into bogus items; this looks like a mangled separator in
    # the original source — confirm the intended delimiter.
    lines = text.replace(" ", "\n").split("\n")
    lines = filter(lambda line: len(line.strip()) != 0, lines)
    for line in lines:
        conv_items.append(create_conversation_item(line, item_id))
        item_id += 1
    return {
        "conversationItems": conv_items,
        "language": language,
        "modality": modality,
        "id": str(id),
    }
4,428 | from promptflow import tool
The provided code snippet includes necessary dependencies for implementing the `read_file` function. Write a Python function `def read_file(file_path: str) -> str` to solve the following problem:
This tool opens a file and reads its contents into a string. :param file_path: the file path of the file to be read.
Here is the function:
def read_file(file_path: str) -> str:
    """
    Open a UTF-8 text file and return its entire contents as a string.

    :param file_path: the file path of the file to be read.
    """
    with open(file_path, 'r', encoding="utf8") as handle:
        contents = handle.read()
    return contents
4,429 | from promptflow import tool
The provided code snippet includes necessary dependencies for implementing the `create_document` function. Write a Python function `def create_document(text: str, language: str, id: int) -> dict` to solve the following problem:
This tool creates a document input for document-based language skills :param text: document text. :param language: document language. :param id: document id.
Here is the function:
def create_document(text: str, language: str, id: int) -> dict:
    """
    Create a document input for document-based language skills.

    :param text: document text.
    :param language: document language.
    :param id: document id (stringified in the output).
    """
    document = {"text": text, "language": language}
    document["id"] = str(id)
    return document
4,430 | from promptflow import tool
The provided code snippet includes necessary dependencies for implementing the `extract_language_code` function. Write a Python function `def extract_language_code(ld_output: dict) -> str` to solve the following problem:
This tool extracts the ISO 639-1 language code from language detection output. :param ld_output: language detection output (parsed).
Here is the function:
def extract_language_code(ld_output: dict) -> str:
    """
    This tool extracts the ISO 639-1 language code from language
    detection output.
    :param ld_output: language detection output (parsed).
    """
    detected = ld_output["detectedLanguage"]
    return detected["iso6391Name"]
4,431 | from promptflow import tool
The provided code snippet includes necessary dependencies for implementing the `create_redacted_conversation` function. Write a Python function `def create_redacted_conversation(conversation: dict, pii_output: dict) -> dict` to solve the following problem:
This tool creates a conversation input for conversation-based language skills from the task output of conversational PII. It does so by replacing all original text with the PII redacted text. :param conversation: original conversation object. :param pii_output: conversational pii node output (parsed).
Here is the function:
def create_redacted_conversation(conversation: dict, pii_output: dict) -> dict:
    """
    This tool creates a conversation input for conversation-based
    language skills from the task output of conversational PII.
    It does so by replacing all original text with the PII redacted
    text.
    :param conversation: original conversation object (left untouched).
    :param pii_output: conversational pii node output (parsed).
    """
    import copy  # local import so the snippet stays self-contained
    # Bug fix: dict.copy() is shallow, so writing into
    # redacted_conversation["conversationItems"][i] mutated the caller's
    # original conversation in place. Deep-copy the nested structure instead.
    redacted_conversation = copy.deepcopy(conversation)
    redacted_items = pii_output["conversationItems"]
    for index, item in enumerate(redacted_items):
        redacted_text = item["redactedContent"]["text"]
        redacted_conversation["conversationItems"][index]["text"] = redacted_text
    return redacted_conversation
4,432 | from promptflow import tool
The provided code snippet includes necessary dependencies for implementing the `parse_skill_to_text` function. Write a Python function `def parse_skill_to_text(output: object, skill: str) -> str` to solve the following problem:
This tool parses a language skill result into a string, when possible. Not all skills give logical string parsings. :param output: skill output. :param skill: skill type to parse.
Here is the function:
def parse_skill_to_text(output: object, skill: str) -> str:
    """
    This tool parses a language skill result into a string,
    when possible. Not all skills give logical string parsings.
    :param output: skill output.
    :param skill: skill type to parse.
    """
    extractors = {
        # Translation: return first translation.
        "TRANSLATION": lambda o: o["translations"][0]["text"],
        # PII: return redacted text.
        "PII": lambda o: o["redactedText"],
        # Abstractive summarization: return summary.
        "ABSTRACTIVE": lambda o: o["summaries"][0]["text"],
    }
    extractor = extractors.get(skill)
    if extractor is None:
        raise RuntimeError("Unsupported skill parsing.")
    return extractor(output)
4,435 | from typing import List
from promptflow import tool
from promptflow import log_metric
def accuracy_aggregate(processed_results: List[int]):
    """Aggregate per-line results (-1 = exception, 1 = correct) into
    accuracy / error-rate metrics and log both."""
    num_total = len(processed_results)
    num_exception = sum(1 for r in processed_results if r == -1)
    num_correct = sum(1 for r in processed_results if r == 1)
    accuracy = round(1.0 * num_correct / num_total, 2)
    error_rate = round(1.0 * num_exception / num_total, 2)
    log_metric(key="accuracy", value=accuracy)
    log_metric(key="error_rate", value=error_rate)
    return {
        "num_total": num_total,
        "num_correct": num_correct,
        "num_exception": num_exception,
        "accuracy": accuracy,
        "error_rate": error_rate
    }
4,436 | from promptflow import tool
def line_process(groundtruth: str, prediction: str) -> int:
    """Grade one numeric prediction: 1 = correct (to 2 decimals),
    0 = incorrect, -1 = known error marker or unparsable number."""
    if prediction == "JSONDecodeError" or prediction.startswith("Unknown Error:"):
        return -1
    try:
        gt_value = float(groundtruth)
        pred_value = float(prediction)
    except ValueError:
        return -1
    return 1 if round(pred_value, 2) == round(gt_value, 2) else 0
4,437 | from typing import List
from promptflow import tool
def aggregate(perceived_intelligence_score: List[float]):
    """Average per-line perceived-intelligence scores, log the metric, and
    return the aggregate dict."""
    count = len(perceived_intelligence_score)
    total = sum(perceived_intelligence_score)
    aggregated_results = {"perceived_intelligence_score": total / count,
                          "count": count}
    from promptflow import log_metric
    log_metric(key="perceived_intelligence_score",
               value=aggregated_results["perceived_intelligence_score"])
    return aggregated_results
4,438 | from promptflow import tool
import re
def extract_float(s):
    """Return the first int/float literal found in *s* as a float, or None.

    Bug fix: this function's body was missing entirely (syntactically
    invalid); restored to match the identical helper used elsewhere in
    this code base (regex search for a signed int/float literal).
    """
    match = re.search(r"[-+]?\d*\.\d+|\d+", s)
    if match:
        return float(match.group())
    else:
        return None

def parse_score(gpt_score: str):
    """Parse the numeric score out of an LLM reply.

    Raises TypeError (float(None)) when no number is present.
    """
    return float(extract_float(gpt_score))
4,439 | import logging
import logging.config
import re
from pathlib import Path
from typing import List
import promptflow
import yaml
from openai import AzureOpenAI
from promptflow.connections import AzureOpenAIConnection
from tenacity import (
RetryError,
Retrying,
after_log,
before_sleep_log,
stop_after_attempt,
wait_random_exponential,
)
# NOTE(review): `Logger` is not imported in this snippet — presumably a
# project-local logging helper that reads logging config; confirm the
# import before reusing this line.
logger = Logger().get_logger()
def aggregate_llm_scores(llm_responses: List[str], max_score: int) -> float:
    """Parse and average valid scores from the generated responses of
    the G-Eval LLM call.

    Args:
        llm_responses (List[str]): List of scores from multiple LLMs
        max_score (float): The maximum score allowed.

    Returns:
        float: The average of all the valid scores
    """
    all_scores = []
    error_count = 0
    for generated in llm_responses:
        try:
            parsed = parse_output(generated, max_score)
            all_scores.append(parsed)
        except ValueError as e:
            logger.warning(e)
            error_count += 1
    if error_count:
        # Bug fix: the message hard-coded "20" (the caller's n=20 sampling
        # count); report the actual number of responses received instead.
        logger.warning(
            f"{error_count} out of {len(llm_responses)} scores were discarded due to corrupt g-eval generation"
        )
    # NOTE(review): if every response is corrupt this raises
    # ZeroDivisionError — consider raising a clearer error; preserved
    # as-is to avoid changing the exception contract.
    score = sum(all_scores) / len(all_scores)
    return score
The provided code snippet includes necessary dependencies for implementing the `geval_summarization` function. Write a Python function `def geval_summarization( prompt_with_src_and_gen: str, max_score: float, connection: AzureOpenAIConnection, deployment_name: str = "gpt-4", ) -> float` to solve the following problem:
Using GPT, evaluate a generated summary with respect to a source document from which it was generated. This function should be used for four dimensions of summarization evaluation inline with the SummEval benchmark: fluency, coherence, consistency, relevance. Args: prompt_with_src_and_gen (str): The prompt containing the source document and generated summary. max_score (float): The maximum score allowed. connection (AzureOpenAIConnection): The connection object for Azure OpenAI. deployment_name (str, optional): The name of the deployment. Defaults to "gpt-4". Returns: float: The evaluation score
Here is the function:
def geval_summarization(
    prompt_with_src_and_gen: str,
    max_score: float,
    connection: AzureOpenAIConnection,
    deployment_name: str = "gpt-4",
) -> float:
    """Using GPT, evaluate a generated summary with respect to a source document from
    which it was generated. This function should be used for four dimensions of
    summarization evaluation inline with the SummEval benchmark: fluency, coherence,
    consistency, relevance.
    Args:
        prompt_with_src_and_gen (str): The prompt containing the source document and generated summary.
        max_score (float): The maximum score allowed.
        connection (AzureOpenAIConnection): The connection object for Azure OpenAI.
        deployment_name (str, optional): The name of the deployment. Defaults to "gpt-4".
    Returns:
        float: The evaluation score
    """
    # make sure you use the same api version/model with the one used for meta evaluation
    logger.info(
        f"OpenAI API Base: {connection.api_base} - Version: {connection.api_version}"
        f" - Deployment: {deployment_name}"
    )
    client = AzureOpenAI(
        azure_endpoint=connection.api_base,
        api_version=connection.api_version,
        api_key=connection.api_key,
    )
    message = {"role": "system", "content": prompt_with_src_and_gen}
    # Retry with exponential backoff (up to 10 attempts) to ride out rate
    # limits and transient API failures; reraise=True surfaces the final error.
    try:
        for attempt in Retrying(
            reraise=True,
            before_sleep=before_sleep_log(logger, logging.INFO),
            after=after_log(logger, logging.INFO),
            wait=wait_random_exponential(multiplier=1, min=1, max=120),
            stop=stop_after_attempt(10),
        ):
            with attempt:
                response = client.chat.completions.create(
                    model=deployment_name,
                    messages=[message],
                    # NOTE(review): temperature=2 with n=20 and max_tokens=5
                    # looks like deliberate G-Eval sampling diversity (many
                    # short score samples averaged downstream) — confirm.
                    temperature=2,
                    max_tokens=5,
                    top_p=1,
                    frequency_penalty=0,
                    presence_penalty=0,
                    stop=None,
                    n=20,
                )
    except RetryError:
        logger.exception(f"geval openai call failed\nInput prompt was: {message}")
        raise
    # Collect the text of every sampled completion; content may be absent
    # when the OpenAI content filter triggers, hence the per-choice guard.
    all_responses = []
    for i in range(len(response.choices)):
        try:
            content = response.choices[i].message.content
            all_responses.append(content)
        except KeyError:
            # `content` won't exist in returned json when openai content_filter is triggered
            logger.exception(
                f"""data with key missing was: {response.choices[i]}\nInput prompt was: {message}"""
            )
    return aggregate_llm_scores(all_responses, max_score=max_score)
4,440 | from typing import Dict, List
from promptflow import log_metric, tool
The provided code snippet includes necessary dependencies for implementing the `aggregate` function. Write a Python function `def aggregate( fluency_list: List[float], consistency_list: List[float], relevance_list: List[float], coherence_list: List[float], ) -> Dict[str, float]` to solve the following problem:
Takes list of scores for 4 dims and outputs average for them. Args: fluency_list (List(float)): list of fluency scores consistency_list (List(float)): list of consistency scores relevance_list (List(float)): list of relevance scores coherence_list (List(float)): list of coherence scores Returns: Dict[str, float]: Returns average scores
Here is the function:
def aggregate(
    fluency_list: List[float],
    consistency_list: List[float],
    relevance_list: List[float],
    coherence_list: List[float],
) -> Dict[str, float]:
    """
    Takes list of scores for 4 dims and outputs average for them.
    Args:
        fluency_list (List(float)): list of fluency scores
        consistency_list (List(float)): list of consistency scores
        relevance_list (List(float)): list of relevance scores
        coherence_list (List(float)): list of coherence scores
    Returns:
        Dict[str, float]: Returns average scores
    """
    averages = {
        "average_fluency": sum(fluency_list) / len(fluency_list),
        "average_consistency": sum(consistency_list) / len(consistency_list),
        "average_relevance": sum(relevance_list) / len(relevance_list),
        "average_coherence": sum(coherence_list) / len(coherence_list),
    }
    for metric_name, metric_value in averages.items():
        log_metric(metric_name, metric_value)
    return averages
4,441 | from promptflow import tool
from typing import List
from promptflow import log_metric
def log_metrics(match_counts: List[dict]):
    """Compute exact/partial match rates over all per-line match dicts,
    log them as metrics, and return both rates."""
    total = len(match_counts)
    exact_match_rate = sum(m["exact_match"] for m in match_counts) / total
    partial_match_rate = sum(m["partial_match"] for m in match_counts) / total
    log_metric(key="exact_match_rate", value=exact_match_rate)
    log_metric(key="partial_match_rate", value=partial_match_rate)
    print("exact_match_rate: ", exact_match_rate)
    print("partial_match_rate: ", partial_match_rate)
    return {"exact_match_rate": exact_match_rate, "partial_match_rate": partial_match_rate}
4,442 | from promptflow import tool
from typing import List
def is_match(
        answer: List[str],
        ground_truth: List[str],
        ignore_case: bool,
        ignore_order: bool,
        allow_partial: bool) -> bool:
    """Compare two string lists under configurable matching rules.

    :param answer: predicted entities.
    :param ground_truth: expected entities.
    :param ignore_case: compare case-insensitively.
    :param ignore_order: compare as sorted sequences.
    :param allow_partial: succeed when every answer item appears in ground_truth.
    """
    if ignore_case:
        answer = [a.lower() for a in answer]
        ground_truth = [g.lower() for g in ground_truth]
    if ignore_order:
        # Bug fix: list.sort() mutated the CALLER's lists whenever
        # ignore_case was False (no copy had been made yet);
        # sorted() always works on a fresh copy.
        answer = sorted(answer)
        ground_truth = sorted(ground_truth)
    if allow_partial:
        x = [a for a in answer if a in ground_truth]
        return x == answer
    return answer == ground_truth

def match(answer: List[str], ground_truth: List[str]):
    """Score one line: exact and partial match flags (case/order ignored)."""
    exact_match = 0
    partial_match = 0
    if is_match(answer, ground_truth, ignore_case=True, ignore_order=True, allow_partial=False):
        exact_match = 1
    if is_match(answer, ground_truth, ignore_case=True, ignore_order=True, allow_partial=True):
        partial_match = 1
    return {"exact_match": exact_match, "partial_match": partial_match, "answer": answer, "ground_truth": ground_truth}
4,443 | from typing import List
from promptflow import tool
def cleansing(entities_str: str) -> List[str]:
    """Split a comma-separated entity string, trimming surrounding
    spaces/tabs/dots/quotes from each piece and dropping empties."""
    entities = []
    for raw in entities_str.split(","):
        trimmed = raw.strip(" \t.\"")
        if trimmed:
            entities.append(trimmed)
    return entities
4,445 | from promptflow import tool
def string_to_number(raw_string: str) -> float:
    r'''Parse a numeric string (int, float, or a simple "a/b" fraction) to float.

    Returns None for anything malformed, e.g. 'the answer is \box{2/3}',
    '0, 5, or any number greater than 11', '4/7//9'.
    '''
    try:
        return float(raw_string)
    except Exception:
        pass
    if '/' not in raw_string:
        return None
    parts = raw_string.split('/')
    if len(parts) != 2:
        return None
    numerator, denominator = parts
    try:
        return float(numerator) / float(denominator)
    except Exception:
        return None
The provided code snippet includes necessary dependencies for implementing the `line_process` function. Write a Python function `def line_process(groundtruth: str, prediction: str) -> int` to solve the following problem:
Early stop
Here is the function:
def line_process(groundtruth: str, prediction: str) -> int:
    """Compare numeric groundtruth/prediction strings: 1 on match
    (to 10 decimals), -1 otherwise.

    NOTE(review): a valid-but-wrong prediction also yields -1, the same
    code used for unparsable input — confirm downstream aggregation
    expects this conflation.
    """
    pred_float = string_to_number(prediction)
    # Early stop: skip groundtruth parsing when prediction is unusable.
    if pred_float is None:
        return -1
    gt_float = string_to_number(groundtruth)
    if gt_float is None:
        return -1
    # Both values parsed successfully: compare rounded floats.
    return 1 if round(pred_float, 10) == round(gt_float, 10) else -1
4,446 | from promptflow import tool
def grade(groundtruth: str, prediction: str):
    """Case-insensitive exact-match grading of a prediction."""
    if groundtruth.lower() == prediction.lower():
        return "Correct"
    return "Incorrect"
4,447 | from typing import List
from promptflow import log_metric, tool
def calculate_accuracy(grades: List[str]):
    """Copy the per-line grades, log the fraction graded "Correct" as the
    accuracy metric, and return the grade list."""
    result = list(grades)
    # calculate accuracy for each variant
    accuracy = round(result.count("Correct") / len(result), 2)
    log_metric("accuracy", accuracy)
    return result
4,448 | from promptflow import tool
def select_metrics(metrics: str) -> dict:
    """Map each supported RAG metric to True/False based on a
    comma-separated selection string.

    Bug fix: the return annotation said `str`, but a dict of
    {metric_name: bool} has always been returned.
    """
    supported_metrics = ('gpt_relevance', 'gpt_groundedness', 'gpt_retrieval_score')
    user_selected_metrics = [metric.strip() for metric in metrics.split(',') if metric]
    return {metric: metric in user_selected_metrics for metric in supported_metrics}
4,449 | from promptflow import tool
import numpy as np
def concat_results(rag_retrieval_score: dict = None,
                   rag_grounding_score: dict = None, rag_generation_score: dict = None):
    """Flatten the three RAG evaluator node outputs into one
    {metric_name: score} dict; missing or unparsable scores become NaN."""
    named_outputs = [('gpt_groundedness', rag_grounding_score),
                     ('gpt_retrieval_score', rag_retrieval_score),
                     ('gpt_relevance', rag_generation_score)]
    errors = []
    variant_level_result = {}
    for metric_name, payload in named_outputs:
        if not payload:
            variant_level_result[metric_name] = np.nan
            continue
        try:
            score = float(payload["quality_score"])
        except Exception as exc:
            score = np.nan
            errors.append({"name": metric_name, "msg": str(exc), "data": payload})
        # The reasoning is looked up (a KeyError propagates, as before)
        # even though only scores end up in the returned mapping.
        _ = payload['quality_reasoning']
        variant_level_result[metric_name] = score
    return variant_level_result
4,450 | from promptflow import tool
import re
def parse_generation_output(rag_generation_score: str) -> dict:
    """Extract the numeric quality score ("Quality score: N/M") and the
    reasoning line from the generation-evaluation LLM output.

    Bug fix: the return annotation said `str`, but a dict with keys
    `quality_score` (int, NaN when absent) and `quality_reasoning` (str)
    has always been returned.
    """
    quality_score = float('nan')
    quality_reasoning = ''
    # Scan for the score line; take the number before the "/".
    for sent in rag_generation_score.split('\n'):
        sent = sent.strip()
        if re.match(r"\s*(<)?Quality score:", sent):
            numbers_found = re.findall(r"(\d+\.*\d*)\/", sent)
            if len(numbers_found) == 0:
                continue
            quality_score = int(
                float(numbers_found[0].replace("'", "")))
    # First reasoning line only (break after the first match).
    for sent in rag_generation_score.split('\n'):
        sent = sent.strip()
        if re.match(r"\s*(<)?Quality score reasoning:", sent):
            quality_reasoning += sent.strip()
            break
    return {"quality_score": quality_score, "quality_reasoning": quality_reasoning}
4,451 | from promptflow import tool
import re
def parse_retrieval_output(retrieval_output: str) -> dict:
    """Split the retrieval-evaluation LLM output at "# Result", parse the
    trailing 1-5 score, and return score plus the reasoning text.

    Bug fix: the return annotation said `str`, but a dict with keys
    `quality_score` (float, NaN when out of range/absent) and
    `quality_reasoning` (str) has always been returned.
    """
    score_response = [sent.strip() for sent in
                      retrieval_output.strip("\"").split("# Result")[-1].strip().split('.') if sent.strip()]
    parsed_score_response = re.findall(r"\d+", score_response[-1])
    if len(parsed_score_response) > 0:
        score = parsed_score_response[-1].strip()
        # Scores are expected on a 1-5 scale; anything else is invalid.
        if float(score) < 1.0 or float(score) > 5.0:
            score = float('nan')
    else:
        score = float('nan')
    try:
        reasoning_response, _ = retrieval_output.split("# Result")
    except Exception:
        # No (or multiple) "# Result" markers: keep the whole text as reasoning.
        reasoning_response = retrieval_output
    return {"quality_score": float(score), "quality_reasoning": reasoning_response}
4,452 | from typing import List
from promptflow import tool, log_metric
import numpy as np
def aggregate_variants_results(results: List[dict], metrics: List[str]):
    """Aggregate per-line metric dicts into per-variant means and log each
    selected metric; pass-rate metrics are rescaled to percentages.

    :param results: one {metric_name: value} dict per processed line.
    :param metrics: selection; only names appearing in metrics[0] are
        averaged and logged.
    """
    aggregate_results = {}
    for result in results:
        for name, value in result.items():
            if name not in aggregate_results.keys():
                aggregate_results[name] = []
            try:
                float_val = float(value)
            except Exception:
                # Non-numeric values become NaN so nanmean ignores them.
                float_val = np.nan
            aggregate_results[name].append(float_val)
    for name, value in aggregate_results.items():
        # NOTE(review): `name in metrics[0]` is a substring test against the
        # FIRST element of `metrics` — presumably a comma-joined selection
        # string; confirm this is intended rather than `name in metrics`.
        if name in metrics[0]:
            metric_name = name
            aggregate_results[name] = np.nanmean(value)
            if 'pass_rate' in metric_name:
                # Pass rates are reported as percentages.
                metric_name = metric_name + "(%)"
                aggregate_results[name] = aggregate_results[name] * 100.0
            aggregate_results[name] = round(aggregate_results[name], 2)
            log_metric(metric_name, aggregate_results[name])
    return aggregate_results
4,453 | from promptflow import tool
def is_valid(input_item):
    """True when the value is a non-empty string after stripping whitespace."""
    return bool(input_item and input_item.strip())

def validate_input(question: str, answer: str, documents: str, selected_metrics: dict) -> dict:
    """For each selected metric, verify that all of its required inputs are
    present; flip the metric to False when any is missing.

    Mutates and returns `selected_metrics` (same contract as before).
    """
    presence = {"question": is_valid(question), "answer": is_valid(answer),
                "documents": is_valid(documents)}
    available = {field for field, ok in presence.items() if ok}
    required_by_metric = {"gpt_groundedness": {"question", "answer", "documents"},
                          "gpt_relevance": {"question", "answer", "documents"},
                          "gpt_retrieval_score": {"question", "documents"}}
    data_validation = selected_metrics
    for metric, selected in selected_metrics.items():
        if selected:
            data_validation[metric] = required_by_metric[metric] <= available
    return data_validation
4,454 | from promptflow import tool
import re
def parse_grounding_output(rag_grounding_score: str) -> dict:
    """Parse the grounding-evaluation LLM output into a score and reasoning.

    Bug fix: the return annotation said `str`, but a dict with keys
    `quality_score` (float; 0 when no "Quality score: N/M" is found,
    NaN on parse failure) and `quality_reasoning` (str) has always been
    returned.
    """
    try:
        numbers_found = re.findall(r"Quality score:\s*(\d+)\/\d", rag_grounding_score)
        score = float(numbers_found[0]) if len(numbers_found) > 0 else 0
    except Exception:
        score = float("nan")
    try:
        quality_reasoning, _ = rag_grounding_score.split("Quality score: ")
    except Exception:
        # No (or multiple) score markers: keep the whole text as reasoning.
        quality_reasoning = rag_grounding_score
    return {"quality_score": score, "quality_reasoning": quality_reasoning}
4,455 | from typing import List
from promptflow import tool
The provided code snippet includes necessary dependencies for implementing the `aggregate` function. Write a Python function `def aggregate(processed_results: List[str])` to solve the following problem:
This tool aggregates the processed result of all lines to the variant level and log metric for each variant. :param processed_results: List of the output of line_process node.
Here is the function:
def aggregate(processed_results: List[str]):
    """
    This tool aggregates the processed result of all lines to the variant level and log metric for each variant.
    :param processed_results: List of the output of line_process node.
    """
    total_lines = len(processed_results)
    print(total_lines)
    print(processed_results)
    from promptflow import log_metric
    log_metric(key="results_num", value=total_lines)
    return total_lines
4,456 | from promptflow import tool
The provided code snippet includes necessary dependencies for implementing the `line_process` function. Write a Python function `def line_process(groundtruth: str, prediction: str)` to solve the following problem:
This tool processes the prediction of a single line and returns the processed result. :param groundtruth: the groundtruth of a single line. :param prediction: the prediction of a single line.
Here is the function:
def line_process(groundtruth: str, prediction: str):
    """
    This tool processes the prediction of a single line and returns the processed result.
    :param groundtruth: the groundtruth of a single line.
    :param prediction: the prediction of a single line.
    """
    if groundtruth.lower() == prediction.lower():
        return "Correct"
    return "Incorrect"
4,457 | from promptflow import tool
def select_metrics(metrics: str) -> dict:
    """Map each supported QnA metric to True/False based on a
    comma-separated selection string.

    Bug fix: the return annotation said `str`, but a dict of
    {metric_name: bool} has always been returned.
    """
    supported_metrics = ('gpt_coherence', 'gpt_similarity', 'gpt_fluency', 'gpt_relevance', 'gpt_groundedness',
                         'f1_score', 'ada_similarity')
    user_selected_metrics = [metric.strip() for metric in metrics.split(',') if metric]
    return {metric: metric in user_selected_metrics for metric in supported_metrics}
4,458 | from promptflow import tool
import numpy as np
from numpy.linalg import norm
def compute_ada_cosine_similarity(a, b) -> float:
    """Cosine similarity between two embedding vectors."""
    dot_product = np.dot(a, b)
    return dot_product / (norm(a) * norm(b))
4,459 | from promptflow import tool
import numpy as np
import re
def concat_results(gpt_coherence_score: str = None,
                   gpt_similarity_score: str = None,
                   gpt_fluency_score: str = None,
                   gpt_relevance_score: str = None,
                   gpt_groundedness_score: str = None,
                   f1_score: float = None,
                   ada_cosine_similarity: float = None):
    """Collect all QnA metric outputs into one flat dict.

    GPT-judged metrics are parsed as the first digit in the model's reply
    and additionally get a `<name>_pass_rate` flag (1 when score > 3);
    scalar metrics (f1_score, ada_similarity) are cast to float.
    Missing or unparsable values become NaN.
    """
    raw_scores = [('gpt_coherence', gpt_coherence_score),
                  ('gpt_similarity', gpt_similarity_score),
                  ('gpt_fluency', gpt_fluency_score),
                  ('gpt_relevance', gpt_relevance_score),
                  ('gpt_groundedness', gpt_groundedness_score),
                  ('f1_score', f1_score),
                  ('ada_similarity', ada_cosine_similarity)]
    scalar_metrics = ("f1_score", "ada_similarity")
    errors = []
    variant_level_result = {}
    for name, raw in raw_scores:
        if name in scalar_metrics:
            try:
                score = float(raw)
            except Exception as exc:
                score = np.nan
                errors.append({"name": name, "msg": str(exc), "data": raw})
        elif raw:
            try:
                digit_match = re.search(r'\d', raw)
                score = float(digit_match.group()) if digit_match else np.nan
            except Exception as exc:
                score = np.nan
                errors.append({"name": name, "msg": str(exc), "data": raw})
        else:
            score = np.nan
        variant_level_result[name] = score
        if 'gpt' in name:
            variant_level_result[name + '_pass_rate'] = 1 if score > 3 else 0
    return variant_level_result
4,460 | from promptflow import tool
from collections import Counter
def compute_f1_score(ground_truth: str, answer: str) -> float:
    """Token-level F1 between answer and ground truth (SQuAD-style).

    Both texts are lowercased, punctuation and articles removed, then
    whitespace-tokenized; F1 is the harmonic mean of token precision
    and recall (0.0 when no tokens overlap).

    Bug fix: the return annotation said `str`, but a float has always
    been returned.
    """
    import string
    import re

    class QASplitTokenizer:
        def __call__(self, line):
            """Tokenizes an input line using split() on whitespace
            :param line: a segment to tokenize
            :return: the tokenized line
            """
            return line.split()

    def normalize_text(text) -> str:
        """Lower text and remove punctuation, articles and extra whitespace."""
        def remove_articles(text):
            return re.sub(r"\b(a|an|the)\b", " ", text)

        def white_space_fix(text):
            return " ".join(text.split())

        def remove_punctuation(text):
            exclude = set(string.punctuation)
            return "".join(ch for ch in text if ch not in exclude)

        def lower(text):
            return text.lower()
        return white_space_fix(remove_articles(remove_punctuation(lower(text))))
    prediction_tokens = normalize_text(answer)
    reference_tokens = normalize_text(ground_truth)
    tokenizer = QASplitTokenizer()
    prediction_tokens = tokenizer(prediction_tokens)
    reference_tokens = tokenizer(reference_tokens)
    common_tokens = Counter(prediction_tokens) & Counter(reference_tokens)
    num_common_tokens = sum(common_tokens.values())
    if num_common_tokens == 0:
        f1 = 0.0
    else:
        precision = 1.0 * num_common_tokens / len(prediction_tokens)
        recall = 1.0 * num_common_tokens / len(reference_tokens)
        f1 = (2.0 * precision * recall) / (precision + recall)
    return f1
4,461 | from typing import List
from promptflow import tool, log_metric
import numpy as np
def aggregate_variants_results(results: List[dict], metrics: List[str]):
    """Average each selected metric across all lines (bad values become NaN
    and are ignored by nanmean), round to 2 decimals, and log each metric.

    NOTE(review): `name in metrics[0]` is a substring test against the first
    list element — presumably a comma-joined selection string; confirm.
    """
    collected = {}
    for line_result in results:
        for name, value in line_result.items():
            if name not in metrics[0]:
                continue
            try:
                numeric = float(value)
            except Exception:
                numeric = np.nan
            collected.setdefault(name, []).append(numeric)
    for name in collected:
        if name in metrics[0]:
            mean_value = round(np.nanmean(collected[name]), 2)
            collected[name] = mean_value
            log_metric(name, mean_value)
    return collected
4,462 | from promptflow import tool
def validate_input(question: str, answer: str, context: str, ground_truth: str, selected_metrics: dict) -> dict:
    """Flip each selected metric to False when any of its required inputs
    is blank (empty or whitespace-only).

    Mutates and returns `selected_metrics` (same contract as before).
    """
    provided = {"question": question, "answer": answer,
                "context": context, "ground_truth": ground_truth}
    required_by_metric = {"gpt_groundedness": {"answer", "context"},
                          "gpt_relevance": {"question", "answer", "context"},
                          "gpt_coherence": {"question", "answer"},
                          "gpt_similarity": {"question", "answer", "ground_truth"},
                          "gpt_fluency": {"question", "answer"},
                          "f1_score": {"answer", "ground_truth"},
                          "ada_similarity": {"answer", "ground_truth"}}
    available = {field for field, value in provided.items() if value and value.strip()}
    data_validation = selected_metrics
    for metric, selected in selected_metrics.items():
        if selected:
            data_validation[metric] = required_by_metric[metric] <= available
    return data_validation
4,463 | from typing import List
from promptflow import tool
The provided code snippet includes necessary dependencies for implementing the `aggregate` function. Write a Python function `def aggregate(groundedness_scores: List[float])` to solve the following problem:
This tool aggregates the processed result of all lines to the variant level and log metric for each variant. :param processed_results: List of the output of line_process node. :param variant_ids: List of variant ids that can be used to group the results by variant. :param line_numbers: List of line numbers of the variants. If provided, this can be used to group the results by line number.
Here is the function:
def aggregate(groundedness_scores: List[float]):
    """
    This tool aggregates the per-line groundedness scores to the variant
    level (mean) and logs the metric.
    :param groundedness_scores: List of per-line groundedness scores.
    """
    count = len(groundedness_scores)
    aggregated_results = {"groundedness": sum(groundedness_scores) / count,
                          "count": count}
    from promptflow import log_metric
    log_metric(key="groundedness", value=aggregated_results["groundedness"])
    return aggregated_results
4,464 | from promptflow import tool
import re
def extract_float(s):
    """Return the first int/float literal found in *s* as a float, or None."""
    found = re.search(r"[-+]?\d*\.\d+|\d+", s)
    return float(found.group()) if found else None

def parse_score(gpt_score: str):
    """Parse the numeric score out of an LLM reply.

    Raises TypeError (float(None)) when no number is present.
    """
    return float(extract_float(gpt_score))
4,465 | import re
import bs4
import requests
from promptflow import tool
def decode_str(string):
    """Undo unicode-escape mangling in scraped titles: interpret escape
    sequences, then re-decode the raw bytes as UTF-8 (via latin1)."""
    unescaped = string.encode().decode("unicode-escape")
    return unescaped.encode("latin1").decode("utf-8")
def remove_nested_parentheses(string):
    """Strip all parenthesized segments, including nested ones, by
    repeatedly removing innermost pairs until a fixpoint is reached."""
    previous = None
    while previous != string:
        previous = string
        string = re.sub(r"\([^()]+\)", "", string)
    return string
def get_wiki_url(entity: str, count=2):
    """Resolve *entity* to up to *count* Wikipedia search URLs.

    Three outcomes from the search page:
      1. search-result headings present (no exact page) -> return search URLs
         for the similar titles found;
      2. a disambiguation page ("may refer to:") -> recurse with the entity
         wrapped in brackets;
      3. otherwise treat the original search URL as the article itself.
    On any exception, returns whatever was collected so far (possibly empty).

    :param entity: Name to look up on Wikipedia.
    :param count: Maximum number of URLs to return (default 2).
    :return: List of at most *count* Wikipedia URLs.
    """
    # Send a request to the URL
    url = f"https://en.wikipedia.org/w/index.php?search={entity}"
    url_list = []
    try:
        # Browser-like UA string; plain requests are commonly blocked/limited.
        headers = {
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) "
            "Chrome/113.0.0.0 Safari/537.36 Edg/113.0.1774.35"
        }
        response = requests.get(url, headers=headers)
        if response.status_code == 200:
            # Parse the HTML content using BeautifulSoup
            soup = bs4.BeautifulSoup(response.text, "html.parser")
            # Headings only appear when search did NOT land on an exact page.
            mw_divs = soup.find_all("div", {"class": "mw-search-result-heading"})
            if mw_divs:  # mismatch
                result_titles = [decode_str(div.get_text().strip()) for div in mw_divs]
                # Drop qualifiers like "(film)" from the suggested titles.
                result_titles = [remove_nested_parentheses(result_title) for result_title in result_titles]
                print(f"Could not find {entity}. Similar entity: {result_titles[:count]}.")
                url_list.extend(
                    [f"https://en.wikipedia.org/w/index.php?search={result_title}" for result_title in result_titles]
                )
            else:
                page_content = [p_ul.get_text().strip() for p_ul in soup.find_all("p") + soup.find_all("ul")]
                if any("may refer to:" in p for p in page_content):
                    # Disambiguation page: retry with a bracketed query
                    # (presumably to force a different search ranking — TODO confirm).
                    url_list.extend(get_wiki_url("[" + entity + "]"))
                else:
                    url_list.append(url)
        else:
            msg = (
                f"Get url failed with status code {response.status_code}.\nURL: {url}\nResponse: "
                f"{response.text[:100]}"
            )
            print(msg)
        return url_list[:count]
    except Exception as e:
        # Best effort: log and return what we have (not truncated to count here).
        print("Get url failed with error: {}".format(e))
        return url_list
4,466 | import random
import time
from concurrent.futures import ThreadPoolExecutor
from functools import partial
import bs4
import requests
from promptflow import tool
def fetch_text_content_from_url(url: str, count: int = 10):
    """Fetch *url* and return ``(url, text)`` where *text* is the first *count*
    sentences of readable page content, or ``(url, "No available content")``
    on any failure.

    NOTE(review): ``session`` is not defined in this excerpt — presumably a
    module-level ``requests.Session()``; confirm against the full module (or
    fall back to ``requests.get``). ``decode_str`` and ``get_page_sentence``
    are likewise defined elsewhere in the module.

    :param url: Page to fetch.
    :param count: Number of sentences to keep from the page text.
    :return: Tuple of (url, extracted text or fallback message).
    """
    # Send a request to the URL
    try:
        # Browser-like UA string; plain requests are commonly blocked/limited.
        headers = {
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) "
            "Chrome/113.0.0.0 Safari/537.36 Edg/113.0.1774.35"
        }
        # Random 0-0.5 s delay — presumably to stagger the concurrent requests
        # issued by search_result_from_url's thread pool.
        delay = random.uniform(0, 0.5)
        time.sleep(delay)
        response = session.get(url, headers=headers)
        if response.status_code == 200:
            # Parse the HTML content using BeautifulSoup
            soup = bs4.BeautifulSoup(response.text, "html.parser")
            page_content = [p_ul.get_text().strip() for p_ul in soup.find_all("p") + soup.find_all("ul")]
            page = ""
            # Keep only fragments longer than two words; normalize escapes.
            for content in page_content:
                if len(content.split(" ")) > 2:
                    page += decode_str(content)
                    if not content.endswith("\n"):
                        page += "\n"
            text = get_page_sentence(page, count=count)
            return (url, text)
        else:
            msg = (
                f"Get url failed with status code {response.status_code}.\nURL: {url}\nResponse: "
                f"{response.text[:100]}"
            )
            print(msg)
            return (url, "No available content")
    except Exception as e:
        # Best effort: never raise out of a worker thread.
        print("Get url failed with error: {}".format(e))
        return (url, "No available content")
def search_result_from_url(url_list: list, count: int = 10):
    """Fetch every URL in *url_list* concurrently and return a list of
    ``(url, text)`` tuples in input order.

    :param url_list: URLs to fetch.
    :param count: Sentence count forwarded to fetch_text_content_from_url.
    """
    fetch = partial(fetch_text_content_from_url, count=count)
    with ThreadPoolExecutor(max_workers=5) as pool:
        # Executor.map preserves input order, matching the original append loop.
        return list(pool.map(fetch, url_list))
4,467 | from promptflow import tool
def process_search_result(search_result):
    """Render ``(url, content)`` pairs as a double-newline-separated context
    string; return "" on any error (e.g. a non-iterable input)."""
    try:
        sections = [
            f"Content: {content}\nSource: {url}" for url, content in search_result
        ]
        return "\n\n".join(sections)
    except Exception as e:
        print(f"Error: {e}")
        return ""
4,468 | from promptflow import tool
import json
import re
def my_python_tool(input1: str) -> str:
    """Sanitize *input1* by stripping '$', '\\' and '!' characters, then return
    the "answer" field if it parses as JSON; otherwise return the cleaned
    string itself."""
    cleaned = re.sub(r'[$\\!]', '', input1)
    try:
        return json.loads(cleaned)['answer']
    except Exception:
        return cleaned
4,469 | import os
from typing import Union
from promptflow import tool
from promptflow.connections import AzureOpenAIConnection, OpenAIConnection
from chat_with_pdf.utils.lock import acquire_lock
BASE_DIR = os.path.dirname(os.path.abspath(__file__)) + "/chat_with_pdf/"
def acquire_lock(filename):
    """Cross-platform advisory file lock, used as ``with acquire_lock(path):``.

    Bug fix: the original was a bare generator function, so using it in a
    ``with`` statement raised ``AttributeError: __enter__``; the body is now
    wrapped in ``contextlib.contextmanager``. The unlock also runs in a
    ``finally`` so an exception inside the ``with`` body no longer leaks the
    lock. Imports are function-local because fcntl/msvcrt are platform-only.

    :param filename: Path of the lock file to create/lock.
    :return: A context manager yielding the open lock-file handle.
    """
    import contextlib
    import os
    import sys

    @contextlib.contextmanager
    def _locked():
        if not sys.platform.startswith("win"):  # POSIX: flock
            import fcntl

            with open(filename, "a+") as f:
                fcntl.flock(f, fcntl.LOCK_EX)
                try:
                    yield f
                finally:
                    fcntl.flock(f, fcntl.LOCK_UN)
        else:  # Windows: msvcrt byte-range lock
            import msvcrt

            with open(filename, "w") as f:
                msvcrt.locking(f.fileno(), msvcrt.LK_LOCK, 1)
                try:
                    yield f
                finally:
                    msvcrt.locking(f.fileno(), msvcrt.LK_UNLCK, 1)
            try:
                os.remove(filename)
            except OSError:
                pass  # best effort to remove the lock file

    return _locked()
def setup_env(connection: Union[AzureOpenAIConnection, OpenAIConnection], config: dict):
    """Export the OpenAI connection and flow config as environment variables and
    create the working folders used by chat_with_pdf.

    :param connection: Azure OpenAI or OpenAI connection whose credentials are
        exported as OPENAI_* environment variables.
    :param config: Arbitrary settings; every value is stringified and exported
        as an environment variable under its key.
    :return: The string "Ready" — a signal consumed by downstream flow nodes —
        or None when either input is missing.
    """
    # Silently no-op when inputs are missing so a partially wired flow still runs.
    if not connection or not config:
        return

    if isinstance(connection, AzureOpenAIConnection):
        os.environ["OPENAI_API_TYPE"] = "azure"
        os.environ["OPENAI_API_BASE"] = connection.api_base
        os.environ["OPENAI_API_KEY"] = connection.api_key
        os.environ["OPENAI_API_VERSION"] = connection.api_version

    if isinstance(connection, OpenAIConnection):
        os.environ["OPENAI_API_KEY"] = connection.api_key
        if connection.organization is not None:
            os.environ["OPENAI_ORG_ID"] = connection.organization

    # Expose every config entry to downstream nodes via the environment.
    for key in config:
        os.environ[key] = str(config[key])

    # File lock guards against concurrent flow lines racing to create the dirs.
    with acquire_lock(BASE_DIR + "create_folder.lock"):
        if not os.path.exists(BASE_DIR + ".pdfs"):
            os.mkdir(BASE_DIR + ".pdfs")
        if not os.path.exists(BASE_DIR + ".index/.pdfs"):
            os.makedirs(BASE_DIR + ".index/.pdfs")

    return "Ready"
4,470 | from promptflow import tool
from chat_with_pdf.rewrite_question import rewrite_question
def rewrite_question(question: str, history: list):
    """Rewrite a follow-up question into a standalone question using the chat
    history, trimming history until the prompt fits the token budget.

    NOTE(review): Environment, FileSystemLoader, os, render_with_token_limit,
    log and OAIChat are not defined in this excerpt — they come from the real
    chat_with_pdf module; confirm before reuse.

    :param question: The user's (possibly context-dependent) question.
    :param history: Chat history as a list of turn dicts.
    :return: The rewritten, self-contained question text.
    """
    # Jinja template lives next to this script.
    template = Environment(
        loader=FileSystemLoader(os.path.dirname(os.path.abspath(__file__)))
    ).get_template("rewrite_question_prompt.md")
    # Budgets come from env vars exported by setup_env.
    token_limit = int(os.environ["PROMPT_TOKEN_LIMIT"])
    max_completion_tokens = int(os.environ["MAX_COMPLETION_TOKENS"])

    # Try to render the prompt with token limit and reduce the history count if it fails
    while True:
        try:
            prompt = render_with_token_limit(
                template, token_limit, question=question, history=history
            )
            break
        except ValueError:
            # Over budget: drop the most recent turn and retry.
            history = history[:-1]
            log(f"Reducing chat history count to {len(history)} to fit token limit")

    chat = OAIChat()
    rewritten_question = chat.generate(
        messages=[{"role": "user", "content": prompt}], max_tokens=max_completion_tokens
    )
    log(f"Rewritten question: {rewritten_question}")
    return rewritten_question
def rewrite_question_tool(question: str, history: list, env_ready_signal: str):
    """Flow-node wrapper around rewrite_question.

    env_ready_signal is unused here — presumably it only sequences this node
    after environment setup.
    """
    rewritten = rewrite_question(question, history)
    return rewritten
4,471 | from promptflow import tool
from chat_with_pdf.download import download
def download(url: str) -> str:
    """Download the PDF at *url* into PDF_DIR (with caching) and return the
    local file path.

    A per-file lock serializes concurrent downloads of the same URL; if the
    file already exists, it is reused without re-downloading.

    NOTE(review): PDF_DIR, normalize_filename, acquire_lock, log, os and
    requests are defined in the real chat_with_pdf module, not this excerpt.
    Also, requests.get has no timeout — consider adding one.

    :param url: HTTP(S) URL of the PDF document.
    :return: Absolute-ish path of the downloaded (or cached) PDF file.
    """
    path = os.path.join(PDF_DIR, normalize_filename(url) + ".pdf")
    lock_path = path + ".lock"

    with acquire_lock(lock_path):
        # Cache hit: reuse the previously downloaded file.
        if os.path.exists(path):
            log("Pdf already exists in " + os.path.abspath(path))
            return path

        log("Downloading pdf from " + url)
        response = requests.get(url)
        with open(path, "wb") as f:
            f.write(response.content)

    return path
def download_tool(url: str, env_ready_signal: str) -> str:
    """Flow-node wrapper around download.

    env_ready_signal is unused here — presumably it only sequences this node
    after environment setup.
    """
    local_path = download(url)
    return local_path
4,472 | from promptflow import tool
from chat_with_pdf.build_index import create_faiss_index
def create_faiss_index(pdf_path: str) -> str:
    """Build (or reuse) a FAISS index over the text of *pdf_path* and return the
    index directory path.

    Chunking is controlled by the CHUNK_SIZE / CHUNK_OVERLAP env vars (exported
    by setup_env); the index dir name encodes both so different settings never
    collide. A per-index file lock serializes concurrent builds.

    NOTE(review): os, Path, INDEX_DIR, acquire_lock, log, split_text, PyPDF2,
    faiss, FAISSIndex and OAIEmbedding come from the real chat_with_pdf
    module, not this excerpt.

    :param pdf_path: Path of the source PDF.
    :return: POSIX path of the persisted index directory.
    """
    chunk_size = int(os.environ.get("CHUNK_SIZE"))
    chunk_overlap = int(os.environ.get("CHUNK_OVERLAP"))
    log(f"Chunk size: {chunk_size}, chunk overlap: {chunk_overlap}")

    file_name = Path(pdf_path).name + f".index_{chunk_size}_{chunk_overlap}"
    index_persistent_path = Path(INDEX_DIR) / file_name
    index_persistent_path = index_persistent_path.resolve().as_posix()
    lock_path = index_persistent_path + ".lock"
    log("Index path: " + os.path.abspath(index_persistent_path))

    with acquire_lock(lock_path):
        # Cache hit: a finished index already exists on disk.
        if os.path.exists(os.path.join(index_persistent_path, "index.faiss")):
            log("Index already exists, bypassing index creation")
            return index_persistent_path
        else:
            if not os.path.exists(index_persistent_path):
                os.makedirs(index_persistent_path)

            log("Building index")
            # Extract the full text of every page.
            pdf_reader = PyPDF2.PdfReader(pdf_path)
            text = ""
            for page in pdf_reader.pages:
                text += page.extract_text()

            # Chunk the words into segments of X words with Y-word overlap, X=CHUNK_SIZE, Y=OVERLAP_SIZE
            segments = split_text(text, chunk_size, chunk_overlap)
            log(f"Number of segments: {len(segments)}")

            # 1536 — presumably the OpenAI ada-002 embedding dimension; confirm.
            index = FAISSIndex(index=faiss.IndexFlatL2(1536), embedding=OAIEmbedding())
            index.insert_batch(segments)
            index.save(index_persistent_path)

    log("Index built: " + index_persistent_path)
    return index_persistent_path
def build_index_tool(pdf_path: str) -> str:
    """Flow-node wrapper: build (or reuse) the FAISS index for the given PDF."""
    index_path = create_faiss_index(pdf_path)
    return index_path
4,473 | from promptflow import tool
from chat_with_pdf.qna import qna
def convert_chat_history_to_chatml_messages(history):
    """Presumably converts flow chat history to ChatML message dicts — the body
    is omitted in this excerpt; see chat_with_pdf.qna for the implementation."""


def qna(prompt: str, history: list):
    """Presumably answers *prompt* given ChatML *history*, returning a token
    stream — the body is omitted in this excerpt; see chat_with_pdf.qna."""
def qna_tool(prompt: str, history: list):
    """Run Q&A over the prompt and chat history, collecting the streamed chunks
    into a single answer string.

    Fixes: the original shadowed the builtin ``str`` as its loop variable,
    appended a pointless empty string each iteration, and concatenated
    quadratically; a single join replaces the loop.

    :param prompt: The grounded prompt for the model.
    :param history: Flow-style chat history (converted to ChatML for the call).
    :return: Dict with the full answer under the "answer" key.
    """
    stream = qna(prompt, convert_chat_history_to_chatml_messages(history))
    answer = "".join(stream)
    return {"answer": answer}
4,474 | from promptflow import tool
from chat_with_pdf.find_context import find_context
def find_context(question: str, index_path: str):
    """Presumably retrieves relevant index chunks for *question* from the FAISS
    index at *index_path*, returning (prompt, chunks) — the body is omitted in
    this excerpt; see chat_with_pdf.find_context."""
def find_context_tool(question: str, index_path: str):
    """Flow-node wrapper: look up context for *question* and expose the prompt
    plus the raw text of each retrieved chunk."""
    prompt, chunks = find_context(question, index_path)
    texts = []
    for chunk in chunks:
        texts.append(chunk.text)
    return {"prompt": prompt, "context": texts}
4,475 | import argparse
from dotenv import load_dotenv
import os
from qna import qna
from find_context import find_context
from rewrite_question import rewrite_question
from build_index import create_faiss_index
from download import download
from utils.lock import acquire_lock
from constants import PDF_DIR, INDEX_DIR
def chat_with_pdf(question: str, pdf_url: str, history: list):
    """Presumably runs the full chat-with-pdf pipeline (download, index, find
    context, qna) and returns (stream, context) — body omitted in this excerpt."""


def print_stream_and_return_full_answer(stream):
    """Presumably prints streamed chunks as they arrive and returns the
    concatenated answer — body omitted in this excerpt."""
def main_loop(url: str):
    """Interactive console chat about the PDF at *url*; the user types ``q!``
    to quit."""
    # Load OPENAI_* etc. from the .env that sits next to this script.
    load_dotenv(os.path.join(os.path.dirname(__file__), ".env"), override=True)

    history = []
    while True:
        question = input("\033[92m$User (type q! to quit): \033[0m")
        if question == "q!":
            break

        stream, context = chat_with_pdf(question, url, history)

        print("\033[92m$Bot: \033[0m", end=" ", flush=True)
        answer = print_stream_and_return_full_answer(stream)

        # Rebind rather than mutate, matching the original's fresh-list behavior.
        history = history + [
            {"role": "user", "content": question},
            {"role": "assistant", "content": answer},
        ]
4,476 | from typing import Tuple, Union, Optional, Type
import functools
import time
import random
def retry_and_handle_exceptions(
    exception_to_check: Union[Type[Exception], Tuple[Type[Exception], ...]],
    max_retries: int = 3,
    initial_delay: float = 1,
    exponential_base: float = 2,
    jitter: bool = False,
    extract_delay_from_error_message: "Optional[Callable[[str], Optional[float]]]" = None,
):
    """Decorator factory: retry the wrapped callable on the given exception(s)
    with exponential backoff.

    Fixes: the final failure now chains the original exception (``raise ...
    from e``) instead of discarding it, and the last parameter's annotation no
    longer abuses the builtin ``any`` as a type (kept as a string so no new
    import is required).

    :param exception_to_check: Exception type (or tuple of types) that triggers a retry.
    :param max_retries: Total number of attempts before giving up.
    :param initial_delay: Base delay in seconds; grown before each sleep.
    :param exponential_base: Growth factor applied to the delay after each failure.
    :param jitter: When True, multiplies the growth by a random factor in [1, 2).
    :param extract_delay_from_error_message: Optional callable parsing a
        server-suggested delay (seconds) out of the error text; a truthy result
        overrides the computed backoff.
    :raises Exception: when all attempts fail, chained from the last error.
    """
    def deco_retry(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            delay = initial_delay
            for attempt in range(max_retries):
                try:
                    return func(*args, **kwargs)
                except exception_to_check as e:
                    if attempt == max_retries - 1:
                        # Chain the root cause for easier debugging.
                        raise Exception(
                            "Func execution failed after {0} retries: {1}".format(
                                max_retries, e
                            )
                        ) from e
                    # Grow the backoff; jitter (bool) multiplies by [1, 2).
                    delay *= exponential_base * (1 + jitter * random.random())
                    delay_from_error_message = None
                    if extract_delay_from_error_message is not None:
                        delay_from_error_message = extract_delay_from_error_message(
                            str(e)
                        )
                    final_delay = (
                        delay_from_error_message if delay_from_error_message else delay
                    )
                    print(
                        "Func execution failed. Retrying in {0} seconds: {1}".format(
                            final_delay, e
                        )
                    )
                    time.sleep(final_delay)
        return wrapper
    return deco_retry
4,477 | from typing import Tuple, Union, Optional, Type
import functools
import time
import random
def retry_and_handle_exceptions_for_generator(
    exception_to_check: Union[Type[Exception], Tuple[Type[Exception], ...]],
    max_retries: int = 3,
    initial_delay: float = 1,
    exponential_base: float = 2,
    jitter: bool = False,
    extract_delay_from_error_message: "Optional[Callable[[str], Optional[float]]]" = None,
):
    """Decorator factory: retry a *generator function* on the given
    exception(s) with exponential backoff, re-yielding its values.

    Fixes: the final failure now chains the original exception (``raise ...
    from e``), and the last parameter's annotation no longer abuses the
    builtin ``any`` as a type (kept as a string so no new import is required).

    NOTE: if the wrapped generator raises after yielding some items, the retry
    restarts it from the beginning, so already-yielded items are produced
    again — callers must tolerate duplicates on retry (same as the original).

    :param exception_to_check: Exception type (or tuple of types) that triggers a retry.
    :param max_retries: Total number of attempts before giving up.
    :param initial_delay: Base delay in seconds; grown before each sleep.
    :param exponential_base: Growth factor applied to the delay after each failure.
    :param jitter: When True, multiplies the growth by a random factor in [1, 2).
    :param extract_delay_from_error_message: Optional callable parsing a
        server-suggested delay (seconds) out of the error text; a truthy result
        overrides the computed backoff.
    :raises Exception: when all attempts fail, chained from the last error.
    """
    def deco_retry(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            delay = initial_delay
            for attempt in range(max_retries):
                try:
                    # Delegate to the underlying generator; stop retrying once
                    # it completes without raising.
                    yield from func(*args, **kwargs)
                    break
                except exception_to_check as e:
                    if attempt == max_retries - 1:
                        # Chain the root cause for easier debugging.
                        raise Exception(
                            "Func execution failed after {0} retries: {1}".format(
                                max_retries, e
                            )
                        ) from e
                    delay *= exponential_base * (1 + jitter * random.random())
                    delay_from_error_message = None
                    if extract_delay_from_error_message is not None:
                        delay_from_error_message = extract_delay_from_error_message(
                            str(e)
                        )
                    final_delay = (
                        delay_from_error_message if delay_from_error_message else delay
                    )
                    print(
                        "Func execution failed. Retrying in {0} seconds: {1}".format(
                            final_delay, e
                        )
                    )
                    time.sleep(final_delay)
        return wrapper
    return deco_retry
4,478 | from typing import List
import openai
from openai.version import VERSION as OPENAI_VERSION
import os
import tiktoken
from jinja2 import Template
from .retry import (
retry_and_handle_exceptions,
retry_and_handle_exceptions_for_generator,
)
from .logging import log
def count_token(text: str) -> int:
    """Return the number of cl100k_base tokens in *text*."""
    return len(tiktoken.get_encoding("cl100k_base").encode(text))
def render_with_token_limit(template: Template, token_limit: int, **kwargs) -> str:
    """Render *template* with **kwargs and fail fast when the result exceeds
    *token_limit* tokens.

    :raises ValueError: when the rendered text's token count is over the limit.
    """
    rendered = template.render(**kwargs)
    token_count = count_token(rendered)
    if token_count > token_limit:
        message = f"token count {token_count} exceeds limit {token_limit}"
        log(message)
        raise ValueError(message)
    return rendered
def extract_delay_from_rate_limit_error_msg(text):
    """Pull the server-suggested 'retry after N' delay (in seconds) out of a
    rate-limit error message; fall back to 5 seconds when absent."""
    import re

    found = re.search(r"retry after (\d+)", text)
    return float(found.group(1)) if found else 5  # default retry time
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.