id int64 0 190k | prompt stringlengths 21 13.4M | docstring stringlengths 1 12k ⌀ |
|---|---|---|
4,175 | import contextlib
import functools
import threading
import uuid
from contextvars import ContextVar
from datetime import datetime
from typing import Any, Dict
from promptflow._sdk._telemetry.telemetry import TelemetryMixin
from promptflow._sdk._utils import ClientUserAgentUtil
from promptflow.exceptions import _ErrorInfo
class ActivityType(object):
"""The type of activity (code) monitored.
The default type is "PublicAPI".
"""
PUBLICAPI = "PublicApi" # incoming public API call (default)
INTERNALCALL = "InternalCall" # internal (function) call
CLIENTPROXY = "ClientProxy" # an outgoing service API call
def log_activity(
logger,
activity_name,
activity_type=ActivityType.INTERNALCALL,
custom_dimensions=None,
user_agent=None,
):
"""Log an activity.
An activity is a logical block of code that consumers want to monitor.
To monitor, wrap the logical block of code with the ``log_activity()`` method. As an alternative, you can
also use the ``@monitor_with_activity`` decorator.
:param logger: The logger adapter.
:type logger: logging.LoggerAdapter
:param activity_name: The name of the activity. The name should be unique per the wrapped logical code block.
:type activity_name: str
:param activity_type: One of PUBLICAPI, INTERNALCALL, or CLIENTPROXY which represent an incoming API call,
an internal (function) call, or an outgoing API call. If not specified, INTERNALCALL is used.
:type activity_type: str
:param custom_dimensions: The custom properties of the activity.
:type custom_dimensions: dict
:param user_agent: Specify user agent. If not specified, the user agent will be got from OperationContext.
:type user_agent: str
:return: None
"""
if not custom_dimensions:
custom_dimensions = {}
# provided user agent will be respected even if it's ""
if user_agent is None:
user_agent = ClientUserAgentUtil.get_user_agent()
request_id = request_id_context.get()
if not request_id:
# public function call
first_call = True
request_id = generate_request_id()
request_id_context.set(request_id)
else:
first_call = False
activity_info = {
"request_id": request_id,
"first_call": first_call,
"activity_name": activity_name,
"activity_type": activity_type,
"user_agent": user_agent,
}
activity_info.update(custom_dimensions)
start_time = datetime.utcnow()
completion_status = ActivityCompletionStatus.SUCCESS
log_activity_start(activity_info, logger)
exception = None
try:
yield logger
except BaseException as e: # pylint: disable=broad-except
exception = e
completion_status = ActivityCompletionStatus.FAILURE
error_category, error_type, error_target, error_message, error_detail = _ErrorInfo.get_error_info(exception)
activity_info["error_category"] = error_category
activity_info["error_type"] = error_type
activity_info["error_target"] = error_target
activity_info["error_message"] = error_message
activity_info["error_detail"] = error_detail
finally:
if first_call:
# recover request id in global storage
request_id_context.set(None)
end_time = datetime.utcnow()
duration_ms = round((end_time - start_time).total_seconds() * 1000, 2)
activity_info["completion_status"] = completion_status
activity_info["duration_ms"] = duration_ms
log_activity_end(activity_info, logger)
# raise the exception to align with the behavior of the with statement
if exception:
raise exception
def update_activity_name(activity_name, kwargs=None, args=None):
"""Update activity name according to kwargs. For flow test, we want to know if it's node test."""
if activity_name == "pf.flows.test":
# SDK
if kwargs.get("node", None):
activity_name = "pf.flows.node_test"
elif activity_name == "pf.flow.test":
# CLI
if getattr(args, "node", None):
activity_name = "pf.flow.node_test"
return activity_name
The provided code snippet includes necessary dependencies for implementing the `monitor_operation` function. Write a Python function `def monitor_operation( activity_name, activity_type=ActivityType.INTERNALCALL, custom_dimensions=None, )` to solve the following problem:
A wrapper for monitoring an activity in operations class. To monitor, use the ``@monitor_operation`` decorator. Note: this decorator should only use in operations class methods. :param activity_name: The name of the activity. The name should be unique per the wrapped logical code block. :type activity_name: str :param activity_type: One of PUBLICAPI, INTERNALCALL, or CLIENTPROXY which represent an incoming API call, an internal (function) call, or an outgoing API call. If not specified, INTERNALCALL is used. :type activity_type: str :param custom_dimensions: The custom properties of the activity. :type custom_dimensions: dict :return:
Here is the function:
def monitor_operation(
activity_name,
activity_type=ActivityType.INTERNALCALL,
custom_dimensions=None,
):
"""A wrapper for monitoring an activity in operations class.
To monitor, use the ``@monitor_operation`` decorator.
Note: this decorator should only use in operations class methods.
:param activity_name: The name of the activity. The name should be unique per the wrapped logical code block.
:type activity_name: str
:param activity_type: One of PUBLICAPI, INTERNALCALL, or CLIENTPROXY which represent an incoming API call,
an internal (function) call, or an outgoing API call. If not specified, INTERNALCALL is used.
:type activity_type: str
:param custom_dimensions: The custom properties of the activity.
:type custom_dimensions: dict
:return:
"""
if not custom_dimensions:
custom_dimensions = {}
def monitor(f):
@functools.wraps(f)
def wrapper(self, *args, **kwargs):
from promptflow._sdk._telemetry.telemetry import get_telemetry_logger
from promptflow._utils.version_hint_utils import HINT_ACTIVITY_NAME, check_latest_version, hint_for_update
logger = get_telemetry_logger()
if isinstance(self, TelemetryMixin):
custom_dimensions.update(self._get_telemetry_values())
user_agent = self._get_user_agent_override()
else:
user_agent = None
# update activity name according to kwargs.
_activity_name = update_activity_name(activity_name, kwargs=kwargs)
with log_activity(
logger=logger,
activity_name=_activity_name,
activity_type=activity_type,
custom_dimensions=custom_dimensions,
user_agent=user_agent,
):
if _activity_name in HINT_ACTIVITY_NAME:
hint_for_update()
# set check_latest_version as deamon thread to avoid blocking main thread
thread = threading.Thread(target=check_latest_version, daemon=True)
thread.start()
return f(self, *args, **kwargs)
return wrapper
return monitor | A wrapper for monitoring an activity in operations class. To monitor, use the ``@monitor_operation`` decorator. Note: this decorator should only use in operations class methods. :param activity_name: The name of the activity. The name should be unique per the wrapped logical code block. :type activity_name: str :param activity_type: One of PUBLICAPI, INTERNALCALL, or CLIENTPROXY which represent an incoming API call, an internal (function) call, or an outgoing API call. If not specified, INTERNALCALL is used. :type activity_type: str :param custom_dimensions: The custom properties of the activity. :type custom_dimensions: dict :return: |
4,176 | import logging
from typing import Optional
from promptflow._constants import USER_AGENT_OVERRIDE_KEY
from promptflow._sdk._configuration import Configuration
PROMPTFLOW_LOGGER_NAMESPACE = "promptflow._sdk._telemetry"
def is_telemetry_enabled():
"""Check if telemetry is enabled. Telemetry is enabled by default.
User can disable it by:
1. running `pf config set telemetry.enabled=false` command.
"""
config = Configuration.get_instance()
telemetry_consent = config.get_telemetry_consent()
if telemetry_consent is not None:
return str(telemetry_consent).lower() == "true"
return True
def get_telemetry_logger():
from promptflow._sdk._telemetry.logging_handler import get_appinsights_log_handler
current_logger = logging.getLogger(PROMPTFLOW_LOGGER_NAMESPACE)
# avoid telemetry log appearing in higher level loggers
current_logger.propagate = False
current_logger.setLevel(logging.INFO)
# Remove the existing handler and only keep an appinsights_log_handler
for log_handler in current_logger.handlers:
current_logger.removeHandler(log_handler)
handler = get_appinsights_log_handler()
# Due to the possibility of obtaining previously cached handler, update config of handler.
handler._is_telemetry_enabled = is_telemetry_enabled()
current_logger.addHandler(handler)
return current_logger | null |
4,177 | import functools
import logging
import os
import platform
import sys
from azure.monitor.opentelemetry.exporter import AzureMonitorLogExporter
from azure.monitor.opentelemetry.exporter._constants import _APPLICATION_INSIGHTS_EVENT_MARKER_ATTRIBUTE
from azure.monitor.opentelemetry.exporter._generated.models import TelemetryItem
from opentelemetry.sdk._logs import LogData, LoggerProvider, LoggingHandler
from opentelemetry.sdk._logs.export import BatchLogRecordProcessor
from opentelemetry.util.types import Attributes
from promptflow._sdk._configuration import Configuration
The provided code snippet includes necessary dependencies for implementing the `get_scrubbed_cloud_role` function. Write a Python function `def get_scrubbed_cloud_role()` to solve the following problem:
Create cloud role for telemetry, will scrub user script name and only leave extension.
Here is the function:
def get_scrubbed_cloud_role():
"""Create cloud role for telemetry, will scrub user script name and only leave extension."""
default = "Unknown Application"
known_scripts = [
"pfs",
"pfutil.py",
"pf",
"pfazure",
"pf.exe",
"pfazure.exe",
"app.py",
"python -m unittest",
"pytest",
"gunicorn",
"ipykernel_launcher.py",
"jupyter-notebook",
"jupyter-lab",
"python",
"_jb_pytest_runner.py",
default,
]
try:
cloud_role = os.path.basename(sys.argv[0]) or default
if cloud_role not in known_scripts:
ext = os.path.splitext(cloud_role)[1]
cloud_role = "***" + ext
except Exception:
# fallback to default cloud role if failed to scrub
cloud_role = default
return cloud_role | Create cloud role for telemetry, will scrub user script name and only leave extension. |
4,178 | import functools
import logging
import os
import platform
import sys
from azure.monitor.opentelemetry.exporter import AzureMonitorLogExporter
from azure.monitor.opentelemetry.exporter._constants import _APPLICATION_INSIGHTS_EVENT_MARKER_ATTRIBUTE
from azure.monitor.opentelemetry.exporter._generated.models import TelemetryItem
from opentelemetry.sdk._logs import LogData, LoggerProvider, LoggingHandler
from opentelemetry.sdk._logs.export import BatchLogRecordProcessor
from opentelemetry.util.types import Attributes
from promptflow._sdk._configuration import Configuration
def get_promptflow_sdk_log_handler():
from promptflow._sdk._telemetry.telemetry import is_telemetry_enabled
config = Configuration.get_instance()
custom_dimensions = {
"python_version": platform.python_version(),
"installation_id": config.get_or_set_installation_id(),
}
handler = PromptFlowSDKLogHandler(
custom_dimensions=custom_dimensions,
enable_telemetry=is_telemetry_enabled(),
)
return handler
The provided code snippet includes necessary dependencies for implementing the `get_appinsights_log_handler` function. Write a Python function `def get_appinsights_log_handler()` to solve the following problem:
Enable the opentelemetry logging handler for specified logger and instrumentation key to send info to AppInsights.
Here is the function:
def get_appinsights_log_handler():
"""
Enable the opentelemetry logging handler for specified logger and instrumentation key to send info to AppInsights.
"""
try:
return get_promptflow_sdk_log_handler()
except Exception: # pylint: disable=broad-except
# ignore any exceptions, telemetry collection errors shouldn't block an operation
return logging.NullHandler() | Enable the opentelemetry logging handler for specified logger and instrumentation key to send info to AppInsights. |
4,179 | from flask import jsonify, make_response, request
from flask_restx import fields
from promptflow._sdk._service import Namespace, Resource
from promptflow._sdk._service.utils.utils import build_pfs_user_agent, local_user_only
from promptflow._sdk._telemetry import ActivityCompletionStatus, ActivityType
from promptflow._utils.utils import camel_to_snake
from promptflow.exceptions import UserErrorException
def _dict_camel_to_snake(data):
if isinstance(data, dict):
result = {}
for key, value in data.items():
result[camel_to_snake(key)] = _dict_camel_to_snake(value)
return result
else:
return data
def parse_activity_info(metadata, first_call, user_agent, request_id):
request_id = request_id
return {
"request_id": request_id,
"first_call": first_call,
"user_agent": user_agent,
**_dict_camel_to_snake(metadata),
} | null |
4,180 | from flask import jsonify, make_response, request
from flask_restx import fields
from promptflow._sdk._service import Namespace, Resource
from promptflow._sdk._service.utils.utils import build_pfs_user_agent, local_user_only
from promptflow._sdk._telemetry import ActivityCompletionStatus, ActivityType
from promptflow._utils.utils import camel_to_snake
from promptflow.exceptions import UserErrorException
class AllowedActivityName:
def validate_metadata(value: dict) -> dict:
allowed_activity_names = [
AllowedActivityName.FLOW_TEST,
AllowedActivityName.FLOW_NODE_TEST,
AllowedActivityName.GENERATE_TOOL_META,
]
if value.get("activityName", None) not in allowed_activity_names:
raise UserErrorException(f"metadata.activityName must be one of {', '.join(allowed_activity_names)}.")
allowed_activity_types = [
ActivityType.INTERNALCALL,
ActivityType.PUBLICAPI,
]
if value.get("activityType") not in allowed_activity_types:
raise UserErrorException(f"metadata.activityType must be one of {', '.join(allowed_activity_types)}")
return value | null |
4,181 | from flask import jsonify, make_response, request
from flask_restx import fields
from promptflow._sdk._service import Namespace, Resource
from promptflow._sdk._service.utils.utils import build_pfs_user_agent, local_user_only
from promptflow._sdk._telemetry import ActivityCompletionStatus, ActivityType
from promptflow._utils.utils import camel_to_snake
from promptflow.exceptions import UserErrorException
class EventType:
START = "Start"
END = "End"
def validate_metadata_based_on_event_type(metadata: dict, event_type: str):
if event_type == EventType.END:
if not all(
key in metadata
for key in (
"completionStatus", # End event should have completionStatus
"durationMs", # End event should have durationMs
)
):
missing_fields = {"completionStatus", "durationMs"} - set(metadata.keys())
raise UserErrorException(f"Missing required fields in telemetry metadata: {', '.join(missing_fields)}")
if metadata.get("completionStatus") == ActivityCompletionStatus.FAILURE:
if not all(
key in metadata
for key in (
"errorCategory", # Failure event should have errorCategory
"errorType", # Failure event should have errorType
"errorTarget", # Failure event should have errorTarget
"errorMessage", # Failure event should have errorMessage
)
):
missing_fields = {"errorCategory", "errorType", "errorTarget", "errorMessage"} - set(metadata.keys())
raise UserErrorException(f"Missing required fields in telemetry payload: {', '.join(missing_fields)}") | null |
4,182 | from flask import jsonify, make_response, request
from flask_restx import fields
from promptflow._sdk._service import Namespace, Resource
from promptflow._sdk._service.utils.utils import build_pfs_user_agent, local_user_only
from promptflow._sdk._telemetry import ActivityCompletionStatus, ActivityType
from promptflow._utils.utils import camel_to_snake
from promptflow.exceptions import UserErrorException
class EventType:
START = "Start"
END = "End"
def validate_event_type(value) -> str:
if value not in (EventType.START, EventType.END):
raise ValueError(f"Event type must be one of {EventType.START} and {EventType.END}.")
return value | null |
4,183 | import json
import logging
import traceback
from datetime import datetime
from typing import Callable
from flask import request
from google.protobuf.json_format import MessageToJson
from opentelemetry.proto.collector.trace.v1.trace_service_pb2 import ExportTraceServiceRequest
from promptflow._constants import (
CosmosDBContainerName,
SpanFieldName,
SpanResourceAttributesFieldName,
SpanResourceFieldName,
)
from promptflow._sdk._constants import TRACE_DEFAULT_SESSION_ID
from promptflow._sdk._utils import parse_kv_from_pb_attribute
from promptflow._sdk.entities._trace import Span
from promptflow._utils.thread_utils import ThreadWithContextVars
def _try_write_trace_to_cosmosdb(all_spans, get_created_by_info_with_cache: Callable, logger: logging.Logger):
if not all_spans:
return
try:
span_resource = all_spans[0]._content.get(SpanFieldName.RESOURCE, {})
resource_attributes = span_resource.get(SpanResourceFieldName.ATTRIBUTES, {})
subscription_id = resource_attributes.get(SpanResourceAttributesFieldName.SUBSCRIPTION_ID, None)
resource_group_name = resource_attributes.get(SpanResourceAttributesFieldName.RESOURCE_GROUP_NAME, None)
workspace_name = resource_attributes.get(SpanResourceAttributesFieldName.WORKSPACE_NAME, None)
if subscription_id is None or resource_group_name is None or workspace_name is None:
logger.debug("Cannot find workspace info in span resource, skip writing trace to cosmosdb.")
return
logger.info(f"Start writing trace to cosmosdb, total spans count: {len(all_spans)}.")
start_time = datetime.now()
from promptflow.azure._storage.cosmosdb.client import get_client
from promptflow.azure._storage.cosmosdb.span import Span as SpanCosmosDB
from promptflow.azure._storage.cosmosdb.summary import Summary
# Load span and summary clients first time may slow.
# So, we load 2 client in parallel for warm up.
span_client_thread = ThreadWithContextVars(
target=get_client, args=(CosmosDBContainerName.SPAN, subscription_id, resource_group_name, workspace_name)
)
span_client_thread.start()
# Load created_by info first time may slow. So, we load it in parallel for warm up.
created_by_thread = ThreadWithContextVars(target=get_created_by_info_with_cache)
created_by_thread.start()
get_client(CosmosDBContainerName.LINE_SUMMARY, subscription_id, resource_group_name, workspace_name)
span_client_thread.join()
created_by_thread.join()
created_by = get_created_by_info_with_cache()
for span in all_spans:
span_client = get_client(CosmosDBContainerName.SPAN, subscription_id, resource_group_name, workspace_name)
result = SpanCosmosDB(span, created_by).persist(span_client)
# None means the span already exists, then we don't need to persist the summary also.
if result is not None:
line_summary_client = get_client(
CosmosDBContainerName.LINE_SUMMARY, subscription_id, resource_group_name, workspace_name
)
Summary(span, created_by, logger).persist(line_summary_client)
logger.info(
(
f"Finish writing trace to cosmosdb, total spans count: {len(all_spans)}."
f" Duration {datetime.now() - start_time}."
)
)
except Exception as e:
stack_trace = traceback.format_exc()
logger.error(f"Failed to write trace to cosmosdb: {e}, stack trace is {stack_trace}")
return
The provided code snippet includes necessary dependencies for implementing the `trace_collector` function. Write a Python function `def trace_collector(get_created_by_info_with_cache: Callable, logger: logging.Logger)` to solve the following problem:
This function is target to be reused in other places, so pass in get_created_by_info_with_cache and logger to avoid app related dependencies. Args: get_created_by_info_with_cache (Callable): A function that retrieves information about the creator of the trace. logger (logging.Logger): The logger object used for logging.
Here is the function:
def trace_collector(get_created_by_info_with_cache: Callable, logger: logging.Logger):
"""
This function is target to be reused in other places, so pass in get_created_by_info_with_cache and logger to avoid
app related dependencies.
Args:
get_created_by_info_with_cache (Callable): A function that retrieves information about the creator of the trace.
logger (logging.Logger): The logger object used for logging.
"""
content_type = request.headers.get("Content-Type")
# binary protobuf encoding
if "application/x-protobuf" in content_type:
traces_request = ExportTraceServiceRequest()
traces_request.ParseFromString(request.data)
all_spans = []
for resource_span in traces_request.resource_spans:
resource_attributes = dict()
for attribute in resource_span.resource.attributes:
attribute_dict = json.loads(MessageToJson(attribute))
attr_key, attr_value = parse_kv_from_pb_attribute(attribute_dict)
resource_attributes[attr_key] = attr_value
if SpanResourceAttributesFieldName.SESSION_ID not in resource_attributes:
resource_attributes[SpanResourceAttributesFieldName.SESSION_ID] = TRACE_DEFAULT_SESSION_ID
resource = {
SpanResourceFieldName.ATTRIBUTES: resource_attributes,
SpanResourceFieldName.SCHEMA_URL: resource_span.schema_url,
}
for scope_span in resource_span.scope_spans:
for span in scope_span.spans:
# TODO: persist with batch
span = Span._from_protobuf_object(span, resource=resource)
span._persist()
all_spans.append(span)
# Create a new thread to write trace to cosmosdb to avoid blocking the main thread
ThreadWithContextVars(
target=_try_write_trace_to_cosmosdb, args=(all_spans, get_created_by_info_with_cache, logger)
).start()
return "Traces received", 200
# JSON protobuf encoding
elif "application/json" in content_type:
raise NotImplementedError | This function is target to be reused in other places, so pass in get_created_by_info_with_cache and logger to avoid app related dependencies. Args: get_created_by_info_with_cache (Callable): A function that retrieves information about the creator of the trace. logger (logging.Logger): The logger object used for logging. |
4,184 | import inspect
from pathlib import Path
from flask import jsonify, request
import promptflow._sdk.schemas._connection as connection
from promptflow._sdk._configuration import Configuration
from promptflow._sdk._service import Namespace, Resource, fields
from promptflow._sdk._service.utils.utils import get_client_from_request, local_user_only, make_response_no_content
from promptflow._sdk.entities._connection import _Connection
def validate_working_directory(value):
if value is None:
return
if not isinstance(value, str):
value = str(value)
if not Path(value).is_dir():
raise ValueError("Invalid working directory.")
return value | null |
4,185 | import inspect
from pathlib import Path
from flask import jsonify, request
import promptflow._sdk.schemas._connection as connection
from promptflow._sdk._configuration import Configuration
from promptflow._sdk._service import Namespace, Resource, fields
from promptflow._sdk._service.utils.utils import get_client_from_request, local_user_only, make_response_no_content
from promptflow._sdk.entities._connection import _Connection
def _get_connection_operation(working_directory=None):
connection_provider = Configuration().get_connection_provider(path=working_directory)
# get_connection_operation is a shared function, so we build user agent based on request first and
# then pass it to the function
connection_operation = get_client_from_request(connection_provider=connection_provider).connections
return connection_operation | null |
4,186 | import logging
import threading
import time
from datetime import datetime, timedelta
from logging.handlers import RotatingFileHandler
from flask import Blueprint, Flask, current_app, g, jsonify, request
from flask_cors import CORS
from werkzeug.exceptions import HTTPException
from promptflow._sdk._constants import (
HOME_PROMPT_FLOW_DIR,
PF_SERVICE_HOUR_TIMEOUT,
PF_SERVICE_LOG_FILE,
PF_SERVICE_MONITOR_SECOND,
)
from promptflow._sdk._service import Api
from promptflow._sdk._service.apis.collector import trace_collector
from promptflow._sdk._service.apis.connection import api as connection_api
from promptflow._sdk._service.apis.line_run import api as line_run_api
from promptflow._sdk._service.apis.run import api as run_api
from promptflow._sdk._service.apis.span import api as span_api
from promptflow._sdk._service.apis.telemetry import api as telemetry_api
from promptflow._sdk._service.apis.ui import api as ui_api
from promptflow._sdk._service.utils.utils import (
FormattedException,
get_current_env_pfs_file,
get_port_from_config,
is_run_from_built_binary,
kill_exist_service,
)
from promptflow._sdk._utils import get_promptflow_sdk_version, overwrite_null_std_logger, read_write_by_user
from promptflow._utils.thread_utils import ThreadWithContextVars
def heartbeat():
response = {"promptflow": get_promptflow_sdk_version()}
return jsonify(response)
def get_created_by_info_with_cache():
if len(created_by_for_local_to_cloud_trace) > 0:
return created_by_for_local_to_cloud_trace
with created_by_for_local_to_cloud_trace_lock:
if len(created_by_for_local_to_cloud_trace) > 0:
return created_by_for_local_to_cloud_trace
try:
# The total time of collecting info is about 3s.
import jwt
from azure.identity import DefaultAzureCredential
from promptflow.azure._utils.general import get_arm_token
default_credential = DefaultAzureCredential()
token = get_arm_token(credential=default_credential)
decoded_token = jwt.decode(token, options={"verify_signature": False})
created_by_for_local_to_cloud_trace.update(
{
"object_id": decoded_token["oid"],
"tenant_id": decoded_token["tid"],
# Use appid as fallback for service principal scenario.
"name": decoded_token.get("name", decoded_token.get("appid", "")),
}
)
except Exception as e:
# This function is only target to be used in Flask app.
current_app.logger.error(f"Failed to get created_by info, ignore it: {e}")
return created_by_for_local_to_cloud_trace
def create_app():
app = Flask(__name__)
# in normal case, we don't need to handle CORS for PFS
# as far as we know, local UX development might need to handle this
# as there might be different ports in that scenario
CORS(app)
app.add_url_rule("/heartbeat", view_func=heartbeat)
app.add_url_rule(
"/v1/traces", view_func=lambda: trace_collector(get_created_by_info_with_cache, app.logger), methods=["POST"]
)
with app.app_context():
api_v1 = Blueprint("Prompt Flow Service", __name__, url_prefix="/v1.0")
# Registers resources from namespace for current instance of api
api = Api(api_v1, title="Prompt Flow Service", version="1.0")
api.add_namespace(connection_api)
api.add_namespace(run_api)
api.add_namespace(telemetry_api)
api.add_namespace(span_api)
api.add_namespace(line_run_api)
api.add_namespace(ui_api)
app.register_blueprint(api_v1)
# Disable flask-restx set X-Fields in header. https://flask-restx.readthedocs.io/en/latest/mask.html#usage
app.config["RESTX_MASK_SWAGGER"] = False
# Enable log
app.logger.setLevel(logging.INFO)
# each env will have its own log file
if is_run_from_built_binary():
log_file = HOME_PROMPT_FLOW_DIR / PF_SERVICE_LOG_FILE
log_file.touch(mode=read_write_by_user(), exist_ok=True)
else:
log_file = get_current_env_pfs_file(PF_SERVICE_LOG_FILE)
# Create a rotating file handler with a max size of 1 MB and keeping up to 1 backup files
handler = RotatingFileHandler(filename=log_file, maxBytes=1_000_000, backupCount=1)
formatter = logging.Formatter("[%(asctime)s][%(name)s][%(levelname)s] - %(message)s")
handler.setFormatter(formatter)
# Set app logger to the only one RotatingFileHandler to avoid duplicate logs
app.logger.handlers = [handler]
# Basic error handler
@api.errorhandler(Exception)
def handle_exception(e):
"""When any error occurs on the server, return a formatted error message."""
from dataclasses import asdict
if isinstance(e, HTTPException):
return asdict(FormattedException(e), dict_factory=lambda x: {k: v for (k, v) in x if v}), e.code
app.logger.error(e, exc_info=True, stack_info=True)
formatted_exception = FormattedException(e)
return (
asdict(formatted_exception, dict_factory=lambda x: {k: v for (k, v) in x if v}),
formatted_exception.status_code,
)
@app.before_request
def log_before_request_info():
app.config["last_request_time"] = datetime.now()
g.start = time.perf_counter()
if "/v1.0/Connections" in request.url:
request_body = "Request body not recorded for Connections API"
else:
request_body = request.get_data()
app.logger.debug(
"Last request time: %s, Headers: %s, Body: %s",
app.config["last_request_time"],
request.headers,
request_body,
)
@app.after_request
def log_after_request_info(response):
duration_time = time.perf_counter() - g.start
app.logger.info(
"Request_url: %s, duration: %s, response code: %s", request.url, duration_time, response.status_code
)
return response
# Start a monitor process using detach mode. It will stop pfs service if no request to pfs service in 1h in
# python scenario. For C# scenario, pfs will live until the process is killed manually.
def monitor_request():
while True:
time.sleep(PF_SERVICE_MONITOR_SECOND)
if "last_request_time" in app.config and datetime.now() - app.config["last_request_time"] > timedelta(
hours=PF_SERVICE_HOUR_TIMEOUT
):
# Todo: check if we have any not complete work? like persist all traces.
port = get_port_from_config()
if port:
app.logger.info(f"Try auto stop pfs service in port {port} since no request to app within 1h")
kill_exist_service(port)
break
if not is_run_from_built_binary():
monitor_thread = ThreadWithContextVars(target=monitor_request, daemon=True)
monitor_thread.start()
return app, api | null |
4,187 | from promptflow._sdk._service.entry import main
import sys
import win32serviceutil
import win32service
import servicemanager
class PromptFlowServiceFramework(win32serviceutil.ServiceFramework):
_svc_name_ = "PromptFlowService"
_svc_display_name_ = "Prompt Flow Service"
def SvcStop(self):
"""Stop the service"""
self.ReportServiceStatus(win32service.SERVICE_STOP_PENDING)
self.service_impl.stop()
self.ReportServiceStatus(win32service.SERVICE_STOPPED)
def SvcDoRun(self):
"""Start the service; does not return until stopped"""
self.ReportServiceStatus(win32service.SERVICE_START_PENDING)
self.service_impl = PromptFlowService()
self.ReportServiceStatus(win32service.SERVICE_RUNNING)
# Run the service
self.service_impl.run()
def init():
if len(sys.argv) == 1:
servicemanager.Initialize()
servicemanager.PrepareToHostSingle(PromptFlowServiceFramework)
servicemanager.StartServiceCtrlDispatcher()
else:
win32serviceutil.HandleCommandLine(PromptFlowServiceFramework) | null |
4,188 | import argparse
import json
import logging
import os
import platform
import subprocess
import sys
import waitress
from promptflow._cli._utils import _get_cli_activity_name, cli_exception_and_telemetry_handler
from promptflow._constants import PF_NO_INTERACTIVE_LOGIN
from promptflow._sdk._constants import LOGGER_NAME, PF_SERVICE_DEBUG, PF_SERVICE_WORKER_NUM
from promptflow._sdk._service.app import create_app
from promptflow._sdk._service.utils.utils import (
check_pfs_service_status,
dump_port_to_config,
get_port_from_config,
get_started_service_info,
is_port_in_use,
is_run_from_built_binary,
kill_exist_service,
)
from promptflow._sdk._utils import get_promptflow_sdk_version, print_pf_version
from promptflow._utils.logger_utils import get_cli_sdk_logger
from promptflow.exceptions import UserErrorException
logger = get_cli_sdk_logger()
def get_app(environ, start_response):
app, _ = create_app()
if os.environ.get(PF_SERVICE_DEBUG) == "true":
app.logger.setLevel(logging.DEBUG)
else:
app.logger.setLevel(logging.INFO)
return app.wsgi_app(environ, start_response) | null |
4,189 | import argparse
import json
import logging
import os
import platform
import subprocess
import sys
import waitress
from promptflow._cli._utils import _get_cli_activity_name, cli_exception_and_telemetry_handler
from promptflow._constants import PF_NO_INTERACTIVE_LOGIN
from promptflow._sdk._constants import LOGGER_NAME, PF_SERVICE_DEBUG, PF_SERVICE_WORKER_NUM
from promptflow._sdk._service.app import create_app
from promptflow._sdk._service.utils.utils import (
check_pfs_service_status,
dump_port_to_config,
get_port_from_config,
get_started_service_info,
is_port_in_use,
is_run_from_built_binary,
kill_exist_service,
)
from promptflow._sdk._utils import get_promptflow_sdk_version, print_pf_version
from promptflow._utils.logger_utils import get_cli_sdk_logger
from promptflow.exceptions import UserErrorException
def add_start_service_action(subparsers):
"""Add action to start pfs."""
start_pfs_parser = subparsers.add_parser(
"start",
description="Start promptflow service.",
help="pfs start",
)
start_pfs_parser.add_argument("-p", "--port", type=int, help="port of the promptflow service")
start_pfs_parser.add_argument(
"--force",
action="store_true",
help="If the port is used, the existing service will be terminated and restart a new service.",
)
start_pfs_parser.add_argument(
"-d",
"--debug",
action="store_true",
help="The flag to turn on debug mode for pfs.",
)
start_pfs_parser.set_defaults(action="start")
def add_stop_service_action(subparsers):
"""Add action to stop pfs."""
stop_pfs_parser = subparsers.add_parser(
"stop",
description="Stop promptflow service.",
help="pfs stop",
)
stop_pfs_parser.set_defaults(action="stop")
def add_show_status_action(subparsers):
"""Add action to show pfs status."""
show_status_parser = subparsers.add_parser(
"show-status",
description="Display the started promptflow service info.",
help="pfs show-status",
)
show_status_parser.set_defaults(action="show-status")
def run_command(args):
if args.version:
print_pf_version()
return
elif args.action == "show-status":
port = get_port_from_config()
status = get_started_service_info(port)
if status:
print(status)
return
else:
logger = logging.getLogger(LOGGER_NAME)
logger.warning("Promptflow service is not started.")
sys.exit(1)
elif args.action == "start":
start_service(args)
elif args.action == "stop":
stop_service()
def entry(command_args):
parser = argparse.ArgumentParser(
prog="pfs",
formatter_class=argparse.RawDescriptionHelpFormatter,
description="Prompt Flow Service",
)
parser.add_argument(
"-v", "--version", dest="version", action="store_true", help="show current PromptflowService version and exit"
)
subparsers = parser.add_subparsers()
add_start_service_action(subparsers)
add_show_status_action(subparsers)
add_stop_service_action(subparsers)
args = parser.parse_args(command_args)
activity_name = _get_cli_activity_name(cli=parser.prog, args=args)
cli_exception_and_telemetry_handler(run_command, activity_name)(args) | null |
4,190 | import getpass
import hashlib
import os
import re
import socket
import sys
import time
from dataclasses import InitVar, dataclass, field
from datetime import datetime
from functools import wraps
from pathlib import Path
import psutil
import requests
from flask import abort, make_response, request
from promptflow._constants import PF_RUN_AS_BUILT_BINARY
from promptflow._sdk._constants import (
DEFAULT_ENCODING,
HOME_PROMPT_FLOW_DIR,
PF_SERVICE_PORT_DIT_NAME,
PF_SERVICE_PORT_FILE,
)
from promptflow._sdk._errors import ConnectionNotFoundError, RunNotFoundError
from promptflow._sdk._utils import get_promptflow_sdk_version, read_write_by_user
from promptflow._utils.logger_utils import get_cli_sdk_logger
from promptflow._utils.yaml_utils import dump_yaml, load_yaml
from promptflow._version import VERSION
from promptflow.exceptions import PromptflowException, UserErrorException
def local_user_only(func):
@wraps(func)
def wrapper(*args, **kwargs):
# Get the user name from request.
user = request.environ.get("REMOTE_USER") or request.headers.get("X-Remote-User")
if user != getpass.getuser():
abort(403)
return func(*args, **kwargs)
return wrapper | null |
4,191 | import getpass
import hashlib
import os
import re
import socket
import sys
import time
from dataclasses import InitVar, dataclass, field
from datetime import datetime
from functools import wraps
from pathlib import Path
import psutil
import requests
from flask import abort, make_response, request
from promptflow._constants import PF_RUN_AS_BUILT_BINARY
from promptflow._sdk._constants import (
DEFAULT_ENCODING,
HOME_PROMPT_FLOW_DIR,
PF_SERVICE_PORT_DIT_NAME,
PF_SERVICE_PORT_FILE,
)
from promptflow._sdk._errors import ConnectionNotFoundError, RunNotFoundError
from promptflow._sdk._utils import get_promptflow_sdk_version, read_write_by_user
from promptflow._utils.logger_utils import get_cli_sdk_logger
from promptflow._utils.yaml_utils import dump_yaml, load_yaml
from promptflow._version import VERSION
from promptflow.exceptions import PromptflowException, UserErrorException
def get_current_env_pfs_file(file_name):
executable_path = Path(sys.executable.lower()).as_posix()
dir_name = os.path.basename(os.path.dirname(executable_path))
# Hash the executable path
hash_object = hashlib.sha1(executable_path.encode())
hex_dig = hash_object.hexdigest()
port_file_name = f"{dir_name}_{hex_dig}_{file_name}"
port_file_dir = HOME_PROMPT_FLOW_DIR / PF_SERVICE_PORT_DIT_NAME
port_file_dir.mkdir(parents=True, exist_ok=True)
port_file_path = port_file_dir / port_file_name
port_file_path.touch(mode=read_write_by_user(), exist_ok=True)
return port_file_path
def get_random_port():
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
s.bind(("localhost", 0))
return s.getsockname()[1]
def is_run_from_built_binary():
"""
Use this function to trigger behavior difference between calling from promptflow sdk/cli and built binary.
Allow customer to use environment variable to control the triggering.
"""
return sys.executable.endswith("pfcli.exe") or os.environ.get(PF_RUN_AS_BUILT_BINARY, "").lower() == "true"
def get_port_from_config(create_if_not_exists=False):
if is_run_from_built_binary():
port_file_path = HOME_PROMPT_FLOW_DIR / PF_SERVICE_PORT_FILE
port_file_path.touch(mode=read_write_by_user(), exist_ok=True)
else:
port_file_path = get_current_env_pfs_file(PF_SERVICE_PORT_FILE)
with open(port_file_path, "r", encoding=DEFAULT_ENCODING) as f:
service_config = load_yaml(f) or {}
port = service_config.get("service", {}).get("port", None)
if not port and create_if_not_exists:
with open(port_file_path, "w", encoding=DEFAULT_ENCODING) as f:
# Set random port to ~/.promptflow/pf.yaml
port = get_random_port()
service_config["service"] = service_config.get("service", {})
service_config["service"]["port"] = port
dump_yaml(service_config, f)
return port | null |
4,192 | import getpass
import hashlib
import os
import re
import socket
import sys
import time
from dataclasses import InitVar, dataclass, field
from datetime import datetime
from functools import wraps
from pathlib import Path
import psutil
import requests
from flask import abort, make_response, request
from promptflow._constants import PF_RUN_AS_BUILT_BINARY
from promptflow._sdk._constants import (
DEFAULT_ENCODING,
HOME_PROMPT_FLOW_DIR,
PF_SERVICE_PORT_DIT_NAME,
PF_SERVICE_PORT_FILE,
)
from promptflow._sdk._errors import ConnectionNotFoundError, RunNotFoundError
from promptflow._sdk._utils import get_promptflow_sdk_version, read_write_by_user
from promptflow._utils.logger_utils import get_cli_sdk_logger
from promptflow._utils.yaml_utils import dump_yaml, load_yaml
from promptflow._version import VERSION
from promptflow.exceptions import PromptflowException, UserErrorException
def get_current_env_pfs_file(file_name):
def is_run_from_built_binary():
def dump_port_to_config(port):
if is_run_from_built_binary():
port_file_path = HOME_PROMPT_FLOW_DIR / PF_SERVICE_PORT_FILE
port_file_path.touch(mode=read_write_by_user(), exist_ok=True)
else:
# Set port to ~/.promptflow/pfs/**_pf.port, if already have a port in file , will overwrite it.
port_file_path = get_current_env_pfs_file(PF_SERVICE_PORT_FILE)
with open(port_file_path, "r", encoding=DEFAULT_ENCODING) as f:
service_config = load_yaml(f) or {}
with open(port_file_path, "w", encoding=DEFAULT_ENCODING) as f:
service_config["service"] = service_config.get("service", {})
service_config["service"]["port"] = port
dump_yaml(service_config, f) | null |
4,193 | import getpass
import hashlib
import os
import re
import socket
import sys
import time
from dataclasses import InitVar, dataclass, field
from datetime import datetime
from functools import wraps
from pathlib import Path
import psutil
import requests
from flask import abort, make_response, request
from promptflow._constants import PF_RUN_AS_BUILT_BINARY
from promptflow._sdk._constants import (
DEFAULT_ENCODING,
HOME_PROMPT_FLOW_DIR,
PF_SERVICE_PORT_DIT_NAME,
PF_SERVICE_PORT_FILE,
)
from promptflow._sdk._errors import ConnectionNotFoundError, RunNotFoundError
from promptflow._sdk._utils import get_promptflow_sdk_version, read_write_by_user
from promptflow._utils.logger_utils import get_cli_sdk_logger
from promptflow._utils.yaml_utils import dump_yaml, load_yaml
from promptflow._version import VERSION
from promptflow.exceptions import PromptflowException, UserErrorException
def is_port_in_use(port: int):
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
return s.connect_ex(("localhost", port)) == 0 | null |
4,194 | import getpass
import hashlib
import os
import re
import socket
import sys
import time
from dataclasses import InitVar, dataclass, field
from datetime import datetime
from functools import wraps
from pathlib import Path
import psutil
import requests
from flask import abort, make_response, request
from promptflow._constants import PF_RUN_AS_BUILT_BINARY
from promptflow._sdk._constants import (
DEFAULT_ENCODING,
HOME_PROMPT_FLOW_DIR,
PF_SERVICE_PORT_DIT_NAME,
PF_SERVICE_PORT_FILE,
)
from promptflow._sdk._errors import ConnectionNotFoundError, RunNotFoundError
from promptflow._sdk._utils import get_promptflow_sdk_version, read_write_by_user
from promptflow._utils.logger_utils import get_cli_sdk_logger
from promptflow._utils.yaml_utils import dump_yaml, load_yaml
from promptflow._version import VERSION
from promptflow.exceptions import PromptflowException, UserErrorException
def _get_process_by_port(port):
for proc in psutil.process_iter(["pid", "connections", "create_time"]):
try:
for connection in proc.connections():
if connection.laddr.port == port:
return proc
except psutil.AccessDenied:
pass
def kill_exist_service(port):
proc = _get_process_by_port(port)
if proc:
proc.terminate()
proc.wait(10) | null |
4,195 | import getpass
import hashlib
import os
import re
import socket
import sys
import time
from dataclasses import InitVar, dataclass, field
from datetime import datetime
from functools import wraps
from pathlib import Path
import psutil
import requests
from flask import abort, make_response, request
from promptflow._constants import PF_RUN_AS_BUILT_BINARY
from promptflow._sdk._constants import (
DEFAULT_ENCODING,
HOME_PROMPT_FLOW_DIR,
PF_SERVICE_PORT_DIT_NAME,
PF_SERVICE_PORT_FILE,
)
from promptflow._sdk._errors import ConnectionNotFoundError, RunNotFoundError
from promptflow._sdk._utils import get_promptflow_sdk_version, read_write_by_user
from promptflow._utils.logger_utils import get_cli_sdk_logger
from promptflow._utils.yaml_utils import dump_yaml, load_yaml
from promptflow._version import VERSION
from promptflow.exceptions import PromptflowException, UserErrorException
def _get_process_by_port(port):
for proc in psutil.process_iter(["pid", "connections", "create_time"]):
try:
for connection in proc.connections():
if connection.laddr.port == port:
return proc
except psutil.AccessDenied:
pass
def get_started_service_info(port):
service_info = {}
proc = _get_process_by_port(port)
if proc:
create_time = proc.info["create_time"]
process_uptime = datetime.now() - datetime.fromtimestamp(create_time)
service_info["create_time"] = str(datetime.fromtimestamp(create_time))
service_info["uptime"] = str(process_uptime)
service_info["port"] = port
return service_info | null |
4,196 | import getpass
import hashlib
import os
import re
import socket
import sys
import time
from dataclasses import InitVar, dataclass, field
from datetime import datetime
from functools import wraps
from pathlib import Path
import psutil
import requests
from flask import abort, make_response, request
from promptflow._constants import PF_RUN_AS_BUILT_BINARY
from promptflow._sdk._constants import (
DEFAULT_ENCODING,
HOME_PROMPT_FLOW_DIR,
PF_SERVICE_PORT_DIT_NAME,
PF_SERVICE_PORT_FILE,
)
from promptflow._sdk._errors import ConnectionNotFoundError, RunNotFoundError
from promptflow._sdk._utils import get_promptflow_sdk_version, read_write_by_user
from promptflow._utils.logger_utils import get_cli_sdk_logger
from promptflow._utils.yaml_utils import dump_yaml, load_yaml
from promptflow._version import VERSION
from promptflow.exceptions import PromptflowException, UserErrorException
def make_response_no_content():
return make_response("", 204) | null |
4,197 | import getpass
import hashlib
import os
import re
import socket
import sys
import time
from dataclasses import InitVar, dataclass, field
from datetime import datetime
from functools import wraps
from pathlib import Path
import psutil
import requests
from flask import abort, make_response, request
from promptflow._constants import PF_RUN_AS_BUILT_BINARY
from promptflow._sdk._constants import (
DEFAULT_ENCODING,
HOME_PROMPT_FLOW_DIR,
PF_SERVICE_PORT_DIT_NAME,
PF_SERVICE_PORT_FILE,
)
from promptflow._sdk._errors import ConnectionNotFoundError, RunNotFoundError
from promptflow._sdk._utils import get_promptflow_sdk_version, read_write_by_user
from promptflow._utils.logger_utils import get_cli_sdk_logger
from promptflow._utils.yaml_utils import dump_yaml, load_yaml
from promptflow._version import VERSION
from promptflow.exceptions import PromptflowException, UserErrorException
logger = get_cli_sdk_logger()
def is_pfs_service_healthy(pfs_port) -> bool:
"""Check if pfs service is running and pfs version matches pf version."""
try:
response = requests.get("http://localhost:{}/heartbeat".format(pfs_port))
if response.status_code == 200:
logger.debug(f"Promptflow service is already running on port {pfs_port}, {response.text}")
match = re.search(r'"promptflow":"(.*?)"', response.text)
if match:
version = match.group(1)
is_healthy = version == get_promptflow_sdk_version()
if not is_healthy:
logger.warning(
f"Promptflow service is running on port {pfs_port}, but the version is not the same as "
f"promptflow sdk version {get_promptflow_sdk_version()}. The service version is {version}."
)
else:
is_healthy = False
logger.warning("/heartbeat response doesn't contain current pfs version.")
return is_healthy
except Exception: # pylint: disable=broad-except
pass
logger.warning(
f"Promptflow service can't be reached through port {pfs_port}, will try to start/force restart "
f"promptflow service."
)
return False
def check_pfs_service_status(pfs_port, time_delay=1, time_threshold=20) -> bool:
wait_time = time_delay
time.sleep(time_delay)
is_healthy = is_pfs_service_healthy(pfs_port)
while is_healthy is False and time_threshold > wait_time:
logger.info(
f"Promptflow service is not ready. It has been waited for {wait_time}s, will wait for at most "
f"{time_threshold}s."
)
wait_time += time_delay
time.sleep(time_delay)
is_healthy = is_pfs_service_healthy(pfs_port)
return is_healthy | null |
4,198 | import getpass
import hashlib
import os
import re
import socket
import sys
import time
from dataclasses import InitVar, dataclass, field
from datetime import datetime
from functools import wraps
from pathlib import Path
import psutil
import requests
from flask import abort, make_response, request
from promptflow._constants import PF_RUN_AS_BUILT_BINARY
from promptflow._sdk._constants import (
DEFAULT_ENCODING,
HOME_PROMPT_FLOW_DIR,
PF_SERVICE_PORT_DIT_NAME,
PF_SERVICE_PORT_FILE,
)
from promptflow._sdk._errors import ConnectionNotFoundError, RunNotFoundError
from promptflow._sdk._utils import get_promptflow_sdk_version, read_write_by_user
from promptflow._utils.logger_utils import get_cli_sdk_logger
from promptflow._utils.yaml_utils import dump_yaml, load_yaml
from promptflow._version import VERSION
from promptflow.exceptions import PromptflowException, UserErrorException
def build_pfs_user_agent():
user_agent = request.user_agent.string
user_agent_for_local_pfs = f"local_pfs/{VERSION}"
if user_agent:
return f"{user_agent} {user_agent_for_local_pfs}"
return user_agent_for_local_pfs
The provided code snippet includes necessary dependencies for implementing the `get_client_from_request` function. Write a Python function `def get_client_from_request(*, connection_provider=None) -> "PFClient"` to solve the following problem:
Build a PFClient instance based on current request in local PFS. User agent may be different for each request.
Here is the function:
def get_client_from_request(*, connection_provider=None) -> "PFClient":
"""
Build a PFClient instance based on current request in local PFS.
User agent may be different for each request.
"""
from promptflow._sdk._pf_client import PFClient
user_agent = build_pfs_user_agent()
if connection_provider:
pf_client = PFClient(connection_provider=connection_provider, user_agent_override=user_agent)
else:
pf_client = PFClient(user_agent_override=user_agent)
return pf_client | Build a PFClient instance based on current request in local PFS. User agent may be different for each request. |
4,199 | import argparse
import contextlib
import json
import os
import shutil
import sys
import traceback
from configparser import ConfigParser
from functools import wraps
from pathlib import Path
from typing import Dict, List, Optional, Tuple
import pydash
from dotenv import load_dotenv
from tabulate import tabulate
from promptflow._sdk._constants import AzureMLWorkspaceTriad, CLIListOutputFormat, EnvironmentVariables
from promptflow._sdk._telemetry import ActivityType, get_telemetry_logger, log_activity
from promptflow._sdk._utils import print_red_error, print_yellow_warning
from promptflow._utils.exception_utils import ExceptionPresenter
from promptflow._utils.logger_utils import get_cli_sdk_logger
from promptflow._utils.utils import is_in_ci_pipeline
from promptflow.exceptions import ErrorTarget, PromptflowException, UserErrorException
The provided code snippet includes necessary dependencies for implementing the `_set_workspace_argument_for_subparsers` function. Write a Python function `def _set_workspace_argument_for_subparsers(subparser, required=False)` to solve the following problem:
Add workspace arguments to subparsers.
Here is the function:
def _set_workspace_argument_for_subparsers(subparser, required=False):
"""Add workspace arguments to subparsers."""
# Make these arguments optional so that user can use local azure cli context
subparser.add_argument(
"--subscription", required=required, type=str, help="Subscription id, required when pass run id."
)
subparser.add_argument(
"--resource-group", "-g", required=required, type=str, help="Resource group name, required when pass run id."
)
subparser.add_argument(
"--workspace-name", "-w", required=required, type=str, help="Workspace name, required when pass run id."
) | Add workspace arguments to subparsers. |
4,200 | import argparse
import contextlib
import json
import os
import shutil
import sys
import traceback
from configparser import ConfigParser
from functools import wraps
from pathlib import Path
from typing import Dict, List, Optional, Tuple
import pydash
from dotenv import load_dotenv
from tabulate import tabulate
from promptflow._sdk._constants import AzureMLWorkspaceTriad, CLIListOutputFormat, EnvironmentVariables
from promptflow._sdk._telemetry import ActivityType, get_telemetry_logger, log_activity
from promptflow._sdk._utils import print_red_error, print_yellow_warning
from promptflow._utils.exception_utils import ExceptionPresenter
from promptflow._utils.logger_utils import get_cli_sdk_logger
from promptflow._utils.utils import is_in_ci_pipeline
from promptflow.exceptions import ErrorTarget, PromptflowException, UserErrorException
def dump_connection_file(dot_env_file: str):
for key in ["AZURE_OPENAI_API_KEY", "AZURE_OPENAI_API_BASE", "CHAT_DEPLOYMENT_NAME"]:
if key not in os.environ:
# skip dump connection file if not all required environment variables are set
return
connection_file_path = "./connection.json"
os.environ["PROMPTFLOW_CONNECTIONS"] = connection_file_path
load_dotenv(dot_env_file)
connection_dict = {
"custom_connection": {
"type": "CustomConnection",
"value": {
"AZURE_OPENAI_API_KEY": os.environ["AZURE_OPENAI_API_KEY"],
"AZURE_OPENAI_API_BASE": os.environ["AZURE_OPENAI_API_BASE"],
"CHAT_DEPLOYMENT_NAME": os.environ["CHAT_DEPLOYMENT_NAME"],
},
"module": "promptflow.connections",
}
}
with open(connection_file_path, "w") as f:
json.dump(connection_dict, f) | null |
4,201 | import argparse
import contextlib
import json
import os
import shutil
import sys
import traceback
from configparser import ConfigParser
from functools import wraps
from pathlib import Path
from typing import Dict, List, Optional, Tuple
import pydash
from dotenv import load_dotenv
from tabulate import tabulate
from promptflow._sdk._constants import AzureMLWorkspaceTriad, CLIListOutputFormat, EnvironmentVariables
from promptflow._sdk._telemetry import ActivityType, get_telemetry_logger, log_activity
from promptflow._sdk._utils import print_red_error, print_yellow_warning
from promptflow._utils.exception_utils import ExceptionPresenter
from promptflow._utils.logger_utils import get_cli_sdk_logger
from promptflow._utils.utils import is_in_ci_pipeline
from promptflow.exceptions import ErrorTarget, PromptflowException, UserErrorException
def get_credentials_for_cli():
"""
This function is part of mldesigner.dsl._dynamic_executor.DynamicExecutor._get_ml_client with
some local imports.
"""
from azure.ai.ml.identity import AzureMLOnBehalfOfCredential
from azure.identity import AzureCliCredential, DefaultAzureCredential, ManagedIdentityCredential
# May return a different one if executing in local
# credential priority: OBO > azure cli > managed identity > default
# check OBO via environment variable, the referenced code can be found from below search:
# https://msdata.visualstudio.com/Vienna/_search?text=AZUREML_OBO_ENABLED&type=code&pageSize=25&filters=ProjectFilters%7BVienna%7D&action=contents
if os.getenv(IdentityEnvironmentVariable.OBO_ENABLED_FLAG):
logger.debug("User identity is configured, use OBO credential.")
credential = AzureMLOnBehalfOfCredential()
elif _use_azure_cli_credential():
logger.debug("Use azure cli credential since specified in environment variable.")
credential = AzureCliCredential()
else:
client_id_from_env = os.getenv(IdentityEnvironmentVariable.DEFAULT_IDENTITY_CLIENT_ID)
if client_id_from_env:
# use managed identity when client id is available from environment variable.
# reference code:
# https://learn.microsoft.com/en-us/azure/machine-learning/how-to-identity-based-service-authentication?tabs=cli#compute-cluster
logger.debug("Use managed identity credential.")
credential = ManagedIdentityCredential(client_id=client_id_from_env)
elif is_in_ci_pipeline():
# use managed identity when executing in CI pipeline.
logger.debug("Use azure cli credential since in CI pipeline.")
credential = AzureCliCredential()
else:
# use default Azure credential to handle other cases.
logger.debug("Use default credential.")
credential = DefaultAzureCredential()
return credential
def get_client_info_for_cli(subscription_id: str = None, resource_group_name: str = None, workspace_name: str = None):
if not (subscription_id and resource_group_name and workspace_name):
workspace_triad = get_workspace_triad_from_local()
subscription_id = subscription_id or workspace_triad.subscription_id
resource_group_name = resource_group_name or workspace_triad.resource_group_name
workspace_name = workspace_name or workspace_triad.workspace_name
if not (subscription_id and resource_group_name and workspace_name):
workspace_name = workspace_name or os.getenv("AZUREML_ARM_WORKSPACE_NAME")
subscription_id = subscription_id or os.getenv("AZUREML_ARM_SUBSCRIPTION")
resource_group_name = resource_group_name or os.getenv("AZUREML_ARM_RESOURCEGROUP")
return subscription_id, resource_group_name, workspace_name
def get_client_for_cli(*, subscription_id: str = None, resource_group_name: str = None, workspace_name: str = None):
from azure.ai.ml import MLClient
subscription_id, resource_group_name, workspace_name = get_client_info_for_cli(
subscription_id=subscription_id, resource_group_name=resource_group_name, workspace_name=workspace_name
)
missing_fields = []
for key in ["workspace_name", "subscription_id", "resource_group_name"]:
if not locals()[key]:
missing_fields.append(key)
if missing_fields:
raise UserErrorException(
"Please provide all required fields to work on specific workspace: {}".format(", ".join(missing_fields)),
target=ErrorTarget.CONTROL_PLANE_SDK,
)
return MLClient(
credential=get_credentials_for_cli(),
subscription_id=subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
) | null |
4,202 | import argparse
import contextlib
import json
import os
import shutil
import sys
import traceback
from configparser import ConfigParser
from functools import wraps
from pathlib import Path
from typing import Dict, List, Optional, Tuple
import pydash
from dotenv import load_dotenv
from tabulate import tabulate
from promptflow._sdk._constants import AzureMLWorkspaceTriad, CLIListOutputFormat, EnvironmentVariables
from promptflow._sdk._telemetry import ActivityType, get_telemetry_logger, log_activity
from promptflow._sdk._utils import print_red_error, print_yellow_warning
from promptflow._utils.exception_utils import ExceptionPresenter
from promptflow._utils.logger_utils import get_cli_sdk_logger
from promptflow._utils.utils import is_in_ci_pipeline
from promptflow.exceptions import ErrorTarget, PromptflowException, UserErrorException
def confirm(question, skip_confirm) -> bool:
if skip_confirm:
return True
answer = input(f"{question} [y/n]")
while answer.lower() not in ["y", "n"]:
answer = input("Please input 'y' or 'n':")
return answer.lower() == "y" | null |
4,203 | import argparse
import contextlib
import json
import os
import shutil
import sys
import traceback
from configparser import ConfigParser
from functools import wraps
from pathlib import Path
from typing import Dict, List, Optional, Tuple
import pydash
from dotenv import load_dotenv
from tabulate import tabulate
from promptflow._sdk._constants import AzureMLWorkspaceTriad, CLIListOutputFormat, EnvironmentVariables
from promptflow._sdk._telemetry import ActivityType, get_telemetry_logger, log_activity
from promptflow._sdk._utils import print_red_error, print_yellow_warning
from promptflow._utils.exception_utils import ExceptionPresenter
from promptflow._utils.logger_utils import get_cli_sdk_logger
from promptflow._utils.utils import is_in_ci_pipeline
from promptflow.exceptions import ErrorTarget, PromptflowException, UserErrorException
def inject_sys_path(path):
original_sys_path = sys.path.copy()
sys.path.insert(0, str(path))
try:
yield
finally:
sys.path = original_sys_path | null |
4,204 | import argparse
import contextlib
import json
import os
import shutil
import sys
import traceback
from configparser import ConfigParser
from functools import wraps
from pathlib import Path
from typing import Dict, List, Optional, Tuple
import pydash
from dotenv import load_dotenv
from tabulate import tabulate
from promptflow._sdk._constants import AzureMLWorkspaceTriad, CLIListOutputFormat, EnvironmentVariables
from promptflow._sdk._telemetry import ActivityType, get_telemetry_logger, log_activity
from promptflow._sdk._utils import print_red_error, print_yellow_warning
from promptflow._utils.exception_utils import ExceptionPresenter
from promptflow._utils.logger_utils import get_cli_sdk_logger
from promptflow._utils.utils import is_in_ci_pipeline
from promptflow.exceptions import ErrorTarget, PromptflowException, UserErrorException
def activate_action(name, description, epilog, add_params, subparsers, help_message, action_param_name="action"):
parser = subparsers.add_parser(
name,
description=description,
epilog=epilog,
formatter_class=argparse.RawDescriptionHelpFormatter,
help=help_message,
)
if add_params:
for add_param_func in add_params:
add_param_func(parser)
parser.set_defaults(**{action_param_name: name})
return parser | null |
4,205 | import argparse
import contextlib
import json
import os
import shutil
import sys
import traceback
from configparser import ConfigParser
from functools import wraps
from pathlib import Path
from typing import Dict, List, Optional, Tuple
import pydash
from dotenv import load_dotenv
from tabulate import tabulate
from promptflow._sdk._constants import AzureMLWorkspaceTriad, CLIListOutputFormat, EnvironmentVariables
from promptflow._sdk._telemetry import ActivityType, get_telemetry_logger, log_activity
from promptflow._sdk._utils import print_red_error, print_yellow_warning
from promptflow._utils.exception_utils import ExceptionPresenter
from promptflow._utils.logger_utils import get_cli_sdk_logger
from promptflow._utils.utils import is_in_ci_pipeline
from promptflow.exceptions import ErrorTarget, PromptflowException, UserErrorException
logger = get_cli_sdk_logger()
def _dump_entity_with_warnings(entity) -> Dict:
if not entity:
return
if isinstance(entity, Dict):
return entity
try:
return entity._to_dict() # type: ignore
except Exception as err:
logger.warning("Failed to deserialize response: " + str(err))
logger.warning(str(entity))
logger.debug(traceback.format_exc()) | null |
4,206 | import argparse
import contextlib
import json
import os
import shutil
import sys
import traceback
from configparser import ConfigParser
from functools import wraps
from pathlib import Path
from typing import Dict, List, Optional, Tuple
import pydash
from dotenv import load_dotenv
from tabulate import tabulate
from promptflow._sdk._constants import AzureMLWorkspaceTriad, CLIListOutputFormat, EnvironmentVariables
from promptflow._sdk._telemetry import ActivityType, get_telemetry_logger, log_activity
from promptflow._sdk._utils import print_red_error, print_yellow_warning
from promptflow._utils.exception_utils import ExceptionPresenter
from promptflow._utils.logger_utils import get_cli_sdk_logger
from promptflow._utils.utils import is_in_ci_pipeline
from promptflow.exceptions import ErrorTarget, PromptflowException, UserErrorException
def list_of_dict_to_dict(obj: list):
if not isinstance(obj, list):
return {}
result = {}
for item in obj:
result.update(item)
return result | null |
4,207 | import argparse
import contextlib
import json
import os
import shutil
import sys
import traceback
from configparser import ConfigParser
from functools import wraps
from pathlib import Path
from typing import Dict, List, Optional, Tuple
import pydash
from dotenv import load_dotenv
from tabulate import tabulate
from promptflow._sdk._constants import AzureMLWorkspaceTriad, CLIListOutputFormat, EnvironmentVariables
from promptflow._sdk._telemetry import ActivityType, get_telemetry_logger, log_activity
from promptflow._sdk._utils import print_red_error, print_yellow_warning
from promptflow._utils.exception_utils import ExceptionPresenter
from promptflow._utils.logger_utils import get_cli_sdk_logger
from promptflow._utils.utils import is_in_ci_pipeline
from promptflow.exceptions import ErrorTarget, PromptflowException, UserErrorException
def list_of_dict_to_nested_dict(obj: list):
result = {}
for item in obj:
for keys, value in item.items():
keys = keys.split(".")
pydash.set_(result, keys, value)
return result | null |
4,208 | import argparse
import contextlib
import json
import os
import shutil
import sys
import traceback
from configparser import ConfigParser
from functools import wraps
from pathlib import Path
from typing import Dict, List, Optional, Tuple
import pydash
from dotenv import load_dotenv
from tabulate import tabulate
from promptflow._sdk._constants import AzureMLWorkspaceTriad, CLIListOutputFormat, EnvironmentVariables
from promptflow._sdk._telemetry import ActivityType, get_telemetry_logger, log_activity
from promptflow._sdk._utils import print_red_error, print_yellow_warning
from promptflow._utils.exception_utils import ExceptionPresenter
from promptflow._utils.logger_utils import get_cli_sdk_logger
from promptflow._utils.utils import is_in_ci_pipeline
from promptflow.exceptions import ErrorTarget, PromptflowException, UserErrorException
def is_format_exception():
if os.environ.get("PROMPTFLOW_STRUCTURE_EXCEPTION_OUTPUT", "false").lower() == "true":
return True
return False
The provided code snippet includes necessary dependencies for implementing the `cli_exception_and_telemetry_handler` function. Write a Python function `def cli_exception_and_telemetry_handler(func, activity_name, custom_dimensions=None)` to solve the following problem:
Catch known cli exceptions.
Here is the function:
def cli_exception_and_telemetry_handler(func, activity_name, custom_dimensions=None):
"""Catch known cli exceptions."""
@wraps(func)
def wrapper(*args, **kwargs):
try:
telemetry_logger = get_telemetry_logger()
with log_activity(
telemetry_logger,
activity_name,
activity_type=ActivityType.PUBLICAPI,
custom_dimensions=custom_dimensions,
):
return func(*args, **kwargs)
except Exception as e:
if is_format_exception():
# When the flag format_exception is set in command,
# it will write a json with exception info and command to stderr.
error_msg = ExceptionPresenter.create(e).to_dict(include_debug_info=True)
error_msg["command"] = " ".join(sys.argv)
sys.stderr.write(json.dumps(error_msg))
if isinstance(e, PromptflowException):
print_red_error(f"{activity_name} failed with {e.__class__.__name__}: {str(e)}")
sys.exit(1)
else:
raise e
return wrapper | Catch known cli exceptions. |
4,209 | import argparse
import contextlib
import json
import os
import shutil
import sys
import traceback
from configparser import ConfigParser
from functools import wraps
from pathlib import Path
from typing import Dict, List, Optional, Tuple
import pydash
from dotenv import load_dotenv
from tabulate import tabulate
from promptflow._sdk._constants import AzureMLWorkspaceTriad, CLIListOutputFormat, EnvironmentVariables
from promptflow._sdk._telemetry import ActivityType, get_telemetry_logger, log_activity
from promptflow._sdk._utils import print_red_error, print_yellow_warning
from promptflow._utils.exception_utils import ExceptionPresenter
from promptflow._utils.logger_utils import get_cli_sdk_logger
from promptflow._utils.utils import is_in_ci_pipeline
from promptflow.exceptions import ErrorTarget, PromptflowException, UserErrorException
The provided code snippet includes necessary dependencies for implementing the `get_secret_input` function. Write a Python function `def get_secret_input(prompt, mask="*")` to solve the following problem:
Get secret input with mask printed on screen in CLI. Provide better handling for control characters: - Handle Ctrl-C as KeyboardInterrupt - Ignore control characters and print warning message.
Here is the function:
def get_secret_input(prompt, mask="*"):
"""Get secret input with mask printed on screen in CLI.
Provide better handling for control characters:
- Handle Ctrl-C as KeyboardInterrupt
- Ignore control characters and print warning message.
"""
if not isinstance(prompt, str):
raise TypeError(f"prompt must be a str, not ${type(prompt).__name__}")
if not isinstance(mask, str):
raise TypeError(f"mask argument must be a one-character str, not ${type(mask).__name__}")
if len(mask) != 1:
raise ValueError("mask argument must be a one-character str")
if sys.platform == "win32":
# For some reason, mypy reports that msvcrt doesn't have getch, ignore this warning:
from msvcrt import getch # type: ignore
else: # macOS and Linux
import termios
import tty
def getch():
fd = sys.stdin.fileno()
old_settings = termios.tcgetattr(fd)
try:
tty.setraw(sys.stdin.fileno())
ch = sys.stdin.read(1)
finally:
termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
return ch
secret_input = []
sys.stdout.write(prompt)
sys.stdout.flush()
while True:
key = ord(getch())
if key == 13: # Enter key pressed.
sys.stdout.write("\n")
return "".join(secret_input)
elif key == 3: # Ctrl-C pressed.
raise KeyboardInterrupt()
elif key in (8, 127): # Backspace/Del key erases previous output.
if len(secret_input) > 0:
# Erases previous character.
sys.stdout.write("\b \b") # \b doesn't erase the character, it just moves the cursor back.
sys.stdout.flush()
secret_input = secret_input[:-1]
elif 0 <= key <= 31:
msg = "\nThe last user input got ignored as it is control character."
print_yellow_warning(msg)
sys.stdout.write(prompt + mask * len(secret_input))
sys.stdout.flush()
else:
# display the mask character.
char = chr(key)
sys.stdout.write(mask)
sys.stdout.flush()
secret_input.append(char) | Get secret input with mask printed on screen in CLI. Provide better handling for control characters: - Handle Ctrl-C as KeyboardInterrupt - Ignore control characters and print warning message. |
4,210 | import argparse
import contextlib
import json
import os
import shutil
import sys
import traceback
from configparser import ConfigParser
from functools import wraps
from pathlib import Path
from typing import Dict, List, Optional, Tuple
import pydash
from dotenv import load_dotenv
from tabulate import tabulate
from promptflow._sdk._constants import AzureMLWorkspaceTriad, CLIListOutputFormat, EnvironmentVariables
from promptflow._sdk._telemetry import ActivityType, get_telemetry_logger, log_activity
from promptflow._sdk._utils import print_red_error, print_yellow_warning
from promptflow._utils.exception_utils import ExceptionPresenter
from promptflow._utils.logger_utils import get_cli_sdk_logger
from promptflow._utils.utils import is_in_ci_pipeline
from promptflow.exceptions import ErrorTarget, PromptflowException, UserErrorException
def _copy_to_flow(flow_path, source_file):
target = flow_path / source_file.name
action = "Overwriting" if target.exists() else "Creating"
if source_file.is_file():
print(f"{action} {source_file.name}...")
shutil.copy2(source_file, target)
else:
print(f"{action} {source_file.name} folder...")
shutil.copytree(source_file, target, dirs_exist_ok=True) | null |
4,211 | import argparse
import contextlib
import json
import os
import shutil
import sys
import traceback
from configparser import ConfigParser
from functools import wraps
from pathlib import Path
from typing import Dict, List, Optional, Tuple
import pydash
from dotenv import load_dotenv
from tabulate import tabulate
from promptflow._sdk._constants import AzureMLWorkspaceTriad, CLIListOutputFormat, EnvironmentVariables
from promptflow._sdk._telemetry import ActivityType, get_telemetry_logger, log_activity
from promptflow._sdk._utils import print_red_error, print_yellow_warning
from promptflow._utils.exception_utils import ExceptionPresenter
from promptflow._utils.logger_utils import get_cli_sdk_logger
from promptflow._utils.utils import is_in_ci_pipeline
from promptflow.exceptions import ErrorTarget, PromptflowException, UserErrorException
logger = get_cli_sdk_logger()
def pretty_print_dataframe_as_table(df: "DataFrame") -> None:
def _output_result_list_with_format(result_list: List[Dict], output_format: CLIListOutputFormat) -> None:
import pandas as pd
if output_format == CLIListOutputFormat.TABLE:
df = pd.DataFrame(result_list)
df.fillna("", inplace=True)
pretty_print_dataframe_as_table(df)
elif output_format == CLIListOutputFormat.JSON:
print(json.dumps(result_list, indent=4))
else:
warning_message = (
f"Unknown output format {output_format!r}, accepted values are 'json' and 'table';"
"will print using 'json'."
)
logger.warning(warning_message)
print(json.dumps(result_list, indent=4)) | null |
4,212 | import argparse
import contextlib
import json
import os
import shutil
import sys
import traceback
from configparser import ConfigParser
from functools import wraps
from pathlib import Path
from typing import Dict, List, Optional, Tuple
import pydash
from dotenv import load_dotenv
from tabulate import tabulate
from promptflow._sdk._constants import AzureMLWorkspaceTriad, CLIListOutputFormat, EnvironmentVariables
from promptflow._sdk._telemetry import ActivityType, get_telemetry_logger, log_activity
from promptflow._sdk._utils import print_red_error, print_yellow_warning
from promptflow._utils.exception_utils import ExceptionPresenter
from promptflow._utils.logger_utils import get_cli_sdk_logger
from promptflow._utils.utils import is_in_ci_pipeline
from promptflow.exceptions import ErrorTarget, PromptflowException, UserErrorException
def _get_cli_activity_name(cli, args):
activity_name = cli
if getattr(args, "action", None):
activity_name += f".{args.action}"
if getattr(args, "sub_action", None):
activity_name += f".{args.sub_action}"
return activity_name | null |
4,213 | import argparse
import contextlib
import json
import os
import shutil
import sys
import traceback
from configparser import ConfigParser
from functools import wraps
from pathlib import Path
from typing import Dict, List, Optional, Tuple
import pydash
from dotenv import load_dotenv
from tabulate import tabulate
from promptflow._sdk._constants import AzureMLWorkspaceTriad, CLIListOutputFormat, EnvironmentVariables
from promptflow._sdk._telemetry import ActivityType, get_telemetry_logger, log_activity
from promptflow._sdk._utils import print_red_error, print_yellow_warning
from promptflow._utils.exception_utils import ExceptionPresenter
from promptflow._utils.logger_utils import get_cli_sdk_logger
from promptflow._utils.utils import is_in_ci_pipeline
from promptflow.exceptions import ErrorTarget, PromptflowException, UserErrorException
def _try_delete_existing_run_record(run_name: str):
from promptflow._sdk._errors import RunNotFoundError
from promptflow._sdk._orm import RunInfo as ORMRun
try:
ORMRun.delete(run_name)
except RunNotFoundError:
pass | null |
4,214 | import argparse
from promptflow._sdk._constants import PROMPT_FLOW_DIR_NAME, PROMPT_FLOW_RUNS_DIR_NAME, CLIListOutputFormat, FlowType
def add_param_yes(parser):
parser.add_argument(
"-y",
"--yes",
"--assume-yes",
action="store_true",
help="Automatic yes to all prompts; assume 'yes' as answer to all prompts and run non-interactively.",
) | null |
4,215 | import argparse
from promptflow._sdk._constants import PROMPT_FLOW_DIR_NAME, PROMPT_FLOW_RUNS_DIR_NAME, CLIListOutputFormat, FlowType
def add_param_ua(parser):
# suppress user agent for now since it's only used in vscode extension
parser.add_argument("--user-agent", help=argparse.SUPPRESS) | null |
4,216 | import argparse
from promptflow._sdk._constants import PROMPT_FLOW_DIR_NAME, PROMPT_FLOW_RUNS_DIR_NAME, CLIListOutputFormat, FlowType
def add_param_flow_display_name(parser):
parser.add_argument("--flow", type=str, required=True, help="The flow name to create.") | null |
4,217 | import argparse
from promptflow._sdk._constants import PROMPT_FLOW_DIR_NAME, PROMPT_FLOW_RUNS_DIR_NAME, CLIListOutputFormat, FlowType
def add_param_entry(parser):
parser.add_argument("--entry", type=str, help="The entry file.") | null |
4,218 | import argparse
from promptflow._sdk._constants import PROMPT_FLOW_DIR_NAME, PROMPT_FLOW_RUNS_DIR_NAME, CLIListOutputFormat, FlowType
def add_param_function(parser):
parser.add_argument("--function", type=str, help="The function name in entry file.") | null |
4,219 | import argparse
from promptflow._sdk._constants import PROMPT_FLOW_DIR_NAME, PROMPT_FLOW_RUNS_DIR_NAME, CLIListOutputFormat, FlowType
class AppendToDictAction(argparse._AppendAction): # pylint: disable=protected-access
def __call__(self, parser, namespace, values, option_string=None):
action = self.get_action(values, option_string)
super(AppendToDictAction, self).__call__(parser, namespace, action, option_string)
def get_action(self, values, option_string): # pylint: disable=no-self-use
from promptflow._sdk._utils import strip_quotation
kwargs = {}
for item in values:
try:
key, value = strip_quotation(item).split("=", 1)
kwargs[key] = strip_quotation(value)
except ValueError:
raise Exception("Usage error: {} KEY=VALUE [KEY=VALUE ...]".format(option_string))
return kwargs
def add_param_prompt_template(parser):
parser.add_argument(
"--prompt-template", action=AppendToDictAction, help="The prompt template parameter and assignment.", nargs="+"
) | null |
4,220 | import argparse
from promptflow._sdk._constants import PROMPT_FLOW_DIR_NAME, PROMPT_FLOW_RUNS_DIR_NAME, CLIListOutputFormat, FlowType
class AppendToDictAction(argparse._AppendAction): # pylint: disable=protected-access
def __call__(self, parser, namespace, values, option_string=None):
action = self.get_action(values, option_string)
super(AppendToDictAction, self).__call__(parser, namespace, action, option_string)
def get_action(self, values, option_string): # pylint: disable=no-self-use
from promptflow._sdk._utils import strip_quotation
kwargs = {}
for item in values:
try:
key, value = strip_quotation(item).split("=", 1)
kwargs[key] = strip_quotation(value)
except ValueError:
raise Exception("Usage error: {} KEY=VALUE [KEY=VALUE ...]".format(option_string))
return kwargs
def add_param_set(parser):
parser.add_argument(
"--set",
dest="params_override",
action=AppendToDictAction,
help="Update an object by specifying a property path and value to set. Example: --set "
"property1.property2=<value>.",
nargs="+",
) | null |
4,221 | import argparse
from promptflow._sdk._constants import PROMPT_FLOW_DIR_NAME, PROMPT_FLOW_RUNS_DIR_NAME, CLIListOutputFormat, FlowType
class AppendToDictAction(argparse._AppendAction): # pylint: disable=protected-access
def __call__(self, parser, namespace, values, option_string=None):
action = self.get_action(values, option_string)
super(AppendToDictAction, self).__call__(parser, namespace, action, option_string)
def get_action(self, values, option_string): # pylint: disable=no-self-use
from promptflow._sdk._utils import strip_quotation
kwargs = {}
for item in values:
try:
key, value = strip_quotation(item).split("=", 1)
kwargs[key] = strip_quotation(value)
except ValueError:
raise Exception("Usage error: {} KEY=VALUE [KEY=VALUE ...]".format(option_string))
return kwargs
def add_param_set_positional(parser):
parser.add_argument(
"params_override",
action=AppendToDictAction,
help="Set an object by specifying a property path and value to set. Example: set "
"property1.property2=<value>.",
nargs="+",
) | null |
4,222 | import argparse
from promptflow._sdk._constants import PROMPT_FLOW_DIR_NAME, PROMPT_FLOW_RUNS_DIR_NAME, CLIListOutputFormat, FlowType
class AppendToDictAction(argparse._AppendAction): # pylint: disable=protected-access
def __call__(self, parser, namespace, values, option_string=None):
action = self.get_action(values, option_string)
super(AppendToDictAction, self).__call__(parser, namespace, action, option_string)
def get_action(self, values, option_string): # pylint: disable=no-self-use
from promptflow._sdk._utils import strip_quotation
kwargs = {}
for item in values:
try:
key, value = strip_quotation(item).split("=", 1)
kwargs[key] = strip_quotation(value)
except ValueError:
raise Exception("Usage error: {} KEY=VALUE [KEY=VALUE ...]".format(option_string))
return kwargs
def add_param_environment_variables(parser):
parser.add_argument(
"--environment-variables",
action=AppendToDictAction,
help="Environment variables to set by specifying a property path and value. Example: --environment-variable "
"key1='${my_connection.api_key}' key2='value2'. The value reference to connection keys will be resolved "
"to the actual value, and all environment variables specified will be set into os.environ.",
nargs="+",
) | null |
4,223 | import argparse
from promptflow._sdk._constants import PROMPT_FLOW_DIR_NAME, PROMPT_FLOW_RUNS_DIR_NAME, CLIListOutputFormat, FlowType
class AppendToDictAction(argparse._AppendAction): # pylint: disable=protected-access
def __call__(self, parser, namespace, values, option_string=None):
action = self.get_action(values, option_string)
super(AppendToDictAction, self).__call__(parser, namespace, action, option_string)
def get_action(self, values, option_string): # pylint: disable=no-self-use
from promptflow._sdk._utils import strip_quotation
kwargs = {}
for item in values:
try:
key, value = strip_quotation(item).split("=", 1)
kwargs[key] = strip_quotation(value)
except ValueError:
raise Exception("Usage error: {} KEY=VALUE [KEY=VALUE ...]".format(option_string))
return kwargs
def add_param_connections(parser):
parser.add_argument(
"--connections",
action=AppendToDictAction,
help="Overwrite node level connections with provided value. Example: --connections "
"node1.connection=test_llm_connection node1.deployment_name=gpt-35-turbo",
nargs="+",
) | null |
4,224 | import argparse
from promptflow._sdk._constants import PROMPT_FLOW_DIR_NAME, PROMPT_FLOW_RUNS_DIR_NAME, CLIListOutputFormat, FlowType
class AppendToDictAction(argparse._AppendAction): # pylint: disable=protected-access
def __call__(self, parser, namespace, values, option_string=None):
action = self.get_action(values, option_string)
super(AppendToDictAction, self).__call__(parser, namespace, action, option_string)
def get_action(self, values, option_string): # pylint: disable=no-self-use
from promptflow._sdk._utils import strip_quotation
kwargs = {}
for item in values:
try:
key, value = strip_quotation(item).split("=", 1)
kwargs[key] = strip_quotation(value)
except ValueError:
raise Exception("Usage error: {} KEY=VALUE [KEY=VALUE ...]".format(option_string))
return kwargs
def add_param_columns_mapping(parser):
parser.add_argument(
"--column-mapping",
action=AppendToDictAction,
help="Inputs column mapping, use ${data.xx} to refer to data columns, "
"use ${run.inputs.xx} to refer to referenced run's data columns. "
"and use ${run.outputs.xx} to refer to referenced run's output columns."
"Example: --column-mapping data1='${data.data1}' data2='${run.inputs.data2}' data3='${run.outputs.data3}'",
nargs="+",
) | null |
4,225 | import argparse
from promptflow._sdk._constants import PROMPT_FLOW_DIR_NAME, PROMPT_FLOW_RUNS_DIR_NAME, CLIListOutputFormat, FlowType
class AppendToDictAction(argparse._AppendAction): # pylint: disable=protected-access
def __call__(self, parser, namespace, values, option_string=None):
action = self.get_action(values, option_string)
super(AppendToDictAction, self).__call__(parser, namespace, action, option_string)
def get_action(self, values, option_string): # pylint: disable=no-self-use
from promptflow._sdk._utils import strip_quotation
kwargs = {}
for item in values:
try:
key, value = strip_quotation(item).split("=", 1)
kwargs[key] = strip_quotation(value)
except ValueError:
raise Exception("Usage error: {} KEY=VALUE [KEY=VALUE ...]".format(option_string))
return kwargs
def add_param_set_tool_extra_info(parser):
parser.add_argument(
"--set",
dest="extra_info",
action=AppendToDictAction,
help="Set extra information about the tool. Example: --set <key>=<value>.",
nargs="+",
) | null |
4,226 | import argparse
from promptflow._sdk._constants import PROMPT_FLOW_DIR_NAME, PROMPT_FLOW_RUNS_DIR_NAME, CLIListOutputFormat, FlowType
class FlowTestInputAction(AppendToDictAction): # pylint: disable=protected-access
def get_action(self, values, option_string): # pylint: disable=no-self-use
if len(values) == 1 and "=" not in values[0]:
from promptflow._utils.load_data import load_data
if not values[0].endswith(".jsonl"):
raise ValueError("Only support jsonl file as input.")
return load_data(local_path=values[0])[0]
else:
return super().get_action(values, option_string)
def add_param_inputs(parser):
parser.add_argument(
"--inputs",
action=FlowTestInputAction,
help="Input datas of file for the flow. Example: --inputs data1=data1_val data2=data2_val",
nargs="+",
) | null |
4,227 | import argparse
from promptflow._sdk._constants import PROMPT_FLOW_DIR_NAME, PROMPT_FLOW_RUNS_DIR_NAME, CLIListOutputFormat, FlowType
def add_param_env(parser):
parser.add_argument(
"--env",
type=str,
default=None,
help="The dotenv file path containing the environment variables to be used in the flow.",
) | null |
4,228 | import argparse
from promptflow._sdk._constants import PROMPT_FLOW_DIR_NAME, PROMPT_FLOW_RUNS_DIR_NAME, CLIListOutputFormat, FlowType
def add_param_output(parser):
parser.add_argument(
"-o",
"--output",
type=str,
help=(
f"The output directory to store the results. "
f"Default to be ~/{PROMPT_FLOW_DIR_NAME}/{PROMPT_FLOW_RUNS_DIR_NAME} if not specified."
),
) | null |
4,229 | import argparse
from promptflow._sdk._constants import PROMPT_FLOW_DIR_NAME, PROMPT_FLOW_RUNS_DIR_NAME, CLIListOutputFormat, FlowType
def add_param_overwrite(parser):
parser.add_argument("--overwrite", action="store_true", help="Overwrite the existing results.") | null |
4,230 | import argparse
from promptflow._sdk._constants import PROMPT_FLOW_DIR_NAME, PROMPT_FLOW_RUNS_DIR_NAME, CLIListOutputFormat, FlowType
def add_param_run_name(parser):
parser.add_argument("-n", "--name", required=True, type=str, help="Name of the run.") | null |
4,231 | import argparse
from promptflow._sdk._constants import PROMPT_FLOW_DIR_NAME, PROMPT_FLOW_RUNS_DIR_NAME, CLIListOutputFormat, FlowType
def add_param_connection_name(parser):
parser.add_argument("-n", "--name", type=str, help="Name of the connection to create.") | null |
4,232 | import argparse
from promptflow._sdk._constants import PROMPT_FLOW_DIR_NAME, PROMPT_FLOW_RUNS_DIR_NAME, CLIListOutputFormat, FlowType
MAX_LIST_CLI_RESULTS = 50
def add_param_max_results(parser):
parser.add_argument( # noqa: E731
"-r",
"--max-results",
dest="max_results",
type=int,
default=MAX_LIST_CLI_RESULTS,
help=f"Max number of results to return. Default is {MAX_LIST_CLI_RESULTS}.",
) | null |
4,233 | import argparse
from promptflow._sdk._constants import PROMPT_FLOW_DIR_NAME, PROMPT_FLOW_RUNS_DIR_NAME, CLIListOutputFormat, FlowType
def add_param_all_results(parser):
parser.add_argument( # noqa: E731
"--all-results",
action="store_true",
dest="all_results",
default=False,
help="Returns all results. Default to False.",
) | null |
4,234 | import argparse
from promptflow._sdk._constants import PROMPT_FLOW_DIR_NAME, PROMPT_FLOW_RUNS_DIR_NAME, CLIListOutputFormat, FlowType
def add_param_source(parser):
parser.add_argument("--source", type=str, required=True, help="The flow or run source to be used.")
def add_param_variant(parser):
parser.add_argument(
"--variant",
"-v",
type=str,
help="The variant to be used in flow, will use default variant if not specified.",
)
base_params = logging_params + [
add_param_ua,
]
def add_parser_build(subparsers, entity_name: str):
add_param_build_output = lambda parser: parser.add_argument( # noqa: E731
"--output", "-o", required=True, type=str, help="The destination folder path."
)
add_param_format = lambda parser: parser.add_argument( # noqa: E731
"--format", "-f", type=str, help="The format to build with.", choices=["docker", "executable"]
)
# this is a hidden parameter for `mldesigner compile` command
add_param_flow_only = lambda parser: parser.add_argument( # noqa: E731
"--flow-only",
action="store_true",
help=argparse.SUPPRESS,
)
add_params = [
add_param_source,
add_param_build_output,
add_param_format,
add_param_flow_only,
add_param_variant,
] + base_params
from promptflow._cli._utils import activate_action
description = f"Build a {entity_name} for further sharing or deployment."
activate_action(
name="build",
description=description,
epilog=f"pf {entity_name} build --source <source> --output <output> --format " f"docker|package",
add_params=add_params,
subparsers=subparsers,
action_param_name="sub_action",
help_message=description,
) | null |
4,235 | import argparse
from promptflow._sdk._constants import PROMPT_FLOW_DIR_NAME, PROMPT_FLOW_RUNS_DIR_NAME, CLIListOutputFormat, FlowType
def add_param_debug(parser):
parser.add_argument(
"-d",
"--debug",
action="store_true",
help="The flag to turn on debug mode for cli.",
) | null |
4,236 | import argparse
from promptflow._sdk._constants import PROMPT_FLOW_DIR_NAME, PROMPT_FLOW_RUNS_DIR_NAME, CLIListOutputFormat, FlowType
def add_param_verbose(parser):
parser.add_argument(
"--verbose",
action="store_true",
help="Increase logging verbosity. Use --debug for full debug logs.",
) | null |
4,237 | import argparse
from promptflow._sdk._constants import PROMPT_FLOW_DIR_NAME, PROMPT_FLOW_RUNS_DIR_NAME, CLIListOutputFormat, FlowType
class AppendToDictAction(argparse._AppendAction): # pylint: disable=protected-access
def __call__(self, parser, namespace, values, option_string=None):
action = self.get_action(values, option_string)
super(AppendToDictAction, self).__call__(parser, namespace, action, option_string)
def get_action(self, values, option_string): # pylint: disable=no-self-use
from promptflow._sdk._utils import strip_quotation
kwargs = {}
for item in values:
try:
key, value = strip_quotation(item).split("=", 1)
kwargs[key] = strip_quotation(value)
except ValueError:
raise Exception("Usage error: {} KEY=VALUE [KEY=VALUE ...]".format(option_string))
return kwargs
def add_param_config(parser):
parser.add_argument(
"--config",
nargs="+",
action=AppendToDictAction,
help=argparse.SUPPRESS,
) | null |
4,238 | import argparse
from promptflow._sdk._constants import PROMPT_FLOW_DIR_NAME, PROMPT_FLOW_RUNS_DIR_NAME, CLIListOutputFormat, FlowType
def add_param_archived_only(parser):
parser.add_argument(
"--archived-only",
action="store_true",
help="Only list archived records.",
) | null |
4,239 | import argparse
from promptflow._sdk._constants import PROMPT_FLOW_DIR_NAME, PROMPT_FLOW_RUNS_DIR_NAME, CLIListOutputFormat, FlowType
def add_param_include_archived(parser):
parser.add_argument(
"--include-archived",
action="store_true",
help="List both archived records and active records.",
) | null |
4,240 | import argparse
from promptflow._sdk._constants import PROMPT_FLOW_DIR_NAME, PROMPT_FLOW_RUNS_DIR_NAME, CLIListOutputFormat, FlowType
def add_param_output_format(parser):
parser.add_argument(
"-o",
"--output",
type=str,
default=CLIListOutputFormat.JSON,
help="Output format, accepted values are 'json' and 'table'. Default is 'json'.",
choices=[CLIListOutputFormat.TABLE, CLIListOutputFormat.JSON],
) | null |
4,241 | import argparse
from promptflow._sdk._constants import PROMPT_FLOW_DIR_NAME, PROMPT_FLOW_RUNS_DIR_NAME, CLIListOutputFormat, FlowType
def add_param_include_others(parser):
parser.add_argument(
"--include-others",
action="store_true",
help="Get records that are owned by all users.",
) | null |
4,242 | import argparse
from promptflow._sdk._constants import PROMPT_FLOW_DIR_NAME, PROMPT_FLOW_RUNS_DIR_NAME, CLIListOutputFormat, FlowType
def add_param_flow_type(parser):
parser.add_argument(
"--type",
type=str,
help=(
f"The type of the flow. Available values are {FlowType.get_all_values()}. "
f"Default to be None, which means all types included."
),
) | null |
4,243 | import argparse
from promptflow._sdk._constants import PROMPT_FLOW_DIR_NAME, PROMPT_FLOW_RUNS_DIR_NAME, CLIListOutputFormat, FlowType
def add_param_flow_name(parser):
parser.add_argument(
"-n",
"--name",
type=str,
required=True,
help="The name of the flow.",
) | null |
4,244 | from typing import List
from promptflow import log_metric, tool
The provided code snippet includes necessary dependencies for implementing the `aggregate` function. Write a Python function `def aggregate(processed_results: List[str])` to solve the following problem:
This tool aggregates the processed result of all lines and calculate the accuracy. Then log metric for the accuracy. :param processed_results: List of the output of line_process node.
Here is the function:
def aggregate(processed_results: List[str]):
"""
This tool aggregates the processed result of all lines and calculate the accuracy. Then log metric for the accuracy.
:param processed_results: List of the output of line_process node.
"""
# Add your aggregation logic here
# Aggregate the results of all lines and calculate the accuracy
aggregated_result = round((processed_results.count("Correct") / len(processed_results)), 2)
# Log metric the aggregate result
log_metric(key="accuracy", value=aggregated_result)
return aggregated_result | This tool aggregates the processed result of all lines and calculate the accuracy. Then log metric for the accuracy. :param processed_results: List of the output of line_process node. |
4,245 | from promptflow import tool
The provided code snippet includes necessary dependencies for implementing the `line_process` function. Write a Python function `def line_process(groundtruth: str, prediction: str)` to solve the following problem:
This tool processes the prediction of a single line and returns the processed result. :param groundtruth: the groundtruth of a single line. :param prediction: the prediction of a single line.
Here is the function:
def line_process(groundtruth: str, prediction: str):
"""
This tool processes the prediction of a single line and returns the processed result.
:param groundtruth: the groundtruth of a single line.
:param prediction: the prediction of a single line.
"""
# Add your line processing logic here
processed_result = "Correct" if groundtruth.lower() == prediction.lower() else "Incorrect"
return processed_result | This tool processes the prediction of a single line and returns the processed result. :param groundtruth: the groundtruth of a single line. :param prediction: the prediction of a single line. |
4,246 | from promptflow import tool
def my_python_tool(input1: str) -> str:
return "Prompt: " + input1 | null |
4,247 | from promptflow._cli._utils import get_client_for_cli
from promptflow.azure import PFClient
def _get_azure_pf_client(subscription_id, resource_group, workspace_name, debug=False):
ml_client = get_client_for_cli(
subscription_id=subscription_id, resource_group_name=resource_group, workspace_name=workspace_name
)
client = PFClient(ml_client=ml_client, logging_enable=debug)
return client | null |
4,248 | from pathlib import Path
from dotenv import dotenv_values
from promptflow._cli._params import add_param_connection_name, add_param_env, base_params
from promptflow._cli._utils import _set_workspace_argument_for_subparsers, activate_action, get_client_for_cli
from promptflow._utils.logger_utils import get_cli_sdk_logger
from promptflow.connections import CustomConnection
from promptflow.contracts.types import Secret
def add_connection_create(subparsers):
add_param_type = lambda parser: parser.add_argument( # noqa: E731
"--type",
type=str,
help='Type of the connection, Possible values include: "OpenAI", "AzureOpenAI", "Serp", "Bing", '
'"Custom", "AzureContentSafety", "CognitiveSearch", "SubstrateLLM',
)
add_params = [
_set_workspace_argument_for_subparsers,
add_param_connection_name,
add_param_type,
add_param_env,
] + base_params
activate_action(
name="create",
description="Create a connection for promptflow.",
epilog=None,
add_params=add_params,
subparsers=subparsers,
help_message="pf connection create",
action_param_name="sub_action",
)
def add_connection_get(subparsers):
add_params = [
_set_workspace_argument_for_subparsers,
add_param_connection_name,
add_param_env,
] + base_params
activate_action(
name="get",
description="Get a connection for promptflow.",
epilog=None,
add_params=add_params,
subparsers=subparsers,
help_message="pf connection get",
action_param_name="sub_action",
)
def add_connection_parser(subparsers):
connection_parser = subparsers.add_parser(
"connection", description="A CLI tool to manage connections for promptflow.", help="pf connection"
)
subparsers = connection_parser.add_subparsers()
add_connection_create(subparsers)
add_connection_get(subparsers)
connection_parser.set_defaults(action="connection") | null |
4,249 | from pathlib import Path
from dotenv import dotenv_values
from promptflow._cli._params import add_param_connection_name, add_param_env, base_params
from promptflow._cli._utils import _set_workspace_argument_for_subparsers, activate_action, get_client_for_cli
from promptflow._utils.logger_utils import get_cli_sdk_logger
from promptflow.connections import CustomConnection
from promptflow.contracts.types import Secret
def _get_conn_operations(subscription_id, resource_group, workspace_name):
from promptflow.azure import PFClient
client = get_client_for_cli(
subscription_id=subscription_id, workspace_name=workspace_name, resource_group_name=resource_group
)
pf = PFClient(ml_client=client)
return pf._connections
def create_conn(name, type, env, subscription_id, resource_group, workspace_name):
from promptflow._sdk.entities._connection import _Connection
if not Path(env).exists():
raise ValueError(f"Env file {env} does not exist.")
try:
dot_env = dotenv_values(env)
except Exception as e:
raise ValueError(f"Failed to load env file {env}. Error: {e}")
custom_configs = CustomConnection(**{k: Secret(v) for k, v in dot_env.items()})
connection = _Connection(name=name, type=type, custom_configs=custom_configs, connection_scope="WorkspaceShared")
conn_ops = _get_conn_operations(subscription_id, resource_group, workspace_name)
result = conn_ops.create_or_update(connection=connection)
print(result._to_yaml()) | null |
4,250 | from pathlib import Path
from dotenv import dotenv_values
from promptflow._cli._params import add_param_connection_name, add_param_env, base_params
from promptflow._cli._utils import _set_workspace_argument_for_subparsers, activate_action, get_client_for_cli
from promptflow._utils.logger_utils import get_cli_sdk_logger
from promptflow.connections import CustomConnection
from promptflow.contracts.types import Secret
def _get_conn_operations(subscription_id, resource_group, workspace_name):
from promptflow.azure import PFClient
client = get_client_for_cli(
subscription_id=subscription_id, workspace_name=workspace_name, resource_group_name=resource_group
)
pf = PFClient(ml_client=client)
return pf._connections
def get_conn(name, subscription_id, resource_group, workspace_name):
conn_ops = _get_conn_operations(subscription_id, resource_group, workspace_name)
result = conn_ops.get(name=name)
print(result._to_yaml()) | null |
4,251 | import argparse
import functools
import json
from typing import Dict, List, Optional
from promptflow._cli._params import (
add_param_all_results,
add_param_archived_only,
add_param_include_archived,
add_param_max_results,
add_param_output,
add_param_output_format,
add_param_overwrite,
add_param_run_name,
add_param_set,
base_params,
)
from promptflow._cli._pf._run import _parse_metadata_args, add_run_create_common, create_run
from promptflow._cli._pf_azure._utils import _get_azure_pf_client
from promptflow._cli._utils import (
_output_result_list_with_format,
_set_workspace_argument_for_subparsers,
activate_action,
pretty_print_dataframe_as_table,
)
from promptflow._sdk._constants import MAX_SHOW_DETAILS_RESULTS, ListViewType
from promptflow._sdk._errors import InvalidRunStatusError
from promptflow._sdk._utils import print_red_error
from promptflow.azure._restclient.flow_service_caller import FlowRequestException
def add_run_create_cloud(subparsers):
epilog = """
Example:
# Create a run with YAML file:
pfazure run create -f <yaml-filename>
# Create a run from flow directory and reference a run:
pfazure run create --flow <path-to-flow-directory> --data <path-to-data-file> --column-mapping groundtruth='${data.answer}' prediction='${run.outputs.category}' --run <run-name> --variant "${summarize_text_content.variant_0}" --stream
# Create a run from existing workspace flow
pfazure run create --flow azureml:<flow-name> --data <path-to-data-file> --column-mapping <key-value-pair>
# Create a run from existing registry flow
pfazure run create --flow azureml://registries/<registry-name>/models/<flow-name>/versions/<version> --data <path-to-data-file> --column-mapping <key-value-pair>
""" # noqa: E501
def add_param_data(parser):
# cloud pf can also accept remote data
parser.add_argument(
"--data", type=str, help="Local path to the data file or remote data. e.g. azureml:name:version."
)
add_param_runtime = lambda parser: parser.add_argument("--runtime", type=str, help=argparse.SUPPRESS) # noqa: E731
add_param_reset = lambda parser: parser.add_argument( # noqa: E731
"--reset-runtime", action="store_true", help=argparse.SUPPRESS
)
add_run_create_common(
subparsers,
[add_param_data, add_param_runtime, add_param_reset, _set_workspace_argument_for_subparsers],
epilog=epilog,
)
def add_parser_run_list(subparsers):
"""Add run list parser to the pfazure subparsers."""
epilog = """
Examples:
# List runs status:
pfazure run list
# List most recent 10 runs status:
pfazure run list --max-results 10
# List active and archived runs status:
pfazure run list --include-archived
# List archived runs status only:
pfazure run list --archived-only
# List all runs status as table:
pfazure run list --output table
"""
add_params = [
add_param_max_results,
add_param_archived_only,
add_param_include_archived,
add_param_output_format,
_set_workspace_argument_for_subparsers,
] + base_params
activate_action(
name="list",
description="A CLI tool to List all runs.",
epilog=epilog,
add_params=add_params,
subparsers=subparsers,
help_message="List runs in a workspace.",
action_param_name="sub_action",
)
def add_parser_run_stream(subparsers):
"""Add run stream parser to the pfazure subparsers."""
epilog = """
Example:
# Stream run logs:
pfazure run stream --name <name>
# Stream run logs with timeout:
pfazure run stream --name <name> --timeout 600
"""
def add_param_timeout(parser):
parser.add_argument(
"--timeout",
type=int,
default=600,
help=(
"Timeout in seconds. If the run stays in the same status and produce no new logs in a period "
"longer than the timeout value, the stream operation will abort. Default value is 600 seconds.",
),
)
add_params = [
add_param_run_name,
add_param_timeout,
_set_workspace_argument_for_subparsers,
] + base_params
activate_action(
name="stream",
description="A CLI tool to stream run logs to the console.",
epilog=epilog,
add_params=add_params,
subparsers=subparsers,
help_message="Stream run logs to the console.",
action_param_name="sub_action",
)
def add_parser_run_show(subparsers):
"""Add run show parser to the pfazure subparsers."""
epilog = """
Example:
# Show the status of a run:
pfazure run show --name <name>
"""
add_params = [
add_param_run_name,
_set_workspace_argument_for_subparsers,
] + base_params
activate_action(
name="show",
description="A CLI tool to show a run.",
epilog=epilog,
add_params=add_params,
subparsers=subparsers,
help_message="Show a run.",
action_param_name="sub_action",
)
def add_parser_run_show_details(subparsers):
"""Add run show details parser to the pfazure subparsers."""
epilog = """
Example:
# View input(s) and output(s) of a run:
pfazure run show-details --name <name>
"""
add_param_max_results = lambda parser: parser.add_argument( # noqa: E731
"-r",
"--max-results",
dest="max_results",
type=int,
default=MAX_SHOW_DETAILS_RESULTS,
help=f"Number of lines to show. Default is {MAX_SHOW_DETAILS_RESULTS}.",
)
add_params = [
add_param_max_results,
add_param_run_name,
add_param_all_results,
_set_workspace_argument_for_subparsers,
] + base_params
activate_action(
name="show-details",
description="A CLI tool to show a run details.",
epilog=epilog,
add_params=add_params,
subparsers=subparsers,
help_message="Show a run details.",
action_param_name="sub_action",
)
def add_parser_run_show_metrics(subparsers):
"""Add run show metrics parser to the pfazure subparsers."""
epilog = """
Example:
# View metrics of a run:
pfazure run show-metrics --name <name>
"""
add_params = [
add_param_run_name,
_set_workspace_argument_for_subparsers,
] + base_params
activate_action(
name="show-metrics",
description="A CLI tool to show run metrics.",
epilog=epilog,
add_params=add_params,
subparsers=subparsers,
help_message="Show run metrics.",
action_param_name="sub_action",
)
def add_parser_run_cancel(subparsers):
"""Add run cancel parser to the pfazure subparsers."""
epilog = """
Example:
# Cancel a run:
pfazure run cancel --name <name>
"""
add_params = [
add_param_run_name,
_set_workspace_argument_for_subparsers,
] + base_params
activate_action(
name="cancel",
description="A CLI tool to cancel a run.",
epilog=epilog,
add_params=add_params,
subparsers=subparsers,
help_message="Cancel a run.",
action_param_name="sub_action",
)
def add_parser_run_visualize(subparsers):
"""Add run visualize parser to the pfazure subparsers."""
epilog = """
Examples:
# Visualize a run:
pfazure run visualize -n <name>
# Visualize runs:
pfazure run visualize --names "<name1,name2>"
pfazure run visualize --names "<name1>, <name2>"
"""
add_param_name = lambda parser: parser.add_argument( # noqa: E731
"-n", "--names", type=str, required=True, help="Name of the runs, comma separated."
)
add_param_html_path = lambda parser: parser.add_argument( # noqa: E731
"--html-path", type=str, default=None, help=argparse.SUPPRESS
)
add_params = [
add_param_name,
add_param_html_path,
_set_workspace_argument_for_subparsers,
] + base_params
activate_action(
name="visualize",
description="A CLI tool to visualize a run.",
epilog=epilog,
add_params=add_params,
subparsers=subparsers,
help_message="Visualize a run.",
action_param_name="sub_action",
)
def add_parser_run_archive(subparsers):
"""Add run archive parser to the pfazure subparsers."""
epilog = """
Examples:
# Archive a run:
pfazure run archive -n <name>
"""
add_params = [
add_param_run_name,
_set_workspace_argument_for_subparsers,
] + base_params
activate_action(
name="archive",
description="A CLI tool to archive a run.",
epilog=epilog,
add_params=add_params,
subparsers=subparsers,
help_message="Archive a run.",
action_param_name="sub_action",
)
def add_parser_run_restore(subparsers):
"""Add run restore parser to the pfazure subparsers."""
epilog = """
Examples:
# Restore an archived run:
pfazure run restore -n <name>
"""
add_params = [
add_param_run_name,
_set_workspace_argument_for_subparsers,
] + base_params
activate_action(
name="restore",
description="A CLI tool to restore a run.",
epilog=epilog,
add_params=add_params,
subparsers=subparsers,
help_message="Restore a run.",
action_param_name="sub_action",
)
def add_parser_run_update(subparsers):
"""Add run update parser to the pfazure subparsers."""
epilog = """
Example:
# Update a run metadata:
pfazure run update --name <name> --set display_name="<display-name>" description="<description>" tags.key="<value>"
"""
add_params = [
add_param_run_name,
add_param_set,
_set_workspace_argument_for_subparsers,
] + base_params
activate_action(
name="update",
description="A CLI tool to update a run.",
epilog=epilog,
add_params=add_params,
subparsers=subparsers,
help_message="Update a run.",
action_param_name="sub_action",
)
def add_parser_run_download(subparsers):
"""Add run download parser to the pfazure subparsers."""
epilog = """
Example:
# Download a run data to local:
pfazure run download --name <name> --output <output-folder-path>
"""
add_params = [
add_param_run_name,
add_param_output,
add_param_overwrite,
_set_workspace_argument_for_subparsers,
] + base_params
activate_action(
name="download",
description="A CLI tool to download a run.",
epilog=epilog,
add_params=add_params,
subparsers=subparsers,
help_message="Download a run.",
action_param_name="sub_action",
)
The provided code snippet includes necessary dependencies for implementing the `add_parser_run` function. Write a Python function `def add_parser_run(subparsers)` to solve the following problem:
Add run parser to the pfazure subparsers.
Here is the function:
def add_parser_run(subparsers):
"""Add run parser to the pfazure subparsers."""
run_parser = subparsers.add_parser(
"run", description="A CLI tool to manage cloud runs for prompt flow.", help="Manage prompt flow runs."
)
subparsers = run_parser.add_subparsers()
add_run_create_cloud(subparsers)
add_parser_run_list(subparsers)
add_parser_run_stream(subparsers)
add_parser_run_show(subparsers)
add_parser_run_show_details(subparsers)
add_parser_run_show_metrics(subparsers)
add_parser_run_cancel(subparsers)
add_parser_run_visualize(subparsers)
add_parser_run_archive(subparsers)
add_parser_run_restore(subparsers)
add_parser_run_update(subparsers)
add_parser_run_download(subparsers)
run_parser.set_defaults(action="run") | Add run parser to the pfazure subparsers. |
4,252 | import argparse
import functools
import json
from typing import Dict, List, Optional
from promptflow._cli._params import (
add_param_all_results,
add_param_archived_only,
add_param_include_archived,
add_param_max_results,
add_param_output,
add_param_output_format,
add_param_overwrite,
add_param_run_name,
add_param_set,
base_params,
)
from promptflow._cli._pf._run import _parse_metadata_args, add_run_create_common, create_run
from promptflow._cli._pf_azure._utils import _get_azure_pf_client
from promptflow._cli._utils import (
_output_result_list_with_format,
_set_workspace_argument_for_subparsers,
activate_action,
pretty_print_dataframe_as_table,
)
from promptflow._sdk._constants import MAX_SHOW_DETAILS_RESULTS, ListViewType
from promptflow._sdk._errors import InvalidRunStatusError
from promptflow._sdk._utils import print_red_error
from promptflow.azure._restclient.flow_service_caller import FlowRequestException
def list_runs(
subscription_id,
resource_group,
workspace_name,
max_results,
archived_only,
include_archived,
output,
):
def show_run(subscription_id, resource_group, workspace_name, run_name):
def show_run_details(subscription_id, resource_group, workspace_name, run_name, max_results, all_results, debug=False):
def show_metrics(subscription_id, resource_group, workspace_name, run_name):
def stream_run(args: argparse.Namespace):
def visualize(
subscription_id: str,
resource_group: str,
workspace_name: str,
names: str,
html_path: Optional[str] = None,
debug: bool = False,
):
def archive_run(
subscription_id: str,
resource_group: str,
workspace_name: str,
run_name: str,
):
def restore_run(
subscription_id: str,
resource_group: str,
workspace_name: str,
run_name: str,
):
def update_run(
subscription_id: str,
resource_group: str,
workspace_name: str,
run_name: str,
params: List[Dict[str, str]],
):
def download_run(args: argparse.Namespace):
def cancel_run(args: argparse.Namespace):
def dispatch_run_commands(args: argparse.Namespace):
if args.sub_action == "create":
pf = _get_azure_pf_client(args.subscription, args.resource_group, args.workspace_name, debug=args.debug)
create_run(
create_func=functools.partial(
pf.runs.create_or_update, runtime=args.runtime, reset_runtime=args.reset_runtime
),
resume_func=pf.runs._create_by_resume_from,
args=args,
)
elif args.sub_action == "list":
list_runs(
args.subscription,
args.resource_group,
args.workspace_name,
args.max_results,
args.archived_only,
args.include_archived,
args.output,
)
elif args.sub_action == "show":
show_run(args.subscription, args.resource_group, args.workspace_name, args.name)
elif args.sub_action == "show-details":
show_run_details(
args.subscription,
args.resource_group,
args.workspace_name,
args.name,
args.max_results,
args.all_results,
args.debug,
)
elif args.sub_action == "show-metrics":
show_metrics(args.subscription, args.resource_group, args.workspace_name, args.name)
elif args.sub_action == "stream":
stream_run(args)
elif args.sub_action == "visualize":
visualize(
args.subscription,
args.resource_group,
args.workspace_name,
args.names,
args.html_path,
args.debug,
)
elif args.sub_action == "archive":
archive_run(args.subscription, args.resource_group, args.workspace_name, args.name)
elif args.sub_action == "restore":
restore_run(args.subscription, args.resource_group, args.workspace_name, args.name)
elif args.sub_action == "update":
update_run(args.subscription, args.resource_group, args.workspace_name, args.name, params=args.params_override)
elif args.sub_action == "download":
download_run(args)
elif args.sub_action == "cancel":
cancel_run(args) | null |
4,253 | import argparse
import json
from typing import Dict, List
from promptflow._cli._params import (
add_param_archived_only,
add_param_flow_name,
add_param_flow_type,
add_param_include_archived,
add_param_include_others,
add_param_max_results,
add_param_output_format,
add_param_set,
base_params,
)
from promptflow._cli._pf_azure._utils import _get_azure_pf_client
from promptflow._cli._utils import (
_output_result_list_with_format,
_set_workspace_argument_for_subparsers,
activate_action,
)
from promptflow._sdk._constants import AzureFlowSource, get_list_view_type
from promptflow.azure._entities._flow import Flow
def add_parser_flow_create(subparsers):
"""Add flow create parser to the pf flow subparsers."""
epilog = """
Use "--set" to set flow properties like:
display_name: Flow display name that will be created in remote. Default to be flow folder name + timestamp if not specified.
type: Flow type. Default to be "standard" if not specified. Available types are: "standard", "evaluation", "chat".
description: Flow description. e.g. "--set description=<description>."
tags: Flow tags. e.g. "--set tags.key1=value1 tags.key2=value2."
Note:
In "--set" parameter, if the key name consists of multiple words, use snake-case instead of kebab-case. e.g. "--set display_name=<flow-display-name>"
Examples:
# Create a flow to azure portal with local flow folder.
pfazure flow create --flow <flow-folder-path> --set display_name=<flow-display-name> type=<flow-type>
# Create a flow with more properties
pfazure flow create --flow <flow-folder-path> --set display_name=<flow-display-name> type=<flow-type> description=<flow-description> tags.key1=value1 tags.key2=value2
""" # noqa: E501
add_param_source = lambda parser: parser.add_argument( # noqa: E731
"--flow", type=str, help="Source folder of the flow."
)
add_params = [
add_param_source,
add_param_set,
_set_workspace_argument_for_subparsers,
] + base_params
activate_action(
name="create",
description="A CLI tool to create a flow to Azure.",
epilog=epilog,
add_params=add_params,
subparsers=subparsers,
help_message="Create a flow to Azure with local flow folder.",
action_param_name="sub_action",
)
def add_parser_flow_update(subparsers):
"""Add flow update parser to the pf flow subparsers."""
epilog = """
Use "--set" to set flow properties that you want to update. Supported properties are: [display_name, description, tags].
Note:
1. In "--set" parameter, if the key name consists of multiple words, use snake-case instead of kebab-case. e.g. "--set display_name=<flow-display-name>"
2. Parameter flow is required to update a flow. It's a guid that can be found from 2 ways:
a. After creating a flow to azure, it can be found in the printed message in "name" attribute.
b. Open a flow in azure portal, the guid is in the url. e.g. https://ml.azure.com/prompts/flow/<workspace-id>/<flow-name>/xxx
Examples:
# Update a flow display name
pfazure flow update --flow <flow-name> --set display_name=<flow-display-name>
""" # noqa: E501
add_param_source = lambda parser: parser.add_argument( # noqa: E731
"--flow", type=str, help="Flow name to be updated which is a guid."
)
add_params = [
add_param_source,
add_param_set,
_set_workspace_argument_for_subparsers,
] + base_params
activate_action(
name="update",
description="A CLI tool to update a flow's metadata on Azure.",
epilog=epilog,
add_params=add_params,
subparsers=subparsers,
help_message="Update a flow's metadata on azure.",
action_param_name="sub_action",
)
def add_parser_flow_list(subparsers):
"""Add flow list parser to the pf flow subparsers."""
epilog = """
Examples:
# List flows:
pfazure flow list
# List most recent 10 runs status:
pfazure flow list --max-results 10
# List active and archived flows:
pfazure flow list --include-archived
# List archived flow only:
pfazure flow list --archived-only
# List all flows as table:
pfazure flow list --output table
# List flows with specific type:
pfazure flow list --type standard
# List flows that are owned by all users:
pfazure flow list --include-others
"""
add_params = [
add_param_max_results,
add_param_include_others,
add_param_flow_type,
add_param_archived_only,
add_param_include_archived,
add_param_output_format,
_set_workspace_argument_for_subparsers,
] + base_params
activate_action(
name="list",
description="List flows for promptflow.",
epilog=epilog,
add_params=add_params,
subparsers=subparsers,
help_message="pfazure flow list",
action_param_name="sub_action",
)
def add_parser_flow_show(subparsers):
"""Add flow get parser to the pf flow subparsers."""
epilog = """
Examples:
# Get flow:
pfazure flow show --name <flow-name>
"""
add_params = [add_param_flow_name, _set_workspace_argument_for_subparsers] + base_params
activate_action(
name="show",
description="Show a flow from Azure.",
epilog=epilog,
add_params=add_params,
subparsers=subparsers,
help_message="pfazure flow show",
action_param_name="sub_action",
)
The provided code snippet includes necessary dependencies for implementing the `add_parser_flow` function. Write a Python function `def add_parser_flow(subparsers)` to solve the following problem:
Add flow parser to the pf subparsers.
Here is the function:
def add_parser_flow(subparsers):
"""Add flow parser to the pf subparsers."""
flow_parser = subparsers.add_parser(
"flow",
description="Manage flows for prompt flow.",
help="Manage prompt flows.",
)
flow_subparsers = flow_parser.add_subparsers()
add_parser_flow_create(flow_subparsers)
add_parser_flow_update(flow_subparsers)
add_parser_flow_show(flow_subparsers)
add_parser_flow_list(flow_subparsers)
flow_parser.set_defaults(action="flow") | Add flow parser to the pf subparsers. |
4,254 | import argparse
import json
from typing import Dict, List
from promptflow._cli._params import (
add_param_archived_only,
add_param_flow_name,
add_param_flow_type,
add_param_include_archived,
add_param_include_others,
add_param_max_results,
add_param_output_format,
add_param_set,
base_params,
)
from promptflow._cli._pf_azure._utils import _get_azure_pf_client
from promptflow._cli._utils import (
_output_result_list_with_format,
_set_workspace_argument_for_subparsers,
activate_action,
)
from promptflow._sdk._constants import AzureFlowSource, get_list_view_type
from promptflow.azure._entities._flow import Flow
The provided code snippet includes necessary dependencies for implementing the `add_parser_flow_download` function. Write a Python function `def add_parser_flow_download(subparsers)` to solve the following problem:
Add flow download parser to the pf flow subparsers.
Here is the function:
def add_parser_flow_download(subparsers):
"""Add flow download parser to the pf flow subparsers."""
add_param_source = lambda parser: parser.add_argument( # noqa: E731
"--source", type=str, help="The flow folder path on file share to download."
)
add_param_destination = lambda parser: parser.add_argument( # noqa: E731
"--destination", "-d", type=str, help="The destination folder path to download."
)
add_params = [
_set_workspace_argument_for_subparsers,
add_param_source,
add_param_destination,
] + base_params
activate_action(
name="download",
description="Download a flow from file share to local.",
epilog=None,
add_params=add_params,
subparsers=subparsers,
help_message="pf flow download",
action_param_name="sub_action",
) | Add flow download parser to the pf flow subparsers. |
4,255 | import argparse
import json
from typing import Dict, List
from promptflow._cli._params import (
add_param_archived_only,
add_param_flow_name,
add_param_flow_type,
add_param_include_archived,
add_param_include_others,
add_param_max_results,
add_param_output_format,
add_param_set,
base_params,
)
from promptflow._cli._pf_azure._utils import _get_azure_pf_client
from promptflow._cli._utils import (
_output_result_list_with_format,
_set_workspace_argument_for_subparsers,
activate_action,
)
from promptflow._sdk._constants import AzureFlowSource, get_list_view_type
from promptflow.azure._entities._flow import Flow
def create_flow(args: argparse.Namespace):
def update_flow(args: argparse.Namespace):
def show_flow(args: argparse.Namespace):
def list_flows(args: argparse.Namespace):
def dispatch_flow_commands(args: argparse.Namespace):
if args.sub_action == "create":
create_flow(args)
elif args.sub_action == "show":
show_flow(args)
elif args.sub_action == "list":
list_flows(args)
elif args.sub_action == "update":
update_flow(args) | null |
4,256 | import argparse
import json
from typing import Dict, List
from promptflow._cli._params import (
add_param_archived_only,
add_param_flow_name,
add_param_flow_type,
add_param_include_archived,
add_param_include_others,
add_param_max_results,
add_param_output_format,
add_param_set,
base_params,
)
from promptflow._cli._pf_azure._utils import _get_azure_pf_client
from promptflow._cli._utils import (
_output_result_list_with_format,
_set_workspace_argument_for_subparsers,
activate_action,
)
from promptflow._sdk._constants import AzureFlowSource, get_list_view_type
from promptflow.azure._entities._flow import Flow
def _get_flow_operation(subscription_id, resource_group, workspace_name):
pf_client = _get_azure_pf_client(subscription_id, resource_group, workspace_name)
return pf_client._flows | null |
4,257 | import json
import time
from promptflow._cli._pf.help import show_privacy_statement, show_welcome_message
from promptflow._cli._user_agent import USER_AGENT
from promptflow._cli._utils import _get_cli_activity_name, get_client_info_for_cli, cli_exception_and_telemetry_handler
import argparse
import logging
import sys
from promptflow._cli._pf_azure._flow import add_parser_flow, dispatch_flow_commands
from promptflow._cli._pf_azure._run import add_parser_run, dispatch_run_commands
from promptflow._sdk._utils import ( # noqa: E402
get_promptflow_sdk_version,
print_pf_version,
setup_user_agent_to_operation_context,
)
from promptflow._utils.logger_utils import get_cli_sdk_logger
def run_command(args):
# Log the init finish time
init_finish_time = time.perf_counter()
try:
# --verbose, enable info logging
if hasattr(args, "verbose") and args.verbose:
for handler in logger.handlers:
handler.setLevel(logging.INFO)
# --debug, enable debug logging
if hasattr(args, "debug") and args.debug:
for handler in logger.handlers:
handler.setLevel(logging.DEBUG)
if args.version:
print_pf_version()
elif args.action == "run":
dispatch_run_commands(args)
elif args.action == "flow":
dispatch_flow_commands(args)
except KeyboardInterrupt as ex:
logger.debug("Keyboard interrupt is captured.")
raise ex
except SystemExit as ex: # some code directly call sys.exit, this is to make sure command metadata is logged
exit_code = ex.code if ex.code is not None else 1
logger.debug(f"Code directly call sys.exit with code {exit_code}")
raise ex
except Exception as ex:
logger.debug(f"Command {args} execute failed. {str(ex)}")
raise ex
finally:
# Log the invoke finish time
invoke_finish_time = time.perf_counter()
logger.info(
"Command ran in %.3f seconds (init: %.3f, invoke: %.3f)",
invoke_finish_time - start_time,
init_finish_time - start_time,
invoke_finish_time - init_finish_time,
)
def get_parser_args(argv):
parser = argparse.ArgumentParser(
prog="pfazure",
formatter_class=argparse.RawDescriptionHelpFormatter,
description="pfazure: manage prompt flow assets in azure. Learn more: https://microsoft.github.io/promptflow.",
)
parser.add_argument(
"-v", "--version", dest="version", action="store_true", help="show current CLI version and exit"
)
subparsers = parser.add_subparsers()
add_parser_run(subparsers)
add_parser_flow(subparsers)
return parser.prog, parser.parse_args(argv)
def _get_workspace_info(args):
try:
subscription_id, resource_group_name, workspace_name = get_client_info_for_cli(
subscription_id=args.subscription,
resource_group_name=args.resource_group,
workspace_name=args.workspace_name,
)
return {
"subscription_id": subscription_id,
"resource_group_name": resource_group_name,
"workspace_name": workspace_name,
}
except Exception:
# fall back to empty dict if workspace info is not available
return {}
The provided code snippet includes necessary dependencies for implementing the `entry` function. Write a Python function `def entry(argv)` to solve the following problem:
Control plane CLI tools for promptflow cloud version.
Here is the function:
def entry(argv):
"""
Control plane CLI tools for promptflow cloud version.
"""
prog, args = get_parser_args(argv)
if hasattr(args, "user_agent"):
setup_user_agent_to_operation_context(args.user_agent)
custom_dimensions = _get_workspace_info(args)
activity_name = _get_cli_activity_name(cli=prog, args=args)
cli_exception_and_telemetry_handler(run_command, activity_name, custom_dimensions)(args) | Control plane CLI tools for promptflow cloud version. |
4,258 | import argparse
import json
from functools import partial
from promptflow._cli._params import (
add_param_all_results,
add_param_max_results,
add_param_set,
add_param_yes,
base_params,
)
from promptflow._cli._utils import activate_action, confirm, get_secret_input, print_yellow_warning
from promptflow._sdk._constants import MAX_LIST_CLI_RESULTS
from promptflow._sdk._load_functions import load_connection
from promptflow._sdk._pf_client import PFClient
from promptflow._sdk.entities._connection import _Connection
from promptflow._utils.logger_utils import get_cli_sdk_logger
from promptflow._utils.yaml_utils import load_yaml
def add_connection_create(subparsers):
def add_connection_update(subparsers):
def add_connection_show(subparsers):
def add_connection_delete(subparsers):
def add_connection_list(subparsers):
def add_connection_parser(subparsers):
connection_parser = subparsers.add_parser(
"connection",
description="""A CLI tool to manage connections for promptflow.
Your secrets will be encrypted using AES(Advanced Encryption Standard) technology.""", # noqa: E501
help="pf connection",
)
subparsers = connection_parser.add_subparsers()
add_connection_create(subparsers)
add_connection_update(subparsers)
add_connection_show(subparsers)
add_connection_list(subparsers)
add_connection_delete(subparsers)
connection_parser.set_defaults(action="connection") | null |
4,259 | import argparse
import json
from functools import partial
from promptflow._cli._params import (
add_param_all_results,
add_param_max_results,
add_param_set,
add_param_yes,
base_params,
)
from promptflow._cli._utils import activate_action, confirm, get_secret_input, print_yellow_warning
from promptflow._sdk._constants import MAX_LIST_CLI_RESULTS
from promptflow._sdk._load_functions import load_connection
from promptflow._sdk._pf_client import PFClient
from promptflow._sdk.entities._connection import _Connection
from promptflow._utils.logger_utils import get_cli_sdk_logger
from promptflow._utils.yaml_utils import load_yaml
def _get_pf_client():
def validate_and_interactive_get_secrets(connection, is_update=False):
def _upsert_connection_from_file(file, params_override=None):
# Note: This function is used for pfutil, do not edit it.
params_override = params_override or []
params_override.append(load_yaml(file))
connection = load_connection(source=file, params_override=params_override)
existing_connection = _get_pf_client().connections.get(connection.name, raise_error=False)
if existing_connection:
connection = _Connection._load(data=existing_connection._to_dict(), params_override=params_override)
validate_and_interactive_get_secrets(connection, is_update=True)
# Set the secrets not scrubbed, as _to_dict() dump scrubbed connections.
connection._secrets = existing_connection._secrets
else:
validate_and_interactive_get_secrets(connection)
connection = _get_pf_client().connections.create_or_update(connection)
return connection | null |
4,260 | import argparse
import json
from functools import partial
from promptflow._cli._params import (
add_param_all_results,
add_param_max_results,
add_param_set,
add_param_yes,
base_params,
)
from promptflow._cli._utils import activate_action, confirm, get_secret_input, print_yellow_warning
from promptflow._sdk._constants import MAX_LIST_CLI_RESULTS
from promptflow._sdk._load_functions import load_connection
from promptflow._sdk._pf_client import PFClient
from promptflow._sdk.entities._connection import _Connection
from promptflow._utils.logger_utils import get_cli_sdk_logger
from promptflow._utils.yaml_utils import load_yaml
def create_connection(file_path, params_override=None, name=None):
params_override = params_override or []
if name:
params_override.append({"name": name})
connection = load_connection(source=file_path, params_override=params_override)
existing_connection = _get_pf_client().connections.get(connection.name, raise_error=False)
if existing_connection:
logger.warning(f"Connection with name {connection.name} already exists. Updating it.")
# Note: We don't set the existing secret back here, let user input the secrets.
validate_and_interactive_get_secrets(connection)
connection = _get_pf_client().connections.create_or_update(connection)
print(json.dumps(connection._to_dict(), indent=4))
def show_connection(name):
connection = _get_pf_client().connections.get(name)
print(json.dumps(connection._to_dict(), indent=4))
def list_connection(max_results=MAX_LIST_CLI_RESULTS, all_results=False):
connections = _get_pf_client().connections.list(max_results, all_results)
print(json.dumps([connection._to_dict() for connection in connections], indent=4))
def update_connection(name, params_override=None):
params_override = params_override or []
existing_connection = _get_pf_client().connections.get(name)
connection = _Connection._load(data=existing_connection._to_dict(), params_override=params_override)
validate_and_interactive_get_secrets(connection, is_update=True)
# Set the secrets not scrubbed, as _to_dict() dump scrubbed connections.
connection._secrets = existing_connection._secrets
connection = _get_pf_client().connections.create_or_update(connection)
print(json.dumps(connection._to_dict(), indent=4))
def delete_connection(name, skip_confirm: bool = False):
if confirm("Are you sure you want to perform this operation?", skip_confirm):
_get_pf_client().connections.delete(name)
else:
print("The delete operation was canceled.")
def dispatch_connection_commands(args: argparse.Namespace):
if args.sub_action == "create":
create_connection(args.file, args.params_override, args.name)
elif args.sub_action == "show":
show_connection(args.name)
elif args.sub_action == "list":
list_connection(args.max_results, args.all_results)
elif args.sub_action == "update":
update_connection(args.name, args.params_override)
elif args.sub_action == "delete":
delete_connection(args.name, args.yes) | null |
4,261 | import argparse
import json
from typing import Callable, Dict, List, Optional, Tuple
from promptflow._cli._params import (
add_param_all_results,
add_param_archived_only,
add_param_columns_mapping,
add_param_connections,
add_param_environment_variables,
add_param_include_archived,
add_param_max_results,
add_param_output_format,
add_param_run_name,
add_param_set,
add_param_yes,
add_parser_build,
base_params,
)
from promptflow._cli._utils import (
_output_result_list_with_format,
activate_action,
confirm,
list_of_dict_to_dict,
list_of_dict_to_nested_dict,
pretty_print_dataframe_as_table,
)
from promptflow._sdk._constants import MAX_SHOW_DETAILS_RESULTS, get_list_view_type
from promptflow._sdk._load_functions import load_run
from promptflow._sdk._pf_client import PFClient
from promptflow._sdk._run_functions import _create_run, _resume_run
from promptflow._sdk._utils import safe_parse_object_list
from promptflow._sdk.entities import Run
from promptflow._utils.logger_utils import get_cli_sdk_logger
from promptflow.exceptions import UserErrorException
def add_run_create(subparsers):
epilog = """
Examples:
# Create a run with YAML file:
pf run create -f <yaml-filename>
# Create a run with YAML file and replace another data in the YAML file:
pf run create -f <yaml-filename> --data <path-to-new-data-file-relative-to-yaml-file>
# Create a run from flow directory and reference a run:
pf run create --flow <path-to-flow-directory> --data <path-to-data-file> --column-mapping groundtruth='${data.answer}' prediction='${run.outputs.category}' --run <run-name> --variant "${summarize_text_content.variant_0}" --stream # noqa: E501
# Create a run from an existing run record folder:
pf run create --source <path-to-run-folder>
# Create a run resume from an existing run:
pf run create --resume-from <run-name>
# Create a run resume from an existing run, set the name, display_name, description and tags:
pf run create --resume-from <run-name> --name <new-run-name> --set display_name='A new run' description='my run description' tags.Type=Test
"""
# data for pf has different help doc than pfazure
def add_param_data(parser):
parser.add_argument(
"--data",
type=str,
help="Local path to the data file." "If --file is provided, this path should be relative path to the file.",
)
def add_param_source(parser):
parser.add_argument("--source", type=str, help="Local path to the existing run record folder.")
add_run_create_common(subparsers, [add_param_data, add_param_source], epilog=epilog)
def add_run_update(subparsers):
epilog = """
Example:
# Update a run metadata:
pf run update --name <name> --set display_name="<display-name>" description="<description>" tags.key="<value>"
"""
add_params = [
add_param_run_name,
add_param_set,
] + base_params
activate_action(
name="update",
description=None,
epilog=epilog,
add_params=add_params,
subparsers=subparsers,
help_message="Update a run metadata, including display name, description and tags.",
action_param_name="sub_action",
)
def add_run_stream(subparsers):
epilog = """
Example:
# Stream run logs:
pf run stream --name <name>
"""
add_params = [add_param_run_name] + base_params
activate_action(
name="stream",
description=None,
epilog=epilog,
add_params=add_params,
subparsers=subparsers,
help_message="Stream run logs to the console.",
action_param_name="sub_action",
)
def add_run_list(subparsers):
epilog = """
Examples:
# List runs status:
pf run list
# List most recent 10 runs status:
pf run list --max-results 10
# List active and archived runs status:
pf run list --include-archived
# List archived runs status only:
pf run list --archived-only
# List all runs status:
pf run list --all-results
# List all runs status as table:
pf run list --output table
"""
add_params = [
add_param_max_results,
add_param_all_results,
add_param_archived_only,
add_param_include_archived,
add_param_output_format,
] + base_params
activate_action(
name="list",
description=None,
epilog=epilog,
add_params=add_params,
subparsers=subparsers,
help_message="List runs.",
action_param_name="sub_action",
)
def add_run_show(subparsers):
epilog = """
Example:
# Show the status of a run:
pf run show --name <name>
"""
add_params = [add_param_run_name] + base_params
activate_action(
name="show",
description=None,
epilog=epilog,
add_params=add_params,
subparsers=subparsers,
help_message="Show details for a run.",
action_param_name="sub_action",
)
def add_run_show_details(subparsers):
epilog = """
Example:
# View input(s) and output(s) of a run:
pf run show-details --name <name>
"""
add_param_max_results = lambda parser: parser.add_argument( # noqa: E731
"-r",
"--max-results",
dest="max_results",
type=int,
default=MAX_SHOW_DETAILS_RESULTS,
help=f"Number of lines to show. Default is {MAX_SHOW_DETAILS_RESULTS}.",
)
add_params = [add_param_max_results, add_param_run_name, add_param_all_results] + base_params
activate_action(
name="show-details",
description=None,
epilog=epilog,
add_params=add_params,
subparsers=subparsers,
help_message="Preview a run's input(s) and output(s).",
action_param_name="sub_action",
)
def add_run_show_metrics(subparsers):
epilog = """
Example:
# View metrics of a run:
pf run show-metrics --name <name>
"""
add_params = [add_param_run_name] + base_params
activate_action(
name="show-metrics",
description=None,
epilog=epilog,
add_params=add_params,
subparsers=subparsers,
help_message="Print run metrics to the console.",
action_param_name="sub_action",
)
def add_run_visualize(subparsers):
epilog = """
Examples:
# Visualize a run:
pf run visualize -n <name>
# Visualize runs:
pf run visualize --names "<name1,name2>"
pf run visualize --names "<name1>, <name2>"
"""
add_param_name = lambda parser: parser.add_argument( # noqa: E731
"-n", "--names", type=str, required=True, help="Name of the runs, comma separated."
)
add_param_html_path = lambda parser: parser.add_argument( # noqa: E731
"--html-path", type=str, default=None, help=argparse.SUPPRESS
)
add_params = [add_param_name, add_param_html_path] + base_params
activate_action(
name="visualize",
description=None,
epilog=epilog,
add_params=add_params,
subparsers=subparsers,
help_message="Visualize a run.",
action_param_name="sub_action",
)
def add_run_delete(subparsers):
epilog = """
Example:
# Caution: pf run delete is irreversible.
# This operation will delete the run permanently from your local disk.
# Both run entity and output data will be deleted.
# Delete a run:
pf run delete -n "<name>"
"""
add_params = [add_param_run_name, add_param_yes] + base_params
activate_action(
name="delete",
description=None,
epilog=epilog,
add_params=add_params,
subparsers=subparsers,
help_message="Delete a run irreversible.",
action_param_name="sub_action",
)
def add_run_archive(subparsers):
epilog = """
Example:
# Archive a run:
pf run archive --name <name>
"""
add_params = [add_param_run_name] + base_params
activate_action(
name="archive",
description=None,
epilog=epilog,
add_params=add_params,
subparsers=subparsers,
help_message="Archive a run.",
action_param_name="sub_action",
)
def add_run_restore(subparsers):
epilog = """
Example:
# Restore an archived run:
pf run restore --name <name>
"""
add_params = [add_param_run_name] + base_params
activate_action(
name="restore",
description=None,
epilog=epilog,
add_params=add_params,
subparsers=subparsers,
help_message="Restore an archived run.",
action_param_name="sub_action",
)
def add_run_parser(subparsers):
run_parser = subparsers.add_parser("run", description="A CLI tool to manage runs for prompt flow.", help="pf run")
subparsers = run_parser.add_subparsers()
add_run_create(subparsers)
# add_run_cancel(subparsers)
add_run_update(subparsers)
add_run_stream(subparsers)
add_run_list(subparsers)
add_run_show(subparsers)
add_run_show_details(subparsers)
add_run_show_metrics(subparsers)
add_run_visualize(subparsers)
add_run_archive(subparsers)
add_run_restore(subparsers)
add_run_delete(subparsers)
add_parser_build(subparsers, "run")
run_parser.set_defaults(action="run") | null |
4,262 | import argparse
import json
from typing import Callable, Dict, List, Optional, Tuple
from promptflow._cli._params import (
add_param_all_results,
add_param_archived_only,
add_param_columns_mapping,
add_param_connections,
add_param_environment_variables,
add_param_include_archived,
add_param_max_results,
add_param_output_format,
add_param_run_name,
add_param_set,
add_param_yes,
add_parser_build,
base_params,
)
from promptflow._cli._utils import (
_output_result_list_with_format,
activate_action,
confirm,
list_of_dict_to_dict,
list_of_dict_to_nested_dict,
pretty_print_dataframe_as_table,
)
from promptflow._sdk._constants import MAX_SHOW_DETAILS_RESULTS, get_list_view_type
from promptflow._sdk._load_functions import load_run
from promptflow._sdk._pf_client import PFClient
from promptflow._sdk._run_functions import _create_run, _resume_run
from promptflow._sdk._utils import safe_parse_object_list
from promptflow._sdk.entities import Run
from promptflow._utils.logger_utils import get_cli_sdk_logger
from promptflow.exceptions import UserErrorException
def add_run_cancel(subparsers):
epilog = """
Example:
# Cancel a run:
pf run cancel --name <name>
"""
add_params = [add_param_run_name] + base_params
activate_action(
name="cancel",
description=None,
epilog=epilog,
add_params=add_params,
subparsers=subparsers,
help_message="Cancel a run.",
action_param_name="sub_action",
) | null |
4,263 | import argparse
import json
from typing import Callable, Dict, List, Optional, Tuple
from promptflow._cli._params import (
add_param_all_results,
add_param_archived_only,
add_param_columns_mapping,
add_param_connections,
add_param_environment_variables,
add_param_include_archived,
add_param_max_results,
add_param_output_format,
add_param_run_name,
add_param_set,
add_param_yes,
add_parser_build,
base_params,
)
from promptflow._cli._utils import (
_output_result_list_with_format,
activate_action,
confirm,
list_of_dict_to_dict,
list_of_dict_to_nested_dict,
pretty_print_dataframe_as_table,
)
from promptflow._sdk._constants import MAX_SHOW_DETAILS_RESULTS, get_list_view_type
from promptflow._sdk._load_functions import load_run
from promptflow._sdk._pf_client import PFClient
from promptflow._sdk._run_functions import _create_run, _resume_run
from promptflow._sdk._utils import safe_parse_object_list
from promptflow._sdk.entities import Run
from promptflow._utils.logger_utils import get_cli_sdk_logger
from promptflow.exceptions import UserErrorException
def update_run(name: str, params: List[Dict[str, str]]) -> None:
def stream_run(name: str) -> None:
def list_runs(
max_results: int,
all_results: bool,
archived_only: bool,
include_archived: bool,
output,
):
def show_run(name: str) -> None:
def show_run_details(name: str, max_results: int, all_results: bool) -> None:
def show_run_metrics(name: str) -> None:
def visualize_run(names: str, html_path: Optional[str] = None) -> None:
def archive_run(name: str) -> None:
def restore_run(name: str) -> None:
def create_run(create_func: Callable, resume_func: Callable, args):
def delete_run(name: str, skip_confirm: bool = False) -> None:
def export_run(args):
def dispatch_run_commands(args: argparse.Namespace):
if args.sub_action == "create":
create_run(create_func=_create_run, resume_func=_resume_run, args=args)
elif args.sub_action == "update":
update_run(name=args.name, params=args.params_override)
elif args.sub_action == "stream":
stream_run(name=args.name)
elif args.sub_action == "list":
list_runs(
max_results=args.max_results,
all_results=args.all_results,
archived_only=args.archived_only,
include_archived=args.include_archived,
output=args.output,
)
elif args.sub_action == "show":
show_run(name=args.name)
elif args.sub_action == "show-details":
show_run_details(name=args.name, max_results=args.max_results, all_results=args.all_results)
elif args.sub_action == "show-metrics":
show_run_metrics(name=args.name)
elif args.sub_action == "visualize":
visualize_run(names=args.names, html_path=args.html_path)
elif args.sub_action == "archive":
archive_run(name=args.name)
elif args.sub_action == "restore":
restore_run(name=args.name)
elif args.sub_action == "export":
export_run(args)
elif args.sub_action == "delete":
delete_run(args.name, args.yes)
else:
raise ValueError(f"Unrecognized command: {args.sub_action}") | null |
4,264 | import argparse
import json
from typing import Callable, Dict, List, Optional, Tuple
from promptflow._cli._params import (
add_param_all_results,
add_param_archived_only,
add_param_columns_mapping,
add_param_connections,
add_param_environment_variables,
add_param_include_archived,
add_param_max_results,
add_param_output_format,
add_param_run_name,
add_param_set,
add_param_yes,
add_parser_build,
base_params,
)
from promptflow._cli._utils import (
_output_result_list_with_format,
activate_action,
confirm,
list_of_dict_to_dict,
list_of_dict_to_nested_dict,
pretty_print_dataframe_as_table,
)
from promptflow._sdk._constants import MAX_SHOW_DETAILS_RESULTS, get_list_view_type
from promptflow._sdk._load_functions import load_run
from promptflow._sdk._pf_client import PFClient
from promptflow._sdk._run_functions import _create_run, _resume_run
from promptflow._sdk._utils import safe_parse_object_list
from promptflow._sdk.entities import Run
from promptflow._utils.logger_utils import get_cli_sdk_logger
from promptflow.exceptions import UserErrorException
def _parse_kv_pair(kv_pairs: str) -> Dict[str, str]:
result = {}
for kv_pairs in kv_pairs.split(","):
kv_pair = kv_pairs.strip()
if "=" not in kv_pair:
raise ValueError(f"Invalid key-value pair: {kv_pair}")
key, value = kv_pair.split("=", 1)
result[key] = value
return result | null |
4,265 | import inspect
import json
import shutil
from abc import ABC, abstractmethod
from ast import literal_eval
from enum import Enum
from pathlib import Path
from jinja2 import Environment, Template, meta
from promptflow._sdk._constants import DEFAULT_ENCODING
from promptflow._sdk.operations._flow_operations import FlowOperations
from promptflow._utils.logger_utils import get_cli_sdk_logger
from promptflow.contracts.flow import Flow as ExecutableFlow
from promptflow.exceptions import UserErrorException
EXTRA_FILES_MAPPING = {"requirements.txt": "requirements_txt", ".gitignore": "gitignore"}
def copy_extra_files(flow_path, extra_files, overwrite=False):
for file_name in extra_files:
extra_file_path = (
Path(__file__).parent.parent / "data" / "entry_flow" / EXTRA_FILES_MAPPING.get(file_name, file_name)
)
target_path = Path(flow_path) / file_name
if target_path.exists() and not overwrite:
continue
action = "Overwriting" if target_path.exists() else "Creating"
print(f"{action} {target_path.resolve()}...")
shutil.copy2(extra_file_path, target_path) | null |
4,266 | import argparse
import json
from promptflow._cli._params import add_param_set_positional, base_params
from promptflow._cli._utils import activate_action, list_of_dict_to_dict
from promptflow._sdk._configuration import Configuration, InvalidConfigValue
from promptflow._sdk._utils import print_red_error
from promptflow._utils.logger_utils import get_cli_sdk_logger
def add_config_set(subparsers):
epilog = """
Examples:
# Config connection provider to azure workspace for current user:
pf config set connection.provider="azureml://subscriptions/<your-subscription>/resourceGroups/<your-resourcegroup>/providers/Microsoft.MachineLearningServices/workspaces/<your-workspace>"
""" # noqa: E501
activate_action(
name="set",
description="Set prompt flow configs for current user.",
epilog=epilog,
add_params=[add_param_set_positional] + base_params,
subparsers=subparsers,
help_message="Set prompt flow configs for current user, configs will be stored at ~/.promptflow/pf.yaml.",
action_param_name="sub_action",
)
def add_config_show(subparsers):
epilog = """
Examples:
# Show prompt flow for current user:
pf config show
"""
activate_action(
name="show",
description="Show prompt flow configs for current user.",
epilog=epilog,
add_params=base_params,
subparsers=subparsers,
help_message="Show prompt flow configs for current user.",
action_param_name="sub_action",
)
def add_config_parser(subparsers):
config_parser = subparsers.add_parser(
"config", description="A CLI tool to set prompt flow configs for current user.", help="pf config"
)
subparsers = config_parser.add_subparsers()
add_config_set(subparsers)
add_config_show(subparsers)
config_parser.set_defaults(action="config") | null |
4,267 | import argparse
import json
from promptflow._cli._params import add_param_set_positional, base_params
from promptflow._cli._utils import activate_action, list_of_dict_to_dict
from promptflow._sdk._configuration import Configuration, InvalidConfigValue
from promptflow._sdk._utils import print_red_error
from promptflow._utils.logger_utils import get_cli_sdk_logger
def set_config(args):
params_override = list_of_dict_to_dict(args.params_override)
for k, v in params_override.items():
logger.debug("Setting config %s to %s", k, v)
try:
Configuration.get_instance().set_config(k, v)
print(f"Set config {args.params_override} successfully.")
except InvalidConfigValue as e:
error_message = f"Invalid config value {v!r} for {k!r}: {str(e)}"
print_red_error(error_message)
def show_config():
configs = Configuration.get_instance().get_all()
print(json.dumps(configs, indent=4))
def dispatch_config_commands(args: argparse.Namespace):
if args.sub_action == "set":
set_config(args)
if args.sub_action == "show":
show_config() | null |
4,268 | import argparse
import json
from promptflow._cli._params import (
AppendToDictAction,
add_param_all_results,
add_param_archived_only,
add_param_include_archived,
add_param_max_results,
base_params,
)
from promptflow._cli._utils import activate_action, list_of_dict_to_dict
from promptflow._sdk._constants import get_list_view_type
from promptflow._sdk._pf_client import PFClient
from promptflow._sdk.entities._experiment import Experiment
from promptflow._utils.logger_utils import get_cli_sdk_logger
from promptflow.exceptions import UserErrorException
def add_experiment_create(subparsers):
epilog = """
Examples:
# Create an experiment from a template:
pf experiment create --template flow.exp.yaml
"""
add_params = [add_param_template, add_param_name] + base_params
create_parser = activate_action(
name="create",
description=None,
epilog=epilog,
add_params=add_params,
subparsers=subparsers,
help_message="Create an experiment.",
action_param_name="sub_action",
)
return create_parser
def add_experiment_list(subparsers):
epilog = """
Examples:
# List all experiments:
pf experiment list
"""
activate_action(
name="list",
description="List all experiments.",
epilog=epilog,
add_params=[
add_param_max_results,
add_param_all_results,
add_param_archived_only,
add_param_include_archived,
]
+ base_params,
subparsers=subparsers,
help_message="List all experiments.",
action_param_name="sub_action",
)
def add_experiment_show(subparsers):
epilog = """
Examples:
# Get and show an experiment:
pf experiment show -n my_experiment
"""
activate_action(
name="show",
description="Show an experiment for promptflow.",
epilog=epilog,
add_params=[add_param_name] + base_params,
subparsers=subparsers,
help_message="Show an experiment for promptflow.",
action_param_name="sub_action",
)
def add_experiment_start(subparsers):
epilog = """
Examples:
# Start a named experiment:
pf experiment start -n my_experiment --inputs data1=data1_val data2=data2_val
# Run an experiment by yaml file:
pf experiment start --template path/to/my_experiment.exp.yaml --inputs data1=data1_val data2=data2_val
"""
activate_action(
name="start",
description="Start an experiment.",
epilog=epilog,
add_params=[add_param_name, add_param_template, add_param_input, add_param_stream] + base_params,
subparsers=subparsers,
help_message="Start an experiment.",
action_param_name="sub_action",
)
def add_experiment_stop(subparsers):
epilog = """
Examples:
# Stop an named experiment:
pf experiment stop -n my_experiment
# Stop an experiment started by yaml file:
pf experiment stop --file path/to/my_experiment.exp.yaml
"""
activate_action(
name="stop",
description="Stop an experiment.",
epilog=epilog,
add_params=[add_param_name, add_param_file] + base_params,
subparsers=subparsers,
help_message="Stop an experiment.",
action_param_name="sub_action",
)
def add_experiment_parser(subparsers):
experiment_parser = subparsers.add_parser(
"experiment",
description="[Experimental] A CLI tool to manage experiment for prompt flow.",
help="[Experimental] pf experiment. This is an experimental feature, and may change at any time.",
)
subparsers = experiment_parser.add_subparsers()
add_experiment_create(subparsers)
add_experiment_list(subparsers)
add_experiment_show(subparsers)
add_experiment_start(subparsers)
add_experiment_stop(subparsers)
experiment_parser.set_defaults(action="experiment") | null |
4,269 | import argparse
import json
from promptflow._cli._params import (
AppendToDictAction,
add_param_all_results,
add_param_archived_only,
add_param_include_archived,
add_param_max_results,
base_params,
)
from promptflow._cli._utils import activate_action, list_of_dict_to_dict
from promptflow._sdk._constants import get_list_view_type
from promptflow._sdk._pf_client import PFClient
from promptflow._sdk.entities._experiment import Experiment
from promptflow._utils.logger_utils import get_cli_sdk_logger
from promptflow.exceptions import UserErrorException
def create_experiment(args: argparse.Namespace):
def list_experiment(args: argparse.Namespace):
def show_experiment(args: argparse.Namespace):
def start_experiment(args: argparse.Namespace):
def stop_experiment(args: argparse.Namespace):
def dispatch_experiment_commands(args: argparse.Namespace):
if args.sub_action == "create":
create_experiment(args)
elif args.sub_action == "list":
list_experiment(args)
elif args.sub_action == "show":
show_experiment(args)
elif args.sub_action == "start":
start_experiment(args)
elif args.sub_action == "show-status":
pass
elif args.sub_action == "update":
pass
elif args.sub_action == "delete":
pass
elif args.sub_action == "stop":
stop_experiment(args)
elif args.sub_action == "test":
pass
elif args.sub_action == "clone":
pass | null |
4,270 | import argparse
import json
from promptflow._cli._params import (
AppendToDictAction,
add_param_all_results,
add_param_archived_only,
add_param_include_archived,
add_param_max_results,
base_params,
)
from promptflow._cli._utils import activate_action, list_of_dict_to_dict
from promptflow._sdk._constants import get_list_view_type
from promptflow._sdk._pf_client import PFClient
from promptflow._sdk.entities._experiment import Experiment
from promptflow._utils.logger_utils import get_cli_sdk_logger
from promptflow.exceptions import UserErrorException
def _get_pf_client():
def test_experiment(args: argparse.Namespace):
result = _get_pf_client()._experiments._test(args.name)
print(json.dumps(result._to_dict(), indent=4)) | null |
4,271 | import argparse
import json
import re
import shutil
import sys
from pathlib import Path
from promptflow._cli._params import add_param_set_tool_extra_info, base_params
from promptflow._cli._pf._init_entry_generators import (
InitGenerator,
SetupGenerator,
ToolPackageGenerator,
ToolPackageUtilsGenerator,
ToolReadmeGenerator,
)
from promptflow._cli._utils import activate_action, list_of_dict_to_dict
from promptflow._sdk._constants import DEFAULT_ENCODING
from promptflow._sdk._pf_client import PFClient
from promptflow._utils.logger_utils import get_cli_sdk_logger
from promptflow.exceptions import UserErrorException
def add_parser_init_tool(subparsers):
"""Add tool init parser to the pf tool subparsers."""
epilog = """
Examples:
# Creating a package tool from scratch:
pf tool init --package package_tool --tool tool_name
# Creating a package tool with extra info:
pf tool init --package package_tool --tool tool_name --set icon=<icon-path> category=<category>
# Creating a python tool from scratch:
pf tool init --tool tool_name
""" # noqa: E501
add_param_package = lambda parser: parser.add_argument( # noqa: E731
"--package", type=str, help="The package name to create."
)
add_param_tool = lambda parser: parser.add_argument( # noqa: E731
"--tool", type=str, required=True, help="The tool name to create."
)
add_params = [
add_param_package,
add_param_tool,
add_param_set_tool_extra_info,
] + base_params
return activate_action(
name="init",
description="Creating a tool.",
epilog=epilog,
add_params=add_params,
subparsers=subparsers,
help_message="Initialize a tool directory.",
action_param_name="sub_action",
)
def add_parser_list_tool(subparsers):
"""Add tool list parser to the pf tool subparsers."""
epilog = """
Examples:
# List all package tool in the environment:
pf tool list
# List all package tool and code tool in the flow:
pf tool list --flow flow-path
""" # noqa: E501
add_param_flow = lambda parser: parser.add_argument("--flow", type=str, help="the flow directory") # noqa: E731
add_params = [
add_param_flow,
] + base_params
return activate_action(
name="list",
description="List tools.",
epilog=epilog,
add_params=add_params,
subparsers=subparsers,
help_message="List all tools in the environment.",
action_param_name="sub_action",
)
def add_parser_validate_tool(subparsers):
"""Add tool list parser to the pf tool subparsers."""
epilog = """
Examples:
# Validate single function tool:
pf tool validate -–source <package_name>.<module_name>.<tool_function>
# Validate all tool in a package tool:
pf tool validate -–source <package_name>
# Validate tools in a python script:
pf tool validate --source <path_to_tool_script>
""" # noqa: E501
def add_param_source(parser):
parser.add_argument("--source", type=str, help="The tool source to be used.", required=True)
return activate_action(
name="validate",
description="Validate tool.",
epilog=epilog,
add_params=[
add_param_source,
],
subparsers=subparsers,
help_message="Validate tool. Will raise error if it is not valid.",
action_param_name="sub_action",
)
The provided code snippet includes necessary dependencies for implementing the `add_tool_parser` function. Write a Python function `def add_tool_parser(subparsers)` to solve the following problem:
Add flow parser to the pf subparsers.
Here is the function:
def add_tool_parser(subparsers):
"""Add flow parser to the pf subparsers."""
tool_parser = subparsers.add_parser(
"tool",
description="Manage tools for promptflow.",
help="pf tool",
)
subparsers = tool_parser.add_subparsers()
add_parser_init_tool(subparsers)
add_parser_list_tool(subparsers)
add_parser_validate_tool(subparsers)
tool_parser.set_defaults(action="tool") | Add flow parser to the pf subparsers. |
4,272 | import argparse
import json
import re
import shutil
import sys
from pathlib import Path
from promptflow._cli._params import add_param_set_tool_extra_info, base_params
from promptflow._cli._pf._init_entry_generators import (
InitGenerator,
SetupGenerator,
ToolPackageGenerator,
ToolPackageUtilsGenerator,
ToolReadmeGenerator,
)
from promptflow._cli._utils import activate_action, list_of_dict_to_dict
from promptflow._sdk._constants import DEFAULT_ENCODING
from promptflow._sdk._pf_client import PFClient
from promptflow._utils.logger_utils import get_cli_sdk_logger
from promptflow.exceptions import UserErrorException
def init_tool(args):
# Validate package/tool name
pattern = r"^[a-zA-Z_][a-zA-Z0-9_]*$"
if args.package and not re.match(pattern, args.package):
raise UserErrorException(f"The package name {args.package} is a invalid identifier.")
if not re.match(pattern, args.tool):
raise UserErrorException(f"The tool name {args.tool} is a invalid identifier.")
print("Creating tool from scratch...")
extra_info = list_of_dict_to_dict(args.extra_info)
icon_path = extra_info.pop("icon", None)
if icon_path and not Path(icon_path).exists():
raise UserErrorException(f"Cannot find the icon path {icon_path}.")
if args.package:
package_path = Path(args.package)
package_name = package_path.stem
script_code_path = package_path / package_name
script_code_path.mkdir(parents=True, exist_ok=True)
# Generate manifest file
manifest_file = package_path / "MANIFEST.in"
manifest_file.touch(exist_ok=True)
with open(manifest_file, "r") as f:
manifest_contents = [line.strip() for line in f.readlines()]
if icon_path:
package_icon_path = package_path / "icons"
package_icon_path.mkdir(exist_ok=True)
dst = shutil.copy2(icon_path, package_icon_path)
icon_path = f'Path(__file__).parent.parent / "icons" / "{Path(dst).name}"'
icon_manifest = f"include {package_name}/icons"
if icon_manifest not in manifest_contents:
manifest_contents.append(icon_manifest)
with open(manifest_file, "w", encoding=DEFAULT_ENCODING) as f:
f.writelines("\n".join(set(manifest_contents)))
# Generate package setup.py
SetupGenerator(package_name=package_name, tool_name=args.tool).generate_to_file(package_path / "setup.py")
# Generate utils.py to list meta data of tools.
ToolPackageUtilsGenerator(package_name=package_name).generate_to_file(script_code_path / "utils.py")
ToolReadmeGenerator(package_name=package_name, tool_name=args.tool).generate_to_file(package_path / "README.md")
else:
script_code_path = Path(".")
if icon_path:
icon_path = f'"{Path(icon_path).as_posix()}"'
# Generate tool script
ToolPackageGenerator(tool_name=args.tool, icon=icon_path, extra_info=extra_info).generate_to_file(
script_code_path / f"{args.tool}.py"
)
InitGenerator().generate_to_file(script_code_path / "__init__.py")
print(f'Done. Created the tool "{args.tool}" in {script_code_path.resolve()}.')
def list_tool(args):
pf_client = PFClient()
package_tools = pf_client._tools.list(args.flow)
print(json.dumps(package_tools, indent=4))
def validate_tool(args):
import importlib
pf_client = PFClient()
try:
__import__(args.source)
source = importlib.import_module(args.source)
logger.debug(f"The source {args.source} is used as a package to validate.")
except ImportError:
try:
module_name, func_name = args.source.rsplit(".", 1)
module = importlib.import_module(module_name)
source = getattr(module, func_name)
logger.debug(f"The source {args.source} is used as a function to validate.")
except Exception:
if not Path(args.source).exists():
raise UserErrorException("Invalid source to validate tools.")
logger.debug(f"The source {args.source} is used as a script to validate.")
source = args.source
validation_result = pf_client._tools.validate(source)
print(repr(validation_result))
if not validation_result.passed:
sys.exit(1)
def dispatch_tool_commands(args: argparse.Namespace):
if args.sub_action == "init":
init_tool(args)
elif args.sub_action == "list":
list_tool(args)
elif args.sub_action == "validate":
validate_tool(args) | null |
4,273 | import argparse
import importlib
import json
import os
import shutil
import subprocess
import sys
import tempfile
import webbrowser
from pathlib import Path
from promptflow._cli._params import (
add_param_config,
add_param_entry,
add_param_environment_variables,
add_param_flow_display_name,
add_param_function,
add_param_inputs,
add_param_prompt_template,
add_param_source,
add_param_yes,
add_parser_build,
base_params,
)
from promptflow._cli._pf._init_entry_generators import (
AzureOpenAIConnectionGenerator,
ChatFlowDAGGenerator,
FlowDAGGenerator,
OpenAIConnectionGenerator,
StreamlitFileReplicator,
ToolMetaGenerator,
ToolPyGenerator,
copy_extra_files,
)
from promptflow._cli._utils import _copy_to_flow, activate_action, confirm, inject_sys_path, list_of_dict_to_dict
from promptflow._constants import FlowLanguage
from promptflow._sdk._configuration import Configuration
from promptflow._sdk._constants import PROMPT_FLOW_DIR_NAME, ConnectionProvider
from promptflow._sdk._pf_client import PFClient
from promptflow._sdk.operations._flow_operations import FlowOperations
from promptflow._utils.logger_utils import get_cli_sdk_logger
from promptflow.exceptions import ErrorTarget, UserErrorException
def add_parser_init_flow(subparsers):
"""Add flow create parser to the pf flow subparsers."""
epilog = """
Examples:
# Creating a flow folder with code/prompts and yaml definitions of the flow:
pf flow init --flow my-awesome-flow
# Creating an eval prompt flow:
pf flow init --flow my-awesome-flow --type evaluation
# Creating a flow in existing folder
pf flow init --flow intent_copilot --entry intent.py --function extract_intent --prompt-template prompt_template=tpl.jinja2
""" # noqa: E501
add_param_type = lambda parser: parser.add_argument( # noqa: E731
"--type",
type=str,
choices=["standard", "evaluation", "chat"],
help="The initialized flow type.",
default="standard",
)
add_param_connection = lambda parser: parser.add_argument( # noqa: E731
"--connection", type=str, help=argparse.SUPPRESS
)
add_param_deployment = lambda parser: parser.add_argument( # noqa: E731
"--deployment", type=str, help=argparse.SUPPRESS
)
add_params = [
add_param_type,
add_param_yes,
add_param_flow_display_name,
add_param_entry,
add_param_function,
add_param_prompt_template,
add_param_connection,
add_param_deployment,
] + base_params
activate_action(
name="init",
description="Creating a flow folder with code/prompts and yaml definitions of the flow.",
epilog=epilog,
add_params=add_params,
subparsers=subparsers,
help_message="Initialize a prompt flow directory.",
action_param_name="sub_action",
)
def add_parser_serve_flow(subparsers):
"""Add flow serve parser to the pf flow subparsers."""
epilog = """
Examples:
# Serve flow as an endpoint:
pf flow serve --source <path_to_flow>
# Serve flow as an endpoint with specific port and host:
pf flow serve --source <path_to_flow> --port 8080 --host localhost --environment-variables key1="`${my_connection.api_key}" key2="value2"
# Serve flow without opening browser:
pf flow serve --source <path_to_flow> --skip-open-browser
""" # noqa: E501
add_param_port = lambda parser: parser.add_argument( # noqa: E731
"--port", type=int, default=8080, help="The port on which endpoint to run."
)
add_param_host = lambda parser: parser.add_argument( # noqa: E731
"--host", type=str, default="localhost", help="The host of endpoint."
)
add_param_static_folder = lambda parser: parser.add_argument( # noqa: E731
"--static_folder", type=str, help=argparse.SUPPRESS
)
add_param_skip_browser = lambda parser: parser.add_argument( # noqa: E731
"--skip-open-browser", action="store_true", default=False, help="Skip open browser for flow serving."
)
activate_action(
name="serve",
description="Serving a flow as an endpoint.",
epilog=epilog,
add_params=[
add_param_source,
add_param_port,
add_param_host,
add_param_static_folder,
add_param_environment_variables,
add_param_config,
add_param_skip_browser,
]
+ base_params,
subparsers=subparsers,
help_message="Serving a flow as an endpoint.",
action_param_name="sub_action",
)
def add_parser_validate_flow(subparsers):
"""Add flow validate parser to the pf flow subparsers."""
epilog = """
Examples:
# Validate flow
pf flow validate --source <path_to_flow>
""" # noqa: E501
activate_action(
name="validate",
description="Validate a flow and generate flow.tools.json for the flow.",
epilog=epilog,
add_params=[
add_param_source,
]
+ base_params,
subparsers=subparsers,
help_message="Validate a flow. Will raise error if the flow is not valid.",
action_param_name="sub_action",
)
def add_parser_test_flow(subparsers):
"""Add flow test parser to the pf flow subparsers."""
epilog = """
Examples:
# Test the flow:
pf flow test --flow my-awesome-flow
# Test the flow with inputs:
pf flow test --flow my-awesome-flow --inputs key1=val1 key2=val2
# Test the flow with specified variant node:
pf flow test --flow my-awesome-flow --variant ${node_name.variant_name}
# Test the single node in the flow:
pf flow test --flow my-awesome-flow --node node_name
# Chat in the flow:
pf flow test --flow my-awesome-flow --node node_name --interactive
""" # noqa: E501
add_param_flow = lambda parser: parser.add_argument( # noqa: E731
"--flow", type=str, required=True, help="the flow directory to test."
)
add_param_node = lambda parser: parser.add_argument( # noqa: E731
"--node", type=str, help="the node name in the flow need to be tested."
)
add_param_variant = lambda parser: parser.add_argument( # noqa: E731
"--variant", type=str, help="Node & variant name in format of ${node_name.variant_name}."
)
add_param_interactive = lambda parser: parser.add_argument( # noqa: E731
"--interactive", action="store_true", help="start a interactive chat session for chat flow."
)
add_param_multi_modal = lambda parser: parser.add_argument( # noqa: E731
"--multi-modal", action="store_true", help=argparse.SUPPRESS
)
add_param_ui = lambda parser: parser.add_argument("--ui", action="store_true", help=argparse.SUPPRESS) # noqa: E731
add_param_input = lambda parser: parser.add_argument("--input", type=str, help=argparse.SUPPRESS) # noqa: E731
add_param_detail = lambda parser: parser.add_argument( # noqa: E731
"--detail", type=str, default=None, required=False, help=argparse.SUPPRESS
)
add_param_experiment = lambda parser: parser.add_argument( # noqa: E731
"--experiment", type=str, help="the experiment template path of flow."
)
add_param_skip_browser = lambda parser: parser.add_argument( # noqa: E731
"--skip-open-browser", action="store_true", help=argparse.SUPPRESS
)
add_params = [
add_param_flow,
add_param_node,
add_param_variant,
add_param_interactive,
add_param_input,
add_param_inputs,
add_param_environment_variables,
add_param_multi_modal,
add_param_ui,
add_param_config,
add_param_detail,
add_param_skip_browser,
] + base_params
if Configuration.get_instance().is_internal_features_enabled():
add_params.append(add_param_experiment)
activate_action(
name="test",
description="Test the flow.",
epilog=epilog,
add_params=add_params,
subparsers=subparsers,
help_message="Test the prompt flow or flow node.",
action_param_name="sub_action",
)
The provided code snippet includes necessary dependencies for implementing the `add_flow_parser` function. Write a Python function `def add_flow_parser(subparsers)` to solve the following problem:
Add flow parser to the pf subparsers.
Here is the function:
def add_flow_parser(subparsers):
"""Add flow parser to the pf subparsers."""
flow_parser = subparsers.add_parser(
"flow",
description="Manage flows for promptflow.",
help="pf flow",
)
flow_subparsers = flow_parser.add_subparsers()
add_parser_init_flow(flow_subparsers)
add_parser_test_flow(flow_subparsers)
add_parser_serve_flow(flow_subparsers)
add_parser_build(flow_subparsers, "flow")
add_parser_validate_flow(flow_subparsers)
flow_parser.set_defaults(action="flow") | Add flow parser to the pf subparsers. |
4,274 | import argparse
import importlib
import json
import os
import shutil
import subprocess
import sys
import tempfile
import webbrowser
from pathlib import Path
from promptflow._cli._params import (
add_param_config,
add_param_entry,
add_param_environment_variables,
add_param_flow_display_name,
add_param_function,
add_param_inputs,
add_param_prompt_template,
add_param_source,
add_param_yes,
add_parser_build,
base_params,
)
from promptflow._cli._pf._init_entry_generators import (
AzureOpenAIConnectionGenerator,
ChatFlowDAGGenerator,
FlowDAGGenerator,
OpenAIConnectionGenerator,
StreamlitFileReplicator,
ToolMetaGenerator,
ToolPyGenerator,
copy_extra_files,
)
from promptflow._cli._utils import _copy_to_flow, activate_action, confirm, inject_sys_path, list_of_dict_to_dict
from promptflow._constants import FlowLanguage
from promptflow._sdk._configuration import Configuration
from promptflow._sdk._constants import PROMPT_FLOW_DIR_NAME, ConnectionProvider
from promptflow._sdk._pf_client import PFClient
from promptflow._sdk.operations._flow_operations import FlowOperations
from promptflow._utils.logger_utils import get_cli_sdk_logger
from promptflow.exceptions import ErrorTarget, UserErrorException
def init_flow(args):
if any([args.entry, args.prompt_template]):
print("Creating flow from existing folder...")
prompt_tpl = {}
if args.prompt_template:
for _dct in args.prompt_template:
prompt_tpl.update(**_dct)
_init_existing_flow(args.flow, args.entry, args.function, prompt_tpl)
else:
# Create an example flow
print("Creating flow from scratch...")
_init_flow_by_template(args.flow, args.type, args.yes, args.connection, args.deployment)
def test_flow(args):
config = list_of_dict_to_dict(args.config)
pf_client = PFClient(config=config)
if args.environment_variables:
environment_variables = list_of_dict_to_dict(args.environment_variables)
else:
environment_variables = {}
inputs = _build_inputs_for_flow_test(args)
# Select different test mode
if Configuration.get_instance().is_internal_features_enabled() and args.experiment:
_test_flow_experiment(args, pf_client, inputs, environment_variables)
return
if args.multi_modal or args.ui:
_test_flow_multi_modal(args, pf_client)
return
if args.interactive:
_test_flow_interactive(args, pf_client, inputs, environment_variables)
return
_test_flow_standard(args, pf_client, inputs, environment_variables)
def serve_flow(args):
from promptflow._sdk._load_functions import load_flow
logger.info("Start serve model: %s", args.source)
# Set environment variable for local test
source = Path(args.source)
logger.info(
"Start promptflow server with port %s",
args.port,
)
os.environ["PROMPTFLOW_PROJECT_PATH"] = source.absolute().as_posix()
flow = load_flow(args.source)
if flow.language == FlowLanguage.CSharp:
serve_flow_csharp(args, source)
else:
serve_flow_python(args, source)
logger.info("Promptflow app ended")
def build_flow(args):
"""
i. `pf flow build --source <flow_folder> --output <output_folder> --variant <variant>`
ii. `pf flow build --source <flow_folder> --format docker --output <output_folder> --variant <variant>`
iii. `pf flow build --source <flow_folder> --format executable --output <output_folder> --variant <variant>`
# default to resolve variant and update flow.dag.yaml, support this in case customer want to keep the
variants for continuous development
# we can delay this before receiving specific customer request
v. `pf flow build --source <flow_folder> --output <output_folder> --keep-variants`
output structure:
flow/
.connections/
Dockerfile|executable.exe
...
"""
pf_client = PFClient()
pf_client.flows.build(
flow=args.source,
output=args.output,
format=args.format,
variant=args.variant,
flow_only=args.flow_only,
)
print(
f"Exported flow to {Path(args.output).absolute().as_posix()}.\n"
f"please check {Path(args.output).joinpath('README.md').absolute().as_posix()} "
f"for how to use it."
)
def validate_flow(args):
pf_client = PFClient()
validation_result = pf_client.flows.validate(
flow=args.source,
)
print(repr(validation_result))
if not validation_result.passed:
sys.exit(1)
else:
sys.exit(0)
def dispatch_flow_commands(args: argparse.Namespace):
if args.sub_action == "init":
init_flow(args)
elif args.sub_action == "test":
test_flow(args)
elif args.sub_action == "serve":
serve_flow(args)
elif args.sub_action == "build":
build_flow(args)
elif args.sub_action == "validate":
validate_flow(args) | null |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.