id int64 0 190k | prompt stringlengths 21 13.4M | docstring stringlengths 1 12k ⌀ |
|---|---|---|
4,479 | from typing import List
import openai
from openai.version import VERSION as OPENAI_VERSION
import os
import tiktoken
from jinja2 import Template
from .retry import (
retry_and_handle_exceptions,
retry_and_handle_exceptions_for_generator,
)
from .logging import log
def count_token(text: str) -> int:
    """Return the number of cl100k_base tokens in *text*."""
    tokenizer = tiktoken.get_encoding("cl100k_base")
    tokens = tokenizer.encode(text)
    return len(tokens)
def render_with_token_limit(template: Template, token_limit: int, **kwargs) -> str:
    """Render a Jinja2 template and fail if the result exceeds *token_limit* tokens.

    Raises:
        ValueError: when the rendered text's token count is above the limit.
    """
    text = template.render(**kwargs)
    token_count = count_token(text)
    if token_count > token_limit:
        # Log before raising so the oversize prompt is visible even if the
        # caller swallows the exception.
        message = f"token count {token_count} exceeds limit {token_limit}"
        log(message)
        raise ValueError(message)
    return text
def log(message: str):
    """Print *message* only when the VERBOSE env var is set to "true" (any case)."""
    if os.environ.get("VERBOSE", "false").lower() == "true":
        print(message, flush=True)
def render_with_token_limit(template: Template, token_limit: int, **kwargs) -> str:
    """Render a Jinja2 template and fail if the result exceeds *token_limit* tokens.

    NOTE(review): duplicate of render_with_token_limit defined earlier in this
    dump — likely the same function captured from two files.
    """
    text = template.render(**kwargs)
    token_count = count_token(text)
    if token_count > token_limit:
        # Log before raising so the oversize prompt remains visible.
        message = f"token count {token_count} exceeds limit {token_limit}"
        log(message)
        raise ValueError(message)
    return text
4,480 | from promptflow import tool
from chat_with_pdf.main import chat_with_pdf
def convert_chat_history_to_chatml_messages(history):
    """Flatten history records into an alternating user/assistant ChatML list."""
    messages = []
    for turn in history:
        messages.extend(
            [
                {"role": "user", "content": turn["inputs"]["question"]},
                {"role": "assistant", "content": turn["outputs"]["answer"]},
            ]
        )
    return messages
def chat_with_pdf(question: str, pdf_url: str, history: list):
    """Answer *question* about the PDF at *pdf_url* using *history* for context.

    Returns a (stream, context) pair: a token stream of the answer and the
    retrieved context passages.
    """
    # Serialize directory creation across concurrent workers.
    # NOTE(review): assumes acquire_lock, PDF_DIR, INDEX_DIR, download,
    # create_faiss_index, rewrite_question, find_context and qna are defined
    # in the enclosing module — confirm against the full file.
    with acquire_lock("create_folder.lock"):
        if not os.path.exists(PDF_DIR):
            os.mkdir(PDF_DIR)
        if not os.path.exists(INDEX_DIR):
            os.makedirs(INDEX_DIR)
    pdf_path = download(pdf_url)
    index_path = create_faiss_index(pdf_path)
    # Rewrite the question with chat history so retrieval sees a standalone query.
    q = rewrite_question(question, history)
    prompt, context = find_context(q, index_path)
    stream = qna(prompt, history)
    return stream, context
def chat_with_pdf_tool(question: str, pdf_url: str, history: list, ready: str):
    """Promptflow tool wrapper: run chat_with_pdf and collect the streamed answer.

    Args:
        question: user question about the PDF.
        pdf_url: URL of the PDF to index and query.
        history: chat history records (inputs/outputs dicts).
        ready: unused flow-ordering input kept for interface compatibility.

    Returns:
        dict with the full "answer" text and the retrieval "context".
    """
    history = convert_chat_history_to_chatml_messages(history)
    stream, context = chat_with_pdf(question, pdf_url, history)
    # Fix: the original loop named its variable `str` (shadowing the builtin)
    # and appended a useless "" each iteration via quadratic concatenation.
    answer = "".join(stream)
    return {"answer": answer, "context": context}
4,481 | from promptflow import tool
from chat_with_pdf.main import chat_with_pdf
def convert_chatml_messages_to_chat_history(messages):
    """Re-pair a flat ChatML list into question/answer history records.

    Messages are assumed to alternate strictly user, assistant, ...; an
    odd-length list raises IndexError (strict pairing, as before).
    """
    history = []
    for user_idx in range(0, len(messages), 2):
        question = messages[user_idx]["content"]
        answer = messages[user_idx + 1]["content"]
        history.append(
            {"inputs": {"question": question}, "outputs": {"answer": answer}}
        )
    return history
4,482 | from promptflow import tool
import json
The provided code snippet includes necessary dependencies for implementing the `get_current_weather` function. Write a Python function `def get_current_weather(location, unit="fahrenheit")` to solve the following problem:
Get the current weather in a given location
Here is the function:
def get_current_weather(location, unit="fahrenheit"):
    """Get the current weather in a given location (canned demo data)."""
    return {
        "location": location,
        "temperature": "72",
        "unit": unit,
        "forecast": ["sunny", "windy"],
    }
4,483 | from promptflow import tool
import json
The provided code snippet includes necessary dependencies for implementing the `get_n_day_weather_forecast` function. Write a Python function `def get_n_day_weather_forecast(location, format, num_days)` to solve the following problem:
Get next num_days weather in a given location
Here is the function:
def get_n_day_weather_forecast(location, format, num_days):
    """Get next num_days weather in a given location (canned demo data).

    Note: `format` shadows the builtin but is kept for interface compatibility.
    """
    report = {"location": location, "temperature": "60"}
    report["format"] = format
    report["forecast"] = ["rainy"]
    report["num_days"] = num_days
    return report
4,484 | from promptflow import tool
import json
def run_function(response_message: dict) -> str:
    """Dispatch an OpenAI function_call to a module-level function.

    With a "function_call" entry, decodes its JSON arguments and invokes the
    named global; otherwise falls back to the message content (or the raw
    message when it is not a dict).
    """
    if "function_call" not in response_message:
        print("No function call")
        if isinstance(response_message, dict):
            return response_message["content"]
        return response_message
    call = response_message["function_call"]
    function_args = json.loads(call["arguments"])
    print(function_args)
    return globals()[call["name"]](**function_args)
4,485 | import os
from promptflow import tool
from promptflow.connections import CustomConnection
from intent import extract_intent
def extract_intent(chat_prompt: str):
    """Send *chat_prompt* to an Azure OpenAI chat model and return the reply text.

    Reads CHAT_DEPLOYMENT_NAME / AZURE_OPENAI_API_KEY / AZURE_OPENAI_API_BASE
    from the environment, loading a .env file first when the key is absent.
    """
    from langchain.chat_models import AzureChatOpenAI
    from langchain.schema import HumanMessage

    if "AZURE_OPENAI_API_KEY" not in os.environ:
        # load environment variables from .env file
        try:
            from dotenv import load_dotenv
        except ImportError:
            # Fix: this variant used pip.main without importing pip, so the
            # fallback install path raised NameError instead of installing.
            import pip
            pip.main(["install", "python-dotenv"])
            from dotenv import load_dotenv
        load_dotenv()
    chat = AzureChatOpenAI(
        deployment_name=os.environ["CHAT_DEPLOYMENT_NAME"],
        openai_api_key=os.environ["AZURE_OPENAI_API_KEY"],
        openai_api_base=os.environ["AZURE_OPENAI_API_BASE"],
        openai_api_type="azure",
        openai_api_version="2023-03-15-preview",
        temperature=0,
    )
    reply_message = chat([HumanMessage(content=chat_prompt)])
    return reply_message.content
def extract_intent_tool(chat_prompt, connection: CustomConnection) -> str:
    """Expose connection fields as environment variables, then run extract_intent."""
    # The entry function reads its credentials from the environment, so copy
    # every connection key/value there first.
    for env_name, env_value in dict(connection).items():
        os.environ[env_name] = env_value
    return extract_intent(chat_prompt=chat_prompt)
4,486 | import os
import pip
from langchain.chat_models import AzureChatOpenAI
from langchain.prompts.chat import ChatPromptTemplate, HumanMessagePromptTemplate
from langchain.prompts.prompt import PromptTemplate
from langchain.schema import HumanMessage
def extract_intent(chat_prompt: str):
    """Send *chat_prompt* to an Azure OpenAI chat model and return the reply text.

    Reads CHAT_DEPLOYMENT_NAME / AZURE_OPENAI_API_KEY / AZURE_OPENAI_API_BASE
    from the environment, loading a .env file first when the key is absent.
    """
    if "AZURE_OPENAI_API_KEY" not in os.environ:
        # load environment variables from .env file
        try:
            from dotenv import load_dotenv
        except ImportError:
            # Best-effort install of python-dotenv; can be removed when the
            # runtime image already ships it.
            pip.main(["install", "python-dotenv"])
            from dotenv import load_dotenv
        load_dotenv()
    # Deployment, key and endpoint all come from the environment.
    chat = AzureChatOpenAI(
        deployment_name=os.environ["CHAT_DEPLOYMENT_NAME"],
        openai_api_key=os.environ["AZURE_OPENAI_API_KEY"],
        openai_api_base=os.environ["AZURE_OPENAI_API_BASE"],
        openai_api_type="azure",
        openai_api_version="2023-07-01-preview",
        temperature=0,
    )
    reply_message = chat([HumanMessage(content=chat_prompt)])
    return reply_message.content
4,487 | import os
import pip
from langchain.chat_models import AzureChatOpenAI
from langchain.prompts.chat import ChatPromptTemplate, HumanMessagePromptTemplate
from langchain.prompts.prompt import PromptTemplate
from langchain.schema import HumanMessage
def generate_prompt(customer_info: str, history: list, user_prompt_template: str):
    """Render the user prompt template with customer info and the chat transcript."""
    # Flatten the history into "role: content" lines.
    transcript = "\n".join(
        f"{message['role']}: {message['content']}" for message in history
    )
    prompt_template = PromptTemplate.from_template(user_prompt_template)
    chat_template = ChatPromptTemplate.from_messages(
        [HumanMessagePromptTemplate(prompt=prompt_template)]
    )
    rendered = chat_template.format_prompt(
        customer_info=customer_info, chat_history=transcript
    )
    return rendered.to_string()
4,488 | from typing import Union
from openai.version import VERSION as OPENAI_VERSION
from promptflow import tool
from promptflow.connections import CustomConnection, AzureOpenAIConnection
def to_bool(value) -> bool:
def get_client(connection: Union[CustomConnection, AzureOpenAIConnection]):
def my_python_tool(
    prompt: str,
    # for AOAI, deployment name is customized by user, not model name.
    deployment_name: str,
    suffix: str = None,
    max_tokens: int = 120,
    temperature: float = 1.0,
    top_p: float = 1.0,
    n: int = 1,
    logprobs: int = None,
    echo: bool = False,
    stop: list = None,
    presence_penalty: float = 0,
    frequency_penalty: float = 0,
    best_of: int = 1,
    logit_bias: dict = {},
    user: str = "",
    connection: Union[CustomConnection, AzureOpenAIConnection] = None,
    **kwargs,
) -> str:
    """Call the (Azure) OpenAI completions API and return the first choice's text.

    Parameters mirror the OpenAI completions API; numeric arguments are coerced
    because the flow runtime may deliver them as strings.
    NOTE(review): mutable default `logit_bias={}` is shared across calls; it is
    only read here, but confirm no caller mutates it.
    """
    # TODO: remove below type conversion after client can pass json rather than string.
    echo = to_bool(echo)
    response = get_client(connection).completions.create(
        prompt=prompt,
        model=deployment_name,
        # empty string suffix should be treated as None.
        suffix=suffix if suffix else None,
        max_tokens=int(max_tokens),
        temperature=float(temperature),
        top_p=float(top_p),
        n=int(n),
        logprobs=int(logprobs) if logprobs else None,
        echo=echo,
        # fix bug "[] is not valid under any of the given schemas-'stop'"
        stop=stop if stop else None,
        presence_penalty=float(presence_penalty),
        frequency_penalty=float(frequency_penalty),
        best_of=int(best_of),
        # Logit bias must be a dict if we passed it to openai api.
        logit_bias=logit_bias if logit_bias else {},
        user=user,
    )
    # get first element because prompt is single.
    return response.choices[0].text
4,489 | from promptflow import tool
import ast
import json
def infinite_loop_check(code_snippet):
def syntax_error_check(code_snippet):
def error_fix(code_snippet):
def code_refine(original_code: str) -> str:
    """Extract code from a JSON payload and repair loops/syntax via the fixer tools.

    Returns the refined code, the sentinel "JSONDecodeError" when the payload is
    not valid JSON, or "Unknown Error:<msg>" for anything else.
    NOTE(review): relies on sibling tools infinite_loop_check /
    syntax_error_check / error_fix defined elsewhere in this flow.
    """
    try:
        # Payload is a JSON object of the form {"code": "..."}.
        original_code = json.loads(original_code)["code"]
        fixed_code = None
        if infinite_loop_check(original_code):
            fixed_code = error_fix(original_code)
        else:
            fixed_code = original_code
        # A second pass catches syntax errors introduced by (or surviving) the fix.
        if syntax_error_check(fixed_code):
            fixed_code = error_fix(fixed_code)
        return fixed_code
    except json.JSONDecodeError:
        return "JSONDecodeError"
    except Exception as e:
        return "Unknown Error:" + str(e)
4,490 | from promptflow import tool
def prepare_example():
    """Return few-shot question/code/answer examples for the math-solver prompt.

    Several string literals below use backslash line-continuations; their
    continuation lines are kept at column 0 so no extra whitespace leaks into
    the example strings.
    """
    return [
        {
            "question": "What is 37593 * 67?",
            "code": "{\n \"code\": \"print(37593 * 67)\"\n}",
            "answer": "2512641",
        },
        {
            "question": "What is the value of x in the equation 2x + 3 = 11?",
            "code": "{\n \"code\": \"print((11-3)/2)\"\n}",
            "answer": "4",
        },
        {
            "question": "How many of the integers between 0 and 99 inclusive are divisible by 8?",
            "code": "{\n \"code\": \"count = 0\\nfor i in range(100):\\n \
if i % 8 == 0:\\n count += 1\\nprint(count)\"\n}",
            "answer": "10",
        },
        {
            "question": "Janet's ducks lay 16 eggs per day. \
She eats three for breakfast every morning and bakes muffins for her friends every day with four.\
She sells the remainder at the farmers' market daily for $2 per fresh duck egg. \
How much in dollars does she make every day at the farmers' market?",
            "code": "{\n \"code\": \"print((16-3-4)*2)\"\n}",
            "answer": "18",
        },
        {
            "question": "What is the sum of the powers of 3 (3^i) that are smaller than 100?",
            "code": "{\n \"code\": \"sum = 0\\ni = 0\n\
while 3**i < 100:\\n sum += 3**i\\n i += 1\\nprint(sum)\"\n}",
            "answer": "40",
        },
        {
            "question": "Carla is downloading a 200 GB file. She can download 2 GB/minute, \
but 40% of the way through the download, the download fails.\
Then Carla has to restart the download from the beginning. \
How load did it take her to download the file in minutes?",
            "code": "{\n \"code\": \"print(200/2*1.4)\"\n}",
            "answer": "140",
        },
        {
            "question": "What is the sum of the 10 first positive integers?",
            "code": "{\n \"code\": \"print(sum(range(1,11)))\"\n}",
            "answer": "55",
        }
    ]
4,491 | from promptflow import tool
import sys
from io import StringIO
def func_exe(code_snippet: str):
    """Execute a Python snippet and return whatever it printed (stripped).

    Error sentinels from the upstream refinement step pass through untouched;
    exceptions raised by the snippet are returned as their string form.
    NOTE: exec() runs arbitrary code — only use on trusted snippets.
    """
    if code_snippet == "JSONDecodeError" or code_snippet.startswith("Unknown Error:"):
        return code_snippet
    captured = StringIO()
    saved_stdout = sys.stdout
    sys.stdout = captured
    try:
        exec(code_snippet.lstrip())
    except Exception as exc:
        return str(exc)
    finally:
        # Always restore stdout, success or failure.
        sys.stdout = saved_stdout
    return captured.getvalue().strip()
4,492 | import time
from typing import List
import re
import tiktoken
import logging
import sys
import json
# Shared log-line layout: timestamp, logger name, level, message.
FORMATTER = logging.Formatter(
    fmt="[%(asctime)s] %(name)-8s %(levelname)-8s %(message)s",
    datefmt="%Y-%m-%d %H:%M:%S %z",
)


def get_logger(name: str, level=logging.INFO) -> logging.Logger:
    """Build a logger that writes formatted records to sys.stdout.

    Uses logging.Logger(name) directly (not getLogger), so each call returns a
    fresh, unregistered logger instance — preserved for backward compatibility.
    """
    # log to sys.stdout for backward compatibility.
    # TODO: May need to be removed in the future, after local/blob file stream are fully supported.
    handler = logging.StreamHandler(sys.stdout)
    handler.setFormatter(FORMATTER)
    new_logger = logging.Logger(name)
    new_logger.addHandler(handler)
    new_logger.setLevel(level)
    return new_logger
4,493 | import time
from typing import List
import re
import tiktoken
import logging
import sys
import json
def preprocess_json_input(input_str: str) -> str:
    """Escape lone backslashes so near-JSON text becomes parseable.

    Backslashes that already start a valid JSON escape (\\", \\\\, /, b, f, n,
    r, t, or \\uXXXX) are left untouched.
    """
    return re.sub(
        r'(?<!\\)\\(?!["\\/bfnrt]|u[0-9a-fA-F]{4})', r"\\\\", input_str
    )


def parse_reply(text: str):
    """Parse LLM output as JSON, retrying once after backslash repair.

    Returns the parsed object, or an {"Error": ...} dict when parsing fails
    or *text* is not a string-like type.
    """
    try:
        return json.loads(text, strict=False)
    except TypeError:
        return {"Error": f"the JSON object must be str, bytes or bytearray not {type(text)}"}
    except json.JSONDecodeError:
        pass
    try:
        return json.loads(preprocess_json_input(text), strict=False)
    except Exception:
        return {"Error": f"Could not parse invalid json: {text}"}
4,494 | import time
from typing import List
import re
import tiktoken
import logging
import sys
import json
The provided code snippet includes necessary dependencies for implementing the `count_string_tokens` function. Write a Python function `def count_string_tokens(string: str, model_name="gpt-3.5-turbo") -> int` to solve the following problem:
Returns the number of tokens in a text string. Args: string (str): The text string. model_name (str): The name of the encoding to use. (e.g., "gpt-3.5-turbo") Returns: int: The number of tokens in the text string.
Here is the function:
def count_string_tokens(string: str, model_name="gpt-3.5-turbo") -> int:
    """
    Returns the number of tokens in a text string.
    Args:
        string (str): The text string.
        model_name (str): The name of the encoding to use. (e.g., "gpt-3.5-turbo")
    Returns:
        int: The number of tokens in the text string.
    """
    # tiktoken selects the tokenizer that matches the given model name.
    encoding = tiktoken.encoding_for_model(model_name)
    return len(encoding.encode(string))
4,495 | import time
from typing import List
import re
import tiktoken
import logging
import sys
import json
def count_message_tokens(
messages: List, model: str = "gpt-3.5-turbo-0301"
) -> int:
def create_chat_message(role, content, name=None):
def generate_context(prompt, full_message_history, user_prompt, model="gpt-3.5-turbo"):
    """Seed the chat context with system prompts + user prompt and count its tokens.

    Returns (next_message_to_add_index, current_tokens_used, insertion_index,
    current_context) so the caller can back-fill history within a token budget.
    NOTE(review): `full_message_history` is only used to size the back-fill
    index; the history itself is inserted by the caller.
    """
    current_context = [
        create_chat_message("system", prompt),
        create_chat_message(
            "system", f"The current time and date is {time.strftime('%c')}"
        ),
        create_chat_message("user", user_prompt),
    ]
    # Add messages from the full message history until we reach the token limit
    next_message_to_add_index = len(full_message_history) - 1
    insertion_index = len(current_context)
    # Count the currently used tokens
    current_tokens_used = count_message_tokens(current_context, model)
    return (
        next_message_to_add_index,
        current_tokens_used,
        insertion_index,
        current_context,
    )
4,496 | import time
from typing import List
import re
import tiktoken
import logging
import sys
import json
def construct_prompt(current_context):
    """Serialize chat messages into a single newline-delimited prompt string.

    Each message renders as "role:\\ncontent"; messages carrying a name render
    as "role:\\nname:\\n<name>\\ncontent:\\n<content>".
    """
    rendered = []
    for message in current_context:
        role = message.get("role", None)
        content = message.get("content", None)
        name = message.get("name", None)
        if name is None:
            rendered.append(f"{role}:\n{content}")
        else:
            rendered.append(f"{role}:\nname:\n{name}\ncontent:\n{content}")
    return "\n".join(rendered)
4,497 | from promptflow import tool
def functions_format() -> list:
    """Return the OpenAI function-calling schema for the search/python/finish tools.

    Triple-quoted descriptions keep their continuation lines at column 0 so no
    extra indentation leaks into the text sent to the model.
    """
    functions = [
        {
            "name": "search",
            "description": """The action will search this entity name on Wikipedia and returns the first {count}
sentences if it exists. If not, it will return some related entities to search next.""",
            "parameters": {
                "type": "object",
                "properties": {
                    "entity": {
                        "type": "string",
                        "description": "Entity name which is used for Wikipedia search.",
                    },
                    "count": {
                        "type": "integer",
                        "default": 10,
                        "description": "Returned sentences count if entity name exists Wikipedia.",
                    },
                },
                "required": ["entity"],
            },
        },
        {
            "name": "python",
            "description": """A Python shell. Use this to execute python commands. Input should be a valid python
command and you should print result with `print(...)` to see the output.""",
            "parameters": {
                "type": "object",
                "properties": {
                    "command": {
                        "type": "string",
                        "description": "The command you want to execute in python",
                    }
                },
                "required": ["command"]
            },
        },
        {
            "name": "finish",
            "description": """use this to signal that you have finished all your goals and remember show your
results""",
            "parameters": {
                "type": "object",
                "properties": {
                    "response": {
                        "type": "string",
                        "description": "final response to let people know you have finished your goals and remember "
                                       "show your results",
                    },
                },
                "required": ["response"],
            },
        },
    ]
    return functions
4,498 | import sys
from io import StringIO
import functools
import logging
import ast
from typing import Dict, Optional
logger = logging.getLogger(__name__)


def warn_once() -> None:
    """Emit the arbitrary-code-execution warning for the Python REPL tool."""
    # Warn that the PythonREPL executes whatever it is given.
    logger.warning("Python REPL can execute arbitrary code. Use with caution.")
4,499 | from promptflow import tool
The provided code snippet includes necessary dependencies for implementing the `generate_goal` function. Write a Python function `def generate_goal(items: list = []) -> str` to solve the following problem:
Generate a numbered list from given items based on the item_type. Args: items (list): A list of items to be numbered. Returns: str: The formatted numbered list.
Here is the function:
def generate_goal(items: list = None) -> str:
    """
    Generate a numbered list from given items based on the item_type.
    Args:
        items (list): A list of items to be numbered.
    Returns:
        str: The formatted numbered list.
    """
    # Fix: the original used a mutable default ([]), which is shared across
    # calls; None is the conventional sentinel and behaves identically here.
    if items is None:
        items = []
    return "\n".join(f"{i + 1}. {item}" for i, item in enumerate(items))
4,500 | from typing import Union
from promptflow import tool
from promptflow.connections import AzureOpenAIConnection, OpenAIConnection
def search(entity: str, count: int = 10):
    """
    The input is an exact entity name. The action will search this entity name on Wikipedia and returns the first
    count sentences if it exists. If not, it will return some related entities to search next.
    """
    # NOTE(review): depends on module-level `requests`, `BeautifulSoup`,
    # `decode_str`, `remove_nested_parentheses` and `get_page_sentence` —
    # confirm these are imported/defined in the full module.
    entity_ = entity.replace(" ", "+")
    search_url = f"https://en.wikipedia.org/w/index.php?search={entity_}"
    headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) "
                      "Chrome/113.0.0.0 Safari/537.36 Edg/113.0.1774.35"
    }
    response_text = requests.get(search_url, headers=headers).text
    soup = BeautifulSoup(response_text, features="html.parser")
    result_divs = soup.find_all("div", {"class": "mw-search-result-heading"})
    if result_divs:  # mismatch
        # Landed on a search-results page: report the top similar titles.
        result_titles = [decode_str(div.get_text().strip()) for div in result_divs]
        result_titles = [remove_nested_parentheses(result_title) for result_title in result_titles]
        obs = f"Could not find {entity}. Similar: {result_titles[:5]}."
    else:
        page_content = [p_ul.get_text().strip() for p_ul in soup.find_all("p") + soup.find_all("ul")]
        if any("may refer to:" in p for p in page_content):
            # Disambiguation page — retry with a bracketed query.
            obs = search("[" + entity + "]")
        else:
            page = ""
            for content in page_content:
                # Skip fragments of fewer than three words (navigation noise).
                if len(content.split(" ")) > 2:
                    page += decode_str(content)
                    if not content.endswith("\n"):
                        page += "\n"
            obs = get_page_sentence(page, count=count)
    return obs
def python(command: str):
    """
    A Python shell. Use this to execute python commands. Input should be a valid python command.
    If you want to see the output of a value, you should print it out with `print(...)`.
    """
    # Strip surrounding whitespace and markdown code fences before execution.
    # NOTE(review): relies on a module-level `python_repl` instance — confirm
    # it is defined in the full module.
    command = command.strip().strip("```")
    return python_repl.run(command)
class AutoGPT:
    """Minimal AutoGPT-style agent loop over promptflow OpenAI connections.

    NOTE(review): relies on module-level helpers generate_context,
    count_message_tokens, create_chat_message, construct_prompt, parse_reply,
    count_string_tokens, aoai_chat, openai_chat and autogpt_logger — confirm
    they are defined/imported in the full module.
    """

    def __init__(
        self,
        connection,
        tools,
        full_message_history,
        functions,
        system_prompt=None,
        triggering_prompt=None,
        user_prompt=None,
        model_or_deployment_name=None
    ):
        # tools: callables the model may invoke via function_call.
        self.tools = tools
        # full_message_history: mutated in place as the conversation grows.
        self.full_message_history = full_message_history
        self.functions = functions
        self.system_prompt = system_prompt
        self.connection = connection
        self.model_or_deployment_name = model_or_deployment_name
        self.triggering_prompt = triggering_prompt
        self.user_prompt = user_prompt

    def chat_with_ai(self, token_limit):
        """Interact with the OpenAI API, sending the prompt, message history and functions."""
        # Reserve 1000 tokens for the response
        send_token_limit = token_limit - 1000
        (
            next_message_to_add_index,
            current_tokens_used,
            insertion_index,
            current_context,
        ) = generate_context(self.system_prompt, self.full_message_history, self.user_prompt)
        # Account for user input (appended later)
        current_tokens_used += count_message_tokens([create_chat_message("user", self.triggering_prompt)])
        current_tokens_used += 500  # Account for memory (appended later)
        # Add Messages until the token limit is reached or there are no more messages to add.
        while next_message_to_add_index >= 0:
            message_to_add = self.full_message_history[next_message_to_add_index]
            tokens_to_add = count_message_tokens([message_to_add])
            if current_tokens_used + tokens_to_add > send_token_limit:
                break
            # Add the most recent message to the start of the current context, after the two system prompts.
            current_context.insert(
                insertion_index, self.full_message_history[next_message_to_add_index]
            )
            # Count the currently used tokens
            current_tokens_used += tokens_to_add
            # Move to the next most recent message in the full message history
            next_message_to_add_index -= 1
        # Append user input, the length of this is accounted for above
        current_context.extend([create_chat_message("user", self.triggering_prompt)])
        # Calculate remaining tokens
        tokens_remaining = token_limit - current_tokens_used
        current_context = construct_prompt(current_context)
        if isinstance(self.connection, AzureOpenAIConnection):
            try:
                response = aoai_chat(
                    connection=self.connection,
                    prompt=current_context,
                    deployment_name=self.model_or_deployment_name,
                    max_tokens=tokens_remaining,
                    functions=self.functions)
                return response
            except Exception as e:
                # Re-raise deployment-name misconfiguration with a clearer message.
                if "The API deployment for this resource does not exist" in str(e):
                    raise Exception(
                        "Please fill in the deployment name of your Azure OpenAI resource gpt-4 model.")
        elif isinstance(self.connection, OpenAIConnection):
            response = openai_chat(
                connection=self.connection,
                prompt=current_context,
                model=self.model_or_deployment_name,
                max_tokens=tokens_remaining,
                functions=self.functions)
            return response
        else:
            raise ValueError("Connection must be an instance of AzureOpenAIConnection or OpenAIConnection")

    def run(self):
        """Drive the agent loop until the model calls `finish`; returns its final response."""
        tools = {t.__name__: t for t in self.tools}
        while True:
            # Send message to AI, get response
            response = self.chat_with_ai(token_limit=4000)
            if "function_call" in response:
                # Update full message history
                function_name = response["function_call"]["name"]
                parsed_output = parse_reply(response["function_call"]["arguments"])
                if "Error" in parsed_output:
                    error_message = parsed_output["Error"]
                    autogpt_logger.info(f"Error: {error_message}")
                    command_result = f"Error: {error_message}"
                else:
                    autogpt_logger.info(f"Function generation requested, function = {function_name}, args = "
                                        f"{parsed_output}")
                    self.full_message_history.append(
                        create_chat_message("assistant", f"Function generation requested, function = {function_name}, "
                                                         f"args = {parsed_output}")
                    )
                    # `finish` terminates the loop and returns the model's answer.
                    if function_name == "finish":
                        response = parsed_output["response"]
                        autogpt_logger.info(f"Responding to user: {response}")
                        return response
                    if function_name in tools:
                        tool = tools[function_name]
                        try:
                            autogpt_logger.info(f"Next function = {function_name}, arguments = {parsed_output}")
                            result = tool(**parsed_output)
                            command_result = f"Executed function {function_name} and returned: {result}"
                        except Exception as e:
                            command_result = (
                                f"Error: {str(e)}, {type(e).__name__}"
                            )
                        # Keep oversized tool output from blowing the next prompt's budget.
                        result_length = count_string_tokens(command_result)
                        if result_length + 600 > 4000:
                            command_result = f"Failure: function {function_name} returned too much output. Do not " \
                                             f"execute this function again with the same arguments."
                    else:
                        command_result = f"Unknown function '{function_name}'. Please refer to available functions " \
                                         f"defined in functions parameter."
                # Append command result to the message history
                self.full_message_history.append(create_chat_message("function", str(command_result), function_name))
                autogpt_logger.info(f"function: {command_result}")
            else:
                autogpt_logger.info(f"No function generated, returned: {response['content']}")
                self.full_message_history.append(
                    create_chat_message("assistant", f"No function generated, returned: {response['content']}")
                )
def autogpt_easy_start(connection: Union[AzureOpenAIConnection, OpenAIConnection], system_prompt: str, user_prompt: str,
                       triggering_prompt: str, functions: list, model_or_deployment_name: str):
    """Construct an AutoGPT agent wired to the wiki-search and REPL tools, then run it."""
    from wiki_search import search
    from python_repl import python
    from autogpt_class import AutoGPT

    agent = AutoGPT(
        connection=connection,
        tools=[search, python],
        full_message_history=[],
        functions=functions,
        system_prompt=system_prompt,
        triggering_prompt=triggering_prompt,
        user_prompt=user_prompt,
        model_or_deployment_name=model_or_deployment_name,
    )
    return agent.run()
4,501 | import os
from openai.version import VERSION as OPENAI_VERSION
from dotenv import load_dotenv
from promptflow import tool
def to_bool(value) -> bool:
    """Interpret *value* as a bool: True only for the string "true" (case-insensitive)."""
    normalized = str(value).lower()
    return normalized == "true"
def get_client():
    """Build an OpenAI or AzureOpenAI client from environment variables.

    Keys beginning with "sk-" are treated as openai.com keys; anything else is
    assumed to be an Azure key and gets endpoint/api-version configuration.
    Requires the openai>=1.0 client API.
    """
    if OPENAI_VERSION.startswith("0."):
        raise Exception(
            "Please upgrade your OpenAI package to version >= 1.0.0 or using the command: pip install --upgrade openai."
        )
    api_key = os.environ["AZURE_OPENAI_API_KEY"]
    conn = dict(
        api_key=os.environ["AZURE_OPENAI_API_KEY"],
    )
    if api_key.startswith("sk-"):
        from openai import OpenAI as Client
    else:
        from openai import AzureOpenAI as Client
        # Azure clients additionally need the endpoint and an API version.
        conn.update(
            azure_endpoint=os.environ["AZURE_OPENAI_API_BASE"],
            api_version=os.environ.get("OPENAI_API_VERSION", "2023-07-01-preview"),
        )
    return Client(**conn)
def my_python_tool(
    prompt: str,
    # for AOAI, deployment name is customized by user, not model name.
    deployment_name: str,
    suffix: str = None,
    max_tokens: int = 120,
    temperature: float = 1.0,
    top_p: float = 1.0,
    n: int = 1,
    logprobs: int = None,
    echo: bool = False,
    stop: list = None,
    presence_penalty: float = 0,
    frequency_penalty: float = 0,
    best_of: int = 1,
    logit_bias: dict = {},
    user: str = "",
    **kwargs,
) -> str:
    """Call the (Azure) OpenAI completions API and return the first choice's text.

    Credentials come from the environment, optionally loaded from a .env file.
    Numeric arguments are coerced because the flow runtime may deliver strings.
    NOTE(review): mutable default `logit_bias={}` is shared across calls; it is
    only read here, but confirm no caller mutates it.
    """
    if "AZURE_OPENAI_API_KEY" not in os.environ:
        # load environment variables from .env file
        load_dotenv()
    if "AZURE_OPENAI_API_KEY" not in os.environ:
        raise Exception("Please specify environment variables: AZURE_OPENAI_API_KEY")
    # TODO: remove below type conversion after client can pass json rather than string.
    echo = to_bool(echo)
    response = get_client().completions.create(
        prompt=prompt,
        model=deployment_name,
        # empty string suffix should be treated as None.
        suffix=suffix if suffix else None,
        max_tokens=int(max_tokens),
        temperature=float(temperature),
        top_p=float(top_p),
        n=int(n),
        logprobs=int(logprobs) if logprobs else None,
        echo=echo,
        # fix bug "[] is not valid under any of the given schemas-'stop'"
        stop=stop if stop else None,
        presence_penalty=float(presence_penalty),
        frequency_penalty=float(frequency_penalty),
        best_of=int(best_of),
        # Logit bias must be a dict if we passed it to openai api.
        logit_bias=logit_bias if logit_bias else {},
        user=user,
    )
    # get first element because prompt is single.
    return response.choices[0].text
4,502 | import io
from promptflow import tool
from promptflow.contracts.multimedia import Image
from PIL import Image as PIL_Image
def passthrough(input_image: Image) -> Image:
    """Mirror the input image horizontally and return it as a PNG.

    NOTE(review): despite the name, this flips the image rather than passing
    it through unchanged — confirm the flow expects the flip.
    """
    image_stream = io.BytesIO(input_image)
    pil_image = PIL_Image.open(image_stream)
    flipped_image = pil_image.transpose(PIL_Image.FLIP_LEFT_RIGHT)
    # Re-encode to PNG regardless of the source format.
    buffer = io.BytesIO()
    flipped_image.save(buffer, format="PNG")
    return Image(buffer.getvalue(), mime_type="image/png")
4,503 | from promptflow import tool
def generate_result(llm_result="", default_result="") -> str:
    """Return llm_result when it is non-empty (truthy), otherwise the default."""
    return llm_result if llm_result else default_result
4,504 | from promptflow import tool
import random
def content_safety_check(text: str) -> bool:
    """Randomly pass/fail content safety — a placeholder for a real safety node.

    Fix: the original annotated the return type as ``str`` although it always
    returns a bool; the annotation now matches the behavior.
    """
    # You can use a content safety node to replace this tool.
    return random.choice([True, False])
4,505 | from promptflow import tool
def llm_result(question: str) -> str:
    """Return a canned answer standing in for an LLM node."""
    # You can use an LLM node to replace this tool.
    answer = (
        "Prompt flow is a suite of development tools designed to streamline "
        "the end-to-end development cycle of LLM-based AI applications."
    )
    return answer
4,506 | from promptflow import tool
def default_result(question: str) -> str:
    """Fallback answer for queries the LLM path could not handle."""
    return "I'm not familiar with your query: {}.".format(question)
4,508 | import difflib
import webbrowser
def show_diff(left_content, right_content, name="file"):
    """Write an HTML side-by-side diff of two strings and open it in a browser.

    The diff is saved as "<name>_diff.html" in the working directory.
    """
    differ = difflib.HtmlDiff()
    html = differ.make_file(
        left_content.splitlines(),
        right_content.splitlines(),
        "origin " + name,
        "new " + name,
        context=True,
        numlines=20,
    )
    html_name = name + "_diff.html"
    with open(html_name, "w+b") as fp:
        fp.write(html.encode())
    webbrowser.open(html_name)
4,509 | from promptflow import tool
from divider import Divider
from typing import List
class Divider:
language = 'py'
def divide_file(cls, text) -> List[str]:
matches = list(re.finditer(Settings.divide_file[Divider.language], text))
splitted_content = []
min_pos = matches[0].start() if len(matches) > 0 else len(text)
for i in range(len(matches)):
start = matches[i].start()
end = matches[i + 1].start() if i + 1 < len(matches) else len(text)
splitted_content.append(text[start:end])
if min_pos != 0:
splitted_content.insert(0, text[0:min_pos])
return splitted_content
def divide_half(cls, text) -> List[str]:
"""
Divide the content into two parts, but ensure that the function body is not split.
"""
_, pos = Divider.get_functions_and_pos(text)
if len(pos) > 1: # Divide the code into two parts and every part start with a function.
i = len(pos) // 2
return [text[0:pos[i][0]], text[pos[i][0]:]]
if len(pos) == 1: # Divide the code into two parts, [function define + body, other body].
body = text[pos[0][1]:]
body_lines = body.split('\n')
body_ten_lines = '\n'.join(body_lines[0:10])
return [text[0:pos[0][1]] + body_ten_lines, body[len(body_ten_lines):]]
return [text]
def get_functions_and_pos(cls, text):
matches = re.finditer(Settings.divide_func[Divider.language], text)
functions = []
pos = []
for match in matches:
matched_text = match.group().replace('\n', '')
func = re.sub(r' +', ' ', matched_text).replace(' :', ':')
func = re.sub(r'[\s,]+\)', ')', func)
func = re.sub(r'\([\s,]+', '(', func)
functions.append(func.strip())
pos.append((match.start(), match.end()))
return functions, pos
def combine(cls, divided: List[str]):
return ''.join(divided)
def merge_doc2code(cls, docstring: str, origin_code: str) -> str:
funcs1, pos1 = Divider.get_functions_and_pos(docstring)
funcs2, pos2 = Divider.get_functions_and_pos(origin_code)
pattern = r'""".*?"""'
code = origin_code if len(funcs2) == 0 else origin_code[0:pos2[0][0]]
pos1.append((len(docstring), len(docstring))) # avoid index out of range
pos2.append((len(origin_code), len(origin_code))) # avoid index out of range
for i2 in range(len(funcs2)): # add docstring for each function in origin_code
part_full_code = origin_code[pos2[i2][0]:pos2[i2 + 1][0]]
try:
i1 = funcs1.index(funcs2[i2])
except ValueError:
logging.warning(f"No docstring found for {funcs2[i2]}")
code += part_full_code
continue
new_doc = re.findall(pattern, docstring[pos1[i1][1]:pos1[i1 + 1][0]], re.DOTALL)
if new_doc:
func_line = origin_code[pos2[i2][0]:pos2[i2][1]].replace('\n', '')
empty_line_num = (len(func_line) - len(func_line.lstrip()) + 4)
func_body = origin_code[pos2[i2][1]:pos2[i2 + 1][0]]
code_doc = list(re.finditer(pattern, func_body, re.DOTALL))
format_new_doc = Divider.format_indentation(new_doc[0], empty_line_num)
is_replace_doc = len(code_doc) > 0 and (re.sub(r'\s+', '', func_body[0:code_doc[0].start()]) == '')
if is_replace_doc:
code += part_full_code.replace(code_doc[0].group(), format_new_doc.strip(), 1)
else:
code += origin_code[pos2[i2][0]:pos2[i2][1]] + '\n' + format_new_doc + '\n' + origin_code[
pos2[i2][1]:
pos2[i2 + 1][0]]
else:
code += part_full_code
return code
def format_indentation(cls, text, empty_line_num):
    """Re-indent a docstring block so it lines up at ``empty_line_num`` columns.

    The last line's leading spaces define the block's base indent; the first
    line is normalized to that base, then every line is shifted right by the
    difference to ``empty_line_num`` (never left), with trailing blanks removed.
    """
    rows = text.splitlines()
    base_indent = len(rows[-1]) - len(rows[-1].lstrip())
    pad = ' ' * max(empty_line_num - base_indent, 0)
    # Align the first row with the last row before applying the shift.
    rows[0] = ' ' * base_indent + rows[0].lstrip()
    return '\n'.join((pad + row).rstrip() for row in rows)
def has_class_or_func(cls, text):
    """Return True when *text* contains at least one class or function header."""
    found, _ = Divider.get_functions_and_pos(text)
    return bool(found)
def combine_code(divided: List[str]):
    """Reassemble the divided source fragments into the full file content."""
    return Divider.combine(divided)
4,510 | from promptflow import tool
from file import File
class File:
    """Wraps a source file that lives either on disk or behind an http(s) URL.

    Exposes the file's content (lazily read and cached), its language
    (extension), base name, directory and original source string, and can
    write generated output next to the original.
    """

    def __init__(self, source: str):
        self._source = source
        self._is_url = source.startswith("http://") or source.startswith("https://")
        if self._is_url:
            parsed_url = urlparse(source)
            path = parsed_url.path
        else:
            path = source
        self._path = os.path.normpath(os.path.abspath(path))
        self._dirname = os.path.dirname(self._path)
        # NOTE(review): split(".") assumes the basename has exactly one dot —
        # "archive.tar.gz" yields language "tar", and a dot-less name raises
        # IndexError. Confirm inputs always carry a single extension.
        self._filename = os.path.basename(self._path).split(".")[0]
        self._language = os.path.basename(self._path).split(".")[1]

    def _read_content(self):
        """Fetch the raw text from the URL or local path; return None on failure."""
        if self._is_url:
            response = requests.get(self.source)
            if response.status_code == 200:
                content = response.text
                return content
            else:
                print(f"Failed to retrieve content from URL: {self.source}")
                return None
        else:
            try:
                with open(self._path, "r") as file:
                    content = file.read()
                    return content
            except FileNotFoundError:
                print(f"File not found: {self.source}")
                return None

    @property
    def content(self) -> str:
        # Lazily read and cache. The guard previously tested "_text" while the
        # cache attribute is "_content", so every access re-read the file.
        if not hasattr(self, "_content"):
            self._content = self._read_content()
        return self._content

    @property
    def language(self) -> str:
        return self._language

    @property
    def filename(self) -> str:
        return self._filename

    @property
    def dirname(self) -> str:
        return self._dirname

    @property
    def source(self) -> str:
        return self._source

    def override_origin_file(self, content: str) -> None:
        """Overwrite the local file with *content*; URLs fall back to a new file."""
        if not self._is_url:
            with open(self._path, "w") as f:
                f.write(content)
        else:
            logging.warning(
                "Cannot override origin file from URL, create a new file instead."
            )
            self.create_new_file(content)

    def create_new_file(self, content: str) -> None:
        """Write *content* to ``<filename>_doc.<language>`` beside the source
        (or in the current directory for URL sources)."""
        if self._is_url:
            path = os.path.join(
                "./",
                self.filename + f"_doc.{self.language}",
            )
        else:
            path = os.path.join(
                self.dirname,
                self.filename + f"_doc.{self.language}",
            )
        with open(path, "w") as f:
            f.write(content)
def load_code(source: str):
    """Load and return the content of the file referenced by *source*."""
    return File(source).content
4,511 | import ast
import asyncio
import logging
import os
import sys
from typing import Union, List
from promptflow import tool
from azure_open_ai import ChatLLM
from divider import Divider
from prompt import docstring_prompt, PromptLimitException
from promptflow.connections import AzureOpenAIConnection, OpenAIConnection
def get_imports(content):
    """Collect the import statements declared anywhere in *content*.

    Parses the source with ``ast`` and rebuilds one statement per imported
    name. Fixes over the previous version: ``as`` aliases are preserved
    (dropping them produced statements that bind different names), and
    relative imports keep their leading dots instead of rendering as
    ``from None import x``.

    :param content: Python source code to scan.
    :return: list of single-name import statements, in source order.
    """
    tree = ast.parse(content)
    import_statements = []
    for node in ast.walk(tree):
        if isinstance(node, ast.Import):
            for n in node.names:
                alias = f" as {n.asname}" if n.asname else ""
                import_statements.append(f"import {n.name}{alias}")
        elif isinstance(node, ast.ImportFrom):
            # node.module is None for "from . import x"; node.level counts dots.
            module_name = "." * node.level + (node.module or "")
            for n in node.names:
                alias = f" as {n.asname}" if n.asname else ""
                import_statements.append(f"from {module_name} import {n.name}{alias}")
    return import_statements
4,512 | import ast
import asyncio
import logging
import os
import sys
from typing import Union, List
from promptflow import tool
from azure_open_ai import ChatLLM
from divider import Divider
from prompt import docstring_prompt, PromptLimitException
from promptflow.connections import AzureOpenAIConnection, OpenAIConnection
async def async_generate_docstring(divided: List[str]):
    """Generate docstrings for each code chunk concurrently via the LLM.

    Chunks that exceed the prompt token limit are split in half and retried;
    chunks with no class/function are passed through untouched. The returned
    list preserves the input order, with docstrings merged into each chunk.

    :param divided: code chunks produced by Divider.divide_file.
    :return: list of chunks with generated docstrings merged in.
    """
    llm = ChatLLM()
    # Reverse so that pop() consumes chunks in their original file order.
    divided = list(reversed(divided))
    all_divided = []
    # If too many imports result in tokens exceeding the limit, please set an empty string.
    modules = ''  # '\n'.join(get_imports(divided[-1]))
    modules_tokens = llm.count_tokens(modules)
    if modules_tokens > 300:
        logging.warning(f'Too many imports, the number of tokens is {modules_tokens}')
    if modules_tokens > 500:
        logging.warning(f'Too many imports, the number of tokens is {modules_tokens}, will set an empty string.')
        modules = ''
    # Divide the code into two parts if the global class/function is too long.
    while len(divided):
        item = divided.pop()
        try:
            llm.validate_tokens(llm.create_prompt(docstring_prompt(code=item, module=modules)))
        except PromptLimitException as e:
            logging.warning(e.message + ', will divide the code into two parts.')
            divided_tmp = Divider.divide_half(item)
            if len(divided_tmp) > 1:
                # Push both halves back (reversed) so they are re-validated in order.
                divided.extend(list(reversed(divided_tmp)))
                continue
        except Exception as e:
            logging.warning(e)
        all_divided.append(item)
    tasks = []
    last_code = ''
    for item in all_divided:
        if Divider.has_class_or_func(item):
            tasks.append(llm.async_query(docstring_prompt(last_code=last_code, code=item, module=modules)))
        else:  # If the code has no function or class, no need to generate a docstring.
            # asyncio.sleep(0) keeps tasks index-aligned with all_divided.
            tasks.append(asyncio.sleep(0))
        last_code = item
    res_doc = await asyncio.gather(*tasks)
    new_code = []
    for i in range(len(all_divided)):
        # Only real LLM answers are strings; sleep(0) placeholders return None.
        if type(res_doc[i]) is str:
            new_code.append(Divider.merge_doc2code(res_doc[i], all_divided[i]))
        else:
            new_code.append(all_divided[i])
    return new_code
def generate_docstring(divided: List[str],
                       connection: Union[AzureOpenAIConnection, OpenAIConnection] = None,
                       model: str = None):
    """Configure the LLM environment from *connection* and run the async
    docstring generation over the divided code chunks.

    :param divided: code chunks to document.
    :param connection: Azure OpenAI or OpenAI connection supplying credentials.
    :param model: optional model/deployment name override.
    :return: list of chunks with docstrings merged in.
    """
    if isinstance(connection, AzureOpenAIConnection):
        env_settings = {
            "OPENAI_API_KEY": connection.api_key,
            "OPENAI_API_BASE": connection.api_base,
            "OPENAI_API_VERSION": connection.api_version,
            "API_TYPE": connection.api_type,
        }
    elif isinstance(connection, OpenAIConnection):
        env_settings = {
            "OPENAI_API_KEY": connection.api_key,
            "ORGANIZATION": connection.organization,
        }
    else:
        env_settings = {}
    os.environ.update(env_settings)
    if model:
        os.environ["MODEL"] = model
    # Force the selector event loop policy on Windows.
    if sys.platform.startswith("win"):
        asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())
    return asyncio.run(async_generate_docstring(divided))
4,513 | from promptflow import tool
from divider import Divider
class Divider:
    # Interface summary (method bodies elided in this snippet): Divider splits
    # source code into global import/class/function chunks and merges
    # LLM-generated docstrings back into the original code.
    def divide_file(cls, text) -> List[str]:
    def divide_half(cls, text) -> List[str]:
    def get_functions_and_pos(cls, text):
    def combine(cls, divided: List[str]):
    def merge_doc2code(cls, docstring: str, origin_code: str) -> str:
    def format_indentation(cls, text, empty_line_num):
    def has_class_or_func(cls, text):
def divide_code(file_content: str):
    """Split file content into chunks at global import/class/function boundaries."""
    return Divider.divide_file(file_content)
4,514 | from promptflow import tool
def order_search(query: str) -> str:
    """Mock order-lookup tool: logs the query and returns a canned status."""
    message = f"Your query is {query}.\nSearching for order..."
    print(message)
    return "Your order is being mailed, please wait patiently."
4,515 | from promptflow import tool
def product_info(query: str) -> str:
    """Mock product-info tool: logs the query and returns a canned answer."""
    message = f"Your query is {query}.\nLooking for product information..."
    print(message)
    return "This product is produced by Microsoft."
4,516 | from promptflow import tool
def generate_response(order_search="", product_info="", product_recommendation="") -> str:
    """Return the first non-empty tool answer, or a default apology when all
    three tool outputs are empty."""
    for candidate in (order_search, product_info, product_recommendation):
        if candidate:
            return candidate
    return "Sorry, no results matching your search were found."
4,517 | from promptflow import tool
def product_recommendation(query: str) -> str:
    """Mock recommendation tool: logs the query and returns a canned pitch."""
    message = f"Your query is {query}.\nRecommending products..."
    print(message)
    return "I recommend promptflow to you, which can solve your problem very well."
4,518 | from promptflow import tool
def class_check(llm_result: str) -> str:
    """Map the LLM's free-text answer onto a known intent label.

    Candidates are checked in a fixed priority order; "unknown" when none of
    them appears in the (lower-cased) answer.
    """
    lowered = llm_result.lower()
    for intention in ("order_search", "product_info", "product_recommendation"):
        if intention in lowered:
            return intention
    return "unknown"
4,519 | import bs4
import requests
from promptflow import tool
def fetch_text_content_from_url(url: str):
    """Download *url* and return up to the first 2000 characters of its text.

    HTTP failures and exceptions are reported on stdout and the placeholder
    "No available content" is returned instead of raising.
    """
    headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) "
        "Chrome/113.0.0.0 Safari/537.36 Edg/113.0.1774.35"
    }
    try:
        response = requests.get(url, headers=headers)
        if response.status_code != 200:
            msg = (
                f"Get url failed with status code {response.status_code}.\nURL: {url}\nResponse: "
                f"{response.text[:100]}"
            )
            print(msg)
            return "No available content"
        # Parse the HTML content using BeautifulSoup
        soup = bs4.BeautifulSoup(response.text, "html.parser")
        soup.prettify()
        return soup.get_text()[:2000]
    except Exception as e:
        print("Get url failed with error: {}".format(e))
        return "No available content"
4,520 | import json
from promptflow import tool
def convert_to_dict(input_str: str):
    """Parse *input_str* as JSON; fall back to a stub result on any failure."""
    fallback = {"category": "None", "evidence": "None"}
    try:
        return json.loads(input_str)
    except Exception as e:
        print("The input is not valid, error: {}".format(e))
        return fallback
4,521 | from promptflow import tool
def prepare_examples():
    """Return few-shot examples for the URL-classification prompt.

    Each example carries: url, text_content (a summary of the page), category
    (App / Channel / Academic / None) and evidence (which signal supported the
    classification: URL, Text content, Both or None).
    """
    return [
        {
            "url": "https://play.google.com/store/apps/details?id=com.spotify.music",
            "text_content": "Spotify is a free music and podcast streaming app with millions of songs, albums, and "
            "original podcasts. It also offers audiobooks, so users can enjoy thousands of stories. "
            "It has a variety of features such as creating and sharing music playlists, discovering "
            "new music, and listening to popular and exclusive podcasts. It also has a Premium "
            "subscription option which allows users to download and listen offline, and access "
            "ad-free music. It is available on all devices and has a variety of genres and artists "
            "to choose from.",
            "category": "App",
            "evidence": "Both",
        },
        {
            "url": "https://www.youtube.com/channel/UC_x5XG1OV2P6uZZ5FSM9Ttw",
            "text_content": "NFL Sunday Ticket is a service offered by Google LLC that allows users to watch NFL "
            "games on YouTube. It is available in 2023 and is subject to the terms and privacy policy "
            "of Google LLC. It is also subject to YouTube's terms of use and any applicable laws.",
            "category": "Channel",
            "evidence": "URL",
        },
        {
            "url": "https://arxiv.org/abs/2303.04671",
            "text_content": "Visual ChatGPT is a system that enables users to interact with ChatGPT by sending and "
            "receiving not only languages but also images, providing complex visual questions or "
            "visual editing instructions, and providing feedback and asking for corrected results. "
            "It incorporates different Visual Foundation Models and is publicly available. Experiments "
            "show that Visual ChatGPT opens the door to investigating the visual roles of ChatGPT with "
            "the help of Visual Foundation Models.",
            "category": "Academic",
            "evidence": "Text content",
        },
        {
            "url": "https://ab.politiaromana.ro/",
            "text_content": "There is no content available for this text.",
            "category": "None",
            "evidence": "None",
        },
    ]
4,522 | from promptflow import tool
from promptflow.connections import CustomStrongTypeConnection
from promptflow.contracts.types import Secret
class MyCustomConnection(CustomStrongTypeConnection):
def my_tool(connection: MyCustomConnection, input_text: str) -> str:
    """Demo tool: greets *input_text*. A real implementation would use the
    strong-type connection fields (connection.api_key, connection.api_base)."""
    greeting = "Hello " + input_text
    return greeting
4,523 |
def hello(input_text: str) -> str:
    """Return a greeting for *input_text* (placeholder implementation)."""
    greeting = "Hello " + input_text
    return greeting
4,524 | from promptflow import tool
from typing import List, Union, Dict
The provided code snippet includes necessary dependencies for implementing the `my_list_func` function. Write a Python function `def my_list_func(prefix: str = "", size: int = 10, **kwargs) -> List[Dict[str, Union[str, int, float, list, Dict]]]` to solve the following problem:
This is a dummy function to generate a list of items. :param prefix: prefix to add to each item. :param size: number of items to generate. :param kwargs: other parameters. :return: a list of items. Each item is a dict with the following keys: - value: for backend use. Required. - display_value: for UI display. Optional. - hyperlink: external link. Optional. - description: information icon tip. Optional.
Here is the function:
def my_list_func(prefix: str = "", size: int = 10, **kwargs) -> List[Dict[str, Union[str, int, float, list, Dict]]]:
    """This is a dummy function to generate a list of items.
    :param prefix: prefix to add to each item.
    :param size: number of items to generate.
    :param kwargs: other parameters.
    :return: a list of items. Each item is a dict with the following keys:
        - value: for backend use. Required.
        - display_value: for UI display. Optional.
        - hyperlink: external link. Optional.
        - description: information icon tip. Optional.
    """
    import random
    words = ["apple", "banana", "cherry", "date", "elderberry", "fig", "grape", "honeydew", "kiwi", "lemon"]

    def build_item(index):
        # One random word per item; the index suffix keeps values distinct.
        token = f"{random.choice(words)}{index}"
        return {
            "value": token,
            "display_value": f"{prefix}_{token}",
            "hyperlink": f'https://www.bing.com/search?q={token}',
            "description": f"this is {index} item",
        }

    return [build_item(i) for i in range(size)]
4,525 | from promptflow import tool
from typing import List, Union, Dict
The provided code snippet includes necessary dependencies for implementing the `list_endpoint_names` function. Write a Python function `def list_endpoint_names(subscription_id: str = None, resource_group_name: str = None, workspace_name: str = None, prefix: str = "") -> List[Dict[str, str]]` to solve the following problem:
This is an example to show how to get Azure ML resource in tool input list function. :param subscription_id: Azure subscription id. :param resource_group_name: Azure resource group name. :param workspace_name: Azure ML workspace name. :param prefix: prefix to add to each item.
Here is the function:
def list_endpoint_names(subscription_id: str = None,
                        resource_group_name: str = None,
                        workspace_name: str = None,
                        prefix: str = "") -> List[Dict[str, str]]:
    """Example of listing Azure ML online endpoints for a tool-input dropdown.

    :param subscription_id: Azure subscription id.
    :param resource_group_name: Azure resource group name.
    :param workspace_name: Azure ML workspace name.
    :param prefix: prefix to add to each item's display value.
    """
    # The workspace triad must be complete before we can talk to Azure ML.
    if not (subscription_id and resource_group_name and workspace_name):
        return []
    from azure.ai.ml import MLClient
    from azure.identity import DefaultAzureCredential
    credential = DefaultAzureCredential()
    credential.get_token("https://management.azure.com/.default")
    ml_client = MLClient(
        credential=credential,
        subscription_id=subscription_id,
        resource_group_name=resource_group_name,
        workspace_name=workspace_name)
    entries = []
    for endpoint in ml_client.online_endpoints.list():
        link = (
            f"https://ml.azure.com/endpoints/realtime/{endpoint.name}/detail?wsid=/subscriptions/"
            f"{subscription_id}/resourceGroups/{resource_group_name}/providers/Microsoft."
            f"MachineLearningServices/workspaces/{workspace_name}"
        )
        entries.append({
            "value": endpoint.name,
            "display_value": f"{prefix}_{endpoint.name}",
            # external link to jump to the endpoint page.
            "hyperlink": link,
            "description": f"this is endpoint: {endpoint.name}",
        })
    return entries
4,526 | from promptflow import tool
from typing import List, Union, Dict
def my_tool(input_prefix: str, input_text: list, endpoint_name: str) -> str:
    """Format a greeting from the prefix, the comma-joined text items and the
    selected endpoint name."""
    joined = ",".join(input_text)
    return f"Hello {input_prefix} {joined} {endpoint_name}"
4,527 | from pathlib import Path
from ruamel.yaml import YAML
def collect_tools_from_directory(base_dir) -> dict:
    """Load every tool definition from the YAML files under *base_dir*.

    A duplicate tool identifier is overwritten by whichever file is read last.
    """
    tools = {}
    yaml = YAML()
    for yaml_path in Path(base_dir).glob("**/*.yaml"):
        with open(yaml_path, "r") as stream:
            for identifier, tool in yaml.load(stream).items():
                tools[identifier] = tool
    return tools
The provided code snippet includes necessary dependencies for implementing the `list_package_tools` function. Write a Python function `def list_package_tools()` to solve the following problem:
List package tools
Here is the function:
def list_package_tools():
    """List package tools collected from the package's ``yamls`` directory."""
    return collect_tools_from_directory(Path(__file__).parents[1] / "yamls")
4,528 | from promptflow import tool
from promptflow.connections import CustomStrongTypeConnection
from promptflow.contracts.types import Secret
class MyCustomConnection(CustomStrongTypeConnection):
    """My custom strong type connection.

    :param api_key: The api key, obtained from "https://xxx.com".
    :type api_key: Secret
    :param api_base: The api base.
    :type api_base: String
    """
    # Secret-typed field; presumably masked by the connection framework — confirm.
    api_key: Secret
    # Placeholder default; set to the real service endpoint in actual use.
    api_base: str = "This is a fake api base."
def my_tool(connection: MyCustomConnection, input_text: str) -> str:
    """Demo tool that greets *input_text*; a real tool would call the service
    behind ``connection.api_base`` using ``connection.api_key``."""
    greeting = "Hello " + input_text
    return greeting
4,529 | from enum import Enum
from promptflow import tool
class UserType(str, Enum):
    """Closed set of user roles; str-valued, so members compare equal to
    their plain-string form (e.g. UserType.STUDENT == "student")."""
    STUDENT = "student"
    TEACHER = "teacher"
The provided code snippet includes necessary dependencies for implementing the `my_tool` function. Write a Python function `def my_tool(user_type: Enum, student_id: str = "", teacher_id: str = "") -> str` to solve the following problem:
This is a dummy function to support cascading inputs. :param user_type: user type, student or teacher. :param student_id: student id. :param teacher_id: teacher id. :return: id of the user. If user_type is student, return student_id. If user_type is teacher, return teacher_id.
Here is the function:
def my_tool(user_type: Enum, student_id: str = "", teacher_id: str = "") -> str:
    """This is a dummy function to support cascading inputs.

    :param user_type: user type, student or teacher.
    :param student_id: student id.
    :param teacher_id: teacher id.
    :return: id of the user.
        If user_type is student, return student_id.
        If user_type is teacher, return teacher_id.
    :raises ValueError: if user_type is neither student nor teacher.
    """
    if user_type == UserType.STUDENT:
        return student_id
    elif user_type == UserType.TEACHER:
        return teacher_id
    else:
        # ValueError (a subclass of Exception, so existing handlers still
        # catch it) is more precise than the former bare Exception.
        raise ValueError("Invalid user.")
4,530 | from promptflow import tool
from promptflow.connections import CustomConnection
def my_tool(connection: CustomConnection, input_text: str) -> str:
    """Demo tool that greets *input_text*.

    The connection is a dict-like bag of API configs (connection.api_key,
    connection.api_base); it is unused in this placeholder and may be removed
    by tools that do not need one.
    """
    greeting = "Hello " + input_text
    return greeting
4,531 | from jinja2 import Template
from promptflow import tool
from promptflow.connections import CustomConnection
from promptflow.contracts.types import PromptTemplate
def my_tool(
    connection: CustomConnection,
    api: str,
    deployment_name: str,
    temperature: float,
    prompt: PromptTemplate,
    **kwargs
) -> str:
    """Render the prompt template with the supplied keyword arguments.

    The connection/api/deployment/temperature parameters are placeholders for
    a real LLM call; only the Jinja rendering is implemented here.
    """
    template = Template(prompt, trim_blocks=True, keep_trailing_newline=True)
    return template.render(**kwargs)
4,532 | import importlib
from pathlib import Path
from promptflow import tool
from promptflow.contracts.types import FilePath
def my_tool(input_file: FilePath, input_text: str) -> str:
    """Import the module at *input_file* and invoke its ``hello`` with *input_text*.

    Assumes the file's directory is already on sys.path so the module resolves
    by its stem name.
    """
    module_name = Path(input_file).stem
    return importlib.import_module(module_name).hello(input_text)
4,533 | from typing import Union
from promptflow import tool
from typing import Dict, List
from promptflow.connections import AzureOpenAIConnection, OpenAIConnection, CognitiveSearchConnection
The provided code snippet includes necessary dependencies for implementing the `generate_index_json` function. Write a Python function `def generate_index_json( index_type: str, index: str = "", index_connection: CognitiveSearchConnection = "", index_name: str = "", content_field: str = "", embedding_field: str = "", metadata_field: str = "", semantic_configuration: str = "", embedding_connection: Union[AzureOpenAIConnection, OpenAIConnection] = "", embedding_deployment: str = "" ) -> str` to solve the following problem:
This is a dummy function to generate an index JSON string based on the inputs.
Here is the function:
def generate_index_json(
    index_type: str,
    index: str = "",
    index_connection: CognitiveSearchConnection = "",
    index_name: str = "",
    content_field: str = "",
    embedding_field: str = "",
    metadata_field: str = "",
    semantic_configuration: str = "",
    embedding_connection: Union[AzureOpenAIConnection, OpenAIConnection] = "",
    embedding_deployment: str = ""
) -> str:
    """This is a dummy function to generate an index JSON string based on the inputs.

    Unrecognized index types serialize to the JSON empty string.
    """
    import json
    if index_type == "Azure Cognitive Search":
        # 1. Call to create a new index
        # 2. Call to get the index yaml and return as a json
        payload = {
            "index_type": index_type,
            "index": "retrieved_index",
            "index_connection": index_connection,
            "index_name": index_name,
            "content_field": content_field,
            "embedding_field": embedding_field,
            "metadata_field": metadata_field,
            "semantic_configuration": semantic_configuration,
            "embedding_connection": embedding_connection,
            "embedding_deployment": embedding_deployment
        }
    elif index_type == "Workspace MLIndex":
        # Call to get the index yaml and return as a json
        payload = {
            "index_type": index_type,
            "index": index,
            "index_connection": "retrieved_index_connection",
            "index_name": "retrieved_index_name",
            "content_field": "retrieved_content_field",
            "embedding_field": "retrieved_embedding_field",
            "metadata_field": "retrieved_metadata_field",
            "semantic_configuration": "retrieved_semantic_configuration",
            "embedding_connection": "retrieved_embedding_connection",
            "embedding_deployment": "retrieved_embedding_deployment"
        }
    else:
        payload = ""
    return json.dumps(payload)
4,534 | from typing import Union
from promptflow import tool
from typing import Dict, List
from promptflow.connections import AzureOpenAIConnection, OpenAIConnection, CognitiveSearchConnection
The provided code snippet includes necessary dependencies for implementing the `reverse_generate_index_json` function. Write a Python function `def reverse_generate_index_json(index_json: str) -> Dict` to solve the following problem:
This is a dummy function to reconstruct the original inputs from index_json.
Here is the function:
def reverse_generate_index_json(index_json: str) -> Dict:
    """This is a dummy function to reconstruct the original UI inputs from
    the serialized index_json string."""
    import json
    # Calculate the UI inputs based on the index_json
    return json.loads(index_json)
4,535 | from typing import Union
from promptflow import tool
from typing import Dict, List
from promptflow.connections import AzureOpenAIConnection, OpenAIConnection, CognitiveSearchConnection
def list_index_types(subscription_id, resource_group_name, workspace_name) -> List[str]:
    """Enumerate the index kinds the UI offers (workspace args unused here)."""
    kinds = (
        "Azure Cognitive Search",
        "PineCone",
        "FAISS",
        "Workspace MLIndex",
        "MLIndex from path",
    )
    return [{"value": kind} for kind in kinds]
4,536 | from typing import Union
from promptflow import tool
from typing import Dict, List
from promptflow.connections import AzureOpenAIConnection, OpenAIConnection, CognitiveSearchConnection
def list_indexes(
    subscription_id,
    resource_group_name,
    workspace_name
) -> List[Dict[str, Union[str, int, float, list, Dict]]]:
    """Produce ten dummy index entries for UI selection (workspace args unused)."""
    import random
    words = ["apple", "banana", "cherry", "date", "elderberry", "fig", "grape", "honeydew", "kiwi", "lemon"]
    entries = []
    for idx in range(10):
        token = f"{random.choice(words)}{idx}"
        entries.append({
            "value": token,
            "display_value": f"index_{token}",
            "hyperlink": f'https://www.bing.com/search?q={token}',
            "description": f"this is {idx} item",
        })
    return entries
4,537 | from typing import Union
from promptflow import tool
from typing import Dict, List
from promptflow.connections import AzureOpenAIConnection, OpenAIConnection, CognitiveSearchConnection
def list_fields(subscription_id, resource_group_name, workspace_name) -> List[str]:
    """Return the selectable index field names (workspace args unused here)."""
    field_names = (
        "id",
        "content",
        "catelog",
        "sourcepage",
        "sourcefile",
        "title",
        "content_hash",
        "meta_json_string",
        "content_vector_open_ai",
    )
    return [{"value": name} for name in field_names]
4,538 | from typing import Union
from promptflow import tool
from typing import Dict, List
from promptflow.connections import AzureOpenAIConnection, OpenAIConnection, CognitiveSearchConnection
def list_semantic_configuration(subscription_id, resource_group_name, workspace_name) -> List[str]:
    """Return the single supported semantic configuration name."""
    name = "azureml-default"
    return [{"value": name}]
4,539 | from typing import Union
from promptflow import tool
from typing import Dict, List
from promptflow.connections import AzureOpenAIConnection, OpenAIConnection, CognitiveSearchConnection
def list_embedding_deployment(embedding_connection: str) -> List[str]:
    """Return the embedding deployments selectable for the given connection."""
    deployments = ("text-embedding-ada-002", "ada-1k-tpm")
    return [{"value": d} for d in deployments]
4,540 | from typing import Union
from promptflow import tool
from typing import Dict, List
from promptflow.connections import AzureOpenAIConnection, OpenAIConnection, CognitiveSearchConnection
def my_tool(index_json: str, queries: str, top_k: int) -> str:
    """Placeholder search tool: echoes the index definition it was given."""
    greeting = f"Hello {index_json}"
    return greeting
4,541 | import argparse
import time
from pathlib import Path
import requests
from azure.ai.ml import MLClient, load_environment
from azure.identity import AzureCliCredential
def parse_args() -> argparse.Namespace:
    """Parse the command line; ``--path`` points at the workspace config.json."""
    cli = argparse.ArgumentParser()
    cli.add_argument("--path", help="Path to config.json", type=str)
    return cli.parse_args()
4,542 | import argparse
import time
from pathlib import Path
import requests
from azure.ai.ml import MLClient, load_environment
from azure.identity import AzureCliCredential
def init_ml_client(
    subscription_id: str,
    resource_group_name: str,
    workspace_name: str,
) -> MLClient:
    """Build an MLClient for the given workspace using the Azure CLI login."""
    workspace = {
        "subscription_id": subscription_id,
        "resource_group_name": resource_group_name,
        "workspace_name": workspace_name,
    }
    return MLClient(credential=AzureCliCredential(), **workspace)
4,543 | import argparse
import time
from pathlib import Path
import requests
from azure.ai.ml import MLClient, load_environment
from azure.identity import AzureCliCredential
ENVIRONMENT_YAML = Path(__file__).parent / "runtime-env" / "env.yaml"
def create_environment(ml_client: MLClient) -> str:
    """Register the runtime environment from ENVIRONMENT_YAML and return its
    workspace-scoped asset id.

    :param ml_client: authenticated client for the target workspace.
    :return: asset id of the form
        azureml://locations/<loc>/workspaces/<ws-id>/environments/<name>/versions/<ver>
    """
    environment = load_environment(source=ENVIRONMENT_YAML)
    env = ml_client.environments.create_or_update(environment)
    # have observed delay between environment creation and asset id availability
    # NOTE(review): polls forever with a fixed 10s sleep and swallows every
    # exception — consider a retry cap if the lookup can fail permanently.
    while True:
        try:
            ml_client.environments.get(name=env.name, version=env.version)
            break
        except Exception:
            time.sleep(10)
    # get workspace id from REST workspace object
    # (reaches into private MLClient attributes; may break on SDK upgrades)
    resource_group_name = ml_client._operation_scope.resource_group_name
    workspace_name = ml_client._operation_scope.workspace_name
    location = ml_client.workspaces.get().location
    workspace_id = ml_client._workspaces._operation.get(
        resource_group_name=resource_group_name, workspace_name=workspace_name
    ).workspace_id
    # concat environment asset id
    asset_id = (
        f"azureml://locations/{location}/workspaces/{workspace_id}"
        f"/environments/{env.name}/versions/{env.version}"
    )
    return asset_id
4,544 | import argparse
import json
import os
import re
from datetime import datetime, timedelta
from azure.storage.blob import (
AccountSasPermissions,
BlobServiceClient,
ContentSettings,
ResourceTypes,
generate_account_sas,
)
The provided code snippet includes necessary dependencies for implementing the `get_wheel_distribution_name` function. Write a Python function `def get_wheel_distribution_name(package_name)` to solve the following problem:
The wheel filename is {distribution}-{version}(-{build tag})?-{python tag}-{abi tag}-{platform tag}.whl. The distribution name is normalized from the package name.
Here is the function:
def get_wheel_distribution_name(package_name):
    """The wheel filename is {distribution}-{version}(-{build tag})?-{python tag}-{abi tag}-{platform tag}.whl.
    The distribution name is normalized from the package name."""
    # Map each of '.', '-' and ' ' to '_' in a single pass.
    return package_name.translate(str.maketrans(".- ", "___"))
4,545 | import argparse
import json
import os
import re
from datetime import datetime, timedelta
from azure.storage.blob import (
AccountSasPermissions,
BlobServiceClient,
ContentSettings,
ResourceTypes,
generate_account_sas,
)
def get_connection_string(storage_account, storage_key):
    """Build an Azure Storage connection string for the given account and key."""
    parts = (
        "DefaultEndpointsProtocol=https",
        f"AccountName={storage_account}",
        f"AccountKey={storage_key}",
        "EndpointSuffix=core.windows.net",
    )
    return ";".join(parts)
def get_object_sas_token(storage_account, storage_key):
    """Create a read-only, object-scope account SAS token valid for one year.

    :param storage_account: storage account name.
    :param storage_key: account access key used to sign the token.
    :return: SAS token string.
    """
    from datetime import timezone  # local import keeps the module imports untouched
    sas_token = generate_account_sas(
        account_name=storage_account,
        account_key=storage_key,
        resource_types=ResourceTypes(object=True),
        permission=AccountSasPermissions(read=True),
        # datetime.utcnow() is deprecated (3.12) and returns a naive value;
        # use an explicit UTC-aware timestamp (azure converts it to UTC).
        expiry=datetime.now(timezone.utc) + timedelta(days=365),
    )
    return sas_token
def package_name_based_blob_prefix(package_name):
    """Convert package name to a valid blob prefix."""
    # Dots and underscores both become hyphens; the result is lowercased.
    return package_name.replace(".", "-").replace("_", "-").lower()
def override_version_with_latest(distribution_name):
    """Swap the first version segment ("-<ver>-") of a wheel name for "-latest-"."""
    version_segment = "-([0-9.]*)-"
    return re.sub(version_segment, "-latest-", distribution_name, count=1)
def publish_package_internal(package_dir_path, storage_key, release_config):
    """Upload the single wheel in *package_dir_path* and regenerate index pages.

    Steps:
      1. Resolve the target index settings from the release config.
      2. Upload the wheel (and optionally a '-latest-' alias) to blob storage.
      3. Rebuild the project-level and per-package index.html blobs with
         SAS-token download links.

    NOTE(review): `config_json` and `upload_as_latest` are read as module
    globals that are not defined in this chunk -- presumably parsed
    CLI/config state; confirm they are initialized before this is called.
    """
    index = release_config["index"]
    index_config = config_json["targets"][index]
    storage_account = index_config["storage_account"]
    packages_container = index_config["packages_container"]
    index_container = index_config["index_container"]
    blob_prefix = index_config["blob_prefix"]
    pypi_endpoint = index_config["endpoint"]
    account_url = f"https://{storage_account}.blob.core.windows.net"
    wheel_pattern = re.compile(r".+\.whl$")
    whl_distributions = [d for d in os.listdir(package_dir_path) if wheel_pattern.match(d)]
    # Exactly one wheel must be present; anything else is a packaging error.
    if len(whl_distributions) != 1:
        print(
            f"[Error] Found {len(whl_distributions)} wheel distributions in {package_dir_path}. "
            "There should be exactly one."
        )
        exit(1)
    whl_distribution = whl_distributions[0]
    # Create the BlobServiceClient with connection string
    blob_service_client = BlobServiceClient.from_connection_string(get_connection_string(storage_account, storage_key))
    container_client = blob_service_client.get_container_client(packages_container)
    # Upload the wheel package to blob storage
    package_blob = os.path.join(blob_prefix, whl_distribution)
    package_blob_client = blob_service_client.get_blob_client(container=packages_container, blob=package_blob)
    upload_file_path = os.path.join(package_dir_path, whl_distribution)
    with open(file=upload_file_path, mode="rb") as package_file:
        print(f"[Debug] Uploading {whl_distribution} to container: {packages_container}, blob: {package_blob}...")
        package_blob_client.upload_blob(package_file, overwrite=True)
    if upload_as_latest:
        # Also publish a copy whose version segment is replaced by 'latest'.
        latest_distribution = override_version_with_latest(whl_distribution)
        latest_package_blob = os.path.join(blob_prefix, latest_distribution)
        latest_package_blob_client = blob_service_client.get_blob_client(
            container=packages_container, blob=latest_package_blob
        )
        upload_file_path = os.path.join(package_dir_path, whl_distribution)
        with open(file=upload_file_path, mode="rb") as package_file:
            print(
                f"[Debug] Uploading {whl_distribution} as latest distribution to "
                f"container: {packages_container}, blob: {latest_package_blob}..."
            )
            latest_package_blob_client.upload_blob(package_file, overwrite=True)
    # List the blobs and generate download sas urls
    sas_token = get_object_sas_token(storage_account, storage_key)
    print(f"Listing wheel packages with prefix {blob_prefix} in container...")
    blob_list = container_client.list_blobs(name_starts_with=f"{blob_prefix}/")
    distribution_blobs = [d for d in blob_list if wheel_pattern.match(d.name)]
    # Reverse the list so that the latest distribution is at the top
    distribution_blobs.reverse()
    packages_indexes = {}  # {package_name: [distributions]}
    for blob in distribution_blobs:
        distribution_name = blob.name.split("/")[-1]
        # The distribution segment (before the first '-') identifies the package.
        package_name = package_name_based_blob_prefix(distribution_name.split("-")[0])
        print(f"[Debug] Blob: {blob.name}. Package distribution: {distribution_name}. Package name: {package_name}")
        download_link = f"{account_url}/{blob.container}/{blob.name}?{sas_token}"
        index_item = f"<a href='{download_link}' rel='external'>{distribution_name}</a><br/>"
        if package_name in packages_indexes:
            packages_indexes[package_name].append(index_item)
        else:
            packages_indexes[package_name] = [index_item]
    # Update index.html in the top level blob prefix for the project
    project_index_file = "project_index.html"
    with open(project_index_file, "w", encoding="utf8") as index_file:
        index_file.write("<!DOCTYPE html>\n")
        index_file.write(
            "<html lang='en'><head><meta charset='utf-8'>"
            "<meta name='api-version' value='2'/>"
            "<title>Simple Index</title></head><body>\n"
        )
        for package_name in packages_indexes:
            package_index_url = f"https://{pypi_endpoint}/{blob_prefix}/{package_name}"
            print(f"[Debug] Updated package_index_url: {package_index_url}")
            index_file.write(f"<a href='{package_index_url}'>{package_name}</a><br/>\n")
        index_file.write("</body></html>\n")
    project_index_blob = os.path.join(blob_prefix, "index.html")
    project_index_blob_client = blob_service_client.get_blob_client(container=index_container, blob=project_index_blob)
    content_settings = ContentSettings(content_type="text/html")
    with open(file=project_index_file, mode="rb") as index:
        print(f"Uploading {project_index_file} to container: {index_container}, blob: {project_index_blob}...")
        project_index_blob_client.upload_blob(index, overwrite=True, content_settings=content_settings)
    # Update index.html for the package distributions
    for package_name, distribution_indexes in packages_indexes.items():
        package_index_file = f"{package_name}_index.html"
        if len(distribution_indexes) > 0:
            print(f"{len(distribution_indexes)} distributions found for package {package_name}. Updating index.html...")
            with open(package_index_file, "w", encoding="utf8") as index_file:
                index_file.write("<!DOCTYPE html>\n")
                index_file.write(
                    f"<html lang='en'><head><meta charset='utf-8'><title>{package_name}</title></head><body>\n"
                )
                for item in distribution_indexes:
                    index_file.write(f"{item}\n")
                index_file.write("</body></html>\n")
            # Update the index.html to the blob with prefix: <blob_prefix>/<normalized package_name>
            index_blob = os.path.join(blob_prefix, package_name, "index.html")
            index_blob_client = blob_service_client.get_blob_client(container=index_container, blob=index_blob)
            content_settings = ContentSettings(content_type="text/html")
            with open(file=package_index_file, mode="rb") as index:
                print(f"Uploading {package_index_file} to container: {index_container}, blob: {index_blob}...")
                index_blob_client.upload_blob(index, overwrite=True, content_settings=content_settings)
4,546 | import sys
from gallery_directive import GalleryDirective
class GalleryDirective(SphinxDirective):
    """A directive to show a gallery of images and links in a grid."""
    name = "gallery-grid"
    has_content = True
    required_arguments = 0
    optional_arguments = 1  # optional argument: path to a YAML data file
    final_argument_whitespace = True
    option_spec = {
        # A class to be added to the resulting container
        "grid-columns": directives.unchanged,
        "class-container": directives.unchanged,
        "class-card": directives.unchanged,
    }

    def run(self) -> List[nodes.Node]:  # noqa: C901
        """Build a sphinx-design grid of cards from YAML (file argument or body)."""
        if self.arguments:
            # If an argument is given, assume it's a path to a YAML file
            # Parse it and load it into the directive content
            path_data_rel = Path(self.arguments[0])
            path_doc, _ = self.get_source_info()
            path_doc = Path(path_doc).parent
            path_data = (path_doc / path_data_rel).resolve()
            if not path_data.exists():
                # NOTE(review): logger.warn is a deprecated alias of
                # logger.warning; the created text node is discarded; and the
                # bare `return` yields None rather than the declared list --
                # confirm this error path against upstream.
                logger.warn(f"Could not find grid data at {path_data}.")
                nodes.text("No grid data found at {path_data}.")
                return
            yaml_string = path_data.read_text()
        else:
            yaml_string = "\n".join(self.content)
        # Read in YAML so we can generate the gallery
        grid_data = safe_load(yaml_string)
        grid_items = []
        for item in grid_data:
            # Grid card parameters
            options = {}
            if "website" in item:
                options["link"] = item["website"]
            if "class-card" in self.options:
                options["class-card"] = self.options["class-card"]
            if "img-background" in item:
                options["img-background"] = item["img-background"]
            if "img-top" in item:
                options["img-top"] = item["img-top"]
            if "img-bottom" in item:
                options["img-bottom"] = item["img-bottom"]
            options_str = "\n".join(f":{k}: {v}" for k, v in options.items()) + "\n\n"
            # Grid card content
            content_str = ""
            if "header" in item:
                content_str += f"{item['header']}\n\n^^^\n\n"
            if "image" in item:
                # NOTE(review): this f-string has no placeholder -- it looks
                # like an image reference using item['image'] was lost;
                # confirm against the upstream source.
                content_str += f"\n\n"
            if "content" in item:
                content_str += f"{item['content']}\n\n"
            if "footer" in item:
                content_str += f"+++\n\n{item['footer']}\n\n"
            title = item.get("title", "")
            content_str += "\n"
            grid_items.append(
                GRID_CARD.format(
                    card_options=options_str, content=content_str, title=title
                )
            )
        # Parse the template with Sphinx Design to create an output
        container = nodes.container()
        # Prep the options for the template grid
        container_options = {"gutter": 2, "class-container": "gallery-directive"}
        if "class-container" in self.options:
            container_options[
                "class-container"
            ] += f' {self.options["class-container"]}'
        container_options_str = "\n".join(
            f":{k}: {v}" for k, v in container_options.items()
        )
        # Create the directive string for the grid
        grid_directive = TEMPLATE_GRID.format(
            grid_columns=self.options.get("grid-columns", "1 2 3 4"),
            container_options=container_options_str,
            content="\n".join(grid_items),
        )
        # Parse content as a directive so Sphinx Design processes it
        self.state.nested_parse([grid_directive], 0, container)
        # Sphinx Design outputs a container too, so just use that
        container = container.children[0]
        # Add extra classes
        # NOTE(review): tests option "container-class" but reads "class";
        # neither key is declared in option_spec -- confirm intended key.
        if self.options.get("container-class", []):
            container.attributes["classes"] += self.options.get("class", [])
        return [container]
def setup(app):
    """Register the gallery-grid directive with the Sphinx application."""
    app.add_directive("gallery-grid", GalleryDirective)
4,547 | import argparse
import json
from pathlib import Path
from azure.keyvault.secrets import SecretClient
from azure.identity import ClientSecretCredential, DefaultAzureCredential
def get_secret_client(
    tenant_id: str, client_id: str, client_secret: str
) -> SecretClient:
    """Create a SecretClient for the appropriate key vault.

    Falls back to DefaultAzureCredential (and the prod vault) when any piece
    of the service-principal triple is missing; otherwise authenticates with
    a ClientSecretCredential against the GitHub vault.

    Raises:
        Exception: whatever credential/client construction raised; the error
            is printed before being re-raised.
    """
    try:
        if (tenant_id is None) or (client_id is None) or (client_secret is None):
            # Incomplete service-principal info: rely on ambient credentials.
            credential = DefaultAzureCredential()
            client = SecretClient(
                vault_url="https://promptflowprod.vault.azure.net/",
                credential=credential,
            )
        else:
            credential = ClientSecretCredential(tenant_id, client_id, client_secret)
            client = SecretClient(
                vault_url="https://github-promptflow.vault.azure.net/",
                credential=credential,
            )
    except Exception as e:
        # Bug fix: the exception used to be swallowed, after which
        # `return client` failed with an unrelated UnboundLocalError.
        # Surface the real error instead.
        print(e)
        raise
    return client
4,548 | import argparse
import json
from pathlib import Path
from azure.keyvault.secrets import SecretClient
from azure.identity import ClientSecretCredential, DefaultAzureCredential
def get_secret(secret_name: str, client: SecretClient):
    """Fetch and return the value of *secret_name* from the vault via *client*."""
    return client.get_secret(secret_name).value
4,549 | import argparse
import json
from pathlib import Path
from azure.keyvault.secrets import SecretClient
from azure.identity import ClientSecretCredential, DefaultAzureCredential
def list_secret_names(client: SecretClient) -> list:
    """Return the names of every secret stored in the vault."""
    return [prop.name for prop in client.list_properties_of_secrets()]
4,550 | import argparse
import json
from pathlib import Path
from azure.keyvault.secrets import SecretClient
from azure.identity import ClientSecretCredential, DefaultAzureCredential
def fill_key_to_dict(template_dict, keys_dict):
    """Recursively replace placeholder strings in *template_dict* in place.

    Any string value that appears as a key in *keys_dict* is substituted
    with the mapped value; nested dictionaries are processed recursively;
    non-dict inputs are ignored.
    """
    if not isinstance(template_dict, dict):
        return
    for name, value in template_dict.items():
        if isinstance(value, str) and value in keys_dict:
            template_dict[name] = keys_dict[value]
        else:
            fill_key_to_dict(value, keys_dict)
4,551 | import argparse
from pathlib import Path
from platform import system
from utils import print_blue, run_command
def print_blue(message):
def run_command(
commands, cwd=None, stderr=subprocess.STDOUT, shell=False, stream_stdout=True, throw_on_retcode=True, logger=None
):
def setup_promptflow(extra_deps: list, command_args: dict) -> None:
    """Install the local promptflow SDK in editable mode plus dev requirements."""
    print_blue("- Setting up the promptflow SDK ")
    print_blue("- Installing promptflow Python SDK from local directory")
    package_location = f"{Path('./src/promptflow/').absolute()}"
    if extra_deps:
        print_blue(f"- Installing with extra dependencies: {extra_deps}")
        package_location = f"{package_location}[{','.join(extra_deps)}]"
    install_cmd = ["pip", "install", "-e", package_location]
    print_blue(f"Running {install_cmd}")
    run_command(commands=install_cmd, **command_args)
    dev_requirements = str(Path("./src/promptflow/dev_requirements.txt").absolute())
    run_command(
        commands=["pip", "install", "-r", dev_requirements],
        **command_args,
    )
4,552 | import logging
import os
import subprocess
import sys
import time
import traceback
class Color:
    """ANSI escape sequences for coloring and styling terminal output."""
    PURPLE = "\033[95m"
    CYAN = "\033[96m"
    DARKCYAN = "\033[36m"
    BLUE = "\033[94m"
    GREEN = "\033[92m"
    YELLOW = "\033[93m"
    RED = "\033[91m"
    BOLD = "\033[1m"
    UNDERLINE = "\033[4m"
    # Resets all attributes; append after any of the codes above.
    END = "\033[0m"
def print_red(message):
    """Print *message* to stdout in red."""
    print(f"{Color.RED}{message}{Color.END}")
4,553 | import logging
import os
import subprocess
import sys
import time
import traceback
module_logger = logging.getLogger(__name__)


def get_test_files(testpath):
    """Return all files ending in 'tests.py' under *testpath*.

    If *testpath* is itself a file, it is returned as a single-item list
    regardless of its name.
    """
    if os.path.isfile(testpath):
        return [testpath]
    matches = []
    for dirpath, _dirnames, filenames in os.walk(testpath):
        module_logger.debug("Searching %s for files ending in 'tests.py'", dirpath)
        matches.extend(
            os.path.join(dirpath, name) for name in filenames if name.endswith("tests.py")
        )
    return matches
4,554 | import logging
import os
import subprocess
import sys
import time
import traceback
def retry(fn, num_attempts=3):
    """Call *fn* up to *num_attempts* times, returning the first success.

    A traceback is printed after every failure; the last exception is
    re-raised once all attempts are exhausted.
    """
    if num_attempts <= 0:
        raise Exception("Illegal num_attempts: {}".format(num_attempts))
    for attempt in range(1, num_attempts + 1):
        try:
            return fn()
        except Exception:
            print("Execution failed on attempt {} out of {}".format(attempt, num_attempts))
            print("Exception trace:")
            traceback.print_exc()
            if attempt == num_attempts:
                print("Execution failed after {} attempts".format(attempt))
                raise
4,555 | import os
import fnmatch
import subprocess
import time
import argparse
import json
import sys
# Repository and commit-status polling configuration.
github_repository = "microsoft/promptflow"
# Non-zero enables verbose debug prints throughout the polling loop.
snippet_debug = os.getenv("SNIPPET_DEBUG", 0)
# Resolved lazily in run_checks() from `git log` when left empty.
merge_commit = ""
# Poll iterations; with a 30s sleep per iteration this is ~15 minutes total.
loop_times = 30
github_workspace = os.path.expanduser("~/promptflow/")
# Last failure description; empty string means "no failure so far".
failed_reason = ""
def trigger_checks(valid_status_array):
    """Collect check-run statuses for the merge commit into *valid_status_array*.

    Queries the GitHub API (via the `gh` CLI) for every check suite on the
    merge commit and appends each check run whose name contains a watched
    pipeline key. Sets the module-level `failed_reason` when some watched
    pipeline has fewer triggered runs than expected.

    NOTE(review): `pipelines` and `pipelines_count` are module globals not
    defined in this chunk; presumably populated by trigger_prepare() --
    confirm they exist before this runs.
    """
    global failed_reason
    global github_repository
    global merge_commit
    global snippet_debug
    global pipelines
    global pipelines_count
    output = subprocess.check_output(
        f"gh api /repos/{github_repository}/commits/{merge_commit}/check-suites?per_page=100",
        shell=True,
    )
    check_suites = json.loads(output)["check_suites"]
    for suite in check_suites:
        if snippet_debug != 0:
            print(f"check-suites id {suite['id']}")
        suite_id = suite["id"]
        # Fetch the individual check runs belonging to this suite.
        output = subprocess.check_output(
            f"gh api /repos/{github_repository}/check-suites/{suite_id}/check-runs?per_page=100",
            shell=True,
        )
        check_runs = json.loads(output)["check_runs"]
        for run in check_runs:
            if snippet_debug != 0:
                print(f"check runs name {run['name']}")
            for key in pipelines.keys():
                value = pipelines[key]
                # Zero expected runs means this pipeline is not watched.
                if value == 0:
                    continue
                # Substring match: count this run toward the pipeline.
                if key in run["name"]:
                    pipelines_count[key] += 1
                    valid_status_array.append(run)
    # Every watched pipeline must have reached its expected trigger count.
    for key in pipelines.keys():
        if pipelines_count[key] < pipelines[key]:
            failed_reason = "Not all pipelines are triggered."
def status_checks(valid_status_array):
    """Classify collected check runs and set the module-level `failed_reason`.

    Priority: any failed run marks the batch "not successful" (a later
    failure overwrites an earlier "not finished"); otherwise any unfinished
    run marks it "not finished"; a fully successful batch leaves
    `failed_reason` empty.
    """
    global failed_reason
    global pipelines
    global pipelines_count
    # Basic fact of sdk cli checked pipelines.
    failed_reason = ""
    # Loop through each valid status array.
    for status in valid_status_array:
        # Check if the pipeline was successful.
        if status["conclusion"] and status["conclusion"].lower() == "success":
            # Add 1 to the count of successful pipelines.
            pass
        # Check if the pipeline failed.
        elif status["conclusion"] and status["conclusion"].lower() == "failure":
            failed_reason = "Required pipelines are not successful."
        # Check if the pipeline is still running.
        else:
            # Don't let "not finished" mask an already-recorded failure.
            if failed_reason == "":
                failed_reason = "Required pipelines are not finished."
            # Print the status of the pipeline to the console.
            print(status["name"] + " is checking.")
def trigger_prepare(input_paths):
    """Derive which pipelines must run for the changed files in *input_paths*.

    Merges sample-pipeline definitions from the examples readme generator
    into `checks`, inverts `checks` into a path->pipelines map
    (`reverse_checks`), then fnmatch-es every changed path against those
    patterns to fill the module-level `pipelines` and `pipelines_count`
    tables. Sets `failed_reason` when readme generation is out of date.

    NOTE(review): `checks`, `reverse_checks` and `special_care` are module
    globals not defined in this chunk; confirm their initialization.
    """
    global github_workspace
    global checks
    global reverse_checks
    global pipelines
    global pipelines_count
    global failed_reason
    global special_care
    for input_path in input_paths:
        # NOTE(review): presumably this key's presence means the sample
        # pipelines were already merged, so skip re-loading -- confirm.
        if "samples_connections_connection" in checks:
            continue
        # Check if the input path contains "examples" or "samples".
        if "examples" in input_path or "samples" in input_path:
            sys.path.append(os.path.expanduser(github_workspace + "/scripts/readme"))
            from readme import main as readme_main
            os.chdir(os.path.expanduser(github_workspace))
            # Get the list of pipelines from the readme file.
            pipelines_samples = readme_main(check=True)
            git_diff_files = [
                item
                for item in subprocess.check_output(
                    ["git", "diff", "--name-only", "HEAD"]
                )
                .decode("utf-8")
                .split("\n")
                if item != ""
            ]
            # Any dirty file after the check run means the generated readmes
            # are stale; abort early with a failure reason.
            for _ in git_diff_files:
                failed_reason = "Run readme generation before check in"
                return
            # Merge the pipelines from the readme file with the original list of pipelines.
            for key in pipelines_samples.keys():
                value = pipelines_samples[key]
                checks[key] = value
    # Reverse checks.
    for key in checks.keys():
        value = checks[key]
        for path in value:
            if path in reverse_checks:
                reverse_checks[path].append(key)
            else:
                reverse_checks[path] = [key]
    # Render pipelines and pipelines_count using input_paths.
    for input_path in input_paths:
        # Input pattern /**: input_path should match in the middle.
        # Input pattern /*: input_path should match last but one.
        # Other input pattern: input_path should match last.
        keys = [
            key for key in reverse_checks.keys() if fnmatch.fnmatch(input_path, key)
        ]
        # Loop through each key in the list of keys.
        for key_item in keys:
            # Loop through each pipeline in the list of pipelines.
            for key in reverse_checks[key_item]:
                # Pipelines under special care expect a custom trigger count.
                if key in special_care:
                    pipelines[key] = special_care[key]
                else:
                    pipelines[key] = 1
                # Set the pipeline count to 0.
                pipelines_count[key] = 0
def run_checks():
    """Poll GitHub check runs for the current merge commit until they settle.

    Resolves the merge commit from `git log` if not already set, computes
    the changed files versus origin/main, derives the set of pipelines that
    must run (trigger_prepare), then polls every 30 seconds for up to
    `loop_times` iterations until all required pipelines succeed. Raises
    Exception on failure, on repeated "not triggered" states, or on timeout.
    """
    global github_repository
    global snippet_debug
    global merge_commit
    global loop_times
    global github_workspace
    global failed_reason
    if merge_commit == "":
        merge_commit = (
            subprocess.check_output(["git", "log", "-1"]).decode("utf-8").split("\n")
        )
        if snippet_debug != 0:
            print(merge_commit)
        # Extract the commit hash from the "Merge <sha> into <sha>" line.
        for line in merge_commit:
            if "Merge" in line and "into" in line:
                merge_commit = line.split(" ")[-3]
                break
        if snippet_debug != 0:
            print("MergeCommit " + merge_commit)
    # Tolerate up to 5 extra polls while pipelines have not started yet.
    not_started_counter = 5
    os.chdir(github_workspace)
    # Get diff of current branch and main branch.
    try:
        git_merge_base = (
            subprocess.check_output(["git", "merge-base", "origin/main", "HEAD"])
            .decode("utf-8")
            .rstrip()
        )
        git_diff = (
            subprocess.check_output(
                ["git", "diff", "--name-only", "--diff-filter=d", f"{git_merge_base}"],
                stderr=subprocess.STDOUT,
            )
            .decode("utf-8")
            .rstrip()
            .split("\n")
        )
    except subprocess.CalledProcessError as e:
        print("Exception on process, rc=", e.returncode, "output=", e.output)
        raise e
    # Prepare how many pipelines should be triggered.
    trigger_prepare(git_diff)
    if failed_reason != "":
        raise Exception(failed_reason)
    # Loop for 15 minutes at most.
    for i in range(loop_times):
        # Wait for 30 seconds.
        time.sleep(30)
        # Reset the failed reason.
        failed_reason = ""
        # Reset the valid status array.
        valid_status_array = []
        # Get all triggered pipelines.
        # If not all pipelines are triggered, continue.
        trigger_checks(valid_status_array)
        if failed_reason != "":
            if not_started_counter == 0:
                raise Exception(failed_reason + " for 6 times.")
            print(failed_reason)
            not_started_counter -= 1
            continue
        # Get pipeline conclusion priority:
        # 1. Not successful, Fail.
        # 2. Not finished, Continue.
        # 3. Successful, Break.
        status_checks(valid_status_array)
        # Check if the failed reason contains "not successful".
        if "not successful" in failed_reason.lower():
            raise Exception(failed_reason)
        # Check if the failed reason contains "not finished".
        elif "not finished" in failed_reason.lower():
            print(failed_reason)
            continue
        # Otherwise, print that all required pipelines are successful.
        else:
            print("All required pipelines are successful.")
            break
    # Check if the failed reason is not empty (loop exhausted without success).
    if failed_reason != "":
        raise Exception(failed_reason)
4,556 | import os
import sys
import platform
import stat
import tempfile
import shutil
import subprocess
import hashlib
def create_tmp_dir():
    """Create and return the path of a fresh, unique temporary directory."""
    return tempfile.mkdtemp()
4,557 | import os
import sys
import platform
import stat
import tempfile
import shutil
import subprocess
import hashlib
def is_valid_sha256sum(a_file, expected_sum):
    """Return True iff the SHA-256 digest of *a_file* equals *expected_sum*.

    Reads the file in fixed-size chunks so arbitrarily large files do not
    have to fit in memory (the previous version read the whole file at once).
    """
    sha256 = hashlib.sha256()
    with open(a_file, 'rb') as f:
        # iter() with a sentinel yields chunks until read() returns b''.
        for chunk in iter(lambda: f.read(1024 * 1024), b''):
            sha256.update(chunk)
    return expected_sum == sha256.hexdigest()
4,558 | import os
import sys
import platform
import stat
import tempfile
import shutil
import subprocess
import hashlib
def exec_command(command_list, cwd=None, env=None):
    """Run *command_list* as a subprocess, raising CalledProcessError on failure."""
    print_status('Executing: {}'.format(command_list))
    subprocess.check_call(command_list, cwd=cwd, env=env)
def create_virtualenv(install_dir):
    """Create a Python virtual environment at *install_dir* with this interpreter."""
    exec_command([sys.executable, '-m', 'venv', install_dir])
4,559 | import os
import sys
import platform
import stat
import tempfile
import shutil
import subprocess
import hashlib
def exec_command(command_list, cwd=None, env=None):
    """Echo and run *command_list*; raises if the command exits non-zero."""
    print_status(f'Executing: {command_list}')
    subprocess.check_call(command_list, cwd=cwd, env=env)
def install_cli(install_dir, tmp_dir):
    """pip-install/upgrade promptflow (with extras), promptflow-tools and
    keyrings.alt into the virtualenv rooted at *install_dir*, caching
    downloads in *tmp_dir*."""
    path_to_pip = os.path.join(install_dir, 'bin', 'pip')
    packages = (
        'promptflow[azure,executable,azureml-serving]',
        'promptflow-tools',
        'keyrings.alt',
    )
    for package in packages:
        exec_command([path_to_pip, 'install', '--cache-dir', tmp_dir, package, '--upgrade'])
4,560 | import os
import sys
import platform
import stat
import tempfile
import shutil
import subprocess
import hashlib
# Bash dispatch scripts written by the installer; each one re-execs the
# matching promptflow entry point with the virtualenv's python interpreter.
PF_DISPATCH_TEMPLATE = """#!/usr/bin/env bash
export PF_INSTALLER=Script
{install_dir}/bin/python -m promptflow._cli._pf.entry "$@"
"""
PFAZURE_DISPATCH_TEMPLATE = """#!/usr/bin/env bash
{install_dir}/bin/python -m promptflow._cli._pf_azure.entry "$@"
"""
PFS_DISPATCH_TEMPLATE = """#!/usr/bin/env bash
{install_dir}/bin/python -m promptflow._sdk._service.entry "$@"
"""
# Names of the executables placed on the user's PATH.
PF_EXECUTABLE_NAME = 'pf'
PFAZURE_EXECUTABLE_NAME = 'pfazure'
PFS_EXECUTABLE_NAME = 'pfs'
def print_status(msg=''):
    """Print *msg* prefixed with '-- ' as an installer progress line."""
    print('--', msg)
def create_dir(dir):
    """Create *dir* (and parents) if it does not already exist."""
    if os.path.isdir(dir):
        return
    print_status("Creating directory '{}'.".format(dir))
    os.makedirs(dir)
def create_executable(exec_dir, install_dir):
    """Write the pf/pfazure/pfs bash dispatch scripts into *exec_dir*.

    Each script execs the corresponding promptflow module with the
    virtualenv's python from *install_dir*. Returns the list of the
    created script paths.
    """
    create_dir(exec_dir)
    exec_filepaths = []
    for filename, template in [(PF_EXECUTABLE_NAME, PF_DISPATCH_TEMPLATE),
                               (PFAZURE_EXECUTABLE_NAME, PFAZURE_DISPATCH_TEMPLATE),
                               (PFS_EXECUTABLE_NAME, PFS_DISPATCH_TEMPLATE)]:
        exec_filepath = os.path.join(exec_dir, filename)
        with open(exec_filepath, 'w') as exec_file:
            exec_file.write(template.format(install_dir=install_dir))
        # Add execute permission for user, group and others on top of the
        # mode the file was created with.
        cur_stat = os.stat(exec_filepath)
        os.chmod(exec_filepath, cur_stat.st_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)
        print_status("The executable is available at '{}'.".format(exec_filepath))
        exec_filepaths.append(exec_filepath)
    return exec_filepaths
4,561 | import os
import sys
import platform
import stat
import tempfile
import shutil
import subprocess
import hashlib
# Default location offered for the promptflow install (~/lib/promptflow).
DEFAULT_INSTALL_DIR = os.path.expanduser(os.path.join('~', 'lib', 'promptflow'))
def print_status(msg=''):
    """Emit an installer status line with the standard '-- ' prefix."""
    print(f'-- {msg}')
def prompt_input_with_default(msg, default):
    """Prompt with *msg*; return the user's answer, or *default* when blank."""
    if not default:
        return prompt_input('{}: '.format(msg))
    return prompt_input("{} (leave blank to use '{}'): ".format(msg, default)) or default
def prompt_y_n(msg, default=None):
    """Ask a yes/no question until an answer is given.

    *default* ('y' or 'n') is shown capitalized in the prompt and selected
    when the user just presses enter; returns True for yes, False for no.
    """
    if default not in [None, 'y', 'n']:
        raise ValueError("Valid values for default are 'y', 'n' or None")
    yes_label = 'Y' if default == 'y' else 'y'
    no_label = 'N' if default == 'n' else 'n'
    while True:
        answer = prompt_input('{} ({}/{}): '.format(msg, yes_label, no_label)).lower()
        if answer == no_label.lower():
            return False
        if answer == yes_label.lower():
            return True
        if default and not answer:
            return default == yes_label.lower()
def create_dir(dir):
    """Ensure directory *dir* exists, announcing creation when needed."""
    if not os.path.isdir(dir):
        print_status(f"Creating directory '{dir}'.")
        os.makedirs(dir)
def get_install_dir():
    """Interactively choose (and create) the installation directory.

    Re-prompts when the chosen path contains spaces, or when it is
    non-empty and the user declines to wipe it. Returns the real,
    absolute path of the chosen directory.
    """
    install_dir = None
    while not install_dir:
        prompt_message = 'In what directory would you like to place the install?'
        install_dir = prompt_input_with_default(prompt_message, DEFAULT_INSTALL_DIR)
        install_dir = os.path.realpath(os.path.expanduser(install_dir))
        if ' ' in install_dir:
            print_status("The install directory '{}' cannot contain spaces.".format(install_dir))
            install_dir = None
        else:
            create_dir(install_dir)
            if os.listdir(install_dir):
                # Directory already has content: offer to wipe and recreate it.
                print_status("'{}' is not empty and may contain a previous installation.".format(install_dir))
                ans_yes = prompt_y_n('Remove this directory?', 'n')
                if ans_yes:
                    shutil.rmtree(install_dir)
                    print_status("Deleted '{}'.".format(install_dir))
                    create_dir(install_dir)
                else:
                    # User opted to not delete the directory so ask for install directory again
                    install_dir = None
    print_status("We will install at '{}'.".format(install_dir))
    return install_dir
4,562 | import os
import sys
import platform
import stat
import tempfile
import shutil
import subprocess
import hashlib
# Default directory offered for the dispatch executables, and their names.
DEFAULT_EXEC_DIR = os.path.expanduser(os.path.join('~', 'bin'))
PFAZURE_EXECUTABLE_NAME = 'pfazure'
PFS_EXECUTABLE_NAME = 'pfs'
def print_status(msg=''):
    """Write an installer progress message with the standard '-- ' prefix."""
    prefix = '-- '
    print(prefix + msg)
def prompt_input_with_default(msg, default):
    """Prompt the user, falling back to *default* on an empty response."""
    if default:
        answer = prompt_input("{} (leave blank to use '{}'): ".format(msg, default))
        return answer or default
    return prompt_input('{}: '.format(msg))
def create_dir(dir):
    """Create *dir* with any missing parents, unless it already exists."""
    if os.path.isdir(dir):
        return
    print_status("Creating directory '{}'.".format(dir))
    os.makedirs(dir)
def get_exec_dir():
    """Interactively choose the directory for the dispatch executables.

    Re-prompts while the chosen path contains spaces; creates the directory
    once a valid path is chosen and returns its real, absolute form.

    NOTE(review): the prompt lists PFS twice -- presumably one of them was
    meant to be the 'pf' executable; confirm against the upstream script.
    """
    exec_dir = None
    while not exec_dir:
        prompt_message = (f"In what directory would you like to place the "
                          f"'{PFS_EXECUTABLE_NAME}/{PFS_EXECUTABLE_NAME}/{PFAZURE_EXECUTABLE_NAME}' executable?")
        exec_dir = prompt_input_with_default(prompt_message, DEFAULT_EXEC_DIR)
        exec_dir = os.path.realpath(os.path.expanduser(exec_dir))
        if ' ' in exec_dir:
            print_status("The executable directory '{}' cannot contain spaces.".format(exec_dir))
            # Bug fix: previously create_dir(None) was still reached here and
            # crashed with a TypeError instead of re-prompting the user.
            exec_dir = None
            continue
        create_dir(exec_dir)
    print_status("The executable will be in '{}'.".format(exec_dir))
    return exec_dir
4,563 | import os
import sys
import platform
import stat
import tempfile
import shutil
import subprocess
import hashlib
class CLIInstallError(Exception):
    """Raised when the promptflow CLI install script cannot proceed."""
def print_status(msg=''):
    """Print a '-- '-prefixed installer status message."""
    line = '-- ' + msg
    print(line)
def prompt_y_n(msg, default=None):
    """Repeatedly prompt *msg* until the user answers yes or no.

    Returns True for yes, False for no; an empty answer selects *default*
    ('y' or 'n') when one was given.
    """
    if default not in [None, 'y', 'n']:
        raise ValueError("Valid values for default are 'y', 'n' or None")
    y = 'Y' if default == 'y' else 'y'
    n = 'N' if default == 'n' else 'n'
    # The prompt text never changes, so build it once.
    prompt = '{} ({}/{}): '.format(msg, y, n)
    while True:
        ans = prompt_input(prompt)
        if ans.lower() == n.lower():
            return False
        if ans.lower() == y.lower():
            return True
        if default and not ans:
            return default == y.lower()
def _backup_rc(rc_file):
    """Best-effort copy of *rc_file* to '<rc_file>.backup' before modifying it."""
    backup_path = rc_file + '.backup'
    try:
        shutil.copyfile(rc_file, backup_path)
        print_status("Backed up '{}' to '{}'".format(rc_file, backup_path))
    except (OSError, IOError):
        # The backup is purely precautionary; ignore filesystem errors.
        pass
def _modify_rc(rc_file_path, line_to_add):
    """Append *line_to_add* to the rc file unless it is already present."""
    if _find_line_in_file(rc_file_path, line_to_add):
        return
    with open(rc_file_path, 'a', encoding="utf-8") as rc_file:
        rc_file.write('\n' + line_to_add + '\n')
def get_rc_file_path():
    """Locate (or create) the shell rc file to update; None if not found.

    NOTE(review): relies on _get_default_rc_file() and
    _default_rc_file_creation_step(), which are defined elsewhere in this
    script -- confirm their behavior.
    """
    rc_file = None
    default_rc_file = _get_default_rc_file()
    if not default_rc_file:
        rc_file = _default_rc_file_creation_step()
    # Let the user override/confirm the detected rc file.
    rc_file = rc_file or prompt_input_with_default('Enter a path to an rc file to update', default_rc_file)
    if rc_file:
        rc_file_path = os.path.realpath(os.path.expanduser(rc_file))
        if os.path.isfile(rc_file_path):
            return rc_file_path
        print_status("The file '{}' could not be found.".format(rc_file_path))
    return None
def warn_other_azs_on_path(exec_dir, exec_filepath):
    """Warn when other pf/pfazure/pfs executables shadow this install on $PATH."""
    env_path = os.environ.get('PATH')
    conflicting_paths = []
    if env_path:
        for p in env_path.split(':'):
            for file in [PF_EXECUTABLE_NAME, PFAZURE_EXECUTABLE_NAME, PFS_EXECUTABLE_NAME]:
                p_to_pf = os.path.join(p, file)
                # Ignore the directory we just installed into.
                if p != exec_dir and os.path.isfile(p_to_pf):
                    conflicting_paths.append(p_to_pf)
    if conflicting_paths:
        print_status()
        # Bug fix: the warning previously listed PFS twice and omitted PF,
        # even though all three executables are checked above.
        print_status(f"** WARNING: Other '{PF_EXECUTABLE_NAME}/{PFS_EXECUTABLE_NAME}/{PFAZURE_EXECUTABLE_NAME}' "
                     f"executables are on your $PATH. **")
        print_status("Conflicting paths: {}".format(', '.join(conflicting_paths)))
        print_status("You can run this installation of the promptflow with '{}'.".format(exec_filepath))
def handle_path_and_tab_completion(exec_filepath, exec_dir):
    """Optionally add *exec_dir* to $PATH via the user's shell rc file.

    Backs up the rc file first, appends an export line, and warns about
    other pf/pfs/pfazure executables already on $PATH. Raises
    CLIInstallError when no rc file can be located.
    """
    ans_yes = prompt_y_n('Modify profile to update your $PATH now?', 'y')
    if ans_yes:
        rc_file_path = get_rc_file_path()
        if not rc_file_path:
            raise CLIInstallError('No suitable profile file found.')
        _backup_rc(rc_file_path)
        line_to_add = "export PATH=$PATH:{}".format(exec_dir)
        _modify_rc(rc_file_path, line_to_add)
        warn_other_azs_on_path(exec_dir, exec_filepath)
        print_status()
        print_status('** Run `exec -l $SHELL` to restart your shell. **')
        print_status()
    else:
        print_status("You can run the promptflow with '{}'.".format(exec_filepath))
4,564 | import os
import sys
import platform
import stat
import tempfile
import shutil
import subprocess
import hashlib
class CLIInstallError(Exception):
    """Fatal error encountered during promptflow CLI installation."""
def print_status(msg=''):
    """Show an installer status line with the '-- ' prefix."""
    print('--', msg)
def verify_python_version():
    """Raise CLIInstallError unless the interpreter is a supported Python >= 3.8
    outside of an Anaconda environment."""
    print_status('Verifying Python version.')
    major, minor, micro = sys.version_info[:3]
    if (major, minor) < (3, 8):
        raise CLIInstallError('The promptflow does not support Python versions less than 3.8.')
    if 'conda' in sys.version:
        raise CLIInstallError("This script does not support the Python Anaconda environment. "
                              "Create an Anaconda virtual environment and install with 'pip'")
    print_status('Python version {}.{}.{} okay.'.format(major, minor, micro))
4,565 | import os
import sys
import platform
import stat
import tempfile
import shutil
import subprocess
import hashlib
class CLIInstallError(Exception):
def print_status(msg=''):
def prompt_y_n(msg, default=None):
def _native_dependencies_for_dist(verify_cmd_args, install_cmd_args, dep_list):
    """Verify the native packages in *dep_list* are installed; offer to continue if not.

    Runs *verify_cmd_args* + *dep_list* (e.g. a dpkg/rpm query); on failure
    prints the suggested *install_cmd_args* command and aborts with
    CLIInstallError unless the user chooses to continue anyway.
    """
    try:
        print_status("Executing: '{} {}'".format(' '.join(verify_cmd_args), ' '.join(dep_list)))
        subprocess.check_output(verify_cmd_args + dep_list, stderr=subprocess.STDOUT)
        print_status('Native dependencies okay.')
    except subprocess.CalledProcessError:
        # A non-zero exit means at least one dependency query failed.
        err_msg = 'One or more of the following native dependencies are not currently installed and may be required.\n'
        err_msg += '"{}"'.format(' '.join(install_cmd_args + dep_list))
        print_status(err_msg)
        ans_yes = prompt_y_n('Missing native dependencies. Attempt to continue anyway?', 'n')
        if not ans_yes:
            raise CLIInstallError('Please install the native dependencies and try again.')
4,566 | import os
import sys
import platform
import stat
import tempfile
import shutil
import subprocess
import hashlib
def _get_linux_distro():
    """Return (name, version_id) parsed from /etc/os-release, or (None, None).

    Non-Linux platforms and unreadable/missing os-release files both yield
    (None, None).
    """
    if platform.system() != 'Linux':
        return None, None
    try:
        with open('/etc/os-release') as lines:
            tokens = [line.strip() for line in lines]
    except Exception:
        return None, None
    release_info = {}
    for token in tokens:
        if '=' not in token:
            continue
        # Split on the first '=' only; values may be quoted.
        key, _, value = token.partition('=')
        release_info[key.lower()] = value.strip('"')
    return release_info.get('name', None), release_info.get('version_id', None)
4,567 | import os
import sys
import platform
import stat
import tempfile
import shutil
import subprocess
import hashlib
# Names of the dispatch executables created by the installer.
PF_EXECUTABLE_NAME = 'pf'
PFAZURE_EXECUTABLE_NAME = 'pfazure'
PFS_EXECUTABLE_NAME = 'pfs'
class CLIInstallError(Exception):
    """Raised when the install script detects an unrecoverable problem."""
def verify_install_dir_exec_path_conflict(install_dir, exec_dir):
    """Raise CLIInstallError if *install_dir* collides with any executable path."""
    for exec_name in [PF_EXECUTABLE_NAME, PFAZURE_EXECUTABLE_NAME, PFS_EXECUTABLE_NAME]:
        exec_path = os.path.join(exec_dir, exec_name)
        if install_dir != exec_path:
            continue
        raise CLIInstallError("The executable file '{}' would clash with the install directory of '{}'. Choose "
                              "either a different install directory or directory to place the "
                              "executable.".format(exec_path, install_dir))
4,568 | import argparse
import json
from pathlib import Path
from utils.secret_manager import get_secret, get_secret_client, list_secret_names
def fill_key_to_dict(template_dict, keys_dict):
    """Walk *template_dict* in place, swapping placeholder strings for values.

    String values found as keys in *keys_dict* are replaced by their mapped
    values; nested dictionaries are handled recursively; non-dict inputs
    are silently ignored.
    """
    if isinstance(template_dict, dict):
        for key in template_dict:
            current = template_dict[key]
            if isinstance(current, str) and current in keys_dict:
                template_dict[key] = keys_dict[current]
            else:
                fill_key_to_dict(current, keys_dict)
4,569 | import argparse
import os
import re
from jinja2 import Environment, FileSystemLoader
def make_pythonic_variable_name(input_string):
    """Sanitize *input_string* into a valid Python identifier.

    Non-word characters become underscores and a leading digit gets an
    underscore prefix. An empty (or all-whitespace) input now yields '_'
    instead of raising IndexError as the previous version did.
    """
    # \W matches any non-word char; ^(?=\d) inserts a '_' before a leading digit.
    variable_name = re.sub(r'\W|^(?=\d)', '_', input_string.strip())
    if not variable_name:
        return '_'
    if not variable_name[0].isalpha() and variable_name[0] != '_':
        variable_name = f'_{variable_name}'
    return variable_name
4,570 | import argparse
import os
import re
from jinja2 import Environment, FileSystemLoader
def convert_tool_name_to_class_name(tool_name):
    """Convert a snake_case tool name into a PascalCase class name."""
    parts = tool_name.split('_')
    return ''.join(part.title() for part in parts)
def create_file(path):
    """Ensure an empty file exists at *path* (existing content is discarded)."""
    # Opening in 'w' mode and immediately closing truncates/creates the file.
    with open(path, 'w'):
        pass
def create_folder(path):
    """Create the directory *path*, including parents; no error if it exists."""
    os.makedirs(path, exist_ok=True)
def create_tool_project_structure(destination: str, package_name: str, tool_name: str,
                                  function_name: str, is_class_way=False):
    """Generate a promptflow tool package scaffold under *destination*.

    Renders setup.py, MANIFEST.in, the <package>/tools module containing the
    tool implementation and utils, a <package>/yamls folder with the tool
    spec, and a tests/ folder — all from the jinja2 templates shipped next to
    this script.

    Args:
        destination: target directory; aborts with a message if it exists.
        package_name: pip package name used in setup.py and import paths.
        tool_name: module / yaml file name for the generated tool.
        function_name: name of the tool entry function.
        is_class_way: render the class-based (tool2.*) templates instead of
            the function-based ones.
    """
    if is_class_way:
        class_name = convert_tool_name_to_class_name(tool_name)

    # Load templates shipped alongside this script.
    templates_abs_path = os.path.join(os.path.dirname(__file__), "templates")
    env = Environment(loader=FileSystemLoader(templates_abs_path))

    def render_to_file(template_name, out_path, **context):
        # Render one jinja2 template and write the result to out_path.
        with open(out_path, 'w') as f:
            f.write(env.get_template(template_name).render(**context))

    # Refuse to clobber an existing directory.
    if os.path.exists(destination):
        print("Destination already exists. Please choose another one.")
        return
    os.makedirs(destination, exist_ok=True)

    # Packaging metadata.
    render_to_file('setup.py.j2', os.path.join(destination, 'setup.py'),
                   package_name=package_name, tool_name=tool_name)
    render_to_file('MANIFEST.in.j2', os.path.join(destination, 'MANIFEST.in'),
                   package_name=package_name)

    # <package>/tools/ with a pkgutil namespace __init__.py and the tool module.
    # (Fix: previously an empty __init__.py was created and then immediately
    # rewritten — the redundant create_file call is removed.)
    pkgutil_init = '__path__ = __import__("pkgutil").extend_path(__path__, __name__) # type: ignore\n'
    tools_dir = os.path.join(destination, package_name, 'tools')
    create_folder(tools_dir)
    with open(os.path.join(tools_dir, '__init__.py'), 'w') as f:
        f.write(pkgutil_init)
    if is_class_way:
        render_to_file('tool2.py.j2', os.path.join(tools_dir, f'{tool_name}.py'),
                       class_name=class_name, function_name=function_name)
    else:
        render_to_file('tool.py.j2', os.path.join(tools_dir, f'{tool_name}.py'),
                       function_name=function_name)
    render_to_file('utils.py.j2', os.path.join(tools_dir, 'utils.py'))

    # Package-level namespace __init__.py (same redundancy removed here).
    with open(os.path.join(destination, package_name, '__init__.py'), 'w') as f:
        f.write(pkgutil_init)

    # <package>/yamls/ holding the tool spec.
    yamls_dir = os.path.join(destination, package_name, 'yamls')
    create_folder(yamls_dir)
    if is_class_way:
        render_to_file('tool2.yaml.j2', os.path.join(yamls_dir, f'{tool_name}.yaml'),
                       package_name=package_name, tool_name=tool_name,
                       class_name=class_name, function_name=function_name)
    else:
        render_to_file('tool.yaml.j2', os.path.join(yamls_dir, f'{tool_name}.yaml'),
                       package_name=package_name, tool_name=tool_name,
                       function_name=function_name)

    # tests/ with an empty __init__.py and a rendered unit-test module.
    tests_dir = os.path.join(destination, 'tests')
    create_folder(tests_dir)
    create_file(os.path.join(tests_dir, '__init__.py'))
    if is_class_way:
        render_to_file('test_tool2.py.j2', os.path.join(tests_dir, f'test_{tool_name}.py'),
                       package_name=package_name, tool_name=tool_name,
                       class_name=class_name, function_name=function_name)
    else:
        render_to_file('test_tool.py.j2', os.path.join(tests_dir, f'test_{tool_name}.py'),
                       package_name=package_name, tool_name=tool_name,
                       function_name=function_name)

    print(f'Generated tool package template for {package_name} at {destination}')
4,571 | import argparse
import base64
import os
import io
from PIL import Image
def get_image_size(image_path):
    """Return the (width, height) tuple of the image at *image_path*."""
    with Image.open(image_path) as img:
        return img.size
4,572 | import argparse
import base64
import os
import io
from PIL import Image
def get_image_storage_size(image_path):
    """Return the on-disk size of *image_path* in mebibytes (float)."""
    size_in_bytes = os.path.getsize(image_path)
    return size_in_bytes / (1 << 20)
4,573 | import argparse
import base64
import os
import io
from PIL import Image
def create_html_file(data_uri, output_path):
    """Write a minimal HTML page embedding *data_uri* as an <img> to *output_path*."""
    markup = f'<html>\n<body>\n<img src="{data_uri}" alt="My Image">\n</body>\n</html>'
    with open(output_path, 'w') as out:
        out.write(markup)
4,574 | import argparse
import base64
import os
import io
from PIL import Image
def image_to_data_url(image_path):
    """Load *image_path*, normalize it to a 16x16 PNG, and return it as a data URL."""
    with open(image_path, "rb") as image_file:
        raw = image_file.read()
    img = Image.open(io.BytesIO(raw))
    # Normalize to the 16x16 icon size expected downstream.
    if img.size != (16, 16):
        img = img.resize((16, 16), Image.Resampling.LANCZOS)
    out = io.BytesIO()
    img.save(out, format="PNG")
    encoded = base64.b64encode(out.getvalue()).decode('utf-8')
    return 'data:image/png;base64,' + encoded
def check_image_type(image_path):
    """Raise ValueError unless *image_path*'s extension is a supported image type."""
    extension = image_path.rsplit('.', 1)[-1].lower()
    if extension not in SUPPORT_IMAGE_TYPES:
        raise ValueError("Only png, jpg or bmp image types are supported.")
def check_image_type_and_generate_data_url(image_path):
    """Validate the file extension, then return the image as a 16x16 PNG data URL.

    Raises:
        ValueError: if the extension is not a supported image type.
    """
    check_image_type(image_path)
    return image_to_data_url(image_path)
4,575 | import inspect
import types
from dataclasses import asdict
from utils.tool_utils import function_to_interface
from promptflow.contracts.tool import Tool, ToolType
from promptflow._internal import ToolProvider
from promptflow.exceptions import ErrorTarget, UserErrorException
def generate_python_tools_in_module(module, name, description):
    """Build Tool objects for every tool function and tool method found in *module*."""
    functions = collect_tool_functions_in_module(module)
    methods = collect_tool_methods_in_module(module)
    tools = [_parse_tool_from_function(func, name=name, description=description)
             for func in functions]
    for func, initialize_inputs in methods:
        tools.append(_parse_tool_from_function(func, initialize_inputs,
                                               name=name, description=description))
    return tools
def _construct_tool_dict(tools, **advanced_features):
    """Map each tool's fully-qualified name to its serialized dict form."""
    result = {}
    for tool in tools:
        if tool.class_name is not None:
            key = f"{tool.module}.{tool.class_name}.{tool.function}"
        else:
            key = f"{tool.module}.{tool.function}"
        result[key] = asdict_with_advanced_features_without_none(tool, **advanced_features)
    return result
def generate_python_tools_in_module_as_dict(module, name=None, description=None, **advanced_features):
    """Collect python tools from *module* and return them keyed by qualified name.

    Extra keyword arguments are forwarded as advanced-feature fields on each
    serialized tool entry.
    """
    tools = generate_python_tools_in_module(module, name, description)
    return _construct_tool_dict(tools, **advanced_features)
4,576 | import inspect
import types
from dataclasses import asdict
from utils.tool_utils import function_to_interface
from promptflow.contracts.tool import Tool, ToolType
from promptflow._internal import ToolProvider
from promptflow.exceptions import ErrorTarget, UserErrorException
def generate_custom_llm_tools_in_module(module, name, description):
    """Build CUSTOM_LLM Tool objects for every tool function/method in *module*."""
    tools = []
    for func in collect_tool_functions_in_module(module):
        tools.append(_parse_tool_from_function(
            func, tool_type=ToolType.CUSTOM_LLM, name=name, description=description))
    for func, initialize_inputs in collect_tool_methods_in_module(module):
        tools.append(_parse_tool_from_function(
            func, initialize_inputs, tool_type=ToolType.CUSTOM_LLM,
            name=name, description=description))
    return tools
def _construct_tool_dict(tools, **advanced_features):
    """Key each tool by module[.class].function and serialize it to a dict."""
    def qualified_name(t):
        if t.class_name is None:
            return f"{t.module}.{t.function}"
        return f"{t.module}.{t.class_name}.{t.function}"
    return {qualified_name(t): asdict_with_advanced_features_without_none(t, **advanced_features)
            for t in tools}
def generate_custom_llm_tools_in_module_as_dict(module, name=None, description=None, **advanced_features):
    """Collect CUSTOM_LLM tools from *module* and return them keyed by qualified name.

    Extra keyword arguments are forwarded as advanced-feature fields on each
    serialized tool entry.
    """
    tools = generate_custom_llm_tools_in_module(module, name, description)
    return _construct_tool_dict(tools, **advanced_features)
4,577 | import json
import os
import shutil
import subprocess
from datetime import datetime
from pathlib import Path
import requests
# Directory of the repo's helper scripts (assumes CWD is the repo root — TODO confirm).
scripts_dir = os.path.join(os.getcwd(), "scripts")
# Clone URL template; {0} is an Azure DevOps PAT embedded as basic-auth credentials.
ado_promptflow_repo_url_format = "https://{0}@dev.azure.com/msdata/Vienna/_git/PromptFlow"
def replace_lines_from_file_under_hint(file_path, hint: str, lines_to_replace: list):
def create_remote_branch_in_ADO_with_new_tool_pkg_version(
    ado_pat: str, tool_pkg_version: str, blob_prefix="test-promptflow"
) -> str:
    """Clone the ADO PromptFlow repo, pin a test tool-package version in the
    deploy config, and push the result as a new remote branch.

    Args:
        ado_pat: Azure DevOps personal access token; embedded in the clone URL.
        tool_pkg_version: promptflow_tools version written into extra_requirements.
        blob_prefix: path segment of the test PyPI index on azureedge.net.

    Returns:
        Name of the pushed branch (devs/test_tool_pkg_<version>_<timestamp>).

    Side effects: changes the process working directory (os.chdir) and mutates
    global git config (user.name / user.email).
    """
    # Clone the Azure DevOps repo
    parent_dir = os.path.abspath(os.path.join(os.getcwd(), os.pardir))
    tmp_dir = os.path.join(parent_dir, "temp")
    if not os.path.exists(tmp_dir):
        os.mkdir(tmp_dir)
    subprocess.run(["git", "config", "--global", "user.email", "github-promptflow@dummy.com"])
    subprocess.run(["git", "config", "--global", "user.name", "github-promptflow"])
    # Change directory to the 'tmp' directory
    os.chdir(tmp_dir)
    repo_dir = os.path.join(tmp_dir, "PromptFlow")
    repo_url = ado_promptflow_repo_url_format.format(ado_pat)
    subprocess.run(["git", "clone", repo_url, repo_dir])
    # Change directory to the repo directory
    os.chdir(repo_dir)
    # Pull the devs/test branch
    # Discard any local changes / untracked files left over from a previous
    # run of this script, then sync main with the remote.
    subprocess.run(["git", "reset", "."])
    subprocess.run(["git", "checkout", "."])
    subprocess.run(["git", "clean", "-f", "."])
    subprocess.run(["git", "checkout", "main"])
    subprocess.run(["git", "fetch"])
    subprocess.run(["git", "pull"])
    # Make changes
    # 1. add test endpoint 'promptflow-gallery-tool-test.yaml'
    # 2. update tool package version
    source_file = Path(scripts_dir) / "tool/utils/configs/promptflow-gallery-tool-test.yaml"
    destination_folder = "deploy/model"
    shutil.copy(source_file, destination_folder)
    new_lines = [
        f"--extra-index-url https://azuremlsdktestpypi.azureedge.net/{blob_prefix}\n",
        f"promptflow_tools=={tool_pkg_version}\n",
    ]
    replace_lines_from_file_under_hint(
        file_path="docker_build/linux/extra_requirements.txt",
        hint="# Prompt-flow tool package",
        lines_to_replace=new_lines,
    )
    # Create a new remote branch
    new_branch_name = f"devs/test_tool_pkg_{tool_pkg_version}_{datetime.now().strftime('%Y%m%d%H%M%S')}"
    # NOTE(review): "origin" is passed here as a *branch name* to delete, not
    # as a remote — looks unintended (perhaps `git push origin --delete` was
    # meant); confirm before relying on this cleanup.
    subprocess.run(["git", "branch", "-D", "origin", new_branch_name])
    subprocess.run(["git", "checkout", "-b", new_branch_name])
    subprocess.run(["git", "add", "."])
    subprocess.run(["git", "commit", "-m", f"Update tool package version to {tool_pkg_version}"])
    subprocess.run(["git", "push", "-u", repo_url, new_branch_name])
    return new_branch_name
4,578 | import json
import os
import shutil
import subprocess
from datetime import datetime
from pathlib import Path
import requests
scripts_dir = os.path.join(os.getcwd(), "scripts")
def deploy_test_endpoint(branch_name: str, ado_pat: str):
    """Trigger the ADO 'PromptFlow-deploy-endpoint' pipeline against *branch_name*."""
    # PromptFlow-deploy-endpoint pipeline in ADO: https://msdata.visualstudio.com/Vienna/_build?definitionId=24767&_a=summary # noqa: E501
    url = "https://dev.azure.com/msdata/Vienna/_apis/pipelines/24767/runs?api-version=7.0-preview.1"
    request_body_file = Path(scripts_dir) / "tool/utils/configs/deploy-endpoint-request-body.json"
    with open(request_body_file, "r") as fh:
        payload = json.load(fh)
    # Point the pipeline checkout at the requested branch.
    payload["resources"]["repositories"]["self"]["refName"] = f"refs/heads/{branch_name}"
    print(f"request body: {payload}")
    resp = requests.post(url, json=payload, auth=("dummy_user_name", ado_pat))
    print(resp.status_code)
    print(resp.content)
4,579 | import re
from azure.core.exceptions import HttpResponseError, ResourceExistsError
from azure.identity import ClientSecretCredential
from azure.keyvault.secrets import SecretClient
from exceptions import (
SecretNameAlreadyExistsException,
SecretNameInvalidException,
SecretNoSetPermissionException,
)
reserved_secret_names = []
def list_secret_names(client: SecretClient) -> list:
def init_used_secret_names(client: SecretClient):
    """Populate the module-level reserved_secret_names cache from Key Vault.

    Overwrites the global list with every secret name visible to *client*, so
    later name-collision checks can consult it without extra network calls.
    """
    global reserved_secret_names
    reserved_secret_names = list_secret_names(client)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.