import os
from dotenv import load_dotenv
from evoagentx.benchmark import AFlowMBPP
from evoagentx.optimizers import AFlowOptimizer
from evoagentx.models import LiteLLMConfig, LiteLLM, OpenAILLMConfig, OpenAILLM
api_key = "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"
# Function to encode the image
import os
os.environ["OPENAI_API_KEY"] = api_key
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
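
# Benchmark-specific AFlow settings: each entry maps a benchmark to its
# question type and the operator set the optimizer may wire into the workflow graph.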
EXPERIMENTAL_CONFIG = {
"humaneval": {
"question_type": "code",
"operators": ["Custom", "CustomCodeGenerate", "Test", "ScEnsemble"]
},
"mbpp": {
"question_type": "code",
"operators": ["Custom", "CustomCodeGenerate", "Test", "ScEnsemble"]
},
"hotpotqa": {
"question_type": "qa",
"operators": ["Custom", "AnswerGenerate", "QAScEnsemble"]
},
"gsm8k": {
"question_type": "math",
"operators": ["Custom", "ScEnsemble", "Programmer"]
},
"math": {
"question_type": "math",
"operators": ["Custom", "ScEnsemble", "Programmer"]
}
}
class MBPPSplits(AFlowMBPP):
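    """AFlowMBPP variant that subsamples the dev split.

    Keeps a fixed random subset of 30 dev examples (seeded for reproducibility)
    so each optimization round validates quickly; the test split is left intact.
    """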
    def _load_data(self):
        # load the AFlow-formatted MBPP dev data to choose a reproducible subset
        mbpp_dev_data = AFlowMBPP().get_dev_data()
        # randomly (but deterministically) select 30 samples for the dev split
        np.random.seed(42)
        permutation = np.random.permutation(len(mbpp_dev_data))
        dev_data_task_ids = [mbpp_dev_data[idx]["task_id"] for idx in permutation[:30]]
        super()._load_data()
        full_data = self._dev_data + self._test_data
        # keep only the sampled task ids in the dev split; the test split loaded
        # by the parent class is left unchanged
        self._dev_data = [example for example in full_data if example["task_id"] in dev_data_task_ids]
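
# Quick sanity check of the split sizes (a sketch; it assumes the AFlow
# benchmark classes load their data on construction, as the usage in main() does):
#   splits = MBPPSplits()
#   print(len(splits.get_dev_data()), len(splits.get_test_data()))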
def main():
    # executor LLM: runs the nodes of the candidate workflows
    openai_config = OpenAILLMConfig(
        model="gpt-4o-mini",
        openai_key=OPENAI_API_KEY
    )
    # optimizer LLM: proposes edits to the workflow graph; here the same
    # OpenAI model is used, served through LiteLLM
    litellm_config = LiteLLMConfig(
        model="gpt-4o-mini",
        openai_key=OPENAI_API_KEY
    )
    executor_llm = OpenAILLM(config=openai_config)
    optimizer_llm = LiteLLM(config=litellm_config)
# load benchmark
mbpp = MBPPSplits()
# create optimizer
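    # validation_rounds and eval_rounds are set to 1 to keep runs cheap; raise
    # them (the upstream example used 3 of each) for less noisy scores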
    optimizer = AFlowOptimizer(
        graph_path="examples/aflow/code_generation",
        optimized_path="examples/aflow/mbpp_new/optimized",
        optimizer_llm=optimizer_llm,
        executor_llm=executor_llm,
        validation_rounds=1,
        eval_rounds=1,
        max_rounds=20,
        **EXPERIMENTAL_CONFIG["mbpp"]
    )
# run optimization
optimizer.optimize(mbpp)
# run test
optimizer.test(mbpp) # use `test_rounds: List[int]` to specify the rounds to test
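    # e.g. `optimizer.test(mbpp, test_rounds=[1, 2])` would evaluate only those rounds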
if __name__ == "__main__":
main() |