import os
from dotenv import load_dotenv
from evoagentx.benchmark import MBPP, AFlowMBPP
from evoagentx.benchmark import SciCode, AFlowSciCode
from evoagentx.optimizers import AFlowOptimizer
from evoagentx.models import LiteLLMConfig, LiteLLM, OpenAILLMConfig, OpenAILLM
# SECURITY: a live OpenAI key was previously hard-coded on this line and
# committed to the repository — that key must be revoked/rotated. Secrets are
# now read from the environment (optionally populated from a local .env file).
load_dotenv()
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")  # None if not configured
# Per-benchmark AFlow settings. `question_type` selects the prompt family and
# `operators` is the set of workflow operators the optimizer may compose.
# Only one entry is used below (`"mbpp"`); the rest are kept for other runs.
EXPERIMENTAL_CONFIG = {
    "humaneval": {
        "question_type": "code",
        "operators": ["Custom", "CustomCodeGenerate", "Test", "ScEnsemble"]
    },
    "mbpp": {
        "question_type": "code",
        "operators": ["Custom", "CustomCodeGenerate", "Test", "ScEnsemble"]
    },
    "hotpotqa": {
        "question_type": "qa",
        "operators": ["Custom", "AnswerGenerate", "QAScEnsemble"]
    },
    "gsm8k": {
        "question_type": "math",
        "operators": ["Custom", "ScEnsemble", "Programmer"]
    },
    "math": {
        "question_type": "math",
        "operators": ["Custom", "ScEnsemble", "Programmer"]
    }
}
class SciCodeSplits(AFlowSciCode):
    """AFlowSciCode whose dev split is restricted to the first 30 SciCode dev tasks.

    Each retained example gets its ``task_id`` suffixed with its position in the
    split so that ids are unique even if a task appears more than once.
    """

    # Number of SciCode dev tasks kept for the dev split.
    _NUM_DEV_TASKS = 30

    def _load_data(self):
        """Load data via the parent class, then shrink the dev split."""
        # Keep the numpy seeding for parity with earlier runs that set the
        # global RNG state here. NOTE(review): the original code also built a
        # random permutation (claiming a 50/100 random split) but never used
        # it — selection below is deterministic (first 30 dev tasks).
        import numpy as np
        np.random.seed(42)

        # Task ids of the first N examples of the original SciCode dev data.
        scicode_dev_data = SciCode().get_dev_data()
        dev_data_task_ids = [
            scicode_dev_data[idx]["task_id"] for idx in range(self._NUM_DEV_TASKS)
        ]

        # Populate self._dev_data / self._test_data, then keep only the
        # selected tasks (searching both splits) as the new dev split.
        super()._load_data()
        full_data = self._dev_data + self._test_data
        self._dev_data = [
            example for example in full_data
            if example["task_id"] in dev_data_task_ids
        ]

        # Disambiguate ids by appending each example's index in the dev split
        # (mutates the example dicts in place, as the original code did).
        for idx, example in enumerate(self._dev_data):
            example['task_id'] = example['task_id'] + '-' + str(idx)
def main():
    """Optimize and evaluate an AFlow workflow on the SciCode dev/test split.

    Requires ``OPENAI_API_KEY`` to be set; issues real API calls to
    ``gpt-4o-mini`` for both the executor and the optimizer.
    """
    # Executor LLM runs candidate workflows; optimizer LLM proposes graph edits.
    # Both are gpt-4o-mini here, just driven through different client wrappers.
    executor_config = OpenAILLMConfig(
        model="gpt-4o-mini",
        openai_key=OPENAI_API_KEY,
    )
    optimizer_config = LiteLLMConfig(
        model="gpt-4o-mini",
        openai_key=OPENAI_API_KEY,
    )
    executor_llm = OpenAILLM(config=executor_config)
    optimizer_llm = LiteLLM(config=optimizer_config)

    # Benchmark: SciCode with the custom 30-task dev split defined above.
    benchmark = SciCodeSplits()
    benchmark.error_list = {}

    # The "mbpp" experiment config is reused because SciCode is also a
    # code-generation task with the same operator set.
    optimizer = AFlowOptimizer(
        graph_path="examples/aflow/code_generation",
        optimized_path="examples/aflow/scicode_full/optimized",
        optimizer_llm=optimizer_llm,
        executor_llm=executor_llm,
        validation_rounds=3,
        eval_rounds=1,
        max_rounds=10,
        **EXPERIMENTAL_CONFIG["mbpp"],
    )

    # Run the optimization loop, then evaluate on the test split.
    optimizer.optimize(benchmark)
    optimizer.test(benchmark)  # use `test_rounds: List[int]` to pick rounds


if __name__ == "__main__":
    main()