Update app.py
Browse files
app.py
CHANGED
@@ -53,12 +53,12 @@ model = AutoModelForCausalLM.from_pretrained(
     device_map="auto"
 )
 
-reasoning_tokenizer = AutoTokenizer.from_pretrained(config.reasoning_model_name)
-reasoning_model = AutoModelForCausalLM.from_pretrained(
-    config.reasoning_model_name,
-    torch_dtype=torch.float16,
-    device_map="auto"
-)
+# reasoning_tokenizer = AutoTokenizer.from_pretrained(config.reasoning_model_name)
+# reasoning_model = AutoModelForCausalLM.from_pretrained(
+#     config.reasoning_model_name,
+#     torch_dtype=torch.float16,
+#     device_map="auto"
+# )
 
 def generate(prompt):
     """
@@ -418,8 +418,8 @@ Information:
 {information}
 """
 
-raw_output = reasoning_generate(prompt)
-
+# raw_output = reasoning_generate(prompt)
+raw_output = generate(prompt)
 
 logger.info(f"Raw Output: {raw_output}")
 