# Streamlit chatbot app using the Groq API (LLaMA model).
# (Removed non-code residue from the Hugging Face Spaces status page.)
import os

import streamlit as st
from dotenv import load_dotenv
from groq import Groq

# Load environment variables from the local .env file (expects GROQ_API_KEY).
load_dotenv()

# Fetch the API key; without it the app cannot reach the Groq service,
# so fail fast with a visible error and halt the script.
GROQ_API_KEY = os.getenv("GROQ_API_KEY")
if not GROQ_API_KEY:
    st.error("Please set the GROQ_API_KEY in the .env file and restart the app.")
    st.stop()

# Initialize the Groq client once at module import time (reused across reruns).
client = Groq(api_key=GROQ_API_KEY)
def get_groq_response(prompt, model="llama-3.3-70b-versatile"):
    """Send *prompt* to the Groq chat-completions API and return the reply text.

    Args:
        prompt: User message, forwarded as a single-turn chat message.
        model: Groq model identifier (default: ``llama-3.3-70b-versatile``).

    Returns:
        The model's reply text, or an ``"Error: ..."`` string if the API call
        fails — the UI displays that string instead of crashing the app.
    """
    try:
        response = client.chat.completions.create(
            messages=[{"role": "user", "content": prompt}],
            model=model,
        )
        return response.choices[0].message.content
    # Broad by design: any API/network failure is surfaced as text in the UI.
    except Exception as e:
        return f"Error: {str(e)}"
# --- Streamlit UI ---
st.title("LLaMA Chatbot using Groq API")

user_input = st.text_area("Enter your prompt:")

if st.button("Generate Response"):
    # Ignore whitespace-only prompts instead of sending an empty request.
    if user_input.strip():
        with st.spinner("Generating response..."):
            response = get_groq_response(user_input)
            st.write("### Response:")
            st.write(response)
    else:
        st.warning("Please enter a prompt.")