# NOTE(review): removed Hugging Face Spaces page residue ("Spaces: / Paused / Paused")
# that was pasted above the code — it is UI text, not part of the program.
# Standard library
import os  # was missing: os.environ is read in the __main__ guard below
import re

# Third-party
import requests
from bs4 import BeautifulSoup
from fastapi import FastAPI, HTTPException

app = FastAPI(title="Neon Anime Api")
def home():
    """Liveness payload for the API root.

    NOTE(review): no ``@app.get`` route decorator is visible in this file —
    confirm the route is registered elsewhere (decorators may have been
    lost in the formatting of this paste).
    """
    return {"status": "Neon Anime Api Alive"}
def normalize_text(text: str) -> str:
    """Reduce *text* to a canonical search form.

    Lowercases the input, drops every character that is not an ASCII
    letter, digit, or whitespace, collapses whitespace runs into single
    spaces, and trims both ends.
    """
    lowered = text.lower()
    letters_digits_only = re.sub(r"[^a-z0-9\s]", "", lowered)
    single_spaced = re.sub(r"\s+", " ", letters_digits_only)
    return single_spaced.strip()
def search_anime(keyword: str):
    """Search aniwave.se for *keyword* and return ranked results.

    Returns ``{"keyword": ..., "results": [...]}`` where each result has
    ``title``, ``url`` (site-relative path), ``thumbnail``, ``episodes``
    (``{"sub", "dub", "total"}``, ints or None) and ``type``.

    Raises HTTPException(500) when the search page cannot be fetched or
    parsed.

    NOTE(review): no ``@app.get`` route decorator is visible in this
    file — confirm the route is registered elsewhere.
    """
    def _episode_count(elem):
        # Episode badges are not guaranteed to be numeric; return None
        # instead of letting a ValueError abort the whole search.
        if elem is None:
            return None
        try:
            return int(elem.text.strip())
        except ValueError:
            return None

    try:
        normalized_keyword = normalize_text(keyword)
        # Let requests build the query string so the keyword is
        # percent-encoded correctly (spaces etc. were previously
        # interpolated raw into the URL).
        resp = requests.get(
            "https://www.aniwave.se/filter",
            params={"keyword": normalized_keyword},
            headers={"User-Agent": "Mozilla/5.0"},
            timeout=15,  # don't hang the endpoint on a dead upstream
        )
        if resp.status_code != 200:
            raise HTTPException(status_code=500, detail="Failed to fetch search page")

        soup = BeautifulSoup(resp.text, "html.parser")
        results = []
        for item in soup.select("div.item"):
            title_elem = item.select_one("a.name.d-title")
            title = title_elem.text.strip() if title_elem else None
            path = title_elem["href"] if title_elem else None
            thumb_elem = item.select_one("div.ani.poster img")
            thumbnail = thumb_elem.get("src") if thumb_elem else None
            episodes = {
                "sub": _episode_count(item.select_one("span.ep-status.sub span")),
                "dub": _episode_count(item.select_one("span.ep-status.dub span")),
                "total": _episode_count(item.select_one("span.ep-status.total span")),
            }
            type_elem = item.select_one("div.meta .inner .right")
            anime_type = type_elem.text.strip() if type_elem else None
            if title and path:
                results.append({
                    "title": title,
                    "url": path,
                    "thumbnail": thumbnail,
                    "episodes": episodes,
                    "type": anime_type,
                })

        # Ranking: exact title match first, prefix match second, the rest
        # last; ties broken alphabetically by title.
        keyword_lower = keyword.lower()

        def _rank(entry):
            t = entry["title"].lower()
            if t == keyword_lower:
                return 0
            if t.startswith(keyword_lower):
                return 1
            return 2

        results.sort(key=lambda x: (_rank(x), x["title"].lower()))
        return {"keyword": keyword, "results": results}
    except HTTPException:
        # Was previously swallowed by the broad handler below and
        # re-wrapped with the wrong detail message — propagate as-is.
        raise
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"Failed to search anime: {str(e)}")
def get_metadata(path: str):
    """Fetch full anime metadata from AniWave for the page at *path*.

    *path* is a site-relative URL (e.g. ``/watch/...``) as returned by
    ``search_anime``. Returns a dict of scraped fields; fields absent
    from the page come back as None (or an empty list for the
    genres/studios/producers collections).

    Raises HTTPException(500) when the page cannot be fetched or parsed.

    NOTE(review): no ``@app.get`` route decorator is visible in this
    file — confirm the route is registered elsewhere.
    """
    try:
        resp = requests.get(
            f"https://www.aniwave.se{path}",
            headers={"User-Agent": "Mozilla/5.0"},
            timeout=15,  # don't hang the endpoint on a dead upstream
        )
        if resp.status_code != 200:
            raise HTTPException(status_code=500, detail="Failed to fetch anime page")

        soup = BeautifulSoup(resp.text, "html.parser")

        # Thumbnail
        thumb_elem = soup.select_one("img[itemprop='image']")
        thumbnail = thumb_elem["src"] if thumb_elem else None
        # Title
        title_elem = soup.select_one(".names.font-italic.mb-2")
        title = title_elem.text.strip() if title_elem else None
        # Rating / Quality / Sub / Dub badges
        rating_elem = soup.select_one(".meta.icons .rating")
        rating = rating_elem.text.strip() if rating_elem else None
        quality_elem = soup.select_one(".meta.icons .quality")
        quality = quality_elem.text.strip() if quality_elem else None
        has_sub = bool(soup.select_one(".meta.icons .sub"))
        has_dub = bool(soup.select_one(".meta.icons .dub"))
        # Description
        desc_elem = soup.select_one(".synopsis .content")
        description = desc_elem.text.strip() if desc_elem else None

        # bmeta label/value rows
        type_ = source = premiered = date_aired = broadcast = status = mal_rating = duration = total_episodes = None
        genres = []
        studios = []
        producers = []
        for div in soup.select(".bmeta .meta"):
            for item in div.find_all("div", recursive=False):
                text = item.get_text(separator="|", strip=True)
                if text.startswith("Type:"):
                    type_elem = item.find("a")
                    type_ = type_elem.text.strip() if type_elem else None
                elif text.startswith("Source:"):
                    source_elem = item.find("span")
                    source = source_elem.text.strip() if source_elem else None
                elif text.startswith("Premiered:"):
                    premiered_elem = item.find("a")
                    premiered = premiered_elem.text.strip() if premiered_elem else None
                elif text.startswith("Date aired:"):
                    date_elem = item.find("span")
                    date_aired = date_elem.text.strip() if date_elem else None
                elif text.startswith("Broadcast:"):
                    broadcast = item.text.replace("Broadcast:", "").strip()
                elif text.startswith("Status:"):
                    status_elem = item.find("a")
                    status = status_elem.text.strip() if status_elem else None
                elif text.startswith("Genres:"):
                    genres = [g.text.strip() for g in item.find_all("a")]
                elif text.startswith("MAL:"):
                    # e.g. "MAL: 7.97 by 1786345 reviews" -> "7.97"
                    raw = item.get_text(strip=True)
                    match = re.search(r"MAL:\s*([\d.]+)", raw)
                    mal_rating = match.group(1) if match else None
                elif text.startswith("Duration:"):
                    duration = item.text.replace("Duration:", "").strip()
                elif text.startswith("Episodes:"):
                    ep_elem = item.find("span")
                    total_episodes = ep_elem.text.strip() if ep_elem else None
                elif text.startswith("Studios:"):
                    # Scope to this row. The original queried the whole
                    # page with the non-standard ':contains' pseudo-class,
                    # which current soupsieve versions reject. Dedupe while
                    # preserving page order.
                    studios = list(dict.fromkeys(a.text.strip() for a in item.find_all("a")))
                elif text.startswith("Producers:"):
                    producers = list(dict.fromkeys(a.text.strip() for a in item.find_all("a")))

        return {
            "title": title,
            "rating": rating,
            "quality": quality,
            "has_sub": has_sub,
            "has_dub": has_dub,
            "description": description,
            "type": type_,
            "source": source,
            "premiered": premiered,
            "date_aired": date_aired,
            "broadcast": broadcast,
            "status": status,
            "genres": genres,
            "mal_rating": mal_rating,
            "duration": duration,
            "total_episodes": total_episodes,
            "studios": studios,
            "producers": producers,
            "thumbnail": thumbnail,
        }
    except HTTPException:
        # Was previously swallowed by the broad handler below and
        # re-wrapped with the wrong detail message — propagate as-is.
        raise
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"Failed to fetch metadata: {str(e)}")
| # ===== END PATCH ===== | |
if __name__ == "__main__":
    # Local imports keep server deps out of import time for API consumers.
    import os  # was referenced without any import -> NameError at startup
    import uvicorn

    # HF Spaces injects PORT automatically; default to 7860 for local runs.
    port = int(os.environ.get("PORT", 7860))
    uvicorn.run(app, host="0.0.0.0", port=port)