| | import os |
| | import re |
| |
|
| | import pandas as pd |
| | from bs4 import BeautifulSoup |
| |
|
| |
|
def extract_puzzle_data(html):
    """Extract puzzle fields from one saved puzzlebaron puzzle page.

    Parameters
    ----------
    html : str
        Raw HTML of a logic.puzzlebaron.com puzzle page.

    Returns
    -------
    dict
        Keys: 'story' (backstory text), 'clues' (list of clue strings,
        leading "N. " numbering removed), 'categories' (answer-grid column
        headers), and 'label_a'..'label_d' (the four label columns).
    """
    soup = BeautifulSoup(html, 'html.parser')

    # Backstory lives in the second tab. Guard the missing-section case
    # explicitly: the original `find(...) or ""` produced a plain str,
    # which has no .get_text() and would raise AttributeError.
    section = soup.find('div', id='tabs-2')
    if section is not None:
        task_description = section.get_text(strip=True).replace('\xa0', ' ')
    else:
        task_description = ""
    # Trim the fixed header and the boilerplate footer around the story.
    task_description = task_description.removeprefix("Backstory and Goal")
    task_description = task_description.partition("Remember, as with all")[0]

    categories = [cc.get_text(strip=True).replace('\xa0', ' ')
                  for cc in soup.find_all('td', class_='answergrid_head')]

    clues = []
    for clue_div in soup.find_all('div', class_='clue'):
        clue_raw = clue_div.get_text(strip=True).replace('\xa0', ' ')
        # Strip the leading "1. " style clue numbering.
        cleaned_clue = re.sub(r'^\d+\.\s*', '', clue_raw)
        clues.append(cleaned_clue)

    # First label column is rendered as <td class="labelboxh" id="labelleftA...">
    # cells; use .get() so cells without an id don't raise KeyError.
    label_categories = dict(label_a=[])
    for label in soup.find_all('td', class_='labelboxh'):
        if label.get('id', '').startswith("labelleftA"):
            label_categories["label_a"].append(
                label.get_text(strip=True).replace('\xa0', ' '))
    # Remaining columns are embedded in page JavaScript as
    #   label<letter>_ary[i] = "value";  — pull them out with a regex.
    for letter in "bcd":
        pattern = re.compile(f'label{letter}_ary' + r'\[\d+]\s*=\s*"([^"]+)";')
        label_categories[f"label_{letter}"] = pattern.findall(html)

    return dict(story=task_description,
                clues=clues,
                categories=categories,
                **label_categories)
| |
|
| |
|
# Cross-bucket dedup state shared by process_one(): clue texts seen in any
# puzzle accepted so far, across every difficulty/grid-size combination.
# NOTE(review): global_stories is never read or updated in this file —
# presumably leftover from an earlier story-based dedup; confirm before removal.
global_stories = set()
global_clues = set()
| |
|
| |
|
def process_one(difficulty, grid_size):
    """Parse every downloaded page for one (difficulty, grid size) bucket.

    Reads the URL list from ``urls/{difficulty}{grid_size}.txt``, loads the
    matching saved HTML files under ``htmls/{difficulty}{grid_size}/``, and
    returns a list of per-puzzle dicts ready to become DataFrame rows.
    Near-duplicate puzzles (sharing 3+ clues with anything seen so far,
    tracked in the module-level ``global_clues`` set) are skipped.

    Parameters
    ----------
    difficulty : str
        One of the difficulty names used in the url/html directory layout.
    grid_size : str
        Grid-size label such as '4x7'.

    Returns
    -------
    list[dict]
        Puzzle dicts from extract_puzzle_data, each augmented with
        'grid_size', 'difficulty', and 'url'.
    """
    puzzle_data = []
    # Explicit encoding: relying on the platform default is not portable.
    with open(f'urls/{difficulty}{grid_size}.txt', encoding='utf-8') as rr:
        all_paths = [p.strip() for p in rr]
    dir_path = f'htmls/{difficulty}{grid_size}/'
    for c, puzzle_url in enumerate(all_paths):
        # Saved files are named by the URL path after the site prefix.
        filename = puzzle_url.removeprefix("https://logic.puzzlebaron.com/")
        if c % 200 == 0:
            print(f"{c=}")  # progress heartbeat every 200 pages
        file_path = os.path.join(dir_path, filename)
        with open(file_path, 'r', encoding='utf-8') as file:
            html_content = file.read()
        data = extract_puzzle_data(html_content)
        # Dedupe: a puzzle sharing 3 or more clues with previously accepted
        # puzzles is treated as a near-duplicate and dropped.
        if len(global_clues.intersection(data['clues'])) >= 3:
            continue
        global_clues.update(data['clues'])
        data['grid_size'] = grid_size
        data['difficulty'] = difficulty
        data['url'] = puzzle_url
        puzzle_data.append(data)
    return puzzle_data
| |
|
| |
|
# Directory where the per-bucket JSONL outputs are written by main().
OUTPUT_DIR = "dataframes"
| |
|
| |
|
def main():
    """Build one JSONL file per (difficulty, grid size) bucket in OUTPUT_DIR.

    Iterates every grid size / difficulty combination, collects the parsed
    puzzles via process_one, and writes them as line-delimited JSON records.
    """
    # exist_ok avoids the check-then-create race of exists() + makedirs().
    os.makedirs(OUTPUT_DIR, exist_ok=True)
    for grid_size in ['4x7', '4x6', '4x5', '4x4', '3x5', '3x4']:
        for difficulty in ['challenging', 'moderate', 'easy']:
            puzzle_data = process_one(difficulty, grid_size)
            df = pd.DataFrame(puzzle_data)
            jsonl_file_path = f'{OUTPUT_DIR}/{difficulty}{grid_size}.jsonl'
            # One JSON object per line (records orientation) for easy streaming.
            df.to_json(jsonl_file_path, orient='records', lines=True)
            print(f'Data saved to {jsonl_file_path}', df.shape)
| |
|
| |
|
# Guard the entry point so importing this module doesn't kick off the scrape.
if __name__ == "__main__":
    main()
| |
|