svincoff committed
Commit a887ffc · 1 Parent(s): 37761e5

hydra restructure

Files changed (42)
  1. .gitignore +14 -7
  2. .project-root +0 -0
  3. configs/data_task/clean/remap.yaml +12 -0
  4. configs/data_task/download/genome.yaml +5 -0
  5. configs/data_task/download/remap.yaml +12 -0
  6. configs/data_task/fimo/post_fimo.yaml +6 -0
  7. configs/data_task/fimo/pre_fimo.yaml +6 -0
  8. configs/data_task/fimo/run_fimo.yaml +22 -0
  9. configs/hydra/default.yaml +17 -0
  10. configs/paths/default.yaml +16 -0
  11. configs/preprocess.yaml +9 -0
  12. configs/train.yaml +0 -0
  13. dpacman/classifier/__init__.py +0 -0
  14. dpacman/data/README.md +0 -41
  15. dpacman/data/chip_atlas/full_data_loading.py +0 -97
  16. dpacman/data/chip_atlas/smaller_data_loading.py +0 -160
  17. dpacman/data/consistency.py +0 -10
  18. dpacman/data/remap/analyze.py +0 -48
  19. dpacman/data/remap/pre_fimo.py +0 -61
  20. dpacman/data/tfclust/analyze.py +0 -77
  21. dpacman/data/tfclust/api_download.py +0 -448
  22. dpacman/data/tfclust/combine.py +0 -114
  23. dpacman/data/tfclust/download.py +0 -462
  24. dpacman/data_tasks/clean/__init__.py +0 -0
  25. dpacman/data_tasks/clean/remap.py +243 -0
  26. dpacman/data_tasks/download/README.md +26 -0
  27. dpacman/data_tasks/download/__init__.py +0 -0
  28. dpacman/data_tasks/download/download_unzip.sh +41 -0
  29. dpacman/data_tasks/download/genome.py +233 -0
  30. dpacman/data_tasks/download/remap.py +87 -0
  31. dpacman/data_tasks/embeddings/__init__.py +0 -0
  32. dpacman/{data → data_tasks/embeddings}/compute_embeddings.py +83 -45
  33. dpacman/{data/remap → data_tasks/fimo}/post_fimo.py +42 -26
  34. dpacman/data_tasks/fimo/pre_fimo.py +72 -0
  35. dpacman/{data/remap → data_tasks/fimo}/run_fimo.py +132 -84
  36. dpacman/data_tasks/visualize/__init__.py +0 -0
  37. dpacman/{data → data_tasks/visualize}/visualizations.py +24 -22
  38. dpacman/scripts/__init__.py +0 -0
  39. dpacman/scripts/preprocess.py +58 -0
  40. dpacman/scripts/run_download.sh +16 -0
  41. dpacman/scripts/run_fimo.sh +16 -0
  42. environment.yaml +5 -1
.gitignore CHANGED
@@ -1,11 +1,18 @@
  dpacman/data_files
- dpacman/data/tfclust/*.log
- dpacman/data/tfclust/temp.py
+ dpacman/preprocess/tfclust/*.log
+ dpacman/preprocess/tfclust/temp.py
  bigBedToBed
- dpacman/data/remap/*.log
- dpacman/data/remap/temp.py
- dpacman/data/tfclust/figures
+ dpacman/preprocess/remap/*.log
+ dpacman/preprocess/remap/temp.py
+ dpacman/preprocess/tfclust/figures
  dpacman/softwares/meme*
- dpacman/data/remap/crm_example.csv
- dpacman/data/remap/crm_example_ERG.csv
+ dpacman/preprocess/remap/crm_example.csv
+ dpacman/preprocess/remap/crm_example_ERG.csv
+ dpacman/classifier/old
+ dpacman/classifier/__pycache__/
+ dpacman/data_tasks/clean/__pycache__/
+ dpacman/data_tasks/download/__pycache__/
+ dpacman/data_tasks/fimo/__pycache__/
+ dpacman/scripts/__pycache__/
+ logs/
  tree.txt
.project-root ADDED
File without changes
configs/data_task/clean/remap.yaml ADDED
@@ -0,0 +1,12 @@
+ name: remap
+ type: clean
+
+ nr_raw_path: dpacman/data_files/raw/remap/remap2022_nr_macs2_hg38_v1_0.bed
+ nr_processed_dir: dpacman/data_files/processed/remap
+ nr_processed_filename: remap2022_nr_macs2_hg38_v1_0_clean.tsv
+
+ crm_raw_path: dpacman/data_files/raw/remap/remap2022_crm_macs2_hg38_v1_0.bed
+ crm_processed_dir: dpacman/data_files/processed/remap
+ crm_processed_filename: remap2022_crm_macs2_hg38_v1_0_clean.tsv
+
+ save_example_files: true
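The new `dpacman/data_tasks/clean/remap.py` (+243 lines) is not shown in this diff, so the following is only a minimal sketch of the kind of cleaning step this config drives, assuming the raw ReMap BED uses the column layout seen in the deleted `dpacman/data/remap/analyze.py`; the real script may do considerably more.

```python
import pandas as pd

def clean_remap_nr(cfg):
    """Sketch: read the raw ReMap NR BED named in the config and write a
    cleaned TSV. Column names follow the deleted analyze.py; hypothetical."""
    cols = ["#chrom", "chromStart", "chromEnd", "name", "score",
            "strand", "thickStart", "thickEnd", "reserved"]
    df = pd.read_csv(cfg.nr_raw_path, sep="\t", header=None, names=cols)
    out = f"{cfg.nr_processed_dir}/{cfg.nr_processed_filename}"
    df.to_csv(out, sep="\t", index=False)
    if cfg.save_example_files:
        # Small preview file, mirroring the crm_example.csv habit above
        df.head(50).to_csv(out.replace(".tsv", "_example.tsv"), sep="\t", index=False)
```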
configs/data_task/download/genome.yaml ADDED
@@ -0,0 +1,5 @@
+ name: genome
+ type: download
+ output_dir: dpacman/classifier/data_files/raw/genomes
+ genomes:
+   - hg38
configs/data_task/download/remap.yaml ADDED
@@ -0,0 +1,12 @@
+ name: remap
+ type: download
+
+ nr_url: https://remap.univ-amu.fr/storage/remap2022/hg38/MACS2/remap2022_nr_macs2_hg38_v1_0.bed.gz
+ nr_output_dir: dpacman/data_files/raw/remap
+ nr_filename: remap2022_nr_macs2_hg38_v1_0.bed.gz
+
+ crm_url: https://remap.univ-amu.fr/storage/remap2022/hg38/MACS2/remap2022_crm_macs2_hg38_v1_0.bed.gz
+ crm_output_dir: dpacman/data_files/raw/remap
+ crm_filename: remap2022_crm_macs2_hg38_v1_0.bed.gz
+
+ delete_zip: true
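The restructured `dpacman/data_tasks/download/remap.py` (+87 lines) is not reproduced in this diff; a minimal sketch of what the `nr_*`/`crm_*` fields and `delete_zip` imply, using only the standard library, might look like this (function name and signature are assumptions for illustration):

```python
import gzip
import shutil
import urllib.request
from pathlib import Path

def download_bed_gz(url: str, output_dir: str, filename: str, delete_zip: bool = True):
    """Sketch: fetch a .bed.gz, gunzip it next to the archive, and
    optionally remove the archive afterwards."""
    out_dir = Path(output_dir)
    out_dir.mkdir(parents=True, exist_ok=True)
    gz_path = out_dir / filename
    urllib.request.urlretrieve(url, gz_path)
    bed_path = gz_path.with_suffix("")  # strip the trailing .gz
    with gzip.open(gz_path, "rb") as src, open(bed_path, "wb") as dst:
        shutil.copyfileobj(src, dst)
    if delete_zip:
        gz_path.unlink()
```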
configs/data_task/fimo/post_fimo.yaml ADDED
@@ -0,0 +1,6 @@
+ name: post_fimo
+ type: fimo
+
+ input_csv: dpacman/data_files/processed/fimo/remap2022_crm_fimo_output.csv
+ output_csv: dpacman/data_files/processed/fimo/remap2022_crm_fimo_output_processed.csv
+ json_dir: dpacman/data_files/raw/genomes/hg38
configs/data_task/fimo/pre_fimo.yaml ADDED
@@ -0,0 +1,6 @@
+ name: pre_fimo
+ type: fimo
+
+ input_csv: dpacman/data_files/processed/remap/remap2022_crm_macs2_hg38_v1_0_clean.tsv
+ output_csv: dpacman/data_files/processed/fimo/remap2022_crm_fimo_input.tsv
+ window_total: 500
configs/data_task/fimo/run_fimo.yaml ADDED
@@ -0,0 +1,22 @@
+ name: run_fimo
+ type: fimo
+
+ paths:
+   input_csv: dpacman/data_files/processed/fimo/remap2022_crm_fimo_input.tsv
+   output_csv: dpacman/data_files/processed/fimo/remap2022_crm_fimo_output.csv
+   json_dir: dpacman/data_files/raw/genomes/hg38
+
+ meme:
+   fimo_bin: dpacman/softwares/meme/bin/fimo
+   fasta_get_markov: dpacman/softwares/meme/libexec/meme-5.5.8/fasta-get-markov
+   jaspar_motif_file: dpacman/softwares/meme-5.5.8/tests/common/JASPAR_CORE_2014_vertebrates.meme
+
+ fnames:
+   seq_fasta: to_scan.fa
+   bg_model: bg_model.txt
+   fimo_outdir: fimo_out
+
+ fimo:
+   pval_thresh: 1e-4
+   max_stored: 1000000
+   njobs: max
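The restructured `run_fimo.py` is listed in this commit (+132/-84) but its body is not shown here. As a rough guide, the `meme:`/`fnames:`/`fimo:` blocks map onto two MEME Suite calls; the sketch below uses standard MEME Suite options (`--oc`, `--bgfile`, `--thresh`, `--max-stored-scores`), while the `workdir` argument and function name are assumptions, and `njobs` (the script's own parallelism) is not handled:

```python
import subprocess
from pathlib import Path

def run_fimo(cfg, workdir: str):
    """Sketch of the FIMO scan these settings imply."""
    wd = Path(workdir)
    seq_fasta = wd / cfg.fnames.seq_fasta
    bg_model = wd / cfg.fnames.bg_model
    fimo_outdir = wd / cfg.fnames.fimo_outdir

    # Estimate a background model from the sequences to scan
    with open(bg_model, "w") as bg:
        subprocess.run([cfg.meme.fasta_get_markov, str(seq_fasta)],
                       stdout=bg, check=True)

    # Scan the sequences against the JASPAR motif library
    subprocess.run([
        cfg.meme.fimo_bin,
        "--oc", str(fimo_outdir),
        "--bgfile", str(bg_model),
        "--thresh", str(cfg.fimo.pval_thresh),
        "--max-stored-scores", str(cfg.fimo.max_stored),
        cfg.meme.jaspar_motif_file,
        str(seq_fasta),
    ], check=True)
```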
configs/hydra/default.yaml ADDED
@@ -0,0 +1,17 @@
+ # configs/hydra/default.yaml
+
+ defaults:
+   - override hydra_logging: colorlog
+   - override job_logging: colorlog
+
+ run:
+   dir: ${paths.log_dir}/${task_name}/runs/${now:%Y-%m-%d}_${now:%H-%M-%S}
+
+ sweep:
+   dir: ${paths.log_dir}/${task_name}/multiruns/${now:%Y-%m-%d}_${now:%H-%M-%S}
+   subdir: ${hydra.job.num}
+
+ job_logging:
+   handlers:
+     file:
+       filename: ${hydra.runtime.output_dir}/task.log
configs/paths/default.yaml ADDED
@@ -0,0 +1,16 @@
+ # configs/paths/default.yaml
+
+ # project root
+ root_dir: ${oc.env:PROJECT_ROOT}
+
+ # path to raw and processed data
+ data_dir: ${paths.root_dir}/interactome/data_files/
+
+ # path to logs
+ log_dir: ${paths.root_dir}/logs/
+
+ # hydra-managed output dir
+ output_dir: ${hydra:runtime.output_dir}
+
+ # working dir (original CWD when launched)
+ work_dir: ${hydra:runtime.cwd}
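`${oc.env:PROJECT_ROOT}` requires the `PROJECT_ROOT` environment variable to be set before Hydra composes the config. The empty `.project-root` marker added in this commit suggests a root-finder is used for that; assuming it is `rootutils` (a common companion to this layout; the diff does not confirm it), the entry script would start with something like:

```python
import rootutils

# Walk up from this file until a directory containing the ".project-root"
# marker (added in this commit) is found; by default setup_root also
# exports PROJECT_ROOT, so ${oc.env:PROJECT_ROOT} in configs/paths/default.yaml resolves.
rootutils.setup_root(__file__, indicator=".project-root", pythonpath=True)
```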
configs/preprocess.yaml ADDED
@@ -0,0 +1,9 @@
+ # configs/preprocess.yaml
+
+ defaults:
+   - _self_
+   - paths: default
+   - hydra: default  # ← tells Hydra to use the logging/output config
+   - data_task: download/genome
+
+ task_name: preprocess/${data_task.type}
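`dpacman/scripts/preprocess.py` (+58 lines) consumes this config but is not shown in the diff; a minimal sketch of a Hydra entrypoint wired to it, using the standard `@hydra.main` decorator, could look like this (the body is an assumption for illustration):

```python
import hydra
from omegaconf import DictConfig

@hydra.main(version_base=None, config_path="../../configs", config_name="preprocess")
def main(cfg: DictConfig) -> None:
    # cfg.data_task holds whichever data_task/* group option was selected, e.g.
    #   python dpacman/scripts/preprocess.py data_task=download/remap
    print(f"Running {cfg.task_name}: {cfg.data_task.name}")

if __name__ == "__main__":
    main()
```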
configs/train.yaml ADDED
File without changes
dpacman/classifier/__init__.py ADDED
File without changes
dpacman/data/README.md DELETED
@@ -1,41 +0,0 @@
- # Data download directory
-
- ## UCSC
-
- ### Raw data download
- 1. `encRegTfbsClusteredWithCells.hg38.bed.gz`
-
- ```
- wget https://hgdownload.soe.ucsc.edu/goldenPath/hg38/encRegTfbsClustered/encRegTfbsClusteredWithCells.hg38.bed.gz
- gunzip encRegTfbsClusteredWithCells.hg38.bed.gz
- ```
-
- 2. `encRegTfbsClusteredWithCells.hg19.bed.gz`
-
- ```
- wget https://hgdownload.soe.ucsc.edu/goldenPath/hg19/encRegTfbsClustered/encRegTfbsClusteredWithCells.hg19.bed.gz
- gunzip encRegTfbsClusteredWithCells.hg19.bed.gz
- ```
-
- 3. ReMap BigBed file
- ```
- wget https://hgdownload.soe.ucsc.edu/gbdb/hg38/reMap/reMap2022.bb
- wget http://hgdownload.soe.ucsc.edu/admin/exe/linux.x86_64/bigBedToBed
- chmod +x bigBedToBed
- ./bigBedToBed /home/a03-svincoff/DPACMAN/dpacman/data_files/raw/remap/reMap2022.bb /home/a03-svincoff/DPACMAN/dpacman/data_files/raw/remap/reMap2022.bed
- ```
-
- 4. ReMap CRM file from the ReMap website
- ```
- wget https://remap.univ-amu.fr/storage/remap2022/hg38/MACS2/remap2022_crm_macs2_hg38_v1_0.bed.gz
- gunzip remap2022_crm_macs2_hg38_v1_0.bed.gz
- ```
-
- 5. Run `download.py` to download:
- - Full sequences of each chromosome for genomes hg38 and hg19
- - encRegTfbsClusteredWithCells, a table of clustered transcription factors by their binding sites, for hg38 and hg19
- - Processed databases per genome per chromosome with the following columns: "bin","chrom","chromStart","chromEnd","name","score","scoreCount","sourceIds","sourceScores","seq","seq_flanked","chromStart_flanked","chromEnd_flanked","flank5","flank3"
-
- ### Data Processing
- 1. Run `combine.py` to combine these individual files into one large DataFrame
dpacman/data/chip_atlas/full_data_loading.py DELETED
@@ -1,97 +0,0 @@
- import pandas as pd
- from pathlib import Path
- import subprocess
-
- # Read only cols 0–2, no header
- df = pd.read_csv(
-     "experimentList.tab",
-     sep="\t",
-     header=None,
-     usecols=[0, 1, 2],
-     names=["exp_id", "genome", "assay_group"],
-     engine="python",
-     on_bad_lines="skip",
-     dtype=str,
- )
-
- # Keep only known genome assemblies
- VALID_GENOMES = {
-     "hg19", "hg38", "mm9", "mm10", "rn6",
-     "dm3", "dm6", "ce10", "ce11", "sacCer3",
- }
- df = df[df["genome"].isin(VALID_GENOMES)]
- print("Assemblies in filtered data:", df["genome"].unique())
-
-
- # Classify assay type
- def modality(track):
-     t = track.lower()
-     if "atac" in t:
-         return "ATAC"
-     if "dnase" in t:
-         return "DNase"
-     if "bisulfite" in t or "methyl" in t:
-         return "BS"
-     return "ChIP"
-
-
- df["modality"] = df["assay_group"].apply(modality)
-
-
- # URL templates
- def make_urls(exp, genome, mod):
-     urls = []
-     if mod in ("ChIP", "ATAC", "DNase"):
-         urls.append(f"https://chip-atlas.dbcls.jp/data/{genome}/eachData/bw/{exp}.bw")
-         for thr in ("05", "10", "20"):
-             urls.append(f"https://chip-atlas.dbcls.jp/data/{genome}/eachData/bed{thr}/{exp}.{thr}.bed")
-             urls.append(f"https://chip-atlas.dbcls.jp/data/{genome}/eachData/bb{thr}/{exp}.{thr}.bb")
-     else:
-         urls.append(f"https://chip-atlas.dbcls.jp/data/{genome}/eachData/bs/methyl/{exp}.methyl.bw")
-         urls.append(f"https://chip-atlas.dbcls.jp/data/{genome}/eachData/bs/cover/{exp}.cover.bw")
-         for sub in ("hmr", "pmd", "hypermr"):
-             urls.append(f"https://chip-atlas.dbcls.jp/data/{genome}/eachData/bs/{sub}/Bed/{exp}.{sub}.bed")
-             urls.append(f"https://chip-atlas.dbcls.jp/data/{genome}/eachData/bs/{sub}/BigBed/{exp}.{sub}.bb")
-     return urls
-
-
- # Write URL lists per genome
- urls_dir = Path("urls_by_genome")
- urls_dir.mkdir(exist_ok=True)
- for genome, group in df.groupby("genome"):
-     all_urls = []
-     for _, row in group.iterrows():
-         all_urls += make_urls(row.exp_id, genome, row.modality)
-     uniq = sorted(set(all_urls))
-     (urls_dir / f"urls_{genome}.txt").write_text("\n".join(uniq))
-     print(f"{genome}: {len(uniq)} URLs")
-
- # Download into raw/{genome}/
- for url_file in urls_dir.glob("urls_*.txt"):
-     genome = url_file.stem.split("_", 1)[1]
-     dest = Path("raw") / genome
-     dest.mkdir(parents=True, exist_ok=True)
-     print(f"\nDownloading {genome} → {dest}/…")
-     subprocess.run(["wget", "-nc", "-i", str(url_file), "-P", str(dest)], check=True)
-
- print("Done! Check raw/{genome}/ for your files.")
dpacman/data/chip_atlas/smaller_data_loading.py DELETED
@@ -1,160 +0,0 @@
- #!/usr/bin/env python3
- import os, sys, zipfile
- import subprocess
- import random
- from pathlib import Path
- import requests
- import pandas as pd
- from tqdm import tqdm
-
- # ─── PARAMETERS ───────────────────────────────────────────────────────────────
- # total target regions (rough guide; you'll filter post-download if needed)
- TARGET_REGIONS = 200_000
-
- # Assemblies to include
- ASSEMBLIES = [
-     "hg19", "hg38", "mm9", "mm10", "rn6",
-     "dm3", "dm6", "ce10", "ce11", "sacCer3",
- ]
-
- # How many experiments to sample at most per protein (tune up/down)
- MAX_EXPS_PER_PROTEIN = 50
-
- # Number of parallel connections for aria2c
- ARIA2C_CONN = 16
-
- # Working directories
- WORKDIR = Path("chip_atlas_fetch")
- WORKDIR.mkdir(exist_ok=True)
- LIST_DIR = WORKDIR / "lists"
- LIST_DIR.mkdir(exist_ok=True)
- DL_DIR = WORKDIR / "downloads"
- DL_DIR.mkdir(exist_ok=True)
-
- # ─── HELPERS ──────────────────────────────────────────────────────────────────
-
-
- def download_and_extract(url, extract_to: Path):
-     """Fetch a ZIP and unzip it."""
-     local = extract_to / Path(url).name
-     if not local.exists():
-         print(f"→ Downloading {url}")
-         resp = requests.get(url, stream=True)
-         resp.raise_for_status()
-         with open(local, "wb") as f:
-             for chunk in resp.iter_content(1 << 20):
-                 f.write(chunk)
-     with zipfile.ZipFile(local, "r") as z:
-         z.extractall(extract_to)
-
-
- # ─── 1) GET MASTER LISTS ────────────────────────────────────────────────────
-
- print("1) Fetching master file & experiment lists…")
- FILELIST_URL = "https://dbarchive.biosciencedbc.jp/data/chip-atlas/LATEST/chip_atlas_file_list.zip"
- EXPERIMENTLIST_URL = "https://dbarchive.biosciencedbc.jp/data/chip-atlas/LATEST/chip_atlas_experiment_list.zip"
-
- download_and_extract(FILELIST_URL, LIST_DIR)
- download_and_extract(EXPERIMENTLIST_URL, LIST_DIR)
-
- filelist_txt = LIST_DIR / "chip_atlas_file_list.csv"
- experiment_txt = LIST_DIR / "chip_atlas_experiment_list.csv"
-
- # ─── 2) PARSE EXPERIMENT METADATA ────────────────────────────────────────────
-
- print("2) Parsing experiment → protein lookup…")
- exp_df = pd.read_csv(
-     experiment_txt,
-     sep=None,           # let the python engine guess (comma vs. tab)
-     engine="python",    # required when sep=None
-     encoding="latin1",  # to avoid UnicodeDecodeErrors
- )
-
- print("Columns in experiment list:", exp_df.columns.tolist())
-
- exp_df = exp_df.loc[:, ["Experimental ID", "Genome assembly", "Antigen"]].rename(
-     columns={
-         "Experimental ID": "exp_id",
-         "Genome assembly": "genome",
-         "Antigen": "assay_group",
-     }
- )
-
- exp_df["protein"] = exp_df["assay_group"].str.replace(r"_ChIP.*", "", regex=True)
-
- # Finally, filter to only the assemblies you care about:
- exp_df = exp_df[exp_df["genome"].isin(ASSEMBLIES)]
-
- # build lookup
- exp_to_genome = exp_df.set_index("exp_id")["genome"].to_dict()
- exp_to_protein = exp_df.set_index("exp_id")["protein"].to_dict()
-
- # ─── 3) BUILD URL LIST DIRECTLY ───────────────────────────────────────────────
-
- print("3) Building URL list for .bw + .10.bed…")
- BASE = "https://dbarchive.biosciencedbc.jp/data/chip-atlas"
- urls_by_exp = {}
- for exp, genome in exp_to_genome.items():
-     urls_by_exp[exp] = [
-         f"{BASE}/data/{genome}/eachData/bw/{exp}.bw",
-         f"{BASE}/data/{genome}/eachData/bed10/{exp}.10.bed",
-     ]
-
- # bucket experiments by protein
- from collections import defaultdict
-
- prot_exps = defaultdict(list)
- for exp, prot in exp_to_protein.items():
-     if exp in urls_by_exp:
-         prot_exps[prot].append(exp)
-
- # sample up to MAX_EXPS_PER_PROTEIN per protein
- sampled_exps = []
- for prot, exps in prot_exps.items():
-     k = min(len(exps), MAX_EXPS_PER_PROTEIN)
-     sampled_exps += random.sample(exps, k)
-
- print(f" → Sampling {len(sampled_exps):,} experiments across {len(prot_exps)} proteins")
-
- # collect URLs for just those experiments
- final_urls = []
- for exp in sampled_exps:
-     final_urls += urls_by_exp[exp]
- random.shuffle(final_urls)
-
- # write out for aria2c
- url_list_file = WORKDIR / "to_download.txt"
- with open(url_list_file, "w") as f:
-     for u in final_urls:
-         f.write(u + "\n")
- print(f" → Wrote {len(final_urls):,} URLs to {url_list_file}")
-
- # ─── 4) PARALLEL DOWNLOAD VIA aria2c ─────────────────────────────────────────
-
- print("4) Downloading with aria2c…")
- subprocess.run(
-     [
-         "aria2c",
-         f"-x{ARIA2C_CONN}",
-         "--dir", str(DL_DIR),
-         "--input-file", str(url_list_file),
-         "--auto-file-renaming=false",
-         "--allow-overwrite=true",
-     ],
-     check=True,
- )
-
- print("✅ Finished downloading all selected files.")
- print(f"Your files are in: {DL_DIR.resolve()}")
dpacman/data/consistency.py DELETED
@@ -1,10 +0,0 @@
- """
- Check for consistency between Remap and Tfclust data
- """
-
- import logging
- import os
-
- import pandas as pd
- import dask.dataframe as dd
-
dpacman/data/remap/analyze.py DELETED
@@ -1,48 +0,0 @@
- import logging
- import pandas as pd
-
- def main(logger=None):
-     if logger is None:
-         logger = logging.getLogger(__name__)
-
-     # Read the BED file
-     bed_file_path = "../../data_files/raw/remap/reMap2022.bed"
-     df = pd.read_csv(bed_file_path, sep="\t", header=None)
-     df.columns = ["#chrom", "chromStart", "chromEnd", "name", "score", "strand", "thickStart", "thickEnd", "reserved", "TF", "Biotypes"]
-     print(f"{len(df):,}")
-     df["chromLen"] = df["chromEnd"] - df["chromStart"]
-     print(df["chromLen"].describe())
-     print(df.head(50))
-
-     crm_bed_file_path = "../../data_files/raw/remap/remap2022_crm_macs2_hg38_v1_0.bed"
-     crm = pd.read_csv(crm_bed_file_path, sep="\t", header=None)
-     crm.columns = ["#chrom", "chromStart", "chromEnd", "name", "score", "strand", "thickStart", "thickEnd", "reserved"]
-     crm["chromLen"] = crm["chromEnd"] - crm["chromStart"]
-     crm["thickLen"] = crm["thickEnd"] - crm["thickStart"]
-     print(f"{len(crm):,}")
-     print("thick length statistics:")
-     print(crm["thickLen"].describe())
-     print("chrom length statistics:")
-     print(crm["chromLen"].describe())
-     print(crm[["#chrom", "chromStart", "chromEnd", "name", "score", "strand", "thickStart", "thickEnd", "reserved"]].head(50))
-     crm.head(50).to_csv("crm_example.csv", index=False)
-
-     crm["name"] = crm["name"].apply(lambda x: x.split(","))
-     crm = crm.explode("name").reset_index(drop=True)
-     crm.loc[crm["name"] == "ERG"].reset_index(drop=True).head(50).to_csv("crm_example_ERG.csv", index=False)
-
- if __name__ == "__main__":
-     log_path = "analyze.log"
-
-     logger = logging.getLogger(__name__)
-     logger.setLevel(logging.DEBUG)
-
-     # Create file handler
-     file_handler = logging.FileHandler(log_path, mode="w", encoding="utf-8")
-     formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
-     file_handler.setFormatter(formatter)
-
-     # Attach handlers
-     logger.addHandler(file_handler)
-
-     main(logger)
dpacman/data/remap/pre_fimo.py DELETED
@@ -1,61 +0,0 @@
- #!/usr/bin/env python3
- import pandas as pd
- import numpy as np
-
- # ------------------------------------------------------------------
- # PARAMETERS
- INPUT_CSV = "/home/a03-akrishna/DPACMAN/dpacman/data/remap/full_crm.csv"
- OUTPUT_CSV = "/home/a03-akrishna/DPACMAN/data_files/processed/clean_pre_fimo.csv"
- WINDOW_TOTAL = 500  # total extra context bp around each peak
- # ------------------------------------------------------------------
-
- def main():
-     # 1) load
-     df = pd.read_csv(INPUT_CSV)
-
-     # 2) normalize chromosomes and exclude non-whole chromosomes
-     df = df.rename(columns={"#chrom": "chrom"})
-     df["chrom"] = df["chrom"].str.replace(r"^chr", "", regex=True)
-
-     valid = [str(i) for i in range(1, 23)] + ["X", "Y"]
-     df = df[df["chrom"].isin(valid)].reset_index(drop=True)
-
-     # 3) explode TF names
-     df["TF_list"] = df["name"].str.split(",")
-     df = df.explode("TF_list").rename(columns={"TF_list": "TF"})
-     df["TF"] = df["TF"].str.strip()
-
-     # 4) draw a random left flank between 0 and WINDOW_TOTAL,
-     #    then the right flank is whatever remains to sum to WINDOW_TOTAL
-     n = len(df)
-     df["left_context"] = np.random.randint(0, WINDOW_TOTAL + 1, size=n)
-     df["right_context"] = WINDOW_TOTAL - df["left_context"]
-
-     # 5) compute contextStart / contextEnd
-     df["contextStart"] = (df["chromStart"] - df["left_context"]).clip(lower=0).astype(int)
-     df["contextEnd"] = (df["chromEnd"] + df["right_context"]).astype(int)
-
-     # 6) assemble output
-     out = df[[
-         "chrom",
-         "contextStart",
-         "chromStart",  # original ChIPStart
-         "chromEnd",    # original ChIPEnd
-         "contextEnd",
-         "score",       # original score column
-         "TF",
-     ]].rename(columns={
-         "chrom": "#chrom",
-         "chromStart": "ChIPStart",
-         "chromEnd": "ChIPEnd",
-         "score": "chipscore",
-     })
-
-     # 7) write CSV
-     out.to_csv(OUTPUT_CSV, index=False)
-     print(f"Wrote {len(out)} rows to {OUTPUT_CSV}")
-
- if __name__ == "__main__":
-     main()
dpacman/data/tfclust/analyze.py DELETED
@@ -1,77 +0,0 @@
- import pandas as pd
- import logging
- import os
- import dask.dataframe as dd
- import matplotlib.pyplot as plt
-
- def plot_sequence_lengths_box(lengths, xlog=False, title="Sequence Lengths", out_dir="figures", fname="sequence_lengths_box.png"):
-     """
-     Plot sequence lengths. Can be used with the original sequence or the flanked sequence.
-     """
-     os.makedirs(out_dir, exist_ok=True)
-     out_path = os.path.join(out_dir, fname)
-
-     plt.figure(figsize=(10, 4))
-     plt.boxplot(lengths, vert=False)
-     if xlog:
-         plt.xscale('log')
-     plt.xlabel("Sequence Length")
-     plt.title(title)
-     plt.grid(True, axis='y', linestyle='--', alpha=0.6)
-     plt.tight_layout()
-
-     plt.savefig(out_path, dpi=300)
-
- def plot_sequence_lengths_hist(lengths, xlog=False, title="Sequence Lengths", out_dir="figures", fname="sequence_lengths_hist.png"):
-     """
-     Plot sequence lengths. Can be used with the original sequence or the flanked sequence.
-     """
-     os.makedirs(out_dir, exist_ok=True)
-     out_path = os.path.join(out_dir, fname)
-
-     plt.figure(figsize=(10, 4))
-     plt.hist(lengths, bins=100, density=True, alpha=0.75)
-     if xlog:
-         plt.xscale('log')
-     # percentage format
-     plt.gca().yaxis.set_major_formatter(plt.FuncFormatter(lambda y, _: '{:.2%}'.format(100 * y)))
-     plt.xlabel("Sequence Length")
-     plt.ylabel("Frequency")
-     plt.title(title)
-     plt.grid(True, axis='y', linestyle='--', alpha=0.6)
-     plt.tight_layout()
-
-     plt.savefig(out_path, dpi=300)
-
- def main(logger):
-     df_dir = "../../data_files/processed/tfclust/combined"
-     df_savepath = os.path.join(df_dir, "encRegTfbsClustered_hg38_hg19.parquet")
-     logger.info("Starting to load data file from parquet")
-     df = pd.read_parquet(df_savepath, engine="auto")
-     logger.info(df.head())
-
-     plot_sequence_lengths_hist(df["seq_len"], title="TF Binding Sites", fname="seq_lengths_hist.png")
-     plot_sequence_lengths_hist(df["seq_flanked_len"], title="TF Binding Sites with 1000nt Flanks", fname="seq_lengths_flanked_hist.png")
-     plot_sequence_lengths_box(df["seq_len"], title="TF Binding Sites", fname="seq_lengths_box.png")
-     plot_sequence_lengths_box(df["seq_flanked_len"], title="TF Binding Sites with 1000nt Flanks", fname="seq_lengths_flanked_box.png")
-
-     plot_sequence_lengths_hist(df["seq_len"], xlog=True, title="TF Binding Sites", fname="seq_lengths_xlog_hist.png")
-     plot_sequence_lengths_hist(df["seq_flanked_len"], xlog=True, title="TF Binding Sites with 1000nt Flanks", fname="seq_lengths_flanked_xlog_hist.png")
-     plot_sequence_lengths_box(df["seq_len"], xlog=True, title="TF Binding Sites", fname="seq_lengths_xlog_box.png")
-     plot_sequence_lengths_box(df["seq_flanked_len"], xlog=True, title="TF Binding Sites with 1000nt Flanks", fname="seq_lengths_flanked_xlog_box.png")
-
- if __name__ == "__main__":
-     log_path = "analyze.log"
-
-     logger = logging.getLogger(__name__)
-     logger.setLevel(logging.DEBUG)
-
-     # Create file handler
-     file_handler = logging.FileHandler(log_path, mode="w", encoding="utf-8")
-     formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
-     file_handler.setFormatter(formatter)
-
-     # Attach handlers
-     logger.addHandler(file_handler)
-
-     main(logger)
dpacman/data/tfclust/api_download.py DELETED
@@ -1,448 +0,0 @@
- import requests
- from time import sleep
- import json
- import logging
- import multiprocessing
- from concurrent.futures import ThreadPoolExecutor, as_completed
- import os
- import pandas as pd
-
- def get_all_tfs(genome: str = "hg38"):
-     """
-     Get all the transcription factors from the appropriate encRegTfbsClusteredWithCells.genome.bed file.
-     Available in data_files/raw/tfclust for genomes hg38 and hg19
-     """
-     # Read raw file
-     raw_data = pd.read_csv("../../data_files/encode3TfbsClusteredWithCells.bed", sep="\t", header=None)
-     raw_data.columns = ["chrom", "start", "end", "tf_name", "score", "cell_line"]
-
-     # Extract all unique TF names
-     all_tfs = raw_data["tf_name"].unique().tolist()
-     logging.info(f"Found {len(all_tfs)} transcription factors in genome {genome}.")
-
-     return all_tfs
-
- def get_all_chroms(genome: str = "hg38", exclude: list = None, include: list = None, logger: logging.Logger = None):
-     """
-     Fetch all chromosome names for a genome.
-     Note: some chromosomes are in unexpected formats (e.g. there is 'chr15', but also 'chr15_ML143371v1_fix')
-     """
-     if logger is None:
-         logger = logging.getLogger(__name__)
-
-     url = f"https://api.genome.ucsc.edu/list/chromosomes?genome={genome}"
-     try:
-         r = requests.get(url)
-         r.raise_for_status()
-     except:
-         raise ValueError(f"Failed to fetch all chromosomes for genome {genome}")
-
-     if include is not None and exclude is not None:
-         raise ValueError("Must pass EITHER exclude or include. Cannot pass both.")
-
-     all_chroms = [chrom for chrom in r.json()["chromosomes"]]
-     if include:
-         logger.info(f"Including only the following chromosomes: {include}")
-         all_chroms = [chrom for chrom in all_chroms if chrom in include]
-     if exclude:
-         logger.info(f"Excluding the following chromosomes: {exclude}")
-         all_chroms = [chrom for chrom in all_chroms if not (chrom in exclude)]
-
-     logger.info(f"Found {len(all_chroms)} chromosomes in genome {genome}.")
-
-     return all_chroms
-
- def fetch_tfbs_track(chrom: str, genome: str = "hg38", logger: logging.Logger = None):
-     """
-     Fetch raw data from the track encRegTfbsClustered.
-     Returns json data for the specified chromosome, where key information appears as follows:
-     "encRegTfbsClustered": [
-         {
-             "bin": 585,
-             "chrom": "chr1",
-             "chromStart": 9917,
-             "chromEnd": 10247,
-             "name": "NUFIP1",
-             "score": 680,
-             "sourceCount": 1,
-             "sourceIds": "1063",
-             "sourceScores": "680"
-         },...
-     ]
-     """
-     if logger is None:
-         logger = logging.getLogger(__name__)
-
-     params = {"genome": genome, "track": "encRegTfbsClustered", "chrom": chrom}
-     url = f"https://api.genome.ucsc.edu/getData/track?genome={params['genome']};track={params['track']};chrom={params['chrom']}"
-     try:
-         r = requests.get(url)
-         r.raise_for_status()
-     except:
-         raise ValueError(f"Failed to fetch encRegTfbsClustered for {chrom} in genome {genome}")
-
-     # Extract the output and save it
-     json_out_dir = f"../../data_files/raw/tfclust/encRegTfbsClustered_data/{genome}"
-     os.makedirs(json_out_dir, exist_ok=True)
-
-     # Save it
-     json_output = r.json()
-     with open(f"{json_out_dir}/{params['genome']}_{params['track']}_{params['chrom']}.json", "w") as f:
-         json.dump(json_output, f, indent=4)
-
-     logger.info(f"Saved to {json_out_dir}/{params['genome']}_{params['track']}_{params['chrom']}.json")
-     return json_output
-
- def get_sequence(chrom: str, start: int, end: int, flank5: int = 0, flank3: int = 0, genome: str = "hg38", logger: logging.Logger = None):
-     """
-     Given genome, start position, end position, chromosome, and desired flank size, extract the raw DNA sequence
-     """
-     if logger is None:
-         logger = logging.getLogger(__name__)
-
-     new_start = max(0, start - flank5)
-     new_end = end + flank3
-     region = f"{chrom}:{new_start}-{new_end}"
-     url = f"https://api.genome.ucsc.edu/getData/sequence?genome={genome};chrom={chrom};start={new_start};end={new_end}"
-
-     try:
-         r = requests.get(url)
-         r.raise_for_status()
-     except:
-         raise ValueError(f"Failed to fetch sequence for {region} in genome {genome}")
-
-     results_dict = {
-         "chromStart": new_start,
-         "chromEnd": new_end,
-         "seq": r.json()["dna"],
-     }
-     return results_dict
-
- def extract_tfbs_with_context(
-     genome: str = "hg38",
-     flank5: int = 500,
-     flank3: int = 500,
-     control_run: bool = True,  # if there's a flank, whether to also run without flank
-     out_dir: str = "../../data_files/processed/tfclust",
-     allowed_tfs: list = None,  # e.g., ['CTCF', 'MAX']
-     chroms: list = None,
-     logger: logging.Logger = None,
- ):
-     """
-     Loop through raw downloads and extract TF binding sites (bs) with flanks.
-     Builds a DataFrame with all the available data for each TF. Columns = ["bin", "chrom", "chromStart", "chromEnd", "name", "score", "scoreCount", "sourceIds", "sourceScores", "seq", "seq_flanked", "chromStart_flanked", "chromEnd_flanked"]
-     """
-     # Prepare logger
-     if logger is None:
-         logger = logging.getLogger(__name__)
-     # Prepare to save output
-     os.makedirs(out_dir, exist_ok=True)
-
-     # Get chromosomes
-     if chroms is None:
-         logger.info("No chromosomes provided, fetching all chromosomes for the given genome...")
-         chroms = get_all_chroms(genome, logger=logger)
-     count = 0
-     # Initialize the final DF
-     results_cols = [
-         "bin", "chrom", "chromStart", "chromEnd", "name", "score", "scoreCount",
-         "sourceIds", "sourceScores", "seq", "seq_flanked",
-         "chromStart_flanked", "chromEnd_flanked", "flank5", "flank3",
-     ]
-     results_init = pd.DataFrame(columns=results_cols)
-
-     # Make a list of the types of runs we need
-     queries = [{"flank5": flank5, "flank3": flank3}]
-     if not ((flank5 == 0) and (flank3 == 0) and control_run):
-         queries.append({"type": "control", "flank5": 0, "flank3": 0})
-         queries[0]["type"] = "flank"
-     elif (flank5 == 0) and (flank3 == 0):
-         queries[0]["type"] = "control"
-
-     # For each chromosome, download the encRegTfbsClustered track, extract the features, and fetch the sequences
-     for chrom in chroms:
-         chrom_write_count = 0
-         chrom_output_fname = f"{out_dir}/encRegTfbsClustered_{genome}_{chrom}.csv"
-         results_init.to_csv(chrom_output_fname, index=False)
-         logger.info(f"Fetching {chrom}...")
-         # Fetch the data json (has start and end positions in the chrom, but not the sequence)
-         try:
-             data = fetch_tfbs_track(chrom, genome=genome, logger=logger)
-             logger.info(f" → Fetched {chrom} successfully")
-             features = data.get("encRegTfbsClustered", {})
-             logger.info(f" → Found {len(features)} features")
-         except Exception as e:
-             logger.info(f" Failed to fetch {chrom}: {e}")
-             continue
-
-         # Get the sequences of the DNA binding sites
-         for feature_no, feature in enumerate(features):
-             # Initialize new results row
-             new_row = {}
-
-             # Check if tf is valid
-             tf_name = feature.get("name", "UnknownTF")
-             if allowed_tfs and tf_name not in allowed_tfs:
-                 logger.warning(f"TF name {tf_name} not in allowed_tfs. Skipping.")
-                 continue
-             # Make sure the chromosomes match and we have the right sequence!
-             assert feature["chrom"] == chrom, f"Chromosome mismatch: {feature['chrom']} != {chrom}"
-
-             # Add all the cols already in the json
-             for c in results_cols:
-                 if c in feature:
-                     new_row[c] = feature[c]
-
-             ### Extract sequence
-             start = feature["chromStart"]
-             end = feature["chromEnd"]
-
-             for query in queries:
-                 try:
-                     results_dict = get_sequence(chrom, start, end, flank5=query["flank5"], flank3=query["flank3"], genome=genome, logger=logger)
-                     # Add the returned info
-                     if query["type"] == "control":
-                         new_row["seq"] = results_dict["seq"]
-                     elif query["type"] == "flank":
-                         new_row["seq_flanked"] = results_dict["seq"]
-                         new_row["chromStart_flanked"] = results_dict["chromStart"]
-                         new_row["chromEnd_flanked"] = results_dict["chromEnd"]
-                         new_row["flank5"] = flank5
-                         new_row["flank3"] = flank3
-                     logger.info(f" Success on feat. {feature_no} {chrom}:{start}-{end}, type {query['type']}")
-                 except Exception as e:
-                     logger.info(f" Skipped feat. {feature_no} {chrom}:{start}-{end} due to error: {e}")
-                     continue
-
-                 sleep(0.05)  # Stay within UCSC's 20 req/sec rate limit
-
-             # Fill out any blank columns
-             try:
-                 for c in results_cols:
-                     if c not in new_row:
-                         new_row[c] = None
-
-                 new_row_df = pd.DataFrame(data=new_row, index=[0])
-                 if new_row_df["seq"] is not None:
-                     new_row_df.to_csv(chrom_output_fname, mode="a", index=False, header=False)
-                     logger.info(f"Wrote new row to {out_dir}/encRegTfbsClustered_{chrom}.csv")
-                     chrom_write_count += 1
-                 else:
-                     logger.info(f"Did not write new row. {new_row}")
-             except Exception as e:
-                 logger.error(f"Failed to write new row to {out_dir}/encRegTfbsClustered_{chrom}.csv: error {e}")
-
-         logger.info(f"Done. Wrote {chrom_write_count} sequences to {out_dir}/encRegTfbsClustered_{chrom}.csv")
-         count += chrom_write_count
-
-     logger.info(f"Done with all chroms. Wrote {count} sequences to {out_dir}.")
-
- def setup_chrom_logger(chrom: str, genome: str, out_dir: str) -> logging.Logger:
-     """Set up a dedicated logger for a given chromosome."""
-     logger = logging.getLogger(f"{genome}_{chrom}")
-     logger.setLevel(logging.DEBUG)
-     logger.propagate = False
-
-     # Avoid duplicate handlers if reused
-     if not logger.handlers:
-         os.makedirs(out_dir, exist_ok=True)
-         log_path = os.path.join(out_dir, f"log_{genome}_{chrom}.log")
-         handler = logging.FileHandler(log_path, mode='w', encoding='utf-8')
-         formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
-         handler.setFormatter(formatter)
-         logger.addHandler(handler)
-
-     return logger
-
- # Thread function for one chromosome
- def process_chrom(
-     chrom: str = "chr1",
-     genome: str = "hg38",
-     flank5: int = 500,
-     flank3: int = 500,
-     control_run: bool = True,
-     out_dir: str = "../../data_files/processed/tfclust",
-     allowed_tfs: list = None,
- ):
-     """
-     Called within the parallel method to start a thread
-     """
-     chrom_logger = setup_chrom_logger(chrom, genome, f"{out_dir}/logs")
-     chrom_logger.info(f"Starting thread for {chrom}")
-
-     logging.info(f"Starting thread for {chrom}")
-     try:
-         extract_tfbs_with_context(
-             genome=genome,
-             flank5=flank5,
-             flank3=flank3,
-             control_run=control_run,
-             out_dir=out_dir,
-             allowed_tfs=allowed_tfs,
-             chroms=[chrom],  # important: wrap in list
-             logger=chrom_logger,
-         )
-         chrom_logger.info(f"Finished {chrom}")
-     except Exception as e:
-         chrom_logger.error(f"Error processing {chrom}: {e}")
-
- def parallel_extract_tfbs_for_genome(
-     genome: str,
-     flank5: int,
-     flank3: int,
-     control_run: bool,
-     out_dir: str,
-     allowed_tfs: list,
-     chroms: list,
-     max_workers: int,
- ):
-     logger = logging.getLogger(f"{genome}")
-     logger.info(f"Using {max_workers} threads for {genome}...")
-
-     if chroms is None:
-         chroms = get_all_chroms(genome=genome)
-
-     futures = {}
-     with ThreadPoolExecutor(max_workers=max_workers) as executor:
-         for chrom in chroms:
-             future = executor.submit(
-                 process_chrom,
-                 chrom=chrom,
-                 genome=genome,
-                 flank5=flank5,
-                 flank3=flank3,
-                 control_run=control_run,
-                 out_dir=f"{out_dir}/{genome}",
-                 allowed_tfs=allowed_tfs,
-             )
-             futures[future] = f"{genome}:{chrom}"
-
-         for future in as_completed(futures):
-             label = futures[future]
-             try:
-                 future.result()
-             except Exception as e:
-                 logger.error(f"{label} raised an exception: {e}")
-
- def parallel_extract_tfbs_with_context(
-     genomes=["hg38", "hg19"],
-     flank5=500,
-     flank3=500,
-     control_run=True,
-     out_dir="../../data_files/processed/tfclust",
-     allowed_tfs=None,
-     chroms=None,
- ):
-     total_cpus = multiprocessing.cpu_count()
-     cpu_per_genome = total_cpus // len(genomes)
-
-     logging.info(f"Total CPUs: {total_cpus}")
-     logging.info(f"Launching {len(genomes)} genome pipelines with {cpu_per_genome} threads each")
-
-     processes = []
-     for genome in genomes:
-         p = multiprocessing.Process(
-             target=parallel_extract_tfbs_for_genome,
-             args=(genome, flank5, flank3, control_run, out_dir, allowed_tfs, chroms, cpu_per_genome),
-         )
-         p.start()
-         processes.append(p)
-
-     for p in processes:
-         p.join()
-
- def main():
-     genomes = ["hg38", "hg19"]
-
-     parallel_extract_tfbs_with_context(
-         genomes=genomes,
-         flank5=500,
-         flank3=500,
-         control_run=True,  # if there's a flank, whether to also run without flank
-         out_dir="../../data_files/processed/tfclust",
-         allowed_tfs=None,  # e.g., ['CTCF', 'MAX']
-         chroms=None,
-     )
-
- if __name__ == "__main__":
-     logger = logging.getLogger(__name__)
-     logging.basicConfig(
-         filename="download.log",
-         encoding="utf-8",
-         level=logging.DEBUG,
-         filemode="w",
-     )
-     main()
dpacman/data/tfclust/combine.py DELETED
@@ -1,114 +0,0 @@
- import pandas as pd
- import logging
- import os
- import dask.dataframe as dd
-
- def combine_hg38_hg19(hg38_dir, hg19_dir):
-     # See how many files there are
-     hg38_files = [os.path.join(hg38_dir, x) for x in os.listdir(hg38_dir) if os.path.isfile(os.path.join(hg38_dir, x))]
-     hg19_files = [os.path.join(hg19_dir, x) for x in os.listdir(hg19_dir) if os.path.isfile(os.path.join(hg19_dir, x))]
-
-     logging.info(f"Total hg38 files: {len(hg38_files)}")
-     logging.info(f"Total hg19 files: {len(hg19_files)}")
-
-     # See how many datapoints there are
-     hg38_complete = pd.read_csv(os.path.join(hg38_dir, "logs/completed.txt"), sep="\t")
-     hg19_complete = pd.read_csv(os.path.join(hg19_dir, "logs/completed.txt"), sep="\t")
-
-     logging.info(f"Total hg38 rows: {sum(hg38_complete['row_count']):,}")
-     logging.info(f"Total hg19 rows: {sum(hg19_complete['row_count']):,}")
-     logging.info(f"Total: {sum(hg38_complete['row_count']) + sum(hg19_complete['row_count']):,}")
-
-     # Now try to combine all the files into one.
-     # Read all CSVs in the folder as a single Dask dataframe, one genome at a time.
-     full_df_hg38 = dd.read_csv(hg38_files)
-     full_df_hg38 = full_df_hg38.assign(genome="hg38")  # Dask-safe assignment
-
-     full_df_hg19 = dd.read_csv(hg19_files)
-     full_df_hg19 = full_df_hg19.assign(genome="hg19")
-
-     # Concatenate both into one Dask DataFrame
-     full_df = dd.concat([full_df_hg38, full_df_hg19])
-
-     logging.info(f"Added all files to combined DataFrame. Total rows: {len(full_df)}")
-
-     full_df["seq_len"] = full_df["seq"].str.len()
-     full_df["seq_flanked_len"] = full_df["seq_flanked"].str.len()
-     logging.info("Added sequence length column.")
-
-     full_df_dir = "../../data_files/processed/tfclust/combined"
-     full_df_savepath = os.path.join(full_df_dir, "encRegTfbsClustered_hg38_hg19.parquet")
-     os.makedirs(full_df_dir, exist_ok=True)
-     full_df.to_parquet(full_df_savepath)  # much faster and more compact
-     logging.info(f"Saved combined DataFrame to {full_df_savepath}.")
-
- # Define the aggregation function
- def collapse_group(group):
-     return pd.Series({
-         "name": ",".join(group["name"]),
-         "score": ",".join(map(str, group["score"])),
-         "bin": ",".join(map(str, group["bin"])),
-         "scoreCount": ",".join(map(str, group["scoreCount"])),
-         "sourceIds": ",".join(map(str, group["sourceIds"])),
-         "sourceScores": ",".join(map(str, group["sourceScores"])),
-     })
-
- def reorg_like_remap(genome_dir, fname):
-     """
-     Reorganize a chromosome from tfclust processing to be in the format of remap files:
-     #chrom,chromStart,chromEnd,name,score,strand,thickStart,thickEnd,reserved,chromLen,thickLen
-
-     Original format of my processed tfclust files:
-     bin,chrom,chromStart,chromEnd,name,score,scoreCount,sourceIds,sourceScores,seq,seq_flanked,chromStart_flanked,chromEnd_flanked,flank5,flank3
-     """
-     fpath = os.path.join(genome_dir, fname)
-     df = dd.read_csv(fpath)
-
-     # Show the head
-     print(df.head())
-
-     # Keep everything except the sequences
-     df = df[[
-         "chrom", "chromStart", "chromEnd", "name", "score",  # same as other file
-         "bin", "scoreCount", "sourceIds", "sourceScores"
-     ]].rename(columns={"chrom": "#chrom"})
-
-     # Apply groupby with known output types (meta)
-     meta = {
-         "name": str,
-         "score": str,
-         "bin": str,
-         "scoreCount": str,
-         "sourceIds": str,
-         "sourceScores": str,
-     }
-
-     grouped = df.groupby(["#chrom", "chromStart", "chromEnd"]).apply(collapse_group, meta=meta)
-
-     # You can now compute it
-     result = grouped.compute()
-
-     # save the result
-     savedir = os.path.join(genome_dir, "remap_reorg")
-     os.makedirs(savedir, exist_ok=True)
-     savepath = os.path.join(savedir, fname.replace(".csv", "_reorg.csv"))
-     result.to_csv(savepath, index=True)
-     logging.info(f"Saved reorganized file to {savepath}")
-
- def main():
-     hg38_dir = "../../data_files/processed/tfclust/hg38"
-     hg19_dir = "../../data_files/processed/tfclust/hg19"
-
-     # combine_hg38_hg19(hg38_dir, hg19_dir)
-
-     for chrom in ["chr1"]:
-         reorg_like_remap(hg38_dir, f"encRegTfbsClustered_hg38_{chrom}.csv")
-
- if __name__ == "__main__":
-     logging.basicConfig(filename="combine.log", encoding="utf-8", level=logging.DEBUG, filemode="w")
-
-     main()
dpacman/data/tfclust/download.py DELETED
@@ -1,462 +0,0 @@
- import os
- import logging
- import requests
- import pandas as pd
- import json
- import multiprocessing
- from math import ceil
- from datetime import datetime
-
- def get_all_tfs(genome: str = "hg38"):
-     raw_data = pd.read_csv("../../data_files/encode3TfbsClusteredWithCells.bed", sep="\t", header=None)
-     raw_data.columns = ["chrom", "start", "end", "tf_name", "score", "cell_line"]
-     all_tfs = raw_data["tf_name"].unique().tolist()
-     logging.info(f"Found {len(all_tfs)} transcription factors in genome {genome}.")
-     return all_tfs
-
- def get_all_chroms(genome: str = "hg38", exclude: list = None, include: list = None, logger: logging.Logger = None):
-     """
-     Fetch all chromosome names for a genome.
-     Note: some chromosomes are in unexpected formats (e.g. there is 'chr15', but also 'chr15_ML143371v1_fix')
-     """
-     if logger is None:
-         logger = logging.getLogger(__name__)
-
-     url = f"https://api.genome.ucsc.edu/list/chromosomes?genome={genome}"
-     try:
-         r = requests.get(url)
-         r.raise_for_status()
-     except:
-         logger.error(f"Failed to fetch all chromosomes for genome {genome}")
-
-     if include is not None and exclude is not None:
-         raise ValueError("Must pass EITHER exclude or include. Cannot pass both.")
-
-     all_chroms = list(r.json()["chromosomes"].keys())
-     if include is not None:
-         logger.info(f"Including only the following chromosomes: {include}")
-         all_chroms = [chrom for chrom in all_chroms if chrom in include]
-     if exclude is not None:
-         logger.info(f"Excluding the following chromosomes: {exclude}")
-         all_chroms = [chrom for chrom in all_chroms if not (chrom in exclude)]
-
-     logger.info(f"Found {len(all_chroms)} chromosomes in genome {genome}.")
-
-     return all_chroms
-
- def get_all_chrom_fasta_files(genome: str = "hg38", exclude: list = None, include: list = None, logger: logging.Logger = None, out_dir="../../data_files/raw/genomes"):
-     """
-     Get FASTA files for each chromosome for a current genome
-     """
-     if logger is None:
-         logger = logging.getLogger(__name__)
-
-     if include is not None and exclude is not None:
-         raise ValueError("Must pass EITHER exclude or include. Cannot pass both.")
-
-     chroms = get_all_chroms(genome=genome, exclude=exclude, include=include, logger=logger)
-
-     genome_out_dir = os.path.join(out_dir, genome)
-     os.makedirs(genome_out_dir, exist_ok=True)
-
-     for chrom in chroms:
-         chrom_save_path = os.path.join(genome_out_dir, f"{genome}_{chrom}.json")
-         if not os.path.exists(chrom_save_path):
-             url = f"https://api.genome.ucsc.edu/getData/sequence?genome={genome};chrom={chrom}"
-             try:
-                 r = requests.get(url)
-                 r.raise_for_status()
-                 json_output = r.json()
-
-                 with open(chrom_save_path, "w") as f:
-                     json.dump(json_output, f, indent=4)
-
-                 logger.info(f"Downloaded {chrom} in genome {genome}.")
-
-             except:
-                 logger.error(f"Failed to fetch {chrom} for genome {genome}")
-         else:
-             logger.info(f"Already downloaded {chrom} in genome {genome}. Skipping.")
-
-     logger.info(f"Downloaded {len(chroms)} chromosomes in genome {genome}.")
-
-     return chroms
-
- def fetch_tfbs_track(chrom: str, genome: str = "hg38", logger: logging.Logger = None):
-     """
-     Fetch raw data from the track encRegTfbsClustered.
-     Returns json data for the specified chromosome, where key information appears as follows:
-     "encRegTfbsClustered": [
-         {
-             "bin": 585,
-             "chrom": "chr1",
-             "chromStart": 9917,
-             "chromEnd": 10247,
-             "name": "NUFIP1",
-             "score": 680,
-             "sourceCount": 1,
-             "sourceIds": "1063",
-             "sourceScores": "680"
-         },...
-     ]
-     """
-     if logger is None:
-         logger = logging.getLogger(__name__)
-
-     params = {"genome": genome, "track": "encRegTfbsClustered", "chrom": chrom}
-     json_out_dir = os.path.join("../../data_files/raw/tfclust/encRegTfbsClustered_data", genome)
-     json_out_path = os.path.join(json_out_dir, f"{params['genome']}_{params['track']}_{params['chrom']}.json")
-     if not os.path.exists(json_out_path):
-         url = f"https://api.genome.ucsc.edu/getData/track?genome={params['genome']};track={params['track']};chrom={params['chrom']}"
-         try:
-             r = requests.get(url)
-             r.raise_for_status()
-
-             # Extract the output and save it
-             os.makedirs(json_out_dir, exist_ok=True)
-
-             # Save it
-             json_output = r.json()
-             with open(json_out_path, "w") as f:
-                 json.dump(json_output, f, indent=4)
-
-             logger.info(f"Saved to {json_out_path}")
-         except:
-             logger.error(f"Failed to fetch encRegTfbsClustered for {chrom} in genome {genome}")
-     else:
-         logger.info(f"Already downloaded encRegTfbsClustered for {chrom} in {genome}. Skipping download.")
-         with open(json_out_path, "r") as f:
-             json_output = json.load(f)
-
-     return json_output
-
- def get_sequence(chrom_json: dict, start: int, end: int, flank5: int = 0, flank3: int = 0, genome: str = "hg38", logger: logging.Logger = None):
-     """
-     Given genome, start position, end position, chromosome json, and desired flank size, extract the raw DNA sequence.
-
-     chrom_json has keys: "downloadTime", "downloadTimeStamp", "genome", "chrom", "start", "end", "dna"
-     """
-     if logger is None:
-         logger = logging.getLogger(__name__)
-
-     chrom_seq = chrom_json["dna"]
-     chrom = chrom_json["chrom"]
-     if chrom_json["start"] != 0:
-         logger.warning(f"Start position of chromosome is not 0. Start position: {chrom_json['start']}")
-
-     # Calculate new start and end indices
-     new_start = max(0, start - flank5)
-     new_end = end + flank3
-     if new_end > chrom_json["end"]:
-         logger.warning(f"Attempting to query {chrom} from {new_start} to {new_end}, but last index is {chrom_json['end']}. Manually setting last index to {chrom_json['end']}")
-         new_end = chrom_json["end"]
-
-     results_dict = {
-         "chromStart": new_start,
-         "chromEnd": new_end,
-         "seq": chrom_seq[new_start:new_end],  # end is NOT inclusive!!
-     }
-     return results_dict
-
- def extract_tfbs_with_context(
-     genome: str,
-     flank5: int = 500,
-     flank3: int = 500,
-     allowed_tfs: list = None,
-     out_dir: str = "../../data_files/processed/tfclust",
-     control_run: bool = True,  # if there's a flank, whether to also run without flank
-     chroms: list = None,
-     logger: logging.Logger = None,
-     redo: bool = False,  # whether to redo even if we've already processed this
-     idx: int = 0,  # index of worker
- ):
-     """
-     Main method for a genome. By calling helpers, gets all chromosomes and their sequences, gets encRegTfbsClustered, and queries the feature indices in encRegTfbsClustered against chrom seqs for binding site sequences.
-     """
-     if logger is None:
-         logger = logging.getLogger(__name__)
-
-     # Get all chromosomes for the current genome, including downloading their sequences
-     if chroms is None:
-         all_chroms = get_all_chrom_fasta_files(genome=genome, logger=logger)
-     else:
-         all_chroms = get_all_chrom_fasta_files(
-             genome=genome,
-             exclude=[c for c in get_all_chroms(genome) if c not in chroms],
-             logger=logger,
-         )
-
-     # For each chrom, (1) download full fasta sequence, (2) download encRegTfbsClustered, (3) query features from [2] through [1]
-     # Initialize the final DF
-     results_cols = [
-         "bin", "chrom", "chromStart", "chromEnd", "name", "score", "scoreCount",
-         "sourceIds", "sourceScores", "seq", "seq_flanked",
-         "chromStart_flanked", "chromEnd_flanked", "flank5", "flank3",
-     ]
-     results_init = pd.DataFrame(columns=results_cols)
-
-     # Make a list of the types of runs we need
-     queries = [{"flank5": flank5, "flank3": flank3}]
-     if not ((flank5 == 0) and (flank3 == 0) and control_run):
-         queries.append({"type": "control", "flank5": 0, "flank3": 0})
-         queries[0]["type"] = "flank"
-     elif (flank5 == 0) and (flank3 == 0):
-         queries[0]["type"] = "control"
-
-     merged_done_txt_path = os.path.join("../../data_files/processed/tfclust", genome, "logs", "completed.txt")
-     done_txt_path = os.path.join("../../data_files/processed/tfclust", genome, "logs", f"completed_worker_{idx}.txt")
-     if os.path.exists(merged_done_txt_path):
-         completed_chroms = pd.read_csv(merged_done_txt_path, sep="\t")
-         completed_chroms = list(completed_chroms["chrom"])
-     else:
-         completed_chroms = []
-
-     with open(done_txt_path, "w") as f:
-         f.write("chrom\trow_count\n")
-
-     logger.info(f"{len(completed_chroms)} already complete: {','.join(completed_chroms)}")
-
-     count = 0
-     # Iterate through chromosomes: (1) download encRegTfbsClustered, (2) query each feature in the chrom sequence
-     for chrom in all_chroms:
-         chrom_write_count = 0
-         chrom_output_fname = os.path.join("../../data_files/processed/tfclust", genome, f"encRegTfbsClustered_{genome}_{chrom}.csv")
-
-         # If we've already done it, no need
-         if chrom in completed_chroms and not redo:
-             chrom_write_count = len(pd.read_csv(chrom_output_fname))
-             with open(done_txt_path, "a") as f:
-                 f.write(f"{chrom}\t{chrom_write_count}\n")
-             continue
-
-         #### If we ARE processing this, process it!
-         # Load chromosome sequence
-         with open(os.path.join("../../data_files/raw/genomes", genome, f"{genome}_{chrom}.json"), "r") as f:
-             chrom_json = json.load(f)
-
-         results_init.to_csv(chrom_output_fname, index=False)
-         logger.info(f"Fetching {chrom}...")
-
-         # Fetch the data json (has start and end positions in the chrom, but not the sequence)
-         try:
-             data = fetch_tfbs_track(chrom, genome=genome, logger=logger)
-             logger.info(f" → Fetched {chrom} successfully")
-             features = data.get("encRegTfbsClustered", {})
-             logger.info(f" → Found {len(features)} features")
-         except Exception as e:
-             logger.info(f" Failed to fetch {chrom}: {e}")
-             continue
-
-         # Get the sequences of the DNA binding sites
-         for feature_no, feature in enumerate(features):
-             # Initialize new results row
-             new_row = {}
-
-             # Check if tf is valid
-             tf_name = feature.get("name", "UnknownTF")
-             if allowed_tfs and tf_name not in allowed_tfs:
-                 logger.warning(f"TF name {tf_name} not in allowed_tfs. Skipping.")
-                 continue
-             # Make sure the chromosomes match and we have the right sequence!
-             assert feature["chrom"] == chrom, f"Chromosome mismatch: {feature['chrom']} != {chrom}"
-
-             # Add all the cols already in the json
-             for c in results_cols:
-                 if c in feature:
-                     new_row[c] = feature[c]
-
-             ### Extract sequence
-             start = feature["chromStart"]
-             end = feature["chromEnd"]
-
-             for query in queries:
-                 results_dict = get_sequence(chrom_json, start, end, flank5=query["flank5"], flank3=query["flank3"], genome=genome, logger=logger)
-                 # Add the returned info
-                 if query["type"] == "control":
-                     new_row["seq"] = results_dict["seq"]  # note: these sequences will have soft-masked repeats!
-                 elif query["type"] == "flank":
-                     new_row["seq_flanked"] = results_dict["seq"]
-                     new_row["chromStart_flanked"] = results_dict["chromStart"]
-                     new_row["chromEnd_flanked"] = results_dict["chromEnd"]
-                     new_row["flank5"] = flank5
-                     new_row["flank3"] = flank3
-
-             # Fill out any blank columns
-             try:
-                 for c in results_cols:
-                     if c not in new_row:
-                         new_row[c] = None
-
-                 new_row_df = pd.DataFrame(data=new_row, index=[0])
-                 new_row_df = new_row_df[results_cols]  # assert the right column order
-                 if new_row_df["seq"] is not None:
-                     new_row_df.to_csv(chrom_output_fname, mode="a", index=False, header=False)
-                     logger.info(f"Wrote new row to {out_dir}/encRegTfbsClustered_{chrom}.csv")
-                     chrom_write_count += 1
-                 else:
-                     logger.info(f"Did not write new row. {new_row}")
-             except Exception as e:
-                 logger.error(f"Failed to write new row to {out_dir}/encRegTfbsClustered_{chrom}.csv: error {e}")
-
-         logger.info(f"Done. Wrote {chrom_write_count} sequences to {out_dir}/encRegTfbsClustered_{chrom}.csv")
-         with open(done_txt_path, "a") as f:
-             f.write(f"{chrom}\t{chrom_write_count}\n")
-         count += chrom_write_count
-
-     logger.info(f"Done with all chroms. Wrote {count} sequences to {out_dir}.")
-
- def merge_completed_files(genome: str):
-     """
-     Merge all completed_worker_*.txt files into a single completed.txt file
-     """
-     logs_dir = os.path.join("../../data_files/processed/tfclust", genome, "logs")
-     merged_path = os.path.join(logs_dir, "completed.txt")
-
-     with open(merged_path, "w") as outfile:
-         outfile.write("chrom\trow_count\n")  # header
-
-         for fname in os.listdir(logs_dir):
-             if fname.startswith("completed_worker_") and fname.endswith(".txt"):
-                 with open(os.path.join(logs_dir, fname), "r") as infile:
-                     for line in infile:
-                         if line.startswith("chrom"):  # skip header lines
-                             continue
-                         outfile.write(line)
-
-     print(f"Merged completed_worker_*.txt into {merged_path}")
-
- def worker(args):
-     """
-     Worker function for parallel processing
-     """
-     # Extract args
-     chrom_group, idx, genome, flank5, flank3, logs_dir = args
-     os.makedirs(logs_dir, exist_ok=True)
-
-     # Define logger
-     logger = logging.getLogger(f"worker_{idx}")
-     logger.setLevel(logging.DEBUG)
-     logger.propagate = False
-
-     log_file = os.path.join(logs_dir, f"worker_{idx}.log")
-     fh = logging.FileHandler(log_file, mode="w", encoding="utf-8")
390
- fh.setLevel(logging.DEBUG)
391
- formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
392
- fh.setFormatter(formatter)
393
- logger.addHandler(fh)
394
-
395
- logger.info(f"Starting worker {idx} for chromosomes: {chrom_group}")
396
-
397
- extract_tfbs_with_context(
398
- genome=genome,
399
- flank5=flank5,
400
- flank3=flank3,
401
- allowed_tfs=None,
402
- out_dir=f"../../data_files/processed/tfclust",
403
- control_run=True,
404
- chroms=chrom_group,
405
- logger=logger,
406
- idx=idx
407
- )
408
-
409
- logger.info(f"Finished worker {idx}")
410
-
411
-
412
- def parallel_extract(genome: str, flank5: int, flank3: int):
413
- """
414
- Run extract_tfbs_with_context in parallel for groups of chromosomes in the genome to speed up processing.
415
- """
416
- chroms = get_all_chroms(genome)
417
- num_cores = multiprocessing.cpu_count()
418
-
419
- # Separate primary vs accessory chromosomes
420
- primary_chroms = [c for c in chroms if "_" not in c]
421
- accessory_chroms = [c for c in chroms if "_" in c]
422
-
423
- # Distribute primary chromosomes round-robin across workers
424
- chunks = [[] for _ in range(num_cores)]
425
- for i, chrom in enumerate(primary_chroms):
426
- chunks[i % num_cores].append(chrom)
427
-
428
- # Now add accessory chromosomes to the least-loaded chunk (by count)
429
- for chrom in accessory_chroms:
430
- min_idx = min(range(num_cores), key=lambda i: len(chunks[i]))
431
- chunks[min_idx].append(chrom)
432
-
433
- # Log how we split it up - want to see which chromosomes are in which chunks.
434
- logging.info(f"{num_cores} CPU cores available. Primary chromosomes distributed round-robin.")
435
- for chunk_no, chunk in enumerate(chunks):
436
- logging.info(f"Chunk {chunk_no}. Chromosomes = {','.join(chunk)}")
437
-
438
- logs_dir = os.path.join("../../data_files/processed/tfclust", genome, "logs")
439
- os.makedirs(logs_dir, exist_ok=True)
440
-
441
- args_list = [(chunk, i, genome, flank5, flank3, logs_dir) for i, chunk in enumerate(chunks)]
442
-
443
- with multiprocessing.Pool(processes=num_cores) as pool:
444
- pool.map(worker, args_list)
445
-
446
- merge_completed_files(genome)
447
-
448
- def main():
449
- genomes = ["hg38", "hg19"]
450
- flank5 = 1000
451
- flank3 = 1000
452
-
453
- # Iterate through genomes
454
- for genome in genomes:
455
- # Extract TF binding sites from bed - 500 flank
456
- parallel_extract(genome, flank5, flank3)
457
-
458
- if __name__ == "__main__":
459
- logging.basicConfig(filename="download.log", encoding="utf-8", level=logging.DEBUG, filemode="w")
460
- logger = logging.getLogger(__name__)
461
-
462
- main()
dpacman/data_tasks/clean/__init__.py ADDED
File without changes
dpacman/data_tasks/clean/remap.py ADDED
@@ -0,0 +1,243 @@
1
+ import pandas as pd
2
+ from omegaconf import DictConfig
3
+ from pathlib import Path
4
+ import rootutils
5
+ import logging
6
+ import os
7
+
8
+ root = rootutils.setup_root(__file__, indicator=".project-root", pythonpath=True)
9
+ logger = logging.getLogger(__name__)
10
+
11
+
12
+ def clean_nr(nr_raw_path: Path | str):
13
+ """
14
+ Clean the non-redundant peaks BED file.
15
+ Delete duplicate rows, assign columns, only keep columns we need.
16
+ """
17
+ nr = pd.read_csv(nr_raw_path, sep="\t", header=None)
18
+ nr.columns = [
19
+ "chrom",
20
+ "chromStart",
21
+ "chromEnd",
22
+ "biotypes",
23
+ "score",
24
+ "strand",
25
+ "thickStart",
26
+ "thickEnd",
27
+ "itemRgb",
28
+ ]
29
+
30
+ # make sure we correctly interpret the "biotypes" column: one transcription factor separated from all relevant biotypes by exactly ONE colon
31
+ biotype_colon_counts = (
32
+ nr["biotypes"]
33
+ .str.count(":")
34
+ .value_counts()
35
+ .reset_index()["biotypes"]
36
+ .unique()
37
+ .tolist()
38
+ )
39
+ assert biotype_colon_counts == [
40
+ 1
41
+ ] # confirm belief that : separates the name of a transcription factor from its biotype - just ONE biotype.
42
+
43
+ # then split the column accordingly into tr (transcriptional regulator) and biotypes
44
+ nr[["tr", "biotypes"]] = nr["biotypes"].str.split(":", expand=True)
45
+
46
+ # group and concat the scores
47
+ logger.info(
48
+ f"Keeping only the following columns: chrom, chromStart, chromEnd, biotypes, tr, score."
49
+ )
50
+ nr = nr[["chrom", "chromStart", "chromEnd", "biotypes", "score", "tr"]]
51
+
52
+ # drop duplicate rows - all fields
53
+ logger.info(f"Size of database before dropping duplicate rows: {len(nr)}")
54
+ nr = nr.drop_duplicates().reset_index(drop=True)
55
+ logger.info(f"Size of database after dropping duplicate rows: {len(nr)}")
56
+
57
+ # look for duplicate rows where it's clearly the same experiment but somehow different scores - chrom, chromStart, chromEnd, tr, biotypes
58
+ experiment_dups = len(
59
+ nr.loc[
60
+ nr.duplicated(subset=["chrom", "chromStart", "chromEnd", "tr", "biotypes"])
61
+ ]
62
+ )
63
+ logger.info(
64
+ f"{experiment_dups} total rows with same chrom, chromStart, chromEnd, biotypes, tr but different score."
65
+ )
66
+
67
+ logger.info(
68
+ f"Grouping by everything except score, comma-concatenating unique scores"
69
+ )
70
+ nr = (
71
+ nr.groupby(["chrom", "chromStart", "chromEnd", "tr", "biotypes"])
72
+ .agg({"score": lambda x: ",".join(map(str, sorted(set(x))))})
73
+ .reset_index()
74
+ )
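+ # e.g. two rows sharing (chrom, chromStart, chromEnd, tr, biotypes) with scores 5 and 9
+ # collapse into one row whose score is "5,9"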
75
+
76
+ logger.info(f"Final database size: {len(nr)}")
77
+
78
+ nr["chromLen"] = nr["chromEnd"] - nr["chromStart"]
79
+
80
+ return nr
81
+
82
+
83
+ def clean_crm(crm_raw_path: Path | str):
84
+ """
85
+ Clean the CRM BED file.
86
+ Delete duplicate rows, assign columns, only keep columns we need.
87
+ """
88
+
89
+ crm = pd.read_csv(crm_raw_path, sep="\t", header=None)
90
+ crm.columns = [
91
+ "chrom",
92
+ "chromStart",
93
+ "chromEnd",
94
+ "tr",
95
+ "score",
96
+ "strand",
97
+ "thickStart",
98
+ "thickEnd",
99
+ "reserved",
100
+ ]
101
+
102
+ # group and concat the scores
103
+ logger.info(
104
+ f"Keeping only the following columns: chrom, chromStart, chromEnd, tr, score."
105
+ )
106
+ crm = crm[["chrom", "chromStart", "chromEnd", "tr", "score"]]
107
+
108
+ # drop duplicate rows - all fields
109
+ logger.info(f"Size of database before dropping duplicate rows: {len(crm)}")
110
+ crm = crm.drop_duplicates().reset_index(drop=True)
111
+ logger.info(f"Size of database after dropping duplicate rows: {len(crm)}")
112
+
113
+ # look for duplicate rows where it's clearly the same experiment but somehow different scores - chrom, chromStart, chromEnd, tr
114
+ experiment_dups = len(
115
+ crm.loc[crm.duplicated(subset=["chrom", "chromStart", "chromEnd", "tr"])]
116
+ )
117
+ logger.info(
118
+ f"{experiment_dups} total rows with same chrom, chromStart, chromEnd, tr but different score."
119
+ )
120
+
121
+ logger.info(
122
+ f"Grouping by everything except score, comma-concatenating unique scores"
123
+ )
124
+ crm = (
125
+ crm.groupby(["chrom", "chromStart", "chromEnd", "tr"])
126
+ .agg({"score": lambda x: ",".join(map(str, sorted(set(x))))})
127
+ .reset_index()
128
+ )
129
+
130
+ logger.info(f"Final database size: {len(crm)}")
131
+
132
+ crm["chromLen"] = crm["chromEnd"] - crm["chromStart"]
133
+
134
+ return crm
135
+
136
+
137
+ def main(cfg: DictConfig):
138
+ # Define the paths
139
+ nr_raw_path = Path(root) / cfg.data_task.nr_raw_path
140
+ nr_processed_dir = Path(root) / cfg.data_task.nr_processed_dir
141
+ nr_processed_filename = cfg.data_task.nr_processed_filename
142
+ nr_savepath = os.path.join(nr_processed_dir, nr_processed_filename)
143
+
144
+ crm_raw_path = Path(root) / cfg.data_task.crm_raw_path
145
+ crm_processed_dir = Path(root) / cfg.data_task.crm_processed_dir
146
+ crm_processed_filename = cfg.data_task.crm_processed_filename
147
+ crm_savepath = os.path.join(crm_processed_dir, crm_processed_filename)
148
+
149
+ os.makedirs(nr_processed_dir, exist_ok=True)
150
+ os.makedirs(crm_processed_dir, exist_ok=True)
151
+
152
+ # Clean and save the non redundant peaks file
153
+ if not (os.path.exists(nr_savepath)):
154
+ nr_cleaned = clean_nr(nr_raw_path)
155
+ nr_cleaned.to_csv(nr_savepath, sep="\t", index=False)
156
+ logger.info(
157
+ f"Saved cleaned non-redundant peaks (NR) database to: {nr_savepath}"
158
+ )
159
+ else:
160
+ nr_cleaned = None
161
+ logger.info(f"File already exists at {nr_savepath}. Skipping")
162
+
163
+ # Clean and save the CRM file
164
+ if not (os.path.exists(crm_savepath)):
165
+ crm_cleaned = clean_crm(crm_raw_path)
166
+ crm_cleaned.to_csv(crm_savepath, sep="\t", index=False)
167
+ logger.info(
168
+ f"Saved cleaned cis-regulatory modules (CRM) database to: {crm_savepath}"
169
+ )
170
+ else:
171
+ crm_cleaned = None
172
+ logger.info(f"File already exists at {crm_savepath}. Skipping")
173
+
174
+ # Save example files
175
+ if cfg.data_task.save_example_files:
176
+ example_nr_dir = nr_processed_dir / "examples"
177
+ os.makedirs(example_nr_dir, exist_ok=True)
178
+ example_nr_savepath = os.path.join(
179
+ example_nr_dir, "example500_" + nr_processed_filename
180
+ )
181
+
182
+ if not (os.path.exists(example_nr_savepath)):
183
+ if nr_cleaned is None:
184
+ nr_cleaned = pd.read_csv(nr_savepath, sep="\t")
185
+ nr_cleaned.sample(n=500, random_state=42).reset_index(drop=True).to_csv(
186
+ example_nr_savepath, sep="\t", index=False
187
+ )
188
+ logger.info(
189
+ f"Saved example NR file with 500 rows to: {example_nr_savepath}"
190
+ )
191
+ else:
192
+ logger.info(
193
+ f"Example file already exists at {example_nr_savepath}. Skipping"
194
+ )
195
+
196
+ # CRM example
197
+ example_crm_dir = crm_processed_dir / "examples"
198
+ os.makedirs(example_crm_dir, exist_ok=True)
199
+ example_crm_savepath = os.path.join(
200
+ example_crm_dir, "example500_" + crm_processed_filename
201
+ )
202
+ if not (os.path.exists(example_crm_savepath)):
203
+ if crm_cleaned is None:
204
+ crm_cleaned = pd.read_csv(crm_savepath, sep="\t")
205
+ crm_cleaned.sample(n=500, random_state=42).reset_index(drop=True).to_csv(
206
+ example_crm_savepath, sep="\t", index=False
207
+ )
208
+ logger.info(
209
+ f"Saved example CRM file with 500 rows to: {example_crm_savepath}"
210
+ )
211
+ else:
212
+ logger.info(
213
+ f"Example file already exists at {example_crm_savepath}. Skipping"
214
+ )
215
+
216
+ # CRM example for one transcription factor
217
+ example_crm_tf_savepath = os.path.join(
218
+ example_crm_dir, "example500_ERG_" + crm_processed_filename
219
+ )
220
+ if not (os.path.exists(example_crm_tf_savepath)):
221
+ if crm_cleaned is None:
222
+ crm_cleaned = pd.read_csv(crm_savepath, sep="\t")
223
+ crm_example_tf_db = crm_cleaned.copy(deep=True)
224
+ crm_example_tf_db["tr"] = crm_example_tf_db["tr"].apply(
225
+ lambda x: x.split(",")
226
+ )
227
+ crm_example_tf_db = crm_example_tf_db.explode("tr").reset_index(drop=True)
228
+ crm_example_tf_db = crm_example_tf_db.loc[crm_example_tf_db["tr"] == "ERG"]
229
+ crm_example_tf_db = crm_example_tf_db.sample(
230
+ n=min(500, len(crm_example_tf_db)), random_state=42
231
+ ).reset_index(drop=True)
232
+ crm_example_tf_db.to_csv(example_crm_tf_savepath, sep="\t", index=False)
233
+ logger.info(
234
+ f"Saved example CRM file for one TF with 500 rows to: {example_crm_tf_savepath}"
235
+ )
236
+ else:
237
+ logger.info(
238
+ f"Example file already exists at {example_crm_tf_savepath}. Skipping"
239
+ )
240
+
241
+
242
+ if __name__ == "__main__":
243
+ main()
dpacman/data_tasks/download/README.md ADDED
@@ -0,0 +1,26 @@
1
+ This directory holds functions for downloading raw data needed to train the binding site predictor (classifier) on processed ChIP-seq peaks.
2
+
3
+ ## Human genome
4
+ `genome.py`
5
+ * Download the sequences of all chromosomes from a certain genome (e.g. hg38, used for this project)
6
+ * Configurations associated with this download can be found in `./configs/data_task/download/genome.yaml`
7
+
8
+ ### Running the download
9
+ To run this download, please change directory to `DPACMAN/dpacman` and run:
10
+
11
+ ```
12
+ python -u -m scripts.preprocess data_task=download/genome
13
+ ```
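+
+ Hydra command-line overrides should also work here. `genome.py` reads optional top-level `include`/`exclude` lists (via `cfg.get("include")`), so restricting the download to a few chromosomes would look roughly like this (hypothetical override, assuming the key is not already in the YAML):
+
+ ```
+ python -u -m scripts.preprocess data_task=download/genome '+include=[chr21,chr22]'
+ ```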
14
+
15
+ ## ReMap 2022
16
+ `remap.py`
17
+ * Download non-redundant peaks: `remap2022_nr_macs2_hg38_v1_0.bed`
18
+ * Download cis-regulatory modules (CRMs): `remap2022_crm_macs2_hg38_v1_0.bed`
19
+ * Configurations associated with this download can be found in `./configs/data_task/download/remap.yaml`
20
+
21
+ ### Running the download
22
+ To run this download, please change directory to `DPACMAN/dpacman` and run:
23
+
24
+ ```
25
+ python -u -m scripts.preprocess data_task=download/remap
26
+ ```
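+
+ `remap.py` also reads an optional `data_task.delete_zip` flag (with a default of false) that tells `download_unzip.sh` to remove the archive after extraction. A sketch of the override (hypothetical, assuming the key is absent from the YAML):
+
+ ```
+ python -u -m scripts.preprocess data_task=download/remap '+data_task.delete_zip=true'
+ ```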
dpacman/data_tasks/download/__init__.py ADDED
File without changes
dpacman/data_tasks/download/download_unzip.sh ADDED
@@ -0,0 +1,41 @@
1
+ #!/bin/bash
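+ # Usage: download_unzip.sh <URL> <DEST_DIR> <FILENAME> <DELETE_ZIP: "true"|"false">
+ # e.g. (hypothetical): bash download_unzip.sh https://example.org/data.bed.gz data_files/raw/remap data.bed.gz true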
2
+
3
+ URL="$1"
4
+ DEST_DIR="$2"
5
+ FILENAME="$3" # e.g., intact.zip or biogrid.tab3.gz
6
+ DELETE_ZIP="$4" # "true" or "false"
7
+
8
+ FILE="$DEST_DIR/$FILENAME"
9
+
10
+ mkdir -p "$DEST_DIR"
11
+
12
+ echo "Starting download from $URL..."
13
+ wget "$URL" -O "$FILE"
14
+
15
+ # Handle .zip files
16
+ if [[ "$FILENAME" == *.zip ]]; then
17
+ echo "File is a .zip archive. Unzipping to $DEST_DIR in 10s..."
18
+ sleep 10
19
+ unzip "$FILE" -d "$DEST_DIR"
20
+
21
+ if [[ "$DELETE_ZIP" == "true" ]]; then
22
+ echo "delete_zip=true: removing $FILE"
23
+ rm -f "$FILE"
24
+ fi
25
+
26
+ # Handle .gz files
27
+ elif [[ "$FILENAME" == *.gz ]]; then
28
+ echo "File is a .gz archive. Extracting in 10s..."
29
+ sleep 10
30
+ gunzip -c "$FILE" > "${FILE%.gz}"
31
+
32
+ if [[ "$DELETE_ZIP" == "true" ]]; then
33
+ echo "delete_zip=true: removing $FILE"
34
+ rm -f "$FILE"
35
+ fi
36
+
37
+ else
38
+ echo "File is not a .zip or .gz archive. Skipping extraction and deletion."
39
+ fi
40
+
41
+ echo "Done."
dpacman/data_tasks/download/genome.py ADDED
@@ -0,0 +1,233 @@
1
+ """
2
+ Script for downloading the chromosome sequences of one or more genomes (e.g. hg38)
3
+ """
4
+
5
+ import rootutils
6
+
7
+ root = rootutils.setup_root(__file__, indicator=".project-root", pythonpath=True)
8
+
9
+ import os
10
+ import logging
11
+ import requests
12
+ import json
13
+ import hydra
14
+ from omegaconf import DictConfig
15
+ from pathlib import Path
17
+ import multiprocessing
18
+ from hydra.core.hydra_config import HydraConfig
19
+
20
+ base_logger = logging.getLogger(__name__)
21
+
22
+
23
+ def get_all_chroms(
24
+ genome: str = "hg38",
25
+ exclude: list = None,
26
+ logger: logging.Logger = None,
27
+ include: list = None,
28
+ ):
29
+ """
30
+ Fetch all chromosome names for a genome.
31
+ Note: some chromosomes are in unexpected formats (e.g. there is 'chr15', but also 'chr15_ML143371v1_fix')
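+ Example (illustrative): get_all_chroms("hg38", include=["chr21", "chr22"]) -> ["chr21", "chr22"]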
32
+ """
33
+ if logger is None:
34
+ logger = logging.getLogger(__name__)
35
+
36
+ url = f"https://api.genome.ucsc.edu/list/chromosomes?genome={genome}"
37
+ try:
38
+ r = requests.get(url)
39
+ r.raise_for_status()
40
+ except requests.RequestException as e:
41
+ logger.error(f"Failed to fetch all chromosomes for genome {genome}: {e}")
+ raise
42
+
43
+ if include is not None and exclude is not None:
44
+ raise ValueError(f"Must pass EITHER exclude or include. Cannot pass both.")
45
+
46
+ all_chroms = list(r.json()["chromosomes"].keys())
47
+ if include is not None:
48
+ logger.info(f"Including only the following chromosomes: {include}")
49
+ all_chroms = [chrom for chrom in all_chroms if chrom in include]
50
+ if exclude is not None:
51
+ logger.info(f"Excluding the following chromosomes: {exclude}")
52
+ all_chroms = [chrom for chrom in all_chroms if not (chrom in exclude)]
53
+
54
+ logger.info(f"Found {len(all_chroms)} chromosomes in genome {genome}.")
55
+
56
+ return all_chroms
57
+
58
+
59
+ def get_all_chrom_fasta_files(
60
+ genome: str = "hg38",
61
+ exclude: list = None,
62
+ include: list = None,
63
+ logger: logging.Logger = None,
64
+ output_dir="../../data_files/raw/genomes",
65
+ ):
66
+ """
67
+ Get FASTA files for each chromosome for a current genome
68
+ """
69
+ if logger is None:
70
+ logger = logging.getLogger(__name__)
71
+
72
+ if include is not None and exclude is not None:
73
+ raise ValueError(f"Must pass EITHER exclude or include. Cannot pass both.")
74
+
75
+ chroms = get_all_chroms(
76
+ genome=genome, exclude=exclude, include=include, logger=logger
77
+ )
78
+
79
+ logger.info(f"Saving downloaded chromosomes to {output_dir}")
80
+ os.makedirs(output_dir, exist_ok=True)
81
+
82
+ for chrom in chroms:
83
+ chrom_save_path = os.path.join(output_dir, f"{genome}_{chrom}.json")
84
+ if not (os.path.exists(chrom_save_path)):
85
+ url = f"https://api.genome.ucsc.edu/getData/sequence?genome={genome};chrom={chrom}"
86
+ try:
87
+ r = requests.get(url)
88
+ r.raise_for_status()
89
+ json_output = r.json()
90
+
91
+ with open(chrom_save_path, "w") as f:
92
+ json.dump(json_output, f, indent=4)
93
+
94
+ logger.info(
95
+ f"Downloaded {chrom} in genome {genome}. Saved to: {chrom_save_path}"
96
+ )
97
+
98
+ except Exception as e:
100
+ logger.error(f"Failed to fetch {chrom} for genome {genome}: {e}")
100
+ else:
101
+ logger.info(f"Already downloaded {chrom} in genome {genome}. Skipping.")
102
+
103
+ logger.info(f"Downloaded {len(chroms)} chromosomes in genome {genome}.")
104
+
105
+ return chroms
106
+
107
+
108
+ def merge_completed_files(genome: str, logs_dir: Path):
109
+ """
110
+ Merge all completed_worker_*.txt files into a single completed.txt file
111
+ """
112
+ merged_path = os.path.join(logs_dir, "completed.txt")
113
+
114
+ with open(merged_path, "w") as outfile:
115
+ outfile.write("chrom\trow_count\n") # header
116
+
117
+ for fname in os.listdir(logs_dir):
118
+ if fname.startswith("completed_worker_") and fname.endswith(".txt"):
119
+ with open(os.path.join(logs_dir, fname), "r") as infile:
120
+ for line in infile:
121
+ if line.startswith("chrom"): # skip header lines
122
+ continue
123
+ outfile.write(line)
124
+
125
+ print(f"Merged completed_worker_*.txt into {merged_path}")
126
+
127
+
128
+ def worker(args):
129
+ """
130
+ Worker function for parallel processing
131
+ """
132
+ # Extract args
133
+ chrom_group, idx, genome, logs_dir, output_dir = args
134
+ os.makedirs(logs_dir, exist_ok=True)
135
+
136
+ # Define logger
137
+ wlogger = logging.getLogger(f"worker_{idx}")
138
+ wlogger.setLevel(logging.DEBUG)
139
+ wlogger.propagate = False
140
+
141
+ log_file = os.path.join(logs_dir, f"worker_{idx}.log")
142
+ fh = logging.FileHandler(log_file, mode="w", encoding="utf-8")
143
+ fh.setLevel(logging.DEBUG)
144
+ formatter = logging.Formatter("%(asctime)s - %(levelname)s - %(message)s")
145
+ fh.setFormatter(formatter)
146
+ wlogger.addHandler(fh)
147
+
148
+ wlogger.info(f"Starting worker {idx} for chromosomes: {chrom_group}")
149
+
150
+ get_all_chrom_fasta_files(
151
+ genome=genome, include=chrom_group, logger=wlogger, output_dir=output_dir
152
+ )
153
+
154
+ wlogger.info(f"Finished worker {idx}")
155
+
156
+
157
+ def parallel_extract(
158
+ genome: str,
159
+ include: list = None,
160
+ exclude: list = None,
161
+ output_dir: Path = None,
162
+ logs_dir: Path = None,
163
+ ):
164
+ """
165
+ Download chromosome sequences (via get_all_chrom_fasta_files) in parallel for groups of chromosomes in the genome to speed up processing.
166
+ """
167
+ # Get all chromosomes whose sequences we want to download.
168
+ chroms = get_all_chroms(genome, exclude=exclude, include=include)
169
+ num_cores = multiprocessing.cpu_count() - 1
170
+
171
+ # Separate primary vs accessory chromosomes
172
+ primary_chroms = [c for c in chroms if "_" not in c]
173
+ accessory_chroms = [c for c in chroms if "_" in c]
174
+
175
+ base_logger.info(f"Total primary chromosomes: {len(primary_chroms)}")
176
+ for pc in primary_chroms:
177
+ base_logger.info(pc)
178
+ base_logger.info(f"Total accessory chromosomes: {len(accessory_chroms)}")
179
+ for ac in accessory_chroms:
180
+ base_logger.info(ac)
181
+
182
+ # Distribute primary chromosomes round-robin across workers
183
+ chunks = [[] for _ in range(num_cores)]
184
+ for i, chrom in enumerate(primary_chroms):
185
+ chunks[i % num_cores].append(chrom)
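+ # e.g. 4 workers and chr1..chr6 round-robin into [[chr1, chr5], [chr2, chr6], [chr3], [chr4]]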
186
+
187
+ # Now add accessory chromosomes to the least-loaded chunk (by count)
188
+ for chrom in accessory_chroms:
189
+ min_idx = min(range(num_cores), key=lambda i: len(chunks[i]))
190
+ chunks[min_idx].append(chrom)
191
+
192
+ # Log how we split it up - want to see which chromosomes are in which chunks.
193
+ logging.info(
194
+ f"{num_cores} CPU cores available (leaving 1 empty). Primary chromosomes distributed round-robin."
195
+ )
196
+ for chunk_no, chunk in enumerate(chunks):
197
+ logging.info(f"Chunk {chunk_no}. Chromosomes = {','.join(chunk)}")
198
+
199
+ args_list = [
200
+ (chunk, i, genome, logs_dir, output_dir) for i, chunk in enumerate(chunks)
201
+ ]
202
+
203
+ with multiprocessing.Pool(processes=num_cores) as pool:
204
+ pool.map(worker, args_list)
205
+
206
+ merge_completed_files(genome, logs_dir)
207
+
208
+
209
+ def main(cfg: DictConfig):
210
+ include = cfg.get("include", None)
211
+ exclude = cfg.get("exclude", None)
212
+
213
+ output_dir = Path(root) / cfg.data_task.output_dir
214
+ os.makedirs(output_dir, exist_ok=True)
215
+
216
+ # Download the sequences of all chromosomes
217
+ for genome in cfg.data_task.genomes:
218
+ base_logger.info(f"Downloading all chromosomes for genome {genome}")
219
+
220
+ # Make a subfolder for this specific genome and its logs
221
+ genome_output_dir = output_dir / genome
222
+ genome_logs_dir = Path(HydraConfig.get().run.dir) / genome / "logs"
223
+ parallel_extract(
224
+ genome,
225
+ include=include,
226
+ exclude=exclude,
227
+ output_dir=genome_output_dir,
228
+ logs_dir=genome_logs_dir,
229
+ )
230
+
231
+
232
+ if __name__ == "__main__":
233
+ main()
dpacman/data_tasks/download/remap.py ADDED
@@ -0,0 +1,87 @@
1
+ from omegaconf import DictConfig
2
+ from pathlib import Path
3
+ import logging
4
+ import subprocess
5
+ import os
6
+
7
+ import rootutils
8
+
9
+ root = rootutils.setup_root(__file__, indicator=".project-root", pythonpath=True)
10
+
11
+ logger = logging.getLogger(__name__)
12
+
13
+
14
+ def recap(result):
15
+ """
16
+ Print out info about the success of the download job.
17
+ """
18
+ logger.info("STDOUT:\n" + result.stdout)
19
+ if result.stderr:
20
+ logger.warning("STDERR:\n" + result.stderr)
21
+
22
+ if result.returncode != 0:
23
+ logger.error(f"Download script exited with code {result.returncode}")
24
+ else:
25
+ logger.info("Download completed successfully.")
26
+
27
+
28
+ def main(cfg: DictConfig):
29
+ """
30
+ Download the ReMap 2022 BED files: non-redundant peaks and cis-regulatory modules (CRMs)
31
+ """
32
+
33
+ script_path = root / "dpacman/data_tasks/download/download_unzip.sh"
34
+ delete_zip = str(cfg.data_task.get("delete_zip", False)).lower()
35
+ assert delete_zip in ("true", "false")
36
+
37
+ nr_url = cfg.data_task.nr_url
38
+ nr_output_dir = root / cfg.data_task.nr_output_dir
39
+ nr_filename = cfg.data_task.nr_filename
40
+
41
+ logger.info(f"Running {cfg.data_task.type} for {cfg.data_task.name}")
42
+
43
+ ##### Non-redundant peaks download #####
44
+ logger.info(f"Script: {script_path}")
45
+ logger.info(f"Non-Redundant Peaks - URL: {nr_url}")
46
+ logger.info(f"Non-Redundant Peaks - Output: {nr_output_dir / nr_filename}")
47
+
48
+ os.makedirs(nr_output_dir, exist_ok=True)
49
+
50
+ # Run the download_unzip.sh script as a subprocess
51
+ result = subprocess.run(
52
+ ["bash", str(script_path), nr_url, str(nr_output_dir), nr_filename, delete_zip],
53
+ capture_output=True,
54
+ text=True,
55
+ )
56
+
57
+ recap(result)
58
+
59
+ ##### CRMs download #####
60
+ crm_url = cfg.data_task.crm_url
61
+ crm_output_dir = root / cfg.data_task.crm_output_dir
62
+ crm_filename = cfg.data_task.crm_filename
63
+
64
+ logger.info(f"CRMs - URL: {crm_url}")
65
+ logger.info(f"CRMs - Output: {crm_output_dir / crm_filename}")
66
+
67
+ os.makedirs(crm_output_dir, exist_ok=True)
68
+
69
+ # Run the download_unzip.sh script as a subprocess
70
+ result = subprocess.run(
71
+ [
72
+ "bash",
73
+ str(script_path),
74
+ crm_url,
75
+ str(crm_output_dir),
76
+ crm_filename,
77
+ delete_zip,
78
+ ],
79
+ capture_output=True,
80
+ text=True,
81
+ )
82
+
83
+ recap(result)
84
+
85
+
86
+ if __name__ == "__main__":
87
+ main()
dpacman/data_tasks/embeddings/__init__.py ADDED
File without changes
dpacman/{data → data_tasks/embeddings}/compute_embeddings.py RENAMED
@@ -14,6 +14,7 @@ Usage example (DNA + protein in one go):
14
  --out-dir ../data_files/processed/tfclust/hg38_tf/embeddings \
15
  --device cuda
16
  """
 
17
  import os
18
  import re
19
  import argparse
@@ -28,6 +29,7 @@ import time
28
 
29
  # ---- model wrappers ----
30
 
 
31
  class CaduceusEmbedder:
32
  def __init__(self, device, chunk_size=131_072, overlap=0):
33
  """
@@ -39,12 +41,14 @@ class CaduceusEmbedder:
39
  self.tokenizer = AutoTokenizer.from_pretrained(
40
  model_name, trust_remote_code=True
41
  )
42
- self.model = AutoModel.from_pretrained(
43
- model_name, trust_remote_code=True
44
- ).to(device).eval()
45
- self.device = device
 
 
46
  self.chunk_size = chunk_size
47
- self.step = chunk_size - overlap
48
 
49
  def embed(self, seqs):
50
  """
@@ -78,11 +82,11 @@ class CaduceusEmbedder:
78
  return_tensors="pt",
79
  padding=False,
80
  truncation=True,
81
- max_length=self.chunk_size
82
  ).to(self.device)
83
  with torch.no_grad():
84
  out = self.model(**toks).last_hidden_state # (1, L, D)
85
- outputs.append(out.cpu().numpy()[0]) # (L, D)
86
 
87
  return np.stack(outputs, axis=0) # (N, L, D)
88
 
@@ -106,21 +110,29 @@ class CaduceusEmbedder:
106
  t1 = time.perf_counter()
107
  print(f" length={sz:6,d} time={(t1-t0)*1000:7.1f} ms")
108
 
 
109
  class DNABertEmbedder:
110
  def __init__(self, device):
111
- self.tokenizer = AutoTokenizer.from_pretrained("zhihan1996/DNA_bert_6", trust_remote_code=True)
112
- self.model = AutoModel.from_pretrained("zhihan1996/DNA_bert_6", trust_remote_code=True).to(device)
113
- self.device = device
 
 
 
 
114
 
115
  def embed(self, seqs):
116
  embs = []
117
  for s in seqs:
118
- tokens = self.tokenizer(s, return_tensors="pt", padding=True)["input_ids"].to(self.device)
 
 
119
  with torch.no_grad():
120
  out = self.model(tokens).last_hidden_state.mean(1)
121
  embs.append(out.cpu().numpy())
122
  return np.vstack(embs)
123
 
 
124
  class NucleotideTransformerEmbedder:
125
  def __init__(self, device):
126
  # HF “feature-extraction” returns a list of (L, D) arrays for each input
@@ -128,7 +140,9 @@ class NucleotideTransformerEmbedder:
128
  self.pipe = pipeline(
129
  "feature-extraction",
130
  model="InstaDeepAI/nucleotide-transformer-500m-1000g",
131
- device= -1 if device=="cpu" else 0 # HF uses -1 for CPU, 0 for GPU #:contentReference[oaicite:0]{index=0}
 
 
132
  )
133
 
134
  def embed(self, seqs):
@@ -138,8 +152,9 @@ class NucleotideTransformerEmbedder:
138
  """
139
  all_embeddings = self.pipe(seqs, truncation=True, padding=True)
140
  # all_embeddings is a List of shape (L, D) arrays
141
- pooled = [ np.mean(x, axis=0) for x in all_embeddings ]
142
- return np.vstack(pooled)
 
143
 
144
  class ESMEmbedder:
145
  def __init__(self, device):
@@ -157,12 +172,15 @@ class ESMEmbedder:
157
  reps = results["representations"][33]
158
  return reps[:, 1:-1].mean(1).cpu().numpy()
159
 
 
160
  class ESMDBPEmbedder:
161
  def __init__(self, device):
162
- base_model, alphabet = esm.pretrained.esm1b_t33_650M_UR50S()
163
  model_path = (
164
  Path(__file__).resolve().parent.parent
165
- / "pretrained" / "ESM-DBP" / "ESM-DBP.model"
 
 
166
  )
167
  checkpoint = torch.load(model_path, map_location="cpu")
168
  clean_sd = {}
@@ -189,6 +207,7 @@ class ESMDBPEmbedder:
189
  # skip start/end tokens
190
  return reps[:, 1:-1].mean(1).cpu().numpy()
191
 
 
192
  class GPNEmbedder:
193
  def __init__(self, device):
194
  model_name = "songlab/gpn-msa-sapiens"
@@ -200,16 +219,14 @@ class GPNEmbedder:
200
 
201
  def embed(self, seqs):
202
  inputs = self.tokenizer(
203
- seqs,
204
- return_tensors="pt",
205
- padding=True,
206
- truncation=True
207
  ).to(self.device)
208
 
209
  with torch.no_grad():
210
  last_hidden = self.model(**inputs).last_hidden_state
211
  return last_hidden.mean(dim=1).cpu().numpy()
212
 
 
213
  class ProGenEmbedder:
214
  def __init__(self, device):
215
  model_name = "jinyuan22/ProGen2-base"
@@ -219,28 +236,34 @@ class ProGenEmbedder:
219
 
220
  def embed(self, seqs):
221
  inputs = self.tokenizer(
222
- seqs,
223
- return_tensors="pt",
224
- padding=True,
225
- truncation=True
226
  ).to(self.device)
227
  with torch.no_grad():
228
  last_hidden = self.model(**inputs).last_hidden_state
229
  return last_hidden.mean(dim=1).cpu().numpy()
230
 
 
231
  # ---- main pipeline ----
232
 
 
233
  def get_embedder(name, device, for_dna=True):
234
  name = name.lower()
235
  if for_dna:
236
- if name=="caduceus": return CaduceusEmbedder(device)
237
- if name=="dnabert": return DNABertEmbedder(device)
238
- if name=="nucleotide": return NucleotideTransformerEmbedder(device)
239
- if name=="gpn": return GPNEmbedder(device)
 
 
 
 
240
  else:
241
- if name in ("esm",): return ESMEmbedder(device)
242
- if name in ("esm-dbp","esm_dbp"): return ESMDBPEmbedder(device)
243
- if name=="progen": return ProGenEmbedder(device)
 
 
 
244
  raise ValueError(f"Unknown model {name} (for_dna={for_dna})")
245
 
246
 
@@ -250,23 +273,34 @@ def embed_and_save(seqs, ids, embedder, out_path):
250
  with open(out_path.with_suffix(".ids"), "w") as f:
251
  f.write("\n".join(ids))
252
 
253
- if __name__=="__main__":
 
254
 
255
  p = argparse.ArgumentParser()
256
- p.add_argument("--genome-json-dir", default="data_files/raw/genomes/hg38", help="dir of UCSC JSONs")
257
- p.add_argument("--skip-dna", action="store_true", help="if set, skip the chromosome embedding step") #if glm embeddings successful but not plm embeddings
258
- p.add_argument("--tf-fasta", required=True, help="input TF FASTA file")
259
- p.add_argument("--chrom-model", default="caduceus")
260
- p.add_argument("--tf-model", default="esm-dbp")
261
- p.add_argument("--out-dir", default="data_files/processed/tfclust/hg38_tf/embeddings")
262
- p.add_argument("--device", default="cpu")
 
 
 
 
 
 
 
 
 
 
263
  args = p.parse_args()
264
 
265
  os.makedirs(args.out_dir, exist_ok=True)
266
  device = args.device
267
 
268
  if not args.skip_dna:
269
- #Load only primary chromosome JSONs (chr1–22, X, Y, M)
270
  genome_dir = Path(args.genome_json_dir)
271
  chrom_seqs, chrom_ids = [], []
272
  primary_pattern = re.compile(r"^hg38_chr(?:[1-9]|1[0-9]|2[0-2]|X|Y|M)\.json$")
@@ -274,13 +308,17 @@ if __name__=="__main__":
274
  if not primary_pattern.match(j.name):
275
  continue
276
  data = json.loads(j.read_text())
277
- seq = data.get("dna") or data.get("sequence")
278
  chrom = data.get("chrom") or j.stem.split("_")[-1]
279
  chrom_seqs.append(seq)
280
  chrom_ids.append(chrom)
281
  ########################
282
  cutoff = CaduceusEmbedder(device).chunk_size
283
- long_chroms = [(chrom, len(seq)) for chrom, seq in zip(chrom_ids, chrom_seqs) if len(seq) > cutoff]
 
 
 
 
284
  if long_chroms:
285
  print("⚠️ Chromosomes exceeding Caduceus max tokens ({}):".format(cutoff))
286
  for chrom, L in long_chroms:
@@ -290,10 +328,10 @@ if __name__=="__main__":
290
 
291
  ####################
292
  chrom_embedder = get_embedder(args.chrom_model, device, for_dna=True)
293
- out_chrom = Path(args.out_dir)/f"chrom_{args.chrom_model}.npy"
294
  embed_and_save(chrom_seqs, chrom_ids, chrom_embedder, out_chrom)
295
 
296
- #Load TF sequences
297
  tf_seqs, tf_ids = [], []
298
  for record in SeqIO.parse(args.tf_fasta, "fasta"):
299
  tf_ids.append(record.id)
@@ -304,4 +342,4 @@ if __name__=="__main__":
304
  out_tf = Path(args.out_dir) / f"tf_{args.tf_model}.npy"
305
  embed_and_save(tf_seqs, tf_ids, tf_embedder, out_tf)
306
 
307
- print("Done.")
 
14
  --out-dir ../data_files/processed/tfclust/hg38_tf/embeddings \
15
  --device cuda
16
  """
17
+
18
  import os
19
  import re
20
  import argparse
 
29
 
30
  # ---- model wrappers ----
31
 
32
+
33
  class CaduceusEmbedder:
34
  def __init__(self, device, chunk_size=131_072, overlap=0):
35
  """
 
41
  self.tokenizer = AutoTokenizer.from_pretrained(
42
  model_name, trust_remote_code=True
43
  )
44
+ self.model = (
45
+ AutoModel.from_pretrained(model_name, trust_remote_code=True)
46
+ .to(device)
47
+ .eval()
48
+ )
49
+ self.device = device
50
  self.chunk_size = chunk_size
51
+ self.step = chunk_size - overlap
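+ # e.g. (illustrative) chunk_size=131_072 and overlap=0 give step=131_072, i.e. adjacent non-overlapping chunks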
52
 
53
  def embed(self, seqs):
54
  """
 
82
  return_tensors="pt",
83
  padding=False,
84
  truncation=True,
85
+ max_length=self.chunk_size,
86
  ).to(self.device)
87
  with torch.no_grad():
88
  out = self.model(**toks).last_hidden_state # (1, L, D)
89
+ outputs.append(out.cpu().numpy()[0]) # (L, D)
90
 
91
  return np.stack(outputs, axis=0) # (N, L, D)
92
 
 
110
  t1 = time.perf_counter()
111
  print(f" length={sz:6,d} time={(t1-t0)*1000:7.1f} ms")
112
 
113
+
114
  class DNABertEmbedder:
115
  def __init__(self, device):
116
+ self.tokenizer = AutoTokenizer.from_pretrained(
117
+ "zhihan1996/DNA_bert_6", trust_remote_code=True
118
+ )
119
+ self.model = AutoModel.from_pretrained(
120
+ "zhihan1996/DNA_bert_6", trust_remote_code=True
121
+ ).to(device)
122
+ self.device = device
123
 
124
  def embed(self, seqs):
125
  embs = []
126
  for s in seqs:
127
+ tokens = self.tokenizer(s, return_tensors="pt", padding=True)[
128
+ "input_ids"
129
+ ].to(self.device)
130
  with torch.no_grad():
131
  out = self.model(tokens).last_hidden_state.mean(1)
132
  embs.append(out.cpu().numpy())
133
  return np.vstack(embs)
134
 
135
+
136
  class NucleotideTransformerEmbedder:
137
  def __init__(self, device):
138
  # HF “feature-extraction” returns a list of (L, D) arrays for each input
 
140
  self.pipe = pipeline(
141
  "feature-extraction",
142
  model="InstaDeepAI/nucleotide-transformer-500m-1000g",
143
+ device=(
144
+ -1 if device == "cpu" else 0
145
+ ), # HF uses -1 for CPU, 0 for GPU
146
  )
147
 
148
  def embed(self, seqs):
 
152
  """
153
  all_embeddings = self.pipe(seqs, truncation=True, padding=True)
154
  # all_embeddings is a List of shape (L, D) arrays
155
+ pooled = [np.mean(x, axis=0) for x in all_embeddings]
156
+ return np.vstack(pooled)
157
+
158
 
159
  class ESMEmbedder:
160
  def __init__(self, device):
 
172
  reps = results["representations"][33]
173
  return reps[:, 1:-1].mean(1).cpu().numpy()
174
 
175
+
176
  class ESMDBPEmbedder:
177
  def __init__(self, device):
178
+ base_model, alphabet = esm.pretrained.esm1b_t33_650M_UR50S()
179
  model_path = (
180
  Path(__file__).resolve().parent.parent
181
+ / "pretrained"
182
+ / "ESM-DBP"
183
+ / "ESM-DBP.model"
184
  )
185
  checkpoint = torch.load(model_path, map_location="cpu")
186
  clean_sd = {}
 
207
  # skip start/end tokens
208
  return reps[:, 1:-1].mean(1).cpu().numpy()
209
 
210
+
211
  class GPNEmbedder:
212
  def __init__(self, device):
213
  model_name = "songlab/gpn-msa-sapiens"
 
219
 
220
  def embed(self, seqs):
221
  inputs = self.tokenizer(
222
+ seqs, return_tensors="pt", padding=True, truncation=True
 
 
 
223
  ).to(self.device)
224
 
225
  with torch.no_grad():
226
  last_hidden = self.model(**inputs).last_hidden_state
227
  return last_hidden.mean(dim=1).cpu().numpy()
228
 
229
+
230
  class ProGenEmbedder:
231
  def __init__(self, device):
232
  model_name = "jinyuan22/ProGen2-base"
 
236
 
237
  def embed(self, seqs):
238
  inputs = self.tokenizer(
239
+ seqs, return_tensors="pt", padding=True, truncation=True
 
 
 
240
  ).to(self.device)
241
  with torch.no_grad():
242
  last_hidden = self.model(**inputs).last_hidden_state
243
  return last_hidden.mean(dim=1).cpu().numpy()
244
 
245
+
246
  # ---- main pipeline ----
247
 
248
+
249
  def get_embedder(name, device, for_dna=True):
250
  name = name.lower()
251
  if for_dna:
252
+ if name == "caduceus":
253
+ return CaduceusEmbedder(device)
254
+ if name == "dnabert":
255
+ return DNABertEmbedder(device)
256
+ if name == "nucleotide":
257
+ return NucleotideTransformerEmbedder(device)
258
+ if name == "gpn":
259
+ return GPNEmbedder(device)
260
  else:
261
+ if name in ("esm",):
262
+ return ESMEmbedder(device)
263
+ if name in ("esm-dbp", "esm_dbp"):
264
+ return ESMDBPEmbedder(device)
265
+ if name == "progen":
266
+ return ProGenEmbedder(device)
267
  raise ValueError(f"Unknown model {name} (for_dna={for_dna})")
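+ # Usage sketch (hypothetical inputs):
+ # embedder = get_embedder("dnabert", "cpu", for_dna=True)
+ # embs = embedder.embed(["ACGTACGTAC", "GGGTTTCCCA"]) # -> np.ndarray of shape (2, D)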
268
 
269
 
 
273
  with open(out_path.with_suffix(".ids"), "w") as f:
274
  f.write("\n".join(ids))
275
 
276
+
277
+ if __name__ == "__main__":
278
 
279
  p = argparse.ArgumentParser()
280
+ p.add_argument(
281
+ "--genome-json-dir",
282
+ default="data_files/raw/genomes/hg38",
283
+ help="dir of UCSC JSONs",
284
+ )
285
+ p.add_argument(
286
+ "--skip-dna",
287
+ action="store_true",
288
+ help="if set, skip the chromosome embedding step",
289
+ ) # if glm embeddings successful but not plm embeddings
290
+ p.add_argument("--tf-fasta", required=True, help="input TF FASTA file")
291
+ p.add_argument("--chrom-model", default="caduceus")
292
+ p.add_argument("--tf-model", default="esm-dbp")
293
+ p.add_argument(
294
+ "--out-dir", default="data_files/processed/tfclust/hg38_tf/embeddings"
295
+ )
296
+ p.add_argument("--device", default="cpu")
297
  args = p.parse_args()
298
 
299
  os.makedirs(args.out_dir, exist_ok=True)
300
  device = args.device
301
 
302
  if not args.skip_dna:
303
+ # Load only primary chromosome JSONs (chr1–22, X, Y, M)
304
  genome_dir = Path(args.genome_json_dir)
305
  chrom_seqs, chrom_ids = [], []
306
  primary_pattern = re.compile(r"^hg38_chr(?:[1-9]|1[0-9]|2[0-2]|X|Y|M)\.json$")
 
308
  if not primary_pattern.match(j.name):
309
  continue
310
  data = json.loads(j.read_text())
311
+ seq = data.get("dna") or data.get("sequence")
312
  chrom = data.get("chrom") or j.stem.split("_")[-1]
313
  chrom_seqs.append(seq)
314
  chrom_ids.append(chrom)
315
  ########################
316
  cutoff = CaduceusEmbedder(device).chunk_size
317
+ long_chroms = [
318
+ (chrom, len(seq))
319
+ for chrom, seq in zip(chrom_ids, chrom_seqs)
320
+ if len(seq) > cutoff
321
+ ]
322
  if long_chroms:
323
  print("⚠️ Chromosomes exceeding Caduceus max tokens ({}):".format(cutoff))
324
  for chrom, L in long_chroms:
 
328
 
329
  ####################
330
  chrom_embedder = get_embedder(args.chrom_model, device, for_dna=True)
331
+ out_chrom = Path(args.out_dir) / f"chrom_{args.chrom_model}.npy"
332
  embed_and_save(chrom_seqs, chrom_ids, chrom_embedder, out_chrom)
333
 
334
+ # Load TF sequences
335
  tf_seqs, tf_ids = [], []
336
  for record in SeqIO.parse(args.tf_fasta, "fasta"):
337
  tf_ids.append(record.id)
 
342
  out_tf = Path(args.out_dir) / f"tf_{args.tf_model}.npy"
343
  embed_and_save(tf_seqs, tf_ids, tf_embedder, out_tf)
344
 
345
+ print("Done.")
dpacman/{data/remap → data_tasks/fimo}/post_fimo.py RENAMED
@@ -7,11 +7,12 @@ import numpy as np
7
 
8
  # ─────────────────────────────────────────────────────────────────────────────
9
  # PATHS — edit these if needed
10
- INPUT_CSV = "/home/a03-akrishna/DPACMAN/data_files/processed/post_fimo.csv"
11
  OUTPUT_CSV = "/home/a03-akrishna/DPACMAN/data_files/processed/final.csv"
12
- JSON_DIR = "/home/a03-svincoff/DPACMAN/dpacman/data_files/raw/genomes/hg38"
13
  # ─────────────────────────────────────────────────────────────────────────────
14
 
 
15
  def load_chrom_dna(chrom, cache):
16
  """Load & cache the full chromosome 'dna' string from hg38_chr{chrom}.json."""
17
  if chrom in cache:
@@ -24,32 +25,34 @@ def load_chrom_dna(chrom, cache):
24
  cache[chrom] = data["dna"]
25
  return cache[chrom]
26
 
 
27
  def sigmoid_array(arr: np.ndarray) -> np.ndarray:
28
  """Elementwise logistic sigmoid → values in (0,1)."""
29
  return 1.0 / (1.0 + np.exp(-arr))
30
 
 
31
  def main():
32
  # 1) load post‐FIMO results
33
  df = pd.read_csv(INPUT_CSV)
34
 
35
  dna_cache = {}
36
- records = []
37
 
38
  # 2) for each TF‐peak row, extract sequence & build scores
39
  for _, row in df.iterrows():
40
- tfid = row["TF_id"]
41
- chrom = str(row["#chrom"])
42
- cstart = int(row["contextStart"])
43
- cend = int(row["contextEnd"])
44
- peak_s = int(row["ChIPStart"])
45
- peak_e = int(row["ChIPEnd"])
46
  chipscore = int(row["chipscore"])
47
- jaspar = str(row["jaspar"])
48
 
49
  # pull out the exact context sequence (including any Ns)
50
  dna = load_chrom_dna(chrom, dna_cache)
51
  seq = dna[cstart:cend]
52
- L = len(seq)
53
 
54
  # initialize base‐resolution scores
55
  scores = np.zeros(L, dtype=int)
@@ -68,37 +71,50 @@ def main():
68
  scores[hs_i:he_i] = chipscore + 100
69
 
70
  # stringify the raw scores
71
- score_str = ",".join(map(str, scores.tolist()))
72
 
73
  # sigmoid‐transform
74
- sig_vals = sigmoid_array(scores.astype(float))
75
- score_sig = ",".join(f"{v:.4f}" for v in sig_vals.tolist())
76
-
77
- records.append({
78
- "TF_id": tfid,
79
- "dna_sequence": seq,
80
- "score_str": score_str,
81
- "score_sig_r2": score_sig
82
- })
 
 
83
 
84
  # 3) assemble into a DataFrame
85
  final_df = pd.DataFrame.from_records(records)
86
 
87
  # 4) drop any exact TF+DNA duplicates
88
- final_df = final_df.drop_duplicates(subset=["TF_id","dna_sequence"]).reset_index(drop=True)
 
 
89
 
90
  # 5) assign random IDs
91
- tf_map = {tf: uuid.uuid4().hex[:8] for tf in final_df["TF_id"].unique()}
92
  dna_map = {sq: uuid.uuid4().hex[:8] for sq in final_df["dna_sequence"].unique()}
93
 
94
- final_df["tf_seq_id"] = final_df["TF_id"].map(tf_map)
95
  final_df["dna_seq_id"] = final_df["dna_sequence"].map(dna_map)
96
- final_df["ID"] = final_df["tf_seq_id"] + "_" + final_df["dna_seq_id"]
97
 
98
  # 6) reorder and write out
99
- cols = ["TF_id","tf_seq_id","dna_sequence","dna_seq_id","score_str","score_sig_r2","ID"]
 
 
 
 
 
 
 
 
100
  final_df[cols].to_csv(OUTPUT_CSV, index=False)
101
  print(f"Wrote {len(final_df)} rows → {OUTPUT_CSV}")
102
 
 
103
  if __name__ == "__main__":
104
  main()
 
7
 
8
  # ─────────────────────────────────────────────────────────────────────────────
9
  # PATHS — edit these if needed
10
+ INPUT_CSV = "/home/a03-akrishna/DPACMAN/data_files/processed/post_fimo.csv"
11
  OUTPUT_CSV = "/home/a03-akrishna/DPACMAN/data_files/processed/final.csv"
12
+ JSON_DIR = "/home/a03-svincoff/DPACMAN/dpacman/data_files/raw/genomes/hg38"
13
  # ─────────────────────────────────────────────────────────────────────────────
14
 
15
+
16
  def load_chrom_dna(chrom, cache):
17
  """Load & cache the full chromosome 'dna' string from hg38_chr{chrom}.json."""
18
  if chrom in cache:
 
25
  cache[chrom] = data["dna"]
26
  return cache[chrom]
27
 
28
+
29
  def sigmoid_array(arr: np.ndarray) -> np.ndarray:
30
  """Elementwise logistic sigmoid → values in (0,1)."""
31
  return 1.0 / (1.0 + np.exp(-arr))
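+ # e.g. sigmoid_array(np.array([-2.0, 0.0, 2.0])) -> approx [0.1192, 0.5, 0.8808]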
32
 
33
+
34
  def main():
35
  # 1) load post‐FIMO results
36
  df = pd.read_csv(INPUT_CSV)
37
 
38
  dna_cache = {}
39
+ records = []
40
 
41
  # 2) for each TF‐peak row, extract sequence & build scores
42
  for _, row in df.iterrows():
43
+ tfid = row["TF_id"]
44
+ chrom = str(row["#chrom"])
45
+ cstart = int(row["contextStart"])
46
+ cend = int(row["contextEnd"])
47
+ peak_s = int(row["ChIPStart"])
48
+ peak_e = int(row["ChIPEnd"])
49
  chipscore = int(row["chipscore"])
50
+ jaspar = str(row["jaspar"])
51
 
52
  # pull out the exact context sequence (including any Ns)
53
  dna = load_chrom_dna(chrom, dna_cache)
54
  seq = dna[cstart:cend]
55
+ L = len(seq)
56
 
57
  # initialize base‐resolution scores
58
  scores = np.zeros(L, dtype=int)
 
71
  scores[hs_i:he_i] = chipscore + 100
72
 
73
  # stringify the raw scores
74
+ score_str = ",".join(map(str, scores.tolist()))
75
 
76
  # sigmoid‐transform
77
+ sig_vals = sigmoid_array(scores.astype(float))
78
+ score_sig = ",".join(f"{v:.4f}" for v in sig_vals.tolist())
79
+
80
+ records.append(
81
+ {
82
+ "TF_id": tfid,
83
+ "dna_sequence": seq,
84
+ "score_str": score_str,
85
+ "score_sig_r2": score_sig,
86
+ }
87
+ )
88
 
89
  # 3) assemble into a DataFrame
90
  final_df = pd.DataFrame.from_records(records)
91
 
92
  # 4) drop any exact TF+DNA duplicates
93
+ final_df = final_df.drop_duplicates(subset=["TF_id", "dna_sequence"]).reset_index(
94
+ drop=True
95
+ )
96
 
97
  # 5) assign random IDs
98
+ tf_map = {tf: uuid.uuid4().hex[:8] for tf in final_df["TF_id"].unique()}
99
  dna_map = {sq: uuid.uuid4().hex[:8] for sq in final_df["dna_sequence"].unique()}
100
 
101
+ final_df["tf_seq_id"] = final_df["TF_id"].map(tf_map)
102
  final_df["dna_seq_id"] = final_df["dna_sequence"].map(dna_map)
103
+ final_df["ID"] = final_df["tf_seq_id"] + "_" + final_df["dna_seq_id"]
104
 
105
  # 6) reorder and write out
106
+ cols = [
107
+ "TF_id",
108
+ "tf_seq_id",
109
+ "dna_sequence",
110
+ "dna_seq_id",
111
+ "score_str",
112
+ "score_sig_r2",
113
+ "ID",
114
+ ]
115
  final_df[cols].to_csv(OUTPUT_CSV, index=False)
116
  print(f"Wrote {len(final_df)} rows → {OUTPUT_CSV}")
117
 
118
+
119
  if __name__ == "__main__":
120
  main()
dpacman/data_tasks/fimo/pre_fimo.py ADDED
@@ -0,0 +1,72 @@
1
+ #!/usr/bin/env python3
2
+ import pandas as pd
3
+ import numpy as np
4
+ import rootutils
5
+ import logging
6
+ import os
7
+ from omegaconf import DictConfig
8
+ from pathlib import Path
9
+
10
+ root = rootutils.setup_root(__file__, indicator=".project-root", pythonpath=True)
11
+ logger = logging.getLogger(__name__)
12
+
13
+
14
+ def main(cfg: DictConfig):
15
+ # 1) load
16
+ input_path = Path(root) / cfg.data_task.input_csv
17
+ df = pd.read_csv(input_path, sep="\t")
18
+
19
+ # 2) normalize chromosome names and keep only primary chromosomes (1-22, X, Y)
20
+ df["chrom"] = df["chrom"].str.replace(r"^chr", "", regex=True)
21
+
22
+ valid = [str(i) for i in range(1, 23)] + ["X", "Y"]
23
+ df = df[df["chrom"].isin(valid)].reset_index(drop=True)
24
+
25
+ # 3) explode TF names
26
+ df["tr_list"] = df["tr"].str.split(",")
27
+ df = df.explode("tr_list").rename(columns={"tr_list": "TR"})
28
+ df["TR"] = df["TR"].str.strip()
29
+
30
+ # 4) draw a random left-flank between 0 and window_total (from the config),
31
+ # then right-flank is whatever remains to sum to window_total
32
+ n = len(df)
33
+ df["left_context"] = np.random.randint(0, cfg.data_task.window_total + 1, size=n)
34
+ df["right_context"] = cfg.data_task.window_total - df["left_context"]
35
+
36
+ # 5) compute contextStart / contextEnd
37
+ df["contextStart"] = (
38
+ (df["chromStart"] - df["left_context"]).clip(lower=0).astype(int)
39
+ )
40
+ df["contextEnd"] = (df["chromEnd"] + df["right_context"]).astype(int)
41
+
42
+ # 6) assemble output
43
+ out = df[
44
+ [
45
+ "chrom",
46
+ "contextStart",
47
+ "chromStart", # original ChIPStart
48
+ "chromEnd", # original ChIPEnd
49
+ "contextEnd",
50
+ "score", # original score column
51
+ "TF",
52
+ ]
53
+ ].rename(
54
+ columns={
55
+ "chrom": "#chrom",
56
+ "chromStart": "ChIPStart",
57
+ "chromEnd": "ChIPEnd",
58
+ "score": "chipscore",
59
+ }
60
+ )
61
+
62
+ # 7) make folder for the output TSV
63
+ output_path = Path(root) / cfg.data_task.output_csv
64
+ os.makedirs(output_path.parent, exist_ok=True)
65
+
66
+ # 8) write tsv
67
+ out.to_csv(output_path, sep="\t", index=False)
68
+ print(f"Wrote {len(out)} rows to {output_path}")
69
+
70
+
71
+ if __name__ == "__main__":
72
+ main()
dpacman/{data/remap → data_tasks/fimo}/run_fimo.py RENAMED
@@ -5,63 +5,50 @@ import subprocess
5
  import pandas as pd
6
  from multiprocessing import Pool, cpu_count
7
  from tqdm import tqdm
 
 
 
 
8
 
9
- # ─────────────────────────────────────────────────────────────────────────────
10
- # CONFIG — edit these paths if needed
11
- INPUT_CSV = "/home/a03-akrishna/DPACMAN/data_files/processed/clean_pre_fimo.csv"
12
- OUTPUT_CSV = "/home/a03-akrishna/DPACMAN/data_files/processed/post_fimo.csv"
13
- JSON_DIR = "/home/a03-svincoff/DPACMAN/dpacman/data_files/raw/genomes/hg38"
14
 
15
- # Full paths to MEME‐suite binaries
16
- FIMO_BIN = "/home/a03-svincoff/meme/bin/fimo"
17
- FASTA_GET_MARKOV = "/home/a03-svincoff/meme/libexec/meme-5.5.8/fasta-get-markov"
18
 
19
- # JASPAR MEME file
20
- MOTIF_FILE = "/home/a03-svincoff/DPACMAN/dpacman/softwares/meme-5.5.8/tests/common/JASPAR_CORE_2014_vertebrates.meme"
21
-
22
- # Working filenames
23
- SEQ_FASTA = "to_scan.fa"
24
- BG_MODEL = "bg_model.txt"
25
- FIMO_OUTDIR = "fimo_out"
26
-
27
- # FIMO parameters
28
- PVAL_THRESH = 1e-4
29
- MAX_STORED = 1000000
30
-
31
- # How many parallel FIMO jobs (defaults to all cores)
32
- N_JOBS = cpu_count()
33
- # ─────────────────────────────────────────────────────────────────────────────
34
-
35
- def load_chrom_dna(chrom, cache):
36
  if chrom in cache:
37
  return cache[chrom]
38
- fname = os.path.join(JSON_DIR, f"hg38_chr{chrom}.json")
39
  if not os.path.isfile(fname):
40
  raise FileNotFoundError(f"Chrom JSON not found: {fname}")
41
  with open(fname) as f:
42
  cache[chrom] = json.load(f)["dna"]
43
  return cache[chrom]
44
 
45
- def extract_sequences(df):
 
46
  dna_cache = {}
47
- with open(SEQ_FASTA, "w") as fa:
48
  for idx, row in df.iterrows():
49
  chrom = str(row["#chrom"])
50
- dna = load_chrom_dna(chrom, dna_cache)
51
  start = int(row["contextStart"])
52
- end = int(row["contextEnd"])
53
- seq = dna[start:end]
54
  fa.write(f">{idx}\n{seq}\n")
55
 
56
- def run_markov():
57
- subprocess.check_call([FASTA_GET_MARKOV, SEQ_FASTA, BG_MODEL],
58
- stdout=subprocess.DEVNULL,
59
- stderr=subprocess.DEVNULL)
60
 
61
- def split_fasta(n_chunks):
 
 
 
 
 
 
 
 
62
  """Round-robin split SEQ_FASTA into chunked FASTA files."""
63
- out_handles = [open(f"to_scan_{i}.fa","w") for i in range(n_chunks)]
64
- with open(SEQ_FASTA) as inf:
65
  header = None
66
  seq_lines = []
67
  for line in inf:
@@ -83,78 +70,139 @@ def split_fasta(n_chunks):
83
  o.close()
84
  return [f"to_scan_{i}.fa" for i in range(n_chunks)]
85
 
86
- def run_fimo_chunk(args):
87
- """Run FIMO on one FASTA chunk."""
88
- chunk_id, fasta_path = args
89
- outdir = f"{FIMO_OUTDIR}_{chunk_id}"
 
 
 
 
 
 
 
 
 
 
 
 
90
  os.makedirs(outdir, exist_ok=True)
91
- print(f"Chunk {chunk_id} starting FIMO", flush=True)
92
- subprocess.check_call([
93
- FIMO_BIN,
94
- "--oc", outdir,
95
- "--bgfile", BG_MODEL,
96
- "--max-stored-scores", str(MAX_STORED),
97
- "--thresh", str(PVAL_THRESH),
98
- MOTIF_FILE,
99
- fasta_path
100
- ])
101
- print(f"▶ Chunk {chunk_id} finished", flush=True)
 
 
 
 
 
 
102
  return os.path.join(outdir, "fimo.tsv")
103
 
 
104
  def annotate_with_fimo(df, fimo_tsv):
105
  fdf = pd.read_csv(fimo_tsv, sep="\t", comment="#")
106
  fdf["idx"] = fdf["sequence_name"].astype(int)
107
- fdf = fdf.merge(df[["idx","contextStart"]], on="idx", how="left")
108
  fdf["genomic_start"] = fdf["contextStart"] + fdf["start"] - 1
109
- fdf["genomic_end"] = fdf["contextStart"] + fdf["stop"]
110
  fdf["coord"] = (
111
- fdf["genomic_start"].astype(str)
112
- + "-" +
113
- fdf["genomic_end"].astype(str)
114
  )
115
  agg = fdf.groupby("idx")["coord"].agg(lambda hits: ",".join(hits))
116
  df["jaspar"] = df["idx"].map(agg).fillna("")
117
  return df
118
 
119
- def main():
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
120
  # 1) load & explode
121
- df = pd.read_csv(INPUT_CSV, low_memory=False)
122
- df = df.reset_index().rename(columns={"index":"idx"})
 
123
  df["TF_occurrence"] = df.groupby("TF").cumcount() + 1
124
- df["TF_id"] = df["TF"] + "_seq" + df["TF_occurrence"].astype(str)
125
 
126
  # 2) extract sequences & build BG model
127
- extract_sequences(df)
128
- print("Building background model…", flush=True)
129
- run_markov()
130
 
131
  # 3) chunk FASTA and run FIMO in parallel
132
- chunks = split_fasta(N_JOBS)
133
- print(f"▶ Running FIMO in parallel ({N_JOBS} jobs)…", flush=True)
134
- with Pool(N_JOBS) as pool:
135
- tsv_paths = list(tqdm(
136
- pool.imap(run_fimo_chunk, enumerate(chunks)),
137
- total=len(chunks),
138
- desc="FIMO chunks",
139
- leave=True
140
- ))
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
141
 
142
  # 4) merge chunked TSVs
143
- combined = pd.concat([
144
- pd.read_csv(tsv, sep="\t", comment="#")
145
- for tsv in tsv_paths
146
- ], ignore_index=True)
147
  merged_tsv = "fimo_combined.tsv"
148
  combined.to_csv(merged_tsv, sep="\t", index=False)
149
 
150
  # 5) annotate & write final CSV
151
  df = annotate_with_fimo(df, merged_tsv)
152
- final = df[[
153
- "#chrom","contextStart","ChIPStart","ChIPEnd",
154
- "contextEnd","chipscore","TF","TF_id","jaspar"
155
- ]]
156
- final.to_csv(OUTPUT_CSV, index=False)
157
- print(f"▶ Wrote {len(final)} rows → {OUTPUT_CSV}")
 
 
 
 
 
 
 
 
 
 
 
158
 
159
  if __name__ == "__main__":
160
  main()
 
  import pandas as pd
  from multiprocessing import Pool, cpu_count
  from tqdm import tqdm
+ import rootutils
+ import logging
+ from omegaconf import DictConfig
+ from pathlib import Path

+ root = rootutils.setup_root(__file__, indicator=".project-root", pythonpath=True)
+ logger = logging.getLogger(__name__)


+ def load_chrom_dna(chrom, cache, json_dir):
      if chrom in cache:
          return cache[chrom]
+     fname = os.path.join(json_dir, f"hg38_chr{chrom}.json")
      if not os.path.isfile(fname):
          raise FileNotFoundError(f"Chrom JSON not found: {fname}")
      with open(fname) as f:
          cache[chrom] = json.load(f)["dna"]
      return cache[chrom]

+
+ def extract_sequences(df, seq_fasta, json_dir):
      dna_cache = {}
+     with open(seq_fasta, "w") as fa:
          for idx, row in df.iterrows():
              chrom = str(row["#chrom"])
+             dna = load_chrom_dna(chrom, dna_cache, json_dir)
              start = int(row["contextStart"])
+             end = int(row["contextEnd"])
+             seq = dna[start:end]
              fa.write(f">{idx}\n{seq}\n")


+ def run_markov(fasta_get_markov, seq_fasta, bg_model):
+     subprocess.check_call(
+         [fasta_get_markov, seq_fasta, bg_model],
+         stdout=subprocess.DEVNULL,
+         stderr=subprocess.DEVNULL,
+     )
+
+
+ def split_fasta(n_chunks, seq_fasta):
      """Round-robin split SEQ_FASTA into chunked FASTA files."""
+     out_handles = [open(f"to_scan_{i}.fa", "w") for i in range(n_chunks)]
+     with open(seq_fasta) as inf:
          header = None
          seq_lines = []
          for line in inf:

          o.close()
      return [f"to_scan_{i}.fa" for i in range(n_chunks)]

+
+ def run_fimo_chunk(cfg):
+     """
+     Run FIMO for a chunk.
+     Args:
+         cfg: dict with keys:
+             - chunk_id
+             - fasta_path
+             - fimo_outdir
+             - fimo_bin
+             - bg_model
+             - max_stored
+             - motif_file
+             - pval_thresh
+     """
+     outdir = f"{cfg['fimo_outdir']}_{cfg['chunk_id']}"
      os.makedirs(outdir, exist_ok=True)
+     logger.info(f"Chunk {cfg['chunk_id']} starting FIMO")
+     subprocess.check_call(
+         [
+             cfg["fimo_bin"],
+             "--oc",
+             outdir,
+             "--bgfile",
+             cfg["bg_model"],
+             "--max-stored-scores",
+             str(cfg["max_stored"]),
+             "--thresh",
+             str(cfg["pval_thresh"]),
+             cfg["motif_file"],
+             cfg["fasta_path"],
+         ]
+     )
+     logger.info(f"Chunk {cfg['chunk_id']} finished")
      return os.path.join(outdir, "fimo.tsv")

+
  def annotate_with_fimo(df, fimo_tsv):
      fdf = pd.read_csv(fimo_tsv, sep="\t", comment="#")
      fdf["idx"] = fdf["sequence_name"].astype(int)
+     fdf = fdf.merge(df[["idx", "contextStart"]], on="idx", how="left")
      fdf["genomic_start"] = fdf["contextStart"] + fdf["start"] - 1
+     fdf["genomic_end"] = fdf["contextStart"] + fdf["stop"]
      fdf["coord"] = (
+         fdf["genomic_start"].astype(str) + "-" + fdf["genomic_end"].astype(str)
      )
      agg = fdf.groupby("idx")["coord"].agg(lambda hits: ",".join(hits))
      df["jaspar"] = df["idx"].map(agg).fillna("")
      return df

+
+ def main(cfg: DictConfig):
+     """
+     Main method for running FIMO analysis, searching JASPAR motifs against ChIP-seq peaks.
+     """
+     # 0) configs
+     paths = cfg.data_task.paths
+     fimo = cfg.data_task.fimo
+     fnames = cfg.data_task.fnames
+     meme = cfg.data_task.meme
+
+     # set njobs to the machine maximum, or to the number specified by the user
+     njobs = fimo.njobs
+     if njobs == "max":
+         njobs = cpu_count() - 1
+     else:
+         njobs = min(cpu_count() - 1, int(njobs))
+
      # 1) load & explode
+     input_csv_path = Path(root) / paths.input_csv
+     df = pd.read_csv(input_csv_path, low_memory=False)
+     df = df.reset_index().rename(columns={"index": "idx"})
      df["TF_occurrence"] = df.groupby("TF").cumcount() + 1
+     df["TF_id"] = df["TF"] + "_seq" + df["TF_occurrence"].astype(str)

      # 2) extract sequences & build BG model
+     extract_sequences(df, fnames.seq_fasta, paths.json_dir)
+     logger.info("Building background model…")
+     run_markov(meme.fasta_get_markov, fnames.seq_fasta, fnames.bg_model)

      # 3) chunk FASTA and run FIMO in parallel
155
+ chunk_cfgs = [
156
+ dict(
157
+ chunk_id=i,
158
+ fasta_path=chunk,
159
+ fimo_outdir=fnames.fimo_outdir,
160
+ fimo_bin=paths.fimo_bin,
161
+ bg_model=fnames.bg_model,
162
+ max_stored=fimo.max_stored,
163
+ motif_file=meme.jaspar_motif_file,
164
+ pval_thresh=fimo.pval_thresh,
165
+ )
166
+ for i, chunk in enumerate(chunks)
167
+ ]
168
+ logger.info(f"Running FIMO in parallel ({njobs} jobs)…")
169
+ with Pool(njobs) as pool:
170
+ tsv_paths = list(
171
+ tqdm(
172
+ pool.imap(run_fimo_chunk, chunk_cfgs),
173
+ total=len(chunks),
174
+ desc="FIMO chunks",
175
+ leave=True,
176
+ )
177
+ )
178
 
179
  # 4) merge chunked TSVs
180
+ combined = pd.concat(
181
+ [pd.read_csv(tsv, sep="\t", comment="#") for tsv in tsv_paths],
182
+ ignore_index=True,
183
+ )
184
  merged_tsv = "fimo_combined.tsv"
185
  combined.to_csv(merged_tsv, sep="\t", index=False)
186
 
187
  # 5) annotate & write final CSV
188
  df = annotate_with_fimo(df, merged_tsv)
189
+ final = df[
190
+ [
191
+ "#chrom",
192
+ "contextStart",
193
+ "ChIPStart",
194
+ "ChIPEnd",
195
+ "contextEnd",
196
+ "chipscore",
197
+ "TF",
198
+ "TF_id",
199
+ "jaspar",
200
+ ]
201
+ ]
202
+ output_csv_path = Path(root) / paths.output_csv
203
+ final.to_csv(output_csv_path, index=False)
204
+ logger.info(f"Wrote {len(final)} rows → {output_csv_path}")
205
+
206
 
207
  if __name__ == "__main__":
208
  main()
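For reference, the refactored `main` reads everything it needs from the selected `data_task` config. Below is a minimal sketch of a compatible config built in-process with OmegaConf; the key names match what `main` dereferences above, but the values are illustrative placeholders (the paths mirror the old module-level constants), not the actual contents of `configs/data_task/fimo/run_fimo.yaml`.

    from omegaconf import OmegaConf

    # Placeholder values; the real ones live in configs/data_task/fimo/run_fimo.yaml.
    cfg = OmegaConf.create(
        {
            "data_task": {
                "name": "run_fimo",
                "type": "fimo",
                "paths": {
                    "input_csv": "dpacman/data_files/processed/clean_pre_fimo.csv",
                    "output_csv": "dpacman/data_files/processed/post_fimo.csv",
                    "json_dir": "dpacman/data_files/raw/genomes/hg38",
                    "fimo_bin": "/path/to/meme/bin/fimo",
                },
                "meme": {
                    "fasta_get_markov": "/path/to/meme/libexec/fasta-get-markov",
                    "jaspar_motif_file": "/path/to/JASPAR_CORE_2014_vertebrates.meme",
                },
                "fnames": {
                    "seq_fasta": "to_scan.fa",
                    "bg_model": "bg_model.txt",
                    "fimo_outdir": "fimo_out",
                },
                "fimo": {"njobs": "max", "pval_thresh": 1e-4, "max_stored": 1000000},
            }
        }
    )
    # main(cfg) would then run the extract → background → FIMO → annotate pipeline.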
dpacman/data_tasks/visualize/__init__.py ADDED
File without changes
dpacman/{data → data_tasks/visualize}/visualizations.py RENAMED
@@ -5,6 +5,7 @@ import glob
  import re
  from pathlib import Path

  def trim_sequence(seq: str, seq_flanked: str, total_len: int):
      """
      Return a substring of seq_flanked of length total_len that contains seq
@@ -34,47 +35,48 @@ def process_and_plot(input_csv: str, total_len: int, output_csv: Path, fig_dir:
      ups, downs, abs_pos, rel_pos = [], [], [], []
      trimmed_seqs = []
      for _, row in df.iterrows():
-         trimmed, u, d = trim_sequence(row['seq'], row['seq_flanked'], total_len)
          trimmed_seqs.append(trimmed)
          ups.append(u)
          downs.append(d)
          abs_pos.append(u)
-         rel_pos.append(u / (total_len - len(row['seq'])))
      df_out = df.copy()
-     df_out['seq_trimmed'] = trimmed_seqs
-     df_out['motif_abs_start'] = abs_pos
-     df_out['motif_rel_pos'] = rel_pos
      df_out.to_csv(output_csv, index=False)

      basename = input_csv.stem
      # Absolute position histogram
-     plt.figure(figsize=(6,4))
-     plt.hist(df_out['motif_abs_start'], bins=50, edgecolor='k')
-     plt.title(f'{basename}: Absolute Motif Start')
-     plt.xlabel('Start Index (nt)')
-     plt.ylabel('Count')
      plt.tight_layout()
      plt.savefig(fig_dir / f"{basename}_abs.png")
      plt.close()
      # Relative position histogram
-     plt.figure(figsize=(6,4))
-     plt.hist(df_out['motif_rel_pos'], bins=50, edgecolor='k')
-     plt.title(f'{basename}: Relative Motif Position')
-     plt.xlabel('Relative Position')
-     plt.ylabel('Count')
      plt.tight_layout()
      plt.savefig(fig_dir / f"{basename}_rel.png")
      plt.close()

- if __name__ == '__main__':
      # === USER SETTINGS ===
-     PATTERN = '/home/a03-svincoff/DPACMAN/dpacman/data_files/processed/tfclust/hg38/encRegTfbsClustered_hg38_chr*.csv'
-     CHR_FILTER = re.compile(
-         r'encRegTfbsClustered_hg38_chr([1-9]|1[0-9]|2[0-2]|X|Y)\.csv$'
      )
-     DESIRED_LEN = 1000
-     OUTPUT_DIR = Path('trimmed_csvs')
-     FIG_DIR = Path('figures')
      # =====================

      OUTPUT_DIR.mkdir(exist_ok=True)
  import re
  from pathlib import Path

+
  def trim_sequence(seq: str, seq_flanked: str, total_len: int):
      """
      Return a substring of seq_flanked of length total_len that contains seq

      ups, downs, abs_pos, rel_pos = [], [], [], []
      trimmed_seqs = []
      for _, row in df.iterrows():
+         trimmed, u, d = trim_sequence(row["seq"], row["seq_flanked"], total_len)
          trimmed_seqs.append(trimmed)
          ups.append(u)
          downs.append(d)
          abs_pos.append(u)
+         rel_pos.append(u / (total_len - len(row["seq"])))
      df_out = df.copy()
+     df_out["seq_trimmed"] = trimmed_seqs
+     df_out["motif_abs_start"] = abs_pos
+     df_out["motif_rel_pos"] = rel_pos
      df_out.to_csv(output_csv, index=False)

      basename = input_csv.stem
      # Absolute position histogram
+     plt.figure(figsize=(6, 4))
+     plt.hist(df_out["motif_abs_start"], bins=50, edgecolor="k")
+     plt.title(f"{basename}: Absolute Motif Start")
+     plt.xlabel("Start Index (nt)")
+     plt.ylabel("Count")
      plt.tight_layout()
      plt.savefig(fig_dir / f"{basename}_abs.png")
      plt.close()
      # Relative position histogram
+     plt.figure(figsize=(6, 4))
+     plt.hist(df_out["motif_rel_pos"], bins=50, edgecolor="k")
+     plt.title(f"{basename}: Relative Motif Position")
+     plt.xlabel("Relative Position")
+     plt.ylabel("Count")
      plt.tight_layout()
      plt.savefig(fig_dir / f"{basename}_rel.png")
      plt.close()

+
+ if __name__ == "__main__":
      # === USER SETTINGS ===
+     PATTERN = "/home/a03-svincoff/DPACMAN/dpacman/data_files/processed/tfclust/hg38/encRegTfbsClustered_hg38_chr*.csv"
+     CHR_FILTER = re.compile(
+         r"encRegTfbsClustered_hg38_chr([1-9]|1[0-9]|2[0-2]|X|Y)\.csv$"
      )
+     DESIRED_LEN = 1000
+     OUTPUT_DIR = Path("trimmed_csvs")
+     FIG_DIR = Path("figures")
      # =====================

      OUTPUT_DIR.mkdir(exist_ok=True)
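The `__main__` block above drives batch processing over the per-chromosome CSVs; for a single file, `process_and_plot` can also be called directly. A minimal sketch (the input filename is illustrative, and the import path assumes the new module location introduced by this commit):

    from pathlib import Path

    from dpacman.data_tasks.visualize.visualizations import process_and_plot

    out_dir = Path("trimmed_csvs")
    fig_dir = Path("figures")
    out_dir.mkdir(exist_ok=True)
    fig_dir.mkdir(exist_ok=True)

    # One per-chromosome CSV; writes <name>.csv plus <name>_abs.png and <name>_rel.png.
    csv = Path("encRegTfbsClustered_hg38_chr21.csv")
    process_and_plot(csv, total_len=1000, output_csv=out_dir / csv.name, fig_dir=fig_dir)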
dpacman/scripts/__init__.py ADDED
File without changes
dpacman/scripts/preprocess.py ADDED
@@ -0,0 +1,58 @@
+ import rootutils
+
+ root = rootutils.setup_root(__file__, indicator=".project-root", pythonpath=True)
+
+ import hydra
+ from omegaconf import DictConfig
+ import logging
+
+ logger = logging.getLogger(__name__)
+
+ # import your processing entry points here
+ from dpacman.data_tasks.download.genome import main as download_genome_main
+ from dpacman.data_tasks.download.remap import main as download_remap_main
+ from dpacman.data_tasks.clean.remap import main as clean_remap_main
+ from dpacman.data_tasks.fimo.pre_fimo import main as pre_fimo_main
+ from dpacman.data_tasks.fimo.run_fimo import main as run_fimo_main
+ from dpacman.data_tasks.fimo.post_fimo import main as post_fimo_main
+
+
+ @hydra.main(
+     config_path=str(root / "configs"), config_name="preprocess", version_base="1.3"
+ )
+ def main(cfg: DictConfig):
+     task_type = cfg.data_task.type
+     task_name = cfg.data_task.name.lower()
+
+     logger.info(f"Running {task_type} task: {task_name}")
+
+     if task_type == "download":
+         if task_name == "genome":
+             download_genome_main(cfg)
+         elif task_name == "remap":
+             download_remap_main(cfg)
+         else:
+             raise ValueError(f"No download pipeline defined for: {task_name}")
+
+     elif task_type == "clean":
+         if task_name == "remap":
+             clean_remap_main(cfg)
+         else:
+             raise ValueError(f"No clean pipeline defined for: {task_name}")
+
+     elif task_type == "fimo":
+         if task_name == "pre_fimo":
+             pre_fimo_main(cfg)
+         elif task_name == "run_fimo":
+             run_fimo_main(cfg)
+         elif task_name == "post_fimo":
+             post_fimo_main(cfg)
+         else:
+             raise ValueError(f"No fimo pipeline defined for: {task_name}")
+
+     else:
+         raise ValueError(f"Unknown task type: {task_type}")
+
+
+ if __name__ == "__main__":
+     main()
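The dispatcher is meant to be launched through Hydra's CLI (see the run scripts below), but for debugging a single stage it can help to compose the same config in-process. A sketch using Hydra's compose API, assuming the working directory sits under the repository root that carries the `.project-root` marker added in this commit:

    import rootutils
    from hydra import compose, initialize_config_dir

    root = rootutils.setup_root(".", indicator=".project-root", pythonpath=True)

    # Build the same config tree the CLI would, selecting the clean/remap task.
    with initialize_config_dir(config_dir=str(root / "configs"), version_base="1.3"):
        cfg = compose(config_name="preprocess", overrides=["data_task=clean/remap"])

    # Call the task entry point directly, bypassing the @hydra.main wrapper.
    from dpacman.data_tasks.clean.remap import main as clean_remap_main
    clean_remap_main(cfg)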
dpacman/scripts/run_download.sh ADDED
@@ -0,0 +1,16 @@
+ #!/bin/bash
+
+ # Manually specify values used in the config
+ main_task="preprocess"
+ data_task_type="download"
+ timestamp=$(date "+%Y-%m-%d_%H-%M-%S")
+
+ run_dir="$HOME/DPACMAN/logs/${main_task}/${data_task_type}/runs/${timestamp}"
+ mkdir -p "$run_dir"
+
+ CUDA_VISIBLE_DEVICES=0 nohup python -u -m scripts.preprocess \
+     hydra.run.dir="${run_dir}" \
+     data_task=${data_task_type}/remap \
+     > "${run_dir}/run.log" 2>&1 &
+
+ echo $! > "${run_dir}/pid.txt"
dpacman/scripts/run_fimo.sh ADDED
@@ -0,0 +1,16 @@
+ #!/bin/bash
+
+ # Manually specify values used in the config
+ main_task="preprocess"
+ data_task_type="fimo"
+ timestamp=$(date "+%Y-%m-%d_%H-%M-%S")
+
+ run_dir="$HOME/DPACMAN/logs/${main_task}/${data_task_type}/runs/${timestamp}"
+ mkdir -p "$run_dir"
+
+ CUDA_VISIBLE_DEVICES=0 nohup python -u -m scripts.preprocess \
+     hydra.run.dir="${run_dir}" \
+     data_task=${data_task_type}/pre_fimo \
+     > "${run_dir}/run.log" 2>&1 &
+
+ echo $! > "${run_dir}/pid.txt"
environment.yaml CHANGED
@@ -26,9 +26,13 @@ dependencies:
    - pip>=23
    - pip:
      - rootutils
      - pandas==2.2.3
      - lxml==5.3.0
      - pymex==0.9.31
      - gitpython==3.1.44
-     - matplotlib
      - -e .

    - pip>=23
    - pip:
      - rootutils
+     - hydra-core==1.3.2 # Hydra for config management
+     - hydra-colorlog==1.2.0 # Allow colorful logging in Hydra
+     - omegaconf==2.3.0 # Required by hydra-core
      - pandas==2.2.3
      - lxml==5.3.0
      - pymex==0.9.31
      - gitpython==3.1.44
+     - black==25.1.0 # code formatter
+     - matplotlib==3.10.3
      - -e .