File size: 1,620 Bytes
0161e74
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
#!/bin/bash
#PJM -L rscgrp=b-batch
#PJM -L gpu=1
#PJM -L elapse=24:00:00
#PJM -N ccfm_1gpu_cached
#PJM -j
#PJM -o logs/ccfm_1gpu_cached_%j.out

# CCFM single-GPU training job (PJM scheduler), using a pre-computed
# scGPT embedding cache (scgpt_cache_norman.h5) to skip embedding
# recomputation. Loads the CUDA toolchain, activates the project
# virtualenv, and launches the cascaded-flow training run.
#
# NOTE(review): the logs/ directory named in the '#PJM -o' directive must
# exist before pjsub, or the scheduler cannot write the job log.

# Fail fast: a broken module load, a missing venv, or a failed cd must
# abort the job rather than burn a 24h GPU allocation in the wrong state.
set -euo pipefail

module load cuda/12.2.2
module load cudnn/8.9.7
module load gcc-toolset/12

readonly VENV_DIR=/home/hp250092/ku50001222/qian/aivc/lfj/stack_env
readonly WORK_DIR=/home/hp250092/ku50001222/qian/aivc/lfj/transfer/code/CCFM

# shellcheck disable=SC1091 — activate script lives outside the repo
source "${VENV_DIR}/bin/activate"

cd "${WORK_DIR}" || { echo "ERROR: cannot cd to ${WORK_DIR}" >&2; exit 1; }

# Cap the CUDA caching allocator's split size to reduce fragmentation OOMs
# on long runs.
export PYTORCH_CUDA_ALLOC_CONF=max_split_size_mb:256

# Job banner. PJM_* come from the scheduler; default to N/A so the banner
# (and 'set -u') survives an interactive dry run outside pjsub.
echo "=========================================="
echo "Job ID:    ${PJM_JOBID:-N/A}"
echo "Job Name:  ${PJM_JOBNAME:-N/A}"
echo "Start:     $(date)"
echo "Node:      $(hostname)"
echo "GPU:       $(nvidia-smi --query-gpu=name,memory.total --format=csv,noheader 2>/dev/null || echo 'N/A')"
echo "Run:       CCFM 1GPU cached scGPT"
echo "=========================================="

# Single-process launch; hyperparameters are intentionally pinned here so
# the job log is a complete record of the run configuration.
accelerate launch --num_processes=1 scripts/run_cascaded.py \
    --data-name norman \
    --d-model 128 \
    --nhead 8 \
    --nlayers 4 \
    --batch-size 48 \
    --lr 5e-5 \
    --steps 200000 \
    --fusion-method differential_perceiver \
    --perturbation-function crisper \
    --noise-type Gaussian \
    --infer-top-gene 1000 \
    --n-top-genes 5000 \
    --use-mmd-loss \
    --gamma 0.5 \
    --split-method additive \
    --fold 1 \
    --scgpt-dim 512 \
    --bottleneck-dim 128 \
    --latent-weight 1.0 \
    --choose-latent-p 0.4 \
    --dh-depth 2 \
    --print-every 5000 \
    --topk 30 \
    --use-negative-edge \
    --scgpt-cache-path scgpt_cache_norman.h5 \
    --result-path ./result_scgpt

echo "=========================================="
echo "Finished:  $(date)"
echo "=========================================="