File size: 929 Bytes
0161e74
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
#!/bin/sh
#PJM -L rscgrp=b-batch
#PJM -L gpu=1
#PJM -L elapse=1:00:00
#PJM -N ps_shared
#PJM -j
#PJM -o logs/ps_shared_%j.out

# PJM batch job: run the prompt-selection pipeline (shared-only mode) on 1 GPU.
#
# Abort on any unhandled command failure so later steps never run in a
# half-initialized environment. (-u is deliberately omitted: 'module'
# initialization scripts commonly reference unset variables.)
set -e

module load cuda/12.2.2
module load cudnn/8.9.7
module load gcc-toolset/12

# Activate the virtualenv. '.' is the POSIX form of 'source'; the bashism
# 'source' fails under strict /bin/sh implementations (e.g. dash), which
# would silently leave the system Python active.
. /home/hp250092/ku50001222/qian/aivc/lfj/stack_env/bin/activate

# Explicit guard in addition to set -e: never run the pipeline from the
# wrong working directory (relative path 'code/...' below depends on it).
cd /home/hp250092/ku50001222/qian/aivc/lfj/transfer || exit 1

export PYTORCH_CUDA_ALLOC_CONF=max_split_size_mb:256
# Parallel/NFS filesystems often break HDF5 file locking; disable it.
export HDF5_USE_FILE_LOCKING=FALSE

echo "=========================================="
echo "Job ID:    $PJM_JOBID"
echo "Job Name:  $PJM_JOBNAME"
echo "Start:     $(date)"
echo "Node:      $(hostname)"
echo "GPU:       $(nvidia-smi --query-gpu=name,memory.total --format=csv,noheader 2>/dev/null || echo 'N/A')"
echo "=========================================="

# Run the pipeline. Temporarily drop -e so a failure still reaches the
# footer below; the captured status is propagated as the job's exit code
# so the scheduler records failures instead of always reporting success.
set +e
python code/prompt_selection/run_pipeline.py --shared-only
status=$?
set -e

echo "=========================================="
echo "Finished:  $(date)"
echo "Exit code: $status"
echo "=========================================="

exit "$status"