#!/bin/bash
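# Tokenizes the monthly arXiv train/val splits under IN_ROOT with
# litgpt/data/prepare_arxiv.py, using the Qwen2-7B tokenizer checkpoint,
# and writes chunked output per month and split under OUT_ROOT.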

# arXiv snapshot months to process (YYMM format).
MONTHS=(2407 2408 2409 2410 2411 2412 2501 2502 2503 2504 2505 2506)

IN_ROOT="./litgpt/data/arxiv_split/"
OUT_ROOT="./litgpt/data/arxiv_qwen2_tokenized/"
# OUT_ROOT="./litgpt/data/arxiv_openllama_tokenized/"
TOKENIZER_PATH="./checkpoints/Qwen/Qwen2-7B"
# TOKENIZER_PATH="./checkpoints/openlm-research/open_llama_3b"
CHUNK_SIZE=$((32769*1024))      # (32768 + 1) * 1024 per packed chunk
# CHUNK_SIZE=$((2049*16384))    # (2048 + 1) * 16384, smaller-context alternative

export CUDA_VISIBLE_DEVICES="0,1,2,3"   # GPUs visible to the tokenization jobs

mkdir -p "${OUT_ROOT}"

# Index into MONTHS to start from; raise it to resume a partially completed run.
START_IDX=0
for ((i=START_IDX; i<${#MONTHS[@]}; i++)); do
    MONTH=${MONTHS[$i]}

    # Tokenize the training split for this month.
    IN_DIR="${IN_ROOT}/${MONTH}/train"
    OUT_DIR="${OUT_ROOT}/${MONTH}/train"

    python ./litgpt/data/prepare_arxiv.py --input_dir "$IN_DIR" --output_dir "$OUT_DIR" --tokenizer_path "$TOKENIZER_PATH" --chunk_size "$CHUNK_SIZE"

    # Tokenize the validation split for this month.
    IN_DIR="${IN_ROOT}/${MONTH}/val"
    OUT_DIR="${OUT_ROOT}/${MONTH}/val"

    python ./litgpt/data/prepare_arxiv.py --input_dir "$IN_DIR" --output_dir "$OUT_DIR" --tokenizer_path "$TOKENIZER_PATH" --chunk_size "$CHUNK_SIZE"

done
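
# Optional spot check after the run: list the first few files written for
# each month and split (uncomment to enable).
# for MONTH in "${MONTHS[@]}"; do
#     for SPLIT in train val; do
#         echo "${OUT_ROOT}/${MONTH}/${SPLIT}:"
#         ls "${OUT_ROOT}/${MONTH}/${SPLIT}" | head -n 3
#     done
# done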