llmtcl/scripts/get_tokenized_data.sh
#!/bin/bash
# Tokenize the monthly arXiv train/val splits with the Qwen2-7B tokenizer.
# The commented-out values below switch the run to the OpenLLaMA tokenizer instead.

MONTHS=(2407 2408 2409 2410 2411 2412 2501 2502 2503 2504 2505 2506)

IN_ROOT="./litgpt/data/arxiv_split/"
OUT_ROOT="./litgpt/data/arxiv_qwen2_tokenized/"
# OUT_ROOT="./litgpt/data/arxiv_openllama_tokenized/"

TOKENIZER_PATH="./checkpoints/Qwen/Qwen2-7B"
# TOKENIZER_PATH="./checkpoints/openlm-research/open_llama_3b"

# Chunk size (in tokens) passed to prepare_arxiv.py.
CHUNK_SIZE=$((32769*1024))
# CHUNK_SIZE=$((2049*16384))

export CUDA_VISIBLE_DEVICES="0,1,2,3"
mkdir -p "${OUT_ROOT}"

# Raise START_IDX to resume from a later month after an interrupted run.
START_IDX=0
for ((i=START_IDX; i<${#MONTHS[@]}; i++)); do
    MONTH=${MONTHS[$i]}

    # Tokenize the training split for this month.
    IN_DIR="${IN_ROOT}/${MONTH}/train"
    OUT_DIR="${OUT_ROOT}/${MONTH}/train"
    python ./litgpt/data/prepare_arxiv.py --input_dir "$IN_DIR" --output_dir "$OUT_DIR" --tokenizer_path "$TOKENIZER_PATH" --chunk_size "$CHUNK_SIZE"

    # Tokenize the validation split for this month.
    IN_DIR="${IN_ROOT}/${MONTH}/val"
    OUT_DIR="${OUT_ROOT}/${MONTH}/val"
    python ./litgpt/data/prepare_arxiv.py --input_dir "$IN_DIR" --output_dir "$OUT_DIR" --tokenizer_path "$TOKENIZER_PATH" --chunk_size "$CHUNK_SIZE"
done
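
The prepare_arxiv.py script invoked above is not shown on this page. Purely for illustration, here is a minimal, hypothetical sketch of a script exposing the same command-line interface (--input_dir, --output_dir, --tokenizer_path, --chunk_size): it tokenizes raw text and writes fixed-size chunks of token IDs. The use of transformers.AutoTokenizer, the *.txt input layout, the uint32 dtype, and the chunk_*.bin naming are assumptions for the sketch, not the repository's actual implementation.

# Hypothetical sketch only; the real ./litgpt/data/prepare_arxiv.py may pack data differently.
import argparse
from pathlib import Path

import numpy as np
from transformers import AutoTokenizer


def main() -> None:
    parser = argparse.ArgumentParser()
    parser.add_argument("--input_dir", type=Path, required=True)
    parser.add_argument("--output_dir", type=Path, required=True)
    parser.add_argument("--tokenizer_path", type=Path, required=True)
    parser.add_argument("--chunk_size", type=int, required=True)
    args = parser.parse_args()

    args.output_dir.mkdir(parents=True, exist_ok=True)
    tokenizer = AutoTokenizer.from_pretrained(str(args.tokenizer_path))

    buffer: list[int] = []
    chunk_idx = 0
    # Assumption: one document per *.txt file; the actual split layout may differ.
    for path in sorted(args.input_dir.rglob("*.txt")):
        text = path.read_text(encoding="utf-8", errors="ignore")
        buffer.extend(tokenizer.encode(text))
        # Flush full chunks of exactly --chunk_size tokens to binary files.
        while len(buffer) >= args.chunk_size:
            chunk = np.array(buffer[: args.chunk_size], dtype=np.uint32)
            chunk.tofile(args.output_dir / f"chunk_{chunk_idx:06d}.bin")
            buffer = buffer[args.chunk_size :]
            chunk_idx += 1


if __name__ == "__main__":
    main()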