| |
|
| | --- |
| | dataset_info: |
| | features: |
| | - name: text |
| | dtype: string |
| | - name: id |
| | dtype: string |
| | - name: dump |
| | dtype: string |
| | - name: url |
| | dtype: string |
| | - name: date |
| | dtype: string |
| | - name: file_path |
| | dtype: string |
| | - name: language |
| | dtype: string |
| | - name: language_score |
| | dtype: float64 |
| | - name: token_count |
| | dtype: int64 |
| | splits: |
| | - name: train |
| | num_bytes: 686749697.4094799 |
| | num_examples: 200000 |
| | download_size: 420184700 |
| | dataset_size: 686749697.4094799 |
| | configs: |
| | - config_name: default |
| | data_files: |
| | - split: train |
| | path: data/train-* |
| | --- |
| | |
| | 200k samples from FineWeb, around 140M tokens |