FangSen9000 committed
Commit f7ed453 · verified · Parent: 9e31a93

Upload metadata.json

Files changed (1):
  1. metadata.json +51 -160
metadata.json CHANGED
@@ -24,6 +24,7 @@
    "md5": "cr:md5",
    "parentField": "cr:parentField",
    "path": "cr:path",
+    "prov": "http://www.w3.org/ns/prov#",
    "recordSet": "cr:recordSet",
    "references": "cr:references",
    "regex": "cr:regex",
@@ -44,9 +45,15 @@
  "url": "https://huggingface.co/datasets/SignerX/SignVerse-2M",
  "version": "1.0.0",
  "keywords": [
-     "sign language", "pose estimation", "DWPose", "multilingual",
-     "keypoint", "sign language generation", "sign language recognition",
-     "pose-native", "video understanding"
+     "sign language",
+     "pose estimation",
+     "DWPose",
+     "multilingual",
+     "keypoint",
+     "sign language generation",
+     "sign language recognition",
+     "pose-native",
+     "video understanding"
  ],
  "creator": [
    {
@@ -73,16 +80,38 @@
  "isLiveDataset": false,
  "datePublished": "2026",
  "inLanguage": [
-     "ase", "bfi", "gsg", "sgd", "lsf", "lse", "lis", "lgp",
-     "ngt", "asf", "jsl", "kvk", "csl", "bzs", "lsm", "pjm",
-     "rsl", "swl", "dsl", "fse", "nsl", "lsc", "aed", "tsm", "fsl"
+     "ase",
+     "bfi",
+     "gsg",
+     "sgd",
+     "lsf",
+     "lse",
+     "lis",
+     "lgp",
+     "ngt",
+     "asf",
+     "jsl",
+     "kvk",
+     "csl",
+     "bzs",
+     "lsm",
+     "pjm",
+     "rsl",
+     "swl",
+     "dsl",
+     "fse",
+     "nsl",
+     "lsc",
+     "aed",
+     "tsm",
+     "fsl"
  ],
  "distribution": [
    {
      "@type": "cr:FileObject",
      "@id": "hf-dataset-repo",
      "name": "HuggingFace dataset repository",
-       "description": "Root of the SignVerse-2M HuggingFace dataset repository",
+       "description": "Root of the SignVerse-2M HuggingFace dataset repository.",
      "contentUrl": "https://huggingface.co/datasets/SignerX/SignVerse-2M",
      "encodingFormat": "git+https",
      "sha256": "main"
@@ -91,7 +120,7 @@
      "@type": "cr:FileSet",
      "@id": "pose-shards",
      "name": "DWPose keypoint shards",
-       "description": "Numbered .tar archives each containing per-video directories with poses.npz (DWPose keypoints) and caption.json (structured subtitles).",
+       "description": "Numbered .tar archives each containing per-video directories with poses.npz (DWPose keypoints) and caption.json (structured subtitles). These shards are distributed as archive assets and are not directly expanded by the Croissant validator's record-generation path.",
      "containedIn": {"@id": "hf-dataset-repo"},
      "encodingFormat": "application/x-tar",
      "includes": "dataset/Sign_DWPose_NPZ_*.tar"
@@ -106,157 +135,6 @@
      "sha256": "79e8d2c35b0d3ed31b7e3c32348a1ce64513ca0ef217d7c1cbbd6b14410e7f08"
    }
  ],
-   "recordSet": [
-     {
-       "@type": "cr:RecordSet",
-       "@id": "pose-frame-record",
-       "name": "Per-frame DWPose keypoint record",
-       "description": "Each record corresponds to one video frame. Keypoint coordinates are in pixel space; confidence scores are in [0, 1].",
-       "field": [
-         {
-           "@type": "cr:Field",
-           "@id": "pose-frame-record/video_id",
-           "name": "video_id",
-           "description": "YouTube video identifier",
-           "dataType": "sc:Text",
-           "source": {
-             "fileSet": {"@id": "pose-shards"},
-             "extract": {"jsonPath": "$.video_id"}
-           }
-         },
-         {
-           "@type": "cr:Field",
-           "@id": "pose-frame-record/fps",
-           "name": "fps",
-           "description": "Sampling frame rate (24.0)",
-           "dataType": "sc:Float",
-           "source": {
-             "fileSet": {"@id": "pose-shards"},
-             "extract": {"jsonPath": "$.fps"}
-           }
-         },
-         {
-           "@type": "cr:Field",
-           "@id": "pose-frame-record/frame_id",
-           "name": "frame_id",
-           "description": "0-indexed frame index within the video",
-           "dataType": "sc:Integer",
-           "source": {
-             "fileSet": {"@id": "pose-shards"},
-             "extract": {"jsonPath": "$.frames[*].frame_id"}
-           }
-         },
-         {
-           "@type": "cr:Field",
-           "@id": "pose-frame-record/body_keypoints",
-           "name": "body_keypoints",
-           "description": "18 body keypoints per primary signer as float[18][3] arrays of (x, y, score) in pixel space",
-           "dataType": "sc:Text",
-           "source": {
-             "fileSet": {"@id": "pose-shards"},
-             "extract": {"jsonPath": "$.frames[*].person_0.body"}
-           }
-         },
-         {
-           "@type": "cr:Field",
-           "@id": "pose-frame-record/left_hand_keypoints",
-           "name": "left_hand_keypoints",
-           "description": "21 left-hand keypoints per primary signer as float[21][3] arrays of (x, y, score)",
-           "dataType": "sc:Text",
-           "source": {
-             "fileSet": {"@id": "pose-shards"},
-             "extract": {"jsonPath": "$.frames[*].person_0.left_hand"}
-           }
-         },
-         {
-           "@type": "cr:Field",
-           "@id": "pose-frame-record/right_hand_keypoints",
-           "name": "right_hand_keypoints",
-           "description": "21 right-hand keypoints per primary signer as float[21][3] arrays of (x, y, score)",
-           "dataType": "sc:Text",
-           "source": {
-             "fileSet": {"@id": "pose-shards"},
-             "extract": {"jsonPath": "$.frames[*].person_0.right_hand"}
-           }
-         },
-         {
-           "@type": "cr:Field",
-           "@id": "pose-frame-record/face_keypoints",
-           "name": "face_keypoints",
-           "description": "68 facial keypoints per primary signer as float[68][3] arrays of (x, y, score)",
-           "dataType": "sc:Text",
-           "source": {
-             "fileSet": {"@id": "pose-shards"},
-             "extract": {"jsonPath": "$.frames[*].person_0.face"}
-           }
-         }
-       ]
-     },
-     {
-       "@type": "cr:RecordSet",
-       "@id": "caption-record",
-       "name": "Video caption record",
-       "description": "Per-video structured caption with segment-level timestamps and English supervision.",
-       "field": [
-         {
-           "@type": "cr:Field",
-           "@id": "caption-record/video_id",
-           "name": "video_id",
-           "description": "YouTube video identifier",
-           "dataType": "sc:Text",
-           "source": {
-             "fileSet": {"@id": "pose-shards"},
-             "extract": {"jsonPath": "$.video_id"}
-           }
-         },
-         {
-           "@type": "cr:Field",
-           "@id": "caption-record/sign_language",
-           "name": "sign_language",
-           "description": "ISO 639-3 sign language code (e.g. ase, bfi, gsg)",
-           "dataType": "sc:Text",
-           "source": {
-             "fileSet": {"@id": "pose-shards"},
-             "extract": {"jsonPath": "$.sign_language"}
-           }
-         },
-         {
-           "@type": "cr:Field",
-           "@id": "caption-record/segments",
-           "name": "segments",
-           "description": "List of {start, end, text} subtitle segments with second-level timestamps",
-           "dataType": "sc:Text",
-           "repeated": true,
-           "source": {
-             "fileSet": {"@id": "pose-shards"},
-             "extract": {"jsonPath": "$.segments[*]"}
-           }
-         },
-         {
-           "@type": "cr:Field",
-           "@id": "caption-record/document_text",
-           "name": "document_text",
-           "description": "Concatenated document-level transcript for training-time consumption",
-           "dataType": "sc:Text",
-           "source": {
-             "fileSet": {"@id": "pose-shards"},
-             "extract": {"jsonPath": "$.document_text"}
-           }
-         },
-         {
-           "@type": "cr:Field",
-           "@id": "caption-record/english_source",
-           "name": "english_source",
-           "description": "Provenance of English supervision: 'native' or 'translated_from:<lang>'",
-           "dataType": "sc:Text",
-           "source": {
-             "fileSet": {"@id": "pose-shards"},
-             "extract": {"jsonPath": "$.english_source"}
-           }
-         }
-       ]
-     }
-   ],
  "rai:dataCollection": "DWPose keypoint sequences were automatically extracted from publicly available multilingual sign language videos sourced from YouTube. The pipeline (1) retrieves video metadata and subtitles, (2) decodes frames at 24 FPS via ffmpeg, (3) runs DWPose (RTMPose-based) to extract per-frame body, hand, and face keypoints, and (4) packages outputs into per-video poses.npz files. No human annotators labeled keypoints. Subtitle text is automatically downloaded and normalized from platform-exported WEBVTT captions.",
  "rai:dataCollectionType": "Web Scraping",
  "rai:dataCollectionMissingData": "Videos that failed to download, produced corrupted frames, or had no available subtitle track are excluded from the published corpus. Processing status is recorded per video in runtime_state/. Estimated processing success rate exceeds 90% of manifest entries.",
@@ -264,5 +142,18 @@
  "rai:dataBiases": "1. Language imbalance: the corpus inherits the long-tail distribution of YouTube-SL-25; ASL and BSL account for a disproportionate share of total hours. Models trained without language reweighting will underperform on low-resource languages. 2. Content-type bias: videos skew toward online educational and interpreter content; spontaneous or conversational signing is underrepresented. 3. Camera and production bias: professional interpreter videos with stable framing may be overrepresented relative to casual uploads. 4. Signer demographics: signer age, gender, and regional dialect distributions reflect what is available online and are not controlled.",
  "rai:dataLimitations": "1. Automatic pose extraction introduces errors on challenging frames (fast motion, partial occlusion, unusual viewpoints, multi-signer scenes). 2. DWPose's 21-keypoint hand model does not resolve all handshape distinctions required for lexical discrimination in sign languages. 3. The 68-point facial model captures only a partial representation of non-manual features (facial expressions, mouthing) that carry phonological and grammatical information. 4. Subtitle alignment is automatic and may contain temporal offsets or missing segments. 5. No signer identity, demographic metadata, or manual quality labels are provided. 6. The single-signer assumption (person_0) may be incorrect for multi-signer or relay-interpreted videos.",
  "rai:dataUseCases": "Intended for research use in: (1) multilingual sign language generation (text to pose to video); (2) pose-space sign language recognition and translation; (3) cross-lingual transfer and low-resource adaptation; (4) benchmarking pose-conditioned video generation models on sign language content. Not intended for safety-critical deployment (e.g., medical or legal interpretation) without independent validation, for re-identification of individuals, or for making definitive linguistic completeness claims about any specific sign language.",
-   "rai:personalSensitiveInformation": "The dataset contains pose keypoint sequences derived from publicly posted YouTube videos of human signers. Raw video frames are NOT redistributed. Pose sequences may nonetheless allow re-identification of signers in combination with external metadata. No sensitive personal information (names, biometric identifiers beyond pose, health or financial data) is intentionally included. Users should be aware that sign language data inherently involves human subjects and should handle the data accordingly."
+   "rai:dataSocialImpact": "The dataset is intended to support research on sign language technology, multilingual accessibility, and low-resource sign language modeling. Potential positive impacts include improved research infrastructure for sign language generation, recognition, and translation. Potential negative impacts include overclaiming linguistic completeness, misuse in high-stakes interpretation settings, or re-identification risks when pose data are combined with external metadata. The dataset should therefore be used for research and benchmarking, not as a substitute for qualified human interpretation.",
+   "rai:hasSyntheticData": false,
+   "rai:personalSensitiveInformation": "The dataset contains pose keypoint sequences derived from publicly posted YouTube videos of human signers. Raw video frames are NOT redistributed. Pose sequences may nonetheless allow re-identification of signers in combination with external metadata. No sensitive personal information (names, biometric identifiers beyond pose, health or financial data) is intentionally included. Users should be aware that sign language data inherently involves human subjects and should handle the data accordingly.",
+   "prov:wasGeneratedBy": {
+     "@type": "prov:Activity",
+     "name": "SignVerse-2M processing pipeline",
+     "description": "A multi-stage pipeline that retrieves public YouTube videos and subtitles, normalizes subtitle text, extracts DWPose keypoints frame-by-frame, and packages the resulting pose tensors and caption metadata into publishable dataset shards.",
+     "prov:used": [
+       "Public YouTube sign language videos",
+       "Platform-exported subtitle tracks",
+       "ffmpeg frame decoding",
+       "DWPose / RTMPose keypoint extraction"
+     ]
+   }
}
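
The prov:wasGeneratedBy activity added in the final hunk mirrors the numbered stages in rai:dataCollection. A rough sketch of stage (2), decoding frames at 24 FPS with ffmpeg, under illustrative paths; stages (3) and (4) are model-specific and only indicated in comments:

import subprocess
from pathlib import Path

def decode_frames(video: str, out_dir: str, fps: float = 24.0) -> None:
    # Stage (2) of the collection pipeline: resample to the corpus frame rate.
    Path(out_dir).mkdir(parents=True, exist_ok=True)
    subprocess.run(
        [
            "ffmpeg", "-i", video,
            "-vf", f"fps={fps}",      # resample filter
            f"{out_dir}/%06d.png",    # zero-padded frame index
        ],
        check=True,
    )

decode_frames("video.mp4", "frames/")
# Stage (3): run DWPose (RTMPose-based) on each decoded frame to get
# body/hand/face keypoints.
# Stage (4): stack per-frame keypoints and package them per video, e.g.
# np.savez("poses.npz", ...), alongside the normalized caption.json.
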