tamnd committed on
Commit
0ed270e
·
verified ·
1 Parent(s): 86914d7

Add 2014-07-05 — 214.8K events, 11 files

Browse files
README.md CHANGED
@@ -119,10 +119,8 @@ Events from today are captured in near-real-time from the GitHub Events API and
119
  | `payload_json` | string | Full event payload as JSON |
120
 
121
  ```python
122
- # Query today's live events with DuckDB.
123
- # Run: uv run live_events.py
124
  import duckdb
125
-
126
  duckdb.sql("""
127
  SELECT event_type, COUNT(*) as n
128
  FROM read_parquet('hf://datasets/open-index/open-github/today/raw/**/*.parquet')
@@ -145,7 +143,7 @@ duckdb.sql("""
145
  | 2011 | 243 | 14,096,144 | 58,008 | 2.7 GB | 1.4 GB | 1h06m | 50m30s | 1h55m |
146
  | 2012 | 291 | 34,256,841 | 117,721 | 9.2 GB | 3.2 GB | 2h14m | 3h16m | 2h50m |
147
  | 2013 | 344 | 74,483,412 | 216,521 | 22.7 GB | 7.0 GB | 3h27m | 10h53m | 4h29m |
148
- | 2014 | 182 | 59,126,968 | 324,873 | 18.1 GB | 5.4 GB | 1h59m | 10h22m | 2h43m |
149
  | 2015 | 2 | 511,749 | 255,874 | 166.6 MB | 85.1 MB | 20s | 2m59s | 48s |
150
 
151
 
@@ -163,10 +161,9 @@ Pushes are the most common event type, representing roughly half of all GitHub a
163
 
164
 
165
  ```sql
166
- -- Top 20 repos by push volume this year.
167
- -- Run: duckdb -c ".read pushes_top_repos.sql"
168
- SELECT repo_name, COUNT(*) as pushes, SUM(size) as commits
169
- FROM read_parquet('hf://datasets/open-index/open-github/data/pushes/2026/**/*.parquet')
170
  GROUP BY repo_name ORDER BY pushes DESC LIMIT 20;
171
  ```
172
 
@@ -184,12 +181,11 @@ Issue events track the full lifecycle: opened, closed, reopened, labeled, assign
184
 
185
 
186
  ```sql
187
- -- Repos with the most issues opened vs closed this year.
188
- -- Run: duckdb -c ".read issues_top_repos.sql"
189
  SELECT repo_name,
190
  COUNT(*) FILTER (WHERE action = 'opened') as opened,
191
  COUNT(*) FILTER (WHERE action = 'closed') as closed
192
- FROM read_parquet('hf://datasets/open-index/open-github/data/issues/2026/**/*.parquet')
193
  GROUP BY repo_name ORDER BY opened DESC LIMIT 20;
194
  ```
195
 
@@ -207,17 +203,16 @@ Pull request events cover the full review cycle: opened, merged, closed, review
207
 
208
 
209
  ```sql
210
- -- Top repos by merged PRs this year.
211
- -- Run: duckdb -c ".read prs_top_merged.sql"
212
  SELECT repo_name, COUNT(*) as merged_prs
213
- FROM read_parquet('hf://datasets/open-index/open-github/data/pull_requests/2026/**/*.parquet')
214
- WHERE action = 'merged'
215
  GROUP BY repo_name ORDER BY merged_prs DESC LIMIT 20;
216
  ```
217
 
218
  ### Stars per year
219
 
220
- Stars (WatchEvent in the GitHub API) reflect community interest and discovery. Starring patterns often correlate with Hacker News, Reddit, or Twitter posts. For 2012–2014 events, `repo_language`, `repo_stars_count`, and `repo_forks_count` are populated from the legacy Timeline API repository snapshot.
221
 
222
  ```
223
  2011 ██████░░░░░░░░░░░░░░░░░░░░░░░░ 1.4M
@@ -229,10 +224,9 @@ Stars (WatchEvent in the GitHub API) reflect community interest and discovery. S
229
 
230
 
231
  ```sql
232
- -- Most starred repos this year.
233
- -- Run: duckdb -c ".read stars_top_repos.sql"
234
  SELECT repo_name, COUNT(*) as stars
235
- FROM read_parquet('hf://datasets/open-index/open-github/data/stars/2026/**/*.parquet')
236
  GROUP BY repo_name ORDER BY stars DESC LIMIT 20;
237
  ```
238
 
@@ -241,48 +235,39 @@ GROUP BY repo_name ORDER BY stars DESC LIMIT 20;
241
  ### Python (`datasets`)
242
 
243
  ```python
244
- # Quick-start: load OpenGitHub data with the Hugging Face datasets library.
245
- # Run: uv run quickstart_datasets.py
246
  from datasets import load_dataset
247
 
248
- # Stream all stars without downloading everything
249
  ds = load_dataset("open-index/open-github", "stars", streaming=True)
250
  for row in ds["train"]:
251
  print(row["repo_name"], row["actor_login"], row["created_at"])
252
- break # remove to stream all
253
 
254
  # Load a specific month of issues
255
  ds = load_dataset("open-index/open-github", "issues",
256
- data_files="data/issues/2026/03/*.parquet")
257
- print(f"March 2026 issues: {len(ds['train'])}")
258
 
259
  # Load all pull requests into memory
260
  ds = load_dataset("open-index/open-github", "pull_requests")
261
- print(f"Total PRs: {len(ds['train'])}")
262
 
263
  # Query today's live events
264
  ds = load_dataset("open-index/open-github", "live", streaming=True)
265
  for row in ds["train"]:
266
  print(row["event_type"], row["repo_name"], row["created_at"])
267
- break # remove to stream all
268
  ```
269
 
270
  ### DuckDB
271
 
272
  ```sql
273
- -- Quick-start DuckDB queries for the OpenGitHub dataset.
274
- -- Run: duckdb -c ".read quickstart.sql"
275
-
276
  -- Top 20 most-starred repos this year
277
  SELECT repo_name, COUNT(*) as stars
278
- FROM read_parquet('hf://datasets/open-index/open-github/data/stars/2026/**/*.parquet')
279
  GROUP BY repo_name ORDER BY stars DESC LIMIT 20;
280
 
281
- -- Most active PR reviewers (approvals only)
282
- SELECT actor_login, COUNT(*) as approvals
283
- FROM read_parquet('hf://datasets/open-index/open-github/data/pr_reviews/2026/**/*.parquet')
284
  WHERE review_state = 'approved'
285
- GROUP BY actor_login ORDER BY approvals DESC LIMIT 20;
286
 
287
  -- Issue open/close rates by repo
288
  SELECT repo_name,
@@ -290,14 +275,13 @@ SELECT repo_name,
290
  COUNT(*) FILTER (WHERE action = 'closed') as closed,
291
  ROUND(COUNT(*) FILTER (WHERE action = 'closed') * 100.0 /
292
  NULLIF(COUNT(*) FILTER (WHERE action = 'opened'), 0), 1) as close_pct
293
- FROM read_parquet('hf://datasets/open-index/open-github/data/issues/2026/**/*.parquet')
294
- WHERE is_pull_request = false
295
  GROUP BY repo_name HAVING opened >= 10
296
  ORDER BY opened DESC LIMIT 20;
297
 
298
- -- Full activity timeline for a repo (one month)
299
  SELECT event_type, created_at, actor_login
300
- FROM read_parquet('hf://datasets/open-index/open-github/data/*/2026/03/*.parquet')
301
  WHERE repo_name = 'golang/go'
302
  ORDER BY created_at DESC LIMIT 100;
303
  ```
@@ -305,20 +289,15 @@ ORDER BY created_at DESC LIMIT 100;
305
  ### Bulk download (`huggingface_hub`)
306
 
307
  ```python
308
- # Download OpenGitHub data locally with huggingface_hub.
309
- # Run: uv run quickstart_download.py
310
- # For faster downloads: HF_HUB_ENABLE_HF_TRANSFER=1 uv run quickstart_download.py
311
  from huggingface_hub import snapshot_download
312
 
313
  # Download only stars data
314
- snapshot_download("open-index/open-github", repo_type="dataset",
315
- local_dir="./open-github/",
316
- allow_patterns="data/stars/**/*.parquet")
317
-
318
- # Download a specific repo's data across all tables
319
- # snapshot_download("open-index/open-github", repo_type="dataset",
320
- # local_dir="./open-github/",
321
- # allow_patterns="data/*/2026/03/*.parquet")
322
  ```
323
 
324
  For faster downloads, install `pip install huggingface_hub[hf_transfer]` and set `HF_HUB_ENABLE_HF_TRANSFER=1`.
@@ -478,19 +457,13 @@ Line-level comments on pull request diffs. Includes the diff hunk for context an
478
 
479
  #### `stars`.WatchEvent
480
 
481
- Repository star events. Who starred which repo, and when. GitHub API quirk: the event is called `WatchEvent` but means starring. Action is always `"started"` so it is not stored.
482
 
483
- **Processing:** The WatchEvent payload carries no useful fields — all signal is in the event envelope (actor, repo, timestamp). For 2012–2014 events the legacy Timeline API included a full repository snapshot, so `repo_language`, `repo_stars_count`, `repo_forks_count`, `repo_description`, and `repo_is_fork` are populated for that era. `actor_type` is also populated from the legacy `actor_attributes` object. For 2015+ events those fields are empty; `actor_avatar_url` is populated instead.
484
 
485
  | Column | Type | Description |
486
  |---|---|---|
487
- | `actor_avatar_url` | string | Actor avatar URL (2015+) |
488
- | `actor_type` | string | `User` or `Organization` (2012–2014 only) |
489
- | `repo_description` | string | Repo description at star time (2012–2014 only) |
490
- | `repo_language` | string | Primary language (2012–2014 only) |
491
- | `repo_stars_count` | int32 | Star count at star time (2012–2014 only) |
492
- | `repo_forks_count` | int32 | Fork count at star time (2012–2014 only) |
493
- | `repo_is_fork` | bool | Whether the starred repo is a fork (2012–2014 only) |
494
 
495
  #### `forks`.ForkEvent
496
 
 
119
  | `payload_json` | string | Full event payload as JSON |
120
 
121
  ```python
122
+ # Query today's live events with DuckDB
 
123
  import duckdb
 
124
  duckdb.sql("""
125
  SELECT event_type, COUNT(*) as n
126
  FROM read_parquet('hf://datasets/open-index/open-github/today/raw/**/*.parquet')
 
143
  | 2011 | 243 | 14,096,144 | 58,008 | 2.7 GB | 1.4 GB | 1h06m | 50m30s | 1h55m |
144
  | 2012 | 291 | 34,256,841 | 117,721 | 9.2 GB | 3.2 GB | 2h14m | 3h16m | 2h50m |
145
  | 2013 | 344 | 74,483,412 | 216,521 | 22.7 GB | 7.0 GB | 3h27m | 10h53m | 4h29m |
146
+ | 2014 | 182 | 59,126,968 | 324,873 | 18.1 GB | 5.4 GB | 1h59m | 10h22m | 2h44m |
147
  | 2015 | 2 | 511,749 | 255,874 | 166.6 MB | 85.1 MB | 20s | 2m59s | 48s |
148
 
149
 
 
161
 
162
 
163
  ```sql
164
+ -- Top 20 repos by push volume this year
165
+ SELECT repo_name, COUNT(*) as pushes, SUM(push_size) as commits
166
+ FROM read_parquet('hf://datasets/open-index/open-github/data/pushes/2025/**/*.parquet')
 
167
  GROUP BY repo_name ORDER BY pushes DESC LIMIT 20;
168
  ```
169
 
 
181
 
182
 
183
  ```sql
184
+ -- Repos with the most issues opened this year
 
185
  SELECT repo_name,
186
  COUNT(*) FILTER (WHERE action = 'opened') as opened,
187
  COUNT(*) FILTER (WHERE action = 'closed') as closed
188
+ FROM read_parquet('hf://datasets/open-index/open-github/data/issues/2025/**/*.parquet')
189
  GROUP BY repo_name ORDER BY opened DESC LIMIT 20;
190
  ```
191
 
 
203
 
204
 
205
  ```sql
206
+ -- Top repos by merged PRs this year
 
207
  SELECT repo_name, COUNT(*) as merged_prs
208
+ FROM read_parquet('hf://datasets/open-index/open-github/data/pull_requests/2025/**/*.parquet')
209
+ WHERE action = 'closed' AND merged = true
210
  GROUP BY repo_name ORDER BY merged_prs DESC LIMIT 20;
211
  ```
212
 
213
  ### Stars per year
214
 
215
+ Stars (WatchEvent in the GitHub API) reflect community interest and discovery. Starring patterns often correlate with Hacker News, Reddit, or Twitter posts.
216
 
217
  ```
218
  2011 ██████░░░░░░░░░░░░░░░░░░░░░░░░ 1.4M
 
224
 
225
 
226
  ```sql
227
+ -- Most starred repos this year
 
228
  SELECT repo_name, COUNT(*) as stars
229
+ FROM read_parquet('hf://datasets/open-index/open-github/data/stars/2025/**/*.parquet')
230
  GROUP BY repo_name ORDER BY stars DESC LIMIT 20;
231
  ```
232
 
 
235
  ### Python (`datasets`)
236
 
237
  ```python
 
 
238
  from datasets import load_dataset
239
 
240
+ # Stream all stars
241
  ds = load_dataset("open-index/open-github", "stars", streaming=True)
242
  for row in ds["train"]:
243
  print(row["repo_name"], row["actor_login"], row["created_at"])
 
244
 
245
  # Load a specific month of issues
246
  ds = load_dataset("open-index/open-github", "issues",
247
+ data_files="data/issues/2024/06/*.parquet")
 
248
 
249
  # Load all pull requests into memory
250
  ds = load_dataset("open-index/open-github", "pull_requests")
 
251
 
252
  # Query today's live events
253
  ds = load_dataset("open-index/open-github", "live", streaming=True)
254
  for row in ds["train"]:
255
  print(row["event_type"], row["repo_name"], row["created_at"])
 
256
  ```
257
 
258
  ### DuckDB
259
 
260
  ```sql
 
 
 
261
  -- Top 20 most-starred repos this year
262
  SELECT repo_name, COUNT(*) as stars
263
+ FROM read_parquet('hf://datasets/open-index/open-github/data/stars/2025/**/*.parquet')
264
  GROUP BY repo_name ORDER BY stars DESC LIMIT 20;
265
 
266
+ -- Most active PR reviewers (approved only)
267
+ SELECT actor_login, COUNT(*) as reviews
268
+ FROM read_parquet('hf://datasets/open-index/open-github/data/pr_reviews/2025/**/*.parquet')
269
  WHERE review_state = 'approved'
270
+ GROUP BY actor_login ORDER BY reviews DESC LIMIT 20;
271
 
272
  -- Issue open/close rates by repo
273
  SELECT repo_name,
 
275
  COUNT(*) FILTER (WHERE action = 'closed') as closed,
276
  ROUND(COUNT(*) FILTER (WHERE action = 'closed') * 100.0 /
277
  NULLIF(COUNT(*) FILTER (WHERE action = 'opened'), 0), 1) as close_pct
278
+ FROM read_parquet('hf://datasets/open-index/open-github/data/issues/2025/**/*.parquet')
 
279
  GROUP BY repo_name HAVING opened >= 10
280
  ORDER BY opened DESC LIMIT 20;
281
 
282
+ -- Full activity timeline for a repo
283
  SELECT event_type, created_at, actor_login
284
+ FROM read_parquet('hf://datasets/open-index/open-github/data/*/2025/03/*.parquet')
285
  WHERE repo_name = 'golang/go'
286
  ORDER BY created_at DESC LIMIT 100;
287
  ```
 
289
  ### Bulk download (`huggingface_hub`)
290
 
291
  ```python
 
 
 
292
  from huggingface_hub import snapshot_download
293
 
294
  # Download only stars data
295
+ folder = snapshot_download(
296
+ "open-index/open-github",
297
+ repo_type="dataset",
298
+ local_dir="./open-github/",
299
+ allow_patterns="data/stars/**/*.parquet",
300
+ )
 
 
301
  ```
302
 
303
  For faster downloads, install `pip install huggingface_hub[hf_transfer]` and set `HF_HUB_ENABLE_HF_TRANSFER=1`.
 
457
 
458
  #### `stars`.WatchEvent
459
 
460
+ Repository star events. Simple, high-signal data: who starred which repo, and when. The `action` field is always `"started"` (GitHub API naming quirk: `WatchEvent` means starring, not watching).
461
 
462
+ **Processing:** Minimal flattening: only the `action` field from payload. The event envelope (actor, repo, timestamp) carries all the useful information.
463
 
464
  | Column | Type | Description |
465
  |---|---|---|
466
+ | `action` | string | Always `started` |
 
 
 
 
 
 
467
 
468
  #### `forks`.ForkEvent
469
 
data/commit_comments/2014/07/05.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:38d4400d8869e855ce843466e702eca780df566ff72fbc3bb324d20a69c64acc
3
+ size 52884
data/creates/2014/07/05.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ac800e6e18f51eb64db8ccadcb596430e6b6ab916c3db90150fffed6b4a02bc4
3
+ size 845869
data/deletes/2014/07/05.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:bd5852ac1745eaa3f9901c850f605c495430bccd3d41b0ed18aafc22e93f0258
3
+ size 93132
data/forks/2014/07/05.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:cacb622456d3bd07848a755a8f0b43e256a79adee663e6d276ad240691e87142
3
+ size 216852
data/issue_comments/2014/07/05.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5102fffc8be2181c163495fee19d851b1a2dd453d6d55cda0f64ff84b48fc7a8
3
+ size 392592
data/issues/2014/07/05.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ae67b4c7b31360ed7e8d5446eef2e722f3fc2824ea7db57eeedf7f149e65d5d6
3
+ size 270576
data/public_events/2014/07/05.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a5598bd46af19079e25eb82d95ded3414c9c5973d2f51f9613a9efe0a90be98d
3
+ size 7854
data/pull_requests/2014/07/05.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:470769c381589ef5b14ddad08c47d7c59e626171d170129ecffe7224af590cc5
3
+ size 1408090
data/pushes/2014/07/05.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:00c02dc36f64ea33c852d645541d7a452416fec118648453690089a6f427689c
3
+ size 16398368
data/stars/2014/07/05.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c56f1f27505c2f9fd4b2c2aff943384dfa86379a1de50028d2fe860e5bf3aecd
3
+ size 584461
data/wiki_pages/2014/07/05.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:34f68338e2f3bea3f7a241c24d141c5025738e0b0e12e6ae6368ed0a9f6c6cee
3
+ size 114877
stats.csv CHANGED
@@ -1057,7 +1057,7 @@ date,total_events,parse_errors,pushes,issues,issue_comments,pull_requests,pr_rev
1057
  2014-07-01,379868,1648,189541,19063,36490,18467,0,5505,35248,13747,44462,7005,1198,3031,4116,0,347,0,136549099,289.6,38646203,39.2,289.6,47.1
1058
  2014-07-02,396669,1612,194481,21293,39623,19122,0,5656,38848,14545,45075,6766,1123,3802,4378,0,345,0,139724749,290.0,38909083,57.6,290.1,42.8
1059
  2014-07-03,384108,1529,187822,22003,37389,18211,0,6606,39055,13800,42373,6737,1019,3033,4195,0,336,0,136822670,285.6,37599674,54.1,285.7,69.2
1060
- 2014-07-04,286151,1003,140538,15260,29960,12270,0,3507,29226,10278,33245,4818,737,2238,2845,0,226,0,94705025,199.9,27186254,41.3,199.9,30.9
1061
  2014-07-05,214785,789,112381,10450,15696,8119,0,1619,24312,7807,26049,3383,552,1326,2131,0,171,0,67845437,146.4,20385555,27.2,146.4,0.0
1062
  2015-01-01,218939,0,119242,9843,17045,8735,0,2173,21939,7144,23913,3843,816,1399,2196,474,177,0,73764980,79.7,37810232,15.9,79.7,0.0
1063
  2015-01-03,292810,0,155315,15037,26081,11958,0,2946,28410,9430,31862,4560,963,1829,3178,983,258,0,100890756,100.1,51394128,4.6,100.1,48.0
 
1057
  2014-07-01,379868,1648,189541,19063,36490,18467,0,5505,35248,13747,44462,7005,1198,3031,4116,0,347,0,136549099,289.6,38646203,39.2,289.6,47.1
1058
  2014-07-02,396669,1612,194481,21293,39623,19122,0,5656,38848,14545,45075,6766,1123,3802,4378,0,345,0,139724749,290.0,38909083,57.6,290.1,42.8
1059
  2014-07-03,384108,1529,187822,22003,37389,18211,0,6606,39055,13800,42373,6737,1019,3033,4195,0,336,0,136822670,285.6,37599674,54.1,285.7,69.2
1060
+ 2014-07-04,286151,1003,140538,15260,29960,12270,0,3507,29226,10278,33245,4818,737,2238,2845,0,226,0,94705025,199.9,27186254,41.3,199.9,117.1
1061
  2014-07-05,214785,789,112381,10450,15696,8119,0,1619,24312,7807,26049,3383,552,1326,2131,0,171,0,67845437,146.4,20385555,27.2,146.4,0.0
1062
  2015-01-01,218939,0,119242,9843,17045,8735,0,2173,21939,7144,23913,3843,816,1399,2196,474,177,0,73764980,79.7,37810232,15.9,79.7,0.0
1063
  2015-01-03,292810,0,155315,15037,26081,11958,0,2946,28410,9430,31862,4560,963,1829,3178,983,258,0,100890756,100.1,51394128,4.6,100.1,48.0