Dataset schema (per-column type and observed range):

| column | type | range / distinct values |
|---|---|---|
| id | int64 | 0–458k |
| file_name | string | length 4–119 |
| file_path | string | length 14–227 |
| content | string | length 24–9.96M |
| size | int64 | 24–9.96M |
| language | string | 1 distinct value |
| extension | string | 14 distinct values |
| total_lines | int64 | 1–219k |
| avg_line_length | float64 | 2.52–4.63M |
| max_line_length | int64 | 5–9.91M |
| alphanum_fraction | float64 | 0–1 |
| repo_name | string | length 7–101 |
| repo_stars | int64 | 100–139k |
| repo_forks | int64 | 0–26.4k |
| repo_open_issues | int64 | 0–2.27k |
| repo_license | string | 12 distinct values |
| repo_extraction_date | string | 433 distinct values |

id: 9,200 | file_name: x02c.py | file_path: rembo10_headphones/lib/unidecode/x02c.py

content:
data = (
'', # 0x00
'', # 0x01
'', # 0x02
'', # 0x03
'', # 0x04
'', # 0x05
'', # 0x06
'', # 0x07
'', # 0x08
'', # 0x09
'', # 0x0a
'', # 0x0b
'', # 0x0c
'', # 0x0d
'', # 0x0e
'', # 0x0f
'', # 0x10
'', # 0x11
'', # 0x12
'', # 0x13
'', # 0x14
'', # 0x15
'', # 0x16
'', # 0x17
'', # 0x18
'', # 0x19
'', # 0x1a
'', # 0x1b
'', # 0x1c
'', # 0x1d
'', # 0x1e
'', # 0x1f
'', # 0x20
'', # 0x21
'', # 0x22
'', # 0x23
'', # 0x24
'', # 0x25
'', # 0x26
'', # 0x27
'', # 0x28
'', # 0x29
'', # 0x2a
'', # 0x2b
'', # 0x2c
'', # 0x2d
'', # 0x2e
'', # 0x2f
'', # 0x30
'', # 0x31
'', # 0x32
'', # 0x33
'', # 0x34
'', # 0x35
'', # 0x36
'', # 0x37
'', # 0x38
'', # 0x39
'', # 0x3a
'', # 0x3b
'', # 0x3c
'', # 0x3d
'', # 0x3e
'', # 0x3f
'', # 0x40
'', # 0x41
'', # 0x42
'', # 0x43
'', # 0x44
'', # 0x45
'', # 0x46
'', # 0x47
'', # 0x48
'', # 0x49
'', # 0x4a
'', # 0x4b
'', # 0x4c
'', # 0x4d
'', # 0x4e
'', # 0x4f
'', # 0x50
'', # 0x51
'', # 0x52
'', # 0x53
'', # 0x54
'', # 0x55
'', # 0x56
'', # 0x57
'', # 0x58
'', # 0x59
'', # 0x5a
'', # 0x5b
'', # 0x5c
'', # 0x5d
'', # 0x5e
'', # 0x5f
'L', # 0x60
'l', # 0x61
'L', # 0x62
'P', # 0x63
'R', # 0x64
'a', # 0x65
't', # 0x66
'H', # 0x67
'h', # 0x68
'K', # 0x69
'k', # 0x6a
'Z', # 0x6b
'z', # 0x6c
'', # 0x6d
'M', # 0x6e
'A', # 0x6f
'', # 0x70
'', # 0x71
'', # 0x72
'', # 0x73
'', # 0x74
'', # 0x75
'', # 0x76
'', # 0x77
'', # 0x78
'', # 0x79
'', # 0x7a
'', # 0x7b
'', # 0x7c
'', # 0x7d
'', # 0x7e
'', # 0x7f
'', # 0x80
'', # 0x81
'', # 0x82
'', # 0x83
'', # 0x84
'', # 0x85
'', # 0x86
'', # 0x87
'', # 0x88
'', # 0x89
'', # 0x8a
'', # 0x8b
'', # 0x8c
'', # 0x8d
'', # 0x8e
'', # 0x8f
'', # 0x90
'', # 0x91
'', # 0x92
'', # 0x93
'', # 0x94
'', # 0x95
'', # 0x96
'', # 0x97
'', # 0x98
'', # 0x99
'', # 0x9a
'', # 0x9b
'', # 0x9c
'', # 0x9d
'', # 0x9e
'', # 0x9f
'', # 0xa0
'', # 0xa1
'', # 0xa2
'', # 0xa3
'', # 0xa4
'', # 0xa5
'', # 0xa6
'', # 0xa7
'', # 0xa8
'', # 0xa9
'', # 0xaa
'', # 0xab
'', # 0xac
'', # 0xad
'', # 0xae
'', # 0xaf
'', # 0xb0
'', # 0xb1
'', # 0xb2
'', # 0xb3
'', # 0xb4
'', # 0xb5
'', # 0xb6
'', # 0xb7
'', # 0xb8
'', # 0xb9
'', # 0xba
'', # 0xbb
'', # 0xbc
'', # 0xbd
'', # 0xbe
'', # 0xbf
'', # 0xc0
'', # 0xc1
'', # 0xc2
'', # 0xc3
'', # 0xc4
'', # 0xc5
'', # 0xc6
'', # 0xc7
'', # 0xc8
'', # 0xc9
'', # 0xca
'', # 0xcb
'', # 0xcc
'', # 0xcd
'', # 0xce
'', # 0xcf
'', # 0xd0
'', # 0xd1
'', # 0xd2
'', # 0xd3
'', # 0xd4
'', # 0xd5
'', # 0xd6
'', # 0xd7
'', # 0xd8
'', # 0xd9
'', # 0xda
'', # 0xdb
'', # 0xdc
'', # 0xdd
'', # 0xde
'', # 0xdf
'', # 0xe0
'', # 0xe1
'', # 0xe2
'', # 0xe3
'', # 0xe4
'', # 0xe5
'', # 0xe6
'', # 0xe7
'', # 0xe8
'', # 0xe9
'', # 0xea
'', # 0xeb
'', # 0xec
'', # 0xed
'', # 0xee
'', # 0xef
'', # 0xf0
'', # 0xf1
'', # 0xf2
'', # 0xf3
'', # 0xf4
'', # 0xf5
'', # 0xf6
'', # 0xf7
'', # 0xf8
'', # 0xf9
'', # 0xfa
'', # 0xfb
'', # 0xfc
'', # 0xfd
'', # 0xfe
)
size: 3,596 | language: Python | extension: .py | total_lines: 257 | avg_line_length: 12.992218 | max_line_length: 14 | alphanum_fraction: 0.311171 | repo_name: rembo10/headphones | repo_stars: 3,370 | repo_forks: 601 | repo_open_issues: 527 | repo_license: GPL-3.0 | repo_extraction_date: 9/5/2024, 5:10:38 PM (Europe/Amsterdam)
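
These xNNN.py modules are unidecode transliteration tables: each file covers one 256-codepoint block (x02c.py maps U+2C00 through U+2CFF), and each tuple entry is the ASCII replacement for the codepoint whose low byte matches its # 0xNN comment. Below is a minimal sketch of how such tables are consumed, assuming the module layout shown in these records; the transliterate helper is illustrative, not the library's public API:

import importlib

def transliterate(text: str) -> str:
    """Unidecode-style lookup over the per-block xNNN.py tables (sketch)."""
    out = []
    for ch in text:
        cp = ord(ch)
        if cp < 0x80:
            out.append(ch)  # ASCII passes through unchanged
            continue
        # The high bits select the table module and the low byte indexes its
        # data tuple, e.g. U+2C60 -> module x02c, entry 0x60 -> 'L' above.
        section, position = cp >> 8, cp & 0xFF
        try:
            table = importlib.import_module(f'unidecode.x{section:03x}')
            out.append(table.data[position])
        except (ImportError, IndexError):
            out.append('[?]')  # no table, or no entry, for this block
    return ''.join(out)

Given the x02c.py table above, transliterate('\u2c60\u2c61') returns 'Ll'.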

id: 9,201 | file_name: x027.py | file_path: rembo10_headphones/lib/unidecode/x027.py

content:
data = (
'[?]', # 0x00
'', # 0x01
'', # 0x02
'', # 0x03
'', # 0x04
'', # 0x05
'', # 0x06
'', # 0x07
'', # 0x08
'', # 0x09
'', # 0x0a
'', # 0x0b
'', # 0x0c
'', # 0x0d
'', # 0x0e
'', # 0x0f
'', # 0x10
'', # 0x11
'', # 0x12
'', # 0x13
'', # 0x14
'', # 0x15
'', # 0x16
'', # 0x17
'', # 0x18
'', # 0x19
'', # 0x1a
'', # 0x1b
'', # 0x1c
'', # 0x1d
'', # 0x1e
'', # 0x1f
'', # 0x20
'', # 0x21
'', # 0x22
'', # 0x23
'', # 0x24
'', # 0x25
'', # 0x26
'', # 0x27
'', # 0x28
'', # 0x29
'', # 0x2a
'', # 0x2b
'', # 0x2c
'', # 0x2d
'', # 0x2e
'', # 0x2f
'', # 0x30
'*', # 0x31
'', # 0x32
'', # 0x33
'', # 0x34
'', # 0x35
'', # 0x36
'', # 0x37
'', # 0x38
'', # 0x39
'', # 0x3a
'', # 0x3b
'', # 0x3c
'', # 0x3d
'', # 0x3e
'', # 0x3f
'', # 0x40
'', # 0x41
'', # 0x42
'', # 0x43
'', # 0x44
'', # 0x45
'', # 0x46
'', # 0x47
'', # 0x48
'', # 0x49
'', # 0x4a
'', # 0x4b
'', # 0x4c
'', # 0x4d
'', # 0x4e
'', # 0x4f
'', # 0x50
'', # 0x51
'', # 0x52
'', # 0x53
'', # 0x54
'', # 0x55
'', # 0x56
'', # 0x57
'|', # 0x58
'', # 0x59
'', # 0x5a
'', # 0x5b
'', # 0x5c
'', # 0x5d
'', # 0x5e
'[?]', # 0x5f
'[?]', # 0x60
'', # 0x61
'!', # 0x62
'', # 0x63
'', # 0x64
'', # 0x65
'', # 0x66
'', # 0x67
'', # 0x68
'', # 0x69
'', # 0x6a
'', # 0x6b
'', # 0x6c
'', # 0x6d
'', # 0x6e
'', # 0x6f
'', # 0x70
'', # 0x71
'', # 0x72
'', # 0x73
'', # 0x74
'', # 0x75
'', # 0x76
'', # 0x77
'', # 0x78
'', # 0x79
'', # 0x7a
'', # 0x7b
'', # 0x7c
'', # 0x7d
'', # 0x7e
'', # 0x7f
'', # 0x80
'', # 0x81
'', # 0x82
'', # 0x83
'', # 0x84
'', # 0x85
'', # 0x86
'', # 0x87
'', # 0x88
'', # 0x89
'', # 0x8a
'', # 0x8b
'', # 0x8c
'', # 0x8d
'', # 0x8e
'', # 0x8f
'', # 0x90
'', # 0x91
'', # 0x92
'', # 0x93
'', # 0x94
'', # 0x95
'', # 0x96
'', # 0x97
'', # 0x98
'', # 0x99
'', # 0x9a
'', # 0x9b
'', # 0x9c
'', # 0x9d
'', # 0x9e
'', # 0x9f
'', # 0xa0
'', # 0xa1
'', # 0xa2
'', # 0xa3
'', # 0xa4
'', # 0xa5
'', # 0xa6
'', # 0xa7
'', # 0xa8
'', # 0xa9
'', # 0xaa
'', # 0xab
'', # 0xac
'', # 0xad
'', # 0xae
'', # 0xaf
'[?]', # 0xb0
'', # 0xb1
'', # 0xb2
'', # 0xb3
'', # 0xb4
'', # 0xb5
'', # 0xb6
'', # 0xb7
'', # 0xb8
'', # 0xb9
'', # 0xba
'', # 0xbb
'', # 0xbc
'', # 0xbd
'', # 0xbe
'[?]', # 0xbf
'[?]', # 0xc0
'[?]', # 0xc1
'[?]', # 0xc2
'[?]', # 0xc3
'[?]', # 0xc4
'[?]', # 0xc5
'[?]', # 0xc6
'[?]', # 0xc7
'[?]', # 0xc8
'[?]', # 0xc9
'[?]', # 0xca
'[?]', # 0xcb
'[?]', # 0xcc
'[?]', # 0xcd
'[?]', # 0xce
'[?]', # 0xcf
'[?]', # 0xd0
'[?]', # 0xd1
'[?]', # 0xd2
'[?]', # 0xd3
'[?]', # 0xd4
'[?]', # 0xd5
'[?]', # 0xd6
'[?]', # 0xd7
'[?]', # 0xd8
'[?]', # 0xd9
'[?]', # 0xda
'[?]', # 0xdb
'[?]', # 0xdc
'[?]', # 0xdd
'[?]', # 0xde
'[?]', # 0xdf
'[?]', # 0xe0
'[?]', # 0xe1
'[?]', # 0xe2
'[?]', # 0xe3
'[?]', # 0xe4
'[?]', # 0xe5
'[', # 0xe6
'[?]', # 0xe7
'<', # 0xe8
'> ', # 0xe9
'[?]', # 0xea
'[?]', # 0xeb
'[?]', # 0xec
'[?]', # 0xed
'[?]', # 0xee
'[?]', # 0xef
'[?]', # 0xf0
'[?]', # 0xf1
'[?]', # 0xf2
'[?]', # 0xf3
'[?]', # 0xf4
'[?]', # 0xf5
'[?]', # 0xf6
'[?]', # 0xf7
'[?]', # 0xf8
'[?]', # 0xf9
'[?]', # 0xfa
'[?]', # 0xfb
'[?]', # 0xfc
'[?]', # 0xfd
'[?]', # 0xfe
)
size: 3,783 | language: Python | extension: .py | total_lines: 257 | avg_line_length: 13.719844 | max_line_length: 16 | alphanum_fraction: 0.290414 | repo_name: rembo10/headphones | repo_stars: 3,370 | repo_forks: 601 | repo_open_issues: 527 | repo_license: GPL-3.0 | repo_extraction_date: 9/5/2024, 5:10:38 PM (Europe/Amsterdam)

id: 9,202 | file_name: x078.py | file_path: rembo10_headphones/lib/unidecode/x078.py

content:
data = (
'Dang ', # 0x00
'Ma ', # 0x01
'Sha ', # 0x02
'Dan ', # 0x03
'Jue ', # 0x04
'Li ', # 0x05
'Fu ', # 0x06
'Min ', # 0x07
'Nuo ', # 0x08
'Huo ', # 0x09
'Kang ', # 0x0a
'Zhi ', # 0x0b
'Qi ', # 0x0c
'Kan ', # 0x0d
'Jie ', # 0x0e
'Fen ', # 0x0f
'E ', # 0x10
'Ya ', # 0x11
'Pi ', # 0x12
'Zhe ', # 0x13
'Yan ', # 0x14
'Sui ', # 0x15
'Zhuan ', # 0x16
'Che ', # 0x17
'Dun ', # 0x18
'Pan ', # 0x19
'Yan ', # 0x1a
'[?] ', # 0x1b
'Feng ', # 0x1c
'Fa ', # 0x1d
'Mo ', # 0x1e
'Zha ', # 0x1f
'Qu ', # 0x20
'Yu ', # 0x21
'Luo ', # 0x22
'Tuo ', # 0x23
'Tuo ', # 0x24
'Di ', # 0x25
'Zhai ', # 0x26
'Zhen ', # 0x27
'Ai ', # 0x28
'Fei ', # 0x29
'Mu ', # 0x2a
'Zhu ', # 0x2b
'Li ', # 0x2c
'Bian ', # 0x2d
'Nu ', # 0x2e
'Ping ', # 0x2f
'Peng ', # 0x30
'Ling ', # 0x31
'Pao ', # 0x32
'Le ', # 0x33
'Po ', # 0x34
'Bo ', # 0x35
'Po ', # 0x36
'Shen ', # 0x37
'Za ', # 0x38
'Nuo ', # 0x39
'Li ', # 0x3a
'Long ', # 0x3b
'Tong ', # 0x3c
'[?] ', # 0x3d
'Li ', # 0x3e
'Aragane ', # 0x3f
'Chu ', # 0x40
'Keng ', # 0x41
'Quan ', # 0x42
'Zhu ', # 0x43
'Kuang ', # 0x44
'Huo ', # 0x45
'E ', # 0x46
'Nao ', # 0x47
'Jia ', # 0x48
'Lu ', # 0x49
'Wei ', # 0x4a
'Ai ', # 0x4b
'Luo ', # 0x4c
'Ken ', # 0x4d
'Xing ', # 0x4e
'Yan ', # 0x4f
'Tong ', # 0x50
'Peng ', # 0x51
'Xi ', # 0x52
'[?] ', # 0x53
'Hong ', # 0x54
'Shuo ', # 0x55
'Xia ', # 0x56
'Qiao ', # 0x57
'[?] ', # 0x58
'Wei ', # 0x59
'Qiao ', # 0x5a
'[?] ', # 0x5b
'Keng ', # 0x5c
'Xiao ', # 0x5d
'Que ', # 0x5e
'Chan ', # 0x5f
'Lang ', # 0x60
'Hong ', # 0x61
'Yu ', # 0x62
'Xiao ', # 0x63
'Xia ', # 0x64
'Mang ', # 0x65
'Long ', # 0x66
'Iong ', # 0x67
'Che ', # 0x68
'Che ', # 0x69
'E ', # 0x6a
'Liu ', # 0x6b
'Ying ', # 0x6c
'Mang ', # 0x6d
'Que ', # 0x6e
'Yan ', # 0x6f
'Sha ', # 0x70
'Kun ', # 0x71
'Yu ', # 0x72
'[?] ', # 0x73
'Kaki ', # 0x74
'Lu ', # 0x75
'Chen ', # 0x76
'Jian ', # 0x77
'Nue ', # 0x78
'Song ', # 0x79
'Zhuo ', # 0x7a
'Keng ', # 0x7b
'Peng ', # 0x7c
'Yan ', # 0x7d
'Zhui ', # 0x7e
'Kong ', # 0x7f
'Ceng ', # 0x80
'Qi ', # 0x81
'Zong ', # 0x82
'Qing ', # 0x83
'Lin ', # 0x84
'Jun ', # 0x85
'Bo ', # 0x86
'Ding ', # 0x87
'Min ', # 0x88
'Diao ', # 0x89
'Jian ', # 0x8a
'He ', # 0x8b
'Lu ', # 0x8c
'Ai ', # 0x8d
'Sui ', # 0x8e
'Que ', # 0x8f
'Ling ', # 0x90
'Bei ', # 0x91
'Yin ', # 0x92
'Dui ', # 0x93
'Wu ', # 0x94
'Qi ', # 0x95
'Lun ', # 0x96
'Wan ', # 0x97
'Dian ', # 0x98
'Gang ', # 0x99
'Pei ', # 0x9a
'Qi ', # 0x9b
'Chen ', # 0x9c
'Ruan ', # 0x9d
'Yan ', # 0x9e
'Die ', # 0x9f
'Ding ', # 0xa0
'Du ', # 0xa1
'Tuo ', # 0xa2
'Jie ', # 0xa3
'Ying ', # 0xa4
'Bian ', # 0xa5
'Ke ', # 0xa6
'Bi ', # 0xa7
'Wei ', # 0xa8
'Shuo ', # 0xa9
'Zhen ', # 0xaa
'Duan ', # 0xab
'Xia ', # 0xac
'Dang ', # 0xad
'Ti ', # 0xae
'Nao ', # 0xaf
'Peng ', # 0xb0
'Jian ', # 0xb1
'Di ', # 0xb2
'Tan ', # 0xb3
'Cha ', # 0xb4
'Seki ', # 0xb5
'Qi ', # 0xb6
'[?] ', # 0xb7
'Feng ', # 0xb8
'Xuan ', # 0xb9
'Que ', # 0xba
'Que ', # 0xbb
'Ma ', # 0xbc
'Gong ', # 0xbd
'Nian ', # 0xbe
'Su ', # 0xbf
'E ', # 0xc0
'Ci ', # 0xc1
'Liu ', # 0xc2
'Si ', # 0xc3
'Tang ', # 0xc4
'Bang ', # 0xc5
'Hua ', # 0xc6
'Pi ', # 0xc7
'Wei ', # 0xc8
'Sang ', # 0xc9
'Lei ', # 0xca
'Cuo ', # 0xcb
'Zhen ', # 0xcc
'Xia ', # 0xcd
'Qi ', # 0xce
'Lian ', # 0xcf
'Pan ', # 0xd0
'Wei ', # 0xd1
'Yun ', # 0xd2
'Dui ', # 0xd3
'Zhe ', # 0xd4
'Ke ', # 0xd5
'La ', # 0xd6
'[?] ', # 0xd7
'Qing ', # 0xd8
'Gun ', # 0xd9
'Zhuan ', # 0xda
'Chan ', # 0xdb
'Qi ', # 0xdc
'Ao ', # 0xdd
'Peng ', # 0xde
'Lu ', # 0xdf
'Lu ', # 0xe0
'Kan ', # 0xe1
'Qiang ', # 0xe2
'Chen ', # 0xe3
'Yin ', # 0xe4
'Lei ', # 0xe5
'Biao ', # 0xe6
'Qi ', # 0xe7
'Mo ', # 0xe8
'Qi ', # 0xe9
'Cui ', # 0xea
'Zong ', # 0xeb
'Qing ', # 0xec
'Chuo ', # 0xed
'[?] ', # 0xee
'Ji ', # 0xef
'Shan ', # 0xf0
'Lao ', # 0xf1
'Qu ', # 0xf2
'Zeng ', # 0xf3
'Deng ', # 0xf4
'Jian ', # 0xf5
'Xi ', # 0xf6
'Lin ', # 0xf7
'Ding ', # 0xf8
'Dian ', # 0xf9
'Huang ', # 0xfa
'Pan ', # 0xfb
'Za ', # 0xfc
'Qiao ', # 0xfd
'Di ', # 0xfe
'Li ', # 0xff
)
size: 4,648 | language: Python | extension: .py | total_lines: 258 | avg_line_length: 17.015504 | max_line_length: 21 | alphanum_fraction: 0.409567 | repo_name: rembo10/headphones | repo_stars: 3,370 | repo_forks: 601 | repo_open_issues: 527 | repo_license: GPL-3.0 | repo_extraction_date: 9/5/2024, 5:10:38 PM (Europe/Amsterdam)

id: 9,203 | file_name: x0d1.py | file_path: rembo10_headphones/lib/unidecode/x0d1.py

content:
data = (
'tyal', # 0x00
'tyalg', # 0x01
'tyalm', # 0x02
'tyalb', # 0x03
'tyals', # 0x04
'tyalt', # 0x05
'tyalp', # 0x06
'tyalh', # 0x07
'tyam', # 0x08
'tyab', # 0x09
'tyabs', # 0x0a
'tyas', # 0x0b
'tyass', # 0x0c
'tyang', # 0x0d
'tyaj', # 0x0e
'tyac', # 0x0f
'tyak', # 0x10
'tyat', # 0x11
'tyap', # 0x12
'tyah', # 0x13
'tyae', # 0x14
'tyaeg', # 0x15
'tyaegg', # 0x16
'tyaegs', # 0x17
'tyaen', # 0x18
'tyaenj', # 0x19
'tyaenh', # 0x1a
'tyaed', # 0x1b
'tyael', # 0x1c
'tyaelg', # 0x1d
'tyaelm', # 0x1e
'tyaelb', # 0x1f
'tyaels', # 0x20
'tyaelt', # 0x21
'tyaelp', # 0x22
'tyaelh', # 0x23
'tyaem', # 0x24
'tyaeb', # 0x25
'tyaebs', # 0x26
'tyaes', # 0x27
'tyaess', # 0x28
'tyaeng', # 0x29
'tyaej', # 0x2a
'tyaec', # 0x2b
'tyaek', # 0x2c
'tyaet', # 0x2d
'tyaep', # 0x2e
'tyaeh', # 0x2f
'teo', # 0x30
'teog', # 0x31
'teogg', # 0x32
'teogs', # 0x33
'teon', # 0x34
'teonj', # 0x35
'teonh', # 0x36
'teod', # 0x37
'teol', # 0x38
'teolg', # 0x39
'teolm', # 0x3a
'teolb', # 0x3b
'teols', # 0x3c
'teolt', # 0x3d
'teolp', # 0x3e
'teolh', # 0x3f
'teom', # 0x40
'teob', # 0x41
'teobs', # 0x42
'teos', # 0x43
'teoss', # 0x44
'teong', # 0x45
'teoj', # 0x46
'teoc', # 0x47
'teok', # 0x48
'teot', # 0x49
'teop', # 0x4a
'teoh', # 0x4b
'te', # 0x4c
'teg', # 0x4d
'tegg', # 0x4e
'tegs', # 0x4f
'ten', # 0x50
'tenj', # 0x51
'tenh', # 0x52
'ted', # 0x53
'tel', # 0x54
'telg', # 0x55
'telm', # 0x56
'telb', # 0x57
'tels', # 0x58
'telt', # 0x59
'telp', # 0x5a
'telh', # 0x5b
'tem', # 0x5c
'teb', # 0x5d
'tebs', # 0x5e
'tes', # 0x5f
'tess', # 0x60
'teng', # 0x61
'tej', # 0x62
'tec', # 0x63
'tek', # 0x64
'tet', # 0x65
'tep', # 0x66
'teh', # 0x67
'tyeo', # 0x68
'tyeog', # 0x69
'tyeogg', # 0x6a
'tyeogs', # 0x6b
'tyeon', # 0x6c
'tyeonj', # 0x6d
'tyeonh', # 0x6e
'tyeod', # 0x6f
'tyeol', # 0x70
'tyeolg', # 0x71
'tyeolm', # 0x72
'tyeolb', # 0x73
'tyeols', # 0x74
'tyeolt', # 0x75
'tyeolp', # 0x76
'tyeolh', # 0x77
'tyeom', # 0x78
'tyeob', # 0x79
'tyeobs', # 0x7a
'tyeos', # 0x7b
'tyeoss', # 0x7c
'tyeong', # 0x7d
'tyeoj', # 0x7e
'tyeoc', # 0x7f
'tyeok', # 0x80
'tyeot', # 0x81
'tyeop', # 0x82
'tyeoh', # 0x83
'tye', # 0x84
'tyeg', # 0x85
'tyegg', # 0x86
'tyegs', # 0x87
'tyen', # 0x88
'tyenj', # 0x89
'tyenh', # 0x8a
'tyed', # 0x8b
'tyel', # 0x8c
'tyelg', # 0x8d
'tyelm', # 0x8e
'tyelb', # 0x8f
'tyels', # 0x90
'tyelt', # 0x91
'tyelp', # 0x92
'tyelh', # 0x93
'tyem', # 0x94
'tyeb', # 0x95
'tyebs', # 0x96
'tyes', # 0x97
'tyess', # 0x98
'tyeng', # 0x99
'tyej', # 0x9a
'tyec', # 0x9b
'tyek', # 0x9c
'tyet', # 0x9d
'tyep', # 0x9e
'tyeh', # 0x9f
'to', # 0xa0
'tog', # 0xa1
'togg', # 0xa2
'togs', # 0xa3
'ton', # 0xa4
'tonj', # 0xa5
'tonh', # 0xa6
'tod', # 0xa7
'tol', # 0xa8
'tolg', # 0xa9
'tolm', # 0xaa
'tolb', # 0xab
'tols', # 0xac
'tolt', # 0xad
'tolp', # 0xae
'tolh', # 0xaf
'tom', # 0xb0
'tob', # 0xb1
'tobs', # 0xb2
'tos', # 0xb3
'toss', # 0xb4
'tong', # 0xb5
'toj', # 0xb6
'toc', # 0xb7
'tok', # 0xb8
'tot', # 0xb9
'top', # 0xba
'toh', # 0xbb
'twa', # 0xbc
'twag', # 0xbd
'twagg', # 0xbe
'twags', # 0xbf
'twan', # 0xc0
'twanj', # 0xc1
'twanh', # 0xc2
'twad', # 0xc3
'twal', # 0xc4
'twalg', # 0xc5
'twalm', # 0xc6
'twalb', # 0xc7
'twals', # 0xc8
'twalt', # 0xc9
'twalp', # 0xca
'twalh', # 0xcb
'twam', # 0xcc
'twab', # 0xcd
'twabs', # 0xce
'twas', # 0xcf
'twass', # 0xd0
'twang', # 0xd1
'twaj', # 0xd2
'twac', # 0xd3
'twak', # 0xd4
'twat', # 0xd5
'twap', # 0xd6
'twah', # 0xd7
'twae', # 0xd8
'twaeg', # 0xd9
'twaegg', # 0xda
'twaegs', # 0xdb
'twaen', # 0xdc
'twaenj', # 0xdd
'twaenh', # 0xde
'twaed', # 0xdf
'twael', # 0xe0
'twaelg', # 0xe1
'twaelm', # 0xe2
'twaelb', # 0xe3
'twaels', # 0xe4
'twaelt', # 0xe5
'twaelp', # 0xe6
'twaelh', # 0xe7
'twaem', # 0xe8
'twaeb', # 0xe9
'twaebs', # 0xea
'twaes', # 0xeb
'twaess', # 0xec
'twaeng', # 0xed
'twaej', # 0xee
'twaec', # 0xef
'twaek', # 0xf0
'twaet', # 0xf1
'twaep', # 0xf2
'twaeh', # 0xf3
'toe', # 0xf4
'toeg', # 0xf5
'toegg', # 0xf6
'toegs', # 0xf7
'toen', # 0xf8
'toenj', # 0xf9
'toenh', # 0xfa
'toed', # 0xfb
'toel', # 0xfc
'toelg', # 0xfd
'toelm', # 0xfe
'toelb', # 0xff
)
size: 4,767 | language: Python | extension: .py | total_lines: 258 | avg_line_length: 17.476744 | max_line_length: 19 | alphanum_fraction: 0.487913 | repo_name: rembo10/headphones | repo_stars: 3,370 | repo_forks: 601 | repo_open_issues: 527 | repo_license: GPL-3.0 | repo_extraction_date: 9/5/2024, 5:10:38 PM (Europe/Amsterdam)
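
x0d1.py sits in the precomposed Hangul syllables range (the file covers U+D100 through U+D1FF), so each entry is a whole-syllable romanization rather than a single letter. A quick check against the table above, reusing the illustrative transliterate sketch from earlier:

# U+D130 is the Hangul syllable '터' (initial t + vowel eo, no final);
# entry 0x30 of the x0d1.py table above reads 'teo'.
print(transliterate('\ud130'))  # -> teo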

id: 9,204 | file_name: x061.py | file_path: rembo10_headphones/lib/unidecode/x061.py

content:
data = (
'Qiao ', # 0x00
'Chou ', # 0x01
'Bei ', # 0x02
'Xuan ', # 0x03
'Wei ', # 0x04
'Ge ', # 0x05
'Qian ', # 0x06
'Wei ', # 0x07
'Yu ', # 0x08
'Yu ', # 0x09
'Bi ', # 0x0a
'Xuan ', # 0x0b
'Huan ', # 0x0c
'Min ', # 0x0d
'Bi ', # 0x0e
'Yi ', # 0x0f
'Mian ', # 0x10
'Yong ', # 0x11
'Kai ', # 0x12
'Dang ', # 0x13
'Yin ', # 0x14
'E ', # 0x15
'Chen ', # 0x16
'Mou ', # 0x17
'Ke ', # 0x18
'Ke ', # 0x19
'Yu ', # 0x1a
'Ai ', # 0x1b
'Qie ', # 0x1c
'Yan ', # 0x1d
'Nuo ', # 0x1e
'Gan ', # 0x1f
'Yun ', # 0x20
'Zong ', # 0x21
'Sai ', # 0x22
'Leng ', # 0x23
'Fen ', # 0x24
'[?] ', # 0x25
'Kui ', # 0x26
'Kui ', # 0x27
'Que ', # 0x28
'Gong ', # 0x29
'Yun ', # 0x2a
'Su ', # 0x2b
'Su ', # 0x2c
'Qi ', # 0x2d
'Yao ', # 0x2e
'Song ', # 0x2f
'Huang ', # 0x30
'Ji ', # 0x31
'Gu ', # 0x32
'Ju ', # 0x33
'Chuang ', # 0x34
'Ni ', # 0x35
'Xie ', # 0x36
'Kai ', # 0x37
'Zheng ', # 0x38
'Yong ', # 0x39
'Cao ', # 0x3a
'Sun ', # 0x3b
'Shen ', # 0x3c
'Bo ', # 0x3d
'Kai ', # 0x3e
'Yuan ', # 0x3f
'Xie ', # 0x40
'Hun ', # 0x41
'Yong ', # 0x42
'Yang ', # 0x43
'Li ', # 0x44
'Sao ', # 0x45
'Tao ', # 0x46
'Yin ', # 0x47
'Ci ', # 0x48
'Xu ', # 0x49
'Qian ', # 0x4a
'Tai ', # 0x4b
'Huang ', # 0x4c
'Yun ', # 0x4d
'Shen ', # 0x4e
'Ming ', # 0x4f
'[?] ', # 0x50
'She ', # 0x51
'Cong ', # 0x52
'Piao ', # 0x53
'Mo ', # 0x54
'Mu ', # 0x55
'Guo ', # 0x56
'Chi ', # 0x57
'Can ', # 0x58
'Can ', # 0x59
'Can ', # 0x5a
'Cui ', # 0x5b
'Min ', # 0x5c
'Te ', # 0x5d
'Zhang ', # 0x5e
'Tong ', # 0x5f
'Ao ', # 0x60
'Shuang ', # 0x61
'Man ', # 0x62
'Guan ', # 0x63
'Que ', # 0x64
'Zao ', # 0x65
'Jiu ', # 0x66
'Hui ', # 0x67
'Kai ', # 0x68
'Lian ', # 0x69
'Ou ', # 0x6a
'Song ', # 0x6b
'Jin ', # 0x6c
'Yin ', # 0x6d
'Lu ', # 0x6e
'Shang ', # 0x6f
'Wei ', # 0x70
'Tuan ', # 0x71
'Man ', # 0x72
'Qian ', # 0x73
'She ', # 0x74
'Yong ', # 0x75
'Qing ', # 0x76
'Kang ', # 0x77
'Di ', # 0x78
'Zhi ', # 0x79
'Lou ', # 0x7a
'Juan ', # 0x7b
'Qi ', # 0x7c
'Qi ', # 0x7d
'Yu ', # 0x7e
'Ping ', # 0x7f
'Liao ', # 0x80
'Cong ', # 0x81
'You ', # 0x82
'Chong ', # 0x83
'Zhi ', # 0x84
'Tong ', # 0x85
'Cheng ', # 0x86
'Qi ', # 0x87
'Qu ', # 0x88
'Peng ', # 0x89
'Bei ', # 0x8a
'Bie ', # 0x8b
'Chun ', # 0x8c
'Jiao ', # 0x8d
'Zeng ', # 0x8e
'Chi ', # 0x8f
'Lian ', # 0x90
'Ping ', # 0x91
'Kui ', # 0x92
'Hui ', # 0x93
'Qiao ', # 0x94
'Cheng ', # 0x95
'Yin ', # 0x96
'Yin ', # 0x97
'Xi ', # 0x98
'Xi ', # 0x99
'Dan ', # 0x9a
'Tan ', # 0x9b
'Duo ', # 0x9c
'Dui ', # 0x9d
'Dui ', # 0x9e
'Su ', # 0x9f
'Jue ', # 0xa0
'Ce ', # 0xa1
'Xiao ', # 0xa2
'Fan ', # 0xa3
'Fen ', # 0xa4
'Lao ', # 0xa5
'Lao ', # 0xa6
'Chong ', # 0xa7
'Han ', # 0xa8
'Qi ', # 0xa9
'Xian ', # 0xaa
'Min ', # 0xab
'Jing ', # 0xac
'Liao ', # 0xad
'Wu ', # 0xae
'Can ', # 0xaf
'Jue ', # 0xb0
'Cu ', # 0xb1
'Xian ', # 0xb2
'Tan ', # 0xb3
'Sheng ', # 0xb4
'Pi ', # 0xb5
'Yi ', # 0xb6
'Chu ', # 0xb7
'Xian ', # 0xb8
'Nao ', # 0xb9
'Dan ', # 0xba
'Tan ', # 0xbb
'Jing ', # 0xbc
'Song ', # 0xbd
'Han ', # 0xbe
'Jiao ', # 0xbf
'Wai ', # 0xc0
'Huan ', # 0xc1
'Dong ', # 0xc2
'Qin ', # 0xc3
'Qin ', # 0xc4
'Qu ', # 0xc5
'Cao ', # 0xc6
'Ken ', # 0xc7
'Xie ', # 0xc8
'Ying ', # 0xc9
'Ao ', # 0xca
'Mao ', # 0xcb
'Yi ', # 0xcc
'Lin ', # 0xcd
'Se ', # 0xce
'Jun ', # 0xcf
'Huai ', # 0xd0
'Men ', # 0xd1
'Lan ', # 0xd2
'Ai ', # 0xd3
'Lin ', # 0xd4
'Yan ', # 0xd5
'Gua ', # 0xd6
'Xia ', # 0xd7
'Chi ', # 0xd8
'Yu ', # 0xd9
'Yin ', # 0xda
'Dai ', # 0xdb
'Meng ', # 0xdc
'Ai ', # 0xdd
'Meng ', # 0xde
'Dui ', # 0xdf
'Qi ', # 0xe0
'Mo ', # 0xe1
'Lan ', # 0xe2
'Men ', # 0xe3
'Chou ', # 0xe4
'Zhi ', # 0xe5
'Nuo ', # 0xe6
'Nuo ', # 0xe7
'Yan ', # 0xe8
'Yang ', # 0xe9
'Bo ', # 0xea
'Zhi ', # 0xeb
'Kuang ', # 0xec
'Kuang ', # 0xed
'You ', # 0xee
'Fu ', # 0xef
'Liu ', # 0xf0
'Mie ', # 0xf1
'Cheng ', # 0xf2
'[?] ', # 0xf3
'Chan ', # 0xf4
'Meng ', # 0xf5
'Lan ', # 0xf6
'Huai ', # 0xf7
'Xuan ', # 0xf8
'Rang ', # 0xf9
'Chan ', # 0xfa
'Ji ', # 0xfb
'Ju ', # 0xfc
'Huan ', # 0xfd
'She ', # 0xfe
'Yi ', # 0xff
)
size: 4,662 | language: Python | extension: .py | total_lines: 258 | avg_line_length: 17.069767 | max_line_length: 20 | alphanum_fraction: 0.415531 | repo_name: rembo10/headphones | repo_stars: 3,370 | repo_forks: 601 | repo_open_issues: 527 | repo_license: GPL-3.0 | repo_extraction_date: 9/5/2024, 5:10:38 PM (Europe/Amsterdam)

id: 9,205 | file_name: x054.py | file_path: rembo10_headphones/lib/unidecode/x054.py

content:
data = (
'Mie ', # 0x00
'Xu ', # 0x01
'Mang ', # 0x02
'Chi ', # 0x03
'Ge ', # 0x04
'Xuan ', # 0x05
'Yao ', # 0x06
'Zi ', # 0x07
'He ', # 0x08
'Ji ', # 0x09
'Diao ', # 0x0a
'Cun ', # 0x0b
'Tong ', # 0x0c
'Ming ', # 0x0d
'Hou ', # 0x0e
'Li ', # 0x0f
'Tu ', # 0x10
'Xiang ', # 0x11
'Zha ', # 0x12
'Xia ', # 0x13
'Ye ', # 0x14
'Lu ', # 0x15
'A ', # 0x16
'Ma ', # 0x17
'Ou ', # 0x18
'Xue ', # 0x19
'Yi ', # 0x1a
'Jun ', # 0x1b
'Chou ', # 0x1c
'Lin ', # 0x1d
'Tun ', # 0x1e
'Yin ', # 0x1f
'Fei ', # 0x20
'Bi ', # 0x21
'Qin ', # 0x22
'Qin ', # 0x23
'Jie ', # 0x24
'Bu ', # 0x25
'Fou ', # 0x26
'Ba ', # 0x27
'Dun ', # 0x28
'Fen ', # 0x29
'E ', # 0x2a
'Han ', # 0x2b
'Ting ', # 0x2c
'Hang ', # 0x2d
'Shun ', # 0x2e
'Qi ', # 0x2f
'Hong ', # 0x30
'Zhi ', # 0x31
'Shen ', # 0x32
'Wu ', # 0x33
'Wu ', # 0x34
'Chao ', # 0x35
'Ne ', # 0x36
'Xue ', # 0x37
'Xi ', # 0x38
'Chui ', # 0x39
'Dou ', # 0x3a
'Wen ', # 0x3b
'Hou ', # 0x3c
'Ou ', # 0x3d
'Wu ', # 0x3e
'Gao ', # 0x3f
'Ya ', # 0x40
'Jun ', # 0x41
'Lu ', # 0x42
'E ', # 0x43
'Ge ', # 0x44
'Mei ', # 0x45
'Ai ', # 0x46
'Qi ', # 0x47
'Cheng ', # 0x48
'Wu ', # 0x49
'Gao ', # 0x4a
'Fu ', # 0x4b
'Jiao ', # 0x4c
'Hong ', # 0x4d
'Chi ', # 0x4e
'Sheng ', # 0x4f
'Ne ', # 0x50
'Tun ', # 0x51
'Fu ', # 0x52
'Yi ', # 0x53
'Dai ', # 0x54
'Ou ', # 0x55
'Li ', # 0x56
'Bai ', # 0x57
'Yuan ', # 0x58
'Kuai ', # 0x59
'[?] ', # 0x5a
'Qiang ', # 0x5b
'Wu ', # 0x5c
'E ', # 0x5d
'Shi ', # 0x5e
'Quan ', # 0x5f
'Pen ', # 0x60
'Wen ', # 0x61
'Ni ', # 0x62
'M ', # 0x63
'Ling ', # 0x64
'Ran ', # 0x65
'You ', # 0x66
'Di ', # 0x67
'Zhou ', # 0x68
'Shi ', # 0x69
'Zhou ', # 0x6a
'Tie ', # 0x6b
'Xi ', # 0x6c
'Yi ', # 0x6d
'Qi ', # 0x6e
'Ping ', # 0x6f
'Zi ', # 0x70
'Gu ', # 0x71
'Zi ', # 0x72
'Wei ', # 0x73
'Xu ', # 0x74
'He ', # 0x75
'Nao ', # 0x76
'Xia ', # 0x77
'Pei ', # 0x78
'Yi ', # 0x79
'Xiao ', # 0x7a
'Shen ', # 0x7b
'Hu ', # 0x7c
'Ming ', # 0x7d
'Da ', # 0x7e
'Qu ', # 0x7f
'Ju ', # 0x80
'Gem ', # 0x81
'Za ', # 0x82
'Tuo ', # 0x83
'Duo ', # 0x84
'Pou ', # 0x85
'Pao ', # 0x86
'Bi ', # 0x87
'Fu ', # 0x88
'Yang ', # 0x89
'He ', # 0x8a
'Zha ', # 0x8b
'He ', # 0x8c
'Hai ', # 0x8d
'Jiu ', # 0x8e
'Yong ', # 0x8f
'Fu ', # 0x90
'Que ', # 0x91
'Zhou ', # 0x92
'Wa ', # 0x93
'Ka ', # 0x94
'Gu ', # 0x95
'Ka ', # 0x96
'Zuo ', # 0x97
'Bu ', # 0x98
'Long ', # 0x99
'Dong ', # 0x9a
'Ning ', # 0x9b
'Tha ', # 0x9c
'Si ', # 0x9d
'Xian ', # 0x9e
'Huo ', # 0x9f
'Qi ', # 0xa0
'Er ', # 0xa1
'E ', # 0xa2
'Guang ', # 0xa3
'Zha ', # 0xa4
'Xi ', # 0xa5
'Yi ', # 0xa6
'Lie ', # 0xa7
'Zi ', # 0xa8
'Mie ', # 0xa9
'Mi ', # 0xaa
'Zhi ', # 0xab
'Yao ', # 0xac
'Ji ', # 0xad
'Zhou ', # 0xae
'Ge ', # 0xaf
'Shuai ', # 0xb0
'Zan ', # 0xb1
'Xiao ', # 0xb2
'Ke ', # 0xb3
'Hui ', # 0xb4
'Kua ', # 0xb5
'Huai ', # 0xb6
'Tao ', # 0xb7
'Xian ', # 0xb8
'E ', # 0xb9
'Xuan ', # 0xba
'Xiu ', # 0xbb
'Wai ', # 0xbc
'Yan ', # 0xbd
'Lao ', # 0xbe
'Yi ', # 0xbf
'Ai ', # 0xc0
'Pin ', # 0xc1
'Shen ', # 0xc2
'Tong ', # 0xc3
'Hong ', # 0xc4
'Xiong ', # 0xc5
'Chi ', # 0xc6
'Wa ', # 0xc7
'Ha ', # 0xc8
'Zai ', # 0xc9
'Yu ', # 0xca
'Di ', # 0xcb
'Pai ', # 0xcc
'Xiang ', # 0xcd
'Ai ', # 0xce
'Hen ', # 0xcf
'Kuang ', # 0xd0
'Ya ', # 0xd1
'Da ', # 0xd2
'Xiao ', # 0xd3
'Bi ', # 0xd4
'Yue ', # 0xd5
'[?] ', # 0xd6
'Hua ', # 0xd7
'Sasou ', # 0xd8
'Kuai ', # 0xd9
'Duo ', # 0xda
'[?] ', # 0xdb
'Ji ', # 0xdc
'Nong ', # 0xdd
'Mou ', # 0xde
'Yo ', # 0xdf
'Hao ', # 0xe0
'Yuan ', # 0xe1
'Long ', # 0xe2
'Pou ', # 0xe3
'Mang ', # 0xe4
'Ge ', # 0xe5
'E ', # 0xe6
'Chi ', # 0xe7
'Shao ', # 0xe8
'Li ', # 0xe9
'Na ', # 0xea
'Zu ', # 0xeb
'He ', # 0xec
'Ku ', # 0xed
'Xiao ', # 0xee
'Xian ', # 0xef
'Lao ', # 0xf0
'Bo ', # 0xf1
'Zhe ', # 0xf2
'Zha ', # 0xf3
'Liang ', # 0xf4
'Ba ', # 0xf5
'Mie ', # 0xf6
'Le ', # 0xf7
'Sui ', # 0xf8
'Fou ', # 0xf9
'Bu ', # 0xfa
'Han ', # 0xfb
'Heng ', # 0xfc
'Geng ', # 0xfd
'Shuo ', # 0xfe
'Ge ', # 0xff
)
size: 4,583 | language: Python | extension: .py | total_lines: 258 | avg_line_length: 16.763566 | max_line_length: 19 | alphanum_fraction: 0.404855 | repo_name: rembo10/headphones | repo_stars: 3,370 | repo_forks: 601 | repo_open_issues: 527 | repo_license: GPL-3.0 | repo_extraction_date: 9/5/2024, 5:10:38 PM (Europe/Amsterdam)

id: 9,206 | file_name: x094.py | file_path: rembo10_headphones/lib/unidecode/x094.py

content:
data = (
'Kui ', # 0x00
'Si ', # 0x01
'Liu ', # 0x02
'Nao ', # 0x03
'Heng ', # 0x04
'Pie ', # 0x05
'Sui ', # 0x06
'Fan ', # 0x07
'Qiao ', # 0x08
'Quan ', # 0x09
'Yang ', # 0x0a
'Tang ', # 0x0b
'Xiang ', # 0x0c
'Jue ', # 0x0d
'Jiao ', # 0x0e
'Zun ', # 0x0f
'Liao ', # 0x10
'Jie ', # 0x11
'Lao ', # 0x12
'Dui ', # 0x13
'Tan ', # 0x14
'Zan ', # 0x15
'Ji ', # 0x16
'Jian ', # 0x17
'Zhong ', # 0x18
'Deng ', # 0x19
'Ya ', # 0x1a
'Ying ', # 0x1b
'Dui ', # 0x1c
'Jue ', # 0x1d
'Nou ', # 0x1e
'Ti ', # 0x1f
'Pu ', # 0x20
'Tie ', # 0x21
'[?] ', # 0x22
'[?] ', # 0x23
'Ding ', # 0x24
'Shan ', # 0x25
'Kai ', # 0x26
'Jian ', # 0x27
'Fei ', # 0x28
'Sui ', # 0x29
'Lu ', # 0x2a
'Juan ', # 0x2b
'Hui ', # 0x2c
'Yu ', # 0x2d
'Lian ', # 0x2e
'Zhuo ', # 0x2f
'Qiao ', # 0x30
'Qian ', # 0x31
'Zhuo ', # 0x32
'Lei ', # 0x33
'Bi ', # 0x34
'Tie ', # 0x35
'Huan ', # 0x36
'Ye ', # 0x37
'Duo ', # 0x38
'Guo ', # 0x39
'Dang ', # 0x3a
'Ju ', # 0x3b
'Fen ', # 0x3c
'Da ', # 0x3d
'Bei ', # 0x3e
'Yi ', # 0x3f
'Ai ', # 0x40
'Zong ', # 0x41
'Xun ', # 0x42
'Diao ', # 0x43
'Zhu ', # 0x44
'Heng ', # 0x45
'Zhui ', # 0x46
'Ji ', # 0x47
'Nie ', # 0x48
'Ta ', # 0x49
'Huo ', # 0x4a
'Qing ', # 0x4b
'Bin ', # 0x4c
'Ying ', # 0x4d
'Kui ', # 0x4e
'Ning ', # 0x4f
'Xu ', # 0x50
'Jian ', # 0x51
'Jian ', # 0x52
'Yari ', # 0x53
'Cha ', # 0x54
'Zhi ', # 0x55
'Mie ', # 0x56
'Li ', # 0x57
'Lei ', # 0x58
'Ji ', # 0x59
'Zuan ', # 0x5a
'Kuang ', # 0x5b
'Shang ', # 0x5c
'Peng ', # 0x5d
'La ', # 0x5e
'Du ', # 0x5f
'Shuo ', # 0x60
'Chuo ', # 0x61
'Lu ', # 0x62
'Biao ', # 0x63
'Bao ', # 0x64
'Lu ', # 0x65
'[?] ', # 0x66
'[?] ', # 0x67
'Long ', # 0x68
'E ', # 0x69
'Lu ', # 0x6a
'Xin ', # 0x6b
'Jian ', # 0x6c
'Lan ', # 0x6d
'Bo ', # 0x6e
'Jian ', # 0x6f
'Yao ', # 0x70
'Chan ', # 0x71
'Xiang ', # 0x72
'Jian ', # 0x73
'Xi ', # 0x74
'Guan ', # 0x75
'Cang ', # 0x76
'Nie ', # 0x77
'Lei ', # 0x78
'Cuan ', # 0x79
'Qu ', # 0x7a
'Pan ', # 0x7b
'Luo ', # 0x7c
'Zuan ', # 0x7d
'Luan ', # 0x7e
'Zao ', # 0x7f
'Nie ', # 0x80
'Jue ', # 0x81
'Tang ', # 0x82
'Shu ', # 0x83
'Lan ', # 0x84
'Jin ', # 0x85
'Qiu ', # 0x86
'Yi ', # 0x87
'Zhen ', # 0x88
'Ding ', # 0x89
'Zhao ', # 0x8a
'Po ', # 0x8b
'Diao ', # 0x8c
'Tu ', # 0x8d
'Qian ', # 0x8e
'Chuan ', # 0x8f
'Shan ', # 0x90
'Ji ', # 0x91
'Fan ', # 0x92
'Diao ', # 0x93
'Men ', # 0x94
'Nu ', # 0x95
'Xi ', # 0x96
'Chai ', # 0x97
'Xing ', # 0x98
'Gai ', # 0x99
'Bu ', # 0x9a
'Tai ', # 0x9b
'Ju ', # 0x9c
'Dun ', # 0x9d
'Chao ', # 0x9e
'Zhong ', # 0x9f
'Na ', # 0xa0
'Bei ', # 0xa1
'Gang ', # 0xa2
'Ban ', # 0xa3
'Qian ', # 0xa4
'Yao ', # 0xa5
'Qin ', # 0xa6
'Jun ', # 0xa7
'Wu ', # 0xa8
'Gou ', # 0xa9
'Kang ', # 0xaa
'Fang ', # 0xab
'Huo ', # 0xac
'Tou ', # 0xad
'Niu ', # 0xae
'Ba ', # 0xaf
'Yu ', # 0xb0
'Qian ', # 0xb1
'Zheng ', # 0xb2
'Qian ', # 0xb3
'Gu ', # 0xb4
'Bo ', # 0xb5
'E ', # 0xb6
'Po ', # 0xb7
'Bu ', # 0xb8
'Ba ', # 0xb9
'Yue ', # 0xba
'Zuan ', # 0xbb
'Mu ', # 0xbc
'Dan ', # 0xbd
'Jia ', # 0xbe
'Dian ', # 0xbf
'You ', # 0xc0
'Tie ', # 0xc1
'Bo ', # 0xc2
'Ling ', # 0xc3
'Shuo ', # 0xc4
'Qian ', # 0xc5
'Liu ', # 0xc6
'Bao ', # 0xc7
'Shi ', # 0xc8
'Xuan ', # 0xc9
'She ', # 0xca
'Bi ', # 0xcb
'Ni ', # 0xcc
'Pi ', # 0xcd
'Duo ', # 0xce
'Xing ', # 0xcf
'Kao ', # 0xd0
'Lao ', # 0xd1
'Er ', # 0xd2
'Mang ', # 0xd3
'Ya ', # 0xd4
'You ', # 0xd5
'Cheng ', # 0xd6
'Jia ', # 0xd7
'Ye ', # 0xd8
'Nao ', # 0xd9
'Zhi ', # 0xda
'Dang ', # 0xdb
'Tong ', # 0xdc
'Lu ', # 0xdd
'Diao ', # 0xde
'Yin ', # 0xdf
'Kai ', # 0xe0
'Zha ', # 0xe1
'Zhu ', # 0xe2
'Xian ', # 0xe3
'Ting ', # 0xe4
'Diu ', # 0xe5
'Xian ', # 0xe6
'Hua ', # 0xe7
'Quan ', # 0xe8
'Sha ', # 0xe9
'Jia ', # 0xea
'Yao ', # 0xeb
'Ge ', # 0xec
'Ming ', # 0xed
'Zheng ', # 0xee
'Se ', # 0xef
'Jiao ', # 0xf0
'Yi ', # 0xf1
'Chan ', # 0xf2
'Chong ', # 0xf3
'Tang ', # 0xf4
'An ', # 0xf5
'Yin ', # 0xf6
'Ru ', # 0xf7
'Zhu ', # 0xf8
'Lao ', # 0xf9
'Pu ', # 0xfa
'Wu ', # 0xfb
'Lai ', # 0xfc
'Te ', # 0xfd
'Lian ', # 0xfe
'Keng ', # 0xff
)
size: 4,661 | language: Python | extension: .py | total_lines: 258 | avg_line_length: 17.065891 | max_line_length: 19 | alphanum_fraction: 0.414717 | repo_name: rembo10/headphones | repo_stars: 3,370 | repo_forks: 601 | repo_open_issues: 527 | repo_license: GPL-3.0 | repo_extraction_date: 9/5/2024, 5:10:38 PM (Europe/Amsterdam)

id: 9,207 | file_name: x0b7.py | file_path: rembo10_headphones/lib/unidecode/x0b7.py

content:
data = (
'ddwim', # 0x00
'ddwib', # 0x01
'ddwibs', # 0x02
'ddwis', # 0x03
'ddwiss', # 0x04
'ddwing', # 0x05
'ddwij', # 0x06
'ddwic', # 0x07
'ddwik', # 0x08
'ddwit', # 0x09
'ddwip', # 0x0a
'ddwih', # 0x0b
'ddyu', # 0x0c
'ddyug', # 0x0d
'ddyugg', # 0x0e
'ddyugs', # 0x0f
'ddyun', # 0x10
'ddyunj', # 0x11
'ddyunh', # 0x12
'ddyud', # 0x13
'ddyul', # 0x14
'ddyulg', # 0x15
'ddyulm', # 0x16
'ddyulb', # 0x17
'ddyuls', # 0x18
'ddyult', # 0x19
'ddyulp', # 0x1a
'ddyulh', # 0x1b
'ddyum', # 0x1c
'ddyub', # 0x1d
'ddyubs', # 0x1e
'ddyus', # 0x1f
'ddyuss', # 0x20
'ddyung', # 0x21
'ddyuj', # 0x22
'ddyuc', # 0x23
'ddyuk', # 0x24
'ddyut', # 0x25
'ddyup', # 0x26
'ddyuh', # 0x27
'ddeu', # 0x28
'ddeug', # 0x29
'ddeugg', # 0x2a
'ddeugs', # 0x2b
'ddeun', # 0x2c
'ddeunj', # 0x2d
'ddeunh', # 0x2e
'ddeud', # 0x2f
'ddeul', # 0x30
'ddeulg', # 0x31
'ddeulm', # 0x32
'ddeulb', # 0x33
'ddeuls', # 0x34
'ddeult', # 0x35
'ddeulp', # 0x36
'ddeulh', # 0x37
'ddeum', # 0x38
'ddeub', # 0x39
'ddeubs', # 0x3a
'ddeus', # 0x3b
'ddeuss', # 0x3c
'ddeung', # 0x3d
'ddeuj', # 0x3e
'ddeuc', # 0x3f
'ddeuk', # 0x40
'ddeut', # 0x41
'ddeup', # 0x42
'ddeuh', # 0x43
'ddyi', # 0x44
'ddyig', # 0x45
'ddyigg', # 0x46
'ddyigs', # 0x47
'ddyin', # 0x48
'ddyinj', # 0x49
'ddyinh', # 0x4a
'ddyid', # 0x4b
'ddyil', # 0x4c
'ddyilg', # 0x4d
'ddyilm', # 0x4e
'ddyilb', # 0x4f
'ddyils', # 0x50
'ddyilt', # 0x51
'ddyilp', # 0x52
'ddyilh', # 0x53
'ddyim', # 0x54
'ddyib', # 0x55
'ddyibs', # 0x56
'ddyis', # 0x57
'ddyiss', # 0x58
'ddying', # 0x59
'ddyij', # 0x5a
'ddyic', # 0x5b
'ddyik', # 0x5c
'ddyit', # 0x5d
'ddyip', # 0x5e
'ddyih', # 0x5f
'ddi', # 0x60
'ddig', # 0x61
'ddigg', # 0x62
'ddigs', # 0x63
'ddin', # 0x64
'ddinj', # 0x65
'ddinh', # 0x66
'ddid', # 0x67
'ddil', # 0x68
'ddilg', # 0x69
'ddilm', # 0x6a
'ddilb', # 0x6b
'ddils', # 0x6c
'ddilt', # 0x6d
'ddilp', # 0x6e
'ddilh', # 0x6f
'ddim', # 0x70
'ddib', # 0x71
'ddibs', # 0x72
'ddis', # 0x73
'ddiss', # 0x74
'dding', # 0x75
'ddij', # 0x76
'ddic', # 0x77
'ddik', # 0x78
'ddit', # 0x79
'ddip', # 0x7a
'ddih', # 0x7b
'ra', # 0x7c
'rag', # 0x7d
'ragg', # 0x7e
'rags', # 0x7f
'ran', # 0x80
'ranj', # 0x81
'ranh', # 0x82
'rad', # 0x83
'ral', # 0x84
'ralg', # 0x85
'ralm', # 0x86
'ralb', # 0x87
'rals', # 0x88
'ralt', # 0x89
'ralp', # 0x8a
'ralh', # 0x8b
'ram', # 0x8c
'rab', # 0x8d
'rabs', # 0x8e
'ras', # 0x8f
'rass', # 0x90
'rang', # 0x91
'raj', # 0x92
'rac', # 0x93
'rak', # 0x94
'rat', # 0x95
'rap', # 0x96
'rah', # 0x97
'rae', # 0x98
'raeg', # 0x99
'raegg', # 0x9a
'raegs', # 0x9b
'raen', # 0x9c
'raenj', # 0x9d
'raenh', # 0x9e
'raed', # 0x9f
'rael', # 0xa0
'raelg', # 0xa1
'raelm', # 0xa2
'raelb', # 0xa3
'raels', # 0xa4
'raelt', # 0xa5
'raelp', # 0xa6
'raelh', # 0xa7
'raem', # 0xa8
'raeb', # 0xa9
'raebs', # 0xaa
'raes', # 0xab
'raess', # 0xac
'raeng', # 0xad
'raej', # 0xae
'raec', # 0xaf
'raek', # 0xb0
'raet', # 0xb1
'raep', # 0xb2
'raeh', # 0xb3
'rya', # 0xb4
'ryag', # 0xb5
'ryagg', # 0xb6
'ryags', # 0xb7
'ryan', # 0xb8
'ryanj', # 0xb9
'ryanh', # 0xba
'ryad', # 0xbb
'ryal', # 0xbc
'ryalg', # 0xbd
'ryalm', # 0xbe
'ryalb', # 0xbf
'ryals', # 0xc0
'ryalt', # 0xc1
'ryalp', # 0xc2
'ryalh', # 0xc3
'ryam', # 0xc4
'ryab', # 0xc5
'ryabs', # 0xc6
'ryas', # 0xc7
'ryass', # 0xc8
'ryang', # 0xc9
'ryaj', # 0xca
'ryac', # 0xcb
'ryak', # 0xcc
'ryat', # 0xcd
'ryap', # 0xce
'ryah', # 0xcf
'ryae', # 0xd0
'ryaeg', # 0xd1
'ryaegg', # 0xd2
'ryaegs', # 0xd3
'ryaen', # 0xd4
'ryaenj', # 0xd5
'ryaenh', # 0xd6
'ryaed', # 0xd7
'ryael', # 0xd8
'ryaelg', # 0xd9
'ryaelm', # 0xda
'ryaelb', # 0xdb
'ryaels', # 0xdc
'ryaelt', # 0xdd
'ryaelp', # 0xde
'ryaelh', # 0xdf
'ryaem', # 0xe0
'ryaeb', # 0xe1
'ryaebs', # 0xe2
'ryaes', # 0xe3
'ryaess', # 0xe4
'ryaeng', # 0xe5
'ryaej', # 0xe6
'ryaec', # 0xe7
'ryaek', # 0xe8
'ryaet', # 0xe9
'ryaep', # 0xea
'ryaeh', # 0xeb
'reo', # 0xec
'reog', # 0xed
'reogg', # 0xee
'reogs', # 0xef
'reon', # 0xf0
'reonj', # 0xf1
'reonh', # 0xf2
'reod', # 0xf3
'reol', # 0xf4
'reolg', # 0xf5
'reolm', # 0xf6
'reolb', # 0xf7
'reols', # 0xf8
'reolt', # 0xf9
'reolp', # 0xfa
'reolh', # 0xfb
'reom', # 0xfc
'reob', # 0xfd
'reobs', # 0xfe
'reos', # 0xff
)
size: 4,833 | language: Python | extension: .py | total_lines: 258 | avg_line_length: 17.732558 | max_line_length: 19 | alphanum_fraction: 0.495301 | repo_name: rembo10/headphones | repo_stars: 3,370 | repo_forks: 601 | repo_open_issues: 527 | repo_license: GPL-3.0 | repo_extraction_date: 9/5/2024, 5:10:38 PM (Europe/Amsterdam)

id: 9,208 | file_name: x031.py | file_path: rembo10_headphones/lib/unidecode/x031.py

content:
data = (
'[?]', # 0x00
'[?]', # 0x01
'[?]', # 0x02
'[?]', # 0x03
'[?]', # 0x04
'B', # 0x05
'P', # 0x06
'M', # 0x07
'F', # 0x08
'D', # 0x09
'T', # 0x0a
'N', # 0x0b
'L', # 0x0c
'G', # 0x0d
'K', # 0x0e
'H', # 0x0f
'J', # 0x10
'Q', # 0x11
'X', # 0x12
'ZH', # 0x13
'CH', # 0x14
'SH', # 0x15
'R', # 0x16
'Z', # 0x17
'C', # 0x18
'S', # 0x19
'A', # 0x1a
'O', # 0x1b
'E', # 0x1c
'EH', # 0x1d
'AI', # 0x1e
'EI', # 0x1f
'AU', # 0x20
'OU', # 0x21
'AN', # 0x22
'EN', # 0x23
'ANG', # 0x24
'ENG', # 0x25
'ER', # 0x26
'I', # 0x27
'U', # 0x28
'IU', # 0x29
'V', # 0x2a
'NG', # 0x2b
'GN', # 0x2c
'[?]', # 0x2d
'[?]', # 0x2e
'[?]', # 0x2f
'[?]', # 0x30
'g', # 0x31
'gg', # 0x32
'gs', # 0x33
'n', # 0x34
'nj', # 0x35
'nh', # 0x36
'd', # 0x37
'dd', # 0x38
'r', # 0x39
'lg', # 0x3a
'lm', # 0x3b
'lb', # 0x3c
'ls', # 0x3d
'lt', # 0x3e
'lp', # 0x3f
'rh', # 0x40
'm', # 0x41
'b', # 0x42
'bb', # 0x43
'bs', # 0x44
's', # 0x45
'ss', # 0x46
'', # 0x47
'j', # 0x48
'jj', # 0x49
'c', # 0x4a
'k', # 0x4b
't', # 0x4c
'p', # 0x4d
'h', # 0x4e
'a', # 0x4f
'ae', # 0x50
'ya', # 0x51
'yae', # 0x52
'eo', # 0x53
'e', # 0x54
'yeo', # 0x55
'ye', # 0x56
'o', # 0x57
'wa', # 0x58
'wae', # 0x59
'oe', # 0x5a
'yo', # 0x5b
'u', # 0x5c
'weo', # 0x5d
'we', # 0x5e
'wi', # 0x5f
'yu', # 0x60
'eu', # 0x61
'yi', # 0x62
'i', # 0x63
'', # 0x64
'nn', # 0x65
'nd', # 0x66
'ns', # 0x67
'nZ', # 0x68
'lgs', # 0x69
'ld', # 0x6a
'lbs', # 0x6b
'lZ', # 0x6c
'lQ', # 0x6d
'mb', # 0x6e
'ms', # 0x6f
'mZ', # 0x70
'mN', # 0x71
'bg', # 0x72
'', # 0x73
'bsg', # 0x74
'bst', # 0x75
'bj', # 0x76
'bt', # 0x77
'bN', # 0x78
'bbN', # 0x79
'sg', # 0x7a
'sn', # 0x7b
'sd', # 0x7c
'sb', # 0x7d
'sj', # 0x7e
'Z', # 0x7f
'', # 0x80
'N', # 0x81
'Ns', # 0x82
'NZ', # 0x83
'pN', # 0x84
'hh', # 0x85
'Q', # 0x86
'yo-ya', # 0x87
'yo-yae', # 0x88
'yo-i', # 0x89
'yu-yeo', # 0x8a
'yu-ye', # 0x8b
'yu-i', # 0x8c
'U', # 0x8d
'U-i', # 0x8e
'[?]', # 0x8f
'', # 0x90
'', # 0x91
'', # 0x92
'', # 0x93
'', # 0x94
'', # 0x95
'', # 0x96
'', # 0x97
'', # 0x98
'', # 0x99
'', # 0x9a
'', # 0x9b
'', # 0x9c
'', # 0x9d
'', # 0x9e
'', # 0x9f
'BU', # 0xa0
'ZI', # 0xa1
'JI', # 0xa2
'GU', # 0xa3
'EE', # 0xa4
'ENN', # 0xa5
'OO', # 0xa6
'ONN', # 0xa7
'IR', # 0xa8
'ANN', # 0xa9
'INN', # 0xaa
'UNN', # 0xab
'IM', # 0xac
'NGG', # 0xad
'AINN', # 0xae
'AUNN', # 0xaf
'AM', # 0xb0
'OM', # 0xb1
'ONG', # 0xb2
'INNN', # 0xb3
'P', # 0xb4
'T', # 0xb5
'K', # 0xb6
'H', # 0xb7
'[?]', # 0xb8
'[?]', # 0xb9
'[?]', # 0xba
'[?]', # 0xbb
'[?]', # 0xbc
'[?]', # 0xbd
'[?]', # 0xbe
'[?]', # 0xbf
'[?]', # 0xc0
'[?]', # 0xc1
'[?]', # 0xc2
'[?]', # 0xc3
'[?]', # 0xc4
'[?]', # 0xc5
'[?]', # 0xc6
'[?]', # 0xc7
'[?]', # 0xc8
'[?]', # 0xc9
'[?]', # 0xca
'[?]', # 0xcb
'[?]', # 0xcc
'[?]', # 0xcd
'[?]', # 0xce
'[?]', # 0xcf
'[?]', # 0xd0
'[?]', # 0xd1
'[?]', # 0xd2
'[?]', # 0xd3
'[?]', # 0xd4
'[?]', # 0xd5
'[?]', # 0xd6
'[?]', # 0xd7
'[?]', # 0xd8
'[?]', # 0xd9
'[?]', # 0xda
'[?]', # 0xdb
'[?]', # 0xdc
'[?]', # 0xdd
'[?]', # 0xde
'[?]', # 0xdf
'[?]', # 0xe0
'[?]', # 0xe1
'[?]', # 0xe2
'[?]', # 0xe3
'[?]', # 0xe4
'[?]', # 0xe5
'[?]', # 0xe6
'[?]', # 0xe7
'[?]', # 0xe8
'[?]', # 0xe9
'[?]', # 0xea
'[?]', # 0xeb
'[?]', # 0xec
'[?]', # 0xed
'[?]', # 0xee
'[?]', # 0xef
'[?]', # 0xf0
'[?]', # 0xf1
'[?]', # 0xf2
'[?]', # 0xf3
'[?]', # 0xf4
'[?]', # 0xf5
'[?]', # 0xf6
'[?]', # 0xf7
'[?]', # 0xf8
'[?]', # 0xf9
'[?]', # 0xfa
'[?]', # 0xfb
'[?]', # 0xfc
'[?]', # 0xfd
'[?]', # 0xfe
)
size: 4,125 | language: Python | extension: .py | total_lines: 257 | avg_line_length: 15.050584 | max_line_length: 19 | alphanum_fraction: 0.340745 | repo_name: rembo10/headphones | repo_stars: 3,370 | repo_forks: 601 | repo_open_issues: 527 | repo_license: GPL-3.0 | repo_extraction_date: 9/5/2024, 5:10:38 PM (Europe/Amsterdam)

id: 9,209 | file_name: x09c.py | file_path: rembo10_headphones/lib/unidecode/x09c.py

content:
data = (
'Huan ', # 0x00
'Quan ', # 0x01
'Ze ', # 0x02
'Wei ', # 0x03
'Wei ', # 0x04
'Yu ', # 0x05
'Qun ', # 0x06
'Rou ', # 0x07
'Die ', # 0x08
'Huang ', # 0x09
'Lian ', # 0x0a
'Yan ', # 0x0b
'Qiu ', # 0x0c
'Qiu ', # 0x0d
'Jian ', # 0x0e
'Bi ', # 0x0f
'E ', # 0x10
'Yang ', # 0x11
'Fu ', # 0x12
'Sai ', # 0x13
'Jian ', # 0x14
'Xia ', # 0x15
'Tuo ', # 0x16
'Hu ', # 0x17
'Muroaji ', # 0x18
'Ruo ', # 0x19
'Haraka ', # 0x1a
'Wen ', # 0x1b
'Jian ', # 0x1c
'Hao ', # 0x1d
'Wu ', # 0x1e
'Fang ', # 0x1f
'Sao ', # 0x20
'Liu ', # 0x21
'Ma ', # 0x22
'Shi ', # 0x23
'Shi ', # 0x24
'Yin ', # 0x25
'Z ', # 0x26
'Teng ', # 0x27
'Ta ', # 0x28
'Yao ', # 0x29
'Ge ', # 0x2a
'Rong ', # 0x2b
'Qian ', # 0x2c
'Qi ', # 0x2d
'Wen ', # 0x2e
'Ruo ', # 0x2f
'Hatahata ', # 0x30
'Lian ', # 0x31
'Ao ', # 0x32
'Le ', # 0x33
'Hui ', # 0x34
'Min ', # 0x35
'Ji ', # 0x36
'Tiao ', # 0x37
'Qu ', # 0x38
'Jian ', # 0x39
'Sao ', # 0x3a
'Man ', # 0x3b
'Xi ', # 0x3c
'Qiu ', # 0x3d
'Biao ', # 0x3e
'Ji ', # 0x3f
'Ji ', # 0x40
'Zhu ', # 0x41
'Jiang ', # 0x42
'Qiu ', # 0x43
'Zhuan ', # 0x44
'Yong ', # 0x45
'Zhang ', # 0x46
'Kang ', # 0x47
'Xue ', # 0x48
'Bie ', # 0x49
'Jue ', # 0x4a
'Qu ', # 0x4b
'Xiang ', # 0x4c
'Bo ', # 0x4d
'Jiao ', # 0x4e
'Xun ', # 0x4f
'Su ', # 0x50
'Huang ', # 0x51
'Zun ', # 0x52
'Shan ', # 0x53
'Shan ', # 0x54
'Fan ', # 0x55
'Jue ', # 0x56
'Lin ', # 0x57
'Xun ', # 0x58
'Miao ', # 0x59
'Xi ', # 0x5a
'Eso ', # 0x5b
'Kyou ', # 0x5c
'Fen ', # 0x5d
'Guan ', # 0x5e
'Hou ', # 0x5f
'Kuai ', # 0x60
'Zei ', # 0x61
'Sao ', # 0x62
'Zhan ', # 0x63
'Gan ', # 0x64
'Gui ', # 0x65
'Sheng ', # 0x66
'Li ', # 0x67
'Chang ', # 0x68
'Hatahata ', # 0x69
'Shiira ', # 0x6a
'Mutsu ', # 0x6b
'Ru ', # 0x6c
'Ji ', # 0x6d
'Xu ', # 0x6e
'Huo ', # 0x6f
'Shiira ', # 0x70
'Li ', # 0x71
'Lie ', # 0x72
'Li ', # 0x73
'Mie ', # 0x74
'Zhen ', # 0x75
'Xiang ', # 0x76
'E ', # 0x77
'Lu ', # 0x78
'Guan ', # 0x79
'Li ', # 0x7a
'Xian ', # 0x7b
'Yu ', # 0x7c
'Dao ', # 0x7d
'Ji ', # 0x7e
'You ', # 0x7f
'Tun ', # 0x80
'Lu ', # 0x81
'Fang ', # 0x82
'Ba ', # 0x83
'He ', # 0x84
'Bo ', # 0x85
'Ping ', # 0x86
'Nian ', # 0x87
'Lu ', # 0x88
'You ', # 0x89
'Zha ', # 0x8a
'Fu ', # 0x8b
'Bo ', # 0x8c
'Bao ', # 0x8d
'Hou ', # 0x8e
'Pi ', # 0x8f
'Tai ', # 0x90
'Gui ', # 0x91
'Jie ', # 0x92
'Kao ', # 0x93
'Wei ', # 0x94
'Er ', # 0x95
'Tong ', # 0x96
'Ze ', # 0x97
'Hou ', # 0x98
'Kuai ', # 0x99
'Ji ', # 0x9a
'Jiao ', # 0x9b
'Xian ', # 0x9c
'Za ', # 0x9d
'Xiang ', # 0x9e
'Xun ', # 0x9f
'Geng ', # 0xa0
'Li ', # 0xa1
'Lian ', # 0xa2
'Jian ', # 0xa3
'Li ', # 0xa4
'Shi ', # 0xa5
'Tiao ', # 0xa6
'Gun ', # 0xa7
'Sha ', # 0xa8
'Wan ', # 0xa9
'Jun ', # 0xaa
'Ji ', # 0xab
'Yong ', # 0xac
'Qing ', # 0xad
'Ling ', # 0xae
'Qi ', # 0xaf
'Zou ', # 0xb0
'Fei ', # 0xb1
'Kun ', # 0xb2
'Chang ', # 0xb3
'Gu ', # 0xb4
'Ni ', # 0xb5
'Nian ', # 0xb6
'Diao ', # 0xb7
'Jing ', # 0xb8
'Shen ', # 0xb9
'Shi ', # 0xba
'Zi ', # 0xbb
'Fen ', # 0xbc
'Die ', # 0xbd
'Bi ', # 0xbe
'Chang ', # 0xbf
'Shi ', # 0xc0
'Wen ', # 0xc1
'Wei ', # 0xc2
'Sai ', # 0xc3
'E ', # 0xc4
'Qiu ', # 0xc5
'Fu ', # 0xc6
'Huang ', # 0xc7
'Quan ', # 0xc8
'Jiang ', # 0xc9
'Bian ', # 0xca
'Sao ', # 0xcb
'Ao ', # 0xcc
'Qi ', # 0xcd
'Ta ', # 0xce
'Yin ', # 0xcf
'Yao ', # 0xd0
'Fang ', # 0xd1
'Jian ', # 0xd2
'Le ', # 0xd3
'Biao ', # 0xd4
'Xue ', # 0xd5
'Bie ', # 0xd6
'Man ', # 0xd7
'Min ', # 0xd8
'Yong ', # 0xd9
'Wei ', # 0xda
'Xi ', # 0xdb
'Jue ', # 0xdc
'Shan ', # 0xdd
'Lin ', # 0xde
'Zun ', # 0xdf
'Huo ', # 0xe0
'Gan ', # 0xe1
'Li ', # 0xe2
'Zhan ', # 0xe3
'Guan ', # 0xe4
'Niao ', # 0xe5
'Yi ', # 0xe6
'Fu ', # 0xe7
'Li ', # 0xe8
'Jiu ', # 0xe9
'Bu ', # 0xea
'Yan ', # 0xeb
'Fu ', # 0xec
'Diao ', # 0xed
'Ji ', # 0xee
'Feng ', # 0xef
'Nio ', # 0xf0
'Gan ', # 0xf1
'Shi ', # 0xf2
'Feng ', # 0xf3
'Ming ', # 0xf4
'Bao ', # 0xf5
'Yuan ', # 0xf6
'Zhi ', # 0xf7
'Hu ', # 0xf8
'Qin ', # 0xf9
'Fu ', # 0xfa
'Fen ', # 0xfb
'Wen ', # 0xfc
'Jian ', # 0xfd
'Shi ', # 0xfe
'Yu ', # 0xff
)
size: 4,659 | language: Python | extension: .py | total_lines: 258 | avg_line_length: 17.05814 | max_line_length: 22 | alphanum_fraction: 0.417178 | repo_name: rembo10/headphones | repo_stars: 3,370 | repo_forks: 601 | repo_open_issues: 527 | repo_license: GPL-3.0 | repo_extraction_date: 9/5/2024, 5:10:38 PM (Europe/Amsterdam)

id: 9,210 | file_name: x080.py | file_path: rembo10_headphones/lib/unidecode/x080.py

content:
data = (
'Yao ', # 0x00
'Lao ', # 0x01
'[?] ', # 0x02
'Kao ', # 0x03
'Mao ', # 0x04
'Zhe ', # 0x05
'Qi ', # 0x06
'Gou ', # 0x07
'Gou ', # 0x08
'Gou ', # 0x09
'Die ', # 0x0a
'Die ', # 0x0b
'Er ', # 0x0c
'Shua ', # 0x0d
'Ruan ', # 0x0e
'Er ', # 0x0f
'Nai ', # 0x10
'Zhuan ', # 0x11
'Lei ', # 0x12
'Ting ', # 0x13
'Zi ', # 0x14
'Geng ', # 0x15
'Chao ', # 0x16
'Hao ', # 0x17
'Yun ', # 0x18
'Pa ', # 0x19
'Pi ', # 0x1a
'Chi ', # 0x1b
'Si ', # 0x1c
'Chu ', # 0x1d
'Jia ', # 0x1e
'Ju ', # 0x1f
'He ', # 0x20
'Chu ', # 0x21
'Lao ', # 0x22
'Lun ', # 0x23
'Ji ', # 0x24
'Tang ', # 0x25
'Ou ', # 0x26
'Lou ', # 0x27
'Nou ', # 0x28
'Gou ', # 0x29
'Pang ', # 0x2a
'Ze ', # 0x2b
'Lou ', # 0x2c
'Ji ', # 0x2d
'Lao ', # 0x2e
'Huo ', # 0x2f
'You ', # 0x30
'Mo ', # 0x31
'Huai ', # 0x32
'Er ', # 0x33
'Zhe ', # 0x34
'Ting ', # 0x35
'Ye ', # 0x36
'Da ', # 0x37
'Song ', # 0x38
'Qin ', # 0x39
'Yun ', # 0x3a
'Chi ', # 0x3b
'Dan ', # 0x3c
'Dan ', # 0x3d
'Hong ', # 0x3e
'Geng ', # 0x3f
'Zhi ', # 0x40
'[?] ', # 0x41
'Nie ', # 0x42
'Dan ', # 0x43
'Zhen ', # 0x44
'Che ', # 0x45
'Ling ', # 0x46
'Zheng ', # 0x47
'You ', # 0x48
'Wa ', # 0x49
'Liao ', # 0x4a
'Long ', # 0x4b
'Zhi ', # 0x4c
'Ning ', # 0x4d
'Tiao ', # 0x4e
'Er ', # 0x4f
'Ya ', # 0x50
'Die ', # 0x51
'Gua ', # 0x52
'[?] ', # 0x53
'Lian ', # 0x54
'Hao ', # 0x55
'Sheng ', # 0x56
'Lie ', # 0x57
'Pin ', # 0x58
'Jing ', # 0x59
'Ju ', # 0x5a
'Bi ', # 0x5b
'Di ', # 0x5c
'Guo ', # 0x5d
'Wen ', # 0x5e
'Xu ', # 0x5f
'Ping ', # 0x60
'Cong ', # 0x61
'Shikato ', # 0x62
'[?] ', # 0x63
'Ting ', # 0x64
'Yu ', # 0x65
'Cong ', # 0x66
'Kui ', # 0x67
'Tsuraneru ', # 0x68
'Kui ', # 0x69
'Cong ', # 0x6a
'Lian ', # 0x6b
'Weng ', # 0x6c
'Kui ', # 0x6d
'Lian ', # 0x6e
'Lian ', # 0x6f
'Cong ', # 0x70
'Ao ', # 0x71
'Sheng ', # 0x72
'Song ', # 0x73
'Ting ', # 0x74
'Kui ', # 0x75
'Nie ', # 0x76
'Zhi ', # 0x77
'Dan ', # 0x78
'Ning ', # 0x79
'Qie ', # 0x7a
'Ji ', # 0x7b
'Ting ', # 0x7c
'Ting ', # 0x7d
'Long ', # 0x7e
'Yu ', # 0x7f
'Yu ', # 0x80
'Zhao ', # 0x81
'Si ', # 0x82
'Su ', # 0x83
'Yi ', # 0x84
'Su ', # 0x85
'Si ', # 0x86
'Zhao ', # 0x87
'Zhao ', # 0x88
'Rou ', # 0x89
'Yi ', # 0x8a
'Le ', # 0x8b
'Ji ', # 0x8c
'Qiu ', # 0x8d
'Ken ', # 0x8e
'Cao ', # 0x8f
'Ge ', # 0x90
'Di ', # 0x91
'Huan ', # 0x92
'Huang ', # 0x93
'Yi ', # 0x94
'Ren ', # 0x95
'Xiao ', # 0x96
'Ru ', # 0x97
'Zhou ', # 0x98
'Yuan ', # 0x99
'Du ', # 0x9a
'Gang ', # 0x9b
'Rong ', # 0x9c
'Gan ', # 0x9d
'Cha ', # 0x9e
'Wo ', # 0x9f
'Chang ', # 0xa0
'Gu ', # 0xa1
'Zhi ', # 0xa2
'Han ', # 0xa3
'Fu ', # 0xa4
'Fei ', # 0xa5
'Fen ', # 0xa6
'Pei ', # 0xa7
'Pang ', # 0xa8
'Jian ', # 0xa9
'Fang ', # 0xaa
'Zhun ', # 0xab
'You ', # 0xac
'Na ', # 0xad
'Hang ', # 0xae
'Ken ', # 0xaf
'Ran ', # 0xb0
'Gong ', # 0xb1
'Yu ', # 0xb2
'Wen ', # 0xb3
'Yao ', # 0xb4
'Jin ', # 0xb5
'Pi ', # 0xb6
'Qian ', # 0xb7
'Xi ', # 0xb8
'Xi ', # 0xb9
'Fei ', # 0xba
'Ken ', # 0xbb
'Jing ', # 0xbc
'Tai ', # 0xbd
'Shen ', # 0xbe
'Zhong ', # 0xbf
'Zhang ', # 0xc0
'Xie ', # 0xc1
'Shen ', # 0xc2
'Wei ', # 0xc3
'Zhou ', # 0xc4
'Die ', # 0xc5
'Dan ', # 0xc6
'Fei ', # 0xc7
'Ba ', # 0xc8
'Bo ', # 0xc9
'Qu ', # 0xca
'Tian ', # 0xcb
'Bei ', # 0xcc
'Gua ', # 0xcd
'Tai ', # 0xce
'Zi ', # 0xcf
'Ku ', # 0xd0
'Zhi ', # 0xd1
'Ni ', # 0xd2
'Ping ', # 0xd3
'Zi ', # 0xd4
'Fu ', # 0xd5
'Pang ', # 0xd6
'Zhen ', # 0xd7
'Xian ', # 0xd8
'Zuo ', # 0xd9
'Pei ', # 0xda
'Jia ', # 0xdb
'Sheng ', # 0xdc
'Zhi ', # 0xdd
'Bao ', # 0xde
'Mu ', # 0xdf
'Qu ', # 0xe0
'Hu ', # 0xe1
'Ke ', # 0xe2
'Yi ', # 0xe3
'Yin ', # 0xe4
'Xu ', # 0xe5
'Yang ', # 0xe6
'Long ', # 0xe7
'Dong ', # 0xe8
'Ka ', # 0xe9
'Lu ', # 0xea
'Jing ', # 0xeb
'Nu ', # 0xec
'Yan ', # 0xed
'Pang ', # 0xee
'Kua ', # 0xef
'Yi ', # 0xf0
'Guang ', # 0xf1
'Gai ', # 0xf2
'Ge ', # 0xf3
'Dong ', # 0xf4
'Zhi ', # 0xf5
'Xiao ', # 0xf6
'Xiong ', # 0xf7
'Xiong ', # 0xf8
'Er ', # 0xf9
'E ', # 0xfa
'Xing ', # 0xfb
'Pian ', # 0xfc
'Neng ', # 0xfd
'Zi ', # 0xfe
'Gui ', # 0xff
)
size: 4,651 | language: Python | extension: .py | total_lines: 258 | avg_line_length: 17.027132 | max_line_length: 23 | alphanum_fraction: 0.413385 | repo_name: rembo10/headphones | repo_stars: 3,370 | repo_forks: 601 | repo_open_issues: 527 | repo_license: GPL-3.0 | repo_extraction_date: 9/5/2024, 5:10:38 PM (Europe/Amsterdam)

id: 9,211 | file_name: x0d7.py | file_path: rembo10_headphones/lib/unidecode/x0d7.py

content:
data = (
'hwen', # 0x00
'hwenj', # 0x01
'hwenh', # 0x02
'hwed', # 0x03
'hwel', # 0x04
'hwelg', # 0x05
'hwelm', # 0x06
'hwelb', # 0x07
'hwels', # 0x08
'hwelt', # 0x09
'hwelp', # 0x0a
'hwelh', # 0x0b
'hwem', # 0x0c
'hweb', # 0x0d
'hwebs', # 0x0e
'hwes', # 0x0f
'hwess', # 0x10
'hweng', # 0x11
'hwej', # 0x12
'hwec', # 0x13
'hwek', # 0x14
'hwet', # 0x15
'hwep', # 0x16
'hweh', # 0x17
'hwi', # 0x18
'hwig', # 0x19
'hwigg', # 0x1a
'hwigs', # 0x1b
'hwin', # 0x1c
'hwinj', # 0x1d
'hwinh', # 0x1e
'hwid', # 0x1f
'hwil', # 0x20
'hwilg', # 0x21
'hwilm', # 0x22
'hwilb', # 0x23
'hwils', # 0x24
'hwilt', # 0x25
'hwilp', # 0x26
'hwilh', # 0x27
'hwim', # 0x28
'hwib', # 0x29
'hwibs', # 0x2a
'hwis', # 0x2b
'hwiss', # 0x2c
'hwing', # 0x2d
'hwij', # 0x2e
'hwic', # 0x2f
'hwik', # 0x30
'hwit', # 0x31
'hwip', # 0x32
'hwih', # 0x33
'hyu', # 0x34
'hyug', # 0x35
'hyugg', # 0x36
'hyugs', # 0x37
'hyun', # 0x38
'hyunj', # 0x39
'hyunh', # 0x3a
'hyud', # 0x3b
'hyul', # 0x3c
'hyulg', # 0x3d
'hyulm', # 0x3e
'hyulb', # 0x3f
'hyuls', # 0x40
'hyult', # 0x41
'hyulp', # 0x42
'hyulh', # 0x43
'hyum', # 0x44
'hyub', # 0x45
'hyubs', # 0x46
'hyus', # 0x47
'hyuss', # 0x48
'hyung', # 0x49
'hyuj', # 0x4a
'hyuc', # 0x4b
'hyuk', # 0x4c
'hyut', # 0x4d
'hyup', # 0x4e
'hyuh', # 0x4f
'heu', # 0x50
'heug', # 0x51
'heugg', # 0x52
'heugs', # 0x53
'heun', # 0x54
'heunj', # 0x55
'heunh', # 0x56
'heud', # 0x57
'heul', # 0x58
'heulg', # 0x59
'heulm', # 0x5a
'heulb', # 0x5b
'heuls', # 0x5c
'heult', # 0x5d
'heulp', # 0x5e
'heulh', # 0x5f
'heum', # 0x60
'heub', # 0x61
'heubs', # 0x62
'heus', # 0x63
'heuss', # 0x64
'heung', # 0x65
'heuj', # 0x66
'heuc', # 0x67
'heuk', # 0x68
'heut', # 0x69
'heup', # 0x6a
'heuh', # 0x6b
'hyi', # 0x6c
'hyig', # 0x6d
'hyigg', # 0x6e
'hyigs', # 0x6f
'hyin', # 0x70
'hyinj', # 0x71
'hyinh', # 0x72
'hyid', # 0x73
'hyil', # 0x74
'hyilg', # 0x75
'hyilm', # 0x76
'hyilb', # 0x77
'hyils', # 0x78
'hyilt', # 0x79
'hyilp', # 0x7a
'hyilh', # 0x7b
'hyim', # 0x7c
'hyib', # 0x7d
'hyibs', # 0x7e
'hyis', # 0x7f
'hyiss', # 0x80
'hying', # 0x81
'hyij', # 0x82
'hyic', # 0x83
'hyik', # 0x84
'hyit', # 0x85
'hyip', # 0x86
'hyih', # 0x87
'hi', # 0x88
'hig', # 0x89
'higg', # 0x8a
'higs', # 0x8b
'hin', # 0x8c
'hinj', # 0x8d
'hinh', # 0x8e
'hid', # 0x8f
'hil', # 0x90
'hilg', # 0x91
'hilm', # 0x92
'hilb', # 0x93
'hils', # 0x94
'hilt', # 0x95
'hilp', # 0x96
'hilh', # 0x97
'him', # 0x98
'hib', # 0x99
'hibs', # 0x9a
'his', # 0x9b
'hiss', # 0x9c
'hing', # 0x9d
'hij', # 0x9e
'hic', # 0x9f
'hik', # 0xa0
'hit', # 0xa1
'hip', # 0xa2
'hih', # 0xa3
'[?]', # 0xa4
'[?]', # 0xa5
'[?]', # 0xa6
'[?]', # 0xa7
'[?]', # 0xa8
'[?]', # 0xa9
'[?]', # 0xaa
'[?]', # 0xab
'[?]', # 0xac
'[?]', # 0xad
'[?]', # 0xae
'[?]', # 0xaf
'[?]', # 0xb0
'[?]', # 0xb1
'[?]', # 0xb2
'[?]', # 0xb3
'[?]', # 0xb4
'[?]', # 0xb5
'[?]', # 0xb6
'[?]', # 0xb7
'[?]', # 0xb8
'[?]', # 0xb9
'[?]', # 0xba
'[?]', # 0xbb
'[?]', # 0xbc
'[?]', # 0xbd
'[?]', # 0xbe
'[?]', # 0xbf
'[?]', # 0xc0
'[?]', # 0xc1
'[?]', # 0xc2
'[?]', # 0xc3
'[?]', # 0xc4
'[?]', # 0xc5
'[?]', # 0xc6
'[?]', # 0xc7
'[?]', # 0xc8
'[?]', # 0xc9
'[?]', # 0xca
'[?]', # 0xcb
'[?]', # 0xcc
'[?]', # 0xcd
'[?]', # 0xce
'[?]', # 0xcf
'[?]', # 0xd0
'[?]', # 0xd1
'[?]', # 0xd2
'[?]', # 0xd3
'[?]', # 0xd4
'[?]', # 0xd5
'[?]', # 0xd6
'[?]', # 0xd7
'[?]', # 0xd8
'[?]', # 0xd9
'[?]', # 0xda
'[?]', # 0xdb
'[?]', # 0xdc
'[?]', # 0xdd
'[?]', # 0xde
'[?]', # 0xdf
'[?]', # 0xe0
'[?]', # 0xe1
'[?]', # 0xe2
'[?]', # 0xe3
'[?]', # 0xe4
'[?]', # 0xe5
'[?]', # 0xe6
'[?]', # 0xe7
'[?]', # 0xe8
'[?]', # 0xe9
'[?]', # 0xea
'[?]', # 0xeb
'[?]', # 0xec
'[?]', # 0xed
'[?]', # 0xee
'[?]', # 0xef
'[?]', # 0xf0
'[?]', # 0xf1
'[?]', # 0xf2
'[?]', # 0xf3
'[?]', # 0xf4
'[?]', # 0xf5
'[?]', # 0xf6
'[?]', # 0xf7
'[?]', # 0xf8
'[?]', # 0xf9
'[?]', # 0xfa
'[?]', # 0xfb
'[?]', # 0xfc
'[?]', # 0xfd
'[?]', # 0xfe
)
size: 4,559 | language: Python | extension: .py | total_lines: 257 | avg_line_length: 16.7393 | max_line_length: 18 | alphanum_fraction: 0.401906 | repo_name: rembo10/headphones | repo_stars: 3,370 | repo_forks: 601 | repo_open_issues: 527 | repo_license: GPL-3.0 | repo_extraction_date: 9/5/2024, 5:10:38 PM (Europe/Amsterdam)

id: 9,212 | file_name: x06d.py | file_path: rembo10_headphones/lib/unidecode/x06d.py

content:
data = (
'Zhou ', # 0x00
'Ji ', # 0x01
'Yi ', # 0x02
'Hui ', # 0x03
'Hui ', # 0x04
'Zui ', # 0x05
'Cheng ', # 0x06
'Yin ', # 0x07
'Wei ', # 0x08
'Hou ', # 0x09
'Jian ', # 0x0a
'Yang ', # 0x0b
'Lie ', # 0x0c
'Si ', # 0x0d
'Ji ', # 0x0e
'Er ', # 0x0f
'Xing ', # 0x10
'Fu ', # 0x11
'Sa ', # 0x12
'Suo ', # 0x13
'Zhi ', # 0x14
'Yin ', # 0x15
'Wu ', # 0x16
'Xi ', # 0x17
'Kao ', # 0x18
'Zhu ', # 0x19
'Jiang ', # 0x1a
'Luo ', # 0x1b
'[?] ', # 0x1c
'An ', # 0x1d
'Dong ', # 0x1e
'Yi ', # 0x1f
'Mou ', # 0x20
'Lei ', # 0x21
'Yi ', # 0x22
'Mi ', # 0x23
'Quan ', # 0x24
'Jin ', # 0x25
'Mo ', # 0x26
'Wei ', # 0x27
'Xiao ', # 0x28
'Xie ', # 0x29
'Hong ', # 0x2a
'Xu ', # 0x2b
'Shuo ', # 0x2c
'Kuang ', # 0x2d
'Tao ', # 0x2e
'Qie ', # 0x2f
'Ju ', # 0x30
'Er ', # 0x31
'Zhou ', # 0x32
'Ru ', # 0x33
'Ping ', # 0x34
'Xun ', # 0x35
'Xiong ', # 0x36
'Zhi ', # 0x37
'Guang ', # 0x38
'Huan ', # 0x39
'Ming ', # 0x3a
'Huo ', # 0x3b
'Wa ', # 0x3c
'Qia ', # 0x3d
'Pai ', # 0x3e
'Wu ', # 0x3f
'Qu ', # 0x40
'Liu ', # 0x41
'Yi ', # 0x42
'Jia ', # 0x43
'Jing ', # 0x44
'Qian ', # 0x45
'Jiang ', # 0x46
'Jiao ', # 0x47
'Cheng ', # 0x48
'Shi ', # 0x49
'Zhuo ', # 0x4a
'Ce ', # 0x4b
'Pal ', # 0x4c
'Kuai ', # 0x4d
'Ji ', # 0x4e
'Liu ', # 0x4f
'Chan ', # 0x50
'Hun ', # 0x51
'Hu ', # 0x52
'Nong ', # 0x53
'Xun ', # 0x54
'Jin ', # 0x55
'Lie ', # 0x56
'Qiu ', # 0x57
'Wei ', # 0x58
'Zhe ', # 0x59
'Jun ', # 0x5a
'Han ', # 0x5b
'Bang ', # 0x5c
'Mang ', # 0x5d
'Zhuo ', # 0x5e
'You ', # 0x5f
'Xi ', # 0x60
'Bo ', # 0x61
'Dou ', # 0x62
'Wan ', # 0x63
'Hong ', # 0x64
'Yi ', # 0x65
'Pu ', # 0x66
'Ying ', # 0x67
'Lan ', # 0x68
'Hao ', # 0x69
'Lang ', # 0x6a
'Han ', # 0x6b
'Li ', # 0x6c
'Geng ', # 0x6d
'Fu ', # 0x6e
'Wu ', # 0x6f
'Lian ', # 0x70
'Chun ', # 0x71
'Feng ', # 0x72
'Yi ', # 0x73
'Yu ', # 0x74
'Tong ', # 0x75
'Lao ', # 0x76
'Hai ', # 0x77
'Jin ', # 0x78
'Jia ', # 0x79
'Chong ', # 0x7a
'Weng ', # 0x7b
'Mei ', # 0x7c
'Sui ', # 0x7d
'Cheng ', # 0x7e
'Pei ', # 0x7f
'Xian ', # 0x80
'Shen ', # 0x81
'Tu ', # 0x82
'Kun ', # 0x83
'Pin ', # 0x84
'Nie ', # 0x85
'Han ', # 0x86
'Jing ', # 0x87
'Xiao ', # 0x88
'She ', # 0x89
'Nian ', # 0x8a
'Tu ', # 0x8b
'Yong ', # 0x8c
'Xiao ', # 0x8d
'Xian ', # 0x8e
'Ting ', # 0x8f
'E ', # 0x90
'Su ', # 0x91
'Tun ', # 0x92
'Juan ', # 0x93
'Cen ', # 0x94
'Ti ', # 0x95
'Li ', # 0x96
'Shui ', # 0x97
'Si ', # 0x98
'Lei ', # 0x99
'Shui ', # 0x9a
'Tao ', # 0x9b
'Du ', # 0x9c
'Lao ', # 0x9d
'Lai ', # 0x9e
'Lian ', # 0x9f
'Wei ', # 0xa0
'Wo ', # 0xa1
'Yun ', # 0xa2
'Huan ', # 0xa3
'Di ', # 0xa4
'[?] ', # 0xa5
'Run ', # 0xa6
'Jian ', # 0xa7
'Zhang ', # 0xa8
'Se ', # 0xa9
'Fu ', # 0xaa
'Guan ', # 0xab
'Xing ', # 0xac
'Shou ', # 0xad
'Shuan ', # 0xae
'Ya ', # 0xaf
'Chuo ', # 0xb0
'Zhang ', # 0xb1
'Ye ', # 0xb2
'Kong ', # 0xb3
'Wo ', # 0xb4
'Han ', # 0xb5
'Tuo ', # 0xb6
'Dong ', # 0xb7
'He ', # 0xb8
'Wo ', # 0xb9
'Ju ', # 0xba
'Gan ', # 0xbb
'Liang ', # 0xbc
'Hun ', # 0xbd
'Ta ', # 0xbe
'Zhuo ', # 0xbf
'Dian ', # 0xc0
'Qie ', # 0xc1
'De ', # 0xc2
'Juan ', # 0xc3
'Zi ', # 0xc4
'Xi ', # 0xc5
'Yao ', # 0xc6
'Qi ', # 0xc7
'Gu ', # 0xc8
'Guo ', # 0xc9
'Han ', # 0xca
'Lin ', # 0xcb
'Tang ', # 0xcc
'Zhou ', # 0xcd
'Peng ', # 0xce
'Hao ', # 0xcf
'Chang ', # 0xd0
'Shu ', # 0xd1
'Qi ', # 0xd2
'Fang ', # 0xd3
'Chi ', # 0xd4
'Lu ', # 0xd5
'Nao ', # 0xd6
'Ju ', # 0xd7
'Tao ', # 0xd8
'Cong ', # 0xd9
'Lei ', # 0xda
'Zhi ', # 0xdb
'Peng ', # 0xdc
'Fei ', # 0xdd
'Song ', # 0xde
'Tian ', # 0xdf
'Pi ', # 0xe0
'Dan ', # 0xe1
'Yu ', # 0xe2
'Ni ', # 0xe3
'Yu ', # 0xe4
'Lu ', # 0xe5
'Gan ', # 0xe6
'Mi ', # 0xe7
'Jing ', # 0xe8
'Ling ', # 0xe9
'Lun ', # 0xea
'Yin ', # 0xeb
'Cui ', # 0xec
'Qu ', # 0xed
'Huai ', # 0xee
'Yu ', # 0xef
'Nian ', # 0xf0
'Shen ', # 0xf1
'Piao ', # 0xf2
'Chun ', # 0xf3
'Wa ', # 0xf4
'Yuan ', # 0xf5
'Lai ', # 0xf6
'Hun ', # 0xf7
'Qing ', # 0xf8
'Yan ', # 0xf9
'Qian ', # 0xfa
'Tian ', # 0xfb
'Miao ', # 0xfc
'Zhi ', # 0xfd
'Yin ', # 0xfe
'Mi ', # 0xff
)
size: 4,651 | language: Python | extension: .py | total_lines: 258 | avg_line_length: 17.027132 | max_line_length: 19 | alphanum_fraction: 0.414751 | repo_name: rembo10/headphones | repo_stars: 3,370 | repo_forks: 601 | repo_open_issues: 527 | repo_license: GPL-3.0 | repo_extraction_date: 9/5/2024, 5:10:38 PM (Europe/Amsterdam)

id: 9,213 | file_name: sanitizer.py | file_path: rembo10_headphones/lib/feedparser/sanitizer.py

content:
# Copyright 2010-2021 Kurt McKee <contactme@kurtmckee.org>
# Copyright 2002-2008 Mark Pilgrim
# All rights reserved.
#
# This file is a part of feedparser.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 'AS IS'
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import re

from .html import _BaseHTMLProcessor
from .urls import make_safe_absolute_uri


class _HTMLSanitizer(_BaseHTMLProcessor):
acceptable_elements = {
'a',
'abbr',
'acronym',
'address',
'area',
'article',
'aside',
'audio',
'b',
'big',
'blockquote',
'br',
'button',
'canvas',
'caption',
'center',
'cite',
'code',
'col',
'colgroup',
'command',
'datagrid',
'datalist',
'dd',
'del',
'details',
'dfn',
'dialog',
'dir',
'div',
'dl',
'dt',
'em',
'event-source',
'fieldset',
'figcaption',
'figure',
'font',
'footer',
'form',
'h1',
'h2',
'h3',
'h4',
'h5',
'h6',
'header',
'hr',
'i',
'img',
'input',
'ins',
'kbd',
'keygen',
'label',
'legend',
'li',
'm',
'map',
'menu',
'meter',
'multicol',
'nav',
'nextid',
'noscript',
'ol',
'optgroup',
'option',
'output',
'p',
'pre',
'progress',
'q',
's',
'samp',
'section',
'select',
'small',
'sound',
'source',
'spacer',
'span',
'strike',
'strong',
'sub',
'sup',
'table',
'tbody',
'td',
'textarea',
'tfoot',
'th',
'thead',
'time',
'tr',
'tt',
'u',
'ul',
'var',
'video',
}
acceptable_attributes = {
'abbr',
'accept',
'accept-charset',
'accesskey',
'action',
'align',
'alt',
'autocomplete',
'autofocus',
'axis',
'background',
'balance',
'bgcolor',
'bgproperties',
'border',
'bordercolor',
'bordercolordark',
'bordercolorlight',
'bottompadding',
'cellpadding',
'cellspacing',
'ch',
'challenge',
'char',
'charoff',
'charset',
'checked',
'choff',
'cite',
'class',
'clear',
'color',
'cols',
'colspan',
'compact',
'contenteditable',
'controls',
'coords',
'data',
'datafld',
'datapagesize',
'datasrc',
'datetime',
'default',
'delay',
'dir',
'disabled',
'draggable',
'dynsrc',
'enctype',
'end',
'face',
'for',
'form',
'frame',
'galleryimg',
'gutter',
'headers',
'height',
'hidden',
'hidefocus',
'high',
'href',
'hreflang',
'hspace',
'icon',
'id',
'inputmode',
'ismap',
'keytype',
'label',
'lang',
'leftspacing',
'list',
'longdesc',
'loop',
'loopcount',
'loopend',
'loopstart',
'low',
'lowsrc',
'max',
'maxlength',
'media',
'method',
'min',
'multiple',
'name',
'nohref',
'noshade',
'nowrap',
'open',
'optimum',
'pattern',
'ping',
'point-size',
'poster',
'pqg',
'preload',
'prompt',
'radiogroup',
'readonly',
'rel',
'repeat-max',
'repeat-min',
'replace',
'required',
'rev',
'rightspacing',
'rows',
'rowspan',
'rules',
'scope',
'selected',
'shape',
'size',
'span',
'src',
'start',
'step',
'style',
'summary',
'suppress',
'tabindex',
'target',
'template',
'title',
'toppadding',
'type',
'unselectable',
'urn',
'usemap',
'valign',
'value',
'variable',
'volume',
'vrml',
'vspace',
'width',
'wrap',
'xml:lang',
}
unacceptable_elements_with_end_tag = {
'applet',
'script',
'style',
}
acceptable_css_properties = {
'azimuth',
'background-color',
'border-bottom-color',
'border-collapse',
'border-color',
'border-left-color',
'border-right-color',
'border-top-color',
'clear',
'color',
'cursor',
'direction',
'display',
'elevation',
'float',
'font',
'font-family',
'font-size',
'font-style',
'font-variant',
'font-weight',
'height',
'letter-spacing',
'line-height',
'overflow',
'pause',
'pause-after',
'pause-before',
'pitch',
'pitch-range',
'richness',
'speak',
'speak-header',
'speak-numeral',
'speak-punctuation',
'speech-rate',
'stress',
'text-align',
'text-decoration',
'text-indent',
'unicode-bidi',
'vertical-align',
'voice-family',
'volume',
'white-space',
'width',
}
# survey of common keywords found in feeds
acceptable_css_keywords = {
'!important',
'aqua',
'auto',
'black',
'block',
'blue',
'bold',
'both',
'bottom',
'brown',
'center',
'collapse',
'dashed',
'dotted',
'fuchsia',
'gray',
'green',
'italic',
'left',
'lime',
'maroon',
'medium',
'navy',
'none',
'normal',
'nowrap',
'olive',
'pointer',
'purple',
'red',
'right',
'silver',
'solid',
'teal',
'top',
'transparent',
'underline',
'white',
'yellow',
}
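# Style values survive sanitization only if they match this allow-list
# pattern: hex colors, simple rgb() triples, or short numeric lengths
# with common units; anything else (url(...), expression(...), named
# functions) fails the match and is dropped.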
valid_css_values = re.compile(
r'^('
r'#[0-9a-f]+' # Hex values
r'|rgb\(\d+%?,\d*%?,?\d*%?\)?' # RGB values
r'|\d{0,2}\.?\d{0,2}(cm|em|ex|in|mm|pc|pt|px|%|,|\))?' # Sizes/widths
r')$'
)
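# Illustrative examples of what the pattern above accepts (hypothetical
# values, not part of feedparser):
#   valid_css_values.match('#ffcc00')   # matches: hex color
#   valid_css_values.match('0.5em')     # matches: size with unit
#   valid_css_values.match('url(x)')    # None: rejected, so the keyword
#                                       # check in sanitize_style() bails out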
mathml_elements = {
'annotation',
'annotation-xml',
'maction',
'maligngroup',
'malignmark',
'math',
'menclose',
'merror',
'mfenced',
'mfrac',
'mglyph',
'mi',
'mlabeledtr',
'mlongdiv',
'mmultiscripts',
'mn',
'mo',
'mover',
'mpadded',
'mphantom',
'mprescripts',
'mroot',
'mrow',
'ms',
'mscarries',
'mscarry',
'msgroup',
'msline',
'mspace',
'msqrt',
'msrow',
'mstack',
'mstyle',
'msub',
'msubsup',
'msup',
'mtable',
'mtd',
'mtext',
'mtr',
'munder',
'munderover',
'none',
'semantics',
}
mathml_attributes = {
'accent',
'accentunder',
'actiontype',
'align',
'alignmentscope',
'altimg',
'altimg-height',
'altimg-valign',
'altimg-width',
'alttext',
'bevelled',
'charalign',
'close',
'columnalign',
'columnlines',
'columnspacing',
'columnspan',
'columnwidth',
'crossout',
'decimalpoint',
'denomalign',
'depth',
'dir',
'display',
'displaystyle',
'edge',
'encoding',
'equalcolumns',
'equalrows',
'fence',
'fontstyle',
'fontweight',
'form',
'frame',
'framespacing',
'groupalign',
'height',
'href',
'id',
'indentalign',
'indentalignfirst',
'indentalignlast',
'indentshift',
'indentshiftfirst',
'indentshiftlast',
'indenttarget',
'infixlinebreakstyle',
'largeop',
'length',
'linebreak',
'linebreakmultchar',
'linebreakstyle',
'lineleading',
'linethickness',
'location',
'longdivstyle',
'lquote',
'lspace',
'mathbackground',
'mathcolor',
'mathsize',
'mathvariant',
'maxsize',
'minlabelspacing',
'minsize',
'movablelimits',
'notation',
'numalign',
'open',
'other',
'overflow',
'position',
'rowalign',
'rowlines',
'rowspacing',
'rowspan',
'rquote',
'rspace',
'scriptlevel',
'scriptminsize',
'scriptsizemultiplier',
'selection',
'separator',
'separators',
'shift',
'side',
'src',
'stackalign',
'stretchy',
'subscriptshift',
'superscriptshift',
'symmetric',
'voffset',
'width',
'xlink:href',
'xlink:show',
'xlink:type',
'xmlns',
'xmlns:xlink',
}
# svgtiny - foreignObject + linearGradient + radialGradient + stop
svg_elements = {
'a',
'animate',
'animateColor',
'animateMotion',
'animateTransform',
'circle',
'defs',
'desc',
'ellipse',
'font-face',
'font-face-name',
'font-face-src',
'foreignObject',
'g',
'glyph',
'hkern',
'line',
'linearGradient',
'marker',
'metadata',
'missing-glyph',
'mpath',
'path',
'polygon',
'polyline',
'radialGradient',
'rect',
'set',
'stop',
'svg',
'switch',
'text',
'title',
'tspan',
'use',
}
# svgtiny + class + opacity + offset + xmlns + xmlns:xlink
svg_attributes = {
'accent-height',
'accumulate',
'additive',
'alphabetic',
'arabic-form',
'ascent',
'attributeName',
'attributeType',
'baseProfile',
'bbox',
'begin',
'by',
'calcMode',
'cap-height',
'class',
'color',
'color-rendering',
'content',
'cx',
'cy',
'd',
'descent',
'display',
'dur',
'dx',
'dy',
'end',
'fill',
'fill-opacity',
'fill-rule',
'font-family',
'font-size',
'font-stretch',
'font-style',
'font-variant',
'font-weight',
'from',
'fx',
'fy',
'g1',
'g2',
'glyph-name',
'gradientUnits',
'hanging',
'height',
'horiz-adv-x',
'horiz-origin-x',
'id',
'ideographic',
'k',
'keyPoints',
'keySplines',
'keyTimes',
'lang',
'marker-end',
'marker-mid',
'marker-start',
'markerHeight',
'markerUnits',
'markerWidth',
'mathematical',
'max',
'min',
'name',
'offset',
'opacity',
'orient',
'origin',
'overline-position',
'overline-thickness',
'panose-1',
'path',
'pathLength',
'points',
'preserveAspectRatio',
'r',
'refX',
'refY',
'repeatCount',
'repeatDur',
'requiredExtensions',
'requiredFeatures',
'restart',
'rotate',
'rx',
'ry',
'slope',
'stemh',
'stemv',
'stop-color',
'stop-opacity',
'strikethrough-position',
'strikethrough-thickness',
'stroke',
'stroke-dasharray',
'stroke-dashoffset',
'stroke-linecap',
'stroke-linejoin',
'stroke-miterlimit',
'stroke-opacity',
'stroke-width',
'systemLanguage',
'target',
'text-anchor',
'to',
'transform',
'type',
'u1',
'u2',
'underline-position',
'underline-thickness',
'unicode',
'unicode-range',
'units-per-em',
'values',
'version',
'viewBox',
'visibility',
'width',
'widths',
'x',
'x-height',
'x1',
'x2',
'xlink:actuate',
'xlink:arcrole',
'xlink:href',
'xlink:role',
'xlink:show',
'xlink:title',
'xlink:type',
'xml:base',
'xml:lang',
'xml:space',
'xmlns',
'xmlns:xlink',
'y',
'y1',
'y2',
'zoomAndPan',
}
svg_attr_map = None
svg_elem_map = None
acceptable_svg_properties = {
'fill',
'fill-opacity',
'fill-rule',
'stroke',
'stroke-linecap',
'stroke-linejoin',
'stroke-opacity',
'stroke-width',
}
def __init__(self, encoding=None, _type='application/xhtml+xml'):
super(_HTMLSanitizer, self).__init__(encoding, _type)
self.unacceptablestack = 0
self.mathmlOK = 0
self.svgOK = 0
def reset(self):
super(_HTMLSanitizer, self).reset()
self.unacceptablestack = 0
self.mathmlOK = 0
self.svgOK = 0
def unknown_starttag(self, tag, attrs):
acceptable_attributes = self.acceptable_attributes
keymap = {}
if tag not in self.acceptable_elements or self.svgOK:
if tag in self.unacceptable_elements_with_end_tag:
self.unacceptablestack += 1
# add implicit namespaces to html5 inline svg/mathml
if self._type.endswith('html'):
if not dict(attrs).get('xmlns'):
if tag == 'svg':
attrs.append(('xmlns', 'http://www.w3.org/2000/svg'))
if tag == 'math':
attrs.append(('xmlns', 'http://www.w3.org/1998/Math/MathML'))
# not otherwise acceptable, perhaps it is MathML or SVG?
if tag == 'math' and ('xmlns', 'http://www.w3.org/1998/Math/MathML') in attrs:
self.mathmlOK += 1
if tag == 'svg' and ('xmlns', 'http://www.w3.org/2000/svg') in attrs:
self.svgOK += 1
# chose acceptable attributes based on tag class, else bail
if self.mathmlOK and tag in self.mathml_elements:
acceptable_attributes = self.mathml_attributes
elif self.svgOK and tag in self.svg_elements:
# For most vocabularies, lowercasing is a good idea. Many
# svg elements, however, are camel case.
if not self.svg_attr_map:
lower = [attr.lower() for attr in self.svg_attributes]
mix = [a for a in self.svg_attributes if a not in lower]
self.svg_attributes = lower
self.svg_attr_map = {a.lower(): a for a in mix}
lower = [attr.lower() for attr in self.svg_elements]
mix = [a for a in self.svg_elements if a not in lower]
self.svg_elements = lower
self.svg_elem_map = {a.lower(): a for a in mix}
acceptable_attributes = self.svg_attributes
tag = self.svg_elem_map.get(tag, tag)
keymap = self.svg_attr_map
elif tag not in self.acceptable_elements:
return
# declare xlink namespace, if needed
if self.mathmlOK or self.svgOK:
if any((a for a in attrs if a[0].startswith('xlink:'))):
if not ('xmlns:xlink', 'http://www.w3.org/1999/xlink') in attrs:
attrs.append(('xmlns:xlink', 'http://www.w3.org/1999/xlink'))
clean_attrs = []
for key, value in self.normalize_attrs(attrs):
if key == 'style' and 'style' in acceptable_attributes:
clean_value = self.sanitize_style(value)
if clean_value:
clean_attrs.append((key, clean_value))
elif key in acceptable_attributes:
key = keymap.get(key, key)
# make sure the uri uses an acceptable uri scheme
if key == 'href':
value = make_safe_absolute_uri(value)
clean_attrs.append((key, value))
super(_HTMLSanitizer, self).unknown_starttag(tag, clean_attrs)
def unknown_endtag(self, tag):
if tag not in self.acceptable_elements:
if tag in self.unacceptable_elements_with_end_tag:
self.unacceptablestack -= 1
if self.mathmlOK and tag in self.mathml_elements:
if tag == 'math' and self.mathmlOK:
self.mathmlOK -= 1
elif self.svgOK and tag in self.svg_elements:
tag = self.svg_elem_map.get(tag, tag)
if tag == 'svg' and self.svgOK:
self.svgOK -= 1
else:
return
super(_HTMLSanitizer, self).unknown_endtag(tag)
def handle_pi(self, text):
pass
def handle_decl(self, text):
pass
def handle_data(self, text):
if not self.unacceptablestack:
super(_HTMLSanitizer, self).handle_data(text)
def sanitize_style(self, style):
# disallow urls
style = re.compile(r'url\s*\(\s*[^\s)]+?\s*\)\s*').sub(' ', style)
# gauntlet
if not re.match(r"""^([:,;#%.\sa-zA-Z0-9!]|\w-\w|'[\s\w]+'|"[\s\w]+"|\([\d,\s]+\))*$""", style):
return ''
# This replaced a regexp that used re.match and was prone to
# pathological back-tracking.
if re.sub(r"\s*[-\w]+\s*:\s*[^:;]*;?", '', style).strip():
return ''
clean = []
for prop, value in re.findall(r"([-\w]+)\s*:\s*([^:;]*)", style):
if not value:
continue
if prop.lower() in self.acceptable_css_properties:
clean.append(prop + ': ' + value + ';')
elif prop.split('-')[0].lower() in ['background', 'border', 'margin', 'padding']:
for keyword in value.split():
if (
keyword not in self.acceptable_css_keywords
and not self.valid_css_values.match(keyword)
):
break
else:
clean.append(prop + ': ' + value + ';')
elif self.svgOK and prop.lower() in self.acceptable_svg_properties:
clean.append(prop + ': ' + value + ';')
return ' '.join(clean)
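# Illustrative behavior of sanitize_style() (hypothetical inputs, not part
# of feedparser):
#   sanitize_style('color: red; position: fixed')  ->  'color: red;'
#       ('color' is an acceptable property; 'position' is dropped)
#   sanitize_style('background: url(evil)')        ->  ''
#       (url(...) values are stripped before the property is evaluated,
#        leaving an empty value that is skipped)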
def parse_comment(self, i, report=1):
ret = super(_HTMLSanitizer, self).parse_comment(i, report)
if ret >= 0:
return ret
# if ret == -1, this may be a malicious attempt to circumvent
# sanitization, or a page-destroying unclosed comment
match = re.compile(r'--[^>]*>').search(self.rawdata, i+4)
if match:
return match.end()
# unclosed comment; deliberately fail to handle_data()
return len(self.rawdata)
def _sanitize_html(html_source, encoding, _type):
p = _HTMLSanitizer(encoding, _type)
html_source = html_source.replace('<![CDATA[', '&lt;![CDATA[')
p.feed(html_source)
data = p.output()
data = data.strip().replace('\r\n', '\n')
return data
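# Illustrative result of _sanitize_html() (hypothetical markup, not part of
# feedparser):
#   _sanitize_html('<p onclick="x()">hi<script>evil()</script></p>',
#                  'utf-8', 'text/html')
#   -> '<p>hi</p>'  (the onclick attribute is dropped, and the script
#                    element plus its contents are removed entirely)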
# Match XML entity declarations.
# Example: <!ENTITY copyright "(C)">
RE_ENTITY_PATTERN = re.compile(br'^\s*<!ENTITY([^>]*?)>', re.MULTILINE)
# Match XML DOCTYPE declarations.
# Example: <!DOCTYPE feed [ ]>
RE_DOCTYPE_PATTERN = re.compile(br'^\s*<!DOCTYPE([^>]*?)>', re.MULTILINE)
# Match safe entity declarations.
# This will allow hexadecimal character references through,
# as well as text, but not arbitrary nested entities.
# Example: cubed "&#179;"
# Example: copyright "(C)"
# Forbidden: explode1 "&explode2;&explode2;"
RE_SAFE_ENTITY_PATTERN = re.compile(br'\s+(\w+)\s+"(&#\w+;|[^&"]*)"')
def replace_doctype(data):
"""Strips and replaces the DOCTYPE, returns (rss_version, stripped_data)
rss_version may be 'rss091n' or None
stripped_data is the same XML document with a replaced DOCTYPE
"""
# Divide the document into two groups by finding the location
# of the first element that doesn't begin with '<?' or '<!'.
start = re.search(br'<\w', data)
start = start and start.start() or -1
head, data = data[:start+1], data[start+1:]
# Save and then remove all of the ENTITY declarations.
entity_results = RE_ENTITY_PATTERN.findall(head)
head = RE_ENTITY_PATTERN.sub(b'', head)
# Find the DOCTYPE declaration and check the feed type.
doctype_results = RE_DOCTYPE_PATTERN.findall(head)
doctype = doctype_results and doctype_results[0] or b''
if b'netscape' in doctype.lower():
version = 'rss091n'
else:
version = None
# Re-insert the safe ENTITY declarations if a DOCTYPE was found.
replacement = b''
if len(doctype_results) == 1 and entity_results:
safe_entities = [
e
for e in entity_results
if RE_SAFE_ENTITY_PATTERN.match(e)
]
if safe_entities:
replacement = b'<!DOCTYPE feed [\n<!ENTITY' \
+ b'>\n<!ENTITY '.join(safe_entities) \
+ b'>\n]>'
data = RE_DOCTYPE_PATTERN.sub(replacement, head) + data
# Precompute the safe entities for the loose parser.
safe_entities = {
k.decode('utf-8'): v.decode('utf-8')
for k, v in RE_SAFE_ENTITY_PATTERN.findall(replacement)
}
return version, data, safe_entities
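# A minimal sketch of replace_doctype() in use (hypothetical input, not part
# of feedparser); guarded so it only runs when the module is executed directly:
if __name__ == '__main__':
    doc = (b"<?xml version='1.0'?>\n"
           b'<!DOCTYPE rss SYSTEM '
           b'"http://my.netscape.com/publish/formats/rss-0.91.dtd">\n'
           b'<rss version="0.91"></rss>')
    version, cleaned, entities = replace_doctype(doc)
    print(version)   # 'rss091n' -- the DOCTYPE mentions netscape
    print(entities)  # {} -- no ENTITY declarations to preserve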
| 23,856
|
Python
|
.py
| 902
| 17.32816
| 104
| 0.489173
|
rembo10/headphones
| 3,370
| 601
| 527
|
GPL-3.0
|
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
|
9,214
|
util.py
|
rembo10_headphones/lib/feedparser/util.py
|
# Copyright 2010-2021 Kurt McKee <contactme@kurtmckee.org>
# Copyright 2002-2008 Mark Pilgrim
# All rights reserved.
#
# This file is a part of feedparser.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 'AS IS'
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import warnings
class FeedParserDict(dict):
keymap = {
'channel': 'feed',
'items': 'entries',
'guid': 'id',
'date': 'updated',
'date_parsed': 'updated_parsed',
'description': ['summary', 'subtitle'],
'description_detail': ['summary_detail', 'subtitle_detail'],
'url': ['href'],
'modified': 'updated',
'modified_parsed': 'updated_parsed',
'issued': 'published',
'issued_parsed': 'published_parsed',
'copyright': 'rights',
'copyright_detail': 'rights_detail',
'tagline': 'subtitle',
'tagline_detail': 'subtitle_detail',
}
def __getitem__(self, key):
"""
:return: A :class:`FeedParserDict`.
"""
if key == 'category':
try:
return dict.__getitem__(self, 'tags')[0]['term']
except IndexError:
raise KeyError("object doesn't have key 'category'")
elif key == 'enclosures':
norel = lambda link: FeedParserDict([(name, value) for (name, value) in link.items() if name != 'rel'])
return [
norel(link)
for link in dict.__getitem__(self, 'links')
if link['rel'] == 'enclosure'
]
elif key == 'license':
for link in dict.__getitem__(self, 'links'):
if link['rel'] == 'license' and 'href' in link:
return link['href']
elif key == 'updated':
# Temporarily help developers out by keeping the old
# broken behavior that was reported in issue 310.
# This fix was proposed in issue 328.
if (
not dict.__contains__(self, 'updated')
and dict.__contains__(self, 'published')
):
warnings.warn(
"To avoid breaking existing software while "
"fixing issue 310, a temporary mapping has been created "
"from `updated` to `published` if `updated` doesn't "
"exist. This fallback will be removed in a future version "
"of feedparser.",
DeprecationWarning,
)
return dict.__getitem__(self, 'published')
return dict.__getitem__(self, 'updated')
elif key == 'updated_parsed':
if (
not dict.__contains__(self, 'updated_parsed')
and dict.__contains__(self, 'published_parsed')
):
warnings.warn(
"To avoid breaking existing software while "
"fixing issue 310, a temporary mapping has been created "
"from `updated_parsed` to `published_parsed` if "
"`updated_parsed` doesn't exist. This fallback will be "
"removed in a future version of feedparser.",
DeprecationWarning,
)
return dict.__getitem__(self, 'published_parsed')
return dict.__getitem__(self, 'updated_parsed')
else:
realkey = self.keymap.get(key, key)
if isinstance(realkey, list):
for k in realkey:
if dict.__contains__(self, k):
return dict.__getitem__(self, k)
elif dict.__contains__(self, realkey):
return dict.__getitem__(self, realkey)
return dict.__getitem__(self, key)
def __contains__(self, key):
if key in ('updated', 'updated_parsed'):
# Temporarily help developers out by keeping the old
# broken behavior that was reported in issue 310.
# This fix was proposed in issue 328.
return dict.__contains__(self, key)
try:
self.__getitem__(key)
except KeyError:
return False
else:
return True
has_key = __contains__
def get(self, key, default=None):
"""
:return: A :class:`FeedParserDict`.
"""
try:
return self.__getitem__(key)
except KeyError:
return default
def __setitem__(self, key, value):
key = self.keymap.get(key, key)
if isinstance(key, list):
key = key[0]
return dict.__setitem__(self, key, value)
def setdefault(self, k, default):
if k not in self:
self[k] = default
return default
return self[k]
def __getattr__(self, key):
# __getattribute__() is called first; this will be called
# only if an attribute was not already found
try:
return self.__getitem__(key)
except KeyError:
raise AttributeError("object has no attribute '%s'" % key)
def __hash__(self):
# This is incorrect behavior -- dictionaries shouldn't be hashable.
# Note to self: remove this behavior in the future.
return id(self)
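# A minimal sketch of the keymap aliasing above (hypothetical values, not
# part of feedparser); guarded so it only runs when executed directly:
if __name__ == '__main__':
    d = FeedParserDict()
    d['description'] = 'hello'   # keymap stores this under 'summary'
    print(d['summary'])          # 'hello'
    print(d.description)         # 'hello' -- attribute access falls back
    print('summary' in d)        # True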
| 6,490
|
Python
|
.py
| 150
| 32.793333
| 115
| 0.581792
|
rembo10/headphones
| 3,370
| 601
| 527
|
GPL-3.0
|
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
|
9,215
|
mixin.py
|
rembo10_headphones/lib/feedparser/mixin.py
|
# Copyright 2010-2021 Kurt McKee <contactme@kurtmckee.org>
# Copyright 2002-2008 Mark Pilgrim
# All rights reserved.
#
# This file is a part of feedparser.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 'AS IS'
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import base64
import binascii
import copy
import html.entities
import re
import xml.sax.saxutils
from .html import _cp1252
from .namespaces import _base, cc, dc, georss, itunes, mediarss, psc
from .sanitizer import _sanitize_html, _HTMLSanitizer
from .util import FeedParserDict
from .urls import _urljoin, make_safe_absolute_uri, resolve_relative_uris
class _FeedParserMixin(
_base.Namespace,
cc.Namespace,
dc.Namespace,
georss.Namespace,
itunes.Namespace,
mediarss.Namespace,
psc.Namespace,
):
namespaces = {
'': '',
'http://backend.userland.com/rss': '',
'http://blogs.law.harvard.edu/tech/rss': '',
'http://purl.org/rss/1.0/': '',
'http://my.netscape.com/rdf/simple/0.9/': '',
'http://example.com/newformat#': '',
'http://example.com/necho': '',
'http://purl.org/echo/': '',
'uri/of/echo/namespace#': '',
'http://purl.org/pie/': '',
'http://purl.org/atom/ns#': '',
'http://www.w3.org/2005/Atom': '',
'http://purl.org/rss/1.0/modules/rss091#': '',
'http://webns.net/mvcb/': 'admin',
'http://purl.org/rss/1.0/modules/aggregation/': 'ag',
'http://purl.org/rss/1.0/modules/annotate/': 'annotate',
'http://media.tangent.org/rss/1.0/': 'audio',
'http://backend.userland.com/blogChannelModule': 'blogChannel',
'http://creativecommons.org/ns#license': 'cc',
'http://web.resource.org/cc/': 'cc',
'http://cyber.law.harvard.edu/rss/creativeCommonsRssModule.html': 'creativeCommons',
'http://backend.userland.com/creativeCommonsRssModule': 'creativeCommons',
'http://purl.org/rss/1.0/modules/company': 'co',
'http://purl.org/rss/1.0/modules/content/': 'content',
'http://my.theinfo.org/changed/1.0/rss/': 'cp',
'http://purl.org/dc/elements/1.1/': 'dc',
'http://purl.org/dc/terms/': 'dcterms',
'http://purl.org/rss/1.0/modules/email/': 'email',
'http://purl.org/rss/1.0/modules/event/': 'ev',
'http://rssnamespace.org/feedburner/ext/1.0': 'feedburner',
'http://freshmeat.net/rss/fm/': 'fm',
'http://xmlns.com/foaf/0.1/': 'foaf',
'http://www.w3.org/2003/01/geo/wgs84_pos#': 'geo',
'http://www.georss.org/georss': 'georss',
'http://www.opengis.net/gml': 'gml',
'http://postneo.com/icbm/': 'icbm',
'http://purl.org/rss/1.0/modules/image/': 'image',
'http://www.itunes.com/DTDs/PodCast-1.0.dtd': 'itunes',
'http://example.com/DTDs/PodCast-1.0.dtd': 'itunes',
'http://purl.org/rss/1.0/modules/link/': 'l',
'http://search.yahoo.com/mrss': 'media',
# Version 1.1.2 of the Media RSS spec added the trailing slash on the namespace
'http://search.yahoo.com/mrss/': 'media',
'http://madskills.com/public/xml/rss/module/pingback/': 'pingback',
'http://prismstandard.org/namespaces/1.2/basic/': 'prism',
'http://www.w3.org/1999/02/22-rdf-syntax-ns#': 'rdf',
'http://www.w3.org/2000/01/rdf-schema#': 'rdfs',
'http://purl.org/rss/1.0/modules/reference/': 'ref',
'http://purl.org/rss/1.0/modules/richequiv/': 'reqv',
'http://purl.org/rss/1.0/modules/search/': 'search',
'http://purl.org/rss/1.0/modules/slash/': 'slash',
'http://schemas.xmlsoap.org/soap/envelope/': 'soap',
'http://purl.org/rss/1.0/modules/servicestatus/': 'ss',
'http://hacks.benhammersley.com/rss/streaming/': 'str',
'http://purl.org/rss/1.0/modules/subscription/': 'sub',
'http://purl.org/rss/1.0/modules/syndication/': 'sy',
'http://schemas.pocketsoap.com/rss/myDescModule/': 'szf',
'http://purl.org/rss/1.0/modules/taxonomy/': 'taxo',
'http://purl.org/rss/1.0/modules/threading/': 'thr',
'http://purl.org/rss/1.0/modules/textinput/': 'ti',
'http://madskills.com/public/xml/rss/module/trackback/': 'trackback',
'http://wellformedweb.org/commentAPI/': 'wfw',
'http://purl.org/rss/1.0/modules/wiki/': 'wiki',
'http://www.w3.org/1999/xhtml': 'xhtml',
'http://www.w3.org/1999/xlink': 'xlink',
'http://www.w3.org/XML/1998/namespace': 'xml',
'http://podlove.org/simple-chapters': 'psc',
}
_matchnamespaces = {}
can_be_relative_uri = {
'comments',
'docs',
'href',
'icon',
'id',
'link',
'logo',
'url',
'wfw_comment',
'wfw_commentrss',
}
can_contain_relative_uris = {
'content',
'copyright',
'description',
'info',
'rights',
'subtitle',
'summary',
'tagline',
'title',
}
can_contain_dangerous_markup = {
'content',
'copyright',
'description',
'info',
'rights',
'subtitle',
'summary',
'tagline',
'title',
}
html_types = {
'application/xhtml+xml',
'text/html',
}
def __init__(self):
if not self._matchnamespaces:
for k, v in self.namespaces.items():
self._matchnamespaces[k.lower()] = v
self.feeddata = FeedParserDict() # feed-level data
self.entries = [] # list of entry-level data
self.version = '' # feed type/version, see SUPPORTED_VERSIONS
self.namespaces_in_use = {} # dictionary of namespaces defined by the feed
# the following are used internally to track state;
# this is really out of control and should be refactored
self.infeed = 0
self.inentry = 0
self.incontent = 0
self.intextinput = 0
self.inimage = 0
self.inauthor = 0
self.incontributor = 0
self.inpublisher = 0
self.insource = 0
self.sourcedata = FeedParserDict()
self.contentparams = FeedParserDict()
self._summaryKey = None
self.namespacemap = {}
self.elementstack = []
self.basestack = []
self.langstack = []
self.svgOK = 0
self.title_depth = -1
self.depth = 0
if self.lang:
self.feeddata['language'] = self.lang.replace('_', '-')
# A map of the following form:
# {
# object_that_value_is_set_on: {
# property_name: depth_of_node_property_was_extracted_from,
# other_property: depth_of_node_property_was_extracted_from,
# },
# }
self.property_depth_map = {}
super(_FeedParserMixin, self).__init__()
def _normalize_attributes(self, kv):
raise NotImplementedError
def unknown_starttag(self, tag, attrs):
# increment depth counter
self.depth += 1
# normalize attrs
attrs = [self._normalize_attributes(attr) for attr in attrs]
# track xml:base and xml:lang
attrs_d = dict(attrs)
baseuri = attrs_d.get('xml:base', attrs_d.get('base')) or self.baseuri
if isinstance(baseuri, bytes):
baseuri = baseuri.decode(self.encoding, 'ignore')
# ensure that self.baseuri is always an absolute URI that
# uses a whitelisted URI scheme (e.g. not `javascript:`)
if self.baseuri:
self.baseuri = make_safe_absolute_uri(self.baseuri, baseuri) or self.baseuri
else:
self.baseuri = _urljoin(self.baseuri, baseuri)
lang = attrs_d.get('xml:lang', attrs_d.get('lang'))
if lang == '':
# xml:lang could be explicitly set to '', we need to capture that
lang = None
elif lang is None:
# if no xml:lang is specified, use parent lang
lang = self.lang
if lang:
if tag in ('feed', 'rss', 'rdf:RDF'):
self.feeddata['language'] = lang.replace('_', '-')
self.lang = lang
self.basestack.append(self.baseuri)
self.langstack.append(lang)
# track namespaces
for prefix, uri in attrs:
if prefix.startswith('xmlns:'):
self.track_namespace(prefix[6:], uri)
elif prefix == 'xmlns':
self.track_namespace(None, uri)
# track inline content
if self.incontent and not self.contentparams.get('type', 'xml').endswith('xml'):
if tag in ('xhtml:div', 'div'):
return # typepad does this 10/2007
# element declared itself as escaped markup, but it isn't really
self.contentparams['type'] = 'application/xhtml+xml'
if self.incontent and self.contentparams.get('type') == 'application/xhtml+xml':
if tag.find(':') != -1:
prefix, tag = tag.split(':', 1)
namespace = self.namespaces_in_use.get(prefix, '')
if tag == 'math' and namespace == 'http://www.w3.org/1998/Math/MathML':
attrs.append(('xmlns', namespace))
if tag == 'svg' and namespace == 'http://www.w3.org/2000/svg':
attrs.append(('xmlns', namespace))
if tag == 'svg':
self.svgOK += 1
return self.handle_data('<%s%s>' % (tag, self.strattrs(attrs)), escape=0)
# match namespaces
if tag.find(':') != -1:
prefix, suffix = tag.split(':', 1)
else:
prefix, suffix = '', tag
prefix = self.namespacemap.get(prefix, prefix)
if prefix:
prefix = prefix + '_'
# Special hack for better tracking of empty textinput/image elements in
# illformed feeds.
if (not prefix) and tag not in ('title', 'link', 'description', 'name'):
self.intextinput = 0
if (not prefix) and tag not in ('title', 'link', 'description', 'url', 'href', 'width', 'height'):
self.inimage = 0
# call special handler (if defined) or default handler
methodname = '_start_' + prefix + suffix
try:
method = getattr(self, methodname)
return method(attrs_d)
except AttributeError:
# Since there's no handler or something has gone wrong we
# explicitly add the element and its attributes.
unknown_tag = prefix + suffix
if len(attrs_d) == 0:
# No attributes so merge it into the enclosing dictionary
return self.push(unknown_tag, 1)
else:
# Has attributes so create it in its own dictionary
context = self._get_context()
context[unknown_tag] = attrs_d
def unknown_endtag(self, tag):
# match namespaces
if tag.find(':') != -1:
prefix, suffix = tag.split(':', 1)
else:
prefix, suffix = '', tag
prefix = self.namespacemap.get(prefix, prefix)
if prefix:
prefix = prefix + '_'
if suffix == 'svg' and self.svgOK:
self.svgOK -= 1
# call special handler (if defined) or default handler
methodname = '_end_' + prefix + suffix
try:
if self.svgOK:
raise AttributeError()
method = getattr(self, methodname)
method()
except AttributeError:
self.pop(prefix + suffix)
# track inline content
if self.incontent and not self.contentparams.get('type', 'xml').endswith('xml'):
# element declared itself as escaped markup, but it isn't really
if tag in ('xhtml:div', 'div'):
return # typepad does this 10/2007
self.contentparams['type'] = 'application/xhtml+xml'
if self.incontent and self.contentparams.get('type') == 'application/xhtml+xml':
tag = tag.split(':')[-1]
self.handle_data('</%s>' % tag, escape=0)
# track xml:base and xml:lang going out of scope
if self.basestack:
self.basestack.pop()
if self.basestack and self.basestack[-1]:
self.baseuri = self.basestack[-1]
if self.langstack:
self.langstack.pop()
if self.langstack: # and (self.langstack[-1] is not None):
self.lang = self.langstack[-1]
self.depth -= 1
def handle_charref(self, ref):
# Called for each character reference, e.g. for '&#160;', ref is '160'
if not self.elementstack:
return
ref = ref.lower()
if ref in ('34', '38', '39', '60', '62', 'x22', 'x26', 'x27', 'x3c', 'x3e'):
text = '&#%s;' % ref
else:
if ref[0] == 'x':
c = int(ref[1:], 16)
else:
c = int(ref)
text = chr(c).encode('utf-8')
self.elementstack[-1][2].append(text)
def handle_entityref(self, ref):
# Called for each entity reference, e.g. for '&copy;', ref is 'copy'
if not self.elementstack:
return
if ref in ('lt', 'gt', 'quot', 'amp', 'apos'):
text = '&%s;' % ref
elif ref in self.entities:
text = self.entities[ref]
if text.startswith('&#') and text.endswith(';'):
return self.handle_entityref(text)
else:
try:
html.entities.name2codepoint[ref]
except KeyError:
text = '&%s;' % ref
else:
text = chr(html.entities.name2codepoint[ref]).encode('utf-8')
self.elementstack[-1][2].append(text)
def handle_data(self, text, escape=1):
# Called for each block of plain text, i.e. outside of any tag and
# not containing any character or entity references
if not self.elementstack:
return
if escape and self.contentparams.get('type') == 'application/xhtml+xml':
text = xml.sax.saxutils.escape(text)
self.elementstack[-1][2].append(text)
def handle_comment(self, text):
# Called for each comment, e.g. <!-- insert message here -->
pass
def handle_pi(self, text):
# Called for each processing instruction, e.g. <?instruction>
pass
def handle_decl(self, text):
pass
def parse_declaration(self, i):
# Override internal declaration handler to handle CDATA blocks.
if self.rawdata[i:i+9] == '<![CDATA[':
k = self.rawdata.find(']]>', i)
if k == -1:
# CDATA block began but didn't finish
k = len(self.rawdata)
return k
self.handle_data(xml.sax.saxutils.escape(self.rawdata[i+9:k]), 0)
return k+3
else:
k = self.rawdata.find('>', i)
if k >= 0:
return k+1
else:
# We have an incomplete declaration (no closing '>').
return k
@staticmethod
def map_content_type(content_type):
content_type = content_type.lower()
if content_type == 'text' or content_type == 'plain':
content_type = 'text/plain'
elif content_type == 'html':
content_type = 'text/html'
elif content_type == 'xhtml':
content_type = 'application/xhtml+xml'
return content_type
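# e.g. (illustrative, not part of feedparser):
#   map_content_type('HTML')  -> 'text/html'
#   map_content_type('xhtml') -> 'application/xhtml+xml'
#   full MIME types such as 'text/plain' pass through unchanged (lowercased)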
def track_namespace(self, prefix, uri):
loweruri = uri.lower()
if not self.version:
if (prefix, loweruri) == (None, 'http://my.netscape.com/rdf/simple/0.9/'):
self.version = 'rss090'
elif loweruri == 'http://purl.org/rss/1.0/':
self.version = 'rss10'
elif loweruri == 'http://www.w3.org/2005/atom':
self.version = 'atom10'
if loweruri.find('backend.userland.com/rss') != -1:
# match any backend.userland.com namespace
uri = 'http://backend.userland.com/rss'
loweruri = uri
if loweruri in self._matchnamespaces:
self.namespacemap[prefix] = self._matchnamespaces[loweruri]
self.namespaces_in_use[self._matchnamespaces[loweruri]] = uri
else:
self.namespaces_in_use[prefix or ''] = uri
def resolve_uri(self, uri):
return _urljoin(self.baseuri or '', uri)
@staticmethod
def decode_entities(element, data):
return data
@staticmethod
def strattrs(attrs):
return ''.join(
' %s="%s"' % (t[0], xml.sax.saxutils.escape(t[1], {'"': '"'}))
for t in attrs
)
def push(self, element, expecting_text):
self.elementstack.append([element, expecting_text, []])
def pop(self, element, strip_whitespace=1):
if not self.elementstack:
return
if self.elementstack[-1][0] != element:
return
element, expecting_text, pieces = self.elementstack.pop()
# Ensure each piece is a str for Python 3
for (i, v) in enumerate(pieces):
if isinstance(v, bytes):
pieces[i] = v.decode('utf-8')
if self.version == 'atom10' and self.contentparams.get('type', 'text') == 'application/xhtml+xml':
# remove enclosing child element, but only if it is a <div> and
# only if all the remaining content is nested underneath it.
# This means that the divs would be retained in the following:
# <div>foo</div><div>bar</div>
while pieces and len(pieces) > 1 and not pieces[-1].strip():
del pieces[-1]
while pieces and len(pieces) > 1 and not pieces[0].strip():
del pieces[0]
if pieces and (pieces[0] == '<div>' or pieces[0].startswith('<div ')) and pieces[-1] == '</div>':
depth = 0
for piece in pieces[:-1]:
if piece.startswith('</'):
depth -= 1
if depth == 0:
break
elif piece.startswith('<') and not piece.endswith('/>'):
depth += 1
else:
pieces = pieces[1:-1]
output = ''.join(pieces)
if strip_whitespace:
output = output.strip()
if not expecting_text:
return output
# decode base64 content
if base64 and self.contentparams.get('base64', 0):
try:
output = base64.decodebytes(output.encode('utf8')).decode('utf8')
except (binascii.Error, binascii.Incomplete, UnicodeDecodeError):
pass
# resolve relative URIs
if (element in self.can_be_relative_uri) and output:
# do not resolve guid elements with isPermalink="false"
if not element == 'id' or self.guidislink:
output = self.resolve_uri(output)
# decode entities within embedded markup
if not self.contentparams.get('base64', 0):
output = self.decode_entities(element, output)
# some feed formats require consumers to guess
# whether the content is html or plain text
if not self.version.startswith('atom') and self.contentparams.get('type') == 'text/plain':
if self.looks_like_html(output):
self.contentparams['type'] = 'text/html'
# remove temporary cruft from contentparams
try:
del self.contentparams['mode']
except KeyError:
pass
try:
del self.contentparams['base64']
except KeyError:
pass
is_htmlish = self.map_content_type(self.contentparams.get('type', 'text/html')) in self.html_types
# resolve relative URIs within embedded markup
if is_htmlish and self.resolve_relative_uris:
if element in self.can_contain_relative_uris:
output = resolve_relative_uris(output, self.baseuri, self.encoding, self.contentparams.get('type', 'text/html'))
# sanitize embedded markup
if is_htmlish and self.sanitize_html:
if element in self.can_contain_dangerous_markup:
output = _sanitize_html(output, self.encoding, self.contentparams.get('type', 'text/html'))
if self.encoding and isinstance(output, bytes):
output = output.decode(self.encoding, 'ignore')
# address common error where people take data that is already
# utf-8, presume that it is iso-8859-1, and re-encode it.
if self.encoding in ('utf-8', 'utf-8_INVALID_PYTHON_3') and not isinstance(output, bytes):
try:
output = output.encode('iso-8859-1').decode('utf-8')
except (UnicodeEncodeError, UnicodeDecodeError):
pass
# map win-1252 extensions to the proper code points
if not isinstance(output, bytes):
output = output.translate(_cp1252)
# categories/tags/keywords/whatever are handled in _end_category or
# _end_tags or _end_itunes_keywords
if element in ('category', 'tags', 'itunes_keywords'):
return output
if element == 'title' and -1 < self.title_depth <= self.depth:
return output
# store output in appropriate place(s)
if self.inentry and not self.insource:
if element == 'content':
self.entries[-1].setdefault(element, [])
contentparams = copy.deepcopy(self.contentparams)
contentparams['value'] = output
self.entries[-1][element].append(contentparams)
elif element == 'link':
if not self.inimage:
# query variables in urls in link elements are improperly
# converted from `?a=1&b=2` to `?a=1&amp;b;=2` as if they're
# unhandled character references. fix this special case.
output = output.replace('&amp;', '&')
output = re.sub("&([A-Za-z0-9_]+);", r"&\g<1>", output)
self.entries[-1][element] = output
if output:
self.entries[-1]['links'][-1]['href'] = output
else:
if element == 'description':
element = 'summary'
old_value_depth = self.property_depth_map.setdefault(self.entries[-1], {}).get(element)
if old_value_depth is None or self.depth <= old_value_depth:
self.property_depth_map[self.entries[-1]][element] = self.depth
self.entries[-1][element] = output
if self.incontent:
contentparams = copy.deepcopy(self.contentparams)
contentparams['value'] = output
self.entries[-1][element + '_detail'] = contentparams
elif self.infeed or self.insource: # and (not self.intextinput) and (not self.inimage):
context = self._get_context()
if element == 'description':
element = 'subtitle'
context[element] = output
if element == 'link':
# fix query variables; see above for the explanation
output = re.sub("&([A-Za-z0-9_]+);", r"&\g<1>", output)
context[element] = output
context['links'][-1]['href'] = output
elif self.incontent:
contentparams = copy.deepcopy(self.contentparams)
contentparams['value'] = output
context[element + '_detail'] = contentparams
return output
def push_content(self, tag, attrs_d, default_content_type, expecting_text):
self.incontent += 1
if self.lang:
self.lang = self.lang.replace('_', '-')
self.contentparams = FeedParserDict({
'type': self.map_content_type(attrs_d.get('type', default_content_type)),
'language': self.lang,
'base': self.baseuri})
self.contentparams['base64'] = self._is_base64(attrs_d, self.contentparams)
self.push(tag, expecting_text)
def pop_content(self, tag):
value = self.pop(tag)
self.incontent -= 1
self.contentparams.clear()
return value
# a number of elements in a number of RSS variants are nominally plain
# text, but this is routinely ignored. This is an attempt to detect
# the most common cases. As false positives often result in silent
# data loss, this function errs on the conservative side.
@staticmethod
def looks_like_html(s):
"""
:type s: str
:rtype: bool
"""
# must have a close tag or an entity reference to qualify
if not (re.search(r'</(\w+)>', s) or re.search(r'&#?\w+;', s)):
return False
# all tags must be in a restricted subset of valid HTML tags
if any((t for t in re.findall(r'</?(\w+)', s) if t.lower() not in _HTMLSanitizer.acceptable_elements)):
return False
# all entities must have been defined as valid HTML entities
if any((e for e in re.findall(r'&(\w+);', s) if e not in html.entities.entitydefs)):
return False
return True
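# Illustrative results (hypothetical inputs, not part of feedparser):
#   looks_like_html('plain text')        -> False (no close tag or entity)
#   looks_like_html('<p>hi</p>')         -> True
#   looks_like_html('<blink>x</blink>')  -> False ('blink' is not in
#                                           _HTMLSanitizer.acceptable_elements)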
def _map_to_standard_prefix(self, name):
colonpos = name.find(':')
if colonpos != -1:
prefix = name[:colonpos]
suffix = name[colonpos+1:]
prefix = self.namespacemap.get(prefix, prefix)
name = prefix + ':' + suffix
return name
def _get_attribute(self, attrs_d, name):
return attrs_d.get(self._map_to_standard_prefix(name))
def _is_base64(self, attrs_d, contentparams):
if attrs_d.get('mode', '') == 'base64':
return 1
if self.contentparams['type'].startswith('text/'):
return 0
if self.contentparams['type'].endswith('+xml'):
return 0
if self.contentparams['type'].endswith('/xml'):
return 0
return 1
@staticmethod
def _enforce_href(attrs_d):
href = attrs_d.get('url', attrs_d.get('uri', attrs_d.get('href', None)))
if href:
try:
del attrs_d['url']
except KeyError:
pass
try:
del attrs_d['uri']
except KeyError:
pass
attrs_d['href'] = href
return attrs_d
def _save(self, key, value, overwrite=False):
context = self._get_context()
if overwrite:
context[key] = value
else:
context.setdefault(key, value)
def _get_context(self):
if self.insource:
context = self.sourcedata
elif self.inimage and 'image' in self.feeddata:
context = self.feeddata['image']
elif self.intextinput:
context = self.feeddata['textinput']
elif self.inentry:
context = self.entries[-1]
else:
context = self.feeddata
return context
def _save_author(self, key, value, prefix='author'):
context = self._get_context()
context.setdefault(prefix + '_detail', FeedParserDict())
context[prefix + '_detail'][key] = value
self._sync_author_detail()
context.setdefault('authors', [FeedParserDict()])
context['authors'][-1][key] = value
def _save_contributor(self, key, value):
context = self._get_context()
context.setdefault('contributors', [FeedParserDict()])
context['contributors'][-1][key] = value
def _sync_author_detail(self, key='author'):
context = self._get_context()
detail = context.get('%ss' % key, [FeedParserDict()])[-1]
if detail:
name = detail.get('name')
email = detail.get('email')
if name and email:
context[key] = '%s (%s)' % (name, email)
elif name:
context[key] = name
elif email:
context[key] = email
else:
author, email = context.get(key), None
if not author:
return
emailmatch = re.search(r'''(([a-zA-Z0-9\_\-\.\+]+)@((\[[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.)|(([a-zA-Z0-9\-]+\.)+))([a-zA-Z]{2,4}|[0-9]{1,3})(\]?))(\?subject=\S+)?''', author)
if emailmatch:
email = emailmatch.group(0)
# probably a better way to do the following, but it passes
# all the tests
author = author.replace(email, '')
author = author.replace('()', '')
author = author.replace('&lt;&gt;', '')
author = author.replace('<>', '')
author = author.strip()
if author and (author[0] == '('):
author = author[1:]
if author and (author[-1] == ')'):
author = author[:-1]
author = author.strip()
if author or email:
context.setdefault('%s_detail' % key, detail)
if author:
detail['name'] = author
if email:
detail['email'] = email
def _add_tag(self, term, scheme, label):
context = self._get_context()
tags = context.setdefault('tags', [])
if (not term) and (not scheme) and (not label):
return
value = FeedParserDict(term=term, scheme=scheme, label=label)
if value not in tags:
tags.append(value)
def _start_tags(self, attrs_d):
# This is a completely-made up element. Its semantics are determined
# only by a single feed that precipitated bug report 392 on Google Code.
# In short, this is junk code.
self.push('tags', 1)
def _end_tags(self):
for term in self.pop('tags').split(','):
self._add_tag(term.strip(), None, None)
| 32,196
|
Python
|
.py
| 707
| 34.811881
| 187
| 0.553355
|
rembo10/headphones
| 3,370
| 601
| 527
|
GPL-3.0
|
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
|
9,216
|
encodings.py
|
rembo10_headphones/lib/feedparser/encodings.py
|
# Character encoding routines
# Copyright 2010-2021 Kurt McKee <contactme@kurtmckee.org>
# Copyright 2002-2008 Mark Pilgrim
# All rights reserved.
#
# This file is a part of feedparser.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 'AS IS'
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import cgi
import codecs
import re
try:
try:
import cchardet as chardet
except ImportError:
import chardet
except ImportError:
chardet = None
lazy_chardet_encoding = None
else:
def lazy_chardet_encoding(data):
return chardet.detect(data)['encoding'] or ''
from .exceptions import (
CharacterEncodingOverride,
CharacterEncodingUnknown,
NonXMLContentType,
)
# Each marker represents some of the characters of the opening XML
# processing instruction ('<?xm') in the specified encoding.
EBCDIC_MARKER = b'\x4C\x6F\xA7\x94'
UTF16BE_MARKER = b'\x00\x3C\x00\x3F'
UTF16LE_MARKER = b'\x3C\x00\x3F\x00'
UTF32BE_MARKER = b'\x00\x00\x00\x3C'
UTF32LE_MARKER = b'\x3C\x00\x00\x00'
ZERO_BYTES = b'\x00\x00'
# Match the opening XML declaration.
# Example: <?xml version="1.0" encoding="utf-8"?>
RE_XML_DECLARATION = re.compile(r'^<\?xml[^>]*?>')
# Capture the value of the XML processing instruction's encoding attribute.
# Example: <?xml version="1.0" encoding="utf-8"?>
RE_XML_PI_ENCODING = re.compile(br'^<\?.*encoding=[\'"](.*?)[\'"].*\?>')
def convert_to_utf8(http_headers, data, result):
"""Detect and convert the character encoding to UTF-8.
http_headers is a dictionary
data is a bytes object (not str)"""
# This is so much trickier than it sounds, it's not even funny.
# According to RFC 3023 ('XML Media Types'), if the HTTP Content-Type
# is application/xml, application/*+xml,
# application/xml-external-parsed-entity, or application/xml-dtd,
# the encoding given in the charset parameter of the HTTP Content-Type
# takes precedence over the encoding given in the XML prefix within the
# document, and defaults to 'utf-8' if neither are specified. But, if
# the HTTP Content-Type is text/xml, text/*+xml, or
# text/xml-external-parsed-entity, the encoding given in the XML prefix
# within the document is ALWAYS IGNORED and only the encoding given in
# the charset parameter of the HTTP Content-Type header should be
# respected, and it defaults to 'us-ascii' if not specified.
# Furthermore, discussion on the atom-syntax mailing list with the
# author of RFC 3023 leads me to the conclusion that any document
# served with a Content-Type of text/* and no charset parameter
# must be treated as us-ascii. (We now do this.) And also that it
# must always be flagged as non-well-formed. (We now do this too.)
# If Content-Type is unspecified (input was local file or non-HTTP source)
# or unrecognized (server just got it totally wrong), then go by the
# encoding given in the XML prefix of the document and default to
# 'iso-8859-1' as per the HTTP specification (RFC 2616).
# Then, assuming we didn't find a character encoding in the HTTP headers
# (and the HTTP Content-type allowed us to look in the body), we need
# to sniff the first few bytes of the XML data and try to determine
# whether the encoding is ASCII-compatible. Section F of the XML
# specification shows the way here:
# http://www.w3.org/TR/REC-xml/#sec-guessing-no-ext-info
# If the sniffed encoding is not ASCII-compatible, we need to make it
# ASCII compatible so that we can sniff further into the XML declaration
# to find the encoding attribute, which will tell us the true encoding.
# Of course, none of this guarantees that we will be able to parse the
# feed in the declared character encoding (assuming it was declared
# correctly, which many are not). iconv_codec can help a lot;
# you should definitely install it if you can.
# http://cjkpython.i18n.org/
bom_encoding = ''
xml_encoding = ''
# Look at the first few bytes of the document to guess what
# its encoding may be. We only need to decode enough of the
# document that we can use an ASCII-compatible regular
# expression to search for an XML encoding declaration.
# The heuristic follows the XML specification, section F:
# http://www.w3.org/TR/REC-xml/#sec-guessing-no-ext-info
# Check for BOMs first.
if data[:4] == codecs.BOM_UTF32_BE:
bom_encoding = 'utf-32be'
data = data[4:]
elif data[:4] == codecs.BOM_UTF32_LE:
bom_encoding = 'utf-32le'
data = data[4:]
elif data[:2] == codecs.BOM_UTF16_BE and data[2:4] != ZERO_BYTES:
bom_encoding = 'utf-16be'
data = data[2:]
elif data[:2] == codecs.BOM_UTF16_LE and data[2:4] != ZERO_BYTES:
bom_encoding = 'utf-16le'
data = data[2:]
elif data[:3] == codecs.BOM_UTF8:
bom_encoding = 'utf-8'
data = data[3:]
# Check for the characters '<?xm' in several encodings.
elif data[:4] == EBCDIC_MARKER:
bom_encoding = 'cp037'
elif data[:4] == UTF16BE_MARKER:
bom_encoding = 'utf-16be'
elif data[:4] == UTF16LE_MARKER:
bom_encoding = 'utf-16le'
elif data[:4] == UTF32BE_MARKER:
bom_encoding = 'utf-32be'
elif data[:4] == UTF32LE_MARKER:
bom_encoding = 'utf-32le'
tempdata = data
try:
if bom_encoding:
tempdata = data.decode(bom_encoding).encode('utf-8')
except (UnicodeDecodeError, LookupError):
# feedparser recognizes UTF-32 encodings that aren't
# available in Python 2.4 and 2.5, so it's possible to
# encounter a LookupError during decoding.
xml_encoding_match = None
else:
xml_encoding_match = RE_XML_PI_ENCODING.match(tempdata)
if xml_encoding_match:
xml_encoding = xml_encoding_match.groups()[0].decode('utf-8').lower()
# Normalize the xml_encoding if necessary.
if bom_encoding and (xml_encoding in (
'u16', 'utf-16', 'utf16', 'utf_16',
'u32', 'utf-32', 'utf32', 'utf_32',
'iso-10646-ucs-2', 'iso-10646-ucs-4',
'csucs4', 'csunicode', 'ucs-2', 'ucs-4'
)):
xml_encoding = bom_encoding
# Find the HTTP Content-Type and, hopefully, a character
# encoding provided by the server. The Content-Type is used
# to choose the "correct" encoding among the BOM encoding,
# XML declaration encoding, and HTTP encoding, following the
# heuristic defined in RFC 3023.
http_content_type = http_headers.get('content-type') or ''
http_content_type, params = cgi.parse_header(http_content_type)
http_encoding = params.get('charset', '').replace("'", "")
if isinstance(http_encoding, bytes):
http_encoding = http_encoding.decode('utf-8', 'ignore')
acceptable_content_type = 0
application_content_types = ('application/xml', 'application/xml-dtd',
'application/xml-external-parsed-entity')
text_content_types = ('text/xml', 'text/xml-external-parsed-entity')
if (
http_content_type in application_content_types
or (
http_content_type.startswith('application/')
and http_content_type.endswith('+xml')
)
):
acceptable_content_type = 1
rfc3023_encoding = http_encoding or xml_encoding or 'utf-8'
elif (
http_content_type in text_content_types
or (
http_content_type.startswith('text/')
and http_content_type.endswith('+xml')
)
):
acceptable_content_type = 1
rfc3023_encoding = http_encoding or 'us-ascii'
elif http_content_type.startswith('text/'):
rfc3023_encoding = http_encoding or 'us-ascii'
elif http_headers and 'content-type' not in http_headers:
rfc3023_encoding = xml_encoding or 'iso-8859-1'
else:
rfc3023_encoding = xml_encoding or 'utf-8'
# gb18030 is a superset of gb2312, so always replace gb2312
# with gb18030 for greater compatibility.
if rfc3023_encoding.lower() == 'gb2312':
rfc3023_encoding = 'gb18030'
if xml_encoding.lower() == 'gb2312':
xml_encoding = 'gb18030'
# there are four encodings to keep track of:
# - http_encoding is the encoding declared in the Content-Type HTTP header
# - xml_encoding is the encoding declared in the <?xml declaration
# - bom_encoding is the encoding sniffed from the first 4 bytes of the XML data
# - rfc3023_encoding is the actual encoding, as per RFC 3023 and a variety of other conflicting specifications
error = None
if http_headers and (not acceptable_content_type):
if 'content-type' in http_headers:
msg = '%s is not an XML media type' % http_headers['content-type']
else:
msg = 'no Content-type specified'
error = NonXMLContentType(msg)
# determine character encoding
known_encoding = 0
tried_encodings = []
# try: HTTP encoding, declared XML encoding, encoding sniffed from BOM
for proposed_encoding in (rfc3023_encoding, xml_encoding, bom_encoding,
lazy_chardet_encoding, 'utf-8', 'windows-1252', 'iso-8859-2'):
if callable(proposed_encoding):
proposed_encoding = proposed_encoding(data)
if not proposed_encoding:
continue
if proposed_encoding in tried_encodings:
continue
tried_encodings.append(proposed_encoding)
try:
data = data.decode(proposed_encoding)
except (UnicodeDecodeError, LookupError):
pass
else:
known_encoding = 1
# Update the encoding in the opening XML processing instruction.
new_declaration = '''<?xml version='1.0' encoding='utf-8'?>'''
if RE_XML_DECLARATION.search(data):
data = RE_XML_DECLARATION.sub(new_declaration, data)
else:
data = new_declaration + '\n' + data
data = data.encode('utf-8')
break
# if still no luck, give up
if not known_encoding:
error = CharacterEncodingUnknown(
'document encoding unknown, I tried ' +
'%s, %s, utf-8, windows-1252, and iso-8859-2 but nothing worked' %
(rfc3023_encoding, xml_encoding))
rfc3023_encoding = ''
elif proposed_encoding != rfc3023_encoding:
error = CharacterEncodingOverride(
'document declared as %s, but parsed as %s' %
(rfc3023_encoding, proposed_encoding))
rfc3023_encoding = proposed_encoding
result['encoding'] = rfc3023_encoding
if error:
result['bozo'] = True
result['bozo_exception'] = error
return data
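# A minimal sketch of convert_to_utf8() (hypothetical input, not part of
# feedparser). The declared iso-8859-1 encoding wins under RFC 3023 because
# the Content-Type is application/xml with no charset parameter:
if __name__ == '__main__':
    raw = ('<?xml version="1.0" encoding="iso-8859-1"?>'
           '<feed>caf\xe9</feed>').encode('iso-8859-1')
    result = {}
    out = convert_to_utf8({'content-type': 'application/xml'}, raw, result)
    print(result['encoding'])   # 'iso-8859-1'
    print(out.decode('utf-8'))  # declaration rewritten to utf-8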
| 12,033
|
Python
|
.py
| 255
| 40.486275
| 114
| 0.673304
|
rembo10/headphones
| 3,370
| 601
| 527
|
GPL-3.0
|
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
|
9,217
|
sgml.py
|
rembo10_headphones/lib/feedparser/sgml.py
|
# Copyright 2010-2021 Kurt McKee <contactme@kurtmckee.org>
# Copyright 2002-2008 Mark Pilgrim
# All rights reserved.
#
# This file is a part of feedparser.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 'AS IS'
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import re
import sgmllib
__all__ = [
'sgmllib',
'charref',
'tagfind',
'attrfind',
'entityref',
'incomplete',
'interesting',
'shorttag',
'shorttagopen',
'starttagopen',
'endbracket',
]
# sgmllib defines a number of module-level regular expressions that are
# insufficient for the XML parsing feedparser needs. Rather than modify
# the variables directly in sgmllib, they're defined here using the same
# names, and the compiled code objects of several sgmllib.SGMLParser
# methods are copied into _BaseHTMLProcessor so that they execute in
# feedparser's scope instead of sgmllib's scope.
charref = re.compile(r'&#(\d+|[xX][0-9a-fA-F]+);')
tagfind = re.compile(r'[a-zA-Z][-_.:a-zA-Z0-9]*')
attrfind = re.compile(
r"""\s*([a-zA-Z_][-:.a-zA-Z_0-9]*)[$]?(\s*=\s*"""
r"""('[^']*'|"[^"]*"|[][\-a-zA-Z0-9./,:;+*%?!&$()_#=~'"@]*))?"""
)
# Unfortunately, these must be copied over to prevent NameError exceptions
entityref = sgmllib.entityref
incomplete = sgmllib.incomplete
interesting = sgmllib.interesting
shorttag = sgmllib.shorttag
shorttagopen = sgmllib.shorttagopen
starttagopen = sgmllib.starttagopen
class _EndBracketRegEx:
def __init__(self):
# Overriding the built-in sgmllib.endbracket regex allows the
# parser to find angle brackets embedded in element attributes.
self.endbracket = re.compile(
r'('
r"""[^'"<>]"""
r"""|"[^"]*"(?=>|/|\s|\w+=)"""
r"""|'[^']*'(?=>|/|\s|\w+=))*(?=[<>])"""
r"""|.*?(?=[<>]"""
r')'
)
def search(self, target, index=0):
match = self.endbracket.match(target, index)
if match is not None:
# Returning a new object in the calling thread's context
# resolves a thread-safety issue.
return EndBracketMatch(match)
return None
class EndBracketMatch:
def __init__(self, match):
self.match = match
def start(self, n):
return self.match.end(n)
endbracket = _EndBracketRegEx()
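# A minimal sketch of the override above (hypothetical markup, not part of
# feedparser; assumes the vendored sgmllib imported successfully):
if __name__ == '__main__':
    markup = '<a title="a > b" href="/x">text'
    m = endbracket.search(markup, 1)
    print(m.start(0))  # 26 -- the '>' that really closes the tag, not the
                       # '>' embedded in the quoted title attribute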
| 3,488
|
Python
|
.py
| 85
| 36.964706
| 77
| 0.693215
|
rembo10/headphones
| 3,370
| 601
| 527
|
GPL-3.0
|
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
|
9,218
|
urls.py
|
rembo10_headphones/lib/feedparser/urls.py
|
# Copyright 2010-2021 Kurt McKee <contactme@kurtmckee.org>
# Copyright 2002-2008 Mark Pilgrim
# All rights reserved.
#
# This file is a part of feedparser.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 'AS IS'
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import re
import urllib.parse
from .html import _BaseHTMLProcessor
# If you want feedparser to allow all URL schemes, set this to ()
# List culled from Python's urlparse documentation at:
# http://docs.python.org/library/urlparse.html
# as well as from "URI scheme" at Wikipedia:
# https://secure.wikimedia.org/wikipedia/en/wiki/URI_scheme
# Many more will likely need to be added!
ACCEPTABLE_URI_SCHEMES = (
'file', 'ftp', 'gopher', 'h323', 'hdl', 'http', 'https', 'imap', 'magnet',
'mailto', 'mms', 'news', 'nntp', 'prospero', 'rsync', 'rtsp', 'rtspu',
'sftp', 'shttp', 'sip', 'sips', 'snews', 'svn', 'svn+ssh', 'telnet',
'wais',
# Additional common-but-unofficial schemes
'aim', 'callto', 'cvs', 'facetime', 'feed', 'git', 'gtalk', 'irc', 'ircs',
'irc6', 'itms', 'mms', 'msnim', 'skype', 'ssh', 'smb', 'svn', 'ymsg',
)
_urifixer = re.compile(r'^([A-Za-z][A-Za-z0-9+\-.]*://)(/*)(.*?)')
def _urljoin(base, uri):
uri = _urifixer.sub(r'\1\3', uri)
try:
uri = urllib.parse.urljoin(base, uri)
except ValueError:
uri = ''
return uri
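# Illustrative sketch (not part of the upstream source): _urifixer
# collapses stray slashes after the scheme before the join happens.
#
#     >>> _urljoin('http://a/b/c', 'd')
#     'http://a/b/d'
#     >>> _urljoin('http://a/', 'http:////b/x')
#     'http://b/x'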
def convert_to_idn(url):
"""Convert a URL to IDN notation"""
# this function should only be called with a unicode string
# strategy: if the host cannot be encoded in ascii, then
# it'll be necessary to encode it in idn form
parts = list(urllib.parse.urlsplit(url))
try:
parts[1].encode('ascii')
except UnicodeEncodeError:
# the url needs to be converted to idn notation
host = parts[1].rsplit(':', 1)
newhost = []
port = ''
if len(host) == 2:
port = host.pop()
for h in host[0].split('.'):
newhost.append(h.encode('idna').decode('utf-8'))
parts[1] = '.'.join(newhost)
if port:
parts[1] += ':' + port
return urllib.parse.urlunsplit(parts)
else:
return url
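# Illustrative sketch (not part of the upstream source), using a
# hypothetical host name. ASCII hosts pass through untouched; only a
# non-ASCII host triggers the IDNA round trip, and any port survives it.
#
#     >>> convert_to_idn('http://bücher.example:8080/feed')
#     'http://xn--bcher-kva.example:8080/feed'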
def make_safe_absolute_uri(base, rel=None):
# bail if ACCEPTABLE_URI_SCHEMES is empty
if not ACCEPTABLE_URI_SCHEMES:
return _urljoin(base, rel or '')
if not base:
return rel or ''
if not rel:
try:
scheme = urllib.parse.urlparse(base)[0]
except ValueError:
return ''
if not scheme or scheme in ACCEPTABLE_URI_SCHEMES:
return base
return ''
uri = _urljoin(base, rel)
if uri.strip().split(':', 1)[0] not in ACCEPTABLE_URI_SCHEMES:
return ''
return uri
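# Illustrative sketch (not part of the upstream source): the scheme
# check is what turns dangerous relative references into empty strings.
#
#     >>> make_safe_absolute_uri('http://a/', '/feed.xml')
#     'http://a/feed.xml'
#     >>> make_safe_absolute_uri('http://a/', 'javascript:alert(1)')
#     ''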
class RelativeURIResolver(_BaseHTMLProcessor):
relative_uris = {
('a', 'href'),
('applet', 'codebase'),
('area', 'href'),
('audio', 'src'),
('blockquote', 'cite'),
('body', 'background'),
('del', 'cite'),
('form', 'action'),
('frame', 'longdesc'),
('frame', 'src'),
('iframe', 'longdesc'),
('iframe', 'src'),
('head', 'profile'),
('img', 'longdesc'),
('img', 'src'),
('img', 'usemap'),
('input', 'src'),
('input', 'usemap'),
('ins', 'cite'),
('link', 'href'),
('object', 'classid'),
('object', 'codebase'),
('object', 'data'),
('object', 'usemap'),
('q', 'cite'),
('script', 'src'),
('source', 'src'),
('video', 'poster'),
('video', 'src'),
}
def __init__(self, baseuri, encoding, _type):
_BaseHTMLProcessor.__init__(self, encoding, _type)
self.baseuri = baseuri
def resolve_uri(self, uri):
return make_safe_absolute_uri(self.baseuri, uri.strip())
def unknown_starttag(self, tag, attrs):
attrs = self.normalize_attrs(attrs)
attrs = [(key, ((tag, key) in self.relative_uris) and self.resolve_uri(value) or value) for key, value in attrs]
super(RelativeURIResolver, self).unknown_starttag(tag, attrs)
def resolve_relative_uris(html_source, base_uri, encoding, type_):
p = RelativeURIResolver(base_uri, encoding, type_)
p.feed(html_source)
return p.output()
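# Illustrative usage sketch (not part of the upstream source), with
# hypothetical markup and base URI:
#
#     >>> resolve_relative_uris('<a href="/x">x</a>',
#     ...                       'http://example.org/feed', 'utf-8',
#     ...                       'text/html')
#     '<a href="http://example.org/x">x</a>'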
| 5,490
|
Python
|
.py
| 138
| 33.963768
| 120
| 0.63149
|
rembo10/headphones
| 3,370
| 601
| 527
|
GPL-3.0
|
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
|
9,219
|
api.py
|
rembo10_headphones/lib/feedparser/api.py
|
# The public API for feedparser
# Copyright 2010-2021 Kurt McKee <contactme@kurtmckee.org>
# Copyright 2002-2008 Mark Pilgrim
# All rights reserved.
#
# This file is a part of feedparser.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 'AS IS'
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import io
import urllib.error
import urllib.parse
import xml.sax
from .datetimes import registerDateHandler, _parse_date
from .encodings import convert_to_utf8
from .exceptions import *
from .html import _BaseHTMLProcessor
from . import http
from . import mixin
from .mixin import _FeedParserMixin
from .parsers.loose import _LooseFeedParser
from .parsers.strict import _StrictFeedParser
from .sanitizer import replace_doctype
from .sgml import *
from .urls import convert_to_idn, make_safe_absolute_uri
from .util import FeedParserDict
# List of preferred XML parsers, by SAX driver name. These will be tried first,
# but if they're not installed, Python will keep searching through its own list
# of pre-installed parsers until it finds one that supports everything we need.
PREFERRED_XML_PARSERS = ["drv_libxml2"]
_XML_AVAILABLE = True
SUPPORTED_VERSIONS = {
'': 'unknown',
'rss090': 'RSS 0.90',
'rss091n': 'RSS 0.91 (Netscape)',
'rss091u': 'RSS 0.91 (Userland)',
'rss092': 'RSS 0.92',
'rss093': 'RSS 0.93',
'rss094': 'RSS 0.94',
'rss20': 'RSS 2.0',
'rss10': 'RSS 1.0',
'rss': 'RSS (unknown version)',
'atom01': 'Atom 0.1',
'atom02': 'Atom 0.2',
'atom03': 'Atom 0.3',
'atom10': 'Atom 1.0',
'atom': 'Atom (unknown version)',
'cdf': 'CDF',
}
def _open_resource(url_file_stream_or_string, etag, modified, agent, referrer, handlers, request_headers, result):
"""URL, filename, or string --> stream
This function lets you define parsers that take any input source
(URL, pathname to local or network file, or actual data as a string)
and deal with it in a uniform manner. Returned object is guaranteed
to have all the basic stdio read methods (read, readline, readlines).
Just .close() the object when you're done with it.
If the etag argument is supplied, it will be used as the value of an
If-None-Match request header.
If the modified argument is supplied, it can be a tuple of 9 integers
(as returned by gmtime() in the standard Python time module) or a date
string in any format supported by feedparser. Regardless, it MUST
be in GMT (Greenwich Mean Time). It will be reformatted into an
RFC 1123-compliant date and used as the value of an If-Modified-Since
request header.
If the agent argument is supplied, it will be used as the value of a
User-Agent request header.
If the referrer argument is supplied, it will be used as the value of a
Referer[sic] request header.
If handlers is supplied, it is a list of handlers used to build a
urllib2 opener.
    If request_headers is supplied, it is a dictionary of HTTP request
    headers that will override the values generated by FeedParser.
:return: A bytes object.
"""
if hasattr(url_file_stream_or_string, 'read'):
return url_file_stream_or_string.read()
if isinstance(url_file_stream_or_string, str) \
and urllib.parse.urlparse(url_file_stream_or_string)[0] in ('http', 'https', 'ftp', 'file', 'feed'):
return http.get(url_file_stream_or_string, etag, modified, agent, referrer, handlers, request_headers, result)
# try to open with native open function (if url_file_stream_or_string is a filename)
try:
with open(url_file_stream_or_string, 'rb') as f:
data = f.read()
except (IOError, UnicodeEncodeError, TypeError, ValueError):
# if url_file_stream_or_string is a str object that
# cannot be converted to the encoding returned by
# sys.getfilesystemencoding(), a UnicodeEncodeError
# will be thrown
# If url_file_stream_or_string is a string that contains NULL
# (such as an XML document encoded in UTF-32), TypeError will
# be thrown.
pass
else:
return data
# treat url_file_stream_or_string as string
if not isinstance(url_file_stream_or_string, bytes):
return url_file_stream_or_string.encode('utf-8')
return url_file_stream_or_string
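# Illustrative sketch (not part of the upstream source): all three input
# kinds collapse to bytes. A hypothetical session, using the imports
# already present in this module:
#
#     result = FeedParserDict(headers={})
#     _open_resource(io.BytesIO(b'<rss/>'), None, None, 'agent/1.0',
#                    None, [], {}, result)    # stream  -> b'<rss/>'
#     _open_resource('<rss/>', None, None, 'agent/1.0',
#                    None, [], {}, result)    # raw str -> b'<rss/>'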
LooseFeedParser = type(
'LooseFeedParser',
(_LooseFeedParser, _FeedParserMixin, _BaseHTMLProcessor, object),
{},
)
StrictFeedParser = type(
'StrictFeedParser',
(_StrictFeedParser, _FeedParserMixin, xml.sax.handler.ContentHandler, object),
{},
)
def parse(url_file_stream_or_string, etag=None, modified=None, agent=None, referrer=None, handlers=None, request_headers=None, response_headers=None, resolve_relative_uris=None, sanitize_html=None):
"""Parse a feed from a URL, file, stream, or string.
:param url_file_stream_or_string:
File-like object, URL, file path, or string. Both byte and text strings
are accepted. If necessary, encoding will be derived from the response
headers or automatically detected.
Note that strings may trigger network I/O or filesystem access
depending on the value. Wrap an untrusted string in
a :class:`io.StringIO` or :class:`io.BytesIO` to avoid this. Do not
pass untrusted strings to this function.
When a URL is not passed the feed location to use in relative URL
resolution should be passed in the ``Content-Location`` response header
(see ``response_headers`` below).
:param str etag: HTTP ``ETag`` request header.
:param modified: HTTP ``Last-Modified`` request header.
:type modified: :class:`str`, :class:`time.struct_time` 9-tuple, or
:class:`datetime.datetime`
:param str agent: HTTP ``User-Agent`` request header, which defaults to
the value of :data:`feedparser.USER_AGENT`.
:param referrer: HTTP ``Referer`` [sic] request header.
:param request_headers:
A mapping of HTTP header name to HTTP header value to add to the
request, overriding internally generated values.
:type request_headers: :class:`dict` mapping :class:`str` to :class:`str`
:param response_headers:
A mapping of HTTP header name to HTTP header value. Multiple values may
be joined with a comma. If a HTTP request was made, these headers
override any matching headers in the response. Otherwise this specifies
the entirety of the response headers.
:type response_headers: :class:`dict` mapping :class:`str` to :class:`str`
    :param bool resolve_relative_uris:
        Should feedparser attempt to resolve relative URIs to absolute ones
        within HTML content? Defaults to the value of
        :data:`feedparser.RESOLVE_RELATIVE_URIS`, which is ``True``.
    :param bool sanitize_html:
        Should feedparser sanitize potentially unsafe HTML content? Only
        disable this if you know what you are doing! Defaults to the value
        of :data:`feedparser.SANITIZE_HTML`, which is ``True``.
:return: A :class:`FeedParserDict`.
"""
if not agent or sanitize_html is None or resolve_relative_uris is None:
import feedparser
if not agent:
agent = feedparser.USER_AGENT
if sanitize_html is None:
sanitize_html = feedparser.SANITIZE_HTML
if resolve_relative_uris is None:
resolve_relative_uris = feedparser.RESOLVE_RELATIVE_URIS
result = FeedParserDict(
bozo=False,
entries=[],
feed=FeedParserDict(),
headers={},
)
try:
data = _open_resource(url_file_stream_or_string, etag, modified, agent, referrer, handlers, request_headers, result)
except urllib.error.URLError as error:
result.update({
'bozo': True,
'bozo_exception': error,
})
return result
if not data:
return result
# overwrite existing headers using response_headers
result['headers'].update(response_headers or {})
data = convert_to_utf8(result['headers'], data, result)
    use_strict_parser = bool(result['encoding'])
result['version'], data, entities = replace_doctype(data)
# Ensure that baseuri is an absolute URI using an acceptable URI scheme.
contentloc = result['headers'].get('content-location', '')
href = result.get('href', '')
baseuri = make_safe_absolute_uri(href, contentloc) or make_safe_absolute_uri(contentloc) or href
baselang = result['headers'].get('content-language', None)
    if isinstance(baselang, bytes):
baselang = baselang.decode('utf-8', 'ignore')
if not _XML_AVAILABLE:
use_strict_parser = 0
if use_strict_parser:
# initialize the SAX parser
feedparser = StrictFeedParser(baseuri, baselang, 'utf-8')
feedparser.resolve_relative_uris = resolve_relative_uris
feedparser.sanitize_html = sanitize_html
saxparser = xml.sax.make_parser(PREFERRED_XML_PARSERS)
saxparser.setFeature(xml.sax.handler.feature_namespaces, 1)
try:
# disable downloading external doctype references, if possible
saxparser.setFeature(xml.sax.handler.feature_external_ges, 0)
except xml.sax.SAXNotSupportedException:
pass
saxparser.setContentHandler(feedparser)
saxparser.setErrorHandler(feedparser)
source = xml.sax.xmlreader.InputSource()
source.setByteStream(io.BytesIO(data))
try:
saxparser.parse(source)
except xml.sax.SAXException as e:
result['bozo'] = 1
result['bozo_exception'] = feedparser.exc or e
use_strict_parser = 0
if not use_strict_parser:
feedparser = LooseFeedParser(baseuri, baselang, 'utf-8', entities)
feedparser.resolve_relative_uris = resolve_relative_uris
feedparser.sanitize_html = sanitize_html
feedparser.feed(data.decode('utf-8', 'replace'))
result['feed'] = feedparser.feeddata
result['entries'] = feedparser.entries
result['version'] = result['version'] or feedparser.version
result['namespaces'] = feedparser.namespaces_in_use
return result
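# Illustrative usage sketch (not part of the upstream source), with a
# hypothetical feed URL:
#
#     >>> import feedparser
#     >>> d = feedparser.parse('http://example.org/feed.xml')
#     >>> d.bozo, d.version, d.feed.get('title'), len(d.entries)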
| 11,379
|
Python
|
.py
| 236
| 42.470339
| 198
| 0.710142
|
rembo10/headphones
| 3,370
| 601
| 527
|
GPL-3.0
|
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
|
9,220
|
__init__.py
|
rembo10_headphones/lib/feedparser/__init__.py
|
# Copyright 2010-2021 Kurt McKee <contactme@kurtmckee.org>
# Copyright 2002-2008 Mark Pilgrim
# All rights reserved.
#
# This file is part of feedparser.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 'AS IS'
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from .api import parse
from .datetimes import registerDateHandler
from .exceptions import *
from .util import FeedParserDict
__author__ = 'Kurt McKee <contactme@kurtmckee.org>'
__license__ = 'BSD 2-clause'
__version__ = '6.0.8'
# HTTP "User-Agent" header to send to servers when downloading feeds.
# If you are embedding feedparser in a larger application, you should
# change this to your application name and URL.
USER_AGENT = "feedparser/%s +https://github.com/kurtmckee/feedparser/" % __version__
# If you want feedparser to automatically resolve all relative URIs, set this
# to 1.
RESOLVE_RELATIVE_URIS = 1
# If you want feedparser to automatically sanitize all potentially unsafe
# HTML content, set this to 1.
SANITIZE_HTML = 1
| 2,176
|
Python
|
.py
| 43
| 49.488372
| 84
| 0.790883
|
rembo10/headphones
| 3,370
| 601
| 527
|
GPL-3.0
|
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
|
9,221
|
html.py
|
rembo10_headphones/lib/feedparser/html.py
|
# Copyright 2010-2021 Kurt McKee <contactme@kurtmckee.org>
# Copyright 2002-2008 Mark Pilgrim
# All rights reserved.
#
# This file is a part of feedparser.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 'AS IS'
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import html.entities
import re
from .sgml import *
_cp1252 = {
128: '\u20ac', # euro sign
130: '\u201a', # single low-9 quotation mark
131: '\u0192', # latin small letter f with hook
132: '\u201e', # double low-9 quotation mark
133: '\u2026', # horizontal ellipsis
134: '\u2020', # dagger
135: '\u2021', # double dagger
136: '\u02c6', # modifier letter circumflex accent
137: '\u2030', # per mille sign
138: '\u0160', # latin capital letter s with caron
139: '\u2039', # single left-pointing angle quotation mark
140: '\u0152', # latin capital ligature oe
142: '\u017d', # latin capital letter z with caron
145: '\u2018', # left single quotation mark
146: '\u2019', # right single quotation mark
147: '\u201c', # left double quotation mark
148: '\u201d', # right double quotation mark
149: '\u2022', # bullet
150: '\u2013', # en dash
151: '\u2014', # em dash
152: '\u02dc', # small tilde
153: '\u2122', # trade mark sign
154: '\u0161', # latin small letter s with caron
155: '\u203a', # single right-pointing angle quotation mark
156: '\u0153', # latin small ligature oe
158: '\u017e', # latin small letter z with caron
159: '\u0178', # latin capital letter y with diaeresis
}
class _BaseHTMLProcessor(sgmllib.SGMLParser, object):
special = re.compile("""[<>'"]""")
bare_ampersand = re.compile(r"&(?!#\d+;|#x[0-9a-fA-F]+;|\w+;)")
elements_no_end_tag = {
'area',
'base',
'basefont',
'br',
'col',
'command',
'embed',
'frame',
'hr',
'img',
'input',
'isindex',
'keygen',
'link',
'meta',
'param',
'source',
'track',
'wbr',
}
def __init__(self, encoding=None, _type='application/xhtml+xml'):
if encoding:
self.encoding = encoding
self._type = _type
self.pieces = []
super(_BaseHTMLProcessor, self).__init__()
def reset(self):
self.pieces = []
super(_BaseHTMLProcessor, self).reset()
def _shorttag_replace(self, match):
"""
:type match: Match[str]
:rtype: str
"""
tag = match.group(1)
if tag in self.elements_no_end_tag:
return '<' + tag + ' />'
else:
return '<' + tag + '></' + tag + '>'
# By declaring these methods and overriding their compiled code
# with the code from sgmllib, the original code will execute in
# feedparser's scope instead of sgmllib's. This means that the
# `tagfind` and `charref` regular expressions will be found as
# they're declared above, not as they're declared in sgmllib.
def goahead(self, i):
raise NotImplementedError
# Replace goahead with SGMLParser's goahead() code object.
try:
goahead.__code__ = sgmllib.SGMLParser.goahead.__code__
except AttributeError:
# Python 2
# noinspection PyUnresolvedReferences
goahead.func_code = sgmllib.SGMLParser.goahead.func_code
def __parse_starttag(self, i):
raise NotImplementedError
# Replace __parse_starttag with SGMLParser's parse_starttag() code object.
try:
__parse_starttag.__code__ = sgmllib.SGMLParser.parse_starttag.__code__
except AttributeError:
# Python 2
# noinspection PyUnresolvedReferences
__parse_starttag.func_code = sgmllib.SGMLParser.parse_starttag.func_code
def parse_starttag(self, i):
j = self.__parse_starttag(i)
if self._type == 'application/xhtml+xml':
if j > 2 and self.rawdata[j-2:j] == '/>':
self.unknown_endtag(self.lasttag)
return j
def feed(self, data):
"""
:type data: str
:rtype: None
"""
        data = re.sub(r'<!((?!DOCTYPE|--|\[))', r'&lt;!\1', data, flags=re.IGNORECASE)
data = re.sub(r'<([^<>\s]+?)\s*/>', self._shorttag_replace, data)
        data = data.replace('&#39;', "'")
        data = data.replace('&#34;', '"')
super(_BaseHTMLProcessor, self).feed(data)
super(_BaseHTMLProcessor, self).close()
@staticmethod
def normalize_attrs(attrs):
"""
:type attrs: List[Tuple[str, str]]
:rtype: List[Tuple[str, str]]
"""
if not attrs:
return attrs
# utility method to be called by descendants
# Collapse any duplicate attribute names and values by converting
# *attrs* into a dictionary, then convert it back to a list.
attrs_d = {k.lower(): v for k, v in attrs}
attrs = [
(k, k in ('rel', 'type') and v.lower() or v)
for k, v in attrs_d.items()
]
attrs.sort()
return attrs
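    # Illustrative sketch (not part of the upstream source): case is
    # folded for every attribute name, but only 'rel' and 'type' have
    # their values lowercased, and duplicates collapse via the dict.
    #
    #     >>> _BaseHTMLProcessor.normalize_attrs(
    #     ...     [('HREF', '/x'), ('Rel', 'ALTERNATE')])
    #     [('href', '/x'), ('rel', 'alternate')]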
def unknown_starttag(self, tag, attrs):
"""
:type tag: str
:type attrs: List[Tuple[str, str]]
:rtype: None
"""
# Called for each start tag
# attrs is a list of (attr, value) tuples
# e.g. for <pre class='screen'>, tag='pre', attrs=[('class', 'screen')]
uattrs = []
strattrs = ''
if attrs:
for key, value in attrs:
                value = value.replace('>', '&gt;')
                value = value.replace('<', '&lt;')
                value = value.replace('"', '&quot;')
                value = self.bare_ampersand.sub("&amp;", value)
uattrs.append((key, value))
strattrs = ''.join(
' %s="%s"' % (key, value)
for key, value in uattrs
)
if tag in self.elements_no_end_tag:
self.pieces.append('<%s%s />' % (tag, strattrs))
else:
self.pieces.append('<%s%s>' % (tag, strattrs))
def unknown_endtag(self, tag):
"""
:type tag: str
:rtype: None
"""
# Called for each end tag, e.g. for </pre>, tag will be 'pre'
# Reconstruct the original end tag.
if tag not in self.elements_no_end_tag:
self.pieces.append("</%s>" % tag)
def handle_charref(self, ref):
"""
:type ref: str
:rtype: None
"""
        # Called for each character reference, e.g. '&#160;' will extract '160'
# Reconstruct the original character reference.
ref = ref.lower()
if ref.startswith('x'):
value = int(ref[1:], 16)
else:
value = int(ref)
if value in _cp1252:
self.pieces.append('&#%s;' % hex(ord(_cp1252[value]))[1:])
else:
self.pieces.append('&#%s;' % ref)
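    # Illustrative note (not part of the upstream source): decimal
    # references in the cp1252 range are re-emitted as the hex reference
    # of the Unicode equivalent, e.g. feeding '&#147;' (cp1252 left
    # double quote) appends '&#x201c;' to self.pieces; everything else
    # is reconstructed verbatim.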
def handle_entityref(self, ref):
"""
:type ref: str
:rtype: None
"""
        # Called for each entity reference, e.g. '&copy;' will extract 'copy'
# Reconstruct the original entity reference.
if ref in html.entities.name2codepoint or ref == 'apos':
self.pieces.append('&%s;' % ref)
else:
            self.pieces.append('&amp;%s' % ref)
def handle_data(self, text):
"""
:type text: str
:rtype: None
"""
# called for each block of plain text, i.e. outside of any tag and
# not containing any character or entity references
# Store the original text verbatim.
self.pieces.append(text)
def handle_comment(self, text):
"""
:type text: str
:rtype: None
"""
# Called for HTML comments, e.g. <!-- insert Javascript code here -->
# Reconstruct the original comment.
self.pieces.append('<!--%s-->' % text)
def handle_pi(self, text):
"""
:type text: str
:rtype: None
"""
# Called for each processing instruction, e.g. <?instruction>
# Reconstruct original processing instruction.
self.pieces.append('<?%s>' % text)
def handle_decl(self, text):
"""
:type text: str
:rtype: None
"""
# called for the DOCTYPE, if present, e.g.
# <!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN"
# "http://www.w3.org/TR/html4/loose.dtd">
# Reconstruct original DOCTYPE
self.pieces.append('<!%s>' % text)
_new_declname_match = re.compile(r'[a-zA-Z][-_.a-zA-Z0-9:]*\s*').match
def _scan_name(self, i, declstartpos):
"""
:type i: int
:type declstartpos: int
:rtype: Tuple[Optional[str], int]
"""
rawdata = self.rawdata
n = len(rawdata)
if i == n:
return None, -1
m = self._new_declname_match(rawdata, i)
if m:
s = m.group()
name = s.strip()
if (i + len(s)) == n:
return None, -1 # end of buffer
return name.lower(), m.end()
else:
self.handle_data(rawdata)
# self.updatepos(declstartpos, i)
return None, -1
@staticmethod
def convert_charref(name):
"""
:type name: str
:rtype: str
"""
return '&#%s;' % name
@staticmethod
def convert_entityref(name):
"""
:type name: str
:rtype: str
"""
return '&%s;' % name
def output(self):
"""Return processed HTML as a single string.
:rtype: str
"""
return ''.join(self.pieces)
def parse_declaration(self, i):
"""
:type i: int
:rtype: int
"""
try:
return sgmllib.SGMLParser.parse_declaration(self, i)
except sgmllib.SGMLParseError:
# Escape the doctype declaration and continue parsing.
self.handle_data('<')
return i+1
| 11,258
|
Python
|
.py
| 308
| 28.464286
| 80
| 0.578006
|
rembo10/headphones
| 3,370
| 601
| 527
|
GPL-3.0
|
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
|
9,222
|
exceptions.py
|
rembo10_headphones/lib/feedparser/exceptions.py
|
# Exceptions used throughout feedparser
# Copyright 2010-2021 Kurt McKee <contactme@kurtmckee.org>
# Copyright 2002-2008 Mark Pilgrim
# All rights reserved.
#
# This file is a part of feedparser.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 'AS IS'
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
__all__ = [
'ThingsNobodyCaresAboutButMe',
'CharacterEncodingOverride',
'CharacterEncodingUnknown',
'NonXMLContentType',
'UndeclaredNamespace',
]
class ThingsNobodyCaresAboutButMe(Exception):
pass
class CharacterEncodingOverride(ThingsNobodyCaresAboutButMe):
pass
class CharacterEncodingUnknown(ThingsNobodyCaresAboutButMe):
pass
class NonXMLContentType(ThingsNobodyCaresAboutButMe):
pass
class UndeclaredNamespace(Exception):
pass
| 1,957
|
Python
|
.py
| 44
| 42.318182
| 82
| 0.805994
|
rembo10/headphones
| 3,370
| 601
| 527
|
GPL-3.0
|
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
|
9,223
|
http.py
|
rembo10_headphones/lib/feedparser/http.py
|
# Copyright 2010-2021 Kurt McKee <contactme@kurtmckee.org>
# Copyright 2002-2008 Mark Pilgrim
# All rights reserved.
#
# This file is a part of feedparser.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 'AS IS'
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import base64
import datetime
import gzip
import io
import re
import struct
import urllib.parse
import urllib.request
import zlib
from .datetimes import _parse_date
from .urls import convert_to_idn
# HTTP "Accept" header to send to servers when downloading feeds. If you don't
# want to send an Accept header, set this to None.
ACCEPT_HEADER = "application/atom+xml,application/rdf+xml,application/rss+xml,application/x-netcdf,application/xml;q=0.9,text/xml;q=0.2,*/*;q=0.1"
class _FeedURLHandler(urllib.request.HTTPDigestAuthHandler, urllib.request.HTTPRedirectHandler, urllib.request.HTTPDefaultErrorHandler):
def http_error_default(self, req, fp, code, msg, headers):
# The default implementation just raises HTTPError.
# Forget that.
fp.status = code
return fp
def http_error_301(self, req, fp, code, msg, hdrs):
result = urllib.request.HTTPRedirectHandler.http_error_301(self, req, fp, code, msg, hdrs)
if not result:
return fp
result.status = code
result.newurl = result.geturl()
return result
# The default implementations in urllib.request.HTTPRedirectHandler
# are identical, so hardcoding a http_error_301 call above
# won't affect anything
http_error_300 = http_error_301
http_error_302 = http_error_301
http_error_303 = http_error_301
http_error_307 = http_error_301
def http_error_401(self, req, fp, code, msg, headers):
        # Check if
        # - the server requires digest auth, AND
        # - we tried (unsuccessfully) with basic auth.
        # If both conditions hold, parse authentication information
# out of the Authorization header we sent the first time
# (for the username and password) and the WWW-Authenticate
# header the server sent back (for the realm) and retry
# the request with the appropriate digest auth headers instead.
# This evil genius hack has been brought to you by Aaron Swartz.
host = urllib.parse.urlparse(req.get_full_url())[1]
if 'Authorization' not in req.headers or 'WWW-Authenticate' not in headers:
return self.http_error_default(req, fp, code, msg, headers)
auth = base64.decodebytes(req.headers['Authorization'].split(' ')[1].encode()).decode()
user, passw = auth.split(':')
realm = re.findall('realm="([^"]*)"', headers['WWW-Authenticate'])[0]
self.add_password(realm, host, user, passw)
retry = self.http_error_auth_reqed('www-authenticate', host, req, headers)
self.reset_retry_count()
return retry
def _build_urllib2_request(url, agent, accept_header, etag, modified, referrer, auth, request_headers):
request = urllib.request.Request(url)
request.add_header('User-Agent', agent)
if etag:
request.add_header('If-None-Match', etag)
if isinstance(modified, str):
modified = _parse_date(modified)
elif isinstance(modified, datetime.datetime):
modified = modified.utctimetuple()
if modified:
# format into an RFC 1123-compliant timestamp. We can't use
# time.strftime() since the %a and %b directives can be affected
# by the current locale, but RFC 2616 states that dates must be
# in English.
short_weekdays = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']
months = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
request.add_header('If-Modified-Since', '%s, %02d %s %04d %02d:%02d:%02d GMT' % (short_weekdays[modified[6]], modified[2], months[modified[1] - 1], modified[0], modified[3], modified[4], modified[5]))
if referrer:
request.add_header('Referer', referrer)
request.add_header('Accept-encoding', 'gzip, deflate')
if auth:
request.add_header('Authorization', 'Basic %s' % auth)
if accept_header:
request.add_header('Accept', accept_header)
# use this for whatever -- cookies, special headers, etc
# [('Cookie','Something'),('x-special-header','Another Value')]
for header_name, header_value in request_headers.items():
request.add_header(header_name, header_value)
request.add_header('A-IM', 'feed') # RFC 3229 support
return request
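# Illustrative note (not part of the upstream source): given a
# hypothetical struct_time for Mon, 15 Jan 2024 12:00:00 GMT, the code
# above produces locale-independent headers such as
#
#     If-Modified-Since: Mon, 15 Jan 2024 12:00:00 GMT
#     A-IM: feed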
def get(url, etag=None, modified=None, agent=None, referrer=None, handlers=None, request_headers=None, result=None):
if handlers is None:
handlers = []
elif not isinstance(handlers, list):
handlers = [handlers]
if request_headers is None:
request_headers = {}
# Deal with the feed URI scheme
if url.startswith('feed:http'):
url = url[5:]
elif url.startswith('feed:'):
url = 'http:' + url[5:]
if not agent:
from . import USER_AGENT
agent = USER_AGENT
# Test for inline user:password credentials for HTTP basic auth
auth = None
if not url.startswith('ftp:'):
url_pieces = urllib.parse.urlparse(url)
if url_pieces.username:
new_pieces = list(url_pieces)
new_pieces[1] = url_pieces.hostname
if url_pieces.port:
new_pieces[1] = f'{url_pieces.hostname}:{url_pieces.port}'
url = urllib.parse.urlunparse(new_pieces)
auth = base64.standard_b64encode(f'{url_pieces.username}:{url_pieces.password}'.encode()).decode()
# iri support
if not isinstance(url, bytes):
url = convert_to_idn(url)
# Prevent UnicodeEncodeErrors caused by Unicode characters in the path.
bits = []
for c in url:
try:
c.encode('ascii')
except UnicodeEncodeError:
bits.append(urllib.parse.quote(c))
else:
bits.append(c)
url = ''.join(bits)
# try to open with urllib2 (to use optional headers)
request = _build_urllib2_request(url, agent, ACCEPT_HEADER, etag, modified, referrer, auth, request_headers)
opener = urllib.request.build_opener(*tuple(handlers + [_FeedURLHandler()]))
opener.addheaders = [] # RMK - must clear so we only send our custom User-Agent
f = opener.open(request)
data = f.read()
f.close()
# lowercase all of the HTTP headers for comparisons per RFC 2616
result['headers'] = {k.lower(): v for k, v in f.headers.items()}
# if feed is gzip-compressed, decompress it
if data and 'gzip' in result['headers'].get('content-encoding', ''):
try:
data = gzip.GzipFile(fileobj=io.BytesIO(data)).read()
except (EOFError, IOError, struct.error) as e:
# IOError can occur if the gzip header is bad.
# struct.error can occur if the data is damaged.
result['bozo'] = True
result['bozo_exception'] = e
if isinstance(e, struct.error):
# A gzip header was found but the data is corrupt.
# Ideally, we should re-request the feed without the
# 'Accept-encoding: gzip' header, but we don't.
data = None
elif data and 'deflate' in result['headers'].get('content-encoding', ''):
try:
data = zlib.decompress(data)
except zlib.error:
try:
# The data may have no headers and no checksum.
data = zlib.decompress(data, -15)
except zlib.error as e:
result['bozo'] = True
result['bozo_exception'] = e
# save HTTP headers
if 'etag' in result['headers']:
etag = result['headers'].get('etag', '')
if isinstance(etag, bytes):
etag = etag.decode('utf-8', 'ignore')
if etag:
result['etag'] = etag
if 'last-modified' in result['headers']:
modified = result['headers'].get('last-modified', '')
if modified:
result['modified'] = modified
result['modified_parsed'] = _parse_date(modified)
if isinstance(f.url, bytes):
result['href'] = f.url.decode('utf-8', 'ignore')
else:
result['href'] = f.url
result['status'] = getattr(f, 'status', None) or 200
# Stop processing if the server sent HTTP 304 Not Modified.
if getattr(f, 'code', 0) == 304:
result['version'] = ''
result['debug_message'] = 'The feed has not changed since you last checked, ' + \
'so the server sent no data. This is a feature, not a bug!'
return data
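# Illustrative usage sketch (not part of the upstream source), with a
# hypothetical URL. Pass `etag`/`modified` from a previous fetch to get
# conditional-GET behavior:
#
#     result = {}
#     data = get('http://example.org/feed.xml', etag='"abc123"',
#                modified='Mon, 15 Jan 2024 12:00:00 GMT', result=result)
#     result['status']    # 304 if unchanged, otherwise usually 200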
| 9,844
|
Python
|
.py
| 205
| 40.980488
| 208
| 0.662473
|
rembo10/headphones
| 3,370
| 601
| 527
|
GPL-3.0
|
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
|
9,224
|
greek.py
|
rembo10_headphones/lib/feedparser/datetimes/greek.py
|
# Copyright 2010-2021 Kurt McKee <contactme@kurtmckee.org>
# Copyright 2002-2008 Mark Pilgrim
# All rights reserved.
#
# This file is a part of feedparser.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 'AS IS'
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import re
from .rfc822 import _parse_date_rfc822
# Unicode strings for Greek date strings
_greek_months = {
'\u0399\u03b1\u03bd': 'Jan', # c9e1ed in iso-8859-7
'\u03a6\u03b5\u03b2': 'Feb', # d6e5e2 in iso-8859-7
'\u039c\u03ac\u03ce': 'Mar', # ccdcfe in iso-8859-7
'\u039c\u03b1\u03ce': 'Mar', # cce1fe in iso-8859-7
'\u0391\u03c0\u03c1': 'Apr', # c1f0f1 in iso-8859-7
'\u039c\u03ac\u03b9': 'May', # ccdce9 in iso-8859-7
'\u039c\u03b1\u03ca': 'May', # cce1fa in iso-8859-7
'\u039c\u03b1\u03b9': 'May', # cce1e9 in iso-8859-7
'\u0399\u03bf\u03cd\u03bd': 'Jun', # c9effded in iso-8859-7
'\u0399\u03bf\u03bd': 'Jun', # c9efed in iso-8859-7
'\u0399\u03bf\u03cd\u03bb': 'Jul', # c9effdeb in iso-8859-7
'\u0399\u03bf\u03bb': 'Jul', # c9f9eb in iso-8859-7
'\u0391\u03cd\u03b3': 'Aug', # c1fde3 in iso-8859-7
'\u0391\u03c5\u03b3': 'Aug', # c1f5e3 in iso-8859-7
'\u03a3\u03b5\u03c0': 'Sep', # d3e5f0 in iso-8859-7
'\u039f\u03ba\u03c4': 'Oct', # cfeaf4 in iso-8859-7
'\u039d\u03bf\u03ad': 'Nov', # cdefdd in iso-8859-7
'\u039d\u03bf\u03b5': 'Nov', # cdefe5 in iso-8859-7
'\u0394\u03b5\u03ba': 'Dec', # c4e5ea in iso-8859-7
}
_greek_wdays = {
'\u039a\u03c5\u03c1': 'Sun', # caf5f1 in iso-8859-7
'\u0394\u03b5\u03c5': 'Mon', # c4e5f5 in iso-8859-7
'\u03a4\u03c1\u03b9': 'Tue', # d4f1e9 in iso-8859-7
'\u03a4\u03b5\u03c4': 'Wed', # d4e5f4 in iso-8859-7
'\u03a0\u03b5\u03bc': 'Thu', # d0e5ec in iso-8859-7
'\u03a0\u03b1\u03c1': 'Fri', # d0e1f1 in iso-8859-7
'\u03a3\u03b1\u03b2': 'Sat', # d3e1e2 in iso-8859-7
}
_greek_date_format_re = re.compile(r'([^,]+),\s+(\d{2})\s+([^\s]+)\s+(\d{4})\s+(\d{2}):(\d{2}):(\d{2})\s+([^\s]+)')
def _parse_date_greek(date_string):
"""Parse a string according to a Greek 8-bit date format."""
m = _greek_date_format_re.match(date_string)
if not m:
return
wday = _greek_wdays[m.group(1)]
month = _greek_months[m.group(3)]
rfc822date = '%(wday)s, %(day)s %(month)s %(year)s %(hour)s:%(minute)s:%(second)s %(zonediff)s' % \
{
'wday': wday,
'day': m.group(2),
'month': month,
'year': m.group(4),
'hour': m.group(5),
'minute': m.group(6),
'second': m.group(7),
'zonediff': m.group(8),
}
return _parse_date_rfc822(rfc822date)
| 4,022
|
Python
|
.py
| 79
| 45.822785
| 115
| 0.635671
|
rembo10/headphones
| 3,370
| 601
| 527
|
GPL-3.0
|
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
|
9,225
|
rfc822.py
|
rembo10_headphones/lib/feedparser/datetimes/rfc822.py
|
# Copyright 2010-2021 Kurt McKee <contactme@kurtmckee.org>
# Copyright 2002-2008 Mark Pilgrim
# All rights reserved.
#
# This file is a part of feedparser.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 'AS IS'
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import datetime
timezone_names = {
'ut': 0, 'gmt': 0, 'z': 0,
'adt': -3, 'ast': -4, 'at': -4,
'edt': -4, 'est': -5, 'et': -5,
'cdt': -5, 'cst': -6, 'ct': -6,
'mdt': -6, 'mst': -7, 'mt': -7,
'pdt': -7, 'pst': -8, 'pt': -8,
'a': -1, 'n': 1,
'm': -12, 'y': 12,
'met': 1, 'mest': 2,
}
day_names = {'mon', 'tue', 'wed', 'thu', 'fri', 'sat', 'sun'}
months = {
'jan': 1, 'feb': 2, 'mar': 3, 'apr': 4, 'may': 5, 'jun': 6,
'jul': 7, 'aug': 8, 'sep': 9, 'oct': 10, 'nov': 11, 'dec': 12,
}
def _parse_date_rfc822(date):
"""Parse RFC 822 dates and times
http://tools.ietf.org/html/rfc822#section-5
There are some formatting differences that are accounted for:
1. Years may be two or four digits.
2. The month and day can be swapped.
3. Additional timezone names are supported.
4. A default time and timezone are assumed if only a date is present.
:param str date: a date/time string that will be converted to a time tuple
:returns: a UTC time tuple, or None
:rtype: time.struct_time | None
"""
parts = date.lower().split()
if len(parts) < 5:
# Assume that the time and timezone are missing
parts.extend(('00:00:00', '0000'))
# Remove the day name
if parts[0][:3] in day_names:
parts = parts[1:]
if len(parts) < 5:
# If there are still fewer than five parts, there's not enough
# information to interpret this.
return None
# Handle the day and month name.
month = months.get(parts[1][:3])
try:
day = int(parts[0])
except ValueError:
# Check if the day and month are swapped.
if months.get(parts[0][:3]):
try:
day = int(parts[1])
except ValueError:
return None
month = months.get(parts[0][:3])
else:
return None
if not month:
return None
# Handle the year.
try:
year = int(parts[2])
except ValueError:
return None
# Normalize two-digit years:
# Anything in the 90's is interpreted as 1990 and on.
# Anything 89 or less is interpreted as 2089 or before.
if len(parts[2]) <= 2:
year += (1900, 2000)[year < 90]
# Handle the time (default to 00:00:00).
time_parts = parts[3].split(':')
time_parts.extend(('0',) * (3 - len(time_parts)))
try:
(hour, minute, second) = [int(i) for i in time_parts]
except ValueError:
return None
# Handle the timezone information, if any (default to +0000).
# Strip 'Etc/' from the timezone.
if parts[4].startswith('etc/'):
parts[4] = parts[4][4:]
# Normalize timezones that start with 'gmt':
# GMT-05:00 => -0500
# GMT => GMT
if parts[4].startswith('gmt'):
parts[4] = ''.join(parts[4][3:].split(':')) or 'gmt'
# Handle timezones like '-0500', '+0500', and 'EST'
if parts[4] and parts[4][0] in ('-', '+'):
try:
if ':' in parts[4]:
timezone_hours = int(parts[4][1:3])
timezone_minutes = int(parts[4][4:])
else:
timezone_hours = int(parts[4][1:3])
timezone_minutes = int(parts[4][3:])
except ValueError:
return None
if parts[4].startswith('-'):
timezone_hours *= -1
timezone_minutes *= -1
else:
timezone_hours = timezone_names.get(parts[4], 0)
timezone_minutes = 0
# Create the datetime object and timezone delta objects
try:
stamp = datetime.datetime(year, month, day, hour, minute, second)
except ValueError:
return None
delta = datetime.timedelta(0, 0, 0, 0, timezone_minutes, timezone_hours)
# Return the date and timestamp in a UTC 9-tuple
try:
return (stamp - delta).utctimetuple()
except (OverflowError, ValueError):
# IronPython throws ValueErrors instead of OverflowErrors
return None
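# Illustrative sketch (not part of the upstream source): the tolerant
# parsing rules in action.
#
#     >>> _parse_date_rfc822('Thu, 01 Jan 2004 19:48:21 GMT')[:6]
#     (2004, 1, 1, 19, 48, 21)
#     >>> _parse_date_rfc822('01 Jan 04')[:6]    # 2-digit year, date only
#     (2004, 1, 1, 0, 0, 0)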
| 5,423
|
Python
|
.py
| 137
| 33.583942
| 78
| 0.630192
|
rembo10/headphones
| 3,370
| 601
| 527
|
GPL-3.0
|
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
|
9,226
|
hungarian.py
|
rembo10_headphones/lib/feedparser/datetimes/hungarian.py
|
# Copyright 2010-2021 Kurt McKee <contactme@kurtmckee.org>
# Copyright 2002-2008 Mark Pilgrim
# All rights reserved.
#
# This file is a part of feedparser.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 'AS IS'
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import re
from .w3dtf import _parse_date_w3dtf
# Unicode strings for Hungarian date strings
_hungarian_months = {
'janu\u00e1r': '01', # e1 in iso-8859-2
'febru\u00e1ri': '02', # e1 in iso-8859-2
'm\u00e1rcius': '03', # e1 in iso-8859-2
'\u00e1prilis': '04', # e1 in iso-8859-2
    'm\u00e1jus': '05', # e1 in iso-8859-2
'j\u00fanius': '06', # fa in iso-8859-2
'j\u00falius': '07', # fa in iso-8859-2
'augusztus': '08',
'szeptember': '09',
'okt\u00f3ber': '10', # f3 in iso-8859-2
'november': '11',
'december': '12',
}
_hungarian_date_format_re = re.compile(r'(\d{4})-([^-]+)-(\d{,2})T(\d{,2}):(\d{2})([+-](\d{,2}:\d{2}))')
def _parse_date_hungarian(date_string):
"""Parse a string according to a Hungarian 8-bit date format."""
m = _hungarian_date_format_re.match(date_string)
if not m or m.group(2) not in _hungarian_months:
return None
month = _hungarian_months[m.group(2)]
day = m.group(3)
if len(day) == 1:
day = '0' + day
hour = m.group(4)
if len(hour) == 1:
hour = '0' + hour
w3dtfdate = '%(year)s-%(month)s-%(day)sT%(hour)s:%(minute)s%(zonediff)s' % \
{
'year': m.group(1),
'month': month,
'day': day,
'hour': hour,
'minute': m.group(5),
'zonediff': m.group(6),
}
return _parse_date_w3dtf(w3dtfdate)
| 2,945
|
Python
|
.py
| 66
| 39.530303
| 104
| 0.655064
|
rembo10/headphones
| 3,370
| 601
| 527
|
GPL-3.0
|
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
|
9,227
|
w3dtf.py
|
rembo10_headphones/lib/feedparser/datetimes/w3dtf.py
|
# Copyright 2010-2021 Kurt McKee <contactme@kurtmckee.org>
# Copyright 2002-2008 Mark Pilgrim
# All rights reserved.
#
# This file is a part of feedparser.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 'AS IS'
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import datetime
timezonenames = {
'ut': 0, 'gmt': 0, 'z': 0,
'adt': -3, 'ast': -4, 'at': -4,
'edt': -4, 'est': -5, 'et': -5,
'cdt': -5, 'cst': -6, 'ct': -6,
'mdt': -6, 'mst': -7, 'mt': -7,
'pdt': -7, 'pst': -8, 'pt': -8,
'a': -1, 'n': 1,
'm': -12, 'y': 12,
}
# W3 date and time format parser
# http://www.w3.org/TR/NOTE-datetime
# Also supports MSSQL-style datetimes as defined at:
# http://msdn.microsoft.com/en-us/library/ms186724.aspx
# (basically, allow a space as a date/time/timezone separator)
def _parse_date_w3dtf(datestr):
if not datestr.strip():
return None
parts = datestr.lower().split('t')
if len(parts) == 1:
# This may be a date only, or may be an MSSQL-style date
parts = parts[0].split()
if len(parts) == 1:
# Treat this as a date only
parts.append('00:00:00z')
elif len(parts) > 2:
return None
date = parts[0].split('-', 2)
if not date or len(date[0]) != 4:
return None
# Ensure that `date` has 3 elements. Using '1' sets the default
# month to January and the default day to the 1st of the month.
date.extend(['1'] * (3 - len(date)))
try:
year, month, day = [int(i) for i in date]
except ValueError:
# `date` may have more than 3 elements or may contain
# non-integer strings.
return None
if parts[1].endswith('z'):
parts[1] = parts[1][:-1]
parts.append('z')
# Append the numeric timezone offset, if any, to parts.
# If this is an MSSQL-style date then parts[2] already contains
# the timezone information, so `append()` will not affect it.
# Add 1 to each value so that if `find()` returns -1 it will be
# treated as False.
loc = parts[1].find('-') + 1 or parts[1].find('+') + 1 or len(parts[1]) + 1
loc = loc - 1
parts.append(parts[1][loc:])
parts[1] = parts[1][:loc]
time = parts[1].split(':', 2)
# Ensure that time has 3 elements. Using '0' means that the
# minutes and seconds, if missing, will default to 0.
time.extend(['0'] * (3 - len(time)))
if parts[2][:1] in ('-', '+'):
try:
tzhour = int(parts[2][1:3])
tzmin = int(parts[2][4:])
except ValueError:
return None
if parts[2].startswith('-'):
tzhour = tzhour * -1
tzmin = tzmin * -1
else:
tzhour = timezonenames.get(parts[2], 0)
tzmin = 0
try:
hour, minute, second = [int(float(i)) for i in time]
except ValueError:
return None
# Create the datetime object and timezone delta objects
try:
stamp = datetime.datetime(year, month, day, hour, minute, second)
except ValueError:
return None
delta = datetime.timedelta(0, 0, 0, 0, tzmin, tzhour)
# Return the date and timestamp in a UTC 9-tuple
try:
return (stamp - delta).utctimetuple()
except (OverflowError, ValueError):
# IronPython throws ValueErrors instead of OverflowErrors
return None
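# Illustrative sketch (not part of the upstream source): the timezone
# offset is folded into the returned UTC tuple.
#
#     >>> _parse_date_w3dtf('2003-12-31T10:14:55-08:00')[:6]
#     (2003, 12, 31, 18, 14, 55)
#     >>> _parse_date_w3dtf('2003-12-31 10:14:55')[:6]    # MSSQL-style
#     (2003, 12, 31, 10, 14, 55)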
| 4,506
|
Python
|
.py
| 110
| 35.818182
| 79
| 0.653005
|
rembo10/headphones
| 3,370
| 601
| 527
|
GPL-3.0
|
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
|
9,228
|
korean.py
|
rembo10_headphones/lib/feedparser/datetimes/korean.py
|
# Copyright 2010-2021 Kurt McKee <contactme@kurtmckee.org>
# Copyright 2002-2008 Mark Pilgrim
# All rights reserved.
#
# This file is a part of feedparser.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 'AS IS'
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import re
from .w3dtf import _parse_date_w3dtf
# 8-bit date handling routines written by ytrewq1.
_korean_year = '\ub144' # b3e2 in euc-kr
_korean_month = '\uc6d4' # bff9 in euc-kr
_korean_day = '\uc77c' # c0cf in euc-kr
_korean_am = '\uc624\uc804' # bfc0 c0fc in euc-kr
_korean_pm = '\uc624\ud6c4' # bfc0 c8c4 in euc-kr
_korean_onblog_date_re = re.compile(
r'(\d{4})%s\s+(\d{2})%s\s+(\d{2})%s\s+(\d{2}):(\d{2}):(\d{2})'
% (_korean_year, _korean_month, _korean_day)
)
_korean_nate_date_re = re.compile(
r'(\d{4})-(\d{2})-(\d{2})\s+(%s|%s)\s+(\d{,2}):(\d{,2}):(\d{,2})'
% (_korean_am, _korean_pm))
def _parse_date_onblog(dateString):
"""Parse a string according to the OnBlog 8-bit date format"""
m = _korean_onblog_date_re.match(dateString)
if not m:
return
w3dtfdate = '%(year)s-%(month)s-%(day)sT%(hour)s:%(minute)s:%(second)s%(zonediff)s' % \
{'year': m.group(1), 'month': m.group(2), 'day': m.group(3),
'hour': m.group(4), 'minute': m.group(5), 'second': m.group(6),
'zonediff': '+09:00'}
return _parse_date_w3dtf(w3dtfdate)
def _parse_date_nate(dateString):
"""Parse a string according to the Nate 8-bit date format"""
m = _korean_nate_date_re.match(dateString)
if not m:
return
hour = int(m.group(5))
ampm = m.group(4)
if ampm == _korean_pm:
hour += 12
hour = str(hour)
if len(hour) == 1:
hour = '0' + hour
w3dtfdate = '%(year)s-%(month)s-%(day)sT%(hour)s:%(minute)s:%(second)s%(zonediff)s' % \
{
'year': m.group(1),
'month': m.group(2),
'day': m.group(3),
'hour': hour,
'minute': m.group(6),
'second': m.group(7),
'zonediff': '+09:00',
}
return _parse_date_w3dtf(w3dtfdate)
| 3,354
|
Python
|
.py
| 74
| 39.743243
| 91
| 0.647203
|
rembo10/headphones
| 3,370
| 601
| 527
|
GPL-3.0
|
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
|
9,229
|
__init__.py
|
rembo10_headphones/lib/feedparser/datetimes/__init__.py
|
# Copyright 2010-2021 Kurt McKee <contactme@kurtmckee.org>
# Copyright 2002-2008 Mark Pilgrim
# All rights reserved.
#
# This file is a part of feedparser.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 'AS IS'
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from .asctime import _parse_date_asctime
from .greek import _parse_date_greek
from .hungarian import _parse_date_hungarian
from .iso8601 import _parse_date_iso8601
from .korean import _parse_date_onblog, _parse_date_nate
from .perforce import _parse_date_perforce
from .rfc822 import _parse_date_rfc822
from .w3dtf import _parse_date_w3dtf
_date_handlers = []
def registerDateHandler(func):
"""Register a date handler function (takes string, returns 9-tuple date in GMT)"""
_date_handlers.insert(0, func)
def _parse_date(date_string):
"""Parses a variety of date formats into a 9-tuple in GMT"""
if not date_string:
return None
for handler in _date_handlers:
try:
date9tuple = handler(date_string)
except (KeyError, OverflowError, ValueError, AttributeError):
continue
if not date9tuple:
continue
if len(date9tuple) != 9:
continue
return date9tuple
return None
registerDateHandler(_parse_date_onblog)
registerDateHandler(_parse_date_nate)
registerDateHandler(_parse_date_greek)
registerDateHandler(_parse_date_hungarian)
registerDateHandler(_parse_date_perforce)
registerDateHandler(_parse_date_asctime)
registerDateHandler(_parse_date_iso8601)
registerDateHandler(_parse_date_rfc822)
registerDateHandler(_parse_date_w3dtf)
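# Illustrative usage (sketch): handlers are tried newest-first, so
#   _parse_date('2003-12-31T10:14:55Z')           # matched by the W3DTF handler
#   _parse_date('Thu, 01 Jan 2004 19:48:21 GMT')  # matched by the RFC822 handler
# each return a 9-tuple in GMT, or None if every handler fails.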
| 2,786 | Python | .py | 62 | 41.870968 | 86 | 0.775405 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
9,230 | asctime.py | rembo10_headphones/lib/feedparser/datetimes/asctime.py |
# Copyright 2010-2021 Kurt McKee <contactme@kurtmckee.org>
# Copyright 2002-2008 Mark Pilgrim
# All rights reserved.
#
# This file is a part of feedparser.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 'AS IS'
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from .rfc822 import _parse_date_rfc822
_months = [
'jan',
'feb',
'mar',
'apr',
'may',
'jun',
'jul',
'aug',
'sep',
'oct',
'nov',
'dec',
]
def _parse_date_asctime(dt):
"""Parse asctime-style dates.
Converts asctime to RFC822-compatible dates and uses the RFC822 parser
to do the actual parsing.
Supported formats (format is standardized to the first one listed):
* {weekday name} {month name} dd hh:mm:ss {+-tz} yyyy
* {weekday name} {month name} dd hh:mm:ss yyyy
"""
parts = dt.split()
# Insert a GMT timezone, if needed.
if len(parts) == 5:
parts.insert(4, '+0000')
# Exit if there are not six parts.
if len(parts) != 6:
return None
# Reassemble the parts in an RFC822-compatible order and parse them.
return _parse_date_rfc822(' '.join([
parts[0], parts[2], parts[1], parts[5], parts[3], parts[4],
]))
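# Illustrative example (sketch): 'Sun Jan  4 16:29:06 2004' gains the
# '+0000' zone, is reordered to 'Sun 4 Jan 2004 16:29:06 +0000', and is
# parsed by _parse_date_rfc822.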
| 2,380 | Python | .py | 60 | 36.283333 | 77 | 0.724123 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
9,231 | perforce.py | rembo10_headphones/lib/feedparser/datetimes/perforce.py |
# Copyright 2010-2021 Kurt McKee <contactme@kurtmckee.org>
# Copyright 2002-2008 Mark Pilgrim
# All rights reserved.
#
# This file is a part of feedparser.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 'AS IS'
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import email._parseaddr
import re
import time
def _parse_date_perforce(date_string):
"""parse a date in yyyy/mm/dd hh:mm:ss TTT format"""
# Fri, 2006/09/15 08:19:53 EDT
_my_date_pattern = re.compile(r'(\w{,3}), (\d{,4})/(\d{,2})/(\d{2}) (\d{,2}):(\d{2}):(\d{2}) (\w{,3})')
m = _my_date_pattern.search(date_string)
if m is None:
return None
dow, year, month, day, hour, minute, second, tz = m.groups()
months = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
new_date_string = "%s, %s %s %s %s:%s:%s %s" % (dow, day, months[int(month) - 1], year, hour, minute, second, tz)
tm = email._parseaddr.parsedate_tz(new_date_string)
if tm:
return time.gmtime(email._parseaddr.mktime_tz(tm))
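# Illustrative example (sketch): 'Fri, 2006/09/15 08:19:53 EDT' is
# reassembled as 'Fri, 15 Sep 2006 08:19:53 EDT' and parsed with the
# email module's RFC822 machinery, yielding a GMT 9-tuple.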
| 2,213 | Python | .py | 42 | 50.261905 | 117 | 0.724042 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
9,232 | iso8601.py | rembo10_headphones/lib/feedparser/datetimes/iso8601.py |
# Copyright 2010-2021 Kurt McKee <contactme@kurtmckee.org>
# Copyright 2002-2008 Mark Pilgrim
# All rights reserved.
#
# This file is a part of feedparser.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 'AS IS'
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import re
import time
# ISO-8601 date parsing routines written by Fazal Majid.
# The ISO 8601 standard is very convoluted and irregular - a full ISO 8601
# parser is beyond the scope of feedparser and would be a worthwhile addition
# to the Python library.
# A single regular expression cannot parse ISO 8601 date formats into groups
# as the standard is highly irregular (for instance is 030104 2003-01-04 or
# 0301-04-01), so we use templates instead.
# Please note the order in templates is significant because we need a
# greedy match.
_iso8601_tmpl = [
'YYYY-?MM-?DD',
'YYYY-0MM?-?DD',
'YYYY-MM',
'YYYY-?OOO',
'YY-?MM-?DD',
'YY-?OOO',
'YYYY',
'-YY-?MM',
'-OOO',
'-YY',
'--MM-?DD',
'--MM',
'---DD',
'CC',
'',
]
_iso8601_re = [
tmpl.replace(
'YYYY', r'(?P<year>\d{4})').replace(
'YY', r'(?P<year>\d\d)').replace(
'MM', r'(?P<month>[01]\d)').replace(
'DD', r'(?P<day>[0123]\d)').replace(
'OOO', r'(?P<ordinal>[0123]\d\d)').replace(
'CC', r'(?P<century>\d\d$)')
+ r'(T?(?P<hour>\d{2}):(?P<minute>\d{2})'
+ r'(:(?P<second>\d{2}))?'
+ r'(\.(?P<fracsecond>\d+))?'
+ r'(?P<tz>[+-](?P<tzhour>\d{2})(:(?P<tzmin>\d{2}))?|Z)?)?'
for tmpl in _iso8601_tmpl]
try:
del tmpl
except NameError:
pass
_iso8601_matches = [re.compile(regex).match for regex in _iso8601_re]
try:
del regex
except NameError:
pass
def _parse_date_iso8601(date_string):
"""Parse a variety of ISO-8601-compatible formats like 20040105"""
m = None
for _iso8601_match in _iso8601_matches:
m = _iso8601_match(date_string)
if m:
break
if not m:
return
if m.span() == (0, 0):
return
params = m.groupdict()
ordinal = params.get('ordinal', 0)
if ordinal:
ordinal = int(ordinal)
else:
ordinal = 0
year = params.get('year', '--')
if not year or year == '--':
year = time.gmtime()[0]
elif len(year) == 2:
# ISO 8601 assumes current century, i.e. 93 -> 2093, NOT 1993
year = 100 * int(time.gmtime()[0] / 100) + int(year)
else:
year = int(year)
month = params.get('month', '-')
if not month or month == '-':
# ordinals are NOT normalized by mktime, we simulate them
# by setting month=1, day=ordinal
if ordinal:
month = 1
else:
month = time.gmtime()[1]
month = int(month)
day = params.get('day', 0)
if not day:
# see above
if ordinal:
day = ordinal
elif params.get('century', 0) or \
params.get('year', 0) or params.get('month', 0):
day = 1
else:
day = time.gmtime()[2]
else:
day = int(day)
# special case of the century - is the first year of the 21st century
# 2000 or 2001 ? The debate goes on...
if 'century' in params:
year = (int(params['century']) - 1) * 100 + 1
# in ISO 8601 most fields are optional
for field in ['hour', 'minute', 'second', 'tzhour', 'tzmin']:
if not params.get(field, None):
params[field] = 0
hour = int(params.get('hour', 0))
minute = int(params.get('minute', 0))
second = int(float(params.get('second', 0)))
# weekday is normalized by mktime(), we can ignore it
weekday = 0
daylight_savings_flag = -1
tm = [year, month, day, hour, minute, second, weekday,
ordinal, daylight_savings_flag]
# ISO 8601 time zone adjustments
tz = params.get('tz')
if tz and tz != 'Z':
if tz[0] == '-':
tm[3] += int(params.get('tzhour', 0))
tm[4] += int(params.get('tzmin', 0))
elif tz[0] == '+':
tm[3] -= int(params.get('tzhour', 0))
tm[4] -= int(params.get('tzmin', 0))
else:
return None
# Python's time.mktime() is a wrapper around the ANSI C mktime(3c)
# which is guaranteed to normalize d/m/y/h/m/s.
# Many implementations have bugs, but we'll pretend they don't.
return time.localtime(time.mktime(tuple(tm)))
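# Illustrative examples (sketch): '20040105', '2004-01-05', and
# '2004-01-05T10:14:55Z' each match one of the templates above; ordinal
# forms like '2003-335' are simulated via month=1, day=335 and left to
# mktime() to normalize.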
| 5,550 | Python | .py | 153 | 31.065359 | 77 | 0.631306 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
9,233 | georss.py | rembo10_headphones/lib/feedparser/namespaces/georss.py |
# Support for the GeoRSS format
# Copyright 2010-2021 Kurt McKee <contactme@kurtmckee.org>
# Copyright 2002-2008 Mark Pilgrim
# All rights reserved.
#
# This file is a part of feedparser.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 'AS IS'
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# Required for Python 3.6 compatibility.
from __future__ import generator_stop
from ..util import FeedParserDict
class Namespace(object):
supported_namespaces = {
'http://www.w3.org/2003/01/geo/wgs84_pos#': 'geo',
'http://www.georss.org/georss': 'georss',
'http://www.opengis.net/gml': 'gml',
}
def __init__(self):
self.ingeometry = 0
super(Namespace, self).__init__()
def _start_georssgeom(self, attrs_d):
self.push('geometry', 0)
context = self._get_context()
context['where'] = FeedParserDict()
_start_georss_point = _start_georssgeom
_start_georss_line = _start_georssgeom
_start_georss_polygon = _start_georssgeom
_start_georss_box = _start_georssgeom
def _save_where(self, geometry):
context = self._get_context()
context['where'].update(geometry)
def _end_georss_point(self):
geometry = _parse_georss_point(self.pop('geometry'))
if geometry:
self._save_where(geometry)
def _end_georss_line(self):
geometry = _parse_georss_line(self.pop('geometry'))
if geometry:
self._save_where(geometry)
def _end_georss_polygon(self):
this = self.pop('geometry')
geometry = _parse_georss_polygon(this)
if geometry:
self._save_where(geometry)
def _end_georss_box(self):
geometry = _parse_georss_box(self.pop('geometry'))
if geometry:
self._save_where(geometry)
def _start_where(self, attrs_d):
self.push('where', 0)
context = self._get_context()
context['where'] = FeedParserDict()
_start_georss_where = _start_where
def _parse_srs_attrs(self, attrs_d):
srs_name = attrs_d.get('srsname')
try:
srs_dimension = int(attrs_d.get('srsdimension', '2'))
except ValueError:
srs_dimension = 2
context = self._get_context()
context['where']['srsName'] = srs_name
context['where']['srsDimension'] = srs_dimension
def _start_gml_point(self, attrs_d):
self._parse_srs_attrs(attrs_d)
self.ingeometry = 1
self.push('geometry', 0)
def _start_gml_linestring(self, attrs_d):
self._parse_srs_attrs(attrs_d)
self.ingeometry = 'linestring'
self.push('geometry', 0)
def _start_gml_polygon(self, attrs_d):
self._parse_srs_attrs(attrs_d)
self.push('geometry', 0)
def _start_gml_exterior(self, attrs_d):
self.push('geometry', 0)
def _start_gml_linearring(self, attrs_d):
self.ingeometry = 'polygon'
self.push('geometry', 0)
def _start_gml_pos(self, attrs_d):
self.push('pos', 0)
def _end_gml_pos(self):
this = self.pop('pos')
context = self._get_context()
srs_name = context['where'].get('srsName')
srs_dimension = context['where'].get('srsDimension', 2)
swap = True
if srs_name and "EPSG" in srs_name:
epsg = int(srs_name.split(":")[-1])
swap = bool(epsg in _geogCS)
geometry = _parse_georss_point(this, swap=swap, dims=srs_dimension)
if geometry:
self._save_where(geometry)
def _start_gml_poslist(self, attrs_d):
self.push('pos', 0)
def _end_gml_poslist(self):
this = self.pop('pos')
context = self._get_context()
srs_name = context['where'].get('srsName')
srs_dimension = context['where'].get('srsDimension', 2)
swap = True
if srs_name and "EPSG" in srs_name:
epsg = int(srs_name.split(":")[-1])
swap = bool(epsg in _geogCS)
geometry = _parse_poslist(
this, self.ingeometry, swap=swap, dims=srs_dimension)
if geometry:
self._save_where(geometry)
def _end_geom(self):
self.ingeometry = 0
self.pop('geometry')
_end_gml_point = _end_geom
_end_gml_linestring = _end_geom
_end_gml_linearring = _end_geom
_end_gml_exterior = _end_geom
_end_gml_polygon = _end_geom
def _end_where(self):
self.pop('where')
_end_georss_where = _end_where
# GeoRSS geometry parsers. Each return a dict with 'type' and 'coordinates'
# items, or None in the case of a parsing error.
def _parse_poslist(value, geom_type, swap=True, dims=2):
if geom_type == 'linestring':
return _parse_georss_line(value, swap, dims)
elif geom_type == 'polygon':
ring = _parse_georss_line(value, swap, dims)
return {'type': 'Polygon', 'coordinates': (ring['coordinates'],)}
else:
return None
def _gen_georss_coords(value, swap=True, dims=2):
# A generator of (lon, lat) pairs from a string of encoded GeoRSS
# coordinates. Converts to floats and swaps order.
latlons = (float(ll) for ll in value.replace(',', ' ').split())
while True:
try:
t = [next(latlons), next(latlons)][::swap and -1 or 1]
if dims == 3:
t.append(next(latlons))
yield tuple(t)
except StopIteration:
return
def _parse_georss_point(value, swap=True, dims=2):
# A point contains a single latitude-longitude pair, separated by
# whitespace. We'll also handle comma separators.
try:
coords = list(_gen_georss_coords(value, swap, dims))
return {'type': 'Point', 'coordinates': coords[0]}
except (IndexError, ValueError):
return None
def _parse_georss_line(value, swap=True, dims=2):
# A line contains a space separated list of latitude-longitude pairs in
# WGS84 coordinate reference system, with each pair separated by
# whitespace. There must be at least two pairs.
try:
coords = list(_gen_georss_coords(value, swap, dims))
return {'type': 'LineString', 'coordinates': coords}
except (IndexError, ValueError):
return None
def _parse_georss_polygon(value, swap=True, dims=2):
# A polygon contains a space separated list of latitude-longitude pairs,
# with each pair separated by whitespace. There must be at least four
# pairs, with the last being identical to the first (so a polygon has a
# minimum of three actual points).
try:
ring = list(_gen_georss_coords(value, swap, dims))
except (IndexError, ValueError):
return None
if len(ring) < 4:
return None
return {'type': 'Polygon', 'coordinates': (ring,)}
def _parse_georss_box(value, swap=True, dims=2):
# A bounding box is a rectangular region, often used to define the extents
# of a map or a rough area of interest. A box contains two space separate
# latitude-longitude pairs, with each pair separated by whitespace. The
# first pair is the lower corner, the second is the upper corner.
try:
coords = list(_gen_georss_coords(value, swap, dims))
return {'type': 'Box', 'coordinates': tuple(coords)}
except (IndexError, ValueError):
return None
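# Illustrative example (sketch): _parse_georss_point('45.256 -71.92')
# returns {'type': 'Point', 'coordinates': (-71.92, 45.256)} -- GeoRSS
# input is lat/lon, while the emitted coordinates are lon/lat.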
# The list of EPSG codes for geographic (latitude/longitude) coordinate
# systems to support decoding of GeoRSS GML profiles.
_geogCS = [
3819, 3821, 3824, 3889, 3906, 4001, 4002, 4003, 4004, 4005, 4006, 4007, 4008,
4009, 4010, 4011, 4012, 4013, 4014, 4015, 4016, 4018, 4019, 4020, 4021, 4022,
4023, 4024, 4025, 4027, 4028, 4029, 4030, 4031, 4032, 4033, 4034, 4035, 4036,
4041, 4042, 4043, 4044, 4045, 4046, 4047, 4052, 4053, 4054, 4055, 4075, 4081,
4120, 4121, 4122, 4123, 4124, 4125, 4126, 4127, 4128, 4129, 4130, 4131, 4132,
4133, 4134, 4135, 4136, 4137, 4138, 4139, 4140, 4141, 4142, 4143, 4144, 4145,
4146, 4147, 4148, 4149, 4150, 4151, 4152, 4153, 4154, 4155, 4156, 4157, 4158,
4159, 4160, 4161, 4162, 4163, 4164, 4165, 4166, 4167, 4168, 4169, 4170, 4171,
4172, 4173, 4174, 4175, 4176, 4178, 4179, 4180, 4181, 4182, 4183, 4184, 4185,
4188, 4189, 4190, 4191, 4192, 4193, 4194, 4195, 4196, 4197, 4198, 4199, 4200,
4201, 4202, 4203, 4204, 4205, 4206, 4207, 4208, 4209, 4210, 4211, 4212, 4213,
4214, 4215, 4216, 4218, 4219, 4220, 4221, 4222, 4223, 4224, 4225, 4226, 4227,
4228, 4229, 4230, 4231, 4232, 4233, 4234, 4235, 4236, 4237, 4238, 4239, 4240,
4241, 4242, 4243, 4244, 4245, 4246, 4247, 4248, 4249, 4250, 4251, 4252, 4253,
4254, 4255, 4256, 4257, 4258, 4259, 4260, 4261, 4262, 4263, 4264, 4265, 4266,
4267, 4268, 4269, 4270, 4271, 4272, 4273, 4274, 4275, 4276, 4277, 4278, 4279,
4280, 4281, 4282, 4283, 4284, 4285, 4286, 4287, 4288, 4289, 4291, 4292, 4293,
4294, 4295, 4296, 4297, 4298, 4299, 4300, 4301, 4302, 4303, 4304, 4306, 4307,
4308, 4309, 4310, 4311, 4312, 4313, 4314, 4315, 4316, 4317, 4318, 4319, 4322,
4324, 4326, 4463, 4470, 4475, 4483, 4490, 4555, 4558, 4600, 4601, 4602, 4603,
4604, 4605, 4606, 4607, 4608, 4609, 4610, 4611, 4612, 4613, 4614, 4615, 4616,
4617, 4618, 4619, 4620, 4621, 4622, 4623, 4624, 4625, 4626, 4627, 4628, 4629,
4630, 4631, 4632, 4633, 4634, 4635, 4636, 4637, 4638, 4639, 4640, 4641, 4642,
4643, 4644, 4645, 4646, 4657, 4658, 4659, 4660, 4661, 4662, 4663, 4664, 4665,
4666, 4667, 4668, 4669, 4670, 4671, 4672, 4673, 4674, 4675, 4676, 4677, 4678,
4679, 4680, 4681, 4682, 4683, 4684, 4685, 4686, 4687, 4688, 4689, 4690, 4691,
4692, 4693, 4694, 4695, 4696, 4697, 4698, 4699, 4700, 4701, 4702, 4703, 4704,
4705, 4706, 4707, 4708, 4709, 4710, 4711, 4712, 4713, 4714, 4715, 4716, 4717,
4718, 4719, 4720, 4721, 4722, 4723, 4724, 4725, 4726, 4727, 4728, 4729, 4730,
4731, 4732, 4733, 4734, 4735, 4736, 4737, 4738, 4739, 4740, 4741, 4742, 4743,
4744, 4745, 4746, 4747, 4748, 4749, 4750, 4751, 4752, 4753, 4754, 4755, 4756,
4757, 4758, 4759, 4760, 4761, 4762, 4763, 4764, 4765, 4801, 4802, 4803, 4804,
4805, 4806, 4807, 4808, 4809, 4810, 4811, 4813, 4814, 4815, 4816, 4817, 4818,
4819, 4820, 4821, 4823, 4824, 4901, 4902, 4903, 4904, 4979,
]
| 11,399 | Python | .py | 236 | 41.911017 | 81 | 0.65387 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
9,234 | cc.py | rembo10_headphones/lib/feedparser/namespaces/cc.py |
# Support for the Creative Commons licensing extensions
# Copyright 2010-2021 Kurt McKee <contactme@kurtmckee.org>
# Copyright 2002-2008 Mark Pilgrim
# All rights reserved.
#
# This file is a part of feedparser.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 'AS IS'
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from ..util import FeedParserDict
class Namespace(object):
supported_namespaces = {
# RDF-based namespace
'http://creativecommons.org/ns#license': 'cc',
# Old RDF-based namespace
'http://web.resource.org/cc/': 'cc',
# RSS-based namespace
'http://cyber.law.harvard.edu/rss/creativeCommonsRssModule.html': 'creativecommons',
# Old RSS-based namespace
'http://backend.userland.com/creativeCommonsRssModule': 'creativecommons',
}
def _start_cc_license(self, attrs_d):
context = self._get_context()
value = self._get_attribute(attrs_d, 'rdf:resource')
attrs_d = FeedParserDict()
attrs_d['rel'] = 'license'
if value:
attrs_d['href'] = value
context.setdefault('links', []).append(attrs_d)
def _start_creativecommons_license(self, attrs_d):
self.push('license', 1)
_start_creativeCommons_license = _start_creativecommons_license
def _end_creativecommons_license(self):
value = self.pop('license')
context = self._get_context()
attrs_d = FeedParserDict()
attrs_d['rel'] = 'license'
if value:
attrs_d['href'] = value
context.setdefault('links', []).append(attrs_d)
del context['license']
_end_creativeCommons_license = _end_creativecommons_license
| 2,866 | Python | .py | 60 | 42.816667 | 92 | 0.721845 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
9,235 | dc.py | rembo10_headphones/lib/feedparser/namespaces/dc.py |
# Support for the Dublin Core metadata extensions
# Copyright 2010-2021 Kurt McKee <contactme@kurtmckee.org>
# Copyright 2002-2008 Mark Pilgrim
# All rights reserved.
#
# This file is a part of feedparser.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 'AS IS'
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from ..datetimes import _parse_date
from ..util import FeedParserDict
class Namespace(object):
supported_namespaces = {
'http://purl.org/dc/elements/1.1/': 'dc',
'http://purl.org/dc/terms/': 'dcterms',
}
def _end_dc_author(self):
self._end_author()
def _end_dc_creator(self):
self._end_author()
def _end_dc_date(self):
self._end_updated()
def _end_dc_description(self):
self._end_description()
def _end_dc_language(self):
self._end_language()
def _end_dc_publisher(self):
self._end_webmaster()
def _end_dc_rights(self):
self._end_rights()
def _end_dc_subject(self):
self._end_category()
def _end_dc_title(self):
self._end_title()
def _end_dcterms_created(self):
self._end_created()
def _end_dcterms_issued(self):
self._end_published()
def _end_dcterms_modified(self):
self._end_updated()
def _start_dc_author(self, attrs_d):
self._start_author(attrs_d)
def _start_dc_creator(self, attrs_d):
self._start_author(attrs_d)
def _start_dc_date(self, attrs_d):
self._start_updated(attrs_d)
def _start_dc_description(self, attrs_d):
self._start_description(attrs_d)
def _start_dc_language(self, attrs_d):
self._start_language(attrs_d)
def _start_dc_publisher(self, attrs_d):
self._start_webmaster(attrs_d)
def _start_dc_rights(self, attrs_d):
self._start_rights(attrs_d)
def _start_dc_subject(self, attrs_d):
self._start_category(attrs_d)
def _start_dc_title(self, attrs_d):
self._start_title(attrs_d)
def _start_dcterms_created(self, attrs_d):
self._start_created(attrs_d)
def _start_dcterms_issued(self, attrs_d):
self._start_published(attrs_d)
def _start_dcterms_modified(self, attrs_d):
self._start_updated(attrs_d)
def _start_dcterms_valid(self, attrs_d):
self.push('validity', 1)
def _end_dcterms_valid(self):
for validity_detail in self.pop('validity').split(';'):
if '=' in validity_detail:
key, value = validity_detail.split('=', 1)
if key == 'start':
self._save('validity_start', value, overwrite=True)
self._save('validity_start_parsed', _parse_date(value), overwrite=True)
elif key == 'end':
self._save('validity_end', value, overwrite=True)
self._save('validity_end_parsed', _parse_date(value), overwrite=True)
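    # Illustrative example (sketch): a dcterms:valid value of
    # 'start=2002-10-13T09:00+01:00;end=2002-10-17T17:00+01:00' produces
    # validity_start/validity_end plus their *_parsed 9-tuple variants.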
def _start_dc_contributor(self, attrs_d):
self.incontributor = 1
context = self._get_context()
context.setdefault('contributors', [])
context['contributors'].append(FeedParserDict())
self.push('name', 0)
def _end_dc_contributor(self):
self._end_name()
self.incontributor = 0
| 4,446 | Python | .py | 103 | 36.621359 | 91 | 0.671846 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
9,236 | mediarss.py | rembo10_headphones/lib/feedparser/namespaces/mediarss.py |
# Support for the Media RSS format
# Copyright 2010-2021 Kurt McKee <contactme@kurtmckee.org>
# Copyright 2002-2008 Mark Pilgrim
# All rights reserved.
#
# This file is a part of feedparser.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 'AS IS'
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from ..util import FeedParserDict
class Namespace(object):
supported_namespaces = {
# Canonical namespace
'http://search.yahoo.com/mrss/': 'media',
# Old namespace (no trailing slash)
'http://search.yahoo.com/mrss': 'media',
}
def _start_media_category(self, attrs_d):
attrs_d.setdefault('scheme', 'http://search.yahoo.com/mrss/category_schema')
self._start_category(attrs_d)
def _end_media_category(self):
self._end_category()
def _end_media_keywords(self):
for term in self.pop('media_keywords').split(','):
if term.strip():
self._add_tag(term.strip(), None, None)
def _start_media_title(self, attrs_d):
self._start_title(attrs_d)
def _end_media_title(self):
title_depth = self.title_depth
self._end_title()
self.title_depth = title_depth
def _start_media_group(self, attrs_d):
# don't do anything, but don't break the enclosed tags either
pass
def _start_media_rating(self, attrs_d):
context = self._get_context()
context.setdefault('media_rating', attrs_d)
self.push('rating', 1)
def _end_media_rating(self):
rating = self.pop('rating')
if rating is not None and rating.strip():
context = self._get_context()
context['media_rating']['content'] = rating
def _start_media_credit(self, attrs_d):
context = self._get_context()
context.setdefault('media_credit', [])
context['media_credit'].append(attrs_d)
self.push('credit', 1)
def _end_media_credit(self):
credit = self.pop('credit')
if credit is not None and credit.strip():
context = self._get_context()
context['media_credit'][-1]['content'] = credit
def _start_media_description(self, attrs_d):
self._start_description(attrs_d)
def _end_media_description(self):
self._end_description()
def _start_media_restriction(self, attrs_d):
context = self._get_context()
context.setdefault('media_restriction', attrs_d)
self.push('restriction', 1)
def _end_media_restriction(self):
restriction = self.pop('restriction')
if restriction is not None and restriction.strip():
context = self._get_context()
context['media_restriction']['content'] = [cc.strip().lower() for cc in restriction.split(' ')]
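    # Illustrative example (assumed markup): <media:restriction
    # relationship="allow">au us</media:restriction> stores the attribute
    # dict plus content=['au', 'us'].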
def _start_media_license(self, attrs_d):
context = self._get_context()
context.setdefault('media_license', attrs_d)
self.push('license', 1)
def _end_media_license(self):
license_ = self.pop('license')
if license_ is not None and license_.strip():
context = self._get_context()
context['media_license']['content'] = license_
def _start_media_content(self, attrs_d):
context = self._get_context()
context.setdefault('media_content', [])
context['media_content'].append(attrs_d)
def _start_media_thumbnail(self, attrs_d):
context = self._get_context()
context.setdefault('media_thumbnail', [])
self.push('url', 1) # new
context['media_thumbnail'].append(attrs_d)
def _end_media_thumbnail(self):
url = self.pop('url')
context = self._get_context()
if url is not None and url.strip():
if 'url' not in context['media_thumbnail'][-1]:
context['media_thumbnail'][-1]['url'] = url
def _start_media_player(self, attrs_d):
self.push('media_player', 0)
self._get_context()['media_player'] = FeedParserDict(attrs_d)
def _end_media_player(self):
value = self.pop('media_player')
context = self._get_context()
context['media_player']['content'] = value
| 5,336 | Python | .py | 116 | 39.094828 | 107 | 0.663908 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
9,237 | admin.py | rembo10_headphones/lib/feedparser/namespaces/admin.py |
# Support for the administrative elements extension
# Copyright 2010-2021 Kurt McKee <contactme@kurtmckee.org>
# Copyright 2002-2008 Mark Pilgrim
# All rights reserved.
#
# This file is a part of feedparser.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 'AS IS'
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from ..util import FeedParserDict
class Namespace(object):
# RDF Site Summary 1.0 Modules: Administrative
# http://web.resource.org/rss/1.0/modules/admin/
supported_namespaces = {
'http://webns.net/mvcb/': 'admin',
}
def _start_admin_generatoragent(self, attrs_d):
self.push('generator', 1)
value = self._get_attribute(attrs_d, 'rdf:resource')
if value:
self.elementstack[-1][2].append(value)
self.pop('generator')
self._get_context()['generator_detail'] = FeedParserDict({'href': value})
def _start_admin_errorreportsto(self, attrs_d):
self.push('errorreportsto', 1)
value = self._get_attribute(attrs_d, 'rdf:resource')
if value:
self.elementstack[-1][2].append(value)
self.pop('errorreportsto')
| 2,317 | Python | .py | 47 | 45.446809 | 81 | 0.743816 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
9,238 | psc.py | rembo10_headphones/lib/feedparser/namespaces/psc.py |
# Support for the Podlove Simple Chapters format
# Copyright 2010-2021 Kurt McKee <contactme@kurtmckee.org>
# Copyright 2002-2008 Mark Pilgrim
# All rights reserved.
#
# This file is a part of feedparser.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 'AS IS'
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import datetime
import re
from .. import util
class Namespace(object):
supported_namespaces = {
'http://podlove.org/simple-chapters': 'psc',
}
def __init__(self):
# chapters will only be captured while psc_chapters_flag is True.
self.psc_chapters_flag = False
super(Namespace, self).__init__()
def _start_psc_chapters(self, attrs_d):
context = self._get_context()
if 'psc_chapters' not in context:
self.psc_chapters_flag = True
attrs_d['chapters'] = []
context['psc_chapters'] = util.FeedParserDict(attrs_d)
def _end_psc_chapters(self):
self.psc_chapters_flag = False
def _start_psc_chapter(self, attrs_d):
if self.psc_chapters_flag:
start = self._get_attribute(attrs_d, 'start')
attrs_d['start_parsed'] = _parse_psc_chapter_start(start)
context = self._get_context()['psc_chapters']
context['chapters'].append(util.FeedParserDict(attrs_d))
format_ = re.compile(r'^((\d{2}):)?(\d{2}):(\d{2})(\.(\d{3}))?$')
def _parse_psc_chapter_start(start):
    match = format_.match(start)
    if match is None:
        return None
    _, h, m, s, _, ms = match.groups()
    h, m, s, ms = (int(h or 0), int(m), int(s), int(ms or 0))
    return datetime.timedelta(0, h*60*60 + m*60 + s, ms*1000)
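# Illustrative example (sketch): _parse_psc_chapter_start('01:02:03.004')
# returns datetime.timedelta(hours=1, minutes=2, seconds=3, milliseconds=4);
# strings that do not match the pattern return None.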
| 2,839 | Python | .py | 60 | 42.75 | 77 | 0.704882 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
9,239 | itunes.py | rembo10_headphones/lib/feedparser/namespaces/itunes.py |
# Support for the iTunes format
# Copyright 2010-2021 Kurt McKee <contactme@kurtmckee.org>
# Copyright 2002-2008 Mark Pilgrim
# All rights reserved.
#
# This file is a part of feedparser.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 'AS IS'
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from ..util import FeedParserDict
class Namespace(object):
supported_namespaces = {
# Canonical namespace
'http://www.itunes.com/DTDs/PodCast-1.0.dtd': 'itunes',
# Extra namespace
'http://example.com/DTDs/PodCast-1.0.dtd': 'itunes',
}
def _start_itunes_author(self, attrs_d):
self._start_author(attrs_d)
def _end_itunes_author(self):
self._end_author()
def _end_itunes_category(self):
self._end_category()
def _start_itunes_name(self, attrs_d):
self._start_name(attrs_d)
def _end_itunes_name(self):
self._end_name()
def _start_itunes_email(self, attrs_d):
self._start_email(attrs_d)
def _end_itunes_email(self):
self._end_email()
def _start_itunes_subtitle(self, attrs_d):
self._start_subtitle(attrs_d)
def _end_itunes_subtitle(self):
self._end_subtitle()
def _start_itunes_summary(self, attrs_d):
self._start_summary(attrs_d)
def _end_itunes_summary(self):
self._end_summary()
def _start_itunes_owner(self, attrs_d):
self.inpublisher = 1
self.push('publisher', 0)
def _end_itunes_owner(self):
self.pop('publisher')
self.inpublisher = 0
self._sync_author_detail('publisher')
def _end_itunes_keywords(self):
for term in self.pop('itunes_keywords').split(','):
if term.strip():
self._add_tag(term.strip(), 'http://www.itunes.com/', None)
def _start_itunes_category(self, attrs_d):
self._add_tag(attrs_d.get('text'), 'http://www.itunes.com/', None)
self.push('category', 1)
def _start_itunes_image(self, attrs_d):
self.push('itunes_image', 0)
if attrs_d.get('href'):
self._get_context()['image'] = FeedParserDict({'href': attrs_d.get('href')})
elif attrs_d.get('url'):
self._get_context()['image'] = FeedParserDict({'href': attrs_d.get('url')})
_start_itunes_link = _start_itunes_image
def _end_itunes_block(self):
value = self.pop('itunes_block', 0)
self._get_context()['itunes_block'] = (value == 'yes' or value == 'Yes') and 1 or 0
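        # Illustrative mapping: 'yes' or 'Yes' -> 1, anything else -> 0.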
def _end_itunes_explicit(self):
value = self.pop('itunes_explicit', 0)
# Convert 'yes' -> True, 'clean' to False, and any other value to None
# False and None both evaluate as False, so the difference can be ignored
# by applications that only need to know if the content is explicit.
self._get_context()['itunes_explicit'] = (None, False, True)[(value == 'yes' and 2) or value == 'clean' or 0]
| 4,115 | Python | .py | 87 | 41.448276 | 117 | 0.68023 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
9,240 | _base.py | rembo10_headphones/lib/feedparser/namespaces/_base.py |
# Support for the Atom, RSS, RDF, and CDF feed formats
# Copyright 2010-2021 Kurt McKee <contactme@kurtmckee.org>
# Copyright 2002-2008 Mark Pilgrim
# All rights reserved.
#
# This file is a part of feedparser.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 'AS IS'
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import copy
from ..datetimes import _parse_date
from ..urls import make_safe_absolute_uri
from ..util import FeedParserDict
class Namespace(object):
"""Support for the Atom, RSS, RDF, and CDF feed formats.
The feed formats all share common elements, some of which have conflicting
interpretations. For simplicity, all of the base feed format support is
collected here.
"""
supported_namespaces = {
'': '',
'http://backend.userland.com/rss': '',
'http://blogs.law.harvard.edu/tech/rss': '',
'http://purl.org/rss/1.0/': '',
'http://my.netscape.com/rdf/simple/0.9/': '',
'http://example.com/newformat#': '',
'http://example.com/necho': '',
'http://purl.org/echo/': '',
'uri/of/echo/namespace#': '',
'http://purl.org/pie/': '',
'http://purl.org/atom/ns#': '',
'http://www.w3.org/2005/Atom': '',
'http://purl.org/rss/1.0/modules/rss091#': '',
}
def _start_rss(self, attrs_d):
versionmap = {
'0.91': 'rss091u',
'0.92': 'rss092',
'0.93': 'rss093',
'0.94': 'rss094',
}
# If we're here then this is an RSS feed.
# If we don't have a version or have a version that starts with something
# other than RSS then there's been a mistake. Correct it.
if not self.version or not self.version.startswith('rss'):
attr_version = attrs_d.get('version', '')
version = versionmap.get(attr_version)
if version:
self.version = version
elif attr_version.startswith('2.'):
self.version = 'rss20'
else:
self.version = 'rss'
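        # Illustrative examples: version="0.91" -> 'rss091u',
        # version="2.0" -> 'rss20', and a missing version -> 'rss'.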
def _start_channel(self, attrs_d):
self.infeed = 1
self._cdf_common(attrs_d)
def _cdf_common(self, attrs_d):
if 'lastmod' in attrs_d:
self._start_modified({})
self.elementstack[-1][-1] = attrs_d['lastmod']
self._end_modified()
if 'href' in attrs_d:
self._start_link({})
self.elementstack[-1][-1] = attrs_d['href']
self._end_link()
def _start_feed(self, attrs_d):
self.infeed = 1
        versionmap = {
            '0.1': 'atom01',
            '0.2': 'atom02',
            '0.3': 'atom03',
        }
if not self.version:
attr_version = attrs_d.get('version')
version = versionmap.get(attr_version)
if version:
self.version = version
else:
self.version = 'atom'
def _end_channel(self):
self.infeed = 0
_end_feed = _end_channel
def _start_image(self, attrs_d):
context = self._get_context()
if not self.inentry:
context.setdefault('image', FeedParserDict())
self.inimage = 1
self.title_depth = -1
self.push('image', 0)
def _end_image(self):
self.pop('image')
self.inimage = 0
def _start_textinput(self, attrs_d):
context = self._get_context()
context.setdefault('textinput', FeedParserDict())
self.intextinput = 1
self.title_depth = -1
self.push('textinput', 0)
_start_textInput = _start_textinput
def _end_textinput(self):
self.pop('textinput')
self.intextinput = 0
_end_textInput = _end_textinput
def _start_author(self, attrs_d):
self.inauthor = 1
self.push('author', 1)
# Append a new FeedParserDict when expecting an author
context = self._get_context()
context.setdefault('authors', [])
context['authors'].append(FeedParserDict())
_start_managingeditor = _start_author
def _end_author(self):
self.pop('author')
self.inauthor = 0
self._sync_author_detail()
_end_managingeditor = _end_author
def _start_contributor(self, attrs_d):
self.incontributor = 1
context = self._get_context()
context.setdefault('contributors', [])
context['contributors'].append(FeedParserDict())
self.push('contributor', 0)
def _end_contributor(self):
self.pop('contributor')
self.incontributor = 0
def _start_name(self, attrs_d):
self.push('name', 0)
def _end_name(self):
value = self.pop('name')
if self.inpublisher:
self._save_author('name', value, 'publisher')
elif self.inauthor:
self._save_author('name', value)
elif self.incontributor:
self._save_contributor('name', value)
elif self.intextinput:
context = self._get_context()
context['name'] = value
def _start_width(self, attrs_d):
self.push('width', 0)
def _end_width(self):
value = self.pop('width')
try:
value = int(value)
except ValueError:
value = 0
if self.inimage:
context = self._get_context()
context['width'] = value
def _start_height(self, attrs_d):
self.push('height', 0)
def _end_height(self):
value = self.pop('height')
try:
value = int(value)
except ValueError:
value = 0
if self.inimage:
context = self._get_context()
context['height'] = value
def _start_url(self, attrs_d):
self.push('href', 1)
_start_homepage = _start_url
_start_uri = _start_url
def _end_url(self):
value = self.pop('href')
if self.inauthor:
self._save_author('href', value)
elif self.incontributor:
self._save_contributor('href', value)
_end_homepage = _end_url
_end_uri = _end_url
def _start_email(self, attrs_d):
self.push('email', 0)
def _end_email(self):
value = self.pop('email')
if self.inpublisher:
self._save_author('email', value, 'publisher')
elif self.inauthor:
self._save_author('email', value)
elif self.incontributor:
self._save_contributor('email', value)
def _start_subtitle(self, attrs_d):
self.push_content('subtitle', attrs_d, 'text/plain', 1)
_start_tagline = _start_subtitle
def _end_subtitle(self):
self.pop_content('subtitle')
_end_tagline = _end_subtitle
def _start_rights(self, attrs_d):
self.push_content('rights', attrs_d, 'text/plain', 1)
_start_copyright = _start_rights
def _end_rights(self):
self.pop_content('rights')
_end_copyright = _end_rights
def _start_item(self, attrs_d):
self.entries.append(FeedParserDict())
self.push('item', 0)
self.inentry = 1
self.guidislink = 0
self.title_depth = -1
id = self._get_attribute(attrs_d, 'rdf:about')
if id:
context = self._get_context()
context['id'] = id
self._cdf_common(attrs_d)
_start_entry = _start_item
def _end_item(self):
self.pop('item')
self.inentry = 0
_end_entry = _end_item
def _start_language(self, attrs_d):
self.push('language', 1)
def _end_language(self):
self.lang = self.pop('language')
def _start_webmaster(self, attrs_d):
self.push('publisher', 1)
def _end_webmaster(self):
self.pop('publisher')
self._sync_author_detail('publisher')
def _start_published(self, attrs_d):
self.push('published', 1)
_start_issued = _start_published
_start_pubdate = _start_published
def _end_published(self):
value = self.pop('published')
self._save('published_parsed', _parse_date(value), overwrite=True)
_end_issued = _end_published
_end_pubdate = _end_published
def _start_updated(self, attrs_d):
self.push('updated', 1)
_start_modified = _start_updated
_start_lastbuilddate = _start_updated
def _end_updated(self):
value = self.pop('updated')
parsed_value = _parse_date(value)
self._save('updated_parsed', parsed_value, overwrite=True)
_end_modified = _end_updated
_end_lastbuilddate = _end_updated
def _start_created(self, attrs_d):
self.push('created', 1)
def _end_created(self):
value = self.pop('created')
self._save('created_parsed', _parse_date(value), overwrite=True)
def _start_expirationdate(self, attrs_d):
self.push('expired', 1)
def _end_expirationdate(self):
self._save('expired_parsed', _parse_date(self.pop('expired')), overwrite=True)
def _start_category(self, attrs_d):
term = attrs_d.get('term')
scheme = attrs_d.get('scheme', attrs_d.get('domain'))
label = attrs_d.get('label')
self._add_tag(term, scheme, label)
self.push('category', 1)
_start_keywords = _start_category
def _end_category(self):
value = self.pop('category')
if not value:
return
context = self._get_context()
tags = context['tags']
if value and len(tags) and not tags[-1]['term']:
tags[-1]['term'] = value
else:
self._add_tag(value, None, None)
_end_keywords = _end_category
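    # Illustrative example (sketch): <category domain="http://example.org/">
    # Music</category> first records a tag from the attributes, then
    # backfills the empty 'term' with the element text 'Music'.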
def _start_cloud(self, attrs_d):
self._get_context()['cloud'] = FeedParserDict(attrs_d)
def _start_link(self, attrs_d):
attrs_d.setdefault('rel', 'alternate')
if attrs_d['rel'] == 'self':
attrs_d.setdefault('type', 'application/atom+xml')
else:
attrs_d.setdefault('type', 'text/html')
context = self._get_context()
attrs_d = self._enforce_href(attrs_d)
if 'href' in attrs_d:
attrs_d['href'] = self.resolve_uri(attrs_d['href'])
expecting_text = self.infeed or self.inentry or self.insource
context.setdefault('links', [])
if not (self.inentry and self.inimage):
context['links'].append(FeedParserDict(attrs_d))
if 'href' in attrs_d:
if (
attrs_d.get('rel') == 'alternate'
and self.map_content_type(attrs_d.get('type')) in self.html_types
):
context['link'] = attrs_d['href']
else:
self.push('link', expecting_text)
def _end_link(self):
self.pop('link')
def _start_guid(self, attrs_d):
self.guidislink = (attrs_d.get('ispermalink', 'true') == 'true')
self.push('id', 1)
_start_id = _start_guid
def _end_guid(self):
value = self.pop('id')
self._save('guidislink', self.guidislink and 'link' not in self._get_context())
if self.guidislink:
# guid acts as link, but only if 'ispermalink' is not present or is 'true',
# and only if the item doesn't already have a link element
self._save('link', value)
_end_id = _end_guid
def _start_title(self, attrs_d):
if self.svgOK:
return self.unknown_starttag('title', list(attrs_d.items()))
self.push_content('title', attrs_d, 'text/plain', self.infeed or self.inentry or self.insource)
def _end_title(self):
if self.svgOK:
return
value = self.pop_content('title')
if not value:
return
self.title_depth = self.depth
def _start_description(self, attrs_d):
context = self._get_context()
if 'summary' in context:
self._summaryKey = 'content'
self._start_content(attrs_d)
else:
self.push_content('description', attrs_d, 'text/html', self.infeed or self.inentry or self.insource)
def _start_abstract(self, attrs_d):
self.push_content('description', attrs_d, 'text/plain', self.infeed or self.inentry or self.insource)
def _end_description(self):
if self._summaryKey == 'content':
self._end_content()
else:
self.pop_content('description')
self._summaryKey = None
_end_abstract = _end_description
def _start_info(self, attrs_d):
self.push_content('info', attrs_d, 'text/plain', 1)
_start_feedburner_browserfriendly = _start_info
def _end_info(self):
self.pop_content('info')
_end_feedburner_browserfriendly = _end_info
def _start_generator(self, attrs_d):
if attrs_d:
attrs_d = self._enforce_href(attrs_d)
if 'href' in attrs_d:
attrs_d['href'] = self.resolve_uri(attrs_d['href'])
self._get_context()['generator_detail'] = FeedParserDict(attrs_d)
self.push('generator', 1)
def _end_generator(self):
value = self.pop('generator')
context = self._get_context()
if 'generator_detail' in context:
context['generator_detail']['name'] = value
def _start_summary(self, attrs_d):
context = self._get_context()
if 'summary' in context:
self._summaryKey = 'content'
self._start_content(attrs_d)
else:
self._summaryKey = 'summary'
self.push_content(self._summaryKey, attrs_d, 'text/plain', 1)
def _end_summary(self):
if self._summaryKey == 'content':
self._end_content()
else:
self.pop_content(self._summaryKey or 'summary')
self._summaryKey = None
def _start_enclosure(self, attrs_d):
attrs_d = self._enforce_href(attrs_d)
context = self._get_context()
attrs_d['rel'] = 'enclosure'
context.setdefault('links', []).append(FeedParserDict(attrs_d))
def _start_source(self, attrs_d):
if 'url' in attrs_d:
# This means that we're processing a source element from an RSS 2.0 feed
self.sourcedata['href'] = attrs_d['url']
self.push('source', 1)
self.insource = 1
self.title_depth = -1
def _end_source(self):
self.insource = 0
value = self.pop('source')
if value:
self.sourcedata['title'] = value
self._get_context()['source'] = copy.deepcopy(self.sourcedata)
self.sourcedata.clear()
def _start_content(self, attrs_d):
self.push_content('content', attrs_d, 'text/plain', 1)
src = attrs_d.get('src')
if src:
self.contentparams['src'] = src
self.push('content', 1)
def _start_body(self, attrs_d):
self.push_content('content', attrs_d, 'application/xhtml+xml', 1)
_start_xhtml_body = _start_body
def _start_content_encoded(self, attrs_d):
self.push_content('content', attrs_d, 'text/html', 1)
_start_fullitem = _start_content_encoded
def _end_content(self):
copyToSummary = self.map_content_type(self.contentparams.get('type')) in ({'text/plain'} | self.html_types)
value = self.pop_content('content')
if copyToSummary:
self._save('summary', value)
_end_body = _end_content
_end_xhtml_body = _end_content
_end_content_encoded = _end_content
_end_fullitem = _end_content
def _start_newlocation(self, attrs_d):
self.push('newlocation', 1)
def _end_newlocation(self):
url = self.pop('newlocation')
context = self._get_context()
# don't set newlocation if the context isn't right
if context is not self.feeddata:
return
context['newlocation'] = make_safe_absolute_uri(self.baseuri, url.strip())
| 16,998 | Python | .py | 427 | 31.30445 | 115 | 0.601273 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
9,241 | loose.py | rembo10_headphones/lib/feedparser/parsers/loose.py |
# The loose feed parser that interfaces with an SGML parsing library
# Copyright 2010-2021 Kurt McKee <contactme@kurtmckee.org>
# Copyright 2002-2008 Mark Pilgrim
# All rights reserved.
#
# This file is a part of feedparser.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 'AS IS'
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
class _LooseFeedParser(object):
contentparams = None
def __init__(self, baseuri=None, baselang=None, encoding=None, entities=None):
self.baseuri = baseuri or ''
self.lang = baselang or None
self.encoding = encoding or 'utf-8' # character encoding
self.entities = entities or {}
super(_LooseFeedParser, self).__init__()
@staticmethod
def _normalize_attributes(kv):
k = kv[0].lower()
v = k in ('rel', 'type') and kv[1].lower() or kv[1]
# the sgml parser doesn't handle entities in attributes, nor
# does it pass the attribute values through as unicode, while
# strict xml parsers do -- account for this difference
        v = v.replace('&amp;', '&')
return k, v
def decode_entities(self, element, data):
        data = data.replace('&#60;', '&lt;')
        data = data.replace('&#x3c;', '&lt;')
        data = data.replace('&#x3C;', '&lt;')
        data = data.replace('&#62;', '&gt;')
        data = data.replace('&#x3e;', '&gt;')
        data = data.replace('&#x3E;', '&gt;')
        data = data.replace('&#38;', '&amp;')
        data = data.replace('&#x26;', '&amp;')
        data = data.replace('&#34;', '&quot;')
        data = data.replace('&#x22;', '&quot;')
        data = data.replace('&#39;', '&apos;')
        data = data.replace('&#x27;', '&apos;')
        if not self.contentparams.get('type', 'xml').endswith('xml'):
            data = data.replace('&lt;', '<')
            data = data.replace('&gt;', '>')
            data = data.replace('&amp;', '&')
            data = data.replace('&quot;', '"')
            data = data.replace('&apos;', "'")
            data = data.replace('&#x2f;', '/')
            data = data.replace('&#x2F;', '/')
return data
@staticmethod
def strattrs(attrs):
return ''.join(
' %s="%s"' % (n, v.replace('"', '"'))
for n, v in attrs
)
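# Usage sketch of `decode_entities` (standalone; the mixin normally manages
# `contentparams` itself, so setting it by hand here is for illustration):
#
#     >>> p = _LooseFeedParser()
#     >>> p.contentparams = {'type': 'text/html'}
#     >>> p.decode_entities('description', '&#60;b&#62;bold&#60;/b&#62;')
#     '<b>bold</b>'
#
# For XML content types the angle brackets stay escaped:
#
#     >>> p.contentparams = {'type': 'application/xhtml+xml'}
#     >>> p.decode_entities('description', '&#60;b&#62;')
#     '&lt;b&gt;'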
| 3,452
|
Python
|
.py
| 72
| 41.875
| 82
| 0.643556
|
rembo10/headphones
| 3,370
| 601
| 527
|
GPL-3.0
|
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
|
9,242
|
strict.py
|
rembo10_headphones/lib/feedparser/parsers/strict.py
|
# The strict feed parser that interfaces with an XML parsing library
# Copyright 2010-2021 Kurt McKee <contactme@kurtmckee.org>
# Copyright 2002-2008 Mark Pilgrim
# All rights reserved.
#
# This file is a part of feedparser.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 'AS IS'
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from ..exceptions import UndeclaredNamespace
class _StrictFeedParser(object):
def __init__(self, baseuri, baselang, encoding):
self.bozo = 0
self.exc = None
self.decls = {}
self.baseuri = baseuri or ''
self.lang = baselang
self.encoding = encoding
super(_StrictFeedParser, self).__init__()
@staticmethod
def _normalize_attributes(kv):
k = kv[0].lower()
v = k in ('rel', 'type') and kv[1].lower() or kv[1]
return k, v
def startPrefixMapping(self, prefix, uri):
if not uri:
return
# Jython uses '' instead of None; standardize on None
prefix = prefix or None
self.track_namespace(prefix, uri)
if prefix and uri == 'http://www.w3.org/1999/xlink':
self.decls['xmlns:' + prefix] = uri
def startElementNS(self, name, qname, attrs):
namespace, localname = name
lowernamespace = str(namespace or '').lower()
if lowernamespace.find('backend.userland.com/rss') != -1:
# match any backend.userland.com namespace
namespace = 'http://backend.userland.com/rss'
lowernamespace = namespace
if qname and qname.find(':') > 0:
givenprefix = qname.split(':')[0]
else:
givenprefix = None
prefix = self._matchnamespaces.get(lowernamespace, givenprefix)
if givenprefix and (prefix is None or (prefix == '' and lowernamespace == '')) and givenprefix not in self.namespaces_in_use:
raise UndeclaredNamespace("'%s' is not associated with a namespace" % givenprefix)
localname = str(localname).lower()
# qname implementation is horribly broken in Python 2.1 (it
# doesn't report any), and slightly broken in Python 2.2 (it
# doesn't report the xml: namespace). So we match up namespaces
# with a known list first, and then possibly override them with
# the qnames the SAX parser gives us (if indeed it gives us any
# at all). Thanks to MatejC for helping me test this and
# tirelessly telling me that it didn't work yet.
attrsD, self.decls = self.decls, {}
if localname == 'math' and namespace == 'http://www.w3.org/1998/Math/MathML':
attrsD['xmlns'] = namespace
if localname == 'svg' and namespace == 'http://www.w3.org/2000/svg':
attrsD['xmlns'] = namespace
if prefix:
localname = prefix.lower() + ':' + localname
elif namespace and not qname: # Expat
for name, value in self.namespaces_in_use.items():
if name and value == namespace:
localname = name + ':' + localname
break
for (namespace, attrlocalname), attrvalue in attrs.items():
lowernamespace = (namespace or '').lower()
prefix = self._matchnamespaces.get(lowernamespace, '')
if prefix:
attrlocalname = prefix + ':' + attrlocalname
attrsD[str(attrlocalname).lower()] = attrvalue
for qname in attrs.getQNames():
attrsD[str(qname).lower()] = attrs.getValueByQName(qname)
localname = str(localname).lower()
self.unknown_starttag(localname, list(attrsD.items()))
def characters(self, text):
self.handle_data(text)
def endElementNS(self, name, qname):
namespace, localname = name
lowernamespace = str(namespace or '').lower()
if qname and qname.find(':') > 0:
givenprefix = qname.split(':')[0]
else:
givenprefix = ''
prefix = self._matchnamespaces.get(lowernamespace, givenprefix)
if prefix:
localname = prefix + ':' + localname
elif namespace and not qname: # Expat
for name, value in self.namespaces_in_use.items():
if name and value == namespace:
localname = name + ':' + localname
break
localname = str(localname).lower()
self.unknown_endtag(localname)
def error(self, exc):
self.bozo = 1
self.exc = exc
# drv_libxml2 calls warning() in some cases
warning = error
def fatalError(self, exc):
self.error(exc)
raise exc
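# A minimal sketch of how a handler like this is typically driven; the real
# wiring lives elsewhere in feedparser, and `handler`/`source` below are
# placeholders:
#
#     import xml.sax
#
#     parser = xml.sax.make_parser()
#     parser.setFeature(xml.sax.handler.feature_namespaces, True)
#     parser.setContentHandler(handler)  # an object with the methods above
#     parser.setErrorHandler(handler)    # routes problems to error()/fatalError()
#     parser.parse(source)               # file-like object, path, or URL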
| 5,817
|
Python
|
.py
| 121
| 39.950413
| 133
| 0.651883
|
rembo10/headphones
| 3,370
| 601
| 527
|
GPL-3.0
|
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
|
9,243
|
__main__.py
|
rembo10_headphones/lib/beets/__main__.py
|
# This file is part of beets.
# Copyright 2017, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""The __main__ module lets you run the beets CLI interface by typing
`python -m beets`.
"""
import sys
from .ui import main
if __name__ == "__main__":
main(sys.argv[1:])
| 825
|
Python
|
.py
| 20
| 39.85
| 71
| 0.762797
|
rembo10/headphones
| 3,370
| 601
| 527
|
GPL-3.0
|
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
|
9,244
|
vfs.py
|
rembo10_headphones/lib/beets/vfs.py
|
# This file is part of beets.
# Copyright 2016, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""A simple utility for constructing filesystem-like trees from beets
libraries.
"""
from collections import namedtuple
from beets import util
Node = namedtuple('Node', ['files', 'dirs'])
def _insert(node, path, itemid):
"""Insert an item into a virtual filesystem node."""
if len(path) == 1:
# Last component. Insert file.
node.files[path[0]] = itemid
else:
# In a directory.
dirname = path[0]
rest = path[1:]
if dirname not in node.dirs:
node.dirs[dirname] = Node({}, {})
_insert(node.dirs[dirname], rest, itemid)
def libtree(lib):
"""Generates a filesystem-like directory tree for the files
contained in `lib`. Filesystem nodes are (files, dirs) named
tuples in which both components are dictionaries. The first
maps filenames to Item ids. The second maps directory names to
child node tuples.
"""
root = Node({}, {})
for item in lib.items():
dest = item.destination(fragment=True)
parts = util.components(dest)
_insert(root, parts, item.id)
return root
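# Usage sketch (paths and item ids invented): `_insert` builds nested Nodes,
# the same shape `libtree` returns for a whole library.
#
#     >>> root = Node({}, {})
#     >>> _insert(root, ['Artist', 'Album', '01 Track.mp3'], 1)
#     >>> root.dirs['Artist'].dirs['Album'].files
#     {'01 Track.mp3': 1}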
| 1,750
|
Python
|
.py
| 44
| 35.431818
| 71
| 0.706298
|
rembo10/headphones
| 3,370
| 601
| 527
|
GPL-3.0
|
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
|
9,245
|
plugins.py
|
rembo10_headphones/lib/beets/plugins.py
|
# This file is part of beets.
# Copyright 2016, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Support for beets plugins."""
import traceback
import re
import inspect
import abc
from collections import defaultdict
from functools import wraps
import beets
from beets import logging
import mediafile
PLUGIN_NAMESPACE = 'beetsplug'
# Plugins using the Last.fm API can share the same API key.
LASTFM_KEY = '2dc3914abf35f0d9c92d97d8f8e42b43'
# Global logger.
log = logging.getLogger('beets')
class PluginConflictException(Exception):
"""Indicates that the services provided by one plugin conflict with
those of another.
For example two plugins may define different types for flexible fields.
"""
class PluginLogFilter(logging.Filter):
"""A logging filter that identifies the plugin that emitted a log
message.
"""
def __init__(self, plugin):
self.prefix = f'{plugin.name}: '
def filter(self, record):
if hasattr(record.msg, 'msg') and isinstance(record.msg.msg,
str):
# A _LogMessage from our hacked-up Logging replacement.
record.msg.msg = self.prefix + record.msg.msg
elif isinstance(record.msg, str):
record.msg = self.prefix + record.msg
return True
# Managing the plugins themselves.
class BeetsPlugin:
"""The base class for all beets plugins. Plugins provide
functionality by defining a subclass of BeetsPlugin and overriding
the abstract methods defined here.
"""
def __init__(self, name=None):
"""Perform one-time plugin setup.
"""
self.name = name or self.__module__.split('.')[-1]
self.config = beets.config[self.name]
if not self.template_funcs:
self.template_funcs = {}
if not self.template_fields:
self.template_fields = {}
if not self.album_template_fields:
self.album_template_fields = {}
self.early_import_stages = []
self.import_stages = []
self._log = log.getChild(self.name)
self._log.setLevel(logging.NOTSET) # Use `beets` logger level.
if not any(isinstance(f, PluginLogFilter) for f in self._log.filters):
self._log.addFilter(PluginLogFilter(self))
def commands(self):
"""Should return a list of beets.ui.Subcommand objects for
commands that should be added to beets' CLI.
"""
return ()
def _set_stage_log_level(self, stages):
"""Adjust all the stages in `stages` to WARNING logging level.
"""
return [self._set_log_level_and_params(logging.WARNING, stage)
for stage in stages]
def get_early_import_stages(self):
"""Return a list of functions that should be called as importer
pipelines stages early in the pipeline.
The callables are wrapped versions of the functions in
`self.early_import_stages`. Wrapping provides some bookkeeping for the
plugin: specifically, the logging level is adjusted to WARNING.
"""
return self._set_stage_log_level(self.early_import_stages)
def get_import_stages(self):
"""Return a list of functions that should be called as importer
pipelines stages.
The callables are wrapped versions of the functions in
`self.import_stages`. Wrapping provides some bookkeeping for the
plugin: specifically, the logging level is adjusted to WARNING.
"""
return self._set_stage_log_level(self.import_stages)
def _set_log_level_and_params(self, base_log_level, func):
"""Wrap `func` to temporarily set this plugin's logger level to
`base_log_level` + config options (and restore it to its previous
value after the function returns). Also determines which params may not
be sent for backwards-compatibility.
"""
argspec = inspect.getfullargspec(func)
@wraps(func)
def wrapper(*args, **kwargs):
assert self._log.level == logging.NOTSET
verbosity = beets.config['verbose'].get(int)
log_level = max(logging.DEBUG, base_log_level - 10 * verbosity)
self._log.setLevel(log_level)
if argspec.varkw is None:
kwargs = {k: v for k, v in kwargs.items()
if k in argspec.args}
try:
return func(*args, **kwargs)
finally:
self._log.setLevel(logging.NOTSET)
return wrapper
def queries(self):
"""Should return a dict mapping prefixes to Query subclasses.
"""
return {}
def track_distance(self, item, info):
"""Should return a Distance object to be added to the
distance for every track comparison.
"""
return beets.autotag.hooks.Distance()
def album_distance(self, items, album_info, mapping):
"""Should return a Distance object to be added to the
distance for every album-level comparison.
"""
return beets.autotag.hooks.Distance()
def candidates(self, items, artist, album, va_likely, extra_tags=None):
"""Should return a sequence of AlbumInfo objects that match the
album whose items are provided.
"""
return ()
def item_candidates(self, item, artist, title):
"""Should return a sequence of TrackInfo objects that match the
item provided.
"""
return ()
def album_for_id(self, album_id):
"""Return an AlbumInfo object or None if no matching release was
found.
"""
return None
def track_for_id(self, track_id):
"""Return a TrackInfo object or None if no matching release was
found.
"""
return None
def add_media_field(self, name, descriptor):
"""Add a field that is synchronized between media files and items.
When a media field is added ``item.write()`` will set the name
property of the item's MediaFile to ``item[name]`` and save the
changes. Similarly ``item.read()`` will set ``item[name]`` to
the value of the name property of the media file.
``descriptor`` must be an instance of ``mediafile.MediaField``.
"""
# Defer import to prevent circular dependency
from beets import library
mediafile.MediaFile.add_field(name, descriptor)
library.Item._media_fields.add(name)
_raw_listeners = None
listeners = None
def register_listener(self, event, func):
"""Add a function as a listener for the specified event.
"""
wrapped_func = self._set_log_level_and_params(logging.WARNING, func)
cls = self.__class__
if cls.listeners is None or cls._raw_listeners is None:
cls._raw_listeners = defaultdict(list)
cls.listeners = defaultdict(list)
if func not in cls._raw_listeners[event]:
cls._raw_listeners[event].append(func)
cls.listeners[event].append(wrapped_func)
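    # Typical use from a plugin's __init__ (sketch; the handler name is
    # invented, but the event name is one beets actually sends):
    #
    #     def __init__(self):
    #         super().__init__()
    #         self.register_listener('import_task_files', self._on_task_files)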
template_funcs = None
template_fields = None
album_template_fields = None
@classmethod
def template_func(cls, name):
"""Decorator that registers a path template function. The
function will be invoked as ``%name{}`` from path format
strings.
"""
def helper(func):
if cls.template_funcs is None:
cls.template_funcs = {}
cls.template_funcs[name] = func
return func
return helper
@classmethod
def template_field(cls, name):
"""Decorator that registers a path template field computation.
The value will be referenced as ``$name`` from path format
strings. The function must accept a single parameter, the Item
being formatted.
"""
def helper(func):
if cls.template_fields is None:
cls.template_fields = {}
cls.template_fields[name] = func
return func
return helper
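    # Registration sketch (MyPlugin and the function below are hypothetical):
    #
    #     @MyPlugin.template_func('initial')
    #     def _tmpl_initial(text):
    #         return text[:1].upper()
    #
    # which is then invoked as %initial{$artist} in path format strings.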
_classes = set()
def load_plugins(names=()):
"""Imports the modules for a sequence of plugin names. Each name
must be the name of a Python module under the "beetsplug" namespace
package in sys.path; the module indicated should contain the
BeetsPlugin subclasses desired.
"""
for name in names:
modname = f'{PLUGIN_NAMESPACE}.{name}'
try:
try:
namespace = __import__(modname, None, None)
except ImportError as exc:
# Again, this is hacky:
if exc.args[0].endswith(' ' + name):
log.warning('** plugin {0} not found', name)
else:
raise
else:
for obj in getattr(namespace, name).__dict__.values():
if isinstance(obj, type) and issubclass(obj, BeetsPlugin) \
and obj != BeetsPlugin and obj not in _classes:
_classes.add(obj)
except Exception:
log.warning(
'** error loading plugin {}:\n{}',
name,
traceback.format_exc(),
)
_instances = {}
def find_plugins():
"""Returns a list of BeetsPlugin subclass instances from all
currently loaded beets plugins. Loads the default plugin set
first.
"""
if _instances:
# After the first call, use cached instances for performance reasons.
# See https://github.com/beetbox/beets/pull/3810
return list(_instances.values())
load_plugins()
plugins = []
for cls in _classes:
# Only instantiate each plugin class once.
if cls not in _instances:
_instances[cls] = cls()
plugins.append(_instances[cls])
return plugins
# Communication with plugins.
def commands():
"""Returns a list of Subcommand objects from all loaded plugins.
"""
out = []
for plugin in find_plugins():
out += plugin.commands()
return out
def queries():
"""Returns a dict mapping prefix strings to Query subclasses all loaded
plugins.
"""
out = {}
for plugin in find_plugins():
out.update(plugin.queries())
return out
def types(model_cls):
# Gives us `item_types` and `album_types`
attr_name = f'{model_cls.__name__.lower()}_types'
types = {}
for plugin in find_plugins():
plugin_types = getattr(plugin, attr_name, {})
for field in plugin_types:
if field in types and plugin_types[field] != types[field]:
raise PluginConflictException(
'Plugin {} defines flexible field {} '
'which has already been defined with '
'another type.'.format(plugin.name, field)
)
types.update(plugin_types)
return types
def named_queries(model_cls):
# Gather `item_queries` and `album_queries` from the plugins.
attr_name = f'{model_cls.__name__.lower()}_queries'
queries = {}
for plugin in find_plugins():
plugin_queries = getattr(plugin, attr_name, {})
queries.update(plugin_queries)
return queries
def track_distance(item, info):
"""Gets the track distance calculated by all loaded plugins.
Returns a Distance object.
"""
from beets.autotag.hooks import Distance
dist = Distance()
for plugin in find_plugins():
dist.update(plugin.track_distance(item, info))
return dist
def album_distance(items, album_info, mapping):
"""Returns the album distance calculated by plugins."""
from beets.autotag.hooks import Distance
dist = Distance()
for plugin in find_plugins():
dist.update(plugin.album_distance(items, album_info, mapping))
return dist
def candidates(items, artist, album, va_likely, extra_tags=None):
"""Gets MusicBrainz candidates for an album from each plugin.
"""
for plugin in find_plugins():
yield from plugin.candidates(items, artist, album, va_likely,
extra_tags)
def item_candidates(item, artist, title):
"""Gets MusicBrainz candidates for an item from the plugins.
"""
for plugin in find_plugins():
yield from plugin.item_candidates(item, artist, title)
def album_for_id(album_id):
"""Get AlbumInfo objects for a given ID string.
"""
for plugin in find_plugins():
album = plugin.album_for_id(album_id)
if album:
yield album
def track_for_id(track_id):
"""Get TrackInfo objects for a given ID string.
"""
for plugin in find_plugins():
track = plugin.track_for_id(track_id)
if track:
yield track
def template_funcs():
"""Get all the template functions declared by plugins as a
dictionary.
"""
funcs = {}
for plugin in find_plugins():
if plugin.template_funcs:
funcs.update(plugin.template_funcs)
return funcs
def early_import_stages():
"""Get a list of early import stage functions defined by plugins."""
stages = []
for plugin in find_plugins():
stages += plugin.get_early_import_stages()
return stages
def import_stages():
"""Get a list of import stage functions defined by plugins."""
stages = []
for plugin in find_plugins():
stages += plugin.get_import_stages()
return stages
# New-style (lazy) plugin-provided fields.
def item_field_getters():
"""Get a dictionary mapping field names to unary functions that
compute the field's value.
"""
funcs = {}
for plugin in find_plugins():
if plugin.template_fields:
funcs.update(plugin.template_fields)
return funcs
def album_field_getters():
"""As above, for album fields.
"""
funcs = {}
for plugin in find_plugins():
if plugin.album_template_fields:
funcs.update(plugin.album_template_fields)
return funcs
# Event dispatch.
def event_handlers():
"""Find all event handlers from plugins as a dictionary mapping
event names to sequences of callables.
"""
all_handlers = defaultdict(list)
for plugin in find_plugins():
if plugin.listeners:
for event, handlers in plugin.listeners.items():
all_handlers[event] += handlers
return all_handlers
def send(event, **arguments):
"""Send an event to all assigned event listeners.
`event` is the name of the event to send, all other named arguments
are passed along to the handlers.
Return a list of non-None values returned from the handlers.
"""
log.debug('Sending event: {0}', event)
results = []
for handler in event_handlers()[event]:
result = handler(**arguments)
if result is not None:
results.append(result)
return results
def feat_tokens(for_artist=True):
"""Return a regular expression that matches phrases like "featuring"
that separate a main artist or a song title from secondary artists.
The `for_artist` option determines whether the regex should be
suitable for matching artist fields (the default) or title fields.
"""
feat_words = ['ft', 'featuring', 'feat', 'feat.', 'ft.']
if for_artist:
feat_words += ['with', 'vs', 'and', 'con', '&']
return r'(?<=\s)(?:{})(?=\s)'.format(
'|'.join(re.escape(x) for x in feat_words)
)
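# Usage sketch: the pattern matches a whitespace-delimited "feat" marker, so
# splitting on it separates the main artist from the guests (string invented):
#
#     >>> re.split(feat_tokens(), 'Main Artist feat. Guest')
#     ['Main Artist ', ' Guest']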
def sanitize_choices(choices, choices_all):
"""Clean up a stringlist configuration attribute: keep only choices
elements present in choices_all, remove duplicate elements, expand '*'
wildcard while keeping original stringlist order.
"""
seen = set()
others = [x for x in choices_all if x not in choices]
res = []
for s in choices:
if s not in seen:
if s in list(choices_all):
res.append(s)
elif s == '*':
res.extend(others)
seen.add(s)
return res
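# Usage sketch: '*' expands to the choices not yet listed, keeping order:
#
#     >>> sanitize_choices(['a', '*'], ['a', 'b', 'c'])
#     ['a', 'b', 'c']
#     >>> sanitize_choices(['*', 'a'], ['a', 'b', 'c'])
#     ['b', 'c', 'a']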
def sanitize_pairs(pairs, pairs_all):
"""Clean up a single-element mapping configuration attribute as returned
by Confuse's `Pairs` template: keep only two-element tuples present in
pairs_all, remove duplicate elements, expand ('str', '*') and ('*', '*')
wildcards while keeping the original order. Note that ('*', '*') and
('*', 'whatever') have the same effect.
For example,
>>> sanitize_pairs(
... [('foo', 'baz bar'), ('key', '*'), ('*', '*')],
... [('foo', 'bar'), ('foo', 'baz'), ('foo', 'foobar'),
... ('key', 'value')]
... )
[('foo', 'baz'), ('foo', 'bar'), ('key', 'value'), ('foo', 'foobar')]
"""
pairs_all = list(pairs_all)
seen = set()
others = [x for x in pairs_all if x not in pairs]
res = []
for k, values in pairs:
for v in values.split():
x = (k, v)
if x in pairs_all:
if x not in seen:
seen.add(x)
res.append(x)
elif k == '*':
new = [o for o in others if o not in seen]
seen.update(new)
res.extend(new)
elif v == '*':
new = [o for o in others if o not in seen and o[0] == k]
seen.update(new)
res.extend(new)
return res
def notify_info_yielded(event):
"""Makes a generator send the event 'event' every time it yields.
This decorator is supposed to decorate a generator, but any function
returning an iterable should work.
Each yielded value is passed to plugins using the 'info' parameter of
'send'.
"""
def decorator(generator):
def decorated(*args, **kwargs):
for v in generator(*args, **kwargs):
send(event, info=v)
yield v
return decorated
return decorator
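# Illustrative use (the event name and generator are hypothetical):
#
#     @notify_info_yielded('file_scanned')
#     def scan(paths):
#         for path in paths:
#             yield path  # plugins receive send('file_scanned', info=path)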
def get_distance(config, data_source, info):
"""Returns the ``data_source`` weight and the maximum source weight
for albums or individual tracks.
"""
dist = beets.autotag.Distance()
if info.data_source == data_source:
dist.add('source', config['source_weight'].as_number())
return dist
def apply_item_changes(lib, item, move, pretend, write):
"""Store, move, and write the item according to the arguments.
:param lib: beets library.
:type lib: beets.library.Library
:param item: Item whose changes to apply.
:type item: beets.library.Item
:param move: Move the item if it's in the library.
:type move: bool
:param pretend: Return without moving, writing, or storing the item's
metadata.
:type pretend: bool
:param write: Write the item's metadata to its media file.
:type write: bool
"""
if pretend:
return
from beets import util
# Move the item if it's in the library.
if move and lib.directory in util.ancestry(item.path):
item.move(with_album=False)
if write:
item.try_write()
item.store()
class MetadataSourcePlugin(metaclass=abc.ABCMeta):
def __init__(self):
super().__init__()
self.config.add({'source_weight': 0.5})
@abc.abstractproperty
def id_regex(self):
raise NotImplementedError
@abc.abstractproperty
def data_source(self):
raise NotImplementedError
@abc.abstractproperty
def search_url(self):
raise NotImplementedError
@abc.abstractproperty
def album_url(self):
raise NotImplementedError
@abc.abstractproperty
def track_url(self):
raise NotImplementedError
@abc.abstractmethod
def _search_api(self, query_type, filters, keywords=''):
raise NotImplementedError
@abc.abstractmethod
def album_for_id(self, album_id):
raise NotImplementedError
@abc.abstractmethod
def track_for_id(self, track_id=None, track_data=None):
raise NotImplementedError
@staticmethod
def get_artist(artists, id_key='id', name_key='name'):
"""Returns an artist string (all artists) and an artist_id (the main
artist) for a list of artist object dicts.
For each artist, this function moves articles (such as 'a', 'an',
and 'the') to the front and strips trailing disambiguation numbers. It
returns a tuple containing the comma-separated string of all
normalized artists and the ``id`` of the main/first artist.
:param artists: Iterable of artist dicts or lists returned by API.
:type artists: list[dict] or list[list]
:param id_key: Key or index corresponding to the value of ``id`` for
the main/first artist. Defaults to 'id'.
:type id_key: str or int
:param name_key: Key or index corresponding to values of names
to concatenate for the artist string (containing all artists).
Defaults to 'name'.
:type name_key: str or int
:return: Normalized artist string.
:rtype: str
"""
artist_id = None
artist_names = []
for artist in artists:
if not artist_id:
artist_id = artist[id_key]
name = artist[name_key]
# Strip disambiguation number.
name = re.sub(r' \(\d+\)$', '', name)
# Move articles to the front.
name = re.sub(r'^(.*?), (a|an|the)$', r'\2 \1', name, flags=re.I)
artist_names.append(name)
artist = ', '.join(artist_names).replace(' ,', ',') or None
return artist, artist_id
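    # Usage sketch (artist dicts invented): disambiguation numbers are
    # stripped and trailing articles move to the front.
    #
    #     >>> MetadataSourcePlugin.get_artist(
    #     ...     [{'id': 9, 'name': 'Beatles, The (2)'},
    #     ...      {'id': 10, 'name': 'Someone Else'}])
    #     ('The Beatles, Someone Else', 9)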
def _get_id(self, url_type, id_):
"""Parse an ID from its URL if necessary.
:param url_type: Type of URL. Either 'album' or 'track'.
:type url_type: str
:param id_: Album/track ID or URL.
:type id_: str
:return: Album/track ID.
:rtype: str
"""
self._log.debug(
"Searching {} for {} '{}'", self.data_source, url_type, id_
)
match = re.search(self.id_regex['pattern'].format(url_type), str(id_))
if match:
id_ = match.group(self.id_regex['match_group'])
if id_:
return id_
return None
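    # For reference, a concrete subclass supplies something shaped like this
    # (the values are hypothetical; only the keys are required by _get_id):
    #
    #     id_regex = {
    #         'pattern': r'example\.com/{}/(\w+)',  # formatted with url_type
    #         'match_group': 1,
    #     }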
def candidates(self, items, artist, album, va_likely, extra_tags=None):
"""Returns a list of AlbumInfo objects for Search API results
matching an ``album`` and ``artist`` (if not various).
:param items: List of items comprised by an album to be matched.
:type items: list[beets.library.Item]
:param artist: The artist of the album to be matched.
:type artist: str
:param album: The name of the album to be matched.
:type album: str
:param va_likely: True if the album to be matched likely has
Various Artists.
:type va_likely: bool
:return: Candidate AlbumInfo objects.
:rtype: list[beets.autotag.hooks.AlbumInfo]
"""
query_filters = {'album': album}
if not va_likely:
query_filters['artist'] = artist
results = self._search_api(query_type='album', filters=query_filters)
albums = [self.album_for_id(album_id=r['id']) for r in results]
return [a for a in albums if a is not None]
def item_candidates(self, item, artist, title):
"""Returns a list of TrackInfo objects for Search API results
matching ``title`` and ``artist``.
:param item: Singleton item to be matched.
:type item: beets.library.Item
:param artist: The artist of the track to be matched.
:type artist: str
:param title: The title of the track to be matched.
:type title: str
:return: Candidate TrackInfo objects.
:rtype: list[beets.autotag.hooks.TrackInfo]
"""
tracks = self._search_api(
query_type='track', keywords=title, filters={'artist': artist}
)
return [self.track_for_id(track_data=track) for track in tracks]
def album_distance(self, items, album_info, mapping):
return get_distance(
data_source=self.data_source, info=album_info, config=self.config
)
def track_distance(self, item, track_info):
return get_distance(
data_source=self.data_source, info=track_info, config=self.config
)
| 24,973
|
Python
|
.py
| 625
| 31.8704
| 79
| 0.627855
|
rembo10/headphones
| 3,370
| 601
| 527
|
GPL-3.0
|
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
|
9,246
|
importer.py
|
rembo10_headphones/lib/beets/importer.py
|
# This file is part of beets.
# Copyright 2016, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Provides the basic, interface-agnostic workflow for importing and
autotagging music files.
"""
import os
import re
import pickle
import itertools
from collections import defaultdict
from tempfile import mkdtemp
from bisect import insort, bisect_left
from contextlib import contextmanager
import shutil
import time
from beets import logging
from beets import autotag
from beets import library
from beets import dbcore
from beets import plugins
from beets import util
from beets import config
from beets.util import pipeline, sorted_walk, ancestry, MoveOperation
from beets.util import syspath, normpath, displayable_path
from enum import Enum
import mediafile
action = Enum('action',
['SKIP', 'ASIS', 'TRACKS', 'APPLY', 'ALBUMS', 'RETAG'])
# The RETAG action represents "don't apply any match, but do record
# new metadata". It's not reachable via the standard command prompt but
# can be used by plugins.
QUEUE_SIZE = 128
SINGLE_ARTIST_THRESH = 0.25
PROGRESS_KEY = 'tagprogress'
HISTORY_KEY = 'taghistory'
# Global logger.
log = logging.getLogger('beets')
class ImportAbort(Exception):
"""Raised when the user aborts the tagging operation.
"""
pass
# Utilities.
def _open_state():
"""Reads the state file, returning a dictionary."""
try:
with open(config['statefile'].as_filename(), 'rb') as f:
return pickle.load(f)
except Exception as exc:
# The `pickle` module can emit all sorts of exceptions during
# unpickling, including ImportError. We use a catch-all
# exception to avoid enumerating them all (the docs don't even have a
# full list!).
log.debug('state file could not be read: {0}', exc)
return {}
def _save_state(state):
"""Writes the state dictionary out to disk."""
try:
with open(config['statefile'].as_filename(), 'wb') as f:
pickle.dump(state, f)
except OSError as exc:
log.error('state file could not be written: {0}', exc)
# Utilities for reading and writing the beets progress file, which
# allows long tagging tasks to be resumed when they pause (or crash).
def progress_read():
state = _open_state()
return state.setdefault(PROGRESS_KEY, {})
@contextmanager
def progress_write():
state = _open_state()
progress = state.setdefault(PROGRESS_KEY, {})
yield progress
_save_state(state)
def progress_add(toppath, *paths):
"""Record that the files under all of the `paths` have been imported
under `toppath`.
"""
with progress_write() as state:
imported = state.setdefault(toppath, [])
for path in paths:
# Normally `progress_add` will be called with the path
# argument increasing. This is because of the ordering in
# `albums_in_dir`. We take advantage of that to make the
# code faster
if imported and imported[len(imported) - 1] <= path:
imported.append(path)
else:
insort(imported, path)
def progress_element(toppath, path):
"""Return whether `path` has been imported in `toppath`.
"""
state = progress_read()
if toppath not in state:
return False
imported = state[toppath]
i = bisect_left(imported, path)
return i != len(imported) and imported[i] == path
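# Standalone sketch of the sorted-list membership scheme used by
# `progress_add` and `progress_element` (paths invented):
#
#     >>> from bisect import insort, bisect_left
#     >>> imported = []
#     >>> for p in [b'/m/a', b'/m/c', b'/m/b']:
#     ...     insort(imported, p)
#     >>> i = bisect_left(imported, b'/m/b')
#     >>> i != len(imported) and imported[i] == b'/m/b'
#     True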
def has_progress(toppath):
"""Return `True` if there exist paths that have already been
imported under `toppath`.
"""
state = progress_read()
return toppath in state
def progress_reset(toppath):
with progress_write() as state:
if toppath in state:
del state[toppath]
# Similarly, utilities for manipulating the "incremental" import log.
# This keeps track of all directories that were ever imported, which
# allows the importer to only import new stuff.
def history_add(paths):
"""Indicate that the import of the album in `paths` is completed and
should not be repeated in incremental imports.
"""
state = _open_state()
if HISTORY_KEY not in state:
state[HISTORY_KEY] = set()
state[HISTORY_KEY].add(tuple(paths))
_save_state(state)
def history_get():
"""Get the set of completed path tuples in incremental imports.
"""
state = _open_state()
if HISTORY_KEY not in state:
return set()
return state[HISTORY_KEY]
# Abstract session class.
class ImportSession:
"""Controls an import action. Subclasses should implement methods to
communicate with the user or otherwise make decisions.
"""
def __init__(self, lib, loghandler, paths, query):
"""Create a session. `lib` is a Library object. `loghandler` is a
logging.Handler. Either `paths` or `query` is non-null and indicates
the source of files to be imported.
"""
self.lib = lib
self.logger = self._setup_logging(loghandler)
self.paths = paths
self.query = query
self._is_resuming = {}
self._merged_items = set()
self._merged_dirs = set()
# Normalize the paths.
if self.paths:
self.paths = list(map(normpath, self.paths))
def _setup_logging(self, loghandler):
logger = logging.getLogger(__name__)
logger.propagate = False
if not loghandler:
loghandler = logging.NullHandler()
logger.handlers = [loghandler]
return logger
def set_config(self, config):
"""Set `config` property from global import config and make
implied changes.
"""
# FIXME: Maybe this function should not exist and should instead
# provide "decision wrappers" like "should_resume()", etc.
iconfig = dict(config)
self.config = iconfig
# Incremental and progress are mutually exclusive.
if iconfig['incremental']:
iconfig['resume'] = False
# When based on a query instead of directories, never
# save progress or try to resume.
if self.query is not None:
iconfig['resume'] = False
iconfig['incremental'] = False
if iconfig['reflink']:
iconfig['reflink'] = iconfig['reflink'] \
.as_choice(['auto', True, False])
# Copy, move, reflink, link, and hardlink are mutually exclusive.
if iconfig['move']:
iconfig['copy'] = False
iconfig['link'] = False
iconfig['hardlink'] = False
iconfig['reflink'] = False
elif iconfig['link']:
iconfig['copy'] = False
iconfig['move'] = False
iconfig['hardlink'] = False
iconfig['reflink'] = False
elif iconfig['hardlink']:
iconfig['copy'] = False
iconfig['move'] = False
iconfig['link'] = False
iconfig['reflink'] = False
elif iconfig['reflink']:
iconfig['copy'] = False
iconfig['move'] = False
iconfig['link'] = False
iconfig['hardlink'] = False
# Only delete when copying.
if not iconfig['copy']:
iconfig['delete'] = False
self.want_resume = config['resume'].as_choice([True, False, 'ask'])
def tag_log(self, status, paths):
"""Log a message about a given album to the importer log. The status
should reflect the reason the album couldn't be tagged.
"""
self.logger.info('{0} {1}', status, displayable_path(paths))
def log_choice(self, task, duplicate=False):
"""Logs the task's current choice if it should be logged. If
``duplicate``, then this is a secondary choice after a duplicate was
detected and a decision was made.
"""
paths = task.paths
if duplicate:
# Duplicate: log all three choices (skip, keep both, and trump).
if task.should_remove_duplicates:
self.tag_log('duplicate-replace', paths)
elif task.choice_flag in (action.ASIS, action.APPLY):
self.tag_log('duplicate-keep', paths)
            elif task.choice_flag is action.SKIP:
self.tag_log('duplicate-skip', paths)
else:
# Non-duplicate: log "skip" and "asis" choices.
if task.choice_flag is action.ASIS:
self.tag_log('asis', paths)
elif task.choice_flag is action.SKIP:
self.tag_log('skip', paths)
def should_resume(self, path):
raise NotImplementedError
def choose_match(self, task):
raise NotImplementedError
def resolve_duplicate(self, task, found_duplicates):
raise NotImplementedError
def choose_item(self, task):
raise NotImplementedError
def run(self):
"""Run the import task.
"""
self.logger.info('import started {0}', time.asctime())
self.set_config(config['import'])
# Set up the pipeline.
if self.query is None:
stages = [read_tasks(self)]
else:
stages = [query_tasks(self)]
# In pretend mode, just log what would otherwise be imported.
if self.config['pretend']:
stages += [log_files(self)]
else:
if self.config['group_albums'] and \
not self.config['singletons']:
# Split directory tasks into one task for each album.
stages += [group_albums(self)]
# These stages either talk to the user to get a decision or,
# in the case of a non-autotagged import, just choose to
# import everything as-is. In *both* cases, these stages
# also add the music to the library database, so later
# stages need to read and write data from there.
if self.config['autotag']:
stages += [lookup_candidates(self), user_query(self)]
else:
stages += [import_asis(self)]
# Plugin stages.
for stage_func in plugins.early_import_stages():
stages.append(plugin_stage(self, stage_func))
for stage_func in plugins.import_stages():
stages.append(plugin_stage(self, stage_func))
stages += [manipulate_files(self)]
pl = pipeline.Pipeline(stages)
# Run the pipeline.
plugins.send('import_begin', session=self)
try:
if config['threaded']:
pl.run_parallel(QUEUE_SIZE)
else:
pl.run_sequential()
except ImportAbort:
# User aborted operation. Silently stop.
pass
# Incremental and resumed imports
def already_imported(self, toppath, paths):
"""Returns true if the files belonging to this task have already
been imported in a previous session.
"""
if self.is_resuming(toppath) \
and all([progress_element(toppath, p) for p in paths]):
return True
if self.config['incremental'] \
and tuple(paths) in self.history_dirs:
return True
return False
@property
def history_dirs(self):
if not hasattr(self, '_history_dirs'):
self._history_dirs = history_get()
return self._history_dirs
def already_merged(self, paths):
"""Returns true if all the paths being imported were part of a merge
during previous tasks.
"""
for path in paths:
if path not in self._merged_items \
and path not in self._merged_dirs:
return False
return True
def mark_merged(self, paths):
"""Mark paths and directories as merged for future reimport tasks.
"""
self._merged_items.update(paths)
dirs = {os.path.dirname(path) if os.path.isfile(path) else path
for path in paths}
self._merged_dirs.update(dirs)
def is_resuming(self, toppath):
"""Return `True` if user wants to resume import of this path.
You have to call `ask_resume` first to determine the return value.
"""
return self._is_resuming.get(toppath, False)
def ask_resume(self, toppath):
"""If import of `toppath` was aborted in an earlier session, ask
user if she wants to resume the import.
Determines the return value of `is_resuming(toppath)`.
"""
if self.want_resume and has_progress(toppath):
# Either accept immediately or prompt for input to decide.
if self.want_resume is True or \
self.should_resume(toppath):
log.warning('Resuming interrupted import of {0}',
util.displayable_path(toppath))
self._is_resuming[toppath] = True
else:
# Clear progress; we're starting from the top.
progress_reset(toppath)
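# A minimal concrete session (sketch): subclasses provide the decisions left
# abstract above. The auto-accept behavior here is invented for illustration.
#
#     class AutoImportSession(ImportSession):
#         def should_resume(self, path):
#             return True
#
#         def choose_match(self, task):
#             return action.ASIS
#
#         def choose_item(self, task):
#             return action.ASIS
#
#         def resolve_duplicate(self, task, found_duplicates):
#             task.set_choice(action.SKIP)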
# The importer task class.
class BaseImportTask:
"""An abstract base class for importer tasks.
Tasks flow through the importer pipeline. Each stage can update
them. """
def __init__(self, toppath, paths, items):
"""Create a task. The primary fields that define a task are:
* `toppath`: The user-specified base directory that contains the
music for this task. If the task has *no* user-specified base
(for example, when importing based on an -L query), this can
be None. This is used for tracking progress and history.
* `paths`: A list of *specific* paths where the music for this task
came from. These paths can be directories, when their entire
contents are being imported, or files, when the task comprises
individual tracks. This is used for progress/history tracking and
for displaying the task to the user.
* `items`: A list of `Item` objects representing the music being
imported.
These fields should not change after initialization.
"""
self.toppath = toppath
self.paths = paths
self.items = items
class ImportTask(BaseImportTask):
"""Represents a single set of items to be imported along with its
intermediate state. May represent an album or a single item.
The import session and stages call the following methods in the
given order.
* `lookup_candidates()` Sets the `common_artist`, `common_album`,
`candidates`, and `rec` attributes. `candidates` is a list of
`AlbumMatch` objects.
* `choose_match()` Uses the session to set the `match` attribute
from the `candidates` list.
* `find_duplicates()` Returns a list of albums from `lib` with the
same artist and album name as the task.
* `apply_metadata()` Sets the attributes of the items from the
task's `match` attribute.
* `add()` Add the imported items and album to the database.
* `manipulate_files()` Copy, move, and write files depending on the
session configuration.
* `set_fields()` Sets the fields given at CLI or configuration to
the specified values.
* `finalize()` Update the import progress and cleanup the file
system.
"""
def __init__(self, toppath, paths, items):
super().__init__(toppath, paths, items)
self.choice_flag = None
self.cur_album = None
self.cur_artist = None
self.candidates = []
self.rec = None
self.should_remove_duplicates = False
self.should_merge_duplicates = False
self.is_album = True
self.search_ids = [] # user-supplied candidate IDs.
def set_choice(self, choice):
"""Given an AlbumMatch or TrackMatch object or an action constant,
indicates that an action has been selected for this task.
"""
# Not part of the task structure:
assert choice != action.APPLY # Only used internally.
if choice in (action.SKIP, action.ASIS, action.TRACKS, action.ALBUMS,
action.RETAG):
self.choice_flag = choice
self.match = None
else:
self.choice_flag = action.APPLY # Implicit choice.
self.match = choice
def save_progress(self):
"""Updates the progress state to indicate that this album has
finished.
"""
if self.toppath:
progress_add(self.toppath, *self.paths)
def save_history(self):
"""Save the directory in the history for incremental imports.
"""
if self.paths:
history_add(self.paths)
# Logical decisions.
@property
def apply(self):
return self.choice_flag == action.APPLY
@property
def skip(self):
return self.choice_flag == action.SKIP
# Convenient data.
def chosen_ident(self):
"""Returns identifying metadata about the current choice. For
albums, this is an (artist, album) pair. For items, this is
(artist, title). May only be called when the choice flag is ASIS
or RETAG (in which case the data comes from the files' current
metadata) or APPLY (data comes from the choice).
"""
if self.choice_flag in (action.ASIS, action.RETAG):
return (self.cur_artist, self.cur_album)
elif self.choice_flag is action.APPLY:
return (self.match.info.artist, self.match.info.album)
def imported_items(self):
"""Return a list of Items that should be added to the library.
        If the task applies an album match, the method only returns the
        matched items.
"""
if self.choice_flag in (action.ASIS, action.RETAG):
return list(self.items)
elif self.choice_flag == action.APPLY:
return list(self.match.mapping.keys())
else:
assert False
def apply_metadata(self):
"""Copy metadata from match info to the items.
"""
if config['import']['from_scratch']:
for item in self.match.mapping:
item.clear()
autotag.apply_metadata(self.match.info, self.match.mapping)
def duplicate_items(self, lib):
duplicate_items = []
for album in self.find_duplicates(lib):
duplicate_items += album.items()
return duplicate_items
def remove_duplicates(self, lib):
duplicate_items = self.duplicate_items(lib)
log.debug('removing {0} old duplicated items', len(duplicate_items))
for item in duplicate_items:
item.remove()
if lib.directory in util.ancestry(item.path):
log.debug('deleting duplicate {0}',
util.displayable_path(item.path))
util.remove(item.path)
util.prune_dirs(os.path.dirname(item.path),
lib.directory)
def set_fields(self, lib):
"""Sets the fields given at CLI or configuration to the specified
values, for both the album and all its items.
"""
items = self.imported_items()
for field, view in config['import']['set_fields'].items():
value = view.get()
log.debug('Set field {1}={2} for {0}',
displayable_path(self.paths),
field,
value)
self.album[field] = value
for item in items:
item[field] = value
with lib.transaction():
for item in items:
item.store()
self.album.store()
def finalize(self, session):
"""Save progress, clean up files, and emit plugin event.
"""
# Update progress.
if session.want_resume:
self.save_progress()
if session.config['incremental'] and not (
# Should we skip recording to incremental list?
self.skip and session.config['incremental_skip_later']
):
self.save_history()
self.cleanup(copy=session.config['copy'],
delete=session.config['delete'],
move=session.config['move'])
if not self.skip:
self._emit_imported(session.lib)
def cleanup(self, copy=False, delete=False, move=False):
"""Remove and prune imported paths.
"""
# Do not delete any files or prune directories when skipping.
if self.skip:
return
items = self.imported_items()
# When copying and deleting originals, delete old files.
if copy and delete:
new_paths = [os.path.realpath(item.path) for item in items]
for old_path in self.old_paths:
# Only delete files that were actually copied.
if old_path not in new_paths:
util.remove(syspath(old_path), False)
self.prune(old_path)
# When moving, prune empty directories containing the original files.
elif move:
for old_path in self.old_paths:
self.prune(old_path)
def _emit_imported(self, lib):
plugins.send('album_imported', lib=lib, album=self.album)
def handle_created(self, session):
"""Send the `import_task_created` event for this task. Return a list of
tasks that should continue through the pipeline. By default, this is a
list containing only the task itself, but plugins can replace the task
with new ones.
"""
tasks = plugins.send('import_task_created', session=session, task=self)
if not tasks:
tasks = [self]
else:
# The plugins gave us a list of lists of tasks. Flatten it.
tasks = [t for inner in tasks for t in inner]
return tasks
def lookup_candidates(self):
"""Retrieve and store candidates for this album. User-specified
candidate IDs are stored in self.search_ids: if present, the
initial lookup is restricted to only those IDs.
"""
artist, album, prop = \
autotag.tag_album(self.items, search_ids=self.search_ids)
self.cur_artist = artist
self.cur_album = album
self.candidates = prop.candidates
self.rec = prop.recommendation
def find_duplicates(self, lib):
"""Return a list of albums from `lib` with the same artist and
album name as the task.
"""
artist, album = self.chosen_ident()
if artist is None:
# As-is import with no artist. Skip check.
return []
duplicates = []
task_paths = {i.path for i in self.items if i}
duplicate_query = dbcore.AndQuery((
dbcore.MatchQuery('albumartist', artist),
dbcore.MatchQuery('album', album),
))
for album in lib.albums(duplicate_query):
# Check whether the album paths are all present in the task
# i.e. album is being completely re-imported by the task,
# in which case it is not a duplicate (will be replaced).
album_paths = {i.path for i in album.items()}
if not (album_paths <= task_paths):
duplicates.append(album)
return duplicates
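    # The reimport test above is plain set inclusion: an existing album whose
    # files are all present in the task is a wholesale reimport rather than a
    # duplicate (paths invented):
    #
    #     >>> album_paths = {b'/a/1.mp3', b'/a/2.mp3'}
    #     >>> task_paths = {b'/a/1.mp3', b'/a/2.mp3', b'/a/3.mp3'}
    #     >>> album_paths <= task_paths  # fully covered -> not a duplicate
    #     True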
def align_album_level_fields(self):
"""Make some album fields equal across `self.items`. For the
        RETAG action, we assume that whatever returned it (i.e. a
        plugin) always ensures that the first item contains valid data
        on the relevant fields.
"""
changes = {}
if self.choice_flag == action.ASIS:
# Taking metadata "as-is". Guess whether this album is VA.
plur_albumartist, freq = util.plurality(
[i.albumartist or i.artist for i in self.items]
)
if freq == len(self.items) or \
(freq > 1 and
float(freq) / len(self.items) >= SINGLE_ARTIST_THRESH):
# Single-artist album.
changes['albumartist'] = plur_albumartist
changes['comp'] = False
else:
# VA.
changes['albumartist'] = config['va_name'].as_str()
changes['comp'] = True
elif self.choice_flag in (action.APPLY, action.RETAG):
# Applying autotagged metadata. Just get AA from the first
# item.
if not self.items[0].albumartist:
changes['albumartist'] = self.items[0].artist
if not self.items[0].mb_albumartistid:
changes['mb_albumartistid'] = self.items[0].mb_artistid
# Apply new metadata.
for item in self.items:
item.update(changes)
def manipulate_files(self, operation=None, write=False, session=None):
""" Copy, move, link, hardlink or reflink (depending on `operation`) the files
as well as write metadata.
`operation` should be an instance of `util.MoveOperation`.
If `write` is `True` metadata is written to the files.
"""
items = self.imported_items()
# Save the original paths of all items for deletion and pruning
# in the next step (finalization).
self.old_paths = [item.path for item in items]
for item in items:
if operation is not None:
# In copy and link modes, treat re-imports specially:
# move in-library files. (Out-of-library files are
# copied/moved as usual).
old_path = item.path
if (operation != MoveOperation.MOVE
and self.replaced_items[item]
and session.lib.directory in util.ancestry(old_path)):
item.move()
# We moved the item, so remove the
# now-nonexistent file from old_paths.
self.old_paths.remove(old_path)
else:
# A normal import. Just copy files and keep track of
# old paths.
item.move(operation)
if write and (self.apply or self.choice_flag == action.RETAG):
item.try_write()
with session.lib.transaction():
for item in self.imported_items():
item.store()
plugins.send('import_task_files', session=session, task=self)
def add(self, lib):
"""Add the items as an album to the library and remove replaced items.
"""
self.align_album_level_fields()
with lib.transaction():
self.record_replaced(lib)
self.remove_replaced(lib)
self.album = lib.add_album(self.imported_items())
if 'data_source' in self.imported_items()[0]:
self.album.data_source = self.imported_items()[0].data_source
self.reimport_metadata(lib)
def record_replaced(self, lib):
"""Records the replaced items and albums in the `replaced_items`
and `replaced_albums` dictionaries.
"""
self.replaced_items = defaultdict(list)
self.replaced_albums = defaultdict(list)
replaced_album_ids = set()
for item in self.imported_items():
dup_items = list(lib.items(
dbcore.query.BytesQuery('path', item.path)
))
self.replaced_items[item] = dup_items
for dup_item in dup_items:
if (not dup_item.album_id or
dup_item.album_id in replaced_album_ids):
continue
replaced_album = dup_item._cached_album
if replaced_album:
replaced_album_ids.add(dup_item.album_id)
self.replaced_albums[replaced_album.path] = replaced_album
def reimport_metadata(self, lib):
"""For reimports, preserves metadata for reimported items and
albums.
"""
if self.is_album:
replaced_album = self.replaced_albums.get(self.album.path)
if replaced_album:
self.album.added = replaced_album.added
self.album.update(replaced_album._values_flex)
self.album.artpath = replaced_album.artpath
self.album.store()
log.debug(
'Reimported album: added {0}, flexible '
'attributes {1} from album {2} for {3}',
self.album.added,
replaced_album._values_flex.keys(),
replaced_album.id,
displayable_path(self.album.path)
)
for item in self.imported_items():
dup_items = self.replaced_items[item]
for dup_item in dup_items:
if dup_item.added and dup_item.added != item.added:
item.added = dup_item.added
log.debug(
'Reimported item added {0} '
'from item {1} for {2}',
item.added,
dup_item.id,
displayable_path(item.path)
)
item.update(dup_item._values_flex)
log.debug(
'Reimported item flexible attributes {0} '
'from item {1} for {2}',
dup_item._values_flex.keys(),
dup_item.id,
displayable_path(item.path)
)
item.store()
def remove_replaced(self, lib):
"""Removes all the items from the library that have the same
path as an item from this task.
"""
for item in self.imported_items():
for dup_item in self.replaced_items[item]:
log.debug('Replacing item {0}: {1}',
dup_item.id, displayable_path(item.path))
dup_item.remove()
log.debug('{0} of {1} items replaced',
sum(bool(l) for l in self.replaced_items.values()),
len(self.imported_items()))
def choose_match(self, session):
"""Ask the session which match should apply and apply it.
"""
choice = session.choose_match(self)
self.set_choice(choice)
session.log_choice(self)
def reload(self):
"""Reload albums and items from the database.
"""
for item in self.imported_items():
item.load()
self.album.load()
# Utilities.
def prune(self, filename):
"""Prune any empty directories above the given file. If this
task has no `toppath` or the file path provided is not within
the `toppath`, then this function has no effect. Similarly, if
the file still exists, no pruning is performed, so it's safe to
call when the file in question may not have been removed.
"""
if self.toppath and not os.path.exists(filename):
util.prune_dirs(os.path.dirname(filename),
self.toppath,
clutter=config['clutter'].as_str_seq())
class SingletonImportTask(ImportTask):
"""ImportTask for a single track that is not associated to an album.
"""
def __init__(self, toppath, item):
super().__init__(toppath, [item.path], [item])
self.item = item
self.is_album = False
self.paths = [item.path]
def chosen_ident(self):
assert self.choice_flag in (action.ASIS, action.APPLY, action.RETAG)
if self.choice_flag in (action.ASIS, action.RETAG):
return (self.item.artist, self.item.title)
elif self.choice_flag is action.APPLY:
return (self.match.info.artist, self.match.info.title)
def imported_items(self):
return [self.item]
def apply_metadata(self):
autotag.apply_item_metadata(self.item, self.match.info)
def _emit_imported(self, lib):
for item in self.imported_items():
plugins.send('item_imported', lib=lib, item=item)
def lookup_candidates(self):
prop = autotag.tag_item(self.item, search_ids=self.search_ids)
self.candidates = prop.candidates
self.rec = prop.recommendation
def find_duplicates(self, lib):
"""Return a list of items from `lib` that have the same artist
and title as the task.
"""
artist, title = self.chosen_ident()
found_items = []
query = dbcore.AndQuery((
dbcore.MatchQuery('artist', artist),
dbcore.MatchQuery('title', title),
))
for other_item in lib.items(query):
            # An existing library entry for the same file is not a duplicate.
if other_item.path != self.item.path:
found_items.append(other_item)
return found_items
duplicate_items = find_duplicates
def add(self, lib):
with lib.transaction():
self.record_replaced(lib)
self.remove_replaced(lib)
lib.add(self.item)
self.reimport_metadata(lib)
def infer_album_fields(self):
raise NotImplementedError
def choose_match(self, session):
"""Ask the session which match should apply and apply it.
"""
choice = session.choose_item(self)
self.set_choice(choice)
session.log_choice(self)
def reload(self):
self.item.load()
def set_fields(self, lib):
"""Sets the fields given at CLI or configuration to the specified
values, for the singleton item.
"""
for field, view in config['import']['set_fields'].items():
value = view.get()
log.debug('Set field {1}={2} for {0}',
displayable_path(self.paths),
field,
value)
self.item[field] = value
self.item.store()
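    # Illustrative configuration sketch (hedged; the field values are
    # hypothetical). With something like the following in config.yaml,
    # every imported singleton has these fields set after it is added:
    #
    #     import:
    #         set_fields:
    #             genre: Podcast
    #             comments: imported by beets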
# FIXME The inheritance relationships are inverted. This is why there
# are so many methods which pass. More responsibility should be delegated to
# the BaseImportTask class.
class SentinelImportTask(ImportTask):
"""A sentinel task marks the progress of an import and does not
import any items itself.
    If only `toppath` is set, the task indicates the end of a top-level
directory import. If the `paths` argument is also given, the task
indicates the progress in the `toppath` import.
"""
def __init__(self, toppath, paths):
super().__init__(toppath, paths, ())
# TODO Remove the remaining attributes eventually
self.should_remove_duplicates = False
self.is_album = True
self.choice_flag = None
def save_history(self):
pass
def save_progress(self):
if self.paths is None:
# "Done" sentinel.
progress_reset(self.toppath)
else:
# "Directory progress" sentinel for singletons
progress_add(self.toppath, *self.paths)
def skip(self):
return True
def set_choice(self, choice):
raise NotImplementedError
def cleanup(self, **kwargs):
pass
def _emit_imported(self, session):
pass
class ArchiveImportTask(SentinelImportTask):
"""An import task that represents the processing of an archive.
`toppath` must be a `zip`, `tar`, or `rar` archive. Archive tasks
serve two purposes:
- First, it will unarchive the files to a temporary directory and
return it. The client should read tasks from the resulting
directory and send them through the pipeline.
- Second, it will clean up the temporary directory when it proceeds
through the pipeline. The client should send the archive task
after sending the rest of the music tasks to make this work.
"""
def __init__(self, toppath):
super().__init__(toppath, ())
self.extracted = False
@classmethod
def is_archive(cls, path):
"""Returns true if the given path points to an archive that can
be handled.
"""
if not os.path.isfile(path):
return False
for path_test, _ in cls.handlers():
if path_test(util.py3_path(path)):
return True
return False
@classmethod
def handlers(cls):
"""Returns a list of archive handlers.
Each handler is a `(path_test, ArchiveClass)` tuple. `path_test`
is a function that returns `True` if the given path can be
handled by `ArchiveClass`. `ArchiveClass` is a class that
implements the same interface as `tarfile.TarFile`.
"""
if not hasattr(cls, '_handlers'):
cls._handlers = []
from zipfile import is_zipfile, ZipFile
cls._handlers.append((is_zipfile, ZipFile))
import tarfile
cls._handlers.append((tarfile.is_tarfile, tarfile.open))
try:
from rarfile import is_rarfile, RarFile
except ImportError:
pass
else:
cls._handlers.append((is_rarfile, RarFile))
try:
from py7zr import is_7zfile, SevenZipFile
except ImportError:
pass
else:
cls._handlers.append((is_7zfile, SevenZipFile))
return cls._handlers
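    # Illustrative sketch (hedged; `FooArchive` is hypothetical): a
    # plugin could register an extra format here, as long as the class
    # can be instantiated as cls(path, mode='r') and supports
    # extractall() and close():
    #
    #     def _is_foo(path):
    #         return path.endswith('.foo')
    #
    #     ArchiveImportTask.handlers().append((_is_foo, FooArchive))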
def cleanup(self, **kwargs):
"""Removes the temporary directory the archive was extracted to.
"""
if self.extracted:
log.debug('Removing extracted directory: {0}',
displayable_path(self.toppath))
shutil.rmtree(self.toppath)
def extract(self):
"""Extracts the archive to a temporary directory and sets
`toppath` to that directory.
"""
for path_test, handler_class in self.handlers():
if path_test(util.py3_path(self.toppath)):
break
extract_to = mkdtemp()
archive = handler_class(util.py3_path(self.toppath), mode='r')
try:
archive.extractall(extract_to)
finally:
archive.close()
self.extracted = True
self.toppath = extract_to
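# Illustrative lifecycle sketch (hedged; `toppath` is hypothetical):
#
#     if ArchiveImportTask.is_archive(syspath(toppath)):
#         task = ArchiveImportTask(toppath)
#         task.extract()      # `task.toppath` now points at a tempdir
#         ...                 # read music from task.toppath
#         task.cleanup()      # removes the extracted tempdir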
class ImportTaskFactory:
"""Generate album and singleton import tasks for all media files
indicated by a path.
"""
def __init__(self, toppath, session):
"""Create a new task factory.
`toppath` is the user-specified path to search for music to
import. `session` is the `ImportSession`, which controls how
tasks are read from the directory.
"""
self.toppath = toppath
self.session = session
self.skipped = 0 # Skipped due to incremental/resume.
self.imported = 0 # "Real" tasks created.
self.is_archive = ArchiveImportTask.is_archive(syspath(toppath))
def tasks(self):
"""Yield all import tasks for music found in the user-specified
path `self.toppath`. Any necessary sentinel tasks are also
produced.
During generation, update `self.skipped` and `self.imported`
with the number of tasks that were not produced (due to
incremental mode or resumed imports) and the number of concrete
tasks actually produced, respectively.
If `self.toppath` is an archive, it is adjusted to point to the
extracted data.
"""
# Check whether this is an archive.
if self.is_archive:
archive_task = self.unarchive()
if not archive_task:
return
# Search for music in the directory.
for dirs, paths in self.paths():
if self.session.config['singletons']:
for path in paths:
tasks = self._create(self.singleton(path))
yield from tasks
yield self.sentinel(dirs)
else:
tasks = self._create(self.album(paths, dirs))
yield from tasks
# Produce the final sentinel for this toppath to indicate that
# it is finished. This is usually just a SentinelImportTask, but
# for archive imports, send the archive task instead (to remove
# the extracted directory).
if self.is_archive:
yield archive_task
else:
yield self.sentinel()
def _create(self, task):
"""Handle a new task to be emitted by the factory.
        Emit the `import_task_created` event and increment the
        `imported` count by the number of resulting tasks. Return the
        list of tasks it produces; if `task` is None, return an empty
        list.
"""
if task:
tasks = task.handle_created(self.session)
self.imported += len(tasks)
return tasks
return []
def paths(self):
"""Walk `self.toppath` and yield `(dirs, files)` pairs where
`files` are individual music files and `dirs` the set of
containing directories where the music was found.
This can either be a recursive search in the ordinary case, a
single track when `toppath` is a file, a single directory in
`flat` mode.
"""
if not os.path.isdir(syspath(self.toppath)):
yield [self.toppath], [self.toppath]
elif self.session.config['flat']:
paths = []
for dirs, paths_in_dir in albums_in_dir(self.toppath):
paths += paths_in_dir
yield [self.toppath], paths
else:
for dirs, paths in albums_in_dir(self.toppath):
yield dirs, paths
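    # Illustrative sketch (hedged) of the three shapes this can take,
    # for a hypothetical layout toppath/{Album A,Album B}/...:
    #
    #     default:  ([b'.../Album A'], [files]) per album directory
    #     flat:     ([toppath], all files under toppath) once
    #     file:     ([toppath], [toppath]) when toppath is one track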
def singleton(self, path):
"""Return a `SingletonImportTask` for the music file.
"""
if self.session.already_imported(self.toppath, [path]):
log.debug('Skipping previously-imported path: {0}',
displayable_path(path))
self.skipped += 1
return None
item = self.read_item(path)
if item:
return SingletonImportTask(self.toppath, item)
else:
return None
def album(self, paths, dirs=None):
"""Return a `ImportTask` with all media files from paths.
`dirs` is a list of parent directories used to record already
imported albums.
"""
if not paths:
return None
if dirs is None:
dirs = list({os.path.dirname(p) for p in paths})
if self.session.already_imported(self.toppath, dirs):
log.debug('Skipping previously-imported path: {0}',
displayable_path(dirs))
self.skipped += 1
return None
items = map(self.read_item, paths)
items = [item for item in items if item]
if items:
return ImportTask(self.toppath, dirs, items)
else:
return None
def sentinel(self, paths=None):
"""Return a `SentinelImportTask` indicating the end of a
top-level directory import.
"""
return SentinelImportTask(self.toppath, paths)
def unarchive(self):
"""Extract the archive for this `toppath`.
Extract the archive to a new directory, adjust `toppath` to
point to the extracted directory, and return an
`ArchiveImportTask`. If extraction fails, return None.
"""
assert self.is_archive
if not (self.session.config['move'] or
self.session.config['copy']):
log.warning("Archive importing requires either "
"'copy' or 'move' to be enabled.")
return
log.debug('Extracting archive: {0}',
displayable_path(self.toppath))
archive_task = ArchiveImportTask(self.toppath)
try:
archive_task.extract()
except Exception as exc:
log.error('extraction failed: {0}', exc)
return
# Now read albums from the extracted directory.
self.toppath = archive_task.toppath
log.debug('Archive extracted to: {0}', self.toppath)
return archive_task
def read_item(self, path):
"""Return an `Item` read from the path.
If an item cannot be read, return `None` instead and log an
error.
"""
try:
return library.Item.from_path(path)
except library.ReadError as exc:
if isinstance(exc.reason, mediafile.FileTypeError):
# Silently ignore non-music files.
pass
elif isinstance(exc.reason, mediafile.UnreadableFileError):
log.warning('unreadable file: {0}', displayable_path(path))
else:
log.error('error reading {0}: {1}',
displayable_path(path), exc)
# Pipeline utilities
def _freshen_items(items):
# Clear IDs from re-tagged items so they appear "fresh" when
# we add them back to the library.
for item in items:
item.id = None
item.album_id = None
def _extend_pipeline(tasks, *stages):
    # Return a pipeline extension that runs the given stages over a
    # list or iterator of tasks.
    if isinstance(tasks, list):
        task_iter = iter(tasks)
    else:
        task_iter = tasks
ipl = pipeline.Pipeline([task_iter] + list(stages))
return pipeline.multiple(ipl.pull())
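# A minimal usage sketch (hedged; the stages referenced here are
# defined later in this module):
#
#     return _extend_pipeline([task_a, task_b],
#                             lookup_candidates(session),
#                             user_query(session))
#
# The `pipeline.multiple(...)` return value tells the enclosing
# pipeline to expand the sub-pipeline's output into individual tasks.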
# Full-album pipeline stages.
def read_tasks(session):
"""A generator yielding all the albums (as ImportTask objects) found
in the user-specified list of paths. In the case of a singleton
import, yields single-item tasks instead.
"""
skipped = 0
for toppath in session.paths:
# Check whether we need to resume the import.
session.ask_resume(toppath)
# Generate tasks.
task_factory = ImportTaskFactory(toppath, session)
yield from task_factory.tasks()
skipped += task_factory.skipped
if not task_factory.imported:
log.warning('No files imported from {0}',
displayable_path(toppath))
# Show skipped directories (due to incremental/resume).
if skipped:
log.info('Skipped {0} paths.', skipped)
def query_tasks(session):
"""A generator that works as a drop-in-replacement for read_tasks.
Instead of finding files from the filesystem, a query is used to
match items from the library.
"""
if session.config['singletons']:
# Search for items.
for item in session.lib.items(session.query):
task = SingletonImportTask(None, item)
for task in task.handle_created(session):
yield task
else:
# Search for albums.
for album in session.lib.albums(session.query):
log.debug('yielding album {0}: {1} - {2}',
album.id, album.albumartist, album.album)
items = list(album.items())
_freshen_items(items)
task = ImportTask(None, [album.item_dir()], items)
for task in task.handle_created(session):
yield task
@pipeline.mutator_stage
def lookup_candidates(session, task):
"""A coroutine for performing the initial MusicBrainz lookup for an
album. It accepts lists of Items and yields
(items, cur_artist, cur_album, candidates, rec) tuples. If no match
is found, all of the yielded parameters (except items) are None.
"""
if task.skip:
# FIXME This gets duplicated a lot. We need a better
# abstraction.
return
plugins.send('import_task_start', session=session, task=task)
log.debug('Looking up: {0}', displayable_path(task.paths))
# Restrict the initial lookup to IDs specified by the user via the -m
# option. Currently all the IDs are passed onto the tasks directly.
task.search_ids = session.config['search_ids'].as_str_seq()
task.lookup_candidates()
@pipeline.stage
def user_query(session, task):
"""A coroutine for interfacing with the user about the tagging
process.
    The coroutine accepts ImportTask objects. It uses the
    session's `choose_match` method to determine the `action` for
    this task. Depending on the action, additional stages are executed
    and the processed task is yielded.
    It emits the ``import_task_choice`` event for plugins. Plugins have
    access to the choice via the ``task.choice_flag`` property and may
    choose to change it.
"""
if task.skip:
return task
if session.already_merged(task.paths):
return pipeline.BUBBLE
# Ask the user for a choice.
task.choose_match(session)
plugins.send('import_task_choice', session=session, task=task)
# As-tracks: transition to singleton workflow.
if task.choice_flag is action.TRACKS:
# Set up a little pipeline for dealing with the singletons.
def emitter(task):
for item in task.items:
task = SingletonImportTask(task.toppath, item)
yield from task.handle_created(session)
yield SentinelImportTask(task.toppath, task.paths)
return _extend_pipeline(emitter(task),
lookup_candidates(session),
user_query(session))
    # As albums: group items into albums and create a task for each album.
if task.choice_flag is action.ALBUMS:
return _extend_pipeline([task],
group_albums(session),
lookup_candidates(session),
user_query(session))
resolve_duplicates(session, task)
if task.should_merge_duplicates:
# Create a new task for tagging the current items
# and duplicates together
duplicate_items = task.duplicate_items(session.lib)
# Duplicates would be reimported so make them look "fresh"
_freshen_items(duplicate_items)
duplicate_paths = [item.path for item in duplicate_items]
# Record merged paths in the session so they are not reimported
session.mark_merged(duplicate_paths)
merged_task = ImportTask(None, task.paths + duplicate_paths,
task.items + duplicate_items)
return _extend_pipeline([merged_task],
lookup_candidates(session),
user_query(session))
apply_choice(session, task)
return task
def resolve_duplicates(session, task):
"""Check if a task conflicts with items or albums already imported
and ask the session to resolve this.
"""
if task.choice_flag in (action.ASIS, action.APPLY, action.RETAG):
found_duplicates = task.find_duplicates(session.lib)
if found_duplicates:
log.debug('found duplicates: {}'.format(
[o.id for o in found_duplicates]
))
# Get the default action to follow from config.
duplicate_action = config['import']['duplicate_action'].as_choice({
'skip': 's',
'keep': 'k',
'remove': 'r',
'merge': 'm',
'ask': 'a',
})
log.debug('default action for duplicates: {0}', duplicate_action)
if duplicate_action == 's':
# Skip new.
task.set_choice(action.SKIP)
elif duplicate_action == 'k':
# Keep both. Do nothing; leave the choice intact.
pass
elif duplicate_action == 'r':
# Remove old.
task.should_remove_duplicates = True
elif duplicate_action == 'm':
# Merge duplicates together
task.should_merge_duplicates = True
else:
# No default action set; ask the session.
session.resolve_duplicate(task, found_duplicates)
session.log_choice(task, True)
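# Illustrative configuration sketch (hedged): setting a non-interactive
# default for duplicate resolution in config.yaml:
#
#     import:
#         duplicate_action: merge    # or: skip / keep / remove / ask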
@pipeline.mutator_stage
def import_asis(session, task):
"""Select the `action.ASIS` choice for all tasks.
This stage replaces the initial_lookup and user_query stages
when the importer is run without autotagging.
"""
if task.skip:
return
log.info('{}', displayable_path(task.paths))
task.set_choice(action.ASIS)
apply_choice(session, task)
def apply_choice(session, task):
"""Apply the task's choice to the Album or Item it contains and add
it to the library.
"""
if task.skip:
return
# Change metadata.
if task.apply:
task.apply_metadata()
plugins.send('import_task_apply', session=session, task=task)
task.add(session.lib)
# If ``set_fields`` is set, set those fields to the
# configured values.
# NOTE: This cannot be done before the ``task.add()`` call above,
# because then the ``ImportTask`` won't have an `album` for which
# it can set the fields.
if config['import']['set_fields']:
task.set_fields(session.lib)
@pipeline.mutator_stage
def plugin_stage(session, func, task):
"""A coroutine (pipeline stage) that calls the given function with
each non-skipped import task. These stages occur between applying
metadata changes and moving/copying/writing files.
"""
if task.skip:
return
func(session, task)
# Stage may modify DB, so re-load cached item data.
# FIXME Importer plugins should not modify the database but instead
# the albums and items attached to tasks.
task.reload()
@pipeline.stage
def manipulate_files(session, task):
"""A coroutine (pipeline stage) that performs necessary file
manipulations *after* items have been added to the library and
finalizes each task.
"""
if not task.skip:
if task.should_remove_duplicates:
task.remove_duplicates(session.lib)
if session.config['move']:
operation = MoveOperation.MOVE
elif session.config['copy']:
operation = MoveOperation.COPY
elif session.config['link']:
operation = MoveOperation.LINK
elif session.config['hardlink']:
operation = MoveOperation.HARDLINK
elif session.config['reflink']:
operation = MoveOperation.REFLINK
else:
operation = None
task.manipulate_files(
operation,
write=session.config['write'],
session=session,
)
# Progress, cleanup, and event.
task.finalize(session)
@pipeline.stage
def log_files(session, task):
"""A coroutine (pipeline stage) to log each file to be imported.
"""
if isinstance(task, SingletonImportTask):
log.info('Singleton: {0}', displayable_path(task.item['path']))
elif task.items:
log.info('Album: {0}', displayable_path(task.paths[0]))
for item in task.items:
log.info(' {0}', displayable_path(item['path']))
def group_albums(session):
"""A pipeline stage that groups the items of each task into albums
using their metadata.
Groups are identified using their artist and album fields. The
pipeline stage emits new album tasks for each discovered group.
"""
def group(item):
return (item.albumartist or item.artist, item.album)
task = None
while True:
task = yield task
if task.skip:
continue
tasks = []
sorted_items = sorted(task.items, key=group)
for _, items in itertools.groupby(sorted_items, group):
items = list(items)
task = ImportTask(task.toppath, [i.path for i in items],
items)
tasks += task.handle_created(session)
tasks.append(SentinelImportTask(task.toppath, task.paths))
task = pipeline.multiple(tasks)
MULTIDISC_MARKERS = (br'dis[ck]', br'cd')
MULTIDISC_PAT_FMT = br'^(.*%s[\W_]*)\d'
def is_subdir_of_any_in_list(path, dirs):
"""Returns True if path os a subdirectory of any directory in dirs
(a list). In other case, returns False.
"""
ancestors = ancestry(path)
return any(d in ancestors for d in dirs)
def albums_in_dir(path):
"""Recursively searches the given directory and returns an iterable
of (paths, items) where paths is a list of directories and items is
a list of Items that is probably an album. Specifically, any folder
containing any media files is an album.
"""
collapse_pat = collapse_paths = collapse_items = None
ignore = config['ignore'].as_str_seq()
ignore_hidden = config['ignore_hidden'].get(bool)
for root, dirs, files in sorted_walk(path, ignore=ignore,
ignore_hidden=ignore_hidden,
logger=log):
items = [os.path.join(root, f) for f in files]
# If we're currently collapsing the constituent directories in a
# multi-disc album, check whether we should continue collapsing
# and add the current directory. If so, just add the directory
# and move on to the next directory. If not, stop collapsing.
if collapse_paths:
if (is_subdir_of_any_in_list(root, collapse_paths)) or \
(collapse_pat and
collapse_pat.match(os.path.basename(root))):
# Still collapsing.
collapse_paths.append(root)
collapse_items += items
continue
else:
# Collapse finished. Yield the collapsed directory and
# proceed to process the current one.
if collapse_items:
yield collapse_paths, collapse_items
collapse_pat = collapse_paths = collapse_items = None
# Check whether this directory looks like the *first* directory
        # in a multi-disc sequence. There are two indicators: the
        # directory is named like part of a multi-disc sequence (e.g.,
        # "Title Disc 1") or it contains no items but only directories
        # that are named in this way.
start_collapsing = False
for marker in MULTIDISC_MARKERS:
# We're using replace on %s due to lack of .format() on bytestrings
p = MULTIDISC_PAT_FMT.replace(b'%s', marker)
marker_pat = re.compile(p, re.I)
match = marker_pat.match(os.path.basename(root))
# Is this directory the root of a nested multi-disc album?
if dirs and not items:
# Check whether all subdirectories have the same prefix.
start_collapsing = True
subdir_pat = None
for subdir in dirs:
subdir = util.bytestring_path(subdir)
# The first directory dictates the pattern for
# the remaining directories.
if not subdir_pat:
match = marker_pat.match(subdir)
if match:
match_group = re.escape(match.group(1))
subdir_pat = re.compile(
b''.join([b'^', match_group, br'\d']),
re.I
)
else:
start_collapsing = False
break
# Subsequent directories must match the pattern.
elif not subdir_pat.match(subdir):
start_collapsing = False
break
# If all subdirectories match, don't check other
# markers.
if start_collapsing:
break
# Is this directory the first in a flattened multi-disc album?
elif match:
start_collapsing = True
# Set the current pattern to match directories with the same
# prefix as this one, followed by a digit.
collapse_pat = re.compile(
b''.join([b'^', re.escape(match.group(1)), br'\d']),
re.I
)
break
# If either of the above heuristics indicated that this is the
# beginning of a multi-disc album, initialize the collapsed
# directory and item lists and check the next directory.
if start_collapsing:
# Start collapsing; continue to the next iteration.
collapse_paths = [root]
collapse_items = items
continue
# If it's nonempty, yield it.
if items:
yield [root], items
# Clear out any unfinished collapse.
if collapse_paths and collapse_items:
yield collapse_paths, collapse_items
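# Illustrative sketch of the multi-disc heuristic above (hedged):
#
#     >>> pat = re.compile(MULTIDISC_PAT_FMT.replace(b'%s', br'dis[ck]'),
#     ...                  re.I)
#     >>> bool(pat.match(b'Album Title (Disc 1)'))
#     True
#     >>> bool(pat.match(b'Album Title'))
#     False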
| 61,461
|
Python
|
.py
| 1,440
| 32.353472
| 86
| 0.605304
|
rembo10/headphones
| 3,370
| 601
| 527
|
GPL-3.0
|
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
|
9,247
|
mediafile.py
|
rembo10_headphones/lib/beets/mediafile.py
|
# This file is part of beets.
# Copyright 2016, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
import mediafile
import warnings
warnings.warn("beets.mediafile is deprecated; use mediafile instead")
# Import everything from the mediafile module into this module.
for key, value in mediafile.__dict__.items():
if key not in ['__name__']:
globals()[key] = value
del key, value, warnings, mediafile
| 963
|
Python
|
.py
| 21
| 44.047619
| 71
| 0.77588
|
rembo10/headphones
| 3,370
| 601
| 527
|
GPL-3.0
|
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
|
9,248
|
logging.py
|
rembo10_headphones/lib/beets/logging.py
|
# This file is part of beets.
# Copyright 2016, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""A drop-in replacement for the standard-library `logging` module that
allows {}-style log formatting on Python 2 and 3.
Provides everything the "logging" module does. The only difference is
that loggers instantiated by getLogger(name) use {}-style formatting.
"""
from copy import copy
from logging import * # noqa
import subprocess
import threading
def logsafe(val):
"""Coerce a potentially "problematic" value so it can be formatted
in a Unicode log string.
This works around a number of pitfalls when logging objects in
Python 2:
- Logging path names, which must be byte strings, requires
conversion for output.
- Some objects, including some exceptions, will crash when you call
`unicode(v)` while `str(v)` works fine. CalledProcessError is an
example.
"""
# Already Unicode.
if isinstance(val, str):
return val
# Bytestring: needs decoding.
elif isinstance(val, bytes):
# Blindly convert with UTF-8. Eventually, it would be nice to
# (a) only do this for paths, if they can be given a distinct
# type, and (b) warn the developer if they do this for other
# bytestrings.
return val.decode('utf-8', 'replace')
# A "problem" object: needs a workaround.
elif isinstance(val, subprocess.CalledProcessError):
try:
return str(val)
except UnicodeDecodeError:
# An object with a broken __unicode__ formatter. Use __str__
# instead.
return str(val).decode('utf-8', 'replace')
# Other objects are used as-is so field access, etc., still works in
# the format string.
else:
return val
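# Illustrative behavior sketch (hedged):
#
#     >>> logsafe(b'/m\xc3\xbcsic')   # bytes are decoded with 'replace'
#     '/müsic'
#     >>> logsafe('already text')
#     'already text'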
class StrFormatLogger(Logger):
"""A version of `Logger` that uses `str.format`-style formatting
instead of %-style formatting.
"""
class _LogMessage:
def __init__(self, msg, args, kwargs):
self.msg = msg
self.args = args
self.kwargs = kwargs
def __str__(self):
args = [logsafe(a) for a in self.args]
kwargs = {k: logsafe(v) for (k, v) in self.kwargs.items()}
return self.msg.format(*args, **kwargs)
def _log(self, level, msg, args, exc_info=None, extra=None, **kwargs):
"""Log msg.format(*args, **kwargs)"""
m = self._LogMessage(msg, args, kwargs)
return super()._log(level, m, (), exc_info, extra)
class ThreadLocalLevelLogger(Logger):
"""A version of `Logger` whose level is thread-local instead of shared.
"""
def __init__(self, name, level=NOTSET):
self._thread_level = threading.local()
self.default_level = NOTSET
super().__init__(name, level)
@property
def level(self):
try:
return self._thread_level.level
except AttributeError:
self._thread_level.level = self.default_level
            return self._thread_level.level
@level.setter
def level(self, value):
self._thread_level.level = value
def set_global_level(self, level):
"""Set the level on the current thread + the default value for all
threads.
"""
self.default_level = level
self.setLevel(level)
class BeetsLogger(ThreadLocalLevelLogger, StrFormatLogger):
pass
my_manager = copy(Logger.manager)
my_manager.loggerClass = BeetsLogger
def getLogger(name=None): # noqa
if name:
return my_manager.getLogger(name)
else:
return Logger.root
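# Usage sketch (hedged; the logger name is hypothetical). Keyword
# arguments to the logging call become `str.format` fields:
#
#     log = getLogger('beets.example')
#     log.set_global_level(DEBUG)
#     log.debug('importing {0} ({count} items)', 'foo', count=3)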
| 4,161
|
Python
|
.py
| 105
| 33.438095
| 75
| 0.672623
|
rembo10/headphones
| 3,370
| 601
| 527
|
GPL-3.0
|
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
|
9,249
|
art.py
|
rembo10_headphones/lib/beets/art.py
|
# This file is part of beets.
# Copyright 2016, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""High-level utilities for manipulating image files associated with
music and items' embedded album art.
"""
import subprocess
import platform
from tempfile import NamedTemporaryFile
import os
from beets.util import displayable_path, syspath, bytestring_path
from beets.util.artresizer import ArtResizer
import mediafile
def mediafile_image(image_path, maxwidth=None):
"""Return a `mediafile.Image` object for the path.
"""
with open(syspath(image_path), 'rb') as f:
data = f.read()
return mediafile.Image(data, type=mediafile.ImageType.front)
def get_art(log, item):
# Extract the art.
try:
mf = mediafile.MediaFile(syspath(item.path))
except mediafile.UnreadableFileError as exc:
log.warning('Could not extract art from {0}: {1}',
displayable_path(item.path), exc)
return
return mf.art
def embed_item(log, item, imagepath, maxwidth=None, itempath=None,
compare_threshold=0, ifempty=False, as_album=False, id3v23=None,
quality=0):
"""Embed an image into the item's media file.
"""
# Conditions and filters.
if compare_threshold:
if not check_art_similarity(log, item, imagepath, compare_threshold):
log.info('Image not similar; skipping.')
return
if ifempty and get_art(log, item):
log.info('media file already contained art')
return
if maxwidth and not as_album:
imagepath = resize_image(log, imagepath, maxwidth, quality)
# Get the `Image` object from the file.
try:
log.debug('embedding {0}', displayable_path(imagepath))
image = mediafile_image(imagepath, maxwidth)
except OSError as exc:
log.warning('could not read image file: {0}', exc)
return
# Make sure the image kind is safe (some formats only support PNG
# and JPEG).
if image.mime_type not in ('image/jpeg', 'image/png'):
log.info('not embedding image of unsupported type: {}',
image.mime_type)
return
item.try_write(path=itempath, tags={'images': [image]}, id3v23=id3v23)
def embed_album(log, album, maxwidth=None, quiet=False, compare_threshold=0,
ifempty=False, quality=0):
"""Embed album art into all of the album's items.
"""
imagepath = album.artpath
if not imagepath:
log.info('No album art present for {0}', album)
return
if not os.path.isfile(syspath(imagepath)):
log.info('Album art not found at {0} for {1}',
displayable_path(imagepath), album)
return
if maxwidth:
imagepath = resize_image(log, imagepath, maxwidth, quality)
log.info('Embedding album art into {0}', album)
for item in album.items():
embed_item(log, item, imagepath, maxwidth, None, compare_threshold,
ifempty, as_album=True, quality=quality)
def resize_image(log, imagepath, maxwidth, quality):
"""Returns path to an image resized to maxwidth and encoded with the
specified quality level.
"""
    log.debug('Resizing album art to {0} pixels wide and encoding at '
              'quality level {1}', maxwidth, quality)
imagepath = ArtResizer.shared.resize(maxwidth, syspath(imagepath),
quality=quality)
return imagepath
def check_art_similarity(log, item, imagepath, compare_threshold):
"""A boolean indicating if an image is similar to embedded item art.
"""
with NamedTemporaryFile(delete=True) as f:
art = extract(log, f.name, item)
if art:
is_windows = platform.system() == "Windows"
# Converting images to grayscale tends to minimize the weight
# of colors in the diff score. So we first convert both images
# to grayscale and then pipe them into the `compare` command.
# On Windows, ImageMagick doesn't support the magic \\?\ prefix
# on paths, so we pass `prefix=False` to `syspath`.
convert_cmd = ['convert', syspath(imagepath, prefix=False),
syspath(art, prefix=False),
'-colorspace', 'gray', 'MIFF:-']
compare_cmd = ['compare', '-metric', 'PHASH', '-', 'null:']
log.debug('comparing images with pipeline {} | {}',
convert_cmd, compare_cmd)
convert_proc = subprocess.Popen(
convert_cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
close_fds=not is_windows,
)
compare_proc = subprocess.Popen(
compare_cmd,
stdin=convert_proc.stdout,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
close_fds=not is_windows,
)
# Check the convert output. We're not interested in the
# standard output; that gets piped to the next stage.
convert_proc.stdout.close()
convert_stderr = convert_proc.stderr.read()
convert_proc.stderr.close()
convert_proc.wait()
if convert_proc.returncode:
log.debug(
'ImageMagick convert failed with status {}: {!r}',
convert_proc.returncode,
convert_stderr,
)
return
# Check the compare output.
stdout, stderr = compare_proc.communicate()
if compare_proc.returncode:
if compare_proc.returncode != 1:
log.debug('ImageMagick compare failed: {0}, {1}',
displayable_path(imagepath),
displayable_path(art))
return
out_str = stderr
else:
out_str = stdout
try:
phash_diff = float(out_str)
except ValueError:
log.debug('IM output is not a number: {0!r}', out_str)
return
log.debug('ImageMagick compare score: {0}', phash_diff)
return phash_diff <= compare_threshold
return True
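# Roughly the equivalent shell pipeline (illustrative; the image paths
# are hypothetical):
#
#     convert cover.jpg embedded.jpg -colorspace gray MIFF:- \
#         | compare -metric PHASH - null: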
def extract(log, outpath, item):
art = get_art(log, item)
outpath = bytestring_path(outpath)
if not art:
log.info('No album art present in {0}, skipping.', item)
return
# Add an extension to the filename.
ext = mediafile.image_extension(art)
if not ext:
log.warning('Unknown image type in {0}.',
displayable_path(item.path))
return
outpath += bytestring_path('.' + ext)
log.info('Extracting album art from: {0} to: {1}',
item, displayable_path(outpath))
with open(syspath(outpath), 'wb') as f:
f.write(art)
return outpath
def extract_first(log, outpath, items):
for item in items:
real_path = extract(log, outpath, item)
if real_path:
return real_path
def clear(log, lib, query):
items = lib.items(query)
log.info('Clearing album art from {0} items', len(items))
for item in items:
log.debug('Clearing art for {0}', item)
item.try_write(tags={'images': None})
| 7,924
|
Python
|
.py
| 186
| 32.994624
| 79
| 0.616364
|
rembo10/headphones
| 3,370
| 601
| 527
|
GPL-3.0
|
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
|
9,250
|
__init__.py
|
rembo10_headphones/lib/beets/__init__.py
|
# This file is part of beets.
# Copyright 2016, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
import confuse
from sys import stderr
__version__ = '1.6.0'
__author__ = 'Adrian Sampson <adrian@radbox.org>'
class IncludeLazyConfig(confuse.LazyConfig):
"""A version of Confuse's LazyConfig that also merges in data from
YAML files specified in an `include` setting.
"""
def read(self, user=True, defaults=True):
super().read(user, defaults)
try:
for view in self['include']:
self.set_file(view.as_filename())
except confuse.NotFoundError:
pass
except confuse.ConfigReadError as err:
stderr.write("configuration `import` failed: {}"
.format(err.reason))
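# Illustrative sketch (hedged; the file names are hypothetical): with
# the following in config.yaml, the listed files are merged into the
# configuration when it is read:
#
#     include:
#         - secrets.yaml
#         - paths.yaml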
config = IncludeLazyConfig('beets', __name__)
| 1,380
|
Python
|
.py
| 32
| 37.96875
| 71
| 0.708955
|
rembo10/headphones
| 3,370
| 601
| 527
|
GPL-3.0
|
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
|
9,251
|
random.py
|
rembo10_headphones/lib/beets/random.py
|
# This file is part of beets.
# Copyright 2016, Philippe Mongeau.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Get a random song or album from the library.
"""
import random
from operator import attrgetter
from itertools import groupby
def _length(obj, album):
"""Get the duration of an item or album.
"""
if album:
return sum(i.length for i in obj.items())
else:
return obj.length
def _equal_chance_permutation(objs, field='albumartist', random_gen=None):
"""Generate (lazily) a permutation of the objects where every group
with equal values for `field` have an equal chance of appearing in
any given position.
"""
rand = random_gen or random
# Group the objects by artist so we can sample from them.
key = attrgetter(field)
objs.sort(key=key)
objs_by_artists = {}
for artist, v in groupby(objs, key):
objs_by_artists[artist] = list(v)
# While we still have artists with music to choose from, pick one
# randomly and pick a track from that artist.
while objs_by_artists:
# Choose an artist and an object for that artist, removing
# this choice from the pool.
artist = rand.choice(list(objs_by_artists.keys()))
objs_from_artist = objs_by_artists[artist]
i = rand.randint(0, len(objs_from_artist) - 1)
yield objs_from_artist.pop(i)
# Remove the artist if we've used up all of its objects.
if not objs_from_artist:
del objs_by_artists[artist]
def _take(iter, num):
"""Return a list containing the first `num` values in `iter` (or
fewer, if the iterable ends early).
"""
out = []
for val in iter:
out.append(val)
num -= 1
if num <= 0:
break
return out
def _take_time(iter, secs, album):
"""Return a list containing the first values in `iter`, which should
be Item or Album objects, that add up to the given amount of time in
seconds.
"""
out = []
total_time = 0.0
for obj in iter:
length = _length(obj, album)
if total_time + length <= secs:
out.append(obj)
total_time += length
return out
def random_objs(objs, album, number=1, time=None, equal_chance=False,
random_gen=None):
"""Get a random subset of the provided `objs`.
If `number` is provided, produce that many matches. Otherwise, if
`time` is provided, instead select a list whose total time is close
to that number of minutes. If `equal_chance` is true, give each
artist an equal chance of being included so that artists with more
songs are not represented disproportionately.
"""
rand = random_gen or random
# Permute the objects either in a straightforward way or an
# artist-balanced way.
if equal_chance:
perm = _equal_chance_permutation(objs)
else:
perm = objs
rand.shuffle(perm) # N.B. This shuffles the original list.
    # Select objects by time or count.
if time:
return _take_time(perm, time * 60, album)
else:
return _take(perm, number)
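# Usage sketch (hedged; `items` is a hypothetical list of Item objects):
#
#     # Ten tracks, with each artist weighted equally:
#     picks = random_objs(list(items), album=False, number=10,
#                         equal_chance=True)
#     # Or roughly a 60-minute selection:
#     picks = random_objs(list(items), album=False, time=60)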
| 3,676
|
Python
|
.py
| 95
| 33.126316
| 74
| 0.678642
|
rembo10/headphones
| 3,370
| 601
| 527
|
GPL-3.0
|
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
|
9,252
|
library.py
|
rembo10_headphones/lib/beets/library.py
|
# This file is part of beets.
# Copyright 2016, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""The core data store and collection logic for beets.
"""
import os
import sys
import unicodedata
import time
import re
import string
import shlex
from beets import logging
from mediafile import MediaFile, UnreadableFileError
from beets import plugins
from beets import util
from beets.util import bytestring_path, syspath, normpath, samefile, \
MoveOperation, lazy_property
from beets.util.functemplate import template, Template
from beets import dbcore
from beets.dbcore import types
import beets
# To use the SQLite "blob" type, it doesn't suffice to provide a byte
# string; SQLite treats that as encoded text. Wrapping it in a
# `memoryview` tells it that we actually mean non-text data.
BLOB_TYPE = memoryview
log = logging.getLogger('beets')
# Library-specific query types.
class PathQuery(dbcore.FieldQuery):
"""A query that matches all items under a given path.
Matching can either be case-insensitive or case-sensitive. By
default, the behavior depends on the OS: case-insensitive on Windows
and case-sensitive otherwise.
"""
def __init__(self, field, pattern, fast=True, case_sensitive=None):
"""Create a path query. `pattern` must be a path, either to a
file or a directory.
`case_sensitive` can be a bool or `None`, indicating that the
behavior should depend on the filesystem.
"""
super().__init__(field, pattern, fast)
# By default, the case sensitivity depends on the filesystem
# that the query path is located on.
if case_sensitive is None:
path = util.bytestring_path(util.normpath(pattern))
case_sensitive = beets.util.case_sensitive(path)
self.case_sensitive = case_sensitive
# Use a normalized-case pattern for case-insensitive matches.
if not case_sensitive:
pattern = pattern.lower()
# Match the path as a single file.
self.file_path = util.bytestring_path(util.normpath(pattern))
# As a directory (prefix).
self.dir_path = util.bytestring_path(os.path.join(self.file_path, b''))
@classmethod
def is_path_query(cls, query_part):
"""Try to guess whether a unicode query part is a path query.
        Condition: a path separator precedes the colon and the path exists.
"""
colon = query_part.find(':')
if colon != -1:
query_part = query_part[:colon]
# Test both `sep` and `altsep` (i.e., both slash and backslash on
# Windows).
return (
(os.sep in query_part or
(os.altsep and os.altsep in query_part)) and
os.path.exists(syspath(normpath(query_part)))
)
def match(self, item):
path = item.path if self.case_sensitive else item.path.lower()
return (path == self.file_path) or path.startswith(self.dir_path)
def col_clause(self):
file_blob = BLOB_TYPE(self.file_path)
dir_blob = BLOB_TYPE(self.dir_path)
if self.case_sensitive:
query_part = '({0} = ?) || (substr({0}, 1, ?) = ?)'
else:
            query_part = ('(BYTELOWER({0}) = BYTELOWER(?)) || '
                          '(substr(BYTELOWER({0}), 1, ?) = BYTELOWER(?))')
return query_part.format(self.field), \
(file_blob, len(dir_blob), dir_blob)
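# Usage sketch (hedged; `lib` and the path are hypothetical):
#
#     q = PathQuery('path', '/music/Artist/Album')
#     items = lib.items(q)   # everything at or below that directory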
# Library-specific field types.
class DateType(types.Float):
# TODO representation should be `datetime` object
# TODO distinguish between date and time types
query = dbcore.query.DateQuery
def format(self, value):
return time.strftime(beets.config['time_format'].as_str(),
time.localtime(value or 0))
def parse(self, string):
try:
# Try a formatted date string.
return time.mktime(
time.strptime(string,
beets.config['time_format'].as_str())
)
except ValueError:
# Fall back to a plain timestamp number.
try:
return float(string)
except ValueError:
return self.null
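# Illustrative round trip (hedged; assumes the default time_format of
# '%Y-%m-%d %H:%M:%S'):
#
#     >>> t = DateType().parse('2016-01-01 00:00:00')   # epoch float
#     >>> DateType().format(t)
#     '2016-01-01 00:00:00'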
class PathType(types.Type):
"""A dbcore type for filesystem paths. These are represented as
`bytes` objects, in keeping with the Unix filesystem abstraction.
"""
sql = 'BLOB'
query = PathQuery
model_type = bytes
def __init__(self, nullable=False):
"""Create a path type object. `nullable` controls whether the
type may be missing, i.e., None.
"""
self.nullable = nullable
@property
def null(self):
if self.nullable:
return None
else:
return b''
def format(self, value):
return util.displayable_path(value)
def parse(self, string):
return normpath(bytestring_path(string))
def normalize(self, value):
if isinstance(value, str):
# Paths stored internally as encoded bytes.
return bytestring_path(value)
elif isinstance(value, BLOB_TYPE):
# We unwrap buffers to bytes.
return bytes(value)
else:
return value
def from_sql(self, sql_value):
return self.normalize(sql_value)
def to_sql(self, value):
if isinstance(value, bytes):
value = BLOB_TYPE(value)
return value
class MusicalKey(types.String):
"""String representing the musical key of a song.
The standard format is C, Cm, C#, C#m, etc.
"""
ENHARMONIC = {
r'db': 'c#',
r'eb': 'd#',
r'gb': 'f#',
r'ab': 'g#',
r'bb': 'a#',
}
null = None
def parse(self, key):
key = key.lower()
for flat, sharp in self.ENHARMONIC.items():
key = re.sub(flat, sharp, key)
key = re.sub(r'[\W\s]+minor', 'm', key)
key = re.sub(r'[\W\s]+major', '', key)
return key.capitalize()
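    # Illustrative normalization (hedged):
    #
    #     >>> MusicalKey().parse('Db minor')
    #     'C#m'
    #     >>> MusicalKey().parse('F# major')
    #     'F#'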
def normalize(self, key):
if key is None:
return None
else:
return self.parse(key)
class DurationType(types.Float):
"""Human-friendly (M:SS) representation of a time interval."""
query = dbcore.query.DurationQuery
def format(self, value):
if not beets.config['format_raw_length'].get(bool):
return beets.ui.human_seconds_short(value or 0.0)
else:
return value
def parse(self, string):
try:
            # Try to parse an M:SS-style string back to seconds.
return util.raw_seconds_short(string)
except ValueError:
# Fall back to a plain float.
try:
return float(string)
except ValueError:
return self.null
# Library-specific sort types.
class SmartArtistSort(dbcore.query.Sort):
"""Sort by artist (either album artist or track artist),
prioritizing the sort field over the raw field.
"""
def __init__(self, model_cls, ascending=True, case_insensitive=True):
self.album = model_cls is Album
self.ascending = ascending
self.case_insensitive = case_insensitive
def order_clause(self):
order = "ASC" if self.ascending else "DESC"
field = 'albumartist' if self.album else 'artist'
collate = 'COLLATE NOCASE' if self.case_insensitive else ''
return ('(CASE {0}_sort WHEN NULL THEN {0} '
'WHEN "" THEN {0} '
'ELSE {0}_sort END) {1} {2}').format(field, collate, order)
def sort(self, objs):
if self.album:
def field(a):
return a.albumartist_sort or a.albumartist
else:
def field(i):
return i.artist_sort or i.artist
if self.case_insensitive:
def key(x):
return field(x).lower()
else:
key = field
return sorted(objs, key=key, reverse=not self.ascending)
# Special path format key.
PF_KEY_DEFAULT = 'default'
# Exceptions.
class FileOperationError(Exception):
"""Indicates an error when interacting with a file on disk.
Possibilities include an unsupported media type, a permissions
error, and an unhandled Mutagen exception.
"""
def __init__(self, path, reason):
"""Create an exception describing an operation on the file at
`path` with the underlying (chained) exception `reason`.
"""
super().__init__(path, reason)
self.path = path
self.reason = reason
def text(self):
"""Get a string representing the error. Describes both the
underlying reason and the file path in question.
"""
return '{}: {}'.format(
util.displayable_path(self.path),
str(self.reason)
)
# define __str__ as text to avoid infinite loop on super() calls
# with @six.python_2_unicode_compatible
__str__ = text
class ReadError(FileOperationError):
"""An error while reading a file (i.e. in `Item.read`).
"""
def __str__(self):
return 'error reading ' + super().text()
class WriteError(FileOperationError):
"""An error while writing a file (i.e. in `Item.write`).
"""
def __str__(self):
return 'error writing ' + super().text()
# Item and Album model classes.
class LibModel(dbcore.Model):
"""Shared concrete functionality for Items and Albums.
"""
_format_config_key = None
"""Config key that specifies how an instance should be formatted.
"""
def _template_funcs(self):
funcs = DefaultTemplateFunctions(self, self._db).functions()
funcs.update(plugins.template_funcs())
return funcs
def store(self, fields=None):
super().store(fields)
plugins.send('database_change', lib=self._db, model=self)
def remove(self):
super().remove()
plugins.send('database_change', lib=self._db, model=self)
def add(self, lib=None):
super().add(lib)
plugins.send('database_change', lib=self._db, model=self)
def __format__(self, spec):
if not spec:
spec = beets.config[self._format_config_key].as_str()
assert isinstance(spec, str)
return self.evaluate_template(spec)
def __str__(self):
return format(self)
def __bytes__(self):
return self.__str__().encode('utf-8')
class FormattedItemMapping(dbcore.db.FormattedMapping):
"""Add lookup for album-level fields.
Album-level fields take precedence if `for_path` is true.
"""
ALL_KEYS = '*'
def __init__(self, item, included_keys=ALL_KEYS, for_path=False):
# We treat album and item keys specially here,
# so exclude transitive album keys from the model's keys.
super().__init__(item, included_keys=[],
for_path=for_path)
self.included_keys = included_keys
if included_keys == self.ALL_KEYS:
# Performance note: this triggers a database query.
self.model_keys = item.keys(computed=True, with_album=False)
else:
self.model_keys = included_keys
self.item = item
@lazy_property
def all_keys(self):
return set(self.model_keys).union(self.album_keys)
@lazy_property
def album_keys(self):
album_keys = []
if self.album:
if self.included_keys == self.ALL_KEYS:
# Performance note: this triggers a database query.
for key in self.album.keys(computed=True):
if key in Album.item_keys \
or key not in self.item._fields.keys():
album_keys.append(key)
else:
album_keys = self.included_keys
return album_keys
@property
def album(self):
return self.item._cached_album
def _get(self, key):
"""Get the value for a key, either from the album or the item.
Raise a KeyError for invalid keys.
"""
if self.for_path and key in self.album_keys:
return self._get_formatted(self.album, key)
elif key in self.model_keys:
return self._get_formatted(self.model, key)
elif key in self.album_keys:
return self._get_formatted(self.album, key)
else:
raise KeyError(key)
def __getitem__(self, key):
"""Get the value for a key. `artist` and `albumartist`
are fallback values for each other when not set.
"""
value = self._get(key)
# `artist` and `albumartist` fields fall back to one another.
# This is helpful in path formats when the album artist is unset
# on as-is imports.
try:
if key == 'artist' and not value:
return self._get('albumartist')
elif key == 'albumartist' and not value:
return self._get('artist')
except KeyError:
pass
return value
def __iter__(self):
return iter(self.all_keys)
def __len__(self):
return len(self.all_keys)
class Item(LibModel):
_table = 'items'
_flex_table = 'item_attributes'
_fields = {
'id': types.PRIMARY_ID,
'path': PathType(),
'album_id': types.FOREIGN_ID,
'title': types.STRING,
'artist': types.STRING,
'artist_sort': types.STRING,
'artist_credit': types.STRING,
'album': types.STRING,
'albumartist': types.STRING,
'albumartist_sort': types.STRING,
'albumartist_credit': types.STRING,
'genre': types.STRING,
'style': types.STRING,
'discogs_albumid': types.INTEGER,
'discogs_artistid': types.INTEGER,
'discogs_labelid': types.INTEGER,
'lyricist': types.STRING,
'composer': types.STRING,
'composer_sort': types.STRING,
'work': types.STRING,
'mb_workid': types.STRING,
'work_disambig': types.STRING,
'arranger': types.STRING,
'grouping': types.STRING,
'year': types.PaddedInt(4),
'month': types.PaddedInt(2),
'day': types.PaddedInt(2),
'track': types.PaddedInt(2),
'tracktotal': types.PaddedInt(2),
'disc': types.PaddedInt(2),
'disctotal': types.PaddedInt(2),
'lyrics': types.STRING,
'comments': types.STRING,
'bpm': types.INTEGER,
'comp': types.BOOLEAN,
'mb_trackid': types.STRING,
'mb_albumid': types.STRING,
'mb_artistid': types.STRING,
'mb_albumartistid': types.STRING,
'mb_releasetrackid': types.STRING,
'trackdisambig': types.STRING,
'albumtype': types.STRING,
'albumtypes': types.STRING,
'label': types.STRING,
'acoustid_fingerprint': types.STRING,
'acoustid_id': types.STRING,
'mb_releasegroupid': types.STRING,
'asin': types.STRING,
'isrc': types.STRING,
'catalognum': types.STRING,
'script': types.STRING,
'language': types.STRING,
'country': types.STRING,
'albumstatus': types.STRING,
'media': types.STRING,
'albumdisambig': types.STRING,
'releasegroupdisambig': types.STRING,
'disctitle': types.STRING,
'encoder': types.STRING,
'rg_track_gain': types.NULL_FLOAT,
'rg_track_peak': types.NULL_FLOAT,
'rg_album_gain': types.NULL_FLOAT,
'rg_album_peak': types.NULL_FLOAT,
'r128_track_gain': types.NullPaddedInt(6),
'r128_album_gain': types.NullPaddedInt(6),
'original_year': types.PaddedInt(4),
'original_month': types.PaddedInt(2),
'original_day': types.PaddedInt(2),
'initial_key': MusicalKey(),
'length': DurationType(),
'bitrate': types.ScaledInt(1000, 'kbps'),
'format': types.STRING,
'samplerate': types.ScaledInt(1000, 'kHz'),
'bitdepth': types.INTEGER,
'channels': types.INTEGER,
'mtime': DateType(),
'added': DateType(),
}
_search_fields = ('artist', 'title', 'comments',
'album', 'albumartist', 'genre')
_types = {
'data_source': types.STRING,
}
_media_fields = set(MediaFile.readable_fields()) \
.intersection(_fields.keys())
"""Set of item fields that are backed by `MediaFile` fields.
Any kind of field (fixed, flexible, and computed) may be a media
field. Only these fields are read from disk in `read` and written in
`write`.
"""
_media_tag_fields = set(MediaFile.fields()).intersection(_fields.keys())
"""Set of item fields that are backed by *writable* `MediaFile` tag
fields.
This excludes fields that represent audio data, such as `bitrate` or
`length`.
"""
_formatter = FormattedItemMapping
_sorts = {'artist': SmartArtistSort}
_format_config_key = 'format_item'
__album = None
"""Cached album object. Read-only."""
@property
def _cached_album(self):
"""The Album object that this item belongs to, if any, or
None if the item is a singleton or is not associated with a
library.
The instance is cached and refreshed on access.
DO NOT MODIFY!
If you want a copy to modify, use :meth:`get_album`.
"""
if not self.__album and self._db:
self.__album = self._db.get_album(self)
elif self.__album:
self.__album.load()
return self.__album
@_cached_album.setter
def _cached_album(self, album):
self.__album = album
@classmethod
def _getters(cls):
getters = plugins.item_field_getters()
getters['singleton'] = lambda i: i.album_id is None
getters['filesize'] = Item.try_filesize # In bytes.
return getters
@classmethod
def from_path(cls, path):
"""Creates a new item from the media file at the specified path.
"""
        # Initialize with values that aren't read from files.
i = cls(album_id=None)
i.read(path)
i.mtime = i.current_mtime() # Initial mtime.
return i
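    # Usage sketch (hedged; the path and `lib` are hypothetical):
    #
    #     item = Item.from_path(b'/music/track.flac')
    #     item.add(lib)   # attach the new item to a library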
def __setitem__(self, key, value):
"""Set the item's value for a standard field or a flexattr.
"""
# Encode unicode paths and read buffers.
if key == 'path':
if isinstance(value, str):
value = bytestring_path(value)
elif isinstance(value, BLOB_TYPE):
value = bytes(value)
elif key == 'album_id':
self._cached_album = None
changed = super()._setitem(key, value)
if changed and key in MediaFile.fields():
self.mtime = 0 # Reset mtime on dirty.
def __getitem__(self, key):
"""Get the value for a field, falling back to the album if
necessary. Raise a KeyError if the field is not available.
"""
try:
return super().__getitem__(key)
except KeyError:
if self._cached_album:
return self._cached_album[key]
raise
def __repr__(self):
# This must not use `with_album=True`, because that might access
# the database. When debugging, that is not guaranteed to succeed, and
# can even deadlock due to the database lock.
return '{}({})'.format(
type(self).__name__,
', '.join('{}={!r}'.format(k, self[k])
for k in self.keys(with_album=False)),
)
def keys(self, computed=False, with_album=True):
"""Get a list of available field names. `with_album`
controls whether the album's fields are included.
"""
keys = super().keys(computed=computed)
if with_album and self._cached_album:
keys = set(keys)
keys.update(self._cached_album.keys(computed=computed))
keys = list(keys)
return keys
def get(self, key, default=None, with_album=True):
"""Get the value for a given key or `default` if it does not
exist. Set `with_album` to false to skip album fallback.
"""
try:
return self._get(key, default, raise_=with_album)
except KeyError:
if self._cached_album:
return self._cached_album.get(key, default)
return default
def update(self, values):
"""Set all key/value pairs in the mapping. If mtime is
specified, it is not reset (as it might otherwise be).
"""
super().update(values)
if self.mtime == 0 and 'mtime' in values:
self.mtime = values['mtime']
def clear(self):
"""Set all key/value pairs to None."""
for key in self._media_tag_fields:
setattr(self, key, None)
def get_album(self):
"""Get the Album object that this item belongs to, if any, or
None if the item is a singleton or is not associated with a
library.
"""
if not self._db:
return None
return self._db.get_album(self)
# Interaction with file metadata.
def read(self, read_path=None):
"""Read the metadata from the associated file.
If `read_path` is specified, read metadata from that file
instead. Updates all the properties in `_media_fields`
from the media file.
Raises a `ReadError` if the file could not be read.
"""
if read_path is None:
read_path = self.path
else:
read_path = normpath(read_path)
try:
mediafile = MediaFile(syspath(read_path))
except UnreadableFileError as exc:
raise ReadError(read_path, exc)
for key in self._media_fields:
value = getattr(mediafile, key)
            if isinstance(value, int):
                if value.bit_length() > 63:
                    # Wider than SQLite's signed 64-bit INTEGER; drop it.
                    value = 0
self[key] = value
# Database's mtime should now reflect the on-disk value.
if read_path == self.path:
self.mtime = self.current_mtime()
self.path = read_path
def write(self, path=None, tags=None, id3v23=None):
"""Write the item's metadata to a media file.
All fields in `_media_fields` are written to disk according to
the values on this object.
`path` is the path of the mediafile to write the data to. It
defaults to the item's path.
        `tags` is a dictionary of additional metadata that should be
written to the file. (These tags need not be in `_media_fields`.)
`id3v23` will override the global `id3v23` config option if it is
set to something other than `None`.
Can raise either a `ReadError` or a `WriteError`.
"""
if path is None:
path = self.path
else:
path = normpath(path)
if id3v23 is None:
id3v23 = beets.config['id3v23'].get(bool)
# Get the data to write to the file.
item_tags = dict(self)
item_tags = {k: v for k, v in item_tags.items()
if k in self._media_fields} # Only write media fields.
if tags is not None:
item_tags.update(tags)
plugins.send('write', item=self, path=path, tags=item_tags)
# Open the file.
try:
mediafile = MediaFile(syspath(path), id3v23=id3v23)
except UnreadableFileError as exc:
raise ReadError(path, exc)
# Write the tags to the file.
mediafile.update(item_tags)
try:
mediafile.save()
except UnreadableFileError as exc:
raise WriteError(self.path, exc)
# The file has a new mtime.
if path == self.path:
self.mtime = self.current_mtime()
plugins.send('after_write', item=self, path=path)
def try_write(self, *args, **kwargs):
"""Calls `write()` but catches and logs `FileOperationError`
exceptions.
        Returns `False` if an exception was caught and `True` otherwise.
"""
try:
self.write(*args, **kwargs)
return True
except FileOperationError as exc:
log.error("{0}", exc)
return False
def try_sync(self, write, move, with_album=True):
"""Synchronize the item with the database and, possibly, updates its
tags on disk and its path (by moving the file).
`write` indicates whether to write new tags into the file. Similarly,
`move` controls whether the path should be updated. In the
latter case, files are *only* moved when they are inside their
library's directory (if any).
Similar to calling :meth:`write`, :meth:`move`, and :meth:`store`
(conditionally).
"""
if write:
self.try_write()
if move:
# Check whether this file is inside the library directory.
if self._db and self._db.directory in util.ancestry(self.path):
log.debug('moving {0} to synchronize path',
util.displayable_path(self.path))
self.move(with_album=with_album)
self.store()
# Files themselves.
def move_file(self, dest, operation=MoveOperation.MOVE):
"""Move, copy, link or hardlink the item's depending on `operation`,
updating the path value if the move succeeds.
If a file exists at `dest`, then it is slightly modified to be unique.
`operation` should be an instance of `util.MoveOperation`.
"""
if not util.samefile(self.path, dest):
dest = util.unique_path(dest)
if operation == MoveOperation.MOVE:
plugins.send("before_item_moved", item=self, source=self.path,
destination=dest)
util.move(self.path, dest)
plugins.send("item_moved", item=self, source=self.path,
destination=dest)
elif operation == MoveOperation.COPY:
util.copy(self.path, dest)
plugins.send("item_copied", item=self, source=self.path,
destination=dest)
elif operation == MoveOperation.LINK:
util.link(self.path, dest)
plugins.send("item_linked", item=self, source=self.path,
destination=dest)
elif operation == MoveOperation.HARDLINK:
util.hardlink(self.path, dest)
plugins.send("item_hardlinked", item=self, source=self.path,
destination=dest)
elif operation == MoveOperation.REFLINK:
util.reflink(self.path, dest, fallback=False)
plugins.send("item_reflinked", item=self, source=self.path,
destination=dest)
elif operation == MoveOperation.REFLINK_AUTO:
util.reflink(self.path, dest, fallback=True)
plugins.send("item_reflinked", item=self, source=self.path,
destination=dest)
else:
assert False, 'unknown MoveOperation'
# Either copying or moving succeeded, so update the stored path.
self.path = dest
def current_mtime(self):
"""Returns the current mtime of the file, rounded to the nearest
integer.
"""
return int(os.path.getmtime(syspath(self.path)))
def try_filesize(self):
"""Get the size of the underlying file in bytes.
If the file is missing, return 0 (and log a warning).
"""
try:
return os.path.getsize(syspath(self.path))
        except Exception as exc:  # `Exception` already covers `OSError`.
log.warning('could not get filesize: {0}', exc)
return 0
# Model methods.
def remove(self, delete=False, with_album=True):
"""Removes the item. If `delete`, then the associated file is
removed from disk. If `with_album`, then the item's album (if
        any) is removed if the item was the last one in the album.
"""
super().remove()
# Remove the album if it is empty.
if with_album:
album = self.get_album()
if album and not album.items():
album.remove(delete, False)
# Send a 'item_removed' signal to plugins
plugins.send('item_removed', item=self)
# Delete the associated file.
if delete:
util.remove(self.path)
util.prune_dirs(os.path.dirname(self.path), self._db.directory)
self._db._memotable = {}
def move(self, operation=MoveOperation.MOVE, basedir=None,
with_album=True, store=True):
"""Move the item to its designated location within the library
directory (provided by destination()). Subdirectories are
created as needed. If the operation succeeds, the item's path
field is updated to reflect the new location.
Instead of moving the item it can also be copied, linked or hardlinked
depending on `operation` which should be an instance of
`util.MoveOperation`.
`basedir` overrides the library base directory for the destination.
If the item is in an album and `with_album` is `True`, the album is
given an opportunity to move its art.
By default, the item is stored to the database if it is in the
database, so any dirty fields prior to the move() call will be written
as a side effect.
If `store` is `False` however, the item won't be stored and you'll
have to manually store it after invoking this method.
"""
self._check_db()
dest = self.destination(basedir=basedir)
# Create necessary ancestry for the move.
util.mkdirall(dest)
# Perform the move and store the change.
old_path = self.path
self.move_file(dest, operation)
if store:
self.store()
# If this item is in an album, move its art.
if with_album:
album = self.get_album()
if album:
album.move_art(operation)
if store:
album.store()
# Prune vacated directory.
if operation == MoveOperation.MOVE:
util.prune_dirs(os.path.dirname(old_path), self._db.directory)
# Templating.
def destination(self, fragment=False, basedir=None, platform=None,
path_formats=None, replacements=None):
"""Returns the path in the library directory designated for the
item (i.e., where the file ought to be). fragment makes this
method return just the path fragment underneath the root library
directory; the path is also returned as Unicode instead of
encoded as a bytestring. basedir can override the library's base
directory for the destination.
"""
self._check_db()
platform = platform or sys.platform
basedir = basedir or self._db.directory
path_formats = path_formats or self._db.path_formats
if replacements is None:
replacements = self._db.replacements
# Use a path format based on a query, falling back on the
# default.
for query, path_format in path_formats:
if query == PF_KEY_DEFAULT:
continue
query, _ = parse_query_string(query, type(self))
if query.match(self):
# The query matches the item! Use the corresponding path
# format.
break
else:
# No query matched; fall back to default.
for query, path_format in path_formats:
if query == PF_KEY_DEFAULT:
break
else:
assert False, "no default path format"
if isinstance(path_format, Template):
subpath_tmpl = path_format
else:
subpath_tmpl = template(path_format)
# Evaluate the selected template.
subpath = self.evaluate_template(subpath_tmpl, True)
# Prepare path for output: normalize Unicode characters.
if platform == 'darwin':
subpath = unicodedata.normalize('NFD', subpath)
else:
subpath = unicodedata.normalize('NFC', subpath)
if beets.config['asciify_paths']:
subpath = util.asciify_path(
subpath,
beets.config['path_sep_replace'].as_str()
)
maxlen = beets.config['max_filename_length'].get(int)
if not maxlen:
# When zero, try to determine from filesystem.
maxlen = util.max_filename_length(self._db.directory)
subpath, fellback = util.legalize_path(
subpath, replacements, maxlen,
os.path.splitext(self.path)[1], fragment
)
if fellback:
# Print an error message if legalization fell back to
# default replacements because of the maximum length.
log.warning(
'Fell back to default replacements when naming '
'file {}. Configure replacements to avoid lengthening '
'the filename.',
subpath
)
if fragment:
return util.as_string(subpath)
else:
return normpath(os.path.join(basedir, subpath))
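# --- Editor's illustration (not part of beets) --------------------------
# A minimal sketch of how the `Item` API above fits together: create an
# item from a media file, change a tag, and write it back. The path is
# hypothetical and the function is never called.
def _example_item_roundtrip():
    item = Item.from_path(b'/music/track.mp3')  # Reads tags via MediaFile.
    item.title = 'New Title'  # Dirties a media field; mtime is reset.
    item.try_write()          # Writes tags; logs any FileOperationError.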
class Album(LibModel):
"""Provides access to information about albums stored in a
library. Reflects the library's "albums" table, including album
art.
"""
_table = 'albums'
_flex_table = 'album_attributes'
_always_dirty = True
_fields = {
'id': types.PRIMARY_ID,
'artpath': PathType(True),
'added': DateType(),
'albumartist': types.STRING,
'albumartist_sort': types.STRING,
'albumartist_credit': types.STRING,
'album': types.STRING,
'genre': types.STRING,
'style': types.STRING,
'discogs_albumid': types.INTEGER,
'discogs_artistid': types.INTEGER,
'discogs_labelid': types.INTEGER,
'year': types.PaddedInt(4),
'month': types.PaddedInt(2),
'day': types.PaddedInt(2),
'disctotal': types.PaddedInt(2),
'comp': types.BOOLEAN,
'mb_albumid': types.STRING,
'mb_albumartistid': types.STRING,
'albumtype': types.STRING,
'albumtypes': types.STRING,
'label': types.STRING,
'mb_releasegroupid': types.STRING,
'asin': types.STRING,
'catalognum': types.STRING,
'script': types.STRING,
'language': types.STRING,
'country': types.STRING,
'albumstatus': types.STRING,
'albumdisambig': types.STRING,
'releasegroupdisambig': types.STRING,
'rg_album_gain': types.NULL_FLOAT,
'rg_album_peak': types.NULL_FLOAT,
'r128_album_gain': types.NullPaddedInt(6),
'original_year': types.PaddedInt(4),
'original_month': types.PaddedInt(2),
'original_day': types.PaddedInt(2),
}
_search_fields = ('album', 'albumartist', 'genre')
_types = {
'path': PathType(),
'data_source': types.STRING,
}
_sorts = {
'albumartist': SmartArtistSort,
'artist': SmartArtistSort,
}
item_keys = [
'added',
'albumartist',
'albumartist_sort',
'albumartist_credit',
'album',
'genre',
'style',
'discogs_albumid',
'discogs_artistid',
'discogs_labelid',
'year',
'month',
'day',
'disctotal',
'comp',
'mb_albumid',
'mb_albumartistid',
'albumtype',
'albumtypes',
'label',
'mb_releasegroupid',
'asin',
'catalognum',
'script',
'language',
'country',
'albumstatus',
'albumdisambig',
'releasegroupdisambig',
'rg_album_gain',
'rg_album_peak',
'r128_album_gain',
'original_year',
'original_month',
'original_day',
]
"""List of keys that are set on an album's items.
"""
_format_config_key = 'format_album'
@classmethod
def _getters(cls):
# In addition to plugin-provided computed fields, also expose
# the album's directory as `path`.
getters = plugins.album_field_getters()
getters['path'] = Album.item_dir
getters['albumtotal'] = Album._albumtotal
return getters
def items(self):
"""Returns an iterable over the items associated with this
album.
"""
return self._db.items(dbcore.MatchQuery('album_id', self.id))
def remove(self, delete=False, with_items=True):
"""Removes this album and all its associated items from the
library. If delete, then the items' files are also deleted
from disk, along with any album art. The directories
containing the album are also removed (recursively) if empty.
Set with_items to False to avoid removing the album's items.
"""
super().remove()
# Send a 'album_removed' signal to plugins
plugins.send('album_removed', album=self)
# Delete art file.
if delete:
artpath = self.artpath
if artpath:
util.remove(artpath)
# Remove (and possibly delete) the constituent items.
if with_items:
for item in self.items():
item.remove(delete, False)
def move_art(self, operation=MoveOperation.MOVE):
"""Move, copy, link or hardlink (depending on `operation`) any
existing album art so that it remains in the same directory as
the items.
`operation` should be an instance of `util.MoveOperation`.
"""
old_art = self.artpath
if not old_art:
return
if not os.path.exists(old_art):
log.error('removing reference to missing album art file {}',
util.displayable_path(old_art))
self.artpath = None
return
new_art = self.art_destination(old_art)
if new_art == old_art:
return
new_art = util.unique_path(new_art)
log.debug('moving album art {0} to {1}',
util.displayable_path(old_art),
util.displayable_path(new_art))
if operation == MoveOperation.MOVE:
util.move(old_art, new_art)
util.prune_dirs(os.path.dirname(old_art), self._db.directory)
elif operation == MoveOperation.COPY:
util.copy(old_art, new_art)
elif operation == MoveOperation.LINK:
util.link(old_art, new_art)
elif operation == MoveOperation.HARDLINK:
util.hardlink(old_art, new_art)
elif operation == MoveOperation.REFLINK:
util.reflink(old_art, new_art, fallback=False)
elif operation == MoveOperation.REFLINK_AUTO:
util.reflink(old_art, new_art, fallback=True)
else:
assert False, 'unknown MoveOperation'
self.artpath = new_art
def move(self, operation=MoveOperation.MOVE, basedir=None, store=True):
"""Move, copy, link or hardlink (depending on `operation`)
all items to their destination. Any album art moves along with them.
`basedir` overrides the library base directory for the destination.
`operation` should be an instance of `util.MoveOperation`.
By default, the album is stored to the database, persisting any
modifications to its metadata. If `store` is `False` however,
the album is not stored automatically, and you'll have to manually
store it after invoking this method.
"""
basedir = basedir or self._db.directory
# Ensure new metadata is available to items for destination
# computation.
if store:
self.store()
# Move items.
items = list(self.items())
for item in items:
item.move(operation, basedir=basedir, with_album=False,
store=store)
# Move art.
self.move_art(operation)
if store:
self.store()
def item_dir(self):
"""Returns the directory containing the album's first item,
provided that such an item exists.
"""
item = self.items().get()
if not item:
raise ValueError('empty album for album id %d' % self.id)
return os.path.dirname(item.path)
def _albumtotal(self):
"""Return the total number of tracks on all discs on the album
"""
if self.disctotal == 1 or not beets.config['per_disc_numbering']:
return self.items()[0].tracktotal
counted = []
total = 0
for item in self.items():
if item.disc in counted:
continue
total += item.tracktotal
counted.append(item.disc)
if len(counted) == self.disctotal:
break
return total
def art_destination(self, image, item_dir=None):
"""Returns a path to the destination for the album art image
for the album. `image` is the path of the image that will be
moved there (used for its extension).
The path construction uses the existing path of the album's
items, so the album must contain at least one item or
item_dir must be provided.
"""
image = bytestring_path(image)
item_dir = item_dir or self.item_dir()
filename_tmpl = template(
beets.config['art_filename'].as_str())
subpath = self.evaluate_template(filename_tmpl, True)
if beets.config['asciify_paths']:
subpath = util.asciify_path(
subpath,
beets.config['path_sep_replace'].as_str()
)
subpath = util.sanitize_path(subpath,
replacements=self._db.replacements)
subpath = bytestring_path(subpath)
_, ext = os.path.splitext(image)
dest = os.path.join(item_dir, subpath + ext)
return bytestring_path(dest)
def set_art(self, path, copy=True):
"""Sets the album's cover art to the image at the given path.
The image is copied (or moved) into place, replacing any
existing art.
Sends an 'art_set' event with `self` as the sole argument.
"""
path = bytestring_path(path)
oldart = self.artpath
artdest = self.art_destination(path)
if oldart and samefile(path, oldart):
# Art already set.
return
elif samefile(path, artdest):
# Art already in place.
self.artpath = path
return
# Normal operation.
if oldart == artdest:
util.remove(oldart)
artdest = util.unique_path(artdest)
if copy:
util.copy(path, artdest)
else:
util.move(path, artdest)
self.artpath = artdest
plugins.send('art_set', album=self)
def store(self, fields=None):
"""Update the database with the album information. The album's
tracks are also updated.
:param fields: The fields to be stored. If not specified, all fields
will be.
"""
# Get modified track fields.
track_updates = {}
for key in self.item_keys:
if key in self._dirty:
track_updates[key] = self[key]
with self._db.transaction():
super().store(fields)
if track_updates:
for item in self.items():
for key, value in track_updates.items():
item[key] = value
item.store()
def try_sync(self, write, move):
"""Synchronize the album and its items with the database.
Optionally, also write any new tags into the files and update
their paths.
`write` indicates whether to write tags to the item files, and
`move` controls whether files (both audio and album art) are
moved.
"""
self.store()
for item in self.items():
item.try_sync(write, move)
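# --- Editor's illustration (not part of beets) --------------------------
# Sketch of how `Album.store` propagates `item_keys` fields to tracks:
# setting an album-level field and storing the album rewrites the same
# field on every item. `lib` is assumed to be a `Library` instance.
def _example_album_field_propagation(lib):
    album = lib.albums('albumartist:Beatles').get()
    if album:
        album.genre = 'Rock'  # `genre` is listed in Album.item_keys.
        album.store()         # Items pick up the new genre here.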
# Query construction helpers.
def parse_query_parts(parts, model_cls):
"""Given a beets query string as a list of components, return the
`Query` and `Sort` they represent.
Like `dbcore.parse_sorted_query`, with beets query prefixes and
special path query detection.
"""
# Get query types and their prefix characters.
prefixes = {':': dbcore.query.RegexpQuery}
prefixes.update(plugins.queries())
# Special-case path-like queries, which are non-field queries
# containing path separators (/).
path_parts = []
non_path_parts = []
for s in parts:
if PathQuery.is_path_query(s):
path_parts.append(s)
else:
non_path_parts.append(s)
case_insensitive = beets.config['sort_case_insensitive'].get(bool)
query, sort = dbcore.parse_sorted_query(
model_cls, non_path_parts, prefixes, case_insensitive
)
# Add path queries to aggregate query.
# Match field / flexattr depending on whether the model has the path field
fast_path_query = 'path' in model_cls._fields
query.subqueries += [PathQuery('path', s, fast_path_query)
for s in path_parts]
return query, sort
def parse_query_string(s, model_cls):
"""Given a beets query string, return the `Query` and `Sort` they
represent.
The string is split into components using shell-like syntax.
"""
message = f"Query is not unicode: {s!r}"
assert isinstance(s, str), message
try:
parts = shlex.split(s)
except ValueError as exc:
raise dbcore.InvalidQueryError(s, exc)
return parse_query_parts(parts, model_cls)
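# --- Editor's illustration (not part of beets) --------------------------
# Sketch of the two parsing entry points above: the query string is split
# shell-style, then each part becomes a subquery (with `:` introducing a
# regex query and path separators triggering a path query).
def _example_parse_query():
    query, sort = parse_query_string('artist:Miles year:1959', Item)
    # `query` can now match items in memory or compile to SQL:
    # query.match(some_item) -> bool
    return query, sort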
def _sqlite_bytelower(bytestring):
""" A custom ``bytelower`` sqlite function so we can compare
bytestrings in a semi case insensitive fashion. This is to work
around sqlite builds are that compiled with
``-DSQLITE_LIKE_DOESNT_MATCH_BLOBS``. See
``https://github.com/beetbox/beets/issues/2172`` for details.
"""
return bytestring.lower()
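# Editor's note: for example, _sqlite_bytelower(b'FooBAR') == b'foobar',
# which lets path queries compare BLOBs case-insensitively on such builds.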
# The Library: interface to the database.
class Library(dbcore.Database):
"""A database of music containing songs and albums.
"""
_models = (Item, Album)
def __init__(self, path='library.blb',
directory='~/Music',
path_formats=((PF_KEY_DEFAULT,
'$artist/$album/$track $title'),),
replacements=None):
timeout = beets.config['timeout'].as_number()
super().__init__(path, timeout=timeout)
self.directory = bytestring_path(normpath(directory))
self.path_formats = path_formats
self.replacements = replacements
self._memotable = {} # Used for template substitution performance.
def _create_connection(self):
conn = super()._create_connection()
conn.create_function('bytelower', 1, _sqlite_bytelower)
return conn
# Adding objects to the database.
def add(self, obj):
"""Add the :class:`Item` or :class:`Album` object to the library
database. Return the object's new id.
"""
obj.add(self)
self._memotable = {}
return obj.id
def add_album(self, items):
"""Create a new album consisting of a list of items.
The items are added to the database if they don't yet have an
        ID. Return a new :class:`Album` object. The list of items must
        not be empty.
"""
if not items:
raise ValueError('need at least one item')
# Create the album structure using metadata from the first item.
values = {key: items[0][key] for key in Album.item_keys}
album = Album(self, **values)
# Add the album structure and set the items' album_id fields.
# Store or add the items.
with self.transaction():
album.add(self)
for item in items:
item.album_id = album.id
if item.id is None:
item.add(self)
else:
item.store()
return album
# Querying.
def _fetch(self, model_cls, query, sort=None):
"""Parse a query and fetch. If a order specification is present
in the query string the `sort` argument is ignored.
"""
# Parse the query, if necessary.
try:
parsed_sort = None
if isinstance(query, str):
query, parsed_sort = parse_query_string(query, model_cls)
elif isinstance(query, (list, tuple)):
query, parsed_sort = parse_query_parts(query, model_cls)
except dbcore.query.InvalidQueryArgumentValueError as exc:
raise dbcore.InvalidQueryError(query, exc)
# Any non-null sort specified by the parsed query overrides the
# provided sort.
if parsed_sort and not isinstance(parsed_sort, dbcore.query.NullSort):
sort = parsed_sort
return super()._fetch(
model_cls, query, sort
)
@staticmethod
def get_default_album_sort():
"""Get a :class:`Sort` object for albums from the config option.
"""
return dbcore.sort_from_strings(
Album, beets.config['sort_album'].as_str_seq())
@staticmethod
def get_default_item_sort():
"""Get a :class:`Sort` object for items from the config option.
"""
return dbcore.sort_from_strings(
Item, beets.config['sort_item'].as_str_seq())
def albums(self, query=None, sort=None):
"""Get :class:`Album` objects matching the query.
"""
return self._fetch(Album, query, sort or self.get_default_album_sort())
def items(self, query=None, sort=None):
"""Get :class:`Item` objects matching the query.
"""
return self._fetch(Item, query, sort or self.get_default_item_sort())
# Convenience accessors.
def get_item(self, id):
"""Fetch an :class:`Item` by its ID. Returns `None` if no match is
found.
"""
return self._get(Item, id)
def get_album(self, item_or_id):
"""Given an album ID or an item associated with an album, return
an :class:`Album` object for the album. If no such album exists,
returns `None`.
"""
if isinstance(item_or_id, int):
album_id = item_or_id
else:
album_id = item_or_id.album_id
if album_id is None:
return None
return self._get(Album, album_id)
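# --- Editor's illustration (not part of beets) --------------------------
# Sketch of basic `Library` usage, assuming beets' global configuration
# has been loaded. ':memory:' gives a throwaway SQLite database; the
# directory is hypothetical.
def _example_library_usage():
    lib = Library(':memory:', directory='/tmp/music')
    for item in lib.items('artist:Nina'):  # Parsed via parse_query_string.
        print(item.title)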
# Default path template resources.
def _int_arg(s):
"""Convert a string argument to an integer for use in a template
function. May raise a ValueError.
"""
return int(s.strip())
class DefaultTemplateFunctions:
"""A container class for the default functions provided to path
templates. These functions are contained in an object to provide
additional context to the functions -- specifically, the Item being
evaluated.
"""
_prefix = 'tmpl_'
def __init__(self, item=None, lib=None):
"""Parametrize the functions. If `item` or `lib` is None, then
some functions (namely, ``aunique``) will always evaluate to the
empty string.
"""
self.item = item
self.lib = lib
def functions(self):
"""Returns a dictionary containing the functions defined in this
object. The keys are function names (as exposed in templates)
and the values are Python functions.
"""
out = {}
for key in self._func_names:
out[key[len(self._prefix):]] = getattr(self, key)
return out
@staticmethod
def tmpl_lower(s):
"""Convert a string to lower case."""
return s.lower()
@staticmethod
def tmpl_upper(s):
"""Covert a string to upper case."""
return s.upper()
@staticmethod
def tmpl_title(s):
"""Convert a string to title case."""
return string.capwords(s)
@staticmethod
def tmpl_left(s, chars):
"""Get the leftmost characters of a string."""
return s[0:_int_arg(chars)]
@staticmethod
def tmpl_right(s, chars):
"""Get the rightmost characters of a string."""
return s[-_int_arg(chars):]
@staticmethod
def tmpl_if(condition, trueval, falseval=''):
"""If ``condition`` is nonempty and nonzero, emit ``trueval``;
otherwise, emit ``falseval`` (if provided).
"""
try:
int_condition = _int_arg(condition)
except ValueError:
if condition.lower() == "false":
return falseval
else:
condition = int_condition
if condition:
return trueval
else:
return falseval
@staticmethod
def tmpl_asciify(s):
"""Translate non-ASCII characters to their ASCII equivalents.
"""
return util.asciify_path(s, beets.config['path_sep_replace'].as_str())
@staticmethod
def tmpl_time(s, fmt):
"""Format a time value using `strftime`.
"""
cur_fmt = beets.config['time_format'].as_str()
return time.strftime(fmt, time.strptime(s, cur_fmt))
def tmpl_aunique(self, keys=None, disam=None, bracket=None):
"""Generate a string that is guaranteed to be unique among all
        albums in the library that share the same set of keys. A field
        from "disam" is used in the string if one is sufficient to
disambiguate the albums. Otherwise, a fallback opaque value is
used. Both "keys" and "disam" should be given as
whitespace-separated lists of field names, while "bracket" is a
pair of characters to be used as brackets surrounding the
disambiguator or empty to have no brackets.
"""
# Fast paths: no album, no item or library, or memoized value.
if not self.item or not self.lib:
return ''
if isinstance(self.item, Item):
album_id = self.item.album_id
elif isinstance(self.item, Album):
album_id = self.item.id
if album_id is None:
return ''
memokey = ('aunique', keys, disam, album_id)
memoval = self.lib._memotable.get(memokey)
if memoval is not None:
return memoval
keys = keys or beets.config['aunique']['keys'].as_str()
disam = disam or beets.config['aunique']['disambiguators'].as_str()
if bracket is None:
bracket = beets.config['aunique']['bracket'].as_str()
keys = keys.split()
disam = disam.split()
# Assign a left and right bracket or leave blank if argument is empty.
if len(bracket) == 2:
bracket_l = bracket[0]
bracket_r = bracket[1]
else:
bracket_l = ''
bracket_r = ''
album = self.lib.get_album(album_id)
if not album:
# Do nothing for singletons.
self.lib._memotable[memokey] = ''
return ''
# Find matching albums to disambiguate with.
subqueries = []
for key in keys:
value = album.get(key, '')
# Use slow queries for flexible attributes.
fast = key in album.item_keys
subqueries.append(dbcore.MatchQuery(key, value, fast))
albums = self.lib.albums(dbcore.AndQuery(subqueries))
        # If there's only one album matching these details, then do
# nothing.
if len(albums) == 1:
self.lib._memotable[memokey] = ''
return ''
# Find the first disambiguator that distinguishes the albums.
for disambiguator in disam:
# Get the value for each album for the current field.
disam_values = {a.get(disambiguator, '') for a in albums}
# If the set of unique values is equal to the number of
# albums in the disambiguation set, we're done -- this is
# sufficient disambiguation.
if len(disam_values) == len(albums):
break
else:
# No disambiguator distinguished all fields.
res = f' {bracket_l}{album.id}{bracket_r}'
self.lib._memotable[memokey] = res
return res
# Flatten disambiguation value into a string.
disam_value = album.formatted(for_path=True).get(disambiguator)
# Return empty string if disambiguator is empty.
if disam_value:
res = f' {bracket_l}{disam_value}{bracket_r}'
else:
res = ''
self.lib._memotable[memokey] = res
return res
@staticmethod
def tmpl_first(s, count=1, skip=0, sep='; ', join_str='; '):
""" Gets the item(s) from x to y in a string separated by something
and join then with something
:param s: the string
:param count: The number of items included
:param skip: The number of items skipped
:param sep: the separator. Usually is '; ' (default) or '/ '
:param join_str: the string which will join the items, default '; '.
"""
skip = int(skip)
count = skip + int(count)
return join_str.join(s.split(sep)[skip:count])
def tmpl_ifdef(self, field, trueval='', falseval=''):
""" If field exists return trueval or the field (default)
otherwise, emit return falseval (if provided).
:param field: The name of the field
:param trueval: The string if the condition is true
:param falseval: The string if the condition is false
:return: The string, based on condition
"""
if field in self.item:
return trueval if trueval else self.item.formatted().get(field)
else:
return falseval
# Get the name of tmpl_* functions in the above class.
DefaultTemplateFunctions._func_names = \
[s for s in dir(DefaultTemplateFunctions)
if s.startswith(DefaultTemplateFunctions._prefix)]
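# --- Editor's illustration (not part of beets) --------------------------
# The `_prefix` convention above exposes each `tmpl_*` method to path
# templates under its suffix; e.g. `tmpl_upper` services `%upper{...}`.
def _example_template_functions():
    funcs = DefaultTemplateFunctions().functions()
    assert funcs['upper']('foo') == 'FOO'
    assert funcs['first']('a; b; c') == 'a'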
| 59,076
|
Python
|
.py
| 1,469
| 30.761743
| 79
| 0.602108
|
rembo10/headphones
| 3,370
| 601
| 527
|
GPL-3.0
|
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
|
9,253
|
__init__.py
|
rembo10_headphones/lib/beets/ui/__init__.py
|
# This file is part of beets.
# Copyright 2016, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""This module contains all of the core logic for beets' command-line
interface. To invoke the CLI, just call beets.ui.main(). The actual
CLI commands are implemented in the ui.commands module.
"""
import optparse
import textwrap
import sys
from difflib import SequenceMatcher
import sqlite3
import errno
import re
import struct
import traceback
import os.path
from beets import logging
from beets import library
from beets import plugins
from beets import util
from beets.util.functemplate import template
from beets import config
from beets.util import as_string
from beets.autotag import mb
from beets.dbcore import query as db_query
from beets.dbcore import db
import confuse
# On Windows platforms, use colorama to support "ANSI" terminal colors.
if sys.platform == 'win32':
try:
import colorama
except ImportError:
pass
else:
colorama.init()
log = logging.getLogger('beets')
if not log.handlers:
log.addHandler(logging.StreamHandler())
log.propagate = False # Don't propagate to root handler.
PF_KEY_QUERIES = {
'comp': 'comp:true',
'singleton': 'singleton:true',
}
class UserError(Exception):
"""UI exception. Commands should throw this in order to display
nonrecoverable errors to the user.
"""
# Encoding utilities.
def _in_encoding():
"""Get the encoding to use for *inputting* strings from the console.
"""
return _stream_encoding(sys.stdin)
def _out_encoding():
"""Get the encoding to use for *outputting* strings to the console.
"""
return _stream_encoding(sys.stdout)
def _stream_encoding(stream, default='utf-8'):
"""A helper for `_in_encoding` and `_out_encoding`: get the stream's
    preferred encoding, using a configured override when one is set and
    a default fallback when no encoding can be determined.
"""
# Configured override?
encoding = config['terminal_encoding'].get()
if encoding:
return encoding
# For testing: When sys.stdout or sys.stdin is a StringIO under the
# test harness, it doesn't have an `encoding` attribute. Just use
# UTF-8.
if not hasattr(stream, 'encoding'):
return default
# Python's guessed output stream encoding, or UTF-8 as a fallback
# (e.g., when piped to a file).
return stream.encoding or default
def decargs(arglist):
"""Given a list of command-line argument bytestrings, attempts to
decode them to Unicode strings when running under Python 2.
"""
return arglist
def print_(*strings, **kwargs):
"""Like print, but rather than raising an error when a character
is not in the terminal's encoding's character set, just silently
replaces it.
The arguments must be Unicode strings: `unicode` on Python 2; `str` on
Python 3.
The `end` keyword argument behaves similarly to the built-in `print`
(it defaults to a newline).
"""
if not strings:
strings = ['']
assert isinstance(strings[0], str)
txt = ' '.join(strings)
txt += kwargs.get('end', '\n')
# Encode the string and write it to stdout.
# On Python 3, sys.stdout expects text strings and uses the
# exception-throwing encoding error policy. To avoid throwing
# errors and use our configurable encoding override, we use the
# underlying bytes buffer instead.
if hasattr(sys.stdout, 'buffer'):
out = txt.encode(_out_encoding(), 'replace')
sys.stdout.buffer.write(out)
sys.stdout.buffer.flush()
else:
# In our test harnesses (e.g., DummyOut), sys.stdout.buffer
# does not exist. We instead just record the text string.
sys.stdout.write(txt)
# Configuration wrappers.
def _bool_fallback(a, b):
"""Given a boolean or None, return the original value or a fallback.
"""
if a is None:
assert isinstance(b, bool)
return b
else:
assert isinstance(a, bool)
return a
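# Editor's note: e.g. _bool_fallback(None, True) -> True, while
# _bool_fallback(False, True) -> False; an explicit option always wins
# over the configured fallback in the two helpers below.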
def should_write(write_opt=None):
"""Decide whether a command that updates metadata should also write
tags, using the importer configuration as the default.
"""
return _bool_fallback(write_opt, config['import']['write'].get(bool))
def should_move(move_opt=None):
"""Decide whether a command that updates metadata should also move
files when they're inside the library, using the importer
configuration as the default.
Specifically, commands should move files after metadata updates only
when the importer is configured *either* to move *or* to copy files.
They should avoid moving files when the importer is configured not
to touch any filenames.
"""
return _bool_fallback(
move_opt,
config['import']['move'].get(bool) or
config['import']['copy'].get(bool)
)
# Input prompts.
def input_(prompt=None):
"""Like `input`, but decodes the result to a Unicode string.
Raises a UserError if stdin is not available. The prompt is sent to
    stdout rather than stderr. A space is printed between the prompt and
    the input cursor.
"""
# raw_input incorrectly sends prompts to stderr, not stdout, so we
# use print_() explicitly to display prompts.
# https://bugs.python.org/issue1927
if prompt:
print_(prompt, end=' ')
try:
resp = input()
except EOFError:
raise UserError('stdin stream ended while input required')
return resp
def input_options(options, require=False, prompt=None, fallback_prompt=None,
numrange=None, default=None, max_width=72):
"""Prompts a user for input. The sequence of `options` defines the
choices the user has. A single-letter shortcut is inferred for each
option; the user's choice is returned as that single, lower-case
letter. The options should be provided as lower-case strings unless
a particular shortcut is desired; in that case, only that letter
should be capitalized.
By default, the first option is the default. `default` can be provided to
override this. If `require` is provided, then there is no default. The
prompt and fallback prompt are also inferred but can be overridden.
    If numrange is provided, it is a pair of `(low, high)` (both ints)
indicating that, in addition to `options`, the user may enter an
integer in that inclusive range.
`max_width` specifies the maximum number of columns in the
automatically generated prompt string.
"""
# Assign single letters to each option. Also capitalize the options
# to indicate the letter.
letters = {}
display_letters = []
capitalized = []
first = True
for option in options:
# Is a letter already capitalized?
for letter in option:
if letter.isalpha() and letter.upper() == letter:
found_letter = letter
break
else:
# Infer a letter.
for letter in option:
if not letter.isalpha():
continue # Don't use punctuation.
if letter not in letters:
found_letter = letter
break
else:
raise ValueError('no unambiguous lettering found')
letters[found_letter.lower()] = option
index = option.index(found_letter)
# Mark the option's shortcut letter for display.
if not require and (
(default is None and not numrange and first) or
(isinstance(default, str) and
found_letter.lower() == default.lower())):
# The first option is the default; mark it.
show_letter = '[%s]' % found_letter.upper()
is_default = True
else:
show_letter = found_letter.upper()
is_default = False
# Colorize the letter shortcut.
show_letter = colorize('action_default' if is_default else 'action',
show_letter)
# Insert the highlighted letter back into the word.
capitalized.append(
option[:index] + show_letter + option[index + 1:]
)
display_letters.append(found_letter.upper())
first = False
# The default is just the first option if unspecified.
if require:
default = None
elif default is None:
if numrange:
default = numrange[0]
else:
default = display_letters[0].lower()
# Make a prompt if one is not provided.
if not prompt:
prompt_parts = []
prompt_part_lengths = []
if numrange:
if isinstance(default, int):
default_name = str(default)
default_name = colorize('action_default', default_name)
tmpl = '# selection (default %s)'
prompt_parts.append(tmpl % default_name)
prompt_part_lengths.append(len(tmpl % str(default)))
else:
prompt_parts.append('# selection')
prompt_part_lengths.append(len(prompt_parts[-1]))
prompt_parts += capitalized
prompt_part_lengths += [len(s) for s in options]
# Wrap the query text.
prompt = ''
line_length = 0
for i, (part, length) in enumerate(zip(prompt_parts,
prompt_part_lengths)):
# Add punctuation.
if i == len(prompt_parts) - 1:
part += '?'
else:
part += ','
length += 1
# Choose either the current line or the beginning of the next.
if line_length + length + 1 > max_width:
prompt += '\n'
line_length = 0
if line_length != 0:
# Not the beginning of the line; need a space.
part = ' ' + part
length += 1
prompt += part
line_length += length
# Make a fallback prompt too. This is displayed if the user enters
# something that is not recognized.
if not fallback_prompt:
fallback_prompt = 'Enter one of '
if numrange:
fallback_prompt += '%i-%i, ' % numrange
fallback_prompt += ', '.join(display_letters) + ':'
resp = input_(prompt)
while True:
resp = resp.strip().lower()
# Try default option.
if default is not None and not resp:
resp = default
# Try an integer input if available.
if numrange:
try:
resp = int(resp)
except ValueError:
pass
else:
low, high = numrange
if low <= resp <= high:
return resp
else:
resp = None
# Try a normal letter input.
if resp:
resp = resp[0]
if resp in letters:
return resp
# Prompt for new input.
resp = input_(fallback_prompt)
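# --- Editor's illustration (not part of beets) --------------------------
# Sketch of `input_options` in use: shortcut letters are inferred (or
# taken from a pre-capitalized letter), and the lower-case letter of the
# chosen option is returned.
def _example_input_options():
    choice = input_options(('Apply', 'cancel', 'edit'))
    # Prompts "[A]pply, cancel, edit?" and returns 'a', 'c', or 'e'.
    return choice == 'a'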
def input_yn(prompt, require=False):
"""Prompts the user for a "yes" or "no" response. The default is
"yes" unless `require` is `True`, in which case there is no default.
"""
sel = input_options(
('y', 'n'), require, prompt, 'Enter Y or N:'
)
return sel == 'y'
def input_select_objects(prompt, objs, rep, prompt_all=None):
"""Prompt to user to choose all, none, or some of the given objects.
Return the list of selected objects.
`prompt` is the prompt string to use for each question (it should be
phrased as an imperative verb). If `prompt_all` is given, it is used
    instead of `prompt` for the first (yes/no/select) question.
`rep` is a function to call on each object to print it out when confirming
objects individually.
"""
choice = input_options(
('y', 'n', 's'), False,
'%s? (Yes/no/select)' % (prompt_all or prompt))
print() # Blank line.
if choice == 'y': # Yes.
return objs
elif choice == 's': # Select.
out = []
for obj in objs:
rep(obj)
answer = input_options(
('y', 'n', 'q'), True, '%s? (yes/no/quit)' % prompt,
'Enter Y or N:'
)
if answer == 'y':
out.append(obj)
elif answer == 'q':
return out
return out
else: # No.
return []
# Human output formatting.
def human_bytes(size):
"""Formats size, a number of bytes, in a human-readable way."""
powers = ['', 'K', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y', 'H']
unit = 'B'
for power in powers:
if size < 1024:
return f"{size:3.1f} {power}{unit}"
size /= 1024.0
unit = 'iB'
return "big"
def human_seconds(interval):
"""Formats interval, a number of seconds, as a human-readable time
interval using English words.
"""
units = [
(1, 'second'),
(60, 'minute'),
(60, 'hour'),
(24, 'day'),
(7, 'week'),
(52, 'year'),
(10, 'decade'),
]
for i in range(len(units) - 1):
increment, suffix = units[i]
next_increment, _ = units[i + 1]
interval /= float(increment)
if interval < next_increment:
break
else:
# Last unit.
increment, suffix = units[-1]
interval /= float(increment)
return f"{interval:3.1f} {suffix}s"
def human_seconds_short(interval):
"""Formats a number of seconds as a short human-readable M:SS
string.
"""
interval = int(interval)
return '%i:%02i' % (interval // 60, interval % 60)
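# Editor's note: e.g. human_seconds(7200.0) == '2.0 hours' and
# human_seconds_short(125) == '2:05'.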
# Colorization.
# ANSI terminal colorization code heavily inspired by pygments:
# https://bitbucket.org/birkenfeld/pygments-main/src/default/pygments/console.py
# (pygments is by Tim Hatch, Armin Ronacher, et al.)
COLOR_ESCAPE = "\x1b["
DARK_COLORS = {
"black": 0,
"darkred": 1,
"darkgreen": 2,
"brown": 3,
"darkyellow": 3,
"darkblue": 4,
"purple": 5,
"darkmagenta": 5,
"teal": 6,
"darkcyan": 6,
"lightgray": 7
}
LIGHT_COLORS = {
"darkgray": 0,
"red": 1,
"green": 2,
"yellow": 3,
"blue": 4,
"fuchsia": 5,
"magenta": 5,
"turquoise": 6,
"cyan": 6,
"white": 7
}
RESET_COLOR = COLOR_ESCAPE + "39;49;00m"
# These abstract COLOR_NAMES are lazily mapped onto the actual colors in
# COLORS as defined in the configuration files; see `colorize` below.
COLOR_NAMES = ['text_success', 'text_warning', 'text_error', 'text_highlight',
'text_highlight_minor', 'action_default', 'action']
COLORS = None
def _colorize(color, text):
"""Returns a string that prints the given text in the given color
in a terminal that is ANSI color-aware. The color must be something
in DARK_COLORS or LIGHT_COLORS.
"""
if color in DARK_COLORS:
escape = COLOR_ESCAPE + "%im" % (DARK_COLORS[color] + 30)
elif color in LIGHT_COLORS:
escape = COLOR_ESCAPE + "%i;01m" % (LIGHT_COLORS[color] + 30)
else:
        raise ValueError('no such color %s' % color)
return escape + text + RESET_COLOR
def colorize(color_name, text):
"""Colorize text if colored output is enabled. (Like _colorize but
conditional.)
"""
if not config['ui']['color'] or 'NO_COLOR' in os.environ.keys():
return text
global COLORS
if not COLORS:
COLORS = {name:
config['ui']['colors'][name].as_str()
for name in COLOR_NAMES}
# In case a 3rd party plugin is still passing the actual color ('red')
# instead of the abstract color name ('text_error')
color = COLORS.get(color_name)
if not color:
log.debug('Invalid color_name: {0}', color_name)
color = color_name
return _colorize(color, text)
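# --- Editor's illustration (not part of beets) --------------------------
# Sketch of the colorization pipeline: callers pass abstract color names,
# which `colorize` resolves to the configured terminal colors.
def _example_colorize():
    print_(colorize('text_error', 'something went wrong'))
    # With ui.color disabled (or NO_COLOR set), the text passes through
    # unchanged.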
def _colordiff(a, b, highlight='text_highlight',
minor_highlight='text_highlight_minor'):
"""Given two values, return the same pair of strings except with
their differences highlighted in the specified color. Strings are
highlighted intelligently to show differences; other values are
stringified and highlighted in their entirety.
"""
if not isinstance(a, str) \
or not isinstance(b, str):
# Non-strings: use ordinary equality.
a = str(a)
b = str(b)
if a == b:
return a, b
else:
return colorize(highlight, a), colorize(highlight, b)
if isinstance(a, bytes) or isinstance(b, bytes):
# A path field.
a = util.displayable_path(a)
b = util.displayable_path(b)
a_out = []
b_out = []
matcher = SequenceMatcher(lambda x: False, a, b)
for op, a_start, a_end, b_start, b_end in matcher.get_opcodes():
if op == 'equal':
# In both strings.
a_out.append(a[a_start:a_end])
b_out.append(b[b_start:b_end])
elif op == 'insert':
# Right only.
b_out.append(colorize(highlight, b[b_start:b_end]))
elif op == 'delete':
# Left only.
a_out.append(colorize(highlight, a[a_start:a_end]))
elif op == 'replace':
            # Right and left differ. Colorize with the minor highlight if
# it's just a case change.
if a[a_start:a_end].lower() != b[b_start:b_end].lower():
color = highlight
else:
color = minor_highlight
a_out.append(colorize(color, a[a_start:a_end]))
b_out.append(colorize(color, b[b_start:b_end]))
else:
            assert False
return ''.join(a_out), ''.join(b_out)
def colordiff(a, b, highlight='text_highlight'):
"""Colorize differences between two values if color is enabled.
(Like _colordiff but conditional.)
"""
if config['ui']['color']:
return _colordiff(a, b, highlight)
else:
return str(a), str(b)
def get_path_formats(subview=None):
"""Get the configuration's path formats as a list of query/template
pairs.
"""
path_formats = []
subview = subview or config['paths']
for query, view in subview.items():
query = PF_KEY_QUERIES.get(query, query) # Expand common queries.
path_formats.append((query, template(view.as_str())))
return path_formats
def get_replacements():
"""Confuse validation function that reads regex/string pairs.
"""
replacements = []
for pattern, repl in config['replace'].get(dict).items():
repl = repl or ''
try:
replacements.append((re.compile(pattern), repl))
except re.error:
raise UserError(
'malformed regular expression in replace: {}'.format(
pattern
)
)
return replacements
def term_width():
"""Get the width (columns) of the terminal."""
fallback = config['ui']['terminal_width'].get(int)
# The fcntl and termios modules are not available on non-Unix
# platforms, so we fall back to a constant.
try:
import fcntl
import termios
except ImportError:
return fallback
try:
buf = fcntl.ioctl(0, termios.TIOCGWINSZ, ' ' * 4)
except OSError:
return fallback
try:
height, width = struct.unpack('hh', buf)
except struct.error:
return fallback
return width
FLOAT_EPSILON = 0.01
def _field_diff(field, old, old_fmt, new, new_fmt):
"""Given two Model objects and their formatted views, format their values
for `field` and highlight changes among them. Return a human-readable
string. If the value has not changed, return None instead.
"""
oldval = old.get(field)
newval = new.get(field)
# If no change, abort.
if isinstance(oldval, float) and isinstance(newval, float) and \
abs(oldval - newval) < FLOAT_EPSILON:
return None
elif oldval == newval:
return None
# Get formatted values for output.
oldstr = old_fmt.get(field, '')
newstr = new_fmt.get(field, '')
# For strings, highlight changes. For others, colorize the whole
# thing.
if isinstance(oldval, str):
oldstr, newstr = colordiff(oldval, newstr)
else:
oldstr = colorize('text_error', oldstr)
newstr = colorize('text_error', newstr)
return f'{oldstr} -> {newstr}'
def show_model_changes(new, old=None, fields=None, always=False):
"""Given a Model object, print a list of changes from its pristine
version stored in the database. Return a boolean indicating whether
any changes were found.
`old` may be the "original" object to avoid using the pristine
version from the database. `fields` may be a list of fields to
restrict the detection to. `always` indicates whether the object is
always identified, regardless of whether any changes are present.
"""
old = old or new._db._get(type(new), new.id)
# Keep the formatted views around instead of re-creating them in each
# iteration step
old_fmt = old.formatted()
new_fmt = new.formatted()
# Build up lines showing changed fields.
changes = []
for field in old:
# Subset of the fields. Never show mtime.
if field == 'mtime' or (fields and field not in fields):
continue
# Detect and show difference for this field.
line = _field_diff(field, old, old_fmt, new, new_fmt)
if line:
changes.append(f' {field}: {line}')
# New fields.
for field in set(new) - set(old):
if fields and field not in fields:
continue
changes.append(' {}: {}'.format(
field,
colorize('text_highlight', new_fmt[field])
))
# Print changes.
if changes or always:
print_(format(old))
if changes:
print_('\n'.join(changes))
return bool(changes)
def show_path_changes(path_changes):
"""Given a list of tuples (source, destination) that indicate the
path changes, log the changes as INFO-level output to the beets log.
The output is guaranteed to be unicode.
Every pair is shown on a single line if the terminal width permits it,
else it is split over two lines. E.g.,
Source -> Destination
vs.
Source
-> Destination
"""
sources, destinations = zip(*path_changes)
# Ensure unicode output
sources = list(map(util.displayable_path, sources))
destinations = list(map(util.displayable_path, destinations))
# Calculate widths for terminal split
col_width = (term_width() - len(' -> ')) // 2
max_width = len(max(sources + destinations, key=len))
if max_width > col_width:
# Print every change over two lines
for source, dest in zip(sources, destinations):
color_source, color_dest = colordiff(source, dest)
print_('{0} \n -> {1}'.format(color_source, color_dest))
else:
# Print every change on a single line, and add a header
title_pad = max_width - len('Source ') + len(' -> ')
print_('Source {0} Destination'.format(' ' * title_pad))
for source, dest in zip(sources, destinations):
pad = max_width - len(source)
color_source, color_dest = colordiff(source, dest)
print_('{0} {1} -> {2}'.format(
color_source,
' ' * pad,
color_dest,
))
# Helper functions for option parsing.
def _store_dict(option, opt_str, value, parser):
"""Custom action callback to parse options which have ``key=value``
pairs as values. All such pairs passed for this option are
aggregated into a dictionary.
"""
dest = option.dest
option_values = getattr(parser.values, dest, None)
if option_values is None:
# This is the first supplied ``key=value`` pair of option.
# Initialize empty dictionary and get a reference to it.
setattr(parser.values, dest, {})
option_values = getattr(parser.values, dest)
# Decode the argument using the platform's argument encoding.
value = util.text_string(value, util.arg_encoding())
try:
key, value = value.split('=', 1)
if not (key and value):
raise ValueError
except ValueError:
raise UserError(
"supplied argument `{}' is not of the form `key=value'"
.format(value))
option_values[key] = value
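# --- Editor's illustration (not part of beets) --------------------------
# Sketch of wiring `_store_dict` into an option parser so that repeated
# `--set key=value` arguments accumulate in one dictionary. The option
# name and `dest` are hypothetical.
def _example_store_dict_option(parser):
    parser.add_option('--set', dest='set_fields', action='callback',
                      type='string', callback=_store_dict)
    # `--set genre=Jazz --set year=1959` then yields
    # options.set_fields == {'genre': 'Jazz', 'year': '1959'}.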
class CommonOptionsParser(optparse.OptionParser):
"""Offers a simple way to add common formatting options.
Options available include:
- matching albums instead of tracks: add_album_option()
- showing paths instead of items/albums: add_path_option()
- changing the format of displayed items/albums: add_format_option()
The last one can have several behaviors:
- against a special target
- with a certain format
- autodetected target with the album option
Each method is fully documented in the related method.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._album_flags = False
# this serves both as an indicator that we offer the feature AND allows
# us to check whether it has been specified on the CLI - bypassing the
# fact that arguments may be in any order
def add_album_option(self, flags=('-a', '--album')):
"""Add a -a/--album option to match albums instead of tracks.
If used then the format option can auto-detect whether we're setting
the format for items or albums.
Sets the album property on the options extracted from the CLI.
"""
album = optparse.Option(*flags, action='store_true',
help='match albums instead of tracks')
self.add_option(album)
self._album_flags = set(flags)
def _set_format(self, option, opt_str, value, parser, target=None,
fmt=None, store_true=False):
"""Internal callback that sets the correct format while parsing CLI
arguments.
"""
if store_true:
setattr(parser.values, option.dest, True)
# Use the explicitly specified format, or the string from the option.
if fmt:
value = fmt
elif value:
value, = decargs([value])
else:
value = ''
parser.values.format = value
if target:
config[target._format_config_key].set(value)
else:
if self._album_flags:
if parser.values.album:
target = library.Album
else:
                    # The option is either missing or not parsed yet.
if self._album_flags & set(parser.rargs):
target = library.Album
else:
target = library.Item
config[target._format_config_key].set(value)
else:
config[library.Item._format_config_key].set(value)
config[library.Album._format_config_key].set(value)
def add_path_option(self, flags=('-p', '--path')):
"""Add a -p/--path option to display the path instead of the default
format.
By default this affects both items and albums. If add_album_option()
is used then the target will be autodetected.
Sets the format property to '$path' on the options extracted from the
CLI.
"""
path = optparse.Option(*flags, nargs=0, action='callback',
callback=self._set_format,
callback_kwargs={'fmt': '$path',
'store_true': True},
help='print paths for matched items or albums')
self.add_option(path)
def add_format_option(self, flags=('-f', '--format'), target=None):
"""Add -f/--format option to print some LibModel instances with a
custom format.
`target` is optional and can be one of ``library.Item``, 'item',
``library.Album`` and 'album'.
Several behaviors are available:
- if `target` is given then the format is only applied to that
LibModel
- if the album option is used then the target will be autodetected
- otherwise the format is applied to both items and albums.
Sets the format property on the options extracted from the CLI.
"""
kwargs = {}
if target:
if isinstance(target, str):
target = {'item': library.Item,
'album': library.Album}[target]
kwargs['target'] = target
opt = optparse.Option(*flags, action='callback',
callback=self._set_format,
callback_kwargs=kwargs,
help='print with custom format')
self.add_option(opt)
def add_all_common_options(self):
"""Add album, path and format options.
"""
self.add_album_option()
self.add_path_option()
self.add_format_option()
# Subcommand parsing infrastructure.
#
# This is a fairly generic subcommand parser for optparse. It is
# maintained externally here:
# https://gist.github.com/462717
# There you will also find a better description of the code and a more
# succinct example program.
class Subcommand:
"""A subcommand of a root command-line application that may be
invoked by a SubcommandOptionParser.
"""
def __init__(self, name, parser=None, help='', aliases=(), hide=False):
"""Creates a new subcommand. name is the primary way to invoke
the subcommand; aliases are alternate names. parser is an
OptionParser responsible for parsing the subcommand's options.
help is a short description of the command. If no parser is
given, it defaults to a new, empty CommonOptionsParser.
"""
self.name = name
self.parser = parser or CommonOptionsParser()
self.aliases = aliases
self.help = help
self.hide = hide
self._root_parser = None
def print_help(self):
self.parser.print_help()
def parse_args(self, args):
return self.parser.parse_args(args)
@property
def root_parser(self):
return self._root_parser
@root_parser.setter
def root_parser(self, root_parser):
self._root_parser = root_parser
self.parser.prog = '{} {}'.format(
as_string(root_parser.get_prog_name()), self.name)
class SubcommandsOptionParser(CommonOptionsParser):
"""A variant of OptionParser that parses subcommands and their
arguments.
"""
def __init__(self, *args, **kwargs):
"""Create a new subcommand-aware option parser. All of the
options to OptionParser.__init__ are supported in addition
to subcommands, a sequence of Subcommand objects.
"""
# A more helpful default usage.
if 'usage' not in kwargs:
kwargs['usage'] = """
%prog COMMAND [ARGS...]
%prog help COMMAND"""
kwargs['add_help_option'] = False
# Super constructor.
super().__init__(*args, **kwargs)
# Our root parser needs to stop on the first unrecognized argument.
self.disable_interspersed_args()
self.subcommands = []
def add_subcommand(self, *cmds):
"""Adds a Subcommand object to the parser's list of commands.
"""
for cmd in cmds:
cmd.root_parser = self
self.subcommands.append(cmd)
# Add the list of subcommands to the help message.
def format_help(self, formatter=None):
# Get the original help message, to which we will append.
out = super().format_help(formatter)
if formatter is None:
formatter = self.formatter
# Subcommands header.
result = ["\n"]
result.append(formatter.format_heading('Commands'))
formatter.indent()
# Generate the display names (including aliases).
# Also determine the help position.
disp_names = []
help_position = 0
subcommands = [c for c in self.subcommands if not c.hide]
subcommands.sort(key=lambda c: c.name)
for subcommand in subcommands:
name = subcommand.name
if subcommand.aliases:
name += ' (%s)' % ', '.join(subcommand.aliases)
disp_names.append(name)
# Set the help position based on the max width.
proposed_help_position = len(name) + formatter.current_indent + 2
if proposed_help_position <= formatter.max_help_position:
help_position = max(help_position, proposed_help_position)
# Add each subcommand to the output.
for subcommand, name in zip(subcommands, disp_names):
# Lifted directly from optparse.py.
name_width = help_position - formatter.current_indent - 2
if len(name) > name_width:
name = "%*s%s\n" % (formatter.current_indent, "", name)
indent_first = help_position
else:
name = "%*s%-*s " % (formatter.current_indent, "",
name_width, name)
indent_first = 0
result.append(name)
help_width = formatter.width - help_position
help_lines = textwrap.wrap(subcommand.help, help_width)
help_line = help_lines[0] if help_lines else ''
result.append("%*s%s\n" % (indent_first, "", help_line))
result.extend(["%*s%s\n" % (help_position, "", line)
for line in help_lines[1:]])
formatter.dedent()
# Concatenate the original help message with the subcommand
# list.
return out + "".join(result)
def _subcommand_for_name(self, name):
"""Return the subcommand in self.subcommands matching the
given name. The name may either be the name of a subcommand or
an alias. If no subcommand matches, returns None.
"""
for subcommand in self.subcommands:
if name == subcommand.name or \
name in subcommand.aliases:
return subcommand
return None
def parse_global_options(self, args):
"""Parse options up to the subcommand argument. Returns a tuple
of the options object and the remaining arguments.
"""
options, subargs = self.parse_args(args)
# Force the help command
if options.help:
subargs = ['help']
elif options.version:
subargs = ['version']
return options, subargs
def parse_subcommand(self, args):
"""Given the `args` left unused by a `parse_global_options`,
return the invoked subcommand, the subcommand options, and the
subcommand arguments.
"""
# Help is default command
if not args:
args = ['help']
cmdname = args.pop(0)
subcommand = self._subcommand_for_name(cmdname)
if not subcommand:
raise UserError(f"unknown command '{cmdname}'")
suboptions, subargs = subcommand.parse_args(args)
return subcommand, suboptions, subargs
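# A sketch of the two-stage parsing flow (hypothetical command line):
# given `args = ['-v', 'ls', 'artist:foo']`, `parse_global_options`
# consumes '-v' and returns `subargs = ['ls', 'artist:foo']` (because
# interspersed arguments are disabled); `parse_subcommand` then pops
# 'ls', resolves it via _subcommand_for_name, and parses 'artist:foo'
# with the subcommand's own parser.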
optparse.Option.ALWAYS_TYPED_ACTIONS += ('callback',)
# The main entry point and bootstrapping.
def _load_plugins(options, config):
"""Load the plugins specified on the command line or in the configuration.
"""
paths = config['pluginpath'].as_str_seq(split=False)
paths = [util.normpath(p) for p in paths]
log.debug('plugin paths: {0}', util.displayable_path(paths))
# On Python 3, the search paths need to be unicode.
paths = [util.py3_path(p) for p in paths]
# Extend the `beetsplug` package to include the plugin paths.
import beetsplug
beetsplug.__path__ = paths + list(beetsplug.__path__)
# For backwards compatibility, also support plugin paths that
# *contain* a `beetsplug` package.
sys.path += paths
# If we were given any plugins on the command line, use those.
if options.plugins is not None:
plugin_list = (options.plugins.split(',')
if len(options.plugins) > 0 else [])
else:
plugin_list = config['plugins'].as_str_seq()
plugins.load_plugins(plugin_list)
return plugins
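# A minimal sketch (hypothetical paths) of what the pluginpath handling
# above amounts to: with `pluginpath: [/some/dir]` in the configuration,
# the `beetsplug` namespace package is extended so that
# /some/dir/myplugin.py becomes importable as `beetsplug.myplugin`, and
# /some/dir is also appended to sys.path for legacy layouts that contain
# a `beetsplug/` package of their own.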
def _setup(options, lib=None):
"""Prepare and global state and updates it with command line options.
Returns a list of subcommands, a list of plugins, and a library instance.
"""
# Configure the MusicBrainz API.
mb.configure()
config = _configure(options)
plugins = _load_plugins(options, config)
# Add types and queries defined by plugins.
plugin_types_album = plugins.types(library.Album)
library.Album._types.update(plugin_types_album)
item_types = plugin_types_album.copy()
item_types.update(library.Item._types)
item_types.update(plugins.types(library.Item))
library.Item._types = item_types
library.Item._queries.update(plugins.named_queries(library.Item))
library.Album._queries.update(plugins.named_queries(library.Album))
plugins.send("pluginload")
# Get the default subcommands.
from beets.ui.commands import default_commands
subcommands = list(default_commands)
subcommands.extend(plugins.commands())
if lib is None:
lib = _open_library(config)
plugins.send("library_opened", lib=lib)
return subcommands, plugins, lib
def _configure(options):
"""Amend the global configuration object with command line options.
"""
# Add any additional config files specified with --config. This
# special handling lets specified plugins get loaded before we
# finish parsing the command line.
if getattr(options, 'config', None) is not None:
overlay_path = options.config
del options.config
config.set_file(overlay_path)
else:
overlay_path = None
config.set_args(options)
# Configure the logger.
if config['verbose'].get(int):
log.set_global_level(logging.DEBUG)
else:
log.set_global_level(logging.INFO)
if overlay_path:
log.debug('overlaying configuration: {0}',
util.displayable_path(overlay_path))
config_path = config.user_config_path()
if os.path.isfile(config_path):
log.debug('user configuration: {0}',
util.displayable_path(config_path))
else:
log.debug('no user configuration found at {0}',
util.displayable_path(config_path))
log.debug('data directory: {0}',
util.displayable_path(config.config_dir()))
return config
def _open_library(config):
"""Create a new library instance from the configuration.
"""
dbpath = util.bytestring_path(config['library'].as_filename())
try:
lib = library.Library(
dbpath,
config['directory'].as_filename(),
get_path_formats(),
get_replacements(),
)
lib.get_item(0) # Test database connection.
except (sqlite3.OperationalError, sqlite3.DatabaseError) as db_error:
log.debug('{}', traceback.format_exc())
raise UserError("database file {} cannot not be opened: {}".format(
util.displayable_path(dbpath),
db_error
))
log.debug('library database: {0}\n'
'library directory: {1}',
util.displayable_path(lib.path),
util.displayable_path(lib.directory))
return lib
def _raw_main(args, lib=None):
"""A helper function for `main` without top-level exception
handling.
"""
parser = SubcommandsOptionParser()
parser.add_format_option(flags=('--format-item',), target=library.Item)
parser.add_format_option(flags=('--format-album',), target=library.Album)
parser.add_option('-l', '--library', dest='library',
help='library database file to use')
parser.add_option('-d', '--directory', dest='directory',
help="destination music directory")
parser.add_option('-v', '--verbose', dest='verbose', action='count',
help='log more details (use twice for even more)')
parser.add_option('-c', '--config', dest='config',
help='path to configuration file')
parser.add_option('-p', '--plugins', dest='plugins',
help='a comma-separated list of plugins to load')
parser.add_option('-h', '--help', dest='help', action='store_true',
help='show this help message and exit')
parser.add_option('--version', dest='version', action='store_true',
help=optparse.SUPPRESS_HELP)
options, subargs = parser.parse_global_options(args)
# Special case for the `config --edit` command: bypass _setup so
# that an invalid configuration does not prevent the editor from
# starting.
if subargs and subargs[0] == 'config' \
and ('-e' in subargs or '--edit' in subargs):
from beets.ui.commands import config_edit
return config_edit()
test_lib = bool(lib)
subcommands, plugins, lib = _setup(options, lib)
parser.add_subcommand(*subcommands)
subcommand, suboptions, subargs = parser.parse_subcommand(subargs)
subcommand.func(lib, suboptions, subargs)
plugins.send('cli_exit', lib=lib)
if not test_lib:
# Clean up the library unless it came from the test harness.
lib._close()
def main(args=None):
"""Run the main command-line interface for beets. Includes top-level
exception handlers that print friendly error messages.
"""
try:
_raw_main(args)
except UserError as exc:
message = exc.args[0] if exc.args else None
log.error('error: {0}', message)
sys.exit(1)
except util.HumanReadableException as exc:
exc.log(log)
sys.exit(1)
except library.FileOperationError as exc:
# These errors have reasonable human-readable descriptions, but
# we still want to log their tracebacks for debugging.
log.debug('{}', traceback.format_exc())
log.error('{}', exc)
sys.exit(1)
except confuse.ConfigError as exc:
log.error('configuration error: {0}', exc)
sys.exit(1)
except db_query.InvalidQueryError as exc:
log.error('invalid query: {0}', exc)
sys.exit(1)
except OSError as exc:
if exc.errno == errno.EPIPE:
# "Broken pipe". End silently.
sys.stderr.close()
else:
raise
except KeyboardInterrupt:
# Silently ignore ^C except in verbose mode.
log.debug('{}', traceback.format_exc())
except db.DBAccessError as exc:
log.error(
'database access error: {0}\n'
'the library file might have a permissions problem',
exc
)
sys.exit(1)
| 43,849
|
Python
|
.py
| 1,094
| 31.8117
| 80
| 0.621576
|
rembo10/headphones
| 3,370
| 601
| 527
|
GPL-3.0
|
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
|
9,254
|
commands.py
|
rembo10_headphones/lib/beets/ui/commands.py
|
# This file is part of beets.
# Copyright 2016, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""This module provides the default commands for beets' command-line
interface.
"""
import os
import re
from platform import python_version
from collections import namedtuple, Counter
from itertools import chain
import beets
from beets import ui
from beets.ui import print_, input_, decargs, show_path_changes
from beets import autotag
from beets.autotag import Recommendation
from beets.autotag import hooks
from beets import plugins
from beets import importer
from beets import util
from beets.util import syspath, normpath, ancestry, displayable_path, \
MoveOperation
from beets import library
from beets import config
from beets import logging
from . import _store_dict
VARIOUS_ARTISTS = 'Various Artists'
PromptChoice = namedtuple('PromptChoice', ['short', 'long', 'callback'])
# Global logger.
log = logging.getLogger('beets')
# The list of default subcommands. This is populated with Subcommand
# objects that can be fed to a SubcommandsOptionParser.
default_commands = []
# Utilities.
def _do_query(lib, query, album, also_items=True):
"""For commands that operate on matched items, performs a query
and returns a list of matching items and a list of matching
albums. (The latter is only nonempty when album is True.) Raises
a UserError if no items match. also_items controls whether, when
fetching albums, the associated items should be fetched also.
"""
if album:
albums = list(lib.albums(query))
items = []
if also_items:
for al in albums:
items += al.items()
else:
albums = []
items = list(lib.items(query))
if album and not albums:
raise ui.UserError('No matching albums found.')
elif not album and not items:
raise ui.UserError('No matching items found.')
return items, albums
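# Example (hypothetical query) of the two shapes _do_query returns:
#   items, albums = _do_query(lib, ['artist:beatles'], album=False)
#   # -> albums == [] and items holds the matching Item objects
#   items, albums = _do_query(lib, ['year:1969'], album=True)
#   # -> albums holds matching Album objects and, since also_items
#   #    defaults to True, items holds every track of those albums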
# fields: Shows a list of available fields for queries and format strings.
def _print_keys(query):
"""Given a SQLite query result, print the `key` field of each
returned row, with indentation of 2 spaces.
"""
for row in query:
print_(' ' * 2 + row['key'])
def fields_func(lib, opts, args):
def _print_rows(names):
names.sort()
print_(' ' + '\n '.join(names))
print_("Item fields:")
_print_rows(library.Item.all_keys())
print_("Album fields:")
_print_rows(library.Album.all_keys())
with lib.transaction() as tx:
# The SQL uses DISTINCT to get unique values from the query.
unique_fields = 'SELECT DISTINCT key FROM (%s)'
print_("Item flexible attributes:")
_print_keys(tx.query(unique_fields % library.Item._flex_table))
print_("Album flexible attributes:")
_print_keys(tx.query(unique_fields % library.Album._flex_table))
fields_cmd = ui.Subcommand(
'fields',
help='show fields available for queries and format strings'
)
fields_cmd.func = fields_func
default_commands.append(fields_cmd)
# help: Print help text for commands
class HelpCommand(ui.Subcommand):
def __init__(self):
super().__init__(
'help', aliases=('?',),
help='give detailed help on a specific sub-command',
)
def func(self, lib, opts, args):
if args:
cmdname = args[0]
helpcommand = self.root_parser._subcommand_for_name(cmdname)
if not helpcommand:
raise ui.UserError(f"unknown command '{cmdname}'")
helpcommand.print_help()
else:
self.root_parser.print_help()
default_commands.append(HelpCommand())
# import: Autotagger and importer.
# Importer utilities and support.
def disambig_string(info):
"""Generate a string for an AlbumInfo or TrackInfo object that
provides context that helps disambiguate similar-looking albums and
tracks.
"""
disambig = []
if info.data_source and info.data_source != 'MusicBrainz':
disambig.append(info.data_source)
if isinstance(info, hooks.AlbumInfo):
if info.media:
if info.mediums and info.mediums > 1:
disambig.append('{}x{}'.format(
info.mediums, info.media
))
else:
disambig.append(info.media)
if info.year:
disambig.append(str(info.year))
if info.country:
disambig.append(info.country)
if info.label:
disambig.append(info.label)
if info.catalognum:
disambig.append(info.catalognum)
if info.albumdisambig:
disambig.append(info.albumdisambig)
if disambig:
return ', '.join(disambig)
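# For example (hedged, constructed values): an AlbumInfo with media='CD',
# mediums=2, year=1969 and country='UK' would yield the string
# '2xCD, 1969, UK'; when nothing disambiguates, the function implicitly
# returns None, which callers treat as falsy.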
def dist_string(dist):
"""Formats a distance (a float) as a colorized similarity percentage
string.
"""
out = '%.1f%%' % ((1 - dist) * 100)
if dist <= config['match']['strong_rec_thresh'].as_number():
out = ui.colorize('text_success', out)
elif dist <= config['match']['medium_rec_thresh'].as_number():
out = ui.colorize('text_warning', out)
else:
out = ui.colorize('text_error', out)
return out
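# For example: dist_string(0.05) formats as '95.0%', colorized with
# 'text_success' whenever 0.05 is at or below the configured
# strong_rec_thresh.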
def penalty_string(distance, limit=None):
"""Returns a colorized string that indicates all the penalties
applied to a distance object.
"""
penalties = []
for key in distance.keys():
key = key.replace('album_', '')
key = key.replace('track_', '')
key = key.replace('_', ' ')
penalties.append(key)
if penalties:
if limit and len(penalties) > limit:
penalties = penalties[:limit] + ['...']
return ui.colorize('text_warning', '(%s)' % ', '.join(penalties))
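# For example: a distance whose keys include 'album_year' and
# 'track_title' renders as the colorized string '(year, title)';
# with limit=1 it becomes '(year, ...)'.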
def show_change(cur_artist, cur_album, match):
"""Print out a representation of the changes that will be made if an
album's tags are changed according to `match`, which must be an AlbumMatch
object.
"""
def show_album(artist, album):
if artist:
album_description = f' {artist} - {album}'
elif album:
album_description = ' %s' % album
else:
album_description = ' (unknown album)'
print_(album_description)
def format_index(track_info):
"""Return a string representing the track index of the given
TrackInfo or Item object.
"""
if isinstance(track_info, hooks.TrackInfo):
index = track_info.index
medium_index = track_info.medium_index
medium = track_info.medium
mediums = match.info.mediums
else:
index = medium_index = track_info.track
medium = track_info.disc
mediums = track_info.disctotal
if config['per_disc_numbering']:
if mediums and mediums > 1:
return f'{medium}-{medium_index}'
else:
return str(medium_index if medium_index is not None
else index)
else:
return str(index)
# Identify the album in question.
if cur_artist != match.info.artist or \
(cur_album != match.info.album and
match.info.album != VARIOUS_ARTISTS):
artist_l, artist_r = cur_artist or '', match.info.artist
album_l, album_r = cur_album or '', match.info.album
if artist_r == VARIOUS_ARTISTS:
# Hide artists for VA releases.
artist_l, artist_r = '', ''
if config['artist_credit']:
artist_r = match.info.artist_credit
artist_l, artist_r = ui.colordiff(artist_l, artist_r)
album_l, album_r = ui.colordiff(album_l, album_r)
print_("Correcting tags from:")
show_album(artist_l, album_l)
print_("To:")
show_album(artist_r, album_r)
else:
print_("Tagging:\n {0.artist} - {0.album}".format(match.info))
# Data URL.
if match.info.data_url:
print_('URL:\n %s' % match.info.data_url)
# Info line.
info = []
# Similarity.
info.append('(Similarity: %s)' % dist_string(match.distance))
# Penalties.
penalties = penalty_string(match.distance)
if penalties:
info.append(penalties)
# Disambiguation.
disambig = disambig_string(match.info)
if disambig:
info.append(ui.colorize('text_highlight_minor', '(%s)' % disambig))
print_(' '.join(info))
# Tracks.
pairs = list(match.mapping.items())
pairs.sort(key=lambda item_and_track_info: item_and_track_info[1].index)
# Build up LHS and RHS for track difference display. The `lines` list
# contains ``(lhs, rhs, width)`` tuples where `width` is the length (in
# characters) of the uncolorized LHS.
lines = []
medium = disctitle = None
for item, track_info in pairs:
# Medium number and title.
if medium != track_info.medium or disctitle != track_info.disctitle:
media = match.info.media or 'Media'
if match.info.mediums > 1 and track_info.disctitle:
lhs = '{} {}: {}'.format(media, track_info.medium,
track_info.disctitle)
elif match.info.mediums > 1:
lhs = f'{media} {track_info.medium}'
elif track_info.disctitle:
lhs = f'{media}: {track_info.disctitle}'
else:
lhs = None
if lhs:
lines.append((lhs, '', 0))
medium, disctitle = track_info.medium, track_info.disctitle
# Titles.
new_title = track_info.title
if not item.title.strip():
# If there's no title, we use the filename.
cur_title = displayable_path(os.path.basename(item.path))
lhs, rhs = cur_title, new_title
else:
cur_title = item.title.strip()
lhs, rhs = ui.colordiff(cur_title, new_title)
lhs_width = len(cur_title)
# Track number change.
cur_track, new_track = format_index(item), format_index(track_info)
if cur_track != new_track:
if item.track in (track_info.index, track_info.medium_index):
color = 'text_highlight_minor'
else:
color = 'text_highlight'
templ = ui.colorize(color, ' (#{0})')
lhs += templ.format(cur_track)
rhs += templ.format(new_track)
lhs_width += len(cur_track) + 4
# Length change.
if item.length and track_info.length and \
abs(item.length - track_info.length) > \
config['ui']['length_diff_thresh'].as_number():
cur_length = ui.human_seconds_short(item.length)
new_length = ui.human_seconds_short(track_info.length)
templ = ui.colorize('text_highlight', ' ({0})')
lhs += templ.format(cur_length)
rhs += templ.format(new_length)
lhs_width += len(cur_length) + 3
# Penalties.
penalties = penalty_string(match.distance.tracks[track_info])
if penalties:
rhs += ' %s' % penalties
if lhs != rhs:
lines.append((' * %s' % lhs, rhs, lhs_width))
elif config['import']['detail']:
lines.append((' * %s' % lhs, '', lhs_width))
# Print each track in two columns, or across two lines.
col_width = (ui.term_width() - len(''.join([' * ', ' -> ']))) // 2
if lines:
max_width = max(w for _, _, w in lines)
for lhs, rhs, lhs_width in lines:
if not rhs:
print_(lhs)
elif max_width > col_width:
print_(f'{lhs} ->\n {rhs}')
else:
pad = max_width - lhs_width
print_('{}{} -> {}'.format(lhs, ' ' * pad, rhs))
# Missing and unmatched tracks.
if match.extra_tracks:
print_('Missing tracks ({}/{} - {:.1%}):'.format(
len(match.extra_tracks),
len(match.info.tracks),
len(match.extra_tracks) / len(match.info.tracks)
))
pad_width = max(len(track_info.title) for track_info in
match.extra_tracks)
for track_info in match.extra_tracks:
line = ' ! {0: <{width}} (#{1: >2})'.format(track_info.title,
format_index(track_info),
width=pad_width)
if track_info.length:
line += ' (%s)' % ui.human_seconds_short(track_info.length)
print_(ui.colorize('text_warning', line))
if match.extra_items:
print_('Unmatched tracks ({}):'.format(len(match.extra_items)))
pad_width = max(len(item.title) for item in match.extra_items)
for item in match.extra_items:
line = ' ! {0: <{width}} (#{1: >2})'.format(item.title,
format_index(item),
width=pad_width)
if item.length:
line += ' (%s)' % ui.human_seconds_short(item.length)
print_(ui.colorize('text_warning', line))
def show_item_change(item, match):
"""Print out the change that would occur by tagging `item` with the
metadata from `match`, a TrackMatch object.
"""
cur_artist, new_artist = item.artist, match.info.artist
cur_title, new_title = item.title, match.info.title
if cur_artist != new_artist or cur_title != new_title:
cur_artist, new_artist = ui.colordiff(cur_artist, new_artist)
cur_title, new_title = ui.colordiff(cur_title, new_title)
print_("Correcting track tags from:")
print_(f" {cur_artist} - {cur_title}")
print_("To:")
print_(f" {new_artist} - {new_title}")
else:
print_(f"Tagging track: {cur_artist} - {cur_title}")
# Data URL.
if match.info.data_url:
print_('URL:\n %s' % match.info.data_url)
# Info line.
info = []
# Similarity.
info.append('(Similarity: %s)' % dist_string(match.distance))
# Penalties.
penalties = penalty_string(match.distance)
if penalties:
info.append(penalties)
# Disambiguation.
disambig = disambig_string(match.info)
if disambig:
info.append(ui.colorize('text_highlight_minor', '(%s)' % disambig))
print_(' '.join(info))
def summarize_items(items, singleton):
"""Produces a brief summary line describing a set of items. Used for
manually resolving duplicates during import.
`items` is a list of `Item` objects. `singleton` indicates whether
this is an album or single-item import (if the latter, then `items`
should only have one element).
"""
summary_parts = []
if not singleton:
summary_parts.append("{} items".format(len(items)))
format_counts = {}
for item in items:
format_counts[item.format] = format_counts.get(item.format, 0) + 1
if len(format_counts) == 1:
# A single format.
summary_parts.append(items[0].format)
else:
# Enumerate all the formats by decreasing frequencies:
for fmt, count in sorted(
format_counts.items(),
key=lambda fmt_and_count: (-fmt_and_count[1], fmt_and_count[0])
):
summary_parts.append(f'{fmt} {count}')
if items:
average_bitrate = sum([item.bitrate for item in items]) / len(items)
total_duration = sum([item.length for item in items])
total_filesize = sum([item.filesize for item in items])
summary_parts.append('{}kbps'.format(int(average_bitrate / 1000)))
if items[0].format == "FLAC":
sample_bits = '{}kHz/{} bit'.format(
round(int(items[0].samplerate) / 1000, 1), items[0].bitdepth)
summary_parts.append(sample_bits)
summary_parts.append(ui.human_seconds_short(total_duration))
summary_parts.append(ui.human_bytes(total_filesize))
return ', '.join(summary_parts)
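# A hedged example of the summary shape (values invented): a 12-track
# MP3 album might summarize as something like
#   '12 items, MP3, 320kbps, 48:30, 110.2 MiB'
# while a mixed-format album enumerates formats by decreasing frequency,
# e.g. '12 items, MP3 10, FLAC 2, ...'.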
def _summary_judgment(rec):
"""Determines whether a decision should be made without even asking
the user. This occurs in quiet mode and when an action is chosen for
NONE recommendations. Return None if the user should be queried.
Otherwise, returns an action. May also print to the console if a
summary judgment is made.
"""
if config['import']['quiet']:
if rec == Recommendation.strong:
return importer.action.APPLY
else:
action = config['import']['quiet_fallback'].as_choice({
'skip': importer.action.SKIP,
'asis': importer.action.ASIS,
})
elif config['import']['timid']:
return None
elif rec == Recommendation.none:
action = config['import']['none_rec_action'].as_choice({
'skip': importer.action.SKIP,
'asis': importer.action.ASIS,
'ask': None,
})
else:
return None
if action == importer.action.SKIP:
print_('Skipping.')
elif action == importer.action.ASIS:
print_('Importing as-is.')
return action
def choose_candidate(candidates, singleton, rec, cur_artist=None,
cur_album=None, item=None, itemcount=None,
choices=[]):
"""Given a sorted list of candidates, ask the user for a selection
of which candidate to use. Applies to both full albums and
singletons (tracks). Candidates are either AlbumMatch or TrackMatch
objects depending on `singleton`. For albums, `cur_artist`,
`cur_album`, and `itemcount` must be provided. For singletons,
`item` must be provided.
`choices` is a list of `PromptChoice`s to be used in each prompt.
Returns one of the following:
* the result of the choice, which may be SKIP or ASIS
* a candidate (an AlbumMatch/TrackMatch object)
* a chosen `PromptChoice` from `choices`
"""
# Sanity check.
if singleton:
assert item is not None
else:
assert cur_artist is not None
assert cur_album is not None
# Build helper variables for the prompt choices.
choice_opts = tuple(c.long for c in choices)
choice_actions = {c.short: c for c in choices}
# Zero candidates.
if not candidates:
if singleton:
print_("No matching recordings found.")
else:
print_("No matching release found for {} tracks."
.format(itemcount))
print_('For help, see: '
'https://beets.readthedocs.org/en/latest/faq.html#nomatch')
sel = ui.input_options(choice_opts)
if sel in choice_actions:
return choice_actions[sel]
else:
assert False
# Is the change good enough?
bypass_candidates = False
if rec != Recommendation.none:
match = candidates[0]
bypass_candidates = True
while True:
# Display and choose from candidates.
require = rec <= Recommendation.low
if not bypass_candidates:
# Display list of candidates.
print_('Finding tags for {} "{} - {}".'.format(
'track' if singleton else 'album',
item.artist if singleton else cur_artist,
item.title if singleton else cur_album,
))
print_('Candidates:')
for i, match in enumerate(candidates):
# Index, metadata, and distance.
line = [
'{}.'.format(i + 1),
'{} - {}'.format(
match.info.artist,
match.info.title if singleton else match.info.album,
),
'({})'.format(dist_string(match.distance)),
]
# Penalties.
penalties = penalty_string(match.distance, 3)
if penalties:
line.append(penalties)
# Disambiguation
disambig = disambig_string(match.info)
if disambig:
line.append(ui.colorize('text_highlight_minor',
'(%s)' % disambig))
print_(' '.join(line))
# Ask the user for a choice.
sel = ui.input_options(choice_opts,
numrange=(1, len(candidates)))
if sel == 'm':
pass
elif sel in choice_actions:
return choice_actions[sel]
else: # Numerical selection.
match = candidates[sel - 1]
if sel != 1:
# When choosing anything but the first match,
# disable the default action.
require = True
bypass_candidates = False
# Show what we're about to do.
if singleton:
show_item_change(item, match)
else:
show_change(cur_artist, cur_album, match)
# Exact match => tag automatically if we're not in timid mode.
if rec == Recommendation.strong and not config['import']['timid']:
return match
# Ask for confirmation.
default = config['import']['default_action'].as_choice({
'apply': 'a',
'skip': 's',
'asis': 'u',
'none': None,
})
if default is None:
require = True
# Bell ring when user interaction is needed.
if config['import']['bell']:
ui.print_('\a', end='')
sel = ui.input_options(('Apply', 'More candidates') + choice_opts,
require=require, default=default)
if sel == 'a':
return match
elif sel in choice_actions:
return choice_actions[sel]
def manual_search(session, task):
"""Get a new `Proposal` using manual search criteria.
Input either an artist and album (for full albums) or artist and
track name (for singletons) for manual search.
"""
artist = input_('Artist:').strip()
name = input_('Album:' if task.is_album else 'Track:').strip()
if task.is_album:
_, _, prop = autotag.tag_album(
task.items, artist, name
)
return prop
else:
return autotag.tag_item(task.item, artist, name)
def manual_id(session, task):
"""Get a new `Proposal` using a manually-entered ID.
Input an ID, either for an album ("release") or a track ("recording").
"""
prompt = 'Enter {} ID:'.format('release' if task.is_album
else 'recording')
search_id = input_(prompt).strip()
if task.is_album:
_, _, prop = autotag.tag_album(
task.items, search_ids=search_id.split()
)
return prop
else:
return autotag.tag_item(task.item, search_ids=search_id.split())
def abort_action(session, task):
"""A prompt choice callback that aborts the importer.
"""
raise importer.ImportAbort()
class TerminalImportSession(importer.ImportSession):
"""An import session that runs in a terminal.
"""
def choose_match(self, task):
"""Given an initial autotagging of items, go through an interactive
dance with the user to ask for a choice of metadata. Returns an
AlbumMatch object, ASIS, or SKIP.
"""
# Show what we're tagging.
print_()
print_(displayable_path(task.paths, '\n') +
' ({} items)'.format(len(task.items)))
# Let plugins display info or prompt the user before we go through the
# process of selecting a candidate.
results = plugins.send('import_task_before_choice',
session=self, task=task)
actions = [action for action in results if action]
if len(actions) == 1:
return actions[0]
elif len(actions) > 1:
raise plugins.PluginConflictException(
'Only one handler for `import_task_before_choice` may return '
'an action.')
# Take immediate action if appropriate.
action = _summary_judgment(task.rec)
if action == importer.action.APPLY:
match = task.candidates[0]
show_change(task.cur_artist, task.cur_album, match)
return match
elif action is not None:
return action
# Loop until we have a choice.
while True:
# Ask for a choice from the user. The result of
# `choose_candidate` may be an `importer.action`, an
# `AlbumMatch` object for a specific selection, or a
# `PromptChoice`.
choices = self._get_choices(task)
choice = choose_candidate(
task.candidates, False, task.rec, task.cur_artist,
task.cur_album, itemcount=len(task.items), choices=choices
)
# Basic choices that require no more action here.
if choice in (importer.action.SKIP, importer.action.ASIS):
# Pass selection to main control flow.
return choice
# Plugin-provided choices. We invoke the associated callback
# function.
elif choice in choices:
post_choice = choice.callback(self, task)
if isinstance(post_choice, importer.action):
return post_choice
elif isinstance(post_choice, autotag.Proposal):
# Use the new candidates and continue around the loop.
task.candidates = post_choice.candidates
task.rec = post_choice.recommendation
# Otherwise, we have a specific match selection.
else:
# We have a candidate! Finish tagging. Here, choice is an
# AlbumMatch object.
assert isinstance(choice, autotag.AlbumMatch)
return choice
def choose_item(self, task):
"""Ask the user for a choice about tagging a single item. Returns
either an action constant or a TrackMatch object.
"""
print_()
print_(displayable_path(task.item.path))
candidates, rec = task.candidates, task.rec
# Take immediate action if appropriate.
action = _summary_judgment(task.rec)
if action == importer.action.APPLY:
match = candidates[0]
show_item_change(task.item, match)
return match
elif action is not None:
return action
while True:
# Ask for a choice.
choices = self._get_choices(task)
choice = choose_candidate(candidates, True, rec, item=task.item,
choices=choices)
if choice in (importer.action.SKIP, importer.action.ASIS):
return choice
elif choice in choices:
post_choice = choice.callback(self, task)
if isinstance(post_choice, importer.action):
return post_choice
elif isinstance(post_choice, autotag.Proposal):
candidates = post_choice.candidates
rec = post_choice.recommendation
else:
# Chose a candidate.
assert isinstance(choice, autotag.TrackMatch)
return choice
def resolve_duplicate(self, task, found_duplicates):
"""Decide what to do when a new album or item seems similar to one
that's already in the library.
"""
log.warning("This {0} is already in the library!",
("album" if task.is_album else "item"))
if config['import']['quiet']:
# In quiet mode, don't prompt -- just skip.
log.info('Skipping.')
sel = 's'
else:
# Print some detail about the existing and new items so the
# user can make an informed decision.
for duplicate in found_duplicates:
print_("Old: " + summarize_items(
list(duplicate.items()) if task.is_album else [duplicate],
not task.is_album,
))
print_("New: " + summarize_items(
task.imported_items(),
not task.is_album,
))
sel = ui.input_options(
('Skip new', 'Keep all', 'Remove old', 'Merge all')
)
if sel == 's':
# Skip new.
task.set_choice(importer.action.SKIP)
elif sel == 'k':
# Keep both. Do nothing; leave the choice intact.
pass
elif sel == 'r':
# Remove old.
task.should_remove_duplicates = True
elif sel == 'm':
task.should_merge_duplicates = True
else:
assert False
def should_resume(self, path):
return ui.input_yn("Import of the directory:\n{}\n"
"was interrupted. Resume (Y/n)?"
.format(displayable_path(path)))
def _get_choices(self, task):
"""Get the list of prompt choices that should be presented to the
user. This consists of both built-in choices and ones provided by
plugins.
The `before_choose_candidate` event is sent to the plugins, with
session and task as its parameters. Plugins are responsible for
checking the right conditions and returning a list of `PromptChoice`s,
which is flattened and checked for conflicts.
If two or more choices have the same short letter, a warning is
emitted and all but one are discarded, giving preference
to the default importer choices.
Returns a list of `PromptChoice`s.
"""
# Standard, built-in choices.
choices = [
PromptChoice('s', 'Skip',
lambda s, t: importer.action.SKIP),
PromptChoice('u', 'Use as-is',
lambda s, t: importer.action.ASIS)
]
if task.is_album:
choices += [
PromptChoice('t', 'as Tracks',
lambda s, t: importer.action.TRACKS),
PromptChoice('g', 'Group albums',
lambda s, t: importer.action.ALBUMS),
]
choices += [
PromptChoice('e', 'Enter search', manual_search),
PromptChoice('i', 'enter Id', manual_id),
PromptChoice('b', 'aBort', abort_action),
]
# Send the before_choose_candidate event and flatten list.
extra_choices = list(chain(*plugins.send('before_choose_candidate',
session=self, task=task)))
# Add a "dummy" choice for the other baked-in option, for
# duplicate checking.
all_choices = [
PromptChoice('a', 'Apply', None),
] + choices + extra_choices
# Check for conflicts.
short_letters = [c.short for c in all_choices]
if len(short_letters) != len(set(short_letters)):
# Duplicate short letter has been found.
duplicates = [i for i, count in Counter(short_letters).items()
if count > 1]
for short in duplicates:
# Keep the first of the choices, removing the rest.
dup_choices = [c for c in all_choices if c.short == short]
for c in dup_choices[1:]:
log.warning("Prompt choice '{0}' removed due to conflict "
"with '{1}' (short letter: '{2}')",
c.long, dup_choices[0].long, c.short)
extra_choices.remove(c)
return choices + extra_choices
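# A hedged sketch of a plugin-provided choice (hypothetical plugin
# code): a plugin listening for before_choose_candidate can return e.g.
#   [PromptChoice('p', 'Print tracks', my_callback)]
# and the callback, like manual_search above, may return either an
# importer.action or an autotag.Proposal to feed new candidates back
# into the loop; a short letter clashing with a built-in choice is
# dropped with a warning, as implemented above.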
# The import command.
def import_files(lib, paths, query):
"""Import the files in the given list of paths or matching the
query.
"""
# Check the user-specified directories.
for path in paths:
if not os.path.exists(syspath(normpath(path))):
raise ui.UserError('no such file or directory: {}'.format(
displayable_path(path)))
# Check parameter consistency.
if config['import']['quiet'] and config['import']['timid']:
raise ui.UserError("can't be both quiet and timid")
# Open the log.
if config['import']['log'].get() is not None:
logpath = syspath(config['import']['log'].as_filename())
try:
loghandler = logging.FileHandler(logpath)
except OSError:
raise ui.UserError("could not open log file for writing: "
"{}".format(displayable_path(logpath)))
else:
loghandler = None
# Never ask for input in quiet mode.
if config['import']['resume'].get() == 'ask' and \
config['import']['quiet']:
config['import']['resume'] = False
session = TerminalImportSession(lib, loghandler, paths, query)
session.run()
# Emit event.
plugins.send('import', lib=lib, paths=paths)
def import_func(lib, opts, args):
config['import'].set_args(opts)
# Special case: --copy flag suppresses import_move (which would
# otherwise take precedence).
if opts.copy:
config['import']['move'] = False
if opts.library:
query = decargs(args)
paths = []
else:
query = None
paths = args
if not paths:
raise ui.UserError('no path specified')
# On Python 2, we used to get filenames as raw bytes, which is
# what we need. On Python 3, we need to undo the "helpful"
# conversion to Unicode strings to get the real bytestring
# filename.
paths = [p.encode(util.arg_encoding(), 'surrogateescape')
for p in paths]
import_files(lib, paths, query)
import_cmd = ui.Subcommand(
'import', help='import new music', aliases=('imp', 'im')
)
import_cmd.parser.add_option(
'-c', '--copy', action='store_true', default=None,
help="copy tracks into library directory (default)"
)
import_cmd.parser.add_option(
'-C', '--nocopy', action='store_false', dest='copy',
help="don't copy tracks (opposite of -c)"
)
import_cmd.parser.add_option(
'-m', '--move', action='store_true', dest='move',
help="move tracks into the library (overrides -c)"
)
import_cmd.parser.add_option(
'-w', '--write', action='store_true', default=None,
help="write new metadata to files' tags (default)"
)
import_cmd.parser.add_option(
'-W', '--nowrite', action='store_false', dest='write',
help="don't write metadata (opposite of -w)"
)
import_cmd.parser.add_option(
'-a', '--autotag', action='store_true', dest='autotag',
help="infer tags for imported files (default)"
)
import_cmd.parser.add_option(
'-A', '--noautotag', action='store_false', dest='autotag',
help="don't infer tags for imported files (opposite of -a)"
)
import_cmd.parser.add_option(
'-p', '--resume', action='store_true', default=None,
help="resume importing if interrupted"
)
import_cmd.parser.add_option(
'-P', '--noresume', action='store_false', dest='resume',
help="do not try to resume importing"
)
import_cmd.parser.add_option(
'-q', '--quiet', action='store_true', dest='quiet',
help="never prompt for input: skip albums instead"
)
import_cmd.parser.add_option(
'-l', '--log', dest='log',
help='file to log untaggable albums for later review'
)
import_cmd.parser.add_option(
'-s', '--singletons', action='store_true',
help='import individual tracks instead of full albums'
)
import_cmd.parser.add_option(
'-t', '--timid', dest='timid', action='store_true',
help='always confirm all actions'
)
import_cmd.parser.add_option(
'-L', '--library', dest='library', action='store_true',
help='retag items matching a query'
)
import_cmd.parser.add_option(
'-i', '--incremental', dest='incremental', action='store_true',
help='skip already-imported directories'
)
import_cmd.parser.add_option(
'-I', '--noincremental', dest='incremental', action='store_false',
help='do not skip already-imported directories'
)
import_cmd.parser.add_option(
'--from-scratch', dest='from_scratch', action='store_true',
help='erase existing metadata before applying new metadata'
)
import_cmd.parser.add_option(
'--flat', dest='flat', action='store_true',
help='import an entire tree as a single album'
)
import_cmd.parser.add_option(
'-g', '--group-albums', dest='group_albums', action='store_true',
help='group tracks in a folder into separate albums'
)
import_cmd.parser.add_option(
'--pretend', dest='pretend', action='store_true',
help='just print the files to import'
)
import_cmd.parser.add_option(
'-S', '--search-id', dest='search_ids', action='append',
metavar='ID',
help='restrict matching to a specific metadata backend ID'
)
import_cmd.parser.add_option(
'--set', dest='set_fields', action='callback',
callback=_store_dict,
metavar='FIELD=VALUE',
help='set the given fields to the supplied values'
)
import_cmd.func = import_func
default_commands.append(import_cmd)
# list: Query and show library contents.
def list_items(lib, query, album, fmt=''):
"""Print out items in lib matching query. If album, then search for
albums instead of single items.
"""
if album:
for album in lib.albums(query):
ui.print_(format(album, fmt))
else:
for item in lib.items(query):
ui.print_(format(item, fmt))
def list_func(lib, opts, args):
list_items(lib, decargs(args), opts.album)
list_cmd = ui.Subcommand('list', help='query the library', aliases=('ls',))
list_cmd.parser.usage += "\n" \
'Example: %prog -f \'$album: $title\' artist:beatles'
list_cmd.parser.add_all_common_options()
list_cmd.func = list_func
default_commands.append(list_cmd)
# update: Update library contents according to on-disk tags.
def update_items(lib, query, album, move, pretend, fields):
"""For all the items matched by the query, update the library to
reflect the item's embedded tags.
:param fields: The fields to be stored. If not specified, all fields will
be.
"""
with lib.transaction():
if move and fields is not None and 'path' not in fields:
# Special case: if an item needs to be moved, the path field has to
# be updated; otherwise the new path will not be reflected in the
# database.
fields.append('path')
items, _ = _do_query(lib, query, album)
# Walk through the items and pick up their changes.
affected_albums = set()
for item in items:
# Item deleted?
if not os.path.exists(syspath(item.path)):
ui.print_(format(item))
ui.print_(ui.colorize('text_error', ' deleted'))
if not pretend:
item.remove(True)
affected_albums.add(item.album_id)
continue
# Did the item change since last checked?
if item.current_mtime() <= item.mtime:
log.debug('skipping {0} because mtime is up to date ({1})',
displayable_path(item.path), item.mtime)
continue
# Read new data.
try:
item.read()
except library.ReadError as exc:
log.error('error reading {0}: {1}',
displayable_path(item.path), exc)
continue
# Special-case album artist when it matches track artist. (Hacky
# but necessary for preserving album-level metadata for non-
# autotagged imports.)
if not item.albumartist:
old_item = lib.get_item(item.id)
if old_item.albumartist == old_item.artist == item.artist:
item.albumartist = old_item.albumartist
item._dirty.discard('albumartist')
# Check for and display changes.
changed = ui.show_model_changes(
item,
fields=fields or library.Item._media_fields)
# Save changes.
if not pretend:
if changed:
# Move the item if it's in the library.
if move and lib.directory in ancestry(item.path):
item.move(store=False)
item.store(fields=fields)
affected_albums.add(item.album_id)
else:
# The file's mtime was different, but there were no
# changes to the metadata. Store the new mtime,
# which is set in the call to read(), so we don't
# check this again in the future.
item.store(fields=fields)
# Skip album changes while pretending.
if pretend:
return
# Modify affected albums to reflect changes in their items.
for album_id in affected_albums:
if album_id is None: # Singletons.
continue
album = lib.get_album(album_id)
if not album: # Empty albums have already been removed.
log.debug('emptied album {0}', album_id)
continue
first_item = album.items().get()
# Update album structure to reflect an item in it.
for key in library.Album.item_keys:
album[key] = first_item[key]
album.store(fields=fields)
# Move album art (and any inconsistent items).
if move and lib.directory in ancestry(first_item.path):
log.debug('moving album {0}', album_id)
# Manually moving and storing the album.
items = list(album.items())
for item in items:
item.move(store=False, with_album=False)
item.store(fields=fields)
album.move(store=False)
album.store(fields=fields)
def update_func(lib, opts, args):
# Verify that the library folder exists to prevent accidental wipes.
if not os.path.isdir(lib.directory):
ui.print_("Library path is unavailable or does not exist.")
ui.print_(lib.directory)
if not ui.input_yn("Are you sure you want to continue (y/n)?", True):
return
update_items(lib, decargs(args), opts.album, ui.should_move(opts.move),
opts.pretend, opts.fields)
update_cmd = ui.Subcommand(
'update', help='update the library', aliases=('upd', 'up',)
)
update_cmd.parser.add_album_option()
update_cmd.parser.add_format_option()
update_cmd.parser.add_option(
'-m', '--move', action='store_true', dest='move',
help="move files in the library directory"
)
update_cmd.parser.add_option(
'-M', '--nomove', action='store_false', dest='move',
help="don't move files in library"
)
update_cmd.parser.add_option(
'-p', '--pretend', action='store_true',
help="show all changes but do nothing"
)
update_cmd.parser.add_option(
'-F', '--field', default=None, action='append', dest='fields',
help='list of fields to update'
)
update_cmd.func = update_func
default_commands.append(update_cmd)
# remove: Remove items from library, delete files.
def remove_items(lib, query, album, delete, force):
"""Remove items matching query from lib. If album, then match and
remove whole albums. If delete, also remove files from disk.
"""
# Get the matching items.
items, albums = _do_query(lib, query, album)
objs = albums if album else items
# Confirm file removal if not forcing removal.
if not force:
# Prepare confirmation with user.
album_str = " in {} album{}".format(
len(albums), 's' if len(albums) > 1 else ''
) if album else ""
if delete:
fmt = '$path - $title'
prompt = 'Really DELETE'
prompt_all = 'Really DELETE {} file{}{}'.format(
len(items), 's' if len(items) > 1 else '', album_str
)
else:
fmt = ''
prompt = 'Really remove from the library?'
prompt_all = 'Really remove {} item{}{} from the library?'.format(
len(items), 's' if len(items) > 1 else '', album_str
)
# Helpers for printing affected items
def fmt_track(t):
ui.print_(format(t, fmt))
def fmt_album(a):
ui.print_()
for i in a.items():
fmt_track(i)
fmt_obj = fmt_album if album else fmt_track
# Show all the items.
for o in objs:
fmt_obj(o)
# Confirm with user.
objs = ui.input_select_objects(prompt, objs, fmt_obj,
prompt_all=prompt_all)
if not objs:
return
# Remove (and possibly delete) items.
with lib.transaction():
for obj in objs:
obj.remove(delete)
def remove_func(lib, opts, args):
remove_items(lib, decargs(args), opts.album, opts.delete, opts.force)
remove_cmd = ui.Subcommand(
'remove', help='remove matching items from the library', aliases=('rm',)
)
remove_cmd.parser.add_option(
"-d", "--delete", action="store_true",
help="also remove files from disk"
)
remove_cmd.parser.add_option(
"-f", "--force", action="store_true",
help="do not ask when removing items"
)
remove_cmd.parser.add_album_option()
remove_cmd.func = remove_func
default_commands.append(remove_cmd)
# stats: Show library/query statistics.
def show_stats(lib, query, exact):
"""Shows some statistics about the matched items."""
items = lib.items(query)
total_size = 0
total_time = 0.0
total_items = 0
artists = set()
albums = set()
album_artists = set()
for item in items:
if exact:
try:
total_size += os.path.getsize(syspath(item.path))
except OSError as exc:
log.info('could not get size of {}: {}', item.path, exc)
else:
total_size += int(item.length * item.bitrate / 8)
total_time += item.length
total_items += 1
artists.add(item.artist)
album_artists.add(item.albumartist)
if item.album_id:
albums.add(item.album_id)
size_str = ui.human_bytes(total_size)
if exact:
size_str += f' ({total_size} bytes)'
print_("""Tracks: {}
Total time: {}{}
{}: {}
Artists: {}
Albums: {}
Album artists: {}""".format(
total_items,
ui.human_seconds(total_time),
f' ({total_time:.2f} seconds)' if exact else '',
'Total size' if exact else 'Approximate total size',
size_str,
len(artists),
len(albums),
len(album_artists)),
)
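# Worked example for the approximate size above: a 240-second track at
# 320 kbps (bitrate == 320000) contributes int(240 * 320000 / 8), i.e.
# 9,600,000 bytes, to total_size when --exact is not given.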
def stats_func(lib, opts, args):
show_stats(lib, decargs(args), opts.exact)
stats_cmd = ui.Subcommand(
'stats', help='show statistics about the library or a query'
)
stats_cmd.parser.add_option(
'-e', '--exact', action='store_true',
help='exact size and time'
)
stats_cmd.func = stats_func
default_commands.append(stats_cmd)
# version: Show current beets version.
def show_version(lib, opts, args):
print_('beets version %s' % beets.__version__)
print_(f'Python version {python_version()}')
# Show plugins.
names = sorted(p.name for p in plugins.find_plugins())
if names:
print_('plugins:', ', '.join(names))
else:
print_('no plugins loaded')
version_cmd = ui.Subcommand(
'version', help='output version information'
)
version_cmd.func = show_version
default_commands.append(version_cmd)
# modify: Declaratively change metadata.
def modify_items(lib, mods, dels, query, write, move, album, confirm):
"""Modifies matching items according to user-specified assignments and
deletions.
`mods` is a dictionary of field and value pairs indicating
assignments. `dels` is a list of fields to be deleted.
"""
# Parse key=value specifications into a dictionary.
model_cls = library.Album if album else library.Item
for key, value in mods.items():
mods[key] = model_cls._parse(key, value)
# Get the items to modify.
items, albums = _do_query(lib, query, album, False)
objs = albums if album else items
# Apply changes *temporarily*, preview them, and collect modified
# objects.
print_('Modifying {} {}s.'
.format(len(objs), 'album' if album else 'item'))
changed = []
for obj in objs:
if print_and_modify(obj, mods, dels) and obj not in changed:
changed.append(obj)
# Still something to do?
if not changed:
print_('No changes to make.')
return
# Confirm action.
if confirm:
if write and move:
extra = ', move and write tags'
elif write:
extra = ' and write tags'
elif move:
extra = ' and move'
else:
extra = ''
changed = ui.input_select_objects(
'Really modify%s' % extra, changed,
lambda o: print_and_modify(o, mods, dels)
)
# Apply changes to database and files
with lib.transaction():
for obj in changed:
obj.try_sync(write, move)
def print_and_modify(obj, mods, dels):
"""Print the modifications to an item and return a bool indicating
whether any changes were made.
`mods` is a dictionary of fields and values to update on the object;
`dels` is a sequence of fields to delete.
"""
obj.update(mods)
for field in dels:
try:
del obj[field]
except KeyError:
pass
return ui.show_model_changes(obj)
def modify_parse_args(args):
"""Split the arguments for the modify subcommand into query parts,
assignments (field=value), and deletions (field!). Returns the result as
a three-tuple in that order.
"""
mods = {}
dels = []
query = []
for arg in args:
if arg.endswith('!') and '=' not in arg and ':' not in arg:
dels.append(arg[:-1]) # Strip trailing !.
elif '=' in arg and ':' not in arg.split('=', 1)[0]:
key, val = arg.split('=', 1)
mods[key] = val
else:
query.append(arg)
return query, mods, dels
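# A minimal illustration (hypothetical helper, not part of the command
# set) of how modify_parse_args splits a mixed argument list:
def _example_modify_parse_args():
    query, mods, dels = modify_parse_args(
        ['artist:beatles', 'genre=Rock', 'comment!'])
    # query == ['artist:beatles']
    # mods == {'genre': 'Rock'}
    # dels == ['comment']
    return query, mods, dels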
def modify_func(lib, opts, args):
query, mods, dels = modify_parse_args(decargs(args))
if not mods and not dels:
raise ui.UserError('no modifications specified')
modify_items(lib, mods, dels, query, ui.should_write(opts.write),
ui.should_move(opts.move), opts.album, not opts.yes)
modify_cmd = ui.Subcommand(
'modify', help='change metadata fields', aliases=('mod',)
)
modify_cmd.parser.add_option(
'-m', '--move', action='store_true', dest='move',
help="move files in the library directory"
)
modify_cmd.parser.add_option(
'-M', '--nomove', action='store_false', dest='move',
help="don't move files in library"
)
modify_cmd.parser.add_option(
'-w', '--write', action='store_true', default=None,
help="write new metadata to files' tags (default)"
)
modify_cmd.parser.add_option(
'-W', '--nowrite', action='store_false', dest='write',
help="don't write metadata (opposite of -w)"
)
modify_cmd.parser.add_album_option()
modify_cmd.parser.add_format_option(target='item')
modify_cmd.parser.add_option(
'-y', '--yes', action='store_true',
help='skip confirmation'
)
modify_cmd.func = modify_func
default_commands.append(modify_cmd)
# move: Move/copy files to the library or a new base directory.
def move_items(lib, dest, query, copy, album, pretend, confirm=False,
export=False):
"""Moves or copies items to a new base directory, given by dest. If
dest is None, then the library's base directory is used, making the
command "consolidate" files.
"""
items, albums = _do_query(lib, query, album, False)
objs = albums if album else items
num_objs = len(objs)
# Filter out files that don't need to be moved.
def isitemmoved(item):
return item.path != item.destination(basedir=dest)
def isalbummoved(album):
return any(isitemmoved(i) for i in album.items())
objs = [o for o in objs if (isalbummoved if album else isitemmoved)(o)]
num_unmoved = num_objs - len(objs)
# Report unmoved files that match the query.
unmoved_msg = ''
if num_unmoved > 0:
unmoved_msg = f' ({num_unmoved} already in place)'
copy = copy or export # Exporting always copies.
action = 'Copying' if copy else 'Moving'
act = 'copy' if copy else 'move'
entity = 'album' if album else 'item'
log.info('{0} {1} {2}{3}{4}.', action, len(objs), entity,
's' if len(objs) != 1 else '', unmoved_msg)
if not objs:
return
if pretend:
if album:
show_path_changes([(item.path, item.destination(basedir=dest))
for obj in objs for item in obj.items()])
else:
show_path_changes([(obj.path, obj.destination(basedir=dest))
for obj in objs])
else:
if confirm:
objs = ui.input_select_objects(
'Really %s' % act, objs,
lambda o: show_path_changes(
[(o.path, o.destination(basedir=dest))]))
for obj in objs:
log.debug('moving: {0}', util.displayable_path(obj.path))
if export:
# Copy without affecting the database.
obj.move(operation=MoveOperation.COPY, basedir=dest,
store=False)
else:
# Ordinary move/copy: store the new path.
if copy:
obj.move(operation=MoveOperation.COPY, basedir=dest)
else:
obj.move(operation=MoveOperation.MOVE, basedir=dest)
def move_func(lib, opts, args):
dest = opts.dest
if dest is not None:
dest = normpath(dest)
if not os.path.isdir(dest):
raise ui.UserError('no such directory: %s' % dest)
move_items(lib, dest, decargs(args), opts.copy, opts.album, opts.pretend,
opts.timid, opts.export)
move_cmd = ui.Subcommand(
'move', help='move or copy items', aliases=('mv',)
)
move_cmd.parser.add_option(
'-d', '--dest', metavar='DIR', dest='dest',
help='destination directory'
)
move_cmd.parser.add_option(
'-c', '--copy', default=False, action='store_true',
help='copy instead of moving'
)
move_cmd.parser.add_option(
'-p', '--pretend', default=False, action='store_true',
help='show how files would be moved, but don\'t touch anything'
)
move_cmd.parser.add_option(
'-t', '--timid', dest='timid', action='store_true',
help='always confirm all actions'
)
move_cmd.parser.add_option(
'-e', '--export', default=False, action='store_true',
help='copy without changing the database path'
)
move_cmd.parser.add_album_option()
move_cmd.func = move_func
default_commands.append(move_cmd)
# write: Write tags into files.
def write_items(lib, query, pretend, force):
"""Write tag information from the database to the respective files
in the filesystem.
"""
items, albums = _do_query(lib, query, False, False)
for item in items:
# Item deleted?
if not os.path.exists(syspath(item.path)):
log.info('missing file: {0}', util.displayable_path(item.path))
continue
# Get an Item object reflecting the "clean" (on-disk) state.
try:
clean_item = library.Item.from_path(item.path)
except library.ReadError as exc:
log.error('error reading {0}: {1}',
displayable_path(item.path), exc)
continue
# Check for and display changes.
changed = ui.show_model_changes(item, clean_item,
library.Item._media_tag_fields, force)
if (changed or force) and not pretend:
# We use `try_sync` here to keep the mtime up to date in the
# database.
item.try_sync(True, False)
def write_func(lib, opts, args):
write_items(lib, decargs(args), opts.pretend, opts.force)
write_cmd = ui.Subcommand('write', help='write tag information to files')
write_cmd.parser.add_option(
'-p', '--pretend', action='store_true',
help="show all changes but do nothing"
)
write_cmd.parser.add_option(
'-f', '--force', action='store_true',
help="write tags even if the existing tags match the database"
)
write_cmd.func = write_func
default_commands.append(write_cmd)
# config: Show and edit user configuration.
def config_func(lib, opts, args):
# Make sure lazy configuration is loaded
config.resolve()
# Print paths.
if opts.paths:
filenames = []
for source in config.sources:
if not opts.defaults and source.default:
continue
if source.filename:
filenames.append(source.filename)
# In case the user config file does not exist, prepend it to the
# list.
user_path = config.user_config_path()
if user_path not in filenames:
filenames.insert(0, user_path)
for filename in filenames:
print_(displayable_path(filename))
# Open in editor.
elif opts.edit:
config_edit()
# Dump configuration.
else:
config_out = config.dump(full=opts.defaults, redact=opts.redact)
if config_out.strip() != '{}':
print_(util.text_string(config_out))
else:
print("Empty configuration")
def config_edit():
"""Open a program to edit the user configuration.
An empty config file is created if no existing config file exists.
"""
path = config.user_config_path()
editor = util.editor_command()
try:
if not os.path.isfile(path):
open(path, 'w+').close()
util.interactive_open([path], editor)
except OSError as exc:
message = f"Could not edit configuration: {exc}"
if not editor:
message += ". Please set the EDITOR environment variable"
raise ui.UserError(message)
config_cmd = ui.Subcommand('config',
help='show or edit the user configuration')
config_cmd.parser.add_option(
'-p', '--paths', action='store_true',
help='show files that configuration was loaded from'
)
config_cmd.parser.add_option(
'-e', '--edit', action='store_true',
help='edit user configuration with $EDITOR'
)
config_cmd.parser.add_option(
'-d', '--defaults', action='store_true',
help='include the default configuration'
)
config_cmd.parser.add_option(
'-c', '--clear', action='store_false',
dest='redact', default=True,
help='do not redact sensitive fields'
)
config_cmd.func = config_func
default_commands.append(config_cmd)
# completion: print completion script
def print_completion(*args):
for line in completion_script(default_commands + plugins.commands()):
print_(line, end='')
if not any(map(os.path.isfile, BASH_COMPLETION_PATHS)):
log.warning('Warning: Unable to find the bash-completion package. '
'Command line completion might not work.')
# Use a concrete list (not a one-shot `map` iterator) so the paths can
# be checked more than once.
BASH_COMPLETION_PATHS = [syspath(p) for p in (
    '/etc/bash_completion',
    '/usr/share/bash-completion/bash_completion',
    '/usr/local/share/bash-completion/bash_completion',
    # SmartOS
    '/opt/local/share/bash-completion/bash_completion',
    # Homebrew (before bash-completion2)
    '/usr/local/etc/bash_completion',
)]
def completion_script(commands):
"""Yield the full completion shell script as strings.
    ``commands`` is a list of ``ui.Subcommand`` instances to generate
completion data for.
"""
base_script = os.path.join(os.path.dirname(__file__), 'completion_base.sh')
with open(base_script) as base_script:
yield util.text_string(base_script.read())
options = {}
aliases = {}
command_names = []
# Collect subcommands
for cmd in commands:
name = cmd.name
command_names.append(name)
for alias in cmd.aliases:
if re.match(r'^\w+$', alias):
aliases[alias] = name
options[name] = {'flags': [], 'opts': []}
for opts in cmd.parser._get_all_options()[1:]:
if opts.action in ('store_true', 'store_false'):
option_type = 'flags'
else:
option_type = 'opts'
options[name][option_type].extend(
opts._short_opts + opts._long_opts
)
# Add global options
options['_global'] = {
'flags': ['-v', '--verbose'],
'opts':
'-l --library -c --config -d --directory -h --help'.split(' ')
}
# Add flags common to all commands
options['_common'] = {
'flags': ['-h', '--help']
}
# Start generating the script
yield "_beet() {\n"
# Command names
yield " local commands='%s'\n" % ' '.join(command_names)
yield "\n"
# Command aliases
yield " local aliases='%s'\n" % ' '.join(aliases.keys())
for alias, cmd in aliases.items():
yield " local alias__{}={}\n".format(alias.replace('-', '_'), cmd)
yield '\n'
# Fields
yield " fields='%s'\n" % ' '.join(
set(
list(library.Item._fields.keys()) +
list(library.Album._fields.keys())
)
)
# Command options
for cmd, opts in options.items():
for option_type, option_list in opts.items():
if option_list:
option_list = ' '.join(option_list)
yield " local {}__{}='{}'\n".format(
option_type, cmd.replace('-', '_'), option_list)
yield ' _beet_dispatch\n'
yield '}\n'
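# Sketch of how the generated script is typically consumed (the shell
# usage shown is illustrative, not defined in this file):
#
#     script = ''.join(completion_script(default_commands))
#     # In a shell profile, something like: eval "$(beet completion)"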
completion_cmd = ui.Subcommand(
'completion',
help='print shell script that provides command line completion'
)
completion_cmd.func = print_completion
completion_cmd.hide = True
default_commands.append(completion_cmd)
| 63,045
|
Python
|
.py
| 1,572
| 31.19084
| 79
| 0.598771
|
rembo10/headphones
| 3,370
| 601
| 527
|
GPL-3.0
|
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
|
9,255
|
db.py
|
rembo10_headphones/lib/beets/dbcore/db.py
|
# This file is part of beets.
# Copyright 2016, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""The central Model and Database constructs for DBCore.
"""
import time
import os
import re
from collections import defaultdict
import threading
import sqlite3
import contextlib
import beets
from beets.util import functemplate
from beets.util import py3_path
from beets.dbcore import types
from .query import MatchQuery, NullSort, TrueQuery
from collections.abc import Mapping
class DBAccessError(Exception):
"""The SQLite database became inaccessible.
This can happen when trying to read or write the database when, for
example, the database file is deleted or otherwise disappears. There
is probably no way to recover from this error.
"""
class FormattedMapping(Mapping):
"""A `dict`-like formatted view of a model.
The accessor `mapping[key]` returns the formatted version of
`model[key]` as a unicode string.
The `included_keys` parameter allows filtering the fields that are
returned. By default all fields are returned. Limiting to specific keys can
avoid expensive per-item database queries.
If `for_path` is true, all path separators in the formatted values
are replaced.
"""
ALL_KEYS = '*'
def __init__(self, model, included_keys=ALL_KEYS, for_path=False):
self.for_path = for_path
self.model = model
if included_keys == self.ALL_KEYS:
# Performance note: this triggers a database query.
self.model_keys = self.model.keys(True)
else:
self.model_keys = included_keys
def __getitem__(self, key):
if key in self.model_keys:
return self._get_formatted(self.model, key)
else:
raise KeyError(key)
def __iter__(self):
return iter(self.model_keys)
def __len__(self):
return len(self.model_keys)
def get(self, key, default=None):
if default is None:
default = self.model._type(key).format(None)
return super().get(key, default)
def _get_formatted(self, model, key):
value = model._type(key).format(model.get(key))
if isinstance(value, bytes):
value = value.decode('utf-8', 'ignore')
if self.for_path:
sep_repl = beets.config['path_sep_replace'].as_str()
sep_drive = beets.config['drive_sep_replace'].as_str()
if re.match(r'^\w:', value):
value = re.sub(r'(?<=^\w):', sep_drive, value)
for sep in (os.path.sep, os.path.altsep):
if sep:
value = value.replace(sep, sep_repl)
return value
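# Illustrative sketch (assumes `item` is any Model instance with an
# 'artist' field; the names are not defined in this module):
#
#     fmt = FormattedMapping(item, for_path=True)
#     fmt['artist']       # formatted unicode, path separators replaced
#     fmt.get('missing')  # falls back to the field type's formatted null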
class LazyConvertDict:
"""Lazily convert types for attributes fetched from the database
"""
def __init__(self, model_cls):
"""Initialize the object empty
"""
self.data = {}
self.model_cls = model_cls
self._converted = {}
def init(self, data):
"""Set the base data that should be lazily converted
"""
self.data = data
def _convert(self, key, value):
"""Convert the attribute type according the the SQL type
"""
return self.model_cls._type(key).from_sql(value)
def __setitem__(self, key, value):
"""Set an attribute value, assume it's already converted
"""
self._converted[key] = value
def __getitem__(self, key):
"""Get an attribute value, converting the type on demand
if needed
"""
if key in self._converted:
return self._converted[key]
elif key in self.data:
value = self._convert(key, self.data[key])
self._converted[key] = value
return value
def __delitem__(self, key):
"""Delete both converted and base data
"""
if key in self._converted:
del self._converted[key]
if key in self.data:
del self.data[key]
def keys(self):
"""Get a list of available field names for this object.
"""
return list(self._converted.keys()) + list(self.data.keys())
def copy(self):
"""Create a copy of the object.
"""
new = self.__class__(self.model_cls)
new.data = self.data.copy()
new._converted = self._converted.copy()
return new
# Act like a dictionary.
def update(self, values):
"""Assign all values in the given dict.
"""
for key, value in values.items():
self[key] = value
def items(self):
"""Iterate over (key, value) pairs that this object contains.
Computed fields are not included.
"""
for key in self:
yield key, self[key]
def get(self, key, default=None):
"""Get the value for a given key or `default` if it does not
exist.
"""
if key in self:
return self[key]
else:
return default
def __contains__(self, key):
"""Determine whether `key` is an attribute on this object.
"""
return key in self.keys()
def __iter__(self):
"""Iterate over the available field names (excluding computed
fields).
"""
return iter(self.keys())
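# Illustrative sketch (`SomeModel` is a hypothetical Model subclass
# whose 'year' field has an integer type):
#
#     d = LazyConvertDict(SomeModel)
#     d.init({'year': '2001'})  # raw values as fetched from SQLite
#     d['year']                 # -> 2001; converted on first access,
#                               #    then cached in `_converted`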
# Abstract base for model classes.
class Model:
"""An abstract object representing an object in the database. Model
objects act like dictionaries (i.e., they allow subscript access like
``obj['field']``). The same field set is available via attribute
access as a shortcut (i.e., ``obj.field``). Three kinds of attributes are
available:
* **Fixed attributes** come from a predetermined list of field
names. These fields correspond to SQLite table columns and are
thus fast to read, write, and query.
* **Flexible attributes** are free-form and do not need to be listed
ahead of time.
* **Computed attributes** are read-only fields computed by a getter
function provided by a plugin.
Access to all three field types is uniform: ``obj.field`` works the
same regardless of whether ``field`` is fixed, flexible, or
computed.
Model objects can optionally be associated with a `Library` object,
in which case they can be loaded and stored from the database. Dirty
flags are used to track which fields need to be stored.
"""
# Abstract components (to be provided by subclasses).
_table = None
"""The main SQLite table name.
"""
_flex_table = None
"""The flex field SQLite table name.
"""
_fields = {}
"""A mapping indicating available "fixed" fields on this type. The
keys are field names and the values are `Type` objects.
"""
_search_fields = ()
"""The fields that should be queried by default by unqualified query
terms.
"""
_types = {}
"""Optional Types for non-fixed (i.e., flexible and computed) fields.
"""
_sorts = {}
"""Optional named sort criteria. The keys are strings and the values
are subclasses of `Sort`.
"""
_queries = {}
"""Named queries that use a field-like `name:value` syntax but which
do not relate to any specific field.
"""
_always_dirty = False
"""By default, fields only become "dirty" when their value actually
changes. Enabling this flag marks fields as dirty even when the new
value is the same as the old value (e.g., `o.f = o.f`).
"""
_revision = -1
"""A revision number from when the model was loaded from or written
to the database.
"""
@classmethod
def _getters(cls):
"""Return a mapping from field names to getter functions.
"""
# We could cache this if it becomes a performance problem to
# gather the getter mapping every time.
raise NotImplementedError()
def _template_funcs(self):
"""Return a mapping from function names to text-transformer
functions.
"""
# As above: we could consider caching this result.
raise NotImplementedError()
# Basic operation.
def __init__(self, db=None, **values):
"""Create a new object with an optional Database association and
initial field values.
"""
self._db = db
self._dirty = set()
self._values_fixed = LazyConvertDict(self)
self._values_flex = LazyConvertDict(self)
# Initial contents.
self.update(values)
self.clear_dirty()
@classmethod
def _awaken(cls, db=None, fixed_values={}, flex_values={}):
"""Create an object with values drawn from the database.
This is a performance optimization: the checks involved with
ordinary construction are bypassed.
"""
obj = cls(db)
obj._values_fixed.init(fixed_values)
obj._values_flex.init(flex_values)
return obj
def __repr__(self):
return '{}({})'.format(
type(self).__name__,
', '.join(f'{k}={v!r}' for k, v in dict(self).items()),
)
def clear_dirty(self):
"""Mark all fields as *clean* (i.e., not needing to be stored to
the database). Also update the revision.
"""
self._dirty = set()
if self._db:
self._revision = self._db.revision
def _check_db(self, need_id=True):
"""Ensure that this object is associated with a database row: it
has a reference to a database (`_db`) and an id. A ValueError
exception is raised otherwise.
"""
if not self._db:
raise ValueError(
'{} has no database'.format(type(self).__name__)
)
if need_id and not self.id:
raise ValueError('{} has no id'.format(type(self).__name__))
def copy(self):
"""Create a copy of the model object.
The field values and other state is duplicated, but the new copy
remains associated with the same database as the old object.
(A simple `copy.deepcopy` will not work because it would try to
duplicate the SQLite connection.)
"""
new = self.__class__()
new._db = self._db
new._values_fixed = self._values_fixed.copy()
new._values_flex = self._values_flex.copy()
new._dirty = self._dirty.copy()
return new
# Essential field accessors.
@classmethod
def _type(cls, key):
"""Get the type of a field, a `Type` instance.
If the field has no explicit type, it is given the base `Type`,
which does no conversion.
"""
return cls._fields.get(key) or cls._types.get(key) or types.DEFAULT
def _get(self, key, default=None, raise_=False):
"""Get the value for a field, or `default`. Alternatively,
raise a KeyError if the field is not available.
"""
getters = self._getters()
if key in getters: # Computed.
return getters[key](self)
elif key in self._fields: # Fixed.
if key in self._values_fixed:
return self._values_fixed[key]
else:
return self._type(key).null
elif key in self._values_flex: # Flexible.
return self._values_flex[key]
elif raise_:
raise KeyError(key)
else:
return default
get = _get
def __getitem__(self, key):
"""Get the value for a field. Raise a KeyError if the field is
not available.
"""
return self._get(key, raise_=True)
def _setitem(self, key, value):
"""Assign the value for a field, return whether new and old value
differ.
"""
# Choose where to place the value.
if key in self._fields:
source = self._values_fixed
else:
source = self._values_flex
# If the field has a type, filter the value.
value = self._type(key).normalize(value)
# Assign value and possibly mark as dirty.
old_value = source.get(key)
source[key] = value
changed = old_value != value
if self._always_dirty or changed:
self._dirty.add(key)
return changed
def __setitem__(self, key, value):
"""Assign the value for a field.
"""
self._setitem(key, value)
def __delitem__(self, key):
"""Remove a flexible attribute from the model.
"""
if key in self._values_flex: # Flexible.
del self._values_flex[key]
self._dirty.add(key) # Mark for dropping on store.
elif key in self._fields: # Fixed
setattr(self, key, self._type(key).null)
elif key in self._getters(): # Computed.
raise KeyError(f'computed field {key} cannot be deleted')
else:
raise KeyError(f'no such field {key}')
def keys(self, computed=False):
"""Get a list of available field names for this object. The
`computed` parameter controls whether computed (plugin-provided)
fields are included in the key list.
"""
base_keys = list(self._fields) + list(self._values_flex.keys())
if computed:
return base_keys + list(self._getters().keys())
else:
return base_keys
@classmethod
def all_keys(cls):
"""Get a list of available keys for objects of this type.
Includes fixed and computed fields.
"""
return list(cls._fields) + list(cls._getters().keys())
# Act like a dictionary.
def update(self, values):
"""Assign all values in the given dict.
"""
for key, value in values.items():
self[key] = value
def items(self):
"""Iterate over (key, value) pairs that this object contains.
Computed fields are not included.
"""
for key in self:
yield key, self[key]
def __contains__(self, key):
"""Determine whether `key` is an attribute on this object.
"""
return key in self.keys(computed=True)
def __iter__(self):
"""Iterate over the available field names (excluding computed
fields).
"""
return iter(self.keys())
# Convenient attribute access.
def __getattr__(self, key):
if key.startswith('_'):
raise AttributeError(f'model has no attribute {key!r}')
else:
try:
return self[key]
except KeyError:
raise AttributeError(f'no such field {key!r}')
def __setattr__(self, key, value):
if key.startswith('_'):
super().__setattr__(key, value)
else:
self[key] = value
def __delattr__(self, key):
if key.startswith('_'):
super().__delattr__(key)
else:
del self[key]
# Database interaction (CRUD methods).
def store(self, fields=None):
"""Save the object's metadata into the library database.
:param fields: the fields to be stored. If not specified, all fields
will be.
"""
if fields is None:
fields = self._fields
self._check_db()
# Build assignments for query.
assignments = []
subvars = []
for key in fields:
if key != 'id' and key in self._dirty:
self._dirty.remove(key)
assignments.append(key + '=?')
value = self._type(key).to_sql(self[key])
subvars.append(value)
assignments = ','.join(assignments)
with self._db.transaction() as tx:
# Main table update.
if assignments:
query = 'UPDATE {} SET {} WHERE id=?'.format(
self._table, assignments
)
subvars.append(self.id)
tx.mutate(query, subvars)
# Modified/added flexible attributes.
for key, value in self._values_flex.items():
if key in self._dirty:
self._dirty.remove(key)
tx.mutate(
'INSERT INTO {} '
'(entity_id, key, value) '
'VALUES (?, ?, ?);'.format(self._flex_table),
(self.id, key, value),
)
# Deleted flexible attributes.
for key in self._dirty:
tx.mutate(
'DELETE FROM {} '
'WHERE entity_id=? AND key=?'.format(self._flex_table),
(self.id, key)
)
self.clear_dirty()
def load(self):
"""Refresh the object's metadata from the library database.
        The database is only queried when a transaction has been
        committed since the object was last loaded.
"""
self._check_db()
if not self._dirty and self._db.revision == self._revision:
# Exit early
return
stored_obj = self._db._get(type(self), self.id)
assert stored_obj is not None, f"object {self.id} not in DB"
self._values_fixed = LazyConvertDict(self)
self._values_flex = LazyConvertDict(self)
self.update(dict(stored_obj))
self.clear_dirty()
def remove(self):
"""Remove the object's associated rows from the database.
"""
self._check_db()
with self._db.transaction() as tx:
tx.mutate(
f'DELETE FROM {self._table} WHERE id=?',
(self.id,)
)
tx.mutate(
f'DELETE FROM {self._flex_table} WHERE entity_id=?',
(self.id,)
)
def add(self, db=None):
"""Add the object to the library database. This object must be
associated with a database; you can provide one via the `db`
parameter or use the currently associated database.
The object's `id` and `added` fields are set along with any
current field values.
"""
if db:
self._db = db
self._check_db(False)
with self._db.transaction() as tx:
new_id = tx.mutate(
f'INSERT INTO {self._table} DEFAULT VALUES'
)
self.id = new_id
self.added = time.time()
# Mark every non-null field as dirty and store.
for key in self:
if self[key] is not None:
self._dirty.add(key)
self.store()
# Formatting and templating.
_formatter = FormattedMapping
def formatted(self, included_keys=_formatter.ALL_KEYS, for_path=False):
"""Get a mapping containing all values on this object formatted
as human-readable unicode strings.
"""
return self._formatter(self, included_keys, for_path)
def evaluate_template(self, template, for_path=False):
"""Evaluate a template (a string or a `Template` object) using
the object's fields. If `for_path` is true, then no new path
separators will be added to the template.
"""
# Perform substitution.
if isinstance(template, str):
template = functemplate.template(template)
return template.substitute(self.formatted(for_path=for_path),
self._template_funcs())
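    # A small sketch of template evaluation (assumes an Item-like model
    # with 'artist' and 'title' fields; values are illustrative):
    #
    #     obj.evaluate_template('$artist - $title')
    #     # -> 'Some Artist - Some Title', using formatted field values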
# Parsing.
@classmethod
def _parse(cls, key, string):
"""Parse a string as a value for the given key.
"""
if not isinstance(string, str):
raise TypeError("_parse() argument must be a string")
return cls._type(key).parse(string)
def set_parse(self, key, string):
"""Set the object's key to a value represented by a string.
"""
self[key] = self._parse(key, string)
# Database controller and supporting interfaces.
class Results:
"""An item query result set. Iterating over the collection lazily
constructs LibModel objects that reflect database rows.
"""
def __init__(self, model_class, rows, db, flex_rows,
query=None, sort=None):
"""Create a result set that will construct objects of type
`model_class`.
`model_class` is a subclass of `LibModel` that will be
constructed. `rows` is a query result: a list of mappings. The
new objects will be associated with the database `db`.
If `query` is provided, it is used as a predicate to filter the
results for a "slow query" that cannot be evaluated by the
database directly. If `sort` is provided, it is used to sort the
full list of results before returning. This means it is a "slow
sort" and all objects must be built before returning the first
one.
"""
self.model_class = model_class
self.rows = rows
self.db = db
self.query = query
self.sort = sort
self.flex_rows = flex_rows
# We keep a queue of rows we haven't yet consumed for
# materialization. We preserve the original total number of
# rows.
self._rows = rows
self._row_count = len(rows)
# The materialized objects corresponding to rows that have been
# consumed.
self._objects = []
def _get_objects(self):
"""Construct and generate Model objects for they query. The
objects are returned in the order emitted from the database; no
slow sort is applied.
For performance, this generator caches materialized objects to
avoid constructing them more than once. This way, iterating over
a `Results` object a second time should be much faster than the
first.
"""
# Index flexible attributes by the item ID, so we have easier access
flex_attrs = self._get_indexed_flex_attrs()
index = 0 # Position in the materialized objects.
while index < len(self._objects) or self._rows:
# Are there previously-materialized objects to produce?
if index < len(self._objects):
yield self._objects[index]
index += 1
# Otherwise, we consume another row, materialize its object
# and produce it.
else:
while self._rows:
row = self._rows.pop(0)
obj = self._make_model(row, flex_attrs.get(row['id'], {}))
                    # If there is a slow-query predicate, ensure that the
# object passes it.
if not self.query or self.query.match(obj):
self._objects.append(obj)
index += 1
yield obj
break
def __iter__(self):
"""Construct and generate Model objects for all matching
objects, in sorted order.
"""
if self.sort:
# Slow sort. Must build the full list first.
objects = self.sort.sort(list(self._get_objects()))
return iter(objects)
else:
# Objects are pre-sorted (i.e., by the database).
return self._get_objects()
def _get_indexed_flex_attrs(self):
""" Index flexible attributes by the entity id they belong to
"""
flex_values = {}
for row in self.flex_rows:
if row['entity_id'] not in flex_values:
flex_values[row['entity_id']] = {}
flex_values[row['entity_id']][row['key']] = row['value']
return flex_values
def _make_model(self, row, flex_values={}):
""" Create a Model object for the given row
"""
cols = dict(row)
values = {k: v for (k, v) in cols.items()
                  if not k.startswith('flex')}
# Construct the Python object
obj = self.model_class._awaken(self.db, values, flex_values)
return obj
def __len__(self):
"""Get the number of matching objects.
"""
if not self._rows:
# Fully materialized. Just count the objects.
return len(self._objects)
elif self.query:
# A slow query. Fall back to testing every object.
count = 0
for obj in self:
count += 1
return count
else:
# A fast query. Just count the rows.
return self._row_count
def __nonzero__(self):
"""Does this result contain any objects?
"""
return self.__bool__()
def __bool__(self):
"""Does this result contain any objects?
"""
return bool(len(self))
def __getitem__(self, n):
"""Get the nth item in this result set. This is inefficient: all
items up to n are materialized and thrown away.
"""
if not self._rows and not self.sort:
# Fully materialized and already in order. Just look up the
# object.
return self._objects[n]
it = iter(self)
try:
for i in range(n):
next(it)
return next(it)
except StopIteration:
raise IndexError(f'result index {n} out of range')
def get(self):
"""Return the first matching object, or None if no objects
match.
"""
it = iter(self)
try:
return next(it)
except StopIteration:
return None
class Transaction:
"""A context manager for safe, concurrent access to the database.
All SQL commands should be executed through a transaction.
"""
_mutated = False
"""A flag storing whether a mutation has been executed in the
current transaction.
"""
def __init__(self, db):
self.db = db
def __enter__(self):
"""Begin a transaction. This transaction may be created while
another is active in a different thread.
"""
with self.db._tx_stack() as stack:
first = not stack
stack.append(self)
if first:
# Beginning a "root" transaction, which corresponds to an
# SQLite transaction.
self.db._db_lock.acquire()
return self
def __exit__(self, exc_type, exc_value, traceback):
"""Complete a transaction. This must be the most recently
entered but not yet exited transaction. If it is the last active
transaction, the database updates are committed.
"""
# Beware of races; currently secured by db._db_lock
self.db.revision += self._mutated
with self.db._tx_stack() as stack:
assert stack.pop() is self
empty = not stack
if empty:
# Ending a "root" transaction. End the SQLite transaction.
self.db._connection().commit()
self._mutated = False
self.db._db_lock.release()
def query(self, statement, subvals=()):
"""Execute an SQL statement with substitution values and return
a list of rows from the database.
"""
cursor = self.db._connection().execute(statement, subvals)
return cursor.fetchall()
def mutate(self, statement, subvals=()):
"""Execute an SQL statement with substitution values and return
the row ID of the last affected row.
"""
try:
cursor = self.db._connection().execute(statement, subvals)
except sqlite3.OperationalError as e:
# In two specific cases, SQLite reports an error while accessing
# the underlying database file. We surface these exceptions as
# DBAccessError so the application can abort.
if e.args[0] in ("attempt to write a readonly database",
"unable to open database file"):
raise DBAccessError(e.args[0])
else:
raise
else:
self._mutated = True
return cursor.lastrowid
def script(self, statements):
"""Execute a string containing multiple SQL statements."""
# We don't know whether this mutates, but quite likely it does.
self._mutated = True
self.db._connection().executescript(statements)
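# Typical transaction usage sketch (assumes `db` is a Database
# instance; the table and values are hypothetical):
#
#     with db.transaction() as tx:
#         rows = tx.query('SELECT * FROM items WHERE id=?', (1,))
#         tx.mutate('UPDATE items SET title=? WHERE id=?', ('x', 1))
#
# Nested transactions on the same thread join the root transaction and
# commit only when the outermost one exits.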
class Database:
"""A container for Model objects that wraps an SQLite database as
the backend.
"""
_models = ()
"""The Model subclasses representing tables in this database.
"""
supports_extensions = hasattr(sqlite3.Connection, 'enable_load_extension')
"""Whether or not the current version of SQLite supports extensions"""
revision = 0
"""The current revision of the database. To be increased whenever
data is written in a transaction.
"""
def __init__(self, path, timeout=5.0):
self.path = path
self.timeout = timeout
self._connections = {}
self._tx_stacks = defaultdict(list)
self._extensions = []
# A lock to protect the _connections and _tx_stacks maps, which
# both map thread IDs to private resources.
self._shared_map_lock = threading.Lock()
# A lock to protect access to the database itself. SQLite does
# allow multiple threads to access the database at the same
# time, but many users were experiencing crashes related to this
# capability: where SQLite was compiled without HAVE_USLEEP, its
# backoff algorithm in the case of contention was causing
# whole-second sleeps (!) that would trigger its internal
# timeout. Using this lock ensures only one SQLite transaction
# is active at a time.
self._db_lock = threading.Lock()
# Set up database schema.
for model_cls in self._models:
self._make_table(model_cls._table, model_cls._fields)
self._make_attribute_table(model_cls._flex_table)
# Primitive access control: connections and transactions.
def _connection(self):
"""Get a SQLite connection object to the underlying database.
One connection object is created per thread.
"""
thread_id = threading.current_thread().ident
with self._shared_map_lock:
if thread_id in self._connections:
return self._connections[thread_id]
else:
conn = self._create_connection()
self._connections[thread_id] = conn
return conn
def _create_connection(self):
"""Create a SQLite connection to the underlying database.
Makes a new connection every time. If you need to configure the
connection settings (e.g., add custom functions), override this
method.
"""
# Make a new connection. The `sqlite3` module can't use
# bytestring paths here on Python 3, so we need to
# provide a `str` using `py3_path`.
conn = sqlite3.connect(
py3_path(self.path), timeout=self.timeout
)
if self.supports_extensions:
conn.enable_load_extension(True)
        # Load any extensions that are already loaded for other connections.
for path in self._extensions:
conn.load_extension(path)
# Access SELECT results like dictionaries.
conn.row_factory = sqlite3.Row
return conn
def _close(self):
"""Close the all connections to the underlying SQLite database
from all threads. This does not render the database object
unusable; new connections can still be opened on demand.
"""
with self._shared_map_lock:
self._connections.clear()
@contextlib.contextmanager
def _tx_stack(self):
"""A context manager providing access to the current thread's
transaction stack. The context manager synchronizes access to
the stack map. Transactions should never migrate across threads.
"""
thread_id = threading.current_thread().ident
with self._shared_map_lock:
yield self._tx_stacks[thread_id]
def transaction(self):
"""Get a :class:`Transaction` object for interacting directly
with the underlying SQLite database.
"""
return Transaction(self)
def load_extension(self, path):
"""Load an SQLite extension into all open connections."""
if not self.supports_extensions:
raise ValueError(
'this sqlite3 installation does not support extensions')
self._extensions.append(path)
# Load the extension into every open connection.
for conn in self._connections.values():
conn.load_extension(path)
# Schema setup and migration.
def _make_table(self, table, fields):
"""Set up the schema of the database. `fields` is a mapping
from field names to `Type`s. Columns are added if necessary.
"""
# Get current schema.
with self.transaction() as tx:
rows = tx.query('PRAGMA table_info(%s)' % table)
current_fields = {row[1] for row in rows}
field_names = set(fields.keys())
if current_fields.issuperset(field_names):
# Table exists and has all the required columns.
return
if not current_fields:
# No table exists.
columns = []
for name, typ in fields.items():
columns.append(f'{name} {typ.sql}')
setup_sql = 'CREATE TABLE {} ({});\n'.format(table,
', '.join(columns))
else:
            # Table exists but does not match the field set.
setup_sql = ''
for name, typ in fields.items():
if name in current_fields:
continue
setup_sql += 'ALTER TABLE {} ADD COLUMN {} {};\n'.format(
table, name, typ.sql
)
with self.transaction() as tx:
tx.script(setup_sql)
def _make_attribute_table(self, flex_table):
"""Create a table and associated index for flexible attributes
for the given entity (if they don't exist).
"""
with self.transaction() as tx:
tx.script("""
CREATE TABLE IF NOT EXISTS {0} (
id INTEGER PRIMARY KEY,
entity_id INTEGER,
key TEXT,
value TEXT,
UNIQUE(entity_id, key) ON CONFLICT REPLACE);
CREATE INDEX IF NOT EXISTS {0}_by_entity
ON {0} (entity_id);
""".format(flex_table))
# Querying.
def _fetch(self, model_cls, query=None, sort=None):
"""Fetch the objects of type `model_cls` matching the given
query. The query may be given as a string, string sequence, a
Query object, or None (to fetch everything). `sort` is an
`Sort` object.
"""
query = query or TrueQuery() # A null query.
sort = sort or NullSort() # Unsorted.
where, subvals = query.clause()
order_by = sort.order_clause()
sql = ("SELECT * FROM {} WHERE {} {}").format(
model_cls._table,
where or '1',
f"ORDER BY {order_by}" if order_by else '',
)
# Fetch flexible attributes for items matching the main query.
        # Doing the per-item filtering in Python is faster than issuing
        # one query per item to SQLite.
flex_sql = ("""
SELECT * FROM {} WHERE entity_id IN
(SELECT id FROM {} WHERE {});
""".format(
model_cls._flex_table,
model_cls._table,
where or '1',
)
)
with self.transaction() as tx:
rows = tx.query(sql, subvals)
flex_rows = tx.query(flex_sql, subvals)
return Results(
model_cls, rows, self, flex_rows,
None if where else query, # Slow query component.
sort if sort.is_slow() else None, # Slow sort component.
)
def _get(self, model_cls, id):
"""Get a Model object by its id or None if the id does not
exist.
"""
return self._fetch(model_cls, MatchQuery('id', id)).get()
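# End-to-end sketch of defining and using a database (all names below
# are hypothetical, not part of this module):
#
#     class Note(Model):
#         _table = 'notes'
#         _flex_table = 'note_attributes'
#         _fields = {'id': types.PRIMARY_ID, 'title': types.STRING}
#
#         @classmethod
#         def _getters(cls):
#             return {}  # no computed fields
#
#         def _template_funcs(self):
#             return {}
#
#     class NoteDB(Database):
#         _models = (Note,)
#
#     db = NoteDB(':memory:')
#     note = Note(title='hello')
#     note.add(db)            # assigns `id` and `added`, then stores
#     db._get(Note, note.id)  # fetches it back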
| 37,013
|
Python
|
.py
| 919
| 30.558215
| 79
| 0.591779
|
rembo10/headphones
| 3,370
| 601
| 527
|
GPL-3.0
|
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
|
9,256
|
queryparse.py
|
rembo10_headphones/lib/beets/dbcore/queryparse.py
|
# This file is part of beets.
# Copyright 2016, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Parsing of strings into DBCore queries.
"""
import re
import itertools
from . import query
PARSE_QUERY_PART_REGEX = re.compile(
# Non-capturing optional segment for the keyword.
r'(-|\^)?' # Negation prefixes.
r'(?:'
r'(\S+?)' # The field key.
r'(?<!\\):' # Unescaped :
r')?'
r'(.*)', # The term itself.
re.I # Case-insensitive.
)
def parse_query_part(part, query_classes={}, prefixes={},
default_class=query.SubstringQuery):
"""Parse a single *query part*, which is a chunk of a complete query
string representing a single criterion.
A query part is a string consisting of:
- A *pattern*: the value to look for.
- Optionally, a *field name* preceding the pattern, separated by a
colon. So in `foo:bar`, `foo` is the field name and `bar` is the
pattern.
- Optionally, a *query prefix* just before the pattern (and after the
optional colon) indicating the type of query that should be used. For
example, in `~foo`, `~` might be a prefix. (The set of prefixes to
look for is given in the `prefixes` parameter.)
- Optionally, a negation indicator, `-` or `^`, at the very beginning.
Both prefixes and the separating `:` character may be escaped with a
backslash to avoid their normal meaning.
The function returns a tuple consisting of:
- The field name: a string or None if it's not present.
- The pattern, a string.
- The query class to use, which inherits from the base
:class:`Query` type.
- A negation flag, a bool.
The three optional parameters determine which query class is used (i.e.,
the third return value). They are:
- `query_classes`, which maps field names to query classes. These
are used when no explicit prefix is present.
- `prefixes`, which maps prefix strings to query classes.
- `default_class`, the fallback when neither the field nor a prefix
indicates a query class.
So the precedence for determining which query class to return is:
prefix, followed by field, and finally the default.
For example, assuming the `:` prefix is used for `RegexpQuery`:
- `'stapler'` -> `(None, 'stapler', SubstringQuery, False)`
- `'color:red'` -> `('color', 'red', SubstringQuery, False)`
- `':^Quiet'` -> `(None, '^Quiet', RegexpQuery, False)`, because
the `^` follows the `:`
- `'color::b..e'` -> `('color', 'b..e', RegexpQuery, False)`
- `'-color:red'` -> `('color', 'red', SubstringQuery, True)`
"""
# Apply the regular expression and extract the components.
part = part.strip()
match = PARSE_QUERY_PART_REGEX.match(part)
assert match # Regex should always match
negate = bool(match.group(1))
key = match.group(2)
term = match.group(3).replace('\\:', ':')
# Check whether there's a prefix in the query and use the
# corresponding query type.
for pre, query_class in prefixes.items():
if term.startswith(pre):
return key, term[len(pre):], query_class, negate
# No matching prefix, so use either the query class determined by
# the field or the default as a fallback.
query_class = query_classes.get(key, default_class)
return key, term, query_class, negate
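# Quick sketch of the return value (the prefix map is illustrative):
#
#     parse_query_part('-title::foo.*', prefixes={':': query.RegexpQuery})
#     # -> ('title', 'foo.*', query.RegexpQuery, True)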
def construct_query_part(model_cls, prefixes, query_part):
"""Parse a *query part* string and return a :class:`Query` object.
:param model_cls: The :class:`Model` class that this is a query for.
This is used to determine the appropriate query types for the
model's fields.
:param prefixes: A map from prefix strings to :class:`Query` types.
:param query_part: The string to parse.
See the documentation for `parse_query_part` for more information on
query part syntax.
"""
# A shortcut for empty query parts.
if not query_part:
return query.TrueQuery()
# Use `model_cls` to build up a map from field (or query) names to
# `Query` classes.
query_classes = {}
for k, t in itertools.chain(model_cls._fields.items(),
model_cls._types.items()):
query_classes[k] = t.query
query_classes.update(model_cls._queries) # Non-field queries.
# Parse the string.
key, pattern, query_class, negate = \
parse_query_part(query_part, query_classes, prefixes)
# If there's no key (field name) specified, this is a "match
# anything" query.
if key is None:
if issubclass(query_class, query.FieldQuery):
# The query type matches a specific field, but none was
# specified. So we use a version of the query that matches
# any field.
out_query = query.AnyFieldQuery(pattern, model_cls._search_fields,
query_class)
else:
# Non-field query type.
out_query = query_class(pattern)
# Field queries get constructed according to the name of the field
# they are querying.
elif issubclass(query_class, query.FieldQuery):
key = key.lower()
        out_query = query_class(key, pattern, key in model_cls._fields)
# Non-field (named) query.
else:
out_query = query_class(pattern)
# Apply negation.
if negate:
return query.NotQuery(out_query)
else:
return out_query
def query_from_strings(query_cls, model_cls, prefixes, query_parts):
"""Creates a collection query of type `query_cls` from a list of
strings in the format used by parse_query_part. `model_cls`
determines how queries are constructed from strings.
"""
subqueries = []
for part in query_parts:
subqueries.append(construct_query_part(model_cls, prefixes, part))
if not subqueries: # No terms in query.
subqueries = [query.TrueQuery()]
return query_cls(subqueries)
def construct_sort_part(model_cls, part, case_insensitive=True):
"""Create a `Sort` from a single string criterion.
`model_cls` is the `Model` being queried. `part` is a single string
ending in ``+`` or ``-`` indicating the sort. `case_insensitive`
    indicates whether the sort should ignore case.
"""
assert part, "part must be a field name and + or -"
field = part[:-1]
assert field, "field is missing"
direction = part[-1]
assert direction in ('+', '-'), "part must end with + or -"
is_ascending = direction == '+'
if field in model_cls._sorts:
sort = model_cls._sorts[field](model_cls, is_ascending,
case_insensitive)
elif field in model_cls._fields:
sort = query.FixedFieldSort(field, is_ascending, case_insensitive)
else:
# Flexible or computed.
sort = query.SlowFieldSort(field, is_ascending, case_insensitive)
return sort
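# Sketch (assumes a Model subclass `Item` where 'year' is a fixed field
# and 'genre_note' is flexible; both names are illustrative):
#
#     construct_sort_part(Item, 'year+')        # FixedFieldSort, ascending
#     construct_sort_part(Item, 'genre_note-')  # SlowFieldSort, descending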
def sort_from_strings(model_cls, sort_parts, case_insensitive=True):
"""Create a `Sort` from a list of sort criteria (strings).
"""
if not sort_parts:
sort = query.NullSort()
elif len(sort_parts) == 1:
sort = construct_sort_part(model_cls, sort_parts[0], case_insensitive)
else:
sort = query.MultipleSort()
for part in sort_parts:
sort.add_sort(construct_sort_part(model_cls, part,
case_insensitive))
return sort
def parse_sorted_query(model_cls, parts, prefixes={},
case_insensitive=True):
"""Given a list of strings, create the `Query` and `Sort` that they
represent.
"""
# Separate query token and sort token.
query_parts = []
sort_parts = []
    # Split the query into comma-separated subqueries, each representing
# an AndQuery, which need to be joined together in one OrQuery
subquery_parts = []
for part in parts + [',']:
if part.endswith(','):
# Ensure we can catch "foo, bar" as well as "foo , bar"
last_subquery_part = part[:-1]
if last_subquery_part:
subquery_parts.append(last_subquery_part)
            # Parse the subquery into a single AndQuery
# TODO: Avoid needlessly wrapping AndQueries containing 1 subquery?
query_parts.append(query_from_strings(
query.AndQuery, model_cls, prefixes, subquery_parts
))
del subquery_parts[:]
else:
# Sort parts (1) end in + or -, (2) don't have a field, and
# (3) consist of more than just the + or -.
if part.endswith(('+', '-')) \
and ':' not in part \
and len(part) > 1:
sort_parts.append(part)
else:
subquery_parts.append(part)
# Avoid needlessly wrapping single statements in an OR
q = query.OrQuery(query_parts) if len(query_parts) > 1 else query_parts[0]
s = sort_from_strings(model_cls, sort_parts, case_insensitive)
return q, s
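# Sketch for a token list like `foo , bar year+` (the `Item` model
# class is assumed, not defined here):
#
#     q, s = parse_sorted_query(Item, ['foo', ',', 'bar', 'year+'])
#     # q == OrQuery([AndQuery([...]), AndQuery([...])])
#     # s sorts the combined matches by year, ascending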
| 9,680
|
Python
|
.py
| 213
| 38.239437
| 79
| 0.647009
|
rembo10/headphones
| 3,370
| 601
| 527
|
GPL-3.0
|
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
|
9,257
|
__init__.py
|
rembo10_headphones/lib/beets/dbcore/__init__.py
|
# This file is part of beets.
# Copyright 2016, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""DBCore is an abstract database package that forms the basis for beets'
Library.
"""
from .db import Model, Database
from .query import Query, FieldQuery, MatchQuery, AndQuery, OrQuery
from .types import Type
from .queryparse import query_from_strings
from .queryparse import sort_from_strings
from .queryparse import parse_sorted_query
from .query import InvalidQueryError
# flake8: noqa
| 1,040
|
Python
|
.py
| 24
| 42.208333
| 73
| 0.803554
|
rembo10/headphones
| 3,370
| 601
| 527
|
GPL-3.0
|
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
|
9,258
|
query.py
|
rembo10_headphones/lib/beets/dbcore/query.py
|
# This file is part of beets.
# Copyright 2016, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""The Query type hierarchy for DBCore.
"""
import re
from operator import mul
from beets import util
from datetime import datetime, timedelta
import unicodedata
from functools import reduce
class ParsingError(ValueError):
"""Abstract class for any unparseable user-requested album/query
specification.
"""
class InvalidQueryError(ParsingError):
"""Represent any kind of invalid query.
The query should be a unicode string or a list, which will be space-joined.
"""
def __init__(self, query, explanation):
if isinstance(query, list):
query = " ".join(query)
message = f"'{query}': {explanation}"
super().__init__(message)
class InvalidQueryArgumentValueError(ParsingError):
"""Represent a query argument that could not be converted as expected.
    It exists to be caught at higher stack levels so that a meaningful
    InvalidQueryError (i.e., one that includes the query) can be raised.
"""
def __init__(self, what, expected, detail=None):
message = f"'{what}' is not {expected}"
if detail:
message = f"{message}: {detail}"
super().__init__(message)
class Query:
"""An abstract class representing a query into the item database.
"""
def clause(self):
"""Generate an SQLite expression implementing the query.
Return (clause, subvals) where clause is a valid sqlite
WHERE clause implementing the query and subvals is a list of
items to be substituted for ?s in the clause.
"""
return None, ()
def match(self, item):
"""Check whether this query matches a given Item. Can be used to
perform queries on arbitrary sets of Items.
"""
raise NotImplementedError
def __repr__(self):
return f"{self.__class__.__name__}()"
def __eq__(self, other):
return type(self) == type(other)
def __hash__(self):
return 0
class FieldQuery(Query):
"""An abstract query that searches in a specific field for a
pattern. Subclasses must provide a `value_match` class method, which
determines whether a certain pattern string matches a certain value
string. Subclasses may also provide `col_clause` to implement the
same matching functionality in SQLite.
"""
def __init__(self, field, pattern, fast=True):
self.field = field
self.pattern = pattern
self.fast = fast
def col_clause(self):
return None, ()
def clause(self):
if self.fast:
return self.col_clause()
else:
# Matching a flexattr. This is a slow query.
return None, ()
@classmethod
def value_match(cls, pattern, value):
"""Determine whether the value matches the pattern. Both
arguments are strings.
"""
raise NotImplementedError()
def match(self, item):
return self.value_match(self.pattern, item.get(self.field))
def __repr__(self):
return ("{0.__class__.__name__}({0.field!r}, {0.pattern!r}, "
"{0.fast})".format(self))
def __eq__(self, other):
return super().__eq__(other) and \
self.field == other.field and self.pattern == other.pattern
def __hash__(self):
return hash((self.field, hash(self.pattern)))
class MatchQuery(FieldQuery):
"""A query that looks for exact matches in an item field."""
def col_clause(self):
return self.field + " = ?", [self.pattern]
@classmethod
def value_match(cls, pattern, value):
return pattern == value
class NoneQuery(FieldQuery):
"""A query that checks whether a field is null."""
def __init__(self, field, fast=True):
super().__init__(field, None, fast)
def col_clause(self):
return self.field + " IS NULL", ()
def match(self, item):
return item.get(self.field) is None
def __repr__(self):
return "{0.__class__.__name__}({0.field!r}, {0.fast})".format(self)
class StringFieldQuery(FieldQuery):
"""A FieldQuery that converts values to strings before matching
them.
"""
@classmethod
def value_match(cls, pattern, value):
"""Determine whether the value matches the pattern. The value
may have any type.
"""
return cls.string_match(pattern, util.as_string(value))
@classmethod
def string_match(cls, pattern, value):
"""Determine whether the value matches the pattern. Both
arguments are strings. Subclasses implement this method.
"""
raise NotImplementedError()
class SubstringQuery(StringFieldQuery):
"""A query that matches a substring in a specific item field."""
def col_clause(self):
pattern = (self.pattern
.replace('\\', '\\\\')
.replace('%', '\\%')
.replace('_', '\\_'))
search = '%' + pattern + '%'
clause = self.field + " like ? escape '\\'"
subvals = [search]
return clause, subvals
@classmethod
def string_match(cls, pattern, value):
return pattern.lower() in value.lower()
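# Sketch of the generated clause; note how SQL wildcards in user input
# are escaped (the field name is illustrative):
#
#     SubstringQuery('title', '100%').col_clause()
#     # -> ("title like ? escape '\\'", ['%100\\%%'])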
class RegexpQuery(StringFieldQuery):
"""A query that matches a regular expression in a specific item
field.
Raises InvalidQueryError when the pattern is not a valid regular
expression.
"""
def __init__(self, field, pattern, fast=True):
super().__init__(field, pattern, fast)
pattern = self._normalize(pattern)
try:
            # Compile the normalized pattern so that it is matched
            # against normalized values consistently.
            self.pattern = re.compile(pattern)
except re.error as exc:
# Invalid regular expression.
raise InvalidQueryArgumentValueError(pattern,
"a regular expression",
format(exc))
@staticmethod
def _normalize(s):
"""Normalize a Unicode string's representation (used on both
patterns and matched values).
"""
return unicodedata.normalize('NFC', s)
@classmethod
def string_match(cls, pattern, value):
return pattern.search(cls._normalize(value)) is not None
class BooleanQuery(MatchQuery):
"""Matches a boolean field. Pattern should either be a boolean or a
string reflecting a boolean.
"""
def __init__(self, field, pattern, fast=True):
super().__init__(field, pattern, fast)
if isinstance(pattern, str):
self.pattern = util.str2bool(pattern)
self.pattern = int(self.pattern)
class BytesQuery(MatchQuery):
"""Match a raw bytes field (i.e., a path). This is a necessary hack
to work around the `sqlite3` module's desire to treat `bytes` and
`unicode` equivalently in Python 2. Always use this query instead of
`MatchQuery` when matching on BLOB values.
"""
def __init__(self, field, pattern):
super().__init__(field, pattern)
# Use a buffer/memoryview representation of the pattern for SQLite
# matching. This instructs SQLite to treat the blob as binary
# rather than encoded Unicode.
if isinstance(self.pattern, (str, bytes)):
if isinstance(self.pattern, str):
self.pattern = self.pattern.encode('utf-8')
self.buf_pattern = memoryview(self.pattern)
elif isinstance(self.pattern, memoryview):
self.buf_pattern = self.pattern
self.pattern = bytes(self.pattern)
def col_clause(self):
return self.field + " = ?", [self.buf_pattern]
class NumericQuery(FieldQuery):
"""Matches numeric fields. A syntax using Ruby-style range ellipses
(``..``) lets users specify one- or two-sided ranges. For example,
``year:2001..`` finds music released since the turn of the century.
Raises InvalidQueryError when the pattern does not represent an int or
a float.
"""
def _convert(self, s):
"""Convert a string to a numeric type (float or int).
Return None if `s` is empty.
Raise an InvalidQueryError if the string cannot be converted.
"""
# This is really just a bit of fun premature optimization.
if not s:
return None
try:
return int(s)
except ValueError:
try:
return float(s)
except ValueError:
raise InvalidQueryArgumentValueError(s, "an int or a float")
def __init__(self, field, pattern, fast=True):
super().__init__(field, pattern, fast)
parts = pattern.split('..', 1)
if len(parts) == 1:
# No range.
self.point = self._convert(parts[0])
self.rangemin = None
self.rangemax = None
else:
# One- or two-sided range.
self.point = None
self.rangemin = self._convert(parts[0])
self.rangemax = self._convert(parts[1])
def match(self, item):
if self.field not in item:
return False
value = item[self.field]
if isinstance(value, str):
value = self._convert(value)
if self.point is not None:
return value == self.point
else:
if self.rangemin is not None and value < self.rangemin:
return False
if self.rangemax is not None and value > self.rangemax:
return False
return True
def col_clause(self):
if self.point is not None:
return self.field + '=?', (self.point,)
else:
if self.rangemin is not None and self.rangemax is not None:
return ('{0} >= ? AND {0} <= ?'.format(self.field),
(self.rangemin, self.rangemax))
elif self.rangemin is not None:
return f'{self.field} >= ?', (self.rangemin,)
elif self.rangemax is not None:
return f'{self.field} <= ?', (self.rangemax,)
else:
return '1', ()
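# Sketch of the range syntax (the field name is illustrative):
#
#     NumericQuery('year', '2001..')              # one-sided: >= 2001
#     NumericQuery('year', '1990..1999').col_clause()
#     # -> ('year >= ? AND year <= ?', (1990, 1999))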
class CollectionQuery(Query):
"""An abstract query class that aggregates other queries. Can be
indexed like a list to access the sub-queries.
"""
def __init__(self, subqueries=()):
self.subqueries = subqueries
# Act like a sequence.
def __len__(self):
return len(self.subqueries)
def __getitem__(self, key):
return self.subqueries[key]
def __iter__(self):
return iter(self.subqueries)
def __contains__(self, item):
return item in self.subqueries
def clause_with_joiner(self, joiner):
"""Return a clause created by joining together the clauses of
all subqueries with the string joiner (padded by spaces).
"""
clause_parts = []
subvals = []
for subq in self.subqueries:
subq_clause, subq_subvals = subq.clause()
if not subq_clause:
# Fall back to slow query.
return None, ()
clause_parts.append('(' + subq_clause + ')')
subvals += subq_subvals
clause = (' ' + joiner + ' ').join(clause_parts)
return clause, subvals
def __repr__(self):
return "{0.__class__.__name__}({0.subqueries!r})".format(self)
def __eq__(self, other):
return super().__eq__(other) and \
self.subqueries == other.subqueries
def __hash__(self):
"""Since subqueries are mutable, this object should not be hashable.
        However, for convenience, it can be hashed anyway.
"""
return reduce(mul, map(hash, self.subqueries), 1)
class AnyFieldQuery(CollectionQuery):
"""A query that matches if a given FieldQuery subclass matches in
any field. The individual field query class is provided to the
constructor.
"""
def __init__(self, pattern, fields, cls):
self.pattern = pattern
self.fields = fields
self.query_class = cls
subqueries = []
for field in self.fields:
subqueries.append(cls(field, pattern, True))
super().__init__(subqueries)
def clause(self):
return self.clause_with_joiner('or')
def match(self, item):
for subq in self.subqueries:
if subq.match(item):
return True
return False
def __repr__(self):
return ("{0.__class__.__name__}({0.pattern!r}, {0.fields!r}, "
"{0.query_class.__name__})".format(self))
def __eq__(self, other):
return super().__eq__(other) and \
self.query_class == other.query_class
def __hash__(self):
return hash((self.pattern, tuple(self.fields), self.query_class))
class MutableCollectionQuery(CollectionQuery):
"""A collection query whose subqueries may be modified after the
query is initialized.
"""
def __setitem__(self, key, value):
self.subqueries[key] = value
def __delitem__(self, key):
del self.subqueries[key]
class AndQuery(MutableCollectionQuery):
"""A conjunction of a list of other queries."""
def clause(self):
return self.clause_with_joiner('and')
def match(self, item):
return all(q.match(item) for q in self.subqueries)
class OrQuery(MutableCollectionQuery):
"""A conjunction of a list of other queries."""
def clause(self):
return self.clause_with_joiner('or')
def match(self, item):
return any(q.match(item) for q in self.subqueries)
class NotQuery(Query):
"""A query that matches the negation of its `subquery`, as a shorcut for
performing `not(subquery)` without using regular expressions.
"""
def __init__(self, subquery):
self.subquery = subquery
def clause(self):
clause, subvals = self.subquery.clause()
if clause:
return f'not ({clause})', subvals
else:
# If there is no clause, there is nothing to negate. All the logic
# is handled by match() for slow queries.
return clause, subvals
def match(self, item):
return not self.subquery.match(item)
def __repr__(self):
return "{0.__class__.__name__}({0.subquery!r})".format(self)
def __eq__(self, other):
return super().__eq__(other) and \
self.subquery == other.subquery
def __hash__(self):
return hash(('not', hash(self.subquery)))
class TrueQuery(Query):
"""A query that always matches."""
def clause(self):
return '1', ()
def match(self, item):
return True
class FalseQuery(Query):
"""A query that never matches."""
def clause(self):
return '0', ()
def match(self, item):
return False
# Time/date queries.
def _to_epoch_time(date):
"""Convert a `datetime` object to an integer number of seconds since
the (local) Unix epoch.
"""
if hasattr(date, 'timestamp'):
# The `timestamp` method exists on Python 3.3+.
return int(date.timestamp())
else:
epoch = datetime.fromtimestamp(0)
delta = date - epoch
return int(delta.total_seconds())
def _parse_periods(pattern):
"""Parse a string containing two dates separated by two dots (..).
Return a pair of `Period` objects.
"""
parts = pattern.split('..', 1)
if len(parts) == 1:
instant = Period.parse(parts[0])
return (instant, instant)
else:
start = Period.parse(parts[0])
end = Period.parse(parts[1])
return (start, end)
class Period:
"""A period of time given by a date, time and precision.
Example: 2014-01-01 10:50:30 with precision 'month' represents all
instants of time during January 2014.
"""
precisions = ('year', 'month', 'day', 'hour', 'minute', 'second')
date_formats = (
('%Y',), # year
('%Y-%m',), # month
('%Y-%m-%d',), # day
('%Y-%m-%dT%H', '%Y-%m-%d %H'), # hour
('%Y-%m-%dT%H:%M', '%Y-%m-%d %H:%M'), # minute
('%Y-%m-%dT%H:%M:%S', '%Y-%m-%d %H:%M:%S') # second
)
relative_units = {'y': 365, 'm': 30, 'w': 7, 'd': 1}
    relative_re = '(?P<sign>[+-]?)(?P<quantity>[0-9]+)' + \
                  '(?P<timespan>[ymwd])'
def __init__(self, date, precision):
"""Create a period with the given date (a `datetime` object) and
precision (a string, one of "year", "month", "day", "hour", "minute",
or "second").
"""
if precision not in Period.precisions:
raise ValueError(f'Invalid precision {precision}')
self.date = date
self.precision = precision
@classmethod
def parse(cls, string):
"""Parse a date and return a `Period` object or `None` if the
string is empty, or raise an InvalidQueryArgumentValueError if
the string cannot be parsed to a date.
The date may be absolute or relative. Absolute dates look like
`YYYY`, or `YYYY-MM-DD`, or `YYYY-MM-DD HH:MM:SS`, etc. Relative
dates have three parts:
- Optionally, a ``+`` or ``-`` sign indicating the future or the
past. The default is the future.
- A number: how much to add or subtract.
- A letter indicating the unit: days, weeks, months or years
(``d``, ``w``, ``m`` or ``y``). A "month" is exactly 30 days
and a "year" is exactly 365 days.
"""
def find_date_and_format(string):
            for ordinal, formats in enumerate(cls.date_formats):
                for format_option in formats:
                    try:
                        date = datetime.strptime(string, format_option)
                        return date, ordinal
except ValueError:
# Parsing failed.
pass
return (None, None)
if not string:
return None
# Check for a relative date.
match_dq = re.match(cls.relative_re, string)
if match_dq:
sign = match_dq.group('sign')
quantity = match_dq.group('quantity')
timespan = match_dq.group('timespan')
# Add or subtract the given amount of time from the current
# date.
multiplier = -1 if sign == '-' else 1
days = cls.relative_units[timespan]
date = datetime.now() + \
timedelta(days=int(quantity) * days) * multiplier
return cls(date, cls.precisions[5])
# Check for an absolute date.
date, ordinal = find_date_and_format(string)
if date is None:
raise InvalidQueryArgumentValueError(string,
'a valid date/time string')
precision = cls.precisions[ordinal]
return cls(date, precision)
def open_right_endpoint(self):
"""Based on the precision, convert the period to a precise
`datetime` for use as a right endpoint in a right-open interval.
"""
precision = self.precision
date = self.date
if 'year' == self.precision:
return date.replace(year=date.year + 1, month=1)
elif 'month' == precision:
if (date.month < 12):
return date.replace(month=date.month + 1)
else:
return date.replace(year=date.year + 1, month=1)
elif 'day' == precision:
return date + timedelta(days=1)
elif 'hour' == precision:
return date + timedelta(hours=1)
elif 'minute' == precision:
return date + timedelta(minutes=1)
elif 'second' == precision:
return date + timedelta(seconds=1)
else:
raise ValueError(f'unhandled precision {precision}')
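# Illustrative sketch (hypothetical helper): relative dates are offsets
# from "now" and always carry second precision, so '-2w' is a precise
# instant roughly two weeks in the past.
def _example_relative_period():
    period = Period.parse('-2w')
    return period.precision, period.date  # -> ('second', now - 14 days)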
class DateInterval:
"""A closed-open interval of dates.
A left endpoint of None means since the beginning of time.
A right endpoint of None means towards infinity.
"""
def __init__(self, start, end):
if start is not None and end is not None and not start < end:
raise ValueError("start date {} is not before end date {}"
.format(start, end))
self.start = start
self.end = end
@classmethod
def from_periods(cls, start, end):
"""Create an interval with two Periods as the endpoints.
"""
end_date = end.open_right_endpoint() if end is not None else None
start_date = start.date if start is not None else None
return cls(start_date, end_date)
def contains(self, date):
if self.start is not None and date < self.start:
return False
if self.end is not None and date >= self.end:
return False
return True
def __str__(self):
return f'[{self.start}, {self.end})'
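# Illustrative sketch (hypothetical helper): an interval built from two
# year-precision Periods is closed on the left and open on the right, so
# the last second of 2014 is inside '2014..2014' but 2015-01-01 is not.
def _example_date_interval():
    start, end = _parse_periods('2014..2014')
    interval = DateInterval.from_periods(start, end)
    return (interval.contains(datetime(2014, 12, 31, 23, 59, 59)),  # True
            interval.contains(datetime(2015, 1, 1)))                # False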
class DateQuery(FieldQuery):
"""Matches date fields stored as seconds since Unix epoch time.
Dates can be specified as ``year-month-day`` strings where only year
is mandatory.
The value of a date field can be matched against a date interval by
using an ellipsis interval syntax similar to that of NumericQuery.
"""
def __init__(self, field, pattern, fast=True):
super().__init__(field, pattern, fast)
start, end = _parse_periods(pattern)
self.interval = DateInterval.from_periods(start, end)
def match(self, item):
if self.field not in item:
return False
timestamp = float(item[self.field])
date = datetime.fromtimestamp(timestamp)
return self.interval.contains(date)
_clause_tmpl = "{0} {1} ?"
def col_clause(self):
clause_parts = []
subvals = []
if self.interval.start:
clause_parts.append(self._clause_tmpl.format(self.field, ">="))
subvals.append(_to_epoch_time(self.interval.start))
if self.interval.end:
clause_parts.append(self._clause_tmpl.format(self.field, "<"))
subvals.append(_to_epoch_time(self.interval.end))
if clause_parts:
# One- or two-sided interval.
clause = ' AND '.join(clause_parts)
else:
# Match any date.
clause = '1'
return clause, subvals
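# Illustrative sketch (hypothetical helper; the field name 'added' is
# chosen only for illustration): a two-sided date pattern compiles to a
# pair of epoch-time comparisons joined with AND.
def _example_date_query_clause():
    query = DateQuery('added', '2014..2015')
    clause, subvals = query.col_clause()
    # clause -> 'added >= ? AND added < ?'; subvals hold epoch seconds
    return clause, subvals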
class DurationQuery(NumericQuery):
"""NumericQuery that allow human-friendly (M:SS) time interval formats.
Converts the range(s) to a float value, and delegates on NumericQuery.
Raises InvalidQueryError when the pattern does not represent an int, float
or M:SS time interval.
"""
def _convert(self, s):
"""Convert a M:SS or numeric string to a float.
Return None if `s` is empty.
Raise an InvalidQueryError if the string cannot be converted.
"""
if not s:
return None
try:
return util.raw_seconds_short(s)
except ValueError:
try:
return float(s)
except ValueError:
raise InvalidQueryArgumentValueError(
s,
"a M:SS string or a float")
# Sorting.
class Sort:
"""An abstract class representing a sort operation for a query into
the item database.
"""
def order_clause(self):
"""Generates a SQL fragment to be used in a ORDER BY clause, or
None if no fragment is used (i.e., this is a slow sort).
"""
return None
def sort(self, items):
"""Sort the list of objects and return a list.
"""
return sorted(items)
def is_slow(self):
"""Indicate whether this query is *slow*, meaning that it cannot
be executed in SQL and must be executed in Python.
"""
return False
def __hash__(self):
return 0
def __eq__(self, other):
return type(self) == type(other)
class MultipleSort(Sort):
"""Sort that encapsulates multiple sub-sorts.
"""
def __init__(self, sorts=None):
self.sorts = sorts or []
def add_sort(self, sort):
self.sorts.append(sort)
def _sql_sorts(self):
"""Return the list of sub-sorts for which we can be (at least
partially) fast.
        A contiguous suffix of fast (SQL-capable) sub-sorts is
        executable in SQL. The remaining sub-sorts, even if they are
        fast independently, must be executed slowly.
"""
sql_sorts = []
for sort in reversed(self.sorts):
            if sort.order_clause() is not None:
sql_sorts.append(sort)
else:
break
sql_sorts.reverse()
return sql_sorts
def order_clause(self):
order_strings = []
for sort in self._sql_sorts():
order = sort.order_clause()
order_strings.append(order)
return ", ".join(order_strings)
def is_slow(self):
for sort in self.sorts:
if sort.is_slow():
return True
return False
def sort(self, items):
slow_sorts = []
switch_slow = False
for sort in reversed(self.sorts):
if switch_slow:
slow_sorts.append(sort)
elif sort.order_clause() is None:
switch_slow = True
slow_sorts.append(sort)
            else:
                # A fast sort in the SQL-capable suffix; it has already
                # been applied by the ORDER BY clause.
                pass
for sort in slow_sorts:
items = sort.sort(items)
return items
def __repr__(self):
return f'MultipleSort({self.sorts!r})'
def __hash__(self):
return hash(tuple(self.sorts))
def __eq__(self, other):
return super().__eq__(other) and \
self.sorts == other.sorts
class FieldSort(Sort):
"""An abstract sort criterion that orders by a specific field (of
any kind).
"""
def __init__(self, field, ascending=True, case_insensitive=True):
self.field = field
self.ascending = ascending
self.case_insensitive = case_insensitive
def sort(self, objs):
# TODO: Conversion and null-detection here. In Python 3,
# comparisons with None fail. We should also support flexible
# attributes with different types without falling over.
def key(item):
field_val = item.get(self.field, '')
if self.case_insensitive and isinstance(field_val, str):
field_val = field_val.lower()
return field_val
return sorted(objs, key=key, reverse=not self.ascending)
def __repr__(self):
return '<{}: {}{}>'.format(
type(self).__name__,
self.field,
'+' if self.ascending else '-',
)
def __hash__(self):
return hash((self.field, self.ascending))
def __eq__(self, other):
return super().__eq__(other) and \
self.field == other.field and \
self.ascending == other.ascending
class FixedFieldSort(FieldSort):
"""Sort object to sort on a fixed field.
"""
def order_clause(self):
order = "ASC" if self.ascending else "DESC"
if self.case_insensitive:
field = '(CASE ' \
'WHEN TYPEOF({0})="text" THEN LOWER({0}) ' \
'WHEN TYPEOF({0})="blob" THEN LOWER({0}) ' \
'ELSE {0} END)'.format(self.field)
else:
field = self.field
return f"{field} {order}"
class SlowFieldSort(FieldSort):
"""A sort criterion by some model field other than a fixed field:
i.e., a computed or flexible field.
"""
def is_slow(self):
return True
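# Illustrative sketch (hypothetical helper; the field names are chosen
# only for illustration): only a contiguous suffix of SQL-capable
# sub-sorts can run in SQL, so placing a slow sort first leaves just the
# fixed sort for the ORDER BY clause.
def _example_multiple_sort():
    sort = MultipleSort([SlowFieldSort('play_count'),
                         FixedFieldSort('year')])
    return sort.order_clause()  # covers only the trailing 'year' sort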
class NullSort(Sort):
"""No sorting. Leave results unsorted."""
def sort(self, items):
return items
def __nonzero__(self):
return self.__bool__()
def __bool__(self):
return False
def __eq__(self, other):
return type(self) == type(other) or other is None
def __hash__(self):
return 0
| 28,599
|
Python
|
.py
| 726
| 30.570248
| 79
| 0.598301
|
rembo10/headphones
| 3,370
| 601
| 527
|
GPL-3.0
|
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
|
9,259
|
types.py
|
rembo10_headphones/lib/beets/dbcore/types.py
|
# This file is part of beets.
# Copyright 2016, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Representation of type information for DBCore model fields.
"""
from . import query
from beets.util import str2bool
# Abstract base.
class Type:
"""An object encapsulating the type of a model field. Includes
information about how to store, query, format, and parse a given
field.
"""
sql = 'TEXT'
"""The SQLite column type for the value.
"""
query = query.SubstringQuery
"""The `Query` subclass to be used when querying the field.
"""
model_type = str
"""The Python type that is used to represent the value in the model.
The model is guaranteed to return a value of this type if the field
is accessed. To this end, the constructor is used by the `normalize`
    and `from_sql` methods and the `null` property.
"""
@property
def null(self):
"""The value to be exposed when the underlying value is None.
"""
return self.model_type()
def format(self, value):
"""Given a value of this type, produce a Unicode string
representing the value. This is used in template evaluation.
"""
if value is None:
value = self.null
# `self.null` might be `None`
if value is None:
value = ''
if isinstance(value, bytes):
value = value.decode('utf-8', 'ignore')
return str(value)
def parse(self, string):
"""Parse a (possibly human-written) string and return the
indicated value of this type.
"""
try:
return self.model_type(string)
except ValueError:
return self.null
def normalize(self, value):
"""Given a value that will be assigned into a field of this
type, normalize the value to have the appropriate type. This
base implementation only reinterprets `None`.
"""
if value is None:
return self.null
else:
# TODO This should eventually be replaced by
# `self.model_type(value)`
return value
def from_sql(self, sql_value):
"""Receives the value stored in the SQL backend and return the
value to be stored in the model.
For fixed fields the type of `value` is determined by the column
type affinity given in the `sql` property and the SQL to Python
mapping of the database adapter. For more information see:
https://www.sqlite.org/datatype3.html
https://docs.python.org/2/library/sqlite3.html#sqlite-and-python-types
Flexible fields have the type affinity `TEXT`. This means the
`sql_value` is either a `memoryview` or a `unicode` object`
and the method must handle these in addition.
"""
if isinstance(sql_value, memoryview):
sql_value = bytes(sql_value).decode('utf-8', 'ignore')
if isinstance(sql_value, str):
return self.parse(sql_value)
else:
return self.normalize(sql_value)
def to_sql(self, model_value):
"""Convert a value as stored in the model object to a value used
by the database adapter.
"""
return model_value
# Reusable types.
class Default(Type):
null = None
class Integer(Type):
"""A basic integer type.
"""
sql = 'INTEGER'
query = query.NumericQuery
model_type = int
def normalize(self, value):
try:
return self.model_type(round(float(value)))
except ValueError:
return self.null
except TypeError:
return self.null
class PaddedInt(Integer):
"""An integer field that is formatted with a given number of digits,
padded with zeroes.
"""
def __init__(self, digits):
self.digits = digits
def format(self, value):
return '{0:0{1}d}'.format(value or 0, self.digits)
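# Illustrative sketch (hypothetical helper): the format string zero-pads
# the value to the configured width.
def _example_padded_int():
    return PaddedInt(4).format(7)  # -> '0007'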
class NullPaddedInt(PaddedInt):
"""Same as `PaddedInt`, but does not normalize `None` to `0.0`.
"""
null = None
class ScaledInt(Integer):
"""An integer whose formatting operation scales the number by a
constant and adds a suffix. Good for units with large magnitudes.
"""
def __init__(self, unit, suffix=''):
self.unit = unit
self.suffix = suffix
def format(self, value):
return '{}{}'.format((value or 0) // self.unit, self.suffix)
class Id(Integer):
"""An integer used as the row id or a foreign key in a SQLite table.
This type is nullable: None values are not translated to zero.
"""
null = None
def __init__(self, primary=True):
if primary:
self.sql = 'INTEGER PRIMARY KEY'
class Float(Type):
"""A basic floating-point type. The `digits` parameter specifies how
many decimal places to use in the human-readable representation.
"""
sql = 'REAL'
query = query.NumericQuery
model_type = float
def __init__(self, digits=1):
self.digits = digits
def format(self, value):
return '{0:.{1}f}'.format(value or 0, self.digits)
class NullFloat(Float):
"""Same as `Float`, but does not normalize `None` to `0.0`.
"""
null = None
class String(Type):
"""A Unicode string type.
"""
sql = 'TEXT'
query = query.SubstringQuery
def normalize(self, value):
if value is None:
return self.null
else:
return self.model_type(value)
class Boolean(Type):
"""A boolean type.
"""
sql = 'INTEGER'
query = query.BooleanQuery
model_type = bool
def format(self, value):
return str(bool(value))
def parse(self, string):
return str2bool(string)
# Shared instances of common types.
DEFAULT = Default()
INTEGER = Integer()
PRIMARY_ID = Id(True)
FOREIGN_ID = Id(False)
FLOAT = Float()
NULL_FLOAT = NullFloat()
STRING = String()
BOOLEAN = Boolean()
| 6,513
|
Python
|
.py
| 183
| 29.180328
| 78
| 0.651433
|
rembo10/headphones
| 3,370
| 601
| 527
|
GPL-3.0
|
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
|
9,260
|
mb.py
|
rembo10_headphones/lib/beets/autotag/mb.py
|
# This file is part of beets.
# Copyright 2016, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Searches for albums in the MusicBrainz database.
"""
import musicbrainzngs
import re
import traceback
from beets import logging
from beets import plugins
import beets.autotag.hooks
import beets
from beets import util
from beets import config
from collections import Counter
from urllib.parse import urljoin
VARIOUS_ARTISTS_ID = '89ad4ac3-39f7-470e-963a-56509c546377'
BASE_URL = 'https://musicbrainz.org/'
SKIPPED_TRACKS = ['[data track]']
FIELDS_TO_MB_KEYS = {
'catalognum': 'catno',
'country': 'country',
'label': 'label',
'media': 'format',
'year': 'date',
}
musicbrainzngs.set_useragent('beets', beets.__version__,
'https://beets.io/')
class MusicBrainzAPIError(util.HumanReadableException):
"""An error while talking to MusicBrainz. The `query` field is the
parameter to the action and may have any type.
"""
def __init__(self, reason, verb, query, tb=None):
self.query = query
if isinstance(reason, musicbrainzngs.WebServiceError):
reason = 'MusicBrainz not reachable'
super().__init__(reason, verb, tb)
def get_message(self):
return '{} in {} with query {}'.format(
self._reasonstr(), self.verb, repr(self.query)
)
log = logging.getLogger('beets')
RELEASE_INCLUDES = ['artists', 'media', 'recordings', 'release-groups',
'labels', 'artist-credits', 'aliases',
'recording-level-rels', 'work-rels',
'work-level-rels', 'artist-rels', 'isrcs']
BROWSE_INCLUDES = ['artist-credits', 'work-rels',
'artist-rels', 'recording-rels', 'release-rels']
if "work-level-rels" in musicbrainzngs.VALID_BROWSE_INCLUDES['recording']:
BROWSE_INCLUDES.append("work-level-rels")
BROWSE_CHUNKSIZE = 100
BROWSE_MAXTRACKS = 500
TRACK_INCLUDES = ['artists', 'aliases', 'isrcs']
if 'work-level-rels' in musicbrainzngs.VALID_INCLUDES['recording']:
TRACK_INCLUDES += ['work-level-rels', 'artist-rels']
if 'genres' in musicbrainzngs.VALID_INCLUDES['recording']:
RELEASE_INCLUDES += ['genres']
def track_url(trackid):
return urljoin(BASE_URL, 'recording/' + trackid)
def album_url(albumid):
return urljoin(BASE_URL, 'release/' + albumid)
def configure():
"""Set up the python-musicbrainz-ngs module according to settings
from the beets configuration. This should be called at startup.
"""
hostname = config['musicbrainz']['host'].as_str()
https = config['musicbrainz']['https'].get(bool)
    # Only call set_hostname when a custom server is configured, since
    # musicbrainzngs connects to musicbrainz.org over HTTPS by default.
if hostname != "musicbrainz.org":
musicbrainzngs.set_hostname(hostname, https)
musicbrainzngs.set_rate_limit(
config['musicbrainz']['ratelimit_interval'].as_number(),
config['musicbrainz']['ratelimit'].get(int),
)
def _preferred_alias(aliases):
"""Given an list of alias structures for an artist credit, select
and return the user's preferred alias alias or None if no matching
alias is found.
"""
if not aliases:
return
# Only consider aliases that have locales set.
aliases = [a for a in aliases if 'locale' in a]
# Search configured locales in order.
for locale in config['import']['languages'].as_str_seq():
# Find matching primary aliases for this locale.
matches = [a for a in aliases
if a['locale'] == locale and 'primary' in a]
# Skip to the next locale if we have no matches
if not matches:
continue
return matches[0]
def _preferred_release_event(release):
"""Given a release, select and return the user's preferred release
event as a tuple of (country, release_date). Fall back to the
default release event if a preferred event is not found.
"""
countries = config['match']['preferred']['countries'].as_str_seq()
for country in countries:
for event in release.get('release-event-list', {}):
try:
if country in event['area']['iso-3166-1-code-list']:
return country, event['date']
except KeyError:
pass
return release.get('country'), release.get('date')
def _flatten_artist_credit(credit):
"""Given a list representing an ``artist-credit`` block, flatten the
data into a triple of joined artist name strings: canonical, sort, and
credit.
"""
artist_parts = []
artist_sort_parts = []
artist_credit_parts = []
for el in credit:
if isinstance(el, str):
# Join phrase.
artist_parts.append(el)
artist_credit_parts.append(el)
artist_sort_parts.append(el)
else:
alias = _preferred_alias(el['artist'].get('alias-list', ()))
# An artist.
if alias:
cur_artist_name = alias['alias']
else:
cur_artist_name = el['artist']['name']
artist_parts.append(cur_artist_name)
# Artist sort name.
if alias:
artist_sort_parts.append(alias['sort-name'])
elif 'sort-name' in el['artist']:
artist_sort_parts.append(el['artist']['sort-name'])
else:
artist_sort_parts.append(cur_artist_name)
# Artist credit.
if 'name' in el:
artist_credit_parts.append(el['name'])
else:
artist_credit_parts.append(cur_artist_name)
return (
''.join(artist_parts),
''.join(artist_sort_parts),
''.join(artist_credit_parts),
)
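# Illustrative sketch (hypothetical helper; the hand-built credit list
# mimics the musicbrainzngs response shape): join phrases are plain
# strings interleaved with artist dictionaries.
def _example_flatten_credit():
    credit = [
        {'artist': {'name': 'Simon', 'sort-name': 'Simon'}},
        ' & ',
        {'artist': {'name': 'Garfunkel', 'sort-name': 'Garfunkel'}},
    ]
    # -> ('Simon & Garfunkel', 'Simon & Garfunkel', 'Simon & Garfunkel')
    return _flatten_artist_credit(credit)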
def track_info(recording, index=None, medium=None, medium_index=None,
medium_total=None):
"""Translates a MusicBrainz recording result dictionary into a beets
``TrackInfo`` object. Three parameters are optional and are used
only for tracks that appear on releases (non-singletons): ``index``,
the overall track number; ``medium``, the disc number;
``medium_index``, the track's index on its medium; ``medium_total``,
the number of tracks on the medium. Each number is a 1-based index.
"""
info = beets.autotag.hooks.TrackInfo(
title=recording['title'],
track_id=recording['id'],
index=index,
medium=medium,
medium_index=medium_index,
medium_total=medium_total,
data_source='MusicBrainz',
data_url=track_url(recording['id']),
)
if recording.get('artist-credit'):
# Get the artist names.
info.artist, info.artist_sort, info.artist_credit = \
_flatten_artist_credit(recording['artist-credit'])
# Get the ID and sort name of the first artist.
artist = recording['artist-credit'][0]['artist']
info.artist_id = artist['id']
if recording.get('length'):
info.length = int(recording['length']) / (1000.0)
info.trackdisambig = recording.get('disambiguation')
if recording.get('isrc-list'):
info.isrc = ';'.join(recording['isrc-list'])
lyricist = []
composer = []
composer_sort = []
for work_relation in recording.get('work-relation-list', ()):
if work_relation['type'] != 'performance':
continue
info.work = work_relation['work']['title']
info.mb_workid = work_relation['work']['id']
if 'disambiguation' in work_relation['work']:
info.work_disambig = work_relation['work']['disambiguation']
for artist_relation in work_relation['work'].get(
'artist-relation-list', ()):
if 'type' in artist_relation:
type = artist_relation['type']
if type == 'lyricist':
lyricist.append(artist_relation['artist']['name'])
elif type == 'composer':
composer.append(artist_relation['artist']['name'])
composer_sort.append(
artist_relation['artist']['sort-name'])
if lyricist:
info.lyricist = ', '.join(lyricist)
if composer:
info.composer = ', '.join(composer)
info.composer_sort = ', '.join(composer_sort)
arranger = []
for artist_relation in recording.get('artist-relation-list', ()):
if 'type' in artist_relation:
type = artist_relation['type']
if type == 'arranger':
arranger.append(artist_relation['artist']['name'])
if arranger:
info.arranger = ', '.join(arranger)
# Supplementary fields provided by plugins
extra_trackdatas = plugins.send('mb_track_extract', data=recording)
for extra_trackdata in extra_trackdatas:
info.update(extra_trackdata)
info.decode()
return info
def _set_date_str(info, date_str, original=False):
"""Given a (possibly partial) YYYY-MM-DD string and an AlbumInfo
object, set the object's release date fields appropriately. If
`original`, then set the original_year, etc., fields.
"""
if date_str:
date_parts = date_str.split('-')
for key in ('year', 'month', 'day'):
if date_parts:
date_part = date_parts.pop(0)
try:
date_num = int(date_part)
except ValueError:
continue
if original:
key = 'original_' + key
setattr(info, key, date_num)
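# Illustrative sketch (hypothetical helper; SimpleNamespace stands in for
# a real AlbumInfo object): a partial YYYY-MM date string sets only the
# parts it actually contains.
def _example_set_date_str():
    from types import SimpleNamespace
    info = SimpleNamespace(year=None, month=None, day=None)
    _set_date_str(info, '1994-05')
    return info.year, info.month, info.day  # -> (1994, 5, None)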
def album_info(release):
"""Takes a MusicBrainz release result dictionary and returns a beets
AlbumInfo object containing the interesting data about that release.
"""
# Get artist name using join phrases.
artist_name, artist_sort_name, artist_credit_name = \
_flatten_artist_credit(release['artist-credit'])
ntracks = sum(len(m['track-list']) for m in release['medium-list'])
# The MusicBrainz API omits 'artist-relation-list' and 'work-relation-list'
# when the release has more than 500 tracks. So we use browse_recordings
# on chunks of tracks to recover the same information in this case.
if ntracks > BROWSE_MAXTRACKS:
log.debug('Album {} has too many tracks', release['id'])
recording_list = []
for i in range(0, ntracks, BROWSE_CHUNKSIZE):
log.debug('Retrieving tracks starting at {}', i)
recording_list.extend(musicbrainzngs.browse_recordings(
release=release['id'], limit=BROWSE_CHUNKSIZE,
includes=BROWSE_INCLUDES,
offset=i)['recording-list'])
track_map = {r['id']: r for r in recording_list}
for medium in release['medium-list']:
for recording in medium['track-list']:
recording_info = track_map[recording['recording']['id']]
recording['recording'] = recording_info
# Basic info.
track_infos = []
index = 0
for medium in release['medium-list']:
disctitle = medium.get('title')
format = medium.get('format')
if format in config['match']['ignored_media'].as_str_seq():
continue
all_tracks = medium['track-list']
if ('data-track-list' in medium
and not config['match']['ignore_data_tracks']):
all_tracks += medium['data-track-list']
track_count = len(all_tracks)
if 'pregap' in medium:
all_tracks.insert(0, medium['pregap'])
for track in all_tracks:
if ('title' in track['recording'] and
track['recording']['title'] in SKIPPED_TRACKS):
continue
if ('video' in track['recording'] and
track['recording']['video'] == 'true' and
config['match']['ignore_video_tracks']):
continue
# Basic information from the recording.
index += 1
ti = track_info(
track['recording'],
index,
int(medium['position']),
int(track['position']),
track_count,
)
ti.release_track_id = track['id']
ti.disctitle = disctitle
ti.media = format
ti.track_alt = track['number']
# Prefer track data, where present, over recording data.
if track.get('title'):
ti.title = track['title']
if track.get('artist-credit'):
# Get the artist names.
ti.artist, ti.artist_sort, ti.artist_credit = \
_flatten_artist_credit(track['artist-credit'])
ti.artist_id = track['artist-credit'][0]['artist']['id']
if track.get('length'):
ti.length = int(track['length']) / (1000.0)
track_infos.append(ti)
info = beets.autotag.hooks.AlbumInfo(
album=release['title'],
album_id=release['id'],
artist=artist_name,
artist_id=release['artist-credit'][0]['artist']['id'],
tracks=track_infos,
mediums=len(release['medium-list']),
artist_sort=artist_sort_name,
artist_credit=artist_credit_name,
data_source='MusicBrainz',
data_url=album_url(release['id']),
)
info.va = info.artist_id == VARIOUS_ARTISTS_ID
if info.va:
info.artist = config['va_name'].as_str()
info.asin = release.get('asin')
info.releasegroup_id = release['release-group']['id']
info.albumstatus = release.get('status')
# Get the disambiguation strings at the release and release group level.
if release['release-group'].get('disambiguation'):
info.releasegroupdisambig = \
release['release-group'].get('disambiguation')
if release.get('disambiguation'):
info.albumdisambig = release.get('disambiguation')
# Get the "classic" Release type. This data comes from a legacy API
# feature before MusicBrainz supported multiple release types.
if 'type' in release['release-group']:
reltype = release['release-group']['type']
if reltype:
info.albumtype = reltype.lower()
# Set the new-style "primary" and "secondary" release types.
albumtypes = []
if 'primary-type' in release['release-group']:
rel_primarytype = release['release-group']['primary-type']
if rel_primarytype:
albumtypes.append(rel_primarytype.lower())
if 'secondary-type-list' in release['release-group']:
if release['release-group']['secondary-type-list']:
for sec_type in release['release-group']['secondary-type-list']:
albumtypes.append(sec_type.lower())
info.albumtypes = '; '.join(albumtypes)
# Release events.
info.country, release_date = _preferred_release_event(release)
release_group_date = release['release-group'].get('first-release-date')
if not release_date:
# Fall back if release-specific date is not available.
release_date = release_group_date
_set_date_str(info, release_date, False)
_set_date_str(info, release_group_date, True)
# Label name.
if release.get('label-info-list'):
label_info = release['label-info-list'][0]
if label_info.get('label'):
label = label_info['label']['name']
if label != '[no label]':
info.label = label
info.catalognum = label_info.get('catalog-number')
# Text representation data.
if release.get('text-representation'):
rep = release['text-representation']
info.script = rep.get('script')
info.language = rep.get('language')
# Media (format).
if release['medium-list']:
first_medium = release['medium-list'][0]
info.media = first_medium.get('format')
if config['musicbrainz']['genres']:
sources = [
release['release-group'].get('genre-list', []),
release.get('genre-list', []),
]
genres = Counter()
for source in sources:
for genreitem in source:
genres[genreitem['name']] += int(genreitem['count'])
info.genre = '; '.join(g[0] for g in sorted(genres.items(),
key=lambda g: -g[1]))
extra_albumdatas = plugins.send('mb_album_extract', data=release)
for extra_albumdata in extra_albumdatas:
info.update(extra_albumdata)
info.decode()
return info
def match_album(artist, album, tracks=None, extra_tags=None):
"""Searches for a single album ("release" in MusicBrainz parlance)
and returns an iterator over AlbumInfo objects. May raise a
MusicBrainzAPIError.
The query consists of an artist name, an album name, and,
optionally, a number of tracks on the album and any other extra tags.
"""
# Build search criteria.
criteria = {'release': album.lower().strip()}
if artist is not None:
criteria['artist'] = artist.lower().strip()
else:
# Various Artists search.
criteria['arid'] = VARIOUS_ARTISTS_ID
if tracks is not None:
criteria['tracks'] = str(tracks)
# Additional search cues from existing metadata.
if extra_tags:
for tag in extra_tags:
key = FIELDS_TO_MB_KEYS[tag]
value = str(extra_tags.get(tag, '')).lower().strip()
if key == 'catno':
value = value.replace(' ', '')
if value:
criteria[key] = value
# Abort if we have no search terms.
if not any(criteria.values()):
return
try:
log.debug('Searching for MusicBrainz releases with: {!r}', criteria)
res = musicbrainzngs.search_releases(
limit=config['musicbrainz']['searchlimit'].get(int), **criteria)
except musicbrainzngs.MusicBrainzError as exc:
raise MusicBrainzAPIError(exc, 'release search', criteria,
traceback.format_exc())
for release in res['release-list']:
# The search result is missing some data (namely, the tracks),
# so we just use the ID and fetch the rest of the information.
albuminfo = album_for_id(release['id'])
if albuminfo is not None:
yield albuminfo
def match_track(artist, title):
"""Searches for a single track and returns an iterable of TrackInfo
objects. May raise a MusicBrainzAPIError.
"""
criteria = {
'artist': artist.lower().strip(),
'recording': title.lower().strip(),
}
if not any(criteria.values()):
return
try:
res = musicbrainzngs.search_recordings(
limit=config['musicbrainz']['searchlimit'].get(int), **criteria)
except musicbrainzngs.MusicBrainzError as exc:
raise MusicBrainzAPIError(exc, 'recording search', criteria,
traceback.format_exc())
for recording in res['recording-list']:
yield track_info(recording)
def _parse_id(s):
"""Search for a MusicBrainz ID in the given string and return it. If
no ID can be found, return None.
"""
# Find the first thing that looks like a UUID/MBID.
match = re.search('[a-f0-9]{8}(-[a-f0-9]{4}){3}-[a-f0-9]{12}', s)
if match:
return match.group()
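# Illustrative sketch (hypothetical helper): the regex finds the UUID
# anywhere in the string, so pasting a full MusicBrainz URL works too.
def _example_parse_id():
    url = 'https://musicbrainz.org/release/89ad4ac3-39f7-470e-963a-56509c546377'
    return _parse_id(url)  # -> '89ad4ac3-39f7-470e-963a-56509c546377'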
def album_for_id(releaseid):
"""Fetches an album by its MusicBrainz ID and returns an AlbumInfo
object or None if the album is not found. May raise a
MusicBrainzAPIError.
"""
log.debug('Requesting MusicBrainz release {}', releaseid)
albumid = _parse_id(releaseid)
if not albumid:
log.debug('Invalid MBID ({0}).', releaseid)
return
try:
res = musicbrainzngs.get_release_by_id(albumid,
RELEASE_INCLUDES)
except musicbrainzngs.ResponseError:
log.debug('Album ID match failed.')
return None
except musicbrainzngs.MusicBrainzError as exc:
raise MusicBrainzAPIError(exc, 'get release by ID', albumid,
traceback.format_exc())
return album_info(res['release'])
def track_for_id(releaseid):
"""Fetches a track by its MusicBrainz ID. Returns a TrackInfo object
or None if no track is found. May raise a MusicBrainzAPIError.
"""
trackid = _parse_id(releaseid)
if not trackid:
log.debug('Invalid MBID ({0}).', releaseid)
return
try:
res = musicbrainzngs.get_recording_by_id(trackid, TRACK_INCLUDES)
except musicbrainzngs.ResponseError:
log.debug('Track ID match failed.')
return None
except musicbrainzngs.MusicBrainzError as exc:
raise MusicBrainzAPIError(exc, 'get recording by ID', trackid,
traceback.format_exc())
return track_info(res['recording'])
| 21,532
|
Python
|
.py
| 501
| 34.133733
| 79
| 0.619591
|
rembo10/headphones
| 3,370
| 601
| 527
|
GPL-3.0
|
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
|
9,261
|
match.py
|
rembo10_headphones/lib/beets/autotag/match.py
|
# This file is part of beets.
# Copyright 2016, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Matches existing metadata with canonical information to identify
releases and tracks.
"""
import datetime
import re
from munkres import Munkres
from collections import namedtuple
from beets import logging
from beets import plugins
from beets import config
from beets.util import plurality
from beets.autotag import hooks
from beets.util.enumeration import OrderedEnum
# Artist signals that indicate "various artists". These are used at the
# album level to determine whether a given release is likely a VA
# release and also on the track level to remove the penalty for
# differing artists.
VA_ARTISTS = ('', 'various artists', 'various', 'va', 'unknown')
# Global logger.
log = logging.getLogger('beets')
# Recommendation enumeration.
class Recommendation(OrderedEnum):
"""Indicates a qualitative suggestion to the user about what should
be done with a given match.
"""
none = 0
low = 1
medium = 2
strong = 3
# A structure for holding a set of possible matches to choose between. This
# consists of a list of possible candidates (i.e., AlbumInfo or TrackInfo
# objects) and a recommendation value.
Proposal = namedtuple('Proposal', ('candidates', 'recommendation'))
# Primary matching functionality.
def current_metadata(items):
"""Extract the likely current metadata for an album given a list of its
items. Return two dictionaries:
- The most common value for each field.
- Whether each field's value was unanimous (values are booleans).
"""
assert items # Must be nonempty.
likelies = {}
consensus = {}
fields = ['artist', 'album', 'albumartist', 'year', 'disctotal',
'mb_albumid', 'label', 'catalognum', 'country', 'media',
'albumdisambig']
for field in fields:
values = [item[field] for item in items if item]
likelies[field], freq = plurality(values)
consensus[field] = (freq == len(values))
# If there's an album artist consensus, use this for the artist.
if consensus['albumartist'] and likelies['albumartist']:
likelies['artist'] = likelies['albumartist']
return likelies, consensus
def assign_items(items, tracks):
"""Given a list of Items and a list of TrackInfo objects, find the
best mapping between them. Returns a mapping from Items to TrackInfo
objects, a set of extra Items, and a set of extra TrackInfo
objects. These "extra" objects occur when there is an unequal number
of objects of the two types.
"""
# Construct the cost matrix.
costs = []
for item in items:
row = []
for i, track in enumerate(tracks):
row.append(track_distance(item, track))
costs.append(row)
# Find a minimum-cost bipartite matching.
log.debug('Computing track assignment...')
matching = Munkres().compute(costs)
log.debug('...done.')
# Produce the output matching.
mapping = {items[i]: tracks[j] for (i, j) in matching}
extra_items = list(set(items) - set(mapping.keys()))
extra_items.sort(key=lambda i: (i.disc, i.track, i.title))
extra_tracks = list(set(tracks) - set(mapping.values()))
extra_tracks.sort(key=lambda t: (t.index, t.title))
return mapping, extra_items, extra_tracks
def track_index_changed(item, track_info):
"""Returns True if the item and track info index is different. Tolerates
per disc and per release numbering.
"""
return item.track not in (track_info.medium_index, track_info.index)
def track_distance(item, track_info, incl_artist=False):
"""Determines the significance of a track metadata change. Returns a
Distance object. `incl_artist` indicates that a distance component should
be included for the track artist (i.e., for various-artist releases).
"""
dist = hooks.Distance()
# Length.
if track_info.length:
diff = abs(item.length - track_info.length) - \
config['match']['track_length_grace'].as_number()
dist.add_ratio('track_length', diff,
config['match']['track_length_max'].as_number())
# Title.
dist.add_string('track_title', item.title, track_info.title)
# Artist. Only check if there is actually an artist in the track data.
if incl_artist and track_info.artist and \
item.artist.lower() not in VA_ARTISTS:
dist.add_string('track_artist', item.artist, track_info.artist)
# Track index.
if track_info.index and item.track:
dist.add_expr('track_index', track_index_changed(item, track_info))
# Track ID.
if item.mb_trackid:
dist.add_expr('track_id', item.mb_trackid != track_info.track_id)
# Plugins.
dist.update(plugins.track_distance(item, track_info))
return dist
def distance(items, album_info, mapping):
"""Determines how "significant" an album metadata change would be.
Returns a Distance object. `album_info` is an AlbumInfo object
reflecting the album to be compared. `items` is a sequence of all
Item objects that will be matched (order is not important).
`mapping` is a dictionary mapping Items to TrackInfo objects; the
keys are a subset of `items` and the values are a subset of
`album_info.tracks`.
"""
likelies, _ = current_metadata(items)
dist = hooks.Distance()
# Artist, if not various.
if not album_info.va:
dist.add_string('artist', likelies['artist'], album_info.artist)
# Album.
dist.add_string('album', likelies['album'], album_info.album)
# Current or preferred media.
if album_info.media:
# Preferred media options.
patterns = config['match']['preferred']['media'].as_str_seq()
options = [re.compile(r'(\d+x)?(%s)' % pat, re.I) for pat in patterns]
if options:
dist.add_priority('media', album_info.media, options)
# Current media.
elif likelies['media']:
dist.add_equality('media', album_info.media, likelies['media'])
# Mediums.
if likelies['disctotal'] and album_info.mediums:
dist.add_number('mediums', likelies['disctotal'], album_info.mediums)
# Prefer earliest release.
if album_info.year and config['match']['preferred']['original_year']:
        # Assume 1889 (the year of the earliest gramophone discs) if we
        # don't know the original year.
original = album_info.original_year or 1889
diff = abs(album_info.year - original)
diff_max = abs(datetime.date.today().year - original)
dist.add_ratio('year', diff, diff_max)
# Year.
elif likelies['year'] and album_info.year:
if likelies['year'] in (album_info.year, album_info.original_year):
# No penalty for matching release or original year.
dist.add('year', 0.0)
elif album_info.original_year:
            # Prefer matches closest to the release year.
diff = abs(likelies['year'] - album_info.year)
diff_max = abs(datetime.date.today().year -
album_info.original_year)
dist.add_ratio('year', diff, diff_max)
else:
# Full penalty when there is no original year.
dist.add('year', 1.0)
# Preferred countries.
patterns = config['match']['preferred']['countries'].as_str_seq()
options = [re.compile(pat, re.I) for pat in patterns]
if album_info.country and options:
dist.add_priority('country', album_info.country, options)
# Country.
elif likelies['country'] and album_info.country:
dist.add_string('country', likelies['country'], album_info.country)
# Label.
if likelies['label'] and album_info.label:
dist.add_string('label', likelies['label'], album_info.label)
# Catalog number.
if likelies['catalognum'] and album_info.catalognum:
dist.add_string('catalognum', likelies['catalognum'],
album_info.catalognum)
# Disambiguation.
if likelies['albumdisambig'] and album_info.albumdisambig:
dist.add_string('albumdisambig', likelies['albumdisambig'],
album_info.albumdisambig)
# Album ID.
if likelies['mb_albumid']:
dist.add_equality('album_id', likelies['mb_albumid'],
album_info.album_id)
# Tracks.
dist.tracks = {}
for item, track in mapping.items():
dist.tracks[track] = track_distance(item, track, album_info.va)
dist.add('tracks', dist.tracks[track].distance)
# Missing tracks.
for i in range(len(album_info.tracks) - len(mapping)):
dist.add('missing_tracks', 1.0)
# Unmatched tracks.
for i in range(len(items) - len(mapping)):
dist.add('unmatched_tracks', 1.0)
# Plugins.
dist.update(plugins.album_distance(items, album_info, mapping))
return dist
def match_by_id(items):
"""If the items are tagged with a MusicBrainz album ID, returns an
AlbumInfo object for the corresponding album. Otherwise, returns
None.
"""
albumids = (item.mb_albumid for item in items if item.mb_albumid)
# Did any of the items have an MB album ID?
try:
first = next(albumids)
except StopIteration:
log.debug('No album ID found.')
return None
# Is there a consensus on the MB album ID?
for other in albumids:
if other != first:
log.debug('No album ID consensus.')
return None
# If all album IDs are equal, look up the album.
log.debug('Searching for discovered album ID: {0}', first)
return hooks.album_for_mbid(first)
def _recommendation(results):
"""Given a sorted list of AlbumMatch or TrackMatch objects, return a
recommendation based on the results' distances.
If the recommendation is higher than the configured maximum for
an applied penalty, the recommendation will be downgraded to the
configured maximum for that penalty.
"""
if not results:
# No candidates: no recommendation.
return Recommendation.none
# Basic distance thresholding.
min_dist = results[0].distance
if min_dist < config['match']['strong_rec_thresh'].as_number():
# Strong recommendation level.
rec = Recommendation.strong
elif min_dist <= config['match']['medium_rec_thresh'].as_number():
# Medium recommendation level.
rec = Recommendation.medium
elif len(results) == 1:
# Only a single candidate.
rec = Recommendation.low
elif results[1].distance - min_dist >= \
config['match']['rec_gap_thresh'].as_number():
# Gap between first two candidates is large.
rec = Recommendation.low
else:
# No conclusion. Return immediately. Can't be downgraded any further.
return Recommendation.none
# Downgrade to the max rec if it is lower than the current rec for an
# applied penalty.
keys = set(min_dist.keys())
if isinstance(results[0], hooks.AlbumMatch):
for track_dist in min_dist.tracks.values():
keys.update(list(track_dist.keys()))
max_rec_view = config['match']['max_rec']
for key in keys:
if key in list(max_rec_view.keys()):
max_rec = max_rec_view[key].as_choice({
'strong': Recommendation.strong,
'medium': Recommendation.medium,
'low': Recommendation.low,
'none': Recommendation.none,
})
rec = min(rec, max_rec)
return rec
def _sort_candidates(candidates):
"""Sort candidates by distance."""
return sorted(candidates, key=lambda match: match.distance)
def _add_candidate(items, results, info):
"""Given a candidate AlbumInfo object, attempt to add the candidate
to the output dictionary of AlbumMatch objects. This involves
checking the track count, ordering the items, checking for
duplicates, and calculating the distance.
"""
log.debug('Candidate: {0} - {1} ({2})',
info.artist, info.album, info.album_id)
# Discard albums with zero tracks.
if not info.tracks:
log.debug('No tracks.')
return
# Don't duplicate.
if info.album_id in results:
log.debug('Duplicate.')
return
# Discard matches without required tags.
for req_tag in config['match']['required'].as_str_seq():
if getattr(info, req_tag) is None:
log.debug('Ignored. Missing required tag: {0}', req_tag)
return
# Find mapping between the items and the track info.
mapping, extra_items, extra_tracks = assign_items(items, info.tracks)
# Get the change distance.
dist = distance(items, info, mapping)
# Skip matches with ignored penalties.
penalties = [key for key, _ in dist]
for penalty in config['match']['ignored'].as_str_seq():
if penalty in penalties:
log.debug('Ignored. Penalty: {0}', penalty)
return
log.debug('Success. Distance: {0}', dist)
results[info.album_id] = hooks.AlbumMatch(dist, info, mapping,
extra_items, extra_tracks)
def tag_album(items, search_artist=None, search_album=None,
search_ids=[]):
"""Return a tuple of the current artist name, the current album
name, and a `Proposal` containing `AlbumMatch` candidates.
The artist and album are the most common values of these fields
among `items`.
The `AlbumMatch` objects are generated by searching the metadata
backends. By default, the metadata of the items is used for the
search. This can be customized by setting the parameters.
`search_ids` is a list of metadata backend IDs: if specified,
it will restrict the candidates to those IDs, ignoring
    `search_artist` and `search_album`. The `mapping` field of the
album has the matched `items` as keys.
The recommendation is calculated from the match quality of the
candidates.
"""
# Get current metadata.
likelies, consensus = current_metadata(items)
cur_artist = likelies['artist']
cur_album = likelies['album']
log.debug('Tagging {0} - {1}', cur_artist, cur_album)
# The output result (distance, AlbumInfo) tuples (keyed by MB album
# ID).
candidates = {}
# Search by explicit ID.
if search_ids:
for search_id in search_ids:
log.debug('Searching for album ID: {0}', search_id)
for id_candidate in hooks.albums_for_id(search_id):
_add_candidate(items, candidates, id_candidate)
# Use existing metadata or text search.
else:
# Try search based on current ID.
id_info = match_by_id(items)
if id_info:
_add_candidate(items, candidates, id_info)
rec = _recommendation(list(candidates.values()))
log.debug('Album ID match recommendation is {0}', rec)
if candidates and not config['import']['timid']:
# If we have a very good MBID match, return immediately.
# Otherwise, this match will compete against metadata-based
# matches.
if rec == Recommendation.strong:
log.debug('ID match.')
return cur_artist, cur_album, \
Proposal(list(candidates.values()), rec)
# Search terms.
if not (search_artist and search_album):
# No explicit search terms -- use current metadata.
search_artist, search_album = cur_artist, cur_album
log.debug('Search terms: {0} - {1}', search_artist, search_album)
extra_tags = None
if config['musicbrainz']['extra_tags']:
tag_list = config['musicbrainz']['extra_tags'].get()
extra_tags = {k: v for (k, v) in likelies.items() if k in tag_list}
log.debug('Additional search terms: {0}', extra_tags)
# Is this album likely to be a "various artist" release?
va_likely = ((not consensus['artist']) or
(search_artist.lower() in VA_ARTISTS) or
any(item.comp for item in items))
log.debug('Album might be VA: {0}', va_likely)
# Get the results from the data sources.
for matched_candidate in hooks.album_candidates(items,
search_artist,
search_album,
va_likely,
extra_tags):
_add_candidate(items, candidates, matched_candidate)
log.debug('Evaluating {0} candidates.', len(candidates))
# Sort and get the recommendation.
candidates = _sort_candidates(candidates.values())
rec = _recommendation(candidates)
return cur_artist, cur_album, Proposal(candidates, rec)
def tag_item(item, search_artist=None, search_title=None,
search_ids=[]):
"""Find metadata for a single track. Return a `Proposal` consisting
of `TrackMatch` objects.
`search_artist` and `search_title` may be used
to override the current metadata for the purposes of the MusicBrainz
    search. `search_ids` may be used for restricting the search to a list
of metadata backend IDs.
"""
# Holds candidates found so far: keys are MBIDs; values are
# (distance, TrackInfo) pairs.
candidates = {}
# First, try matching by MusicBrainz ID.
trackids = search_ids or [t for t in [item.mb_trackid] if t]
if trackids:
for trackid in trackids:
log.debug('Searching for track ID: {0}', trackid)
for track_info in hooks.tracks_for_id(trackid):
dist = track_distance(item, track_info, incl_artist=True)
candidates[track_info.track_id] = \
hooks.TrackMatch(dist, track_info)
# If this is a good match, then don't keep searching.
rec = _recommendation(_sort_candidates(candidates.values()))
if rec == Recommendation.strong and \
not config['import']['timid']:
log.debug('Track ID match.')
return Proposal(_sort_candidates(candidates.values()), rec)
# If we're searching by ID, don't proceed.
if search_ids:
if candidates:
return Proposal(_sort_candidates(candidates.values()), rec)
else:
return Proposal([], Recommendation.none)
# Search terms.
if not (search_artist and search_title):
search_artist, search_title = item.artist, item.title
log.debug('Item search terms: {0} - {1}', search_artist, search_title)
# Get and evaluate candidate metadata.
for track_info in hooks.item_candidates(item, search_artist, search_title):
dist = track_distance(item, track_info, incl_artist=True)
candidates[track_info.track_id] = hooks.TrackMatch(dist, track_info)
# Sort by distance and return with recommendation.
log.debug('Found {0} candidates.', len(candidates))
candidates = _sort_candidates(candidates.values())
rec = _recommendation(candidates)
return Proposal(candidates, rec)
| 19,893
|
Python
|
.py
| 433
| 38.013857
| 79
| 0.652398
|
rembo10/headphones
| 3,370
| 601
| 527
|
GPL-3.0
|
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
|
9,262
|
__init__.py
|
rembo10_headphones/lib/beets/autotag/__init__.py
|
# This file is part of beets.
# Copyright 2016, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Facilities for automatically determining files' correct metadata.
"""
from beets import logging
from beets import config
# Parts of external interface.
from .hooks import ( # noqa
AlbumInfo,
TrackInfo,
AlbumMatch,
TrackMatch,
Distance,
)
from .match import tag_item, tag_album, Proposal # noqa
from .match import Recommendation # noqa
# Global logger.
log = logging.getLogger('beets')
# Metadata fields that are already hardcoded, or where the tag name changes.
SPECIAL_FIELDS = {
'album': (
'va',
'releasegroup_id',
'artist_id',
'album_id',
'mediums',
'tracks',
'year',
'month',
'day',
'artist',
'artist_credit',
'artist_sort',
'data_url'
),
'track': (
'track_alt',
'artist_id',
'release_track_id',
'medium',
'index',
'medium_index',
'title',
'artist_credit',
'artist_sort',
'artist',
'track_id',
'medium_total',
'data_url',
'length'
)
}
# Additional utilities for the main interface.
def apply_item_metadata(item, track_info):
"""Set an item's metadata from its matched TrackInfo object.
"""
item.artist = track_info.artist
item.artist_sort = track_info.artist_sort
item.artist_credit = track_info.artist_credit
item.title = track_info.title
item.mb_trackid = track_info.track_id
item.mb_releasetrackid = track_info.release_track_id
if track_info.artist_id:
item.mb_artistid = track_info.artist_id
for field, value in track_info.items():
# We only overwrite fields that are not already hardcoded.
if field in SPECIAL_FIELDS['track']:
continue
if value is None:
continue
item[field] = value
# At the moment, the other metadata is left intact (including album
# and track number). Perhaps these should be emptied?
def apply_metadata(album_info, mapping):
"""Set the items' metadata to match an AlbumInfo object using a
mapping from Items to TrackInfo objects.
"""
for item, track_info in mapping.items():
# Artist or artist credit.
if config['artist_credit']:
item.artist = (track_info.artist_credit or
track_info.artist or
album_info.artist_credit or
album_info.artist)
item.albumartist = (album_info.artist_credit or
album_info.artist)
else:
item.artist = (track_info.artist or album_info.artist)
item.albumartist = album_info.artist
# Album.
item.album = album_info.album
# Artist sort and credit names.
item.artist_sort = track_info.artist_sort or album_info.artist_sort
item.artist_credit = (track_info.artist_credit or
album_info.artist_credit)
item.albumartist_sort = album_info.artist_sort
item.albumartist_credit = album_info.artist_credit
# Release date.
for prefix in '', 'original_':
if config['original_date'] and not prefix:
# Ignore specific release date.
continue
for suffix in 'year', 'month', 'day':
key = prefix + suffix
value = getattr(album_info, key) or 0
# If we don't even have a year, apply nothing.
if suffix == 'year' and not value:
break
# Otherwise, set the fetched value (or 0 for the month
# and day if not available).
item[key] = value
# If we're using original release date for both fields,
# also set item.year = info.original_year, etc.
if config['original_date']:
item[suffix] = value
# Title.
item.title = track_info.title
if config['per_disc_numbering']:
# We want to let the track number be zero, but if the medium index
# is not provided we need to fall back to the overall index.
if track_info.medium_index is not None:
item.track = track_info.medium_index
else:
item.track = track_info.index
item.tracktotal = track_info.medium_total or len(album_info.tracks)
else:
item.track = track_info.index
item.tracktotal = len(album_info.tracks)
# Disc and disc count.
item.disc = track_info.medium
item.disctotal = album_info.mediums
# MusicBrainz IDs.
item.mb_trackid = track_info.track_id
item.mb_releasetrackid = track_info.release_track_id
item.mb_albumid = album_info.album_id
if track_info.artist_id:
item.mb_artistid = track_info.artist_id
else:
item.mb_artistid = album_info.artist_id
item.mb_albumartistid = album_info.artist_id
item.mb_releasegroupid = album_info.releasegroup_id
# Compilation flag.
item.comp = album_info.va
# Track alt.
item.track_alt = track_info.track_alt
# Don't overwrite fields with empty values unless the
# field is explicitly allowed to be overwritten
for field, value in album_info.items():
if field in SPECIAL_FIELDS['album']:
continue
clobber = field in config['overwrite_null']['album'].as_str_seq()
if value is None and not clobber:
continue
item[field] = value
for field, value in track_info.items():
if field in SPECIAL_FIELDS['track']:
continue
clobber = field in config['overwrite_null']['track'].as_str_seq()
value = getattr(track_info, field)
if value is None and not clobber:
continue
item[field] = value
| 6,687
|
Python
|
.py
| 173
| 29.289017
| 79
| 0.606537
|
rembo10/headphones
| 3,370
| 601
| 527
|
GPL-3.0
|
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
|
9,263
|
hooks.py
|
rembo10_headphones/lib/beets/autotag/hooks.py
|
# This file is part of beets.
# Copyright 2016, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Glue between metadata sources and the matching logic."""
from collections import namedtuple
from functools import total_ordering
import re
from beets import logging
from beets import plugins
from beets import config
from beets.util import as_string
from beets.autotag import mb
from jellyfish import levenshtein_distance
from unidecode import unidecode
log = logging.getLogger('beets')
# The name of the type for patterns in re changed in Python 3.7.
try:
Pattern = re._pattern_type
except AttributeError:
Pattern = re.Pattern
# Classes used to represent candidate options.
class AttrDict(dict):
"""A dictionary that supports attribute ("dot") access, so `d.field`
is equivalent to `d['field']`.
"""
def __getattr__(self, attr):
if attr in self:
return self.get(attr)
else:
raise AttributeError
def __setattr__(self, key, value):
self.__setitem__(key, value)
def __hash__(self):
return id(self)
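# Illustrative sketch (hypothetical helper): attribute access and item
# access are interchangeable on AttrDict.
def _example_attr_dict():
    d = AttrDict()
    d.title = 'Graceland'
    return d.title == d['title']  # -> True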
class AlbumInfo(AttrDict):
"""Describes a canonical release that may be used to match a release
in the library. Consists of these data members:
- ``album``: the release title
- ``album_id``: MusicBrainz ID; UUID fragment only
- ``artist``: name of the release's primary artist
- ``artist_id``
- ``tracks``: list of TrackInfo objects making up the release
    The fields up through ``tracks`` are required; the others are
    optional and may be None.
"""
def __init__(self, tracks, album=None, album_id=None, artist=None,
artist_id=None, asin=None, albumtype=None, va=False,
year=None, month=None, day=None, label=None, mediums=None,
artist_sort=None, releasegroup_id=None, catalognum=None,
script=None, language=None, country=None, style=None,
genre=None, albumstatus=None, media=None, albumdisambig=None,
releasegroupdisambig=None, artist_credit=None,
original_year=None, original_month=None,
original_day=None, data_source=None, data_url=None,
discogs_albumid=None, discogs_labelid=None,
discogs_artistid=None, **kwargs):
self.album = album
self.album_id = album_id
self.artist = artist
self.artist_id = artist_id
self.tracks = tracks
self.asin = asin
self.albumtype = albumtype
self.va = va
self.year = year
self.month = month
self.day = day
self.label = label
self.mediums = mediums
self.artist_sort = artist_sort
self.releasegroup_id = releasegroup_id
self.catalognum = catalognum
self.script = script
self.language = language
self.country = country
self.style = style
self.genre = genre
self.albumstatus = albumstatus
self.media = media
self.albumdisambig = albumdisambig
self.releasegroupdisambig = releasegroupdisambig
self.artist_credit = artist_credit
self.original_year = original_year
self.original_month = original_month
self.original_day = original_day
self.data_source = data_source
self.data_url = data_url
self.discogs_albumid = discogs_albumid
self.discogs_labelid = discogs_labelid
self.discogs_artistid = discogs_artistid
self.update(kwargs)
# Work around a bug in python-musicbrainz-ngs that causes some
# strings to be bytes rather than Unicode.
# https://github.com/alastair/python-musicbrainz-ngs/issues/85
def decode(self, codec='utf-8'):
"""Ensure that all string attributes on this object, and the
constituent `TrackInfo` objects, are decoded to Unicode.
"""
for fld in ['album', 'artist', 'albumtype', 'label', 'artist_sort',
'catalognum', 'script', 'language', 'country', 'style',
'genre', 'albumstatus', 'albumdisambig',
'releasegroupdisambig', 'artist_credit',
'media', 'discogs_albumid', 'discogs_labelid',
'discogs_artistid']:
value = getattr(self, fld)
if isinstance(value, bytes):
setattr(self, fld, value.decode(codec, 'ignore'))
for track in self.tracks:
track.decode(codec)
def copy(self):
dupe = AlbumInfo([])
dupe.update(self)
dupe.tracks = [track.copy() for track in self.tracks]
return dupe
class TrackInfo(AttrDict):
"""Describes a canonical track present on a release. Appears as part
of an AlbumInfo's ``tracks`` list. Consists of these data members:
- ``title``: name of the track
- ``track_id``: MusicBrainz ID; UUID fragment only
Only ``title`` and ``track_id`` are required. The rest of the fields
may be None. The indices ``index``, ``medium``, and ``medium_index``
are all 1-based.
"""
def __init__(self, title=None, track_id=None, release_track_id=None,
artist=None, artist_id=None, length=None, index=None,
medium=None, medium_index=None, medium_total=None,
artist_sort=None, disctitle=None, artist_credit=None,
data_source=None, data_url=None, media=None, lyricist=None,
composer=None, composer_sort=None, arranger=None,
track_alt=None, work=None, mb_workid=None,
work_disambig=None, bpm=None, initial_key=None, genre=None,
**kwargs):
self.title = title
self.track_id = track_id
self.release_track_id = release_track_id
self.artist = artist
self.artist_id = artist_id
self.length = length
self.index = index
self.media = media
self.medium = medium
self.medium_index = medium_index
self.medium_total = medium_total
self.artist_sort = artist_sort
self.disctitle = disctitle
self.artist_credit = artist_credit
self.data_source = data_source
self.data_url = data_url
self.lyricist = lyricist
self.composer = composer
self.composer_sort = composer_sort
self.arranger = arranger
self.track_alt = track_alt
self.work = work
self.mb_workid = mb_workid
self.work_disambig = work_disambig
self.bpm = bpm
self.initial_key = initial_key
self.genre = genre
self.update(kwargs)
# As above, work around a bug in python-musicbrainz-ngs.
def decode(self, codec='utf-8'):
"""Ensure that all string attributes on this object are decoded
to Unicode.
"""
for fld in ['title', 'artist', 'medium', 'artist_sort', 'disctitle',
'artist_credit', 'media']:
value = getattr(self, fld)
if isinstance(value, bytes):
setattr(self, fld, value.decode(codec, 'ignore'))
def copy(self):
dupe = TrackInfo()
dupe.update(self)
return dupe
# Candidate distance scoring.
# Parameters for string distance function.
# Words that can be moved to the end of a string using a comma.
SD_END_WORDS = ['the', 'a', 'an']
# Reduced weights for certain portions of the string.
SD_PATTERNS = [
(r'^the ', 0.1),
(r'[\[\(]?(ep|single)[\]\)]?', 0.0),
(r'[\[\(]?(featuring|feat|ft)[\. :].+', 0.1),
(r'\(.*?\)', 0.3),
(r'\[.*?\]', 0.3),
(r'(, )?(pt\.|part) .+', 0.2),
]
# Replacements to use before testing distance.
SD_REPLACE = [
(r'&', 'and'),
]
def _string_dist_basic(str1, str2):
"""Basic edit distance between two strings, ignoring
non-alphanumeric characters and case. Comparisons are based on a
transliteration/lowering to ASCII characters. Normalized by string
length.
"""
assert isinstance(str1, str)
assert isinstance(str2, str)
str1 = as_string(unidecode(str1))
str2 = as_string(unidecode(str2))
str1 = re.sub(r'[^a-z0-9]', '', str1.lower())
str2 = re.sub(r'[^a-z0-9]', '', str2.lower())
if not str1 and not str2:
return 0.0
return levenshtein_distance(str1, str2) / float(max(len(str1), len(str2)))
def string_dist(str1, str2):
"""Gives an "intuitive" edit distance between two strings. This is
an edit distance, normalized by the string length, with a number of
tweaks that reflect intuition about text.
"""
if str1 is None and str2 is None:
return 0.0
if str1 is None or str2 is None:
return 1.0
str1 = str1.lower()
str2 = str2.lower()
# Don't penalize strings that move certain words to the end. For
# example, "the something" should be considered equal to
# "something, the".
for word in SD_END_WORDS:
if str1.endswith(', %s' % word):
str1 = '{} {}'.format(word, str1[:-len(word) - 2])
if str2.endswith(', %s' % word):
str2 = '{} {}'.format(word, str2[:-len(word) - 2])
# Perform a couple of basic normalizing substitutions.
for pat, repl in SD_REPLACE:
str1 = re.sub(pat, repl, str1)
str2 = re.sub(pat, repl, str2)
# Change the weight for certain string portions matched by a set
# of regular expressions. We gradually change the strings and build
# up penalties associated with parts of the string that were
# deleted.
base_dist = _string_dist_basic(str1, str2)
penalty = 0.0
for pat, weight in SD_PATTERNS:
# Get strings that drop the pattern.
case_str1 = re.sub(pat, '', str1)
case_str2 = re.sub(pat, '', str2)
if case_str1 != str1 or case_str2 != str2:
            # If the pattern was present (i.e., it is deleted in the
            # current case), recalculate the distances for the
# modified strings.
case_dist = _string_dist_basic(case_str1, case_str2)
case_delta = max(0.0, base_dist - case_dist)
if case_delta == 0.0:
continue
# Shift our baseline strings down (to avoid rematching the
# same part of the string) and add a scaled distance
# amount to the penalties.
str1 = case_str1
str2 = case_str2
base_dist = case_dist
penalty += weight * case_delta
return base_dist + penalty
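# Editor's sketch (not part of beets): intuition checks for
# `string_dist`; the exact values below follow directly from the code
# above.
def _demo_string_dist():
    assert string_dist(None, None) == 0.0   # both missing
    assert string_dist('abc', None) == 1.0  # one missing
    # Leading articles moved to the end cost nothing:
    assert string_dist('The Wall', 'Wall, The') == 0.0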
class LazyClassProperty:
"""A decorator implementing a read-only property that is *lazy* in
the sense that the getter is only invoked once. Subsequent accesses
through *any* instance use the cached result.
"""
def __init__(self, getter):
self.getter = getter
self.computed = False
def __get__(self, obj, owner):
if not self.computed:
self.value = self.getter(owner)
self.computed = True
return self.value
@total_ordering
class Distance:
"""Keeps track of multiple distance penalties. Provides a single
weighted distance for all penalties as well as a weighted distance
for each individual penalty.
"""
def __init__(self):
self._penalties = {}
@LazyClassProperty
def _weights(cls): # noqa: N805
"""A dictionary from keys to floating-point weights.
"""
weights_view = config['match']['distance_weights']
weights = {}
for key in weights_view.keys():
weights[key] = weights_view[key].as_number()
return weights
# Access the components and their aggregates.
@property
def distance(self):
"""Return a weighted and normalized distance across all
penalties.
"""
dist_max = self.max_distance
if dist_max:
return self.raw_distance / self.max_distance
return 0.0
@property
def max_distance(self):
"""Return the maximum distance penalty (normalization factor).
"""
dist_max = 0.0
for key, penalty in self._penalties.items():
dist_max += len(penalty) * self._weights[key]
return dist_max
@property
def raw_distance(self):
"""Return the raw (denormalized) distance.
"""
dist_raw = 0.0
for key, penalty in self._penalties.items():
dist_raw += sum(penalty) * self._weights[key]
return dist_raw
def items(self):
"""Return a list of (key, dist) pairs, with `dist` being the
weighted distance, sorted from highest to lowest. Does not
include penalties with a zero value.
"""
list_ = []
for key in self._penalties:
dist = self[key]
if dist:
list_.append((key, dist))
        # Convert distance into a negative float so we can sort items
        # in ascending order (by key, when penalties are equal) and
        # still get the items with the biggest distance first.
return sorted(
list_,
key=lambda key_and_dist: (-key_and_dist[1], key_and_dist[0])
)
def __hash__(self):
return id(self)
def __eq__(self, other):
return self.distance == other
# Behave like a float.
def __lt__(self, other):
return self.distance < other
def __float__(self):
return self.distance
def __sub__(self, other):
return self.distance - other
def __rsub__(self, other):
return other - self.distance
def __str__(self):
return f"{self.distance:.2f}"
# Behave like a dict.
def __getitem__(self, key):
"""Returns the weighted distance for a named penalty.
"""
dist = sum(self._penalties[key]) * self._weights[key]
dist_max = self.max_distance
if dist_max:
return dist / dist_max
return 0.0
def __iter__(self):
return iter(self.items())
def __len__(self):
return len(self.items())
def keys(self):
return [key for key, _ in self.items()]
def update(self, dist):
"""Adds all the distance penalties from `dist`.
"""
if not isinstance(dist, Distance):
raise ValueError(
'`dist` must be a Distance object, not {}'.format(type(dist))
)
for key, penalties in dist._penalties.items():
self._penalties.setdefault(key, []).extend(penalties)
# Adding components.
def _eq(self, value1, value2):
"""Returns True if `value1` is equal to `value2`. `value1` may
be a compiled regular expression, in which case it will be
matched against `value2`.
"""
if isinstance(value1, Pattern):
return bool(value1.match(value2))
return value1 == value2
def add(self, key, dist):
"""Adds a distance penalty. `key` must correspond with a
configured weight setting. `dist` must be a float between 0.0
and 1.0, and will be added to any existing distance penalties
for the same key.
"""
if not 0.0 <= dist <= 1.0:
raise ValueError(
f'`dist` must be between 0.0 and 1.0, not {dist}'
)
self._penalties.setdefault(key, []).append(dist)
def add_equality(self, key, value, options):
"""Adds a distance penalty of 1.0 if `value` doesn't match any
of the values in `options`. If an option is a compiled regular
expression, it will be considered equal if it matches against
`value`.
"""
if not isinstance(options, (list, tuple)):
options = [options]
for opt in options:
if self._eq(opt, value):
dist = 0.0
break
else:
dist = 1.0
self.add(key, dist)
def add_expr(self, key, expr):
"""Adds a distance penalty of 1.0 if `expr` evaluates to True,
or 0.0.
"""
if expr:
self.add(key, 1.0)
else:
self.add(key, 0.0)
def add_number(self, key, number1, number2):
"""Adds a distance penalty of 1.0 for each number of difference
between `number1` and `number2`, or 0.0 when there is no
difference. Use this when there is no upper limit on the
difference between the two numbers.
"""
diff = abs(number1 - number2)
if diff:
for i in range(diff):
self.add(key, 1.0)
else:
self.add(key, 0.0)
def add_priority(self, key, value, options):
"""Adds a distance penalty that corresponds to the position at
which `value` appears in `options`. A distance penalty of 0.0
for the first option, or 1.0 if there is no matching option. If
an option is a compiled regular expression, it will be
considered equal if it matches against `value`.
"""
if not isinstance(options, (list, tuple)):
options = [options]
unit = 1.0 / (len(options) or 1)
for i, opt in enumerate(options):
if self._eq(opt, value):
dist = i * unit
break
else:
dist = 1.0
self.add(key, dist)
def add_ratio(self, key, number1, number2):
"""Adds a distance penalty for `number1` as a ratio of `number2`.
        `number1` is clamped between 0 and `number2`.
"""
number = float(max(min(number1, number2), 0))
if number2:
dist = number / number2
else:
dist = 0.0
self.add(key, dist)
def add_string(self, key, str1, str2):
"""Adds a distance penalty based on the edit distance between
`str1` and `str2`.
"""
dist = string_dist(str1, str2)
self.add(key, dist)
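# Editor's sketch (not part of beets): composing penalties with the
# Distance API; the keys used here must correspond to entries under
# `match.distance_weights` in the configuration.
def _demo_distance():
    dist = Distance()
    dist.add_string('album', 'The Wall', 'Wall, The')  # 0.0 penalty
    dist.add_expr('unmatched_tracks', False)           # 0.0 penalty
    dist.add_number('tracks', 12, 14)                  # two 1.0 penalties
    return dist.distance  # weighted and normalized to [0.0, 1.0]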
# Structures that compose all the information for a candidate match.
AlbumMatch = namedtuple('AlbumMatch', ['distance', 'info', 'mapping',
'extra_items', 'extra_tracks'])
TrackMatch = namedtuple('TrackMatch', ['distance', 'info'])
# Aggregation of sources.
def album_for_mbid(release_id):
"""Get an AlbumInfo object for a MusicBrainz release ID. Return None
if the ID is not found.
"""
try:
album = mb.album_for_id(release_id)
if album:
plugins.send('albuminfo_received', info=album)
return album
except mb.MusicBrainzAPIError as exc:
exc.log(log)
def track_for_mbid(recording_id):
"""Get a TrackInfo object for a MusicBrainz recording ID. Return None
if the ID is not found.
"""
try:
track = mb.track_for_id(recording_id)
if track:
plugins.send('trackinfo_received', info=track)
return track
except mb.MusicBrainzAPIError as exc:
exc.log(log)
def albums_for_id(album_id):
"""Get a list of albums for an ID."""
a = album_for_mbid(album_id)
if a:
yield a
for a in plugins.album_for_id(album_id):
if a:
plugins.send('albuminfo_received', info=a)
yield a
def tracks_for_id(track_id):
"""Get a list of tracks for an ID."""
t = track_for_mbid(track_id)
if t:
yield t
for t in plugins.track_for_id(track_id):
if t:
plugins.send('trackinfo_received', info=t)
yield t
@plugins.notify_info_yielded('albuminfo_received')
def album_candidates(items, artist, album, va_likely, extra_tags):
"""Search for album matches. ``items`` is a list of Item objects
that make up the album. ``artist`` and ``album`` are the respective
names (strings), which may be derived from the item list or may be
entered by the user. ``va_likely`` is a boolean indicating whether
the album is likely to be a "various artists" release. ``extra_tags``
is an optional dictionary of additional tags used to further
constrain the search.
"""
# Base candidates if we have album and artist to match.
if artist and album:
try:
yield from mb.match_album(artist, album, len(items),
extra_tags)
except mb.MusicBrainzAPIError as exc:
exc.log(log)
# Also add VA matches from MusicBrainz where appropriate.
if va_likely and album:
try:
yield from mb.match_album(None, album, len(items),
extra_tags)
except mb.MusicBrainzAPIError as exc:
exc.log(log)
# Candidates from plugins.
yield from plugins.candidates(items, artist, album, va_likely,
extra_tags)
@plugins.notify_info_yielded('trackinfo_received')
def item_candidates(item, artist, title):
"""Search for item matches. ``item`` is the Item to be matched.
``artist`` and ``title`` are strings and either reflect the item or
are specified by the user.
"""
# MusicBrainz candidates.
if artist and title:
try:
yield from mb.match_track(artist, title)
except mb.MusicBrainzAPIError as exc:
exc.log(log)
# Plugin candidates.
yield from plugins.item_candidates(item, artist, title)
| 21,866 | Python | .py | 551 | 31.286751 | 78 | 0.614054 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
| 9,264 | pipeline.py | rembo10_headphones/lib/beets/util/pipeline.py |
# This file is part of beets.
# Copyright 2016, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Simple but robust implementation of generator/coroutine-based
pipelines in Python. The pipelines may be run either sequentially
(single-threaded) or in parallel (one thread per pipeline stage).
This implementation supports pipeline bubbles (indications that the
processing for a certain item should abort). To use them, yield the
BUBBLE constant from any stage coroutine except the last.
In the parallel case, the implementation transparently handles thread
shutdown when the processing is complete and when a stage raises an
exception. KeyboardInterrupts (^C) are also handled.
When running a parallel pipeline, it is also possible to use
multiple coroutines for the same pipeline stage; this lets you speed
up a bottleneck stage by dividing its work among multiple threads.
To do so, pass an iterable of coroutines to the Pipeline constructor
in place of any single coroutine.
"""
import queue
from threading import Thread, Lock
import sys
BUBBLE = '__PIPELINE_BUBBLE__'
POISON = '__PIPELINE_POISON__'
DEFAULT_QUEUE_SIZE = 16
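# Editor's sketch (not part of beets): a middle stage using the BUBBLE
# convention from the module docstring to drop odd numbers. The
# `_demo_evens_only` name is hypothetical; place it between a producer
# and a consumer stage, e.g.
#     Pipeline([iter(range(4)), _demo_evens_only(), consumer()])
def _demo_evens_only():
    msg = yield
    while True:
        msg = yield (msg if msg % 2 == 0 else BUBBLE)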
def _invalidate_queue(q, val=None, sync=True):
"""Breaks a Queue such that it never blocks, always has size 1,
and has no maximum size. get()ing from the queue returns `val`,
which defaults to None. `sync` controls whether a lock is
required (because it's not reentrant!).
"""
def _qsize(len=len):
return 1
def _put(item):
pass
def _get():
return val
if sync:
q.mutex.acquire()
try:
# Originally, we set `maxsize` to 0 here, which is supposed to mean
# an unlimited queue size. However, there is a race condition since
# Python 3.2 when this attribute is changed while another thread is
# waiting in put()/get() due to a full/empty queue.
# Setting it to 2 is still hacky because Python does not give any
# guarantee what happens if Queue methods/attributes are overwritten
# when it is already in use. However, because of our dummy _put()
# and _get() methods, it provides a workaround to let the queue appear
# to be never empty or full.
# See issue https://github.com/beetbox/beets/issues/2078
q.maxsize = 2
q._qsize = _qsize
q._put = _put
q._get = _get
        q.not_empty.notify_all()
        q.not_full.notify_all()
finally:
if sync:
q.mutex.release()
class CountedQueue(queue.Queue):
"""A queue that keeps track of the number of threads that are
still feeding into it. The queue is poisoned when all threads are
finished with the queue.
"""
def __init__(self, maxsize=0):
queue.Queue.__init__(self, maxsize)
self.nthreads = 0
self.poisoned = False
def acquire(self):
"""Indicate that a thread will start putting into this queue.
Should not be called after the queue is already poisoned.
"""
with self.mutex:
assert not self.poisoned
assert self.nthreads >= 0
self.nthreads += 1
def release(self):
"""Indicate that a thread that was putting into this queue has
exited. If this is the last thread using the queue, the queue
is poisoned.
"""
with self.mutex:
self.nthreads -= 1
assert self.nthreads >= 0
if self.nthreads == 0:
# All threads are done adding to this queue. Poison it
# when it becomes empty.
self.poisoned = True
# Replacement _get invalidates when no items remain.
_old_get = self._get
def _get():
out = _old_get()
if not self.queue:
_invalidate_queue(self, POISON, False)
return out
if self.queue:
# Items remain.
self._get = _get
else:
# No items. Invalidate immediately.
_invalidate_queue(self, POISON, False)
class MultiMessage:
"""A message yielded by a pipeline stage encapsulating multiple
values to be sent to the next stage.
"""
def __init__(self, messages):
self.messages = messages
def multiple(messages):
"""Yield multiple([message, ..]) from a pipeline stage to send
multiple values to the next pipeline stage.
"""
return MultiMessage(messages)
def stage(func):
"""Decorate a function to become a simple stage.
>>> @stage
... def add(n, i):
... return i + n
>>> pipe = Pipeline([
... iter([1, 2, 3]),
... add(2),
... ])
>>> list(pipe.pull())
[3, 4, 5]
"""
def coro(*args):
task = None
while True:
task = yield task
task = func(*(args + (task,)))
return coro
def mutator_stage(func):
"""Decorate a function that manipulates items in a coroutine to
become a simple stage.
>>> @mutator_stage
... def setkey(key, item):
... item[key] = True
>>> pipe = Pipeline([
... iter([{'x': False}, {'a': False}]),
... setkey('x'),
... ])
>>> list(pipe.pull())
[{'x': True}, {'a': False, 'x': True}]
"""
def coro(*args):
task = None
while True:
task = yield task
func(*(args + (task,)))
return coro
def _allmsgs(obj):
"""Returns a list of all the messages encapsulated in obj. If obj
is a MultiMessage, returns its enclosed messages. If obj is BUBBLE,
returns an empty list. Otherwise, returns a list containing obj.
"""
if isinstance(obj, MultiMessage):
return obj.messages
elif obj == BUBBLE:
return []
else:
return [obj]
class PipelineThread(Thread):
"""Abstract base class for pipeline-stage threads."""
def __init__(self, all_threads):
super().__init__()
self.abort_lock = Lock()
self.abort_flag = False
self.all_threads = all_threads
self.exc_info = None
def abort(self):
"""Shut down the thread at the next chance possible.
"""
with self.abort_lock:
self.abort_flag = True
# Ensure that we are not blocking on a queue read or write.
if hasattr(self, 'in_queue'):
_invalidate_queue(self.in_queue, POISON)
if hasattr(self, 'out_queue'):
_invalidate_queue(self.out_queue, POISON)
def abort_all(self, exc_info):
"""Abort all other threads in the system for an exception.
"""
self.exc_info = exc_info
for thread in self.all_threads:
thread.abort()
class FirstPipelineThread(PipelineThread):
"""The thread running the first stage in a parallel pipeline setup.
The coroutine should just be a generator.
"""
def __init__(self, coro, out_queue, all_threads):
super().__init__(all_threads)
self.coro = coro
self.out_queue = out_queue
self.out_queue.acquire()
def run(self):
try:
while True:
with self.abort_lock:
if self.abort_flag:
return
# Get the value from the generator.
try:
msg = next(self.coro)
except StopIteration:
break
# Send messages to the next stage.
for msg in _allmsgs(msg):
with self.abort_lock:
if self.abort_flag:
return
self.out_queue.put(msg)
except BaseException:
self.abort_all(sys.exc_info())
return
# Generator finished; shut down the pipeline.
self.out_queue.release()
class MiddlePipelineThread(PipelineThread):
"""A thread running any stage in the pipeline except the first or
last.
"""
def __init__(self, coro, in_queue, out_queue, all_threads):
super().__init__(all_threads)
self.coro = coro
self.in_queue = in_queue
self.out_queue = out_queue
self.out_queue.acquire()
def run(self):
try:
# Prime the coroutine.
next(self.coro)
while True:
with self.abort_lock:
if self.abort_flag:
return
# Get the message from the previous stage.
msg = self.in_queue.get()
if msg is POISON:
break
with self.abort_lock:
if self.abort_flag:
return
# Invoke the current stage.
out = self.coro.send(msg)
# Send messages to next stage.
for msg in _allmsgs(out):
with self.abort_lock:
if self.abort_flag:
return
self.out_queue.put(msg)
except BaseException:
self.abort_all(sys.exc_info())
return
# Pipeline is shutting down normally.
self.out_queue.release()
class LastPipelineThread(PipelineThread):
"""A thread running the last stage in a pipeline. The coroutine
should yield nothing.
"""
def __init__(self, coro, in_queue, all_threads):
super().__init__(all_threads)
self.coro = coro
self.in_queue = in_queue
def run(self):
# Prime the coroutine.
next(self.coro)
try:
while True:
with self.abort_lock:
if self.abort_flag:
return
# Get the message from the previous stage.
msg = self.in_queue.get()
if msg is POISON:
break
with self.abort_lock:
if self.abort_flag:
return
# Send to consumer.
self.coro.send(msg)
except BaseException:
self.abort_all(sys.exc_info())
return
class Pipeline:
"""Represents a staged pattern of work. Each stage in the pipeline
is a coroutine that receives messages from the previous stage and
yields messages to be sent to the next stage.
"""
def __init__(self, stages):
"""Makes a new pipeline from a list of coroutines. There must
be at least two stages.
"""
if len(stages) < 2:
raise ValueError('pipeline must have at least two stages')
self.stages = []
for stage in stages:
if isinstance(stage, (list, tuple)):
self.stages.append(stage)
else:
# Default to one thread per stage.
self.stages.append((stage,))
def run_sequential(self):
"""Run the pipeline sequentially in the current thread. The
stages are run one after the other. Only the first coroutine
in each stage is used.
"""
list(self.pull())
def run_parallel(self, queue_size=DEFAULT_QUEUE_SIZE):
"""Run the pipeline in parallel using one thread per stage. The
messages between the stages are stored in queues of the given
size.
"""
queue_count = len(self.stages) - 1
queues = [CountedQueue(queue_size) for i in range(queue_count)]
threads = []
# Set up first stage.
for coro in self.stages[0]:
threads.append(FirstPipelineThread(coro, queues[0], threads))
# Middle stages.
for i in range(1, queue_count):
for coro in self.stages[i]:
threads.append(MiddlePipelineThread(
coro, queues[i - 1], queues[i], threads
))
# Last stage.
for coro in self.stages[-1]:
threads.append(
LastPipelineThread(coro, queues[-1], threads)
)
# Start threads.
for thread in threads:
thread.start()
# Wait for termination. The final thread lasts the longest.
try:
# Using a timeout allows us to receive KeyboardInterrupt
# exceptions during the join().
while threads[-1].is_alive():
threads[-1].join(1)
except BaseException:
# Stop all the threads immediately.
for thread in threads:
thread.abort()
raise
finally:
# Make completely sure that all the threads have finished
# before we return. They should already be either finished,
# in normal operation, or aborted, in case of an exception.
for thread in threads[:-1]:
thread.join()
for thread in threads:
exc_info = thread.exc_info
if exc_info:
# Make the exception appear as it was raised originally.
raise exc_info[1].with_traceback(exc_info[2])
def pull(self):
"""Yield elements from the end of the pipeline. Runs the stages
sequentially until the last yields some messages. Each of the messages
is then yielded by ``pulled.next()``. If the pipeline has a consumer,
that is the last stage does not yield any messages, then pull will not
yield any messages. Only the first coroutine in each stage is used
"""
coros = [stage[0] for stage in self.stages]
# "Prime" the coroutines.
for coro in coros[1:]:
next(coro)
# Begin the pipeline.
for out in coros[0]:
msgs = _allmsgs(out)
for coro in coros[1:]:
next_msgs = []
for msg in msgs:
out = coro.send(msg)
next_msgs.extend(_allmsgs(out))
msgs = next_msgs
for msg in msgs:
yield msg
# Smoke test.
if __name__ == '__main__':
import time
# Test a normally-terminating pipeline both in sequence and
# in parallel.
def produce():
for i in range(5):
print('generating %i' % i)
time.sleep(1)
yield i
def work():
num = yield
while True:
print('processing %i' % num)
time.sleep(2)
num = yield num * 2
def consume():
while True:
num = yield
time.sleep(1)
print('received %i' % num)
ts_start = time.time()
Pipeline([produce(), work(), consume()]).run_sequential()
ts_seq = time.time()
Pipeline([produce(), work(), consume()]).run_parallel()
ts_par = time.time()
Pipeline([produce(), (work(), work()), consume()]).run_parallel()
ts_end = time.time()
print('Sequential time:', ts_seq - ts_start)
print('Parallel time:', ts_par - ts_seq)
print('Multiply-parallel time:', ts_end - ts_par)
print()
# Test a pipeline that raises an exception.
def exc_produce():
for i in range(10):
print('generating %i' % i)
time.sleep(1)
yield i
def exc_work():
num = yield
while True:
print('processing %i' % num)
time.sleep(3)
if num == 3:
raise Exception()
num = yield num * 2
def exc_consume():
while True:
num = yield
print('received %i' % num)
Pipeline([exc_produce(), exc_work(), exc_consume()]).run_parallel(1)
| 16,317 | Python | .py | 434 | 27.71659 | 78 | 0.57713 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
| 9,265 | bluelet.py | rembo10_headphones/lib/beets/util/bluelet.py |
"""Extremely simple pure-Python implementation of coroutine-style
asynchronous socket I/O. Inspired by, but inferior to, Eventlet.
Bluelet can also be thought of as a less-terrible replacement for
asyncore.
Bluelet: easy concurrency without all the messy parallelism.
"""
import socket
import select
import sys
import types
import errno
import traceback
import time
import collections
# Basic events used for thread scheduling.
class Event:
"""Just a base class identifying Bluelet events. An event is an
object yielded from a Bluelet thread coroutine to suspend operation
and communicate with the scheduler.
"""
pass
class WaitableEvent(Event):
"""A waitable event is one encapsulating an action that can be
waited for using a select() call. That is, it's an event with an
associated file descriptor.
"""
def waitables(self):
"""Return "waitable" objects to pass to select(). Should return
three iterables for input readiness, output readiness, and
exceptional conditions (i.e., the three lists passed to
select()).
"""
return (), (), ()
def fire(self):
"""Called when an associated file descriptor becomes ready
(i.e., is returned from a select() call).
"""
pass
class ValueEvent(Event):
"""An event that does nothing but return a fixed value."""
def __init__(self, value):
self.value = value
class ExceptionEvent(Event):
"""Raise an exception at the yield point. Used internally."""
def __init__(self, exc_info):
self.exc_info = exc_info
class SpawnEvent(Event):
"""Add a new coroutine thread to the scheduler."""
def __init__(self, coro):
self.spawned = coro
class JoinEvent(Event):
"""Suspend the thread until the specified child thread has
completed.
"""
def __init__(self, child):
self.child = child
class KillEvent(Event):
"""Unschedule a child thread."""
def __init__(self, child):
self.child = child
class DelegationEvent(Event):
"""Suspend execution of the current thread, start a new thread and,
    once the child thread has finished, return control to the parent
thread.
"""
def __init__(self, coro):
self.spawned = coro
class ReturnEvent(Event):
"""Return a value the current thread's delegator at the point of
delegation. Ends the current (delegate) thread.
"""
def __init__(self, value):
self.value = value
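# Editor's sketch (not part of bluelet): delegation in practice. `call`,
# `end`, and `sleep` are defined later in this module; the `_demo_*`
# names are hypothetical.
def _demo_child():
    yield sleep(0.1)
    yield end(42)
def _demo_parent():
    value = yield call(_demo_child())  # suspended until the child end()s
    yield end(value)                   # propagates 42 upward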
class SleepEvent(WaitableEvent):
"""Suspend the thread for a given duration.
"""
def __init__(self, duration):
self.wakeup_time = time.time() + duration
def time_left(self):
return max(self.wakeup_time - time.time(), 0.0)
class ReadEvent(WaitableEvent):
"""Reads from a file-like object."""
def __init__(self, fd, bufsize):
self.fd = fd
self.bufsize = bufsize
def waitables(self):
return (self.fd,), (), ()
def fire(self):
return self.fd.read(self.bufsize)
class WriteEvent(WaitableEvent):
"""Writes to a file-like object."""
def __init__(self, fd, data):
self.fd = fd
self.data = data
    def waitables(self):
return (), (self.fd,), ()
def fire(self):
self.fd.write(self.data)
# Core logic for executing and scheduling threads.
def _event_select(events):
"""Perform a select() over all the Events provided, returning the
ones ready to be fired. Only WaitableEvents (including SleepEvents)
matter here; all other events are ignored (and thus postponed).
"""
# Gather waitables and wakeup times.
waitable_to_event = {}
rlist, wlist, xlist = [], [], []
earliest_wakeup = None
for event in events:
if isinstance(event, SleepEvent):
if not earliest_wakeup:
earliest_wakeup = event.wakeup_time
else:
earliest_wakeup = min(earliest_wakeup, event.wakeup_time)
elif isinstance(event, WaitableEvent):
r, w, x = event.waitables()
rlist += r
wlist += w
xlist += x
for waitable in r:
waitable_to_event[('r', waitable)] = event
for waitable in w:
waitable_to_event[('w', waitable)] = event
for waitable in x:
waitable_to_event[('x', waitable)] = event
    # If we have any sleeping threads, determine how long to sleep.
if earliest_wakeup:
timeout = max(earliest_wakeup - time.time(), 0.0)
else:
timeout = None
# Perform select() if we have any waitables.
if rlist or wlist or xlist:
rready, wready, xready = select.select(rlist, wlist, xlist, timeout)
else:
rready, wready, xready = (), (), ()
if timeout:
time.sleep(timeout)
# Gather ready events corresponding to the ready waitables.
ready_events = set()
for ready in rready:
ready_events.add(waitable_to_event[('r', ready)])
for ready in wready:
ready_events.add(waitable_to_event[('w', ready)])
for ready in xready:
ready_events.add(waitable_to_event[('x', ready)])
# Gather any finished sleeps.
for event in events:
if isinstance(event, SleepEvent) and event.time_left() == 0.0:
ready_events.add(event)
return ready_events
class ThreadException(Exception):
def __init__(self, coro, exc_info):
self.coro = coro
self.exc_info = exc_info
def reraise(self):
raise self.exc_info[1].with_traceback(self.exc_info[2])
SUSPENDED = Event() # Special sentinel placeholder for suspended threads.
class Delegated(Event):
"""Placeholder indicating that a thread has delegated execution to a
different thread.
"""
def __init__(self, child):
self.child = child
def run(root_coro):
"""Schedules a coroutine, running it to completion. This
encapsulates the Bluelet scheduler, which the root coroutine can
add to by spawning new coroutines.
"""
# The "threads" dictionary keeps track of all the currently-
# executing and suspended coroutines. It maps coroutines to their
# currently "blocking" event. The event value may be SUSPENDED if
# the coroutine is waiting on some other condition: namely, a
# delegated coroutine or a joined coroutine. In this case, the
# coroutine should *also* appear as a value in one of the below
# dictionaries `delegators` or `joiners`.
threads = {root_coro: ValueEvent(None)}
# Maps child coroutines to delegating parents.
delegators = {}
# Maps child coroutines to joining (exit-waiting) parents.
joiners = collections.defaultdict(list)
def complete_thread(coro, return_value):
"""Remove a coroutine from the scheduling pool, awaking
delegators and joiners as necessary and returning the specified
value to any delegating parent.
"""
del threads[coro]
# Resume delegator.
if coro in delegators:
threads[delegators[coro]] = ValueEvent(return_value)
del delegators[coro]
# Resume joiners.
if coro in joiners:
for parent in joiners[coro]:
threads[parent] = ValueEvent(None)
del joiners[coro]
def advance_thread(coro, value, is_exc=False):
"""After an event is fired, run a given coroutine associated with
it in the threads dict until it yields again. If the coroutine
exits, then the thread is removed from the pool. If the coroutine
raises an exception, it is reraised in a ThreadException. If
is_exc is True, then the value must be an exc_info tuple and the
exception is thrown into the coroutine.
"""
try:
if is_exc:
next_event = coro.throw(*value)
else:
next_event = coro.send(value)
except StopIteration:
# Thread is done.
complete_thread(coro, None)
except BaseException:
# Thread raised some other exception.
del threads[coro]
raise ThreadException(coro, sys.exc_info())
else:
if isinstance(next_event, types.GeneratorType):
# Automatically invoke sub-coroutines. (Shorthand for
# explicit bluelet.call().)
next_event = DelegationEvent(next_event)
threads[coro] = next_event
def kill_thread(coro):
"""Unschedule this thread and its (recursive) delegates.
"""
# Collect all coroutines in the delegation stack.
coros = [coro]
while isinstance(threads[coro], Delegated):
coro = threads[coro].child
coros.append(coro)
# Complete each coroutine from the top to the bottom of the
# stack.
for coro in reversed(coros):
complete_thread(coro, None)
# Continue advancing threads until root thread exits.
exit_te = None
while threads:
try:
# Look for events that can be run immediately. Continue
# running immediate events until nothing is ready.
while True:
have_ready = False
for coro, event in list(threads.items()):
if isinstance(event, SpawnEvent):
threads[event.spawned] = ValueEvent(None) # Spawn.
advance_thread(coro, None)
have_ready = True
elif isinstance(event, ValueEvent):
advance_thread(coro, event.value)
have_ready = True
elif isinstance(event, ExceptionEvent):
advance_thread(coro, event.exc_info, True)
have_ready = True
elif isinstance(event, DelegationEvent):
threads[coro] = Delegated(event.spawned) # Suspend.
threads[event.spawned] = ValueEvent(None) # Spawn.
delegators[event.spawned] = coro
have_ready = True
elif isinstance(event, ReturnEvent):
# Thread is done.
complete_thread(coro, event.value)
have_ready = True
elif isinstance(event, JoinEvent):
threads[coro] = SUSPENDED # Suspend.
joiners[event.child].append(coro)
have_ready = True
elif isinstance(event, KillEvent):
threads[coro] = ValueEvent(None)
kill_thread(event.child)
have_ready = True
# Only start the select when nothing else is ready.
if not have_ready:
break
# Wait and fire.
event2coro = {v: k for k, v in threads.items()}
for event in _event_select(threads.values()):
# Run the IO operation, but catch socket errors.
try:
value = event.fire()
except OSError as exc:
if isinstance(exc.args, tuple) and \
exc.args[0] == errno.EPIPE:
# Broken pipe. Remote host disconnected.
pass
elif isinstance(exc.args, tuple) and \
exc.args[0] == errno.ECONNRESET:
# Connection was reset by peer.
pass
else:
traceback.print_exc()
# Abort the coroutine.
threads[event2coro[event]] = ReturnEvent(None)
else:
advance_thread(event2coro[event], value)
except ThreadException as te:
# Exception raised from inside a thread.
event = ExceptionEvent(te.exc_info)
if te.coro in delegators:
# The thread is a delegate. Raise exception in its
# delegator.
threads[delegators[te.coro]] = event
del delegators[te.coro]
else:
# The thread is root-level. Raise in client code.
exit_te = te
break
except BaseException:
# For instance, KeyboardInterrupt during select(). Raise
# into root thread and terminate others.
threads = {root_coro: ExceptionEvent(sys.exc_info())}
# If any threads still remain, kill them.
for coro in threads:
coro.close()
# If we're exiting with an exception, raise it in the client.
if exit_te:
exit_te.reraise()
# Sockets and their associated events.
class SocketClosedError(Exception):
pass
class Listener:
"""A socket wrapper object for listening sockets.
"""
def __init__(self, host, port):
"""Create a listening socket on the given hostname and port.
"""
self._closed = False
self.host = host
self.port = port
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.sock.bind((host, port))
self.sock.listen(5)
def accept(self):
"""An event that waits for a connection on the listening socket.
When a connection is made, the event returns a Connection
object.
"""
if self._closed:
raise SocketClosedError()
return AcceptEvent(self)
def close(self):
"""Immediately close the listening socket. (Not an event.)
"""
self._closed = True
self.sock.close()
class Connection:
"""A socket wrapper object for connected sockets.
"""
def __init__(self, sock, addr):
self.sock = sock
self.addr = addr
self._buf = b''
self._closed = False
def close(self):
"""Close the connection."""
self._closed = True
self.sock.close()
def recv(self, size):
"""Read at most size bytes of data from the socket."""
if self._closed:
raise SocketClosedError()
if self._buf:
# We already have data read previously.
out = self._buf[:size]
self._buf = self._buf[size:]
return ValueEvent(out)
else:
return ReceiveEvent(self, size)
def send(self, data):
"""Sends data on the socket, returning the number of bytes
successfully sent.
"""
if self._closed:
raise SocketClosedError()
return SendEvent(self, data)
def sendall(self, data):
"""Send all of data on the socket."""
if self._closed:
raise SocketClosedError()
return SendEvent(self, data, True)
def readline(self, terminator=b"\n", bufsize=1024):
"""Reads a line (delimited by terminator) from the socket."""
if self._closed:
raise SocketClosedError()
while True:
if terminator in self._buf:
line, self._buf = self._buf.split(terminator, 1)
line += terminator
yield ReturnEvent(line)
break
data = yield ReceiveEvent(self, bufsize)
if data:
self._buf += data
else:
line = self._buf
self._buf = b''
yield ReturnEvent(line)
break
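# Editor's note (not part of bluelet): `readline` is itself a generator,
# so callers just yield it and rely on the automatic delegation of
# yielded generators (see `advance_thread` above); `_demo_request_line`
# is hypothetical.
def _demo_request_line(conn):
    line = yield conn.readline(b'\r\n')
    yield end(line.strip())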
class AcceptEvent(WaitableEvent):
"""An event for Listener objects (listening sockets) that suspends
execution until the socket gets a connection.
"""
def __init__(self, listener):
self.listener = listener
def waitables(self):
return (self.listener.sock,), (), ()
def fire(self):
sock, addr = self.listener.sock.accept()
return Connection(sock, addr)
class ReceiveEvent(WaitableEvent):
"""An event for Connection objects (connected sockets) for
asynchronously reading data.
"""
def __init__(self, conn, bufsize):
self.conn = conn
self.bufsize = bufsize
def waitables(self):
return (self.conn.sock,), (), ()
def fire(self):
return self.conn.sock.recv(self.bufsize)
class SendEvent(WaitableEvent):
"""An event for Connection objects (connected sockets) for
asynchronously writing data.
"""
def __init__(self, conn, data, sendall=False):
self.conn = conn
self.data = data
self.sendall = sendall
def waitables(self):
return (), (self.conn.sock,), ()
def fire(self):
if self.sendall:
return self.conn.sock.sendall(self.data)
else:
return self.conn.sock.send(self.data)
# Public interface for threads; each returns an event object that
# can immediately be "yield"ed.
def null():
"""Event: yield to the scheduler without doing anything special.
"""
return ValueEvent(None)
def spawn(coro):
"""Event: add another coroutine to the scheduler. Both the parent
and child coroutines run concurrently.
"""
if not isinstance(coro, types.GeneratorType):
raise ValueError('%s is not a coroutine' % coro)
return SpawnEvent(coro)
def call(coro):
"""Event: delegate to another coroutine. The current coroutine
is resumed once the sub-coroutine finishes. If the sub-coroutine
returns a value using end(), then this event returns that value.
"""
if not isinstance(coro, types.GeneratorType):
raise ValueError('%s is not a coroutine' % coro)
return DelegationEvent(coro)
def end(value=None):
"""Event: ends the coroutine and returns a value to its
delegator.
"""
return ReturnEvent(value)
def read(fd, bufsize=None):
"""Event: read from a file descriptor asynchronously."""
if bufsize is None:
# Read all.
def reader():
buf = []
while True:
data = yield read(fd, 1024)
if not data:
break
buf.append(data)
yield ReturnEvent(''.join(buf))
return DelegationEvent(reader())
else:
return ReadEvent(fd, bufsize)
def write(fd, data):
"""Event: write to a file descriptor asynchronously."""
return WriteEvent(fd, data)
def connect(host, port):
"""Event: connect to a network address and return a Connection
object for communicating on the socket.
"""
addr = (host, port)
sock = socket.create_connection(addr)
return ValueEvent(Connection(sock, addr))
def sleep(duration):
"""Event: suspend the thread for ``duration`` seconds.
"""
return SleepEvent(duration)
def join(coro):
"""Suspend the thread until another, previously `spawn`ed thread
completes.
"""
return JoinEvent(coro)
def kill(coro):
"""Halt the execution of a different `spawn`ed thread.
"""
return KillEvent(coro)
# Convenience function for running socket servers.
def server(host, port, func):
"""A coroutine that runs a network server. Host and port specify the
listening address. func should be a coroutine that takes a single
parameter, a Connection object. The coroutine is invoked for every
incoming connection on the listening socket.
"""
def handler(conn):
try:
yield func(conn)
finally:
conn.close()
listener = Listener(host, port)
try:
while True:
conn = yield listener.accept()
yield spawn(handler(conn))
except KeyboardInterrupt:
pass
finally:
listener.close()
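# Editor's sketch (not part of bluelet): a minimal echo service built on
# `server()` above; the host and port are arbitrary examples.
def _demo_echo(conn):
    while True:
        data = yield conn.recv(1024)
        if not data:
            break
        yield conn.sendall(data)
# To serve: run(server('127.0.0.1', 4915, _demo_echo))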
| 19,890 | Python | .py | 519 | 29.025048 | 76 | 0.606067 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
| 9,266 | confit.py | rembo10_headphones/lib/beets/util/confit.py |
# This file is part of beets.
# Copyright 2016-2019, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
import confuse
import warnings
warnings.warn("beets.util.confit is deprecated; use confuse instead")
# Import everything from the confuse module into this module.
for key, value in confuse.__dict__.items():
if key not in ['__name__']:
globals()[key] = value
# Cleanup namespace.
del key, value, warnings, confuse
| 982 | Python | .py | 22 | 42.818182 | 71 | 0.773585 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
| 9,267 | __init__.py | rembo10_headphones/lib/beets/util/__init__.py |
# This file is part of beets.
# Copyright 2016, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Miscellaneous utility functions."""
import os
import sys
import errno
import locale
import re
import tempfile
import shutil
import fnmatch
import functools
from collections import Counter, namedtuple
from multiprocessing.pool import ThreadPool
import traceback
import subprocess
import platform
import shlex
from beets.util import hidden
from unidecode import unidecode
from enum import Enum
MAX_FILENAME_LENGTH = 200
WINDOWS_MAGIC_PREFIX = '\\\\?\\'
class HumanReadableException(Exception):
"""An Exception that can include a human-readable error message to
be logged without a traceback. Can preserve a traceback for
debugging purposes as well.
Has at least two fields: `reason`, the underlying exception or a
string describing the problem; and `verb`, the action being
performed during the error.
If `tb` is provided, it is a string containing a traceback for the
associated exception. (Note that this is not necessary in Python 3.x
and should be removed when we make the transition.)
"""
error_kind = 'Error' # Human-readable description of error type.
def __init__(self, reason, verb, tb=None):
self.reason = reason
self.verb = verb
self.tb = tb
super().__init__(self.get_message())
def _gerund(self):
"""Generate a (likely) gerund form of the English verb.
"""
if ' ' in self.verb:
return self.verb
gerund = self.verb[:-1] if self.verb.endswith('e') else self.verb
gerund += 'ing'
return gerund
def _reasonstr(self):
"""Get the reason as a string."""
if isinstance(self.reason, str):
return self.reason
elif isinstance(self.reason, bytes):
return self.reason.decode('utf-8', 'ignore')
elif hasattr(self.reason, 'strerror'): # i.e., EnvironmentError
return self.reason.strerror
else:
return '"{}"'.format(str(self.reason))
def get_message(self):
"""Create the human-readable description of the error, sans
introduction.
"""
raise NotImplementedError
def log(self, logger):
"""Log to the provided `logger` a human-readable message as an
error and a verbose traceback as a debug message.
"""
if self.tb:
logger.debug(self.tb)
logger.error('{0}: {1}', self.error_kind, self.args[0])
class FilesystemError(HumanReadableException):
"""An error that occurred while performing a filesystem manipulation
via a function in this module. The `paths` field is a sequence of
pathnames involved in the operation.
"""
def __init__(self, reason, verb, paths, tb=None):
self.paths = paths
super().__init__(reason, verb, tb)
def get_message(self):
# Use a nicer English phrasing for some specific verbs.
if self.verb in ('move', 'copy', 'rename'):
clause = 'while {} {} to {}'.format(
self._gerund(),
displayable_path(self.paths[0]),
displayable_path(self.paths[1])
)
elif self.verb in ('delete', 'write', 'create', 'read'):
clause = 'while {} {}'.format(
self._gerund(),
displayable_path(self.paths[0])
)
else:
clause = 'during {} of paths {}'.format(
self.verb, ', '.join(displayable_path(p) for p in self.paths)
)
return f'{self._reasonstr()} {clause}'
class MoveOperation(Enum):
"""The file operations that e.g. various move functions can carry out.
"""
MOVE = 0
COPY = 1
LINK = 2
HARDLINK = 3
REFLINK = 4
REFLINK_AUTO = 5
def normpath(path):
"""Provide the canonical form of the path suitable for storing in
the database.
"""
path = syspath(path, prefix=False)
path = os.path.normpath(os.path.abspath(os.path.expanduser(path)))
return bytestring_path(path)
def ancestry(path):
"""Return a list consisting of path's parent directory, its
grandparent, and so on. For instance:
>>> ancestry('/a/b/c')
['/', '/a', '/a/b']
The argument should *not* be the result of a call to `syspath`.
"""
out = []
last_path = None
while path:
path = os.path.dirname(path)
if path == last_path:
break
last_path = path
if path:
# don't yield ''
out.insert(0, path)
return out
def sorted_walk(path, ignore=(), ignore_hidden=False, logger=None):
"""Like `os.walk`, but yields things in case-insensitive sorted,
breadth-first order. Directory and file names matching any glob
pattern in `ignore` are skipped. If `logger` is provided, then
warning messages are logged there when a directory cannot be listed.
"""
    # Make sure the paths aren't Unicode strings.
path = bytestring_path(path)
ignore = [bytestring_path(i) for i in ignore]
# Get all the directories and files at this level.
try:
contents = os.listdir(syspath(path))
except OSError as exc:
if logger:
logger.warning('could not list directory {}: {}'.format(
displayable_path(path), exc.strerror
))
return
dirs = []
files = []
for base in contents:
base = bytestring_path(base)
# Skip ignored filenames.
skip = False
for pat in ignore:
if fnmatch.fnmatch(base, pat):
if logger:
logger.debug('ignoring {} due to ignore rule {}'.format(
base, pat
))
skip = True
break
if skip:
continue
# Add to output as either a file or a directory.
cur = os.path.join(path, base)
if (ignore_hidden and not hidden.is_hidden(cur)) or not ignore_hidden:
if os.path.isdir(syspath(cur)):
dirs.append(base)
else:
files.append(base)
# Sort lists (case-insensitive) and yield the current level.
dirs.sort(key=bytes.lower)
files.sort(key=bytes.lower)
yield (path, dirs, files)
# Recurse into directories.
for base in dirs:
cur = os.path.join(path, base)
        yield from sorted_walk(cur, ignore, ignore_hidden, logger)
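# Editor's sketch (not part of beets): collecting every file under a
# root with `sorted_walk`, following this module's bytes-path
# convention; `_demo_all_files` is hypothetical.
def _demo_all_files(root):
    for dirpath, dirnames, filenames in sorted_walk(root, ignore=(b'.git',)):
        for name in filenames:
            yield os.path.join(dirpath, name)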
def path_as_posix(path):
"""Return the string representation of the path with forward (/)
slashes.
"""
return path.replace(b'\\', b'/')
def mkdirall(path):
"""Make all the enclosing directories of path (like mkdir -p on the
parent).
"""
for ancestor in ancestry(path):
if not os.path.isdir(syspath(ancestor)):
try:
os.mkdir(syspath(ancestor))
except OSError as exc:
raise FilesystemError(exc, 'create', (ancestor,),
traceback.format_exc())
def fnmatch_all(names, patterns):
"""Determine whether all strings in `names` match at least one of
the `patterns`, which should be shell glob expressions.
"""
for name in names:
matches = False
for pattern in patterns:
matches = fnmatch.fnmatch(name, pattern)
if matches:
break
if not matches:
return False
return True
def prune_dirs(path, root=None, clutter=('.DS_Store', 'Thumbs.db')):
"""If path is an empty directory, then remove it. Recursively remove
path's ancestry up to root (which is never removed) where there are
empty directories. If path is not contained in root, then nothing is
removed. Glob patterns in clutter are ignored when determining
emptiness. If root is not provided, then only path may be removed
(i.e., no recursive removal).
"""
path = normpath(path)
if root is not None:
root = normpath(root)
ancestors = ancestry(path)
if root is None:
# Only remove the top directory.
ancestors = []
elif root in ancestors:
# Only remove directories below the root.
ancestors = ancestors[ancestors.index(root) + 1:]
else:
# Remove nothing.
return
# Traverse upward from path.
ancestors.append(path)
ancestors.reverse()
for directory in ancestors:
directory = syspath(directory)
if not os.path.exists(directory):
# Directory gone already.
continue
clutter = [bytestring_path(c) for c in clutter]
match_paths = [bytestring_path(d) for d in os.listdir(directory)]
try:
if fnmatch_all(match_paths, clutter):
# Directory contains only clutter (or nothing).
shutil.rmtree(directory)
else:
break
except OSError:
break
def components(path):
"""Return a list of the path components in path. For instance:
>>> components('/a/b/c')
['a', 'b', 'c']
The argument should *not* be the result of a call to `syspath`.
"""
comps = []
ances = ancestry(path)
for anc in ances:
comp = os.path.basename(anc)
if comp:
comps.append(comp)
else: # root
comps.append(anc)
last = os.path.basename(path)
if last:
comps.append(last)
return comps
def arg_encoding():
"""Get the encoding for command-line arguments (and other OS
locale-sensitive strings).
"""
try:
return locale.getdefaultlocale()[1] or 'utf-8'
except ValueError:
# Invalid locale environment variable setting. To avoid
# failing entirely for no good reason, assume UTF-8.
return 'utf-8'
def _fsencoding():
"""Get the system's filesystem encoding. On Windows, this is always
UTF-8 (not MBCS).
"""
encoding = sys.getfilesystemencoding() or sys.getdefaultencoding()
if encoding == 'mbcs':
# On Windows, a broken encoding known to Python as "MBCS" is
# used for the filesystem. However, we only use the Unicode API
# for Windows paths, so the encoding is actually immaterial so
# we can avoid dealing with this nastiness. We arbitrarily
# choose UTF-8.
encoding = 'utf-8'
return encoding
def bytestring_path(path):
"""Given a path, which is either a bytes or a unicode, returns a str
path (ensuring that we never deal with Unicode pathnames).
"""
# Pass through bytestrings.
if isinstance(path, bytes):
return path
# On Windows, remove the magic prefix added by `syspath`. This makes
# ``bytestring_path(syspath(X)) == X``, i.e., we can safely
# round-trip through `syspath`.
if os.path.__name__ == 'ntpath' and path.startswith(WINDOWS_MAGIC_PREFIX):
path = path[len(WINDOWS_MAGIC_PREFIX):]
# Try to encode with default encodings, but fall back to utf-8.
try:
return path.encode(_fsencoding())
except (UnicodeError, LookupError):
return path.encode('utf-8')
PATH_SEP = bytestring_path(os.sep)
def displayable_path(path, separator='; '):
"""Attempts to decode a bytestring path to a unicode object for the
purpose of displaying it to the user. If the `path` argument is a
list or a tuple, the elements are joined with `separator`.
"""
if isinstance(path, (list, tuple)):
return separator.join(displayable_path(p) for p in path)
elif isinstance(path, str):
return path
elif not isinstance(path, bytes):
# A non-string object: just get its unicode representation.
return str(path)
try:
return path.decode(_fsencoding(), 'ignore')
except (UnicodeError, LookupError):
return path.decode('utf-8', 'ignore')
def syspath(path, prefix=True):
"""Convert a path for use by the operating system. In particular,
paths on Windows must receive a magic prefix and must be converted
to Unicode before they are sent to the OS. To disable the magic
prefix on Windows, set `prefix` to False---but only do this if you
*really* know what you're doing.
"""
    # Don't do anything if we're not on Windows.
if os.path.__name__ != 'ntpath':
return path
if not isinstance(path, str):
# Beets currently represents Windows paths internally with UTF-8
# arbitrarily. But earlier versions used MBCS because it is
# reported as the FS encoding by Windows. Try both.
try:
path = path.decode('utf-8')
except UnicodeError:
# The encoding should always be MBCS, Windows' broken
# Unicode representation.
encoding = sys.getfilesystemencoding() or sys.getdefaultencoding()
path = path.decode(encoding, 'replace')
# Add the magic prefix if it isn't already there.
# https://msdn.microsoft.com/en-us/library/windows/desktop/aa365247.aspx
if prefix and not path.startswith(WINDOWS_MAGIC_PREFIX):
if path.startswith('\\\\'):
# UNC path. Final path should look like \\?\UNC\...
path = 'UNC' + path[1:]
path = WINDOWS_MAGIC_PREFIX + path
return path
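# Editor's sketch (not part of beets): `bytestring_path` strips the magic
# prefix that `syspath` adds on Windows, so the two functions round-trip;
# `_demo_roundtrip` is hypothetical.
def _demo_roundtrip(path):
    return bytestring_path(syspath(path)) == bytestring_path(path)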
def samefile(p1, p2):
"""Safer equality for paths."""
if p1 == p2:
return True
return shutil._samefile(syspath(p1), syspath(p2))
def remove(path, soft=True):
"""Remove the file. If `soft`, then no error will be raised if the
file does not exist.
"""
path = syspath(path)
if soft and not os.path.exists(path):
return
try:
os.remove(path)
except OSError as exc:
raise FilesystemError(exc, 'delete', (path,), traceback.format_exc())
def copy(path, dest, replace=False):
"""Copy a plain file. Permissions are not copied. If `dest` already
exists, raises a FilesystemError unless `replace` is True. Has no
effect if `path` is the same as `dest`. Paths are translated to
system paths before the syscall.
"""
if samefile(path, dest):
return
path = syspath(path)
dest = syspath(dest)
if not replace and os.path.exists(dest):
raise FilesystemError('file exists', 'copy', (path, dest))
try:
shutil.copyfile(path, dest)
except OSError as exc:
raise FilesystemError(exc, 'copy', (path, dest),
traceback.format_exc())
def move(path, dest, replace=False):
"""Rename a file. `dest` may not be a directory. If `dest` already
exists, raises an OSError unless `replace` is True. Has no effect if
`path` is the same as `dest`. If the paths are on different
filesystems (or the rename otherwise fails), a copy is attempted
instead, in which case metadata will *not* be preserved. Paths are
translated to system paths.
"""
if os.path.isdir(path):
raise FilesystemError(u'source is directory', 'move', (path, dest))
if os.path.isdir(dest):
raise FilesystemError(u'destination is directory', 'move',
(path, dest))
if samefile(path, dest):
return
path = syspath(path)
dest = syspath(dest)
if os.path.exists(dest) and not replace:
raise FilesystemError('file exists', 'rename', (path, dest))
# First, try renaming the file.
try:
os.replace(path, dest)
except OSError:
tmp = tempfile.mktemp(suffix='.beets',
prefix=py3_path(b'.' + os.path.basename(dest)),
dir=py3_path(os.path.dirname(dest)))
tmp = syspath(tmp)
try:
shutil.copyfile(path, tmp)
os.replace(tmp, dest)
tmp = None
os.remove(path)
except OSError as exc:
raise FilesystemError(exc, 'move', (path, dest),
traceback.format_exc())
finally:
if tmp is not None:
os.remove(tmp)
def link(path, dest, replace=False):
"""Create a symbolic link from path to `dest`. Raises an OSError if
`dest` already exists, unless `replace` is True. Does nothing if
`path` == `dest`.
"""
if samefile(path, dest):
return
if os.path.exists(syspath(dest)) and not replace:
raise FilesystemError('file exists', 'rename', (path, dest))
try:
os.symlink(syspath(path), syspath(dest))
except NotImplementedError:
# raised on python >= 3.2 and Windows versions before Vista
        raise FilesystemError('OS does not support symbolic links.',
                              'link', (path, dest), traceback.format_exc())
except OSError as exc:
# TODO: Windows version checks can be removed for python 3
        if hasattr(sys, 'getwindowsversion'):
if sys.getwindowsversion()[0] < 6: # is before Vista
exc = 'OS does not support symbolic links.'
raise FilesystemError(exc, 'link', (path, dest),
traceback.format_exc())
def hardlink(path, dest, replace=False):
"""Create a hard link from path to `dest`. Raises an OSError if
`dest` already exists, unless `replace` is True. Does nothing if
`path` == `dest`.
"""
if samefile(path, dest):
return
if os.path.exists(syspath(dest)) and not replace:
raise FilesystemError('file exists', 'rename', (path, dest))
try:
os.link(syspath(path), syspath(dest))
except NotImplementedError:
        raise FilesystemError('OS does not support hard links.',
                              'link', (path, dest), traceback.format_exc())
except OSError as exc:
if exc.errno == errno.EXDEV:
            raise FilesystemError('Cannot hard link across devices.',
                                  'link', (path, dest),
                                  traceback.format_exc())
else:
raise FilesystemError(exc, 'link', (path, dest),
traceback.format_exc())
def reflink(path, dest, replace=False, fallback=False):
"""Create a reflink from `dest` to `path`.
Raise an `OSError` if `dest` already exists, unless `replace` is
True. If `path` == `dest`, then do nothing.
If reflinking fails and `fallback` is enabled, try copying the file
instead. Otherwise, raise an error without trying a plain copy.
May raise an `ImportError` if the `reflink` module is not available.
"""
import reflink as pyreflink
if samefile(path, dest):
return
if os.path.exists(syspath(dest)) and not replace:
raise FilesystemError('file exists', 'rename', (path, dest))
try:
pyreflink.reflink(path, dest)
except (NotImplementedError, pyreflink.ReflinkImpossibleError):
if fallback:
copy(path, dest, replace)
else:
raise FilesystemError('OS/filesystem does not support reflinks.',
'link', (path, dest), traceback.format_exc())
def unique_path(path):
"""Returns a version of ``path`` that does not exist on the
    filesystem. Specifically, if ``path`` itself already exists, then
something unique is appended to the path.
"""
if not os.path.exists(syspath(path)):
return path
base, ext = os.path.splitext(path)
    match = re.search(br'\.(\d+)$', base)
if match:
num = int(match.group(1))
base = base[:match.start()]
else:
num = 0
while True:
num += 1
suffix = f'.{num}'.encode() + ext
new_path = base + suffix
if not os.path.exists(new_path):
return new_path
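# Illustrative usage (hypothetical paths; assumes b'/music/a.mp3' exists
# on disk but b'/music/a.1.mp3' does not):
#
#     >>> unique_path(b'/music/a.mp3')
#     b'/music/a.1.mp3'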
# Note: The Windows "reserved characters" are, of course, allowed on
# Unix. They are forbidden here because they cause problems on Samba
# shares, which are sufficiently common as to cause frequent problems.
# https://msdn.microsoft.com/en-us/library/windows/desktop/aa365247.aspx
CHAR_REPLACE = [
(re.compile(r'[\\/]'), '_'), # / and \ -- forbidden everywhere.
(re.compile(r'^\.'), '_'), # Leading dot (hidden files on Unix).
(re.compile(r'[\x00-\x1f]'), ''), # Control characters.
(re.compile(r'[<>:"\?\*\|]'), '_'), # Windows "reserved characters".
(re.compile(r'\.$'), '_'), # Trailing dots.
(re.compile(r'\s+$'), ''), # Trailing whitespace.
]
def sanitize_path(path, replacements=None):
"""Takes a path (as a Unicode string) and makes sure that it is
legal. Returns a new path. Only works with fragments; won't work
reliably on Windows when a path begins with a drive letter. Path
separators (including altsep!) should already be cleaned from the
path components. If replacements is specified, it is used *instead*
of the default set of replacements; it must be a list of (compiled
regex, replacement string) pairs.
"""
replacements = replacements or CHAR_REPLACE
comps = components(path)
if not comps:
return ''
for i, comp in enumerate(comps):
for regex, repl in replacements:
comp = regex.sub(repl, comp)
comps[i] = comp
return os.path.join(*comps)
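# Illustrative usage with the default replacements (assuming that
# `components()`, defined earlier in this module, yields the single
# fragment here):
#
#     >>> sanitize_path('.hidden: name?')
#     '_hidden_ name_'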
def truncate_path(path, length=MAX_FILENAME_LENGTH):
"""Given a bytestring path or a Unicode path fragment, truncate the
components to a legal length. In the last component, the extension
is preserved.
"""
comps = components(path)
out = [c[:length] for c in comps]
base, ext = os.path.splitext(comps[-1])
if ext:
# Last component has an extension.
base = base[:length - len(ext)]
out[-1] = base + ext
return os.path.join(*out)
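# Illustrative usage (the extension survives truncation):
#
#     >>> truncate_path(b'averylongname.mp3', length=10)
#     b'averyl.mp3'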
def _legalize_stage(path, replacements, length, extension, fragment):
"""Perform a single round of path legalization steps
(sanitation/replacement, encoding from Unicode to bytes,
extension-appending, and truncation). Return the path (Unicode if
`fragment` is set, `bytes` otherwise) and whether truncation was
required.
"""
# Perform an initial sanitization including user replacements.
path = sanitize_path(path, replacements)
# Encode for the filesystem.
if not fragment:
path = bytestring_path(path)
# Preserve extension.
path += extension.lower()
# Truncate too-long components.
pre_truncate_path = path
path = truncate_path(path, length)
return path, path != pre_truncate_path
def legalize_path(path, replacements, length, extension, fragment):
"""Given a path-like Unicode string, produce a legal path. Return
the path and a flag indicating whether some replacements had to be
ignored (see below).
The legalization process (see `_legalize_stage`) consists of
applying the sanitation rules in `replacements`, encoding the string
    to bytes (unless `fragment` is set), appending the `extension`, and
    truncating components to `length`.
    This function performs up to three calls to `_legalize_stage` in
    case truncation conflicts with replacements (as can happen when
    truncation creates whitespace at the end of the string, for
    example). The limited number of iterations avoids the possibility
    of an infinite loop of sanitation and truncation operations, which
    could be caused by replacement rules that make the string longer.
    The flag returned from this function indicates whether the path had
    to be truncated twice (i.e., whether replacements made the string
    longer again after it was truncated); the
application should probably log some sort of warning.
"""
if fragment:
# Outputting Unicode.
extension = extension.decode('utf-8', 'ignore')
first_stage_path, _ = _legalize_stage(
path, replacements, length, extension, fragment
)
# Convert back to Unicode with extension removed.
first_stage_path, _ = os.path.splitext(displayable_path(first_stage_path))
# Re-sanitize following truncation (including user replacements).
second_stage_path, retruncated = _legalize_stage(
first_stage_path, replacements, length, extension, fragment
)
# If the path was once again truncated, discard user replacements
# and run through one last legalization stage.
if retruncated:
second_stage_path, _ = _legalize_stage(
first_stage_path, None, length, extension, fragment
)
return second_stage_path, retruncated
def py3_path(path):
"""Convert a bytestring path to Unicode on Python 3 only. On Python
2, return the bytestring path unchanged.
This helps deal with APIs on Python 3 that *only* accept Unicode
(i.e., `str` objects). I philosophically disagree with this
decision, because paths are sadly bytes on Unix, but that's the way
it is. So this function helps us "smuggle" the true bytes data
through APIs that took Python 3's Unicode mandate too seriously.
"""
if isinstance(path, str):
return path
assert isinstance(path, bytes)
return os.fsdecode(path)
def str2bool(value):
"""Returns a boolean reflecting a human-entered string."""
return value.lower() in ('yes', '1', 'true', 't', 'y')
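# Illustrative usage (note that unlisted strings, such as 'on', are
# treated as false):
#
#     >>> str2bool('YES'), str2bool('0'), str2bool('on')
#     (True, False, False)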
def as_string(value):
"""Convert a value to a Unicode object for matching with a query.
None becomes the empty string. Bytestrings are silently decoded.
"""
if value is None:
return ''
elif isinstance(value, memoryview):
return bytes(value).decode('utf-8', 'ignore')
elif isinstance(value, bytes):
return value.decode('utf-8', 'ignore')
else:
return str(value)
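# Illustrative usage:
#
#     >>> as_string(None), as_string(b'caf\xc3\xa9'), as_string(42)
#     ('', 'café', '42')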
def text_string(value, encoding='utf-8'):
"""Convert a string, which can either be bytes or unicode, to
unicode.
Text (unicode) is left untouched; bytes are decoded. This is useful
to convert from a "native string" (bytes on Python 2, str on Python
3) to a consistently unicode value.
"""
if isinstance(value, bytes):
return value.decode(encoding)
return value
def plurality(objs):
"""Given a sequence of hashble objects, returns the object that
is most common in the set and the its number of appearance. The
sequence must contain at least one object.
"""
c = Counter(objs)
if not c:
raise ValueError('sequence must be non-empty')
return c.most_common(1)[0]
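# Illustrative usage:
#
#     >>> plurality(['a', 'b', 'a'])
#     ('a', 2)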
def cpu_count():
"""Return the number of hardware thread contexts (cores or SMT
threads) in the system.
"""
# Adapted from the soundconverter project:
# https://github.com/kassoulet/soundconverter
if sys.platform == 'win32':
try:
num = int(os.environ['NUMBER_OF_PROCESSORS'])
except (ValueError, KeyError):
num = 0
elif sys.platform == 'darwin':
try:
num = int(command_output([
'/usr/sbin/sysctl',
'-n',
'hw.ncpu',
]).stdout)
except (ValueError, OSError, subprocess.CalledProcessError):
num = 0
else:
try:
num = os.sysconf('SC_NPROCESSORS_ONLN')
except (ValueError, OSError, AttributeError):
num = 0
if num >= 1:
return num
else:
return 1
def convert_command_args(args):
"""Convert command arguments to bytestrings on Python 2 and
surrogate-escaped strings on Python 3."""
assert isinstance(args, list)
def convert(arg):
if isinstance(arg, bytes):
arg = arg.decode(arg_encoding(), 'surrogateescape')
return arg
return [convert(a) for a in args]
# stdout and stderr as bytes
CommandOutput = namedtuple("CommandOutput", ("stdout", "stderr"))
def command_output(cmd, shell=False):
"""Runs the command and returns its output after it has exited.
Returns a CommandOutput. The attributes ``stdout`` and ``stderr`` contain
byte strings of the respective output streams.
    ``cmd`` is a list of arguments starting with the command name. The
arguments are bytes on Unix and strings on Windows.
If ``shell`` is true, ``cmd`` is assumed to be a string and passed to a
shell to execute.
If the process exits with a non-zero return code
``subprocess.CalledProcessError`` is raised. May also raise
``OSError``.
This replaces `subprocess.check_output` which can have problems if lots of
output is sent to stderr.
"""
cmd = convert_command_args(cmd)
try: # python >= 3.3
devnull = subprocess.DEVNULL
except AttributeError:
devnull = open(os.devnull, 'r+b')
proc = subprocess.Popen(
cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
stdin=devnull,
close_fds=platform.system() != 'Windows',
shell=shell
)
stdout, stderr = proc.communicate()
if proc.returncode:
raise subprocess.CalledProcessError(
returncode=proc.returncode,
cmd=' '.join(cmd),
output=stdout + stderr,
)
return CommandOutput(stdout, stderr)
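# Illustrative usage (a Unix-only sketch; the command is hypothetical):
#
#     >>> command_output([b'echo', b'hi']).stdout
#     b'hi\n'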
def max_filename_length(path, limit=MAX_FILENAME_LENGTH):
"""Attempt to determine the maximum filename length for the
filesystem containing `path`. If the value is greater than `limit`,
then `limit` is used instead (to prevent errors when a filesystem
misreports its capacity). If it cannot be determined (e.g., on
Windows), return `limit`.
"""
if hasattr(os, 'statvfs'):
try:
res = os.statvfs(path)
except OSError:
return limit
        return min(res[9], limit)  # Field 9 of statvfs is f_namemax.
else:
return limit
def open_anything():
"""Return the system command that dispatches execution to the correct
program.
"""
sys_name = platform.system()
if sys_name == 'Darwin':
base_cmd = 'open'
elif sys_name == 'Windows':
base_cmd = 'start'
else: # Assume Unix
base_cmd = 'xdg-open'
return base_cmd
def editor_command():
"""Get a command for opening a text file.
Use the `EDITOR` environment variable by default. If it is not
present, fall back to `open_anything()`, the platform-specific tool
for opening files in general.
"""
editor = os.environ.get('EDITOR')
if editor:
return editor
return open_anything()
def interactive_open(targets, command):
"""Open the files in `targets` by `exec`ing a new `command`, given
as a Unicode string. (The new program takes over, and Python
execution ends: this does not fork a subprocess.)
Can raise `OSError`.
"""
assert command
# Split the command string into its arguments.
try:
args = shlex.split(command)
except ValueError: # Malformed shell tokens.
args = [command]
args.insert(0, args[0]) # for argv[0]
args += targets
return os.execlp(*args)
def _windows_long_path_name(short_path):
"""Use Windows' `GetLongPathNameW` via ctypes to get the canonical,
long path given a short filename.
"""
if not isinstance(short_path, str):
short_path = short_path.decode(_fsencoding())
import ctypes
buf = ctypes.create_unicode_buffer(260)
get_long_path_name_w = ctypes.windll.kernel32.GetLongPathNameW
return_value = get_long_path_name_w(short_path, buf, 260)
if return_value == 0 or return_value > 260:
# An error occurred
return short_path
else:
long_path = buf.value
# GetLongPathNameW does not change the case of the drive
# letter.
if len(long_path) > 1 and long_path[1] == ':':
long_path = long_path[0].upper() + long_path[1:]
return long_path
def case_sensitive(path):
"""Check whether the filesystem at the given path is case sensitive.
To work best, the path should point to a file or a directory. If the path
does not exist, assume a case sensitive file system on every platform
except Windows.
"""
# A fallback in case the path does not exist.
if not os.path.exists(syspath(path)):
# By default, the case sensitivity depends on the platform.
return platform.system() != 'Windows'
    # If either the lower-case or the upper-case variant of the path
    # does not exist, then the filesystem must be case-sensitive.
    # (Otherwise, we have more work to do.)
if not (os.path.exists(syspath(path.lower())) and
os.path.exists(syspath(path.upper()))):
return True
# Both versions of the path exist on the file system. Check whether
# they refer to different files by their inodes. Alas,
# `os.path.samefile` is only available on Unix systems on Python 2.
if platform.system() != 'Windows':
return not os.path.samefile(syspath(path.lower()),
syspath(path.upper()))
# On Windows, we check whether the canonical, long filenames for the
# files are the same.
lower = _windows_long_path_name(path.lower())
upper = _windows_long_path_name(path.upper())
return lower != upper
def raw_seconds_short(string):
"""Formats a human-readable M:SS string as a float (number of seconds).
Raises ValueError if the conversion cannot take place due to `string` not
being in the right format.
"""
match = re.match(r'^(\d+):([0-5]\d)$', string)
if not match:
raise ValueError('String not in M:SS format')
minutes, seconds = map(int, match.groups())
return float(minutes * 60 + seconds)
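# Illustrative usage:
#
#     >>> raw_seconds_short('3:05')
#     185.0
#     >>> raw_seconds_short('3:65')  # seconds > 59: raises ValueError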
def asciify_path(path, sep_replace):
"""Decodes all unicode characters in a path into ASCII equivalents.
Substitutions are provided by the unidecode module. Path separators in the
input are preserved.
Keyword arguments:
path -- The path to be asciified.
sep_replace -- the string to be used to replace extraneous path separators.
"""
# if this platform has an os.altsep, change it to os.sep.
if os.altsep:
path = path.replace(os.altsep, os.sep)
path_components = path.split(os.sep)
    for index, item in enumerate(path_components):
        # unidecode() may itself introduce separator characters (e.g.,
        # '1/2' for a fraction glyph), so replace both sep and altsep
        # in the transliterated component.
        item = unidecode(item).replace(os.sep, sep_replace)
        if os.altsep:
            item = item.replace(os.altsep, sep_replace)
        path_components[index] = item
return os.sep.join(path_components)
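# Illustrative usage (on a POSIX platform; the transliteration is
# provided by unidecode):
#
#     >>> asciify_path('música/día', '_')
#     'musica/dia'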
def par_map(transform, items):
"""Apply the function `transform` to all the elements in the
iterable `items`, like `map(transform, items)` but with no return
    value. The map happens in parallel using a thread pool (not
    processes), so this is only useful for IO-bound `transform`s.
"""
pool = ThreadPool()
pool.map(transform, items)
pool.close()
pool.join()
def lazy_property(func):
"""A decorator that creates a lazily evaluated property. On first access,
the property is assigned the return value of `func`. This first value is
stored, so that future accesses do not have to evaluate `func` again.
This behaviour is useful when `func` is expensive to evaluate, and it is
not certain that the result will be needed.
"""
field_name = '_' + func.__name__
@property
@functools.wraps(func)
def wrapper(self):
if hasattr(self, field_name):
return getattr(self, field_name)
value = func(self)
setattr(self, field_name, value)
return value
return wrapper
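# Illustrative sketch (the class is hypothetical): the wrapped function
# runs once per instance and the result is cached on the object.
#
#     class Example:
#         @lazy_property
#         def answer(self):
#             print('computing...')
#             return 42
#
#     e = Example()
#     e.answer  # prints 'computing...' and returns 42
#     e.answer  # returns 42 again without recomputing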
def decode_commandline_path(path):
"""Prepare a path for substitution into commandline template.
On Python 3, we need to construct the subprocess commands to invoke as a
Unicode string. On Unix, this is a little unfortunate---the OS is
expecting bytes---so we use surrogate escaping and decode with the
argument encoding, which is the same encoding that will then be
*reversed* to recover the same bytes before invoking the OS. On
Windows, we want to preserve the Unicode filename "as is."
"""
# On Python 3, the template is a Unicode string, which only supports
# substitution of Unicode variables.
if platform.system() == 'Windows':
return path.decode(_fsencoding())
else:
return path.decode(arg_encoding(), 'surrogateescape')
| 36,926
|
Python
|
.py
| 912
| 33.297149
| 79
| 0.649946
|
rembo10/headphones
| 3,370
| 601
| 527
|
GPL-3.0
|
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
|
9,268
|
hidden.py
|
rembo10_headphones/lib/beets/util/hidden.py
|
# This file is part of beets.
# Copyright 2016, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Simple library to work out if a file is hidden on different platforms."""
import os
import stat
import ctypes
import sys
import beets.util
def _is_hidden_osx(path):
"""Return whether or not a file is hidden on OS X.
This uses os.lstat to work out if a file has the "hidden" flag.
"""
file_stat = os.lstat(beets.util.syspath(path))
if hasattr(file_stat, 'st_flags') and hasattr(stat, 'UF_HIDDEN'):
return bool(file_stat.st_flags & stat.UF_HIDDEN)
else:
return False
def _is_hidden_win(path):
"""Return whether or not a file is hidden on Windows.
This uses GetFileAttributes to work out if a file has the "hidden" flag
(FILE_ATTRIBUTE_HIDDEN).
"""
# FILE_ATTRIBUTE_HIDDEN = 2 (0x2) from GetFileAttributes documentation.
hidden_mask = 2
# Retrieve the attributes for the file.
attrs = ctypes.windll.kernel32.GetFileAttributesW(beets.util.syspath(path))
    # Ensure we have valid attributes and compare them against the mask.
return attrs >= 0 and attrs & hidden_mask
def _is_hidden_dot(path):
"""Return whether or not a file starts with a dot.
Files starting with a dot are seen as "hidden" files on Unix-based OSes.
"""
return os.path.basename(path).startswith(b'.')
def is_hidden(path):
"""Return whether or not a file is hidden. `path` should be a
bytestring filename.
This method works differently depending on the platform it is called on.
On OS X, it uses both the result of `is_hidden_osx` and `is_hidden_dot` to
work out if a file is hidden.
On Windows, it uses the result of `is_hidden_win` to work out if a file is
hidden.
On any other operating systems (i.e. Linux), it uses `is_hidden_dot` to
work out if a file is hidden.
"""
# Run platform specific functions depending on the platform
if sys.platform == 'darwin':
return _is_hidden_osx(path) or _is_hidden_dot(path)
elif sys.platform == 'win32':
return _is_hidden_win(path)
else:
return _is_hidden_dot(path)
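# Illustrative usage (hypothetical bytestring path; on Linux only the
# leading-dot convention applies):
#
#     >>> is_hidden(b'/tmp/.secret')
#     True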
__all__ = ['is_hidden']
| 2,735
|
Python
|
.py
| 63
| 39.285714
| 79
| 0.717842
|
rembo10/headphones
| 3,370
| 601
| 527
|
GPL-3.0
|
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
|
9,269
|
functemplate.py
|
rembo10_headphones/lib/beets/util/functemplate.py
|
# This file is part of beets.
# Copyright 2016, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""This module implements a string formatter based on the standard PEP
292 string.Template class extended with function calls. Variables, as
with string.Template, are indicated with $ and functions are delimited
with %.
This module assumes that everything is Unicode: the template and the
substitution values. Bytestrings are not supported. Also, the templates
always behave like the ``safe_substitute`` method in the standard
library: unknown symbols are left intact.
This is sort of like a tiny, horrible degeneration of a real templating
engine like Jinja2 or Mustache.
"""
import re
import ast
import dis
import types
import sys
import functools
SYMBOL_DELIM = '$'
FUNC_DELIM = '%'
GROUP_OPEN = '{'
GROUP_CLOSE = '}'
ARG_SEP = ','
ESCAPE_CHAR = '$'
VARIABLE_PREFIX = '__var_'
FUNCTION_PREFIX = '__func_'
class Environment:
"""Contains the values and functions to be substituted into a
template.
"""
def __init__(self, values, functions):
self.values = values
self.functions = functions
# Code generation helpers.
def ex_lvalue(name):
    """A variable store (assignment-target) expression."""
    return ast.Name(name, ast.Store())
def ex_rvalue(name):
    """A variable load (read) expression."""
    return ast.Name(name, ast.Load())
def ex_literal(val):
"""An int, float, long, bool, string, or None literal with the given
value.
"""
return ast.Constant(val)
def ex_varassign(name, expr):
"""Assign an expression into a single variable. The expression may
either be an `ast.expr` object or a value to be used as a literal.
"""
if not isinstance(expr, ast.expr):
expr = ex_literal(expr)
return ast.Assign([ex_lvalue(name)], expr)
def ex_call(func, args):
"""A function-call expression with only positional parameters. The
function may be an expression or the name of a function. Each
argument may be an expression or a value to be used as a literal.
"""
if isinstance(func, str):
func = ex_rvalue(func)
args = list(args)
for i in range(len(args)):
if not isinstance(args[i], ast.expr):
args[i] = ex_literal(args[i])
return ast.Call(func, args, [])
def compile_func(arg_names, statements, name='_the_func', debug=False):
"""Compile a list of statements as the body of a function and return
the resulting Python function. If `debug`, then print out the
bytecode of the compiled function.
"""
args_fields = {
'args': [ast.arg(arg=n, annotation=None) for n in arg_names],
'kwonlyargs': [],
'kw_defaults': [],
'defaults': [ex_literal(None) for _ in arg_names],
}
if 'posonlyargs' in ast.arguments._fields: # Added in Python 3.8.
args_fields['posonlyargs'] = []
args = ast.arguments(**args_fields)
func_def = ast.FunctionDef(
name=name,
args=args,
body=statements,
decorator_list=[],
)
# The ast.Module signature changed in 3.8 to accept a list of types to
# ignore.
if sys.version_info >= (3, 8):
mod = ast.Module([func_def], [])
else:
mod = ast.Module([func_def])
ast.fix_missing_locations(mod)
prog = compile(mod, '<generated>', 'exec')
# Debug: show bytecode.
if debug:
dis.dis(prog)
for const in prog.co_consts:
if isinstance(const, types.CodeType):
dis.dis(const)
the_locals = {}
exec(prog, {}, the_locals)
return the_locals[name]
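# Illustrative sketch of the helpers above (the body shown is invented
# for the example): compile a function that returns len(x).
#
#     f = compile_func(['x'], [ast.Return(ex_call('len', [ex_rvalue('x')]))])
#     f(x=[1, 2, 3])  # -> 3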
# AST nodes for the template language.
class Symbol:
"""A variable-substitution symbol in a template."""
def __init__(self, ident, original):
self.ident = ident
self.original = original
def __repr__(self):
return 'Symbol(%s)' % repr(self.ident)
def evaluate(self, env):
"""Evaluate the symbol in the environment, returning a Unicode
string.
"""
if self.ident in env.values:
# Substitute for a value.
return env.values[self.ident]
else:
# Keep original text.
return self.original
def translate(self):
"""Compile the variable lookup."""
ident = self.ident
expr = ex_rvalue(VARIABLE_PREFIX + ident)
return [expr], {ident}, set()
class Call:
"""A function call in a template."""
def __init__(self, ident, args, original):
self.ident = ident
self.args = args
self.original = original
def __repr__(self):
return 'Call({}, {}, {})'.format(repr(self.ident), repr(self.args),
repr(self.original))
def evaluate(self, env):
"""Evaluate the function call in the environment, returning a
Unicode string.
"""
if self.ident in env.functions:
arg_vals = [expr.evaluate(env) for expr in self.args]
try:
out = env.functions[self.ident](*arg_vals)
except Exception as exc:
# Function raised exception! Maybe inlining the name of
# the exception will help debug.
return '<%s>' % str(exc)
return str(out)
else:
return self.original
def translate(self):
"""Compile the function call."""
varnames = set()
funcnames = {self.ident}
arg_exprs = []
for arg in self.args:
subexprs, subvars, subfuncs = arg.translate()
varnames.update(subvars)
funcnames.update(subfuncs)
# Create a subexpression that joins the result components of
# the arguments.
arg_exprs.append(ex_call(
ast.Attribute(ex_literal(''), 'join', ast.Load()),
[ex_call(
'map',
[
ex_rvalue(str.__name__),
ast.List(subexprs, ast.Load()),
]
)],
))
subexpr_call = ex_call(
FUNCTION_PREFIX + self.ident,
arg_exprs
)
return [subexpr_call], varnames, funcnames
class Expression:
"""Top-level template construct: contains a list of text blobs,
Symbols, and Calls.
"""
def __init__(self, parts):
self.parts = parts
def __repr__(self):
return 'Expression(%s)' % (repr(self.parts))
def evaluate(self, env):
"""Evaluate the entire expression in the environment, returning
a Unicode string.
"""
out = []
for part in self.parts:
if isinstance(part, str):
out.append(part)
else:
out.append(part.evaluate(env))
return ''.join(map(str, out))
def translate(self):
"""Compile the expression to a list of Python AST expressions, a
set of variable names used, and a set of function names.
"""
expressions = []
varnames = set()
funcnames = set()
for part in self.parts:
if isinstance(part, str):
expressions.append(ex_literal(part))
else:
e, v, f = part.translate()
expressions.extend(e)
varnames.update(v)
funcnames.update(f)
return expressions, varnames, funcnames
# Parser.
class ParseError(Exception):
pass
class Parser:
"""Parses a template expression string. Instantiate the class with
the template source and call ``parse_expression``. The ``pos`` field
will indicate the character after the expression finished and
``parts`` will contain a list of Unicode strings, Symbols, and Calls
reflecting the concatenated portions of the expression.
This is a terrible, ad-hoc parser implementation based on a
left-to-right scan with no lexing step to speak of; it's probably
both inefficient and incorrect. Maybe this should eventually be
replaced with a real, accepted parsing technique (PEG, parser
generator, etc.).
"""
def __init__(self, string, in_argument=False):
""" Create a new parser.
        :param in_argument: boolean that indicates the parser is to be
        used for parsing function arguments, i.e., considering commas
        (`ARG_SEP`) a special character
"""
self.string = string
self.in_argument = in_argument
self.pos = 0
self.parts = []
# Common parsing resources.
special_chars = (SYMBOL_DELIM, FUNC_DELIM, GROUP_OPEN, GROUP_CLOSE,
ESCAPE_CHAR)
special_char_re = re.compile(r'[%s]|\Z' %
''.join(re.escape(c) for c in special_chars))
escapable_chars = (SYMBOL_DELIM, FUNC_DELIM, GROUP_CLOSE, ARG_SEP)
terminator_chars = (GROUP_CLOSE,)
def parse_expression(self):
"""Parse a template expression starting at ``pos``. Resulting
components (Unicode strings, Symbols, and Calls) are added to
the ``parts`` field, a list. The ``pos`` field is updated to be
the next character after the expression.
"""
# Append comma (ARG_SEP) to the list of special characters only when
# parsing function arguments.
extra_special_chars = ()
special_char_re = self.special_char_re
if self.in_argument:
extra_special_chars = (ARG_SEP,)
special_char_re = re.compile(
r'[%s]|\Z' % ''.join(
re.escape(c) for c in
self.special_chars + extra_special_chars
)
)
text_parts = []
while self.pos < len(self.string):
char = self.string[self.pos]
if char not in self.special_chars + extra_special_chars:
# A non-special character. Skip to the next special
# character, treating the interstice as literal text.
next_pos = (
special_char_re.search(
self.string[self.pos:]).start() + self.pos
)
text_parts.append(self.string[self.pos:next_pos])
self.pos = next_pos
continue
if self.pos == len(self.string) - 1:
# The last character can never begin a structure, so we
# just interpret it as a literal character (unless it
# terminates the expression, as with , and }).
if char not in self.terminator_chars + extra_special_chars:
text_parts.append(char)
self.pos += 1
break
next_char = self.string[self.pos + 1]
if char == ESCAPE_CHAR and next_char in (self.escapable_chars +
extra_special_chars):
# An escaped special character ($$, $}, etc.). Note that
# ${ is not an escape sequence: this is ambiguous with
# the start of a symbol and it's not necessary (just
# using { suffices in all cases).
text_parts.append(next_char)
self.pos += 2 # Skip the next character.
continue
# Shift all characters collected so far into a single string.
if text_parts:
self.parts.append(''.join(text_parts))
text_parts = []
if char == SYMBOL_DELIM:
# Parse a symbol.
self.parse_symbol()
elif char == FUNC_DELIM:
# Parse a function call.
self.parse_call()
elif char in self.terminator_chars + extra_special_chars:
# Template terminated.
break
elif char == GROUP_OPEN:
                # Start of a group has no meaning here; just pass
# through the character.
text_parts.append(char)
self.pos += 1
else:
assert False
# If any parsed characters remain, shift them into a string.
if text_parts:
self.parts.append(''.join(text_parts))
def parse_symbol(self):
"""Parse a variable reference (like ``$foo`` or ``${foo}``)
starting at ``pos``. Possibly appends a Symbol object (or,
failing that, text) to the ``parts`` field and updates ``pos``.
The character at ``pos`` must, as a precondition, be ``$``.
"""
assert self.pos < len(self.string)
assert self.string[self.pos] == SYMBOL_DELIM
if self.pos == len(self.string) - 1:
# Last character.
self.parts.append(SYMBOL_DELIM)
self.pos += 1
return
next_char = self.string[self.pos + 1]
start_pos = self.pos
self.pos += 1
if next_char == GROUP_OPEN:
# A symbol like ${this}.
self.pos += 1 # Skip opening.
closer = self.string.find(GROUP_CLOSE, self.pos)
if closer == -1 or closer == self.pos:
# No closing brace found or identifier is empty.
self.parts.append(self.string[start_pos:self.pos])
else:
# Closer found.
ident = self.string[self.pos:closer]
self.pos = closer + 1
self.parts.append(Symbol(ident,
self.string[start_pos:self.pos]))
else:
# A bare-word symbol.
ident = self._parse_ident()
if ident:
# Found a real symbol.
self.parts.append(Symbol(ident,
self.string[start_pos:self.pos]))
else:
# A standalone $.
self.parts.append(SYMBOL_DELIM)
def parse_call(self):
"""Parse a function call (like ``%foo{bar,baz}``) starting at
    ``pos``. Possibly appends a Call object to ``parts`` and updates
``pos``. The character at ``pos`` must be ``%``.
"""
assert self.pos < len(self.string)
assert self.string[self.pos] == FUNC_DELIM
start_pos = self.pos
self.pos += 1
ident = self._parse_ident()
if not ident:
# No function name.
self.parts.append(FUNC_DELIM)
return
if self.pos >= len(self.string):
# Identifier terminates string.
self.parts.append(self.string[start_pos:self.pos])
return
if self.string[self.pos] != GROUP_OPEN:
# Argument list not opened.
self.parts.append(self.string[start_pos:self.pos])
return
# Skip past opening brace and try to parse an argument list.
self.pos += 1
args = self.parse_argument_list()
if self.pos >= len(self.string) or \
self.string[self.pos] != GROUP_CLOSE:
# Arguments unclosed.
self.parts.append(self.string[start_pos:self.pos])
return
self.pos += 1 # Move past closing brace.
self.parts.append(Call(ident, args, self.string[start_pos:self.pos]))
def parse_argument_list(self):
"""Parse a list of arguments starting at ``pos``, returning a
list of Expression objects. Does not modify ``parts``. Should
leave ``pos`` pointing to a } character or the end of the
string.
"""
# Try to parse a subexpression in a subparser.
expressions = []
while self.pos < len(self.string):
subparser = Parser(self.string[self.pos:], in_argument=True)
subparser.parse_expression()
# Extract and advance past the parsed expression.
expressions.append(Expression(subparser.parts))
self.pos += subparser.pos
if self.pos >= len(self.string) or \
self.string[self.pos] == GROUP_CLOSE:
# Argument list terminated by EOF or closing brace.
break
# Only other way to terminate an expression is with ,.
# Continue to the next argument.
assert self.string[self.pos] == ARG_SEP
self.pos += 1
return expressions
def _parse_ident(self):
"""Parse an identifier and return it (possibly an empty string).
Updates ``pos``.
"""
remainder = self.string[self.pos:]
ident = re.match(r'\w*', remainder).group(0)
self.pos += len(ident)
return ident
def _parse(template):
"""Parse a top-level template string Expression. Any extraneous text
is considered literal text.
"""
parser = Parser(template)
parser.parse_expression()
parts = parser.parts
remainder = parser.string[parser.pos:]
if remainder:
parts.append(remainder)
return Expression(parts)
def cached(func):
"""Like the `functools.lru_cache` decorator, but works (as a no-op)
on Python < 3.2.
"""
if hasattr(functools, 'lru_cache'):
return functools.lru_cache(maxsize=128)(func)
else:
# Do nothing when lru_cache is not available.
return func
@cached
def template(fmt):
return Template(fmt)
# External interface.
class Template:
"""A string template, including text, Symbols, and Calls.
"""
def __init__(self, template):
self.expr = _parse(template)
self.original = template
self.compiled = self.translate()
def __eq__(self, other):
return self.original == other.original
def interpret(self, values={}, functions={}):
"""Like `substitute`, but forces the interpreter (rather than
the compiled version) to be used. The interpreter includes
exception-handling code for missing variables and buggy template
functions but is much slower.
"""
return self.expr.evaluate(Environment(values, functions))
def substitute(self, values={}, functions={}):
"""Evaluate the template given the values and functions.
"""
try:
res = self.compiled(values, functions)
except Exception: # Handle any exceptions thrown by compiled version.
res = self.interpret(values, functions)
return res
def translate(self):
"""Compile the template to a Python function."""
expressions, varnames, funcnames = self.expr.translate()
argnames = []
for varname in varnames:
argnames.append(VARIABLE_PREFIX + varname)
for funcname in funcnames:
argnames.append(FUNCTION_PREFIX + funcname)
func = compile_func(
argnames,
[ast.Return(ast.List(expressions, ast.Load()))],
)
def wrapper_func(values={}, functions={}):
args = {}
for varname in varnames:
args[VARIABLE_PREFIX + varname] = values[varname]
for funcname in funcnames:
args[FUNCTION_PREFIX + funcname] = functions[funcname]
parts = func(**args)
return ''.join(parts)
return wrapper_func
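# Illustrative usage of the public interface:
#
#     >>> Template('Hello $name').substitute({'name': 'world'})
#     'Hello world'
#     >>> Template('%upper{abc}').substitute({}, {'upper': str.upper})
#     'ABC'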
# Performance tests.
if __name__ == '__main__':
import timeit
_tmpl = Template('foo $bar %baz{foozle $bar barzle} $bar')
_vars = {'bar': 'qux'}
_funcs = {'baz': str.upper}
interp_time = timeit.timeit('_tmpl.interpret(_vars, _funcs)',
'from __main__ import _tmpl, _vars, _funcs',
number=10000)
print(interp_time)
comp_time = timeit.timeit('_tmpl.substitute(_vars, _funcs)',
'from __main__ import _tmpl, _vars, _funcs',
number=10000)
print(comp_time)
print('Speedup:', interp_time / comp_time)
| 20,497
|
Python
|
.py
| 511
| 30.268102
| 78
| 0.587019
|
rembo10/headphones
| 3,370
| 601
| 527
|
GPL-3.0
|
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
|
9,270
|
enumeration.py
|
rembo10_headphones/lib/beets/util/enumeration.py
|
# This file is part of beets.
# Copyright 2016, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
from enum import Enum
class OrderedEnum(Enum):
"""
An Enum subclass that allows comparison of members.
"""
def __ge__(self, other):
if self.__class__ is other.__class__:
return self.value >= other.value
return NotImplemented
def __gt__(self, other):
if self.__class__ is other.__class__:
return self.value > other.value
return NotImplemented
def __le__(self, other):
if self.__class__ is other.__class__:
return self.value <= other.value
return NotImplemented
def __lt__(self, other):
if self.__class__ is other.__class__:
return self.value < other.value
return NotImplemented
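# Illustrative usage (the members are hypothetical):
#
#     class Level(OrderedEnum):
#         LOW = 1
#         HIGH = 2
#
#     Level.LOW < Level.HIGH   # True
#     Level.LOW >= Level.HIGH  # False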
| 1,370
|
Python
|
.py
| 34
| 34.970588
| 71
| 0.684725
|
rembo10/headphones
| 3,370
| 601
| 527
|
GPL-3.0
|
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
|
9,271
|
artresizer.py
|
rembo10_headphones/lib/beets/util/artresizer.py
|
# This file is part of beets.
# Copyright 2016, Fabrice Laporte
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Abstraction layer to resize images using PIL, ImageMagick, or a
public resizing proxy if neither is available.
"""
import subprocess
import os
import os.path
import re
from tempfile import NamedTemporaryFile
from urllib.parse import urlencode
from beets import logging
from beets import util
# Resizing methods
PIL = 1
IMAGEMAGICK = 2
WEBPROXY = 3
PROXY_URL = 'https://images.weserv.nl/'
log = logging.getLogger('beets')
def resize_url(url, maxwidth, quality=0):
"""Return a proxied image URL that resizes the original image to
maxwidth (preserving aspect ratio).
"""
params = {
'url': url.replace('http://', ''),
'w': maxwidth,
}
if quality > 0:
params['q'] = quality
return '{}?{}'.format(PROXY_URL, urlencode(params))
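# Illustrative usage (the image URL is hypothetical):
#
#     >>> resize_url('http://example.com/a.jpg', 300)
#     'https://images.weserv.nl/?url=example.com%2Fa.jpg&w=300'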
def temp_file_for(path):
"""Return an unused filename with the same extension as the
specified path.
"""
ext = os.path.splitext(path)[1]
with NamedTemporaryFile(suffix=util.py3_path(ext), delete=False) as f:
return util.bytestring_path(f.name)
def pil_resize(maxwidth, path_in, path_out=None, quality=0, max_filesize=0):
"""Resize using Python Imaging Library (PIL). Return the output path
of resized image.
"""
path_out = path_out or temp_file_for(path_in)
from PIL import Image
log.debug('artresizer: PIL resizing {0} to {1}',
util.displayable_path(path_in), util.displayable_path(path_out))
try:
im = Image.open(util.syspath(path_in))
size = maxwidth, maxwidth
im.thumbnail(size, Image.ANTIALIAS)
if quality == 0:
# Use PIL's default quality.
quality = -1
# progressive=False only affects JPEGs and is the default,
# but we include it here for explicitness.
im.save(util.py3_path(path_out), quality=quality, progressive=False)
if max_filesize > 0:
            # If a maximum filesize is set, repeatedly lower the JPEG
            # quality until the file fits, giving up after 5 attempts.
            # Start from the provided quality, or 95 if none was given.
            if quality > 0:
                lower_qual = quality
            else:
                lower_qual = 95
            for i in range(5):
                # 5 attempts is an arbitrary choice.
filesize = os.stat(util.syspath(path_out)).st_size
log.debug("PIL Pass {0} : Output size: {1}B", i, filesize)
if filesize <= max_filesize:
return path_out
# The relationship between filesize & quality will be
# image dependent.
lower_qual -= 10
                # Don't let the quality drop below 10.
if lower_qual < 10:
lower_qual = 10
# Use optimize flag to improve filesize decrease
im.save(util.py3_path(path_out), quality=lower_qual,
optimize=True, progressive=False)
log.warning("PIL Failed to resize file to below {0}B",
max_filesize)
return path_out
else:
return path_out
except OSError:
log.error("PIL cannot create thumbnail for '{0}'",
util.displayable_path(path_in))
return path_in
def im_resize(maxwidth, path_in, path_out=None, quality=0, max_filesize=0):
"""Resize using ImageMagick.
Use the ``magick`` program or ``convert`` on older versions. Return
the output path of resized image.
"""
path_out = path_out or temp_file_for(path_in)
log.debug('artresizer: ImageMagick resizing {0} to {1}',
util.displayable_path(path_in), util.displayable_path(path_out))
# "-resize WIDTHx>" shrinks images with the width larger
# than the given width while maintaining the aspect ratio
# with regards to the height.
# ImageMagick already seems to default to no interlace, but we include it
# here for the sake of explicitness.
cmd = ArtResizer.shared.im_convert_cmd + [
util.syspath(path_in, prefix=False),
'-resize', f'{maxwidth}x>',
'-interlace', 'none',
]
if quality > 0:
cmd += ['-quality', f'{quality}']
# "-define jpeg:extent=SIZEb" sets the target filesize for imagemagick to
# SIZE in bytes.
if max_filesize > 0:
cmd += ['-define', f'jpeg:extent={max_filesize}b']
cmd.append(util.syspath(path_out, prefix=False))
try:
util.command_output(cmd)
except subprocess.CalledProcessError:
log.warning('artresizer: IM convert failed for {0}',
util.displayable_path(path_in))
return path_in
return path_out
BACKEND_FUNCS = {
PIL: pil_resize,
IMAGEMAGICK: im_resize,
}
def pil_getsize(path_in):
from PIL import Image
try:
im = Image.open(util.syspath(path_in))
return im.size
except OSError as exc:
log.error("PIL could not read file {}: {}",
util.displayable_path(path_in), exc)
def im_getsize(path_in):
cmd = ArtResizer.shared.im_identify_cmd + \
['-format', '%w %h', util.syspath(path_in, prefix=False)]
try:
out = util.command_output(cmd).stdout
except subprocess.CalledProcessError as exc:
log.warning('ImageMagick size query failed')
log.debug(
'`convert` exited with (status {}) when '
'getting size with command {}:\n{}',
exc.returncode, cmd, exc.output.strip()
)
return
try:
return tuple(map(int, out.split(b' ')))
    except ValueError:
log.warning('Could not understand IM output: {0!r}', out)
BACKEND_GET_SIZE = {
PIL: pil_getsize,
IMAGEMAGICK: im_getsize,
}
def pil_deinterlace(path_in, path_out=None):
path_out = path_out or temp_file_for(path_in)
from PIL import Image
try:
im = Image.open(util.syspath(path_in))
im.save(util.py3_path(path_out), progressive=False)
return path_out
except IOError:
return path_in
def im_deinterlace(path_in, path_out=None):
path_out = path_out or temp_file_for(path_in)
cmd = ArtResizer.shared.im_convert_cmd + [
util.syspath(path_in, prefix=False),
'-interlace', 'none',
util.syspath(path_out, prefix=False),
]
try:
util.command_output(cmd)
return path_out
except subprocess.CalledProcessError:
return path_in
DEINTERLACE_FUNCS = {
PIL: pil_deinterlace,
IMAGEMAGICK: im_deinterlace,
}
def im_get_format(filepath):
cmd = ArtResizer.shared.im_identify_cmd + [
'-format', '%[magick]',
util.syspath(filepath)
]
try:
return util.command_output(cmd).stdout
except subprocess.CalledProcessError:
return None
def pil_get_format(filepath):
from PIL import Image, UnidentifiedImageError
try:
with Image.open(util.syspath(filepath)) as im:
return im.format
except (ValueError, TypeError, UnidentifiedImageError, FileNotFoundError):
log.exception("failed to detect image format for {}", filepath)
return None
BACKEND_GET_FORMAT = {
PIL: pil_get_format,
IMAGEMAGICK: im_get_format,
}
def im_convert_format(source, target, deinterlaced):
cmd = ArtResizer.shared.im_convert_cmd + [
util.syspath(source),
*(["-interlace", "none"] if deinterlaced else []),
util.syspath(target),
]
try:
subprocess.check_call(
cmd,
stderr=subprocess.DEVNULL,
stdout=subprocess.DEVNULL
)
return target
except subprocess.CalledProcessError:
return source
def pil_convert_format(source, target, deinterlaced):
from PIL import Image, UnidentifiedImageError
try:
with Image.open(util.syspath(source)) as im:
im.save(util.py3_path(target), progressive=not deinterlaced)
return target
except (ValueError, TypeError, UnidentifiedImageError, FileNotFoundError,
OSError):
log.exception("failed to convert image {} -> {}", source, target)
return source
BACKEND_CONVERT_IMAGE_FORMAT = {
PIL: pil_convert_format,
IMAGEMAGICK: im_convert_format,
}
class Shareable(type):
"""A pseudo-singleton metaclass that allows both shared and
non-shared instances. The ``MyClass.shared`` property holds a
lazily-created shared instance of ``MyClass`` while calling
``MyClass()`` to construct a new object works as usual.
"""
def __init__(cls, name, bases, dict):
super().__init__(name, bases, dict)
cls._instance = None
@property
def shared(cls):
if cls._instance is None:
cls._instance = cls()
return cls._instance
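# Illustrative sketch (the class is hypothetical):
#
#     class Config(metaclass=Shareable):
#         pass
#
#     Config.shared is Config.shared  # True: one lazily created instance
#     Config() is Config.shared       # False: plain construction is separate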
class ArtResizer(metaclass=Shareable):
"""A singleton class that performs image resizes.
"""
def __init__(self):
"""Create a resizer object with an inferred method.
"""
self.method = self._check_method()
log.debug("artresizer: method is {0}", self.method)
self.can_compare = self._can_compare()
# Use ImageMagick's magick binary when it's available. If it's
# not, fall back to the older, separate convert and identify
# commands.
if self.method[0] == IMAGEMAGICK:
self.im_legacy = self.method[2]
if self.im_legacy:
self.im_convert_cmd = ['convert']
self.im_identify_cmd = ['identify']
else:
self.im_convert_cmd = ['magick']
self.im_identify_cmd = ['magick', 'identify']
def resize(
self, maxwidth, path_in, path_out=None, quality=0, max_filesize=0
):
"""Manipulate an image file according to the method, returning a
        new path. For PIL or IMAGEMAGICK methods, resizes the image to a
temporary file and encodes with the specified quality level.
For WEBPROXY, returns `path_in` unmodified.
"""
if self.local:
func = BACKEND_FUNCS[self.method[0]]
return func(maxwidth, path_in, path_out,
quality=quality, max_filesize=max_filesize)
else:
return path_in
def deinterlace(self, path_in, path_out=None):
if self.local:
func = DEINTERLACE_FUNCS[self.method[0]]
return func(path_in, path_out)
else:
return path_in
def proxy_url(self, maxwidth, url, quality=0):
"""Modifies an image URL according the method, returning a new
URL. For WEBPROXY, a URL on the proxy server is returned.
Otherwise, the URL is returned unmodified.
"""
if self.local:
return url
else:
return resize_url(url, maxwidth, quality)
@property
def local(self):
"""A boolean indicating whether the resizing method is performed
locally (i.e., PIL or ImageMagick).
"""
return self.method[0] in BACKEND_FUNCS
def get_size(self, path_in):
"""Return the size of an image file as an int couple (width, height)
in pixels.
Only available locally.
"""
if self.local:
func = BACKEND_GET_SIZE[self.method[0]]
return func(path_in)
def get_format(self, path_in):
"""Returns the format of the image as a string.
Only available locally.
"""
if self.local:
func = BACKEND_GET_FORMAT[self.method[0]]
return func(path_in)
def reformat(self, path_in, new_format, deinterlaced=True):
"""Converts image to desired format, updating its extension, but
keeping the same filename.
Only available locally.
"""
if not self.local:
return path_in
new_format = new_format.lower()
# A nonexhaustive map of image "types" to extensions overrides
new_format = {
'jpeg': 'jpg',
}.get(new_format, new_format)
fname, ext = os.path.splitext(path_in)
path_new = fname + b'.' + new_format.encode('utf8')
func = BACKEND_CONVERT_IMAGE_FORMAT[self.method[0]]
        # Allow any exception to propagate, but still remove the original
        # file if the conversion produced a new path.
result_path = path_in
try:
result_path = func(path_in, path_new, deinterlaced)
finally:
if result_path != path_in:
os.unlink(path_in)
return result_path
def _can_compare(self):
"""A boolean indicating whether image comparison is available"""
return self.method[0] == IMAGEMAGICK and self.method[1] > (6, 8, 7)
@staticmethod
def _check_method():
"""Return a tuple indicating an available method and its version.
The result has at least two elements:
        - The method, either WEBPROXY, PIL, or IMAGEMAGICK.
- The version.
If the method is IMAGEMAGICK, there is also a third element: a
bool flag indicating whether to use the `magick` binary or
legacy single-purpose executables (`convert`, `identify`, etc.)
"""
version = get_im_version()
if version:
version, legacy = version
return IMAGEMAGICK, version, legacy
version = get_pil_version()
if version:
return PIL, version
        return WEBPROXY, (0,)  # A version tuple, for consistency.
def get_im_version():
"""Get the ImageMagick version and legacy flag as a pair. Or return
None if ImageMagick is not available.
"""
for cmd_name, legacy in ((['magick'], False), (['convert'], True)):
cmd = cmd_name + ['--version']
try:
out = util.command_output(cmd).stdout
except (subprocess.CalledProcessError, OSError) as exc:
log.debug('ImageMagick version check failed: {}', exc)
else:
if b'imagemagick' in out.lower():
pattern = br".+ (\d+)\.(\d+)\.(\d+).*"
match = re.search(pattern, out)
if match:
version = (int(match.group(1)),
int(match.group(2)),
int(match.group(3)))
return version, legacy
return None
def get_pil_version():
"""Get the PIL/Pillow version, or None if it is unavailable.
"""
try:
__import__('PIL', fromlist=['Image'])
return (0,)
except ImportError:
return None
| 15,240
|
Python
|
.py
| 395
| 30.253165
| 78
| 0.62205
|
rembo10/headphones
| 3,370
| 601
| 527
|
GPL-3.0
|
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
|
9,272
|
pathrender.py
|
rembo10_headphones/headphones/pathrender.py
|
# encoding=utf8
# This file is part of Headphones.
#
# Headphones is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Headphones is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Headphones. If not, see <http://www.gnu.org/licenses/>.
"""
Path pattern substitution module, see details below for syntax.
The pattern matching is loosely based on foobar2000 pattern syntax,
i.e. the notion of escaping characters with \' and of optional
elements (enclosed in square brackets [] there, in curly braces {}
here) is taken from there, while the substitution variable names are
Perl-ish or sh-ish. The following
syntax elements are supported:
* escaped literal strings, that is everything that is enclosed
within single quotes (like 'this');
* substitution variables, which start with dollar sign ($) and
extend until next non-alphanumeric+underscore character
(like $This and $5_that).
* optional elements enclosed in curly braces, which render
nonempty value only if any variable or optional inside returned
nonempty value, ignoring literals (like {'{'$That'}'}).
"""
from enum import Enum
__author__ = "Andrzej Ciarkowski <andrzej.ciarkowski@gmail.com>"
class _PatternElement(object):
'''ABC for hierarchy of path name renderer pattern elements.'''
def render(self, replacement):
# type: (Mapping[str,str]) -> str
'''Format this _PatternElement into string using provided substitution dictionary.'''
raise NotImplementedError()
def __ne__(self, other):
return not self == other
class _Generator(_PatternElement):
# pylint: disable=abstract-method
'''Tagging interface for "content-generating" elements like replacement or optional block.'''
pass
class _Replacement(_Generator):
    '''Replacement variable, e.g. $title.'''
def __init__(self, pattern):
# type: (str)
self._pattern = pattern
def render(self, replacement):
# type: (Mapping[str,str]) -> str
res = replacement.get(self._pattern, self._pattern)
if res is None:
return ''
else:
return res
def __str__(self):
return self._pattern
@property
def pattern(self):
return self._pattern
def __eq__(self, other):
return isinstance(other, _Replacement) and \
self._pattern == other.pattern
class _LiteralText(_PatternElement):
'''Just a plain piece of text to be rendered "as is".'''
def __init__(self, text):
# type: (str)
self._text = text
def render(self, replacement):
# type: (Mapping[str,str]) -> str
return self._text
def __str__(self):
return self._text
@property
def text(self):
return self._text
def __eq__(self, other):
return isinstance(other, _LiteralText) and self._text == other.text
class _OptionalBlock(_Generator):
'''Optional block will render its contents only if any _Generator in its scope did return non-empty result.'''
def __init__(self, scope):
# type: ([_PatternElement])
self._scope = scope
def render(self, replacement):
# type: (Mapping[str,str]) -> str
res = [(isinstance(x, _Generator), x.render(replacement)) for x in self._scope]
if any((t[0] and t[1] is not None and len(t[1]) != 0) for t in res):
return "".join(t[1] for t in res)
else:
return ""
def __eq__(self, other):
"""
:type other: _OptionalBlock
"""
return isinstance(other, _OptionalBlock) and self._scope == other._scope
_OPTIONAL_START = '{'
_OPTIONAL_END = '}'
_ESCAPE_CHAR = '\''
_REPLACEMENT_START = '$'
def _is_replacement_valid(c):
# type: (str) -> bool
return c.isalnum() or c == '_'
class _State(Enum):
LITERAL = 0
ESCAPE = 1
REPLACEMENT = 2
def _append_literal(scope, text):
# type: ([_PatternElement], str) -> None
'''Append literal text to the scope BUT ONLY if it's not an empty string.'''
if len(text) == 0:
return
scope.append(_LiteralText(text))
class Warnings(Enum):
    '''Pattern parsing warnings, as stored within the warnings property of a Pattern object after parsing.'''
UNCLOSED_ESCAPE = 'Warnings.UNCLOSED_ESCAPE'
UNCLOSED_OPTIONAL = 'Warnings.UNCLOSED_OPTIONAL'
def _parse_pattern(pattern, warnings):
# type: (str,MutableSet[Warnings]) -> [_PatternElement]
'''Parse path pattern text into list of _PatternElements, put warnings into the provided set.'''
start = 0 # index of current state start char
root_scope = [] # here our _PatternElements will reside
scope_stack = [root_scope] # stack so that we can return to the outer scope
scope = root_scope # pointer to the current list for _OptionalBlock
inside_optional = 0 # nesting level of _OptionalBlocks
state = _State.LITERAL # current state
for i, c in enumerate(pattern):
if state is _State.ESCAPE:
if c != _ESCAPE_CHAR:
# only escape char can get us out of _State.ESCAPE
continue
_append_literal(scope, pattern[start + 1:i])
state = _State.LITERAL
start = i + 1
# after exiting _State.ESCAPE on escape char no more processing of c
continue
if state is _State.REPLACEMENT:
if _is_replacement_valid(c):
                # only an invalid replacement character can end _State.REPLACEMENT
continue
scope.append(_Replacement(pattern[start:i]))
state = _State.LITERAL
start = i
# intentional fall-through to _State.LITERAL
assert state is _State.LITERAL
if c == _ESCAPE_CHAR:
_append_literal(scope, pattern[start:i])
state = _State.ESCAPE
start = i
            # no further processing of the escape char c
continue
if c == _REPLACEMENT_START:
_append_literal(scope, pattern[start:i])
state = _State.REPLACEMENT
start = i
            # no further processing of the replacement start char c
continue
if c == _OPTIONAL_START:
_append_literal(scope, pattern[start:i])
inside_optional += 1
new_scope = []
scope_stack.append(new_scope)
scope = new_scope
start = i + 1
continue
if c == _OPTIONAL_END:
if inside_optional == 0:
# no optional block to end, just treat as literal text
continue
inside_optional -= 1
_append_literal(scope, pattern[start:i])
scope_stack.pop()
prev_scope = scope_stack[-1]
prev_scope.append(_OptionalBlock(scope))
scope = prev_scope
start = i + 1
# fi
# done
if state is _State.ESCAPE:
warnings.add(Warnings.UNCLOSED_ESCAPE)
if inside_optional != 0:
warnings.add(Warnings.UNCLOSED_OPTIONAL)
if state is _State.REPLACEMENT:
root_scope.append(_Replacement(pattern[start:]))
else:
# don't care about unclosed elements :P
_append_literal(root_scope, pattern[start:])
return root_scope
class Pattern(object):
    '''Stores a preparsed rename pattern for repeated use.

    If using the same pattern repeatedly it is much more efficient
    to parse it into a Pattern object once and reuse that, instead of
    parsing the textual pattern on each substitution. To use a Pattern
    object for substitution, simply call it as if it were a function,
    providing a dictionary as the argument (see __call__()).'''
def __init__(self, pattern):
        # type: (str) -> None
self._warnings = set()
self._pattern = _parse_pattern(pattern, self._warnings)
def __call__(self, replacement):
# type: (Mapping[str,str]) -> str
'''Execute path rendering/substitution based on replacement dictionary.'''
return "".join(p.render(replacement) for p in self._pattern)
def _get_warnings(self):
        # type: () -> AbstractSet[Warnings]
'''Getter for warnings property.'''
return self._warnings
warnings = property(_get_warnings, doc="Access warnings raised during pattern parsing")
def render(pattern, replacement):
# type: (str, Mapping[str,str]) -> (str, AbstractSet[Warnings])
'''Render path name based on replacement pattern and dictionary.'''
p = Pattern(pattern)
return p(replacement), p.warnings
if __name__ == "__main__":
# primitive test ;)
p = Pattern("{$Disc.}$Track - $Artist - $Title{ [$Year]}")
d = {'$Disc': '', '$Track': '05', '$Artist': 'Grzegżółka', '$Title': 'Błona kapłona', '$Year': '2019'}
assert p(d) == "05 - Grzegżółka - Błona kapłona [2019]"
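    # A couple of extra checks (sketches relying only on the classes above):
    # escaped literals pass through verbatim, and an unclosed optional block
    # is reported via the warnings property.
    p2 = Pattern("'$literal' - $Title")
    assert p2({'$Title': 'Song'}) == "$literal - Song"
    p3 = Pattern("{$Title")
    assert Warnings.UNCLOSED_OPTIONAL in p3.warnings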
| 9,261
|
Python
|
.py
| 219
| 35.027397
| 114
| 0.637355
|
rembo10/headphones
| 3,370
| 601
| 527
|
GPL-3.0
|
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
|
9,273
|
notifiers.py
|
rembo10_headphones/headphones/notifiers.py
|
from urllib.parse import urlencode, quote_plus, parse_qsl
import urllib.request, urllib.parse, urllib.error
import subprocess
import json
from email.mime.text import MIMEText
import smtplib
import email.utils
from http.client import HTTPSConnection
import requests
import os.path
from headphones import logger, helpers, common, request
from pynma import pynma
import cherrypy
import headphones
import gntp.notifier
# import oauth2 as oauth
# NOTE: TwitterNotifier._get_authorization() and _get_credentials() still
# reference `oauth`; they will raise NameError until this import is restored.
import twitter
class GROWL(object):
"""
Growl notifications, for OS X.
"""
def __init__(self):
self.enabled = headphones.CONFIG.GROWL_ENABLED
self.host = headphones.CONFIG.GROWL_HOST
self.password = headphones.CONFIG.GROWL_PASSWORD
def conf(self, options):
return cherrypy.config['config'].get('Growl', options)
def notify(self, message, event):
if not self.enabled:
return
        # Split host and port; fall back to localhost:23053 (the GNTP default)
        if self.host == "":
            host, port = "localhost", 23053
        elif ":" in self.host:
            host, port = self.host.split(':', 1)
            port = int(port)
        else:
            host, port = self.host, 23053
# If password is empty, assume none
if self.password == "":
password = None
else:
password = self.password
# Register notification
growl = gntp.notifier.GrowlNotifier(
applicationName='Headphones',
notifications=['New Event'],
defaultNotifications=['New Event'],
hostname=host,
port=port,
password=password
)
try:
growl.register()
except gntp.notifier.errors.NetworkError:
logger.warning('Growl notification failed: network error')
return
except gntp.notifier.errors.AuthError:
logger.warning('Growl notification failed: authentication error')
return
# Fix message
message = message.encode(headphones.SYS_ENCODING, "replace")
# Send it, including an image
image_file = os.path.join(str(headphones.PROG_DIR),
"data/images/headphoneslogo.png")
with open(image_file, 'rb') as f:
image = f.read()
try:
growl.notify(
noteType='New Event',
title=event,
description=message,
icon=image
)
except gntp.notifier.errors.NetworkError:
logger.warning('Growl notification failed: network error')
return
logger.info("Growl notifications sent.")
def updateLibrary(self):
        # Kept as a no-op for interface uniformity with the other notifiers
return
def test(self, host, password):
self.enabled = True
self.host = host
self.password = password
self.notify('ZOMG Lazors Pewpewpew!', 'Test Message')
class PROWL(object):
"""
Prowl notifications.
"""
def __init__(self):
self.enabled = headphones.CONFIG.PROWL_ENABLED
self.keys = headphones.CONFIG.PROWL_KEYS
self.priority = headphones.CONFIG.PROWL_PRIORITY
def conf(self, options):
return cherrypy.config['config'].get('Prowl', options)
def notify(self, message, event):
if not headphones.CONFIG.PROWL_ENABLED:
return
http_handler = HTTPSConnection("api.prowlapp.com")
data = {'apikey': headphones.CONFIG.PROWL_KEYS,
'application': 'Headphones',
'event': event,
'description': message.encode("utf-8"),
'priority': headphones.CONFIG.PROWL_PRIORITY}
http_handler.request("POST",
"/publicapi/add",
headers={
'Content-type':
"application/x-www-form-urlencoded"},
body=urlencode(data))
response = http_handler.getresponse()
request_status = response.status
if request_status == 200:
logger.info("Prowl notifications sent.")
return True
elif request_status == 401:
logger.info("Prowl auth failed: %s" % response.reason)
return False
else:
logger.info("Prowl notification failed.")
return False
def updateLibrary(self):
        # Kept as a no-op for interface uniformity with the other notifiers
return
def test(self, keys, priority):
self.enabled = True
self.keys = keys
self.priority = priority
self.notify('ZOMG Lazors Pewpewpew!', 'Test Message')
class MPC(object):
"""
MPC library update
"""
def __init__(self):
pass
def notify(self):
subprocess.call(["mpc", "update"])
class XBMC(object):
"""
XBMC notifications
"""
def __init__(self):
self.hosts = headphones.CONFIG.XBMC_HOST
self.username = headphones.CONFIG.XBMC_USERNAME
self.password = headphones.CONFIG.XBMC_PASSWORD
def _sendhttp(self, host, command):
url_command = urllib.parse.urlencode(command)
url = host + '/xbmcCmds/xbmcHttp/?' + url_command
if self.password:
return request.request_content(url,
auth=(self.username, self.password))
else:
return request.request_content(url)
def _sendjson(self, host, method, params={}):
data = [
{'id': 0, 'jsonrpc': '2.0', 'method': method, 'params': params}]
headers = {'Content-Type': 'application/json'}
url = host + '/jsonrpc'
if self.password:
response = request.request_json(
url, method="post",
data=json.dumps(data),
headers=headers, auth=(
self.username, self.password))
else:
response = request.request_json(url, method="post",
data=json.dumps(data),
headers=headers)
if response:
return response[0]['result']
def update(self):
        # From what I read you can't update the music library on a per
        # directory or per path basis, so we need to update the whole thing
hosts = [x.strip() for x in self.hosts.split(',')]
for host in hosts:
logger.info('Sending library update command to XBMC @ ' + host)
request = self._sendjson(host, 'AudioLibrary.Scan')
if not request:
logger.warn('Error sending update request to XBMC')
def notify(self, artist, album, albumartpath):
hosts = [x.strip() for x in self.hosts.split(',')]
header = "Headphones"
message = "%s - %s added to your library" % (artist, album)
time = "3000" # in ms
for host in hosts:
            logger.info('Sending notification command to XBMC @ ' + host)
try:
version = self._sendjson(host, 'Application.GetProperties',
{'properties': ['version']})[
'version']['major']
if version < 12: # Eden
notification = header + "," + message + "," + time + \
"," + albumartpath
notifycommand = {'command': 'ExecBuiltIn',
'parameter': 'Notification(' +
notification + ')'}
request = self._sendhttp(host, notifycommand)
else: # Frodo
params = {'title': header, 'message': message,
'displaytime': int(time),
'image': albumartpath}
request = self._sendjson(host, 'GUI.ShowNotification',
params)
if not request:
raise Exception
except Exception:
logger.error('Error sending notification request to XBMC')
class LMS(object):
"""
Class for updating a Logitech Media Server
"""
def __init__(self):
self.hosts = headphones.CONFIG.LMS_HOST
def _sendjson(self, host):
        data = {'id': 1, 'method': 'slim.request', 'params': ["", ["rescan"]]}
        # The request body must be bytes under Python 3
        data = json.JSONEncoder().encode(data).encode("utf-8")
        content = {'Content-Type': 'application/json'}
        req = urllib.request.Request(host + '/jsonrpc.js', data, content)
        try:
            handle = urllib.request.urlopen(req)
        except Exception as e:
            logger.warn('Error opening LMS url: %s' % e)
            return
        # urlopen() returns bytes; decode before JSON-parsing
        response = json.JSONDecoder().decode(handle.read().decode("utf-8"))
        try:
            return response['result']
        except KeyError:
            logger.warn('LMS returned error: %s' % response['error'])
            return response['error']
def update(self):
hosts = [x.strip() for x in self.hosts.split(',')]
for host in hosts:
logger.info('Sending library rescan command to LMS @ ' + host)
request = self._sendjson(host)
if request:
logger.warn('Error sending rescan request to LMS')
class Plex(object):
def __init__(self):
self.server_hosts = headphones.CONFIG.PLEX_SERVER_HOST
self.client_hosts = headphones.CONFIG.PLEX_CLIENT_HOST
self.username = headphones.CONFIG.PLEX_USERNAME
self.password = headphones.CONFIG.PLEX_PASSWORD
self.token = headphones.CONFIG.PLEX_TOKEN
def _sendhttp(self, host, command):
url = host + '/xbmcCmds/xbmcHttp/?' + command
if self.password:
response = request.request_response(url, auth=(
self.username, self.password))
else:
response = request.request_response(url)
return response
def _sendjson(self, host, method, params={}):
data = [
{'id': 0, 'jsonrpc': '2.0', 'method': method, 'params': params}]
headers = {'Content-Type': 'application/json'}
url = host + '/jsonrpc'
if self.password:
response = request.request_json(
url, method="post",
data=json.dumps(data),
headers=headers, auth=(
self.username, self.password))
else:
response = request.request_json(url, method="post",
data=json.dumps(data),
headers=headers)
if response:
return response[0]['result']
def update(self):
# Get token from user credentials
if not self.token:
loginpage = 'https://plex.tv/users/sign_in.json'
post_params = {
'user[login]': self.username,
'user[password]': self.password
}
headers = {
'X-Plex-Device-Name': 'Headphones',
'X-Plex-Product': 'Headphones',
'X-Plex-Client-Identifier': common.USER_AGENT,
'X-Plex-Version': ''
}
logger.info("Getting plex.tv credentials for user %s", self.username)
try:
r = requests.post(loginpage, data=post_params, headers=headers)
r.raise_for_status()
except requests.RequestException as e:
logger.error("Error getting plex.tv credentials, check settings: %s", e)
return False
try:
data = r.json()
except ValueError as e:
logger.error("Error getting plex.tv credentials: %s", e)
return False
try:
self.token = data['user']['authentication_token']
except KeyError as e:
logger.error("Error getting plex.tv credentials: %s", e)
return False
        # From what I read you can't update the music library on a per
        # directory or per path basis, so we need to update the whole thing
hosts = [x.strip() for x in self.server_hosts.split(',')]
for host in hosts:
logger.info(
'Sending library update command to Plex Media Server@ ' + host)
url = "%s/library/sections" % host
if self.token:
params = {'X-Plex-Token': self.token}
else:
                params = None
try:
r = request.request_minidom(url, params=params)
if not r:
logger.warn("Error getting Plex Media Server details, check settings (possibly incorrect token)")
return False
sections = r.getElementsByTagName('Directory')
if not sections:
logger.info("Plex Media Server not running on: " + host)
return False
for s in sections:
if s.getAttribute('type') == "artist":
url = "%s/library/sections/%s/refresh" % (
host, s.getAttribute('key'))
request.request_response(url, params=params)
except Exception as e:
logger.error("Error getting Plex Media Server details: %s" % e)
return False
def notify(self, artist, album, albumartpath):
hosts = [x.strip() for x in self.client_hosts.split(',')]
header = "Headphones"
message = "%s - %s added to your library" % (artist, album)
time = "3000" # in ms
for host in hosts:
logger.info(
'Sending notification command to Plex client @ ' + host)
try:
version = self._sendjson(host, 'Application.GetProperties',
{'properties': ['version']})[
'version']['major']
if version < 12: # Eden
notification = header + "," + message + "," + time + \
"," + albumartpath
notifycommand = {'command': 'ExecBuiltIn',
'parameter': 'Notification(' +
notification + ')'}
request = self._sendhttp(host, notifycommand)
else: # Frodo
params = {'title': header, 'message': message,
'displaytime': int(time),
'image': albumartpath}
request = self._sendjson(host, 'GUI.ShowNotification',
params)
if not request:
raise Exception
except Exception:
logger.error(
'Error sending notification request to Plex client @ ' +
host)
class NMA(object):
def notify(self, artist=None, album=None, snatched=None):
title = 'Headphones'
api = headphones.CONFIG.NMA_APIKEY
nma_priority = headphones.CONFIG.NMA_PRIORITY
logger.debug("NMA title: " + title)
logger.debug("NMA API: " + api)
logger.debug("NMA Priority: " + str(nma_priority))
if snatched:
event = snatched + " snatched!"
message = "Headphones has snatched: " + snatched
else:
event = artist + ' - ' + album + ' complete!'
message = "Headphones has downloaded and postprocessed: " + \
artist + ' [' + album + ']'
logger.debug("NMA event: " + event)
logger.debug("NMA message: " + message)
batch = False
p = pynma.PyNMA()
keys = api.split(',')
p.addkey(keys)
if len(keys) > 1:
batch = True
response = p.push(title, event, message, priority=nma_priority,
batch_mode=batch)
        if response[api]['code'] != '200':
logger.error('Could not send notification to NotifyMyAndroid')
return False
else:
return True
class PUSHBULLET(object):
def __init__(self):
self.apikey = headphones.CONFIG.PUSHBULLET_APIKEY
self.deviceid = headphones.CONFIG.PUSHBULLET_DEVICEID
def notify(self, message, status):
if not headphones.CONFIG.PUSHBULLET_ENABLED:
return
url = "https://api.pushbullet.com/v2/pushes"
data = {'type': "note",
'title': "Headphones",
'body': message + ': ' + status}
if self.deviceid:
data['device_iden'] = self.deviceid
headers = {'Content-type': "application/json",
'Authorization': 'Bearer ' +
headphones.CONFIG.PUSHBULLET_APIKEY}
response = request.request_json(url, method="post", headers=headers,
data=json.dumps(data))
if response:
logger.info("PushBullet notifications sent.")
return True
else:
logger.info("PushBullet notification failed.")
return False
class PUSHALOT(object):
def notify(self, message, event):
if not headphones.CONFIG.PUSHALOT_ENABLED:
return
pushalot_authorizationtoken = headphones.CONFIG.PUSHALOT_APIKEY
logger.debug("Pushalot event: " + event)
logger.debug("Pushalot message: " + message)
logger.debug("Pushalot api: " + pushalot_authorizationtoken)
http_handler = HTTPSConnection("pushalot.com")
data = {'AuthorizationToken': pushalot_authorizationtoken,
'Title': event.encode('utf-8'),
'Body': message.encode("utf-8")}
http_handler.request("POST",
"/api/sendmessage",
headers={
'Content-type':
"application/x-www-form-urlencoded"},
body=urlencode(data))
response = http_handler.getresponse()
request_status = response.status
logger.debug("Pushalot response status: %r" % request_status)
logger.debug("Pushalot response headers: %r" % response.getheaders())
logger.debug("Pushalot response body: %r" % response.read())
if request_status == 200:
logger.info("Pushalot notifications sent.")
return True
elif request_status == 410:
logger.info("Pushalot auth failed: %s" % response.reason)
return False
else:
logger.info("Pushalot notification failed.")
return False
class JOIN(object):
def __init__(self):
self.enabled = headphones.CONFIG.JOIN_ENABLED
self.apikey = headphones.CONFIG.JOIN_APIKEY
self.deviceid = headphones.CONFIG.JOIN_DEVICEID
self.url = 'https://joinjoaomgcd.appspot.com/_ah/' \
'api/messaging/v1/sendPush?apikey={apikey}' \
'&title={title}&text={text}' \
'&icon={icon}'
def notify(self, message, event):
if not headphones.CONFIG.JOIN_ENABLED or \
not headphones.CONFIG.JOIN_APIKEY:
return
icon = "https://cdn.rawgit.com/Headphones/" \
"headphones/develop/data/images/headphoneslogo.png"
        if not self.deviceid:
            self.deviceid = "group.all"
        # Build the final URL locally; mutating self.url would append the
        # device parameter again on every subsequent call
        devices = [x.strip() for x in self.deviceid.split(',')]
        if len(devices) > 1:
            url = self.url + '&deviceIds={deviceid}'
        else:
            url = self.url + '&deviceId={deviceid}'
        response = urllib.request.urlopen(url.format(apikey=self.apikey,
                                                     title=quote_plus(event),
                                                     text=quote_plus(
                                                         message.encode(
                                                             "utf-8")),
                                                     icon=icon,
                                                     deviceid=self.deviceid))
if response:
logger.info("Join notifications sent.")
return True
else:
logger.error("Join notification failed.")
return False
class Synoindex(object):
def __init__(self, util_loc='/usr/syno/bin/synoindex'):
self.util_loc = util_loc
def util_exists(self):
return os.path.exists(self.util_loc)
def notify(self, path):
path = os.path.abspath(path)
if not self.util_exists():
logger.warn(
"Error sending notification: synoindex utility "
"not found at %s" % self.util_loc)
return
if os.path.isfile(path):
cmd_arg = '-a'
elif os.path.isdir(path):
cmd_arg = '-A'
else:
logger.warn(
"Error sending notification: Path passed to synoindex "
"was not a file or folder.")
return
cmd = [self.util_loc, cmd_arg, path]
logger.info("Calling synoindex command: %s" % str(cmd))
try:
p = subprocess.Popen(cmd, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
cwd=headphones.PROG_DIR)
out, error = p.communicate()
# synoindex never returns any codes other than '0',
# highly irritating
except OSError as e:
logger.warn("Error sending notification: %s" % str(e))
def notify_multiple(self, path_list):
if isinstance(path_list, list):
for path in path_list:
self.notify(path)
class PUSHOVER(object):
def __init__(self):
self.enabled = headphones.CONFIG.PUSHOVER_ENABLED
self.keys = headphones.CONFIG.PUSHOVER_KEYS
self.priority = headphones.CONFIG.PUSHOVER_PRIORITY
if headphones.CONFIG.PUSHOVER_APITOKEN:
self.application_token = headphones.CONFIG.PUSHOVER_APITOKEN
else:
self.application_token = "LdPCoy0dqC21ktsbEyAVCcwvQiVlsz"
def conf(self, options):
return cherrypy.config['config'].get('Pushover', options)
def notify(self, message, event):
if not headphones.CONFIG.PUSHOVER_ENABLED:
return
url = "https://api.pushover.net/1/messages.json"
data = {'token': self.application_token,
'user': headphones.CONFIG.PUSHOVER_KEYS,
'title': event,
'message': message.encode("utf-8"),
'priority': headphones.CONFIG.PUSHOVER_PRIORITY}
headers = {'Content-type': "application/x-www-form-urlencoded"}
response = request.request_response(url, method="POST",
headers=headers, data=data)
if response:
logger.info("Pushover notifications sent.")
return True
else:
logger.error("Pushover notification failed.")
return False
def updateLibrary(self):
        # Kept as a no-op for interface uniformity with the other notifiers
return
def test(self, keys, priority):
self.enabled = True
self.keys = keys
self.priority = priority
self.notify('Main Screen Activate', 'Test Message')
class TwitterNotifier(object):
REQUEST_TOKEN_URL = 'https://api.twitter.com/oauth/request_token'
ACCESS_TOKEN_URL = 'https://api.twitter.com/oauth/access_token'
AUTHORIZATION_URL = 'https://api.twitter.com/oauth/authorize'
SIGNIN_URL = 'https://api.twitter.com/oauth/authenticate'
def __init__(self):
self.consumer_key = "oYKnp2ddX5gbARjqX8ZAAg"
self.consumer_secret = "A4Xkw9i5SjHbTk7XT8zzOPqivhj9MmRDR9Qn95YA9sk"
def notify_snatch(self, title):
if headphones.CONFIG.TWITTER_ONSNATCH:
self._notifyTwitter(
common.notifyStrings[
common.NOTIFY_SNATCH] + ': ' + title + ' at ' +
helpers.now())
def notify_download(self, title):
if headphones.CONFIG.TWITTER_ENABLED:
self._notifyTwitter(common.notifyStrings[
common.NOTIFY_DOWNLOAD] + ': ' +
title + ' at ' + helpers.now())
def test_notify(self):
return self._notifyTwitter(
"This is a test notification from Headphones at " + helpers.now(),
force=True)
def _get_authorization(self):
oauth_consumer = oauth.Consumer(key=self.consumer_key,
secret=self.consumer_secret)
oauth_client = oauth.Client(oauth_consumer)
logger.info('Requesting temp token from Twitter')
resp, content = oauth_client.request(self.REQUEST_TOKEN_URL, 'GET')
if resp['status'] != '200':
            logger.info(
                'Invalid response from Twitter requesting temp token: %s' %
                resp['status'])
else:
request_token = dict(parse_qsl(content))
headphones.CONFIG.TWITTER_USERNAME = request_token['oauth_token']
headphones.CONFIG.TWITTER_PASSWORD = request_token[
'oauth_token_secret']
return self.AUTHORIZATION_URL + "?oauth_token=" + request_token[
'oauth_token']
def _get_credentials(self, key):
request_token = {}
request_token['oauth_token'] = headphones.CONFIG.TWITTER_USERNAME
request_token[
'oauth_token_secret'] = headphones.CONFIG.TWITTER_PASSWORD
request_token['oauth_callback_confirmed'] = 'true'
token = oauth.Token(request_token['oauth_token'],
request_token['oauth_token_secret'])
token.set_verifier(key)
logger.info(
'Generating and signing request for an access token using key ' +
key)
oauth_consumer = oauth.Consumer(key=self.consumer_key,
secret=self.consumer_secret)
logger.info('oauth_consumer: ' + str(oauth_consumer))
oauth_client = oauth.Client(oauth_consumer, token)
logger.info('oauth_client: ' + str(oauth_client))
resp, content = oauth_client.request(self.ACCESS_TOKEN_URL,
method='POST',
body='oauth_verifier=%s' % key)
logger.info('resp, content: ' + str(resp) + ',' + str(content))
access_token = dict(parse_qsl(content))
logger.info('access_token: ' + str(access_token))
logger.info('resp[status] = ' + str(resp['status']))
if resp['status'] != '200':
            # The stray logger.ERROR positional argument was a leftover from a
            # different logging API; use logger.error directly instead
            logger.error('The request for a token did not succeed: ' +
                         str(resp['status']))
return False
else:
logger.info('Your Twitter Access Token key: %s' % access_token[
'oauth_token'])
logger.info(
'Access Token secret: %s' % access_token['oauth_token_secret'])
headphones.CONFIG.TWITTER_USERNAME = access_token['oauth_token']
headphones.CONFIG.TWITTER_PASSWORD = access_token[
'oauth_token_secret']
return True
def _send_tweet(self, message=None):
        consumer_key = self.consumer_key
        consumer_secret = self.consumer_secret
        access_token_key = headphones.CONFIG.TWITTER_USERNAME
        access_token_secret = headphones.CONFIG.TWITTER_PASSWORD
        logger.info("Sending tweet: " + message)
        # python-twitter's Api takes (consumer_key, consumer_secret,
        # access_token_key, access_token_secret)
        api = twitter.Api(consumer_key, consumer_secret, access_token_key,
                          access_token_secret)
try:
api.PostUpdate(message)
except Exception as e:
logger.info("Error Sending Tweet: %s" % e)
return False
return True
def _notifyTwitter(self, message='', force=False):
prefix = headphones.CONFIG.TWITTER_PREFIX
if not headphones.CONFIG.TWITTER_ENABLED and not force:
return False
return self._send_tweet(prefix + ": " + message)
class OSX_NOTIFY(object):
def __init__(self):
try:
self.objc = __import__("objc")
self.AppKit = __import__("AppKit")
        except ImportError:
            logger.warn('OS X Notification: Cannot import objc or AppKit')
def swizzle(self, cls, SEL, func):
old_IMP = getattr(cls, SEL, None)
if old_IMP is None:
old_IMP = cls.instanceMethodForSelector_(SEL)
def wrapper(self, *args, **kwargs):
return func(self, old_IMP, *args, **kwargs)
new_IMP = self.objc.selector(
wrapper,
selector=old_IMP.selector,
signature=old_IMP.signature
)
self.objc.classAddMethod(cls, SEL.encode(), new_IMP)
def notify(self, title, subtitle=None, text=None, sound=True, image=None):
try:
self.swizzle(
self.objc.lookUpClass('NSBundle'),
'bundleIdentifier',
self.swizzled_bundleIdentifier
)
NSUserNotification = self.objc.lookUpClass('NSUserNotification')
NSUserNotificationCenter = self.objc.lookUpClass(
'NSUserNotificationCenter')
NSAutoreleasePool = self.objc.lookUpClass('NSAutoreleasePool')
if not NSUserNotification or not NSUserNotificationCenter:
return False
pool = NSAutoreleasePool.alloc().init()
notification = NSUserNotification.alloc().init()
notification.setTitle_(title)
if subtitle:
notification.setSubtitle_(subtitle)
if text:
notification.setInformativeText_(text)
if sound:
notification.setSoundName_(
"NSUserNotificationDefaultSoundName")
if image:
source_img = self.AppKit.NSImage.alloc().\
initByReferencingFile_(image)
notification.setContentImage_(source_img)
# notification.set_identityImage_(source_img)
notification.setHasActionButton_(False)
notification_center = NSUserNotificationCenter.\
defaultUserNotificationCenter()
notification_center.deliverNotification_(notification)
del pool
return True
except Exception as e:
logger.warn('Error sending OS X Notification: %s' % e)
return False
def swizzled_bundleIdentifier(self, original, swizzled):
return 'ade.headphones.osxnotify'
class BOXCAR(object):
def __init__(self):
self.url = 'https://new.boxcar.io/api/notifications'
def notify(self, title, message, rgid=None):
try:
if rgid:
message += '<br></br><a href="https://musicbrainz.org/' \
'release-group/%s">MusicBrainz</a>' % rgid
data = urllib.parse.urlencode({
'user_credentials': headphones.CONFIG.BOXCAR_TOKEN,
'notification[title]': title.encode('utf-8'),
'notification[long_message]': message.encode('utf-8'),
'notification[sound]': "done",
'notification[icon_url]': "https://raw.githubusercontent.com/rembo10/headphones/master/data/images"
"/headphoneslogo.png"
})
            req = urllib.request.Request(self.url)
            # urlopen() requires a bytes body under Python 3
            handle = urllib.request.urlopen(req, data.encode("utf-8"))
handle.close()
return True
except urllib.error.URLError as e:
logger.warn('Error sending Boxcar2 Notification: %s' % e)
return False
class SubSonicNotifier(object):
def __init__(self):
self.host = headphones.CONFIG.SUBSONIC_HOST
self.username = headphones.CONFIG.SUBSONIC_USERNAME
self.password = headphones.CONFIG.SUBSONIC_PASSWORD
def notify(self, albumpaths):
# Correct URL
if not self.host.lower().startswith("http"):
self.host = "http://" + self.host
if not self.host.lower().endswith("/"):
self.host = self.host + "/"
# Invoke request
request.request_response(
self.host + "musicFolderSettings.view?scanNow",
auth=(self.username, self.password))
class Email(object):
def notify(self, subject, message):
message = MIMEText(message, 'plain', "utf-8")
message['Subject'] = subject
message['From'] = email.utils.formataddr(
('Headphones', headphones.CONFIG.EMAIL_FROM))
message['To'] = headphones.CONFIG.EMAIL_TO
message['Date'] = email.utils.formatdate(localtime=True)
try:
if headphones.CONFIG.EMAIL_SSL:
mailserver = smtplib.SMTP_SSL(
headphones.CONFIG.EMAIL_SMTP_SERVER,
headphones.CONFIG.EMAIL_SMTP_PORT)
else:
mailserver = smtplib.SMTP(headphones.CONFIG.EMAIL_SMTP_SERVER,
headphones.CONFIG.EMAIL_SMTP_PORT)
if headphones.CONFIG.EMAIL_TLS:
mailserver.starttls()
mailserver.ehlo()
if headphones.CONFIG.EMAIL_SMTP_USER:
mailserver.login(headphones.CONFIG.EMAIL_SMTP_USER,
headphones.CONFIG.EMAIL_SMTP_PASSWORD)
mailserver.sendmail(headphones.CONFIG.EMAIL_FROM,
headphones.CONFIG.EMAIL_TO,
message.as_string())
mailserver.quit()
return True
except Exception as e:
logger.warn('Error sending Email: %s' % e)
return False
class TELEGRAM(object):
def notify(self, message, status, rgid=None, image=None):
if not headphones.CONFIG.TELEGRAM_ENABLED:
return
import requests
TELEGRAM_API = "https://api.telegram.org/bot%s/%s"
# Get configuration data
token = headphones.CONFIG.TELEGRAM_TOKEN
userid = headphones.CONFIG.TELEGRAM_USERID
# Construct message
message = '\n\n' + message
# MusicBrainz link
if rgid:
message += '\n\n <a href="https://musicbrainz.org/' \
'release-group/%s">MusicBrainz</a>' % rgid
        # Send image
        response = None
        if image:
            # Keep the file handle in a context manager so it gets closed
            with open(image, "rb") as photo:
                image_file = {'photo': (image, photo)}
                payload = {'chat_id': userid, 'parse_mode': "HTML", 'caption': status + message}
                try:
                    response = requests.post(TELEGRAM_API % (token, "sendPhoto"), data=payload, files=image_file)
                except Exception as e:
                    logger.info('Telegram notify failed: ' + str(e))
        # Send text
        else:
            payload = {'chat_id': userid, 'parse_mode': "HTML", 'text': status + message}
            try:
                response = requests.post(TELEGRAM_API % (token, "sendMessage"), data=payload)
            except Exception as e:
                logger.info('Telegram notify failed: ' + str(e))
        # Error logging; a failed request (response is None) also counts as failure
        if response is None or response.status_code != 200:
            logger.info("Could not send notification to TelegramBot (token=%s). Response: [%s]",
                        token, response.text if response is not None else 'no response')
            return False
        logger.info("Telegram notifications sent.")
        return True
class SLACK(object):
def notify(self, message, status):
if not headphones.CONFIG.SLACK_ENABLED:
return
import requests
SLACK_URL = headphones.CONFIG.SLACK_URL
channel = headphones.CONFIG.SLACK_CHANNEL
emoji = headphones.CONFIG.SLACK_EMOJI
payload = {'channel': channel, 'text': status + ': ' + message,
'icon_emoji': emoji}
        try:
            response = requests.post(SLACK_URL, json=payload)
        except Exception as e:
            logger.info('Slack notify failed: ' + str(e))
            # bail out here: response would be undefined below
            return False
        sent_successfully = True
        if response.status_code != 200:
            logger.info(
                'Could not send notification to Slack. Response: [%s]',
                response.text)
            sent_successfully = False
        logger.info("Slack notifications sent.")
        return sent_successfully
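# Usage sketch (illustrative only; each notifier reads its credentials from
# headphones.CONFIG, so these calls assume the relevant service is enabled
# and configured):
#
#     PROWL().test('my-prowl-api-key', 0)
#     PUSHOVER().notify('Artist - Album downloaded', 'Download complete')
#     Email().notify('Headphones', 'Download complete')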
| 36,966
|
Python
|
.py
| 839
| 30.859356
| 118
| 0.556274
|
rembo10/headphones
| 3,370
| 601
| 527
|
GPL-3.0
|
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
|
9,274
|
db.py
|
rembo10_headphones/headphones/db.py
|
# This file is part of Headphones.
#
# Headphones is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Headphones is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Headphones. If not, see <http://www.gnu.org/licenses/>.
###################################
# Stolen from Sick-Beard's db.py #
###################################
import time
import sqlite3
import os
import headphones
from headphones import logger
def dbFilename(filename="headphones.db"):
return os.path.join(headphones.DATA_DIR, filename)
def getCacheSize():
# this will protect against typecasting problems produced by empty string and None settings
if not headphones.CONFIG.CACHE_SIZEMB:
# sqlite will work with this (very slowly)
return 0
return int(headphones.CONFIG.CACHE_SIZEMB)
class DBConnection:
def __init__(self, filename="headphones.db"):
self.filename = filename
self.connection = sqlite3.connect(dbFilename(filename), timeout=20)
# don't wait for the disk to finish writing
self.connection.execute("PRAGMA synchronous = OFF")
# default set to Write-Ahead Logging WAL
self.connection.execute("PRAGMA journal_mode = %s" % headphones.CONFIG.JOURNAL_MODE)
        # cache size in MiB from the CACHE_SIZEMB setting (a negative
        # PRAGMA cache_size value means size in KiB to SQLite)
self.connection.execute("PRAGMA cache_size=-%s" % (getCacheSize() * 1024))
self.connection.row_factory = sqlite3.Row
def action(self, query, args=None, upsert_insert_qry=None):
if query is None:
return
sqlResult = None
attempts = 0
dberror = None
while attempts < 10:
try:
with self.connection as c:
# log that previous attempt was locked and we're trying again
if dberror:
if args is None:
logger.debug('SQL: Database was previously locked, trying again. Attempt number %i. Query: %s', attempts + 1, query)
else:
logger.debug('SQL: Database was previously locked, trying again. Attempt number %i. Query: %s. Args: %s', attempts + 1, query, args)
# debugging
# try:
# explain_query = 'EXPLAIN QUERY PLAN ' + query
# if not args:
# sql_results = c.execute(explain_query)
# else:
# sql_results = c.execute(explain_query, args)
# if not args:
# print(explain_query)
# else:
# print(explain_query + ' ' + str(args))
# explain_results = sql_results
# for row in explain_results:
# print row
# except Exception as e:
# print(e)
# Execute query
# time0 = time.time()
if args is None:
sqlResult = c.execute(query)
# logger.debug('SQL: ' + query)
else:
sqlResult = c.execute(query, args)
# logger.debug('SQL: %s. Args: %s', query, args)
# INSERT part of upsert query
if upsert_insert_qry:
sqlResult = c.execute(upsert_insert_qry, args)
# logger.debug('SQL: %s. Args: %s', upsert_insert_qry, args)
# debugging: loose test to log queries taking longer than 5 seconds
# seconds = time.time() - time0
# if seconds > 5:
# if args is None:
# logger.debug("SQL: Query ran for %s seconds: %s", seconds, query)
# else:
# logger.debug("SQL: Query ran for %s seconds: %s with args %s", seconds, query, args)
break
except sqlite3.OperationalError as e:
if "unable to open database file" in str(e) or "database is locked" in str(e):
dberror = e
if args is None:
logger.debug('Database error: %s. Query: %s', e, query)
else:
logger.debug('Database error: %s. Query: %s. Args: %s', e, query, args)
attempts += 1
time.sleep(1)
else:
logger.error('Database error: %s', e)
raise
except sqlite3.DatabaseError as e:
logger.error('Fatal Error executing %s :: %s', query, e)
raise
# log if no results returned due to lock
if not sqlResult and attempts:
if args is None:
logger.warn('SQL: Query failed due to database error: %s. Query: %s', dberror, query)
else:
logger.warn('SQL: Query failed due to database error: %s. Query: %s. Args: %s', dberror, query, args)
return sqlResult
def select(self, query, args=None):
sqlResults = self.action(query, args).fetchall()
if sqlResults is None or sqlResults == [None]:
return []
return sqlResults
def upsert(self, tableName, valueDict, keyDict):
"""
Transactions an Update or Insert to a table based on key.
If the table is not updated then the 'WHERE changes' will be 0 and the table inserted
"""
def genParams(myDict):
return [x + " = ?" for x in list(myDict.keys())]
update_query = "UPDATE " + tableName + " SET " + ", ".join(genParams(valueDict)) + " WHERE " + " AND ".join(genParams(keyDict))
insert_query = ("INSERT INTO " + tableName + " (" + ", ".join(list(valueDict.keys()) + list(keyDict.keys())) + ")" + " SELECT " + ", ".join(
["?"] * len(list(valueDict.keys()) + list(keyDict.keys()))) + " WHERE changes()=0")
try:
self.action(update_query, list(valueDict.values()) + list(keyDict.values()), upsert_insert_qry=insert_query)
except sqlite3.IntegrityError:
logger.info('Queries failed: %s and %s', update_query, insert_query)
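# Usage sketch (hypothetical table and column names, shown for illustration
# only; real callers pass their own schema):
#
#     db = DBConnection()
#     db.upsert('artists', {'Status': 'Active'}, {'ArtistID': 'abc123'})
#     rows = db.select('SELECT * FROM artists WHERE Status = ?', ['Active'])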
| 6,853
|
Python
|
.py
| 133
| 38.105263
| 160
| 0.544734
|
rembo10/headphones
| 3,370
| 601
| 527
|
GPL-3.0
|
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
|
9,275
|
transmission.py
|
rembo10_headphones/headphones/transmission.py
|
# This file is part of Headphones.
#
# Headphones is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Headphones is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Headphones. If not, see <http://www.gnu.org/licenses/>.
import time
import json
from base64 import b64encode
import urllib.parse
import os
from headphones import logger, request
import headphones
# This is just a simple script to send torrents to transmission. The
# intention is to turn this into a class where we can check the state
# of the download, set the download dir, etc.
# TODO: Store torrent id so we can check up on it
_session_id = None
def addTorrent(link, data=None):
method = 'torrent-add'
    if (link.endswith('.torrent') and not link.startswith(('http', 'magnet'))) or data:
if data:
metainfo = b64encode(data).decode("utf-8")
else:
with open(link, 'rb') as f:
metainfo = b64encode(f.read()).decode("utf-8")
arguments = {'metainfo': metainfo, 'download-dir': headphones.CONFIG.DOWNLOAD_TORRENT_DIR}
else:
arguments = {'filename': link, 'download-dir': headphones.CONFIG.DOWNLOAD_TORRENT_DIR}
response = torrentAction(method, arguments)
if not response:
return False
if response['result'] == 'success':
if 'torrent-added' in response['arguments']:
retid = response['arguments']['torrent-added']['hashString']
elif 'torrent-duplicate' in response['arguments']:
retid = response['arguments']['torrent-duplicate']['hashString']
else:
retid = False
logger.info("Torrent sent to Transmission successfully")
return retid
else:
logger.info('Transmission returned status %s' % response['result'])
return False
def getFolder(torrentid):
torrent_folder = None
single_file = False
method = 'torrent-get'
arguments = {'ids': torrentid, 'fields': ['files']}
response = torrentAction(method, arguments)
try:
torrent_files = response['arguments']['torrents'][0]['files']
if torrent_files:
if len(torrent_files) == 1:
torrent_folder = torrent_files[0]['name']
single_file = True
else:
torrent_folder = os.path.split(torrent_files[0]['name'])[0]
torrent_folder = torrent_folder.split(os.sep)[0]
single_file = False
    except (IndexError, KeyError, TypeError):
        torrent_folder = None
        single_file = False
return torrent_folder, single_file
def getName(torrentid):
method = 'torrent-get'
arguments = {'ids': torrentid, 'fields': ['name', 'percentDone']}
    response = torrentAction(method, arguments)
    # Guard against a failed RPC call before indexing into the response
    if not response:
        return None
    percentdone = response['arguments']['torrents'][0]['percentDone']
    torrent_folder_name = response['arguments']['torrents'][0]['name']
tries = 1
while percentdone == 0 and tries < 10:
tries += 1
time.sleep(5)
response = torrentAction(method, arguments)
percentdone = response['arguments']['torrents'][0]['percentDone']
torrent_folder_name = response['arguments']['torrents'][0]['name']
return torrent_folder_name
def setSeedRatio(torrentid, ratio):
method = 'torrent-set'
if ratio != 0:
arguments = {'seedRatioLimit': ratio, 'seedRatioMode': 1, 'ids': torrentid}
else:
arguments = {'seedRatioMode': 2, 'ids': torrentid}
response = torrentAction(method, arguments)
if not response:
return False
def removeTorrent(torrentid, remove_data=False):
method = 'torrent-get'
arguments = {'ids': torrentid, 'fields': ['isFinished', 'name']}
response = torrentAction(method, arguments)
if not response:
return False
try:
finished = response['arguments']['torrents'][0]['isFinished']
name = response['arguments']['torrents'][0]['name']
if finished:
logger.info('%s has finished seeding, removing torrent and data' % name)
method = 'torrent-remove'
if remove_data:
arguments = {'delete-local-data': True, 'ids': torrentid}
else:
arguments = {'ids': torrentid}
response = torrentAction(method, arguments)
return True
else:
logger.info(
'%s has not finished seeding yet, torrent will not be removed, will try again on next run' % name)
    except (IndexError, KeyError, TypeError):
return False
return False
def torrentAction(method, arguments):
global _session_id
host = headphones.CONFIG.TRANSMISSION_HOST
username = headphones.CONFIG.TRANSMISSION_USERNAME
password = headphones.CONFIG.TRANSMISSION_PASSWORD
if not host.startswith('http'):
host = 'http://' + host
if host.endswith('/'):
host = host[:-1]
# Fix the URL. We assume that the user does not point to the RPC endpoint,
# so add it if it is missing.
parts = list(urllib.parse.urlparse(host))
    if parts[0] not in ("http", "https"):
parts[0] = "http"
if not parts[2].endswith("/rpc"):
parts[2] += "/transmission/rpc"
host = urllib.parse.urlunparse(parts)
data = {'method': method, 'arguments': arguments}
data_json = json.dumps(data)
auth = (username, password) if username and password else None
for retry in range(2):
if _session_id is not None:
headers = {'x-transmission-session-id': _session_id}
response = request.request_response(host, method="POST",
data=data_json, headers=headers, auth=auth,
whitelist_status_code=[200, 401, 409])
else:
response = request.request_response(host, auth=auth,
whitelist_status_code=[401, 409])
if response.status_code == 401:
if auth:
logger.error("Username and/or password not accepted by "
"Transmission")
else:
logger.error("Transmission authorization required")
return
elif response.status_code == 409:
_session_id = response.headers['x-transmission-session-id']
if _session_id is None:
logger.error("Expected a Session ID from Transmission, got None")
return
# retry request with new session id
logger.debug("Retrying Transmission request with new session id")
continue
resp_json = response.json()
return resp_json
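# Usage sketch (illustrative; assumes a reachable Transmission daemon whose
# host and credentials are set in headphones.CONFIG):
#
#     torrent_id = addTorrent('magnet:?xt=urn:btih:...')
#     if torrent_id:
#         print(getName(torrent_id))
#         folder, single_file = getFolder(torrent_id)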
| 7,010
|
Python
|
.py
| 165
| 34.363636
| 114
| 0.637754
|
rembo10/headphones
| 3,370
| 601
| 527
|
GPL-3.0
|
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
|
9,276
|
config_test.py
|
rembo10_headphones/headphones/config_test.py
|
import mock
from mock import MagicMock
import headphones.config
import re
from . import unittestcompat
from .unittestcompat import TestCase, TestArgs
class ConfigApiTest(TestCase):
""" Common tests for headphones.Config
Common tests for headphones.Config This test suite guarantees, that external
API of the Config class conforms all expectations of other modules.
"""
def _setUpConfigMock(self, mock, sections):
# every constructor `xx = ConfigObj()` in headphones.config will return
# this mock:
self.config_mock = self.config_module_mock.return_value = mock
if sections:
mock.__contains__.side_effect = sections.__contains__
mock.__getitem__.side_effect = sections.__getitem__
mock.__setitem__.side_effect = sections.__setitem__
mock.items.side_effect = sections.items
return mock
def setUp(self):
# patch for low-level ConfigObj for entire test class
# result - each test_* method will get one additional
# argument during testing
self.config_module_mock_patcher = mock.patch('headphones.config.ConfigObj', name='ConfigObjModuleMock')
self.config_module_mock = self.config_module_mock_patcher.start()
existing_sections = {'General': {}, 'Email': {}}
# every constructor `xx = ConfigObj()` in headphones.config will return
# this mock:
self._setUpConfigMock(MagicMock(), existing_sections)
def tearDown(self):
self.config_module_mock_patcher.stop()
def test_constructor(self):
""" Config : creating """
cf = headphones.config.Config('/tmp/notexist')
self.assertIsInstance(cf, headphones.config.Config)
@TestArgs(
        # these sections are explicitly added in setUp:
('General', False),
('Email', False),
        # these sections are created neither in the test body nor in the
        # Config module
('some_new_section_never_defined', True),
('another_new_section_never_defined', True),
)
def test_check_section(self, section_name, expected_return):
""" Config : check_section """
path = '/tmp/notexist'
# call methods
c = headphones.config.Config(path)
res = c.check_section(section_name)
res2 = c.check_section(section_name)
# assertions:
self.assertEqual(res, expected_return)
self.assertFalse(res2)
@TestArgs(
('api_enabled', 0, int),
('Api_Key', '', str),
)
def test_check_setting(self, setting_name, expected_return, expected_instance):
""" Config: check_setting , basic cases """
path = '/tmp/notexist'
# call methods
c = headphones.config.Config(path)
res = c.check_setting(setting_name)
res2 = c.check_setting(setting_name)
# assertions:
self.assertIsInstance(res, expected_instance)
self.assertEqual(res, expected_return)
self.assertEqual(res, res2)
@TestArgs(
(''),
('This_IsNew_Name'),
)
def test_check_setting_raise_on_unknown_settings(self, setting_name):
""" Config: check_setting should raise on unknown """
path = '/tmp/notexist'
exc_regex = re.compile(setting_name, re.IGNORECASE)
# call methods
c = headphones.config.Config(path)
# assertions:
with self.assertRaisesRegex(KeyError, exc_regex):
c.check_setting(setting_name)
pass
@TestArgs(
(None)
)
def test_check_setting_raise_on_none(self, setting_name):
""" Config: check_setting shoud raise on None name """
path = '/tmp/notexist'
# call methods
c = headphones.config.Config(path)
# assertions:
with self.assertRaises(AttributeError):
c.check_setting(setting_name)
pass
def test_write(self):
""" Config : write """
path = '/tmp/notexist'
# overload mocks, defined in setUp:
old_conf_mock = self._setUpConfigMock(MagicMock(), {'a': {}})
option_name_not_from_definitions = 'some_invalid_option_with_super_uniq1_name'
option_name_not_from_definitions_value = 1
old_conf_mock['asdf'] = {option_name_not_from_definitions: option_name_not_from_definitions_value}
# call methods
cf = headphones.config.Config(path)
# overload mock-patching for NEW CONFIG
new_patcher = mock.patch('headphones.config.ConfigObj', name='NEW_ConfigObjModuleMock_FOR_WRITE')
new_conf_module_mock = new_patcher.start()
new_conf_mock = \
new_conf_module_mock.return_value = \
MagicMock()
cf.write()
new_patcher.stop()
# assertions:
self.assertFalse(old_conf_mock.write.called, 'write not called for old config')
self.assertTrue(new_conf_mock.write.called, 'write called for new config')
self.assertEqual(new_conf_mock.filename, path)
new_conf_mock['General'].__setitem__.assert_any_call('download_dir', '')
# from 3.5... new_conf_mock['asdf'].__setitem__.assert_not_called('download_dir', '')
new_conf_mock['asdf'].__setitem__.assert_any_call(option_name_not_from_definitions, option_name_not_from_definitions_value)
@unittestcompat.skip("process_kwargs should be removed")
def test_process_kwargs(self):
self.assertTrue(True)
# ===========================================================
# GET ATTR
# ===========================================================
@TestArgs(
('ADD_ALBUM_ART', True),
('ALBUM_ART_FORMAT', 'shmolder'),
('API_ENABLED', 1),
('API_KEY', 'Hello'),
)
def test__getattr__ConfValues(self, name, value):
""" Config: __getattr__ with setting value explicit """
path = '/tmp/notexist'
self.config_mock["General"] = {name.lower(): value}
# call methods
c = headphones.config.Config(path)
act = c.__getattr__(name)
# assertions:
self.assertEqual(act, value)
@TestArgs(
('ADD_ALBUM_ART', 0),
('ALBUM_ART_FORMAT', 'folder'),
('API_ENABLED', 0),
('API_KEY', ''),
)
def test__getattr__ConfValuesDefault(self, name, value):
""" Config: __getattr__ from config(by braces), default values """
path = '/tmp/notexist'
# call methods
c = headphones.config.Config(path)
res = c.__getattr__(name)
# assertions:
self.assertEqual(res, value)
def test__getattr__ConfValuesDefaultUsingDotNotation(self):
""" Config: __getattr__ from config (by dot), default values """
path = '/tmp/notexist'
# call methods
c = headphones.config.Config(path)
# assertions:
self.assertEqual(c.ALBUM_ART_FORMAT, 'folder')
self.assertEqual(c.API_ENABLED, 0)
self.assertEqual(c.API_KEY, '')
def test__getattr__OwnAttributes(self):
""" Config: __getattr__ access own attrs """
path = '/tmp/notexist'
# call methods
c = headphones.config.Config(path)
# assertions:
self.assertIsNotNone(c)
self.assertIn('<headphones.config.Config', c.__str__())
# ===========================================================
# SET ATTR
# ===========================================================
@TestArgs(
('ADD_ALBUM_ART', True),
('ALBUM_ART_FORMAT', 'shmolder'),
('API_ENABLED', 1),
('API_KEY', 'Hello'),
)
def test__setattr__ConfValuesDefault(self, name, value):
""" Config: __setattr__ with setting value explicit """
path = '/tmp/notexist'
# call methods
c = headphones.config.Config(path)
act = c.__setattr__(name, value)
# assertions:
self.assertEqual(self.config_mock["General"][name.lower()], value)
self.assertEqual(act, value)
def test__setattr__ExplicitSetUsingDotNotation(self):
""" Config: __setattr__ with setting values using dot notation """
path = '/tmp/notexist'
# call methods
c = headphones.config.Config(path)
act1 = c.ALBUM_ART_FORMAT = 'Apple'
act2 = c.API_ENABLED = True
act3 = c.API_KEY = 123
# assertions:
self.assertEqual(self.config_mock["General"]['album_art_format'], 'Apple')
self.assertEqual(self.config_mock["General"]['api_enabled'], 1)
self.assertEqual(self.config_mock["General"]['api_key'], '123')
self.assertEqual(act1, 'Apple')
self.assertEqual(act2, 1)
        # TODO: check this strange behaviour. I expected to see '123' here, not 123.
self.assertEqual(act3, 123)
# ===========================================================
# NEWZNABS
#
@TestArgs(
('', []),
('ABCDEF', [('A', 'B', 'C'), ('D', 'E', 'F')]),
(['ABC', 'DEF'], []),
([1], []),
([1, 2], []),
([1, 2, 3], [(1, 2, 3)]),
([1, 2, 3, 'Aaa'], [(1, 2, 3)]),
([1, 2, 3, 'Aaa', 'Bbba'], [(1, 2, 3)]),
([1, 2, 3, 'Aaa', 'Bbba', 'Ccccc'], [(1, 2, 3), ('Aaa', 'Bbba', 'Ccccc')]),
([1, 2, 3, 'Aaa', 'Bbba', 'Ccccc', 'Ddddda'], [(1, 2, 3), ('Aaa', 'Bbba', 'Ccccc')]),
)
def test_get_extra_newznabs(self, conf_value, expected):
""" Config: get_extra_newznabs """
path = '/tmp/notexist'
# itertools.izip(*[itertools.islice('', i, None, 3) for i in range(3)])
# set up mocks:
# 'EXTRA_NEWZNABS': (list, 'Newznab', ''),
# 'EXTRA_TORZNABS': (list, 'Torznab', ''),
self.config_mock["Newznab"] = {"extra_newznabs": conf_value}
# call methods
c = headphones.config.Config(path)
res = c.get_extra_newznabs()
# assertions:
self.assertEqual(res, expected)
def test_clear_extra_newznabs(self):
""" Config: clear_extra_newznabs """
path = '/tmp/notexist'
random_value = 1827746
self.config_mock["Newznab"] = {"extra_newznabs": [1, 2, 3]}
self.config_mock["Newznab"] = {"do_not_touch": random_value}
# call methods
c = headphones.config.Config(path)
res = c.clear_extra_newznabs()
# assertions:
self.assertIsNone(res)
self.assertEqual(self.config_mock["Newznab"]["extra_newznabs"], [])
self.assertEqual(self.config_mock["Newznab"]["do_not_touch"], random_value)
@TestArgs(
([], [''], ['']),
([], 'ABCDEF', ['A', 'B', 'C', 'D', 'E', 'F']),
([1, 2, [False, True]], ['3', [0, 0]], [1, 2, [False, True], '3', [0, 0]]),
)
def test_add_extra_newznab(self, initial, added, expected):
""" Config: add_extra_newznab """
path = '/tmp/notexist'
self.config_mock["Newznab"] = {"extra_newznabs": initial}
# call methods
c = headphones.config.Config(path)
c.add_extra_newznab(added)
act = self.config_mock["Newznab"]["extra_newznabs"]
# assertions:
self.assertEqual(act, expected)
@TestArgs(
(None),
([]),
([1, 2, 3]),
([True]),
)
def test_add_extra_newznab_raise_on_none(self, initial):
""" Config: add_extra_newznab should raise on None adding"""
path = '/tmp/notexist'
self.config_mock["Newznab"] = {"extra_newznabs": initial}
# call methods
c = headphones.config.Config(path)
with self.assertRaises(TypeError):
c.add_extra_newznab(None)
pass
# ===========================================================
# TORZNABS
    # TODO: this is copy-pasted from the NEWZNAB tests above. Make the tests better, please refactor them
#
# TODO: Fix tests for following:
# CONFIG_VERSION == '5' each entry = 'host, api, enabled'
# CONFIG_VERSION > '5' each entry = 'host, api, seed ratio, enabled'
# @TestArgs(
# ('', []),
# ('ABCDEF', [('A', 'B', 'C'), ('D', 'E', 'F')]),
# (['ABC', 'DEF'], []),
# ([1], []),
# ([1, 2], []),
# ([1, 2, 3], [(1, 2, 3)]),
#
# ([1, 2, 3, 'Aaa'], [(1, 2, 3)]),
# ([1, 2, 3, 'Aaa', 'Bbba'], [(1, 2, 3)]),
# ([1, 2, 3, 'Aaa', 'Bbba', 'Ccccc'], [(1, 2, 3), ('Aaa', 'Bbba', 'Ccccc')]),
# ([1, 2, 3, 'Aaa', 'Bbba', 'Ccccc', 'Ddddda'], [(1, 2, 3), ('Aaa', 'Bbba', 'Ccccc')]),
# )
# def test_get_extra_torznabs(self, conf_value, expected):
# """ Config: get_extra_torznabs """
# path = '/tmp/notexist'
#
# # itertools.izip(*[itertools.islice('', i, None, 3) for i in range(3)])
# # set up mocks:
# # 'EXTRA_TORZNABS': (list, '', ''),
# self.config_mock["Torznab"] = {"extra_torznabs": conf_value}
#
# # call methods
# c = headphones.config.Config(path)
# res = c.get_extra_torznabs()
#
# # assertions:
# self.assertEqual(res, expected)
#
# def test_clear_extra_torznabs(self):
# """ Config: clear_extra_torznabs """
# path = '/tmp/notexist'
#
# random_value = -1292721
# self.config_mock["Torznab"] = {"extra_torznabs": [1, 2, 3]}
# self.config_mock["Torznab"] = {"do_not_touch": random_value}
#
# # call methods
# c = headphones.config.Config(path)
# res = c.clear_extra_torznabs()
#
# # assertions:
# self.assertIsNone(res)
# self.assertEqual(self.config_mock["Torznab"]["extra_torznabs"], [])
# self.assertEqual(self.config_mock["Torznab"]["do_not_touch"], random_value)
#
# @TestArgs(
# ([], [''], ['']),
# ([], 'ABCDEF', ['A', 'B', 'C', 'D', 'E', 'F']),
#
# ([1, 2, [False, True]], ['3', [0, 0]], [1, 2, [False, True], '3', [0, 0]]),
# )
# def test_add_extra_torznab(self, initial, added, expected):
# """ Config: add_extra_torznab """
# path = '/tmp/notexist'
#
# self.config_mock["Torznab"] = {"extra_torznabs": initial}
#
# # call methods
# c = headphones.config.Config(path)
# c.add_extra_torznab(added)
# act = self.config_mock["Torznab"]["extra_torznabs"]
#
# # assertions:
# self.assertEqual(act, expected)
#
# @TestArgs(
# (None),
# ([]),
# ([1, 2, 3]),
# ([True]),
# )
# def test_add_extra_torznab_raise_on_none(self, initial):
# """ Config: add_extra_torznab should raise on None adding"""
# path = '/tmp/notexist'
#
# self.config_mock["Torznab"] = {"extra_torznabs": initial}
#
# # call methods
# c = headphones.config.Config(path)
# with self.assertRaises(TypeError):
# c.add_extra_torznab(None)
# pass
| 14,946
|
Python
|
.py
| 369
| 32.918699
| 131
| 0.555058
|
rembo10/headphones
| 3,370
| 601
| 527
|
GPL-3.0
|
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
|
9,277
|
lock.py
|
rembo10_headphones/headphones/lock.py
|
"""
Locking-related classes
"""
import time
import threading
import queue
import headphones.logger
class TimedLock(object):
"""
    Enforce a request rate limit if applicable. This uses a lock so there
    is synchronized access to the API. When N threads enter this method, the
    first will pass through, since there was no last request recorded.
    The last request time will be set. Then, the second thread will unblock,
    and see that the last request was X seconds ago. It will sleep
    (request_limit - X) seconds, and then continue. Then the third one will
    unblock, and so on. After all threads finish, the total time will be at
    least (N * request_limit) seconds. If some request takes longer than
    request_limit seconds, the next unblocked thread will wait less.
"""
def __init__(self, minimum_delta=0):
"""
Set up the lock
"""
self.lock = threading.Lock()
self.last_used = 0
self.minimum_delta = minimum_delta
self.queue = queue.Queue()
def __enter__(self):
"""
Called when with lock: is invoked
"""
self.lock.acquire()
delta = time.time() - self.last_used
sleep_amount = self.minimum_delta - delta
if sleep_amount >= 0:
# zero sleeps give the cpu a chance to task-switch
headphones.logger.debug('Sleeping %s (interval)', sleep_amount)
time.sleep(sleep_amount)
while not self.queue.empty():
try:
seconds = self.queue.get(False)
headphones.logger.debug('Sleeping %s (queued)', seconds)
time.sleep(seconds)
except queue.Empty:
continue
self.queue.task_done()
def __exit__(self, type, value, traceback):
"""
Called when exiting the with block.
"""
self.last_used = time.time()
self.lock.release()
def snooze(self, seconds):
"""
Asynchronously add time to the next request. Can be called outside
of the lock context, but it is possible for the next lock holder
to not check the queue until after something adds time to it.
"""
# We use a queue so that we don't have to synchronize
# across threads and with or without locks
headphones.logger.info('Adding %s to queue', seconds)
self.queue.put(seconds)
class FakeLock(object):
"""
If no locking or request throttling is needed, use this
"""
def __enter__(self):
"""
Do nothing on enter
"""
pass
def __exit__(self, type, value, traceback):
"""
Do nothing on exit
"""
pass
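if __name__ == "__main__":
    # Minimal sketch of the throttling behaviour described in the TimedLock
    # docstring: the first block passes straight through, the second sleeps
    # so that at least one second elapses between the two sections.
    lock = TimedLock(minimum_delta=1)
    with lock:
        pass  # first request; no previous use recorded, so no sleep
    with lock:
        pass  # second request; sleeps ~1 second before proceeding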
| 2,734
|
Python
|
.py
| 76
| 28.078947
| 76
| 0.61867
|
rembo10/headphones
| 3,370
| 601
| 527
|
GPL-3.0
|
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
|
9,278
|
config.py
|
rembo10_headphones/headphones/config.py
|
import itertools
import os
import re
import ast
from configparser import ConfigParser
import headphones.logger
def bool_int(value):
"""
Casts a config value into a 0 or 1
"""
if isinstance(value, str):
if value.lower() in ('', '0', 'false', 'f', 'no', 'n', 'off'):
value = 0
return int(bool(value))
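# Illustrative results of the casting rules above:
#   bool_int('no') == 0, bool_int('off') == 0, bool_int('1') == 1,
#   bool_int(True) == 1, bool_int(0) == 0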
class path(str):
"""Internal 'marker' type for paths in config."""
@staticmethod
def __call__(val):
return path(val)
def __new__(cls, *args, **kw):
hstr = str.__new__(cls, *args, **kw)
return hstr
def __repr__(self):
return 'headphones.config.path(%s)' % self
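# e.g. path('/music') behaves exactly like the str '/music' but repr()s as
# headphones.config.path(/music), keeping path-typed settings distinguishable
# from plain strings (an inference from the marker type above).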
_CONFIG_DEFINITIONS = {
'ADD_ALBUM_ART': (int, 'General', 0),
'ADVANCEDENCODER': (str, 'General', ''),
'ALBUM_ART_FORMAT': (str, 'General', 'folder'),
'ALBUM_ART_MIN_WIDTH': (str, 'General', ''),
'ALBUM_ART_MAX_WIDTH': (str, 'General', ''),
    # This is used in importer.py to determine how complete an album needs to
    # be to count as "downloaded". Percentage from 0-100.
'ALBUM_COMPLETION_PCT': (int, 'Advanced', 80),
'API_ENABLED': (int, 'General', 0),
'API_KEY': (str, 'General', ''),
'ORPHEUS': (int, 'Orpheus.network', 0),
'ORPHEUS_PASSWORD': (str, 'Orpheus.network', ''),
'ORPHEUS_RATIO': (str, 'Orpheus.network', ''),
'ORPHEUS_USERNAME': (str, 'Orpheus.network', ''),
'ORPHEUS_URL': (str, 'Orpheus.network', 'https://orpheus.network'),
'AUTOWANT_ALL': (int, 'General', 0),
'AUTOWANT_MANUALLY_ADDED': (int, 'General', 1),
'AUTOWANT_UPCOMING': (int, 'General', 1),
'AUTO_ADD_ARTISTS': (int, 'General', 1),
'BITRATE': (int, 'General', 192),
'BLACKHOLE': (int, 'General', 0),
'BLACKHOLE_DIR': (path, 'General', ''),
'BOXCAR_ENABLED': (int, 'Boxcar', 0),
'BOXCAR_ONSNATCH': (int, 'Boxcar', 0),
'BOXCAR_TOKEN': (str, 'Boxcar', ''),
'CACHE_DIR': (path, 'General', ''),
'CACHE_SIZEMB': (int, 'Advanced', 32),
'CHECK_GITHUB': (int, 'General', 1),
'CHECK_GITHUB_INTERVAL': (int, 'General', 360),
'CHECK_GITHUB_ON_STARTUP': (int, 'General', 1),
'CLEANUP_FILES': (int, 'General', 0),
'CONFIG_VERSION': (str, 'General', '2'),
'CORRECT_METADATA': (int, 'General', 0),
'CUE_SPLIT': (int, 'General', 1),
'CUE_SPLIT_FLAC_PATH': (path, 'General', ''),
'CUE_SPLIT_SHNTOOL_PATH': (path, 'General', ''),
'CUSTOMAUTH': (int, 'General', 0),
'CUSTOMHOST': (str, 'General', 'localhost'),
'CUSTOMPASS': (str, 'General', ''),
'CUSTOMPORT': (int, 'General', 5000),
'CUSTOMSLEEP': (int, 'General', 1),
'CUSTOMUSER': (str, 'General', ''),
'DELETE_LOSSLESS_FILES': (int, 'General', 1),
'DELUGE_HOST': (str, 'Deluge', ''),
'DELUGE_CERT': (str, 'Deluge', ''),
'DELUGE_PASSWORD': (str, 'Deluge', ''),
'DELUGE_LABEL': (str, 'Deluge', ''),
'DELUGE_DONE_DIRECTORY': (str, 'Deluge', ''),
'DELUGE_DOWNLOAD_DIRECTORY': (str, 'Deluge', ''),
'DELUGE_PAUSED': (int, 'Deluge', 0),
'DESTINATION_DIR': (str, 'General', ''),
'DETECT_BITRATE': (int, 'General', 0),
'DO_NOT_PROCESS_UNMATCHED': (int, 'General', 0),
'DOWNLOAD_DIR': (path, 'General', ''),
'DOWNLOAD_SCAN_INTERVAL': (int, 'General', 5),
'DOWNLOAD_TORRENT_DIR': (path, 'General', ''),
'DO_NOT_OVERRIDE_GIT_BRANCH': (int, 'General', 0),
'EMAIL_ENABLED': (int, 'Email', 0),
'EMAIL_FROM': (str, 'Email', ''),
'EMAIL_TO': (str, 'Email', ''),
'EMAIL_SMTP_SERVER': (str, 'Email', ''),
'EMAIL_SMTP_USER': (str, 'Email', ''),
'EMAIL_SMTP_PASSWORD': (str, 'Email', ''),
'EMAIL_SMTP_PORT': (int, 'Email', 25),
'EMAIL_SSL': (int, 'Email', 0),
'EMAIL_TLS': (int, 'Email', 0),
'EMAIL_ONSNATCH': (int, 'Email', 0),
'EMBED_ALBUM_ART': (int, 'General', 0),
'EMBED_LYRICS': (int, 'General', 0),
'ENABLE_HTTPS': (int, 'General', 0),
'ENCODER': (str, 'General', 'ffmpeg'),
'ENCODERFOLDER': (path, 'General', ''),
'ENCODERLOSSLESS': (int, 'General', 1),
'ENCODEROUTPUTFORMAT': (str, 'General', 'mp3'),
'ENCODERQUALITY': (int, 'General', 2),
'ENCODERVBRCBR': (str, 'General', 'cbr'),
'ENCODER_MULTICORE': (int, 'General', 0),
'ENCODER_MULTICORE_COUNT': (int, 'General', 0),
'ENCODER_PATH': (path, 'General', ''),
'EXTRAS': (str, 'General', ''),
'EXTRA_NEWZNABS': (list, 'Newznab', ''),
'EXTRA_TORZNABS': (list, 'Torznab', ''),
'FILE_FORMAT': (str, 'General', '$Track $Artist - $Album [$Year] - $Title'),
'FILE_PERMISSIONS': (str, 'General', '0644'),
'FILE_PERMISSIONS_ENABLED': (bool_int, 'General', True),
'FILE_UNDERSCORES': (int, 'General', 0),
'FOLDER_FORMAT': (str, 'General', '$Artist/$Album [$Year]'),
'FOLDER_PERMISSIONS_ENABLED': (bool_int, 'General', True),
'FOLDER_PERMISSIONS': (str, 'General', '0755'),
'FREEZE_DB': (int, 'General', 0),
'GIT_BRANCH': (str, 'General', 'master'),
'GIT_PATH': (path, 'General', ''),
'GIT_USER': (str, 'General', 'rembo10'),
'GROWL_ENABLED': (int, 'Growl', 0),
'GROWL_HOST': (str, 'Growl', ''),
'GROWL_ONSNATCH': (int, 'Growl', 0),
'GROWL_PASSWORD': (str, 'Growl', ''),
'HEADPHONES_INDEXER': (bool_int, 'General', False),
'HPPASS': (str, 'General', ''),
'HPUSER': (str, 'General', ''),
'HTTPS_CERT': (path, 'General', ''),
'HTTPS_KEY': (path, 'General', ''),
'HTTP_HOST': (str, 'General', 'localhost'),
'HTTP_PASSWORD': (str, 'General', ''),
'HTTP_PORT': (int, 'General', 8181),
'HTTP_PROXY': (int, 'General', 0),
'HTTP_ROOT': (str, 'General', '/'),
'HTTP_USERNAME': (str, 'General', ''),
'IDTAG': (int, 'Beets', 0),
'IGNORE_CLEAN_RELEASES': (int, 'General', 0),
'IGNORED_WORDS': (str, 'General', ''),
'IGNORED_FOLDERS': (list, 'Advanced', []), # path
'IGNORED_FILES': (list, 'Advanced', []), # path
'INCLUDE_EXTRAS': (int, 'General', 0),
'INTERFACE': (str, 'General', 'default'),
'JOIN_APIKEY': (str, 'Join', ''),
'JOIN_DEVICEID': (str, 'Join', ''),
'JOIN_ENABLED': (int, 'Join', 0),
'JOIN_ONSNATCH': (int, 'Join', 0),
'JOURNAL_MODE': (str, 'Advanced', 'wal'),
'KEEP_NFO': (int, 'General', 0),
'KEEP_TORRENT_FILES': (int, 'General', 0),
'KEEP_TORRENT_FILES_DIR': (path, 'General', ''),
'LASTFM_USERNAME': (str, 'General', ''),
'LASTFM_APIKEY': (str, 'General', ''),
'LAUNCH_BROWSER': (int, 'General', 1),
'LIBRARYSCAN': (int, 'General', 1),
'LIBRARYSCAN_INTERVAL': (int, 'General', 24),
'LMS_ENABLED': (int, 'LMS', 0),
'LMS_HOST': (str, 'LMS', ''),
'LOG_DIR': (path, 'General', ''),
'LOSSLESS_BITRATE_FROM': (int, 'General', 0),
'LOSSLESS_BITRATE_TO': (int, 'General', 0),
'LOSSLESS_DESTINATION_DIR': (path, 'General', ''),
'MB_IGNORE_AGE': (int, 'General', 365),
'MB_IGNORE_AGE_MISSING': (int, 'General', 0),
'MIRROR': (str, 'General', 'musicbrainz.org'),
'MOVE_FILES': (int, 'General', 0),
'MPC_ENABLED': (bool_int, 'MPC', False),
'MUSIC_DIR': (path, 'General', ''),
'MUSIC_ENCODER': (int, 'General', 0),
'NEWZNAB': (int, 'Newznab', 0),
'NEWZNAB_APIKEY': (str, 'Newznab', ''),
'NEWZNAB_ENABLED': (int, 'Newznab', 1),
'NEWZNAB_HOST': (str, 'Newznab', ''),
'NMA_APIKEY': (str, 'NMA', ''),
'NMA_ENABLED': (int, 'NMA', 0),
'NMA_ONSNATCH': (int, 'NMA', 0),
'NMA_PRIORITY': (int, 'NMA', 0),
'NUMBEROFSEEDERS': (str, 'General', '10'),
'NZBGET_CATEGORY': (str, 'NZBget', ''),
'NZBGET_HOST': (str, 'NZBget', ''),
'NZBGET_PASSWORD': (str, 'NZBget', ''),
'NZBGET_PRIORITY': (int, 'NZBget', 0),
'NZBGET_USERNAME': (str, 'NZBget', 'nzbget'),
'NZBSORG': (int, 'NZBsorg', 0),
'NZBSORG_HASH': (str, 'NZBsorg', ''),
'NZBSORG_UID': (str, 'NZBsorg', ''),
'NZB_DOWNLOADER': (int, 'General', 0),
'OFFICIAL_RELEASES_ONLY': (int, 'General', 0),
'OMGWTFNZBS': (int, 'omgwtfnzbs', 0),
'OMGWTFNZBS_APIKEY': (str, 'omgwtfnzbs', ''),
'OMGWTFNZBS_UID': (str, 'omgwtfnzbs', ''),
'OPEN_MAGNET_LINKS': (int, 'General', 0), # 0: Ignore, 1: Open, 2: Convert, 3: Embed (rtorrent)
'MAGNET_LINKS': (int, 'General', 0),
'OSX_NOTIFY_APP': (str, 'OSX_Notify', '/Applications/Headphones'),
'OSX_NOTIFY_ENABLED': (int, 'OSX_Notify', 0),
'OSX_NOTIFY_ONSNATCH': (int, 'OSX_Notify', 0),
'PIRATEBAY': (int, 'Piratebay', 0),
'PIRATEBAY_PROXY_URL': (str, 'Piratebay', ''),
'PIRATEBAY_RATIO': (str, 'Piratebay', ''),
'PLEX_CLIENT_HOST': (str, 'Plex', ''),
'PLEX_ENABLED': (int, 'Plex', 0),
'PLEX_NOTIFY': (int, 'Plex', 0),
'PLEX_PASSWORD': (str, 'Plex', ''),
'PLEX_SERVER_HOST': (str, 'Plex', ''),
'PLEX_UPDATE': (int, 'Plex', 0),
'PLEX_USERNAME': (str, 'Plex', ''),
'PLEX_TOKEN': (str, 'Plex', ''),
'PREFERRED_BITRATE': (str, 'General', ''),
'PREFERRED_BITRATE_ALLOW_LOSSLESS': (int, 'General', 0),
'PREFERRED_BITRATE_HIGH_BUFFER': (int, 'General', 0),
'PREFERRED_BITRATE_LOW_BUFFER': (int, 'General', 0),
'PREFERRED_QUALITY': (int, 'General', 0),
'PREFERRED_WORDS': (str, 'General', ''),
'PREFER_TORRENTS': (int, 'General', 0),
'PROWL_ENABLED': (int, 'Prowl', 0),
'PROWL_KEYS': (str, 'Prowl', ''),
'PROWL_ONSNATCH': (int, 'Prowl', 0),
'PROWL_PRIORITY': (int, 'Prowl', 0),
'PUSHALOT_APIKEY': (str, 'Pushalot', ''),
'PUSHALOT_ENABLED': (int, 'Pushalot', 0),
'PUSHALOT_ONSNATCH': (int, 'Pushalot', 0),
'PUSHBULLET_APIKEY': (str, 'PushBullet', ''),
'PUSHBULLET_DEVICEID': (str, 'PushBullet', ''),
'PUSHBULLET_ENABLED': (int, 'PushBullet', 0),
'PUSHBULLET_ONSNATCH': (int, 'PushBullet', 0),
'PUSHOVER_APITOKEN': (str, 'Pushover', ''),
'PUSHOVER_ENABLED': (int, 'Pushover', 0),
'PUSHOVER_KEYS': (str, 'Pushover', ''),
'PUSHOVER_ONSNATCH': (int, 'Pushover', 0),
'PUSHOVER_PRIORITY': (int, 'Pushover', 0),
'QBITTORRENT_HOST': (str, 'QBitTorrent', ''),
'QBITTORRENT_LABEL': (str, 'QBitTorrent', ''),
'QBITTORRENT_PASSWORD': (str, 'QBitTorrent', ''),
'QBITTORRENT_USERNAME': (str, 'QBitTorrent', ''),
'RENAME_FILES': (int, 'General', 0),
'RENAME_SINGLE_DISC_IGNORE': (int, 'General', 0),
'RENAME_UNPROCESSED': (bool_int, 'General', 1),
'RENAME_FROZEN': (bool_int, 'General', 1),
'REPLACE_EXISTING_FOLDERS': (int, 'General', 0),
'KEEP_ORIGINAL_FOLDER': (int, 'General', 0),
'REQUIRED_WORDS': (str, 'General', ''),
'RUTRACKER': (int, 'Rutracker', 0),
'RUTRACKER_PASSWORD': (str, 'Rutracker', ''),
'RUTRACKER_RATIO': (str, 'Rutracker', ''),
'RUTRACKER_USER': (str, 'Rutracker', ''),
'RUTRACKER_COOKIE': (str, 'Rutracker', ''),
'SAB_APIKEY': (str, 'SABnzbd', ''),
'SAB_CATEGORY': (str, 'SABnzbd', ''),
'SAB_HOST': (str, 'SABnzbd', ''),
'SAB_PASSWORD': (str, 'SABnzbd', ''),
'SAB_USERNAME': (str, 'SABnzbd', ''),
'SAMPLINGFREQUENCY': (int, 'General', 44100),
'SEARCH_INTERVAL': (int, 'General', 1440),
'SLACK_ENABLED': (int, 'Slack', 0),
'SLACK_URL': (str, 'Slack', ''),
'SLACK_CHANNEL': (str, 'Slack', ''),
'SLACK_EMOJI': (str, 'Slack', ''),
'SLACK_ONSNATCH': (int, 'Slack', 0),
'SOFT_CHROOT': (path, 'General', ''),
'SONGKICK_APIKEY': (str, 'Songkick', 'nd1We7dFW2RqxPw8'),
'SONGKICK_ENABLED': (int, 'Songkick', 1),
'SONGKICK_FILTER_ENABLED': (int, 'Songkick', 0),
'SONGKICK_LOCATION': (str, 'Songkick', ''),
'SOULSEEK_API_URL': (str, 'Soulseek', ''),
'SOULSEEK_API_KEY': (str, 'Soulseek', ''),
'SOULSEEK_DOWNLOAD_DIR': (str, 'Soulseek', ''),
'SOULSEEK_INCOMPLETE_DOWNLOAD_DIR': (str, 'Soulseek', ''),
'SOULSEEK': (int, 'Soulseek', 0),
'SUBSONIC_ENABLED': (int, 'Subsonic', 0),
'SUBSONIC_HOST': (str, 'Subsonic', ''),
'SUBSONIC_PASSWORD': (str, 'Subsonic', ''),
'SUBSONIC_USERNAME': (str, 'Subsonic', ''),
'SYNOINDEX_ENABLED': (int, 'Synoindex', 0),
'TELEGRAM_TOKEN': (str, 'Telegram', ''),
'TELEGRAM_USERID': (str, 'Telegram', ''),
'TELEGRAM_ENABLED': (int, 'Telegram', 0),
'TELEGRAM_ONSNATCH': (int, 'Telegram', 0),
'TORRENTBLACKHOLE_DIR': (str, 'General', ''),
'TORRENT_DOWNLOADER': (int, 'General', 0),
'TORRENT_REMOVAL_INTERVAL': (int, 'General', 720),
'TORZNAB': (int, 'Torznab', 0),
'TORZNAB_APIKEY': (str, 'Torznab', ''),
'TORZNAB_ENABLED': (int, 'Torznab', 1),
'TORZNAB_HOST': (str, 'Torznab', ''),
'TORZNAB_RATIO': (str, 'Torznab', ''),
'TRANSMISSION_HOST': (str, 'Transmission', ''),
'TRANSMISSION_PASSWORD': (str, 'Transmission', ''),
'TRANSMISSION_USERNAME': (str, 'Transmission', ''),
'TWITTER_ENABLED': (int, 'Twitter', 0),
'TWITTER_ONSNATCH': (int, 'Twitter', 0),
'TWITTER_PASSWORD': (str, 'Twitter', ''),
'TWITTER_PREFIX': (str, 'Twitter', 'Headphones'),
'TWITTER_USERNAME': (str, 'Twitter', ''),
'UPDATE_DB_INTERVAL': (int, 'General', 24),
'USENET_RETENTION': (int, 'General', '1500'),
'UTORRENT_HOST': (str, 'uTorrent', ''),
'UTORRENT_LABEL': (str, 'uTorrent', ''),
'UTORRENT_PASSWORD': (str, 'uTorrent', ''),
'UTORRENT_USERNAME': (str, 'uTorrent', ''),
'VERIFY_SSL_CERT': (bool_int, 'Advanced', 1),
'WAIT_UNTIL_RELEASE_DATE': (int, 'General', 0),
'REDACTED': (int, 'Redacted', 0),
'REDACTED_USERNAME': (str, 'Redacted', ''),
'REDACTED_PASSWORD': (str, 'Redacted', ''),
'REDACTED_RATIO': (str, 'Redacted', ''),
'REDACTED_USE_FLTOKEN': (int, 'Redacted', 0),
'XBMC_ENABLED': (int, 'XBMC', 0),
'XBMC_HOST': (str, 'XBMC', ''),
'XBMC_NOTIFY': (int, 'XBMC', 0),
'XBMC_PASSWORD': (str, 'XBMC', ''),
'XBMC_UPDATE': (int, 'XBMC', 0),
'XBMC_USERNAME': (str, 'XBMC', ''),
'XLDPROFILE': (str, 'General', ''),
'BANDCAMP': (int, 'General', 0),
'BANDCAMP_DIR': (path, 'General', '')
}
# pylint:disable=R0902
# it might be nice to refactor for fewer instance variables
class Config(object):
""" Wraps access to particular values in a config file """
def __init__(self, config_file):
""" Initialize the config with values from a file """
self._config_file = config_file
self._config = ConfigParser(interpolation=None)
self._config.read(self._config_file)
for key in list(_CONFIG_DEFINITIONS.keys()):
self.check_setting(key)
self.ENCODER_MULTICORE_COUNT = max(0, self.ENCODER_MULTICORE_COUNT)
self._upgrade()
def _define(self, name):
key = name.upper()
ini_key = name.lower()
definition = _CONFIG_DEFINITIONS[key]
if len(definition) == 3:
definition_type, section, default = definition
elif len(definition) == 4:
definition_type, section, _, default = definition
return key, definition_type, section, ini_key, default
def check_section(self, section):
""" Check if INI section exists, if not create it """
if not self._config.has_section(section):
self._config[section] = {}
return True
else:
return False
def check_setting(self, key):
""" Cast any value in the config to the right type or use the default """
key, definition_type, section, ini_key, default = self._define(key)
self.check_section(section)
# ConfigParser values are strings, so need to convert to actual list
if definition_type == list:
definition_type = ast.literal_eval
try:
my_val = definition_type(self._config[section][ini_key])
# ConfigParser interprets quotes in the config
# literally, so we need to sanitize it. It's not really
# a config upgrade, since a user can at any time put
# some_key = 'some_val'
if type(my_val) in [str, path]:
my_val = my_val.strip('"').strip("'")
except Exception:
my_val = default
self._config[section][ini_key] = str(my_val)
return my_val
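    # Sketch of the flow above with assumed INI contents: http_port = 8181
    # casts cleanly to int 8181, while http_host = 'localhost' keeps its
    # literal quotes from ConfigParser until they are stripped here; the
    # normalized value is then written back to the parser as a string.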
def write(self):
""" Make a copy of the stored config and write it to the configured file """
new_config = ConfigParser(interpolation=None)
# first copy over everything from the old config, even if it is not
# correctly defined to keep from losing data
for key, subkeys in list(self._config.items()):
if key not in new_config:
new_config[key] = {}
for subkey, value in list(subkeys.items()):
new_config[key][subkey] = value
# next make sure that everything we expect to have defined is so
for key in list(_CONFIG_DEFINITIONS.keys()):
key, definition_type, section, ini_key, default = self._define(key)
self.check_setting(key)
if section not in new_config:
new_config[section] = {}
new_config[section][ini_key] = self._config[section][ini_key]
# Write it to file
headphones.logger.info("Writing configuration to file")
try:
with open(self._config_file, 'w') as configfile:
new_config.write(configfile)
except IOError as e:
headphones.logger.error("Error writing configuration file: %s", e)
def get_extra_newznabs(self):
""" Return the extra newznab tuples """
extra_newznabs = list(
zip(*[itertools.islice(self.EXTRA_NEWZNABS, i, None, 3)
for i in range(3)])
)
return extra_newznabs
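    # The zip/islice combination above chunks the flat EXTRA_NEWZNABS list
    # into 3-tuples; assuming the stored [host, apikey, enabled, ...] layout:
    #   ['h1', 'k1', '1', 'h2', 'k2', '0'] -> [('h1', 'k1', '1'), ('h2', 'k2', '0')]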
def clear_extra_newznabs(self):
""" Forget about the configured extra newznabs """
self.EXTRA_NEWZNABS = []
def add_extra_newznab(self, newznab):
""" Add a new extra newznab """
extra_newznabs = self.EXTRA_NEWZNABS
for item in newznab:
extra_newznabs.append(item)
self.EXTRA_NEWZNABS = extra_newznabs
def get_extra_torznabs(self):
""" Return the extra torznab tuples """
extra_torznabs = list(
zip(*[itertools.islice(self.EXTRA_TORZNABS, i, None, 4)
for i in range(4)])
)
return extra_torznabs
def clear_extra_torznabs(self):
""" Forget about the configured extra torznabs """
self.EXTRA_TORZNABS = []
def add_extra_torznab(self, torznab):
""" Add a new extra torznab """
extra_torznabs = self.EXTRA_TORZNABS
for item in torznab:
extra_torznabs.append(item)
self.EXTRA_TORZNABS = extra_torznabs
def __getattr__(self, name):
"""
Returns something from the ini unless it is a real property
of the configuration object or is not all caps.
"""
if not re.match(r'[A-Z_]+$', name):
            return super(Config, self).__getattribute__(name)
else:
return self.check_setting(name)
def __setattr__(self, name, value):
"""
Maps all-caps properties to ini values unless they exist on the
configuration object.
"""
if not re.match(r'[A-Z_]+$', name):
super(Config, self).__setattr__(name, value)
return value
else:
key, definition_type, section, ini_key, default = self._define(name)
self._config[section][ini_key] = str(value)
return self._config[section][ini_key]
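    # A sketch of the attribute mapping above (the path is hypothetical):
    #   c = Config('/tmp/headphones.ini')
    #   c.HTTP_PORT         # reads [General] http_port via check_setting -> 8181
    #   c.HTTP_PORT = 8080  # written back to the parser as the string '8080'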
def process_kwargs(self, kwargs):
"""
Given a big bunch of key value pairs, apply them to the ini.
"""
for name, value in list(kwargs.items()):
key, definition_type, section, ini_key, default = self._define(name)
self._config[section][ini_key] = str(value)
def _upgrade(self):
"""
        Bring old configs up to date. Each version check falls through to
        the next, so upgrades apply one step at a time in sequence.
"""
if self.CONFIG_VERSION == '2':
# Update the config to use direct path to the encoder rather than the encoder folder
if self.ENCODERFOLDER:
self.ENCODER_PATH = os.path.join(self.ENCODERFOLDER, self.ENCODER)
self.CONFIG_VERSION = '3'
if self.CONFIG_VERSION == '3':
# Update the BLACKHOLE option to the NZB_DOWNLOADER format
if self.BLACKHOLE:
self.NZB_DOWNLOADER = 2
self.CONFIG_VERSION = '4'
# Enable Headphones Indexer if they have a VIP account
if self.CONFIG_VERSION == '4':
if self.HPUSER and self.HPPASS:
self.HEADPHONES_INDEXER = True
self.CONFIG_VERSION = '5'
if self.CONFIG_VERSION == '5':
if self.OPEN_MAGNET_LINKS:
self.MAGNET_LINKS = 2
# Add Seed Ratio to Torznabs
if self.EXTRA_TORZNABS:
extra_torznabs = list(
zip(*[itertools.islice(self.EXTRA_TORZNABS, i, None, 3)
for i in range(3)])
)
new_torznabs = []
for torznab in extra_torznabs:
new_torznabs.extend([torznab[0], torznab[1], '', torznab[2]])
if new_torznabs:
self.EXTRA_TORZNABS = new_torznabs
self.CONFIG_VERSION = '6'
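            # e.g. an old 3-tuple ['host', 'key', '1'] is repackaged above as
            # ['host', 'key', '', '1'], the new empty slot being the seed
            # ratio introduced with config version 6.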
| 21,252
|
Python
|
.py
| 480
| 37.15625
| 100
| 0.572269
|
rembo10/headphones
| 3,370
| 601
| 527
|
GPL-3.0
|
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
|
9,279
|
albumswitcher.py
|
rembo10_headphones/headphones/albumswitcher.py
|
# This file is part of Headphones.
#
# Headphones is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Headphones is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Headphones. If not, see <http://www.gnu.org/licenses/>.
import headphones
from headphones import db, logger, cache
def switch(AlbumID, ReleaseID):
"""
Takes the contents from allalbums & alltracks (based on ReleaseID) and switches them into
the albums & tracks table.
"""
logger.debug('Switching allalbums and alltracks')
myDB = db.DBConnection()
oldalbumdata = myDB.action(
'SELECT * from albums WHERE AlbumID=?', [AlbumID]).fetchone()
newalbumdata = myDB.action(
'SELECT * from allalbums WHERE ReleaseID=?', [ReleaseID]).fetchone()
newtrackdata = myDB.action(
'SELECT * from alltracks WHERE ReleaseID=?', [ReleaseID]).fetchall()
myDB.action('DELETE from tracks WHERE AlbumID=?', [AlbumID])
controlValueDict = {"AlbumID": AlbumID}
newValueDict = {"ArtistID": newalbumdata['ArtistID'],
"ArtistName": newalbumdata['ArtistName'],
"AlbumTitle": newalbumdata['AlbumTitle'],
"ReleaseID": newalbumdata['ReleaseID'],
"AlbumASIN": newalbumdata['AlbumASIN'],
"ReleaseDate": newalbumdata['ReleaseDate'],
"Type": newalbumdata['Type'],
"ReleaseCountry": newalbumdata['ReleaseCountry'],
"ReleaseFormat": newalbumdata['ReleaseFormat']
}
myDB.upsert("albums", newValueDict, controlValueDict)
# Update cache
c = cache.Cache()
c.remove_from_cache(AlbumID=AlbumID)
c.get_artwork_from_cache(AlbumID=AlbumID)
for track in newtrackdata:
controlValueDict = {"TrackID": track['TrackID'],
"AlbumID": AlbumID}
newValueDict = {"ArtistID": track['ArtistID'],
"ArtistName": track['ArtistName'],
"AlbumTitle": track['AlbumTitle'],
"AlbumASIN": track['AlbumASIN'],
"ReleaseID": track['ReleaseID'],
"TrackTitle": track['TrackTitle'],
"TrackDuration": track['TrackDuration'],
"TrackNumber": track['TrackNumber'],
"CleanName": track['CleanName'],
"Location": track['Location'],
"Format": track['Format'],
"BitRate": track['BitRate']
}
myDB.upsert("tracks", newValueDict, controlValueDict)
    # Mark the album as downloaded if at least 80% (the default; configurable
    # via ALBUM_COMPLETION_PCT) of its tracks are present
total_track_count = len(newtrackdata)
have_track_count = len(myDB.select(
'SELECT * from tracks WHERE AlbumID=? AND Location IS NOT NULL', [AlbumID]))
if oldalbumdata['Status'] == 'Skipped' and ((have_track_count / float(total_track_count)) >= (
headphones.CONFIG.ALBUM_COMPLETION_PCT / 100.0)):
myDB.action(
'UPDATE albums SET Status=? WHERE AlbumID=?', ['Downloaded', AlbumID])
# Update have track counts on index
totaltracks = len(myDB.select(
'SELECT TrackTitle from tracks AS tr INNER JOIN albums AS al ON al.AlbumID = tr.AlbumID WHERE al.ArtistID=? '
'AND al.Status != "Ignored"', [newalbumdata['ArtistID']]))
havetracks = len(myDB.select(
'SELECT TrackTitle from tracks WHERE ArtistID=? AND Location IS NOT NULL',
[newalbumdata['ArtistID']]))
controlValueDict = {"ArtistID": newalbumdata['ArtistID']}
newValueDict = {"TotalTracks": totaltracks,
"HaveTracks": havetracks}
myDB.upsert("artists", newValueDict, controlValueDict)
| 4,273
|
Python
|
.py
| 83
| 41.228916
| 117
| 0.626977
|
rembo10/headphones
| 3,370
| 601
| 527
|
GPL-3.0
|
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
|
9,280
|
importer.py
|
rembo10_headphones/headphones/importer.py
|
# This file is part of Headphones.
#
# Headphones is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Headphones is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Headphones. If not, see <http://www.gnu.org/licenses/>.
import time
from headphones import logger, helpers, db, mb, lastfm, metacritic
from mediafile import MediaFile
import headphones
blacklisted_special_artist_names = ['[anonymous]', '[data]', '[no artist]',
'[traditional]', '[unknown]', 'Various Artists']
blacklisted_special_artists = ['f731ccc4-e22a-43af-a747-64213329e088',
'33cf029c-63b0-41a0-9855-be2a3665fb3b',
'314e1c25-dde7-4e4d-b2f4-0a7b9f7c56dc',
'eec63d3c-3b81-4ad4-b1e4-7c147d4d2b61',
'9be7f096-97ec-4615-8957-8d40b5dcbc41',
'125ec42a-7229-4250-afc5-e057484327fe',
'89ad4ac3-39f7-470e-963a-56509c546377']
def is_exists(artistid):
myDB = db.DBConnection()
# See if the artist is already in the database
artistlist = myDB.select('SELECT ArtistID, ArtistName from artists WHERE ArtistID=?',
[artistid])
if any(artistid in x for x in artistlist):
        logger.info(artistlist[0][1] +
                    " is already in the database. Updating 'have tracks', but not artist information")
return True
else:
return False
def artistlist_to_mbids(artistlist, forced=False):
for artist in artistlist:
if not artist and artist != ' ':
continue
# If adding artists through Manage New Artists, they're coming through as non-unicode (utf-8?)
# and screwing everything up
if not isinstance(artist, str):
try:
artist = artist.decode('utf-8', 'replace')
except Exception:
logger.warn("Unable to convert artist to unicode so cannot do a database lookup")
continue
results = mb.findArtist(artist, limit=1)
if not results:
logger.info('No results found for: %s' % artist)
continue
try:
artistid = results[0]['id']
except IndexError:
logger.info('MusicBrainz query turned up no matches for: %s' % artist)
continue
# Check if it's blacklisted/various artists (only check if it's not forced, e.g. through library scan auto-add.)
# Forced example = Adding an artist from Manage New Artists
myDB = db.DBConnection()
if not forced:
bl_artist = myDB.action('SELECT * FROM blacklist WHERE ArtistID=?',
[artistid]).fetchone()
if bl_artist or artistid in blacklisted_special_artists:
logger.info("Artist ID for '%s' is either blacklisted or Various Artists. To add artist, you must "
"do it manually (Artist ID: %s)" % (artist, artistid))
continue
# Add to database if it doesn't exist
if not is_exists(artistid):
addArtisttoDB(artistid)
# Just update the tracks if it does
# not sure this is correct and we're updating during scanning in librarysync
# else:
# havetracks = len(
# myDB.select('SELECT TrackTitle from tracks WHERE ArtistID=?', [artistid])) + len(
# myDB.select('SELECT TrackTitle from have WHERE ArtistName like ?', [artist]))
# myDB.action('UPDATE artists SET HaveTracks=? WHERE ArtistID=?', [havetracks, artistid])
# Delete it from the New Artists if the request came from there
if forced:
myDB.action('DELETE from newartists WHERE ArtistName=?', [artist])
# Update the similar artist tag cloud:
lastfm.getSimilar()
def addArtistIDListToDB(artistidlist):
# Used to add a list of artist IDs to the database in a single thread
logger.debug("Importer: Adding artist ids %s" % artistidlist)
for artistid in artistidlist:
addArtisttoDB(artistid)
def addArtisttoDB(artistid, extrasonly=False, forcefull=False, type="artist"):
# Putting this here to get around the circular import. We're using this to update thumbnails for artist/albums
from headphones import cache
# Can't add various artists - throws an error from MB
if artistid in blacklisted_special_artists:
        logger.warn('Cannot import blocked special purpose artist with id ' + artistid)
return
# We'll use this to see if we should update the 'LastUpdated' time stamp
errors = False
myDB = db.DBConnection()
# Delete from blacklist if it's on there
myDB.action('DELETE from blacklist WHERE ArtistID=?', [artistid])
# We need the current minimal info in the database instantly
# so we don't throw a 500 error when we redirect to the artistPage
controlValueDict = {"ArtistID": artistid}
# Don't replace a known artist name with an "Artist ID" placeholder
dbartist = myDB.action('SELECT * FROM artists WHERE ArtistID=?', [artistid]).fetchone()
# Only modify the Include Extras stuff if it's a new artist. We need it early so we know what to fetch
if not dbartist:
newValueDict = {"ArtistName": "Artist ID: %s" % (artistid),
"Status": "Loading",
"IncludeExtras": headphones.CONFIG.INCLUDE_EXTRAS,
"Extras": headphones.CONFIG.EXTRAS}
if type == "series":
newValueDict['Type'] = "series"
else:
newValueDict = {"Status": "Loading"}
if dbartist["Type"] == "series":
type = "series"
myDB.upsert("artists", newValueDict, controlValueDict)
if type == "series":
artist = mb.getSeries(artistid)
else:
artist = mb.getArtist(artistid, extrasonly)
if artist and artist.get('artist_name') in blacklisted_special_artist_names:
logger.warn('Cannot import blocked special purpose artist: %s' % artist.get('artist_name'))
myDB.action('DELETE from artists WHERE ArtistID=?', [artistid])
# in case it's already in the db
myDB.action('DELETE from albums WHERE ArtistID=?', [artistid])
myDB.action('DELETE from tracks WHERE ArtistID=?', [artistid])
return
if not artist:
logger.warn("Error fetching artist info. ID: " + artistid)
if dbartist is None:
newValueDict = {"ArtistName": "Fetch failed, try refreshing. (%s)" % (artistid),
"Status": "Active"}
else:
newValueDict = {"Status": "Active"}
myDB.upsert("artists", newValueDict, controlValueDict)
return
if artist['artist_name'].startswith('The '):
sortname = artist['artist_name'][4:]
else:
sortname = artist['artist_name']
logger.info("Now adding/updating: " + artist['artist_name'])
controlValueDict = {"ArtistID": artistid}
newValueDict = {"ArtistName": artist['artist_name'],
"ArtistSortName": sortname,
"DateAdded": helpers.today(),
"Status": "Loading"}
myDB.upsert("artists", newValueDict, controlValueDict)
# See if we need to grab extras. Artist specific extras take precedence
# over global option. Global options are set when adding a new artist
try:
db_artist = myDB.action('SELECT IncludeExtras, Extras from artists WHERE ArtistID=?',
[artistid]).fetchone()
includeExtras = db_artist['IncludeExtras']
except IndexError:
includeExtras = False
# Clean all references to release group in dB that are no longer referenced
# from the musicbrainz refresh
group_list = []
force_repackage = 0
# Don't nuke the database if there's a MusicBrainz error
if len(artist['releasegroups']) != 0:
for groups in artist['releasegroups']:
group_list.append(groups['id'])
if not extrasonly:
remove_missing_groups_from_albums = myDB.select(
"SELECT AlbumID FROM albums WHERE ArtistID=?", [artistid])
else:
remove_missing_groups_from_albums = myDB.select(
'SELECT AlbumID FROM albums WHERE ArtistID=? AND Status="Skipped" AND Type!="Album"',
[artistid])
for items in remove_missing_groups_from_albums:
if items['AlbumID'] not in group_list:
# Remove all from albums/tracks that aren't in release groups
myDB.action("DELETE FROM albums WHERE AlbumID=?", [items['AlbumID']])
myDB.action("DELETE FROM allalbums WHERE AlbumID=?", [items['AlbumID']])
myDB.action("DELETE FROM tracks WHERE AlbumID=?", [items['AlbumID']])
myDB.action("DELETE FROM alltracks WHERE AlbumID=?", [items['AlbumID']])
myDB.action('DELETE from releases WHERE ReleaseGroupID=?', [items['AlbumID']])
logger.info("[%s] Removing all references to release group %s to reflect MusicBrainz refresh" % (
artist['artist_name'], items['AlbumID']))
if not extrasonly:
force_repackage = 1
else:
if not extrasonly:
logger.info(
"[%s] There was either an error pulling data from MusicBrainz or there might not be any releases for this category" %
artist['artist_name'])
# Then search for releases within releasegroups, if releases don't exist, then remove from allalbums/alltracks
album_searches = []
for rg in artist['releasegroups']:
al_title = rg['title']
today = helpers.today()
rgid = rg['id']
skip_log = 0
# Make a user configurable variable to skip update of albums with release dates older than this date (in days)
ignore_age = headphones.CONFIG.MB_IGNORE_AGE
rg_exists = myDB.action("SELECT * from albums WHERE AlbumID=?", [rg['id']]).fetchone()
if not forcefull:
new_release_group = False
try:
check_release_date = rg_exists['ReleaseDate']
except TypeError:
check_release_date = None
new_release_group = True
if new_release_group:
logger.info("[%s] Now adding: %s (New Release Group)" % (artist['artist_name'], rg['title']))
new_releases = mb.get_new_releases(rgid, includeExtras)
else:
if check_release_date is None or check_release_date == "None":
if not headphones.CONFIG.MB_IGNORE_AGE_MISSING:
logger.info("[%s] Now updating: %s (No Release Date)" % (artist['artist_name'], rg['title']))
new_releases = mb.get_new_releases(rgid, includeExtras, True)
else:
logger.info("[%s] Skipping update of: %s (No Release Date)" % (artist['artist_name'], rg['title']))
new_releases = 0
else:
if len(check_release_date) == 10:
release_date = check_release_date
elif len(check_release_date) == 7:
release_date = check_release_date + "-27"
elif len(check_release_date) == 4:
release_date = check_release_date + "-12-27"
else:
release_date = today
if helpers.age(release_date) < ignore_age:
logger.info("[%s] Now updating: %s (Release Date <%s Days)",
artist['artist_name'], rg['title'], ignore_age)
new_releases = mb.get_new_releases(rgid, includeExtras, True)
else:
logger.info("[%s] Skipping: %s (Release Date >%s Days)",
artist['artist_name'], rg['title'], ignore_age)
skip_log = 1
new_releases = 0
if force_repackage == 1:
new_releases = -1
logger.info('[%s] Forcing repackage of %s (Release Group Removed)',
artist['artist_name'], al_title)
else:
logger.info("[%s] Now adding/updating: %s (Comprehensive Force)", artist['artist_name'],
rg['title'])
new_releases = mb.get_new_releases(rgid, includeExtras, forcefull)
if new_releases != 0:
# Dump existing hybrid release since we're repackaging/replacing it
myDB.action("DELETE from albums WHERE ReleaseID=?", [rg['id']])
myDB.action("DELETE from allalbums WHERE ReleaseID=?", [rg['id']])
myDB.action("DELETE from tracks WHERE ReleaseID=?", [rg['id']])
myDB.action("DELETE from alltracks WHERE ReleaseID=?", [rg['id']])
myDB.action('DELETE from releases WHERE ReleaseGroupID=?', [rg['id']])
# This will be used later to build a hybrid release
fullreleaselist = []
# Search for releases within a release group
find_hybrid_releases = myDB.select("SELECT * from allalbums WHERE AlbumID=?",
[rg['id']])
# Build the dictionary for the fullreleaselist
for items in find_hybrid_releases:
# don't include hybrid information, since that's what we're replacing
if items['ReleaseID'] != rg['id']:
hybrid_release_id = items['ReleaseID']
newValueDict = {"ArtistID": items['ArtistID'],
"ArtistName": items['ArtistName'],
"AlbumTitle": items['AlbumTitle'],
"AlbumID": items['AlbumID'],
"AlbumASIN": items['AlbumASIN'],
"ReleaseDate": items['ReleaseDate'],
"Type": items['Type'],
"ReleaseCountry": items['ReleaseCountry'],
"ReleaseFormat": items['ReleaseFormat']
}
find_hybrid_tracks = myDB.action("SELECT * from alltracks WHERE ReleaseID=?",
[hybrid_release_id])
totalTracks = 1
hybrid_track_array = []
for hybrid_tracks in find_hybrid_tracks:
hybrid_track_array.append({
'number': hybrid_tracks['TrackNumber'],
'title': hybrid_tracks['TrackTitle'],
'id': hybrid_tracks['TrackID'],
# 'url': hybrid_tracks['TrackURL'],
'duration': hybrid_tracks['TrackDuration']
})
totalTracks += 1
newValueDict['ReleaseID'] = hybrid_release_id
newValueDict['Tracks'] = hybrid_track_array
fullreleaselist.append(newValueDict)
# Basically just do the same thing again for the hybrid release
# This may end up being called with an empty fullreleaselist
try:
hybridrelease = getHybridRelease(fullreleaselist)
logger.info('[%s] Packaging %s releases into hybrid title' % (
artist['artist_name'], rg['title']))
except Exception as e:
errors = True
logger.warn('[%s] Unable to get hybrid release information for %s: %s' % (
artist['artist_name'], rg['title'], e))
continue
# Use the ReleaseGroupID as the ReleaseID for the hybrid release to differentiate it
# We can then use the condition WHERE ReleaseID == ReleaseGroupID to select it
# The hybrid won't have a country or a format
controlValueDict = {"ReleaseID": rg['id']}
newValueDict = {"ArtistID": artistid,
"ArtistName": artist['artist_name'],
"AlbumTitle": rg['title'],
"AlbumID": rg['id'],
"AlbumASIN": hybridrelease['AlbumASIN'],
"ReleaseDate": hybridrelease['ReleaseDate'],
"Type": rg['type']
}
myDB.upsert("allalbums", newValueDict, controlValueDict)
for track in hybridrelease['Tracks']:
cleanname = helpers.clean_name(artist['artist_name'] + ' ' + rg['title'] + ' ' + track['title'])
controlValueDict = {"TrackID": track['id'],
"ReleaseID": rg['id']}
newValueDict = {"ArtistID": artistid,
"ArtistName": artist['artist_name'],
"AlbumTitle": rg['title'],
"AlbumASIN": hybridrelease['AlbumASIN'],
"AlbumID": rg['id'],
"TrackTitle": track['title'],
"TrackDuration": track['duration'],
"TrackNumber": track['number'],
"CleanName": cleanname
}
match = myDB.action('SELECT Location, BitRate, Format from have WHERE CleanName=?',
[cleanname]).fetchone()
if not match:
match = myDB.action(
'SELECT Location, BitRate, Format from have WHERE ArtistName LIKE ? AND AlbumTitle LIKE ? AND TrackTitle LIKE ?',
[artist['artist_name'], rg['title'], track['title']]).fetchone()
# if not match:
# match = myDB.action('SELECT Location, BitRate, Format from have WHERE TrackID=?', [track['id']]).fetchone()
if match:
newValueDict['Location'] = match['Location']
newValueDict['BitRate'] = match['BitRate']
newValueDict['Format'] = match['Format']
# myDB.action('UPDATE have SET Matched="True" WHERE Location=?', [match['Location']])
myDB.action('UPDATE have SET Matched=? WHERE Location=?',
(rg['id'], match['Location']))
myDB.upsert("alltracks", newValueDict, controlValueDict)
# Delete matched tracks from the have table
# myDB.action('DELETE from have WHERE Matched="True"')
# If there's no release in the main albums tables, add the default (hybrid)
# If there is a release, check the ReleaseID against the AlbumID to see if they differ (user updated)
# check if the album already exists
if not rg_exists:
releaseid = rg['id']
else:
releaseid = rg_exists['ReleaseID']
if not releaseid:
releaseid = rg['id']
album = myDB.action('SELECT * from allalbums WHERE ReleaseID=?', [releaseid]).fetchone()
controlValueDict = {"AlbumID": rg['id']}
newValueDict = {"ArtistID": album['ArtistID'],
"ArtistName": album['ArtistName'],
"AlbumTitle": album['AlbumTitle'],
"ReleaseID": album['ReleaseID'],
"AlbumASIN": album['AlbumASIN'],
"ReleaseDate": album['ReleaseDate'],
"Type": album['Type'],
"ReleaseCountry": album['ReleaseCountry'],
"ReleaseFormat": album['ReleaseFormat']
}
if rg_exists:
newValueDict['DateAdded'] = rg_exists['DateAdded']
newValueDict['Status'] = rg_exists['Status']
else:
today = helpers.today()
newValueDict['DateAdded'] = today
if headphones.CONFIG.AUTOWANT_ALL:
newValueDict['Status'] = "Wanted"
elif headphones.CONFIG.AUTOWANT_UPCOMING:
if helpers.is_valid_date(album['ReleaseDate']) and helpers.age(album['ReleaseDate']) < 21:
newValueDict['Status'] = "Wanted"
else:
newValueDict['Status'] = "Skipped"
myDB.upsert("albums", newValueDict, controlValueDict)
tracks = myDB.action('SELECT * from alltracks WHERE ReleaseID=?',
[releaseid]).fetchall()
# This is used to see how many tracks you have from an album - to
# mark it as downloaded. Default is 80%, can be set in config as
# ALBUM_COMPLETION_PCT
total_track_count = len(tracks)
if total_track_count == 0:
logger.warning("Total track count is zero for Release ID " +
"'%s', skipping.", releaseid)
continue
for track in tracks:
controlValueDict = {"TrackID": track['TrackID'],
"AlbumID": rg['id']}
newValueDict = {"ArtistID": track['ArtistID'],
"ArtistName": track['ArtistName'],
"AlbumTitle": track['AlbumTitle'],
"AlbumASIN": track['AlbumASIN'],
"ReleaseID": track['ReleaseID'],
"TrackTitle": track['TrackTitle'],
"TrackDuration": track['TrackDuration'],
"TrackNumber": track['TrackNumber'],
"CleanName": track['CleanName'],
"Location": track['Location'],
"Format": track['Format'],
"BitRate": track['BitRate']
}
myDB.upsert("tracks", newValueDict, controlValueDict)
            # Mark the album as downloaded if it has at least 80% (by default, configurable) of its tracks
have_track_count = len(
myDB.select('SELECT * from tracks WHERE AlbumID=? AND Location IS NOT NULL',
[rg['id']]))
marked_as_downloaded = False
if rg_exists:
if rg_exists['Status'] == 'Skipped' and (
(have_track_count / float(total_track_count)) >= (
headphones.CONFIG.ALBUM_COMPLETION_PCT / 100.0)):
myDB.action('UPDATE albums SET Status=? WHERE AlbumID=?',
['Downloaded', rg['id']])
marked_as_downloaded = True
else:
if (have_track_count / float(total_track_count)) >= (
headphones.CONFIG.ALBUM_COMPLETION_PCT / 100.0):
myDB.action('UPDATE albums SET Status=? WHERE AlbumID=?',
['Downloaded', rg['id']])
marked_as_downloaded = True
logger.info(
"[%s] Seeing if we need album art for %s" % (artist['artist_name'], rg['title']))
try:
cache.getThumb(AlbumID=rg['id'])
except Exception as e:
logger.error("Error getting album art: %s", e)
# Start a search for the album if it's new, hasn't been marked as
# downloaded and autowant_all is selected. This search is deferred,
            # in case the search fails and halts the rest of the import.
if not rg_exists and not marked_as_downloaded and headphones.CONFIG.AUTOWANT_ALL:
album_searches.append(rg['id'])
else:
if skip_log == 0:
logger.info("[%s] No new releases, so no changes made to %s" % (
artist['artist_name'], rg['title']))
time.sleep(3)
finalize_update(artistid, artist['artist_name'], errors)
logger.info("Seeing if we need album art for: %s" % artist['artist_name'])
try:
cache.getThumb(ArtistID=artistid)
except Exception as e:
logger.error("Error getting album art: %s", e)
logger.info("Fetching Metacritic reviews for: %s" % artist['artist_name'])
try:
metacritic.update(artistid, artist['artist_name'], artist['releasegroups'])
except Exception as e:
logger.error("Error getting Metacritic reviews: %s", e)
if errors:
logger.info(
"[%s] Finished updating artist: %s but with errors, so not marking it as updated in the database" % (
artist['artist_name'], artist['artist_name']))
else:
myDB.action('DELETE FROM newartists WHERE ArtistName = ?', [artist['artist_name']])
logger.info("Updating complete for: %s" % artist['artist_name'])
# Start searching for newly added albums
if album_searches:
from headphones import searcher
logger.info("Start searching for %d albums.", len(album_searches))
for album_search in album_searches:
searcher.searchforalbum(albumid=album_search)
def finalize_update(artistid, artistname, errors=False):
    # Moving this little bit to its own function so we can update have tracks & latest album when deleting extras
myDB = db.DBConnection()
latestalbum = myDB.action(
'SELECT AlbumTitle, ReleaseDate, AlbumID from albums WHERE ArtistID=? order by ReleaseDate DESC',
[artistid]).fetchone()
totaltracks = len(myDB.select(
'SELECT TrackTitle from tracks WHERE ArtistID=? AND AlbumID IN (SELECT AlbumID FROM albums WHERE Status != "Ignored")',
[artistid]))
# havetracks = len(myDB.select('SELECT TrackTitle from tracks WHERE ArtistID=? AND Location IS NOT NULL', [artistid])) + len(myDB.select('SELECT TrackTitle from have WHERE ArtistName like ?', [artist['artist_name']]))
havetracks = len(
myDB.select('SELECT TrackTitle from tracks WHERE ArtistID=? AND Location IS NOT NULL',
[artistid])) + len(
myDB.select('SELECT TrackTitle from have WHERE ArtistName like ? AND Matched = "Failed"',
[artistname]))
controlValueDict = {"ArtistID": artistid}
if latestalbum:
newValueDict = {"Status": "Active",
"LatestAlbum": latestalbum['AlbumTitle'],
"ReleaseDate": latestalbum['ReleaseDate'],
"AlbumID": latestalbum['AlbumID'],
"TotalTracks": totaltracks,
"HaveTracks": havetracks}
else:
newValueDict = {"Status": "Active",
"TotalTracks": totaltracks,
"HaveTracks": havetracks}
if not errors:
newValueDict['LastUpdated'] = helpers.now()
myDB.upsert("artists", newValueDict, controlValueDict)
def addReleaseById(rid, rgid=None):
myDB = db.DBConnection()
# Create minimum info upfront if added from searchresults
status = ''
if rgid:
dbalbum = myDB.select("SELECT * from albums WHERE AlbumID=?", [rgid])
if not dbalbum:
status = 'Loading'
controlValueDict = {"AlbumID": rgid}
newValueDict = {"AlbumTitle": rgid,
"ArtistName": status,
"Status": status}
myDB.upsert("albums", newValueDict, controlValueDict)
time.sleep(1)
rgid = None
artistid = None
release_dict = None
results = myDB.select(
"SELECT albums.ArtistID, releases.ReleaseGroupID from releases, albums WHERE releases.ReleaseID=? and releases.ReleaseGroupID=albums.AlbumID LIMIT 1",
[rid])
for result in results:
rgid = result['ReleaseGroupID']
artistid = result['ArtistID']
logger.debug(
"Found a cached releaseid : releasegroupid relationship: " + rid + " : " + rgid)
if not rgid:
# didn't find it in the cache, get the information from MB
logger.debug(
"Didn't find releaseID " + rid + " in the cache. Looking up its ReleaseGroupID")
try:
release_dict = mb.getRelease(rid)
except Exception as e:
logger.info('Unable to get release information for Release %s: %s', rid, e)
if status == 'Loading':
myDB.action("DELETE FROM albums WHERE AlbumID=?", [rgid])
return
if not release_dict:
logger.info('Unable to get release information for Release %s: no dict', rid)
if status == 'Loading':
myDB.action("DELETE FROM albums WHERE AlbumID=?", [rgid])
return
rgid = release_dict['rgid']
artistid = release_dict['artist_id']
# we don't want to make more calls to MB here unless we have to, could be happening quite a lot
rg_exists = myDB.select("SELECT * from albums WHERE AlbumID=?", [rgid])
# make sure the artist exists since I don't know what happens later if it doesn't
artist_exists = myDB.select("SELECT * from artists WHERE ArtistID=?", [artistid])
if not artist_exists and release_dict:
if release_dict['artist_name'].startswith('The '):
sortname = release_dict['artist_name'][4:]
else:
sortname = release_dict['artist_name']
logger.info(
"Now manually adding: " + release_dict['artist_name'] + " - with status Paused")
controlValueDict = {"ArtistID": release_dict['artist_id']}
newValueDict = {"ArtistName": release_dict['artist_name'],
"ArtistSortName": sortname,
"DateAdded": helpers.today(),
"Status": "Paused"}
if headphones.CONFIG.INCLUDE_EXTRAS:
newValueDict['IncludeExtras'] = 1
newValueDict['Extras'] = headphones.CONFIG.EXTRAS
if 'title' in release_dict:
newValueDict['LatestAlbum'] = release_dict['title']
elif 'rg_title' in release_dict:
newValueDict['LatestAlbum'] = release_dict['rg_title']
if 'date' in release_dict:
newValueDict['ReleaseDate'] = release_dict['date']
if rgid:
newValueDict['AlbumID'] = rgid
myDB.upsert("artists", newValueDict, controlValueDict)
elif not artist_exists and not release_dict:
logger.error(
"Artist does not exist in the database and did not get a valid response from MB. Skipping release.")
if status == 'Loading':
myDB.action("DELETE FROM albums WHERE AlbumID=?", [rgid])
return
if not rg_exists and release_dict or status == 'Loading' and release_dict: # it should never be the case that we have an rg and not the artist
# but if it is this will fail
logger.info("Now adding-by-id album (" + release_dict['title'] + ") from id: " + rgid)
controlValueDict = {"AlbumID": rgid}
if status != 'Loading':
status = 'Wanted'
newValueDict = {"ArtistID": release_dict['artist_id'],
"ReleaseID": rgid,
"ArtistName": release_dict['artist_name'],
"AlbumTitle": release_dict['title'] if 'title' in release_dict else
release_dict['rg_title'],
"AlbumASIN": release_dict['asin'],
"ReleaseDate": release_dict['date'],
"DateAdded": helpers.today(),
"Status": status,
"Type": release_dict['rg_type'],
"ReleaseID": rid
}
myDB.upsert("albums", newValueDict, controlValueDict)
# keep a local cache of these so that external programs that are adding releasesByID don't hammer MB
myDB.action('INSERT INTO releases VALUES( ?, ?)', [rid, release_dict['rgid']])
for track in release_dict['tracks']:
cleanname = helpers.clean_name(
release_dict['artist_name'] + ' ' + release_dict['rg_title'] + ' ' + track['title'])
controlValueDict = {"TrackID": track['id'],
"AlbumID": rgid}
newValueDict = {"ArtistID": release_dict['artist_id'],
"ArtistName": release_dict['artist_name'],
"AlbumTitle": release_dict['rg_title'],
"AlbumASIN": release_dict['asin'],
"TrackTitle": track['title'],
"TrackDuration": track['duration'],
"TrackNumber": track['number'],
"CleanName": cleanname
}
match = myDB.action(
'SELECT Location, BitRate, Format, Matched from have WHERE CleanName=?',
[cleanname]).fetchone()
if not match:
match = myDB.action(
'SELECT Location, BitRate, Format, Matched from have WHERE ArtistName LIKE ? AND AlbumTitle LIKE ? AND TrackTitle LIKE ?',
[release_dict['artist_name'], release_dict['rg_title'],
track['title']]).fetchone()
# if not match:
# match = myDB.action('SELECT Location, BitRate, Format from have WHERE TrackID=?', [track['id']]).fetchone()
if match:
newValueDict['Location'] = match['Location']
newValueDict['BitRate'] = match['BitRate']
newValueDict['Format'] = match['Format']
# myDB.action('DELETE from have WHERE Location=?', [match['Location']])
# If the album has been scanned before adding the release it will be unmatched, update to matched
if match['Matched'] == 'Failed':
myDB.action('UPDATE have SET Matched=? WHERE Location=?',
(release_dict['rgid'], match['Location']))
myDB.upsert("tracks", newValueDict, controlValueDict)
# Reset status
if status == 'Loading':
controlValueDict = {"AlbumID": rgid}
if headphones.CONFIG.AUTOWANT_MANUALLY_ADDED:
newValueDict = {"Status": "Wanted"}
else:
newValueDict = {"Status": "Skipped"}
myDB.upsert("albums", newValueDict, controlValueDict)
# Start a search for the album
if headphones.CONFIG.AUTOWANT_MANUALLY_ADDED:
from . import searcher
searcher.searchforalbum(rgid, False)
elif not rg_exists and not release_dict:
logger.error(
"ReleaseGroup does not exist in the database and did not get a valid response from MB. Skipping release.")
if status == 'Loading':
myDB.action("DELETE FROM albums WHERE AlbumID=?", [rgid])
return
else:
logger.info('Release ' + str(rid) + " already exists in the database!")
def updateFormat():
myDB = db.DBConnection()
tracks = myDB.select('SELECT * from tracks WHERE Location IS NOT NULL and Format IS NULL')
if len(tracks) > 0:
logger.info('Finding media format for %s files' % len(tracks))
for track in tracks:
try:
f = MediaFile(track['Location'])
except Exception as e:
logger.info("Exception from MediaFile for: " + track['Location'] + " : " + str(e))
continue
controlValueDict = {"TrackID": track['TrackID']}
newValueDict = {"Format": f.format}
myDB.upsert("tracks", newValueDict, controlValueDict)
logger.info('Finished finding media format for %s files' % len(tracks))
havetracks = myDB.select('SELECT * from have WHERE Location IS NOT NULL and Format IS NULL')
if len(havetracks) > 0:
logger.info('Finding media format for %s files' % len(havetracks))
for track in havetracks:
try:
f = MediaFile(track['Location'])
except Exception as e:
logger.info("Exception from MediaFile for: " + track['Location'] + " : " + str(e))
continue
controlValueDict = {"TrackID": track['TrackID']}
newValueDict = {"Format": f.format}
myDB.upsert("have", newValueDict, controlValueDict)
logger.info('Finished finding media format for %s files' % len(havetracks))
def getHybridRelease(fullreleaselist):
"""
    Returns a dictionary with the best group of tracks from the list of
    releases, together with the earliest release date
"""
if len(fullreleaselist) == 0:
raise ValueError("Empty fullreleaselist")
sortable_release_list = []
formats = {
'2xVinyl': '2',
'Vinyl': '2',
'CD': '0',
'Cassette': '3',
'2xCD': '1',
'Digital Media': '0'
}
countries = {
'US': '0',
'GB': '1',
'JP': '2',
}
for release in fullreleaselist:
# Find values for format and country
try:
format = int(formats[release['Format']])
except (ValueError, KeyError):
format = 3
try:
country = int(countries[release['Country']])
except (ValueError, KeyError):
country = 3
# Create record
release_dict = {
'hasasin': bool(release['AlbumASIN']),
'asin': release['AlbumASIN'],
'trackscount': len(release['Tracks']),
'releaseid': release['ReleaseID'],
'releasedate': release['ReleaseDate'],
'format': format,
'country': country,
'tracks': release['Tracks']
}
sortable_release_list.append(release_dict)
# Necessary to make dates that miss the month and/or day show up after full
# dates
def getSortableReleaseDate(releaseDate):
# Change this value to change the sorting behaviour of none, returning
# 'None' will put it at the top which was normal behaviour for pre-ngs
# versions
if releaseDate is None:
return 'None'
if releaseDate.count('-') == 2:
return releaseDate
elif releaseDate.count('-') == 1:
return releaseDate + '32'
else:
return releaseDate + '13-32'
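    # The padding works through plain string comparison: '2010-0532' sorts
    # after '2010-05-31' because '3' > '-' in ASCII, so month-only and
    # year-only dates land after every full date they could match.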
sortable_release_list.sort(key=lambda x: getSortableReleaseDate(x['releasedate']))
average_tracks = sum(x['trackscount'] for x in sortable_release_list) / float(
len(sortable_release_list))
for item in sortable_release_list:
item['trackscount_delta'] = abs(average_tracks - item['trackscount'])
a = helpers.multikeysort(sortable_release_list,
['-hasasin', 'country', 'format', 'trackscount_delta'])
release_dict = {'ReleaseDate': sortable_release_list[0]['releasedate'],
'Tracks': a[0]['tracks'],
'AlbumASIN': a[0]['asin']
}
return release_dict
| 40,191
|
Python
|
.py
| 744
| 39.278226
| 221
| 0.559924
|
rembo10/headphones
| 3,370
| 601
| 527
|
GPL-3.0
|
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
|
9,281
|
mb.py
|
rembo10_headphones/headphones/mb.py
|
# This file is part of Headphones.
#
# Headphones is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Headphones is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Headphones. If not, see <http://www.gnu.org/licenses/>.
from collections import OrderedDict
import musicbrainzngs
import headphones
import headphones.lock
from headphones import logger, db, helpers
mb_lock = headphones.lock.TimedLock(0)
# Quick fix to add mirror switching on the fly. We should probably return the mbhost & mbport that's
# being used, so we can send those values to the log
def startmb():
mbuser = None
mbpass = None
if headphones.CONFIG.MIRROR == "musicbrainz.org":
mbhost = "musicbrainz.org"
mbport = 80
sleepytime = 1
elif headphones.CONFIG.MIRROR == "custom":
mbhost = headphones.CONFIG.CUSTOMHOST
mbport = int(headphones.CONFIG.CUSTOMPORT)
mbuser = headphones.CONFIG.CUSTOMUSER
mbpass = headphones.CONFIG.CUSTOMPASS
sleepytime = int(headphones.CONFIG.CUSTOMSLEEP)
elif headphones.CONFIG.MIRROR == "headphones":
mbhost = "musicbrainz.codeshy.com"
mbport = 80
mbuser = headphones.CONFIG.HPUSER
mbpass = headphones.CONFIG.HPPASS
sleepytime = 0
else:
return False
musicbrainzngs.set_useragent("headphones", "0.0", "https://github.com/rembo10/headphones")
musicbrainzngs.set_hostname(mbhost + ":" + str(mbport))
# Their rate limiting should be redundant to our lock
if sleepytime == 0:
musicbrainzngs.set_rate_limit(False)
else:
        # calling it with an int ends up blocking all requests after the first
musicbrainzngs.set_rate_limit(limit_or_interval=float(sleepytime))
mb_lock.minimum_delta = sleepytime
# Add headphones credentials
if headphones.CONFIG.MIRROR == "headphones" or headphones.CONFIG.CUSTOMAUTH:
if not mbuser or not mbpass:
logger.warn("No username or password set for MusicBrainz server")
else:
musicbrainzngs.hpauth(mbuser, mbpass)
# Let us know if we disable custom authentication
if not headphones.CONFIG.CUSTOMAUTH and headphones.CONFIG.MIRROR == "custom":
musicbrainzngs.disable_hpauth()
logger.debug('Using the following server values: MBHost: %s, MBPort: %i, Sleep Interval: %i',
mbhost, mbport, sleepytime)
return True
def findArtist(name, limit=1):
artistlist = []
artistResults = None
criteria = {'artist': name.lower()}
with mb_lock:
try:
artistResults = musicbrainzngs.search_artists(limit=limit, **criteria)['artist-list']
except ValueError as e:
if "at least one query term is required" in str(e):
logger.error(
"Tried to search without a term, or an empty one. Provided artist (probably emtpy): %s",
name)
return False
else:
raise
except musicbrainzngs.WebServiceError as e:
logger.warn('Attempt to query MusicBrainz for %s failed (%s)' % (name, str(e)))
mb_lock.snooze(5)
if not artistResults:
return False
for result in artistResults:
if 'disambiguation' in result:
uniquename = str(result['sort-name'] + " (" + result['disambiguation'] + ")")
else:
uniquename = str(result['sort-name'])
if result['name'] != uniquename and limit == 1:
logger.info(
'Found an artist with a disambiguation: %s - doing an album based search' % name)
artistdict = findArtistbyAlbum(name)
if not artistdict:
logger.info(
'Cannot determine the best match from an artist/album search. Using top match instead')
artistlist.append({
# Just need the artist id if the limit is 1
'id': str(result['id']),
})
else:
artistlist.append(artistdict)
else:
artistlist.append({
'name': str(result['sort-name']),
'uniquename': uniquename,
'id': str(result['id']),
'url': str("https://musicbrainz.org/artist/" + result['id']),
# probably needs to be changed
'score': int(result['ext:score'])
})
return artistlist
def findRelease(name, limit=1, artist=None):
releaselist = []
releaseResults = None
# additional artist search
if not artist and ':' in name:
name, artist = name.rsplit(":", 1)
criteria = {'release': name.lower()}
if artist:
criteria['artist'] = artist.lower()
with mb_lock:
try:
releaseResults = musicbrainzngs.search_releases(limit=limit, **criteria)['release-list']
except musicbrainzngs.WebServiceError as e: # need to update exceptions
logger.warn('Attempt to query MusicBrainz for "%s" failed: %s' % (name, str(e)))
mb_lock.snooze(5)
if not releaseResults:
return False
for result in releaseResults:
title = result['title']
if 'disambiguation' in result:
title += ' (' + result['disambiguation'] + ')'
# Get formats and track counts
format_dict = OrderedDict()
formats = ''
tracks = ''
if 'medium-list' in result:
for medium in result['medium-list']:
if 'format' in medium:
format = medium['format']
if format not in format_dict:
format_dict[format] = 0
format_dict[format] += 1
if 'track-count' in medium:
if tracks:
tracks += ' + '
tracks += str(medium['track-count'])
for format, count in list(format_dict.items()):
if formats:
formats += ' + '
if count > 1:
formats += str(count) + 'x'
formats += format
rg_type = ''
if 'type' in result['release-group']:
rg_type = result['release-group']['type']
if rg_type == 'Album' and 'secondary-type-list' in result['release-group']:
secondary_type = result['release-group']['secondary-type-list'][0]
if secondary_type != rg_type:
rg_type = secondary_type
releaselist.append({
'uniquename': str(result['artist-credit'][0]['artist']['name']),
'title': str(title),
'id': str(result['artist-credit'][0]['artist']['id']),
'albumid': str(result['id']),
'url': str(
"https://musicbrainz.org/artist/" + result['artist-credit'][0]['artist']['id']),
# probably needs to be changed
'albumurl': str("https://musicbrainz.org/release/" + result['id']),
# probably needs to be changed
'score': int(result['ext:score']),
'date': str(result['date']) if 'date' in result else '',
'country': str(result['country']) if 'country' in result else '',
'formats': str(formats),
'tracks': str(tracks),
'rgid': str(result['release-group']['id']),
'rgtype': str(rg_type)
})
return releaselist
def findSeries(name, limit=1):
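    """
    Search MusicBrainz for series matching `name`. Returns a list of
    series dicts (name, type, id, url, score), or False when nothing was
    found.
    """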
serieslist = []
seriesResults = None
criteria = {'series': name.lower()}
with mb_lock:
try:
seriesResults = musicbrainzngs.search_series(limit=limit, **criteria)['series-list']
except musicbrainzngs.WebServiceError as e:
logger.warn('Attempt to query MusicBrainz for %s failed (%s)' % (name, str(e)))
mb_lock.snooze(5)
if not seriesResults:
return False
for result in seriesResults:
if 'disambiguation' in result:
uniquename = str(result['name'] + " (" + result['disambiguation'] + ")")
else:
uniquename = str(result['name'])
serieslist.append({
'uniquename': uniquename,
'name': str(result['name']),
'type': str(result['type']),
'id': str(result['id']),
'url': str("https://musicbrainz.org/series/" + result['id']),
# probably needs to be changed
'score': int(result['ext:score'])
})
return serieslist
def getArtist(artistid, extrasonly=False):
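    """
    Fetch an artist and its album release groups from MusicBrainz, paging
    through results 100 at a time. Extras (EPs, live albums, ...) are
    appended when enabled for the artist in the database; with extrasonly,
    only the extras are collected. Returns a dict with 'artist_name' and
    'releasegroups', or False on failure.
    """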
artist_dict = {}
artist = None
try:
limit = 100
with mb_lock:
artist = musicbrainzngs.get_artist_by_id(artistid)['artist']
newRgs = None
artist['release-group-list'] = []
while newRgs is None or len(newRgs) >= limit:
with mb_lock:
newRgs = musicbrainzngs.browse_release_groups(
artistid,
release_type="album",
offset=len(artist['release-group-list']),
limit=limit)
newRgs = newRgs['release-group-list']
artist['release-group-list'] += newRgs
except musicbrainzngs.WebServiceError as e:
logger.warn(
'Attempt to retrieve artist information from MusicBrainz failed for artistid: %s (%s)' % (
artistid, str(e)))
mb_lock.snooze(5)
    except Exception:
        # any other failure while browsing release groups is treated as
        # "artist not found" below
        pass
if not artist:
return False
artist_dict['artist_name'] = str(artist['name'])
releasegroups = []
if not extrasonly:
for rg in artist['release-group-list']:
if "secondary-type-list" in list(rg.keys()): # only add releases without a secondary type
continue
releasegroups.append({
'title': str(rg['title']),
'id': str(rg['id']),
'url': "https://musicbrainz.org/release-group/" + rg['id'],
'type': str(rg['type'])
})
# See if we need to grab extras. Artist specific extras take precedence over global option
# Global options are set when adding a new artist
myDB = db.DBConnection()
try:
db_artist = myDB.action('SELECT IncludeExtras, Extras from artists WHERE ArtistID=?',
[artistid]).fetchone()
        includeExtras = db_artist['IncludeExtras']
    except (IndexError, TypeError):
        # fetchone() returns None when the artist is not in the database
        includeExtras = False
if includeExtras:
        # Need to convert extras string from something like '2,5,6' to ['ep','live','remix'] (append new extras to end)
if db_artist['Extras']:
extras = list(map(int, db_artist['Extras'].split(',')))
else:
extras = []
extras_list = headphones.POSSIBLE_EXTRAS
includes = []
i = 1
for extra in extras_list:
if i in extras:
includes.append(extra)
i += 1
for include in includes:
mb_extras_list = []
try:
limit = 100
newRgs = None
while newRgs is None or len(newRgs) >= limit:
with mb_lock:
newRgs = musicbrainzngs.browse_release_groups(
artistid, release_type=include, offset=len(mb_extras_list), limit=limit)
newRgs = newRgs['release-group-list']
mb_extras_list += newRgs
except musicbrainzngs.WebServiceError as e:
logger.warn(
'Attempt to retrieve artist information from MusicBrainz failed for artistid: %s (%s)' % (
artistid, str(e)))
mb_lock.snooze(5)
for rg in mb_extras_list:
rg_type = rg['type']
if rg_type == 'Album' and 'secondary-type-list' in rg:
secondary_type = rg['secondary-type-list'][0]
if secondary_type != rg_type:
rg_type = secondary_type
releasegroups.append({
'title': str(rg['title']),
'id': str(rg['id']),
'url': "https://musicbrainz.org/release-group/" + rg['id'],
'type': str(rg_type)
})
artist_dict['releasegroups'] = releasegroups
return artist_dict
def getSeries(seriesid):
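    """
    Fetch a series and its related release groups from MusicBrainz.
    Returns a dict shaped like getArtist()'s result, with the series name
    stored under 'artist_name', or False on failure.
    """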
series_dict = {}
series = None
try:
with mb_lock:
series = musicbrainzngs.get_series_by_id(seriesid, includes=['release-group-rels'])[
'series']
except musicbrainzngs.WebServiceError as e:
logger.warn(
'Attempt to retrieve series information from MusicBrainz failed for seriesid: %s (%s)' % (
seriesid, str(e)))
mb_lock.snooze(5)
except Exception:
pass
if not series:
return False
if 'disambiguation' in series:
series_dict['artist_name'] = str(
series['name'] + " (" + str(series['disambiguation']) + ")")
else:
series_dict['artist_name'] = str(series['name'])
releasegroups = []
for rg in series['release_group-relation-list']:
releasegroup = rg['release-group']
releasegroups.append({
'title': releasegroup['title'],
'date': releasegroup['first-release-date'],
'id': releasegroup['id'],
'type': rg['type']
})
series_dict['releasegroups'] = releasegroups
return series_dict
def getReleaseGroup(rgid):
"""
Returns a list of releases in a release group
"""
releaseGroup = None
try:
with mb_lock:
releaseGroup = musicbrainzngs.get_release_group_by_id(
rgid, ["artists", "releases", "media", "discids", ])
releaseGroup = releaseGroup['release-group']
except musicbrainzngs.WebServiceError as e:
logger.warn(
'Attempt to retrieve information from MusicBrainz for release group "%s" failed (%s)' % (
rgid, str(e)))
mb_lock.snooze(5)
if not releaseGroup:
return False
else:
return releaseGroup['release-list']
def getRelease(releaseid, include_artist_info=True):
"""
Deep release search to get track info
"""
release = {}
results = None
try:
with mb_lock:
if include_artist_info:
results = musicbrainzngs.get_release_by_id(releaseid,
["artists", "release-groups", "media",
"recordings"]).get('release')
else:
results = musicbrainzngs.get_release_by_id(releaseid, ["media", "recordings"]).get(
'release')
except musicbrainzngs.WebServiceError as e:
logger.warn(
'Attempt to retrieve information from MusicBrainz for release "%s" failed (%s)' % (
releaseid, str(e)))
mb_lock.snooze(5)
if not results:
return False
release['title'] = str(results['title'])
release['id'] = str(results['id'])
release['asin'] = str(results['asin']) if 'asin' in results else None
release['date'] = str(results['date']) if 'date' in results else None
try:
release['format'] = str(results['medium-list'][0]['format'])
    except (IndexError, KeyError):
release['format'] = 'Unknown'
try:
release['country'] = str(results['country'])
    except KeyError:
release['country'] = 'Unknown'
if include_artist_info:
if 'release-group' in results:
release['rgid'] = str(results['release-group']['id'])
release['rg_title'] = str(results['release-group']['title'])
try:
release['rg_type'] = str(results['release-group']['type'])
if release['rg_type'] == 'Album' and 'secondary-type-list' in results[
'release-group']:
secondary_type = str(results['release-group']['secondary-type-list'][0])
if secondary_type != release['rg_type']:
release['rg_type'] = secondary_type
except KeyError:
release['rg_type'] = 'Unknown'
else:
logger.warn("Release " + releaseid + "had no ReleaseGroup associated")
release['artist_name'] = str(results['artist-credit'][0]['artist']['name'])
release['artist_id'] = str(results['artist-credit'][0]['artist']['id'])
release['tracks'] = getTracksFromRelease(results)
return release
def get_new_releases(rgid, includeExtras=False, forcefull=False):
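    """
    Fetch every release in release group `rgid` and sync them into the
    allalbums/alltracks tables. Returns the number of new or updated
    releases, -1 when stale releases were purged and the album should be
    repackaged, or False on failure.
    """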
myDB = db.DBConnection()
results = []
release_status = "official"
if includeExtras and not headphones.CONFIG.OFFICIAL_RELEASES_ONLY:
release_status = []
try:
limit = 100
newResults = None
while newResults is None or len(newResults) >= limit:
with mb_lock:
newResults = musicbrainzngs.browse_releases(
release_group=rgid,
includes=['artist-credits', 'labels', 'recordings', 'release-groups', 'media'],
release_status=release_status,
limit=limit,
offset=len(results))
if 'release-list' not in newResults:
break # may want to raise an exception here instead ?
newResults = newResults['release-list']
results += newResults
except musicbrainzngs.WebServiceError as e:
logger.warn(
'Attempt to retrieve information from MusicBrainz for release group "%s" failed (%s)' % (
rgid, str(e)))
mb_lock.snooze(5)
return False
    if not results:
        return False
    # Clean all references to releases in the DB that are no longer referenced in MusicBrainz
release_list = []
force_repackage1 = 0
    # the earlier check guarantees we have at least one release here
    for release_mark in results:
        release_list.append(str(release_mark['id']))
        release_title = release_mark['title']
    remove_missing_releases = myDB.action("SELECT ReleaseID FROM allalbums WHERE AlbumID=?",
                                          [rgid])
    if remove_missing_releases:
        for items in remove_missing_releases:
            if items['ReleaseID'] not in release_list and items['ReleaseID'] != rgid:
                # Remove all from albums/tracks that aren't in release
                myDB.action("DELETE FROM albums WHERE ReleaseID=?", [items['ReleaseID']])
                myDB.action("DELETE FROM tracks WHERE ReleaseID=?", [items['ReleaseID']])
                myDB.action("DELETE FROM allalbums WHERE ReleaseID=?", [items['ReleaseID']])
                myDB.action("DELETE FROM alltracks WHERE ReleaseID=?", [items['ReleaseID']])
                logger.info(
                    "Removing all references to release %s to reflect MusicBrainz" % items[
                        'ReleaseID'])
                force_repackage1 = 1
num_new_releases = 0
for releasedata in results:
release = {}
rel_id_check = releasedata['id']
album_checker = myDB.action('SELECT * from allalbums WHERE ReleaseID=?',
[rel_id_check]).fetchone()
if not album_checker or forcefull:
# DELETE all references to this release since we're updating it anyway.
myDB.action('DELETE from allalbums WHERE ReleaseID=?', [rel_id_check])
myDB.action('DELETE from alltracks WHERE ReleaseID=?', [rel_id_check])
release['AlbumTitle'] = str(releasedata['title'])
release['AlbumID'] = str(rgid)
release['AlbumASIN'] = str(releasedata['asin']) if 'asin' in releasedata else None
release['ReleaseDate'] = str(releasedata['date']) if 'date' in releasedata else None
release['ReleaseID'] = releasedata['id']
if 'release-group' not in releasedata:
                raise Exception('No release group associated with release id ' + releasedata[
                    'id'] + ' album id ' + rgid)
release['Type'] = str(releasedata['release-group']['type'])
if release['Type'] == 'Album' and 'secondary-type-list' in releasedata['release-group']:
secondary_type = str(releasedata['release-group']['secondary-type-list'][0])
if secondary_type != release['Type']:
release['Type'] = secondary_type
# making the assumption that the most important artist will be first in the list
if 'artist-credit' in releasedata:
release['ArtistID'] = str(releasedata['artist-credit'][0]['artist']['id'])
release['ArtistName'] = str(releasedata['artist-credit-phrase'])
else:
logger.warn('Release ' + releasedata['id'] + ' has no Artists associated.')
return False
release['ReleaseCountry'] = str(
releasedata['country']) if 'country' in releasedata else 'Unknown'
# assuming that the list will contain media and that the format will be consistent
try:
additional_medium = ''
for position in releasedata['medium-list']:
if position['format'] == releasedata['medium-list'][0]['format']:
medium_count = int(position['position'])
else:
additional_medium = additional_medium + ' + ' + position['format']
if medium_count == 1:
disc_number = ''
else:
disc_number = str(medium_count) + 'x'
packaged_medium = disc_number + releasedata['medium-list'][0][
'format'] + additional_medium
release['ReleaseFormat'] = str(packaged_medium)
            except Exception:
release['ReleaseFormat'] = 'Unknown'
release['Tracks'] = getTracksFromRelease(releasedata)
# What we're doing here now is first updating the allalbums & alltracks table to the most
# current info, then moving the appropriate release into the album table and its associated
# tracks into the tracks table
controlValueDict = {"ReleaseID": release['ReleaseID']}
newValueDict = {"ArtistID": release['ArtistID'],
"ArtistName": release['ArtistName'],
"AlbumTitle": release['AlbumTitle'],
"AlbumID": release['AlbumID'],
"AlbumASIN": release['AlbumASIN'],
"ReleaseDate": release['ReleaseDate'],
"Type": release['Type'],
"ReleaseCountry": release['ReleaseCountry'],
"ReleaseFormat": release['ReleaseFormat']
}
myDB.upsert("allalbums", newValueDict, controlValueDict)
for track in release['Tracks']:
cleanname = helpers.clean_name(
release['ArtistName'] + ' ' + release['AlbumTitle'] + ' ' + track['title'])
controlValueDict = {"TrackID": track['id'],
"ReleaseID": release['ReleaseID']}
newValueDict = {"ArtistID": release['ArtistID'],
"ArtistName": release['ArtistName'],
"AlbumTitle": release['AlbumTitle'],
"AlbumID": release['AlbumID'],
"AlbumASIN": release['AlbumASIN'],
"TrackTitle": track['title'],
"TrackDuration": track['duration'],
"TrackNumber": track['number'],
"CleanName": cleanname
}
match = myDB.action('SELECT Location, BitRate, Format from have WHERE CleanName=?',
[cleanname]).fetchone()
if not match:
match = myDB.action(
'SELECT Location, BitRate, Format from have WHERE ArtistName LIKE ? AND AlbumTitle LIKE ? AND TrackTitle LIKE ?',
[release['ArtistName'], release['AlbumTitle'], track['title']]).fetchone()
# if not match:
# match = myDB.action('SELECT Location, BitRate, Format from have WHERE TrackID=?', [track['id']]).fetchone()
if match:
newValueDict['Location'] = match['Location']
newValueDict['BitRate'] = match['BitRate']
newValueDict['Format'] = match['Format']
# myDB.action('UPDATE have SET Matched="True" WHERE Location=?', [match['Location']])
myDB.action('UPDATE have SET Matched=? WHERE Location=?',
(release['AlbumID'], match['Location']))
myDB.upsert("alltracks", newValueDict, controlValueDict)
num_new_releases = num_new_releases + 1
if album_checker:
logger.info('[%s] Existing release %s (%s) updated' % (
release['ArtistName'], release['AlbumTitle'], rel_id_check))
else:
logger.info('[%s] New release %s (%s) added' % (
release['ArtistName'], release['AlbumTitle'], rel_id_check))
    if force_repackage1 == 1:
        num_new_releases = -1
        logger.info('[%s] Forcing repackage of %s, since DB releases have been removed' % (
            release['ArtistName'], release_title))
return num_new_releases
def getTracksFromRelease(release):
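    """
    Flatten the media of a release into a single track list, numbering
    tracks continuously across discs.
    """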
totalTracks = 1
tracks = []
for medium in release['medium-list']:
for track in medium['track-list']:
            try:
                track_title = str(track['title'])
            except KeyError:
                # fall back to the recording title when the track has no title override
                track_title = str(track['recording']['title'])
tracks.append({
'number': totalTracks,
'title': track_title,
'id': str(track['recording']['id']),
'url': "https://musicbrainz.org/track/" + track['recording']['id'],
'duration': int(track['length']) if 'length' in track else 0
})
totalTracks += 1
return tracks
# Used when there is a disambiguation
def findArtistbyAlbum(name):
myDB = db.DBConnection()
artist = myDB.action(
'SELECT AlbumTitle from have WHERE ArtistName=? AND AlbumTitle IS NOT NULL ORDER BY RANDOM()',
[name]).fetchone()
if not artist:
return False
    # Probably not necessary but just want to double check
if not artist['AlbumTitle']:
return False
term = '"' + artist['AlbumTitle'] + '" AND artist:"' + name + '"'
results = None
try:
with mb_lock:
results = musicbrainzngs.search_release_groups(term).get('release-group-list')
except musicbrainzngs.WebServiceError as e:
logger.warn('Attempt to query MusicBrainz for %s failed (%s)' % (name, str(e)))
mb_lock.snooze(5)
if not results:
return False
artist_dict = {}
for releaseGroup in results:
newArtist = releaseGroup['artist-credit'][0]['artist']
# Only need the artist ID if we're doing an artist+album lookup
artist_dict['id'] = str(newArtist['id'])
return artist_dict
def findAlbumID(artist=None, album=None):
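    """
    Return the release group id of the top search match for an
    artist/album pair (or an album alone), or False if nothing was found.
    """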
results = None
try:
        criteria = {'release': album.lower()}
        if artist:
            criteria['artist'] = artist.lower()
with mb_lock:
results = musicbrainzngs.search_release_groups(limit=1, **criteria).get(
'release-group-list')
except musicbrainzngs.WebServiceError as e:
logger.warn(
'Attempt to query MusicBrainz for %s - %s failed (%s)' % (artist, album, str(e)))
mb_lock.snooze(5)
    if not results:
        return False
rgid = str(results[0]['id'])
return rgid
def getArtistForReleaseGroup(rgid):
"""
Returns artist name for a release group
Used for series where we store the series instead of the artist
"""
releaseGroup = None
try:
with mb_lock:
releaseGroup = musicbrainzngs.get_release_group_by_id(
rgid, ["artists"])
releaseGroup = releaseGroup['release-group']
except musicbrainzngs.WebServiceError as e:
logger.warn(
'Attempt to retrieve information from MusicBrainz for release group "%s" failed (%s)' % (
rgid, str(e)))
mb_lock.snooze(5)
if not releaseGroup:
return False
else:
return releaseGroup['artist-credit'][0]['artist']['name']
def getArtistRelationships(artistid):
"""
Returns list of relationship urls. e.g. Discogs, Wikipedia etc.
"""
    urls = []
    artist = None
    info = None
    try:
        with mb_lock:
            info = musicbrainzngs.get_artist_by_id(artistid, includes='url-rels')
    except musicbrainzngs.WebServiceError as e:
        logger.warn(
            'Attempt to query MusicBrainz for %s failed "%s"' % (artistid, str(e)))
        mb_lock.snooze(5)
    if info and 'artist' in info:
        artist = info['artist']
    if artist and 'url-relation-list' in artist:
for l in artist['url-relation-list']:
urls.append({
'type': l['type'],
'url': l['target']
})
return urls
| 30,658
|
Python
|
.py
| 673
| 33.347697
| 137
| 0.564558
|
rembo10/headphones
| 3,370
| 601
| 527
|
GPL-3.0
|
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
|
9,282
|
postprocessor.py
|
rembo10_headphones/headphones/postprocessor.py
|
# This file is part of Headphones.
#
# Headphones is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Headphones is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Headphones. If not, see <http://www.gnu.org/licenses/>.
import shutil
import uuid
import threading
import itertools
import os
import re
import beets
import headphones
from beets import autotag
from beets import config as beetsconfig
from beets import logging as beetslogging
from mediafile import MediaFile, FileTypeError, UnreadableFileError
from beetsplug import lyrics as beetslyrics
from headphones import notifiers, utorrent, transmission, deluge, qbittorrent, soulseek
from headphones import db, albumart, librarysync
from headphones import logger, helpers, mb, music_encoder
from headphones import metadata
postprocessor_lock = threading.Lock()
def checkFolder():
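    """
    Scan the configured download folders for snatched albums that have
    finished downloading and hand each completed folder to verify().
    """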
logger.info("Checking download folder for completed downloads (only snatched ones).")
with postprocessor_lock:
myDB = db.DBConnection()
snatched = myDB.select('SELECT * from snatched WHERE Status="Snatched"')
for album in snatched:
if album['FolderName']:
folder_name = album['FolderName']
single = False
# Soulseek, check download complete or errored
if album['Kind'] == 'soulseek':
                    match = re.search(r'\{(.*?)\}(.*?)$', folder_name)  # get soulseek user from folder_name
                    if not match:
                        logger.warn("Could not parse Soulseek user from folder name: %s", folder_name)
                        continue
                    user_name = match.group(1)
                    folder_name = match.group(2)
completed, errored = soulseek.download_completed_album(user_name, folder_name)
if errored:
# If the album had any tracks with errors in it, the whole download is considered faulty. Status will be reset to wanted.
logger.info(f"Soulseek: Album with folder '{folder_name}' had errors during download. Setting status to 'Wanted'.")
myDB.action('UPDATE albums SET Status="Wanted" WHERE AlbumID=? AND Status="Snatched"', (album['AlbumID'],))
myDB.action('UPDATE snatched SET status = "Unprocessed" WHERE AlbumID=?', (album['AlbumID'],))
# Folder will be removed from configured complete and Incomplete directory
complete_path = os.path.join(headphones.CONFIG.SOULSEEK_DOWNLOAD_DIR, folder_name)
incomplete_path = os.path.join(headphones.CONFIG.SOULSEEK_INCOMPLETE_DOWNLOAD_DIR, folder_name)
for path in [complete_path, incomplete_path]:
try:
shutil.rmtree(path)
                            except OSError:
                                # the folder may exist in only one of the two locations
                                pass
continue
elif completed:
download_dir = headphones.CONFIG.SOULSEEK_DOWNLOAD_DIR
else:
continue
elif album['Kind'] == 'nzb':
download_dir = headphones.CONFIG.DOWNLOAD_DIR
elif album['Kind'] == 'bandcamp':
download_dir = headphones.CONFIG.BANDCAMP_DIR
else:
if headphones.CONFIG.DELUGE_DONE_DIRECTORY and headphones.CONFIG.TORRENT_DOWNLOADER == 3:
download_dir = headphones.CONFIG.DELUGE_DONE_DIRECTORY
else:
download_dir = headphones.CONFIG.DOWNLOAD_TORRENT_DIR
# Get folder from torrent hash
if album['TorrentHash'] and headphones.CONFIG.TORRENT_DOWNLOADER:
torrent_folder_name = None
if headphones.CONFIG.TORRENT_DOWNLOADER == 1:
torrent_folder_name, single = transmission.getFolder(album['TorrentHash'])
elif headphones.CONFIG.TORRENT_DOWNLOADER == 4:
torrent_folder_name, single = qbittorrent.getFolder(album['TorrentHash'])
if torrent_folder_name:
folder_name = torrent_folder_name
if folder_name:
album_path = os.path.join(download_dir, folder_name)
logger.debug("Checking if %s exists" % album_path)
if os.path.exists(album_path):
logger.info('Found "' + folder_name + '" in ' + album[
'Kind'] + ' download folder. Verifying....')
verify(album['AlbumID'], album_path, album['Kind'], single=single)
else:
logger.info("No folder name found for " + album['Title'])
logger.debug("Checking download folder finished.")
def verify(albumid, albumpath, Kind=None, forced=False, keep_original_folder=False, single=False):
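    """
    Check that the files in `albumpath` really belong to album `albumid`,
    first adding the artist/album to the database if needed. Matching is
    attempted in three passes: embedded metadata, file names, then track
    count plus total duration. On a match post-processing starts;
    otherwise the folder is marked unprocessed.
    """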
myDB = db.DBConnection()
release = myDB.action('SELECT * from albums WHERE AlbumID=?', [albumid]).fetchone()
tracks = myDB.select('SELECT * from tracks WHERE AlbumID=?', [albumid])
if not release or not tracks:
release_list = None
# Fetch album information from MusicBrainz
try:
release_list = mb.getReleaseGroup(albumid)
except Exception as e:
logger.error(
'Unable to get release information for manual album with rgid: %s. Error: %s',
albumid, e)
return
if not release_list:
logger.error('Unable to get release information for manual album with rgid: %s',
albumid)
return
# Since we're just using this to create the bare minimum information to
# insert an artist/album combo, use the first release
releaseid = release_list[0]['id']
release_dict = mb.getRelease(releaseid)
if not release_dict:
logger.error(
'Unable to get release information for manual album with rgid: %s. Cannot continue',
albumid)
return
# Check if the artist is added to the database. In case the database is
# frozen during post processing, new artists will not be processed. This
# prevents new artists from appearing suddenly. In case forced is True,
# this check is skipped, since it is assumed the user wants this.
if headphones.CONFIG.FREEZE_DB and not forced:
artist = myDB.select(
"SELECT ArtistName, ArtistID FROM artists WHERE ArtistId=? OR ArtistName=?",
[release_dict['artist_id'], release_dict['artist_name']])
if not artist:
logger.warn("Continuing would add new artist '%s' (ID %s), "
"but database is frozen. Will skip postprocessing for "
"album with rgid: %s", release_dict['artist_name'],
release_dict['artist_id'], albumid)
myDB.action(
'UPDATE snatched SET status = "Frozen" WHERE status NOT LIKE "Seed%" and AlbumID=?',
[albumid])
frozen = re.search(r' \(Frozen\)(?:\[\d+\])?', albumpath)
if not frozen:
if headphones.CONFIG.RENAME_FROZEN:
renameUnprocessedFolder(albumpath, tag="Frozen")
else:
logger.warn("Won't rename %s to mark as 'Frozen', because it is disabled.",
albumpath)
return
logger.info("Now adding/updating artist: " + release_dict['artist_name'])
if release_dict['artist_name'].startswith('The '):
sortname = release_dict['artist_name'][4:]
else:
sortname = release_dict['artist_name']
controlValueDict = {"ArtistID": release_dict['artist_id']}
newValueDict = {"ArtistName": release_dict['artist_name'],
"ArtistSortName": sortname,
"DateAdded": helpers.today(),
"Status": "Paused"}
logger.info("ArtistID: " + release_dict['artist_id'] + " , ArtistName: " + release_dict[
'artist_name'])
if headphones.CONFIG.INCLUDE_EXTRAS:
newValueDict['IncludeExtras'] = 1
newValueDict['Extras'] = headphones.CONFIG.EXTRAS
myDB.upsert("artists", newValueDict, controlValueDict)
logger.info("Now adding album: " + release_dict['title'])
controlValueDict = {"AlbumID": albumid}
newValueDict = {"ArtistID": release_dict['artist_id'],
"ReleaseID": albumid,
"ArtistName": release_dict['artist_name'],
"AlbumTitle": release_dict['title'],
"AlbumASIN": release_dict['asin'],
"ReleaseDate": release_dict['date'],
"DateAdded": helpers.today(),
"Type": release_dict['rg_type'],
"Status": "Snatched"
}
myDB.upsert("albums", newValueDict, controlValueDict)
# Delete existing tracks associated with this AlbumID since we're going to replace them and don't want any extras
myDB.action('DELETE from tracks WHERE AlbumID=?', [albumid])
for track in release_dict['tracks']:
controlValueDict = {"TrackID": track['id'],
"AlbumID": albumid}
clean_name = helpers.clean_name(
release_dict['artist_name'] + ' ' + release_dict['title'] + ' ' + track['title'])
newValueDict = {"ArtistID": release_dict['artist_id'],
"ArtistName": release_dict['artist_name'],
"AlbumTitle": release_dict['title'],
"AlbumASIN": release_dict['asin'],
"TrackTitle": track['title'],
"TrackDuration": track['duration'],
"TrackNumber": track['number'],
"CleanName": clean_name
}
myDB.upsert("tracks", newValueDict, controlValueDict)
controlValueDict = {"ArtistID": release_dict['artist_id']}
newValueDict = {"Status": "Paused"}
myDB.upsert("artists", newValueDict, controlValueDict)
logger.info("Addition complete for: " + release_dict['title'] + " - " + release_dict[
'artist_name'])
release = myDB.action('SELECT * from albums WHERE AlbumID=?', [albumid]).fetchone()
tracks = myDB.select('SELECT * from tracks WHERE AlbumID=?', [albumid])
downloaded_track_list = []
downloaded_cuecount = 0
media_extensions = tuple(map(lambda x: '.' + x, headphones.MEDIA_FORMATS))
for root, dirs, files in os.walk(albumpath):
for file in files:
if file.endswith(media_extensions):
downloaded_track_list.append(os.path.join(root, file))
elif file.endswith('.cue'):
downloaded_cuecount += 1
# if any of the files end in *.part, we know the torrent isn't done yet. Process if forced, though
elif file.endswith(('.part', '.utpart')) and not forced:
logger.info(
"Looks like " + os.path.basename(albumpath) + " isn't complete yet. Will try again on the next run")
return
# Force single file through
if single and not downloaded_track_list:
downloaded_track_list.append(albumpath)
# Check to see if we're preserving the torrent dir
if (headphones.CONFIG.KEEP_TORRENT_FILES and Kind == "torrent") or headphones.CONFIG.KEEP_ORIGINAL_FOLDER:
keep_original_folder = True
# Split cue before metadata check
if headphones.CONFIG.CUE_SPLIT and downloaded_cuecount and downloaded_cuecount >= len(
downloaded_track_list):
new_folder = None
new_albumpath = albumpath
if keep_original_folder:
temp_path = helpers.preserve_torrent_directory(new_albumpath, forced)
if not temp_path:
markAsUnprocessed(albumid, new_albumpath, keep_original_folder)
return
else:
new_albumpath = temp_path
new_folder = os.path.split(new_albumpath)[0]
Kind = "cue_split"
cuepath = helpers.cue_split(new_albumpath)
if not cuepath:
if new_folder:
shutil.rmtree(new_folder)
markAsUnprocessed(albumid, albumpath, keep_original_folder)
return
else:
albumpath = cuepath
downloaded_track_list = helpers.get_downloaded_track_list(albumpath)
keep_original_folder = False
# test #1: metadata - usually works
logger.debug('Verifying metadata...')
for downloaded_track in downloaded_track_list:
try:
f = MediaFile(downloaded_track)
except Exception as e:
logger.info(f"Exception from MediaFile for {downloaded_track}: {e}")
continue
if not f.artist:
continue
if not f.album:
continue
metaartist = helpers.latinToAscii(f.artist.lower())
dbartist = helpers.latinToAscii(release['ArtistName'].lower())
metaalbum = helpers.latinToAscii(f.album.lower())
dbalbum = helpers.latinToAscii(release['AlbumTitle'].lower())
logger.debug('Matching metadata artist: %s with artist name: %s' % (metaartist, dbartist))
logger.debug('Matching metadata album: %s with album name: %s' % (metaalbum, dbalbum))
if metaartist == dbartist and metaalbum == dbalbum:
doPostProcessing(albumid, albumpath, release, tracks, downloaded_track_list, Kind,
keep_original_folder, forced, single)
return
# test #2: filenames
logger.debug('Metadata check failed. Verifying filenames...')
for downloaded_track in downloaded_track_list:
track_name = os.path.splitext(downloaded_track)[0]
split_track_name = re.sub(r'[\.\-\_]', r' ', track_name).lower()
for track in tracks:
if not track['TrackTitle']:
continue
dbtrack = helpers.latinToAscii(track['TrackTitle'].lower())
filetrack = helpers.latinToAscii(split_track_name)
logger.debug('Checking if track title: %s is in file name: %s' % (dbtrack, filetrack))
if dbtrack in filetrack:
doPostProcessing(albumid, albumpath, release, tracks, downloaded_track_list, Kind,
keep_original_folder, forced, single)
return
# test #3: number of songs and duration
logger.debug('Filename check failed. Verifying album length...')
db_track_duration = 0
downloaded_track_duration = 0
logger.debug('Total music files in %s: %i' % (albumpath, len(downloaded_track_list)))
logger.debug('Total tracks for this album in the database: %i' % len(tracks))
if len(tracks) == len(downloaded_track_list):
        for track in tracks:
            try:
                db_track_duration += track['TrackDuration'] / 1000
            except TypeError:
                # TrackDuration can be NULL in the database
                db_track_duration = False
                break
        for downloaded_track in downloaded_track_list:
            try:
                f = MediaFile(downloaded_track)
                downloaded_track_duration += f.length
            except Exception:
                downloaded_track_duration = False
                break
if downloaded_track_duration and db_track_duration:
logger.debug('Downloaded album duration: %i' % downloaded_track_duration)
logger.debug('Database track duration: %i' % db_track_duration)
delta = abs(downloaded_track_duration - db_track_duration)
if delta < 240:
doPostProcessing(albumid, albumpath, release, tracks, downloaded_track_list, Kind,
keep_original_folder, forced, single)
return
logger.warn(f"Could not identify {albumpath}. It may not be the intended album")
markAsUnprocessed(albumid, albumpath, keep_original_folder)
def markAsUnprocessed(albumid, albumpath, keep_original_folder=False):
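    """
    Flag the snatched album as unprocessed and, when enabled, rename the
    folder with an '(Unprocessed)' tag.
    """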
myDB = db.DBConnection()
myDB.action(
'UPDATE snatched SET status = "Unprocessed" WHERE status NOT LIKE "Seed%" and AlbumID=?', [albumid])
processed = re.search(r' \(Unprocessed\)(?:\[\d+\])?', albumpath)
if not processed:
if headphones.CONFIG.RENAME_UNPROCESSED and not keep_original_folder:
renameUnprocessedFolder(albumpath, tag="Unprocessed")
else:
logger.warn(
f"Won't rename {albumpath} to mark as 'Unprocessed', "
f"because it is disabled or folder is being kept."
)
return
def doPostProcessing(albumid, albumpath, release, tracks, downloaded_track_list, Kind=None,
keep_original_folder=False, forced=False, single=False):
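    """
    Run the configured post-processing steps for a verified download
    (embed art, clean up, correct metadata, lyrics, rename, move), then
    update the database and fire notifications.
    """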
logger.info(
f"Starting post-processing for: {release['ArtistName']} - "
f"{release['AlbumTitle']}"
)
new_folder = None
# Preserve the torrent dir
if keep_original_folder or single:
temp_path = helpers.preserve_torrent_directory(albumpath, forced, single)
if not temp_path:
markAsUnprocessed(albumid, albumpath, keep_original_folder)
return
else:
albumpath = temp_path
new_folder = os.path.split(albumpath)[0]
elif Kind == "cue_split":
new_folder = os.path.split(albumpath)[0]
# Need to update the downloaded track list with the new location.
# Could probably just throw in the "headphones-modified" folder,
# but this is good to make sure we're not counting files that may have failed to move
if new_folder:
downloaded_track_list = []
for r, d, f in os.walk(albumpath):
for files in f:
if any(files.lower().endswith('.' + x.lower()) for x in headphones.MEDIA_FORMATS):
downloaded_track_list.append(os.path.join(r, files))
builder = metadata.AlbumMetadataBuilder()
# Check if files are valid media files and are writable, before the steps
# below are executed. This simplifies errors and prevents unfinished steps.
for downloaded_track in downloaded_track_list:
try:
f = MediaFile(downloaded_track)
builder.add_media_file(f)
except (FileTypeError, UnreadableFileError):
logger.error(f"`{downloaded_track}` is not a valid media file. Not continuing.")
return
except IOError:
logger.error(f"Unable to find `{downloaded_track}`. Not continuing.")
if new_folder:
shutil.rmtree(new_folder)
return
        # If one of the options below is set, it will access/touch/modify the
        # files, which requires write permissions. This step just checks that,
        # so it will not try and fail later on with strange exceptions.
if headphones.CONFIG.EMBED_ALBUM_ART or headphones.CONFIG.CLEANUP_FILES or \
headphones.CONFIG.ADD_ALBUM_ART or headphones.CONFIG.CORRECT_METADATA or \
headphones.CONFIG.EMBED_LYRICS or headphones.CONFIG.RENAME_FILES or \
headphones.CONFIG.MOVE_FILES:
try:
with open(downloaded_track, "a+b") as fp:
fp.seek(0)
except IOError as e:
logger.debug("Write check exact error: %s", e)
logger.error(
f"`{downloaded_track}` is not writable. This is required "
"for some post processing steps. Not continuing."
)
if new_folder:
shutil.rmtree(new_folder)
return
metadata_dict = builder.build()
# start encoding
if headphones.CONFIG.MUSIC_ENCODER:
downloaded_track_list = music_encoder.encode(albumpath)
if not downloaded_track_list:
if new_folder:
shutil.rmtree(new_folder)
return
# get artwork and path
album_art_path = None
artwork = None
if headphones.CONFIG.EMBED_ALBUM_ART or headphones.CONFIG.ADD_ALBUM_ART or \
(headphones.CONFIG.PLEX_ENABLED and headphones.CONFIG.PLEX_NOTIFY) or \
(headphones.CONFIG.XBMC_ENABLED and headphones.CONFIG.XBMC_NOTIFY):
album_art_path, artwork = albumart.getAlbumArt(albumid)
if headphones.CONFIG.EMBED_ALBUM_ART and artwork:
embedAlbumArt(artwork, downloaded_track_list)
if headphones.CONFIG.CLEANUP_FILES:
cleanupFiles(albumpath)
if headphones.CONFIG.KEEP_NFO:
renameNFO(albumpath)
if headphones.CONFIG.ADD_ALBUM_ART and artwork:
addAlbumArt(artwork, albumpath, release, metadata_dict)
if headphones.CONFIG.CORRECT_METADATA:
correctedMetadata = correctMetadata(albumid, release, downloaded_track_list)
if not correctedMetadata and headphones.CONFIG.DO_NOT_PROCESS_UNMATCHED:
if new_folder:
shutil.rmtree(new_folder)
return
if headphones.CONFIG.EMBED_LYRICS:
embedLyrics(downloaded_track_list)
if headphones.CONFIG.RENAME_FILES:
renameFiles(albumpath, downloaded_track_list, release)
if headphones.CONFIG.MOVE_FILES and not headphones.CONFIG.DESTINATION_DIR:
logger.error(
'No DESTINATION_DIR has been set. Set "Destination Directory" to the parent directory you want to move the files to')
albumpaths = [albumpath]
elif headphones.CONFIG.MOVE_FILES and headphones.CONFIG.DESTINATION_DIR:
albumpaths = moveFiles(albumpath, release, metadata_dict)
else:
albumpaths = [albumpath]
if headphones.CONFIG.FILE_PERMISSIONS_ENABLED:
updateFilePermissions(albumpaths)
myDB = db.DBConnection()
myDB.action('UPDATE albums SET status = "Downloaded" WHERE AlbumID=?', [albumid])
myDB.action(
'UPDATE snatched SET status = "Processed" WHERE Status NOT LIKE "Seed%" and AlbumID=?',
[albumid])
# Check if torrent has finished seeding
if headphones.CONFIG.TORRENT_DOWNLOADER != 0:
seed_snatched = myDB.action(
'SELECT * from snatched WHERE Status="Seed_Snatched" and AlbumID=?',
[albumid]).fetchone()
if seed_snatched:
hash = seed_snatched['TorrentHash']
torrent_removed = False
logger.info('%s - %s. Checking if torrent has finished seeding and can be removed' % (
release['ArtistName'], release['AlbumTitle']))
if headphones.CONFIG.TORRENT_DOWNLOADER == 1:
torrent_removed = transmission.removeTorrent(hash, True)
elif headphones.CONFIG.TORRENT_DOWNLOADER == 3: # Deluge
torrent_removed = deluge.removeTorrent(hash, True)
elif headphones.CONFIG.TORRENT_DOWNLOADER == 2:
torrent_removed = utorrent.removeTorrent(hash, True)
else:
torrent_removed = qbittorrent.removeTorrent(hash, True)
# Torrent removed, delete the snatched record, else update Status for scheduled job to check
if torrent_removed:
myDB.action('DELETE from snatched WHERE status = "Seed_Snatched" and AlbumID=?',
[albumid])
else:
myDB.action(
'UPDATE snatched SET status = "Seed_Processed" WHERE status = "Seed_Snatched" and AlbumID=?',
[albumid])
# Update the have tracks for all created dirs:
for albumpath in albumpaths:
librarysync.libraryScan(dir=albumpath, append=True, ArtistID=release['ArtistID'],
ArtistName=release['ArtistName'])
logger.info(
'Post-processing for %s - %s complete' % (release['ArtistName'], release['AlbumTitle']))
pushmessage = release['ArtistName'] + ' - ' + release['AlbumTitle']
statusmessage = "Download and Postprocessing completed"
if headphones.CONFIG.GROWL_ENABLED:
logger.info("Growl request")
growl = notifiers.GROWL()
growl.notify(pushmessage, statusmessage)
if headphones.CONFIG.PROWL_ENABLED:
logger.info("Prowl request")
prowl = notifiers.PROWL()
prowl.notify(pushmessage, statusmessage)
if headphones.CONFIG.XBMC_ENABLED:
xbmc = notifiers.XBMC()
if headphones.CONFIG.XBMC_UPDATE:
xbmc.update()
if headphones.CONFIG.XBMC_NOTIFY:
xbmc.notify(release['ArtistName'],
release['AlbumTitle'],
album_art_path)
if headphones.CONFIG.LMS_ENABLED:
lms = notifiers.LMS()
lms.update()
if headphones.CONFIG.PLEX_ENABLED:
plex = notifiers.Plex()
if headphones.CONFIG.PLEX_UPDATE:
plex.update()
if headphones.CONFIG.PLEX_NOTIFY:
plex.notify(release['ArtistName'],
release['AlbumTitle'],
album_art_path)
if headphones.CONFIG.NMA_ENABLED:
nma = notifiers.NMA()
nma.notify(release['ArtistName'], release['AlbumTitle'])
if headphones.CONFIG.PUSHALOT_ENABLED:
logger.info("Pushalot request")
pushalot = notifiers.PUSHALOT()
pushalot.notify(pushmessage, statusmessage)
if headphones.CONFIG.SYNOINDEX_ENABLED:
syno = notifiers.Synoindex()
for albumpath in albumpaths:
syno.notify(albumpath)
if headphones.CONFIG.PUSHOVER_ENABLED:
logger.info("Pushover request")
pushover = notifiers.PUSHOVER()
pushover.notify(pushmessage, "Headphones")
if headphones.CONFIG.PUSHBULLET_ENABLED:
logger.info("PushBullet request")
pushbullet = notifiers.PUSHBULLET()
pushbullet.notify(pushmessage, statusmessage)
if headphones.CONFIG.JOIN_ENABLED:
logger.info("Join request")
join = notifiers.JOIN()
join.notify(pushmessage, statusmessage)
if headphones.CONFIG.TELEGRAM_ENABLED:
logger.info("Telegram request")
telegram = notifiers.TELEGRAM()
telegram.notify(statusmessage, pushmessage)
if headphones.CONFIG.TWITTER_ENABLED:
logger.info("Twitter notifications temporarily disabled")
#logger.info("Sending Twitter notification")
#twitter = notifiers.TwitterNotifier()
#twitter.notify_download(pushmessage)
if headphones.CONFIG.OSX_NOTIFY_ENABLED:
from headphones import cache
c = cache.Cache()
album_art = c.get_artwork_from_cache(None, release['AlbumID'])
logger.info("Sending OS X notification")
osx_notify = notifiers.OSX_NOTIFY()
osx_notify.notify(release['ArtistName'],
release['AlbumTitle'],
statusmessage,
image=album_art)
if headphones.CONFIG.BOXCAR_ENABLED:
logger.info("Sending Boxcar2 notification")
boxcar = notifiers.BOXCAR()
boxcar.notify('Headphones processed: ' + pushmessage,
statusmessage, release['AlbumID'])
if headphones.CONFIG.SUBSONIC_ENABLED:
logger.info("Sending Subsonic update")
subsonic = notifiers.SubSonicNotifier()
subsonic.notify(albumpaths)
if headphones.CONFIG.MPC_ENABLED:
mpc = notifiers.MPC()
mpc.notify()
if headphones.CONFIG.EMAIL_ENABLED:
logger.info("Sending Email notification")
email = notifiers.Email()
subject = release['ArtistName'] + ' - ' + release['AlbumTitle']
email.notify(subject, "Download and Postprocessing completed")
if new_folder:
shutil.rmtree(new_folder)
def embedAlbumArt(artwork, downloaded_track_list):
logger.info('Embedding album art')
for downloaded_track in downloaded_track_list:
try:
f = MediaFile(downloaded_track)
except:
logger.error(f"Could not read {downloaded_track}. Not adding album art")
continue
logger.debug(f"Adding album art to `{downloaded_track}`")
try:
f.art = artwork
f.save()
except Exception as e:
logger.error(f"Error embedding album art to `{downloaded_track}`: {e}")
continue
def addAlbumArt(artwork, albumpath, release, metadata_dict):
logger.info(f"Adding album art to `{albumpath}`")
md = metadata.album_metadata(albumpath, release, metadata_dict)
ext = ".jpg"
    # PNGs are possible here too; artwork is bytes, so compare against bytes
    if artwork[:4] == b'\x89PNG':
ext = ".png"
album_art_name = helpers.pattern_substitute(
headphones.CONFIG.ALBUM_ART_FORMAT.strip(), md) + ext
album_art_name = helpers.replace_illegal_chars(album_art_name)
if headphones.CONFIG.FILE_UNDERSCORES:
album_art_name = album_art_name.replace(' ', '_')
if album_art_name.startswith('.'):
album_art_name = album_art_name.replace(".", "_", 1)
try:
with open(os.path.join(albumpath, album_art_name), 'wb') as f:
f.write(artwork)
except IOError as e:
logger.error('Error saving album art: %s', e)
return
def cleanupFiles(albumpath):
logger.info('Cleaning up files')
for r, d, f in os.walk(albumpath):
for file in f:
if not any(file.lower().endswith('.' + x.lower()) for x in headphones.MEDIA_FORMATS):
logger.debug('Removing: %s' % file)
try:
os.remove(os.path.join(r, file))
except Exception as e:
logger.error('Could not remove file: %s. Error: %s' % (file, e))
def renameNFO(albumpath):
logger.info('Renaming NFO')
for r, d, f in os.walk(albumpath):
for file in f:
if file.lower().endswith('.nfo'):
if not file.lower().endswith('.orig.nfo'):
try:
new_file_name = os.path.join(r, file)[:-3] + 'orig.nfo'
logger.debug(f"Renaming `{file}` to `{new_file_name}`")
os.rename(os.path.join(r, file), new_file_name)
except Exception as e:
logger.error(f"Could not rename {file}: {e}")
def moveFiles(albumpath, release, metadata_dict):
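    """
    Move the album's files into the configured destination folder(s),
    splitting lossy and lossless media when a separate lossless directory
    is set. Returns the list of destination paths.
    """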
logger.info(f"Moving files: `{albumpath}`")
md = metadata.album_metadata(albumpath, release, metadata_dict)
folder = helpers.pattern_substitute(
headphones.CONFIG.FOLDER_FORMAT.strip(), md, normalize=True)
if headphones.CONFIG.FILE_UNDERSCORES:
folder = folder.replace(' ', '_')
folder = helpers.replace_illegal_chars(folder, type="folder")
folder = folder.replace('./', '_/').replace('/.', '/_')
if folder.endswith('.'):
folder = folder[:-1] + '_'
if folder.startswith('.'):
folder = '_' + folder[1:]
# Grab our list of files early on so we can determine if we need to create
# the lossy_dest_dir, lossless_dest_dir, or both
files_to_move = []
lossy_media = False
lossless_media = False
for r, d, f in os.walk(albumpath):
for files in f:
files_to_move.append(os.path.join(r, files))
if any(files.lower().endswith('.' + x.lower()) for x in headphones.LOSSY_MEDIA_FORMATS):
lossy_media = True
if any(files.lower().endswith('.' + x.lower()) for x in
headphones.LOSSLESS_MEDIA_FORMATS):
lossless_media = True
# Do some sanity checking to see what directories we need to create:
make_lossy_folder = False
make_lossless_folder = False
lossy_destination_path = os.path.join(headphones.CONFIG.DESTINATION_DIR, folder)
lossless_destination_path = os.path.join(headphones.CONFIG.LOSSLESS_DESTINATION_DIR, folder)
# If they set a destination dir for lossless media, only create the lossy folder if there is lossy media
if headphones.CONFIG.LOSSLESS_DESTINATION_DIR:
if lossy_media:
make_lossy_folder = True
if lossless_media:
make_lossless_folder = True
# If they haven't set a lossless dest_dir, just create the "lossy" folder
else:
make_lossy_folder = True
last_folder = headphones.CONFIG.FOLDER_FORMAT.strip().split('/')[-1]
if make_lossless_folder:
# Only rename the folder if they use the album name, otherwise merge into existing folder
if os.path.exists(lossless_destination_path) and 'album' in last_folder.lower():
create_duplicate_folder = False
if headphones.CONFIG.REPLACE_EXISTING_FOLDERS:
try:
shutil.rmtree(lossless_destination_path)
except Exception as e:
logger.error(
f"Error deleting `{lossless_destination_path}`. "
f"Creating duplicate folder. Error: {e}"
)
create_duplicate_folder = True
if not headphones.CONFIG.REPLACE_EXISTING_FOLDERS or create_duplicate_folder:
temp_folder = folder
i = 1
while True:
newfolder = temp_folder + '[%i]' % i
lossless_destination_path = os.path.normpath(
os.path.join(
headphones.CONFIG.LOSSLESS_DESTINATION_DIR,
newfolder
)
)
if os.path.exists(lossless_destination_path):
i += 1
else:
temp_folder = newfolder
break
if not os.path.exists(lossless_destination_path):
try:
os.makedirs(lossless_destination_path)
except Exception as e:
logger.error('Could not create lossless folder for %s. (Error: %s)' % (
release['AlbumTitle'], e))
if not make_lossy_folder:
return [albumpath]
if make_lossy_folder:
if os.path.exists(lossy_destination_path) and 'album' in last_folder.lower():
create_duplicate_folder = False
if headphones.CONFIG.REPLACE_EXISTING_FOLDERS:
try:
shutil.rmtree(lossy_destination_path)
except Exception as e:
logger.error(
f"Error deleting `{lossy_destination_path}`. "
f"Creating duplicate folder. Error: {e}"
)
create_duplicate_folder = True
if not headphones.CONFIG.REPLACE_EXISTING_FOLDERS or create_duplicate_folder:
temp_folder = folder
i = 1
while True:
newfolder = temp_folder + '[%i]' % i
lossy_destination_path = os.path.normpath(
os.path.join(
headphones.CONFIG.DESTINATION_DIR,
newfolder
)
)
if os.path.exists(lossy_destination_path):
i += 1
else:
temp_folder = newfolder
break
if not os.path.exists(lossy_destination_path):
try:
os.makedirs(lossy_destination_path)
except Exception as e:
logger.error(
'Could not create folder for %s. Not moving: %s' % (release['AlbumTitle'], e))
return [albumpath]
    logger.info('Checking which files we need to move...')
    # Move files to the destination folder, renaming them if they already exist
    # If we have two destination_dirs, move non-music files to both
if make_lossy_folder and make_lossless_folder:
for file_to_move in files_to_move:
if any(file_to_move.lower().endswith('.' + x.lower()) for x in
headphones.LOSSY_MEDIA_FORMATS):
helpers.smartMove(file_to_move, lossy_destination_path)
elif any(file_to_move.lower().endswith('.' + x.lower()) for x in
headphones.LOSSLESS_MEDIA_FORMATS):
helpers.smartMove(file_to_move, lossless_destination_path)
# If it's a non-music file, move it to both dirs
# TODO: Move specific-to-lossless files to the lossless dir only
else:
moved_to_lossy_folder = helpers.smartMove(file_to_move, lossy_destination_path,
delete=False)
moved_to_lossless_folder = helpers.smartMove(file_to_move,
lossless_destination_path,
delete=False)
if moved_to_lossy_folder or moved_to_lossless_folder:
try:
os.remove(file_to_move)
                except Exception as e:
                    logger.error(
                        f"Error deleting `{file_to_move}` from source directory: {e}")
else:
logger.error(
f"Error copying `{file_to_move}`. "
f"Not deleting from download directory")
elif make_lossless_folder and not make_lossy_folder:
for file_to_move in files_to_move:
helpers.smartMove(file_to_move, lossless_destination_path)
else:
for file_to_move in files_to_move:
helpers.smartMove(file_to_move, lossy_destination_path)
# Chmod the directories using the folder_format (script courtesy of premiso!)
folder_list = folder.split('/')
temp_fs = []
if make_lossless_folder:
temp_fs.append(headphones.CONFIG.LOSSLESS_DESTINATION_DIR)
if make_lossy_folder:
temp_fs.append(headphones.CONFIG.DESTINATION_DIR)
for temp_f in temp_fs:
for f in folder_list:
temp_f = os.path.join(temp_f, f)
if headphones.CONFIG.FOLDER_PERMISSIONS_ENABLED:
try:
os.chmod(os.path.normpath(temp_f),
int(headphones.CONFIG.FOLDER_PERMISSIONS, 8))
except Exception as e:
logger.error(f"Error trying to change permissions on `{temp_f}`: {e}")
else:
logger.debug(
f"Not changing permissions on `{temp_f}`, "
"since it is disabled")
# If we failed to move all the files out of the directory, this will fail too
try:
shutil.rmtree(albumpath)
except Exception as e:
logger.error(f"Could not remove `{albumpath}`: {e}")
destination_paths = []
if make_lossy_folder:
destination_paths.append(lossy_destination_path)
if make_lossless_folder:
destination_paths.append(lossless_destination_path)
return destination_paths
def correctMetadata(albumid, release, downloaded_track_list):
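    """
    Tag the downloaded tracks with beets' autotagger, using the release
    info from the database as search hints. Returns True when metadata was
    applied, False when no confident match was found or writing failed.
    """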
logger.info('Preparing to write metadata to tracks....')
lossy_items = []
lossless_items = []
# Process lossless & lossy media formats separately
for downloaded_track in downloaded_track_list:
try:
if any(downloaded_track.lower().endswith('.' + x.lower()) for x in
headphones.LOSSLESS_MEDIA_FORMATS):
lossless_items.append(beets.library.Item.from_path(downloaded_track))
elif any(downloaded_track.lower().endswith('.' + x.lower()) for x in
headphones.LOSSY_MEDIA_FORMATS):
lossy_items.append(beets.library.Item.from_path(downloaded_track))
else:
logger.warn(
f"Skipping `{downloaded_track}` because it is "
f"not a mutagen friendly file format"
)
continue
except Exception as e:
logger.error(
f"Beets couldn't create an Item from `{downloaded_track}`: {e}")
continue
for items in [lossy_items, lossless_items]:
if not items:
continue
search_ids = []
logger.debug('Getting recommendation from beets. Artist: %s. Album: %s. Tracks: %s', release['ArtistName'],
release['AlbumTitle'], len(items))
# Try with specific release, e.g. alternate release selected from albumPage
if release['ReleaseID'] != release['AlbumID']:
logger.debug('trying beets with specific Release ID: %s', release['ReleaseID'])
search_ids = [release['ReleaseID']]
try:
beetslog = beetslogging.getLogger('beets')
            if headphones.VERBOSE:
                beetslog.set_global_level(beetslogging.DEBUG)
            else:
                beetslog.set_global_level(beetslogging.CRITICAL)
with helpers.capture_beets_log() as logs:
cur_artist, cur_album, prop = autotag.tag_album(items,
search_artist=release['ArtistName'],
search_album=release['AlbumTitle'],
search_ids=search_ids)
candidates = prop.candidates
rec = prop.recommendation
for log in logs:
logger.debug('Beets: %s', log)
beetslog.set_global_level(beetslogging.NOTSET)
except Exception as e:
logger.error('Error getting recommendation: %s. Not writing metadata', e)
return False
if str(rec) == 'Recommendation.none':
logger.warn('No accurate album match found for %s, %s - not writing metadata',
release['ArtistName'], release['AlbumTitle'])
return False
if candidates:
dist, info, mapping, extra_items, extra_tracks = candidates[0]
else:
logger.warn('No accurate album match found for %s, %s - not writing metadata',
release['ArtistName'], release['AlbumTitle'])
return False
logger.info('Beets recommendation for tagging items: %s' % rec)
# TODO: Handle extra_items & extra_tracks
autotag.apply_metadata(info, mapping)
# Set ID3 tag version
if headphones.CONFIG.IDTAG:
beetsconfig['id3v23'] = True
logger.debug("Using ID3v2.3")
else:
beetsconfig['id3v23'] = False
logger.debug("Using ID3v2.4")
for item in items:
try:
item.write()
logger.info(f"Successfully applied metadata to `{item.path}`")
except Exception as e:
logger.warn(f"Error writing metadata to `{item.path}: {e}")
return False
return True
def embedLyrics(downloaded_track_list):
logger.info('Adding lyrics')
# TODO: If adding lyrics for flac & lossy, only fetch the lyrics once and apply it to both files
# TODO: Get beets to add automatically by enabling the plugin
lossy_items = []
lossless_items = []
lp = beetslyrics.LyricsPlugin()
for downloaded_track in downloaded_track_list:
try:
if any(downloaded_track.lower().endswith('.' + x.lower()) for x in
headphones.LOSSLESS_MEDIA_FORMATS):
lossless_items.append(beets.library.Item.from_path(downloaded_track))
elif any(downloaded_track.lower().endswith('.' + x.lower()) for x in
headphones.LOSSY_MEDIA_FORMATS):
lossy_items.append(beets.library.Item.from_path(downloaded_track))
else:
logger.warn(
f"Skipping `{downloaded_track}` because it is "
f"not a mutagen friendly file format")
except Exception as e:
logger.error(f"Beets couldn't create an Item from `{downloaded_track}`: {e}")
for items in [lossy_items, lossless_items]:
if not items:
continue
for item in items:
            lyrics = []  # start as a list so the join below is safe if no lyrics are found
for artist, titles in beetslyrics.search_pairs(item):
lyrics = [lp.get_lyrics(artist, title) for title in titles]
if any(lyrics):
break
lyrics = "\n\n---\n\n".join([l for l in lyrics if l])
if lyrics:
logger.debug('Adding lyrics to: %s', item.title)
item.lyrics = lyrics
try:
item.write()
except Exception as e:
                    logger.error('Cannot save lyrics to: %s. Skipping. Error: %s', item.title, e)
else:
logger.debug('No lyrics found for track: %s', item.title)
def renameFiles(albumpath, downloaded_track_list, release):
logger.info('Renaming files')
# Until tagging works better I'm going to rely on the already provided metadata
for downloaded_track in downloaded_track_list:
md, from_metadata = metadata.file_metadata(
downloaded_track,
release,
headphones.CONFIG.RENAME_SINGLE_DISC_IGNORE
)
if md is None:
# unable to parse media file, skip file
continue
ext = md[metadata.Vars.EXTENSION]
if not from_metadata:
title = md[metadata.Vars.TITLE]
new_file_name = helpers.cleanTitle(title) + ext
else:
new_file_name = helpers.pattern_substitute(
headphones.CONFIG.FILE_FORMAT.strip(), md
).replace('/', '_') + ext
new_file_name = helpers.replace_illegal_chars(new_file_name)
if headphones.CONFIG.FILE_UNDERSCORES:
new_file_name = new_file_name.replace(' ', '_')
if new_file_name.startswith('.'):
new_file_name = new_file_name.replace(".", "_", 1)
new_file = os.path.join(albumpath, new_file_name)
        # compare full paths; new_file_name is just the base name
        if downloaded_track == new_file:
            logger.debug(f"Renaming for {downloaded_track} is not necessary")
continue
logger.debug(f"Renaming {downloaded_track} ---> {new_file_name}")
try:
os.rename(downloaded_track, new_file)
except Exception as e:
logger.error(f"Error renaming {downloaded_track}: {e}")
continue
def updateFilePermissions(albumpaths):
for folder in albumpaths:
logger.info(f"Updating file permissions in `{folder}`")
for r, d, f in os.walk(folder):
for files in f:
full_path = os.path.join(r, files)
try:
os.chmod(full_path, int(headphones.CONFIG.FILE_PERMISSIONS, 8))
                except OSError as e:
                    logger.error(f"Could not change permissions for `{full_path}`: {e}")
continue
def renameUnprocessedFolder(path, tag):
"""
    Rename an unprocessed folder to a new unique name to indicate a certain
status.
"""
for i in itertools.count():
if i == 0:
new_path = "%s (%s)" % (path, tag)
else:
new_path = "%s (%s[%d])" % (path, tag, i)
        # itertools.count() already advances i, so just try the next name
        if not os.path.exists(new_path):
            os.rename(path, new_path)
            return
def forcePostProcess(dir=None, expand_subfolders=True, album_dir=None, keep_original_folder=False):
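    """
    Manually scan the download folders (or a single given folder) and try
    to match each subfolder to an album: first via the snatched table,
    then via a release group id embedded in the folder name, and finally
    by parsing artist/album/year out of the folder name.
    """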
logger.info('Force checking download folder for completed downloads')
ignored = 0
if album_dir:
folders = [album_dir]
else:
download_dirs = []
if dir:
download_dirs.append(dir)
else:
if headphones.CONFIG.DOWNLOAD_DIR:
download_dirs.append(headphones.CONFIG.DOWNLOAD_DIR)
if headphones.CONFIG.SOULSEEK_DOWNLOAD_DIR:
download_dirs.append(headphones.CONFIG.SOULSEEK_DOWNLOAD_DIR)
if headphones.CONFIG.DOWNLOAD_TORRENT_DIR:
download_dirs.append(headphones.CONFIG.DOWNLOAD_TORRENT_DIR)
if headphones.CONFIG.BANDCAMP:
download_dirs.append(headphones.CONFIG.BANDCAMP_DIR)
# If DOWNLOAD_DIR and DOWNLOAD_TORRENT_DIR are the same, remove the duplicate to prevent us from trying to process the same folder twice.
download_dirs = list(set(download_dirs))
logger.debug('Post processing folders: %s', download_dirs)
# Get a list of folders in the download_dir
folders = []
for download_dir in download_dirs:
download_dir = download_dir.encode(headphones.SYS_ENCODING, 'replace')
if not os.path.isdir(download_dir):
logger.warn('Directory %s does not exist. Skipping', download_dir)
continue
# Scan for subfolders
subfolders = os.listdir(download_dir)
ignored += helpers.path_filter_patterns(subfolders,
headphones.CONFIG.IGNORED_FOLDERS,
root=download_dir)
for folder in subfolders:
path_to_folder = os.path.join(download_dir, folder)
if os.path.isdir(path_to_folder):
subfolders = helpers.expand_subfolders(path_to_folder)
if expand_subfolders and subfolders is not None:
                        folders.extend(s.decode(headphones.SYS_ENCODING, 'replace') for s in subfolders)
else:
folders.append(path_to_folder.decode(headphones.SYS_ENCODING, 'replace'))
# Log number of folders
if folders:
logger.debug('Expanded post processing folders: %s', folders)
logger.info('Found %d folders to process (%d ignored).',
len(folders), ignored)
else:
logger.info('Found no folders to process. Aborting.')
return
# Parse the folder names to get artist album info
myDB = db.DBConnection()
for folder in folders:
folder_basename = os.path.basename(folder)
logger.info('Processing: %s', folder_basename)
# Attempt 1: First try to see if there's a match in the snatched table,
# then we'll try to parse the foldername.
# TODO: Iterate through underscores -> spaces, spaces -> dots,
# underscores -> dots (this might be hit or miss since it assumes all
# spaces/underscores came from sab replacing values
logger.debug('Attempting to find album in the snatched table')
snatched = myDB.action(
'SELECT AlbumID, Title, Kind, Status from snatched WHERE FolderName LIKE ?',
[folder_basename]).fetchone()
if snatched:
if headphones.CONFIG.KEEP_TORRENT_FILES and snatched['Kind'] == 'torrent' and snatched[
'Status'] == 'Processed':
logger.info(
'%s is a torrent folder being preserved for seeding and has already been processed. Skipping.',
folder_basename)
continue
else:
logger.info(
'Found a match in the database: %s. Verifying to make sure it is the correct album',
snatched['Title'])
verify(snatched['AlbumID'], folder, snatched['Kind'],
forced=True, keep_original_folder=keep_original_folder)
continue
# Attempt 2: strip release group id from filename
logger.debug('Attempting to extract release group from folder name')
try:
possible_rgid = folder_basename[-36:]
rgid = uuid.UUID(possible_rgid)
except:
rgid = possible_rgid = None
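        # e.g. a folder named 'Artist - Album (2010) 8f9a6b2c-1c2d-4e3f-9a4b-5c6d7e8f9a0b'
        # (hypothetical id): the trailing 36 characters parse as a UUID, so the
        # folder can be matched directly against the albums table below.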
if rgid:
rgid = possible_rgid
release = myDB.action(
'SELECT ArtistName, AlbumTitle, AlbumID from albums WHERE AlbumID=?',
[rgid]).fetchone()
if release:
logger.info(
'Found a match in the database: %s - %s. Verifying to make sure it is the correct album',
release['ArtistName'], release['AlbumTitle'])
verify(release['AlbumID'], folder, forced=True,
keep_original_folder=keep_original_folder)
continue
else:
logger.info(
'Found a (possibly) valid Musicbrainz release group id in album folder name.')
verify(rgid, folder, forced=True,
keep_original_folder=keep_original_folder)
continue
# Attempt 3a: parse the folder name into a valid format
logger.debug('Attempting to extract name, album and year from folder name')
try:
name, album, year = helpers.extract_data(folder_basename)
except Exception:
name = album = year = None
if name and album:
release = myDB.action(
'SELECT AlbumID, ArtistName, AlbumTitle from albums WHERE ArtistName LIKE ? and AlbumTitle LIKE ?',
[name, album]).fetchone()
if release:
logger.info(
'Found a match in the database: %s - %s. Verifying to make sure it is the correct album',
release['ArtistName'], release['AlbumTitle'])
verify(release['AlbumID'], folder, forced=True, keep_original_folder=keep_original_folder)
continue
else:
logger.info('Querying MusicBrainz for the release group id for: %s - %s', name,
album)
try:
rgid = mb.findAlbumID(helpers.latinToAscii(name), helpers.latinToAscii(album))
                except Exception:
                    logger.error('Cannot get release information for this album')
rgid = None
if rgid:
verify(rgid, folder, forced=True, keep_original_folder=keep_original_folder)
continue
else:
logger.info('No match found on MusicBrainz for: %s - %s', name, album)
# Attempt 3b: deduce meta data into a valid format
logger.debug('Attempting to extract name, album and year from metadata')
try:
name, album, year = helpers.extract_metadata(folder)
except Exception:
name = album = None
# Not found from meta data, check if there's a cue to split and try meta data again
kind = None
if headphones.CONFIG.CUE_SPLIT and not name and not album:
cue_folder = helpers.cue_split(folder, keep_original_folder=keep_original_folder)
if cue_folder:
try:
name, album, year = helpers.extract_metadata(cue_folder)
except Exception:
name = album = None
if name:
folder = cue_folder
if keep_original_folder:
keep_original_folder = False
kind = "cue_split"
elif folder != cue_folder:
cue_folder = os.path.split(cue_folder)[0]
shutil.rmtree(cue_folder)
if name and album:
release = myDB.action(
'SELECT AlbumID, ArtistName, AlbumTitle from albums WHERE ArtistName LIKE ? and AlbumTitle LIKE ?',
[name, album]).fetchone()
if release:
logger.info(
'Found a match in the database: %s - %s. Verifying to make sure it is the correct album',
release['ArtistName'], release['AlbumTitle'])
verify(release['AlbumID'], folder, Kind=kind, forced=True, keep_original_folder=keep_original_folder)
continue
else:
logger.info('Querying MusicBrainz for the release group id for: %s - %s', name,
album)
try:
rgid = mb.findAlbumID(helpers.latinToAscii(name), helpers.latinToAscii(album))
                except Exception:
                    logger.error('Cannot get release information for this album')
rgid = None
if rgid:
verify(rgid, folder, Kind=kind, forced=True, keep_original_folder=keep_original_folder)
continue
else:
logger.info('No match found on MusicBrainz for: %s - %s', name, album)
# Attempt 4: Hail mary. Just assume the folder name is the album name
# if it doesn't have a separator in it
logger.debug('Attempt to extract album name by assuming it is the folder name')
if '-' not in folder_basename:
release = myDB.action(
'SELECT AlbumID, ArtistName, AlbumTitle from albums WHERE AlbumTitle LIKE ?',
[folder_basename]).fetchone()
if release:
logger.info(
'Found a match in the database: %s - %s. Verifying to make sure it is the correct album',
release['ArtistName'], release['AlbumTitle'])
verify(release['AlbumID'], folder, forced=True, keep_original_folder=keep_original_folder)
continue
else:
logger.info('Querying MusicBrainz for the release group id for: %s',
folder_basename)
try:
rgid = mb.findAlbumID(album=helpers.latinToAscii(folder_basename))
                except Exception:
                    logger.error('Cannot get release information for this album')
rgid = None
if rgid:
verify(rgid, folder, forced=True, keep_original_folder=keep_original_folder)
continue
else:
                logger.info('No match found on MusicBrainz for: %s', folder_basename)
# Fail here
logger.info("Couldn't parse '%s' into any valid format. If adding "
"albums from another source, they must be in an 'Artist - Album "
"[Year]' format, or end with the musicbrainz release group id.",
folder_basename)
| 59,381 | Python | .py | 1,176 | 36.988095 | 145 | 0.587265 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
| 9,283 | librarysync.py | rembo10_headphones/headphones/librarysync.py |
# This file is part of Headphones.
#
# Headphones is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Headphones is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Headphones. If not, see <http://www.gnu.org/licenses/>.
import os
import math
import headphones
from mediafile import MediaFile, FileTypeError, UnreadableFileError
from headphones import db, logger, helpers, importer, lastfm
# You can scan a single directory and append it to the current library by
# specifying append=True, ArtistID and ArtistName.
def libraryScan(dir=None, append=False, ArtistID=None, ArtistName=None,
cron=False, artistScan=False):
if cron and not headphones.CONFIG.LIBRARYSCAN:
return
if not dir:
if not headphones.CONFIG.MUSIC_DIR:
logger.info(
"No music directory configured. Add it under "
"Manage -> Scan Music Library"
)
return
else:
dir = headphones.CONFIG.MUSIC_DIR
if not os.path.isdir(dir):
logger.warn(f"Cannot find music directory: {dir}")
return
myDB = db.DBConnection()
new_artists = []
logger.info(f"Scanning music directory: {dir}")
if not append:
# Clean up bad filepaths. Queries can take some time, ensure all results are loaded before processing
if ArtistID:
dbtracks = myDB.action(
'SELECT Location FROM alltracks WHERE ArtistID = ? AND Location IS NOT NULL UNION SELECT Location FROM tracks WHERE ArtistID = ? AND Location '
'IS NOT NULL',
[ArtistID, ArtistID])
else:
dbtracks = myDB.action(
'SELECT Location FROM alltracks WHERE Location IS NOT NULL UNION SELECT Location FROM tracks WHERE Location IS NOT NULL')
for track in dbtracks:
track_location = track['Location']
if not os.path.isfile(track_location):
myDB.action('UPDATE tracks SET Location=?, BitRate=?, Format=? WHERE Location=?',
[None, None, None, track_location])
myDB.action('UPDATE alltracks SET Location=?, BitRate=?, Format=? WHERE Location=?',
[None, None, None, track_location])
if ArtistName:
del_have_tracks = myDB.select('SELECT Location, Matched, ArtistName FROM have WHERE ArtistName = ? COLLATE NOCASE', [ArtistName])
else:
del_have_tracks = myDB.select('SELECT Location, Matched, ArtistName FROM have')
for track in del_have_tracks:
if not os.path.isfile(track['Location']):
if track['ArtistName']:
# Make sure deleted files get accounted for when updating artist track counts
new_artists.append(track['ArtistName'])
myDB.action('DELETE FROM have WHERE Location=?', [track['Location']])
logger.info(
f"{track['Location']} removed from Headphones, as it "
f"is no longer on disk"
)
bitrates = []
track_list = []
latest_subdirectory = []
new_track_count = 0
file_count = 0
for r, d, f in helpers.walk_directory(dir):
# Filter paths based on config. Note that these methods work directly
# on the inputs
helpers.path_filter_patterns(d, headphones.CONFIG.IGNORED_FOLDERS, r)
helpers.path_filter_patterns(f, headphones.CONFIG.IGNORED_FILES, r)
for files in f:
# MEDIA_FORMATS = music file extensions, e.g. mp3, flac, etc
if any(files.lower().endswith('.' + x.lower()) for x in headphones.MEDIA_FORMATS):
subdirectory = r.replace(dir, '')
latest_subdirectory.append(subdirectory)
track_path = os.path.join(r, files)
# Try to read the metadata
try:
f = MediaFile(track_path)
except (FileTypeError, UnreadableFileError):
logger.warning(f"Cannot read `{track_path}`. It may be corrupted or not a media file.")
continue
except IOError:
                    logger.warning(f"Cannot read `{track_path}`. Does the file exist?")
continue
# Grab the bitrates for the auto detect bit rate option
if f.bitrate:
bitrates.append(f.bitrate)
# Use the album artist over the artist if available
if f.albumartist:
f_artist = f.albumartist
elif f.artist:
f_artist = f.artist
else:
f_artist = None
# Add the track to our track list -
# TODO: skip adding tracks without the minimum requisite information (just a matter of putting together the right if statements)
if f_artist and f.album and f.title:
CleanName = helpers.clean_name(f_artist + ' ' + f.album + ' ' + f.title)
else:
CleanName = None
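                # clean_name() transliterates accented characters and strips
                # punctuation/extra whitespace (see helpers_test.py below for
                # concrete cases), so the concatenated artist+album+title key
                # stored here compares stably against CleanName values in the
                # tracks tables even when tags differ in accents or spacing.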
controlValueDict = {'Location': track_path}
newValueDict = {'TrackID': f.mb_trackid,
# 'ReleaseID' : f.mb_albumid,
'ArtistName': f_artist,
'AlbumTitle': f.album,
'TrackNumber': f.track,
'TrackLength': f.length,
'Genre': f.genre,
'Date': f.date,
'TrackTitle': f.title,
'BitRate': f.bitrate,
'Format': f.format,
'CleanName': CleanName
}
# track_list.append(track_dict)
check_exist_track = myDB.action("SELECT * FROM have WHERE Location=?",
[track_path]).fetchone()
# Only attempt to match tracks that are new, haven't yet been matched, or metadata has changed.
if not check_exist_track:
# This is a new track
if f_artist:
new_artists.append(f_artist)
myDB.upsert("have", newValueDict, controlValueDict)
new_track_count += 1
else:
if check_exist_track['ArtistName'] != f_artist or check_exist_track[
'AlbumTitle'] != f.album or check_exist_track['TrackTitle'] != f.title:
# Important track metadata has been modified, need to run matcher again
if f_artist and f_artist != check_exist_track['ArtistName']:
new_artists.append(f_artist)
elif f_artist and f_artist == check_exist_track['ArtistName'] and \
check_exist_track['Matched'] != "Ignored":
new_artists.append(f_artist)
else:
continue
newValueDict['Matched'] = None
myDB.upsert("have", newValueDict, controlValueDict)
myDB.action(
'UPDATE tracks SET Location=?, BitRate=?, Format=? WHERE Location=?',
[None, None, None, track_path])
myDB.action(
'UPDATE alltracks SET Location=?, BitRate=?, Format=? WHERE Location=?',
[None, None, None, track_path])
new_track_count += 1
else:
# This track information hasn't changed
if f_artist and check_exist_track['Matched'] != "Ignored":
new_artists.append(f_artist)
file_count += 1
# Now we start track matching
logger.info(f"{new_track_count} new/modified tracks found and added to the database")
dbtracks = myDB.action(
"SELECT * FROM have WHERE Matched IS NULL AND LOCATION LIKE ?",
[f"{dir}%"]
)
dbtracks_count = myDB.action(
"SELECT COUNT(*) FROM have WHERE Matched IS NULL AND LOCATION LIKE ?",
[f"{dir}%"]
).fetchone()[0]
logger.info(f"Found {dbtracks_count} new/modified tracks in `{dir}`")
logger.info("Matching tracks to the appropriate releases....")
# Sort the track_list by most vague (e.g. no trackid or releaseid)
# to most specific (both trackid & releaseid)
# When we insert into the database, the tracks with the most
# specific information will overwrite the more general matches
sorted_dbtracks = helpers.multikeysort(dbtracks, ['ArtistName', 'AlbumTitle'])
# We'll use this to give a % completion, just because the
# track matching might take a while
tracks_completed = 0
latest_artist = None
last_completion_percentage = 0
prev_artist_name = None
artistid = None
for track in sorted_dbtracks:
if latest_artist != track['ArtistName']:
logger.info(f"Now matching tracks by {track['ArtistName']}")
latest_artist = track['ArtistName']
tracks_completed += 1
completion_percentage = math.floor(
float(tracks_completed) / dbtracks_count * 1000
) / 10
if completion_percentage >= (last_completion_percentage + 10):
logger.info("Track matching is " + str(completion_percentage) + "% complete")
last_completion_percentage = completion_percentage
# THE "MORE-SPECIFIC" CLAUSES HERE HAVE ALL BEEN REMOVED. WHEN RUNNING A LIBRARY SCAN, THE ONLY CLAUSES THAT
# EVER GOT HIT WERE [ARTIST/ALBUM/TRACK] OR CLEANNAME. ARTISTID & RELEASEID ARE NEVER PASSED TO THIS FUNCTION,
# ARE NEVER FOUND, AND THE OTHER CLAUSES WERE NEVER HIT. FURTHERMORE, OTHER MATCHING FUNCTIONS IN THIS PROGRAM
# (IMPORTER.PY, MB.PY) SIMPLY DO A [ARTIST/ALBUM/TRACK] OR CLEANNAME MATCH, SO IT'S ALL CONSISTENT.
albumid = None
if track['ArtistName'] and track['CleanName']:
artist_name = track['ArtistName']
clean_name = track['CleanName']
# Only update if artist is in the db
if artist_name != prev_artist_name:
prev_artist_name = artist_name
artistid = None
artist_lookup = "\"" + artist_name.replace("\"", "\"\"") + "\""
try:
dbartist = myDB.select('SELECT DISTINCT ArtistID, ArtistName FROM artists WHERE ArtistName LIKE ' + artist_lookup + '')
except:
dbartist = None
if not dbartist:
dbartist = myDB.select('SELECT DISTINCT ArtistID, ArtistName FROM tracks WHERE CleanName = ?', [clean_name])
if not dbartist:
dbartist = myDB.select('SELECT DISTINCT ArtistID, ArtistName FROM alltracks WHERE CleanName = ?', [clean_name])
if not dbartist:
clean_artist = helpers.clean_name(artist_name)
if clean_artist:
dbartist = myDB.select('SELECT DISTINCT ArtistID, ArtistName FROM tracks WHERE CleanName >= ? and CleanName < ?',
[clean_artist, clean_artist + '{'])
if not dbartist:
dbartist = myDB.select('SELECT DISTINCT ArtistID, ArtistName FROM alltracks WHERE CleanName >= ? and CleanName < ?',
[clean_artist, clean_artist + '{'])
if dbartist:
artistid = dbartist[0][0]
if artistid:
# This was previously using Artist, Album, Title with a SELECT LIKE ? and was not using an index
# (Possible issue: https://stackoverflow.com/questions/37845854/python-sqlite3-not-using-index-with-like)
# Now selects/updates using CleanName index (may have to revert if not working)
# matching on CleanName should be enough, ensure it's the same artist just in case
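                # The prefix lookups above lean on the same index: because '{'
                # sorts immediately after 'z' in ASCII, the condition pair
                # CleanName >= 'foo' AND CleanName < 'foo{' matches every
                # CleanName beginning with 'foo' while still using the
                # CleanName index, which a LIKE pattern may not (see the
                # StackOverflow link above).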
# Update tracks
dbtrack = myDB.action('SELECT AlbumID, ArtistName FROM tracks WHERE CleanName = ? AND ArtistID = ?', [clean_name, artistid]).fetchone()
if dbtrack:
albumid = dbtrack['AlbumID']
myDB.action(
'UPDATE tracks SET Location = ?, BitRate = ?, Format = ? WHERE CleanName = ? AND ArtistID = ?',
[track['Location'], track['BitRate'], track['Format'], clean_name, artistid])
# Update alltracks
alltrack = myDB.action('SELECT AlbumID, ArtistName FROM alltracks WHERE CleanName = ? AND ArtistID = ?', [clean_name, artistid]).fetchone()
if alltrack:
albumid = alltrack['AlbumID']
myDB.action(
'UPDATE alltracks SET Location = ?, BitRate = ?, Format = ? WHERE CleanName = ? AND ArtistID = ?',
[track['Location'], track['BitRate'], track['Format'], clean_name, artistid])
# Update have
controlValueDict2 = {'Location': track['Location']}
if albumid:
newValueDict2 = {'Matched': albumid}
else:
newValueDict2 = {'Matched': "Failed"}
myDB.upsert("have", newValueDict2, controlValueDict2)
# myDB.action('INSERT INTO have (ArtistName, AlbumTitle, TrackNumber, TrackTitle, TrackLength, BitRate, Genre, Date, TrackID, Location, CleanName, Format) VALUES( ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)', [track['ArtistName'], track['AlbumTitle'], track['TrackNumber'], track['TrackTitle'], track['TrackLength'], track['BitRate'], track['Genre'], track['Date'], track['TrackID'], track['Location'], CleanName, track['Format']])
logger.info(f"Completed matching tracks from `{dir}`")
if not append or artistScan:
logger.info('Updating scanned artist track counts')
# Clean up the new artist list
        unique_artists = list(dict.fromkeys(new_artists))
# # Don't think we need to do this, check the db instead below
#
# # artist scan
# if ArtistName:
# current_artists = [[ArtistName]]
# # directory scan
# else:
# current_artists = myDB.select('SELECT ArtistName, ArtistID FROM artists WHERE ArtistName IS NOT NULL')
#
# # There was a bug where artists with special characters (-,') would show up in new artists.
#
# # artist_list = scanned artists not in the db
# artist_list = [
# x for x in unique_artists
# if helpers.clean_name(x).lower() not in [
# helpers.clean_name(y[0]).lower()
# for y in current_artists
# ]
# ]
#
# # artists_checked = scanned artists that exist in the db
# artists_checked = [
# x for x in unique_artists
# if helpers.clean_name(x).lower() in [
# helpers.clean_name(y[0]).lower()
# for y in current_artists
# ]
# ]
new_artist_list = []
for artist in unique_artists:
if not artist:
continue
logger.info('Processing artist: %s' % artist)
# check if artist is already in the db
artist_lookup = "\"" + artist.replace("\"", "\"\"") + "\""
try:
dbartist = myDB.select('SELECT DISTINCT ArtistID, ArtistName FROM artists WHERE ArtistName LIKE ' + artist_lookup + '')
except:
dbartist = None
if not dbartist:
clean_artist = helpers.clean_name(artist)
if clean_artist:
dbartist = myDB.select('SELECT DISTINCT ArtistID, ArtistName FROM tracks WHERE CleanName >= ? and CleanName < ?',
[clean_artist, clean_artist + '{'])
if not dbartist:
dbartist = myDB.select('SELECT DISTINCT ArtistID, ArtistName FROM alltracks WHERE CleanName >= ? and CleanName < ?',
[clean_artist, clean_artist + '{'])
# new artist not in db, add to list
if not dbartist:
new_artist_list.append(artist)
else:
# artist in db, update have track counts
artistid = dbartist[0][0]
# Have tracks are selected from tracks table and not all tracks because of duplicates
                # We update the track count upon an album switch to complement this
# havetracks = (
# len(myDB.select(
# 'SELECT TrackTitle from tracks WHERE ArtistName like ? AND Location IS NOT NULL',
# [artist])) + len(myDB.select(
# 'SELECT TrackTitle from have WHERE ArtistName like ? AND Matched = "Failed"',
# [artist]))
# )
try:
havetracks = (
len(myDB.select(
'SELECT ArtistID From tracks WHERE ArtistID = ? AND Location IS NOT NULL',
[artistid])) + len(myDB.select(
'SELECT ArtistName FROM have WHERE ArtistName LIKE ' + artist_lookup + ' AND Matched = "Failed"'))
)
except Exception as e:
logger.warn('Error updating counts for artist: %s: %s' % (artist, e))
# Note: some people complain about having "artist have tracks" > # of tracks total in artist official releases
# (can fix by getting rid of second len statement)
if havetracks:
myDB.action('UPDATE artists SET HaveTracks = ? WHERE ArtistID = ?', [havetracks, artistid])
# Update albums to downloaded
update_album_status(ArtistID=artistid)
logger.info('Found %i new artists' % len(new_artist_list))
# Add scanned artists not in the db
if new_artist_list:
if headphones.CONFIG.AUTO_ADD_ARTISTS:
logger.info('Importing %i new artists' % len(new_artist_list))
importer.artistlist_to_mbids(new_artist_list)
else:
logger.info('To add these artists, go to Manage->Manage New Artists')
# myDB.action('DELETE from newartists')
for artist in new_artist_list:
myDB.action('INSERT OR IGNORE INTO newartists VALUES (?)', [artist])
if headphones.CONFIG.DETECT_BITRATE and bitrates:
headphones.CONFIG.PREFERRED_BITRATE = sum(bitrates) / len(bitrates) / 1000
else:
# If we're appending a new album to the database, update the artists total track counts
logger.info('Updating artist track counts')
artist_lookup = "\"" + ArtistName.replace("\"", "\"\"") + "\""
try:
havetracks = len(
myDB.select('SELECT ArtistID FROM tracks WHERE ArtistID = ? AND Location IS NOT NULL',
[ArtistID])) + len(myDB.select(
'SELECT ArtistName FROM have WHERE ArtistName LIKE ' + artist_lookup + ' AND Matched = "Failed"'))
except Exception as e:
logger.warn('Error updating counts for artist: %s: %s' % (ArtistName, e))
if havetracks:
myDB.action('UPDATE artists SET HaveTracks=? WHERE ArtistID=?', [havetracks, ArtistID])
# Moved above to call for each artist
# if not append:
# update_album_status()
if not append and not artistScan:
lastfm.getSimilar()
if ArtistName:
logger.info('Scanning complete for artist: %s', ArtistName)
else:
logger.info('Library scan complete')
# ADDED THIS SECTION TO MARK ALBUMS AS DOWNLOADED IF ARTISTS ARE ADDED EN MASSE BEFORE LIBRARY IS SCANNED
# Think the above comment relates to calling from Manage Unmatched
# This used to select and update all albums and would clobber the db, changed to run by ArtistID.
def update_album_status(AlbumID=None, ArtistID=None):
myDB = db.DBConnection()
# logger.info('Counting matched tracks to mark albums as skipped/downloaded')
if AlbumID:
album_status_updater = myDB.action(
'SELECT'
' a.AlbumID, a.ArtistName, a.AlbumTitle, a.Status, AVG(t.Location IS NOT NULL) * 100 AS album_completion '
'FROM'
' albums AS a '
'JOIN tracks AS t ON t.AlbumID = a.AlbumID '
'WHERE'
' a.AlbumID = ? AND a.Status != "Downloaded" '
'GROUP BY'
' a.AlbumID '
'HAVING'
' AVG(t.Location IS NOT NULL) * 100 >= ?',
[AlbumID, headphones.CONFIG.ALBUM_COMPLETION_PCT]
)
else:
album_status_updater = myDB.action(
'SELECT'
' a.AlbumID, a.ArtistID, a.ArtistName, a.AlbumTitle, a.Status, AVG(t.Location IS NOT NULL) * 100 AS album_completion '
'FROM'
' albums AS a '
'JOIN tracks AS t ON t.AlbumID = a.AlbumID '
'WHERE'
' a.ArtistID = ? AND a.Status != "Downloaded" '
'GROUP BY'
' a.AlbumID '
'HAVING'
' AVG(t.Location IS NOT NULL) * 100 >= ?',
[ArtistID, headphones.CONFIG.ALBUM_COMPLETION_PCT]
)
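    # The AVG trick: in SQLite, (t.Location IS NOT NULL) evaluates to 1 or 0
    # per track, so AVG(...) * 100 is the album's completion percentage.
    # e.g. 8 of 10 tracks with a Location on disk gives 80, which the HAVING
    # clause then compares against ALBUM_COMPLETION_PCT.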
new_album_status = "Downloaded"
albums = []
for album in album_status_updater:
albums.append([album['AlbumID'], album['ArtistName'], album['AlbumTitle']])
for album in albums:
# I don't think we want to change Downloaded->Skipped.....
# I think we can only automatically change Skipped->Downloaded when updating
# There was a bug report where this was causing infinite downloads if the album was
# recent, but matched to less than 80%. It would go Downloaded->Skipped->Wanted->Downloaded->Skipped->Wanted->etc....
# else:
# if album['Status'] == "Skipped" or album['Status'] == "Downloaded":
# new_album_status = "Skipped"
# else:
# new_album_status = album['Status']
# else:
# new_album_status = album['Status']
#
# myDB.upsert("albums", {'Status': new_album_status}, {'AlbumID': album['AlbumID']})
# if new_album_status != album['Status']:
# logger.info('Album %s changed to %s' % (album['AlbumTitle'], new_album_status))
# logger.info('Album status update complete')
myDB.action('UPDATE albums SET Status = ? WHERE AlbumID = ?', [new_album_status, album[0]])
logger.info('Album: %s - %s. Status updated to %s' % (album[1], album[2], new_album_status))
| 23,868 | Python | .py | 430 | 40.930233 | 431 | 0.561162 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
| 9,284 | versioncheck.py | rembo10_headphones/headphones/versioncheck.py |
# This file is part of Headphones.
#
# Headphones is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Headphones is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Headphones. If not, see <http://www.gnu.org/licenses/>.
import tarfile
import platform
import subprocess
import re
import os
import headphones
from headphones import logger, version, request
def runGit(args):
if headphones.CONFIG.GIT_PATH:
git_locations = ['"' + headphones.CONFIG.GIT_PATH + '"']
else:
git_locations = ['git']
if platform.system().lower() == 'darwin':
git_locations.append('/usr/local/git/bin/git')
output = err = None
for cur_git in git_locations:
cmd = cur_git + ' ' + args
try:
logger.debug('Trying to execute: "' + cmd + '" with shell in ' + headphones.PROG_DIR)
p = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
shell=True,
cwd=headphones.PROG_DIR)
output, err = p.communicate()
output = output.decode('utf-8').strip()
logger.debug('Git output: ' + output)
except OSError as e:
logger.debug('Command failed: %s. Error: %s' % (cmd, e))
continue
if 'not found' in output or "not recognized as an internal or external command" in output:
logger.debug('Unable to find git with command ' + cmd)
output = None
elif 'fatal:' in output or err:
logger.error('Git returned bad info. Are you sure this is a git installation?')
output = None
elif output:
break
return (output, err)
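# Usage sketch (hedged): runGit('rev-parse HEAD') returns (hash_text, err) on
# success; output is None when no usable git binary answered, so callers must
# handle a falsy first element.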
def getVersion():
if version.HEADPHONES_VERSION.startswith('win32build'):
headphones.INSTALL_TYPE = 'win'
# Don't have a way to update exe yet, but don't want to set VERSION to None
return 'Windows Install', 'master'
elif os.path.isdir(os.path.join(headphones.PROG_DIR, '.git')):
headphones.INSTALL_TYPE = 'git'
output, err = runGit('rev-parse HEAD')
        if not output:
            logger.error("Couldn't find latest installed version.")
            cur_commit_hash = None
        else:
            cur_commit_hash = str(output)
            if not re.match('^[a-z0-9]+$', cur_commit_hash):
                logger.error("Output doesn't look like a hash, not using it")
                cur_commit_hash = None
if headphones.CONFIG.DO_NOT_OVERRIDE_GIT_BRANCH and headphones.CONFIG.GIT_BRANCH:
branch_name = headphones.CONFIG.GIT_BRANCH
else:
            branch_name, err = runGit('rev-parse --abbrev-ref HEAD')
if not branch_name and headphones.CONFIG.GIT_BRANCH:
logger.error(
'Could not retrieve branch name from git. Falling back to %s' % headphones.CONFIG.GIT_BRANCH)
branch_name = headphones.CONFIG.GIT_BRANCH
if not branch_name:
logger.error('Could not retrieve branch name from git. Defaulting to master')
branch_name = 'master'
return cur_commit_hash, branch_name
else:
headphones.INSTALL_TYPE = 'source'
version_file = os.path.join(headphones.PROG_DIR, 'version.txt')
if not os.path.isfile(version_file):
return None, 'master'
with open(version_file, 'r') as f:
current_version = f.read().strip(' \n\r')
if current_version:
return current_version, headphones.CONFIG.GIT_BRANCH
else:
return None, 'master'
def checkGithub():
headphones.COMMITS_BEHIND = 0
# Get the latest version available from github
logger.info('Retrieving latest version information from GitHub')
url = 'https://api.github.com/repos/%s/headphones/commits/%s' % (
headphones.CONFIG.GIT_USER, headphones.CONFIG.GIT_BRANCH)
version = request.request_json(url, timeout=20, validator=lambda x: type(x) == dict)
if version is None:
logger.warn(
'Could not get the latest version from GitHub. Are you running a local development version?')
return headphones.CURRENT_VERSION
headphones.LATEST_VERSION = version['sha']
logger.debug("Latest version is %s", headphones.LATEST_VERSION)
# See how many commits behind we are
if not headphones.CURRENT_VERSION:
logger.info(
'You are running an unknown version of Headphones. Run the updater to identify your version')
return headphones.LATEST_VERSION
if headphones.LATEST_VERSION == headphones.CURRENT_VERSION:
logger.info('Headphones is up to date')
return headphones.LATEST_VERSION
logger.info('Comparing currently installed version with latest GitHub version')
url = 'https://api.github.com/repos/%s/headphones/compare/%s...%s' % (
headphones.CONFIG.GIT_USER, headphones.LATEST_VERSION, headphones.CURRENT_VERSION)
commits = request.request_json(url, timeout=20, whitelist_status_code=404,
validator=lambda x: type(x) == dict)
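    # Note the comparison order: LATEST_VERSION...CURRENT_VERSION makes the
    # local commit the head of the compare, so the API's 'behind_by' field
    # counts how many commits this install lags the branch tip.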
if commits is None:
logger.warn('Could not get commits behind from GitHub.')
return headphones.LATEST_VERSION
try:
headphones.COMMITS_BEHIND = int(commits['behind_by'])
logger.debug("In total, %d commits behind", headphones.COMMITS_BEHIND)
except KeyError:
logger.info('Cannot compare versions. Are you running a local development version?')
headphones.COMMITS_BEHIND = 0
if headphones.COMMITS_BEHIND > 0:
logger.info(
'New version is available. You are %s commits behind' % headphones.COMMITS_BEHIND)
elif headphones.COMMITS_BEHIND == 0:
logger.info('Headphones is up to date')
return headphones.LATEST_VERSION
def update():
if headphones.INSTALL_TYPE == 'win':
logger.info('Windows .exe updating not supported yet.')
elif headphones.INSTALL_TYPE == 'git':
output, err = runGit('pull origin ' + headphones.CONFIG.GIT_BRANCH)
if not output:
logger.error('Couldn\'t download latest version')
for line in output.split('\n'):
            if 'Already up-to-date.' in line or 'Already up to date.' in line:
logger.info('No update available, not updating')
logger.info('Output: ' + str(output))
elif line.endswith('Aborting.'):
logger.error('Unable to update from git: ' + line)
logger.info('Output: ' + str(output))
else:
tar_download_url = 'https://github.com/%s/headphones/tarball/%s' % (
headphones.CONFIG.GIT_USER, headphones.CONFIG.GIT_BRANCH)
update_dir = os.path.join(headphones.PROG_DIR, 'update')
version_path = os.path.join(headphones.PROG_DIR, 'version.txt')
logger.info('Downloading update from: ' + tar_download_url)
data = request.request_content(tar_download_url)
if not data:
logger.error("Unable to retrieve new version from '%s', can't update", tar_download_url)
return
download_name = headphones.CONFIG.GIT_BRANCH + '-github'
tar_download_path = os.path.join(headphones.PROG_DIR, download_name)
# Save tar to disk
with open(tar_download_path, 'wb') as f:
f.write(data)
# Extract the tar to update folder
logger.info('Extracting file: ' + tar_download_path)
tar = tarfile.open(tar_download_path)
tar.extractall(update_dir)
tar.close()
# Delete the tar.gz
logger.info('Deleting file: ' + tar_download_path)
os.remove(tar_download_path)
# Find update dir name
update_dir_contents = [x for x in os.listdir(update_dir) if
os.path.isdir(os.path.join(update_dir, x))]
if len(update_dir_contents) != 1:
logger.error("Invalid update data, update failed: " + str(update_dir_contents))
return
content_dir = os.path.join(update_dir, update_dir_contents[0])
# walk temp folder and move files to main folder
for dirname, dirnames, filenames in os.walk(content_dir):
dirname = dirname[len(content_dir) + 1:]
for curfile in filenames:
old_path = os.path.join(content_dir, dirname, curfile)
new_path = os.path.join(headphones.PROG_DIR, dirname, curfile)
if os.path.isfile(new_path):
os.remove(new_path)
os.renames(old_path, new_path)
# Update version.txt
try:
with open(version_path, 'w') as f:
f.write(str(headphones.LATEST_VERSION))
except IOError as e:
logger.error(
"Unable to write current version to version.txt, update not complete: %s",
e
)
return
| 9,424 | Python | .py | 194 | 38.824742 | 113 | 0.633886 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
| 9,285 | qbittorrent.py | rembo10_headphones/headphones/qbittorrent.py |
# This file is part of Headphones.
#
# Headphones is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Headphones is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Headphones. If not, see <http://www.gnu.org/licenses/>.
import urllib.request, urllib.parse, urllib.error
import http.cookiejar
import json
import time
import mimetypes
import random
import string
import os
import headphones
from headphones import logger
from collections import namedtuple
from qbittorrentv2 import Client
class qbittorrentclient(object):
TOKEN_REGEX = "<div id='token' style='display:none;'>([^<>]+)</div>"
UTSetting = namedtuple("UTSetting", ["name", "int", "str", "access"])
def __init__(self, base_url=None, username=None, password=None,):
host = headphones.CONFIG.QBITTORRENT_HOST
if not host.startswith('http'):
host = 'http://' + host
if host.endswith('/'):
host = host[:-1]
if host.endswith('/gui'):
host = host[:-4]
self.base_url = host
self.username = headphones.CONFIG.QBITTORRENT_USERNAME
self.password = headphones.CONFIG.QBITTORRENT_PASSWORD
# Try new v2 api
try:
self.qb = Client(self.base_url)
login_text = self.qb.login(self.username, self.password)
if login_text:
logger.warning("Could not login to qBittorrent v2 api, check credentials: %s", login_text)
self.version = 2
except Exception as e:
logger.warning("Error with qBittorrent v2 api, check settings or update, will try v1: %s" % e)
self.cookiejar = http.cookiejar.CookieJar()
self.opener = self._make_opener()
self._get_sid(self.base_url, self.username, self.password)
self.version = 1
def _make_opener(self):
# create opener with cookie handler to carry QBitTorrent SID cookie
cookie_handler = urllib.request.HTTPCookieProcessor(self.cookiejar)
handlers = [cookie_handler]
return urllib.request.build_opener(*handlers)
def _get_sid(self, base_url, username, password):
# login so we can capture SID cookie
        # urlopen requires bytes for POST data under Python 3
        login_data = urllib.parse.urlencode({'username': username, 'password': password}).encode('utf-8')
        try:
            self.opener.open(base_url + '/login', login_data)
except urllib.error.URLError as err:
logger.debug('Error getting SID. qBittorrent responded with error: ' + str(err.reason))
return
for cookie in self.cookiejar:
logger.debug('login cookie: ' + cookie.name + ', value: ' + cookie.value)
return
def _command(self, command, args=None, content_type=None, files=None):
logger.debug('QBittorrent WebAPI Command: %s' % command)
url = self.base_url + '/' + command
data = None
headers = dict()
if content_type == 'multipart/form-data':
data, headers = encode_multipart(args, files)
else:
if args:
                data = urllib.parse.urlencode(args).encode('utf-8')
if content_type:
headers['Content-Type'] = content_type
logger.debug('%s' % json.dumps(headers, indent=4))
logger.debug('%s' % data)
            # Request data must be bytes under Python 3 (multipart bodies are built as str)
            if isinstance(data, str):
                data = data.encode('utf-8')
            request = urllib.request.Request(url, data, headers)
try:
response = self.opener.open(request)
info = response.info()
if info:
                # Python 3: header access is .get(), and the response body is bytes
                if info.get('content-type'):
                    if info.get('content-type') == 'application/json':
                        resp = response.read().decode('utf-8', 'replace')
logger.debug('response code: %s' % str(response.code))
logger.debug('response: %s' % resp)
return response.code, json.loads(resp)
logger.debug('response code: %s' % str(response.code))
return response.code, None
except urllib.error.URLError as err:
logger.debug('Failed URL: %s' % url)
logger.debug('QBitTorrent webUI raised the following error: %s' % str(err))
return None, None
def _get_list(self, **args):
return self._command('query/torrents', args)
def _get_settings(self):
status, value = self._command('query/preferences')
logger.debug('get_settings() returned %d items' % len(value))
return value
def get_savepath(self, hash):
logger.debug('qb.get_savepath(%s)' % hash)
status, torrentList = self._get_list()
for torrent in torrentList:
if torrent['hash']:
if torrent['hash'].upper() == hash.upper():
return torrent['save_path']
return None
def start(self, hash):
logger.debug('qb.start(%s)' % hash)
args = {'hash': hash}
return self._command('command/resume', args, 'application/x-www-form-urlencoded')
def pause(self, hash):
logger.debug('qb.pause(%s)' % hash)
args = {'hash': hash}
return self._command('command/pause', args, 'application/x-www-form-urlencoded')
def getfiles(self, hash):
logger.debug('qb.getfiles(%s)' % hash)
return self._command('query/propertiesFiles/' + hash)
def getprops(self, hash):
logger.debug('qb.getprops(%s)' % hash)
return self._command('query/propertiesGeneral/' + hash)
def setprio(self, hash, priority):
logger.debug('qb.setprio(%s,%d)' % (hash, priority))
args = {'hash': hash, 'priority': priority}
return self._command('command/setFilePrio', args, 'application/x-www-form-urlencoded')
def remove(self, hash, remove_data=False):
logger.debug('qb.remove(%s,%s)' % (hash, remove_data))
args = {'hashes': hash}
if remove_data:
command = 'command/deletePerm'
else:
command = 'command/delete'
return self._command(command, args, 'application/x-www-form-urlencoded')
def removeTorrent(hash, remove_data=False):
logger.debug('removeTorrent(%s,%s)' % (hash, remove_data))
qbclient = qbittorrentclient()
if qbclient.version == 2:
torrentlist = qbclient.qb.torrents(hashes=hash.lower())
else:
status, torrentlist = qbclient._get_list()
for torrent in torrentlist:
if torrent['hash'].lower() == hash.lower():
if torrent['ratio'] >= torrent['ratio_limit'] and torrent['ratio_limit'] >= 0:
if qbclient.version == 2:
if remove_data:
logger.info(
'%s has finished seeding, removing torrent and data. '
'Ratio: %s, Ratio Limit: %s' % (torrent['name'], torrent['ratio'], torrent['ratio_limit']))
qbclient.qb.delete_permanently(hash)
else:
logger.info('%s has finished seeding, removing torrent' % torrent['name'])
qbclient.qb.delete(hash)
else:
qbclient.remove(hash, remove_data)
return True
else:
logger.info(
'%s has not finished seeding yet, torrent will not be removed, will try again on next run. '
'Ratio: %s, Ratio Limit: %s' % (torrent['name'], torrent['ratio'], torrent['ratio_limit']))
return False
return False
def addTorrent(link):
logger.debug('addTorrent(%s)' % link)
qbclient = qbittorrentclient()
if qbclient.version == 2:
return qbclient.qb.download_from_link(link, savepath=headphones.CONFIG.DOWNLOAD_TORRENT_DIR,
category=headphones.CONFIG.QBITTORRENT_LABEL)
else:
args = {'urls': link, 'savepath': headphones.CONFIG.DOWNLOAD_TORRENT_DIR}
if headphones.CONFIG.QBITTORRENT_LABEL:
args['category'] = headphones.CONFIG.QBITTORRENT_LABEL
return qbclient._command('command/download', args, 'multipart/form-data')
def addFile(data):
logger.debug('addFile(data)')
qbclient = qbittorrentclient()
if qbclient.version == 2:
return qbclient.qb.download_from_file(data, savepath=headphones.CONFIG.DOWNLOAD_TORRENT_DIR,
category=headphones.CONFIG.QBITTORRENT_LABEL)
else:
        files = {'torrents': {'filename': '', 'content': data}}
        return qbclient._command('command/upload', files=files, content_type='multipart/form-data')
def getName(hash):
logger.debug('getName(%s)' % hash)
qbclient = qbittorrentclient()
tries = 1
while tries <= 6:
time.sleep(10)
if qbclient.version == 2:
torrentlist = qbclient.qb.torrents(hashes=hash.lower())
else:
status, torrentlist = qbclient._get_list()
for torrent in torrentlist:
if torrent['hash'].lower() == hash.lower():
return torrent['name']
tries += 1
return None
def getFolder(hash):
logger.debug('getFolder(%s)' % hash)
torrent_folder = None
single_file = False
qbclient = qbittorrentclient()
try:
if qbclient.version == 2:
torrent_files = qbclient.qb.get_torrent_files(hash.lower())
else:
status, torrent_files = qbclient.getfiles(hash.lower())
if torrent_files:
if len(torrent_files) == 1:
torrent_folder = torrent_files[0]['name']
single_file = True
else:
torrent_folder = os.path.split(torrent_files[0]['name'])[0]
torrent_folder = torrent_folder.split(os.sep)[0]
single_file = False
except:
torrent_folder = None
single_file = False
return torrent_folder, single_file
def setSeedRatio(hash, ratio):
logger.debug('setSeedRatio(%s)' % hash)
qbclient = qbittorrentclient()
if qbclient.version == 2:
ratio = -1 if ratio == 0 else ratio
return qbclient.qb.set_share_ratio(hash.lower(), ratio)
else:
logger.warn('setSeedRatio only available with qBittorrent v2 api')
return
def apiVersion2():
logger.debug('getApiVersion')
qbclient = qbittorrentclient()
    return qbclient.version == 2
_BOUNDARY_CHARS = string.digits + string.ascii_letters
# Taken from http://code.activestate.com/recipes/578668-encode-multipart-form-data-for-uploading-files-via/
# "MIT License" which is compatible with GPL
def encode_multipart(args, files, boundary=None):
logger.debug('encode_multipart()')
def escape_quote(s):
return s.replace('"', '\\"')
if boundary is None:
boundary = ''.join(random.choice(_BOUNDARY_CHARS) for i in range(30))
lines = []
if args:
for name, value in list(args.items()):
lines.extend((
'--{0}'.format(boundary),
'Content-Disposition: form-data; name="{0}"'.format(escape_quote(name)),
'',
str(value),
))
logger.debug(''.join(lines))
if files:
for name, value in list(files.items()):
filename = value['filename']
if 'mimetype' in value:
mimetype = value['mimetype']
else:
mimetype = mimetypes.guess_type(filename)[0] or 'application/octet-stream'
lines.extend((
'--{0}'.format(boundary),
'Content-Disposition: form-data; name="{0}"; filename="{1}"'.format(
escape_quote(name), escape_quote(filename)),
'Content-Type: {0}'.format(mimetype),
'',
value['content'],
))
lines.extend((
'--{0}--'.format(boundary),
'',
))
body = '\r\n'.join(lines)
headers = {
'Content-Type': 'multipart/form-data; boundary={0}'.format(boundary),
'Content-Length': str(len(body)),
}
return (body, headers)
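# Minimal usage sketch (hypothetical values), assuming text-safe content --
# the parts are joined as str, so binary torrent data would need a
# bytes-based variant:
#
#   body, headers = encode_multipart(
#       {'savepath': '/music'},
#       {'torrents': {'filename': 'a.torrent', 'content': '<data>'}})
#
# headers carries 'multipart/form-data; boundary=...' with a random
# 30-character boundary, and body contains one part per field/file.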
| 12,634 | Python | .py | 288 | 33.847222 | 119 | 0.600114 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
| 9,286 | helpers_test.py | rembo10_headphones/headphones/helpers_test.py |
# -*- coding: utf-8 -*-
from .unittestcompat import TestCase
from headphones.helpers import clean_name, is_valid_date, age, has_token
class HelpersTest(TestCase):
def test_clean_name(self):
"""helpers: check correctness of clean_name() function"""
cases = {
' Weiße & rose ': 'Weisse and rose',
'Multiple / spaces': 'Multiple spaces',
'Kevin\'s m²': 'Kevins m2',
'Symphonęy Nº9': 'Symphoney No.9',
'ÆæßðÞIJij': 'AeaessdThIJıj',
'Obsessió (Cerebral Apoplexy remix)': 'obsessio cerebral '
'apoplexy remix',
'Doktór Hałabała i siedmiu zbojów': 'doktor halabala i siedmiu '
'zbojow',
'Arbetets Söner och Döttrar': 'arbetets soner och dottrar',
'Björk Guðmundsdóttir': 'bjork gudmundsdottir',
'L\'Arc~en~Ciel': 'larc en ciel',
'Orquesta de la Luz (オルケスタ・デ・ラ・ルス)':
'Orquesta de la Luz オルケスタ デ ラ ルス'
}
for first, second in cases.items():
nf = clean_name(first).lower()
ns = clean_name(second).lower()
self.assertEqual(
nf, ns, "check cleaning of case (%s,"
"%s)" % (nf, ns)
)
def test_clean_name_nonunicode(self):
"""helpers: check if clean_name() works on non-unicode input"""
input = 'foo $ bar/BAZ'
test = clean_name(input).lower()
expected = 'foo bar baz'
self.assertEqual(
test, expected, "check clean_name() works on non-unicode"
)
input = 'fóó $ BAZ'
test = clean_name(input).lower()
expected = clean_name('%fóó baz ').lower()
self.assertEqual(
test, expected, "check clean_name() with narrow non-ascii input"
)
    def test_is_valid_date(self):
        """helpers: check is_valid_date() on valid and invalid dates"""
test_cases = [
('2021-11-12', True, "check is_valid_date returns True for valid date"),
(None, False, "check is_valid_date returns False for None"),
('2021-11', False, "check is_valid_date returns False for incomplete"),
('2021', False, "check is_valid_date returns False for incomplete")
]
for input, expected, desc in test_cases:
self.assertEqual(is_valid_date(input), expected, desc)
def test_has_token(self):
"""helpers: has_token()"""
self.assertEqual(
has_token("a cat ran", "cat"),
True,
"return True if token is in string"
)
self.assertEqual(
has_token("acatran", "cat"),
False,
"return False if token is part of another word"
)
| 2,852 | Python | .py | 64 | 31.453125 | 84 | 0.541498 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
| 9,287 | cuesplit.py | rembo10_headphones/headphones/cuesplit.py |
# This file is part of Headphones.
#
# Headphones is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Headphones is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Headphones. If not, see <http://www.gnu.org/licenses/>.
# Most of this lifted from here: https://github.com/SzieberthAdam/gneposis-cdgrab
import sys
import subprocess
import copy
import glob
import errno
import os
import re
import headphones
from headphones import logger
from mutagen.flac import FLAC
CUE_HEADER = {
'genre': '^REM GENRE (.+?)$',
'date': '^REM DATE (.+?)$',
'discid': '^REM DISCID (.+?)$',
'comment': '^REM COMMENT (.+?)$',
'catalog': '^CATALOG (.+?)$',
'artist': '^PERFORMER (.+?)$',
'title': '^TITLE (.+?)$',
'file': '^FILE (.+?) (WAVE|FLAC)$',
'accurateripid': '^REM ACCURATERIPID (.+?)$'
}
CUE_TRACK = r'TRACK (\d\d) AUDIO$'
CUE_TRACK_INFO = {
'artist': 'PERFORMER (.+?)$',
'title': 'TITLE (.+?)$',
'isrc': 'ISRC (.+?)$',
    'index': r'INDEX (\d\d) (.+?)$'
}
ALBUM_META_FILE_NAME = 'album.dat'
SPLIT_FILE_NAME = 'split.dat'
ALBUM_META_ALBUM_BY_CUE = ('artist', 'title', 'date', 'genre')
HTOA_LENGTH_TRIGGER = 3
WAVE_FILE_TYPE_BY_EXTENSION = {
'.wav': 'Waveform Audio',
'.wv': 'WavPack',
'.ape': "Monkey's Audio",
'.m4a': 'Apple Lossless',
'.flac': 'Free Lossless Audio Codec'
}
# SHNTOOL_COMPATIBLE = ("Free Lossless Audio Codec", "Waveform Audio", "Monkey's Audio")
# TODO: Make this better!
# this module-level variable is bad. :(
CUE_META = None
def check_splitter(command):
    '''Check that xld or shntool is installed'''
try:
env = os.environ.copy()
if 'xld' in command:
env['PATH'] += os.pathsep + '/Applications'
        elif headphones.CONFIG.CUE_SPLIT_SHNTOOL_PATH:
command = os.path.join(headphones.CONFIG.CUE_SPLIT_SHNTOOL_PATH, 'shntool')
devnull = open(os.devnull)
subprocess.Popen([command], stdout=devnull, stderr=devnull, env=env).communicate()
except OSError as e:
        if e.errno == errno.ENOENT:
return False
return True
def split_baby(split_file, split_cmd):
'''Let's split baby'''
logger.info(f"Splitting {split_file}...")
logger.debug(subprocess.list2cmdline(split_cmd))
# Prevent Windows from opening a terminal window
startupinfo = None
if headphones.SYS_PLATFORM == "win32":
startupinfo = subprocess.STARTUPINFO()
try:
startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
except AttributeError:
startupinfo.dwFlags |= subprocess._subprocess.STARTF_USESHOWWINDOW
env = os.environ.copy()
if 'xld' in split_cmd:
env['PATH'] += os.pathsep + '/Applications'
elif headphones.CONFIG.CUE_SPLIT_FLAC_PATH:
env['PATH'] += os.pathsep + headphones.CONFIG.CUE_SPLIT_FLAC_PATH
process = subprocess.Popen(split_cmd, startupinfo=startupinfo,
stdin=open(os.devnull, 'rb'), stdout=subprocess.PIPE,
stderr=subprocess.PIPE, env=env, text=True)
stdout, stderr = process.communicate()
if process.returncode:
logger.error(f"Split failed for {split_file}")
out = stdout or stderr
logger.error(f"Error details: {out}")
return False
else:
logger.info(f"Split succeeded for {split_file}")
return True
def check_list(list, ignore=0):
    '''Checks a list for None elements. If the list has a None entry (after
    the ignore index), it passes only if all elements thereafter are also
    None. Returns a tuple without the None entries.'''
if ignore:
try:
list[int(ignore)]
except:
raise ValueError('non-integer ignore index or ignore index not in list')
list1 = list[:ignore]
list2 = list[ignore:]
try:
first_none = list2.index(None)
except:
return tuple(list1 + list2)
for i in range(first_none, len(list2)):
if list2[i]:
raise ValueError('non-None entry after None entry in list at index {0}'.format(i))
while True:
list2.remove(None)
try:
list2.index(None)
except:
break
return tuple(list1 + list2)
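# e.g. check_list([None, 'a', 'b', None, None], ignore=1) keeps the ignored
# leading slot and drops the trailing gaps, returning (None, 'a', 'b');
# a non-None entry after a None (a numbering gap) raises ValueError.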
def trim_cue_entry(string):
    '''Removes leading and trailing double quotes.'''
if string[0] == '"' and string[-1] == '"':
string = string[1:-1]
return string
def int_to_str(value, length=2):
    '''Converts an integer to a zero-padded string, e.g. 3 to "03".'''
try:
int(value)
except:
raise ValueError('expected an integer value')
content = str(value)
while len(content) < length:
content = '0' + content
return content
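# e.g. int_to_str(3) -> '03'; int_to_str(7, length=4) -> '0007'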
class Directory:
def __init__(self, path):
self.path = path
self.name = os.path.split(self.path)[-1]
self.content = []
self.update()
def filter(self, classname):
content = []
for c in self.content:
if c.__class__.__name__ == classname:
content.append(c)
return content
def tracks(self, ext=None, split=False):
content = []
for c in self.content:
ext_match = False
if c.__class__.__name__ == 'WaveFile':
if not ext or (ext and ext == c.name_ext):
ext_match = True
if ext_match and c.track_nr:
if not split or (split and c.split_file):
content.append(c)
return content
def update(self):
def check_match(filename):
for i in self.content:
if i.name == filename:
return True
return False
def identify_track_number(filename):
if 'split-track' in filename:
                search = re.search(r'split-track(\d\d)', filename)
if search:
n = int(search.group(1))
if n:
return n
for n in range(0, 100):
search = re.search(int_to_str(n), filename)
if search:
# TODO: not part of other value such as year
return n
list_dir = glob.glob1(self.path, '*')
# TODO: for some reason removes only one file
rem_list = []
for i in self.content:
if i.name not in list_dir:
rem_list.append(i)
for i in rem_list:
self.content.remove(i)
for i in list_dir:
if not check_match(i):
# music file
if os.path.splitext(i)[-1] in list(WAVE_FILE_TYPE_BY_EXTENSION.keys()):
track_nr = identify_track_number(i)
if track_nr:
self.content.append(WaveFile(self.path + os.sep + i, track_nr=track_nr))
else:
self.content.append(WaveFile(self.path + os.sep + i))
# cue file
elif os.path.splitext(i)[-1] == '.cue':
self.content.append(CueFile(self.path + os.sep + i))
# meta file
elif i == ALBUM_META_FILE_NAME:
self.content.append(MetaFile(self.path + os.sep + i))
# directory
                elif os.path.isdir(self.path + os.sep + i):
self.content.append(Directory(self.path + os.sep + i))
else:
self.content.append(File(self.path + os.sep + i))
class File(object):
def __init__(self, path):
self.path = path
self.name = os.path.split(self.path)[-1]
self.name_name = ''.join(os.path.splitext(self.name)[:-1])
self.name_ext = os.path.splitext(self.name)[-1]
        self.split_file = self.name_name[:11] == 'split-track'
def get_name(self, ext=True, cmd=False):
if ext is True:
content = self.name
elif ext is False:
content = self.name_name
elif ext[0] == '.':
content = self.name_name + ext
else:
raise ValueError('ext parameter error')
if cmd:
            content = content.replace(' ', r'\ ')
return content
class CueFile(File):
def __init__(self, path):
def header_parser():
c = self.content.splitlines()
header_dict = {}
# remaining_headers = CUE_HEADER
remaining_headers = copy.copy(CUE_HEADER)
line_index = 0
match = True
while match:
match = False
saved_match = None
line_content = c[line_index]
for e in remaining_headers:
search_result = re.search(remaining_headers[e], line_content, re.I)
if search_result:
search_content = trim_cue_entry(search_result.group(1))
header_dict[e] = search_content
saved_match = e
match = True
line_index += 1
if saved_match:
del remaining_headers[saved_match]
return header_dict, line_index
def track_parser(start_line):
c = self.content.splitlines()
line_index = start_line
line_content = c[line_index]
search_result = re.search(CUE_TRACK, line_content, re.I)
if not search_result:
raise ValueError(
'inconsistent CUE sheet, TRACK expected at line {0}'.format(line_index + 1))
track_nr = int(search_result.group(1))
line_index += 1
next_track = False
track_meta = {}
# we make room for future indexes
track_meta['index'] = [None for m in range(100)]
while not next_track:
if line_index < len(c):
line_content = c[line_index]
artist_search = re.search(CUE_TRACK_INFO['artist'], line_content, re.I)
title_search = re.search(CUE_TRACK_INFO['title'], line_content, re.I)
isrc_search = re.search(CUE_TRACK_INFO['isrc'], line_content, re.I)
index_search = re.search(CUE_TRACK_INFO['index'], line_content, re.I)
if artist_search:
if trim_cue_entry(artist_search.group(1)) != self.header['artist']:
track_meta['artist'] = trim_cue_entry(artist_search.group(1))
line_index += 1
elif title_search:
track_meta['title'] = trim_cue_entry(title_search.group(1))
line_index += 1
elif isrc_search:
track_meta['isrc'] = trim_cue_entry(isrc_search.group(1))
line_index += 1
elif index_search:
track_meta['index'][int(index_search.group(1))] = index_search.group(2)
line_index += 1
elif re.search(CUE_TRACK, line_content, re.I):
next_track = True
elif line_index == len(c) - 1 and not line_content:
# last line is empty
line_index += 1
elif re.search('FLAGS DCP$', line_content, re.I):
track_meta['dcpflag'] = True
line_index += 1
else:
raise ValueError(
'unknown entry in track error, line {0}'.format(line_index + 1))
else:
next_track = True
track_meta['index'] = check_list(track_meta['index'], ignore=1)
return track_nr, track_meta, line_index
super(CueFile, self).__init__(path)
try:
with open(self.name) as cue_file:
self.content = cue_file.read()
except:
self.content = None
if not self.content:
try:
with open(self.name, encoding="cp1252") as cue_file:
self.content = cue_file.read()
            except:
                raise ValueError("Can't decode CUE sheet.")
if self.content[0] == '\ufeff':
self.content = self.content[1:]
header = header_parser()
self.header = header[0]
line_index = header[1]
# we make room for tracks
tracks = [None for m in range(100)]
while line_index < len(self.content.splitlines()):
parsed_track = track_parser(line_index)
line_index = parsed_track[2]
tracks[parsed_track[0]] = parsed_track[1]
self.tracks = check_list(tracks, ignore=1)
def get_meta(self):
content = ''
for i in ALBUM_META_ALBUM_BY_CUE:
if self.header.get(i):
content += i + '\t' + self.header[i] + '\n'
else:
content += i + '\t' + '\n'
for i in range(len(self.tracks)):
if self.tracks[i]:
if self.tracks[i].get('artist'):
content += 'track' + int_to_str(i) + 'artist' + '\t' + self.tracks[i].get(
'artist') + '\n'
if self.tracks[i].get('title'):
content += 'track' + int_to_str(i) + 'title' + '\t' + self.tracks[i].get(
'title') + '\n'
return content
def htoa(self):
'''Returns true if Hidden Track exists.'''
if int(self.tracks[1]['index'][1][-5:-3]) >= HTOA_LENGTH_TRIGGER:
return True
return False
def breakpoints(self):
        '''Returns track break points. Identical to CUETools' cuebreakpoints, with the exception of my standards for HTOA.'''
content = ''
for t in range(len(self.tracks)):
if t == 1 and not self.htoa():
content += ''
elif t >= 1:
t_index = self.tracks[t]['index']
content += t_index[1]
if t < (len(self.tracks) - 1):
content += '\n'
return content
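    # Hedged example of breakpoints() output for a three-track sheet with no
    # HTOA: the INDEX 01 times of tracks 2 and 3, one per line, e.g.
    # '04:33:12\n09:01:45' -- the same shape cuebreakpoints emits.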
class MetaFile(File):
def __init__(self, path):
super(MetaFile, self).__init__(path)
with open(self.path) as meta_file:
self.rawcontent = meta_file.read()
content = {}
content['tracks'] = [None for m in range(100)]
for l in self.rawcontent.splitlines():
parsed_line = re.search('^(.+?)\t(.+?)$', l)
if parsed_line:
if parsed_line.group(1)[:5] == 'track':
                    parsed_track = re.search(r'^track(\d\d)(.+?)$', parsed_line.group(1))
if not parsed_track:
raise ValueError('Syntax error in album meta file')
if not content['tracks'][int(parsed_track.group(1))]:
content['tracks'][int(parsed_track.group(1))] = dict()
content['tracks'][int(parsed_track.group(1))][
parsed_track.group(2)] = parsed_line.group(2)
else:
content[parsed_line.group(1)] = parsed_line.group(2)
content['tracks'] = check_list(content['tracks'], ignore=1)
self.content = content
def flac_tags(self, track_nr):
common_tags = dict()
freeform_tags = dict()
# common flac tags
common_tags['artist'] = self.content['artist']
common_tags['album'] = self.content['title']
common_tags['title'] = self.content['tracks'][track_nr]['title']
common_tags['tracknumber'] = str(track_nr)
common_tags['tracktotal'] = str(len(self.content['tracks']) - 1)
if 'date' in self.content:
common_tags['date'] = self.content['date']
if 'genre' in CUE_META.content:
common_tags['genre'] = CUE_META.content['genre']
# freeform tags
# freeform_tags['country'] = self.content['country']
# freeform_tags['releasedate'] = self.content['releasedate']
return common_tags, freeform_tags
def folders(self):
artist = self.content['artist']
album = self.content['date'] + ' - ' + self.content['title'] + ' (' + self.content[
'label'] + ' - ' + self.content['catalog'] + ')'
return artist, album
def complete(self):
'''Check MetaFile for containing all data'''
self.__init__(self.path)
for l in self.rawcontent.splitlines():
if re.search(r'^[0-9A-Za-z]+?\t$', l):
return False
return True
def count_tracks(self):
'''Returns tracks count'''
return len(self.content['tracks']) - self.content['tracks'].count(None)
class WaveFile(File):
def __init__(self, path, track_nr=None):
super(WaveFile, self).__init__(path)
self.track_nr = track_nr
self.type = WAVE_FILE_TYPE_BY_EXTENSION[self.name_ext]
def filename(self, ext=None, cmd=False):
title = CUE_META.content['tracks'][self.track_nr]['title']
if ext:
if ext[0] != '.':
ext = '.' + ext
else:
ext = self.name_ext
f_name = int_to_str(self.track_nr) + ' - ' + title + ext
if cmd:
f_name = f_name.replace(' ', r'\ ')
f_name = f_name.replace('!', '')
f_name = f_name.replace('?', '')
f_name = f_name.replace('/', ';')
return f_name
def tag(self):
if self.type == 'Free Lossless Audio Codec':
f = FLAC(self.name)
tags = CUE_META.flac_tags(self.track_nr)
for t in tags[0]:
f[t] = tags[0][t]
f.save()
def mutagen(self):
if self.type == 'Free Lossless Audio Codec':
return FLAC(self.name)
def split(albumpath):
global CUE_META
os.chdir(albumpath)
base_dir = Directory(os.getcwd())
cue = None
wave = None
# determining correct cue file
# if perfect match found
for _cue in base_dir.filter('CueFile'):
for _wave in base_dir.filter('WaveFile'):
if _cue.header['file'] == _wave.name:
logger.info('CUE Sheet found: %s', _cue.name)
logger.info('Music file found: %s', _wave.name)
cue = _cue
wave = _wave
# if no perfect match found then try without extensions
if not cue and not wave:
logger.info('No match for music files, trying to match without extensions...')
for _cue in base_dir.filter('CueFile'):
for _wave in base_dir.filter('WaveFile'):
if ''.join(os.path.splitext(_cue.header['file'])[:-1]) == _wave.name_name:
logger.info('Possible CUE Sheet found: %s', _cue.name)
logger.info('CUE Sheet refers to music file: %s', _cue.header['file'])
logger.info('Possible Music file found: %s', _wave.name)
cue = _cue
wave = _wave
cue.header['file'] = wave.name
# if still no match then raise an exception
if not cue and not wave:
raise ValueError('No music file match found!')
# Split with xld or shntool
splitter = 'shntool'
xldprofile = None
# use xld profile to split cue
if headphones.CONFIG.ENCODER == 'xld' and headphones.CONFIG.MUSIC_ENCODER and headphones.CONFIG.XLDPROFILE:
from . import getXldProfile
xldprofile, xldformat, _ = getXldProfile.getXldProfile(headphones.CONFIG.XLDPROFILE)
if not xldformat:
raise ValueError(
'Details for xld profile "%s" not found, cannot split cue' % (xldprofile))
else:
if headphones.CONFIG.ENCODERFOLDER:
splitter = os.path.join(headphones.CONFIG.ENCODERFOLDER, 'xld')
else:
splitter = 'xld'
# use standard xld command to split cue
elif sys.platform == 'darwin':
splitter = 'xld'
if not check_splitter(splitter):
splitter = 'shntool'
if splitter == 'shntool' and not check_splitter(splitter):
raise ValueError('Command not found, ensure shntool or xld is installed')
# Determine if file can be split
if wave.name_ext not in list(WAVE_FILE_TYPE_BY_EXTENSION.keys()):
raise ValueError('Cannot split, audio file has unsupported extension')
# Split with xld
if 'xld' in splitter:
cmd = [splitter]
cmd.extend([wave.name])
cmd.extend(['-c'])
cmd.extend([cue.name])
if xldprofile:
cmd.extend(['--profile'])
cmd.extend([xldprofile])
else:
cmd.extend(['-f'])
cmd.extend(['flac'])
cmd.extend(['-o'])
cmd.extend([base_dir.path])
split = split_baby(wave.name, cmd)
else:
# Split with shntool
# generate temporary metafile describing the cue
with open(ALBUM_META_FILE_NAME, mode='w') as meta_file:
meta_file.write(cue.get_meta())
base_dir.content.append(MetaFile(os.path.abspath(ALBUM_META_FILE_NAME)))
# check metafile for completeness
if not base_dir.filter('MetaFile'):
raise ValueError('Cue Meta file {0} missing!'.format(ALBUM_META_FILE_NAME))
else:
CUE_META = base_dir.filter('MetaFile')[0]
with open(SPLIT_FILE_NAME, mode='w') as split_file:
split_file.write(cue.breakpoints())
if headphones.CONFIG.CUE_SPLIT_SHNTOOL_PATH:
cmd = [os.path.join(headphones.CONFIG.CUE_SPLIT_SHNTOOL_PATH, 'shntool')]
else:
cmd = ['shntool']
cmd.extend(['split'])
cmd.extend(['-f'])
cmd.extend([SPLIT_FILE_NAME])
cmd.extend(['-o'])
cmd.extend([wave.name_ext.lstrip('.')])
cmd.extend([wave.name])
split = split_baby(wave.name, cmd)
os.remove(SPLIT_FILE_NAME)
base_dir.update()
# tag FLAC files
if split and CUE_META.count_tracks() == len(base_dir.tracks(ext='.flac', split=True)):
for t in base_dir.tracks(ext='.flac', split=True):
logger.info('Tagging %s...', t.name)
t.tag()
# rename files
if split and CUE_META.count_tracks() == len(base_dir.tracks(ext=wave.name_ext, split=True)):
for t in base_dir.tracks(ext=wave.name_ext, split=True):
if t.name != t.filename():
logger.info('Renaming %s to %s...', t.name, t.filename())
os.rename(t.name, t.filename())
os.remove(ALBUM_META_FILE_NAME)
if not split:
raise ValueError('Failed to split, check logs')
else:
# Rename original file
os.rename(wave.name, wave.name + '.original')
return True
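# Example invocation (hypothetical path): point split() at a directory holding
# a CUE sheet plus its single-file audio image, and it will split, tag and
# rename the individual tracks in place.
#
#   split('/music/downloads/Artist - Album (image + cue)')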
| 23,438 | Python | .py | 555 | 30.987387 | 125 | 0.549728 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
| 9,288 | crier.py | rembo10_headphones/headphones/crier.py |
import pprint
import sys
import threading
import traceback
from headphones import logger
def cry():
"""
Logs thread traces.
"""
tmap = {}
main_thread = None
# get a map of threads by their ID so we can print their names
# during the traceback dump
for t in threading.enumerate():
if t.ident:
tmap[t.ident] = t
else:
main_thread = t
# Loop over each thread's current frame, writing info about it
for tid, frame in sys._current_frames().items():
thread = tmap.get(tid, main_thread)
lines = []
lines.append('%s\n' % thread.getName())
lines.append('========================================\n')
lines += traceback.format_stack(frame)
lines.append('========================================\n')
lines.append('LOCAL VARIABLES:\n')
lines.append('========================================\n')
lines.append(pprint.pformat(frame.f_locals))
lines.append('\n\n')
logger.info("".join(lines))
| 1,049 | Python | .py | 31 | 27.258065 | 66 | 0.537019 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
| 9,289 | lastfm.py | rembo10_headphones/headphones/lastfm.py |
# This file is part of Headphones.
#
# Headphones is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Headphones is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Headphones. If not, see <http://www.gnu.org/licenses/>.
import random
from collections import defaultdict
import headphones
import headphones.lock
from headphones import db, logger, request
TIMEOUT = 60.0 # seconds
REQUEST_LIMIT = 1.0 / 5 # seconds
ENTRY_POINT = "https://ws.audioscrobbler.com/2.0/"
APP_API_KEY = "395e6ec6bb557382fc41fde867bce66f"
# Required for API request limit
lastfm_lock = headphones.lock.TimedLock(REQUEST_LIMIT)
def request_lastfm(method, **kwargs):
"""
Call a Last.fm API method. Automatically sets the method and API key. Method
will return the result if no error occurred.
By default, this method will request the JSON format, since it is more
lightweight than XML.
"""
# Prepare request
kwargs["method"] = method
kwargs.setdefault("api_key", headphones.CONFIG.LASTFM_APIKEY or APP_API_KEY)
kwargs.setdefault("format", "json")
# Send request
logger.debug("Calling Last.fm method: %s", method)
logger.debug("Last.fm call parameters: %s", kwargs)
data = request.request_json(ENTRY_POINT, timeout=TIMEOUT, params=kwargs, lock=lastfm_lock)
# Parse response and check for errors.
if not data:
logger.error("Error calling Last.fm method: %s", method)
return
if "error" in data:
logger.debug("Last.fm returned an error: %s", data["message"])
return
return data
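# Minimal usage sketch (the mbid below is a made-up placeholder): the Last.fm
# method name and its parameters are passed straight through as kwargs.
#
#   data = request_lastfm("artist.getinfo", mbid="00000000-0000-0000-0000-000000000000")
#   if data:
#       print(data["artist"]["name"])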
def getSimilar():
if not headphones.CONFIG.LASTFM_APIKEY:
logger.info(
'To update the Similar Artists cloud tag, create a Last.fm application api key '
'and add it under the Advanced config tab'
)
return
myDB = db.DBConnection()
results = myDB.select("SELECT ArtistID from artists ORDER BY HaveTracks DESC LIMIT 10")
logger.info("Fetching similar artists from Last.fm for tag cloud")
artistlist = []
for result in results:
data = request_lastfm("artist.getsimilar", mbid=result["ArtistId"])
if data and "similarartists" in data:
artists = data["similarartists"]["artist"]
for artist in artists:
try:
artist_mbid = artist["mbid"]
artist_name = artist["name"]
except KeyError:
continue
if not any(artist_mbid in x for x in results):
artistlist.append((artist_name, artist_mbid))
# Add new artists to tag cloud
logger.debug("Fetched %d artists from Last.fm", len(artistlist))
count = defaultdict(int)
for artist, mbid in artistlist:
count[artist, mbid] += 1
items = list(count.items())
top_list = sorted(items, key=lambda x: x[1], reverse=True)[:25]
random.shuffle(top_list)
myDB.action("DELETE from lastfmcloud")
for item in top_list:
artist_name, artist_mbid = item[0]
count = item[1]
myDB.action("INSERT INTO lastfmcloud VALUES( ?, ?, ?)", [artist_name, artist_mbid, count])
logger.debug("Inserted %d artists into Last.fm tag cloud", len(top_list))
def getArtists():
myDB = db.DBConnection()
results = myDB.select("SELECT ArtistID from artists")
if not headphones.CONFIG.LASTFM_USERNAME:
logger.warn("Last.fm username not set, not importing artists.")
return
logger.info("Fetching artists from Last.fm for username: %s", headphones.CONFIG.LASTFM_USERNAME)
data = request_lastfm("library.getartists", limit=1000, user=headphones.CONFIG.LASTFM_USERNAME)
if data and "artists" in data:
artistlist = []
artists = data["artists"]["artist"]
logger.debug("Fetched %d artists from Last.fm", len(artists))
for artist in artists:
artist_mbid = artist["mbid"]
if not any(artist_mbid in x for x in results):
artistlist.append(artist_mbid)
from headphones import importer
for artistid in artistlist:
importer.addArtisttoDB(artistid)
logger.info("Imported %d new artists from Last.fm", len(artistlist))
def getTagTopArtists(tag, limit=50):
myDB = db.DBConnection()
results = myDB.select("SELECT ArtistID from artists")
logger.info("Fetching top artists from Last.fm for tag: %s", tag)
data = request_lastfm("tag.gettopartists", limit=limit, tag=tag)
if data and "topartists" in data:
artistlist = []
artists = data["topartists"]["artist"]
logger.debug("Fetched %d artists from Last.fm", len(artists))
for artist in artists:
try:
artist_mbid = artist["mbid"]
except KeyError:
continue
if not any(artist_mbid in x for x in results):
artistlist.append(artist_mbid)
from headphones import importer
for artistid in artistlist:
importer.addArtisttoDB(artistid)
logger.debug("Added %d new artists from Last.fm", len(artistlist))
| 5,594 | Python | .py | 125 | 37.488 | 100 | 0.672446 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
| 9,290 | nzbget.py | rembo10_headphones/headphones/nzbget.py |
# This file is modified to work with headphones by CurlyMo <curlymoo1@gmail.com> as a part of XBian - XBMC on the Raspberry Pi
# Author: Nic Wolfe <nic@wolfeden.ca>
# URL: http://code.google.com/p/sickbeard/
#
# This file is part of Sick Beard.
#
# Sick Beard is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Sick Beard is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Sick Beard. If not, see <http://www.gnu.org/licenses/>.
from base64 import standard_b64encode
import http.client
import xmlrpc.client
import headphones
from headphones import logger
def sendNZB(nzb):
addToTop = False
nzbgetXMLrpc = "%(protocol)s://%(username)s:%(password)s@%(host)s/xmlrpc"
if not headphones.CONFIG.NZBGET_HOST:
logger.error("No NZBget host found in configuration. Please configure it.")
return False
if headphones.CONFIG.NZBGET_HOST.startswith('https://'):
protocol = 'https'
host = headphones.CONFIG.NZBGET_HOST.replace('https://', '', 1)
else:
protocol = 'http'
host = headphones.CONFIG.NZBGET_HOST.replace('http://', '', 1)
url = nzbgetXMLrpc % {"protocol": protocol, "host": host,
"username": headphones.CONFIG.NZBGET_USERNAME,
"password": headphones.CONFIG.NZBGET_PASSWORD}
nzbGetRPC = xmlrpc.client.ServerProxy(url)
try:
if nzbGetRPC.writelog("INFO", "headphones connected to drop of %s any moment now." % (
nzb.name + ".nzb")):
logger.debug("Successfully connected to NZBget")
else:
logger.info("Successfully connected to NZBget, but unable to send a message" % (
nzb.name + ".nzb"))
except http.client.socket.error:
logger.error(
"Please check your NZBget host and port (if it is running). NZBget is not responding to this combination")
return False
except xmlrpc.client.ProtocolError as e:
if e.errmsg == "Unauthorized":
logger.error("NZBget password is incorrect.")
else:
logger.error("Protocol Error: " + e.errmsg)
return False
nzbcontent64 = None
if nzb.resultType == "nzbdata":
data = nzb.extraInfo[0]
# NZBGet needs a string, not bytes
nzbcontent64 = standard_b64encode(data).decode("utf-8")
logger.info("Sending NZB to NZBget")
logger.debug("URL: " + url)
dupekey = ""
dupescore = 0
try:
# Find out if nzbget supports priority (Version 9.0+), old versions beginning with a 0.x will use the old command
nzbget_version_str = nzbGetRPC.version()
nzbget_version = int(nzbget_version_str[:nzbget_version_str.find(".")])
if nzbget_version == 0:
if nzbcontent64 is not None:
nzbget_result = nzbGetRPC.append(nzb.name + ".nzb",
headphones.CONFIG.NZBGET_CATEGORY, addToTop,
nzbcontent64)
else:
# from headphones.common.providers.generic import GenericProvider
# if nzb.resultType == "nzb":
# genProvider = GenericProvider("")
# data = genProvider.getURL(nzb.url)
# if (data is None):
# return False
# nzbcontent64 = standard_b64encode(data)
# nzbget_result = nzbGetRPC.append(nzb.name + ".nzb", headphones.CONFIG.NZBGET_CATEGORY, addToTop, nzbcontent64)
return False
elif nzbget_version == 12:
if nzbcontent64 is not None:
nzbget_result = nzbGetRPC.append(nzb.name + ".nzb",
headphones.CONFIG.NZBGET_CATEGORY,
headphones.CONFIG.NZBGET_PRIORITY, False,
nzbcontent64, False, dupekey, dupescore, "score")
else:
nzbget_result = nzbGetRPC.appendurl(nzb.name + ".nzb",
headphones.CONFIG.NZBGET_CATEGORY,
headphones.CONFIG.NZBGET_PRIORITY, False,
nzb.url, False, dupekey, dupescore, "score")
# v13+ has a new combined append method that accepts both (url and content)
# also the return value has changed from boolean to integer
# (Positive number representing NZBID of the queue item. 0 and negative numbers represent error codes.)
elif nzbget_version >= 13:
nzbget_result = True if nzbGetRPC.append(nzb.name + ".nzb",
nzbcontent64 if nzbcontent64 is not None else nzb.url,
headphones.CONFIG.NZBGET_CATEGORY,
headphones.CONFIG.NZBGET_PRIORITY, False,
False, dupekey, dupescore,
"score") > 0 else False
else:
if nzbcontent64 is not None:
nzbget_result = nzbGetRPC.append(nzb.name + ".nzb",
headphones.CONFIG.NZBGET_CATEGORY,
headphones.CONFIG.NZBGET_PRIORITY, False,
nzbcontent64)
else:
nzbget_result = nzbGetRPC.appendurl(nzb.name + ".nzb",
headphones.CONFIG.NZBGET_CATEGORY,
headphones.CONFIG.NZBGET_PRIORITY, False,
nzb.url)
if nzbget_result:
logger.debug("NZB sent to NZBget successfully")
return True
else:
logger.error("NZBget could not add %s to the queue" % (nzb.name + ".nzb"))
return False
except Exception:
logger.error(
"Connect Error to NZBget: could not add %s to the queue" % (nzb.name + ".nzb"))
return False
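# Hedged usage sketch: sendNZB() expects a search-result object exposing at
# least `name`, `resultType` ("nzb" or "nzbdata"), `url` and `extraInfo`, as
# read by the code above. The stub below is illustrative only.
#
#   class StubNZB(object):
#       name = "Artist - Album"
#       resultType = "nzb"
#       url = "http://indexer.example/get/12345"
#       extraInfo = []
#
#   sendNZB(StubNZB())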
| 6,684 | Python | .py | 126 | 37.68254 | 128 | 0.560923 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
| 9,291 | api.py | rembo10_headphones/headphones/api.py |
# This file is part of Headphones.
#
# Headphones is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Headphones is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Headphones. If not, see <http://www.gnu.org/licenses/>.
import json
from headphones import db, mb, updater, importer, searcher, cache, postprocessor, versioncheck, \
logger
import headphones
cmd_list = ['getIndex', 'getArtist', 'getAlbum', 'getUpcoming', 'getWanted', 'getSnatched',
'getSimilar', 'getHistory', 'getLogs',
'findArtist', 'findAlbum', 'addArtist', 'delArtist', 'pauseArtist', 'resumeArtist',
'refreshArtist',
'addAlbum', 'queueAlbum', 'unqueueAlbum', 'forceSearch', 'forceProcess',
'forceActiveArtistsUpdate',
'getVersion', 'checkGithub', 'shutdown', 'restart', 'update', 'getArtistArt',
'getAlbumArt',
'getArtistInfo', 'getAlbumInfo', 'getArtistThumb', 'getAlbumThumb', 'clearLogs',
'choose_specific_download', 'download_specific_release']
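# The API is driven entirely by query parameters; each entry in cmd_list maps
# to a private _<cmd> method below. A hedged example call (host, port, key and
# id are placeholders):
#
#   http://localhost:8181/api?apikey=<32-char-key>&cmd=getAlbum&id=<musicbrainz-id>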
class Api(object):
def __init__(self):
self.apikey = None
self.cmd = None
self.id = None
self.kwargs = None
self.data = None
self.callback = None
def checkParams(self, *args, **kwargs):
if not headphones.CONFIG.API_ENABLED:
self.data = 'API not enabled'
return
if not headphones.CONFIG.API_KEY:
self.data = 'API key not generated'
return
if len(headphones.CONFIG.API_KEY) != 32:
self.data = 'API key not generated correctly'
return
if 'apikey' not in kwargs:
self.data = 'Missing api key'
return
if kwargs['apikey'] != headphones.CONFIG.API_KEY:
self.data = 'Incorrect API key'
return
else:
self.apikey = kwargs.pop('apikey')
if 'cmd' not in kwargs:
self.data = 'Missing parameter: cmd'
return
if kwargs['cmd'] not in cmd_list:
self.data = 'Unknown command: %s' % kwargs['cmd']
return
else:
self.cmd = kwargs.pop('cmd')
self.kwargs = kwargs
self.data = 'OK'
def fetchData(self):
if self.data == 'OK':
logger.info('Received API command: %s', self.cmd)
methodToCall = getattr(self, "_" + self.cmd)
methodToCall(**self.kwargs)
if 'callback' not in self.kwargs:
if isinstance(self.data, str):
return self.data
else:
return json.dumps(self.data)
else:
self.callback = self.kwargs['callback']
self.data = json.dumps(self.data)
self.data = self.callback + '(' + self.data + ');'
return self.data
else:
return self.data
def _dic_from_query(self, query):
myDB = db.DBConnection()
rows = myDB.select(query)
rows_as_dic = []
for row in rows:
row_as_dic = dict(zip(row.keys(), row))
rows_as_dic.append(row_as_dic)
return rows_as_dic
def _getIndex(self, **kwargs):
self.data = self._dic_from_query(
'SELECT * from artists order by ArtistSortName COLLATE NOCASE')
return
def _getArtist(self, **kwargs):
if 'id' not in kwargs:
self.data = 'Missing parameter: id'
return
else:
self.id = kwargs['id']
artist = self._dic_from_query(
'SELECT * from artists WHERE ArtistID="' + self.id + '"')
albums = self._dic_from_query(
'SELECT * from albums WHERE ArtistID="' + self.id + '" order by ReleaseDate DESC')
description = self._dic_from_query(
'SELECT * from descriptions WHERE ArtistID="' + self.id + '"')
self.data = {
'artist': artist, 'albums': albums, 'description': description}
return
def _getAlbum(self, **kwargs):
if 'id' not in kwargs:
self.data = 'Missing parameter: id'
return
else:
self.id = kwargs['id']
album = self._dic_from_query(
'SELECT * from albums WHERE AlbumID="' + self.id + '"')
tracks = self._dic_from_query(
'SELECT * from tracks WHERE AlbumID="' + self.id + '"')
description = self._dic_from_query(
'SELECT * from descriptions WHERE ReleaseGroupID="' + self.id + '"')
self.data = {
'album': album, 'tracks': tracks, 'description': description}
return
def _getHistory(self, **kwargs):
self.data = self._dic_from_query(
'SELECT * from snatched WHERE status NOT LIKE "Seed%" order by DateAdded DESC')
return
def _getUpcoming(self, **kwargs):
self.data = self._dic_from_query(
"SELECT * from albums WHERE ReleaseDate > date('now') order by ReleaseDate DESC")
return
def _getWanted(self, **kwargs):
self.data = self._dic_from_query(
"SELECT * from albums WHERE Status='Wanted'")
return
def _getSnatched(self, **kwargs):
self.data = self._dic_from_query(
"SELECT * from albums WHERE Status='Snatched'")
return
def _getSimilar(self, **kwargs):
self.data = self._dic_from_query('SELECT * from lastfmcloud')
return
def _getLogs(self, **kwargs):
self.data = headphones.LOG_LIST
return
def _clearLogs(self, **kwargs):
headphones.LOG_LIST = []
self.data = 'Cleared log'
return
def _findArtist(self, **kwargs):
if 'name' not in kwargs:
self.data = 'Missing parameter: name'
return
if 'limit' in kwargs:
limit = kwargs['limit']
else:
limit = 50
self.data = mb.findArtist(kwargs['name'], limit)
def _findAlbum(self, **kwargs):
if 'name' not in kwargs:
self.data = 'Missing parameter: name'
return
if 'limit' in kwargs:
limit = kwargs['limit']
else:
limit = 50
self.data = mb.findRelease(kwargs['name'], limit)
def _addArtist(self, **kwargs):
if 'id' not in kwargs:
self.data = 'Missing parameter: id'
return
else:
self.id = kwargs['id']
try:
importer.addArtisttoDB(self.id)
except Exception as e:
self.data = e
return
def _delArtist(self, **kwargs):
if 'id' not in kwargs:
self.data = 'Missing parameter: id'
return
else:
self.id = kwargs['id']
myDB = db.DBConnection()
myDB.action('DELETE from artists WHERE ArtistID="' + self.id + '"')
myDB.action('DELETE from albums WHERE ArtistID="' + self.id + '"')
myDB.action('DELETE from tracks WHERE ArtistID="' + self.id + '"')
def _pauseArtist(self, **kwargs):
if 'id' not in kwargs:
self.data = 'Missing parameter: id'
return
else:
self.id = kwargs['id']
myDB = db.DBConnection()
controlValueDict = {'ArtistID': self.id}
newValueDict = {'Status': 'Paused'}
myDB.upsert("artists", newValueDict, controlValueDict)
def _resumeArtist(self, **kwargs):
if 'id' not in kwargs:
self.data = 'Missing parameter: id'
return
else:
self.id = kwargs['id']
myDB = db.DBConnection()
controlValueDict = {'ArtistID': self.id}
newValueDict = {'Status': 'Active'}
myDB.upsert("artists", newValueDict, controlValueDict)
def _refreshArtist(self, **kwargs):
if 'id' not in kwargs:
self.data = 'Missing parameter: id'
return
else:
self.id = kwargs['id']
try:
importer.addArtisttoDB(self.id)
except Exception as e:
self.data = e
return
def _addAlbum(self, **kwargs):
if 'id' not in kwargs:
self.data = 'Missing parameter: id'
return
else:
self.id = kwargs['id']
try:
importer.addReleaseById(self.id)
except Exception as e:
self.data = e
return
def _queueAlbum(self, **kwargs):
if 'id' not in kwargs:
self.data = 'Missing parameter: id'
return
else:
self.id = kwargs['id']
if 'new' in kwargs:
new = kwargs['new']
else:
new = False
if 'lossless' in kwargs:
lossless = kwargs['lossless']
else:
lossless = False
myDB = db.DBConnection()
controlValueDict = {'AlbumID': self.id}
if lossless:
newValueDict = {'Status': 'Wanted Lossless'}
else:
newValueDict = {'Status': 'Wanted'}
myDB.upsert("albums", newValueDict, controlValueDict)
searcher.searchforalbum(self.id, new)
def _unqueueAlbum(self, **kwargs):
if 'id' not in kwargs:
self.data = 'Missing parameter: id'
return
else:
self.id = kwargs['id']
myDB = db.DBConnection()
controlValueDict = {'AlbumID': self.id}
newValueDict = {'Status': 'Skipped'}
myDB.upsert("albums", newValueDict, controlValueDict)
def _forceSearch(self, **kwargs):
searcher.searchforalbum()
def _forceProcess(self, **kwargs):
if 'album_dir' in kwargs:
album_dir = kwargs['album_dir']
postprocessor.forcePostProcess(album_dir=album_dir)
elif 'dir' in kwargs:
self.dir = kwargs['dir']
postprocessor.forcePostProcess(self.dir)
else:
postprocessor.forcePostProcess()
def _forceActiveArtistsUpdate(self, **kwargs):
updater.dbUpdate()
def _getVersion(self, **kwargs):
self.data = {
'git_path': headphones.CONFIG.GIT_PATH,
'install_type': headphones.INSTALL_TYPE,
'current_version': headphones.CURRENT_VERSION,
'latest_version': headphones.LATEST_VERSION,
'commits_behind': headphones.COMMITS_BEHIND,
}
def _checkGithub(self, **kwargs):
versioncheck.checkGithub()
self._getVersion()
def _shutdown(self, **kwargs):
headphones.SIGNAL = 'shutdown'
def _restart(self, **kwargs):
headphones.SIGNAL = 'restart'
def _update(self, **kwargs):
headphones.SIGNAL = 'update'
def _getArtistArt(self, **kwargs):
if 'id' not in kwargs:
self.data = 'Missing parameter: id'
return
else:
self.id = kwargs['id']
self.data = cache.getArtwork(ArtistID=self.id)
def _getAlbumArt(self, **kwargs):
if 'id' not in kwargs:
self.data = 'Missing parameter: id'
return
else:
self.id = kwargs['id']
self.data = cache.getArtwork(AlbumID=self.id)
def _getArtistInfo(self, **kwargs):
if 'id' not in kwargs:
self.data = 'Missing parameter: id'
return
else:
self.id = kwargs['id']
self.data = cache.getInfo(ArtistID=self.id)
def _getAlbumInfo(self, **kwargs):
if 'id' not in kwargs:
self.data = 'Missing parameter: id'
return
else:
self.id = kwargs['id']
self.data = cache.getInfo(AlbumID=self.id)
def _getArtistThumb(self, **kwargs):
if 'id' not in kwargs:
self.data = 'Missing parameter: id'
return
else:
self.id = kwargs['id']
self.data = cache.getThumb(ArtistID=self.id)
def _getAlbumThumb(self, **kwargs):
if 'id' not in kwargs:
self.data = 'Missing parameter: id'
return
else:
self.id = kwargs['id']
self.data = cache.getThumb(AlbumID=self.id)
def _choose_specific_download(self, **kwargs):
if 'id' not in kwargs:
self.data = 'Missing parameter: id'
return
else:
self.id = kwargs['id']
results = searcher.searchforalbum(
self.id, choose_specific_download=True)
results_as_dicts = []
for result in results:
result_dict = {
'title': result[0],
'size': result[1],
'url': result[2],
'provider': result[3],
'kind': result[4]
}
results_as_dicts.append(result_dict)
self.data = results_as_dicts
def _download_specific_release(self, **kwargs):
expected_kwargs = ['id', 'title', 'size', 'url', 'provider', 'kind']
for kwarg in expected_kwargs:
if kwarg not in kwargs:
self.data = 'Missing parameter: ' + kwarg
return self.data
title = kwargs['title']
size = kwargs['size']
url = kwargs['url']
provider = kwargs['provider']
kind = kwargs['kind']
id = kwargs['id']
for kwarg in expected_kwargs:
del kwargs[kwarg]
# Handle situations where the torrent url contains arguments that are
# parsed
if kwargs:
import urllib.parse
url = urllib.parse.quote(
url, safe=":?/=&") + '&' + urllib.parse.urlencode(kwargs)
try:
result = [(title, int(size), url, provider, kind)]
except ValueError:
result = [(title, float(size), url, provider, kind)]
logger.info("Making sure we can download the chosen result")
(data, bestqual) = searcher.preprocess(result)
if data and bestqual:
myDB = db.DBConnection()
album = myDB.action(
'SELECT * from albums WHERE AlbumID=?', [id]).fetchone()
searcher.send_to_downloader(data, bestqual, album)
| 14,776 | Python | .py | 386 | 27.974093 | 97 | 0.563717 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
| 9,292 | albumart_test.py | rembo10_headphones/headphones/albumart_test.py |
from headphones.unittestcompat import TestCase
import headphones.albumart
class AlbumArtTest(TestCase):
def test_nothing(self):
x = 100 - 2 * 50
if x:
headphones.albumart.getAlbumArt('asdf')
self.assertTrue(True)
| 256 | Python | .py | 8 | 25.625 | 51 | 0.693878 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
| 9,293 | request.py | rembo10_headphones/headphones/request.py |
# This file is part of Headphones.
#
# Headphones is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Headphones is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Headphones. If not, see <http://www.gnu.org/licenses/>.
from xml.dom import minidom
import collections
import sys
from bs4 import BeautifulSoup
import requests
from headphones import logger
import feedparser
import headphones
import headphones.lock
# Disable SSL certificate warnings. We have our own handling
requests.packages.urllib3.disable_warnings()
# Dictionary with last request times, for rate limiting.
last_requests = collections.defaultdict(int)
fake_lock = headphones.lock.FakeLock()
def request_response(url, method="get", auto_raise=True,
whitelist_status_code=None, lock=fake_lock, **kwargs):
"""
Convenient wrapper for `requests.get', which will capture the exceptions
and log them. On success, the Response object is returned. In case of an
exception, None is returned.
Additionally, there is support for rate limiting. To use this feature,
supply a lock object (e.g. headphones.lock.TimedLock) created with the
request limit. The lock is used to make sure no other request with the same
lock executes concurrently. The request limit is the minimal time between
two requests (and so 1/request_limit is the number of requests per second).
"""
# Convert whitelist_status_code to a list if needed
if whitelist_status_code and not isinstance(whitelist_status_code, list):
whitelist_status_code = [whitelist_status_code]
# Disable verification of SSL certificates if requested. Note: this could
# pose a security issue!
kwargs["verify"] = bool(headphones.CONFIG.VERIFY_SSL_CERT)
# This fix is put in place for systems with broken SSL (like QNAP)
if not headphones.CONFIG.VERIFY_SSL_CERT and sys.version_info >= (2, 7, 9):
try:
import ssl
ssl._create_default_https_context = ssl._create_unverified_context
except:
pass
# Map method to the request.XXX method. This is a simple hack, but it
# allows requests to apply more magic per method. See lib/requests/api.py.
request_method = getattr(requests, method.lower())
try:
# Request URL and wait for response
with lock:
logger.debug(
"Requesting URL via %s method: %s", method.upper(), url)
response = request_method(url, **kwargs)
# If status code != OK, then raise exception, except if the status code
# is white listed.
if whitelist_status_code and auto_raise:
if response.status_code not in whitelist_status_code:
try:
response.raise_for_status()
except:
logger.debug(
"Response status code %d is not white "
"listed, raised exception", response.status_code)
raise
elif auto_raise:
response.raise_for_status()
return response
except requests.exceptions.SSLError as e:
if kwargs["verify"]:
logger.error(
"Unable to connect to remote host because of a SSL error. "
"It is likely that your system cannot verify the validity "
"of the certificate. The remote certificate is either "
"self-signed, or the remote server uses SNI. See the wiki for "
"more information on this topic.")
else:
logger.error(
"SSL error raised during connection, with certificate "
"verification turned off: %s", e)
except requests.ConnectionError:
logger.error(
"Unable to connect to remote host. Check if the remote "
"host is up and running.")
except requests.Timeout:
logger.error(
"Request timed out. The remote host did not respond in a timely "
"manner.")
except requests.HTTPError as e:
if e.response is not None:
if e.response.status_code >= 500:
cause = "remote server error"
elif e.response.status_code >= 400:
cause = "local client error"
else:
# I don't think we will end up here, but for completeness
cause = "unknown"
logger.error(
"Request raise HTTP error with status code %d (%s).",
e.response.status_code, cause)
# Debug response
if headphones.VERBOSE:
server_message(e.response)
else:
logger.error("Request raised HTTP error.")
except requests.RequestException as e:
logger.error("Request raised exception: %s", e)
def request_soup(url, **kwargs):
"""
Wrapper for `request_response', which will return a BeautifulSoup object if
no exceptions are raised.
"""
parser = kwargs.pop("parser", "html.parser")
response = request_response(url, **kwargs)
if response is not None:
return BeautifulSoup(response.content, parser)
def request_minidom(url, **kwargs):
"""
Wrapper for `request_response', which will return a Minidom object if no
exceptions are raised.
"""
response = request_response(url, **kwargs)
if response is not None:
return minidom.parseString(response.content)
def request_json(url, **kwargs):
"""
Wrapper for `request_response', which will decode the response as JSON
object and return the result, if no exceptions are raised.
As an option, a validator callback can be given, which should return True
if the result is valid.
"""
validator = kwargs.pop("validator", None)
response = request_response(url, **kwargs)
if response is not None:
try:
result = response.json()
if validator and not validator(result):
logger.error("JSON validation result failed")
else:
return result
except ValueError:
logger.error("Response returned invalid JSON data")
# Debug response
if headphones.VERBOSE:
server_message(response)
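# Hedged usage sketch: a validator callback can reject structurally wrong
# payloads before they reach the caller. The URL and key name are placeholders.
#
#   data = request_json("https://api.example.org/v1/albums",
#                       validator=lambda result: "albums" in result)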
def request_content(url, **kwargs):
"""
Wrapper for `request_response', which will return the raw content.
"""
response = request_response(url, **kwargs)
if response is not None:
return response.content
def request_feed(url, **kwargs):
"""
Wrapper for `request_response', which will return a feed object.
"""
response = request_response(url, **kwargs)
if response is not None:
return feedparser.parse(response.content)
def server_message(response):
"""
Extract the server message from a response and log it at DEBUG level.
Some servers return extra information in the result. Try to parse it for
debugging purposes. Messages are truncated to 200 characters, since the
server may return a whole page for normal web page URLs.
"""
message = None
# First attempt is to 'read' the response as HTML
if response.headers.get("content-type") and \
"text/html" in response.headers.get("content-type"):
try:
soup = BeautifulSoup(response.content, "html.parser")
except Exception:
soup = None
if soup:
# Find the body and clean up common tags to grab the content, which
# probably contains the message.
message = soup.find("body")
elements = ("header", "script", "footer", "nav", "input", "textarea")
for element in elements:
for tag in soup.find_all(element):
tag.replaceWith("")
message = message.text if message else soup.text
message = message.strip()
# Second attempt is to just take the response
if message is None:
message = response.content.strip()
if message:
# Truncate message if it is too long.
if len(message) > 200:
if not isinstance(message, str):
message = message.decode(headphones.SYS_ENCODING, 'replace')
message = message[:200] + "..."
logger.debug("Server responded with message: %s", message)
| 8,703 | Python | .py | 201 | 34.870647 | 79 | 0.650651 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
| 9,294 | getXldProfile.py | rembo10_headphones/headphones/getXldProfile.py |
import os.path
import plistlib
from headphones import logger
def getXldProfile(xldProfile):
xldProfileNotFound = xldProfile
expanded = os.path.expanduser('~/Library/Preferences/jp.tmkk.XLD.plist')
if not os.path.isfile(expanded):
logger.warn("Could not find xld preferences at: %s", expanded)
return (xldProfileNotFound, None, None)
# Get xld preferences plist
try:
with open(expanded, 'rb') as _f:
preferences = plistlib.load(_f)
except Exception as e:
logger.error("Error reading xld preferences plist: %s", e)
return (xldProfileNotFound, None, None)
if not isinstance(preferences, dict):
logger.error("Error reading xld preferences plist, not a dict: %r", preferences)
return (xldProfileNotFound, None, None)
profiles = preferences.get('Profiles', []) # pylint:disable=E1103
xldProfile = xldProfile.lower()
for profile in profiles:
profilename = profile.get('XLDProfileManager_ProfileName')
if not profilename:
continue
xldProfileForCmd = profilename
profilename = profilename.lower()
xldFormat = None
xldBitrate = None
if profilename == xldProfile:
OutputFormatName = profile.get('OutputFormatName')
ShortDesc = profile.get('ShortDesc')
# Determine format and bitrate
if OutputFormatName == 'WAV':
xldFormat = 'wav'
elif OutputFormatName == 'AIFF':
xldFormat = 'aiff'
elif 'PCM' in OutputFormatName:
xldFormat = 'pcm'
elif OutputFormatName == 'Wave64':
xldFormat = 'w64'
elif OutputFormatName == 'MPEG-4 AAC':
xldFormat = 'm4a'
if 'CBR' in ShortDesc or 'ABR' in ShortDesc or 'CVBR' in ShortDesc:
xldBitrate = int(profile.get('XLDAacOutput2_Bitrate'))
elif 'TVBR' in ShortDesc:
XLDAacOutput2_VBRQuality = int(profile.get('XLDAacOutput2_VBRQuality'))
if XLDAacOutput2_VBRQuality > 122:
xldBitrate = 320
elif XLDAacOutput2_VBRQuality > 113 and XLDAacOutput2_VBRQuality <= 122:
xldBitrate = 285
elif XLDAacOutput2_VBRQuality > 104 and XLDAacOutput2_VBRQuality <= 113:
xldBitrate = 255
elif XLDAacOutput2_VBRQuality > 95 and XLDAacOutput2_VBRQuality <= 104:
xldBitrate = 225
elif XLDAacOutput2_VBRQuality > 86 and XLDAacOutput2_VBRQuality <= 95:
xldBitrate = 195
elif XLDAacOutput2_VBRQuality > 77 and XLDAacOutput2_VBRQuality <= 86:
xldBitrate = 165
elif XLDAacOutput2_VBRQuality > 68 and XLDAacOutput2_VBRQuality <= 77:
xldBitrate = 150
elif XLDAacOutput2_VBRQuality > 58 and XLDAacOutput2_VBRQuality <= 68:
xldBitrate = 135
elif XLDAacOutput2_VBRQuality > 49 and XLDAacOutput2_VBRQuality <= 58:
xldBitrate = 115
elif XLDAacOutput2_VBRQuality > 40 and XLDAacOutput2_VBRQuality <= 49:
xldBitrate = 105
elif XLDAacOutput2_VBRQuality > 31 and XLDAacOutput2_VBRQuality <= 40:
xldBitrate = 95
elif XLDAacOutput2_VBRQuality > 22 and XLDAacOutput2_VBRQuality <= 31:
xldBitrate = 80
elif XLDAacOutput2_VBRQuality > 13 and XLDAacOutput2_VBRQuality <= 22:
xldBitrate = 75
elif XLDAacOutput2_VBRQuality > 4 and XLDAacOutput2_VBRQuality <= 13:
xldBitrate = 45
elif XLDAacOutput2_VBRQuality >= 0 and XLDAacOutput2_VBRQuality <= 4:
xldBitrate = 40
elif OutputFormatName == 'Apple Lossless':
xldFormat = 'm4a'
elif OutputFormatName == 'FLAC':
if 'ogg' in ShortDesc:
xldFormat = 'oga'
else:
xldFormat = 'flac'
elif OutputFormatName == 'MPEG-4 HE-AAC':
xldFormat = 'm4a'
xldBitrate = int(profile.get('Bitrate'))
elif OutputFormatName == 'LAME MP3':
xldFormat = 'mp3'
if 'VBR' in ShortDesc:
VbrQuality = float(profile.get('VbrQuality'))
if VbrQuality < 1:
xldBitrate = 260
elif VbrQuality >= 1 and VbrQuality < 2:
xldBitrate = 250
elif VbrQuality >= 2 and VbrQuality < 3:
xldBitrate = 210
elif VbrQuality >= 3 and VbrQuality < 4:
xldBitrate = 195
elif VbrQuality >= 4 and VbrQuality < 5:
xldBitrate = 185
elif VbrQuality >= 5 and VbrQuality < 6:
xldBitrate = 150
elif VbrQuality >= 6 and VbrQuality < 7:
xldBitrate = 130
elif VbrQuality >= 7 and VbrQuality < 8:
xldBitrate = 120
elif VbrQuality >= 8 and VbrQuality < 9:
xldBitrate = 105
elif VbrQuality >= 9:
xldBitrate = 85
elif 'CBR' in ShortDesc:
xldBitrate = int(profile.get('Bitrate'))
elif 'ABR' in ShortDesc:
xldBitrate = int(profile.get('AbrBitrate'))
elif OutputFormatName == 'Opus':
xldFormat = 'opus'
xldBitrate = int(profile.get('XLDOpusOutput_Bitrate'))
elif OutputFormatName == 'Ogg Vorbis':
xldFormat = 'ogg'
XLDVorbisOutput_Quality = float(profile.get('XLDVorbisOutput_Quality'))
if XLDVorbisOutput_Quality <= -2:
xldBitrate = 32
elif XLDVorbisOutput_Quality > -2 and XLDVorbisOutput_Quality <= -1:
xldBitrate = 48
elif XLDVorbisOutput_Quality > -1 and XLDVorbisOutput_Quality <= 0:
xldBitrate = 64
elif XLDVorbisOutput_Quality > 0 and XLDVorbisOutput_Quality <= 1:
xldBitrate = 80
elif XLDVorbisOutput_Quality > 1 and XLDVorbisOutput_Quality <= 2:
xldBitrate = 96
elif XLDVorbisOutput_Quality > 2 and XLDVorbisOutput_Quality <= 3:
xldBitrate = 112
elif XLDVorbisOutput_Quality > 3 and XLDVorbisOutput_Quality <= 4:
xldBitrate = 128
elif XLDVorbisOutput_Quality > 4 and XLDVorbisOutput_Quality <= 5:
xldBitrate = 160
elif XLDVorbisOutput_Quality > 5 and XLDVorbisOutput_Quality <= 6:
xldBitrate = 192
elif XLDVorbisOutput_Quality > 6 and XLDVorbisOutput_Quality <= 7:
xldBitrate = 224
elif XLDVorbisOutput_Quality > 7 and XLDVorbisOutput_Quality <= 8:
xldBitrate = 256
elif XLDVorbisOutput_Quality > 8 and XLDVorbisOutput_Quality <= 9:
xldBitrate = 320
elif XLDVorbisOutput_Quality > 9:
xldBitrate = 400
elif OutputFormatName == 'WavPack':
xldFormat = 'wv'
if ShortDesc != 'normal':
xldBitrate = int(profile.get('XLDWavpackOutput_BitRate'))
# Lossless
if xldFormat and not xldBitrate:
xldBitrate = 400
return (xldProfileForCmd, xldFormat, xldBitrate)
return (xldProfileNotFound, None, None)
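# Minimal usage sketch (the profile name is hypothetical): the caller gets the
# exact profile name for the xld command line, the derived container format,
# and an approximate bitrate (400 serves as a stand-in for lossless output).
#
#   name, fmt, bitrate = getXldProfile('FLAC archive')
#   if not fmt:
#       print('xld profile %s not found' % name)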
| 8,011 | Python | .py | 154 | 34.792208 | 92 | 0.540613 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
| 9,295 | utorrent.py | rembo10_headphones/headphones/utorrent.py |
# This file is part of Headphones.
#
# Headphones is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Headphones is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Headphones. If not, see <http://www.gnu.org/licenses/>.
import urllib.request, urllib.parse, urllib.error
import json
import time
from collections import namedtuple
import urllib.request, urllib.error, urllib.parse
import urllib.parse
import http.cookiejar
import re
import os
import headphones
from headphones import logger
class utorrentclient(object):
TOKEN_REGEX = "<div id='token' style='display:none;'>([^<>]+)</div>"
UTSetting = namedtuple("UTSetting", ["name", "int", "str", "access"])
def __init__(self, base_url=None, username=None, password=None, ):
host = headphones.CONFIG.UTORRENT_HOST
if not host.startswith('http'):
host = 'http://' + host
if host.endswith('/'):
host = host[:-1]
if host.endswith('/gui'):
host = host[:-4]
self.base_url = host
self.username = headphones.CONFIG.UTORRENT_USERNAME
self.password = headphones.CONFIG.UTORRENT_PASSWORD
self.opener = self._make_opener('uTorrent', self.base_url, self.username, self.password)
self.token = self._get_token()
# TODO refresh token, when necessary
def _make_opener(self, realm, base_url, username, password):
"""uTorrent API need HTTP Basic Auth and cookie support for token verify."""
auth = urllib.request.HTTPBasicAuthHandler()
auth.add_password(realm=realm, uri=base_url, user=username, passwd=password)
opener = urllib.request.build_opener(auth)
urllib.request.install_opener(opener)
cookie_jar = http.cookiejar.CookieJar()
cookie_handler = urllib.request.HTTPCookieProcessor(cookie_jar)
handlers = [auth, cookie_handler]
opener = urllib.request.build_opener(*handlers)
return opener
def _get_token(self):
url = urllib.parse.urljoin(self.base_url, 'gui/token.html')
try:
response = self.opener.open(url)
except urllib.error.HTTPError as err:
logger.debug('URL: ' + str(url))
logger.debug('Error getting Token. uTorrent responded with error: ' + str(err))
return
match = re.search(utorrentclient.TOKEN_REGEX, response.read().decode("utf-8"))
return match.group(1)
def list(self, **kwargs):
params = [('list', '1')]
params += list(kwargs.items())
return self._action(params)
def add_url(self, url):
# can receive magnet or normal .torrent link
params = [('action', 'add-url'), ('s', url)]
return self._action(params)
def start(self, *hashes):
params = [('action', 'start'), ]
for hash in hashes:
params.append(('hash', hash))
return self._action(params)
def stop(self, *hashes):
params = [('action', 'stop'), ]
for hash in hashes:
params.append(('hash', hash))
return self._action(params)
def pause(self, *hashes):
params = [('action', 'pause'), ]
for hash in hashes:
params.append(('hash', hash))
return self._action(params)
def forcestart(self, *hashes):
params = [('action', 'forcestart'), ]
for hash in hashes:
params.append(('hash', hash))
return self._action(params)
def getfiles(self, hash):
params = [('action', 'getfiles'), ('hash', hash)]
return self._action(params)
def getprops(self, hash):
params = [('action', 'getprops'), ('hash', hash)]
return self._action(params)
def setprops(self, hash, s, v):
params = [('action', 'setprops'), ('hash', hash), ("s", s), ("v", v)]
return self._action(params)
def setprio(self, hash, priority, *files):
params = [('action', 'setprio'), ('hash', hash), ('p', str(priority))]
for file_index in files:
params.append(('f', str(file_index)))
return self._action(params)
def get_settings(self, key=None):
params = [('action', 'getsettings'), ]
status, value = self._action(params)
settings = {}
for args in value['settings']:
settings[args[0]] = self.UTSetting(*args)
if key:
return settings[key]
return settings
def remove(self, hash, remove_data=False):
if remove_data:
params = [('action', 'removedata'), ('hash', hash)]
else:
params = [('action', 'remove'), ('hash', hash)]
return self._action(params)
def _action(self, params, body=None, content_type=None):
if not self.token:
return
url = self.base_url + '/gui/' + '?token=' + self.token + '&' + urllib.parse.urlencode(params)
request = urllib.request.Request(url)
if body:
request.data = body
request.add_header('Content-length', str(len(body)))
if content_type:
request.add_header('Content-type', content_type)
try:
response = self.opener.open(request)
return response.code, json.loads(response.read())
except urllib.error.HTTPError as err:
logger.debug('URL: ' + str(url))
logger.debug('uTorrent webUI raised the following error: ' + str(err))
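# Hedged usage sketch: the client reads host and credentials from the
# Headphones config, so construction takes no arguments in practice. The
# torrent hash below is a placeholder.
#
#   client = utorrentclient()
#   status, listing = client.list()
#   client.setprops('0123456789abcdef0123456789abcdef01234567', 'label', 'music')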
def labelTorrent(hash):
label = headphones.CONFIG.UTORRENT_LABEL
uTorrentClient = utorrentclient()
if label:
uTorrentClient.setprops(hash, 'label', label)
def removeTorrent(hash, remove_data=False):
uTorrentClient = utorrentclient()
status, torrentList = uTorrentClient.list()
torrents = torrentList['torrents']
for torrent in torrents:
if torrent[0].upper() == hash.upper():
if torrent[21] == 'Finished':
logger.info('%s has finished seeding, removing torrent and data' % torrent[2])
uTorrentClient.remove(hash, remove_data)
return True
else:
logger.info(
'%s has not finished seeding yet, torrent will not be removed, will try again on next run' %
torrent[2])
return False
return False
def setSeedRatio(hash, ratio):
uTorrentClient = utorrentclient()
uTorrentClient.setprops(hash, 'seed_override', '1')
if ratio != 0:
uTorrentClient.setprops(hash, 'seed_ratio', ratio * 10)
else:
# TODO passing -1 should be unlimited
uTorrentClient.setprops(hash, 'seed_ratio', -10)
def dirTorrent(hash, cacheid=None, return_name=None):
uTorrentClient = utorrentclient()
if not cacheid:
status, torrentList = uTorrentClient.list()
else:
params = [('list', '1'), ('cid', cacheid)]
status, torrentList = uTorrentClient._action(params)
if 'torrentp' in torrentList:
torrents = torrentList['torrentp']
else:
torrents = torrentList['torrents']
cacheid = torrentList['torrentc']
for torrent in torrents:
if torrent[0].upper() == hash.upper():
if not return_name:
return torrent[26], cacheid
else:
return torrent[2], cacheid
return None, None
def addTorrent(link):
uTorrentClient = utorrentclient()
uTorrentClient.add_url(link)
def getFolder(hash):
# Get Active Directory from settings
active_dir, completed_dir = getSettingsDirectories()
if not active_dir:
logger.error(
'Could not get "Put new downloads in:" directory from uTorrent settings, please ensure it is set')
return None
# Get Torrent Folder Name
torrent_folder, cacheid = dirTorrent(hash)
# If there's no folder yet then it's probably a magnet, try until folder is populated
if torrent_folder == active_dir or not torrent_folder:
tries = 1
while (torrent_folder == active_dir or torrent_folder is None) and tries <= 10:
tries += 1
time.sleep(6)
torrent_folder, cacheid = dirTorrent(hash, cacheid)
if torrent_folder == active_dir or not torrent_folder:
torrent_folder, cacheid = dirTorrent(hash, cacheid, return_name=True)
return torrent_folder
else:
if headphones.SYS_PLATFORM != "win32":
torrent_folder = torrent_folder.replace('\\', '/')
return os.path.basename(os.path.normpath(torrent_folder))
def getSettingsDirectories():
uTorrentClient = utorrentclient()
settings = uTorrentClient.get_settings()
active = None
completed = None
if 'dir_active_download' in settings:
active = settings['dir_active_download'][2]
if 'dir_completed_download' in settings:
completed = settings['dir_completed_download'][2]
return active, completed
| 9,339 | Python | .py | 220 | 34.454545 | 112 | 0.633054 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
| 9,296 | cache.py | rembo10_headphones/headphones/cache.py |
# This file is part of Headphones.
#
# Headphones is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Headphones is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Headphones. If not, see <http://www.gnu.org/licenses/>.
import os
from six.moves.urllib.parse import urlencode
import headphones
from headphones import db, helpers, logger, lastfm, request, mb
LASTFM_API_KEY = "8d983789c771afaeb7412ac358d4bad0"
FANART_URL = 'https://webservice.fanart.tv/v3/music/'
FANART_PROJECT_KEY = '22b73c9603eba09d0c855f2d2bdba31c'
FANART_CLIENT_KEY = '919b389a18a3f0b2c916090022ab3c7a'
class Cache(object):
"""
This class deals with getting, storing and serving up artwork (album art,
artist images, etc) and info/descriptions (album info, artist descriptions)
to and from the cache folder. This can be called from within a web
interface. For example, using the helper functions `getInfo(id)` and
`getArtwork(id)`, to utilize the cached images rather than having to
retrieve them every time the page is reloaded.
You can call `getArtwork(id)` which will return an absolute path to the
image file on the local machine, or if the cache directory doesn't exist,
or can not be written to, it will return a url to the image.
Call `getInfo(id)` to grab the artist/album info. This will return the
text description.
The basic format for art in the cache is `<musicbrainzid>.<date>.<ext>`
and for info it is `<musicbrainzid>.<date>.txt`
"""
path_to_art_cache = os.path.join(headphones.CONFIG.CACHE_DIR, 'artwork')
def __init__(self):
self.id = None
self.id_type = None # 'artist' or 'album' - set automatically depending on whether ArtistID or AlbumID is passed
self.query_type = None # 'artwork','thumb' or 'info' - set automatically
self.artwork_files = []
self.thumb_files = []
self.artwork_errors = False
self.artwork_url = None
self.thumb_errors = False
self.thumb_url = None
self.info_summary = None
self.info_content = None
def _findfilesstartingwith(self, pattern, folder):
files = []
if os.path.exists(folder):
for fname in os.listdir(folder):
if fname.startswith(pattern):
files.append(os.path.join(folder, fname))
return files
def _exists(self, type):
self.artwork_files = []
self.thumb_files = []
if type == 'artwork':
self.artwork_files = self._findfilesstartingwith(self.id, self.path_to_art_cache)
if self.artwork_files:
return True
else:
return False
elif type == 'thumb':
self.thumb_files = self._findfilesstartingwith("T_" + self.id, self.path_to_art_cache)
if self.thumb_files:
return True
else:
return False
def _get_age(self, date):
# There's probably a better way to do this
split_date = date.split('-')
days_old = int(split_date[0]) * 365 + int(split_date[1]) * 30 + int(split_date[2])
return days_old
def _is_current(self, filename=None, date=None):
if filename:
# 2019 last.fm no longer allows access to artist images, for now we'll keep the cached artist image if it exists and get new from fanart.tv
if self.id_type == 'artist' and 'fanart' not in filename:
return True
base_filename = os.path.basename(filename)
date = base_filename.split('.')[1]
# Calculate how old the cached file is based on today's date & the file date stamp
# helpers.today() returns today's date in yyyy-mm-dd format
if self._get_age(helpers.today()) - self._get_age(date) < 30:
return True
else:
return False
def _get_thumb_url(self, data):
thumb_url = None
try:
images = data[self.id_type]['image']
except KeyError:
return None
for image in images:
if image['size'] == 'medium' and '#text' in image:
thumb_url = image['#text']
break
return thumb_url
def get_artwork_from_cache(self, ArtistID=None, AlbumID=None):
"""
Pass a musicbrainz id to this function (either ArtistID or AlbumID)
"""
self.query_type = 'artwork'
if ArtistID:
self.id = ArtistID
self.id_type = 'artist'
else:
self.id = AlbumID
self.id_type = 'album'
if self._exists('artwork') and self._is_current(filename=self.artwork_files[0]):
return self.artwork_files[0]
else:
self._update_cache()
# If we failed to get artwork, either return the url or the older file
if self.artwork_errors and self.artwork_url:
return self.artwork_url
elif self._exists('artwork'):
return self.artwork_files[0]
else:
return None
def get_thumb_from_cache(self, ArtistID=None, AlbumID=None):
"""
Pass a musicbrainz id to this function (either ArtistID or AlbumID)
"""
self.query_type = 'thumb'
if ArtistID:
self.id = ArtistID
self.id_type = 'artist'
else:
self.id = AlbumID
self.id_type = 'album'
if self._exists('thumb') and self._is_current(filename=self.thumb_files[0]):
return self.thumb_files[0]
else:
self._update_cache()
# If we failed to get artwork, either return the url or the older file
if self.thumb_errors and self.thumb_url:
return self.thumb_url
elif self._exists('thumb'):
return self.thumb_files[0]
else:
return None
def get_info_from_cache(self, ArtistID=None, AlbumID=None):
self.query_type = 'info'
myDB = db.DBConnection()
if ArtistID:
self.id = ArtistID
self.id_type = 'artist'
db_info = myDB.action(
'SELECT Summary, Content, LastUpdated FROM descriptions WHERE ArtistID=?',
[self.id]).fetchone()
else:
self.id = AlbumID
self.id_type = 'album'
db_info = myDB.action(
'SELECT Summary, Content, LastUpdated FROM descriptions WHERE ReleaseGroupID=?',
[self.id]).fetchone()
if not db_info or not db_info['LastUpdated'] or not self._is_current(
date=db_info['LastUpdated']):
self._update_cache()
info_dict = {'Summary': self.info_summary, 'Content': self.info_content}
return info_dict
else:
info_dict = {'Summary': db_info['Summary'], 'Content': db_info['Content']}
return info_dict
def get_image_links(self, ArtistID=None, AlbumID=None):
"""
Here we're just going to open up the last.fm url, grab the image links and return them
Won't save any image urls, or save the artwork in the cache. Useful for search results, etc.
"""
if ArtistID:
self.id_type = 'artist'
# 2019 last.fm no longer allows access to artist images, try fanart.tv instead
image_url = None
thumb_url = None
data = request.request_json(FANART_URL + ArtistID, whitelist_status_code=404,
headers={'api-key': FANART_PROJECT_KEY, 'client-key': FANART_CLIENT_KEY})
if not data:
return
if data.get('artistthumb'):
image_url = data['artistthumb'][0]['url']
elif data.get('artistbackground'):
image_url = data['artistbackground'][0]['url']
# elif data.get('hdmusiclogo'):
# image_url = data['hdmusiclogo'][0]['url']
# fallback to 1st album cover if none of the above
elif 'albums' in data:
for mbid, art in list(data.get('albums', dict()).items()):
if 'albumcover' in art:
image_url = art['albumcover'][0]['url']
break
if image_url:
thumb_url = image_url
else:
logger.debug('No artist image found on fanart.tv for Artist Id: %s', self.id)
else:
self.id_type = 'album'
data = lastfm.request_lastfm("album.getinfo", mbid=AlbumID, api_key=LASTFM_API_KEY)
if not data:
return
try:
image_url = data['album']['image'][-1]['#text']
except (KeyError, IndexError):
logger.debug('No album image found on last.fm')
image_url = None
thumb_url = self._get_thumb_url(data)
if not thumb_url:
logger.debug('No album thumbnail image found on last.fm')
return {'artwork': image_url, 'thumbnail': thumb_url}
def remove_from_cache(self, ArtistID=None, AlbumID=None):
"""
Pass a musicbrainz id to this function (either ArtistID or AlbumID)
"""
if ArtistID:
self.id = ArtistID
self.id_type = 'artist'
else:
self.id = AlbumID
self.id_type = 'album'
self.query_type = 'artwork'
if self._exists('artwork'):
for artwork_file in self.artwork_files:
try:
os.remove(artwork_file)
except OSError:
logger.warn('Error deleting file from the cache: %s', artwork_file)
self.query_type = 'thumb'
if self._exists('thumb'):
for thumb_file in self.thumb_files:
try:
os.remove(thumb_file)
except Exception:
logger.warn('Error deleting file from the cache: %s', thumb_file)
def _update_cache(self):
"""
Since we call the same url for both info and artwork, we'll update both at the same time
"""
myDB = db.DBConnection()
fanart = False
# Since lastfm uses release ids rather than release group ids for albums, we have to do an artist + album search for albums
# Exception is when adding albums manually, then we should use release id
if self.id_type == 'artist':
data = lastfm.request_lastfm("artist.getinfo", mbid=self.id, api_key=LASTFM_API_KEY)
# Try with name if not found
if not data:
dbartist = myDB.action('SELECT ArtistName, Type FROM artists WHERE ArtistID=?', [self.id]).fetchone()
if dbartist:
data = lastfm.request_lastfm("artist.getinfo",
artist=helpers.clean_musicbrainz_name(dbartist['ArtistName']),
api_key=LASTFM_API_KEY)
if not data:
return
try:
self.info_summary = data['artist']['bio']['summary']
except KeyError:
logger.debug('No artist bio summary found')
self.info_summary = None
try:
self.info_content = data['artist']['bio']['content']
except KeyError:
logger.debug('No artist bio found')
self.info_content = None
# 2019 last.fm no longer allows access to artist images, try fanart.tv instead
image_url = None
thumb_url = None
data = request.request_json(FANART_URL + self.id, whitelist_status_code=404,
headers={'api-key': FANART_PROJECT_KEY, 'client-key': FANART_CLIENT_KEY})
# Guard against a failed fanart.tv request before inspecting the payload
if not data:
data = {}
if data.get('artistthumb'):
image_url = data['artistthumb'][0]['url']
elif data.get('artistbackground'):
image_url = data['artistbackground'][0]['url']
# elif data.get('hdmusiclogo'):
# image_url = data['hdmusiclogo'][0]['url']
# fallback to 1st album cover if none of the above
elif 'albums' in data:
for mbid, art in list(data.get('albums', dict()).items()):
if 'albumcover' in art:
image_url = art['albumcover'][0]['url']
break
# finally, fall back to an album cover URL already stored in the db
if image_url:
fanart = True
thumb_url = image_url
else:
dbalbum = myDB.action(
'SELECT ArtworkURL, ThumbURL FROM albums WHERE ArtworkURL IS NOT NULL AND ArtistID=?',
[self.id]).fetchone()
if dbalbum:
fanart = True
image_url = dbalbum['ArtworkURL']
thumb_url = dbalbum['ThumbURL']
if not image_url:
logger.debug('No artist image found on fanart.tv for Artist Id: %s', self.id)
else:
dbalbum = myDB.action(
'SELECT ArtistName, AlbumTitle, ReleaseID, Type FROM albums WHERE AlbumID=?',
[self.id]).fetchone()
if dbalbum['ReleaseID'] != self.id:
data = lastfm.request_lastfm("album.getinfo", mbid=dbalbum['ReleaseID'],
api_key=LASTFM_API_KEY)
if not data:
data = lastfm.request_lastfm("album.getinfo",
artist=helpers.clean_musicbrainz_name(dbalbum['ArtistName']),
album=helpers.clean_musicbrainz_name(dbalbum['AlbumTitle']),
api_key=LASTFM_API_KEY)
else:
if dbalbum['Type'] != "part of":
data = lastfm.request_lastfm("album.getinfo",
artist=helpers.clean_musicbrainz_name(dbalbum['ArtistName']),
album=helpers.clean_musicbrainz_name(dbalbum['AlbumTitle']),
api_key=LASTFM_API_KEY)
else:
# Series, use actual artist for the release-group
artist = mb.getArtistForReleaseGroup(self.id)
if artist:
data = lastfm.request_lastfm("album.getinfo",
artist=helpers.clean_musicbrainz_name(artist),
album=helpers.clean_musicbrainz_name(dbalbum['AlbumTitle']),
api_key=LASTFM_API_KEY)
if not data:
return
try:
self.info_summary = data['album']['wiki']['summary']
except KeyError:
logger.debug('No album summary found')
self.info_summary = None
try:
self.info_content = data['album']['wiki']['content']
except KeyError:
logger.debug('No album information found')
self.info_content = None
try:
image_url = data['album']['image'][-1]['#text']
except (KeyError, IndexError):
logger.debug('No album image link found')
image_url = None
thumb_url = self._get_thumb_url(data)
if not thumb_url:
logger.debug('No album thumbnail image found')
# Save the content & summary to the database no matter what if we've
# opened up the url
if self.id_type == 'artist':
controlValueDict = {"ArtistID": self.id}
else:
controlValueDict = {"ReleaseGroupID": self.id}
newValueDict = {"Summary": self.info_summary,
"Content": self.info_content,
"LastUpdated": helpers.today()}
myDB.upsert("descriptions", newValueDict, controlValueDict)
# Save the image URL to the database
if image_url:
if self.id_type == 'artist':
myDB.action('UPDATE artists SET ArtworkURL=? WHERE ArtistID=?',
[image_url, self.id])
else:
myDB.action('UPDATE albums SET ArtworkURL=? WHERE AlbumID=?', [image_url, self.id])
# Save the thumb URL to the database
if thumb_url:
if self.id_type == 'artist':
myDB.action('UPDATE artists SET ThumbURL=? WHERE ArtistID=?', [thumb_url, self.id])
else:
myDB.action('UPDATE albums SET ThumbURL=? WHERE AlbumID=?', [thumb_url, self.id])
# Should we grab the artwork here if we're just grabbing thumbs or
# info? Probably not since the files can be quite big
artwork = None  # initialised so the thumb branch below can safely reference it
if image_url and self.query_type == 'artwork':
artwork = request.request_content(image_url, timeout=20)
if artwork:
# Make sure the artwork dir exists:
if not os.path.isdir(self.path_to_art_cache):
try:
os.makedirs(self.path_to_art_cache)
os.chmod(self.path_to_art_cache,
int(headphones.CONFIG.FOLDER_PERMISSIONS, 8))
except OSError as e:
logger.error('Unable to create artwork cache dir. Error: %s', e)
self.artwork_errors = True
self.artwork_url = image_url
# Delete the old stuff
for artwork_file in self.artwork_files:
try:
os.remove(artwork_file)
except OSError:
logger.error('Error deleting file from the cache: %s', artwork_file)
ext = os.path.splitext(image_url)[1]
if fanart:
artwork_path = os.path.join(self.path_to_art_cache,
self.id + '_fanart_' + '.' + helpers.today() + ext)
else:
artwork_path = os.path.join(self.path_to_art_cache,
self.id + '.' + helpers.today() + ext)
try:
with open(artwork_path, 'wb') as f:
f.write(artwork)
os.chmod(artwork_path, int(headphones.CONFIG.FILE_PERMISSIONS, 8))
except (OSError, IOError) as e:
logger.error('Unable to write to the cache dir: %s', e)
self.artwork_errors = True
self.artwork_url = image_url
# Grab the thumbnail as well if we're getting the full artwork (as long
# as it's missing/outdated).
if thumb_url and self.query_type in ['thumb', 'artwork'] and not (
self.thumb_files and self._is_current(self.thumb_files[0])):
if not (self.query_type == 'artwork' and 'fanart' in thumb_url and artwork):
artwork = request.request_content(thumb_url, timeout=20)
if artwork:
# Make sure the artwork dir exists:
if not os.path.isdir(self.path_to_art_cache):
try:
os.makedirs(self.path_to_art_cache)
os.chmod(self.path_to_art_cache,
int(headphones.CONFIG.FOLDER_PERMISSIONS, 8))
except OSError as e:
logger.error('Unable to create artwork cache dir. Error: %s', e)
self.thumb_errors = True
self.thumb_url = thumb_url
# Delete the old stuff
for thumb_file in self.thumb_files:
try:
os.remove(thumb_file)
except OSError:
logger.error('Error deleting file from the cache: %s', thumb_file)
ext = os.path.splitext(thumb_url)[1]
if fanart:
thumb_path = os.path.join(self.path_to_art_cache,
'T_' + self.id + '_fanart_' + '.' + helpers.today() + ext)
else:
thumb_path = os.path.join(self.path_to_art_cache,
'T_' + self.id + '.' + helpers.today() + ext)
try:
if self.id_type != 'artist':
with open(thumb_path, 'wb') as f:
f.write(artwork)
else:
# 2019 last.fm no longer allows access to artist images, use the fanart.tv image to create a thumb
artwork_thumb = None
if 'fanart' in thumb_url:
# Create thumb using image resizing service
url = "https://images.weserv.nl"
params = {
"url": thumb_url,
"w": 300
}
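# Assumption: images.weserv.nl fetches the image at "url" and returns a
# rendition resized to the requested width "w" (300 px here)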
artwork_thumb = request.request_content(
url,
params=params,
timeout=20,
whitelist_status_code=404
)
if artwork_thumb:
with open(thumb_path, 'wb') as f:
f.write(artwork_thumb)
else:
with open(thumb_path, 'wb') as f:
f.write(artwork)
os.chmod(thumb_path, int(headphones.CONFIG.FILE_PERMISSIONS, 8))
except (OSError, IOError) as e:
logger.error('Unable to write to the cache dir: %s', e)
self.thumb_errors = True
self.thumb_url = thumb_url
def getArtwork(ArtistID=None, AlbumID=None):
c = Cache()
artwork_path = c.get_artwork_from_cache(ArtistID, AlbumID)
if not artwork_path:
return None
if artwork_path.startswith(('http://', 'https://')):
return artwork_path
else:
artwork_file = os.path.basename(artwork_path)
return "cache/artwork/" + artwork_file
def getThumb(ArtistID=None, AlbumID=None):
c = Cache()
artwork_path = c.get_thumb_from_cache(ArtistID, AlbumID)
if not artwork_path:
return None
if artwork_path.startswith(('http://', 'https://')):
return artwork_path
else:
thumbnail_file = os.path.basename(artwork_path)
return "cache/artwork/" + thumbnail_file
def getInfo(ArtistID=None, AlbumID=None):
c = Cache()
info_dict = c.get_info_from_cache(ArtistID, AlbumID)
return info_dict
def getImageLinks(ArtistID=None, AlbumID=None):
c = Cache()
image_links = c.get_image_links(ArtistID, AlbumID)
return image_links
| 23,824
|
Python
|
.py
| 491
| 33.356415
| 151
| 0.534737
|
rembo10/headphones
| 3,370
| 601
| 527
|
GPL-3.0
|
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
|
9,297
|
metadata.py
|
rembo10_headphones/headphones/metadata.py
|
# encoding=utf8
# This file is part of Headphones.
#
# Headphones is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Headphones is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Headphones. If not, see <http://www.gnu.org/licenses/>.
"""
Track/album metadata handling routines.
"""
from mediafile import MediaFile, UnreadableFileError
import headphones
from headphones import logger
import os.path
import datetime
__author__ = "Andrzej Ciarkowski <andrzej.ciarkowski@gmail.com>"
class MetadataDict(dict):
"""
Dictionary which allows for case-insensitive, but case-preserving lookup,
allowing one to put different values under $Album and $album, while still
finding some value if only a single key is present and it is looked up
with any case variation of the name.
Keeps case-sensitive mapping in superclass dict, and case-insensitive (
lowercase) in member variable self._lower. If case-sensitive lookup
fails, another case-insensitive attempt is made.
"""
def __setitem__(self, key, value):
super(MetadataDict, self).__setitem__(key, value)
self._lower.__setitem__(key.lower(), value)
def add_items(self, items):
# type: (Iterable[Tuple[Any,Any]])->None
"""
Add (key,value) pairs to this dictionary using iterable as an input.
:param items: input items.
"""
for key, value in items:
self.__setitem__(key, value)
def __init__(self, seq=None, **kwargs):
if isinstance(seq, MetadataDict):
super(MetadataDict, self).__init__(seq)
self._lower = dict(seq._lower)
else:
super(MetadataDict, self).__init__()
self._lower = {}
if seq is not None:
try:
self.add_items(iter(seq.items()))
except AttributeError:  # seq is not a mapping; treat it as an iterable of pairs
self.add_items(seq)
def __getitem__(self, item):
try:
return super(MetadataDict, self).__getitem__(item)
except KeyError:
return self._lower.__getitem__(item.lower())
def __contains__(self, item):
return self._lower.__contains__(item.lower())
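# Hedged usage sketch (not part of the original class):
# d = MetadataDict()
# d['$Album'] = 'Abbey Road'
# d['$Album'] # exact, case-sensitive hit
# d['$album'] # same value via the lowercase fallback map
# '$ALBUM' in d # True - membership is case-insensitive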
class Vars:
"""
Metadata $variable names (only ones set explicitly by headphones).
"""
DISC = '$Disc'
DISC_TOTAL = '$DiscTotal'
TRACK = '$Track'
TITLE = '$Title'
ARTIST = '$Artist'
SORT_ARTIST = '$SortArtist'
ALBUM = '$Album'
YEAR = '$Year'
DATE = '$Date'
EXTENSION = '$Extension'
ORIGINAL_FOLDER = '$OriginalFolder'
FIRST_LETTER = '$First'
TYPE = '$Type'
TITLE_LOWER = TITLE.lower()
ARTIST_LOWER = ARTIST.lower()
SORT_ARTIST_LOWER = SORT_ARTIST.lower()
ALBUM_LOWER = ALBUM.lower()
ORIGINAL_FOLDER_LOWER = ORIGINAL_FOLDER.lower()
FIRST_LETTER_LOWER = FIRST_LETTER.lower()
TYPE_LOWER = TYPE.lower()
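# These names match the $-placeholders users put in folder/file format
# strings, e.g. a pattern such as '$First/$Artist/$Album [$Year]'
# (see pathrender for how such patterns are parsed and rendered).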
def _verify_var_type(val):
"""
Check if type of value is allowed as a variable in pathname substitution.
"""
return isinstance(val, (str, int, float, datetime.date))
def _as_str(val):
if isinstance(val, str):
return val
else:
return str(val)
def _media_file_to_dict(mf, d):
# type: (MediaFile, MutableMapping[basestring,basestring])->None
"""
Populate dict with tags read from media file.
"""
for fld in mf.readable_fields():
if 'art' == fld:
# skip embedded artwork as it's a BLOB
continue
val = getattr(mf, fld)
if val is None:
val = ''
# include only types with meaningful string representation
if _verify_var_type(val):
d['$' + fld] = _as_str(val)
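# Hedged example of the resulting keys (field names come from MediaFile):
# a tagged mp3 might yield {'$title': 'Come Together', '$artist': 'The
# Beatles', '$year': '1969', ...} with every value coerced to str.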
def _row_to_dict(row, d):
"""
Populate dict with database row fields.
"""
for fld in list(row.keys()):
val = row[fld]
if val is None:
val = ''
if _verify_var_type(val):
d['$' + fld] = _as_str(val)
def _date_year(release):
# type: (sqlite3.Row)->Tuple[str,str]
"""
Extract release date and year from database row
"""
try:
date = release['ReleaseDate']
except TypeError:
date = ''
if date is not None:
year = date[:4]
else:
year = ''
return date, year
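# e.g. a row whose ReleaseDate is '1969-09-26' yields ('1969-09-26', '1969');
# a missing row yields ('', '').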
def _lower(s):
# type: (str)->str
"""
Return s.lower() if s is truthy, otherwise None.
"""
if s:
return s.lower()
return None
def file_metadata(path, release, single_disc_ignore=False):
# type: (str, sqlite3.Row, bool)->Tuple[Mapping[str,str],bool]
"""
Prepare metadata dictionary for path substitution, based on file name,
the tags stored within it and release info from the db.
:param path: media file path
:param release: database row with release info
:param single_disc_ignore: if True, omit disc number/total for single-disc albums
:return: pair (dict,boolean indicating if Vars.TITLE is taken from tags or
file name). (None,None) if unable to parse the media file.
"""
try:
f = MediaFile(path)
except UnreadableFileError as ex:
logger.info(f"MediaFile couldn't parse {path}: {e}")
return None, None
res = MetadataDict()
# add existing tags first, these will get overwritten by musicbrainz from db
_media_file_to_dict(f, res)
# raw database fields come next
_row_to_dict(release, res)
date, year = _date_year(release)
if not f.disctotal or (f.disctotal == 1 and single_disc_ignore):
disc_total = ''
else:
disc_total = '%d' % f.disctotal
if not f.disc or (f.disctotal == 1 and single_disc_ignore):
disc_number = ''
else:
disc_number = '%d' % f.disc
if not f.track:
track_number = ''
else:
track_number = '%02d' % f.track
if not f.title:
basename = os.path.basename(path)
title = os.path.splitext(basename)[0]
from_metadata = False
else:
title = f.title
from_metadata = True
ext = os.path.splitext(path)[1]
if release['ArtistName'] == "Various Artists" and f.artist:
artist_name = f.artist
else:
artist_name = release['ArtistName']
if artist_name and artist_name.startswith('The '):
sort_name = artist_name[4:] + ", The"
else:
sort_name = artist_name
album_title = release['AlbumTitle']
override_values = {
Vars.DISC: disc_number,
Vars.DISC_TOTAL: disc_total,
Vars.TRACK: track_number,
Vars.TITLE: title,
Vars.ARTIST: artist_name,
Vars.SORT_ARTIST: sort_name,
Vars.ALBUM: album_title,
Vars.YEAR: year,
Vars.DATE: date,
Vars.EXTENSION: ext,
Vars.TITLE_LOWER: _lower(title),
Vars.ARTIST_LOWER: _lower(artist_name),
Vars.SORT_ARTIST_LOWER: _lower(sort_name),
Vars.ALBUM_LOWER: _lower(album_title),
}
res.add_items(iter(override_values.items()))
return res, from_metadata
def _intersect(d1, d2):
# type: (Mapping,Mapping)->Mapping
"""
Create intersection (common part) of two dictionaries.
"""
res = {}
for key, val in d1.items():
if key in d2 and d2[key] == val:
res[key] = val
return res
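# e.g. _intersect({'$genre': 'Rock', '$year': '1969'},
# {'$genre': 'Rock', '$year': '1970'}) == {'$genre': 'Rock'}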
def album_metadata(path, release, common_tags):
# type: (str,sqlite3.Row,Mapping[str,str])->Mapping[str,str]
"""
Prepare metadata dictionary for path substitution of album folder.
:param path: album path to prepare metadata for.
:param release: database row with release properties.
:param common_tags: common set of tags gathered from media files.
:return: metadata dictionary with substitution variables for rendering path.
"""
date, year = _date_year(release)
artist = release['ArtistName']
if artist:
artist = artist.replace('/', '_')
album = release['AlbumTitle']
if album:
album = album.replace('/', '_')
release_type = release['Type']
if release_type:
release_type = release_type.replace('/', '_')
if artist and artist.startswith('The '):
sort_name = artist[4:] + ", The"
else:
sort_name = artist
if not sort_name or sort_name[0].isdigit():
first_char = '0-9'
else:
first_char = sort_name[0]
orig_folder = ''
# Get from temp path
if "_@hp@_" in path:
orig_folder = path.rsplit("headphones_", 1)[1].split("_@hp@_")[0]
else:
for r, d, f in os.walk(path):
try:
orig_folder = os.path.basename(os.path.normpath(r))
break
except Exception:
pass
override_values = {
Vars.ARTIST: artist,
Vars.SORT_ARTIST: sort_name,
Vars.ALBUM: album,
Vars.YEAR: year,
Vars.DATE: date,
Vars.TYPE: release_type,
Vars.ORIGINAL_FOLDER: orig_folder,
Vars.FIRST_LETTER: first_char.upper(),
Vars.ARTIST_LOWER: _lower(artist),
Vars.SORT_ARTIST_LOWER: _lower(sort_name),
Vars.ALBUM_LOWER: _lower(album),
Vars.TYPE_LOWER: _lower(release_type),
Vars.FIRST_LETTER_LOWER: _lower(first_char),
Vars.ORIGINAL_FOLDER_LOWER: _lower(orig_folder)
}
res = MetadataDict(common_tags)
res.add_items(iter(override_values.items()))
return res
def albumart_metadata(release, common_tags):
# type: (sqlite3.Row,Mapping)->Mapping
"""
Prepare metadata dictionary for path substitution of album art file.
:param release: database row with release properties.
:param common_tags: common set of tags gathered from media files.
:return: metadata dictionary with substitution variables for rendering path.
"""
date, year = _date_year(release)
artist = release['ArtistName']
album = release['AlbumTitle']
override_values = {
Vars.ARTIST: artist,
Vars.ALBUM: album,
Vars.YEAR: year,
Vars.DATE: date,
Vars.ARTIST_LOWER: _lower(artist),
Vars.ALBUM_LOWER: _lower(album)
}
res = MetadataDict(common_tags)
res.add_items(iter(override_values.items()))
return res
class AlbumMetadataBuilder(object):
"""
Facilitates building of album metadata as a common set of tags retrieved
from media files.
"""
def __init__(self):
self._common = None
def add_media_file(self, mf):
# type: (MediaFile)->None
"""
Add metadata tags read from media file to album metadata.
:param mf: MediaFile
"""
md = {}
_media_file_to_dict(mf, md)
if self._common is None:
self._common = md
else:
self._common = _intersect(self._common, md)
def build(self):
# type: ()->Mapping
"""
Build case-insensitive, case-preserving dict from gathered metadata
tags.
:return: dictionary-like object filled with $variables based on common
tags.
"""
return MetadataDict(self._common)
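# Hedged usage sketch (names like media_files are assumed):
# builder = AlbumMetadataBuilder()
# for mf in media_files: # iterable of mediafile.MediaFile objects
# builder.add_media_file(mf)
# common = builder.build() # MetadataDict of the tags shared by all tracks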
| 11,373
|
Python
|
.py
| 332
| 27.506024
| 80
| 0.624135
|
rembo10/headphones
| 3,370
| 601
| 527
|
GPL-3.0
|
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
|
9,298
|
__init__.py
|
rembo10_headphones/headphones/__init__.py
|
# This file is part of Headphones.
#
# Headphones is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Headphones is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Headphones. If not, see <http://www.gnu.org/licenses/>.
# NZBGet support added by CurlyMo <curlymoo1@gmail.com> as a part of
# XBian - XBMC on the Raspberry Pi
import sys
import subprocess
import threading
import webbrowser
import sqlite3
import datetime
import os
import cherrypy
from apscheduler.schedulers.background import BackgroundScheduler
from apscheduler.triggers.interval import IntervalTrigger
from headphones import versioncheck, logger
import headphones.config
from headphones.softchroot import SoftChroot
import headphones.exceptions
# (append new extras to the end)
POSSIBLE_EXTRAS = [
"single",
"ep",
"compilation",
"soundtrack",
"live",
"remix",
"spokenword",
"audiobook",
"other",
"dj-mix",
"mixtape/street",
"broadcast",
"interview",
"demo"
]
PROG_DIR = None
FULL_PATH = None
ARGS = None
SIGNAL = None
SYS_PLATFORM = None
SYS_ENCODING = None
QUIET = False
VERBOSE = False
DAEMON = False
CREATEPID = False
PIDFILE = None
SCHED = BackgroundScheduler()
SCHED_LOCK = threading.Lock()
INIT_LOCK = threading.Lock()
_INITIALIZED = False
started = False
DATA_DIR = None
CONFIG = None
SOFT_CHROOT = None
DB_FILE = None
LOG_LIST = []
INSTALL_TYPE = None
CURRENT_VERSION = None
LATEST_VERSION = None
COMMITS_BEHIND = None
LOSSY_MEDIA_FORMATS = ["mp3", "aac", "ogg", "ape", "m4a", "asf", "wma", "opus"]
LOSSLESS_MEDIA_FORMATS = ["flac", "aiff", "aif"]
MEDIA_FORMATS = LOSSY_MEDIA_FORMATS + LOSSLESS_MEDIA_FORMATS
MIRRORLIST = ["musicbrainz.org", "headphones", "custom"]
UMASK = None
def initialize(config_file):
with INIT_LOCK:
global CONFIG
global SOFT_CHROOT
global _INITIALIZED
global CURRENT_VERSION
global LATEST_VERSION
global UMASK
CONFIG = headphones.config.Config(config_file)
assert CONFIG is not None
if _INITIALIZED:
return False
if CONFIG.HTTP_PORT < 21 or CONFIG.HTTP_PORT > 65535:
headphones.logger.warn(
'HTTP_PORT out of bounds: 21 < %s < 65535', CONFIG.HTTP_PORT)
CONFIG.HTTP_PORT = 8181
if CONFIG.HTTPS_CERT == '':
CONFIG.HTTPS_CERT = os.path.join(DATA_DIR, 'server.crt')
if CONFIG.HTTPS_KEY == '':
CONFIG.HTTPS_KEY = os.path.join(DATA_DIR, 'server.key')
if not CONFIG.LOG_DIR:
CONFIG.LOG_DIR = os.path.join(DATA_DIR, 'logs')
if not os.path.exists(CONFIG.LOG_DIR):
try:
os.makedirs(CONFIG.LOG_DIR)
except OSError:
CONFIG.LOG_DIR = None
if not QUIET:
sys.stderr.write("Unable to create the log directory. "
"Logging to screen only.\n")
# Start the logger, disable console if needed
logger.initLogger(console=not QUIET, log_dir=CONFIG.LOG_DIR,
verbose=VERBOSE)
try:
SOFT_CHROOT = SoftChroot(str(CONFIG.SOFT_CHROOT))
if SOFT_CHROOT.isEnabled():
logger.info("Soft-chroot enabled for dir: %s", str(CONFIG.SOFT_CHROOT))
except headphones.exceptions.SoftChrootError as e:
logger.error("SoftChroot error: %s", e)
raise e
if not CONFIG.CACHE_DIR:
# Put the cache dir in the data dir for now
CONFIG.CACHE_DIR = os.path.join(DATA_DIR, 'cache')
if not os.path.exists(CONFIG.CACHE_DIR):
try:
os.makedirs(CONFIG.CACHE_DIR)
except OSError as e:
logger.error("Could not create cache dir '%s': %s", DATA_DIR, e)
# Sanity check for search interval. Set it to at least 6 hours
if CONFIG.SEARCH_INTERVAL and CONFIG.SEARCH_INTERVAL < 360:
logger.info("Search interval too low. Resetting to 6 hour minimum.")
CONFIG.SEARCH_INTERVAL = 360
# Initialize the database
logger.info('Checking to see if the database has all tables....')
try:
dbcheck()
except Exception as e:
logger.error("Can't connect to the database: %s", e)
# Get the currently installed version. Returns None, 'win32' or the git
# hash.
CURRENT_VERSION, CONFIG.GIT_BRANCH = versioncheck.getVersion()
# Write current version to a file, so we know which version did work.
# This allows one to restore to that version. The idea is that if we
# arrive here, most parts of Headphones seem to work.
if CURRENT_VERSION:
version_lock_file = os.path.join(DATA_DIR, "version.lock")
try:
with open(version_lock_file, "w") as fp:
fp.write(CURRENT_VERSION)
except IOError as e:
logger.error("Unable to write current version to file '%s': %s",
version_lock_file, e)
# Check for new versions
if CONFIG.CHECK_GITHUB and CONFIG.CHECK_GITHUB_ON_STARTUP:
try:
LATEST_VERSION = versioncheck.checkGithub()
except Exception:
logger.exception("Unhandled exception")
LATEST_VERSION = CURRENT_VERSION
else:
LATEST_VERSION = CURRENT_VERSION
# Store the original umask
UMASK = os.umask(0)
os.umask(UMASK)
_INITIALIZED = True
return True
def daemonize():
if threading.activeCount() != 1:
logger.warn(
'There are %r active threads. Daemonizing may cause'
' strange behavior.',
threading.enumerate())
sys.stdout.flush()
sys.stderr.flush()
# Do first fork
try:
pid = os.fork() # @UndefinedVariable - only available in UNIX
if pid != 0:
sys.exit(0)
except OSError as e:
raise RuntimeError("1st fork failed: %s [%d]", e.strerror, e.errno)
os.setsid()
# Make sure I can read my own files and shut out others
prev = os.umask(0) # @UndefinedVariable - only available in UNIX
os.umask(prev and int('077', 8))
# Do second fork so the daemon cannot reacquire a controlling terminal
try:
pid = os.fork() # @UndefinedVariable - only available in UNIX
if pid != 0:
sys.exit(0)
except OSError as e:
raise RuntimeError("2nd fork failed: %s [%d]", e.strerror, e.errno)
# Redirect standard file descriptors to /dev/null
si = open('/dev/null', 'r')
so = open('/dev/null', 'a+')
se = open('/dev/null', 'a+')
os.dup2(si.fileno(), sys.stdin.fileno())
os.dup2(so.fileno(), sys.stdout.fileno())
os.dup2(se.fileno(), sys.stderr.fileno())
pid = os.getpid()
logger.info('Daemonized to PID: %d', pid)
if CREATEPID:
logger.info("Writing PID %d to %s", pid, PIDFILE)
with open(PIDFILE, 'w') as fp:
fp.write("%s\n" % pid)
def launch_browser(host, port, root):
if host == '0.0.0.0':
host = 'localhost'
if CONFIG.ENABLE_HTTPS:
protocol = 'https'
else:
protocol = 'http'
try:
webbrowser.open('%s://%s:%i%s' % (protocol, host, port, root))
except Exception as e:
logger.error('Could not launch browser: %s', e)
def initialize_scheduler():
"""
Start the scheduled background tasks. Re-schedule if interval settings changed.
"""
from headphones import updater, searcher, librarysync, postprocessor, \
torrentfinished
with SCHED_LOCK:
# Check if scheduler should be started
start_jobs = not len(SCHED.get_jobs())
# Regular jobs
minutes = CONFIG.SEARCH_INTERVAL
schedule_job(searcher.searchforalbum, 'Search for Wanted', hours=0, minutes=minutes)
minutes = CONFIG.DOWNLOAD_SCAN_INTERVAL
schedule_job(postprocessor.checkFolder, 'Download Scan', hours=0, minutes=minutes)
hours = CONFIG.LIBRARYSCAN_INTERVAL
schedule_job(librarysync.libraryScan, 'Library Scan', hours=hours, minutes=0)
hours = CONFIG.UPDATE_DB_INTERVAL
schedule_job(updater.dbUpdate, 'MusicBrainz Update', hours=hours, minutes=0)
# Update check
if CONFIG.CHECK_GITHUB:
if CONFIG.CHECK_GITHUB_INTERVAL:
minutes = CONFIG.CHECK_GITHUB_INTERVAL
else:
minutes = 0
schedule_job(versioncheck.checkGithub, 'Check GitHub for updates', hours=0,
minutes=minutes)
# Remove Torrent + data if Post Processed and finished Seeding
if headphones.CONFIG.TORRENT_DOWNLOADER != 0:
minutes = CONFIG.TORRENT_REMOVAL_INTERVAL
schedule_job(torrentfinished.checkTorrentFinished, 'Torrent removal check', hours=0,
minutes=minutes)
# Start scheduler
if start_jobs and len(SCHED.get_jobs()):
try:
SCHED.start()
except Exception as e:
logger.info(e)
# Debug
# SCHED.print_jobs()
def schedule_job(function, name, hours=0, minutes=0):
"""
Start scheduled job if starting or restarting headphones.
Reschedule job if Interval Settings have changed.
Remove job if Interval Settings changed to 0.
"""
job = SCHED.get_job(name)
if job:
if hours == 0 and minutes == 0:
SCHED.remove_job(name)
logger.info("Removed background task: %s", name)
elif job.trigger.interval != datetime.timedelta(hours=hours, minutes=minutes):
SCHED.reschedule_job(name, trigger=IntervalTrigger(
hours=hours, minutes=minutes))
logger.info("Re-scheduled background task: %s", name)
elif hours > 0 or minutes > 0:
SCHED.add_job(function, id=name, trigger=IntervalTrigger(
hours=hours, minutes=minutes))
logger.info("Scheduled background task: %s", name)
def start():
global started
if _INITIALIZED:
initialize_scheduler()
started = True
def sig_handler(signum=None, frame=None):
if signum is not None:
logger.info("Signal %i caught, saving and exiting...", signum)
shutdown()
def dbcheck():
logger.debug("SQLite Version: %s", sqlite3.sqlite_version)
logger.debug("DB-API Version: %s", sqlite3.version)
conn = sqlite3.connect(DB_FILE)
c = conn.cursor()
c.execute(
'CREATE TABLE IF NOT EXISTS artists (ArtistID TEXT UNIQUE, ArtistName TEXT, ArtistSortName TEXT, DateAdded TEXT, Status TEXT, IncludeExtras INTEGER, LatestAlbum TEXT, ReleaseDate TEXT, AlbumID TEXT, HaveTracks INTEGER, TotalTracks INTEGER, LastUpdated TEXT, ArtworkURL TEXT, ThumbURL TEXT, Extras TEXT, Type TEXT, MetaCritic TEXT)')
# ReleaseFormat here means CD,Digital,Vinyl, etc. If using the default
# Headphones hybrid release, ReleaseID will equal AlbumID (AlbumID is
# releasegroup id)
c.execute(
'CREATE TABLE IF NOT EXISTS albums (ArtistID TEXT, ArtistName TEXT, AlbumTitle TEXT, AlbumASIN TEXT, ReleaseDate TEXT, DateAdded TEXT, AlbumID TEXT UNIQUE, Status TEXT, Type TEXT, ArtworkURL TEXT, ThumbURL TEXT, ReleaseID TEXT, ReleaseCountry TEXT, ReleaseFormat TEXT, SearchTerm TEXT, CriticScore TEXT, UserScore TEXT)')
# Format here means mp3, flac, etc.
c.execute(
'CREATE TABLE IF NOT EXISTS tracks (ArtistID TEXT, ArtistName TEXT, AlbumTitle TEXT, AlbumASIN TEXT, AlbumID TEXT, TrackTitle TEXT, TrackDuration, TrackID TEXT, TrackNumber INTEGER, Location TEXT, BitRate INTEGER, CleanName TEXT, Format TEXT, ReleaseID TEXT)')
c.execute(
'CREATE TABLE IF NOT EXISTS allalbums (ArtistID TEXT, ArtistName TEXT, AlbumTitle TEXT, AlbumASIN TEXT, ReleaseDate TEXT, AlbumID TEXT, Type TEXT, ReleaseID TEXT, ReleaseCountry TEXT, ReleaseFormat TEXT)')
c.execute(
'CREATE TABLE IF NOT EXISTS alltracks (ArtistID TEXT, ArtistName TEXT, AlbumTitle TEXT, AlbumASIN TEXT, AlbumID TEXT, TrackTitle TEXT, TrackDuration, TrackID TEXT, TrackNumber INTEGER, Location TEXT, BitRate INTEGER, CleanName TEXT, Format TEXT, ReleaseID TEXT)')
c.execute(
'CREATE TABLE IF NOT EXISTS snatched (AlbumID TEXT, Title TEXT, Size INTEGER, URL TEXT, DateAdded TEXT, Status TEXT, FolderName TEXT, Kind TEXT, TorrentHash TEXT)')
# Matched is a temporary value used to see if there was a match found in
# alltracks
c.execute(
'CREATE TABLE IF NOT EXISTS have (ArtistName TEXT, AlbumTitle TEXT, TrackNumber TEXT, TrackTitle TEXT, TrackLength TEXT, BitRate TEXT, Genre TEXT, Date TEXT, TrackID TEXT, Location TEXT, CleanName TEXT, Format TEXT, Matched TEXT)')
c.execute(
'CREATE TABLE IF NOT EXISTS lastfmcloud (ArtistName TEXT, ArtistID TEXT, Count INTEGER)')
c.execute(
'CREATE TABLE IF NOT EXISTS descriptions (ArtistID TEXT, ReleaseGroupID TEXT, ReleaseID TEXT, Summary TEXT, Content TEXT, LastUpdated TEXT)')
c.execute('CREATE TABLE IF NOT EXISTS blacklist (ArtistID TEXT UNIQUE)')
c.execute('CREATE TABLE IF NOT EXISTS newartists (ArtistName TEXT UNIQUE)')
c.execute(
'CREATE TABLE IF NOT EXISTS releases (ReleaseID TEXT, ReleaseGroupID TEXT, UNIQUE(ReleaseID, ReleaseGroupID))')
c.execute(
'CREATE INDEX IF NOT EXISTS tracks_albumid ON tracks(AlbumID ASC)')
c.execute(
'CREATE INDEX IF NOT EXISTS album_artistid_reldate ON albums(ArtistID ASC, ReleaseDate DESC)')
# Below creates indices to speed up Active Artist updating
c.execute(
'CREATE INDEX IF NOT EXISTS alltracks_relid ON alltracks(ReleaseID ASC, TrackID ASC)')
c.execute(
'CREATE INDEX IF NOT EXISTS allalbums_relid ON allalbums(ReleaseID ASC)')
c.execute('CREATE INDEX IF NOT EXISTS have_location ON have(Location ASC)')
# Below creates indices to speed up library scanning & matching
c.execute(
'CREATE INDEX IF NOT EXISTS have_Metadata ON have(ArtistName ASC, AlbumTitle ASC, TrackTitle ASC)')
c.execute(
'CREATE INDEX IF NOT EXISTS have_CleanName ON have(CleanName ASC)')
c.execute(
'CREATE INDEX IF NOT EXISTS tracks_Metadata ON tracks(ArtistName ASC, AlbumTitle ASC, TrackTitle ASC)')
c.execute(
'CREATE INDEX IF NOT EXISTS tracks_CleanName ON tracks(CleanName ASC)')
c.execute(
'CREATE INDEX IF NOT EXISTS alltracks_Metadata ON alltracks(ArtistName ASC, AlbumTitle ASC, TrackTitle ASC)')
c.execute(
'CREATE INDEX IF NOT EXISTS alltracks_CleanName ON alltracks(CleanName ASC)')
c.execute(
'CREATE INDEX IF NOT EXISTS tracks_Location ON tracks(Location ASC)')
c.execute(
'CREATE INDEX IF NOT EXISTS alltracks_Location ON alltracks(Location ASC)')
c.execute(
'CREATE INDEX IF NOT EXISTS tracks_artistid ON tracks(ArtistID ASC)')
# Speed up album page
c.execute('CREATE INDEX IF NOT EXISTS allalbums_albumid ON allalbums(AlbumID ASC)')
c.execute('CREATE INDEX IF NOT EXISTS alltracks_albumid ON alltracks(AlbumID ASC)')
c.execute('CREATE INDEX IF NOT EXISTS releases_albumid ON releases(ReleaseGroupID ASC)')
c.execute('CREATE INDEX IF NOT EXISTS descriptions_albumid ON descriptions(ReleaseGroupID ASC)')
# Speed up artist deletion
c.execute('CREATE INDEX IF NOT EXISTS allalbums_artistid ON allalbums(ArtistID ASC)')
c.execute('CREATE INDEX IF NOT EXISTS alltracks_artistid ON alltracks(ArtistID ASC)')
c.execute('CREATE INDEX IF NOT EXISTS descriptions_artistid ON descriptions(ArtistID ASC)')
# Speed up Artist refresh hybrid release
c.execute('CREATE INDEX IF NOT EXISTS albums_releaseid ON albums(ReleaseID ASC)')
c.execute('CREATE INDEX IF NOT EXISTS tracks_releaseid ON tracks(ReleaseID ASC)')
# Speed up scanning and track matching
c.execute('CREATE INDEX IF NOT EXISTS artist_artistname ON artists(ArtistName COLLATE NOCASE ASC)')
# General speed up
c.execute('CREATE INDEX IF NOT EXISTS artist_artistsortname ON artists(ArtistSortName COLLATE NOCASE ASC)')
c.execute(
"""CREATE INDEX IF NOT EXISTS have_matched_artist_album ON have(Matched ASC, ArtistName COLLATE NOCASE ASC, AlbumTitle COLLATE NOCASE ASC)""")
c.execute('DROP INDEX IF EXISTS have_matched')
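# Lightweight schema migration idiom used below: probe for a column with a
# SELECT and, if sqlite raises OperationalError, add it via ALTER TABLE.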
try:
c.execute('SELECT IncludeExtras from artists')
except sqlite3.OperationalError:
c.execute(
'ALTER TABLE artists ADD COLUMN IncludeExtras INTEGER DEFAULT 0')
try:
c.execute('SELECT LatestAlbum from artists')
except sqlite3.OperationalError:
c.execute('ALTER TABLE artists ADD COLUMN LatestAlbum TEXT')
try:
c.execute('SELECT ReleaseDate from artists')
except sqlite3.OperationalError:
c.execute('ALTER TABLE artists ADD COLUMN ReleaseDate TEXT')
try:
c.execute('SELECT AlbumID from artists')
except sqlite3.OperationalError:
c.execute('ALTER TABLE artists ADD COLUMN AlbumID TEXT')
try:
c.execute('SELECT HaveTracks from artists')
except sqlite3.OperationalError:
c.execute(
'ALTER TABLE artists ADD COLUMN HaveTracks INTEGER DEFAULT 0')
try:
c.execute('SELECT TotalTracks from artists')
except sqlite3.OperationalError:
c.execute(
'ALTER TABLE artists ADD COLUMN TotalTracks INTEGER DEFAULT 0')
try:
c.execute('SELECT Type from albums')
except sqlite3.OperationalError:
c.execute('ALTER TABLE albums ADD COLUMN Type TEXT DEFAULT "Album"')
try:
c.execute('SELECT TrackNumber from tracks')
except sqlite3.OperationalError:
c.execute('ALTER TABLE tracks ADD COLUMN TrackNumber INTEGER')
try:
c.execute('SELECT FolderName from snatched')
except sqlite3.OperationalError:
c.execute('ALTER TABLE snatched ADD COLUMN FolderName TEXT')
try:
c.execute('SELECT Location from tracks')
except sqlite3.OperationalError:
c.execute('ALTER TABLE tracks ADD COLUMN Location TEXT')
try:
c.execute('SELECT Location from have')
except sqlite3.OperationalError:
c.execute('ALTER TABLE have ADD COLUMN Location TEXT')
try:
c.execute('SELECT BitRate from tracks')
except sqlite3.OperationalError:
c.execute('ALTER TABLE tracks ADD COLUMN BitRate INTEGER')
try:
c.execute('SELECT CleanName from tracks')
except sqlite3.OperationalError:
c.execute('ALTER TABLE tracks ADD COLUMN CleanName TEXT')
try:
c.execute('SELECT CleanName from have')
except sqlite3.OperationalError:
c.execute('ALTER TABLE have ADD COLUMN CleanName TEXT')
# Add the Format column
try:
c.execute('SELECT Format from have')
except sqlite3.OperationalError:
c.execute('ALTER TABLE have ADD COLUMN Format TEXT DEFAULT NULL')
try:
c.execute('SELECT Format from tracks')
except sqlite3.OperationalError:
c.execute('ALTER TABLE tracks ADD COLUMN Format TEXT DEFAULT NULL')
try:
c.execute('SELECT LastUpdated from artists')
except sqlite3.OperationalError:
c.execute(
'ALTER TABLE artists ADD COLUMN LastUpdated TEXT DEFAULT NULL')
try:
c.execute('SELECT ArtworkURL from artists')
except sqlite3.OperationalError:
c.execute(
'ALTER TABLE artists ADD COLUMN ArtworkURL TEXT DEFAULT NULL')
try:
c.execute('SELECT ArtworkURL from albums')
except sqlite3.OperationalError:
c.execute('ALTER TABLE albums ADD COLUMN ArtworkURL TEXT DEFAULT NULL')
try:
c.execute('SELECT ThumbURL from artists')
except sqlite3.OperationalError:
c.execute('ALTER TABLE artists ADD COLUMN ThumbURL TEXT DEFAULT NULL')
try:
c.execute('SELECT ThumbURL from albums')
except sqlite3.OperationalError:
c.execute('ALTER TABLE albums ADD COLUMN ThumbURL TEXT DEFAULT NULL')
try:
c.execute('SELECT ArtistID from descriptions')
except sqlite3.OperationalError:
c.execute(
'ALTER TABLE descriptions ADD COLUMN ArtistID TEXT DEFAULT NULL')
try:
c.execute('SELECT LastUpdated from descriptions')
except sqlite3.OperationalError:
c.execute(
'ALTER TABLE descriptions ADD COLUMN LastUpdated TEXT DEFAULT NULL')
try:
c.execute('SELECT ReleaseID from albums')
except sqlite3.OperationalError:
c.execute('ALTER TABLE albums ADD COLUMN ReleaseID TEXT DEFAULT NULL')
try:
c.execute('SELECT ReleaseFormat from albums')
except sqlite3.OperationalError:
c.execute(
'ALTER TABLE albums ADD COLUMN ReleaseFormat TEXT DEFAULT NULL')
try:
c.execute('SELECT ReleaseCountry from albums')
except sqlite3.OperationalError:
c.execute(
'ALTER TABLE albums ADD COLUMN ReleaseCountry TEXT DEFAULT NULL')
try:
c.execute('SELECT ReleaseID from tracks')
except sqlite3.OperationalError:
c.execute('ALTER TABLE tracks ADD COLUMN ReleaseID TEXT DEFAULT NULL')
try:
c.execute('SELECT Matched from have')
except sqlite3.OperationalError:
c.execute('ALTER TABLE have ADD COLUMN Matched TEXT DEFAULT NULL')
try:
c.execute('SELECT Extras from artists')
except sqlite3.OperationalError:
c.execute('ALTER TABLE artists ADD COLUMN Extras TEXT DEFAULT NULL')
# Need to update some stuff when people are upgrading and have 'include
# extras' set globally/for an artist
if CONFIG.INCLUDE_EXTRAS:
CONFIG.EXTRAS = "1,2,3,4,5,6,7,8"
logger.info("Copying over current artist IncludeExtras information")
artists = c.execute(
'SELECT ArtistID, IncludeExtras from artists').fetchall()
for artist in artists:
if artist[1]:
c.execute(
'UPDATE artists SET Extras=? WHERE ArtistID=?', ("1,2,3,4,5,6,7,8", artist[0]))
try:
c.execute('SELECT Kind from snatched')
except sqlite3.OperationalError:
c.execute('ALTER TABLE snatched ADD COLUMN Kind TEXT DEFAULT NULL')
try:
c.execute('SELECT SearchTerm from albums')
except sqlite3.OperationalError:
c.execute('ALTER TABLE albums ADD COLUMN SearchTerm TEXT DEFAULT NULL')
try:
c.execute('SELECT CriticScore from albums')
except sqlite3.OperationalError:
c.execute('ALTER TABLE albums ADD COLUMN CriticScore TEXT DEFAULT NULL')
try:
c.execute('SELECT UserScore from albums')
except sqlite3.OperationalError:
c.execute('ALTER TABLE albums ADD COLUMN UserScore TEXT DEFAULT NULL')
try:
c.execute('SELECT Type from artists')
except sqlite3.OperationalError:
c.execute('ALTER TABLE artists ADD COLUMN Type TEXT DEFAULT NULL')
try:
c.execute('SELECT MetaCritic from artists')
except sqlite3.OperationalError:
c.execute('ALTER TABLE artists ADD COLUMN MetaCritic TEXT DEFAULT NULL')
try:
c.execute('SELECT TorrentHash from snatched')
except sqlite3.OperationalError:
c.execute('ALTER TABLE snatched ADD COLUMN TorrentHash TEXT')
c.execute('UPDATE snatched SET TorrentHash = FolderName WHERE Status LIKE "Seed_%"')
# One off script to set CleanName to lower case
clean_name_mixed = c.execute('SELECT CleanName FROM have ORDER BY Date Desc').fetchone()
if clean_name_mixed and clean_name_mixed[0] != clean_name_mixed[0].lower():
logger.info("Updating track clean name, this could take some time...")
c.execute('UPDATE tracks SET CleanName = LOWER(CleanName) WHERE LOWER(CleanName) != CleanName')
c.execute('UPDATE alltracks SET CleanName = LOWER(CleanName) WHERE LOWER(CleanName) != CleanName')
c.execute('UPDATE have SET CleanName = LOWER(CleanName) WHERE LOWER(CleanName) != CleanName')
conn.commit()
c.close()
def shutdown(restart=False, update=False):
cherrypy.engine.exit()
SCHED.shutdown(wait=False)
CONFIG.write()
if not restart and not update:
logger.info('Headphones is shutting down...')
if update:
logger.info('Headphones is updating...')
try:
versioncheck.update()
except Exception as e:
logger.warn('Headphones failed to update: %s. Restarting.', e)
if CREATEPID:
logger.info('Removing pidfile %s', PIDFILE)
os.remove(PIDFILE)
if restart:
logger.info('Headphones is restarting...')
popen_list = [sys.executable, FULL_PATH]
popen_list += ARGS
if '--nolaunch' not in popen_list:
popen_list += ['--nolaunch']
logger.info('Restarting Headphones with %s', popen_list)
subprocess.Popen(popen_list, cwd=os.getcwd())
os._exit(0)
| 25,513
|
Python
|
.py
| 561
| 37.757576
| 340
| 0.677558
|
rembo10/headphones
| 3,370
| 601
| 527
|
GPL-3.0
|
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
|
9,299
|
pathrender_test.py
|
rembo10_headphones/headphones/pathrender_test.py
|
# encoding=utf8
# This file is part of Headphones.
#
# Headphones is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Headphones is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Headphones. If not, see <http://www.gnu.org/licenses/>.
"""
Test module for pathrender.
"""
import headphones.pathrender as _pr
from headphones.pathrender import Pattern, Warnings
from .unittestcompat import TestCase
__author__ = "Andrzej Ciarkowski <andrzej.ciarkowski@gmail.com>"
class PathRenderTest(TestCase):
"""
Tests for pathrender module.
"""
def test_parsing(self):
"""pathrender: pattern parsing"""
pattern = Pattern("{$Disc.}$Track - $Artist - $Title{ [$Year]}")
expected = [
_pr._OptionalBlock([
_pr._Replacement("$Disc"),
_pr._LiteralText(".")
]),
_pr._Replacement("$Track"),
_pr._LiteralText(" - "),
_pr._Replacement("$Artist"),
_pr._LiteralText(" - "),
_pr._Replacement("$Title"),
_pr._OptionalBlock([
_pr._LiteralText(" ["),
_pr._Replacement("$Year"),
_pr._LiteralText("]")
])
]
self.assertEqual(expected, pattern._pattern)
self.assertItemsEqual([], pattern.warnings)
def test_parsing_warnings(self):
"""pathrender: pattern parsing with warnings"""
pattern = Pattern("{$Disc.}$Track - $Artist - $Title{ [$Year]")
self.assertEqual(set([Warnings.UNCLOSED_OPTIONAL]), pattern.warnings)
pattern = Pattern("{$Disc.}$Track - $Artist - $Title{ [$Year]'}")
self.assertEqual(set([Warnings.UNCLOSED_ESCAPE, Warnings.UNCLOSED_OPTIONAL]), pattern.warnings)
def test_replacement(self):
"""pathrender: _Replacement variable substitution"""
r = _pr._Replacement("$Title")
subst = {'$Title': 'foo', '$Track': 'bar'}
res = r.render(subst)
self.assertEqual(res, 'foo', 'check valid replacement')
subst = {}
res = r.render(subst)
self.assertEqual(res, '$Title', 'check missing replacement')
subst = {'$Title': None}
res = r.render(subst)
self.assertEqual(res, '', 'check render() works with None')
def test_literal(self):
"""pathrender: _Literal text rendering"""
l = _pr._LiteralText("foo")
subst = {'$foo': 'bar'}
res = l.render(subst)
self.assertEqual(res, 'foo')
def test_optional(self):
"""pathrender: _OptionalBlock element processing"""
o = _pr._OptionalBlock([
_pr._Replacement("$Title"),
_pr._LiteralText(".foobar")
])
subst = {'$Title': 'foo', '$Track': 'bar'}
res = o.render(subst)
self.assertEqual(res, 'foo.foobar', 'check non-empty replacement')
subst = {'$Title': ''}
res = o.render(subst)
self.assertEqual(res, '', 'check empty replacement')
subst = {'$Title': None}
res = o.render(subst)
self.assertEqual(res, '', 'check render() works with None')
| 3,564
|
Python
|
.py
| 86
| 33.860465
| 103
| 0.608708
|
rembo10/headphones
| 3,370
| 601
| 527
|
GPL-3.0
|
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
|