id
int64 0
458k
| file_name
stringlengths 4
119
| file_path
stringlengths 14
227
| content
stringlengths 24
9.96M
| size
int64 24
9.96M
| language
stringclasses 1
value | extension
stringclasses 14
values | total_lines
int64 1
219k
| avg_line_length
float64 2.52
4.63M
| max_line_length
int64 5
9.91M
| alphanum_fraction
float64 0
1
| repo_name
stringlengths 7
101
| repo_stars
int64 100
139k
| repo_forks
int64 0
26.4k
| repo_open_issues
int64 0
2.27k
| repo_license
stringclasses 12
values | repo_extraction_date
stringclasses 433
values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
11,100
|
locales.py
|
metabrainz_picard/picard/const/locales.py
|
# -*- coding: utf-8 -*-
#
# Picard, the next-generation MusicBrainz tagger
#
# Copyright (C) 2007 Lukáš Lalinský
# Copyright (C) 2014, 2020, 2024 Laurent Monin
# Copyright (C) 2017 Sambhav Kothari
# Copyright (C) 2019, 2023 Philipp Wolfer
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
from picard.i18n import N_
# List of alias locales
ALIAS_LOCALES = {
'aa': N_('Afar'),
'aa_DJ': N_('Afar (Djibouti)'),
'aa_ER': N_('Afar (Eritrea)'),
'aa_ER_SAAHO': N_('Afar (Eritrea) (Saho)'),
'aa_ET': N_('Afar (Ethiopia)'),
'ab': N_('Abkhazian'),
'ab_GE': N_('Abkhazian (Georgia)'),
'af': N_('Afrikaans'),
'af_NA': N_('Afrikaans (Namibia)'),
'af_ZA': N_('Afrikaans (South Africa)'),
'agq': N_('Aghem'),
'agq_CM': N_('Aghem (Cameroon)'),
'ak': N_('Akan'),
'ak_GH': N_('Akan (Ghana)'),
'am': N_('Amharic'),
'am_ET': N_('Amharic (Ethiopia)'),
'an': N_('Aragonese'),
'an_ES': N_('Aragonese (Spain)'),
'ann': N_('Obolo'),
'ann_NG': N_('Obolo (Nigeria)'),
'apc_SY': N_('Syria'),
'ar': N_('Arabic'),
'ar_001': N_('Arabic (world)'),
'ar_AE': N_('Arabic (United Arab Emirates)'),
'ar_BH': N_('Arabic (Bahrain)'),
'ar_DJ': N_('Arabic (Djibouti)'),
'ar_DZ': N_('Arabic (Algeria)'),
'ar_EG': N_('Arabic (Egypt)'),
'ar_EH': N_('Arabic (Western Sahara)'),
'ar_ER': N_('Arabic (Eritrea)'),
'ar_IL': N_('Arabic (Israel)'),
'ar_IQ': N_('Arabic (Iraq)'),
'ar_JO': N_('Arabic (Jordan)'),
'ar_KM': N_('Arabic (Comoros)'),
'ar_KW': N_('Arabic (Kuwait)'),
'ar_LB': N_('Arabic (Lebanon)'),
'ar_LY': N_('Arabic (Libya)'),
'ar_MA': N_('Arabic (Morocco)'),
'ar_MR': N_('Arabic (Mauritania)'),
'ar_OM': N_('Arabic (Oman)'),
'ar_PS': N_('Arabic (Palestinian Territories)'),
'ar_QA': N_('Arabic (Qatar)'),
'ar_SA': N_('Arabic (Saudi Arabia)'),
'ar_SD': N_('Arabic (Sudan)'),
'ar_SO': N_('Arabic (Somalia)'),
'ar_SS': N_('Arabic (South Sudan)'),
'ar_SY': N_('Arabic (Syria)'),
'ar_TD': N_('Arabic (Chad)'),
'ar_TN': N_('Arabic (Tunisia)'),
'ar_YE': N_('Arabic (Yemen)'),
'arn': N_('Mapuche'),
'arn_CL': N_('Mapuche (Chile)'),
'as': N_('Assamese'),
'as_IN': N_('Assamese (India)'),
'asa': N_('Asu'),
'asa_TZ': N_('Asu (Tanzania)'),
'ast': N_('Asturian'),
'ast_ES': N_('Asturian (Spain)'),
'az': N_('Azerbaijani'),
'az_Arab': N_('Azerbaijani (Arabic)'),
'az_Arab_IQ': N_('Azerbaijani (Arabic) (Iraq)'),
'az_Arab_IR': N_('Azerbaijani (Arabic) (Iran)'),
'az_Arab_TR': N_('Azerbaijani (Arabic) (Türkiye)'),
'az_Cyrl': N_('Azerbaijani (Cyrillic)'),
'az_Cyrl_AZ': N_('Azerbaijani (Cyrillic) (Azerbaijan)'),
'az_Latn': N_('Azerbaijani (Latin)'),
'az_Latn_AZ': N_('Azerbaijani (Latin) (Azerbaijan)'),
'ba': N_('Bashkir'),
'ba_RU': N_('Bashkir (Russia)'),
'bal': N_('Baluchi'),
'bal_Arab': N_('Baluchi (Arabic)'),
'bal_Arab_PK': N_('Baluchi (Arabic) (Pakistan)'),
'bal_Latn': N_('Baluchi (Latin)'),
'bal_Latn_PK': N_('Baluchi (Latin) (Pakistan)'),
'bas': N_('Basaa'),
'bas_CM': N_('Basaa (Cameroon)'),
'be': N_('Belarusian'),
'be_BY': N_('Belarusian (Belarus)'),
'be_tarask': N_('Belarusian (Taraskievica orthography)'),
'bem': N_('Bemba'),
'bem_ZM': N_('Bemba (Zambia)'),
'bez': N_('Bena'),
'bez_TZ': N_('Bena (Tanzania)'),
'bg': N_('Bulgarian'),
'bg_BG': N_('Bulgarian (Bulgaria)'),
'bgc': N_('Haryanvi'),
'bgc_IN': N_('Haryanvi (India)'),
'bgn': N_('Western Balochi'),
'bgn_AE': N_('Western Balochi (United Arab Emirates)'),
'bgn_AF': N_('Western Balochi (Afghanistan)'),
'bgn_IR': N_('Western Balochi (Iran)'),
'bgn_OM': N_('Western Balochi (Oman)'),
'bgn_PK': N_('Western Balochi (Pakistan)'),
'bho': N_('Bhojpuri'),
'bho_IN': N_('Bhojpuri (India)'),
'blt': N_('Tai Dam'),
'blt_VN': N_('Tai Dam (Vietnam)'),
'bm': N_('Bambara'),
'bm_ML': N_('Bambara (Mali)'),
'bm_Nkoo': N_('Bambara (N’Ko)'),
'bm_Nkoo_ML': N_('Bambara (N’Ko) (Mali)'),
'bn': N_('Bangla'),
'bn_BD': N_('Bangla (Bangladesh)'),
'bn_IN': N_('Bangla (India)'),
'bo': N_('Tibetan'),
'bo_CN': N_('Tibetan (China)'),
'bo_IN': N_('Tibetan (India)'),
'br': N_('Breton'),
'br_FR': N_('Breton (France)'),
'brx': N_('Bodo'),
'brx_IN': N_('Bodo (India)'),
'bs': N_('Bosnian'),
'bs_Cyrl': N_('Bosnian (Cyrillic)'),
'bs_Cyrl_BA': N_('Bosnian (Cyrillic) (Bosnia & Herzegovina)'),
'bs_Latn': N_('Bosnian (Latin)'),
'bs_Latn_BA': N_('Bosnian (Latin) (Bosnia & Herzegovina)'),
'bss': N_('Akoose'),
'bss_CM': N_('Akoose (Cameroon)'),
'byn': N_('Blin'),
'byn_ER': N_('Blin (Eritrea)'),
'ca': N_('Catalan'),
'ca_AD': N_('Catalan (Andorra)'),
'ca_ES': N_('Catalan (Spain)'),
'ca_ES_valencia': N_('Catalan (Spain Valencian)'),
'ca_FR': N_('Catalan (France)'),
'ca_IT': N_('Catalan (Italy)'),
'cad': N_('Caddo'),
'cad_US': N_('Caddo (United States)'),
'cch': N_('Atsam'),
'cch_NG': N_('Atsam (Nigeria)'),
'ccp': N_('Chakma'),
'ccp_BD': N_('Chakma (Bangladesh)'),
'ccp_IN': N_('Chakma (India)'),
'ce': N_('Chechen'),
'ce_RU': N_('Chechen (Russia)'),
'ceb': N_('Cebuano'),
'ceb_PH': N_('Cebuano (Philippines)'),
'cgg': N_('Chiga'),
'cgg_UG': N_('Chiga (Uganda)'),
'cho': N_('Choctaw'),
'cho_US': N_('Choctaw (United States)'),
'chr': N_('Cherokee'),
'chr_US': N_('Cherokee (United States)'),
'cic': N_('Chickasaw'),
'cic_US': N_('Chickasaw (United States)'),
'ckb': N_('Central Kurdish'),
'ckb_IQ': N_('Central Kurdish (Iraq)'),
'ckb_IR': N_('Central Kurdish (Iran)'),
'co': N_('Corsican'),
'co_FR': N_('Corsican (France)'),
'cs': N_('Czech'),
'cs_CZ': N_('Czech (Czechia)'),
'cu': N_('Church Slavic'),
'cu_RU': N_('Church Slavic (Russia)'),
'cv': N_('Chuvash'),
'cv_RU': N_('Chuvash (Russia)'),
'cy': N_('Welsh'),
'cy_GB': N_('Welsh (United Kingdom)'),
'da': N_('Danish'),
'da_DK': N_('Danish (Denmark)'),
'da_GL': N_('Danish (Greenland)'),
'dav': N_('Taita'),
'dav_KE': N_('Taita (Kenya)'),
'de': N_('German'),
'de_AT': N_('German (Austria)'),
'de_BE': N_('German (Belgium)'),
'de_CH': N_('German (Switzerland)'),
'de_DE': N_('German (Germany)'),
'de_IT': N_('German (Italy)'),
'de_LI': N_('German (Liechtenstein)'),
'de_LU': N_('German (Luxembourg)'),
'dje': N_('Zarma'),
'dje_NE': N_('Zarma (Niger)'),
'doi': N_('Dogri'),
'doi_IN': N_('Dogri (India)'),
'dsb': N_('Lower Sorbian'),
'dsb_DE': N_('Lower Sorbian (Germany)'),
'dua': N_('Duala'),
'dua_CM': N_('Duala (Cameroon)'),
'dv': N_('Divehi'),
'dv_MV': N_('Divehi (Maldives)'),
'dyo': N_('Jola-Fonyi'),
'dyo_SN': N_('Jola-Fonyi (Senegal)'),
'dz': N_('Dzongkha'),
'dz_BT': N_('Dzongkha (Bhutan)'),
'ebu': N_('Embu'),
'ebu_KE': N_('Embu (Kenya)'),
'ee': N_('Ewe'),
'ee_GH': N_('Ewe (Ghana)'),
'ee_TG': N_('Ewe (Togo)'),
'el': N_('Greek'),
'el_CY': N_('Greek (Cyprus)'),
'el_GR': N_('Greek (Greece)'),
'el_polyton': N_('Greek (Polytonic)'),
'en': N_('English'),
'en_001': N_('English (world)'),
'en_150': N_('English (Europe)'),
'en_AE': N_('English (United Arab Emirates)'),
'en_AG': N_('English (Antigua & Barbuda)'),
'en_AI': N_('English (Anguilla)'),
'en_AS': N_('English (American Samoa)'),
'en_AT': N_('English (Austria)'),
'en_AU': N_('English (Australia)'),
'en_BB': N_('English (Barbados)'),
'en_BE': N_('English (Belgium)'),
'en_BI': N_('English (Burundi)'),
'en_BM': N_('English (Bermuda)'),
'en_BS': N_('English (Bahamas)'),
'en_BW': N_('English (Botswana)'),
'en_BZ': N_('English (Belize)'),
'en_CA': N_('English (Canada)'),
'en_CC': N_('English (Cocos (Keeling) Islands)'),
'en_CH': N_('English (Switzerland)'),
'en_CK': N_('English (Cook Islands)'),
'en_CM': N_('English (Cameroon)'),
'en_CX': N_('English (Christmas Island)'),
'en_CY': N_('English (Cyprus)'),
'en_DE': N_('English (Germany)'),
'en_DG': N_('English (Diego Garcia)'),
'en_DK': N_('English (Denmark)'),
'en_DM': N_('English (Dominica)'),
'en_Dsrt': N_('English (Deseret)'),
'en_Dsrt_US': N_('English (Deseret) (United States)'),
'en_ER': N_('English (Eritrea)'),
'en_FI': N_('English (Finland)'),
'en_FJ': N_('English (Fiji)'),
'en_FK': N_('English (Falkland Islands)'),
'en_FM': N_('English (Micronesia)'),
'en_GB': N_('English (United Kingdom)'),
'en_GD': N_('English (Grenada)'),
'en_GG': N_('English (Guernsey)'),
'en_GH': N_('English (Ghana)'),
'en_GI': N_('English (Gibraltar)'),
'en_GM': N_('English (Gambia)'),
'en_GU': N_('English (Guam)'),
'en_GY': N_('English (Guyana)'),
'en_HK': N_('English (Hong Kong)'),
'en_IE': N_('English (Ireland)'),
'en_IL': N_('English (Israel)'),
'en_IM': N_('English (Isle of Man)'),
'en_IN': N_('English (India)'),
'en_IO': N_('English (British Indian Ocean Territory)'),
'en_JE': N_('English (Jersey)'),
'en_JM': N_('English (Jamaica)'),
'en_KE': N_('English (Kenya)'),
'en_KI': N_('English (Kiribati)'),
'en_KN': N_('English (St. Kitts & Nevis)'),
'en_KY': N_('English (Cayman Islands)'),
'en_LC': N_('English (St. Lucia)'),
'en_LR': N_('English (Liberia)'),
'en_LS': N_('English (Lesotho)'),
'en_MG': N_('English (Madagascar)'),
'en_MH': N_('English (Marshall Islands)'),
'en_MO': N_('English (Macao)'),
'en_MP': N_('English (Northern Mariana Islands)'),
'en_MS': N_('English (Montserrat)'),
'en_MT': N_('English (Malta)'),
'en_MU': N_('English (Mauritius)'),
'en_MV': N_('English (Maldives)'),
'en_MW': N_('English (Malawi)'),
'en_MY': N_('English (Malaysia)'),
'en_NA': N_('English (Namibia)'),
'en_NF': N_('English (Norfolk Island)'),
'en_NG': N_('English (Nigeria)'),
'en_NL': N_('English (Netherlands)'),
'en_NR': N_('English (Nauru)'),
'en_NU': N_('English (Niue)'),
'en_NZ': N_('English (New Zealand)'),
'en_PG': N_('English (Papua New Guinea)'),
'en_PH': N_('English (Philippines)'),
'en_PK': N_('English (Pakistan)'),
'en_PN': N_('English (Pitcairn Islands)'),
'en_PR': N_('English (Puerto Rico)'),
'en_PW': N_('English (Palau)'),
'en_RW': N_('English (Rwanda)'),
'en_SB': N_('English (Solomon Islands)'),
'en_SC': N_('English (Seychelles)'),
'en_SD': N_('English (Sudan)'),
'en_SE': N_('English (Sweden)'),
'en_SG': N_('English (Singapore)'),
'en_SH': N_('English (St. Helena)'),
'en_SI': N_('English (Slovenia)'),
'en_SL': N_('English (Sierra Leone)'),
'en_SS': N_('English (South Sudan)'),
'en_SX': N_('English (Sint Maarten)'),
'en_SZ': N_('English (Eswatini)'),
'en_Shaw': N_('English (Shavian)'),
'en_Shaw_GB': N_('English (Shavian) (United Kingdom)'),
'en_TC': N_('English (Turks & Caicos Islands)'),
'en_TK': N_('English (Tokelau)'),
'en_TO': N_('English (Tonga)'),
'en_TT': N_('English (Trinidad & Tobago)'),
'en_TV': N_('English (Tuvalu)'),
'en_TZ': N_('English (Tanzania)'),
'en_UG': N_('English (Uganda)'),
'en_UM': N_('English (U.S. Outlying Islands)'),
'en_US': N_('English (United States)'),
'en_VC': N_('English (St. Vincent & Grenadines)'),
'en_VG': N_('English (British Virgin Islands)'),
'en_VI': N_('English (U.S. Virgin Islands)'),
'en_VU': N_('English (Vanuatu)'),
'en_WS': N_('English (Samoa)'),
'en_ZA': N_('English (South Africa)'),
'en_ZM': N_('English (Zambia)'),
'en_ZW': N_('English (Zimbabwe)'),
'eo': N_('Esperanto'),
'eo_001': N_('Esperanto (world)'),
'es': N_('Spanish'),
'es_419': N_('Spanish (Latin America)'),
'es_AR': N_('Spanish (Argentina)'),
'es_BO': N_('Spanish (Bolivia)'),
'es_BR': N_('Spanish (Brazil)'),
'es_BZ': N_('Spanish (Belize)'),
'es_CL': N_('Spanish (Chile)'),
'es_CO': N_('Spanish (Colombia)'),
'es_CR': N_('Spanish (Costa Rica)'),
'es_CU': N_('Spanish (Cuba)'),
'es_DO': N_('Spanish (Dominican Republic)'),
'es_EA': N_('Spanish (Ceuta & Melilla)'),
'es_EC': N_('Spanish (Ecuador)'),
'es_ES': N_('Spanish (Spain)'),
'es_GQ': N_('Spanish (Equatorial Guinea)'),
'es_GT': N_('Spanish (Guatemala)'),
'es_HN': N_('Spanish (Honduras)'),
'es_IC': N_('Spanish (Canary Islands)'),
'es_MX': N_('Spanish (Mexico)'),
'es_NI': N_('Spanish (Nicaragua)'),
'es_PA': N_('Spanish (Panama)'),
'es_PE': N_('Spanish (Peru)'),
'es_PH': N_('Spanish (Philippines)'),
'es_PR': N_('Spanish (Puerto Rico)'),
'es_PY': N_('Spanish (Paraguay)'),
'es_SV': N_('Spanish (El Salvador)'),
'es_US': N_('Spanish (United States)'),
'es_UY': N_('Spanish (Uruguay)'),
'es_VE': N_('Spanish (Venezuela)'),
'et': N_('Estonian'),
'et_EE': N_('Estonian (Estonia)'),
'eu': N_('Basque'),
'eu_ES': N_('Basque (Spain)'),
'ewo': N_('Ewondo'),
'ewo_CM': N_('Ewondo (Cameroon)'),
'fa': N_('Persian'),
'fa_AF': N_('Persian (Afghanistan)'),
'fa_IR': N_('Persian (Iran)'),
'ff': N_('Fula'),
'ff_Adlm': N_('Fula (Adlam)'),
'ff_Adlm_BF': N_('Fula (Adlam) (Burkina Faso)'),
'ff_Adlm_CM': N_('Fula (Adlam) (Cameroon)'),
'ff_Adlm_GH': N_('Fula (Adlam) (Ghana)'),
'ff_Adlm_GM': N_('Fula (Adlam) (Gambia)'),
'ff_Adlm_GN': N_('Fula (Adlam) (Guinea)'),
'ff_Adlm_GW': N_('Fula (Adlam) (Guinea-Bissau)'),
'ff_Adlm_LR': N_('Fula (Adlam) (Liberia)'),
'ff_Adlm_MR': N_('Fula (Adlam) (Mauritania)'),
'ff_Adlm_NE': N_('Fula (Adlam) (Niger)'),
'ff_Adlm_NG': N_('Fula (Adlam) (Nigeria)'),
'ff_Adlm_SL': N_('Fula (Adlam) (Sierra Leone)'),
'ff_Adlm_SN': N_('Fula (Adlam) (Senegal)'),
'ff_Latn': N_('Fula (Latin)'),
'ff_Latn_BF': N_('Fula (Latin) (Burkina Faso)'),
'ff_Latn_CM': N_('Fula (Latin) (Cameroon)'),
'ff_Latn_GH': N_('Fula (Latin) (Ghana)'),
'ff_Latn_GM': N_('Fula (Latin) (Gambia)'),
'ff_Latn_GN': N_('Fula (Latin) (Guinea)'),
'ff_Latn_GW': N_('Fula (Latin) (Guinea-Bissau)'),
'ff_Latn_LR': N_('Fula (Latin) (Liberia)'),
'ff_Latn_MR': N_('Fula (Latin) (Mauritania)'),
'ff_Latn_NE': N_('Fula (Latin) (Niger)'),
'ff_Latn_NG': N_('Fula (Latin) (Nigeria)'),
'ff_Latn_SL': N_('Fula (Latin) (Sierra Leone)'),
'ff_Latn_SN': N_('Fula (Latin) (Senegal)'),
'fi': N_('Finnish'),
'fi_FI': N_('Finnish (Finland)'),
'fil': N_('Filipino'),
'fil_PH': N_('Filipino (Philippines)'),
'fo': N_('Faroese'),
'fo_DK': N_('Faroese (Denmark)'),
'fo_FO': N_('Faroese (Faroe Islands)'),
'fr': N_('French'),
'fr_BE': N_('French (Belgium)'),
'fr_BF': N_('French (Burkina Faso)'),
'fr_BI': N_('French (Burundi)'),
'fr_BJ': N_('French (Benin)'),
'fr_BL': N_('French (St. Barthélemy)'),
'fr_CA': N_('French (Canada)'),
'fr_CD': N_('French (Congo - Kinshasa)'),
'fr_CF': N_('French (Central African Republic)'),
'fr_CG': N_('French (Congo - Brazzaville)'),
'fr_CH': N_('French (Switzerland)'),
'fr_CI': N_('French (Côte d’Ivoire)'),
'fr_CM': N_('French (Cameroon)'),
'fr_DJ': N_('French (Djibouti)'),
'fr_DZ': N_('French (Algeria)'),
'fr_FR': N_('French (France)'),
'fr_GA': N_('French (Gabon)'),
'fr_GF': N_('French (French Guiana)'),
'fr_GN': N_('French (Guinea)'),
'fr_GP': N_('French (Guadeloupe)'),
'fr_GQ': N_('French (Equatorial Guinea)'),
'fr_HT': N_('French (Haiti)'),
'fr_KM': N_('French (Comoros)'),
'fr_LU': N_('French (Luxembourg)'),
'fr_MA': N_('French (Morocco)'),
'fr_MC': N_('French (Monaco)'),
'fr_MF': N_('French (St. Martin)'),
'fr_MG': N_('French (Madagascar)'),
'fr_ML': N_('French (Mali)'),
'fr_MQ': N_('French (Martinique)'),
'fr_MR': N_('French (Mauritania)'),
'fr_MU': N_('French (Mauritius)'),
'fr_NC': N_('French (New Caledonia)'),
'fr_NE': N_('French (Niger)'),
'fr_PF': N_('French (French Polynesia)'),
'fr_PM': N_('French (St. Pierre & Miquelon)'),
'fr_RE': N_('French (Réunion)'),
'fr_RW': N_('French (Rwanda)'),
'fr_SC': N_('French (Seychelles)'),
'fr_SN': N_('French (Senegal)'),
'fr_SY': N_('French (Syria)'),
'fr_TD': N_('French (Chad)'),
'fr_TG': N_('French (Togo)'),
'fr_TN': N_('French (Tunisia)'),
'fr_VU': N_('French (Vanuatu)'),
'fr_WF': N_('French (Wallis & Futuna)'),
'fr_YT': N_('French (Mayotte)'),
'frr': N_('Northern Frisian'),
'frr_DE': N_('Northern Frisian (Germany)'),
'fur': N_('Friulian'),
'fur_IT': N_('Friulian (Italy)'),
'fy': N_('Western Frisian'),
'fy_NL': N_('Western Frisian (Netherlands)'),
'ga': N_('Irish'),
'ga_GB': N_('Irish (United Kingdom)'),
'ga_IE': N_('Irish (Ireland)'),
'gaa': N_('Ga'),
'gaa_GH': N_('Ga (Ghana)'),
'gd': N_('Scottish Gaelic'),
'gd_GB': N_('Scottish Gaelic (United Kingdom)'),
'gez': N_('Geez'),
'gez_ER': N_('Geez (Eritrea)'),
'gez_ET': N_('Geez (Ethiopia)'),
'gl': N_('Galician'),
'gl_ES': N_('Galician (Spain)'),
'gn': N_('Guarani'),
'gn_PY': N_('Guarani (Paraguay)'),
'gsw': N_('Swiss German'),
'gsw_CH': N_('Swiss German (Switzerland)'),
'gsw_FR': N_('Swiss German (France)'),
'gsw_LI': N_('Swiss German (Liechtenstein)'),
'gu': N_('Gujarati'),
'gu_IN': N_('Gujarati (India)'),
'guz': N_('Gusii'),
'guz_KE': N_('Gusii (Kenya)'),
'gv': N_('Manx'),
'gv_IM': N_('Manx (Isle of Man)'),
'ha': N_('Hausa'),
'ha_Arab': N_('Hausa (Arabic)'),
'ha_Arab_NG': N_('Hausa (Arabic) (Nigeria)'),
'ha_Arab_SD': N_('Hausa (Arabic) (Sudan)'),
'ha_GH': N_('Hausa (Ghana)'),
'ha_Latn': N_('Hausa (Latin)'),
'ha_Latn_GH': N_('Hausa (Latin) (Ghana)'),
'ha_Latn_NE': N_('Hausa (Latin) (Niger)'),
'ha_Latn_NG': N_('Hausa (Latin) (Nigeria)'),
'ha_NE': N_('Hausa (Niger)'),
'ha_NG': N_('Hausa (Nigeria)'),
'ha_SD': N_('Hausa (Sudan)'),
'haw': N_('Hawaiian'),
'haw_US': N_('Hawaiian (United States)'),
'he': N_('Hebrew'),
'he_IL': N_('Hebrew (Israel)'),
'hi': N_('Hindi'),
'hi_IN': N_('Hindi (India)'),
'hi_Latn': N_('Hindi (Latin)'),
'hi_Latn_IN': N_('Hindi (Latin) (India)'),
'hnj': N_('Hmong Njua'),
'hnj_Hmnp': N_('Hmong Njua (Nyiakeng Puachue Hmong)'),
'hnj_Hmnp_US': N_('Hmong Njua (Nyiakeng Puachue Hmong) (United States)'),
'hr': N_('Croatian'),
'hr_BA': N_('Croatian (Bosnia & Herzegovina)'),
'hr_HR': N_('Croatian (Croatia)'),
'hsb': N_('Upper Sorbian'),
'hsb_DE': N_('Upper Sorbian (Germany)'),
'hu': N_('Hungarian'),
'hu_HU': N_('Hungarian (Hungary)'),
'hy': N_('Armenian'),
'hy_AM': N_('Armenian (Armenia)'),
'hy_AM_REVISED': N_('Armenian (Armenia) (Revised Orthography)'),
'ia': N_('Interlingua'),
'ia_001': N_('Interlingua (world)'),
'id': N_('Indonesian'),
'id_ID': N_('Indonesian (Indonesia)'),
'ig': N_('Igbo'),
'ig_NG': N_('Igbo (Nigeria)'),
'ii': N_('Sichuan Yi'),
'ii_CN': N_('Sichuan Yi (China)'),
'io': N_('Ido'),
'io_001': N_('Ido (world)'),
'is': N_('Icelandic'),
'is_IS': N_('Icelandic (Iceland)'),
'it': N_('Italian'),
'it_CH': N_('Italian (Switzerland)'),
'it_IT': N_('Italian (Italy)'),
'it_SM': N_('Italian (San Marino)'),
'it_VA': N_('Italian (Vatican City)'),
'iu': N_('Inuktitut'),
'iu_CA': N_('Inuktitut (Canada)'),
'iu_Latn': N_('Inuktitut (Latin)'),
'iu_Latn_CA': N_('Inuktitut (Latin) (Canada)'),
'ja': N_('Japanese'),
'ja_JP': N_('Japanese (Japan)'),
'jbo': N_('Lojban'),
'jbo_001': N_('Lojban (world)'),
'jgo': N_('Ngomba'),
'jgo_CM': N_('Ngomba (Cameroon)'),
'jmc': N_('Machame'),
'jmc_TZ': N_('Machame (Tanzania)'),
'jv': N_('Javanese'),
'jv_ID': N_('Javanese (Indonesia)'),
'ka': N_('Georgian'),
'ka_GE': N_('Georgian (Georgia)'),
'kab': N_('Kabyle'),
'kab_DZ': N_('Kabyle (Algeria)'),
'kaj': N_('Jju'),
'kaj_NG': N_('Jju (Nigeria)'),
'kam': N_('Kamba'),
'kam_KE': N_('Kamba (Kenya)'),
'kcg': N_('Tyap'),
'kcg_NG': N_('Tyap (Nigeria)'),
'kde': N_('Makonde'),
'kde_TZ': N_('Makonde (Tanzania)'),
'kea': N_('Kabuverdianu'),
'kea_CV': N_('Kabuverdianu (Cape Verde)'),
'ken': N_('Kenyang'),
'ken_CM': N_('Kenyang (Cameroon)'),
'kfo': N_('Koro'),
'kfo_CI': N_('Koro (Côte d’Ivoire)'),
'kgp': N_('Kaingang'),
'kgp_BR': N_('Kaingang (Brazil)'),
'khq': N_('Koyra Chiini'),
'khq_ML': N_('Koyra Chiini (Mali)'),
'ki': N_('Kikuyu'),
'ki_KE': N_('Kikuyu (Kenya)'),
'kk': N_('Kazakh'),
'kk_Cyrl': N_('Kazakh (Cyrillic)'),
'kk_Cyrl_KZ': N_('Kazakh (Cyrillic) (Kazakhstan)'),
'kk_KZ': N_('Kazakh (Kazakhstan)'),
'kkj': N_('Kako'),
'kkj_CM': N_('Kako (Cameroon)'),
'kl': N_('Kalaallisut'),
'kl_GL': N_('Kalaallisut (Greenland)'),
'kln': N_('Kalenjin'),
'kln_KE': N_('Kalenjin (Kenya)'),
'km': N_('Khmer'),
'km_KH': N_('Khmer (Cambodia)'),
'kn': N_('Kannada'),
'kn_IN': N_('Kannada (India)'),
'ko': N_('Korean'),
'ko_KP': N_('Korean (North Korea)'),
'ko_KR': N_('Korean (South Korea)'),
'kok': N_('Konkani'),
'kok_IN': N_('Konkani (India)'),
'kpe': N_('Kpelle'),
'kpe_GN': N_('Kpelle (Guinea)'),
'kpe_LR': N_('Kpelle (Liberia)'),
'ks': N_('Kashmiri'),
'ks_Arab': N_('Kashmiri (Arabic)'),
'ks_Arab_IN': N_('Kashmiri (Arabic) (India)'),
'ks_Deva': N_('Kashmiri (Devanagari)'),
'ks_Deva_IN': N_('Kashmiri (Devanagari) (India)'),
'ksb': N_('Shambala'),
'ksb_TZ': N_('Shambala (Tanzania)'),
'ksf': N_('Bafia'),
'ksf_CM': N_('Bafia (Cameroon)'),
'ksh': N_('Colognian'),
'ksh_DE': N_('Colognian (Germany)'),
'ku': N_('Kurdish'),
'ku_TR': N_('Kurdish (Türkiye)'),
'kw': N_('Cornish'),
'kw_GB': N_('Cornish (United Kingdom)'),
'ky': N_('Kyrgyz'),
'ky_KG': N_('Kyrgyz (Kyrgyzstan)'),
'la': N_('Latin'),
'la_VA': N_('Latin (Vatican City)'),
'lag': N_('Langi'),
'lag_TZ': N_('Langi (Tanzania)'),
'lb': N_('Luxembourgish'),
'lb_LU': N_('Luxembourgish (Luxembourg)'),
'lg': N_('Ganda'),
'lg_UG': N_('Ganda (Uganda)'),
'lij': N_('Ligurian'),
'lij_IT': N_('Ligurian (Italy)'),
'lkt': N_('Lakota'),
'lkt_US': N_('Lakota (United States)'),
'lmo': N_('Lombard'),
'lmo_IT': N_('Lombard (Italy)'),
'ln': N_('Lingala'),
'ln_AO': N_('Lingala (Angola)'),
'ln_CD': N_('Lingala (Congo - Kinshasa)'),
'ln_CF': N_('Lingala (Central African Republic)'),
'ln_CG': N_('Lingala (Congo - Brazzaville)'),
'lo': N_('Lao'),
'lo_LA': N_('Lao (Laos)'),
'lrc': N_('Northern Luri'),
'lrc_IQ': N_('Northern Luri (Iraq)'),
'lrc_IR': N_('Northern Luri (Iran)'),
'lt': N_('Lithuanian'),
'lt_LT': N_('Lithuanian (Lithuania)'),
'lu': N_('Luba-Katanga'),
'lu_CD': N_('Luba-Katanga (Congo - Kinshasa)'),
'luo': N_('Luo'),
'luo_KE': N_('Luo (Kenya)'),
'luy': N_('Luyia'),
'luy_KE': N_('Luyia (Kenya)'),
'lv': N_('Latvian'),
'lv_LV': N_('Latvian (Latvia)'),
'mai': N_('Maithili'),
'mai_IN': N_('Maithili (India)'),
'mas': N_('Masai'),
'mas_KE': N_('Masai (Kenya)'),
'mas_TZ': N_('Masai (Tanzania)'),
'mdf': N_('Moksha'),
'mdf_RU': N_('Moksha (Russia)'),
'mer': N_('Meru'),
'mer_KE': N_('Meru (Kenya)'),
'mfe': N_('Morisyen'),
'mfe_MU': N_('Morisyen (Mauritius)'),
'mg': N_('Malagasy'),
'mg_MG': N_('Malagasy (Madagascar)'),
'mgh': N_('Makhuwa-Meetto'),
'mgh_MZ': N_('Makhuwa-Meetto (Mozambique)'),
'mgo': N_('Metaʼ'),
'mgo_CM': N_('Metaʼ (Cameroon)'),
'mi': N_('Māori'),
'mi_NZ': N_('Māori (New Zealand)'),
'mk': N_('Macedonian'),
'mk_MK': N_('Macedonian (North Macedonia)'),
'ml': N_('Malayalam'),
'ml_IN': N_('Malayalam (India)'),
'mn': N_('Mongolian'),
'mn_Cyrl': N_('Mongolian (Cyrillic)'),
'mn_Cyrl_MN': N_('Mongolian (Cyrillic) (Mongolia)'),
'mn_MN': N_('Mongolian (Mongolia)'),
'mn_Mong': N_('Mongolian (Mongolian)'),
'mn_Mong_CN': N_('Mongolian (Mongolian) (China)'),
'mn_Mong_MN': N_('Mongolian (Mongolian) (Mongolia)'),
'mni': N_('Manipuri'),
'mni_Beng': N_('Manipuri (Bangla)'),
'mni_Beng_IN': N_('Manipuri (Bangla) (India)'),
'mni_Mtei': N_('Manipuri (Meitei Mayek)'),
'mni_Mtei_IN': N_('Manipuri (Meitei Mayek) (India)'),
'moh': N_('Mohawk'),
'moh_CA': N_('Mohawk (Canada)'),
'mr': N_('Marathi'),
'mr_IN': N_('Marathi (India)'),
'ms': N_('Malay'),
'ms_Arab': N_('Malay (Arabic)'),
'ms_Arab_BN': N_('Malay (Arabic) (Brunei)'),
'ms_Arab_MY': N_('Malay (Arabic) (Malaysia)'),
'ms_BN': N_('Malay (Brunei)'),
'ms_ID': N_('Malay (Indonesia)'),
'ms_MY': N_('Malay (Malaysia)'),
'ms_SG': N_('Malay (Singapore)'),
'mt': N_('Maltese'),
'mt_MT': N_('Maltese (Malta)'),
'mua': N_('Mundang'),
'mua_CM': N_('Mundang (Cameroon)'),
'mus': N_('Muscogee'),
'mus_US': N_('Muscogee (United States)'),
'my': N_('Burmese'),
'my_MM': N_('Burmese (Myanmar (Burma))'),
'myv': N_('Erzya'),
'myv_RU': N_('Erzya (Russia)'),
'mzn': N_('Mazanderani'),
'mzn_IR': N_('Mazanderani (Iran)'),
'naq': N_('Nama'),
'naq_NA': N_('Nama (Namibia)'),
'nb': N_('Norwegian Bokmål'),
'nb_NO': N_('Norwegian Bokmål (Norway)'),
'nb_SJ': N_('Norwegian Bokmål (Svalbard & Jan Mayen)'),
'nd': N_('North Ndebele'),
'nd_ZW': N_('North Ndebele (Zimbabwe)'),
'nds': N_('Low German'),
'nds_DE': N_('Low German (Germany)'),
'nds_NL': N_('Low German (Netherlands)'),
'ne': N_('Nepali'),
'ne_IN': N_('Nepali (India)'),
'ne_NP': N_('Nepali (Nepal)'),
'nl': N_('Dutch'),
'nl_AW': N_('Dutch (Aruba)'),
'nl_BE': N_('Dutch (Belgium)'),
'nl_BQ': N_('Dutch (Caribbean Netherlands)'),
'nl_CW': N_('Dutch (Curaçao)'),
'nl_NL': N_('Dutch (Netherlands)'),
'nl_SR': N_('Dutch (Suriname)'),
'nl_SX': N_('Dutch (Sint Maarten)'),
'nmg': N_('Kwasio'),
'nmg_CM': N_('Kwasio (Cameroon)'),
'nn': N_('Norwegian Nynorsk'),
'nn_NO': N_('Norwegian Nynorsk (Norway)'),
'nnh': N_('Ngiemboon'),
'nnh_CM': N_('Ngiemboon (Cameroon)'),
'no': N_('Norwegian'),
'nqo': N_('N’Ko'),
'nqo_GN': N_('N’Ko (Guinea)'),
'nr': N_('South Ndebele'),
'nr_ZA': N_('South Ndebele (South Africa)'),
'nso': N_('Northern Sotho'),
'nso_ZA': N_('Northern Sotho (South Africa)'),
'nus': N_('Nuer'),
'nus_SS': N_('Nuer (South Sudan)'),
'nv': N_('Navajo'),
'nv_US': N_('Navajo (United States)'),
'ny': N_('Nyanja'),
'ny_MW': N_('Nyanja (Malawi)'),
'nyn': N_('Nyankole'),
'nyn_UG': N_('Nyankole (Uganda)'),
'oc': N_('Occitan'),
'oc_ES': N_('Occitan (Spain)'),
'oc_FR': N_('Occitan (France)'),
'om': N_('Oromo'),
'om_ET': N_('Oromo (Ethiopia)'),
'om_KE': N_('Oromo (Kenya)'),
'or': N_('Odia'),
'or_IN': N_('Odia (India)'),
'os': N_('Ossetic'),
'os_GE': N_('Ossetic (Georgia)'),
'os_RU': N_('Ossetic (Russia)'),
'osa': N_('Osage'),
'osa_US': N_('Osage (United States)'),
'pa': N_('Punjabi'),
'pa_Arab': N_('Punjabi (Arabic)'),
'pa_Arab_PK': N_('Punjabi (Arabic) (Pakistan)'),
'pa_Guru': N_('Punjabi (Gurmukhi)'),
'pa_Guru_IN': N_('Punjabi (Gurmukhi) (India)'),
'pa_IN': N_('Punjabi (India)'),
'pa_PK': N_('Punjabi (Pakistan)'),
'pap': N_('Papiamento'),
'pap_AW': N_('Papiamento (Aruba)'),
'pap_CW': N_('Papiamento (Curaçao)'),
'pcm': N_('Nigerian Pidgin'),
'pcm_NG': N_('Nigerian Pidgin (Nigeria)'),
'pis': N_('Pijin'),
'pis_SB': N_('Pijin (Solomon Islands)'),
'pl': N_('Polish'),
'pl_PL': N_('Polish (Poland)'),
'prg': N_('Prussian'),
'prg_001': N_('Prussian (world)'),
'ps': N_('Pashto'),
'ps_AF': N_('Pashto (Afghanistan)'),
'ps_PK': N_('Pashto (Pakistan)'),
'pt': N_('Portuguese'),
'pt_AO': N_('Portuguese (Angola)'),
'pt_BR': N_('Portuguese (Brazil)'),
'pt_CH': N_('Portuguese (Switzerland)'),
'pt_CV': N_('Portuguese (Cape Verde)'),
'pt_GQ': N_('Portuguese (Equatorial Guinea)'),
'pt_GW': N_('Portuguese (Guinea-Bissau)'),
'pt_LU': N_('Portuguese (Luxembourg)'),
'pt_MO': N_('Portuguese (Macao)'),
'pt_MZ': N_('Portuguese (Mozambique)'),
'pt_PT': N_('Portuguese (Portugal)'),
'pt_ST': N_('Portuguese (São Tomé & Príncipe)'),
'pt_TL': N_('Portuguese (Timor-Leste)'),
'qu': N_('Quechua'),
'qu_BO': N_('Quechua (Bolivia)'),
'qu_EC': N_('Quechua (Ecuador)'),
'qu_PE': N_('Quechua (Peru)'),
'quc': N_('Kʼicheʼ'),
'quc_GT': N_('Kʼicheʼ (Guatemala)'),
'raj': N_('Rajasthani'),
'raj_IN': N_('Rajasthani (India)'),
'rhg': N_('Rohingya'),
'rhg_Rohg': N_('Rohingya (Hanifi)'),
'rhg_Rohg_BD': N_('Rohingya (Hanifi) (Bangladesh)'),
'rhg_Rohg_MM': N_('Rohingya (Hanifi) (Myanmar (Burma))'),
'rif': N_('Riffian'),
'rif_MA': N_('Riffian (Morocco)'),
'rm': N_('Romansh'),
'rm_CH': N_('Romansh (Switzerland)'),
'rn': N_('Rundi'),
'rn_BI': N_('Rundi (Burundi)'),
'ro': N_('Romanian'),
'ro_MD': N_('Romanian (Moldova)'),
'ro_RO': N_('Romanian (Romania)'),
'rof': N_('Rombo'),
'rof_TZ': N_('Rombo (Tanzania)'),
'ru': N_('Russian'),
'ru_BY': N_('Russian (Belarus)'),
'ru_KG': N_('Russian (Kyrgyzstan)'),
'ru_KZ': N_('Russian (Kazakhstan)'),
'ru_MD': N_('Russian (Moldova)'),
'ru_RU': N_('Russian (Russia)'),
'ru_UA': N_('Russian (Ukraine)'),
'rw': N_('Kinyarwanda'),
'rw_RW': N_('Kinyarwanda (Rwanda)'),
'rwk': N_('Rwa'),
'rwk_TZ': N_('Rwa (Tanzania)'),
'sa': N_('Sanskrit'),
'sa_IN': N_('Sanskrit (India)'),
'sah': N_('Yakut'),
'sah_RU': N_('Yakut (Russia)'),
'saq': N_('Samburu'),
'saq_KE': N_('Samburu (Kenya)'),
'sat': N_('Santali'),
'sat_Deva': N_('Santali (Devanagari)'),
'sat_Deva_IN': N_('Santali (Devanagari) (India)'),
'sat_Olck': N_('Santali (Ol Chiki)'),
'sat_Olck_IN': N_('Santali (Ol Chiki) (India)'),
'sbp': N_('Sangu'),
'sbp_TZ': N_('Sangu (Tanzania)'),
'sc': N_('Sardinian'),
'sc_IT': N_('Sardinian (Italy)'),
'scn': N_('Sicilian'),
'scn_IT': N_('Sicilian (Italy)'),
'sd': N_('Sindhi'),
'sd_Arab': N_('Sindhi (Arabic)'),
'sd_Arab_PK': N_('Sindhi (Arabic) (Pakistan)'),
'sd_Deva': N_('Sindhi (Devanagari)'),
'sd_Deva_IN': N_('Sindhi (Devanagari) (India)'),
'sdh': N_('Southern Kurdish'),
'sdh_IQ': N_('Southern Kurdish (Iraq)'),
'sdh_IR': N_('Southern Kurdish (Iran)'),
'se': N_('Northern Sami'),
'se_FI': N_('Northern Sami (Finland)'),
'se_NO': N_('Northern Sami (Norway)'),
'se_SE': N_('Northern Sami (Sweden)'),
'seh': N_('Sena'),
'seh_MZ': N_('Sena (Mozambique)'),
'ses': N_('Koyraboro Senni'),
'ses_ML': N_('Koyraboro Senni (Mali)'),
'sg': N_('Sango'),
'sg_CF': N_('Sango (Central African Republic)'),
'shi': N_('Tachelhit'),
'shi_Latn': N_('Tachelhit (Latin)'),
'shi_Latn_MA': N_('Tachelhit (Latin) (Morocco)'),
'shi_Tfng': N_('Tachelhit (Tifinagh)'),
'shi_Tfng_MA': N_('Tachelhit (Tifinagh) (Morocco)'),
'shn': N_('Shan'),
'shn_MM': N_('Shan (Myanmar (Burma))'),
'shn_TH': N_('Shan (Thailand)'),
'si': N_('Sinhala'),
'si_LK': N_('Sinhala (Sri Lanka)'),
'sid': N_('Sidamo'),
'sid_ET': N_('Sidamo (Ethiopia)'),
'sk': N_('Slovak'),
'sk_SK': N_('Slovak (Slovakia)'),
'sl': N_('Slovenian'),
'sl_SI': N_('Slovenian (Slovenia)'),
'sma': N_('Southern Sami'),
'sma_NO': N_('Southern Sami (Norway)'),
'sma_SE': N_('Southern Sami (Sweden)'),
'smj': N_('Lule Sami'),
'smj_NO': N_('Lule Sami (Norway)'),
'smj_SE': N_('Lule Sami (Sweden)'),
'smn': N_('Inari Sami'),
'smn_FI': N_('Inari Sami (Finland)'),
'sms': N_('Skolt Sami'),
'sms_FI': N_('Skolt Sami (Finland)'),
'sn': N_('Shona'),
'sn_ZW': N_('Shona (Zimbabwe)'),
'so': N_('Somali'),
'so_DJ': N_('Somali (Djibouti)'),
'so_ET': N_('Somali (Ethiopia)'),
'so_KE': N_('Somali (Kenya)'),
'so_SO': N_('Somali (Somalia)'),
'sq': N_('Albanian'),
'sq_AL': N_('Albanian (Albania)'),
'sq_MK': N_('Albanian (North Macedonia)'),
'sq_XK': N_('Albanian (Kosovo)'),
'sr': N_('Serbian'),
'sr_Cyrl': N_('Serbian (Cyrillic)'),
'sr_Cyrl_BA': N_('Serbian (Cyrillic) (Bosnia & Herzegovina)'),
'sr_Cyrl_ME': N_('Serbian (Cyrillic) (Montenegro)'),
'sr_Cyrl_RS': N_('Serbian (Cyrillic) (Serbia)'),
'sr_Cyrl_XK': N_('Serbian (Cyrillic) (Kosovo)'),
'sr_Latn': N_('Serbian (Latin)'),
'sr_Latn_BA': N_('Serbian (Latin) (Bosnia & Herzegovina)'),
'sr_Latn_ME': N_('Serbian (Latin) (Montenegro)'),
'sr_Latn_RS': N_('Serbian (Latin) (Serbia)'),
'sr_Latn_XK': N_('Serbian (Latin) (Kosovo)'),
'ss': N_('Swati'),
'ss_SZ': N_('Swati (Eswatini)'),
'ss_ZA': N_('Swati (South Africa)'),
'ssy': N_('Saho'),
'ssy_ER': N_('Saho (Eritrea)'),
'st': N_('Southern Sotho'),
'st_LS': N_('Southern Sotho (Lesotho)'),
'st_ZA': N_('Southern Sotho (South Africa)'),
'su': N_('Sundanese'),
'su_Latn': N_('Sundanese (Latin)'),
'su_Latn_ID': N_('Sundanese (Latin) (Indonesia)'),
'sv': N_('Swedish'),
'sv_AX': N_('Swedish (Åland Islands)'),
'sv_FI': N_('Swedish (Finland)'),
'sv_SE': N_('Swedish (Sweden)'),
'sw': N_('Swahili'),
'sw_CD': N_('Swahili (Congo - Kinshasa)'),
'sw_KE': N_('Swahili (Kenya)'),
'sw_TZ': N_('Swahili (Tanzania)'),
'sw_UG': N_('Swahili (Uganda)'),
'syr': N_('Syriac'),
'syr_IQ': N_('Syriac (Iraq)'),
'syr_SY': N_('Syriac (Syria)'),
'szl': N_('Silesian'),
'szl_PL': N_('Silesian (Poland)'),
'ta': N_('Tamil'),
'ta_IN': N_('Tamil (India)'),
'ta_LK': N_('Tamil (Sri Lanka)'),
'ta_MY': N_('Tamil (Malaysia)'),
'ta_SG': N_('Tamil (Singapore)'),
'te': N_('Telugu'),
'te_IN': N_('Telugu (India)'),
'teo': N_('Teso'),
'teo_KE': N_('Teso (Kenya)'),
'teo_UG': N_('Teso (Uganda)'),
'tg': N_('Tajik'),
'tg_Cyrl': N_('Tajik (Cyrillic)'),
'tg_Cyrl_TJ': N_('Tajik (Cyrillic) (Tajikistan)'),
'tg_TJ': N_('Tajik (Tajikistan)'),
'th': N_('Thai'),
'th_TH': N_('Thai (Thailand)'),
'ti': N_('Tigrinya'),
'ti_ER': N_('Tigrinya (Eritrea)'),
'ti_ET': N_('Tigrinya (Ethiopia)'),
'tig': N_('Tigre'),
'tig_ER': N_('Tigre (Eritrea)'),
'tk': N_('Turkmen'),
'tk_TM': N_('Turkmen (Turkmenistan)'),
'tn': N_('Tswana'),
'tn_BW': N_('Tswana (Botswana)'),
'tn_ZA': N_('Tswana (South Africa)'),
'to': N_('Tongan'),
'to_TO': N_('Tongan (Tonga)'),
'tok': N_('Toki Pona'),
'tok_001': N_('Toki Pona (world)'),
'tpi': N_('Tok Pisin'),
'tpi_PG': N_('Tok Pisin (Papua New Guinea)'),
'tr': N_('Turkish'),
'tr_CY': N_('Turkish (Cyprus)'),
'tr_TR': N_('Turkish (Türkiye)'),
'trv': N_('Taroko'),
'trv_TW': N_('Taroko (Taiwan)'),
'trw': N_('Torwali'),
'trw_PK': N_('Torwali (Pakistan)'),
'ts': N_('Tsonga'),
'ts_ZA': N_('Tsonga (South Africa)'),
'tt': N_('Tatar'),
'tt_RU': N_('Tatar (Russia)'),
'twq': N_('Tasawaq'),
'twq_NE': N_('Tasawaq (Niger)'),
'tzm': N_('Central Atlas Tamazight'),
'tzm_MA': N_('Central Atlas Tamazight (Morocco)'),
'ug': N_('Uyghur'),
'ug_CN': N_('Uyghur (China)'),
'uk': N_('Ukrainian'),
'uk_UA': N_('Ukrainian (Ukraine)'),
'und': N_('Unknown language'),
'ur': N_('Urdu'),
'ur_IN': N_('Urdu (India)'),
'ur_PK': N_('Urdu (Pakistan)'),
'uz': N_('Uzbek'),
'uz_Arab': N_('Uzbek (Arabic)'),
'uz_Arab_AF': N_('Uzbek (Arabic) (Afghanistan)'),
'uz_Cyrl': N_('Uzbek (Cyrillic)'),
'uz_Cyrl_UZ': N_('Uzbek (Cyrillic) (Uzbekistan)'),
'uz_Latn': N_('Uzbek (Latin)'),
'uz_Latn_UZ': N_('Uzbek (Latin) (Uzbekistan)'),
'vai': N_('Vai'),
'vai_Latn': N_('Vai (Latin)'),
'vai_Latn_LR': N_('Vai (Latin) (Liberia)'),
'vai_Vaii': N_('Vai (Vai)'),
'vai_Vaii_LR': N_('Vai (Vai) (Liberia)'),
've': N_('Venda'),
've_ZA': N_('Venda (South Africa)'),
'vec': N_('Venetian'),
'vec_IT': N_('Venetian (Italy)'),
'vi': N_('Vietnamese'),
'vi_VN': N_('Vietnamese (Vietnam)'),
'vo': N_('Volapük'),
'vo_001': N_('Volapük (world)'),
'vun': N_('Vunjo'),
'vun_TZ': N_('Vunjo (Tanzania)'),
'wa': N_('Walloon'),
'wa_BE': N_('Walloon (Belgium)'),
'wae': N_('Walser'),
'wae_CH': N_('Walser (Switzerland)'),
'wal': N_('Wolaytta'),
'wal_ET': N_('Wolaytta (Ethiopia)'),
'wbp': N_('Warlpiri'),
'wbp_AU': N_('Warlpiri (Australia)'),
'wo': N_('Wolof'),
'wo_SN': N_('Wolof (Senegal)'),
'xh': N_('Xhosa'),
'xh_ZA': N_('Xhosa (South Africa)'),
'xog': N_('Soga'),
'xog_UG': N_('Soga (Uganda)'),
'yav': N_('Yangben'),
'yav_CM': N_('Yangben (Cameroon)'),
'yi': N_('Yiddish'),
'yi_001': N_('Yiddish (world)'),
'yo': N_('Yoruba'),
'yo_BJ': N_('Yoruba (Benin)'),
'yo_NG': N_('Yoruba (Nigeria)'),
'yrl': N_('Nheengatu'),
'yrl_BR': N_('Nheengatu (Brazil)'),
'yrl_CO': N_('Nheengatu (Colombia)'),
'yrl_VE': N_('Nheengatu (Venezuela)'),
'yue': N_('Cantonese'),
'yue_Hans': N_('Cantonese (Simplified)'),
'yue_Hans_CN': N_('Cantonese (Simplified) (China)'),
'yue_Hant': N_('Cantonese (Traditional)'),
'yue_Hant_HK': N_('Cantonese (Traditional) (Hong Kong)'),
'zgh': N_('Standard Moroccan Tamazight'),
'zgh_MA': N_('Standard Moroccan Tamazight (Morocco)'),
'zh': N_('Chinese'),
'zh_Hans': N_('Chinese (Simplified)'),
'zh_Hans_CN': N_('Chinese (Simplified) (China)'),
'zh_Hans_HK': N_('Chinese (Simplified) (Hong Kong)'),
'zh_Hans_MO': N_('Chinese (Simplified) (Macao)'),
'zh_Hans_SG': N_('Chinese (Simplified) (Singapore)'),
'zh_Hant': N_('Chinese (Traditional)'),
'zh_Hant_HK': N_('Chinese (Traditional) (Hong Kong)'),
'zh_Hant_MO': N_('Chinese (Traditional) (Macao)'),
'zh_Hant_TW': N_('Chinese (Traditional) (Taiwan)'),
'zu': N_('Zulu'),
'zu_ZA': N_('Zulu (South Africa)'),
}
| 39,193
|
Python
|
.py
| 1,054
| 32.241935
| 80
| 0.52125
|
metabrainz/picard
| 3,687
| 383
| 10
|
GPL-2.0
|
9/5/2024, 5:11:02 PM (Europe/Amsterdam)
|
11,101
|
languages.py
|
metabrainz_picard/picard/const/languages.py
|
# -*- coding: utf-8 -*-
#
# Picard, the next-generation MusicBrainz tagger
#
# Copyright (C) 2007 Lukáš Lalinský
# Copyright (C) 2014, 2018, 2020, 2024 Laurent Monin
# Copyright (C) 2017 Sambhav Kothari
# Copyright (C) 2018 Shen-Ta Hsieh
# Copyright (C) 2018-2020, 2023 Philipp Wolfer
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
from picard.i18n import N_
# List of available user interface languages
# Each entry is a 3-tuple:
#   (locale code, language name in that language, N_-wrapped English name).
# Entries that are commented out are not currently offered in the UI.
UI_LANGUAGES = [
    # ('af', 'Afrikaans', N_('Afrikaans')),
    ('ar', 'العربية', N_('Arabic')),
    # ('ast', 'Asturian', N_('Asturian')),
    # ('bg', 'Български', N_('Bulgarian')),
    ('ca', 'Català', N_('Catalan')),
    ('cs', 'Čeština', N_('Czech')),
    # ('cy', 'Cymraeg', N_('Welsh')),
    ('da', 'Dansk', N_('Danish')),
    ('de', 'Deutsch', N_('German')),
    # ('de_CH', 'Deutsch (Schweiz)', N_('German (Switzerland)')),
    ('el', 'ελληνικά', N_('Greek')),
    ('en', 'English', N_('English')),
    ('en_AU', 'English (Australia)', N_('English (Australia)')),
    ('en_CA', 'English (Canada)', N_('English (Canada)')),
    ('en_GB', 'English (United Kingdom)', N_('English (United Kingdom)')),
    # ('eo', 'Esperanto', N_('Esperanto')),
    ('es', 'Español', N_('Spanish')),
    ('et', 'Eesti', N_('Estonian')),
    # ('fa', 'فارسی', N_('Persian')),
    ('fi', 'Suomi', N_('Finnish')),
    # ('fo', 'Føroyskt', N_('Faroese')),
    ('fr', 'Français', N_('French')),
    ('fr_CA', 'Français (Canada)', N_('French (Canada)')),
    # ('fy', 'Frysk', N_('Frisian')),
    ('gl', 'Galego', N_('Galician')),
    ('he', 'עברית', N_('Hebrew')),
    # ('hi', 'हिन्दी', N_('Hindi')),
    ('hu', 'Magyar', N_('Hungarian')),
    # ('id', 'Bahasa Indonesia', N_('Indonesian')),
    ('is', 'Íslenska', N_('Icelandic')),
    ('it', 'Italiano', N_('Italian')),
    ('ja', '日本語', N_('Japanese')),
    # ('kn', 'ಕನ್ನಡ', N_('Kannada')),
    ('ko', '한국어', N_('Korean')),
    ('lt', 'Lietuvių', N_('Lithuanian')),
    ('ms_MY', 'Bahasa Melayu (Malaysia)', N_('Malay (Malaysia)')),
    ('nb', 'Norsk bokmål', N_('Norwegian Bokmål')),
    # ('nds', 'Plattdüütsch', N_('Low German')),
    ('nl', 'Nederlands', N_('Dutch')),
    ('oc', 'Occitan', N_('Occitan')),
    ('pl', 'Polski', N_('Polish')),
    ('pt', 'Português', N_('Portuguese')),
    ('pt_BR', 'Português (Brasil)', N_('Portuguese (Brazil)')),
    ('ro', 'Română', N_('Romanian')),
    ('ru', 'Pyccĸий', N_('Russian')),
    # ('sco', 'Scots leid', N_('Scots')),
    ('sk', 'Slovenčina', N_('Slovak')),
    ('sl', 'Slovenščina', N_('Slovenian')),
    ('sq', 'Shqip', N_('Albanian')),
    # ('sr', 'Србин', N_('Serbian')),
    ('sv', 'Svenska', N_('Swedish')),
    # ('ta', 'தமிழ்', N_('Tamil')),
    ('tr', 'Türkçe', N_('Turkish')),
    ('uk', 'Украї́нська', N_('Ukrainian')),
    ('zh_CN', '中文(中国大陆)', N_('Chinese (China)')),
    ('zh_TW', '中文(台灣)', N_('Chinese (Taiwan)')),
]
| 3,644
|
Python
|
.py
| 83
| 38.361446
| 80
| 0.568955
|
metabrainz/picard
| 3,687
| 383
| 10
|
GPL-2.0
|
9/5/2024, 5:11:02 PM (Europe/Amsterdam)
|
11,102
|
cover_processing.py
|
metabrainz_picard/picard/const/cover_processing.py
|
# -*- coding: utf-8 -*-
#
# Picard, the next-generation MusicBrainz tagger
#
# Copyright (C) 2024 Bob Swift
# Copyright (C) 2024 Giorgio Fontanive
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
from collections import namedtuple
from enum import IntEnum
from picard.i18n import N_
class ResizeModes(IntEnum):
    """Resize strategies for cover art image processing.

    The integer values are stored in the user settings, so they must remain
    stable across releases.
    """
    # NOTE: the original definition had trailing commas ("= 0,"), which made
    # each raw value a 1-tuple that Enum silently unpacked back to an int.
    # The commas are removed so the values are plain ints as intended.
    MAINTAIN_ASPECT_RATIO = 0
    SCALE_TO_WIDTH = 1
    SCALE_TO_HEIGHT = 2
    CROP_TO_FIT = 3
    STRETCH_TO_FIT = 4
# A single resize mode entry: the ResizeModes value, the combo-box title and
# the tooltip shown in the options dialog (both translatable).
CoverResizeMode = namedtuple('CoverResizeMode', ['mode', 'title', 'tooltip'])

COVER_RESIZE_MODES = (
    # Items are entered in the order they should appear in the combo box.
    # The number is the mode number stored in the settings and may be
    # different from the order of appearance in the combo box. This will
    # allow modes to be added or removed and re-ordered if required.
    CoverResizeMode(ResizeModes.MAINTAIN_ASPECT_RATIO, N_('Maintain aspect ratio'), N_(
        "<p>"
        "Scale the source image so that it fits within the target dimensions."
        "</p><p>"
        "One of the final image dimensions may be less than the target dimension if "
        "the source image and target dimensions have different aspect ratios."
        "</p><p>"
        "For example, a 2000x1000 image resized to target dimensions of "
        "1000x1000 would result in a final image size of 1000x500."
        "</p>"
    )),
    CoverResizeMode(ResizeModes.SCALE_TO_WIDTH, N_('Scale to width'), N_(
        "<p>"
        "Scale the width of the source image to the target width while keeping aspect ratio."
        "</p><p>"
        "For example, a 2000x1000 image resized to a target width of "
        "1000 would result in a final image size of 1000x500."
        "</p>"
    )),
    CoverResizeMode(ResizeModes.SCALE_TO_HEIGHT, N_('Scale to height'), N_(
        "<p>"
        "Scale the height of the source image to the target height while keeping aspect ratio."
        "</p><p>"
        "For example, a 1000x2000 image resized to a target height of "
        "1000 would result in a final image size of 500x1000."
        "</p>"
    )),
    CoverResizeMode(ResizeModes.CROP_TO_FIT, N_('Crop to fit'), N_(
        "<p>"
        "Scale the source image so that it completely fills the target dimensions "
        "in both directions."
        "</p><p>"
        # FIX: added the missing trailing space after "ratios" — the adjacent
        # string literals are concatenated, so the tooltip previously rendered
        # "aspect ratiosthen there will be".
        "If the source image and target dimensions have different aspect ratios "
        "then there will be overflow in one direction which will be (center) cropped."
        "</p><p>"
        "For example, a 500x1000 image resized to target dimensions of "
        "1000x1000 would first scale up to 1000x2000, then the excess height "
        "would be center cropped resulting in the final image size of 1000x1000."
        "</p>"
    )),
    CoverResizeMode(ResizeModes.STRETCH_TO_FIT, N_('Stretch to fit'), N_(
        "<p>"
        "Stretch the image to exactly fit the specified dimensions, "
        "distorting it if necessary."
        "</p><p>"
        "For example, a 500x1000 image with target dimension of 1000x1000 "
        "would be stretched horizontally resulting in the final image "
        "size of 1000x1000."
        "</p>"
    )),
)

# Image formats cover art can be converted to on save.
COVER_CONVERTING_FORMATS = ('JPEG', 'PNG', 'WebP', 'TIFF')
# Delay (seconds) between cover processing steps to keep the UI responsive.
COVER_PROCESSING_SLEEP = 0.001
| 3,924
|
Python
|
.py
| 88
| 38.931818
| 95
| 0.679487
|
metabrainz/picard
| 3,687
| 383
| 10
|
GPL-2.0
|
9/5/2024, 5:11:02 PM (Europe/Amsterdam)
|
11,103
|
defaults.py
|
metabrainz_picard/picard/const/defaults.py
|
# -*- coding: utf-8 -*-
#
# Picard, the next-generation MusicBrainz tagger
#
# Copyright (C) 2007, 2014, 2016 Lukáš Lalinský
# Copyright (C) 2014, 2019-2022, 2024 Philipp Wolfer
# Copyright (C) 2014-2016, 2018-2021, 2024 Laurent Monin
# Copyright (C) 2015 Ohm Patel
# Copyright (C) 2016 Rahul Raturi
# Copyright (C) 2016 Wieland Hoffmann
# Copyright (C) 2016-2017 Frederik “Freso” S. Olesen
# Copyright (C) 2017 Antonio Larrosa
# Copyright (C) 2017 Sambhav Kothari
# Copyright (C) 2018 Vishal Choudhary
# Copyright (C) 2018, 2021, 2023 Bob Swift
# Copyright (C) 2020 RomFouq
# Copyright (C) 2021 Gabriel Ferreira
# Copyright (C) 2021 Vladislav Karbovskii
# Copyright (C) 2024 Giorgio Fontanive
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import os
from PyQt6.QtCore import QStandardPaths
from picard.const import (
CACHE_SIZE_DISPLAY_UNIT,
RELEASE_PRIMARY_GROUPS,
RELEASE_SECONDARY_GROUPS,
)
from picard.const.cover_processing import ResizeModes
from picard.const.sys import (
IS_MACOS,
IS_WIN,
)
from picard.i18n import N_
from picard.util import system_supports_long_paths
from picard.util.cdrom import get_default_cdrom_drives
from picard.ui.enums import MainAction
from picard.ui.theme import UiTheme
# Character substituted for characters that are forbidden in file names.
DEFAULT_REPLACEMENT = '_'
# Per-character replacements applied for Windows-compatible file naming.
DEFAULT_WIN_COMPAT_REPLACEMENTS = {
    '*': DEFAULT_REPLACEMENT,
    ':': DEFAULT_REPLACEMENT,
    '<': DEFAULT_REPLACEMENT,
    '>': DEFAULT_REPLACEMENT,
    '?': DEFAULT_REPLACEMENT,
    '|': DEFAULT_REPLACEMENT,
    '"': DEFAULT_REPLACEMENT,
}
# Platform music folder as reported by Qt.
DEFAULT_MUSIC_DIR = QStandardPaths.writableLocation(QStandardPaths.StandardLocation.MusicLocation)
DEFAULT_RELEASE_SCORE = 0.5
# All primary and secondary release group types start with the same score.
DEFAULT_RELEASE_TYPE_SCORES = [(g, DEFAULT_RELEASE_SCORE) for g in list(RELEASE_PRIMARY_GROUPS.keys()) + list(RELEASE_SECONDARY_GROUPS.keys())]
# Cover Art Archive image defaults (size in pixels, type include/exclude lists).
DEFAULT_CAA_IMAGE_SIZE = 500
DEFAULT_CAA_IMAGE_TYPE_INCLUDE = ('front',)
DEFAULT_CAA_IMAGE_TYPE_EXCLUDE = ('matrix/runout', 'raw/unedited', 'watermark')
# File name pattern used to find cover art next to the audio files.
DEFAULT_LOCAL_COVER_ART_REGEX = r'^(?:cover|folder|albumart)(.*)\.(?:jpe?g|png|gif|tiff?|webp)$'
DEFAULT_CURRENT_BROWSER_PATH = QStandardPaths.writableLocation(QStandardPaths.StandardLocation.HomeLocation)
# Default query limit
DEFAULT_QUERY_LIMIT = 50
DEFAULT_DRIVES = get_default_cdrom_drives()
DEFAULT_CA_NEVER_REPLACE_TYPE_INCLUDE = ('front',)
DEFAULT_CA_NEVER_REPLACE_TYPE_EXCLUDE = ('matrix/runout', 'raw/unedited', 'watermark')
# Cover art providers paired with whether they are enabled by default.
DEFAULT_CA_PROVIDERS = [
    ('Cover Art Archive', True),
    ('UrlRelationships', True),
    ('CaaReleaseGroup', True),
    ('Local', False),
]
DEFAULT_COVER_IMAGE_FILENAME = 'cover'
DEFAULT_FPCALC_THREADS = 2
DEFAULT_PROGRAM_UPDATE_LEVEL = 0
# On macOS it is not common that the global menu shows icons
DEFAULT_SHOW_MENU_ICONS = not IS_MACOS
DEFAULT_STARTING_DIR = QStandardPaths.writableLocation(QStandardPaths.StandardLocation.HomeLocation)
DEFAULT_THEME_NAME = str(UiTheme.DEFAULT)
# Order of actions in the main toolbar; '-' denotes a separator.
DEFAULT_TOOLBAR_LAYOUT = (
    MainAction.ADD_DIRECTORY,
    MainAction.ADD_FILES,
    '-',
    MainAction.CLUSTER,
    '-',
    MainAction.AUTOTAG,
    MainAction.ANALYZE,
    MainAction.BROWSER_LOOKUP,
    '-',
    MainAction.SAVE,
    MainAction.VIEW_INFO,
    MainAction.REMOVE,
    '-',
    MainAction.CD_LOOKUP,
    '-',
    MainAction.SUBMIT_ACOUSTID,
)
# Tags shown first in the metadata view ('~'-prefixed names are internal tags).
DEFAULT_TOP_TAGS = [
    'title',
    'artist',
    'album',
    'tracknumber',
    '~length',
    'date',
]
DEFAULT_AUTOBACKUP_DIRECTORY = os.path.normpath(QStandardPaths.writableLocation(QStandardPaths.StandardLocation.DocumentsLocation))
# 100 display units (see CACHE_SIZE_DISPLAY_UNIT) for the network cache.
DEFAULT_CACHE_SIZE_IN_BYTES = 100*CACHE_SIZE_DISPLAY_UNIT
# Long path support is only relevant on Windows; elsewhere it is disabled.
DEFAULT_LONG_PATHS = system_supports_long_paths() if IS_WIN else False
# Default file naming script (Picard tagger script syntax).
DEFAULT_FILE_NAMING_FORMAT = "$if2(%albumartist%,%artist%)/\n" \
    "$if(%albumartist%,%album%/,)\n" \
    "$if($gt(%totaldiscs%,1),$if($gt(%totaldiscs%,9),$num(%discnumber%,2),%discnumber%)-,)" \
    "$if($and(%albumartist%,%tracknumber%),$num(%tracknumber%,2) ,)" \
    "$if(%_multiartist%,%artist% - ,)" \
    "%title%"
DEFAULT_SCRIPT_NAME = N_("My script")
DEFAULT_PROFILE_NAME = N_("My profile")
DEFAULT_COPY_TEXT = N_("(copy)")
DEFAULT_NUMBERED_TITLE_FORMAT = N_("{title} ({count})")
DEFAULT_NAMING_PRESET_ID = "Preset 1"
# strftime format used for displayed timestamps.
DEFAULT_TIME_FORMAT = '%Y-%m-%d %H:%M:%S'
# Cover processing size bounds (pixels) and default resize/convert settings.
DEFAULT_COVER_MIN_SIZE = 250
DEFAULT_COVER_MAX_SIZE = 1000
DEFAULT_COVER_RESIZE_MODE = ResizeModes.MAINTAIN_ASPECT_RATIO
DEFAULT_COVER_CONVERTING_FORMAT = 'JPEG'
| 5,010
|
Python
|
.py
| 131
| 35.641221
| 143
| 0.742409
|
metabrainz/picard
| 3,687
| 383
| 10
|
GPL-2.0
|
9/5/2024, 5:11:02 PM (Europe/Amsterdam)
|
11,104
|
__init__.py
|
metabrainz_picard/picard/const/__init__.py
|
# -*- coding: utf-8 -*-
#
# Picard, the next-generation MusicBrainz tagger
#
# Copyright (C) 2007, 2014, 2016 Lukáš Lalinský
# Copyright (C) 2014, 2019-2022 Philipp Wolfer
# Copyright (C) 2014-2016, 2018-2021, 2023 Laurent Monin
# Copyright (C) 2015 Ohm Patel
# Copyright (C) 2016 Rahul Raturi
# Copyright (C) 2016 Wieland Hoffmann
# Copyright (C) 2016-2017 Frederik “Freso” S. Olesen
# Copyright (C) 2017 Antonio Larrosa
# Copyright (C) 2017 Sambhav Kothari
# Copyright (C) 2018 Vishal Choudhary
# Copyright (C) 2018, 2021, 2023 Bob Swift
# Copyright (C) 2020 RomFouq
# Copyright (C) 2021 Gabriel Ferreira
# Copyright (C) 2021 Vladislav Karbovskii
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
from collections import OrderedDict
from picard import PICARD_VERSION
from picard.const import appdirs
from picard.const.attributes import MB_ATTRIBUTES
from picard.i18n import N_
# Config directory
# Config directory
USER_DIR = appdirs.config_folder()
USER_PLUGIN_DIR = appdirs.plugin_folder()
# Network Cache default settings
CACHE_SIZE_DISPLAY_UNIT = 1000*1000
# AcoustID client API key
ACOUSTID_KEY = 'v8pQ6oyB'
ACOUSTID_URL = 'https://api.acoustid.org/v2'
# Candidate executable names for the Chromaprint fingerprint calculator.
FPCALC_NAMES = ['fpcalc', 'pyfpcalc']
# MB OAuth client credentials
MUSICBRAINZ_OAUTH_CLIENT_ID = 'ACa9wsDX19cLp-AeEP-vVw'
MUSICBRAINZ_OAUTH_CLIENT_SECRET = 'xIsvXbIuntaLuRRhzuazOA'
# Cover art archive URL
CAA_URL = 'https://coverartarchive.org'
# Prepare documentation URLs: final releases link to versioned docs,
# any pre-release identifier falls back to the latest docs.
if PICARD_VERSION.identifier == 'final':
    DOCS_VERSION = "v{}.{}/".format(PICARD_VERSION.major, PICARD_VERSION.minor)
else:
    DOCS_VERSION = ""  # points to latest version
DOCS_LANGUAGE = 'en'
DOCS_SERVER_URL = "https://picard-docs.musicbrainz.org/"
DOCS_BASE_URL = DOCS_SERVER_URL + DOCS_VERSION + DOCS_LANGUAGE
# URLs
PICARD_URLS = {
    'home': "https://picard.musicbrainz.org/",
    'license': "https://www.gnu.org/licenses/gpl-2.0.html",
    'documentation_server': DOCS_SERVER_URL,  # Shows latest version and tries to match the user's language if available.
    'documentation': DOCS_BASE_URL + "/",
    'troubleshooting': DOCS_BASE_URL + "/troubleshooting/troubleshooting.html",
    'doc_options': DOCS_BASE_URL + "/config/configuration.html",
    'doc_scripting': DOCS_BASE_URL + "/extending/scripting.html",
    'doc_tags_from_filenames': DOCS_BASE_URL + "/usage/tags_from_file_names.html",
    'doc_naming_script_edit': DOCS_BASE_URL + "/config/options_filerenaming_editor.html",
    'doc_cover_art_types': "https://musicbrainz.org/doc/Cover_Art/Types",
    'plugins': "https://picard.musicbrainz.org/plugins/",
    'forum': "https://community.metabrainz.org/c/picard",
    'donate': "https://metabrainz.org/donate",
    'chromaprint': "https://acoustid.org/chromaprint#download",
    'acoustid_apikey': "https://acoustid.org/api-key",
    'acoustid_track': "https://acoustid.org/track/",
}
# Various Artists MBID
VARIOUS_ARTISTS_ID = '89ad4ac3-39f7-470e-963a-56509c546377'
# Artist alias types
ALIAS_TYPE_ARTIST_NAME_ID = '894afba6-2816-3c24-8072-eadb66bd04bc'
ALIAS_TYPE_LEGAL_NAME_ID = 'd4dcd0c0-b341-3612-a332-c0ce797b25cf'
ALIAS_TYPE_SEARCH_HINT_ID = '1937e404-b981-3cb7-8151-4c86ebfc8d8e'
# Special purpose track titles
SILENCE_TRACK_TITLE = '[silence]'
DATA_TRACK_TITLE = '[data track]'
# Release formats
RELEASE_FORMATS = {}
RELEASE_PRIMARY_GROUPS = {}
RELEASE_SECONDARY_GROUPS = {}
RELEASE_STATUS = {}
# Classify the MusicBrainz attributes by their database key prefix into the
# four lookup dicts above (each maps the attribute name to itself).
for k, v in MB_ATTRIBUTES.items():
    if k.startswith('DB:medium_format/name:'):
        RELEASE_FORMATS[v] = v
    elif k.startswith('DB:release_group_primary_type/name:'):
        RELEASE_PRIMARY_GROUPS[v] = v
    elif k.startswith('DB:release_group_secondary_type/name:'):
        RELEASE_SECONDARY_GROUPS[v] = v
    elif k.startswith('DB:release_status/name:'):
        RELEASE_STATUS[v] = v
# TODO: remove those 4 imports before 3.0 release
# Release countries
from picard.const.countries import ( # noqa: F401,E402 # pylint: disable=unused-import
RELEASE_COUNTRIES,
)
# List of available user interface languages
from picard.const.languages import ( # noqa: F401,E402 # pylint: disable=unused-import
UI_LANGUAGES,
)
# List of alias locales
from picard.const.locales import ( # noqa: F401,E402 # pylint: disable=unused-import
ALIAS_LOCALES,
)
# List of available charsets
from picard.const.scripts import ( # noqa: F401,E402 # pylint: disable=unused-import
SCRIPTS,
)
# List of official musicbrainz servers - must support SSL for mblogin requests (such as collections).
MUSICBRAINZ_SERVERS = [
    'musicbrainz.org',
    'beta.musicbrainz.org',
]
# Plugins and Release Versions API
PLUGINS_API_BASE_URL = 'https://picard.musicbrainz.org/api/v2/'
PLUGINS_API = {
    'urls': {
        'plugins': PLUGINS_API_BASE_URL + 'plugins/',
        'download': PLUGINS_API_BASE_URL + 'download/',
        'releases': PLUGINS_API_BASE_URL + 'releases',
    },
}
# Maximum number of covers to draw in a stack in CoverArtThumbnail
MAX_COVERS_TO_STACK = 4
# Update levels available for automatic checking; the integer key is stored
# in the settings, 'name' is the machine name, 'title' the translatable label.
PROGRAM_UPDATE_LEVELS = OrderedDict(
    [
        (
            0, {
                'name': 'stable',
                'title': N_("Stable releases only"),
            }
        ),
        (
            1, {
                'name': 'beta',
                'title': N_("Stable and Beta releases"),
            }
        ),
        (
            2, {
                'name': 'dev',
                'title': N_("Stable, Beta and Dev releases"),
            }
        ),
    ]
)
# Version of the Picard tagger scripting language.
SCRIPT_LANGUAGE_VERSION = '1.1'
| 6,320
|
Python
|
.py
| 158
| 36.278481
| 127
| 0.690313
|
metabrainz/picard
| 3,687
| 383
| 10
|
GPL-2.0
|
9/5/2024, 5:11:02 PM (Europe/Amsterdam)
|
11,105
|
scripts.py
|
metabrainz_picard/picard/const/scripts.py
|
# -*- coding: utf-8 -*-
#
# Picard, the next-generation MusicBrainz tagger
#
# Copyright (C) 2021 Bob Swift
# Copyright (C) 2021 Vladislav Karbovskii
# Copyright (C) 2021-2024 Laurent Monin
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
from picard.i18n import (
N_,
gettext as _,
)
# List of available scripts (character sets)
# List of available scripts (character sets)
# Maps the internal script identifier to its N_-wrapped English display name.
SCRIPTS = {
    'GREEK': N_('Greek'),
    'CYRILLIC': N_('Cyrillic'),
    'LATIN': N_('Latin'),
    'ARABIC': N_('Arabic'),
    'HEBREW': N_('Hebrew'),
    'CJK': N_('Chinese'),
    'HANGUL': N_('Hangul'),
    'HIRAGANA': N_('Hiragana'),
    'KATAKANA': N_('Katakana'),
    'THAI': N_('Thai')
}
def scripts_sorted_by_localized_name():
    """Yield (script_id, localized_name) pairs sorted by the localized name."""
    localized = [(script_id, _(name)) for script_id, name in SCRIPTS.items()]
    localized.sort(key=lambda pair: pair[1])
    yield from localized
| 1,464
|
Python
|
.py
| 41
| 33.121951
| 96
| 0.696756
|
metabrainz/picard
| 3,687
| 383
| 10
|
GPL-2.0
|
9/5/2024, 5:11:02 PM (Europe/Amsterdam)
|
11,106
|
asf.py
|
metabrainz_picard/picard/formats/asf.py
|
# -*- coding: utf-8 -*-
#
# Picard, the next-generation MusicBrainz tagger
#
# Copyright (C) 2006-2007, 2011 Lukáš Lalinský
# Copyright (C) 2009-2011, 2014, 2018-2021, 2023 Philipp Wolfer
# Copyright (C) 2011-2014 Wieland Hoffmann
# Copyright (C) 2012-2013 Michael Wiencek
# Copyright (C) 2013 Calvin Walton
# Copyright (C) 2013-2014, 2018-2021, 2023-2024 Laurent Monin
# Copyright (C) 2014-2015, 2017 Sophist-UK
# Copyright (C) 2016-2018 Sambhav Kothari
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import struct
from mutagen.asf import (
ASF,
ASFByteArrayAttribute,
)
from picard import log
from picard.config import get_config
from picard.coverart.image import (
CoverArtImageError,
TagCoverArtImage,
)
from picard.coverart.utils import types_from_id3
from picard.file import File
from picard.formats.mutagenext import delall_ci
from picard.metadata import Metadata
from picard.util import encode_filename
def _read_utf16_nullterm(data, what):
    """Read a null-terminated UTF-16-LE string from the start of *data*.

    Returns a tuple (text, remaining_bytes). Raises ValueError with the
    message "<what>: missing data" if the terminator is never found.
    """
    chars = b''
    while data:
        char, data = data[:2], data[2:]
        if char == b'\x00\x00':
            return chars.decode('utf-16-le'), data
        chars += char
    raise ValueError("%s: missing data" % what)
def unpack_image(data):
    """
    Helper function to unpack image data from a WM/Picture tag.

    The data has the following format:
    1 byte: Picture type (0-20), see ID3 APIC frame specification at
    http://www.id3.org/id3v2.4.0-frames
    4 bytes: Picture data length in LE format
    MIME type, null terminated UTF-16-LE string
    Description, null terminated UTF-16-LE string
    The image data in the given length

    Returns (mime, image_data, picture_type, description).
    Raises ValueError on a truncated or inconsistent payload.
    """
    try:
        (type_, size) = struct.unpack_from('<bi', data)
    except struct.error as e:
        # Chain the original struct error for easier debugging.
        raise ValueError(e) from e
    # Skip the 5-byte header (1 byte type + 4 bytes length).
    data = data[5:]
    mime, data = _read_utf16_nullterm(data, 'mime')
    description, data = _read_utf16_nullterm(data, 'desc')
    if size != len(data):
        raise ValueError("image data size mismatch")
    return (mime, data, type_, description)
def pack_image(mime, data, image_type=3, description=""):
    """
    Helper function to pack image data for a WM/Picture tag.

    Layout (see unpack_image): a 5-byte header (picture type, LE data
    length), then the MIME type and description as null-terminated
    UTF-16-LE strings, then the raw image bytes.
    """
    terminator = b'\x00\x00'
    parts = [
        struct.pack('<bi', image_type, len(data)),
        mime.encode('utf-16-le'),
        terminator,
        description.encode('utf-16-le'),
        terminator,
        data,
    ]
    return b''.join(parts)
class ASFFile(File):
"""
ASF (WMA) metadata reader/writer
See http://msdn.microsoft.com/en-us/library/ms867702.aspx for official
WMA tag specifications.
"""
EXTENSIONS = [".wma", ".wmv", ".asf"]
NAME = "Windows Media Audio"
_File = ASF
__TRANS = {
'album': 'WM/AlbumTitle',
'title': 'Title',
'artist': 'Author',
'albumartist': 'WM/AlbumArtist',
'date': 'WM/Year',
'originalalbum': 'WM/OriginalAlbumTitle',
'originalartist': 'WM/OriginalArtist',
'originaldate': 'WM/OriginalReleaseTime',
'originalyear': 'WM/OriginalReleaseYear',
'originalfilename': 'WM/OriginalFilename',
'composer': 'WM/Composer',
'lyricist': 'WM/Writer',
'conductor': 'WM/Conductor',
'remixer': 'WM/ModifiedBy',
'producer': 'WM/Producer',
'grouping': 'WM/ContentGroupDescription',
'subtitle': 'WM/SubTitle',
'discsubtitle': 'WM/SetSubTitle',
'tracknumber': 'WM/TrackNumber',
'discnumber': 'WM/PartOfSet',
'comment': 'Description',
'genre': 'WM/Genre',
'bpm': 'WM/BeatsPerMinute',
'key': 'WM/InitialKey',
'script': 'WM/Script',
'language': 'WM/Language',
'mood': 'WM/Mood',
'isrc': 'WM/ISRC',
'copyright': 'Copyright',
'lyrics': 'WM/Lyrics',
'~rating': 'WM/SharedUserRating',
'media': 'WM/Media',
'barcode': 'WM/Barcode',
'catalognumber': 'WM/CatalogNo',
'label': 'WM/Publisher',
'encodedby': 'WM/EncodedBy',
'encodersettings': 'WM/EncodingSettings',
'albumsort': 'WM/AlbumSortOrder',
'albumartistsort': 'WM/AlbumArtistSortOrder',
'artistsort': 'WM/ArtistSortOrder',
'titlesort': 'WM/TitleSortOrder',
'composersort': 'WM/ComposerSortOrder',
'musicbrainz_recordingid': 'MusicBrainz/Track Id',
'musicbrainz_trackid': 'MusicBrainz/Release Track Id',
'musicbrainz_albumid': 'MusicBrainz/Album Id',
'musicbrainz_artistid': 'MusicBrainz/Artist Id',
'musicbrainz_albumartistid': 'MusicBrainz/Album Artist Id',
'musicbrainz_trmid': 'MusicBrainz/TRM Id',
'musicbrainz_discid': 'MusicBrainz/Disc Id',
'musicbrainz_workid': 'MusicBrainz/Work Id',
'musicbrainz_releasegroupid': 'MusicBrainz/Release Group Id',
'musicbrainz_originalalbumid': 'MusicBrainz/Original Album Id',
'musicbrainz_originalartistid': 'MusicBrainz/Original Artist Id',
'musicip_puid': 'MusicIP/PUID',
'releasestatus': 'MusicBrainz/Album Status',
'releasetype': 'MusicBrainz/Album Type',
'releasecountry': 'MusicBrainz/Album Release Country',
'acoustid_id': 'Acoustid/Id',
'acoustid_fingerprint': 'Acoustid/Fingerprint',
'compilation': 'WM/IsCompilation',
'engineer': 'WM/Engineer',
'asin': 'ASIN',
'djmixer': 'WM/DJMixer',
'mixer': 'WM/Mixer',
'artists': 'WM/ARTISTS',
'director': 'WM/Director',
'work': 'WM/Work',
'website': 'WM/AuthorURL',
}
__RTRANS = {b: a for a, b in __TRANS.items()}
# Tags to load case insensitive
__TRANS_CI = {
'replaygain_album_gain': 'REPLAYGAIN_ALBUM_GAIN',
'replaygain_album_peak': 'REPLAYGAIN_ALBUM_PEAK',
'replaygain_album_range': 'REPLAYGAIN_ALBUM_RANGE',
'replaygain_track_gain': 'REPLAYGAIN_TRACK_GAIN',
'replaygain_track_peak': 'REPLAYGAIN_TRACK_PEAK',
'replaygain_track_range': 'REPLAYGAIN_TRACK_RANGE',
'replaygain_reference_loudness': 'REPLAYGAIN_REFERENCE_LOUDNESS',
}
__RTRANS_CI = {b.lower(): a for a, b in __TRANS_CI.items()}
def __init__(self, filename):
super().__init__(filename)
self.__casemap = {}
def _load(self, filename):
log.debug("Loading file %r", filename)
config = get_config()
self.__casemap = {}
file = ASF(encode_filename(filename))
metadata = Metadata()
for name, values in file.tags.items():
if name == 'WM/Picture':
for image in values:
try:
(mime, data, image_type, description) = unpack_image(image.value)
except ValueError as e:
log.warning("Cannot unpack image from %r: %s",
filename, e)
continue
try:
coverartimage = TagCoverArtImage(
file=filename,
tag=name,
types=types_from_id3(image_type),
comment=description,
support_types=True,
data=data,
id3_type=image_type,
)
except CoverArtImageError as e:
log.error("Cannot load image from %r: %s", filename, e)
else:
metadata.images.append(coverartimage)
continue
elif name == 'WM/SharedUserRating':
# Rating in WMA ranges from 0 to 99, normalize this to the range 0 to 5
values[0] = int(round(int(str(values[0])) / 99.0 * (config.setting['rating_steps'] - 1)))
elif name == 'WM/PartOfSet':
disc = str(values[0]).split("/")
if len(disc) > 1:
metadata['totaldiscs'] = disc[1]
values[0] = disc[0]
name_lower = name.lower()
if name in self.__RTRANS:
name = self.__RTRANS[name]
elif name_lower in self.__RTRANS_CI:
orig_name = name
name = self.__RTRANS_CI[name_lower]
self.__casemap[name] = orig_name
else:
continue
values = [str(value) for value in values if value]
if values:
metadata[name] = values
self._info(metadata, file)
return metadata
def _save(self, filename, metadata):
log.debug("Saving file %r", filename)
config = get_config()
file = ASF(encode_filename(filename))
tags = file.tags
if config.setting['clear_existing_tags']:
cover = tags.get('WM/Picture') if config.setting['preserve_images'] else None
tags.clear()
if cover:
tags['WM/Picture'] = cover
cover = []
for image in metadata.images.to_be_saved_to_tags():
tag_data = pack_image(image.mimetype, image.data, image.id3_type,
image.comment)
cover.append(ASFByteArrayAttribute(tag_data))
if cover:
tags['WM/Picture'] = cover
for name, values in metadata.rawitems():
if name.startswith('lyrics:'):
name = 'lyrics'
elif name == '~rating':
values = [int(values[0]) * 99 // (config.setting['rating_steps'] - 1)]
elif name == 'discnumber' and 'totaldiscs' in metadata:
values = ['%s/%s' % (metadata['discnumber'], metadata['totaldiscs'])]
if name in self.__TRANS:
name = self.__TRANS[name]
elif name in self.__TRANS_CI:
if name in self.__casemap:
name = self.__casemap[name]
else:
name = self.__TRANS_CI[name]
delall_ci(tags, name)
else:
continue
tags[name] = values
self._remove_deleted_tags(metadata, tags)
file.save()
def _remove_deleted_tags(self, metadata, tags):
    """Delete from *tags* every tag the user removed in the UI."""
    for deleted in metadata.deleted_tags:
        name = self._get_tag_name(deleted)
        if name and name in tags:
            del tags[name]
@classmethod
def supports_tag(cls, name):
    """Return True if *name* can be stored in an ASF/WMA file."""
    if name.startswith('lyrics:') or name in {'~rating', 'totaldiscs'}:
        return True
    return name in cls.__TRANS or name in cls.__TRANS_CI
def _get_tag_name(self, name):
    """Map a Picard tag name to its native ASF tag name, or None if unmapped."""
    if name.startswith('lyrics:'):
        name = 'lyrics'
    # totaldiscs is folded into the disc-number tag on save.
    if name == 'totaldiscs':
        return self.__TRANS['discnumber']
    return self.__TRANS.get(name)
def _info(self, metadata, file):
    """Extract stream information; flag .wmv files as video."""
    super()._info(metadata, file)
    path = file.filename
    if isinstance(path, bytes):
        path = path.decode()
    if path.lower().endswith(".wmv"):
        metadata['~video'] = '1'
| 12,102
|
Python
|
.py
| 300
| 30.606667
| 105
| 0.583702
|
metabrainz/picard
| 3,687
| 383
| 10
|
GPL-2.0
|
9/5/2024, 5:11:02 PM (Europe/Amsterdam)
|
11,107
|
util.py
|
metabrainz_picard/picard/formats/util.py
|
# -*- coding: utf-8 -*-
#
# Picard, the next-generation MusicBrainz tagger
#
# Copyright (C) 2006-2008, 2012 Lukáš Lalinský
# Copyright (C) 2008 Will
# Copyright (C) 2010, 2014, 2018-2020, 2023-2024 Philipp Wolfer
# Copyright (C) 2013 Michael Wiencek
# Copyright (C) 2013, 2017-2019, 2021, 2023-2024 Laurent Monin
# Copyright (C) 2016-2018 Sambhav Kothari
# Copyright (C) 2017 Sophist-UK
# Copyright (C) 2017 Ville Skyttä
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import os.path
from picard import log
from picard.extension_points.formats import (
ext_point_formats,
ext_to_format,
)
def supported_formats():
    """Return a list of (extensions, name) pairs for every registered format."""
    return [(fmt.EXTENSIONS, fmt.NAME) for fmt in ext_point_formats]
def supported_extensions():
    """Return a flat list of all file extensions supported by any format."""
    extensions = []
    for exts, _name in supported_formats():
        extensions.extend(exts)
    return extensions
def guess_format(filename, options=None):
    """Select the best matching file type amongst supported formats.

    Args:
        filename: Path of the file to inspect.
        options: Iterable of format classes to consider; defaults to all
            registered formats.

    Returns:
        An instance of the best-scoring format class, or None if no format
        gives a positive match score.
    """
    if options is None:
        options = ext_point_formats
    # Since we are reading only 128 bytes and then immediately closing the
    # file, use unbuffered mode.
    with open(filename, 'rb', 0) as fileobj:
        header = fileobj.read(128)
        # Each candidate format's file type scores how closely the header
        # matches that format; only formats providing a _File type compete.
        results = [(option._File.score(filename, fileobj, header), option.__name__, option)
                   for option in options
                   if getattr(option, "_File", None)]
    if results:
        # Fix: pick the maximum by (score, name) explicitly.  Sorting the
        # 3-tuples would fall through to comparing the format classes
        # themselves on a (score, name) tie, which raises TypeError since
        # classes are not orderable.
        score, _name, best = max(results, key=lambda result: (result[0], result[1]))
        if score > 0:
            # Return the format with the highest matching score.
            return best(filename)
    # No positive score: the header did not match any supported format.
    return None
def open_(filename):
    """Open *filename* and return a File instance with the appropriate
    format handler, or None if no handler matches or loading fails.
    """
    try:
        # Extension-based lookup is the cheap, common path.
        ext = os.path.splitext(filename)[1]
        if ext:
            file_format = ext_to_format(ext)
            if file_format:
                return file_format(filename)
        # Extension lookup failed: fall back to sniffing the file header.
        return guess_format(filename)
    except Exception as error:
        log.error("Error occurred:\n%s", error)
        return None
| 3,221
|
Python
|
.py
| 73
| 39.054795
| 106
| 0.706033
|
metabrainz/picard
| 3,687
| 383
| 10
|
GPL-2.0
|
9/5/2024, 5:11:02 PM (Europe/Amsterdam)
|
11,108
|
wav.py
|
metabrainz_picard/picard/formats/wav.py
|
# -*- coding: utf-8 -*-
#
# Picard, the next-generation MusicBrainz tagger
#
# Copyright (C) 2007 Lukáš Lalinský
# Copyright (C) 2012-2013, 2017 Wieland Hoffmann
# Copyright (C) 2013 Michael Wiencek
# Copyright (C) 2016-2017 Sambhav Kothari
# Copyright (C) 2018, 2020-2021, 2023-2024 Laurent Monin
# Copyright (C) 2018-2022, 2024 Philipp Wolfer
# Copyright (C) 2024 Suryansh Shakya
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
from collections.abc import MutableMapping
import mutagen
from picard import log
from picard.config import get_config
from picard.file import File
from picard.formats.id3 import NonCompatID3File
from picard.metadata import Metadata
try:
from mutagen._iff import assert_valid_chunk_id
from mutagen._riff import RiffFile
from mutagen._util import loadfile
import mutagen.wave
# See https://exiftool.org/TagNames/RIFF.html
TRANSLATE_RIFF_INFO = {
# Minimal, as e.g. supported by Windows Explorer,
# Audacity and foobar2000
'IART': 'artist',
'ICMT': 'comment',
'ICOP': 'copyright',
'ICRD': 'date',
'IGNR': 'genre',
'INAM': 'title',
'IPRD': 'album',
'ITRK': 'tracknumber',
# Extended, not well supported by other tools
'ICNT': 'releasecountry',
'IENC': 'encodedby',
'IENG': 'engineer',
'ILNG': 'language',
'IMED': 'media',
'IMUS': 'composer',
'IPRO': 'producer',
'IWRI': 'writer',
}
R_TRANSLATE_RIFF_INFO = {v: k for k, v in TRANSLATE_RIFF_INFO.items()}
def translate_tag_to_riff_name(name):
    """Return the RIFF INFO chunk ID for a Picard tag name, or None."""
    # Every comment:* variant collapses onto the single comment chunk.
    key = 'comment' if name.startswith('comment:') else name
    return R_TRANSLATE_RIFF_INFO.get(key)
class RiffListInfo(MutableMapping):
    """Allows loading / saving RIFF INFO tags from / to RIFF files.

    Mapping interface: keys are RIFF chunk IDs (e.g. 'IART'), values are
    strings.  Keys removed via ``del`` are remembered and deleted from the
    file on the next save().
    """

    def __init__(self, encoding='windows-1252'):
        # ``encoding`` is used for writing only; reading always tries UTF-8
        # first (see __decode_data).
        self.encoding = encoding
        self.__tags = {}
        self.__deleted_tags = set()

    @loadfile()
    def load(self, filething):
        """Load the INFO tags from the file."""
        riff_file = RiffFile(filething.fileobj)
        info = self.__find_info_chunk(riff_file.root)
        if info:
            for tag in info.subchunks():
                self.__tags[tag.id] = self.__decode_data(tag.read())

    @loadfile(writable=True)
    def save(self, filething):
        """Save the INFO tags to the file."""
        riff_file = RiffFile(filething.fileobj)
        info = self.__find_info_chunk(riff_file.root)
        if not info:
            # No LIST/INFO chunk yet: create one.
            info = riff_file.insert_chunk('LIST', b'INFO')
        for name, value in self.__tags.items():
            self.__save_tag_data(info, name, value)
        for name in self.__deleted_tags:
            self.__delete_tag(info, name)

    @loadfile(writable=True)
    def delete(self, filething):
        """Deletes the INFO chunk completely from the file."""
        riff_file = RiffFile(filething.fileobj)
        info = self.__find_info_chunk(riff_file.root)
        if info:
            info.delete()

    @staticmethod
    def __find_info_chunk(parent):
        # Return the LIST/INFO chunk below parent, or None.
        for chunk in parent.subchunks():
            if chunk.id == 'LIST' and chunk.name == 'INFO':
                return chunk
        return None

    @staticmethod
    def __find_subchunk(parent, name):
        # Return the first direct subchunk with the given chunk ID, or None.
        for chunk in parent.subchunks():
            if chunk.id == name:
                return chunk
        return None

    def __save_tag_data(self, info, name, value):
        # Overwrite an existing chunk in place, or append a new one.
        data = self.__encode_data(value)
        chunk = self.__find_subchunk(info, name)
        if chunk:
            chunk.resize(len(data))
            chunk.write(data)
            return chunk
        else:
            return info.insert_chunk(name, data)

    def __delete_tag(self, info, name):
        chunk = self.__find_subchunk(info, name)
        if chunk:
            chunk.delete()

    @staticmethod
    def __decode_data(value):
        try:  # Always try first to decode as Unicode
            value = value.decode('utf-8')
        except UnicodeDecodeError:  # Fall back to Windows-1252 encoding
            value = value.decode('windows-1252', errors='replace')
        return value.rstrip('\0')

    def __encode_data(self, value):
        # Values are stored NUL-terminated in the file.
        return value.encode(self.encoding, errors='replace') + b'\x00'

    def __contains__(self, name):
        return self.__tags.__contains__(name)

    def __getitem__(self, key):
        # NOTE(review): returns None for a missing key instead of raising
        # KeyError as MutableMapping normally expects — confirm callers
        # rely on this before changing it.
        return self.__tags.get(key)

    def __setitem__(self, key, value):
        assert_valid_chunk_id(key)
        self.__tags[key] = value
        # Setting a key resurrects it if it had been deleted earlier.
        self.__deleted_tags.discard(key)

    def __delitem__(self, key):
        if key in self.__tags:
            del self.__tags[key]
        self.__deleted_tags.add(key)

    def __iter__(self):
        return iter(self.__tags)

    def __len__(self):
        return len(self.__tags)

    def __repr__(self):
        return repr(self.__tags)

    def __str__(self):
        return str(self.__tags)
class WAVFile(NonCompatID3File):
    """WAV file handled via NonCompatID3File, additionally reading and
    writing RIFF LIST INFO tags."""
    EXTENSIONS = [".wav"]
    NAME = "Microsoft WAVE"
    _File = mutagen.wave.WAVE

    def _info(self, metadata, file):
        """Read technical info and merge in RIFF INFO tags.

        Tags already present (loaded by the base class) take precedence:
        a RIFF INFO value is only used when the tag is not yet set.
        """
        super()._info(metadata, file)
        metadata['~format'] = self.NAME
        config = get_config()
        info = RiffListInfo(encoding=config.setting['wave_riff_info_encoding'])
        info.load(file.filename)
        for tag, value in info.items():
            if tag in TRANSLATE_RIFF_INFO:
                name = TRANSLATE_RIFF_INFO[tag]
                if name not in metadata:
                    metadata[name] = value

    def _save(self, filename, metadata):
        """Save tags; depending on settings also write or strip RIFF INFO."""
        super()._save(filename, metadata)
        # Save RIFF LIST INFO
        config = get_config()
        if config.setting['write_wave_riff_info']:
            info = RiffListInfo(encoding=config.setting['wave_riff_info_encoding'])
            if config.setting['clear_existing_tags']:
                info.delete(filename)
            for name, values in metadata.rawitems():
                name = translate_tag_to_riff_name(name)
                if name:
                    # RIFF INFO holds a single value per chunk; join multiples.
                    value = ", ".join(values)
                    info[name] = value
            for name in metadata.deleted_tags:
                name = translate_tag_to_riff_name(name)
                if name:
                    del info[name]
            info.save(filename)
        elif config.setting['remove_wave_riff_info']:
            info = RiffListInfo(encoding=config.setting['wave_riff_info_encoding'])
            info.delete(filename)
except ImportError:
import wave
class WAVFile(File):
    """Fallback WAV support using the stdlib ``wave`` module.

    Used when mutagen's RIFF support is unavailable; exposes audio
    properties only, tags cannot be read or written.
    """
    EXTENSIONS = [".wav"]
    NAME = "Microsoft WAVE"
    _File = None

    def _load(self, filename):
        """Read technical properties from the WAV header into Metadata."""
        log.debug("Loading file %r", filename)
        # Fix: use a context manager so the wave reader's file handle is
        # closed deterministically (the original never closed it).
        with wave.open(filename, "rb") as f:
            metadata = Metadata()
            metadata['~channels'] = f.getnchannels()
            metadata['~bits_per_sample'] = f.getsampwidth() * 8
            metadata['~sample_rate'] = f.getframerate()
            metadata.length = 1000 * f.getnframes() // f.getframerate()
        metadata['~format'] = self.NAME
        self._update_filesystem_metadata(metadata)
        return metadata

    def _save(self, filename, metadata):
        # Tag writing is not possible without mutagen; saving is a no-op.
        log.debug("Saving file %r", filename)

    @classmethod
    def supports_tag(cls, name):
        """No tags are supported in the fallback implementation."""
        return False
| 8,548
|
Python
|
.py
| 209
| 30.229665
| 87
| 0.57315
|
metabrainz/picard
| 3,687
| 383
| 10
|
GPL-2.0
|
9/5/2024, 5:11:02 PM (Europe/Amsterdam)
|
11,109
|
ac3.py
|
metabrainz_picard/picard/formats/ac3.py
|
# -*- coding: utf-8 -*-
#
# Picard, the next-generation MusicBrainz tagger
#
# Copyright (C) 2019-2020 Philipp Wolfer
# Copyright (C) 2020-2021 Laurent Monin
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import mutagen
from picard import log
from picard.config import get_config
from picard.formats.apev2 import APEv2File
from picard.util import encode_filename
from .mutagenext import ac3
class AC3File(APEv2File):
    """AC-3 / Enhanced AC-3 file, optionally carrying APEv2 tags."""
    EXTENSIONS = [".ac3", ".eac3"]
    NAME = "AC-3"
    _File = ac3.AC3APEv2

    def _info(self, metadata, file):
        """Set ~format, distinguishing Enhanced AC-3 and noting APEv2 tags."""
        super()._info(metadata, file)
        codec = getattr(file.info, 'codec', None)
        format = 'Enhanced AC-3' if codec == 'ec-3' else self.NAME
        metadata['~format'] = "%s (APEv2)" % format if file.tags else format

    def _save(self, filename, metadata):
        """Write APEv2 tags if enabled; otherwise optionally strip them."""
        config = get_config()
        if config.setting['ac3_save_ape']:
            super()._save(filename, metadata)
        elif config.setting['remove_ape_from_ac3']:
            try:
                mutagen.apev2.delete(encode_filename(filename))
            except BaseException:
                log.exception('Error removing APEv2 tags from %s', filename)

    @classmethod
    def supports_tag(cls, name):
        """Tags are only supported while APEv2 saving is enabled."""
        config = get_config()
        if not config.setting['ac3_save_ape']:
            return False
        return APEv2File.supports_tag(name)
| 2,139
|
Python
|
.py
| 56
| 32.446429
| 80
| 0.676315
|
metabrainz/picard
| 3,687
| 383
| 10
|
GPL-2.0
|
9/5/2024, 5:11:02 PM (Europe/Amsterdam)
|
11,110
|
vorbis.py
|
metabrainz_picard/picard/formats/vorbis.py
|
# -*- coding: utf-8 -*-
#
# Picard, the next-generation MusicBrainz tagger
#
# Copyright (C) 2006-2008, 2012 Lukáš Lalinský
# Copyright (C) 2008 Hendrik van Antwerpen
# Copyright (C) 2008-2010, 2014-2015, 2018-2023 Philipp Wolfer
# Copyright (C) 2012-2013 Michael Wiencek
# Copyright (C) 2012-2014 Wieland Hoffmann
# Copyright (C) 2013 Calvin Walton
# Copyright (C) 2013-2014, 2017-2023 Laurent Monin
# Copyright (C) 2016-2018 Sambhav Kothari
# Copyright (C) 2017 Ville Skyttä
# Copyright (C) 2022 Marcin Szalowicz
# Copyright (C) 2023 certuna
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import base64
import re
import mutagen.flac
import mutagen.ogg
import mutagen.oggflac
import mutagen.oggopus
import mutagen.oggspeex
import mutagen.oggtheora
import mutagen.oggvorbis
from picard import log
from picard.config import get_config
from picard.coverart.image import (
CoverArtImageError,
TagCoverArtImage,
)
from picard.coverart.utils import types_from_id3
from picard.file import File
from picard.formats.util import guess_format
from picard.metadata import Metadata
from picard.util import (
encode_filename,
sanitize_date,
)
FLAC_MAX_BLOCK_SIZE = 2 ** 24 - 1 # FLAC block size is limited to a 24 bit integer
INVALID_CHARS = re.compile('([^\x20-\x7d]|=)')
UNSUPPORTED_TAGS = {'syncedlyrics', 'r128_album_gain', 'r128_track_gain'}
def sanitize_key(key):
    """
    Remove characters from key which are invalid for a Vorbis comment field name.

    Valid characters are ASCII 0x20 (space) through 0x7D ('}'), with the
    '=' separator excluded.
    See https://www.xiph.org/vorbis/doc/v-comment.html#vectorformat
    """
    return ''.join(ch for ch in key if '\x20' <= ch <= '\x7d' and ch != '=')
def is_valid_key(key):
    """
    Return true if a string is a valid Vorbis comment key.

    Valid characters for Vorbis comment field names are
    ASCII 0x20 through 0x7D, 0x3D ('=') excluded.
    """
    if not key:
        # Preserve the original falsy value ('' or None) for callers.
        return key
    return INVALID_CHARS.search(key) is None
def flac_sort_pics_after_tags(metadata_blocks):
    """
    Reorder the metadata_blocks so that all picture blocks are located after
    the first Vorbis comment block.

    Windows fails to read FLAC tags if the picture blocks are located before
    the Vorbis comments. Reordering the blocks fixes this.
    """
    # Remember all picture blocks that are located before the tag block.
    # Fix: use enumerate instead of the redundant O(n) list.index() call
    # that the loop body performed for the tag block.
    tagindex = 0
    picblocks = []
    for index, block in enumerate(metadata_blocks):
        if block.code == mutagen.flac.VCFLACDict.code:
            tagindex = index
            break
        elif block.code == mutagen.flac.Picture.code:
            picblocks.append(block)
    else:
        return  # No tags found, nothing to sort

    # Move those picture blocks after the tag block, maintaining their order.
    # After each removal the tag block shifts one position left, so inserting
    # at the original tagindex lands each picture just after the tag block.
    for pic in picblocks:
        metadata_blocks.remove(pic)
        metadata_blocks.insert(tagindex, pic)
def flac_remove_empty_seektable(file):
    """Drop an existing but empty seek table from *file*.

    Some software has issues with FLAC files that contain an empty seek
    table. Omitting the table entirely is valid, so strip it.
    """
    table = file.seektable
    if not table or table.seekpoints:
        # Either no table at all, or a populated one worth keeping.
        return
    file.metadata_blocks = [
        block for block in file.metadata_blocks if block != table
    ]
    file.seektable = None
class VCommentFile(File):
    """Generic VComment-based file.

    Base class for the Vorbis-comment tagged formats below (FLAC, Ogg
    Vorbis, Opus, Speex, Theora); subclasses set ``_File``, NAME and
    EXTENSIONS.
    """
    _File = None

    # Native Vorbis field name -> Picard tag name (reverse map built below).
    __translate = {
        'movement': 'movementnumber',
        'movementname': 'movement',
        'musicbrainz_releasetrackid': 'musicbrainz_trackid',
        'musicbrainz_trackid': 'musicbrainz_recordingid',
        'waveformatextensible_channel_mask': '~waveformatextensible_channel_mask',
    }
    __rtranslate = {v: k for k, v in __translate.items()}

    def _load(self, filename):
        """Read tags, embedded images and technical info into a Metadata object."""
        log.debug("Loading file %r", filename)
        config = get_config()
        file = self._File(encode_filename(filename))
        file.tags = file.tags or {}
        metadata = Metadata()
        for origname, values in file.tags.items():
            for value in values:
                value = value.rstrip('\0')
                name = origname
                if name in {'date', 'originaldate', 'releasedate'}:
                    # YYYY-00-00 => YYYY
                    value = sanitize_date(value)
                elif name == 'performer' or name == 'comment':
                    # transform "performer=Joe Barr (Piano)" to "performer:Piano=Joe Barr"
                    name += ':'
                    if value.endswith(')'):
                        # Scan backwards for the matching '(' to honour
                        # nested parentheses in the description.
                        start = len(value) - 2
                        count = 1
                        while count > 0 and start > 0:
                            if value[start] == ')':
                                count += 1
                            elif value[start] == '(':
                                count -= 1
                            start -= 1
                        if start > 0:
                            name += value[start + 2:-1]
                            value = value[:start]
                elif name.startswith('rating'):
                    try:
                        name, email = name.split(':', 1)
                    except ValueError:
                        email = ''
                    # Only accept a rating stored for the configured user.
                    if email != sanitize_key(config.setting['rating_user_email']):
                        continue
                    name = '~rating'
                    try:
                        value = str(round((float(value) * (config.setting['rating_steps'] - 1))))
                    except ValueError:
                        log.warning('Invalid rating value in %r: %s', filename, value)
                elif name == 'fingerprint' and value.startswith('MusicMagic Fingerprint'):
                    name = 'musicip_fingerprint'
                    value = value[22:]
                elif name == 'tracktotal':
                    # totaltracks, if present, wins over tracktotal.
                    if 'totaltracks' in file.tags:
                        continue
                    name = 'totaltracks'
                elif name == 'disctotal':
                    # totaldiscs, if present, wins over disctotal.
                    if 'totaldiscs' in file.tags:
                        continue
                    name = 'totaldiscs'
                elif name == 'metadata_block_picture':
                    try:
                        image = mutagen.flac.Picture(base64.standard_b64decode(value))
                        coverartimage = TagCoverArtImage(
                            file=filename,
                            tag=name,
                            types=types_from_id3(image.type),
                            comment=image.desc,
                            support_types=True,
                            data=image.data,
                            id3_type=image.type
                        )
                    except (CoverArtImageError, TypeError, ValueError, mutagen.flac.error) as e:
                        log.error("Cannot load image from %r: %s", filename, e)
                    else:
                        metadata.images.append(coverartimage)
                    continue
                elif name in self.__translate:
                    name = self.__translate[name]
                metadata.add(name, value)
        if self._File == mutagen.flac.FLAC:
            # Native FLAC stores pictures in dedicated PICTURE blocks.
            for image in file.pictures:
                try:
                    coverartimage = TagCoverArtImage(
                        file=filename,
                        tag='FLAC/PICTURE',
                        types=types_from_id3(image.type),
                        comment=image.desc,
                        support_types=True,
                        data=image.data,
                        id3_type=image.type
                    )
                except CoverArtImageError as e:
                    log.error("Cannot load image from %r: %s", filename, e)
                else:
                    metadata.images.append(coverartimage)
            # Read the unofficial COVERART tags, for backward compatibility only
            if 'metadata_block_picture' not in file.tags:
                try:
                    for data in file['COVERART']:
                        try:
                            coverartimage = TagCoverArtImage(
                                file=filename,
                                tag='COVERART',
                                data=base64.standard_b64decode(data)
                            )
                        except (CoverArtImageError, TypeError, ValueError) as e:
                            log.error("Cannot load image from %r: %s", filename, e)
                        else:
                            metadata.images.append(coverartimage)
                except KeyError:
                    pass
        self._info(metadata, file)
        return metadata

    def _save(self, filename, metadata):
        """Save metadata to the file."""
        log.debug("Saving file %r", filename)
        config = get_config()
        is_flac = self._File == mutagen.flac.FLAC
        file = self._File(encode_filename(filename))
        if file.tags is None:
            file.add_tags()
        if config.setting['clear_existing_tags']:
            # Always keep the channel mask; optionally keep embedded images.
            preserve_tags = ['waveformatextensible_channel_mask']
            if not is_flac and config.setting['preserve_images']:
                preserve_tags.append('metadata_block_picture')
                preserve_tags.append('coverart')
            preserved_values = {}
            for name in preserve_tags:
                if name in file.tags and file.tags[name]:
                    preserved_values[name] = file.tags[name]
            file.tags.clear()
            for name, value in preserved_values.items():
                file.tags[name] = value
        images_to_save = list(metadata.images.to_be_saved_to_tags())
        if is_flac and (images_to_save
                        or (config.setting['clear_existing_tags']
                            and not config.setting['preserve_images'])):
            file.clear_pictures()
        tags = {}
        for name, value in metadata.items():
            if name == '~rating':
                # Save rating according to http://code.google.com/p/quodlibet/wiki/Specs_VorbisComments
                user_email = sanitize_key(config.setting['rating_user_email'])
                if user_email:
                    name = 'rating:%s' % user_email
                else:
                    name = 'rating'
                value = str(float(value) / (config.setting['rating_steps'] - 1))
            # don't save private tags
            elif name.startswith("~") or not self.supports_tag(name):
                continue
            elif name.startswith('lyrics:'):
                name = 'lyrics'
            elif name in {'date', 'originaldate', 'releasedate'}:
                # YYYY-00-00 => YYYY
                value = sanitize_date(value)
            elif name.startswith('performer:') or name.startswith('comment:'):
                # transform "performer:Piano=Joe Barr" to "performer=Joe Barr (Piano)"
                name, desc = name.split(':', 1)
                if desc:
                    value += ' (%s)' % desc
            elif name == "musicip_fingerprint":
                name = "fingerprint"
                value = "MusicMagic Fingerprint%s" % value
            elif name in self.__rtranslate:
                name = self.__rtranslate[name]
            tags.setdefault(name.upper(), []).append(value.rstrip('\0'))
        if 'totaltracks' in metadata:
            tags.setdefault('TRACKTOTAL', []).append(metadata['totaltracks'])
        if 'totaldiscs' in metadata:
            tags.setdefault('DISCTOTAL', []).append(metadata['totaldiscs'])
        for image in images_to_save:
            picture = mutagen.flac.Picture()
            picture.data = image.data
            picture.mime = image.mimetype
            picture.desc = image.comment
            picture.width = image.width
            picture.height = image.height
            picture.type = image.id3_type
            if is_flac:
                # See https://xiph.org/flac/format.html#metadata_block_picture
                expected_block_size = (8 * 4 + len(picture.data)
                                      + len(picture.mime)
                                      + len(picture.desc.encode('UTF-8')))
                if expected_block_size > FLAC_MAX_BLOCK_SIZE:
                    log.error("Failed saving image to %r: Image size of %d bytes exceeds maximum FLAC block size of %d bytes",
                              filename, expected_block_size, FLAC_MAX_BLOCK_SIZE)
                    continue
                file.add_picture(picture)
            else:
                # Non-FLAC formats embed pictures base64-encoded in a tag.
                tags.setdefault('METADATA_BLOCK_PICTURE', []).append(
                    base64.b64encode(picture.write()).decode('ascii'))
        file.tags.update(tags)
        self._remove_deleted_tags(metadata, file.tags)
        kwargs = {}
        if is_flac:
            flac_sort_pics_after_tags(file.metadata_blocks)
            if config.setting['fix_missing_seekpoints_flac']:
                flac_remove_empty_seektable(file)
            if config.setting['remove_id3_from_flac']:
                kwargs['deleteid3'] = True
        try:
            file.save(**kwargs)
        except TypeError:
            # save() of non-FLAC formats does not accept the extra kwargs.
            file.save()

    def _remove_deleted_tags(self, metadata, tags):
        """Remove the tags from the file that were deleted in the UI"""
        for tag in metadata.deleted_tags:
            real_name = self._get_tag_name(tag)
            if is_valid_key(real_name) and real_name in tags:
                if real_name in {'performer', 'comment'}:
                    # Only remove values matching the deleted tag's
                    # "(description)" suffix (or values without one).
                    parts = tag.split(':', 1)
                    if len(parts) == 2:
                        tag_type_regex = re.compile(r"\(%s\)$" % re.escape(parts[1]))
                    else:
                        tag_type_regex = re.compile(r"[^)]$")
                    existing_tags = tags.get(real_name)
                    # NOTE(review): this mutates the list while iterating it;
                    # consecutive matching values may be skipped — confirm.
                    for item in existing_tags:
                        if re.search(tag_type_regex, item):
                            existing_tags.remove(item)
                    tags[real_name] = existing_tags
                else:
                    if tag in {'totaldiscs', 'totaltracks'} and tag in tags:
                        # both tag and real_name are to be deleted in this case
                        del tags[tag]
                    del tags[real_name]

    def _get_tag_name(self, name):
        """Map a Picard tag name to its native Vorbis field name, or None."""
        if name == '~rating':
            config = get_config()
            if config.setting['rating_user_email']:
                return 'rating:%s' % config.setting['rating_user_email']
            else:
                return 'rating'
        elif name.startswith("~"):
            # Other internal tags are never written.
            return None
        elif name.startswith('lyrics:'):
            return 'lyrics'
        elif name.startswith('performer:') or name.startswith('comment:'):
            return name.split(':', 1)[0]
        elif name == 'musicip_fingerprint':
            return 'fingerprint'
        elif name == 'totaltracks':
            return 'tracktotal'
        elif name == 'totaldiscs':
            return 'disctotal'
        elif name in self.__rtranslate:
            return self.__rtranslate[name]
        else:
            return name

    @classmethod
    def supports_tag(cls, name):
        """Return True if *name* can be stored as a Vorbis comment."""
        return (bool(name) and name not in UNSUPPORTED_TAGS
                and (is_valid_key(name)
                     or name.startswith('comment:')
                     or name.startswith('lyrics:')
                     or name.startswith('performer:')))
class FLACFile(VCommentFile):
    """Native FLAC file with Vorbis-comment tags."""
    EXTENSIONS = [".flac"]
    NAME = "FLAC"
    _File = mutagen.flac.FLAC
class OggFLACFile(VCommentFile):
    """Ogg FLAC file."""
    EXTENSIONS = [".oggflac"]
    NAME = "Ogg FLAC"
    _File = mutagen.oggflac.OggFLAC
class OggSpeexFile(VCommentFile):
    """Ogg Speex file (.spx)."""
    EXTENSIONS = [".spx"]
    NAME = "Speex"
    _File = mutagen.oggspeex.OggSpeex
class OggTheoraFile(VCommentFile):
    """Ogg Theora file."""
    EXTENSIONS = [".oggtheora"]
    NAME = "Ogg Theora"
    _File = mutagen.oggtheora.OggTheora

    def _info(self, metadata, file):
        """Extract stream info and mark the file as video."""
        super()._info(metadata, file)
        metadata['~video'] = '1'
class OggVorbisFile(VCommentFile):
    """Ogg Vorbis file."""
    # No extensions registered directly; generic .ogg/.oga files are
    # dispatched via the header-sniffing Ogg* helper functions below.
    EXTENSIONS = []
    NAME = "Ogg Vorbis"
    _File = mutagen.oggvorbis.OggVorbis
class OggOpusFile(VCommentFile):
    """Ogg Opus file."""
    EXTENSIONS = [".opus"]
    NAME = "Ogg Opus"
    _File = mutagen.oggopus.OggOpus

    @classmethod
    def supports_tag(cls, name):
        """Opus additionally accepts the R128 gain tags."""
        return name.startswith('r128_') or VCommentFile.supports_tag(name)
def OggAudioFile(filename):
    """Generic Ogg audio file: sniff the header to pick the codec handler."""
    candidates = [OggFLACFile, OggOpusFile, OggSpeexFile, OggVorbisFile]
    return guess_format(filename, candidates)


OggAudioFile.EXTENSIONS = [".oga"]
OggAudioFile.NAME = "Ogg Audio"
OggAudioFile.supports_tag = VCommentFile.supports_tag
def OggVideoFile(filename):
    """Generic Ogg video file: sniff the header to pick the codec handler."""
    candidates = [OggTheoraFile]
    return guess_format(filename, candidates)


OggVideoFile.EXTENSIONS = [".ogv"]
OggVideoFile.NAME = "Ogg Video"
OggVideoFile.supports_tag = VCommentFile.supports_tag
def OggContainerFile(filename):
    """Generic Ogg file: try every known Ogg codec handler."""
    candidates = [
        OggFLACFile,
        OggOpusFile,
        OggSpeexFile,
        OggTheoraFile,
        OggVorbisFile,
    ]
    return guess_format(filename, candidates)


OggContainerFile.EXTENSIONS = [".ogg", ".ogx"]
OggContainerFile.NAME = "Ogg"
OggContainerFile.supports_tag = VCommentFile.supports_tag
| 18,068
|
Python
|
.py
| 423
| 30.770686
| 126
| 0.567763
|
metabrainz/picard
| 3,687
| 383
| 10
|
GPL-2.0
|
9/5/2024, 5:11:02 PM (Europe/Amsterdam)
|
11,111
|
apev2.py
|
metabrainz_picard/picard/formats/apev2.py
|
# -*- coding: utf-8 -*-
#
# Picard, the next-generation MusicBrainz tagger
#
# Copyright (C) 2006-2009, 2011 Lukáš Lalinský
# Copyright (C) 2009-2011, 2018-2021, 2023 Philipp Wolfer
# Copyright (C) 2011-2014 Wieland Hoffmann
# Copyright (C) 2012-2013 Michael Wiencek
# Copyright (C) 2013 Calvin Walton
# Copyright (C) 2013-2015, 2018-2021, 2023-2024 Laurent Monin
# Copyright (C) 2016-2018 Sambhav Kothari
# Copyright (C) 2017 Ville Skyttä
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
from __future__ import absolute_import
from os.path import isfile
import re
import mutagen.apev2
import mutagen.monkeysaudio
import mutagen.musepack
import mutagen.optimfrog
import mutagen.wavpack
from picard import log
from picard.config import get_config
from picard.coverart.image import (
CoverArtImageError,
TagCoverArtImage,
)
from picard.file import File
from picard.metadata import Metadata
from picard.util import (
encode_filename,
sanitize_date,
)
from picard.util.filenaming import (
get_available_filename,
move_ensure_casing,
replace_extension,
)
from .mutagenext import (
aac,
tak,
)
INVALID_CHARS = re.compile('[^\x20-\x7e]')
DISALLOWED_KEYS = {'ID3', 'TAG', 'OggS', 'MP+'}
UNSUPPORTED_TAGS = {
'gapless',
'musicip_fingerprint',
'podcast',
'podcasturl',
'show',
'showsort',
'syncedlyrics',
'r128_album_gain',
'r128_track_gain',
}
def is_valid_key(key):
    """
    Return true if a string is a valid APE tag key.

    APE tag item keys can have a length of 2 (including) up to 255 (including)
    characters in the range from 0x20 (Space) until 0x7E (Tilde).
    Not allowed are the following keys: ID3, TAG, OggS and MP+.
    See http://wiki.hydrogenaud.io/index.php?title=APE_key
    """
    if not key:
        # Preserve the original falsy value ('' or None) for callers.
        return key
    return (2 <= len(key) <= 255
            and key not in DISALLOWED_KEYS
            and INVALID_CHARS.search(key) is None)
class APEv2File(File):
"""Generic APEv2-based file."""
_File = None
__translate = {
'albumartist': 'Album Artist',
'remixer': 'MixArtist',
'director': 'Director',
'website': 'Weblink',
'discsubtitle': 'DiscSubtitle',
'bpm': 'BPM',
'isrc': 'ISRC',
'catalognumber': 'CatalogNumber',
'barcode': 'Barcode',
'encodedby': 'EncodedBy',
'language': 'Language',
'movementnumber': 'MOVEMENT',
'movement': 'MOVEMENTNAME',
'movementtotal': 'MOVEMENTTOTAL',
'showmovement': 'SHOWMOVEMENT',
'releasestatus': 'MUSICBRAINZ_ALBUMSTATUS',
'releasetype': 'MUSICBRAINZ_ALBUMTYPE',
'musicbrainz_recordingid': 'musicbrainz_trackid',
'musicbrainz_trackid': 'musicbrainz_releasetrackid',
'originalartist': 'Original Artist',
'replaygain_album_gain': 'REPLAYGAIN_ALBUM_GAIN',
'replaygain_album_peak': 'REPLAYGAIN_ALBUM_PEAK',
'replaygain_album_range': 'REPLAYGAIN_ALBUM_RANGE',
'replaygain_track_gain': 'REPLAYGAIN_TRACK_GAIN',
'replaygain_track_peak': 'REPLAYGAIN_TRACK_PEAK',
'replaygain_track_range': 'REPLAYGAIN_TRACK_RANGE',
'replaygain_reference_loudness': 'REPLAYGAIN_REFERENCE_LOUDNESS',
}
__rtranslate = {v.lower(): k for k, v in __translate.items()}
def __init__(self, filename):
super().__init__(filename)
self.__casemap = {}
def _load(self, filename):
log.debug("Loading file %r", filename)
self.__casemap = {}
file = self._File(encode_filename(filename))
metadata = Metadata()
if file.tags:
for origname, values in file.tags.items():
name_lower = origname.lower()
if (values.kind == mutagen.apev2.BINARY
and name_lower.startswith('cover art')):
if b'\0' in values.value:
descr, data = values.value.split(b'\0', 1)
try:
coverartimage = TagCoverArtImage(
file=filename,
tag=name_lower,
data=data,
)
except CoverArtImageError as e:
log.error("Cannot load image from %r: %s", filename, e)
else:
metadata.images.append(coverartimage)
# skip EXTERNAL and BINARY values
if values.kind != mutagen.apev2.TEXT:
continue
for value in values:
name = name_lower
if name == 'year':
name = 'date'
value = sanitize_date(value)
elif name == 'track':
name = 'tracknumber'
track = value.split('/')
if len(track) > 1:
metadata['totaltracks'] = track[1]
value = track[0]
elif name == 'disc':
name = 'discnumber'
disc = value.split('/')
if len(disc) > 1:
metadata['totaldiscs'] = disc[1]
value = disc[0]
elif name in {'performer', 'comment'}:
if value.endswith(')'):
start = value.rfind(' (')
if start > 0:
name += ':' + value[start + 2:-1]
value = value[:start]
elif name in self.__rtranslate:
name = self.__rtranslate[name]
self.__casemap[name] = origname
metadata.add(name, value)
self._info(metadata, file)
return metadata
    def _save(self, filename, metadata):
        """Save metadata to the file."""
        log.debug("Saving file %r", filename)
        config = get_config()
        try:
            tags = mutagen.apev2.APEv2(encode_filename(filename))
        except mutagen.apev2.APENoHeaderError:
            # File has no APEv2 header yet; start with an empty tag set.
            tags = mutagen.apev2.APEv2()
        images_to_save = list(metadata.images.to_be_saved_to_tags())
        if config.setting['clear_existing_tags']:
            # Optionally keep the embedded images when clearing everything.
            preserved = []
            if config.setting['preserve_images']:
                preserved = list(self._iter_cover_art_tags(tags))
            tags.clear()
            for name, value in preserved:
                tags[name] = value
        elif images_to_save:
            # New images replace any existing cover art items.
            for name, value in self._iter_cover_art_tags(tags):
                del tags[name]
        # Collect values per APEv2 tag name first, since several Picard tags
        # can map onto one APEv2 item (e.g. performer:* -> Performer).
        temp = {}
        for name, value in metadata.items():
            if name.startswith('~') or not self.supports_tag(name):
                continue
            real_name = self._get_tag_name(name)
            # tracknumber/totaltracks => Track
            if name == 'tracknumber':
                if 'totaltracks' in metadata:
                    value = '%s/%s' % (value, metadata['totaltracks'])
            # discnumber/totaldiscs => Disc
            elif name == 'discnumber':
                if 'totaldiscs' in metadata:
                    value = '%s/%s' % (value, metadata['totaldiscs'])
            elif name in {'totaltracks', 'totaldiscs'}:
                # Already folded into Track/Disc above.
                continue
            # "performer:Piano=Joe Barr" => "Performer=Joe Barr (Piano)"
            elif name.startswith('performer:') or name.startswith('comment:'):
                name, desc = name.split(':', 1)
                if desc:
                    value += ' (%s)' % desc
            temp.setdefault(real_name, []).append(value)
        for name, values in temp.items():
            tags[name] = values
        for image in images_to_save:
            cover_filename = 'Cover Art (Front)'
            cover_filename += image.extension
            tags['Cover Art (Front)'] = mutagen.apev2.APEValue(
                cover_filename.encode('ascii') + b'\0' + image.data, mutagen.apev2.BINARY)
            break
            # can't save more than one item with the same name
            # (mp3tags does this, but it's against the specs)
        self._remove_deleted_tags(metadata, tags)
        tags.save(encode_filename(filename))
def _remove_deleted_tags(self, metadata, tags):
"""Remove the tags from the file that were deleted in the UI"""
for tag in metadata.deleted_tags:
real_name = self._get_tag_name(tag)
if real_name in {'Lyrics', 'Comment', 'Performer'}:
parts = tag.split(':', 1)
if len(parts) == 2:
tag_type_regex = re.compile(r"\(%s\)$" % re.escape(parts[1]))
else:
tag_type_regex = re.compile(r"[^)]$")
existing_tags = tags.get(real_name, [])
for item in existing_tags:
if re.search(tag_type_regex, item):
existing_tags.remove(item)
tags[real_name] = existing_tags
elif tag in {'totaltracks', 'totaldiscs'}:
tagstr = real_name.lower() + 'number'
if tagstr in metadata:
tags[real_name] = metadata[tagstr]
else:
if real_name in tags:
del tags[real_name]
def _get_tag_name(self, name):
if name in self.__casemap:
return self.__casemap[name]
elif name.startswith('lyrics:'):
return 'Lyrics'
elif name == 'date':
return 'Year'
elif name in {'tracknumber', 'totaltracks'}:
return 'Track'
elif name in {'discnumber', 'totaldiscs'}:
return 'Disc'
elif name.startswith('performer:') or name.startswith('comment:'):
return name.split(':', 1)[0].title()
elif name in self.__translate:
return self.__translate[name]
else:
return name.title()
@staticmethod
def _iter_cover_art_tags(tags):
for name, value in tags.items():
if value.kind == mutagen.apev2.BINARY and name.lower().startswith('cover art'):
yield (name, value)
@classmethod
def supports_tag(cls, name):
return (bool(name) and name not in UNSUPPORTED_TAGS
and not name.startswith('~')
and (is_valid_key(name)
or name.startswith('comment:')
or name.startswith('lyrics:')
or name.startswith('performer:')))
class MusepackFile(APEv2File):
    """Musepack file."""
    EXTENSIONS = [".mpc", ".mp+"]
    NAME = "Musepack"
    _File = mutagen.musepack.Musepack

    def _info(self, metadata, file):
        # Include the Musepack stream version (e.g. SV7, SV8) in the format.
        super()._info(metadata, file)
        metadata['~format'] = "Musepack, SV%d" % file.info.version
class WavPackFile(APEv2File):
    """WavPack file."""
    EXTENSIONS = [".wv"]
    NAME = "WavPack"
    _File = mutagen.wavpack.WavPack

    def _move_or_rename_wvc(self, old_filename, new_filename):
        """Move/rename the matching .wvc correction file, if one exists."""
        wvc_filename = replace_extension(old_filename, ".wvc")
        if not isfile(wvc_filename):
            return
        wvc_new_filename = replace_extension(new_filename, ".wvc")
        # Avoid clobbering an existing file at the destination.
        wvc_new_filename = get_available_filename(wvc_new_filename, wvc_filename)
        # Fixed log message: format name is "WavPack", not "Wavepack".
        log.debug('Moving WavPack correction file %r => %r', wvc_filename, wvc_new_filename)
        move_ensure_casing(wvc_filename, wvc_new_filename)

    def _move_additional_files(self, old_filename, new_filename, config):
        """Includes an additional check for WavPack correction files"""
        if config.setting['rename_files'] or config.setting['move_files']:
            self._move_or_rename_wvc(old_filename, new_filename)
        return super()._move_additional_files(old_filename, new_filename, config)
class OptimFROGFile(APEv2File):
    """OptimFROG file."""
    EXTENSIONS = [".ofr", ".ofs"]
    NAME = "OptimFROG"
    _File = mutagen.optimfrog.OptimFROG

    def _info(self, metadata, file):
        """Set the format string based on the file extension."""
        super()._info(metadata, file)
        # mutagen.File.filename can be either a bytes or str object
        fname = file.filename
        if isinstance(fname, bytes):
            fname = fname.decode()
        dualstream = fname.lower().endswith(".ofs")
        metadata['~format'] = ("OptimFROG DualStream Audio" if dualstream
                               else "OptimFROG Lossless Audio")
class MonkeysAudioFile(APEv2File):
    """Monkey's Audio file."""
    # Standard APEv2 handling from the base class is sufficient.
    EXTENSIONS = [".ape"]
    NAME = "Monkey's Audio"
    _File = mutagen.monkeysaudio.MonkeysAudio
class TAKFile(APEv2File):
    """TAK file."""
    # Standard APEv2 handling from the base class is sufficient.
    EXTENSIONS = [".tak"]
    NAME = "Tom's lossless Audio Kompressor"
    _File = tak.TAK
class AACFile(APEv2File):
    """AAC file, optionally carrying APEv2 tags."""
    EXTENSIONS = [".aac"]
    NAME = "AAC"
    _File = aac.AACAPEv2

    def _info(self, metadata, file):
        super()._info(metadata, file)
        # Make the presence of APEv2 tags visible in the format string.
        if file.tags:
            metadata['~format'] = "%s (APEv2)" % self.NAME

    def _save(self, filename, metadata):
        """Save APEv2 tags, or strip them, depending on user settings."""
        config = get_config()
        if config.setting['aac_save_ape']:
            super()._save(filename, metadata)
        elif config.setting['remove_ape_from_aac']:
            try:
                mutagen.apev2.delete(encode_filename(filename))
            except Exception:
                # Narrowed from BaseException so KeyboardInterrupt/SystemExit
                # are not swallowed during this best-effort cleanup.
                log.exception("Error removing APEv2 tags from %s", filename)

    @classmethod
    def supports_tag(cls, name):
        # Tags are only supported when APEv2 saving is enabled for AAC.
        config = get_config()
        if config.setting['aac_save_ape']:
            return APEv2File.supports_tag(name)
        else:
            return False
| 14,362
|
Python
|
.py
| 348
| 30.649425
| 93
| 0.573127
|
metabrainz/picard
| 3,687
| 383
| 10
|
GPL-2.0
|
9/5/2024, 5:11:02 PM (Europe/Amsterdam)
|
11,112
|
__init__.py
|
metabrainz_picard/picard/formats/__init__.py
|
# -*- coding: utf-8 -*-
#
# Picard, the next-generation MusicBrainz tagger
#
# Copyright (C) 2006-2008, 2012 Lukáš Lalinský
# Copyright (C) 2008 Will
# Copyright (C) 2010, 2014, 2018-2020 Philipp Wolfer
# Copyright (C) 2013 Michael Wiencek
# Copyright (C) 2013, 2017-2021 Laurent Monin
# Copyright (C) 2016-2018 Sambhav Kothari
# Copyright (C) 2017 Sophist-UK
# Copyright (C) 2017 Ville Skyttä
# Copyright (C) 2020 Gabriel Ferreira
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
from picard.extension_points.formats import register_format
from picard.formats.ac3 import AC3File
from picard.formats.apev2 import (
AACFile,
MonkeysAudioFile,
MusepackFile,
OptimFROGFile,
TAKFile,
WavPackFile,
)
from picard.formats.asf import ASFFile
from picard.formats.id3 import (
AiffFile,
DSDIFFFile,
DSFFile,
MP3File,
TrueAudioFile,
)
from picard.formats.midi import MIDIFile
from picard.formats.mp4 import MP4File
from picard.formats.util import ( # noqa: F401 # pylint: disable=unused-import
ext_to_format,
guess_format,
open_,
supported_extensions,
supported_formats,
)
from picard.formats.vorbis import (
FLACFile,
OggAudioFile,
OggContainerFile,
OggFLACFile,
OggOpusFile,
OggSpeexFile,
OggTheoraFile,
OggVideoFile,
OggVorbisFile,
)
from picard.formats.wav import WAVFile
# Register all supported file formats in alphabetical order. DSDIFFFile may
# be unavailable (falsy) depending on the installed mutagen version.
for _format_class in (
    AACFile,
    AC3File,
    AiffFile,
    ASFFile,
    DSDIFFFile,
    DSFFile,
    FLACFile,
    MIDIFile,
    MonkeysAudioFile,
    MP3File,
    MP4File,
    MusepackFile,
    OggAudioFile,
    OggContainerFile,
    OggFLACFile,
    OggOpusFile,
    OggSpeexFile,
    OggTheoraFile,
    OggVideoFile,
    OggVorbisFile,
    OptimFROGFile,
    TAKFile,
    TrueAudioFile,
    WAVFile,
    WavPackFile,
):
    if _format_class:
        register_format(_format_class)
| 2,756
|
Python
|
.py
| 92
| 27.73913
| 80
| 0.79631
|
metabrainz/picard
| 3,687
| 383
| 10
|
GPL-2.0
|
9/5/2024, 5:11:02 PM (Europe/Amsterdam)
|
11,113
|
id3.py
|
metabrainz_picard/picard/formats/id3.py
|
# -*- coding: utf-8 -*-
#
# Picard, the next-generation MusicBrainz tagger
#
# Copyright (C) 2006-2009, 2011-2012 Lukáš Lalinský
# Copyright (C) 2008-2011, 2014, 2018-2021, 2023 Philipp Wolfer
# Copyright (C) 2009 Carlin Mangar
# Copyright (C) 2011-2012 Johannes Weißl
# Copyright (C) 2011-2014 Michael Wiencek
# Copyright (C) 2011-2014 Wieland Hoffmann
# Copyright (C) 2013 Calvin Walton
# Copyright (C) 2013-2014, 2017-2021, 2023-2024 Laurent Monin
# Copyright (C) 2013-2015, 2017, 2021 Sophist-UK
# Copyright (C) 2015 Frederik “Freso” S. Olesen
# Copyright (C) 2016 Christoph Reiter
# Copyright (C) 2016-2018 Sambhav Kothari
# Copyright (C) 2017 tungol
# Copyright (C) 2019 Zenara Daley
# Copyright (C) 2023 certuna
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
from collections import Counter
from enum import IntEnum
import re
from urllib.parse import urlparse
from mutagen import id3
import mutagen.aiff
import mutagen.apev2
import mutagen.dsf
import mutagen.mp3
import mutagen.trueaudio
from picard import log
from picard.config import get_config
from picard.coverart.image import (
CoverArtImageError,
TagCoverArtImage,
)
from picard.coverart.utils import types_from_id3
from picard.file import File
from picard.formats.mutagenext import (
compatid3,
delall_ci,
)
from picard.metadata import Metadata
from picard.util import (
encode_filename,
sanitize_date,
)
from picard.util.tags import (
parse_comment_tag,
parse_subtag,
)
try:
    from itertools import batched
except ImportError:
    # itertools.batched is only available in Python >= 3.12
    from itertools import islice

    def batched(iterable, n):
        """Yield successive *n*-sized tuples from *iterable*.

        The final tuple may be shorter than *n*. Mirrors the behavior of
        itertools.batched() from Python 3.12.
        """
        if n < 1:
            raise ValueError('n must be at least one')
        iterator = iter(iterable)
        chunk = tuple(islice(iterator, n))
        while chunk:
            yield chunk
            chunk = tuple(islice(iterator, n))
# Tags that cannot be represented in ID3 (Opus/EBU R128 gain tags).
UNSUPPORTED_TAGS = {'r128_album_gain', 'r128_track_gain'}
# Expose the iTunes GRP1 frame class through the id3 module namespace so
# getattr(id3, 'GRP1') works like any other frame class.
id3.GRP1 = compatid3.GRP1
class Id3Encoding(IntEnum):
    """Numeric text-encoding identifiers as defined by the ID3v2 spec."""
    LATIN1 = 0
    UTF16 = 1
    UTF16BE = 2
    UTF8 = 3

    # Previously a plain function in the Enum body: that works when called
    # on the class, but would mis-bind the member as the argument if ever
    # accessed through an instance. @staticmethod makes the intent explicit
    # and is safe either way.
    @staticmethod
    def from_config(id3v2_encoding):
        """Map the 'id3v2_encoding' setting string to an Id3Encoding.

        Unknown values fall back to LATIN1.
        """
        return {
            'utf-8': Id3Encoding.UTF8,
            'utf-16': Id3Encoding.UTF16
        }.get(id3v2_encoding, Id3Encoding.LATIN1)
def id3text(text, encoding):
    """Return *text* restricted to code points representable in the given
    numeric ID3 encoding.

    Only Latin-1 is lossy: unencodable characters are replaced. All other
    encodings can represent any code point, so the text passes through.
    """
    if encoding != Id3Encoding.LATIN1:
        return text
    return text.encode('latin1', 'replace').decode('latin1')
def _remove_people_with_role(tags, frames, role):
for frame in tags.values():
if frame.FrameID in frames:
for people in list(frame.people):
if people[0] == role:
frame.people.remove(people)
class ID3File(File):
    """Generic ID3-based file."""
    # True only for MP3File; controls optional APEv2 removal after save.
    _IsMP3 = False

    # Obsolete/custom ID3v2.3 frames upgraded to their v2.4 equivalents on load.
    __upgrade = {
        'XSOP': 'TSOP',
        'TXXX:ALBUMARTISTSORT': 'TSO2',
        'TXXX:COMPOSERSORT': 'TSOC',
        'TXXX:mood': 'TMOO',
        'TXXX:RELEASEDATE': 'TDRL',
    }

    # ID3 frame ID -> Picard tag name.
    __translate = {
        # In same sequence as defined at http://id3.org/id3v2.4.0-frames
        # 'TIT1': 'grouping', # Depends on itunes_compatible_grouping
        'TIT2': 'title',
        'TIT3': 'subtitle',
        'TALB': 'album',
        'TSST': 'discsubtitle',
        'TSRC': 'isrc',
        'TPE1': 'artist',
        'TPE2': 'albumartist',
        'TPE3': 'conductor',
        'TPE4': 'remixer',
        'TEXT': 'lyricist',
        'TCOM': 'composer',
        'TENC': 'encodedby',
        'TBPM': 'bpm',
        'TKEY': 'key',
        'TLAN': 'language',
        'TCON': 'genre',
        'TMED': 'media',
        'TMOO': 'mood',
        'TCOP': 'copyright',
        'TPUB': 'label',
        'TDOR': 'originaldate',
        'TDRC': 'date',
        'TDRL': 'releasedate',
        'TSSE': 'encodersettings',
        'TSOA': 'albumsort',
        'TSOP': 'artistsort',
        'TSOT': 'titlesort',
        'WCOP': 'license',
        'WOAR': 'website',
        'COMM': 'comment',
        'TOAL': 'originalalbum',
        'TOPE': 'originalartist',
        'TOFN': 'originalfilename',
        # The following are informal iTunes extensions to id3v2:
        'TCMP': 'compilation',
        'TSOC': 'composersort',
        'TSO2': 'albumartistsort',
        'MVNM': 'movement'
    }
    __rtranslate = {v: k for k, v in __translate.items()}
    # GRP1 added after building the reverse map: reading is unconditional,
    # but writing grouping depends on the itunes_compatible_grouping setting.
    __translate['GRP1'] = 'grouping'  # Always read, but writing depends on itunes_compatible_grouping

    # TXXX free-text description -> Picard tag name (case-sensitive).
    __translate_freetext = {
        'MusicBrainz Artist Id': 'musicbrainz_artistid',
        'MusicBrainz Album Id': 'musicbrainz_albumid',
        'MusicBrainz Album Artist Id': 'musicbrainz_albumartistid',
        'MusicBrainz Album Type': 'releasetype',
        'MusicBrainz Album Status': 'releasestatus',
        'MusicBrainz TRM Id': 'musicbrainz_trmid',
        'MusicBrainz Release Track Id': 'musicbrainz_trackid',
        'MusicBrainz Disc Id': 'musicbrainz_discid',
        'MusicBrainz Work Id': 'musicbrainz_workid',
        'MusicBrainz Release Group Id': 'musicbrainz_releasegroupid',
        'MusicBrainz Original Album Id': 'musicbrainz_originalalbumid',
        'MusicBrainz Original Artist Id': 'musicbrainz_originalartistid',
        'MusicBrainz Album Release Country': 'releasecountry',
        'MusicIP PUID': 'musicip_puid',
        'Acoustid Fingerprint': 'acoustid_fingerprint',
        'Acoustid Id': 'acoustid_id',
        'SCRIPT': 'script',
        'LICENSE': 'license',
        'CATALOGNUMBER': 'catalognumber',
        'BARCODE': 'barcode',
        'ASIN': 'asin',
        'MusicMagic Fingerprint': 'musicip_fingerprint',
        'ARTISTS': 'artists',
        'DIRECTOR': 'director',
        'WORK': 'work',
        'Writer': 'writer',
        'SHOWMOVEMENT': 'showmovement',
    }
    __rtranslate_freetext = {v: k for k, v in __translate_freetext.items()}
    __translate_freetext['writer'] = 'writer'  # For backward compatibility of case

    # Freetext fields that are loaded case-insensitive
    __rtranslate_freetext_ci = {
        'replaygain_album_gain': 'REPLAYGAIN_ALBUM_GAIN',
        'replaygain_album_peak': 'REPLAYGAIN_ALBUM_PEAK',
        'replaygain_album_range': 'REPLAYGAIN_ALBUM_RANGE',
        'replaygain_track_gain': 'REPLAYGAIN_TRACK_GAIN',
        'replaygain_track_peak': 'REPLAYGAIN_TRACK_PEAK',
        'replaygain_track_range': 'REPLAYGAIN_TRACK_RANGE',
        'replaygain_reference_loudness': 'REPLAYGAIN_REFERENCE_LOUDNESS',
    }
    __translate_freetext_ci = {b.lower(): a for a, b in __rtranslate_freetext_ci.items()}

    # Obsolete tag names which will still be loaded, but will get renamed on saving
    __rename_freetext = {
        'Artists': 'ARTISTS',
        'Work': 'WORK',
    }
    __rrename_freetext = {v: k for k, v in __rename_freetext.items()}

    # TIPL "involved people" role -> Picard tag name (and reverse).
    _tipl_roles = {
        'engineer': 'engineer',
        'arranger': 'arranger',
        'producer': 'producer',
        'DJ-mix': 'djmixer',
        'mix': 'mixer',
    }
    _rtipl_roles = {v: k for k, v in _tipl_roles.items()}

    # Tags handled specially (TRCK/TPOS/MVIN composites), not via the maps above.
    __other_supported_tags = ('discnumber', 'tracknumber',
                              'totaldiscs', 'totaltracks',
                              'movementnumber', 'movementtotal')
    # Parse "n" or "n/total" style frame values into their component tags.
    __tag_re_parse = {
        'TRCK': re.compile(r'^(?P<tracknumber>\d+)(?:/(?P<totaltracks>\d+))?$'),
        'TPOS': re.compile(r'^(?P<discnumber>\d+)(?:/(?P<totaldiscs>\d+))?$'),
        'MVIN': re.compile(r'^(?P<movementnumber>\d+)(?:/(?P<movementtotal>\d+))?$')
    }
    # LRC timestamp patterns: [mm:ss.mmm] line stamps, <mm:ss.mmm> syllable
    # stamps (A2 enhanced LRC), and a combined pattern matching either.
    __lrc_line_re_parse = re.compile(r'(\[\d\d:\d\d\.\d\d\d\])')
    __lrc_syllable_re_parse = re.compile(r'(<\d\d:\d\d\.\d\d\d>)')
    __lrc_both_re_parse = re.compile(r'(\[\d\d:\d\d\.\d\d\d\]|<\d\d:\d\d\.\d\d\d>)')
    def __init__(self, filename):
        super().__init__(filename)
        # Maps lower-cased replaygain tag names to the TXXX description
        # casing found in the file, so saving preserves the original casing.
        self.__casemap = {}
    def build_TXXX(self, encoding, desc, values):
        """Construct and return a TXXX frame."""
        # This is here so that plugins can customize the behavior of TXXX
        # frames in particular via subclassing.
        # discussion: https://github.com/metabrainz/picard/pull/634
        # discussion: https://github.com/metabrainz/picard/pull/635
        # Used in the plugin "Compatible TXXX frames"
        # PR: https://github.com/metabrainz/picard-plugins/pull/83
        return id3.TXXX(encoding=encoding, desc=desc, text=values)
def _load(self, filename):
log.debug("Loading file %r", filename)
self.__casemap = {}
file = self._get_file(encode_filename(filename))
tags = file.tags or {}
config = get_config()
itunes_compatible = config.setting['itunes_compatible_grouping']
rating_user_email = id3text(config.setting['rating_user_email'], Id3Encoding.LATIN1)
rating_steps = config.setting['rating_steps']
# upgrade custom 2.3 frames to 2.4
for old, new in self.__upgrade.items():
if old in tags and new not in tags:
f = tags.pop(old)
tags.add(getattr(id3, new)(encoding=f.encoding, text=f.text))
metadata = Metadata()
for frame in tags.values():
frameid = frame.FrameID
if frameid in self.__translate:
name = self.__translate[frameid]
if frameid.startswith('T') or frameid in {'GRP1', 'MVNM'}:
for text in frame.text:
if text:
metadata.add(name, text)
elif frameid == 'COMM':
for text in frame.text:
if text:
if frame.lang == 'eng':
name = '%s:%s' % (name, frame.desc)
else:
name = '%s:%s:%s' % (name, frame.lang, frame.desc)
metadata.add(name, text)
else:
metadata.add(name, frame)
elif frameid == 'TIT1':
name = 'work' if itunes_compatible else 'grouping'
for text in frame.text:
if text:
metadata.add(name, text)
elif frameid == 'TMCL':
for role, name in frame.people:
if role == 'performer':
role = ''
if role:
metadata.add('performer:%s' % role, name)
else:
metadata.add('performer', name)
elif frameid == 'TIPL':
# If file is ID3v2.3, TIPL tag could contain TMCL
# so we will test for TMCL values and add to TIPL if not TMCL
for role, name in frame.people:
if role in self._tipl_roles and name:
metadata.add(self._tipl_roles[role], name)
else:
if role == 'performer':
role = ''
if role:
metadata.add('performer:%s' % role, name)
else:
metadata.add('performer', name)
elif frameid == 'TXXX':
name = frame.desc
name_lower = name.lower()
if name in self.__rename_freetext:
name = self.__rename_freetext[name]
if name_lower in self.__translate_freetext_ci:
orig_name = name
name = self.__translate_freetext_ci[name_lower]
self.__casemap[name] = orig_name
elif name in self.__translate_freetext:
name = self.__translate_freetext[name]
elif ((name in self.__rtranslate)
!= (name in self.__rtranslate_freetext)):
# If the desc of a TXXX frame conflicts with the name of a
# Picard tag, load it into ~id3:TXXX:desc rather than desc.
#
# This basically performs an XOR, making sure that 'name'
# is in __rtranslate or __rtranslate_freetext, but not
# both. (Being in both implies we support reading it both
# ways.) Currently, the only tag in both is license.
name = '~id3:TXXX:' + name
for text in frame.text:
metadata.add(name, text)
elif frameid == 'USLT':
name = 'lyrics'
if frame.desc:
name += ':%s' % frame.desc
metadata.add(name, frame.text)
elif frameid == 'SYLT' and frame.type == 1:
if frame.format != 2:
log.warning("Unsupported SYLT format %d in %r, only 2 is supported", frame.format, filename)
continue
name = 'syncedlyrics'
if frame.lang:
name += ':%s' % frame.lang
if frame.desc:
name += ':%s' % frame.desc
elif frame.desc:
name += '::%s' % frame.desc
lrc_lyrics = self._parse_sylt_text(frame.text, file.info.length)
metadata.add(name, lrc_lyrics)
elif frameid == 'UFID' and frame.owner == "http://musicbrainz.org":
metadata['musicbrainz_recordingid'] = frame.data.decode('ascii', 'ignore')
elif frameid in self.__tag_re_parse.keys():
m = self.__tag_re_parse[frameid].search(frame.text[0])
if m:
for name, value in m.groupdict().items():
if value is not None:
metadata[name] = value
else:
log.error("Invalid %s value '%s' dropped in %r", frameid, frame.text[0], filename)
elif frameid == 'APIC':
try:
coverartimage = TagCoverArtImage(
file=filename,
tag=frameid,
types=types_from_id3(frame.type),
comment=frame.desc,
support_types=True,
data=frame.data,
id3_type=frame.type,
)
except CoverArtImageError as e:
log.error("Cannot load image from %r: %s", filename, e)
else:
metadata.images.append(coverartimage)
elif frameid == 'POPM':
# Rating in ID3 ranges from 0 to 255, normalize this to the range 0 to 5
if frame.email == rating_user_email:
rating = int(round(frame.rating / 255.0 * (rating_steps - 1)))
metadata.add('~rating', rating)
if 'date' in metadata:
sanitized = sanitize_date(metadata.getall('date')[0])
if sanitized:
metadata['date'] = sanitized
self._info(metadata, file)
return metadata
    def _save(self, filename, metadata):
        """Save metadata to the file."""
        log.debug("Saving file %r", filename)
        tags = self._get_tags(filename)
        config = get_config()
        if config.setting['clear_existing_tags']:
            # Optionally keep the embedded images when clearing everything.
            cover = tags.getall('APIC') if config.setting['preserve_images'] else None
            tags.clear()
            if cover:
                tags.setall('APIC', cover)
        images_to_save = list(metadata.images.to_be_saved_to_tags())
        if images_to_save:
            tags.delall('APIC')
        encoding = Id3Encoding.from_config(config.setting['id3v2_encoding'])
        # TRCK/TPOS/MVIN composite number frames must be Latin-1.
        if 'tracknumber' in metadata:
            if 'totaltracks' in metadata:
                text = '%s/%s' % (metadata['tracknumber'], metadata['totaltracks'])
            else:
                text = metadata['tracknumber']
            tags.add(id3.TRCK(encoding=Id3Encoding.LATIN1, text=id3text(text, Id3Encoding.LATIN1)))
        if 'discnumber' in metadata:
            if 'totaldiscs' in metadata:
                text = '%s/%s' % (metadata['discnumber'], metadata['totaldiscs'])
            else:
                text = metadata['discnumber']
            tags.add(id3.TPOS(encoding=Id3Encoding.LATIN1, text=id3text(text, Id3Encoding.LATIN1)))
        if 'movementnumber' in metadata:
            if 'movementtotal' in metadata:
                text = '%s/%s' % (metadata['movementnumber'], metadata['movementtotal'])
            else:
                text = metadata['movementnumber']
            tags.add(id3.MVIN(encoding=Id3Encoding.LATIN1, text=id3text(text, Id3Encoding.LATIN1)))
        # This is necessary because mutagens HashKey for APIC frames only
        # includes the FrameID (APIC) and description - it's basically
        # impossible to save two images, even of different types, without
        # any description.
        counters = Counter()
        for image in images_to_save:
            desc = desctag = image.comment
            if counters[desc] > 0:
                # Disambiguate duplicate descriptions with " (n)" suffixes.
                if desc:
                    desctag = "%s (%i)" % (desc, counters[desc])
                else:
                    desctag = "(%i)" % counters[desc]
            counters[desc] += 1
            tags.add(id3.APIC(encoding=Id3Encoding.LATIN1,
                              mime=image.mimetype,
                              type=image.id3_type,
                              desc=id3text(desctag, Id3Encoding.LATIN1),
                              data=image.data))
        # People frames are accumulated over the whole loop and added at
        # the end, only if they received any entries.
        tmcl = mutagen.id3.TMCL(encoding=encoding, people=[])
        tipl = mutagen.id3.TIPL(encoding=encoding, people=[])
        for name, values in metadata.rawitems():
            values = [id3text(v, encoding) for v in values]
            name = id3text(name, encoding)
            name_lower = name.lower()
            if not self.supports_tag(name):
                continue
            elif name == 'performer' or name.startswith('performer:'):
                if ':' in name:
                    role = name.split(':', 1)[1]
                else:
                    role = 'performer'
                for value in values:
                    if config.setting['write_id3v23']:
                        # TIPL will be upgraded to IPLS
                        tipl.people.append([role, value])
                    else:
                        tmcl.people.append([role, value])
            elif name == 'comment' or name.startswith('comment:'):
                (lang, desc) = parse_comment_tag(name)
                if desc.lower()[:4] == 'itun':
                    # iTunes normalization comments: Latin-1, NUL-terminated.
                    tags.delall('COMM:' + desc)
                    tags.add(id3.COMM(encoding=Id3Encoding.LATIN1, desc=desc, lang='eng', text=[v + '\x00' for v in values]))
                else:
                    tags.add(id3.COMM(encoding=encoding, desc=desc, lang=lang, text=values))
            elif name.startswith('lyrics:') or name == 'lyrics':
                if ':' in name:
                    desc = name.split(':', 1)[1]
                else:
                    desc = ''
                for value in values:
                    tags.add(id3.USLT(encoding=encoding, desc=desc, text=value))
            elif name == 'syncedlyrics' or name.startswith('syncedlyrics:'):
                (lang, desc) = parse_subtag(name)
                for value in values:
                    sylt_lyrics = self._parse_lrc_text(value)
                    # If the text does not contain any timestamps, the tag is not added
                    if sylt_lyrics:
                        tags.add(id3.SYLT(encoding=encoding, lang=lang, format=2, type=1, desc=desc, text=sylt_lyrics))
            elif name in self._rtipl_roles:
                for value in values:
                    tipl.people.append([self._rtipl_roles[name], value])
            elif name == 'musicbrainz_recordingid':
                tags.add(id3.UFID(owner="http://musicbrainz.org", data=bytes(values[0], 'ascii')))
            elif name == '~rating':
                rating_user_email = id3text(config.setting['rating_user_email'], Id3Encoding.LATIN1)
                # Search for an existing POPM frame to get the current playcount
                for frame in tags.values():
                    if frame.FrameID == 'POPM' and frame.email == rating_user_email:
                        count = getattr(frame, 'count', 0)
                        break
                else:
                    count = 0
                # Convert rating to range between 0 and 255
                rating = int(round(float(values[0]) * 255 / (config.setting['rating_steps'] - 1)))
                tags.add(id3.POPM(email=rating_user_email, rating=rating, count=count))
            elif name == 'grouping':
                if config.setting['itunes_compatible_grouping']:
                    tags.add(id3.GRP1(encoding=encoding, text=values))
                else:
                    tags.add(id3.TIT1(encoding=encoding, text=values))
            elif name == 'work' and config.setting['itunes_compatible_grouping']:
                tags.add(id3.TIT1(encoding=encoding, text=values))
                tags.delall('TXXX:Work')
                tags.delall('TXXX:WORK')
            elif name in self.__rtranslate:
                frameid = self.__rtranslate[name]
                if frameid.startswith('W'):
                    # URL frames require scheme + netloc to be valid.
                    valid_urls = all(all(urlparse(v)[:2]) for v in values)
                    if frameid == 'WCOP':
                        # Only add WCOP if there is only one license URL, otherwise use TXXX:LICENSE
                        if len(values) > 1 or not valid_urls:
                            tags.delall('WCOP')
                            tags.add(self.build_TXXX(encoding, self.__rtranslate_freetext[name], values))
                        else:
                            tags.delall('TXXX:' + self.__rtranslate_freetext[name])
                            tags.add(id3.WCOP(url=values[0]))
                    elif frameid == 'WOAR' and valid_urls:
                        tags.delall('WOAR')
                        for url in values:
                            tags.add(id3.WOAR(url=url))
                elif frameid.startswith('T') or frameid == 'MVNM':
                    if config.setting['write_id3v23']:
                        if frameid == 'TMOO':
                            tags.add(self.build_TXXX(encoding, 'mood', values))
                        # No need to care about the TMOO tag being added again as it is
                        # automatically deleted by Mutagen if id2v23 is selected
                        if frameid == 'TDRL':
                            tags.add(self.build_TXXX(encoding, 'RELEASEDATE', values))
                    tags.add(getattr(id3, frameid)(encoding=encoding, text=values))
                    # Remove the obsolete frames the 2.4 ones replace.
                    if frameid == 'TSOA':
                        tags.delall('XSOA')
                    elif frameid == 'TSOP':
                        tags.delall('XSOP')
                    elif frameid == 'TSO2':
                        tags.delall('TXXX:ALBUMARTISTSORT')
            elif name_lower in self.__rtranslate_freetext_ci:
                # Replaygain tags: reuse the casing found on load if any.
                if name_lower in self.__casemap:
                    description = self.__casemap[name_lower]
                else:
                    description = self.__rtranslate_freetext_ci[name_lower]
                delall_ci(tags, 'TXXX:' + description)
                tags.add(self.build_TXXX(encoding, description, values))
            elif name in self.__rtranslate_freetext:
                description = self.__rtranslate_freetext[name]
                if description in self.__rrename_freetext:
                    # Drop the obsolete spelling of this TXXX description.
                    tags.delall('TXXX:' + self.__rrename_freetext[description])
                tags.add(self.build_TXXX(encoding, description, values))
            elif name.startswith('~id3:'):
                # Raw frame requested explicitly, e.g. ~id3:TXXX:desc.
                name = name[5:]
                if name.startswith('TXXX:'):
                    tags.add(self.build_TXXX(encoding, name[5:], values))
                else:
                    frameclass = getattr(id3, name[:4], None)
                    if frameclass:
                        tags.add(frameclass(encoding=encoding, text=values))
            # don't save private / already stored tags
            elif not name.startswith('~') and name not in self.__other_supported_tags:
                tags.add(self.build_TXXX(encoding, name, values))
        if tmcl.people:
            tags.add(tmcl)
        if tipl.people:
            tags.add(tipl)
        self._remove_deleted_tags(metadata, tags)
        self._save_tags(tags, encode_filename(filename))
        if self._IsMP3 and config.setting['remove_ape_from_mp3']:
            # Best-effort APEv2 removal from MP3 files.
            # NOTE(review): the broad BaseException catch also swallows
            # KeyboardInterrupt/SystemExit — consider narrowing to Exception.
            try:
                mutagen.apev2.delete(encode_filename(filename))
            except BaseException:
                pass
    def _remove_deleted_tags(self, metadata, tags):
        """Remove the tags from the file that were deleted in the UI"""
        config = get_config()
        for name in metadata.deleted_tags:
            real_name = self._get_tag_name(name)
            try:
                if name.startswith('performer:'):
                    role = name.split(':', 1)[1]
                    _remove_people_with_role(tags, ['TMCL', 'TIPL', 'IPLS'], role)
                elif name.startswith('comment:') or name == 'comment':
                    # Delete only COMM frames matching both language and desc.
                    (lang, desc) = parse_comment_tag(name)
                    for key, frame in list(tags.items()):
                        if (frame.FrameID == 'COMM' and frame.desc == desc
                                and frame.lang == lang):
                            del tags[key]
                elif name.startswith('lyrics:') or name == 'lyrics':
                    if ':' in name:
                        desc = name.split(':', 1)[1]
                    else:
                        desc = ''
                    for key, frame in list(tags.items()):
                        if frame.FrameID == 'USLT' and frame.desc == desc:
                            del tags[key]
                elif name == 'syncedlyrics' or name.startswith('syncedlyrics:'):
                    (lang, desc) = parse_subtag(name)
                    for key, frame in list(tags.items()):
                        if frame.FrameID == 'SYLT' and frame.desc == desc and frame.lang == lang \
                                and frame.type == 1:
                            del tags[key]
                elif name in self._rtipl_roles:
                    role = self._rtipl_roles[name]
                    _remove_people_with_role(tags, ['TIPL', 'IPLS'], role)
                elif name == 'musicbrainz_recordingid':
                    for key, frame in list(tags.items()):
                        if frame.FrameID == 'UFID' and frame.owner == "http://musicbrainz.org":
                            del tags[key]
                elif name == 'license':
                    # license can live in both WCOP and TXXX:LICENSE.
                    tags.delall(real_name)
                    tags.delall('TXXX:' + self.__rtranslate_freetext[name])
                elif real_name == 'POPM':
                    # Only remove the POPM frame for the configured user.
                    rating_user_email = id3text(config.setting['rating_user_email'], Id3Encoding.LATIN1)
                    for key, frame in list(tags.items()):
                        if frame.FrameID == 'POPM' and frame.email == rating_user_email:
                            del tags[key]
                elif real_name in self.__translate:
                    tags.delall(real_name)
                elif name.lower() in self.__rtranslate_freetext_ci:
                    delall_ci(tags, 'TXXX:' + self.__rtranslate_freetext_ci[name.lower()])
                elif real_name in self.__translate_freetext:
                    tags.delall('TXXX:' + real_name)
                    if real_name in self.__rrename_freetext:
                        # Also remove the obsolete spelling of the description.
                        tags.delall('TXXX:' + self.__rrename_freetext[real_name])
                elif not name.startswith('~id3:') and name not in self.__other_supported_tags:
                    tags.delall('TXXX:' + name)
                elif name.startswith('~id3:'):
                    frameid = name[5:]
                    tags.delall(frameid)
                elif name in self.__other_supported_tags:
                    del tags[real_name]
            except KeyError:
                # Tag was not present in the file; nothing to remove.
                pass
@classmethod
def supports_tag(cls, name):
return ((name and not name.startswith('~') and name not in UNSUPPORTED_TAGS)
or name == '~rating'
or name.startswith('~id3'))
def _get_tag_name(self, name):
if name in self.__rtranslate:
return self.__rtranslate[name]
elif name in self.__rtranslate_freetext:
return self.__rtranslate_freetext[name]
elif name == '~rating':
return 'POPM'
elif name == 'tracknumber':
return 'TRCK'
elif name == 'discnumber':
return 'TPOS'
elif name == 'movementnumber':
return 'MVIN'
else:
return None
    def _get_file(self, filename):
        # Subclasses return the concrete mutagen file object for `filename`.
        raise NotImplementedError()
    def _get_tags(self, filename):
        # Load the existing ID3 tags, or start from an empty CompatID3
        # object if the file has no ID3 header yet.
        try:
            return compatid3.CompatID3(encode_filename(filename))
        except mutagen.id3.ID3NoHeaderError:
            return compatid3.CompatID3()
def _save_tags(self, tags, filename):
config = get_config()
if config.setting['write_id3v1']:
v1 = 2
else:
v1 = 0
if config.setting['write_id3v23']:
tags.update_to_v23()
separator = config.setting['id3v23_join_with']
tags.save(filename, v2_version=3, v1=v1, v23_sep=separator)
else:
tags.update_to_v24()
tags.save(filename, v2_version=4, v1=v1)
def format_specific_metadata(self, metadata, tag, settings=None):
if not settings:
settings = get_config().setting
if not settings['write_id3v23']:
return super().format_specific_metadata(metadata, tag, settings)
values = metadata.getall(tag)
if not values:
return values
if tag == 'originaldate':
values = [v[:4] for v in values]
elif tag == 'date':
values = [(v[:4] if len(v) < 10 else v) for v in values]
# If this is a multi-valued field, then it needs to be flattened,
# unless it's TIPL or TMCL which can still be multi-valued.
if (len(values) > 1 and tag not in ID3File._rtipl_roles
and not tag.startswith('performer:')):
join_with = settings['id3v23_join_with']
values = [join_with.join(values)]
return values
    def _parse_sylt_text(self, text, length):
        """Convert SYLT frame data into LRC-style lyrics text.

        *text* is a sequence of (lyrics, milliseconds) pairs; *length* is the
        track length in seconds, used as the end timestamp of the last entry.
        Newlines inside a syllable get interpolated line timestamps based on
        character counts.
        """
        def milliseconds_to_timestamp(ms):
            # Format milliseconds as "mm:ss.mmm".
            minutes = ms // (60 * 1000)
            seconds = (ms % (60 * 1000)) // 1000
            remaining_ms = ms % 1000
            return f"{minutes:02d}:{seconds:02d}.{remaining_ms:03d}"

        lyrics, milliseconds = zip(*text)
        # Append the track end so every syllable has a following timestamp.
        milliseconds = (*milliseconds, length * 1000)
        first_timestamp = milliseconds_to_timestamp(milliseconds[0])
        lrc_lyrics = [f"[{first_timestamp}]"]
        # NOTE: the loop variable deliberately shadows the `lyrics` tuple,
        # which has already been fully consumed by this point.
        for i, lyrics in enumerate(lyrics):
            timestamp = milliseconds_to_timestamp(milliseconds[i])
            if '\n' in lyrics:
                split = lyrics.split('\n')
                lrc_lyrics.append(f"<{timestamp}>{split[0]}")
                # Distribute the time until the next syllable evenly over the
                # characters, to estimate timestamps for the wrapped lines.
                distribution = (milliseconds[i + 1] - milliseconds[i]) / len(lyrics.replace('\n', ''))
                estimation = milliseconds[i] + distribution * len(split[0])
                for line in split[1:]:
                    timestamp = milliseconds_to_timestamp(int(estimation))
                    estimation += distribution * len(line)
                    lrc_lyrics.append(f"\n[{timestamp}]{line}")
            else:
                lrc_lyrics.append(f"<{timestamp}>{lyrics}")
        return "".join(lrc_lyrics)
def _parse_lrc_text(self, text):
sylt_lyrics = []
# Remove standard lrc timestamps if text is in a2 enhanced lrc
if self.__lrc_syllable_re_parse.search(text):
text = self.__lrc_line_re_parse.sub("", text)
timestamp_and_lyrics = batched(self.__lrc_both_re_parse.split(text)[1:], 2)
for timestamp, lyrics in timestamp_and_lyrics:
minutes, seconds, ms = timestamp[1:-1].replace(".", ":").split(':')
milliseconds = int(minutes) * 60 * 1000 + int(float('%s.%s' % (seconds, ms)) * 1000)
sylt_lyrics.append((lyrics, milliseconds))
# Remove frames with no lyrics and a repeating timestamp
for i, frame in enumerate(sylt_lyrics[:-1]):
if not frame[0] and frame[1] == sylt_lyrics[i + 1][1]:
sylt_lyrics.pop(i)
return sylt_lyrics
class MP3File(ID3File):
    """MPEG-1 audio (MP3/MP2) file."""

    EXTENSIONS = [".mp3", ".mp2", ".m2a"]
    NAME = "MPEG-1 Audio"
    _IsMP3 = True
    _File = mutagen.mp3.MP3

    def _get_file(self, filename):
        # Load with CompatID3 so extra v2.4 frames are handled on v2.3 saves.
        return self._File(filename, ID3=compatid3.CompatID3)

    def _info(self, metadata, file):
        super()._info(metadata, file)
        version_suffix = ''
        if file.tags is not None and file.info.layer == 3:
            version_suffix = ' - ID3v%d.%d' % (file.tags.version[0], file.tags.version[1])
        metadata['~format'] = 'MPEG-1 Layer %d%s' % (file.info.layer, version_suffix)
class TrueAudioFile(ID3File):
    """The True Audio (TTA) file."""

    EXTENSIONS = [".tta"]
    NAME = "The True Audio"
    _File = mutagen.trueaudio.TrueAudio

    def _get_file(self, filename):
        # Same CompatID3-backed loading as used for MP3 files.
        return self._File(filename, ID3=compatid3.CompatID3)
class NonCompatID3File(ID3File):
    """Base class for ID3 formats whose loader cannot take `compatid3.CompatID3`.

    These formats still get the extended frame registry via
    ``known_frames``, and v2.3 downgrades go through the module-level
    ``compatid3.update_to_v23`` helper instead of a tags method.
    """

    def _get_file(self, filename):
        return self._File(filename, known_frames=compatid3.known_frames)

    def _get_tags(self, filename):
        audio = self._get_file(filename)
        if audio.tags is None:
            audio.add_tags()
        return audio.tags

    def _save_tags(self, tags, filename):
        config = get_config()
        if config.setting['write_id3v23']:
            compatid3.update_to_v23(tags)
            tags.save(filename, v2_version=3,
                      v23_sep=config.setting['id3v23_join_with'])
        else:
            tags.update_to_v24()
            tags.save(filename, v2_version=4)
class DSFFile(NonCompatID3File):
    """DSF (DSD Stream File) file."""
    EXTENSIONS = [".dsf"]
    NAME = "DSF"
    _File = mutagen.dsf.DSF
class AiffFile(NonCompatID3File):
    """AIFF / AIFF-C file."""
    EXTENSIONS = [".aiff", ".aif", ".aifc"]
    NAME = "Audio Interchange File Format (AIFF)"
    _File = mutagen.aiff.AIFF
try:
    import mutagen.dsdiff

    class DSDIFFFile(NonCompatID3File):
        """DSDIFF (DFF) file."""
        EXTENSIONS = [".dff"]
        NAME = "DSDIFF"
        _File = mutagen.dsdiff.DSDIFF

except ImportError:
    # The installed mutagen has no DSDIFF support; disable the format.
    DSDIFFFile = None
| 35,543
|
Python
|
.py
| 767
| 33.35854
| 125
| 0.546747
|
metabrainz/picard
| 3,687
| 383
| 10
|
GPL-2.0
|
9/5/2024, 5:11:02 PM (Europe/Amsterdam)
|
11,114
|
mp4.py
|
metabrainz_picard/picard/formats/mp4.py
|
# -*- coding: utf-8 -*-
#
# Picard, the next-generation MusicBrainz tagger
#
# Copyright (C) 2006-2008, 2011 Lukáš Lalinský
# Copyright (C) 2009-2011, 2015, 2018-2023 Philipp Wolfer
# Copyright (C) 2011 Johannes Weißl
# Copyright (C) 2011-2014 Wieland Hoffmann
# Copyright (C) 2012-2013 Michael Wiencek
# Copyright (C) 2013 Calvin Walton
# Copyright (C) 2013 Frederik “Freso” S. Olesen
# Copyright (C) 2013-2014, 2018-2024 Laurent Monin
# Copyright (C) 2014-2015 Sophist-UK
# Copyright (C) 2016-2018 Sambhav Kothari
# Copyright (C) 2019 Reinaldo Antonio Camargo Rauch
# Copyright (C) 2023 certuna
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import re
from mutagen.mp4 import (
MP4,
MP4Cover,
)
from picard import log
from picard.config import get_config
from picard.coverart.image import (
CoverArtImageError,
TagCoverArtImage,
)
from picard.file import File
from picard.formats.mutagenext import delall_ci
from picard.metadata import Metadata
from picard.util import encode_filename
def _add_text_values_to_metadata(metadata, name, values):
for value in values:
metadata.add(name, value.decode('utf-8', 'replace').strip('\x00'))
# Custom MP4 tag names are restricted to Latin-1 (0x00-0xFF) characters.
_VALID_KEY_CHARS = re.compile('^[\x00-\xff]+$')

# Picard tags that cannot be represented in MP4 and are never written.
UNSUPPORTED_TAGS = {'syncedlyrics', 'r128_album_gain', 'r128_track_gain'}
def _is_valid_key(key):
    """Return True if *key* is a valid name for a custom MP4 tag.

    Valid keys are non-empty and contain only Latin-1 characters.
    """
    return _VALID_KEY_CHARS.match(key) is not None
class MP4File(File):
    """Support for MPEG-4 audio/video files.

    Picard tags are mapped onto four kinds of MP4 atoms, each with its
    own forward table (atom -> tag) and reverse table (``__r_*``,
    tag -> atom) used when saving: plain text atoms, boolean atoms,
    integer atoms and iTunes-style freeform
    ('----:com.apple.iTunes:...') atoms.  Track/disc numbers use the
    special tuple atoms 'trkn' and 'disk'.
    """
    EXTENSIONS = [".m4a", ".m4b", ".m4p", ".m4v", ".m4r", ".mp4"]
    NAME = "MPEG-4 Audio"
    _File = MP4

    # Fixed-name atoms holding text values.
    __text_tags = {
        '\xa9ART': 'artist',
        '\xa9nam': 'title',
        '\xa9alb': 'album',
        '\xa9wrt': 'composer',
        'aART': 'albumartist',
        '\xa9grp': 'grouping',
        '\xa9day': 'date',
        '\xa9gen': 'genre',
        '\xa9lyr': 'lyrics',
        '\xa9cmt': 'comment',
        '\xa9too': 'encodedby',
        '\xa9dir': 'director',
        'cprt': 'copyright',
        'soal': 'albumsort',
        'soaa': 'albumartistsort',
        'soar': 'artistsort',
        'sonm': 'titlesort',
        'soco': 'composersort',
        'sosn': 'showsort',
        'tvsh': 'show',
        'purl': 'podcasturl',
        '\xa9mvn': 'movement',
        '\xa9wrk': 'work',
    }
    __r_text_tags = {v: k for k, v in __text_tags.items()}

    # Atoms holding a single boolean flag.
    __bool_tags = {
        'pcst': 'podcast',
        'cpil': 'compilation',
        'pgap': 'gapless',
    }
    __r_bool_tags = {v: k for k, v in __bool_tags.items()}

    # Atoms holding integer values.
    __int_tags = {
        'tmpo': 'bpm',
        '\xa9mvi': 'movementnumber',
        '\xa9mvc': 'movementtotal',
        'shwm': 'showmovement',
    }
    __r_int_tags = {v: k for k, v in __int_tags.items()}

    # Freeform atoms with fixed, case-sensitive names.
    __freeform_tags = {
        '----:com.apple.iTunes:MusicBrainz Track Id': 'musicbrainz_recordingid',
        '----:com.apple.iTunes:MusicBrainz Artist Id': 'musicbrainz_artistid',
        '----:com.apple.iTunes:MusicBrainz Album Id': 'musicbrainz_albumid',
        '----:com.apple.iTunes:MusicBrainz Album Artist Id': 'musicbrainz_albumartistid',
        '----:com.apple.iTunes:MusicIP PUID': 'musicip_puid',
        '----:com.apple.iTunes:MusicBrainz Album Status': 'releasestatus',
        '----:com.apple.iTunes:MusicBrainz Album Release Country': 'releasecountry',
        '----:com.apple.iTunes:MusicBrainz Album Type': 'releasetype',
        '----:com.apple.iTunes:MusicBrainz Disc Id': 'musicbrainz_discid',
        '----:com.apple.iTunes:MusicBrainz TRM Id': 'musicbrainz_trmid',
        '----:com.apple.iTunes:MusicBrainz Work Id': 'musicbrainz_workid',
        '----:com.apple.iTunes:MusicBrainz Release Group Id': 'musicbrainz_releasegroupid',
        '----:com.apple.iTunes:MusicBrainz Release Track Id': 'musicbrainz_trackid',
        '----:com.apple.iTunes:MusicBrainz Original Album Id': 'musicbrainz_originalalbumid',
        '----:com.apple.iTunes:MusicBrainz Original Artist Id': 'musicbrainz_originalartistid',
        '----:com.apple.iTunes:Acoustid Fingerprint': 'acoustid_fingerprint',
        '----:com.apple.iTunes:Acoustid Id': 'acoustid_id',
        '----:com.apple.iTunes:ASIN': 'asin',
        '----:com.apple.iTunes:BARCODE': 'barcode',
        '----:com.apple.iTunes:PRODUCER': 'producer',
        '----:com.apple.iTunes:LYRICIST': 'lyricist',
        '----:com.apple.iTunes:CONDUCTOR': 'conductor',
        '----:com.apple.iTunes:ENGINEER': 'engineer',
        '----:com.apple.iTunes:MIXER': 'mixer',
        '----:com.apple.iTunes:DJMIXER': 'djmixer',
        '----:com.apple.iTunes:REMIXER': 'remixer',
        '----:com.apple.iTunes:ISRC': 'isrc',
        '----:com.apple.iTunes:MEDIA': 'media',
        '----:com.apple.iTunes:LABEL': 'label',
        '----:com.apple.iTunes:LICENSE': 'license',
        '----:com.apple.iTunes:CATALOGNUMBER': 'catalognumber',
        '----:com.apple.iTunes:SUBTITLE': 'subtitle',
        '----:com.apple.iTunes:DISCSUBTITLE': 'discsubtitle',
        '----:com.apple.iTunes:MOOD': 'mood',
        '----:com.apple.iTunes:SCRIPT': 'script',
        '----:com.apple.iTunes:LANGUAGE': 'language',
        '----:com.apple.iTunes:ARTISTS': 'artists',
        '----:com.apple.iTunes:WORK': 'work',
        '----:com.apple.iTunes:initialkey': 'key',
    }
    __r_freeform_tags = {v: k for k, v in __freeform_tags.items()}

    # Tags to load case insensitive. Case is preserved, but the specified case
    # is written if it is unset.
    __r_freeform_tags_ci = {
        'replaygain_album_gain': '----:com.apple.iTunes:REPLAYGAIN_ALBUM_GAIN',
        'replaygain_album_peak': '----:com.apple.iTunes:REPLAYGAIN_ALBUM_PEAK',
        'replaygain_album_range': '----:com.apple.iTunes:REPLAYGAIN_ALBUM_RANGE',
        'replaygain_track_gain': '----:com.apple.iTunes:REPLAYGAIN_TRACK_GAIN',
        'replaygain_track_peak': '----:com.apple.iTunes:REPLAYGAIN_TRACK_PEAK',
        'replaygain_track_range': '----:com.apple.iTunes:REPLAYGAIN_TRACK_RANGE',
        'replaygain_reference_loudness': '----:com.apple.iTunes:REPLAYGAIN_REFERENCE_LOUDNESS',
        'releasedate': '----:com.apple.iTunes:RELEASEDATE',
    }
    __freeform_tags_ci = {b.lower(): a for a, b in __r_freeform_tags_ci.items()}

    # Handled via the 'trkn' / 'disk' tuple atoms, not freeform atoms.
    __other_supported_tags = ('discnumber', 'tracknumber',
                              'totaldiscs', 'totaltracks')

    def __init__(self, filename):
        super().__init__(filename)
        # Maps lowercased tag names to the exact atom-name casing found
        # in the file, so saving preserves the original case.
        self.__casemap = {}

    def _load(self, filename):
        """Read *filename* and return its Metadata."""
        log.debug("Loading file %r", filename)
        self.__casemap = {}
        file = MP4(encode_filename(filename))
        tags = file.tags or {}
        metadata = Metadata()
        for name, values in tags.items():
            name_lower = name.lower()
            if name in self.__text_tags:
                for value in values:
                    metadata.add(self.__text_tags[name], value)
            elif name in self.__bool_tags:
                metadata.add(self.__bool_tags[name], values and '1' or '0')
            elif name in self.__int_tags:
                for value in values:
                    metadata.add(self.__int_tags[name], value)
            elif name in self.__freeform_tags:
                tag_name = self.__freeform_tags[name]
                _add_text_values_to_metadata(metadata, tag_name, values)
            elif name_lower in self.__freeform_tags_ci:
                tag_name = self.__freeform_tags_ci[name_lower]
                # Remember the file's casing for this tag.
                self.__casemap[tag_name] = name
                _add_text_values_to_metadata(metadata, tag_name, values)
            elif name == '----:com.apple.iTunes:fingerprint':
                for value in values:
                    value = value.decode('utf-8', 'replace').strip('\x00')
                    if value.startswith('MusicMagic Fingerprint'):
                        metadata.add('musicip_fingerprint', value[22:])
            elif name == 'trkn':
                try:
                    metadata['tracknumber'] = values[0][0]
                    metadata['totaltracks'] = values[0][1]
                except IndexError:
                    log.debug("trkn is invalid, ignoring")
            elif name == 'disk':
                try:
                    metadata['discnumber'] = values[0][0]
                    metadata['totaldiscs'] = values[0][1]
                except IndexError:
                    log.debug("disk is invalid, ignoring")
            elif name == 'covr':
                for value in values:
                    # Only JPEG and PNG cover art is supported.
                    if value.imageformat not in {value.FORMAT_JPEG, value.FORMAT_PNG}:
                        continue
                    try:
                        coverartimage = TagCoverArtImage(
                            file=filename,
                            tag=name,
                            data=value,
                        )
                    except CoverArtImageError as e:
                        log.error("Cannot load image from %r: %s", filename, e)
                    else:
                        metadata.images.append(coverartimage)
            # Read other freeform tags always case insensitive
            elif name.startswith('----:com.apple.iTunes:'):
                tag_name = name_lower[22:]
                self.__casemap[tag_name] = name[22:]
                # Only load if the atom is not already claimed by one of
                # the fixed mapping tables above.
                if (name not in self.__r_text_tags
                    and name not in self.__r_bool_tags
                    and name not in self.__r_int_tags
                    and name not in self.__r_freeform_tags
                    and name_lower not in self.__r_freeform_tags_ci
                    and name not in self.__other_supported_tags):
                    _add_text_values_to_metadata(metadata, tag_name, values)
        self._info(metadata, file)
        return metadata

    def _save(self, filename, metadata):
        """Write *metadata* back to *filename*."""
        log.debug("Saving file %r", filename)
        config = get_config()
        file = MP4(encode_filename(self.filename))
        if file.tags is None:
            file.add_tags()
        tags = file.tags
        if config.setting['clear_existing_tags']:
            # Optionally keep embedded cover art across the clear.
            cover = tags.get('covr') if config.setting['preserve_images'] else None
            tags.clear()
            if cover:
                tags['covr'] = cover
        for name, values in metadata.rawitems():
            # MP4 has single lyrics/comment atoms; fold qualified variants in.
            if name.startswith('lyrics:'):
                name = 'lyrics'
            if name == 'comment:':
                name = 'comment'
            if name in self.__r_text_tags:
                tags[self.__r_text_tags[name]] = values
            elif name in self.__r_bool_tags:
                tags[self.__r_bool_tags[name]] = (values[0] == '1')
            elif name in self.__r_int_tags:
                try:
                    tags[self.__r_int_tags[name]] = [int(value) for value in values]
                except ValueError:
                    pass
            elif name in self.__r_freeform_tags:
                values = [v.encode('utf-8') for v in values]
                tags[self.__r_freeform_tags[name]] = values
            elif name in self.__r_freeform_tags_ci:
                values = [v.encode('utf-8') for v in values]
                # Delete any differently-cased variants, then write using
                # the casing found at load time (or the canonical one).
                delall_ci(tags, self.__r_freeform_tags_ci[name])
                if name in self.__casemap:
                    name = self.__casemap[name]
                else:
                    name = self.__r_freeform_tags_ci[name]
                tags[name] = values
            elif name == 'musicip_fingerprint':
                tags['----:com.apple.iTunes:fingerprint'] = [b'MusicMagic Fingerprint%s' % v.encode('ascii') for v in values]
            elif self.supports_tag(name) and name not in self.__other_supported_tags:
                # Any other supported tag becomes a custom freeform atom.
                values = [v.encode('utf-8') for v in values]
                name = self.__casemap.get(name, name)
                tags['----:com.apple.iTunes:' + name] = values
        if 'tracknumber' in metadata:
            try:
                tracknumber = int(metadata['tracknumber'])
            except ValueError:
                pass
            else:
                totaltracks = 0
                if 'totaltracks' in metadata:
                    try:
                        totaltracks = int(metadata['totaltracks'])
                    except ValueError:
                        pass
                tags['trkn'] = [(tracknumber, totaltracks)]
        if 'discnumber' in metadata:
            try:
                discnumber = int(metadata['discnumber'])
            except ValueError:
                pass
            else:
                totaldiscs = 0
                if 'totaldiscs' in metadata:
                    try:
                        totaldiscs = int(metadata['totaldiscs'])
                    except ValueError:
                        pass
                tags['disk'] = [(discnumber, totaldiscs)]
        covr = []
        for image in metadata.images.to_be_saved_to_tags():
            if image.mimetype == 'image/jpeg':
                covr.append(MP4Cover(image.data, MP4Cover.FORMAT_JPEG))
            elif image.mimetype == 'image/png':
                covr.append(MP4Cover(image.data, MP4Cover.FORMAT_PNG))
        if covr:
            tags['covr'] = covr
        self._remove_deleted_tags(metadata, tags)
        file.save()

    def _remove_deleted_tags(self, metadata, tags):
        """Remove the tags from the file that were deleted in the UI"""
        for tag in metadata.deleted_tags:
            real_name = self._get_tag_name(tag)
            if real_name and real_name in tags:
                # totaltracks/totaldiscs live inside the trkn/disk tuples;
                # deleting the atom would also delete the track/disc number.
                if tag not in {'totaltracks', 'totaldiscs'}:
                    del tags[real_name]

    @classmethod
    def supports_tag(cls, name):
        """Return whether tag *name* can be stored in MP4 files."""
        return (name
                and not name.startswith('~')
                and name not in UNSUPPORTED_TAGS
                and not (name.startswith('comment:') and len(name) > 9)
                and not name.startswith('performer:')
                and _is_valid_key(name))

    def _get_tag_name(self, name):
        """Return the MP4 atom name storing Picard tag *name*, or None."""
        if name.startswith('lyrics:'):
            name = 'lyrics'
        if name in self.__r_text_tags:
            return self.__r_text_tags[name]
        elif name in self.__r_bool_tags:
            return self.__r_bool_tags[name]
        elif name in self.__r_int_tags:
            return self.__r_int_tags[name]
        elif name in self.__r_freeform_tags:
            return self.__r_freeform_tags[name]
        elif name in self.__r_freeform_tags_ci:
            return self.__r_freeform_tags_ci[name]
        elif name == 'musicip_fingerprint':
            return '----:com.apple.iTunes:fingerprint'
        elif name in {'tracknumber', 'totaltracks'}:
            return 'trkn'
        elif name in {'discnumber', 'totaldiscs'}:
            return 'disk'
        elif self.supports_tag(name) and name not in self.__other_supported_tags:
            name = self.__casemap.get(name, name)
            return '----:com.apple.iTunes:' + name

    def _info(self, metadata, file):
        """Fill technical/format info for *file* into *metadata*."""
        super()._info(metadata, file)
        if hasattr(file.info, 'codec_description') and file.info.codec_description:
            metadata['~format'] = "%s (%s)" % (metadata['~format'], file.info.codec_description)
        filename = file.filename
        if isinstance(filename, bytes):
            filename = filename.decode()
        # Flag video content: .m4v extension or a 'hdvd' atom present.
        if filename.lower().endswith(".m4v") or (file.tags and 'hdvd' in file.tags):
            metadata['~video'] = '1'
| 16,011
|
Python
|
.py
| 350
| 34.645714
| 125
| 0.564597
|
metabrainz/picard
| 3,687
| 383
| 10
|
GPL-2.0
|
9/5/2024, 5:11:02 PM (Europe/Amsterdam)
|
11,115
|
midi.py
|
metabrainz_picard/picard/formats/midi.py
|
# -*- coding: utf-8 -*-
#
# Picard, the next-generation MusicBrainz tagger
#
# Copyright (C) 2018, 2020-2022 Philipp Wolfer
# Copyright (C) 2020-2021, 2023-2024 Laurent Monin
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
from mutagen.smf import SMF
from picard import log
from picard.file import File
from picard.metadata import Metadata
from picard.util import encode_filename
class MIDIFile(File):
    """Standard MIDI File support.

    Read-only: no tags are supported and saving is a no-op; Picard only
    reports technical format information.
    """
    EXTENSIONS = [".mid", ".kar"]
    NAME = "Standard MIDI File"
    _File = SMF

    def _load(self, filename):
        """Return Metadata with format information only."""
        log.debug("Loading file %r", filename)
        metadata = Metadata()
        file = self._File(encode_filename(filename))
        self._info(metadata, file)
        return metadata

    def _save(self, filename, metadata):
        # Intentionally a no-op: SMF files carry no tags to write.
        log.debug("Saving file %r", filename)

    def _info(self, metadata, file):
        super()._info(metadata, file)
        # mutagen.File.filename can be either a bytes or str object
        filename = file.filename
        if isinstance(filename, bytes):
            filename = filename.decode()
        if filename.lower().endswith(".kar"):
            metadata['~format'] = "Standard MIDI File (Karaoke File)"

    @classmethod
    def supports_tag(cls, name):
        """MIDI files support no tags at all."""
        return False

    @property
    def can_analyze(self):
        # Audio fingerprinting is not applicable to MIDI data.
        return False
| 1,966
|
Python
|
.py
| 51
| 34.058824
| 80
| 0.707087
|
metabrainz/picard
| 3,687
| 383
| 10
|
GPL-2.0
|
9/5/2024, 5:11:02 PM (Europe/Amsterdam)
|
11,116
|
ac3.py
|
metabrainz_picard/picard/formats/mutagenext/ac3.py
|
# -*- coding: utf-8 -*-
#
# Picard, the next-generation MusicBrainz tagger
#
# Copyright (C) 2019, 2021 Philipp Wolfer
# Copyright (C) 2020-2021 Laurent Monin
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
from mutagen._file import FileType
from mutagen._util import (
MutagenError,
loadfile,
)
from mutagen.apev2 import (
APENoHeaderError,
APEv2,
_APEv2Data,
error as APEError,
)
# Use mutagen's own AC3 support when available; the flag records whether
# the native implementation was found.  (The AC3 class defined below
# replaces the imported name in this module either way.)
try:
    from mutagen.ac3 import AC3
    native_ac3 = True
except ImportError:
    native_ac3 = False
class AC3Error(MutagenError):
    """Raised when a stream does not look like AC3 data."""
    pass
class AC3Info:
    """AC3 stream information.

    Validates the stream by its two-byte sync word; exposes no
    attributes at the moment.
    """

    def __init__(self, fileobj):
        magic = fileobj.read(4)
        if len(magic) != 4 or not magic.startswith(b"\x0b\x77"):
            raise AC3Error("not a AC3 file")

    @staticmethod
    def pprint():
        return "AC3"
class AC3(FileType):
    """Bare AC3/E-AC3 file, recognized by sync word and extension."""

    @loadfile()
    def load(self, filething, *args, **kwargs):
        self.info = AC3Info(filething.fileobj)

    @staticmethod
    def score(filename, fileobj, header):
        # Sync word match plus extension match, each contributing 1.
        return (header.startswith(b"\x0b\x77")
                + filename.endswith((".ac3", ".eac3")))
class AC3APEv2(AC3):
    """AC3 file with an optional trailing APEv2 tag."""

    @loadfile()
    def load(self, filething):
        super().load(filething)
        try:
            self.tags = APEv2(filething.fileobj)
            # Correct the calculated length
            if not hasattr(self.info, 'bitrate') or self.info.bitrate == 0:
                return
            ape_data = _APEv2Data(filething.fileobj)
            if ape_data.size is not None:
                # Remove APEv2 data length from calculated track length
                extra_length = (8.0 * ape_data.size) / self.info.bitrate
                self.info.length = max(self.info.length - extra_length, 0.001)
        except APENoHeaderError:
            # No APEv2 tag present; the file is still a valid AC3 stream.
            self.tags = None

    def add_tags(self):
        """Create an empty APEv2 tag; raises if tags already exist."""
        if self.tags is None:
            self.tags = APEv2()
        else:
            raise APEError("%r already has tags: %r" % (self, self.tags))
| 2,811
|
Python
|
.py
| 79
| 28.696203
| 80
| 0.644461
|
metabrainz/picard
| 3,687
| 383
| 10
|
GPL-2.0
|
9/5/2024, 5:11:02 PM (Europe/Amsterdam)
|
11,117
|
__init__.py
|
metabrainz_picard/picard/formats/mutagenext/__init__.py
|
# -*- coding: utf-8 -*-
#
# Picard, the next-generation MusicBrainz tagger
#
# Copyright (C) 2006 Lukáš Lalinský
# Copyright (C) 2019 Philipp Wolfer
# Copyright (C) 2020-2021 Laurent Monin
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
def delall_ci(tags, key):
    """Delete all tags with given key, case-insensitive"""
    wanted = key.lower()
    doomed = [name for name in tags.keys() if name.lower() == wanted]
    for name in doomed:
        del tags[name]
| 1,093
|
Python
|
.py
| 27
| 38.111111
| 80
| 0.737983
|
metabrainz/picard
| 3,687
| 383
| 10
|
GPL-2.0
|
9/5/2024, 5:11:02 PM (Europe/Amsterdam)
|
11,118
|
tak.py
|
metabrainz_picard/picard/formats/mutagenext/tak.py
|
# -*- coding: utf-8 -*-
#
# Picard, the next-generation MusicBrainz tagger
#
# Copyright (C) 2008 Lukáš Lalinský
# Copyright (C) 2013, 2018-2021, 2023-2024 Laurent Monin
# Copyright (C) 2017 Sambhav Kothari
# Copyright (C) 2018-2019, 2022 Philipp Wolfer
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""Tom's lossless Audio Kompressor streams with APEv2 tags.
TAK is a lossless audio compressor developed by Thomas Becker.
For more information, see http://wiki.hydrogenaudio.org/index.php?title=TAK
and http://en.wikipedia.org/wiki/TAK_(audio_codec)
"""
__all__ = ["TAK", "Open", "delete"]

# Prefer mutagen's native TAK support; if unavailable, fall back to a
# minimal local implementation built on APEv2File.
try:
    from mutagen.tak import (
        TAK,
        Open,
        TAKHeaderError,
        TAKInfo,
        delete,
    )
    native_tak = True
except ImportError:
    from mutagen import StreamInfo
    from mutagen.apev2 import (
        APEv2File,
        delete,
        error,
    )
    native_tak = False

    class TAKHeaderError(error):
        pass

    class TAKInfo(StreamInfo):
        """TAK stream information.

        Attributes:
        (none at the moment)
        """

        def __init__(self, fileobj):
            # Validate the stream by its 'tBaK' magic bytes.
            header = fileobj.read(4)
            if len(header) != 4 or not header.startswith(b"tBaK"):
                raise TAKHeaderError("not a TAK file")

        @staticmethod
        def pprint():
            return "Tom's lossless Audio Kompressor"

    class TAK(APEv2File):
        """TAK(filething)

        Arguments:
            filething (filething)

        Attributes:
            info (`TAKInfo`)
        """
        _Info = TAKInfo
        _mimes = ["audio/x-tak"]

        @staticmethod
        def score(filename, fileobj, header):
            return header.startswith(b"tBaK") + filename.lower().endswith(".tak")

    # Alias matching mutagen's module-level convention.
    Open = TAK
| 2,444
|
Python
|
.py
| 72
| 28.083333
| 81
| 0.665673
|
metabrainz/picard
| 3,687
| 383
| 10
|
GPL-2.0
|
9/5/2024, 5:11:02 PM (Europe/Amsterdam)
|
11,119
|
aac.py
|
metabrainz_picard/picard/formats/mutagenext/aac.py
|
# -*- coding: utf-8 -*-
#
# Picard, the next-generation MusicBrainz tagger
#
# Copyright (C) 2018-2019 Philipp Wolfer
# Copyright (C) 2020-2021 Laurent Monin
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
from mutagen._util import loadfile
from mutagen.aac import AAC
from mutagen.apev2 import (
APENoHeaderError,
APEv2,
_APEv2Data,
error as APEError,
)
class AACAPEv2(AAC):
    """AAC file with APEv2 tags.
    """
    @loadfile()
    def load(self, filething):
        super().load(filething)
        try:
            self.tags = APEv2(filething)
            # Correct the calculated length
            if not hasattr(self.info, 'bitrate') or self.info.bitrate == 0:
                return
            ape_data = _APEv2Data(filething.fileobj)
            if ape_data.size is not None:
                # Remove APEv2 data length from calculated track length
                extra_length = (8.0 * ape_data.size) / self.info.bitrate
                self.info.length = max(self.info.length - extra_length, 0.001)
        except APENoHeaderError:
            # No APEv2 tag present; the file is still a valid AAC stream.
            self.tags = None

    def add_tags(self):
        """Create an empty APEv2 tag; raises if tags already exist."""
        if self.tags is None:
            self.tags = APEv2()
        else:
            raise APEError("%r already has tags: %r" % (self, self.tags))
| 1,928
|
Python
|
.py
| 51
| 32.078431
| 80
| 0.675214
|
metabrainz/picard
| 3,687
| 383
| 10
|
GPL-2.0
|
9/5/2024, 5:11:02 PM (Europe/Amsterdam)
|
11,120
|
compatid3.py
|
metabrainz_picard/picard/formats/mutagenext/compatid3.py
|
# -*- coding: utf-8 -*-
#
# Picard, the next-generation MusicBrainz tagger
#
# Copyright (C) 2005 Michael Urman
# Copyright (C) 2006-2008, 2011-2012 Lukáš Lalinský
# Copyright (C) 2013-2014 Sophist-UK
# Copyright (C) 2013-2014, 2018, 2020-2021 Laurent Monin
# Copyright (C) 2014, 2018-2021 Philipp Wolfer
# Copyright (C) 2016 Christoph Reiter
# Copyright (C) 2016 Ville Skyttä
# Copyright (C) 2017 Sambhav Kothari
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
from mutagen.id3 import (
ID3,
Frames,
Frames_2_2,
TextFrame,
)
try:
    from mutagen.id3 import GRP1
except ImportError:
    # Fallback for mutagen < 1.38, which lacks the GRP1 frame.
    class GRP1(TextFrame):
        pass
class XSOP(TextFrame):
    # Custom text frame unknown to mutagen; registered in known_frames
    # below.  (Presumably the artist sort-order frame for v2.3 — verify.)
    pass
# Extend mutagen's frame registry with frames it may not know about.
known_frames = dict(Frames)
known_frames.update(dict(Frames_2_2))
known_frames["GRP1"] = GRP1  # Available since mutagen >= 1.38
known_frames["XSOP"] = XSOP
class CompatID3(ID3):
    """
    Additional features over mutagen.id3.ID3:
    * Allow some v2.4 frames also in v2.3
    """
    PEDANTIC = False

    def __init__(self, *args, **kwargs):
        # Only force the extended frame registry when loading from a
        # file/filething (i.e. when positional arguments are given).
        if args:
            kwargs["known_frames"] = known_frames
        super().__init__(*args, **kwargs)

    def update_to_v23(self):
        # Delegate to the module-level helper so the same logic is usable
        # on plain ID3 tag objects as well.
        update_to_v23(self)
def update_to_v23(tags):
    """Downgrade *tags* to ID3v2.3, keeping the v2.4 sort frames.

    TSOP, TSOA, TSOT and TSST are officially v2.4-only, but most
    applications use them in v2.3 too, so they are preserved across the
    downgrade (which would otherwise drop them).
    """
    preserved = []
    for key in ("TSOP", "TSOA", "TSOT", "TSST"):
        preserved.extend(tags.getall(key))
    ID3.update_to_v23(tags)
    for frame in preserved:
        tags.add(frame)
| 2,216
|
Python
|
.py
| 64
| 31.109375
| 80
| 0.711069
|
metabrainz/picard
| 3,687
| 383
| 10
|
GPL-2.0
|
9/5/2024, 5:11:02 PM (Europe/Amsterdam)
|
11,121
|
dbpoweramplog.py
|
metabrainz_picard/picard/disc/dbpoweramplog.py
|
# -*- coding: utf-8 -*-
#
# Picard, the next-generation MusicBrainz tagger
#
# Copyright (C) 2022 Philipp Wolfer
# Copyright (C) 2023-2024 Laurent Monin
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import re
from picard.disc.utils import (
NotSupportedTOCError,
TocEntry,
calculate_mb_toc_numbers,
)
from picard.util import detect_file_encoding
# Matches one ripped-track line of a dBpoweramp log, capturing the track
# number and its first/last LBA sectors.
RE_TOC_ENTRY = re.compile(
    r"^Track (?P<num>\d+):\s+Ripped LBA (?P<start_sector>\d+) to (?P<end_sector>\d+)")
def filter_toc_entries(lines):
    """
    Take iterator of lines, return iterator of toc entries
    """
    previous_num = 0
    for line in lines:
        match = RE_TOC_ENTRY.match(line)
        if not match:
            continue
        track_num = int(match['num'])
        # Partial rips skip tracks; a disc ID needs the complete TOC.
        if track_num != previous_num + 1:
            raise NotSupportedTOCError(f"Non consecutive track numbers ({previous_num} => {track_num}) in dBPoweramp log. Likely a partial rip, disc ID cannot be calculated")
        previous_num = track_num
        # dBpoweramp reports the last ripped sector inclusively.
        yield TocEntry(track_num, int(match['start_sector']), int(match['end_sector']) - 1)
def toc_from_file(path):
    """Reads dBpoweramp log files, generates MusicBrainz disc TOC listing for use as discid."""
    file_encoding = detect_file_encoding(path)
    with open(path, 'r', encoding=file_encoding) as logfile:
        entries = filter_toc_entries(logfile)
        return calculate_mb_toc_numbers(entries)
| 2,013
|
Python
|
.py
| 47
| 38.744681
| 180
| 0.708227
|
metabrainz/picard
| 3,687
| 383
| 10
|
GPL-2.0
|
9/5/2024, 5:11:02 PM (Europe/Amsterdam)
|
11,122
|
eaclog.py
|
metabrainz_picard/picard/disc/eaclog.py
|
# -*- coding: utf-8 -*-
#
# fix-header: nolicense
# MIT License
#
# Copyright(c) 2018 Konstantin Mochalov
# Copyright(c) 2022 Philipp Wolfer
# Copyright(c) 2022 Jeffrey Bosboom
#
# Original code from https://gist.github.com/kolen/765526
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files(the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and / or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import re
from picard.disc.utils import (
TocEntry,
calculate_mb_toc_numbers,
)
from picard.util import detect_file_encoding
RE_TOC_TABLE_HEADER = re.compile(r""" \s*
\s*.+\s+ \| # track
\s+.+\s+ \| # start
\s+.+\s+ \| # length
\s+.+\s+ \| # start sector
\s+.+\s*$ # end sector
""", re.VERBOSE)
RE_TOC_TABLE_LINE = re.compile(r"""
\s*
(?P<num>\d+)
\s*\|\s*
(?P<start_time>[0-9:.]+)
\s*\|\s*
(?P<length_time>[0-9:.]+)
\s*\|\s*
(?P<start_sector>\d+)
\s*\|\s*
(?P<end_sector>\d+)
\s*$""", re.VERBOSE)
def filter_toc_entries(lines):
    """Yield TocEntry items parsed from an iterator of log file lines."""
    # Locate the TOC table header first. The header is matched on its
    # column layout rather than column names, so internationalized EAC
    # output is handled as well.
    for line in lines:
        if RE_TOC_TABLE_HEADER.match(line):
            # Skip the separator row directly below the header.
            next(lines)
            break
    # Consume table rows until the first non-matching line.
    for line in lines:
        row = RE_TOC_TABLE_LINE.search(line)
        if row is None:
            break
        yield TocEntry(
            int(row['num']),
            int(row['start_sector']),
            int(row['end_sector']),
        )
def toc_from_file(path):
    """Read an EAC / XLD / fre:ac log file and return MusicBrainz disc TOC numbers.

    Warning: may work wrong for discs having data tracks. May generate wrong
    results on other non-standard cases."""
    log_encoding = detect_file_encoding(path)
    with open(path, 'r', encoding=log_encoding) as logfile:
        entries = filter_toc_entries(logfile)
        return calculate_mb_toc_numbers(entries)
| 2,858
|
Python
|
.py
| 75
| 33.96
| 100
| 0.68459
|
metabrainz/picard
| 3,687
| 383
| 10
|
GPL-2.0
|
9/5/2024, 5:11:02 PM (Europe/Amsterdam)
|
11,123
|
utils.py
|
metabrainz_picard/picard/disc/utils.py
|
# -*- coding: utf-8 -*-
#
# fix-header: nolicense
# MIT License
#
# Copyright(c) 2018 Konstantin Mochalov
# Copyright(c) 2022 Philipp Wolfer
#
# Original code from https://gist.github.com/kolen/765526
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files(the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and / or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from collections import namedtuple
# Sectors added to every offset; presumably the standard 2-second audio CD
# pregap before track 1 (150 sectors) — TODO confirm against libdiscid docs.
PREGAP_LENGTH = 150
# Expected sector gap before a trailing data track (see _remove_data_track).
DATA_TRACK_GAP = 11400
# One row of a parsed TOC table: track number plus its sector range.
TocEntry = namedtuple('TocEntry', 'number start_sector end_sector')
class NotSupportedTOCError(Exception):
    """Raised when a parsed TOC cannot be converted into MusicBrainz disc ID numbers."""
    pass
def calculate_mb_toc_numbers(toc):
    """Convert an iterable of TocEntry items into MusicBrainz disc ID numbers.

    Each entry is a TocEntry namedtuple with the following fields:
    - number: track number
    - start_sector: start sector of the track
    - end_sector: end sector of the track

    Returns (first_track, track_count, leadout_offset, *track_offsets).
    Raises NotSupportedTOCError for empty or non-sequential track lists.
    """
    entries = _remove_data_track(tuple(toc))
    track_count = len(entries)
    if track_count == 0:
        raise NotSupportedTOCError("Empty track list")
    track_numbers = tuple(entry.number for entry in entries)
    # Track numbers must be exactly 1..N in order.
    if track_numbers != tuple(range(1, track_count + 1)):
        raise NotSupportedTOCError(f"Non-standard track number sequence: {track_numbers}")
    lead_out = entries[-1].end_sector + PREGAP_LENGTH + 1
    track_offsets = tuple(entry.start_sector + PREGAP_LENGTH for entry in entries)
    return (1, track_count, lead_out) + track_offsets
def _remove_data_track(toc):
    """Drop a trailing data track, detected by its characteristic gap to the previous track."""
    if len(toc) > 1:
        gap_before_last = toc[-1].start_sector - toc[-2].end_sector
        if gap_before_last == DATA_TRACK_GAP + 1:
            return toc[:-1]
    return toc
| 2,552
|
Python
|
.py
| 57
| 41.350877
| 86
| 0.740024
|
metabrainz/picard
| 3,687
| 383
| 10
|
GPL-2.0
|
9/5/2024, 5:11:02 PM (Europe/Amsterdam)
|
11,124
|
__init__.py
|
metabrainz_picard/picard/disc/__init__.py
|
# -*- coding: utf-8 -*-
#
# Picard, the next-generation MusicBrainz tagger
#
# Copyright (C) 2006 Matthias Friedrich
# Copyright (C) 2007-2008 Lukáš Lalinský
# Copyright (C) 2008 Robert Kaye
# Copyright (C) 2009, 2013, 2018-2023 Philipp Wolfer
# Copyright (C) 2011-2013 Michael Wiencek
# Copyright (C) 2013 Johannes Dewender
# Copyright (C) 2013 Sebastian Ramacher
# Copyright (C) 2013 Wieland Hoffmann
# Copyright (C) 2013, 2018-2021, 2023-2024 Laurent Monin
# Copyright (C) 2016-2017 Sambhav Kothari
# Copyright (C) 2018 Vishal Choudhary
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import traceback
from PyQt6 import QtCore
from picard import log
from picard.util.mbserver import build_submission_url
from picard.ui.cdlookup import CDLookupDialog
try:
# use python-libdiscid (http://pythonhosted.org/python-libdiscid/)
from libdiscid.compat import discid
except ImportError:
try:
# use python-discid (http://python-discid.readthedocs.org/en/latest/)
import discid
except (ImportError, OSError):
discid = None
class Disc:
    """An audio CD: disc ID, MCN, track count and raw TOC string.

    Thin wrapper around the optional ``discid`` bindings (module-level
    ``discid`` is None when neither python-libdiscid nor python-discid
    is installed).
    """

    def __init__(self, id=None):
        self.tagger = QtCore.QCoreApplication.instance()
        self.id = id  # MusicBrainz disc ID; may be None until read()/put()
        self.mcn = None  # Media Catalogue Number reported by the drive, if any
        self.tracks = 0  # number of tracks on the disc
        self.toc_string = None  # raw TOC string as produced by the discid library

    def read(self, device=None):
        """Read the disc in *device* (default drive if None) and store its details.

        Raises discid.DiscError if reading fails.
        """
        if device is None:
            device = discid.get_default_device()
        log.debug("Reading CD using device: %r", device)
        try:
            disc = discid.read(device, features=['mcn'])
            self._set_disc_details(disc)
        except discid.DiscError as e:
            log.error("Error while reading %r: %s", device, e)
            raise

    def put(self, toc):
        """Compute disc details from a TOC tuple (first, last, sectors, *offsets).

        Raises discid.TOCError on an invalid TOC; a malformed tuple
        (ValueError from unpacking) is converted into a TOCError.
        """
        log.debug("Generating disc ID using TOC: %r", toc)
        try:
            first, last, sectors, *offsets = toc
            disc = discid.put(first, last, sectors, offsets)
            self._set_disc_details(disc)
        except discid.TOCError as e:
            log.error("Error while processing TOC %r: %s", toc, e)
            raise
        except ValueError as e:
            log.error("Error while processing TOC %r: %s", toc, e)
            raise discid.TOCError(e)

    def _set_disc_details(self, disc):
        # Copy the relevant attributes from a discid result object.
        self.id = disc.id
        self.mcn = disc.mcn
        self.tracks = len(disc.tracks)
        self.toc_string = disc.toc_string
        log.debug("Read disc ID %s with MCN %s", self.id, self.mcn)

    @property
    def submission_url(self):
        """URL for submitting this disc's TOC to MusicBrainz, or None if details are incomplete."""
        if self.id and self.tracks and self.toc_string:
            return build_submission_url("/cdtoc/attach", query_args={
                'id': self.id,
                'tracks': self.tracks,
                'toc': self.toc_string.replace(' ', '+'),
            })
        else:
            return None

    def lookup(self):
        """Start an asynchronous MusicBrainz lookup for this disc ID."""
        self.tagger.mb_api.lookup_discid(self.id, self._lookup_finished)

    def _lookup_finished(self, document, http, error):
        # Callback for lookup(): present the (possibly empty) release list.
        self.tagger.restore_cursor()
        releases = []
        if error:
            log.error("%r", http.errorString())
        else:
            try:
                # NOTE(review): only AttributeError/IndexError are caught;
                # a plain dict without 'releases' would raise an uncaught
                # KeyError — confirm the actual type of `document`.
                releases = document['releases']
            except (AttributeError, IndexError):
                log.error(traceback.format_exc())
        dialog = CDLookupDialog(releases, self, parent=self.tagger.window)
        dialog.exec()
# Human-readable version string for the discid bindings and libdiscid,
# or None when no discid library could be imported.
if discid is not None:
    discid_version = "discid %s, %s" % (discid.__version__,
                                        discid.LIBDISCID_VERSION_STRING)
else:
    discid_version = None
| 4,170
|
Python
|
.py
| 107
| 31.859813
| 80
| 0.648602
|
metabrainz/picard
| 3,687
| 383
| 10
|
GPL-2.0
|
9/5/2024, 5:11:02 PM (Europe/Amsterdam)
|
11,125
|
whipperlog.py
|
metabrainz_picard/picard/disc/whipperlog.py
|
# -*- coding: utf-8 -*-
#
# Picard, the next-generation MusicBrainz tagger
#
# Copyright (C) 2022 Philipp Wolfer
# Copyright (C) 2022-2024 Laurent Monin
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import yaml
from picard.disc.utils import (
TocEntry,
calculate_mb_toc_numbers,
)
def toc_from_file(path):
    """Read a whipper log file and return MusicBrainz disc TOC numbers for disc ID use.

    Warning: may work wrong for discs having data tracks. May generate wrong
    results on other non-standard cases."""
    with open(path, encoding='utf-8') as logfile:
        log_data = yaml.safe_load(logfile)
    entries = (
        TocEntry(number, track['Start sector'], track['End sector'])
        for number, track in log_data['TOC'].items()
    )
    return calculate_mb_toc_numbers(entries)
| 1,464
|
Python
|
.py
| 36
| 37.277778
| 89
| 0.727145
|
metabrainz/picard
| 3,687
| 383
| 10
|
GPL-2.0
|
9/5/2024, 5:11:02 PM (Europe/Amsterdam)
|
11,126
|
api_helpers.py
|
metabrainz_picard/picard/webservice/api_helpers.py
|
# -*- coding: utf-8 -*-
#
# Picard, the next-generation MusicBrainz tagger
#
# Copyright (C) 2017 Sambhav Kothari
# Copyright (C) 2018, 2020-2021, 2023 Laurent Monin
# Copyright (C) 2018-2023 Philipp Wolfer
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import re
from xml.sax.saxutils import quoteattr # nosec: B404
from PyQt6.QtCore import QUrl
from picard import PICARD_VERSION_STR
from picard.config import get_config
from picard.const import (
ACOUSTID_KEY,
ACOUSTID_URL,
MUSICBRAINZ_SERVERS,
)
from picard.util import encoded_queryargs
from picard.webservice import (
CLIENT_STRING,
ratecontrol,
)
from picard.webservice.utils import host_port_to_url
ratecontrol.set_minimum_delay_for_url(ACOUSTID_URL, 333)
def escape_lucene_query(text):
    """Return *text* with every Lucene query-syntax character backslash-escaped."""
    specials = re.compile(r'([+\-&|!(){}\[\]\^"~*?:\\/])')
    return specials.sub(r'\\\1', text)
def build_lucene_query(args):
    """Build a Lucene query from a field → value mapping, skipping empty values."""
    clauses = (
        '%s:(%s)' % (field, escape_lucene_query(value))
        for field, value in args.items()
        if value
    )
    return ' '.join(clauses)
def _wrap_xml_metadata(data):
return ('<?xml version="1.0" encoding="UTF-8"?>'
'<metadata xmlns="http://musicbrainz.org/ns/mmd-2.0#">%s</metadata>'
% data)
class APIHelper:
    """Convenience wrapper binding a webservice instance to a base URL.

    The get/post/put/delete helpers resolve a relative *path* against
    ``base_url`` and forward all remaining keyword arguments to the
    corresponding webservice call.
    """

    _base_url = None

    def __init__(self, webservice, base_url=None):
        self._webservice = webservice
        if base_url is not None:
            self.base_url = base_url

    @property
    def base_url(self):
        # Subclasses may override this to compute the URL dynamically.
        if self._base_url is None:
            raise ValueError("base_url undefined")
        return self._base_url

    @base_url.setter
    def base_url(self, url):
        # Accept either a string or a QUrl.
        if not isinstance(url, QUrl):
            url = QUrl(url)
        self._base_url = url

    @property
    def webservice(self):
        return self._webservice

    def url_from_path(self, path):
        """Return a copy of base_url with *path* appended to its path component."""
        url = QUrl(self.base_url)
        url.setPath(url.path() + path)
        return url

    def get(self, path, handler, **kwargs):
        """Issue a GET request for *path*; *handler* is called with the reply."""
        kwargs['url'] = self.url_from_path(path)
        kwargs['handler'] = handler
        return self._webservice.get_url(**kwargs)

    def post(self, path, data, handler, **kwargs):
        """Issue a POST request with *data*; authenticated (mblogin) by default."""
        kwargs['url'] = self.url_from_path(path)
        kwargs['handler'] = handler
        kwargs['data'] = data
        kwargs['mblogin'] = kwargs.get('mblogin', True)
        return self._webservice.post_url(**kwargs)

    def put(self, path, data, handler, **kwargs):
        """Issue a PUT request with *data*; high priority and authenticated by default."""
        kwargs['url'] = self.url_from_path(path)
        kwargs['handler'] = handler
        kwargs['data'] = data
        kwargs['priority'] = kwargs.get('priority', True)
        kwargs['mblogin'] = kwargs.get('mblogin', True)
        return self._webservice.put_url(**kwargs)

    def delete(self, path, handler, **kwargs):
        """Issue a DELETE request; high priority and authenticated by default."""
        kwargs['url'] = self.url_from_path(path)
        kwargs['handler'] = handler
        kwargs['priority'] = kwargs.get('priority', True)
        kwargs['mblogin'] = kwargs.get('mblogin', True)
        return self._webservice.delete_url(**kwargs)
class MBAPIHelper(APIHelper):
    """APIHelper for the MusicBrainz /ws/2 API: lookups, searches, browsing,
    ratings and collection management."""

    @property
    def base_url(self):
        # we have to keep it dynamic since host/port can be changed via options
        config = get_config()
        host = config.setting['server_host']
        # FIXME: We should get rid of this hard coded exception and move the
        # configuration to use proper URLs everywhere.
        port = 443 if host in MUSICBRAINZ_SERVERS else config.setting['server_port']
        self._base_url = host_port_to_url(host, port)
        self._base_url.setPath('/ws/2')
        return self._base_url

    def _get_by_id(self, entitytype, entityid, handler, inc=None, **kwargs):
        """GET a single entity by MBID, optionally adding an 'inc' query argument."""
        if inc:
            kwargs['unencoded_queryargs'] = kwargs.get('queryargs', {})
            kwargs['unencoded_queryargs']['inc'] = self._make_inc_arg(inc)
        return self.get(f"/{entitytype}/{entityid}", handler, **kwargs)

    def get_release_by_id(self, releaseid, handler, inc=None, **kwargs):
        return self._get_by_id('release', releaseid, handler, inc, **kwargs)

    def get_track_by_id(self, trackid, handler, inc=None, **kwargs):
        return self._get_by_id('recording', trackid, handler, inc, **kwargs)

    def lookup_discid(self, discid, handler, priority=True, important=True, refresh=False):
        """Look up releases matching a disc ID; CD stubs are excluded."""
        inc = ('artist-credits', 'labels')
        return self._get_by_id('discid', discid, handler, inc, queryargs={'cdstubs': 'no'},
                               priority=priority, important=important, refresh=refresh)

    def _find(self, entitytype, handler, **kwargs):
        """Run a search query against an entity endpoint.

        kwargs must contain 'limit'; with search=True a raw or
        Lucene-escaped 'query' is used, otherwise remaining kwargs are
        turned into a fielded Lucene query.
        """
        filters = {}
        # NOTE(review): 'limit' is popped without a default, so it is a
        # required keyword argument — confirm all callers pass it.
        limit = kwargs.pop('limit')
        if limit:
            filters['limit'] = limit
        is_search = kwargs.pop('search', False)
        if is_search:
            config = get_config()
            use_advanced_search = kwargs.pop('advanced_search', config.setting['use_adv_search_syntax'])
            if use_advanced_search:
                query = kwargs['query']
            else:
                # Simple search: escape the query and use the dismax parser.
                query = escape_lucene_query(kwargs['query']).strip().lower()
                filters['dismax'] = 'true'
        else:
            query = build_lucene_query(kwargs)
        if query:
            filters['query'] = query
        return self.get(f"/{entitytype}", handler, unencoded_queryargs=filters,
                        priority=True, important=True, mblogin=False,
                        refresh=False)

    def find_releases(self, handler, **kwargs):
        return self._find('release', handler, **kwargs)

    def find_tracks(self, handler, **kwargs):
        return self._find('recording', handler, **kwargs)

    def find_artists(self, handler, **kwargs):
        return self._find('artist', handler, **kwargs)

    @staticmethod
    def _make_inc_arg(inc):
        """
        Convert an iterable to a string to be passed as the 'inc' parameter to MB.
        Drops duplicate and empty elements, and sorts the rest before joining
        them into a '+'-separated string.
        """
        return '+'.join(sorted(set(str(e) for e in inc if e)))

    def _browse(self, entitytype, handler, inc=None, queryargs=None, mblogin=False):
        """GET an entity browse endpoint with optional 'inc' and query arguments."""
        if queryargs is None:
            queryargs = {}
        if inc:
            queryargs['inc'] = self._make_inc_arg(inc)
        return self.get(f"/{entitytype}", handler, unencoded_queryargs=queryargs,
                        priority=True, important=True, mblogin=mblogin,
                        refresh=False)

    def browse_releases(self, handler, **kwargs):
        inc = ('media', 'labels')
        return self._browse('release', handler, inc, queryargs=kwargs)

    def browse_recordings(self, handler, inc, **kwargs):
        return self._browse('recording', handler, inc, queryargs=kwargs)

    @staticmethod
    def _xml_ratings(ratings):
        # Build the <recording-list> XML body for rating submission.
        # ratings maps (entity_kind, mbid) -> rating; only recordings are kept,
        # and the 0-5 rating is scaled to MusicBrainz's 0-100 range.
        recordings = ''.join(
            '<recording id=%s><user-rating>%s</user-rating></recording>' %
            (quoteattr(i[1]), int(j)*20) for i, j in ratings.items() if i[0] == 'recording'
        )
        return _wrap_xml_metadata('<recording-list>%s</recording-list>' % recordings)

    def submit_ratings(self, ratings, handler):
        """POST user ratings for recordings to the /rating endpoint."""
        params = {'client': CLIENT_STRING}
        data = self._xml_ratings(ratings)
        return self.post("/rating", data, handler, priority=True,
                         unencoded_queryargs=params, parse_response_type='xml',
                         request_mimetype='application/xml; charset=utf-8')

    def get_collection(self, collection_id, handler, limit=100, offset=0):
        """Fetch one collection's releases, or the list of collections if id is None."""
        if collection_id is not None:
            inc = ('releases', 'artist-credits', 'media')
            path = f"/collection/{collection_id}/releases"
            queryargs = {
                'inc': self._make_inc_arg(inc),
                'limit': limit,
                'offset': offset,
            }
        else:
            path = '/collection'
            queryargs = None
        return self.get(path, handler, priority=True, important=True,
                        mblogin=True, unencoded_queryargs=queryargs)

    def get_collection_list(self, handler):
        return self.get_collection(None, handler)

    @staticmethod
    def _collection_request(collection_id, releases, batchsize=400):
        # Yield release-modification paths, batched to keep URLs a sane length.
        for i in range(0, len(releases), batchsize):
            ids = ';'.join(releases[i:i+batchsize])
            yield f"/collection/{collection_id}/releases/{ids}"

    @staticmethod
    def _get_client_queryarg():
        return {'client': CLIENT_STRING}

    def put_to_collection(self, collection_id, releases, handler):
        """Add the given release MBIDs to a collection (batched PUTs)."""
        for path in self._collection_request(collection_id, releases):
            self.put(path, "", handler,
                     unencoded_queryargs=self._get_client_queryarg())

    def delete_from_collection(self, collection_id, releases, handler):
        """Remove the given release MBIDs from a collection (batched DELETEs)."""
        for path in self._collection_request(collection_id, releases):
            self.delete(path, handler,
                        unencoded_queryargs=self._get_client_queryarg())
class AcoustIdAPIHelper(APIHelper):
    """APIHelper for the AcoustID web service: fingerprint lookup and submission."""

    client_key = ACOUSTID_KEY  # application API key sent with every request
    client_version = PICARD_VERSION_STR

    def __init__(self, webservice):
        super().__init__(webservice, base_url=ACOUSTID_URL)

    def _encode_acoustid_args(self, args):
        """Add client credentials and format, then form-encode *args*.

        Note: mutates the passed dict in place.
        """
        args['client'] = self.client_key
        args['clientversion'] = self.client_version
        args['format'] = 'json'
        return '&'.join((k + '=' + v for k, v in encoded_queryargs(args).items()))

    def query_acoustid(self, handler, **args):
        """POST a fingerprint lookup to the /lookup endpoint."""
        body = self._encode_acoustid_args(args)
        return self.post(
            "/lookup", body, handler, priority=False, important=False,
            mblogin=False, request_mimetype='application/x-www-form-urlencoded'
        )

    @staticmethod
    def _submissions_to_args(submissions):
        # Flatten submission objects into indexed form fields
        # ("<key>.<index>" per AcoustID's batch submission convention),
        # plus the user's API key.
        config = get_config()
        args = {'user': config.setting['acoustid_apikey']}
        for i, submission in enumerate(submissions):
            for key, value in submission.args.items():
                if value:
                    args[".".join((key, str(i)))] = value
        return args

    def submit_acoustid_fingerprints(self, submissions, handler):
        """POST a batch of fingerprint submissions to the /submit endpoint."""
        args = self._submissions_to_args(submissions)
        body = self._encode_acoustid_args(args)
        return self.post(
            "/submit", body, handler, priority=True, important=False,
            mblogin=False, request_mimetype='application/x-www-form-urlencoded'
        )
| 11,071
|
Python
|
.py
| 243
| 36.954733
| 104
| 0.627484
|
metabrainz/picard
| 3,687
| 383
| 10
|
GPL-2.0
|
9/5/2024, 5:11:02 PM (Europe/Amsterdam)
|
11,127
|
ratecontrol.py
|
metabrainz_picard/picard/webservice/ratecontrol.py
|
# -*- coding: utf-8 -*-
#
# Picard, the next-generation MusicBrainz tagger
#
# Copyright (C) 2007 Lukáš Lalinský
# Copyright (C) 2009 Carlin Mangar
# Copyright (C) 2017 Sambhav Kothari
# Copyright (C) 2018, 2020-2021, 2023-2024 Laurent Monin
# Copyright (C) 2019, 2022, 2024 Philipp Wolfer
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
from collections import defaultdict
import math
import sys
import time
from picard import log
from picard.webservice.utils import hostkey_from_url
# ============================================================================
# Throttling/congestion avoidance
# ============================================================================
# Throttles requests to a given hostkey by assigning a minimum delay between
# requests in milliseconds.
#
# Plugins may assign limits to their associated service(s) like so:
#
# >>> from picard.webservice import ratecontrol
# >>> ratecontrol.set_minimum_delay(('myservice.org', 80), 100) # 10 requests/second
# Minimum delay for the given hostkey (in milliseconds); can be set using
# set_minimum_delay()
REQUEST_DELAY_MINIMUM = defaultdict(lambda: 1000)
# Current delay (adaptive) between requests to a given hostkey.
REQUEST_DELAY = defaultdict(lambda: 1000) # Conservative initial value.
# Determines delay during exponential backoff phase.
REQUEST_DELAY_EXPONENT = defaultdict(lambda: 0)
# Unacknowledged request counter.
#
# Bump this when handing a request to QNetworkManager and trim when receiving
# a response.
CONGESTION_UNACK = defaultdict(lambda: 0)
# Congestion window size in terms of unacked requests.
#
# We're allowed to send up to `int(this)` many requests at a time.
CONGESTION_WINDOW_SIZE = defaultdict(lambda: 1.0)
# Slow start threshold.
#
# After placing this many unacknowledged requests on the wire, switch from
# slow start to congestion avoidance. (See `_adjust_throttle`.) Initialized
# upon encountering a temporary error.
CONGESTION_SSTHRESH = defaultdict(lambda: 0)
# Storage of last request times per host key
LAST_REQUEST_TIMES = defaultdict(lambda: 0)
def set_minimum_delay(hostkey, delay_ms):
    """Configure the minimum delay between requests to *hostkey*.

    hostkey -- unique key, for example (host, port)
    delay_ms -- delay in milliseconds
    """
    minimum = int(delay_ms)
    REQUEST_DELAY_MINIMUM[hostkey] = minimum
def set_minimum_delay_for_url(url, delay_ms):
    """Configure the minimum delay between requests to the host of *url*.

    url -- string or QUrl; reduced to a unique (host, port) key
    delay_ms -- delay in milliseconds
    """
    hostkey = hostkey_from_url(url)
    set_minimum_delay(hostkey, delay_ms)
def current_delay(hostkey):
    """Return the current adaptive inter-request delay (ms) for *hostkey*.

    hostkey is a unique key, for example (host, port).
    """
    return REQUEST_DELAY[hostkey]
def get_delay_to_next_request(hostkey):
    """Calculate the delay before the next request to *hostkey* (host, port).

    Returns (wait, delay):
        wait -- True if the caller must wait before sending
        delay -- milliseconds until the next request may be sent
    """
    window = int(CONGESTION_WINDOW_SIZE[hostkey])
    if CONGESTION_UNACK[hostkey] >= window:
        # The congestion window is full; hold off until replies come back
        # (see the `_timer_run_next_task` strobe in `_handle_reply`).
        return (True, sys.maxsize)
    interval = REQUEST_DELAY[hostkey]
    if not interval:
        log.debug("%s: Starting another request without delay", hostkey)
        return (False, 0)
    previous = LAST_REQUEST_TIMES[hostkey]
    if not previous:
        log.debug("%s: First request", hostkey)
        _remember_request_time(hostkey)  # set it on first run
        return (False, interval)
    elapsed_ms = (time.time() - previous) * 1000
    if elapsed_ms >= interval:
        log.debug("%s: Last request was %d ms ago, starting another one", hostkey, elapsed_ms)
        return (False, interval)
    remaining = int(math.ceil(interval - elapsed_ms))
    log.debug("%s: Last request was %d ms ago, waiting %d ms before starting another one",
              hostkey, elapsed_ms, remaining)
    return (True, remaining)
def _remember_request_time(hostkey):
    """Record the current time as the last request time for a throttled hostkey."""
    # Hosts with a zero delay are not tracked.
    if REQUEST_DELAY[hostkey]:
        LAST_REQUEST_TIMES[hostkey] = time.time()
def increment_requests(hostkey):
    """Record a new outgoing request for *hostkey*.

    Stores the request time and bumps the unacknowledged-request counter.
    Must be called once for each request sent.
    """
    _remember_request_time(hostkey)
    CONGESTION_UNACK[hostkey] += 1
    log.debug("%s: Incrementing requests to: %d", hostkey, CONGESTION_UNACK[hostkey])
def decrement_requests(hostkey):
    """Acknowledge one reply for *hostkey*; must be called once per reply."""
    assert CONGESTION_UNACK[hostkey] > 0
    CONGESTION_UNACK[hostkey] -= 1
    log.debug("%s: Decrementing requests to: %d", hostkey, CONGESTION_UNACK[hostkey])
def copy_minimal_delay(from_hostkey, to_hostkey):
    """Copy the configured minimum delay from one hostkey to another.

    Useful for redirections: the redirect target inherits the source's
    throttling. Does nothing when the source has no configured minimum
    or the target already has one.
    """
    if (from_hostkey in REQUEST_DELAY_MINIMUM
        and to_hostkey not in REQUEST_DELAY_MINIMUM):
        REQUEST_DELAY_MINIMUM[to_hostkey] = REQUEST_DELAY_MINIMUM[from_hostkey]
        # Fixed typo in log message ("minimun" -> "minimum").
        log.debug("%s: Copy minimum delay from %s, setting it to %dms",
                  to_hostkey, from_hostkey, REQUEST_DELAY_MINIMUM[to_hostkey])
def adjust(hostkey, slow_down):
    """Adjust throttling metrics for *hostkey* when an HTTP request completes.

    Args:
        hostkey: `(host, port)`.
        slow_down: `True` if we encountered intermittent server trouble
            and need to slow down.
    """
    if slow_down:
        _slow_down(hostkey)
        return
    if CONGESTION_UNACK[hostkey] <= CONGESTION_WINDOW_SIZE[hostkey]:
        # No longer in the backoff phase: speed back up.
        _out_of_backoff(hostkey)
def _slow_down(hostkey):
    """Back off exponentially, up to ~30 seconds between requests."""
    exponent = REQUEST_DELAY_EXPONENT[hostkey]
    new_delay = max(2 ** exponent * 1000, REQUEST_DELAY_MINIMUM[hostkey])
    REQUEST_DELAY_EXPONENT[hostkey] = min(exponent + 1, 5)
    # The slow start threshold becomes half the window size seen before
    # trouble; the window itself collapses back to a single request.
    CONGESTION_SSTHRESH[hostkey] = int(CONGESTION_WINDOW_SIZE[hostkey] / 2.0)
    CONGESTION_WINDOW_SIZE[hostkey] = 1.0
    log.debug(
        '%s: slowdown; delay: %dms -> %dms; ssthresh: %d; cws: %.3f',
        hostkey,
        REQUEST_DELAY[hostkey],
        new_delay,
        CONGESTION_SSTHRESH[hostkey],
        CONGESTION_WINDOW_SIZE[hostkey]
    )
    REQUEST_DELAY[hostkey] = new_delay
def _out_of_backoff(hostkey):
    """Speed back up after trouble has passed: shrink the delay, grow the window."""
    REQUEST_DELAY_EXPONENT[hostkey] = 0  # Coming out of backoff, so reset.
    # Halve the delay on each successful reply, down to the configured minimum.
    new_delay = max(int(REQUEST_DELAY[hostkey] / 2),
                    REQUEST_DELAY_MINIMUM[hostkey])
    window = CONGESTION_WINDOW_SIZE[hostkey]
    threshold = CONGESTION_SSTHRESH[hostkey]
    if threshold and window >= threshold:
        # Analogous to TCP's congestion avoidance phase. Window growth is linear.
        phase = 'congestion avoidance'
        window = window + (1.0 / window)
    else:
        # Analogous to TCP's slow start phase. Window growth is exponential.
        phase = 'slow start'
        window += 1
    if (REQUEST_DELAY[hostkey] != new_delay
        or CONGESTION_WINDOW_SIZE[hostkey] != window):
        log.debug(
            '%s: oobackoff; delay: %dms -> %dms; %s; window size %.3f -> %.3f',
            hostkey,
            REQUEST_DELAY[hostkey],
            new_delay,
            phase,
            CONGESTION_WINDOW_SIZE[hostkey],
            window
        )
    CONGESTION_WINDOW_SIZE[hostkey] = window
    REQUEST_DELAY[hostkey] = new_delay
| 8,547
|
Python
|
.py
| 194
| 38.381443
| 91
| 0.680679
|
metabrainz/picard
| 3,687
| 383
| 10
|
GPL-2.0
|
9/5/2024, 5:11:02 PM (Europe/Amsterdam)
|
11,128
|
utils.py
|
metabrainz_picard/picard/webservice/utils.py
|
# -*- coding: utf-8 -*-
#
# Picard, the next-generation MusicBrainz tagger
#
# Copyright (C) 2007 Lukáš Lalinský
# Copyright (C) 2009 Carlin Mangar
# Copyright (C) 2017 Sambhav Kothari
# Copyright (C) 2018-2022 Philipp Wolfer
# Copyright (C) 2018-2023 Laurent Monin
# Copyright (C) 2021 Tche333
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""
Asynchronous web service utilities.
"""
from PyQt6.QtCore import QUrl
def port_from_qurl(qurl):
    """Returns QUrl port or default ports (443 for https, 80 for http)"""
    default_port = 443 if qurl.scheme() == 'https' else 80
    return qurl.port(default_port)
def hostkey_from_url(url):
    """Returns (host, port) from passed url (as string or QUrl)"""
    qurl = url if isinstance(url, QUrl) else QUrl(url)
    return (qurl.host(), port_from_qurl(qurl))
def host_port_to_url(host, port, path=None, scheme=None, as_string=False):
    """Convert host & port (with optional path and scheme) to an URL.

    The scheme defaults to https for port 443, http otherwise. The port
    is only written into the URL when it is not the scheme's default.
    """
    if scheme is None:
        scheme = 'https' if port == 443 else 'http'
    url = QUrl()
    url.setScheme(scheme)
    non_default_port = ((scheme == 'https' and port != 443)
                        or (scheme == 'http' and port != 80))
    if non_default_port:
        url.setPort(port)
    url.setHost(host)
    if path is not None:
        url.setPath(path)
    if as_string:
        return url.toString()
    return url
| 2,027
|
Python
|
.py
| 54
| 33.648148
| 80
| 0.695963
|
metabrainz/picard
| 3,687
| 383
| 10
|
GPL-2.0
|
9/5/2024, 5:11:02 PM (Europe/Amsterdam)
|
11,129
|
__init__.py
|
metabrainz_picard/picard/webservice/__init__.py
|
# -*- coding: utf-8 -*-
#
# Picard, the next-generation MusicBrainz tagger
#
# Copyright (C) 2007 Lukáš Lalinský
# Copyright (C) 2009 Carlin Mangar
# Copyright (C) 2017 Sambhav Kothari
# Copyright (C) 2018-2022, 2024 Philipp Wolfer
# Copyright (C) 2018-2023 Laurent Monin
# Copyright (C) 2021 Tche333
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""
Asynchronous web service.
"""
from collections import (
defaultdict,
deque,
namedtuple,
)
from functools import partial
import os.path
import platform
import sys
from PyQt6 import (
QtCore,
QtNetwork,
)
from PyQt6.QtCore import QUrl
from PyQt6.QtNetwork import (
QNetworkReply,
QNetworkRequest,
QSslError,
)
from picard import (
PICARD_APP_NAME,
PICARD_ORG_NAME,
PICARD_VERSION_STR,
log,
)
from picard.config import get_config
from picard.const import appdirs
from picard.const.defaults import DEFAULT_CACHE_SIZE_IN_BYTES
from picard.debug_opts import DebugOpt
from picard.oauth import OAuthManager
from picard.util import (
bytes2human,
encoded_queryargs,
parse_json,
)
from picard.util.xml import parse_xml
from picard.webservice import ratecontrol
from picard.webservice.utils import port_from_qurl
COUNT_REQUESTS_DELAY_MS = 250
TEMP_ERRORS_RETRIES = 5
USER_AGENT_STRING = '%s-%s/%s (%s;%s-%s)' % (PICARD_ORG_NAME, PICARD_APP_NAME,
PICARD_VERSION_STR,
platform.platform(),
platform.python_implementation(),
platform.python_version())
CLIENT_STRING = '%s %s-%s' % (PICARD_ORG_NAME, PICARD_APP_NAME, PICARD_VERSION_STR)
DEFAULT_RESPONSE_PARSER_TYPE = "json"
Parser = namedtuple('Parser', 'mimetype parser')
class UnknownResponseParserError(Exception):
    """Raised when no parser is registered for the requested response type."""

    def __init__(self, response_type):
        super().__init__(
            "Unknown parser for response type '%s'. Parser for given response type does not exist." % response_type
        )
class WSRequest(QNetworkRequest):
    """Represents a single HTTP request."""

    # Class-level defaults; instances shadow these via the properties below.
    _access_token = None
    _high_prio_no_cache = True
    _mblogin = None
    _retries = 0
    response_mimetype = None
    response_parser = None

    def __init__(
        self,
        *,
        method=None,
        handler=None,
        parse_response_type=None,
        data=None,
        mblogin=False,
        cacheloadcontrol=None,
        refresh=False,
        priority=False,
        important=False,
        request_mimetype=None,
        url=None,
        queryargs=None,
        unencoded_queryargs=None,
    ):
        """
        Args:
            method: HTTP method. One of ``GET``, ``POST``, ``PUT``, or ``DELETE``.
            handler: Callback which takes a 3-tuple of `(str:document,
                QNetworkReply:reply, QNetworkReply.Error:error)`.
            parse_response_type: Specifies that request either sends or accepts
                data as ``application/{{response_mimetype}}``.
            data: Data to include with ``PUT`` or ``POST`` requests.
            mblogin: Hints that this request should be tied to a MusicBrainz
                account, requiring that we obtain an OAuth token first.
            cacheloadcontrol: See `QNetworkRequest.Attribute.CacheLoadControlAttribute`.
            refresh: Indicates a user-specified resource refresh, such as when
                the user wishes to reload a release. Marks the request as high
                priority and disables caching.
            priority: Indicates that this is a high priority request.
            important: Indicates that this is an important request.
            request_mimetype: Set the Content-Type header.
            url: URL passed as a string or as a QUrl to use for this request
            queryargs: Encoded query arguments, a dictionary mapping field names to values
            unencoded_queryargs: Unencoded query arguments, a dictionary mapping field names to values

        Raises:
            AssertionError: if method, handler or url is missing/invalid.
        """
        # mandatory parameters
        self.method = method
        if self.method not in {'GET', 'PUT', 'DELETE', 'POST'}:
            raise AssertionError('invalid method')
        self.handler = handler
        if self.handler is None:
            raise AssertionError('handler undefined')
        if url is None:
            raise AssertionError('URL undefined')
        if not isinstance(url, QUrl):
            url = QUrl(url)
        if queryargs is not None or unencoded_queryargs is not None:
            if queryargs is None:
                queryargs = {}
            if unencoded_queryargs:
                # Percent-encode the raw values before merging them in.
                queryargs.update(encoded_queryargs(unencoded_queryargs))
            query = QtCore.QUrlQuery(url)
            for k, v in queryargs.items():
                query.addQueryItem(k, str(v))
            url.setQuery(query)
        super().__init__(url)
        # To simulate an ssl error, uncomment following lines
        # ssl = self.sslConfiguration()
        # ssl.setCaCertificates(list())
        # self.setSslConfiguration(ssl)
        # optional parameters
        self.parse_response_type = parse_response_type
        self.request_mimetype = request_mimetype
        self.data = data
        self.mblogin = mblogin
        self.cacheloadcontrol = cacheloadcontrol
        self.refresh = refresh
        self.priority = priority
        self.important = important
        # set headers and attributes
        self.access_token = None  # setter also refreshes the Authorization header
        if self.method == 'GET':
            # Only GET replies may be served from cache; a forced refresh
            # bypasses it (see _high_prio_no_cache handling below).
            self._high_prio_no_cache = self.refresh
            self.setAttribute(QNetworkRequest.Attribute.HttpPipeliningAllowedAttribute, True)
        # use HTTP/2 if possible
        self.setAttribute(QNetworkRequest.Attribute.Http2AllowedAttribute, True)
        self.setHeader(QNetworkRequest.KnownHeaders.UserAgentHeader, USER_AGENT_STRING)
        if self.mblogin or self._high_prio_no_cache:
            # Authenticated and refresh requests always hit the network.
            self.setPriority(QNetworkRequest.Priority.HighPriority)
            self.setAttribute(QNetworkRequest.Attribute.CacheLoadControlAttribute, QNetworkRequest.CacheLoadControl.AlwaysNetwork)
        elif self.cacheloadcontrol is not None:
            self.setAttribute(QNetworkRequest.Attribute.CacheLoadControlAttribute, self.cacheloadcontrol)
        if self.parse_response_type:
            try:
                self.response_mimetype = WebService.get_response_mimetype(self.parse_response_type)
                self.response_parser = WebService.get_response_parser(self.parse_response_type)
            except UnknownResponseParserError as e:
                log.error(e.args[0])
            else:
                self.setRawHeader(b"Accept", self.response_mimetype.encode('utf-8'))
        if self.data:
            if not self.request_mimetype:
                # Default Content-Type mirrors the Accept type, if any.
                self.request_mimetype = self.response_mimetype or 'application/x-www-form-urlencoded'
            self.setHeader(QNetworkRequest.KnownHeaders.ContentTypeHeader, self.request_mimetype)

    @property
    def has_auth(self):
        """True when this is a logged-in request with an access token set."""
        return self.mblogin and self.access_token

    def _update_authorization_header(self):
        # Only authenticated requests carry an Authorization header.
        if self.has_auth:
            auth = 'Bearer ' + self.access_token
            self.setRawHeader(b'Authorization', auth.encode('utf-8'))

    @property
    def host(self):
        return self.url().host()

    @property
    def port(self):
        return port_from_qurl(self.url())

    @property
    def path(self):
        return self.url().path()

    @property
    def access_token(self):
        return self._access_token

    @access_token.setter
    def access_token(self, access_token):
        # Keep the Authorization header in sync with the token.
        self._access_token = access_token
        self._update_authorization_header()

    @property
    def mblogin(self):
        return self._mblogin

    @mblogin.setter
    def mblogin(self, mblogin):
        self._mblogin = mblogin
        self._update_authorization_header()

    def get_host_key(self):
        """Return the (host, port) pair used as the rate-control key."""
        return (self.host, self.port)

    def max_retries_reached(self):
        """True once the request has used up all temporary-error retries."""
        return self._retries >= TEMP_ERRORS_RETRIES

    def mark_for_retry(self, important=True, priority=True):
        """Flag the request for requeueing and return the new retry count."""
        # Put retries at the head of the list in order to not penalize
        # loading an album unlucky enough to hit a temporary service snag.
        self.important = important
        self.priority = priority
        self._retries += 1
        return self._retries
class RequestTask(namedtuple('RequestTask', 'hostkey func priority')):
    """Immutable (hostkey, func, priority) triple identifying a queued request."""

    @staticmethod
    def from_request(request, func):
        """Build a task for *request*; the boolean priority is stored as an int."""
        hostkey = request.get_host_key()
        return RequestTask(hostkey, func, int(request.priority))
class RequestPriorityQueue:
    """Priority- and host-aware queue of callables, throttled by a rate controller.

    Tasks are stored per (priority, hostkey); higher priorities are served
    first, and each host is only hit when the rate controller allows it.
    """

    def __init__(self, ratecontrol):
        # _queues[priority][hostkey] -> deque of callables
        self._queues = defaultdict(lambda: defaultdict(deque))
        self._ratecontrol = ratecontrol
        self._count = 0

    def count(self):
        """Number of queued (not yet executed) tasks."""
        return self._count

    def add_task(self, task, important=False):
        (hostkey, func, prio) = task
        queue = self._queues[prio][hostkey]
        # Important tasks (e.g. retries) jump to the front of their queue.
        if important:
            queue.appendleft(func)
        else:
            queue.append(func)
        self._count += 1
        return RequestTask(hostkey, func, prio)

    def remove_task(self, task):
        hostkey, func, prio = task
        try:
            self._queues[prio][hostkey].remove(func)
            self._count -= 1
        except Exception as e:
            # Best effort: the task may already have run or been removed.
            log.debug(e)

    def run_ready_tasks(self):
        """Run at most one ready task per host; return the next delay in ms.

        Empty per-priority and per-host queues are pruned lazily as they are
        encountered. Returns sys.maxsize when nothing remains scheduled.
        """
        delay = sys.maxsize
        for prio in sorted(self._queues.keys(), reverse=True):
            prio_queue = self._queues[prio]
            if not prio_queue:
                del self._queues[prio]
                continue
            # Serve hosts with the shortest current delay first.
            for hostkey in sorted(prio_queue.keys(),
                                  key=self._ratecontrol.current_delay):
                queue = self._queues[prio][hostkey]
                if not queue:
                    del self._queues[prio][hostkey]
                    continue
                wait, d = self._ratecontrol.get_delay_to_next_request(hostkey)
                if not wait:
                    queue.popleft()()
                    self._count -= 1
                if d < delay:
                    delay = d
        return delay
class WebService(QtCore.QObject):
    """Asynchronous HTTP client with disk caching, rate limiting and OAuth."""

    # Registered response parsers, keyed by type name ('xml', 'json').
    PARSERS = dict()

    def __init__(self, parent=None):
        super().__init__(parent)
        self.tagger = QtCore.QCoreApplication.instance()
        self.manager = QtNetwork.QNetworkAccessManager()
        self.manager.sslErrors.connect(self.ssl_errors)
        self.oauth_manager = OAuthManager(self)
        config = get_config()
        self._init_cache()
        self.set_cache_size()
        self.setup_proxy()
        self.set_transfer_timeout(config.setting['network_transfer_timeout_seconds'])
        self.manager.finished.connect(self._process_reply)
        # Dispatch table from HTTP method name to QNetworkAccessManager call.
        self._request_methods = {
            'GET': self.manager.get,
            'POST': self.manager.post,
            'PUT': self.manager.put,
            'DELETE': self.manager.deleteResource
        }
        self._init_queues()
        self._init_timers()

    def ssl_errors(self, reply, errors):
        """Log SSL errors; ignore them on *reply* when only harmless ones occurred."""
        # According to forums, sometimes sslErrors is triggered with errors set to NoError
        # This can also be used to ignore others if needed
        ignored_errors = {
            QSslError.NoError,
        }
        has_errors = False
        for error in errors:
            if error not in ignored_errors:
                has_errors = True
                log.error("SSL error: %s" % error.errorString())
        if not has_errors:
            reply.ignoreSslErrors()

    @staticmethod
    def http_response_code(reply):
        """Return the HTTP status code of *reply*, or 0 when unavailable."""
        response_code = reply.attribute(QNetworkRequest.Attribute.HttpStatusCodeAttribute)
        return int(response_code) if response_code else 0

    @staticmethod
    def http_response_phrase(reply):
        """Return the HTTP reason phrase of *reply* (may be None)."""
        return reply.attribute(QNetworkRequest.Attribute.HttpReasonPhraseAttribute)

    @staticmethod
    def display_url(url):
        """Return *url* as a loggable string with user credentials stripped."""
        return url.toDisplayString(QUrl.UrlFormattingOption.RemoveUserInfo | QUrl.ComponentFormattingOption.EncodeSpaces)

    def _init_queues(self):
        # reply -> WSRequest for every request currently on the wire
        self._active_requests = {}
        self._queue = RequestPriorityQueue(ratecontrol)
        self.num_pending_web_requests = 0

    def _init_timers(self):
        # Single-shot timers drive queue draining and pending-count updates.
        self._timer_run_next_task = QtCore.QTimer(self)
        self._timer_run_next_task.setSingleShot(True)
        self._timer_run_next_task.timeout.connect(self._run_next_task)
        self._timer_count_pending_requests = QtCore.QTimer(self)
        self._timer_count_pending_requests.setSingleShot(True)
        self._timer_count_pending_requests.timeout.connect(self._count_pending_requests)

    def _init_cache(self, cache_size_in_bytes=None):
        """Attach a disk cache to the network manager.

        NOTE(review): cache_size_in_bytes is currently unused; the size is
        applied separately via set_cache_size().
        """
        cache = QtNetwork.QNetworkDiskCache()
        cache.setCacheDirectory(os.path.join(appdirs.cache_folder(), 'network'))
        self.manager.setCache(cache)
        log.debug("NetworkDiskCache dir: %r", cache.cacheDirectory())

    def get_valid_cache_size(self):
        """Return the configured cache size in bytes, or the default if invalid."""
        try:
            config = get_config()
            cache_size = int(config.setting['network_cache_size_bytes'])
            if cache_size >= 0:
                return cache_size
        except ValueError:
            pass
        return DEFAULT_CACHE_SIZE_IN_BYTES

    def set_cache_size(self):
        """Apply the configured maximum size to the disk cache."""
        cache_size_in_bytes = self.get_valid_cache_size()
        cache = self.manager.cache()
        if cache.maximumCacheSize() != cache_size_in_bytes:
            cache.setMaximumCacheSize(cache_size_in_bytes)
        log.debug(
            "NetworkDiskCache size: %s maxsize: %s",
            bytes2human.decimal(cache.cacheSize(), l10n=False),
            bytes2human.decimal(cache.maximumCacheSize(), l10n=False)
        )

    def setup_proxy(self):
        """Configure the network proxy (SOCKS5 or HTTP) from user settings."""
        proxy = QtNetwork.QNetworkProxy()
        config = get_config()
        if config.setting['use_proxy']:
            if config.setting['proxy_type'] == 'socks':
                proxy.setType(QtNetwork.QNetworkProxy.ProxyType.Socks5Proxy)
            else:
                proxy.setType(QtNetwork.QNetworkProxy.ProxyType.HttpProxy)
            proxy.setHostName(config.setting['proxy_server_host'])
            proxy.setPort(config.setting['proxy_server_port'])
            if config.setting['proxy_username']:
                proxy.setUser(config.setting['proxy_username'])
            if config.setting['proxy_password']:
                proxy.setPassword(config.setting['proxy_password'])
        self.manager.setProxy(proxy)

    def set_transfer_timeout(self, timeout):
        """Set the network transfer timeout; *timeout* is given in seconds."""
        timeout_ms = timeout * 1000
        self.manager.setTransferTimeout(timeout_ms)

    def _send_request(self, request, access_token=None):
        """Put *request* on the wire and remember the reply-to-request mapping."""
        hostkey = request.get_host_key()
        ratecontrol.increment_requests(hostkey)
        request.access_token = access_token
        send = self._request_methods[request.method]
        data = request.data
        reply = send(request, data.encode('utf-8')) if data is not None else send(request)
        self._active_requests[reply] = request

    def _start_request(self, request):
        # Authenticated requests (except the token request itself) need an
        # OAuth access token before they can be sent.
        if request.mblogin and request.path != "/oauth2/token":
            self.oauth_manager.get_access_token(partial(self._send_request, request))
        else:
            self._send_request(request)

    @staticmethod
    def urls_equivalent(leftUrl, rightUrl):
        """
        Lazy method to determine whether two QUrls are equivalent. At the moment it assumes that if ports are unset
        that they are port 80 - in absence of a URL normalization function in QUrl or ability to use qHash
        from QT 4.7
        """
        return leftUrl.port(80) == rightUrl.port(80) and \
            leftUrl.toString(QUrl.UrlFormattingOption.RemovePort) == rightUrl.toString(QUrl.UrlFormattingOption.RemovePort)

    def _handle_redirect(self, reply, request, redirect):
        """Follow *redirect* with a new GET request, unless it loops back."""
        error = int(reply.error())
        # merge with base url (to cover the possibility of the URL being relative)
        redirect_url = request.url().resolved(redirect)
        reply_url = reply.request().url()
        display_redirect_url = self.display_url(redirect_url)
        display_reply_url = self.display_url(reply_url)
        if not WebService.urls_equivalent(redirect_url, reply_url):
            log.debug("Redirect to %s requested", display_redirect_url)
            redirect_request = WSRequest(
                method='GET',
                url=redirect_url,
                handler=request.handler,
                parse_response_type=request.parse_response_type,
                priority=True,
                important=True,
                mblogin=request.mblogin,
                cacheloadcontrol=request.attribute(QNetworkRequest.Attribute.CacheLoadControlAttribute),
                refresh=request.refresh,
            )
            # Carry over the rate limiting learned for the original host.
            ratecontrol.copy_minimal_delay(
                request.get_host_key(),
                redirect_request.get_host_key(),
            )
            self.add_request(redirect_request)
        else:
            log.error("Redirect loop: %s", display_reply_url)
            request.handler(reply.readAll(), reply, error)

    def _handle_reply(self, reply, request):
        """Process a finished reply: retry, follow redirects or call the handler."""
        hostkey = request.get_host_key()
        ratecontrol.decrement_requests(hostkey)
        self._timer_run_next_task.start(0)
        slow_down = False
        error = reply.error()
        handler = request.handler
        response_code = self.http_response_code(reply)
        display_reply_url = self.display_url(reply.request().url())
        if reply.attribute(QNetworkRequest.Attribute.Http2WasUsedAttribute):
            proto = 'HTTP2'
        else:
            proto = 'HTTP'
        if error != QNetworkReply.NetworkError.NoError:
            errstr = reply.errorString()
            log.error("Network request error for %s -> %s (QT code %r, %s code %d)",
                      display_reply_url, errstr, error, proto, response_code)
            # Temporary errors are retried a limited number of times.
            if (not request.max_retries_reached()
                and (response_code == 503
                     or response_code == 429
                     # Sometimes QT returns a http status code of 200 even when there
                     # is a service unavailable error.
                     or error == QNetworkReply.NetworkError.ServiceUnavailableError
                     )):
                slow_down = True
                retries = request.mark_for_retry()
                log.debug("Retrying %s (#%d)", display_reply_url, retries)
                self.add_request(request)
            elif handler is not None:
                handler(reply.readAll(), reply, error)
            # Server-side errors (5xx) also slow down subsequent requests.
            slow_down = (slow_down or response_code >= 500)
        else:
            error = None
            redirect = reply.attribute(QNetworkRequest.Attribute.RedirectionTargetAttribute)
            from_cache = reply.attribute(QNetworkRequest.Attribute.SourceIsFromCacheAttribute)
            cached = ' (CACHED)' if from_cache else ''
            log.debug("Received reply for %s -> %s %d (%s) %s",
                      display_reply_url,
                      proto,
                      response_code,
                      self.http_response_phrase(reply),
                      cached
                      )
            if handler is not None:
                # Redirect if found and not infinite
                if redirect:
                    self._handle_redirect(reply, request, redirect)
                elif request.response_parser:
                    try:
                        document = request.response_parser(reply)
                        if DebugOpt.WS_REPLIES.enabled:
                            log.debug("Response received: %s", document)
                    except Exception as e:
                        # Fall back to the raw body; pass the parse error on.
                        log.error("Unable to parse the response for %s -> %s", display_reply_url, e)
                        document = bytes(reply.readAll())
                        error = e
                    finally:
                        handler(document, reply, error)
                else:
                    # readAll() returns QtCore.QByteArray, so convert to bytes
                    handler(bytes(reply.readAll()), reply, error)
        ratecontrol.adjust(hostkey, slow_down)

    def _process_reply(self, reply):
        """finished-signal slot: look up the originating request and handle it."""
        try:
            request = self._active_requests.pop(reply)
        except KeyError:
            display_reply_url = self.display_url(reply.request().url())
            log.error("Request not found for %s", display_reply_url)
            return
        try:
            self._handle_reply(reply, request)
        finally:
            # Always release the reply, even if the handler raised.
            reply.close()
            reply.deleteLater()

    def get_url(self, **kwargs):
        """Queue a GET request; the response is parsed as JSON by default."""
        kwargs['method'] = 'GET'
        kwargs['parse_response_type'] = kwargs.get('parse_response_type', DEFAULT_RESPONSE_PARSER_TYPE)
        return self.add_request(WSRequest(**kwargs))

    def post_url(self, **kwargs):
        """Queue a POST request; authenticated (mblogin) by default."""
        kwargs['method'] = 'POST'
        kwargs['parse_response_type'] = kwargs.get('parse_response_type', DEFAULT_RESPONSE_PARSER_TYPE)
        kwargs['mblogin'] = kwargs.get('mblogin', True)
        if DebugOpt.WS_POST.enabled:
            log.debug("POST-DATA %r", kwargs['data'])
        return self.add_request(WSRequest(**kwargs))

    def put_url(self, **kwargs):
        """Queue a PUT request; high priority and authenticated by default."""
        kwargs['method'] = 'PUT'
        kwargs['priority'] = kwargs.get('priority', True)
        kwargs['mblogin'] = kwargs.get('mblogin', True)
        return self.add_request(WSRequest(**kwargs))

    def delete_url(self, **kwargs):
        """Queue a DELETE request; high priority and authenticated by default."""
        kwargs['method'] = 'DELETE'
        kwargs['priority'] = kwargs.get('priority', True)
        kwargs['mblogin'] = kwargs.get('mblogin', True)
        return self.add_request(WSRequest(**kwargs))

    def download_url(self, **kwargs):
        """Queue a GET request whose raw (unparsed) body goes to the handler."""
        kwargs['method'] = 'GET'
        return self.add_request(WSRequest(**kwargs))

    def stop(self):
        """Abort all in-flight requests and reset the queues."""
        for reply in list(self._active_requests):
            reply.abort()
        self._init_queues()

    def _count_pending_requests(self):
        # Emit a stats update only when the pending count actually changed;
        # keep polling while anything is still pending.
        count = len(self._active_requests) + self._queue.count()
        if count != self.num_pending_web_requests:
            self.num_pending_web_requests = count
            self.tagger.tagger_stats_changed.emit()
        if count:
            self._timer_count_pending_requests.start(COUNT_REQUESTS_DELAY_MS)

    def _run_next_task(self):
        # Drain whatever is ready now and reschedule for the next delay.
        delay = self._queue.run_ready_tasks()
        if delay < sys.maxsize:
            self._timer_run_next_task.start(delay)

    def add_task(self, func, request):
        """Queue *func* according to *request*'s priority; return the task handle."""
        task = RequestTask.from_request(request, func)
        self._queue.add_task(task, request.important)
        if not self._timer_run_next_task.isActive():
            self._timer_run_next_task.start(0)
        if not self._timer_count_pending_requests.isActive():
            self._timer_count_pending_requests.start(0)
        return task

    def add_request(self, request):
        """Queue *request* for sending; returns a task usable with remove_task()."""
        return self.add_task(partial(self._start_request, request), request)

    def remove_task(self, task):
        """Remove a not-yet-started task from the queue."""
        self._queue.remove_task(task)
        if not self._timer_count_pending_requests.isActive():
            self._timer_count_pending_requests.start(0)

    @classmethod
    def add_parser(cls, response_type, mimetype, parser):
        """Register *parser* and its MIME type under *response_type*."""
        cls.PARSERS[response_type] = Parser(mimetype=mimetype, parser=parser)

    @classmethod
    def get_response_mimetype(cls, response_type):
        """Return the MIME type registered for *response_type* (or raise)."""
        if response_type in cls.PARSERS:
            return cls.PARSERS[response_type].mimetype
        else:
            raise UnknownResponseParserError(response_type)

    @classmethod
    def get_response_parser(cls, response_type):
        """Return the parser registered for *response_type* (or raise)."""
        if response_type in cls.PARSERS:
            return cls.PARSERS[response_type].parser
        else:
            raise UnknownResponseParserError(response_type)
# Register the built-in response parsers; WSRequest looks them up by type name.
WebService.add_parser('xml', 'application/xml', parse_xml)
WebService.add_parser('json', 'application/json', parse_json)
| 24,736
|
Python
|
.py
| 566
| 33.535336
| 130
| 0.626714
|
metabrainz/picard
| 3,687
| 383
| 10
|
GPL-2.0
|
9/5/2024, 5:11:02 PM (Europe/Amsterdam)
|
11,130
|
checkupdate.py
|
metabrainz_picard/picard/util/checkupdate.py
|
# -*- coding: utf-8 -*-
#
# Picard, the next-generation MusicBrainz tagger
#
# Copyright (C) 2018 Bob Swift
# Copyright (C) 2018, 2020, 2022-2023 Philipp Wolfer
# Copyright (C) 2018, 2020-2024 Laurent Monin
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
from functools import partial
from PyQt6.QtWidgets import QMessageBox
from picard import (
PICARD_FANCY_VERSION_STR,
PICARD_VERSION,
log,
)
from picard.const import (
PLUGINS_API,
PROGRAM_UPDATE_LEVELS,
)
from picard.i18n import (
N_,
gettext as _,
gettext_constants,
)
from picard.util import webbrowser2
from picard.version import (
Version,
VersionError,
)
class UpdateCheckManager:
    """Queries the program update API and notifies the user of newer releases."""

    def __init__(self, tagger):
        self.tagger = tagger
        # Cached release info from the website API, keyed by update level name.
        self._available_versions = {}
        self._show_always = False
        self._update_level = 0

    def check_update(self, show_always=False, update_level=0, callback=None):
        """Checks if an update is available.

        Compares the version number of the currently running instance of Picard
        and displays a dialog box informing the user if an update is available,
        with an option of opening the download site in their browser. If there
        is no update available, no dialog will be shown unless the "show_always"
        parameter has been set to True. This allows for silent checking during
        startup if so configured.

        Args:
            show_always: Boolean value indicating whether the results dialog
                should be shown even when there is no update available.
            update_level: Determines what type of updates to check. Options are:
                0 = only stable release versions are checked.
                1 = stable and beta releases are checked.
                2 = stable, beta and dev releases are checked.
            callback: Optional callable invoked with True on success or False
                on error once the remote query completes (not called when
                cached release information is reused).

        Returns:
            none.

        Raises:
            none.
        """
        self._show_always = show_always
        self._update_level = update_level
        if self._available_versions:
            # Release information already acquired from specified website api.
            self._display_results()
        else:
            # Gets list of releases from specified website api.
            self._query_available_updates(callback=callback)

    def _query_available_updates(self, callback=None):
        """Gets list of releases from specified website api."""
        log.debug("Getting Picard release information from %s", PLUGINS_API['urls']['releases'])
        self.tagger.webservice.get_url(
            url=PLUGINS_API['urls']['releases'],
            handler=partial(self._releases_json_loaded, callback=callback),
            priority=True,
            important=True
        )

    def _releases_json_loaded(self, response, reply, error, callback=None):
        """Processes response from specified website api query."""
        if error:
            log.error(_("Error loading Picard releases list: {error_message}").format(error_message=reply.errorString(),))
            if self._show_always:
                QMessageBox.information(
                    self.tagger.window,
                    _("Picard Update"),
                    _("Unable to retrieve the latest version information from the website.\n({url})").format(
                        url=PLUGINS_API['urls']['releases'],
                    ),
                    QMessageBox.StandardButton.Ok, QMessageBox.StandardButton.Ok)
        else:
            if response and 'versions' in response:
                self._available_versions = response['versions']
            else:
                self._available_versions = {}
            for key in self._available_versions:
                log.debug("Version key '%s' -> %s", key, self._available_versions[key])
            self._display_results()
        if callback:
            # Report success/failure of the remote query to the caller.
            callback(not error)

    def _display_results(self):
        # Display results to user.
        # Find the newest available version at or below the subscribed update
        # level that is greater than the running version.
        key = ''
        high_version = PICARD_VERSION
        for test_key in PROGRAM_UPDATE_LEVELS:
            update_level = PROGRAM_UPDATE_LEVELS[test_key]['name']
            version_tuple = self._available_versions.get(update_level, {}).get('version', (0, 0, 0, ''))
            try:
                test_version = Version(*version_tuple)
            except (TypeError, VersionError):
                log.error("Invalid version %r for update level %s.", version_tuple, update_level)
                continue
            if self._update_level >= test_key and test_version > high_version:
                key = PROGRAM_UPDATE_LEVELS[test_key]['name']
                high_version = test_version
        if key:
            # A newer version exists: offer to open the download page.
            if QMessageBox.information(
                self.tagger.window,
                _("Picard Update"),
                _("A new version of Picard is available.\n\n"
                  "This version: {picard_old_version}\n"
                  "New version: {picard_new_version}\n\n"
                  "Would you like to download the new version?").format(
                      picard_old_version=PICARD_FANCY_VERSION_STR,
                      picard_new_version=self._available_versions[key]['tag']
                ),
                QMessageBox.StandardButton.Ok | QMessageBox.StandardButton.Cancel,
                QMessageBox.StandardButton.Cancel
            ) == QMessageBox.StandardButton.Ok:
                webbrowser2.open(self._available_versions[key]['urls']['download'])
        else:
            # No update found; only report when explicitly requested.
            if self._show_always:
                if self._update_level in PROGRAM_UPDATE_LEVELS:
                    update_level = PROGRAM_UPDATE_LEVELS[self._update_level]['title']
                else:
                    update_level = N_("unknown")
                QMessageBox.information(
                    self.tagger.window,
                    _("Picard Update"),
                    _("There is no update currently available for your subscribed update level: {update_level}\n\n"
                      "Your version: {picard_old_version}\n").format(
                          update_level=gettext_constants(update_level),
                          picard_old_version=PICARD_FANCY_VERSION_STR,
                    ),
                    QMessageBox.StandardButton.Ok, QMessageBox.StandardButton.Ok
                )
| 6,952
|
Python
|
.py
| 153
| 34.764706
| 122
| 0.615362
|
metabrainz/picard
| 3,687
| 383
| 10
|
GPL-2.0
|
9/5/2024, 5:11:02 PM (Europe/Amsterdam)
|
11,131
|
periodictouch.py
|
metabrainz_picard/picard/util/periodictouch.py
|
# -*- coding: utf-8 -*-
#
# Picard, the next-generation MusicBrainz tagger
#
# Copyright (C) 2022 Philipp Wolfer
# Copyright (C) 2022-2024 Laurent Monin
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
from pathlib import Path
from PyQt6.QtCore import QTimer
from picard import log
# Registered files are touched every 4 hours.
TOUCH_FILES_DELAY_SECONDS = 4 * 3600
# Module-level timer driving the periodic touch, and the set of tracked paths.
_touch_timer = QTimer()
_files_to_touch = set()
def register_file(filepath):
    """Track *filepath* for periodic touching (no-op while the timer is off)."""
    if not _touch_timer.isActive():
        return
    _files_to_touch.add(filepath)
def unregister_file(filepath):
    """Stop tracking *filepath* (no-op while the timer is off)."""
    if not _touch_timer.isActive():
        return
    _files_to_touch.discard(filepath)
def enable_timer():
    """Start the repeating timer that keeps registered files' mtimes fresh."""
    log.debug("Setup timer for touching files every %i seconds", TOUCH_FILES_DELAY_SECONDS)
    _touch_timer.timeout.connect(_touch_files)
    _touch_timer.start(TOUCH_FILES_DELAY_SECONDS * 1000)  # QTimer interval is in ms
def _touch_files():
    """Update the mtime of every registered file; drop paths that vanished."""
    log.debug("Touching %i files", len(_files_to_touch))
    # Iterate over a snapshot: unregister_file() mutates the set.
    for filepath in _files_to_touch.copy():
        target = Path(filepath)
        if not target.exists():
            unregister_file(filepath)
            continue
        try:
            target.touch()
        except OSError:
            log.error("error touching file `%s`", filepath, exc_info=True)
| 1,842
|
Python
|
.py
| 47
| 34.978723
| 91
| 0.71573
|
metabrainz/picard
| 3,687
| 383
| 10
|
GPL-2.0
|
9/5/2024, 5:11:02 PM (Europe/Amsterdam)
|
11,132
|
xml.py
|
metabrainz_picard/picard/util/xml.py
|
# -*- coding: utf-8 -*-
#
# Picard, the next-generation MusicBrainz tagger
#
# Copyright (C) 2007 Lukáš Lalinský
# Copyright (C) 2009 Carlin Mangar
# Copyright (C) 2017 Sambhav Kothari
# Copyright (C) 2018, 2020-2021, 2024 Laurent Monin
# Copyright (C) 2019, 2022 Philipp Wolfer
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import re
from PyQt6.QtCore import QXmlStreamReader
# Any non-alphanumeric character in an XML name is replaced with '_'
# (see _node_name()) so names can be used as Python attribute names.
_node_name_re = re.compile('[^a-zA-Z0-9]')
class XmlNode:
    """A parsed XML element: text content, child lists keyed by tag, attributes.

    Children and attributes are reachable via attribute access: ``node.tag``
    returns the list of child nodes named ``tag``, falling back to the
    attribute value of that name, else raising AttributeError.
    """

    def __init__(self):
        self.text = ''       # concatenated character data
        self.children = {}   # tag name -> list of XmlNode
        self.attribs = {}    # attribute name -> value

    def __repr__(self):
        return repr(self.__dict__)

    def append_child(self, name, node=None):
        """Append *node* (a fresh XmlNode when omitted) under *name*; return it."""
        if node is None:
            node = XmlNode()
        siblings = self.children.setdefault(name, [])
        siblings.append(node)
        return node

    def __getattr__(self, name):
        # Children shadow attributes of the same name.
        if name in self.children:
            return self.children[name]
        if name in self.attribs:
            return self.attribs[name]
        raise AttributeError(name)
def _node_name(n):
    """Replace every non-alphanumeric character in *n* with an underscore."""
    return _node_name_re.sub('_', n)
def parse_xml(response):
    """Parse an XML document into a tree of XmlNode objects.

    *response* is handed straight to QXmlStreamReader, so anything that class
    accepts works. Element and attribute names are sanitized via _node_name()
    so they can be used as Python attribute names on XmlNode.
    """
    stream = QXmlStreamReader(response)
    document = XmlNode()
    current_node = document
    path = []  # stack of the ancestors of current_node
    while not stream.atEnd():
        stream.readNext()
        if stream.isStartElement():
            node = XmlNode()
            attrs = stream.attributes()
            for i in range(attrs.count()):
                attr = attrs.at(i)
                node.attribs[_node_name(attr.name())] = attr.value()
            current_node.append_child(_node_name(stream.name()), node)
            path.append(current_node)
            current_node = node
        elif stream.isEndElement():
            current_node = path.pop()
        elif stream.isCharacters():
            # Character data may arrive in several chunks; accumulate it.
            current_node.text += stream.text()
    return document
| 2,509
|
Python
|
.py
| 69
| 29.855072
| 80
| 0.649421
|
metabrainz/picard
| 3,687
| 383
| 10
|
GPL-2.0
|
9/5/2024, 5:11:02 PM (Europe/Amsterdam)
|
11,133
|
settingsoverride.py
|
metabrainz_picard/picard/util/settingsoverride.py
|
# -*- coding: utf-8 -*-
#
# Picard, the next-generation MusicBrainz tagger
#
# Copyright (C) 2019-2021 Laurent Monin
# Copyright (C) 2020 Tim Gates
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
from collections.abc import MutableMapping
class SettingsOverride(MutableMapping):
    """Mapping that layers temporary overrides on top of another config mapping.

    Reads fall back to ``orig_settings`` for keys that were never overridden;
    writes and deletes only ever touch the override layer, so the original
    settings are left untouched.

    Typical usage:

       settings = SettingsOverride(config.setting)
       settings["option"] = "value"
    """

    def __init__(self, orig_settings, *args, **kwargs):
        self.orig_settings = orig_settings
        self._dict = {}
        # Seed the override layer from any extra dict()-style arguments.
        for key, value in dict(*args, **kwargs).items():
            self[key] = value

    def __getitem__(self, key):
        if key in self._dict:
            return self._dict[key]
        return self.orig_settings[key]

    def __setitem__(self, key, value):
        self._dict[key] = value

    def __delitem__(self, key):
        # Deleting a key that was never overridden is a silent no-op.
        self._dict.pop(key, None)

    def __len__(self):
        # Note: only the override layer is counted/iterated.
        return len(self._dict)

    def __iter__(self):
        yield from self._dict

    def __repr__(self):
        merged = self.orig_settings.copy()
        merged.update(self._dict)
        return repr(merged)
| 1,974
|
Python
|
.py
| 53
| 31.622642
| 80
| 0.666143
|
metabrainz/picard
| 3,687
| 383
| 10
|
GPL-2.0
|
9/5/2024, 5:11:02 PM (Europe/Amsterdam)
|
11,134
|
cdrom.py
|
metabrainz_picard/picard/util/cdrom.py
|
# -*- coding: utf-8 -*-
#
# Picard, the next-generation MusicBrainz tagger
#
# Copyright (C) 2004 Robert Kaye
# Copyright (C) 2007 Lukáš Lalinský
# Copyright (C) 2008 Will
# Copyright (C) 2008, 2018-2021 Philipp Wolfer
# Copyright (C) 2009 david
# Copyright (C) 2013 Johannes Dewender
# Copyright (C) 2013 Sebastian Ramacher
# Copyright (C) 2013, 2018-2021, 2023-2024 Laurent Monin
# Copyright (C) 2013-2014 Michael Wiencek
# Copyright (C) 2016-2017 Sambhav Kothari
# Copyright (C) 2017 Sophist-UK
# Copyright (C) 2022 Bob Swift
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import os.path
from picard import log
from picard.config import get_config
from picard.const.sys import (
IS_LINUX,
IS_WIN,
)
try:
from libdiscid.compat import discid
except ImportError:
try:
import discid
except (ImportError, OSError):
discid = None
# Informational message for when the optional discid library is unavailable.
DISCID_NOT_LOADED_MESSAGE = "CDROM: discid library not found - Lookup CD functionality disabled"
# Kernel-provided table describing the system's CD-ROM drives (Linux only).
LINUX_CDROM_INFO = '/proc/sys/dev/cdrom/info'
def get_default_cdrom_drives():
    """Return a list holding libdiscid's default device, or [] if unavailable."""
    if discid is None:
        return []
    device = discid.get_default_device()
    return [device] if device else []
def _generic_iter_drives():
    """Yield devices from the comma-separated 'cd_lookup_device' setting."""
    config = get_config()
    for raw_device in config.setting['cd_lookup_device'].split(','):
        device = raw_device.strip()
        if device:
            yield device
def _parse_linux_cdrom_info(f):
drive_names = []
drive_audio_caps = []
DRIVE_NAME = 'drive name:'
CAN_PLAY_AUDIO = 'Can play audio:'
for line in f:
if line.startswith(DRIVE_NAME):
drive_names = line[len(DRIVE_NAME):].split()
break
if drive_names:
for line in f:
if line.startswith(CAN_PLAY_AUDIO):
drive_audio_caps = [v == '1' for v in line[len(CAN_PLAY_AUDIO):].split()]
break
yield from zip(drive_names, drive_audio_caps)
# Platform-specific drive enumeration: pick one _iter_drives implementation.
if IS_WIN:
    from ctypes import windll
    AUTO_DETECT_DRIVES = True
    DRIVE_TYPE_CDROM = 5  # GetDriveTypeW() return value for CD-ROM drives

    def _iter_drives():
        # Probe drive letters A: through Z: via the Windows API.
        GetLogicalDrives = windll.kernel32.GetLogicalDrives
        GetDriveType = windll.kernel32.GetDriveTypeW
        mask = GetLogicalDrives()
        for i in range(26):
            if mask >> i & 1:
                drive = chr(i + ord('A')) + ':'
                if GetDriveType(drive) == DRIVE_TYPE_CDROM:
                    yield drive
elif IS_LINUX and os.path.isfile(LINUX_CDROM_INFO):
    AUTO_DETECT_DRIVES = True

    def _iter_drives():
        # Read info from /proc/sys/dev/cdrom/info
        with open(LINUX_CDROM_INFO, 'r') as f:
            # Show only drives that are capable of playing audio
            yield from (
                os.path.realpath('/dev/%s' % drive)
                for drive, can_play_audio in _parse_linux_cdrom_info(f)
                if can_play_audio
            )
else:
    # There might be more drives we couldn't detect
    # setting uses a text field instead of a drop-down
    AUTO_DETECT_DRIVES = False
    _iter_drives = _generic_iter_drives
def get_cdrom_drives():
    """Return a sorted list of disc drives available on this machine.

    Combines the platform defaults from picard.const.defaults with whatever
    the platform-specific detection yields; detection errors are logged and
    otherwise ignored.
    """
    from picard.const.defaults import DEFAULT_DRIVES
    found = set(DEFAULT_DRIVES)
    try:
        for drive in _iter_drives():
            found.add(drive)
    except OSError as error:
        log.error(error)
    return sorted(found)
| 4,112
|
Python
|
.py
| 115
| 30.121739
| 96
| 0.670947
|
metabrainz/picard
| 3,687
| 383
| 10
|
GPL-2.0
|
9/5/2024, 5:11:02 PM (Europe/Amsterdam)
|
11,135
|
tags.py
|
metabrainz_picard/picard/util/tags.py
|
# -*- coding: utf-8 -*-
#
# Picard, the next-generation MusicBrainz tagger
#
# Copyright (C) 2007-2008, 2011 Lukáš Lalinský
# Copyright (C) 2008-2009, 2018-2021, 2023 Philipp Wolfer
# Copyright (C) 2011 Johannes Weißl
# Copyright (C) 2011-2013 Michael Wiencek
# Copyright (C) 2012 Chad Wilson
# Copyright (C) 2013 Calvin Walton
# Copyright (C) 2013-2014, 2019-2021, 2023-2024 Laurent Monin
# Copyright (C) 2013-2015, 2017 Sophist-UK
# Copyright (C) 2019 Zenara Daley
# Copyright (C) 2023 Bob Swift
# Copyright (C) 2023 certuna
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import re
from picard.i18n import (
N_,
gettext as _,
)
# Mapping of internal tag names to their localizable display names.
# Keys starting with '~' are internal Picard variables (not written to files).
TAG_NAMES = {
    'acoustid_fingerprint': N_('AcoustID Fingerprint'),
    'acoustid_id': N_('AcoustID'),
    'albumartist': N_('Album Artist'),
    'albumartistsort': N_('Album Artist Sort Order'),
    'album': N_('Album'),
    'albumsort': N_('Album Sort Order'),
    'arranger': N_('Arranger'),
    'artist': N_('Artist'),
    'artists': N_('Artists'),
    'artistsort': N_('Artist Sort Order'),
    'asin': N_('ASIN'),
    'barcode': N_('Barcode'),
    'bpm': N_('BPM'),
    'catalognumber': N_('Catalog Number'),
    'comment': N_('Comment'),
    'compilation': N_('Compilation (iTunes)'),
    'composer': N_('Composer'),
    'composersort': N_('Composer Sort Order'),
    'conductor': N_('Conductor'),
    'copyright': N_('Copyright'),
    'date': N_('Date'),
    'director': N_('Director'),
    'discid': N_('Disc Id'),
    'discnumber': N_('Disc Number'),
    'discsubtitle': N_('Disc Subtitle'),
    'djmixer': N_('DJ-Mixer'),
    'encodedby': N_('Encoded By'),
    'encodersettings': N_('Encoder Settings'),
    'engineer': N_('Engineer'),
    'gapless': N_('Gapless Playback'),
    'genre': N_('Genre'),
    'grouping': N_('Grouping'),
    'isrc': N_('ISRC'),
    'key': N_('Key'),
    'label': N_('Record Label'),
    'language': N_('Language'),
    '~length': N_('Length'),
    'license': N_('License'),
    'lyricist': N_('Lyricist'),
    'lyrics': N_('Lyrics'),
    'media': N_('Media'),
    'mixer': N_('Mixer'),
    'mood': N_('Mood'),
    'movement': N_('Movement'),
    'movementnumber': N_('Movement Number'),
    'movementtotal': N_('Movement Count'),
    'musicbrainz_albumartistid': N_('MusicBrainz Release Artist Id'),
    'musicbrainz_albumid': N_('MusicBrainz Release Id'),
    'musicbrainz_artistid': N_('MusicBrainz Artist Id'),
    'musicbrainz_discid': N_('MusicBrainz Disc Id'),
    'musicbrainz_originalalbumid': N_('MusicBrainz Original Release Id'),
    'musicbrainz_originalartistid': N_('MusicBrainz Original Artist Id'),
    'musicbrainz_recordingid': N_('MusicBrainz Recording Id'),
    'musicbrainz_releasegroupid': N_('MusicBrainz Release Group Id'),
    'musicbrainz_trackid': N_('MusicBrainz Track Id'),
    'musicbrainz_workid': N_('MusicBrainz Work Id'),
    'musicip_fingerprint': N_('MusicIP Fingerprint'),
    'musicip_puid': N_('MusicIP PUID'),
    'originalalbum': N_('Original Album'),
    'originalartist': N_('Original Artist'),
    'originaldate': N_('Original Release Date'),
    'originalfilename': N_('Original Filename'),
    'originalyear': N_('Original Year'),
    'performer': N_('Performer'),
    'podcast': N_('Podcast'),
    'podcasturl': N_('Podcast URL'),
    'producer': N_('Producer'),
    'r128_album_gain': N_('R128 Album Gain'),
    'r128_track_gain': N_('R128 Track Gain'),
    '~rating': N_('Rating'),
    'releasecountry': N_('Release Country'),
    'releasedate': N_('Release Date'),
    'releasestatus': N_('Release Status'),
    'releasetype': N_('Release Type'),
    'remixer': N_('Remixer'),
    'replaygain_album_gain': N_('ReplayGain Album Gain'),
    'replaygain_album_peak': N_('ReplayGain Album Peak'),
    'replaygain_album_range': N_('ReplayGain Album Range'),
    'replaygain_reference_loudness': N_('ReplayGain Reference Loudness'),
    'replaygain_track_gain': N_('ReplayGain Track Gain'),
    'replaygain_track_peak': N_('ReplayGain Track Peak'),
    'replaygain_track_range': N_('ReplayGain Track Range'),
    'script': N_('Script'),
    'show': N_('Show Name'),
    'showsort': N_('Show Name Sort Order'),
    'showmovement': N_('Show Work & Movement'),
    'subtitle': N_('Subtitle'),
    'syncedlyrics': N_('Synced Lyrics'),
    'title': N_('Title'),
    'titlesort': N_('Title Sort Order'),
    'totaldiscs': N_('Total Discs'),
    'totaltracks': N_('Total Tracks'),
    'tracknumber': N_('Track Number'),
    'website': N_('Artist Website'),
    'work': N_('Work'),
    'writer': N_('Writer'),
}
# Internal variables derived from the file itself; presumably preserved
# across metadata reloads rather than recomputed from MusicBrainz data.
PRESERVED_TAGS = (
    '~bitrate',
    '~bits_per_sample',
    '~channels',
    '~dirname',
    '~extension',
    '~filename',
    '~file_created_timestamp',
    '~file_modified_timestamp',
    '~format',
    '~sample_rate',
    '~video',
)
# Tags that got generated in some way from the audio content.
# Those can be set by Picard but the new values usually should be kept
# when moving the file between tags.
CALCULATED_TAGS = {
    'acoustid_fingerprint',
    'acoustid_id',
    'replaygain_album_gain',
    'replaygain_album_peak',
    'replaygain_album_range',
    'replaygain_reference_loudness',
    'replaygain_track_gain',
    'replaygain_track_peak',
    'replaygain_track_range',
    'r128_album_gain',
    'r128_track_gain',
}
# Tags that contain file-related technical information.
FILE_INFO_TAGS = {
    '~bitrate',
    '~bits_per_sample',
    '~channels',
    '~filesize',
    '~format',
    '~sample_rate',
}
def display_tag_name(name):
    """Return the localized display name for a tag.

    For tag names of the form "base:description" a non-empty description is
    appended in square brackets, e.g. "Comment [foo]".  Unknown tags are
    displayed with their raw base name.
    """
    base, _sep, desc = name.partition(':')
    display = _(TAG_NAMES.get(base, base))
    if desc:
        return '%s [%s]' % (display, desc)
    return display
# Matches a three-letter language prefix followed by a colon, e.g. "eng:".
RE_COMMENT_LANG = re.compile('^([a-zA-Z]{3}):')


def parse_comment_tag(name):
    """Parse a comment tag name such as "comment:XXX:desc".

    XXX is a three-letter language code; when no language is given
    ("comment:desc") the default "eng" is assumed.  The special name
    "comment:XXX" denotes an unspecified language with empty description.

    Returns a (lang, desc) tuple.
    """
    parts = name.split(':', 1)
    if len(parts) < 2:
        return 'eng', ''
    desc = parts[1]
    match_ = RE_COMMENT_LANG.match(desc)
    if match_:
        return match_.group(1), desc[4:]
    if desc == 'XXX':
        # Special case for unspecified language + empty description
        return 'XXX', ''
    return 'eng', desc
def parse_subtag(name):
    """Parse a tag name such as "lyrics:XXX:desc".

    XXX is the language code.  Unlike parse_comment_tag the colons are
    positional: an empty language part ("lyrics::desc") falls back to
    "eng".  Returns a (lang, desc) tuple.
    """
    parts = name.split(':')
    lang = parts[1] if len(parts) > 1 and parts[1] else 'eng'
    desc = parts[2] if len(parts) > 2 else ''
    return lang, desc
| 7,445
|
Python
|
.py
| 214
| 30.313084
| 80
| 0.634522
|
metabrainz/picard
| 3,687
| 383
| 10
|
GPL-2.0
|
9/5/2024, 5:11:02 PM (Europe/Amsterdam)
|
11,136
|
icontheme.py
|
metabrainz_picard/picard/util/icontheme.py
|
# -*- coding: utf-8 -*-
#
# Picard, the next-generation MusicBrainz tagger
#
# Copyright (C) 2006-2008 Lukáš Lalinský
# Copyright (C) 2013, 2018-2021, 2024 Laurent Monin
# Copyright (C) 2016-2017 Sambhav Kothari
# Copyright (C) 2020-2022 Philipp Wolfer
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import os.path
from PyQt6 import QtGui
from picard.const.sys import IS_WIN
# Freedesktop-style icon search paths; empty on Windows where themed icon
# lookup does not apply.
if IS_WIN:
    _search_paths = []
else:
    _search_paths = [os.path.expanduser('~/.icons')]
    _search_paths += [
        os.path.join(path, 'icons') for path in
        os.environ.get('XDG_DATA_DIRS', '/usr/share').split(':')
    ]
    _search_paths.append('/usr/share/pixmaps')
# Name of the current desktop icon theme, or None if it cannot be detected.
_current_theme = None
if 'XDG_CURRENT_DESKTOP' in os.environ:
    desktop = os.environ['XDG_CURRENT_DESKTOP'].lower()
    if desktop in {'gnome', 'unity'}:
        # gsettings prints the value quoted; [1:-1] strips the quotes.
        _current_theme = (os.popen('gsettings get org.gnome.desktop.interface icon-theme').read().strip()[1:-1]
                          or None)
elif os.environ.get('KDE_FULL_SESSION'):
    _current_theme = (os.popen("kreadconfig --file kdeglobals --group Icons --key Theme --default crystalsvg").read().strip()
                      or None)
# Preferred icon size tuples; the first entry is the primary lookup size.
ICON_SIZE_MENU = ('16x16',)
ICON_SIZE_TOOLBAR = ('22x22',)
ICON_SIZE_ALL = ('22x22', '16x16')
def lookup(name, size=ICON_SIZE_ALL):
    """Look up a themed icon by name and return it as a QIcon.

    Searches the current desktop icon theme in the known search paths
    first; if the icon is not found (or no theme is set), falls back to
    the icons bundled in the Qt resource system (":/images/...").
    """
    icon = QtGui.QIcon()
    if _current_theme:
        for base in _search_paths:
            for category in ('actions', 'places', 'devices'):
                primary = os.path.join(base, _current_theme, size[0], category, name) + '.png'
                if os.path.exists(primary):
                    icon.addFile(primary)
                    # Also register the remaining sizes of the same icon.
                    for other_size in size[1:]:
                        icon.addFile(os.path.join(base, _current_theme, other_size, category, name) + '.png')
                    return icon
    # Fall back to the bundled resource icons.
    for s in size:
        icon.addFile('/'.join([':', 'images', s, name]) + '.png')
    return icon
| 2,569
|
Python
|
.py
| 60
| 37.183333
| 125
| 0.656914
|
metabrainz/picard
| 3,687
| 383
| 10
|
GPL-2.0
|
9/5/2024, 5:11:02 PM (Europe/Amsterdam)
|
11,137
|
progresscheckpoints.py
|
metabrainz_picard/picard/util/progresscheckpoints.py
|
# -*- coding: utf-8 -*-
#
# Picard, the next-generation MusicBrainz tagger
#
# Copyright (C) 2020 Gabriel Ferreira
# Copyright (C) 2020-2021 Laurent Monin
# Copyright (C) 2021 Philipp Wolfer
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
class ProgressCheckpoints:
    """Evenly spaced job indexes used as progress-reporting checkpoints."""

    def __init__(self, num_jobs, num_checkpoints=10):
        """Map roughly evenly spaced job indexes to progress percentages.

        With `num_jobs` jobs and `num_checkpoints` checkpoints, every
        num_jobs/num_checkpoints-th job index becomes a checkpoint; the
        last job index always maps to 100%.
        """
        self.num_jobs = num_jobs
        self._checkpoints = {}
        if num_checkpoints > 0:
            self._offset = num_jobs / num_checkpoints
            for step in range(1, num_checkpoints):
                self._checkpoints[int(step * self._offset)] = 100 * step // num_checkpoints
            if num_jobs > 0:
                # The final job always reports completion.
                self._checkpoints[num_jobs - 1] = 100

    def is_checkpoint(self, index):
        """Return True if the job at `index` is a progress checkpoint."""
        return index in self._checkpoints

    def progress(self, index):
        """Return the percentage for a checkpoint index, or None otherwise."""
        return self._checkpoints.get(index)
| 1,662
|
Python
|
.py
| 39
| 37.358974
| 104
| 0.696351
|
metabrainz/picard
| 3,687
| 383
| 10
|
GPL-2.0
|
9/5/2024, 5:11:02 PM (Europe/Amsterdam)
|
11,138
|
pipe.py
|
metabrainz_picard/picard/util/pipe.py
|
# -*- coding: utf-8 -*-
#
# Picard, the next-generation MusicBrainz tagger
#
# Copyright (C) 2022 Bob Swift
# Copyright (C) 2022 Kamil
# Copyright (C) 2022 skelly37
# Copyright (C) 2022-2023 Philipp Wolfer
# Copyright (C) 2022-2024 Laurent Monin
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
from abc import (
ABCMeta,
abstractmethod,
)
import concurrent.futures
import os
from tempfile import NamedTemporaryFile
from typing import (
Any,
Iterable,
List,
Optional,
Tuple,
)
from picard import (
PICARD_APP_ID,
log,
)
from picard.const.sys import (
IS_HAIKU,
IS_MACOS,
IS_WIN,
)
from picard.util import sanitize_filename
if IS_WIN:
from pywintypes import error as WinApiError # type: ignore
import win32file # type: ignore
import win32pipe # type: ignore
class PipeError(Exception):
    """Base class for all pipe-related errors.

    Subclasses set MESSAGE to a fixed description that is prepended to
    any extra messages passed to the constructor.
    """

    MESSAGE: str = ""

    def __init__(self, *messages):
        extra = tuple(messages)
        if self.MESSAGE:
            self.messages: Tuple[str] = (self.MESSAGE,) + extra
        else:
            self.messages: Tuple[str] = extra  # type: ignore

    def __str__(self) -> str:
        joined = "\n ".join(str(m) for m in self.messages)
        return f"ERROR: {joined or 'unknown'}"
# Concrete pipe error types; each carries a fixed MESSAGE that PipeError
# prepends to any extra messages.
class PipeErrorInvalidArgs(PipeError):
    MESSAGE = "Pipe() args argument has to be iterable"
class PipeErrorInvalidAppData(PipeError):
    MESSAGE = "Pipe() app_name and app_version arguments have to be str"
class PipeErrorNotFound(PipeError):
    MESSAGE = "Pipe doesn't exist"
class PipeErrorBroken(PipeError):
    MESSAGE = "Pipe is broken"
class PipeErrorInvalidResponse(PipeError):
    MESSAGE = "Invalid response from pipe"
class PipeErrorWin(PipeError):
    MESSAGE = "Windows API error"
class PipeErrorNoPermission(PipeError):
    MESSAGE = "No permissions for creating a pipe"
class PipeErrorNoDestination(PipeError):
    MESSAGE = "No available dirs to place a pipe"
class AbstractPipe(metaclass=ABCMeta):
    """Base class for the single-instance IPC pipe.

    On construction it first tries to pass its args to an already running
    instance via an existing pipe; if no instance answers, it declares
    itself the pipe owner so a subclass can create the pipe.
    """

    # Sentinel returned by read_from_pipe() when nothing was received.
    NO_RESPONSE_MESSAGE: str = "No response from FIFO"
    # NUL byte used as a message separator / keep-alive filler.
    MESSAGE_TO_IGNORE: str = '\0'
    TIMEOUT_SECS_WRITE: float = 1.5
    # NOTE(review): chaining @classmethod and @property is deprecated in
    # Python 3.11 and removed in 3.13 — confirm supported Python versions.
    @classmethod
    @property
    @abstractmethod
    def PIPE_DIRS(cls):
        """
        Tuple of dirs where pipe could possibly be created
        **Virtual**, implement in child classes
        """
        raise NotImplementedError
    def __init__(self, app_name: str, app_version: str, args: Optional[Iterable[str]] = None,
                 forced_path: Optional[str] = None, identifier: Optional[str] = None):
        """
        :param app_name: (str) Name of the app, included in the pipe name
        :param app_version: (str) Version of the app, included in the pipe name
        :param identifier: (Optional[str]) config file / standalone identifier, included in pipe name
        :param args: (Optional[Iterable[str]]) Will be passed to an existing instance of app if possible
        :param forced_path: (Optional[str]) Testing-purposes only, bypass of no $HOME on testing machines
        """
        if args is None:
            self._args: Tuple[str] = tuple()  # type: ignore
        else:
            try:
                self._args = tuple(args)  # type: ignore
            except TypeError as exc:
                raise PipeErrorInvalidArgs(exc) from None
        if not self._args:
            # Default command asks a running instance to show its window.
            self._args = ('SHOW',)
        if not isinstance(app_name, str) or not isinstance(app_version, str):
            raise PipeErrorInvalidAppData
        self._identifier = identifier or 'main'
        if forced_path:
            # NOTE(review): path_was_forced is not assigned in this branch —
            # confirm callers never read it when forced_path is used.
            self._paths = (forced_path,)
        elif IS_WIN or os.getenv('HOME'):
            self._paths = self.__generate_filenames(app_name, app_version)
            self.path_was_forced = False
        else:
            # No $HOME (e.g. bare CI machines): fall back to a temp file.
            self._paths = (NamedTemporaryFile(delete=False).name,)
            self.path_was_forced = True
            log.debug("Pipe path had to be mocked by a temporary file")
        self.is_pipe_owner: bool = False
        self.pipe_running = False
        self.unexpected_removal = False
        self.__thread_pool = concurrent.futures.ThreadPoolExecutor()
        # Probe each candidate path: if sending fails, no other instance is
        # listening there, so this process becomes the pipe owner.
        for path in self._paths:
            self.path = path
            for arg in self._args:
                if not self.send_to_pipe(arg):
                    self.is_pipe_owner = True
                    break
            if self.path:
                log.debug("Using pipe: %r", self.path)
                break
    def _remove_temp_attributes(self) -> None:
        """
        Removing self._args and self._paths when child classes don't need them anymore.
        Should be called by child classes.
        """
        del self._args
        del self._paths
    def __generate_filenames(self, app_name: str, app_version: str) -> List[str]:
        """
        Returns list of paths available for pipe
        :param app_name: (str) Name of the app, included in the pipe name
        :param app_version: (str) Version of the app, included in the pipe name
        :return: List of available pipe paths
        :rtype: List[str]
        """
        _pipe_names = []
        for directory in self.PIPE_DIRS:
            if directory:
                _pipe_names.append(os.path.join(os.path.expanduser(directory),
                                                sanitize_filename(f"{app_name}_v{app_version}_{self._identifier}_pipe_file")))
        if _pipe_names:
            return _pipe_names
        raise PipeErrorNoDestination
    def _reader(self) -> str:
        """
        Listens on the pipe for messages
        **Virtual**, implement in child classes
        :return: What has been read from pipe
        :rtype: str
        """
        raise NotImplementedError()
    def _sender(self, message: str) -> bool:
        """
        Sends message to the pipe
        **Virtual**, implement in child classes
        :param message: (str)
        :return: True if operation went successfully, False otherwise
        :rtype: bool
        """
        raise NotImplementedError()
    def read_from_pipe(self) -> List[str]:
        """
        Common interface for the custom _reader implementations
        :return: List of messages or {self.NO_RESPONSE_MESSAGE} (if no messages received)
        :rtype: List[str]
        """
        try:
            res = self._reader()
            if res:
                # Messages are NUL-separated; drop empty fragments.
                out = [r for r in res.split(self.MESSAGE_TO_IGNORE) if r]
                if out:
                    return out
        except Exception as e:
            # https://docs.python.org/3/library/concurrent.futures.html#concurrent.futures.Future.result
            # If the call raised an exception, this method will raise the same exception.
            log.error("pipe reader exception: %s", e)
        return [self.NO_RESPONSE_MESSAGE]
    def send_to_pipe(self, message: str, timeout_secs: Optional[float] = None) -> bool:
        """
        Common interface for the custom _sender implementations
        :param message: (str) Message that will be sent to the pipe
        :param timeout_secs: (Optional[float]) Timeout for the function, by default it fallbacks to self.TIMEOUT_SECS
        :return: True if operation went successfully, False otherwise
        :rtype: bool
        """
        if timeout_secs is None:
            timeout_secs = self.TIMEOUT_SECS_WRITE
        # we're sending only filepaths, so we have to create some kind of separator
        # to avoid any potential conflicts and mixing the data
        try:
            sender = self.__thread_pool.submit(self._sender, message + self.MESSAGE_TO_IGNORE)
            if sender.result(timeout=timeout_secs):
                return True
        except concurrent.futures._base.TimeoutError:
            # NOTE(review): concurrent.futures._base is private; the public
            # name is concurrent.futures.TimeoutError.
            if self.pipe_running:
                log.warning("Couldn't send: %r", message)
            # hacky way to kill the sender
            self.read_from_pipe()
        except Exception as e:
            # https://docs.python.org/3/library/concurrent.futures.html#concurrent.futures.Future.result
            # If the call raised an exception, this method will raise the same exception.
            log.error("pipe sender exception: %s", e)
        return False
    def stop(self):
        # Stop listening, wake up the blocked reader with a filler message
        # and tear down the worker thread pool.
        log.debug("Stopping pipe")
        self.pipe_running = False
        self.send_to_pipe(self.MESSAGE_TO_IGNORE)
        try:
            self.__thread_pool.shutdown(wait=True, cancel_futures=True)
        except TypeError:  # cancel_futures is not supported on Python < 3.9
            self.__thread_pool.shutdown(wait=True)
class UnixPipe(AbstractPipe):
    """Pipe implementation backed by a named FIFO on Unix-like systems."""

    # Preferred locations; XDG_RUNTIME_DIR may be unset (None) and is then
    # skipped by __generate_filenames().
    PIPE_DIRS: Tuple[str] = (
        os.getenv('XDG_RUNTIME_DIR'),
        "~/.config/MusicBrainz/Picard/pipes/",
    )  # type: ignore
    def __init__(self, app_name: str, app_version: str, args: Optional[Iterable[str]] = None,
                 forced_path: Optional[str] = None, identifier: Optional[str] = None):
        super().__init__(app_name, app_version, args, forced_path, identifier)
        if not self.path:
            raise PipeErrorNoPermission
        elif self.is_pipe_owner:
            # No other instance answered: create the FIFO ourselves.
            self.__create_pipe()
        self._remove_temp_attributes()
    def __create_pipe(self) -> None:
        """
        Create pipe on Unix, if it doesn't exist
        """
        # start as False and set True only once the FIFO is really created
        self.is_pipe_owner = False
        try:
            try:
                # just to be sure that there's no broken pipe left
                os.unlink(self.path)
            except FileNotFoundError:
                pass
            os.makedirs(os.path.dirname(self.path), exist_ok=True)
            os.mkfifo(self.path)
            self.is_pipe_owner = True
            log.debug("Pipe successfully created: %r", self.path)
        except PermissionError as e:
            # An empty path signals "no usable pipe" to the caller.
            log.warning("Couldn't create pipe: %r (%s)", self.path, e)
            self.path = ""
    def _sender(self, message: str) -> bool:
        # Writing to a FIFO blocks until a reader opens it; the timeout in
        # send_to_pipe() bounds this.
        if not os.path.exists(self.path):
            return False
        try:
            with open(self.path, 'w') as fifo:
                fifo.write(message)
            return True
        except BrokenPipeError:
            log.warning("BrokenPipeError happened for %r", message)
            return False
    def _reader(self) -> str:
        # Block on the FIFO until some content arrives (open() blocks until
        # a writer connects; read() may return "" when the writer closes).
        response: str = ""
        while not response:
            try:
                with open(self.path, 'r') as fifo:
                    response = fifo.read()
            except FileNotFoundError:
                log.error("Pipe file removed unexpectedly")
                self.pipe_running = False
                self.unexpected_removal = True
                raise PipeErrorNotFound from None
            except BrokenPipeError:
                log.warning("BrokenPipeError happened while listening to the pipe")
                break
        return response or self.NO_RESPONSE_MESSAGE
class MacOSPipe(UnixPipe):
    # macOS keeps the FIFO in the app's Application Support directory.
    PIPE_DIRS: Tuple[str] = (os.path.join("~/Library/Application Support/", PICARD_APP_ID),)
class HaikuPipe(UnixPipe):
    # Haiku keeps per-app data under ~/config/var.
    PIPE_DIRS: Tuple[str] = ("~/config/var/MusicBrainz/Picard/",)
class WinPipe(AbstractPipe):
    """Pipe implementation backed by a Windows named pipe (win32pipe)."""

    # win32pipe.CreateNamedPipe
    # more about the arguments: https://docs.microsoft.com/en-us/windows/win32/api/winbase/nf-winbase-createnamedpipea
    __MAX_INSTANCES: int = 1
    __BUFFER_SIZE: int = 65536
    # timeout doesn't really matter, concurrent.futures ensures that connections are closed in declared time
    # the value is in milliseconds
    __DEFAULT_TIMEOUT: int = 300
    # win32file.CreateFile
    # more about the arguments: http://timgolden.me.uk/pywin32-docs/win32file__CreateFile_meth.html
    __SHARE_MODE: int = 0
    __FLAGS_AND_ATTRIBUTES: int = 0
    # pywintypes.error error codes
    # more about the error codes: https://docs.microsoft.com/en-us/openspecs/windows_protocols/ms-erref/18d8fbe8-a967-4f1c-ae50-99ca8e491d2d
    __FILE_NOT_FOUND_ERROR_CODE: int = 2
    __BROKEN_PIPE_ERROR_CODE: int = 109
    # All Windows named pipes live in this namespace.
    PIPE_DIRS: Tuple[str] = ("\\\\.\\pipe\\",)
    def __init__(self, app_name: str, app_version: str, args: Optional[Iterable[str]] = None,
                 forced_path: Optional[str] = None, identifier: Optional[str] = None):
        # type checking is already enforced in the AbstractPipe
        try:
            # Dots are not usable in a Windows pipe name segment.
            app_version = app_version.replace(".", "-")
        except AttributeError:
            pass
        super().__init__(app_name, app_version, args, forced_path, identifier)
        self.__create_pipe()
        self._remove_temp_attributes()
    def __create_pipe(self):
        # Create the server end of the named pipe; on failure another
        # instance owns it and this one is not the pipe owner.
        try:
            self.__pipe = win32pipe.CreateNamedPipe(
                self.path,
                win32pipe.PIPE_ACCESS_DUPLEX,
                win32pipe.PIPE_TYPE_MESSAGE | win32pipe.PIPE_WAIT,
                self.__MAX_INSTANCES,
                self.__BUFFER_SIZE,
                self.__BUFFER_SIZE,
                self.__DEFAULT_TIMEOUT,
                None)
            self.is_pipe_owner = True
        except WinApiError:
            self.__pipe = None
            self.is_pipe_owner = False
    def __close_pipe(self):
        # Close the server handle, tolerating API errors during shutdown.
        if self.__pipe:
            handle = self.__pipe
            self.__pipe = None
            try:
                win32file.CloseHandle(handle)
            except WinApiError:
                log.error('Error closing pipe', exc_info=True)
    def _sender(self, message: str) -> bool:
        # Open the client end of an existing pipe and write one message.
        try:
            pipe = win32file.CreateFile(
                self.path,
                win32file.GENERIC_READ | win32file.GENERIC_WRITE,
                self.__SHARE_MODE,
                None,
                win32file.OPEN_EXISTING,
                self.__FLAGS_AND_ATTRIBUTES,
                None
            )
        except WinApiError as err:
            # File did not exist, no existing pipe to write to
            if err.winerror == self.__FILE_NOT_FOUND_ERROR_CODE:
                return False
            else:
                raise
        try:
            win32file.WriteFile(pipe, str.encode(message))
        finally:
            win32file.CloseHandle(pipe)
        return True
    def _reader(self) -> str:
        # Wait for a client connection and read one buffer-sized message.
        exit_code = 0
        message = None
        try:
            win32pipe.ConnectNamedPipe(self.__pipe, None)
            (exit_code, message) = win32file.ReadFile(self.__pipe, self.__BUFFER_SIZE)
        except WinApiError as err:
            if err.winerror == self.__FILE_NOT_FOUND_ERROR_CODE:
                # we just keep reopening the pipe, nothing wrong is happening
                pass
            elif err.winerror == self.__BROKEN_PIPE_ERROR_CODE:
                raise PipeErrorBroken from None
            else:
                raise PipeErrorWin(f"{err.winerror}; {err.funcname}; {err.strerror}") from None
        finally:
            # Pipe was closed when client disconnected, recreate
            self.__close_pipe()
            if self.pipe_running:
                self.__create_pipe()
        if message is not None:
            message = message.decode('utf-8')
            if exit_code == 0:
                return message  # type: ignore
            else:
                raise PipeErrorInvalidResponse(message)  # type: ignore
        else:
            return self.NO_RESPONSE_MESSAGE
    def stop(self):
        super().stop()
        if self.is_pipe_owner:
            self.__close_pipe()
# Select the platform-appropriate pipe implementation.
if IS_WIN:
    Pipe: Any = WinPipe
elif IS_MACOS:
    Pipe = MacOSPipe
elif IS_HAIKU:
    Pipe = HaikuPipe
else:
    Pipe = UnixPipe
| 16,220
|
Python
|
.py
| 398
| 31.298995
| 140
| 0.611394
|
metabrainz/picard
| 3,687
| 383
| 10
|
GPL-2.0
|
9/5/2024, 5:11:02 PM (Europe/Amsterdam)
|
11,139
|
macos.py
|
metabrainz_picard/picard/util/macos.py
|
# -*- coding: utf-8 -*-
#
# Picard, the next-generation MusicBrainz tagger
#
# Copyright (C) 2024 Laurent Monin
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import os
from picard import log
def _find_root_volume():
    """Return the /Volumes/ entry that is a symlink to the boot volume "/".

    Returns None when detection fails (e.g. /Volumes/ is missing or
    unreadable); the failure is logged as a warning.
    """
    try:
        symlinks = (e for e in os.scandir("/Volumes/") if e.is_symlink())
        for candidate in symlinks:
            if os.path.realpath(candidate.path) == "/":
                return candidate.path
    except OSError:
        log.warning("Could not detect macOS boot volume", exc_info=True)
    return None
def extend_root_volume_path(path):
    """Prefix a plain absolute path with the macOS boot volume.

    Paths already under /Volumes/ are returned unchanged; otherwise the
    detected boot volume path (e.g. "/Volumes/Macintosh HD") is prepended.
    If the boot volume cannot be detected the path is returned unchanged.
    """
    if path.startswith("/Volumes/"):
        return path
    root_volume = _find_root_volume()
    if not root_volume:
        return path
    # Drop a single leading slash so os.path.join does not discard the prefix.
    relative = path[1:] if path.startswith("/") else path
    return os.path.join(root_volume, relative)
def strip_root_volume_path(path):
    """Remove the boot-volume prefix from a /Volumes/ path.

    "/Volumes/<boot volume>/foo" becomes "/foo"; paths on other volumes,
    or paths not under /Volumes/, are returned unchanged.  Returns the
    original path when the boot volume cannot be detected.
    """
    if not path.startswith("/Volumes/"):
        return path
    root_volume = _find_root_volume()
    if not root_volume:
        return path
    normalized = os.path.normpath(path)
    if not normalized.startswith(root_volume):
        return path
    return os.path.join('/', normalized[len(root_volume):])
| 1,761
|
Python
|
.py
| 46
| 33.369565
| 80
| 0.68717
|
metabrainz/picard
| 3,687
| 383
| 10
|
GPL-2.0
|
9/5/2024, 5:11:02 PM (Europe/Amsterdam)
|
11,140
|
mbserver.py
|
metabrainz_picard/picard/util/mbserver.py
|
# -*- coding: utf-8 -*-
#
# Picard, the next-generation MusicBrainz tagger
#
# Copyright (C) 2021 Laurent Monin
# Copyright (C) 2021 Philipp Wolfer
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
from collections import namedtuple
from picard.config import get_config
from picard.const import MUSICBRAINZ_SERVERS
from picard.util import build_qurl
# Lightweight (host, port) pair used by the submission helpers below.
ServerTuple = namedtuple('ServerTuple', ('host', 'port'))
def is_official_server(host):
    """Returns True, if host is an official MusicBrainz server for the primary database.
    Args:
        host: the hostname
    Returns: True, if host is an official MusicBrainz server, False otherwise
    """
    return host in MUSICBRAINZ_SERVERS
def get_submission_server():
    """Return the (host, port) used for data submission.

    Submission should normally target the primary MusicBrainz database:
    the configured `server_host` is used only when it is an official
    server, or when `use_server_for_submission` is explicitly enabled;
    otherwise the primary official server over HTTPS is returned.

    Returns: ServerTuple of hostname and port, e.g. ('musicbrainz.org', 443)
    """
    config = get_config()
    host = config.setting['server_host']
    if is_official_server(host):
        return ServerTuple(host, 443)
    if host and config.setting['use_server_for_submission']:
        return ServerTuple(host, config.setting['server_port'])
    return ServerTuple(MUSICBRAINZ_SERVERS[0], 443)
def build_submission_url(path=None, query_args=None):
    """Build a full submission URL from a path and query parameters.

    Args:
        path: The path for the URL
        query_args: A dict of query parameters

    Returns: The submission URL as a string
    """
    host, port = get_submission_server()
    return build_qurl(host, port, path, query_args).toString()
| 2,522
|
Python
|
.py
| 58
| 39.637931
| 89
| 0.741725
|
metabrainz/picard
| 3,687
| 383
| 10
|
GPL-2.0
|
9/5/2024, 5:11:02 PM (Europe/Amsterdam)
|
11,141
|
lrucache.py
|
metabrainz_picard/picard/util/lrucache.py
|
# -*- coding: utf-8 -*-
#
# Picard, the next-generation MusicBrainz tagger
#
# Copyright (C) 2017 Antonio Larrosa
# Copyright (C) 2018-2021 Laurent Monin
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
from collections.abc import MutableMapping
class LRUCache(MutableMapping):
    """Mapping with a Least Recently Used eviction policy.

    At most `max_size` entries are kept.  Reading or writing a key marks
    it as most recently used; inserting beyond the limit silently drops
    the least recently used entry.  Originally used to cache generated
    pixmaps in the CoverArtBox, but generic enough for other purposes.
    """

    def __init__(self, max_size, *args, **kwargs):
        """Create a cache holding at most `max_size` entries.

        Any extra positional/keyword arguments are interpreted like the
        dict() constructor and used to seed the cache.
        """
        self._ordered_keys = []
        self._max_size = max_size
        self._dict = dict()
        for key, value in dict(*args, **kwargs).items():
            self[key] = value

    def _touch(self, key):
        # Move `key` to the front (most recently used position).
        self._ordered_keys.remove(key)
        self._ordered_keys.insert(0, key)

    def __getitem__(self, key):
        value = self._dict[key]
        self._touch(key)
        return value

    def __setitem__(self, key, value):
        if key in self._dict:
            self._ordered_keys.remove(key)
        self._ordered_keys.insert(0, key)
        self._dict[key] = value
        if len(self._dict) > self._max_size:
            # Evict the least recently used entry (back of the key list).
            evicted = self._ordered_keys.pop()
            del self._dict[evicted]

    def __delitem__(self, key):
        del self._dict[key]
        self._ordered_keys.remove(key)

    def __len__(self):
        return len(self._dict)

    def __iter__(self):
        return iter(self._dict)

    def __repr__(self):
        return repr(self._dict)
| 3,041
|
Python
|
.py
| 79
| 32.949367
| 80
| 0.646081
|
metabrainz/picard
| 3,687
| 383
| 10
|
GPL-2.0
|
9/5/2024, 5:11:02 PM (Europe/Amsterdam)
|
11,142
|
versions.py
|
metabrainz_picard/picard/util/versions.py
|
# -*- coding: utf-8 -*-
#
# Picard, the next-generation MusicBrainz tagger
#
# Copyright (C) 2006-2014 Lukáš Lalinský
# Copyright (C) 2014-2015, 2017-2018, 2020-2021, 2023-2024 Laurent Monin
# Copyright (C) 2016 Sambhav Kothari
# Copyright (C) 2018-2019, 2021-2023 Philipp Wolfer
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
from collections import OrderedDict
from platform import python_version
from mutagen import version_string as mutagen_version
from PyQt6.QtCore import (
PYQT_VERSION_STR as pyqt_version,
qVersion,
)
from PyQt6.QtNetwork import QSslSocket
from picard import PICARD_FANCY_VERSION_STR
from picard.disc import discid_version
from picard.i18n import (
N_,
gettext as _,
)
from picard.util.astrcmp import astrcmp_implementation
# Lazily-built cache of version values, filled by _load_versions() on first use.
_versions = None
# Maps internal version keys to their human-readable component names.
_names = {
    'version': "Picard",
    'python-version': "Python",
    'pyqt-version': "PyQt",
    'qt-version': "Qt",
    'mutagen-version': "Mutagen",
    'discid-version': "Discid",
    'astrcmp': "astrcmp",
    'ssl-version': "SSL",
}
def _load_versions():
    """Populate the module-level _versions mapping with component versions.

    Called lazily from as_dict() the first time version info is requested.
    """
    global _versions
    versions = OrderedDict()
    versions['version'] = PICARD_FANCY_VERSION_STR
    versions['python-version'] = python_version()
    versions['pyqt-version'] = pyqt_version
    versions['qt-version'] = qVersion()
    versions['mutagen-version'] = mutagen_version
    versions['discid-version'] = discid_version
    versions['astrcmp'] = astrcmp_implementation
    versions['ssl-version'] = QSslSocket.sslLibraryVersionString()
    _versions = versions
def _value_as_text(value, i18n=False):
    """Return `value` as display text, substituting a placeholder when empty.

    Args:
        value: Version string; falsy values mean the component is missing.
        i18n: If True, run the result through gettext for translation.
    """
    text = value if value else N_("is not installed")
    if i18n:
        return _(text)
    return text
def version_name(key):
    """Return the human-readable component name for a version key.

    Raises KeyError if `key` is not a known version key.
    """
    return _names[key]
def as_dict(i18n=False):
    """Return an ordered mapping of version key to display text.

    Version information is loaded lazily on first call.

    Args:
        i18n: If True, empty-value placeholders are translated.
    """
    if not _versions:
        _load_versions()
    result = OrderedDict()
    for key, value in _versions.items():
        result[key] = _value_as_text(value, i18n)
    return result
def as_string(i18n=False, separator=", "):
    """Return all component versions as a single joined string.

    Args:
        i18n: If True, empty-value placeholders are translated.
        separator: String placed between "Name version" entries.
    """
    entries = (f"{_names[key]} {value}"
               for key, value in as_dict(i18n).items())
    return separator.join(entries)
| 2,726
|
Python
|
.py
| 77
| 31.038961
| 80
| 0.695205
|
metabrainz/picard
| 3,687
| 383
| 10
|
GPL-2.0
|
9/5/2024, 5:11:02 PM (Europe/Amsterdam)
|
11,143
|
emptydir.py
|
metabrainz_picard/picard/util/emptydir.py
|
# -*- coding: utf-8 -*-
#
# Picard, the next-generation MusicBrainz tagger
#
# Copyright (C) 2006 Lukáš Lalinský
# Copyright (C) 2019, 2021-2022 Philipp Wolfer
# Copyright (C) 2020-2021, 2023-2024 Laurent Monin
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import os
import os.path
import shutil
from PyQt6.QtCore import QStandardPaths
# Files not considered relevant for a directory. If a directory has only
# some of these files inside it is still considered empty and can be deleted.
JUNK_FILES = {".DS_Store", "desktop.ini", "Desktop.ini", "Thumbs.db"}
# Special file system locations Picard should never delete.
# Built once at import time from all Qt standard locations (home, desktop,
# documents, ...) for the current user.
PROTECTED_DIRECTORIES = set()
for location in QStandardPaths.StandardLocation:
    for path in QStandardPaths.standardLocations(location):
        try:
            # Store resolved paths so later comparisons against realpath() match.
            PROTECTED_DIRECTORIES.add(os.path.realpath(path))
        except OSError:  # Path might no longer exist, skip it
            pass
class SkipRemoveDir(Exception):
    """Raised by rm_empty_dir when a directory was intentionally not removed
    (it is either not empty or a protected system location)."""
    pass
def is_empty_dir(path, ignored_files=None):
    """
    Checks if a directory is considered empty.

    Args:
        path: Path to directory to check.
        ignored_files: Iterable of file names to ignore. If the directory
            contains only some of those files it is still considered empty.
            Defaults to JUNK_FILES.

    Returns:
        True if path is considered an empty directory
        False if path is not considered an empty directory

    Raises:
        NotADirectoryError: path is not a directory
    """
    if ignored_files is None:
        ignored_files = JUNK_FILES
    # Empty means: every entry in the directory is an ignorable file.
    return set(os.listdir(path)).issubset(ignored_files)
def rm_empty_dir(path):
    """
    Delete a directory if it is considered empty by is_empty_dir and if it
    is not considered a special directory (e.g. the users home dir or ~/Desktop).

    Args:
        path: Path to directory to remove.

    Raises:
        NotADirectoryError: path is not a directory
        SkipRemoveDir: path was not deleted because it is either not empty
            or considered a special directory.
    """
    # Compare on the resolved path, matching how PROTECTED_DIRECTORIES is built.
    if os.path.realpath(path) in PROTECTED_DIRECTORIES:
        raise SkipRemoveDir("%s is a protected directory" % path)
    if not is_empty_dir(path):
        raise SkipRemoveDir("%s is not empty" % path)
    shutil.rmtree(path)
| 2,951
|
Python
|
.py
| 71
| 36.647887
| 81
| 0.72028
|
metabrainz/picard
| 3,687
| 383
| 10
|
GPL-2.0
|
9/5/2024, 5:11:02 PM (Europe/Amsterdam)
|
11,144
|
bytes2human.py
|
metabrainz_picard/picard/util/bytes2human.py
|
# -*- coding: utf-8 -*-
#
# Picard, the next-generation MusicBrainz tagger
#
# Copyright (C) 2013, 2019-2020, 2023-2024 Laurent Monin
# Copyright (C) 2018 Wieland Hoffmann
# Copyright (C) 2023 Philipp Wolfer
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""Helper class to convert bytes to human-readable form
It supports l10n through gettext, decimal and binary units.
>>> n = 1572864
>>> [binary(n), decimal(n)]
['1.5 MiB', '1.6 MB']
"""
import locale
from picard.i18n import (
N_,
gettext as _,
)
# used to force gettextization
_BYTES_STRINGS_I18N = (
N_("%(value)s B"),
N_("%(value)s kB"),
N_("%(value)s KiB"),
N_("%(value)s MB"),
N_("%(value)s MiB"),
N_("%(value)s GB"),
N_("%(value)s GiB"),
N_("%(value)s TB"),
N_("%(value)s TiB"),
N_("%(value)s PB"),
N_("%(value)s PiB"),
)
def decimal(number, scale=1, l10n=True):
    """
    Convert bytes to short human-readable string, decimal mode

    Args:
        number: Size in bytes (converted to int).
        scale: Number of decimal digits to round to.
        l10n: If True, localize the number format and translate the unit string.

    >>> [decimal(n) for n in [1000, 1024, 15500]]
    ['1 kB', '1 kB', '15.5 kB']
    """
    return short_string(int(number), 1000, scale=scale, l10n=l10n)
def binary(number, scale=1, l10n=True):
    """
    Convert bytes to short human-readable string, binary mode

    Args:
        number: Size in bytes (converted to int).
        scale: Number of decimal digits to round to.
        l10n: If True, localize the number format and translate the unit string.

    >>> [binary(n) for n in [1000, 1024, 15500]]
    ['1000 B', '1 KiB', '15.1 KiB']
    """
    return short_string(int(number), 1024, scale=scale, l10n=l10n)
def short_string(number, multiple, scale=1, l10n=True):
    """
    Returns short human-readable string for `number` bytes

    >>> [short_string(n, 1024, 2) for n in [1000, 1100, 15500]]
    ['1000 B', '1.07 KiB', '15.14 KiB']
    >>> [short_string(n, 1000, 1) for n in [10000, 11000, 1550000]]
    ['10 kB', '11 kB', '1.6 MB']
    """
    num, unit = calc_unit(number, multiple)
    truncated = int(num)
    rounded = round(num, scale)
    # Drop the fractional part when it rounds away, and always for plain bytes.
    if truncated == rounded or unit == 'B':
        fmt = '%d'
        num = truncated
    else:
        fmt = '%%0.%df' % scale
        num = rounded
    if not l10n:
        return fmt % num + " " + unit
    # Localized path: format the number per locale, then translate the
    # "%(value)s <unit>" template (see _BYTES_STRINGS_I18N).
    fmtnum = locale.format_string(fmt, num)
    fmt = "%(value)s " + unit
    return _(fmt) % {"value": fmtnum}
def calc_unit(number, multiple=1000):
    """
    Calculate rounded number of multiple * bytes, finding best unit

    >>> calc_unit(12456, 1024)
    (12.1640625, 'KiB')
    >>> calc_unit(-12456, 1000)
    (-12.456, 'kB')
    >>> calc_unit(0, 1001)
    Traceback (most recent call last):
        ...
    ValueError: multiple parameter has to be 1000 or 1024
    """
    sign = -1 if number < 0 else 1
    n = float(abs(number))
    if multiple == 1000:
        prefix, tail = 'k', 'B'
    elif multiple == 1024:
        prefix, tail = 'K', 'iB'
    else:
        raise ValueError("multiple parameter has to be 1000 or 1024")
    # ['B', 'kB'/'KiB', 'MB'/'MiB', ...] up to peta.
    units = ['B'] + [c + tail for c in prefix + 'MGTP']
    for unit in units:
        # Stop once the value fits below one multiple, or at the largest unit.
        if n < multiple or unit == units[-1]:
            return (sign * n, unit)
        n /= multiple
| 3,651
|
Python
|
.py
| 113
| 27.707965
| 80
| 0.614379
|
metabrainz/picard
| 3,687
| 383
| 10
|
GPL-2.0
|
9/5/2024, 5:11:02 PM (Europe/Amsterdam)
|
11,145
|
thread.py
|
metabrainz_picard/picard/util/thread.py
|
# -*- coding: utf-8 -*-
#
# Picard, the next-generation MusicBrainz tagger
#
# Copyright (C) 2006-2007 Lukáš Lalinský
# Copyright (C) 2008 Gary van der Merwe
# Copyright (C) 2011-2013 Michael Wiencek
# Copyright (C) 2013, 2018, 2020-2024 Laurent Monin
# Copyright (C) 2016 Sambhav Kothari
# Copyright (C) 2017 Sophist-UK
# Copyright (C) 2018 Vishal Choudhary
# Copyright (C) 2020, 2022 Philipp Wolfer
# Copyright (C) 2022 Bob Swift
# Copyright (C) 2022 skelly37
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import sys
import threading
import time
import traceback
from PyQt6.QtCore import (
QCoreApplication,
QEvent,
QRunnable,
)
from picard import log
class ProxyToMainEvent(QEvent):
    """Custom Qt user event that carries a callable and its arguments,
    used to proxy a function call onto the main thread (see to_main())."""
    def __init__(self, func, *args, **kwargs):
        super().__init__(QEvent.Type.User)
        self.func = func
        self.args = args
        self.kwargs = kwargs
    def run(self):
        # Executed by the application's event handler on the main thread.
        self.func(*self.args, **self.kwargs)
class Runnable(QRunnable):
    """QRunnable that executes `func` on a worker thread and forwards the
    result (or the raised exception) to `next_func` on the main thread."""
    def __init__(self, func, next_func, task_counter=None, traceback=True):
        super().__init__()
        self.func = func
        self.next_func = next_func
        self.task_counter = task_counter
        # Whether to log the stack trace on failure. NOTE: this instance
        # attribute shadows the module-level `traceback` only on `self`;
        # the module is still reachable as the bare name inside run().
        self.traceback = traceback
    def run(self):
        try:
            result = self.func()
        except BaseException:
            if self.traceback:
                log.error(traceback.format_exc())
            # Report the exception object to next_func via keyword `error`.
            to_main(self.next_func, error=sys.exc_info()[1])
        else:
            # Success: report the return value via keyword `result`.
            to_main(self.next_func, result=result)
        finally:
            if self.task_counter:
                self.task_counter.decrement()
class TaskCounter:
    """Thread-safe counter for running tasks.

    Lets a caller block in wait_for_tasks() until every counted task has
    finished (i.e. the counter has returned to zero).
    """

    def __init__(self):
        self.count = 0
        self.lock = threading.Lock()
        # The condition shares `self.lock`, so `with self.condition`
        # acquires the very same lock.
        self.condition = threading.Condition(self.lock)

    def decrement(self):
        """Mark one task as finished; wake waiters when none remain."""
        with self.condition:
            self.count -= 1
            if not self.count:
                self.condition.notify_all()

    def increment(self):
        """Mark one task as started."""
        with self.condition:
            self.count += 1

    def wait_for_tasks(self):
        """Block until the task count reaches zero."""
        with self.condition:
            while self.count:
                self.condition.wait()
def run_task(func, next_func=None, priority=0, thread_pool=None, task_counter=None, traceback=True):
    """Schedules func to be run on a separate thread

    Args:
        func: Function to run on a separate thread.
        next_func: Callback function to run after the thread has been completed.
            The callback will be run on the main thread.
        priority: Priority for the run queue's order of execution.
        thread_pool: Instance of QThreadPool to run this task.
        task_counter: Instance of TaskCounter to count the number of tasks currently running.
        traceback: If set to true the stack trace will be logged to the error log
            if an exception was raised.
    """
    def _noop(*args, **kwargs):
        return
    # Fall back to a do-nothing callback so Runnable can call it unconditionally.
    next_func = next_func or _noop
    if task_counter:
        task_counter.increment()
    pool = thread_pool or QCoreApplication.instance().thread_pool
    pool.start(Runnable(func, next_func, task_counter, traceback), priority)
def to_main(func, *args, **kwargs):
    """Schedule `func(*args, **kwargs)` to run on the main (GUI) thread by
    posting a ProxyToMainEvent through the Qt event loop. Returns immediately."""
    QCoreApplication.postEvent(QCoreApplication.instance(),
                               ProxyToMainEvent(func, *args, **kwargs))
def to_main_with_blocking(func, *args, **kwargs):
    """Executes a command as a user-defined event, and waits until the event has
    closed before returning. Note that any new threads started while processing
    the event will not be considered when releasing the blocking of the function.

    Args:
        func: Function to run.
    """
    _task = ProxyToMainEvent(func, *args, **kwargs)
    QCoreApplication.postEvent(QCoreApplication.instance(), _task)
    # Poll until the posted event has been consumed by the main thread.
    while True:
        try:
            # NOTE(review): once Qt has processed the event it deletes the
            # underlying C++ object, and touching it raises RuntimeError,
            # which ends the wait via the except below — presumably that is
            # the intended exit path. A False isAccepted() also exits.
            # TODO confirm against Qt event-lifetime semantics.
            if not _task.isAccepted():
                break
        except Exception:
            break
        time.sleep(.01)
| 4,632
|
Python
|
.py
| 122
| 31.393443
| 100
| 0.667411
|
metabrainz/picard
| 3,687
| 383
| 10
|
GPL-2.0
|
9/5/2024, 5:11:02 PM (Europe/Amsterdam)
|
11,146
|
scripttofilename.py
|
metabrainz_picard/picard/util/scripttofilename.py
|
# -*- coding: utf-8 -*-
#
# Picard, the next-generation MusicBrainz tagger
#
# Copyright (C) 2004 Robert Kaye
# Copyright (C) 2006 Lukáš Lalinský
# Copyright (C) 2018-2020, 2022 Philipp Wolfer
# Copyright (C) 2019-2021, 2023-2024 Laurent Monin
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import re
from picard.config import get_config
from picard.const.sys import IS_WIN
from picard.metadata import Metadata
from picard.script import ScriptParser
from picard.util import (
replace_win32_incompat,
sanitize_filename,
)
from picard.util.textencoding import replace_non_ascii
_re_replace_underscores = re.compile(r'[\s_]+')
def script_to_filename_with_metadata(naming_format, metadata, file=None, settings=None):
    """Creates a valid filename from a script with the given metadata.

    Args:
        naming_format: A string containing the tagger script. The result of
            executing this script will be the filename.
        metadata: A Metadata object. The metadata will not be modified.
        file: A File object (optional)
        settings: The settings. If not set config.setting will be used.

    Returns:
        A tuple with the filename as first element and the updated metadata
        with changes from the script as second.
    """
    if settings is None:
        config = get_config()
        settings = config.setting
    # make sure every metadata can safely be used in a path name
    win_compat = IS_WIN or settings['windows_compatibility']
    new_metadata = Metadata()
    replace_dir_separator = settings['replace_dir_separator']
    # Sanitize each tag value BEFORE script evaluation, so directory
    # separators inside tag values cannot create unintended path components.
    for name in metadata:
        new_metadata[name] = [
            sanitize_filename(str(v), repl=replace_dir_separator, win_compat=win_compat)
            for v in metadata.getall(name)
        ]
    # Tabs/newlines in the script are layout only — strip before evaluating.
    naming_format = naming_format.replace('\t', '').replace('\n', '')
    filename = ScriptParser().eval(naming_format, new_metadata, file)
    if settings['ascii_filenames']:
        filename = replace_non_ascii(filename, pathsave=True, win_compat=win_compat)
    # replace incompatible characters
    if win_compat:
        filename = replace_win32_incompat(filename, replacements=settings['win_compat_replacements'])
    # Collapse runs of whitespace/underscores into a single underscore.
    if settings['replace_spaces_with_underscores']:
        filename = _re_replace_underscores.sub('_', filename.strip())
    # remove null characters
    filename = filename.replace('\x00', '')
    return (filename, new_metadata)
def script_to_filename(naming_format, metadata, file=None, settings=None):
    """Creates a valid filename from a script with the given metadata.

    Convenience wrapper around script_to_filename_with_metadata that
    discards the updated metadata.

    Args:
        naming_format: A string containing the tagger script. The result of
            executing this script will be the filename.
        metadata: A Metadata object. The metadata will not be modified.
        file: A File object (optional)
        settings: The settings. If not set config.setting will be used.

    Returns:
        The filename.
    """
    filename, _unused_metadata = script_to_filename_with_metadata(
        naming_format, metadata, file, settings)
    return filename
| 3,738
|
Python
|
.py
| 83
| 40.13253
| 101
| 0.722451
|
metabrainz/picard
| 3,687
| 383
| 10
|
GPL-2.0
|
9/5/2024, 5:11:02 PM (Europe/Amsterdam)
|
11,147
|
astrcmp.py
|
metabrainz_picard/picard/util/astrcmp.py
|
# fix-header: skip
# http://hetland.org/coding/python/levenshtein.py
# This is a straightforward implementation of a well-known algorithm, and thus
# probably shouldn't be covered by copyright to begin with. But in case it is,
# the author (Magnus Lie Hetland) has, to the extent possible under law,
# dedicated all copyright and related and neighboring rights to this software
# to the public domain worldwide, by distributing it under the CC0 license,
# version 1.0. This software is distributed without any warranty. For more
# information, see <http://creativecommons.org/publicdomain/zero/1.0>
def astrcmp_py(a, b):
    """Return a similarity ratio in [0.0, 1.0] between a and b.

    Computed as 1 - levenshtein_distance(a, b) / max(len(a), len(b)).
    Returns 0.0 when either string is empty (including both empty).
    """
    n, m = len(a), len(b)
    if n > m:
        # Make sure n <= m, to use O(min(n,m)) space
        a, b, n, m = b, a, m, n
    if not n or not m:
        return 0.0
    row = list(range(n + 1))
    for i in range(1, m + 1):
        prev, row = row, [i] + [0] * n
        for j in range(1, n + 1):
            # Cost of substitution is 0 on a match, 1 otherwise.
            cost = 0 if a[j - 1] == b[i - 1] else 1
            row[j] = min(prev[j] + 1,       # deletion
                         row[j - 1] + 1,    # insertion
                         prev[j - 1] + cost)  # substitution
    return 1.0 - row[n] / max(m, n)
# Prefer the compiled C implementation of astrcmp when the optional
# extension module was built; otherwise fall back to the pure-Python one.
# astrcmp_implementation is reported in the version info (see util/versions.py).
try:
    from picard.util._astrcmp import astrcmp as astrcmp_c
    astrcmp = astrcmp_c
    astrcmp_implementation = "C"
except ImportError:
    astrcmp = astrcmp_py
    astrcmp_implementation = "Python"
| 1,449
|
Python
|
.py
| 35
| 35.571429
| 78
| 0.647687
|
metabrainz/picard
| 3,687
| 383
| 10
|
GPL-2.0
|
9/5/2024, 5:11:02 PM (Europe/Amsterdam)
|
11,148
|
bitreader.py
|
metabrainz_picard/picard/util/bitreader.py
|
# -*- coding: utf-8 -*-
#
# Picard, the next-generation MusicBrainz tagger
#
# Copyright (C) 2014, 2020 Christoph Reiter
# Copyright (C) 2019, 2021 Philipp Wolfer
# Copyright (C) 2021 Laurent Monin
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
# This implementation is taken from mutagen, see
# https://github.com/quodlibet/mutagen/blob/master/mutagen/_util.py
# https://github.com/quodlibet/mutagen/blob/master/mutagen/tak.py
class BitReaderError(Exception):
    """Raised when a bit reader cannot fetch enough data from its file object."""
    pass
class _BitReader:
def __init__(self, fileobj):
self._fileobj = fileobj
self._buffer = 0
self._bits = 0
self._pos = fileobj.tell()
def bits(self, count):
"""Reads `count` bits and returns an uint.
May raise BitReaderError if not enough data could be read or
IOError by the underlying file object.
"""
raise NotImplementedError
def bytes(self, count):
"""Returns a bytearray of length `count`. Works unaligned."""
if count < 0:
raise ValueError
# fast path
if self._bits == 0:
data = self._fileobj.read(count)
if len(data) != count:
raise BitReaderError("not enough data")
return data
return bytes(bytearray(self.bits(8) for _ in range(count)))
def skip(self, count):
"""Skip `count` bits.
Might raise BitReaderError if there wasn't enough data to skip,
but might also fail on the next bits() instead.
"""
if count < 0:
raise ValueError
if count <= self._bits:
self.bits(count)
else:
count -= self.align()
n_bytes = count // 8
self._fileobj.seek(n_bytes, 1)
count -= n_bytes * 8
self.bits(count)
def get_position(self):
"""Returns the amount of bits read or skipped so far"""
return (self._fileobj.tell() - self._pos) * 8 - self._bits
def align(self):
"""Align to the next byte, returns the amount of bits skipped"""
bits = self._bits
self._buffer = 0
self._bits = 0
return bits
def is_aligned(self):
"""If we are currently aligned to bytes and nothing is buffered"""
return self._bits == 0
class MSBBitReader(_BitReader):
    """BitReader implementation which reads bits starting at MSB in each byte.
    """
    def bits(self, count):
        """Reads `count` bits and returns an uint, MSB read first.

        May raise BitReaderError if not enough data could be read or
        IOError by the underlying file object.
        """
        if count < 0:
            raise ValueError
        if count > self._bits:
            # Refill: read just enough whole bytes to satisfy the request,
            # appending each byte at the low end of the buffer.
            n_bytes = (count - self._bits + 7) // 8
            data = self._fileobj.read(n_bytes)
            if len(data) != n_bytes:
                raise BitReaderError("not enough data")
            for b in bytearray(data):
                self._buffer = (self._buffer << 8) | b
            self._bits += n_bytes * 8
        # Take the `count` highest buffered bits; keep the rest masked in.
        self._bits -= count
        value = self._buffer >> self._bits
        self._buffer &= (1 << self._bits) - 1
        return value
class LSBBitReader(_BitReader):
    """BitReader implementation which reads bits starting at LSB in each byte.
    """
    def _lsb(self, count):
        # Extract the lowest `count` bits from the buffer.
        # Precedence note: >> binds tighter than &, so this masks with
        # (0xff >> (8 - count)).
        value = self._buffer & 0xff >> (8 - count)
        self._buffer = self._buffer >> count
        self._bits -= count
        return value
    def bits(self, count):
        """Reads `count` bits and returns an uint, LSB read first.

        May raise BitReaderError if not enough data could be read or
        IOError by the underlying file object.
        """
        if count < 0:
            raise ValueError
        value = 0
        if count <= self._bits:
            value = self._lsb(count)
        else:
            # First read all available bits
            shift = 0
            remaining = count
            if self._bits > 0:
                remaining -= self._bits
                shift = self._bits
                value = self._lsb(self._bits)
            # Now add additional bytes
            n_bytes = (remaining - self._bits + 7) // 8
            data = self._fileobj.read(n_bytes)
            if len(data) != n_bytes:
                raise BitReaderError("not enough data")
            for b in bytearray(data):
                if remaining > 8:  # Use full byte
                    remaining -= 8
                    value = (b << shift) | value
                    shift += 8
                else:
                    # Last byte: buffer it and take only the bits still needed.
                    self._buffer = b
                    self._bits = 8
                    b = self._lsb(remaining)
                    value = (b << shift) | value
        return value
| 5,418
|
Python
|
.py
| 140
| 29.542857
| 80
| 0.586641
|
metabrainz/picard
| 3,687
| 383
| 10
|
GPL-2.0
|
9/5/2024, 5:11:02 PM (Europe/Amsterdam)
|
11,149
|
webbrowser2.py
|
metabrainz_picard/picard/util/webbrowser2.py
|
# -*- coding: utf-8 -*-
#
# Picard, the next-generation MusicBrainz tagger
#
# Copyright (C) 2006-2007 Lukáš Lalinský
# Copyright (C) 2011 Calvin Walton
# Copyright (C) 2013, 2018-2021, 2024 Laurent Monin
# Copyright (C) 2016-2017 Sambhav Kothari
# Copyright (C) 2018 Wieland Hoffmann
# Copyright (C) 2019, 2022-2023 Philipp Wolfer
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""A webbrowser extension for Picard.
It handles and displays errors in PyQt and also adds a utility function for opening Picard URLS.
"""
import webbrowser
from PyQt6 import QtWidgets
from picard.const import PICARD_URLS
from picard.i18n import gettext as _
def open(url):
    """Open `url` in the user's web browser.

    `url` may also be a key into PICARD_URLS, in which case the mapped URL
    is opened. Browser launch failures are shown in a critical message box.
    """
    target = PICARD_URLS.get(url, url)
    try:
        webbrowser.open(target)
    except webbrowser.Error as e:
        QtWidgets.QMessageBox.critical(None, _("Web Browser Error"), _("Error while launching a web browser:\n\n%s") % (e,))
| 1,579
|
Python
|
.py
| 38
| 39.315789
| 124
| 0.75817
|
metabrainz/picard
| 3,687
| 383
| 10
|
GPL-2.0
|
9/5/2024, 5:11:02 PM (Europe/Amsterdam)
|
11,150
|
time.py
|
metabrainz_picard/picard/util/time.py
|
# -*- coding: utf-8 -*-
#
# Picard, the next-generation MusicBrainz tagger
#
# Copyright (C) 2021 Gabriel Ferreira
# Copyright (C) 2021, 2023 Philipp Wolfer
# Copyright (C) 2021, 2024 Laurent Monin
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
from collections import namedtuple
from picard.i18n import gettext as _
SECS_IN_DAY = 86400
SECS_IN_HOUR = 3600
SECS_IN_MINUTE = 60
Duration = namedtuple('Duration', 'days hours minutes seconds')
def euclidian_div(a, b):
    """Return the floor-division quotient and remainder of a by b.

    Identical to the builtin divmod(); kept as a named helper for
    readability in seconds_to_dhms().
    """
    return divmod(a, b)
def seconds_to_dhms(seconds):
    """Split a duration in seconds into a Duration(days, hours, minutes, seconds)."""
    days, remainder = euclidian_div(seconds, SECS_IN_DAY)
    hours, remainder = euclidian_div(remainder, SECS_IN_HOUR)
    minutes, secs = euclidian_div(remainder, SECS_IN_MINUTE)
    return Duration(days=days, hours=hours, minutes=minutes, seconds=secs)
def get_timestamp(seconds):
    """Format a duration in seconds as a short localized string.

    Shows the two most significant non-zero units, zero-padded to two
    digits (e.g. "01d 02h", "02h 05m", "03m 20s"), only seconds below one
    minute, and an empty string for a zero duration.
    """
    time = seconds_to_dhms(seconds)
    if time.days > 0:
        return _("%(days).2dd %(hours).2dh") % time._asdict()
    if time.hours > 0:
        return _("%(hours).2dh %(minutes).2dm") % time._asdict()
    if time.minutes > 0:
        return _("%(minutes).2dm %(seconds).2ds") % time._asdict()
    if time.seconds > 0:
        return _("%(seconds).2ds") % time._asdict()
    return ''
| 1,867
|
Python
|
.py
| 45
| 38.511111
| 80
| 0.718076
|
metabrainz/picard
| 3,687
| 383
| 10
|
GPL-2.0
|
9/5/2024, 5:11:02 PM (Europe/Amsterdam)
|
11,151
|
remotecommands.py
|
metabrainz_picard/picard/util/remotecommands.py
|
# -*- coding: utf-8 -*-
#
# Picard, the next-generation MusicBrainz tagger
#
# Copyright (C) 2022-2023 Bob Swift
# Copyright (C) 2023 Philipp Wolfer
# Copyright (C) 2023-2024 Laurent Monin
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import os
import queue
import shlex
import threading
from picard import log
class RemoteCommand:
    """Descriptor for one remote command: the handler method name on the
    tagger plus optional help text and argument description."""

    def __init__(self, method_name, help_text=None, help_args=None):
        self.method_name = method_name
        # Normalize missing/empty help strings to "".
        self.help_text = help_text if help_text else ""
        self.help_args = help_args if help_args else ""
REMOTE_COMMANDS = {
'CLEAR_LOGS': RemoteCommand(
'handle_command_clear_logs',
help_text="Clear the Picard logs",
),
'CLUSTER': RemoteCommand(
'handle_command_cluster',
help_text="Cluster all files in the cluster pane.",
),
'FINGERPRINT': RemoteCommand(
'handle_command_fingerprint',
help_text="Calculate acoustic fingerprints for all (matched) files in the album pane.",
),
'FROM_FILE': RemoteCommand(
'handle_command_from_file',
help_text="Load commands from a file.",
help_args="[Path to a file containing commands]",
),
'LOAD': RemoteCommand(
'handle_command_load',
help_text="Load one or more files/MBIDs/URLs to Picard.",
help_args="[supported MBID/URL or path to a file]",
),
'LOOKUP': RemoteCommand(
'handle_command_lookup',
help_text="Lookup files in the clustering pane. Defaults to all files.",
help_args="[clustered|unclustered|all]"
),
'LOOKUP_CD': RemoteCommand(
'handle_command_lookup_cd',
help_text="Read CD from the selected drive and lookup on MusicBrainz. "
"Without argument, it defaults to the first (alphabetically) available disc drive.",
help_args="[device/log file]",
),
'PAUSE': RemoteCommand(
'handle_command_pause',
help_text="Pause executable command processing.",
help_args="[number of seconds to pause]",
),
'QUIT': RemoteCommand(
'handle_command_quit',
help_text="Exit the running instance of Picard. "
"Use the argument 'FORCE' to bypass Picard's unsaved files check.",
help_args="[FORCE]",
),
'REMOVE': RemoteCommand(
'handle_command_remove',
help_text="Remove the file from Picard. Do nothing if no arguments provided.",
help_args="[absolute path to one or more files]",
),
'REMOVE_ALL': RemoteCommand(
'handle_command_remove_all',
help_text="Remove all files from Picard.",
),
'REMOVE_EMPTY': RemoteCommand(
'handle_command_remove_empty',
help_text="Remove all empty clusters and albums.",
),
'REMOVE_SAVED': RemoteCommand(
'handle_command_remove_saved',
help_text="Remove all saved files from the album pane.",
),
'REMOVE_UNCLUSTERED': RemoteCommand(
'handle_command_remove_unclustered',
help_text="Remove all unclustered files from the cluster pane.",
),
'SAVE_MATCHED': RemoteCommand(
'handle_command_save_matched',
help_text="Save all matched files from the album pane."
),
'SAVE_MODIFIED': RemoteCommand(
'handle_command_save_modified',
help_text="Save all modified files from the album pane.",
),
'SCAN': RemoteCommand(
'handle_command_scan',
help_text="Scan all files in the cluster pane.",
),
'SHOW': RemoteCommand(
'handle_command_show',
help_text="Make the running instance the currently active window.",
),
'SUBMIT_FINGERPRINTS': RemoteCommand(
'handle_command_submit_fingerprints',
help_text="Submit outstanding acoustic fingerprints for all (matched) files in the album pane.",
),
'WRITE_LOGS': RemoteCommand(
'handle_command_write_logs',
help_text="Write Picard logs to a given path.",
help_args="[absolute path to one file]",
),
}
class RemoteCommands:
    """Handler for remote commands processed from the command line using the '-e' option.
    """
    # Collection of command files currently being parsed
    _command_files = set()
    # Flag to indicate whether a 'QUIT' command has been queued
    _has_quit = False
    # Flag to indicate whether a command is currently running
    _command_running = False
    # Guards all of the class-level state above
    _lock = threading.Lock()
    # Queue of [command, argument] pairs waiting to be executed
    command_queue = queue.Queue()
    @classmethod
    def cmd_files_contains(cls, filepath: str):
        """Check if the specified filepath is currently open for reading commands.
        Args:
            filepath (str): File path to check.
        Returns:
            bool: True if the filepath is open for processing, otherwise False.
        """
        with cls._lock:
            return filepath in cls._command_files
    @classmethod
    def cmd_files_add(cls, filepath: str):
        """Adds the specified filepath to the collection of files currently open
        for reading commands.
        Args:
            filepath (str): File path to add.
        """
        with cls._lock:
            cls._command_files.add(filepath)
    @classmethod
    def cmd_files_remove(cls, filepath: str):
        """Removes the specified filepath from the collection of files currently
        open for reading commands.
        Args:
            filepath (str): File path to remove.
        """
        with cls._lock:
            cls._command_files.discard(filepath)
    @classmethod
    def has_quit(cls):
        """Indicates whether a 'QUIT' command has been added to the command queue.
        Returns:
            bool: True if a 'QUIT' command has been queued, otherwise False.
        """
        with cls._lock:
            return cls._has_quit
    @classmethod
    def set_quit(cls, value: bool):
        """Sets the status of the 'has_quit()' flag.
        Args:
            value (bool): Value to set for the 'has_quit()' flag.
        """
        with cls._lock:
            cls._has_quit = value
    @classmethod
    def get_running(cls):
        """Indicates whether a command is currently set as active regardless of
        processing status.
        Returns:
            bool: True if there is an active command, otherwise False.
        """
        with cls._lock:
            return cls._command_running
    @classmethod
    def set_running(cls, value: bool):
        """Sets the status of the 'get_running()' flag.
        Args:
            value (bool): Value to set for the 'get_running()' flag.
        """
        with cls._lock:
            cls._command_running = value
    @classmethod
    def parse_commands_to_queue(cls, commands):
        """Parses the list of command tuples, and adds them to the command queue. If the command
        is 'FROM_FILE' then the commands will be read from the file recursively. Once a 'QUIT'
        command has been queued, all further commands will be ignored and not placed in the queue.
        Args:
            commands (list): Command tuples in the form (command, [args]) to add to the queue.
        """
        if cls.has_quit():
            # Don't queue any more commands after a QUIT command.
            return
        for (cmd, cmdargs) in commands:
            cmd = cmd.upper()
            if cmd not in REMOTE_COMMANDS:
                log.error("Unknown command: %s", cmd)
                continue
            for cmd_arg in cmdargs or ['']:
                if cmd == 'FROM_FILE':
                    cls.get_commands_from_file(cmd_arg)
                else:
                    # Lazy %-style formatting for consistency with the other log calls.
                    log.debug("Queueing command: %s %r", cmd, cmd_arg)
                    cls.command_queue.put([cmd, cmd_arg])
                    # Set flag so as to not queue any more commands after a QUIT command.
                    if cmd == 'QUIT':
                        cls.set_quit(True)
                        return
    @staticmethod
    def _read_commands_from_file(filepath: str):
        """Reads the commands from the specified filepath.
        Args:
            filepath (str): File to read.
        Returns:
            list: Command tuples in the form (command, [args]).
        """
        commands = []
        try:
            # Context manager guarantees the file handle is closed even if
            # reading fails part way through (the original leaked the handle).
            with open(filepath) as f:
                lines = f.readlines()
        except Exception as e:
            log.error("Error reading command file '%s': %s", filepath, e)
            return commands
        for line in lines:
            line = line.strip()
            if not line or line.startswith('#'):
                continue
            elements = shlex.split(line)
            if not elements:
                continue
            command_args = elements[1:] or ['']
            commands.append((elements[0], command_args))
        return commands
    @classmethod
    def get_commands_from_file(cls, filepath: str):
        """Reads and parses the commands from the specified filepath and adds
        them to the command queue for processing.
        Args:
            filepath (str): File to read.
        """
        log.debug("Reading commands from: %r", filepath)
        if not os.path.exists(filepath):
            log.error("Missing command file: '%s'", filepath)
            return
        absfilepath = os.path.abspath(filepath)
        if cls.cmd_files_contains(absfilepath):
            log.warning("Circular command file reference ignored: '%s'", filepath)
            return
        cls.cmd_files_add(absfilepath)
        cls.parse_commands_to_queue(cls._read_commands_from_file(absfilepath))
        cls.cmd_files_remove(absfilepath)
| 10,191
|
Python
|
.py
| 263
| 30.509506
| 104
| 0.624343
|
metabrainz/picard
| 3,687
| 383
| 10
|
GPL-2.0
|
9/5/2024, 5:11:02 PM (Europe/Amsterdam)
|
11,152
|
__init__.py
|
metabrainz_picard/picard/util/__init__.py
|
# -*- coding: utf-8 -*-
#
# Picard, the next-generation MusicBrainz tagger
#
# Copyright (C) 2004 Robert Kaye
# Copyright (C) 2006-2009, 2011-2012, 2014 Lukáš Lalinský
# Copyright (C) 2008-2011, 2014, 2018-2023 Philipp Wolfer
# Copyright (C) 2009 Carlin Mangar
# Copyright (C) 2009 david
# Copyright (C) 2010 fatih
# Copyright (C) 2011-2013 Michael Wiencek
# Copyright (C) 2012, 2014-2015 Wieland Hoffmann
# Copyright (C) 2013 Ionuț Ciocîrlan
# Copyright (C) 2013-2014 Sophist-UK
# Copyright (C) 2013-2014, 2018-2024 Laurent Monin
# Copyright (C) 2014 Johannes Dewender
# Copyright (C) 2016 Rahul Raturi
# Copyright (C) 2016 barami
# Copyright (C) 2016-2018 Sambhav Kothari
# Copyright (C) 2017 Frederik “Freso” S. Olesen
# Copyright (C) 2018 Vishal Choudhary
# Copyright (C) 2018, 2021 Bob Swift
# Copyright (C) 2020 Ray Bouchard
# Copyright (C) 2021 Gabriel Ferreira
# Copyright (C) 2021 Louis Sautier
# Copyright (C) 2022 Kamil
# Copyright (C) 2022 skelly37
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
try:
from charset_normalizer import detect
except ImportError:
try:
from chardet import detect
except ImportError:
detect = None
from collections import (
defaultdict,
namedtuple,
)
from collections.abc import Mapping
from itertools import chain
import json
import ntpath
from operator import attrgetter
import os
from pathlib import PurePath
import re
import subprocess # nosec: B404
import sys
from time import monotonic
import unicodedata
from dateutil.parser import parse
from PyQt6 import QtCore
from PyQt6.QtGui import QDesktopServices
from picard import log
from picard.const import MUSICBRAINZ_SERVERS
from picard.const.sys import (
FROZEN_TEMP_PATH,
IS_FROZEN,
IS_MACOS,
IS_WIN,
)
from picard.i18n import (
gettext as _,
gettext_constants,
)
if IS_WIN:
    # winreg is only available on Windows; used below to query long-path support.
    import winreg
# Windows path length constraints
# See https://docs.microsoft.com/en-us/windows/win32/fileio/maximum-file-path-limitation
# the entire path's length (260 - 1 null character)
WIN_MAX_FILEPATH_LEN = 259
# the entire parent directory path's length must leave room for a 8.3 filename
WIN_MAX_DIRPATH_LEN = WIN_MAX_FILEPATH_LEN - 12
# a single node's (directory or file) length
WIN_MAX_NODE_LEN = 255
# Prefix for long paths in Windows API
WIN_LONGPATH_PREFIX = '\\\\?\\'
class ReadWriteLockContext:
    """Context for releasing a locked QReadWriteLock.

    Use `lock_for_read()` / `lock_for_write()` to acquire the lock; both return
    `self` so the instance can be used with a `with` statement, which releases
    the lock on exit. `bool(ctx)` is True while at least one acquisition is held.
    """
    def __init__(self):
        self.__lock = QtCore.QReadWriteLock()
        # Number of currently held acquisitions. Fixes the original __bool__,
        # which referenced a `_entered` attribute that was never initialized
        # and therefore always raised AttributeError.
        self._entered = 0
    def lock_for_read(self):
        """Acquire the lock for reading and return self."""
        self.__lock.lockForRead()
        self._entered += 1
        return self
    def lock_for_write(self):
        """Acquire the lock for writing and return self."""
        self.__lock.lockForWrite()
        self._entered += 1
        return self
    def unlock(self):
        """Release one acquisition of the lock."""
        self._entered -= 1
        self.__lock.unlock()
    def __enter__(self):
        # The lock is expected to have been acquired via lock_for_read() /
        # lock_for_write() before entering the context.
        pass
    def __exit__(self, type, value, tb):
        self._entered -= 1
        self.__lock.unlock()
    def __bool__(self):
        return self._entered > 0
def process_events_iter(iterable, interval=0.1):
    """
    Creates an iterator over iterable that calls QCoreApplication.processEvents()
    after certain time intervals.
    This must only be used in the main thread.
    Args:
        iterable: iterable object to iterate over
        interval: interval in seconds to call QCoreApplication.processEvents();
            a falsy interval disables the periodic processing, but events are
            still processed once after the iterable is exhausted
    Yields:
        The items of ``iterable``, unchanged.
    """
    if interval:
        start = monotonic()
    for item in iterable:
        if interval:
            now = monotonic()
            delta = now - start
            if delta > interval:
                start = now
                # Keep the UI responsive during a long-running iteration.
                QtCore.QCoreApplication.processEvents()
        yield item
    # Flush any events that accumulated during the final chunk of work.
    QtCore.QCoreApplication.processEvents()
def iter_files_from_objects(objects, save=False):
    """Iterate over all unique files contained in the given albums, clusters, tracks or files."""
    per_object_files = [obj.iterfiles(save) for obj in objects]
    return iter_unique(chain(*per_object_files))
# Cached filesystem encoding, used by encode_filename()/decode_filename() below.
_io_encoding = sys.getfilesystemencoding()
# The following was adapted from k3b's source code:
# On a glibc system the system locale defaults to ANSI_X3.4-1968
# It is very unlikely that one would set the locale to ANSI_X3.4-1968
# intentionally
def check_io_encoding():
    """Log a warning if the filesystem encoding is ANSI_X3.4-1968 (plain ASCII)."""
    if _io_encoding == "ANSI_X3.4-1968":
        log.warning("""
System locale charset is ANSI_X3.4-1968
Your system's locale charset (i.e. the charset used to encode filenames)
is set to ANSI_X3.4-1968. It is highly unlikely that this has been done
intentionally. Most likely the locale is not set at all. An invalid setting
will result in problems when creating data projects.
To properly set the locale charset make sure the LC_* environment variables
are set. Normally the distribution setup tools take care of this.
Translation: Picard will have problems with non-english characters
in filenames until you change your charset.
""")
def encode_filename(filename):
    """Encode unicode strings to filesystem encoding; non-str input is returned unchanged."""
    if not isinstance(filename, str):
        return filename
    # On platforms with native unicode filenames (except macOS, where the
    # normalization differs) no encoding is necessary.
    if os.path.supports_unicode_filenames and sys.platform != "darwin":
        return filename
    return filename.encode(_io_encoding, 'replace')
def decode_filename(filename):
    """Decode strings from filesystem encoding to unicode; str input is returned unchanged."""
    return filename if isinstance(filename, str) else filename.decode(_io_encoding)
def _check_windows_min_version(major, build):
try:
v = sys.getwindowsversion()
return v.major >= major and v.build >= build
except AttributeError:
return False
def system_supports_long_paths():
    """Detects long path support.
    On Windows returns True, only if long path support is enabled in the registry (Windows 10 1607 or later).
    All other systems return always True.
    Returns:
        bool: True if paths longer than the legacy Windows limits are supported.
    """
    if not IS_WIN:
        return True
    try:
        # Use cached value (stored as a function attribute on first call).
        return system_supports_long_paths._supported
    except AttributeError:
        pass
    try:
        # Long path support can be enabled in Windows 10 version 1607 or later
        if _check_windows_min_version(10, 14393):
            with winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE,
                    r"SYSTEM\CurrentControlSet\Control\FileSystem") as key:
                supported = winreg.QueryValueEx(key, "LongPathsEnabled")[0] == 1
        else:
            supported = False
        # Cache the result; the registry setting is not expected to change
        # while the application is running.
        system_supports_long_paths._supported = supported
        return supported
    except OSError:
        log.info("Failed reading LongPathsEnabled from registry")
        return False
def normpath(path):
    """Normalize ``path``, resolving symlinks, and apply the Windows long-path prefix if needed."""
    normalized = os.path.normpath(path)
    try:
        normalized = os.path.realpath(normalized)
    except OSError as why:
        # realpath can fail if path does not exist or is not accessible
        # or on Windows if drives are mounted without mount manager
        # (see https://tickets.metabrainz.org/browse/PICARD-2425).
        log.warning("Failed getting realpath for `%s`: %s", normalized, why)
    # If the path is longer than 259 characters on Windows, prepend the \\?\
    # prefix. This enables access to long paths using the Windows API. See
    # https://docs.microsoft.com/en-us/windows/win32/fileio/maximum-file-path-limitation
    if IS_WIN and not system_supports_long_paths():
        normalized = win_prefix_longpath(normalized)
    return normalized
def win_prefix_longpath(path):
    """
    For paths longer then WIN_MAX_FILEPATH_LEN enable long path support by prefixing with WIN_LONGPATH_PREFIX.
    See https://learn.microsoft.com/en-us/windows/win32/fileio/maximum-file-path-limitation
    """
    needs_prefix = len(path) > WIN_MAX_FILEPATH_LEN and not path.startswith(WIN_LONGPATH_PREFIX)
    if not needs_prefix:
        return path
    if path.startswith(r'\\'):
        # UNC share paths use the special \\?\UNC\server\share form.
        return WIN_LONGPATH_PREFIX + 'UNC' + path[1:]
    return WIN_LONGPATH_PREFIX + path
def is_absolute_path(path):
    """Similar to os.path.isabs, but properly detects Windows shares as absolute paths
    See https://bugs.python.org/issue22302
    """
    if os.path.isabs(path):
        return True
    return IS_WIN and os.path.normpath(path).startswith("\\\\")
def samepath(path1, path2):
    """True if both paths are equal after normalization (case-insensitive on Windows)."""
    def canonical(p):
        return os.path.normcase(os.path.normpath(p))
    return canonical(path1) == canonical(path2)
def samefile(path1, path2):
    """Returns True, if both `path1` and `path2` refer to the same file.
    Behaves similar to os.path.samefile, but first checks identical paths including
    case insensitive comparison on Windows using os.path.normcase. This fixes issues on
    some network drives (e.g. VirtualBox mounts) where two paths different only in case
    are considered separate files by os.path.samefile.
    """
    if samepath(path1, path2):
        return True
    return os.path.samefile(path1, path2)
def format_time(ms, display_zero=False):
    """Format a duration in milliseconds as "m:ss" or "h:mm:ss".

    Returns "?:??" for a zero duration unless ``display_zero`` is True.
    """
    ms = float(ms)
    if ms == 0 and not display_zero:
        return "?:??"
    total_seconds = round(ms / 1000)
    if total_seconds < 3600:
        minutes, seconds = divmod(total_seconds, 60)
        return "%d:%02d" % (minutes, seconds)
    hours, remainder = divmod(total_seconds, 3600)
    minutes, seconds = divmod(remainder, 60)
    return "%d:%02d:%02d" % (hours, minutes, seconds)
def sanitize_date(datestr):
    """Sanitize date format.
    e.g.: "1980-00-00" -> "1980"
          "1980- - "   -> "1980"
          "1980-00-23" -> "1980-00-23"
          ...
    Trailing parts that are empty/whitespace or zero are dropped; parsing stops
    at the first non-numeric part (scanning from the end).
    """
    date = []
    for num in reversed(datestr.split("-")):
        # Strip first so whitespace-only parts are treated as empty. The
        # original compared the *unstripped* value against '', so "1980- - "
        # returned "" instead of "1980" as documented above.
        num = num.strip()
        try:
            num = int(num)
        except ValueError:
            if num == '':
                num = 0
            else:
                break
        if num or (num == 0 and date):
            date.append(num)
    date.reverse()
    return ("", "%04d", "%04d-%02d", "%04d-%02d-%02d")[len(date)] % tuple(date)
def replace_win32_incompat(string, repl="_", replacements=None):  # noqa: E302
    """Replace win32 filename incompatible characters from ``string`` by
    ``repl`` (or a per-character override from ``replacements``)."""
    # Keep the drive prefix (e.g. "C:") intact — its colon is legitimate.
    drive = ''
    if IS_WIN and os.path.isabs(string):
        drive, string = ntpath.splitdrive(string)
    char_map = defaultdict(lambda: repl, replacements or {})
    for forbidden in ('"', '*', ':', '<', '>', '?', '|'):
        if forbidden in string:
            string = string.replace(forbidden, char_map[forbidden])
    return drive + string
_re_non_alphanum = re.compile(r'\W+', re.UNICODE)
def strip_non_alnum(string):  # noqa: E302
    """Replace runs of non-alphanumeric characters in ``string`` with single spaces and trim."""
    cleaned = _re_non_alphanum.sub(" ", string)
    return cleaned.strip()
def sanitize_filename(string, repl="_", win_compat=False):
    """Replace path separators in ``string`` with ``repl``.

    With ``win_compat`` the backslash is treated as a separator on all platforms.
    """
    separators = [os.sep]
    if os.altsep:
        separators.append(os.altsep)
    if win_compat and os.altsep != '\\':
        separators.append('\\')
    for separator in separators:
        string = string.replace(separator, repl)
    return string
def make_filename_from_title(title=None, default=None):
    """Build a safe filename from ``title``, falling back to a (localized) default."""
    fallback = _("No Title") if default is None else default
    if not title or not title.strip():
        title = fallback
    result = sanitize_filename(title, win_compat=IS_WIN)
    if IS_WIN:
        result = replace_win32_incompat(result)
    return result
def _reverse_sortname(sortname):
"""Reverse sortnames."""
chunks = [a.strip() for a in sortname.split(",")]
if len(chunks) == 2:
return "%s %s" % (chunks[1], chunks[0])
elif len(chunks) == 3:
return "%s %s %s" % (chunks[2], chunks[1], chunks[0])
elif len(chunks) == 4:
return "%s %s, %s %s" % (chunks[1], chunks[0], chunks[3], chunks[2])
else:
return sortname.strip()
def translate_from_sortname(name, sortname):
    """'Translate' the artist name by reversing the sortname.

    Only applies when ``name`` contains a non-latin letter; otherwise ``name``
    is returned unchanged.
    """
    separators = (" & ", "; ", " and ", " vs. ", " with ", " y ")
    for char in name:
        category = unicodedata.category(char)
        if not category.startswith("L") or 'LATIN' in unicodedata.name(char):
            continue
        # Found a non-latin letter: reverse each separator-delimited part.
        chosen_sep = ""
        parts = [sortname]
        for separator in separators:
            if separator in sortname:
                parts = sortname.split(separator)
                chosen_sep = separator
                break
        return chosen_sep.join(map(_reverse_sortname, parts))
    return name
def find_existing_path(path):
    """Walk up ``path`` until an existing directory is found and return it."""
    current = encode_filename(path)
    while current and not os.path.isdir(current):
        parent, _tail = os.path.split(current)
        if parent == current:
            break
        current = parent
    return decode_filename(current)
def _add_windows_executable_extension(*executables):
return [e if e.endswith(('.py', '.exe')) else e + '.exe' for e in executables]
def find_executable(*executables):
    """Return the absolute path of the first existing executable found on PATH
    (plus the interpreter directory, the current directory and, for frozen
    builds, the bundle directory), or None."""
    if IS_WIN:
        executables = _add_windows_executable_extension(*executables)
    search_dirs = []
    if sys.executable:
        search_dirs.append(os.path.dirname(sys.executable))
    search_dirs.extend(os.environ.get('PATH', '').split(os.pathsep))
    search_dirs.append('./')
    # This is for searching for executables bundled in packaged builds
    if IS_FROZEN:
        search_dirs.append(FROZEN_TEMP_PATH)
    for directory in search_dirs:
        for executable in executables:
            candidate = os.path.join(directory, executable)
            if os.path.isfile(candidate):
                return os.path.abspath(candidate)
def run_executable(executable, *args, timeout=None):
    """Run ``executable`` with ``args`` and return (returncode, stdout, stderr).

    Python scripts (``*.py``) are run through the current interpreter.
    Raises subprocess.TimeoutExpired if ``timeout`` (seconds) is exceeded.
    """
    # Prevent new shell window from appearing
    startupinfo = None
    if IS_WIN:
        startupinfo = subprocess.STARTUPINFO()
        startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
    # Include python interpreter if running a python script. Check the suffix
    # only — the original `".py" in executable` misclassified any path merely
    # containing ".py" (e.g. "~/.pyenv/bin/tool") as a Python script.
    if executable.endswith(".py"):
        arguments = [sys.executable, executable, *args]
    else:
        arguments = [executable, *args]
    # Call program with arguments
    ret = subprocess.run(  # nosec: B603
        arguments,
        stdin=subprocess.DEVNULL,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        startupinfo=startupinfo,
        timeout=timeout
    )
    # Return (error code, stdout and stderr)
    return ret.returncode, ret.stdout.decode(sys.stdout.encoding), ret.stderr.decode(sys.stderr.encoding)
def open_local_path(path):
    """Open ``path`` with the operating system's default handler."""
    url = QtCore.QUrl.fromLocalFile(path)
    if os.environ.get('SNAP'):
        # Inside a snap package, delegate to xdg-open — presumably because
        # QDesktopServices is confined there; confirm before changing.
        run_executable('xdg-open', url.toString())
        return
    QDesktopServices.openUrl(url)
_mbid_format = '[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$'
_re_mbid_val = re.compile(_mbid_format, re.IGNORECASE)
def mbid_validate(string):  # noqa: E302
    """Return True if ``string`` is a well-formed MusicBrainz identifier (UUID)."""
    return bool(_re_mbid_val.match(string))
def parse_amazon_url(url):
    """Extract host and asin from an amazon url.
    It returns a dict with host and asin keys on success, None else
    """
    pattern = re.compile(r'^https?://(?:www.)?(?P<host>.*?)(?:\:[0-9]+)?/.*/(?P<asin>[0-9B][0-9A-Z]{9})(?:[^0-9A-Z]|$)')
    found = pattern.match(url)
    return found.groupdict() if found is not None else None
def throttle(interval):
    """
    Throttle a function so that it will only execute once per ``interval``
    (specified in milliseconds).
    If called again while a delayed invocation is pending, only the most
    recent arguments are kept and used when the timer fires.
    """
    mutex = QtCore.QMutex()
    def decorator(func):
        def later():
            # Timer fired: run with the most recently stored arguments.
            mutex.lock()
            func(*decorator.args, **decorator.kwargs)
            decorator.prev = monotonic()
            decorator.is_ticking = False
            mutex.unlock()
        def throttled_func(*args, **kwargs):
            if decorator.is_ticking:
                # A delayed call is already scheduled; just refresh its arguments.
                mutex.lock()
                decorator.args = args
                decorator.kwargs = kwargs
                mutex.unlock()
                return
            mutex.lock()
            now = monotonic()
            r = interval - (now-decorator.prev)*1000.0
            if r <= 0:
                # Enough time has passed since the last run: call through now.
                func(*args, **kwargs)
                decorator.prev = now
            else:
                # Too soon: schedule a single delayed call for the remainder.
                decorator.args = args
                decorator.kwargs = kwargs
                QtCore.QTimer.singleShot(int(r), later)
                decorator.is_ticking = True
            mutex.unlock()
        return throttled_func
    # Shared state is stored as attributes on the decorator closure.
    decorator.prev = 0
    decorator.is_ticking = False
    return decorator
class IgnoreUpdatesContext:
    """Re-entrant context manager holding a boolean "ignore updates" state.

    The instance evaluates to False outside any `with` block and to True while
    entered (including nested entries). Optional callbacks fire on every
    enter/exit, plus on the first enter and the last exit of a nesting chain:

        ignore_changes = IgnoreUpdatesContext()
        with ignore_changes:
            ...  # bool(ignore_changes) is True here

    Code performing updates can check the instance and skip work while True.
    """
    def __init__(self, on_exit=None, on_enter=None, on_first_enter=None, on_last_exit=None):
        # Nesting depth: > 0 while at least one context is active.
        self._entered = 0
        self._on_enter = on_enter
        self._on_first_enter = on_first_enter
        self._on_exit = on_exit
        self._on_last_exit = on_last_exit
    def __enter__(self):
        self._entered += 1
        if self._on_enter is not None:
            self._on_enter()
        if self._entered == 1 and self._on_first_enter is not None:
            self._on_first_enter()
    def __exit__(self, exc_type, exc_value, traceback):
        self._entered -= 1
        if self._on_exit is not None:
            self._on_exit()
        if self._entered == 0 and self._on_last_exit is not None:
            self._on_last_exit()
    def __bool__(self):
        return self._entered > 0
def uniqify(seq):
    """Return a list of the unique items of ``seq``, keeping first-seen order."""
    return [*iter_unique(seq)]
def iter_unique(seq):
    """Yield each distinct value of ``seq`` once, in first-seen order."""
    seen = set()
    for item in seq:
        if item not in seen:
            seen.add(item)
            yield item
# order is important: the first expression yielding a plausible number wins
# (see tracknum_from_filename below)
_tracknum_regexps = [re.compile(r, re.I) for r in (
    # search for explicit track number (prefix "track")
    r"track[\s_-]*(?:(?:no|nr)\.?)?[\s_-]*(?P<number>\d+)",
    # search for 1- or 2-digit number at start of string (additional leading zeroes are allowed)
    # An optional disc number preceding the track number is ignored.
    r"^(?:\d+[\s_-])?(?P<number>0*\d{1,2})(?:\.)[^0-9,]", # "99. ", but not "99.02"
    # NOTE(review): the character class below contains a literal "s"; it looks
    # like it may have been intended as "\s" (whitespace) — confirm before changing.
    r"^(?:\d+[\s_-])?(?P<number>0*\d{1,2})[^0-9,.s]",
    # search for 2-digit number at end of string (additional leading zeroes are allowed)
    r"[^0-9,.\w](?P<number>0*\d{2})$",
    r"[^0-9,.\w]\[(?P<number>0*\d{1,2})\]$",
    r"[^0-9,.\w]\((?P<number>0*\d{2})\)$",
    # File names which consist of only a number
    r"^(?P<number>\d+)$",
)]
def tracknum_from_filename(base_filename):
    """Guess and extract track number from filename
    Returns `None` if none found, the number as integer else
    """
    stem, _ext = os.path.splitext(base_filename)
    for pattern in _tracknum_regexps:
        found = pattern.search(stem)
        if not found:
            continue
        number = int(found.group('number'))
        # Numbers above 1900 are often years, track numbers should be much
        # smaller even for extensive collections
        if 0 < number < 1900:
            return number
    return None
GuessedFromFilename = namedtuple('GuessedFromFilename', ('tracknumber', 'title'))
def tracknum_and_title_from_filename(base_filename):
    """Guess tracknumber and title from filename.
    Uses `tracknum_from_filename` to guess the tracknumber. The filename is used
    as the title. If the tracknumber is at the beginning of the title it gets stripped.
    Returns a tuple `(tracknumber, title)`.
    """
    stem, _ext = os.path.splitext(base_filename)
    number = tracknum_from_filename(base_filename)
    if number is None:
        return GuessedFromFilename(None, stem)
    tracknumber = str(number)
    without_zeroes = stem.lstrip('0')
    if without_zeroes.startswith(tracknumber):
        return GuessedFromFilename(tracknumber, without_zeroes[len(tracknumber):].lstrip())
    return GuessedFromFilename(tracknumber, stem)
def is_hidden(filepath):
    """Test whether a file or directory is hidden.
    A file is considered hidden if it starts with a dot
    on non-Windows systems or if it has the "hidden" flag
    set on Windows."""
    basename = os.path.basename(os.path.abspath(filepath))
    if not IS_WIN and basename.startswith('.'):
        return True
    return _has_hidden_attribute(filepath)
# Platform-specific implementations of _has_hidden_attribute().
if IS_WIN:
    from ctypes import windll
    def _has_hidden_attribute(filepath):
        # Query Win32 file attributes; bit 2 is FILE_ATTRIBUTE_HIDDEN,
        # -1 signals that the attributes could not be read.
        try:
            attrs = windll.kernel32.GetFileAttributesW(filepath)
            assert attrs != -1
            return bool(attrs & 2)
        except (AttributeError, AssertionError):
            return False
elif IS_MACOS:
    import Foundation
    def _has_hidden_attribute(filepath):
        # On macOS detecting hidden files involves more than just checking for dot files, see
        # https://stackoverflow.com/questions/284115/cross-platform-hidden-file-detection
        url = Foundation.NSURL.fileURLWithPath_(filepath)
        result = url.getResourceValue_forKey_error_(None, Foundation.NSURLIsHiddenKey, None)
        return result[1]
else:
    def _has_hidden_attribute(filepath):
        # Other platforms: only the leading-dot convention applies,
        # which is handled by is_hidden() itself.
        return False
def linear_combination_of_weights(parts):
    """Produces a probability as a linear combination of weights
    Parts should be a list of tuples in the form:
        [(v0, w0), (v1, w1), ..., (vn, wn)]
    where vn is a value between 0.0 and 1.0
    and wn corresponding weight as a positive number
    Raises ValueError for values outside [0.0, 1.0] or negative weights.
    """
    total_weight = 0.0
    weighted_sum = 0.0
    for value, weight in parts:
        if value < 0.0:
            raise ValueError("Value must be greater than or equal to 0.0")
        if value > 1.0:
            raise ValueError("Value must be lesser than or equal to 1.0")
        if weight < 0:
            raise ValueError("Weight must be greater than or equal to 0.0")
        total_weight += weight
        weighted_sum += value * weight
    if total_weight == 0.0:
        return 0.0
    return weighted_sum / total_weight
def album_artist_from_path(filename, album, artist):
    """If album is not set, try to extract album and artist from path.
    Args:
        filename: The full file path
        album: Default album name
        artist: Default artist name
    Returns:
        A tuple (album, artist)
    """
    if album:
        return album, artist
    path = PurePath(filename)
    parts = list(path.relative_to(path.anchor).parent.parts)
    # Ignore a trailing disc subdirectory such as "CD 1" or "Disc 2".
    if parts and re.search(r'\b(?:CD|DVD|Disc)\s*\d+\b', parts[-1], re.I):
        parts.pop()
    if parts:
        # Assume "%artist%/%album%/file" or "%artist% - %album%/file" layouts.
        album = parts[-1]
        if ' - ' in album:
            new_artist, album = album.split(' - ', 1)
            if not artist:
                artist = new_artist
        elif not artist and len(parts) >= 2:
            artist = parts[-2]
    return album, artist
def encoded_queryargs(queryargs):
    """Return a copy of ``queryargs`` with every value percent-encoded.

    Keys are left unmodified.
    """
    encoded = {}
    for name, value in queryargs.items():
        encoded[name] = bytes(QtCore.QUrl.toPercentEncoding(str(value))).decode()
    return encoded
def build_qurl(host, port=80, path=None, queryargs=None):
    """
    Builds and returns a QUrl object from `host`, `port` and `path` and
    automatically enables HTTPS if necessary.
    Encoded query arguments can be provided in `queryargs`, a
    dictionary mapping field names to values.
    """
    url = QtCore.QUrl()
    url.setHost(host)
    use_https = port == 443 or host in MUSICBRAINZ_SERVERS
    if use_https:
        url.setScheme('https')
    else:
        url.setScheme('http')
        # Port 80 is the HTTP default and is left implicit.
        if port != 80:
            url.setPort(port)
    if path is not None:
        url.setPath(path)
    if queryargs is not None:
        query = QtCore.QUrlQuery()
        for name, value in queryargs.items():
            query.addQueryItem(name, str(value))
        url.setQuery(query)
    return url
def union_sorted_lists(list1, list2):
    """
    Returns union of two sorted lists.
    >> list1 = [1, 2, 2, 2, 3]
    >> list2 = [2, 3, 4]
    >> union_sorted_lists(list1, list2)
    >> [1, 2, 2, 2, 3, 4]
    """
    merged = []
    i = j = 0
    len1, len2 = len(list1), len(list2)
    while i < len1 and j < len2:
        a, b = list1[i], list2[j]
        if a < b:
            merged.append(a)
            i += 1
        elif a > b:
            merged.append(b)
            j += 1
        else:
            # Equal heads: keep one copy and advance both sides.
            merged.append(a)
            i += 1
            j += 1
    # At most one of these tails is non-empty.
    merged.extend(list1[i:])
    merged.extend(list2[j:])
    return merged
def __convert_to_string(obj):
    """Appropriately converts the input `obj` to a string.
    Args:
        obj (QByteArray, bytes, bytearray, ...): The input object
    Returns:
        string: The appropriately decoded string
    """
    if isinstance(obj, QtCore.QByteArray):
        return bytes(obj).decode()
    if isinstance(obj, (bytes, bytearray)):
        return obj.decode()
    return str(obj)
def load_json(data):
    """Deserializes a string or bytes like json response and converts
    it to a python object.
    Args:
        data (QByteArray, bytes, bytearray, ...): The json response
    Returns:
        dict: Response data as a python dict
    """
    text = __convert_to_string(data)
    return json.loads(text)
def parse_json(reply):
    """Read the full body of a network ``reply`` and parse it as JSON."""
    payload = reply.readAll()
    return load_json(payload)
def restore_method(func):
    """Decorator: only call ``func`` when session restoring is enabled.

    NOTE(review): `_no_restore` presumably reflects a --no-restore command
    line option on the tagger instance — confirm against the application.
    """
    def func_wrapper(*args, **kwargs):
        tagger = QtCore.QCoreApplication.instance()
        if tagger._no_restore:
            return None
        return func(*args, **kwargs)
    return func_wrapper
def reconnect(signal, newhandler=None, oldhandler=None):
    """
    Reconnect an handler to a signal
    It disconnects all previous handlers before connecting new one
    Credits: https://stackoverflow.com/a/21589403
    Args:
        signal: the Qt signal to modify
        newhandler: slot to connect, or None to only disconnect
        oldhandler: specific slot to disconnect; None disconnects all slots
    """
    while True:
        try:
            # Each disconnect() removes one connection; Qt raises TypeError
            # once there is nothing left to disconnect, ending the loop.
            if oldhandler is not None:
                signal.disconnect(oldhandler)
            else:
                signal.disconnect()
        except TypeError:
            break
    if newhandler is not None:
        signal.connect(newhandler)
def compare_barcodes(barcode1, barcode2):
    """
    Compares two barcodes. Returns True if they are the same, False otherwise.
    Tries to normalize UPC barcodes to EAN barcodes so e.g. "727361379704"
    and "0727361379704" are considered the same.
    """
    first = barcode1 or ''
    second = barcode2 or ''
    if first == second:
        return True
    if not (first and second):
        return False
    # Zero-pad to 13 digits so a 12-digit UPC equals its EAN form.
    return first.zfill(13) == second.zfill(13)
BestMatch = namedtuple('BestMatch', ('similarity', 'result'))
def sort_by_similarity(candidates):
    """Return ``candidates`` sorted by their ``similarity`` attribute, best first."""
    return sorted(candidates, key=attrgetter('similarity'), reverse=True)
def find_best_match(candidates, no_match):
    """Pick the candidate with the highest similarity.
    Args:
        candidates: Iterable with objects having a `similarity` attribute
        no_match: Match to return if there was no candidate
    Returns: `BestMatch` with the similarity and the matched object as result.
    """
    winner = max(candidates, key=attrgetter('similarity'), default=no_match)
    return BestMatch(similarity=winner.similarity, result=winner)
def limited_join(a_list, limit, join_string='+', middle_string='…'):
    """Join ``a_list`` with ``join_string``, eliding the middle if too long.
    If the list has more than ``limit`` elements, the middle ones are replaced
    by a single ``middle_string``.
    Example:
        >>> limited_join(['a', 'b', 'c', 'd', 'e', 'f'], 2)
        'a+…+f'
        >>> limited_join(['a', 'b', 'c', 'd', 'e', 'f'], 4)
        'a+b+…+e+f'
        >>> limited_join(['a', 'b', 'c', 'd', 'e', 'f'], 6)
        'a+b+c+d+e+f'
        >>> limited_join(['a', 'b', 'c', 'd', 'e', 'f'], 2, ',', '?')
        'a,?,f'
    """
    if limit <= 1 or limit >= len(a_list):
        return join_string.join(a_list)
    keep = limit // 2
    elided = a_list[:keep] + [middle_string] + a_list[-keep:]
    return join_string.join(elided)
def countries_shortlist(countries):
    """Join country codes with '+', eliding the middle when more than six entries."""
    return limited_join(countries, 6, join_string='+', middle_string='…')
def extract_year_from_date(dt):
    """Extract the year from a date given either as a mapping (with a 'year'
    key) or as a date string. Returns an int, or None on failure."""
    if isinstance(dt, Mapping):
        try:
            return int(dt.get('year'))
        except (TypeError, ValueError):
            return None
    try:
        return parse(dt).year
    except (TypeError, ValueError):
        return None
def pattern_as_regex(pattern, allow_wildcards=False, flags=0):
    """Parses a string and interprets it as a matching pattern.
    - If pattern is of the form /pattern/flags it is interpreted as a regular expression (e.g. `/foo.*/`).
      The flags are optional and in addition to the flags passed in the `flags` function parameter. Supported
      flags in the expression are "i" (ignore case) and "m" (multiline)
    - Otherwise if `allow_wildcards` is True, it is interpreted as a pattern that allows wildcard matching
      (`*`, `?`, `[...]`, with backslash escapes)
    - If `allow_wildcards` is False a regex matching the literal string is returned
    Args:
        pattern: The pattern as a string
        allow_wildcards: If true and if the the pattern is not interpreted as a regex wildard matching is allowed.
        flags: Additional regex flags to set (e.g. `re.I`)
    Returns: An re.Pattern instance
    Raises: `re.error` if the regular expression could not be parsed
    """
    stripped = pattern.rstrip('im')
    is_regex_form = len(stripped) > 2 and stripped.startswith('/') and stripped.endswith('/')
    if is_regex_form:
        modifiers = pattern[len(stripped):]
        if 'i' in modifiers:
            flags |= re.IGNORECASE
        if 'm' in modifiers:
            flags |= re.MULTILINE
        return re.compile(stripped[1:-1], flags)
    if allow_wildcards:
        return re.compile('^' + wildcards_to_regex_pattern(pattern) + '$', flags)
    return re.compile(re.escape(pattern), flags)
def wildcards_to_regex_pattern(pattern):
    """Converts a pattern with shell like wildcards into a regular expression string.
    The following syntax is supported:
    - `*`: Matches an arbitrary number of characters or none, e.g. `fo*` matches "foo" or "foot".
    - `?`: Matches exactly one character, e.g. `fo?` matches "foo" or "for".
    - `[...]`
    - `?`, `*` and `\\` can be escaped with a backslash \\ to match the literal character, e.g. `fo\\?` matches "fo?".
    Args:
        pattern: The pattern as a string
    Returns: A string with a valid regular expression.
    """
    regex = []
    # `group` holds the characters of an open [...] character group (or None);
    # `escape` is True while the previous character was an unprocessed backslash.
    group = None
    escape = False
    for c in pattern:
        if group is not None:
            # Inside a [...] character group.
            if escape:
                if c in {'\\', '[', ']'}:
                    c = '\\' + c
                else:
                    # A lone backslash inside a group stays a literal backslash.
                    group.append('\\\\')
                escape = False
            if c == ']':
                # Group closed: emit it as a whole.
                group.append(c)
                part = ''.join(group)
                group = None
            elif c == '\\':
                escape = True
                continue
            else:
                group.append(c)
                continue
        elif escape:
            if c in {'*', '?', '\\', '[', ']'}:
                part = '\\' + c
            else:
                part = re.escape('\\' + c)
            escape = False
        elif c == '\\':
            escape = True
            continue
        elif c == '[':
            # Start collecting a character group.
            group = ['[']
            continue
        elif c == '*':
            part = '.*'
        elif c == '?':
            part = '.'
        else:
            part = re.escape(c)
        regex.append(part)
    # There might be an unclosed character group. Interpret the starting
    # bracket of the group as a literal bracket and re-evaluate the rest.
    if group is not None:
        regex.append('\\[')
        regex.append(wildcards_to_regex_pattern(''.join(group[1:])))
    return ''.join(regex)
def _regex_numbered_title_fmt(fmt, title_repl, count_repl):
title_marker = '{title}'
count_marker = '{count}'
parts = fmt.split(title_marker)
def wrap_count(p):
if count_marker in p:
return '(?:' + re.escape(p) + ')?'
else:
return p
return (
re.escape(title_marker).join(wrap_count(p) for p in parts)
.replace(re.escape(title_marker), title_repl)
.replace(re.escape(count_marker), count_repl)
)
def _get_default_numbered_title_format():
    """Return the translated default format string for numbered titles."""
    from picard.const.defaults import DEFAULT_NUMBERED_TITLE_FORMAT
    fmt = DEFAULT_NUMBERED_TITLE_FORMAT
    return gettext_constants(fmt)
def unique_numbered_title(default_title, existing_titles, fmt=None):
    """Generate a new unique, numbered title.

    The next free number is derived from ``existing_titles``: the highest
    number already used with ``default_title`` (unnumbered matches count
    as one each), plus one.

    Args:
        default_title: The base title to number.
        existing_titles: Iterable of titles already in use.
        fmt: Optional format with ``{title}`` and ``{count}`` placeholders;
            defaults to the translated standard format.
    """
    if fmt is None:
        fmt = _get_default_numbered_title_format()
    pattern = _regex_numbered_title_fmt(fmt, re.escape(default_title), r'(\d+)')
    matcher = re.compile(pattern)
    highest = 0
    for existing in existing_titles:
        match = matcher.fullmatch(existing)
        if not match:
            continue
        number = match.group(1)
        if number is None:
            # An unnumbered occurrence still consumes a slot.
            highest += 1
        else:
            highest = max(highest, int(number))
    return fmt.format(title=default_title, count=highest + 1)
def get_base_title_with_suffix(title, suffix, fmt=None):
    """Extract the base portion of ``title``.

    Strips the given ``suffix`` and any trailing number (as produced by
    the numbered-title format) from the end; returns ``title`` unchanged
    when it does not match the format.
    """
    if fmt is None:
        fmt = _get_default_numbered_title_format()
    base_group = r'(?P<title>.*?)(?:\s*' + re.escape(suffix) + ')?'
    pattern = _regex_numbered_title_fmt(fmt, base_group, r'\d*')
    # Accept any whitespace run where the format has (possibly escaped) spaces.
    pattern = pattern.replace(r'\ ', r'\s+').replace(' ', r'\s+')
    match_obj = re.fullmatch(pattern, title)
    return match_obj['title'] if match_obj else title
def get_base_title(title):
    """Extract the base portion of ``title`` using the standard copy suffix."""
    from picard.const.defaults import DEFAULT_COPY_TEXT
    return get_base_title_with_suffix(title, gettext_constants(DEFAULT_COPY_TEXT))
def iter_exception_chain(err):
    """Iterate over the exception chain.

    Yields this exception followed by all exceptions linked through
    ``__context__`` and ``__cause__`` (context subtree first).

    Fixes over the previous recursive version: ``None`` links are no
    longer yielded, each exception is yielded at most once (``raise X
    from Y`` sets both ``__cause__`` and ``__context__`` to the same
    exception, which used to be yielded twice), and cyclic chains no
    longer recurse forever.
    """
    seen = set()
    stack = [err]
    while stack:
        current = stack.pop()
        if current is None or id(current) in seen:
            continue
        seen.add(id(current))
        yield current
        # Push the cause first so the context subtree is walked before
        # the cause subtree, keeping the original depth-first order.
        stack.append(getattr(current, '__cause__', None))
        stack.append(getattr(current, '__context__', None))
def any_exception_isinstance(error, type_):
    """Return True if any exception in the chain of *error* is an instance of *type_*."""
    for err in iter_exception_chain(error):
        if isinstance(err, type_):
            return True
    return False
# Byte order marks mapped to the corresponding codec names.
# Ordered longest-first on purpose: the UTF-32-LE BOM begins with the
# UTF-16-LE BOM bytes (ff fe), so detect_file_encoding() must test the
# 4-byte patterns before the 2-byte ones.
ENCODING_BOMS = {
    b'\xff\xfe\x00\x00': 'utf-32-le',
    b'\x00\x00\xfe\xff': 'utf-32-be',
    b'\xef\xbb\xbf': 'utf-8-sig',
    b'\xff\xfe': 'utf-16-le',
    b'\xfe\xff': 'utf-16-be',
}
def detect_file_encoding(path, max_bytes_to_read=1024*256):
    """Attempt to guess the unicode encoding of a file.

    A byte order mark is checked first; failing that, a charset
    detection library is used when available. Assumes UTF-8 by default
    if no other encoding is detected.

    Args:
        path: The path to the file
        max_bytes_to_read: Maximum bytes to read from the file during
            encoding detection.

    Returns: The encoding as a string, e.g. "utf-16-le" or "utf-8"
    """
    with open(path, 'rb') as fileobj:
        head = fileobj.read(4)
        for bom, bom_encoding in ENCODING_BOMS.items():
            if head.startswith(bom):
                return bom_encoding
        if detect is None:
            # No charset detection library available; fall back to UTF-8.
            return 'utf-8'
        fileobj.seek(0)
        detection = detect(fileobj.read(max_bytes_to_read))
    detected = detection['encoding']
    if detected is None:
        log.warning("Couldn't detect encoding for file %r", path)
        return 'utf-8'
    detected = detected.lower()
    # Treat ASCII as UTF-8 (an ASCII document is also valid UTF-8)
    return 'utf-8' if detected == 'ascii' else detected
| 37,864
|
Python
|
.py
| 966
| 32.190476
| 118
| 0.633606
|
metabrainz/picard
| 3,687
| 383
| 10
|
GPL-2.0
|
9/5/2024, 5:11:02 PM (Europe/Amsterdam)
|
11,153
|
imagelist.py
|
metabrainz_picard/picard/util/imagelist.py
|
# -*- coding: utf-8 -*-
#
# Picard, the next-generation MusicBrainz tagger
#
# Copyright (C) 2017 Antonio Larrosa
# Copyright (C) 2017 Sambhav Kothari
# Copyright (C) 2018, 2020-2022 Philipp Wolfer
# Copyright (C) 2019-2021, 2023-2024 Laurent Monin
# Copyright (C) 2021 Gabriel Ferreira
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
from collections.abc import MutableSequence
from picard.config import get_config
class ImageList(MutableSequence):
    """Mutable sequence of cover art images.

    Keeps a lazily rebuilt mapping of image data hashes: every mutation
    marks the cache dirty so ``hash_dict()`` recomputes it on demand.
    """

    def __init__(self, iterable=()):
        self._images = list(iterable)
        self._hash_dict = {}
        self._dirty = True

    def __len__(self):
        return len(self._images)

    def __getitem__(self, index):
        return self._images[index]

    def __setitem__(self, index, value):
        # Only invalidate the hash cache when the stored value actually changes.
        if self._images[index] != value:
            self._images[index] = value
            self._dirty = True

    def __delitem__(self, index):
        del self._images[index]
        self._dirty = True

    def insert(self, index, value):
        self._images.insert(index, value)
        self._dirty = True

    def __repr__(self):
        return '%s(%r)' % (type(self).__name__, self._images)

    def _sorted(self):
        # Order images by their normalized type tuples for comparison.
        return sorted(self._images, key=lambda image: image.normalized_types())

    def __eq__(self, other):
        # Equal when both hold the same images, irrespective of order.
        return len(self) == len(other) and self._sorted() == other._sorted()

    def copy(self):
        """Return a shallow copy sharing the same image objects."""
        return type(self)(self._images)

    def get_front_image(self):
        """Return the first front image, or None if there is none."""
        return next((image for image in self if image.is_front_image()), None)

    def to_be_saved_to_tags(self, settings=None):
        """Generator returning images to be saved to tags according to
        passed settings or config.setting.

        Yields nothing unless 'save_images_to_tags' is enabled; with
        'embed_only_one_front_image' only the first savable front image
        is yielded.
        """
        if settings is None:
            settings = get_config().setting
        if not settings['save_images_to_tags']:
            return
        only_one_front = settings['embed_only_one_front_image']
        for image in self:
            if not image.can_be_saved_to_tags:
                continue
            if not only_one_front:
                yield image
            elif image.is_front_image():
                yield image
                return

    def strip_front_images(self):
        """Remove every front image in place."""
        self._images = [image for image in self._images if not image.is_front_image()]
        self._dirty = True

    def hash_dict(self):
        """Return a mapping of image data hash -> image, rebuilt lazily."""
        if self._dirty:
            self._hash_dict = {image.datahash.hash(): image for image in self._images}
            self._dirty = False
        return self._hash_dict

    def get_types_dict(self):
        """Map normalized type tuples to a single image per type.

        NOTE(review): when several images share the same types, the
        stored entry is replaced only if the candidate is not larger in
        either dimension, i.e. the smallest image ends up winning —
        confirm this selection is intended.
        """
        by_types = dict()
        for image in self._images:
            key = image.normalized_types()
            if key in by_types:
                kept = by_types[key]
                if image.width > kept.width or image.height > kept.height:
                    continue
            by_types[key] = image
        return by_types
| 3,745
|
Python
|
.py
| 95
| 31.115789
| 94
| 0.617461
|
metabrainz/picard
| 3,687
| 383
| 10
|
GPL-2.0
|
9/5/2024, 5:11:02 PM (Europe/Amsterdam)
|
11,154
|
textencoding.py
|
metabrainz_picard/picard/util/textencoding.py
|
# -*- coding: utf-8 -*-
#
# Picard, the next-generation MusicBrainz tagger
#
# Copyright (C) 2004 Robert Kaye
# Copyright (C) 2006 Lukáš Lalinský
# Copyright (C) 2014 Sophist-UK
# Copyright (C) 2014, 2018, 2020-2021 Laurent Monin
# Copyright (C) 2017 Sambhav Kothari
# Copyright (C) 2018-2019, 2021 Philipp Wolfer
# Copyright (C) 2020 Gabriel Ferreira
# Copyright (C) 2020 Undearius
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
# This modules provides functionality for simplifying unicode strings.
# The unicode character set (of over 1m codepoints and 24,000 characters) includes:
# Normal ascii (latin) non-accented characters
# Combined latin characters e.g. ae in normal usage
# Compatibility combined latin characters (retained for compatibility with other character sets)
# These can look very similar to normal characters and can be confusing for searches, sort orders etc.
# Non-latin (e.g. japanese, greek, hebrew etc.) characters
# Both latin and non-latin characters can be accented. Accents can be either:
# Provided by separate nonspacing_mark characters which are visually overlaid (visually 1 character is actually 2); or
# Integrated accented characters (i.e. non-accented characters combined with a nonspace_mark into a single character)
# Again these can be confusing for searches, sort orders etc.
# Punctuation can also be confusing in unicode e.g. several types of single or double quote mark.
# For latin script:
# Combined characters, accents and punctuation can be visually similar but look different to search engines,
# sort orders etc. and the number of ways to use similar looking characters can (does) result in inconsistent
# usage inside Music metadata.
#
# Simplifying # the unicode character sets by many-to-one mappings can improve consistency and reduce confusion,
# however sometimes the choice of specific characters can be a deliberate part of an album, song title or artist name
# (and should not therefore be changed without careful thought) and occasionally the choice of characters can be
# malicious (i.e. to defeat firewalls or spam filters or to appear to be something else).
#
# Finally, given the size of the unicode character set, fonts are unlikely to display all characters,
# making simplification a necessity.
#
# Simplification may also be needed to make tags conform to ISO-8859-1 (extended ascii) or to make tags or filenames
# into ascii, perhaps because the file system or player cannot support unicode.
#
# Non-latin scripts may also need to be converted to latin scripts through:
# Translation (e.g. hebrew word for mother is translated to "mother"); or
# Transliteration (e.g. the SOUND of the hebrew letter or word is spelt out in latin)
# These are non-trivial, and the software to do these is far from comprehensive.
# This module provides utility functions to enable simplification of latin and punctuation unicode:
# 1. simplify compatibility characters;
# 2. split combined characters;
# 3. remove accents (entirely or if not in ISO-8859-1 as applicable);
# 4. replace remaining non-ascii or non-ISO-8859-1 characters with a default character
# This module also provides an extension infrastructure to allow translation and / or transliteration plugins to be added.
import codecs
from functools import partial
import unicodedata
from picard.util import sanitize_filename
# LATIN SIMPLIFICATION
# The translation tables for punctuation and latin combined-characters are taken from
# http://unicode.org/repos/cldr/trunk/common/transforms/Latin-ASCII.xml
# Various bugs and mistakes in this have been ironed out during testing.
# Extra compatibility replacements applied before NFKC normalization:
# small-capital latin letters and a few spacing/punctuation characters
# that NFKC alone does not fold. Keys are single characters, values are
# their replacement strings.
_additional_compatibility = {
    "\u0276": "Œ",  # LATIN LETTER SMALL CAPITAL OE
    "\u1D00": "A",  # LATIN LETTER SMALL CAPITAL A
    "\u1D01": "Æ",  # LATIN LETTER SMALL CAPITAL AE
    "\u1D04": "C",  # LATIN LETTER SMALL CAPITAL C
    "\u1D05": "D",  # LATIN LETTER SMALL CAPITAL D
    "\u1D07": "E",  # LATIN LETTER SMALL CAPITAL E
    "\u1D0A": "J",  # LATIN LETTER SMALL CAPITAL J
    "\u1D0B": "K",  # LATIN LETTER SMALL CAPITAL K
    "\u1D0D": "M",  # LATIN LETTER SMALL CAPITAL M
    "\u1D0F": "O",  # LATIN LETTER SMALL CAPITAL O
    "\u1D18": "P",  # LATIN LETTER SMALL CAPITAL P
    "\u1D1B": "T",  # LATIN LETTER SMALL CAPITAL T
    "\u1D1C": "U",  # LATIN LETTER SMALL CAPITAL U
    "\u1D20": "V",  # LATIN LETTER SMALL CAPITAL V
    "\u1D21": "W",  # LATIN LETTER SMALL CAPITAL W
    "\u1D22": "Z",  # LATIN LETTER SMALL CAPITAL Z
    "\u3007": "0",  # IDEOGRAPHIC NUMBER ZERO
    "\u00A0": " ",  # NO-BREAK SPACE
    "\u3000": " ",  # IDEOGRAPHIC SPACE (from ‹character-fallback›)
    "\u2033": "”",  # DOUBLE PRIME
    "\uff0f": "/",  # FULLWIDTH SOLIDUS
}
def unicode_simplify_compatibility(string, pathsave=False, win_compat=False):
    """Replace additional compatibility characters, then apply NFKC normalization."""
    mapped = ''.join(
        _replace_char(_additional_compatibility, char, pathsave, win_compat)
        for char in string
    )
    return unicodedata.normalize('NFKC', mapped)
# Punctuation look-alikes (quotes, primes, dashes, brackets, angle marks,
# slashes) mapped to plain ASCII equivalents. Applied character by
# character via _replace_char() in unicode_simplify_punctuation().
_simplify_punctuation = {
    "\u013F": "L",  # LATIN CAPITAL LETTER L WITH MIDDLE DOT (compat)
    "\u0140": "l",  # LATIN SMALL LETTER L WITH MIDDLE DOT (compat)
    "\u2018": "'",  # LEFT SINGLE QUOTATION MARK (from ‹character-fallback›)
    "\u2019": "'",  # RIGHT SINGLE QUOTATION MARK (from ‹character-fallback›)
    "\u201A": "'",  # SINGLE LOW-9 QUOTATION MARK (from ‹character-fallback›)
    "\u201B": "'",  # SINGLE HIGH-REVERSED-9 QUOTATION MARK (from ‹character-fallback›)
    "\u201C": "\"",  # LEFT DOUBLE QUOTATION MARK (from ‹character-fallback›)
    "\u201D": "\"",  # RIGHT DOUBLE QUOTATION MARK (from ‹character-fallback›)
    "\u201E": "\"",  # DOUBLE LOW-9 QUOTATION MARK (from ‹character-fallback›)
    "\u201F": "\"",  # DOUBLE HIGH-REVERSED-9 QUOTATION MARK (from ‹character-fallback›)
    "\u2032": "'",  # PRIME
    "\u2033": "\"",  # DOUBLE PRIME
    "\u301D": "\"",  # REVERSED DOUBLE PRIME QUOTATION MARK
    "\u301E": "\"",  # DOUBLE PRIME QUOTATION MARK
    "\u00AB": "<<",  # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK (from ‹character-fallback›)
    "\u00BB": ">>",  # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK (from ‹character-fallback›)
    "\u2039": "<",  # SINGLE LEFT-POINTING ANGLE QUOTATION MARK
    "\u203A": ">",  # SINGLE RIGHT-POINTING ANGLE QUOTATION MARK
    "\u00AD": "",  # SOFT HYPHEN (from ‹character-fallback›)
    "\u2010": "-",  # HYPHEN (from ‹character-fallback›)
    "\u2011": "-",  # NON-BREAKING HYPHEN (from ‹character-fallback›)
    "\u2012": "-",  # FIGURE DASH (from ‹character-fallback›)
    "\u2013": "-",  # EN DASH (from ‹character-fallback›)
    "\u2014": "-",  # EM DASH (from ‹character-fallback›)
    "\u2015": "-",  # HORIZONTAL BAR (from ‹character-fallback›)
    "\uFE31": "|",  # PRESENTATION FORM FOR VERTICAL EM DASH (compat)
    "\uFE32": "|",  # PRESENTATION FORM FOR VERTICAL EN DASH (compat)
    "\uFE58": "-",  # SMALL EM DASH (compat)
    "\u2016": "||",  # DOUBLE VERTICAL LINE
    "\u2044": "/",  # FRACTION SLASH (from ‹character-fallback›)
    "\u2045": "[",  # LEFT SQUARE BRACKET WITH QUILL
    "\u2046": "]",  # RIGHT SQUARE BRACKET WITH QUILL
    "\u204E": "*",  # LOW ASTERISK
    "\u3008": "<",  # LEFT ANGLE BRACKET
    "\u3009": ">",  # RIGHT ANGLE BRACKET
    "\u300A": "<<",  # LEFT DOUBLE ANGLE BRACKET
    "\u300B": ">>",  # RIGHT DOUBLE ANGLE BRACKET
    "\u3014": "[",  # LEFT TORTOISE SHELL BRACKET
    "\u3015": "]",  # RIGHT TORTOISE SHELL BRACKET
    "\u3018": "[",  # LEFT WHITE TORTOISE SHELL BRACKET
    "\u3019": "]",  # RIGHT WHITE TORTOISE SHELL BRACKET
    "\u301A": "[",  # LEFT WHITE SQUARE BRACKET
    "\u301B": "]",  # RIGHT WHITE SQUARE BRACKET
    "\uFE11": ",",  # PRESENTATION FORM FOR VERTICAL IDEOGRAPHIC COMMA (compat)
    "\uFE12": ".",  # PRESENTATION FORM FOR VERTICAL IDEOGRAPHIC FULL STOP (compat)
    "\uFE39": "[",  # PRESENTATION FORM FOR VERTICAL LEFT TORTOISE SHELL BRACKET (compat)
    "\uFE3A": "]",  # PRESENTATION FORM FOR VERTICAL RIGHT TORTOISE SHELL BRACKET (compat)
    "\uFE3D": "<<",  # PRESENTATION FORM FOR VERTICAL LEFT DOUBLE ANGLE BRACKET (compat)
    "\uFE3E": ">>",  # PRESENTATION FORM FOR VERTICAL RIGHT DOUBLE ANGLE BRACKET (compat)
    "\uFE3F": "<",  # PRESENTATION FORM FOR VERTICAL LEFT ANGLE BRACKET (compat)
    "\uFE40": ">",  # PRESENTATION FORM FOR VERTICAL RIGHT ANGLE BRACKET (compat)
    "\uFE51": ",",  # SMALL IDEOGRAPHIC COMMA (compat)
    "\uFE5D": "[",  # SMALL LEFT TORTOISE SHELL BRACKET (compat)
    "\uFE5E": "]",  # SMALL RIGHT TORTOISE SHELL BRACKET (compat)
    "\uFF5F": "((",  # FULLWIDTH LEFT WHITE PARENTHESIS (compat)(from ‹character-fallback›)
    "\uFF60": "))",  # FULLWIDTH RIGHT WHITE PARENTHESIS (compat)(from ‹character-fallback›)
    "\uFF61": ".",  # HALFWIDTH IDEOGRAPHIC FULL STOP (compat)
    "\uFF64": ",",  # HALFWIDTH IDEOGRAPHIC COMMA (compat)
    "\u2212": "-",  # MINUS SIGN (from ‹character-fallback›)
    "\u2215": "/",  # DIVISION SLASH (from ‹character-fallback›)
    "\u2216": "\\",  # SET MINUS (from ‹character-fallback›)
    "\u2223": "|",  # DIVIDES (from ‹character-fallback›)
    "\u2225": "||",  # PARALLEL TO (from ‹character-fallback›)
    "\u226A": "<<",  # MUCH LESS-THAN
    "\u226B": ">>",  # MUCH GREATER-THAN
    "\u2985": "((",  # LEFT WHITE PARENTHESIS
    "\u2986": "))",  # RIGHT WHITE PARENTHESIS
    "\u2022": "-",  # BULLET
    "\u200B": "",  # Zero Width Space
}
def unicode_simplify_punctuation(string, pathsave=False, win_compat=False):
    """Map punctuation look-alikes in ``string`` to plain ASCII equivalents."""
    return ''.join(
        _replace_char(_simplify_punctuation, char, pathsave, win_compat)
        for char in string
    )
# Many-to-one replacement table expanding combined, ligature, stroked and
# small-capital latin characters (plus some signs and vulgar fractions)
# into plain ASCII sequences. Keys are single characters, values are the
# ASCII replacement strings; derived from the CLDR Latin-ASCII transform
# referenced in the module header.
_simplify_combinations = {
    "\u00C6": "AE",  # LATIN CAPITAL LETTER AE (from ‹character-fallback›)
    "\u00D0": "D",  # LATIN CAPITAL LETTER ETH
    "\u00D8": "OE",  # LATIN CAPITAL LETTER O WITH STROKE (see https://en.wikipedia.org/wiki/%C3%98)
    "\u00DE": "TH",  # LATIN CAPITAL LETTER THORN
    "\u00DF": "ss",  # LATIN SMALL LETTER SHARP S (from ‹character-fallback›)
    "\u00E6": "ae",  # LATIN SMALL LETTER AE (from ‹character-fallback›)
    "\u00F0": "d",  # LATIN SMALL LETTER ETH
    "\u00F8": "oe",  # LATIN SMALL LETTER O WITH STROKE (see https://en.wikipedia.org/wiki/%C3%98)
    "\u00FE": "th",  # LATIN SMALL LETTER THORN
    "\u0110": "D",  # LATIN CAPITAL LETTER D WITH STROKE
    "\u0111": "d",  # LATIN SMALL LETTER D WITH STROKE
    "\u0126": "H",  # LATIN CAPITAL LETTER H WITH STROKE
    "\u0127": "h",  # LATIN SMALL LETTER H WITH STROKE
    "\u0131": "i",  # LATIN SMALL LETTER DOTLESS I
    "\u0138": "q",  # LATIN SMALL LETTER KRA (collates with q in DUCET)
    "\u0141": "L",  # LATIN CAPITAL LETTER L WITH STROKE
    "\u0142": "l",  # LATIN SMALL LETTER L WITH STROKE
    "\u0149": "'n",  # LATIN SMALL LETTER N PRECEDED BY APOSTROPHE (from ‹character-fallback›)
    "\u014A": "N",  # LATIN CAPITAL LETTER ENG
    "\u014B": "n",  # LATIN SMALL LETTER ENG
    "\u0152": "OE",  # LATIN CAPITAL LIGATURE OE (from ‹character-fallback›)
    "\u0153": "oe",  # LATIN SMALL LIGATURE OE (from ‹character-fallback›)
    "\u0166": "T",  # LATIN CAPITAL LETTER T WITH STROKE
    "\u0167": "t",  # LATIN SMALL LETTER T WITH STROKE
    "\u0180": "b",  # LATIN SMALL LETTER B WITH STROKE
    "\u0181": "B",  # LATIN CAPITAL LETTER B WITH HOOK
    "\u0182": "B",  # LATIN CAPITAL LETTER B WITH TOPBAR
    "\u0183": "b",  # LATIN SMALL LETTER B WITH TOPBAR
    "\u0187": "C",  # LATIN CAPITAL LETTER C WITH HOOK
    "\u0188": "c",  # LATIN SMALL LETTER C WITH HOOK
    "\u0189": "D",  # LATIN CAPITAL LETTER AFRICAN D
    "\u018A": "D",  # LATIN CAPITAL LETTER D WITH HOOK
    "\u018B": "D",  # LATIN CAPITAL LETTER D WITH TOPBAR
    "\u018C": "d",  # LATIN SMALL LETTER D WITH TOPBAR
    "\u0190": "E",  # LATIN CAPITAL LETTER OPEN E
    "\u0191": "F",  # LATIN CAPITAL LETTER F WITH HOOK
    "\u0192": "f",  # LATIN SMALL LETTER F WITH HOOK
    "\u0193": "G",  # LATIN CAPITAL LETTER G WITH HOOK
    "\u0195": "hv",  # LATIN SMALL LETTER HV
    "\u0196": "I",  # LATIN CAPITAL LETTER IOTA
    "\u0197": "I",  # LATIN CAPITAL LETTER I WITH STROKE
    "\u0198": "K",  # LATIN CAPITAL LETTER K WITH HOOK
    "\u0199": "k",  # LATIN SMALL LETTER K WITH HOOK
    "\u019A": "l",  # LATIN SMALL LETTER L WITH BAR
    "\u019D": "N",  # LATIN CAPITAL LETTER N WITH LEFT HOOK
    "\u019E": "n",  # LATIN SMALL LETTER N WITH LONG RIGHT LEG
    "\u01A2": "GH",  # LATIN CAPITAL LETTER GHA (see http://unicode.org/notes/tn27/)
    "\u01A3": "gh",  # LATIN SMALL LETTER GHA (see http://unicode.org/notes/tn27/)
    "\u01A4": "P",  # LATIN CAPITAL LETTER P WITH HOOK
    "\u01A5": "p",  # LATIN SMALL LETTER P WITH HOOK
    "\u01AB": "t",  # LATIN SMALL LETTER T WITH PALATAL HOOK
    "\u01AC": "T",  # LATIN CAPITAL LETTER T WITH HOOK
    "\u01AD": "t",  # LATIN SMALL LETTER T WITH HOOK
    "\u01AE": "T",  # LATIN CAPITAL LETTER T WITH RETROFLEX HOOK
    "\u01B2": "V",  # LATIN CAPITAL LETTER V WITH HOOK
    "\u01B3": "Y",  # LATIN CAPITAL LETTER Y WITH HOOK
    "\u01B4": "y",  # LATIN SMALL LETTER Y WITH HOOK
    "\u01B5": "Z",  # LATIN CAPITAL LETTER Z WITH STROKE
    "\u01B6": "z",  # LATIN SMALL LETTER Z WITH STROKE
    "\u01C4": "DZ",  # LATIN CAPITAL LETTER DZ WITH CARON (compat)
    "\u01C5": "Dz",  # LATIN CAPITAL LETTER D WITH SMALL LETTER Z WITH CARON (compat)
    "\u01C6": "dz",  # LATIN SMALL LETTER DZ WITH CARON (compat)
    "\u01E4": "G",  # LATIN CAPITAL LETTER G WITH STROKE
    "\u01E5": "g",  # LATIN SMALL LETTER G WITH STROKE
    "\u0221": "d",  # LATIN SMALL LETTER D WITH CURL
    "\u0224": "Z",  # LATIN CAPITAL LETTER Z WITH HOOK
    "\u0225": "z",  # LATIN SMALL LETTER Z WITH HOOK
    "\u0234": "l",  # LATIN SMALL LETTER L WITH CURL
    "\u0235": "n",  # LATIN SMALL LETTER N WITH CURL
    "\u0236": "t",  # LATIN SMALL LETTER T WITH CURL
    "\u0237": "j",  # LATIN SMALL LETTER DOTLESS J
    "\u0238": "db",  # LATIN SMALL LETTER DB DIGRAPH
    "\u0239": "qp",  # LATIN SMALL LETTER QP DIGRAPH
    "\u023A": "A",  # LATIN CAPITAL LETTER A WITH STROKE
    "\u023B": "C",  # LATIN CAPITAL LETTER C WITH STROKE
    "\u023C": "c",  # LATIN SMALL LETTER C WITH STROKE
    "\u023D": "L",  # LATIN CAPITAL LETTER L WITH BAR
    "\u023E": "T",  # LATIN CAPITAL LETTER T WITH DIAGONAL STROKE
    "\u023F": "s",  # LATIN SMALL LETTER S WITH SWASH TAIL
    "\u0240": "z",  # LATIN SMALL LETTER Z WITH SWASH TAIL
    "\u0243": "B",  # LATIN CAPITAL LETTER B WITH STROKE
    "\u0244": "U",  # LATIN CAPITAL LETTER U BAR
    "\u0246": "E",  # LATIN CAPITAL LETTER E WITH STROKE
    "\u0247": "e",  # LATIN SMALL LETTER E WITH STROKE
    "\u0248": "J",  # LATIN CAPITAL LETTER J WITH STROKE
    "\u0249": "j",  # LATIN SMALL LETTER J WITH STROKE
    "\u024C": "R",  # LATIN CAPITAL LETTER R WITH STROKE
    "\u024D": "r",  # LATIN SMALL LETTER R WITH STROKE
    "\u024E": "Y",  # LATIN CAPITAL LETTER Y WITH STROKE
    "\u024F": "y",  # LATIN SMALL LETTER Y WITH STROKE
    "\u0253": "b",  # LATIN SMALL LETTER B WITH HOOK
    "\u0255": "c",  # LATIN SMALL LETTER C WITH CURL
    "\u0256": "d",  # LATIN SMALL LETTER D WITH TAIL
    "\u0257": "d",  # LATIN SMALL LETTER D WITH HOOK
    "\u025B": "e",  # LATIN SMALL LETTER OPEN E
    "\u025F": "j",  # LATIN SMALL LETTER DOTLESS J WITH STROKE
    "\u0260": "g",  # LATIN SMALL LETTER G WITH HOOK
    "\u0261": "g",  # LATIN SMALL LETTER SCRIPT G
    "\u0262": "G",  # LATIN LETTER SMALL CAPITAL G
    "\u0266": "h",  # LATIN SMALL LETTER H WITH HOOK
    "\u0267": "h",  # LATIN SMALL LETTER HENG WITH HOOK
    "\u0268": "i",  # LATIN SMALL LETTER I WITH STROKE
    "\u026A": "I",  # LATIN LETTER SMALL CAPITAL I
    "\u026B": "l",  # LATIN SMALL LETTER L WITH MIDDLE TILDE
    "\u026C": "l",  # LATIN SMALL LETTER L WITH BELT
    "\u026D": "l",  # LATIN SMALL LETTER L WITH RETROFLEX HOOK
    "\u0271": "m",  # LATIN SMALL LETTER M WITH HOOK
    "\u0272": "n",  # LATIN SMALL LETTER N WITH LEFT HOOK
    "\u0273": "n",  # LATIN SMALL LETTER N WITH RETROFLEX HOOK
    "\u0274": "N",  # LATIN LETTER SMALL CAPITAL N
    "\u0276": "OE",  # LATIN LETTER SMALL CAPITAL OE
    "\u027C": "r",  # LATIN SMALL LETTER R WITH LONG LEG
    "\u027D": "r",  # LATIN SMALL LETTER R WITH TAIL
    "\u027E": "r",  # LATIN SMALL LETTER R WITH FISHHOOK
    "\u0280": "R",  # LATIN LETTER SMALL CAPITAL R
    "\u0282": "s",  # LATIN SMALL LETTER S WITH HOOK
    "\u0288": "t",  # LATIN SMALL LETTER T WITH RETROFLEX HOOK
    "\u0289": "u",  # LATIN SMALL LETTER U BAR
    "\u028B": "v",  # LATIN SMALL LETTER V WITH HOOK
    "\u028F": "Y",  # LATIN LETTER SMALL CAPITAL Y
    "\u0290": "z",  # LATIN SMALL LETTER Z WITH RETROFLEX HOOK
    "\u0291": "z",  # LATIN SMALL LETTER Z WITH CURL
    "\u0299": "B",  # LATIN LETTER SMALL CAPITAL B
    "\u029B": "G",  # LATIN LETTER SMALL CAPITAL G WITH HOOK
    "\u029C": "H",  # LATIN LETTER SMALL CAPITAL H
    "\u029D": "j",  # LATIN SMALL LETTER J WITH CROSSED-TAIL
    "\u029F": "L",  # LATIN LETTER SMALL CAPITAL L
    "\u02A0": "q",  # LATIN SMALL LETTER Q WITH HOOK
    "\u02A3": "dz",  # LATIN SMALL LETTER DZ DIGRAPH
    "\u02A5": "dz",  # LATIN SMALL LETTER DZ DIGRAPH WITH CURL
    "\u02A6": "ts",  # LATIN SMALL LETTER TS DIGRAPH
    "\u02AA": "ls",  # LATIN SMALL LETTER LS DIGRAPH
    "\u02AB": "lz",  # LATIN SMALL LETTER LZ DIGRAPH
    "\u1D01": "AE",  # LATIN LETTER SMALL CAPITAL AE
    "\u1D03": "B",  # LATIN LETTER SMALL CAPITAL BARRED B
    "\u1D06": "D",  # LATIN LETTER SMALL CAPITAL ETH
    "\u1D0C": "L",  # LATIN LETTER SMALL CAPITAL L WITH STROKE
    "\u1D6B": "ue",  # LATIN SMALL LETTER UE
    "\u1D6C": "b",  # LATIN SMALL LETTER B WITH MIDDLE TILDE
    "\u1D6D": "d",  # LATIN SMALL LETTER D WITH MIDDLE TILDE
    "\u1D6E": "f",  # LATIN SMALL LETTER F WITH MIDDLE TILDE
    "\u1D6F": "m",  # LATIN SMALL LETTER M WITH MIDDLE TILDE
    "\u1D70": "n",  # LATIN SMALL LETTER N WITH MIDDLE TILDE
    "\u1D71": "p",  # LATIN SMALL LETTER P WITH MIDDLE TILDE
    "\u1D72": "r",  # LATIN SMALL LETTER R WITH MIDDLE TILDE
    "\u1D73": "r",  # LATIN SMALL LETTER R WITH FISHHOOK AND MIDDLE TILDE
    "\u1D74": "s",  # LATIN SMALL LETTER S WITH MIDDLE TILDE
    "\u1D75": "t",  # LATIN SMALL LETTER T WITH MIDDLE TILDE
    "\u1D76": "z",  # LATIN SMALL LETTER Z WITH MIDDLE TILDE
    "\u1D7A": "th",  # LATIN SMALL LETTER TH WITH STRIKETHROUGH
    "\u1D7B": "I",  # LATIN SMALL CAPITAL LETTER I WITH STROKE
    "\u1D7D": "p",  # LATIN SMALL LETTER P WITH STROKE
    "\u1D7E": "U",  # LATIN SMALL CAPITAL LETTER U WITH STROKE
    "\u1D80": "b",  # LATIN SMALL LETTER B WITH PALATAL HOOK
    "\u1D81": "d",  # LATIN SMALL LETTER D WITH PALATAL HOOK
    "\u1D82": "f",  # LATIN SMALL LETTER F WITH PALATAL HOOK
    "\u1D83": "g",  # LATIN SMALL LETTER G WITH PALATAL HOOK
    "\u1D84": "k",  # LATIN SMALL LETTER K WITH PALATAL HOOK
    "\u1D85": "l",  # LATIN SMALL LETTER L WITH PALATAL HOOK
    "\u1D86": "m",  # LATIN SMALL LETTER M WITH PALATAL HOOK
    "\u1D87": "n",  # LATIN SMALL LETTER N WITH PALATAL HOOK
    "\u1D88": "p",  # LATIN SMALL LETTER P WITH PALATAL HOOK
    "\u1D89": "r",  # LATIN SMALL LETTER R WITH PALATAL HOOK
    "\u1D8A": "s",  # LATIN SMALL LETTER S WITH PALATAL HOOK
    "\u1D8C": "v",  # LATIN SMALL LETTER V WITH PALATAL HOOK
    "\u1D8D": "x",  # LATIN SMALL LETTER X WITH PALATAL HOOK
    "\u1D8E": "z",  # LATIN SMALL LETTER Z WITH PALATAL HOOK
    "\u1D8F": "a",  # LATIN SMALL LETTER A WITH RETROFLEX HOOK
    "\u1D91": "d",  # LATIN SMALL LETTER D WITH HOOK AND TAIL
    "\u1D92": "e",  # LATIN SMALL LETTER E WITH RETROFLEX HOOK
    "\u1D93": "e",  # LATIN SMALL LETTER OPEN E WITH RETROFLEX HOOK
    "\u1D96": "i",  # LATIN SMALL LETTER I WITH RETROFLEX HOOK
    "\u1D99": "u",  # LATIN SMALL LETTER U WITH RETROFLEX HOOK
    "\u1E9A": "a",  # LATIN SMALL LETTER A WITH RIGHT HALF RING
    "\u1E9C": "s",  # LATIN SMALL LETTER LONG S WITH DIAGONAL STROKE
    "\u1E9D": "s",  # LATIN SMALL LETTER LONG S WITH HIGH STROKE
    "\u1E9E": "SS",  # LATIN CAPITAL LETTER SHARP S
    "\u1EFA": "LL",  # LATIN CAPITAL LETTER MIDDLE-WELSH LL
    "\u1EFB": "ll",  # LATIN SMALL LETTER MIDDLE-WELSH LL
    "\u1EFC": "V",  # LATIN CAPITAL LETTER MIDDLE-WELSH V
    "\u1EFD": "v",  # LATIN SMALL LETTER MIDDLE-WELSH V
    "\u1EFE": "Y",  # LATIN CAPITAL LETTER Y WITH LOOP
    "\u1EFF": "y",  # LATIN SMALL LETTER Y WITH LOOP
    "\u00A9": "(C)",  # COPYRIGHT SIGN (from ‹character-fallback›)
    "\u00AE": "(R)",  # REGISTERED SIGN (from ‹character-fallback›)
    "\u20A0": "CE",  # EURO-CURRENCY SIGN (from ‹character-fallback›)
    "\u20A2": "Cr",  # CRUZEIRO SIGN (from ‹character-fallback›)
    "\u20A3": "Fr.",  # FRENCH FRANC SIGN (from ‹character-fallback›)
    "\u20A4": "L.",  # LIRA SIGN (from ‹character-fallback›)
    "\u20A7": "Pts",  # PESETA SIGN (from ‹character-fallback›)
    "\u20BA": "TL",  # TURKISH LIRA SIGN (from ‹character-fallback›)
    "\u20B9": "Rs",  # INDIAN RUPEE SIGN (from ‹character-fallback›)
    "\u211E": "Rx",  # PRESCRIPTION TAKE (from ‹character-fallback›)
    "\u33A7": "m/s",  # SQUARE M OVER S (compat) (from ‹character-fallback›)
    "\u33AE": "rad/s",  # SQUARE RAD OVER S (compat) (from ‹character-fallback›)
    "\u33C6": "C/kg",  # SQUARE C OVER KG (compat) (from ‹character-fallback›)
    "\u33DE": "V/m",  # SQUARE V OVER M (compat) (from ‹character-fallback›)
    "\u33DF": "A/m",  # SQUARE A OVER M (compat) (from ‹character-fallback›)
    "\u00BC": " 1/4",  # VULGAR FRACTION ONE QUARTER (from ‹character-fallback›)
    "\u00BD": " 1/2",  # VULGAR FRACTION ONE HALF (from ‹character-fallback›)
    "\u00BE": " 3/4",  # VULGAR FRACTION THREE QUARTERS (from ‹character-fallback›)
    "\u2153": " 1/3",  # VULGAR FRACTION ONE THIRD (from ‹character-fallback›)
    "\u2154": " 2/3",  # VULGAR FRACTION TWO THIRDS (from ‹character-fallback›)
    "\u2155": " 1/5",  # VULGAR FRACTION ONE FIFTH (from ‹character-fallback›)
    "\u2156": " 2/5",  # VULGAR FRACTION TWO FIFTHS (from ‹character-fallback›)
    "\u2157": " 3/5",  # VULGAR FRACTION THREE FIFTHS (from ‹character-fallback›)
    "\u2158": " 4/5",  # VULGAR FRACTION FOUR FIFTHS (from ‹character-fallback›)
    "\u2159": " 1/6",  # VULGAR FRACTION ONE SIXTH (from ‹character-fallback›)
    "\u215A": " 5/6",  # VULGAR FRACTION FIVE SIXTHS (from ‹character-fallback›)
    "\u215B": " 1/8",  # VULGAR FRACTION ONE EIGHTH (from ‹character-fallback›)
    "\u215C": " 3/8",  # VULGAR FRACTION THREE EIGHTHS (from ‹character-fallback›)
    "\u215D": " 5/8",  # VULGAR FRACTION FIVE EIGHTHS (from ‹character-fallback›)
    "\u215E": " 7/8",  # VULGAR FRACTION SEVEN EIGHTHS (from ‹character-fallback›)
    "\u215F": " 1/",  # FRACTION NUMERATOR ONE (from ‹character-fallback›)
    "\u3001": ",",  # IDEOGRAPHIC COMMA
    "\u3002": ".",  # IDEOGRAPHIC FULL STOP
    "\u00D7": "x",  # MULTIPLICATION SIGN
    "\u00F7": "/",  # DIVISION SIGN
    "\u00B7": ".",  # MIDDLE DOT
    "\u1E9F": "dd",  # LATIN SMALL LETTER DELTA
    "\u0184": "H",  # LATIN CAPITAL LETTER TONE SIX
    "\u0185": "h",  # LATIN SMALL LETTER TONE SIX
    "\u01BE": "ts",  # LATIN LETTER TS LIGATION (see http://unicode.org/notes/tn27/)
}
def _replace_unicode_simplify_combinations(char, pathsave, win_compat):
    """Replace one combined character, optionally sanitizing for file paths."""
    replacement = _simplify_combinations.get(char)
    if replacement is None:
        # Character has no simplification; keep it unchanged.
        return char
    if pathsave:
        return sanitize_filename(replacement, win_compat=win_compat)
    return replacement
def unicode_simplify_combinations(string, pathsave=False, win_compat=False):
    """Expand combined/ligature/stroked characters in ``string`` to ASCII sequences."""
    return ''.join(
        _replace_unicode_simplify_combinations(char, pathsave, win_compat)
        for char in string
    )
def unicode_simplify_accents(string):
    """Strip accents by NFKD-decomposing and dropping combining marks."""
    decomposed = unicodedata.normalize('NFKD', string)
    return ''.join(char for char in decomposed if not unicodedata.combining(char))
def asciipunct(string):
    """Fold compatibility characters, then punctuation, into ASCII forms."""
    return unicode_simplify_punctuation(unicode_simplify_compatibility(string))
def unaccent(string):
    """Remove accents from ``string``."""
    return unicode_simplify_accents(string)
def replace_non_ascii(string, repl="_", pathsave=False, win_compat=False):
    """Replace non-ASCII characters in ``string`` with ``repl``.

    Runs the full simplification pipeline first (combinations,
    punctuation, compatibility, accents), then substitutes whatever
    remains non-ASCII with ``repl``.
    """
    simplified = unicode_simplify_combinations(string, pathsave, win_compat)
    simplified = unicode_simplify_punctuation(simplified, pathsave, win_compat)
    simplified = unicode_simplify_compatibility(simplified, pathsave, win_compat)
    simplified = unicode_simplify_accents(simplified)

    def _on_encode_error(err, repl="_"):
        # Substitute the replacement and resume one position further.
        return (repl, err.start + 1)

    codecs.register_error('repl', partial(_on_encode_error, repl=repl))
    # Encoding and decoding through ASCII lets the error handler perform
    # the per-character replacements.
    return simplified.encode('ascii', 'repl').decode('ascii')
def _replace_char(map, ch, pathsave=False, win_compat=False):
try:
result = map[ch]
if ch != result and pathsave:
result = sanitize_filename(result, win_compat=win_compat)
return result
except KeyError:
return ch
| 25,715
|
Python
|
.py
| 435
| 54.14023
| 122
| 0.657662
|
metabrainz/picard
| 3,687
| 383
| 10
|
GPL-2.0
|
9/5/2024, 5:11:02 PM (Europe/Amsterdam)
|
11,155
|
script_detector_weighted.py
|
metabrainz_picard/picard/util/script_detector_weighted.py
|
# -*- coding: utf-8 -*-
#
# Picard, the next-generation MusicBrainz tagger
#
# Copyright (C) 2021 Bob Swift
# Copyright (C) 2021 Philipp Wolfer
# Copyright (C) 2021-2024 Laurent Monin
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
from enum import (
IntEnum,
unique,
)
import unicodedata as ud
@unique
class ScriptSelectionOrder(IntEnum):
    """Character set script selection order.
    """
    # Use the explicitly specified script (presumably from user
    # settings — confirm against callers).
    SPECIFIED = 0
    # Select the script by weighted detection (see detect_script_weighted).
    WEIGHTED = 1
# Weighting factors compensating for scripts that need fewer (or more)
# characters than LATIN to convey the same information; generally based on
# the relative number of characters in the alphabet compared with LATIN.
SCRIPT_WEIGHTING_FACTORS = {
    'LATIN': 1.0,
    'CYRILLIC': 1.02,
    'GREEK': 0.92,
    'ARABIC': 1.08,
    'HEBREW': 0.85,
    'CJK': 2.5,
    'HANGUL': 0.92,
    'HIRAGANA': 1.77,
    'KATAKANA': 1.77,
    'THAI': 1.69,
}
def detect_script_weighted(string_to_check, threshold=0.0):
    """Detect the unicode scripts used in a string, with relative weights.

    Each alphabetic character is attributed to a script (the first word of
    its unicode character name) and counted with that script's weighting
    factor. Per-script totals are normalized to fractions of the overall
    total, and scripts below the threshold are dropped.

    Args:
        string_to_check (str): The unicode string to check
        threshold (float, optional): Minimum normalized weight a script must
            reach to be included. Defaults to 0.

    Returns:
        dict: Mapping of script name to its normalized weight (0..1).
    """
    weights = {}
    overall = 0
    for character in string_to_check:
        if not character.isalpha():
            continue
        script = ud.name(character).split(' ')[0]
        factor = SCRIPT_WEIGHTING_FACTORS.get(script, 1)
        weights[script] = weights.get(script, 0) + factor
        overall += factor
    # Normalize against the grand total and apply the threshold.
    return {
        script: weight / overall
        for script, weight in weights.items()
        if weight / overall >= threshold
    }
def list_script_weighted(string_to_check, threshold=0.0):
    """Like detect_script_weighted(), but return only the script names,
    sorted in descending order of their normalized weights.

    Args:
        string_to_check (str): The unicode string to check
        threshold (float, optional): Minimum normalized weight a script must
            reach to be included. Defaults to 0.

    Returns:
        list: Script names sorted in descending order of weighted values.
    """
    weighted = detect_script_weighted(string_to_check, threshold)
    return [
        script for script, _weight
        in sorted(weighted.items(), key=lambda item: item[1], reverse=True)
    ]
| 3,690
|
Python
|
.py
| 82
| 40.658537
| 114
| 0.729549
|
metabrainz/picard
| 3,687
| 383
| 10
|
GPL-2.0
|
9/5/2024, 5:11:02 PM (Europe/Amsterdam)
|
11,156
|
imageinfo.py
|
metabrainz_picard/picard/util/imageinfo.py
|
# -*- coding: utf-8 -*-
#
# Picard, the next-generation MusicBrainz tagger
#
# Copyright (C) 2014, 2018, 2020-2021, 2023-2024 Laurent Monin
# Copyright (C) 2017 Sambhav Kothari
# Copyright (C) 2020-2021 Philipp Wolfer
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
from dataclasses import dataclass
from io import BytesIO
import struct
from picard.util.bitreader import LSBBitReader
@dataclass
class ImageInfo:
    """Result of an image identification: dimensions, type and data length."""
    # Dimensions in pixels (0 or -1 when not available).
    width: int
    height: int
    # MIME type, e.g. 'image/jpeg'.
    mime: str
    # Canonical file extension including the leading dot, e.g. '.jpg'.
    extension: str
    # Number of bytes of the data the image was identified from.
    datalen: int
    @property
    def format(self):
        """The extension without its leading dot (e.g. 'jpg')."""
        return self.extension[1:]
class IdentificationError(Exception):
    """Base class for all image identification errors."""
    pass
class NotEnoughData(IdentificationError):
    """Raised when the supplied data is too short to identify an image."""
    pass
class UnrecognizedFormat(IdentificationError):
    """Raised when the data does not match any known image format."""
    pass
class UnexpectedError(IdentificationError):
    """Raised on unexpected parsing conditions (should not normally happen)."""
    pass
class IdentifyImageType:
    """Base class for image format identifiers.

    Subclasses set `mime` / `extension` and implement `match()` (signature
    test) and `_read()` (fill `w` and `h` from the header bytes).
    """
    mime = ''
    extension = ''
    # Dimensions in pixels; -1 until _read() sets them.
    w = -1
    h = -1
    def __init__(self, data):
        """Store `data` for parsing.

        Raises:
            NotEnoughData: if fewer than 16 bytes are provided.
        """
        self.data = data
        self.datalen = len(self.data)
        # Callers must supply at least 16 bytes of header data.
        if self.datalen < 16:
            raise NotEnoughData("Not enough data")
    def read(self):
        """Parse the header and return the resulting ImageInfo."""
        self._read()
        return self._result()
    def _result(self):
        # Package the parsed values into the public dataclass.
        return ImageInfo(
            width=int(self.w),
            height=int(self.h),
            mime=self.mime,
            extension=self.extension,
            datalen=self.datalen,
        )
    def match(self):
        """Return True if `self.data` looks like this format (subclass hook)."""
        raise NotImplementedError
    def _read(self):
        """Parse width/height into `self.w`/`self.h` (subclass hook)."""
        raise NotImplementedError
    @classmethod
    def all_extensions(cls):
        """Return all file extensions commonly used for this type."""
        return [cls.extension]
class IdentifyJPEG(IdentifyImageType):
    """Identify JPEG images; dimensions come from the SOF marker segment."""
    mime = 'image/jpeg'
    extension = '.jpg'
    def match(self):
        # http://en.wikipedia.org/wiki/JPEG
        return self.data[:2] == b'\xFF\xD8' # Start Of Image (SOI) marker
    @classmethod
    def all_extensions(cls):
        return [cls.extension, '.jpeg']
    def _read(self):
        # Walk the marker segments until a Start Of Frame (SOF) marker is
        # found; its payload carries the image dimensions.
        jpeg = BytesIO(self.data)
        # skip SOI
        jpeg.read(2)
        b = jpeg.read(1)
        try:
            # https://en.wikibooks.org/wiki/JPEG_-_Idea_and_Practice/The_header_part
            # https://www.disktuna.com/list-of-jpeg-markers/
            # https://de.wikipedia.org/wiki/JPEG_File_Interchange_Format
            SOF_markers = {
                0xC0, 0xC1, 0xC2, 0xC3,
                0xC5, 0xC6, 0xC7,
                0xC9, 0xCA, 0xCB,
                0xCD, 0xCE, 0xCF
            }
            while b and ord(b) != 0xDA: # Start Of Scan (SOS)
                # a marker is 0xFF followed by one (or more, used as
                # padding) non-0xFF bytes
                while ord(b) != 0xFF:
                    b = jpeg.read(1)
                while ord(b) == 0xFF:
                    b = jpeg.read(1)
                if ord(b) in SOF_markers:
                    jpeg.read(2) # parameter length (2 bytes)
                    jpeg.read(1) # data precision (1 byte)
                    # height precedes width in the SOF payload
                    self.h, self.w = struct.unpack('>HH', jpeg.read(4))
                    break
                else:
                    # read 2 bytes as integer
                    length = int(struct.unpack('>H', jpeg.read(2))[0])
                    # skip data
                    jpeg.read(length - 2)
                    b = jpeg.read(1)
        # truncated/malformed data: leave w/h at their defaults
        except struct.error:
            pass
        except ValueError:
            pass
class IdentifyGIF(IdentifyImageType):
    """Identify GIF images (GIF87a / GIF89a signatures)."""
    mime = 'image/gif'
    extension = '.gif'
    def match(self):
        # A GIF stream begins with one of the two version signatures.
        # http://en.wikipedia.org/wiki/Graphics_Interchange_Format
        signature = self.data[:6]
        return signature == b'GIF87a' or signature == b'GIF89a'
    def _read(self):
        # Logical screen width and height: two little-endian uint16 values
        # directly after the 6-byte signature.
        self.w, self.h = struct.unpack_from('<HH', self.data, 6)
class IdentifyPDF(IdentifyImageType):
    """Identify PDF documents; dimensions are not parsed and report as 0."""
    mime = 'application/pdf'
    extension = '.pdf'
    def match(self):
        # PDF files start with the "%PDF" magic
        return self.data[:4] == b'%PDF'
    def _read(self):
        # Page dimensions are not parsed; report 0x0.
        self.w = self.h = 0
class IdentifyPNG(IdentifyImageType):
    """Identify PNG images via the PNG signature plus IHDR chunk."""
    mime = 'image/png'
    extension = '.png'
    def match(self):
        # http://en.wikipedia.org/wiki/Portable_Network_Graphics
        # http://www.w3.org/TR/PNG/#11IHDR
        has_signature = self.data[:8] == b'\x89PNG\x0D\x0A\x1A\x0A'
        has_ihdr = self.data[12:16] == b'IHDR'
        return has_signature and has_ihdr
    def _read(self):
        # IHDR begins with width and height as big-endian uint32 values.
        self.w, self.h = struct.unpack_from('>LL', self.data, 16)
class IdentifyWebP(IdentifyImageType):
    """Identify WebP images (simple lossy, simple lossless and extended)."""
    mime = 'image/webp'
    extension = '.webp'
    def match(self):
        # WebP is a RIFF container with the 'WEBP' FourCC.
        return self.data[:4] == b'RIFF' and self.data[8:12] == b'WEBP'
    def _read(self):
        data = self.data
        # See https://developers.google.com/speed/webp/docs/riff_container
        format = data[12:16]
        # Simple File Format (Lossy)
        if format == b'VP8 ':
            # See https://tools.ietf.org/html/rfc6386#section-9.1
            # Locate the VP8 keyframe start code.
            index = data.find(b'\x9d\x01\x2a')
            if index != -1:
                if self.datalen < index + 7:
                    raise NotEnoughData("Not enough data for WebP VP8")
                self.w, self.h = struct.unpack('<HH', data[index + 3:index + 7])
                # Width and height are encoded as 14 bit integers, ignore the first 2 bits
                self.w &= 0x3fff
                self.h &= 0x3fff
            else:
                # No keyframe start code found: dimensions unknown.
                self.w, self.h = 0, 0
        # Simple File Format (Lossless)
        elif format == b'VP8L':
            if self.datalen < 25:
                raise NotEnoughData("Not enough data for WebP VP8L")
            # 14-bit width and height, each stored minus one.
            reader = LSBBitReader(BytesIO(data[21:25]))
            self.w = reader.bits(14) + 1
            self.h = reader.bits(14) + 1
        # Extended File Format
        elif format == b'VP8X':
            if self.datalen < 30:
                raise NotEnoughData("Not enough data for WebP VP8X")
            # 24-bit canvas width and height, each stored minus one.
            reader = LSBBitReader(BytesIO(data[24:30]))
            self.w = reader.bits(24) + 1
            self.h = reader.bits(24) + 1
        else:
            # Unknown chunk FourCC: no dimensions available.
            self.h, self.w = 0, 0
# TIFF header byte-order marks ("II" = little endian, "MM" = big endian).
TIFF_BYTE_ORDER_LSB = b'II'
TIFF_BYTE_ORDER_MSB = b'MM'
# IFD tag ids for the image dimensions.
TIFF_TAG_IMAGE_LENGTH = 257
TIFF_TAG_IMAGE_WIDTH = 256
# IFD value types understood by IdentifyTiff._read_value().
TIFF_TYPE_SHORT = 3
TIFF_TYPE_LONG = 4
class IdentifyTiff(IdentifyImageType):
    """Identify TIFF images; dimensions are read from the first IFD."""
    mime = 'image/tiff'
    extension = '.tiff'
    def match(self):
        # Little-endian ("II*\0") or big-endian ("MM\0*") TIFF magic.
        return self.data[:4] == b'II*\x00' or self.data[:4] == b'MM\x00*'
    @classmethod
    def all_extensions(cls):
        return [cls.extension, '.tif']
    def _read(self):
        # See https://www.adobe.io/content/dam/udp/en/open/standards/tiff/TIFF6.pdf
        data = self.data
        self.w, self.h = 0, 0
        # The first two bytes select the byte order of all following fields.
        byte_order = data[:2]
        if byte_order == TIFF_BYTE_ORDER_LSB:
            order = '<'
        elif byte_order == TIFF_BYTE_ORDER_MSB:
            order = '>'
        else:
            raise UnexpectedError("TIFF: unexpected byte order %r" % byte_order)
        try:
            # Offset of the first image file directory (IFD).
            offset, = struct.unpack(order + 'I', data[4:8])
            entry_count, = struct.unpack(order + 'H', data[offset:offset + 2])
            pos = offset + 2
            # Scan the 12-byte IFD entries for the width/length tags,
            # returning early once both dimensions are known.
            for i in range(entry_count):
                field = data[pos:pos + 12]
                tag, tiff_type = struct.unpack(order + 'HH', field[:4])
                if tag == TIFF_TAG_IMAGE_WIDTH:
                    self.w = self._read_value(tiff_type, order, field[8:12])
                    if self.h:
                        return
                elif tag == TIFF_TAG_IMAGE_LENGTH:
                    self.h = self._read_value(tiff_type, order, field[8:12])
                    if self.w:
                        return
                pos += 12
        # truncated data: keep whatever dimensions were read (default 0)
        except struct.error:
            pass
    @staticmethod
    def _read_value(tiff_type, order, data):
        """Decode a SHORT or LONG IFD value; raise UnexpectedError otherwise."""
        if tiff_type == TIFF_TYPE_LONG:
            value = data[:4]
            struct_format = order + 'I'
        elif tiff_type == TIFF_TYPE_SHORT:
            value = data[:2]
            struct_format = order + 'H'
        else:
            raise UnexpectedError("TIFF: unexpected field type %s" % tiff_type)
        return struct.unpack(struct_format, value)[0]
# Identifier classes tried, in this order, by identify().
knownimagetypes = (
    IdentifyJPEG,
    IdentifyPNG,
    IdentifyPDF,
    IdentifyGIF,
    IdentifyWebP,
    IdentifyTiff,
)
def identify(data):
    """Parse data for jpg, gif, png, webp, tiff and pdf metadata

    If successfully recognized, it returns an ImageInfo instance with:
    - width
    - height
    - mime (mimetype)
    - extension
    - datalen (data length)

    Exceptions:
    - `NotEnoughData` if data has less than 16 bytes.
    - `UnrecognizedFormat` if data isn't recognized as a known format.
    - `UnexpectedError` if unhandled cases (shouldn't happen).
    - `IdentificationError` is parent class for all preceding exceptions.
    """
    # Try each known identifier in order; the first signature match wins.
    for cls in knownimagetypes:
        obj = cls(data)
        if obj.match():
            return obj.read()
    raise UnrecognizedFormat("Unrecognized image data")
def supports_mime_type(mime):
    """Return True if `mime` matches one of the supported image types."""
    for image_type in knownimagetypes:
        if image_type.mime == mime:
            return True
    return False
def get_supported_extensions():
    """Yield every file extension (with leading dot) of all supported types."""
    for image_type in knownimagetypes:
        for extension in image_type.all_extensions():
            yield extension
| 9,496
|
Python
|
.py
| 264
| 27.136364
| 90
| 0.585369
|
metabrainz/picard
| 3,687
| 383
| 10
|
GPL-2.0
|
9/5/2024, 5:11:02 PM (Europe/Amsterdam)
|
11,157
|
preservedtags.py
|
metabrainz_picard/picard/util/preservedtags.py
|
# -*- coding: utf-8 -*-
#
# Picard, the next-generation MusicBrainz tagger
#
# Copyright (C) 2018, 2020-2021 Laurent Monin
# Copyright (C) 2019-2020 Philipp Wolfer
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
from picard.config import get_config
class PreservedTags:
    """Set of tag names to preserve, persisted in the user configuration.

    Tags are stored normalized (stripped and lower-cased); membership tests
    normalize their argument the same way.
    """
    opt_name = 'preserved_tags'
    def __init__(self):
        self._tags = self._from_config()
    def _to_config(self):
        # Persist the current set, sorted for a stable config value.
        get_config().setting[self.opt_name] = sorted(self._tags)
    def _from_config(self):
        # Load stored names, normalize them and drop empty entries.
        stored = get_config().setting[self.opt_name]
        return {tag for tag in map(self._normalize_tag, stored) if tag}
    @staticmethod
    def _normalize_tag(tag):
        return tag.strip().lower()
    def add(self, name):
        self._tags.add(self._normalize_tag(name))
        self._to_config()
    def discard(self, name):
        self._tags.discard(self._normalize_tag(name))
        self._to_config()
    def __contains__(self, key):
        return self._normalize_tag(key) in self._tags
| 1,700
|
Python
|
.py
| 43
| 35.186047
| 80
| 0.70152
|
metabrainz/picard
| 3,687
| 383
| 10
|
GPL-2.0
|
9/5/2024, 5:11:02 PM (Europe/Amsterdam)
|
11,158
|
filenaming.py
|
metabrainz_picard/picard/util/filenaming.py
|
# -*- coding: utf-8 -*-
#
# Picard, the next-generation MusicBrainz tagger
#
# Copyright (C) 2013-2014 Ionuț Ciocîrlan
# Copyright (C) 2013-2014, 2018-2024 Laurent Monin
# Copyright (C) 2014 Michael Wiencek
# Copyright (C) 2017 Sambhav Kothari
# Copyright (C) 2017 Ville Skyttä
# Copyright (C) 2018 Antonio Larrosa
# Copyright (C) 2019-2022 Philipp Wolfer
# Copyright (C) 2022 Bob Swift
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
from enum import IntEnum
import math
import os
import re
import shutil
import struct
import sys
import unicodedata
from PyQt6.QtCore import QStandardPaths
from picard import log
from picard.const.sys import (
IS_LINUX,
IS_MACOS,
IS_WIN,
)
from picard.util import (
WIN_MAX_DIRPATH_LEN,
WIN_MAX_FILEPATH_LEN,
WIN_MAX_NODE_LEN,
_io_encoding,
decode_filename,
encode_filename,
samefile,
)
win32api = None
if IS_WIN:
try:
import pywintypes
import win32api
except ImportError as e:
log.warning("pywin32 not available: %s", e)
def _get_utf16_length(text):
"""Returns the number of code points used by a unicode object in its
UTF-16 representation.
"""
if isinstance(text, bytes):
return len(text)
# if this is a narrow Python build, len will in fact return exactly
# what we're looking for
if sys.maxunicode == 0xFFFF:
return len(text)
# otherwise, encode the string in UTF-16 using the system's endianness,
# and divide the resulting length by 2
return len(text.encode("utf-16%ce" % sys.byteorder[0])) // 2
def _shorten_to_utf16_length(text, length):
    """Truncates a str object to the given number of UTF-16 code points.

    Takes care not to cut a surrogate pair in half: if the cut would land
    in the middle of one, the pair's leading surrogate is dropped as well.
    """
    assert isinstance(text, str), "This function only works on unicode"
    # if this is a narrow Python build, regular slicing will do exactly
    # what we're looking for
    if sys.maxunicode == 0xFFFF:
        shortened = text[:length]
        # before returning, we need to check if we didn't cut in the middle
        # of a surrogate pair
        last = shortened[-1:]
        # 0xD800-0xDBFF is the high (leading) surrogate range
        if last and 0xD800 <= ord(last) <= 0xDBFF:
            # it's a leading surrogate alright
            return shortened[:-1]
        # else...
        return shortened
    # otherwise, encode the string in UTF-16 using the system's endianness,
    # and shorten by twice the length
    enc = "utf-16%ce" % sys.byteorder[0]
    shortened = text.encode(enc)[:length * 2]
    # if we hit a surrogate pair, get rid of the last codepoint
    last = shortened[-2:]
    if last and 0xD800 <= struct.unpack("=H", last)[0] <= 0xDBFF:
        shortened = shortened[:-2]
    return shortened.decode(enc)
def _shorten_to_utf16_nfd_length(text, length):
    """Truncate `text` to `length` UTF-16 code points measured in NFD form.

    The text is decomposed (NFD) before truncation and recomposed (NFC)
    on return.
    """
    text = unicodedata.normalize('NFD', text)
    newtext = _shorten_to_utf16_length(text, length)
    # if the first cut-off character was a combining one, remove our last
    # character too, so no base character is left with a partial mark set
    try:
        if unicodedata.combining(text[len(newtext)]):
            newtext = newtext[:-1]
    except IndexError:
        pass
    return unicodedata.normalize('NFC', newtext)
# Matches "utf8", "utf-8", "utf_8" in any case.
_re_utf8 = re.compile(r'^utf([-_]?8)$', re.IGNORECASE)
def _shorten_to_bytes_length(text, length): # noqa: E302
    """Truncates a unicode object to the given number of bytes it would take
    when encoded in the "filesystem encoding".

    Never cuts inside a multi-byte character: the result decodes cleanly.
    """
    assert isinstance(text, str), "This function only works on unicode"
    raw = encode_filename(text)
    # maybe there's no need to truncate anything
    if len(raw) <= length:
        return text
    # or maybe there's nothing multi-byte here
    if len(raw) == len(text):
        return text[:length]
    # if we're dealing with utf-8, we can use an efficient algorithm
    # to deal with character boundaries
    if _re_utf8.match(_io_encoding):
        i = length
        # a UTF-8 intermediate byte starts with the bits 10xxxxxx,
        # so ord(char) & 0b11000000 = 0b10000000
        while i > 0 and (raw[i] & 0xC0) == 0x80:
            i -= 1
        return decode_filename(raw[:i])
    # finally, a brute force approach
    i = length
    while i > 0:
        try:
            return decode_filename(raw[:i])
        except UnicodeDecodeError:
            pass
        i -= 1
    # hmm. we got here?
    return ""
class ShortenMode(IntEnum):
    """How shorten_filename() measures a file name's length."""
    BYTES = 0      # filesystem-encoding bytes
    UTF16 = 1      # UTF-16 code units
    UTF16_NFD = 2  # UTF-16 code units, measured after NFD normalization
def shorten_filename(filename, length, mode): # noqa: E302
    """Truncates a filename to the given number of thingies,
    as implied by `mode`.

    Args:
        filename: File name as str or bytes.
        length: Maximum length in bytes or UTF-16 code units, per `mode`.
        mode: A ShortenMode value selecting the measuring unit.

    Returns:
        The truncated file name, same type as `filename`.

    Raises:
        ValueError: if `mode` is not a known ShortenMode value.
    """
    # bytes input is sliced directly, whatever the mode
    if isinstance(filename, bytes):
        return filename[:length]
    if mode == ShortenMode.BYTES:
        return _shorten_to_bytes_length(filename, length)
    if mode == ShortenMode.UTF16:
        return _shorten_to_utf16_length(filename, length)
    if mode == ShortenMode.UTF16_NFD:
        return _shorten_to_utf16_nfd_length(filename, length)
    # Previously an unknown mode silently fell through and returned None,
    # hiding caller bugs; fail loudly instead.
    raise ValueError("Unknown shorten mode: %r" % (mode,))
def shorten_path(path, length, mode):
    """Reduce path nodes' length to given limit(s).

    path: Absolute or relative path to shorten.
    length: Maximum number of code points / bytes allowed in a node.
    mode: One of the enum values from ShortenMode.

    Every directory node is shortened to `length`; the file name is
    shortened to `length` minus the extension's length, and the extension
    itself is preserved.
    """
    def shorten(name, length):
        # empty nodes (e.g. the leading separator of absolute paths) pass through
        return name and shorten_filename(name, length, mode).strip() or ""
    dirpath, filename = os.path.split(path)
    fileroot, ext = os.path.splitext(filename)
    return os.path.join(
        os.path.join(*[shorten(node, length)
                       for node in dirpath.split(os.path.sep)]),
        shorten(fileroot, length - len(ext)) + ext
    )
def _shorten_to_utf16_ratio(text, ratio):
    """Shortens the string to the given ratio (and strips it).

    The new length is floor(current_length / ratio), but at least 1,
    measured in UTF-16 code units.
    """
    length = _get_utf16_length(text)
    limit = max(1, int(math.floor(length / ratio)))
    if isinstance(text, bytes):
        return text[:limit].strip()
    else:
        return _shorten_to_utf16_length(text, limit).strip()
class WinPathTooLong(OSError):
    """Raised when a path cannot be shortened enough for Windows limits."""
    pass
def _make_win_short_filename(relpath, reserved=0):
    r"""Shorten a relative file path according to WinAPI quirks.

    relpath: The file's path.
    reserved: Number of characters reserved for the parent path to be joined with,
        e.g. 3 if it will be joined with "X:\", respectively 5 for "X:\y\".
        (note the inclusion of the final backslash)

    Raises:
        WinPathTooLong: if the directory part cannot fit even
        single-character node names.
    """
    # See:
    # http://msdn.microsoft.com/en-us/library/windows/desktop/aa365247(v=vs.85).aspx
    #
    # The MAX_PATH is 260 characters, with this possible format for a file:
    # "X:\<244-char dir path>\<12-char filename><NUL>".
    # Use a shorter max node length then the theoretically allowed 255 characters
    # to leave room for longer file names
    MAX_NODE_LENGTH = WIN_MAX_NODE_LEN - 29
    # to make predictable directory paths we need to fit the directories in
    # WIN_MAX_DIRPATH_LEN, and truncate the filename to whatever's left
    remaining = WIN_MAX_DIRPATH_LEN - reserved
    # to make things more readable...
    def shorten(path, length):
        return shorten_path(path, length, mode=ShortenMode.UTF16)
    xlength = _get_utf16_length
    # shorten to MAX_NODE_LENGTH from the beginning
    relpath = shorten(relpath, MAX_NODE_LENGTH)
    dirpath, filename = os.path.split(relpath)
    # what if dirpath is already the right size?
    dplen = xlength(dirpath)
    if dplen <= remaining:
        filename_max = WIN_MAX_FILEPATH_LEN - (reserved + dplen + 1) # the final separator
        filename = shorten(filename, filename_max)
        return os.path.join(dirpath, filename)
    # compute the directory path and the maximum number of characters
    # in a filename, and cache them
    # NOTE(review): this per-function cache grows unbounded over a session;
    # assumed acceptable for typical path variety — TODO confirm
    try:
        computed = _make_win_short_filename._computed
    except AttributeError:
        computed = _make_win_short_filename._computed = {}
    try:
        finaldirpath, filename_max = computed[(dirpath, reserved)]
    except KeyError:
        dirnames = dirpath.split(os.path.sep)
        # allocate space for the separators,
        # but don't include the final one
        remaining -= len(dirnames) - 1
        # make sure we can have at least single-character dirnames
        average = float(remaining) / len(dirnames)
        if average < 1:
            raise WinPathTooLong(
                "Path too long. "
                "You need to move renamed files to a different directory."
            )
        # try to reduce directories exceeding average with a ratio proportional
        # to how much they exceed with; if not possible, reduce all dirs
        # proportionally to their initial length
        # NOTE(review): len() counts code points while xlength counts UTF-16
        # units; assumed equivalent for BMP-only dir names — TODO confirm
        shortdirnames = [dn for dn in dirnames if len(dn) <= average]
        totalchars = sum(map(xlength, dirnames))
        shortdirchars = sum(map(xlength, shortdirnames))
        # do we have at least 1 char for longdirs?
        if remaining > shortdirchars + len(dirnames) - len(shortdirnames):
            ratio = float(totalchars - shortdirchars) / (remaining - shortdirchars)
            for i, dn in enumerate(dirnames):
                if len(dn) > average:
                    dirnames[i] = _shorten_to_utf16_ratio(dn, ratio)
        else:
            ratio = float(totalchars) / remaining
            dirnames = [_shorten_to_utf16_ratio(dn, ratio) for dn in dirnames]
        # here it is:
        finaldirpath = os.path.join(*dirnames)
        # did we win back some chars from .floor()s and .strip()s?
        recovered = remaining - sum(map(xlength, dirnames))
        # so how much do we have left for the filename?
        filename_max = WIN_MAX_FILEPATH_LEN - WIN_MAX_DIRPATH_LEN - 1 + recovered
        #                                                         ^ the final separator
        # and don't forget to cache
        computed[(dirpath, reserved)] = (finaldirpath, filename_max)
    # finally...
    filename = shorten(filename, filename_max)
    return os.path.join(finaldirpath, filename)
def _get_mount_point(target):
"""Finds the target's mountpoint."""
# and caches it for future lookups
try:
mounts = _get_mount_point._mounts
except AttributeError:
mounts = _get_mount_point._mounts = {}
try:
mount = mounts[target]
except KeyError:
mount = target
while mount and not os.path.ismount(mount):
mount = os.path.dirname(mount)
mounts[target] = mount
return mount
# NOTE: this could be merged with the function above, and get all needed info
# in a single call, returning the filesystem type as well. (but python's
# posix.statvfs_result doesn't implement f_fsid)
def _get_filename_limit(target):
"""Finds the maximum filename length under the given directory."""
# and caches it
try:
limits = _get_filename_limit._limits
except AttributeError:
limits = _get_filename_limit._limits = {}
try:
limit = limits[target]
except KeyError:
# we need to call statvfs on an existing target
d = target
while not os.path.exists(d):
d = os.path.dirname(d)
# XXX http://bugs.python.org/issue18695
try:
limit = os.statvfs(d).f_namemax
except UnicodeEncodeError:
limit = os.statvfs(d.encode(_io_encoding)).f_namemax
limits[target] = limit
return limit
def make_short_filename(basedir, relpath, win_shorten_path=False, relative_to=""):
    """Shorten a filename's path to proper limits.

    basedir: Absolute path of the base directory where files will be moved.
    relpath: File path, relative from the base directory.
    win_shorten_path: Enforce 259 character limit for the path for Windows compatibility.
    relative_to: An ancestor directory of basedir, against which win_shorten_path
        will be applied.

    Raises:
        WinPathTooLong: propagated from _make_win_short_filename when
        win_shorten_path is set and the path cannot fit.
    """
    # only deal with absolute paths. it saves a lot of grief,
    # and is the right thing to do, even for renames.
    try:
        basedir = os.path.abspath(basedir)
    except FileNotFoundError:
        # os.path.abspath raises an exception if basedir is a relative path and
        # cwd doesn't exist anymore
        basedir = QStandardPaths.writableLocation(QStandardPaths.StandardLocation.MusicLocation)
    # also, make sure the relative path is clean
    relpath = os.path.normpath(relpath)
    if win_shorten_path and relative_to:
        relative_to = os.path.abspath(relative_to)
        assert basedir.startswith(relative_to) and \
            basedir.split(relative_to)[1][:1] in (os.path.sep, ''), \
            "`relative_to` must be an ancestor of `basedir`"
    # always strip the relpath parts
    relpath = os.path.join(*[part.strip() for part in relpath.split(os.path.sep)])
    # if we're on windows, delegate the work to a windows-specific function
    if IS_WIN:
        if win_shorten_path:
            reserved = len(basedir)
            if not basedir.endswith(os.path.sep):
                reserved += 1
            return _make_win_short_filename(relpath, reserved)
        else:
            return shorten_path(relpath, WIN_MAX_NODE_LEN, mode=ShortenMode.UTF16)
    # if we're being windows compatible, figure out how much
    # needs to be reserved for the basedir part
    elif win_shorten_path:
        # if a relative ancestor wasn't provided,
        # use the basedir's mount point
        if not relative_to:
            relative_to = _get_mount_point(basedir)
            # if it's root, presume the parent will be copied over
            # to windows, and hope for the best
            if relative_to == os.path.sep:
                relative_to = os.path.dirname(basedir)
        reserved = len(basedir) - len(relative_to) + 3 + 1
        #            the drive name ^ + ^ the final separator
        relpath = _make_win_short_filename(relpath, reserved)
    # on *nix we can consider there is no path limit, but there is
    # a filename length limit.
    if IS_MACOS:
        # on OS X (i.e. HFS+), this is expressed in UTF-16 code points,
        # in NFD normalization form
        relpath = shorten_path(relpath, 255, mode=ShortenMode.UTF16_NFD)
    else:
        # on everything else the limit is expressed in bytes,
        # and filesystem-dependent
        limit = _get_filename_limit(basedir)
        relpath = shorten_path(relpath, limit, mode=ShortenMode.BYTES)
    return relpath
def samefile_different_casing(path1, path2):
    """Return True if both paths refer to the same file but their file
    names differ only in casing.

    Returns False if the paths are identical, refer to different files,
    don't exist, or carry identically-cased file names.
    """
    path1 = os.path.normpath(path1)
    path2 = os.path.normpath(path2)
    if path1 == path2:
        return False
    if not (os.path.exists(path1) and os.path.exists(path2)):
        return False
    dir1 = os.path.normcase(os.path.dirname(path1))
    dir2 = os.path.normcase(os.path.dirname(path2))
    # realpath can fail if cwd does not exist and the paths are relative,
    # or on Windows if drives are mounted without mount manager; fall back
    # to the normcased directories in that case.
    try:
        dir1 = os.path.realpath(dir1)
        dir2 = os.path.realpath(dir2)
    except OSError:
        pass
    if dir1 != dir2:
        return False
    if not samefile(path1, path2):
        return False
    name1 = os.path.basename(path1)
    name2 = os.path.basename(path2)
    return name1 != name2 and name1.lower() == name2.lower()
def _make_unique_temp_name(target_path):
i = 0
target_dir = os.path.dirname(target_path)
target_filename = os.path.basename(target_path)
while True:
# Attempt to get a non-existant temporary name for the file
# without changing path length.
temp_filename = '.%s%02d' % (target_filename[:-3], i)
temp_path = os.path.join(target_dir, temp_filename)
if not os.path.exists(temp_path):
return temp_path
i += 1
def _move_force_rename(source_path, target_path):
    """Moves a file by renaming it first to a temporary name.

    Ensure file casing changes on system's not natively supporting this:
    the move is done in two steps, first to a unique temporary name in the
    target directory, then an os.rename to the final name.
    """
    temp_path = _make_unique_temp_name(target_path)
    shutil.move(source_path, temp_path)
    os.rename(temp_path, target_path)
def move_ensure_casing(source_path, target_path):
    """Moves a file from source_path to target_path.

    If the move would result just in the name changing the case apply workarounds
    for Linux and Windows to ensure the case change is applied on case-insensitive
    file systems. Otherwise use shutil.move to move the file.
    """
    source_path = os.path.normpath(source_path)
    target_path = os.path.normpath(target_path)
    # Nothing to do when both paths are literally identical.
    if source_path == target_path:
        return
    # Special handling is only required if both paths refer to the same file
    # but the file name differs in casing.
    # Also macOS does allow renaming only the casing and does not need special
    # handling.
    if not IS_MACOS and samefile_different_casing(source_path, target_path):
        if IS_LINUX:
            # On Linux always force a double move
            _move_force_rename(source_path, target_path)
            return
        elif IS_WIN and win32api:
            # Windows supports case renaming for NTFS and SMB shares, but not
            # on FAT32 or exFAT file systems. Perform a normal move first,
            # then check the result.
            shutil.move(source_path, target_path)
            try:
                # Get the path in the actual casing as stored on disk
                actual_path = win32api.GetLongPathNameW(win32api.GetShortPathName(target_path))
                if samefile_different_casing(target_path, actual_path):
                    # The casing did not stick: force it with a double move.
                    _move_force_rename(source_path, target_path)
            except pywintypes.error:
                pass
            return
    # Just perform a normal move
    try:
        shutil.move(source_path, target_path)
    except shutil.SameFileError:
        # Sometimes different paths refer to the same file (e.g. network path / local path on Windows)
        pass
def make_save_path(path, win_compat=False, mac_compat=False):
    """Performs a couple of cleanups on a path to avoid side effects and incompatibilities.

    - If win_compat is True, trailing dots in file and directory names are
      replaced by '_', as they are unsupported on Windows (the dot delimits
      the file extension)
    - Leading dots in file and directory names are replaced by '_'. Such files
      cannot be properly handled by Windows Explorer and on Unix like systems
      they count as hidden
    - If mac_compat is True, normalize precomposed Unicode characters on macOS
    - Remove unicode zero-width space (\\u200B) from path

    Args:
        path: filename or path to clean
        win_compat: Set to True, if Windows compatibility is required
        mac_compat: Set to True, if macOS compatibility is required

    Returns: sanitized path
    """
    if win_compat:
        # A dot directly before a separator (or at the very end of the
        # path) is a trailing dot of a node name.
        for sep in ('/', '\\'):
            path = path.replace('.' + sep, '_' + sep)
        if path[-1:] == '.':
            path = path[:-1] + '_'
    # A dot directly after a separator (or at the start) is a leading dot.
    for sep in ('/', '\\'):
        path = path.replace(sep + '.', sep + '_')
    if path[:1] == '.':
        path = '_' + path[1:]
    # Fix for precomposed characters on macOS.
    if mac_compat:
        path = unicodedata.normalize("NFD", path)
    # Remove unicode zero-width space (\u200B) from path.
    return path.replace("\u200B", "")
def get_available_filename(new_path, old_path=None):
    """Returns an available file name.

    While `new_path` already exists, " (N)" is appended to the file name
    (before the extension), with N counting upwards from 1. If `old_path`
    is given, `new_path` is kept unchanged when it points to the same file
    location.

    Args:
        new_path: The requested file name for the file
        old_path: The previous name of the file

    Returns: A unique available file name.
    """
    stem, ext = os.path.splitext(new_path)
    check_old = bool(old_path and os.path.exists(old_path))
    counter = 1
    candidate = new_path
    while os.path.exists(candidate):
        # An existing candidate is fine if it is the file's old location.
        if check_old and samefile(old_path, candidate):
            break
        candidate = "%s (%d)%s" % (stem, counter, ext)
        counter += 1
    return candidate
def replace_extension(filename, new_ext):
    """Replaces the extension in filename with new_ext.

    If the file has no extension the extension is added.

    Args:
        filename: A file name
        new_ext: New file extension (with or without a leading dot)

    Returns: filename with replaced file extension
    """
    stem = os.path.splitext(filename)[0]
    return '%s.%s' % (stem, new_ext.lstrip('.'))
| 21,144
|
Python
|
.py
| 491
| 36.319756
| 102
| 0.664383
|
metabrainz/picard
| 3,687
| 383
| 10
|
GPL-2.0
|
9/5/2024, 5:11:02 PM (Europe/Amsterdam)
|
11,159
|
__init__.py
|
metabrainz_picard/picard/plugins/__init__.py
|
# -*- coding: utf-8 -*-
#
# Picard, the next-generation MusicBrainz tagger
#
# Copyright (C) 2006-2007 Lukáš Lalinský
# Copyright (C) 2020-2021 Laurent Monin
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
| 868
|
Python
|
.py
| 20
| 42.25
| 80
| 0.773964
|
metabrainz/picard
| 3,687
| 383
| 10
|
GPL-2.0
|
9/5/2024, 5:11:02 PM (Europe/Amsterdam)
|
11,160
|
browser.py
|
metabrainz_picard/picard/browser/browser.py
|
# -*- coding: utf-8 -*-
#
# Picard, the next-generation MusicBrainz tagger
#
# Copyright (C) 2006-2007, 2011 Lukáš Lalinský
# Copyright (C) 2011-2013 Michael Wiencek
# Copyright (C) 2012 Chad Wilson
# Copyright (C) 2012-2013, 2018, 2021-2022, 2024 Philipp Wolfer
# Copyright (C) 2013, 2018, 2020-2021, 2024 Laurent Monin
# Copyright (C) 2016 Suhas
# Copyright (C) 2016-2017 Sambhav Kothari
# Copyright (C) 2018 Vishal Choudhary
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
from http.server import (
BaseHTTPRequestHandler,
HTTPServer,
)
import re
import threading
from urllib.parse import (
parse_qs,
urlparse,
)
from PyQt6 import QtCore
from picard import (
PICARD_APP_NAME,
PICARD_ORG_NAME,
PICARD_VERSION_STR,
log,
)
from picard.browser import addrelease
from picard.config import get_config
from picard.oauth import OAuthInvalidStateError
from picard.util import mbid_validate
from picard.util.thread import to_main
try:
from http.server import ThreadingHTTPServer
except ImportError:
from socketserver import ThreadingMixIn
class ThreadingHTTPServer(ThreadingMixIn, HTTPServer):
daemon_threads = True
SERVER_VERSION = '%s-%s/%s' % (PICARD_ORG_NAME, PICARD_APP_NAME, PICARD_VERSION_STR)
RE_VALID_ORIGINS = re.compile(r'^(?:[^\.]+\.)*musicbrainz\.org$')
def _is_valid_origin(origin):
    """Return True if `origin` may talk to the local integration server.

    Accepted origins are *.musicbrainz.org hosts and the host configured
    in the 'server_host' setting.
    """
    try:
        hostname = urlparse(origin).hostname
    except ValueError:
        return False
    if not hostname:
        return False
    if RE_VALID_ORIGINS.match(hostname):
        return True
    return get_config().setting['server_host'] == hostname
class BrowserIntegration(QtCore.QObject):
    """Local HTTP server accepting requests from the MusicBrainz website."""

    # Emitted with the new port when the server starts, and with 0 on stop.
    listen_port_changed = QtCore.pyqtSignal(int)

    def __init__(self, parent=None):
        super().__init__(parent)
        self.server = None

    @property
    def host_address(self):
        """Address the server listens on, or '' when not running."""
        if not self.server:
            return ''
        return self.server.server_address[0]

    @property
    def port(self):
        """Port the server listens on, or 0 when not running."""
        if not self.server:
            return 0
        return self.server.server_address[1]

    @property
    def is_running(self):
        # The server attribute is only set while serving.
        return self.server is not None

    def start(self):
        """Start the server, trying ports upward from the configured one."""
        if self.server:
            self.stop()
        config = get_config()
        if config.setting["browser_integration_localhost_only"]:
            host_address = '127.0.0.1'
        else:
            host_address = '0.0.0.0'  # nosec
        try:
            # Probe ports until binding succeeds; for-else runs when no
            # port up to 65534 was free.
            for port in range(config.setting["browser_integration_port"], 65535):
                try:
                    self.server = ThreadingHTTPServer((host_address, port), RequestHandler)
                except OSError:
                    continue
                log.info("Starting the browser integration (%s:%d)", host_address, port)
                self.listen_port_changed.emit(port)
                # Serve on a background thread so the UI stays responsive.
                threading.Thread(target=self.server.serve_forever).start()
                break
            else:
                log.error("Failed finding an available port for the browser integration.")
                self.stop()
        except Exception:
            log.error("Failed starting the browser integration on %s", host_address, exc_info=True)

    def stop(self):
        """Shut the server down if it is running."""
        if self.server:
            try:
                log.info("Stopping the browser integration")
                self.server.shutdown()
                self.server.server_close()
                self.server = None
                # self.port is 0 now that self.server is cleared.
                self.listen_port_changed.emit(self.port)
            except Exception:
                log.error("Failed stopping the browser integration", exc_info=True)
        else:
            log.debug("Browser integration inactive, no need to stop")
class RequestHandler(BaseHTTPRequestHandler):
    """Handles HTTP requests sent to the browser integration server."""

    def do_OPTIONS(self):
        # CORS preflight: grant access only to trusted origins
        # (see _is_valid_origin).
        origin = self.headers['origin']
        if _is_valid_origin(origin):
            self.send_response(204)
            self.send_header('Access-Control-Allow-Origin', origin)
            self.send_header('Access-Control-Allow-Methods', 'GET')
            self.send_header('Access-Control-Allow-Credentials', 'false')
            self.send_header('Access-Control-Allow-Private-Network', 'true')
            self.send_header('Access-Control-Max-Age', 3600)
            self.send_header('Vary', 'Origin')
        else:
            self.send_response(401)
        self.end_headers()

    def do_GET(self):
        try:
            self._handle_get()
        except Exception:
            log.error('Browser integration failed handling request', exc_info=True)
            self._response(500, 'Unexpected request error')

    def log_error(self, format, *args):
        # Route http.server's own logging through Picard's logger.
        log.error(format, *args)

    def log_message(self, format, *args):
        log.info(format, *args)

    def _handle_get(self):
        """Dispatch a GET request according to its path."""
        parsed = urlparse(self.path)
        args = parse_qs(parsed.query)
        action = parsed.path
        if action == '/':
            self._response(200, SERVER_VERSION)
        elif action == '/openalbum':
            self._load_mbid('album', args)
        elif action == '/opennat':
            self._load_mbid('nat', args)
        elif action == '/add' and addrelease.is_available():
            self._add_release(args)
        elif action == '/auth':
            self._auth(args)
        else:
            self._response(404, 'Unknown action.')

    def _load_mbid(self, type, args):
        """Load the entity of the given type from the `id` query parameter."""
        if 'id' in args and args['id']:
            mbid = args['id'][0]
            if not mbid_validate(mbid):
                self._response(400, '"id" is not a valid MBID.')
            else:
                tagger = QtCore.QCoreApplication.instance()
                # Loading must run on the main (Qt) thread, not this
                # request handler thread.
                to_main(tagger.load_mbid, type, mbid)
                self._response(200, 'MBID "%s" loaded' % mbid)
        else:
            self._response(400, 'Missing parameter "id".')

    def _add_release(self, args):
        """Serve the auto-submitting add-release form for a signed `token`."""
        if 'token' in args and args['token']:
            try:
                content = addrelease.serve_form(args['token'][0])
                self._response(200, content, 'text/html')
            except addrelease.NotFoundError as err:
                self._response(404, str(err))
            except addrelease.InvalidTokenError:
                self._response(400, 'Invalid token')
        else:
            self._response(400, 'Missing parameter "token".')

    def _auth(self, args):
        """Complete the OAuth flow from the `code`/`state` callback."""
        if 'code' in args and args['code']:
            tagger = QtCore.QCoreApplication.instance()
            oauth_manager = tagger.webservice.oauth_manager
            try:
                # Verify the anti-CSRF state before exchanging the code.
                state = args.get('state', [''])[0]
                callback = oauth_manager.verify_state(state)
            except OAuthInvalidStateError:
                self._response(400, 'Invalid "state" parameter.')
                return
            to_main(
                oauth_manager.exchange_authorization_code,
                authorization_code=args['code'][0],
                scopes='profile tag rating collection submit_isrc submit_barcode',
                callback=callback,
            )
            self._response(200, "Authentication successful, you can close this window now.", 'text/html')
        else:
            self._response(400, 'Missing parameter "code".')

    def _response(self, code, content='', content_type='text/plain'):
        """Send a complete response, with CORS headers for valid origins."""
        self.server_version = SERVER_VERSION
        self.send_response(code)
        self.send_header('Content-Type', content_type)
        self.send_header('Cache-Control', 'max-age=0')
        origin = self.headers['origin']
        if _is_valid_origin(origin):
            self.send_header('Access-Control-Allow-Origin', origin)
            self.send_header('Vary', 'Origin')
        self.end_headers()
        self.wfile.write(content.encode())
| 8,409
|
Python
|
.py
| 213
| 30.666667
| 105
| 0.621691
|
metabrainz/picard
| 3,687
| 383
| 10
|
GPL-2.0
|
9/5/2024, 5:11:02 PM (Europe/Amsterdam)
|
11,161
|
addrelease.py
|
metabrainz_picard/picard/browser/addrelease.py
|
# -*- coding: utf-8 -*-
#
# Picard, the next-generation MusicBrainz tagger
#
# Copyright (C) 2021-2023 Philipp Wolfer
# Copyright (C) 2021-2024 Laurent Monin
# Copyright (C) 2022 Bob Swift
# Copyright (C) 2022 jesus2099
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
from html import escape
from secrets import token_bytes
from PyQt6.QtCore import QCoreApplication
from picard import log
from picard.i18n import gettext as _
from picard.util import format_time
from picard.util.mbserver import build_submission_url
from picard.util.webbrowser2 import open
try:
import jwt
import jwt.exceptions
except ImportError:
log.debug("PyJWT not available, addrelease functionality disabled")
jwt = None
__key = token_bytes() # Generating a new secret on each startup
__algorithm = 'HS256'
_form_template = '''<!doctype html>
<meta charset="UTF-8">
<html>
<head>
<title>{title}</title>
</head>
<body>
<form action="{action}" method="post">
{form_data}
<input type="submit" value="{submit_label}">
</form>
<script>document.forms[0].submit()</script>
</body>
'''
_form_input_template = '<input type="hidden" name="{name}" value="{value}" >'
class InvalidTokenError(Exception):
    """Raised when an add-release token cannot be decoded or is malformed."""
    pass
class NotFoundError(Exception):
    """Raised when the cluster or file referenced by a token is gone."""
    pass
def is_available():
    """Return whether the optional PyJWT dependency could be imported."""
    return jwt is not None
def is_enabled():
    """Return whether the browser integration server is currently running."""
    tagger = QCoreApplication.instance()
    return tagger.browser_integration.is_running
def submit_cluster(cluster):
    """Open the browser with a form adding `cluster` as a new release.

    The cluster is referenced by its hash() inside the signed token.
    """
    _open_url_with_token({'cluster': hash(cluster)})
def submit_file(file, as_release=False):
    """Open the browser adding `file` as a new recording or, if
    `as_release` is set, as a new release."""
    _open_url_with_token({'file': file.filename, 'as_release': as_release})
def serve_form(token):
    """Return the HTML submission form described by the signed JWT `token`.

    Raises NotFoundError if the referenced cluster/file no longer exists,
    and InvalidTokenError if the token cannot be decoded or references
    neither a cluster nor a file.
    """
    try:
        payload = jwt.decode(token, __key, algorithms=__algorithm)
        log.debug("received JWT token %r", payload)
        tagger = QCoreApplication.instance()
        tport = tagger.browser_integration.port
        if 'cluster' in payload:
            cluster = _find_cluster(tagger, payload['cluster'])
            if not cluster:
                raise NotFoundError("Cluster not found")
            return _get_cluster_form(cluster, tport)
        elif 'file' in payload:
            file = _find_file(tagger, payload['file'])
            if not file:
                raise NotFoundError("File not found")
            if payload.get('as_release', False):
                return _get_file_as_release_form(file, tport)
            else:
                return _get_file_as_recording_form(file, tport)
        else:
            # Token decoded fine but carries no known reference.
            raise InvalidTokenError
    except jwt.exceptions.InvalidTokenError:
        # Translate PyJWT's error into this module's exception type.
        raise InvalidTokenError
def extract_discnumber(metadata):
    """Return the disc number from `metadata` as an int, defaulting to 1.

    Accepts plain numbers as well as "disc/total" style values; any
    unparsable value falls back to 1.
    """
    raw = metadata.get('discnumber', '1')
    number = raw.partition('/')[0]
    try:
        return int(number)
    except ValueError:
        return 1
def _open_url_with_token(payload):
    """Sign `payload` as a JWT and open the local /add URL carrying it."""
    token = jwt.encode(payload, __key, algorithm=__algorithm)
    if isinstance(token, bytes):  # For compatibility with PyJWT 1.x
        token = token.decode()
    browser_integration = QCoreApplication.instance().browser_integration
    url = f'http://127.0.0.1:{browser_integration.port}/add?token={token}'
    open(url)
def _find_cluster(tagger, cluster_hash):
for cluster in tagger.clusters:
if hash(cluster) == cluster_hash:
return cluster
return None
def _find_file(tagger, path):
return tagger.files.get(path, None)
def _get_cluster_form(cluster, tport):
    """Build the seeded release editor form for a whole cluster."""
    return _get_form(
        _("Add cluster as release"),
        '/release/add',
        _("Add cluster as release…"),
        _get_cluster_data(cluster),
        {'tport': tport}
    )
def _get_file_as_release_form(file, tport):
    """Build the seeded release editor form for a single file."""
    return _get_form(
        _("Add file as release"),
        '/release/add',
        _("Add file as release…"),
        _get_file_as_release_data(file),
        {'tport': tport}
    )
def _get_file_as_recording_form(file, tport):
    """Build the seeded standalone-recording form for a single file."""
    return _get_form(
        _("Add file as recording"),
        '/recording/create',
        _("Add file as recording…"),
        _get_file_as_recording_data(file),
        {'tport': tport}
    )
def _get_cluster_data(cluster):
    """Return release editor seeding data for all files in `cluster`."""
    # See https://musicbrainz.org/doc/Development/Release_Editor_Seeding
    metadata = cluster.metadata
    data = {
        'name': metadata['album'],
        'artist_credit.names.0.artist.name': metadata['albumartist'],
    }
    _add_track_data(data, cluster.files)
    return data
def _get_file_as_release_data(file):
    """Return release editor seeding data for a single file.

    Falls back to the track title/artist when album level metadata is
    not present.
    """
    # See https://musicbrainz.org/doc/Development/Release_Editor_Seeding
    metadata = file.metadata
    data = {
        'name': metadata['album'] or metadata['title'],
        'artist_credit.names.0.artist.name': metadata['albumartist'] or metadata['artist'],
    }
    _add_track_data(data, [file])
    return data
def _get_file_as_recording_data(file):
    """Return seeding data for creating a standalone recording from `file`."""
    metadata = file.metadata
    data = {
        'edit-recording.name': metadata['title'],
        'edit-recording.artist_credit.names.0.artist.name': metadata['artist'],
        'edit-recording.length': format_time(file.metadata.length),
    }
    return data
def _add_track_data(data, files):
    """Add per-track seeding fields for `files` to the `data` dict.

    Tracks are grouped into mediums by consecutive disc numbers; unique
    (label, catalog number) pairs and the last seen barcode are collected
    along the way and added as release level fields.
    """
    def mkey(disc, track, name):
        # Key format for per-medium, per-track fields.
        return 'mediums.%i.track.%i.%s' % (disc, track, name)

    labels = set()
    barcode = None
    disc_counter = 0
    track_counter = 0
    last_discnumber = None
    for f in files:
        m = f.metadata
        discnumber = extract_discnumber(m)
        # Start a new medium whenever the disc number changes.
        if last_discnumber is not None and discnumber != last_discnumber:
            disc_counter += 1
            track_counter = 0
        last_discnumber = discnumber
        if m['label'] or m['catalognumber']:
            labels.add((m['label'], m['catalognumber']))
        if m['barcode']:
            barcode = m['barcode']
        data[mkey(disc_counter, track_counter, 'name')] = m['title']
        data[mkey(disc_counter, track_counter, 'artist_credit.names.0.name')] = m['artist']
        # Fall back to the position in the medium when no track number is set.
        data[mkey(disc_counter, track_counter, 'number')] = m['tracknumber'] or str(track_counter + 1)
        data[mkey(disc_counter, track_counter, 'length')] = str(m.length)
        if m['musicbrainz_recordingid']:
            data[mkey(disc_counter, track_counter, 'recording')] = m['musicbrainz_recordingid']
        track_counter += 1
    for i, label in enumerate(labels):
        (label, catalog_number) = label
        data['labels.%i.name' % i] = label
        data['labels.%i.catalog_number' % i] = catalog_number
    if barcode:
        data['barcode'] = barcode
def _get_form(title, action, label, form_data, query_args=None):
    """Render the auto-submitting HTML form posting `form_data` to `action`."""
    return _form_template.format(
        title=escape(title),
        submit_label=escape(label),
        action=escape(build_submission_url(action, query_args)),
        form_data=_format_form_data(form_data),
    )
def _format_form_data(data):
    """Render each (name, value) pair as an escaped hidden input element."""
    fields = [
        _form_input_template.format(name=escape(key), value=escape(val))
        for key, val in data.items()
    ]
    return ''.join(fields)
| 7,599
|
Python
|
.py
| 203
| 31.403941
| 102
| 0.661486
|
metabrainz/picard
| 3,687
| 383
| 10
|
GPL-2.0
|
9/5/2024, 5:11:02 PM (Europe/Amsterdam)
|
11,162
|
__init__.py
|
metabrainz_picard/picard/browser/__init__.py
|
# -*- coding: utf-8 -*-
#
# Picard, the next-generation MusicBrainz tagger
#
# Copyright (C) 2006 Lukáš Lalinský
# Copyright (C) 2020-2021 Laurent Monin
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
| 863
|
Python
|
.py
| 20
| 42
| 80
| 0.77381
|
metabrainz/picard
| 3,687
| 383
| 10
|
GPL-2.0
|
9/5/2024, 5:11:02 PM (Europe/Amsterdam)
|
11,163
|
filelookup.py
|
metabrainz_picard/picard/browser/filelookup.py
|
# -*- coding: utf-8 -*-
#
# Picard, the next-generation MusicBrainz tagger
#
# Copyright (C) 2004 Robert Kaye
# Copyright (C) 2006-2008, 2011-2012 Lukáš Lalinský
# Copyright (C) 2011 Pavan Chander
# Copyright (C) 2013 Calvin Walton
# Copyright (C) 2013, 2018, 2020-2021, 2023-2024 Laurent Monin
# Copyright (C) 2014-2015 Sophist-UK
# Copyright (C) 2015 Ohm Patel
# Copyright (C) 2015-2016 Wieland Hoffmann
# Copyright (C) 2016 Rahul Raturi
# Copyright (C) 2016-2017 Sambhav Kothari
# Copyright (C) 2020, 2022-2023 Philipp Wolfer
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import os.path
import re
from PyQt6 import QtCore
from picard import log
from picard.config import get_config
from picard.const import PICARD_URLS
from picard.disc import Disc
from picard.util import (
build_qurl,
webbrowser2,
)
from picard.ui.searchdialog.album import AlbumSearchDialog
class FileLookup:
    """Opens MusicBrainz web pages for entity lookups and searches."""

    # Optional entity type name followed by an MBID, anywhere in a string.
    RE_MB_ENTITY = re.compile(r"""
        \b(?P<entity>area|artist|instrument|label|place|recording|release|release-group|series|track|url|work)?
        \W*(?P<id>[a-f0-9]{8}(?:-[a-f0-9]{4}){3}-[a-f0-9]{12})
    """, re.VERBOSE | re.IGNORECASE)

    # "cdtoc" keyword followed by a 28 character disc ID.
    RE_MB_CDTOC = re.compile(r"""
        \b(?P<entity>cdtoc)
        \W*(?P<id>[a-z0-9-_.]{28})
    """, re.VERBOSE | re.IGNORECASE)

    def __init__(self, parent, server, port, local_port):
        """Create a lookup helper for the given MusicBrainz server/port.

        local_port is the browser integration port, forwarded to the
        website as the `tport` query parameter.
        """
        self.server = server
        self.local_port = int(local_port)
        self.port = port
        self.tagger = QtCore.QCoreApplication.instance()

    def _url(self, path, params=None):
        """Build an encoded URL for `path` on the configured server."""
        if params is None:
            params = {}
        if self.local_port:
            params['tport'] = self.local_port
        url = build_qurl(self.server, self.port, path=path, queryargs=params)
        return bytes(url.toEncoded()).decode()

    def _build_launch(self, path, params=None):
        """Open the browser at `path` on the configured server."""
        if params is None:
            params = {}
        return self.launch(self._url(path, params))

    def launch(self, url):
        """Open `url` in the user's web browser; always returns True."""
        log.debug("webbrowser2: %s", url)
        webbrowser2.open(url)
        return True

    def _lookup(self, type_, id_):
        # Generic "/<entity-type>/<mbid>" page.
        return self._build_launch("/%s/%s" % (type_, id_))

    def recording_lookup(self, recording_id):
        return self._lookup('recording', recording_id)

    def album_lookup(self, album_id):
        return self._lookup('release', album_id)

    def artist_lookup(self, artist_id):
        return self._lookup('artist', artist_id)

    def track_lookup(self, track_id):
        return self._lookup('track', track_id)

    def work_lookup(self, work_id):
        return self._lookup('work', work_id)

    def release_group_lookup(self, release_group_id):
        return self._lookup('release-group', release_group_id)

    def discid_lookup(self, discid):
        return self._lookup('cdtoc', discid)

    def discid_submission(self, url):
        """Open a disc ID submission URL, appending the tport parameter."""
        if self.local_port:
            url = "%s&tport=%d" % (url, self.local_port)
        return self.launch(url)

    def acoust_lookup(self, acoust_id):
        """Open the AcoustID website page for the given AcoustID."""
        return self.launch(PICARD_URLS['acoustid_track'] + acoust_id)

    def mbid_lookup(self, string, type_=None, mbid_matched_callback=None, browser_fallback=True):
        """Parses string for known entity type and mbid, open browser for it

        If entity type is 'release', it will load corresponding release if
        possible.
        """
        m = self.RE_MB_ENTITY.search(string)
        if m is None:
            m = self.RE_MB_CDTOC.search(string)
            if m is None:
                return False
        entity = m.group('entity')
        if entity is None:
            # Bare MBID with no entity name: fall back to the caller's hint.
            if type_ is None:
                return False
            entity = type_
        else:
            entity = entity.lower()
        id = m.group('id')
        if entity != 'cdtoc':
            # MBIDs are case insensitive; disc IDs are not.
            id = id.lower()
        log.debug("Lookup for %s:%s", entity, id)
        if mbid_matched_callback:
            mbid_matched_callback(entity, id)
        # These entity types are handled inside Picard itself.
        if entity == 'release':
            self.tagger.load_album(id)
            return True
        elif entity == 'recording':
            self.tagger.load_nat(id)
            return True
        elif entity == 'release-group':
            AlbumSearchDialog.show_releasegroup_search(id)
            return True
        elif entity == 'cdtoc':
            disc = Disc(id=id)
            disc.lookup()
            return True
        if browser_fallback:
            return self._lookup(entity, id)
        return False

    def tag_lookup(self, artist, release, track, tracknum, duration, filename):
        """Open the tag lookup page seeded with the given metadata."""
        params = {
            'artist': artist,
            'release': release,
            'track': track,
            'tracknum': tracknum,
            'duration': duration,
            'filename': os.path.basename(filename),
        }
        return self._build_launch("/taglookup", params)

    def collection_lookup(self, userid):
        """Open the collections overview page for the given user."""
        return self._build_launch("/user/%s/collections" % userid)

    def search_entity(self, type_, query, adv=False, mbid_matched_callback=None, force_browser=False):
        """Search for `query`; an embedded MBID short-circuits the search."""
        if not force_browser and self.mbid_lookup(query, type_, mbid_matched_callback=mbid_matched_callback):
            return True
        config = get_config()
        params = {
            'limit': config.setting['query_limit'],
            'type': type_,
            'query': query,
        }
        if adv:
            # Enable MusicBrainz's advanced (Lucene) query syntax.
            params['adv'] = 'on'
        return self._build_launch("/search/textsearch", params)
| 6,116
|
Python
|
.py
| 155
| 32.019355
| 111
| 0.63244
|
metabrainz/picard
| 3,687
| 383
| 10
|
GPL-2.0
|
9/5/2024, 5:11:02 PM (Europe/Amsterdam)
|
11,164
|
nsh2json.py
|
metabrainz_picard/installer/i18n/nsh2json.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Picard, the next-generation MusicBrainz tagger
#
# Copyright (C) 2020 Philipp Wolfer
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import glob
import json
import os.path
import nshutil as nsh
def language_from_filename(path):
    """Return (NSIS language name, ISO code) derived from the file name.

    The code is None when nshutil does not know the language name.
    """
    lang = os.path.splitext(os.path.basename(path))[0]
    return (lang, nsh.language_to_code(lang))
def extract_strings(f):
    """Yield (identifier, text) pairs for every LangString line in `f`."""
    for raw_line in f:
        entry = nsh.parse_langstring(raw_line)
        if entry is not None:
            yield entry
def main():
    """Convert each translated .nsh file in out/ back to a JSON source."""
    scriptdir = os.path.dirname(os.path.abspath(__file__))
    sourcesdir = os.path.join(scriptdir, 'sources')
    outdir = os.path.join(scriptdir, 'out')
    for path in glob.glob(os.path.join(outdir, '*.nsh')):
        language, language_code = language_from_filename(path)
        if not language_code:
            print(f'Unknown language "{language}", skipping')
            continue
        target_file = os.path.join(sourcesdir, f'{language_code}.json')
        print(f'{path} => {target_file}')
        with open(path, 'r', encoding='utf-8') as infile:
            output = {}
            for identifier, text in extract_strings(infile):
                output[identifier] = text
        with open(target_file, 'w+', encoding='utf-8') as outfile:
            outfile.write(json.dumps(output, ensure_ascii=False, indent=4))
if __name__ == "__main__":
main()
| 2,071
|
Python
|
.py
| 51
| 35.607843
| 80
| 0.685757
|
metabrainz/picard
| 3,687
| 383
| 10
|
GPL-2.0
|
9/5/2024, 5:11:02 PM (Europe/Amsterdam)
|
11,165
|
nshutil.py
|
metabrainz_picard/installer/i18n/nshutil.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Picard, the next-generation MusicBrainz tagger
#
# Copyright (C) 2020-2021 Philipp Wolfer
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import re
# See list of available NSIS languages at
# https://sourceforge.net/p/nsis/code/HEAD/tree/NSIS/trunk/Contrib/Language%20files/
LANGUAGES = {
'Afrikaans': 'af',
'Albanian': 'sq',
'Arabic': 'ar',
'Asturian': 'ast',
'Basque': 'eu',
'Belarusian': 'be',
'Bosnian': 'bs',
'Breton': 'br',
'Bulgarian': 'bg',
'Catalan': 'ca',
'Cibemba': 'bem',
'Corsican': 'co',
'Croation': 'hr',
'Czech': 'cs',
'Danish': 'da',
'Dutch': 'nl',
'English': 'en',
'Esperanto': 'eo',
'Estonian': 'et',
'Farsi': 'fa',
'Finnish': 'fi',
'French': 'fr',
'Galician': 'gl',
'Georgian': 'ka',
'German': 'de',
'Greek': 'el',
'Hebrew': 'he',
'Hindi': 'hi',
'Hungarian': 'hu',
'Icelandic': 'is',
'Igbo': 'ig',
'Indonesian': 'id',
'Irish': 'ga',
'Italian': 'it',
'Japanese': 'ja',
'Khmer': 'km',
'Korean': 'ko',
'Kurdish': 'ku',
'Latvian': 'lv',
'Lithuanian': 'lt',
'Luxembourgish': 'lb',
'Macedonian': 'mk',
'Malagasy': 'mg',
'Malay': 'ms_MY',
'Mongolian': 'mn',
'Norwegian': 'nb',
'NorwegianNynorsk': 'nn',
'Polish': 'pl',
'Portuguese': 'pt',
'PortugueseBR': 'pt_BR',
'Romanian': 'ro',
'Russian': 'ru',
'ScotsGaelic': 'sco',
'Serbian': 'sr',
# 'SimpChinese': 'zh-Hans',
'SimpChinese': 'zh_CN',
'Slovak': 'sk',
'Slovenian': 'sl',
'Spanish': 'es',
'Swahili': 'sw',
'Swedish': 'sv',
'Tatar': 'tt',
'Thai': 'th',
# 'TradChinese': 'zh-Hant',
'TradChinese': 'zh_TW',
'Turkish': 'tr',
'Ukrainian': 'uk',
'Uzbek': 'uz',
'Vietnamese': 'vi',
'Welsh': 'cy',
'Yoruba': 'yo',
}
_R_LANGUAGES = {code: name for name, code in LANGUAGES.items()}
# See https://nsis.sourceforge.io/Docs/Chapter4.html#varstrings
ESCAPE_CHARS = {
r'$\r': '\r',
r'$\n': '\n',
r'$\t': '\t',
r'$\"': '"',
r'$\'': "'",
r'$\`': '`',
}
RE_LANGSTRING_LINE = re.compile(r'LangString\s+(?P<identifier>[A-Za-z0-9_]+)\s+\${LANG_[A-Z]+}\s+["\'`](?P<text>.*)["\'`]$')
def language_to_code(language):
    """Return the ISO code for an NSIS language name, or None if unknown."""
    return LANGUAGES.get(language)
def code_to_language(language_code):
    """Return the NSIS language name for an ISO code, or None if unknown."""
    return _R_LANGUAGES.get(language_code)
def escape_string(text):
    """Replace special characters in `text` with their NSIS escape codes."""
    for sequence, plain in ESCAPE_CHARS.items():
        if plain not in {"'", "`"}:  # No need to escape quotes other than ""
            text = text.replace(plain, sequence)
    return text
def unescape_string(text):
    """Replace NSIS escape sequences in `text` with their plain characters."""
    for sequence, plain in ESCAPE_CHARS.items():
        text = text.replace(sequence, plain)
    return text
def parse_langstring(line):
    """Parse one LangString line into (identifier, text).

    Returns None when the line is not a LangString definition.
    """
    matched = RE_LANGSTRING_LINE.match(line)
    if not matched:
        return None
    identifier = matched.group('identifier')
    text = unescape_string(matched.group('text'))
    return (identifier, text)
def make_langstring(language, identifier, text):
    """Format one LangString line for the given language and identifier."""
    lang_name = language.upper()
    escaped_text = escape_string(text)
    return f'LangString {identifier} ${{LANG_{lang_name}}} "{escaped_text}"\n'
| 3,867
|
Python
|
.py
| 133
| 24.691729
| 124
| 0.597955
|
metabrainz/picard
| 3,687
| 383
| 10
|
GPL-2.0
|
9/5/2024, 5:11:02 PM (Europe/Amsterdam)
|
11,166
|
json2nsh.py
|
metabrainz_picard/installer/i18n/json2nsh.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Picard, the next-generation MusicBrainz tagger
#
# Copyright (C) 2020 Philipp Wolfer
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import glob
import json
import os.path
import nshutil as nsh
def language_from_filename(path):
    """Return (NSIS language name, ISO code) for a source .json file path.

    The name is None when the code is unknown to nshutil. Note the order
    is the reverse of the nsh2json helper of the same name.
    """
    lang = os.path.splitext(os.path.basename(path))[0]
    return (nsh.code_to_language(lang), lang)
def write_langstring(f, language, identifier, text):
    """Write one formatted LangString line to the open file `f`."""
    langstring = nsh.make_langstring(language, identifier, text)
    f.write(langstring)
def merge_translations(*translations):
    """Merge dicts left to right, keeping only entries with truthy values.

    Later dictionaries override earlier ones for the same key; empty or
    None values never overwrite anything.
    """
    merged = {}
    for mapping in translations:
        merged.update(
            (key, value) for key, value in mapping.items() if value
        )
    return merged
def main():
    """Generate one .nsh file per translation, filling gaps from English."""
    scriptdir = os.path.dirname(os.path.abspath(__file__))
    sourcesdir = os.path.join(scriptdir, 'sources')
    outdir = os.path.join(scriptdir, 'out')
    os.makedirs(outdir, exist_ok=True)
    # Read the english sources for defaults
    with open(os.path.join(sourcesdir, 'en.json'), 'r', encoding='utf-8') as infile:
        data_en = json.loads(infile.read())
    for path in glob.glob(os.path.join(sourcesdir, '*.json')):
        language, language_code = language_from_filename(path)
        if not language:
            print(f'Unknown language code "{language_code}", skipping')
            continue
        target_file = os.path.join(outdir, f'{language}.nsh')
        print(f'{path} => {target_file}')
        with open(path, 'r', encoding='utf-8') as infile:
            data = json.loads(infile.read())
            # English strings serve as fallback for untranslated entries.
            data = merge_translations(data_en, data)
            with open(target_file, 'w+', encoding='utf-8') as outfile:
                for identifier, text in data.items():
                    write_langstring(outfile, language, identifier, text)
if __name__ == "__main__":
main()
| 2,502
|
Python
|
.py
| 60
| 36.466667
| 84
| 0.682867
|
metabrainz/picard
| 3,687
| 383
| 10
|
GPL-2.0
|
9/5/2024, 5:11:02 PM (Europe/Amsterdam)
|
11,167
|
fix-header.py
|
metabrainz_picard/scripts/tools/fix-header.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Picard, the next-generation MusicBrainz tagger
#
# Copyright (C) 2020-2021 Philipp Wolfer
# Copyright (C) 2020-2021, 2023 Laurent Monin
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import argparse
from collections import defaultdict
import glob
import itertools
import logging
import os
import re
import subprocess # nosec: B404
import sys
logging.basicConfig(
force=True,
format="%(asctime)s:%(levelname)s: %(message)s",
level=logging.DEBUG,
stream=sys.stderr,
)
ALIASES = {
'abhi-ohri': 'Abhinav Ohri',
'Antonio Larrosa <alarrosa@suse.com>': 'Antonio Larrosa',
'bob': 'Bob Swift',
'Lukas Lalinsky <lalinsky@gmail.com>': 'Lukáš Lalinský',
'petitminion': 'Petit Minion',
'Philipp Wolfer <ph.wolfer@gmail.com>': 'Philipp Wolfer',
'Ray': 'Ray Bouchard',
'RaysDev': 'Ray Bouchard',
'Sophist': 'Sophist-UK',
'vishal choudhary': 'Vishal Choudhary',
'vishichoudhary': 'Vishal Choudhary',
'yvanzo': 'Yvan Rivière',
}
# https://stackoverflow.com/a/4629241
def ranges(i):
    """Collapse a sorted iterable of ints into (start, end) runs.

    Consecutive values are grouped by the constant difference between a
    value and its index. Based on https://stackoverflow.com/a/4629241
    """
    key = lambda pair: pair[1] - pair[0]
    for _, run in itertools.groupby(enumerate(i), key):
        run = list(run)
        yield run[0][1], run[-1][1]
def extract_authors_from_gitlog(path):
    """Return {author: [years]} for `path` based on the git commit log.

    Author names are normalized through ALIASES, matched by
    "name <email>", then email, then name. Returns an empty dict when
    git exits with a non-zero status.
    """
    authors = {}
    # ¤ is used as a field separator unlikely to appear in names/emails.
    cmd = ['git', 'log', r'--pretty=format:%ad¤%aN¤%aE', r'--date=format:%Y', r'--', path]
    result = subprocess.run(cmd, stdout=subprocess.PIPE, timeout=30)  # nosec: B603
    aliased = set()
    if result.returncode == 0:
        pattern = re.compile(r'^(?P<year>\d+)¤(?P<name>[^¤]*)¤(?P<email>.*)$')
        for line in result.stdout.decode('utf-8').split("\n"):
            matched = pattern.search(line)
            if matched:
                year = int(matched.group('year'))
                author = matched.group('name')
                email = matched.group('email')
                # Try the most specific alias key first.
                for c in (f"{author} <{email}>", email, author):
                    if c in ALIASES:
                        alias = ALIASES[c]
                        aliased.add(f"{author} <{email}> -> {alias}")
                        author = alias
                        break
                if author in authors:
                    if year not in authors[author]:
                        authors[author].append(year)
                else:
                    authors[author] = [year]
    for a in aliased:
        logging.debug(f"Alias found: {a}")
    return authors
def parse_copyright_text(text):
    """Return {author: [years]} parsed from "# Copyright" header lines.

    Supports comma separated year lists and "YYYY-YYYY" ranges, which are
    expanded to individual years. Author names are normalized via ALIASES.
    """
    authors = {}
    pattern_copyright = re.compile(r'^# Copyright \D*((?:\d{4}(?:,? *|-))+) (.+)\s*$')
    range_pattern = re.compile(r'^\s*(\d{4})\s*-\s*(\d{4})\s*$')
    for line in text.split("\n"):
        matched = pattern_copyright.search(line)
        if matched:
            all_years = []
            years_group = matched.group(1)
            author = matched.group(2)
            author = ALIASES.get(author, author)
            comma_years = []
            if ',' in years_group:
                for year in years_group.split(','):
                    comma_years.append(year.strip())
            else:
                comma_years.append(years_group.strip())
            for years in comma_years:
                m = range_pattern.search(years)
                if m:
                    year1 = int(m.group(1))
                    year2 = int(m.group(2))
                    # Expand the range; tolerant of reversed bounds.
                    for y in range(min(year1, year2), max(year1, year2)+1):
                        all_years.append(y)
                else:
                    all_years.append(int(years))
            if author in authors:
                # Merge without duplicating years already recorded.
                for y in all_years:
                    if y not in authors[author]:
                        authors[author].append(y)
            else:
                authors[author] = all_years
    return authors
EMPTY_LINE = ("\n", "#\n")
def parse_file(path, encoding='utf-8'):
    """Read *path* and locate its license/copyright header.

    Returns a 5-tuple ``(found, authors_from_file, authors_from_log,
    before, after)``:
      - ``found``: defaultdict with optional keys 'shebang' (first line),
        'skip' (reason string — caller must not touch the file),
        'nolicense' (do not emit a license block) and 'license'
        (line index where the GPL text was spotted);
      - the two authors dicts map author name -> list of years;
      - ``before``/``after`` are the file text surrounding the detected
        header (the whole file in ``after`` when no header was found).
    """
    authors_from_log = extract_authors_from_gitlog(path)
    start = end = None
    authors_from_file = {}
    # 'fix-header:' directives and markers of generated files to skip;
    # the leading alternation accepts '#', '/*' and '//' comment styles.
    fix_header_pattern = re.compile(r'^(?:#|/\*|//)\s+(fix-header:)\s*(.*)$', re.IGNORECASE)
    skip_pattern = re.compile(r'^(?:#|/\*|//)\s+(Automatically\s+generated|Created\s+by:\s+The\s+Resource\s+Compiler\s+for\s+PyQt6)', re.IGNORECASE)
    with open(path, encoding=encoding) as f:
        lines = f.readlines()
    # defaultdict(lambda: None) lets callers test found['key'] without KeyError.
    found = defaultdict(lambda: None)
    # Keep a shebang aside so it can be re-emitted as the very first line.
    if lines and lines[0].startswith('#!'):
        found["shebang"] = lines[0].rstrip()
        del lines[0]
    # Pass 1: look for skip markers and fix-header directives anywhere.
    for num, line in enumerate(lines):
        skip_matched = skip_pattern.search(line)
        if skip_matched:
            found['skip'] = skip_matched.group(1)
            logging.debug("Found skip indicator: {}".format(found['skip']))
            return (found, {}, {}, '', "".join(lines))
        fix_header_matched = fix_header_pattern.search(line)
        if fix_header_matched:
            words = fix_header_matched.group(2).lower().split()
            if 'nolicense' in words:
                # do not add a license header
                logging.debug("Found fix-header: nolicense")
                found['nolicense'] = True
            if 'skip' in words:
                logging.debug("Found fix-header: skip")
                found['skip'] = fix_header_matched.group(1) + ' ' + fix_header_matched.group(2)
                return (found, {}, {}, '', "".join(lines))
    # Pass 2: drop an existing "coding: utf-8" line (plus the empty comment
    # lines right after it) from the leading comment block; fix_header()
    # re-adds it via CODING_TEXT. Deleting during enumerate() is safe here
    # because we break immediately afterwards.
    for num, line in enumerate(lines):
        if not line.startswith("#") and line not in EMPTY_LINE:
            break
        if "coding: utf-8" in line:
            del lines[num]
            i = num + 1
            while i < len(lines) and lines[i] in EMPTY_LINE:
                del lines[i]
            break
    # Pass 3: find the GPL paragraph within the leading comment block only.
    for num, line in enumerate(lines):
        if not line.startswith("#") and line not in EMPTY_LINE:
            break
        if "GNU General Public License" in line:
            found['license'] = num
            break
    if found['license'] is not None:
        # Walk upwards from the GPL line to the top of the header block
        # (stop at line 0 or at the "# Picard" banner), then swallow any
        # empty lines directly above it.
        i = starting_pos = found['license']
        while lines[i].startswith("#"):
            if i == 0:
                break
            if lines[i].startswith("# Picard"):
                break
            i -= 1
        while True:
            if i == 0:
                break
            if lines[i-1] in EMPTY_LINE:
                i -= 1
            else:
                break
        start = i
        # Walk downwards to the end of the header (the GPL address line
        # ends with " USA."), then swallow trailing empty lines.
        i = starting_pos
        while lines[i].startswith("#"):
            if i == len(lines) - 1:
                break
            if lines[i].endswith(" USA.\n"):
                break
            i += 1
        while True:
            if i == len(lines) - 1:
                break
            if lines[i+1] in EMPTY_LINE:
                i += 1
            else:
                break
        end = i
        authors_from_file = parse_copyright_text("".join(lines[start:end]))
        before = lines[:start]
        after = lines[end+1:]
    else:
        before = []
        after = lines
    return found, authors_from_file, authors_from_log, "".join(before), "".join(after)
# Boilerplate re-emitted by fix_header(), in output order: coding
# declaration, project banner, (generated copyright lines), GPL notice.
CODING_TEXT = """# -*- coding: utf-8 -*-
#
"""
LICENSE_TOP = """# Picard, the next-generation MusicBrainz tagger
#
"""
LICENSE_BOTTOM = """#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""
def fix_header(path, encoding='utf-8'):
    """Rebuild the copyright/license header of *path*.

    Merges the copyright years parsed from the existing header with the
    years each author appears in the git log, then reassembles the file:
    shebang (if any), coding line, banner, copyright lines, GPL notice,
    followed by the remaining original content.

    Returns a tuple ``(new_content, skip_reason)``; ``new_content`` is
    None when the file is marked to be skipped, and ``skip_reason`` then
    explains why.
    """
    found, authors_from_file, authors_from_log, before, after = parse_file(path, encoding)
    if found['skip'] is not None:
        return None, found['skip']
    # Merge both sources into one set of years per author.
    authors = {author: set(years) for author, years in authors_from_log.items()}
    for author, years in authors_from_file.items():
        authors[author] = authors.get(author, set()).union(years)
    # Collapse each author's years into "YYYY" / "YYYY-YYYY" tokens.
    new_authors = {
        author: [str(y1) if y1 == y2 else "%d-%d" % (y1, y2)
                 for y1, y2 in ranges(sorted(years))]
        for author, years in authors.items()
    }
    # One line per author, ordered by earliest year tokens, then name.
    new_copyright = ""
    for author, years in sorted(new_authors.items(), key=lambda x: (sorted(x[1]), x[0])):
        new_copyright += "# Copyright (C) %s %s\n" % (", ".join(years), author)
    # Fix: strip once and reuse — the originals were stripped again when
    # building `parts` below, which was redundant work.
    before = before.strip()
    after = after.strip()
    has_content = bool(before + after)
    nolicense = found['nolicense']
    # filter(None, ...) drops a missing shebang, suppressed license parts
    # and empty before/after sections.
    parts = list(filter(None, [
        found["shebang"],
        CODING_TEXT.strip(),
        LICENSE_TOP.strip() if not nolicense else None,
        new_copyright.strip() if not nolicense else None,
        (LICENSE_BOTTOM.strip() + ("\n\n" if has_content else "")) if not nolicense else None,
        before,
        after,
    ]))
    return "\n".join(parts), None
def main():
    """Command-line entry point: collect matching files and fix their headers."""
    parser = argparse.ArgumentParser(
        description='Generate source file header with copyrights & license from existing header and git log',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter
    )
    parser.add_argument('path', nargs='+', help='Path of a file or a folder of files')
    parser.add_argument('-e', '--extension', default='.py', help='File extension to filter by')
    parser.add_argument('-i', '--in-place', action='store_true', default=False, help='Edit files in place')
    parser.add_argument('-r', '--recursive', action='store_true', default=False, help='Search through subfolders')
    parser.add_argument('--encoding', default='utf-8', help='File encoding of the source files')
    args = parser.parse_args()

    # Breadth-first expansion: files are collected, directories are
    # expanded (only with --recursive) back onto the work queue.
    pending = list(args.path)
    files = set()
    while pending:
        candidate = pending.pop(0)
        if os.path.isfile(candidate):
            ext = os.path.splitext(candidate)[1]
            # An empty --extension matches any file.
            if args.extension in {'', ext}:
                files.add(candidate)
        elif args.recursive:
            pending.extend(glob.glob(candidate + '/*'))

    if not files:
        logging.info("No valid file found")
        sys.exit(0)

    for candidate in files:
        new_content, info = fix_header(candidate, encoding=args.encoding)
        if new_content is None:
            logging.info("Skipping %s (%s)" % (candidate, info))
            continue
        if args.in_place:
            logging.info("Parsing and fixing %s (in place)" % candidate)
            with open(candidate, 'w', encoding=args.encoding) as f:
                print(new_content, file=f)
        else:
            # by default, we just output to stdout
            logging.info("Parsing and fixing %s (stdout)" % candidate)
            print(new_content)
# Only run when executed as a script, not when imported.
if __name__ == '__main__':
    logging.debug("Starting...")
    main()
| 12,095
|
Python
|
.py
| 296
| 31.006757
| 148
| 0.563319
|
metabrainz/picard
| 3,687
| 383
| 10
|
GPL-2.0
|
9/5/2024, 5:11:02 PM (Europe/Amsterdam)
|
11,168
|
changelog-for-version.py
|
metabrainz_picard/scripts/tools/changelog-for-version.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Picard, the next-generation MusicBrainz tagger
#
# Copyright (C) 2020, 2023 Philipp Wolfer
# Copyright (C) 2020-2021 Laurent Monin
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import re
import sys

# Usage: changelog-for-version.py VERSION — prints the matching NEWS.md
# section to stdout, or exits 1 with a message on stderr.
if len(sys.argv) == 1:
    print("Call with changelog-for-version.py [version]", file=sys.stderr)
    sys.exit(1)

version = sys.argv[1]
# Capture everything between this version's heading and the next
# "# Version" heading.
changes_re = re.compile(
    '^# Version ' + re.escape(version) + r' - \d{4}-\d{2}-\d{2}\s*?\n'
    '(?P<changes>.*?)(?=# Version)',
    re.DOTALL | re.MULTILINE)

with open('NEWS.md', 'r') as newsfile:
    news = newsfile.read()

match = changes_re.search(news)
if match is None:
    print("No changelog found for version %s" % version, file=sys.stderr)
    sys.exit(1)
print(match.group('changes').strip())
| 1,477
|
Python
|
.py
| 38
| 36.342105
| 80
| 0.714585
|
metabrainz/picard
| 3,687
| 383
| 10
|
GPL-2.0
|
9/5/2024, 5:11:02 PM (Europe/Amsterdam)
|
11,169
|
pull-shared-translations.py
|
metabrainz_picard/scripts/tools/pull-shared-translations.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Picard, the next-generation MusicBrainz tagger
#
# Copyright (C) 2023 Philipp Wolfer
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import argparse
import logging
import os
import os.path
import sys
from wlc import (
Component,
Weblate,
)
from wlc.config import WeblateConfig
# Weblate instance and project hosting the shared MusicBrainz translations.
WEBLATE_URL = 'https://translations.metabrainz.org/api/'
PROJECT_NAME = 'musicbrainz'
# Components whose .po/.pot files are mirrored into this repository.
PROJECT_COMPONENTS = (
    'attributes',
    'countries',
)
# Translations below this completion percentage are not downloaded.
MIN_TRANSLATED_PERCENT = 10

# Log to stderr so downloaded data written to stdout stays clean.
logging.basicConfig(
    force=True,
    format="%(asctime)s:%(levelname)s: %(message)s",
    level=logging.INFO,
    stream=sys.stderr,
)
def fetch_translations(component_name: str, user_key: str = '', config: WeblateConfig = None):
    """Download the translation files of one Weblate component.

    Writes the source language as '<component>.pot' and every other
    language as '<code>.po' into the po/<component> directory. Templates
    and translations below MIN_TRANSLATED_PERCENT are skipped.
    """
    weblate = Weblate(key=user_key, url=WEBLATE_URL, config=config)
    component = Component(weblate, f'components/{PROJECT_NAME}/{component_name}/')
    logging.info('Processing component %s...', component['name'])
    translations = component.list()
    source_language = component['source_language']['code']
    target_dir = get_output_dir(component_name)
    logging.info('Output dir: %s', target_dir)
    for translation in translations:
        language = translation['language']
        # Skip incomplete translations and translation templates
        if (translation['translated_percent'] < MIN_TRANSLATED_PERCENT
                or translation['is_template']):
            logging.info('Skipping translation file for %s.', language['name'])
            continue
        if language['code'] == source_language:
            filename = f'{component_name}.pot'
        else:
            filename = f"{language['code']}.po"
        logging.info('Downloading translation file %s...', filename)
        payload = translation.download()
        with open(os.path.join(target_dir, filename), 'bw') as output_file:
            output_file.write(payload)
def get_output_dir(component_name: str) -> str:
    """Return the po/<component_name> directory, creating it if missing."""
    base = os.path.dirname(__file__)
    target = os.path.join(base, '..', '..', 'po', component_name)
    os.makedirs(target, exist_ok=True)
    return target
def load_config() -> "WeblateConfig | None":
    """Load the Weblate user configuration from .weblate.ini, if present.

    Returns None when the configuration file does not exist. (The return
    annotation now reflects that; it previously claimed WeblateConfig.)
    """
    config_path = os.path.join(os.path.dirname(__file__), '..', '..', '.weblate.ini')
    # Bug fix: `if os.path.exists:` tested the function object itself,
    # which is always truthy, so the missing-file branch was unreachable
    # and a nonexistent .weblate.ini raised inside config.load() territory
    # instead of returning None.
    if os.path.exists(config_path):
        config = WeblateConfig()
        config.load(config_path)
        return config
    return None
def main():
    """Entry point: resolve the Weblate key, then fetch all shared components."""
    parser = argparse.ArgumentParser(
        prog='pull-shared-translations',
        description=(
            'Fetches the translations for attributes and countries from '
            'the MusicBrainz Server project on Weblate.'
        ),
        epilog=(
            'Instead of passing the --key parameter the key can also be set in '
            'a file .weblate.ini in the repositories root directory. See '
            'po/README.md for details.'
        ))
    parser.add_argument('-k', '--key', help='Weblate user key')
    args = parser.parse_args()

    config = None
    if not args.key:
        # Without an explicit key, a valid .weblate.ini is mandatory.
        config = load_config()
        if not config:
            parser.print_usage()
            parser.error('No Weblate user key specified. See po/README.md for details.')
        url, key = config.get_url_key()
        if not key or url != WEBLATE_URL:
            parser.print_usage()
            parser.error('Invalid .weblate.ini. See po/README.md for details.')

    for name in PROJECT_COMPONENTS:
        fetch_translations(name, user_key=args.key, config=config)
# Only run when executed as a script, not when imported.
if __name__ == '__main__':
    logging.debug("Starting...")
    main()
| 4,287
|
Python
|
.py
| 109
| 33.633028
| 94
| 0.673641
|
metabrainz/picard
| 3,687
| 383
| 10
|
GPL-2.0
|
9/5/2024, 5:11:02 PM (Europe/Amsterdam)
|
11,170
|
win-startup-hook.py
|
metabrainz_picard/scripts/pyinstaller/win-startup-hook.py
|
# -*- coding: utf-8 -*-
#
# Picard, the next-generation MusicBrainz tagger
#
# Copyright (C) 2019, 2021, 2023 Philipp Wolfer
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
from ctypes import windll

import os
import os.path
import sys

# On Windows try to attach to the console as early as possible in order
# to get stdout / stderr logged to console. This needs to happen before
# logging gets imported.
# See https://stackoverflow.com/questions/54536/win32-gui-app-that-writes-usage-text-to-stdout-when-invoked-as-app-exe-help
# AttachConsole(-1) == ATTACH_PARENT_PROCESS: reuse the console of the
# launching process, if there is one.
if windll.kernel32.AttachConsole(-1):
    # 'CON' is the Windows device name for the attached console.
    sys.stdout = open('CON', 'w')
    sys.stderr = open('CON', 'w')

# Ensure bundled DLLs are loaded
# sys._MEIPASS is the bundle/extraction directory provided by PyInstaller;
# prepend it (and the bundled Qt bin dir) to PATH so DLL lookup finds the
# shipped libraries before any system-wide copies.
os.environ['PATH'] = os.pathsep.join((
    os.path.normpath(sys._MEIPASS),
    os.path.normpath(os.path.join(sys._MEIPASS, 'PyQt6\\Qt6\\bin')),
    os.environ['PATH'],
))
| 1,502
|
Python
|
.py
| 36
| 40.027778
| 123
| 0.757016
|
metabrainz/picard
| 3,687
| 383
| 10
|
GPL-2.0
|
9/5/2024, 5:11:02 PM (Europe/Amsterdam)
|
11,171
|
portable-hook.py
|
metabrainz_picard/scripts/pyinstaller/portable-hook.py
|
# -*- coding: utf-8 -*-
#
# Picard, the next-generation MusicBrainz tagger
#
# Copyright (C) 2019, 2021 Philipp Wolfer
# Copyright (C) 2020 Laurent Monin
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import os
import os.path
import sys

from picard import (
    PICARD_APP_NAME,
    PICARD_ORG_NAME,
)

# The portable version stores all data in a folder beside the executable
configdir = '{}-{}'.format(PICARD_ORG_NAME, PICARD_APP_NAME)
basedir = os.path.join(os.path.dirname(sys.executable), configdir)
os.makedirs(basedir, exist_ok=True)

# Setup config file if not specified as command line argument
if '--config-file' not in sys.argv and '-c' not in sys.argv:
    sys.argv.append('--config-file')
    sys.argv.append(os.path.join(basedir, 'Config.ini'))

# Environment variables presumably consumed by Picard's config/cache/plugin
# path resolution at startup — inferred from the names; confirm in picard.
plugindir = os.path.normpath(os.path.join(basedir, 'Plugins'))
cachedir = os.path.normpath(os.path.join(basedir, 'Cache'))
os.environ['PICARD_CONFIG_DIR'] = basedir
os.environ['PICARD_CACHE_DIR'] = cachedir
os.environ['PICARD_PLUGIN_DIR'] = plugindir
| 1,675
|
Python
|
.py
| 40
| 40.275
| 80
| 0.761524
|
metabrainz/picard
| 3,687
| 383
| 10
|
GPL-2.0
|
9/5/2024, 5:11:02 PM (Europe/Amsterdam)
|
11,172
|
macos-library-path-hook.py
|
metabrainz_picard/scripts/pyinstaller/macos-library-path-hook.py
|
# -*- coding: utf-8 -*-
#
# Picard, the next-generation MusicBrainz tagger
#
# Copyright (C) 2019-2020 Philipp Wolfer
# Copyright (C) 2020 Laurent Monin
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import os
import sys

# The macOS app crashes on launch if the working directory happens to be sys._MEIPASS
# (sys._MEIPASS is the PyInstaller bundle/extraction directory); move two
# levels up, out of the Contents/ hierarchy.
os.chdir(os.path.abspath(os.path.join(sys._MEIPASS, '..', '..')))

# On macOS ensure libraries such as libdiscid.dylib get loaded from app bundle
# by making the executable's directory the first dyld fallback location.
os.environ['DYLD_FALLBACK_LIBRARY_PATH'] = '%s:%s' % (
    os.path.dirname(sys.executable), os.environ.get('DYLD_FALLBACK_LIBRARY_PATH', ''))
| 1,259
|
Python
|
.py
| 27
| 45.296296
| 86
| 0.761206
|
metabrainz/picard
| 3,687
| 383
| 10
|
GPL-2.0
|
9/5/2024, 5:11:02 PM (Europe/Amsterdam)
|
11,173
|
package-pypi.yml
|
metabrainz_picard/.github/workflows/package-pypi.yml
|
name: Package for PyPI
on: [workflow_call]
permissions: {}
defaults:
run:
shell: bash
jobs:
pypi-sdist:
runs-on: ubuntu-latest
env:
CODESIGN: 0
steps:
- uses: actions/checkout@v4
- uses: actions/setup-python@v5
with:
python-version: 3.12
- name: Install dependencies (Linux)
if: runner.os == 'linux'
run: |
sudo apt-get update
sudo apt-get install libegl1
- name: Install dependencies
run: |
python -m pip install --upgrade pip
python -m pip install --upgrade setuptools
pip install --upgrade -r requirements.txt
- name: Run tests
timeout-minutes: 30
run: |
python setup.py test
- name: Build Python source distribution
run: |
git clean -dfx
python setup.py clean sdist --formats=gztar,zip
- name: Prepare GPG signing key
run: |
if [ -n "$CODESIGN_GPG_URL" ] && [ -n "$AWS_ACCESS_KEY_ID" ]; then
pip3 install awscli
aws s3 cp "$CODESIGN_GPG_URL" signkey.asc.enc
openssl enc -d -aes-256-cbc -pbkdf2 -iter 600000 -in signkey.asc.enc -out signkey.asc -k "$CODESIGN_GPG_PASSWORD"
gpg --import signkey.asc
rm signkey.asc*
echo "CODESIGN=1" >> $GITHUB_ENV
else
echo "::warning::No signing key available, skipping code signing."
fi
env:
AWS_DEFAULT_REGION: eu-central-1
AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }}
AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
CODESIGN_GPG_URL: ${{ secrets.CODESIGN_GPG_URL }}
CODESIGN_GPG_PASSWORD: ${{ secrets.CODESIGN_GPG_PASSWORD }}
- name: Sign source archives
if: env.CODESIGN == '1'
run: |
for f in dist/*.{zip,tar.gz}; do
gpg --armor --local-user "$CODESIGN_GPG_IDENTITY" --output "${f}.asc" --detach-sig "$f"
done
env:
CODESIGN_GPG_IDENTITY: 68990DD0B1EDC129B856958167997E14D563DA7C
- name: Cleanup
if: env.CODESIGN == '1'
run: |
rm -rf "$HOME/.gnupg"
- name: Upload artifacts
uses: actions/upload-artifact@v4
with:
name: picard-sdist
path: dist/*
pypi-bdist:
runs-on: ${{ matrix.os }}
strategy:
fail-fast: false
matrix:
os: [macos-12, windows-2019]
python-version: ['3.9', '3.10', '3.11', '3.12']
steps:
- uses: actions/checkout@v4
- name: Set up Python ${{ matrix.python-version }}
uses: actions/setup-python@v5
with:
python-version: ${{ matrix.python-version }}
- name: Install gettext (macOS)
if: runner.os == 'macOS'
run: |
brew install gettext
brew link gettext --force
echo "/usr/local/opt/gettext/bin" >> $GITHUB_PATH
- name: Install gettext (Windows)
if: runner.os == 'Windows'
run: |
& .\scripts\package\win-setup-gettext.ps1 `
-GettextVersion $Env:GETTEXT_VERSION -GettextSha256Sum $Env:GETTEXT_SHA256SUM
Add-Content $env:GITHUB_PATH (Join-Path -Path (Resolve-Path .) -ChildPath gettext\bin)
shell: pwsh
env:
GETTEXT_VERSION: 0.22.4
GETTEXT_SHA256SUM: 220068ac0b9e7aedda03534a3088e584640ac1e639800b3a0baa9410aa6d012a
- name: Install dependencies (Linux)
if: runner.os == 'linux'
run: |
sudo apt-get update
sudo apt-get install libegl1
- name: Install dependencies
run: |
python -m pip install --upgrade pip
python -m pip install --upgrade setuptools wheel
pip install --upgrade -r requirements.txt
- name: Run tests
timeout-minutes: 30
run: |
python setup.py test
- name: Build Python binary distribution
run: |
python setup.py clean bdist_wheel
- name: Upload artifacts
uses: actions/upload-artifact@v4
with:
name: picard-bdist-${{ runner.os }}-${{ matrix.python-version }}
path: dist/*.whl
pypi-release:
runs-on: ubuntu-latest
if: startsWith(github.ref, 'refs/tags/')
needs:
- pypi-bdist
- pypi-sdist
environment:
name: pypi
url: https://pypi.org/p/picard
permissions:
id-token: write # required for PyPI upload
steps:
- uses: actions/download-artifact@v4
with:
pattern: picard-?dist*
path: dist/
merge-multiple: true
- name: Prepare distributions
run: |
ls -l dist/
# Remove zip source distribution (only a single sdist is allowed)
rm dist/picard-*.zip*
- name: Publish package distributions to PyPI
uses: pypa/gh-action-pypi-publish@release/v1
| 4,702
|
Python
|
.pyp
| 144
| 25.506944
| 123
| 0.61432
|
metabrainz/picard
| 3,687
| 383
| 10
|
GPL-2.0
|
9/5/2024, 5:11:02 PM (Europe/Amsterdam)
|
11,174
|
options_attached_profiles.ui
|
metabrainz_picard/ui/options_attached_profiles.ui
|
<?xml version="1.0" encoding="UTF-8"?>
<ui version="4.0">
<class>AttachedProfilesDialog</class>
<widget class="QDialog" name="AttachedProfilesDialog">
<property name="geometry">
<rect>
<x>0</x>
<y>0</y>
<width>800</width>
<height>450</height>
</rect>
</property>
<property name="windowTitle">
<string>Profiles Attached to Options</string>
</property>
<layout class="QVBoxLayout">
<property name="spacing">
<number>6</number>
</property>
<property name="leftMargin">
<number>9</number>
</property>
<property name="topMargin">
<number>9</number>
</property>
<property name="rightMargin">
<number>9</number>
</property>
<property name="bottomMargin">
<number>9</number>
</property>
<item>
<widget class="QTableView" name="options_list">
<property name="editTriggers">
<set>QAbstractItemView::NoEditTriggers</set>
</property>
<property name="selectionMode">
<enum>QAbstractItemView::SingleSelection</enum>
</property>
<property name="selectionBehavior">
<enum>QAbstractItemView::SelectRows</enum>
</property>
</widget>
</item>
<item>
<widget class="QDialogButtonBox" name="buttonBox">
<property name="standardButtons">
<set>QDialogButtonBox::NoButton</set>
</property>
</widget>
</item>
</layout>
</widget>
<resources/>
<connections/>
</ui>
| 1,421
|
Python
|
.tac
| 56
| 21.089286
| 55
| 0.660806
|
metabrainz/picard
| 3,687
| 383
| 10
|
GPL-2.0
|
9/5/2024, 5:11:02 PM (Europe/Amsterdam)
|
11,175
|
ui_options_attached_profiles.py
|
metabrainz_picard/picard/ui/forms/ui_options_attached_profiles.py
|
# Form implementation generated from reading ui file 'ui/options_attached_profiles.ui'
#
# Created by: PyQt6 UI code generator 6.6.1
#
# Automatically generated - do not edit.
# Use `python setup.py build_ui` to update it.
from PyQt6 import (
QtCore,
QtGui,
QtWidgets,
)
from picard.i18n import gettext as _
class Ui_AttachedProfilesDialog(object):
def setupUi(self, AttachedProfilesDialog):
AttachedProfilesDialog.setObjectName("AttachedProfilesDialog")
AttachedProfilesDialog.resize(800, 450)
self.vboxlayout = QtWidgets.QVBoxLayout(AttachedProfilesDialog)
self.vboxlayout.setContentsMargins(9, 9, 9, 9)
self.vboxlayout.setSpacing(6)
self.vboxlayout.setObjectName("vboxlayout")
self.options_list = QtWidgets.QTableView(parent=AttachedProfilesDialog)
self.options_list.setEditTriggers(QtWidgets.QAbstractItemView.EditTrigger.NoEditTriggers)
self.options_list.setSelectionMode(QtWidgets.QAbstractItemView.SelectionMode.SingleSelection)
self.options_list.setSelectionBehavior(QtWidgets.QAbstractItemView.SelectionBehavior.SelectRows)
self.options_list.setObjectName("options_list")
self.vboxlayout.addWidget(self.options_list)
self.buttonBox = QtWidgets.QDialogButtonBox(parent=AttachedProfilesDialog)
self.buttonBox.setStandardButtons(QtWidgets.QDialogButtonBox.StandardButton.NoButton)
self.buttonBox.setObjectName("buttonBox")
self.vboxlayout.addWidget(self.buttonBox)
self.retranslateUi(AttachedProfilesDialog)
QtCore.QMetaObject.connectSlotsByName(AttachedProfilesDialog)
def retranslateUi(self, AttachedProfilesDialog):
AttachedProfilesDialog.setWindowTitle(_("Profiles Attached to Options"))
| 1,776
|
Python
|
.tac
| 34
| 46
| 104
| 0.779954
|
metabrainz/picard
| 3,687
| 383
| 10
|
GPL-2.0
|
9/5/2024, 5:11:02 PM (Europe/Amsterdam)
|
11,176
|
setup.py.legacy
|
Kozea_Radicale/setup.py.legacy
|
# This file is part of Radicale - CalDAV and CardDAV server
# Copyright © 2009-2017 Guillaume Ayoub
# Copyright © 2017-2018 Unrud <unrud@outlook.com>
#
# This library is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Radicale. If not, see <http://www.gnu.org/licenses/>.
from setuptools import find_packages, setup

# When the version is updated, a new section in the CHANGELOG.md file must be
# added too.
VERSION = "3.3.0"

# The PyPI long description is taken verbatim from the README.
with open("README.md", encoding="utf-8") as f:
    long_description = f.read()

# Static assets of the built-in web interface, shipped as package data.
web_files = ["web/internal_data/css/icon.png",
             "web/internal_data/css/loading.svg",
             "web/internal_data/css/logo.svg",
             "web/internal_data/css/main.css",
             "web/internal_data/css/icons/delete.svg",
             "web/internal_data/css/icons/download.svg",
             "web/internal_data/css/icons/edit.svg",
             "web/internal_data/css/icons/new.svg",
             "web/internal_data/css/icons/upload.svg",
             "web/internal_data/fn.js",
             "web/internal_data/index.html"]

install_requires = ["defusedxml", "passlib", "vobject>=0.9.6",
                    "python-dateutil>=2.7.3",
                    "pika>=1.1.0",
                    ]
# bcrypt is an optional extra; tests pull it in as well.
bcrypt_requires = ["bcrypt"]
test_requires = ["pytest>=7", "waitress", *bcrypt_requires]

setup(
    name="Radicale",
    version=VERSION,
    description="CalDAV and CardDAV Server",
    long_description=long_description,
    long_description_content_type="text/markdown",
    author="Guillaume Ayoub",
    author_email="guillaume.ayoub@kozea.fr",
    url="https://radicale.org/",
    license="GNU GPL v3",
    platforms="Any",
    packages=find_packages(
        exclude=["*.tests", "*.tests.*", "tests.*", "tests"]),
    package_data={"radicale": [*web_files, "py.typed"]},
    entry_points={"console_scripts": ["radicale = radicale.__main__:run"]},
    install_requires=install_requires,
    extras_require={"test": test_requires, "bcrypt": bcrypt_requires},
    keywords=["calendar", "addressbook", "CalDAV", "CardDAV"],
    python_requires=">=3.8.0",
    classifiers=[
        "Development Status :: 5 - Production/Stable",
        "Environment :: Console",
        "Environment :: Web Environment",
        "Intended Audience :: End Users/Desktop",
        "Intended Audience :: Information Technology",
        "License :: OSI Approved :: GNU General Public License (GPL)",
        "Operating System :: OS Independent",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.8",
        "Programming Language :: Python :: 3.9",
        "Programming Language :: Python :: 3.10",
        "Programming Language :: Python :: 3.11",
        "Programming Language :: Python :: 3.12",
        "Programming Language :: Python :: 3.13",
        "Programming Language :: Python :: Implementation :: CPython",
        "Programming Language :: Python :: Implementation :: PyPy",
        "Topic :: Office/Business :: Groupware"])
| 3,496
|
Python
|
.py
| 76
| 39.513158
| 77
| 0.654263
|
Kozea/Radicale
| 3,268
| 426
| 211
|
GPL-3.0
|
9/5/2024, 5:11:02 PM (Europe/Amsterdam)
|
11,177
|
__main__.py
|
Kozea_Radicale/radicale/__main__.py
|
# This file is part of Radicale - CalDAV and CardDAV server
# Copyright © 2011-2017 Guillaume Ayoub
# Copyright © 2017-2022 Unrud <unrud@outlook.com>
# Copyright © 2024-2024 Peter Bieringer <pb@bieringer.de>
#
# This library is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Radicale. If not, see <http://www.gnu.org/licenses/>.
"""
Radicale executable module.
This module can be executed from a command line with ``$python -m radicale``.
Uses the built-in WSGI server.
"""
import argparse
import contextlib
import os
import signal
import socket
import sys
from types import FrameType
from typing import List, Optional, cast
from radicale import VERSION, config, log, server, storage, types
from radicale.log import logger
def run() -> None:
    """Run Radicale as a standalone server.

    Parses the command line (including per-config-section option groups
    generated from DEFAULT_CONFIG_SCHEMA), loads and merges configuration,
    configures logging, then either verifies the storage (--verify-storage)
    or starts the built-in server until a shutdown signal arrives.
    """
    # Signals that should terminate the process; Windows has no
    # SIGHUP/SIGQUIT, SIGBREAK is the Ctrl+Break event there.
    exit_signal_numbers = [signal.SIGTERM, signal.SIGINT]
    if sys.platform == "win32":
        exit_signal_numbers.append(signal.SIGBREAK)
    else:
        exit_signal_numbers.append(signal.SIGHUP)
        exit_signal_numbers.append(signal.SIGQUIT)

    # Raise SystemExit when signal arrives to run cleanup code
    # (like destructors, try-finish etc.), otherwise the process exits
    # without running any of them
    def exit_signal_handler(signal_number: int,
                            stack_frame: Optional[FrameType]) -> None:
        sys.exit(1)
    for signal_number in exit_signal_numbers:
        signal.signal(signal_number, exit_signal_handler)

    log.setup()

    # Get command-line arguments
    # Configuration options are stored in dest with format "c:SECTION:OPTION"
    parser = argparse.ArgumentParser(
        prog="radicale", usage="%(prog)s [OPTIONS]", allow_abbrev=False)
    parser.add_argument("--version", action="version", version=VERSION)
    parser.add_argument("--verify-storage", action="store_true",
                        help="check the storage for errors and exit")
    parser.add_argument("-C", "--config",
                        help="use specific configuration files", nargs="*")
    parser.add_argument("-D", "--debug", action="store_const", const="debug",
                        dest="c:logging:level", default=argparse.SUPPRESS,
                        help="print debug information")

    # Generate one argument group per schema section, one option per entry.
    for section, section_data in config.DEFAULT_CONFIG_SCHEMA.items():
        if section.startswith("_"):
            continue
        assert ":" not in section  # check field separator
        assert "-" not in section and "_" not in section  # not implemented
        group_description = None
        if section_data.get("_allow_extra"):
            group_description = "additional options allowed"
            if section == "headers":
                group_description += " (e.g. --headers-Pragma=no-cache)"
        elif "type" in section_data:
            group_description = "backend specific options omitted"
        group = parser.add_argument_group(section, group_description)
        for option, data in section_data.items():
            if option.startswith("_"):
                continue
            kwargs = data.copy()
            long_name = "--%s-%s" % (section, option.replace("_", "-"))
            args: List[str] = list(kwargs.pop("aliases", ()))
            args.append(long_name)
            kwargs["dest"] = "c:%s:%s" % (section, option)
            kwargs["metavar"] = "VALUE"
            kwargs["default"] = argparse.SUPPRESS
            del kwargs["value"]
            with contextlib.suppress(KeyError):
                del kwargs["internal"]
            if kwargs["type"] == bool:
                # Boolean options get a value-less form plus a --no... twin.
                del kwargs["type"]
                opposite_args = list(kwargs.pop("opposite_aliases", ()))
                opposite_args.append("--no%s" % long_name[1:])
                group.add_argument(*args, nargs="?", const="True", **kwargs)
                # Opposite argument
                kwargs["help"] = "do not %s (opposite of %s)" % (
                    kwargs["help"], long_name)
                group.add_argument(*opposite_args, action="store_const",
                                   const="False", **kwargs)
            else:
                del kwargs["type"]
                group.add_argument(*args, **kwargs)

    args_ns, remaining_args = parser.parse_known_args()
    # Arguments argparse did not recognize may target backend-specific or
    # extra sections; map "--SECTION-OPTION[=VALUE]" forms by hand.
    unrecognized_args = []
    while remaining_args:
        arg = remaining_args.pop(0)
        for section, data in config.DEFAULT_CONFIG_SCHEMA.items():
            if "type" not in data and not data.get("_allow_extra"):
                continue
            prefix = "--%s-" % section
            if arg.startswith(prefix):
                arg = arg[len(prefix):]
                break
        else:
            unrecognized_args.append(arg)
            continue
        value = ""
        if "=" in arg:
            arg, value = arg.split("=", maxsplit=1)
        elif remaining_args and not remaining_args[0].startswith("-"):
            value = remaining_args.pop(0)
        option = arg
        if not data.get("_allow_extra"):  # preserve dash in HTTP header names
            option = option.replace("-", "_")
        vars(args_ns)["c:%s:%s" % (section, option)] = value
    if unrecognized_args:
        parser.error("unrecognized arguments: %s" %
                     " ".join(unrecognized_args))

    # Preliminary configure logging
    with contextlib.suppress(ValueError):
        log.set_level(config.DEFAULT_CONFIG_SCHEMA["logging"]["level"]["type"](
            vars(args_ns).get("c:logging:level", "")), True)

    # Update Radicale configuration according to arguments
    arguments_config: types.MUTABLE_CONFIG = {}
    for key, value in vars(args_ns).items():
        if key.startswith("c:"):
            _, section, option = key.split(":", maxsplit=2)
            arguments_config[section] = arguments_config.get(section, {})
            arguments_config[section][option] = value

    try:
        configuration = config.load(config.parse_compound_paths(
            config.DEFAULT_CONFIG_PATH,
            os.environ.get("RADICALE_CONFIG"),
            os.pathsep.join(args_ns.config) if args_ns.config is not None
            else None))
        if arguments_config:
            configuration.update(arguments_config, "command line arguments")
    except Exception as e:
        logger.critical("Invalid configuration: %s", e, exc_info=True)
        sys.exit(1)

    # Configure logging
    log.set_level(cast(str, configuration.get("logging", "level")), configuration.get("logging", "backtrace_on_debug"))

    # Log configuration after logger is configured
    default_config_active = True
    for source, miss in configuration.sources():
        logger.info("%s %s", "Skipped missing/unreadable" if miss else "Loaded", source)
        if not miss and source != "default config":
            default_config_active = False
    if default_config_active:
        logger.warning("%s", "No config file found/readable - only default config is active")

    if args_ns.verify_storage:
        logger.info("Verifying storage")
        try:
            storage_ = storage.load(configuration)
            with storage_.acquire_lock("r"):
                if not storage_.verify():
                    logger.critical("Storage verification failed")
                    sys.exit(1)
        except Exception as e:
            logger.critical("An exception occurred during storage "
                            "verification: %s", e, exc_info=True)
            sys.exit(1)
        return

    # Create a socket pair to notify the server of program shutdown
    shutdown_socket, shutdown_socket_out = socket.socketpair()

    # Shutdown server when signal arrives
    def shutdown_signal_handler(signal_number: int,
                                stack_frame: Optional[FrameType]) -> None:
        shutdown_socket.close()
    for signal_number in exit_signal_numbers:
        signal.signal(signal_number, shutdown_signal_handler)

    try:
        server.serve(configuration, shutdown_socket_out)
    except Exception as e:
        # NOTE(review): exc_info=False here while the other critical logs
        # use exc_info=True — looks deliberate (startup errors like a busy
        # port need no traceback), but confirm.
        logger.critical("An exception occurred during server startup: %s", e,
                        exc_info=False)
        sys.exit(1)
if __name__ == "__main__":
run()
| 8,666
|
Python
|
.py
| 187
| 36.909091
| 119
| 0.623077
|
Kozea/Radicale
| 3,268
| 426
| 211
|
GPL-3.0
|
9/5/2024, 5:11:02 PM (Europe/Amsterdam)
|
11,178
|
config.py
|
Kozea_Radicale/radicale/config.py
|
# This file is part of Radicale - CalDAV and CardDAV server
# Copyright © 2008-2017 Guillaume Ayoub
# Copyright © 2008 Nicolas Kandel
# Copyright © 2008 Pascal Halter
# Copyright © 2017-2020 Unrud <unrud@outlook.com>
# Copyright © 2024-2024 Peter Bieringer <pb@bieringer.de>
#
# This library is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Radicale. If not, see <http://www.gnu.org/licenses/>.
"""
Configuration module
Use ``load()`` to obtain an instance of ``Configuration`` for use with
``radicale.app.Application``.
"""
import contextlib
import json
import math
import os
import string
import sys
from collections import OrderedDict
from configparser import RawConfigParser
from typing import (Any, Callable, ClassVar, Iterable, List, Optional,
Sequence, Tuple, TypeVar, Union)
from radicale import auth, hook, rights, storage, types, web
from radicale.item import check_and_sanitize_props
# Default locations searched for a configuration file; a leading "?"
# marks the path as optional (see ``parse_compound_paths``).
DEFAULT_CONFIG_PATH: str = os.pathsep.join([
    "?/etc/radicale/config",
    "?~/.config/radicale/config"])
def positive_int(value: Any) -> int:
    """Convert *value* to ``int`` and reject negative numbers.

    Raises ``ValueError`` when the value cannot be converted or is < 0.
    """
    result = int(value)
    if result >= 0:
        return result
    raise ValueError("value is negative: %d" % result)
def positive_float(value: Any) -> float:
    """Convert *value* to ``float``; reject NaN, infinities and negatives.

    Raises ``ValueError`` with a message describing the specific problem.
    """
    value = float(value)
    # Check NaN before finiteness: math.isfinite() is False for NaN too,
    # so the original order made the "not a number" branch unreachable and
    # reported NaN as "infinite".
    if math.isnan(value):
        raise ValueError("value is not a number")
    if not math.isfinite(value):
        raise ValueError("value is infinite")
    if value < 0:
        raise ValueError("value is negative: %f" % value)
    return value
def logging_level(value: Any) -> str:
    """Validate that *value* is one of the supported log level names."""
    allowed = ("debug", "info", "warning", "error", "critical")
    if value in allowed:
        return value
    raise ValueError("unsupported level: %r" % value)
def filepath(value: Any) -> str:
    """Expand ``~`` (and ``%VAR%`` on Windows) and absolutize a path.

    An empty value is passed through as the empty string.
    """
    if not value:
        return ""
    expanded = os.path.expanduser(value)
    if sys.platform == "win32":
        # Environment-variable expansion is conventional on Windows only
        expanded = os.path.expandvars(expanded)
    return os.path.abspath(expanded)
def list_of_ip_address(value: Any) -> List[Tuple[str, int]]:
    """Parse a comma separated list of ``address:port`` pairs.

    IPv6 addresses may be written in bracket notation (``[::1]:5232``);
    the brackets and surrounding whitespace are stripped from the result.
    """
    def parse_one(raw: str) -> Tuple[str, int]:
        try:
            address, port = raw.rsplit(":", 1)
            return address.strip(string.whitespace + "[]"), int(port)
        except ValueError:
            raise ValueError("malformed IP address: %r" % raw)
    return [parse_one(chunk) for chunk in value.split(",")]
def str_or_callable(value: Any) -> Union[str, Callable]:
    """Pass callables through unchanged, coerce everything else to ``str``."""
    return value if callable(value) else str(value)
def unspecified_type(value: Any) -> Any:
    """Identity conversion, used for options whose type a plugin defines."""
    return value
def _convert_to_bool(value: Any) -> bool:
if value.lower() not in RawConfigParser.BOOLEAN_STATES:
raise ValueError("not a boolean: %r" % value)
return RawConfigParser.BOOLEAN_STATES[value.lower()]
def json_str(value: Any) -> dict:
    """Parse a JSON object of predefined collections and sanitize each
    collection's properties with ``check_and_sanitize_props``.

    An empty value yields an empty dict.
    """
    if not value:
        return {}
    parsed = json.loads(value)
    for name_coll in list(parsed):
        parsed[name_coll] = check_and_sanitize_props(parsed[name_coll])
    return parsed
# Options that exist in the schema but must never be set via config files
INTERNAL_OPTIONS: Sequence[str] = ("_allow_extra",)
# Default configuration.
# Each option maps to a dict with the default "value" (always a string),
# a "help" text for --help, the converter "type", optional CLI "aliases"
# and, for plugin "type" options, the tuple of "internal" backend names.
DEFAULT_CONFIG_SCHEMA: types.CONFIG_SCHEMA = OrderedDict([
    ("server", OrderedDict([
        ("hosts", {
            "value": "localhost:5232",
            "help": "set server hostnames including ports",
            "aliases": ("-H", "--hosts",),
            "type": list_of_ip_address}),
        ("max_connections", {
            "value": "8",
            "help": "maximum number of parallel connections",
            "type": positive_int}),
        ("max_content_length", {
            "value": "100000000",
            "help": "maximum size of request body in bytes",
            "type": positive_int}),
        ("timeout", {
            "value": "30",
            "help": "socket timeout",
            "type": positive_float}),
        ("ssl", {
            "value": "False",
            "help": "use SSL connection",
            "aliases": ("-s", "--ssl",),
            "opposite_aliases": ("-S", "--no-ssl",),
            "type": bool}),
        ("certificate", {
            "value": "/etc/ssl/radicale.cert.pem",
            "help": "set certificate file",
            "aliases": ("-c", "--certificate",),
            "type": filepath}),
        ("key", {
            "value": "/etc/ssl/radicale.key.pem",
            "help": "set private key file",
            "aliases": ("-k", "--key",),
            "type": filepath}),
        ("certificate_authority", {
            "value": "",
            "help": "set CA certificate for validating clients",
            "aliases": ("--certificate-authority",),
            "type": filepath}),
        ("_internal_server", {
            "value": "False",
            "help": "the internal server is used",
            "type": bool})])),
    ("encoding", OrderedDict([
        ("request", {
            "value": "utf-8",
            "help": "encoding for responding requests",
            "type": str}),
        ("stock", {
            "value": "utf-8",
            "help": "encoding for storing local collections",
            "type": str})])),
    ("auth", OrderedDict([
        ("type", {
            "value": "none",
            "help": "authentication method",
            "type": str_or_callable,
            "internal": auth.INTERNAL_TYPES}),
        ("htpasswd_filename", {
            "value": "/etc/radicale/users",
            "help": "htpasswd filename",
            "type": filepath}),
        ("htpasswd_encryption", {
            "value": "autodetect",
            "help": "htpasswd encryption method",
            "type": str}),
        ("realm", {
            "value": "Radicale - Password Required",
            "help": "message displayed when a password is needed",
            "type": str}),
        ("delay", {
            "value": "1",
            "help": "incorrect authentication delay",
            "type": positive_float}),
        ("ldap_uri", {
            "value": "ldap://localhost",
            "help": "URI to the ldap server",
            "type": str}),
        ("ldap_base", {
            "value": "none",
            "help": "LDAP base DN of the ldap server",
            "type": str}),
        ("ldap_reader_dn", {
            "value": "none",
            "help": "the DN of a ldap user with read access to get the user accounts",
            "type": str}),
        ("ldap_secret", {
            "value": "none",
            "help": "the password of the ldap_reader_dn",
            "type": str}),
        ("ldap_filter", {
            "value": "(cn={0})",
            "help": "the search filter to find the user DN to authenticate by the username",
            "type": str}),
        ("ldap_load_groups", {
            "value": "False",
            "help": "load the ldap groups of the authenticated user",
            "type": bool}),
        ("ldap_use_ssl", {
            "value": "False",
            "help": "Use ssl on the ldap connection",
            "type": bool}),
        ("ldap_ssl_verify_mode", {
            "value": "REQUIRED",
            # Fixed typo in user-visible help text: "certifikat" -> "certificate"
            "help": "The certificate verification mode. NONE, OPTIONAL, default is REQUIRED",
            "type": str}),
        ("ldap_ssl_ca_file", {
            "value": "",
            "help": "The path to the CA file in pem format which is used to certificate the server certificate",
            "type": str}),
        ("strip_domain", {
            "value": "False",
            "help": "strip domain from username",
            "type": bool}),
        ("lc_username", {
            "value": "False",
            "help": "convert username to lowercase, must be true for case-insensitive auth providers",
            "type": bool})])),
    ("rights", OrderedDict([
        ("type", {
            "value": "owner_only",
            "help": "rights backend",
            "type": str_or_callable,
            "internal": rights.INTERNAL_TYPES}),
        ("permit_delete_collection", {
            "value": "True",
            "help": "permit delete of a collection",
            "type": bool}),
        ("permit_overwrite_collection", {
            "value": "True",
            "help": "permit overwrite of a collection",
            "type": bool}),
        ("file", {
            "value": "/etc/radicale/rights",
            "help": "file for rights management from_file",
            "type": filepath})])),
    ("storage", OrderedDict([
        ("type", {
            "value": "multifilesystem",
            "help": "storage backend",
            "type": str_or_callable,
            "internal": storage.INTERNAL_TYPES}),
        ("filesystem_folder", {
            "value": "/var/lib/radicale/collections",
            "help": "path where collections are stored",
            "type": filepath}),
        ("max_sync_token_age", {
            "value": "2592000",  # 30 days
            "help": "delete sync token that are older",
            "type": positive_int}),
        ("skip_broken_item", {
            "value": "True",
            "help": "skip broken item instead of triggering exception",
            "type": bool}),
        ("hook", {
            "value": "",
            "help": "command that is run after changes to storage",
            "type": str}),
        ("_filesystem_fsync", {
            "value": "True",
            "help": "sync all changes to filesystem during requests",
            "type": bool}),
        ("predefined_collections", {
            "value": "",
            "help": "predefined user collections",
            "type": json_str})])),
    ("hook", OrderedDict([
        ("type", {
            "value": "none",
            "help": "hook backend",
            "type": str,
            "internal": hook.INTERNAL_TYPES}),
        ("rabbitmq_endpoint", {
            "value": "",
            "help": "endpoint where rabbitmq server is running",
            "type": str}),
        ("rabbitmq_topic", {
            "value": "",
            "help": "topic to declare queue",
            "type": str}),
        ("rabbitmq_queue_type", {
            "value": "",
            "help": "queue type for topic declaration",
            "type": str})])),
    ("web", OrderedDict([
        ("type", {
            "value": "internal",
            "help": "web interface backend",
            "type": str_or_callable,
            "internal": web.INTERNAL_TYPES})])),
    ("logging", OrderedDict([
        ("level", {
            "value": "info",
            "help": "threshold for the logger",
            "type": logging_level}),
        ("bad_put_request_content", {
            "value": "False",
            "help": "log bad PUT request content",
            "type": bool}),
        ("backtrace_on_debug", {
            "value": "False",
            "help": "log backtrace on level=debug",
            "type": bool}),
        ("request_header_on_debug", {
            "value": "False",
            "help": "log request header on level=debug",
            "type": bool}),
        ("request_content_on_debug", {
            "value": "False",
            "help": "log request content on level=debug",
            "type": bool}),
        ("response_content_on_debug", {
            "value": "False",
            "help": "log response content on level=debug",
            "type": bool}),
        ("rights_rule_doesnt_match_on_debug", {
            "value": "False",
            # Fixed grammar in user-visible help text
            "help": "log rights rules which don't match on level=debug",
            "type": bool}),
        ("mask_passwords", {
            "value": "True",
            "help": "mask passwords in logs",
            "type": bool})])),
    # "headers" takes arbitrary extra options (added verbatim to responses)
    ("headers", OrderedDict([
        ("_allow_extra", str)])),
    ("reporting", OrderedDict([
        ("max_freebusy_occurrence", {
            "value": "10000",
            "help": "number of occurrences per event when reporting",
            "type": positive_int})]))
])
def parse_compound_paths(*compound_paths: Optional[str]
                         ) -> List[Tuple[str, bool]]:
    """Parse a compound path and return the individual paths.

    Paths in a compound path are joined by ``os.pathsep``.  A leading
    ``?`` on a path sets its ``IGNORE_IF_MISSING`` flag.  Of all the
    ``compound_paths`` arguments, the last one that is not ``None`` wins.

    Returns a list of ``(PATH, IGNORE_IF_MISSING)`` tuples.
    """
    selected = ""
    for candidate in compound_paths:
        if candidate is not None:
            selected = candidate
    result: List[Tuple[str, bool]] = []
    for raw in selected.split(os.pathsep):
        optional = raw.startswith("?")
        # Empty entries (after expansion) are silently dropped
        cleaned = filepath(raw[1:] if optional else raw)
        if cleaned:
            result.append((cleaned, optional))
    return result
def load(paths: Optional[Iterable[Tuple[str, bool]]] = None
         ) -> "Configuration":
    """
    Create instance of ``Configuration`` for use with
    ``radicale.app.Application``.
    ``paths`` a list of configuration files with the format
    ``[(PATH, IGNORE_IF_MISSING), ...]``.
    If a configuration file is missing and IGNORE_IF_MISSING is set, the
    config is set to ``Configuration.SOURCE_MISSING``.
    The configuration can later be changed with ``Configuration.update()``.
    """
    if paths is None:
        paths = []
    # Start from the built-in defaults, then layer each file on top
    configuration = Configuration(DEFAULT_CONFIG_SCHEMA)
    for path, ignore_if_missing in paths:
        parser = RawConfigParser()
        config_source = "config file %r" % path
        config: types.CONFIG
        try:
            with open(path) as f:
                parser.read_file(f)
            # Flatten the parser into a plain {section: {option: raw_value}}
            config = {s: {o: parser[s][o] for o in parser.options(s)}
                      for s in parser.sections()}
        except Exception as e:
            # Only tolerate "file not there/not readable" errors, and only
            # for paths explicitly marked optional; anything else is fatal
            if not (ignore_if_missing and isinstance(e, (
                    FileNotFoundError, NotADirectoryError, PermissionError))):
                raise RuntimeError("Failed to load %s: %s" % (config_source, e)
                                   ) from e
            # Record the source as missing so sources() can report it
            config = Configuration.SOURCE_MISSING
        configuration.update(config, config_source)
    return configuration
_Self = TypeVar("_Self", bound="Configuration")
class Configuration:
SOURCE_MISSING: ClassVar[types.CONFIG] = {}
_schema: types.CONFIG_SCHEMA
_values: types.MUTABLE_CONFIG
_configs: List[Tuple[types.CONFIG, str, bool]]
def __init__(self, schema: types.CONFIG_SCHEMA) -> None:
"""Initialize configuration.
``schema`` a dict that describes the configuration format.
See ``DEFAULT_CONFIG_SCHEMA``.
The content of ``schema`` must not change afterwards, it is kept
as an internal reference.
Use ``load()`` to create an instance for use with
``radicale.app.Application``.
"""
self._schema = schema
self._values = {}
self._configs = []
default = {section: {option: self._schema[section][option]["value"]
for option in self._schema[section]
if option not in INTERNAL_OPTIONS}
for section in self._schema}
self.update(default, "default config", privileged=True)
def update(self, config: types.CONFIG, source: Optional[str] = None,
privileged: bool = False) -> None:
"""Update the configuration.
``config`` a dict of the format {SECTION: {OPTION: VALUE, ...}, ...}.
The configuration is checked for errors according to the config schema.
The content of ``config`` must not change afterwards, it is kept
as an internal reference.
``source`` a description of the configuration source (used in error
messages).
``privileged`` allows updating sections and options starting with "_".
"""
if source is None:
source = "unspecified config"
new_values: types.MUTABLE_CONFIG = {}
for section in config:
if (section not in self._schema or
section.startswith("_") and not privileged):
raise ValueError(
"Invalid section %r in %s" % (section, source))
new_values[section] = {}
extra_type = None
extra_type = self._schema[section].get("_allow_extra")
if "type" in self._schema[section]:
if "type" in config[section]:
plugin = config[section]["type"]
else:
plugin = self.get(section, "type")
if plugin not in self._schema[section]["type"]["internal"]:
extra_type = unspecified_type
for option in config[section]:
type_ = extra_type
if option in self._schema[section]:
type_ = self._schema[section][option]["type"]
if (not type_ or option in INTERNAL_OPTIONS or
option.startswith("_") and not privileged):
raise RuntimeError("Invalid option %r in section %r in "
"%s" % (option, section, source))
raw_value = config[section][option]
try:
if type_ == bool and not isinstance(raw_value, bool):
raw_value = _convert_to_bool(raw_value)
new_values[section][option] = type_(raw_value)
except Exception as e:
raise RuntimeError(
"Invalid %s value for option %r in section %r in %s: "
"%r" % (type_.__name__, option, section, source,
raw_value)) from e
self._configs.append((config, source, bool(privileged)))
for section in new_values:
self._values[section] = self._values.get(section, {})
self._values[section].update(new_values[section])
def get(self, section: str, option: str) -> Any:
"""Get the value of ``option`` in ``section``."""
with contextlib.suppress(KeyError):
return self._values[section][option]
raise KeyError(section, option)
def get_raw(self, section: str, option: str) -> Any:
"""Get the raw value of ``option`` in ``section``."""
for config, _, _ in reversed(self._configs):
if option in config.get(section, {}):
return config[section][option]
raise KeyError(section, option)
def get_source(self, section: str, option: str) -> str:
"""Get the source that provides ``option`` in ``section``."""
for config, source, _ in reversed(self._configs):
if option in config.get(section, {}):
return source
raise KeyError(section, option)
def sections(self) -> List[str]:
"""List all sections."""
return list(self._values.keys())
def options(self, section: str) -> List[str]:
"""List all options in ``section``"""
return list(self._values[section].keys())
def sources(self) -> List[Tuple[str, bool]]:
"""List all config sources."""
return [(source, config is self.SOURCE_MISSING) for
config, source, _ in self._configs]
def copy(self: _Self, plugin_schema: Optional[types.CONFIG_SCHEMA] = None
) -> _Self:
"""Create a copy of the configuration
``plugin_schema`` is a optional dict that contains additional options
for usage with a plugin. See ``DEFAULT_CONFIG_SCHEMA``.
"""
if plugin_schema is None:
schema = self._schema
else:
new_schema = dict(self._schema)
for section, options in plugin_schema.items():
if (section not in new_schema or
"type" not in new_schema[section] or
"internal" not in new_schema[section]["type"]):
raise ValueError("not a plugin section: %r" % section)
new_section = dict(new_schema[section])
new_type = dict(new_section["type"])
new_type["internal"] = (self.get(section, "type"),)
new_section["type"] = new_type
for option, value in options.items():
if option in new_section:
raise ValueError("option already exists in %r: %r" %
(section, option))
new_section[option] = value
new_schema[section] = new_section
schema = new_schema
copy = type(self)(schema)
for config, source, privileged in self._configs:
copy.update(config, source, privileged)
return copy
| 21,143
|
Python
|
.py
| 508
| 31.027559
| 112
| 0.548469
|
Kozea/Radicale
| 3,268
| 426
| 211
|
GPL-3.0
|
9/5/2024, 5:11:02 PM (Europe/Amsterdam)
|
11,179
|
pathutils.py
|
Kozea_Radicale/radicale/pathutils.py
|
# This file is part of Radicale - CalDAV and CardDAV server
# Copyright © 2014 Jean-Marc Martins
# Copyright © 2012-2017 Guillaume Ayoub
# Copyright © 2017-2018 Unrud <unrud@outlook.com>
#
# This library is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Radicale. If not, see <http://www.gnu.org/licenses/>.
"""
Helper functions for working with the file system.
"""
import errno
import os
import posixpath
import sys
import threading
from tempfile import TemporaryDirectory
from typing import Iterator, Type, Union
from radicale import storage, types
if sys.platform == "win32":
import ctypes
import ctypes.wintypes
import msvcrt
LOCKFILE_EXCLUSIVE_LOCK: int = 2
ULONG_PTR: Union[Type[ctypes.c_uint32], Type[ctypes.c_uint64]]
if ctypes.sizeof(ctypes.c_void_p) == 4:
ULONG_PTR = ctypes.c_uint32
else:
ULONG_PTR = ctypes.c_uint64
class Overlapped(ctypes.Structure):
_fields_ = [
("internal", ULONG_PTR),
("internal_high", ULONG_PTR),
("offset", ctypes.wintypes.DWORD),
("offset_high", ctypes.wintypes.DWORD),
("h_event", ctypes.wintypes.HANDLE)]
kernel32 = ctypes.WinDLL("kernel32", use_last_error=True)
lock_file_ex = kernel32.LockFileEx
lock_file_ex.argtypes = [
ctypes.wintypes.HANDLE,
ctypes.wintypes.DWORD,
ctypes.wintypes.DWORD,
ctypes.wintypes.DWORD,
ctypes.wintypes.DWORD,
ctypes.POINTER(Overlapped)]
lock_file_ex.restype = ctypes.wintypes.BOOL
unlock_file_ex = kernel32.UnlockFileEx
unlock_file_ex.argtypes = [
ctypes.wintypes.HANDLE,
ctypes.wintypes.DWORD,
ctypes.wintypes.DWORD,
ctypes.wintypes.DWORD,
ctypes.POINTER(Overlapped)]
unlock_file_ex.restype = ctypes.wintypes.BOOL
else:
import fcntl
if sys.platform == "linux":
import ctypes
RENAME_EXCHANGE: int = 2
renameat2 = None
try:
renameat2 = ctypes.CDLL(None, use_errno=True).renameat2
except AttributeError:
pass
else:
renameat2.argtypes = [
ctypes.c_int, ctypes.c_char_p,
ctypes.c_int, ctypes.c_char_p,
ctypes.c_uint]
renameat2.restype = ctypes.c_int
if sys.platform == "darwin":
# Definition missing in PyPy
F_FULLFSYNC: int = getattr(fcntl, "F_FULLFSYNC", 51)
class RwLock:
    """A readers-Writer lock that locks a file.

    The file lock (flock/LockFileEx) provides exclusion between
    processes; the in-process counters below additionally verify the
    readers-writer guarantees within this process.
    """
    # Path of the lock file
    _path: str
    # Number of active readers in this process
    _readers: int
    # Whether a writer is active in this process
    _writer: bool
    # Protects _readers/_writer
    _lock: threading.Lock
    def __init__(self, path: str) -> None:
        self._path = path
        self._readers = 0
        self._writer = False
        self._lock = threading.Lock()
    @property
    def locked(self) -> str:
        # Returns "r", "w" or "" describing this process's current hold
        with self._lock:
            if self._readers > 0:
                return "r"
            if self._writer:
                return "w"
            return ""
    @types.contextmanager
    def acquire(self, mode: str) -> Iterator[None]:
        """Hold the lock in ``mode`` ("r" shared, "w" exclusive) while the
        with-block runs; the OS lock is released when the file closes."""
        if mode not in "rw":
            raise ValueError("Invalid mode: %r" % mode)
        # The file stays open for the whole with-block; closing it at the
        # end implicitly drops the OS-level lock
        with open(self._path, "w+") as lock_file:
            if sys.platform == "win32":
                handle = msvcrt.get_osfhandle(lock_file.fileno())
                flags = LOCKFILE_EXCLUSIVE_LOCK if mode == "w" else 0
                overlapped = Overlapped()
                try:
                    if not lock_file_ex(handle, flags, 0, 1, 0, overlapped):
                        raise ctypes.WinError()
                except OSError as e:
                    raise RuntimeError("Locking the storage failed: %s" % e
                                       ) from e
            else:
                _cmd = fcntl.LOCK_EX if mode == "w" else fcntl.LOCK_SH
                try:
                    fcntl.flock(lock_file.fileno(), _cmd)
                except OSError as e:
                    raise RuntimeError("Locking the storage failed: %s" % e
                                       ) from e
            with self._lock:
                # Sanity check: the OS lock must have enforced exclusion
                if self._writer or mode == "w" and self._readers != 0:
                    raise RuntimeError("Locking the storage failed: "
                                       "Guarantees failed")
                if mode == "r":
                    self._readers += 1
                else:
                    self._writer = True
            try:
                yield
            finally:
                with self._lock:
                    if mode == "r":
                        self._readers -= 1
                    # Harmless in "r" mode (already False), clears "w" hold
                    self._writer = False
def rename_exchange(src: str, dst: str) -> None:
    """Exchange the files or directories `src` and `dst`.
    Both `src` and `dst` must exist but may be of different types.
    On Linux with renameat2 the operation is atomic.
    On other platforms it's not atomic.

    Raises ``ValueError`` on empty path components and ``OSError`` on
    filesystem errors.
    """
    src_dir, src_base = os.path.split(src)
    dst_dir, dst_base = os.path.split(dst)
    src_dir = src_dir or os.curdir
    dst_dir = dst_dir or os.curdir
    if not src_base or not dst_base:
        raise ValueError("Invalid arguments: %r -> %r" % (src, dst))
    if sys.platform == "linux" and renameat2:
        src_base_bytes = os.fsencode(src_base)
        dst_base_bytes = os.fsencode(dst_base)
        # Hold directory fds so the exchange is relative to stable dirs
        src_dir_fd = os.open(src_dir, 0)
        try:
            dst_dir_fd = os.open(dst_dir, 0)
            try:
                if renameat2(src_dir_fd, src_base_bytes,
                             dst_dir_fd, dst_base_bytes,
                             RENAME_EXCHANGE) == 0:
                    return
                errno_ = ctypes.get_errno()
                # Fallback if RENAME_EXCHANGE not supported by filesystem
                if errno_ != errno.EINVAL:
                    raise OSError(errno_, os.strerror(errno_))
            finally:
                os.close(dst_dir_fd)
        finally:
            os.close(src_dir_fd)
    # Non-atomic fallback: swap via a temporary directory next to src
    with TemporaryDirectory(prefix=".Radicale.tmp-", dir=src_dir
                            ) as tmp_dir:
        os.rename(dst, os.path.join(tmp_dir, "interim"))
        os.rename(src, dst)
        os.rename(os.path.join(tmp_dir, "interim"), src)
def fsync(fd: int) -> None:
    """Flush *fd* to stable storage.

    On macOS plain ``os.fsync`` does not guarantee durability, so
    ``F_FULLFSYNC`` is attempted first.
    """
    if sys.platform != "darwin":
        os.fsync(fd)
        return
    try:
        fcntl.fcntl(fd, F_FULLFSYNC)
    except OSError as e:
        # Fallback if F_FULLFSYNC not supported by filesystem
        if e.errno != errno.EINVAL:
            raise
        os.fsync(fd)
def strip_path(path: str) -> str:
    """Return *path* without leading/trailing ``/``.

    ``path`` must already be sanitized (see ``sanitize_path``).
    """
    assert sanitize_path(path) == path
    return path.strip("/")
def unstrip_path(stripped_path: str, trailing_slash: bool = False) -> str:
    """Re-add the leading slash removed by ``strip_path``.

    With ``trailing_slash`` the result is guaranteed to end in ``/``.
    """
    assert strip_path(sanitize_path(stripped_path)) == stripped_path
    assert stripped_path or trailing_slash
    result = "/%s" % stripped_path
    if trailing_slash and not result.endswith("/"):
        result += "/"
    return result
def sanitize_path(path: str) -> str:
    """Make path absolute with leading slash to prevent access to other data.

    Preserve potential trailing slash.
    """
    suffix = "/" if path.endswith("/") else ""
    sanitized = "/"
    for component in posixpath.normpath(path).split("/"):
        # Unsafe components ("", ".", "..") are dropped entirely
        if not is_safe_path_component(component):
            continue
        sanitized = posixpath.join(sanitized, component)
        if sanitized.endswith("/"):
            suffix = ""
    return sanitized + suffix
def is_safe_path_component(path: str) -> bool:
    """Check if path is a single component of a path.

    Check that the path is safe to join too.
    """
    if not path or "/" in path:
        return False
    return path not in (".", "..")
def is_safe_filesystem_path_component(path: str) -> bool:
    """Check if path is a single component of a local and posix filesystem
    path.

    Check that the path is safe to join too.
    """
    if not path or os.path.splitdrive(path)[0]:
        return False
    if sys.platform == "win32" and ":" in path:  # Block NTFS-ADS
        return False
    if os.path.split(path)[0] or path in (os.curdir, os.pardir):
        return False
    if path.startswith(".") or path.endswith("~"):
        return False
    return is_safe_path_component(path)
def path_to_filesystem(root: str, sane_path: str) -> str:
    """Convert `sane_path` to a local filesystem path relative to `root`.
    `root` must be a secure filesystem path, it will be prepend to the path.
    `sane_path` must be a sanitized path without leading or trailing ``/``.
    Conversion of `sane_path` is done in a secure manner,
    or raises ``ValueError``.
    """
    assert sane_path == strip_path(sanitize_path(sane_path))
    safe_path = root
    parts = sane_path.split("/") if sane_path else []
    for part in parts:
        # Reject components that could escape root or hide files
        if not is_safe_filesystem_path_component(part):
            raise UnsafePathError(part)
        safe_path_parent = safe_path
        safe_path = os.path.join(safe_path, part)
        # Check for conflicting files (e.g. case-insensitive file systems
        # or short names on Windows file systems)
        if (os.path.lexists(safe_path) and
                part not in (e.name for e in os.scandir(safe_path_parent))):
            raise CollidingPathError(part)
    return safe_path
class UnsafePathError(ValueError):
    """Raised when a name cannot be mapped safely onto the filesystem."""

    def __init__(self, path: str) -> None:
        message = "Can't translate name safely to filesystem: %r" % path
        super().__init__(message)
class CollidingPathError(ValueError):
    """Raised when a name clashes with an existing file system entry."""

    def __init__(self, path: str) -> None:
        message = "File name collision: %r" % path
        super().__init__(message)
def name_from_path(path: str, collection: "storage.BaseCollection") -> str:
    """Return Radicale item name from ``path``.

    ``path`` must be sanitized and must lie inside ``collection``;
    otherwise ``ValueError`` is raised.
    """
    assert sanitize_path(path) == path
    prefix = unstrip_path(collection.path, True)
    if not (path + "/").startswith(prefix):
        raise ValueError("%r doesn't start with %r" % (path, prefix))
    name = path[len(prefix):]
    # The remainder must be a single safe component (or empty for the
    # collection itself)
    if name and not is_safe_path_component(name):
        raise ValueError("%r is not a component in collection %r" %
                         (name, collection.path))
    return name
| 10,539
|
Python
|
.py
| 262
| 31.312977
| 77
| 0.602348
|
Kozea/Radicale
| 3,268
| 426
| 211
|
GPL-3.0
|
9/5/2024, 5:11:02 PM (Europe/Amsterdam)
|
11,180
|
utils.py
|
Kozea_Radicale/radicale/utils.py
|
# This file is part of Radicale - CalDAV and CardDAV server
# Copyright © 2014 Jean-Marc Martins
# Copyright © 2012-2017 Guillaume Ayoub
# Copyright © 2017-2018 Unrud <unrud@outlook.com>
#
# This library is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Radicale. If not, see <http://www.gnu.org/licenses/>.
from importlib import import_module, metadata
from typing import Callable, Sequence, Type, TypeVar, Union
from radicale import config
from radicale.log import logger
_T_co = TypeVar("_T_co", covariant=True)
def load_plugin(internal_types: Sequence[str], module_name: str,
class_name: str, base_class: Type[_T_co],
configuration: "config.Configuration") -> _T_co:
type_: Union[str, Callable] = configuration.get(module_name, "type")
if callable(type_):
logger.info("%s type is %r", module_name, type_)
return type_(configuration)
if type_ in internal_types:
module = "radicale.%s.%s" % (module_name, type_)
else:
module = type_
try:
class_ = getattr(import_module(module), class_name)
except Exception as e:
raise RuntimeError("Failed to load %s module %r: %s" %
(module_name, module, e)) from e
logger.info("%s type is %r", module_name, module)
return class_(configuration)
def package_version(name: str) -> str:
    """Return the installed version string of the distribution *name*."""
    return metadata.version(name)
| 1,903
|
Python
|
.py
| 42
| 40.666667
| 72
| 0.707726
|
Kozea/Radicale
| 3,268
| 426
| 211
|
GPL-3.0
|
9/5/2024, 5:11:02 PM (Europe/Amsterdam)
|
11,181
|
__init__.py
|
Kozea_Radicale/radicale/__init__.py
|
# This file is part of Radicale - CalDAV and CardDAV server
# Copyright © 2008 Nicolas Kandel
# Copyright © 2008 Pascal Halter
# Copyright © 2008-2017 Guillaume Ayoub
# Copyright © 2017-2022 Unrud <unrud@outlook.com>
# Copyright © 2024-2024 Peter Bieringer <pb@bieringer.de>
#
# This library is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Radicale. If not, see <http://www.gnu.org/licenses/>.
"""
Entry point for external WSGI servers (like uWSGI or Gunicorn).
Configuration files can be specified in the environment variable
``RADICALE_CONFIG``.
"""
import os
import threading
from typing import Iterable, Optional, cast
from radicale import config, log, types, utils
from radicale.app import Application
from radicale.log import logger
VERSION: str = utils.package_version("radicale")
# Lazily created singleton application shared by all WSGI requests;
# protected by _application_lock (see _get_application_instance)
_application_instance: Optional[Application] = None
# Config path the singleton was built from (must never change afterwards)
_application_config_path: Optional[str] = None
_application_lock = threading.Lock()
def _get_application_instance(config_path: str, wsgi_errors: types.ErrorStream
                              ) -> Application:
    """Return the process-wide ``Application``, creating it on first use.

    Raises ``ValueError`` when called again with a different
    ``config_path`` than the one the singleton was built from.
    """
    global _application_instance, _application_config_path
    with _application_lock:
        if _application_instance is None:
            log.setup()
            # Route setup-time log output to the server's error stream
            with log.register_stream(wsgi_errors):
                _application_config_path = config_path
                configuration = config.load(config.parse_compound_paths(
                    config.DEFAULT_CONFIG_PATH,
                    config_path))
                log.set_level(cast(str, configuration.get("logging", "level")), configuration.get("logging", "backtrace_on_debug"))
                # Log configuration after logger is configured
                default_config_active = True
                for source, miss in configuration.sources():
                    logger.info("%s %s", "Skipped missing/unreadable" if miss
                                else "Loaded", source)
                    if not miss and source != "default config":
                        default_config_active = False
                if default_config_active:
                    logger.warning("%s", "No config file found/readable - only default config is active")
                _application_instance = Application(configuration)
    if _application_config_path != config_path:
        raise ValueError("RADICALE_CONFIG must not change: %r != %r" %
                         (config_path, _application_config_path))
    return _application_instance
def application(environ: types.WSGIEnviron,
                start_response: types.WSGIStartResponse) -> Iterable[bytes]:
    """Entry point for external WSGI servers."""
    # A per-request setting takes precedence over the process environment
    fallback = os.environ.get("RADICALE_CONFIG")
    config_path = environ.get("RADICALE_CONFIG", fallback)
    app = _get_application_instance(config_path, environ["wsgi.errors"])
    return app(environ, start_response)
| 3,376
|
Python
|
.py
| 67
| 42.328358
| 131
| 0.680838
|
Kozea/Radicale
| 3,268
| 426
| 211
|
GPL-3.0
|
9/5/2024, 5:11:02 PM (Europe/Amsterdam)
|
11,182
|
log.py
|
Kozea_Radicale/radicale/log.py
|
# This file is part of Radicale - CalDAV and CardDAV server
# Copyright © 2011-2017 Guillaume Ayoub
# Copyright © 2017-2023 Unrud <unrud@outlook.com>
# Copyright © 2024-2024 Peter Bieringer <pb@bieringer.de>
#
# This library is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Radicale. If not, see <http://www.gnu.org/licenses/>.
"""
Functions to set up Python's logging facility for Radicale's WSGI application.
Log messages are sent to the first available target of:
- Error stream specified by the WSGI server in "wsgi.errors"
- ``sys.stderr``
"""
import contextlib
import io
import logging
import os
import socket
import struct
import sys
import threading
import time
from typing import (Any, Callable, ClassVar, Dict, Iterator, Mapping, Optional,
Tuple, Union, cast)
from radicale import types
LOGGER_NAME: str = "radicale"
LOGGER_FORMATS: Mapping[str, str] = {
"verbose": "[%(asctime)s] [%(ident)s] [%(levelname)s] %(message)s",
"journal": "[%(ident)s] [%(levelname)s] %(message)s",
}
DATE_FORMAT: str = "%Y-%m-%d %H:%M:%S %z"
logger: logging.Logger = logging.getLogger(LOGGER_NAME)
class RemoveTracebackFilter(logging.Filter):
    """Logging filter that drops exception/traceback info from records."""

    def filter(self, record: logging.LogRecord) -> bool:
        # Strip the traceback but always keep the record itself.
        record.exc_info = None
        return True
REMOVE_TRACEBACK_FILTER: logging.Filter = RemoveTracebackFilter()
class IdentLogRecordFactory:
    """LogRecordFactory that attaches ``ident`` and ``tid`` attributes."""

    def __init__(self, upstream_factory: Callable[..., logging.LogRecord]
                 ) -> None:
        self._upstream_factory = upstream_factory

    def __call__(self, *args: Any, **kwargs: Any) -> logging.LogRecord:
        """Create a record and add process/thread identification."""
        record = self._upstream_factory(*args, **kwargs)
        if record.process is None:
            ident = record.processName or "unknown"
        else:
            ident = "%d" % record.process
        tid = None
        if record.thread is not None:
            if record.thread != threading.main_thread().ident:
                # Only non-main threads get a "/<name>" suffix.
                ident += "/%s" % (record.threadName or "unknown")
            if (sys.version_info >= (3, 8) and
                    record.thread == threading.get_ident()):
                tid = threading.get_native_id()
        record.ident = ident  # type:ignore[attr-defined]
        record.tid = tid  # type:ignore[attr-defined]
        return record
class ThreadedStreamHandler(logging.Handler):
    """Sends logging output to the stream registered for the current thread or
    ``sys.stderr`` when no stream was registered."""
    terminator: ClassVar[str] = "\n"
    # Maps thread idents to their registered output streams.
    _streams: Dict[int, types.ErrorStream]
    # (st_dev, st_ino) parsed from $JOURNAL_STREAM, if systemd provided it.
    _journal_stream_id: Optional[Tuple[int, int]]
    # Lazily-opened datagram socket to the systemd journal.
    _journal_socket: Optional[socket.socket]
    # Set after a failed journal connection to avoid retrying (and looping).
    _journal_socket_failed: bool
    # One pre-built formatter per entry in LOGGER_FORMATS.
    _formatters: Mapping[str, logging.Formatter]
    # Formatter forced via the constructor, or None to pick per-target.
    _formatter: Optional[logging.Formatter]
    def __init__(self, format_name: Optional[str] = None) -> None:
        """Initialize the handler.

        ``format_name`` forces one of ``LOGGER_FORMATS`` for all targets;
        with ``None`` a per-target default is chosen when emitting.
        """
        super().__init__()
        self._streams = {}
        self._journal_stream_id = None
        # JOURNAL_STREAM has the form "<device>:<inode>"; ignore bad values.
        with contextlib.suppress(TypeError, ValueError):
            dev, inode = os.environ.get("JOURNAL_STREAM", "").split(":", 1)
            self._journal_stream_id = (int(dev), int(inode))
        self._journal_socket = None
        self._journal_socket_failed = False
        self._formatters = {name: logging.Formatter(fmt, DATE_FORMAT)
                            for name, fmt in LOGGER_FORMATS.items()}
        self._formatter = (self._formatters[format_name]
                           if format_name is not None else None)
    def _get_formatter(self, default_format_name: str) -> logging.Formatter:
        # Prefer the constructor-forced formatter over the per-target default.
        return self._formatter or self._formatters[default_format_name]
    def _detect_journal(self, stream: types.ErrorStream) -> bool:
        """Return True when ``stream`` is the stream systemd connected to the
        journal (matched by device and inode from $JOURNAL_STREAM)."""
        if not self._journal_stream_id or not isinstance(stream, io.IOBase):
            return False
        try:
            stat = os.fstat(stream.fileno())
        except OSError:
            return False
        return self._journal_stream_id == (stat.st_dev, stat.st_ino)
    @staticmethod
    def _encode_journal(data: Mapping[str, Optional[Union[str, int]]]
                        ) -> bytes:
        """Serialize ``data`` into the journal native protocol wire format.

        ``None`` values are skipped.  Values containing a newline use the
        length-prefixed binary encoding (key, "\\n", little-endian uint64
        length, payload); all others use the simple "KEY=value" form.
        """
        msg = b""
        for key, value in data.items():
            if value is None:
                continue
            keyb = key.encode()
            valueb = str(value).encode()
            if b"\n" in valueb:
                msg += (keyb + b"\n" +
                        struct.pack("<Q", len(valueb)) + valueb + b"\n")
            else:
                msg += keyb + b"=" + valueb + b"\n"
        return msg
    def _try_emit_journal(self, record: logging.LogRecord) -> bool:
        """Send ``record`` to the systemd journal; return True on success."""
        if not self._journal_socket:
            # Try to connect to systemd journal socket
            if self._journal_socket_failed or not hasattr(socket, "AF_UNIX"):
                return False
            journal_socket = None
            try:
                journal_socket = socket.socket(
                    socket.AF_UNIX, socket.SOCK_DGRAM)
                journal_socket.connect("/run/systemd/journal/socket")
            except OSError as e:
                self._journal_socket_failed = True
                if journal_socket:
                    journal_socket.close()
                # Log after setting `_journal_socket_failed` to prevent loop!
                logger.error("Failed to connect to systemd journal: %s",
                             e, exc_info=True)
                return False
            self._journal_socket = journal_socket
        # Map Python log levels to syslog priorities (default: warning).
        priority = {"DEBUG": 7,
                    "INFO": 6,
                    "WARNING": 4,
                    "ERROR": 3,
                    "CRITICAL": 2}.get(record.levelname, 4)
        timestamp = time.strftime("%Y-%m-%dT%H:%M:%S.%%03dZ",
                                  time.gmtime(record.created)) % record.msecs
        data = {"PRIORITY": priority,
                "TID": cast(Optional[int], getattr(record, "tid", None)),
                "SYSLOG_IDENTIFIER": record.name,
                "SYSLOG_FACILITY": 1,
                "SYSLOG_PID": record.process,
                "SYSLOG_TIMESTAMP": timestamp,
                "CODE_FILE": record.pathname,
                "CODE_LINE": record.lineno,
                "CODE_FUNC": record.funcName,
                "MESSAGE": self._get_formatter("journal").format(record)}
        self._journal_socket.sendall(self._encode_journal(data))
        return True
    def emit(self, record: logging.LogRecord) -> None:
        """Write ``record`` to the journal or the current thread's stream."""
        try:
            stream = self._streams.get(threading.get_ident(), sys.stderr)
            if self._detect_journal(stream) and self._try_emit_journal(record):
                return
            msg = self._get_formatter("verbose").format(record)
            stream.write(msg + self.terminator)
            stream.flush()
        except Exception:
            self.handleError(record)
    @types.contextmanager
    def register_stream(self, stream: types.ErrorStream) -> Iterator[None]:
        """Register stream for logging output of the current thread."""
        key = threading.get_ident()
        self._streams[key] = stream
        try:
            yield
        finally:
            del self._streams[key]
@types.contextmanager
def register_stream(stream: types.ErrorStream) -> Iterator[None]:
    """Register stream for logging output of the current thread.

    No-op placeholder; ``setup()`` rebinds this module-level name to
    ``ThreadedStreamHandler.register_stream``.
    """
    yield
def setup() -> None:
    """Configure global logging for Radicale.

    Installs the threaded stream handler, the ident-adding record factory
    and the default log level.  An invalid RADICALE_LOG_FORMAT is reported
    after logging is usable.
    """
    global register_stream
    requested_format = os.environ.get("RADICALE_LOG_FORMAT") or None
    valid_format = (requested_format
                    if requested_format in LOGGER_FORMATS else None)
    handler = ThreadedStreamHandler(valid_format)
    logging.basicConfig(handlers=[handler])
    register_stream = handler.register_stream
    logging.setLogRecordFactory(
        IdentLogRecordFactory(logging.getLogRecordFactory()))
    set_level(logging.INFO, True)
    if requested_format != valid_format:
        logger.error("Invalid RADICALE_LOG_FORMAT: %r", requested_format)
def set_level(level: Union[int, str], backtrace_on_debug: bool) -> None:
    """Set logging level for global logger."""
    if isinstance(level, str):
        # Accept level names such as "debug" or "WARNING".
        level = getattr(logging, level.upper())
    assert isinstance(level, int)
    logger.setLevel(level)
    if level > logging.DEBUG:
        logger.info("Logging of backtrace is disabled in this loglevel")
        logger.addFilter(REMOVE_TRACEBACK_FILTER)
        return
    if backtrace_on_debug:
        logger.removeFilter(REMOVE_TRACEBACK_FILTER)
    else:
        logger.debug("Logging of backtrace is disabled by option in this loglevel")
        logger.addFilter(REMOVE_TRACEBACK_FILTER)
| 9,173
|
Python
|
.py
| 203
| 36.024631
| 87
| 0.625616
|
Kozea/Radicale
| 3,268
| 426
| 211
|
GPL-3.0
|
9/5/2024, 5:11:02 PM (Europe/Amsterdam)
|
11,183
|
xmlutils.py
|
Kozea_Radicale/radicale/xmlutils.py
|
# This file is part of Radicale - CalDAV and CardDAV server
# Copyright © 2008 Nicolas Kandel
# Copyright © 2008 Pascal Halter
# Copyright © 2008-2015 Guillaume Ayoub
# Copyright © 2017-2018 Unrud <unrud@outlook.com>
#
# This library is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Radicale. If not, see <http://www.gnu.org/licenses/>.
"""
Helper functions for XML.
"""
import copy
import xml.etree.ElementTree as ET
from collections import OrderedDict
from http import client
from typing import Dict, Mapping, Optional
from urllib.parse import quote
from radicale import item, pathutils
MIMETYPES: Mapping[str, str] = {
"VADDRESSBOOK": "text/vcard",
"VCALENDAR": "text/calendar",
"VSUBSCRIBED": "text/calendar"}
OBJECT_MIMETYPES: Mapping[str, str] = {
"VCARD": "text/vcard",
"VLIST": "text/x-vlist",
"VCALENDAR": "text/calendar"}
NAMESPACES: Mapping[str, str] = {
"C": "urn:ietf:params:xml:ns:caldav",
"CR": "urn:ietf:params:xml:ns:carddav",
"D": "DAV:",
"CS": "http://calendarserver.org/ns/",
"ICAL": "http://apple.com/ns/ical/",
"ME": "http://me.com/_namespace/",
"RADICALE": "http://radicale.org/ns/"}
NAMESPACES_REV: Mapping[str, str] = {v: k for k, v in NAMESPACES.items()}
for short, url in NAMESPACES.items():
ET.register_namespace("" if short == "D" else short, url)
def pretty_xml(element: ET.Element) -> str:
    """Indent an ElementTree ``element`` and its children.

    Returns the serialized XML document with a declaration header.  The
    input element is not modified (it is deep-copied first).
    """
    def pretty_xml_recursive(element: ET.Element, level: int) -> None:
        indent = "\n" + level * "  "
        if len(element) > 0:
            if not (element.text or "").strip():
                element.text = indent + "  "
            if not (element.tail or "").strip():
                element.tail = indent
            for sub_element in element:
                pretty_xml_recursive(sub_element, level + 1)
            # BUG FIX: only the *last* child's tail is dedented to the
            # parent's level so the closing tag lines up.  Previously this
            # ran inside the loop, overwriting every child's tail with the
            # parent indent and flattening multi-child output.
            if not (sub_element.tail or "").strip():
                sub_element.tail = indent
        elif level > 0 and not (element.tail or "").strip():
            element.tail = indent
    element = copy.deepcopy(element)
    pretty_xml_recursive(element, 0)
    return '<?xml version="1.0"?>\n%s' % ET.tostring(element, "unicode")
def make_clark(human_tag: str) -> str:
    """Convert a prefixed tag like ``"D:prop"`` to XML Clark notation.

    Tags already in Clark notation (``"{ns}tag"``) are returned unchanged.
    """
    if not human_tag.startswith("{"):
        prefix, tag = human_tag.split(":", maxsplit=1)
        if not prefix or not tag:
            raise ValueError("Invalid XML tag: %r" % human_tag)
        namespace = NAMESPACES.get(prefix, "")
        if not namespace:
            raise ValueError("Unknown XML namespace prefix: %r" % human_tag)
        return "{%s}%s" % (namespace, tag)
    namespace, tag = human_tag[len("{"):].split("}", maxsplit=1)
    if not namespace or not tag:
        raise ValueError("Invalid XML tag: %r" % human_tag)
    return human_tag
def make_human_tag(clark_tag: str) -> str:
    """Replace a known namespace in Clark notation with its short prefix.

    Tags whose namespace is not in ``NAMESPACES`` are returned as-is.
    """
    if clark_tag.startswith("{"):
        namespace, tag = clark_tag[len("{"):].split("}", maxsplit=1)
        if not namespace or not tag:
            raise ValueError("Invalid XML tag: %r" % clark_tag)
        prefix = NAMESPACES_REV.get(namespace, "")
        return "%s:%s" % (prefix, tag) if prefix else clark_tag
    # Already prefixed: validate the prefix and return unchanged.
    prefix, tag = clark_tag.split(":", maxsplit=1)
    if not prefix or not tag:
        raise ValueError("Invalid XML tag: %r" % clark_tag)
    if prefix not in NAMESPACES:
        raise ValueError("Unknown XML namespace prefix: %r" % clark_tag)
    return clark_tag
def make_response(code: int) -> str:
    """Return the full HTTP/1.1 status line for status ``code``."""
    reason = client.responses[code]
    return "HTTP/1.1 %i %s" % (code, reason)
def make_href(base_prefix: str, href: str) -> str:
    """Return the URL-quoted concatenation of ``base_prefix`` and ``href``."""
    assert href == pathutils.sanitize_path(href)
    full = "%s%s" % (base_prefix, href)
    return quote(full)
def webdav_error(human_tag: str) -> ET.Element:
    """Build a ``D:error`` element wrapping ``human_tag``."""
    error = ET.Element(make_clark("D:error"))
    ET.SubElement(error, make_clark(human_tag))
    return error
def get_content_type(item: "item.Item", encoding: str) -> str:
    """Get the content-type of an item with charset and component parameters.
    """
    parts = ["%s;charset=%s" % (OBJECT_MIMETYPES[item.name], encoding)]
    component = item.component_name
    if component:
        parts.append("component=%s" % component)
    return ";".join(parts)
def props_from_request(xml_request: Optional[ET.Element]
                       ) -> Dict[str, Optional[str]]:
    """Return the properties from a PROPPATCH request as a dictionary.

    Properties that should be removed are set to `None`.
    """
    result: OrderedDict = OrderedDict()
    if xml_request is None:
        return result
    # Requests can contain multiple <D:set> and <D:remove> elements.
    # Each of these elements must contain exactly one <D:prop> element which
    # can contain multiple properties.
    # The order of the elements in the document must be respected.
    props = []
    for element in xml_request:
        if element.tag in (make_clark("D:set"), make_clark("D:remove")):
            for prop in element.findall("./%s/*" % make_clark("D:prop")):
                # True for <D:set>, False for <D:remove>.
                props.append((element.tag == make_clark("D:set"), prop))
    for is_set, prop in props:
        key = make_human_tag(prop.tag)
        value = None
        if prop.tag == make_clark("D:resourcetype"):
            # The resource type maps to the internal "tag" property.
            key = "tag"
            if is_set:
                for resource_type in prop:
                    if resource_type.tag == make_clark("C:calendar"):
                        value = "VCALENDAR"
                        break
                    if resource_type.tag == make_clark("CS:subscribed"):
                        value = "VSUBSCRIBED"
                        break
                    if resource_type.tag == make_clark("CR:addressbook"):
                        value = "VADDRESSBOOK"
                        break
        elif prop.tag == make_clark("C:supported-calendar-component-set"):
            if is_set:
                # Join the names of all <C:comp> children, e.g. "VEVENT,VTODO".
                value = ",".join(
                    supported_comp.attrib["name"] for supported_comp in prop
                    if supported_comp.tag == make_clark("C:comp"))
        elif is_set:
            value = prop.text or ""
        result[key] = value
        # A later occurrence of the same key wins and is moved to the end,
        # preserving document order.
        result.move_to_end(key)
    return result
| 7,085
|
Python
|
.py
| 164
| 35.890244
| 77
| 0.623329
|
Kozea/Radicale
| 3,268
| 426
| 211
|
GPL-3.0
|
9/5/2024, 5:11:02 PM (Europe/Amsterdam)
|
11,184
|
httputils.py
|
Kozea_Radicale/radicale/httputils.py
|
# This file is part of Radicale - CalDAV and CardDAV server
# Copyright © 2008 Nicolas Kandel
# Copyright © 2008 Pascal Halter
# Copyright © 2008-2017 Guillaume Ayoub
# Copyright © 2017-2022 Unrud <unrud@outlook.com>
# Copyright © 2024-2024 Peter Bieringer <pb@bieringer.de>
#
# This library is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Radicale. If not, see <http://www.gnu.org/licenses/>.
"""
Helper functions for HTTP.
"""
import contextlib
import os
import pathlib
import sys
import time
from http import client
from typing import List, Mapping, Union, cast
from radicale import config, pathutils, types
from radicale.log import logger
if sys.version_info < (3, 9):
import pkg_resources
_TRAVERSABLE_LIKE_TYPE = pathlib.Path
else:
import importlib.abc
from importlib import resources
_TRAVERSABLE_LIKE_TYPE = Union[importlib.abc.Traversable, pathlib.Path]
NOT_ALLOWED: types.WSGIResponse = (
client.FORBIDDEN, (("Content-Type", "text/plain"),),
"Access to the requested resource forbidden.")
FORBIDDEN: types.WSGIResponse = (
client.FORBIDDEN, (("Content-Type", "text/plain"),),
"Action on the requested resource refused.")
BAD_REQUEST: types.WSGIResponse = (
client.BAD_REQUEST, (("Content-Type", "text/plain"),), "Bad Request")
NOT_FOUND: types.WSGIResponse = (
client.NOT_FOUND, (("Content-Type", "text/plain"),),
"The requested resource could not be found.")
CONFLICT: types.WSGIResponse = (
client.CONFLICT, (("Content-Type", "text/plain"),),
"Conflict in the request.")
METHOD_NOT_ALLOWED: types.WSGIResponse = (
client.METHOD_NOT_ALLOWED, (("Content-Type", "text/plain"),),
"The method is not allowed on the requested resource.")
PRECONDITION_FAILED: types.WSGIResponse = (
client.PRECONDITION_FAILED,
(("Content-Type", "text/plain"),), "Precondition failed.")
REQUEST_TIMEOUT: types.WSGIResponse = (
client.REQUEST_TIMEOUT, (("Content-Type", "text/plain"),),
"Connection timed out.")
REQUEST_ENTITY_TOO_LARGE: types.WSGIResponse = (
client.REQUEST_ENTITY_TOO_LARGE, (("Content-Type", "text/plain"),),
"Request body too large.")
REMOTE_DESTINATION: types.WSGIResponse = (
client.BAD_GATEWAY, (("Content-Type", "text/plain"),),
"Remote destination not supported.")
DIRECTORY_LISTING: types.WSGIResponse = (
client.FORBIDDEN, (("Content-Type", "text/plain"),),
"Directory listings are not supported.")
INTERNAL_SERVER_ERROR: types.WSGIResponse = (
client.INTERNAL_SERVER_ERROR, (("Content-Type", "text/plain"),),
"A server error occurred. Please contact the administrator.")
DAV_HEADERS: str = "1, 2, 3, calendar-access, addressbook, extended-mkcol"
MIMETYPES: Mapping[str, str] = {
".css": "text/css",
".eot": "application/vnd.ms-fontobject",
".gif": "image/gif",
".html": "text/html",
".js": "application/javascript",
".manifest": "text/cache-manifest",
".png": "image/png",
".svg": "image/svg+xml",
".ttf": "application/font-sfnt",
".txt": "text/plain",
".woff": "application/font-woff",
".woff2": "font/woff2",
".xml": "text/xml"}
FALLBACK_MIMETYPE: str = "application/octet-stream"
def decode_request(configuration: "config.Configuration",
                   environ: types.WSGIEnviron, text: bytes) -> str:
    """Try to magically decode ``text`` according to given ``environ``."""
    # Candidate charsets, in order of preference.
    candidates: List[str] = []
    # First: the charset declared in the request's Content-Type header.
    content_type = environ.get("CONTENT_TYPE")
    if content_type and "charset=" in content_type:
        candidates.append(
            content_type.split("charset=")[1].split(";")[0].strip())
    # Second: the charset configured for Radicale.
    candidates.append(cast(str, configuration.get("encoding", "request")))
    # Finally: common fallbacks.
    candidates.extend(("utf-8", "iso8859-1"))
    # Drop duplicates while keeping first occurrences in order.
    charsets = list(dict.fromkeys(candidates))
    for charset in charsets:
        with contextlib.suppress(UnicodeDecodeError):
            return text.decode(charset)
    raise UnicodeDecodeError("decode_request", text, 0, len(text),
                             "all codecs failed [%s]" % ", ".join(charsets))
def read_raw_request_body(configuration: "config.Configuration",
                          environ: types.WSGIEnviron) -> bytes:
    """Read the raw request body, checking it against CONTENT_LENGTH."""
    length = int(environ.get("CONTENT_LENGTH") or 0)
    if not length:
        return b""
    body = environ["wsgi.input"].read(length)
    if len(body) < length:
        # Client disconnected or sent a short body.
        raise RuntimeError("Request body too short: %d" % len(body))
    return body
def read_request_body(configuration: "config.Configuration",
                      environ: types.WSGIEnviron) -> str:
    """Read and decode the request body, optionally logging its content."""
    raw = read_raw_request_body(configuration, environ)
    content = decode_request(configuration, environ, raw)
    if configuration.get("logging", "request_content_on_debug"):
        logger.debug("Request content:\n%s", content)
    else:
        logger.debug("Request content: suppressed by config/option [logging] request_content_on_debug")
    return content
def redirect(location: str, status: int = client.FOUND) -> types.WSGIResponse:
    """Build a WSGI redirect response pointing at ``location``."""
    headers = {"Location": location, "Content-Type": "text/plain"}
    return status, headers, "Redirected to %s" % location
def _serve_traversable(
        traversable: _TRAVERSABLE_LIKE_TYPE, base_prefix: str, path: str,
        path_prefix: str, index_file: str, mimetypes: Mapping[str, str],
        fallback_mimetype: str) -> types.WSGIResponse:
    """Serve a static file from ``traversable``.

    ``path`` (under ``path_prefix``) is resolved inside ``traversable``;
    directories redirect to a trailing slash and serve ``index_file``.
    Unsafe or missing paths yield ``NOT_FOUND``.
    """
    if path != path_prefix and not path.startswith(path_prefix):
        raise ValueError("path must start with path_prefix: %r --> %r" %
                         (path_prefix, path))
    assert pathutils.sanitize_path(path) == path
    parts_path = path[len(path_prefix):].strip('/')
    parts = parts_path.split("/") if parts_path else []
    for part in parts:
        # Reject path components that could escape the served tree.
        if not pathutils.is_safe_filesystem_path_component(part):
            logger.debug("Web content with unsafe path %r requested", path)
            return NOT_FOUND
        if (not traversable.is_dir() or
                all(part != entry.name for entry in traversable.iterdir())):
            return NOT_FOUND
        traversable = traversable.joinpath(part)
    if traversable.is_dir():
        if not path.endswith("/"):
            return redirect(base_prefix + path + "/")
        if not index_file:
            return NOT_FOUND
        traversable = traversable.joinpath(index_file)
    if not traversable.is_file():
        return NOT_FOUND
    # BUG FIX: honor the caller-supplied ``mimetypes`` and
    # ``fallback_mimetype`` parameters instead of always using the
    # module-level defaults (backward compatible: the defaults passed by
    # `serve_resource`/`serve_folder` are those same globals).
    content_type = mimetypes.get(
        os.path.splitext(traversable.name)[1].lower(), fallback_mimetype)
    headers = {"Content-Type": content_type}
    if isinstance(traversable, pathlib.Path):
        # Only real filesystem paths expose a modification time.
        headers["Last-Modified"] = time.strftime(
            "%a, %d %b %Y %H:%M:%S GMT",
            time.gmtime(traversable.stat().st_mtime))
    answer = traversable.read_bytes()
    return client.OK, headers, answer
def serve_resource(
        package: str, resource: str, base_prefix: str, path: str,
        path_prefix: str = "/.web", index_file: str = "index.html",
        mimetypes: Mapping[str, str] = MIMETYPES,
        fallback_mimetype: str = FALLBACK_MIMETYPE) -> types.WSGIResponse:
    """Serve a file bundled with ``package`` as static web content."""
    if sys.version_info >= (3, 9):
        traversable = resources.files(package).joinpath(resource)
    else:
        # Legacy lookup for Python < 3.9 without importlib.resources.files.
        traversable = pathlib.Path(
            pkg_resources.resource_filename(package, resource))
    return _serve_traversable(traversable, base_prefix, path, path_prefix,
                              index_file, mimetypes, fallback_mimetype)
def serve_folder(
        folder: str, base_prefix: str, path: str,
        path_prefix: str = "/.web", index_file: str = "index.html",
        mimetypes: Mapping[str, str] = MIMETYPES,
        fallback_mimetype: str = FALLBACK_MIMETYPE) -> types.WSGIResponse:
    """Serve a file from filesystem ``folder`` as static web content.

    Deprecated: use ``serve_resource`` instead.
    """
    return _serve_traversable(pathlib.Path(folder), base_prefix, path,
                              path_prefix, index_file, mimetypes,
                              fallback_mimetype)
| 8,777
|
Python
|
.py
| 192
| 39.541667
| 103
| 0.674304
|
Kozea/Radicale
| 3,268
| 426
| 211
|
GPL-3.0
|
9/5/2024, 5:11:02 PM (Europe/Amsterdam)
|
11,185
|
server.py
|
Kozea_Radicale/radicale/server.py
|
# This file is part of Radicale - CalDAV and CardDAV server
# Copyright © 2008 Nicolas Kandel
# Copyright © 2008 Pascal Halter
# Copyright © 2008-2017 Guillaume Ayoub
# Copyright © 2017-2019 Unrud <unrud@outlook.com>
# Copyright © 2024-2024 Peter Bieringer <pb@bieringer.de>
#
# This library is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Radicale. If not, see <http://www.gnu.org/licenses/>.
"""
Built-in WSGI server.
"""
import http
import select
import socket
import socketserver
import ssl
import sys
import wsgiref.simple_server
from typing import (Any, Callable, Dict, List, MutableMapping, Optional, Set,
Tuple, Union)
from urllib.parse import unquote
from radicale import Application, config
from radicale.log import logger
COMPAT_EAI_ADDRFAMILY: int
if hasattr(socket, "EAI_ADDRFAMILY"):
COMPAT_EAI_ADDRFAMILY = socket.EAI_ADDRFAMILY # type:ignore[attr-defined]
elif hasattr(socket, "EAI_NONAME"):
# Windows and BSD don't have a special error code for this
COMPAT_EAI_ADDRFAMILY = socket.EAI_NONAME
COMPAT_EAI_NODATA: int
if hasattr(socket, "EAI_NODATA"):
COMPAT_EAI_NODATA = socket.EAI_NODATA
elif hasattr(socket, "EAI_NONAME"):
# Windows and BSD don't have a special error code for this
COMPAT_EAI_NODATA = socket.EAI_NONAME
COMPAT_IPPROTO_IPV6: int
if hasattr(socket, "IPPROTO_IPV6"):
COMPAT_IPPROTO_IPV6 = socket.IPPROTO_IPV6
elif sys.platform == "win32":
# HACK: https://bugs.python.org/issue29515
COMPAT_IPPROTO_IPV6 = 41
# IPv4 (host, port) and IPv6 (host, port, flowinfo, scopeid)
ADDRESS_TYPE = Union[Tuple[Union[str, bytes, bytearray], int],
Tuple[str, int, int, int]]
def format_address(address: ADDRESS_TYPE) -> str:
    """Format a socket address as ``host:port`` (IPv6 hosts in brackets)."""
    host, port, *_ = address
    if not isinstance(host, str):
        raise NotImplementedError("Unsupported address format: %r" %
                                  (address,))
    template = "[%s]:%d" if ":" in host else "%s:%d"
    return template % (host, port)
class ParallelHTTPServer(socketserver.ThreadingMixIn,
                         wsgiref.simple_server.WSGIServer):
    """WSGI server that handles each request in a separate daemon thread."""
    configuration: config.Configuration
    # Sockets the mainloop selects on to learn when worker threads finish.
    worker_sockets: Set[socket.socket]
    # Per-client socket timeout in seconds (<= 0 disables the timeout).
    _timeout: float
    # We wait for child threads ourself (ThreadingMixIn)
    block_on_close: bool = False
    daemon_threads: bool = True
    def __init__(self, configuration: config.Configuration, family: int,
                 address: Tuple[str, int], RequestHandlerClass:
                 Callable[..., http.server.BaseHTTPRequestHandler]) -> None:
        """Bind a listening socket of address ``family`` on ``address``."""
        self.configuration = configuration
        self.address_family = family
        super().__init__(address, RequestHandlerClass)
        self.worker_sockets = set()
        self._timeout = configuration.get("server", "timeout")
    def server_bind(self) -> None:
        if self.address_family == socket.AF_INET6:
            # Only allow IPv6 connections to the IPv6 socket
            self.socket.setsockopt(COMPAT_IPPROTO_IPV6, socket.IPV6_V6ONLY, 1)
        super().server_bind()
    def get_request(  # type:ignore[override]
            self) -> Tuple[socket.socket, Tuple[ADDRESS_TYPE, socket.socket]]:
        # Set timeout for client
        request: socket.socket
        client_address: ADDRESS_TYPE
        request, client_address = super().get_request()  # type:ignore[misc]
        if self._timeout > 0:
            request.settimeout(self._timeout)
        # The pair lets the mainloop detect when this worker is done:
        # `worker_socket` is closed in `finish_request`, which wakes any
        # select() on `worker_socket_out`.
        worker_socket, worker_socket_out = socket.socketpair()
        self.worker_sockets.add(worker_socket_out)
        # HACK: Forward `worker_socket` via `client_address` return value
        # to worker thread.
        # The super class calls `verify_request`, `process_request` and
        # `handle_error` with modified `client_address` value.
        return request, (client_address, worker_socket)
    def verify_request(  # type:ignore[override]
            self, request: socket.socket, client_address_and_socket:
            Tuple[ADDRESS_TYPE, socket.socket]) -> bool:
        # Accept every connection; auth happens at the WSGI layer.
        return True
    def process_request(  # type:ignore[override]
            self, request: socket.socket, client_address_and_socket:
            Tuple[ADDRESS_TYPE, socket.socket]) -> None:
        # HACK: Super class calls `finish_request` in new thread with
        # `client_address_and_socket`
        return super().process_request(
            request, client_address_and_socket)  # type:ignore[arg-type]
    def finish_request(  # type:ignore[override]
            self, request: socket.socket, client_address_and_socket:
            Tuple[ADDRESS_TYPE, socket.socket]) -> None:
        # HACK: Unpack `client_address_and_socket` and call super class
        # `finish_request` with original `client_address`
        client_address, worker_socket = client_address_and_socket
        try:
            return self.finish_request_locked(request, client_address)
        finally:
            # Signals the mainloop that this worker finished.
            worker_socket.close()
    def finish_request_locked(self, request: socket.socket,
                              client_address: ADDRESS_TYPE) -> None:
        # Hook point: subclasses (HTTPS) wrap extra work around this call.
        return super().finish_request(
            request, client_address)  # type:ignore[arg-type]
    def handle_error(  # type:ignore[override]
            self, request: socket.socket,
            client_address_or_client_address_and_socket:
            Union[ADDRESS_TYPE, Tuple[ADDRESS_TYPE, socket.socket]]) -> None:
        # HACK: This method can be called with the modified
        # `client_address_and_socket` or the original `client_address` value
        e = sys.exc_info()[1]
        assert e is not None
        if isinstance(e, socket.timeout):
            logger.info("Client timed out", exc_info=True)
        else:
            logger.error("An exception occurred during request: %s",
                         sys.exc_info()[1], exc_info=True)
class ParallelHTTPSServer(ParallelHTTPServer):
    """Variant of ``ParallelHTTPServer`` that wraps connections in TLS."""
    def server_bind(self) -> None:
        """Bind the socket and wrap it with the configured TLS context."""
        super().server_bind()
        # Wrap the TCP socket in an SSL socket
        certfile: str = self.configuration.get("server", "certificate")
        keyfile: str = self.configuration.get("server", "key")
        cafile: str = self.configuration.get("server", "certificate_authority")
        # Test if the files can be read
        for name, filename in [("certificate", certfile), ("key", keyfile),
                               ("certificate_authority", cafile)]:
            type_name = config.DEFAULT_CONFIG_SCHEMA["server"][name][
                "type"].__name__
            source = self.configuration.get_source("server", name)
            # The CA file is optional; skip the readability check if unset.
            if name == "certificate_authority" and not filename:
                continue
            try:
                open(filename).close()
            except OSError as e:
                raise RuntimeError(
                    "Invalid %s value for option %r in section %r in %s: %r "
                    "(%s)" % (type_name, name, "server", source, filename,
                              e)) from e
        context = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
        context.load_cert_chain(certfile=certfile, keyfile=keyfile)
        if cafile:
            # A CA file enables mandatory client-certificate verification.
            context.load_verify_locations(cafile=cafile)
            context.verify_mode = ssl.CERT_REQUIRED
        self.socket = context.wrap_socket(
            self.socket, server_side=True, do_handshake_on_connect=False)
    def finish_request_locked(  # type:ignore[override]
            self, request: ssl.SSLSocket, client_address: ADDRESS_TYPE
            ) -> None:
        """Perform the TLS handshake, then handle the request normally."""
        try:
            try:
                request.do_handshake()
            except socket.timeout:
                raise
            except Exception as e:
                raise RuntimeError("SSL handshake failed: %s" % e) from e
        except Exception:
            # Report the failure and make sure the socket gets shut down.
            try:
                self.handle_error(request, client_address)
            finally:
                self.shutdown_request(request)  # type:ignore[attr-defined]
            return
        return super().finish_request_locked(request, client_address)
class ServerHandler(wsgiref.simple_server.ServerHandler):
    """WSGI server handler that reports exceptions via Radicale's logger."""

    # Don't leak the process environment into the WSGI environ.
    os_environ: MutableMapping[str, str] = {}

    def log_exception(self, exc_info) -> None:
        logger.error("An exception occurred during request: %s",
                     exc_info[1], exc_info=exc_info)  # type:ignore[arg-type]
class RequestHandler(wsgiref.simple_server.WSGIRequestHandler):
    """HTTP requests handler."""
    # HACK: Assigned in `socketserver.StreamRequestHandler`
    connection: socket.socket
    def log_request(self, code: Union[int, str] = "-",
                    size: Union[int, str] = "-") -> None:
        pass  # Disable request logging.
    def log_error(self, format_: str, *args: Any) -> None:
        # Route stdlib request errors to Radicale's logger.
        logger.error("An error occurred during request: %s", format_ % args)
    def get_environ(self) -> Dict[str, Any]:
        """Build the WSGI environ with TLS and URL-decoding additions."""
        env = super().get_environ()
        if isinstance(self.connection, ssl.SSLSocket):
            # The certificate can be evaluated by the auth module
            env["REMOTE_CERTIFICATE"] = self.connection.getpeercert()
        # Parent class only tries latin1 encoding
        env["PATH_INFO"] = unquote(self.path.split("?", 1)[0])
        return env
    def handle(self) -> None:
        """Copy of WSGIRequestHandler.handle with different ServerHandler"""
        self.raw_requestline = self.rfile.readline(65537)
        # Request lines over 64 KiB are rejected with 414, as in the stdlib.
        if len(self.raw_requestline) > 65536:
            self.requestline = ""
            self.request_version = ""
            self.command = ""
            self.send_error(414)
            return
        if not self.parse_request():
            return
        handler = ServerHandler(
            self.rfile, self.wfile, self.get_stderr(), self.get_environ()
        )
        handler.request_handler = self  # type:ignore[attr-defined]
        app = self.server.get_app()  # type:ignore[attr-defined]
        handler.run(app)
def serve(configuration: config.Configuration,
          shutdown_socket: Optional[socket.socket] = None) -> None:
    """Serve radicale from configuration.
    `shutdown_socket` can be used to gracefully shutdown the server.
    The socket can be created with `socket.socketpair()`, when the other socket
    gets closed the server stops accepting new requests by clients and the
    function returns after all active requests are finished.
    """
    logger.info("Starting Radicale")
    # Copy configuration before modifying
    configuration = configuration.copy()
    configuration.update({"server": {"_internal_server": "True"}}, "server",
                         privileged=True)
    use_ssl: bool = configuration.get("server", "ssl")
    server_class = ParallelHTTPSServer if use_ssl else ParallelHTTPServer
    application = Application(configuration)
    # Maps each listening socket to its server so select() results can be
    # resolved back to the right server in the mainloop.
    servers = {}
    try:
        hosts: List[Tuple[str, int]] = configuration.get("server", "hosts")
        for address_port in hosts:
            # retrieve IPv4/IPv6 address of address
            try:
                getaddrinfo = socket.getaddrinfo(address_port[0], address_port[1], 0, socket.SOCK_STREAM, socket.IPPROTO_TCP)
            except OSError as e:
                # Unresolvable host: log and keep trying the other hosts.
                logger.warning("cannot retrieve IPv4 or IPv6 address of '%s': %s" % (format_address(address_port), e))
                continue
            logger.debug("getaddrinfo of '%s': %s" % (format_address(address_port), getaddrinfo))
            for (address_family, socket_kind, socket_proto, socket_flags, socket_address) in getaddrinfo:
                logger.debug("try to create server socket on '%s'" % (format_address(socket_address)))
                try:
                    server = server_class(configuration, address_family, (socket_address[0], socket_address[1]), RequestHandler)
                except OSError as e:
                    # Bind failure on one address is not fatal; try the rest.
                    logger.warning("cannot create server socket on '%s': %s" % (format_address(socket_address), e))
                    continue
                servers[server.socket] = server
                server.set_app(application)
                logger.info("Listening on %r%s",
                            format_address(server.server_address),
                            " with SSL" if use_ssl else "")
        if not servers:
            raise RuntimeError("No servers started")
        # Mainloop
        select_timeout = None
        if sys.platform == "win32":
            # Fallback to busy waiting. (select(...) blocks SIGINT on Windows.)
            select_timeout = 1.0
        max_connections: int = configuration.get("server", "max_connections")
        logger.info("Radicale server ready")
        while True:
            rlist: List[socket.socket] = []
            # Wait for finished clients
            for server in servers.values():
                rlist.extend(server.worker_sockets)
            # Accept new connections if max_connections is not reached
            if max_connections <= 0 or len(rlist) < max_connections:
                rlist.extend(servers)
            # Use socket to get notified of program shutdown
            if shutdown_socket is not None:
                rlist.append(shutdown_socket)
            rlist, _, _ = select.select(rlist, [], [], select_timeout)
            rset = set(rlist)
            if shutdown_socket in rset:
                # The other end of the socketpair was closed: shut down.
                logger.info("Stopping Radicale")
                break
            for server in servers.values():
                # Reap workers that signalled completion via their sockets.
                finished_sockets = server.worker_sockets.intersection(rset)
                for s in finished_sockets:
                    s.close()
                    server.worker_sockets.remove(s)
                    rset.remove(s)
                if finished_sockets:
                    server.service_actions()
            if rset:
                # A listening socket became readable: accept one request.
                active_server = servers.get(rset.pop())
                if active_server:
                    active_server.handle_request()
    finally:
        # Wait for clients to finish and close servers
        for server in servers.values():
            for s in server.worker_sockets:
                # recv() blocks until the worker closes its end of the
                # socketpair -- presumably signalling request completion;
                # confirm against the server's finish_request implementation.
                s.recv(1)
                s.close()
            server.server_close()
| 14,688
|
Python
|
.py
| 307
| 37.837134
| 128
| 0.628332
|
Kozea/Radicale
| 3,268
| 426
| 211
|
GPL-3.0
|
9/5/2024, 5:11:02 PM (Europe/Amsterdam)
|
11,186
|
types.py
|
Kozea_Radicale/radicale/types.py
|
# This file is part of Radicale - CalDAV and CardDAV server
# Copyright © 2020 Unrud <unrud@outlook.com>
#
# This library is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Radicale. If not, see <http://www.gnu.org/licenses/>.
import contextlib
from typing import (Any, Callable, ContextManager, Iterator, List, Mapping,
MutableMapping, Protocol, Sequence, Tuple, TypeVar, Union,
runtime_checkable)
# Types describing the WSGI interface used throughout Radicale.
WSGIResponseHeaders = Union[Mapping[str, str], Sequence[Tuple[str, str]]]
# (status code, headers, body) -- body may be absent, text or raw bytes.
WSGIResponse = Tuple[int, WSGIResponseHeaders, Union[None, str, bytes]]
WSGIEnviron = Mapping[str, Any]
WSGIStartResponse = Callable[[str, List[Tuple[str, str]]], Any]
# Read-only and mutable views of the two-level (section -> option)
# configuration mapping.
CONFIG = Mapping[str, Mapping[str, Any]]
MUTABLE_CONFIG = MutableMapping[str, MutableMapping[str, Any]]
CONFIG_SCHEMA = Mapping[str, Mapping[str, Any]]
# Type variable used by the `contextmanager` wrapper below.
_T = TypeVar("_T")
def contextmanager(func: Callable[..., Iterator[_T]]
                   ) -> Callable[..., ContextManager[_T]]:
    """Compatibility wrapper for `contextlib.contextmanager` with
    `typeguard`"""
    wrapped = contextlib.contextmanager(func)
    # Rewrite the return annotation so type checking tools see a
    # ContextManager instead of the generator's Iterator.
    annotations = dict(func.__annotations__)
    annotations["return"] = ContextManager[_T]
    wrapped.__annotations__ = annotations
    return wrapped
@runtime_checkable
class InputStream(Protocol):
    """Structural type for a readable binary stream."""
    def read(self, size: int = ...) -> bytes: ...
@runtime_checkable
class ErrorStream(Protocol):
    """Structural type for a flushable text error stream."""
    def flush(self) -> object: ...
    def write(self, s: str) -> object: ...
# Imported at the end of the module (hence noqa:E402) -- presumably to
# avoid a circular import with radicale.item/radicale.storage; confirm
# before moving this to the top.
from radicale import item, storage  # noqa:E402 isort:skip
# Either a stored item or a collection (as yielded by storage discovery).
CollectionOrItem = Union[item.Item, storage.BaseCollection]
| 2,094
|
Python
|
.py
| 44
| 43.522727
| 78
| 0.720039
|
Kozea/Radicale
| 3,268
| 426
| 211
|
GPL-3.0
|
9/5/2024, 5:11:02 PM (Europe/Amsterdam)
|
11,187
|
owner_only.py
|
Kozea_Radicale/radicale/rights/owner_only.py
|
# This file is part of Radicale - CalDAV and CardDAV server
# Copyright © 2012-2017 Guillaume Ayoub
# Copyright © 2017-2018 Unrud <unrud@outlook.com>
#
# This library is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Radicale. If not, see <http://www.gnu.org/licenses/>.
"""
Rights backend that allows authenticated users to read and write their own
calendars and address books.
"""
import radicale.rights.authenticated as authenticated
from radicale import pathutils
class Rights(authenticated.Rights):
    def authorization(self, user: str, path: str) -> str:
        """Grant rights only inside the user's own principal subtree.
        The root collection stays readable; anything below a foreign
        principal yields no rights at all.
        """
        if self._verify_user and not user:
            return ""
        sane_path = pathutils.strip_path(path)
        if not sane_path:
            return "R"
        owner = sane_path.split("/", maxsplit=1)[0]
        if self._verify_user and user != owner:
            return ""
        depth = sane_path.count("/")
        if depth == 0:
            return "RW"
        if depth == 1:
            return "rw"
        return ""
| 1,479
|
Python
|
.py
| 36
| 36.527778
| 77
| 0.705923
|
Kozea/Radicale
| 3,268
| 426
| 211
|
GPL-3.0
|
9/5/2024, 5:11:02 PM (Europe/Amsterdam)
|
11,188
|
__init__.py
|
Kozea_Radicale/radicale/rights/__init__.py
|
# This file is part of Radicale - CalDAV and CardDAV server
# Copyright © 2012-2017 Guillaume Ayoub
# Copyright © 2017-2018 Unrud <unrud@outlook.com>
#
# This library is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Radicale. If not, see <http://www.gnu.org/licenses/>.
"""
The rights module used to determine if a user can read and/or write
collections and entries.
Permissions:
- R: read collections (excluding address books and calendars)
- r: read address book and calendar collections
- i: subset of **r** that only allows direct access via HTTP method GET
(CalDAV/CardDAV is susceptible to expensive search requests)
- W: write collections (excluding address books and calendars)
- w: write address book and calendar collections
Take a look at the class ``BaseRights`` if you want to implement your own.
"""
from typing import Sequence, Set
from radicale import config, utils
# Rights backends shipped with Radicale.
INTERNAL_TYPES: Sequence[str] = ("authenticated", "owner_write", "owner_only",
                                 "from_file")
def load(configuration: "config.Configuration") -> "BaseRights":
    """Load the rights module chosen in configuration."""
    return utils.load_plugin(INTERNAL_TYPES, "rights", "Rights", BaseRights,
                             configuration)
def intersect(a: str, b: str) -> str:
    """Intersect two lists of rights.
    Returns all rights that are both in ``a`` and ``b``.
    """
    common = set(a) & set(b)
    return "".join(common)
class BaseRights:
    # Groups of the authenticated user; class-level default is empty.
    # Fixed idiom: `set([])` -> `set()` (same value, canonical spelling).
    _user_groups: Set[str] = set()
    def __init__(self, configuration: "config.Configuration") -> None:
        """Initialize BaseRights.
        ``configuration`` see ``radicale.config`` module.
        The ``configuration`` must not change during the lifetime of
        this object, it is kept as an internal reference.
        """
        self.configuration = configuration
    def authorization(self, user: str, path: str) -> str:
        """Get granted rights of ``user`` for the collection ``path``.
        If ``user`` is empty, check for anonymous rights.
        ``path`` is sanitized.
        Returns granted rights (e.g. ``"RW"``).
        Must be implemented by backends; raises NotImplementedError here.
        """
        raise NotImplementedError
| 2,708
|
Python
|
.py
| 57
| 42.333333
| 78
| 0.70122
|
Kozea/Radicale
| 3,268
| 426
| 211
|
GPL-3.0
|
9/5/2024, 5:11:02 PM (Europe/Amsterdam)
|
11,189
|
authenticated.py
|
Kozea_Radicale/radicale/rights/authenticated.py
|
# This file is part of Radicale - CalDAV and CardDAV server
# Copyright © 2012-2017 Guillaume Ayoub
# Copyright © 2017-2018 Unrud <unrud@outlook.com>
#
# This library is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Radicale. If not, see <http://www.gnu.org/licenses/>.
"""
Rights backend that allows authenticated users to read and write all
calendars and address books.
"""
from radicale import config, pathutils, rights
class Rights(rights.BaseRights):
    def __init__(self, configuration: config.Configuration) -> None:
        super().__init__(configuration)
        # Anonymous users are only acceptable when authentication is
        # disabled entirely ("auth.type" == "none").
        auth_type = self.configuration.get("auth", "type")
        self._verify_user = auth_type != "none"
    def authorization(self, user: str, path: str) -> str:
        """Grant full rights to any authenticated user."""
        if self._verify_user and not user:
            return ""
        depth = pathutils.strip_path(path).count("/")
        if depth == 0:
            return "RW"
        if depth == 1:
            return "rw"
        return ""
| 1,470
|
Python
|
.py
| 34
| 39.029412
| 76
| 0.70918
|
Kozea/Radicale
| 3,268
| 426
| 211
|
GPL-3.0
|
9/5/2024, 5:11:02 PM (Europe/Amsterdam)
|
11,190
|
owner_write.py
|
Kozea_Radicale/radicale/rights/owner_write.py
|
# This file is part of Radicale - CalDAV and CardDAV server
# Copyright © 2012-2017 Guillaume Ayoub
# Copyright © 2017-2018 Unrud <unrud@outlook.com>
#
# This library is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Radicale. If not, see <http://www.gnu.org/licenses/>.
"""
Rights backend that allows authenticated users to read all calendars and
address books but only grants write access to their own.
"""
import radicale.rights.authenticated as authenticated
from radicale import pathutils
class Rights(authenticated.Rights):
    def authorization(self, user: str, path: str) -> str:
        """Read everywhere, but write only below the own principal."""
        if self._verify_user and not user:
            return ""
        sane_path = pathutils.strip_path(path)
        if not sane_path:
            return "R"
        # Without user verification every collection counts as owned.
        owned = (not self._verify_user
                 or user == sane_path.split("/", maxsplit=1)[0])
        depth = sane_path.count("/")
        if depth == 0:
            return "RW" if owned else "R"
        if depth == 1:
            return "rw" if owned else "r"
        return ""
| 1,574
|
Python
|
.py
| 38
| 36.526316
| 72
| 0.698298
|
Kozea/Radicale
| 3,268
| 426
| 211
|
GPL-3.0
|
9/5/2024, 5:11:02 PM (Europe/Amsterdam)
|
11,191
|
from_file.py
|
Kozea_Radicale/radicale/rights/from_file.py
|
# This file is part of Radicale - CalDAV and CardDAV server
# Copyright © 2012-2017 Guillaume Ayoub
# Copyright © 2017-2019 Unrud <unrud@outlook.com>
#
# This library is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Radicale. If not, see <http://www.gnu.org/licenses/>.
"""
Rights backend based on a regex-based file whose name is specified in the
config (section "rights", key "file").
The login is matched against the "user" key, and the collection path
is matched against the "collection" key. In the "collection" regex you can use
`{user}` and get groups from the "user" regex with `{0}`, `{1}`, etc.
In consequence of the parameter substitution you have to write `{{` and `}}`
if you want to use regular curly braces in the "user" and "collection" regexes.
For example, for the "user" key, ".+" means "authenticated user" and ".*"
means "anybody" (including anonymous users).
Section names are only used for naming the rule.
Leading or ending slashes are trimmed from collection's path.
"""
import configparser
import re
from radicale import config, pathutils, rights
from radicale.log import logger
class Rights(rights.BaseRights):
    _filename: str
    def __init__(self, configuration: config.Configuration) -> None:
        """Load and parse the rights file named by [rights] file."""
        super().__init__(configuration)
        self._filename = configuration.get("rights", "file")
        self._log_rights_rule_doesnt_match_on_debug = configuration.get("logging", "rights_rule_doesnt_match_on_debug")
        self._rights_config = configparser.ConfigParser()
        try:
            with open(self._filename, "r") as f:
                self._rights_config.read_file(f)
            logger.debug("Read rights file")
        except Exception as e:
            raise RuntimeError("Failed to load rights file %r: %s" %
                               (self._filename, e)) from e
    def authorization(self, user: str, path: str) -> str:
        """Return the permissions of the first matching section.
        A section matches when its "user" regex matches ``user`` and its
        "collection" regex matches the sanitized ``path``, or when the
        user belongs to one of the section's "groups" and the
        "collection" regex matches. Sections are tried in file order.
        """
        user = user or ""
        sane_path = pathutils.strip_path(path)
        # Prevent "regex injection"
        escaped_user = re.escape(user)
        if not self._log_rights_rule_doesnt_match_on_debug:
            logger.debug("logging of rules which doesn't match suppressed by config/option [logging] rights_rule_doesnt_match_on_debug")
        for section in self._rights_config.sections():
            group_match = None
            user_match = None
            try:
                user_pattern = self._rights_config.get(section, "user", fallback="")
                collection_pattern = self._rights_config.get(section, "collection")
                allowed_groups = self._rights_config.get(section, "groups", fallback="").split(",")
                try:
                    group_match = len(self._user_groups.intersection(allowed_groups)) > 0
                except Exception:
                    pass
                # Use empty format() for harmonized handling of curly braces
                if user_pattern != "":
                    user_match = re.fullmatch(user_pattern.format(), user)
                user_collection_match = user_match and re.fullmatch(
                    collection_pattern.format(
                        *(re.escape(s) for s in user_match.groups()),
                        user=escaped_user), sane_path)
                group_collection_match = re.fullmatch(collection_pattern.format(user=escaped_user), sane_path)
            except Exception as e:
                raise RuntimeError("Error in section %r of rights file %r: "
                                   "%s" % (section, self._filename, e)) from e
            if user_match and user_collection_match:
                permission = self._rights_config.get(section, "permissions")
                logger.debug("Rule %r:%r matches %r:%r from section %r permission %r",
                             user, sane_path, user_pattern,
                             collection_pattern, section, permission)
                return permission
            if group_match and group_collection_match:
                permission = self._rights_config.get(section, "permissions")
                logger.debug("Rule %r:%r matches %r:%r from section %r permission %r by group membership",
                             user, sane_path, user_pattern,
                             collection_pattern, section, permission)
                return permission
            # BUG FIX: this "doesn't match" message used to be logged
            # unconditionally here AND again behind the switch below,
            # duplicating the output and defeating the
            # rights_rule_doesnt_match_on_debug option. Only the guarded
            # variant is kept.
            if self._log_rights_rule_doesnt_match_on_debug:
                logger.debug("Rule %r:%r doesn't match %r:%r from section %r",
                             user, sane_path, user_pattern, collection_pattern,
                             section)
        logger.info("Rights: %r:%r doesn't match any section", user, sane_path)
        return ""
| 5,380
|
Python
|
.py
| 97
| 44.268041
| 136
| 0.624336
|
Kozea/Radicale
| 3,268
| 426
| 211
|
GPL-3.0
|
9/5/2024, 5:11:02 PM (Europe/Amsterdam)
|
11,192
|
__init__.py
|
Kozea_Radicale/radicale/storage/__init__.py
|
# This file is part of Radicale - CalDAV and CardDAV server
# Copyright © 2014 Jean-Marc Martins
# Copyright © 2012-2017 Guillaume Ayoub
# Copyright © 2017-2018 Unrud <unrud@outlook.com>
#
# This library is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Radicale. If not, see <http://www.gnu.org/licenses/>.
"""
The storage module that stores calendars and address books.
Take a look at the class ``BaseCollection`` if you want to implement your own.
"""
import json
import xml.etree.ElementTree as ET
from hashlib import sha256
from typing import (Callable, ContextManager, Iterable, Iterator, Mapping,
Optional, Sequence, Set, Tuple, Union, overload)
import vobject
from radicale import config
from radicale import item as radicale_item
from radicale import types, utils
from radicale.item import filter as radicale_filter
# Storage backends shipped with Radicale.
INTERNAL_TYPES: Sequence[str] = ("multifilesystem", "multifilesystem_nolock",)
# Packages whose installed versions are folded into CACHE_VERSION.
CACHE_DEPS: Sequence[str] = ("radicale", "vobject", "python-dateutil",)
# Byte string like b"radicale=3.x;vobject=...;" identifying the cache
# format; presumably used to invalidate caches on upgrades -- confirm in
# the multifilesystem backend.
CACHE_VERSION: bytes = "".join(
    "%s=%s;" % (pkg, utils.package_version(pkg))
    for pkg in CACHE_DEPS).encode()
def load(configuration: "config.Configuration") -> "BaseStorage":
    """Load the storage module chosen in configuration."""
    return utils.load_plugin(INTERNAL_TYPES, "storage", "Storage", BaseStorage,
                             configuration)
class ComponentExistsError(ValueError):
    """Raised when a component already exists at the given path."""
    def __init__(self, path: str) -> None:
        super().__init__("Component already exists: %r" % path)
class ComponentNotFoundError(ValueError):
    """Raised when no component exists at the given path."""
    def __init__(self, path: str) -> None:
        super().__init__("Component doesn't exist: %r" % path)
class BaseCollection:
    @property
    def path(self) -> str:
        """The sanitized path of the collection without leading or
        trailing ``/``."""
        raise NotImplementedError
    @property
    def owner(self) -> str:
        """The owner of the collection."""
        # The first path segment is the owning principal.
        return self.path.split("/", maxsplit=1)[0]
    @property
    def is_principal(self) -> bool:
        """Collection is a principal."""
        # Non-empty, single-segment paths are principal collections.
        return bool(self.path) and "/" not in self.path
    @property
    def etag(self) -> str:
        """Encoded as quoted-string (see RFC 2616)."""
        # Hash every item's href/etag pair plus the collection metadata so
        # any item or property change produces a new collection etag.
        etag = sha256()
        for item in self.get_all():
            assert item.href
            etag.update((item.href + "/" + item.etag).encode())
        etag.update(json.dumps(self.get_meta(), sort_keys=True).encode())
        return '"%s"' % etag.hexdigest()
    @property
    def tag(self) -> str:
        """The tag of the collection."""
        return self.get_meta("tag") or ""
    def sync(self, old_token: str = "") -> Tuple[str, Iterable[str]]:
        """Get the current sync token and changed items for synchronization.
        ``old_token`` an old sync token which is used as the base of the
        delta update. If sync token is empty, all items are returned.
        ValueError is raised for invalid or old tokens.
        WARNING: This simple default implementation treats all sync-token as
        invalid.
        """
        def hrefs_iter() -> Iterator[str]:
            for item in self.get_all():
                assert item.href
                yield item.href
        # Token is derived from the collection etag, so it changes whenever
        # the collection changes.
        token = "http://radicale.org/ns/sync/%s" % self.etag.strip("\"")
        if old_token:
            raise ValueError("Sync token are not supported")
        return token, hrefs_iter()
    def get_multi(self, hrefs: Iterable[str]
                  ) -> Iterable[Tuple[str, Optional["radicale_item.Item"]]]:
        """Fetch multiple items.
        It's not required to return the requested items in the correct order.
        Duplicated hrefs can be ignored.
        Returns tuples with the href and the item or None if the item doesn't
        exist.
        """
        raise NotImplementedError
    def get_all(self) -> Iterable["radicale_item.Item"]:
        """Fetch all items."""
        raise NotImplementedError
    def get_filtered(self, filters: Iterable[ET.Element]
                     ) -> Iterable[Tuple["radicale_item.Item", bool]]:
        """Fetch all items with optional filtering.
        This can largely improve performance of reports depending on
        the filters and this implementation.
        Returns tuples in the form ``(item, filters_matched)``.
        ``filters_matched`` is a bool that indicates if ``filters`` are fully
        matched.
        """
        # Collections without a tag (no calendar/address book) yield nothing.
        if not self.tag:
            return
        tag, start, end, simple = radicale_filter.simplify_prefilters(
            filters, self.tag)
        for item in self.get_all():
            # Skip items of the wrong component type or outside the
            # prefiltered time range.
            if tag is not None and tag != item.component_name:
                continue
            istart, iend = item.time_range
            if istart >= end or iend <= start:
                continue
            yield item, simple and (start <= istart or iend <= end)
    def has_uid(self, uid: str) -> bool:
        """Check if a UID exists in the collection."""
        for item in self.get_all():
            if item.uid == uid:
                return True
        return False
    def upload(self, href: str, item: "radicale_item.Item") -> (
            "radicale_item.Item"):
        """Upload a new or replace an existing item."""
        raise NotImplementedError
    def delete(self, href: Optional[str] = None) -> None:
        """Delete an item.
        When ``href`` is ``None``, delete the collection.
        """
        raise NotImplementedError
    @overload
    def get_meta(self, key: None = None) -> Mapping[str, str]: ...
    @overload
    def get_meta(self, key: str) -> Optional[str]: ...
    def get_meta(self, key: Optional[str] = None
                 ) -> Union[Mapping[str, str], Optional[str]]:
        """Get metadata value for collection.
        Return the value of the property ``key``. If ``key`` is ``None`` return
        a dict with all properties
        """
        raise NotImplementedError
    def set_meta(self, props: Mapping[str, str]) -> None:
        """Set metadata values for collection.
        ``props`` a dict with values for properties.
        """
        raise NotImplementedError
    @property
    def last_modified(self) -> str:
        """Get the HTTP-datetime of when the collection was modified."""
        raise NotImplementedError
    def serialize(self) -> str:
        """Get the unicode string representing the whole collection."""
        if self.tag == "VCALENDAR":
            in_vcalendar = False
            vtimezones = ""
            included_tzids: Set[str] = set()
            vtimezone = []
            tzid = None
            components = ""
            # Concatenate all child elements of VCALENDAR from all items
            # together, while preventing duplicated VTIMEZONE entries.
            # VTIMEZONEs are only distinguished by their TZID, if different
            # timezones share the same TZID this produces erroneous output.
            # VObject fails at this too.
            for item in self.get_all():
                # depth tracks BEGIN:/END: nesting within the serialization.
                depth = 0
                for line in item.serialize().split("\r\n"):
                    if line.startswith("BEGIN:"):
                        depth += 1
                    if depth == 1 and line == "BEGIN:VCALENDAR":
                        in_vcalendar = True
                    elif in_vcalendar:
                        if depth == 1 and line.startswith("END:"):
                            in_vcalendar = False
                        if depth == 2 and line == "BEGIN:VTIMEZONE":
                            vtimezone.append(line + "\r\n")
                        elif vtimezone:
                            # Buffer the VTIMEZONE; emit it only once per TZID.
                            vtimezone.append(line + "\r\n")
                            if depth == 2 and line.startswith("TZID:"):
                                tzid = line[len("TZID:"):]
                            elif depth == 2 and line.startswith("END:"):
                                if tzid is None or tzid not in included_tzids:
                                    vtimezones += "".join(vtimezone)
                                if tzid is not None:
                                    included_tzids.add(tzid)
                                vtimezone.clear()
                                tzid = None
                        elif depth >= 2:
                            components += line + "\r\n"
                    if line.startswith("END:"):
                        depth -= 1
            template = vobject.iCalendar()
            displayname = self.get_meta("D:displayname")
            if displayname:
                template.add("X-WR-CALNAME")
                template.x_wr_calname.value_param = "TEXT"
                template.x_wr_calname.value = displayname
            description = self.get_meta("C:calendar-description")
            if description:
                template.add("X-WR-CALDESC")
                template.x_wr_caldesc.value_param = "TEXT"
                template.x_wr_caldesc.value = description
            template = template.serialize()
            # +2 skips the leading "\r\n" so insertion happens right before
            # the END:VCALENDAR line.
            template_insert_pos = template.find("\r\nEND:VCALENDAR\r\n") + 2
            # NOTE(review): this assert can never fail -- find() returns -1
            # on a miss, and -1 + 2 == 1 != -1. It only documents intent.
            assert template_insert_pos != -1
            return (template[:template_insert_pos] +
                    vtimezones + components +
                    template[template_insert_pos:])
        if self.tag == "VADDRESSBOOK":
            return "".join((item.serialize() for item in self.get_all()))
        return ""
class BaseStorage:
    def __init__(self, configuration: "config.Configuration") -> None:
        """Initialize BaseStorage.
        ``configuration`` see ``radicale.config`` module.
        The ``configuration`` must not change during the lifetime of
        this object, it is kept as an internal reference.
        """
        self.configuration = configuration
    # NOTE(review): ``user_groups`` below uses a mutable default
    # (``set([])``). Harmless only as long as implementations never mutate
    # it -- confirm in subclasses.
    def discover(
            self, path: str, depth: str = "0",
            child_context_manager: Optional[
                Callable[[str, Optional[str]], ContextManager[None]]] = None,
            user_groups: Set[str] = set([])) -> Iterable["types.CollectionOrItem"]:
        """Discover a list of collections under the given ``path``.
        ``path`` is sanitized.
        If ``depth`` is "0", only the actual object under ``path`` is
        returned.
        If ``depth`` is anything but "0", it is considered as "1" and direct
        children are included in the result.
        The root collection "/" must always exist.
        """
        raise NotImplementedError
    def move(self, item: "radicale_item.Item", to_collection: BaseCollection,
             to_href: str) -> None:
        """Move an object.
        ``item`` is the item to move.
        ``to_collection`` is the target collection.
        ``to_href`` is the target name in ``to_collection``. An item with the
        same name might already exist.
        """
        raise NotImplementedError
    def create_collection(
            self, href: str,
            items: Optional[Iterable["radicale_item.Item"]] = None,
            props: Optional[Mapping[str, str]] = None) -> BaseCollection:
        """Create a collection.
        ``href`` is the sanitized path.
        If the collection already exists and neither ``collection`` nor
        ``props`` are set, this method shouldn't do anything. Otherwise the
        existing collection must be replaced.
        ``collection`` is a list of vobject components.
        ``props`` are metadata values for the collection.
        ``props["tag"]`` is the type of collection (VCALENDAR or VADDRESSBOOK).
        If the key ``tag`` is missing, ``items`` is ignored.
        """
        raise NotImplementedError
    @types.contextmanager
    def acquire_lock(self, mode: str, user: str = "") -> Iterator[None]:
        """Set a context manager to lock the whole storage.
        ``mode`` must either be "r" for shared access or "w" for exclusive
        access.
        ``user`` is the name of the logged in user or empty.
        """
        raise NotImplementedError
    def verify(self) -> bool:
        """Check the storage for errors."""
        raise NotImplementedError
| 12,701
|
Python
|
.py
| 276
| 35.391304
| 83
| 0.593778
|
Kozea/Radicale
| 3,268
| 426
| 211
|
GPL-3.0
|
9/5/2024, 5:11:02 PM (Europe/Amsterdam)
|
11,193
|
multifilesystem_nolock.py
|
Kozea_Radicale/radicale/storage/multifilesystem_nolock.py
|
# This file is part of Radicale - CalDAV and CardDAV server
# Copyright © 2021 Unrud <unrud@outlook.com>
#
# This library is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Radicale. If not, see <http://www.gnu.org/licenses/>.
"""
The multifilesystem backend without file-based locking.
"""
import threading
from collections import deque
from typing import ClassVar, Deque, Dict, Hashable, Iterator, Type
from radicale import config, pathutils, types
from radicale.storage import multifilesystem
class RwLock(pathutils.RwLock):
    """Readers-writer lock using only in-process primitives."""
    _cond: threading.Condition
    def __init__(self) -> None:
        # Empty path: the file-based part of the parent lock is unused.
        super().__init__("")
        self._cond = threading.Condition(self._lock)
    @types.contextmanager
    def acquire(self, mode: str, user: str = "") -> Iterator[None]:
        # NOTE(review): ``"" not in "rw"`` is False, so an empty mode slips
        # through this substring check and is treated as a writer -- confirm
        # callers always pass "r" or "w".
        if mode not in "rw":
            raise ValueError("Invalid mode: %r" % mode)
        with self._cond:
            # Readers wait for the writer to leave; a writer additionally
            # waits until all readers are gone.
            self._cond.wait_for(lambda: not self._writer and (
                mode == "r" or self._readers == 0))
            if mode == "r":
                self._readers += 1
            else:
                self._writer = True
        try:
            yield
        finally:
            with self._cond:
                if mode == "r":
                    self._readers -= 1
                # Harmless for readers: _writer is already False then.
                self._writer = False
                if self._readers == 0:
                    self._cond.notify_all()
class LockDict:
    """Per-key mutual exclusion with FIFO wait queues.
    Concurrent acquires of the same key are served in arrival order;
    different keys do not block each other.
    """
    _lock: threading.Lock
    _dict: Dict[Hashable, Deque[threading.Lock]]
    def __init__(self) -> None:
        self._lock = threading.Lock()
        self._dict = {}
    @types.contextmanager
    def acquire(self, key: Hashable) -> Iterator[None]:
        with self._lock:
            waiters = self._dict.get(key)
            if waiters is None:
                self._dict[key] = waiters = deque()
            # A non-empty queue means the key is currently held.
            wait = bool(waiters)
            waiter = threading.Lock()
            waiter.acquire()
            waiters.append(waiter)
        if wait:
            # Block until the predecessor releases our waiter lock.
            waiter.acquire()
        try:
            yield
        finally:
            with self._lock:
                assert waiters[0] is waiter and self._dict[key] is waiters
                del waiters[0]
                if waiters:
                    # Hand the key over to the next queued acquirer.
                    waiters[0].release()
                else:
                    del self._dict[key]
class Collection(multifilesystem.Collection):
    _storage: "Storage"
    @types.contextmanager
    def _acquire_cache_lock(self, ns: str = "") -> Iterator[None]:
        """Serialize cache access per (collection path, namespace)."""
        if self._storage._lock.locked == "w":
            # The exclusive storage lock already guarantees exclusion.
            yield
        else:
            with self._storage._cache_lock.acquire((self.path, ns)):
                yield
class Storage(multifilesystem.Storage):
    _collection_class: ClassVar[Type[Collection]] = Collection
    _cache_lock: LockDict
    def __init__(self, configuration: config.Configuration) -> None:
        super().__init__(configuration)
        # Replace the parent's lock with the in-memory variant and add
        # per-collection cache locks.
        self._lock = RwLock()
        self._cache_lock = LockDict()
| 3,478
|
Python
|
.py
| 92
| 29.086957
| 74
| 0.603448
|
Kozea/Radicale
| 3,268
| 426
| 211
|
GPL-3.0
|
9/5/2024, 5:11:02 PM (Europe/Amsterdam)
|
11,194
|
lock.py
|
Kozea_Radicale/radicale/storage/multifilesystem/lock.py
|
# This file is part of Radicale - CalDAV and CardDAV server
# Copyright © 2014 Jean-Marc Martins
# Copyright © 2012-2017 Guillaume Ayoub
# Copyright © 2017-2019 Unrud <unrud@outlook.com>
#
# This library is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Radicale. If not, see <http://www.gnu.org/licenses/>.
import contextlib
import logging
import os
import shlex
import signal
import subprocess
import sys
from typing import Iterator
from radicale import config, pathutils, types
from radicale.log import logger
from radicale.storage.multifilesystem.base import CollectionBase, StorageBase
class CollectionPartLock(CollectionBase):
    @types.contextmanager
    def _acquire_cache_lock(self, ns: str = "") -> Iterator[None]:
        """Lock this collection's cache folder for writing.
        Skipped when the whole storage is already locked exclusively.
        """
        if self._storage._lock.locked == "w":
            yield
            return
        cache_folder = os.path.join(self._filesystem_path, ".Radicale.cache")
        self._storage._makedirs_synced(cache_folder)
        # One lock file per namespace suffix.
        lock_path = os.path.join(cache_folder,
                                 ".Radicale.lock" + (".%s" % ns if ns else ""))
        lock = pathutils.RwLock(lock_path)
        with lock.acquire("w"):
            yield
class StoragePartLock(StorageBase):
    """Part of the storage implementation that serializes access via a
    storage-wide reader/writer file lock and runs the configured storage
    hook after write access."""

    _lock: pathutils.RwLock  # backed by ".Radicale.lock" in the storage folder
    _hook: str               # shell command template from [storage] hook

    def __init__(self, configuration: config.Configuration) -> None:
        super().__init__(configuration)
        lock_path = os.path.join(self._filesystem_folder, ".Radicale.lock")
        self._lock = pathutils.RwLock(lock_path)
        self._hook = configuration.get("storage", "hook")

    @types.contextmanager
    def acquire_lock(self, mode: str, user: str = "") -> Iterator[None]:
        """Hold the storage lock in ``mode`` for the duration of the block.

        After the block finishes under write mode ("w"), the configured
        hook (if any) is run through the shell with ``%(user)s`` replaced
        by the shell-quoted user name ("Anonymous" when empty).  Raises
        ``subprocess.CalledProcessError`` when the hook exits non-zero.
        """
        with self._lock.acquire(mode):
            yield
            # execute hook
            if mode == "w" and self._hook:
                # Only capture the hook's output when debug logging is on.
                debug = logger.isEnabledFor(logging.DEBUG)
                # Use new process group for child to prevent terminals
                # from sending SIGINT etc.
                preexec_fn = None
                creationflags = 0
                if sys.platform == "win32":
                    creationflags |= subprocess.CREATE_NEW_PROCESS_GROUP
                else:
                    # Process group is also used to identify child processes
                    preexec_fn = os.setpgrp
                command = self._hook % {
                    "user": shlex.quote(user or "Anonymous")}
                logger.debug("Running storage hook")
                p = subprocess.Popen(
                    command, stdin=subprocess.DEVNULL,
                    stdout=subprocess.PIPE if debug else subprocess.DEVNULL,
                    stderr=subprocess.PIPE if debug else subprocess.DEVNULL,
                    shell=True, universal_newlines=True, preexec_fn=preexec_fn,
                    cwd=self._filesystem_folder, creationflags=creationflags)
                try:
                    stdout_data, stderr_data = p.communicate()
                except BaseException:  # e.g. KeyboardInterrupt or SystemExit
                    p.kill()
                    p.wait()
                    raise
                finally:
                    if sys.platform != "win32":
                        # Kill remaining children identified by process group
                        with contextlib.suppress(OSError):
                            os.killpg(p.pid, signal.SIGKILL)
                if stdout_data:
                    logger.debug("Captured stdout from hook:\n%s", stdout_data)
                if stderr_data:
                    logger.debug("Captured stderr from hook:\n%s", stderr_data)
                if p.returncode != 0:
                    raise subprocess.CalledProcessError(p.returncode, p.args)
| 4,200
|
Python
|
.py
| 91
| 35.197802
| 79
| 0.614502
|
Kozea/Radicale
| 3,268
| 426
| 211
|
GPL-3.0
|
9/5/2024, 5:11:02 PM (Europe/Amsterdam)
|
11,195
|
sync.py
|
Kozea_Radicale/radicale/storage/multifilesystem/sync.py
|
# This file is part of Radicale - CalDAV and CardDAV server
# Copyright © 2014 Jean-Marc Martins
# Copyright © 2012-2017 Guillaume Ayoub
# Copyright © 2017-2019 Unrud <unrud@outlook.com>
#
# This library is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Radicale. If not, see <http://www.gnu.org/licenses/>.
import contextlib
import itertools
import os
import pickle
from hashlib import sha256
from typing import BinaryIO, Iterable, Tuple, cast
from radicale.log import logger
from radicale.storage.multifilesystem.base import CollectionBase
from radicale.storage.multifilesystem.cache import CollectionPartCache
from radicale.storage.multifilesystem.history import CollectionPartHistory
class CollectionPartSync(CollectionPartCache, CollectionPartHistory,
                         CollectionBase):
    """Part of the collection implementation that computes sync tokens and
    the set of changed items between two tokens."""

    def sync(self, old_token: str = "") -> Tuple[str, Iterable[str]]:
        """Return ``(token, changed_hrefs)`` relative to ``old_token``.

        ``old_token`` is either empty (full sync: every current item is
        reported as changed) or a token previously returned by this method.
        Raises ``ValueError`` when the token is malformed or when its stored
        state no longer exists (expired or damaged).
        """
        # The sync token has the form http://radicale.org/ns/sync/TOKEN_NAME
        # where TOKEN_NAME is the sha256 hash of all history etags of present
        # and past items of the collection.

        def check_token_name(token_name: str) -> bool:
            # A valid token name is exactly 64 lowercase hex digits (sha256).
            if len(token_name) != 64:
                return False
            for c in token_name:
                if c not in "0123456789abcdef":
                    return False
            return True

        old_token_name = ""
        if old_token:
            # Extract the token name from the sync token
            if not old_token.startswith("http://radicale.org/ns/sync/"):
                raise ValueError("Malformed token: %r" % old_token)
            old_token_name = old_token[len("http://radicale.org/ns/sync/"):]
            if not check_token_name(old_token_name):
                raise ValueError("Malformed token: %r" % old_token)
        # Get the current state and sync-token of the collection.
        state = {}
        token_name_hash = sha256()
        # Find the history of all existing and deleted items
        for href, item in itertools.chain(
                ((item.href, item) for item in self.get_all()),
                ((href, None) for href in self._get_deleted_history_hrefs())):
            history_etag = self._update_history_etag(href, item)
            state[href] = history_etag
            token_name_hash.update((href + "/" + history_etag).encode())
        token_name = token_name_hash.hexdigest()
        token = "http://radicale.org/ns/sync/%s" % token_name
        if token_name == old_token_name:
            # Nothing changed
            return token, ()
        token_folder = os.path.join(self._filesystem_path,
                                    ".Radicale.cache", "sync-token")
        token_path = os.path.join(token_folder, token_name)
        old_state = {}
        if old_token_name:
            # load the old token state
            old_token_path = os.path.join(token_folder, old_token_name)
            try:
                # Race: Another process might have deleted the file.
                with open(old_token_path, "rb") as f:
                    old_state = pickle.load(f)
            except (FileNotFoundError, pickle.UnpicklingError,
                    ValueError) as e:
                if isinstance(e, (pickle.UnpicklingError, ValueError)):
                    logger.warning(
                        "Failed to load stored sync token %r in %r: %s",
                        old_token_name, self.path, e, exc_info=True)
                    # Delete the damaged file
                    with contextlib.suppress(FileNotFoundError,
                                             PermissionError):
                        os.remove(old_token_path)
                raise ValueError("Token not found: %r" % old_token)
        # write the new token state or update the modification time of
        # existing token state
        if not os.path.exists(token_path):
            self._storage._makedirs_synced(token_folder)
            try:
                # Race: Other processes might have created and locked the file.
                # TODO: better fix for "mypy"
                with self._atomic_write(token_path, "wb") as fo:  # type: ignore
                    fb = cast(BinaryIO, fo)
                    pickle.dump(state, fb)
            except PermissionError:
                pass
            else:
                # clean up old sync tokens and item cache
                self._clean_cache(token_folder, os.listdir(token_folder),
                                  max_age=self._max_sync_token_age)
                self._clean_history()
        else:
            # Try to update the modification time
            with contextlib.suppress(FileNotFoundError):
                # Race: Another process might have deleted the file.
                os.utime(token_path)
        changes = []
        # Find all new, changed and deleted (that are still in the item cache)
        # items
        for href, history_etag in state.items():
            if history_etag != old_state.get(href):
                changes.append(href)
        # Find all deleted items that are no longer in the item cache
        for href, history_etag in old_state.items():
            if href not in state:
                changes.append(href)
        return token, changes
| 5,758
|
Python
|
.py
| 118
| 37.279661
| 80
| 0.60753
|
Kozea/Radicale
| 3,268
| 426
| 211
|
GPL-3.0
|
9/5/2024, 5:11:02 PM (Europe/Amsterdam)
|
11,196
|
meta.py
|
Kozea_Radicale/radicale/storage/multifilesystem/meta.py
|
# This file is part of Radicale - CalDAV and CardDAV server
# Copyright © 2014 Jean-Marc Martins
# Copyright © 2012-2017 Guillaume Ayoub
# Copyright © 2017-2018 Unrud <unrud@outlook.com>
#
# This library is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Radicale. If not, see <http://www.gnu.org/licenses/>.
import json
import os
from typing import Mapping, Optional, TextIO, Union, cast, overload
import radicale.item as radicale_item
from radicale.storage import multifilesystem
from radicale.storage.multifilesystem.base import CollectionBase
class CollectionPartMeta(CollectionBase):
    """Part of the collection implementation that reads and writes the
    WebDAV properties stored in the ".Radicale.props" file."""

    _meta_cache: Optional[Mapping[str, str]]
    _props_path: str

    def __init__(self, storage_: "multifilesystem.Storage", path: str,
                 filesystem_path: Optional[str] = None) -> None:
        super().__init__(storage_, path, filesystem_path)
        self._props_path = os.path.join(
            self._filesystem_path, ".Radicale.props")
        self._meta_cache = None

    @overload
    def get_meta(self, key: None = None) -> Mapping[str, str]: ...

    @overload
    def get_meta(self, key: str) -> Optional[str]: ...

    def get_meta(self, key: Optional[str] = None) -> Union[Mapping[str, str],
                                                           Optional[str]]:
        """Return all properties (``key is None``) or one property value.

        Raises ``RuntimeError`` when the stored properties are invalid.
        """
        # reuse cached value if the storage is read-only
        if self._meta_cache is None or self._storage._lock.locked == "w":
            # Note: the outer try must also cover json.load, because
            # json.JSONDecodeError is a subclass of ValueError.
            try:
                try:
                    with open(self._props_path, encoding=self._encoding) as f:
                        raw_props = json.load(f)
                except FileNotFoundError:
                    raw_props = {}
                self._meta_cache = radicale_item.check_and_sanitize_props(
                    raw_props)
            except ValueError as e:
                raise RuntimeError("Failed to load properties of collection "
                                   "%r: %s" % (self.path, e)) from e
        if key is None:
            return self._meta_cache
        return self._meta_cache.get(key)

    def set_meta(self, props: Mapping[str, str]) -> None:
        """Atomically replace the stored properties with ``props``."""
        # TODO: better fix for "mypy"
        with self._atomic_write(self._props_path, "w") as fo:  # type: ignore
            out = cast(TextIO, fo)
            json.dump(props, out, sort_keys=True)
| 2,798
|
Python
|
.py
| 57
| 40.508772
| 78
| 0.642595
|
Kozea/Radicale
| 3,268
| 426
| 211
|
GPL-3.0
|
9/5/2024, 5:11:02 PM (Europe/Amsterdam)
|
11,197
|
upload.py
|
Kozea_Radicale/radicale/storage/multifilesystem/upload.py
|
# This file is part of Radicale - CalDAV and CardDAV server
# Copyright © 2014 Jean-Marc Martins
# Copyright © 2012-2017 Guillaume Ayoub
# Copyright © 2017-2018 Unrud <unrud@outlook.com>
#
# This library is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Radicale. If not, see <http://www.gnu.org/licenses/>.
import errno
import os
import pickle
import sys
from typing import Iterable, Iterator, TextIO, cast
import radicale.item as radicale_item
from radicale import pathutils
from radicale.storage.multifilesystem.base import CollectionBase
from radicale.storage.multifilesystem.cache import CollectionPartCache
from radicale.storage.multifilesystem.get import CollectionPartGet
from radicale.storage.multifilesystem.history import CollectionPartHistory
class CollectionPartUpload(CollectionPartGet, CollectionPartCache,
                           CollectionPartHistory, CollectionBase):
    """Part of the collection implementation that stores items on disk."""

    def upload(self, href: str, item: radicale_item.Item
               ) -> radicale_item.Item:
        """Store ``item`` under ``href`` and return the item as re-read
        from storage.

        Raises ``pathutils.UnsafePathError`` for unsafe hrefs, ``ValueError``
        when caching the item fails, and ``RuntimeError`` when the item
        cannot be read back after writing.
        """
        if not pathutils.is_safe_filesystem_path_component(href):
            raise pathutils.UnsafePathError(href)
        try:
            self._store_item_cache(href, item)
        except Exception as e:
            raise ValueError("Failed to store item %r in collection %r: %s" %
                             (href, self.path, e)) from e
        path = pathutils.path_to_filesystem(self._filesystem_path, href)
        # TODO: better fix for "mypy"
        with self._atomic_write(path, newline="") as fo:  # type: ignore
            f = cast(TextIO, fo)
            f.write(item.serialize())
        # Clean the cache after the actual item is stored, or the cache entry
        # will be removed again.
        self._clean_item_cache()
        # Track the change
        self._update_history_etag(href, item)
        self._clean_history()
        uploaded_item = self._get(href, verify_href=False)
        if uploaded_item is None:
            raise RuntimeError("Storage modified externally")
        return uploaded_item

    def _upload_all_nonatomic(self, items: Iterable[radicale_item.Item],
                              suffix: str = "") -> None:
        """Upload a new set of items non-atomic"""
        def is_safe_free_href(href: str) -> bool:
            # Safe as a single path component and not already taken.
            return (pathutils.is_safe_filesystem_path_component(href) and
                    not os.path.lexists(
                        os.path.join(self._filesystem_path, href)))

        def get_safe_free_hrefs(uid: str) -> Iterator[str]:
            # Candidates in order: the UID itself (with suffix), the UID's
            # etag, and finally a generated free name as last resort.
            for href in [uid if uid.lower().endswith(suffix.lower())
                         else uid + suffix,
                         radicale_item.get_etag(uid).strip('"') + suffix]:
                if is_safe_free_href(href):
                    yield href
            yield radicale_item.find_available_uid(
                lambda href: not is_safe_free_href(href), suffix)

        cache_folder = os.path.join(self._filesystem_path,
                                    ".Radicale.cache", "item")
        self._storage._makedirs_synced(cache_folder)
        for item in items:
            uid = item.uid
            try:
                cache_content = self._item_cache_content(item)
            except Exception as e:
                raise ValueError(
                    "Failed to store item %r in temporary collection %r: %s" %
                    (uid, self.path, e)) from e
            # Try each candidate href until one can actually be opened;
            # some candidates may be rejected by the OS as invalid names.
            for href in get_safe_free_hrefs(uid):
                try:
                    f = open(os.path.join(self._filesystem_path, href),
                             "w", newline="", encoding=self._encoding)
                except OSError as e:
                    if (sys.platform != "win32" and e.errno == errno.EINVAL or
                            sys.platform == "win32" and e.errno == 123):
                        # not a valid filename
                        continue
                    raise
                break
            else:
                # for/else: no candidate could be opened at all.
                raise RuntimeError("No href found for item %r in temporary "
                                   "collection %r" % (uid, self.path))
            with f:
                f.write(item.serialize())
                f.flush()
                self._storage._fsync(f)
            with open(os.path.join(cache_folder, href), "wb") as fb:
                pickle.dump(cache_content, fb)
                fb.flush()
                self._storage._fsync(fb)
        self._storage._sync_directory(cache_folder)
        self._storage._sync_directory(self._filesystem_path)
| 5,019
|
Python
|
.py
| 104
| 36.586538
| 78
| 0.604608
|
Kozea/Radicale
| 3,268
| 426
| 211
|
GPL-3.0
|
9/5/2024, 5:11:02 PM (Europe/Amsterdam)
|
11,198
|
history.py
|
Kozea_Radicale/radicale/storage/multifilesystem/history.py
|
# This file is part of Radicale - CalDAV and CardDAV server
# Copyright © 2014 Jean-Marc Martins
# Copyright © 2012-2017 Guillaume Ayoub
# Copyright © 2017-2019 Unrud <unrud@outlook.com>
#
# This library is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Radicale. If not, see <http://www.gnu.org/licenses/>.
import binascii
import contextlib
import os
import pickle
from typing import BinaryIO, Optional, cast
import radicale.item as radicale_item
from radicale import pathutils
from radicale.log import logger
from radicale.storage import multifilesystem
from radicale.storage.multifilesystem.base import CollectionBase
class CollectionPartHistory(CollectionBase):
    """Part of the collection implementation that maintains the per-item
    history cache used for sync tokens."""

    _max_sync_token_age: int  # expiry (from config) for cached history entries

    def __init__(self, storage_: "multifilesystem.Storage", path: str,
                 filesystem_path: Optional[str] = None) -> None:
        super().__init__(storage_, path, filesystem_path)
        self._max_sync_token_age = storage_.configuration.get(
            "storage", "max_sync_token_age")

    def _update_history_etag(self, href, item):
        """Updates and retrieves the history etag from the history cache.

        The history cache contains a file for each current and deleted item
        of the collection. These files contain the etag of the item (empty
        string for deleted items) and a history etag, which is a hash over
        the previous history etag and the etag separated by "/".
        """
        history_folder = os.path.join(self._filesystem_path,
                                      ".Radicale.cache", "history")
        try:
            with open(os.path.join(history_folder, href), "rb") as f:
                cache_etag, history_etag = pickle.load(f)
        except (FileNotFoundError, pickle.UnpicklingError, ValueError) as e:
            # Only warn on a damaged entry; a missing file is normal.
            if isinstance(e, (pickle.UnpicklingError, ValueError)):
                logger.warning(
                    "Failed to load history cache entry %r in %r: %s",
                    href, self.path, e, exc_info=True)
            cache_etag = ""
            # Initialize with random data to prevent collisions with cleaned
            # expired items.
            history_etag = binascii.hexlify(os.urandom(16)).decode("ascii")
        etag = item.etag if item else ""
        if etag != cache_etag:
            self._storage._makedirs_synced(history_folder)
            history_etag = radicale_item.get_etag(
                history_etag + "/" + etag).strip("\"")
            # Race: Other processes might have created and locked the file.
            with contextlib.suppress(PermissionError), self._atomic_write(
                    os.path.join(history_folder, href), "wb") as fo:
                fb = cast(BinaryIO, fo)
                pickle.dump([etag, history_etag], fb)
        return history_etag

    def _get_deleted_history_hrefs(self):
        """Returns the hrefs of all deleted items that are still in the
        history cache."""
        history_folder = os.path.join(self._filesystem_path,
                                      ".Radicale.cache", "history")
        with contextlib.suppress(FileNotFoundError):
            for entry in os.scandir(history_folder):
                href = entry.name
                if not pathutils.is_safe_filesystem_path_component(href):
                    continue
                # Skip items that still exist in the collection itself.
                if os.path.isfile(os.path.join(self._filesystem_path, href)):
                    continue
                yield href

    def _clean_history(self):
        # Delete all expired history entries of deleted items.
        history_folder = os.path.join(self._filesystem_path,
                                      ".Radicale.cache", "history")
        self._clean_cache(history_folder, self._get_deleted_history_hrefs(),
                          max_age=self._max_sync_token_age)
| 4,313
|
Python
|
.py
| 85
| 40.964706
| 77
| 0.646263
|
Kozea/Radicale
| 3,268
| 426
| 211
|
GPL-3.0
|
9/5/2024, 5:11:02 PM (Europe/Amsterdam)
|
11,199
|
discover.py
|
Kozea_Radicale/radicale/storage/multifilesystem/discover.py
|
# This file is part of Radicale - CalDAV and CardDAV server
# Copyright © 2014 Jean-Marc Martins
# Copyright © 2012-2017 Guillaume Ayoub
# Copyright © 2017-2018 Unrud <unrud@outlook.com>
#
# This library is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Radicale. If not, see <http://www.gnu.org/licenses/>.
import base64
import os
import posixpath
from typing import Callable, ContextManager, Iterator, Optional, Set, cast
from radicale import pathutils, types
from radicale.log import logger
from radicale.storage import multifilesystem
from radicale.storage.multifilesystem.base import StorageBase
@types.contextmanager
def _null_child_context_manager(path: str,
                                href: Optional[str]) -> Iterator[None]:
    """No-op context manager used when the caller supplies none."""
    yield
class StoragePartDiscover(StorageBase):
    """Part of the storage implementation that resolves URL paths to the
    collections and items stored on the filesystem."""

    def discover(
            self, path: str, depth: str = "0",
            child_context_manager: Optional[
                Callable[[str, Optional[str]], ContextManager[None]]] = None,
            user_groups: Optional[Set[str]] = None
            ) -> Iterator[types.CollectionOrItem]:
        """Yield the collection or item at ``path`` and, when ``depth`` is
        not "0", its member items, sub-collections and the group calendars
        for ``user_groups``.

        ``child_context_manager`` wraps the access to each child (e.g. for
        per-child error handling).  ``user_groups`` lists group names whose
        calendars are exposed under "/GROUPS/<base64(name)>/".
        """
        # Fix: the signature used a mutable default (``set([])``) which is
        # created once and shared by every call; use a None sentinel instead.
        if user_groups is None:
            user_groups = set()
        # assert isinstance(self, multifilesystem.Storage)
        if child_context_manager is None:
            child_context_manager = _null_child_context_manager
        # Path should already be sanitized
        sane_path = pathutils.strip_path(path)
        attributes = sane_path.split("/") if sane_path else []
        folder = self._get_collection_root_folder()
        # Create the root collection
        self._makedirs_synced(folder)
        try:
            filesystem_path = pathutils.path_to_filesystem(folder, sane_path)
        except ValueError as e:
            # Path is unsafe
            logger.debug("Unsafe path %r requested from storage: %s",
                         sane_path, e, exc_info=True)
            return
        # Check if the path exists and if it leads to a collection or an item
        href: Optional[str]
        if not os.path.isdir(filesystem_path):
            if attributes and os.path.isfile(filesystem_path):
                # The last path component is an item inside its parent
                # collection.
                href = attributes.pop()
            else:
                return
        else:
            href = None
        sane_path = "/".join(attributes)
        collection = self._collection_class(
            cast(multifilesystem.Storage, self),
            pathutils.unstrip_path(sane_path, True))
        if href:
            item = collection._get(href)
            if item is not None:
                yield item
            return
        yield collection
        if depth == "0":
            return
        # Member items of the collection
        for href in collection._list():
            with child_context_manager(sane_path, href):
                item = collection._get(href)
                if item is not None:
                    yield item
        # Sub-collections (directories)
        for entry in os.scandir(filesystem_path):
            if not entry.is_dir():
                continue
            href = entry.name
            if not pathutils.is_safe_filesystem_path_component(href):
                if not href.startswith(".Radicale"):
                    logger.debug("Skipping collection %r in %r",
                                 href, sane_path)
                continue
            sane_child_path = posixpath.join(sane_path, href)
            child_path = pathutils.unstrip_path(sane_child_path, True)
            with child_context_manager(sane_child_path, None):
                yield self._collection_class(
                    cast(multifilesystem.Storage, self), child_path)
        # Group calendars, stored under GROUPS/<base64 of the group name>
        for group in user_groups:
            href = base64.b64encode(group.encode('utf-8')).decode('ascii')
            # NOTE(review): the standard base64 alphabet contains "/" and
            # "+"; a "/" in the encoded name would split the path — confirm
            # whether urlsafe_b64encode should be used (must match the
            # encoding used when the group collection was created).
            logger.debug("searching for group calendar %s %s", group, href)
            sane_child_path = f"GROUPS/{href}"
            if not os.path.isdir(pathutils.path_to_filesystem(folder, sane_child_path)):
                continue
            child_path = f"/GROUPS/{href}/"
            with child_context_manager(sane_child_path, None):
                yield self._collection_class(
                    cast(multifilesystem.Storage, self), child_path)
| 4,554
|
Python
|
.py
| 102
| 34.509804
| 88
| 0.625846
|
Kozea/Radicale
| 3,268
| 426
| 211
|
GPL-3.0
|
9/5/2024, 5:11:02 PM (Europe/Amsterdam)
|