| id | file_name | file_path | content | size | language | extension | total_lines | avg_line_length | max_line_length | alphanum_fraction | repo_name | repo_stars | repo_forks | repo_open_issues | repo_license | repo_extraction_date |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
| int64 0-458k | stringlengths 4-119 | stringlengths 14-227 | stringlengths 24-9.96M | int64 24-9.96M | stringclasses 1 value | stringclasses 14 values | int64 1-219k | float64 2.52-4.63M | int64 5-9.91M | float64 0-1 | stringlengths 7-101 | int64 100-139k | int64 0-26.4k | int64 0-2.27k | stringclasses 12 values | stringclasses 433 values |
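The rows below are raw dataset records. A minimal sketch of how such a table could be loaded and narrowed to the gpodder/gpodder files with the Hugging Face datasets library follows; the dataset path "example/python-source-corpus" and the "train" split are placeholder assumptions, not the real identifiers.

from datasets import load_dataset

# Placeholder dataset path and split; substitute the actual identifiers.
ds = load_dataset("example/python-source-corpus", split="train")

# Narrow to the gPodder rows shown below, using the columns from the header.
gpodder_rows = ds.filter(lambda row: row["repo_name"] == "gpodder/gpodder")

for row in gpodder_rows.select(range(3)):
    print(row["id"], row["file_path"], row["total_lines"], row["size"])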
| 8,300 | build_info.py | gpodder_gpodder/src/gpodder/build_info.py |
# -*- coding: utf-8 -*-
# Copyright 2016 Christoph Reiter
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
"""This file gets edited at build time to add build specific data"""
BUILD_TYPE = u"default"
"""Either 'windows', 'windows-portable', 'osx' or 'default'"""
BUILD_INFO = u""
"""Additional build info like git revision etc"""
BUILD_VERSION = 0
"""1.2.3 with a BUILD_VERSION of 1 results in 1.2.3.1"""
| 610 | Python | .py | 14 | 42.285714 | 70 | 0.733108 | gpodder/gpodder | 1,268 | 204 | 307 | GPL-3.0 | 9/5/2024, 5:10:30 PM (Europe/Amsterdam) |
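A minimal sketch of how the build metadata above could be consumed; the append_build_version() helper is illustrative and not part of gPodder.

from gpodder import build_info

def append_build_version(base_version):
    # Per the docstring above: "1.2.3" with BUILD_VERSION = 1 becomes "1.2.3.1";
    # the default BUILD_VERSION of 0 leaves the base version unchanged.
    if build_info.BUILD_VERSION:
        return '%s.%s' % (base_version, build_info.BUILD_VERSION)
    return base_version

print(append_build_version('1.2.3'))  # prints '1.2.3' for a default build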
| 8,301 | feedcore.py | gpodder_gpodder/src/gpodder/feedcore.py |
# -*- coding: utf-8 -*-
#
# gPodder - A media aggregator and podcast client
# Copyright (c) 2005-2018 The gPodder Team
#
# gPodder is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# gPodder is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#
# Generic feed fetching module for aggregators
# Thomas Perl <thp@gpodder.org>; 2009-06-11
#
import logging
import urllib.parse
from html.parser import HTMLParser
from io import BytesIO
from gpodder import util, youtube
logger = logging.getLogger(__name__)
class ExceptionWithData(Exception):
"""Base exception with additional payload"""
def __init__(self, data):
Exception.__init__(self)
self.data = data
def __str__(self):
return '%s: %s' % (self.__class__.__name__, str(self.data))
# Temporary errors
class BadRequest(Exception):
pass
class InternalServerError(Exception):
pass
class WifiLogin(ExceptionWithData):
pass
# Fatal errors
class Unsubscribe(Exception):
pass
class NotFound(Exception):
pass
class InvalidFeed(Exception):
pass
class UnknownStatusCode(ExceptionWithData):
pass
# Authentication error
class AuthenticationRequired(Exception):
def __init__(self, msg, url=None):
super().__init__(msg)
self.url = url
# Successful status codes
UPDATED_FEED, NEW_LOCATION, NOT_MODIFIED = list(range(3))
class Result:
def __init__(self, status, feed=None):
self.status = status
self.feed = feed
class FeedAutodiscovery(HTMLParser):
def __init__(self, base):
HTMLParser.__init__(self)
self._base = base
self._resolved_url = None
def handle_starttag(self, tag, attrs):
if tag == 'link':
attrs = dict(attrs)
is_feed = attrs.get('type', '') in Fetcher.FEED_TYPES
is_youtube = 'youtube.com' in self._base
is_alternate = attrs.get('rel', '') == 'alternate'
is_canonical = attrs.get('rel', '') == 'canonical'
url = attrs.get('href', None)
url = urllib.parse.urljoin(self._base, url)
if is_feed and is_alternate and url:
logger.info('Feed autodiscovery: %s', url)
self._resolved_url = url
elif is_youtube and is_canonical and url:
url = youtube.parse_youtube_url(url)
logger.info('Feed autodiscovery: %s', url)
self._resolved_url = url
class FetcherFeedData:
def __init__(self, text, content):
self.text = text
self.content = content
class Fetcher(object):
# Supported types, see http://feedvalidator.org/docs/warning/EncodingMismatch.html
FEED_TYPES = ('application/rss+xml',
'application/atom+xml',
'application/rdf+xml',
'application/xml',
'text/xml')
def _resolve_url(self, url):
"""Provide additional ways of resolving an URL
Subclasses can override this method to provide more
ways of resolving a given URL to a feed URL. If the
Fetcher is in "autodiscovery" mode, it will try this
method as a last resort for coming up with a feed URL.
"""
return None
@staticmethod
def _check_statuscode(status, url):
if status >= 200 and status < 300:
return UPDATED_FEED
elif status == 304:
return NOT_MODIFIED
# redirects are handled by requests directly
# => the status should never be 301, 302, 303, 307, 308
if status == 401:
raise AuthenticationRequired('authentication required', url)
elif status == 403:
raise Unsubscribe('forbidden')
elif status == 404:
raise NotFound('not found')
elif status == 410:
raise Unsubscribe('resource is gone')
elif status >= 400 and status < 500:
raise BadRequest('bad request')
elif status >= 500 and status < 600:
raise InternalServerError('internal server error')
else:
raise UnknownStatusCode(status)
def parse_feed(self, url, feed_data, data_stream, headers, status, **kwargs):
"""
kwargs are passed from Fetcher.fetch
:param str url: real url
:param data_stream: file-like object to read from (bytes mode)
:param dict-like headers: response headers (may be empty)
:param int status: always UPDATED_FEED for now
:return Result: Result(status, model.Feed from parsed data_stream)
"""
raise NotImplementedError("Implement parse_feed()")
def fetch(self, url, etag=None, modified=None, autodiscovery=True, **kwargs):
""" use kwargs to pass extra data to parse_feed in Fetcher subclasses """
# handle local file first
if url.startswith('file://'):
url = url[len('file://'):]
stream = open(url)
return self.parse_feed(url, None, stream, {}, UPDATED_FEED, **kwargs)
# remote feed
headers = {}
if modified is not None:
headers['If-Modified-Since'] = modified
if etag is not None:
headers['If-None-Match'] = etag
stream = util.urlopen(url, headers)
responses = stream.history + [stream]
for i, resp in enumerate(responses):
if resp.is_permanent_redirect:
# there should always be a next response when a redirect is encountered
# If max redirects is reached, TooManyRedirects is raised
# TODO: since we've got the end contents anyway, modify model.py to accept contents on NEW_LOCATION
return Result(NEW_LOCATION, responses[i + 1].url)
res = self._check_statuscode(stream.status_code, stream.url)
if res == NOT_MODIFIED:
return Result(NOT_MODIFIED, stream.url)
if autodiscovery and stream.headers.get('content-type', '').startswith('text/html'):
ad = FeedAutodiscovery(url)
# response_text() will assume utf-8 if no charset specified
ad.feed(util.response_text(stream))
if ad._resolved_url and ad._resolved_url != url:
try:
self.fetch(ad._resolved_url, etag=None, modified=None, autodiscovery=False, **kwargs)
return Result(NEW_LOCATION, ad._resolved_url)
except Exception:
logger.warning('Feed autodiscovery failed', exc_info=True)
# Second, try to resolve the URL
new_url = self._resolve_url(url)
if new_url and new_url != url:
return Result(NEW_LOCATION, new_url)
# xml documents specify the encoding inline so better pass encoded body.
# Especially since requests will use ISO-8859-1 for content-type 'text/xml'
# if the server doesn't specify a charset.
return self.parse_feed(url, FetcherFeedData(stream.text, stream.content), BytesIO(stream.content), stream.headers,
UPDATED_FEED, **kwargs)
| 7,597 | Python | .py | 177 | 34.418079 | 122 | 0.635784 | gpodder/gpodder | 1,268 | 204 | 307 | GPL-3.0 | 9/5/2024, 5:10:30 PM (Europe/Amsterdam) |
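A minimal sketch of how feedcore.Fetcher is meant to be subclassed, following the parse_feed() contract documented above; TitleOnlyFetcher and its ElementTree parsing are illustrative assumptions, not gPodder code (gPodder's real implementation is the podcastparser-based gPodderFetcher in model.py below).

import xml.etree.ElementTree as ET

from gpodder import feedcore

class TitleOnlyFetcher(feedcore.Fetcher):
    def parse_feed(self, url, feed_data, data_stream, headers, status, **kwargs):
        # data_stream is a bytes-mode file-like object with the response body.
        # A real subclass would wrap a Feed-like object; a dict keeps this short.
        root = ET.parse(data_stream).getroot()
        title = root.findtext('.//{*}title') or url
        return feedcore.Result(status, {'url': url, 'title': title})

# fetch() handles conditional requests, redirects, status codes and feed
# autodiscovery before delegating to parse_feed(); extra kwargs are passed through.
# result = TitleOnlyFetcher().fetch('https://example.com/feed.xml')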
| 8,302 | utilwin32locale.py | gpodder_gpodder/src/gpodder/utilwin32locale.py |
# -*- coding: utf-8 -*-
#
# Copyright © 2007-2010 Dieter Verfaillie <dieterv@optionexplicit.be>
#
# This file is part of elib.intl.
#
# elib.intl is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# elib.intl is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with elib.intl. If not, see <http://www.gnu.org/licenses/>.
"""
This code is adapted from the elib.intl module available on GitHub at
https://github.com/dieterv/elib.intl, commit 49d5797 on 1 Sep 2017.
It has been stripped down to the necessary code for UI localization
using Gtk.Builder in gPodder on Windows (msys2) and adapted to python 3.
The elib.intl module provides enhanced internationalization (I18N) services for
your Python modules and applications.
This module adds the following on Microsoft Windows systems:
- automatic detection of the current screen language (not necessarily the same
as the installation language) provided by MUI packs,
- makes sure internationalized C libraries which internally invoke gettext() or
dcgettext() can properly locate their message catalogs. This fixes a known
# limitation in gettext's Windows support when using e.g. gtk.builder or gtk.glade.
See http://www.gnu.org/software/gettext/FAQ.html#windows_setenv for more
information.
"""
__version__ = '0.0.3'
__docformat__ = 'restructuredtext'
import gettext
import locale
import os
import sys
from logging import getLogger
logger = getLogger(__name__)
def _localefromlcid(lcid):
"""
:param lcid: Microsoft Windows LCID
:returns: name of the supported gPodder locale or ISO 639-1 language code for a given lcid. If there is no
ISO 639-1 language code assigned to the language specified by lcid,
the ISO 639-2 language code is returned. If the language specified
by lcid is unknown in the ISO 639-x database, None is returned.
More information can be found on the following websites:
- List of ISO 639-1 and ISO 639-2 language codes: http://www.loc.gov/standards/iso639-2/
- List of language identifiers: https://msdn.microsoft.com/library/windows/desktop/dd318693(v=vs.85).aspx
"""
mapping = {1078: 'af', # Afrikaans - South Africa
1052: 'sq', # Albanian - Albania
1118: 'am', # Amharic - Ethiopia
1025: 'ar', # Arabic - Saudi Arabia
5121: 'ar', # Arabic - Algeria
15361: 'ar', # Arabic - Bahrain
3073: 'ar', # Arabic - Egypt
2049: 'ar', # Arabic - Iraq
11265: 'ar', # Arabic - Jordan
13313: 'ar', # Arabic - Kuwait
12289: 'ar', # Arabic - Lebanon
4097: 'ar', # Arabic - Libya
6145: 'ar', # Arabic - Morocco
8193: 'ar', # Arabic - Oman
16385: 'ar', # Arabic - Qatar
10241: 'ar', # Arabic - Syria
7169: 'ar', # Arabic - Tunisia
14337: 'ar', # Arabic - U.A.E.
9217: 'ar', # Arabic - Yemen
1067: 'hy', # Armenian - Armenia
1101: 'as', # Assamese
2092: 'az', # Azeri (Cyrillic)
1068: 'az', # Azeri (Latin)
0x042D: 'eu', # Basque
1059: 'be', # Belarusian
1093: 'bn', # Bengali (India)
2117: 'bn', # Bengali (Bangladesh)
5146: 'bs', # Bosnian (Bosnia/Herzegovina)
1026: 'bg', # Bulgarian
1109: 'my', # Burmese
0x0403: 'ca', # Catalan
1116: 'chr', # Cherokee - United States
# we have only one zh_CN.po translation. Applying to all.
2052: 'zh_CN', # Chinese - People's Republic of China
4100: 'zh_CN', # Chinese - Singapore
1028: 'zh_CN', # Chinese - Taiwan
3076: 'zh_CN', # Chinese - Hong Kong SAR
5124: 'zh_CN', # Chinese - Macao SAR
1050: 'hr', # Croatian
4122: 'hr', # Croatian (Bosnia/Herzegovina)
# we have cs and cs_CZ translations. cs_CZ seems more up to date
0x0405: 'cs_CZ', # Czech
0x0406: 'da', # Danish
1125: 'dv', # Divehi
1043: 'nl', # Dutch - Netherlands
0x0813: 'nl', # Dutch - Belgium
1126: 'bin', # Edo
1033: 'en', # English - United States
2057: 'en', # English - United Kingdom
3081: 'en', # English - Australia
10249: 'en', # English - Belize
4105: 'en', # English - Canada
9225: 'en', # English - Caribbean
15369: 'en', # English - Hong Kong SAR
16393: 'en', # English - India
14345: 'en', # English - Indonesia
6153: 'en', # English - Ireland
8201: 'en', # English - Jamaica
17417: 'en', # English - Malaysia
5129: 'en', # English - New Zealand
13321: 'en', # English - Philippines
18441: 'en', # English - Singapore
7177: 'en', # English - South Africa
11273: 'en', # English - Trinidad
12297: 'en', # English - Zimbabwe
1061: 'et', # Estonian
1080: 'fo', # Faroese
0x0429: 'fa_IR', # Persian - Iran
1124: 'fil', # Filipino
0x040B: 'fi', # Finnish
0x040C: 'fr', # French - France
2060: 'fr', # French - Belgium
11276: 'fr', # French - Cameroon
3084: 'fr', # French - Canada
9228: 'fr', # French - Democratic Rep. of Congo
12300: 'fr', # French - Cote d'Ivoire
15372: 'fr', # French - Haiti
5132: 'fr', # French - Luxembourg
13324: 'fr', # French - Mali
6156: 'fr', # French - Monaco
14348: 'fr', # French - Morocco
58380: 'fr', # French - North Africa
8204: 'fr', # French - Reunion
10252: 'fr', # French - Senegal
4108: 'fr', # French - Switzerland
7180: 'fr', # French - West Indies
1122: 'fy', # Frisian - Netherlands
1127: None, # TODO: Fulfulde - Nigeria
1071: 'mk', # FYRO Macedonian
2108: 'ga', # Gaelic (Ireland)
1084: 'gd', # Gaelic (Scotland)
0x0456: 'gl', # Galician
1079: 'ka', # Georgian
0x0407: 'de', # German - Germany
3079: 'de', # German - Austria
5127: 'de', # German - Liechtenstein
4103: 'de', # German - Luxembourg
2055: 'de', # German - Switzerland
0x0408: 'el', # Greek
1140: 'gn', # Guarani - Paraguay
1095: 'gu', # Gujarati
1128: 'ha', # Hausa - Nigeria
1141: 'haw', # Hawaiian - United States
0x040D: 'he', # Hebrew
1081: 'hi', # Hindi
0x040E: 'hu', # Hungarian
1129: None, # TODO: Ibibio - Nigeria
1039: 'is', # Icelandic
1136: 'ig', # Igbo - Nigeria
0x0421: 'id_ID', # Indonesian
1117: 'iu', # Inuktitut
0x0410: 'it', # Italian - Italy
2064: 'it', # Italian - Switzerland
1041: 'ja', # Japanese
1099: 'kn', # Kannada
1137: 'kr', # Kanuri - Nigeria
2144: 'ks', # Kashmiri
1120: 'ks', # Kashmiri (Arabic)
0x043F: 'kk', # Kazakh
1107: 'km', # Khmer
1111: 'kok', # Konkani
0x0412: 'ko_KR', # Korean
1088: 'ky', # Kyrgyz (Cyrillic)
1108: 'lo', # Lao
1142: 'la', # Latin
1062: 'lv', # Latvian
1063: 'lt', # Lithuanian
1086: 'ms', # Malay - Malaysia
2110: 'ms', # Malay - Brunei Darussalam
1100: 'ml', # Malayalam
1082: 'mt', # Maltese
1112: 'mni', # Manipuri
1153: 'mi', # Maori - New Zealand
1102: 'mr', # Marathi
1104: 'mn', # Mongolian (Cyrillic)
2128: 'mn', # Mongolian (Mongolian)
1121: 'ne', # Nepali
2145: 'ne', # Nepali - India
0x0414: 'nb', # Norwegian (Bokmål)
0x0814: 'nn', # Norwegian (Nynorsk)
1096: 'or', # Oriya
1138: 'om', # Oromo
1145: 'pap', # Papiamentu
1123: 'ps', # Pashto
0x0415: 'pl', # Polish
0x0416: 'pt_BR', # Portuguese - Brazil
0x0816: 'pt', # Portuguese - Portugal
1094: 'pa', # Punjabi
2118: 'pa', # Punjabi (Pakistan)
1131: 'qu', # Quechua - Bolivia
2155: 'qu', # Quechua - Ecuador
3179: 'qu', # Quechua - Peru
1047: 'rm', # Rhaeto-Romanic
0x0418: 'ro', # Romanian
2072: 'ro', # Romanian - Moldova
0x0419: 'ru', # Russian
2073: 'ru', # Russian - Moldova
1083: 'se', # Sami (Lappish)
1103: 'sa', # Sanskrit
1132: 'nso', # Sepedi
3098: 'sr', # Serbian (Cyrillic)
2074: 'sr', # Serbian (Latin)
1113: 'sd', # Sindhi - India
2137: 'sd', # Sindhi - Pakistan
1115: 'si', # Sinhalese - Sri Lanka
1051: 'sk', # Slovak
1060: 'sl', # Slovenian
1143: 'so', # Somali
1070: 'wen', # Sorbian
0x0C0A: 'es', # Spanish - Spain (Modern Sort)
0x040A: 'es', # Spanish - Spain (Traditional Sort)
11274: 'es', # Spanish - Argentina
16394: 'es', # Spanish - Bolivia
13322: 'es', # Spanish - Chile
9226: 'es', # Spanish - Colombia
5130: 'es', # Spanish - Costa Rica
7178: 'es', # Spanish - Dominican Republic
12298: 'es', # Spanish - Ecuador
17418: 'es', # Spanish - El Salvador
4106: 'es', # Spanish - Guatemala
18442: 'es', # Spanish - Honduras
58378: 'es', # Spanish - Latin America
0x080A: 'es_MX', # Spanish - Mexico
19466: 'es', # Spanish - Nicaragua
6154: 'es', # Spanish - Panama
15370: 'es', # Spanish - Paraguay
10250: 'es', # Spanish - Peru
20490: 'es', # Spanish - Puerto Rico
21514: 'es', # Spanish - United States
14346: 'es', # Spanish - Uruguay
8202: 'es', # Spanish - Venezuela
1072: None, # TODO: Sutu
1089: 'sw', # Swahili
0x041D: 'sv', # Swedish
2077: 'sv', # Swedish - Finland
1114: 'syr', # Syriac
1064: 'tg', # Tajik
1119: None, # TODO: Tamazight (Arabic)
2143: None, # TODO: Tamazight (Latin)
1097: 'ta', # Tamil
1092: 'tt', # Tatar
1098: 'te', # Telugu
1054: 'th', # Thai
2129: 'bo', # Tibetan - Bhutan
1105: 'bo', # Tibetan - People's Republic of China
2163: 'ti', # Tigrigna - Eritrea
1139: 'ti', # Tigrigna - Ethiopia
1073: 'ts', # Tsonga
1074: 'tn', # Tswana
# we have tr and tr_TR translations. tr seems more complete
0x041F: 'tr', # Turkish
1090: 'tk', # Turkmen
1152: 'ug', # Uighur - China
0x0422: 'uk', # Ukrainian
1056: 'ur', # Urdu
2080: 'ur', # Urdu - India
2115: 'uz', # Uzbek (Cyrillic)
1091: 'uz', # Uzbek (Latin)
1075: 've', # Venda
1066: 'vi', # Vietnamese
1106: 'cy', # Welsh
1076: 'xh', # Xhosa
1144: 'ii', # Yi
1085: 'yi', # Yiddish
1130: 'yo', # Yoruba
1077: 'zu'} # Zulu
return mapping[lcid]
def _getscreenlanguage():
"""
:returns: the locale for this session.
If the LANGUAGE environment variable is set, its value overrides the
screen language detection. Otherwise the screen language is determined by
the currently selected Microsoft Windows MUI language pack or the Microsoft
Windows installation language.
Works on Microsoft Windows 2000 and up.
"""
# Start with nothing
lang = None
# Check the LANGUAGE environment variable
lang = os.getenv('LANGUAGE')
if lang is None:
# Start with nothing
lcid = None
try:
from ctypes import windll
lcid = windll.kernel32.GetUserDefaultUILanguage()
except:
logger.warning("Failed to get current screen language with 'GetUserDefaultUILanguage'")
finally:
if lcid is None:
lang = 'C'
else:
lang = _localefromlcid(lcid)
logger.info("Windows screen language is '%s' (lcid %s)", lang, lcid)
return lang
def install(domain, localedir):
"""
:param domain: translation domain
:param localedir: locale directory
"""
# prep locale system
locale.setlocale(locale.LC_ALL, '')
# on windows systems, set the LANGUAGE environment variable
if sys.platform == 'win32' or sys.platform == 'nt':
os.environ['LANGUAGE'] = _getscreenlanguage()
# initialize Python's gettext interface
gettext.bindtextdomain(domain, localedir)
# on windows systems, initialize libintl
if sys.platform == 'win32' or sys.platform == 'nt':
from ctypes import cdll
libintl = cdll.LoadLibrary('libintl-8.dll')
libintl.bindtextdomain(domain.encode('mbcs'), localedir.encode('mbcs'))
# See #1538 not calling bind_textdomain_codeset results in garbled menus
libintl.bind_textdomain_codeset(domain.encode('mbcs'), 'UTF-8'.encode('mbcs'))
del libintl
| 15,081 | Python | .py | 328 | 33.356707 | 113 | 0.51342 | gpodder/gpodder | 1,268 | 204 | 307 | GPL-3.0 | 9/5/2024, 5:10:30 PM (Europe/Amsterdam) |
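A minimal sketch of how the module above could be used to initialise translations on Windows; the 'gpodder' text domain and the locale directory path are illustrative assumptions, not taken from the gPodder build.

import os
import sys

from gpodder import utilwin32locale

# Illustrative values: the real domain and locale directory come from the build.
DOMAIN = 'gpodder'
LOCALE_DIR = os.path.join(os.path.dirname(__file__), 'share', 'locale')

if sys.platform == 'win32':
    # Detects the Windows MUI screen language (LANGUAGE overrides it if set)
    # and binds the domain for both Python's gettext and libintl-8.dll.
    utilwin32locale.install(DOMAIN, LOCALE_DIR)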
| 8,303 | model.py | gpodder_gpodder/src/gpodder/model.py |
# -*- coding: utf-8 -*-
#
# gPodder - A media aggregator and podcast client
# Copyright (c) 2005-2018 The gPodder Team
# Copyright (c) 2011 Neal H. Walfield
#
# gPodder is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# gPodder is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#
# gpodder.model - Core model classes for gPodder (2009-08-13)
# Based on libpodcasts.py (thp, 2005-10-29)
#
import datetime
import glob
import hashlib
import json
import logging
import os
import re
import shutil
import string
import time
import podcastparser
import gpodder
from gpodder import coverart, feedcore, registry, schema, util, vimeo, youtube
logger = logging.getLogger(__name__)
_ = gpodder.gettext
class Feed:
""" abstract class for presenting a parsed feed to PodcastChannel """
def get_title(self):
""" :return str: the feed's title """
return None
def get_link(self):
""" :return str: link to the feed's website """
return None
def get_description(self):
""" :return str: feed's textual description """
return None
def get_cover_url(self):
""" :return str: url of the feed's cover image """
return None
def get_payment_url(self):
""" :return str: optional -- feed's payment url """
return None
def get_http_etag(self):
""" :return str: optional -- last HTTP etag header, for conditional request next time """
return None
def get_http_last_modified(self):
""" :return str: optional -- last HTTP Last-Modified header, for conditional request next time """
return None
def get_new_episodes(self, channel, existing_guids):
"""
Produce new episodes and update old ones.
Feed is a class for presenting results, so the feed must already have been fetched.
Existing episodes not in all_seen_guids will be purged from the database.
:param PodcastChannel channel: the updated channel
:param dict(str, PodcastEpisode): existing episodes, by guid
:return (list(PodcastEpisode), set(str)): new_episodes, all_seen_guids
"""
return ([], set())
def get_next_page(self, channel, max_episodes):
"""
Paginated feed support (RFC 5005).
If the feed is paged, return the next feed page.
Returned page will in turn be asked for the next page, until None is returned.
:return feedcore.Result: the next feed's page,
as a fully parsed Feed or None
"""
return None
class PodcastParserFeed(Feed):
def __init__(self, feed, fetcher, max_episodes=0):
self.feed = feed
self.fetcher = fetcher
self.max_episodes = max_episodes
def get_title(self):
return self.feed.get('title')
def get_link(self):
vid = youtube.get_youtube_id(self.feed['url'])
if vid is not None:
self.feed['link'] = youtube.get_channel_id_url(self.feed['url'], self.fetcher.feed_data)
return self.feed.get('link')
def get_description(self):
vid = youtube.get_youtube_id(self.feed['url'])
if vid is not None:
self.feed['description'] = youtube.get_channel_desc(self.feed['url'], self.fetcher.feed_data)
return self.feed.get('description')
def get_cover_url(self):
return self.feed.get('cover_url')
def get_payment_url(self):
return self.feed.get('payment_url')
def get_http_etag(self):
return self.feed.get('headers', {}).get('etag')
def get_http_last_modified(self):
return self.feed.get('headers', {}).get('last-modified')
def get_new_episodes(self, channel, existing_guids):
# Keep track of episode GUIDs currently seen in the feed
seen_guids = set()
# list of new episodes
new_episodes = []
# We have to sort the entries in descending chronological order,
# because if the feed lists items in ascending order and has >
# max_episodes old episodes, new episodes will not be shown.
# See also: gPodder Bug 1186
entries = sorted(self.feed.get('episodes', []), key=lambda episode: episode['published'], reverse=True)
# We can limit the maximum number of entries that gPodder will parse
if self.max_episodes > 0 and len(entries) > self.max_episodes:
entries = entries[:self.max_episodes]
num_duplicate_guids = 0
# Search all entries for new episodes
for entry in entries:
episode = channel.EpisodeClass.from_podcastparser_entry(entry, channel)
if episode is None:
continue
# Discard episode when its GUID collides with a newer episode
if episode.guid in seen_guids:
num_duplicate_guids += 1
channel._update_error = ('Discarded {} episode(s) with non-unique GUID, contact the podcast publisher to fix this issue.'
.format(num_duplicate_guids))
logger.warning('Discarded episode with non-unique GUID, contact the podcast publisher to fix this issue. [%s] [%s]',
channel.title, episode.title)
continue
seen_guids.add(episode.guid)
# Detect (and update) existing episode based on GUIDs
existing_episode = existing_guids.get(episode.guid, None)
if existing_episode:
if existing_episode.total_time == 0 and 'youtube' in episode.url:
# query duration for existing youtube episodes that haven't been downloaded or queried
# such as live streams after they have ended
existing_episode.total_time = youtube.get_total_time(episode)
existing_episode.update_from(episode)
existing_episode.cache_text_description()
existing_episode.save()
continue
elif episode.total_time == 0 and 'youtube' in episode.url:
# query duration for new youtube episodes
episode.total_time = youtube.get_total_time(episode)
episode.cache_text_description()
episode.save()
new_episodes.append(episode)
return new_episodes, seen_guids
def get_next_page(self, channel, max_episodes):
if 'paged_feed_next' in self.feed:
url = self.feed['paged_feed_next']
logger.debug("get_next_page: feed has next %s", url)
url = channel.authenticate_url(url)
return self.fetcher.fetch(url, autodiscovery=False, max_episodes=max_episodes)
return None
class gPodderFetcher(feedcore.Fetcher):
"""
This class implements fetching a channel either through a custom feed handler
or through the default podcastparser-based parsing
"""
def fetch_channel(self, channel, max_episodes):
custom_feed = registry.feed_handler.resolve(channel, None, max_episodes)
if custom_feed is not None:
return custom_feed
# TODO: revisit authenticate_url: pass auth as kwarg
# If we have a username or password, rebuild the url with them included
# Note: using an HTTPBasicAuthHandler would be a pain because we need to
# know the realm. It can be done, but I think this method works, too
url = channel.authenticate_url(channel.url)
return self.fetch(url, channel.http_etag, channel.http_last_modified, max_episodes=max_episodes)
def _resolve_url(self, url):
url = youtube.get_real_channel_url(url)
url = vimeo.get_real_channel_url(url)
return url
def parse_feed(self, url, feed_data, data_stream, headers, status, max_episodes=0, **kwargs):
self.feed_data = feed_data
try:
feed = podcastparser.parse(url, data_stream)
feed['url'] = url
feed['headers'] = headers
return feedcore.Result(status, PodcastParserFeed(feed, self, max_episodes))
except ValueError as e:
raise feedcore.InvalidFeed('Could not parse feed: {url}: {msg}'.format(url=url, msg=e))
# Our podcast model:
#
# database -> podcast -> episode -> download/playback
# podcast.parent == db
# podcast.children == [episode, ...]
# episode.parent == podcast
#
# - normally: episode.children = (None, None)
# - downloading: episode.children = (DownloadTask(), None)
# - playback: episode.children = (None, PlaybackTask())
class PodcastModelObject(object):
"""
A generic base class for our podcast model providing common helper
and utility functions.
"""
__slots__ = ('id', 'parent', 'children')
@classmethod
def create_from_dict(cls, d, *args):
"""
Create a new object, passing "args" to the constructor
and then updating the object with the values from "d".
"""
o = cls(*args)
# XXX: all(map(lambda k: hasattr(o, k), d))?
for k, v in d.items():
setattr(o, k, v)
return o
class PodcastEpisode(PodcastModelObject):
"""holds data for one object in a channel"""
# In theory, Linux can have 255 bytes (not characters!) in a filename, but
# filesystems like eCryptFS store metadata in the filename, making the
# effective number of characters less than that. eCryptFS recommends
# 140 chars, we use 120 here (140 - len(extension) - len(".partial.webm"))
# (youtube-dl appends an extension after .partial, ".webm" is the longest).
# References: gPodder bug 1898, http://unix.stackexchange.com/a/32834
MAX_FILENAME_LENGTH = 120 # without extension
MAX_FILENAME_WITH_EXT_LENGTH = 140 - len(".partial.webm") # with extension
__slots__ = schema.EpisodeColumns + ('_download_error', '_text_description',)
def _deprecated(self):
raise Exception('Property is deprecated!')
is_played = property(fget=_deprecated, fset=_deprecated)
is_locked = property(fget=_deprecated, fset=_deprecated)
def has_website_link(self):
return bool(self.link) and (self.link != self.url
or youtube.is_video_link(self.link))
@classmethod
def from_podcastparser_entry(cls, entry, channel):
episode = cls(channel)
episode.guid = entry['guid']
episode.title = entry['title']
episode.link = entry['link']
episode.episode_art_url = entry.get('episode_art_url')
# Only one of the two description fields should be set at a time.
# This keeps the database from doubling in size and reduces load time from slow storage.
# episode._text_description is initialized by episode.cache_text_description() from the set field.
# episode.html_description() returns episode.description_html or generates from episode.description.
if entry.get('description_html'):
episode.description = ''
episode.description_html = entry['description_html']
else:
episode.description = util.remove_html_tags(entry['description'] or '')
episode.description_html = ''
episode.total_time = entry['total_time']
episode.published = entry['published']
episode.payment_url = entry['payment_url']
episode.chapters = None
if entry.get("chapters"):
episode.chapters = json.dumps(entry["chapters"])
audio_available = any(enclosure['mime_type'].startswith('audio/') for enclosure in entry['enclosures'])
video_available = any(enclosure['mime_type'].startswith('video/') for enclosure in entry['enclosures'])
link_has_media = False
if not (audio_available or video_available):
_url = episode.url
episode.url = util.normalize_feed_url(entry['link'])
# Check if any extensions (e.g. youtube-dl) support the link
link_has_media = registry.custom_downloader.resolve(None, None, episode) is not None
episode.url = _url
media_available = audio_available or video_available or link_has_media
url_is_invalid = False
for enclosure in entry['enclosures']:
episode.mime_type = enclosure['mime_type']
# Skip images in feeds if audio or video is available (bug 979)
# This must (and does) also look in Media RSS enclosures (bug 1430)
if episode.mime_type.startswith('image/') and media_available:
continue
# If we have audio or video available later on, skip
# all 'application/*' data types (fixes Linux Outlaws and peertube feeds)
if episode.mime_type.startswith('application/') and media_available:
continue
episode.url = util.normalize_feed_url(enclosure['url'])
if not episode.url:
url_is_invalid = True
continue
episode.file_size = enclosure['file_size']
return episode
# Brute-force detection of the episode link
episode.url = util.normalize_feed_url(entry['link'])
if not episode.url:
# The episode has no downloadable content.
# Set an empty URL so downloading will fail.
episode.url = ''
# Display an error icon if URL is invalid.
if url_is_invalid or (entry['link'] is not None and entry['link'] != ''):
episode._download_error = 'Invalid episode URL'
return episode
if any(mod.is_video_link(episode.url) for mod in (youtube, vimeo)):
return episode
# Check if we can resolve this link to an audio/video file
filename, extension = util.filename_from_url(episode.url)
file_type = util.file_type_by_extension(extension)
# The link points to an audio or video file - use it!
if file_type is not None:
return episode
if link_has_media:
return episode
# The episode has no downloadable content.
# It is either a blog post or it links to a webpage with content accessible from the shownotes title.
# Remove the URL so downloading will fail.
episode.url = ''
return episode
def __init__(self, channel):
self.parent = channel
self.podcast_id = self.parent.id
self.children = (None, None)
self.id = None
self.url = ''
self.title = ''
self.file_size = 0
self.mime_type = 'application/octet-stream'
self.guid = ''
self.episode_art_url = None
self.description = ''
self.description_html = ''
self.chapters = None
self.link = ''
self.published = 0
self.download_filename = None
self.payment_url = None
self.state = gpodder.STATE_NORMAL
self.is_new = True
self.archive = channel.auto_archive_episodes
# Time attributes
self.total_time = 0
self.current_position = 0
self.current_position_updated = 0
# Timestamp of last playback time
self.last_playback = 0
self._download_error = None
self._text_description = ''
@property
def channel(self):
return self.parent
@property
def db(self):
return self.parent.parent.db
@property
def trimmed_title(self):
"""Return the title with the common prefix trimmed"""
# Minimum amount of leftover characters after trimming. This
# avoids things like "Common prefix 123" to become just "123".
# If there are LEFTOVER_MIN or less characters after trimming,
# the original title will be returned without trimming.
LEFTOVER_MIN = 5
# "Podcast Name - Title" and "Podcast Name: Title" -> "Title"
for postfix in (' - ', ': '):
prefix = self.parent.title + postfix
if (self.title.startswith(prefix)
and len(self.title) - len(prefix) > LEFTOVER_MIN):
return self.title[len(prefix):]
regex_patterns = [
# "Podcast Name <number>: ..." -> "<number>: ..."
r'^%s (\d+: .*)' % re.escape(self.parent.title),
# "Episode <number>: ..." -> "<number>: ..."
r'Episode (\d+:.*)',
]
for pattern in regex_patterns:
if re.match(pattern, self.title):
title = re.sub(pattern, r'\1', self.title)
if len(title) > LEFTOVER_MIN:
return title
# "#001: Title" -> "001: Title"
if (
not self.parent._common_prefix
and re.match(r'^#\d+: ', self.title)
and len(self.title) - 1 > LEFTOVER_MIN):
return self.title[1:]
if (self.parent._common_prefix is not None
and self.title.startswith(self.parent._common_prefix)
and len(self.title) - len(self.parent._common_prefix) > LEFTOVER_MIN):
return self.title[len(self.parent._common_prefix):]
return self.title
def _set_download_task(self, download_task):
self.children = (download_task, self.children[1])
def _get_download_task(self):
return self.children[0]
download_task = property(_get_download_task, _set_download_task)
@property
def downloading(self):
task = self.download_task
if task is None:
return False
return task.status in (task.DOWNLOADING, task.QUEUED, task.PAUSING, task.PAUSED, task.CANCELLING)
def get_player(self, config):
file_type = self.file_type()
if file_type == 'video' and config.player.video and config.player.video != 'default':
player = config.player.video
elif file_type == 'audio' and config.player.audio and config.player.audio != 'default':
player = config.player.audio
else:
player = 'default'
return player
def can_play(self, config):
"""
gPodder.playback_episodes() filters selection with this method.
"""
return (self.was_downloaded(and_exists=True)
or self.can_preview()
or self.can_stream(config))
def can_preview(self):
return (self.downloading
and self.download_task.custom_downloader is not None
and self.download_task.custom_downloader.partial_filename is not None
and os.path.exists(self.download_task.custom_downloader.partial_filename))
def can_stream(self, config):
"""
Don't try streaming if the user has not defined a player;
otherwise we would probably open the browser when giving a URL to xdg-open.
We look at the audio or video player depending on the episode's file type.
"""
player = self.get_player(config)
return player and player != 'default'
def can_download(self):
"""
gPodder.on_download_selected_episodes() filters selection with this method.
PAUSING and PAUSED tasks can be resumed.
"""
return not self.was_downloaded(and_exists=True) and (
self.download_task is None
or self.download_task.can_queue()
or self.download_task.status == self.download_task.PAUSING)
def can_pause(self):
"""
gPodder.on_pause_selected_episodes() filters selection with this method.
"""
return self.download_task is not None and self.download_task.can_pause()
def can_cancel(self):
"""
DownloadTask.cancel() only cancels the following tasks.
"""
return self.download_task is not None and self.download_task.can_cancel()
def can_delete(self):
"""
gPodder.delete_episode_list() filters out locked episodes, and cancels all unlocked tasks in selection.
"""
return self.state != gpodder.STATE_DELETED and not self.archive and (
self.download_task is None or self.download_task.status == self.download_task.FAILED)
def can_lock(self):
"""
gPodder.on_item_toggle_lock_activate() unlocks deleted episodes and toggles all others.
Locked episodes can always be unlocked.
"""
return self.state != gpodder.STATE_DELETED or self.archive
def check_is_new(self):
return (self.state == gpodder.STATE_NORMAL and self.is_new
and not self.downloading)
def save(self):
gpodder.user_extensions.on_episode_save(self)
self.db.save_episode(self)
def on_downloaded(self, filename):
self.state = gpodder.STATE_DOWNLOADED
self.is_new = True
self.file_size = os.path.getsize(filename)
self.save()
def set_state(self, state):
self.state = state
self.save()
def playback_mark(self):
self.is_new = False
self.last_playback = int(time.time())
gpodder.user_extensions.on_episode_playback(self)
self.save()
def mark(self, state=None, is_played=None, is_locked=None):
if state is not None:
self.state = state
if is_played is not None:
self.is_new = not is_played
# "Mark as new" must "undelete" the episode
if self.is_new and self.state == gpodder.STATE_DELETED:
self.state = gpodder.STATE_NORMAL
if is_locked is not None:
self.archive = is_locked
self.save()
def age_in_days(self):
return util.file_age_in_days(self.local_filename(create=False,
check_only=True))
age_int_prop = property(fget=age_in_days)
def get_age_string(self):
return util.file_age_to_string(self.age_in_days())
age_prop = property(fget=get_age_string)
def cache_text_description(self):
if self.description:
self._text_description = self.description
elif self.description_html:
self._text_description = util.remove_html_tags(self.description_html)
else:
self._text_description = ''
def html_description(self):
return self.description_html \
or util.nice_html_description(self.episode_art_url, self.description or _('No description available'))
def one_line_description(self):
MAX_LINE_LENGTH = 120
desc = self._text_description
desc = re.sub(r'\s+', ' ', desc).strip()
if not desc:
return _('No description available')
else:
# Decode the description to avoid gPodder bug 1277
desc = util.convert_bytes(desc).strip()
if len(desc) > MAX_LINE_LENGTH:
return desc[:MAX_LINE_LENGTH] + '...'
else:
return desc
def delete_from_disk(self):
filename = self.local_filename(create=False, check_only=True)
if filename is not None:
gpodder.user_extensions.on_episode_delete(self, filename)
util.delete_file(filename)
self._download_error = None
self.set_state(gpodder.STATE_DELETED)
def get_playback_url(self, config=None, allow_partial=False):
"""Local (or remote) playback/streaming filename/URL
Returns either the local filename or a streaming URL that
can be used to play back this episode.
Also returns the filename of a partially downloaded file
in case partial (preview) playback is desired.
"""
if (allow_partial and self.can_preview()):
return self.download_task.custom_downloader.partial_filename
url = self.local_filename(create=False)
if url is None or not os.path.exists(url):
# FIXME: may custom downloaders provide the real url ?
url = registry.download_url.resolve(config, self.url, self, allow_partial)
return url
def find_unique_file_name(self, filename, extension):
# Remove leading and trailing whitespace + dots (to avoid hidden files)
filename = filename.strip('.' + string.whitespace) + extension
for name in util.generate_names(filename):
if (not self.db.episode_filename_exists(self.podcast_id, name)
or self.download_filename == name):
return name
def local_filename(self, create, force_update=False, check_only=False,
template=None, return_wanted_filename=False):
"""Get (and possibly generate) the local saving filename
Pass create=True if you want this function to generate a
new filename if none exists. You only want to do this when
planning to create/download the file after calling this function.
Normally, you should pass create=False. This will only
create a filename when the file already exists from a previous
version of gPodder (where we used md5 filenames). If the file
does not exist (and the filename also does not exist), this
function will return None.
If you pass force_update=True to this function, it will try to
find a new (better) filename and move the current file if this
is the case. This is useful if (during the download) you get
more information about the file, e.g. the mimetype and you want
to include this information in the file name generation process.
If check_only=True is passed to this function, it will never try
to rename the file, even if it would be a good idea. Use this if you
only want to check if a file exists.
If "template" is specified, it should be a filename that is to
be used as a template for generating the "real" filename.
The generated filename is stored in the database for future access.
If return_wanted_filename is True, the filename will not be written to
the database, but simply returned by this function (for use by the
"import external downloads" feature).
"""
if self.download_filename is None and (check_only or not create):
return None
ext = self.extension(may_call_local_filename=False)
if not check_only and (force_update or not self.download_filename):
# Avoid and catch gPodder bug 1440 and similar situations
if template == '':
logger.warning('Empty template. Report this podcast URL %s',
self.channel.url)
template = None
# Try to find a new filename for the current file
if template is not None:
# If template is specified, trust the template's extension
episode_filename, ext = os.path.splitext(template)
else:
episode_filename, _ = util.filename_from_url(self.url)
if 'redirect' in episode_filename and template is None:
# This looks like a redirection URL - force URL resolving!
logger.warning('Looks like a redirection to me: %s', self.url)
url = util.get_real_url(self.channel.authenticate_url(self.url))
logger.info('Redirection resolved to: %s', url)
episode_filename, _ = util.filename_from_url(url)
# Use title for YouTube, Vimeo and Soundcloud downloads
if (youtube.is_video_link(self.url)
or vimeo.is_video_link(self.url)
or episode_filename == 'stream'):
episode_filename = self.title
# If the basename is empty, use the md5 hexdigest of the URL
if not episode_filename or episode_filename.startswith('redirect.'):
logger.error('Report this feed: Podcast %s, episode %s',
self.channel.url, self.url)
episode_filename = hashlib.md5(self.url.encode('utf-8')).hexdigest()
# Also sanitize ext (see #591 where ext=.mp3?dest-id=754182)
fn_template, ext = util.sanitize_filename_ext(
episode_filename,
ext,
self.MAX_FILENAME_LENGTH,
self.MAX_FILENAME_WITH_EXT_LENGTH)
# Find a unique filename for this episode
wanted_filename = self.find_unique_file_name(fn_template, ext)
if return_wanted_filename:
# return the calculated filename without updating the database
return wanted_filename
# The old file exists, but we have decided to want a different filename
if self.download_filename and wanted_filename != self.download_filename:
# there might be an old download folder crawling around - move it!
new_file_name = os.path.join(self.channel.save_dir, wanted_filename)
old_file_name = os.path.join(self.channel.save_dir, self.download_filename)
if os.path.exists(old_file_name) and not os.path.exists(new_file_name):
logger.info('Renaming %s => %s', old_file_name, new_file_name)
os.rename(old_file_name, new_file_name)
elif force_update and not os.path.exists(old_file_name):
# When we call force_update, the file might not yet exist when we
# call it from the downloading code before saving the file
logger.info('Choosing new filename: %s', new_file_name)
else:
logger.warning('%s exists or %s does not', new_file_name, old_file_name)
logger.info('Updating filename of %s to "%s".', self.url, wanted_filename)
elif self.download_filename is None:
logger.info('Setting download filename: %s', wanted_filename)
self.download_filename = wanted_filename
self.save()
if return_wanted_filename:
# return the filename, not full path
return self.download_filename
return os.path.join(self.channel.save_dir, self.download_filename)
def extension(self, may_call_local_filename=True):
filename, ext = util.filename_from_url(self.url)
if may_call_local_filename:
filename = self.local_filename(create=False)
if filename is not None:
filename, ext = os.path.splitext(filename)
# if we can't detect the extension from the url fallback on the mimetype
if ext == '' or util.file_type_by_extension(ext) is None:
ext = util.extension_from_mimetype(self.mime_type)
return ext
def mark_new(self):
self.is_new = True
self.save()
def mark_old(self):
self.is_new = False
self.save()
def file_exists(self):
filename = self.local_filename(create=False, check_only=True)
if filename is None:
return False
else:
return os.path.exists(filename)
def was_downloaded(self, and_exists=False):
if self.state != gpodder.STATE_DOWNLOADED:
return False
if and_exists and not self.file_exists():
return False
return True
def sync_filename(self, use_custom=False, custom_format=None):
if use_custom:
return util.object_string_formatter(custom_format,
episode=self, podcast=self.channel)
else:
return self.title
def file_type(self):
# Assume all YouTube/Vimeo links are video files
if youtube.is_video_link(self.url) or vimeo.is_video_link(self.url):
return 'video'
return util.file_type_by_extension(self.extension())
@property
def basename(self):
return os.path.splitext(os.path.basename(self.url))[0]
@property
def pubtime(self):
"""
Returns published time as HHMM (or 0000 if not available)
"""
try:
return datetime.datetime.fromtimestamp(self.published).strftime('%H%M')
except:
logger.warning('Cannot format pubtime: %s', self.title, exc_info=True)
return '0000'
def playlist_title(self):
"""Return a title for this episode in a playlist
The title will be composed of the podcast name, the
episode name and the publication date. The return
value is the canonical representation of this episode
in playlists (for example, M3U playlists).
"""
return '%s - %s (%s)' % (self.channel.title,
self.title,
self.cute_pubdate())
def cute_pubdate(self, show_time=False):
result = util.format_date(self.published)
if result is None:
return '(%s)' % _('unknown')
try:
if show_time:
timestamp = datetime.datetime.fromtimestamp(self.published)
return '<small>{}</small>\n{}'.format(timestamp.strftime('%H:%M'), result)
else:
return result
except:
return result
pubdate_prop = property(fget=cute_pubdate)
def published_datetime(self):
return datetime.datetime.fromtimestamp(self.published)
@property
def sortdate(self):
return self.published_datetime().strftime('%Y-%m-%d')
@property
def pubdate_day(self):
return self.published_datetime().strftime('%d')
@property
def pubdate_month(self):
return self.published_datetime().strftime('%m')
@property
def pubdate_year(self):
return self.published_datetime().strftime('%y')
def is_finished(self):
"""Return True if this episode is considered "finished playing"
An episode is considered "finished" when there is a
current position mark on the track, and when the
current position is greater than 99 percent of the
total time or inside the last 10 seconds of a track.
"""
return (self.current_position > 0
and self.total_time > 0
and (self.current_position + 10 >= self.total_time
or self.current_position >= self.total_time * .99))
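# Worked example for is_finished(): with total_time = 600 seconds, the 99%
# threshold is at 594 s and the "last 10 seconds" threshold at 590 s, so any
# current_position of 590 s or more counts as finished.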
def get_play_info_string(self, duration_only=False):
duration = util.format_time(self.total_time)
if duration_only and self.total_time > 0:
return duration
elif self.is_finished():
return '%s (%s)' % (_('Finished'), duration)
elif self.current_position > 0 and \
self.current_position != self.total_time:
position = util.format_time(self.current_position)
return '%s / %s' % (position, duration)
elif self.total_time > 0:
return duration
else:
return '-'
def update_from(self, episode):
for k in ('title', 'url', 'episode_art_url', 'description', 'description_html', 'chapters', 'link',
'published', 'guid', 'payment_url'):
setattr(self, k, getattr(episode, k))
# Don't overwrite file size on downloaded episodes
# See #648 refreshing a youtube podcast clears downloaded file size
if self.state != gpodder.STATE_DOWNLOADED:
setattr(self, 'file_size', getattr(episode, 'file_size'))
class PodcastChannel(PodcastModelObject):
__slots__ = schema.PodcastColumns + ('_common_prefix', '_update_error',)
UNICODE_TRANSLATE = {ord('ö'): 'o', ord('ä'): 'a', ord('ü'): 'u'}
# Enumerations for download strategy
STRATEGY_DEFAULT, STRATEGY_LATEST = list(range(2))
# Description and ordering of strategies
STRATEGIES = [
(STRATEGY_DEFAULT, _('Default')),
(STRATEGY_LATEST, _('Only keep latest')),
]
MAX_FOLDERNAME_LENGTH = 60
SECONDS_PER_DAY = 24 * 60 * 60
SECONDS_PER_WEEK = 7 * 24 * 60 * 60
EpisodeClass = PodcastEpisode
feed_fetcher = gPodderFetcher()
def __init__(self, model, channel_id=None):
self.parent = model
self.children = []
self.id = channel_id
self.url = None
self.title = ''
self.link = ''
self.description = ''
self.cover_url = None
self.payment_url = None
self.auth_username = ''
self.auth_password = ''
self.http_last_modified = None
self.http_etag = None
self.auto_archive_episodes = False
self.download_folder = None
self.pause_subscription = False
self.sync_to_mp3_player = True
self.cover_thumb = None
self.section = _('Other')
self._common_prefix = None
self.download_strategy = PodcastChannel.STRATEGY_DEFAULT
if self.id:
self.children = self.db.load_episodes(self, self.episode_factory)
self._determine_common_prefix()
self._update_error = None
@property
def model(self):
return self.parent
@property
def db(self):
return self.parent.db
def get_download_strategies(self):
for value, caption in PodcastChannel.STRATEGIES:
yield self.download_strategy == value, value, caption
def set_download_strategy(self, download_strategy):
if download_strategy == self.download_strategy:
return
caption = dict(self.STRATEGIES).get(download_strategy)
if caption is not None:
logger.debug('Strategy for %s changed to %s', self.title, caption)
self.download_strategy = download_strategy
else:
logger.warning('Cannot set strategy to %d', download_strategy)
def rewrite_url(self, new_url):
new_url = util.normalize_feed_url(new_url)
if new_url is None:
return None
self.url = new_url
self.http_etag = None
self.http_last_modified = None
self.save()
return new_url
def check_download_folder(self):
"""Check the download folder for externally-downloaded files
This will try to assign downloaded files with episodes in the
database.
This will also cause missing files to be marked as deleted.
"""
known_files = set()
for episode in self.get_episodes(gpodder.STATE_DOWNLOADED):
if episode.was_downloaded():
filename = episode.local_filename(create=False)
if filename is None:
# No filename has been determined for this episode
continue
if not os.path.exists(filename):
# File has been deleted by the user - simulate a
# delete event (also marks the episode as deleted)
logger.debug('Episode deleted: %s', filename)
episode.delete_from_disk()
continue
known_files.add(filename)
# youtube-dl and yt-dlp create <name>.partial and <name>.partial.<ext> files while downloading.
# On startup, the latter is reported as an unknown external file.
# Both files are properly removed when the download completes.
existing_files = {filename
for filename in glob.glob(os.path.join(self.save_dir, '*'))
if not filename.endswith('.partial')}
ignore_files = ['folder' + ext for ext in
coverart.CoverDownloader.EXTENSIONS]
external_files = existing_files.difference(list(known_files)
+ [os.path.join(self.save_dir, ignore_file)
for ignore_file in ignore_files])
if not external_files:
return
all_episodes = self.get_all_episodes()
for filename in external_files:
found = False
basename = os.path.basename(filename)
existing = [e for e in all_episodes if e.download_filename == basename]
if existing:
existing = existing[0]
logger.info('Importing external download: %s', filename)
existing.on_downloaded(filename)
continue
for episode in all_episodes:
wanted_filename = episode.local_filename(create=True,
return_wanted_filename=True)
if basename == wanted_filename:
logger.info('Importing external download: %s', filename)
episode.download_filename = basename
episode.on_downloaded(filename)
found = True
break
wanted_base, wanted_ext = os.path.splitext(wanted_filename)
target_base, target_ext = os.path.splitext(basename)
if wanted_base == target_base:
# Filenames only differ by the extension
wanted_type = util.file_type_by_extension(wanted_ext)
target_type = util.file_type_by_extension(target_ext)
# If wanted type is None, assume that we don't know
# the right extension before the download (e.g. YouTube)
# if the wanted type is the same as the target type,
# assume that it's the correct file
if wanted_type is None or wanted_type == target_type:
logger.info('Importing external download: %s', filename)
episode.download_filename = basename
episode.on_downloaded(filename)
found = True
break
if not found and not util.is_system_file(filename):
logger.warning('Unknown external file: %s', filename)
@classmethod
def sort_key(cls, podcast):
key = util.convert_bytes(podcast.title.lower())
return re.sub(r'^the ', '', key).translate(cls.UNICODE_TRANSLATE)
@classmethod
def load(cls, model, url, create=True, authentication_tokens=None, max_episodes=0):
existing = [p for p in model.get_podcasts() if p.url == url]
if existing:
return existing[0]
if create:
tmp = cls(model)
tmp.url = url
if authentication_tokens is not None:
tmp.auth_username = authentication_tokens[0]
tmp.auth_password = authentication_tokens[1]
# Save podcast, so it gets an ID assigned before
# updating the feed and saving episodes
tmp.save()
try:
tmp.update(max_episodes)
except Exception:
logger.debug('Fetch failed. Removing buggy feed.')
tmp.remove_downloaded()
tmp.delete()
raise
# Determine the section in which this podcast should appear
tmp.section = tmp._get_content_type()
# Determine a new download folder now that we have the title
tmp.get_save_dir(force_new=True)
# Mark episodes as downloaded if files already exist (bug 902)
tmp.check_download_folder()
# Determine common prefix of episode titles
tmp._determine_common_prefix()
tmp.save()
gpodder.user_extensions.on_podcast_subscribe(tmp)
return tmp
def episode_factory(self, d):
"""
This function takes a dictionary containing key-value pairs for
episodes and returns a new PodcastEpisode object that is connected
to this object.
Returns: A new PodcastEpisode object
"""
episode = self.EpisodeClass.create_from_dict(d, self)
episode.cache_text_description()
return episode
def _consume_updated_title(self, new_title):
# Replace multi-space and newlines with single space (Maemo bug 11173)
new_title = re.sub(r'\s+', ' ', new_title).strip()
# Only update the podcast-supplied title when we
# don't yet have a title, or if the title is the
# feed URL (e.g. we didn't find a title before).
if not self.title or self.title == self.url:
self.title = new_title
# Start YouTube- and Vimeo-specific title FIX
YOUTUBE_PREFIX = 'Uploads by '
VIMEO_PREFIX = 'Vimeo / '
if self.title.startswith(YOUTUBE_PREFIX):
self.title = self.title[len(YOUTUBE_PREFIX):] + ' on YouTube'
elif self.title.startswith(VIMEO_PREFIX):
self.title = self.title[len(VIMEO_PREFIX):] + ' on Vimeo'
# End YouTube- and Vimeo-specific title FIX
def _consume_metadata(self, title, link, description, cover_url,
payment_url):
self._consume_updated_title(title)
self.link = link
self.description = description
self.cover_url = cover_url
self.payment_url = payment_url
self.save()
def _consume_updated_feed(self, feed, max_episodes=0):
self._consume_metadata(feed.get_title() or self.url,
feed.get_link() or self.link,
feed.get_description() or '',
feed.get_cover_url() or None,
feed.get_payment_url() or None)
# Update values for HTTP conditional requests
self.http_etag = feed.get_http_etag() or self.http_etag
self.http_last_modified = feed.get_http_last_modified() or self.http_last_modified
# Load all episodes to update them properly.
existing = self.get_all_episodes()
# GUID-based existing episode list
existing_guids = {e.guid: e for e in existing}
# Get most recent published of all episodes
last_published = self.db.get_last_published(self) or 0
# fix for #516 an episode was marked published one month in the future (typo in month number)
# causing every new episode to be marked old
tomorrow = datetime.datetime.now().timestamp() + self.SECONDS_PER_DAY
if last_published > tomorrow:
logger.debug('Episode published in the future for podcast %s', self.title)
last_published = tomorrow
# new episodes from feed
new_episodes, seen_guids = feed.get_new_episodes(self, existing_guids)
# pagination
next_feed = feed
next_max_episodes = max_episodes - len(seen_guids)
# want to paginate if:
# - we raised the max episode count so we want more old episodes now
# FIXME: could also be that the feed has fewer episodes than max_episodes and we're paginating for nothing
# - all episodes are new so we continue getting them until max_episodes is reached
could_have_more = max_episodes > len(existing) or len(new_episodes) == len(seen_guids)
while next_feed and could_have_more:
if max_episodes > 0 and next_max_episodes <= 0:
logger.debug("stopping pagination: seen enough episodes (%i)", max_episodes)
break
# brand new: try to load another page!
next_result = next_feed.get_next_page(self, next_max_episodes)
if next_result and next_result.status == feedcore.UPDATED_FEED:
next_feed = next_result.feed
for e in new_episodes:
existing_guids[e.guid] = e
next_new_episodes, next_seen_guids = next_feed.get_new_episodes(self, existing_guids)
logger.debug("next page has %i new episodes, %i seen episodes", len(next_new_episodes), len(next_seen_guids))
if not next_seen_guids:
logger.debug("breaking out of get_next_page loop because no episode in this page")
break
next_max_episodes -= len(next_seen_guids)
new_episodes += next_new_episodes
seen_guids = seen_guids.union(next_seen_guids)
else:
next_feed = None
# mark episodes not new
real_new_episodes = []
# Search all entries for new episodes
for episode in new_episodes:
# Workaround for bug 340: If the episode has been
# published earlier than one week before the most
# recent existing episode, do not mark it as new.
if episode.published < last_published - self.SECONDS_PER_WEEK:
logger.debug('Episode with old date: %s', episode.title)
episode.is_new = False
episode.save()
if episode.is_new:
real_new_episodes.append(episode)
# Only allow a certain number of new episodes per update
if (self.download_strategy == PodcastChannel.STRATEGY_LATEST
and len(real_new_episodes) > 1):
episode.is_new = False
episode.save()
self.children.extend(new_episodes)
self.remove_unreachable_episodes(existing, seen_guids, max_episodes)
return real_new_episodes
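    # Illustrative walk-through, not from the original source, of the
    # pagination guard above, assuming max_episodes=100 and 10 already-known
    # episodes: 100 > 10, so older pages are requested; each page reduces
    # next_max_episodes by the number of seen GUIDs, and the loop stops once
    # that counter reaches zero, a page yields no unseen GUIDs, or the feed
    # reports no further page.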
def remove_unreachable_episodes(self, existing, seen_guids, max_episodes):
# Remove "unreachable" episodes - episodes that have not been
# downloaded and that the feed does not list as downloadable anymore
# Keep episodes that are currently being downloaded, though (bug 1534)
if self.id is not None:
episodes_to_purge = [e for e in existing if
e.state != gpodder.STATE_DOWNLOADED
and e.guid not in seen_guids and not e.downloading]
for episode in episodes_to_purge:
logger.debug('Episode removed from feed: %s (%s)',
episode.title, episode.guid)
gpodder.user_extensions.on_episode_removed_from_podcast(episode)
self.db.delete_episode_by_guid(episode.guid, self.id)
# Remove the episode from the "children" episodes list
if self.children is not None:
self.children.remove(episode)
# This *might* cause episodes to be skipped if there were more than
# limit.episodes items added to the feed between updates.
# The benefit is that it prevents old episodes from appearing as new
# in certain situations (see bug #340).
self.db.purge(max_episodes, self.id) # TODO: Remove from self.children!
# Sort episodes by pubdate, descending
self.children.sort(key=lambda e: e.published, reverse=True)
def update(self, max_episodes=0):
max_episodes = int(max_episodes)
new_episodes = []
try:
result = self.feed_fetcher.fetch_channel(self, max_episodes)
if result.status == feedcore.UPDATED_FEED:
new_episodes = self._consume_updated_feed(result.feed, max_episodes)
elif result.status == feedcore.NEW_LOCATION:
# FIXME: could return the feed because in autodiscovery it is parsed already
url = result.feed
logger.info('New feed location: %s => %s', self.url, url)
if url in {x.url for x in self.model.get_podcasts()}:
raise Exception('Already subscribed to ' + url)
self.url = url
# With the updated URL, fetch the feed again
self.update(max_episodes)
return new_episodes
elif result.status == feedcore.NOT_MODIFIED:
pass
self.save()
except Exception as e:
# "Not really" errors
# feedcore.AuthenticationRequired
# Temporary errors
# feedcore.Offline
# feedcore.BadRequest
# feedcore.InternalServerError
# feedcore.WifiLogin
# Permanent errors
# feedcore.Unsubscribe
# feedcore.NotFound
# feedcore.InvalidFeed
# feedcore.UnknownStatusCode
gpodder.user_extensions.on_podcast_update_failed(self, e)
raise
gpodder.user_extensions.on_podcast_updated(self)
# Re-determine the common prefix for all episodes
self._determine_common_prefix()
self.db.commit()
return new_episodes
def delete(self):
self.db.delete_podcast(self)
self.model._remove_podcast(self)
def save(self):
if self.download_folder is None:
self.get_save_dir()
gpodder.user_extensions.on_podcast_save(self)
self.db.save_podcast(self)
self.model._append_podcast(self)
def get_statistics(self):
if self.id is None:
return (0, 0, 0, 0, 0)
else:
return self.db.get_podcast_statistics(self.id)
@property
def group_by(self):
if not self.section:
self.section = self._get_content_type()
self.save()
return self.section
def _get_content_type(self):
if 'youtube.com' in self.url or 'vimeo.com' in self.url:
return _('Video')
audio, video, other = 0, 0, 0
for content_type in self.db.get_content_types(self.id):
content_type = content_type.lower()
if content_type.startswith('audio'):
audio += 1
elif content_type.startswith('video'):
video += 1
else:
other += 1
if audio >= video:
return _('Audio')
elif video > other:
return _('Video')
return _('Other')
def authenticate_url(self, url):
return util.url_add_authentication(url, self.auth_username, self.auth_password)
def rename(self, new_title):
new_title = new_title.strip()
if self.title == new_title:
return
fn_template = util.sanitize_filename(new_title, self.MAX_FOLDERNAME_LENGTH)
new_folder_name = self.find_unique_folder_name(fn_template)
if new_folder_name and new_folder_name != self.download_folder:
new_folder = os.path.join(gpodder.downloads, new_folder_name)
old_folder = os.path.join(gpodder.downloads, self.download_folder)
if os.path.exists(old_folder):
if not os.path.exists(new_folder):
# Old folder exists, new folder does not -> simply rename
logger.info('Renaming %s => %s', old_folder, new_folder)
os.rename(old_folder, new_folder)
else:
# Both folders exist -> move files and delete old folder
logger.info('Moving files from %s to %s', old_folder,
new_folder)
for file in glob.glob(os.path.join(old_folder, '*')):
shutil.move(file, new_folder)
logger.info('Removing %s', old_folder)
shutil.rmtree(old_folder, ignore_errors=True)
self.download_folder = new_folder_name
self.title = new_title
self.save()
def _determine_common_prefix(self):
# We need at least 2 episodes for the prefix to be "common" ;)
if len(self.children) < 2:
self._common_prefix = ''
return
prefix = os.path.commonprefix([x.title for x in self.children])
# The common prefix must end with a space - otherwise it's not
# on a word boundary, and we might end up chopping off too much
if prefix and prefix[-1] != ' ':
prefix = prefix[:prefix.rfind(' ') + 1]
self._common_prefix = prefix
def get_all_episodes(self):
return self.children
def get_episodes(self, state):
return [e for e in self.get_all_episodes() if e.state == state]
def find_unique_folder_name(self, download_folder):
# Remove trailing dots to avoid errors on Windows (bug 600)
# Also remove leading dots to avoid hidden folders on Linux
download_folder = download_folder.strip('.' + string.whitespace)
for folder_name in util.generate_names(download_folder):
if (not self.db.podcast_download_folder_exists(folder_name)
or self.download_folder == folder_name):
return folder_name
def get_save_dir(self, force_new=False):
if self.download_folder is None or force_new:
fn_template = util.sanitize_filename(self.title, self.MAX_FOLDERNAME_LENGTH)
if not fn_template:
fn_template = util.sanitize_filename(self.url, self.MAX_FOLDERNAME_LENGTH)
# Find a unique folder name for this podcast
download_folder = self.find_unique_folder_name(fn_template)
# Try removing the download folder if it has been created previously
if self.download_folder is not None:
folder = os.path.join(gpodder.downloads, self.download_folder)
try:
os.rmdir(folder)
except OSError:
logger.info('Old download folder is kept for %s', self.url)
logger.info('Updating download_folder of %s to %s', self.url,
download_folder)
self.download_folder = download_folder
self.save()
save_dir = os.path.join(gpodder.downloads, self.download_folder)
# Create save_dir if it does not yet exist
if not util.make_directory(save_dir):
logger.error('Could not create save_dir: %s', save_dir)
return save_dir
save_dir = property(fget=get_save_dir)
def remove_downloaded(self):
# Remove the download directory
for episode in self.get_episodes(gpodder.STATE_DOWNLOADED):
filename = episode.local_filename(create=False, check_only=True)
if filename is not None:
gpodder.user_extensions.on_episode_delete(episode, filename)
shutil.rmtree(self.save_dir, True)
@property
def cover_file(self):
return os.path.join(self.save_dir, 'folder')
class Model(object):
PodcastClass = PodcastChannel
def __init__(self, db):
self.db = db
self.children = None
def _append_podcast(self, podcast):
if podcast not in self.children:
self.children.append(podcast)
def _remove_podcast(self, podcast):
self.children.remove(podcast)
gpodder.user_extensions.on_podcast_delete(podcast)
def get_podcasts(self):
def podcast_factory(dct, db):
return self.PodcastClass.create_from_dict(dct, self, dct['id'])
if self.children is None:
self.children = self.db.load_podcasts(podcast_factory)
# Check download folders for changes (bug 902)
for podcast in self.children:
podcast.check_download_folder()
return self.children
def get_podcast(self, url):
for p in self.get_podcasts():
if p.url == url:
return p
return None
def load_podcast(self, url, create=True, authentication_tokens=None,
max_episodes=0):
assert all(url != podcast.url for podcast in self.get_podcasts())
return self.PodcastClass.load(self, url, create,
authentication_tokens,
max_episodes)
@classmethod
def podcast_sort_key(cls, podcast):
return cls.PodcastClass.sort_key(podcast)
@classmethod
def episode_sort_key(cls, episode):
return episode.published
@classmethod
def sort_episodes_by_pubdate(cls, episodes, reverse=False):
"""Sort a list of PodcastEpisode objects chronologically
        Returns an iterable, sorted sequence of the episodes
"""
return sorted(episodes, key=cls.episode_sort_key, reverse=reverse)
def check_root_folder_path():
root = gpodder.home
if gpodder.ui.win32:
longest = len(root) \
+ 1 + PodcastChannel.MAX_FOLDERNAME_LENGTH \
+ 1 + PodcastEpisode.MAX_FILENAME_WITH_EXT_LENGTH
if longest > 260:
return _("Warning: path to gPodder home (%(root)s) is very long "
"and can result in failure to download files.\n" % {"root": root}) \
+ _("You're advised to set it to a shorter path.")
return None
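# Illustrative usage sketch, not from the original module: loading the podcast
# list through the Model facade. "db" is assumed to be an opened gPodder
# database object as passed in by the application core.
#
#   model = Model(db)
#   for podcast in model.get_podcasts():
#       print(podcast.title, len(podcast.get_all_episodes()))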
| 60,823 | Python | .py | 1,267 | 37.062352 | 137 | 0.615718 | gpodder/gpodder | 1,268 | 204 | 307 | GPL-3.0 | 9/5/2024, 5:10:30 PM (Europe/Amsterdam) |
| 8,304 | opml.py | gpodder_gpodder/src/gpodder/opml.py |
# -*- coding: utf-8 -*-
#
# gPodder - A media aggregator and podcast client
# Copyright (c) 2005-2018 The gPodder Team
#
# gPodder is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# gPodder is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#
# opml.py -- OPML import and export functionality
# Thomas Perl <thp@perli.net> 2007-08-19
#
# based on: libopmlreader.py (2006-06-13)
# libopmlwriter.py (2005-12-08)
#
"""OPML import and export functionality
This module contains helper classes to import subscriptions
from OPML files on the web and to export a list of channel
objects to valid OPML 1.1 files that can be used to backup
or distribute gPodder's channel subscriptions.
"""
import io
import logging
import os
import os.path
import xml.dom.minidom
from email.utils import formatdate
import gpodder
from gpodder import util
logger = logging.getLogger(__name__)
class Importer(object):
"""
    Helper class to import an OPML feed from a URL (via
    util.urlopen) or a local file and parse it into a list
    of channel dictionaries (self.items) for display in the GUI.
This class should support standard OPML feeds and
contains workarounds to support odeo.com feeds.
"""
VALID_TYPES = ('rss', 'link')
def __init__(self, url):
"""
Parses the OPML feed from the given URL into
a local data structure containing channel metadata.
"""
self.items = []
if os.path.exists(url):
doc = xml.dom.minidom.parse(url)
else:
doc = xml.dom.minidom.parse(io.BytesIO(util.urlopen(url).content))
for outline in doc.getElementsByTagName('outline'):
# Make sure we are dealing with a valid link type (ignore case)
otl_type = outline.getAttribute('type')
if otl_type is None or otl_type.lower() not in self.VALID_TYPES:
continue
if outline.getAttribute('xmlUrl') or outline.getAttribute('url'):
channel = {
'url':
outline.getAttribute('xmlUrl')
or outline.getAttribute('url'),
'title':
outline.getAttribute('title')
or outline.getAttribute('text')
or outline.getAttribute('xmlUrl')
or outline.getAttribute('url'),
'description':
outline.getAttribute('text')
or outline.getAttribute('xmlUrl')
or outline.getAttribute('url'),
}
if channel['description'] == channel['title']:
channel['description'] = channel['url']
for attr in ('url', 'title', 'description'):
channel[attr] = channel[attr].strip()
self.items.append(channel)
if not len(self.items):
logger.info('OPML import finished, but no items found: %s', url)
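# Illustrative usage sketch, not from the original module; the URL below is a
# placeholder. Importer accepts either a local path or a URL (see __init__).
#
#   importer = Importer('https://example.org/subscriptions.opml')
#   for channel in importer.items:
#       print(channel['title'], channel['url'])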
class Exporter(object):
"""
Helper class to export a list of channel objects
to a local file in OPML 1.1 format.
See www.opml.org for the OPML specification.
"""
FEED_TYPE = 'rss'
def __init__(self, filename):
if filename is None:
self.filename = None
elif filename.endswith('.opml') or filename.endswith('.xml'):
self.filename = filename
else:
self.filename = '%s.opml' % (filename, )
def create_node(self, doc, name, content):
"""
Creates a simple XML Element node in a document
with tag name "name" and text content "content",
as in <name>content</name> and returns the element.
"""
node = doc.createElement(name)
node.appendChild(doc.createTextNode(content))
return node
def create_outline(self, doc, channel):
"""
        Creates an OPML outline as an XML Element node in a
document for the supplied channel.
"""
outline = doc.createElement('outline')
outline.setAttribute('title', channel.title)
outline.setAttribute('text', channel.description)
outline.setAttribute('xmlUrl', channel.url)
outline.setAttribute('type', self.FEED_TYPE)
return outline
def write(self, channels):
"""
        Creates an XML document containing metadata for each
channel object in the "channels" parameter, which
should be a list of channel objects.
OPML 2.0 specification: http://www.opml.org/spec2
Returns True on success or False when there was an
error writing the file.
"""
if self.filename is None:
return False
doc = xml.dom.minidom.Document()
opml = doc.createElement('opml')
opml.setAttribute('version', '2.0')
doc.appendChild(opml)
head = doc.createElement('head')
head.appendChild(self.create_node(doc, 'title', 'gPodder subscriptions'))
head.appendChild(self.create_node(doc, 'dateCreated', formatdate(localtime=True)))
opml.appendChild(head)
body = doc.createElement('body')
for channel in channels:
body.appendChild(self.create_outline(doc, channel))
opml.appendChild(body)
try:
data = doc.toprettyxml(encoding='utf-8', indent=' ', newl=os.linesep)
# We want to have at least 512 KiB free disk space after
# saving the opml data, if this is not possible, don't
# try to save the new file, but keep the old one so we
            # don't end up with a clobbered, empty opml file.
FREE_DISK_SPACE_AFTER = 1024 * 512
path = os.path.dirname(self.filename) or os.path.curdir
available = util.get_free_disk_space(path)
if available != -1 and available < 2 * len(data) + FREE_DISK_SPACE_AFTER:
# On Windows, if we have zero bytes available, assume that we have
# not had the win32file module available + assume enough free space
if not gpodder.ui.win32 or available > 0:
logger.error('Not enough free disk space to save channel list to %s', self.filename)
return False
fp = open(self.filename + '.tmp', 'wb')
fp.write(data)
fp.close()
util.atomic_rename(self.filename + '.tmp', self.filename)
        except Exception:
logger.error('Could not open file for writing: %s', self.filename,
exc_info=True)
return False
return True
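# Illustrative usage sketch, not from the original module: exporting a list of
# channel-like objects. Only the attributes read by create_outline() above
# (title, description, url) are required; the file name is a placeholder.
if __name__ == '__main__':
    class _DemoChannel(object):
        title = 'Example podcast'
        description = 'A made-up feed used only for this example'
        url = 'https://example.org/feed.xml'

    exporter = Exporter('/tmp/gpodder-example.opml')
    print('written:', exporter.write([_DemoChannel()]))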
| 7,126 | Python | .py | 167 | 33.42515 | 104 | 0.621138 | gpodder/gpodder | 1,268 | 204 | 307 | GPL-3.0 | 9/5/2024, 5:10:30 PM (Europe/Amsterdam) |
| 8,305 | registry.py | gpodder_gpodder/src/gpodder/registry.py |
#
# gpodder.registry - Central hub for exchanging plugin resolvers (2014-03-09)
# Copyright (c) 2014, Thomas Perl <m@thp.io>
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH
# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
# AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,
# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR
# OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
# PERFORMANCE OF THIS SOFTWARE.
#
import logging
logger = logging.getLogger(__name__)
class Resolver(object):
def __init__(self, name, description):
self._name = name
self._description = description
self._resolvers = []
def resolve(self, item, default, *args):
for resolver in self._resolvers:
result = resolver(item, *args)
if result is not None:
logger.info('{} resolved by {}: {} -> {}'.format(self._name, self._info(resolver),
default, result))
return result
return default
def each(self, *args):
for resolver in self._resolvers:
result = resolver(*args)
if result is not None:
yield result
def call_each(self, *args):
list(self.each(*args))
def select(self, selector=None):
for resolver in self._resolvers:
if selector is None or selector(resolver):
yield resolver
def register(self, func):
logger.debug('Registering {} resolver: {}'.format(self._name, func))
self._resolvers.append(func)
return func
def unregister(self, func):
logger.debug('Unregistering {} resolver: {}'.format(self._name, func))
self._resolvers.remove(func)
def register_instance(self, klass):
logger.debug('Registering {} resolver instance: {}'.format(self._name, klass))
self._resolvers.append(klass())
return klass
def unregister_instance(self, klass):
logger.debug('Unregistering {} resolver instance: {}'.format(self._name, klass))
self._resolvers = [r for r in self._resolvers if not isinstance(r, klass)]
def _info(self, resolver):
return '%s from %s' % (resolver.__name__ if hasattr(resolver, '__name__')
else resolver.__class__.__name__, resolver.__module__)
def _dump(self, indent=''):
print('== {} ({}) =='.format(self._name, self._description))
print('\n'.join('%s- %s' % (indent, self._info(resolver)) for resolver in self._resolvers))
print()
RESOLVER_NAMES = {
# 'cover_art': 'Resolve the real cover art URL of an episode',
'download_url': 'Resolve the real download URL of an episode',
# 'episode_basename': 'Resolve a good, unique download filename for an episode',
# 'podcast_title': 'Resolve a good title for a podcast',
# 'content_type': 'Resolve the content type (audio, video) of an episode',
'feed_handler': 'Handle fetching of a feed',
# 'fallback_feed_handler': 'Handle parsing of a feed (catch-all)',
# 'url_shortcut': 'Expand shortcuts when adding a new URL',
# 'after_download': 'Function to call with episodes after download finishes',
# 'directory': 'Podcast directory and search provider',
'custom_downloader': 'custom download method. See download.CustomDownloader',
}
LOCALS = locals()
for name, description in RESOLVER_NAMES.items():
LOCALS[name] = Resolver(name, description)
def dump(module_dict=LOCALS):
for name in sorted(RESOLVER_NAMES):
module_dict[name]._dump(' ')
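# Illustrative sketch, not from the original module: how client code might hook
# into one of the resolvers declared above. Per Resolver.resolve(), a registered
# callable receives the item plus any extra arguments the caller passes, and
# returns a value to override the default or None to fall through. The resolver
# below is deliberately inert and its name is made up.
#
#   from gpodder import registry
#
#   @registry.download_url.register
#   def noop_download_url(episode, *args):
#       return None  # no override; keep the default download URL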
| 4,016 | Python | .py | 82 | 41.621951 | 99 | 0.655507 | gpodder/gpodder | 1,268 | 204 | 307 | GPL-3.0 | 9/5/2024, 5:10:30 PM (Europe/Amsterdam) |
| 8,306 | schema.py | gpodder_gpodder/src/gpodder/schema.py |
# -*- coding: utf-8 -*-
#
# gPodder - A media aggregator and podcast client
# Copyright (c) 2005-2018 The gPodder Team
#
# gPodder is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# gPodder is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# gpodder.schema - Database schema update and migration facility
# Thomas Perl <thp@gpodder.org>; 2011-02-01
import logging
import shutil
import time
from sqlite3 import dbapi2 as sqlite
from gpodder import util
logger = logging.getLogger(__name__)
EpisodeColumns = (
'podcast_id',
'title',
'description',
'url',
'published',
'guid',
'link',
'file_size',
'mime_type',
'state',
'is_new',
'archive',
'download_filename',
'total_time',
'current_position',
'current_position_updated',
'last_playback',
'payment_url',
'description_html',
'episode_art_url',
'chapters',
)
PodcastColumns = (
'title',
'url',
'link',
'description',
'cover_url',
'auth_username',
'auth_password',
'http_last_modified',
'http_etag',
'auto_archive_episodes',
'download_folder',
'pause_subscription',
'section',
'payment_url',
'download_strategy',
'sync_to_mp3_player',
'cover_thumb',
)
CURRENT_VERSION = 8
# SQL commands to upgrade old database versions to new ones
# Each item is a tuple (old_version, new_version, sql_commands) that should be
# applied to the database to migrate from old_version to new_version.
UPGRADE_SQL = [
# Version 2: Section labels for the podcast list
(1, 2, """
ALTER TABLE podcast ADD COLUMN section TEXT NOT NULL DEFAULT ''
"""),
# Version 3: Flattr integration (+ invalidate http_* fields to force
# a feed update, so that payment URLs are parsed during the next check)
(2, 3, """
ALTER TABLE podcast ADD COLUMN payment_url TEXT NULL DEFAULT NULL
ALTER TABLE episode ADD COLUMN payment_url TEXT NULL DEFAULT NULL
UPDATE podcast SET http_last_modified=NULL, http_etag=NULL
"""),
# Version 4: Per-podcast download strategy management
(3, 4, """
ALTER TABLE podcast ADD COLUMN download_strategy INTEGER NOT NULL DEFAULT 0
"""),
# Version 5: Per-podcast MP3 player device synchronization option
(4, 5, """
ALTER TABLE podcast ADD COLUMN sync_to_mp3_player INTEGER NOT NULL DEFAULT 1
"""),
# Version 6: Add thumbnail for cover art
(5, 6, """
ALTER TABLE podcast ADD COLUMN cover_thumb BLOB NULL DEFAULT NULL
"""),
# Version 7: Add HTML description
(6, 7, """
ALTER TABLE episode ADD COLUMN description_html TEXT NOT NULL DEFAULT ''
UPDATE episode SET description_html=description WHERE is_html(description)
UPDATE episode SET description=remove_html_tags(description_html) WHERE is_html(description)
UPDATE podcast SET http_last_modified=NULL, http_etag=NULL
"""),
# Version 8: Add episode thumbnail URL and chapters
(7, 8, """
ALTER TABLE episode ADD COLUMN episode_art_url TEXT NULL DEFAULT NULL
ALTER TABLE episode ADD COLUMN chapters TEXT NULL DEFAULT NULL
UPDATE podcast SET http_last_modified=NULL, http_etag=NULL
"""),
]
def initialize_database(db):
# Create table for podcasts
db.execute("""
CREATE TABLE podcast (
id INTEGER PRIMARY KEY NOT NULL,
title TEXT NOT NULL DEFAULT '',
url TEXT NOT NULL DEFAULT '',
link TEXT NOT NULL DEFAULT '',
description TEXT NOT NULL DEFAULT '',
cover_url TEXT NULL DEFAULT NULL,
auth_username TEXT NULL DEFAULT NULL,
auth_password TEXT NULL DEFAULT NULL,
http_last_modified TEXT NULL DEFAULT NULL,
http_etag TEXT NULL DEFAULT NULL,
auto_archive_episodes INTEGER NOT NULL DEFAULT 0,
download_folder TEXT NOT NULL DEFAULT '',
pause_subscription INTEGER NOT NULL DEFAULT 0,
section TEXT NOT NULL DEFAULT '',
payment_url TEXT NULL DEFAULT NULL,
download_strategy INTEGER NOT NULL DEFAULT 0,
sync_to_mp3_player INTEGER NOT NULL DEFAULT 1,
cover_thumb BLOB NULL DEFAULT NULL
)
""")
INDEX_SQL = """
CREATE UNIQUE INDEX idx_podcast_url ON podcast (url)
CREATE UNIQUE INDEX idx_podcast_download_folder ON podcast (download_folder)
"""
for sql in INDEX_SQL.strip().split('\n'):
db.execute(sql)
# Create table for episodes
db.execute("""
CREATE TABLE episode (
id INTEGER PRIMARY KEY NOT NULL,
podcast_id INTEGER NOT NULL,
title TEXT NOT NULL DEFAULT '',
description TEXT NOT NULL DEFAULT '',
url TEXT NOT NULL,
published INTEGER NOT NULL DEFAULT 0,
guid TEXT NOT NULL,
link TEXT NOT NULL DEFAULT '',
file_size INTEGER NOT NULL DEFAULT 0,
mime_type TEXT NOT NULL DEFAULT 'application/octet-stream',
state INTEGER NOT NULL DEFAULT 0,
is_new INTEGER NOT NULL DEFAULT 0,
archive INTEGER NOT NULL DEFAULT 0,
download_filename TEXT NULL DEFAULT NULL,
total_time INTEGER NOT NULL DEFAULT 0,
current_position INTEGER NOT NULL DEFAULT 0,
current_position_updated INTEGER NOT NULL DEFAULT 0,
last_playback INTEGER NOT NULL DEFAULT 0,
payment_url TEXT NULL DEFAULT NULL,
description_html TEXT NOT NULL DEFAULT '',
episode_art_url TEXT NULL DEFAULT NULL,
chapters TEXT NULL DEFAULT NULL
)
""")
INDEX_SQL = """
CREATE INDEX idx_episode_podcast_id ON episode (podcast_id)
CREATE UNIQUE INDEX idx_episode_download_filename ON episode (podcast_id, download_filename)
CREATE UNIQUE INDEX idx_episode_guid ON episode (podcast_id, guid)
CREATE INDEX idx_episode_state ON episode (state)
CREATE INDEX idx_episode_is_new ON episode (is_new)
CREATE INDEX idx_episode_archive ON episode (archive)
CREATE INDEX idx_episode_published ON episode (published)
"""
for sql in INDEX_SQL.strip().split('\n'):
db.execute(sql)
# Create table for version info / metadata + insert initial data
db.execute("""CREATE TABLE version (version integer)""")
db.execute("INSERT INTO version (version) VALUES (%d)" % CURRENT_VERSION)
db.commit()
def upgrade(db, filename):
if not list(db.execute('PRAGMA table_info(version)')):
initialize_database(db)
return
db.create_function('is_html', 1, util.is_html)
db.create_function('remove_html_tags', 1, util.remove_html_tags)
version = db.execute('SELECT version FROM version').fetchone()[0]
if version == CURRENT_VERSION:
return
# We are trying an upgrade - save the current version of the DB
backup = '%s_upgraded-v%d_%d' % (filename, int(version), int(time.time()))
try:
shutil.copy(filename, backup)
except Exception as e:
        raise Exception('Cannot create DB backup before upgrade: ' + str(e))
db.execute("DELETE FROM version")
for old_version, new_version, upgrade in UPGRADE_SQL:
if version == old_version:
for sql in upgrade.strip().split('\n'):
db.execute(sql)
version = new_version
assert version == CURRENT_VERSION
db.execute("INSERT INTO version (version) VALUES (%d)" % version)
db.commit()
if version != CURRENT_VERSION:
raise Exception('Database schema version unknown')
def convert_gpodder2_db(old_db, new_db):
"""Convert gPodder 2.x databases to the new format
Both arguments should be SQLite3 connections to the
corresponding databases.
"""
old_db = sqlite.connect(old_db)
new_db_filename = new_db
new_db = sqlite.connect(new_db)
upgrade(new_db, new_db_filename)
# Copy data for podcasts
old_cur = old_db.cursor()
columns = [x[1] for x in old_cur.execute('PRAGMA table_info(channels)')]
for row in old_cur.execute('SELECT * FROM channels'):
row = dict(list(zip(columns, row)))
values = (
row['id'],
row['override_title'] or row['title'],
row['url'],
row['link'],
row['description'],
row['image'],
row['username'] or None,
row['password'] or None,
row['last_modified'] or None,
row['etag'] or None,
row['channel_is_locked'],
row['foldername'],
not row['feed_update_enabled'],
'',
None,
0,
row['sync_to_devices'],
None,
)
new_db.execute("""
INSERT INTO podcast VALUES (%s)
""" % ', '.join('?' * len(values)), values)
old_cur.close()
# Copy data for episodes
old_cur = old_db.cursor()
columns = [x[1] for x in old_cur.execute('PRAGMA table_info(episodes)')]
for row in old_cur.execute('SELECT * FROM episodes'):
row = dict(list(zip(columns, row)))
values = (
row['id'],
row['channel_id'],
row['title'],
row['description'],
row['url'],
row['pubDate'],
row['guid'],
row['link'],
row['length'],
row['mimetype'],
row['state'],
not row['played'],
row['locked'],
row['filename'],
row['total_time'],
row['current_position'],
row['current_position_updated'],
0,
None,
'',
None,
None,
)
new_db.execute("""
INSERT INTO episode VALUES (%s)
""" % ', '.join('?' * len(values)), values)
# do 6 -> 7 upgrade (description_html)
new_db.create_function('is_html', 1, util.is_html)
new_db.create_function('remove_html_tags', 1, util.remove_html_tags)
new_db.execute("UPDATE episode SET description_html=description WHERE is_html(description)")
new_db.execute("UPDATE episode SET description=remove_html_tags(description_html) WHERE is_html(description)")
new_db.execute("UPDATE podcast SET http_last_modified=NULL, http_etag=NULL")
old_cur.close()
old_db.close()
new_db.commit()
new_db.close()
def check_data(db):
# All episodes must be assigned to a podcast
orphan_episodes = db.get('SELECT COUNT(id) FROM episode '
'WHERE podcast_id NOT IN (SELECT id FROM podcast)')
if orphan_episodes > 0:
logger.error('Orphaned episodes found in database')
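# Illustrative sketch, not from the original module: applying the schema
# migration to an existing database file. The path is a placeholder; upgrade()
# needs both the open connection and the filename so it can copy a pre-upgrade
# backup next to it.
#
#   from sqlite3 import dbapi2 as sqlite
#   filename = '/path/to/gPodder/Database'
#   db = sqlite.connect(filename)
#   upgrade(db, filename)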
| 11,304 | Python | .py | 294 | 30.673469 | 118 | 0.627667 | gpodder/gpodder | 1,268 | 204 | 307 | GPL-3.0 | 9/5/2024, 5:10:30 PM (Europe/Amsterdam) |
| 8,307 | directory.py | gpodder_gpodder/src/gpodder/directory.py |
# -*- coding: utf-8 -*-
#
# gPodder - A media aggregator and podcast client
# Copyright (c) 2005-2018 The gPodder Team
#
# gPodder is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# gPodder is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#
# gpodder.directory - Podcast directory and search providers
# Thomas Perl <thp@gpodder.org>; 2014-10-22
#
import urllib.error
import urllib.parse
import urllib.request
import gpodder
from gpodder import opml, util
_ = gpodder.gettext
class JustAWarning(Exception):
"""
Use this exception in providers to avoid a stack trace shown to the user.
Warning should be an already localized error message.
"""
def __init__(self, warning):
super().__init__(self, warning)
self.warning = warning
class DirectoryEntry(object):
def __init__(self, title, url, image=None, subscribers=-1, description=None):
self.title = title
self.url = url
self.image = image
self.subscribers = subscribers
self.description = description
class DirectoryTag(object):
def __init__(self, tag, weight):
self.tag = tag
self.weight = weight
class Provider(object):
PROVIDER_SEARCH, PROVIDER_URL, PROVIDER_FILE, PROVIDER_TAGCLOUD, PROVIDER_STATIC = list(range(5))
def __init__(self):
self.name = ''
self.kind = self.PROVIDER_SEARCH
self.icon = None
def on_search(self, query):
# Should return a list of DirectoryEntry objects
raise NotImplementedError()
def on_url(self, url):
# Should return a list of DirectoryEntry objects
raise NotImplementedError()
def on_file(self, filename):
# Should return a list of DirectoryEntry objects
raise NotImplementedError()
def on_tag(self, tag):
# Should return a list of DirectoryEntry objects
raise NotImplementedError()
def on_static(self):
# Should return a list of DirectoryEntry objects
raise NotImplementedError()
def get_tags(self):
# Should return a list of DirectoryTag objects
raise NotImplementedError()
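# Illustrative sketch, not from the original module: a minimal provider
# implementing the interface documented above. The entry it returns is made up.
class ExampleStaticProvider(Provider):
    def __init__(self):
        self.name = _('Example provider')
        self.kind = Provider.PROVIDER_STATIC
        self.icon = None

    def on_static(self):
        # PROVIDER_STATIC providers only need to implement on_static()
        return [DirectoryEntry('Example podcast', 'https://example.org/feed.xml',
                               description='A made-up directory entry')]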
def directory_entry_from_opml(url):
return [DirectoryEntry(d['title'], d['url'], description=d['description']) for d in opml.Importer(url).items]
def directory_entry_from_mygpo_json(url):
r = util.urlopen(url)
if not r.ok:
raise Exception('%s: %d %s' % (url, r.status_code, r.reason))
return [DirectoryEntry(d['title'], d['url'], d['logo_url'], d['subscribers'], d['description'])
for d in r.json()]
class GPodderNetSearchProvider(Provider):
def __init__(self):
self.name = _('gpodder.net search')
self.kind = Provider.PROVIDER_SEARCH
self.icon = 'directory-gpodder.png'
def on_search(self, query):
return directory_entry_from_mygpo_json('http://gpodder.net/search.json?q=' + urllib.parse.quote(query))
class OpmlWebImportProvider(Provider):
def __init__(self):
self.name = _('OPML from web')
self.kind = Provider.PROVIDER_URL
self.icon = 'directory-opml.png'
def on_url(self, url):
return directory_entry_from_opml(url)
class OpmlFileImportProvider(Provider):
def __init__(self):
self.name = _('OPML file')
self.kind = Provider.PROVIDER_FILE
self.icon = 'directory-opml.png'
def on_file(self, filename):
return directory_entry_from_opml(filename)
class GPodderRecommendationsProvider(Provider):
def __init__(self):
self.name = _('Getting started')
self.kind = Provider.PROVIDER_STATIC
self.icon = 'directory-examples.png'
def on_static(self):
return directory_entry_from_opml('http://gpodder.org/directory.opml')
class GPodderNetToplistProvider(Provider):
def __init__(self):
self.name = _('gpodder.net Top 50')
self.kind = Provider.PROVIDER_STATIC
self.icon = 'directory-toplist.png'
def on_static(self):
return directory_entry_from_mygpo_json('http://gpodder.net/toplist/50.json')
class GPodderNetTagsProvider(Provider):
def __init__(self):
self.name = _('gpodder.net Tags')
self.kind = Provider.PROVIDER_TAGCLOUD
self.icon = 'directory-tags.png'
def on_tag(self, tag):
return directory_entry_from_mygpo_json('http://gpodder.net/api/2/tag/%s/50.json' % urllib.parse.quote(tag))
def get_tags(self):
url = 'http://gpodder.net/api/2/tags/40.json'
r = util.urlopen(url)
if not r.ok:
raise Exception('%s: %d %s' % (url, r.status_code, r.reason))
return [DirectoryTag(d['tag'], d['usage']) for d in r.json()]
class SoundcloudSearchProvider(Provider):
def __init__(self):
self.name = _('Soundcloud search')
self.kind = Provider.PROVIDER_SEARCH
self.icon = 'directory-soundcloud.png'
def on_search(self, query):
# XXX: This cross-import of the plugin here is bad, but it
# works for now (no proper plugin architecture...)
from gpodder.plugins.soundcloud import search_for_user
results = search_for_user(query)
if isinstance(results, list):
return [DirectoryEntry(entry['username'], entry['permalink_url']) for entry in results]
# {'code': 401, 'message': '', 'status': '401 - Unauthorized',
# 'link': 'https://developers.soundcloud.com/docs/api/explorer/open-api',
# 'errors': [], 'error': None}
if isinstance(results, dict) and results.get('code') == 401:
raise JustAWarning(_("Sorry, soundcloud search doesn't work anymore."))
if isinstance(results, dict) and 'code' in results:
results['msg'] = results.get('message') or results.get('error') or results.get('status')
raise JustAWarning(_("Error querying soundcloud: %(code)s %(msg)s") % results)
raise Exception(_("Unexpected response from soundcloud: %r") % (results, ))
class FixedOpmlFileProvider(Provider):
def __init__(self, filename):
self.name = _('Imported OPML file')
self.kind = Provider.PROVIDER_STATIC
self.icon = 'directory-opml.png'
self.filename = filename
def on_static(self):
return directory_entry_from_opml(self.filename)
PROVIDERS = [
GPodderRecommendationsProvider,
None,
GPodderNetSearchProvider,
GPodderNetToplistProvider,
# GPodderNetTagsProvider,
None,
OpmlWebImportProvider,
# OpmlFileImportProvider,
None,
SoundcloudSearchProvider,
]
| 7,065 | Python | .py | 168 | 35.77381 | 115 | 0.674218 | gpodder/gpodder | 1,268 | 204 | 307 | GPL-3.0 | 9/5/2024, 5:10:30 PM (Europe/Amsterdam) |
| 8,308 | common.py | gpodder_gpodder/src/gpodder/common.py |
# -*- coding: utf-8 -*-
#
# gPodder - A media aggregator and podcast client
# Copyright (c) 2005-2018 The gPodder Team
#
# gPodder is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# gPodder is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# gpodder.common - Common helper functions for all UIs
# Thomas Perl <thp@gpodder.org>; 2012-08-16
import glob
import logging
import os
import gpodder
from gpodder import util
logger = logging.getLogger(__name__)
def clean_up_downloads(delete_partial=False):
"""Clean up temporary files left behind by old gPodder versions
delete_partial - If True, also delete in-progress downloads
"""
temporary_files = glob.glob('%s/*/.tmp-*' % gpodder.downloads)
if delete_partial:
temporary_files += glob.glob('%s/*/*.partial' % gpodder.downloads)
# youtube-dl creates .partial.* files for adaptive formats
temporary_files += glob.glob('%s/*/*.partial.*' % gpodder.downloads)
for tempfile in temporary_files:
util.delete_file(tempfile)
def find_partial_downloads(channels, start_progress_callback, progress_callback, final_progress_callback, finish_progress_callback):
"""Find partial downloads and match them with episodes
channels - A list of all model.PodcastChannel objects
    start_progress_callback - A callback(count) with the number of partial files found
    progress_callback - A callback(title, progress) when an episode was matched
    final_progress_callback - A callback() when the partial file search has finished
    finish_progress_callback - A callback(resumable_episodes) when finished
"""
# Look for partial file downloads, ignoring .partial.* files created by youtube-dl
partial_files = glob.glob(os.path.join(gpodder.downloads, '*', '*.partial'))
count = len(partial_files)
resumable_episodes = []
start_progress_callback(count)
if count:
candidates = [f[:-len('.partial')] for f in partial_files]
found = 0
for channel in channels:
for episode in channel.get_all_episodes():
filename = episode.local_filename(create=False, check_only=True)
if filename in candidates:
found += 1
progress_callback(episode.title, found / count)
candidates.remove(filename)
partial_files.remove(filename + '.partial')
if os.path.exists(filename):
# The file has already been downloaded;
# remove the leftover partial file
util.delete_file(filename + '.partial')
else:
resumable_episodes.append(episode)
if not candidates:
break
if not candidates:
break
final_progress_callback()
for f in partial_files:
logger.warning('Partial file without episode: %s', f)
util.delete_file(f)
# never delete partial: either we can't clean them up because we offer to
# resume download or there are none to delete in the first place.
clean_up_downloads(delete_partial=False)
finish_progress_callback(resumable_episodes)
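# Illustrative sketch, not from the original module: minimal callbacks for
# find_partial_downloads(). "channels" is assumed to be the podcast list
# obtained from the model.
#
#   find_partial_downloads(
#       channels,
#       start_progress_callback=lambda count: print(count, 'partial files'),
#       progress_callback=lambda title, fraction: print(title, fraction),
#       final_progress_callback=lambda: None,
#       finish_progress_callback=lambda episodes: print(len(episodes), 'resumable'),
#   )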
def get_expired_episodes(channels, config):
for channel in channels:
for index, episode in enumerate(channel.get_episodes(gpodder.STATE_DOWNLOADED)):
# Never consider archived episodes as old
if episode.archive:
continue
# Download strategy "Only keep latest"
if (channel.download_strategy == channel.STRATEGY_LATEST
and index > 0):
logger.info('Removing episode (only keep latest strategy): %s',
episode.title)
yield episode
continue
# Only expire episodes if the age in days is positive
if config.auto.cleanup.days < 1:
continue
# Never consider fresh episodes as old
if episode.age_in_days() < config.auto.cleanup.days:
continue
# Do not delete played episodes (except if configured)
if not episode.is_new:
if not config.auto.cleanup.played:
continue
# Do not delete unfinished episodes (except if configured)
if not episode.is_finished():
if not config.auto.cleanup.unfinished:
continue
# Do not delete unplayed episodes (except if configured)
if episode.is_new:
if not config.auto.cleanup.unplayed:
continue
yield episode
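# Illustrative sketch, not from the original module: listing episodes that the
# cleanup rules above would expire. "channels" and "config" are assumed to come
# from the running application.
#
#   for episode in get_expired_episodes(channels, config):
#       print('would expire:', episode.title)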
| 5,140 | Python | .py | 110 | 36.845455 | 132 | 0.642671 | gpodder/gpodder | 1,268 | 204 | 307 | GPL-3.0 | 9/5/2024, 5:10:30 PM (Europe/Amsterdam) |
| 8,309 | services.py | gpodder_gpodder/src/gpodder/services.py |
# -*- coding: utf-8 -*-
#
# gPodder - A media aggregator and podcast client
# Copyright (c) 2005-2018 The gPodder Team
#
# gPodder is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# gPodder is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#
# services.py -- Core Services for gPodder
# Thomas Perl <thp@perli.net> 2007-08-24
#
#
import gpodder
from gpodder import util
_ = gpodder.gettext
class ObservableService(object):
def __init__(self, signal_names=[]):
self.observers = {}
for signal in signal_names:
self.observers[signal] = []
def register(self, signal_name, observer):
if signal_name in self.observers:
if observer not in self.observers[signal_name]:
self.observers[signal_name].append(observer)
return True
return False
def unregister(self, signal_name, observer):
if signal_name in self.observers:
if observer in self.observers[signal_name]:
self.observers[signal_name].remove(observer)
return True
return False
def notify(self, signal_name, *args):
if signal_name in self.observers:
for observer in self.observers[signal_name]:
util.idle_add(observer, *args)
return True
return False
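# Illustrative usage sketch, not from the original module: a service exposing a
# single 'progress' signal. notify() dispatches to observers via util.idle_add,
# so the callbacks run from the main loop.
#
#   service = ObservableService(['progress'])
#   service.register('progress', lambda fraction: print('progress:', fraction))
#   service.notify('progress', 0.5)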
| 1,841 | Python | .py | 49 | 31.816327 | 71 | 0.684092 | gpodder/gpodder | 1,268 | 204 | 307 | GPL-3.0 | 9/5/2024, 5:10:30 PM (Europe/Amsterdam) |
| 8,310 | syncui.py | gpodder_gpodder/src/gpodder/syncui.py |
# -*- coding: utf-8 -*-
#
# gPodder - A media aggregator and podcast client
# Copyright (c) 2005-2018 The gPodder Team
#
# gPodder is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# gPodder is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# gpodder.gtkui.desktop.sync - Glue code between GTK+ UI and sync module
# Thomas Perl <thp@gpodder.org>; 2009-09-05 (based on code from gui.py)
# Ported to gPodder 3 by Joseph Wickremasinghe in June 2012
import logging
import gpodder
from gpodder import sync, util
from gpodder.deviceplaylist import gPodderDevicePlaylist
_ = gpodder.gettext
logger = logging.getLogger(__name__)
class gPodderSyncUI(object):
# download list states
(DL_ONEOFF, DL_ADDING_TASKS, DL_ADDED_TASKS) = list(range(3))
def __init__(self, config, notification, parent_window,
show_confirmation,
show_preferences,
channels,
download_status_model,
download_queue_manager,
set_download_list_state,
commit_changes_to_database,
delete_episode_list,
select_episodes_to_delete,
mount_volume_for_file):
self.device = None
self._config = config
self.notification = notification
self.parent_window = parent_window
self.show_confirmation = show_confirmation
self.show_preferences = show_preferences
self.channels = channels
self.download_status_model = download_status_model
self.download_queue_manager = download_queue_manager
self.set_download_list_state = set_download_list_state
self.commit_changes_to_database = commit_changes_to_database
self.delete_episode_list = delete_episode_list
self.select_episodes_to_delete = select_episodes_to_delete
self.mount_volume_for_file = mount_volume_for_file
def _filter_sync_episodes(self, channels, only_downloaded=False):
"""Return a list of episodes for device synchronization
If only_downloaded is True, this will skip episodes that
have not been downloaded yet and podcasts that are marked
as "Do not synchronize to my device".
"""
episodes = []
for channel in channels:
if only_downloaded or not channel.sync_to_mp3_player:
logger.info('Skipping channel: %s', channel.title)
continue
for episode in channel.get_all_episodes():
if (episode.was_downloaded(and_exists=True)
or not only_downloaded):
episodes.append(episode)
return episodes
def _show_message_unconfigured(self):
title = _('No device configured')
message = _('Please set up your device in the preferences dialog.')
if self.show_confirmation(message, title):
self.show_preferences(self.parent_window, None)
def _show_message_cannot_open(self):
title = _('Cannot open device')
message = _('Please check logs and the settings in the preferences dialog.')
self.notification(message, title, important=True)
def on_synchronize_episodes(self, channels, episodes=None, force_played=True, done_callback=None):
device = sync.open_device(self)
if device is None:
self._show_message_unconfigured()
if done_callback:
done_callback()
return
try:
if not device.open():
self._show_message_cannot_open()
if done_callback:
done_callback()
return
else:
# Only set if device is configured and opened successfully
self.device = device
except Exception as err:
            logger.error('opening destination %s failed with %s',
                         device.destination.get_uri(), err)
self._show_message_cannot_open()
if done_callback:
done_callback()
return
if episodes is None:
force_played = False
episodes = self._filter_sync_episodes(channels)
def check_free_space():
# "Will we add this episode to the device?"
def will_add(episode):
# If already on-device, it won't take up any space
if device.episode_on_device(episode):
return False
# Might not be synced if it's played already
if (not force_played
and self._config.device_sync.skip_played_episodes):
return False
# In all other cases, we expect the episode to be
# synchronized to the device, so "answer" positive
return True
# "What is the file size of this episode?"
def file_size(episode):
filename = episode.local_filename(create=False)
if filename is None:
return 0
return util.calculate_size(str(filename))
# Calculate total size of sync and free space on device
total_size = sum(file_size(e) for e in episodes if will_add(e))
free_space = max(device.get_free_space(), 0)
if total_size > free_space:
title = _('Not enough space left on device')
message = (_('Additional free space required: %(required_space)s\nDo you want to continue?') %
{'required_space': util.format_filesize(total_size - free_space)})
if not self.show_confirmation(message, title):
device.cancel()
device.close()
return
# enable updating of UI
self.set_download_list_state(gPodderSyncUI.DL_ONEOFF)
"""Update device playlists
General approach is as follows:
        When an episode is downloaded and synced, it is added to the
standard playlist for that podcast which is then written to
the device.
After the user has played that episode on their device, they
can delete that episode from their device.
At the next sync, gPodder will then compare the standard
podcast-specific playlists on the device (as written by
gPodder during the last sync), with the episodes on the
        device. If there is an episode referenced in the playlist
that is no longer on the device, gPodder will assume that
the episode has already been synced and subsequently deleted
from the device, and will hence mark that episode as deleted
in gPodder. If there are no playlists, nothing is deleted.
At the next sync, the playlists will be refreshed based on
the downloaded, undeleted episodes in gPodder, and the
cycle begins again...
"""
def resume_sync(episode_urls, channel_urls, progress):
if progress is not None:
progress.on_finished()
# rest of sync process should continue here
self.commit_changes_to_database()
for current_channel in self.channels:
# only sync those channels marked for syncing
if (self._config.device_sync.device_type == 'filesystem'
and current_channel.sync_to_mp3_player
and self._config.device_sync.playlists.create):
# get playlist object
playlist = gPodderDevicePlaylist(self._config,
current_channel.title)
# need to refresh episode list so that
# deleted episodes aren't included in playlists
episodes_for_playlist = sorted(current_channel.get_episodes(gpodder.STATE_DOWNLOADED),
key=lambda ep: ep.published)
# don't add played episodes to playlist if skip_played_episodes is True
if self._config.device_sync.skip_played_episodes:
episodes_for_playlist = [ep for ep in episodes_for_playlist if ep.is_new]
playlist.write_m3u(episodes_for_playlist)
            # enable updating of UI, but mark it as tasks being added so that
# adding a single task that completes immediately doesn't turn off the
# ui updates again
self.set_download_list_state(gPodderSyncUI.DL_ADDING_TASKS)
if (self._config.device_sync.device_type == 'filesystem' and self._config.device_sync.playlists.create):
title = _('Update successful')
message = _('The playlist on your MP3 player has been updated.')
self.notification(message, title)
# called from the main thread to complete adding tasks
def add_downloads_complete():
self.set_download_list_state(gPodderSyncUI.DL_ADDED_TASKS)
# Finally start the synchronization process
@util.run_in_background
def sync_thread_func():
device.add_sync_tasks(episodes, force_played=force_played,
done_callback=done_callback)
util.idle_add(add_downloads_complete)
return
if self._config.device_sync.playlists.create:
try:
episodes_to_delete = []
if self._config.device_sync.playlists.two_way_sync:
for current_channel in self.channels:
# only include channels that are included in the sync
if current_channel.sync_to_mp3_player:
# get playlist object
playlist = gPodderDevicePlaylist(self._config, current_channel.title)
# get episodes to be written to playlist
episodes_for_playlist = sorted(current_channel.get_episodes(gpodder.STATE_DOWNLOADED),
key=lambda ep: ep.published)
episode_keys = list(map(playlist.get_absolute_filename_for_playlist,
episodes_for_playlist))
episode_dict = dict(list(zip(episode_keys, episodes_for_playlist)))
# then get episodes in playlist (if it exists) already on device
episodes_in_playlists = playlist.read_m3u()
# if playlist doesn't exist (yet) episodes_in_playlist will be empty
if episodes_in_playlists:
for episode_filename in episodes_in_playlists:
if ((not self._config.device_sync.playlists.use_absolute_path
and not playlist.playlist_folder.resolve_relative_path(episode_filename).query_exists())
or (self._config.device_sync.playlists.use_absolute_path
and not playlist.mountpoint.resolve_relative_path(episode_filename).query_exists())):
# episode was synced but no longer on device
# i.e. must have been deleted by user, so delete from gpodder
try:
episodes_to_delete.append(episode_dict[episode_filename])
except KeyError:
logger.warning('Episode %s, removed from device has already been deleted from gpodder',
episode_filename)
# delete all episodes from gpodder (will prompt user)
# not using playlists to delete
def auto_delete_callback(episodes):
if not episodes:
# episodes were deleted on device
# but user decided not to delete them from gpodder
# so jump straight to sync
logger.info('Starting sync - no episodes selected for deletion')
resume_sync([], [], None)
else:
# episodes need to be deleted from gpodder
for episode_to_delete in episodes:
logger.info("Deleting episode %s",
episode_to_delete.title)
logger.info('Will start sync - after deleting episodes')
self.delete_episode_list(episodes, False, resume_sync)
return
if episodes_to_delete:
columns = (
('markup_delete_episodes', None, None, _('Episode')),
)
self.select_episodes_to_delete(
self.parent_window,
title=_('Episodes have been deleted on device'),
instructions='Select the episodes you want to delete:',
episodes=episodes_to_delete,
selected=[True, ] * len(episodes_to_delete),
columns=columns,
callback=auto_delete_callback,
_config=self._config)
else:
logger.warning("Starting sync - no episodes to delete")
resume_sync([], [], None)
except IOError as ioe:
title = _('Error writing playlist files')
message = _(str(ioe))
self.notification(message, title)
else:
logger.info('Not creating playlists - starting sync')
resume_sync([], [], None)
# This function is used to remove files from the device
def cleanup_episodes():
# 'skip_played_episodes' must be used or else all the
# played tracks will be copied then immediately deleted
if (self._config.device_sync.delete_deleted_episodes
or (self._config.device_sync.delete_played_episodes
and self._config.device_sync.skip_played_episodes)):
all_episodes = self._filter_sync_episodes(
channels, only_downloaded=False)
for local_episode in all_episodes:
episode = device.episode_on_device(local_episode)
if episode is None:
continue
if local_episode.state == gpodder.STATE_DELETED:
logger.info('Removing episode from device: %s',
episode.title)
device.remove_track(episode)
# When this is done, start the callback in the UI code
util.idle_add(check_free_space)
# This will run the following chain of actions:
# 1. Remove old episodes (in worker thread)
# 2. Check for free space (in UI thread)
# 3. Sync the device (in UI thread)
util.run_in_background(cleanup_episodes)
| 16,437 | Python | .py | 291 | 38.347079 | 135 | 0.552663 | gpodder/gpodder | 1,268 | 204 | 307 | GPL-3.0 | 9/5/2024, 5:10:30 PM (Europe/Amsterdam) |
| 8,311 | dbusproxy.py | gpodder_gpodder/src/gpodder/dbusproxy.py |
# -*- coding: utf-8 -*-
#
# gPodder - A media aggregator and podcast client
# Copyright (c) 2005-2018 The gPodder Team
#
# gPodder is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# gPodder is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# gpodder.dbusproxy - Expose Podcasts over D-Bus
# Based on a patch by Iwan van der Kleijn <iwanvanderkleyn@gmail.com>
# See also: http://gpodder.org/bug/699
import dbus
import dbus.service
import gpodder
from gpodder import util
def safe_str(txt):
if txt:
return txt.encode()
else:
return ''
def safe_first_line(txt):
txt = safe_str(txt)
lines = util.remove_html_tags(txt).strip().splitlines()
if not lines or lines[0] == '':
return ''
else:
return lines[0]
class DBusPodcastsProxy(dbus.service.Object):
""" Implements API accessible through D-Bus
Methods on DBusPodcastsProxy can be called by D-Bus clients. They implement
safe-guards to work safely over D-Bus while having type signatures applied
for parameter and return values.
"""
# DBusPodcastsProxy(lambda: self.channels, self.on_itemUpdate_activate(), self.playback_episodes, self.download_episode_list, bus_name)
def __init__(self, get_podcast_list,
check_for_updates, playback_episodes,
download_episodes, episode_from_uri,
bus_name):
self._get_podcasts = get_podcast_list
self._on_check_for_updates = check_for_updates
self._playback_episodes = playback_episodes
self._download_episodes = download_episodes
self._episode_from_uri = episode_from_uri
dbus.service.Object.__init__(self,
object_path=gpodder.dbus_podcasts_object_path,
bus_name=bus_name)
def _get_episode_refs(self, urls):
"""Get Episode instances associated with URLs"""
episodes = []
for p in self._get_podcasts():
for e in p.get_all_episodes():
if e.url in urls:
episodes.append(e)
return episodes
@dbus.service.method(dbus_interface=gpodder.dbus_podcasts, in_signature='', out_signature='a(ssss)')
def get_podcasts(self):
"""Get all podcasts in gPodder's subscription list"""
def podcast_to_tuple(podcast):
title = safe_str(podcast.title)
url = safe_str(podcast.url)
description = safe_first_line(podcast.description)
cover_file = ''
return (title, url, description, cover_file)
return [podcast_to_tuple(p) for p in self._get_podcasts()]
@dbus.service.method(dbus_interface=gpodder.dbus_podcasts, in_signature='s', out_signature='ss')
def get_episode_title(self, url):
episode = self._episode_from_uri(url)
if episode is not None:
return episode.title, episode.channel.title
return ('', '')
@dbus.service.method(dbus_interface=gpodder.dbus_podcasts, in_signature='s', out_signature='a(sssssbbb)')
def get_episodes(self, url):
"""Return all episodes of the podcast with the given URL"""
podcast = None
for channel in self._get_podcasts():
if channel.url == url:
podcast = channel
break
if podcast is None:
return []
def episode_to_tuple(episode):
title = safe_str(episode.title)
url = safe_str(episode.url)
description = safe_first_line(episode._text_description)
filename = safe_str(episode.download_filename)
file_type = safe_str(episode.file_type())
is_new = (episode.state == gpodder.STATE_NORMAL and episode.is_new)
is_downloaded = episode.was_downloaded(and_exists=True)
is_deleted = (episode.state == gpodder.STATE_DELETED)
return (title, url, description, filename, file_type, is_new, is_downloaded, is_deleted)
return [episode_to_tuple(e) for e in podcast.get_all_episodes()]
@dbus.service.method(dbus_interface=gpodder.dbus_podcasts, in_signature='as', out_signature='(bs)')
def play_or_download_episode(self, urls):
"""Play (or download) a list of episodes given by URL"""
episodes = self._get_episode_refs(urls)
if not episodes:
return (0, 'No episodes found')
to_playback = [e for e in episodes if e.was_downloaded(and_exists=True)]
to_download = [e for e in episodes if e not in to_playback]
if to_playback:
self._playback_episodes(to_playback)
if to_download:
self._download_episodes(to_download)
return (1, 'Success')
@dbus.service.method(dbus_interface=gpodder.dbus_podcasts, in_signature='', out_signature='')
def check_for_updates(self):
"""Check for new episodes or offer subscriptions"""
self._on_check_for_updates()
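# A minimal client-side sketch (illustrative only, not part of the proxy):
# how another process could list subscriptions over D-Bus while gPodder is
# running, reusing the bus-name/object-path/interface constants from the
# gpodder module that this proxy registers itself under.
def _example_list_subscriptions():
    bus = dbus.SessionBus()
    proxy = bus.get_object(gpodder.dbus_bus_name,
                           gpodder.dbus_podcasts_object_path)
    podcasts = proxy.get_podcasts(dbus_interface=gpodder.dbus_podcasts)
    for title, url, description, cover_file in podcasts:
        print('%s <%s>' % (title, url))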
| size: 5,413 | language: Python | extension: .py | total_lines: 118 | avg_line_length: 38.084746 | max_line_length: 139 | alphanum_fraction: 0.659704 | repo_name: gpodder/gpodder | repo_stars: 1,268 | repo_forks: 204 | repo_open_issues: 307 | repo_license: GPL-3.0 | repo_extraction_date: 9/5/2024, 5:10:30 PM (Europe/Amsterdam) |
| id: 8,312 | file_name: download.py | file_path: gpodder_gpodder/src/gpodder/gtkui/download.py |
# -*- coding: utf-8 -*-
#
# gPodder - A media aggregator and podcast client
# Copyright (c) 2005-2018 The gPodder Team
#
# gPodder is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# gPodder is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#
# gpodder.gtkui.download -- Download management in GUIs (2009-08-24)
# Based on code from gpodder.services (thp, 2007-08-24)
#
import collections
import html
import threading
from gi.repository import Gtk
import gpodder
from gpodder import download, util
_ = gpodder.gettext
class DequeueRequest:
def __init__(self):
self.cv = threading.Condition()
self.value = None
self.resolved = False
def dequeue(self):
with self.cv:
self.cv.wait_for(lambda: self.resolved)
return self.value
def resolve(self, value):
self.value = value
self.resolved = True
with self.cv:
self.cv.notify()
class DownloadStatusModel(Gtk.ListStore):
# Symbolic names for our columns, so we know what we're up to
C_TASK, C_NAME, C_URL, C_PROGRESS, C_PROGRESS_TEXT, C_ICON_NAME = list(range(6))
SEARCH_COLUMNS = (C_NAME, C_URL)
def __init__(self):
Gtk.ListStore.__init__(self, object, str, str, int, str, str)
# Set up stock icon IDs for tasks
self._status_ids = collections.defaultdict(lambda: None)
self._status_ids[download.DownloadTask.DOWNLOADING] = 'go-down'
self._status_ids[download.DownloadTask.DONE] = 'object-select-symbolic'
self._status_ids[download.DownloadTask.FAILED] = 'dialog-error'
self._status_ids[download.DownloadTask.CANCELLING] = 'media-playback-stop'
self._status_ids[download.DownloadTask.CANCELLED] = 'media-playback-stop'
self._status_ids[download.DownloadTask.PAUSING] = 'media-playback-pause'
self._status_ids[download.DownloadTask.PAUSED] = 'media-playback-pause'
self.enabled = True
def _format_message(self, episode, message, podcast):
episode = html.escape(episode)
podcast = html.escape(podcast)
message = html.escape(message)
return '%s\n<small>%s - %s</small>' % (episode, message, podcast)
def request_update(self, iterator, task=None):
if task is None:
# Ongoing update request from UI - get task from model
task = self.get_value(iterator, self.C_TASK)
else:
# Initial update request - update non-changing fields
self.set(iterator,
self.C_TASK, task,
self.C_URL, task.url)
if task.status == task.FAILED:
status_message = '%s: %s' % (
task.STATUS_MESSAGE[task.status],
task.error_message)
elif task.status == task.DOWNLOADING:
status_message = _('%(status)s (%(progress).0f%%, %(rate)s/s, %(remaining)s)') % {
'status': task.STATUS_MESSAGE[task.status],
'progress': task.progress * 100,
'rate': util.format_filesize(task.speed),
'remaining': util.format_time(round((task.total_size * (1 - task.progress)) / task.speed)) if task.speed > 0 else '--:--'
}
else:
status_message = task.STATUS_MESSAGE[task.status]
if task.progress > 0 and task.progress < 1:
current = util.format_filesize(task.progress * task.total_size, digits=1)
total = util.format_filesize(task.total_size, digits=1)
# Remove unit from current if same as in total
# (does: "12 MiB / 24 MiB" => "12 / 24 MiB")
current = current.split()
if current[-1] == total.split()[-1]:
current.pop()
current = ' '.join(current)
progress_message = ' / '.join((current, total))
elif task.total_size > 0:
progress_message = util.format_filesize(task.total_size,
digits=1)
else:
progress_message = ('unknown size')
self.set(iterator,
self.C_NAME, self._format_message(task.episode.title,
status_message, task.episode.channel.title),
self.C_PROGRESS, 100. * task.progress,
self.C_PROGRESS_TEXT, progress_message,
self.C_ICON_NAME, self._status_ids[task.status])
def __add_new_task(self, task):
it = self.append()
self.request_update(it, task)
def register_task(self, task, background=True):
if background:
util.idle_add(self.__add_new_task, task)
else:
self.__add_new_task(task)
def queue_task(self, task):
with task:
if task.status in (task.NEW, task.FAILED, task.CANCELLED, task.PAUSED):
task.status = task.QUEUED
task.set_episode_download_task()
def tell_all_tasks_to_quit(self):
for row in self:
task = row[DownloadStatusModel.C_TASK]
if task is not None:
with task:
# Pause currently queued downloads
if task.status == task.QUEUED:
task.status = task.PAUSED
# Request pause of currently running downloads
elif task.status == task.DOWNLOADING:
task.status = task.PAUSING
# Delete cancelled and failed downloads
elif task.status in (task.CANCELLED, task.FAILED):
task.removed_from_list()
def are_downloads_in_progress(self):
"""
Returns True if there are any downloads in the
QUEUED or DOWNLOADING status, False otherwise.
"""
for row in self:
task = row[DownloadStatusModel.C_TASK]
if task is not None and \
task.status in (task.DOWNLOADING,
task.QUEUED):
return True
return False
def has_work(self):
return any(self._work_gen())
def available_work_count(self):
return len(list(self._work_gen()))
def __get_next(self, dqr):
try:
task = next(self._work_gen())
# this is the only thread accessing the list store, so it's safe
# to assume a) the task is still queued and b) we can transition to downloading
task.status = task.DOWNLOADING
except StopIteration:
task = None
# hand the task off to the worker thread
dqr.resolve(task)
# get the next task to download. this proxies the request to the main thread,
# as only the main thread is allowed to manipulate the list store.
def get_next(self):
dqr = DequeueRequest()
util.idle_add(self.__get_next, dqr)
return dqr.dequeue()
def _work_gen(self):
return (task for task in
(row[DownloadStatusModel.C_TASK] for row in self)
if task.status == task.QUEUED)
class DownloadTaskMonitor(object):
"""A helper class that abstracts download events"""
def __init__(self, episode, on_can_resume, on_can_pause, on_finished):
self.episode = episode
self._status = None
self._on_can_resume = on_can_resume
self._on_can_pause = on_can_pause
self._on_finished = on_finished
def task_updated(self, task):
if self.episode.url == task.episode.url and self._status != task.status:
if task.status in (task.DONE, task.FAILED, task.CANCELLED):
self._on_finished()
elif task.status == task.PAUSED:
self._on_can_resume()
elif task.status in (task.QUEUED, task.DOWNLOADING):
self._on_can_pause()
self._status = task.status
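# A minimal sketch of the DequeueRequest handshake outside of Gtk (illustrative
# only): a plain thread stands in for the download worker, while in gPodder
# itself resolve() runs on the UI thread via util.idle_add() (see get_next()).
def _example_dequeue_request():
    dqr = DequeueRequest()
    results = []
    def _worker():
        results.append(dqr.dequeue())  # blocks until resolve() is called
    t = threading.Thread(target=_worker)
    t.start()
    dqr.resolve('next-task')           # hands the value to the waiting worker
    t.join()
    return results                     # -> ['next-task']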
| size: 8,372 | language: Python | extension: .py | total_lines: 187 | avg_line_length: 34.55615 | max_line_length: 141 | alphanum_fraction: 0.604714 | repo_name: gpodder/gpodder | repo_stars: 1,268 | repo_forks: 204 | repo_open_issues: 307 | repo_license: GPL-3.0 | repo_extraction_date: 9/5/2024, 5:10:30 PM (Europe/Amsterdam) |
| id: 8,313 | file_name: config.py | file_path: gpodder_gpodder/src/gpodder/gtkui/config.py |
# -*- coding: utf-8 -*-
#
# gPodder - A media aggregator and podcast client
# Copyright (c) 2005-2018 The gPodder Team
#
# gPodder is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# gPodder is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#
# gpodder.gtkui.config -- Config object with GTK+ support (2009-08-24)
#
import logging
import gi # isort:skip
gi.require_version('Gdk', '3.0') # isort:skip
gi.require_version('Gtk', '3.0') # isort:skip
from gi.repository import Gdk, Gtk, Pango
import gpodder
from gpodder import config, util
logger = logging.getLogger(__name__)
_ = gpodder.gettext
class ConfigModel(Gtk.ListStore):
C_NAME, C_TYPE_TEXT, C_VALUE_TEXT, C_TYPE, C_EDITABLE, C_FONT_STYLE, \
C_IS_BOOLEAN, C_BOOLEAN_VALUE = list(range(8))
def __init__(self, config):
Gtk.ListStore.__init__(self, str, str, str, object, bool, int, bool, bool)
self._config = config
self._fill_model()
self._config.add_observer(self._on_update)
def _type_as_string(self, datatype):
if datatype == int:
return _('Integer')
elif datatype == float:
return _('Float')
elif datatype == bool:
return _('Boolean')
else:
return _('String')
def _fill_model(self):
self.clear()
for key in sorted(self._config.all_keys()):
# Ignore Gtk window state data (position, size, ...)
if key.startswith('ui.gtk.state.'):
continue
value = self._config._lookup(key)
fieldtype = type(value)
style = Pango.Style.NORMAL
# if value == default:
# style = Pango.Style.NORMAL
# else:
# style = Pango.Style.ITALIC
self.append((key, self._type_as_string(fieldtype),
config.config_value_to_string(value),
fieldtype, fieldtype is not bool, style,
fieldtype is bool, bool(value)))
def _on_update(self, name, old_value, new_value):
for row in self:
if row[self.C_NAME] == name:
style = Pango.Style.NORMAL
# if new_value == self._config.Settings[name]:
# style = Pango.Style.NORMAL
# else:
# style = Pango.Style.ITALIC
new_value_text = config.config_value_to_string(new_value)
self.set(row.iter,
self.C_VALUE_TEXT, new_value_text,
self.C_BOOLEAN_VALUE, bool(new_value),
self.C_FONT_STYLE, style)
break
def stop_observing(self):
self._config.remove_observer(self._on_update)
class UIConfig(config.Config):
def __init__(self, filename='gpodder.conf'):
config.Config.__init__(self, filename)
self.__ignore_window_events = False
def connect_gtk_editable(self, name, editable):
editable.delete_text(0, -1)
editable.insert_text(str(getattr(self, name)))
def _editable_changed(editable):
setattr(self, name, editable.get_chars(0, -1))
editable.connect('changed', _editable_changed)
def connect_gtk_spinbutton(self, name, spinbutton, forced_upper=None):
"""
        Bind a Gtk.SpinButton to a configuration entry.
        An upper limit can be forced via forced_upper; it is not derived
        automatically (e.g. by looking up name + '_max') because it is
        currently needed in only one place. If that becomes commonplace,
        it would be better to make it automatic.
:param str name: configuration key (e.g. 'limit.downloads.concurrent')
:param Gtk.SpinButton spinbutton: button to bind to config
:param float forced_upper: forced upper limit on spinbutton.
Overrides value in .ui to be consistent with code
"""
current_value = getattr(self, name)
adjustment = spinbutton.get_adjustment()
if forced_upper is not None:
adjustment.set_upper(forced_upper)
if current_value > adjustment.get_upper():
adjustment.set_upper(current_value)
spinbutton.set_value(current_value)
def _spinbutton_changed(spinbutton):
setattr(self, name, spinbutton.get_value())
spinbutton.connect('value-changed', _spinbutton_changed)
def connect_gtk_paned(self, name, paned):
paned.set_position(getattr(self, name))
paned_child = paned.get_child1()
def _child_size_allocate(x, y):
setattr(self, name, paned.get_position())
paned_child.connect('size-allocate', _child_size_allocate)
def connect_gtk_togglebutton(self, name, togglebutton):
togglebutton.set_active(getattr(self, name))
def _togglebutton_toggled(togglebutton):
setattr(self, name, togglebutton.get_active())
togglebutton.connect('toggled', _togglebutton_toggled)
def connect_gtk_combo_box_text(self, name, combo_text):
combo_text.set_active_id(getattr(self, name))
def _combo_box_text_changed(combo):
setattr(self, name, combo.get_active_id())
combo_text.connect('changed', _combo_box_text_changed)
def connect_gtk_window(self, window, config_prefix, show_window=False):
cfg = getattr(self.ui.gtk.state, config_prefix)
if gpodder.ui.win32:
window.set_gravity(Gdk.Gravity.STATIC)
if -1 not in (cfg.x, cfg.y, cfg.width, cfg.height):
# get screen resolution
def get_screen_size(display):
monitor_geometries = [display.get_monitor(i).get_geometry() for i in range(display.get_n_monitors())]
x0 = min(r.x for r in monitor_geometries)
y0 = min(r.y for r in monitor_geometries)
x1 = max(r.x + r.width for r in monitor_geometries)
y1 = max(r.y + r.height for r in monitor_geometries)
return x1 - x0, y1 - y0
screen_width, screen_height = get_screen_size(Gdk.Display.get_default())
logger.debug('Screen %d x %d' % (screen_width, screen_height))
# reset window position if more than 50% is off-screen
half_width = cfg.width / 2
half_height = cfg.height / 2
if (cfg.x + cfg.width - half_width) < 0 or (cfg.y + cfg.height - half_height) < 0 \
or cfg.x > (screen_width - half_width) or cfg.y > (screen_height - half_height):
logger.warning('"%s" window was off-screen at (%d, %d), resetting to default position' % (config_prefix, cfg.x, cfg.y))
cfg.x = -1
cfg.y = -1
if cfg.width != -1 and cfg.height != -1:
window.resize(cfg.width, cfg.height)
# Not all platforms can natively restore position, gPodder must handle it.
# https://github.com/gpodder/gpodder/pull/933#issuecomment-818039693
if cfg.x == -1 or cfg.y == -1:
window.set_position(Gtk.WindowPosition.CENTER_ON_PARENT)
else:
window.move(cfg.x, cfg.y)
# From Gtk docs: most window managers ignore requests for initial window
# positions (instead using a user-defined placement algorithm) and honor
# requests after the window has already been shown.
# Move it a second time after the window has been shown.
# The first move reduces chance of window jumping,
# and gives the window manager a position to unmaximize to.
if not cfg.maximized:
util.idle_add(window.move, cfg.x, cfg.y)
# Ignore events while we're connecting to the window
self.__ignore_window_events = True
# Get window state, correct size comes from window.get_size(),
# see https://developer.gnome.org/SaveWindowState/
def _receive_configure_event(widget, event):
if not self.__ignore_window_events:
# TODO: The maximize event might arrive after the configure event.
# This causes the maximized size to be saved, and restoring the
# window will not save its smaller size. Delaying the save with
# idle_add() is not enough time for the state event to arrive.
if not bool(event.window.get_state() & Gdk.WindowState.MAXIMIZED):
x_pos, y_pos = widget.get_position()
width_size, height_size = widget.get_size()
cfg.x = x_pos
cfg.y = y_pos
cfg.width = width_size
cfg.height = height_size
window.connect('configure-event', _receive_configure_event)
def _receive_window_state(widget, event):
new_value = bool(event.new_window_state & Gdk.WindowState.MAXIMIZED)
cfg.maximized = new_value
window.connect('window-state-event', _receive_window_state)
# After the window has been set up, we enable events again
def _enable_window_events():
self.__ignore_window_events = False
util.idle_add(_enable_window_events)
if show_window:
window.show()
if cfg.maximized:
window.maximize()
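# A minimal sketch (illustrative only) of binding widgets to configuration
# keys with the connect_gtk_* helpers above. The builder widget ids and the
# 'auto_download' / 'main_window' names are assumptions for the example;
# 'limit.downloads.concurrent' is the key named in connect_gtk_spinbutton().
def _example_bind_widgets(cfg, builder):
    cfg.connect_gtk_spinbutton('limit.downloads.concurrent',
                               builder.get_object('spinMaxDownloads'),
                               forced_upper=16)
    cfg.connect_gtk_togglebutton('auto_download',
                                 builder.get_object('checkAutoDownload'))
    cfg.connect_gtk_window(builder.get_object('gPodderMainWindow'),
                           'main_window', show_window=True)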
| size: 9,805 | language: Python | extension: .py | total_lines: 196 | avg_line_length: 39.403061 | max_line_length: 135 | alphanum_fraction: 0.613575 | repo_name: gpodder/gpodder | repo_stars: 1,268 | repo_forks: 204 | repo_open_issues: 307 | repo_license: GPL-3.0 | repo_extraction_date: 9/5/2024, 5:10:30 PM (Europe/Amsterdam) |
| id: 8,314 | file_name: macosx.py | file_path: gpodder_gpodder/src/gpodder/gtkui/macosx.py |
# -*- coding: utf-8 -*-
#
# gPodder - A media aggregator and podcast client
# Copyright (c) 2005-2018 The gPodder Team
#
# gPodder is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# gPodder is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import struct
import sys
from gpodder import util
def aeKeyword(fourCharCode):
"""transform four character code into a long"""
return struct.unpack('I', fourCharCode)[0]
# for the kCoreEventClass, kAEOpenDocuments, ... constants
# comes with macpython
try:
from Carbon.AppleEvents import * # noqa: F403
except ImportError:
...
# All of this depends on pyObjc (http://pyobjc.sourceforge.net/).
# There may be a way to achieve something equivalent with only what's in
# MacPython (see for instance http://mail.python.org/pipermail/pythonmac-sig/2006-May/017373.html),
# but I couldn't get that to work.
# Also note that it only works when gPodder is not already running;
# for some reason the events are not received afterwards.
try:
from AppKit import NSAppleEventDescriptor, NSAppleEventManager, NSObject
class gPodderEventHandler(NSObject):
""" handles Apple Events for :
- Open With... (and dropping a file on the icon)
- "subscribe to podcast" from firefox
The code was largely inspired by gedit-osx-delegate.m, from the
gedit project
(see http://git.gnome.org/browse/gedit/tree/gedit/osx/gedit-osx-delegate.m?id=GEDIT_2_28_3).
"""
# keeps a reference to the gui.gPodder class
gp = None
def register(self, gp):
""" register all handlers with NSAppleEventManager """
self.gp = gp
aem = NSAppleEventManager.sharedAppleEventManager()
aem.setEventHandler_andSelector_forEventClass_andEventID_(
self, 'openFileEvent:reply:', aeKeyword(kCoreEventClass), aeKeyword(kAEOpenDocuments)) # noqa: F405
aem.setEventHandler_andSelector_forEventClass_andEventID_(
self, 'subscribeEvent:reply:', aeKeyword('GURL'), aeKeyword('GURL'))
def openFileEvent_reply_(self, event, reply):
""" handles an 'Open With...' event"""
urls = []
filelist = event.paramDescriptorForKeyword_(aeKeyword(keyDirectObject)) # noqa: F405
numberOfItems = filelist.numberOfItems()
for i in range(1, numberOfItems + 1):
fileAliasDesc = filelist.descriptorAtIndex_(i)
fileURLDesc = fileAliasDesc.coerceToDescriptorType_(aeKeyword(typeFileURL)) # noqa: F405
fileURLData = fileURLDesc.data()
url = memoryview(fileURLData.bytes(), 0, fileURLData.length())
url = str(url)
util.idle_add(self.gp.on_item_import_from_file_activate, None, url)
urls.append(str(url))
print(("open Files :", urls), file=sys.stderr)
result = NSAppleEventDescriptor.descriptorWithInt32_(42)
reply.setParamDescriptor_forKeyword_(result, aeKeyword('----'))
def subscribeEvent_reply_(self, event, reply):
""" handles a 'Subscribe to...' event"""
filelist = event.paramDescriptorForKeyword_(aeKeyword(keyDirectObject)) # noqa: F405
fileURLData = filelist.data()
url = memoryview(fileURLData.bytes(), 0, fileURLData.length())
url = str(url)
print(("Subscribe to :" + url), file=sys.stderr)
util.idle_add(self.gp.subscribe_to_url, url)
result = NSAppleEventDescriptor.descriptorWithInt32_(42)
reply.setParamDescriptor_forKeyword_(result, aeKeyword('----'))
# global reference to the handler (mustn't be destroyed)
handler = gPodderEventHandler.alloc().init()
except ImportError:
print("""
Warning: pyobjc not found. Disabling "Subscribe with" events handling
""", file=sys.stderr)
handler = None
def register_handlers(gp):
""" register the events handlers (and keep a reference to gPodder's instance)"""
if handler is not None:
handler.register(gp)
| size: 4,602 | language: Python | extension: .py | total_lines: 93 | avg_line_length: 42.096774 | max_line_length: 116 | alphanum_fraction: 0.679581 | repo_name: gpodder/gpodder | repo_stars: 1,268 | repo_forks: 204 | repo_open_issues: 307 | repo_license: GPL-3.0 | repo_extraction_date: 9/5/2024, 5:10:30 PM (Europe/Amsterdam) |
| id: 8,315 | file_name: shownotes.py | file_path: gpodder_gpodder/src/gpodder/gtkui/shownotes.py |
# -*- coding: utf-8 -*-
#
# gPodder - A media aggregator and podcast client
# Copyright (c) 2005-2018 The gPodder Team
#
# gPodder is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# gPodder is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import datetime
import html
import logging
from urllib.parse import urlparse
import gpodder
from gpodder import util
from gpodder.gtkui.draw import (draw_text_box_centered, get_background_color,
get_foreground_color)
# from gpodder.gtkui.draw import investigate_widget_colors
import gi # isort:skip
gi.require_version('Gdk', '3.0') # isort:skip
gi.require_version('Gtk', '3.0') # isort:skip
from gi.repository import Gdk, Gio, GLib, Gtk, Pango # isort:skip
_ = gpodder.gettext
logger = logging.getLogger(__name__)
has_webkit2 = False
try:
gi.require_version('WebKit2', '4.0')
from gi.repository import WebKit2
has_webkit2 = True
except (ImportError, ValueError):
logger.info('No WebKit2 gobject bindings, so no HTML shownotes')
def get_shownotes(enable_html, pane):
if enable_html and has_webkit2:
return gPodderShownotesHTML(pane)
else:
return gPodderShownotesText(pane)
class gPodderShownotes:
def __init__(self, shownotes_pane):
self.shownotes_pane = shownotes_pane
self.details_fmt = _('%(date)s | %(size)s | %(duration)s')
self.scrolled_window = Gtk.ScrolledWindow()
self.scrolled_window.set_shadow_type(Gtk.ShadowType.IN)
self.scrolled_window.set_policy(Gtk.PolicyType.AUTOMATIC, Gtk.PolicyType.AUTOMATIC)
self.scrolled_window.add(self.init())
self.status = Gtk.Label.new()
self.status.set_halign(Gtk.Align.START)
self.status.set_valign(Gtk.Align.END)
self.status.set_property('ellipsize', Pango.EllipsizeMode.END)
self.set_status(None)
self.status_bg = None
self.color_set = False
self.background_color = None
self.foreground_color = None
self.link_color = None
self.visited_color = None
self.overlay = Gtk.Overlay()
self.overlay.add(self.scrolled_window)
# need an EventBox for an opaque background behind the label
box = Gtk.EventBox()
self.status_bg = box
box.add(self.status)
box.set_hexpand(False)
box.set_vexpand(False)
box.set_valign(Gtk.Align.END)
box.set_halign(Gtk.Align.START)
self.overlay.add_overlay(box)
self.overlay.set_overlay_pass_through(box, True)
self.main_component = self.overlay
self.main_component.show_all()
self.da_message = Gtk.DrawingArea()
self.da_message.set_property('expand', True)
self.da_message.connect('draw', self.on_shownotes_message_expose_event)
self.shownotes_pane.add(self.da_message)
self.shownotes_pane.add(self.main_component)
self.set_complain_about_selection(True)
self.hide_pane()
# Either show the shownotes *or* a message, 'Please select an episode'
def set_complain_about_selection(self, message=True):
if message:
self.scrolled_window.hide()
self.da_message.show()
else:
self.da_message.hide()
self.scrolled_window.show()
def set_episodes(self, selected_episodes):
if self.pane_is_visible:
if len(selected_episodes) == 1:
episode = selected_episodes[0]
self.update(episode)
self.set_complain_about_selection(False)
else:
self.set_complain_about_selection(True)
def show_pane(self, selected_episodes):
self.pane_is_visible = True
self.set_episodes(selected_episodes)
self.shownotes_pane.show()
def hide_pane(self):
self.pane_is_visible = False
self.shownotes_pane.hide()
def toggle_pane_visibility(self, selected_episodes):
if self.pane_is_visible:
self.hide_pane()
else:
self.show_pane(selected_episodes)
def on_shownotes_message_expose_event(self, drawingarea, ctx):
background = get_background_color()
if background is None:
background = Gdk.RGBA(1, 1, 1, 1)
ctx.set_source_rgba(background.red, background.green, background.blue, 1)
x1, y1, x2, y2 = ctx.clip_extents()
ctx.rectangle(x1, y1, x2 - x1, y2 - y1)
ctx.fill()
width, height = drawingarea.get_allocated_width(), drawingarea.get_allocated_height(),
text = _('Please select an episode')
draw_text_box_centered(ctx, drawingarea, width, height, text, None, None)
return False
def set_status(self, text):
self.status.set_label(text or " ")
def define_colors(self):
if not self.color_set:
self.color_set = True
# investigate_widget_colors([
# ([(Gtk.Window, 'background', '')], self.status.get_toplevel()),
# ([(Gtk.Window, 'background', ''), (Gtk.Label, '', '')], self.status),
# ([(Gtk.Window, 'background', ''), (Gtk.TextView, 'view', '')], self.text_view),
# ([(Gtk.Window, 'background', ''), (Gtk.TextView, 'view', 'text')], self.text_view),
# ])
dummy_tv = Gtk.TextView()
self.background_color = get_background_color(Gtk.StateFlags.NORMAL,
widget=dummy_tv) or Gdk.RGBA()
self.foreground_color = get_foreground_color(Gtk.StateFlags.NORMAL,
widget=dummy_tv) or Gdk.RGBA(0, 0, 0)
self.link_color = get_foreground_color(state=Gtk.StateFlags.LINK,
widget=dummy_tv) or Gdk.RGBA(0, 0, 0)
self.visited_color = get_foreground_color(state=Gtk.StateFlags.VISITED,
widget=dummy_tv) or self.link_color
del dummy_tv
self.status_bg.override_background_color(Gtk.StateFlags.NORMAL, self.background_color)
if hasattr(self, "text_buffer"):
self.text_buffer.create_tag('hyperlink',
foreground=self.link_color.to_string(),
underline=Pango.Underline.SINGLE)
class gPodderShownotesText(gPodderShownotes):
def init(self):
self.text_view = Gtk.TextView()
self.text_view.set_wrap_mode(Gtk.WrapMode.WORD_CHAR)
self.text_view.set_border_width(10)
self.text_view.set_editable(False)
self.text_buffer = Gtk.TextBuffer()
self.text_buffer.create_tag('heading', scale=1.2, weight=Pango.Weight.BOLD)
self.text_buffer.create_tag('subheading', scale=1.0)
self.text_buffer.create_tag('details', scale=0.9)
self.text_view.set_buffer(self.text_buffer)
self.text_view.set_property('expand', True)
self.text_view.connect('button-release-event', self.on_button_release)
self.text_view.connect('key-press-event', self.on_key_press)
self.text_view.connect('motion-notify-event', self.on_hover_hyperlink)
self.populate_popup_id = None
return self.text_view
def update(self, episode):
self.scrolled_window.get_vadjustment().set_value(0)
heading = episode.title
subheading = _('from %s') % (episode.channel.title)
details = self.details_fmt % {
'date': '{} {}'.format(datetime.datetime.fromtimestamp(episode.published).strftime('%H:%M'),
util.format_date(episode.published)),
'size': util.format_filesize(episode.file_size, digits=1)
if episode.file_size > 0 else "-",
'duration': episode.get_play_info_string()}
self.define_colors()
hyperlinks = [(0, None)]
self.text_buffer.set_text('')
if episode.link:
hyperlinks.append((self.text_buffer.get_char_count(), episode.link))
self.text_buffer.insert_with_tags_by_name(self.text_buffer.get_end_iter(), heading, 'heading')
if episode.link:
hyperlinks.append((self.text_buffer.get_char_count(), None))
self.text_buffer.insert_at_cursor('\n')
self.text_buffer.insert_with_tags_by_name(self.text_buffer.get_end_iter(), subheading, 'subheading')
self.text_buffer.insert_at_cursor('\n')
self.text_buffer.insert_with_tags_by_name(self.text_buffer.get_end_iter(), details, 'details')
self.text_buffer.insert_at_cursor('\n\n')
for target, text in util.extract_hyperlinked_text(episode.html_description()):
hyperlinks.append((self.text_buffer.get_char_count(), target))
if target:
self.text_buffer.insert_with_tags_by_name(
self.text_buffer.get_end_iter(), text, 'hyperlink')
else:
self.text_buffer.insert(
self.text_buffer.get_end_iter(), text)
hyperlinks.append((self.text_buffer.get_char_count(), None))
self.hyperlinks = [(start, end, url) for (start, url), (end, _) in zip(hyperlinks, hyperlinks[1:]) if url]
self.text_buffer.place_cursor(self.text_buffer.get_start_iter())
if self.populate_popup_id is not None:
self.text_view.disconnect(self.populate_popup_id)
self.populate_popup_id = self.text_view.connect('populate-popup', self.on_populate_popup)
self.episode = episode
def on_populate_popup(self, textview, context_menu):
# TODO: Remove items from context menu that are always insensitive in a read-only buffer
if self.episode.link:
# TODO: It is currently not possible to copy links in description.
# Detect if context menu was opened on a hyperlink and add
# "Open Link" and "Copy Link Address" menu items.
# See https://github.com/gpodder/gpodder/issues/1097
item = Gtk.SeparatorMenuItem()
item.show()
context_menu.append(item)
# label links can be opened from context menu or by clicking them, do the same here
item = Gtk.MenuItem(label=_('Open Episode Title Link'))
item.connect('activate', lambda i: util.open_website(self.episode.link))
item.show()
context_menu.append(item)
# hack to allow copying episode.link
item = Gtk.MenuItem(label=_('Copy Episode Title Link Address'))
item.connect('activate', lambda i: util.copy_text_to_clipboard(self.episode.link))
item.show()
context_menu.append(item)
def on_button_release(self, widget, event):
if event.button == 1:
self.activate_links()
def on_key_press(self, widget, event):
if event.keyval == Gdk.KEY_Return:
self.activate_links()
return True
return False
def hyperlink_at_pos(self, pos):
"""
:param int pos: offset in text buffer
:return str: hyperlink target at pos if any or None
"""
return next((url for start, end, url in self.hyperlinks if start < pos < end), None)
def activate_links(self):
if self.text_buffer.get_selection_bounds() == ():
pos = self.text_buffer.props.cursor_position
target = self.hyperlink_at_pos(pos)
if target is not None:
util.open_website(target)
def on_hover_hyperlink(self, textview, e):
x, y = textview.window_to_buffer_coords(Gtk.TextWindowType.TEXT, e.x, e.y)
w = self.text_view.get_window(Gtk.TextWindowType.TEXT)
success, it = textview.get_iter_at_location(x, y)
if success:
pos = it.get_offset()
target = self.hyperlink_at_pos(pos)
if target:
self.set_status(target)
w.set_cursor(Gdk.Cursor.new_from_name(w.get_display(), 'pointer'))
return
self.set_status('')
w.set_cursor(None)
class gPodderShownotesHTML(gPodderShownotes):
def init(self):
self.episode = None
self._base_uri = None
# basic restrictions
self.stylesheet = None
self.manager = WebKit2.UserContentManager()
self.html_view = WebKit2.WebView.new_with_user_content_manager(self.manager)
settings = self.html_view.get_settings()
settings.set_enable_java(False)
settings.set_enable_plugins(False)
settings.set_enable_javascript(False)
# uncomment to show web inspector
# settings.set_enable_developer_extras(True)
self.html_view.set_property('expand', True)
self.html_view.connect('mouse-target-changed', self.on_mouse_over)
self.html_view.connect('context-menu', self.on_context_menu)
self.html_view.connect('decide-policy', self.on_decide_policy)
self.html_view.connect('authenticate', self.on_authenticate)
return self.html_view
def update(self, episode):
self.scrolled_window.get_vadjustment().set_value(0)
self.define_colors()
if episode.has_website_link():
self._base_uri = episode.link
else:
self._base_uri = episode.channel.url
# for incomplete base URI (e.g. http://919.noagendanotes.com)
baseURI = urlparse(self._base_uri)
if baseURI.path == '':
self._base_uri += '/'
self._loaded = False
stylesheet = self.get_stylesheet()
if stylesheet:
self.manager.add_style_sheet(stylesheet)
heading = '<h3>%s</h3>' % html.escape(episode.title)
subheading = _('from %s') % html.escape(episode.channel.title)
details = '<small>%s</small>' % html.escape(self.details_fmt % {
'date': '{} {}'.format(datetime.datetime.fromtimestamp(episode.published).strftime('%H:%M'),
util.format_date(episode.published)),
'size': util.format_filesize(episode.file_size, digits=1)
if episode.file_size > 0 else "-",
'duration': episode.get_play_info_string()})
header_html = _('<div id="gpodder-title">\n%(heading)s\n<p>%(subheading)s</p>\n<p>%(details)s</p></div>\n') \
% {'heading': heading, 'subheading': subheading, 'details': details}
# uncomment to prevent background override in html shownotes
# self.manager.remove_all_style_sheets ()
logger.debug("base uri: %s (chan:%s)", self._base_uri, episode.channel.url)
self.html_view.load_html(header_html + episode.html_description(), self._base_uri)
# uncomment to show web inspector
# self.html_view.get_inspector().show()
self.episode = episode
def on_mouse_over(self, webview, hit_test_result, modifiers):
if hit_test_result.context_is_link():
self.set_status(hit_test_result.get_link_uri())
else:
self.set_status(None)
def on_context_menu(self, webview, context_menu, event, hit_test_result):
whitelist_actions = [
WebKit2.ContextMenuAction.NO_ACTION,
WebKit2.ContextMenuAction.STOP,
WebKit2.ContextMenuAction.RELOAD,
WebKit2.ContextMenuAction.COPY,
WebKit2.ContextMenuAction.CUT,
WebKit2.ContextMenuAction.PASTE,
WebKit2.ContextMenuAction.DELETE,
WebKit2.ContextMenuAction.SELECT_ALL,
WebKit2.ContextMenuAction.INPUT_METHODS,
WebKit2.ContextMenuAction.COPY_VIDEO_LINK_TO_CLIPBOARD,
WebKit2.ContextMenuAction.COPY_AUDIO_LINK_TO_CLIPBOARD,
WebKit2.ContextMenuAction.COPY_LINK_TO_CLIPBOARD,
WebKit2.ContextMenuAction.COPY_IMAGE_TO_CLIPBOARD,
WebKit2.ContextMenuAction.COPY_IMAGE_URL_TO_CLIPBOARD
]
items = context_menu.get_items()
for item in items:
if item.get_stock_action() not in whitelist_actions:
context_menu.remove(item)
if hit_test_result.get_context() == WebKit2.HitTestResultContext.DOCUMENT:
item = self.create_open_item(
'shownotes-in-browser',
_('Open shownotes in web browser'),
self._base_uri)
context_menu.insert(item, -1)
elif hit_test_result.context_is_link():
item = self.create_open_item(
'link-in-browser',
_('Open link in web browser'),
hit_test_result.get_link_uri())
context_menu.insert(item, -1)
return False
def on_decide_policy(self, webview, decision, decision_type):
if decision_type == WebKit2.PolicyDecisionType.NEW_WINDOW_ACTION:
decision.ignore()
return False
elif decision_type == WebKit2.PolicyDecisionType.NAVIGATION_ACTION:
req = decision.get_request()
# about:blank is for plain text shownotes
if req.get_uri() in (self._base_uri, 'about:blank'):
decision.use()
else:
# Avoid opening the page inside the WebView and open in the browser instead
decision.ignore()
util.open_website(req.get_uri())
return False
else:
decision.use()
return False
def on_open_in_browser(self, action, var):
util.open_website(var.get_string())
def on_authenticate(self, view, request):
if request.is_retry():
return False
if not self.episode or not self.episode.channel.auth_username:
return False
chan = self.episode.channel
u = urlparse(chan.url)
host = u.hostname
if u.port:
port = u.port
elif u.scheme == 'https':
port = 443
else:
port = 80
logger.debug("on_authenticate(chan=%s:%s req=%s:%s (scheme=%s))",
host, port, request.get_host(), request.get_port(),
request.get_scheme())
if host == request.get_host() and port == request.get_port() \
and request.get_scheme() == WebKit2.AuthenticationScheme.HTTP_BASIC:
persistence = WebKit2.CredentialPersistence.FOR_SESSION
request.authenticate(WebKit2.Credential(chan.auth_username,
chan.auth_password,
persistence))
return True
else:
return False
def create_open_item(self, name, label, url):
action = Gio.SimpleAction.new(name, GLib.VariantType.new('s'))
action.connect('activate', self.on_open_in_browser)
var = GLib.Variant.new_string(url)
return WebKit2.ContextMenuItem.new_from_gaction(action, label, var)
def get_stylesheet(self):
if self.stylesheet is None:
style = ("html { background: %s; color: %s;}"
" a { color: %s; }"
" a:visited { color: %s; }"
" #gpodder-title h3, #gpodder-title p { margin: 0}"
" #gpodder-title {margin-block-end: 1em;}") % \
(self.background_color.to_string(), self.foreground_color.to_string(),
self.link_color.to_string(), self.visited_color.to_string())
self.stylesheet = WebKit2.UserStyleSheet(style, 0, 1, None, None)
return self.stylesheet
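# A minimal sketch (illustrative only) of wiring the shownotes widget into a
# window: the Gtk.Paned is assumed to be created by the caller, and
# 'enable_html' would normally come from the user configuration.
def _example_attach_shownotes(shownotes_pane, enable_html=True):
    shownotes = get_shownotes(enable_html, shownotes_pane)
    shownotes.show_pane([])  # no selection yet: shows "Please select an episode"
    return shownotes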
| size: 19,864 | language: Python | extension: .py | total_lines: 411 | avg_line_length: 37.995134 | max_line_length: 117 | alphanum_fraction: 0.622467 | repo_name: gpodder/gpodder | repo_stars: 1,268 | repo_forks: 204 | repo_open_issues: 307 | repo_license: GPL-3.0 | repo_extraction_date: 9/5/2024, 5:10:30 PM (Europe/Amsterdam) |
| id: 8,316 | file_name: draw.py | file_path: gpodder_gpodder/src/gpodder/gtkui/draw.py |
# -*- coding: utf-8 -*-
#
# gPodder - A media aggregator and podcast client
# Copyright (c) 2005-2018 The gPodder Team
#
# gPodder is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# gPodder is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#
# draw.py -- Draw routines for gPodder-specific graphics
# Thomas Perl <thp@perli.net>, 2007-11-25
#
import base64
import io
import math
import cairo
import gi # isort:skip
gi.require_version('Gdk', '3.0') # isort:skip
gi.require_version('Gtk', '3.0') # isort:skip
gi.require_version('PangoCairo', '1.0') # isort:skip
from gi.repository import Gdk, GdkPixbuf, Gtk, Pango, PangoCairo # isort:skip
class TextExtents(object):
def __init__(self, ctx, text):
extents = ctx.text_extents(text)
(self.x_bearing, self.y_bearing, self.width, self.height, self.x_advance, self.y_advance) = extents
EPISODE_LIST_ICON_SIZE = 16
RRECT_LEFT_SIDE = 1
RRECT_RIGHT_SIDE = 2
def draw_rounded_rectangle(ctx, x, y, w, h, r=10, left_side_width=None,
sides_to_draw=0, close=False):
assert left_side_width is not None
x = int(x)
offset = 0
if close:
offset = 0.5
if sides_to_draw & RRECT_LEFT_SIDE:
ctx.move_to(x + int(left_side_width) - offset, y + h)
ctx.line_to(x + r, y + h)
ctx.curve_to(x, y + h, x, y + h, x, y + h - r)
ctx.line_to(x, y + r)
ctx.curve_to(x, y, x, y, x + r, y)
ctx.line_to(x + int(left_side_width) - offset, y)
if close:
ctx.line_to(x + int(left_side_width) - offset, y + h)
if sides_to_draw & RRECT_RIGHT_SIDE:
ctx.move_to(x + int(left_side_width) + offset, y)
ctx.line_to(x + w - r, y)
ctx.curve_to(x + w, y, x + w, y, x + w, y + r)
ctx.line_to(x + w, y + h - r)
ctx.curve_to(x + w, y + h, x + w, y + h, x + w - r, y + h)
ctx.line_to(x + int(left_side_width) + offset, y + h)
if close:
ctx.line_to(x + int(left_side_width) + offset, y)
def rounded_rectangle(ctx, x, y, width, height, radius=4.):
"""Simple rounded rectangle algorithm
http://www.cairographics.org/samples/rounded_rectangle/
"""
degrees = math.pi / 180.
ctx.new_sub_path()
if width > radius:
ctx.arc(x + width - radius, y + radius, radius, -90. * degrees, 0)
ctx.arc(x + width - radius, y + height - radius, radius, 0, 90. * degrees)
ctx.arc(x + radius, y + height - radius, radius, 90. * degrees, 180. * degrees)
ctx.arc(x + radius, y + radius, radius, 180. * degrees, 270. * degrees)
ctx.close_path()
def draw_text_box_centered(ctx, widget, w_width, w_height, text, font_desc=None, add_progress=None):
style_context = widget.get_style_context()
text_color = style_context.get_color(Gtk.StateFlags.PRELIGHT)
if font_desc is None:
font_desc = style_context.get_font(Gtk.StateFlags.NORMAL)
font_desc.set_size(14 * Pango.SCALE)
pango_context = widget.create_pango_context()
layout = Pango.Layout(pango_context)
layout.set_font_description(font_desc)
layout.set_text(text, -1)
width, height = layout.get_pixel_size()
ctx.move_to(w_width / 2 - width / 2, w_height / 2 - height / 2)
ctx.set_source_rgba(text_color.red, text_color.green, text_color.blue, 0.5)
PangoCairo.show_layout(ctx, layout)
# Draw an optional progress bar below the text (same width)
if add_progress is not None:
bar_height = 10
ctx.set_source_rgba(*text_color)
ctx.set_line_width(1.)
rounded_rectangle(ctx,
w_width / 2 - width / 2 - .5,
w_height / 2 + height - .5, width + 1, bar_height + 1)
ctx.stroke()
rounded_rectangle(ctx,
w_width / 2 - width / 2,
w_height / 2 + height, int(width * add_progress) + .5, bar_height)
ctx.fill()
def draw_cake(percentage, text=None, emblem=None, size=None):
# Download percentage bar icon - it turns out the cake is a lie (d'oh!)
# ..but the initial idea was to have a cake-style indicator, but that
# didn't work as well as the progress bar, but the name stuck..
if size is None:
size = EPISODE_LIST_ICON_SIZE
surface = cairo.ImageSurface(cairo.FORMAT_ARGB32, size, size)
ctx = cairo.Context(surface)
bgc = get_background_color(Gtk.StateFlags.ACTIVE)
# fgc = get_background_color(Gtk.StateFlags.SELECTED)
txc = get_foreground_color(Gtk.StateFlags.NORMAL)
border = 1.5
height = int(size * .4)
width = size - 2 * border
y = (size - height) / 2 + .5
x = border
# Background
ctx.rectangle(x, y, width, height)
ctx.set_source_rgb(bgc.red, bgc.green, bgc.blue)
ctx.fill()
# Filling
if percentage > 0:
fill_width = max(1, min(width - 2, (width - 2) * percentage + .5))
ctx.rectangle(x + 1, y + 1, fill_width, height - 2)
ctx.set_source_rgb(0.289, 0.5625, 0.84765625)
ctx.fill()
# Border
ctx.rectangle(x, y, width, height)
ctx.set_source_rgb(txc.red, txc.green, txc.blue)
ctx.set_line_width(1)
ctx.stroke()
del ctx
return surface
def draw_text_pill(left_text, right_text, x=0, y=0, border=2, radius=14,
widget=None, scale=1):
# Padding (in px) at the right edge of the image (for Ubuntu; bug 1533)
padding_right = 7
x_border = border * 2
if widget is None:
# Use GTK+ style of a normal Button
widget = Gtk.Label()
style_context = widget.get_style_context()
font_desc = style_context.get_font(Gtk.StateFlags.NORMAL)
font_desc.set_weight(Pango.Weight.BOLD)
pango_context = widget.create_pango_context()
layout_left = Pango.Layout(pango_context)
layout_left.set_font_description(font_desc)
layout_left.set_text(left_text, -1)
layout_right = Pango.Layout(pango_context)
layout_right.set_font_description(font_desc)
layout_right.set_text(right_text, -1)
width_left, height_left = layout_left.get_pixel_size()
width_right, height_right = layout_right.get_pixel_size()
text_height = max(height_left, height_right)
left_side_width = width_left + x_border * 2
right_side_width = width_right + x_border * 2
image_height = int(scale * (y + text_height + border * 2))
image_width = int(scale * (x + left_side_width + right_side_width
+ padding_right))
surface = cairo.ImageSurface(cairo.FORMAT_ARGB32, image_width, image_height)
surface.set_device_scale(scale, scale)
ctx = cairo.Context(surface)
# Clip so as to not draw on the right padding (for Ubuntu; bug 1533)
ctx.rectangle(0, 0, image_width - padding_right, image_height)
ctx.clip()
if left_text == '0':
left_text = None
if right_text == '0':
right_text = None
rect_width = left_side_width + right_side_width
rect_height = text_height + border * 2
if left_text is not None:
draw_rounded_rectangle(ctx, x, y, rect_width, rect_height, radius,
left_side_width, RRECT_LEFT_SIDE, right_text is None)
linear = cairo.LinearGradient(x, y, x + left_side_width / 2, y + rect_height / 2)
linear.add_color_stop_rgba(0, .8, .8, .8, .5)
linear.add_color_stop_rgba(.4, .8, .8, .8, .7)
linear.add_color_stop_rgba(.6, .8, .8, .8, .6)
linear.add_color_stop_rgba(.9, .8, .8, .8, .8)
linear.add_color_stop_rgba(1, .8, .8, .8, .9)
ctx.set_source(linear)
ctx.fill()
xpos, ypos, width_left, height = x + 1, y + 1, left_side_width, rect_height - 2
if right_text is None:
width_left -= 2
draw_rounded_rectangle(ctx, xpos, ypos, rect_width, height, radius, width_left, RRECT_LEFT_SIDE, right_text is None)
ctx.set_source_rgba(1., 1., 1., .3)
ctx.set_line_width(1)
ctx.stroke()
draw_rounded_rectangle(ctx, x, y, rect_width, rect_height, radius,
left_side_width, RRECT_LEFT_SIDE, right_text is None)
ctx.set_source_rgba(.2, .2, .2, .6)
ctx.set_line_width(1)
ctx.stroke()
ctx.move_to(x + x_border, y + 1 + border)
ctx.set_source_rgba(0, 0, 0, 1)
PangoCairo.show_layout(ctx, layout_left)
ctx.move_to(x - 1 + x_border, y + border)
ctx.set_source_rgba(1, 1, 1, 1)
PangoCairo.show_layout(ctx, layout_left)
if right_text is not None:
draw_rounded_rectangle(ctx, x, y, rect_width, rect_height, radius, left_side_width, RRECT_RIGHT_SIDE, left_text is None)
linear = cairo.LinearGradient(
x + left_side_width,
y,
x + left_side_width + right_side_width / 2,
y + rect_height)
linear.add_color_stop_rgba(0, .2, .2, .2, .9)
linear.add_color_stop_rgba(.4, .2, .2, .2, .8)
linear.add_color_stop_rgba(.6, .2, .2, .2, .6)
linear.add_color_stop_rgba(.9, .2, .2, .2, .7)
linear.add_color_stop_rgba(1, .2, .2, .2, .5)
ctx.set_source(linear)
ctx.fill()
xpos, ypos, width, height = x, y + 1, rect_width - 1, rect_height - 2
if left_text is None:
xpos, width = x + 1, rect_width - 2
draw_rounded_rectangle(ctx, xpos, ypos, width, height, radius, left_side_width, RRECT_RIGHT_SIDE, left_text is None)
ctx.set_source_rgba(1., 1., 1., .3)
ctx.set_line_width(1)
ctx.stroke()
draw_rounded_rectangle(ctx, x, y, rect_width, rect_height, radius, left_side_width, RRECT_RIGHT_SIDE, left_text is None)
ctx.set_source_rgba(.1, .1, .1, .6)
ctx.set_line_width(1)
ctx.stroke()
ctx.move_to(x + left_side_width + x_border, y + 1 + border)
ctx.set_source_rgba(0, 0, 0, 1)
PangoCairo.show_layout(ctx, layout_right)
ctx.move_to(x - 1 + left_side_width + x_border, y + border)
ctx.set_source_rgba(1, 1, 1, 1)
PangoCairo.show_layout(ctx, layout_right)
return surface
def draw_cake_pixbuf(percentage, text=None, emblem=None, size=None):
return cairo_surface_to_pixbuf(draw_cake(percentage, text, emblem, size=size))
def draw_pill_pixbuf(left_text, right_text, widget=None, scale=1):
return cairo_surface_to_pixbuf(draw_text_pill(left_text, right_text,
widget=widget, scale=scale))
def cake_size_from_widget(widget=None):
if widget is None:
# Use GTK+ style of a normal Button
widget = Gtk.Label()
style_context = widget.get_style_context()
font_desc = style_context.get_font(Gtk.StateFlags.NORMAL)
pango_context = widget.create_pango_context()
layout = Pango.Layout(pango_context)
layout.set_font_description(font_desc)
layout.set_text("1", -1)
# use text height as size
return layout.get_pixel_size()[1]
def cairo_surface_to_pixbuf(s):
"""
Converts a Cairo surface to a Gtk Pixbuf by
encoding it as PNG and using the PixbufLoader.
"""
bio = io.BytesIO()
try:
s.write_to_png(bio)
    except Exception:
        # Write an empty PNG file to the BytesIO, so
        # in case of an error we have "something" to
        # load. This happens in PyCairo < 1.1.6, see:
        # http://webcvs.cairographics.org/pycairo/NEWS?view=markup
        # Thanks to Chris Arnold for reporting this bug
        bio.write(base64.b64decode(
            'iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAYAAAAfFcSJAAAAAXNSR0IArs4'
            'c6QAAAAZiS0dEAP8A\n/wD/oL2nkwAAAAlwSFlzAAALEwAACxMBAJqcGAAA'
            'AAd0SU1FB9cMEQkqIyxn3RkAAAAZdEVYdENv\nbW1lbnQAQ3JlYXRlZCB3a'
            'XRoIEdJTVBXgQ4XAAAADUlEQVQI12NgYGBgAAAABQABXvMqOgAAAABJ\nRU'
            '5ErkJggg==\n'))
pbl = GdkPixbuf.PixbufLoader()
pbl.write(bio.getvalue())
pbl.close()
pixbuf = pbl.get_pixbuf()
return pixbuf
def progressbar_pixbuf(width, height, percentage):
COLOR_BG = (.4, .4, .4, .4)
COLOR_FG = (.2, .9, .2, 1.)
COLOR_FG_HIGH = (1., 1., 1., .5)
COLOR_BORDER = (0., 0., 0., 1.)
surface = cairo.ImageSurface(cairo.FORMAT_ARGB32, width, height)
ctx = cairo.Context(surface)
padding = int(width / 8.0)
bar_width = 2 * padding
bar_height = height - 2 * padding
bar_height_fill = bar_height * percentage
# Background
ctx.rectangle(padding, padding, bar_width, bar_height)
ctx.set_source_rgba(*COLOR_BG)
ctx.fill()
# Foreground
ctx.rectangle(padding, padding + bar_height - bar_height_fill, bar_width, bar_height_fill)
ctx.set_source_rgba(*COLOR_FG)
ctx.fill()
ctx.rectangle(padding + bar_width / 3,
padding + bar_height - bar_height_fill,
bar_width / 4,
bar_height_fill)
ctx.set_source_rgba(*COLOR_FG_HIGH)
ctx.fill()
# Border
ctx.rectangle(padding - .5, padding - .5, bar_width + 1, bar_height + 1)
ctx.set_source_rgba(* COLOR_BORDER)
ctx.set_line_width(1.)
ctx.stroke()
return cairo_surface_to_pixbuf(surface)
def get_background_color(state=Gtk.StateFlags.NORMAL, widget=Gtk.TreeView()):
"""
@param state state flag (e.g. Gtk.StateFlags.SELECTED to get selected background)
@param widget specific widget to get info from.
defaults to TreeView which has all one usually wants.
@return background color from theme for widget or from its parents if transparent.
"""
p = widget
color = Gdk.RGBA(0, 0, 0, 0)
while p is not None and color.alpha == 0:
style_context = p.get_style_context()
color = style_context.get_background_color(state)
p = p.get_parent()
return color
def get_foreground_color(state=Gtk.StateFlags.NORMAL, widget=Gtk.TreeView()):
"""
@param state state flag (e.g. Gtk.StateFlags.SELECTED to get selected text color)
@param widget specific widget to get info from
defaults to TreeView which has all one usually wants.
@return text color from theme for widget or its parents if transparent
"""
p = widget
color = Gdk.RGBA(0, 0, 0, 0)
while p is not None and color.alpha == 0:
style_context = p.get_style_context()
color = style_context.get_color(state)
p = p.get_parent()
return color
def investigate_widget_colors(type_classes_and_widgets):
"""
investigate using Gtk.StyleContext to get widget style properties
I tried to compare values from static and live widgets.
To sum up, better use the live widget, because you'll get the correct path, classes, regions automatically.
See "CSS Nodes" in widget documentation for classes and sub-nodes (=regions).
WidgetPath and Region are replaced by CSSNodes in gtk4.
Not sure it's legitimate usage, though: I got different results from one run to another.
Run `GTK_DEBUG=interactive ./bin/gpodder` for gtk widget inspection
"""
def investigate_stylecontext(style_ctx, label):
style_ctx.save()
for statename, state in [
('normal', Gtk.StateFlags.NORMAL),
('active', Gtk.StateFlags.ACTIVE),
('link', Gtk.StateFlags.LINK),
('visited', Gtk.StateFlags.VISITED)]:
f.write("<dt>%s %s</dt><dd>\n" % (label, statename))
colors = {
'get_color': style_ctx.get_color(state),
'get_background_color': style_ctx.get_background_color(state),
'color': style_ctx.get_property('color', state),
'background-color': style_ctx.get_property('background-color', state),
'outline-color': style_ctx.get_property('outline-color', state),
}
f.write("<p>PREVIEW: <span style='background-color: %s; color: %s'>get_color + get_background_color</span>"
% (colors['get_background_color'].to_string(),
colors['get_color'].to_string()))
f.write("<span style='background-color: %s; color: %s; border solid 2px %s;'>color + background-color properties</span></p>\n"
% (colors['background-color'].to_string(),
colors['color'].to_string(),
colors['outline-color'].to_string()))
f.write("<p>VALUES: ")
for p, v in colors.items():
f.write("%s=<span style='background-color: %s;'>%s</span>" % (p, v.to_string(), v.to_string()))
f.write("</p></dd>\n")
style_ctx.restore()
with open('/tmp/colors.html', 'w') as f:
f.write("""<html>
<style type='text/css'>
body {color: red; background: yellow;}
span { display: inline-block; margin-right: 1ch; }
dd { margin-bottom: 1em; }
td { vertical-align: top; }
</style>
<table>""")
for type_and_class, w in type_classes_and_widgets:
f.write("<tr><td><dl>\n")
# Create an empty style context
style_ctx = Gtk.StyleContext()
# Create an empty widget path
widget_path = Gtk.WidgetPath()
# Specify the widget class type you want to get colors from
for t, c, r in type_and_class:
widget_path.append_type(t)
if c:
widget_path.iter_add_class(widget_path.length() - 1, c)
if r:
widget_path.iter_add_region(widget_path.length() - 1, r, 0)
style_ctx.set_path(widget_path)
investigate_stylecontext(
style_ctx,
'STATIC {}'.format(' '.join('{}.{}({})'.format(t.__name__, c, r) for t, c, r in type_and_class)))
f.write("</dl></td><td><dl>\n")
investigate_stylecontext(w.get_style_context(), 'LIVE {}'.format(type(w).__name__))
f.write("</dl></td></tr>\n")
f.write("</table></html>\n")
def draw_iconcell_scale(column, cell, model, iterator, scale):
"""
Draw cell's pixbuf to a surface with proper scaling for high resolution
displays. To be used as gtk.TreeViewColumn.set_cell_data_func.
:param column: gtk.TreeViewColumn (ignored)
:param cell: gtk.CellRenderer
:param model: gtk.TreeModel (ignored)
:param iter: gtk.TreeIter (ignored)
:param scale: factor of the target display (e.g. 1 or 2)
"""
pixbuf = cell.props.pixbuf
if not pixbuf:
return
width = pixbuf.get_width()
height = pixbuf.get_height()
scale_inv = 1 / scale
surface = cairo.ImageSurface(cairo.FORMAT_ARGB32, width, height)
surface.set_device_scale(scale, scale)
cr = cairo.Context(surface)
cr.scale(scale_inv, scale_inv)
Gdk.cairo_set_source_pixbuf(cr, cell.props.pixbuf, 0, 0)
cr.paint()
cell.props.surface = surface
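# A minimal sketch (illustrative only) of how two of the helpers above are
# typically used; the column, cell and widget objects are assumed to exist
# in the caller.
def _example_use_draw_helpers(column, cell, widget=None, scale=1):
    # Scale-aware icon rendering for HiDPI displays: Gtk passes the last
    # argument through to draw_iconcell_scale as its 'scale' parameter.
    column.set_cell_data_func(cell, draw_iconcell_scale, scale)
    # Episode-count "pill" pixbuf, themed after the given widget's style.
    return draw_pill_pixbuf('3', '7', widget=widget, scale=scale)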
| size: 19,423 | language: Python | extension: .py | total_lines: 423 | avg_line_length: 37.806147 | max_line_length: 138 | alphanum_fraction: 0.617663 | repo_name: gpodder/gpodder | repo_stars: 1,268 | repo_forks: 204 | repo_open_issues: 307 | repo_license: GPL-3.0 | repo_extraction_date: 9/5/2024, 5:10:30 PM (Europe/Amsterdam) |
| id: 8,317 | file_name: app.py | file_path: gpodder_gpodder/src/gpodder/gtkui/app.py |
# -*- coding: utf-8 -*-
#
# gPodder - A media aggregator and podcast client
# Copyright (c) 2005-2018 The gPodder Team
#
# gPodder is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# gPodder is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import html
import logging
import os
import sys
import xml.etree.ElementTree as ET
import dbus
import dbus.service
from dbus.mainloop.glib import DBusGMainLoop
import gpodder
from gpodder import core, util
from gpodder.model import check_root_folder_path
from .config import UIConfig
from .desktop.preferences import gPodderPreferences
from .main import gPodder
from .model import Model
import gi # isort:skip
gi.require_version('Gtk', '3.0') # isort:skip
from gi.repository import GdkPixbuf, Gio, GObject, Gtk # isort:skip
logger = logging.getLogger(__name__)
_ = gpodder.gettext
N_ = gpodder.ngettext
def parse_app_menu_for_accels(filename):
"""
grab (accelerator, action) bindings from menus.ui.
See #815 Ctrl-Q doesn't quit for justification.
Unfortunately it's not available from the Gio.MenuModel we get from the Gtk.Builder,
so we get it ourself.
"""
res = []
menu_tree = ET.parse(filename)
assert menu_tree.getroot().tag == 'interface'
for menu in menu_tree.getroot():
assert menu.tag == 'menu'
if menu.attrib.get('id') == 'app-menu':
for itm in menu.iter('item'):
action = None
accel = None
for att in itm.findall('attribute'):
if att.get('name') == 'action':
action = att.text.strip()
elif att.get('name') == 'accel':
accel = att.text.strip()
if action and accel:
res.append((accel, action))
return res
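# Illustrative only: the kind of menus.ui fragment the parser above expects --
# an <item> inside <menu id="app-menu"> carrying both an 'action' and an
# 'accel' attribute (the label/action/accel values here are examples).
_EXAMPLE_APP_MENU_FRAGMENT = """
<interface>
  <menu id="app-menu">
    <section>
      <item>
        <attribute name="label">Quit</attribute>
        <attribute name="action">app.quit</attribute>
        <attribute name="accel">&lt;Primary&gt;q</attribute>
      </item>
    </section>
  </menu>
</interface>
"""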
class gPodderApplication(Gtk.Application):
def __init__(self, options):
Gtk.Application.__init__(self, application_id='org.gpodder.gpodder',
flags=Gio.ApplicationFlags.FLAGS_NONE)
self.window = None
self.options = options
self.connect('window-removed', self.on_window_removed)
def create_actions(self):
action = Gio.SimpleAction.new('about', None)
action.connect('activate', self.on_about)
self.add_action(action)
action = Gio.SimpleAction.new('quit', None)
action.connect('activate', self.on_quit)
self.add_action(action)
action = Gio.SimpleAction.new('help', None)
action.connect('activate', self.on_help_activate)
self.add_action(action)
action = Gio.SimpleAction.new('logs', None)
action.connect('activate', self.on_logs_activate)
self.add_action(action)
action = Gio.SimpleAction.new('preferences', None)
action.connect('activate', self.on_itemPreferences_activate)
self.add_action(action)
action = Gio.SimpleAction.new('gotoMygpo', None)
action.connect('activate', self.on_goto_mygpo)
self.add_action(action)
action = Gio.SimpleAction.new('checkForUpdates', None)
action.connect('activate', self.on_check_for_updates_activate)
self.add_action(action)
action = Gio.SimpleAction.new('menu', None)
action.connect('activate', self.on_menu)
self.add_action(action)
def do_startup(self):
Gtk.Application.do_startup(self)
self.create_actions()
builder = Gtk.Builder()
builder.set_translation_domain(gpodder.textdomain)
self.builder = builder
menu_filename = None
for ui_folder in gpodder.ui_folders:
filename = os.path.join(ui_folder, 'gtk/menus.ui')
if os.path.exists(filename):
builder.add_from_file(filename)
menu_filename = filename
break
menubar = builder.get_object('menubar')
if menubar is None:
logger.error('Cannot find gtk/menus.ui in %r, exiting' % gpodder.ui_folders)
sys.exit(1)
self.menu_extras = builder.get_object('menuExtras')
self.menu_view_columns = builder.get_object('menuViewColumns')
self.set_menubar(menubar)
# If $XDG_CURRENT_DESKTOP is set then it contains a colon-separated list of strings.
# https://specifications.freedesktop.org/desktop-entry-spec/desktop-entry-spec-latest.html
# See https://askubuntu.com/a/227669 for a list of values in different environments
xdg_current_desktops = os.environ.get('XDG_CURRENT_DESKTOP', '').split(':')
# See https://developer.gnome.org/gtk3/stable/gtk-running.html
# GTK_CSD=0 is used to disable client side decorations
csd_disabled = os.environ.get('GTK_CSD') == '0'
self.want_headerbar = ('GNOME' in xdg_current_desktops) and not gpodder.ui.osx and not csd_disabled
self.app_menu = builder.get_object('app-menu')
if self.want_headerbar:
# Use GtkHeaderBar for client-side decorations on recent GNOME 3 versions
self.header_bar_menu_button = Gtk.Button.new_from_icon_name('open-menu-symbolic', Gtk.IconSize.SMALL_TOOLBAR)
self.header_bar_menu_button.set_action_name('app.menu')
self.header_bar_refresh_button = Gtk.Button.new_from_icon_name('view-refresh-symbolic', Gtk.IconSize.SMALL_TOOLBAR)
self.header_bar_refresh_button.set_action_name('win.updateChannel')
self.menu_popover = Gtk.Popover.new_from_model(self.header_bar_menu_button, self.app_menu)
self.menu_popover.set_position(Gtk.PositionType.BOTTOM)
for (accel, action) in parse_app_menu_for_accels(menu_filename):
self.add_accelerator(accel, action, None)
else:
self.set_app_menu(self.app_menu)
Gtk.Window.set_default_icon_name('gpodder')
try:
dbus_main_loop = DBusGMainLoop(set_as_default=True)
gpodder.dbus_session_bus = dbus.SessionBus(dbus_main_loop)
self.bus_name = dbus.service.BusName(gpodder.dbus_bus_name, bus=gpodder.dbus_session_bus)
except dbus.exceptions.DBusException as dbe:
logger.warning('Cannot get "on the bus".', exc_info=True)
dlg = Gtk.MessageDialog(None, Gtk.DialogFlags.MODAL, Gtk.MessageType.ERROR,
Gtk.ButtonsType.CLOSE, _('Cannot start gPodder'))
dlg.format_secondary_markup(_('D-Bus error: %s') % (str(dbe),))
dlg.set_title('gPodder')
dlg.run()
dlg.destroy()
sys.exit(0)
util.idle_add(self.check_root_folder_path_gui)
def do_activate(self):
# We only allow a single window and raise any existing ones
if not self.window:
# Windows are associated with the application
# when the last one is closed the application shuts down
self.window = gPodder(self, self.bus_name, core.Core(UIConfig, model_class=Model), self.options)
if gpodder.ui.osx:
from . import macosx
# Handle "subscribe to podcast" events from firefox
macosx.register_handlers(self.window)
# Set dark mode from color_scheme config key, or from Settings portal
# if it exists and color_scheme is 'system'.
if getattr(gpodder.dbus_session_bus, 'fake', False):
self.have_settings_portal = False
self._set_default_color_scheme('light')
self.set_dark_mode(self.window.config.ui.gtk.color_scheme == 'dark')
else:
self.read_portal_color_scheme()
gpodder.dbus_session_bus.add_signal_receiver(
self.on_portal_setting_changed, "SettingChanged", None,
"org.freedesktop.portal.Desktop", "/org/freedesktop/portal/desktop")
self.window.gPodder.present()
def _set_default_color_scheme(self, default):
"""Set the default value for color_scheme based on GTK settings.
If gtk_application_prefer_dark_theme is set to 1 (a non-default value),
the user has set it in GTK settings.ini and we set color_scheme to match
this preference. Otherwise we set the key to the given default, which
should be 'system' if the Settings portal is available, or 'light' if it is not.
"""
if self.window.config.ui.gtk.color_scheme is None:
settings = Gtk.Settings.get_default()
self.window.config.ui.gtk.color_scheme = (
'dark' if settings.props.gtk_application_prefer_dark_theme == 1
else default)
def set_dark_mode(self, dark):
settings = Gtk.Settings.get_default()
settings.props.gtk_application_prefer_dark_theme = 1 if dark else 0
def read_portal_color_scheme(self):
gpodder.dbus_session_bus.call_async(
"org.freedesktop.portal.Desktop", "/org/freedesktop/portal/desktop",
"org.freedesktop.portal.Settings", "ReadOne", "ss",
("org.freedesktop.appearance", "color-scheme"),
self.on_portal_settings_read, self.on_portal_settings_read_error)
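# Note: per the XDG Settings portal spec, 'color-scheme' is 0 (no preference),
# 1 (prefer dark) or 2 (prefer light); only the "prefer dark" case (value == 1)
# switches gPodder to dark mode in the handlers below.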
def on_portal_settings_read(self, value):
self.have_settings_portal = True
self._set_default_color_scheme('system')
if self.window.config.ui.gtk.color_scheme == 'system':
self.set_dark_mode(value == 1)
else:
self.set_dark_mode(self.window.config.ui.gtk.color_scheme == 'dark')
def on_portal_settings_read_error(self, value):
self.have_settings_portal = False
self._set_default_color_scheme('light')
self.set_dark_mode(self.window.config.ui.gtk.color_scheme == 'dark')
def on_portal_setting_changed(self, namespace, key, value):
if (namespace == 'org.freedesktop.appearance'
and key == 'color-scheme'):
dark = (value == 1)
if self.window.config.ui.gtk.color_scheme == 'system':
logger.debug(
f"'color-scheme' changed to {value}, setting dark mode to {dark}")
self.set_dark_mode(dark)
def on_menu(self, action, param):
self.menu_popover.popup()
def on_about(self, action, param):
dlg = Gtk.Dialog(_('About gPodder'), self.window.gPodder,
Gtk.DialogFlags.MODAL)
dlg.add_button(_('_Close'), Gtk.ResponseType.OK).show()
dlg.set_resizable(True)
bg = Gtk.Box(orientation=Gtk.Orientation.VERTICAL, spacing=6, margin=16)
pb = GdkPixbuf.Pixbuf.new_from_file_at_size(gpodder.icon_file, 160, 160)
bg.pack_start(Gtk.Image.new_from_pixbuf(pb), False, False, 0)
label = Gtk.Label(justify=Gtk.Justification.CENTER)
label.set_selectable(True)
label.set_markup('\n'.join(x.strip() for x in """
<b>gPodder {version} ({date})</b>
{copyright}
{license}
<a href="{url}">{tr_website}</a> · <a href="{bugs_url}">{tr_bugtracker}</a>
""".format(version=gpodder.__version__,
date=gpodder.__date__,
copyright=gpodder.__copyright__,
license=gpodder.__license__,
bugs_url='https://github.com/gpodder/gpodder/issues',
url=html.escape(gpodder.__url__),
tr_website=_('Website'),
tr_bugtracker=_('Bug Tracker')).strip().split('\n')))
label.connect('activate-link', lambda label, url: util.open_website(url))
bg.pack_start(label, False, False, 0)
bg.pack_start(Gtk.Label(), False, False, 0)
dlg.vbox.pack_start(bg, False, False, 0)
dlg.connect('response', lambda dlg, response: dlg.destroy())
dlg.vbox.show_all()
dlg.run()
def on_quit(self, *args):
self.window.on_gPodder_delete_event()
def on_window_removed(self, *args):
self.quit()
def on_help_activate(self, action, param):
util.open_website('https://gpodder.github.io/docs/')
def on_logs_activate(self, action, param):
util.gui_open(os.path.join(gpodder.home, 'Logs'), gui=self.window)
def on_itemPreferences_activate(self, action, param=None):
gPodderPreferences(self.window.gPodder,
_config=self.window.config,
user_apps_reader=self.window.user_apps_reader,
parent_window=self.window.main_window,
mygpo_client=self.window.mygpo_client,
on_send_full_subscriptions=self.window.on_send_full_subscriptions,
on_itemExportChannels_activate=self.window.on_itemExportChannels_activate,
on_extension_enabled=self.on_extension_enabled,
on_extension_disabled=self.on_extension_disabled,
have_settings_portal=self.have_settings_portal)
def on_goto_mygpo(self, action, param):
self.window.mygpo_client.open_website()
def on_check_for_updates_activate(self, action, param):
if os.path.exists(gpodder.no_update_check_file):
self.window.check_for_distro_updates()
else:
self.window.check_for_updates(silent=False)
def on_extension_enabled(self, extension):
self.window.on_extension_enabled(extension)
def on_extension_disabled(self, extension):
self.window.on_extension_disabled(extension)
@staticmethod
def check_root_folder_path_gui():
msg = check_root_folder_path()
if msg:
dlg = Gtk.MessageDialog(None, Gtk.DialogFlags.MODAL, Gtk.MessageType.WARNING,
Gtk.ButtonsType.CLOSE, msg)
dlg.set_title(_('Path to gPodder home is too long'))
dlg.run()
dlg.destroy()
def main(options=None):
GObject.set_application_name('gPodder')
gp = gPodderApplication(options)
gp.run()
sys.exit(0)
| 14,419 | Python | .py | 289 | 40.058824 | 127 | 0.640632 | gpodder/gpodder | 1,268 | 204 | 307 | GPL-3.0 | 9/5/2024, 5:10:30 PM (Europe/Amsterdam) |
| 8,318 | __init__.py | gpodder_gpodder/src/gpodder/gtkui/__init__.py |
# -*- coding: utf-8 -*-
#
# gPodder - A media aggregator and podcast client
# Copyright (c) 2005-2018 The gPodder Team
#
# gPodder is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# gPodder is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
| 747 | Python | .py | 18 | 40.5 | 71 | 0.766804 | gpodder/gpodder | 1,268 | 204 | 307 | GPL-3.0 | 9/5/2024, 5:10:30 PM (Europe/Amsterdam) |
| 8,319 | base.py | gpodder_gpodder/src/gpodder/gtkui/base.py |
# -*- coding: utf-8 -*-
"""
UI Base Module for GtkBuilder
Based on SimpleGladeApp.py Copyright (C) 2004 Sandino Flores Moreno
"""
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
# USA
import os
import re
import tokenize
from gi.repository import Gtk
class GtkBuilderWidget(object):
def __init__(self, ui_folders, textdomain, parent, **kwargs):
"""
Loads the UI file from the specified folders (with translations
from the textdomain) and initializes attributes.
ui_folders:
List of folders with GtkBuilder .ui files in search order
textdomain:
The textdomain to be used for translating strings
**kwargs:
Keyword arguments will be set as attributes of this window
"""
for key, value in list(kwargs.items()):
setattr(self, key, value)
self.builder = Gtk.Builder()
if parent is not None:
self.builder.expose_object('parent_widget', parent)
self.builder.set_translation_domain(textdomain)
# print >>sys.stderr, 'Creating new from file', self.__class__.__name__
ui_file = '%s.ui' % self.__class__.__name__.lower()
# Search for the UI file in the UI folders, stop after first match
for ui_folder in ui_folders:
filename = os.path.join(ui_folder, ui_file)
if os.path.exists(filename):
self.builder.add_from_file(filename)
break
self.builder.connect_signals(self)
self.set_attributes()
if hasattr(self, '_gtk_properties'):
for ((gobj, prop), val) in self._gtk_properties.items():
getattr(self, gobj).set_property(prop, val)
self.new()
def set_attributes(self):
"""
Convert widget names to attributes of this object.
It means a widget named vbox-dialog in GtkBuilder
is referred to as self.vbox_dialog in the code.
"""
for widget in self.builder.get_objects():
# Just to be safe - every widget from the builder is buildable
if not isinstance(widget, Gtk.Buildable):
continue
# The following call looks ugly, but see Gnome bug 591085
widget_name = Gtk.Buildable.get_name(widget)
widget_api_name = '_'.join(re.findall(tokenize.Name, widget_name))
if hasattr(self, widget_api_name):
raise AttributeError(
"instance %s already has an attribute %s" % (
self, widget_api_name))
else:
setattr(self, widget_api_name, widget)
@property
def main_window(self):
"""Returns the main window of this GtkBuilderWidget"""
return getattr(self, self.__class__.__name__)
def new(self):
"""
Method called when the user interface is loaded and ready to be used.
At this moment, the widgets are loaded and can be referred to as self.widget_name
"""
def main(self):
"""
Starts the main loop of processing events.
The default implementation calls Gtk.main()
Useful for applications that need a non-GTK main loop.
For example, applications based on GStreamer need to override
this method with Gst.main()
Do not directly call this method in your programs.
Use the method run() instead.
"""
Gtk.main()
def quit(self):
"""
Quit processing events.
The default implementation calls Gtk.main_quit()
Useful for applications that need a non-GTK main loop.
For example, applications based on GStreamer need to override
this method with Gst.main_quit()
"""
Gtk.main_quit()
def run(self):
"""
Starts the main loop of event processing, watching for Control-C.
The default implementation catches KeyboardInterrupt (Control-C)
and then calls on_keyboard_interrupt().
Use this method for starting programs.
"""
try:
self.main()
except KeyboardInterrupt:
self.on_keyboard_interrupt()
def on_keyboard_interrupt(self):
"""
This method is called by the default implementation of run()
after a program is finished by pressing Control-C.
"""
| 5,049 | Python | .py | 119 | 33.882353 | 86 | 0.643061 | gpodder/gpodder | 1,268 | 204 | 307 | GPL-3.0 | 9/5/2024, 5:10:30 PM (Europe/Amsterdam) |
| 8,320 | widgets.py | gpodder_gpodder/src/gpodder/gtkui/widgets.py |
# -*- coding: utf-8 -*-
#
# gPodder - A media aggregator and podcast client
# Copyright (c) 2005-2018 The gPodder Team
#
# gPodder is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# gPodder is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#
# widgets.py -- Additional widgets for gPodder
# Thomas Perl <thp@gpodder.org> 2009-03-31
#
from gi.repository import Gtk
class SpinningProgressIndicator(Gtk.Image):
# Progress indicator loading inspired by glchess from gnome-games-clutter
def __init__(self, size=32):
Gtk.Image.__init__(self)
self._frames = []
self._frame_id = 0
# Load the progress indicator
icon_theme = Gtk.IconTheme.get_default()
try:
icon = icon_theme.load_icon('process-working', size, 0)
width, height = icon.get_width(), icon.get_height()
if width < size or height < size:
size = min(width, height)
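# Illustrative: a 128x96 'process-working' sheet with 32px frames yields a
# 4x3 grid (12 frames); the first, idle frame is dropped below, leaving 11.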
for row in range(height // size):
for column in range(width // size):
frame = icon.subpixbuf(column * size, row * size, size, size)
self._frames.append(frame)
# Remove the first frame (the "idle" icon)
if self._frames:
self._frames.pop(0)
self.step_animation()
except Exception:
# FIXME: This is not very beautiful :/
self.set_from_icon_name('system-run', Gtk.IconSize.BUTTON)
def step_animation(self):
if len(self._frames) > 1:
self._frame_id += 1
if self._frame_id >= len(self._frames):
self._frame_id = 0
self.set_from_pixbuf(self._frames[self._frame_id])
| 2,209 | Python | .py | 53 | 34.377358 | 81 | 0.637465 | gpodder/gpodder | 1,268 | 204 | 307 | GPL-3.0 | 9/5/2024, 5:10:30 PM (Europe/Amsterdam) |
| 8,321 | model.py | gpodder_gpodder/src/gpodder/gtkui/model.py |
# -*- coding: utf-8 -*-
#
# gPodder - A media aggregator and podcast client
# Copyright (c) 2005-2018 The gPodder Team
#
# gPodder is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# gPodder is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#
# gpodder.gtkui.model - GUI model classes for gPodder (2009-08-13)
# Based on code from libpodcasts.py (thp, 2005-10-29)
#
import html
import logging
import os
import re
import time
from itertools import groupby
from gi.repository import GdkPixbuf, GLib, GObject, Gtk
import gpodder
from gpodder import coverart, model, query, util
from gpodder.gtkui import draw
_ = gpodder.gettext
logger = logging.getLogger(__name__)
class GEpisode(model.PodcastEpisode):
__slots__ = ()
@property
def title_markup(self):
return '%s\n<small>%s</small>' % (html.escape(self.title),
html.escape(self.channel.title))
@property
def markup_new_episodes(self):
if self.file_size > 0:
length_str = '%s; ' % util.format_filesize(self.file_size)
else:
length_str = ''
return ('<b>%s</b>\n<small>%s' + _('released %s')
+ '; ' + _('from %s') + '</small>') % (
html.escape(re.sub(r'\s+', ' ', self.title)),
html.escape(length_str),
html.escape(self.pubdate_prop),
html.escape(re.sub(r'\s+', ' ', self.channel.title)))
@property
def markup_delete_episodes(self):
if self.total_time and self.current_position:
played_string = self.get_play_info_string()
elif not self.is_new:
played_string = _('played')
else:
played_string = _('unplayed')
downloaded_string = self.get_age_string()
if not downloaded_string:
downloaded_string = _('today')
return ('<b>%s</b>\n<small>%s; %s; ' + _('downloaded %s')
+ '; ' + _('from %s') + '</small>') % (
html.escape(self.title),
html.escape(util.format_filesize(self.file_size)),
html.escape(played_string),
html.escape(downloaded_string),
html.escape(self.channel.title))
class GPodcast(model.PodcastChannel):
__slots__ = ()
EpisodeClass = GEpisode
@property
def title_markup(self):
""" escaped title for the mass unsubscribe dialog """
return html.escape(self.title)
class Model(model.Model):
PodcastClass = GPodcast
# ----------------------------------------------------------
# Singleton marker indicating that a row is a separator
class SeparatorMarker(object):
pass
class BackgroundUpdate(object):
def __init__(self, model, episodes):
self.model = model
self.episodes = episodes
self.index = 0
def update(self):
model = self.model
started = time.time()
while self.episodes:
episode = self.episodes.pop(0)
base_fields = (
model.C_URL, episode.url,
model.C_TITLE, episode.title,
model.C_EPISODE, episode,
model.C_PUBLISHED_TEXT, episode.cute_pubdate(show_time=self.model._config_ui_gtk_episode_list_show_released_time),
model.C_PUBLISHED, episode.published,
)
update_fields = model.get_update_fields(episode)
try:
it = model.get_iter((self.index,))
# fix #727 the tree might be invalid when trying to update so discard the exception
except ValueError:
break
# model.get_update_fields() takes 38-67% of each iteration, depending on episode status
# with downloaded episodes using the most time
# model.set(), excluding the field expansion, takes 33-62% of each iteration
# and each iteration takes 1ms or more on slow machines
model.set(it, *(base_fields + update_fields))
self.index += 1
# Check for the time limit of 500ms after each 50 rows processed
if self.index % 50 == 0 and (time.time() - started) > 0.5:
break
return bool(self.episodes)
class EpisodeListModel(Gtk.ListStore):
C_URL, C_TITLE, C_FILESIZE_TEXT, C_EPISODE, C_STATUS_ICON, \
C_PUBLISHED_TEXT, C_DESCRIPTION, C_TOOLTIP, \
C_VIEW_SHOW_UNDELETED, C_VIEW_SHOW_DOWNLOADED, \
C_VIEW_SHOW_UNPLAYED, C_FILESIZE, C_PUBLISHED, \
C_TIME, C_TIME_VISIBLE, C_TOTAL_TIME, \
C_LOCKED, \
C_TIME_AND_SIZE, C_TOTAL_TIME_AND_SIZE, C_FILESIZE_AND_TIME_TEXT, C_FILESIZE_AND_TIME = list(range(21))
VIEW_ALL, VIEW_UNDELETED, VIEW_DOWNLOADED, VIEW_UNPLAYED = list(range(4))
VIEWS = ['VIEW_ALL', 'VIEW_UNDELETED', 'VIEW_DOWNLOADED', 'VIEW_UNPLAYED']
# In which steps the UI is updated for "loading" animations
_UI_UPDATE_STEP = .03
# Steps for the "downloading" icon progress
PROGRESS_STEPS = 20
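# e.g. a download at 52% progress (task.progress == 0.52) maps to
# int(20 * 0.52) == 10, i.e. the 'gpodder-progress-10' status icon.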
def __init__(self, on_filter_changed=lambda has_episodes: None):
Gtk.ListStore.__init__(self, str, str, str, object, str, str, str,
str, bool, bool, bool, GObject.TYPE_INT64,
GObject.TYPE_INT64, str, bool,
GObject.TYPE_INT64, bool, str, GObject.TYPE_INT64, str, GObject.TYPE_INT64)
# Callback for when the filter / list changes, gets one parameter
# (has_episodes) that is True if the list has any episodes
self._on_filter_changed = on_filter_changed
# Filter to allow hiding some episodes
self._filter = self.filter_new()
self._sorter = Gtk.TreeModelSort(self._filter)
self._view_mode = self.VIEW_ALL
self._search_term = None
self._search_term_eql = None
self._filter.set_visible_func(self._filter_visible_func)
# Are we currently showing "all episodes"/section or a single channel?
self._section_view = False
self.icon_theme = Gtk.IconTheme.get_default()
self.ICON_WEB_BROWSER = 'web-browser'
self.ICON_AUDIO_FILE = 'audio-x-generic'
self.ICON_VIDEO_FILE = 'video-x-generic'
self.ICON_IMAGE_FILE = 'image-x-generic'
self.ICON_GENERIC_FILE = 'text-x-generic'
self.ICON_DOWNLOADING = 'go-down'
self.ICON_DELETED = 'edit-delete'
self.ICON_ERROR = 'dialog-error'
self.background_update = None
self.background_update_tag = None
if 'KDE_FULL_SESSION' in os.environ:
# Workaround until KDE adds all the freedesktop icons
# See https://bugs.kde.org/show_bug.cgi?id=233505 and
# http://gpodder.org/bug/553
self.ICON_DELETED = 'archive-remove'
# Caching config values is faster than accessing them directly from config.ui.gtk.episode_list.*
# and is easier to maintain than threading them through every method call.
self._config_ui_gtk_episode_list_always_show_new = False
self._config_ui_gtk_episode_list_trim_title_prefix = False
self._config_ui_gtk_episode_list_descriptions = False
self._config_ui_gtk_episode_list_show_released_time = False
def cache_config(self, config):
self._config_ui_gtk_episode_list_always_show_new = config.ui.gtk.episode_list.always_show_new
self._config_ui_gtk_episode_list_trim_title_prefix = config.ui.gtk.episode_list.trim_title_prefix
self._config_ui_gtk_episode_list_descriptions = config.ui.gtk.episode_list.descriptions
self._config_ui_gtk_episode_list_show_released_time = config.ui.gtk.episode_list.show_released_time
def _format_filesize(self, episode):
if episode.file_size > 0:
return util.format_filesize(episode.file_size, digits=1)
else:
return None
def _filter_visible_func(self, model, iterator, misc):
# If searching is active, set visibility based on search text
if self._search_term is not None and self._search_term != '':
episode = model.get_value(iterator, self.C_EPISODE)
if episode is None:
return False
try:
return self._search_term_eql.match(episode)
except Exception:
return True
if self._view_mode == self.VIEW_ALL:
return True
elif self._view_mode == self.VIEW_UNDELETED:
return model.get_value(iterator, self.C_VIEW_SHOW_UNDELETED)
elif self._view_mode == self.VIEW_DOWNLOADED:
return model.get_value(iterator, self.C_VIEW_SHOW_DOWNLOADED)
elif self._view_mode == self.VIEW_UNPLAYED:
return model.get_value(iterator, self.C_VIEW_SHOW_UNPLAYED)
return True
def get_filtered_model(self):
"""Returns a filtered version of this episode model
The filtered version should be displayed in the UI,
as this model can have some filters set that should
be reflected in the UI.
"""
return self._sorter
def has_episodes(self):
"""Returns True if episodes are visible (filtered)
If episodes are visible with the current filter
applied, return True (otherwise return False).
"""
return bool(len(self._filter))
def set_view_mode(self, new_mode):
"""Sets a new view mode for this model
After setting the view mode, the filtered model
might be updated to reflect the new mode."""
if self._view_mode != new_mode:
self._view_mode = new_mode
self._filter.refilter()
self._on_filter_changed(self.has_episodes())
def get_view_mode(self):
"""Returns the currently-set view mode"""
return self._view_mode
def set_search_term(self, new_term):
if self._search_term != new_term:
self._search_term = new_term
self._search_term_eql = query.UserEQL(new_term)
self._filter.refilter()
self._on_filter_changed(self.has_episodes())
def get_search_term(self):
return self._search_term
def _format_description(self, episode):
d = []
title = episode.trimmed_title if self._config_ui_gtk_episode_list_trim_title_prefix else episode.title
if episode.state != gpodder.STATE_DELETED and episode.is_new:
d.append('<b>')
d.append(html.escape(title))
d.append('</b>')
else:
d.append(html.escape(title))
if self._config_ui_gtk_episode_list_descriptions:
d.append('\n')
if self._section_view:
d.append(_('from %s') % html.escape(episode.channel.title))
else:
description = episode.one_line_description()
if description.startswith(title):
description = description[len(title):].strip()
d.append(html.escape(description))
return ''.join(d)
def replace_from_channel(self, channel):
"""
Add the episodes from the given channel to this model, replacing its current contents.
Downloading should be a callback.
"""
# Remove old episodes in the list store
self.clear()
self._section_view = isinstance(channel, PodcastChannelProxy)
# Avoid gPodder bug 1291
if channel is None:
episodes = []
else:
episodes = channel.get_all_episodes()
# Always make a copy, so we can pass the episode list to BackgroundUpdate
episodes = list(episodes)
for _ in range(len(episodes)):
self.append()
self._update_from_episodes(episodes)
def _update_from_episodes(self, episodes):
if self.background_update_tag is not None:
GLib.source_remove(self.background_update_tag)
self.background_update = BackgroundUpdate(self, episodes)
self.background_update_tag = GLib.idle_add(self._update_background)
def _update_background(self):
if self.background_update is not None:
if self.background_update.update():
return True
self.background_update = None
self.background_update_tag = None
self._on_filter_changed(self.has_episodes())
return False
def update_all(self):
if self.background_update is None:
episodes = [row[self.C_EPISODE] for row in self]
else:
# Update all episodes that have already been initialized...
episodes = [row[self.C_EPISODE] for index, row in enumerate(self) if index < self.background_update.index]
# ...and also include episodes that still need to be initialized
episodes.extend(self.background_update.episodes)
self._update_from_episodes(episodes)
def update_by_urls(self, urls):
for row in self:
if row[self.C_URL] in urls:
self.update_by_iter(row.iter)
def update_by_filter_iter(self, iterator):
# Convenience function for use by "outside" methods that use iters
# from the filtered episode list model (i.e. all UI things normally)
iterator = self._sorter.convert_iter_to_child_iter(iterator)
self.update_by_iter(self._filter.convert_iter_to_child_iter(iterator))
def get_update_fields(self, episode):
tooltip = []
status_icon = None
view_show_undeleted = True
view_show_downloaded = False
view_show_unplayed = False
if episode.downloading:
task = episode.download_task
if task.status in (task.PAUSING, task.PAUSED):
tooltip.append('%s %d%%' % (_('Paused'),
int(task.progress * 100)))
status_icon = 'media-playback-pause'
else:
tooltip.append('%s %d%%' % (_('Downloading'),
int(task.progress * 100)))
index = int(self.PROGRESS_STEPS * task.progress)
status_icon = 'gpodder-progress-%d' % index
view_show_downloaded = True
view_show_unplayed = True
else:
if episode.state == gpodder.STATE_DELETED:
tooltip.append(_('Deleted'))
status_icon = self.ICON_DELETED
view_show_undeleted = False
elif episode.state == gpodder.STATE_DOWNLOADED:
view_show_downloaded = True
view_show_unplayed = episode.is_new
file_type = episode.file_type()
if file_type == 'audio':
tooltip.append(_('Downloaded episode'))
status_icon = self.ICON_AUDIO_FILE
elif file_type == 'video':
tooltip.append(_('Downloaded video episode'))
status_icon = self.ICON_VIDEO_FILE
elif file_type == 'image':
tooltip.append(_('Downloaded image'))
status_icon = self.ICON_IMAGE_FILE
else:
tooltip.append(_('Downloaded file'))
status_icon = self.ICON_GENERIC_FILE
if not episode.file_exists():
tooltip.append(_('missing file'))
else:
if episode.is_new:
if file_type in ('audio', 'video'):
tooltip.append(_('never played'))
elif file_type == 'image':
tooltip.append(_('never displayed'))
else:
tooltip.append(_('never opened'))
else:
if file_type in ('audio', 'video'):
tooltip.append(_('played'))
elif file_type == 'image':
tooltip.append(_('displayed'))
else:
tooltip.append(_('opened'))
if episode.archive:
tooltip.append(_('deletion prevented'))
if episode.total_time > 0 and episode.current_position:
tooltip.append('%d%%' % (
100. * float(episode.current_position) / float(episode.total_time)))
elif episode._download_error is not None:
tooltip.append(_('ERROR: %s') % episode._download_error)
status_icon = self.ICON_ERROR
if episode.state == gpodder.STATE_NORMAL and episode.is_new:
view_show_downloaded = self._config_ui_gtk_episode_list_always_show_new
view_show_unplayed = True
elif not episode.url:
tooltip.append(_('No downloadable content'))
status_icon = self.ICON_WEB_BROWSER
if episode.state == gpodder.STATE_NORMAL and episode.is_new:
view_show_downloaded = self._config_ui_gtk_episode_list_always_show_new
view_show_unplayed = True
elif episode.state == gpodder.STATE_NORMAL and episode.is_new:
tooltip.append(_('New episode'))
view_show_downloaded = self._config_ui_gtk_episode_list_always_show_new
view_show_unplayed = True
if episode.total_time:
total_time = util.format_time(episode.total_time)
if total_time:
tooltip.append(total_time)
tooltip = ', '.join(tooltip)
description = self._format_description(episode)
time = episode.get_play_info_string()
filesize = self._format_filesize(episode)
return (
self.C_STATUS_ICON, status_icon,
self.C_VIEW_SHOW_UNDELETED, view_show_undeleted,
self.C_VIEW_SHOW_DOWNLOADED, view_show_downloaded,
self.C_VIEW_SHOW_UNPLAYED, view_show_unplayed,
self.C_DESCRIPTION, description,
self.C_TOOLTIP, tooltip,
self.C_TIME, time,
self.C_TIME_VISIBLE, bool(episode.total_time),
self.C_TOTAL_TIME, episode.total_time,
self.C_LOCKED, episode.archive,
self.C_FILESIZE_TEXT, filesize,
self.C_FILESIZE, episode.file_size,
self.C_TIME_AND_SIZE, "%s\n<small>%s</small>" % (time, filesize if episode.file_size > 0 else ""),
self.C_TOTAL_TIME_AND_SIZE, episode.total_time,
self.C_FILESIZE_AND_TIME_TEXT, "%s\n<small>%s</small>" % (filesize if episode.file_size > 0 else "", time),
self.C_FILESIZE_AND_TIME, episode.file_size,
)
def update_by_iter(self, iterator):
episode = self.get_value(iterator, self.C_EPISODE)
if episode is not None:
self.set(iterator, *self.get_update_fields(episode))
class PodcastChannelProxy:
""" a bag of podcasts: 'All Episodes' or each section """
def __init__(self, db, config, channels, section, model):
self.ALL_EPISODES_PROXY = not bool(section)
self._db = db
self._config = config
self.channels = channels
if self.ALL_EPISODES_PROXY:
self.title = _('All episodes')
self.description = _('from all podcasts')
self.url = ''
self.cover_file = coverart.CoverDownloader.ALL_EPISODES_ID
else:
self.title = section
self.description = ''
self.url = '-'
self.cover_file = None
# self.parse_error = ''
self.section = section
self.id = None
self.cover_url = None
self.auth_username = None
self.auth_password = None
self.pause_subscription = False
self.sync_to_mp3_player = False
self.cover_thumb = None
self.auto_archive_episodes = False
self.model = model
self._update_error = None
def get_statistics(self):
if self.ALL_EPISODES_PROXY:
# Get the total statistics for all channels from the database
return self._db.get_podcast_statistics()
else:
# Calculate the stats over all podcasts of this section
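# e.g. two channels reporting (10, 1, 2, 3, 4) and (5, 0, 1, 2, 3) are
# zipped column-wise and summed to (15, 1, 3, 5, 7).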
if len(self.channels) == 0:
total = deleted = new = downloaded = unplayed = 0
else:
total, deleted, new, downloaded, unplayed = list(map(sum,
list(zip(*[c.get_statistics() for c in self.channels]))))
return total, deleted, new, downloaded, unplayed
def get_all_episodes(self):
"""Returns a generator that yields every episode"""
if self.model._search_term is not None:
def matches(channel):
columns = (getattr(channel, c) for c in PodcastListModel.SEARCH_ATTRS)
return any((key in c.lower() for c in columns if c is not None))
key = self.model._search_term
else:
def matches(e):
return True
return Model.sort_episodes_by_pubdate((e for c in self.channels if matches(c)
for e in c.get_all_episodes()), True)
def save(self):
pass
class PodcastListModel(Gtk.ListStore):
C_URL, C_TITLE, C_DESCRIPTION, C_PILL, C_CHANNEL, \
C_COVER, C_ERROR, C_PILL_VISIBLE, \
C_VIEW_SHOW_UNDELETED, C_VIEW_SHOW_DOWNLOADED, \
C_VIEW_SHOW_UNPLAYED, C_HAS_EPISODES, C_SEPARATOR, \
C_DOWNLOADS, C_COVER_VISIBLE, C_SECTION = list(range(16))
SEARCH_COLUMNS = (C_TITLE, C_DESCRIPTION, C_SECTION)
SEARCH_ATTRS = ('title', 'description', 'group_by')
@classmethod
def row_separator_func(cls, model, iterator):
return model.get_value(iterator, cls.C_SEPARATOR)
def __init__(self, cover_downloader):
Gtk.ListStore.__init__(self, str, str, str, GdkPixbuf.Pixbuf,
object, GdkPixbuf.Pixbuf, str, bool, bool, bool, bool,
bool, bool, int, bool, str)
# Filter to allow hiding some episodes
self._filter = self.filter_new()
self._view_mode = -1
self._search_term = None
self._filter.set_visible_func(self._filter_visible_func)
self._cover_cache = {}
self._max_image_side = 40
self._scale = 1
self._cover_downloader = cover_downloader
self.icon_theme = Gtk.IconTheme.get_default()
self.ICON_DISABLED = 'media-playback-pause'
self.ICON_ERROR = 'dialog-warning'
def _filter_visible_func(self, model, iterator, misc):
channel = model.get_value(iterator, self.C_CHANNEL)
# If searching is active, set visibility based on search text
if self._search_term is not None and self._search_term != '':
key = self._search_term.lower()
if isinstance(channel, PodcastChannelProxy):
if channel.ALL_EPISODES_PROXY:
return False
return any(key in getattr(ch, c).lower() for c in PodcastListModel.SEARCH_ATTRS for ch in channel.channels)
columns = (model.get_value(iterator, c) for c in self.SEARCH_COLUMNS)
return any((key in c.lower() for c in columns if c is not None))
# Show section if any of its channels have an update error
if isinstance(channel, PodcastChannelProxy) and not channel.ALL_EPISODES_PROXY:
if any(c._update_error is not None for c in channel.channels):
return True
if model.get_value(iterator, self.C_SEPARATOR):
return True
elif getattr(channel, '_update_error', None) is not None:
return True
elif self._view_mode == EpisodeListModel.VIEW_ALL:
return model.get_value(iterator, self.C_HAS_EPISODES)
elif self._view_mode == EpisodeListModel.VIEW_UNDELETED:
return model.get_value(iterator, self.C_VIEW_SHOW_UNDELETED)
elif self._view_mode == EpisodeListModel.VIEW_DOWNLOADED:
return model.get_value(iterator, self.C_VIEW_SHOW_DOWNLOADED)
elif self._view_mode == EpisodeListModel.VIEW_UNPLAYED:
return model.get_value(iterator, self.C_VIEW_SHOW_UNPLAYED)
return True
def get_filtered_model(self):
"""Returns a filtered version of this episode model
The filtered version should be displayed in the UI,
as this model can have some filters set that should
be reflected in the UI.
"""
return self._filter
def set_view_mode(self, new_mode):
"""Sets a new view mode for this model
After setting the view mode, the filtered model
might be updated to reflect the new mode."""
if self._view_mode != new_mode:
self._view_mode = new_mode
self._filter.refilter()
def get_view_mode(self):
"""Returns the currently-set view mode"""
return self._view_mode
def set_search_term(self, new_term):
if self._search_term != new_term:
self._search_term = new_term
self._filter.refilter()
def get_search_term(self):
return self._search_term
def set_max_image_size(self, size, scale):
self._max_image_side = size * scale
self._scale = scale
self._cover_cache = {}
def _resize_pixbuf_keep_ratio(self, url, pixbuf):
"""
Resizes a GdkPixbuf to fit the maximum image size, keeping its aspect ratio.
Returns the newly resized pixbuf, or None if the pixbuf
does not need to be resized.
"""
if url in self._cover_cache:
return self._cover_cache[url]
max_side = self._max_image_side
w_cur = pixbuf.get_width()
h_cur = pixbuf.get_height()
if w_cur <= max_side and h_cur <= max_side:
return None
f = max_side / (w_cur if w_cur >= h_cur else h_cur)
w_new = int(w_cur * f)
h_new = int(h_cur * f)
logger.debug("Scaling cover image: url=%s from %ix%i to %ix%i",
url, w_cur, h_cur, w_new, h_new)
pixbuf = pixbuf.scale_simple(w_new, h_new,
GdkPixbuf.InterpType.BILINEAR)
self._cover_cache[url] = pixbuf
return pixbuf
def _resize_pixbuf(self, url, pixbuf):
if pixbuf is None:
return None
return self._resize_pixbuf_keep_ratio(url, pixbuf) or pixbuf
def _overlay_pixbuf(self, pixbuf, icon):
try:
emblem = self.icon_theme.load_icon(icon, self._max_image_side / 2, 0)
(width, height) = (emblem.get_width(), emblem.get_height())
xpos = pixbuf.get_width() - width
ypos = pixbuf.get_height() - height
if ypos < 0:
# need to resize the overlay for non-standard icon sizes
emblem = self.icon_theme.load_icon(icon, pixbuf.get_height() - 1, 0)
(width, height) = (emblem.get_width(), emblem.get_height())
xpos = pixbuf.get_width() - width
ypos = pixbuf.get_height() - height
emblem.composite(pixbuf, xpos, ypos, width, height, xpos, ypos, 1, 1, GdkPixbuf.InterpType.BILINEAR, 255)
except Exception:
pass
return pixbuf
def _get_cached_thumb(self, channel):
if channel.cover_thumb is None:
return None
try:
loader = GdkPixbuf.PixbufLoader()
loader.write(channel.cover_thumb)
loader.close()
pixbuf = loader.get_pixbuf()
if self._max_image_side not in (pixbuf.get_width(), pixbuf.get_height()):
logger.debug("cached thumb wrong size: %r != %i", (pixbuf.get_width(), pixbuf.get_height()), self._max_image_side)
return None
return pixbuf
except Exception:
logger.warning('Could not load cached cover art for %s', channel.url, exc_info=True)
channel.cover_thumb = None
channel.save()
return None
def _save_cached_thumb(self, channel, pixbuf):
bufs = []
def save_callback(buf, length, user_data):
user_data.append(buf)
return True
pixbuf.save_to_callbackv(save_callback, bufs, 'png', [None], [])
channel.cover_thumb = bytes(b''.join(bufs))
channel.save()
def _get_cover_image(self, channel, add_overlay=False, pixbuf_overlay=None):
""" get channel's cover image. Callable from gtk thread.
:param channel: channel model
:param bool add_overlay: True to add a pause/error overlay
:param GdkPixbuf.Pixbux pixbuf_overlay: existing pixbuf if already loaded, as an optimization
:return GdkPixbuf.Pixbux: channel's cover image as pixbuf
"""
if self._cover_downloader is None:
return pixbuf_overlay
if pixbuf_overlay is None: # optimization: we can pass existing pixbuf
pixbuf_overlay = self._get_cached_thumb(channel)
if pixbuf_overlay is None:
# load cover if it's not in cache
pixbuf = self._cover_downloader.get_cover(channel, avoid_downloading=True)
if pixbuf is None:
return None
pixbuf_overlay = self._resize_pixbuf(channel.url, pixbuf)
self._save_cached_thumb(channel, pixbuf_overlay)
if add_overlay:
if getattr(channel, '_update_error', None) is not None:
pixbuf_overlay = self._overlay_pixbuf(pixbuf_overlay, self.ICON_ERROR)
elif channel.pause_subscription:
pixbuf_overlay = self._overlay_pixbuf(pixbuf_overlay, self.ICON_DISABLED)
pixbuf_overlay.saturate_and_pixelate(pixbuf_overlay, 0.0, False)
return pixbuf_overlay
def _get_pill_image(self, channel, count_downloaded, count_unplayed):
if count_unplayed > 0 or count_downloaded > 0:
return draw.draw_pill_pixbuf('{:n}'.format(count_unplayed),
'{:n}'.format(count_downloaded),
widget=self.widget,
scale=self._scale)
else:
return None
def _format_description(self, channel, total, deleted,
new, downloaded, unplayed):
title_markup = html.escape(channel.title)
if channel._update_error is not None:
description_markup = html.escape(_('ERROR: %s') % channel._update_error)
elif not channel.pause_subscription:
description_markup = html.escape(
util.get_first_line(util.remove_html_tags(channel.description)) or ' ')
else:
description_markup = html.escape(_('Subscription paused'))
d = []
if new:
d.append('<span weight="bold">')
d.append(title_markup)
if new:
d.append('</span>')
if channel._update_error is not None:
return ''.join(d + ['\n', '<span weight="bold">', description_markup, '</span>'])
elif description_markup.strip():
return ''.join(d + ['\n', '<small>', description_markup, '</small>'])
else:
return ''.join(d)
def _format_error(self, channel):
# if channel.parse_error:
# return str(channel.parse_error)
# else:
# return None
return None
def set_channels(self, db, config, channels):
# Clear the model and update the list of podcasts
self.clear()
def channel_to_row(channel, add_overlay=False):
# C_URL, C_TITLE, C_DESCRIPTION, C_PILL, C_CHANNEL
return (channel.url, '', '', None, channel,
# C_COVER, C_ERROR, C_PILL_VISIBLE,
self._get_cover_image(channel, add_overlay), '', True,
# C_VIEW_SHOW_UNDELETED, C_VIEW_SHOW_DOWNLOADED,
True, True,
# C_VIEW_SHOW_UNPLAYED, C_HAS_EPISODES, C_SEPARATOR
True, True, False,
# C_DOWNLOADS, C_COVER_VISIBLE, C_SECTION
0, True, '')
def section_to_row(section):
# C_URL, C_TITLE, C_DESCRIPTION, C_PILL, C_CHANNEL
return (section.url, '', '', None, section,
# C_COVER, C_ERROR, C_PILL_VISIBLE,
None, '', True,
# C_VIEW_SHOW_UNDELETED, C_VIEW_SHOW_DOWNLOADED,
True, True,
# C_VIEW_SHOW_UNPLAYED, C_HAS_EPISODES, C_SEPARATOR
True, True, False,
# C_DOWNLOADS, C_COVER_VISIBLE, C_SECTION
0, False, section.title)
if config.ui.gtk.podcast_list.all_episodes and channels:
all_episodes = PodcastChannelProxy(db, config, channels, '', self)
iterator = self.append(channel_to_row(all_episodes))
self.update_by_iter(iterator)
# Separator item
if not config.ui.gtk.podcast_list.sections:
self.append(('', '', '', None, SeparatorMarker, None, '',
True, True, True, True, True, True, 0, False, ''))
def groupby_func(channel):
return channel.group_by
def key_func(channel):
return (channel.group_by, model.Model.podcast_sort_key(channel))
if config.ui.gtk.podcast_list.sections:
groups = groupby(sorted(channels, key=key_func), groupby_func)
else:
groups = [(None, sorted(channels, key=model.Model.podcast_sort_key))]
for section, section_channels in groups:
if config.ui.gtk.podcast_list.sections and section is not None:
section_channels = list(section_channels)
section_obj = PodcastChannelProxy(db, config, section_channels, section, self)
iterator = self.append(section_to_row(section_obj))
self.update_by_iter(iterator)
for channel in section_channels:
iterator = self.append(channel_to_row(channel, True))
self.update_by_iter(iterator)
def get_filter_path_from_url(self, url):
# Return the path of the filtered model for a given URL
child_path = self.get_path_from_url(url)
if child_path is None:
return None
else:
return self._filter.convert_child_path_to_path(child_path)
def get_path_from_url(self, url):
# Return the tree model path for a given URL
if url is None:
return None
for row in self:
if row[self.C_URL] == url:
return row.path
return None
def update_first_row(self):
# Update the first row in the model (for "all episodes" updates)
self.update_by_iter(self.get_iter_first())
def update_by_urls(self, urls):
# Given a list of URLs, update each matching row
for row in self:
if row[self.C_URL] in urls:
self.update_by_iter(row.iter)
def iter_is_first_row(self, iterator):
iterator = self._filter.convert_iter_to_child_iter(iterator)
path = self.get_path(iterator)
return (path == Gtk.TreePath.new_first())
def update_by_filter_iter(self, iterator):
self.update_by_iter(self._filter.convert_iter_to_child_iter(iterator))
def update_all(self):
for row in self:
self.update_by_iter(row.iter)
def update_sections(self):
for row in self:
if isinstance(row[self.C_CHANNEL], PodcastChannelProxy) and not row[self.C_CHANNEL].ALL_EPISODES_PROXY:
self.update_by_iter(row.iter)
def update_by_iter(self, iterator):
if iterator is None:
return
# Given a GtkTreeIter, update volatile information
channel = self.get_value(iterator, self.C_CHANNEL)
if channel is SeparatorMarker:
return
total, deleted, new, downloaded, unplayed = channel.get_statistics()
if isinstance(channel, PodcastChannelProxy) and not channel.ALL_EPISODES_PROXY:
section = channel.title
# We could customize the section header here with the list
# of channels and their stats (e.g. add a "new" indicator)
description = '<b>%s</b>' % (
html.escape(section))
pill_image = None
cover_image = None
else:
description = self._format_description(channel, total, deleted, new,
downloaded, unplayed)
pill_image = self._get_pill_image(channel, downloaded, unplayed)
cover_image = self._get_cover_image(channel, True)
self.set(iterator,
self.C_TITLE, channel.title,
self.C_DESCRIPTION, description,
self.C_COVER, cover_image,
self.C_SECTION, channel.section,
self.C_ERROR, self._format_error(channel),
self.C_PILL, pill_image,
self.C_PILL_VISIBLE, pill_image is not None,
self.C_VIEW_SHOW_UNDELETED, total - deleted > 0,
self.C_VIEW_SHOW_DOWNLOADED, downloaded + new > 0,
self.C_VIEW_SHOW_UNPLAYED, unplayed + new > 0,
self.C_HAS_EPISODES, total > 0,
self.C_DOWNLOADS, downloaded)
def clear_cover_cache(self, podcast_url):
if podcast_url in self._cover_cache:
logger.info('Clearing cover from cache: %s', podcast_url)
del self._cover_cache[podcast_url]
def add_cover_by_channel(self, channel, pixbuf):
if pixbuf is None:
return
# Remove older images from cache
self.clear_cover_cache(channel.url)
# Resize and add the new cover image
pixbuf = self._resize_pixbuf(channel.url, pixbuf)
self._save_cached_thumb(channel, pixbuf)
pixbuf = self._get_cover_image(channel, add_overlay=True, pixbuf_overlay=pixbuf)
for row in self:
if row[self.C_URL] == channel.url:
row[self.C_COVER] = pixbuf
break
| 38,747 | Python | .py | 808 | 36.25 | 130 | 0.591978 | gpodder/gpodder | 1,268 | 204 | 307 | GPL-3.0 | 9/5/2024, 5:10:30 PM (Europe/Amsterdam) |
| 8,322 | main.py | gpodder_gpodder/src/gpodder/gtkui/main.py |
# -*- coding: utf-8 -*-
#
# gPodder - A media aggregator and podcast client
# Copyright (c) 2005-2018 The gPodder Team
#
# gPodder is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# gPodder is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import collections
import html
import logging
import os
import re
import shutil
import sys
import tempfile
import time
import urllib.parse
import dbus.service
import requests.exceptions
import urllib3.exceptions
import gpodder
from gpodder import common, download, feedcore, my, opml, player, util, youtube
from gpodder.dbusproxy import DBusPodcastsProxy
from gpodder.model import Model, PodcastEpisode
from gpodder.syncui import gPodderSyncUI
from . import shownotes
from .desktop.channel import gPodderChannel
from .desktop.episodeselector import gPodderEpisodeSelector
from .desktop.exportlocal import gPodderExportToLocalFolder
from .desktop.podcastdirectory import gPodderPodcastDirectory
from .desktop.welcome import gPodderWelcome
from .desktopfile import UserAppsReader
from .download import DownloadStatusModel
from .draw import (cake_size_from_widget, draw_cake_pixbuf,
draw_iconcell_scale, draw_text_box_centered)
from .interface.addpodcast import gPodderAddPodcast
from .interface.common import (BuilderWidget, Dummy, ExtensionMenuHelper,
TreeViewHelper)
from .interface.progress import ProgressIndicator
from .interface.searchtree import SearchTree
from .model import EpisodeListModel, PodcastChannelProxy, PodcastListModel
from .services import CoverDownloader
import gi # isort:skip
gi.require_version('Gtk', '3.0') # isort:skip
from gi.repository import Gdk, Gio, GLib, Gtk, Pango # isort:skip
logger = logging.getLogger(__name__)
_ = gpodder.gettext
N_ = gpodder.ngettext
class gPodder(BuilderWidget, dbus.service.Object):
def __init__(self, app, bus_name, gpodder_core, options):
dbus.service.Object.__init__(self, object_path=gpodder.dbus_gui_object_path, bus_name=bus_name)
self.podcasts_proxy = DBusPodcastsProxy(lambda: self.channels,
self.on_itemUpdate_activate,
self.playback_episodes,
self.download_episode_list,
self.episode_object_by_uri,
bus_name)
self.application = app
self.core = gpodder_core
self.config = self.core.config
self.db = self.core.db
self.model = self.core.model
self.options = options
self.extensions_actions = []
self._search_podcasts = None
self._search_episodes = None
BuilderWidget.__init__(self, None,
_gtk_properties={('gPodder', 'application'): app})
self.last_episode_date_refresh = None
self.refresh_episode_dates()
self.on_episode_list_selection_changed_id = None
observer = gpodder.config.get_network_proxy_observer(self.config)
self.config.add_observer(observer)
# Trigger the global gpodder.config._proxies observer contraption to initialize it.
observer("network.", None, None)
def new(self):
if self.application.want_headerbar:
self.header_bar = Gtk.HeaderBar()
self.header_bar.pack_end(self.application.header_bar_menu_button)
self.header_bar.pack_start(self.application.header_bar_refresh_button)
self.header_bar.set_show_close_button(True)
self.header_bar.show_all()
# Tweaks to the UI since we moved the refresh button into the header bar
self.vboxChannelNavigator.set_row_spacing(0)
self.main_window.set_titlebar(self.header_bar)
gpodder.user_extensions.on_ui_object_available('gpodder-gtk', self)
self.toolbar.set_property('visible', self.config.ui.gtk.toolbar)
self.bluetooth_available = util.bluetooth_available()
self.config.connect_gtk_window(self.main_window, 'main_window')
self.config.connect_gtk_paned('ui.gtk.state.main_window.paned_position', self.channelPaned)
self.main_window.show()
self.player_receiver = player.MediaPlayerDBusReceiver(self.on_played)
self.gPodder.connect('key-press-event', self.on_key_press)
self.episode_columns_menu = None
self.config.add_observer(self.on_config_changed)
self.shownotes_pane = Gtk.Box()
self.shownotes_object = shownotes.get_shownotes(self.config.ui.gtk.html_shownotes, self.shownotes_pane)
# Vertical paned for the episode list and shownotes
self.vpaned = Gtk.Paned(orientation=Gtk.Orientation.VERTICAL)
paned = self.vbox_episode_list.get_parent()
self.vbox_episode_list.reparent(self.vpaned)
self.vpaned.child_set_property(self.vbox_episode_list, 'resize', True)
self.vpaned.child_set_property(self.vbox_episode_list, 'shrink', False)
self.vpaned.pack2(self.shownotes_pane, resize=False, shrink=False)
self.vpaned.show()
# Minimum height for both episode list and shownotes
self.vbox_episode_list.set_size_request(-1, 100)
self.shownotes_pane.set_size_request(-1, 100)
self.config.connect_gtk_paned('ui.gtk.state.main_window.episode_list_size',
self.vpaned)
paned.add2(self.vpaned)
self.new_episodes_window = None
self.download_status_model = DownloadStatusModel()
self.download_queue_manager = download.DownloadQueueManager(self.config, self.download_status_model)
self.config.connect_gtk_spinbutton('limit.downloads.concurrent', self.spinMaxDownloads,
self.config.limit.downloads.concurrent_max)
self.config.connect_gtk_togglebutton('limit.downloads.enabled', self.cbMaxDownloads)
self.config.connect_gtk_spinbutton('limit.bandwidth.kbps', self.spinLimitDownloads)
self.config.connect_gtk_togglebutton('limit.bandwidth.enabled', self.cbLimitDownloads)
self.spinMaxDownloads.set_sensitive(self.cbMaxDownloads.get_active())
self.spinLimitDownloads.set_sensitive(self.cbLimitDownloads.get_active())
# When the maximum number of concurrent downloads changes, notify the queue manager
def changed_cb(spinbutton):
return self.download_queue_manager.update_max_downloads()
self.spinMaxDownloads.connect('value-changed', changed_cb)
self.cbMaxDownloads.connect('toggled', changed_cb)
# Keep a reference to the last add podcast dialog instance
self._add_podcast_dialog = None
self.default_title = None
self.set_title(_('gPodder'))
self.cover_downloader = CoverDownloader()
# Generate list models for podcasts and their episodes
self.podcast_list_model = PodcastListModel(self.cover_downloader)
self.apply_podcast_list_hide_boring()
self.cover_downloader.register('cover-available', self.cover_download_finished)
# Source IDs for timeouts for search-as-you-type
self._podcast_list_search_timeout = None
self._episode_list_search_timeout = None
# Subscribed channels
self.active_channel = None
self.channels = self.model.get_podcasts()
# For loading the list model
self.episode_list_model = EpisodeListModel(self.on_episode_list_filter_changed)
self.create_actions()
self.releasecell = None
# Init the treeviews that we use
self.init_podcast_list_treeview()
self.init_episode_list_treeview()
self.init_download_list_treeview()
self.download_tasks_seen = set()
self.download_list_update_timer = None
self.things_adding_tasks = 0
self.download_task_monitors = set()
# Set up the first instance of MygPoClient
self.mygpo_client = my.MygPoClient(self.config)
# Extensions section in app menu and menubar Extras menu
extensions_menu = Gio.Menu()
self.application.menu_extras.append_section(_('Extensions'), extensions_menu)
self.extensions_menu_helper = ExtensionMenuHelper(self.gPodder,
extensions_menu, 'extensions.action_',
lambda fun: lambda action, param: fun())
self.extensions_menu_helper.replace_entries(
gpodder.user_extensions.on_create_menu())
gpodder.user_extensions.on_ui_initialized(self.model,
self.extensions_podcast_update_cb,
self.extensions_episode_download_cb)
gpodder.user_extensions.on_application_started()
# Load the list of user applications for audio/video playback
self.user_apps_reader = UserAppsReader(['audio', 'video'])
util.run_in_background(self.user_apps_reader.read)
# Now that everything is in place, update the feed cache
if not self.application.want_headerbar:
self.btnUpdateFeeds.show()
self.feed_cache_update_cancelled = False
self.update_podcast_list_model()
self.partial_downloads_indicator = None
util.run_in_background(self.find_partial_downloads)
# Start the auto-update procedure
self._auto_update_timer_source_id = None
if self.config.auto.update.enabled:
self.restart_auto_update_timer()
# Find expired (old) episodes and delete them
old_episodes = list(common.get_expired_episodes(self.channels, self.config))
if len(old_episodes) > 0:
self.delete_episode_list(old_episodes, confirm=False)
updated_urls = {e.channel.url for e in old_episodes}
self.update_podcast_list_model(updated_urls)
# Do the initial sync with the web service
if self.mygpo_client.can_access_webservice():
util.idle_add(self.mygpo_client.flush, True)
# First-time users should be asked if they want to see the OPML
if self.options.subscribe:
util.idle_add(self.subscribe_to_url, self.options.subscribe)
elif not self.channels:
self.on_itemUpdate_activate()
elif self.config.software_update.check_on_startup:
# Check for software updates from gpodder.org
diff = time.time() - self.config.software_update.last_check
if diff > (60 * 60 * 24) * self.config.software_update.interval:
self.config.software_update.last_check = int(time.time())
if not os.path.exists(gpodder.no_update_check_file):
self.check_for_updates(silent=True)
if self.options.close_after_startup:
logger.warning("Startup done, closing (--close-after-startup)")
self.core.db.close()
sys.exit()
def create_actions(self):
g = self.gPodder
# View
action = Gio.SimpleAction.new_stateful(
'showToolbar', None, GLib.Variant.new_boolean(self.config.ui.gtk.toolbar))
action.connect('activate', self.on_itemShowToolbar_activate)
g.add_action(action)
action = Gio.SimpleAction.new_stateful(
'searchAlwaysVisible', None, GLib.Variant.new_boolean(self.config.ui.gtk.search_always_visible))
action.connect('activate', self.on_item_view_search_always_visible_toggled)
g.add_action(action)
# View Podcast List
action = Gio.SimpleAction.new_stateful(
'viewHideBoringPodcasts', None, GLib.Variant.new_boolean(self.config.ui.gtk.podcast_list.hide_empty))
action.connect('activate', self.on_item_view_hide_boring_podcasts_toggled)
g.add_action(action)
action = Gio.SimpleAction.new_stateful(
'viewShowAllEpisodes', None, GLib.Variant.new_boolean(self.config.ui.gtk.podcast_list.all_episodes))
action.connect('activate', self.on_item_view_show_all_episodes_toggled)
g.add_action(action)
action = Gio.SimpleAction.new_stateful(
'viewShowPodcastSections', None, GLib.Variant.new_boolean(self.config.ui.gtk.podcast_list.sections))
action.connect('activate', self.on_item_view_show_podcast_sections_toggled)
g.add_action(action)
action = Gio.SimpleAction.new_stateful(
'episodeNew', None, GLib.Variant.new_boolean(False))
action.connect('activate', self.on_episode_new_activate)
g.add_action(action)
action = Gio.SimpleAction.new_stateful(
'episodeLock', None, GLib.Variant.new_boolean(False))
action.connect('activate', self.on_episode_lock_activate)
g.add_action(action)
action = Gio.SimpleAction.new_stateful(
'channelAutoArchive', None, GLib.Variant.new_boolean(False))
action.connect('activate', self.on_channel_toggle_lock_activate)
g.add_action(action)
# View Episode List
value = EpisodeListModel.VIEWS[
self.config.ui.gtk.episode_list.view_mode or EpisodeListModel.VIEW_ALL]
action = Gio.SimpleAction.new_stateful(
'viewEpisodes', GLib.VariantType.new('s'),
GLib.Variant.new_string(value))
action.connect('activate', self.on_item_view_episodes_changed)
g.add_action(action)
action = Gio.SimpleAction.new_stateful(
'viewAlwaysShowNewEpisodes', None, GLib.Variant.new_boolean(self.config.ui.gtk.episode_list.always_show_new))
action.connect('activate', self.on_item_view_always_show_new_episodes_toggled)
g.add_action(action)
action = Gio.SimpleAction.new_stateful(
'viewTrimEpisodeTitlePrefix', None, GLib.Variant.new_boolean(self.config.ui.gtk.episode_list.trim_title_prefix))
action.connect('activate', self.on_item_view_trim_episode_title_prefix_toggled)
g.add_action(action)
action = Gio.SimpleAction.new_stateful(
'viewShowEpisodeDescription', None, GLib.Variant.new_boolean(self.config.ui.gtk.episode_list.descriptions))
action.connect('activate', self.on_item_view_show_episode_description_toggled)
g.add_action(action)
action = Gio.SimpleAction.new_stateful(
'viewShowEpisodeReleasedTime', None, GLib.Variant.new_boolean(self.config.ui.gtk.episode_list.show_released_time))
action.connect('activate', self.on_item_view_show_episode_released_time_toggled)
g.add_action(action)
action = Gio.SimpleAction.new_stateful(
'viewRightAlignEpisodeReleasedColumn', None,
GLib.Variant.new_boolean(self.config.ui.gtk.episode_list.right_align_released_column))
action.connect('activate', self.on_item_view_right_align_episode_released_column_toggled)
g.add_action(action)
action = Gio.SimpleAction.new_stateful(
'viewCtrlClickToSortEpisodes', None, GLib.Variant.new_boolean(self.config.ui.gtk.episode_list.ctrl_click_to_sort))
action.connect('activate', self.on_item_view_ctrl_click_to_sort_episodes_toggled)
g.add_action(action)
# Other Menus
action_defs = [
# gPodder
# Podcasts
('update', self.on_itemUpdate_activate),
('downloadAllNew', self.on_itemDownloadAllNew_activate),
('removeOldEpisodes', self.on_itemRemoveOldEpisodes_activate),
('findPodcast', self.on_find_podcast_activate),
# Subscriptions
('discover', self.on_itemImportChannels_activate),
('addChannel', self.on_itemAddChannel_activate),
('removeChannel', self.on_itemRemoveChannel_activate),
('massUnsubscribe', self.on_itemMassUnsubscribe_activate),
('updateChannel', self.on_itemUpdateChannel_activate),
('editChannel', self.on_itemEditChannel_activate),
('importFromFile', self.on_item_import_from_file_activate),
('exportChannels', self.on_itemExportChannels_activate),
('markEpisodesAsOld', self.on_mark_episodes_as_old),
('refreshImage', self.on_itemRefreshCover_activate),
# Episodes
('play', self.on_playback_selected_episodes),
('open', self.on_playback_selected_episodes),
('forceDownload', self.on_force_download_selected_episodes),
('download', self.on_download_selected_episodes),
('pause', self.on_pause_selected_episodes),
('cancel', self.on_item_cancel_download_activate),
('moveUp', self.on_move_selected_items_up),
('moveDown', self.on_move_selected_items_down),
('remove', self.on_remove_from_download_list),
('delete', self.on_delete_activate),
('toggleEpisodeNew', self.on_item_toggle_played_activate),
('toggleEpisodeLock', self.on_item_toggle_lock_activate),
('openEpisodeDownloadFolder', self.on_open_episode_download_folder),
('openChannelDownloadFolder', self.on_open_download_folder),
('selectChannel', self.on_select_channel_of_episode),
('findEpisode', self.on_find_episode_activate),
('toggleShownotes', self.on_shownotes_selected_episodes),
('saveEpisodes', self.on_save_episodes_activate),
('bluetoothEpisodes', self.on_bluetooth_episodes_activate),
# Extras
('sync', self.on_sync_to_device_activate),
]
for name, callback in action_defs:
action = Gio.SimpleAction.new(name, None)
action.connect('activate', callback)
g.add_action(action)
# gPodder
# Podcasts
self.update_action = g.lookup_action('update')
# Subscriptions
self.update_channel_action = g.lookup_action('updateChannel')
self.edit_channel_action = g.lookup_action('editChannel')
# Episodes
self.play_action = g.lookup_action('play')
self.open_action = g.lookup_action('open')
self.force_download_action = g.lookup_action('forceDownload')
self.download_action = g.lookup_action('download')
self.pause_action = g.lookup_action('pause')
self.cancel_action = g.lookup_action('cancel')
self.remove_action = g.lookup_action('remove')
self.delete_action = g.lookup_action('delete')
self.toggle_episode_new_action = g.lookup_action('toggleEpisodeNew')
self.toggle_episode_lock_action = g.lookup_action('toggleEpisodeLock')
self.open_episode_download_folder_action = g.lookup_action('openEpisodeDownloadFolder')
self.select_channel_of_episode_action = g.lookup_action('selectChannel')
self.auto_archive_action = g.lookup_action('channelAutoArchive')
self.bluetooth_episodes_action = g.lookup_action('bluetoothEpisodes')
self.episode_new_action = g.lookup_action('episodeNew')
self.episode_lock_action = g.lookup_action('episodeLock')
self.bluetooth_episodes_action.set_enabled(self.bluetooth_available)
def on_resume_all_infobar_response(self, infobar, response_id):
if response_id == Gtk.ResponseType.OK:
selection = self.treeDownloads.get_selection()
selection.select_all()
selected_tasks = self.downloads_list_get_selection()[0]
selection.unselect_all()
self._for_each_task_set_status(selected_tasks, download.DownloadTask.QUEUED)
self.resume_all_infobar.set_revealed(False)
def find_partial_downloads(self):
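        """Look for partially downloaded files from a previous session in the
        background and offer to resume the corresponding episodes."""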
def start_progress_callback(count):
if count:
self.partial_downloads_indicator = ProgressIndicator(
_('Loading incomplete downloads'),
_('Some episodes have not finished downloading in a previous session.'),
False, self.get_dialog_parent())
self.partial_downloads_indicator.on_message(N_(
'%(count)d partial file', '%(count)d partial files',
count) % {'count': count})
util.idle_add(self.wNotebook.set_current_page, 1)
def progress_callback(title, progress):
self.partial_downloads_indicator.on_message(title)
self.partial_downloads_indicator.on_progress(progress)
self.partial_downloads_indicator.on_tick() # not cancellable
def final_progress_callback():
self.partial_downloads_indicator.on_tick(final=_('Cleaning up...'))
def finish_progress_callback(resumable_episodes):
def offer_resuming():
if resumable_episodes:
self.download_episode_list_paused(resumable_episodes, hide_progress=True)
self.resume_all_infobar.set_revealed(True)
else:
util.idle_add(self.wNotebook.set_current_page, 0)
logger.debug("find_partial_downloads done, calling extensions")
gpodder.user_extensions.on_find_partial_downloads_done()
if self.partial_downloads_indicator:
util.idle_add(self.partial_downloads_indicator.on_finished)
self.partial_downloads_indicator = None
util.idle_add(offer_resuming)
common.find_partial_downloads(self.channels,
start_progress_callback,
progress_callback,
final_progress_callback,
finish_progress_callback)
def episode_object_by_uri(self, uri):
"""Get an episode object given a local or remote URI
This can be used to quickly access an episode object
when all we have is its download filename or episode
URL (e.g. from external D-Bus calls / signals, etc..)
"""
if uri.startswith('/'):
uri = 'file://' + urllib.parse.quote(uri)
prefix = 'file://' + urllib.parse.quote(gpodder.downloads)
if uri.startswith(prefix):
# File is on the local filesystem in the download folder
# Try to reduce search space by pre-selecting the channel
# based on the folder name of the local file
filename = urllib.parse.unquote(uri[len(prefix):])
file_parts = [_f for _f in filename.split(os.sep) if _f]
if len(file_parts) != 2:
return None
foldername, filename = file_parts
def is_channel(c):
return c.download_folder == foldername
def is_episode(e):
return e.download_filename == filename
else:
# By default, assume we can't pre-select any channel
# but can match episodes simply via the download URL
def is_channel(c):
return True
def is_episode(e):
return e.url == uri
# Deep search through channels and episodes for a match
for channel in filter(is_channel, self.channels):
for episode in filter(is_episode, channel.get_all_episodes()):
return episode
return None
def in_downloads_list(self):
return self.wNotebook.get_current_page() == 1
def on_played(self, start, end, total, file_uri):
"""Handle the "played" signal from a media player"""
if start == 0 and end == 0 and total == 0:
# Ignore bogus play event
return
elif end < start + 5:
# Ignore "less than five seconds" segments,
# as they can happen with seeking, etc...
return
logger.debug('Received play action: %s (%d, %d, %d)', file_uri, start, end, total)
episode = self.episode_object_by_uri(file_uri)
if episode is not None:
now = time.time()
if total > 0:
episode.total_time = total
elif total == 0:
# Assume the episode's total time for the action
total = episode.total_time
assert (episode.current_position_updated is None
or now >= episode.current_position_updated)
episode.current_position = end
episode.current_position_updated = now
episode.mark(is_played=True)
episode.save()
self.episode_list_status_changed([episode])
# Submit this action to the webservice
self.mygpo_client.on_playback_full(episode, start, end, total)
def on_add_remove_podcasts_mygpo(self):
actions = self.mygpo_client.get_received_actions()
if not actions:
return False
existing_urls = [c.url for c in self.channels]
# Columns for the episode selector window - just one...
columns = (
('description', None, None, _('Action')),
)
# A list of actions that have to be chosen from
changes = []
# Actions that are ignored (already carried out)
ignored = []
for action in actions:
if action.is_add and action.url not in existing_urls:
changes.append(my.Change(action))
elif action.is_remove and action.url in existing_urls:
podcast_object = None
for podcast in self.channels:
if podcast.url == action.url:
podcast_object = podcast
break
changes.append(my.Change(action, podcast_object))
else:
ignored.append(action)
# Confirm all ignored changes
self.mygpo_client.confirm_received_actions(ignored)
def execute_podcast_actions(selected):
# In the future, we might retrieve the title from gpodder.net here,
# but for now, we just use "None" to use the feed-provided title
title = None
add_list = [(title, c.action.url)
for c in selected if c.action.is_add]
remove_list = [c.podcast for c in selected if c.action.is_remove]
# Apply the accepted changes locally
self.add_podcast_list(add_list)
self.remove_podcast_list(remove_list, confirm=False)
# All selected items are now confirmed
self.mygpo_client.confirm_received_actions(c.action for c in selected)
# Revert the changes on the server
rejected = [c.action for c in changes if c not in selected]
self.mygpo_client.reject_received_actions(rejected)
def ask():
# We're abusing the Episode Selector again ;) -- thp
gPodderEpisodeSelector(self.main_window,
title=_('Confirm changes from gpodder.net'),
instructions=_('Select the actions you want to carry out.'),
episodes=changes,
columns=columns,
size_attribute=None,
ok_button=_('A_pply'),
callback=execute_podcast_actions,
_config=self.config)
# There are some actions that need the user's attention
if changes:
util.idle_add(ask)
return True
# We have no remaining actions - no selection happens
return False
def rewrite_urls_mygpo(self):
# Check if we have to rewrite URLs since the last add
rewritten_urls = self.mygpo_client.get_rewritten_urls()
changed = False
for rewritten_url in rewritten_urls:
if not rewritten_url.new_url:
continue
for channel in self.channels:
if channel.url == rewritten_url.old_url:
logger.info('Updating URL of %s to %s', channel,
rewritten_url.new_url)
channel.url = rewritten_url.new_url
channel.save()
changed = True
break
if changed:
util.idle_add(self.update_episode_list_model)
def on_send_full_subscriptions(self):
# Send the full subscription list to the gpodder.net client
# (this will overwrite the subscription list on the server)
indicator = ProgressIndicator(_('Uploading subscriptions'),
_('Your subscriptions are being uploaded to the server.'),
False, self.get_dialog_parent())
try:
self.mygpo_client.set_subscriptions([c.url for c in self.channels])
util.idle_add(self.show_message, _('List uploaded successfully.'))
except Exception as e:
def show_error(e):
message = str(e)
if not message:
message = e.__class__.__name__
if message == 'NotFound':
message = _(
'Could not find your device.\n'
'\n'
'Check login is a username (not an email)\n'
'and that the device name matches one in your account.'
)
self.show_message(html.escape(message),
_('Error while uploading'),
important=True)
util.idle_add(show_error, e)
indicator.on_finished()
def on_button_subscribe_clicked(self, button):
self.on_itemImportChannels_activate(button)
def on_button_downloads_clicked(self, widget):
self.downloads_window.show()
def on_treeview_button_pressed(self, treeview, event):
if event.window != treeview.get_bin_window():
return False
role = getattr(treeview, TreeViewHelper.ROLE)
if role == TreeViewHelper.ROLE_EPISODES and event.button == 1:
# Toggle episode "new" status by clicking the icon (bug 1432)
result = treeview.get_path_at_pos(int(event.x), int(event.y))
if result is not None:
path, column, x, y = result
# The user clicked the icon if she clicked in the first column
# and the x position is in the area where the icon resides
if (x < self.EPISODE_LIST_ICON_WIDTH
and column == treeview.get_columns()[0]):
model = treeview.get_model()
cursor_episode = model.get_value(model.get_iter(path),
EpisodeListModel.C_EPISODE)
new_value = cursor_episode.is_new
selected_episodes = self.get_selected_episodes()
# Avoid changing anything if the clicked episode is not
# selected already - otherwise update all selected
if cursor_episode in selected_episodes:
for episode in selected_episodes:
episode.mark(is_played=new_value)
self.update_episode_list_icons(selected=True)
self.update_podcast_list_model(selected=True)
return True
return event.button == 3
def on_treeview_channels_button_released(self, treeview, event):
if event.window != treeview.get_bin_window():
return False
return self.treeview_channels_show_context_menu(event)
def on_treeview_channels_long_press(self, gesture, x, y, treeview):
ev = Dummy(x=x, y=y, button=3)
return self.treeview_channels_show_context_menu(ev)
def on_treeview_episodes_button_released(self, treeview, event):
if event.window != treeview.get_bin_window():
return False
return self.treeview_available_show_context_menu(event)
def on_treeview_episodes_long_press(self, gesture, x, y, treeview):
ev = Dummy(x=x, y=y, button=3)
return self.treeview_available_show_context_menu(ev)
def on_treeview_downloads_button_released(self, treeview, event):
if event.window != treeview.get_bin_window():
return False
return self.treeview_downloads_show_context_menu(event)
def on_treeview_downloads_long_press(self, gesture, x, y, treeview):
ev = Dummy(x=x, y=y, button=3)
return self.treeview_downloads_show_context_menu(ev)
def on_find_podcast_activate(self, *args):
if self._search_podcasts:
self._search_podcasts.show_search()
def init_podcast_list_treeview(self):
size = cake_size_from_widget(self.treeChannels) * 2
scale = self.treeChannels.get_scale_factor()
self.podcast_list_model.set_max_image_size(size, scale)
# Set up podcast channel tree view widget
column = Gtk.TreeViewColumn('')
iconcell = Gtk.CellRendererPixbuf()
iconcell.set_property('width', size + 10)
column.pack_start(iconcell, False)
column.add_attribute(iconcell, 'pixbuf', PodcastListModel.C_COVER)
column.add_attribute(iconcell, 'visible', PodcastListModel.C_COVER_VISIBLE)
if scale != 1:
column.set_cell_data_func(iconcell, draw_iconcell_scale, scale)
namecell = Gtk.CellRendererText()
namecell.set_property('ellipsize', Pango.EllipsizeMode.END)
column.pack_start(namecell, True)
column.add_attribute(namecell, 'markup', PodcastListModel.C_DESCRIPTION)
iconcell = Gtk.CellRendererPixbuf()
iconcell.set_property('xalign', 1.0)
column.pack_start(iconcell, False)
column.add_attribute(iconcell, 'pixbuf', PodcastListModel.C_PILL)
column.add_attribute(iconcell, 'visible', PodcastListModel.C_PILL_VISIBLE)
if scale != 1:
column.set_cell_data_func(iconcell, draw_iconcell_scale, scale)
self.treeChannels.append_column(column)
self.treeChannels.set_model(self.podcast_list_model.get_filtered_model())
self.podcast_list_model.widget = self.treeChannels
# Set up channels context menu
menu = self.application.builder.get_object('channels-context')
# Extensions section, updated in signal handler
extmenu = Gio.Menu()
menu.insert_section(4, _('Extensions'), extmenu)
self.channel_context_menu_helper = ExtensionMenuHelper(
self.gPodder, extmenu, 'channel_context_action_')
self.channels_popover = Gtk.Popover.new_from_model(self.treeChannels, menu)
self.channels_popover.set_position(Gtk.PositionType.BOTTOM)
self.channels_popover.connect(
'closed', lambda popover: self.allow_tooltips(True))
# Long press gesture
lp = Gtk.GestureLongPress.new(self.treeChannels)
lp.set_touch_only(True)
lp.set_propagation_phase(Gtk.PropagationPhase.CAPTURE)
lp.connect("pressed", self.on_treeview_channels_long_press, self.treeChannels)
setattr(self.treeChannels, "long-press-gesture", lp)
# Set up type-ahead find for the podcast list
def on_key_press(treeview, event):
if event.keyval == Gdk.KEY_Right:
self.treeAvailable.grab_focus()
elif event.keyval in (Gdk.KEY_Up, Gdk.KEY_Down):
# If section markers exist in the treeview, we want to
# "jump over" them when moving the cursor up and down
if event.keyval == Gdk.KEY_Up:
step = -1
else:
step = 1
selection = self.treeChannels.get_selection()
model, it = selection.get_selected()
if it is None:
it = model.get_iter_first()
if it is None:
return False
step = 1
path = model.get_path(it)
path = (path[0] + step,)
if path[0] < 0:
# Valid paths must have a value >= 0
return True
try:
it = model.get_iter(path)
except ValueError:
# Already at the end of the list
return True
self.treeChannels.set_cursor(path)
elif event.keyval == Gdk.KEY_Escape:
self._search_podcasts.hide_search()
elif event.get_state() & Gdk.ModifierType.CONTROL_MASK:
# Don't handle type-ahead when control is pressed (so shortcuts
# with the Ctrl key still work, e.g. Ctrl+A, ...)
return True
elif event.keyval == Gdk.KEY_Delete:
return False
elif event.keyval == Gdk.KEY_Menu:
self.treeview_channels_show_context_menu()
return True
else:
unicode_char_id = Gdk.keyval_to_unicode(event.keyval)
# < 32 to intercept Delete and Tab events
if unicode_char_id < 32:
return False
if self.config.ui.gtk.find_as_you_type:
input_char = chr(unicode_char_id)
self._search_podcasts.show_search(input_char)
return True
self.treeChannels.connect('key-press-event', on_key_press)
        self.treeChannels.connect('popup-menu',
                                  lambda _tv, *args: self.treeview_channels_show_context_menu())
# Enable separators to the podcast list to separate special podcasts
# from others (this is used for the "all episodes" view)
self.treeChannels.set_row_separator_func(PodcastListModel.row_separator_func)
TreeViewHelper.set(self.treeChannels, TreeViewHelper.ROLE_PODCASTS)
self._search_podcasts = SearchTree(self.hbox_search_podcasts,
self.entry_search_podcasts,
self.treeChannels,
self.podcast_list_model,
self.config)
if self.config.ui.gtk.search_always_visible:
self._search_podcasts.show_search(grab_focus=False)
def on_find_episode_activate(self, *args):
if self._search_episodes:
self._search_episodes.show_search()
def set_episode_list_column(self, index, new_value):
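        """Show or hide an episode list column by toggling its bit in the
        ui.gtk.episode_list.columns bitmask."""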
mask = (1 << index)
if new_value:
self.config.ui.gtk.episode_list.columns |= mask
else:
self.config.ui.gtk.episode_list.columns &= ~mask
def update_episode_list_columns_visibility(self):
columns = TreeViewHelper.get_columns(self.treeAvailable)
for index, column in enumerate(columns):
visible = bool(self.config.ui.gtk.episode_list.columns & (1 << index))
column.set_visible(visible)
self.view_column_actions[index].set_state(GLib.Variant.new_boolean(visible))
self.treeAvailable.columns_autosize()
def on_episode_list_header_reordered(self, treeview):
self.config.ui.gtk.state.main_window.episode_column_order = \
[column.get_sort_column_id() for column in treeview.get_columns()]
def on_episode_list_header_sorted(self, column):
self.config.ui.gtk.state.main_window.episode_column_sort_id = column.get_sort_column_id()
self.config.ui.gtk.state.main_window.episode_column_sort_order = \
(column.get_sort_order() is Gtk.SortType.ASCENDING)
def on_episode_list_header_clicked(self, button, event):
if event.button == 1:
# Require control click to sort episodes, when enabled
if self.config.ui.gtk.episode_list.ctrl_click_to_sort and (event.state & Gdk.ModifierType.CONTROL_MASK) == 0:
return True
elif event.button == 3:
if self.episode_columns_menu is not None:
self.episode_columns_menu.popup(None, None, None, None, event.button, event.time)
return False
def align_releasecell(self):
if self.config.ui.gtk.episode_list.right_align_released_column:
self.releasecell.set_property('xalign', 1)
self.releasecell.set_property('alignment', Pango.Alignment.RIGHT)
else:
self.releasecell.set_property('xalign', 0)
self.releasecell.set_property('alignment', Pango.Alignment.LEFT)
def init_episode_list_treeview(self):
self.episode_list_model.set_view_mode(self.config.ui.gtk.episode_list.view_mode)
# Set up episode context menu
menu = self.application.builder.get_object('episodes-context')
# Extensions section, updated dynamically
extmenu = Gio.Menu()
menu.insert_section(2, _('Extensions'), extmenu)
self.episode_context_menu_helper = ExtensionMenuHelper(
self.gPodder, extmenu, 'episode_context_action_')
# Send To submenu section, shown only for downloaded episodes
self.sendto_menu = Gio.Menu()
menu.insert_section(2, None, self.sendto_menu)
self.episodes_popover = Gtk.Popover.new_from_model(self.treeAvailable, menu)
self.episodes_popover.set_position(Gtk.PositionType.BOTTOM)
self.episodes_popover.connect(
'closed', lambda popover: self.allow_tooltips(True))
# Initialize progress icons
cake_size = cake_size_from_widget(self.treeAvailable)
for i in range(EpisodeListModel.PROGRESS_STEPS + 1):
pixbuf = draw_cake_pixbuf(
i / EpisodeListModel.PROGRESS_STEPS, size=cake_size)
icon_name = 'gpodder-progress-%d' % i
Gtk.IconTheme.add_builtin_icon(icon_name, cake_size, pixbuf)
self.treeAvailable.set_model(self.episode_list_model.get_filtered_model())
TreeViewHelper.set(self.treeAvailable, TreeViewHelper.ROLE_EPISODES)
iconcell = Gtk.CellRendererPixbuf()
episode_list_icon_size = Gtk.icon_size_register('episode-list',
cake_size, cake_size)
iconcell.set_property('stock-size', episode_list_icon_size)
iconcell.set_fixed_size(cake_size + 20, -1)
self.EPISODE_LIST_ICON_WIDTH = cake_size
namecell = Gtk.CellRendererText()
namecell.set_property('ellipsize', Pango.EllipsizeMode.END)
namecolumn = Gtk.TreeViewColumn(_('Episode'))
namecolumn.pack_start(iconcell, False)
namecolumn.add_attribute(iconcell, 'icon-name', EpisodeListModel.C_STATUS_ICON)
namecolumn.pack_start(namecell, True)
namecolumn.add_attribute(namecell, 'markup', EpisodeListModel.C_DESCRIPTION)
namecolumn.set_sort_column_id(EpisodeListModel.C_DESCRIPTION)
namecolumn.set_sizing(Gtk.TreeViewColumnSizing.AUTOSIZE)
namecolumn.set_resizable(True)
namecolumn.set_expand(True)
lockcell = Gtk.CellRendererPixbuf()
lockcell.set_fixed_size(40, -1)
lockcell.set_property('stock-size', Gtk.IconSize.MENU)
lockcell.set_property('icon-name', 'emblem-readonly')
namecolumn.pack_start(lockcell, False)
namecolumn.add_attribute(lockcell, 'visible', EpisodeListModel.C_LOCKED)
sizecell = Gtk.CellRendererText()
sizecell.set_property('xalign', 1)
sizecolumn = Gtk.TreeViewColumn(_('Size'), sizecell, text=EpisodeListModel.C_FILESIZE_TEXT)
sizecolumn.set_sort_column_id(EpisodeListModel.C_FILESIZE)
timecell = Gtk.CellRendererText()
timecell.set_property('xalign', 1)
timecolumn = Gtk.TreeViewColumn(_('Duration'), timecell, text=EpisodeListModel.C_TIME)
timecolumn.set_sort_column_id(EpisodeListModel.C_TOTAL_TIME)
self.releasecell = Gtk.CellRendererText()
self.align_releasecell()
releasecolumn = Gtk.TreeViewColumn(_('Released'))
releasecolumn.pack_start(self.releasecell, True)
releasecolumn.add_attribute(self.releasecell, 'markup', EpisodeListModel.C_PUBLISHED_TEXT)
releasecolumn.set_sort_column_id(EpisodeListModel.C_PUBLISHED)
sizetimecell = Gtk.CellRendererText()
sizetimecell.set_property('xalign', 1)
sizetimecell.set_property('alignment', Pango.Alignment.RIGHT)
sizetimecolumn = Gtk.TreeViewColumn(_('Size+'))
sizetimecolumn.pack_start(sizetimecell, True)
sizetimecolumn.add_attribute(sizetimecell, 'markup', EpisodeListModel.C_FILESIZE_AND_TIME_TEXT)
sizetimecolumn.set_sort_column_id(EpisodeListModel.C_FILESIZE_AND_TIME)
timesizecell = Gtk.CellRendererText()
timesizecell.set_property('xalign', 1)
timesizecell.set_property('alignment', Pango.Alignment.RIGHT)
timesizecolumn = Gtk.TreeViewColumn(_('Duration+'))
timesizecolumn.pack_start(timesizecell, True)
timesizecolumn.add_attribute(timesizecell, 'markup', EpisodeListModel.C_TIME_AND_SIZE)
timesizecolumn.set_sort_column_id(EpisodeListModel.C_TOTAL_TIME_AND_SIZE)
namecolumn.set_reorderable(True)
self.treeAvailable.append_column(namecolumn)
# EpisodeListModel.C_PUBLISHED is not available in config.py, set it here on first run
if not self.config.ui.gtk.state.main_window.episode_column_sort_id:
self.config.ui.gtk.state.main_window.episode_column_sort_id = EpisodeListModel.C_PUBLISHED
for itemcolumn in (sizecolumn, timecolumn, releasecolumn, sizetimecolumn, timesizecolumn):
itemcolumn.set_reorderable(True)
self.treeAvailable.append_column(itemcolumn)
TreeViewHelper.register_column(self.treeAvailable, itemcolumn)
# Add context menu to all tree view column headers
for column in self.treeAvailable.get_columns():
label = Gtk.Label(label=column.get_title())
label.show_all()
column.set_widget(label)
w = column.get_widget()
while w is not None and not isinstance(w, Gtk.Button):
w = w.get_parent()
w.connect('button-release-event', self.on_episode_list_header_clicked)
# Restore column sorting
if column.get_sort_column_id() == self.config.ui.gtk.state.main_window.episode_column_sort_id:
self.episode_list_model._sorter.set_sort_column_id(Gtk.TREE_SORTABLE_UNSORTED_SORT_COLUMN_ID,
Gtk.SortType.DESCENDING)
self.episode_list_model._sorter.set_sort_column_id(column.get_sort_column_id(),
Gtk.SortType.ASCENDING if self.config.ui.gtk.state.main_window.episode_column_sort_order
else Gtk.SortType.DESCENDING)
# Save column sorting when user clicks column headers
column.connect('clicked', self.on_episode_list_header_sorted)
def restore_column_ordering():
prev_column = None
for col in self.config.ui.gtk.state.main_window.episode_column_order:
for column in self.treeAvailable.get_columns():
if col is column.get_sort_column_id():
break
else:
# Column ID not found, abort
# Manually re-ordering columns should fix the corrupt setting
break
self.treeAvailable.move_column_after(column, prev_column)
prev_column = column
# Save column ordering when user drags column headers
self.treeAvailable.connect('columns-changed', self.on_episode_list_header_reordered)
# Delay column ordering until shown to prevent "Negative content height" warnings for themes with vertical padding or borders
util.idle_add(restore_column_ordering)
# For each column that can be shown/hidden, add a menu item
self.view_column_actions = []
columns = TreeViewHelper.get_columns(self.treeAvailable)
def on_visible_toggled(action, param, index):
state = action.get_state()
self.set_episode_list_column(index, not state)
action.set_state(GLib.Variant.new_boolean(not state))
for index, column in enumerate(columns):
name = 'showColumn%i' % index
action = Gio.SimpleAction.new_stateful(
name, None, GLib.Variant.new_boolean(False))
action.connect('activate', on_visible_toggled, index)
self.main_window.add_action(action)
self.view_column_actions.append(action)
self.application.menu_view_columns.insert(index, column.get_title(), 'win.' + name)
self.episode_columns_menu = Gtk.Menu.new_from_model(self.application.menu_view_columns)
self.episode_columns_menu.attach_to_widget(self.main_window)
# Update the visibility of the columns and the check menu items
self.update_episode_list_columns_visibility()
# Long press gesture
lp = Gtk.GestureLongPress.new(self.treeAvailable)
lp.set_touch_only(True)
lp.set_propagation_phase(Gtk.PropagationPhase.CAPTURE)
lp.connect("pressed", self.on_treeview_episodes_long_press, self.treeAvailable)
setattr(self.treeAvailable, "long-press-gesture", lp)
# Set up type-ahead find for the episode list
def on_key_press(treeview, event):
if event.keyval == Gdk.KEY_Left:
self.treeChannels.grab_focus()
elif event.keyval == Gdk.KEY_Escape:
if self.hbox_search_episodes.get_property('visible'):
self._search_episodes.hide_search()
else:
self.shownotes_object.hide_pane()
elif event.keyval == Gdk.KEY_Menu:
self.treeview_available_show_context_menu()
elif event.get_state() & Gdk.ModifierType.CONTROL_MASK:
# Don't handle type-ahead when control is pressed (so shortcuts
# with the Ctrl key still work, e.g. Ctrl+A, ...)
return False
else:
unicode_char_id = Gdk.keyval_to_unicode(event.keyval)
# < 32 to intercept Delete and Tab events
if unicode_char_id < 32:
return False
if self.config.ui.gtk.find_as_you_type:
input_char = chr(unicode_char_id)
self._search_episodes.show_search(input_char)
return True
self.treeAvailable.connect('key-press-event', on_key_press)
        self.treeAvailable.connect('popup-menu',
                                   lambda _tv, *args: self.treeview_available_show_context_menu())
self.treeAvailable.enable_model_drag_source(Gdk.ModifierType.BUTTON1_MASK,
(('text/uri-list', 0, 0),), Gdk.DragAction.COPY)
def drag_data_get(tree, context, selection_data, info, timestamp):
uris = ['file://' + urllib.parse.quote(e.local_filename(create=False))
for e in self.get_selected_episodes()
if e.was_downloaded(and_exists=True)]
selection_data.set_uris(uris)
self.treeAvailable.connect('drag-data-get', drag_data_get)
selection = self.treeAvailable.get_selection()
selection.set_mode(Gtk.SelectionMode.MULTIPLE)
self.episode_selection_handler_id = selection.connect('changed', self.on_episode_list_selection_changed)
self._search_episodes = SearchTree(self.hbox_search_episodes,
self.entry_search_episodes,
self.treeAvailable,
self.episode_list_model,
self.config)
if self.config.ui.gtk.search_always_visible:
self._search_episodes.show_search(grab_focus=False)
def on_episode_list_selection_changed(self, selection):
# Only update the UI every 250ms to prevent lag when rapidly changing selected episode or shift-selecting episodes
if self.on_episode_list_selection_changed_id is None:
self.on_episode_list_selection_changed_id = util.idle_timeout_add(250, self._on_episode_list_selection_changed)
def _on_episode_list_selection_changed(self):
self.on_episode_list_selection_changed_id = None
# Update the toolbar buttons
self.play_or_download()
# and the shownotes
self.shownotes_object.set_episodes(self.get_selected_episodes())
def on_download_list_selection_changed(self, selection):
if self.in_downloads_list():
# Update the toolbar buttons
self.play_or_download()
def init_download_list_treeview(self):
# columns and renderers for "download progress" tab
# First column: [ICON] Episodename
column = Gtk.TreeViewColumn(_('Episode'))
cell = Gtk.CellRendererPixbuf()
cell.set_property('stock-size', Gtk.IconSize.BUTTON)
column.pack_start(cell, False)
column.add_attribute(cell, 'icon-name',
DownloadStatusModel.C_ICON_NAME)
cell = Gtk.CellRendererText()
cell.set_property('ellipsize', Pango.EllipsizeMode.END)
column.pack_start(cell, True)
column.add_attribute(cell, 'markup', DownloadStatusModel.C_NAME)
column.set_sizing(Gtk.TreeViewColumnSizing.AUTOSIZE)
column.set_expand(True)
self.treeDownloads.append_column(column)
# Second column: Progress
cell = Gtk.CellRendererProgress()
cell.set_property('yalign', .5)
cell.set_property('ypad', 6)
column = Gtk.TreeViewColumn(_('Progress'), cell,
value=DownloadStatusModel.C_PROGRESS,
text=DownloadStatusModel.C_PROGRESS_TEXT)
column.set_sizing(Gtk.TreeViewColumnSizing.AUTOSIZE)
column.set_expand(False)
self.treeDownloads.append_column(column)
column.set_property('min-width', 150)
column.set_property('max-width', 150)
self.treeDownloads.set_model(self.download_status_model)
TreeViewHelper.set(self.treeDownloads, TreeViewHelper.ROLE_DOWNLOADS)
# enable multiple selection support
selection = self.treeDownloads.get_selection()
selection.set_mode(Gtk.SelectionMode.MULTIPLE)
self.download_selection_handler_id = selection.connect('changed', self.on_download_list_selection_changed)
self.treeDownloads.set_search_equal_func(TreeViewHelper.make_search_equal_func(DownloadStatusModel))
# Set up downloads context menu
menu = self.application.builder.get_object('downloads-context')
self.downloads_popover = Gtk.Popover.new_from_model(self.treeDownloads, menu)
self.downloads_popover.set_position(Gtk.PositionType.BOTTOM)
# Long press gesture
lp = Gtk.GestureLongPress.new(self.treeDownloads)
lp.set_touch_only(True)
lp.set_propagation_phase(Gtk.PropagationPhase.CAPTURE)
lp.connect("pressed", self.on_treeview_downloads_long_press, self.treeDownloads)
setattr(self.treeDownloads, "long-press-gesture", lp)
def on_key_press(treeview, event):
if event.keyval == Gdk.KEY_Menu:
self.treeview_downloads_show_context_menu()
return True
return False
self.treeDownloads.connect('key-press-event', on_key_press)
        self.treeDownloads.connect('popup-menu',
                                   lambda _tv, *args: self.treeview_downloads_show_context_menu())
def on_treeview_expose_event(self, treeview, ctx):
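        """Draw a placeholder message in empty tree views (no episodes,
        no subscriptions or no active tasks)."""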
model = treeview.get_model()
if (model is not None and model.get_iter_first() is not None):
return False
role = getattr(treeview, TreeViewHelper.ROLE, None)
if role is None:
return False
width = treeview.get_allocated_width()
height = treeview.get_allocated_height()
if role == TreeViewHelper.ROLE_EPISODES:
if self.config.ui.gtk.episode_list.view_mode != EpisodeListModel.VIEW_ALL:
text = _('No episodes in current view')
else:
text = _('No episodes available')
elif role == TreeViewHelper.ROLE_PODCASTS:
if self.config.ui.gtk.episode_list.view_mode != \
EpisodeListModel.VIEW_ALL and \
self.config.ui.gtk.podcast_list.hide_empty and \
len(self.channels) > 0:
text = _('No podcasts in this view')
else:
text = _('No subscriptions')
elif role == TreeViewHelper.ROLE_DOWNLOADS:
text = _('No active tasks')
else:
raise Exception('on_treeview_expose_event: unknown role')
draw_text_box_centered(ctx, treeview, width, height, text, None, None)
return True
def set_download_list_state(self, state):
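        """Track batch additions of download tasks and make sure the periodic
        downloads list refresh timer is running."""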
if state == gPodderSyncUI.DL_ADDING_TASKS:
self.things_adding_tasks += 1
elif state == gPodderSyncUI.DL_ADDED_TASKS:
self.things_adding_tasks -= 1
if self.download_list_update_timer is None:
self.update_downloads_list()
self.download_list_update_timer = util.IdleTimeout(1500, self.update_downloads_list).set_max_milliseconds(5000)
def stop_download_list_update_timer(self):
if self.download_list_update_timer is None:
return False
self.download_list_update_timer.cancel()
self.download_list_update_timer = None
return True
def cleanup_downloads(self):
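        """Remove finished and cancelled tasks from the downloads list and
        refresh the icons of the affected episodes."""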
model = self.download_status_model
all_tasks = [(Gtk.TreeRowReference.new(model, row.path), row[0]) for row in model]
changed_episode_urls = set()
for row_reference, task in all_tasks:
if task.status in (task.DONE, task.CANCELLED):
model.remove(model.get_iter(row_reference.get_path()))
try:
# We don't "see" this task anymore - remove it;
# this is needed, so update_episode_list_icons()
# below gets the correct list of "seen" tasks
self.download_tasks_seen.remove(task)
except KeyError:
pass
changed_episode_urls.add(task.url)
# Tell the task that it has been removed (so it can clean up)
task.removed_from_list()
        # Tell the episode list to update icons for the affected episodes
self.update_episode_list_icons(changed_episode_urls)
# Update the downloads list one more time
self.update_downloads_list(can_call_cleanup=False)
def on_tool_downloads_toggled(self, toolbutton):
if toolbutton.get_active():
self.wNotebook.set_current_page(1)
else:
self.wNotebook.set_current_page(0)
def add_download_task_monitor(self, monitor):
self.download_task_monitors.add(monitor)
model = self.download_status_model
if model is None:
model = ()
for row in model.get_model():
task = row[self.download_status_model.C_TASK]
monitor.task_updated(task)
def remove_download_task_monitor(self, monitor):
self.download_task_monitors.remove(monitor)
def set_download_progress(self, progress):
gpodder.user_extensions.on_download_progress(progress)
def update_downloads_list(self, can_call_cleanup=True):
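        """Refresh the Progress tab, the window title and the task monitors
        from the download status model.

        Returns True while the periodic update timer should keep running.
        """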
try:
model = self.download_status_model
downloading, synchronizing, pausing, cancelling, queued, paused, failed, finished = (0,) * 8
total_speed, total_size, done_size = 0, 0, 0
files_downloading = 0
# Keep a list of all download tasks that we've seen
download_tasks_seen = set()
            # Do not go through the list if the model is not (yet) available
if model is None:
model = ()
for row in model:
self.download_status_model.request_update(row.iter)
task = row[self.download_status_model.C_TASK]
speed, size, status, progress, activity = task.speed, task.total_size, task.status, task.progress, task.activity
# Let the download task monitors know of changes
for monitor in self.download_task_monitors:
monitor.task_updated(task)
total_size += size
done_size += size * progress
download_tasks_seen.add(task)
if status == download.DownloadTask.DOWNLOADING:
if activity == download.DownloadTask.ACTIVITY_DOWNLOAD:
downloading += 1
files_downloading += 1
total_speed += speed
elif activity == download.DownloadTask.ACTIVITY_SYNCHRONIZE:
synchronizing += 1
elif status == download.DownloadTask.PAUSING:
pausing += 1
if activity == download.DownloadTask.ACTIVITY_DOWNLOAD:
files_downloading += 1
elif status == download.DownloadTask.CANCELLING:
cancelling += 1
if activity == download.DownloadTask.ACTIVITY_DOWNLOAD:
files_downloading += 1
elif status == download.DownloadTask.QUEUED:
queued += 1
elif status == download.DownloadTask.PAUSED:
paused += 1
elif status == download.DownloadTask.FAILED:
failed += 1
elif status == download.DownloadTask.DONE:
finished += 1
# Remember which tasks we have seen after this run
self.download_tasks_seen = download_tasks_seen
text = [_('Progress')]
if downloading + synchronizing + pausing + cancelling + queued + paused + failed > 0:
s = []
if downloading > 0:
s.append(N_('%(count)d active', '%(count)d active', downloading) % {'count': downloading})
if synchronizing > 0:
s.append(N_('%(count)d active', '%(count)d active', synchronizing) % {'count': synchronizing})
if pausing > 0:
s.append(N_('%(count)d pausing', '%(count)d pausing', pausing) % {'count': pausing})
if cancelling > 0:
s.append(N_('%(count)d cancelling', '%(count)d cancelling', cancelling) % {'count': cancelling})
if queued > 0:
s.append(N_('%(count)d queued', '%(count)d queued', queued) % {'count': queued})
if paused > 0:
s.append(N_('%(count)d paused', '%(count)d paused', paused) % {'count': paused})
if failed > 0:
s.append(N_('%(count)d failed', '%(count)d failed', failed) % {'count': failed})
text.append(' (' + ', '.join(s) + ')')
self.labelDownloads.set_text(''.join(text))
title = [self.default_title]
# Accessing task.status_changed has the side effect of re-setting
# the changed flag, but we only do it once here so that's okay
channel_urls = [task.podcast_url for task in
self.download_tasks_seen if task.status_changed]
episode_urls = [task.url for task in self.download_tasks_seen]
if files_downloading > 0:
title.append(N_('downloading %(count)d file',
'downloading %(count)d files',
files_downloading) % {'count': files_downloading})
if total_size > 0:
percentage = 100.0 * done_size / total_size
else:
percentage = 0.0
self.set_download_progress(percentage / 100)
total_speed = util.format_filesize(total_speed)
title[1] += ' (%d%%, %s/s)' % (percentage, total_speed)
if synchronizing > 0:
title.append(N_('synchronizing %(count)d file',
'synchronizing %(count)d files',
synchronizing) % {'count': synchronizing})
if queued > 0:
title.append(N_('%(queued)d task queued',
'%(queued)d tasks queued',
queued) % {'queued': queued})
if (downloading + synchronizing + pausing + cancelling + queued) == 0 and self.things_adding_tasks == 0:
self.set_download_progress(1.)
self.downloads_finished(self.download_tasks_seen)
gpodder.user_extensions.on_all_episodes_downloaded()
logger.info('All tasks have finished.')
# Remove finished episodes
if self.config.ui.gtk.download_list.remove_finished and can_call_cleanup:
self.cleanup_downloads()
# Stop updating the download list here
self.stop_download_list_update_timer()
self.gPodder.set_title(' - '.join(title))
self.update_episode_list_icons(episode_urls)
self.play_or_download()
if channel_urls:
self.update_podcast_list_model(channel_urls)
return (self.download_list_update_timer is not None)
except Exception as e:
logger.error('Exception happened while updating download list.', exc_info=True)
self.show_message(
'%s\n\n%s' % (_('Please report this problem and restart gPodder:'), html.escape(str(e))),
_('Unhandled exception'), important=True)
# We return False here, so the update loop won't be called again,
# that's why we require the restart of gPodder in the message.
return False
def on_config_changed(self, *args):
util.idle_add(self._on_config_changed, *args)
def _on_config_changed(self, name, old_value, new_value):
if name == 'ui.gtk.toolbar':
self.toolbar.set_property('visible', new_value)
elif name in ('ui.gtk.episode_list.show_released_time',
'ui.gtk.episode_list.descriptions',
'ui.gtk.episode_list.trim_title_prefix',
'ui.gtk.episode_list.always_show_new'):
self.update_episode_list_model()
elif name in ('auto.update.enabled', 'auto.update.frequency'):
self.restart_auto_update_timer()
elif name in ('ui.gtk.podcast_list.all_episodes',
'ui.gtk.podcast_list.sections'):
            # Force an update of the podcast list model
self.update_podcast_list_model()
elif name == 'ui.gtk.episode_list.columns':
self.update_episode_list_columns_visibility()
elif name == 'ui.gtk.color_scheme':
if new_value == 'system':
self.application.read_portal_color_scheme()
else:
self.application.set_dark_mode(new_value == 'dark')
elif name == 'limit.downloads.concurrent_max':
# Do not allow value to be set below 1
if new_value < 1:
self.config.limit.downloads.concurrent_max = 1
return
# Clamp current value to new maximum value
if self.config.limit.downloads.concurrent > new_value:
self.config.limit.downloads.concurrent = new_value
self.spinMaxDownloads.get_adjustment().set_upper(new_value)
elif name == 'limit.downloads.concurrent':
if self.config.clamp_range('limit.downloads.concurrent', 1, self.config.limit.downloads.concurrent_max):
return
self.spinMaxDownloads.set_value(new_value)
elif name == 'limit.bandwidth.kbps':
adjustment = self.spinLimitDownloads.get_adjustment()
if self.config.clamp_range('limit.bandwidth.kbps', adjustment.get_lower(), adjustment.get_upper()):
return
self.spinLimitDownloads.set_value(new_value)
def on_treeview_query_tooltip(self, treeview, x, y, keyboard_tooltip, tooltip):
# With get_bin_window, we get the window that contains the rows without
# the header. The Y coordinate of this window will be the height of the
# treeview header. This is the amount we have to subtract from the
# event's Y coordinate to get the coordinate to pass to get_path_at_pos
(x_bin, y_bin) = treeview.get_bin_window().get_position()
x -= x_bin
y -= y_bin
(path, column, rx, ry) = treeview.get_path_at_pos(x, y) or (None,) * 4
if not getattr(treeview, TreeViewHelper.CAN_TOOLTIP) or x > 50 or (column is not None and column != treeview.get_columns()[0]):
setattr(treeview, TreeViewHelper.LAST_TOOLTIP, None)
return False
if path is not None:
model = treeview.get_model()
iterator = model.get_iter(path)
role = getattr(treeview, TreeViewHelper.ROLE)
if role == TreeViewHelper.ROLE_EPISODES:
url = model.get_value(iterator, EpisodeListModel.C_URL)
elif role == TreeViewHelper.ROLE_PODCASTS:
url = model.get_value(iterator, PodcastListModel.C_URL)
if url == '-':
# Section header - no tooltip here (for now at least)
return False
last_tooltip = getattr(treeview, TreeViewHelper.LAST_TOOLTIP)
if last_tooltip is not None and last_tooltip != url:
setattr(treeview, TreeViewHelper.LAST_TOOLTIP, None)
return False
setattr(treeview, TreeViewHelper.LAST_TOOLTIP, url)
if role == TreeViewHelper.ROLE_EPISODES:
description = model.get_value(iterator, EpisodeListModel.C_TOOLTIP)
if description:
tooltip.set_text(description)
else:
return False
elif role == TreeViewHelper.ROLE_PODCASTS:
channel = model.get_value(iterator, PodcastListModel.C_CHANNEL)
if channel is None or not hasattr(channel, 'title'):
return False
error_str = model.get_value(iterator, PodcastListModel.C_ERROR)
if error_str:
error_str = _('Feedparser error: %s') % html.escape(error_str.strip())
error_str = '<span foreground="#ff0000">%s</span>' % error_str
box = Gtk.Box(orientation=Gtk.Orientation.VERTICAL, spacing=5)
box.set_border_width(5)
heading = Gtk.Label()
heading.set_max_width_chars(60)
heading.set_alignment(0, 1)
heading.set_markup('<b><big>%s</big></b>\n<small>%s</small>' % (html.escape(channel.title), html.escape(channel.url)))
box.add(heading)
box.add(Gtk.HSeparator())
channel_description = util.remove_html_tags(channel.description)
if channel._update_error is not None:
description = _('ERROR: %s') % channel._update_error
elif len(channel_description) < 500:
description = channel_description
else:
pos = channel_description.find('\n\n')
if pos == -1 or pos > 500:
description = channel_description[:498] + '[...]'
else:
description = channel_description[:pos]
description = Gtk.Label(label=description)
description.set_max_width_chars(60)
if error_str:
description.set_markup(error_str)
description.set_alignment(0, 0)
description.set_line_wrap(True)
box.add(description)
box.show_all()
tooltip.set_custom(box)
return True
setattr(treeview, TreeViewHelper.LAST_TOOLTIP, None)
return False
def allow_tooltips(self, allow):
setattr(self.treeChannels, TreeViewHelper.CAN_TOOLTIP, allow)
setattr(self.treeAvailable, TreeViewHelper.CAN_TOOLTIP, allow)
def treeview_handle_context_menu_click(self, treeview, event):
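        """Return the (model, paths) a context menu should operate on: the
        current selection, or just the clicked row if it lies outside of it."""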
if event is None:
selection = treeview.get_selection()
return selection.get_selected_rows()
x, y = int(event.x), int(event.y)
path, column, rx, ry = treeview.get_path_at_pos(x, y) or (None,) * 4
selection = treeview.get_selection()
model, paths = selection.get_selected_rows()
if path is None or (path not in paths
and event.button == 3):
            # We right-clicked outside the current selection,
            # so don't operate on the existing selection
paths = []
if (path is not None and not paths
and event.button == 3):
# No selection or clicked outside selection;
# select the single item where we clicked
treeview.grab_focus()
treeview.set_cursor(path, column, 0)
paths = [path]
if not paths:
# Unselect any remaining items (clicked elsewhere)
if not treeview.is_rubber_banding_active():
selection.unselect_all()
return model, paths
def downloads_list_get_selection(self, model=None, paths=None):
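        """Return the selected download tasks plus flags telling which
        operations (force start, queue, pause, cancel, remove) are allowed
        for all of them."""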
if model is None and paths is None:
selection = self.treeDownloads.get_selection()
model, paths = selection.get_selected_rows()
can_force, can_queue, can_pause, can_cancel, can_remove = (True,) * 5
selected_tasks = [(Gtk.TreeRowReference.new(model, path),
model.get_value(model.get_iter(path),
DownloadStatusModel.C_TASK)) for path in paths]
for row_reference, task in selected_tasks:
if task.status != download.DownloadTask.QUEUED:
can_force = False
if not task.can_queue():
can_queue = False
if not task.can_pause():
can_pause = False
if not task.can_cancel():
can_cancel = False
if not task.can_remove():
can_remove = False
return selected_tasks, can_force, can_queue, can_pause, can_cancel, can_remove
def downloads_finished(self, download_tasks_seen):
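        """Show notifications for finished and failed downloads/syncs and run
        the configured post-sync processing."""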
# Separate tasks into downloads & syncs
# Since calling notify_as_finished or notify_as_failed clears the flag,
# need to iterate through downloads & syncs separately, else all sync
# tasks will have their flags cleared if we do downloads first
def filter_by_activity(activity, tasks):
return [task for task in tasks if task.activity == activity]
download_tasks = filter_by_activity(download.DownloadTask.ACTIVITY_DOWNLOAD,
download_tasks_seen)
finished_downloads = [str(task)
for task in download_tasks if task.notify_as_finished()]
failed_downloads = ['%s (%s)' % (task, task.error_message)
for task in download_tasks if task.notify_as_failed()]
sync_tasks = filter_by_activity(download.DownloadTask.ACTIVITY_SYNCHRONIZE,
download_tasks_seen)
finished_syncs = [task for task in sync_tasks if task.notify_as_finished()]
failed_syncs = [task for task in sync_tasks if task.notify_as_failed()]
# Note that 'finished_ / failed_downloads' is a list of strings
# Whereas 'finished_ / failed_syncs' is a list of SyncTask objects
if finished_downloads and failed_downloads:
message = self.format_episode_list(finished_downloads, 5)
message += '\n\n<i>%s</i>\n' % _('Could not download some episodes:')
message += self.format_episode_list(failed_downloads, 5)
self.show_message(message, _('Downloads finished'))
elif finished_downloads:
message = self.format_episode_list(finished_downloads)
self.show_message(message, _('Downloads finished'))
elif failed_downloads:
message = self.format_episode_list(failed_downloads)
self.show_message(message, _('Downloads failed'))
if finished_syncs and failed_syncs:
message = self.format_episode_list(
[str(task) for task in finished_syncs], 5)
message += '\n\n<i>%s</i>\n' % _('Could not sync some episodes:')
message += self.format_episode_list(
[str(task) for task in failed_syncs], 5)
self.show_message(message, _('Device synchronization finished'), True)
elif finished_syncs:
message = self.format_episode_list([str(task) for task in finished_syncs])
self.show_message(message, _('Device synchronization finished'))
elif failed_syncs:
message = self.format_episode_list([str(task) for task in failed_syncs])
self.show_message(message, _('Device synchronization failed'), True)
# Do post-sync processing if required
for task in finished_syncs:
if self.config.device_sync.after_sync.mark_episodes_played:
logger.info('Marking as played on transfer: %s', task.episode.url)
task.episode.mark(is_played=True)
if self.config.device_sync.after_sync.delete_episodes:
logger.info('Removing episode after transfer: %s', task.episode.url)
task.episode.delete_from_disk()
self.sync_ui.device.close()
# Update icon list to show changes, if any
self.update_episode_list_icons(update_all=True)
self.update_podcast_list_model()
def format_episode_list(self, episode_list, max_episodes=10):
"""
Format a list of episode names for notifications
Will truncate long episode names and limit the amount of
episodes displayed (max_episodes=10).
The episode_list parameter should be a list of strings.
"""
MAX_TITLE_LENGTH = 100
result = []
for title in episode_list[:min(len(episode_list), max_episodes)]:
# Bug 1834: make sure title is a unicode string,
# so it may be cut correctly on UTF-8 char boundaries
title = util.convert_bytes(title)
if len(title) > MAX_TITLE_LENGTH:
middle = (MAX_TITLE_LENGTH // 2) - 2
title = '%s...%s' % (title[0:middle], title[-middle:])
result.append(html.escape(title))
result.append('\n')
more_episodes = len(episode_list) - max_episodes
if more_episodes > 0:
result.append('(...')
result.append(N_('%(count)d more episode',
'%(count)d more episodes',
more_episodes) % {'count': more_episodes})
result.append('...)')
return (''.join(result)).strip()
def queue_task(self, task, force_start):
if force_start:
self.download_queue_manager.force_start_task(task)
else:
self.download_queue_manager.queue_task(task)
def _for_each_task_set_status(self, tasks, status, force_start=False):
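        """Apply a new status to a batch of download tasks while showing a
        progress dialog and keeping the download queue manager disabled."""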
count = len(tasks)
if count:
progress_indicator = ProgressIndicator(
_('Queueing') if status == download.DownloadTask.QUEUED else
_('Removing') if status is None else download.DownloadTask.STATUS_MESSAGE[status],
'', True, self.get_dialog_parent(), count)
else:
progress_indicator = None
restart_timer = self.stop_download_list_update_timer()
self.download_queue_manager.disable()
self.__for_each_task_set_status(tasks, status, force_start, progress_indicator, restart_timer)
self.download_queue_manager.enable()
if progress_indicator:
progress_indicator.on_finished()
def __for_each_task_set_status(self, tasks, status, force_start=False, progress_indicator=None, restart_timer=False):
episode_urls = set()
model = self.treeDownloads.get_model()
has_queued_tasks = False
for row_reference, task in tasks:
with task:
if status == download.DownloadTask.QUEUED:
# Only queue task when it's paused/failed/cancelled (or forced)
if task.can_queue() or force_start:
                        # add the task back in if it was already cleaned up
                        # (to trigger this: cancel one download in the active list, cancel all
                        # other downloads, quickly right-click the cancelled one to get
                        # the context menu, wait until the active list is cleared, and
                        # then choose download)
if task not in self.download_tasks_seen:
self.download_status_model.register_task(task, False)
self.download_tasks_seen.add(task)
self.queue_task(task, force_start)
has_queued_tasks = True
elif status == download.DownloadTask.CANCELLING:
logger.info(("cancelling task %s" % task.status))
task.cancel()
elif status == download.DownloadTask.PAUSING:
task.pause()
elif status is None:
if task.can_cancel():
task.cancel()
path = row_reference.get_path()
                    # path isn't set if the item has already been removed from the list
                    # (to trigger this: cancel one download in the active list, cancel all
                    # other downloads, quickly right-click the cancelled one to get
                    # the context menu, wait until the active list is cleared, and
                    # then choose remove from list)
if path:
model.remove(model.get_iter(path))
# Remember the URL, so we can tell the UI to update
try:
# We don't "see" this task anymore - remove it;
# this is needed, so update_episode_list_icons()
# below gets the correct list of "seen" tasks
self.download_tasks_seen.remove(task)
except KeyError:
pass
episode_urls.add(task.url)
# Tell the task that it has been removed (so it can clean up)
task.removed_from_list()
else:
# We can (hopefully) simply set the task status here
task.status = status
if progress_indicator:
if not progress_indicator.on_tick():
break
if progress_indicator:
progress_indicator.on_tick(final=_('Updating...'))
# Update the tab title and downloads list
if has_queued_tasks or restart_timer:
self.set_download_list_state(gPodderSyncUI.DL_ONEOFF)
else:
self.update_downloads_list()
        # Tell the episode list to update icons for the affected episodes
self.update_episode_list_icons(episode_urls)
def treeview_downloads_show_context_menu(self, event=None):
treeview = self.treeDownloads
model, paths = self.treeview_handle_context_menu_click(treeview, event)
if not paths:
return not treeview.is_rubber_banding_active()
if event is None or event.button == 3:
selected_tasks, can_force, can_queue, can_pause, can_cancel, can_remove = \
self.downloads_list_get_selection(model, paths)
menu = self.application.builder.get_object('downloads-context')
vsec = menu.get_item_link(0, Gio.MENU_LINK_SECTION)
dsec = menu.get_item_link(1, Gio.MENU_LINK_SECTION)
def insert_menuitem(position, label, action, icon):
dsec.insert(position, label, action)
menuitem = Gio.MenuItem.new(label, action)
menuitem.set_attribute_value('verb-icon', GLib.Variant.new_string(icon))
vsec.insert_item(position, menuitem)
vsec.remove(0)
dsec.remove(0)
if can_force:
insert_menuitem(0, _('Start download now'), 'win.forceDownload', 'document-save-symbolic')
else:
insert_menuitem(0, _('Download'), 'win.download', 'document-save-symbolic')
self.remove_action.set_enabled(can_remove)
area = TreeViewHelper.get_popup_rectangle(treeview, event)
self.downloads_popover.set_pointing_to(area)
self.downloads_popover.show()
return True
def on_mark_episodes_as_old(self, item, *args):
assert self.active_channel is not None
for episode in self.active_channel.get_all_episodes():
if not episode.was_downloaded(and_exists=True):
episode.mark(is_played=True)
self.update_podcast_list_model(selected=True)
self.update_episode_list_icons(update_all=True)
def on_open_download_folder(self, item, *args):
assert self.active_channel is not None
util.gui_open(self.active_channel.save_dir, gui=self)
def on_open_episode_download_folder(self, unused1=None, unused2=None):
episodes = self.get_selected_episodes()
assert len(episodes) == 1
util.gui_open(episodes[0].parent.save_dir, gui=self)
def on_select_channel_of_episode(self, unused1=None, unused2=None):
episodes = self.get_selected_episodes()
assert len(episodes) == 1
channel = episodes[0].parent
# Focus channel list
self.treeChannels.grab_focus()
# Select channel in list
path = self.podcast_list_model.get_filter_path_from_url(channel.url)
self.treeChannels.set_cursor(path)
def treeview_channels_show_context_menu(self, event=None):
treeview = self.treeChannels
model, paths = self.treeview_handle_context_menu_click(treeview, event)
if not paths:
return True
# Check for a valid channel id; if there is no id, assume
# it is a proxy channel (or equivalent) that cannot be
# operated on via the context menu
if self.active_channel.id is None:
return True
if event is None or event.button == 3:
self.auto_archive_action.change_state(
GLib.Variant.new_boolean(self.active_channel.auto_archive_episodes))
self.channel_context_menu_helper.replace_entries([
(label,
None if func is None else lambda a, b, f=func: f(self.active_channel))
for label, func in list(
gpodder.user_extensions.on_channel_context_menu(self.active_channel)
or [])])
self.allow_tooltips(False)
area = TreeViewHelper.get_popup_rectangle(treeview, event)
self.channels_popover.set_pointing_to(area)
self.channels_popover.show()
return True
def cover_download_finished(self, channel, pixbuf):
"""
The Cover Downloader calls this when it has finished
downloading (or registering, if already downloaded)
a new channel cover, which is ready for displaying.
"""
util.idle_add(self.podcast_list_model.add_cover_by_channel,
channel, pixbuf)
@staticmethod
def build_filename(filename, extension):
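# Illustrative only (the exact result depends on util.sanitize_filename_ext):
#   build_filename('My Episode', '.mp3')  ->  'My Episode.mp3'
# The extension is appended whenever the sanitized name no longer ends with it.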
filename, extension = util.sanitize_filename_ext(
filename,
extension,
PodcastEpisode.MAX_FILENAME_LENGTH,
PodcastEpisode.MAX_FILENAME_WITH_EXT_LENGTH)
if not filename.endswith(extension):
filename += extension
return filename
def on_save_episodes_activate(self, action, *args):
episodes = self.get_selected_episodes()
util.idle_add(self.save_episodes_as_file, episodes)
def save_episodes_as_file(self, episodes):
def do_save_episode(copy_from, copy_to):
if os.path.exists(copy_to):
logger.warning('Destination file already exists: %s (source: %s)', copy_to, copy_from)
title = _('File already exists')
d = {'filename': os.path.basename(copy_to)}
message = _('A file named "%(filename)s" already exists. Do you want to replace it?') % d
if not self.show_confirmation(message, title):
return
try:
shutil.copyfile(copy_from, copy_to)
except (OSError, IOError) as e:
logger.warning('Error copying from %s to %s: %r', copy_from, copy_to, e, exc_info=True)
folder, filename = os.path.split(copy_to)
# Remove characters not supported by VFAT (#282)
new_filename = re.sub(r"[\"*/:<>?\\|]", "_", filename)
destination = os.path.join(folder, new_filename)
if copy_to != destination:
shutil.copyfile(copy_from, destination)
else:
raise
PRIVATE_FOLDER_ATTRIBUTE = '_save_episodes_as_file_folder'
folder = getattr(self, PRIVATE_FOLDER_ATTRIBUTE, None)
allRemainingDefault = False
remaining = len(episodes)
dialog = gPodderExportToLocalFolder(self.main_window,
_config=self.config)
episodes.sort(key=lambda episode: episode.published)
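# For each downloaded episode, ask the export dialog for a destination unless
# the user chose to apply the current folder to all remaining episodes
# (allRemainingDefault); the chosen folder is remembered across invocations
# via PRIVATE_FOLDER_ATTRIBUTE.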
for episode in episodes:
remaining -= 1
if episode.was_downloaded(and_exists=True):
copy_from = episode.local_filename(create=False)
assert copy_from is not None
base, extension = os.path.splitext(copy_from)
if len(self.config.sendto.custom_file_format) > 0:
filename = self.build_filename(episode.sync_filename(
self.config.sendto.custom_file_format_enabled,
self.config.sendto.custom_file_format), extension)
else:
filename = self.build_filename(episode.title, extension)
try:
if allRemainingDefault:
do_save_episode(copy_from, os.path.join(folder, filename))
else:
(notCancelled, folder, dest_path, allRemainingDefault) = dialog.save_as(folder, filename, remaining)
if notCancelled:
do_save_episode(copy_from, dest_path)
else:
break
except (OSError, IOError) as e:
if remaining:
msg = _('Error saving to local folder: %(error)r.\n'
'Would you like to continue?') % {'error': e}
if not self.show_confirmation(msg, _('Error saving to local folder')):
logger.warning("Save to Local Folder cancelled following error")
break
else:
self.notification(_('Error saving to local folder: %(error)r') % {'error': e},
_('Error saving to local folder'), important=True)
setattr(self, PRIVATE_FOLDER_ATTRIBUTE, folder)
def on_bluetooth_episodes_activate(self, action, *args):
episodes = self.get_selected_episodes()
util.idle_add(self.copy_episodes_bluetooth, episodes)
def copy_episodes_bluetooth(self, episodes):
episodes_to_copy = [e for e in episodes if e.was_downloaded(and_exists=True)]
def convert_and_send_thread(episodes):
for episode in episodes:
filename = episode.local_filename(create=False)
assert filename is not None
(base, ext) = os.path.splitext(filename)
destfile = self.build_filename(episode.sync_filename(), ext)
destfile = os.path.join(tempfile.gettempdir(), destfile)
try:
shutil.copyfile(filename, destfile)
util.bluetooth_send_file(destfile)
except Exception:
logger.error('Cannot copy "%s" to "%s".', filename, destfile)
self.notification(_('Error converting file.'), _('Bluetooth file transfer'), important=True)
util.delete_file(destfile)
util.run_in_background(lambda: convert_and_send_thread(episodes_to_copy))
def treeview_available_show_context_menu(self, event=None):
treeview = self.treeAvailable
model, paths = self.treeview_handle_context_menu_click(treeview, event)
if not paths:
return not treeview.is_rubber_banding_active()
if event is None or event.button == 3:
episodes = self.get_selected_episodes()
any_locked = any(e.archive for e in episodes)
any_new = any(e.is_new and e.state != gpodder.STATE_DELETED for e in episodes)
downloaded = all(e.was_downloaded(and_exists=True) for e in episodes)
(open_instead_of_play, can_play, can_preview, can_download, can_pause,
can_cancel, can_delete, can_lock) = self.play_or_download()
menu = self.application.builder.get_object('episodes-context')
vsec = menu.get_item_link(0, Gio.MENU_LINK_SECTION)
psec = menu.get_item_link(1, Gio.MENU_LINK_SECTION)
def insert_menuitem(position, label, action, icon):
psec.insert(position, label, action)
menuitem = Gio.MenuItem.new(label, action)
menuitem.set_attribute_value('verb-icon', GLib.Variant.new_string(icon))
vsec.insert_item(position, menuitem)
# Play / Stream / Preview / Open
vsec.remove(0)
psec.remove(0)
if open_instead_of_play:
insert_menuitem(0, _('Open'), 'win.open', 'document-open-symbolic')
else:
if downloaded:
insert_menuitem(0, _('Play'), 'win.play', 'media-playback-start-symbolic')
elif can_preview:
insert_menuitem(0, _('Preview'), 'win.play', 'media-playback-start-symbolic')
else:
insert_menuitem(0, _('Stream'), 'win.play', 'media-playback-start-symbolic')
# Download / Pause
vsec.remove(1)
psec.remove(1)
if can_pause:
insert_menuitem(1, _('Pause'), 'win.pause', 'media-playback-pause-symbolic')
else:
insert_menuitem(1, _('Download'), 'win.download', 'document-save-symbolic')
# Cancel
have_cancel = (psec.get_item_attribute_value(
2, "action", GLib.VariantType("s")).get_string() == 'win.cancel')
if not can_cancel and have_cancel:
vsec.remove(2)
psec.remove(2)
elif can_cancel and not have_cancel:
insert_menuitem(2, _('Cancel'), 'win.cancel', 'process-stop-symbolic')
# Extensions section
self.episode_context_menu_helper.replace_entries([
(label, None if func is None else lambda a, b, f=func: f(episodes))
for label, func in list(
gpodder.user_extensions.on_episodes_context_menu(episodes) or [])])
# 'Send to' submenu
if downloaded:
if self.sendto_menu.get_n_items() < 1:
self.sendto_menu.insert_submenu(
0, _('Send to'),
self.application.builder.get_object('episodes-context-sendto'))
else:
self.sendto_menu.remove_all()
# New and Archive state
self.episode_new_action.change_state(GLib.Variant.new_boolean(any_new))
self.episode_lock_action.change_state(GLib.Variant.new_boolean(any_locked))
self.allow_tooltips(False)
area = TreeViewHelper.get_popup_rectangle(treeview, event)
self.episodes_popover.set_pointing_to(area)
self.episodes_popover.show()
return True
def set_episode_actions(self, open_instead_of_play=False, can_play=False, can_force=False, can_download=False,
can_pause=False, can_cancel=False, can_delete=False, can_lock=False, is_episode_selected=False):
episodes = self.get_selected_episodes() if is_episode_selected else []
# play icon and label
if open_instead_of_play or not is_episode_selected:
self.toolPlay.set_icon_name('document-open-symbolic')
self.toolPlay.set_label(_('Open'))
else:
self.toolPlay.set_icon_name('media-playback-start-symbolic')
downloaded = all(e.was_downloaded(and_exists=True) for e in episodes)
downloading = any(e.downloading for e in episodes)
if downloaded:
self.toolPlay.set_label(_('Play'))
elif downloading:
self.toolPlay.set_label(_('Preview'))
else:
self.toolPlay.set_label(_('Stream'))
# toolbar
self.toolPlay.set_sensitive(can_play)
self.toolForceDownload.set_visible(can_force)
self.toolForceDownload.set_sensitive(can_force)
self.toolDownload.set_visible(not can_force)
self.toolDownload.set_sensitive(can_download)
self.toolPause.set_sensitive(can_pause)
self.toolCancel.set_sensitive(can_cancel)
# Episodes menu
self.play_action.set_enabled(can_play and not open_instead_of_play)
self.open_action.set_enabled(can_play and open_instead_of_play)
self.download_action.set_enabled(can_force or can_download)
self.pause_action.set_enabled(can_pause)
self.cancel_action.set_enabled(can_cancel)
self.delete_action.set_enabled(can_delete)
self.toggle_episode_new_action.set_enabled(is_episode_selected)
self.toggle_episode_lock_action.set_enabled(can_lock)
self.open_episode_download_folder_action.set_enabled(len(episodes) == 1)
self.select_channel_of_episode_action.set_enabled(len(episodes) == 1)
# Episodes context menu
self.episode_new_action.set_enabled(is_episode_selected)
self.episode_lock_action.set_enabled(can_lock)
def set_title(self, new_title):
self.default_title = new_title
self.gPodder.set_title(new_title)
def update_episode_list_icons(self, urls=None, selected=False, update_all=False):
"""
Updates the status icons in the episode list.
If urls is given, it should be a list of URLs
of episodes that should be updated.
If urls is None, set exactly one of selected or
update_all to True (the former updates just the
selected episodes, the latter updates all episodes).
"""
self.episode_list_model.cache_config(self.config)
if urls is not None:
# We have a list of URLs to walk through
self.episode_list_model.update_by_urls(urls)
elif selected and not update_all:
# We should update all selected episodes
selection = self.treeAvailable.get_selection()
model, paths = selection.get_selected_rows()
for path in reversed(paths):
iterator = model.get_iter(path)
self.episode_list_model.update_by_filter_iter(iterator)
elif update_all and not selected:
# We update all (even the filter-hidden) episodes
self.episode_list_model.update_all()
else:
# Wrong/invalid call - have to specify at least one parameter
raise ValueError('Invalid call to update_episode_list_icons')
def episode_list_status_changed(self, episodes):
self.update_episode_list_icons({e.url for e in episodes})
self.update_podcast_list_model({e.channel.url for e in episodes})
self.db.commit()
def playback_episodes_for_real(self, episodes):
groups = collections.defaultdict(list)
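# Episodes are grouped by player command: every playable file is appended to
# groups[player], and each group is later launched with a single command
# (except 'default', which is opened per-file with the system handler).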
for episode in episodes:
episode._download_error = None
if episode.download_task is not None and episode.download_task.status == episode.download_task.FAILED:
if not episode.can_stream(self.config):
# Do not cancel failed tasks that can not be streamed
continue
# Cancel failed task and remove from progress list
episode.download_task.cancel()
self.cleanup_downloads()
player = episode.get_player(self.config)
try:
allow_partial = (player != 'default')
filename = episode.get_playback_url(self.config, allow_partial)
except Exception as e:
episode._download_error = str(e)
continue
# Mark episode as played in the database
episode.playback_mark()
self.mygpo_client.on_playback([episode])
# Determine the playback resume position - if the file
# was played 100%, we simply start from the beginning
resume_position = episode.current_position
if resume_position == episode.total_time:
resume_position = 0
# If Panucci is configured, use D-Bus to call it
if player == 'panucci':
try:
PANUCCI_NAME = 'org.panucci.panucciInterface'
PANUCCI_PATH = '/panucciInterface'
PANUCCI_INTF = 'org.panucci.panucciInterface'
o = gpodder.dbus_session_bus.get_object(PANUCCI_NAME, PANUCCI_PATH)
i = dbus.Interface(o, PANUCCI_INTF)
def on_reply(*args):
pass
def error_handler(filename, err):
logger.error('Exception in D-Bus call: %s', str(err))
# Fallback: use the command line client
for command in util.format_desktop_command('panucci',
[filename]):
logger.info('Executing: %s', repr(command))
util.Popen(command, close_fds=True)
def on_error(err):
return error_handler(filename, err)
# This method only exists in Panucci > 0.9 ('new Panucci')
i.playback_from(filename, resume_position,
reply_handler=on_reply, error_handler=on_error)
continue # This file was handled by the D-Bus call
except Exception:
logger.error('Calling Panucci using D-Bus', exc_info=True)
groups[player].append(filename)
# Open episodes with system default player
if 'default' in groups:
for filename in groups['default']:
logger.debug('Opening with system default: %s', filename)
util.gui_open(filename, gui=self)
del groups['default']
# For each type now, go and create play commands
for group in groups:
for command in util.format_desktop_command(group, groups[group], resume_position):
logger.debug('Executing: %s', repr(command))
util.Popen(command, close_fds=True)
# Persist episode status changes to the database
self.db.commit()
# Flush updated episode status
if self.mygpo_client.can_access_webservice():
self.mygpo_client.flush()
def playback_episodes(self, episodes):
# We need to create a list, because we run through it more than once
episodes = list(Model.sort_episodes_by_pubdate(e for e in episodes if e.can_play(self.config)))
try:
self.playback_episodes_for_real(episodes)
except Exception:
logger.error('Error in playback!', exc_info=True)
self.show_message(_('Please check your media player settings in the preferences dialog.'),
_('Error opening player'))
self.episode_list_status_changed(episodes)
def play_or_download(self):
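# Recompute the capability flags for the current selection (episode list or
# downloads list), push them into the toolbar/menu actions via
# set_episode_actions(), and return the flags so callers (e.g. the context
# menus) can reuse them.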
if not self.in_downloads_list():
(open_instead_of_play, can_play, can_preview, can_download,
can_pause, can_cancel, can_delete, can_lock) = (False,) * 8
selection = self.treeAvailable.get_selection()
if selection.count_selected_rows() > 0:
(model, paths) = selection.get_selected_rows()
for path in paths:
try:
episode = model.get_value(model.get_iter(path), EpisodeListModel.C_EPISODE)
if episode is None:
logger.info('Invalid episode at path %s', str(path))
continue
except TypeError:
logger.error('Invalid episode at path %s', str(path))
continue
# These flags are only ever switched on; once set, they stay set for the rest of the loop.
# Actions filter episodes using these methods.
open_instead_of_play = open_instead_of_play or episode.file_type() not in ('audio', 'video')
can_play = can_play or episode.can_play(self.config)
can_preview = can_preview or episode.can_preview()
can_download = can_download or episode.can_download()
can_pause = can_pause or episode.can_pause()
can_cancel = can_cancel or episode.can_cancel()
can_delete = can_delete or episode.can_delete()
can_lock = can_lock or episode.can_lock()
self.set_episode_actions(open_instead_of_play, can_play, False, can_download, can_pause, can_cancel, can_delete, can_lock,
selection.count_selected_rows() > 0)
return (open_instead_of_play, can_play, can_preview, can_download,
can_pause, can_cancel, can_delete, can_lock)
else:
(can_queue, can_pause, can_cancel, can_remove) = (False,) * 4
can_force = True
selection = self.treeDownloads.get_selection()
if selection.count_selected_rows() > 0:
(model, paths) = selection.get_selected_rows()
for path in paths:
try:
task = model.get_value(model.get_iter(path), 0)
if task is None:
logger.info('Invalid task at path %s', str(path))
continue
except TypeError:
logger.error('Invalid task at path %s', str(path))
continue
if task.status != download.DownloadTask.QUEUED:
can_force = False
# These flags are only ever switched on; once set, they stay set for the rest of the loop.
# Actions filter tasks using these methods.
can_queue = can_queue or task.can_queue()
can_pause = can_pause or task.can_pause()
can_cancel = can_cancel or task.can_cancel()
can_remove = can_remove or task.can_remove()
else:
can_force = False
self.set_episode_actions(False, False, can_force, can_queue, can_pause, can_cancel, can_remove, False, False)
return (False, False, False, can_queue, can_pause, can_cancel,
can_remove, False)
def on_cbMaxDownloads_toggled(self, widget, *args):
self.spinMaxDownloads.set_sensitive(self.cbMaxDownloads.get_active())
def on_cbLimitDownloads_toggled(self, widget, *args):
self.spinLimitDownloads.set_sensitive(self.cbLimitDownloads.get_active())
def episode_new_status_changed(self, urls):
self.update_podcast_list_model()
self.update_episode_list_icons(urls)
def refresh_episode_dates(self):
t = time.localtime()
current_day = t[:3]
if self.last_episode_date_refresh is not None and self.last_episode_date_refresh != current_day:
# update all episodes in current view
for row in self.episode_list_model:
row[EpisodeListModel.C_PUBLISHED_TEXT] = row[EpisodeListModel.C_EPISODE].cute_pubdate()
self.last_episode_date_refresh = current_day
remaining_seconds = 86400 - 3600 * t.tm_hour - 60 * t.tm_min - t.tm_sec
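# 86400 seconds per day minus the seconds elapsed today = time until local midnight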
if remaining_seconds > 3600:
# time out an hour early in case daylight saving time moves the clock forward
remaining_seconds = remaining_seconds - 3600
util.idle_timeout_add(remaining_seconds * 1000, self.refresh_episode_dates)
def update_podcast_list_model(self, urls=None, selected=False, select_url=None,
sections_changed=False):
"""Update the podcast list treeview model
If urls is given, it should list the URLs of each
podcast that has to be updated in the list.
If selected is True, only update the model contents
for the currently-selected podcast - nothing more.
The caller can optionally specify "select_url",
which is the URL of the podcast that is to be
selected in the list after the update is complete.
This only works if the podcast list has to be
reloaded; i.e. something has been added or removed
since the last update of the podcast list.
"""
selection = self.treeChannels.get_selection()
model, iterator = selection.get_selected()
def is_section(r):
return r[PodcastListModel.C_URL] == '-'
def is_separator(r):
return r[PodcastListModel.C_SEPARATOR]
sections_active = any(is_section(x) for x in self.podcast_list_model)
if self.config.ui.gtk.podcast_list.all_episodes:
# Update "all episodes" view in any case (if enabled)
self.podcast_list_model.update_first_row()
# List model length minus 1, because of "All"
list_model_length = len(self.podcast_list_model) - 1
else:
list_model_length = len(self.podcast_list_model)
force_update = (sections_active != self.config.ui.gtk.podcast_list.sections
or sections_changed)
# Filter items in the list model that are not podcasts, so we get the
# correct podcast list count (ignore section headers and separators)
def is_not_podcast(r):
return is_section(r) or is_separator(r)
list_model_length -= len(list(filter(is_not_podcast, self.podcast_list_model)))
if selected and not force_update:
# very cheap! only update selected channel
if iterator is not None:
# If we have selected the "all episodes" view, we have
# to update all channels for selected episodes:
if self.config.ui.gtk.podcast_list.all_episodes and \
self.podcast_list_model.iter_is_first_row(iterator):
urls = self.get_podcast_urls_from_selected_episodes()
self.podcast_list_model.update_by_urls(urls)
else:
# Otherwise just update the selected row (a podcast)
self.podcast_list_model.update_by_filter_iter(iterator)
if self.config.ui.gtk.podcast_list.sections:
self.podcast_list_model.update_sections()
elif list_model_length == len(self.channels) and not force_update:
# we can keep the model, but have to update some
if urls is None:
# still cheaper than reloading the whole list
self.podcast_list_model.update_all()
else:
# ok, we got a bunch of urls to update
self.podcast_list_model.update_by_urls(urls)
if self.config.ui.gtk.podcast_list.sections:
self.podcast_list_model.update_sections()
else:
if model and iterator and select_url is None:
# Get the URL of the currently-selected podcast
select_url = model.get_value(iterator, PodcastListModel.C_URL)
# Update the podcast list model with new channels
self.podcast_list_model.set_channels(self.db, self.config, self.channels)
try:
selected_iter = model.get_iter_first()
# Find the previously-selected URL in the new
# model if we have a URL (else select the first)
if select_url is not None:
pos = model.get_iter_first()
while pos is not None:
url = model.get_value(pos, PodcastListModel.C_URL)
if url == select_url:
selected_iter = pos
break
pos = model.iter_next(pos)
if selected_iter is not None:
selection.select_iter(selected_iter)
self.on_treeChannels_cursor_changed(self.treeChannels)
except Exception:
logger.error('Cannot select podcast in list', exc_info=True)
def on_episode_list_filter_changed(self, has_episodes):
self.play_or_download()
def update_episode_list_model(self):
if self.channels and self.active_channel is not None:
self.treeAvailable.get_selection().unselect_all()
self.treeAvailable.scroll_to_point(0, 0)
self.episode_list_model.cache_config(self.config)
with self.treeAvailable.get_selection().handler_block(self.episode_selection_handler_id):
# have to block the on_episode_list_selection_changed handler because
# when selecting any channel from All Episodes, on_episode_list_selection_changed
# is called once per episode (4k times in my case), causing the episode shownotes
# to be updated as many times, resulting in a UI freeze of about 10 seconds.
self.episode_list_model.replace_from_channel(self.active_channel)
else:
self.episode_list_model.clear()
@dbus.service.method(gpodder.dbus_interface)
def offer_new_episodes(self, channels=None):
new_episodes = self.get_new_episodes(channels)
if new_episodes:
self.new_episodes_show(new_episodes)
return True
return False
def add_podcast_list(self, podcasts, auth_tokens=None):
"""Subscribe to a list of podcast given (title, url) pairs
If auth_tokens is given, it should be a dictionary
mapping URLs to (username, password) tuples."""
if auth_tokens is None:
auth_tokens = {}
existing_urls = {podcast.url for podcast in self.channels}
# For a given URL, the desired title (or None)
title_for_url = {}
# Sort and split the URL list into five buckets
queued, failed, existing, worked, authreq = [], [], [], [], []
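# Bucket semantics:
#   queued   - URLs that look valid and are not yet subscribed (tried below)
#   failed   - invalid URLs or URLs whose subscription attempt raised an error
#   existing - URLs already present in the subscription list
#   worked   - URLs that were successfully subscribed
#   authreq  - URLs that need authentication (retried after a login dialog)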
for input_title, input_url in podcasts:
url = util.normalize_feed_url(input_url)
# Check if it's a YouTube channel, user, or playlist and resolve it to its feed URL if so
url = youtube.parse_youtube_url(url)
if url is None:
# Fail this one because the URL is not valid
failed.append(input_url)
elif url in existing_urls:
# A podcast already exists in the list for this URL
existing.append(url)
# XXX: Should we try to update the title of the existing
# subscription from input_title here if it is different?
else:
# This URL has survived the first round - queue for add
title_for_url[url] = input_title
queued.append(url)
if url != input_url and input_url in auth_tokens:
auth_tokens[url] = auth_tokens[input_url]
error_messages = {}
redirections = {}
progress = ProgressIndicator(_('Adding podcasts'),
_('Please wait while episode information is downloaded.'),
parent=self.get_dialog_parent())
def on_after_update():
progress.on_finished()
# Report already-existing subscriptions to the user
if existing:
title = _('Existing subscriptions skipped')
message = _('You are already subscribed to these podcasts:') \
+ '\n\n' + '\n'.join(html.escape(url) for url in existing)
self.show_message(message, title, widget=self.treeChannels)
# Report subscriptions that require authentication
retry_podcasts = {}
if authreq:
for url in authreq:
title = _('Podcast requires authentication')
message = _('Please login to %s:') % (html.escape(url),)
success, auth_tokens = self.show_login_dialog(title, message)
if success:
retry_podcasts[url] = auth_tokens
else:
# Stop asking the user for more login data
retry_podcasts = {}
for url in authreq:
error_messages[url] = _('Authentication failed')
failed.append(url)
break
# Report website redirections
for url in redirections:
title = _('Website redirection detected')
message = _('The URL %(url)s redirects to %(target)s.') \
+ '\n\n' + _('Do you want to visit the website now?')
message = message % {'url': url, 'target': redirections[url]}
if self.show_confirmation(message, title):
util.open_website(url)
else:
break
# Report failed subscriptions to the user
if failed:
title = _('Could not add some podcasts')
message = _('Some podcasts could not be added to your list:')
details = '\n\n'.join('<b>{}</b>:\n{}'.format(html.escape(url),
html.escape(error_messages.get(url, _('Unknown')))) for url in failed)
self.show_message_details(title, message, details)
# Upload subscription changes to gpodder.net
self.mygpo_client.on_subscribe(worked)
# Fix URLs if mygpo has rewritten them
self.rewrite_urls_mygpo()
# If only one podcast was added, select it after the update
if len(worked) == 1:
url = worked[0]
else:
url = None
# Update the list of subscribed podcasts
self.update_podcast_list_model(select_url=url)
# If we have authentication data to retry, do so here
if retry_podcasts:
podcasts = [(title_for_url.get(url), url)
for url in list(retry_podcasts.keys())]
self.add_podcast_list(podcasts, retry_podcasts)
# This will NOT show new episodes for podcasts that have
# been added ("worked"), but it will prevent problems with
# multiple dialogs being open at the same time ;)
return
# Offer to download new episodes
episodes = []
for podcast in self.channels:
if podcast.url in worked:
episodes.extend(podcast.get_all_episodes())
if episodes:
episodes = list(Model.sort_episodes_by_pubdate(episodes,
reverse=True))
self.new_episodes_show(episodes,
selected=[e.check_is_new() for e in episodes])
@util.run_in_background
def thread_proc():
# After the initial sorting and splitting, try all queued podcasts
length = len(queued)
for index, url in enumerate(queued):
title = title_for_url.get(url)
progress.on_progress(float(index) / float(length))
progress.on_message(title or url)
try:
# The URL is valid and does not exist already - subscribe!
channel = self.model.load_podcast(url=url, create=True,
authentication_tokens=auth_tokens.get(url, None),
max_episodes=self.config.limit.episodes)
try:
username, password = util.username_password_from_url(url)
except ValueError:
username, password = (None, None)
if title is not None:
# Prefer title from subscription source (bug 1711)
channel.title = title
if username is not None and channel.auth_username is None and \
password is not None and channel.auth_password is None:
channel.auth_username = username
channel.auth_password = password
channel.save()
self._update_cover(channel)
except feedcore.AuthenticationRequired as e:
# use e.url because there might have been a redirection (#571)
if e.url in auth_tokens:
# Fail for wrong authentication data
error_messages[e.url] = _('Authentication failed')
failed.append(e.url)
else:
# Queue for login dialog later
authreq.append(e.url)
continue
except feedcore.WifiLogin as error:
redirections[url] = error.data
failed.append(url)
error_messages[url] = _('Redirection detected')
continue
except Exception as e:
logger.error('Subscription error: %s', e, exc_info=True)
error_messages[url] = str(e)
failed.append(url)
continue
assert channel is not None
worked.append(channel.url)
util.idle_add(on_after_update)
def find_episode(self, podcast_url, episode_url):
"""Find an episode given its podcast and episode URL
The function will return a PodcastEpisode object if
the episode is found, or None if it's not found.
"""
for podcast in self.channels:
if podcast_url == podcast.url:
for episode in podcast.get_all_episodes():
if episode_url == episode.url:
return episode
return None
def process_received_episode_actions(self):
"""Process/merge episode actions from gpodder.net
This function will merge all changes received from
the server to the local database and update the
status of the affected episodes as necessary.
"""
indicator = ProgressIndicator(_('Merging episode actions'),
_('Episode actions from gpodder.net are merged.'),
False, self.get_dialog_parent())
Gtk.main_iteration()
self.mygpo_client.process_episode_actions(self.find_episode)
self.db.commit()
indicator.on_finished()
def _update_cover(self, channel):
if channel is not None:
self.cover_downloader.request_cover(channel)
def show_update_feeds_buttons(self):
# Make sure that the buttons for updating feeds
# appear - this should happen after a feed update
self.hboxUpdateFeeds.hide()
if not self.application.want_headerbar:
self.btnUpdateFeeds.show()
self.update_action.set_enabled(True)
self.update_channel_action.set_enabled(True)
def on_btnCancelFeedUpdate_clicked(self, widget):
if not self.feed_cache_update_cancelled:
self.pbFeedUpdate.set_text(_('Cancelling...'))
self.feed_cache_update_cancelled = True
self.btnCancelFeedUpdate.set_sensitive(False)
else:
self.show_update_feeds_buttons()
def update_feed_cache(self, channels=None,
show_new_episodes_dialog=True):
if self.config.check_connection and not util.connection_available():
self.show_message(_('Please connect to a network, then try again.'),
_('No network connection'), important=True)
return
# Fix URLs if mygpo has rewritten them
self.rewrite_urls_mygpo()
if channels is None:
# Only update podcasts for which updates are enabled
channels = [c for c in self.channels if not c.pause_subscription]
self.update_action.set_enabled(False)
self.update_channel_action.set_enabled(False)
self.feed_cache_update_cancelled = False
self.btnCancelFeedUpdate.show()
self.btnCancelFeedUpdate.set_sensitive(True)
self.btnCancelFeedUpdate.set_image(Gtk.Image.new_from_icon_name('process-stop', Gtk.IconSize.BUTTON))
self.hboxUpdateFeeds.show_all()
self.btnUpdateFeeds.hide()
count = len(channels)
text = N_('Updating %(count)d feed...', 'Updating %(count)d feeds...',
count) % {'count': count}
self.pbFeedUpdate.set_text(text)
self.pbFeedUpdate.set_fraction(0)
@util.run_in_background
def update_feed_cache_proc():
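# Runs in a background thread: update each channel in turn, collect its new
# episodes, and push per-channel UI/progress updates back to the GTK main
# loop with util.idle_add(); the finish callback then decides what to do
# with the collected new episodes.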
updated_channels = []
nr_update_errors = 0
new_episodes = []
for updated, channel in enumerate(channels):
if self.feed_cache_update_cancelled:
break
def indicate_updating_podcast(channel):
d = {'podcast': channel.title, 'position': updated + 1, 'total': count}
progression = _('Updating %(podcast)s (%(position)d/%(total)d)') % d
logger.info(progression)
self.pbFeedUpdate.set_text(progression)
try:
channel._update_error = None
util.idle_add(indicate_updating_podcast, channel)
new_episodes.extend(channel.update(max_episodes=self.config.limit.episodes))
self._update_cover(channel)
except Exception as e:
message = str(e)
if message:
channel._update_error = message
else:
channel._update_error = '?'
nr_update_errors += 1
logger.error('Error updating feed: %s: %s', channel.title, message, exc_info=(e.__class__ not in [
gpodder.feedcore.BadRequest,
gpodder.feedcore.AuthenticationRequired,
gpodder.feedcore.Unsubscribe,
gpodder.feedcore.NotFound,
gpodder.feedcore.InternalServerError,
gpodder.feedcore.UnknownStatusCode,
requests.exceptions.ConnectionError,
requests.exceptions.RetryError,
urllib3.exceptions.MaxRetryError,
urllib3.exceptions.ReadTimeoutError,
]))
updated_channels.append(channel)
def update_progress(channel):
self.update_podcast_list_model([channel.url])
# If the currently-viewed podcast is updated, reload episodes
if self.active_channel is not None and \
self.active_channel == channel:
logger.debug('Updated channel is active, updating UI')
self.update_episode_list_model()
self.pbFeedUpdate.set_fraction(float(updated + 1) / float(count))
util.idle_add(update_progress, channel)
if nr_update_errors > 0:
self.notification(
N_('%(count)d channel failed to update',
'%(count)d channels failed to update',
nr_update_errors) % {'count': nr_update_errors},
_('Error while updating feeds'), widget=self.treeChannels)
def update_feed_cache_finish_callback(new_episodes):
# Process received episode actions for all updated URLs
self.process_received_episode_actions()
# If we are currently viewing "All episodes" or a section, update its episode list now
if self.active_channel is not None and \
isinstance(self.active_channel, PodcastChannelProxy):
self.update_episode_list_model()
if self.feed_cache_update_cancelled:
# The user decided to abort the feed update
self.show_update_feeds_buttons()
# The filter extension can mark newly added episodes as old,
# so take only episodes marked as new.
episodes = ((e for e in new_episodes if e.check_is_new())
if self.config.ui.gtk.only_added_are_new
else self.get_new_episodes(list(updated_channels)))
if self.config.downloads.chronological_order:
# download older episodes first
episodes = list(Model.sort_episodes_by_pubdate(episodes))
# Remove episodes without downloadable content
downloadable_episodes = [e for e in episodes if e.url]
if not downloadable_episodes:
# Nothing new here - but inform the user
self.pbFeedUpdate.set_fraction(1.0)
self.pbFeedUpdate.set_text(
_('No new episodes with downloadable content') if episodes else _('No new episodes'))
self.feed_cache_update_cancelled = True
self.btnCancelFeedUpdate.show()
self.btnCancelFeedUpdate.set_sensitive(True)
self.update_action.set_enabled(True)
self.btnCancelFeedUpdate.set_image(Gtk.Image.new_from_icon_name('edit-clear', Gtk.IconSize.BUTTON))
else:
episodes = downloadable_episodes
count = len(episodes)
# New episodes are available
self.pbFeedUpdate.set_fraction(1.0)
if self.config.ui.gtk.new_episodes == 'download':
self.download_episode_list(episodes)
title = N_('Downloading %(count)d new episode.',
'Downloading %(count)d new episodes.',
count) % {'count': count}
self.show_message(title, _('New episodes available'))
elif self.config.ui.gtk.new_episodes == 'queue':
self.download_episode_list_paused(episodes)
title = N_(
'%(count)d new episode added to download list.',
'%(count)d new episodes added to download list.',
count) % {'count': count}
self.show_message(title, _('New episodes available'))
else:
if (show_new_episodes_dialog
and self.config.ui.gtk.new_episodes == 'show'):
self.new_episodes_show(episodes, notification=True)
else: # !show_new_episodes_dialog or ui.gtk.new_episodes == 'ignore'
message = N_('%(count)d new episode available',
'%(count)d new episodes available',
count) % {'count': count}
self.pbFeedUpdate.set_text(message)
self.show_update_feeds_buttons()
util.idle_add(update_feed_cache_finish_callback, new_episodes)
def on_gPodder_delete_event(self, *args):
"""Called when the GUI wants to close the window
Displays a confirmation dialog (and closes/hides gPodder)
"""
if self.confirm_quit():
self.close_gpodder()
return True
def confirm_quit(self):
"""Called when the GUI wants to close the window
Displays a confirmation dialog
"""
downloading = self.download_status_model.are_downloads_in_progress()
if downloading:
dialog = Gtk.MessageDialog(self.gPodder, Gtk.DialogFlags.MODAL, Gtk.MessageType.QUESTION, Gtk.ButtonsType.NONE)
dialog.add_button(_('_Cancel'), Gtk.ResponseType.CANCEL)
quit_button = dialog.add_button(_('_Quit'), Gtk.ResponseType.CLOSE)
title = _('Quit gPodder')
message = _('You are downloading episodes. You can resume downloads the next time you start gPodder. Do you want to quit now?')
dialog.set_title(title)
dialog.set_markup('<span weight="bold" size="larger">%s</span>\n\n%s' % (title, message))
quit_button.grab_focus()
result = dialog.run()
dialog.destroy()
return result == Gtk.ResponseType.CLOSE
else:
return True
def close_gpodder(self):
""" clean everything and exit properly
"""
# Cancel any running background updates of the episode list model
self.episode_list_model.background_update = None
self.gPodder.hide()
# Notify all tasks to carry out any clean-up actions
self.download_status_model.tell_all_tasks_to_quit()
while Gtk.events_pending() or self.download_queue_manager.has_workers():
Gtk.main_iteration()
self.core.shutdown()
self.application.remove_window(self.gPodder)
def format_delete_message(self, message, things, max_things, max_length):
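# Illustrative output for max_things=2 and three episodes (titles are escaped
# and truncated to max_length characters):
#   • Episode One
#   • Episode Two
#   +1 more…
#
#   <message>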
titles = []
for index, thing in zip(range(max_things), things):
titles.append('• ' + (html.escape(thing.title if len(thing.title) <= max_length else thing.title[:max_length] + '…')))
if len(things) > max_things:
titles.append('+%(count)d more…' % {'count': len(things) - max_things})
return '\n'.join(titles) + '\n\n' + message
def delete_episode_list(self, episodes, confirm=True, callback=None):
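# When invoked from the Downloads tab, interpret "delete" as "remove the
# selected download tasks from the list" and delegate to
# _for_each_task_set_status() with status=None instead of touching files.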
if self.in_downloads_list():
selection = self.treeDownloads.get_selection()
(model, paths) = selection.get_selected_rows()
selected_tasks = [(Gtk.TreeRowReference.new(model, path),
model.get_value(model.get_iter(path),
DownloadStatusModel.C_TASK)) for path in paths]
self._for_each_task_set_status(selected_tasks, status=None)
return
if not episodes:
return False
episodes = [e for e in episodes if not e.archive]
if not episodes:
title = _('Episodes are locked')
message = _(
'The selected episodes are locked. Please unlock the '
'episodes that you want to delete before trying '
'to delete them.')
self.notification(message, title, widget=self.treeAvailable)
return False
count = len(episodes)
title = N_('Delete %(count)d episode?', 'Delete %(count)d episodes?',
count) % {'count': count}
message = _('Deleting episodes removes downloaded files.')
message = self.format_delete_message(message, episodes, 5, 60)
if confirm and not self.show_confirmation(message, title):
return False
self.on_item_cancel_download_activate(force=True)
progress = ProgressIndicator(_('Deleting episodes'),
_('Please wait while episodes are deleted'),
parent=self.get_dialog_parent())
def finish_deletion(episode_urls, channel_urls):
# Episodes have been deleted - persist the database
self.db.commit()
self.update_episode_list_icons(episode_urls)
self.update_podcast_list_model(channel_urls)
self.play_or_download()
progress.on_finished()
@util.run_in_background
def thread_proc():
episode_urls = set()
channel_urls = set()
episodes_status_update = []
for idx, episode in enumerate(episodes):
progress.on_progress(idx / len(episodes))
if not episode.archive:
progress.on_message(episode.title)
episode.delete_from_disk()
episode_urls.add(episode.url)
channel_urls.add(episode.channel.url)
episodes_status_update.append(episode)
# Notify the web service about the status update + upload
if self.mygpo_client.can_access_webservice():
self.mygpo_client.on_delete(episodes_status_update)
self.mygpo_client.flush()
if callback is None:
util.idle_add(finish_deletion, episode_urls, channel_urls)
else:
util.idle_add(callback, episode_urls, channel_urls, progress)
return True
def on_itemRemoveOldEpisodes_activate(self, action, param):
self.show_delete_episodes_window()
def show_delete_episodes_window(self, channel=None):
"""Offer deletion of episodes
If channel is None, offer deletion of all episodes.
Otherwise only offer deletion of episodes in the channel.
"""
columns = (
('markup_delete_episodes', None, None, _('Episode')),
)
msg_older_than = N_('Select older than %(count)d day', 'Select older than %(count)d days', self.config.auto.cleanup.days)
selection_buttons = {
_('Select played'): lambda episode: not episode.is_new,
_('Select finished'): lambda episode: episode.is_finished(),
msg_older_than % {'count': self.config.auto.cleanup.days}:
lambda episode: episode.age_in_days() > self.config.auto.cleanup.days,
}
instructions = _('Select the episodes you want to delete:')
if channel is None:
channels = self.channels
else:
channels = [channel]
episodes = []
for channel in channels:
for episode in channel.get_episodes(gpodder.STATE_DOWNLOADED):
# Disallow deletion of locked episodes that still exist
if not episode.archive or not episode.file_exists():
episodes.append(episode)
selected = [not e.is_new or not e.file_exists() for e in episodes]
gPodderEpisodeSelector(
self.main_window, title=_('Delete episodes'),
instructions=instructions,
episodes=episodes, selected=selected, columns=columns,
ok_button=_('_Delete'), callback=self.delete_episode_list,
selection_buttons=selection_buttons, _config=self.config)
def on_selected_episodes_status_changed(self):
# The order of the updates here is important! When "All episodes" is
# selected, the update of the podcast list model depends on the episode
# list selection to determine which podcasts are affected. Updating
# the episode list could remove the selection if a filter is active.
self.update_podcast_list_model(selected=True)
self.update_episode_list_icons(selected=True)
self.db.commit()
self.play_or_download()
def mark_selected_episodes_new(self):
for episode in self.get_selected_episodes():
episode.mark(is_played=False)
self.on_selected_episodes_status_changed()
def mark_selected_episodes_old(self):
for episode in self.get_selected_episodes():
episode.mark(is_played=True)
self.on_selected_episodes_status_changed()
def on_item_toggle_played_activate(self, action, param):
for episode in self.get_selected_episodes():
episode.mark(is_played=episode.is_new and episode.state != gpodder.STATE_DELETED)
self.on_selected_episodes_status_changed()
def on_item_toggle_lock_activate(self, unused, toggle=True, new_value=False):
for episode in self.get_selected_episodes():
if episode.state == gpodder.STATE_DELETED:
# Always unlock deleted episodes
episode.mark(is_locked=False)
elif toggle or toggle is None:
# Gio.SimpleAction activate signal passes None (see #681)
episode.mark(is_locked=not episode.archive)
else:
episode.mark(is_locked=new_value)
self.on_selected_episodes_status_changed()
self.play_or_download()
def on_episode_lock_activate(self, action, *params):
new_value = not action.get_state().get_boolean()
self.on_item_toggle_lock_activate(None, toggle=False, new_value=new_value)
action.change_state(GLib.Variant.new_boolean(new_value))
self.episodes_popover.popdown()
return True
def on_channel_toggle_lock_activate(self, action, *params):
if self.active_channel is None:
return
self.active_channel.auto_archive_episodes = not self.active_channel.auto_archive_episodes
self.active_channel.save()
for episode in self.active_channel.get_all_episodes():
episode.mark(is_locked=self.active_channel.auto_archive_episodes)
self.update_podcast_list_model(selected=True)
self.update_episode_list_icons(update_all=True)
action.change_state(
GLib.Variant.new_boolean(self.active_channel.auto_archive_episodes))
self.channels_popover.popdown()
def on_itemUpdateChannel_activate(self, *params):
if self.active_channel is None:
title = _('No podcast selected')
message = _('Please select a podcast in the podcasts list to update.')
self.show_message(message, title, widget=self.treeChannels)
return
# Dirty hack to check for "All episodes" (see gpodder.gtkui.model)
if getattr(self.active_channel, 'ALL_EPISODES_PROXY', False):
self.update_feed_cache()
else:
self.update_feed_cache(channels=[self.active_channel])
def on_itemUpdate_activate(self, action=None, param=None):
# Check if we have outstanding subscribe/unsubscribe actions
self.on_add_remove_podcasts_mygpo()
if self.channels:
self.update_feed_cache()
else:
def show_welcome_window():
def on_show_example_podcasts(widget):
welcome_window.main_window.response(Gtk.ResponseType.CANCEL)
self.on_itemImportChannels_activate(None)
def on_add_podcast_via_url(widget):
welcome_window.main_window.response(Gtk.ResponseType.CANCEL)
self.on_itemAddChannel_activate(None)
def on_setup_my_gpodder(widget):
welcome_window.main_window.response(Gtk.ResponseType.CANCEL)
self.on_download_subscriptions_from_mygpo(None)
welcome_window = gPodderWelcome(self.main_window,
center_on_widget=self.main_window,
on_show_example_podcasts=on_show_example_podcasts,
on_add_podcast_via_url=on_add_podcast_via_url,
on_setup_my_gpodder=on_setup_my_gpodder)
welcome_window.main_window.run()
welcome_window.main_window.destroy()
util.idle_add(show_welcome_window)
def download_episode_list_paused(self, episodes, hide_progress=False):
self.download_episode_list(episodes, True, hide_progress=hide_progress)
def download_episode_list(self, episodes, add_paused=False, force_start=False, downloader=None, hide_progress=False):
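# Flow: existing tasks for the given episodes are re-queued directly in the
# loop below; brand-new DownloadTask objects are only registered here and then
# queued later in queue_tasks(), which runs on the GTK main loop via
# util.idle_add() after registration has completed.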
# Start progress indicator to queue existing tasks
count = len(episodes)
if count and not hide_progress:
progress_indicator = ProgressIndicator(
_('Queueing'),
'', True, self.get_dialog_parent(), count)
else:
progress_indicator = None
restart_timer = self.stop_download_list_update_timer()
self.download_queue_manager.disable()
def queue_tasks(tasks, queued_existing_task):
if progress_indicator is None or not progress_indicator.cancelled:
if progress_indicator:
count = len(tasks)
if count:
# Restart progress indicator to queue new tasks
progress_indicator.set_max_ticks(count)
progress_indicator.on_progress(0.0)
for task in tasks:
with task:
if add_paused:
task.status = task.PAUSED
else:
self.mygpo_client.on_download([task.episode])
self.queue_task(task, force_start)
if progress_indicator:
if not progress_indicator.on_tick():
break
if progress_indicator:
progress_indicator.on_tick(final=_('Updating...'))
self.download_queue_manager.enable()
# Update the tab title and downloads list
if tasks or queued_existing_task or restart_timer:
self.set_download_list_state(gPodderSyncUI.DL_ONEOFF)
# Flush updated episode status
if self.mygpo_client.can_access_webservice():
self.mygpo_client.flush()
if progress_indicator:
progress_indicator.on_finished()
queued_existing_task = False
new_tasks = []
if self.config.downloads.chronological_order:
# Download episodes in chronological order (older episodes first)
episodes = list(Model.sort_episodes_by_pubdate(episodes))
for episode in episodes:
if progress_indicator:
# Tick at the top of the loop: the 'continue' statements below would otherwise skip the tick
if not progress_indicator.on_tick():
break
logger.debug('Downloading episode: %s', episode.title)
if not episode.was_downloaded(and_exists=True):
episode._download_error = None
if episode.state == gpodder.STATE_DELETED:
episode.state = gpodder.STATE_NORMAL
episode.save()
task_exists = False
for task in self.download_tasks_seen:
if episode.url == task.url:
task_exists = True
task.unpause()
task.reuse()
if task.status not in (task.DOWNLOADING, task.QUEUED):
if downloader:
# replace existing task's download with forced one
task.downloader = downloader
self.queue_task(task, force_start)
queued_existing_task = True
continue
if task_exists:
continue
try:
task = download.DownloadTask(episode, self.config, downloader=downloader)
except Exception as e:
episode._download_error = str(e)
d = {'episode': html.escape(episode.title), 'message': html.escape(str(e))}
message = _('Download error while downloading %(episode)s: %(message)s')
self.show_message(message % d, _('Download error'), important=True)
logger.error('While downloading %s', episode.title, exc_info=True)
continue
# New task - register it here; queueing happens later on the GTK main loop (see util.idle_add below)
self.download_status_model.register_task(task)
new_tasks.append(task)
# Executes after tasks have been registered
util.idle_add(queue_tasks, new_tasks, queued_existing_task)
def cancel_task_list(self, tasks, force=False):
if not tasks:
return
progress_indicator = ProgressIndicator(
download.DownloadTask.STATUS_MESSAGE[download.DownloadTask.CANCELLING],
'', True, self.get_dialog_parent(), len(tasks))
restart_timer = self.stop_download_list_update_timer()
self.download_queue_manager.disable()
for task in tasks:
task.cancel()
if not progress_indicator.on_tick():
break
progress_indicator.on_tick(final=_('Updating...'))
self.download_queue_manager.enable()
self.update_episode_list_icons([task.url for task in tasks])
self.play_or_download()
# Update the tab title and downloads list
if restart_timer:
self.set_download_list_state(gPodderSyncUI.DL_ONEOFF)
else:
self.update_downloads_list()
progress_indicator.on_finished()
def new_episodes_show(self, episodes, notification=False, selected=None):
columns = (
('markup_new_episodes', None, None, _('Episode')),
)
instructions = _('Select the episodes you want to download:')
if self.new_episodes_window is not None:
self.new_episodes_window.main_window.destroy()
self.new_episodes_window = None
def download_episodes_callback(episodes):
self.new_episodes_window = None
self.download_episode_list(episodes)
# Remove episodes without downloadable content
episodes = [e for e in episodes if e.url]
if len(episodes) == 0:
return
if selected is None:
# Select all by default
selected = [True] * len(episodes)
self.new_episodes_window = gPodderEpisodeSelector(self.main_window,
title=_('New episodes available'),
instructions=instructions,
episodes=episodes,
columns=columns,
selected=selected,
ok_button='gpodder-download',
callback=download_episodes_callback,
remove_callback=lambda e: e.mark_old(),
remove_action=_('_Mark as old'),
remove_finished=self.episode_new_status_changed,
_config=self.config,
show_notification=False)
def on_itemDownloadAllNew_activate(self, action, param):
if not self.offer_new_episodes():
self.show_message(_('Please check for new episodes later.'),
_('No new episodes available'))
def get_new_episodes(self, channels=None):
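# Equivalent to the nested comprehension below, written as explicit loops:
#   result = []
#   for c in (channels or self.channels):
#       for e in c.get_all_episodes():
#           if e.check_is_new():
#               result.append(e)
#   return result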
return [e for c in channels or self.channels for e in
[e for e in c.get_all_episodes() if e.check_is_new()]]
def commit_changes_to_database(self):
"""This will be called after the sync process is finished"""
self.db.commit()
def on_itemShowToolbar_activate(self, action, param):
state = action.get_state()
self.config.ui.gtk.toolbar = not state
action.set_state(GLib.Variant.new_boolean(not state))
def on_item_view_search_always_visible_toggled(self, action, param):
state = action.get_state()
self.config.ui.gtk.search_always_visible = not state
action.set_state(GLib.Variant.new_boolean(not state))
for search in (self._search_episodes, self._search_podcasts):
if search:
if self.config.ui.gtk.search_always_visible:
search.show_search(grab_focus=False)
else:
search.hide_search()
def on_item_view_hide_boring_podcasts_toggled(self, action, param):
state = action.get_state()
self.config.ui.gtk.podcast_list.hide_empty = not state
action.set_state(GLib.Variant.new_boolean(not state))
self.apply_podcast_list_hide_boring()
def on_item_view_show_all_episodes_toggled(self, action, param):
state = action.get_state()
self.config.ui.gtk.podcast_list.all_episodes = not state
action.set_state(GLib.Variant.new_boolean(not state))
def on_item_view_show_podcast_sections_toggled(self, action, param):
state = action.get_state()
self.config.ui.gtk.podcast_list.sections = not state
action.set_state(GLib.Variant.new_boolean(not state))
def on_item_view_episodes_changed(self, action, param):
self.config.ui.gtk.episode_list.view_mode = getattr(EpisodeListModel, param.get_string()) or EpisodeListModel.VIEW_ALL
action.set_state(param)
self.episode_list_model.set_view_mode(self.config.ui.gtk.episode_list.view_mode)
self.apply_podcast_list_hide_boring()
def on_item_view_always_show_new_episodes_toggled(self, action, param):
state = action.get_state()
self.config.ui.gtk.episode_list.always_show_new = not state
action.set_state(GLib.Variant.new_boolean(not state))
def on_item_view_trim_episode_title_prefix_toggled(self, action, param):
state = action.get_state()
self.config.ui.gtk.episode_list.trim_title_prefix = not state
action.set_state(GLib.Variant.new_boolean(not state))
def on_item_view_show_episode_description_toggled(self, action, param):
state = action.get_state()
self.config.ui.gtk.episode_list.descriptions = not state
action.set_state(GLib.Variant.new_boolean(not state))
def on_item_view_show_episode_released_time_toggled(self, action, param):
state = action.get_state()
self.config.ui.gtk.episode_list.show_released_time = not state
action.set_state(GLib.Variant.new_boolean(not state))
def on_item_view_right_align_episode_released_column_toggled(self, action, param):
state = action.get_state()
self.config.ui.gtk.episode_list.right_align_released_column = not state
action.set_state(GLib.Variant.new_boolean(not state))
self.align_releasecell()
self.treeAvailable.queue_draw()
def on_item_view_ctrl_click_to_sort_episodes_toggled(self, action, param):
state = action.get_state()
self.config.ui.gtk.episode_list.ctrl_click_to_sort = not state
action.set_state(GLib.Variant.new_boolean(not state))
def apply_podcast_list_hide_boring(self):
if self.config.ui.gtk.podcast_list.hide_empty:
self.podcast_list_model.set_view_mode(self.config.ui.gtk.episode_list.view_mode)
else:
self.podcast_list_model.set_view_mode(-1)
def on_download_subscriptions_from_mygpo(self, action=None):
def after_login():
title = _('Subscriptions on %(server)s') \
% {'server': self.config.mygpo.server}
gpd = gPodderPodcastDirectory(
self.gPodder,
_config=self.config,
custom_title=title,
add_podcast_list=self.add_podcast_list)
url = self.mygpo_client.get_download_user_subscriptions_url()
gpd.download_opml_file(url)
title = _('Login to gpodder.net')
message = _('Please login to download your subscriptions.')
def on_register_button_clicked():
util.open_website('http://gpodder.net/register/')
success, (root_url, username, password) = self.show_login_dialog(title, message,
self.config.mygpo.server,
self.config.mygpo.username, self.config.mygpo.password,
register_callback=on_register_button_clicked,
ask_server=True)
if not success:
return
self.config.mygpo.server = root_url
self.config.mygpo.username = username
self.config.mygpo.password = password
util.idle_add(after_login)
def on_itemAddChannel_activate(self, action=None, param=None):
self._add_podcast_dialog = gPodderAddPodcast(self.gPodder,
add_podcast_list=self.add_podcast_list)
def on_itemEditChannel_activate(self, action, param=None):
if self.active_channel is None:
title = _('No podcast selected')
message = _('Please select a podcast in the podcasts list to edit.')
self.show_message(message, title, widget=self.treeChannels)
return
gPodderChannel(self.main_window,
channel=self.active_channel,
update_podcast_list_model=self.update_podcast_list_model,
cover_downloader=self.cover_downloader,
sections={c.section for c in self.channels},
clear_cover_cache=self.podcast_list_model.clear_cover_cache,
_config=self.config)
def on_itemMassUnsubscribe_activate(self, action, param):
columns = (
('title_markup', None, None, _('Podcast')),
)
# We're abusing the Episode Selector for selecting Podcasts here,
# but it works and looks good, so why not? -- thp
gPodderEpisodeSelector(self.main_window,
title=_('Delete podcasts'),
instructions=_('Select the podcast you want to delete.'),
episodes=self.channels,
columns=columns,
size_attribute=None,
ok_button=_('_Delete'),
callback=self.remove_podcast_list,
_config=self.config)
def remove_podcast_list(self, channels, confirm=True):
if not channels:
return
if len(channels) == 1:
title = _('Deleting podcast')
info = _('Please wait while the podcast is deleted')
message = _('This podcast and all its episodes will be PERMANENTLY DELETED.\nAre you sure you want to continue?')
else:
title = _('Deleting podcasts')
info = _('Please wait while the podcasts are deleted')
message = _('These podcasts and all their episodes will be PERMANENTLY DELETED.\nAre you sure you want to continue?')
message = self.format_delete_message(message, channels, 5, 60)
if confirm and not self.show_confirmation(message, title):
return
progress = ProgressIndicator(title, info, parent=self.get_dialog_parent())
def finish_deletion(select_url):
# Upload subscription list changes to the web service
self.mygpo_client.on_unsubscribe([c.url for c in channels])
# Re-load the channels and select the desired new channel
self.update_podcast_list_model(select_url=select_url)
progress.on_finished()
@util.run_in_background
def thread_proc():
select_url = None
for idx, channel in enumerate(channels):
# Update the UI for correct status messages
progress.on_progress(idx / len(channels))
progress.on_message(channel.title)
# Delete downloaded episodes
channel.remove_downloaded()
# cancel any active downloads from this channel
for episode in channel.get_all_episodes():
if episode.downloading:
episode.download_task.cancel()
if len(channels) == 1:
# get the URL of the podcast we want to select next
if channel in self.channels:
position = self.channels.index(channel)
else:
position = -1
if position == len(self.channels) - 1:
# this is the last podcast, so select the URL
# of the item before this one (i.e. the "new last")
select_url = self.channels[position - 1].url
else:
# there is a podcast after the deleted one, so
# we simply select the one that comes after it
select_url = self.channels[position + 1].url
# Remove the channel and clean the database entries
channel.delete()
# Clean up downloads and download directories
common.clean_up_downloads()
# The remaining stuff is to be done in the GTK main thread
util.idle_add(finish_deletion, select_url)
def on_itemRefreshCover_activate(self, widget, *args):
assert self.active_channel is not None
self.podcast_list_model.clear_cover_cache(self.active_channel.url)
self.cover_downloader.replace_cover(self.active_channel, custom_url=False)
def on_itemRemoveChannel_activate(self, widget, *args):
if self.active_channel is None:
title = _('No podcast selected')
message = _('Please select a podcast in the podcasts list to remove.')
self.show_message(message, title, widget=self.treeChannels)
return
self.remove_podcast_list([self.active_channel])
def get_opml_filter(self):
flt = Gtk.FileFilter()
flt.add_pattern('*.opml')
flt.add_pattern('*.xml')
flt.set_name(_('OPML files') + ' (*.opml, *.xml)')
return flt
def on_item_import_from_file_activate(self, action, filename=None):
if filename is None:
dlg = Gtk.FileChooserDialog(title=_('Import from OPML'),
parent=self.main_window,
action=Gtk.FileChooserAction.OPEN)
dlg.add_button(_('_Cancel'), Gtk.ResponseType.CANCEL)
dlg.add_button(_('_Open'), Gtk.ResponseType.OK)
dlg.set_filter(self.get_opml_filter())
response = dlg.run()
filename = None
if response == Gtk.ResponseType.OK:
filename = dlg.get_filename()
dlg.destroy()
if filename is not None:
gpd = gPodderPodcastDirectory(self.gPodder, _config=self.config,
custom_title=_('Import podcasts from OPML file'),
add_podcast_list=self.add_podcast_list)
gpd.download_opml_file(filename)
def on_itemExportChannels_activate(self, widget, *args):
if not self.channels:
title = _('Nothing to export')
message = _('Your list of podcast subscriptions is empty. '
'Please subscribe to some podcasts first before '
'trying to export your subscription list.')
self.show_message(message, title, widget=self.treeChannels)
return
dlg = Gtk.FileChooserDialog(title=_('Export to OPML'),
parent=self.gPodder,
action=Gtk.FileChooserAction.SAVE)
dlg.add_button(_('_Cancel'), Gtk.ResponseType.CANCEL)
dlg.add_button(_('_Save'), Gtk.ResponseType.OK)
dlg.set_filter(self.get_opml_filter())
response = dlg.run()
if response == Gtk.ResponseType.OK:
filename = dlg.get_filename()
dlg.destroy()
exporter = opml.Exporter(filename)
if filename is not None and exporter.write(self.channels):
count = len(self.channels)
title = N_('%(count)d subscription exported',
'%(count)d subscriptions exported',
count) % {'count': count}
self.show_message(_('Your podcast list has been successfully '
'exported.'),
title, widget=self.treeChannels)
else:
self.show_message(_('Could not export OPML to file. '
'Please check your permissions.'),
_('OPML export failed'), important=True)
else:
dlg.destroy()
def on_itemImportChannels_activate(self, widget, *args):
self._podcast_directory = gPodderPodcastDirectory(self.main_window,
_config=self.config,
add_podcast_list=self.add_podcast_list)
def on_homepage_activate(self, widget, *args):
util.open_website(gpodder.__url__)
def check_for_distro_updates(self):
title = _('Managed by distribution')
message = _('Please check your distribution for gPodder updates.')
self.show_message(message, title, important=True)
def check_for_updates(self, silent):
"""Check for updates and (optionally) show a message
If silent=False, a message will be shown even if no updates are
available (set silent=False when the check is manually triggered).
"""
try:
up_to_date, version, released, days = util.get_update_info()
except Exception:
if silent:
logger.warning('Could not check for updates.', exc_info=True)
else:
title = _('Could not check for updates')
message = _('Please try again later.')
self.show_message(message, title, important=True)
return
if up_to_date and not silent:
title = _('No updates available')
message = _('You have the latest version of gPodder.')
self.show_message(message, title, important=True)
if not up_to_date:
title = _('New version available')
message = '\n'.join([
_('Installed version: %s') % gpodder.__version__,
_('Newest version: %s') % version,
_('Release date: %s') % released,
'',
_('Download the latest version from gpodder.org?'),
])
if self.show_confirmation(message, title):
util.open_website('http://gpodder.org/downloads')
def on_wNotebook_switch_page(self, notebook, page, page_num):
# wNotebook.get_current_page() (called in in_downloads_list() via
# play_or_download()) returns the previous notebook page number
# when called during the handling of 'switch-page' signal.
# Call play_or_download() in the main loop after the signal
# handling has completed, so it sees the correct page number.
util.idle_add(self.play_or_download)
def on_treeChannels_row_activated(self, widget, path, *args):
# double-click action of the podcast list or enter
self.treeChannels.set_cursor(path)
# open channel settings
channel = self.get_selected_channels()[0]
if channel and not isinstance(channel, PodcastChannelProxy):
self.on_itemEditChannel_activate(None)
def get_selected_channels(self):
"""Get a list of selected channels from treeChannels"""
selection = self.treeChannels.get_selection()
model, paths = selection.get_selected_rows()
channels = [model.get_value(model.get_iter(path), PodcastListModel.C_CHANNEL) for path in paths]
channels = [c for c in channels if c is not None]
return channels
def on_treeChannels_cursor_changed(self, widget, *args):
model, iterator = self.treeChannels.get_selection().get_selected()
if model is not None and iterator is not None:
old_active_channel = self.active_channel
self.active_channel = model.get_value(iterator, PodcastListModel.C_CHANNEL)
if self.active_channel == old_active_channel:
return
# Dirty hack to check for "All episodes" or a section (see gpodder.gtkui.model)
if isinstance(self.active_channel, PodcastChannelProxy):
self.edit_channel_action.set_enabled(False)
else:
self.edit_channel_action.set_enabled(True)
else:
self.active_channel = None
self.edit_channel_action.set_enabled(False)
self.update_episode_list_model()
def get_podcast_urls_from_selected_episodes(self):
"""Get a set of podcast URLs based on the selected episodes"""
return {episode.channel.url for episode in self.get_selected_episodes()}
def get_selected_episodes(self):
"""Get a list of selected episodes from treeAvailable"""
selection = self.treeAvailable.get_selection()
model, paths = selection.get_selected_rows()
episodes = [model.get_value(model.get_iter(path), EpisodeListModel.C_EPISODE) for path in paths]
episodes = [e for e in episodes if e is not None]
return episodes
def on_playback_selected_episodes(self, *params):
self.playback_episodes(self.get_selected_episodes())
def on_episode_new_activate(self, action, *params):
state = not action.get_state().get_boolean()
if state:
self.mark_selected_episodes_new()
else:
self.mark_selected_episodes_old()
action.change_state(GLib.Variant.new_boolean(state))
self.episodes_popover.popdown()
return True
def on_shownotes_selected_episodes(self, *params):
episodes = self.get_selected_episodes()
self.shownotes_object.toggle_pane_visibility(episodes)
def on_download_selected_episodes(self, action_or_widget, param=None):
if not self.in_downloads_list():
episodes = [e for e in self.get_selected_episodes() if e.can_download()]
self.download_episode_list(episodes)
else:
selection = self.treeDownloads.get_selection()
(model, paths) = selection.get_selected_rows()
selected_tasks = [(Gtk.TreeRowReference.new(model, path),
model.get_value(model.get_iter(path),
DownloadStatusModel.C_TASK)) for path in paths]
self._for_each_task_set_status(selected_tasks, download.DownloadTask.QUEUED)
def on_force_download_selected_episodes(self, action_or_widget, param=None):
if self.in_downloads_list():
selection = self.treeDownloads.get_selection()
(model, paths) = selection.get_selected_rows()
selected_tasks = [(Gtk.TreeRowReference.new(model, path),
model.get_value(model.get_iter(path),
DownloadStatusModel.C_TASK)) for path in paths]
self._for_each_task_set_status(selected_tasks, download.DownloadTask.QUEUED, True)
def on_pause_selected_episodes(self, action_or_widget, param=None):
if not self.in_downloads_list():
selection = self.get_selected_episodes()
selected_tasks = [(None, e.download_task) for e in selection if e.download_task is not None and e.can_pause()]
self._for_each_task_set_status(selected_tasks, download.DownloadTask.PAUSING)
else:
selection = self.treeDownloads.get_selection()
(model, paths) = selection.get_selected_rows()
selected_tasks = [(Gtk.TreeRowReference.new(model, path),
model.get_value(model.get_iter(path),
DownloadStatusModel.C_TASK)) for path in paths]
self._for_each_task_set_status(selected_tasks, download.DownloadTask.PAUSING)
def on_move_selected_items_up(self, action, *args):
selection = self.treeDownloads.get_selection()
model, selected_paths = selection.get_selected_rows()
for path in selected_paths:
index_above = path[0] - 1
if index_above < 0:
return
model.move_before(
model.get_iter(path),
model.get_iter((index_above,)))
def on_move_selected_items_down(self, action, *args):
selection = self.treeDownloads.get_selection()
model, selected_paths = selection.get_selected_rows()
for path in reversed(selected_paths):
index_below = path[0] + 1
if index_below >= len(model):
return
model.move_after(
model.get_iter(path),
model.get_iter((index_below,)))
def on_remove_from_download_list(self, action, *args):
selected_tasks, *_ = self.downloads_list_get_selection()
self._for_each_task_set_status(selected_tasks, None, False)
def on_treeAvailable_row_activated(self, widget, path, view_column):
"""Double-click/enter action handler for treeAvailable"""
self.on_shownotes_selected_episodes(widget)
def restart_auto_update_timer(self):
if self._auto_update_timer_source_id is not None:
logger.debug('Removing existing auto update timer.')
GLib.source_remove(self._auto_update_timer_source_id)
self._auto_update_timer_source_id = None
if (self.config.auto.update.enabled
and self.config.auto.update.frequency):
interval = 60 * 1000 * self.config.auto.update.frequency
logger.debug('Setting up auto update timer with interval %d.',
self.config.auto.update.frequency)
self._auto_update_timer_source_id = util.idle_timeout_add(interval, self._on_auto_update_timer)
def _on_auto_update_timer(self):
if self.config.check_connection and not util.connection_available():
logger.debug('Skipping auto update (no connection available)')
return True
logger.debug('Auto update timer fired.')
self.update_feed_cache()
# Ask web service for sub changes (if enabled)
if self.mygpo_client.can_access_webservice():
self.mygpo_client.flush()
return True
def on_treeDownloads_row_activated(self, widget, *args):
# Use the standard way of working on the treeview
selection = self.treeDownloads.get_selection()
(model, paths) = selection.get_selected_rows()
selected_tasks = [(Gtk.TreeRowReference.new(model, path), model.get_value(model.get_iter(path), 0)) for path in paths]
has_queued_tasks = False
for tree_row_reference, task in selected_tasks:
with task:
if task.status in (task.DOWNLOADING, task.QUEUED):
task.pause()
elif task.status in (task.CANCELLED, task.PAUSED, task.FAILED):
self.download_queue_manager.queue_task(task)
has_queued_tasks = True
elif task.status == task.DONE:
model.remove(model.get_iter(tree_row_reference.get_path()))
if has_queued_tasks:
self.set_download_list_state(gPodderSyncUI.DL_ONEOFF)
self.play_or_download()
# Update the tab title and downloads list
self.update_downloads_list()
def on_item_cancel_download_activate(self, *params, force=False):
if not self.in_downloads_list():
selection = self.treeAvailable.get_selection()
(model, paths) = selection.get_selected_rows()
urls = [model.get_value(model.get_iter(path),
self.episode_list_model.C_URL) for path in paths]
selected_tasks = [task for task in self.download_tasks_seen
if task.url in urls]
else:
selection = self.treeDownloads.get_selection()
(model, paths) = selection.get_selected_rows()
selected_tasks = [model.get_value(model.get_iter(path),
self.download_status_model.C_TASK) for path in paths]
self.cancel_task_list(selected_tasks, force=force)
def on_btnCancelAll_clicked(self, widget, *args):
self.cancel_task_list(self.download_tasks_seen)
def on_delete_activate(self, widget, *args):
episodes = self.get_selected_episodes()
self.delete_episode_list(episodes)
def on_key_press(self, widget, event):
# Allow tab switching with Ctrl + PgUp/PgDown/Tab
if event.get_state() & Gdk.ModifierType.CONTROL_MASK:
current_page = self.wNotebook.get_current_page()
if event.keyval in (Gdk.KEY_Page_Up, Gdk.KEY_ISO_Left_Tab):
if current_page == 0:
current_page = self.wNotebook.get_n_pages()
self.wNotebook.set_current_page(current_page - 1)
return True
elif event.keyval in (Gdk.KEY_Page_Down, Gdk.KEY_Tab):
if current_page == self.wNotebook.get_n_pages() - 1:
current_page = -1
self.wNotebook.set_current_page(current_page + 1)
return True
elif event.keyval == Gdk.KEY_Delete:
if isinstance(widget.get_focus(), Gtk.Entry):
logger.debug("Entry has focus, ignoring Delete")
else:
self.main_window.activate_action('delete')
return True
return False
def uniconify_main_window(self):
if self.is_iconified():
# We need to hide and then show the window in WMs like Metacity
# or KWin4 to move the window to the active workspace
# (see http://gpodder.org/bug/1125)
self.gPodder.hide()
self.gPodder.show()
self.gPodder.present()
def iconify_main_window(self):
if not self.is_iconified():
self.gPodder.iconify()
@dbus.service.method(gpodder.dbus_interface)
def show_gui_window(self):
parent = self.get_dialog_parent()
parent.present()
@dbus.service.method(gpodder.dbus_interface)
def subscribe_to_url(self, url):
# Strip leading application protocol, so these URLs work:
# gpodder://example.com/episodes.rss
# gpodder:https://example.org/podcast.xml
if url.startswith('gpodder:'):
url = url[len('gpodder:'):]
while url.startswith('/'):
url = url[1:]
self._add_podcast_dialog = gPodderAddPodcast(self.gPodder,
add_podcast_list=self.add_podcast_list,
preset_url=url)
@dbus.service.method(gpodder.dbus_interface)
def mark_episode_played(self, filename):
if filename is None:
return False
for channel in self.channels:
for episode in channel.get_all_episodes():
fn = episode.local_filename(create=False, check_only=True)
if fn == filename:
episode.mark(is_played=True)
self.db.commit()
self.update_episode_list_icons([episode.url])
self.update_podcast_list_model([episode.channel.url])
return True
return False
def extensions_podcast_update_cb(self, podcast):
logger.debug('extensions_podcast_update_cb(%s)', podcast)
self.update_feed_cache(channels=[podcast],
show_new_episodes_dialog=False)
def extensions_episode_download_cb(self, episode):
logger.debug('extension_episode_download_cb(%s)', episode)
self.download_episode_list(episodes=[episode])
def mount_volume_cb(self, file, res, mount_result):
result = True
try:
file.mount_enclosing_volume_finish(res)
except GLib.Error as err:
if (not err.matches(Gio.io_error_quark(), Gio.IOErrorEnum.NOT_SUPPORTED)
and not err.matches(Gio.io_error_quark(), Gio.IOErrorEnum.ALREADY_MOUNTED)):
logger.error('mounting volume %s failed: %s' % (file.get_uri(), err.message))
result = False
finally:
mount_result["result"] = result
Gtk.main_quit()
def mount_volume_for_file(self, file):
op = Gtk.MountOperation.new(self.main_window)
result, message = util.mount_volume_for_file(file, op)
if not result:
logger.error('mounting volume %s failed: %s' % (file.get_uri(), message))
return result
def on_sync_to_device_activate(self, widget, episodes=None, force_played=True):
self.sync_ui = gPodderSyncUI(self.config, self.notification,
self.main_window,
self.show_confirmation,
self.application.on_itemPreferences_activate,
self.channels,
self.download_status_model,
self.download_queue_manager,
self.set_download_list_state,
self.commit_changes_to_database,
self.delete_episode_list,
gPodderEpisodeSelector,
self.mount_volume_for_file)
self.sync_ui.on_synchronize_episodes(self.channels, episodes, force_played)
def on_extension_enabled(self, extension):
if getattr(extension, 'on_ui_object_available', None) is not None:
extension.on_ui_object_available('gpodder-gtk', self)
if getattr(extension, 'on_ui_initialized', None) is not None:
extension.on_ui_initialized(self.model,
self.extensions_podcast_update_cb,
self.extensions_episode_download_cb)
self.extensions_menu_helper.replace_entries(
gpodder.user_extensions.on_create_menu())
def on_extension_disabled(self, extension):
self.extensions_menu_helper.replace_entries(
gpodder.user_extensions.on_create_menu())
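# Illustrative sketch (not part of the original module): a minimal extension
# object compatible with on_extension_enabled() above. gPodder calls
# on_ui_object_available('gpodder-gtk', self) and on_ui_initialized(model,
# update_cb, download_cb) only if the extension defines them; the hooks and
# their argument order are taken from the code above, the class itself is a
# hypothetical example.
class ExampleExtension:
    def on_ui_object_available(self, name, ui_object):
        if name == 'gpodder-gtk':
            self.gpodder_ui = ui_object

    def on_ui_initialized(self, model, update_podcast_cb, download_episode_cb):
        # Keep the callbacks so the extension can trigger feed updates and
        # episode downloads later (see extensions_podcast_update_cb /
        # extensions_episode_download_cb above).
        self.model = model
        self.update_podcast_cb = update_podcast_cb
        self.download_episode_cb = download_episode_cb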
| 183,927 | Python | .py | 3,396 | 40.48881 | 139 | 0.60405 | gpodder/gpodder | 1,268 | 204 | 307 | GPL-3.0 | 9/5/2024, 5:10:30 PM (Europe/Amsterdam) |
8,323 | desktopfile.py | gpodder_gpodder/src/gpodder/gtkui/desktopfile.py |
# -*- coding: utf-8 -*-
#
# gPodder - A media aggregator and podcast client
# Copyright (c) 2005-2018 The gPodder Team
#
# gPodder is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# gPodder is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#
# libplayers.py -- get list of potential playback apps
# thomas perl <thp@perli.net> 20060329
#
#
import glob
import logging
import os
import os.path
import re
import threading
from configparser import RawConfigParser
from gi.repository import GdkPixbuf, GObject, Gtk
import gpodder
_ = gpodder.gettext
logger = logging.getLogger(__name__)
# where are the .desktop files located?
userappsdirs = [os.path.expanduser(p) for p in (
'/usr/share/applications/',
'/usr/local/share/applications/',
'~/.local/share/applications',
'/var/lib/flatpak/exports/share/applications/',
'~/.local/share/flatpak/exports/share/applications/',
)]
# the name of the section in the .desktop files
sect = 'Desktop Entry'
class PlayerListModel(Gtk.ListStore):
C_ICON, C_NAME, C_COMMAND, C_CUSTOM = list(range(4))
def __init__(self):
Gtk.ListStore.__init__(self, GdkPixbuf.Pixbuf, str, str, bool)
def insert_app(self, pixbuf, name, command):
self.append((pixbuf, name, command, False))
def get_command(self, index):
return self[index][self.C_COMMAND]
def get_index(self, value):
for index, row in enumerate(self):
if value == row[self.C_COMMAND]:
return index
last_row = self[-1]
name = _('Command: %s') % value
if last_row[self.C_CUSTOM]:
last_row[self.C_COMMAND] = value
last_row[self.C_NAME] = name
else:
self.append((None, name, value, True))
return len(self) - 1
@classmethod
def is_separator(cls, model, iterator):
return model.get_value(iterator, cls.C_COMMAND) == ''
class UserApplication(object):
def __init__(self, name, cmd, mime, icon):
self.name = name
self.cmd = cmd
self.icon = icon
self.mime = mime
def get_icon(self):
if self.icon is not None:
# Load it from an absolute filename
if os.path.exists(self.icon):
try:
return GdkPixbuf.Pixbuf.new_from_file_at_size(self.icon, 24, 24)
except GObject.GError:
pass
# Load it from the current icon theme
(icon_name, extension) = os.path.splitext(os.path.basename(self.icon))
theme = Gtk.IconTheme()
if theme.has_icon(icon_name):
return theme.load_icon(icon_name, 24, Gtk.IconLookupFlags.FORCE_SIZE)
def is_mime(self, mimetype):
return self.mime.find(mimetype + '/') != -1
WIN32_APP_REG_KEYS = [
('Winamp', ('audio',), r'HKEY_CLASSES_ROOT\Winamp.File\shell\Play\command'),
('foobar2000', ('audio',), r'HKEY_CLASSES_ROOT\Applications\foobar2000.exe\shell\open\command'),
('Windows Media Player 11', ('audio', 'video'), r'HKEY_CLASSES_ROOT\WMP11.AssocFile.MP3\shell\open\command'),
('QuickTime Player', ('audio', 'video'), r'HKEY_CLASSES_ROOT\QuickTime.mp3\shell\open\command'),
('VLC', ('audio', 'video'), r'HKEY_CLASSES_ROOT\VLC.mp3\shell\open\command'),
('PotPlayer', ('audio', 'video'), r'HKEY_CLASSES_ROOT\potrun\shell\open\command'),
]
def win32_read_registry_key(path):
import winreg
rootmap = {
'HKEY_CLASSES_ROOT': winreg.HKEY_CLASSES_ROOT,
}
parts = path.split('\\')
root = parts.pop(0)
key = winreg.OpenKey(rootmap[root], parts.pop(0))
while parts:
key = winreg.OpenKey(key, parts.pop(0))
value, type_ = winreg.QueryValueEx(key, '')
if type_ == winreg.REG_EXPAND_SZ:
cmdline = re.sub(r'%([^%]+)%', lambda m: os.environ[m.group(1)], value)
elif type_ == winreg.REG_SZ:
cmdline = value
else:
raise ValueError('Not a string: ' + path)
return cmdline.replace('%1', '%f').replace('%L', '%f')
class UserAppsReader(object):
# XDG categories, plus some others found in random .desktop files
# https://standards.freedesktop.org/menu-spec/latest/apa.html
PLAYER_CATEGORIES = ('Audio', 'Video', 'AudioVideo', 'Player')
def __init__(self, mimetypes):
self.apps = []
self.mimetypes = mimetypes
self.__has_read = False
self.__finished = threading.Event()
self.__has_sep = False
self.apps.append(UserApplication(
_('Default application'), 'default',
';'.join((mime + '/*' for mime in self.mimetypes)),
'document-open'))
def add_separator(self):
self.apps.append(UserApplication(
'', '',
';'.join((mime + '/*' for mime in self.mimetypes)), ''))
self.__has_sep = True
def read(self):
if self.__has_read:
return
self.__has_read = True
if gpodder.ui.win32:
for caption, types, hkey in WIN32_APP_REG_KEYS:
try:
cmdline = win32_read_registry_key(hkey)
self.apps.append(UserApplication(
caption, cmdline,
';'.join(typ + '/*' for typ in types), None))
except Exception as e:
logger.warning('Parse HKEY error: %s (%s)', hkey, e)
for appdir in userappsdirs:
if os.path.exists(appdir):
for file in glob.glob(os.path.join(appdir, '*.desktop')):
self.parse_and_append(file)
self.__finished.set()
def parse_and_append(self, filename):
try:
parser = RawConfigParser()
parser.read([filename])
if not parser.has_section(sect):
return
app_categories = parser.get(sect, 'Categories')
if not app_categories:
return
if not any(category in self.PLAYER_CATEGORIES
for category in app_categories.split(';')):
return
# Find out if we need it by comparing mime types
app_mime = parser.get(sect, 'MimeType')
for needed_type in self.mimetypes:
if app_mime.find(needed_type + '/') != -1:
app_name = parser.get(sect, 'Name')
app_cmd = parser.get(sect, 'Exec')
app_icon = parser.get(sect, 'Icon')
if not self.__has_sep:
self.add_separator()
self.apps.append(UserApplication(app_name, app_cmd, app_mime, app_icon))
return
except Exception:
return
def get_model(self, mimetype):
self.__finished.wait()
model = PlayerListModel()
for app in self.apps:
if app.is_mime(mimetype):
model.insert_app(app.get_icon(), app.name, app.cmd)
return model
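# Illustrative usage sketch (not part of the original module): build the
# player list for one mime type. gPodder itself runs read() from a
# background thread; calling it inline here simply blocks until the
# registry keys / .desktop files have been scanned.
def _example_build_player_model(mimetype='audio'):
    reader = UserAppsReader(['audio', 'video'])
    reader.read()
    # PlayerListModel rows: (icon pixbuf, name, command, is_custom)
    return reader.get_model(mimetype)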
| 7,482 | Python | .py | 183 | 32.213115 | 113 | 0.603391 | gpodder/gpodder | 1,268 | 204 | 307 | GPL-3.0 | 9/5/2024, 5:10:30 PM (Europe/Amsterdam) |
8,324 | services.py | gpodder_gpodder/src/gpodder/gtkui/services.py |
# -*- coding: utf-8 -*-
#
# gPodder - A media aggregator and podcast client
# Copyright (c) 2005-2018 The gPodder Team
#
# gPodder is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# gPodder is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#
# gpodder.gtkui.services - UI parts for the services module (2009-08-24)
#
import logging
from gi.repository import GdkPixbuf
import gpodder
from gpodder import coverart, util
from gpodder.services import ObservableService
_ = gpodder.gettext
logger = logging.getLogger(__name__)
class CoverDownloader(ObservableService):
"""
This class manages downloading cover art and notification
of other parts of the system. Downloading cover art can
happen either synchronously via get_cover() or in
asynchronous mode via request_cover(). When in async mode,
the cover downloader will send the cover via the
'cover-available' message (via the ObservableService).
"""
def __init__(self):
self.downloader = coverart.CoverDownloader()
signal_names = ['cover-available', 'cover-removed']
ObservableService.__init__(self, signal_names)
def request_cover(self, channel, custom_url=None, avoid_downloading=False):
"""
Sends an asynchronous request to download a
cover for the specific channel.
After the cover has been downloaded, the
"cover-available" signal will be sent with
the channel url and new cover as pixbuf.
If you specify a custom_url, the cover will
be downloaded from the specified URL and not
taken from the channel metadata.
The optional parameter "avoid_downloading",
when true, will make sure we return only
already-downloaded covers and return None
when we have no cover on the local disk.
"""
logger.debug('cover download request for %s', channel.url)
util.run_in_background(lambda: self.__get_cover(channel,
custom_url, True, avoid_downloading))
def get_cover(self, channel, custom_url=None, avoid_downloading=False):
"""
Sends a synchronous request to download a
cover for the specified channel.
The cover will be returned to the caller.
The custom_url has the same semantics as
in request_cover().
The optional parameter "avoid_downloading",
when true, will make sure we return only
already-downloaded covers and return None
when we have no cover on the local disk.
"""
(url, pixbuf) = self.__get_cover(channel, custom_url, False, avoid_downloading)
return pixbuf
def replace_cover(self, channel, custom_url=None):
"""
This is a convenience function that deletes
the current cover file and requests a new
cover from the URL specified.
"""
self.request_cover(channel, custom_url)
def __get_cover(self, channel, url, async_mode=False, avoid_downloading=False):
def get_filename():
return self.downloader.get_cover(channel.cover_file,
url or channel.cover_url, channel.url, channel.title,
channel.auth_username, channel.auth_password,
not avoid_downloading)
if url is not None:
filename = get_filename()
if filename.startswith(channel.cover_file):
logger.info('Replacing cover: %s', filename)
util.delete_file(filename)
filename = get_filename()
pixbuf = None
try:
pixbuf = GdkPixbuf.Pixbuf.new_from_file(filename)
except Exception:
logger.warning('Cannot load cover art', exc_info=True)
if pixbuf is None and filename.startswith(channel.cover_file):
logger.info('Deleting broken cover: %s', filename)
util.delete_file(filename)
filename = get_filename()
try:
pixbuf = GdkPixbuf.Pixbuf.new_from_file(filename)
except Exception:
logger.warning('Corrupt cover art on server, deleting', exc_info=True)
util.delete_file(filename)
if async_mode:
self.notify('cover-available', channel, pixbuf)
else:
return (channel.url, pixbuf)
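# Illustrative usage sketch (not part of the original module): look up a
# cover without hitting the network. 'channel' is assumed to be a gPodder
# podcast object providing cover_file, cover_url, url, title and the auth
# fields used by __get_cover() above.
def _example_local_cover(channel):
    downloader = CoverDownloader()
    # Returns a GdkPixbuf.Pixbuf, or None if no cover is cached on disk yet
    return downloader.get_cover(channel, avoid_downloading=True)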
| 4,826 | Python | .py | 110 | 36.063636 | 87 | 0.672778 | gpodder/gpodder | 1,268 | 204 | 307 | GPL-3.0 | 9/5/2024, 5:10:30 PM (Europe/Amsterdam) |
8,325 | searchtree.py | gpodder_gpodder/src/gpodder/gtkui/interface/searchtree.py |
# -*- coding: utf-8 -*-
#
# gPodder - A media aggregator and podcast client
# Copyright (c) 2005-2018 The gPodder Team
#
# gPodder is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# gPodder is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import gi # isort:skip
gi.require_version('Gtk', '3.0') # isort:skip
from gi.repository import Gdk, GLib # isort:skip
class SearchTree:
"""
Handle showing/hiding the search box for podcast or episode treeviews,
as well as searching for text entered in the search entry.
Automatically attaches to entry signals on creation.
"""
def __init__(self, search_box, search_entry, tree, model, config):
self.search_box = search_box
self.search_entry = search_entry
self.tree = tree
self.model = model
self.config = config
self._search_timeout = None
self.search_entry.connect('icon-press', self.hide_search)
self.search_entry.connect('changed', self.on_entry_changed)
self.search_entry.connect('key-press-event', self.on_entry_key_press)
def set_search_term(self, text):
self.model.set_search_term(text)
self._search_timeout = None
return False
def on_entry_changed(self, editable):
if self.search_box.get_property('visible'):
if self._search_timeout is not None:
GLib.source_remove(self._search_timeout)
# use timeout_add, not util.idle_timeout_add, so it updates the TreeView before background tasks
self._search_timeout = GLib.timeout_add(
self.config.ui.gtk.live_search_delay,
self.set_search_term, editable.get_chars(0, -1))
def on_entry_key_press(self, editable, event):
if event.keyval == Gdk.KEY_Escape:
self.hide_search()
return True
def hide_search(self, *args):
if self._search_timeout is not None:
GLib.source_remove(self._search_timeout)
self._search_timeout = None
if not self.config.ui.gtk.search_always_visible:
self.search_box.hide()
self.search_entry.set_text('')
self.model.set_search_term(None)
self.tree.grab_focus()
def show_search(self, input_char=None, grab_focus=True):
self.search_box.show()
if input_char:
self.search_entry.insert_text(input_char, -1)
if grab_focus:
self.search_entry.grab_focus()
self.search_entry.set_position(-1)
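# Illustrative wiring sketch (not part of the original module): attach a
# SearchTree to an existing treeview. The model is assumed to implement
# set_search_term() and the config object to provide
# ui.gtk.live_search_delay and ui.gtk.search_always_visible, as used above.
def _example_attach_search(search_box, search_entry, treeview, model, config):
    search = SearchTree(search_box, search_entry, treeview, model, config)
    # Typically triggered from the parent window's key-press handler:
    search.show_search(input_char='a')
    return search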
| 3,025 | Python | .py | 69 | 36.695652 | 108 | 0.667232 | gpodder/gpodder | 1,268 | 204 | 307 | GPL-3.0 | 9/5/2024, 5:10:30 PM (Europe/Amsterdam) |
8,326 | __init__.py | gpodder_gpodder/src/gpodder/gtkui/interface/__init__.py |
# -*- coding: utf-8 -*-
#
# gPodder - A media aggregator and podcast client
# Copyright (c) 2005-2018 The gPodder Team
#
# gPodder is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# gPodder is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
| 747 | Python | .py | 18 | 40.5 | 71 | 0.766804 | gpodder/gpodder | 1,268 | 204 | 307 | GPL-3.0 | 9/5/2024, 5:10:30 PM (Europe/Amsterdam) |
8,327 | configeditor.py | gpodder_gpodder/src/gpodder/gtkui/interface/configeditor.py |
# -*- coding: utf-8 -*-
#
# gPodder - A media aggregator and podcast client
# Copyright (c) 2005-2018 The gPodder Team
#
# gPodder is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# gPodder is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import html
from gi.repository import Gtk
import gpodder
from gpodder.gtkui.config import ConfigModel
from gpodder.gtkui.interface.common import BuilderWidget
_ = gpodder.gettext
class gPodderConfigEditor(BuilderWidget):
def new(self):
self.gPodderConfigEditor.set_transient_for(self.parent_widget)
name_column = Gtk.TreeViewColumn(_('Setting'))
name_renderer = Gtk.CellRendererText()
name_column.pack_start(name_renderer, True)
name_column.add_attribute(name_renderer, 'text', 0)
name_column.add_attribute(name_renderer, 'style', 5)
name_column.set_expand(True)
self.configeditor.append_column(name_column)
value_column = Gtk.TreeViewColumn(_('Set to'))
value_check_renderer = Gtk.CellRendererToggle()
# align left otherwise the checkbox is very far away and not visible
value_check_renderer.set_alignment(0, 0.5)
value_column.pack_start(value_check_renderer, False)
value_column.add_attribute(value_check_renderer, 'active', 7)
value_column.add_attribute(value_check_renderer, 'visible', 6)
value_column.add_attribute(value_check_renderer, 'activatable', 6)
value_check_renderer.connect('toggled', self.value_toggled)
value_renderer = Gtk.CellRendererText()
value_column.pack_start(value_renderer, True)
value_column.add_attribute(value_renderer, 'text', 2)
value_column.add_attribute(value_renderer, 'visible', 4)
value_column.add_attribute(value_renderer, 'editable', 4)
value_column.add_attribute(value_renderer, 'style', 5)
value_renderer.connect('edited', self.value_edited)
value_column.set_expand(False)
self.configeditor.append_column(value_column)
self.model = ConfigModel(self._config)
self.filter = self.model.filter_new()
self.filter.set_visible_func(self.visible_func)
self.configeditor.set_model(self.filter)
self.configeditor.set_rules_hint(True)
self._config.connect_gtk_window(self.main_window, 'config_editor', True)
def visible_func(self, model, iterator, user_data=None):
text = self.entryFilter.get_text().lower()
if text == '':
return True
else:
# either the variable name or its value
return (text in model.get_value(iterator, 0).lower()
or text in model.get_value(iterator, 2).lower())
def value_edited(self, renderer, path, new_text):
model = self.configeditor.get_model()
iterator = model.get_iter(path)
name = model.get_value(iterator, 0)
type_cute = model.get_value(iterator, 1)
if not self._config.update_field(name, new_text):
message = _('Cannot set %(field)s to %(value)s. Needed data type: %(datatype)s')
d = {'field': html.escape(name),
'value': html.escape(new_text),
'datatype': html.escape(type_cute)}
self.notification(message % d, _('Error setting option'))
def value_toggled(self, renderer, path):
model = self.configeditor.get_model()
iterator = model.get_iter(path)
field_name = model.get_value(iterator, 0)
field_type = model.get_value(iterator, 3)
# Flip the boolean config flag
if field_type == bool:
self._config.toggle_flag(field_name)
def on_entryFilter_changed(self, widget):
self.filter.refilter()
def on_btnShowAll_clicked(self, widget):
self.entryFilter.set_text('')
self.entryFilter.grab_focus()
def on_btnClose_clicked(self, widget):
self.gPodderConfigEditor.destroy()
def on_gPodderConfigEditor_destroy(self, widget):
self.model.stop_observing()
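# Illustrative sketch (not part of the original module) of the
# Gtk.TreeModelFilter pattern used in new() above: a ListStore is wrapped
# with filter_new() and narrowed by a visible_func, just as self.filter
# narrows the ConfigModel by the text in entryFilter.
def _example_filter(entries, text):
    store = Gtk.ListStore(str)
    for entry in entries:
        store.append((entry,))
    filtered = store.filter_new()
    filtered.set_visible_func(
        lambda model, it, _data: text.lower() in model.get_value(it, 0).lower())
    return [row[0] for row in filtered]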
| 4,548 | Python | .py | 94 | 41.021277 | 92 | 0.682905 | gpodder/gpodder | 1,268 | 204 | 307 | GPL-3.0 | 9/5/2024, 5:10:30 PM (Europe/Amsterdam) |
8,328 | progress.py | gpodder_gpodder/src/gpodder/gtkui/interface/progress.py |
# -*- coding: utf-8 -*-
#
# gPodder - A media aggregator and podcast client
# Copyright (c) 2005-2018 The gPodder Team
#
# gPodder is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# gPodder is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import time
from gi.repository import GLib, Gtk, Pango
import gpodder
from gpodder import util
from gpodder.gtkui.widgets import SpinningProgressIndicator
_ = gpodder.gettext
class ProgressIndicator(object):
# Delayed time until window is shown (for short operations)
DELAY = 500
# Time between GUI updates after window creation
INTERVAL = 250
def __init__(self, title, subtitle=None, cancellable=False, parent=None, max_ticks=None):
self.title = title
self.subtitle = subtitle
self.cancellable = True if cancellable else False
self.cancel_callback = cancellable
self.cancel_id = 0
self.cancelled = False
self.next_update = time.time() + (self.DELAY / 1000)
self.parent = parent
self.dialog = None
self.progressbar = None
self.indicator = None
self._initial_message = None
self._initial_progress = None
self._progress_set = False
# use timeout_add, not util.idle_timeout_add, so it updates before Gtk+ redraws the dialog
self.source_id = GLib.timeout_add(self.DELAY, self._create_progress)
self.set_max_ticks(max_ticks)
def set_max_ticks(self, max_ticks):
self.max_ticks = max_ticks
self.tick_counter = 0
if max_ticks is not None:
self.on_message('0 / %d' % max_ticks)
def _on_delete_event(self, window, event):
if self.cancellable:
self.dialog.response(Gtk.ResponseType.CANCEL)
self.cancellable = False
self.cancelled = True
return True
def _create_progress(self):
self.dialog = Gtk.MessageDialog(self.parent,
0, 0, Gtk.ButtonsType.CANCEL, self.subtitle or self.title)
self.dialog.set_modal(True)
self.dialog.connect('delete-event', self._on_delete_event)
if self.cancellable:
def cancel_callback(dialog, response):
self.cancellable = False
self.cancelled = True
self.dialog.set_deletable(False)
self.dialog.set_response_sensitive(Gtk.ResponseType.CANCEL, False)
if callable(self.cancel_callback):
self.cancel_callback(dialog, response)
self.cancel_id = self.dialog.connect('response', cancel_callback)
self.dialog.set_title(self.title)
self.dialog.set_deletable(self.cancellable)
# Avoid selectable text (requires PyGTK >= 2.22)
if hasattr(self.dialog, 'get_message_area'):
for label in self.dialog.get_message_area():
if isinstance(label, Gtk.Label):
label.set_selectable(False)
self.dialog.set_response_sensitive(Gtk.ResponseType.CANCEL, self.cancellable)
self.progressbar = Gtk.ProgressBar()
self.progressbar.set_show_text(True)
self.progressbar.set_ellipsize(Pango.EllipsizeMode.END)
# If the window is shown after the first update, set the progress
# info so that when the window appears, data is there already
if self._initial_progress is not None:
self.progressbar.set_fraction(self._initial_progress)
if self._initial_message is not None:
self.progressbar.set_text(self._initial_message)
self.dialog.vbox.add(self.progressbar)
self.indicator = SpinningProgressIndicator()
self.dialog.set_image(self.indicator)
self.dialog.show_all()
self._update_gui()
# previous self.source_id timeout is removed when this returns False
# use timeout_add, not util.idle_timeout_add, so it updates before Gtk+ redraws the dialog
self.source_id = GLib.timeout_add(self.INTERVAL, self._update_gui)
return False
def _update_gui(self):
if self.indicator:
self.indicator.step_animation()
if not self._progress_set and self.progressbar:
self.progressbar.pulse()
self.next_update = time.time() + (self.INTERVAL / 1000)
return True
def on_message(self, message):
if self.progressbar:
self.progressbar.set_text(message)
else:
self._initial_message = message
def on_progress(self, progress):
self._progress_set = True
if self.progressbar:
self.progressbar.set_fraction(progress)
else:
self._initial_progress = progress
def on_tick(self, final=False):
if final:
# Dialog is no longer cancellable
self.cancellable = False
if self.dialog is not None:
self.dialog.set_response_sensitive(Gtk.ResponseType.CANCEL, False)
self.dialog.set_deletable(False)
elif 2 * (time.time() - (self.next_update - (self.DELAY / 1000))) > (self.DELAY / 1000):
# Assume final operation will take as long as all ticks and open dialog
if self.source_id:
GLib.source_remove(self.source_id)
self._create_progress()
if self.max_ticks is not None and not final:
self.tick_counter += 1
if time.time() >= self.next_update or (final and self.dialog):
if isinstance(final, str):
self.on_message(final)
self.on_progress(1.0)
elif self.max_ticks is not None:
self.on_message('%d / %d' % (self.tick_counter, self.max_ticks))
self.on_progress(self.tick_counter / self.max_ticks)
# Allow UI to redraw.
util.idle_add(Gtk.main_quit)
# self._create_progress() or self._update_gui() is called by a timer to update the dialog
Gtk.main()
if self.cancelled:
return False
return True
def on_finished(self):
if self.dialog is not None:
if self.cancel_id:
self.dialog.disconnect(self.cancel_id)
self.dialog.destroy()
if self.source_id:
GLib.source_remove(self.source_id)
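# Illustrative usage sketch (not part of the original module): a typical
# tick-driven progress dialog, mirroring how callers in gPodder use this
# class. 'items' and 'process' are hypothetical placeholders.
def _example_progress(items, process, parent=None):
    progress = ProgressIndicator(_('Working'), _('Please wait'),
                                 cancellable=True, parent=parent,
                                 max_ticks=len(items))
    for item in items:
        if not progress.on_tick():  # returns False once the user cancelled
            break
        process(item)
    progress.on_tick(final=_('Finishing'))
    progress.on_finished()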
| 6,836 | Python | .py | 151 | 35.768212 | 101 | 0.64143 | gpodder/gpodder | 1,268 | 204 | 307 | GPL-3.0 | 9/5/2024, 5:10:30 PM (Europe/Amsterdam) |
8,329 | tagcloud.py | gpodder_gpodder/src/gpodder/gtkui/interface/tagcloud.py |
# -*- coding: utf-8 -*-
#
# gPodder - A media aggregator and podcast client
# Copyright (c) 2005-2018 The gPodder Team
#
# gPodder is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# gPodder is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import html
from gi.repository import GLib, GObject, Gtk
class TagCloud(Gtk.Layout):
__gsignals__ = {
'selected': (GObject.SignalFlags.RUN_LAST, None,
(GObject.TYPE_STRING,))
}
def __init__(self, min_size=20, max_size=36):
Gtk.Layout.__init__(self)
self._min_weight = 0
self._max_weight = 0
self._min_size = min_size
self._max_size = max_size
self._size = 0, 0
self._alloc_id = self.connect('size-allocate', self._on_size_allocate)
self._in_relayout = False
def clear_tags(self):
for child in self.get_children():
self.remove(child)
def set_tags(self, tags):
tags = list(tags)
self._min_weight = min(weight for tag, weight in tags)
self._max_weight = max(weight for tag, weight in tags)
for tag, weight in tags:
label = Gtk.Label()
markup = '<span size="%d">%s</span>' % (1000 * self._scale(weight), html.escape(tag))
label.set_markup(markup)
button = Gtk.ToolButton(label)
button.connect('clicked', lambda b, t: self.emit('selected', t), tag)
self.put(button, 1, 1)
button.show_all()
self.relayout()
def _on_size_allocate(self, widget, allocation):
self._size = (allocation.width, allocation.height)
if not self._in_relayout:
self.relayout()
def _scale(self, weight):
weight_range = self._max_weight - self._min_weight
if weight_range == 0:
    # All tags share the same weight; avoid a division by zero
    return self._min_size
ratio = (weight - self._min_weight) / weight_range
return int(self._min_size + (self._max_size - self._min_size) * ratio)
def relayout(self):
self._in_relayout = True
x, y, max_h = 0, 0, 0
current_row = []
pw, ph = self._size
def fixup_row(widgets, x, y, max_h):
residue = (pw - x)
x = int(residue // 2)
for widget in widgets:
cw, ch = widget.size_request()
self.move(widget, x, y + max(0, int(max_h - ch) // 2))
x += cw + 10
for child in self.get_children():
w, h = child.size_request()
if x + w > pw:
fixup_row(current_row, x, y, max_h)
y += max_h + 10
max_h, x = 0, 0
current_row = []
self.move(child, x, y)
x += w + 10
max_h = max(max_h, h)
current_row.append(child)
fixup_row(current_row, x, y, max_h)
self.set_size(pw, y + max_h)
def unrelayout():
self._in_relayout = False
return False
GLib.idle_add(unrelayout)
GObject.type_register(TagCloud)
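# Illustrative usage sketch (not part of the original module): populate a
# TagCloud from (tag, weight) pairs and react to clicks. Since the cloud is
# a Gtk.Layout, it is placed inside a scrolled window here (an assumption,
# not taken from this file).
def _example_tag_cloud(on_tag_selected):
    cloud = TagCloud(min_size=20, max_size=36)
    cloud.set_tags([('news', 10), ('technology', 40), ('comedy', 25)])
    cloud.connect('selected', lambda _cloud, tag: on_tag_selected(tag))
    scrolled = Gtk.ScrolledWindow()
    scrolled.add(cloud)
    return scrolled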
| 3,489 | Python | .py | 88 | 31.022727 | 97 | 0.582619 | gpodder/gpodder | 1,268 | 204 | 307 | GPL-3.0 | 9/5/2024, 5:10:30 PM (Europe/Amsterdam) |
8,330 | common.py | gpodder_gpodder/src/gpodder/gtkui/interface/common.py |
# -*- coding: utf-8 -*-
#
# gPodder - A media aggregator and podcast client
# Copyright (c) 2005-2018 The gPodder Team
#
# gPodder is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# gPodder is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import os
from gi.repository import Gdk, Gio, Gtk
import gpodder
from gpodder import util
from gpodder.gtkui.base import GtkBuilderWidget
_ = gpodder.gettext
def show_message_dialog(parent, message, title=None):
dlg = Gtk.MessageDialog(parent, Gtk.DialogFlags.MODAL, Gtk.MessageType.INFO, Gtk.ButtonsType.OK)
if title:
dlg.set_title(title)
dlg.set_property('text', title)
dlg.format_secondary_text(message)
else:
dlg.set_property('text', message)
# make message copy/pastable
for lbl in dlg.get_message_area():
if isinstance(lbl, Gtk.Label):
lbl.set_selectable(True)
dlg.run()
dlg.destroy()
class BuilderWidget(GtkBuilderWidget):
def __init__(self, parent, **kwargs):
self._window_iconified = False
GtkBuilderWidget.__init__(self, gpodder.ui_folders, gpodder.textdomain, parent, **kwargs)
# Enable support for tracking iconified state
if hasattr(self, 'on_iconify') and hasattr(self, 'on_uniconify'):
self.main_window.connect('window-state-event',
self._on_window_state_event_iconified)
def _on_window_state_event_iconified(self, widget, event):
if event.new_window_state & Gdk.WindowState.ICONIFIED:
if not self._window_iconified:
self._window_iconified = True
self.on_iconify()
else:
if self._window_iconified:
self._window_iconified = False
self.on_uniconify()
return False
def is_iconified(self):
return self._window_iconified
def notification(self, message, title=None, important=False, widget=None):
util.idle_add(self.show_message, message, title, important, widget)
def get_dialog_parent(self):
"""Return a Gtk.Window that should be the parent of dialogs"""
return self.main_window
def show_message_details(self, title, message, details):
dlg = Gtk.MessageDialog(self.main_window, Gtk.DialogFlags.MODAL, Gtk.MessageType.INFO, Gtk.ButtonsType.OK)
dlg.set_title(title)
dlg.set_property('text', title)
dlg.format_secondary_text(message)
# make message copy/pastable
for lbl in dlg.get_message_area():
if isinstance(lbl, Gtk.Label):
lbl.set_halign(Gtk.Align.START)
lbl.set_selectable(True)
tv = Gtk.TextView()
tv.set_wrap_mode(Gtk.WrapMode.WORD_CHAR)
tv.set_border_width(10)
tv.set_editable(False)
tb = Gtk.TextBuffer()
tb.insert_markup(tb.get_start_iter(), details, -1)
tv.set_buffer(tb)
tv.set_property('expand', True)
sw = Gtk.ScrolledWindow()
sw.set_size_request(400, 200)
sw.set_property('shadow-type', Gtk.ShadowType.IN)
sw.add(tv)
sw.show_all()
dlg.get_message_area().add(sw)
dlg.get_widget_for_response(Gtk.ResponseType.OK).grab_focus()
dlg.run()
dlg.destroy()
def show_message(self, message, title=None, important=False, widget=None):
if important:
show_message_dialog(self.main_window, message, title)
else:
gpodder.user_extensions.on_notification_show(title, message)
def show_confirmation(self, message, title=None):
dlg = Gtk.MessageDialog(self.main_window, Gtk.DialogFlags.MODAL, Gtk.MessageType.QUESTION, Gtk.ButtonsType.YES_NO)
if title:
dlg.set_title(str(title))
dlg.set_markup('<span weight="bold" size="larger">%s</span>\n\n%s' % (title, message))
else:
dlg.set_markup('<span weight="bold" size="larger">%s</span>' % (message))
response = dlg.run()
dlg.destroy()
return response == Gtk.ResponseType.YES
def show_text_edit_dialog(self, title, prompt, text=None, empty=False,
is_url=False, affirmative_text=_('_OK')):
dialog = Gtk.Dialog(title, self.get_dialog_parent(),
Gtk.DialogFlags.MODAL | Gtk.DialogFlags.DESTROY_WITH_PARENT)
dialog.add_button(_('_Cancel'), Gtk.ResponseType.CANCEL)
dialog.add_button(affirmative_text, Gtk.ResponseType.OK)
dialog.set_default_size(300, -1)
dialog.set_default_response(Gtk.ResponseType.OK)
text_entry = Gtk.Entry()
text_entry.set_activates_default(True)
if text is not None:
text_entry.set_text(text)
text_entry.select_region(0, -1)
if not empty:
def on_text_changed(editable):
can_confirm = (editable.get_text() != '')
dialog.set_response_sensitive(Gtk.ResponseType.OK, can_confirm)
text_entry.connect('changed', on_text_changed)
if text is None:
dialog.set_response_sensitive(Gtk.ResponseType.OK, False)
hbox = Gtk.Box(orientation=Gtk.Orientation.HORIZONTAL)
hbox.set_border_width(10)
hbox.set_spacing(10)
hbox.pack_start(Gtk.Label(label=prompt), False, False, 0)
hbox.pack_start(text_entry, True, True, 0)
dialog.vbox.pack_start(hbox, True, True, 0)
dialog.show_all()
response = dialog.run()
result = text_entry.get_text()
dialog.destroy()
if response == Gtk.ResponseType.OK:
return result
else:
return None
def show_login_dialog(self, title, message, root_url=None, username=None, password=None,
username_prompt=None, register_callback=None, register_text=None, ask_server=False):
def toggle_password_visibility(_, entry):
entry.set_visibility(not entry.get_visibility())
if username_prompt is None:
username_prompt = _('Username')
if register_text is None:
register_text = _('New user')
dialog = Gtk.MessageDialog(
self.main_window,
Gtk.DialogFlags.MODAL | Gtk.DialogFlags.DESTROY_WITH_PARENT,
Gtk.MessageType.QUESTION,
Gtk.ButtonsType.CANCEL)
dialog.add_button(_('Login'), Gtk.ResponseType.OK)
dialog.set_image(Gtk.Image.new_from_icon_name('dialog-password', Gtk.IconSize.DIALOG))
dialog.set_title(_('Authentication required'))
dialog.set_markup('<span weight="bold" size="larger">' + title + '</span>')
dialog.format_secondary_markup(message)
dialog.set_default_response(Gtk.ResponseType.OK)
if register_callback is not None:
dialog.add_button(register_text, Gtk.ResponseType.HELP)
server_entry = Gtk.Entry()
server_entry.set_tooltip_text(_('hostname or root URL (e.g. https://gpodder.net)'))
username_entry = Gtk.Entry()
password_entry = Gtk.Entry()
server_entry.connect('activate', lambda w: username_entry.grab_focus())
username_entry.connect('activate', lambda w: password_entry.grab_focus())
password_entry.set_visibility(False)
password_entry.set_activates_default(True)
if root_url is not None:
server_entry.set_text(root_url)
if username is not None:
username_entry.set_text(username)
if password is not None:
password_entry.set_text(password)
table = Gtk.Table(3, 2)
table.set_row_spacings(6)
table.set_col_spacings(6)
server_label = Gtk.Label()
server_label.set_markup('<b>' + _('Server') + ':</b>')
username_label = Gtk.Label()
username_label.set_markup('<b>' + username_prompt + ':</b>')
password_label = Gtk.Label()
password_label.set_markup('<b>' + _('Password') + ':</b>')
show_password_label = Gtk.Label()
show_password = Gtk.CheckButton.new_with_label(_('Show Password'))
show_password.connect('toggled', toggle_password_visibility, password_entry)
label_entries = [(username_label, username_entry),
(password_label, password_entry),
(show_password_label, show_password)]
if ask_server:
label_entries.insert(0, (server_label, server_entry))
for i, (label, entry) in enumerate(label_entries):
label.set_alignment(0.0, 0.5)
table.attach(label, 0, 1, i, i + 1, Gtk.AttachOptions.FILL, 0)
table.attach(entry, 1, 2, i, i + 1)
dialog.vbox.pack_end(table, True, True, 0)
dialog.show_all()
username_entry.grab_focus()
response = dialog.run()
while response == Gtk.ResponseType.HELP:
register_callback()
response = dialog.run()
password_entry.set_visibility(True)
root_url = server_entry.get_text()
username = username_entry.get_text()
password = password_entry.get_text()
success = (response == Gtk.ResponseType.OK)
dialog.destroy()
if ask_server:
return (success, (root_url, username, password))
else:
return (success, (username, password))
def show_folder_select_dialog(self, initial_directory=None, title=_('Select destination')):
if initial_directory is None:
initial_directory = os.path.expanduser('~')
dlg = Gtk.FileChooserDialog(title=title, parent=self.main_window, action=Gtk.FileChooserAction.SELECT_FOLDER)
dlg.add_button(_('_Cancel'), Gtk.ResponseType.CANCEL)
dlg.add_button(_('_Save'), Gtk.ResponseType.OK)
dlg.set_do_overwrite_confirmation(True)
dlg.set_current_folder(initial_directory)
result = False
folder = initial_directory
if dlg.run() == Gtk.ResponseType.OK:
result = True
folder = dlg.get_current_folder()
dlg.destroy()
return (result, folder)
class TreeViewHelper(object):
"""Container for gPodder-specific TreeView attributes."""
LAST_TOOLTIP = '_gpodder_last_tooltip'
CAN_TOOLTIP = '_gpodder_can_tooltip'
ROLE = '_gpodder_role'
COLUMNS = '_gpodder_columns'
# Enum for the role attribute
ROLE_PODCASTS, ROLE_EPISODES, ROLE_DOWNLOADS = list(range(3))
@classmethod
def set(cls, treeview, role):
setattr(treeview, cls.LAST_TOOLTIP, None)
setattr(treeview, cls.CAN_TOOLTIP, True)
setattr(treeview, cls.ROLE, role)
@staticmethod
def make_search_equal_func(gpodder_model):
def func(model, column, key, iterator):
if model is None:
return True
key = key.lower()
for column in gpodder_model.SEARCH_COLUMNS:
if key in model.get_value(iterator, column).lower():
return False
return True
return func
@classmethod
def register_column(cls, treeview, column):
if not hasattr(treeview, cls.COLUMNS):
setattr(treeview, cls.COLUMNS, [])
columns = getattr(treeview, cls.COLUMNS)
columns.append(column)
@classmethod
def get_columns(cls, treeview):
return getattr(treeview, cls.COLUMNS, [])
@staticmethod
def make_popup_position_func(widget):
"""
:return: suitable function to pass to Gtk.Menu.popup()
It's used for instance when the popup trigger is the Menu key:
it will position the menu on top of the selected row even if the mouse is elsewhere
see http://lazka.github.io/pgi-docs/#Gtk-3.0/classes/Menu.html#Gtk.Menu.popup
"""
def position_func(menu, *unused_args):
_, x, y = widget.get_bin_window().get_origin()
# If there's a selection, place the popup menu on top of
# the first-selected row (otherwise in the top left corner)
selection = widget.get_selection()
model, paths = selection.get_selected_rows()
if paths:
path = paths[0]
area = widget.get_cell_area(path, widget.get_column(0))
x += area.x
y += area.y
return (x, y, True)
return position_func
@staticmethod
def get_popup_rectangle(treeview, event, column=0):
"""
:return: Gdk.Rectangle to pass to Gtk.Popover.set_pointing_to()
If event is given, return a zero-width and height rectangle with the
event coordinates. If event is None, get the area of the column in the
first selected treeview row.
Used for instance when the popup trigger is the Menu key: It will
position the popover on top of the column on the selected row, even if
the mouse is elsewhere
"""
if event is not None:
area = Gdk.Rectangle()
area.x, area.y = treeview.convert_bin_window_to_widget_coords(event.x, event.y)
return area
# If there's a selection, place the popup menu on top of
# the first-selected row and given column (otherwise in the top left corner)
selection = treeview.get_selection()
model, paths = selection.get_selected_rows()
if paths:
path = paths[0]
area = treeview.get_cell_area(path, treeview.get_column(column))
else:
area = Gdk.Rectangle() # x, y, width, height are all 0
area.x, area.y = treeview.convert_bin_window_to_widget_coords(area.x, area.y)
return area
class ExtensionMenuHelper(object):
"""A helper class to handle extension submenus"""
def __init__(self, gpodder, menu, action_prefix, gen_callback_func=None):
self.gPodder = gpodder
self.menu = menu
self.action_prefix = action_prefix
self.gen_callback_func = gen_callback_func
self.actions = []
def replace_entries(self, new_entries):
# remove previous menu entries
for a in self.actions:
self.gPodder.remove_action(a.get_property('name'))
self.actions = []
self.menu.remove_all()
# create new ones
new_entries = list(new_entries or [])
for i, (label, callback) in enumerate(new_entries):
action_id = self.action_prefix + str(i)
action = Gio.SimpleAction.new(action_id)
action.set_enabled(callback is not None)
if callback is not None:
if self.gen_callback_func is None:
action.connect('activate', callback)
else:
action.connect('activate', self.gen_callback_func(callback))
self.actions.append(action)
self.gPodder.add_action(action)
itm = Gio.MenuItem.new(label, 'win.' + action_id)
self.menu.append_item(itm)
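# Illustrative sketch (not part of the original module): replace_entries() consumes
# (label, callback) pairs such as those returned by extension hooks; a callback of
# None keeps the entry visible but disabled. `window` (a Gtk.ApplicationWindow that
# implements Gio.ActionMap) and `menu` (a Gio.Menu) are assumptions for illustration.
def _extension_menu_sketch(window, menu):
    helper = ExtensionMenuHelper(window, menu, 'extensions.episodes.action_')
    helper.replace_entries([
        ('Example entry', lambda action, param: print('activated')),
        ('Disabled entry', None),
    ])
    return helper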
class Dummy:
"""A class for objects with arbitrary attributes (for imitating Gtk Events etc.)"""
def __init__(self, **kwds):
self.__dict__.update(kwds)
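# Illustrative sketch (not part of the original module): Dummy can stand in for a
# Gtk event object in code paths that only read a few attributes, e.g. when an
# event handler is called programmatically. The attribute names are assumptions.
def _dummy_event_sketch():
    fake_event = Dummy(x=10, y=20, button=3, type=None)
    return (fake_event.x, fake_event.y, fake_event.button)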
| 15,616 | Python | .py | 340 | 36.423529 | 122 | 0.630938 | gpodder/gpodder | 1,268 | 204 | 307 | GPL-3.0 | 9/5/2024, 5:10:30 PM (Europe/Amsterdam) |
| 8,331 | addpodcast.py | gpodder_gpodder/src/gpodder/gtkui/interface/addpodcast.py |
# -*- coding: utf-8 -*-
#
# gPodder - A media aggregator and podcast client
# Copyright (c) 2005-2018 The gPodder Team
#
# gPodder is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# gPodder is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from gi.repository import Gdk, Gtk
import gpodder
from gpodder import util
from gpodder.gtkui.interface.common import BuilderWidget
_ = gpodder.gettext
class gPodderAddPodcast(BuilderWidget):
def new(self):
self.gPodderAddPodcast.set_transient_for(self.parent_widget)
if not hasattr(self, 'add_podcast_list'):
self.add_podcast_list = None
if hasattr(self, 'custom_label'):
self.label_add.set_text(self.custom_label)
if hasattr(self, 'custom_title'):
self.gPodderAddPodcast.set_title(self.custom_title)
if hasattr(self, 'preset_url'):
self.entry_url.set_text(self.preset_url)
self.entry_url.connect('activate', self.on_entry_url_activate)
self.entry_url.connect('icon-press', self.on_clear_url)
self.gPodderAddPodcast.show()
if not hasattr(self, 'preset_url'):
# Fill the entry if a valid URL is in the clipboard, but
# only if there's no preset_url available (see bug 1132).
# First try from CLIPBOARD (everyday copy-paste),
# then from SELECTION (text selected and pasted via
# middle mouse button).
clipboard = Gtk.Clipboard.get(Gdk.SELECTION_CLIPBOARD)
def receive_clipboard_text(clipboard, text, second_try):
                # Heuristic: even if the clipboard text contains spaces,
                # normalize_feed_url() will either fix it up into a valid
                # feed URL or return None if it cannot be validated.
if text is not None:
url = util.normalize_feed_url(text)
if url is not None:
self.entry_url.set_text(url)
self.entry_url.set_position(-1)
return
if not second_try:
clipboard = Gtk.Clipboard.get(Gdk.SELECTION_PRIMARY)
clipboard.request_text(receive_clipboard_text, True)
clipboard.request_text(receive_clipboard_text, False)
def on_clear_url(self, widget, icon_position, event):
self.entry_url.set_text('')
def on_btn_close_clicked(self, widget):
self.gPodderAddPodcast.destroy()
def on_btn_paste_clicked(self, widget):
clipboard = Gtk.Clipboard.get(Gdk.SELECTION_CLIPBOARD)
clipboard.request_text(self.receive_clipboard_text)
def receive_clipboard_text(self, clipboard, text, data=None):
if text is not None:
self.entry_url.set_text(text.strip())
else:
self.show_message(_('Nothing to paste.'), _('Clipboard is empty'))
def on_entry_url_changed(self, widget):
self.btn_add.set_sensitive(self.entry_url.get_text().strip() != '')
def on_entry_url_activate(self, widget):
self.on_btn_add_clicked(widget)
def on_btn_add_clicked(self, widget):
url = self.entry_url.get_text()
self.on_btn_close_clicked(widget)
if self.add_podcast_list is not None:
title = None # FIXME: Add title GUI element
self.add_podcast_list([(title, url)])
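# Illustrative sketch (not part of the original module): the add_podcast_list
# callback handed to this dialog receives a list of (title, url) tuples; the title
# is currently always None (see the FIXME above). The function name is an assumption.
def _example_add_podcast_list(podcasts):
    for title, url in podcasts:
        print('would subscribe to %s (title=%r)' % (url, title))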
| 3,862 | Python | .py | 80 | 39.3875 | 78 | 0.654101 | gpodder/gpodder | 1,268 | 204 | 307 | GPL-3.0 | 9/5/2024, 5:10:30 PM (Europe/Amsterdam) |
| 8,332 | channel.py | gpodder_gpodder/src/gpodder/gtkui/desktop/channel.py |
# -*- coding: utf-8 -*-
#
# gPodder - A media aggregator and podcast client
# Copyright (c) 2005-2018 The gPodder Team
#
# gPodder is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# gPodder is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from gi.repository import Gdk, Gio, Gtk
import gpodder
from gpodder import util
from gpodder.gtkui.interface.common import BuilderWidget
_ = gpodder.gettext
class gPodderChannel(BuilderWidget):
MAX_SIZE = 120
def new(self):
self.show_on_cover_load = True
self.gPodderChannel.set_transient_for(self.parent_widget)
self.title_label.set_text(self.channel.title)
self.labelURL.set_text(self.channel.url)
self.skip_feed_update_switch.set_active(self.channel.pause_subscription)
self.enable_device_sync_switch.set_active(self.channel.sync_to_mp3_player)
self.section_list = Gtk.ListStore(str)
active_index = 0
for index, section in enumerate(sorted(self.sections)):
self.section_list.append([section])
if section == self.channel.section:
active_index = index
self.combo_section.set_model(self.section_list)
cell_renderer = Gtk.CellRendererText()
self.combo_section.pack_start(cell_renderer, True)
self.combo_section.add_attribute(cell_renderer, 'text', 0)
self.combo_section.set_active(active_index)
self.strategy_list = Gtk.ListStore(str, int)
active_index = 0
for index, (checked, strategy_id, strategy) in \
enumerate(self.channel.get_download_strategies()):
self.strategy_list.append([strategy, strategy_id])
if checked:
active_index = index
self.combo_strategy.set_model(self.strategy_list)
cell_renderer = Gtk.CellRendererText()
self.combo_strategy.pack_start(cell_renderer, True)
self.combo_strategy.add_attribute(cell_renderer, 'text', 0)
self.combo_strategy.set_active(active_index)
self.LabelDownloadTo.set_text(self.channel.save_dir)
self.website_label.set_markup('<a href="{}">{}</a>'.format(
self.channel.link, self.channel.link)
if self.channel.link else '')
self.website_label.connect('activate-link', lambda label, url: util.open_website(url))
if self.channel.auth_username:
self.FeedUsername.set_text(self.channel.auth_username)
if self.channel.auth_password:
self.FeedPassword.set_text(self.channel.auth_password)
# Cover image
ag = Gio.SimpleActionGroup()
open_cover_action = Gio.SimpleAction.new("openCover", None)
open_cover_action.connect('activate', self.on_open_cover_activate)
ag.add_action(open_cover_action)
refresh_cover_action = Gio.SimpleAction.new("refreshCover", None)
refresh_cover_action.connect('activate', self.on_refresh_cover_activate)
ag.add_action(refresh_cover_action)
self.main_window.insert_action_group("channel", ag)
cover_menu = Gio.Menu()
cover_menu.append("Change cover image", "channel.openCover")
cover_menu.append("Refresh image", "channel.refreshCover")
self.cover_menubutton.set_menu_model(cover_menu)
self.cover_downloader.register('cover-available', self.cover_download_finished)
self.cover_downloader.request_cover(self.channel)
if self.channel._update_error:
err = '\n\n' + (_('ERROR: %s') % self.channel._update_error)
else:
err = ''
self.channel_description.set_text(util.remove_html_tags(self.channel.description) + err)
# Add Drag and Drop Support
flags = Gtk.DestDefaults.ALL
targets = [Gtk.TargetEntry.new('text/uri-list', 0, 2), Gtk.TargetEntry.new('text/plain', 0, 4)]
actions = Gdk.DragAction.DEFAULT | Gdk.DragAction.COPY
self.imgCover.drag_dest_set(flags, targets, actions)
self.imgCover.connect('drag_data_received', self.drag_data_received)
border = 6
size = self.MAX_SIZE + border * 2
self.imgCover.set_size_request(size, size)
# Title save button state
self.title_save_button_saves = True
self._config.connect_gtk_window(self.gPodderChannel, 'channel_editor', True)
gpodder.user_extensions.on_ui_object_available('channel-gtk', self)
result = gpodder.user_extensions.on_channel_settings(self.channel)
if result:
for label, callback in result:
sw = Gtk.ScrolledWindow()
sw.add(callback(self.channel))
sw.show_all()
self.notebookChannelEditor.append_page(sw, Gtk.Label(label))
def on_button_add_section_clicked(self, widget):
text = self.show_text_edit_dialog(_('Add section'), _('New section:'),
affirmative_text=_('_Add'))
if text is not None:
for index, (section,) in enumerate(self.section_list):
if text == section:
self.combo_section.set_active(index)
return
self.section_list.append([text])
self.combo_section.set_active(len(self.section_list) - 1)
def on_open_cover_activate(self, action, *args):
dlg = Gtk.FileChooserDialog(
title=_('Select new podcast cover artwork'),
parent=self.gPodderChannel,
action=Gtk.FileChooserAction.OPEN)
dlg.add_button(_('_Cancel'), Gtk.ResponseType.CANCEL)
dlg.add_button(_('_Open'), Gtk.ResponseType.OK)
if dlg.run() == Gtk.ResponseType.OK:
url = dlg.get_uri()
self.clear_cover_cache(self.channel.url)
self.cover_downloader.replace_cover(self.channel, custom_url=url)
dlg.destroy()
def on_refresh_cover_activate(self, action, *args):
self.clear_cover_cache(self.channel.url)
self.cover_downloader.replace_cover(self.channel, custom_url=False)
def cover_download_finished(self, channel, pixbuf):
def set_cover(channel, pixbuf):
if self.channel == channel:
if pixbuf is not None:
self.imgCover.set_from_pixbuf(util.scale_pixbuf(pixbuf, self.MAX_SIZE))
if self.show_on_cover_load:
self.main_window.show()
self.show_on_cover_load = False
util.idle_add(set_cover, channel, pixbuf)
def drag_data_received(self, widget, content, x, y, sel, ttype, time):
files = sel.get_text().strip().split('\n')
if len(files) != 1:
self.show_message(
_('You can only drop a single image or URL here.'),
_('Drag and drop'))
return
file = files[0]
if file.startswith('file://') or file.startswith('http://') or file.startswith('https://'):
self.clear_cover_cache(self.channel.url)
self.cover_downloader.replace_cover(self.channel, custom_url=file)
return
self.show_message(
_('You can only drop local files and http:// URLs here.'),
_('Drag and drop'))
def on_gPodderChannel_destroy(self, widget, *args):
self.cover_downloader.unregister('cover-available', self.cover_download_finished)
# Title editing callbacks
def on_title_edit_button_clicked(self, button):
self.title_save_button_saves = True
self.title_save_button.set_label(_("_Save"))
self.title_stack.set_visible_child(self.title_edit_box)
self.title_entry.set_text(self.title_label.get_text())
self.title_entry.grab_focus()
def on_title_entry_changed(self, entry):
if len(entry.get_text()) > 0:
self.title_save_button_saves = True
self.title_save_button.set_label(_("_Save"))
else:
self.title_save_button_saves = False
self.title_save_button.set_label(_("Cancel"))
def on_title_entry_icon_press(self, entry, icon_pos, *args):
self.title_entry.set_text("")
def on_title_save_button_clicked(self, button):
if self.title_save_button_saves:
self.title_label.set_text(self.title_entry.get_text())
self.title_stack.set_visible_child(self.title_box)
def on_feed_url_copy_button_clicked(self, button):
clipboard = Gtk.Clipboard.get(Gdk.SELECTION_CLIPBOARD)
clipboard.set_text(self.channel.url, -1)
def on_open_folder_button_clicked(self, button):
util.gui_open(self.channel.save_dir, gui=self)
def on_row_activated(self, listbox, row, *args):
# Find the correct widget in the row to activate
def _do(w, *args):
if w.get_name().startswith("no_activation"):
return
elif isinstance(w, Gtk.Box):
w.foreach(_do)
elif isinstance(w, Gtk.ComboBox):
w.popup()
elif isinstance(w, Gtk.Entry):
w.grab_focus()
elif isinstance(w, Gtk.Switch):
w.set_state(not w.get_state())
elif isinstance(w, Gtk.Button):
w.emit("clicked")
row.foreach(_do)
def on_btnCancel_clicked(self, widget, *args):
self.main_window.destroy()
def on_btnOK_clicked(self, widget, *args):
self.channel.pause_subscription = self.skip_feed_update_switch.get_state()
self.channel.sync_to_mp3_player = self.enable_device_sync_switch.get_state()
self.channel.rename(self.title_label.get_text())
self.channel.auth_username = self.FeedUsername.get_text().strip()
self.channel.auth_password = self.FeedPassword.get_text()
self.cover_downloader.unregister('cover-available', self.cover_download_finished)
self.clear_cover_cache(self.channel.url)
self.cover_downloader.request_cover(self.channel)
new_section = self.section_list[self.combo_section.get_active()][0]
if self.channel.section != new_section:
self.channel.section = new_section
section_changed = True
else:
section_changed = False
new_strategy = self.strategy_list[self.combo_strategy.get_active()][1]
self.channel.set_download_strategy(new_strategy)
self.channel.save()
self.main_window.destroy()
self.update_podcast_list_model(selected=True,
sections_changed=section_changed)
| 10,985 | Python | .py | 219 | 40.383562 | 103 | 0.64567 | gpodder/gpodder | 1,268 | 204 | 307 | GPL-3.0 | 9/5/2024, 5:10:30 PM (Europe/Amsterdam) |
| 8,333 | welcome.py | gpodder_gpodder/src/gpodder/gtkui/desktop/welcome.py |
# -*- coding: utf-8 -*-
#
# gPodder - A media aggregator and podcast client
# Copyright (c) 2005-2018 The gPodder Team
#
# gPodder is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# gPodder is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from gi.repository import Gtk
import gpodder
from gpodder.gtkui.interface.common import BuilderWidget
_ = gpodder.gettext
class gPodderWelcome(BuilderWidget):
def new(self):
self.gPodderWelcome.set_transient_for(self.parent_widget)
def on_btnCancel_clicked(self, button):
self.main_window.response(Gtk.ResponseType.CANCEL)
| 1,101 | Python | .py | 27 | 38.62963 | 71 | 0.774133 | gpodder/gpodder | 1,268 | 204 | 307 | GPL-3.0 | 9/5/2024, 5:10:30 PM (Europe/Amsterdam) |
| 8,334 | exportlocal.py | gpodder_gpodder/src/gpodder/gtkui/desktop/exportlocal.py |
# -*- coding: utf-8 -*-
#
# gPodder - A media aggregator and podcast client
# Copyright (c) 2005-2018 The gPodder Team
#
# gPodder is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# gPodder is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import os
import gpodder
from gpodder.gtkui.interface.common import BuilderWidget
_ = gpodder.gettext
N_ = gpodder.ngettext
class gPodderExportToLocalFolder(BuilderWidget):
""" Export to Local Folder UI: file dialog + checkbox to save all to same folder """
def new(self):
self.gPodderExportToLocalFolder.set_transient_for(self.parent_widget)
self.RES_CANCEL = -6
self.RES_SAVE = -3
self.gPodderExportToLocalFolder.add_buttons("_Cancel", self.RES_CANCEL,
"_Save", self.RES_SAVE)
self._config.connect_gtk_window(self.gPodderExportToLocalFolder,
'export_to_local_folder', True)
def save_as(self, initial_directory, filename, remaining=0):
"""
blocking method: prompt for save to local folder
:param str initial_directory: folder to show to user or None
:param str filename: default export filename
:param int remaining: remaining episodes (to show/hide and customize checkbox label)
:return (bool, str, str, bool): notCancelled, selected folder, selected path,
save all remaining episodes with default params
"""
if remaining:
self.allsamefolder.set_label(
N_('Export remaining %(count)d episode to this folder with its default name',
'Export remaining %(count)d episodes to this folder with their default name',
remaining) % {'count': remaining})
else:
self.allsamefolder.hide()
if initial_directory is None:
initial_directory = os.path.expanduser('~')
self.gPodderExportToLocalFolder.set_current_folder(initial_directory)
self.gPodderExportToLocalFolder.set_current_name(filename)
res = self.gPodderExportToLocalFolder.run()
self.gPodderExportToLocalFolder.hide()
notCancelled = (res == self.RES_SAVE)
allRemainingDefault = self.allsamefolder.get_active()
if notCancelled:
folder = self.gPodderExportToLocalFolder.get_current_folder()
filename = self.gPodderExportToLocalFolder.get_filename()
else:
folder = initial_directory
filename = None
return (notCancelled, folder, filename, allRemainingDefault)
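# Illustrative sketch (not part of the original module): consuming the four-tuple
# returned by save_as() when exporting several episodes in a row. `dialog`,
# `episodes` (objects with a download_filename attribute) and `copy_file` are
# assumptions for illustration.
def _export_episodes_sketch(dialog, episodes, copy_file):
    folder, all_default = None, False
    for index, episode in enumerate(episodes):
        remaining = len(episodes) - index - 1
        if all_default:
            # Re-use the previously chosen folder with the default filename
            copy_file(episode, os.path.join(folder, episode.download_filename))
            continue
        ok, folder, path, all_default = dialog.save_as(
            folder, episode.download_filename, remaining)
        if not ok:
            break
        copy_file(episode, path)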
| 3,129 | Python | .py | 64 | 40.40625 | 96 | 0.677451 | gpodder/gpodder | 1,268 | 204 | 307 | GPL-3.0 | 9/5/2024, 5:10:30 PM (Europe/Amsterdam) |
| 8,335 | __init__.py | gpodder_gpodder/src/gpodder/gtkui/desktop/__init__.py |
# -*- coding: utf-8 -*-
#
# gPodder - A media aggregator and podcast client
# Copyright (c) 2005-2018 The gPodder Team
#
# gPodder is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# gPodder is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
| 747 | Python | .py | 18 | 40.5 | 71 | 0.766804 | gpodder/gpodder | 1,268 | 204 | 307 | GPL-3.0 | 9/5/2024, 5:10:30 PM (Europe/Amsterdam) |
| 8,336 | podcastdirectory.py | gpodder_gpodder/src/gpodder/gtkui/desktop/podcastdirectory.py |
# -*- coding: utf-8 -*-
#
# gPodder - A media aggregator and podcast client
# Copyright (c) 2005-2018 The gPodder Team
#
# gPodder is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# gPodder is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#
# gpodder.gtkui.desktop.podcastdirectory - Podcast directory Gtk UI
# Thomas Perl <thp@gpodder.org>; 2014-10-22
#
import html
import logging
import os
import traceback
from gi.repository import Gdk, GdkPixbuf, Gtk, Pango
import gpodder
from gpodder import directory, util
from gpodder.gtkui.interface.common import BuilderWidget
from gpodder.gtkui.interface.progress import ProgressIndicator
from gpodder.gtkui.interface.tagcloud import TagCloud
_ = gpodder.gettext
logger = logging.getLogger(__name__)
class DirectoryPodcastsModel(Gtk.ListStore):
C_SELECTED, C_MARKUP, C_TITLE, C_URL = list(range(4))
def __init__(self, callback_can_subscribe):
Gtk.ListStore.__init__(self, bool, str, str, str)
self.callback_can_subscribe = callback_can_subscribe
def load(self, directory_entries):
self.clear()
for entry in directory_entries:
if entry.subscribers != -1:
self.append((False, '%s (%d)\n<small>%s</small>' % (html.escape(entry.title),
entry.subscribers, html.escape(entry.url)), entry.title, entry.url))
else:
self.append((False, '%s\n<small>%s</small>' % (html.escape(entry.title),
html.escape(entry.url)), entry.title, entry.url))
self.callback_can_subscribe(len(self.get_selected_podcasts()) > 0)
def toggle(self, path):
self[path][self.C_SELECTED] = not self[path][self.C_SELECTED]
self.callback_can_subscribe(len(self.get_selected_podcasts()) > 0)
def set_selection_to(self, selected):
for row in self:
row[self.C_SELECTED] = selected
self.callback_can_subscribe(len(self.get_selected_podcasts()) > 0)
def get_selected_podcasts(self):
return [(row[self.C_TITLE], row[self.C_URL]) for row in self if row[self.C_SELECTED]]
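# Illustrative sketch (not part of the original module): filling the podcasts model
# from directory entries and reading back the selection. `entries` (objects with
# title, url and subscribers attributes, as produced by gpodder.directory providers)
# is an assumption for illustration.
def _podcasts_model_sketch(entries):
    model = DirectoryPodcastsModel(lambda can_subscribe: None)
    model.load(entries)
    model.set_selection_to(True)
    return model.get_selected_podcasts()  # -> [(title, url), ...]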
class DirectoryProvidersModel(Gtk.ListStore):
C_WEIGHT, C_TEXT, C_ICON, C_PROVIDER = list(range(4))
SEPARATOR = (Pango.Weight.NORMAL, '', None, None)
def __init__(self, providers):
Gtk.ListStore.__init__(self, int, str, GdkPixbuf.Pixbuf, object)
for provider in providers:
instance = provider() if provider else None
if instance is not None and instance.kind == directory.Provider.PROVIDER_TAGCLOUD:
logger.warning("PROVIDER_TAGCLOUD is unsupported")
else:
self.add_provider(instance)
def add_provider(self, provider):
if provider is None:
self.append(self.SEPARATOR)
else:
pixbuf = None
if provider.icon:
search_path = {gpodder.images_folder, }
# let an extension provide an icon by putting it next to the source code
for e in gpodder.user_extensions.filenames:
search_path.add(os.path.dirname(e))
for d in search_path:
path_to_try = os.path.join(d, provider.icon)
if os.path.exists(path_to_try):
try:
pixbuf = GdkPixbuf.Pixbuf.new_from_file(path_to_try)
break
except Exception as e:
logger.warning('Could not load icon: %s (%s)', provider.icon, e)
self.append((Pango.Weight.NORMAL, provider.name, pixbuf, provider))
def is_row_separator(self, model, it):
return self.get_value(it, self.C_PROVIDER) is None
class gPodderPodcastDirectory(BuilderWidget):
def new(self):
self.gPodderPodcastDirectory.set_transient_for(self.parent_widget)
if hasattr(self, 'custom_title'):
self.main_window.set_title(self.custom_title)
if not hasattr(self, 'add_podcast_list'):
self.add_podcast_list = None
self.providers_model = DirectoryProvidersModel(directory.PROVIDERS)
self.podcasts_model = DirectoryPodcastsModel(self.on_can_subscribe_changed)
self.current_provider = None
self.setup_providers_treeview()
self.setup_podcasts_treeview()
accel = Gtk.AccelGroup()
accel.connect(Gdk.KEY_Escape, 0, Gtk.AccelFlags.VISIBLE, self.on_escape)
self.main_window.add_accel_group(accel)
self._config.connect_gtk_window(self.main_window, 'podcastdirectory', True)
self.btnBack.hide()
self.main_window.show()
def download_opml_file(self, filename):
provider = directory.FixedOpmlFileProvider(filename)
self.providers_model.add_provider(provider)
self.tv_providers.set_cursor(len(self.providers_model) - 1)
self.use_provider(provider, allow_back=False)
def setup_podcasts_treeview(self):
column = Gtk.TreeViewColumn('')
cell = Gtk.CellRendererToggle()
cell.set_fixed_size(48, -1)
column.pack_start(cell, False)
column.add_attribute(cell, 'active', DirectoryPodcastsModel.C_SELECTED)
cell.connect('toggled', lambda cell, path: self.podcasts_model.toggle(path))
self.tv_podcasts.append_column(column)
column = Gtk.TreeViewColumn('')
cell = Gtk.CellRendererText()
cell.set_property('ellipsize', Pango.EllipsizeMode.END)
column.pack_start(cell, True)
column.add_attribute(cell, 'markup', DirectoryPodcastsModel.C_MARKUP)
self.tv_podcasts.append_column(column)
self.tv_podcasts.set_model(self.podcasts_model)
self.podcasts_model.append((False, 'a', 'b', 'c'))
def setup_providers_treeview(self):
column = Gtk.TreeViewColumn('')
cell = Gtk.CellRendererPixbuf()
column.pack_start(cell, False)
column.add_attribute(cell, 'pixbuf', DirectoryProvidersModel.C_ICON)
cell = Gtk.CellRendererText()
# cell.set_property('ellipsize', Pango.EllipsizeMode.END)
column.pack_start(cell, True)
column.add_attribute(cell, 'text', DirectoryProvidersModel.C_TEXT)
column.add_attribute(cell, 'weight', DirectoryProvidersModel.C_WEIGHT)
self.tv_providers.append_column(column)
self.tv_providers.set_row_separator_func(self.providers_model.is_row_separator)
self.tv_providers.set_model(self.providers_model)
self.tv_providers.connect("row-activated", self.on_tv_providers_row_activated)
def on_tv_providers_row_activated(self, treeview, path, column):
it = self.providers_model.get_iter(path)
for row in self.providers_model:
row[DirectoryProvidersModel.C_WEIGHT] = Pango.Weight.NORMAL
if it:
self.providers_model.set_value(it, DirectoryProvidersModel.C_WEIGHT, Pango.Weight.BOLD)
provider = self.providers_model.get_value(it, DirectoryProvidersModel.C_PROVIDER)
self.use_provider(provider)
def use_provider(self, provider, allow_back=True):
self.podcasts_model.clear()
self.current_provider = provider
self.main_window.set_title(provider.name)
if provider.kind == directory.Provider.PROVIDER_SEARCH:
self.lb_search.set_text(_('Search:'))
self.bt_search.set_label(_('Search'))
elif provider.kind == directory.Provider.PROVIDER_URL:
self.lb_search.set_text(_('URL:'))
self.bt_search.set_label(_('Download'))
elif provider.kind == directory.Provider.PROVIDER_FILE:
self.lb_search.set_text(_('Filename:'))
self.bt_search.set_label(_('Open'))
elif provider.kind == directory.Provider.PROVIDER_STATIC:
self.obtain_podcasts_with(provider.on_static)
if provider.kind in (directory.Provider.PROVIDER_SEARCH,
directory.Provider.PROVIDER_URL,
directory.Provider.PROVIDER_FILE):
self.en_query.set_text('')
self.hb_text_entry.show()
util.idle_add(self.en_query.grab_focus)
else:
self.hb_text_entry.hide()
self.progressBar.set_fraction(0)
self.progressLabel.set_label('')
self.txtError.hide()
self.stState.set_visible_child(self.stPodcasts)
self.btnBack.set_visible(allow_back)
def obtain_podcasts_with(self, callback):
self.progressBar.set_fraction(0.1)
self.progressLabel.set_text(_('Please wait while the podcast list is downloaded'))
self.txtError.hide()
self.stackProgressErrorPodcasts.set_visible_child(self.stProgError)
self.selectbox.hide()
self.btnOK.hide()
original_provider = self.current_provider
self.en_query.set_sensitive(False)
self.bt_search.set_sensitive(False)
self.podcasts_model.clear()
@util.run_in_background
def download_data():
podcasts = []
error = None
try:
podcasts = callback()
except directory.JustAWarning as e:
error = e
except Exception as e:
logger.warning(
'Got exception while loading podcasts: %r', e,
exc_info=True)
error = e
@util.idle_add
def update_ui():
self.progressBar.set_fraction(1)
if original_provider == self.current_provider:
self.podcasts_model.load(podcasts or [])
if error:
self.progressLabel.set_text(_("Error"))
if isinstance(error, directory.JustAWarning):
self.txtError.get_buffer().set_text(error.warning)
else:
self.txtError.get_buffer().set_text(_("Error: %s\n\n%s") % (error, "".join(traceback.format_exception(error))))
self.txtError.show()
elif not podcasts:
self.progressLabel.set_text(_("No results"))
self.txtError.get_buffer().set_text(_("Sorry, no podcasts were found"))
self.txtError.show()
else:
self.stackProgressErrorPodcasts.set_visible_child(self.sw_podcasts)
self.selectbox.show()
self.btnOK.show()
else:
logger.warning('Ignoring update from old thread')
self.en_query.set_sensitive(True)
self.bt_search.set_sensitive(True)
if self.en_query.get_realized():
self.en_query.grab_focus()
def on_bt_search_clicked(self, widget):
if self.current_provider is None:
return
query = self.en_query.get_text()
@self.obtain_podcasts_with
def load_data():
if self.current_provider.kind == directory.Provider.PROVIDER_SEARCH:
return self.current_provider.on_search(query)
elif self.current_provider.kind == directory.Provider.PROVIDER_URL:
return self.current_provider.on_url(query)
elif self.current_provider.kind == directory.Provider.PROVIDER_FILE:
return self.current_provider.on_file(query)
def on_can_subscribe_changed(self, can_subscribe):
self.btnOK.set_sensitive(can_subscribe)
def on_btnSelectAll_clicked(self, widget, *args):
self.podcasts_model.set_selection_to(True)
def on_btnSelectNone_clicked(self, widget, *args):
self.podcasts_model.set_selection_to(False)
def on_btnOK_clicked(self, widget, *args):
urls = self.podcasts_model.get_selected_podcasts()
self.main_window.destroy()
# add channels that have been selected
if self.add_podcast_list is not None:
self.add_podcast_list(urls)
def on_btnCancel_clicked(self, widget, *args):
self.main_window.destroy()
def on_btnBack_clicked(self, widget, *args):
self.stState.set_visible_child(self.stProviders)
widget.hide()
self.selectbox.hide()
self.btnOK.hide()
def on_escape(self, *args, **kwargs):
if self.stState.get_visible_child() == self.stProviders:
self.main_window.destroy()
else:
self.on_btnBack_clicked(self.btnBack)
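# Illustrative sketch (not part of the original module): obtain_podcasts_with() is
# usable both as a plain call and as a decorator, so a provider callback can be
# declared inline and executed in the background immediately, exactly as
# on_bt_search_clicked() does above. `directory_ui`, `provider` and `query` are
# assumptions for illustration.
def _search_provider_sketch(directory_ui, provider, query):
    @directory_ui.obtain_podcasts_with
    def load_data():
        return provider.on_search(query)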
| 13,032 | Python | .py | 265 | 38.45283 | 139 | 0.635133 | gpodder/gpodder | 1,268 | 204 | 307 | GPL-3.0 | 9/5/2024, 5:10:30 PM (Europe/Amsterdam) |
| 8,337 | preferences.py | gpodder_gpodder/src/gpodder/gtkui/desktop/preferences.py |
# -*- coding: utf-8 -*-
#
# gPodder - A media aggregator and podcast client
# Copyright (c) 2005-2018 The gPodder Team
#
# gPodder is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# gPodder is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import html
import logging
from urllib.request import getproxies
from gi.repository import Gdk, Gtk, Pango
import gpodder
from gpodder import util, vimeo, youtube
from gpodder.gtkui.desktopfile import PlayerListModel
from gpodder.gtkui.interface.common import (BuilderWidget, TreeViewHelper,
show_message_dialog)
from gpodder.gtkui.interface.configeditor import gPodderConfigEditor
logger = logging.getLogger(__name__)
_ = gpodder.gettext
N_ = gpodder.ngettext
class NewEpisodeActionList(Gtk.ListStore):
C_CAPTION, C_AUTO_DOWNLOAD = list(range(2))
ACTION_NONE, ACTION_ASK, ACTION_MINIMIZED, ACTION_ALWAYS = list(range(4))
def __init__(self, config):
Gtk.ListStore.__init__(self, str, str)
self._config = config
self.append((_('Do nothing'), 'ignore'))
self.append((_('Show episode list'), 'show'))
self.append((_('Add to download list'), 'queue'))
self.append((_('Download immediately'), 'download'))
def get_index(self):
for index, row in enumerate(self):
if self._config.ui.gtk.new_episodes == row[self.C_AUTO_DOWNLOAD]:
return index
return 1 # Some sane default
def set_index(self, index):
self._config.ui.gtk.new_episodes = self[index][self.C_AUTO_DOWNLOAD]
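# Illustrative sketch (not part of the original module): the pattern shared by the
# action-list models in this file -- bind the ListStore to a Gtk.ComboBox, render
# the caption column, and keep the configuration in sync through get_index() /
# set_index(). `config` and `combo` are assumptions for illustration.
def _bind_new_episode_action_combo(config, combo):
    model = NewEpisodeActionList(config)
    combo.set_model(model)
    cell = Gtk.CellRendererText()
    combo.pack_start(cell, True)
    combo.add_attribute(cell, 'text', NewEpisodeActionList.C_CAPTION)
    combo.set_active(model.get_index())
    combo.connect('changed', lambda widget: model.set_index(widget.get_active()))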
class DeviceTypeActionList(Gtk.ListStore):
C_CAPTION, C_DEVICE_TYPE = list(range(2))
def __init__(self, config):
Gtk.ListStore.__init__(self, str, str)
self._config = config
self.append((_('None'), 'none'))
self.append((_('iPod'), 'ipod'))
self.append((_('Filesystem-based'), 'filesystem'))
def get_index(self):
for index, row in enumerate(self):
if self._config.device_sync.device_type == row[self.C_DEVICE_TYPE]:
return index
return 0 # Some sane default
def set_index(self, index):
self._config.device_sync.device_type = self[index][self.C_DEVICE_TYPE]
class OnSyncActionList(Gtk.ListStore):
C_CAPTION, C_ON_SYNC_DELETE, C_ON_SYNC_MARK_PLAYED = list(range(3))
ACTION_NONE, ACTION_ASK, ACTION_MINIMIZED, ACTION_ALWAYS = list(range(4))
def __init__(self, config):
Gtk.ListStore.__init__(self, str, bool, bool)
self._config = config
self.append((_('Do nothing'), False, False))
self.append((_('Mark as played'), False, True))
self.append((_('Delete from gPodder'), True, False))
def get_index(self):
for index, row in enumerate(self):
if (self._config.device_sync.after_sync.delete_episodes
and row[self.C_ON_SYNC_DELETE]):
return index
if (self._config.device_sync.after_sync.mark_episodes_played
and row[self.C_ON_SYNC_MARK_PLAYED] and not
self._config.device_sync.after_sync.delete_episodes):
return index
return 0 # Some sane default
def set_index(self, index):
self._config.device_sync.after_sync.delete_episodes = self[index][self.C_ON_SYNC_DELETE]
self._config.device_sync.after_sync.mark_episodes_played = self[index][self.C_ON_SYNC_MARK_PLAYED]
class YouTubeVideoFormatListModel(Gtk.ListStore):
C_CAPTION, C_ID = list(range(2))
def __init__(self, config):
Gtk.ListStore.__init__(self, str, int)
self._config = config
if self._config.youtube.preferred_fmt_ids:
caption = _('Custom (%(format_ids)s)') % {
'format_ids': ', '.join(str(x) for x in self._config.youtube.preferred_fmt_ids),
}
self.append((caption, 0))
for fmt, (fmt_id, path, description) in youtube.formats:
self.append((description, fmt))
def get_index(self):
for index, row in enumerate(self):
if self._config.youtube.preferred_fmt_id == row[self.C_ID]:
return index
return 0
def set_index(self, index):
self._config.youtube.preferred_fmt_id = self[index][self.C_ID]
class YouTubeVideoHLSFormatListModel(Gtk.ListStore):
C_CAPTION, C_ID = list(range(2))
def __init__(self, config):
Gtk.ListStore.__init__(self, str, int)
self._config = config
if self._config.youtube.preferred_hls_fmt_ids:
caption = _('Custom (%(format_ids)s)') % {
'format_ids': ', '.join(str(x) for x in self._config.youtube.preferred_hls_fmt_ids),
}
self.append((caption, 0))
for fmt, (fmt_id, path, description) in youtube.hls_formats:
self.append((description, fmt))
def get_index(self):
for index, row in enumerate(self):
if self._config.youtube.preferred_hls_fmt_id == row[self.C_ID]:
return index
return 0
def set_index(self, index):
self._config.youtube.preferred_hls_fmt_id = self[index][self.C_ID]
class VimeoVideoFormatListModel(Gtk.ListStore):
C_CAPTION, C_ID = list(range(2))
def __init__(self, config):
Gtk.ListStore.__init__(self, str, str)
self._config = config
for fileformat, description in vimeo.FORMATS:
self.append((description, fileformat))
def get_index(self):
for index, row in enumerate(self):
if self._config.vimeo.fileformat == row[self.C_ID]:
return index
return 0
def set_index(self, index):
value = self[index][self.C_ID]
if value is not None:
self._config.vimeo.fileformat = value
class ProxyTypeActionList(Gtk.ListStore):
C_CAPTION, C_PROXY_TYPE = list(range(2))
def __init__(self, config):
Gtk.ListStore.__init__(self, str, str)
self._config = config
self.append((_('SOCKS5h (Remote DNS)'), 'socks5h'))
self.append((_('SOCKS5'), 'socks5'))
self.append((_('HTTP'), 'http'))
def get_index(self):
for index, row in enumerate(self):
if self._config.network.proxy_type == row[self.C_PROXY_TYPE]:
return index
return 0
def set_index(self, index):
self._config.network.proxy_type = self[index][self.C_PROXY_TYPE]
class gPodderPreferences(BuilderWidget):
C_TOGGLE, C_LABEL, C_EXTENSION, C_SHOW_TOGGLE = list(range(4))
def new(self):
self.gPodderPreferences.set_transient_for(self.parent_widget)
for cb in (self.combo_audio_player_app, self.combo_video_player_app):
cellrenderer = Gtk.CellRendererPixbuf()
cb.pack_start(cellrenderer, False)
cb.add_attribute(cellrenderer, 'pixbuf', PlayerListModel.C_ICON)
cellrenderer = Gtk.CellRendererText()
cellrenderer.set_property('ellipsize', Pango.EllipsizeMode.END)
cb.pack_start(cellrenderer, True)
cb.add_attribute(cellrenderer, 'markup', PlayerListModel.C_NAME)
cb.set_row_separator_func(PlayerListModel.is_separator)
self.audio_player_model = self.user_apps_reader.get_model('audio')
self.combo_audio_player_app.set_model(self.audio_player_model)
index = self.audio_player_model.get_index(self._config.player.audio)
self.combo_audio_player_app.set_active(index)
self.video_player_model = self.user_apps_reader.get_model('video')
self.combo_video_player_app.set_model(self.video_player_model)
index = self.video_player_model.get_index(self._config.player.video)
self.combo_video_player_app.set_active(index)
self.combo_color_scheme.remove_all()
self.combo_color_scheme.prepend('dark', 'Dark')
self.combo_color_scheme.prepend('light', 'Light')
cs = self._config.ui.gtk.color_scheme
if self.have_settings_portal:
self.combo_color_scheme.prepend('system', 'System')
self.combo_color_scheme.set_active_id(cs)
else:
if cs == 'system':
self.combo_color_scheme.set_active_id('light')
self._config.ui.gtk.color_scheme = 'light'
else:
self.combo_color_scheme.set_active_id(cs)
self._config.connect_gtk_combo_box_text('ui.gtk.color_scheme', self.combo_color_scheme)
self.preferred_youtube_format_model = YouTubeVideoFormatListModel(self._config)
self.combobox_preferred_youtube_format.set_model(self.preferred_youtube_format_model)
cellrenderer = Gtk.CellRendererText()
cellrenderer.set_property('ellipsize', Pango.EllipsizeMode.END)
self.combobox_preferred_youtube_format.pack_start(cellrenderer, True)
self.combobox_preferred_youtube_format.add_attribute(cellrenderer, 'text', self.preferred_youtube_format_model.C_CAPTION)
self.combobox_preferred_youtube_format.set_active(self.preferred_youtube_format_model.get_index())
self.preferred_youtube_hls_format_model = YouTubeVideoHLSFormatListModel(self._config)
self.combobox_preferred_youtube_hls_format.set_model(self.preferred_youtube_hls_format_model)
cellrenderer = Gtk.CellRendererText()
cellrenderer.set_property('ellipsize', Pango.EllipsizeMode.END)
self.combobox_preferred_youtube_hls_format.pack_start(cellrenderer, True)
self.combobox_preferred_youtube_hls_format.add_attribute(cellrenderer, 'text', self.preferred_youtube_hls_format_model.C_CAPTION)
self.combobox_preferred_youtube_hls_format.set_active(self.preferred_youtube_hls_format_model.get_index())
self.preferred_vimeo_format_model = VimeoVideoFormatListModel(self._config)
self.combobox_preferred_vimeo_format.set_model(self.preferred_vimeo_format_model)
cellrenderer = Gtk.CellRendererText()
cellrenderer.set_property('ellipsize', Pango.EllipsizeMode.END)
self.combobox_preferred_vimeo_format.pack_start(cellrenderer, True)
self.combobox_preferred_vimeo_format.add_attribute(cellrenderer, 'text', self.preferred_vimeo_format_model.C_CAPTION)
self.combobox_preferred_vimeo_format.set_active(self.preferred_vimeo_format_model.get_index())
self._config.connect_gtk_togglebutton('ui.gtk.find_as_you_type',
self.checkbutton_find_as_you_type)
self._config.connect_gtk_togglebutton('ui.gtk.podcast_list.hide_empty',
self.checkbutton_podcast_list_hide_empty)
self._config.connect_gtk_togglebutton('ui.gtk.podcast_list.all_episodes',
self.checkbutton_podcast_list_all_episodes)
self._config.connect_gtk_togglebutton('ui.gtk.podcast_list.sections',
self.checkbutton_podcast_list_sections)
self._config.connect_gtk_togglebutton('ui.gtk.episode_list.always_show_new',
self.checkbutton_episode_list_always_show_new)
self._config.connect_gtk_togglebutton('ui.gtk.episode_list.trim_title_prefix',
self.checkbutton_episode_list_trim_title_prefix)
self._config.connect_gtk_togglebutton('ui.gtk.episode_list.descriptions',
self.checkbutton_episode_list_descriptions)
self.update_interval_presets = [0, 10, 30, 60, 2 * 60, 6 * 60, 12 * 60]
adjustment_update_interval = self.hscale_update_interval.get_adjustment()
adjustment_update_interval.set_upper(len(self.update_interval_presets) - 1)
if self._config.auto.update.frequency in self.update_interval_presets:
index = self.update_interval_presets.index(self._config.auto.update.frequency)
self.hscale_update_interval.set_value(index)
else:
# Patch in the current "custom" value into the mix
self.update_interval_presets.append(self._config.auto.update.frequency)
self.update_interval_presets.sort()
adjustment_update_interval.set_upper(len(self.update_interval_presets) - 1)
index = self.update_interval_presets.index(self._config.auto.update.frequency)
self.hscale_update_interval.set_value(index)
self._config.connect_gtk_spinbutton('limit.episodes', self.spinbutton_episode_limit)
self._config.connect_gtk_togglebutton('ui.gtk.only_added_are_new',
self.checkbutton_only_added_are_new)
self.auto_download_model = NewEpisodeActionList(self._config)
self.combo_auto_download.set_model(self.auto_download_model)
cellrenderer = Gtk.CellRendererText()
self.combo_auto_download.pack_start(cellrenderer, True)
self.combo_auto_download.add_attribute(cellrenderer, 'text', NewEpisodeActionList.C_CAPTION)
self.combo_auto_download.set_active(self.auto_download_model.get_index())
self._config.connect_gtk_togglebutton('check_connection',
self.checkbutton_check_connection)
if self._config.auto.cleanup.played:
adjustment_expiration = self.hscale_expiration.get_adjustment()
if self._config.auto.cleanup.days > adjustment_expiration.get_upper():
# Patch the adjustment to include the higher current value
adjustment_expiration.set_upper(self._config.auto.cleanup.days)
self.hscale_expiration.set_value(self._config.auto.cleanup.days)
else:
self.hscale_expiration.set_value(0)
self._config.connect_gtk_togglebutton('auto.cleanup.unplayed',
self.checkbutton_expiration_unplayed)
self._config.connect_gtk_togglebutton('auto.cleanup.unfinished',
self.checkbutton_expiration_unfinished)
self.device_type_model = DeviceTypeActionList(self._config)
self.combobox_device_type.set_model(self.device_type_model)
cellrenderer = Gtk.CellRendererText()
self.combobox_device_type.pack_start(cellrenderer, True)
self.combobox_device_type.add_attribute(cellrenderer, 'text',
DeviceTypeActionList.C_CAPTION)
self.combobox_device_type.set_active(self.device_type_model.get_index())
self.on_sync_model = OnSyncActionList(self._config)
self.combobox_on_sync.set_model(self.on_sync_model)
cellrenderer = Gtk.CellRendererText()
self.combobox_on_sync.pack_start(cellrenderer, True)
self.combobox_on_sync.add_attribute(cellrenderer, 'text', OnSyncActionList.C_CAPTION)
self.combobox_on_sync.set_active(self.on_sync_model.get_index())
self._config.connect_gtk_togglebutton('device_sync.skip_played_episodes',
self.checkbutton_skip_played_episodes)
self._config.connect_gtk_togglebutton('device_sync.playlists.create',
self.checkbutton_create_playlists)
self._config.connect_gtk_togglebutton('device_sync.playlists.two_way_sync',
self.checkbutton_delete_using_playlists)
self._config.connect_gtk_togglebutton('device_sync.delete_deleted_episodes',
self.checkbutton_delete_deleted_episodes)
self._config.connect_gtk_togglebutton('device_sync.compare_episode_filesize',
self.checkbutton_compare_episode_filesize)
# Have to do this before calling set_active on checkbutton_enable
self._enable_mygpo = self._config.mygpo.enabled
# Initialize the UI state with configuration settings
self.checkbutton_enable.set_active(self._config.mygpo.enabled)
self.entry_server.set_text(self._config.mygpo.server)
self.entry_username.set_text(self._config.mygpo.username)
self.entry_password.set_text(self._config.mygpo.password)
self.entry_caption.set_text(self._config.mygpo.device.caption)
# Disable mygpo sync while the dialog is open
self._config.mygpo.enabled = False
# Network proxy settings UI
self._config.connect_gtk_togglebutton('network.use_proxy',
self.checkbutton_use_proxy)
self._config.connect_gtk_togglebutton('network.proxy_use_username_password',
self.checkbutton_proxy_use_username_password)
self.entry_proxy_hostname.set_text(self._config.network.proxy_hostname)
self.entry_proxy_port.set_text(self._config.network.proxy_port)
        # Disable the proxy detail inputs right away if the checkbutton is
        # unticked (value taken from _config) whenever the preferences dialog is created
self.on_checkbutton_use_proxy_toggled(self.checkbutton_use_proxy)
self.on_checkbutton_proxy_use_username_password_toggled(self.checkbutton_proxy_use_username_password)
self.proxy_type_model = ProxyTypeActionList(self._config)
self.combobox_proxy_type.set_model(self.proxy_type_model)
self.combobox_proxy_type.pack_start(cellrenderer, True)
self.combobox_proxy_type.add_attribute(cellrenderer, 'text',
ProxyTypeActionList.C_CAPTION)
self.combobox_proxy_type.set_active(self.proxy_type_model.get_index())
env_proxies = getproxies()
env_proxies_str = 'None'
if env_proxies:
env_proxies_str = ''
for var, url in env_proxies.items():
env_proxies_str += f"{var}_proxy={url}\n"
self.label_env_proxy.set_text(env_proxies_str)
# Configure the extensions manager GUI
self.set_extension_preferences()
self._config.connect_gtk_window(self.main_window, 'preferences', True)
gpodder.user_extensions.on_ui_object_available('preferences-gtk', self)
self.inject_extensions_preferences(init=True)
self.prefs_stack.foreach(self._wrap_checkbox_labels)
def _wrap_checkbox_labels(self, w, *args):
if w.get_name().startswith("no_label_wrap"):
return
elif isinstance(w, Gtk.CheckButton):
label = w.get_child()
label.set_line_wrap(True)
elif isinstance(w, Gtk.Container):
w.foreach(self._wrap_checkbox_labels)
def inject_extensions_preferences(self, init=False):
if not init:
# remove preferences buttons for all extensions
for child in self.prefs_stack.get_children():
if child.get_name().startswith("extension."):
self.prefs_stack.remove(child)
# add preferences buttons for all extensions
result = gpodder.user_extensions.on_preferences()
if result:
for label, callback in result:
page = callback()
name = "extension." + label
page.set_name(name)
page.foreach(self._wrap_checkbox_labels)
self.prefs_stack.add_titled(page, name, label)
def _extensions_select_function(self, selection, model, path, path_currently_selected):
return model.get_value(model.get_iter(path), self.C_SHOW_TOGGLE)
def set_extension_preferences(self):
def search_equal_func(model, column, key, it):
label = model.get_value(it, self.C_LABEL)
if key.lower() in label.lower():
# from http://www.pyGtk.org/docs/pygtk/class-gtktreeview.html:
# "func should return False to indicate that the row matches
# the search criteria."
return False
return True
self.treeviewExtensions.set_search_equal_func(search_equal_func)
selection = self.treeviewExtensions.get_selection()
selection.set_select_function(self._extensions_select_function)
toggle_cell = Gtk.CellRendererToggle()
toggle_cell.connect('toggled', self.on_extensions_cell_toggled)
toggle_column = Gtk.TreeViewColumn('')
toggle_column.pack_start(toggle_cell, True)
toggle_column.add_attribute(toggle_cell, 'active', self.C_TOGGLE)
toggle_column.add_attribute(toggle_cell, 'visible', self.C_SHOW_TOGGLE)
toggle_column.set_property('min-width', 32)
self.treeviewExtensions.append_column(toggle_column)
name_cell = Gtk.CellRendererText()
name_cell.set_property('ellipsize', Pango.EllipsizeMode.END)
extension_column = Gtk.TreeViewColumn(_('Name'))
extension_column.pack_start(name_cell, True)
extension_column.add_attribute(name_cell, 'markup', self.C_LABEL)
extension_column.set_expand(True)
self.treeviewExtensions.append_column(extension_column)
self.extensions_model = Gtk.ListStore(bool, str, object, bool)
def key_func(pair):
category, container = pair
return (category, container.metadata.title)
def convert(extensions):
for container in extensions:
yield (container.metadata.category, container)
old_category = None
for category, container in sorted(convert(
gpodder.user_extensions.get_extensions()), key=key_func):
if old_category != category:
label = '<span weight="bold">%s</span>' % html.escape(category)
self.extensions_model.append((None, label, None, False))
old_category = category
label = '%s\n<small>%s</small>' % (
html.escape(container.metadata.title),
html.escape(container.metadata.description))
self.extensions_model.append((container.enabled, label, container, True))
self.treeviewExtensions.set_model(self.extensions_model)
self.treeviewExtensions.columns_autosize()
def on_treeview_extension_button_released(self, treeview, event):
if event.window != treeview.get_bin_window():
return False
if event.type == Gdk.EventType.BUTTON_RELEASE and event.button == 3:
return self.on_treeview_extension_show_context_menu(treeview, event)
return False
def on_treeview_extension_show_context_menu(self, treeview, event=None):
selection = treeview.get_selection()
model, paths = selection.get_selected_rows()
container = model.get_value(model.get_iter(paths[0]), self.C_EXTENSION)
if not container:
return
menu = Gtk.Menu()
if container.metadata.doc:
menu_item = Gtk.MenuItem(_('Documentation'))
menu_item.connect('activate', self.open_weblink,
container.metadata.doc)
menu.append(menu_item)
menu_item = Gtk.MenuItem(_('Extension info'))
menu_item.connect('activate', self.show_extension_info, model, container)
menu.append(menu_item)
if container.metadata.payment:
menu_item = Gtk.MenuItem(_('Support the author'))
menu_item.connect('activate', self.open_weblink, container.metadata.payment)
menu.append(menu_item)
menu.show_all()
if event is None:
func = TreeViewHelper.make_popup_position_func(treeview)
menu.popup(None, None, func, None, 3, Gtk.get_current_event_time())
else:
menu.popup(None, None, None, None, 3, Gtk.get_current_event_time())
return True
def on_extensions_cell_toggled(self, cell, path):
model = self.treeviewExtensions.get_model()
it = model.get_iter(path)
container = model.get_value(it, self.C_EXTENSION)
enabled_extensions = list(self._config.extensions.enabled)
new_enabled = not model.get_value(it, self.C_TOGGLE)
if new_enabled and container.name not in enabled_extensions:
enabled_extensions.append(container.name)
elif not new_enabled and container.name in enabled_extensions:
enabled_extensions.remove(container.name)
self._config.extensions.enabled = enabled_extensions
now_enabled = (container.name in self._config.extensions.enabled)
if new_enabled == now_enabled:
model.set_value(it, self.C_TOGGLE, new_enabled)
if now_enabled:
self.on_extension_enabled(container.module)
else:
self.on_extension_disabled(container.module)
self.inject_extensions_preferences()
elif container.error is not None:
if hasattr(container.error, 'message'):
error_msg = container.error.message
else:
error_msg = str(container.error)
self.show_message(error_msg,
_('Extension cannot be activated'), important=True)
model.set_value(it, self.C_TOGGLE, False)
def show_extension_info(self, w, model, container):
if not container or not model:
return
info = '\n'.join('<b>{}:</b> {}'.format(html.escape(key), html.escape(value))
for key, value in container.metadata.get_sorted()
if key not in ('title', 'description'))
self.show_message_details(container.metadata.title, container.metadata.description, info)
def open_weblink(self, w, url):
util.open_website(url)
def on_dialog_destroy(self, widget):
# Re-enable mygpo sync if the user has selected it
self._config.mygpo.enabled = self._enable_mygpo
# Make sure the device is successfully created/updated
self.mygpo_client.create_device()
# Flush settings for mygpo client now
self.mygpo_client.flush(now=True)
def on_button_close_clicked(self, widget):
self.main_window.destroy()
def on_button_advanced_clicked(self, widget):
self.main_window.destroy()
gPodderConfigEditor(self.parent_window, _config=self._config)
def on_combo_audio_player_app_changed(self, widget):
index = self.combo_audio_player_app.get_active()
self._config.player.audio = self.audio_player_model.get_command(index)
def on_combo_video_player_app_changed(self, widget):
index = self.combo_video_player_app.get_active()
self._config.player.video = self.video_player_model.get_command(index)
def on_combobox_preferred_youtube_format_changed(self, widget):
index = self.combobox_preferred_youtube_format.get_active()
self.preferred_youtube_format_model.set_index(index)
def on_combobox_preferred_youtube_hls_format_changed(self, widget):
index = self.combobox_preferred_youtube_hls_format.get_active()
self.preferred_youtube_hls_format_model.set_index(index)
def on_combobox_preferred_vimeo_format_changed(self, widget):
index = self.combobox_preferred_vimeo_format.get_active()
self.preferred_vimeo_format_model.set_index(index)
def on_button_audio_player_clicked(self, widget):
result = self.show_text_edit_dialog(_('Configure audio player'),
_('Command:'),
self._config.player.audio)
if result:
self._config.player.audio = result
index = self.audio_player_model.get_index(self._config.player.audio)
self.combo_audio_player_app.set_active(index)
def on_button_video_player_clicked(self, widget):
result = self.show_text_edit_dialog(_('Configure video player'),
_('Command:'),
self._config.player.video)
if result:
self._config.player.video = result
index = self.video_player_model.get_index(self._config.player.video)
self.combo_video_player_app.set_active(index)
def format_update_interval_value(self, scale, value):
value = int(value)
ret = None
if value == 0:
ret = _('manually')
elif value > 0 and len(self.update_interval_presets) > value:
ret = util.format_seconds_to_hour_min_sec(self.update_interval_presets[value] * 60)
else:
ret = str(value)
        # Gtk3 quirk: the rendered value string must not be wider (in pixels) than
        # the string for the highest value. That assumption holds when formatting
        # e.g. 0 to 1000, where '1000' is the longest string, but not here, where
        # '10 minutes' is wider than '12 hours'. Replace spaces with non-breaking
        # spaces, otherwise '10 minutes' gets cut off and displayed as just '10'.
ret = ret.replace(' ', '\xa0')
return ret
def on_update_interval_value_changed(self, gtk_range):
value = int(gtk_range.get_value())
self._config.auto.update.enabled = (value > 0)
self._config.auto.update.frequency = self.update_interval_presets[value]
def on_combo_auto_download_changed(self, widget):
index = self.combo_auto_download.get_active()
self.auto_download_model.set_index(index)
def format_expiration_value(self, scale, value):
value = int(value)
if value == 0:
return _('manually')
else:
return N_('after %(count)d day', 'after %(count)d days',
value) % {'count': value}
def on_expiration_value_changed(self, gtk_range):
value = int(gtk_range.get_value())
if value == 0:
self.checkbutton_expiration_unplayed.set_active(False)
self._config.auto.cleanup.played = False
self._config.auto.cleanup.unplayed = False
else:
self._config.auto.cleanup.played = True
self._config.auto.cleanup.days = value
self.checkbutton_expiration_unplayed.set_sensitive(value > 0)
self.checkbutton_expiration_unfinished.set_sensitive(value > 0)
def on_enabled_toggled(self, widget):
# Only update indirectly (see on_dialog_destroy)
self._enable_mygpo = widget.get_active()
def on_server_changed(self, widget):
self._config.mygpo.server = widget.get_text()
def on_username_changed(self, widget):
self._config.mygpo.username = widget.get_text()
def on_password_changed(self, widget):
self._config.mygpo.password = widget.get_text()
def on_device_caption_changed(self, widget):
self._config.mygpo.device.caption = widget.get_text()
def on_button_overwrite_clicked(self, button):
title = _('Replace subscription list on server')
message = _('Remote podcasts that have not been added locally will be removed on the server. Continue?')
if self.show_confirmation(message, title):
@util.run_in_background
def thread_proc():
self._config.mygpo.enabled = True
self.on_send_full_subscriptions()
self._config.mygpo.enabled = False
def on_combobox_on_sync_changed(self, widget):
index = self.combobox_on_sync.get_active()
self.on_sync_model.set_index(index)
def on_checkbutton_create_playlists_toggled(
self, widget, device_type_changed=False):
if not widget.get_active():
self._config.device_sync.playlists.create = False
self.toggle_playlist_interface(False)
# need to read value of checkbutton from interface,
# rather than value of parameter
else:
self._config.device_sync.playlists.create = True
self.toggle_playlist_interface(True)
def toggle_playlist_interface(self, enabled):
if enabled and self._config.device_sync.device_type == 'filesystem':
self.btn_playlistfolder.set_sensitive(True)
self.btn_playlistfolder.set_label(self._config.device_sync.playlists.folder)
self.checkbutton_delete_using_playlists.set_sensitive(True)
children = self.btn_playlistfolder.get_children()
if children:
label = children.pop()
label.set_ellipsize(Pango.EllipsizeMode.START)
label.set_xalign(0.0)
else:
self.btn_playlistfolder.set_sensitive(False)
self.btn_playlistfolder.set_label('')
self.checkbutton_delete_using_playlists.set_sensitive(False)
def on_combobox_device_type_changed(self, widget):
index = self.combobox_device_type.get_active()
self.device_type_model.set_index(index)
device_type = self._config.device_sync.device_type
if device_type == 'none':
self.btn_filesystemMountpoint.set_label('')
self.btn_filesystemMountpoint.set_sensitive(False)
self.checkbutton_create_playlists.set_sensitive(False)
self.toggle_playlist_interface(False)
self.checkbutton_delete_using_playlists.set_sensitive(False)
self.combobox_on_sync.set_sensitive(False)
self.checkbutton_skip_played_episodes.set_sensitive(False)
elif device_type == 'filesystem':
self.btn_filesystemMountpoint.set_label(self._config.device_sync.device_folder or "")
self.btn_filesystemMountpoint.set_sensitive(True)
self.checkbutton_create_playlists.set_sensitive(True)
self.toggle_playlist_interface(self._config.device_sync.playlists.create)
self.combobox_on_sync.set_sensitive(True)
self.checkbutton_skip_played_episodes.set_sensitive(True)
self.checkbutton_delete_deleted_episodes.set_sensitive(True)
elif device_type == 'ipod':
self.btn_filesystemMountpoint.set_label(self._config.device_sync.device_folder)
self.btn_filesystemMountpoint.set_sensitive(True)
self.checkbutton_create_playlists.set_sensitive(False)
self.toggle_playlist_interface(False)
self.checkbutton_delete_using_playlists.set_sensitive(False)
self.combobox_on_sync.set_sensitive(False)
self.checkbutton_skip_played_episodes.set_sensitive(True)
self.checkbutton_delete_deleted_episodes.set_sensitive(True)
self.checkbutton_compare_episode_filesize.set_sensitive(True)
children = self.btn_filesystemMountpoint.get_children()
if children:
label = children.pop()
label.set_ellipsize(Pango.EllipsizeMode.START)
label.set_xalign(0.0)
def on_btn_device_mountpoint_clicked(self, widget):
fs = Gtk.FileChooserDialog(title=_('Select folder for mount point'),
action=Gtk.FileChooserAction.SELECT_FOLDER)
fs.set_local_only(False)
fs.add_button(_('_Cancel'), Gtk.ResponseType.CANCEL)
fs.add_button(_('_Open'), Gtk.ResponseType.OK)
fs.set_uri(self.btn_filesystemMountpoint.get_label() or "")
if fs.run() == Gtk.ResponseType.OK:
if self._config.device_sync.device_type == 'filesystem':
self._config.device_sync.device_folder = fs.get_uri()
elif self._config.device_sync.device_type == 'ipod':
self._config.device_sync.device_folder = fs.get_filename()
# Request an update of the mountpoint button
self.on_combobox_device_type_changed(None)
fs.destroy()
def on_btn_playlist_folder_clicked(self, widget):
fs = Gtk.FileChooserDialog(title=_('Select folder for playlists'),
action=Gtk.FileChooserAction.SELECT_FOLDER)
fs.set_local_only(False)
fs.add_button(_('_Cancel'), Gtk.ResponseType.CANCEL)
fs.add_button(_('_Open'), Gtk.ResponseType.OK)
device_folder = util.new_gio_file(self._config.device_sync.device_folder)
playlists_folder = device_folder.resolve_relative_path(self._config.device_sync.playlists.folder)
fs.set_file(playlists_folder)
while fs.run() == Gtk.ResponseType.OK:
filename = util.relpath(fs.get_uri(),
self._config.device_sync.device_folder)
if not filename:
show_message_dialog(fs, _('The playlists folder must be on the device'))
continue
if self._config.device_sync.device_type == 'filesystem':
self._config.device_sync.playlists.folder = filename
self.btn_playlistfolder.set_label(filename or "")
children = self.btn_playlistfolder.get_children()
if children:
label = children.pop()
label.set_ellipsize(Pango.EllipsizeMode.START)
label.set_xalign(0.0)
break
fs.destroy()
def on_checkbutton_use_proxy_toggled(self, widget):
widgets = (self.grid_network_proxy_details,
self.vbox_network_proxy_username_password)
for w in widgets:
w.set_sensitive(widget.get_active())
def on_checkbutton_proxy_use_username_password_toggled(self, widget):
self.grid_network_proxy_username_password.set_sensitive(widget.get_active())
def on_combobox_proxy_type_changed(self, widget):
index = self.combobox_proxy_type.get_active()
self.proxy_type_model.set_index(index)
def on_proxy_hostname_changed(self, widget):
self._config.network.proxy_hostname = widget.get_text()
def on_proxy_port_changed(self, widget):
self._config.network.proxy_port = widget.get_text()
def on_proxy_username_changed(self, widget):
self._config.network.proxy_username = widget.get_text()
def on_proxy_password_changed(self, widget):
self._config.network.proxy_password = widget.get_text()
| 38,583 | Python | .py | 686 | 44.731778 | 137 | 0.646703 | gpodder/gpodder | 1,268 | 204 | 307 | GPL-3.0 | 9/5/2024, 5:10:30 PM (Europe/Amsterdam) |
| 8,338 | episodeselector.py | gpodder_gpodder/src/gpodder/gtkui/desktop/episodeselector.py |
# -*- coding: utf-8 -*-
#
# gPodder - A media aggregator and podcast client
# Copyright (c) 2005-2018 The gPodder Team
#
# gPodder is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# gPodder is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from gi.repository import Gtk, Pango
import gpodder
from gpodder import util
from gpodder.gtkui.interface.common import BuilderWidget, TreeViewHelper
_ = gpodder.gettext
N_ = gpodder.ngettext
class gPodderEpisodeSelector(BuilderWidget):
"""Episode selection dialog
Optional keyword arguments that modify the behaviour of this dialog:
- callback: Function that takes 1 parameter which is a list of
the selected episodes (or empty list when none selected)
- remove_callback: Function that takes 1 parameter which is a list
of episodes that should be "removed" (see below)
(default is None, which means remove not possible)
- remove_action: Label for the "remove" action (default is "Remove")
- remove_finished: Callback after all remove callbacks have finished
(default is None, also depends on remove_callback)
It will get a list of episode URLs that have been
removed, so the main UI can update those
- episodes: List of episodes that are presented for selection
- selected: (optional) List of boolean variables that define the
default checked state for the given episodes
- selected_default: (optional) The default boolean value for the
checked state if no other value is set
(default is False)
- columns: List of (name, sort_name, sort_type, caption) pairs for the
columns, the name is the attribute name of the episode to be
read from each episode object. The sort name is the
attribute name of the episode to be used to sort this column.
If the sort_name is None it will use the attribute name for
sorting. The sort type is the type of the sort column.
                  The caption attribute is the text that appears as the column caption
(default is [('title_markup', None, None, 'Episode'),])
- title: (optional) The title of the window + heading
- instructions: (optional) A one-line text describing what the
user should select / what the selection is for
- ok_button: (optional) Will replace the "OK" button label with this
string (e.g. can be '_Delete' when the episodes to be
selected will be deleted after closing the dialog)
- selection_buttons: (optional) A dictionary with labels as
keys and callbacks as values; for each
key a button will be generated, and when
the button is clicked, the callback will
be called for each episode and the return
value of the callback (True or False) will
be the new selected state of the episode
- size_attribute: (optional) The name of an attribute of the
supplied episode objects that can be used to
calculate the size of an episode; set this to
None if no total size calculation should be
done (in cases where total size is useless)
(default is 'file_size')
- tooltip_attribute: (optional) The name of an attribute of
the supplied episode objects that holds
the text for the tooltips when hovering
over an episode (default is 'description')
"""
COLUMN_INDEX = 0
COLUMN_TOOLTIP = 1
COLUMN_TOGGLE = 2
COLUMN_ADDITIONAL = 3
def new(self):
self.gPodderEpisodeSelector.set_transient_for(self.parent_widget)
if hasattr(self, 'title'):
self.gPodderEpisodeSelector.set_title(self.title)
self._config.connect_gtk_window(self.gPodderEpisodeSelector, 'episode_selector', True)
if not hasattr(self, 'callback'):
self.callback = None
if not hasattr(self, 'remove_callback'):
self.remove_callback = None
if not hasattr(self, 'remove_action'):
self.remove_action = _('Remove')
if not hasattr(self, 'remove_finished'):
self.remove_finished = None
if not hasattr(self, 'episodes'):
self.episodes = []
if not hasattr(self, 'size_attribute'):
self.size_attribute = 'file_size'
if not hasattr(self, 'tooltip_attribute'):
self.tooltip_attribute = '_text_description'
if not hasattr(self, 'selection_buttons'):
self.selection_buttons = {}
if not hasattr(self, 'selected_default'):
self.selected_default = False
if not hasattr(self, 'selected'):
self.selected = [self.selected_default] * len(self.episodes)
if len(self.selected) < len(self.episodes):
self.selected += [self.selected_default] * (len(self.episodes) - len(self.selected))
if not hasattr(self, 'columns'):
self.columns = (('title_markup', None, None, _('Episode')),)
if hasattr(self, 'instructions'):
self.labelInstructions.set_text(self.instructions)
self.labelInstructions.show_all()
if hasattr(self, 'ok_button'):
if self.ok_button == 'gpodder-download':
self.btnOK.set_image(Gtk.Image.new_from_icon_name('go-down', Gtk.IconSize.BUTTON))
self.btnOK.set_label(_('_Download'))
else:
self.btnOK.set_image(None)
self.btnOK.set_label(self.ok_button)
# check/uncheck column
toggle_cell = Gtk.CellRendererToggle()
toggle_cell.connect('toggled', self.toggle_cell_handler)
toggle_column = Gtk.TreeViewColumn('', toggle_cell, active=self.COLUMN_TOGGLE)
toggle_column.set_clickable(True)
self.treeviewEpisodes.append_column(toggle_column)
next_column = self.COLUMN_ADDITIONAL
for name, sort_name, sort_type, caption in self.columns:
renderer = Gtk.CellRendererText()
if next_column < self.COLUMN_ADDITIONAL + 1:
renderer.set_property('ellipsize', Pango.EllipsizeMode.END)
column = Gtk.TreeViewColumn(caption, renderer, markup=next_column)
column.set_clickable(False)
column.set_resizable(True)
# Only set "expand" on the first column
if next_column < self.COLUMN_ADDITIONAL + 1:
column.set_expand(True)
if sort_name is not None:
column.set_sort_column_id(next_column + 1)
else:
column.set_sort_column_id(next_column)
self.treeviewEpisodes.append_column(column)
next_column += 1
if sort_name is not None:
# add the sort column
column = Gtk.TreeViewColumn()
column.set_clickable(False)
column.set_visible(False)
self.treeviewEpisodes.append_column(column)
next_column += 1
column_types = [int, str, bool]
# add string column type plus sort column type if it exists
for name, sort_name, sort_type, caption in self.columns:
column_types.append(str)
if sort_name is not None:
column_types.append(sort_type)
self.model = Gtk.ListStore(*column_types)
tooltip = None
for index, episode in enumerate(self.episodes):
if self.tooltip_attribute is not None:
try:
tooltip = getattr(episode, self.tooltip_attribute)
                except AttributeError:
tooltip = None
row = [index, tooltip, self.selected[index]]
for name, sort_name, sort_type, caption in self.columns:
if not hasattr(episode, name):
row.append(None)
else:
row.append(getattr(episode, name))
if sort_name is not None:
if not hasattr(episode, sort_name):
row.append(None)
else:
row.append(getattr(episode, sort_name))
self.model.append(row)
if self.remove_callback is not None:
self.btnRemoveAction.show()
self.btnRemoveAction.set_label(self.remove_action)
# connect to tooltip signals
if self.tooltip_attribute is not None:
try:
self.treeviewEpisodes.set_property('has-tooltip', True)
self.treeviewEpisodes.connect('query-tooltip', self.treeview_episodes_query_tooltip)
            except Exception:
pass
self.last_tooltip_episode = None
self.episode_list_can_tooltip = True
self.treeviewEpisodes.connect('button-press-event', self.treeview_episodes_button_pressed)
self.treeviewEpisodes.connect('popup-menu', self.treeview_episodes_button_pressed)
self.treeviewEpisodes.set_rules_hint(True)
self.treeviewEpisodes.set_model(self.model)
self.treeviewEpisodes.columns_autosize()
# Focus the toggle column for Tab-focusing (bug 503)
path, column = self.treeviewEpisodes.get_cursor()
if path is not None:
self.treeviewEpisodes.set_cursor(path, toggle_column)
self.calculate_total_size()
def treeview_episodes_query_tooltip(self, treeview, x, y, keyboard_tooltip, tooltip):
# With get_bin_window, we get the window that contains the rows without
# the header. The Y coordinate of this window will be the height of the
# treeview header. This is the amount we have to subtract from the
# event's Y coordinate to get the coordinate to pass to get_path_at_pos
(x_bin, y_bin) = treeview.get_bin_window().get_position()
        x -= x_bin
        y -= y_bin
(path, column, rx, ry) = treeview.get_path_at_pos(x, y) or (None,) * 4
if not self.episode_list_can_tooltip or column != treeview.get_columns()[1]:
self.last_tooltip_episode = None
return False
if path is not None:
model = treeview.get_model()
iterator = model.get_iter(path)
index = model.get_value(iterator, self.COLUMN_INDEX)
description = model.get_value(iterator, self.COLUMN_TOOLTIP)
if self.last_tooltip_episode is not None and self.last_tooltip_episode != index:
self.last_tooltip_episode = None
return False
self.last_tooltip_episode = index
description = util.remove_html_tags(description)
# Bug 1825: make sure description is a unicode string,
# so it may be cut correctly on UTF-8 char boundaries
description = util.convert_bytes(description)
if description is not None:
if len(description) > 400:
description = description[:398] + '[...]'
tooltip.set_text(description)
return True
else:
return False
self.last_tooltip_episode = None
return False
def treeview_episodes_button_pressed(self, treeview, event=None):
if event is None or event.triggers_context_menu():
menu = Gtk.Menu()
if len(self.selection_buttons):
for label in self.selection_buttons:
item = Gtk.MenuItem(label)
item.connect('activate', self.custom_selection_button_clicked, label)
menu.append(item)
menu.append(Gtk.SeparatorMenuItem())
item = Gtk.MenuItem(_('Select _all'))
item.connect('activate', self.on_btnCheckAll_clicked)
menu.append(item)
item = Gtk.MenuItem(_('Select _none'))
item.connect('activate', self.on_btnCheckNone_clicked)
menu.append(item)
menu.show_all()
# Disable tooltips while we are showing the menu, so
# the tooltip will not appear over the menu
self.episode_list_can_tooltip = False
menu.connect('deactivate', lambda menushell: self.episode_list_allow_tooltips())
if event is None:
func = TreeViewHelper.make_popup_position_func(treeview)
menu.popup(None, None, func, None, 3, Gtk.get_current_event_time())
else:
menu.popup(None, None, None, None, event.button, event.time)
return True
def episode_list_allow_tooltips(self):
self.episode_list_can_tooltip = True
def calculate_total_size(self):
if self.size_attribute is not None:
(total_size, count) = (0, 0)
for episode in self.get_selected_episodes():
try:
total_size += int(getattr(episode, self.size_attribute))
count += 1
                except Exception:
pass
text = []
if count == 0:
text.append(_('Nothing selected'))
text.append(N_('%(count)d episode', '%(count)d episodes',
count) % {'count': count})
if total_size > 0:
text.append(_('size: %s') % util.format_filesize(total_size))
self.labelTotalSize.set_text(', '.join(text))
self.btnOK.set_sensitive(count > 0)
self.btnRemoveAction.set_sensitive(count > 0)
if count > 0:
self.btnCancel.set_label(_('_Cancel'))
else:
self.btnCancel.set_label(_('_Close'))
else:
self.btnOK.set_sensitive(False)
self.btnRemoveAction.set_sensitive(False)
for index, row in enumerate(self.model):
if self.model.get_value(row.iter, self.COLUMN_TOGGLE) is True:
self.btnOK.set_sensitive(True)
self.btnRemoveAction.set_sensitive(True)
break
self.labelTotalSize.set_text('')
def toggle_cell_handler(self, cell, path):
model = self.treeviewEpisodes.get_model()
model[path][self.COLUMN_TOGGLE] = not model[path][self.COLUMN_TOGGLE]
self.calculate_total_size()
def custom_selection_button_clicked(self, button, label):
callback = self.selection_buttons[label]
for index, row in enumerate(self.model):
new_value = callback(self.episodes[index])
self.model.set_value(row.iter, self.COLUMN_TOGGLE, new_value)
self.calculate_total_size()
def on_btnCheckAll_clicked(self, widget):
for row in self.model:
self.model.set_value(row.iter, self.COLUMN_TOGGLE, True)
self.calculate_total_size()
def on_btnCheckNone_clicked(self, widget):
for row in self.model:
self.model.set_value(row.iter, self.COLUMN_TOGGLE, False)
self.calculate_total_size()
def on_remove_action_activate(self, widget):
episodes = self.get_selected_episodes(remove_episodes=True)
urls = []
for episode in episodes:
urls.append(episode.url)
self.remove_callback(episode)
if self.remove_finished is not None:
self.remove_finished(urls)
self.calculate_total_size()
# Close the window when there are no episodes left
model = self.treeviewEpisodes.get_model()
if model.get_iter_first() is None:
self.on_btnCancel_clicked(None)
def on_row_activated(self, treeview, path, view_column):
model = treeview.get_model()
iterator = model.get_iter(path)
value = model.get_value(iterator, self.COLUMN_TOGGLE)
model.set_value(iterator, self.COLUMN_TOGGLE, not value)
self.calculate_total_size()
def get_selected_episodes(self, remove_episodes=False):
selected_episodes = []
for index, row in enumerate(self.model):
if self.model.get_value(row.iter, self.COLUMN_TOGGLE) is True:
selected_episodes.append(self.episodes[self.model.get_value(
row.iter, self.COLUMN_INDEX)])
if remove_episodes:
for episode in selected_episodes:
index = self.episodes.index(episode)
iterator = self.model.get_iter_first()
while iterator is not None:
if self.model.get_value(iterator, self.COLUMN_INDEX) == index:
self.model.remove(iterator)
break
iterator = self.model.iter_next(iterator)
return selected_episodes
def on_btnOK_clicked(self, widget):
self.gPodderEpisodeSelector.destroy()
if self.callback is not None:
self.callback(self.get_selected_episodes())
def on_btnCancel_clicked(self, widget):
self.gPodderEpisodeSelector.destroy()
if self.callback is not None:
self.callback([])
| 17,933 | Python | .py | 351 | 38.367521 | 100 | 0.606235 | gpodder/gpodder | 1,268 | 204 | 307 | GPL-3.0 | 9/5/2024, 5:10:30 PM (Europe/Amsterdam) |
| 8,339 | __init__.py | gpodder_gpodder/src/gpodder/plugins/__init__.py |
# -*- coding: utf-8 -*-
#
# gPodder - A media aggregator and podcast client
# Copyright (c) 2005-2018 The gPodder Team
#
# gPodder is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# gPodder is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
| 747 | Python | .py | 18 | 40.5 | 71 | 0.766804 | gpodder/gpodder | 1,268 | 204 | 307 | GPL-3.0 | 9/5/2024, 5:10:30 PM (Europe/Amsterdam) |
| 8,340 | soundcloud.py | gpodder_gpodder/src/gpodder/plugins/soundcloud.py |
# -*- coding: utf-8 -*-
#
# gPodder - A media aggregator and podcast client
# Copyright (c) 2005-2018 The gPodder Team
#
# gPodder is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# gPodder is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Soundcloud.com API client module for gPodder
# Thomas Perl <thp@gpodder.org>; 2009-11-03
import json
import logging
import os
import re
import time
import urllib.error
import urllib.parse
import urllib.request
import gpodder
from gpodder import feedcore, model, registry, util
_ = gpodder.gettext
# gPodder's consumer key for the Soundcloud API
CONSUMER_KEY = 'zrweghtEtnZLpXf3mlm8mQ'
logger = logging.getLogger(__name__)
def soundcloud_parsedate(s):
"""Parse a string into a unix timestamp
Only strings provided by Soundcloud's API are
parsed with this function (2009/11/03 13:37:00).
"""
m = re.match(r'(\d{4})/(\d{2})/(\d{2}) (\d{2}):(\d{2}):(\d{2})', s)
return time.mktime(tuple([int(x) for x in m.groups()] + [0, 0, -1]))
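# Illustrative example (added; the result depends on the local timezone,
# because time.mktime() interprets the struct_time as local time):
#   soundcloud_parsedate('2009/11/03 13:37:00')  ->  POSIX timestamp (float)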
def get_metadata(url):
"""Get file download metadata
Returns a (size, type, name) from the given download
URL. Will use the network connection to determine the
metadata via the HTTP header fields.
"""
track_response = util.urlopen(url)
filesize = track_response.headers['content-length'] or '0'
filetype = track_response.headers['content-type'] or 'application/octet-stream'
filename = util.get_header_param(track_response.headers, 'filename', 'content-disposition') \
or os.path.basename(os.path.dirname(url))
track_response.close()
return filesize, filetype, filename
class SoundcloudUser(object):
def __init__(self, username):
self.username = username
self.cache_file = os.path.join(gpodder.home, 'Soundcloud')
if os.path.exists(self.cache_file):
try:
                with open(self.cache_file, 'r') as f:
                    self.cache = json.load(f)
            except Exception:
                self.cache = {}
else:
self.cache = {}
def commit_cache(self):
        with open(self.cache_file, 'w') as f:
            json.dump(self.cache, f)
def get_user_info(self):
global CONSUMER_KEY
key = ':'.join((self.username, 'user_info'))
if key in self.cache:
if self.cache[key].get('code', 200) == 200:
return self.cache[key]
try:
# find user ID in soundcloud page
url = 'https://soundcloud.com/' + self.username
r = util.urlopen(url)
if not r.ok:
raise Exception('Soundcloud "%s": %d %s' % (url, r.status_code, r.reason))
uid = re.search(r'"https://api.soundcloud.com/users/([0-9]+)"', r.text)
if not uid:
raise Exception('Soundcloud user ID not found for "%s"' % url)
uid = int(uid.group(1))
# load user info API
json_url = 'https://api.soundcloud.com/users/%d.json?consumer_key=%s' % (uid, CONSUMER_KEY)
r = util.urlopen(json_url)
if not r.ok:
raise Exception('Soundcloud "%s": %d %s' % (json_url, r.status_code, r.reason))
user_info = json.loads(r.text)
if user_info.get('code', 200) != 200:
raise Exception('Soundcloud "%s": %s' % (json_url, user_info.get('message', '')))
self.cache[key] = user_info
finally:
self.commit_cache()
return user_info
def get_coverart(self):
user_info = self.get_user_info()
return user_info.get('avatar_url', None)
def get_user_id(self):
user_info = self.get_user_info()
return user_info.get('id', None)
def get_tracks(self, feed):
"""Get a generator of tracks from a SC user
The generator will give you a dictionary for every
track it can find for its user."""
global CONSUMER_KEY
try:
            json_url = ('https://api.soundcloud.com/users/%(user)s/%(feed)s.json'
                        '?consumer_key=%(consumer_key)s&limit=200'
% {"user": self.get_user_id(),
"feed": feed,
"consumer_key": CONSUMER_KEY})
logger.debug("loading %s", json_url)
json_tracks = util.urlopen(json_url).json()
tracks = [track for track in json_tracks if track['streamable'] or track['downloadable']]
total_count = len(json_tracks)
if len(tracks) == 0 and total_count > 0:
logger.warning("Download of all %i %s of user %s is disabled" %
(total_count, feed, self.username))
else:
logger.info("%i/%i downloadable tracks for user %s %s feed" %
(len(tracks), total_count, self.username, feed))
for track in tracks:
# Prefer stream URL (MP3), fallback to download URL
base_url = track.get('stream_url') if track['streamable'] else track['download_url']
url = base_url + '?consumer_key=' + CONSUMER_KEY
if url not in self.cache:
try:
self.cache[url] = get_metadata(url)
                    except Exception:
continue
filesize, filetype, filename = self.cache[url]
yield {
'title': track.get('title', track.get('permalink')) or _('Unknown track'),
'link': track.get('permalink_url') or 'https://soundcloud.com/' + self.username,
'description': util.remove_html_tags(track.get('description') or ''),
'description_html': '',
'url': url,
'file_size': int(filesize),
'mime_type': filetype,
'guid': str(track.get('permalink', track.get('id'))),
'published': soundcloud_parsedate(track.get('created_at', None)),
}
finally:
self.commit_cache()
class SoundcloudFeed(model.Feed):
URL_REGEX = re.compile(r'https?://([a-z]+\.)?soundcloud\.com/([^/]+)$', re.I)
@classmethod
def fetch_channel(cls, channel, max_episodes=0):
url = channel.authenticate_url(channel.url)
return cls.handle_url(url, max_episodes)
@classmethod
def handle_url(cls, url, max_episodes):
m = cls.URL_REGEX.match(url)
if m is not None:
subdomain, username = m.groups()
return feedcore.Result(feedcore.UPDATED_FEED, cls(username, max_episodes))
def __init__(self, username, max_episodes):
self.username = username
self.sc_user = SoundcloudUser(username)
self.max_episodes = max_episodes
def get_title(self):
return _('%s on Soundcloud') % self.username
def get_cover_url(self):
return self.sc_user.get_coverart()
def get_link(self):
return 'https://soundcloud.com/%s' % self.username
def get_description(self):
return _('Tracks published by %s on Soundcloud.') % self.username
def get_new_episodes(self, channel, existing_guids):
return self._get_new_episodes(channel, existing_guids, 'tracks')
def get_next_page(self, channel, max_episodes=0):
# one could return more, but it would consume too many api calls
# (see PR #184)
return None
def _get_new_episodes(self, channel, existing_guids, track_type):
tracks = list(self.sc_user.get_tracks(track_type))
if self.max_episodes > 0:
tracks = tracks[:self.max_episodes]
seen_guids = {track['guid'] for track in tracks}
episodes = []
for track in tracks:
if track['guid'] not in existing_guids:
episode = channel.episode_factory(track)
episode.save()
episodes.append(episode)
return episodes, seen_guids
class SoundcloudFavFeed(SoundcloudFeed):
URL_REGEX = re.compile(r'https?://([a-z]+\.)?soundcloud\.com/([^/]+)/favorites', re.I)
    def __init__(self, username, max_episodes):
        super(SoundcloudFavFeed, self).__init__(username, max_episodes)
def get_title(self):
return _("%s's favorites on Soundcloud") % self.username
def get_link(self):
return 'https://soundcloud.com/%s/favorites' % self.username
def get_description(self):
return _('Tracks favorited by %s on Soundcloud.') % self.username
def get_new_episodes(self, channel, existing_guids):
return self._get_new_episodes(channel, existing_guids, 'favorites')
# Register our URL handlers
registry.feed_handler.register(SoundcloudFeed.fetch_channel)
registry.feed_handler.register(SoundcloudFavFeed.fetch_channel)
def search_for_user(query):
json_url = 'https://api.soundcloud.com/users.json?q=%s&consumer_key=%s' % (urllib.parse.quote(query), CONSUMER_KEY)
return util.urlopen(json_url).json()
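# Illustrative use of search_for_user() (added; the JSON field names are
# assumptions based on the Soundcloud user payloads used above):
#   for user in search_for_user('jazz'):
#       print(user.get('username'), user.get('permalink_url'))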
| 9,480 | Python | .py | 206 | 36.504854 | 119 | 0.60729 | gpodder/gpodder | 1,268 | 204 | 307 | GPL-3.0 | 9/5/2024, 5:10:30 PM (Europe/Amsterdam) |
| 8,341 | __init__.py | gpodder_gpodder/tests/__init__.py |
# -*- coding: utf-8 -*-
#
# gPodder - A media aggregator and podcast client
# Copyright (c) 2005-2018 The gPodder Team
#
# gPodder is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# gPodder is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
| 747 | Python | .py | 18 | 40.5 | 71 | 0.766804 | gpodder/gpodder | 1,268 | 204 | 307 | GPL-3.0 | 9/5/2024, 5:10:30 PM (Europe/Amsterdam) |
| 8,342 | test_feedcore.py | gpodder_gpodder/tests/test_feedcore.py |
# -*- coding: utf-8 -*-
#
# gPodder - A media aggregator and podcast client
# Copyright (c) 2005-2023 The gPodder Team
#
# gPodder is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# gPodder is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import io
import pytest
import requests.exceptions
from gpodder.feedcore import Fetcher, NEW_LOCATION, Result, UPDATED_FEED
class MyFetcher(Fetcher):
def parse_feed(self, url, feed_data, data_stream, headers, status, **kwargs):
return Result(status, {
'parse_feed': {
'url': url,
'feed_data': feed_data,
'data_stream': data_stream,
'headers': headers,
'extra_args': dict(**kwargs),
},
})
SIMPLE_RSS = """
<rss>
<channel>
<title>Feed Name</title>
<item>
<title>Some Episode Title</title>
<guid>urn:test/ep1</guid>
<pubDate>Sun, 25 Nov 2018 17:28:03 +0000</pubDate>
<enclosure
url="/ep1.ogg"
type="audio/ogg"
length="100000"/>
</item>
</channel>
</rss>
"""
def test_easy(httpserver):
httpserver.expect_request('/feed').respond_with_data(SIMPLE_RSS, content_type='text/xml')
res = MyFetcher().fetch(httpserver.url_for('/feed'), custom_key='value')
assert res.status == UPDATED_FEED
args = res.feed['parse_feed']
assert args['headers']['content-type'] == 'text/xml'
assert isinstance(args['data_stream'], io.BytesIO)
assert args['data_stream'].getvalue().decode('utf-8') == SIMPLE_RSS
assert args['url'] == httpserver.url_for('/feed')
assert args['extra_args']['custom_key'] == 'value'
def test_redirect(httpserver):
httpserver.expect_request('/endfeed').respond_with_data(SIMPLE_RSS, content_type='text/xml')
redir_headers = {
'Location': '/endfeed',
}
# temporary redirect
httpserver.expect_request('/feed').respond_with_data(status=302, headers=redir_headers)
httpserver.expect_request('/permanentfeed').respond_with_data(status=301, headers=redir_headers)
res = MyFetcher().fetch(httpserver.url_for('/feed'))
assert res.status == UPDATED_FEED
args = res.feed['parse_feed']
assert args['headers']['content-type'] == 'text/xml'
assert isinstance(args['data_stream'], io.BytesIO)
assert args['data_stream'].getvalue().decode('utf-8') == SIMPLE_RSS
assert args['url'] == httpserver.url_for('/feed')
res = MyFetcher().fetch(httpserver.url_for('/permanentfeed'))
assert res.status == NEW_LOCATION
assert res.feed == httpserver.url_for('/endfeed')
def test_redirect_loop(httpserver):
""" verify that feedcore fetching will not loop indefinitely on redirects """
redir_headers = {
'Location': '/feed', # it loops
}
httpserver.expect_request('/feed').respond_with_data(status=302, headers=redir_headers)
with pytest.raises(requests.exceptions.TooManyRedirects):
        res = MyFetcher().fetch(httpserver.url_for('/feed'))
        # the assertions below are never reached: the fetch above raises
        # TooManyRedirects, which pytest.raises() catches and checks
        assert res.status == UPDATED_FEED
        args = res.feed['parse_feed']
        assert args['headers']['content-type'] == 'text/xml'
        assert isinstance(args['data_stream'], io.BytesIO)
        assert args['data_stream'].getvalue().decode('utf-8') == SIMPLE_RSS
        assert args['url'] == httpserver.url_for('/feed')
def test_temporary_error_retry(httpserver):
httpserver.expect_ordered_request('/feed').respond_with_data(status=503)
httpserver.expect_ordered_request('/feed').respond_with_data(SIMPLE_RSS, content_type='text/xml')
res = MyFetcher().fetch(httpserver.url_for('/feed'))
assert res.status == UPDATED_FEED
args = res.feed['parse_feed']
assert args['headers']['content-type'] == 'text/xml'
assert args['url'] == httpserver.url_for('/feed')
| 4,330 | Python | .py | 99 | 38.141414 | 101 | 0.670622 | gpodder/gpodder | 1,268 | 204 | 307 | GPL-3.0 | 9/5/2024, 5:10:30 PM (Europe/Amsterdam) |
| 8,343 | model.py | gpodder_gpodder/tests/model.py |
# -*- coding: utf-8 -*-
#
# gPodder - A media aggregator and podcast client
# Copyright (c) 2005-2018 The gPodder Team
#
# gPodder is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# gPodder is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# gpodder.test.model - Unit tests for gpodder.model
# Thomas Perl <thp@gpodder.org>; 2013-02-12
import unittest
from gpodder import model
class TestEpisodePublishedProperties(unittest.TestCase):
PUBLISHED_UNIXTIME = 1360666744
PUBLISHED_SORT = '2013-02-12'
PUBLISHED_YEAR = '13'
PUBLISHED_MONTH = '02'
PUBLISHED_DAY = '12'
def setUp(self):
self.podcast = model.PodcastChannel(None)
self.episode = model.PodcastEpisode(self.podcast)
self.episode.published = self.PUBLISHED_UNIXTIME
def test_sortdate(self):
self.assertEqual(self.episode.sortdate, self.PUBLISHED_SORT)
def test_pubdate_year(self):
self.assertEqual(self.episode.pubdate_year, self.PUBLISHED_YEAR)
def test_pubdate_month(self):
self.assertEqual(self.episode.pubdate_month, self.PUBLISHED_MONTH)
def test_pubdate_day(self):
self.assertEqual(self.episode.pubdate_day, self.PUBLISHED_DAY)
| 1,703 | Python | .py | 40 | 38.9 | 74 | 0.748184 | gpodder/gpodder | 1,268 | 204 | 307 | GPL-3.0 | 9/5/2024, 5:10:30 PM (Europe/Amsterdam) |
| 8,344 | video_converter.py | gpodder_gpodder/share/gpodder/extensions/video_converter.py |
# -*- coding: utf-8 -*-
# Converts video files to avi or mp4
# This requires ffmpeg to be installed. Also works as a context
# menu item for already-downloaded files.
#
# (c) 2011-08-05 Thomas Perl <thp.io/about>
# Released under the same license terms as gPodder itself.
import logging
import os
import subprocess
import gpodder
from gpodder import util
logger = logging.getLogger(__name__)
_ = gpodder.gettext
__title__ = _('Convert video files')
__description__ = _('Transcode video files to avi/mp4/m4v')
__authors__ = 'Thomas Perl <thp@gpodder.org>, Bernd Schlapsi <brot@gmx.info>'
__doc__ = 'https://gpodder.github.io/docs/extensions/videoconverter.html'
__payment__ = 'https://flattr.com/submit/auto?user_id=BerndSch&url=http://wiki.gpodder.org/wiki/Extensions/VideoConverter'
__category__ = 'post-download'
DefaultConfig = {
'output_format': 'mp4', # At the moment we support/test only mp4, m4v and avi
'context_menu': True, # Show the conversion option in the context menu
}
class gPodderExtension:
MIME_TYPES = ('video/mp4', 'video/m4v', 'video/x-flv', )
EXT = ('.mp4', '.m4v', '.flv', )
CMD = {'avconv': ['-i', '%(old_file)s', '-codec', 'copy', '%(new_file)s'],
'ffmpeg': ['-i', '%(old_file)s', '-codec', 'copy', '%(new_file)s']
}
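    # For illustration (file names made up; the real ones come from the
    # episode object): with the default 'mp4' output format, _convert_episode()
    # below assembles roughly
    #   ffmpeg -i /path/episode.flv -codec copy /path/episode.mp4
    # i.e. the container is rewritten with the streams copied, not re-encoded.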
def __init__(self, container):
self.container = container
self.config = self.container.config
# Dependency checks
self.command = self.container.require_any_command(['avconv', 'ffmpeg'])
# extract command without extension (.exe on Windows) from command-string
command_without_ext = os.path.basename(os.path.splitext(self.command)[0])
self.command_param = self.CMD[command_without_ext]
def on_episode_downloaded(self, episode):
self._convert_episode(episode)
def _get_new_extension(self):
ext = self.config.output_format
if not ext.startswith('.'):
ext = '.' + ext
return ext
def _check_source(self, episode):
if episode.extension() == self._get_new_extension():
return False
if episode.mime_type in self.MIME_TYPES:
return True
# Also check file extension (bug 1770)
if episode.extension() in self.EXT:
return True
return False
def on_episodes_context_menu(self, episodes):
if not self.config.context_menu:
return None
if not all(e.was_downloaded(and_exists=True) for e in episodes):
return None
if not any(self._check_source(episode) for episode in episodes):
return None
menu_item = _('Convert to %(format)s') % {'format': self.config.output_format}
return [(menu_item, self._convert_episodes)]
def _convert_episode(self, episode):
if not self._check_source(episode):
return
new_extension = self._get_new_extension()
old_filename = episode.local_filename(create=False)
filename, old_extension = os.path.splitext(old_filename)
new_filename = filename + new_extension
cmd = [self.command] + \
[param % {'old_file': old_filename, 'new_file': new_filename}
for param in self.command_param]
if gpodder.ui.win32:
ffmpeg = util.Popen(cmd)
ffmpeg.wait()
stdout, stderr = ("<unavailable>",) * 2
else:
ffmpeg = util.Popen(cmd, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout, stderr = ffmpeg.communicate()
if ffmpeg.returncode == 0:
util.rename_episode_file(episode, new_filename)
os.remove(old_filename)
logger.info('Converted video file to %(format)s.' % {'format': self.config.output_format})
gpodder.user_extensions.on_notification_show(_('File converted'), episode.title)
else:
logger.warning('Error converting video file: %s / %s', stdout, stderr)
gpodder.user_extensions.on_notification_show(_('Conversion failed'), episode.title)
def _convert_episodes(self, episodes):
for episode in episodes:
self._convert_episode(episode)
| 4,221 | Python | .py | 92 | 37.880435 | 122 | 0.635765 | gpodder/gpodder | 1,268 | 204 | 307 | GPL-3.0 | 9/5/2024, 5:10:30 PM (Europe/Amsterdam) |
| 8,345 | youtube-dl.py | gpodder_gpodder/share/gpodder/extensions/youtube-dl.py |
# -*- coding: utf-8 -*-
# Manage YouTube subscriptions using youtube-dl (https://github.com/ytdl-org/youtube-dl)
# Requirements: youtube-dl module (pip install youtube_dl)
# (c) 2019-08-17 Eric Le Lay <elelay.fr:contact>
# Released under the same license terms as gPodder itself.
import logging
import os
import re
import sys
import time
from collections.abc import Iterable
try:
import yt_dlp as youtube_dl
program_name = 'yt-dlp'
want_ytdl_version = '2023.06.22'
except ImportError:
import youtube_dl
program_name = 'youtube-dl'
want_ytdl_version = '2023.02.17' # youtube-dl has been patched, but not yet released
import gpodder
from gpodder import download, feedcore, model, registry, util, youtube
import gi # isort:skip
gi.require_version('Gtk', '3.0') # isort:skip
from gi.repository import Gtk # isort:skip
_ = gpodder.gettext
logger = logging.getLogger(__name__)
__title__ = 'youtube-dl'
__description__ = _('Manage YouTube subscriptions using youtube-dl (pip install youtube_dl) or yt-dlp (pip install yt-dlp)')
__only_for__ = 'gtk, cli'
__authors__ = 'Eric Le Lay <elelay.fr:contact>'
__doc__ = 'https://gpodder.github.io/docs/extensions/youtubedl.html'
want_ytdl_version_msg = _('Your version of youtube-dl/yt-dlp %(have_version)s has known issues, please upgrade to %(want_version)s or newer.')
DefaultConfig = {
# youtube-dl downloads and parses each video page to get information about it, which is very slow.
# Set to False to fall back to the fast but limited (only 15 episodes) gpodder code
'manage_channel': True,
# If for some reason youtube-dl download doesn't work for you, you can fallback to gpodder code.
# Set to False to fall back to default gpodder code (less available formats).
'manage_downloads': True,
# Embed all available subtitles to downloaded videos. Needs ffmpeg.
'embed_subtitles': False,
}
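# Illustrative example (added; the exact config key path is an assumption):
# setting something like extensions.youtube-dl.manage_downloads to False keeps
# gPodder's built-in downloader, while the "Download with youtube-dl"
# context-menu entry (see on_episodes_context_menu below) stays available.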
# youtube feed still preprocessed by youtube.py (compat)
CHANNEL_RE = re.compile(r'https://www.youtube.com/feeds/videos.xml\?channel_id=(.+)')
USER_RE = re.compile(r'https://www.youtube.com/feeds/videos.xml\?user=(.+)')
PLAYLIST_RE = re.compile(r'https://www.youtube.com/feeds/videos.xml\?playlist_id=(.+)')
def youtube_parsedate(s):
"""Parse a string into a unix timestamp
Only strings provided by youtube-dl API are
parsed with this function (20170920).
"""
if s:
return time.mktime(time.strptime(s, "%Y%m%d"))
return 0
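# Illustrative examples (added): youtube_parsedate('20170920') returns the
# POSIX timestamp for 2017-09-20 00:00 local time; falsy input returns 0.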
def video_guid(video_id):
"""
generate same guid as youtube
"""
return 'yt:video:{}'.format(video_id)
class YoutubeCustomDownload(download.CustomDownload):
"""
Represents the download of a single episode using youtube-dl.
Actual youtube-dl interaction via gPodderYoutubeDL.
"""
def __init__(self, ytdl, url, episode):
self._ytdl = ytdl
self._url = url
self._reporthook = None
self._prev_dl_bytes = 0
self._episode = episode
self._partial_filename = None
@property
def partial_filename(self):
return self._partial_filename
@partial_filename.setter
def partial_filename(self, val):
self._partial_filename = val
def retrieve_resume(self, tempname, reporthook=None):
"""
called by download.DownloadTask to perform the download.
"""
self._reporthook = reporthook
# outtmpl: use given tempname by DownloadTask
# (escape % because outtmpl used as a string template by youtube-dl)
outtmpl = tempname.replace('%', '%%')
info, opts = self._ytdl.fetch_info(self._url, outtmpl, self._my_hook)
if program_name == 'yt-dlp':
default = opts['outtmpl']['default'] if isinstance(opts['outtmpl'], dict) else opts['outtmpl']
self.partial_filename = os.path.join(opts['paths']['home'], default) % info
elif program_name == 'youtube-dl':
self.partial_filename = opts['outtmpl'] % info
res = self._ytdl.fetch_video(info, opts)
if program_name == 'yt-dlp':
# yt-dlp downloads to whatever file name it wants, so rename
filepath = res.get('requested_downloads', [{}])[0].get('filepath')
if filepath is None:
raise Exception("Could not determine youtube-dl output file")
if filepath != tempname:
logger.debug('yt-dlp downloaded to "%s" instead of "%s", moving',
os.path.basename(filepath),
os.path.basename(tempname))
os.remove(tempname)
os.rename(filepath, tempname)
if 'duration' in res and res['duration']:
self._episode.total_time = res['duration']
headers = {}
# youtube-dl doesn't return a content-type but an extension
if 'ext' in res:
dot_ext = '.{}'.format(res['ext'])
if program_name == 'youtube-dl':
# See #673 when merging multiple formats, the extension is appended to the tempname
# by youtube-dl resulting in empty .partial file + .partial.mp4 exists
# and #796 .mkv is chosen by ytdl sometimes
for try_ext in (dot_ext, ".mp4", ".m4a", ".webm", ".mkv"):
tempname_with_ext = tempname + try_ext
if os.path.isfile(tempname_with_ext):
logger.debug('youtube-dl downloaded to "%s" instead of "%s", moving',
os.path.basename(tempname_with_ext),
os.path.basename(tempname))
os.remove(tempname)
os.rename(tempname_with_ext, tempname)
dot_ext = try_ext
break
ext_filetype = util.mimetype_from_extension(dot_ext)
if ext_filetype:
# YouTube weba formats have a webm extension and get a video/webm mime-type
# but audio content has no width or height, so change it to audio/webm for correct icon and player
if ext_filetype.startswith('video/') and ('height' not in res or res['height'] is None):
ext_filetype = ext_filetype.replace('video/', 'audio/')
headers['content-type'] = ext_filetype
return headers, res.get('url', self._url)
def _my_hook(self, d):
if d['status'] == 'downloading':
if self._reporthook:
dl_bytes = d['downloaded_bytes']
total_bytes = d.get('total_bytes') or d.get('total_bytes_estimate') or 0
self._reporthook(self._prev_dl_bytes + dl_bytes,
1,
self._prev_dl_bytes + total_bytes)
elif d['status'] == 'finished':
dl_bytes = d['downloaded_bytes']
self._prev_dl_bytes += dl_bytes
if self._reporthook:
self._reporthook(self._prev_dl_bytes, 1, self._prev_dl_bytes)
elif d['status'] == 'error':
logger.error('download hook error: %r', d)
else:
logger.debug('unknown download hook status: %r', d)
class YoutubeFeed(model.Feed):
"""
Represents the youtube feed for model.PodcastChannel
"""
def __init__(self, url, cover_url, description, max_episodes, ie_result, downloader):
self._url = url
self._cover_url = cover_url
self._description = description
self._max_episodes = max_episodes
ie_result['entries'] = self._process_entries(ie_result.get('entries', []))
self._ie_result = ie_result
self._downloader = downloader
def _process_entries(self, entries):
filtered_entries = []
seen_guids = set()
for i, e in enumerate(entries): # consumes the generator!
if e.get('_type', 'video') in ('url', 'url_transparent') and e.get('ie_key') == 'Youtube':
guid = video_guid(e['id'])
e['guid'] = guid
if guid in seen_guids:
logger.debug('dropping already seen entry %s title="%s"', guid, e.get('title'))
else:
filtered_entries.append(e)
seen_guids.add(guid)
else:
logger.debug('dropping entry not youtube video %r', e)
if len(filtered_entries) == self._max_episodes:
                # entries is a generator: stopping now prevents it from downloading more pages
logger.debug('stopping entry enumeration')
break
return filtered_entries
def get_title(self):
return '{} (YouTube)'.format(self._ie_result.get('title') or self._ie_result.get('id') or self._url)
def get_link(self):
return self._ie_result.get('webpage_url')
def get_description(self):
return self._description
def get_cover_url(self):
return self._cover_url
def get_http_etag(self):
""" :return str: optional -- last HTTP etag header, for conditional request next time """
# youtube-dl doesn't provide it!
return None
def get_http_last_modified(self):
""" :return str: optional -- last HTTP Last-Modified header, for conditional request next time """
# youtube-dl doesn't provide it!
return None
def get_new_episodes(self, channel, existing_guids):
# entries are already sorted by decreasing date
# trim guids to max episodes
entries = [e for i, e in enumerate(self._ie_result['entries'])
if not self._max_episodes or i < self._max_episodes]
all_seen_guids = {e['guid'] for e in entries}
# only fetch new ones from youtube since they are so slow to get
new_entries = [e for e in entries if e['guid'] not in existing_guids]
logger.debug('%i/%i new entries', len(new_entries), len(all_seen_guids))
self._ie_result['entries'] = new_entries
self._downloader.refresh_entries(self._ie_result)
# episodes from entries
episodes = []
for en in self._ie_result['entries']:
guid = video_guid(en['id'])
if en.get('ext'):
mime_type = util.mimetype_from_extension('.{}'.format(en['ext']))
else:
mime_type = 'application/octet-stream'
if en.get('filesize'):
filesize = int(en['filesize'] or 0)
else:
filesize = sum(int(f.get('filesize') or 0)
for f in en.get('requested_formats', []))
ep = {
'title': en.get('title', guid),
'link': en.get('webpage_url'),
'episode_art_url': en.get('thumbnail'),
'description': util.remove_html_tags(en.get('description') or ''),
'description_html': '',
'url': en.get('webpage_url'),
'file_size': filesize,
'mime_type': mime_type,
'guid': guid,
'published': youtube_parsedate(en.get('upload_date', None)),
'total_time': int(en.get('duration') or 0),
}
episode = channel.episode_factory(ep)
episode.save()
episodes.append(episode)
return episodes, all_seen_guids
def get_next_page(self, channel, max_episodes):
"""
Paginated feed support (RFC 5005).
If the feed is paged, return the next feed page.
Returned page will in turn be asked for the next page, until None is returned.
:return feedcore.Result: the next feed's page,
as a fully parsed Feed or None
"""
return None
class gPodderYoutubeDL(download.CustomDownloader):
def __init__(self, gpodder_config, my_config, force=False):
"""Instance of CustomDownloader using youtube-dl or yt-dlp.
:param force: force using this downloader even if config says don't manage downloads
"""
self.gpodder_config = gpodder_config
self.my_config = my_config
self.force = force
# cachedir is not much used in youtube-dl, but set it anyway
cachedir = os.path.join(gpodder.home, 'youtube-dl')
os.makedirs(cachedir, exist_ok=True)
self._ydl_opts = {
'cachedir': cachedir,
'noprogress': True, # prevent progress bar from appearing in console
}
# prevent escape codes in desktop notifications on errors
if program_name == 'yt-dlp':
self._ydl_opts['color'] = 'no_color'
else:
self._ydl_opts['no_color'] = True
if gpodder.verbose:
self._ydl_opts['verbose'] = True
else:
self._ydl_opts['quiet'] = True
# Don't create downloaders for URLs supported by these youtube-dl extractors
self.ie_blacklist = ["Generic"]
# Cache URL regexes from youtube-dl matches here, seed with youtube regex
self.regex_cache = [(re.compile(r'https://www.youtube.com/watch\?v=.+'),)]
# #686 on windows without a console, sys.stdout is None, causing exceptions
# when adding podcasts.
# See https://docs.python.org/3/library/sys.html#sys.__stderr__ Note
if not sys.stdout:
logger.debug('no stdout, setting youtube-dl logger')
self._ydl_opts['logger'] = logger
def add_format(self, gpodder_config, opts, fallback=None):
""" construct youtube-dl -f argument from configured format. """
# You can set a custom format or custom formats by editing the config for key
# `youtube.preferred_fmt_ids`
#
        # It takes a list of format strings separated by commas: bestaudio, 18
        # They are translated to the youtube-dl format string bestaudio/18,
        # meaning: prefer the best audio quality (audio-only) and fall back to
        # MP4 360p if it is not available.
#
# See https://github.com/ytdl-org/youtube-dl#format-selection for details
# about youtube-dl format specification.
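        # Illustrative example (values made up, not gPodder defaults): with
        # youtube.preferred_fmt_ids = ['bestaudio', 18], this sets
        # opts['format'] = 'bestaudio/18'; calling with fallback='18' would
        # append another '/18'.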
fmt_ids = youtube.get_fmt_ids(gpodder_config.youtube, False)
opts['format'] = '/'.join(str(fmt) for fmt in fmt_ids)
if fallback:
opts['format'] += '/' + fallback
logger.debug('format=%s', opts['format'])
def fetch_info(self, url, tempname, reporthook):
subs = self.my_config.embed_subtitles
opts = {
'paths': {'home': os.path.dirname(tempname)},
# Postprocessing in yt-dlp breaks without ext
'outtmpl': (os.path.basename(tempname) if program_name == 'yt-dlp'
else tempname) + '.%(ext)s',
'nopart': True, # don't append .part (already .partial)
'retries': 3, # retry a few times
'progress_hooks': [reporthook], # to notify UI
'writesubtitles': subs,
'subtitleslangs': ['all'] if subs else [],
'postprocessors': [{'key': 'FFmpegEmbedSubtitle'}] if subs else [],
}
# Need the proxy_url from src/gpodder/config.py:get_proxies_from_config()
if gpodder.config._proxies:
opts['proxy'] = gpodder.config._proxies['http']
logger.debug(f"Setting proxy from network setting proxy: {opts['proxy']}")
opts.update(self._ydl_opts)
self.add_format(self.gpodder_config, opts)
with youtube_dl.YoutubeDL(opts) as ydl:
info = ydl.extract_info(url, download=False)
return info, opts
def fetch_video(self, info, opts):
with youtube_dl.YoutubeDL(opts) as ydl:
return ydl.process_video_result(info, download=True)
def refresh_entries(self, ie_result):
# only interested in video metadata
opts = {
'skip_download': True, # don't download the video
'youtube_include_dash_manifest': False, # don't download the DASH manifest
}
self.add_format(self.gpodder_config, opts, fallback='18')
opts.update(self._ydl_opts)
new_entries = []
# Need the proxy_url from src/gpodder/config.py:get_proxies_from_config()
if gpodder.config._proxies:
opts['proxy'] = gpodder.config._proxies['http']
logger.debug(f"Setting proxy from network setting proxy: {opts['proxy']}")
# refresh videos one by one to catch single videos blocked by youtube
for e in ie_result.get('entries', []):
tmp = {k: v for k, v in ie_result.items() if k != 'entries'}
tmp['entries'] = [e]
try:
with youtube_dl.YoutubeDL(opts) as ydl:
ydl.process_ie_result(tmp, download=False)
new_entries.extend(tmp.get('entries'))
except youtube_dl.utils.DownloadError as ex:
if ex.exc_info[0] == youtube_dl.utils.ExtractorError:
# for instance "This video contains content from xyz, who has blocked it on copyright grounds"
logger.warning('Skipping %s: %s', e.get('title', ''), ex.exc_info[1])
continue
logger.exception('Skipping %r: %s', tmp, ex.exc_info)
ie_result['entries'] = new_entries
def refresh(self, url, channel_url, max_episodes):
"""
Fetch a channel or playlist contents.
Doesn't yet fetch video entry information, so we only get the video id and title.
"""
# Duplicate a bit of the YoutubeDL machinery here because we only
# want to parse the channel/playlist first, not to fetch video entries.
# We call YoutubeDL.extract_info(process=False), so we
# have to call extract_info again ourselves when we get a result of type 'url'.
def extract_type(ie_result):
result_type = ie_result.get('_type', 'video')
if result_type not in ('url', 'playlist', 'multi_video'):
                raise Exception('Unsupported result_type: {}'.format(result_type))
has_playlist = result_type in ('playlist', 'multi_video')
return result_type, has_playlist
opts = {
'youtube_include_dash_manifest': False, # only interested in video title and id
}
opts.update(self._ydl_opts)
# Need the proxy_url from src/gpodder/config.py:get_proxies_from_config()
if gpodder.config._proxies:
opts['proxy'] = gpodder.config._proxies['http']
logger.debug(f"Setting proxy from network setting proxy: {opts['proxy']}")
with youtube_dl.YoutubeDL(opts) as ydl:
ie_result = ydl.extract_info(url, download=False, process=False)
result_type, has_playlist = extract_type(ie_result)
while not has_playlist:
if result_type in ('url', 'url_transparent'):
ie_result['url'] = youtube_dl.utils.sanitize_url(ie_result['url'])
if result_type == 'url':
logger.debug("extract_info(%s) to get the video list", ie_result['url'])
# We have to add extra_info to the results because it may be
# contained in a playlist
ie_result = ydl.extract_info(ie_result['url'],
download=False,
process=False,
ie_key=ie_result.get('ie_key'))
result_type, has_playlist = extract_type(ie_result)
cover_url = youtube.get_cover(channel_url) # youtube-dl doesn't provide the cover url!
description = youtube.get_channel_desc(channel_url) # youtube-dl doesn't provide the description!
return feedcore.Result(feedcore.UPDATED_FEED,
YoutubeFeed(url, cover_url, description, max_episodes, ie_result, self))
def fetch_channel(self, channel, max_episodes=0):
"""
called by model.gPodderFetcher to get a custom feed.
:returns feedcore.Result: a YoutubeFeed or None if channel is not a youtube channel or playlist
"""
if not self.my_config.manage_channel:
return None
url = None
m = CHANNEL_RE.match(channel.url)
if m:
url = 'https://www.youtube.com/channel/{}/videos'.format(m.group(1))
else:
m = USER_RE.match(channel.url)
if m:
url = 'https://www.youtube.com/user/{}/videos'.format(m.group(1))
else:
m = PLAYLIST_RE.match(channel.url)
if m:
url = 'https://www.youtube.com/playlist?list={}'.format(m.group(1))
if url:
logger.info('youtube-dl handling %s => %s', channel.url, url)
return self.refresh(url, channel.url, max_episodes)
return None
def is_supported_url(self, url):
if url is None:
return False
for i, res in enumerate(self.regex_cache):
if next(filter(None, (r.match(url) for r in res)), None) is not None:
if i > 0:
self.regex_cache.remove(res)
self.regex_cache.insert(0, res)
return True
with youtube_dl.YoutubeDL(self._ydl_opts) as ydl:
# youtube-dl returns a list, yt-dlp returns a dict
ies = ydl._ies
if isinstance(ydl._ies, dict):
ies = ydl._ies.values()
for ie in ies:
if ie.suitable(url) and ie.ie_key() not in self.ie_blacklist:
self.regex_cache.insert(
0, (ie._VALID_URL_RE if isinstance(ie._VALID_URL_RE, Iterable)
else (ie._VALID_URL_RE,)))
return True
return False
def custom_downloader(self, unused_config, episode):
"""
called from registry.custom_downloader.resolve
"""
if not self.force and not self.my_config.manage_downloads:
return None
try: # Reject URLs linking to known media files
(_, ext) = util.filename_from_url(episode.url)
if util.file_type_by_extension(ext) is not None:
return None
except Exception:
pass
if self.is_supported_url(episode.url):
return YoutubeCustomDownload(self, episode.url, episode)
return None
class gPodderExtension:
def __init__(self, container):
self.container = container
self.ytdl = None
self.infobar = None
def on_load(self):
self.ytdl = gPodderYoutubeDL(self.container.manager.core.config, self.container.config)
logger.info('Registering youtube-dl. (using %s %s)' % (program_name, youtube_dl.version.__version__))
registry.feed_handler.register(self.ytdl.fetch_channel)
registry.custom_downloader.register(self.ytdl.custom_downloader)
if youtube_dl.utils.version_tuple(youtube_dl.version.__version__) < youtube_dl.utils.version_tuple(want_ytdl_version):
logger.error(want_ytdl_version_msg
% {'have_version': youtube_dl.version.__version__, 'want_version': want_ytdl_version})
def on_unload(self):
logger.info('Unregistering youtube-dl.')
try:
registry.feed_handler.unregister(self.ytdl.fetch_channel)
except ValueError:
pass
try:
registry.custom_downloader.unregister(self.ytdl.custom_downloader)
except ValueError:
pass
self.ytdl = None
def on_ui_object_available(self, name, ui_object):
if name == 'gpodder-gtk':
self.gpodder = ui_object
if youtube_dl.utils.version_tuple(youtube_dl.version.__version__) < youtube_dl.utils.version_tuple(want_ytdl_version):
ui_object.notification(want_ytdl_version_msg %
{'have_version': youtube_dl.version.__version__, 'want_version': want_ytdl_version},
_('Old youtube-dl'), important=True, widget=ui_object.main_window)
def on_episodes_context_menu(self, episodes):
if not self.container.config.manage_downloads and any(e.can_download() for e in episodes):
return [(_("Download with youtube-dl"), self.download_episodes)]
def download_episodes(self, episodes):
episodes = [e for e in episodes if e.can_download()]
# create a new gPodderYoutubeDL to force using it even if manage_downloads is False
downloader = gPodderYoutubeDL(self.container.manager.core.config, self.container.config, force=True)
self.gpodder.download_episode_list(episodes, downloader=downloader)
def toggle_manage_channel(self, widget):
self.container.config.manage_channel = widget.get_active()
def toggle_manage_downloads(self, widget):
self.container.config.manage_downloads = widget.get_active()
def toggle_embed_subtitles(self, widget):
if widget.get_active():
if not util.find_command('ffmpeg'):
self.infobar.show()
widget.set_active(False)
self.container.config.embed_subtitles = False
else:
self.container.config.embed_subtitles = True
else:
self.container.config.embed_subtitles = False
def show_preferences(self):
box = Gtk.Box(orientation=Gtk.Orientation.VERTICAL, spacing=10)
box.set_border_width(10)
label = Gtk.Label('%s %s' % (program_name, youtube_dl.version.__version__))
box.pack_start(label, False, False, 0)
box.pack_start(Gtk.HSeparator(), False, False, 0)
checkbox = Gtk.CheckButton(_('Parse YouTube channel feeds with youtube-dl to access more than 15 episodes'))
checkbox.set_active(self.container.config.manage_channel)
checkbox.connect('toggled', self.toggle_manage_channel)
box.pack_start(checkbox, False, False, 0)
box.pack_start(Gtk.HSeparator(), False, False, 0)
checkbox = Gtk.CheckButton(_('Download all supported episodes with youtube-dl'))
checkbox.set_active(self.container.config.manage_downloads)
checkbox.connect('toggled', self.toggle_manage_downloads)
box.pack_start(checkbox, False, False, 0)
note = Gtk.Label(use_markup=True, wrap=True, label=_(
'youtube-dl provides access to additional YouTube formats and DRM content.'
' Episodes from non-YouTube channels, that have youtube-dl support, will <b>fail</b> to download unless you manually'
' <a href="https://gpodder.github.io/docs/youtube.html#formats">add custom formats</a> for each site.'
' <b>Download with youtube-dl</b> appears in the episode menu when this option is disabled,'
' and can be used to manually download from supported sites.'))
note.connect('activate-link', lambda label, url: util.open_website(url))
note.set_property('xalign', 0.0)
box.add(note)
box.pack_start(Gtk.HSeparator(), False, False, 0)
checkbox = Gtk.CheckButton(_('Embed all available subtitles in downloaded video'))
checkbox.set_active(self.container.config.embed_subtitles)
checkbox.connect('toggled', self.toggle_embed_subtitles)
box.pack_start(checkbox, False, False, 0)
infobar = Gtk.InfoBar()
infobar.get_content_area().add(Gtk.Label(wrap=True, label=_(
'The "ffmpeg" command was not found. FFmpeg is required for embedding subtitles.')))
self.infobar = infobar
box.pack_end(infobar, False, False, 0)
box.show_all()
infobar.hide()
return box
def on_preferences(self):
return [(_('youtube-dl'), self.show_preferences)]
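# Illustrative sketch (hypothetical class and resolver): the minimal shape of
# the registry wiring performed by gPodderExtension.on_load()/on_unload()
# above. `registry` is the same object this module already imports; only the
# register()/unregister() calls and the (config, episode) resolver signature
# mirror the real code.
class ExampleDownloaderRegistration:
    def __init__(self, container):
        self.container = container
    def resolve_downloader(self, config, episode):
        # A resolver returns a downloader for URLs it wants to handle,
        # or None to let gPodder's built-in HTTP downloader take over.
        return None
    def on_load(self):
        registry.custom_downloader.register(self.resolve_downloader)
    def on_unload(self):
        try:
            registry.custom_downloader.unregister(self.resolve_downloader)
        except ValueError:
            pass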
| 27,834
|
Python
|
.py
| 542
| 40.210332
| 142
| 0.605822
|
gpodder/gpodder
| 1,268
| 204
| 307
|
GPL-3.0
|
9/5/2024, 5:10:30 PM (Europe/Amsterdam)
|
8,346
|
notification.py
|
gpodder_gpodder/share/gpodder/extensions/notification.py
|
# -*- coding: utf-8 -*-
#
# gPodder - A media aggregator and podcast client
# Copyright (c) 2005-2011 Thomas Perl and the gPodder Team
#
# gPodder is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# gPodder is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Bernd Schlapsi <brot@gmx.info>; 2011-11-20
__title__ = 'Gtk+ Desktop Notifications'
__description__ = 'Display notification bubbles for different events.'
__category__ = 'desktop-integration'
__only_for__ = 'gtk'
__mandatory_in__ = 'gtk'
__disable_in__ = 'win32'
import logging
import gpodder
logger = logging.getLogger(__name__)
try:
import gi
gi.require_version('Notify', '0.7')
from gi.repository import Notify
pynotify = True
except ImportError:
pynotify = None
except ValueError:
pynotify = None
if pynotify is None:
class gPodderExtension(object):
def __init__(self, container):
logger.info('Could not find PyNotify.')
else:
class gPodderExtension(object):
def __init__(self, container):
self.container = container
def on_load(self):
Notify.init('gPodder')
def on_unload(self):
Notify.uninit()
def on_notification_show(self, title, message):
if not message and not title:
return
notify = Notify.Notification.new(title or '', message or '',
gpodder.icon_file)
try:
notify.show()
except:
# See http://gpodder.org/bug/966
pass
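# Illustrative sketch (hypothetical strings): how other extensions reach this
# hook. gpodder.user_extensions dispatches on_notification_show() to every
# loaded extension, which is how the audio extensions elsewhere in this tree
# report "File converted" / "File normalized".
def _example_trigger_notification():
    if gpodder.user_extensions is not None:
        gpodder.user_extensions.on_notification_show(
            'Example title', 'Example message body')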
| 2,060
|
Python
|
.py
| 59
| 29.423729
| 72
| 0.668008
|
gpodder/gpodder
| 1,268
| 204
| 307
|
GPL-3.0
|
9/5/2024, 5:10:30 PM (Europe/Amsterdam)
|
8,347
|
tagging.py
|
gpodder_gpodder/share/gpodder/extensions/tagging.py
|
# -*- coding: utf-8 -*-
####
# 01/2011 Bernd Schlapsi <brot@gmx.info>
#
# This script is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# gPodder is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Dependencies:
# * python-mutagen (Mutagen is a Python module to handle audio metadata)
#
# This extension script adds episode title and podcast title to the audio file
# The episode title is written into the title tag
# The podcast title is written into the album tag
import base64
import datetime
import logging
import mimetypes
from mutagen import File
from mutagen.easyid3 import EasyID3
from mutagen.easymp4 import EasyMP4Tags
from mutagen.flac import Picture
from mutagen.id3 import APIC, ID3
from mutagen.mp3 import MP3, EasyMP3
from mutagen.mp4 import MP4Cover, MP4Tags
import gpodder
from gpodder import coverart
logger = logging.getLogger(__name__)
# workaround for https://github.com/quodlibet/mutagen/issues/334
# can't add_tags to MP4 when file has no tag
MP4Tags._padding = 0
_ = gpodder.gettext
__title__ = _('Tag downloaded files using Mutagen')
__description__ = _('Add episode and podcast titles to MP3/OGG tags')
__authors__ = 'Bernd Schlapsi <brot@gmx.info>'
__doc__ = 'https://gpodder.github.io/docs/extensions/tagging.html'
__payment__ = 'https://flattr.com/submit/auto?user_id=BerndSch&url=http://wiki.gpodder.org/wiki/Extensions/Tagging'
__category__ = 'post-download'
DefaultConfig = {
'strip_album_from_title': True,
'genre_tag': 'Podcast',
'always_remove_tags': False,
'auto_embed_coverart': False,
'set_artist_to_album': False,
'set_version': 4,
'modify_tags': True,
'remove_before_modify': False
}
class AudioFile(object):
def __init__(self, filename, album, title, subtitle, genre, pubDate, cover):
self.filename = filename
self.album = album
self.title = title
self.subtitle = subtitle
self.genre = genre
self.pubDate = pubDate
self.cover = cover
def remove_tags(self):
audio = File(self.filename, easy=True)
if audio.tags is not None:
audio.delete()
audio.save()
def write_basic_tags(self, remove_before_modify, modify_tags, set_artist_to_album, set_version):
audio = File(self.filename, easy=True)
if audio is None:
logger.warning("Unable to add tags to file '%s'", self.filename)
return
if audio.tags is None:
audio.add_tags()
if modify_tags:
if remove_before_modify:
audio.delete()
if self.album is not None:
audio.tags['album'] = self.album
if self.title is not None:
audio.tags['title'] = self.title
if self.subtitle is not None:
audio.tags['subtitle'] = self.subtitle
if self.subtitle is not None:
audio.tags['comments'] = self.subtitle
if self.genre is not None:
audio.tags['genre'] = self.genre
if self.pubDate is not None:
audio.tags['date'] = self.pubDate
if set_artist_to_album:
audio.tags['artist'] = self.album
if type(audio) is EasyMP3:
audio.save(v2_version=set_version)
else:
            # Not an MP3: save without the ID3 v2 version argument
audio.save()
def insert_coverart(self):
""" implement the cover art logic in the subclass
"""
None
def get_cover_picture(self, cover):
""" Returns mutagen Picture class for the cover image
Useful for OGG and FLAC format
Picture type = cover image
see http://flac.sourceforge.net/documentation_tools_flac.html#encoding_options
"""
f = open(cover, mode='rb')
p = Picture()
p.type = 3
p.data = f.read()
p.mime = mimetypes.guess_type(cover)[0]
f.close()
return p
class OggFile(AudioFile):
def __init__(self, filename, album, title, subtitle, genre, pubDate, cover):
super(OggFile, self).__init__(filename, album, title, subtitle, genre, pubDate, cover)
def insert_coverart(self):
audio = File(self.filename, easy=True)
p = self.get_cover_picture(self.cover)
audio['METADATA_BLOCK_PICTURE'] = base64.b64encode(p.write())
audio.save()
class Mp4File(AudioFile):
def __init__(self, filename, album, title, subtitle, genre, pubDate, cover):
super(Mp4File, self).__init__(filename, album, title, subtitle, genre, pubDate, cover)
def insert_coverart(self):
audio = File(self.filename)
if self.cover.endswith('png'):
cover_format = MP4Cover.FORMAT_PNG
else:
cover_format = MP4Cover.FORMAT_JPEG
data = open(self.cover, 'rb').read()
audio.tags['covr'] = [MP4Cover(data, cover_format)]
audio.save()
class Mp3File(AudioFile):
def __init__(self, filename, album, title, subtitle, genre, pubDate, cover):
super(Mp3File, self).__init__(filename, album, title, subtitle, genre, pubDate, cover)
def insert_coverart(self):
audio = MP3(self.filename, ID3=ID3)
if audio.tags is None:
audio.add_tags()
audio.tags.add(
APIC(
encoding=3, # 3 is for utf-8
mime=mimetypes.guess_type(self.cover)[0],
type=3,
desc='Cover',
data=open(self.cover, 'rb').read()
)
)
audio.save()
class gPodderExtension:
def __init__(self, container):
self.container = container
# fix #737 EasyID3 doesn't recognize subtitle and comment tags
EasyID3.RegisterTextKey("comments", "COMM")
EasyID3.RegisterTextKey("subtitle", "TIT3")
EasyMP4Tags.RegisterTextKey("comments", "desc")
EasyMP4Tags.RegisterFreeformKey("subtitle", "SUBTITLE")
def on_episode_downloaded(self, episode):
info = self.read_episode_info(episode)
if info['filename'] is None:
return
self.write_info2file(info, episode)
def get_audio(self, info, episode):
audio = None
cover = None
audioClass = None
if self.container.config.auto_embed_coverart:
cover = self.get_cover(episode.channel)
if info['filename'].endswith('.mp3'):
audioClass = Mp3File
elif info['filename'].endswith('.ogg'):
audioClass = OggFile
elif info['filename'].endswith('.m4a') or info['filename'].endswith('.mp4'):
audioClass = Mp4File
elif File(info['filename'], easy=True):
# mutagen can work with it: at least add basic tags
audioClass = AudioFile
if audioClass:
audio = audioClass(info['filename'],
info['album'],
info['title'],
info['subtitle'],
info['genre'],
info['pubDate'],
cover)
return audio
def read_episode_info(self, episode):
info = {
'filename': None,
'album': None,
'title': None,
'subtitle': None,
'genre': None,
'pubDate': None
}
# read filename (incl. file path) from gPodder database
info['filename'] = episode.local_filename(create=False, check_only=True)
if info['filename'] is None:
return
# read title+album from gPodder database
info['album'] = episode.channel.title
title = episode.title
if (self.container.config.strip_album_from_title and title and info['album'] and title.startswith(info['album'])):
info['title'] = title[len(info['album']):].lstrip()
else:
info['title'] = title
info['subtitle'] = episode._text_description
if self.container.config.genre_tag is not None:
info['genre'] = self.container.config.genre_tag
# convert pubDate to string
try:
pubDate = datetime.datetime.fromtimestamp(episode.pubDate)
info['pubDate'] = pubDate.strftime('%Y-%m-%d %H:%M')
except:
try:
# since version 3 the published date has a new/other name
pubDate = datetime.datetime.fromtimestamp(episode.published)
info['pubDate'] = pubDate.strftime('%Y-%m-%d %H:%M')
except:
info['pubDate'] = None
return info
def write_info2file(self, info, episode):
audio = self.get_audio(info, episode)
if self.container.config.always_remove_tags:
audio.remove_tags()
else:
audio.write_basic_tags(self.container.config.remove_before_modify,
self.container.config.modify_tags,
self.container.config.set_artist_to_album,
self.container.config.set_version)
if self.container.config.auto_embed_coverart:
audio.insert_coverart()
logger.info('tagging.on_episode_downloaded(%s/%s)', episode.channel.title, episode.title)
def get_cover(self, podcast):
downloader = coverart.CoverDownloader()
return downloader.get_cover(podcast.cover_file, podcast.cover_url,
podcast.url, podcast.title, None, None, True)
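# Illustrative sketch (hypothetical path and values): the easy-tag write that
# AudioFile.write_basic_tags() performs, reduced to a standalone helper using
# the same mutagen File(..., easy=True) interface imported above.
def example_write_title_and_album(path, title, album):
    audio = File(path, easy=True)
    if audio is None:
        return False  # mutagen does not recognize the file
    if audio.tags is None:
        audio.add_tags()
    audio.tags['title'] = title
    audio.tags['album'] = album
    audio.save()
    return True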
| 9,952
|
Python
|
.py
| 242
| 32.177686
| 122
| 0.619818
|
gpodder/gpodder
| 1,268
| 204
| 307
|
GPL-3.0
|
9/5/2024, 5:10:30 PM (Europe/Amsterdam)
|
8,348
|
normalize_audio.py
|
gpodder_gpodder/share/gpodder/extensions/normalize_audio.py
|
# -*- coding: utf-8 -*-
# This extension adjusts the volume of audio files to a standard level
# Supported file formats are mp3 and ogg
#
# Requires: normalize-audio, mpg123
#
# (c) 2011-11-06 Bernd Schlapsi <brot@gmx.info>
# Released under the same license terms as gPodder itself.
import logging
import os
import subprocess
import gpodder
from gpodder import util
logger = logging.getLogger(__name__)
_ = gpodder.gettext
__title__ = _('Normalize audio with re-encoding')
__description__ = _('Normalize the volume of audio files with normalize-audio')
__authors__ = 'Bernd Schlapsi <brot@gmx.info>'
__doc__ = 'https://gpodder.github.io/docs/extensions/normalizeaudio.html'
__payment__ = 'https://flattr.com/submit/auto?user_id=BerndSch&url=http://wiki.gpodder.org/wiki/Extensions/NormalizeAudio'
__category__ = 'post-download'
DefaultConfig = {
'context_menu': True, # Show action in the episode list context menu
}
# a tuple of (extension, command)
CONVERT_COMMANDS = {
'.ogg': 'normalize-ogg',
'.mp3': 'normalize-mp3',
}
class gPodderExtension:
MIME_TYPES = ('audio/mpeg', 'audio/ogg', )
EXT = ('.mp3', '.ogg', )
def __init__(self, container):
self.container = container
# Dependency check
self.container.require_command('normalize-ogg')
self.container.require_command('normalize-mp3')
self.container.require_command('normalize-audio')
def on_load(self):
logger.info('Extension "%s" is being loaded.' % __title__)
def on_unload(self):
logger.info('Extension "%s" is being unloaded.' % __title__)
def on_episode_downloaded(self, episode):
self._convert_episode(episode)
def on_episodes_context_menu(self, episodes):
if not self.container.config.context_menu:
return None
if not any(self._check_source(episode) for episode in episodes):
return None
return [(self.container.metadata.title, self.convert_episodes)]
def _check_source(self, episode):
if not episode.file_exists():
return False
if episode.mime_type in self.MIME_TYPES:
return True
if episode.extension() in self.EXT:
return True
return False
def _convert_episode(self, episode):
if episode.file_type() != 'audio':
return
filename = episode.local_filename(create=False)
if filename is None:
return
basename, extension = os.path.splitext(filename)
cmd = [CONVERT_COMMANDS.get(extension, 'normalize-audio'), filename]
# Set cwd to prevent normalize from placing files in the directory gpodder was started from.
if gpodder.ui.win32:
p = util.Popen(cmd, cwd=episode.channel.save_dir)
p.wait()
stdout, stderr = ("<unavailable>",) * 2
else:
p = util.Popen(cmd, cwd=episode.channel.save_dir,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
if p.returncode == 0:
logger.info('normalize-audio processing successful.')
gpodder.user_extensions.on_notification_show(_('File normalized'),
episode.title)
else:
logger.warning('normalize-audio failed: %s / %s', stdout, stderr)
def convert_episodes(self, episodes):
for episode in episodes:
self._convert_episode(episode)
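# Illustrative sketch (hypothetical paths): the subprocess pattern from
# _convert_episode() applied to an arbitrary file. CONVERT_COMMANDS and
# util.Popen are the same objects used above; cwd keeps normalize from
# writing into whatever directory gPodder was started from.
def example_normalize_file(filename, work_dir):
    _, extension = os.path.splitext(filename)
    cmd = [CONVERT_COMMANDS.get(extension, 'normalize-audio'), filename]
    p = util.Popen(cmd, cwd=work_dir,
                   stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    stdout, stderr = p.communicate()
    return p.returncode == 0, stdout, stderr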
| 3,474
|
Python
|
.py
| 84
| 34.154762
| 122
| 0.65427
|
gpodder/gpodder
| 1,268
| 204
| 307
|
GPL-3.0
|
9/5/2024, 5:10:30 PM (Europe/Amsterdam)
|
8,349
|
taskbar_progress.py
|
gpodder_gpodder/share/gpodder/extensions/taskbar_progress.py
|
# -*- coding: utf-8 -*-
#
# gPodder - A media aggregator and podcast client
# Copyright (c) 2005-2018 The gPodder Team
#
# gPodder is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# gPodder is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Windows 7 taskbar progress
# Sean Munkel; 2013-01-05
import ctypes
import functools
import logging
from ctypes import (HRESULT, POINTER, Structure, alignment, c_int, c_uint,
c_ulong, c_ulonglong, c_ushort, c_wchar_p, sizeof)
from ctypes.wintypes import tagRECT
from comtypes import COMMETHOD, GUID, IUnknown, client, wireHWND
import gpodder
import gi # isort:skip
gi.require_version('Gtk', '3.0')
from gi.repository import Gtk # isort:skip
_ = gpodder.gettext
__title__ = _('Show download progress on the taskbar')
__description__ = _('Displays the progress on the Windows taskbar.')
__authors__ = 'Sean Munkel <seanmunkel@gmail.com>'
__category__ = 'desktop-integration'
__only_for__ = 'win32'
logger = logging.getLogger(__name__)
WSTRING = c_wchar_p
# values for enumeration 'TBPFLAG'
TBPF_NOPROGRESS = 0
TBPF_INDETERMINATE = 1
TBPF_NORMAL = 2
TBPF_ERROR = 4
TBPF_PAUSED = 8
TBPFLAG = c_int # enum
# values for enumeration 'TBATFLAG'
TBATF_USEMDITHUMBNAIL = 1
TBATF_USEMDILIVEPREVIEW = 2
TBATFLAG = c_int # enum
# return code
S_OK = HRESULT(0).value
class tagTHUMBBUTTON(Structure):
_fields_ = [
('dwMask', c_ulong),
('iId', c_uint),
('iBitmap', c_uint),
('hIcon', POINTER(IUnknown)),
('szTip', c_ushort * 260),
('dwFlags', c_ulong)]
class ITaskbarList(IUnknown):
_case_insensitive_ = True
_iid_ = GUID('{56FDF342-FD6D-11D0-958A-006097C9A090}')
_idlflags_ = []
_methods_ = [
COMMETHOD([], HRESULT, 'HrInit'),
COMMETHOD([], HRESULT, 'AddTab',
(['in'], c_int, 'hwnd')),
COMMETHOD([], HRESULT, 'DeleteTab',
(['in'], c_int, 'hwnd')),
COMMETHOD([], HRESULT, 'ActivateTab',
(['in'], c_int, 'hwnd')),
COMMETHOD([], HRESULT, 'SetActivateAlt',
(['in'], c_int, 'hwnd'))]
class ITaskbarList2(ITaskbarList):
_case_insensitive_ = True
_iid_ = GUID('{602D4995-B13A-429B-A66E-1935E44F4317}')
_idlflags_ = []
_methods_ = [
COMMETHOD([], HRESULT, 'MarkFullscreenWindow',
(['in'], c_int, 'hwnd'),
(['in'], c_int, 'fFullscreen'))]
class ITaskbarList3(ITaskbarList2):
_case_insensitive_ = True
_iid_ = GUID('{EA1AFB91-9E28-4B86-90E9-9E9F8A5EEFAF}')
_idlflags_ = []
_methods_ = [
COMMETHOD([], HRESULT, 'SetProgressValue',
(['in'], c_int, 'hwnd'),
(['in'], c_ulonglong, 'ullCompleted'),
(['in'], c_ulonglong, 'ullTotal')),
COMMETHOD([], HRESULT, 'SetProgressState',
(['in'], c_int, 'hwnd'),
(['in'], TBPFLAG, 'tbpFlags')),
COMMETHOD([], HRESULT, 'RegisterTab',
(['in'], c_int, 'hwndTab'),
(['in'], wireHWND, 'hwndMDI')),
COMMETHOD([], HRESULT, 'UnregisterTab',
(['in'], c_int, 'hwndTab')),
COMMETHOD([], HRESULT, 'SetTabOrder',
(['in'], c_int, 'hwndTab'),
(['in'], c_int, 'hwndInsertBefore')),
COMMETHOD([], HRESULT, 'SetTabActive',
(['in'], c_int, 'hwndTab'),
(['in'], c_int, 'hwndMDI'),
(['in'], TBATFLAG, 'tbatFlags')),
COMMETHOD([], HRESULT, 'ThumbBarAddButtons',
(['in'], c_int, 'hwnd'),
(['in'], c_uint, 'cButtons'),
(['in'], POINTER(tagTHUMBBUTTON), 'pButton')),
COMMETHOD([], HRESULT, 'ThumbBarUpdateButtons',
(['in'], c_int, 'hwnd'),
(['in'], c_uint, 'cButtons'),
(['in'], POINTER(tagTHUMBBUTTON), 'pButton')),
COMMETHOD([], HRESULT, 'ThumbBarSetImageList',
(['in'], c_int, 'hwnd'),
(['in'], POINTER(IUnknown), 'himl')),
COMMETHOD([], HRESULT, 'SetOverlayIcon',
(['in'], c_int, 'hwnd'),
(['in'], POINTER(IUnknown), 'hIcon'),
(['in'], WSTRING, 'pszDescription')),
COMMETHOD([], HRESULT, 'SetThumbnailTooltip',
(['in'], c_int, 'hwnd'),
(['in'], WSTRING, 'pszTip')),
COMMETHOD([], HRESULT, 'SetThumbnailClip',
(['in'], c_int, 'hwnd'),
(['in'], POINTER(tagRECT), 'prcClip'))]
assert sizeof(tagTHUMBBUTTON) in [540, 552], sizeof(tagTHUMBBUTTON)
assert alignment(tagTHUMBBUTTON) in [4, 8], alignment(tagTHUMBBUTTON)
def consume_events():
""" consume pending events """
while Gtk.events_pending():
Gtk.main_iteration()
# based on http://stackoverflow.com/a/1744503/905256
class gPodderExtension:
def __init__(self, container):
self.container = container
self.window_handle = None
self.restart_warning = True
def on_load(self):
self.taskbar = client.CreateObject(
'{56FDF344-FD6D-11d0-958A-006097C9A090}',
interface=ITaskbarList3)
ret = self.taskbar.HrInit()
if ret != S_OK:
logger.warning("taskbar.HrInit failed: %r", ret)
del self.taskbar
def on_unload(self):
# let the window change state? otherwise gpodder is stuck on exit
# (tested on windows 7 pro)
consume_events()
if self.taskbar is not None:
self.taskbar.SetProgressState(self.window_handle, TBPF_NOPROGRESS)
# let the taskbar change state otherwise gpodder is stuck on exit
# (tested on windows 7 pro)
consume_events()
def on_ui_object_available(self, name, ui_object):
def callback(self, window, *args):
ctypes.pythonapi.PyCapsule_GetPointer.restype = ctypes.c_void_p
ctypes.pythonapi.PyCapsule_GetPointer.argtypes = [ctypes.py_object]
win_gpointer = ctypes.pythonapi.PyCapsule_GetPointer(window.get_window().__gpointer__, None)
gdkdll = ctypes.CDLL("libgdk-3-0.dll")
self.window_handle = gdkdll.gdk_win32_window_get_handle(win_gpointer)
ret = self.taskbar.ActivateTab(self.window_handle)
if ret != S_OK:
logger.warning("taskbar.ActivateTab failed: %r", ret)
del self.taskbar
if name == 'gpodder-gtk':
ui_object.main_window.connect('realize',
functools.partial(callback, self))
def on_download_progress(self, progress):
if not self.taskbar:
return
if self.window_handle is None:
if not self.restart_warning:
return
logger.warning("No window handle available, a restart max fix this")
self.restart_warning = False
return
if 0 < progress < 1:
self.taskbar.SetProgressState(self.window_handle, TBPF_NORMAL)
self.taskbar.SetProgressValue(self.window_handle,
int(progress * 100), 100)
else:
self.taskbar.SetProgressState(self.window_handle, TBPF_NOPROGRESS)
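# Illustrative sketch (hwnd is hypothetical; it normally comes from
# gdk_win32_window_get_handle in on_ui_object_available above): the minimal
# ITaskbarList3 call sequence the extension performs for a fractional
# download progress value.
def example_set_taskbar_progress(hwnd, fraction):
    taskbar = client.CreateObject(
        '{56FDF344-FD6D-11d0-958A-006097C9A090}', interface=ITaskbarList3)
    if taskbar.HrInit() != S_OK:
        return
    if 0 < fraction < 1:
        taskbar.SetProgressState(hwnd, TBPF_NORMAL)
        taskbar.SetProgressValue(hwnd, int(fraction * 100), 100)
    else:
        taskbar.SetProgressState(hwnd, TBPF_NOPROGRESS)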
| 7,796
|
Python
|
.py
| 185
| 33.324324
| 104
| 0.589764
|
gpodder/gpodder
| 1,268
| 204
| 307
|
GPL-3.0
|
9/5/2024, 5:10:30 PM (Europe/Amsterdam)
|
8,350
|
filter.py
|
gpodder_gpodder/share/gpodder/extensions/filter.py
|
# -*- coding: utf-8 -*-
# Disable automatic downloads based on episode title.
# Released under the same license terms as gPodder itself.
import re
import gpodder
import gi # isort:skip
gi.require_version('Gtk', '3.0') # isort:skip
from gi.repository import Gtk # isort:skip
_ = gpodder.gettext
__title__ = _('Filter Episodes')
__description__ = _('Disable automatic downloads based on episode title.')
__only_for__ = 'gtk, cli'
__authors__ = 'Brand Huntsman <http://qzx.com/mail/>'
__doc__ = 'https://gpodder.github.io/docs/extensions/filter.html'
DefaultConfig = {
'filters': []
}
class BlockExceptFrame:
"""
Utility class to manage a Block or Except frame, with sub-widgets:
- Creation as well as internal UI change is handled;
- Changes to the other widget and to the model have to be handled outside.
It's less optimized than mapping each widget to a different signal handler,
but makes shorter code.
"""
def __init__(self, value, enable_re, enable_ic, on_change_cb):
self.on_change_cb = on_change_cb
self.frame = Gtk.Frame()
frame_vbox = Gtk.Box(orientation=Gtk.Orientation.VERTICAL)
self.frame.add(frame_vbox)
# checkbox and text entry
hbox = Gtk.Box(orientation=Gtk.Orientation.HORIZONTAL)
hbox.set_border_width(5)
frame_vbox.add(hbox)
self.checkbox = Gtk.CheckButton()
self.checkbox.set_active(value is not False)
hbox.pack_start(self.checkbox, False, False, 3)
self.entry = Gtk.Entry()
hbox.pack_start(self.entry, True, True, 5)
# lower hbox
hbox = Gtk.Box(orientation=Gtk.Orientation.HORIZONTAL)
hbox.set_border_width(5)
frame_vbox.add(hbox)
# regular expression checkbox
self.checkbox_re = Gtk.CheckButton(_('Regular Expression'))
hbox.pack_end(self.checkbox_re, False, False, 10)
# ignore case checkbox
self.checkbox_ic = Gtk.CheckButton(_('Ignore Case'))
hbox.pack_end(self.checkbox_ic, False, False, 10)
if value is False:
self.entry.set_sensitive(False)
self.entry.set_editable(False)
self.checkbox_re.set_sensitive(False)
self.checkbox_ic.set_sensitive(False)
else:
self.entry.set_text(value)
self.checkbox_re.set_active(enable_re)
self.checkbox_ic.set_active(enable_ic)
self.checkbox.connect('toggled', self.toggle_active)
self.entry.connect('changed', self.emit_change)
self.checkbox_re.connect('toggled', self.emit_change)
self.checkbox_ic.connect('toggled', self.emit_change)
def toggle_active(self, widget):
enabled = widget.get_active()
if enabled:
# enable text and RE/IC checkboxes
self.entry.set_sensitive(True)
self.entry.set_editable(True)
self.checkbox_re.set_sensitive(True)
self.checkbox_ic.set_sensitive(True)
else:
# clear and disable text and RE/IC checkboxes
self.entry.set_sensitive(False)
self.entry.set_text('')
self.entry.set_editable(False)
self.checkbox_re.set_active(False)
self.checkbox_re.set_sensitive(False)
self.checkbox_ic.set_active(False)
self.checkbox_ic.set_sensitive(False)
self.emit_change(widget)
def emit_change(self, widget):
del widget
if self.on_change_cb:
self.on_change_cb(active=self.checkbox.get_active(),
text=self.entry.get_text(),
regexp=self.checkbox_re.get_active(),
ignore_case=self.checkbox_ic.get_active())
class gPodderExtension:
def __init__(self, container):
self.core = container.manager.core # gpodder core
self.filters = container.config.filters # all filters
# the following are only valid when podcast channel settings dialog is open
# self.gpodder = gPodder
# self.ui_object = gPodderChannel
# self.channel = PodcastChannel
# self.url = current filter url
# self.f = current filter
# self.block_widget = block BlockExceptFrame
# self.allow_widget = allow BlockExceptFrame
def on_ui_object_available(self, name, ui_object):
if name == 'channel-gtk':
# to close channel settings dialog after re-filtering
self.ui_object = ui_object
elif name == 'gpodder-gtk':
# to update episode list after re-filtering
self.gpodder = ui_object
# add filter tab to podcast channel settings dialog
def on_channel_settings(self, channel):
return [(_('Filter'), self.show_channel_settings_tab)]
def show_channel_settings_tab(self, channel):
self.channel = channel
self.url = channel.url
self.f = self.find_filter(self.url)
block = self.key('block')
allow = self.key('allow')
box = Gtk.Box(orientation=Gtk.Orientation.VERTICAL, spacing=10)
box.set_border_width(10)
# note about Cancel
note = Gtk.Label(use_markup=True, wrap=True, label=_(
'<b>Note:</b> The Cancel button does <b>not</b> return the '
'filter settings to the values they had before. '
'The changes are saved immediately after they are made.'))
box.add(note)
# block widgets
self.block_widget = BlockExceptFrame(value=block,
enable_re=self.key('block_re') is not False,
enable_ic=self.key('block_ic') is not False,
on_change_cb=self.on_block_changed)
self.block_widget.frame.set_label(_('Block'))
box.add(self.block_widget.frame)
self.block_widget.checkbox.set_sensitive(allow is False)
# allow widgets
self.allow_widget = BlockExceptFrame(value=allow,
enable_re=self.key('allow_re') is not False,
enable_ic=self.key('allow_ic') is not False,
on_change_cb=self.on_allow_changed)
self.allow_widget.frame.set_label(_('Except'))
box.add(self.allow_widget.frame)
if self.f is None:
self.allow_widget.frame.set_sensitive(False)
# help
label = Gtk.Label(_(
'Clicking the block checkbox and leaving it empty will disable auto-download for all episodes in this channel.'
' The patterns match partial text in episode title, and an empty pattern matches any title.'
' The except pattern unblocks blocked episodes (to block all then unblock some).'))
label.set_line_wrap(True)
box.add(label)
# re-filter
separator = Gtk.HSeparator()
box.add(separator)
button = Gtk.Button(_('Filter episodes now'))
button.connect('clicked', self.refilter_podcast)
box.add(button)
label2 = Gtk.Label(_('Undoes any episodes you marked as old.'))
box.add(label2)
box.show_all()
return box
# return filter for a given podcast channel url
def find_filter(self, url):
for f in self.filters:
if f['url'] == url:
return f
return None
# return value for a given key in current filter
def key(self, key):
if self.f is None:
return False
return self.f.get(key, False)
def on_block_changed(self, active, text, regexp, ignore_case):
self.on_changed('block', active, text, regexp, ignore_case)
self.allow_widget.frame.set_sensitive(self.f is not None)
def on_allow_changed(self, active, text, regexp, ignore_case):
self.on_changed('allow', active, text, regexp, ignore_case)
self.block_widget.checkbox.set_sensitive(self.f is None or self.key('allow') is False)
# update filter when toggling block/allow checkbox
def on_changed(self, field, enabled, text, regexp, ignore_case):
if enabled:
if self.f is None:
self.f = {'url': self.url}
self.filters.append(self.f)
self.filters.sort(key=lambda e: e['url'])
self.f[field] = text
if regexp:
self.f[field + '_re'] = True
else:
self.f.pop(field + '_re', None)
if ignore_case:
self.f[field + '_ic'] = True
else:
self.f.pop(field + '_ic', None)
else:
if self.f is not None:
self.f.pop(field + '_ic', None)
self.f.pop(field + '_re', None)
self.f.pop(field, None)
if len(self.f.keys()) == 1:
self.filters.remove(self.f)
self.f = None
# save config
self.core.config.schedule_save()
# remove filter when podcast channel is removed
def on_podcast_delete(self, podcast):
f = self.find_filter(podcast.url)
if f is not None:
self.filters.remove(f)
# save config
self.core.config.schedule_save()
# mark new episodes as old to disable automatic download when they match a block filter
def on_podcast_updated(self, podcast):
self.filter_podcast(podcast, False)
# re-filter episodes after changing filters
def refilter_podcast(self, widget):
if self.filter_podcast(self.channel, True):
self.channel.db.commit()
self.gpodder.update_episode_list_model()
self.ui_object.main_window.destroy()
# compare filter pattern to episode title
def compare(self, title, pattern, regexp, ignore_case):
if regexp is not False:
return regexp.search(title)
elif ignore_case:
return (pattern.casefold() in title.casefold())
else:
return (pattern in title)
# filter episodes that aren't downloaded or deleted
def filter_podcast(self, podcast, mark_new):
f = self.find_filter(podcast.url)
if f is not None:
block = f.get('block', False)
allow = f.get('allow', False)
block_ic = True if block is not False and f.get('block_ic', False) else False
allow_ic = True if allow is not False and f.get('allow_ic', False) else False
block_re = re.compile(block, re.IGNORECASE if block_ic else False) if block is not False and f.get('block_re', False) else False
allow_re = re.compile(allow, re.IGNORECASE if allow_ic else False) if allow is not False and f.get('allow_re', False) else False
else:
block = False
allow = False
changes = False
for e in podcast.get_episodes(gpodder.STATE_NORMAL):
if allow is not False and self.compare(e.title, allow, allow_re, allow_ic):
# allow episode
if mark_new and not e.is_new:
e.mark_new()
changes = True
continue
if block is not False and self.compare(e.title, block, block_re, block_ic):
# block episode - mark as old to disable automatic download
if e.is_new:
e.mark_old()
changes = True
continue
if mark_new and not e.is_new:
e.mark_new()
changes = True
return changes
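# Illustrative sketch (hypothetical URL and patterns): the shape of one entry
# in the 'filters' config list that key() and filter_podcast() read above.
# Only the recognized keys are shown: url, block/allow, plus the optional
# *_re (regular expression) and *_ic (ignore case) flags.
EXAMPLE_FILTER_ENTRY = {
    'url': 'https://example.com/feed.xml',  # podcast channel URL
    'block': 'trailer',                     # block titles containing this text
    'block_ic': True,                       # ...matching case-insensitively
    'allow': 'finale',                      # but never block matching titles
}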
| 11,775
|
Python
|
.py
| 255
| 35.137255
| 140
| 0.593102
|
gpodder/gpodder
| 1,268
| 204
| 307
|
GPL-3.0
|
9/5/2024, 5:10:30 PM (Europe/Amsterdam)
|
8,351
|
sonos.py
|
gpodder_gpodder/share/gpodder/extensions/sonos.py
|
# -*- coding: utf-8 -*-
# Extension script to stream podcasts to Sonos speakers
# Requirements: gPodder 3.x and the soco module >= 0.7 (https://pypi.python.org/pypi/soco)
# (c) 2013-01-19 Stefan Kögl <stefan@skoegl.net>
# Released under the same license terms as gPodder itself.
import logging
from functools import partial
import requests
import gpodder
import soco
_ = gpodder.gettext
logger = logging.getLogger(__name__)
__title__ = _('Stream to Sonos')
__description__ = _('Stream podcasts to Sonos speakers')
__authors__ = 'Stefan Kögl <stefan@skoegl.net>'
__category__ = 'interface'
__only_for__ = 'gtk'
def SONOS_CAN_PLAY(e):
return 'audio' in e.file_type()
class gPodderExtension:
def __init__(self, container):
speakers = soco.discover()
logger.info('Found Sonos speakers: %s' % ', '.join(name.player_name for name in speakers))
self.speakers = {}
for speaker in speakers:
try:
info = speaker.get_speaker_info()
except requests.ConnectionError:
# ignore speakers we can't connect to
continue
name = info.get('zone_name', None)
uid = speaker.uid
# devices that do not have a name are probably bridges
if name:
self.speakers[uid] = speaker
def _stream_to_speaker(self, speaker_uid, episodes):
""" Play or enqueue selected episodes """
urls = [episode.url for episode in episodes if SONOS_CAN_PLAY(episode)]
logger.info('Streaming to Sonos %s: %s' % (self.speakers[speaker_uid].ip_address, ', '.join(urls)))
controller = self.speakers[speaker_uid].group.coordinator
# enqueue and play
for episode in episodes:
controller.play_uri(episode.url)
episode.playback_mark()
controller.play()
def on_episodes_context_menu(self, episodes):
""" Adds a context menu for each Sonos speaker group """
# Only show context menu if we can play at least one file
if not any(SONOS_CAN_PLAY(e) for e in episodes):
return []
menu_entries = []
for uid in list(self.speakers.keys()):
callback = partial(self._stream_to_speaker, uid)
controller = self.speakers[uid]
is_grouped = ' (Grouped)' if len(controller.group.members) > 1 else ''
name = controller.group.label + is_grouped
item = ('/'.join((_('Stream to Sonos'), name)), callback)
menu_entries.append(item)
# Remove any duplicate group names. I doubt Sonos allows duplicate speaker names,
# but we do initially get duplicated group names with the loop above
return list(dict(menu_entries).items())
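# Illustrative sketch (hypothetical stream URL): the soco calls that
# _stream_to_speaker() builds on, outside the menu machinery. discover(),
# group.coordinator and play_uri() are the same calls used above.
def example_play_on_any_speaker(url):
    speakers = soco.discover()
    if not speakers:
        return False
    controller = next(iter(speakers)).group.coordinator
    controller.play_uri(url)
    return True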
| 2,777
|
Python
|
.py
| 61
| 37.327869
| 107
| 0.636668
|
gpodder/gpodder
| 1,268
| 204
| 307
|
GPL-3.0
|
9/5/2024, 5:10:30 PM (Europe/Amsterdam)
|
8,352
|
notification-win32.py
|
gpodder_gpodder/share/gpodder/extensions/notification-win32.py
|
# -*- coding: utf-8 -*-
#
# gPodder - A media aggregator and podcast client
# Copyright (c) 2005-2018 The gPodder Team
#
# gPodder is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# gPodder is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Notification implementation for Windows
# Sean Munkel; 2012-12-29
"""
Current state (2018/07/29 ELL):
- I can't get pywin32 to work in msys2 (the platform used for this python3/gtk3 installer)
so existing code using COM doesn't work.
- Gio.Notification is not implemented on windows yet.
see https://bugzilla.gnome.org/show_bug.cgi?id=776583
- Gtk.StatusIcon with a context works but is deprecated. Showing a balloon using set_tooltip_markup
doesn't work.
See https://github.com/afiskon/py-gtk-example
- hexchat have implemented a solid c++ solution.
See https://github.com/hexchat/hexchat/tree/master/src/fe-gtk/notifications
I've chosen to implement notifications by calling a PowerShell script invoking
Windows Toast Notification API or Balloon Notification as fallback.
It's tested on Win7 32bit and Win10 64bit VMs from modern.ie
So we have a working solution until Gio.Notification is implemented on Windows.
"""
import logging
import os
import os.path
import subprocess
import sys
import tempfile
import gpodder
logger = logging.getLogger(__name__)
_ = gpodder.gettext
__title__ = _('Notification Bubbles for Windows')
__description__ = _('Display notification bubbles for different events.')
__authors__ = 'Sean Munkel <SeanMunkel@gmail.com>'
__category__ = 'desktop-integration'
__mandatory_in__ = 'win32'
__only_for__ = 'win32'
class gPodderExtension(object):
def __init__(self, *args):
gpodder_script = sys.argv[0]
gpodder_script = os.path.realpath(gpodder_script)
self._icon = os.path.join(os.path.dirname(gpodder_script), "gpodder.ico")
def on_notification_show(self, title, message):
script = """
try {{
if ([Environment]::OSVersion.Version -ge (new-object 'Version' 10,0,10240)) {{
# use Windows 10 Toast notification
[Windows.UI.Notifications.ToastNotificationManager, Windows.UI.Notifications, ContentType = WindowsRuntime] | Out-Null
[Windows.UI.Notifications.ToastNotification, Windows.UI.Notifications, ContentType = WindowsRuntime] | Out-Null
[Windows.Data.Xml.Dom.XmlDocument, Windows.Data.Xml.Dom.XmlDocument, ContentType = WindowsRuntime] | Out-Null
# Need a real AppID (see https://stackoverflow.com/q/46814858)
                # use gPodder app id if it's installed, otherwise use PowerShell's AppID
try {{
$gpo_appid = Get-StartApps -Name "gpodder"
}} catch {{
write-host "Get-StartApps not available"
$gpo_appid = $null
}}
if ($gpo_appid -ne $null) {{
$APP_ID = $gpo_appid[0].AppID
}} else {{
$APP_ID = '{{1AC14E77-02E7-4E5D-B744-2EB1AE5198B7}}\\WindowsPowerShell\\v1.0\\powershell.exe'
}}
$template = @"
<toast activationType="protocol" launch="" duration="long">
<visual>
<binding template="ToastGeneric">
<image placement="appLogoOverride" src="{icon}" />
<text><![CDATA[{title}]]></text>
<text><![CDATA[{message}]]></text>
</binding>
</visual>
<audio silent="true" />
</toast>
"@
$xml = New-Object Windows.Data.Xml.Dom.XmlDocument
$xml.LoadXml($template)
$toast = New-Object Windows.UI.Notifications.ToastNotification $xml
[Windows.UI.Notifications.ToastNotificationManager]::CreateToastNotifier($APP_ID).Show($toast)
Remove-Item -LiteralPath $MyInvocation.MyCommand.Path -Force # Delete this script temp file.
}} else {{
# use older Balloon notification when not on Windows 10
[System.Reflection.Assembly]::LoadWithPartialName("System.Windows.Forms")
$o = New-Object System.Windows.Forms.NotifyIcon
$o.Icon = "{icon}"
$o.BalloonTipIcon = "None"
$o.BalloonTipText = @"
{message}
"@
$o.BalloonTipTitle = @"
{title}
"@
$o.Visible = $True
$Delay = 10 # Delay value in seconds.
$o.ShowBalloonTip($Delay*1000)
Start-Sleep -s $Delay
$o.Dispose()
Remove-Item -LiteralPath $MyInvocation.MyCommand.Path -Force # Delete this script temp file.
}}
}} catch {{
write-host "Caught an exception:"
write-host "Exception Type: $($_.Exception.GetType().FullName)"
write-host "Exception Message: $($_.Exception.Message)"
exit 1
}}
""".format(icon=self._icon, message=message, title=title)
fh, path = tempfile.mkstemp(suffix=".ps1")
with open(fh, "w", encoding="utf_8_sig") as f:
f.write(script)
try:
# hide powershell command window using startupinfo
startupinfo = subprocess.STARTUPINFO()
startupinfo.dwFlags = subprocess.CREATE_NEW_CONSOLE | subprocess.STARTF_USESHOWWINDOW
startupinfo.wShowWindow = subprocess.SW_HIDE
# to run 64bit powershell on Win10 64bit when running from 32bit gPodder
# (we need 64bit powershell on Win10 otherwise Get-StartApps is not available)
powershell = r"{}\sysnative\WindowsPowerShell\v1.0\powershell.exe".format(os.environ["SystemRoot"])
if not os.path.exists(powershell):
powershell = "powershell.exe"
subprocess.Popen([powershell,
"-ExecutionPolicy", "Bypass", "-File", path],
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
startupinfo=startupinfo)
except subprocess.CalledProcessError as e:
logger.error("Error in on_notification_show(title=%r, message=%r):\n"
"\t%r exit code %i\n\tstdout=%s\n\tstderr=%s",
title, message, e.cmd, e.returncode, e.stdout, e.stderr)
except FileNotFoundError:
logger.error("Error in on_notification_show(title=%r, message=%r): %s not found",
title, message, powershell)
def on_unload(self):
pass
| 6,638
|
Python
|
.py
| 146
| 38.89726
| 126
| 0.674483
|
gpodder/gpodder
| 1,268
| 204
| 307
|
GPL-3.0
|
9/5/2024, 5:10:30 PM (Europe/Amsterdam)
|
8,353
|
subscription_stats.py
|
gpodder_gpodder/share/gpodder/extensions/subscription_stats.py
|
# -*- coding: utf-8 -*-
# Show publishing statistics for subscriptions.
# Released under the same license terms as gPodder itself.
import time
from time import localtime, strftime
import gpodder
from gpodder import config
import gi # isort:skip
gi.require_version('Gtk', '3.0')  # isort:skip
from gi.repository import Gtk  # isort:skip
_ = gpodder.gettext
__title__ = _('Subscription Statistics')
__description__ = _('Show publishing statistics for subscriptions.')
__only_for__ = 'gtk'
__doc__ = 'https://gpodder.github.io/docs/extensions/subscription_stats.html'
__authors__ = 'Brand Huntsman <http://qzx.com/mail/> and \
Nuno Dias <Nuno.Dias+gpodder@gmail.com>'
class gPodderExtension:
def __init__(self, container):
self.container = container
def on_ui_object_available(self, name, ui_object):
if name == 'gpodder-gtk':
self.gpodder = ui_object
def on_create_menu(self):
# extras menu
return [(_("Subscription Statistics"), self.open_dialog)]
def add_page(self, notebook, category, channels):
scrolled = Gtk.ScrolledWindow()
store = Gtk.ListStore(str, float, str, str, int)
for average, name, edate, paused in channels:
last = strftime('%x', localtime(edate))
store.append([
('%.1f' % round(average, 1)) if average > 0 else '?',
average, ('❚❚ ' if paused else '') + name, last, edate,
])
tree = Gtk.TreeView(model=store)
scrolled.add(tree)
lastcell = Gtk.CellRendererText()
lastcolumn = Gtk.TreeViewColumn(_('Updated'))
lastcolumn.set_sort_column_id(4)
lastcolumn.pack_end(lastcell, True)
lastcolumn.add_attribute(lastcell, 'text', 3)
tree.append_column(lastcolumn)
dayscell = Gtk.CellRendererText()
dayscell.set_property('xalign', 1)
dayscolumn = Gtk.TreeViewColumn(_('Days'))
dayscolumn.set_sort_column_id(1)
dayscolumn.pack_start(dayscell, True)
dayscolumn.add_attribute(dayscell, 'text', 0)
tree.append_column(dayscolumn)
channelcell = Gtk.CellRendererText()
channelcolumn = Gtk.TreeViewColumn(_('Podcast'))
channelcolumn.set_sort_column_id(2)
channelcolumn.pack_start(channelcell, True)
channelcolumn.add_attribute(channelcell, 'text', 2)
channelcolumn.set_expand(True)
tree.append_column(channelcolumn)
notebook.append_page(scrolled,
Gtk.Label('%d %s' % (len(channels), category)))
def open_dialog(self):
db = self.gpodder.db
# get all channels
channels = []
with db.lock:
cur = db.cursor()
cur.execute(
'SELECT id, title, pause_subscription FROM %s'
% db.TABLE_PODCAST)
while True:
row = cur.fetchone()
if row is None:
break
channels.append(row)
cur.close()
# get average time between episodes per channel
now = int(time.time())
nr_paused = 0
daily = []
weekly = []
monthly = []
yearly = []
for channel_id, channel_name, paused in channels:
if paused:
nr_paused += 1
total = 0
nr_episodes = 0
prev = now
with db.lock:
cur = db.cursor()
cur.execute('SELECT published FROM %s WHERE podcast_id = %d \
ORDER BY published DESC LIMIT 25'
% (db.TABLE_EPISODE, channel_id))
while True:
row = cur.fetchone()
if row is None:
break
if total == 0:
edate = row[0]
total += (prev - row[0])
nr_episodes += 1
prev = row[0]
cur.close()
average = (total / nr_episodes) / (24 * 60 * 60) \
if nr_episodes > 0 else 0
if average == 0:
yearly.append([average, channel_name, edate, paused])
elif average <= 2:
daily.append([average, channel_name, edate, paused])
elif average <= 14:
weekly.append([average, channel_name, edate, paused])
elif average <= 61:
monthly.append([average, channel_name, edate, paused])
else:
yearly.append([average, channel_name, edate, paused])
# sort by averages
daily.sort(key=lambda e: e[0])
weekly.sort(key=lambda e: e[0])
monthly.sort(key=lambda e: e[0])
yearly.sort(key=lambda e: e[0])
# open dialog
dlg = Gtk.Dialog(_('Subscription Statistics'),
self.gpodder.main_window)
dlg.set_default_size(420, 600)
dlg.set_resizable(True)
box = Gtk.Box(orientation=Gtk.Orientation.VERTICAL)
box.set_border_width(0)
label = Gtk.Label(_('%d subscriptions (%d paused)') % (len(channels),
nr_paused))
label.set_padding(0, 10)
box.add(label)
notebook = Gtk.Notebook()
notebook.set_vexpand(True)
notebook.set_scrollable(True)
notebook.set_show_border(False)
self.add_page(notebook, _('daily'), daily)
self.add_page(notebook, _('weekly'), weekly)
self.add_page(notebook, _('monthly'), monthly)
self.add_page(notebook, _('yearly'), yearly)
box.add(notebook)
conf = config.Config(gpodder.config_file)
label = Gtk.Label(_('Average days between the last %d episodes.') %
(conf.limit.episodes if conf.limit.episodes < 25
else 25))
label.set_line_wrap(True)
label.set_padding(0, 5)
box.add(label)
button = dlg.add_button(_('_Close'), Gtk.ResponseType.OK)
button.set_margin_right(5)
button.set_margin_bottom(5)
dlg.connect("response", lambda w, r: dlg.destroy())
dlg.vbox.pack_start(box, True, True, 0)
dlg.vbox.show_all()
dlg.show()
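# Illustrative sketch (timestamps are hypothetical epoch seconds, newest
# first, as returned by the SQL query above): the interval averaging done in
# open_dialog(), reduced to a pure function returning days between episodes.
def example_average_days_between(published_timestamps, now):
    if not published_timestamps:
        return 0
    total = 0
    prev = now
    for published in published_timestamps:
        total += (prev - published)
        prev = published
    return (total / len(published_timestamps)) / (24 * 60 * 60)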
| 6,324
|
Python
|
.py
| 153
| 29.915033
| 77
| 0.557927
|
gpodder/gpodder
| 1,268
| 204
| 307
|
GPL-3.0
|
9/5/2024, 5:10:30 PM (Europe/Amsterdam)
|
8,354
|
rockbox_coverart.py
|
gpodder_gpodder/share/gpodder/extensions/rockbox_coverart.py
|
# Copies cover art to a file-based device
#
# (c) 2014-04-10 Alex Mayer <magictrick4906@aim.com>
# Released under the same license terms as gPodder itself.
# Use a logger for debug output - this will be managed by gPodder
import logging
import os
import shutil
import gpodder
logger = logging.getLogger(__name__)
_ = gpodder.gettext
# Provide some metadata that will be displayed in the gPodder GUI
__title__ = _('Rockbox Cover Art Sync')
__description__ = _('Copy Cover Art To Rockboxed Media Player')
__only_for__ = 'gtk, cli'
__authors__ = 'Alex Mayer <magictrick4906@aim.com>'
DefaultConfig = {
"art_name_on_device": "cover.jpg" # The file name that will be used on the device for cover art
}
class gPodderExtension:
def __init__(self, container):
self.container = container
self.config = self.container.config
def on_episode_synced(self, device, episode):
# check that we have the functions we need
if hasattr(device, 'get_episode_folder_on_device'):
# get the file and folder names we need
episode_folder = os.path.dirname(episode.local_filename(False))
device_folder = device.get_episode_folder_on_device(episode)
episode_art = os.path.join(episode_folder, "folder.jpg")
device_art = os.path.join(device_folder, self.config.art_name_on_device)
# make sure we have art to copy and it doesn't already exist
if os.path.isfile(episode_art) and not os.path.isfile(device_art):
logger.info('Syncing cover art for %s', episode.channel.title)
# copy and rename art
shutil.copy(episode_art, device_art)
| 1,688
|
Python
|
.py
| 36
| 40.722222
| 100
| 0.684531
|
gpodder/gpodder
| 1,268
| 204
| 307
|
GPL-3.0
|
9/5/2024, 5:10:30 PM (Europe/Amsterdam)
|
8,355
|
ubuntu_unity.py
|
gpodder_gpodder/share/gpodder/extensions/ubuntu_unity.py
|
# -*- coding: utf-8 -*-
# Ubuntu Unity Launcher Integration
# Thomas Perl <thp@gpodder.org>; 2012-02-06
import logging
import gpodder
import gi # isort:skip
gi.require_version('Unity', '7.0') # isort:skip
from gi.repository import GLib, Unity # isort:skip
_ = gpodder.gettext
logger = logging.getLogger(__name__)
__title__ = _('Ubuntu Unity Integration')
__description__ = _('Show download progress in the Unity Launcher icon.')
__authors__ = 'Thomas Perl <thp@gpodder.org>'
__category__ = 'desktop-integration'
__only_for__ = 'unity'
__mandatory_in__ = 'unity'
__disable_in__ = 'win32'
class LauncherEntry:
FILENAME = 'gpodder.desktop'
def __init__(self):
self.launcher = Unity.LauncherEntry.get_for_desktop_id(
self.FILENAME)
def set_count(self, count):
self.launcher.set_property('count', count)
self.launcher.set_property('count_visible', count > 0)
def set_progress(self, progress):
self.launcher.set_property('progress', progress)
self.launcher.set_property('progress_visible', 0. <= progress < 1.)
class gPodderExtension:
FILENAME = 'gpodder.desktop'
def __init__(self, container):
self.container = container
self.launcher_entry = None
def on_load(self):
logger.info('Starting Ubuntu Unity Integration.')
self.launcher_entry = LauncherEntry()
def on_unload(self):
self.launcher_entry = None
def on_download_progress(self, progress):
GLib.idle_add(self.launcher_entry.set_progress, float(progress))
| 1,562
|
Python
|
.py
| 40
| 34.2
| 75
| 0.684176
|
gpodder/gpodder
| 1,268
| 204
| 307
|
GPL-3.0
|
9/5/2024, 5:10:30 PM (Europe/Amsterdam)
|
8,356
|
rename_download.py
|
gpodder_gpodder/share/gpodder/extensions/rename_download.py
|
# -*- coding: utf-8 -*-
# Rename files after download based on the episode title
# Copyright (c) 2011-04-04 Thomas Perl <thp.io>
# Licensed under the same terms as gPodder itself
import logging
import os
import gpodder
from gpodder import util
from gpodder.model import PodcastEpisode
logger = logging.getLogger(__name__)
_ = gpodder.gettext
N_ = gpodder.ngettext
__title__ = _('Rename episodes after download')
__description__ = _('Rename episodes to "<Episode Title>.<ext>" on download')
__authors__ = 'Bernd Schlapsi <brot@gmx.info>, Thomas Perl <thp@gpodder.org>'
__doc__ = 'https://gpodder.github.io/docs/extensions/renameafterdownload.html'
__payment__ = 'https://flattr.com/submit/auto?user_id=BerndSch&url=http://wiki.gpodder.org/wiki/Extensions/RenameAfterDownload'
__category__ = 'post-download'
DefaultConfig = {
'add_sortdate': False, # Add the sortdate as prefix
'add_podcast_title': False, # Add the podcast title as prefix
'sortdate_after_podcast_title': False, # put the sortdate after podcast title
}
class gPodderExtension:
def __init__(self, container):
self.container = container
self.gpodder = None
self.config = self.container.config
def on_episode_downloaded(self, episode):
current_filename = episode.local_filename(create=False)
new_filename = self.make_filename(current_filename, episode.title,
episode.sortdate, episode.channel.title)
if new_filename != current_filename:
logger.info('Renaming: %s -> %s', current_filename, new_filename)
os.rename(current_filename, new_filename)
util.rename_episode_file(episode, new_filename)
def on_ui_object_available(self, name, ui_object):
if name == 'gpodder-gtk':
self.gpodder = ui_object
def on_create_menu(self):
return [(_("Rename all downloaded episodes"), self.rename_all_downloaded_episodes)]
def rename_all_downloaded_episodes(self):
        episodes = [e for c in self.gpodder.channels for e in c.children if e.state == gpodder.STATE_DOWNLOADED]
number_of_episodes = len(episodes)
        if number_of_episodes == 0:
            self.gpodder.show_message(_('No downloaded episodes to rename'),
                                      _('Rename all downloaded episodes'), important=True)
            return
from gpodder.gtkui.interface.progress import ProgressIndicator
progress_indicator = ProgressIndicator(
_('Renaming all downloaded episodes'),
'', True, self.gpodder.get_dialog_parent(), number_of_episodes)
for episode in episodes:
self.on_episode_downloaded(episode)
if not progress_indicator.on_tick():
break
renamed_count = progress_indicator.tick_counter
progress_indicator.on_finished()
if renamed_count > 0:
self.gpodder.show_message(
N_('Renamed %(count)d downloaded episode',
'Renamed %(count)d downloaded episodes',
renamed_count) % {'count': renamed_count},
_('Rename all downloaded episodes'), important=True)
def make_filename(self, current_filename, title, sortdate, podcast_title):
dirname = os.path.dirname(current_filename)
filename = os.path.basename(current_filename)
basename, ext = os.path.splitext(filename)
new_basename = []
new_basename.append(title)
if self.config.sortdate_after_podcast_title:
if self.config.add_sortdate:
new_basename.insert(0, sortdate)
if self.config.add_podcast_title:
new_basename.insert(0, podcast_title)
else:
if self.config.add_podcast_title:
new_basename.insert(0, podcast_title)
if self.config.add_sortdate:
new_basename.insert(0, sortdate)
new_basename = ' - '.join(new_basename)
# Remove unwanted characters and shorten filename (#494)
# Also sanitize ext (see #591 where ext=.mp3?dest-id=754182)
new_basename, ext = util.sanitize_filename_ext(
new_basename,
ext,
PodcastEpisode.MAX_FILENAME_LENGTH,
PodcastEpisode.MAX_FILENAME_WITH_EXT_LENGTH)
new_filename = os.path.join(dirname, new_basename + ext)
if new_filename == current_filename:
return current_filename
for filename in util.generate_names(new_filename):
# Avoid filename collisions
if not os.path.exists(filename):
return filename
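# Illustrative sketch (hypothetical titles and date): the prefix ordering that
# make_filename() applies when add_podcast_title and add_sortdate are both
# enabled and sortdate_after_podcast_title keeps its default of False.
# Sanitization and collision handling from the real method are omitted.
def example_basename(title='Episode 1', sortdate='2024-01-31',
                     podcast_title='Example Show'):
    parts = [title]
    parts.insert(0, podcast_title)  # add_podcast_title
    parts.insert(0, sortdate)       # add_sortdate
    return ' - '.join(parts)        # '2024-01-31 - Example Show - Episode 1'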
| 4,640
|
Python
|
.py
| 94
| 40.010638
| 127
| 0.651626
|
gpodder/gpodder
| 1,268
| 204
| 307
|
GPL-3.0
|
9/5/2024, 5:10:30 PM (Europe/Amsterdam)
|
8,357
|
podverse.py
|
gpodder_gpodder/share/gpodder/extensions/podverse.py
|
# -*- coding: utf-8 -*-
# Searches podverse (podverse.fm) database for podcasts
# (c) 2024 Eric Le Lay <elelay.fr:contact>
# Released under the same license terms as gPodder itself.
# Inspired by gpodder-core plugin "podverse", by kirbylife <hola@kirbylife.dev>
# https://github.com/gpodder/gpodder-core/blob/master/src/gpodder/plugins/podverse.py
import logging
from urllib.parse import quote_plus
import requests
import gpodder
from gpodder.directory import PROVIDERS, DirectoryEntry, JustAWarning, Provider
from gpodder.util import urlopen
logger = logging.getLogger(__name__)
_ = gpodder.gettext
__title__ = _('Search Podverse')
__description__ = _('Search podverse podcast index')
__authors__ = 'Eric Le Lay <elelay.fr:contact>'
__doc__ = 'https://gpodder.github.io/docs/extensions/podverse.html'
class PodverseDirectoryProvider(Provider):
def __init__(self):
self.name = _('Podverse search')
self.kind = Provider.PROVIDER_SEARCH
self.icon = 'directory-podverse.png'
def on_search(self, query):
if len(query) < 3:
raise JustAWarning(_("Please provide at least 3 characters"))
# see https://api.podverse.fm/api/v1/swagger#operations-podcast-getPodcasts
json_url = f"https://api.podverse.fm/api/v1/podcast?page=1&searchTitle={quote_plus(query)}&sort=top-past-week"
response = urlopen(json_url, headers={"accept": "application/json"})
json_data = response.json()
if response.status_code != requests.codes.ok:
raise Exception(_("Error searching: %s") % json_data.get("message"))
# contrary to swagger api we get a [results_page, total_length] 2 element list
# See code in https://github.com/podverse/podverse-api/blob/develop/src/controllers/podcast.ts#L311
if isinstance(json_data, list) and len(json_data) == 2 and isinstance(json_data[0], list):
logger.debug("Search for %s yields %i results, of which we display %i",
query, json_data[1], len(json_data[0]))
json_data = json_data[0]
else:
logger.debug("Search for %s yields %i results", query, len(json_data))
return [
DirectoryEntry(e["title"],
e["feedUrls"][0]["url"],
image=e["imageUrl"],
description=e["description"])
for e in json_data if not e["credentialsRequired"]
]
class gPodderExtension:
""" (un)register a podverse search provider """
def __init__(self, container):
pass
def on_load(self):
logger.info('Registering Podverse.')
PROVIDERS.append(None)
PROVIDERS.append(PodverseDirectoryProvider)
def on_unload(self):
logger.info('Unregistering Podverse.')
try:
PROVIDERS.remove(PodverseDirectoryProvider)
except Exception:
logger.exception("Unable to remove PodverseDirectoryProvider")
| 2,972
|
Python
|
.py
| 61
| 40.803279
| 118
| 0.662059
|
gpodder/gpodder
| 1,268
| 204
| 307
|
GPL-3.0
|
9/5/2024, 5:10:30 PM (Europe/Amsterdam)
|
8,358
|
audio_converter.py
|
gpodder_gpodder/share/gpodder/extensions/audio_converter.py
|
# -*- coding: utf-8 -*-
# Converts m4a audio files to mp3
# This requires ffmpeg to be installed. Also works as a context
# menu item for already-downloaded files.
#
# (c) 2011-11-23 Bernd Schlapsi <brot@gmx.info>
# Released under the same license terms as gPodder itself.
import logging
import os
import subprocess
import gpodder
from gpodder import util
logger = logging.getLogger(__name__)
_ = gpodder.gettext
__title__ = _('Convert audio files')
__description__ = _('Transcode audio files to mp3/ogg')
__authors__ = 'Bernd Schlapsi <brot@gmx.info>, Thomas Perl <thp@gpodder.org>'
__doc__ = 'https://gpodder.github.io/docs/extensions/audioconverter.html'
__payment__ = 'https://flattr.com/submit/auto?user_id=BerndSch&url=http://wiki.gpodder.org/wiki/Extensions/AudioConverter'
__category__ = 'post-download'
DefaultConfig = {
'use_opus': False, # Set to True to convert to .opus
'use_ogg': False, # Set to True to convert to .ogg
'context_menu': True, # Show the conversion option in the context menu
}
class gPodderExtension:
MIME_TYPES = ('audio/x-m4a', 'audio/mp4', 'audio/mp4a-latm', 'audio/mpeg', 'audio/ogg', 'audio/opus')
EXT = ('.m4a', '.ogg', '.opus', '.mp3')
CMD = {'avconv': {'.mp3': ['-n', '-i', '%(old_file)s', '-q:a', '2', '-id3v2_version', '3', '-write_id3v1', '1', '%(new_file)s'],
'.ogg': ['-n', '-i', '%(old_file)s', '-q:a', '2', '%(new_file)s'],
'.opus': ['-n', '-i', '%(old_file)s', '-b:a', '64k', '%(new_file)s']
},
'ffmpeg': {'.mp3': ['-n', '-i', '%(old_file)s', '-q:a', '2', '-id3v2_version', '3', '-write_id3v1', '1', '%(new_file)s'],
'.ogg': ['-n', '-i', '%(old_file)s', '-q:a', '2', '%(new_file)s'],
'.opus': ['-n', '-i', '%(old_file)s', '-b:a', '64k', '%(new_file)s']
}
}
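    # Rough illustration (not part of the extension): with the ffmpeg entry above,
    # converting a hypothetical /tmp/episode.m4a to .mp3 expands to approximately:
    #   ffmpeg -n -i /tmp/episode.m4a -q:a 2 -id3v2_version 3 -write_id3v1 1 /tmp/episode.mp3
    # (see _convert_episode(), which substitutes %(old_file)s and %(new_file)s).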
def __init__(self, container):
self.container = container
self.config = self.container.config
# Dependency checks
self.command = self.container.require_any_command(['avconv', 'ffmpeg'])
# extract command without extension (.exe on Windows) from command-string
self.command_without_ext = os.path.basename(os.path.splitext(self.command)[0])
def on_episode_downloaded(self, episode):
self._convert_episode(episode)
def _get_new_extension(self):
if self.config.use_ogg:
extension = '.ogg'
elif self.config.use_opus:
extension = '.opus'
else:
extension = '.mp3'
return extension
def _check_source(self, episode):
if episode.extension() == self._get_new_extension():
return False
if episode.mime_type in self.MIME_TYPES:
return True
# Also check file extension (bug 1770)
if episode.extension() in self.EXT:
return True
return False
def on_episodes_context_menu(self, episodes):
if not self.config.context_menu:
return None
if not all(e.was_downloaded(and_exists=True) for e in episodes):
return None
if not any(self._check_source(episode) for episode in episodes):
return None
menu_item = _('Convert to %(format)s') % {'format': self._target_format()}
return [(menu_item, self._convert_episodes)]
def _target_format(self):
if self.config.use_ogg:
target_format = 'OGG'
elif self.config.use_opus:
target_format = 'OPUS'
else:
target_format = 'MP3'
return target_format
def _convert_episode(self, episode):
if not self._check_source(episode):
return
new_extension = self._get_new_extension()
old_filename = episode.local_filename(create=False)
filename, old_extension = os.path.splitext(old_filename)
new_filename = filename + new_extension
cmd_param = self.CMD[self.command_without_ext][new_extension]
cmd = [self.command] + \
[param % {'old_file': old_filename, 'new_file': new_filename}
for param in cmd_param]
if gpodder.ui.win32:
ffmpeg = util.Popen(cmd)
ffmpeg.wait()
stdout, stderr = ("<unavailable>",) * 2
else:
ffmpeg = util.Popen(cmd, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout, stderr = ffmpeg.communicate()
if ffmpeg.returncode == 0:
util.rename_episode_file(episode, new_filename)
os.remove(old_filename)
logger.info('Converted audio file to %(format)s.' % {'format': new_extension})
gpodder.user_extensions.on_notification_show(_('File converted'), episode.title)
else:
logger.warning('Error converting audio file: %s / %s', stdout, stderr)
gpodder.user_extensions.on_notification_show(_('Conversion failed'), episode.title)
def _convert_episodes(self, episodes):
        # Not running in the background because there would be no feedback about
        # which episode is being converted, and nothing prevents the user from
        # clicking convert twice.
for episode in episodes:
self._convert_episode(episode)
| 5,297
|
Python
|
.py
| 112
| 38.25
| 132
| 0.596236
|
gpodder/gpodder
| 1,268
| 204
| 307
|
GPL-3.0
|
9/5/2024, 5:10:30 PM (Europe/Amsterdam)
|
8,359
|
command_on_download.py
|
gpodder_gpodder/share/gpodder/extensions/command_on_download.py
|
# -*- coding: utf-8 -*-
#
# gPodder extension for running a command on successful episode download
#
import datetime
import logging
import os
import gpodder
from gpodder import util
logger = logging.getLogger(__name__)
_ = gpodder.gettext
__title__ = _('Run a Command on Download')
__description__ = _('Run a predefined external command upon download completion.')
__authors__ = 'Eric Le Lay <elelay@macports.org>'
__doc__ = 'https://gpodder.github.io/docs/extensions/commandondownload.html'
__category__ = 'post-download'
__only_for__ = 'gtk, cli'
DefaultConfig = {
'command': "zenity --info --width=600 --text=\"file=$filename "
"podcast=$podcast title=$title published=$published "
"section=$section playlist_title=$playlist_title\""
}
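# Example only: any shell command works here, since run_command() exports the
# fields as environment variables. A hypothetical minimal alternative:
#
# DefaultConfig = {
#     'command': 'notify-send "$podcast" "$title ($filename)"'
# }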
class gPodderExtension:
def __init__(self, container):
self.container = container
def on_episode_downloaded(self, episode):
cmd_template = self.container.config.command
info = self.read_episode_info(episode)
if info is None:
return
self.run_command(cmd_template, info)
def read_episode_info(self, episode):
filename = episode.local_filename(create=False, check_only=True)
if filename is None:
logger.warning("%s: missing episode filename", __title__)
return None
info = {
'filename': filename,
'playlist_title': None,
'podcast': None,
'published': None,
'section': None,
'title': None,
}
info['podcast'] = episode.channel.title
info['title'] = episode.title
info['section'] = episode.channel.section
published = datetime.datetime.fromtimestamp(episode.published)
info['published'] = published.strftime('%Y-%m-%d %H:%M')
info['playlist_title'] = episode.playlist_title()
return info
def run_command(self, command, info):
env = os.environ.copy()
env.update(info)
proc = util.Popen(command, shell=True, env=env, close_fds=True)
proc.wait()
if proc.returncode == 0:
logger.info("%s succeeded", command)
else:
logger.warning("%s run with exit code %i", command, proc.returncode)
| 2,287
|
Python
|
.py
| 60
| 30.883333
| 82
| 0.631841
|
gpodder/gpodder
| 1,268
| 204
| 307
|
GPL-3.0
|
9/5/2024, 5:10:30 PM (Europe/Amsterdam)
|
8,360
|
rm_ogg_cover.py
|
gpodder_gpodder/share/gpodder/extensions/rm_ogg_cover.py
|
# -*- coding: utf-8 -*-
####
# 01/2011 Bernd Schlapsi <brot@gmx.info>
#
# This script is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# gPodder is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Dependencies:
# * python-mutagen (Mutagen is a Python module to handle audio metadata)
#
# This extension script removes cover art from all downloaded ogg files.
# The reason for this script is that my media player (MEIZU SL6)
# couldn't handle ogg files with included coverart
import logging
import os
from mutagen.oggvorbis import OggVorbis
import gpodder
logger = logging.getLogger(__name__)
_ = gpodder.gettext
__title__ = _('Remove cover art from OGG files')
__description__ = _('Removes cover art from all downloaded ogg files')
__authors__ = 'Bernd Schlapsi <brot@gmx.info>'
__doc__ = 'https://gpodder.github.io/docs/extensions/removeoggcover.html'
__payment__ = 'https://flattr.com/submit/auto?user_id=BerndSch&url=http://wiki.gpodder.org/wiki/Extensions/RemoveOGGCover'
__category__ = 'post-download'
DefaultConfig = {
'context_menu': True, # Show item in context menu
}
class gPodderExtension:
def __init__(self, container):
self.container = container
self.config = self.container.config
def on_episode_downloaded(self, episode):
self.rm_ogg_cover(episode)
def on_episodes_context_menu(self, episodes):
if not self.config.context_menu:
return None
episode_types = [e.mime_type for e in episodes
if e.mime_type is not None and e.file_exists()]
if 'audio/ogg' not in episode_types:
return None
return [(_('Remove cover art'), self._rm_ogg_covers)]
def _rm_ogg_covers(self, episodes):
for episode in episodes:
self.rm_ogg_cover(episode)
def rm_ogg_cover(self, episode):
filename = episode.local_filename(create=False)
if filename is None:
return
basename, extension = os.path.splitext(filename)
if episode.file_type() != 'audio':
return
if extension.lower() != '.ogg':
return
try:
ogg = OggVorbis(filename)
found = False
for key in ogg.keys():
if key.startswith('cover'):
found = True
ogg.pop(key)
if found:
logger.info('Removed cover art from OGG file: %s', filename)
ogg.save()
except Exception as e:
logger.warning('Failed to remove OGG cover: %s', e, exc_info=True)
| 3,093
|
Python
|
.py
| 76
| 34.276316
| 122
| 0.665665
|
gpodder/gpodder
| 1,268
| 204
| 307
|
GPL-3.0
|
9/5/2024, 5:10:30 PM (Europe/Amsterdam)
|
8,361
|
gtk_statusicon.py
|
gpodder_gpodder/share/gpodder/extensions/gtk_statusicon.py
|
# -*- coding: utf-8 -*-
#
# Gtk Status Icon (gPodder bug 1495)
# Thomas Perl <thp@gpodder.org>; 2012-07-31
#
import logging
import os.path
from gi.repository import GdkPixbuf, Gtk
import gpodder
from gpodder.gtkui import draw
logger = logging.getLogger(__name__)
_ = gpodder.gettext
__title__ = _('Gtk Status Icon')
__description__ = _('Show a status icon for Gtk-based Desktops.')
__category__ = 'desktop-integration'
__only_for__ = 'gtk'
__disable_in__ = 'unity,win32'
DefaultConfig = {
'download_progress_bar': False, # draw progress bar on icon while downloading?
}
class gPodderExtension:
def __init__(self, container):
self.container = container
self.config = self.container.config
self.status_icon = None
self.icon_name = None
self.gpodder = None
self.last_progress = 1
def set_icon(self, use_pixbuf=False):
path = os.path.join(os.path.dirname(__file__), '..', '..', 'icons')
icon_path = os.path.abspath(path)
theme = Gtk.IconTheme.get_default()
theme.append_search_path(icon_path)
if self.icon_name is None:
if theme.has_icon('gpodder'):
self.icon_name = 'gpodder'
else:
self.icon_name = 'stock_mic'
if self.status_icon is None:
self.status_icon = Gtk.StatusIcon.new_from_icon_name(self.icon_name)
return
# If current mode matches desired mode, nothing to do.
is_pixbuf = (self.status_icon.get_storage_type() == Gtk.ImageType.PIXBUF)
if is_pixbuf == use_pixbuf:
return
if not use_pixbuf:
self.status_icon.set_from_icon_name(self.icon_name)
else:
# Currently icon is not a pixbuf => was loaded by name, at which
# point size was automatically determined.
icon_size = self.status_icon.get_size()
icon_pixbuf = theme.load_icon(self.icon_name, icon_size, Gtk.IconLookupFlags.USE_BUILTIN)
self.status_icon.set_from_pixbuf(icon_pixbuf)
def on_load(self):
self.set_icon()
self.status_icon.connect('activate', self.on_toggle_visible)
self.status_icon.set_has_tooltip(True)
self.status_icon.set_tooltip_text("gPodder")
def on_toggle_visible(self, status_icon):
if self.gpodder is None:
return
visibility = self.gpodder.main_window.get_visible()
self.gpodder.main_window.set_visible(not visibility)
def on_unload(self):
if self.status_icon is not None:
self.status_icon.set_visible(False)
self.status_icon = None
self.icon_name = None
def on_ui_object_available(self, name, ui_object):
if name == 'gpodder-gtk':
self.gpodder = ui_object
def get_icon_pixbuf(self):
assert self.status_icon is not None
if self.status_icon.get_storage_type() != Gtk.ImageType.PIXBUF:
self.set_icon(use_pixbuf=True)
return self.status_icon.get_pixbuf()
def on_download_progress(self, progress):
logger.debug("download progress: %f", progress)
if not self.config.download_progress_bar:
# reset the icon in case option was turned off during download
if self.last_progress < 1:
self.last_progress = 1
self.set_icon()
# in any case, we're now done
return
if progress == 1:
self.set_icon() # no progress bar
self.last_progress = progress
return
# Only update in 3-percent-steps to save some resources
if abs(progress - self.last_progress) < 0.03 and progress > self.last_progress:
return
icon = self.get_icon_pixbuf().copy()
progressbar = draw.progressbar_pixbuf(icon.get_width(), icon.get_height(), progress)
progressbar.composite(icon, 0, 0, icon.get_width(), icon.get_height(), 0, 0, 1, 1, GdkPixbuf.InterpType.NEAREST, 255)
self.status_icon.set_from_pixbuf(icon)
self.last_progress = progress
| 4,110
|
Python
|
.py
| 97
| 33.752577
| 125
| 0.627446
|
gpodder/gpodder
| 1,268
| 204
| 307
|
GPL-3.0
|
9/5/2024, 5:10:30 PM (Europe/Amsterdam)
|
8,362
|
ubuntu_appindicator.py
|
gpodder_gpodder/share/gpodder/extensions/ubuntu_appindicator.py
|
# -*- coding: utf-8 -*-
# Ubuntu AppIndicator Icon
# Thomas Perl <thp@gpodder.org>; 2012-02-24
import logging
from gi.repository import AppIndicator3 as appindicator
from gi.repository import Gtk
import gpodder
_ = gpodder.gettext
__title__ = _('Ubuntu App Indicator')
__description__ = _('Show a status indicator in the top bar.')
__authors__ = 'Thomas Perl <thp@gpodder.org>'
__category__ = 'desktop-integration'
__only_for__ = 'gtk'
__mandatory_in__ = 'unity'
__disable_in__ = 'win32'
logger = logging.getLogger(__name__)
DefaultConfig = {
'visible': True, # Set to False if you don't want to show the appindicator
}
class gPodderExtension:
def __init__(self, container):
self.container = container
self.config = container.config
self.indicator = None
self.gpodder = None
def on_load(self):
if self.config.visible:
self.indicator = appindicator.Indicator.new('gpodder', 'gpodder',
appindicator.IndicatorCategory.APPLICATION_STATUS)
self.indicator.set_status(appindicator.IndicatorStatus.ACTIVE)
def _rebuild_menu(self):
menu = Gtk.Menu()
toggle_visible = Gtk.CheckMenuItem(_('Show main window'))
toggle_visible.set_active(True)
def on_toggle_visible(menu_item):
if menu_item.get_active():
self.gpodder.main_window.show()
else:
self.gpodder.main_window.hide()
toggle_visible.connect('activate', on_toggle_visible)
menu.append(toggle_visible)
menu.append(Gtk.SeparatorMenuItem())
quit_gpodder = Gtk.MenuItem(_('Quit'))
def on_quit(menu_item):
self.gpodder.on_gPodder_delete_event(self.gpodder.main_window)
quit_gpodder.connect('activate', on_quit)
menu.append(quit_gpodder)
menu.show_all()
self.indicator.set_menu(menu)
def on_unload(self):
self.indicator = None
def on_ui_object_available(self, name, ui_object):
if name == 'gpodder-gtk':
self.gpodder = ui_object
self._rebuild_menu()
| 2,126
|
Python
|
.py
| 55
| 31.509091
| 79
| 0.648807
|
gpodder/gpodder
| 1,268
| 204
| 307
|
GPL-3.0
|
9/5/2024, 5:10:30 PM (Europe/Amsterdam)
|
8,363
|
episode_website_context_menu.py
|
gpodder_gpodder/share/gpodder/extensions/episode_website_context_menu.py
|
# -*- coding: utf-8 -*-
# Add a context menu to show the episode/podcast website (bug 1958)
# (c) 2014-10-20 Thomas Perl <thp.io/about>
# Released under the same license terms as gPodder itself.
import logging
import gpodder
from gpodder import util
logger = logging.getLogger(__name__)
_ = gpodder.gettext
__title__ = _('"Open website" episode and podcast context menu')
__description__ = _('Add a context menu item for opening the website of an episode or podcast')
__authors__ = 'Thomas Perl <thp@gpodder.org>'
__category__ = 'interface'
__only_for__ = 'gtk'
class gPodderExtension:
def __init__(self, container):
self.container = container
def has_website(self, episodes):
for episode in episodes:
if episode.link:
return True
def open_website(self, episodes):
for episode in episodes:
if episode.link:
util.open_website(episode.link)
def open_channel_website(self, channel):
util.open_website(channel.link)
def on_episodes_context_menu(self, episodes):
return [(_('Open website'), self.open_website if self.has_website(episodes) else None)]
def on_channel_context_menu(self, channel):
return [(_('Open website'), self.open_channel_website if channel.link else None)]
| 1,312
|
Python
|
.py
| 31
| 36.806452
| 95
| 0.680851
|
gpodder/gpodder
| 1,268
| 204
| 307
|
GPL-3.0
|
9/5/2024, 5:10:30 PM (Europe/Amsterdam)
|
8,364
|
rockbox_convert2mp4.py
|
gpodder_gpodder/share/gpodder/extensions/rockbox_convert2mp4.py
|
# -*- coding: utf-8 -*-
# Requirements: apt-get install python-kaa-metadata ffmpeg python-dbus
# To use, copy it as a Python script into ~/.config/gpodder/extensions/rockbox_mp4_convert.py
# See the module "gpodder.extensions" for a description of when each extension
# gets called and what the parameters of each extension are.
# Based on Rename files after download based on the episode title
# And patch in Bug https://bugs.gpodder.org/show_bug.cgi?id=1263
# Copyright (c) 2011-04-06 Guy Sheffer <guysoft at gmail.com>
# Copyright (c) 2011-04-04 Thomas Perl <thp.io>
# Licensed under the same terms as gPodder itself
import logging
import os
import shlex
import subprocess
import kaa.metadata
import gpodder
from gpodder import util
logger = logging.getLogger(__name__)
_ = gpodder.gettext
__title__ = _('Convert video files to MP4 for Rockbox')
__description__ = _('Converts all videos to a Rockbox-compatible format')
__authors__ = 'Guy Sheffer <guysoft@gmail.com>, Thomas Perl <thp@gpodder.org>, Bernd Schlapsi <brot@gmx.info>'
__category__ = 'post-download'
DefaultConfig = {
'device_height': 176.0,
'device_width': 224.0,
'ffmpeg_options': '-vcodec mpeg2video -b 500k -ab 192k -ac 2 -ar 44100 -acodec libmp3lame',
}
ROCKBOX_EXTENSION = "mpg"
EXTENTIONS_TO_CONVERT = ['.mp4', "." + ROCKBOX_EXTENSION]
FFMPEG_CMD = 'ffmpeg -y -i "%(from)s" -s %(width)sx%(height)s %(options)s "%(to)s"'
class gPodderExtension:
def __init__(self, container):
self.container = container
program = shlex.split(FFMPEG_CMD)[0]
if not util.find_command(program):
raise ImportError("Couldn't find program '%s'" % program)
def on_load(self):
logger.info('Extension "%s" is being loaded.' % __title__)
def on_unload(self):
logger.info('Extension "%s" is being unloaded.' % __title__)
def on_episode_downloaded(self, episode):
current_filename = episode.local_filename(False)
converted_filename = self._convert_mp4(episode, current_filename)
if converted_filename is not None:
util.rename_episode_file(episode, converted_filename)
os.remove(current_filename)
            logger.info('Conversion of %s was successful' % current_filename)
gpodder.user_extensions.on_notification_show(_('File converted'), episode.title)
def _get_rockbox_filename(self, origin_filename):
if not os.path.exists(origin_filename):
logger.info("File '%s' don't exists." % origin_filename)
return None
dirname = os.path.dirname(origin_filename)
filename = os.path.basename(origin_filename)
basename, ext = os.path.splitext(filename)
if ext not in EXTENTIONS_TO_CONVERT:
logger.info("Ignore file with file-extension %s." % ext)
return None
if filename.endswith(ROCKBOX_EXTENSION):
new_filename = "%s-convert.%s" % (basename, ROCKBOX_EXTENSION)
else:
new_filename = "%s.%s" % (basename, ROCKBOX_EXTENSION)
return os.path.join(dirname, new_filename)
def _calc_resolution(self, video_width, video_height, device_width, device_height):
if video_height is None:
return None
        # Use true division: these ratios are scaling factors and are usually < 1
        width_ratio = device_width / video_width
        height_ratio = device_height / video_height
dest_width = device_width
dest_height = width_ratio * video_height
if dest_height > device_height:
dest_width = height_ratio * video_width
dest_height = device_height
        return (int(round(dest_width)), int(round(dest_height)))
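    # Worked example (illustrative, assuming the true division above): for a 640x360
    # video on the default 224x176 device, width_ratio = 0.35 and height_ratio ~= 0.489;
    # dest = (224, 0.35 * 360) = (224, 126), which fits the device height, so
    # (224, 126) is returned.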
def _convert_mp4(self, episode, from_file):
"""Convert MP4 file to rockbox mpg file"""
# generate new filename and check if the file already exists
to_file = self._get_rockbox_filename(from_file)
if to_file is None:
return None
if os.path.isfile(to_file):
return to_file
logger.info("Converting: %s", from_file)
gpodder.user_extensions.on_notification_show("Converting", episode.title)
# calculate the new screen resolution
info = kaa.metadata.parse(from_file)
resolution = self._calc_resolution(
info.video[0].width,
info.video[0].height,
self.container.config.device_width,
self.container.config.device_height
)
if resolution is None:
logger.error("Error calculating the new screen resolution")
return None
convert_command = FFMPEG_CMD % {
'from': from_file,
'to': to_file,
'width': str(resolution[0]),
'height': str(resolution[1]),
'options': self.container.config.ffmpeg_options
}
if gpodder.ui.win32:
p = util.Popen(shlex.split(convert_command))
p.wait()
stdout, stderr = ("<unavailable>",) * 2
else:
process = util.Popen(shlex.split(convert_command),
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = process.communicate()
if process.returncode != 0:
logger.error(stderr)
return None
gpodder.user_extensions.on_notification_show("Converting finished", episode.title)
return to_file
| 5,378
|
Python
|
.py
| 116
| 38.034483
| 110
| 0.64869
|
gpodder/gpodder
| 1,268
| 204
| 307
|
GPL-3.0
|
9/5/2024, 5:10:30 PM (Europe/Amsterdam)
|
8,365
|
ted_subtitles.py
|
gpodder_gpodder/share/gpodder/extensions/ted_subtitles.py
|
# -*- coding: utf-8 -*-
# vi:si:et:sw=4:sts=4:ts=4
import json
import logging
import os
import re
from datetime import timedelta
import gpodder
from gpodder import util
logger = logging.getLogger(__name__)
_ = gpodder.gettext
__title__ = _('Subtitle Downloader for TED Talks')
__description__ = _('Downloads .srt subtitles for TED Talks Videos')
__authors__ = 'Danilo Shiga <daniloshiga@gmail.com>'
__category__ = 'post-download'
__only_for__ = 'gtk, cli'
class gPodderExtension(object):
"""
TED Subtitle Download Extension
Downloads ted subtitles
"""
def __init__(self, container):
self.container = container
def milli_to_srt(self, time):
"""Converts milliseconds to srt time format"""
srt_time = timedelta(milliseconds=time)
srt_time = str(srt_time)
if '.' in srt_time:
srt_time = srt_time.replace('.', ',')[:11]
else:
# ',000' required to be a valid srt line
srt_time += ',000'
return srt_time
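    # Illustrative examples (not called anywhere): milli_to_srt(83000) returns
    # '0:01:23,000' and milli_to_srt(83123) returns '0:01:23,123', matching the
    # SRT-style timestamps this extension writes.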
def ted_to_srt(self, jsonstring, introduration):
"""Converts the json object to srt format"""
jsonobject = json.loads(jsonstring)
srtContent = ''
for captionIndex, caption in enumerate(jsonobject['captions'], 1):
startTime = self.milli_to_srt(introduration + caption['startTime'])
endTime = self.milli_to_srt(introduration + caption['startTime']
+ caption['duration'])
srtContent += ''.join([str(captionIndex), os.linesep, startTime,
' --> ', endTime, os.linesep,
caption['content'], os.linesep * 2])
return srtContent
def get_data_from_url(self, url):
try:
response = util.urlopen(url).read()
except Exception as e:
logger.warning("subtitle url returned error %s", e)
return ''
return response
def get_srt_filename(self, audio_filename):
basename, _ = os.path.splitext(audio_filename)
return basename + '.srt'
def on_episode_downloaded(self, episode):
guid_result = re.search(r'talk.ted.com:(\d+)', episode.guid)
if guid_result is not None:
talkId = int(guid_result.group(1))
else:
logger.debug('Not a TED Talk. Ignoring.')
return
sub_url = 'http://www.ted.com/talks/subtitles/id/%s/lang/eng' % talkId
logger.info('subtitle url: %s', sub_url)
sub_data = self.get_data_from_url(sub_url)
if not sub_data:
return
logger.info('episode url: %s', episode.link)
episode_data = self.get_data_from_url(episode.link)
if not episode_data:
return
INTRO_DEFAULT = 15
try:
# intro in the data could be 15 or 15.33
intro = episode_data
intro = episode_data.split('introDuration":')[1] \
.split(',')[0] or INTRO_DEFAULT
intro = int(float(intro) * 1000)
except (ValueError, IndexError):
logger.info("Couldn't parse introDuration string: %s", intro)
intro = INTRO_DEFAULT * 1000
current_filename = episode.local_filename(create=False)
srt_filename = self.get_srt_filename(current_filename)
sub = self.ted_to_srt(sub_data, int(intro))
try:
            # Write text, not bytes: writing encoded bytes to a text-mode file
            # raises TypeError in Python 3
            with open(srt_filename, 'w', encoding='utf-8') as srtFile:
                srtFile.write(sub)
except Exception as e:
logger.warning("Can't write srt file: %s", e)
def on_episode_delete(self, episode, filename):
srt_filename = self.get_srt_filename(filename)
if os.path.exists(srt_filename):
os.remove(srt_filename)
| 3,811
|
Python
|
.py
| 93
| 31.44086
| 79
| 0.59086
|
gpodder/gpodder
| 1,268
| 204
| 307
|
GPL-3.0
|
9/5/2024, 5:10:30 PM (Europe/Amsterdam)
|
8,366
|
mpris-listener.py
|
gpodder_gpodder/share/gpodder/extensions/mpris-listener.py
|
# -*- coding: utf-8 -*-
#
# gPodder extension for listening to notifications from MPRIS-capable
# players and translating them to gPodder's Media Player D-Bus API
#
# Copyright (c) 2013-2014 Dov Feldstern <dovdevel@gmail.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import collections
import logging
import time
import urllib.error
import urllib.parse
import urllib.request
import dbus
import dbus.service
import gpodder
logger = logging.getLogger(__name__)
_ = gpodder.gettext
__title__ = _('MPRIS Listener')
__description__ = _('Convert MPRIS notifications to gPodder Media Player D-Bus API')
__authors__ = 'Dov Feldstern <dovdevel@gmail.com>'
__doc__ = 'https://gpodder.github.io/docs/extensions/mprislistener.html'
__category__ = 'desktop-integration'
__only_for__ = 'freedesktop'
USECS_IN_SEC = 1000000
TrackInfo = collections.namedtuple('TrackInfo',
['uri', 'length', 'status', 'pos', 'rate'])
def subsecond_difference(usec1, usec2):
return usec1 is not None and usec2 is not None and abs(usec1 - usec2) < USECS_IN_SEC
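# Illustrative: subsecond_difference(1_500_000, 1_900_000) is True (0.4 s apart),
# subsecond_difference(0, 2_000_000) is False, and passing None for either value
# also yields False.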
class CurrentTrackTracker(object):
"""An instance of this class is responsible for tracking the state of the
    currently playing track -- its playback status, playing position, etc.
"""
def __init__(self, notifier):
self.uri = None
self.length = None
self.pos = None
self.rate = None
self.status = None
self._notifier = notifier
self._prev_notif = ()
def _calc_update(self):
now = time.time()
logger.debug('CurrentTrackTracker: calculating at %d (status: %r)',
now, self.status)
try:
if self.status != 'Playing':
logger.debug('CurrentTrackTracker: not currently playing, no change')
return
if self.pos is None or self.rate is None:
logger.debug('CurrentTrackTracker: unknown pos/rate, no change')
return
logger.debug('CurrentTrackTracker: %f @%f (diff: %f)',
self.pos, self.rate, now - self._last_time)
self.pos = self.pos + self.rate * (now - self._last_time) * USECS_IN_SEC
finally:
self._last_time = now
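    # For example (illustrative): at rate 1.0, a call 2.5 s after the previous one
    # advances self.pos by 2.5 * USECS_IN_SEC = 2,500,000 microseconds while Playing.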
def update_needed(self, current, updated):
for field in updated:
if field == 'pos':
if not subsecond_difference(updated['pos'], current['pos']):
return True
elif updated[field] != current[field]:
return True
# no unequal field was found, no new info here!
return False
def update(self, **kwargs):
# check if there is any new info here -- if not, no need to update!
cur = self.getinfo()._asdict()
if not self.update_needed(cur, kwargs):
return
# there *is* new info, go ahead and update...
uri = kwargs.pop('uri', None)
if uri is not None:
length = kwargs.pop('length') # don't know how to handle uri with no length
if uri != cur['uri']:
# if this is a new uri, and the previous state was 'Playing',
# notify that the previous track has stopped before updating to
# the new track.
if cur['status'] == 'Playing':
logger.debug('notify Stopped: new uri: old %s new %s',
cur['uri'], uri)
self.notify_stop()
self.uri = uri
self.length = float(length)
if 'pos' in kwargs:
            # If the status *is* Playing and *was* Playing, but the position has
            # changed discontinuously, notify a stop for the old position.
if (cur['status'] == 'Playing'
and ('status' not in kwargs or kwargs['status'] == 'Playing') and not
subsecond_difference(cur['pos'], kwargs['pos'])):
                logger.debug('notify Stopped: playback discontinuity: '
                             'calc: %r observed: %r', cur['pos'], kwargs['pos'])
self.notify_stop()
if ((kwargs['pos']) <= 0
and self.pos is not None
and self.length is not None
and (self.length - USECS_IN_SEC) < self.pos
and self.pos < (self.length + 2 * USECS_IN_SEC)):
logger.debug('pos=0 end of stream (calculated pos: %f/%f [%f])',
self.pos / USECS_IN_SEC, self.length / USECS_IN_SEC,
(self.pos / USECS_IN_SEC) - (self.length / USECS_IN_SEC))
self.pos = self.length
kwargs.pop('pos') # remove 'pos' even though we're not using it
else:
if self.pos is not None and self.length is not None:
logger.debug("%r %r", self.pos, self.length)
logger.debug('pos=0 not end of stream (calculated pos: %f/%f [%f])',
self.pos / USECS_IN_SEC, self.length / USECS_IN_SEC,
(self.pos / USECS_IN_SEC) - (self.length / USECS_IN_SEC))
newpos = kwargs.pop('pos')
self.pos = newpos if newpos >= 0 else 0
if 'status' in kwargs:
self.status = kwargs.pop('status')
if 'rate' in kwargs:
self.rate = kwargs.pop('rate')
if kwargs:
logger.error('unexpected update fields %r', kwargs)
# notify about the current state
if self.status == 'Playing':
self.notify_playing()
else:
logger.debug('notify Stopped: status %r', self.status)
self.notify_stop()
def getinfo(self):
self._calc_update()
return TrackInfo(self.uri, self.length, self.status, self.pos, self.rate)
def notify_stop(self):
self.notify('Stopped')
def notify_playing(self):
self.notify('Playing')
def notify(self, status):
if (self.uri is None
or self.pos is None
or self.status is None
or self.length is None
or self.length <= 0):
return
pos = self.pos // USECS_IN_SEC
parsed_url = urllib.parse.urlparse(self.uri)
if (not parsed_url.scheme) or parsed_url.scheme == 'file':
file_uri = urllib.request.url2pathname(urllib.parse.urlparse(self.uri).path).encode('utf-8')
else:
file_uri = self.uri
total_time = self.length // USECS_IN_SEC
if status == 'Stopped':
end_position = pos
start_position = self._notifier.start_position
if self._prev_notif != (start_position, end_position, total_time, file_uri):
self._notifier.PlaybackStopped(start_position, end_position,
total_time, file_uri)
self._prev_notif = (start_position, end_position, total_time, file_uri)
elif status == 'Playing':
start_position = pos
if self._prev_notif != (start_position, file_uri):
self._notifier.PlaybackStarted(start_position, file_uri)
self._prev_notif = (start_position, file_uri)
self._notifier.start_position = start_position
logger.info('CurrentTrackTracker: %s: %r %s', status, self, file_uri)
def __repr__(self):
return '%s: %s at %d/%d (@%f)' % (
self.uri or 'None',
self.status or 'None',
(self.pos or 0) // USECS_IN_SEC,
(self.length or 0) // USECS_IN_SEC,
self.rate or 0)
class MPRISDBusReceiver(object):
INTERFACE_PROPS = 'org.freedesktop.DBus.Properties'
SIGNAL_PROP_CHANGE = 'PropertiesChanged'
PATH_MPRIS = '/org/mpris/MediaPlayer2'
INTERFACE_MPRIS = 'org.mpris.MediaPlayer2.Player'
SIGNAL_SEEKED = 'Seeked'
OTHER_MPRIS_INTERFACES = ['org.mpris.MediaPlayer2',
'org.mpris.MediaPlayer2.TrackList',
'org.mpris.MediaPlayer2.Playlists']
def __init__(self, bus, notifier):
self.bus = bus
self.cur = CurrentTrackTracker(notifier)
self.bus.add_signal_receiver(self.on_prop_change,
self.SIGNAL_PROP_CHANGE,
self.INTERFACE_PROPS,
None,
self.PATH_MPRIS,
sender_keyword='sender')
self.bus.add_signal_receiver(self.on_seeked,
self.SIGNAL_SEEKED,
self.INTERFACE_MPRIS,
None,
None)
def stop_receiving(self):
self.bus.remove_signal_receiver(self.on_prop_change,
self.SIGNAL_PROP_CHANGE,
self.INTERFACE_PROPS,
None,
self.PATH_MPRIS)
self.bus.remove_signal_receiver(self.on_seeked,
self.SIGNAL_SEEKED,
self.INTERFACE_MPRIS,
None,
None)
def on_prop_change(self, interface_name, changed_properties,
invalidated_properties, path=None, sender=None):
if interface_name != self.INTERFACE_MPRIS:
if interface_name not in self.OTHER_MPRIS_INTERFACES:
logger.warning('unexpected interface: %s, props=%r', interface_name, list(changed_properties.keys()))
return
if sender is None:
logger.warning('No sender associated to D-Bus signal, please report a bug')
return
collected_info = {}
logger.debug("on_prop_change %r", changed_properties.keys())
if 'PlaybackStatus' in changed_properties:
collected_info['status'] = str(changed_properties['PlaybackStatus'])
if 'Metadata' in changed_properties:
logger.debug("Metadata %r", changed_properties['Metadata'].keys())
# on stop there is no xesam:url
if 'xesam:url' in changed_properties['Metadata']:
collected_info['uri'] = changed_properties['Metadata']['xesam:url']
collected_info['length'] = changed_properties['Metadata'].get('mpris:length', 0.0)
if 'Rate' in changed_properties:
collected_info['rate'] = changed_properties['Rate']
# Fix #788 pos=0 when Stopped resulting in not saving position on VLC quit
if changed_properties.get('PlaybackStatus') != 'Stopped':
try:
collected_info['pos'] = self.query_property(sender, 'Position')
except dbus.exceptions.DBusException:
pass
if 'status' not in collected_info:
try:
collected_info['status'] = str(self.query_property(
sender, 'PlaybackStatus'))
except dbus.exceptions.DBusException:
pass
logger.debug('collected info: %r', collected_info)
self.cur.update(**collected_info)
def on_seeked(self, position):
logger.debug('seeked to pos: %f', position)
self.cur.update(pos=position)
def query_property(self, sender, prop):
proxy = self.bus.get_object(sender, self.PATH_MPRIS)
props = dbus.Interface(proxy, self.INTERFACE_PROPS)
return props.Get(self.INTERFACE_MPRIS, prop)
class gPodderNotifier(dbus.service.Object):
def __init__(self, bus, path):
dbus.service.Object.__init__(self, bus, path)
self.start_position = 0
@dbus.service.signal(dbus_interface='org.gpodder.player', signature='us')
def PlaybackStarted(self, start_position, file_uri):
logger.info('PlaybackStarted: %s: %d', file_uri, start_position)
@dbus.service.signal(dbus_interface='org.gpodder.player', signature='uuus')
def PlaybackStopped(self, start_position, end_position, total_time, file_uri):
logger.info('PlaybackStopped: %s: %d--%d/%d',
file_uri, start_position, end_position, total_time)
# Finally, this is the extension, which just pulls this all together
class gPodderExtension:
def __init__(self, container):
self.container = container
self.path = '/org/gpodder/player/notifier'
self.notifier = None
self.rcvr = None
def on_load(self):
if gpodder.dbus_session_bus is None:
logger.debug("dbus session bus not available, not loading")
else:
self.session_bus = gpodder.dbus_session_bus
self.notifier = gPodderNotifier(self.session_bus, self.path)
self.rcvr = MPRISDBusReceiver(self.session_bus, self.notifier)
def on_unload(self):
if self.notifier is not None:
self.notifier.remove_from_connection(self.session_bus, self.path)
if self.rcvr is not None:
self.rcvr.stop_receiving()
| 13,911
|
Python
|
.py
| 285
| 36.259649
| 117
| 0.57747
|
gpodder/gpodder
| 1,268
| 204
| 307
|
GPL-3.0
|
9/5/2024, 5:10:30 PM (Europe/Amsterdam)
|
8,367
|
update_feeds_on_startup.py
|
gpodder_gpodder/share/gpodder/extensions/update_feeds_on_startup.py
|
# -*- coding: utf-8 -*-
# Starts episode update search on startup
#
# (c) 2012-10-13 Bernd Schlapsi <brot@gmx.info>
# Released under the same license terms as gPodder itself.
import logging
import gpodder
logger = logging.getLogger(__name__)
_ = gpodder.gettext
__title__ = _('Search for new episodes on startup')
__description__ = _('Starts the search for new episodes on startup')
__authors__ = 'Bernd Schlapsi <brot@gmx.info>'
__doc__ = 'https://gpodder.github.io/docs/extensions/searchepisodeonstartup.html'
__payment__ = 'https://flattr.com/submit/auto?user_id=BerndSch&url=http://wiki.gpodder.org/wiki/Extensions/SearchEpisodeOnStartup'
__category__ = 'interface'
__only_for__ = 'gtk'
class gPodderExtension:
def __init__(self, container):
self.container = container
self.config = self.container.config
self.gpodder = None
def on_ui_object_available(self, name, ui_object):
if name == 'gpodder-gtk':
self.gpodder = ui_object
def on_find_partial_downloads_done(self):
if self.gpodder:
self.gpodder.update_feed_cache()
| 1,108
|
Python
|
.py
| 27
| 36.888889
| 130
| 0.697761
|
gpodder/gpodder
| 1,268
| 204
| 307
|
GPL-3.0
|
9/5/2024, 5:10:30 PM (Europe/Amsterdam)
|
8,368
|
enqueue_in_mediaplayer.py
|
gpodder_gpodder/share/gpodder/extensions/enqueue_in_mediaplayer.py
|
# -*- coding: utf-8 -*-
# Extension script to add a context menu item for enqueueing episodes in a player
# Requirements: gPodder 3.x (or "tres" branch newer than 2011-06-08)
# (c) 2011-06-08 Thomas Perl <thp.io/about>
# Released under the same license terms as gPodder itself.
import functools
import logging
import gpodder
from gpodder import util
logger = logging.getLogger(__name__)
_ = gpodder.gettext
__title__ = _('Enqueue/Resume in media players')
__description__ = _('Add a context menu item for enqueueing/resuming playback of episodes in installed media players')
__authors__ = 'Thomas Perl <thp@gpodder.org>, Bernd Schlapsi <brot@gmx.info>'
__doc__ = 'https://gpodder.github.io/docs/extensions/enqueueinmediaplayer.html'
__payment__ = 'https://flattr.com/submit/auto?user_id=BerndSch&url=http://wiki.gpodder.org/wiki/Extensions/EnqueueInMediaplayer'
__category__ = 'interface'
__only_for__ = 'gtk'
DefaultConfig = {
'enqueue_after_download': False, # Set to True to enqueue an episode right after downloading
'default_player': '', # Set to the player to be used for auto-enqueueing (otherwise pick first installed)
}
class Player(object):
def __init__(self, slug, application, command):
self.slug = slug
self.application = application
self.title = '/'.join((_('Enqueue in'), application))
self.command = command
self.gpodder = None
def is_installed(self):
raise NotImplementedError('Must be implemented by subclass')
def open_files(self, filenames):
raise NotImplementedError('Must be implemented by subclass')
def enqueue_episodes(self, episodes, config=None):
filenames = [episode.get_playback_url(config=config) for episode in episodes]
self.open_files(filenames)
for episode in episodes:
episode.playback_mark()
if self.gpodder is not None:
self.gpodder.update_episode_list_icons(selected=True)
class FreeDesktopPlayer(Player):
def is_installed(self):
return util.find_command(self.command[0]) is not None
def open_files(self, filenames):
util.Popen(self.command + filenames)
class Win32Player(Player):
def is_installed(self):
if not gpodder.ui.win32:
return False
from gpodder.gtkui.desktopfile import win32_read_registry_key
try:
self.command = win32_read_registry_key(self.command)
return True
except Exception as e:
logger.warning('Win32 player not found: %s (%s)', self.command, e)
return False
def open_files(self, filenames):
for cmd in util.format_desktop_command(self.command, filenames):
util.Popen(cmd, close_fds=True)
class MPRISResumer(FreeDesktopPlayer):
"""
    Resume episode playback at the saved time.
"""
OBJECT_PLAYER = '/org/mpris/MediaPlayer2'
OBJECT_DBUS = '/org/freedesktop/DBus'
INTERFACE_PLAYER = 'org.mpris.MediaPlayer2.Player'
INTERFACE_PROPS = 'org.freedesktop.DBus.Properties'
SIGNAL_PROP_CHANGE = 'PropertiesChanged'
NAME_DBUS = 'org.freedesktop.DBus'
def __init__(self, slug, application, command, bus_name):
super(MPRISResumer, self).__init__(slug, application, command)
self.title = '/'.join((_('Resume in'), application))
self.bus_name = bus_name
self.player = None
self.position_us = None
self.url = None
def is_installed(self):
if gpodder.ui.win32:
return False
return util.find_command(self.command[0]) is not None
def enqueue_episodes(self, episodes, config=None):
self.do_enqueue(episodes[0].get_playback_url(config=config),
episodes[0].current_position)
for episode in episodes:
episode.playback_mark()
if self.gpodder is not None:
self.gpodder.update_episode_list_icons(selected=True)
def init_dbus(self):
bus = gpodder.dbus_session_bus
if not bus.name_has_owner(self.bus_name):
logger.debug('MPRISResumer %s is not there...', self.bus_name)
return False
self.player = bus.get_object(self.bus_name, self.OBJECT_PLAYER)
self.signal_match = self.player.connect_to_signal(self.SIGNAL_PROP_CHANGE,
self.on_prop_change,
dbus_interface=self.INTERFACE_PROPS)
return True
def enqueue_when_ready(self, filename, pos):
def name_owner_changed(name, old_owner, new_owner):
logger.debug('name_owner_changed "%s" "%s" "%s"',
name, old_owner, new_owner)
if name == self.bus_name:
logger.debug('MPRISResumer player %s is there', name)
cancel.remove()
util.idle_add(lambda: self.do_enqueue(filename, pos))
bus = gpodder.dbus_session_bus
obj = bus.get_object(self.NAME_DBUS, self.OBJECT_DBUS)
cancel = obj.connect_to_signal('NameOwnerChanged', name_owner_changed, dbus_interface=self.NAME_DBUS)
def do_enqueue(self, filename, pos):
def on_reply():
logger.debug('MPRISResumer opened %s', self.url)
def on_error(exception):
logger.error('MPRISResumer error %s', repr(exception))
self.signal_match.remove()
if filename.startswith('/'):
try:
import pathlib
self.url = pathlib.Path(filename).as_uri()
except ImportError:
                self.url = 'file://' + filename
        else:
            # filename may already be a URL (e.g. a stream); use it as-is
            self.url = filename
self.position_us = pos * 1000 * 1000 # pos in microseconds
if self.init_dbus():
# async to not freeze the ui waiting for the application to answer
self.player.OpenUri(self.url,
dbus_interface=self.INTERFACE_PLAYER,
reply_handler=on_reply,
error_handler=on_error)
else:
self.enqueue_when_ready(filename, pos)
logger.debug('MPRISResumer launching player %s', self.application)
super(MPRISResumer, self).open_files([])
def on_prop_change(self, interface, props, invalidated_props):
def on_reply():
pass
def on_error(exception):
logger.error('MPRISResumer SetPosition error %s', repr(exception))
self.signal_match.remove()
metadata = props.get('Metadata', {})
url = metadata.get('xesam:url')
track_id = metadata.get('mpris:trackid')
if url is not None and track_id is not None:
if url == self.url:
logger.info('Enqueue %s setting track %s position=%d',
url, track_id, self.position_us)
self.player.SetPosition(str(track_id), self.position_us,
dbus_interface=self.INTERFACE_PLAYER,
reply_handler=on_reply,
error_handler=on_error)
else:
logger.debug('Changed but wrong url: %s, giving up', url)
self.signal_match.remove()
PLAYERS = [
# Amarok, http://amarok.kde.org/
FreeDesktopPlayer('amarok', 'Amarok', ['amarok', '--play', '--append']),
# VLC, http://videolan.org/
FreeDesktopPlayer('vlc', 'VLC', ['vlc', '--started-from-file', '--playlist-enqueue']),
# Totem, https://live.gnome.org/Totem
FreeDesktopPlayer('totem', 'Totem', ['totem', '--enqueue']),
# DeaDBeeF, http://deadbeef.sourceforge.net/
FreeDesktopPlayer('deadbeef', 'DeaDBeeF', ['deadbeef', '--queue']),
# gmusicbrowser, http://gmusicbrowser.org/
FreeDesktopPlayer('gmusicbrowser', 'gmusicbrowser', ['gmusicbrowser', '-enqueue']),
# Audacious, http://audacious-media-player.org/
FreeDesktopPlayer('audacious', 'Audacious', ['audacious', '--enqueue']),
# Clementine, http://www.clementine-player.org/
FreeDesktopPlayer('clementine', 'Clementine', ['clementine', '--append']),
# Strawberry, https://www.strawberrymusicplayer.org/
FreeDesktopPlayer('strawberry', 'Strawberry', ['strawberry', '--append']),
# Parole, http://docs.xfce.org/apps/parole/start
FreeDesktopPlayer('parole', 'Parole', ['parole', '-a']),
# Winamp 2.x, http://www.oldversion.com/windows/winamp/
Win32Player('winamp', 'Winamp', r'HKEY_CLASSES_ROOT\Winamp.File\shell\Enqueue\command'),
# VLC media player, http://videolan.org/vlc/
Win32Player('vlc', 'VLC', r'HKEY_CLASSES_ROOT\VLC.mp3\shell\AddToPlaylistVLC\command'),
# foobar2000, http://www.foobar2000.org/
Win32Player('foobar2000', 'foobar2000', r'HKEY_CLASSES_ROOT\foobar2000.MP3\shell\enqueue\command'),
]
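# Hypothetical sketch of how another player could be added to the list above
# (the command-line flag and registry key are assumptions, not verified):
#   FreeDesktopPlayer('myplayer', 'My Player', ['myplayer', '--enqueue']),
#   Win32Player('myplayer', 'My Player', r'HKEY_CLASSES_ROOT\MyPlayer.mp3\shell\enqueue\command'),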
RESUMERS = [
# doesn't play on my system, but the track is appended.
MPRISResumer('amarok', 'Amarok', ['amarok', '--play'], 'org.mpris.MediaPlayer2.amarok'),
MPRISResumer('vlc', 'VLC', ['vlc', '--started-from-file'], 'org.mpris.MediaPlayer2.vlc'),
# totem mpris2 plugin is broken for me: it raises AttributeError:
# File "/usr/lib/totem/plugins/dbus/dbusservice.py", line 329, in OpenUri
# self.totem.add_to_playlist_and_play (uri)
# MPRISResumer('totem', 'Totem', ['totem'], 'org.mpris.MediaPlayer2.totem'),
# with https://github.com/Serranya/deadbeef-mpris2-plugin
MPRISResumer('resume in deadbeef', 'DeaDBeeF', ['deadbeef'], 'org.mpris.MediaPlayer2.DeaDBeeF'),
# the gPodder Downloads directory must be in gmusicbrowser's library
MPRISResumer('resume in gmusicbrowser', 'gmusicbrowser', ['gmusicbrowser'], 'org.mpris.MediaPlayer2.gmusicbrowser'),
# Audacious doesn't implement MPRIS2.OpenUri
# MPRISResumer('audacious', 'resume in Audacious', ['audacious', '--enqueue'], 'org.mpris.MediaPlayer2.audacious'),
# beware: clementine never exits on my system (even when launched from cmdline)
# so the zombie clementine process will get all the bus messages and never answer
# resulting in freezes and timeouts!
MPRISResumer('clementine', 'Clementine', ['clementine'], 'org.mpris.MediaPlayer2.clementine'),
# just enable the plugin
MPRISResumer('parole', 'Parole', ['parole'], 'org.mpris.MediaPlayer2.parole'),
]
class gPodderExtension:
def __init__(self, container):
self.container = container
self.config = container.config
self.gpodder_config = self.container.manager.core.config
# Only display media players that can be found at extension load time
self.players = [player for player in PLAYERS if player.is_installed()]
self.resumers = [r for r in RESUMERS if r.is_installed()]
def on_ui_object_available(self, name, ui_object):
if name == 'gpodder-gtk':
for p in self.players + self.resumers:
p.gpodder = ui_object
def on_episodes_context_menu(self, episodes):
if not any(e.file_exists() for e in episodes):
return None
ret = [(p.title, functools.partial(p.enqueue_episodes, config=self.gpodder_config))
for p in self.players]
# needs dbus, doesn't handle more than 1 episode
# and no point in using DBus when episode is not played.
if not hasattr(gpodder.dbus_session_bus, 'fake') and \
len(episodes) == 1 and episodes[0].current_position > 0:
ret.extend([(p.title, functools.partial(p.enqueue_episodes, config=self.gpodder_config))
for p in self.resumers])
return ret
def on_episode_downloaded(self, episode):
if self.config.enqueue_after_download:
if not self.config.default_player and len(self.players):
player = self.players[0]
logger.info('Picking first installed player: %s (%s)', player.slug, player.application)
else:
player = next((player for player in self.players if self.config.default_player == player.slug), None)
if player is None:
logger.info('No player set, use one of: %r', [player.slug for player in self.players])
return
logger.info('Enqueueing downloaded file in %s', player.application)
player.enqueue_episodes([episode])
| 12,301
|
Python
|
.py
| 234
| 43.115385
| 128
| 0.644244
|
gpodder/gpodder
| 1,268
| 204
| 307
|
GPL-3.0
|
9/5/2024, 5:10:30 PM (Europe/Amsterdam)
|
8,369
|
minimize_on_start.py
|
gpodder_gpodder/share/gpodder/extensions/minimize_on_start.py
|
# -*- coding: utf-8 -*-
# Minimize gPodder's main window on startup
# Thomas Perl <thp@gpodder.org>; 2012-07-31
import gpodder
from gpodder import util
_ = gpodder.gettext
__title__ = _('Minimize on start')
__description__ = _('Minimizes the gPodder window on startup.')
__category__ = 'interface'
__only_for__ = 'gtk'
class gPodderExtension:
def __init__(self, container):
self.container = container
def on_ui_object_available(self, name, ui_object):
if name == 'gpodder-gtk':
self.ui_object = ui_object
def on_application_started(self):
if self.ui_object:
self.ui_object.main_window.iconify()
util.idle_add(self.ui_object.main_window.iconify)
| 725
|
Python
|
.py
| 20
| 31.25
| 63
| 0.664275
|
gpodder/gpodder
| 1,268
| 204
| 307
|
GPL-3.0
|
9/5/2024, 5:10:30 PM (Europe/Amsterdam)
|
8,370
|
concatenate_videos.py
|
gpodder_gpodder/share/gpodder/extensions/concatenate_videos.py
|
# -*- coding: utf-8 -*-
# Concatenate multiple videos to a single file using ffmpeg
# 2014-05-03 Thomas Perl <thp.io/about>
# Released under the same license terms as gPodder itself.
import logging
import os
from gi.repository import Gtk
import gpodder
from gpodder import util
from gpodder.gtkui.interface.progress import ProgressIndicator
logger = logging.getLogger(__name__)
_ = gpodder.gettext
__title__ = _('Concatenate videos')
__description__ = _('Add a context menu item for concatenating multiple videos')
__authors__ = 'Thomas Perl <thp@gpodder.org>'
__category__ = 'interface'
__only_for__ = 'gtk'
class gPodderExtension:
def __init__(self, container):
self.container = container
self.gpodder = None
self.have_ffmpeg = (util.find_command('ffmpeg') is not None)
def on_ui_object_available(self, name, ui_object):
if name == 'gpodder-gtk':
self.gpodder = ui_object
def _get_save_filename(self):
dlg = Gtk.FileChooserDialog(title=_('Save video'),
parent=self.gpodder.get_dialog_parent(),
action=Gtk.FileChooserAction.SAVE)
dlg.add_button(_('_Cancel'), Gtk.ResponseType.CANCEL)
dlg.add_button(_('_Save'), Gtk.ResponseType.OK)
if dlg.run() == Gtk.ResponseType.OK:
filename = dlg.get_filename()
dlg.destroy()
return filename
dlg.destroy()
def _concatenate_videos(self, episodes):
episodes = self._get_sorted_episode_list(episodes)
# TODO: Show file list dialog for reordering
out_filename = self._get_save_filename()
if out_filename is None:
return
list_filename = os.path.join(os.path.dirname(out_filename),
'.' + os.path.splitext(os.path.basename(out_filename))[0] + '.txt')
with open(list_filename, 'w') as fp:
fp.write('\n'.join("file '%s'\n" % episode.local_filename(create=False)
for episode in episodes))
indicator = ProgressIndicator(_('Concatenating video files'),
_('Writing %(filename)s') % {
'filename': os.path.basename(out_filename)},
False, self.gpodder.get_dialog_parent())
def convert():
ffmpeg = util.Popen(['ffmpeg', '-f', 'concat', '-nostdin', '-y',
'-i', list_filename, '-c', 'copy', out_filename],
close_fds=True)
result = ffmpeg.wait()
util.delete_file(list_filename)
indicator.on_finished()
util.idle_add(lambda: self.gpodder.show_message(
_('Videos successfully converted') if result == 0 else
_('Error converting videos'),
_('Concatenation result'), important=True))
util.run_in_background(convert, True)
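    # The list file written above uses ffmpeg's "concat" demuxer syntax, roughly:
    #   file '/downloads/episode-1.mp4'
    #   file '/downloads/episode-2.mp4'
    # (paths are illustrative; each downloaded episode contributes one line).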
def _is_downloaded_video(self, episode):
return episode.file_exists() and episode.file_type() == 'video'
def _get_sorted_episode_list(self, episodes):
return sorted([e for e in episodes if self._is_downloaded_video(e)],
key=lambda e: e.published)
def on_episodes_context_menu(self, episodes):
if self.gpodder is None or not self.have_ffmpeg:
return None
episodes = self._get_sorted_episode_list(episodes)
if len(episodes) < 2:
return None
return [(_('Concatenate videos'), self._concatenate_videos)]
| 3,551
|
Python
|
.py
| 75
| 36.813333
| 86
| 0.602378
|
gpodder/gpodder
| 1,268
| 204
| 307
|
GPL-3.0
|
9/5/2024, 5:10:30 PM (Europe/Amsterdam)
|
8,371
|
hello_world.py
|
gpodder_gpodder/share/gpodder/examples/hello_world.py
|
# Use a logger for debug output - this will be managed by gPodder.
import logging
logger = logging.getLogger(__name__)
# Provide some metadata that will be displayed in the gPodder GUI.
__title__ = 'Hello World Extension'
__description__ = 'Explain in one sentence what this extension does.'
__only_for__ = 'gtk, cli'
__authors__ = 'Thomas Perl <m@thp.io>'
class gPodderExtension:
# The extension will be instantiated the first time it's used.
# You can do some sanity checks here and raise an Exception if
# you want to prevent the extension from being loaded.
def __init__(self, container):
self.container = container
# This function will be called when the extension is enabled or
# loaded. This is when you want to create helper objects or hook
# into various parts of gPodder.
def on_load(self):
logger.info('Extension is being loaded.')
print('=' * 40)
print('container:', self.container)
print('container.manager:', self.container.manager)
print('container.config:', self.container.config)
print('container.manager.core:', self.container.manager.core)
print('container.manager.core.db:', self.container.manager.core.db)
print('container.manager.core.config:', self.container.manager.core.config)
print('container.manager.core.model:', self.container.manager.core.model)
print('=' * 40)
# This function will be called when the extension is disabled or
# when gPodder shuts down. You can use this to destroy/delete any
# objects that you created in on_load().
def on_unload(self):
logger.info('Extension is being unloaded.')
def on_ui_object_available(self, name, ui_object):
"""
Called by gPodder when ui is ready.
"""
if name == 'gpodder-gtk':
self.gpodder = ui_object
def on_create_menu(self):
return [("Say Hello", self.say_hello_cb)]
def say_hello_cb(self):
self.gpodder.notification("Hello Extension", "Message", widget=self.gpodder.main_window)
# Concurrency Warning: use gpodder.util.Popen() instead of subprocess.Popen()
#
# When using subprocess.Popen() to spawn a long-lived external command,
# such as ffmpeg, be sure to include the "close_fds=True" argument.
#
# https://docs.python.org/3/library/subprocess.html#subprocess.Popen
#
# This is especially important for extensions responding to
# on_episode_downloaded(), which runs whenever a download finishes.
#
# Otherwise that process will inherit ALL file descriptors gPodder
# happens to have open at the moment (like other active downloads).
# Those files will remain 'in-use' until that process exits, a race
# condition which prevents gPodder from renaming or deleting them on Windows.
#
# Caveat: On Windows, you cannot set close_fds to true and also
# redirect the standard handles (stdin, stdout or stderr). To collect
# output/errors from long-lived external commands, it may be necessary
# to create a (temp) log file and read it afterward.
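    # A minimal sketch of the pattern described above (assumes an 'ffmpeg' binary
    # and illustrative file names; not part of this example extension):
    #
    #     from gpodder import util
    #
    #     def _transcode(input_path, output_path):
    #         # close_fds=True keeps the child from inheriting gPodder's open
    #         # file descriptors; output is not captured (see the caveat above).
    #         proc = util.Popen(['ffmpeg', '-y', '-i', input_path, output_path],
    #                           close_fds=True)
    #         return proc.wait() == 0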
| 3,049
|
Python
|
.py
| 62
| 44.5
| 96
| 0.717311
|
gpodder/gpodder
| 1,268
| 204
| 307
|
GPL-3.0
|
9/5/2024, 5:10:30 PM (Europe/Amsterdam)
|
8,372
|
gpodder_mark_played.py
|
gpodder_gpodder/share/gpodder/examples/gpodder_mark_played.py
|
#!/usr/bin/env python3
# Example script that can be used as post-play extension in media players
#
# Set the configuration options "audio_played_dbus" and "video_played_dbus"
# to True to let gPodder leave the played status untouched when playing
# files in the media player. After playback has finished, call this script
# with the filename of the played episodes as single argument. The episode
# will be marked as played inside gPodder.
#
# Usage: gpodder_mark_played.py /path/to/episode.mp3
# (the gPodder GUI has to be running)
#
# Thomas Perl <thp@gpodder.org>; 2009-09-09
import os
import sys
import dbus
import gpodder
if len(sys.argv) != 2:
print("""
Usage: %s /path/to/episode.mp3
""" % (sys.argv[0],), file=sys.stderr)
sys.exit(1)
filename = os.path.abspath(sys.argv[1])
session_bus = dbus.SessionBus()
proxy = session_bus.get_object(gpodder.dbus_bus_name,
gpodder.dbus_gui_object_path)
interface = dbus.Interface(proxy, gpodder.dbus_interface)
if not interface.mark_episode_played(filename):
print('Warning: Could not mark episode as played.', file=sys.stderr)
sys.exit(2)
| 1,155
|
Python
|
.py
| 30
| 35.4
| 75
| 0.730528
|
gpodder/gpodder
| 1,268
| 204
| 307
|
GPL-3.0
|
9/5/2024, 5:10:30 PM (Europe/Amsterdam)
|
8,373
|
Headphones.py
|
rembo10_headphones/Headphones.py
|
#!/usr/bin/env python
# This file is part of Headphones.
#
# Headphones is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Headphones is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Headphones. If not, see <http://www.gnu.org/licenses/>.
import os
import sys
if sys.version_info < (3, 7):
sys.stdout.write("Headphones requires Python >= 3.7\n")
sys.exit(1)
# Ensure lib added to path, before any other imports
sys.path.insert(0, os.path.join(os.path.dirname(__file__), 'lib/'))
from headphones import webstart, logger
import locale
import time
import signal
import argparse
import headphones
# Register signals, such as CTRL + C
signal.signal(signal.SIGINT, headphones.sig_handler)
signal.signal(signal.SIGTERM, headphones.sig_handler)
def main():
"""
    Headphones application entry point. Parses arguments, sets up encoding and
initializes the application.
"""
# Fixed paths to Headphones
if hasattr(sys, 'frozen'):
headphones.FULL_PATH = os.path.abspath(sys.executable)
else:
headphones.FULL_PATH = os.path.abspath(__file__)
headphones.PROG_DIR = os.path.dirname(headphones.FULL_PATH)
headphones.ARGS = sys.argv[1:]
# From sickbeard
headphones.SYS_PLATFORM = sys.platform
headphones.SYS_ENCODING = None
try:
locale.setlocale(locale.LC_ALL, "")
if headphones.SYS_PLATFORM == 'win32':
headphones.SYS_ENCODING = sys.getdefaultencoding().upper()
else:
headphones.SYS_ENCODING = locale.getpreferredencoding()
except (locale.Error, IOError):
pass
# for OSes that are poorly configured I'll just force UTF-8
if not headphones.SYS_ENCODING or headphones.SYS_ENCODING in ('ANSI_X3.4-1968', 'US-ASCII', 'ASCII'):
headphones.SYS_ENCODING = 'UTF-8'
# Set up and gather command line arguments
parser = argparse.ArgumentParser(
description='Music add-on for SABnzbd+, Transmission and more.')
parser.add_argument(
'-v', '--verbose', action='store_true', help='Increase console logging verbosity')
parser.add_argument(
'-q', '--quiet', action='store_true', help='Turn off console logging')
parser.add_argument(
'-d', '--daemon', action='store_true', help='Run as a daemon')
parser.add_argument(
'-p', '--port', type=int, help='Force Headphones to run on a specified port')
parser.add_argument(
'--datadir', help='Specify a directory where to store your data files')
parser.add_argument('--config', help='Specify a config file to use')
parser.add_argument('--nolaunch', action='store_true',
help='Prevent browser from launching on startup')
parser.add_argument(
'--pidfile', help='Create a pid file (only relevant when running as a daemon)')
parser.add_argument(
'--host', help='Specify a host (default - localhost)')
args = parser.parse_args()
if args.verbose:
headphones.VERBOSE = True
if args.quiet:
headphones.QUIET = True
    # Do an initial setup of the logger.
logger.initLogger(console=not headphones.QUIET, log_dir=False,
verbose=headphones.VERBOSE)
if args.daemon:
if sys.platform == 'win32':
sys.stderr.write(
"Daemonizing not supported under Windows, starting normally\n")
else:
headphones.DAEMON = True
headphones.QUIET = True
if args.pidfile:
headphones.PIDFILE = str(args.pidfile)
# If the pidfile already exists, headphones may still be running, so
# exit
if os.path.exists(headphones.PIDFILE):
raise SystemExit("PID file '%s' already exists. Exiting." %
headphones.PIDFILE)
# The pidfile is only useful in daemon mode, make sure we can write the
# file properly
if headphones.DAEMON:
headphones.CREATEPID = True
try:
with open(headphones.PIDFILE, 'w') as fp:
fp.write("pid\n")
except IOError as e:
raise SystemExit("Unable to write PID file: %s", e)
else:
logger.warn("Not running in daemon mode. PID file creation " \
"disabled.")
# Determine which data directory and config file to use
if args.datadir:
headphones.DATA_DIR = args.datadir
else:
headphones.DATA_DIR = headphones.PROG_DIR
if args.config:
config_file = args.config
else:
config_file = os.path.join(headphones.DATA_DIR, 'config.ini')
# Try to create the DATA_DIR if it doesn't exist
if not os.path.exists(headphones.DATA_DIR):
try:
os.makedirs(headphones.DATA_DIR)
except OSError:
raise SystemExit(
'Could not create data directory: ' + headphones.DATA_DIR + '. Exiting....')
# Make sure the DATA_DIR is writeable
if not os.access(headphones.DATA_DIR, os.W_OK):
raise SystemExit(
'Cannot write to the data directory: ' + headphones.DATA_DIR + '. Exiting...')
# Put the database in the DATA_DIR
headphones.DB_FILE = os.path.join(headphones.DATA_DIR, 'headphones.db')
# Read config and start logging
try:
headphones.initialize(config_file)
except headphones.exceptions.SoftChrootError as e:
        raise SystemExit('FATAL ERROR: %s' % e)
if headphones.DAEMON:
headphones.daemonize()
# Configure the connection to the musicbrainz database
headphones.mb.startmb()
    # Force the http port if necessary
if args.port:
http_port = args.port
logger.info('Using forced web server port: %i', http_port)
else:
http_port = int(headphones.CONFIG.HTTP_PORT)
    # Force the http host if necessary
if args.host:
http_host = args.host
logger.info('Using forced web server host: %s', http_host)
else:
http_host = headphones.CONFIG.HTTP_HOST
# Check if pyOpenSSL is installed. It is required for certificate generation
# and for CherryPy.
if headphones.CONFIG.ENABLE_HTTPS:
try:
import OpenSSL
except ImportError:
logger.warn("The pyOpenSSL module is missing. Install this " \
"module to enable HTTPS. HTTPS will be disabled.")
headphones.CONFIG.ENABLE_HTTPS = False
    # Try to start the server. Will exit here if the address is already in use.
web_config = {
'http_port': http_port,
'http_host': http_host,
'http_root': headphones.CONFIG.HTTP_ROOT,
'http_proxy': headphones.CONFIG.HTTP_PROXY,
'enable_https': headphones.CONFIG.ENABLE_HTTPS,
'https_cert': headphones.CONFIG.HTTPS_CERT,
'https_key': headphones.CONFIG.HTTPS_KEY,
'http_username': headphones.CONFIG.HTTP_USERNAME,
'http_password': headphones.CONFIG.HTTP_PASSWORD,
}
webstart.initialize(web_config)
# Start the background threads
headphones.start()
# Open webbrowser
if headphones.CONFIG.LAUNCH_BROWSER and not args.nolaunch:
headphones.launch_browser(headphones.CONFIG.HTTP_HOST, http_port,
headphones.CONFIG.HTTP_ROOT)
    # Wait endlessly for a signal to happen
while True:
if not headphones.SIGNAL:
try:
time.sleep(1)
except KeyboardInterrupt:
headphones.SIGNAL = 'shutdown'
else:
logger.info('Received signal: %s', headphones.SIGNAL)
if headphones.SIGNAL == 'shutdown':
headphones.shutdown()
elif headphones.SIGNAL == 'restart':
headphones.shutdown(restart=True)
else:
headphones.shutdown(restart=True, update=True)
headphones.SIGNAL = None
# Call main()
if __name__ == "__main__":
main()
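# --- Usage examples (illustration only, not part of the original file) -----
# Every flag below is defined by the argparse configuration in main() above;
# the paths and port number are placeholders.
#
#   python Headphones.py -v --nolaunch
#       Run in the foreground with verbose console logging and without
#       opening a browser window.
#
#   python Headphones.py --daemon --pidfile /var/run/headphones.pid \
#           --datadir /var/lib/headphones --port 8182
#       Daemonize (not supported on Windows), write a PID file, keep
#       config.ini and headphones.db under /var/lib/headphones, and force
#       the web server onto port 8182.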
| 8,371 | Python | .py | 201 | 33.965174 | 105 | 0.6558 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
| 8,374 | zipp.py | rembo10_headphones/lib/zipp.py |
import io
import posixpath
import zipfile
import itertools
import contextlib
import sys
import pathlib
if sys.version_info < (3, 7):
from collections import OrderedDict
else:
OrderedDict = dict
__all__ = ['Path']
def _parents(path):
"""
Given a path with elements separated by
posixpath.sep, generate all parents of that path.
>>> list(_parents('b/d'))
['b']
>>> list(_parents('/b/d/'))
['/b']
>>> list(_parents('b/d/f/'))
['b/d', 'b']
>>> list(_parents('b'))
[]
>>> list(_parents(''))
[]
"""
return itertools.islice(_ancestry(path), 1, None)
def _ancestry(path):
"""
Given a path with elements separated by
posixpath.sep, generate all elements of that path
>>> list(_ancestry('b/d'))
['b/d', 'b']
>>> list(_ancestry('/b/d/'))
['/b/d', '/b']
>>> list(_ancestry('b/d/f/'))
['b/d/f', 'b/d', 'b']
>>> list(_ancestry('b'))
['b']
>>> list(_ancestry(''))
[]
"""
path = path.rstrip(posixpath.sep)
while path and path != posixpath.sep:
yield path
path, tail = posixpath.split(path)
_dedupe = OrderedDict.fromkeys
"""Deduplicate an iterable in original order"""
def _difference(minuend, subtrahend):
"""
Return items in minuend not in subtrahend, retaining order
with O(1) lookup.
"""
return itertools.filterfalse(set(subtrahend).__contains__, minuend)
class CompleteDirs(zipfile.ZipFile):
"""
A ZipFile subclass that ensures that implied directories
are always included in the namelist.
"""
@staticmethod
def _implied_dirs(names):
parents = itertools.chain.from_iterable(map(_parents, names))
as_dirs = (p + posixpath.sep for p in parents)
return _dedupe(_difference(as_dirs, names))
def namelist(self):
names = super(CompleteDirs, self).namelist()
return names + list(self._implied_dirs(names))
def _name_set(self):
return set(self.namelist())
def resolve_dir(self, name):
"""
If the name represents a directory, return that name
as a directory (with the trailing slash).
"""
names = self._name_set()
dirname = name + '/'
dir_match = name not in names and dirname in names
return dirname if dir_match else name
@classmethod
def make(cls, source):
"""
Given a source (filename or zipfile), return an
appropriate CompleteDirs subclass.
"""
if isinstance(source, CompleteDirs):
return source
if not isinstance(source, zipfile.ZipFile):
return cls(_pathlib_compat(source))
# Only allow for FastLookup when supplied zipfile is read-only
if 'r' not in source.mode:
cls = CompleteDirs
source.__class__ = cls
return source
class FastLookup(CompleteDirs):
"""
ZipFile subclass to ensure implicit
dirs exist and are resolved rapidly.
"""
def namelist(self):
with contextlib.suppress(AttributeError):
return self.__names
self.__names = super(FastLookup, self).namelist()
return self.__names
def _name_set(self):
with contextlib.suppress(AttributeError):
return self.__lookup
self.__lookup = super(FastLookup, self)._name_set()
return self.__lookup
def _pathlib_compat(path):
"""
For path-like objects, convert to a filename for compatibility
on Python 3.6.1 and earlier.
"""
try:
return path.__fspath__()
except AttributeError:
return str(path)
class Path:
"""
A pathlib-compatible interface for zip files.
Consider a zip file with this structure::
.
├── a.txt
└── b
├── c.txt
└── d
└── e.txt
>>> data = io.BytesIO()
>>> zf = zipfile.ZipFile(data, 'w')
>>> zf.writestr('a.txt', 'content of a')
>>> zf.writestr('b/c.txt', 'content of c')
>>> zf.writestr('b/d/e.txt', 'content of e')
>>> zf.filename = 'mem/abcde.zip'
Path accepts the zipfile object itself or a filename
>>> root = Path(zf)
From there, several path operations are available.
Directory iteration (including the zip file itself):
>>> a, b = root.iterdir()
>>> a
Path('mem/abcde.zip', 'a.txt')
>>> b
Path('mem/abcde.zip', 'b/')
name property:
>>> b.name
'b'
join with divide operator:
>>> c = b / 'c.txt'
>>> c
Path('mem/abcde.zip', 'b/c.txt')
>>> c.name
'c.txt'
Read text:
>>> c.read_text()
'content of c'
existence:
>>> c.exists()
True
>>> (b / 'missing.txt').exists()
False
Coercion to string:
>>> import os
>>> str(c).replace(os.sep, posixpath.sep)
'mem/abcde.zip/b/c.txt'
At the root, ``name``, ``filename``, and ``parent``
resolve to the zipfile. Note these attributes are not
valid and will raise a ``ValueError`` if the zipfile
has no filename.
>>> root.name
'abcde.zip'
>>> str(root.filename).replace(os.sep, posixpath.sep)
'mem/abcde.zip'
>>> str(root.parent)
'mem'
"""
__repr = "{self.__class__.__name__}({self.root.filename!r}, {self.at!r})"
def __init__(self, root, at=""):
"""
Construct a Path from a ZipFile or filename.
Note: When the source is an existing ZipFile object,
its type (__class__) will be mutated to a
specialized type. If the caller wishes to retain the
original type, the caller should either create a
separate ZipFile object or pass a filename.
"""
self.root = FastLookup.make(root)
self.at = at
def open(self, mode='r', *args, pwd=None, **kwargs):
"""
Open this entry as text or binary following the semantics
of ``pathlib.Path.open()`` by passing arguments through
to io.TextIOWrapper().
"""
if self.is_dir():
raise IsADirectoryError(self)
zip_mode = mode[0]
if not self.exists() and zip_mode == 'r':
raise FileNotFoundError(self)
stream = self.root.open(self.at, zip_mode, pwd=pwd)
if 'b' in mode:
if args or kwargs:
raise ValueError("encoding args invalid for binary operation")
return stream
return io.TextIOWrapper(stream, *args, **kwargs)
@property
def name(self):
return pathlib.Path(self.at).name or self.filename.name
@property
def suffix(self):
return pathlib.Path(self.at).suffix or self.filename.suffix
@property
def suffixes(self):
return pathlib.Path(self.at).suffixes or self.filename.suffixes
@property
def stem(self):
return pathlib.Path(self.at).stem or self.filename.stem
@property
def filename(self):
return pathlib.Path(self.root.filename).joinpath(self.at)
def read_text(self, *args, **kwargs):
with self.open('r', *args, **kwargs) as strm:
return strm.read()
def read_bytes(self):
with self.open('rb') as strm:
return strm.read()
def _is_child(self, path):
return posixpath.dirname(path.at.rstrip("/")) == self.at.rstrip("/")
def _next(self, at):
return self.__class__(self.root, at)
def is_dir(self):
return not self.at or self.at.endswith("/")
def is_file(self):
return self.exists() and not self.is_dir()
def exists(self):
return self.at in self.root._name_set()
def iterdir(self):
if not self.is_dir():
raise ValueError("Can't listdir a file")
subs = map(self._next, self.root.namelist())
return filter(self._is_child, subs)
def __str__(self):
return posixpath.join(self.root.filename, self.at)
def __repr__(self):
return self.__repr.format(self=self)
def joinpath(self, *other):
next = posixpath.join(self.at, *map(_pathlib_compat, other))
return self._next(self.root.resolve_dir(next))
__truediv__ = joinpath
@property
def parent(self):
if not self.at:
return self.filename.parent
parent_at = posixpath.dirname(self.at.rstrip('/'))
if parent_at:
parent_at += '/'
return self._next(parent_at)
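# --- Illustrative sketch (not part of the original module) -----------------
# Recursively listing every file entry in an archive using only the Path
# API defined above (iterdir / is_dir).
def _walk_zip_sketch(path):
    for child in path.iterdir():
        if child.is_dir():
            yield from _walk_zip_sketch(child)
        else:
            yield child
# Example (the archive name is a placeholder):
#     for entry in _walk_zip_sketch(Path('example.zip')):
#         print(entry.at)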
| 8,425 | Python | .py | 256 | 25.945313 | 78 | 0.603521 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
| 8,375 | mediafile.py | rembo10_headphones/lib/mediafile.py |
# -*- coding: utf-8 -*-
# This file is part of MediaFile.
# Copyright 2016, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Handles low-level interfacing for files' tags. Wraps Mutagen to
automatically detect file types and provide a unified interface for a
useful subset of music files' tags.
Usage:
>>> f = MediaFile('Lucy.mp3')
>>> f.title
u'Lucy in the Sky with Diamonds'
>>> f.artist = 'The Beatles'
>>> f.save()
A field will always return a reasonable value of the correct type, even
if no tag is present. If no value is available, the value will be false
(e.g., zero or the empty string).
Internally ``MediaFile`` uses ``MediaField`` descriptors to access the
data from the tags. In turn ``MediaField`` uses a number of
``StorageStyle`` strategies to handle format specific logic.
"""
from __future__ import division, absolute_import, print_function
import mutagen
import mutagen.id3
import mutagen.mp3
import mutagen.mp4
import mutagen.flac
import mutagen.asf
import mutagen._util
import base64
import binascii
import codecs
import datetime
import enum
import functools
import imghdr
import logging
import math
import os
import re
import six
import struct
import traceback
__version__ = '0.9.0'
__all__ = ['UnreadableFileError', 'FileTypeError', 'MediaFile']
log = logging.getLogger(__name__)
# Human-readable type names.
TYPES = {
'mp3': 'MP3',
'aac': 'AAC',
'alac': 'ALAC',
'ogg': 'OGG',
'opus': 'Opus',
'flac': 'FLAC',
'ape': 'APE',
'wv': 'WavPack',
'mpc': 'Musepack',
'asf': 'Windows Media',
'aiff': 'AIFF',
'dsf': 'DSD Stream File',
'wav': 'WAVE',
}
PREFERRED_IMAGE_EXTENSIONS = {'jpeg': 'jpg'}
# Exceptions.
class UnreadableFileError(Exception):
"""Mutagen is not able to extract information from the file.
"""
def __init__(self, filename, msg):
Exception.__init__(self, msg if msg else repr(filename))
class FileTypeError(UnreadableFileError):
"""Reading this type of file is not supported.
If passed the `mutagen_type` argument this indicates that the
mutagen type is not supported by `Mediafile`.
"""
def __init__(self, filename, mutagen_type=None):
if mutagen_type is None:
msg = u'{0!r}: not in a recognized format'.format(filename)
else:
msg = u'{0}: of mutagen type {1}'.format(
repr(filename), mutagen_type
)
Exception.__init__(self, msg)
class MutagenError(UnreadableFileError):
"""Raised when Mutagen fails unexpectedly---probably due to a bug.
"""
def __init__(self, filename, mutagen_exc):
msg = u'{0}: {1}'.format(repr(filename), mutagen_exc)
Exception.__init__(self, msg)
# Interacting with Mutagen.
def mutagen_call(action, filename, func, *args, **kwargs):
"""Call a Mutagen function with appropriate error handling.
`action` is a string describing what the function is trying to do,
and `filename` is the relevant filename. The rest of the arguments
describe the callable to invoke.
We require at least Mutagen 1.33, where `IOError` is *never* used,
neither for internal parsing errors *nor* for ordinary IO error
conditions such as a bad filename. Mutagen-specific parsing errors and IO
errors are reraised as `UnreadableFileError`. Other exceptions
raised inside Mutagen---i.e., bugs---are reraised as `MutagenError`.
"""
try:
return func(*args, **kwargs)
except mutagen.MutagenError as exc:
log.debug(u'%s failed: %s', action, six.text_type(exc))
raise UnreadableFileError(filename, six.text_type(exc))
except UnreadableFileError:
# Reraise our errors without changes.
# Used in case of decorating functions (e.g. by `loadfile`).
raise
except Exception as exc:
# Isolate bugs in Mutagen.
log.debug(u'%s', traceback.format_exc())
log.error(u'uncaught Mutagen exception in %s: %s', action, exc)
raise MutagenError(filename, exc)
def loadfile(method=True, writable=False, create=False):
"""A decorator that works like `mutagen._util.loadfile` but with
additional error handling.
Opens a file and passes a `mutagen._utils.FileThing` to the
decorated function. Should be used as a decorator for functions
using a `filething` parameter.
"""
def decorator(func):
f = mutagen._util.loadfile(method, writable, create)(func)
@functools.wraps(func)
def wrapper(*args, **kwargs):
return mutagen_call('loadfile', '', f, *args, **kwargs)
return wrapper
return decorator
# Utility.
def _update_filething(filething):
"""Reopen a `filething` if it's a local file.
A filething that is *not* an actual file is left unchanged; a
filething with a filename is reopened and a new object is returned.
"""
if filething.filename:
return mutagen._util.FileThing(
None, filething.filename, filething.name
)
else:
return filething
def _safe_cast(out_type, val):
"""Try to covert val to out_type but never raise an exception.
If the value does not exist, return None. Or, if the value
can't be converted, then a sensible default value is returned.
out_type should be bool, int, or unicode; otherwise, the value
is just passed through.
"""
if val is None:
return None
if out_type == int:
if isinstance(val, int) or isinstance(val, float):
# Just a number.
return int(val)
else:
# Process any other type as a string.
if isinstance(val, bytes):
val = val.decode('utf-8', 'ignore')
elif not isinstance(val, six.string_types):
val = six.text_type(val)
# Get a number from the front of the string.
match = re.match(r'[\+-]?[0-9]+', val.strip())
return int(match.group(0)) if match else 0
elif out_type == bool:
try:
# Should work for strings, bools, ints:
return bool(int(val))
except ValueError:
return False
elif out_type == six.text_type:
if isinstance(val, bytes):
return val.decode('utf-8', 'ignore')
elif isinstance(val, six.text_type):
return val
else:
return six.text_type(val)
elif out_type == float:
if isinstance(val, int) or isinstance(val, float):
return float(val)
else:
if isinstance(val, bytes):
val = val.decode('utf-8', 'ignore')
else:
val = six.text_type(val)
match = re.match(r'[\+-]?([0-9]+\.?[0-9]*|[0-9]*\.[0-9]+)',
val.strip())
if match:
val = match.group(0)
if val:
return float(val)
return 0.0
else:
return val
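# A few concrete conversions as implemented above (illustration only, not
# part of the original module):
#     _safe_cast(int, '12 kHz')          -> 12    (leading digits extracted)
#     _safe_cast(int, 'oops')            -> 0     (no digits found)
#     _safe_cast(bool, '0')              -> False
#     _safe_cast(float, u'-3.5 dB')      -> -3.5
#     _safe_cast(six.text_type, b'abc')  -> u'abc'
#     _safe_cast(int, None)              -> None  (missing values stay None)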
# Image coding for ASF/WMA.
def _unpack_asf_image(data):
"""Unpack image data from a WM/Picture tag. Return a tuple
containing the MIME type, the raw image data, a type indicator, and
the image's description.
This function is treated as "untrusted" and could throw all manner
of exceptions (out-of-bounds, etc.). We should clean this up
sometime so that the failure modes are well-defined.
"""
type, size = struct.unpack_from('<bi', data)
pos = 5
mime = b''
while data[pos:pos + 2] != b'\x00\x00':
mime += data[pos:pos + 2]
pos += 2
pos += 2
description = b''
while data[pos:pos + 2] != b'\x00\x00':
description += data[pos:pos + 2]
pos += 2
pos += 2
image_data = data[pos:pos + size]
return (mime.decode("utf-16-le"), image_data, type,
description.decode("utf-16-le"))
def _pack_asf_image(mime, data, type=3, description=""):
"""Pack image data for a WM/Picture tag.
"""
tag_data = struct.pack('<bi', type, len(data))
tag_data += mime.encode("utf-16-le") + b'\x00\x00'
tag_data += description.encode("utf-16-le") + b'\x00\x00'
tag_data += data
return tag_data
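# --- Illustrative round trip (not part of the original module) -------------
# For ordinary metadata the two helpers above are inverses of each other:
def _asf_image_roundtrip_sketch():
    raw = b'\xff\xd8fake-jpeg-bytes'
    blob = _pack_asf_image('image/jpeg', raw, type=3, description=u'front')
    mime, data, kind, desc = _unpack_asf_image(blob)
    assert (mime, data, kind, desc) == ('image/jpeg', raw, 3, u'front')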
# iTunes Sound Check encoding.
def _sc_decode(soundcheck):
"""Convert a Sound Check bytestring value to a (gain, peak) tuple as
used by ReplayGain.
"""
# We decode binary data. If one of the formats gives us a text
# string, interpret it as UTF-8.
if isinstance(soundcheck, six.text_type):
soundcheck = soundcheck.encode('utf-8')
# SoundCheck tags consist of 10 numbers, each represented by 8
# characters of ASCII hex preceded by a space.
try:
soundcheck = codecs.decode(soundcheck.replace(b' ', b''), 'hex')
soundcheck = struct.unpack('!iiiiiiiiii', soundcheck)
except (struct.error, TypeError, binascii.Error):
# SoundCheck isn't in the format we expect, so return default
# values.
return 0.0, 0.0
# SoundCheck stores absolute calculated/measured RMS value in an
# unknown unit. We need to find the ratio of this measurement
# compared to a reference value of 1000 to get our gain in dB. We
# play it safe by using the larger of the two values (i.e., the most
# attenuation).
maxgain = max(soundcheck[:2])
if maxgain > 0:
gain = math.log10(maxgain / 1000.0) * -10
else:
# Invalid gain value found.
gain = 0.0
# SoundCheck stores peak values as the actual value of the sample,
# and again separately for the left and right channels. We need to
# convert this to a percentage of full scale, which is 32768 for a
# 16 bit sample. Once again, we play it safe by using the larger of
# the two values.
peak = max(soundcheck[6:8]) / 32768.0
return round(gain, 2), round(peak, 6)
def _sc_encode(gain, peak):
"""Encode ReplayGain gain/peak values as a Sound Check string.
"""
# SoundCheck stores the peak value as the actual value of the
# sample, rather than the percentage of full scale that RG uses, so
# we do a simple conversion assuming 16 bit samples.
peak *= 32768.0
# SoundCheck stores absolute RMS values in some unknown units rather
# than the dB values RG uses. We can calculate these absolute values
# from the gain ratio using a reference value of 1000 units. We also
# enforce the maximum and minimum value here, which is equivalent to
# about -18.2dB and 30.0dB.
g1 = int(min(round((10 ** (gain / -10)) * 1000), 65534)) or 1
# Same as above, except our reference level is 2500 units.
g2 = int(min(round((10 ** (gain / -10)) * 2500), 65534)) or 1
    # The purpose of these values is unknown, but they also seem to be
    # unused, so we just use zero.
uk = 0
values = (g1, g1, g2, g2, uk, uk, int(peak), int(peak), uk, uk)
return (u' %08X' * 10) % values
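# Worked example (illustration only): encoding a ReplayGain value and
# decoding it again is approximately lossless; the small error comes from
# rounding the peak to an integer sample value.
#     blob = _sc_encode(-6.5, 0.9)   # ten space-separated 8-digit hex fields
#     _sc_decode(blob)               # -> (-6.5, 0.899994), approximately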
# Cover art and other images.
def _imghdr_what_wrapper(data):
"""A wrapper around imghdr.what to account for jpeg files that can only be
identified as such using their magic bytes
See #1545
See https://github.com/file/file/blob/master/magic/Magdir/jpeg#L12
"""
# imghdr.what returns none for jpegs with only the magic bytes, so
# _wider_test_jpeg is run in that case. It still returns None if it didn't
# match such a jpeg file.
return imghdr.what(None, h=data) or _wider_test_jpeg(data)
def _wider_test_jpeg(data):
"""Test for a jpeg file following the UNIX file implementation which
uses the magic bytes rather than just looking for the bytes that
represent 'JFIF' or 'EXIF' at a fixed position.
"""
if data[:2] == b'\xff\xd8':
return 'jpeg'
def image_mime_type(data):
"""Return the MIME type of the image data (a bytestring).
"""
# This checks for a jpeg file with only the magic bytes (unrecognized by
# imghdr.what). imghdr.what returns none for that type of file, so
# _wider_test_jpeg is run in that case. It still returns None if it didn't
# match such a jpeg file.
kind = _imghdr_what_wrapper(data)
if kind in ['gif', 'jpeg', 'png', 'tiff', 'bmp']:
return 'image/{0}'.format(kind)
elif kind == 'pgm':
return 'image/x-portable-graymap'
elif kind == 'pbm':
return 'image/x-portable-bitmap'
elif kind == 'ppm':
return 'image/x-portable-pixmap'
elif kind == 'xbm':
return 'image/x-xbitmap'
else:
return 'image/x-{0}'.format(kind)
def image_extension(data):
ext = _imghdr_what_wrapper(data)
return PREFERRED_IMAGE_EXTENSIONS.get(ext, ext)
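# For example (illustration only): a JPEG that carries nothing but its two
# magic bytes is still recognised through the _wider_test_jpeg fallback:
#     image_mime_type(b'\xff\xd8' + b'\x00' * 32)           -> 'image/jpeg'
#     image_extension(b'\xff\xd8' + b'\x00' * 32)           -> 'jpg'
#     image_mime_type(b'\x89PNG\r\n\x1a\n' + b'\x00' * 32)  -> 'image/png'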
class ImageType(enum.Enum):
"""Indicates the kind of an `Image` stored in a file's tag.
"""
other = 0
icon = 1
other_icon = 2
front = 3
back = 4
leaflet = 5
media = 6
lead_artist = 7
artist = 8
conductor = 9
group = 10
composer = 11
lyricist = 12
recording_location = 13
recording_session = 14
performance = 15
screen_capture = 16
fish = 17
illustration = 18
artist_logo = 19
publisher_logo = 20
class Image(object):
"""Structure representing image data and metadata that can be
stored and retrieved from tags.
The structure has four properties.
* ``data`` The binary data of the image
* ``desc`` An optional description of the image
* ``type`` An instance of `ImageType` indicating the kind of image
* ``mime_type`` Read-only property that contains the mime type of
the binary data
"""
def __init__(self, data, desc=None, type=None):
assert isinstance(data, bytes)
if desc is not None:
assert isinstance(desc, six.text_type)
self.data = data
self.desc = desc
if isinstance(type, int):
try:
type = list(ImageType)[type]
except IndexError:
log.debug(u"ignoring unknown image type index %s", type)
type = ImageType.other
self.type = type
@property
def mime_type(self):
if self.data:
return image_mime_type(self.data)
@property
def type_index(self):
if self.type is None:
# This method is used when a tag format requires the type
# index to be set, so we return "other" as the default value.
return 0
return self.type.value
# StorageStyle classes describe strategies for accessing values in
# Mutagen file objects.
class StorageStyle(object):
"""A strategy for storing a value for a certain tag format (or set
of tag formats). This basic StorageStyle describes simple 1:1
mapping from raw values to keys in a Mutagen file object; subclasses
describe more sophisticated translations or format-specific access
strategies.
MediaFile uses a StorageStyle via three methods: ``get()``,
``set()``, and ``delete()``. It passes a Mutagen file object to
each.
Internally, the StorageStyle implements ``get()`` and ``set()``
using two steps that may be overridden by subtypes. To get a value,
the StorageStyle first calls ``fetch()`` to retrieve the value
corresponding to a key and then ``deserialize()`` to convert the raw
Mutagen value to a consumable Python value. Similarly, to set a
field, we call ``serialize()`` to encode the value and then
``store()`` to assign the result into the Mutagen object.
Each StorageStyle type has a class-level `formats` attribute that is
a list of strings indicating the formats that the style applies to.
MediaFile only uses StorageStyles that apply to the correct type for
a given audio file.
"""
formats = ['FLAC', 'OggOpus', 'OggTheora', 'OggSpeex', 'OggVorbis',
'OggFlac', 'APEv2File', 'WavPack', 'Musepack', 'MonkeysAudio']
"""List of mutagen classes the StorageStyle can handle.
"""
def __init__(self, key, as_type=six.text_type, suffix=None,
float_places=2, read_only=False):
"""Create a basic storage strategy. Parameters:
- `key`: The key on the Mutagen file object used to access the
field's data.
- `as_type`: The Python type that the value is stored as
internally (`unicode`, `int`, `bool`, or `bytes`).
- `suffix`: When `as_type` is a string type, append this before
storing the value.
- `float_places`: When the value is a floating-point number and
encoded as a string, the number of digits to store after the
decimal point.
- `read_only`: When true, writing to this field is disabled.
Primary use case is so wrongly named fields can be addressed
in a graceful manner. This does not block the delete method.
"""
self.key = key
self.as_type = as_type
self.suffix = suffix
self.float_places = float_places
self.read_only = read_only
# Convert suffix to correct string type.
if self.suffix and self.as_type is six.text_type \
and not isinstance(self.suffix, six.text_type):
self.suffix = self.suffix.decode('utf-8')
# Getter.
def get(self, mutagen_file):
"""Get the value for the field using this style.
"""
return self.deserialize(self.fetch(mutagen_file))
def fetch(self, mutagen_file):
"""Retrieve the raw value of for this tag from the Mutagen file
object.
"""
try:
return mutagen_file[self.key][0]
except (KeyError, IndexError):
return None
def deserialize(self, mutagen_value):
"""Given a raw value stored on a Mutagen object, decode and
return the represented value.
"""
if self.suffix and isinstance(mutagen_value, six.text_type) \
and mutagen_value.endswith(self.suffix):
return mutagen_value[:-len(self.suffix)]
else:
return mutagen_value
# Setter.
def set(self, mutagen_file, value):
"""Assign the value for the field using this style.
"""
self.store(mutagen_file, self.serialize(value))
def store(self, mutagen_file, value):
"""Store a serialized value in the Mutagen file object.
"""
mutagen_file[self.key] = [value]
def serialize(self, value):
"""Convert the external Python value to a type that is suitable for
storing in a Mutagen file object.
"""
if isinstance(value, float) and self.as_type is six.text_type:
value = u'{0:.{1}f}'.format(value, self.float_places)
value = self.as_type(value)
elif self.as_type is six.text_type:
if isinstance(value, bool):
# Store bools as 1/0 instead of True/False.
value = six.text_type(int(bool(value)))
elif isinstance(value, bytes):
value = value.decode('utf-8', 'ignore')
else:
value = six.text_type(value)
else:
value = self.as_type(value)
if self.suffix:
value += self.suffix
return value
def delete(self, mutagen_file):
"""Remove the tag from the file.
"""
if self.key in mutagen_file:
del mutagen_file[self.key]
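# --- Illustrative sketch (not part of the original module) -----------------
# A minimal subclass showing where the fetch/deserialize and serialize/store
# hooks described above plug in. This hypothetical style exposes an integer
# that is stored as a decimal text value under `key`.
class _IntAsTextStorageStyleSketch(StorageStyle):
    def deserialize(self, mutagen_value):
        value = super(_IntAsTextStorageStyleSketch,
                      self).deserialize(mutagen_value)
        try:
            return int(value)
        except (TypeError, ValueError):
            return None
    def serialize(self, value):
        return six.text_type(int(value))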
class ListStorageStyle(StorageStyle):
"""Abstract storage style that provides access to lists.
The ListMediaField descriptor uses a ListStorageStyle via two
methods: ``get_list()`` and ``set_list()``. It passes a Mutagen file
object to each.
Subclasses may overwrite ``fetch`` and ``store``. ``fetch`` must
return a (possibly empty) list and ``store`` receives a serialized
list of values as the second argument.
The `serialize` and `deserialize` methods (from the base
`StorageStyle`) are still called with individual values. This class
handles packing and unpacking the values into lists.
"""
def get(self, mutagen_file):
"""Get the first value in the field's value list.
"""
try:
return self.get_list(mutagen_file)[0]
except IndexError:
return None
def get_list(self, mutagen_file):
"""Get a list of all values for the field using this style.
"""
return [self.deserialize(item) for item in self.fetch(mutagen_file)]
def fetch(self, mutagen_file):
"""Get the list of raw (serialized) values.
"""
try:
return mutagen_file[self.key]
except KeyError:
return []
def set(self, mutagen_file, value):
"""Set an individual value as the only value for the field using
this style.
"""
self.set_list(mutagen_file, [value])
def set_list(self, mutagen_file, values):
"""Set all values for the field using this style. `values`
should be an iterable.
"""
self.store(mutagen_file, [self.serialize(value) for value in values])
def store(self, mutagen_file, values):
"""Set the list of all raw (serialized) values for this field.
"""
mutagen_file[self.key] = values
class SoundCheckStorageStyleMixin(object):
"""A mixin for storage styles that read and write iTunes SoundCheck
analysis values. The object must have an `index` field that
indicates which half of the gain/peak pair---0 or 1---the field
represents.
"""
def get(self, mutagen_file):
data = self.fetch(mutagen_file)
if data is not None:
return _sc_decode(data)[self.index]
def set(self, mutagen_file, value):
data = self.fetch(mutagen_file)
if data is None:
gain_peak = [0, 0]
else:
gain_peak = list(_sc_decode(data))
gain_peak[self.index] = value or 0
data = self.serialize(_sc_encode(*gain_peak))
self.store(mutagen_file, data)
class ASFStorageStyle(ListStorageStyle):
"""A general storage style for Windows Media/ASF files.
"""
formats = ['ASF']
def deserialize(self, data):
if isinstance(data, mutagen.asf.ASFBaseAttribute):
data = data.value
return data
class MP4StorageStyle(StorageStyle):
"""A general storage style for MPEG-4 tags.
"""
formats = ['MP4']
def serialize(self, value):
value = super(MP4StorageStyle, self).serialize(value)
if self.key.startswith('----:') and isinstance(value, six.text_type):
value = value.encode('utf-8')
return value
class MP4TupleStorageStyle(MP4StorageStyle):
"""A style for storing values as part of a pair of numbers in an
MPEG-4 file.
"""
def __init__(self, key, index=0, **kwargs):
super(MP4TupleStorageStyle, self).__init__(key, **kwargs)
self.index = index
def deserialize(self, mutagen_value):
items = mutagen_value or []
packing_length = 2
return list(items) + [0] * (packing_length - len(items))
def get(self, mutagen_file):
value = super(MP4TupleStorageStyle, self).get(mutagen_file)[self.index]
if value == 0:
# The values are always present and saved as integers. So we
# assume that "0" indicates it is not set.
return None
else:
return value
def set(self, mutagen_file, value):
if value is None:
value = 0
items = self.deserialize(self.fetch(mutagen_file))
items[self.index] = int(value)
self.store(mutagen_file, items)
def delete(self, mutagen_file):
if self.index == 0:
super(MP4TupleStorageStyle, self).delete(mutagen_file)
else:
self.set(mutagen_file, None)
class MP4ListStorageStyle(ListStorageStyle, MP4StorageStyle):
pass
class MP4SoundCheckStorageStyle(SoundCheckStorageStyleMixin, MP4StorageStyle):
def __init__(self, key, index=0, **kwargs):
super(MP4SoundCheckStorageStyle, self).__init__(key, **kwargs)
self.index = index
class MP4BoolStorageStyle(MP4StorageStyle):
"""A style for booleans in MPEG-4 files. (MPEG-4 has an atom type
specifically for representing booleans.)
"""
def get(self, mutagen_file):
try:
return mutagen_file[self.key]
except KeyError:
return None
def get_list(self, mutagen_file):
raise NotImplementedError(u'MP4 bool storage does not support lists')
def set(self, mutagen_file, value):
mutagen_file[self.key] = value
def set_list(self, mutagen_file, values):
raise NotImplementedError(u'MP4 bool storage does not support lists')
class MP4ImageStorageStyle(MP4ListStorageStyle):
"""Store images as MPEG-4 image atoms. Values are `Image` objects.
"""
def __init__(self, **kwargs):
super(MP4ImageStorageStyle, self).__init__(key='covr', **kwargs)
def deserialize(self, data):
return Image(data)
def serialize(self, image):
if image.mime_type == 'image/png':
kind = mutagen.mp4.MP4Cover.FORMAT_PNG
elif image.mime_type == 'image/jpeg':
kind = mutagen.mp4.MP4Cover.FORMAT_JPEG
else:
            raise ValueError(u'MP4 files only support PNG and JPEG images')
return mutagen.mp4.MP4Cover(image.data, kind)
class MP3StorageStyle(StorageStyle):
"""Store data in ID3 frames.
"""
formats = ['MP3', 'AIFF', 'DSF', 'WAVE']
def __init__(self, key, id3_lang=None, **kwargs):
"""Create a new ID3 storage style. `id3_lang` is the value for
the language field of newly created frames.
"""
self.id3_lang = id3_lang
super(MP3StorageStyle, self).__init__(key, **kwargs)
def fetch(self, mutagen_file):
try:
return mutagen_file[self.key].text[0]
except (KeyError, IndexError):
return None
def store(self, mutagen_file, value):
frame = mutagen.id3.Frames[self.key](encoding=3, text=[value])
mutagen_file.tags.setall(self.key, [frame])
class MP3PeopleStorageStyle(MP3StorageStyle):
"""Store list of people in ID3 frames.
"""
def __init__(self, key, involvement='', **kwargs):
self.involvement = involvement
super(MP3PeopleStorageStyle, self).__init__(key, **kwargs)
def store(self, mutagen_file, value):
frames = mutagen_file.tags.getall(self.key)
# Try modifying in place.
found = False
for frame in frames:
if frame.encoding == mutagen.id3.Encoding.UTF8:
for pair in frame.people:
if pair[0].lower() == self.involvement.lower():
pair[1] = value
found = True
# Try creating a new frame.
if not found:
frame = mutagen.id3.Frames[self.key](
encoding=mutagen.id3.Encoding.UTF8,
people=[[self.involvement, value]]
)
mutagen_file.tags.add(frame)
def fetch(self, mutagen_file):
for frame in mutagen_file.tags.getall(self.key):
for pair in frame.people:
if pair[0].lower() == self.involvement.lower():
try:
return pair[1]
except IndexError:
return None
class MP3ListStorageStyle(ListStorageStyle, MP3StorageStyle):
"""Store lists of data in multiple ID3 frames.
"""
def fetch(self, mutagen_file):
try:
return mutagen_file[self.key].text
except KeyError:
return []
def store(self, mutagen_file, values):
frame = mutagen.id3.Frames[self.key](encoding=3, text=values)
mutagen_file.tags.setall(self.key, [frame])
class MP3UFIDStorageStyle(MP3StorageStyle):
"""Store string data in a UFID ID3 frame with a particular owner.
"""
def __init__(self, owner, **kwargs):
self.owner = owner
super(MP3UFIDStorageStyle, self).__init__('UFID:' + owner, **kwargs)
def fetch(self, mutagen_file):
try:
return mutagen_file[self.key].data
except KeyError:
return None
def store(self, mutagen_file, value):
# This field type stores text data as encoded data.
assert isinstance(value, six.text_type)
value = value.encode('utf-8')
frames = mutagen_file.tags.getall(self.key)
for frame in frames:
# Replace existing frame data.
if frame.owner == self.owner:
frame.data = value
else:
# New frame.
frame = mutagen.id3.UFID(owner=self.owner, data=value)
mutagen_file.tags.setall(self.key, [frame])
class MP3DescStorageStyle(MP3StorageStyle):
"""Store data in a TXXX (or similar) ID3 frame. The frame is
selected based its ``desc`` field.
``attr`` allows to specify name of data accessor property in the frame.
Most of frames use `text`.
``multispec`` specifies if frame data is ``mutagen.id3.MultiSpec``
which means that the data is being packed in the list.
"""
def __init__(self, desc=u'', key='TXXX', attr='text', multispec=True,
**kwargs):
assert isinstance(desc, six.text_type)
self.description = desc
self.attr = attr
self.multispec = multispec
super(MP3DescStorageStyle, self).__init__(key=key, **kwargs)
def store(self, mutagen_file, value):
frames = mutagen_file.tags.getall(self.key)
if self.multispec:
value = [value]
# Try modifying in place.
found = False
for frame in frames:
if frame.desc.lower() == self.description.lower():
setattr(frame, self.attr, value)
frame.encoding = mutagen.id3.Encoding.UTF8
found = True
# Try creating a new frame.
if not found:
frame = mutagen.id3.Frames[self.key](
desc=self.description,
encoding=mutagen.id3.Encoding.UTF8,
**{self.attr: value}
)
if self.id3_lang:
frame.lang = self.id3_lang
mutagen_file.tags.add(frame)
def fetch(self, mutagen_file):
for frame in mutagen_file.tags.getall(self.key):
if frame.desc.lower() == self.description.lower():
if not self.multispec:
return getattr(frame, self.attr)
try:
return getattr(frame, self.attr)[0]
except IndexError:
return None
def delete(self, mutagen_file):
found_frame = None
for frame in mutagen_file.tags.getall(self.key):
if frame.desc.lower() == self.description.lower():
found_frame = frame
break
if found_frame is not None:
            del mutagen_file[found_frame.HashKey]
class MP3ListDescStorageStyle(MP3DescStorageStyle, ListStorageStyle):
def __init__(self, desc=u'', key='TXXX', split_v23=False, **kwargs):
self.split_v23 = split_v23
super(MP3ListDescStorageStyle, self).__init__(
desc=desc, key=key, **kwargs
)
def fetch(self, mutagen_file):
for frame in mutagen_file.tags.getall(self.key):
if frame.desc.lower() == self.description.lower():
if mutagen_file.tags.version == (2, 3, 0) and self.split_v23:
return sum((el.split('/') for el in frame.text), [])
else:
return frame.text
return []
def store(self, mutagen_file, values):
self.delete(mutagen_file)
frame = mutagen.id3.Frames[self.key](
desc=self.description,
text=values,
encoding=mutagen.id3.Encoding.UTF8,
)
if self.id3_lang:
frame.lang = self.id3_lang
mutagen_file.tags.add(frame)
class MP3SlashPackStorageStyle(MP3StorageStyle):
"""Store value as part of pair that is serialized as a slash-
separated string.
"""
def __init__(self, key, pack_pos=0, **kwargs):
super(MP3SlashPackStorageStyle, self).__init__(key, **kwargs)
self.pack_pos = pack_pos
def _fetch_unpacked(self, mutagen_file):
data = self.fetch(mutagen_file)
if data:
items = six.text_type(data).split('/')
else:
items = []
packing_length = 2
return list(items) + [None] * (packing_length - len(items))
def get(self, mutagen_file):
return self._fetch_unpacked(mutagen_file)[self.pack_pos]
def set(self, mutagen_file, value):
items = self._fetch_unpacked(mutagen_file)
items[self.pack_pos] = value
if items[0] is None:
items[0] = ''
if items[1] is None:
items.pop() # Do not store last value
self.store(mutagen_file, '/'.join(map(six.text_type, items)))
def delete(self, mutagen_file):
if self.pack_pos == 0:
super(MP3SlashPackStorageStyle, self).delete(mutagen_file)
else:
self.set(mutagen_file, None)
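# Worked example (illustration only): with a frame value of u'3/12', a
# style built as MP3SlashPackStorageStyle('TRCK', pack_pos=0) reads u'3'
# while pack_pos=1 reads u'12'; setting the pack_pos=1 half to None stores
# plain u'3' again because the empty second component is dropped.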
class MP3ImageStorageStyle(ListStorageStyle, MP3StorageStyle):
"""Converts between APIC frames and ``Image`` instances.
The `get_list` method inherited from ``ListStorageStyle`` returns a
list of ``Image``s. Similarly, the `set_list` method accepts a
list of ``Image``s as its ``values`` argument.
"""
def __init__(self):
super(MP3ImageStorageStyle, self).__init__(key='APIC')
self.as_type = bytes
def deserialize(self, apic_frame):
"""Convert APIC frame into Image."""
return Image(data=apic_frame.data, desc=apic_frame.desc,
type=apic_frame.type)
def fetch(self, mutagen_file):
return mutagen_file.tags.getall(self.key)
def store(self, mutagen_file, frames):
mutagen_file.tags.setall(self.key, frames)
def delete(self, mutagen_file):
mutagen_file.tags.delall(self.key)
def serialize(self, image):
"""Return an APIC frame populated with data from ``image``.
"""
assert isinstance(image, Image)
frame = mutagen.id3.Frames[self.key]()
frame.data = image.data
frame.mime = image.mime_type
frame.desc = image.desc or u''
# For compatibility with OS X/iTunes prefer latin-1 if possible.
# See issue #899
try:
frame.desc.encode("latin-1")
except UnicodeEncodeError:
frame.encoding = mutagen.id3.Encoding.UTF16
else:
frame.encoding = mutagen.id3.Encoding.LATIN1
frame.type = image.type_index
return frame
class MP3SoundCheckStorageStyle(SoundCheckStorageStyleMixin,
MP3DescStorageStyle):
def __init__(self, index=0, **kwargs):
super(MP3SoundCheckStorageStyle, self).__init__(**kwargs)
self.index = index
class ASFImageStorageStyle(ListStorageStyle):
"""Store images packed into Windows Media/ASF byte array attributes.
Values are `Image` objects.
"""
formats = ['ASF']
def __init__(self):
super(ASFImageStorageStyle, self).__init__(key='WM/Picture')
def deserialize(self, asf_picture):
mime, data, type, desc = _unpack_asf_image(asf_picture.value)
return Image(data, desc=desc, type=type)
def serialize(self, image):
pic = mutagen.asf.ASFByteArrayAttribute()
pic.value = _pack_asf_image(image.mime_type, image.data,
type=image.type_index,
description=image.desc or u'')
return pic
class VorbisImageStorageStyle(ListStorageStyle):
"""Store images in Vorbis comments. Both legacy COVERART fields and
modern METADATA_BLOCK_PICTURE tags are supported. Data is
base64-encoded. Values are `Image` objects.
"""
formats = ['OggOpus', 'OggTheora', 'OggSpeex', 'OggVorbis',
'OggFlac']
def __init__(self):
super(VorbisImageStorageStyle, self).__init__(
key='metadata_block_picture'
)
self.as_type = bytes
def fetch(self, mutagen_file):
images = []
if 'metadata_block_picture' not in mutagen_file:
# Try legacy COVERART tags.
if 'coverart' in mutagen_file:
for data in mutagen_file['coverart']:
images.append(Image(base64.b64decode(data)))
return images
for data in mutagen_file["metadata_block_picture"]:
try:
pic = mutagen.flac.Picture(base64.b64decode(data))
except (TypeError, AttributeError):
continue
images.append(Image(data=pic.data, desc=pic.desc,
type=pic.type))
return images
def store(self, mutagen_file, image_data):
# Strip all art, including legacy COVERART.
if 'coverart' in mutagen_file:
del mutagen_file['coverart']
if 'coverartmime' in mutagen_file:
del mutagen_file['coverartmime']
super(VorbisImageStorageStyle, self).store(mutagen_file, image_data)
def serialize(self, image):
"""Turn a Image into a base64 encoded FLAC picture block.
"""
pic = mutagen.flac.Picture()
pic.data = image.data
pic.type = image.type_index
pic.mime = image.mime_type
pic.desc = image.desc or u''
# Encoding with base64 returns bytes on both Python 2 and 3.
# Mutagen requires the data to be a Unicode string, so we decode
# it before passing it along.
return base64.b64encode(pic.write()).decode('ascii')
class FlacImageStorageStyle(ListStorageStyle):
"""Converts between ``mutagen.flac.Picture`` and ``Image`` instances.
"""
formats = ['FLAC']
def __init__(self):
super(FlacImageStorageStyle, self).__init__(key='')
def fetch(self, mutagen_file):
return mutagen_file.pictures
def deserialize(self, flac_picture):
return Image(data=flac_picture.data, desc=flac_picture.desc,
type=flac_picture.type)
def store(self, mutagen_file, pictures):
"""``pictures`` is a list of mutagen.flac.Picture instances.
"""
mutagen_file.clear_pictures()
for pic in pictures:
mutagen_file.add_picture(pic)
def serialize(self, image):
"""Turn a Image into a mutagen.flac.Picture.
"""
pic = mutagen.flac.Picture()
pic.data = image.data
pic.type = image.type_index
pic.mime = image.mime_type
pic.desc = image.desc or u''
return pic
def delete(self, mutagen_file):
"""Remove all images from the file.
"""
mutagen_file.clear_pictures()
class APEv2ImageStorageStyle(ListStorageStyle):
"""Store images in APEv2 tags. Values are `Image` objects.
"""
formats = ['APEv2File', 'WavPack', 'Musepack', 'MonkeysAudio', 'OptimFROG']
TAG_NAMES = {
ImageType.other: 'Cover Art (other)',
ImageType.icon: 'Cover Art (icon)',
ImageType.other_icon: 'Cover Art (other icon)',
ImageType.front: 'Cover Art (front)',
ImageType.back: 'Cover Art (back)',
ImageType.leaflet: 'Cover Art (leaflet)',
ImageType.media: 'Cover Art (media)',
ImageType.lead_artist: 'Cover Art (lead)',
ImageType.artist: 'Cover Art (artist)',
ImageType.conductor: 'Cover Art (conductor)',
ImageType.group: 'Cover Art (band)',
ImageType.composer: 'Cover Art (composer)',
ImageType.lyricist: 'Cover Art (lyricist)',
ImageType.recording_location: 'Cover Art (studio)',
ImageType.recording_session: 'Cover Art (recording)',
ImageType.performance: 'Cover Art (performance)',
ImageType.screen_capture: 'Cover Art (movie scene)',
ImageType.fish: 'Cover Art (colored fish)',
ImageType.illustration: 'Cover Art (illustration)',
ImageType.artist_logo: 'Cover Art (band logo)',
ImageType.publisher_logo: 'Cover Art (publisher logo)',
}
def __init__(self):
super(APEv2ImageStorageStyle, self).__init__(key='')
def fetch(self, mutagen_file):
images = []
for cover_type, cover_tag in self.TAG_NAMES.items():
try:
frame = mutagen_file[cover_tag]
text_delimiter_index = frame.value.find(b'\x00')
if text_delimiter_index > 0:
comment = frame.value[0:text_delimiter_index]
comment = comment.decode('utf-8', 'replace')
else:
comment = None
image_data = frame.value[text_delimiter_index + 1:]
images.append(Image(data=image_data, type=cover_type,
desc=comment))
except KeyError:
pass
return images
def set_list(self, mutagen_file, values):
self.delete(mutagen_file)
for image in values:
image_type = image.type or ImageType.other
comment = image.desc or ''
image_data = comment.encode('utf-8') + b'\x00' + image.data
cover_tag = self.TAG_NAMES[image_type]
mutagen_file[cover_tag] = image_data
def delete(self, mutagen_file):
"""Remove all images from the file.
"""
for cover_tag in self.TAG_NAMES.values():
try:
del mutagen_file[cover_tag]
except KeyError:
pass
# MediaField is a descriptor that represents a single logical field. It
# aggregates several StorageStyles describing how to access the data for
# each file type.
class MediaField(object):
"""A descriptor providing access to a particular (abstract) metadata
field.
"""
def __init__(self, *styles, **kwargs):
"""Creates a new MediaField.
:param styles: `StorageStyle` instances that describe the strategy
for reading and writing the field in particular
formats. There must be at least one style for
each possible file format.
:param out_type: the type of the value that should be returned when
getting this property.
"""
self.out_type = kwargs.get('out_type', six.text_type)
self._styles = styles
def styles(self, mutagen_file):
"""Yields the list of storage styles of this field that can
handle the MediaFile's format.
"""
for style in self._styles:
if mutagen_file.__class__.__name__ in style.formats:
yield style
def __get__(self, mediafile, owner=None):
out = None
for style in self.styles(mediafile.mgfile):
out = style.get(mediafile.mgfile)
if out:
break
return _safe_cast(self.out_type, out)
def __set__(self, mediafile, value):
if value is None:
value = self._none_value()
for style in self.styles(mediafile.mgfile):
if not style.read_only:
style.set(mediafile.mgfile, value)
def __delete__(self, mediafile):
for style in self.styles(mediafile.mgfile):
style.delete(mediafile.mgfile)
def _none_value(self):
"""Get an appropriate "null" value for this field's type. This
is used internally when setting the field to None.
"""
if self.out_type == int:
return 0
elif self.out_type == float:
return 0.0
elif self.out_type == bool:
return False
elif self.out_type == six.text_type:
return u''
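# For example (illustration only), a field is typically declared on the
# MediaFile class further below roughly like this, with one storage style
# per tag family:
#     title = MediaField(
#         MP3StorageStyle('TIT2'),
#         MP4StorageStyle('\xa9nam'),
#         StorageStyle('TITLE'),
#         ASFStorageStyle('Title'),
#     )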
class ListMediaField(MediaField):
"""Property descriptor that retrieves a list of multiple values from
a tag.
    Uses the ``get_list`` and ``set_list`` methods of its ``StorageStyle``
strategies to do the actual work.
"""
def __get__(self, mediafile, _):
values = []
for style in self.styles(mediafile.mgfile):
values.extend(style.get_list(mediafile.mgfile))
return [_safe_cast(self.out_type, value) for value in values]
def __set__(self, mediafile, values):
for style in self.styles(mediafile.mgfile):
if not style.read_only:
style.set_list(mediafile.mgfile, values)
def single_field(self):
"""Returns a ``MediaField`` descriptor that gets and sets the
first item.
"""
options = {'out_type': self.out_type}
return MediaField(*self._styles, **options)
class DateField(MediaField):
"""Descriptor that handles serializing and deserializing dates
The getter parses value from tags into a ``datetime.date`` instance
and setter serializes such an instance into a string.
For granular access to year, month, and day, use the ``*_field``
methods to create corresponding `DateItemField`s.
"""
def __init__(self, *date_styles, **kwargs):
"""``date_styles`` is a list of ``StorageStyle``s to store and
retrieve the whole date from. The ``year`` option is an
additional list of fallback styles for the year. The year is
always set on this style, but is only retrieved if the main
storage styles do not return a value.
"""
super(DateField, self).__init__(*date_styles)
year_style = kwargs.get('year', None)
if year_style:
self._year_field = MediaField(*year_style)
def __get__(self, mediafile, owner=None):
year, month, day = self._get_date_tuple(mediafile)
if not year:
return None
try:
return datetime.date(
year,
month or 1,
day or 1
)
except ValueError: # Out of range values.
return None
def __set__(self, mediafile, date):
if date is None:
self._set_date_tuple(mediafile, None, None, None)
else:
self._set_date_tuple(mediafile, date.year, date.month, date.day)
def __delete__(self, mediafile):
super(DateField, self).__delete__(mediafile)
if hasattr(self, '_year_field'):
self._year_field.__delete__(mediafile)
def _get_date_tuple(self, mediafile):
"""Get a 3-item sequence representing the date consisting of a
year, month, and day number. Each number is either an integer or
None.
"""
# Get the underlying data and split on hyphens and slashes.
datestring = super(DateField, self).__get__(mediafile, None)
if isinstance(datestring, six.string_types):
datestring = re.sub(r'[Tt ].*$', '', six.text_type(datestring))
items = re.split('[-/]', six.text_type(datestring))
else:
items = []
# Ensure that we have exactly 3 components, possibly by
# truncating or padding.
items = items[:3]
if len(items) < 3:
items += [None] * (3 - len(items))
# Use year field if year is missing.
if not items[0] and hasattr(self, '_year_field'):
items[0] = self._year_field.__get__(mediafile)
# Convert each component to an integer if possible.
items_ = []
for item in items:
try:
items_.append(int(item))
except (TypeError, ValueError):
items_.append(None)
return items_
def _set_date_tuple(self, mediafile, year, month=None, day=None):
"""Set the value of the field given a year, month, and day
number. Each number can be an integer or None to indicate an
unset component.
"""
if year is None:
self.__delete__(mediafile)
return
date = [u'{0:04d}'.format(int(year))]
if month:
date.append(u'{0:02d}'.format(int(month)))
if month and day:
date.append(u'{0:02d}'.format(int(day)))
date = map(six.text_type, date)
super(DateField, self).__set__(mediafile, u'-'.join(date))
if hasattr(self, '_year_field'):
self._year_field.__set__(mediafile, year)
def year_field(self):
return DateItemField(self, 0)
def month_field(self):
return DateItemField(self, 1)
def day_field(self):
return DateItemField(self, 2)
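# Worked example (illustration only): a raw tag value of
# u'2014-02-05T10:20:00' is stripped of its time part, split on '-', and
# parsed into datetime.date(2014, 2, 5); a bare u'2014' yields
# datetime.date(2014, 1, 1) because missing month/day components default
# to 1 in __get__ above.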
class DateItemField(MediaField):
"""Descriptor that gets and sets constituent parts of a `DateField`:
the month, day, or year.
"""
def __init__(self, date_field, item_pos):
self.date_field = date_field
self.item_pos = item_pos
def __get__(self, mediafile, _):
return self.date_field._get_date_tuple(mediafile)[self.item_pos]
def __set__(self, mediafile, value):
items = self.date_field._get_date_tuple(mediafile)
items[self.item_pos] = value
self.date_field._set_date_tuple(mediafile, *items)
def __delete__(self, mediafile):
self.__set__(mediafile, None)
class CoverArtField(MediaField):
"""A descriptor that provides access to the *raw image data* for the
cover image on a file. This is used for backwards compatibility: the
full `ImageListField` provides richer `Image` objects.
When there are multiple images we try to pick the most likely to be a front
cover.
"""
def __init__(self):
pass
def __get__(self, mediafile, _):
candidates = mediafile.images
if candidates:
return self.guess_cover_image(candidates).data
else:
return None
@staticmethod
def guess_cover_image(candidates):
if len(candidates) == 1:
return candidates[0]
try:
return next(c for c in candidates if c.type == ImageType.front)
except StopIteration:
return candidates[0]
def __set__(self, mediafile, data):
if data:
mediafile.images = [Image(data=data)]
else:
mediafile.images = []
def __delete__(self, mediafile):
delattr(mediafile, 'images')
class QNumberField(MediaField):
"""Access integer-represented Q number fields.
Access a fixed-point fraction as a float. The stored value is shifted by
`fraction_bits` binary digits to the left and then rounded, yielding a
simple integer.
"""
def __init__(self, fraction_bits, *args, **kwargs):
super(QNumberField, self).__init__(out_type=int, *args, **kwargs)
self.__fraction_bits = fraction_bits
def __get__(self, mediafile, owner=None):
q_num = super(QNumberField, self).__get__(mediafile, owner)
if q_num is None:
return None
return q_num / pow(2, self.__fraction_bits)
def __set__(self, mediafile, value):
q_num = round(value * pow(2, self.__fraction_bits))
q_num = int(q_num) # needed for py2.7
super(QNumberField, self).__set__(mediafile, q_num)
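# Worked example (illustration only): with fraction_bits=16 the tag holds a
# Q16.16 fixed-point integer, so a stored value of 98304 reads back as
# 98304 / 2**16 == 1.5 and assigning 1.5 stores round(1.5 * 2**16) == 98304.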
class ImageListField(ListMediaField):
"""Descriptor to access the list of images embedded in tags.
The getter returns a list of `Image` instances obtained from
the tags. The setter accepts a list of `Image` instances to be
written to the tags.
"""
def __init__(self):
# The storage styles used here must implement the
# `ListStorageStyle` interface and get and set lists of
# `Image`s.
super(ImageListField, self).__init__(
MP3ImageStorageStyle(),
MP4ImageStorageStyle(),
ASFImageStorageStyle(),
VorbisImageStorageStyle(),
FlacImageStorageStyle(),
APEv2ImageStorageStyle(),
out_type=Image,
)
# MediaFile is a collection of fields.
class MediaFile(object):
"""Represents a multimedia file on disk and provides access to its
metadata.
"""
@loadfile()
def __init__(self, filething, id3v23=False):
"""Constructs a new `MediaFile` reflecting the provided file.
`filething` can be a path to a file (i.e., a string) or a
file-like object.
May throw `UnreadableFileError`.
By default, MP3 files are saved with ID3v2.4 tags. You can use
the older ID3v2.3 standard by specifying the `id3v23` option.
"""
self.filething = filething
self.mgfile = mutagen_call(
'open', self.filename, mutagen.File, filething
)
if self.mgfile is None:
# Mutagen couldn't guess the type
raise FileTypeError(self.filename)
elif type(self.mgfile).__name__ in ['M4A', 'MP4']:
info = self.mgfile.info
if info.codec and info.codec.startswith('alac'):
self.type = 'alac'
else:
self.type = 'aac'
elif type(self.mgfile).__name__ in ['ID3', 'MP3']:
self.type = 'mp3'
elif type(self.mgfile).__name__ == 'FLAC':
self.type = 'flac'
elif type(self.mgfile).__name__ == 'OggOpus':
self.type = 'opus'
elif type(self.mgfile).__name__ == 'OggVorbis':
self.type = 'ogg'
elif type(self.mgfile).__name__ == 'MonkeysAudio':
self.type = 'ape'
elif type(self.mgfile).__name__ == 'WavPack':
self.type = 'wv'
elif type(self.mgfile).__name__ == 'Musepack':
self.type = 'mpc'
elif type(self.mgfile).__name__ == 'ASF':
self.type = 'asf'
elif type(self.mgfile).__name__ == 'AIFF':
self.type = 'aiff'
elif type(self.mgfile).__name__ == 'DSF':
self.type = 'dsf'
elif type(self.mgfile).__name__ == 'WAVE':
self.type = 'wav'
else:
raise FileTypeError(self.filename, type(self.mgfile).__name__)
# Add a set of tags if it's missing.
if self.mgfile.tags is None:
self.mgfile.add_tags()
# Set the ID3v2.3 flag only for MP3s.
self.id3v23 = id3v23 and self.type == 'mp3'
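    # A minimal construction sketch for this class; the path is hypothetical,
    # and a file-like object may be passed instead of a path as the docstring
    # above notes:
    #
    #     mf = MediaFile('/music/track.mp3', id3v23=True)
    #     print(mf.type, mf.format, mf.length)   # e.g. 'mp3', 'MP3', 241.3
    #     mf.title = u'New Title'
    #     mf.save()                              # written with ID3v2.3 tags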
@property
def filename(self):
"""The name of the file.
This is the path if this object was opened from the filesystem,
or the name of the file-like object.
"""
return self.filething.name
@filename.setter
def filename(self, val):
"""Silently skips setting filename.
Workaround for `mutagen._util._openfile` setting instance's filename.
"""
pass
@property
def path(self):
"""The path to the file.
This is `None` if the data comes from a file-like object instead
of a filesystem path.
"""
return self.filething.filename
@property
def filesize(self):
"""The size (in bytes) of the underlying file.
"""
if self.filething.filename:
return os.path.getsize(self.filething.filename)
if hasattr(self.filething.fileobj, '__len__'):
return len(self.filething.fileobj)
else:
tell = self.filething.fileobj.tell()
filesize = self.filething.fileobj.seek(0, 2)
self.filething.fileobj.seek(tell)
return filesize
def save(self, **kwargs):
"""Write the object's tags back to the file.
May throw `UnreadableFileError`. Accepts keyword arguments to be
passed to Mutagen's `save` function.
"""
# Possibly save the tags to ID3v2.3.
if self.id3v23:
id3 = self.mgfile
if hasattr(id3, 'tags'):
# In case this is an MP3 object, not an ID3 object.
id3 = id3.tags
id3.update_to_v23()
kwargs['v2_version'] = 3
mutagen_call('save', self.filename, self.mgfile.save,
_update_filething(self.filething), **kwargs)
def delete(self):
"""Remove the current metadata tag from the file. May
throw `UnreadableFileError`.
"""
mutagen_call('delete', self.filename, self.mgfile.delete,
_update_filething(self.filething))
# Convenient access to the set of available fields.
@classmethod
def fields(cls):
"""Get the names of all writable properties that reflect
metadata tags (i.e., those that are instances of
:class:`MediaField`).
"""
for property, descriptor in cls.__dict__.items():
if isinstance(descriptor, MediaField):
if isinstance(property, bytes):
# On Python 2, class field names are bytes. This method
# produces text strings.
yield property.decode('utf8', 'ignore')
else:
yield property
@classmethod
def _field_sort_name(cls, name):
"""Get a sort key for a field name that determines the order
fields should be written in.
Fields names are kept unchanged, unless they are instances of
:class:`DateItemField`, in which case `year`, `month`, and `day`
are replaced by `date0`, `date1`, and `date2`, respectively, to
make them appear in that order.
"""
if isinstance(cls.__dict__[name], DateItemField):
name = re.sub('year', 'date0', name)
name = re.sub('month', 'date1', name)
name = re.sub('day', 'date2', name)
return name
@classmethod
def sorted_fields(cls):
"""Get the names of all writable metadata fields, sorted in the
order that they should be written.
This is a lexicographic order, except for instances of
:class:`DateItemField`, which are sorted in year-month-day
order.
"""
for property in sorted(cls.fields(), key=cls._field_sort_name):
yield property
@classmethod
def readable_fields(cls):
"""Get all metadata fields: the writable ones from
:meth:`fields` and also other audio properties.
"""
for property in cls.fields():
yield property
for property in ('length', 'samplerate', 'bitdepth', 'bitrate',
'bitrate_mode', 'channels', 'encoder_info',
'encoder_settings', 'format'):
yield property
@classmethod
def add_field(cls, name, descriptor):
"""Add a field to store custom tags.
:param name: the name of the property the field is accessed
through. It must not already exist on this class.
:param descriptor: an instance of :class:`MediaField`.
"""
if not isinstance(descriptor, MediaField):
raise ValueError(
u'{0} must be an instance of MediaField'.format(descriptor))
if name in cls.__dict__:
raise ValueError(
u'property "{0}" already exists on MediaFile'.format(name))
setattr(cls, name, descriptor)
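    # An illustrative sketch of registering a custom tag through add_field();
    # the field name and tag keys are hypothetical:
    #
    #     MediaFile.add_field('catalogue_note', MediaField(
    #         MP3DescStorageStyle(u'CATALOGUE_NOTE'),
    #         MP4StorageStyle('----:com.apple.iTunes:CATALOGUE_NOTE'),
    #         StorageStyle('CATALOGUE_NOTE'),
    #         ASFStorageStyle('beets/Catalogue Note'),
    #     ))
    #     mf.catalogue_note = u'first pressing'
    #     mf.save()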
def update(self, dict):
"""Set all field values from a dictionary.
For any key in `dict` that is also a field to store tags the
method retrieves the corresponding value from `dict` and updates
the `MediaFile`. If a key has the value `None`, the
corresponding property is deleted from the `MediaFile`.
"""
for field in self.sorted_fields():
if field in dict:
if dict[field] is None:
delattr(self, field)
else:
setattr(self, field, dict[field])
def as_dict(self):
"""Get a dictionary with all writable properties that reflect
metadata tags (i.e., those that are instances of
:class:`MediaField`).
"""
return dict((x, getattr(self, x)) for x in self.fields())
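    # An illustrative round trip through update() and as_dict() above
    # (field values are hypothetical):
    #
    #     tags = mf.as_dict()                              # e.g. {'title': u'Old', ...}
    #     mf.update({'title': u'New', 'comments': None})   # None deletes the field
    #     mf.save()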
# Field definitions.
title = MediaField(
MP3StorageStyle('TIT2'),
MP4StorageStyle('\xa9nam'),
StorageStyle('TITLE'),
ASFStorageStyle('Title'),
)
artist = MediaField(
MP3StorageStyle('TPE1'),
MP4StorageStyle('\xa9ART'),
StorageStyle('ARTIST'),
ASFStorageStyle('Author'),
)
artists = ListMediaField(
MP3ListDescStorageStyle(desc=u'ARTISTS'),
MP4ListStorageStyle('----:com.apple.iTunes:ARTISTS'),
ListStorageStyle('ARTISTS'),
ASFStorageStyle('WM/ARTISTS'),
)
album = MediaField(
MP3StorageStyle('TALB'),
MP4StorageStyle('\xa9alb'),
StorageStyle('ALBUM'),
ASFStorageStyle('WM/AlbumTitle'),
)
genres = ListMediaField(
MP3ListStorageStyle('TCON'),
MP4ListStorageStyle('\xa9gen'),
ListStorageStyle('GENRE'),
ASFStorageStyle('WM/Genre'),
)
genre = genres.single_field()
lyricist = MediaField(
MP3StorageStyle('TEXT'),
MP4StorageStyle('----:com.apple.iTunes:LYRICIST'),
StorageStyle('LYRICIST'),
ASFStorageStyle('WM/Writer'),
)
composer = MediaField(
MP3StorageStyle('TCOM'),
MP4StorageStyle('\xa9wrt'),
StorageStyle('COMPOSER'),
ASFStorageStyle('WM/Composer'),
)
composer_sort = MediaField(
MP3StorageStyle('TSOC'),
MP4StorageStyle('soco'),
StorageStyle('COMPOSERSORT'),
ASFStorageStyle('WM/Composersortorder'),
)
arranger = MediaField(
MP3PeopleStorageStyle('TIPL', involvement='arranger'),
MP4StorageStyle('----:com.apple.iTunes:Arranger'),
StorageStyle('ARRANGER'),
ASFStorageStyle('beets/Arranger'),
)
grouping = MediaField(
MP3StorageStyle('TIT1'),
MP4StorageStyle('\xa9grp'),
StorageStyle('GROUPING'),
ASFStorageStyle('WM/ContentGroupDescription'),
)
track = MediaField(
MP3SlashPackStorageStyle('TRCK', pack_pos=0),
MP4TupleStorageStyle('trkn', index=0),
StorageStyle('TRACK'),
StorageStyle('TRACKNUMBER'),
ASFStorageStyle('WM/TrackNumber'),
out_type=int,
)
tracktotal = MediaField(
MP3SlashPackStorageStyle('TRCK', pack_pos=1),
MP4TupleStorageStyle('trkn', index=1),
StorageStyle('TRACKTOTAL'),
StorageStyle('TRACKC'),
StorageStyle('TOTALTRACKS'),
ASFStorageStyle('TotalTracks'),
out_type=int,
)
disc = MediaField(
MP3SlashPackStorageStyle('TPOS', pack_pos=0),
MP4TupleStorageStyle('disk', index=0),
StorageStyle('DISC'),
StorageStyle('DISCNUMBER'),
ASFStorageStyle('WM/PartOfSet'),
out_type=int,
)
disctotal = MediaField(
MP3SlashPackStorageStyle('TPOS', pack_pos=1),
MP4TupleStorageStyle('disk', index=1),
StorageStyle('DISCTOTAL'),
StorageStyle('DISCC'),
StorageStyle('TOTALDISCS'),
ASFStorageStyle('TotalDiscs'),
out_type=int,
)
url = MediaField(
MP3DescStorageStyle(key='WXXX', attr='url', multispec=False),
MP4StorageStyle('\xa9url'),
StorageStyle('URL'),
ASFStorageStyle('WM/URL'),
)
lyrics = MediaField(
MP3DescStorageStyle(key='USLT', multispec=False),
MP4StorageStyle('\xa9lyr'),
StorageStyle('LYRICS'),
ASFStorageStyle('WM/Lyrics'),
)
comments = MediaField(
MP3DescStorageStyle(key='COMM'),
MP4StorageStyle('\xa9cmt'),
StorageStyle('DESCRIPTION'),
StorageStyle('COMMENT'),
ASFStorageStyle('WM/Comments'),
ASFStorageStyle('Description')
)
copyright = MediaField(
MP3StorageStyle('TCOP'),
MP4StorageStyle('cprt'),
StorageStyle('COPYRIGHT'),
ASFStorageStyle('Copyright'),
)
bpm = MediaField(
MP3StorageStyle('TBPM'),
MP4StorageStyle('tmpo', as_type=int),
StorageStyle('BPM'),
ASFStorageStyle('WM/BeatsPerMinute'),
out_type=int,
)
comp = MediaField(
MP3StorageStyle('TCMP'),
MP4BoolStorageStyle('cpil'),
StorageStyle('COMPILATION'),
ASFStorageStyle('WM/IsCompilation', as_type=bool),
out_type=bool,
)
albumartist = MediaField(
MP3StorageStyle('TPE2'),
MP4StorageStyle('aART'),
StorageStyle('ALBUM ARTIST'),
StorageStyle('ALBUMARTIST'),
ASFStorageStyle('WM/AlbumArtist'),
)
albumartists = ListMediaField(
MP3ListDescStorageStyle(desc=u'ALBUMARTISTS'),
MP4ListStorageStyle('----:com.apple.iTunes:ALBUMARTISTS'),
ListStorageStyle('ALBUMARTISTS'),
ASFStorageStyle('WM/AlbumArtists'),
)
albumtype = MediaField(
MP3DescStorageStyle(u'MusicBrainz Album Type'),
MP4StorageStyle('----:com.apple.iTunes:MusicBrainz Album Type'),
StorageStyle('RELEASETYPE'),
StorageStyle('MUSICBRAINZ_ALBUMTYPE'),
ASFStorageStyle('MusicBrainz/Album Type'),
)
label = MediaField(
MP3StorageStyle('TPUB'),
MP4StorageStyle('----:com.apple.iTunes:LABEL'),
MP4StorageStyle('----:com.apple.iTunes:publisher'),
MP4StorageStyle('----:com.apple.iTunes:Label', read_only=True),
StorageStyle('LABEL'),
StorageStyle('PUBLISHER'), # Traktor
ASFStorageStyle('WM/Publisher'),
)
artist_sort = MediaField(
MP3StorageStyle('TSOP'),
MP4StorageStyle('soar'),
StorageStyle('ARTISTSORT'),
ASFStorageStyle('WM/ArtistSortOrder'),
)
albumartist_sort = MediaField(
MP3DescStorageStyle(u'ALBUMARTISTSORT'),
MP4StorageStyle('soaa'),
StorageStyle('ALBUMARTISTSORT'),
ASFStorageStyle('WM/AlbumArtistSortOrder'),
)
asin = MediaField(
MP3DescStorageStyle(u'ASIN'),
MP4StorageStyle('----:com.apple.iTunes:ASIN'),
StorageStyle('ASIN'),
ASFStorageStyle('MusicBrainz/ASIN'),
)
catalognum = MediaField(
MP3DescStorageStyle(u'CATALOGNUMBER'),
MP4StorageStyle('----:com.apple.iTunes:CATALOGNUMBER'),
StorageStyle('CATALOGNUMBER'),
ASFStorageStyle('WM/CatalogNo'),
)
barcode = MediaField(
MP3DescStorageStyle(u'BARCODE'),
MP4StorageStyle('----:com.apple.iTunes:BARCODE'),
StorageStyle('BARCODE'),
StorageStyle('UPC', read_only=True),
StorageStyle('EAN/UPN', read_only=True),
StorageStyle('EAN', read_only=True),
StorageStyle('UPN', read_only=True),
ASFStorageStyle('WM/Barcode'),
)
isrc = MediaField(
MP3StorageStyle(u'TSRC'),
MP4StorageStyle('----:com.apple.iTunes:ISRC'),
StorageStyle('ISRC'),
ASFStorageStyle('WM/ISRC'),
)
disctitle = MediaField(
MP3StorageStyle('TSST'),
MP4StorageStyle('----:com.apple.iTunes:DISCSUBTITLE'),
StorageStyle('DISCSUBTITLE'),
ASFStorageStyle('WM/SetSubTitle'),
)
encoder = MediaField(
MP3StorageStyle('TENC'),
MP4StorageStyle('\xa9too'),
StorageStyle('ENCODEDBY'),
StorageStyle('ENCODER'),
ASFStorageStyle('WM/EncodedBy'),
)
script = MediaField(
MP3DescStorageStyle(u'Script'),
MP4StorageStyle('----:com.apple.iTunes:SCRIPT'),
StorageStyle('SCRIPT'),
ASFStorageStyle('WM/Script'),
)
language = MediaField(
MP3StorageStyle('TLAN'),
MP4StorageStyle('----:com.apple.iTunes:LANGUAGE'),
StorageStyle('LANGUAGE'),
ASFStorageStyle('WM/Language'),
)
country = MediaField(
MP3DescStorageStyle(u'MusicBrainz Album Release Country'),
MP4StorageStyle('----:com.apple.iTunes:MusicBrainz '
'Album Release Country'),
StorageStyle('RELEASECOUNTRY'),
ASFStorageStyle('MusicBrainz/Album Release Country'),
)
albumstatus = MediaField(
MP3DescStorageStyle(u'MusicBrainz Album Status'),
MP4StorageStyle('----:com.apple.iTunes:MusicBrainz Album Status'),
StorageStyle('RELEASESTATUS'),
StorageStyle('MUSICBRAINZ_ALBUMSTATUS'),
ASFStorageStyle('MusicBrainz/Album Status'),
)
media = MediaField(
MP3StorageStyle('TMED'),
MP4StorageStyle('----:com.apple.iTunes:MEDIA'),
StorageStyle('MEDIA'),
ASFStorageStyle('WM/Media'),
)
albumdisambig = MediaField(
# This tag mapping was invented for beets (not used by Picard, etc).
MP3DescStorageStyle(u'MusicBrainz Album Comment'),
MP4StorageStyle('----:com.apple.iTunes:MusicBrainz Album Comment'),
StorageStyle('MUSICBRAINZ_ALBUMCOMMENT'),
ASFStorageStyle('MusicBrainz/Album Comment'),
)
# Release date.
date = DateField(
MP3StorageStyle('TDRC'),
MP4StorageStyle('\xa9day'),
StorageStyle('DATE'),
ASFStorageStyle('WM/Year'),
year=(StorageStyle('YEAR'),))
year = date.year_field()
month = date.month_field()
day = date.day_field()
# *Original* release date.
original_date = DateField(
MP3StorageStyle('TDOR'),
MP4StorageStyle('----:com.apple.iTunes:ORIGINAL YEAR'),
StorageStyle('ORIGINALDATE'),
ASFStorageStyle('WM/OriginalReleaseYear'))
original_year = original_date.year_field()
original_month = original_date.month_field()
original_day = original_date.day_field()
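    # A minimal sketch of the date descriptors above (values are hypothetical):
    # DateField accepts and returns datetime.date objects, while the
    # year/month/day items are plain integers.
    #
    #     import datetime
    #     mf.date = datetime.date(2001, 6, 30)   # sets year, month and day at once
    #     mf.year                                # 2001
    #     mf.original_year = 1999                # only the original release year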
# Nonstandard metadata.
artist_credit = MediaField(
MP3DescStorageStyle(u'Artist Credit'),
MP4StorageStyle('----:com.apple.iTunes:Artist Credit'),
StorageStyle('ARTIST_CREDIT'),
ASFStorageStyle('beets/Artist Credit'),
)
albumartist_credit = MediaField(
MP3DescStorageStyle(u'Album Artist Credit'),
MP4StorageStyle('----:com.apple.iTunes:Album Artist Credit'),
StorageStyle('ALBUMARTIST_CREDIT'),
ASFStorageStyle('beets/Album Artist Credit'),
)
# Legacy album art field
art = CoverArtField()
# Image list
images = ImageListField()
# MusicBrainz IDs.
mb_trackid = MediaField(
MP3UFIDStorageStyle(owner='http://musicbrainz.org'),
MP4StorageStyle('----:com.apple.iTunes:MusicBrainz Track Id'),
StorageStyle('MUSICBRAINZ_TRACKID'),
ASFStorageStyle('MusicBrainz/Track Id'),
)
mb_releasetrackid = MediaField(
MP3DescStorageStyle(u'MusicBrainz Release Track Id'),
MP4StorageStyle('----:com.apple.iTunes:MusicBrainz Release Track Id'),
StorageStyle('MUSICBRAINZ_RELEASETRACKID'),
ASFStorageStyle('MusicBrainz/Release Track Id'),
)
mb_workid = MediaField(
MP3DescStorageStyle(u'MusicBrainz Work Id'),
MP4StorageStyle('----:com.apple.iTunes:MusicBrainz Work Id'),
StorageStyle('MUSICBRAINZ_WORKID'),
ASFStorageStyle('MusicBrainz/Work Id'),
)
mb_albumid = MediaField(
MP3DescStorageStyle(u'MusicBrainz Album Id'),
MP4StorageStyle('----:com.apple.iTunes:MusicBrainz Album Id'),
StorageStyle('MUSICBRAINZ_ALBUMID'),
ASFStorageStyle('MusicBrainz/Album Id'),
)
mb_artistids = ListMediaField(
MP3ListDescStorageStyle(u'MusicBrainz Artist Id', split_v23=True),
MP4ListStorageStyle('----:com.apple.iTunes:MusicBrainz Artist Id'),
ListStorageStyle('MUSICBRAINZ_ARTISTID'),
ASFStorageStyle('MusicBrainz/Artist Id'),
)
mb_artistid = mb_artistids.single_field()
mb_albumartistids = ListMediaField(
MP3ListDescStorageStyle(
u'MusicBrainz Album Artist Id',
split_v23=True,
),
MP4ListStorageStyle(
'----:com.apple.iTunes:MusicBrainz Album Artist Id',
),
ListStorageStyle('MUSICBRAINZ_ALBUMARTISTID'),
ASFStorageStyle('MusicBrainz/Album Artist Id'),
)
mb_albumartistid = mb_albumartistids.single_field()
mb_releasegroupid = MediaField(
MP3DescStorageStyle(u'MusicBrainz Release Group Id'),
MP4StorageStyle('----:com.apple.iTunes:MusicBrainz Release Group Id'),
StorageStyle('MUSICBRAINZ_RELEASEGROUPID'),
ASFStorageStyle('MusicBrainz/Release Group Id'),
)
# Acoustid fields.
acoustid_fingerprint = MediaField(
MP3DescStorageStyle(u'Acoustid Fingerprint'),
MP4StorageStyle('----:com.apple.iTunes:Acoustid Fingerprint'),
StorageStyle('ACOUSTID_FINGERPRINT'),
ASFStorageStyle('Acoustid/Fingerprint'),
)
acoustid_id = MediaField(
MP3DescStorageStyle(u'Acoustid Id'),
MP4StorageStyle('----:com.apple.iTunes:Acoustid Id'),
StorageStyle('ACOUSTID_ID'),
ASFStorageStyle('Acoustid/Id'),
)
# ReplayGain fields.
rg_track_gain = MediaField(
MP3DescStorageStyle(
u'REPLAYGAIN_TRACK_GAIN',
float_places=2, suffix=u' dB'
),
MP3DescStorageStyle(
u'replaygain_track_gain',
float_places=2, suffix=u' dB'
),
MP3SoundCheckStorageStyle(
key='COMM',
index=0, desc=u'iTunNORM',
id3_lang='eng'
),
MP4StorageStyle(
'----:com.apple.iTunes:replaygain_track_gain',
float_places=2, suffix=' dB'
),
MP4SoundCheckStorageStyle(
'----:com.apple.iTunes:iTunNORM',
index=0
),
StorageStyle(
u'REPLAYGAIN_TRACK_GAIN',
float_places=2, suffix=u' dB'
),
ASFStorageStyle(
u'replaygain_track_gain',
float_places=2, suffix=u' dB'
),
out_type=float
)
rg_album_gain = MediaField(
MP3DescStorageStyle(
u'REPLAYGAIN_ALBUM_GAIN',
float_places=2, suffix=u' dB'
),
MP3DescStorageStyle(
u'replaygain_album_gain',
float_places=2, suffix=u' dB'
),
MP4StorageStyle(
'----:com.apple.iTunes:replaygain_album_gain',
float_places=2, suffix=' dB'
),
StorageStyle(
u'REPLAYGAIN_ALBUM_GAIN',
float_places=2, suffix=u' dB'
),
ASFStorageStyle(
u'replaygain_album_gain',
float_places=2, suffix=u' dB'
),
out_type=float
)
rg_track_peak = MediaField(
MP3DescStorageStyle(
u'REPLAYGAIN_TRACK_PEAK',
float_places=6
),
MP3DescStorageStyle(
u'replaygain_track_peak',
float_places=6
),
MP3SoundCheckStorageStyle(
key=u'COMM',
index=1, desc=u'iTunNORM',
id3_lang='eng'
),
MP4StorageStyle(
'----:com.apple.iTunes:replaygain_track_peak',
float_places=6
),
MP4SoundCheckStorageStyle(
'----:com.apple.iTunes:iTunNORM',
index=1
),
StorageStyle(u'REPLAYGAIN_TRACK_PEAK', float_places=6),
ASFStorageStyle(u'replaygain_track_peak', float_places=6),
out_type=float,
)
rg_album_peak = MediaField(
MP3DescStorageStyle(
u'REPLAYGAIN_ALBUM_PEAK',
float_places=6
),
MP3DescStorageStyle(
u'replaygain_album_peak',
float_places=6
),
MP4StorageStyle(
'----:com.apple.iTunes:replaygain_album_peak',
float_places=6
),
StorageStyle(u'REPLAYGAIN_ALBUM_PEAK', float_places=6),
ASFStorageStyle(u'replaygain_album_peak', float_places=6),
out_type=float,
)
# EBU R128 fields.
r128_track_gain = QNumberField(
8,
MP3DescStorageStyle(
u'R128_TRACK_GAIN'
),
MP4StorageStyle(
'----:com.apple.iTunes:R128_TRACK_GAIN'
),
StorageStyle(
u'R128_TRACK_GAIN'
),
ASFStorageStyle(
u'R128_TRACK_GAIN'
),
)
r128_album_gain = QNumberField(
8,
MP3DescStorageStyle(
u'R128_ALBUM_GAIN'
),
MP4StorageStyle(
'----:com.apple.iTunes:R128_ALBUM_GAIN'
),
StorageStyle(
u'R128_ALBUM_GAIN'
),
ASFStorageStyle(
u'R128_ALBUM_GAIN'
),
)
initial_key = MediaField(
MP3StorageStyle('TKEY'),
MP4StorageStyle('----:com.apple.iTunes:initialkey'),
StorageStyle('INITIALKEY'),
ASFStorageStyle('INITIALKEY'),
)
@property
def length(self):
"""The duration of the audio in seconds (a float)."""
return self.mgfile.info.length
@property
def samplerate(self):
"""The audio's sample rate (an int)."""
if hasattr(self.mgfile.info, 'sample_rate'):
return self.mgfile.info.sample_rate
elif self.type == 'opus':
# Opus is always 48kHz internally.
return 48000
return 0
@property
def bitdepth(self):
"""The number of bits per sample in the audio encoding (an int).
Only available for certain file formats (zero where
unavailable).
"""
if hasattr(self.mgfile.info, 'bits_per_sample'):
return self.mgfile.info.bits_per_sample
return 0
@property
def channels(self):
"""The number of channels in the audio (an int)."""
if hasattr(self.mgfile.info, 'channels'):
return self.mgfile.info.channels
return 0
@property
def bitrate(self):
"""The number of bits per seconds used in the audio coding (an
int). If this is provided explicitly by the compressed file
format, this is a precise reflection of the encoding. Otherwise,
it is estimated from the on-disk file size. In this case, some
imprecision is possible because the file header is incorporated
in the file size.
"""
if hasattr(self.mgfile.info, 'bitrate') and self.mgfile.info.bitrate:
# Many formats provide it explicitly.
return self.mgfile.info.bitrate
else:
# Otherwise, we calculate bitrate from the file size. (This
# is the case for all of the lossless formats.)
if not self.length:
# Avoid division by zero if length is not available.
return 0
return int(self.filesize * 8 / self.length)
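    # Worked example of the fallback estimate above: a lossless file of
    # 40,000,000 bytes lasting 200.0 seconds is reported as
    # int(40000000 * 8 / 200.0) == 1600000, i.e. roughly 1600 kbps.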
@property
def bitrate_mode(self):
"""The mode of the bitrate used in the audio coding
(a string, eg. "CBR", "VBR" or "ABR").
Only available for the MP3 file format (empty where unavailable).
"""
if hasattr(self.mgfile.info, 'bitrate_mode'):
return {
mutagen.mp3.BitrateMode.CBR: 'CBR',
mutagen.mp3.BitrateMode.VBR: 'VBR',
mutagen.mp3.BitrateMode.ABR: 'ABR',
}.get(self.mgfile.info.bitrate_mode, '')
else:
return ''
@property
def encoder_info(self):
"""The name and/or version of the encoder used
(a string, eg. "LAME 3.97.0").
Only available for some formats (empty where unavailable).
"""
if hasattr(self.mgfile.info, 'encoder_info'):
return self.mgfile.info.encoder_info
else:
return ''
@property
def encoder_settings(self):
"""A guess of the settings used for the encoder (a string, eg. "-V2").
Only available for the MP3 file format (empty where unavailable).
"""
if hasattr(self.mgfile.info, 'encoder_settings'):
return self.mgfile.info.encoder_settings
else:
return ''
@property
def format(self):
"""A string describing the file format/codec."""
return TYPES[self.type]
| 80,652 | Python | .py | 2,045 | 30.784352 | 79 | 0.616425 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
| 8,376 | argparse.py | rembo10_headphones/lib/argparse.py |
# Author: Steven J. Bethard <steven.bethard@gmail.com>.
"""Command-line parsing library
This module is an optparse-inspired command-line parsing library that:
- handles both optional and positional arguments
- produces highly informative usage messages
- supports parsers that dispatch to sub-parsers
The following is a simple usage example that sums integers from the
command-line and writes the result to a file::
parser = argparse.ArgumentParser(
description='sum the integers at the command line')
parser.add_argument(
'integers', metavar='int', nargs='+', type=int,
help='an integer to be summed')
parser.add_argument(
'--log', default=sys.stdout, type=argparse.FileType('w'),
help='the file where the sum should be written')
args = parser.parse_args()
args.log.write('%s' % sum(args.integers))
args.log.close()
The module contains the following public classes:
- ArgumentParser -- The main entry point for command-line parsing. As the
example above shows, the add_argument() method is used to populate
the parser with actions for optional and positional arguments. Then
the parse_args() method is invoked to convert the args at the
command-line into an object with attributes.
- ArgumentError -- The exception raised by ArgumentParser objects when
there are errors with the parser's actions. Errors raised while
parsing the command-line are caught by ArgumentParser and emitted
as command-line messages.
- FileType -- A factory for defining types of files to be created. As the
example above shows, instances of FileType are typically passed as
the type= argument of add_argument() calls.
- Action -- The base class for parser actions. Typically actions are
selected by passing strings like 'store_true' or 'append_const' to
the action= argument of add_argument(). However, for greater
customization of ArgumentParser actions, subclasses of Action may
be defined and passed as the action= argument.
- HelpFormatter, RawDescriptionHelpFormatter, RawTextHelpFormatter,
ArgumentDefaultsHelpFormatter -- Formatter classes which
may be passed as the formatter_class= argument to the
ArgumentParser constructor. HelpFormatter is the default,
RawDescriptionHelpFormatter and RawTextHelpFormatter tell the parser
not to change the formatting for help text, and
ArgumentDefaultsHelpFormatter adds information about argument defaults
to the help.
All other classes in this module are considered implementation details.
(Also note that HelpFormatter and RawDescriptionHelpFormatter are only
considered public as object names -- the API of the formatter objects is
still considered an implementation detail.)
"""
__version__ = '1.1'
__all__ = [
'ArgumentParser',
'ArgumentError',
'ArgumentTypeError',
'FileType',
'HelpFormatter',
'ArgumentDefaultsHelpFormatter',
'RawDescriptionHelpFormatter',
'RawTextHelpFormatter',
'MetavarTypeHelpFormatter',
'Namespace',
'Action',
'ONE_OR_MORE',
'OPTIONAL',
'PARSER',
'REMAINDER',
'SUPPRESS',
'ZERO_OR_MORE',
]
import collections as _collections
import copy as _copy
import os as _os
import re as _re
import sys as _sys
import textwrap as _textwrap
from gettext import gettext as _, ngettext
def _callable(obj):
return hasattr(obj, '__call__') or hasattr(obj, '__bases__')
SUPPRESS = '==SUPPRESS=='
OPTIONAL = '?'
ZERO_OR_MORE = '*'
ONE_OR_MORE = '+'
PARSER = 'A...'
REMAINDER = '...'
_UNRECOGNIZED_ARGS_ATTR = '_unrecognized_args'
# =============================
# Utility functions and classes
# =============================
class _AttributeHolder(object):
"""Abstract base class that provides __repr__.
The __repr__ method returns a string in the format::
ClassName(attr=name, attr=name, ...)
    The attributes are determined by the _get_kwargs() and _get_args()
    methods, which by default inspect the instance __dict__.
"""
def __repr__(self):
type_name = type(self).__name__
arg_strings = []
for arg in self._get_args():
arg_strings.append(repr(arg))
for name, value in self._get_kwargs():
arg_strings.append('%s=%r' % (name, value))
return '%s(%s)' % (type_name, ', '.join(arg_strings))
def _get_kwargs(self):
return sorted(self.__dict__.items())
def _get_args(self):
return []
def _ensure_value(namespace, name, value):
if getattr(namespace, name, None) is None:
setattr(namespace, name, value)
return getattr(namespace, name)
# ===============
# Formatting Help
# ===============
class HelpFormatter(object):
"""Formatter for generating usage messages and argument help strings.
Only the name of this class is considered a public API. All the methods
provided by the class are considered an implementation detail.
"""
def __init__(self,
prog,
indent_increment=2,
max_help_position=24,
width=None):
# default setting for width
if width is None:
try:
width = int(_os.environ['COLUMNS'])
except (KeyError, ValueError):
width = 80
width -= 2
self._prog = prog
self._indent_increment = indent_increment
self._max_help_position = max_help_position
self._width = width
self._current_indent = 0
self._level = 0
self._action_max_length = 0
self._root_section = self._Section(self, None)
self._current_section = self._root_section
self._whitespace_matcher = _re.compile(r'\s+')
self._long_break_matcher = _re.compile(r'\n\n\n+')
# ===============================
# Section and indentation methods
# ===============================
def _indent(self):
self._current_indent += self._indent_increment
self._level += 1
def _dedent(self):
self._current_indent -= self._indent_increment
assert self._current_indent >= 0, 'Indent decreased below 0.'
self._level -= 1
class _Section(object):
def __init__(self, formatter, parent, heading=None):
self.formatter = formatter
self.parent = parent
self.heading = heading
self.items = []
def format_help(self):
# format the indented section
if self.parent is not None:
self.formatter._indent()
join = self.formatter._join_parts
for func, args in self.items:
func(*args)
item_help = join([func(*args) for func, args in self.items])
if self.parent is not None:
self.formatter._dedent()
# return nothing if the section was empty
if not item_help:
return ''
# add the heading if the section was non-empty
if self.heading is not SUPPRESS and self.heading is not None:
current_indent = self.formatter._current_indent
heading = '%*s%s:\n' % (current_indent, '', self.heading)
else:
heading = ''
# join the section-initial newline, the heading and the help
return join(['\n', heading, item_help, '\n'])
def _add_item(self, func, args):
self._current_section.items.append((func, args))
# ========================
# Message building methods
# ========================
def start_section(self, heading):
self._indent()
section = self._Section(self, self._current_section, heading)
self._add_item(section.format_help, [])
self._current_section = section
def end_section(self):
self._current_section = self._current_section.parent
self._dedent()
def add_text(self, text):
if text is not SUPPRESS and text is not None:
self._add_item(self._format_text, [text])
def add_usage(self, usage, actions, groups, prefix=None):
if usage is not SUPPRESS:
args = usage, actions, groups, prefix
self._add_item(self._format_usage, args)
def add_argument(self, action):
if action.help is not SUPPRESS:
# find all invocations
get_invocation = self._format_action_invocation
invocations = [get_invocation(action)]
for subaction in self._iter_indented_subactions(action):
invocations.append(get_invocation(subaction))
# update the maximum item length
invocation_length = max([len(s) for s in invocations])
action_length = invocation_length + self._current_indent
self._action_max_length = max(self._action_max_length,
action_length)
# add the item to the list
self._add_item(self._format_action, [action])
def add_arguments(self, actions):
for action in actions:
self.add_argument(action)
# =======================
# Help-formatting methods
# =======================
def format_help(self):
help = self._root_section.format_help()
if help:
help = self._long_break_matcher.sub('\n\n', help)
help = help.strip('\n') + '\n'
return help
def _join_parts(self, part_strings):
return ''.join([part
for part in part_strings
if part and part is not SUPPRESS])
def _format_usage(self, usage, actions, groups, prefix):
if prefix is None:
prefix = _('usage: ')
# if usage is specified, use that
if usage is not None:
usage = usage % dict(prog=self._prog)
# if no optionals or positionals are available, usage is just prog
elif usage is None and not actions:
usage = '%(prog)s' % dict(prog=self._prog)
# if optionals and positionals are available, calculate usage
elif usage is None:
prog = '%(prog)s' % dict(prog=self._prog)
# split optionals from positionals
optionals = []
positionals = []
for action in actions:
if action.option_strings:
optionals.append(action)
else:
positionals.append(action)
# build full usage string
format = self._format_actions_usage
action_usage = format(optionals + positionals, groups)
usage = ' '.join([s for s in [prog, action_usage] if s])
# wrap the usage parts if it's too long
text_width = self._width - self._current_indent
if len(prefix) + len(usage) > text_width:
# break usage into wrappable parts
part_regexp = r'\(.*?\)+|\[.*?\]+|\S+'
opt_usage = format(optionals, groups)
pos_usage = format(positionals, groups)
opt_parts = _re.findall(part_regexp, opt_usage)
pos_parts = _re.findall(part_regexp, pos_usage)
assert ' '.join(opt_parts) == opt_usage
assert ' '.join(pos_parts) == pos_usage
# helper for wrapping lines
def get_lines(parts, indent, prefix=None):
lines = []
line = []
if prefix is not None:
line_len = len(prefix) - 1
else:
line_len = len(indent) - 1
for part in parts:
if line_len + 1 + len(part) > text_width:
lines.append(indent + ' '.join(line))
line = []
line_len = len(indent) - 1
line.append(part)
line_len += len(part) + 1
if line:
lines.append(indent + ' '.join(line))
if prefix is not None:
lines[0] = lines[0][len(indent):]
return lines
# if prog is short, follow it with optionals or positionals
if len(prefix) + len(prog) <= 0.75 * text_width:
indent = ' ' * (len(prefix) + len(prog) + 1)
if opt_parts:
lines = get_lines([prog] + opt_parts, indent, prefix)
lines.extend(get_lines(pos_parts, indent))
elif pos_parts:
lines = get_lines([prog] + pos_parts, indent, prefix)
else:
lines = [prog]
# if prog is long, put it on its own line
else:
indent = ' ' * len(prefix)
parts = opt_parts + pos_parts
lines = get_lines(parts, indent)
if len(lines) > 1:
lines = []
lines.extend(get_lines(opt_parts, indent))
lines.extend(get_lines(pos_parts, indent))
lines = [prog] + lines
# join lines into usage
usage = '\n'.join(lines)
# prefix with 'usage:'
return '%s%s\n\n' % (prefix, usage)
def _format_actions_usage(self, actions, groups):
# find group indices and identify actions in groups
group_actions = set()
inserts = {}
for group in groups:
try:
start = actions.index(group._group_actions[0])
except ValueError:
continue
else:
end = start + len(group._group_actions)
if actions[start:end] == group._group_actions:
for action in group._group_actions:
group_actions.add(action)
if not group.required:
if start in inserts:
inserts[start] += ' ['
else:
inserts[start] = '['
inserts[end] = ']'
else:
if start in inserts:
inserts[start] += ' ('
else:
inserts[start] = '('
inserts[end] = ')'
for i in range(start + 1, end):
inserts[i] = '|'
# collect all actions format strings
parts = []
for i, action in enumerate(actions):
# suppressed arguments are marked with None
# remove | separators for suppressed arguments
if action.help is SUPPRESS:
parts.append(None)
if inserts.get(i) == '|':
inserts.pop(i)
elif inserts.get(i + 1) == '|':
inserts.pop(i + 1)
# produce all arg strings
elif not action.option_strings:
default = self._get_default_metavar_for_positional(action)
part = self._format_args(action, default)
# if it's in a group, strip the outer []
if action in group_actions:
if part[0] == '[' and part[-1] == ']':
part = part[1:-1]
# add the action string to the list
parts.append(part)
# produce the first way to invoke the option in brackets
else:
option_string = action.option_strings[0]
# if the Optional doesn't take a value, format is:
# -s or --long
if action.nargs == 0:
part = '%s' % option_string
# if the Optional takes a value, format is:
# -s ARGS or --long ARGS
else:
default = self._get_default_metavar_for_optional(action)
args_string = self._format_args(action, default)
part = '%s %s' % (option_string, args_string)
# make it look optional if it's not required or in a group
if not action.required and action not in group_actions:
part = '[%s]' % part
# add the action string to the list
parts.append(part)
# insert things at the necessary indices
for i in sorted(inserts, reverse=True):
parts[i:i] = [inserts[i]]
# join all the action items with spaces
text = ' '.join([item for item in parts if item is not None])
# clean up separators for mutually exclusive groups
open = r'[\[(]'
close = r'[\])]'
text = _re.sub(r'(%s) ' % open, r'\1', text)
text = _re.sub(r' (%s)' % close, r'\1', text)
text = _re.sub(r'%s *%s' % (open, close), r'', text)
text = _re.sub(r'\(([^|]*)\)', r'\1', text)
text = text.strip()
# return the text
return text
def _format_text(self, text):
if '%(prog)' in text:
text = text % dict(prog=self._prog)
text_width = self._width - self._current_indent
indent = ' ' * self._current_indent
return self._fill_text(text, text_width, indent) + '\n\n'
def _format_action(self, action):
# determine the required width and the entry label
help_position = min(self._action_max_length + 2,
self._max_help_position)
help_width = self._width - help_position
action_width = help_position - self._current_indent - 2
action_header = self._format_action_invocation(action)
        # no help; start on same line and add a final newline
if not action.help:
tup = self._current_indent, '', action_header
action_header = '%*s%s\n' % tup
# short action name; start on the same line and pad two spaces
elif len(action_header) <= action_width:
tup = self._current_indent, '', action_width, action_header
action_header = '%*s%-*s ' % tup
indent_first = 0
# long action name; start on the next line
else:
tup = self._current_indent, '', action_header
action_header = '%*s%s\n' % tup
indent_first = help_position
# collect the pieces of the action help
parts = [action_header]
# if there was help for the action, add lines of help text
if action.help:
help_text = self._expand_help(action)
help_lines = self._split_lines(help_text, help_width)
parts.append('%*s%s\n' % (indent_first, '', help_lines[0]))
for line in help_lines[1:]:
parts.append('%*s%s\n' % (help_position, '', line))
# or add a newline if the description doesn't end with one
elif not action_header.endswith('\n'):
parts.append('\n')
# if there are any sub-actions, add their help as well
for subaction in self._iter_indented_subactions(action):
parts.append(self._format_action(subaction))
# return a single string
return self._join_parts(parts)
def _format_action_invocation(self, action):
if not action.option_strings:
default = self._get_default_metavar_for_positional(action)
metavar, = self._metavar_formatter(action, default)(1)
return metavar
else:
parts = []
# if the Optional doesn't take a value, format is:
# -s, --long
if action.nargs == 0:
parts.extend(action.option_strings)
# if the Optional takes a value, format is:
# -s ARGS, --long ARGS
else:
default = self._get_default_metavar_for_optional(action)
args_string = self._format_args(action, default)
for option_string in action.option_strings:
parts.append('%s %s' % (option_string, args_string))
return ', '.join(parts)
def _metavar_formatter(self, action, default_metavar):
if action.metavar is not None:
result = action.metavar
elif action.choices is not None:
choice_strs = [str(choice) for choice in action.choices]
result = '{%s}' % ','.join(choice_strs)
else:
result = default_metavar
def format(tuple_size):
if isinstance(result, tuple):
return result
else:
return (result, ) * tuple_size
return format
def _format_args(self, action, default_metavar):
get_metavar = self._metavar_formatter(action, default_metavar)
if action.nargs is None:
result = '%s' % get_metavar(1)
elif action.nargs == OPTIONAL:
result = '[%s]' % get_metavar(1)
elif action.nargs == ZERO_OR_MORE:
result = '[%s [%s ...]]' % get_metavar(2)
elif action.nargs == ONE_OR_MORE:
result = '%s [%s ...]' % get_metavar(2)
elif action.nargs == REMAINDER:
result = '...'
elif action.nargs == PARSER:
result = '%s ...' % get_metavar(1)
else:
formats = ['%s' for _ in range(action.nargs)]
result = ' '.join(formats) % get_metavar(action.nargs)
return result
def _expand_help(self, action):
params = dict(vars(action), prog=self._prog)
for name in list(params):
if params[name] is SUPPRESS:
del params[name]
for name in list(params):
if hasattr(params[name], '__name__'):
params[name] = params[name].__name__
if params.get('choices') is not None:
choices_str = ', '.join([str(c) for c in params['choices']])
params['choices'] = choices_str
return self._get_help_string(action) % params
def _iter_indented_subactions(self, action):
try:
get_subactions = action._get_subactions
except AttributeError:
pass
else:
self._indent()
for subaction in get_subactions():
yield subaction
self._dedent()
def _split_lines(self, text, width):
text = self._whitespace_matcher.sub(' ', text).strip()
return _textwrap.wrap(text, width)
def _fill_text(self, text, width, indent):
text = self._whitespace_matcher.sub(' ', text).strip()
return _textwrap.fill(text, width, initial_indent=indent,
subsequent_indent=indent)
def _get_help_string(self, action):
return action.help
def _get_default_metavar_for_optional(self, action):
return action.dest.upper()
def _get_default_metavar_for_positional(self, action):
return action.dest
class RawDescriptionHelpFormatter(HelpFormatter):
"""Help message formatter which retains any formatting in descriptions.
Only the name of this class is considered a public API. All the methods
provided by the class are considered an implementation detail.
"""
def _fill_text(self, text, width, indent):
return ''.join([indent + line for line in text.splitlines(True)])
class RawTextHelpFormatter(RawDescriptionHelpFormatter):
"""Help message formatter which retains formatting of all help text.
Only the name of this class is considered a public API. All the methods
provided by the class are considered an implementation detail.
"""
def _split_lines(self, text, width):
return text.splitlines()
class ArgumentDefaultsHelpFormatter(HelpFormatter):
"""Help message formatter which adds default values to argument help.
Only the name of this class is considered a public API. All the methods
provided by the class are considered an implementation detail.
"""
def _get_help_string(self, action):
help = action.help
if '%(default)' not in action.help:
if action.default is not SUPPRESS:
defaulting_nargs = [OPTIONAL, ZERO_OR_MORE]
if action.option_strings or action.nargs in defaulting_nargs:
help += ' (default: %(default)s)'
return help
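# An illustrative sketch of the formatter above (the program and option names
# are hypothetical):
#
#     parser = ArgumentParser(prog='frobnicate',
#                             formatter_class=ArgumentDefaultsHelpFormatter)
#     parser.add_argument('--retries', type=int, default=3, help='retry count')
#     # --help now renders the option as:
#     #   --retries RETRIES  retry count (default: 3)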
class MetavarTypeHelpFormatter(HelpFormatter):
"""Help message formatter which uses the argument 'type' as the default
metavar value (instead of the argument 'dest')
Only the name of this class is considered a public API. All the methods
provided by the class are considered an implementation detail.
"""
def _get_default_metavar_for_optional(self, action):
return action.type.__name__
def _get_default_metavar_for_positional(self, action):
return action.type.__name__
# =====================
# Options and Arguments
# =====================
def _get_action_name(argument):
if argument is None:
return None
elif argument.option_strings:
return '/'.join(argument.option_strings)
elif argument.metavar not in (None, SUPPRESS):
return argument.metavar
elif argument.dest not in (None, SUPPRESS):
return argument.dest
else:
return None
class ArgumentError(Exception):
"""An error from creating or using an argument (optional or positional).
The string value of this exception is the message, augmented with
information about the argument that caused it.
"""
def __init__(self, argument, message):
self.argument_name = _get_action_name(argument)
self.message = message
def __str__(self):
if self.argument_name is None:
format = '%(message)s'
else:
format = 'argument %(argument_name)s: %(message)s'
return format % dict(message=self.message,
argument_name=self.argument_name)
class ArgumentTypeError(Exception):
"""An error from trying to convert a command line string to a type."""
pass
# ==============
# Action classes
# ==============
class Action(_AttributeHolder):
"""Information about how to convert command line strings to Python objects.
Action objects are used by an ArgumentParser to represent the information
needed to parse a single argument from one or more strings from the
command line. The keyword arguments to the Action constructor are also
all attributes of Action instances.
Keyword Arguments:
- option_strings -- A list of command-line option strings which
should be associated with this action.
- dest -- The name of the attribute to hold the created object(s)
- nargs -- The number of command-line arguments that should be
consumed. By default, one argument will be consumed and a single
value will be produced. Other values include:
- N (an integer) consumes N arguments (and produces a list)
- '?' consumes zero or one arguments
- '*' consumes zero or more arguments (and produces a list)
- '+' consumes one or more arguments (and produces a list)
Note that the difference between the default and nargs=1 is that
with the default, a single value will be produced, while with
nargs=1, a list containing a single value will be produced.
- const -- The value to be produced if the option is specified and the
option uses an action that takes no values.
- default -- The value to be produced if the option is not specified.
- type -- The type which the command-line arguments should be converted
to, should be one of 'string', 'int', 'float', 'complex' or a
callable object that accepts a single string argument. If None,
'string' is assumed.
- choices -- A container of values that should be allowed. If not None,
after a command-line argument has been converted to the appropriate
type, an exception will be raised if it is not a member of this
collection.
- required -- True if the action must always be specified at the
command line. This is only meaningful for optional command-line
arguments.
- help -- The help string describing the argument.
- metavar -- The name to be used for the option's argument with the
help string. If None, the 'dest' value will be used as the name.
"""
def __init__(self,
option_strings,
dest,
nargs=None,
const=None,
default=None,
type=None,
choices=None,
required=False,
help=None,
metavar=None):
self.option_strings = option_strings
self.dest = dest
self.nargs = nargs
self.const = const
self.default = default
self.type = type
self.choices = choices
self.required = required
self.help = help
self.metavar = metavar
def _get_kwargs(self):
names = [
'option_strings',
'dest',
'nargs',
'const',
'default',
'type',
'choices',
'help',
'metavar',
]
return [(name, getattr(self, name)) for name in names]
def __call__(self, parser, namespace, values, option_string=None):
raise NotImplementedError(_('.__call__() not defined'))
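# An illustrative sketch of a user-defined action built on the base class
# above (the class, parser and option names are hypothetical):
#
#     class UpperAction(Action):
#         def __call__(self, parser, namespace, values, option_string=None):
#             setattr(namespace, self.dest, values.upper())
#
#     parser.add_argument('--name', action=UpperAction)
#     parser.parse_args(['--name', 'ada'])   # -> Namespace(name='ADA')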
class _StoreAction(Action):
def __init__(self,
option_strings,
dest,
nargs=None,
const=None,
default=None,
type=None,
choices=None,
required=False,
help=None,
metavar=None):
if nargs == 0:
raise ValueError('nargs for store actions must be > 0; if you '
'have nothing to store, actions such as store '
'true or store const may be more appropriate')
if const is not None and nargs != OPTIONAL:
raise ValueError('nargs must be %r to supply const' % OPTIONAL)
super(_StoreAction, self).__init__(
option_strings=option_strings,
dest=dest,
nargs=nargs,
const=const,
default=default,
type=type,
choices=choices,
required=required,
help=help,
metavar=metavar)
def __call__(self, parser, namespace, values, option_string=None):
setattr(namespace, self.dest, values)
class _StoreConstAction(Action):
def __init__(self,
option_strings,
dest,
const,
default=None,
required=False,
help=None,
metavar=None):
super(_StoreConstAction, self).__init__(
option_strings=option_strings,
dest=dest,
nargs=0,
const=const,
default=default,
required=required,
help=help)
def __call__(self, parser, namespace, values, option_string=None):
setattr(namespace, self.dest, self.const)
class _StoreTrueAction(_StoreConstAction):
def __init__(self,
option_strings,
dest,
default=False,
required=False,
help=None):
super(_StoreTrueAction, self).__init__(
option_strings=option_strings,
dest=dest,
const=True,
default=default,
required=required,
help=help)
class _StoreFalseAction(_StoreConstAction):
def __init__(self,
option_strings,
dest,
default=True,
required=False,
help=None):
super(_StoreFalseAction, self).__init__(
option_strings=option_strings,
dest=dest,
const=False,
default=default,
required=required,
help=help)
class _AppendAction(Action):
def __init__(self,
option_strings,
dest,
nargs=None,
const=None,
default=None,
type=None,
choices=None,
required=False,
help=None,
metavar=None):
if nargs == 0:
raise ValueError('nargs for append actions must be > 0; if arg '
'strings are not supplying the value to append, '
'the append const action may be more appropriate')
if const is not None and nargs != OPTIONAL:
raise ValueError('nargs must be %r to supply const' % OPTIONAL)
super(_AppendAction, self).__init__(
option_strings=option_strings,
dest=dest,
nargs=nargs,
const=const,
default=default,
type=type,
choices=choices,
required=required,
help=help,
metavar=metavar)
def __call__(self, parser, namespace, values, option_string=None):
items = _copy.copy(_ensure_value(namespace, self.dest, []))
items.append(values)
setattr(namespace, self.dest, items)
class _AppendConstAction(Action):
def __init__(self,
option_strings,
dest,
const,
default=None,
required=False,
help=None,
metavar=None):
super(_AppendConstAction, self).__init__(
option_strings=option_strings,
dest=dest,
nargs=0,
const=const,
default=default,
required=required,
help=help,
metavar=metavar)
def __call__(self, parser, namespace, values, option_string=None):
items = _copy.copy(_ensure_value(namespace, self.dest, []))
items.append(self.const)
setattr(namespace, self.dest, items)
class _CountAction(Action):
def __init__(self,
option_strings,
dest,
default=None,
required=False,
help=None):
super(_CountAction, self).__init__(
option_strings=option_strings,
dest=dest,
nargs=0,
default=default,
required=required,
help=help)
def __call__(self, parser, namespace, values, option_string=None):
new_count = _ensure_value(namespace, self.dest, 0) + 1
setattr(namespace, self.dest, new_count)
class _HelpAction(Action):
def __init__(self,
option_strings,
dest=SUPPRESS,
default=SUPPRESS,
help=None):
super(_HelpAction, self).__init__(
option_strings=option_strings,
dest=dest,
default=default,
nargs=0,
help=help)
def __call__(self, parser, namespace, values, option_string=None):
parser.print_help()
parser.exit()
class _VersionAction(Action):
def __init__(self,
option_strings,
version=None,
dest=SUPPRESS,
default=SUPPRESS,
help="show program's version number and exit"):
super(_VersionAction, self).__init__(
option_strings=option_strings,
dest=dest,
default=default,
nargs=0,
help=help)
self.version = version
def __call__(self, parser, namespace, values, option_string=None):
version = self.version
if version is None:
version = parser.version
formatter = parser._get_formatter()
formatter.add_text(version)
parser.exit(message=formatter.format_help())
class _SubParsersAction(Action):
class _ChoicesPseudoAction(Action):
def __init__(self, name, aliases, help):
metavar = dest = name
if aliases:
metavar += ' (%s)' % ', '.join(aliases)
sup = super(_SubParsersAction._ChoicesPseudoAction, self)
sup.__init__(option_strings=[], dest=dest, help=help,
metavar=metavar)
def __init__(self,
option_strings,
prog,
parser_class,
dest=SUPPRESS,
help=None,
metavar=None):
self._prog_prefix = prog
self._parser_class = parser_class
self._name_parser_map = _collections.OrderedDict()
self._choices_actions = []
super(_SubParsersAction, self).__init__(
option_strings=option_strings,
dest=dest,
nargs=PARSER,
choices=self._name_parser_map,
help=help,
metavar=metavar)
def add_parser(self, name, **kwargs):
# set prog from the existing prefix
if kwargs.get('prog') is None:
kwargs['prog'] = '%s %s' % (self._prog_prefix, name)
aliases = kwargs.pop('aliases', ())
# create a pseudo-action to hold the choice help
if 'help' in kwargs:
help = kwargs.pop('help')
choice_action = self._ChoicesPseudoAction(name, aliases, help)
self._choices_actions.append(choice_action)
# create the parser and add it to the map
parser = self._parser_class(**kwargs)
self._name_parser_map[name] = parser
# make parser available under aliases also
for alias in aliases:
self._name_parser_map[alias] = parser
return parser
def _get_subactions(self):
return self._choices_actions
def __call__(self, parser, namespace, values, option_string=None):
parser_name = values[0]
arg_strings = values[1:]
# set the parser name if requested
if self.dest is not SUPPRESS:
setattr(namespace, self.dest, parser_name)
# select the parser
try:
parser = self._name_parser_map[parser_name]
except KeyError:
args = {'parser_name': parser_name,
'choices': ', '.join(self._name_parser_map)}
msg = _('unknown parser %(parser_name)r (choices: %(choices)s)') % args
raise ArgumentError(self, msg)
# parse all the remaining options into the namespace
# store any unrecognized options on the object, so that the top
# level parser can decide what to do with them
namespace, arg_strings = parser.parse_known_args(arg_strings, namespace)
if arg_strings:
vars(namespace).setdefault(_UNRECOGNIZED_ARGS_ATTR, [])
getattr(namespace, _UNRECOGNIZED_ARGS_ATTR).extend(arg_strings)
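# An illustrative sketch of sub-command dispatch through the action above
# (the program and command names are hypothetical):
#
#     parser = ArgumentParser(prog='tool')
#     subparsers = parser.add_subparsers(dest='command')
#     fetch = subparsers.add_parser('fetch', help='download things')
#     fetch.add_argument('url')
#     parser.parse_args(['fetch', 'http://example.com'])
#     # -> Namespace(command='fetch', url='http://example.com')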
# ==============
# Type classes
# ==============
class FileType(object):
"""Factory for creating file object types
Instances of FileType are typically passed as type= arguments to the
ArgumentParser add_argument() method.
Keyword Arguments:
- mode -- A string indicating how the file is to be opened. Accepts the
same values as the builtin open() function.
- bufsize -- The file's desired buffer size. Accepts the same values as
the builtin open() function.
"""
def __init__(self, mode='r', bufsize=-1):
self._mode = mode
self._bufsize = bufsize
def __call__(self, string):
# the special argument "-" means sys.std{in,out}
if string == '-':
if 'r' in self._mode:
return _sys.stdin
elif 'w' in self._mode:
return _sys.stdout
else:
msg = _('argument "-" with mode %r') % self._mode
raise ValueError(msg)
# all other arguments are used as file names
try:
return open(string, self._mode, self._bufsize)
except IOError as e:
message = _("can't open '%s': %s")
raise ArgumentTypeError(message % (string, e))
def __repr__(self):
args = self._mode, self._bufsize
args_str = ', '.join(repr(arg) for arg in args if arg != -1)
return '%s(%s)' % (type(self).__name__, args_str)
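# An illustrative sketch of FileType in an add_argument() call (the file name
# is hypothetical):
#
#     parser.add_argument('outfile', type=FileType('w'))
#     parser.parse_args(['report.txt'])   # .outfile is an open, writable file
#     parser.parse_args(['-'])            # '-' maps to sys.stdout for mode 'w'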
# ===========================
# Optional and Positional Parsing
# ===========================
class Namespace(_AttributeHolder):
"""Simple object for storing attributes.
Implements equality by attribute names and values, and provides a simple
string representation.
"""
def __init__(self, **kwargs):
for name in kwargs:
setattr(self, name, kwargs[name])
def __eq__(self, other):
return vars(self) == vars(other)
def __ne__(self, other):
return not (self == other)
def __contains__(self, key):
return key in self.__dict__
class _ActionsContainer(object):
def __init__(self,
description,
prefix_chars,
argument_default,
conflict_handler):
super(_ActionsContainer, self).__init__()
self.description = description
self.argument_default = argument_default
self.prefix_chars = prefix_chars
self.conflict_handler = conflict_handler
# set up registries
self._registries = {}
# register actions
self.register('action', None, _StoreAction)
self.register('action', 'store', _StoreAction)
self.register('action', 'store_const', _StoreConstAction)
self.register('action', 'store_true', _StoreTrueAction)
self.register('action', 'store_false', _StoreFalseAction)
self.register('action', 'append', _AppendAction)
self.register('action', 'append_const', _AppendConstAction)
self.register('action', 'count', _CountAction)
self.register('action', 'help', _HelpAction)
self.register('action', 'version', _VersionAction)
self.register('action', 'parsers', _SubParsersAction)
# raise an exception if the conflict handler is invalid
self._get_handler()
# action storage
self._actions = []
self._option_string_actions = {}
# groups
self._action_groups = []
self._mutually_exclusive_groups = []
# defaults storage
self._defaults = {}
# determines whether an "option" looks like a negative number
self._negative_number_matcher = _re.compile(r'^-\d+$|^-\d*\.\d+$')
# whether or not there are any optionals that look like negative
# numbers -- uses a list so it can be shared and edited
self._has_negative_number_optionals = []
# ====================
# Registration methods
# ====================
def register(self, registry_name, value, object):
registry = self._registries.setdefault(registry_name, {})
registry[value] = object
def _registry_get(self, registry_name, value, default=None):
return self._registries[registry_name].get(value, default)
# ==================================
# Namespace default accessor methods
# ==================================
def set_defaults(self, **kwargs):
self._defaults.update(kwargs)
# if these defaults match any existing arguments, replace
# the previous default on the object with the new one
for action in self._actions:
if action.dest in kwargs:
action.default = kwargs[action.dest]
def get_default(self, dest):
for action in self._actions:
if action.dest == dest and action.default is not None:
return action.default
return self._defaults.get(dest, None)
# =======================
# Adding argument actions
# =======================
def add_argument(self, *args, **kwargs):
"""
add_argument(dest, ..., name=value, ...)
add_argument(option_string, option_string, ..., name=value, ...)
"""
# if no positional args are supplied or only one is supplied and
# it doesn't look like an option string, parse a positional
# argument
chars = self.prefix_chars
if not args or len(args) == 1 and args[0][0] not in chars:
if args and 'dest' in kwargs:
raise ValueError('dest supplied twice for positional argument')
kwargs = self._get_positional_kwargs(*args, **kwargs)
# otherwise, we're adding an optional argument
else:
kwargs = self._get_optional_kwargs(*args, **kwargs)
# if no default was supplied, use the parser-level default
if 'default' not in kwargs:
dest = kwargs['dest']
if dest in self._defaults:
kwargs['default'] = self._defaults[dest]
elif self.argument_default is not None:
kwargs['default'] = self.argument_default
# create the action object, and add it to the parser
action_class = self._pop_action_class(kwargs)
if not _callable(action_class):
raise ValueError('unknown action "%s"' % (action_class,))
action = action_class(**kwargs)
# raise an error if the action type is not callable
type_func = self._registry_get('type', action.type, action.type)
if not _callable(type_func):
raise ValueError('%r is not callable' % (type_func,))
# raise an error if the metavar does not match the type
if hasattr(self, "_get_formatter"):
try:
self._get_formatter()._format_args(action, None)
except TypeError:
raise ValueError("length of metavar tuple does not match nargs")
return self._add_action(action)
def add_argument_group(self, *args, **kwargs):
group = _ArgumentGroup(self, *args, **kwargs)
self._action_groups.append(group)
return group
def add_mutually_exclusive_group(self, **kwargs):
group = _MutuallyExclusiveGroup(self, **kwargs)
self._mutually_exclusive_groups.append(group)
return group
def _add_action(self, action):
# resolve any conflicts
self._check_conflict(action)
# add to actions list
self._actions.append(action)
action.container = self
# index the action by any option strings it has
for option_string in action.option_strings:
self._option_string_actions[option_string] = action
# set the flag if any option strings look like negative numbers
for option_string in action.option_strings:
if self._negative_number_matcher.match(option_string):
if not self._has_negative_number_optionals:
self._has_negative_number_optionals.append(True)
# return the created action
return action
def _remove_action(self, action):
self._actions.remove(action)
def _add_container_actions(self, container):
# collect groups by titles
title_group_map = {}
for group in self._action_groups:
if group.title in title_group_map:
msg = _('cannot merge actions - two groups are named %r')
raise ValueError(msg % (group.title))
title_group_map[group.title] = group
# map each action to its group
group_map = {}
for group in container._action_groups:
# if a group with the title exists, use that, otherwise
# create a new group matching the container's group
if group.title not in title_group_map:
title_group_map[group.title] = self.add_argument_group(
title=group.title,
description=group.description,
conflict_handler=group.conflict_handler)
# map the actions to their new group
for action in group._group_actions:
group_map[action] = title_group_map[group.title]
# add container's mutually exclusive groups
# NOTE: if add_mutually_exclusive_group ever gains title= and
# description= then this code will need to be expanded as above
for group in container._mutually_exclusive_groups:
mutex_group = self.add_mutually_exclusive_group(
required=group.required)
# map the actions to their new mutex group
for action in group._group_actions:
group_map[action] = mutex_group
# add all actions to this container or their group
for action in container._actions:
group_map.get(action, self)._add_action(action)
def _get_positional_kwargs(self, dest, **kwargs):
# make sure required is not specified
if 'required' in kwargs:
msg = _("'required' is an invalid argument for positionals")
raise TypeError(msg)
# mark positional arguments as required if at least one is
# always required
if kwargs.get('nargs') not in [OPTIONAL, ZERO_OR_MORE]:
kwargs['required'] = True
if kwargs.get('nargs') == ZERO_OR_MORE and 'default' not in kwargs:
kwargs['required'] = True
# return the keyword arguments with no option strings
return dict(kwargs, dest=dest, option_strings=[])
def _get_optional_kwargs(self, *args, **kwargs):
# determine short and long option strings
option_strings = []
long_option_strings = []
for option_string in args:
# error on strings that don't start with an appropriate prefix
if not option_string[0] in self.prefix_chars:
args = {'option': option_string,
'prefix_chars': self.prefix_chars}
msg = _('invalid option string %(option)r: '
'must start with a character %(prefix_chars)r')
raise ValueError(msg % args)
# strings starting with two prefix characters are long options
option_strings.append(option_string)
if option_string[0] in self.prefix_chars:
if len(option_string) > 1:
if option_string[1] in self.prefix_chars:
long_option_strings.append(option_string)
# infer destination, '--foo-bar' -> 'foo_bar' and '-x' -> 'x'
dest = kwargs.pop('dest', None)
if dest is None:
if long_option_strings:
dest_option_string = long_option_strings[0]
else:
dest_option_string = option_strings[0]
dest = dest_option_string.lstrip(self.prefix_chars)
if not dest:
msg = _('dest= is required for options like %r')
raise ValueError(msg % option_string)
dest = dest.replace('-', '_')
# return the updated keyword arguments
return dict(kwargs, dest=dest, option_strings=option_strings)
def _pop_action_class(self, kwargs, default=None):
action = kwargs.pop('action', default)
return self._registry_get('action', action, action)
def _get_handler(self):
# determine function from conflict handler string
handler_func_name = '_handle_conflict_%s' % self.conflict_handler
try:
return getattr(self, handler_func_name)
except AttributeError:
msg = _('invalid conflict_resolution value: %r')
raise ValueError(msg % self.conflict_handler)
def _check_conflict(self, action):
# find all options that conflict with this option
confl_optionals = []
for option_string in action.option_strings:
if option_string in self._option_string_actions:
confl_optional = self._option_string_actions[option_string]
confl_optionals.append((option_string, confl_optional))
# resolve any conflicts
if confl_optionals:
conflict_handler = self._get_handler()
conflict_handler(action, confl_optionals)
def _handle_conflict_error(self, action, conflicting_actions):
message = ngettext('conflicting option string: %s',
'conflicting option strings: %s',
len(conflicting_actions))
conflict_string = ', '.join([option_string
for option_string, action
in conflicting_actions])
raise ArgumentError(action, message % conflict_string)
def _handle_conflict_resolve(self, action, conflicting_actions):
# remove all conflicting options
for option_string, action in conflicting_actions:
# remove the conflicting option
action.option_strings.remove(option_string)
self._option_string_actions.pop(option_string, None)
# if the option now has no option string, remove it from the
# container holding it
if not action.option_strings:
action.container._remove_action(action)
class _ArgumentGroup(_ActionsContainer):
def __init__(self, container, title=None, description=None, **kwargs):
# add any missing keyword arguments by checking the container
update = kwargs.setdefault
update('conflict_handler', container.conflict_handler)
update('prefix_chars', container.prefix_chars)
update('argument_default', container.argument_default)
super_init = super(_ArgumentGroup, self).__init__
super_init(description=description, **kwargs)
# group attributes
self.title = title
self._group_actions = []
# share most attributes with the container
self._registries = container._registries
self._actions = container._actions
self._option_string_actions = container._option_string_actions
self._defaults = container._defaults
self._has_negative_number_optionals = \
container._has_negative_number_optionals
self._mutually_exclusive_groups = container._mutually_exclusive_groups
def _add_action(self, action):
action = super(_ArgumentGroup, self)._add_action(action)
self._group_actions.append(action)
return action
def _remove_action(self, action):
super(_ArgumentGroup, self)._remove_action(action)
self._group_actions.remove(action)
class _MutuallyExclusiveGroup(_ArgumentGroup):
def __init__(self, container, required=False):
super(_MutuallyExclusiveGroup, self).__init__(container)
self.required = required
self._container = container
def _add_action(self, action):
if action.required:
msg = _('mutually exclusive arguments must be optional')
raise ValueError(msg)
action = self._container._add_action(action)
self._group_actions.append(action)
return action
def _remove_action(self, action):
self._container._remove_action(action)
self._group_actions.remove(action)
class ArgumentParser(_AttributeHolder, _ActionsContainer):
"""Object for parsing command line strings into Python objects.
Keyword Arguments:
- prog -- The name of the program (default: sys.argv[0])
- usage -- A usage message (default: auto-generated from arguments)
- description -- A description of what the program does
- epilog -- Text following the argument descriptions
- parents -- Parsers whose arguments should be copied into this one
- formatter_class -- HelpFormatter class for printing help messages
- prefix_chars -- Characters that prefix optional arguments
- fromfile_prefix_chars -- Characters that prefix files containing
additional arguments
- argument_default -- The default value for all arguments
- conflict_handler -- String indicating how to handle conflicts
        - add_help -- Add a -h/--help option
"""
def __init__(self,
prog=None,
usage=None,
description=None,
epilog=None,
version=None,
parents=[],
formatter_class=HelpFormatter,
prefix_chars='-',
fromfile_prefix_chars=None,
argument_default=None,
conflict_handler='error',
add_help=True):
if version is not None:
import warnings
warnings.warn(
"""The "version" argument to ArgumentParser is deprecated. """
"""Please use """
""""add_argument(..., action='version', version="N", ...)" """
"""instead""", DeprecationWarning)
superinit = super(ArgumentParser, self).__init__
superinit(description=description,
prefix_chars=prefix_chars,
argument_default=argument_default,
conflict_handler=conflict_handler)
# default setting for prog
if prog is None:
prog = _os.path.basename(_sys.argv[0])
self.prog = prog
self.usage = usage
self.epilog = epilog
self.version = version
self.formatter_class = formatter_class
self.fromfile_prefix_chars = fromfile_prefix_chars
self.add_help = add_help
add_group = self.add_argument_group
self._positionals = add_group(_('positional arguments'))
self._optionals = add_group(_('optional arguments'))
self._subparsers = None
# register types
def identity(string):
return string
self.register('type', None, identity)
# add help and version arguments if necessary
# (using explicit default to override global argument_default)
default_prefix = '-' if '-' in prefix_chars else prefix_chars[0]
if self.add_help:
self.add_argument(
default_prefix+'h', default_prefix*2+'help',
action='help', default=SUPPRESS,
help=_('show this help message and exit'))
if self.version:
self.add_argument(
default_prefix+'v', default_prefix*2+'version',
action='version', default=SUPPRESS,
version=self.version,
help=_("show program's version number and exit"))
# add parent arguments and defaults
for parent in parents:
self._add_container_actions(parent)
try:
defaults = parent._defaults
except AttributeError:
pass
else:
self._defaults.update(defaults)
# =======================
# Pretty __repr__ methods
# =======================
def _get_kwargs(self):
names = [
'prog',
'usage',
'description',
'version',
'formatter_class',
'conflict_handler',
'add_help',
]
return [(name, getattr(self, name)) for name in names]
# ==================================
# Optional/Positional adding methods
# ==================================
def add_subparsers(self, **kwargs):
if self._subparsers is not None:
self.error(_('cannot have multiple subparser arguments'))
# add the parser class to the arguments if it's not present
kwargs.setdefault('parser_class', type(self))
if 'title' in kwargs or 'description' in kwargs:
title = _(kwargs.pop('title', 'subcommands'))
description = _(kwargs.pop('description', None))
self._subparsers = self.add_argument_group(title, description)
else:
self._subparsers = self._positionals
# prog defaults to the usage message of this parser, skipping
# optional arguments and with no "usage:" prefix
if kwargs.get('prog') is None:
formatter = self._get_formatter()
positionals = self._get_positional_actions()
groups = self._mutually_exclusive_groups
formatter.add_usage(self.usage, positionals, groups, '')
kwargs['prog'] = formatter.format_help().strip()
# create the parsers action and add it to the positionals list
parsers_class = self._pop_action_class(kwargs, 'parsers')
action = parsers_class(option_strings=[], **kwargs)
self._subparsers._add_action(action)
# return the created parsers action
return action
def _add_action(self, action):
if action.option_strings:
self._optionals._add_action(action)
else:
self._positionals._add_action(action)
return action
def _get_optional_actions(self):
return [action
for action in self._actions
if action.option_strings]
def _get_positional_actions(self):
return [action
for action in self._actions
if not action.option_strings]
# =====================================
# Command line argument parsing methods
# =====================================
def parse_args(self, args=None, namespace=None):
args, argv = self.parse_known_args(args, namespace)
if argv:
msg = _('unrecognized arguments: %s')
self.error(msg % ' '.join(argv))
return args
def parse_known_args(self, args=None, namespace=None):
# args default to the system args
if args is None:
args = _sys.argv[1:]
# default Namespace built from parser defaults
if namespace is None:
namespace = Namespace()
# add any action defaults that aren't present
for action in self._actions:
if action.dest is not SUPPRESS:
if not hasattr(namespace, action.dest):
if action.default is not SUPPRESS:
default = action.default
if isinstance(action.default, str):
default = self._get_value(action, default)
setattr(namespace, action.dest, default)
# add any parser defaults that aren't present
for dest in self._defaults:
if not hasattr(namespace, dest):
setattr(namespace, dest, self._defaults[dest])
# parse the arguments and exit if there are any errors
try:
namespace, args = self._parse_known_args(args, namespace)
if hasattr(namespace, _UNRECOGNIZED_ARGS_ATTR):
args.extend(getattr(namespace, _UNRECOGNIZED_ARGS_ATTR))
delattr(namespace, _UNRECOGNIZED_ARGS_ATTR)
return namespace, args
except ArgumentError:
err = _sys.exc_info()[1]
self.error(str(err))
def _parse_known_args(self, arg_strings, namespace):
# replace arg strings that are file references
if self.fromfile_prefix_chars is not None:
arg_strings = self._read_args_from_files(arg_strings)
# map all mutually exclusive arguments to the other arguments
# they can't occur with
action_conflicts = {}
for mutex_group in self._mutually_exclusive_groups:
group_actions = mutex_group._group_actions
for i, mutex_action in enumerate(mutex_group._group_actions):
conflicts = action_conflicts.setdefault(mutex_action, [])
conflicts.extend(group_actions[:i])
conflicts.extend(group_actions[i + 1:])
# find all option indices, and determine the arg_string_pattern
# which has an 'O' if there is an option at an index,
# an 'A' if there is an argument, or a '-' if there is a '--'
option_string_indices = {}
arg_string_pattern_parts = []
arg_strings_iter = iter(arg_strings)
for i, arg_string in enumerate(arg_strings_iter):
# all args after -- are non-options
if arg_string == '--':
arg_string_pattern_parts.append('-')
for arg_string in arg_strings_iter:
arg_string_pattern_parts.append('A')
# otherwise, add the arg to the arg strings
# and note the index if it was an option
else:
option_tuple = self._parse_optional(arg_string)
if option_tuple is None:
pattern = 'A'
else:
option_string_indices[i] = option_tuple
pattern = 'O'
arg_string_pattern_parts.append(pattern)
# join the pieces together to form the pattern
arg_strings_pattern = ''.join(arg_string_pattern_parts)
        # converts arg strings to the appropriate values and then takes the action
seen_actions = set()
seen_non_default_actions = set()
def take_action(action, argument_strings, option_string=None):
seen_actions.add(action)
argument_values = self._get_values(action, argument_strings)
# error if this argument is not allowed with other previously
# seen arguments, assuming that actions that use the default
# value don't really count as "present"
if argument_values is not action.default:
seen_non_default_actions.add(action)
for conflict_action in action_conflicts.get(action, []):
if conflict_action in seen_non_default_actions:
msg = _('not allowed with argument %s')
action_name = _get_action_name(conflict_action)
raise ArgumentError(action, msg % action_name)
# take the action if we didn't receive a SUPPRESS value
# (e.g. from a default)
if argument_values is not SUPPRESS:
action(self, namespace, argument_values, option_string)
# function to convert arg_strings into an optional action
def consume_optional(start_index):
# get the optional identified at this index
option_tuple = option_string_indices[start_index]
action, option_string, explicit_arg = option_tuple
# identify additional optionals in the same arg string
# (e.g. -xyz is the same as -x -y -z if no args are required)
match_argument = self._match_argument
action_tuples = []
while True:
# if we found no optional action, skip it
if action is None:
extras.append(arg_strings[start_index])
return start_index + 1
# if there is an explicit argument, try to match the
# optional's string arguments to only this
if explicit_arg is not None:
arg_count = match_argument(action, 'A')
# if the action is a single-dash option and takes no
# arguments, try to parse more single-dash options out
# of the tail of the option string
chars = self.prefix_chars
if arg_count == 0 and option_string[1] not in chars:
action_tuples.append((action, [], option_string))
char = option_string[0]
option_string = char + explicit_arg[0]
new_explicit_arg = explicit_arg[1:] or None
optionals_map = self._option_string_actions
if option_string in optionals_map:
action = optionals_map[option_string]
explicit_arg = new_explicit_arg
else:
msg = _('ignored explicit argument %r')
raise ArgumentError(action, msg % explicit_arg)
                # if the action expects exactly one argument, we've
# successfully matched the option; exit the loop
elif arg_count == 1:
stop = start_index + 1
args = [explicit_arg]
action_tuples.append((action, args, option_string))
break
# error if a double-dash option did not use the
# explicit argument
else:
msg = _('ignored explicit argument %r')
raise ArgumentError(action, msg % explicit_arg)
# if there is no explicit argument, try to match the
# optional's string arguments with the following strings
# if successful, exit the loop
else:
start = start_index + 1
selected_patterns = arg_strings_pattern[start:]
arg_count = match_argument(action, selected_patterns)
stop = start + arg_count
args = arg_strings[start:stop]
action_tuples.append((action, args, option_string))
break
# add the Optional to the list and return the index at which
# the Optional's string args stopped
assert action_tuples
for action, args, option_string in action_tuples:
take_action(action, args, option_string)
return stop
# the list of Positionals left to be parsed; this is modified
# by consume_positionals()
positionals = self._get_positional_actions()
# function to convert arg_strings into positional actions
def consume_positionals(start_index):
# match as many Positionals as possible
match_partial = self._match_arguments_partial
selected_pattern = arg_strings_pattern[start_index:]
arg_counts = match_partial(positionals, selected_pattern)
# slice off the appropriate arg strings for each Positional
# and add the Positional and its args to the list
for action, arg_count in zip(positionals, arg_counts):
args = arg_strings[start_index: start_index + arg_count]
start_index += arg_count
take_action(action, args)
# slice off the Positionals that we just parsed and return the
# index at which the Positionals' string args stopped
positionals[:] = positionals[len(arg_counts):]
return start_index
# consume Positionals and Optionals alternately, until we have
# passed the last option string
extras = []
start_index = 0
if option_string_indices:
max_option_string_index = max(option_string_indices)
else:
max_option_string_index = -1
while start_index <= max_option_string_index:
# consume any Positionals preceding the next option
next_option_string_index = min([
index
for index in option_string_indices
if index >= start_index])
if start_index != next_option_string_index:
positionals_end_index = consume_positionals(start_index)
# only try to parse the next optional if we didn't consume
# the option string during the positionals parsing
if positionals_end_index > start_index:
start_index = positionals_end_index
continue
else:
start_index = positionals_end_index
# if we consumed all the positionals we could and we're not
# at the index of an option string, there were extra arguments
if start_index not in option_string_indices:
strings = arg_strings[start_index:next_option_string_index]
extras.extend(strings)
start_index = next_option_string_index
# consume the next optional and any arguments for it
start_index = consume_optional(start_index)
# consume any positionals following the last Optional
stop_index = consume_positionals(start_index)
# if we didn't consume all the argument strings, there were extras
extras.extend(arg_strings[stop_index:])
# make sure all required actions were present
required_actions = [_get_action_name(action) for action in self._actions
if action.required and action not in seen_actions]
if required_actions:
self.error(_('the following arguments are required: %s') %
', '.join(required_actions))
# make sure all required groups had one option present
for group in self._mutually_exclusive_groups:
if group.required:
for action in group._group_actions:
if action in seen_non_default_actions:
break
# if no actions were used, report the error
else:
names = [_get_action_name(action)
for action in group._group_actions
if action.help is not SUPPRESS]
msg = _('one of the arguments %s is required')
self.error(msg % ' '.join(names))
# return the updated namespace and the extra arguments
return namespace, extras
def _read_args_from_files(self, arg_strings):
# expand arguments referencing files
new_arg_strings = []
for arg_string in arg_strings:
# for regular arguments, just add them back into the list
if arg_string[0] not in self.fromfile_prefix_chars:
new_arg_strings.append(arg_string)
# replace arguments referencing files with the file content
else:
try:
args_file = open(arg_string[1:])
try:
arg_strings = []
for arg_line in args_file.read().splitlines():
for arg in self.convert_arg_line_to_args(arg_line):
arg_strings.append(arg)
arg_strings = self._read_args_from_files(arg_strings)
new_arg_strings.extend(arg_strings)
finally:
args_file.close()
except IOError:
err = _sys.exc_info()[1]
self.error(str(err))
# return the modified argument list
return new_arg_strings
def convert_arg_line_to_args(self, arg_line):
return [arg_line]
def _match_argument(self, action, arg_strings_pattern):
# match the pattern for this action to the arg strings
nargs_pattern = self._get_nargs_pattern(action)
match = _re.match(nargs_pattern, arg_strings_pattern)
# raise an exception if we weren't able to find a match
if match is None:
nargs_errors = {
None: _('expected one argument'),
OPTIONAL: _('expected at most one argument'),
ONE_OR_MORE: _('expected at least one argument'),
}
default = ngettext('expected %s argument',
'expected %s arguments',
action.nargs) % action.nargs
msg = nargs_errors.get(action.nargs, default)
raise ArgumentError(action, msg)
# return the number of arguments matched
return len(match.group(1))
def _match_arguments_partial(self, actions, arg_strings_pattern):
# progressively shorten the actions list by slicing off the
# final actions until we find a match
result = []
for i in range(len(actions), 0, -1):
actions_slice = actions[:i]
pattern = ''.join([self._get_nargs_pattern(action)
for action in actions_slice])
match = _re.match(pattern, arg_strings_pattern)
if match is not None:
result.extend([len(string) for string in match.groups()])
break
# return the list of arg string counts
return result
def _parse_optional(self, arg_string):
# if it's an empty string, it was meant to be a positional
if not arg_string:
return None
# if it doesn't start with a prefix, it was meant to be positional
if not arg_string[0] in self.prefix_chars:
return None
# if the option string is present in the parser, return the action
if arg_string in self._option_string_actions:
action = self._option_string_actions[arg_string]
return action, arg_string, None
# if it's just a single character, it was meant to be positional
if len(arg_string) == 1:
return None
# if the option string before the "=" is present, return the action
if '=' in arg_string:
option_string, explicit_arg = arg_string.split('=', 1)
if option_string in self._option_string_actions:
action = self._option_string_actions[option_string]
return action, option_string, explicit_arg
# search through all possible prefixes of the option string
# and all actions in the parser for possible interpretations
option_tuples = self._get_option_tuples(arg_string)
# if multiple actions match, the option string was ambiguous
if len(option_tuples) > 1:
options = ', '.join([option_string
for action, option_string, explicit_arg in option_tuples])
args = {'option': arg_string, 'matches': options}
msg = _('ambiguous option: %(option)s could match %(matches)s')
self.error(msg % args)
# if exactly one action matched, this segmentation is good,
# so return the parsed action
elif len(option_tuples) == 1:
option_tuple, = option_tuples
return option_tuple
# if it was not found as an option, but it looks like a negative
# number, it was meant to be positional
# unless there are negative-number-like options
if self._negative_number_matcher.match(arg_string):
if not self._has_negative_number_optionals:
return None
# if it contains a space, it was meant to be a positional
if ' ' in arg_string:
return None
# it was meant to be an optional but there is no such option
# in this parser (though it might be a valid option in a subparser)
return None, arg_string, None
def _get_option_tuples(self, option_string):
result = []
# option strings starting with two prefix characters are only
# split at the '='
chars = self.prefix_chars
if option_string[0] in chars and option_string[1] in chars:
if '=' in option_string:
option_prefix, explicit_arg = option_string.split('=', 1)
else:
option_prefix = option_string
explicit_arg = None
for option_string in self._option_string_actions:
if option_string.startswith(option_prefix):
action = self._option_string_actions[option_string]
tup = action, option_string, explicit_arg
result.append(tup)
# single character options can be concatenated with their arguments
# but multiple character options always have to have their argument
# separate
elif option_string[0] in chars and option_string[1] not in chars:
option_prefix = option_string
explicit_arg = None
short_option_prefix = option_string[:2]
short_explicit_arg = option_string[2:]
for option_string in self._option_string_actions:
if option_string == short_option_prefix:
action = self._option_string_actions[option_string]
tup = action, option_string, short_explicit_arg
result.append(tup)
elif option_string.startswith(option_prefix):
action = self._option_string_actions[option_string]
tup = action, option_string, explicit_arg
result.append(tup)
# shouldn't ever get here
else:
self.error(_('unexpected option string: %s') % option_string)
# return the collected option tuples
return result
def _get_nargs_pattern(self, action):
# in all examples below, we have to allow for '--' args
# which are represented as '-' in the pattern
nargs = action.nargs
# the default (None) is assumed to be a single argument
if nargs is None:
nargs_pattern = '(-*A-*)'
# allow zero or one arguments
elif nargs == OPTIONAL:
nargs_pattern = '(-*A?-*)'
# allow zero or more arguments
elif nargs == ZERO_OR_MORE:
nargs_pattern = '(-*[A-]*)'
# allow one or more arguments
elif nargs == ONE_OR_MORE:
nargs_pattern = '(-*A[A-]*)'
# allow any number of options or arguments
elif nargs == REMAINDER:
nargs_pattern = '([-AO]*)'
# allow one argument followed by any number of options or arguments
elif nargs == PARSER:
nargs_pattern = '(-*A[-AO]*)'
# all others should be integers
else:
nargs_pattern = '(-*%s-*)' % '-*'.join('A' * nargs)
# if this is an optional action, -- is not allowed
if action.option_strings:
nargs_pattern = nargs_pattern.replace('-*', '')
nargs_pattern = nargs_pattern.replace('-', '')
# return the pattern
return nargs_pattern
# ========================
# Value conversion methods
# ========================
def _get_values(self, action, arg_strings):
# for everything but PARSER args, strip out '--'
if action.nargs not in [PARSER, REMAINDER]:
arg_strings = [s for s in arg_strings if s != '--']
# optional argument produces a default when not present
if not arg_strings and action.nargs == OPTIONAL:
if action.option_strings:
value = action.const
else:
value = action.default
if isinstance(value, str):
value = self._get_value(action, value)
self._check_value(action, value)
# when nargs='*' on a positional, if there were no command-line
# args, use the default if it is anything other than None
elif (not arg_strings and action.nargs == ZERO_OR_MORE and
not action.option_strings):
if action.default is not None:
value = action.default
else:
value = arg_strings
self._check_value(action, value)
# single argument or optional argument produces a single value
elif len(arg_strings) == 1 and action.nargs in [None, OPTIONAL]:
arg_string, = arg_strings
value = self._get_value(action, arg_string)
self._check_value(action, value)
# REMAINDER arguments convert all values, checking none
elif action.nargs == REMAINDER:
value = [self._get_value(action, v) for v in arg_strings]
# PARSER arguments convert all values, but check only the first
elif action.nargs == PARSER:
value = [self._get_value(action, v) for v in arg_strings]
self._check_value(action, value[0])
# all other types of nargs produce a list
else:
value = [self._get_value(action, v) for v in arg_strings]
for v in value:
self._check_value(action, v)
# return the converted value
return value
def _get_value(self, action, arg_string):
type_func = self._registry_get('type', action.type, action.type)
if not _callable(type_func):
msg = _('%r is not callable')
raise ArgumentError(action, msg % type_func)
# convert the value to the appropriate type
try:
result = type_func(arg_string)
# ArgumentTypeErrors indicate errors
except ArgumentTypeError:
name = getattr(action.type, '__name__', repr(action.type))
msg = str(_sys.exc_info()[1])
raise ArgumentError(action, msg)
# TypeErrors or ValueErrors also indicate errors
except (TypeError, ValueError):
name = getattr(action.type, '__name__', repr(action.type))
args = {'type': name, 'value': arg_string}
msg = _('invalid %(type)s value: %(value)r')
raise ArgumentError(action, msg % args)
# return the converted value
return result
def _check_value(self, action, value):
# converted value must be one of the choices (if specified)
if action.choices is not None and value not in action.choices:
args = {'value': value,
'choices': ', '.join(map(repr, action.choices))}
msg = _('invalid choice: %(value)r (choose from %(choices)s)')
raise ArgumentError(action, msg % args)
# =======================
# Help-formatting methods
# =======================
def format_usage(self):
formatter = self._get_formatter()
formatter.add_usage(self.usage, self._actions,
self._mutually_exclusive_groups)
return formatter.format_help()
def format_help(self):
formatter = self._get_formatter()
# usage
formatter.add_usage(self.usage, self._actions,
self._mutually_exclusive_groups)
# description
formatter.add_text(self.description)
# positionals, optionals and user-defined groups
for action_group in self._action_groups:
formatter.start_section(action_group.title)
formatter.add_text(action_group.description)
formatter.add_arguments(action_group._group_actions)
formatter.end_section()
# epilog
formatter.add_text(self.epilog)
# determine help from format above
return formatter.format_help()
def format_version(self):
import warnings
warnings.warn(
'The format_version method is deprecated -- the "version" '
'argument to ArgumentParser is no longer supported.',
DeprecationWarning)
formatter = self._get_formatter()
formatter.add_text(self.version)
return formatter.format_help()
def _get_formatter(self):
return self.formatter_class(prog=self.prog)
# =====================
# Help-printing methods
# =====================
def print_usage(self, file=None):
if file is None:
file = _sys.stdout
self._print_message(self.format_usage(), file)
def print_help(self, file=None):
if file is None:
file = _sys.stdout
self._print_message(self.format_help(), file)
def print_version(self, file=None):
import warnings
warnings.warn(
'The print_version method is deprecated -- the "version" '
'argument to ArgumentParser is no longer supported.',
DeprecationWarning)
self._print_message(self.format_version(), file)
def _print_message(self, message, file=None):
if message:
if file is None:
file = _sys.stderr
file.write(message)
# ===============
# Exiting methods
# ===============
def exit(self, status=0, message=None):
if message:
self._print_message(message, _sys.stderr)
_sys.exit(status)
def error(self, message):
"""error(message: string)
Prints a usage message incorporating the message to stderr and
exits.
If you override this in a subclass, it should not return -- it
should either exit or raise an exception.
"""
self.print_usage(_sys.stderr)
args = {'prog': self.prog, 'message': message}
self.exit(2, _('%(prog)s: error: %(message)s\n') % args)
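# Illustrative usage sketch (not part of the upstream module): a minimal
# exercise of the public ArgumentParser API documented above. The program
# name, option names and the sample argv list below are made-up values.
if __name__ == '__main__':
    _demo = ArgumentParser(prog='demo', description='ArgumentParser usage sketch')
    _demo.add_argument('filename', help='a required positional argument')
    _demo.add_argument('--verbose', action='store_true', help='enable chatty output')
    _volume = _demo.add_mutually_exclusive_group()
    _volume.add_argument('--quiet', action='store_true')
    _volume.add_argument('--loud', action='store_true')
    # parse_args() accepts an explicit argv list; by default it reads sys.argv[1:]
    print(_demo.parse_args(['input.txt', '--verbose']))
    # prints: Namespace(filename='input.txt', loud=False, quiet=False, verbose=True)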
| 89,553
|
Python
|
.py
| 1,959
| 33.760082
| 83
| 0.575114
|
rembo10/headphones
| 3,370
| 601
| 527
|
GPL-3.0
|
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
|
8,377
|
sgmllib.py
|
rembo10_headphones/lib/sgmllib.py
|
"""A parser for SGML, using the derived class as a static DTD."""
# XXX This only supports those SGML features used by HTML.
# XXX There should be a way to distinguish between PCDATA (parsed
# character data -- the normal case), RCDATA (replaceable character
# data -- only char and entity references and end tags are special)
# and CDATA (character data -- only end tags are special). RCDATA is
# not supported at all.
import _markupbase
import re
__all__ = ["SGMLParser", "SGMLParseError"]
# Regular expressions used for parsing
interesting = re.compile('[&<]')
incomplete = re.compile('&([a-zA-Z][a-zA-Z0-9]*|#[0-9]*)?|'
'<([a-zA-Z][^<>]*|'
'/([a-zA-Z][^<>]*)?|'
'![^<>]*)?')
entityref = re.compile('&([a-zA-Z][-.a-zA-Z0-9]*)[^a-zA-Z0-9]')
charref = re.compile('&#([0-9]+)[^0-9]')
starttagopen = re.compile('<[>a-zA-Z]')
shorttagopen = re.compile('<[a-zA-Z][-.a-zA-Z0-9]*/')
shorttag = re.compile('<([a-zA-Z][-.a-zA-Z0-9]*)/([^/]*)/')
piclose = re.compile('>')
endbracket = re.compile('[<>]')
tagfind = re.compile('[a-zA-Z][-_.a-zA-Z0-9]*')
attrfind = re.compile(
r'\s*([a-zA-Z_][-:.a-zA-Z_0-9]*)(\s*=\s*'
r'(\'[^\']*\'|"[^"]*"|[][\-a-zA-Z0-9./,:;+*%?!&$\(\)_#=~\'"@]*))?')
class SGMLParseError(RuntimeError):
"""Exception raised for all parse errors."""
pass
# SGML parser base class -- find tags and call handler functions.
# Usage: p = SGMLParser(); p.feed(data); ...; p.close().
# The dtd is defined by deriving a class which defines methods
# with special names to handle tags: start_foo and end_foo to handle
# <foo> and </foo>, respectively, or do_foo to handle <foo> by itself.
# (Tags are converted to lower case for this purpose.) The data
# between tags is passed to the parser by calling self.handle_data()
# with some data as argument (the data may be split up in arbitrary
# chunks). Entity references are passed by calling
# self.handle_entityref() with the entity reference as argument.
class SGMLParser(_markupbase.ParserBase):
# Definition of entities -- derived classes may override
entity_or_charref = re.compile('&(?:'
'([a-zA-Z][-.a-zA-Z0-9]*)|#([0-9]+)'
')(;?)')
def __init__(self, verbose=0):
"""Initialize and reset this instance."""
self.verbose = verbose
self.reset()
def reset(self):
"""Reset this instance. Loses all unprocessed data."""
self.__starttag_text = None
self.rawdata = ''
self.stack = []
self.lasttag = '???'
self.nomoretags = 0
self.literal = 0
_markupbase.ParserBase.reset(self)
def setnomoretags(self):
"""Enter literal mode (CDATA) till EOF.
Intended for derived classes only.
"""
self.nomoretags = self.literal = 1
def setliteral(self, *args):
"""Enter literal mode (CDATA).
Intended for derived classes only.
"""
self.literal = 1
def feed(self, data):
"""Feed some data to the parser.
Call this as often as you want, with as little or as much text
as you want (may include '\n'). (This just saves the text,
all the processing is done by goahead().)
"""
self.rawdata = self.rawdata + data
self.goahead(0)
def close(self):
"""Handle the remaining data."""
self.goahead(1)
def error(self, message):
raise SGMLParseError(message)
# Internal -- handle data as far as reasonable. May leave state
# and data to be processed by a subsequent call. If 'end' is
# true, force handling all data as if followed by EOF marker.
def goahead(self, end):
rawdata = self.rawdata
i = 0
n = len(rawdata)
while i < n:
if self.nomoretags:
self.handle_data(rawdata[i:n])
i = n
break
match = interesting.search(rawdata, i)
if match: j = match.start()
else: j = n
if i < j:
self.handle_data(rawdata[i:j])
i = j
if i == n: break
if rawdata[i] == '<':
if starttagopen.match(rawdata, i):
if self.literal:
self.handle_data(rawdata[i])
i = i+1
continue
k = self.parse_starttag(i)
if k < 0: break
i = k
continue
if rawdata.startswith("</", i):
k = self.parse_endtag(i)
if k < 0: break
i = k
self.literal = 0
continue
if self.literal:
if n > (i + 1):
self.handle_data("<")
i = i+1
else:
# incomplete
break
continue
if rawdata.startswith("<!--", i):
# Strictly speaking, a comment is --.*--
# within a declaration tag <!...>.
# This should be removed,
# and comments handled only in parse_declaration.
k = self.parse_comment(i)
if k < 0: break
i = k
continue
if rawdata.startswith("<?", i):
k = self.parse_pi(i)
if k < 0: break
i = i+k
continue
if rawdata.startswith("<!", i):
# This is some sort of declaration; in "HTML as
# deployed," this should only be the document type
# declaration ("<!DOCTYPE html...>").
k = self.parse_declaration(i)
if k < 0: break
i = k
continue
elif rawdata[i] == '&':
if self.literal:
self.handle_data(rawdata[i])
i = i+1
continue
match = charref.match(rawdata, i)
if match:
name = match.group(1)
self.handle_charref(name)
i = match.end(0)
if rawdata[i-1] != ';': i = i-1
continue
match = entityref.match(rawdata, i)
if match:
name = match.group(1)
self.handle_entityref(name)
i = match.end(0)
if rawdata[i-1] != ';': i = i-1
continue
else:
self.error('neither < nor & ??')
# We get here only if incomplete matches but
# nothing else
match = incomplete.match(rawdata, i)
if not match:
self.handle_data(rawdata[i])
i = i+1
continue
j = match.end(0)
if j == n:
break # Really incomplete
self.handle_data(rawdata[i:j])
i = j
# end while
if end and i < n:
self.handle_data(rawdata[i:n])
i = n
self.rawdata = rawdata[i:]
# XXX if end: check for empty stack
# Extensions for the DOCTYPE scanner:
_decl_otherchars = '='
# Internal -- parse processing instr, return length or -1 if not terminated
def parse_pi(self, i):
rawdata = self.rawdata
if rawdata[i:i+2] != '<?':
self.error('unexpected call to parse_pi()')
match = piclose.search(rawdata, i+2)
if not match:
return -1
j = match.start(0)
self.handle_pi(rawdata[i+2: j])
j = match.end(0)
return j-i
def get_starttag_text(self):
return self.__starttag_text
# Internal -- handle starttag, return length or -1 if not terminated
def parse_starttag(self, i):
self.__starttag_text = None
start_pos = i
rawdata = self.rawdata
if shorttagopen.match(rawdata, i):
# SGML shorthand: <tag/data/ == <tag>data</tag>
# XXX Can data contain &... (entity or char refs)?
# XXX Can data contain < or > (tag characters)?
# XXX Can there be whitespace before the first /?
match = shorttag.match(rawdata, i)
if not match:
return -1
tag, data = match.group(1, 2)
self.__starttag_text = '<%s/' % tag
tag = tag.lower()
k = match.end(0)
self.finish_shorttag(tag, data)
self.__starttag_text = rawdata[start_pos:match.end(1) + 1]
return k
# XXX The following should skip matching quotes (' or ")
# As a shortcut way to exit, this isn't so bad, but shouldn't
# be used to locate the actual end of the start tag since the
# < or > characters may be embedded in an attribute value.
match = endbracket.search(rawdata, i+1)
if not match:
return -1
j = match.start(0)
# Now parse the data between i+1 and j into a tag and attrs
attrs = []
if rawdata[i:i+2] == '<>':
# SGML shorthand: <> == <last open tag seen>
k = j
tag = self.lasttag
else:
match = tagfind.match(rawdata, i+1)
if not match:
self.error('unexpected call to parse_starttag')
k = match.end(0)
tag = rawdata[i+1:k].lower()
self.lasttag = tag
while k < j:
match = attrfind.match(rawdata, k)
if not match: break
attrname, rest, attrvalue = match.group(1, 2, 3)
if not rest:
attrvalue = attrname
else:
if (attrvalue[:1] == "'" == attrvalue[-1:] or
attrvalue[:1] == '"' == attrvalue[-1:]):
# strip quotes
attrvalue = attrvalue[1:-1]
attrvalue = self.entity_or_charref.sub(
self._convert_ref, attrvalue)
attrs.append((attrname.lower(), attrvalue))
k = match.end(0)
if rawdata[j] == '>':
j = j+1
self.__starttag_text = rawdata[start_pos:j]
self.finish_starttag(tag, attrs)
return j
# Internal -- convert entity or character reference
def _convert_ref(self, match):
if match.group(2):
return self.convert_charref(match.group(2)) or \
'&#%s%s' % match.groups()[1:]
elif match.group(3):
return self.convert_entityref(match.group(1)) or \
'&%s;' % match.group(1)
else:
return '&%s' % match.group(1)
# Internal -- parse endtag
def parse_endtag(self, i):
rawdata = self.rawdata
match = endbracket.search(rawdata, i+1)
if not match:
return -1
j = match.start(0)
tag = rawdata[i+2:j].strip().lower()
if rawdata[j] == '>':
j = j+1
self.finish_endtag(tag)
return j
# Internal -- finish parsing of <tag/data/ (same as <tag>data</tag>)
def finish_shorttag(self, tag, data):
self.finish_starttag(tag, [])
self.handle_data(data)
self.finish_endtag(tag)
# Internal -- finish processing of start tag
# Return -1 for unknown tag, 0 for open-only tag, 1 for balanced tag
def finish_starttag(self, tag, attrs):
try:
method = getattr(self, 'start_' + tag)
except AttributeError:
try:
method = getattr(self, 'do_' + tag)
except AttributeError:
self.unknown_starttag(tag, attrs)
return -1
else:
self.handle_starttag(tag, method, attrs)
return 0
else:
self.stack.append(tag)
self.handle_starttag(tag, method, attrs)
return 1
# Internal -- finish processing of end tag
def finish_endtag(self, tag):
if not tag:
found = len(self.stack) - 1
if found < 0:
self.unknown_endtag(tag)
return
else:
if tag not in self.stack:
try:
method = getattr(self, 'end_' + tag)
except AttributeError:
self.unknown_endtag(tag)
else:
self.report_unbalanced(tag)
return
found = len(self.stack)
for i in range(found):
if self.stack[i] == tag: found = i
while len(self.stack) > found:
tag = self.stack[-1]
try:
method = getattr(self, 'end_' + tag)
except AttributeError:
method = None
if method:
self.handle_endtag(tag, method)
else:
self.unknown_endtag(tag)
del self.stack[-1]
# Overridable -- handle start tag
def handle_starttag(self, tag, method, attrs):
method(attrs)
# Overridable -- handle end tag
def handle_endtag(self, tag, method):
method()
# Example -- report an unbalanced </...> tag.
def report_unbalanced(self, tag):
if self.verbose:
print('*** Unbalanced </' + tag + '>')
print('*** Stack:', self.stack)
def convert_charref(self, name):
"""Convert character reference, may be overridden."""
try:
n = int(name)
except ValueError:
return
if not 0 <= n <= 127:
return
return self.convert_codepoint(n)
def convert_codepoint(self, codepoint):
return chr(codepoint)
def handle_charref(self, name):
"""Handle character reference, no need to override."""
replacement = self.convert_charref(name)
if replacement is None:
self.unknown_charref(name)
else:
self.handle_data(replacement)
# Definition of entities -- derived classes may override
entitydefs = \
{'lt': '<', 'gt': '>', 'amp': '&', 'quot': '"', 'apos': '\''}
def convert_entityref(self, name):
"""Convert entity references.
        As an alternative to overriding this method, one can tailor the
results by setting up the self.entitydefs mapping appropriately.
"""
table = self.entitydefs
if name in table:
return table[name]
else:
return
def handle_entityref(self, name):
"""Handle entity references, no need to override."""
replacement = self.convert_entityref(name)
if replacement is None:
self.unknown_entityref(name)
else:
self.handle_data(replacement)
# Example -- handle data, should be overridden
def handle_data(self, data):
pass
# Example -- handle comment, could be overridden
def handle_comment(self, data):
pass
# Example -- handle declaration, could be overridden
def handle_decl(self, decl):
pass
# Example -- handle processing instruction, could be overridden
def handle_pi(self, data):
pass
# To be overridden -- handlers for unknown objects
def unknown_starttag(self, tag, attrs): pass
def unknown_endtag(self, tag): pass
def unknown_charref(self, ref): pass
def unknown_entityref(self, ref): pass
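# Illustrative sketch (not part of the original module): a minimal derived
# class using the start_<tag>/end_<tag> handler convention described in the
# comments above SGMLParser; it collects the href attribute of <a> tags.
class ExampleAnchorParser(SGMLParser):
    def reset(self):
        SGMLParser.reset(self)
        self.hrefs = []
    def start_a(self, attrs):
        # attrs is a list of (name, value) pairs with names lower-cased
        self.hrefs.extend(value for name, value in attrs if name == 'href')
    def end_a(self):
        pass
# Typical use (hypothetical input):
#   p = ExampleAnchorParser()
#   p.feed('<a href="http://example.com/">link</a>')
#   p.close()
#   p.hrefs == ['http://example.com/']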
class TestSGMLParser(SGMLParser):
def __init__(self, verbose=0):
self.testdata = ""
SGMLParser.__init__(self, verbose)
def handle_data(self, data):
self.testdata = self.testdata + data
if len(repr(self.testdata)) >= 70:
self.flush()
def flush(self):
data = self.testdata
if data:
self.testdata = ""
print('data:', repr(data))
def handle_comment(self, data):
self.flush()
r = repr(data)
if len(r) > 68:
r = r[:32] + '...' + r[-32:]
print('comment:', r)
def unknown_starttag(self, tag, attrs):
self.flush()
if not attrs:
print('start tag: <' + tag + '>')
else:
print('start tag: <' + tag, end=' ')
for name, value in attrs:
print(name + '=' + '"' + value + '"', end=' ')
print('>')
def unknown_endtag(self, tag):
self.flush()
print('end tag: </' + tag + '>')
def unknown_entityref(self, ref):
self.flush()
print('*** unknown entity ref: &' + ref + ';')
def unknown_charref(self, ref):
self.flush()
print('*** unknown char ref: &#' + ref + ';')
def unknown_decl(self, data):
self.flush()
print('*** unknown decl: [' + data + ']')
def close(self):
SGMLParser.close(self)
self.flush()
def test(args = None):
import sys
if args is None:
args = sys.argv[1:]
if args and args[0] == '-s':
args = args[1:]
klass = SGMLParser
else:
klass = TestSGMLParser
if args:
file = args[0]
else:
file = 'test.html'
if file == '-':
f = sys.stdin
else:
try:
f = open(file, 'r')
except IOError as msg:
print(file, ":", msg)
sys.exit(1)
data = f.read()
if f is not sys.stdin:
f.close()
x = klass()
for c in data:
x.feed(c)
x.close()
if __name__ == '__main__':
test()
| 17,788
|
Python
|
.py
| 476
| 26.289916
| 79
| 0.516675
|
rembo10/headphones
| 3,370
| 601
| 527
|
GPL-3.0
|
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
|
8,378
|
MultipartPostHandler.py
|
rembo10_headphones/lib/MultipartPostHandler.py
|
#!/usr/bin/python
####
# 06/2010 Nic Wolfe <nic@wolfeden.ca>
# 02/2006 Will Holcomb <wholcomb@gmail.com>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
import urllib.request, urllib.parse, urllib.error
import mimetools, mimetypes
import os, sys
# Controls how sequences are encoded. If true, elements may be given multiple values by
# assigning a sequence.
doseq = 1
class MultipartPostHandler(urllib.request.BaseHandler):
handler_order = urllib.request.HTTPHandler.handler_order - 10 # needs to run first
def http_request(self, request):
data = request.get_data()
if data is not None and type(data) != str:
v_files = []
v_vars = []
try:
for(key, value) in list(data.items()):
if type(value) in (file, list, tuple):
v_files.append((key, value))
else:
v_vars.append((key, value))
except TypeError:
systype, value, traceback = sys.exc_info()
raise TypeError("not a valid non-string sequence or mapping object").with_traceback(traceback)
if len(v_files) == 0:
data = urllib.parse.urlencode(v_vars, doseq)
else:
boundary, data = MultipartPostHandler.multipart_encode(v_vars, v_files)
contenttype = 'multipart/form-data; boundary=%s' % boundary
if(request.has_header('Content-Type')
and request.get_header('Content-Type').find('multipart/form-data') != 0):
print("Replacing %s with %s" % (request.get_header('content-type'), 'multipart/form-data'))
request.add_unredirected_header('Content-Type', contenttype)
request.add_data(data)
return request
@staticmethod
def multipart_encode(vars, files, boundary = None, buffer = None):
if boundary is None:
boundary = mimetools.choose_boundary()
if buffer is None:
buffer = ''
for(key, value) in vars:
buffer += '--%s\r\n' % boundary
buffer += 'Content-Disposition: form-data; name="%s"' % key
buffer += '\r\n\r\n' + value + '\r\n'
for(key, fd) in files:
# allow them to pass in a file or a tuple with name & data
if type(fd) == file:
name_in = fd.name
fd.seek(0)
data_in = fd.read()
elif type(fd) in (tuple, list):
name_in, data_in = fd
filename = os.path.basename(name_in)
contenttype = mimetypes.guess_type(filename)[0] or 'application/octet-stream'
buffer += '--%s\r\n' % boundary
buffer += 'Content-Disposition: form-data; name="%s"; filename="%s"\r\n' % (key, filename)
buffer += 'Content-Type: %s\r\n' % contenttype
# buffer += 'Content-Length: %s\r\n' % file_size
buffer += '\r\n' + data_in + '\r\n'
buffer += '--%s--\r\n\r\n' % boundary
return boundary, buffer
https_request = http_request
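# Illustrative usage sketch (not part of the original module). The handler is
# meant to be registered with an opener so that dict data containing open
# files is posted as multipart/form-data; note the body above still relies on
# Python 2-era APIs (file, mimetools, Request.get_data/add_data), so it needs
# porting before this runs on Python 3. URL and field names are made up:
#   opener = urllib.request.build_opener(MultipartPostHandler)
#   params = {"description": "nightly run", "logfile": open("run.log", "rb")}
#   opener.open("http://example.com/upload", params)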
| 3,668
|
Python
|
.py
| 77
| 37.350649
| 111
| 0.600901
|
rembo10/headphones
| 3,370
| 601
| 527
|
GPL-3.0
|
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
|
8,379
|
unittestcompat.py
|
rembo10_headphones/lib/unittestcompat.py
|
import sys
if sys.version_info < (2, 7):
import unittest2 as unittest
from unittest2 import TestCase as TC
else:
import unittest
from unittest import TestCase as TC
skip = unittest.skip
_dummy = False
# less than 2.6 ...
if sys.version_info[0] == 2 and sys.version_info[1] <= 6:
_dummy = True
def _d(f):
def decorate(self, *args, **kw):
if not _dummy:
return f(self, *args, **kw)
return self.assertTrue(True)
return decorate
class TestCase(TC):
"""
Wrapper for python 2.6 stubs
"""
def assertIsInstance(self, obj, cls, msg=None):
if not _dummy:
return super(TestCase, self).assertIsInstance(obj, cls, msg)
tst = isinstance(obj, cls)
return self.assertTrue(tst, msg)
@_d
def assertNotIsInstance(self, *args, **kw):
return super(TestCase, self).assertNotIsInstance(*args, **kw)
@_d
def assertIn(self, *args, **kw):
return super(TestCase, self).assertIn(*args, **kw)
@_d
def assertRegexpMatches(self, *args, **kw):
return super(TestCase, self).assertRegex(*args, **kw)
@_d
def assertItemsEqual(self, *args, **kw):
return super(TestCase, self).assertItemsEqual(*args, **kw)
# -----------------------------------------------------------
# NOT DUMMY ASSERTIONS
# -----------------------------------------------------------
def assertIsNone(self, val, msg=None):
if not _dummy:
return super(TestCase, self).assertIsNone(val, msg)
tst = val is None
return super(TestCase, self).assertTrue(tst, msg)
def assertIsNotNone(self, val, msg=None):
if not _dummy:
return super(TestCase, self).assertIsNotNone(val, msg)
tst = val is not None
return super(TestCase, self).assertTrue(tst, msg)
def assertRaises(self, exc, msg=None):
if not _dummy:
return super(TestCase, self).assertRaises(exc, msg)
return TestCase._TestCaseRaiseStub(self, exc, msg=msg)
def assertRaisesRegexp(self, exc, regex, msg=None):
if not _dummy:
return super(TestCase, self).assertRaises(exc, msg)
return TestCase._TestCaseRaiseStub(self, exc, regex=regex, msg=msg)
class _TestCaseRaiseStub:
""" Internal stuff for stubbing `assertRaises*` """
def __init__(self, test_case, exc, regex=None, msg=None):
self.exc = exc
self.test_case = test_case
self.regex = regex
self.msg = msg
def __enter__(self):
return self
def __exit__(self, tp, value, traceback):
tst = tp is self.exc
self.test_case.assertTrue(tst, msg=self.msg)
self.exception = value
# TODO: implement self.regex checking
            # True indicates that the exception has been handled
return True
def TestArgs(*parameters):
def tuplify(x):
if not isinstance(x, tuple):
return (x,)
return x
def decorator(method, parameters=parameters):
for parameter in (tuplify(x) for x in parameters):
def method_for_parameter(self, method=method, parameter=parameter):
method(self, *parameter)
args_for_parameter = ",".join(repr(v) for v in parameter)
name_for_parameter = method.__name__ + "(" + args_for_parameter + ")"
frame = sys._getframe(1) # pylint: disable-msg=W0212
frame.f_locals[name_for_parameter] = method_for_parameter
if method.__doc__:
frame.f_locals[name_for_parameter].__doc__ = method.__doc__ + '(' + args_for_parameter + ')'
method_for_parameter.__name__ = name_for_parameter + '(' + args_for_parameter + ')'
return None
return decorator
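# Illustrative sketch (not part of the original module): TestArgs writes one
# generated test method per parameter tuple into the enclosing class body, so
# a single test body covers several cases. The class and values are made up.
class TestArgsExample(TestCase):
    @TestArgs((2, 4), (3, 9), (10, 100))
    def test_square(self, value, expected):
        self.assertEqual(value * value, expected)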
| 3,835
|
Python
|
.py
| 94
| 32.43617
| 108
| 0.591068
|
rembo10/headphones
| 3,370
| 601
| 527
|
GPL-3.0
|
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
|
8,380
|
portend.py
|
rembo10_headphones/lib/portend.py
|
"""
A simple library for managing the availability of ports.
"""
import time
import socket
import argparse
import sys
import itertools
import contextlib
import platform
from collections import abc
import urllib.parse
from tempora import timing
def client_host(server_host):
"""
Return the host on which a client can connect to the given listener.
>>> client_host('192.168.0.1')
'192.168.0.1'
>>> client_host('0.0.0.0')
'127.0.0.1'
>>> client_host('::')
'::1'
"""
if server_host == '0.0.0.0':
# 0.0.0.0 is INADDR_ANY, which should answer on localhost.
return '127.0.0.1'
if server_host in ('::', '::0', '::0.0.0.0'):
# :: is IN6ADDR_ANY, which should answer on localhost.
# ::0 and ::0.0.0.0 are non-canonical but common
# ways to write IN6ADDR_ANY.
return '::1'
return server_host
class Checker(object):
def __init__(self, timeout=1.0):
self.timeout = timeout
def assert_free(self, host, port=None):
"""
Assert that the given addr is free
in that all attempts to connect fail within the timeout
or raise a PortNotFree exception.
>>> free_port = find_available_local_port()
>>> Checker().assert_free('localhost', free_port)
>>> Checker().assert_free('127.0.0.1', free_port)
>>> Checker().assert_free('::1', free_port)
Also accepts an addr tuple
>>> addr = '::1', free_port, 0, 0
>>> Checker().assert_free(addr)
Host might refer to a server bind address like '::', which
should use localhost to perform the check.
>>> Checker().assert_free('::', free_port)
"""
if port is None and isinstance(host, abc.Sequence):
host, port = host[:2]
if platform.system() == 'Windows':
host = client_host(host) # pragma: nocover
info = socket.getaddrinfo(host, port, socket.AF_UNSPEC, socket.SOCK_STREAM)
list(itertools.starmap(self._connect, info))
def _connect(self, af, socktype, proto, canonname, sa):
s = socket.socket(af, socktype, proto)
# fail fast with a small timeout
s.settimeout(self.timeout)
with contextlib.closing(s):
try:
s.connect(sa)
except socket.error:
return
# the connect succeeded, so the port isn't free
host, port = sa[:2]
tmpl = "Port {port} is in use on {host}."
raise PortNotFree(tmpl.format(**locals()))
class Timeout(IOError):
pass
class PortNotFree(IOError):
pass
def free(host, port, timeout=float('Inf')):
"""
Wait for the specified port to become free (dropping or rejecting
requests). Return when the port is free or raise a Timeout if timeout has
elapsed.
Timeout may be specified in seconds or as a timedelta.
If timeout is None or ∞, the routine will run indefinitely.
>>> free('localhost', find_available_local_port())
>>> free(None, None)
Traceback (most recent call last):
...
ValueError: Host values of '' or None are not allowed.
"""
if not host:
raise ValueError("Host values of '' or None are not allowed.")
timer = timing.Timer(timeout)
while True:
try:
# Expect a free port, so use a small timeout
Checker(timeout=0.1).assert_free(host, port)
return
except PortNotFree:
if timer.expired():
raise Timeout("Port {port} not free on {host}.".format(**locals()))
# Politely wait.
time.sleep(0.1)
def occupied(host, port, timeout=float('Inf')):
"""
Wait for the specified port to become occupied (accepting requests).
Return when the port is occupied or raise a Timeout if timeout has
elapsed.
Timeout may be specified in seconds or as a timedelta.
If timeout is None or ∞, the routine will run indefinitely.
>>> occupied('localhost', find_available_local_port(), .1)
Traceback (most recent call last):
...
Timeout: Port ... not bound on localhost.
>>> occupied(None, None)
Traceback (most recent call last):
...
ValueError: Host values of '' or None are not allowed.
"""
if not host:
raise ValueError("Host values of '' or None are not allowed.")
timer = timing.Timer(timeout)
while True:
try:
Checker(timeout=0.5).assert_free(host, port)
if timer.expired():
raise Timeout("Port {port} not bound on {host}.".format(**locals()))
# Politely wait
time.sleep(0.1)
except PortNotFree:
# port is occupied
return
def find_available_local_port():
"""
Find a free port on localhost.
>>> 0 < find_available_local_port() < 65536
True
"""
infos = socket.getaddrinfo(None, 0, socket.AF_UNSPEC, socket.SOCK_STREAM)
family, proto, _, _, addr = next(iter(infos))
sock = socket.socket(family, proto)
sock.bind(addr)
addr, port = sock.getsockname()[:2]
sock.close()
return port
class HostPort(str):
"""
A simple representation of a host/port pair as a string
>>> hp = HostPort('localhost:32768')
>>> hp.host
'localhost'
>>> hp.port
32768
>>> len(hp)
15
>>> hp = HostPort('[::1]:32768')
>>> hp.host
'::1'
>>> hp.port
32768
"""
@property
def host(self):
return urllib.parse.urlparse(f'//{self}').hostname
@property
def port(self):
return urllib.parse.urlparse(f'//{self}').port
@classmethod
def from_addr(cls, addr):
listen_host, port = addr[:2]
plain_host = client_host(listen_host)
host = f'[{plain_host}]' if ':' in plain_host else plain_host
return cls(':'.join([host, str(port)]))
def _main(args=None):
parser = argparse.ArgumentParser()
def global_lookup(key):
return globals()[key]
parser.add_argument('target', metavar='host:port', type=HostPort)
parser.add_argument('func', metavar='state', type=global_lookup)
parser.add_argument('-t', '--timeout', default=None, type=float)
args = parser.parse_args(args)
try:
args.func(args.target.host, args.target.port, timeout=args.timeout)
except Timeout as timeout:
print(timeout, file=sys.stderr)
raise SystemExit(1)
__name__ == '__main__' and _main()
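# Editor's note (sketch): run as a script, the module waits for host:port to
# reach the requested state; the ``state`` argument is looked up in this
# module's globals, so ``free`` and ``occupied`` are the supported values.
# For example, to wait up to 3 seconds for localhost:8080 to be occupied:
#     python <this-module>.py localhost:8080 occupied -t 3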
| 6,488 | Python | .py | 184 | 28.391304 | 84 | 0.616752 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
| 8,381 | certgen.py | rembo10_headphones/lib/certgen.py |
# -*- coding: latin-1 -*-
#
# Copyright (C) Martin Sjögren and AB Strakt 2001, All rights reserved
# Copyright (C) Jean-Paul Calderone 2008, All rights reserved
# This file is licenced under the GNU LESSER GENERAL PUBLIC LICENSE Version 2.1 or later (aka LGPL v2.1)
# Please see LGPL2.1.txt for more information
"""
Certificate generation module.
"""
from OpenSSL import crypto
import time
TYPE_RSA = crypto.TYPE_RSA
TYPE_DSA = crypto.TYPE_DSA
serial = int(time.time())
def createKeyPair(type, bits):
"""
Create a public/private key pair.
Arguments: type - Key type, must be one of TYPE_RSA and TYPE_DSA
bits - Number of bits to use in the key
Returns: The public/private key pair in a PKey object
"""
pkey = crypto.PKey()
pkey.generate_key(type, bits)
return pkey
def createCertRequest(pkey, digest="md5", **name):
"""
Create a certificate request.
Arguments: pkey - The key to associate with the request
digest - Digestion method to use for signing, default is md5
**name - The name of the subject of the request, possible
arguments are:
C - Country name
ST - State or province name
L - Locality name
O - Organization name
OU - Organizational unit name
CN - Common name
emailAddress - E-mail address
Returns: The certificate request in an X509Req object
"""
req = crypto.X509Req()
subj = req.get_subject()
for (key,value) in list(name.items()):
setattr(subj, key, value)
req.set_pubkey(pkey)
req.sign(pkey, digest)
return req
def createCertificate(req, xxx_todo_changeme, serial, xxx_todo_changeme1, digest="md5"):
"""
Generate a certificate given a certificate request.
    Arguments: req        - Certificate request to use
issuerCert - The certificate of the issuer
issuerKey - The private key of the issuer
serial - Serial number for the certificate
notBefore - Timestamp (relative to now) when the certificate
starts being valid
notAfter - Timestamp (relative to now) when the certificate
stops being valid
digest - Digest method to use for signing, default is md5
Returns: The signed certificate in an X509 object
"""
(issuerCert, issuerKey) = xxx_todo_changeme
(notBefore, notAfter) = xxx_todo_changeme1
cert = crypto.X509()
cert.set_serial_number(serial)
cert.gmtime_adj_notBefore(notBefore)
cert.gmtime_adj_notAfter(notAfter)
cert.set_issuer(issuerCert.get_subject())
cert.set_subject(req.get_subject())
cert.set_pubkey(req.get_pubkey())
cert.sign(issuerKey, digest)
return cert
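# --- Editor's illustrative sketch (not part of the original module) ----------
# Ties the three helpers together to produce a self-signed certificate and
# return the key and certificate as PEM bytes. The 2048-bit key size and the
# sha256 digest are assumptions for the example, not requirements of this
# module (whose historical default digest is md5).
def _example_self_signed(common_name="localhost", days=365):
    pkey = createKeyPair(TYPE_RSA, 2048)
    req = createCertRequest(pkey, digest="sha256", CN=common_name)
    # For a self-signed certificate the request serves as its own issuer.
    cert = createCertificate(req, (req, pkey), serial,
                             (0, 60 * 60 * 24 * days), digest="sha256")
    return (crypto.dump_privatekey(crypto.FILETYPE_PEM, pkey),
            crypto.dump_certificate(crypto.FILETYPE_PEM, cert))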
| 2,984 | Python | .py | 72 | 32.708333 | 104 | 0.625172 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
| 8,382 | six.py | rembo10_headphones/lib/six.py |
# Copyright (c) 2010-2020 Benjamin Peterson
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""Utilities for writing code that runs on Python 2 and 3"""
from __future__ import absolute_import
import functools
import itertools
import operator
import sys
import types
__author__ = "Benjamin Peterson <benjamin@python.org>"
__version__ = "1.16.0"
# Useful for very coarse version differentiation.
PY2 = sys.version_info[0] == 2
PY3 = sys.version_info[0] == 3
PY34 = sys.version_info[0:2] >= (3, 4)
if PY3:
string_types = str,
integer_types = int,
class_types = type,
text_type = str
binary_type = bytes
MAXSIZE = sys.maxsize
else:
string_types = basestring,
integer_types = (int, long)
class_types = (type, types.ClassType)
text_type = unicode
binary_type = str
if sys.platform.startswith("java"):
# Jython always uses 32 bits.
MAXSIZE = int((1 << 31) - 1)
else:
# It's possible to have sizeof(long) != sizeof(Py_ssize_t).
class X(object):
def __len__(self):
return 1 << 31
try:
len(X())
except OverflowError:
# 32-bit
MAXSIZE = int((1 << 31) - 1)
else:
# 64-bit
MAXSIZE = int((1 << 63) - 1)
del X
if PY34:
from importlib.util import spec_from_loader
else:
spec_from_loader = None
def _add_doc(func, doc):
"""Add documentation to a function."""
func.__doc__ = doc
def _import_module(name):
"""Import module, returning the module after the last dot."""
__import__(name)
return sys.modules[name]
class _LazyDescr(object):
def __init__(self, name):
self.name = name
def __get__(self, obj, tp):
result = self._resolve()
setattr(obj, self.name, result) # Invokes __set__.
try:
# This is a bit ugly, but it avoids running this again by
# removing this descriptor.
delattr(obj.__class__, self.name)
except AttributeError:
pass
return result
class MovedModule(_LazyDescr):
def __init__(self, name, old, new=None):
super(MovedModule, self).__init__(name)
if PY3:
if new is None:
new = name
self.mod = new
else:
self.mod = old
def _resolve(self):
return _import_module(self.mod)
def __getattr__(self, attr):
_module = self._resolve()
value = getattr(_module, attr)
setattr(self, attr, value)
return value
class _LazyModule(types.ModuleType):
def __init__(self, name):
super(_LazyModule, self).__init__(name)
self.__doc__ = self.__class__.__doc__
def __dir__(self):
attrs = ["__doc__", "__name__"]
attrs += [attr.name for attr in self._moved_attributes]
return attrs
# Subclasses should override this
_moved_attributes = []
class MovedAttribute(_LazyDescr):
def __init__(self, name, old_mod, new_mod, old_attr=None, new_attr=None):
super(MovedAttribute, self).__init__(name)
if PY3:
if new_mod is None:
new_mod = name
self.mod = new_mod
if new_attr is None:
if old_attr is None:
new_attr = name
else:
new_attr = old_attr
self.attr = new_attr
else:
self.mod = old_mod
if old_attr is None:
old_attr = name
self.attr = old_attr
def _resolve(self):
module = _import_module(self.mod)
return getattr(module, self.attr)
class _SixMetaPathImporter(object):
"""
A meta path importer to import six.moves and its submodules.
This class implements a PEP302 finder and loader. It should be compatible
    with Python 2.5 and all existing versions of Python 3.
"""
def __init__(self, six_module_name):
self.name = six_module_name
self.known_modules = {}
def _add_module(self, mod, *fullnames):
for fullname in fullnames:
self.known_modules[self.name + "." + fullname] = mod
def _get_module(self, fullname):
return self.known_modules[self.name + "." + fullname]
def find_module(self, fullname, path=None):
if fullname in self.known_modules:
return self
return None
def find_spec(self, fullname, path, target=None):
if fullname in self.known_modules:
return spec_from_loader(fullname, self)
return None
def __get_module(self, fullname):
try:
return self.known_modules[fullname]
except KeyError:
raise ImportError("This loader does not know module " + fullname)
def load_module(self, fullname):
try:
# in case of a reload
return sys.modules[fullname]
except KeyError:
pass
mod = self.__get_module(fullname)
if isinstance(mod, MovedModule):
mod = mod._resolve()
else:
mod.__loader__ = self
sys.modules[fullname] = mod
return mod
def is_package(self, fullname):
"""
Return true, if the named module is a package.
We need this method to get correct spec objects with
Python 3.4 (see PEP451)
"""
return hasattr(self.__get_module(fullname), "__path__")
def get_code(self, fullname):
"""Return None
Required, if is_package is implemented"""
self.__get_module(fullname) # eventually raises ImportError
return None
get_source = get_code # same as get_code
def create_module(self, spec):
return self.load_module(spec.name)
def exec_module(self, module):
pass
_importer = _SixMetaPathImporter(__name__)
class _MovedItems(_LazyModule):
"""Lazy loading of moved objects"""
__path__ = [] # mark as package
_moved_attributes = [
MovedAttribute("cStringIO", "cStringIO", "io", "StringIO"),
MovedAttribute("filter", "itertools", "builtins", "ifilter", "filter"),
MovedAttribute("filterfalse", "itertools", "itertools", "ifilterfalse", "filterfalse"),
MovedAttribute("input", "__builtin__", "builtins", "raw_input", "input"),
MovedAttribute("intern", "__builtin__", "sys"),
MovedAttribute("map", "itertools", "builtins", "imap", "map"),
MovedAttribute("getcwd", "os", "os", "getcwdu", "getcwd"),
MovedAttribute("getcwdb", "os", "os", "getcwd", "getcwdb"),
MovedAttribute("getoutput", "commands", "subprocess"),
MovedAttribute("range", "__builtin__", "builtins", "xrange", "range"),
MovedAttribute("reload_module", "__builtin__", "importlib" if PY34 else "imp", "reload"),
MovedAttribute("reduce", "__builtin__", "functools"),
MovedAttribute("shlex_quote", "pipes", "shlex", "quote"),
MovedAttribute("StringIO", "StringIO", "io"),
MovedAttribute("UserDict", "UserDict", "collections", "IterableUserDict", "UserDict"),
MovedAttribute("UserList", "UserList", "collections"),
MovedAttribute("UserString", "UserString", "collections"),
MovedAttribute("xrange", "__builtin__", "builtins", "xrange", "range"),
MovedAttribute("zip", "itertools", "builtins", "izip", "zip"),
MovedAttribute("zip_longest", "itertools", "itertools", "izip_longest", "zip_longest"),
MovedModule("builtins", "__builtin__"),
MovedModule("configparser", "ConfigParser"),
MovedModule("collections_abc", "collections", "collections.abc" if sys.version_info >= (3, 3) else "collections"),
MovedModule("copyreg", "copy_reg"),
MovedModule("dbm_gnu", "gdbm", "dbm.gnu"),
MovedModule("dbm_ndbm", "dbm", "dbm.ndbm"),
MovedModule("_dummy_thread", "dummy_thread", "_dummy_thread" if sys.version_info < (3, 9) else "_thread"),
MovedModule("http_cookiejar", "cookielib", "http.cookiejar"),
MovedModule("http_cookies", "Cookie", "http.cookies"),
MovedModule("html_entities", "htmlentitydefs", "html.entities"),
MovedModule("html_parser", "HTMLParser", "html.parser"),
MovedModule("http_client", "httplib", "http.client"),
MovedModule("email_mime_base", "email.MIMEBase", "email.mime.base"),
MovedModule("email_mime_image", "email.MIMEImage", "email.mime.image"),
MovedModule("email_mime_multipart", "email.MIMEMultipart", "email.mime.multipart"),
MovedModule("email_mime_nonmultipart", "email.MIMENonMultipart", "email.mime.nonmultipart"),
MovedModule("email_mime_text", "email.MIMEText", "email.mime.text"),
MovedModule("BaseHTTPServer", "BaseHTTPServer", "http.server"),
MovedModule("CGIHTTPServer", "CGIHTTPServer", "http.server"),
MovedModule("SimpleHTTPServer", "SimpleHTTPServer", "http.server"),
MovedModule("cPickle", "cPickle", "pickle"),
MovedModule("queue", "Queue"),
MovedModule("reprlib", "repr"),
MovedModule("socketserver", "SocketServer"),
MovedModule("_thread", "thread", "_thread"),
MovedModule("tkinter", "Tkinter"),
MovedModule("tkinter_dialog", "Dialog", "tkinter.dialog"),
MovedModule("tkinter_filedialog", "FileDialog", "tkinter.filedialog"),
MovedModule("tkinter_scrolledtext", "ScrolledText", "tkinter.scrolledtext"),
MovedModule("tkinter_simpledialog", "SimpleDialog", "tkinter.simpledialog"),
MovedModule("tkinter_tix", "Tix", "tkinter.tix"),
MovedModule("tkinter_ttk", "ttk", "tkinter.ttk"),
MovedModule("tkinter_constants", "Tkconstants", "tkinter.constants"),
MovedModule("tkinter_dnd", "Tkdnd", "tkinter.dnd"),
MovedModule("tkinter_colorchooser", "tkColorChooser",
"tkinter.colorchooser"),
MovedModule("tkinter_commondialog", "tkCommonDialog",
"tkinter.commondialog"),
MovedModule("tkinter_tkfiledialog", "tkFileDialog", "tkinter.filedialog"),
MovedModule("tkinter_font", "tkFont", "tkinter.font"),
MovedModule("tkinter_messagebox", "tkMessageBox", "tkinter.messagebox"),
MovedModule("tkinter_tksimpledialog", "tkSimpleDialog",
"tkinter.simpledialog"),
MovedModule("urllib_parse", __name__ + ".moves.urllib_parse", "urllib.parse"),
MovedModule("urllib_error", __name__ + ".moves.urllib_error", "urllib.error"),
MovedModule("urllib", __name__ + ".moves.urllib", __name__ + ".moves.urllib"),
MovedModule("urllib_robotparser", "robotparser", "urllib.robotparser"),
MovedModule("xmlrpc_client", "xmlrpclib", "xmlrpc.client"),
MovedModule("xmlrpc_server", "SimpleXMLRPCServer", "xmlrpc.server"),
]
# Add windows specific modules.
if sys.platform == "win32":
_moved_attributes += [
MovedModule("winreg", "_winreg"),
]
for attr in _moved_attributes:
setattr(_MovedItems, attr.name, attr)
if isinstance(attr, MovedModule):
_importer._add_module(attr, "moves." + attr.name)
del attr
_MovedItems._moved_attributes = _moved_attributes
moves = _MovedItems(__name__ + ".moves")
_importer._add_module(moves, "moves")
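# Editor's sketch (not part of the original module): the ``moves`` object
# registered above resolves names lazily, so the same attribute access maps to
# xrange/izip on Python 2 and to the builtin range/zip on Python 3.
def _example_moves_usage():
    return list(moves.zip(moves.range(3), "abc"))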
class Module_six_moves_urllib_parse(_LazyModule):
"""Lazy loading of moved objects in six.moves.urllib_parse"""
_urllib_parse_moved_attributes = [
MovedAttribute("ParseResult", "urlparse", "urllib.parse"),
MovedAttribute("SplitResult", "urlparse", "urllib.parse"),
MovedAttribute("parse_qs", "urlparse", "urllib.parse"),
MovedAttribute("parse_qsl", "urlparse", "urllib.parse"),
MovedAttribute("urldefrag", "urlparse", "urllib.parse"),
MovedAttribute("urljoin", "urlparse", "urllib.parse"),
MovedAttribute("urlparse", "urlparse", "urllib.parse"),
MovedAttribute("urlsplit", "urlparse", "urllib.parse"),
MovedAttribute("urlunparse", "urlparse", "urllib.parse"),
MovedAttribute("urlunsplit", "urlparse", "urllib.parse"),
MovedAttribute("quote", "urllib", "urllib.parse"),
MovedAttribute("quote_plus", "urllib", "urllib.parse"),
MovedAttribute("unquote", "urllib", "urllib.parse"),
MovedAttribute("unquote_plus", "urllib", "urllib.parse"),
MovedAttribute("unquote_to_bytes", "urllib", "urllib.parse", "unquote", "unquote_to_bytes"),
MovedAttribute("urlencode", "urllib", "urllib.parse"),
MovedAttribute("splitquery", "urllib", "urllib.parse"),
MovedAttribute("splittag", "urllib", "urllib.parse"),
MovedAttribute("splituser", "urllib", "urllib.parse"),
MovedAttribute("splitvalue", "urllib", "urllib.parse"),
MovedAttribute("uses_fragment", "urlparse", "urllib.parse"),
MovedAttribute("uses_netloc", "urlparse", "urllib.parse"),
MovedAttribute("uses_params", "urlparse", "urllib.parse"),
MovedAttribute("uses_query", "urlparse", "urllib.parse"),
MovedAttribute("uses_relative", "urlparse", "urllib.parse"),
]
for attr in _urllib_parse_moved_attributes:
setattr(Module_six_moves_urllib_parse, attr.name, attr)
del attr
Module_six_moves_urllib_parse._moved_attributes = _urllib_parse_moved_attributes
_importer._add_module(Module_six_moves_urllib_parse(__name__ + ".moves.urllib_parse"),
"moves.urllib_parse", "moves.urllib.parse")
class Module_six_moves_urllib_error(_LazyModule):
"""Lazy loading of moved objects in six.moves.urllib_error"""
_urllib_error_moved_attributes = [
MovedAttribute("URLError", "urllib2", "urllib.error"),
MovedAttribute("HTTPError", "urllib2", "urllib.error"),
MovedAttribute("ContentTooShortError", "urllib", "urllib.error"),
]
for attr in _urllib_error_moved_attributes:
setattr(Module_six_moves_urllib_error, attr.name, attr)
del attr
Module_six_moves_urllib_error._moved_attributes = _urllib_error_moved_attributes
_importer._add_module(Module_six_moves_urllib_error(__name__ + ".moves.urllib.error"),
"moves.urllib_error", "moves.urllib.error")
class Module_six_moves_urllib_request(_LazyModule):
"""Lazy loading of moved objects in six.moves.urllib_request"""
_urllib_request_moved_attributes = [
MovedAttribute("urlopen", "urllib2", "urllib.request"),
MovedAttribute("install_opener", "urllib2", "urllib.request"),
MovedAttribute("build_opener", "urllib2", "urllib.request"),
MovedAttribute("pathname2url", "urllib", "urllib.request"),
MovedAttribute("url2pathname", "urllib", "urllib.request"),
MovedAttribute("getproxies", "urllib", "urllib.request"),
MovedAttribute("Request", "urllib2", "urllib.request"),
MovedAttribute("OpenerDirector", "urllib2", "urllib.request"),
MovedAttribute("HTTPDefaultErrorHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPRedirectHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPCookieProcessor", "urllib2", "urllib.request"),
MovedAttribute("ProxyHandler", "urllib2", "urllib.request"),
MovedAttribute("BaseHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPPasswordMgr", "urllib2", "urllib.request"),
MovedAttribute("HTTPPasswordMgrWithDefaultRealm", "urllib2", "urllib.request"),
MovedAttribute("AbstractBasicAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPBasicAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("ProxyBasicAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("AbstractDigestAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPDigestAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("ProxyDigestAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPSHandler", "urllib2", "urllib.request"),
MovedAttribute("FileHandler", "urllib2", "urllib.request"),
MovedAttribute("FTPHandler", "urllib2", "urllib.request"),
MovedAttribute("CacheFTPHandler", "urllib2", "urllib.request"),
MovedAttribute("UnknownHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPErrorProcessor", "urllib2", "urllib.request"),
MovedAttribute("urlretrieve", "urllib", "urllib.request"),
MovedAttribute("urlcleanup", "urllib", "urllib.request"),
MovedAttribute("URLopener", "urllib", "urllib.request"),
MovedAttribute("FancyURLopener", "urllib", "urllib.request"),
MovedAttribute("proxy_bypass", "urllib", "urllib.request"),
MovedAttribute("parse_http_list", "urllib2", "urllib.request"),
MovedAttribute("parse_keqv_list", "urllib2", "urllib.request"),
]
for attr in _urllib_request_moved_attributes:
setattr(Module_six_moves_urllib_request, attr.name, attr)
del attr
Module_six_moves_urllib_request._moved_attributes = _urllib_request_moved_attributes
_importer._add_module(Module_six_moves_urllib_request(__name__ + ".moves.urllib.request"),
"moves.urllib_request", "moves.urllib.request")
class Module_six_moves_urllib_response(_LazyModule):
"""Lazy loading of moved objects in six.moves.urllib_response"""
_urllib_response_moved_attributes = [
MovedAttribute("addbase", "urllib", "urllib.response"),
MovedAttribute("addclosehook", "urllib", "urllib.response"),
MovedAttribute("addinfo", "urllib", "urllib.response"),
MovedAttribute("addinfourl", "urllib", "urllib.response"),
]
for attr in _urllib_response_moved_attributes:
setattr(Module_six_moves_urllib_response, attr.name, attr)
del attr
Module_six_moves_urllib_response._moved_attributes = _urllib_response_moved_attributes
_importer._add_module(Module_six_moves_urllib_response(__name__ + ".moves.urllib.response"),
"moves.urllib_response", "moves.urllib.response")
class Module_six_moves_urllib_robotparser(_LazyModule):
"""Lazy loading of moved objects in six.moves.urllib_robotparser"""
_urllib_robotparser_moved_attributes = [
MovedAttribute("RobotFileParser", "robotparser", "urllib.robotparser"),
]
for attr in _urllib_robotparser_moved_attributes:
setattr(Module_six_moves_urllib_robotparser, attr.name, attr)
del attr
Module_six_moves_urllib_robotparser._moved_attributes = _urllib_robotparser_moved_attributes
_importer._add_module(Module_six_moves_urllib_robotparser(__name__ + ".moves.urllib.robotparser"),
"moves.urllib_robotparser", "moves.urllib.robotparser")
class Module_six_moves_urllib(types.ModuleType):
"""Create a six.moves.urllib namespace that resembles the Python 3 namespace"""
__path__ = [] # mark as package
parse = _importer._get_module("moves.urllib_parse")
error = _importer._get_module("moves.urllib_error")
request = _importer._get_module("moves.urllib_request")
response = _importer._get_module("moves.urllib_response")
robotparser = _importer._get_module("moves.urllib_robotparser")
def __dir__(self):
return ['parse', 'error', 'request', 'response', 'robotparser']
_importer._add_module(Module_six_moves_urllib(__name__ + ".moves.urllib"),
"moves.urllib")
def add_move(move):
"""Add an item to six.moves."""
setattr(_MovedItems, move.name, move)
def remove_move(name):
"""Remove item from six.moves."""
try:
delattr(_MovedItems, name)
except AttributeError:
try:
del moves.__dict__[name]
except KeyError:
raise AttributeError("no such move, %r" % (name,))
if PY3:
_meth_func = "__func__"
_meth_self = "__self__"
_func_closure = "__closure__"
_func_code = "__code__"
_func_defaults = "__defaults__"
_func_globals = "__globals__"
else:
_meth_func = "im_func"
_meth_self = "im_self"
_func_closure = "func_closure"
_func_code = "func_code"
_func_defaults = "func_defaults"
_func_globals = "func_globals"
try:
advance_iterator = next
except NameError:
def advance_iterator(it):
return it.next()
next = advance_iterator
try:
callable = callable
except NameError:
def callable(obj):
return any("__call__" in klass.__dict__ for klass in type(obj).__mro__)
if PY3:
def get_unbound_function(unbound):
return unbound
create_bound_method = types.MethodType
def create_unbound_method(func, cls):
return func
Iterator = object
else:
def get_unbound_function(unbound):
return unbound.im_func
def create_bound_method(func, obj):
return types.MethodType(func, obj, obj.__class__)
def create_unbound_method(func, cls):
return types.MethodType(func, None, cls)
class Iterator(object):
def next(self):
return type(self).__next__(self)
callable = callable
_add_doc(get_unbound_function,
"""Get the function out of a possibly unbound function""")
get_method_function = operator.attrgetter(_meth_func)
get_method_self = operator.attrgetter(_meth_self)
get_function_closure = operator.attrgetter(_func_closure)
get_function_code = operator.attrgetter(_func_code)
get_function_defaults = operator.attrgetter(_func_defaults)
get_function_globals = operator.attrgetter(_func_globals)
if PY3:
def iterkeys(d, **kw):
return iter(d.keys(**kw))
def itervalues(d, **kw):
return iter(d.values(**kw))
def iteritems(d, **kw):
return iter(d.items(**kw))
def iterlists(d, **kw):
return iter(d.lists(**kw))
viewkeys = operator.methodcaller("keys")
viewvalues = operator.methodcaller("values")
viewitems = operator.methodcaller("items")
else:
def iterkeys(d, **kw):
return d.iterkeys(**kw)
def itervalues(d, **kw):
return d.itervalues(**kw)
def iteritems(d, **kw):
return d.iteritems(**kw)
def iterlists(d, **kw):
return d.iterlists(**kw)
viewkeys = operator.methodcaller("viewkeys")
viewvalues = operator.methodcaller("viewvalues")
viewitems = operator.methodcaller("viewitems")
_add_doc(iterkeys, "Return an iterator over the keys of a dictionary.")
_add_doc(itervalues, "Return an iterator over the values of a dictionary.")
_add_doc(iteritems,
"Return an iterator over the (key, value) pairs of a dictionary.")
_add_doc(iterlists,
"Return an iterator over the (key, [values]) pairs of a dictionary.")
if PY3:
def b(s):
return s.encode("latin-1")
def u(s):
return s
unichr = chr
import struct
int2byte = struct.Struct(">B").pack
del struct
byte2int = operator.itemgetter(0)
indexbytes = operator.getitem
iterbytes = iter
import io
StringIO = io.StringIO
BytesIO = io.BytesIO
del io
_assertCountEqual = "assertCountEqual"
if sys.version_info[1] <= 1:
_assertRaisesRegex = "assertRaisesRegexp"
_assertRegex = "assertRegexpMatches"
_assertNotRegex = "assertNotRegexpMatches"
else:
_assertRaisesRegex = "assertRaisesRegex"
_assertRegex = "assertRegex"
_assertNotRegex = "assertNotRegex"
else:
def b(s):
return s
# Workaround for standalone backslash
def u(s):
return unicode(s.replace(r'\\', r'\\\\'), "unicode_escape")
unichr = unichr
int2byte = chr
def byte2int(bs):
return ord(bs[0])
def indexbytes(buf, i):
return ord(buf[i])
iterbytes = functools.partial(itertools.imap, ord)
import StringIO
StringIO = BytesIO = StringIO.StringIO
_assertCountEqual = "assertItemsEqual"
_assertRaisesRegex = "assertRaisesRegexp"
_assertRegex = "assertRegexpMatches"
_assertNotRegex = "assertNotRegexpMatches"
_add_doc(b, """Byte literal""")
_add_doc(u, """Text literal""")
def assertCountEqual(self, *args, **kwargs):
return getattr(self, _assertCountEqual)(*args, **kwargs)
def assertRaisesRegex(self, *args, **kwargs):
return getattr(self, _assertRaisesRegex)(*args, **kwargs)
def assertRegex(self, *args, **kwargs):
return getattr(self, _assertRegex)(*args, **kwargs)
def assertNotRegex(self, *args, **kwargs):
return getattr(self, _assertNotRegex)(*args, **kwargs)
if PY3:
exec_ = getattr(moves.builtins, "exec")
def reraise(tp, value, tb=None):
try:
if value is None:
value = tp()
if value.__traceback__ is not tb:
raise value.with_traceback(tb)
raise value
finally:
value = None
tb = None
else:
def exec_(_code_, _globs_=None, _locs_=None):
"""Execute code in a namespace."""
if _globs_ is None:
frame = sys._getframe(1)
_globs_ = frame.f_globals
if _locs_ is None:
_locs_ = frame.f_locals
del frame
elif _locs_ is None:
_locs_ = _globs_
exec("""exec _code_ in _globs_, _locs_""")
exec_("""def reraise(tp, value, tb=None):
try:
raise tp, value, tb
finally:
tb = None
""")
if sys.version_info[:2] > (3,):
exec_("""def raise_from(value, from_value):
try:
raise value from from_value
finally:
value = None
""")
else:
def raise_from(value, from_value):
raise value
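# Editor's sketch (not part of the original module): re-raising an exception
# with its original traceback in a 2/3-compatible way. ``wrapped`` is an
# assumed callable used purely for illustration.
def _example_reraise(wrapped):
    try:
        return wrapped()
    except Exception:
        reraise(*sys.exc_info())   # preserves the active traceback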
print_ = getattr(moves.builtins, "print", None)
if print_ is None:
def print_(*args, **kwargs):
"""The new-style print function for Python 2.4 and 2.5."""
fp = kwargs.pop("file", sys.stdout)
if fp is None:
return
def write(data):
if not isinstance(data, basestring):
data = str(data)
# If the file has an encoding, encode unicode with it.
if (isinstance(fp, file) and
isinstance(data, unicode) and
fp.encoding is not None):
errors = getattr(fp, "errors", None)
if errors is None:
errors = "strict"
data = data.encode(fp.encoding, errors)
fp.write(data)
want_unicode = False
sep = kwargs.pop("sep", None)
if sep is not None:
if isinstance(sep, unicode):
want_unicode = True
elif not isinstance(sep, str):
raise TypeError("sep must be None or a string")
end = kwargs.pop("end", None)
if end is not None:
if isinstance(end, unicode):
want_unicode = True
elif not isinstance(end, str):
raise TypeError("end must be None or a string")
if kwargs:
raise TypeError("invalid keyword arguments to print()")
if not want_unicode:
for arg in args:
if isinstance(arg, unicode):
want_unicode = True
break
if want_unicode:
newline = unicode("\n")
space = unicode(" ")
else:
newline = "\n"
space = " "
if sep is None:
sep = space
if end is None:
end = newline
for i, arg in enumerate(args):
if i:
write(sep)
write(arg)
write(end)
if sys.version_info[:2] < (3, 3):
_print = print_
def print_(*args, **kwargs):
fp = kwargs.get("file", sys.stdout)
flush = kwargs.pop("flush", False)
_print(*args, **kwargs)
if flush and fp is not None:
fp.flush()
_add_doc(reraise, """Reraise an exception.""")
if sys.version_info[0:2] < (3, 4):
# This does exactly the same what the :func:`py3:functools.update_wrapper`
# function does on Python versions after 3.2. It sets the ``__wrapped__``
# attribute on ``wrapper`` object and it doesn't raise an error if any of
# the attributes mentioned in ``assigned`` and ``updated`` are missing on
# ``wrapped`` object.
def _update_wrapper(wrapper, wrapped,
assigned=functools.WRAPPER_ASSIGNMENTS,
updated=functools.WRAPPER_UPDATES):
for attr in assigned:
try:
value = getattr(wrapped, attr)
except AttributeError:
continue
else:
setattr(wrapper, attr, value)
for attr in updated:
getattr(wrapper, attr).update(getattr(wrapped, attr, {}))
wrapper.__wrapped__ = wrapped
return wrapper
_update_wrapper.__doc__ = functools.update_wrapper.__doc__
def wraps(wrapped, assigned=functools.WRAPPER_ASSIGNMENTS,
updated=functools.WRAPPER_UPDATES):
return functools.partial(_update_wrapper, wrapped=wrapped,
assigned=assigned, updated=updated)
wraps.__doc__ = functools.wraps.__doc__
else:
wraps = functools.wraps
def with_metaclass(meta, *bases):
"""Create a base class with a metaclass."""
# This requires a bit of explanation: the basic idea is to make a dummy
# metaclass for one level of class instantiation that replaces itself with
# the actual metaclass.
class metaclass(type):
def __new__(cls, name, this_bases, d):
if sys.version_info[:2] >= (3, 7):
# This version introduced PEP 560 that requires a bit
# of extra care (we mimic what is done by __build_class__).
resolved_bases = types.resolve_bases(bases)
if resolved_bases is not bases:
d['__orig_bases__'] = bases
else:
resolved_bases = bases
return meta(name, resolved_bases, d)
@classmethod
def __prepare__(cls, name, this_bases):
return meta.__prepare__(name, bases)
return type.__new__(metaclass, 'temporary_class', (), {})
def add_metaclass(metaclass):
"""Class decorator for creating a class with a metaclass."""
def wrapper(cls):
orig_vars = cls.__dict__.copy()
slots = orig_vars.get('__slots__')
if slots is not None:
if isinstance(slots, str):
slots = [slots]
for slots_var in slots:
orig_vars.pop(slots_var)
orig_vars.pop('__dict__', None)
orig_vars.pop('__weakref__', None)
if hasattr(cls, '__qualname__'):
orig_vars['__qualname__'] = cls.__qualname__
return metaclass(cls.__name__, cls.__bases__, orig_vars)
return wrapper
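# Editor's sketch (not part of the original module): the two equivalent ways
# offered above to attach a metaclass without version-specific syntax. The
# registering metaclass and class names are purely illustrative.
def _example_metaclass_usage():
    registry = {}
    class RegisteringMeta(type):
        def __new__(mcs, name, bases, namespace):
            cls = super(RegisteringMeta, mcs).__new__(mcs, name, bases, namespace)
            registry[name] = cls
            return cls
    class ViaBase(with_metaclass(RegisteringMeta, object)):
        pass
    @add_metaclass(RegisteringMeta)
    class ViaDecorator(object):
        pass
    return registry   # contains 'ViaBase' and 'ViaDecorator'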
def ensure_binary(s, encoding='utf-8', errors='strict'):
"""Coerce **s** to six.binary_type.
For Python 2:
- `unicode` -> encoded to `str`
- `str` -> `str`
For Python 3:
- `str` -> encoded to `bytes`
- `bytes` -> `bytes`
"""
if isinstance(s, binary_type):
return s
if isinstance(s, text_type):
return s.encode(encoding, errors)
raise TypeError("not expecting type '%s'" % type(s))
def ensure_str(s, encoding='utf-8', errors='strict'):
"""Coerce *s* to `str`.
For Python 2:
- `unicode` -> encoded to `str`
- `str` -> `str`
For Python 3:
- `str` -> `str`
- `bytes` -> decoded to `str`
"""
# Optimization: Fast return for the common case.
if type(s) is str:
return s
if PY2 and isinstance(s, text_type):
return s.encode(encoding, errors)
elif PY3 and isinstance(s, binary_type):
return s.decode(encoding, errors)
elif not isinstance(s, (text_type, binary_type)):
raise TypeError("not expecting type '%s'" % type(s))
return s
def ensure_text(s, encoding='utf-8', errors='strict'):
"""Coerce *s* to six.text_type.
For Python 2:
- `unicode` -> `unicode`
- `str` -> `unicode`
For Python 3:
- `str` -> `str`
- `bytes` -> decoded to `str`
"""
if isinstance(s, binary_type):
return s.decode(encoding, errors)
elif isinstance(s, text_type):
return s
else:
raise TypeError("not expecting type '%s'" % type(s))
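# Editor's sketch (not part of the original module): the ensure_* helpers above
# normalise mixed text/bytes input to a predictable type on either version.
def _example_ensure_roundtrip(value=u"podcast"):
    raw = ensure_binary(value)     # always bytes (UTF-8 by default)
    native = ensure_str(raw)       # the native ``str`` of the running version
    return ensure_text(native) == value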
def python_2_unicode_compatible(klass):
"""
A class decorator that defines __unicode__ and __str__ methods under Python 2.
Under Python 3 it does nothing.
To support Python 2 and 3 with a single code base, define a __str__ method
returning text and apply this decorator to the class.
"""
if PY2:
if '__str__' not in klass.__dict__:
raise ValueError("@python_2_unicode_compatible cannot be applied "
"to %s because it doesn't define __str__()." %
klass.__name__)
klass.__unicode__ = klass.__str__
klass.__str__ = lambda self: self.__unicode__().encode('utf-8')
return klass
# Complete the moves implementation.
# This code is at the end of this module to speed up module loading.
# Turn this module into a package.
__path__ = [] # required for PEP 302 and PEP 451
__package__ = __name__ # see PEP 366 @ReservedAssignment
if globals().get("__spec__") is not None:
__spec__.submodule_search_locations = [] # PEP 451 @UndefinedVariable
# Remove other six meta path importers, since they cause problems. This can
# happen if six is removed from sys.modules and then reloaded. (Setuptools does
# this for some reason.)
if sys.meta_path:
for i, importer in enumerate(sys.meta_path):
# Here's some real nastiness: Another "instance" of the six module might
# be floating around. Therefore, we can't use isinstance() to check for
# the six meta path importer, since the other six instance will have
# inserted an importer with different class.
if (type(importer).__name__ == "_SixMetaPathImporter" and
importer.name == __name__):
del sys.meta_path[i]
break
del i, importer
# Finally, add the importer to the meta path import hook.
sys.meta_path.append(_importer)
| 34,581 | Python | .py | 808 | 35.780941 | 118 | 0.643331 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
| 8,383 | munkres.py | rembo10_headphones/lib/munkres.py |
"""
Introduction
============
The Munkres module provides an implementation of the Munkres algorithm
(also called the Hungarian algorithm or the Kuhn-Munkres algorithm),
useful for solving the Assignment Problem.
For complete usage documentation, see: https://software.clapper.org/munkres/
"""
__docformat__ = 'markdown'
# ---------------------------------------------------------------------------
# Imports
# ---------------------------------------------------------------------------
import sys
import copy
from typing import Union, NewType, Sequence, Tuple, Optional, Callable
# ---------------------------------------------------------------------------
# Exports
# ---------------------------------------------------------------------------
__all__ = ['Munkres', 'make_cost_matrix', 'DISALLOWED']
# ---------------------------------------------------------------------------
# Globals
# ---------------------------------------------------------------------------
AnyNum = NewType('AnyNum', Union[int, float])
Matrix = NewType('Matrix', Sequence[Sequence[AnyNum]])
# Info about the module
__version__ = "1.1.4"
__author__ = "Brian Clapper, bmc@clapper.org"
__url__ = "https://software.clapper.org/munkres/"
__copyright__ = "(c) 2008-2020 Brian M. Clapper"
__license__ = "Apache Software License"
# Constants
class DISALLOWED_OBJ(object):
pass
DISALLOWED = DISALLOWED_OBJ()
DISALLOWED_PRINTVAL = "D"
# ---------------------------------------------------------------------------
# Exceptions
# ---------------------------------------------------------------------------
class UnsolvableMatrix(Exception):
"""
Exception raised for unsolvable matrices
"""
pass
# ---------------------------------------------------------------------------
# Classes
# ---------------------------------------------------------------------------
class Munkres:
"""
Calculate the Munkres solution to the classical assignment problem.
See the module documentation for usage.
"""
def __init__(self):
"""Create a new instance"""
self.C = None
self.row_covered = []
self.col_covered = []
self.n = 0
self.Z0_r = 0
self.Z0_c = 0
self.marked = None
self.path = None
def pad_matrix(self, matrix: Matrix, pad_value: int=0) -> Matrix:
"""
Pad a possibly non-square matrix to make it square.
**Parameters**
- `matrix` (list of lists of numbers): matrix to pad
- `pad_value` (`int`): value to use to pad the matrix
**Returns**
a new, possibly padded, matrix
"""
max_columns = 0
total_rows = len(matrix)
for row in matrix:
max_columns = max(max_columns, len(row))
total_rows = max(max_columns, total_rows)
new_matrix = []
for row in matrix:
row_len = len(row)
new_row = row[:]
if total_rows > row_len:
# Row too short. Pad it.
new_row += [pad_value] * (total_rows - row_len)
new_matrix += [new_row]
while len(new_matrix) < total_rows:
new_matrix += [[pad_value] * total_rows]
return new_matrix
def compute(self, cost_matrix: Matrix) -> Sequence[Tuple[int, int]]:
"""
        Compute the indexes for the lowest-cost pairings between rows and
        columns in the cost matrix. Returns a list of `(row, column)` tuples
that can be used to traverse the matrix.
**WARNING**: This code handles square and rectangular matrices. It
does *not* handle irregular matrices.
**Parameters**
- `cost_matrix` (list of lists of numbers): The cost matrix. If this
cost matrix is not square, it will be padded with zeros, via a call
to `pad_matrix()`. (This method does *not* modify the caller's
matrix. It operates on a copy of the matrix.)
**Returns**
A list of `(row, column)` tuples that describe the lowest cost path
through the matrix
"""
self.C = self.pad_matrix(cost_matrix)
self.n = len(self.C)
self.original_length = len(cost_matrix)
self.original_width = len(cost_matrix[0])
self.row_covered = [False for i in range(self.n)]
self.col_covered = [False for i in range(self.n)]
self.Z0_r = 0
self.Z0_c = 0
self.path = self.__make_matrix(self.n * 2, 0)
self.marked = self.__make_matrix(self.n, 0)
done = False
step = 1
steps = { 1 : self.__step1,
2 : self.__step2,
3 : self.__step3,
4 : self.__step4,
5 : self.__step5,
6 : self.__step6 }
while not done:
try:
func = steps[step]
step = func()
except KeyError:
done = True
# Look for the starred columns
results = []
for i in range(self.original_length):
for j in range(self.original_width):
if self.marked[i][j] == 1:
results += [(i, j)]
return results
def __copy_matrix(self, matrix: Matrix) -> Matrix:
"""Return an exact copy of the supplied matrix"""
return copy.deepcopy(matrix)
def __make_matrix(self, n: int, val: AnyNum) -> Matrix:
"""Create an *n*x*n* matrix, populating it with the specific value."""
matrix = []
for i in range(n):
matrix += [[val for j in range(n)]]
return matrix
def __step1(self) -> int:
"""
For each row of the matrix, find the smallest element and
subtract it from every element in its row. Go to Step 2.
"""
C = self.C
n = self.n
for i in range(n):
vals = [x for x in self.C[i] if x is not DISALLOWED]
if len(vals) == 0:
# All values in this row are DISALLOWED. This matrix is
# unsolvable.
raise UnsolvableMatrix(
"Row {0} is entirely DISALLOWED.".format(i)
)
minval = min(vals)
# Find the minimum value for this row and subtract that minimum
# from every element in the row.
for j in range(n):
if self.C[i][j] is not DISALLOWED:
self.C[i][j] -= minval
return 2
def __step2(self) -> int:
"""
Find a zero (Z) in the resulting matrix. If there is no starred
zero in its row or column, star Z. Repeat for each element in the
matrix. Go to Step 3.
"""
n = self.n
for i in range(n):
for j in range(n):
if (self.C[i][j] == 0) and \
(not self.col_covered[j]) and \
(not self.row_covered[i]):
self.marked[i][j] = 1
self.col_covered[j] = True
self.row_covered[i] = True
break
self.__clear_covers()
return 3
def __step3(self) -> int:
"""
Cover each column containing a starred zero. If K columns are
covered, the starred zeros describe a complete set of unique
assignments. In this case, Go to DONE, otherwise, Go to Step 4.
"""
n = self.n
count = 0
for i in range(n):
for j in range(n):
if self.marked[i][j] == 1 and not self.col_covered[j]:
self.col_covered[j] = True
count += 1
if count >= n:
step = 7 # done
else:
step = 4
return step
def __step4(self) -> int:
"""
Find a noncovered zero and prime it. If there is no starred zero
in the row containing this primed zero, Go to Step 5. Otherwise,
cover this row and uncover the column containing the starred
zero. Continue in this manner until there are no uncovered zeros
left. Save the smallest uncovered value and Go to Step 6.
"""
step = 0
done = False
row = 0
col = 0
star_col = -1
while not done:
(row, col) = self.__find_a_zero(row, col)
if row < 0:
done = True
step = 6
else:
self.marked[row][col] = 2
star_col = self.__find_star_in_row(row)
if star_col >= 0:
col = star_col
self.row_covered[row] = True
self.col_covered[col] = False
else:
done = True
self.Z0_r = row
self.Z0_c = col
step = 5
return step
def __step5(self) -> int:
"""
Construct a series of alternating primed and starred zeros as
follows. Let Z0 represent the uncovered primed zero found in Step 4.
Let Z1 denote the starred zero in the column of Z0 (if any).
Let Z2 denote the primed zero in the row of Z1 (there will always
be one). Continue until the series terminates at a primed zero
that has no starred zero in its column. Unstar each starred zero
of the series, star each primed zero of the series, erase all
primes and uncover every line in the matrix. Return to Step 3
"""
count = 0
path = self.path
path[count][0] = self.Z0_r
path[count][1] = self.Z0_c
done = False
while not done:
row = self.__find_star_in_col(path[count][1])
if row >= 0:
count += 1
path[count][0] = row
path[count][1] = path[count-1][1]
else:
done = True
if not done:
col = self.__find_prime_in_row(path[count][0])
count += 1
path[count][0] = path[count-1][0]
path[count][1] = col
self.__convert_path(path, count)
self.__clear_covers()
self.__erase_primes()
return 3
def __step6(self) -> int:
"""
Add the value found in Step 4 to every element of each covered
row, and subtract it from every element of each uncovered column.
Return to Step 4 without altering any stars, primes, or covered
lines.
"""
minval = self.__find_smallest()
events = 0 # track actual changes to matrix
for i in range(self.n):
for j in range(self.n):
if self.C[i][j] is DISALLOWED:
continue
if self.row_covered[i]:
self.C[i][j] += minval
events += 1
if not self.col_covered[j]:
self.C[i][j] -= minval
events += 1
if self.row_covered[i] and not self.col_covered[j]:
events -= 2 # change reversed, no real difference
if (events == 0):
raise UnsolvableMatrix("Matrix cannot be solved!")
return 4
def __find_smallest(self) -> AnyNum:
"""Find the smallest uncovered value in the matrix."""
minval = sys.maxsize
for i in range(self.n):
for j in range(self.n):
if (not self.row_covered[i]) and (not self.col_covered[j]):
if self.C[i][j] is not DISALLOWED and minval > self.C[i][j]:
minval = self.C[i][j]
return minval
def __find_a_zero(self, i0: int = 0, j0: int = 0) -> Tuple[int, int]:
"""Find the first uncovered element with value 0"""
row = -1
col = -1
i = i0
n = self.n
done = False
while not done:
j = j0
while True:
if (self.C[i][j] == 0) and \
(not self.row_covered[i]) and \
(not self.col_covered[j]):
row = i
col = j
done = True
j = (j + 1) % n
if j == j0:
break
i = (i + 1) % n
if i == i0:
done = True
return (row, col)
    def __find_star_in_row(self, row: int) -> int:
"""
Find the first starred element in the specified row. Returns
the column index, or -1 if no starred element was found.
"""
col = -1
for j in range(self.n):
if self.marked[row][j] == 1:
col = j
break
return col
    def __find_star_in_col(self, col: int) -> int:
        """
        Find the first starred element in the specified column. Returns
        the row index, or -1 if no starred element was found.
"""
row = -1
for i in range(self.n):
if self.marked[i][col] == 1:
row = i
break
return row
def __find_prime_in_row(self, row) -> int:
"""
        Find the first prime element in the specified row. Returns
        the column index, or -1 if no prime element was found.
"""
col = -1
for j in range(self.n):
if self.marked[row][j] == 2:
col = j
break
return col
def __convert_path(self,
path: Sequence[Sequence[int]],
count: int) -> None:
for i in range(count+1):
if self.marked[path[i][0]][path[i][1]] == 1:
self.marked[path[i][0]][path[i][1]] = 0
else:
self.marked[path[i][0]][path[i][1]] = 1
def __clear_covers(self) -> None:
"""Clear all covered matrix cells"""
for i in range(self.n):
self.row_covered[i] = False
self.col_covered[i] = False
def __erase_primes(self) -> None:
"""Erase all prime markings"""
for i in range(self.n):
for j in range(self.n):
if self.marked[i][j] == 2:
self.marked[i][j] = 0
# ---------------------------------------------------------------------------
# Functions
# ---------------------------------------------------------------------------
def make_cost_matrix(
profit_matrix: Matrix,
inversion_function: Optional[Callable[[AnyNum], AnyNum]] = None
) -> Matrix:
"""
Create a cost matrix from a profit matrix by calling `inversion_function()`
to invert each value. The inversion function must take one numeric argument
(of any type) and return another numeric argument which is presumed to be
the cost inverse of the original profit value. If the inversion function
is not provided, a given cell's inverted value is calculated as
`max(matrix) - value`.
    This is a module-level function. Call it like this:
        from munkres import make_cost_matrix
        cost_matrix = make_cost_matrix(matrix, inversion_func)
    For example:
        from munkres import make_cost_matrix
        cost_matrix = make_cost_matrix(matrix, lambda x: sys.maxsize - x)
**Parameters**
- `profit_matrix` (list of lists of numbers): The matrix to convert from
profit to cost values.
- `inversion_function` (`function`): The function to use to invert each
entry in the profit matrix.
**Returns**
    A new matrix representing the inversion of `profit_matrix`.
"""
if not inversion_function:
maximum = max(max(row) for row in profit_matrix)
inversion_function = lambda x: maximum - x
cost_matrix = []
for row in profit_matrix:
cost_matrix.append([inversion_function(value) for value in row])
return cost_matrix
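# --- Editor's illustrative sketch (not part of the original module) ----------
# Converts a profit matrix to costs and solves the assignment problem with the
# class above. The matrix values are arbitrary illustration data.
def _example_assignment():
    profit = [[10, 4, 7],
              [5, 8, 3],
              [9, 6, 2]]
    cost = make_cost_matrix(profit)       # default inversion: max(matrix) - value
    indexes = Munkres().compute(cost)     # list of (row, column) pairs
    return sum(profit[r][c] for r, c in indexes)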
def print_matrix(matrix: Matrix, msg: Optional[str] = None) -> None:
"""
Convenience function: Displays the contents of a matrix.
**Parameters**
- `matrix` (list of lists of numbers): The matrix to print
- `msg` (`str`): Optional message to print before displaying the matrix
"""
import math
if msg is not None:
print(msg)
# Calculate the appropriate format width.
width = 0
for row in matrix:
for val in row:
if val is DISALLOWED:
val = DISALLOWED_PRINTVAL
width = max(width, len(str(val)))
# Make the format string
format = ('%%%d' % width)
# Print the matrix
for row in matrix:
sep = '['
for val in row:
if val is DISALLOWED:
val = DISALLOWED_PRINTVAL
formatted = ((format + 's') % val)
sys.stdout.write(sep + formatted)
sep = ', '
sys.stdout.write(']\n')
# ---------------------------------------------------------------------------
# Main
# ---------------------------------------------------------------------------
if __name__ == '__main__':
matrices = [
# Square
([[400, 150, 400],
[400, 450, 600],
[300, 225, 300]],
850), # expected cost
# Rectangular variant
([[400, 150, 400, 1],
[400, 450, 600, 2],
[300, 225, 300, 3]],
452), # expected cost
# Square
([[10, 10, 8],
[9, 8, 1],
[9, 7, 4]],
18),
# Square variant with floating point value
([[10.1, 10.2, 8.3],
[9.4, 8.5, 1.6],
[9.7, 7.8, 4.9]],
19.5),
# Rectangular variant
([[10, 10, 8, 11],
[9, 8, 1, 1],
[9, 7, 4, 10]],
15),
# Rectangular variant with floating point value
([[10.01, 10.02, 8.03, 11.04],
[9.05, 8.06, 1.07, 1.08],
[9.09, 7.1, 4.11, 10.12]],
15.2),
# Rectangular with DISALLOWED
([[4, 5, 6, DISALLOWED],
[1, 9, 12, 11],
[DISALLOWED, 5, 4, DISALLOWED],
[12, 12, 12, 10]],
20),
# Rectangular variant with DISALLOWED and floating point value
([[4.001, 5.002, 6.003, DISALLOWED],
[1.004, 9.005, 12.006, 11.007],
[DISALLOWED, 5.008, 4.009, DISALLOWED],
[12.01, 12.011, 12.012, 10.013]],
20.028),
# DISALLOWED to force pairings
([[1, DISALLOWED, DISALLOWED, DISALLOWED],
[DISALLOWED, 2, DISALLOWED, DISALLOWED],
[DISALLOWED, DISALLOWED, 3, DISALLOWED],
[DISALLOWED, DISALLOWED, DISALLOWED, 4]],
10),
# DISALLOWED to force pairings with floating point value
([[1.1, DISALLOWED, DISALLOWED, DISALLOWED],
[DISALLOWED, 2.2, DISALLOWED, DISALLOWED],
[DISALLOWED, DISALLOWED, 3.3, DISALLOWED],
[DISALLOWED, DISALLOWED, DISALLOWED, 4.4]],
11.0)]
m = Munkres()
for cost_matrix, expected_total in matrices:
print_matrix(cost_matrix, msg='cost matrix')
indexes = m.compute(cost_matrix)
total_cost = 0
for r, c in indexes:
x = cost_matrix[r][c]
total_cost += x
print(('(%d, %d) -> %s' % (r, c, x)))
print(('lowest cost=%s' % total_cost))
assert expected_total == total_cost
| 19,406 | Python | .py | 506 | 28.245059 | 82 | 0.502872 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
| 8,384 | util.py | rembo10_headphones/lib/apscheduler/util.py |
"""This module contains several handy functions primarily meant for internal use."""
from __future__ import division
from asyncio import iscoroutinefunction
from datetime import date, datetime, time, timedelta, tzinfo
from calendar import timegm
from functools import partial
from inspect import isclass, ismethod
import re
import sys
from pytz import timezone, utc, FixedOffset
import six
try:
from inspect import signature
except ImportError: # pragma: nocover
from funcsigs import signature
try:
from threading import TIMEOUT_MAX
except ImportError:
TIMEOUT_MAX = 4294967 # Maximum value accepted by Event.wait() on Windows
__all__ = ('asint', 'asbool', 'astimezone', 'convert_to_datetime', 'datetime_to_utc_timestamp',
'utc_timestamp_to_datetime', 'timedelta_seconds', 'datetime_ceil', 'get_callable_name',
'obj_to_ref', 'ref_to_obj', 'maybe_ref', 'repr_escape', 'check_callable_args',
'normalize', 'localize', 'TIMEOUT_MAX')
class _Undefined(object):
def __nonzero__(self):
return False
def __bool__(self):
return False
def __repr__(self):
return '<undefined>'
undefined = _Undefined() #: a unique object that only signifies that no value is defined
def asint(text):
"""
Safely converts a string to an integer, returning ``None`` if the string is ``None``.
:type text: str
:rtype: int
"""
if text is not None:
return int(text)
def asbool(obj):
"""
Interprets an object as a boolean value.
:rtype: bool
"""
if isinstance(obj, str):
obj = obj.strip().lower()
if obj in ('true', 'yes', 'on', 'y', 't', '1'):
return True
if obj in ('false', 'no', 'off', 'n', 'f', '0'):
return False
raise ValueError('Unable to interpret value "%s" as boolean' % obj)
return bool(obj)
def astimezone(obj):
"""
Interprets an object as a timezone.
:rtype: tzinfo
"""
if isinstance(obj, six.string_types):
return timezone(obj)
if isinstance(obj, tzinfo):
if obj.tzname(None) == 'local':
raise ValueError(
'Unable to determine the name of the local timezone -- you must explicitly '
'specify the name of the local timezone. Please refrain from using timezones like '
'EST to prevent problems with daylight saving time. Instead, use a locale based '
'timezone name (such as Europe/Helsinki).')
return obj
if obj is not None:
raise TypeError('Expected tzinfo, got %s instead' % obj.__class__.__name__)
_DATE_REGEX = re.compile(
r'(?P<year>\d{4})-(?P<month>\d{1,2})-(?P<day>\d{1,2})'
r'(?:[ T](?P<hour>\d{1,2}):(?P<minute>\d{1,2}):(?P<second>\d{1,2})'
r'(?:\.(?P<microsecond>\d{1,6}))?'
r'(?P<timezone>Z|[+-]\d\d:\d\d)?)?$')
def convert_to_datetime(input, tz, arg_name):
"""
Converts the given object to a timezone aware datetime object.
If a timezone aware datetime object is passed, it is returned unmodified.
    If a naive datetime object is passed, it is given the specified timezone.
If the input is a string, it is parsed as a datetime with the given timezone.
    Date strings are accepted in three different forms: date only (Y-m-d), date with time
    (Y-m-d H:M:S), or date with time and microseconds (Y-m-d H:M:S.micro). Additionally you can
override the time zone by giving a specific offset in the format specified by ISO 8601:
Z (UTC), +HH:MM or -HH:MM.
:param str|datetime input: the datetime or string to convert to a timezone aware datetime
:param datetime.tzinfo tz: timezone to interpret ``input`` in
:param str arg_name: the name of the argument (used in an error message)
:rtype: datetime
"""
if input is None:
return
elif isinstance(input, datetime):
datetime_ = input
elif isinstance(input, date):
datetime_ = datetime.combine(input, time())
elif isinstance(input, six.string_types):
m = _DATE_REGEX.match(input)
if not m:
raise ValueError('Invalid date string')
values = m.groupdict()
tzname = values.pop('timezone')
if tzname == 'Z':
tz = utc
elif tzname:
hours, minutes = (int(x) for x in tzname[1:].split(':'))
sign = 1 if tzname[0] == '+' else -1
tz = FixedOffset(sign * (hours * 60 + minutes))
values = {k: int(v or 0) for k, v in values.items()}
datetime_ = datetime(**values)
else:
raise TypeError('Unsupported type for %s: %s' % (arg_name, input.__class__.__name__))
if datetime_.tzinfo is not None:
return datetime_
if tz is None:
raise ValueError(
'The "tz" argument must be specified if %s has no timezone information' % arg_name)
if isinstance(tz, six.string_types):
tz = timezone(tz)
return localize(datetime_, tz)
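# Editor's sketch (not part of the original module): the accepted string forms
# described in the docstring above. The Europe/Helsinki zone and the dates are
# arbitrary illustration values.
def _example_convert_to_datetime():
    tz = timezone('Europe/Helsinki')
    date_only = convert_to_datetime('2024-05-01', tz, 'run_date')
    with_time = convert_to_datetime('2024-05-01 12:30:00', tz, 'run_date')
    with_offset = convert_to_datetime('2024-05-01 12:30:00+02:00', tz, 'run_date')
    return date_only, with_time, with_offset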
def datetime_to_utc_timestamp(timeval):
"""
Converts a datetime instance to a timestamp.
:type timeval: datetime
:rtype: float
"""
if timeval is not None:
return timegm(timeval.utctimetuple()) + timeval.microsecond / 1000000
def utc_timestamp_to_datetime(timestamp):
"""
Converts the given timestamp to a datetime instance.
:type timestamp: float
:rtype: datetime
"""
if timestamp is not None:
return datetime.fromtimestamp(timestamp, utc)
def timedelta_seconds(delta):
"""
Converts the given timedelta to seconds.
:type delta: timedelta
:rtype: float
"""
return delta.days * 24 * 60 * 60 + delta.seconds + \
delta.microseconds / 1000000.0
def datetime_ceil(dateval):
"""
Rounds the given datetime object upwards.
:type dateval: datetime
"""
if dateval.microsecond > 0:
return dateval + timedelta(seconds=1, microseconds=-dateval.microsecond)
return dateval
def datetime_repr(dateval):
return dateval.strftime('%Y-%m-%d %H:%M:%S %Z') if dateval else 'None'
def get_callable_name(func):
"""
Returns the best available display name for the given function/callable.
:rtype: str
"""
# the easy case (on Python 3.3+)
if hasattr(func, '__qualname__'):
return func.__qualname__
# class methods, bound and unbound methods
f_self = getattr(func, '__self__', None) or getattr(func, 'im_self', None)
if f_self and hasattr(func, '__name__'):
f_class = f_self if isclass(f_self) else f_self.__class__
else:
f_class = getattr(func, 'im_class', None)
if f_class and hasattr(func, '__name__'):
return '%s.%s' % (f_class.__name__, func.__name__)
# class or class instance
if hasattr(func, '__call__'):
# class
if hasattr(func, '__name__'):
return func.__name__
# instance of a class with a __call__ method
return func.__class__.__name__
raise TypeError('Unable to determine a name for %r -- maybe it is not a callable?' % func)
def obj_to_ref(obj):
"""
Returns the path to the given callable.
:rtype: str
:raises TypeError: if the given object is not callable
:raises ValueError: if the given object is a :class:`~functools.partial`, lambda or a nested
function
"""
if isinstance(obj, partial):
raise ValueError('Cannot create a reference to a partial()')
name = get_callable_name(obj)
if '<lambda>' in name:
raise ValueError('Cannot create a reference to a lambda')
if '<locals>' in name:
raise ValueError('Cannot create a reference to a nested function')
if ismethod(obj):
if hasattr(obj, 'im_self') and obj.im_self:
# bound method
module = obj.im_self.__module__
elif hasattr(obj, 'im_class') and obj.im_class:
# unbound method
module = obj.im_class.__module__
else:
module = obj.__module__
else:
module = obj.__module__
return '%s:%s' % (module, name)
def ref_to_obj(ref):
"""
Returns the object pointed to by ``ref``.
:type ref: str
"""
if not isinstance(ref, six.string_types):
raise TypeError('References must be strings')
if ':' not in ref:
raise ValueError('Invalid reference')
modulename, rest = ref.split(':', 1)
try:
obj = __import__(modulename, fromlist=[rest])
except ImportError:
raise LookupError('Error resolving reference %s: could not import module' % ref)
try:
for name in rest.split('.'):
obj = getattr(obj, name)
return obj
except Exception:
raise LookupError('Error resolving reference %s: error looking up object' % ref)
def maybe_ref(ref):
"""
Returns the object that the given reference points to, if it is indeed a reference.
If it is not a reference, the object is returned as-is.
"""
if not isinstance(ref, str):
return ref
return ref_to_obj(ref)
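# Editor's sketch (not part of the original module): a textual reference
# survives serialisation (e.g. in a job store) and resolves back to the same
# callable; the exact module path in the reference depends on how this file is
# importable.
def _example_reference_roundtrip():
    ref = obj_to_ref(timedelta_seconds)   # e.g. 'apscheduler.util:timedelta_seconds'
    return maybe_ref(ref) is timedelta_seconds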
if six.PY2:
def repr_escape(string):
if isinstance(string, six.text_type):
return string.encode('ascii', 'backslashreplace')
return string
else:
def repr_escape(string):
return string
def check_callable_args(func, args, kwargs):
"""
Ensures that the given callable can be called with the given arguments.
:type args: tuple
:type kwargs: dict
"""
pos_kwargs_conflicts = [] # parameters that have a match in both args and kwargs
positional_only_kwargs = [] # positional-only parameters that have a match in kwargs
unsatisfied_args = [] # parameters in signature that don't have a match in args or kwargs
unsatisfied_kwargs = [] # keyword-only arguments that don't have a match in kwargs
unmatched_args = list(args) # args that didn't match any of the parameters in the signature
# kwargs that didn't match any of the parameters in the signature
unmatched_kwargs = list(kwargs)
# indicates if the signature defines *args and **kwargs respectively
has_varargs = has_var_kwargs = False
try:
if sys.version_info >= (3, 5):
sig = signature(func, follow_wrapped=False)
else:
sig = signature(func)
except ValueError:
# signature() doesn't work against every kind of callable
return
for param in six.itervalues(sig.parameters):
if param.kind == param.POSITIONAL_OR_KEYWORD:
if param.name in unmatched_kwargs and unmatched_args:
pos_kwargs_conflicts.append(param.name)
elif unmatched_args:
del unmatched_args[0]
elif param.name in unmatched_kwargs:
unmatched_kwargs.remove(param.name)
elif param.default is param.empty:
unsatisfied_args.append(param.name)
elif param.kind == param.POSITIONAL_ONLY:
if unmatched_args:
del unmatched_args[0]
elif param.name in unmatched_kwargs:
unmatched_kwargs.remove(param.name)
positional_only_kwargs.append(param.name)
elif param.default is param.empty:
unsatisfied_args.append(param.name)
elif param.kind == param.KEYWORD_ONLY:
if param.name in unmatched_kwargs:
unmatched_kwargs.remove(param.name)
elif param.default is param.empty:
unsatisfied_kwargs.append(param.name)
elif param.kind == param.VAR_POSITIONAL:
has_varargs = True
elif param.kind == param.VAR_KEYWORD:
has_var_kwargs = True
# Make sure there are no conflicts between args and kwargs
if pos_kwargs_conflicts:
raise ValueError('The following arguments are supplied in both args and kwargs: %s' %
', '.join(pos_kwargs_conflicts))
# Check if keyword arguments are being fed to positional-only parameters
if positional_only_kwargs:
raise ValueError('The following arguments cannot be given as keyword arguments: %s' %
', '.join(positional_only_kwargs))
    # Check that all required positional parameters have been supplied
if unsatisfied_args:
raise ValueError('The following arguments have not been supplied: %s' %
', '.join(unsatisfied_args))
# Check that all keyword-only arguments have been supplied
if unsatisfied_kwargs:
raise ValueError(
'The following keyword-only arguments have not been supplied in kwargs: %s' %
', '.join(unsatisfied_kwargs))
# Check that the callable can accept the given number of positional arguments
if not has_varargs and unmatched_args:
raise ValueError(
'The list of positional arguments is longer than the target callable can handle '
'(allowed: %d, given in args: %d)' % (len(args) - len(unmatched_args), len(args)))
# Check that the callable can accept the given keyword arguments
if not has_var_kwargs and unmatched_kwargs:
raise ValueError(
'The target callable does not accept the following keyword arguments: %s' %
', '.join(unmatched_kwargs))
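# --- Illustrative sketch (editorial addition, not part of the upstream module):
# check_callable_args() applied to a toy function; the names below are hypothetical.
def _demo_check_callable_args():
    def task(url, retries=3):
        return url, retries

    check_callable_args(task, ('https://example.com',), {})   # satisfiable -> no error
    try:
        check_callable_args(task, (), {})                      # 'url' has no value
    except ValueError as exc:
        return str(exc)   # "The following arguments have not been supplied: url"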
def iscoroutinefunction_partial(f):
while isinstance(f, partial):
f = f.func
# The asyncio version of iscoroutinefunction includes testing for @coroutine
# decorations vs. the inspect version which does not.
return iscoroutinefunction(f)
def normalize(dt):
return datetime.fromtimestamp(dt.timestamp(), dt.tzinfo)
def localize(dt, tzinfo):
if hasattr(tzinfo, 'localize'):
return tzinfo.localize(dt)
return normalize(dt.replace(tzinfo=tzinfo))
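# --- Illustrative sketch (editorial addition, not part of the upstream module):
# iscoroutinefunction_partial() unwraps functools.partial layers before testing whether
# the underlying callable is a coroutine function.
def _demo_iscoroutinefunction_partial():
    from functools import partial

    async def fetch(url):
        return url

    assert iscoroutinefunction_partial(fetch)
    assert iscoroutinefunction_partial(partial(fetch, 'https://example.com'))
    assert not iscoroutinefunction_partial(print)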
| 13,846
|
Python
|
.py
| 330
| 34.569697
| 99
| 0.644603
|
rembo10/headphones
| 3,370
| 601
| 527
|
GPL-3.0
|
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
|
8,385
|
__init__.py
|
rembo10_headphones/lib/apscheduler/__init__.py
|
from pkg_resources import get_distribution, DistributionNotFound
try:
release = get_distribution('APScheduler').version.split('-')[0]
except DistributionNotFound:
release = '3.5.0'
version_info = tuple(int(x) if x.isdigit() else x for x in release.split('.'))
version = __version__ = '.'.join(str(x) for x in version_info[:3])
del get_distribution, DistributionNotFound
| 380
|
Python
|
.py
| 8
| 45.25
| 78
| 0.737838
|
rembo10/headphones
| 3,370
| 601
| 527
|
GPL-3.0
|
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
|
8,386
|
events.py
|
rembo10_headphones/lib/apscheduler/events.py
|
__all__ = ('EVENT_SCHEDULER_STARTED', 'EVENT_SCHEDULER_SHUTDOWN', 'EVENT_SCHEDULER_PAUSED',
'EVENT_SCHEDULER_RESUMED', 'EVENT_EXECUTOR_ADDED', 'EVENT_EXECUTOR_REMOVED',
'EVENT_JOBSTORE_ADDED', 'EVENT_JOBSTORE_REMOVED', 'EVENT_ALL_JOBS_REMOVED',
'EVENT_JOB_ADDED', 'EVENT_JOB_REMOVED', 'EVENT_JOB_MODIFIED', 'EVENT_JOB_EXECUTED',
'EVENT_JOB_ERROR', 'EVENT_JOB_MISSED', 'EVENT_JOB_SUBMITTED', 'EVENT_JOB_MAX_INSTANCES',
'SchedulerEvent', 'JobEvent', 'JobExecutionEvent', 'JobSubmissionEvent')
EVENT_SCHEDULER_STARTED = EVENT_SCHEDULER_START = 2 ** 0
EVENT_SCHEDULER_SHUTDOWN = 2 ** 1
EVENT_SCHEDULER_PAUSED = 2 ** 2
EVENT_SCHEDULER_RESUMED = 2 ** 3
EVENT_EXECUTOR_ADDED = 2 ** 4
EVENT_EXECUTOR_REMOVED = 2 ** 5
EVENT_JOBSTORE_ADDED = 2 ** 6
EVENT_JOBSTORE_REMOVED = 2 ** 7
EVENT_ALL_JOBS_REMOVED = 2 ** 8
EVENT_JOB_ADDED = 2 ** 9
EVENT_JOB_REMOVED = 2 ** 10
EVENT_JOB_MODIFIED = 2 ** 11
EVENT_JOB_EXECUTED = 2 ** 12
EVENT_JOB_ERROR = 2 ** 13
EVENT_JOB_MISSED = 2 ** 14
EVENT_JOB_SUBMITTED = 2 ** 15
EVENT_JOB_MAX_INSTANCES = 2 ** 16
EVENT_ALL = (EVENT_SCHEDULER_STARTED | EVENT_SCHEDULER_SHUTDOWN | EVENT_SCHEDULER_PAUSED |
EVENT_SCHEDULER_RESUMED | EVENT_EXECUTOR_ADDED | EVENT_EXECUTOR_REMOVED |
EVENT_JOBSTORE_ADDED | EVENT_JOBSTORE_REMOVED | EVENT_ALL_JOBS_REMOVED |
EVENT_JOB_ADDED | EVENT_JOB_REMOVED | EVENT_JOB_MODIFIED | EVENT_JOB_EXECUTED |
EVENT_JOB_ERROR | EVENT_JOB_MISSED | EVENT_JOB_SUBMITTED | EVENT_JOB_MAX_INSTANCES)
class SchedulerEvent(object):
"""
An event that concerns the scheduler itself.
:ivar code: the type code of this event
:ivar alias: alias of the job store or executor that was added or removed (if applicable)
"""
def __init__(self, code, alias=None):
super(SchedulerEvent, self).__init__()
self.code = code
self.alias = alias
def __repr__(self):
return '<%s (code=%d)>' % (self.__class__.__name__, self.code)
class JobEvent(SchedulerEvent):
"""
An event that concerns a job.
:ivar code: the type code of this event
:ivar job_id: identifier of the job in question
:ivar jobstore: alias of the job store containing the job in question
"""
def __init__(self, code, job_id, jobstore):
super(JobEvent, self).__init__(code)
self.code = code
self.job_id = job_id
self.jobstore = jobstore
class JobSubmissionEvent(JobEvent):
"""
An event that concerns the submission of a job to its executor.
:ivar scheduled_run_times: a list of datetimes when the job was intended to run
"""
def __init__(self, code, job_id, jobstore, scheduled_run_times):
super(JobSubmissionEvent, self).__init__(code, job_id, jobstore)
self.scheduled_run_times = scheduled_run_times
class JobExecutionEvent(JobEvent):
"""
An event that concerns the running of a job within its executor.
:ivar scheduled_run_time: the time when the job was scheduled to be run
:ivar retval: the return value of the successfully executed job
:ivar exception: the exception raised by the job
:ivar traceback: a formatted traceback for the exception
"""
def __init__(self, code, job_id, jobstore, scheduled_run_time, retval=None, exception=None,
traceback=None):
super(JobExecutionEvent, self).__init__(code, job_id, jobstore)
self.scheduled_run_time = scheduled_run_time
self.retval = retval
self.exception = exception
self.traceback = traceback
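# --- Illustrative sketch (editorial addition, not part of the upstream module): listeners
# are registered on a scheduler instance (see apscheduler.schedulers, not shown in this
# file) with a bitmask built from the EVENT_* constants above. The callback name and the
# `scheduler` argument are assumptions for the sketch.
def _demo_add_error_listener(scheduler):
    def on_job_trouble(event):
        # Receives a JobExecutionEvent whose code is EVENT_JOB_ERROR or EVENT_JOB_MISSED.
        print('job %s: event code %d' % (event.job_id, event.code))

    scheduler.add_listener(on_job_trouble, EVENT_JOB_ERROR | EVENT_JOB_MISSED)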
| 3,593
|
Python
|
.py
| 75
| 41.96
| 99
| 0.676479
|
rembo10/headphones
| 3,370
| 601
| 527
|
GPL-3.0
|
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
|
8,387
|
job.py
|
rembo10_headphones/lib/apscheduler/job.py
|
from inspect import ismethod, isclass
from uuid import uuid4
import six
from apscheduler.triggers.base import BaseTrigger
from apscheduler.util import (
ref_to_obj, obj_to_ref, datetime_repr, repr_escape, get_callable_name, check_callable_args,
convert_to_datetime)
try:
from collections.abc import Iterable, Mapping
except ImportError:
from collections import Iterable, Mapping
class Job(object):
"""
    Contains the options given when scheduling callables, along with the job's current schedule
    and other state. This class should never be instantiated by the user.
:var str id: the unique identifier of this job
:var str name: the description of this job
:var func: the callable to execute
:var tuple|list args: positional arguments to the callable
:var dict kwargs: keyword arguments to the callable
:var bool coalesce: whether to only run the job once when several run times are due
:var trigger: the trigger object that controls the schedule of this job
:var str executor: the name of the executor that will run this job
    :var int misfire_grace_time: the maximum number of seconds this job's execution is allowed to
be late (``None`` means "allow the job to run no matter how late it is")
:var int max_instances: the maximum number of concurrently executing instances allowed for this
job
:var datetime.datetime next_run_time: the next scheduled run time of this job
.. note::
The ``misfire_grace_time`` has some non-obvious effects on job execution. See the
:ref:`missed-job-executions` section in the documentation for an in-depth explanation.
"""
__slots__ = ('_scheduler', '_jobstore_alias', 'id', 'trigger', 'executor', 'func', 'func_ref',
'args', 'kwargs', 'name', 'misfire_grace_time', 'coalesce', 'max_instances',
'next_run_time', '__weakref__')
def __init__(self, scheduler, id=None, **kwargs):
super(Job, self).__init__()
self._scheduler = scheduler
self._jobstore_alias = None
self._modify(id=id or uuid4().hex, **kwargs)
def modify(self, **changes):
"""
Makes the given changes to this job and saves it in the associated job store.
Accepted keyword arguments are the same as the variables on this class.
.. seealso:: :meth:`~apscheduler.schedulers.base.BaseScheduler.modify_job`
:return Job: this job instance
"""
self._scheduler.modify_job(self.id, self._jobstore_alias, **changes)
return self
def reschedule(self, trigger, **trigger_args):
"""
Shortcut for switching the trigger on this job.
.. seealso:: :meth:`~apscheduler.schedulers.base.BaseScheduler.reschedule_job`
:return Job: this job instance
"""
self._scheduler.reschedule_job(self.id, self._jobstore_alias, trigger, **trigger_args)
return self
def pause(self):
"""
Temporarily suspend the execution of this job.
.. seealso:: :meth:`~apscheduler.schedulers.base.BaseScheduler.pause_job`
:return Job: this job instance
"""
self._scheduler.pause_job(self.id, self._jobstore_alias)
return self
def resume(self):
"""
Resume the schedule of this job if previously paused.
.. seealso:: :meth:`~apscheduler.schedulers.base.BaseScheduler.resume_job`
:return Job: this job instance
"""
self._scheduler.resume_job(self.id, self._jobstore_alias)
return self
def remove(self):
"""
Unschedules this job and removes it from its associated job store.
.. seealso:: :meth:`~apscheduler.schedulers.base.BaseScheduler.remove_job`
"""
self._scheduler.remove_job(self.id, self._jobstore_alias)
@property
def pending(self):
"""
Returns ``True`` if the referenced job is still waiting to be added to its designated job
store.
"""
return self._jobstore_alias is None
#
# Private API
#
def _get_run_times(self, now):
"""
Computes the scheduled run times between ``next_run_time`` and ``now`` (inclusive).
:type now: datetime.datetime
:rtype: list[datetime.datetime]
"""
run_times = []
next_run_time = self.next_run_time
while next_run_time and next_run_time <= now:
run_times.append(next_run_time)
next_run_time = self.trigger.get_next_fire_time(next_run_time, now)
return run_times
def _modify(self, **changes):
"""
Validates the changes to the Job and makes the modifications if and only if all of them
validate.
"""
approved = {}
if 'id' in changes:
value = changes.pop('id')
if not isinstance(value, six.string_types):
raise TypeError("id must be a nonempty string")
if hasattr(self, 'id'):
raise ValueError('The job ID may not be changed')
approved['id'] = value
if 'func' in changes or 'args' in changes or 'kwargs' in changes:
func = changes.pop('func') if 'func' in changes else self.func
args = changes.pop('args') if 'args' in changes else self.args
kwargs = changes.pop('kwargs') if 'kwargs' in changes else self.kwargs
if isinstance(func, six.string_types):
func_ref = func
func = ref_to_obj(func)
elif callable(func):
try:
func_ref = obj_to_ref(func)
except ValueError:
# If this happens, this Job won't be serializable
func_ref = None
else:
raise TypeError('func must be a callable or a textual reference to one')
if not hasattr(self, 'name') and changes.get('name', None) is None:
changes['name'] = get_callable_name(func)
if isinstance(args, six.string_types) or not isinstance(args, Iterable):
raise TypeError('args must be a non-string iterable')
if isinstance(kwargs, six.string_types) or not isinstance(kwargs, Mapping):
raise TypeError('kwargs must be a dict-like object')
check_callable_args(func, args, kwargs)
approved['func'] = func
approved['func_ref'] = func_ref
approved['args'] = args
approved['kwargs'] = kwargs
if 'name' in changes:
value = changes.pop('name')
if not value or not isinstance(value, six.string_types):
raise TypeError("name must be a nonempty string")
approved['name'] = value
if 'misfire_grace_time' in changes:
value = changes.pop('misfire_grace_time')
if value is not None and (not isinstance(value, six.integer_types) or value <= 0):
raise TypeError('misfire_grace_time must be either None or a positive integer')
approved['misfire_grace_time'] = value
if 'coalesce' in changes:
value = bool(changes.pop('coalesce'))
approved['coalesce'] = value
if 'max_instances' in changes:
value = changes.pop('max_instances')
if not isinstance(value, six.integer_types) or value <= 0:
raise TypeError('max_instances must be a positive integer')
approved['max_instances'] = value
if 'trigger' in changes:
trigger = changes.pop('trigger')
if not isinstance(trigger, BaseTrigger):
raise TypeError('Expected a trigger instance, got %s instead' %
trigger.__class__.__name__)
approved['trigger'] = trigger
if 'executor' in changes:
value = changes.pop('executor')
if not isinstance(value, six.string_types):
raise TypeError('executor must be a string')
approved['executor'] = value
if 'next_run_time' in changes:
value = changes.pop('next_run_time')
approved['next_run_time'] = convert_to_datetime(value, self._scheduler.timezone,
'next_run_time')
if changes:
raise AttributeError('The following are not modifiable attributes of Job: %s' %
', '.join(changes))
for key, value in six.iteritems(approved):
setattr(self, key, value)
def __getstate__(self):
# Don't allow this Job to be serialized if the function reference could not be determined
if not self.func_ref:
raise ValueError(
'This Job cannot be serialized since the reference to its callable (%r) could not '
'be determined. Consider giving a textual reference (module:function name) '
'instead.' % (self.func,))
# Instance methods cannot survive serialization as-is, so store the "self" argument
# explicitly
func = self.func
if ismethod(func) and not isclass(func.__self__) and obj_to_ref(func) == self.func_ref:
args = (func.__self__,) + tuple(self.args)
else:
args = self.args
return {
'version': 1,
'id': self.id,
'func': self.func_ref,
'trigger': self.trigger,
'executor': self.executor,
'args': args,
'kwargs': self.kwargs,
'name': self.name,
'misfire_grace_time': self.misfire_grace_time,
'coalesce': self.coalesce,
'max_instances': self.max_instances,
'next_run_time': self.next_run_time
}
def __setstate__(self, state):
if state.get('version', 1) > 1:
raise ValueError('Job has version %s, but only version 1 can be handled' %
state['version'])
self.id = state['id']
self.func_ref = state['func']
self.func = ref_to_obj(self.func_ref)
self.trigger = state['trigger']
self.executor = state['executor']
self.args = state['args']
self.kwargs = state['kwargs']
self.name = state['name']
self.misfire_grace_time = state['misfire_grace_time']
self.coalesce = state['coalesce']
self.max_instances = state['max_instances']
self.next_run_time = state['next_run_time']
def __eq__(self, other):
if isinstance(other, Job):
return self.id == other.id
return NotImplemented
def __repr__(self):
return '<Job (id=%s name=%s)>' % (repr_escape(self.id), repr_escape(self.name))
def __str__(self):
return repr_escape(self.__unicode__())
def __unicode__(self):
if hasattr(self, 'next_run_time'):
status = ('next run at: ' + datetime_repr(self.next_run_time) if
self.next_run_time else 'paused')
else:
status = 'pending'
return u'%s (trigger: %s, %s)' % (self.name, self.trigger, status)
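# --- Illustrative sketch (editorial addition, not part of the upstream module): how the Job
# methods above are typically used. BackgroundScheduler and add_job() live in
# apscheduler.schedulers and are assumptions here, not part of this file.
def _demo_job_lifecycle():
    from apscheduler.schedulers.background import BackgroundScheduler

    def tick():
        print('tick')

    scheduler = BackgroundScheduler()
    job = scheduler.add_job(tick, 'interval', seconds=30, id='tick')
    scheduler.start()

    job.modify(max_instances=2, name='ticker')   # validated by Job._modify() above
    job.reschedule('interval', minutes=5)        # swaps the trigger via the scheduler
    job.pause()                                  # the scheduler clears next_run_time
    job.resume()
    job.remove()
    scheduler.shutdown()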
| 11,216
|
Python
|
.py
| 237
| 36.57384
| 99
| 0.601063
|
rembo10/headphones
| 3,370
| 601
| 527
|
GPL-3.0
|
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
|
8,388
|
pool.py
|
rembo10_headphones/lib/apscheduler/executors/pool.py
|
from abc import abstractmethod
import concurrent.futures
from apscheduler.executors.base import BaseExecutor, run_job
try:
from concurrent.futures.process import BrokenProcessPool
except ImportError:
BrokenProcessPool = None
class BasePoolExecutor(BaseExecutor):
@abstractmethod
def __init__(self, pool):
super(BasePoolExecutor, self).__init__()
self._pool = pool
def _do_submit_job(self, job, run_times):
def callback(f):
exc, tb = (f.exception_info() if hasattr(f, 'exception_info') else
(f.exception(), getattr(f.exception(), '__traceback__', None)))
if exc:
self._run_job_error(job.id, exc, tb)
else:
self._run_job_success(job.id, f.result())
try:
f = self._pool.submit(run_job, job, job._jobstore_alias, run_times, self._logger.name)
except BrokenProcessPool:
self._logger.warning('Process pool is broken; replacing pool with a fresh instance')
self._pool = self._pool.__class__(self._pool._max_workers)
f = self._pool.submit(run_job, job, job._jobstore_alias, run_times, self._logger.name)
f.add_done_callback(callback)
def shutdown(self, wait=True):
self._pool.shutdown(wait)
class ThreadPoolExecutor(BasePoolExecutor):
"""
An executor that runs jobs in a concurrent.futures thread pool.
Plugin alias: ``threadpool``
:param max_workers: the maximum number of spawned threads.
:param pool_kwargs: dict of keyword arguments to pass to the underlying
ThreadPoolExecutor constructor
"""
def __init__(self, max_workers=10, pool_kwargs=None):
pool_kwargs = pool_kwargs or {}
pool = concurrent.futures.ThreadPoolExecutor(int(max_workers), **pool_kwargs)
super(ThreadPoolExecutor, self).__init__(pool)
class ProcessPoolExecutor(BasePoolExecutor):
"""
An executor that runs jobs in a concurrent.futures process pool.
Plugin alias: ``processpool``
:param max_workers: the maximum number of spawned processes.
:param pool_kwargs: dict of keyword arguments to pass to the underlying
ProcessPoolExecutor constructor
"""
def __init__(self, max_workers=10, pool_kwargs=None):
pool_kwargs = pool_kwargs or {}
pool = concurrent.futures.ProcessPoolExecutor(int(max_workers), **pool_kwargs)
super(ProcessPoolExecutor, self).__init__(pool)
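# --- Illustrative sketch (editorial addition, not part of the upstream module): executors
# are normally wired into a scheduler by alias. BackgroundScheduler comes from
# apscheduler.schedulers and is an assumption here.
def _demo_executor_configuration():
    from apscheduler.schedulers.background import BackgroundScheduler

    executors = {
        'default': ThreadPoolExecutor(max_workers=20),
        'processpool': ProcessPoolExecutor(max_workers=4),
    }
    scheduler = BackgroundScheduler(executors=executors)
    # Individual jobs opt into a pool via its alias, e.g.
    # scheduler.add_job(heavy_func, 'interval', minutes=10, executor='processpool')
    return scheduler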
| 2,484
|
Python
|
.py
| 53
| 39.132075
| 98
| 0.672192
|
rembo10/headphones
| 3,370
| 601
| 527
|
GPL-3.0
|
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
|
8,389
|
gevent.py
|
rembo10_headphones/lib/apscheduler/executors/gevent.py
|
from __future__ import absolute_import
import sys
from apscheduler.executors.base import BaseExecutor, run_job
try:
import gevent
except ImportError: # pragma: nocover
raise ImportError('GeventExecutor requires gevent installed')
class GeventExecutor(BaseExecutor):
"""
Runs jobs as greenlets.
Plugin alias: ``gevent``
"""
def _do_submit_job(self, job, run_times):
def callback(greenlet):
try:
events = greenlet.get()
except BaseException:
self._run_job_error(job.id, *sys.exc_info()[1:])
else:
self._run_job_success(job.id, events)
gevent.spawn(run_job, job, job._jobstore_alias, run_times, self._logger.name).\
link(callback)
| 777
|
Python
|
.py
| 22
| 27.590909
| 87
| 0.637216
|
rembo10/headphones
| 3,370
| 601
| 527
|
GPL-3.0
|
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
|
8,390
|
twisted.py
|
rembo10_headphones/lib/apscheduler/executors/twisted.py
|
from __future__ import absolute_import
from apscheduler.executors.base import BaseExecutor, run_job
class TwistedExecutor(BaseExecutor):
"""
Runs jobs in the reactor's thread pool.
Plugin alias: ``twisted``
"""
def start(self, scheduler, alias):
super(TwistedExecutor, self).start(scheduler, alias)
self._reactor = scheduler._reactor
def _do_submit_job(self, job, run_times):
def callback(success, result):
if success:
self._run_job_success(job.id, result)
else:
self._run_job_error(job.id, result.value, result.tb)
self._reactor.getThreadPool().callInThreadWithCallback(
callback, run_job, job, job._jobstore_alias, run_times, self._logger.name)
| 778
|
Python
|
.py
| 18
| 34.944444
| 86
| 0.657371
|
rembo10/headphones
| 3,370
| 601
| 527
|
GPL-3.0
|
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
|
8,391
|
asyncio.py
|
rembo10_headphones/lib/apscheduler/executors/asyncio.py
|
from __future__ import absolute_import
import sys
from apscheduler.executors.base import BaseExecutor, run_job
from apscheduler.executors.base_py3 import run_coroutine_job
from apscheduler.util import iscoroutinefunction_partial
class AsyncIOExecutor(BaseExecutor):
"""
Runs jobs in the default executor of the event loop.
If the job function is a native coroutine function, it is scheduled to be run directly in the
event loop as soon as possible. All other functions are run in the event loop's default
executor which is usually a thread pool.
Plugin alias: ``asyncio``
"""
def start(self, scheduler, alias):
super(AsyncIOExecutor, self).start(scheduler, alias)
self._eventloop = scheduler._eventloop
self._pending_futures = set()
def shutdown(self, wait=True):
# There is no way to honor wait=True without converting this method into a coroutine method
for f in self._pending_futures:
if not f.done():
f.cancel()
self._pending_futures.clear()
def _do_submit_job(self, job, run_times):
def callback(f):
self._pending_futures.discard(f)
try:
events = f.result()
except BaseException:
self._run_job_error(job.id, *sys.exc_info()[1:])
else:
self._run_job_success(job.id, events)
if iscoroutinefunction_partial(job.func):
coro = run_coroutine_job(job, job._jobstore_alias, run_times, self._logger.name)
f = self._eventloop.create_task(coro)
else:
f = self._eventloop.run_in_executor(None, run_job, job, job._jobstore_alias, run_times,
self._logger.name)
f.add_done_callback(callback)
self._pending_futures.add(f)
| 1,859
|
Python
|
.py
| 40
| 36.775
| 99
| 0.643055
|
rembo10/headphones
| 3,370
| 601
| 527
|
GPL-3.0
|
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
|
8,392
|
tornado.py
|
rembo10_headphones/lib/apscheduler/executors/tornado.py
|
from __future__ import absolute_import
import sys
from concurrent.futures import ThreadPoolExecutor
from tornado.gen import convert_yielded
from apscheduler.executors.base import BaseExecutor, run_job
try:
from apscheduler.executors.base_py3 import run_coroutine_job
from apscheduler.util import iscoroutinefunction_partial
except ImportError:
def iscoroutinefunction_partial(func):
return False
class TornadoExecutor(BaseExecutor):
"""
Runs jobs either in a thread pool or directly on the I/O loop.
If the job function is a native coroutine function, it is scheduled to be run directly in the
I/O loop as soon as possible. All other functions are run in a thread pool.
Plugin alias: ``tornado``
:param int max_workers: maximum number of worker threads in the thread pool
"""
def __init__(self, max_workers=10):
super(TornadoExecutor, self).__init__()
self.executor = ThreadPoolExecutor(max_workers)
def start(self, scheduler, alias):
super(TornadoExecutor, self).start(scheduler, alias)
self._ioloop = scheduler._ioloop
def _do_submit_job(self, job, run_times):
def callback(f):
try:
events = f.result()
except BaseException:
self._run_job_error(job.id, *sys.exc_info()[1:])
else:
self._run_job_success(job.id, events)
if iscoroutinefunction_partial(job.func):
f = run_coroutine_job(job, job._jobstore_alias, run_times, self._logger.name)
else:
f = self.executor.submit(run_job, job, job._jobstore_alias, run_times,
self._logger.name)
f = convert_yielded(f)
f.add_done_callback(callback)
| 1,780
|
Python
|
.py
| 40
| 36.225
| 97
| 0.669177
|
rembo10/headphones
| 3,370
| 601
| 527
|
GPL-3.0
|
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
|
8,393
|
base.py
|
rembo10_headphones/lib/apscheduler/executors/base.py
|
from abc import ABCMeta, abstractmethod
from collections import defaultdict
from datetime import datetime, timedelta
from traceback import format_tb
import logging
import sys
from pytz import utc
import six
from apscheduler.events import (
JobExecutionEvent, EVENT_JOB_MISSED, EVENT_JOB_ERROR, EVENT_JOB_EXECUTED)
class MaxInstancesReachedError(Exception):
def __init__(self, job):
super(MaxInstancesReachedError, self).__init__(
'Job "%s" has already reached its maximum number of instances (%d)' %
(job.id, job.max_instances))
class BaseExecutor(six.with_metaclass(ABCMeta, object)):
"""Abstract base class that defines the interface that every executor must implement."""
_scheduler = None
_lock = None
_logger = logging.getLogger('apscheduler.executors')
def __init__(self):
super(BaseExecutor, self).__init__()
self._instances = defaultdict(lambda: 0)
def start(self, scheduler, alias):
"""
Called by the scheduler when the scheduler is being started or when the executor is being
added to an already running scheduler.
:param apscheduler.schedulers.base.BaseScheduler scheduler: the scheduler that is starting
this executor
:param str|unicode alias: alias of this executor as it was assigned to the scheduler
"""
self._scheduler = scheduler
self._lock = scheduler._create_lock()
self._logger = logging.getLogger('apscheduler.executors.%s' % alias)
def shutdown(self, wait=True):
"""
Shuts down this executor.
:param bool wait: ``True`` to wait until all submitted jobs
have been executed
"""
def submit_job(self, job, run_times):
"""
Submits job for execution.
:param Job job: job to execute
:param list[datetime] run_times: list of datetimes specifying
when the job should have been run
:raises MaxInstancesReachedError: if the maximum number of
allowed instances for this job has been reached
"""
assert self._lock is not None, 'This executor has not been started yet'
with self._lock:
if self._instances[job.id] >= job.max_instances:
raise MaxInstancesReachedError(job)
self._do_submit_job(job, run_times)
self._instances[job.id] += 1
@abstractmethod
def _do_submit_job(self, job, run_times):
"""Performs the actual task of scheduling `run_job` to be called."""
def _run_job_success(self, job_id, events):
"""
Called by the executor with the list of generated events when :func:`run_job` has been
successfully called.
"""
with self._lock:
self._instances[job_id] -= 1
if self._instances[job_id] == 0:
del self._instances[job_id]
for event in events:
self._scheduler._dispatch_event(event)
def _run_job_error(self, job_id, exc, traceback=None):
"""Called by the executor with the exception if there is an error calling `run_job`."""
with self._lock:
self._instances[job_id] -= 1
if self._instances[job_id] == 0:
del self._instances[job_id]
exc_info = (exc.__class__, exc, traceback)
self._logger.error('Error running job %s', job_id, exc_info=exc_info)
def run_job(job, jobstore_alias, run_times, logger_name):
"""
Called by executors to run the job. Returns a list of scheduler events to be dispatched by the
scheduler.
"""
events = []
logger = logging.getLogger(logger_name)
for run_time in run_times:
# See if the job missed its run time window, and handle
# possible misfires accordingly
if job.misfire_grace_time is not None:
difference = datetime.now(utc) - run_time
grace_time = timedelta(seconds=job.misfire_grace_time)
if difference > grace_time:
events.append(JobExecutionEvent(EVENT_JOB_MISSED, job.id, jobstore_alias,
run_time))
logger.warning('Run time of job "%s" was missed by %s', job, difference)
continue
logger.info('Running job "%s" (scheduled at %s)', job, run_time)
try:
retval = job.func(*job.args, **job.kwargs)
except BaseException:
exc, tb = sys.exc_info()[1:]
formatted_tb = ''.join(format_tb(tb))
events.append(JobExecutionEvent(EVENT_JOB_ERROR, job.id, jobstore_alias, run_time,
exception=exc, traceback=formatted_tb))
logger.exception('Job "%s" raised an exception', job)
# This is to prevent cyclic references that would lead to memory leaks
if six.PY2:
sys.exc_clear()
del tb
else:
import traceback
traceback.clear_frames(tb)
del tb
else:
events.append(JobExecutionEvent(EVENT_JOB_EXECUTED, job.id, jobstore_alias, run_time,
retval=retval))
logger.info('Job "%s" executed successfully', job)
return events
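# --- Illustrative sketch (editorial addition, not part of the upstream module): the misfire
# check used in run_job() above, reduced to plain arithmetic with made-up sample values.
def _demo_misfire_check():
    misfire_grace_time = 30                                # seconds of allowed lateness
    run_time = datetime.now(utc) - timedelta(seconds=45)   # job is ~45 seconds late
    difference = datetime.now(utc) - run_time
    # True here means run_job() would emit EVENT_JOB_MISSED and skip this run time.
    return difference > timedelta(seconds=misfire_grace_time)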
| 5,336
|
Python
|
.py
| 117
| 35.299145
| 98
| 0.617534
|
rembo10/headphones
| 3,370
| 601
| 527
|
GPL-3.0
|
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
|
8,394
|
debug.py
|
rembo10_headphones/lib/apscheduler/executors/debug.py
|
import sys
from apscheduler.executors.base import BaseExecutor, run_job
class DebugExecutor(BaseExecutor):
"""
A special executor that executes the target callable directly instead of deferring it to a
thread or process.
Plugin alias: ``debug``
"""
def _do_submit_job(self, job, run_times):
try:
events = run_job(job, job._jobstore_alias, run_times, self._logger.name)
except BaseException:
self._run_job_error(job.id, *sys.exc_info()[1:])
else:
self._run_job_success(job.id, events)
| 573
|
Python
|
.py
| 15
| 31.266667
| 94
| 0.660036
|
rembo10/headphones
| 3,370
| 601
| 527
|
GPL-3.0
|
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
|
8,395
|
base_py3.py
|
rembo10_headphones/lib/apscheduler/executors/base_py3.py
|
import logging
import sys
import traceback
from datetime import datetime, timedelta
from traceback import format_tb
from pytz import utc
from apscheduler.events import (
JobExecutionEvent, EVENT_JOB_MISSED, EVENT_JOB_ERROR, EVENT_JOB_EXECUTED)
async def run_coroutine_job(job, jobstore_alias, run_times, logger_name):
"""Coroutine version of run_job()."""
events = []
logger = logging.getLogger(logger_name)
for run_time in run_times:
# See if the job missed its run time window, and handle possible misfires accordingly
if job.misfire_grace_time is not None:
difference = datetime.now(utc) - run_time
grace_time = timedelta(seconds=job.misfire_grace_time)
if difference > grace_time:
events.append(JobExecutionEvent(EVENT_JOB_MISSED, job.id, jobstore_alias,
run_time))
logger.warning('Run time of job "%s" was missed by %s', job, difference)
continue
logger.info('Running job "%s" (scheduled at %s)', job, run_time)
try:
retval = await job.func(*job.args, **job.kwargs)
except BaseException:
exc, tb = sys.exc_info()[1:]
formatted_tb = ''.join(format_tb(tb))
events.append(JobExecutionEvent(EVENT_JOB_ERROR, job.id, jobstore_alias, run_time,
exception=exc, traceback=formatted_tb))
logger.exception('Job "%s" raised an exception', job)
traceback.clear_frames(tb)
else:
events.append(JobExecutionEvent(EVENT_JOB_EXECUTED, job.id, jobstore_alias, run_time,
retval=retval))
logger.info('Job "%s" executed successfully', job)
return events
| 1,831
|
Python
|
.py
| 37
| 37.837838
| 97
| 0.614653
|
rembo10/headphones
| 3,370
| 601
| 527
|
GPL-3.0
|
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
|
8,396
|
interval.py
|
rembo10_headphones/lib/apscheduler/triggers/interval.py
|
from datetime import timedelta, datetime
from math import ceil
from tzlocal import get_localzone
from apscheduler.triggers.base import BaseTrigger
from apscheduler.util import (
convert_to_datetime, normalize, timedelta_seconds, datetime_repr,
astimezone)
class IntervalTrigger(BaseTrigger):
"""
Triggers on specified intervals, starting on ``start_date`` if specified, ``datetime.now()`` +
interval otherwise.
:param int weeks: number of weeks to wait
:param int days: number of days to wait
:param int hours: number of hours to wait
:param int minutes: number of minutes to wait
:param int seconds: number of seconds to wait
:param datetime|str start_date: starting point for the interval calculation
:param datetime|str end_date: latest possible date/time to trigger on
:param datetime.tzinfo|str timezone: time zone to use for the date/time calculations
:param int|None jitter: delay the job execution by ``jitter`` seconds at most
"""
__slots__ = 'timezone', 'start_date', 'end_date', 'interval', 'interval_length', 'jitter'
def __init__(self, weeks=0, days=0, hours=0, minutes=0, seconds=0, start_date=None,
end_date=None, timezone=None, jitter=None):
self.interval = timedelta(weeks=weeks, days=days, hours=hours, minutes=minutes,
seconds=seconds)
self.interval_length = timedelta_seconds(self.interval)
if self.interval_length == 0:
self.interval = timedelta(seconds=1)
self.interval_length = 1
if timezone:
self.timezone = astimezone(timezone)
elif isinstance(start_date, datetime) and start_date.tzinfo:
self.timezone = start_date.tzinfo
elif isinstance(end_date, datetime) and end_date.tzinfo:
self.timezone = end_date.tzinfo
else:
self.timezone = get_localzone()
start_date = start_date or (datetime.now(self.timezone) + self.interval)
self.start_date = convert_to_datetime(start_date, self.timezone, 'start_date')
self.end_date = convert_to_datetime(end_date, self.timezone, 'end_date')
self.jitter = jitter
def get_next_fire_time(self, previous_fire_time, now):
if previous_fire_time:
next_fire_time = previous_fire_time + self.interval
elif self.start_date > now:
next_fire_time = self.start_date
else:
timediff_seconds = timedelta_seconds(now - self.start_date)
next_interval_num = int(ceil(timediff_seconds / self.interval_length))
next_fire_time = self.start_date + self.interval * next_interval_num
if self.jitter is not None:
next_fire_time = self._apply_jitter(next_fire_time, self.jitter, now)
if not self.end_date or next_fire_time <= self.end_date:
return normalize(next_fire_time)
def __getstate__(self):
return {
'version': 2,
'timezone': self.timezone,
'start_date': self.start_date,
'end_date': self.end_date,
'interval': self.interval,
'jitter': self.jitter,
}
def __setstate__(self, state):
# This is for compatibility with APScheduler 3.0.x
if isinstance(state, tuple):
state = state[1]
if state.get('version', 1) > 2:
raise ValueError(
'Got serialized data for version %s of %s, but only versions up to 2 can be '
'handled' % (state['version'], self.__class__.__name__))
self.timezone = state['timezone']
self.start_date = state['start_date']
self.end_date = state['end_date']
self.interval = state['interval']
self.interval_length = timedelta_seconds(self.interval)
self.jitter = state.get('jitter')
def __str__(self):
return 'interval[%s]' % str(self.interval)
def __repr__(self):
options = ['interval=%r' % self.interval, 'start_date=%r' % datetime_repr(self.start_date)]
if self.end_date:
options.append("end_date=%r" % datetime_repr(self.end_date))
if self.jitter:
options.append('jitter=%s' % self.jitter)
return "<%s (%s, timezone='%s')>" % (
self.__class__.__name__, ', '.join(options), self.timezone)
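# --- Illustrative sketch (editorial addition, not part of the upstream module): the catch-up
# arithmetic in get_next_fire_time() rounds the elapsed time up to a whole number of
# intervals. The concrete dates below are made up.
def _demo_interval_catch_up():
    from datetime import timezone

    trigger = IntervalTrigger(minutes=10, start_date=datetime(2024, 1, 1, 12, 0),
                              timezone='UTC')
    now = datetime(2024, 1, 1, 12, 25, tzinfo=timezone.utc)
    # 25 minutes elapsed -> ceil(25 / 10) = 3 intervals -> next fire at 12:30 UTC
    return trigger.get_next_fire_time(None, now)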
| 4,377
|
Python
|
.py
| 88
| 40.386364
| 99
| 0.632935
|
rembo10/headphones
| 3,370
| 601
| 527
|
GPL-3.0
|
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
|
8,397
|
date.py
|
rembo10_headphones/lib/apscheduler/triggers/date.py
|
from datetime import datetime
from tzlocal import get_localzone
from apscheduler.triggers.base import BaseTrigger
from apscheduler.util import convert_to_datetime, datetime_repr, astimezone
class DateTrigger(BaseTrigger):
"""
Triggers once on the given datetime. If ``run_date`` is left empty, current time is used.
:param datetime|str run_date: the date/time to run the job at
:param datetime.tzinfo|str timezone: time zone for ``run_date`` if it doesn't have one already
"""
__slots__ = 'run_date'
def __init__(self, run_date=None, timezone=None):
timezone = astimezone(timezone) or get_localzone()
if run_date is not None:
self.run_date = convert_to_datetime(run_date, timezone, 'run_date')
else:
self.run_date = datetime.now(timezone)
def get_next_fire_time(self, previous_fire_time, now):
return self.run_date if previous_fire_time is None else None
def __getstate__(self):
return {
'version': 1,
'run_date': self.run_date
}
def __setstate__(self, state):
# This is for compatibility with APScheduler 3.0.x
if isinstance(state, tuple):
state = state[1]
if state.get('version', 1) > 1:
raise ValueError(
'Got serialized data for version %s of %s, but only version 1 can be handled' %
(state['version'], self.__class__.__name__))
self.run_date = state['run_date']
def __str__(self):
return 'date[%s]' % datetime_repr(self.run_date)
def __repr__(self):
return "<%s (run_date='%s')>" % (self.__class__.__name__, datetime_repr(self.run_date))
| 1,705
|
Python
|
.py
| 37
| 38
| 98
| 0.628779
|
rembo10/headphones
| 3,370
| 601
| 527
|
GPL-3.0
|
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
|
8,398
|
combining.py
|
rembo10_headphones/lib/apscheduler/triggers/combining.py
|
from apscheduler.triggers.base import BaseTrigger
from apscheduler.util import obj_to_ref, ref_to_obj
class BaseCombiningTrigger(BaseTrigger):
__slots__ = ('triggers', 'jitter')
def __init__(self, triggers, jitter=None):
self.triggers = triggers
self.jitter = jitter
def __getstate__(self):
return {
'version': 1,
'triggers': [(obj_to_ref(trigger.__class__), trigger.__getstate__())
for trigger in self.triggers],
'jitter': self.jitter
}
def __setstate__(self, state):
if state.get('version', 1) > 1:
raise ValueError(
'Got serialized data for version %s of %s, but only versions up to 1 can be '
'handled' % (state['version'], self.__class__.__name__))
self.jitter = state['jitter']
self.triggers = []
for clsref, state in state['triggers']:
cls = ref_to_obj(clsref)
trigger = cls.__new__(cls)
trigger.__setstate__(state)
self.triggers.append(trigger)
def __repr__(self):
return '<{}({}{})>'.format(self.__class__.__name__, self.triggers,
', jitter={}'.format(self.jitter) if self.jitter else '')
class AndTrigger(BaseCombiningTrigger):
"""
Always returns the earliest next fire time that all the given triggers can agree on.
The trigger is considered to be finished when any of the given triggers has finished its
schedule.
Trigger alias: ``and``
:param list triggers: triggers to combine
:param int|None jitter: delay the job execution by ``jitter`` seconds at most
"""
__slots__ = ()
def get_next_fire_time(self, previous_fire_time, now):
while True:
fire_times = [trigger.get_next_fire_time(previous_fire_time, now)
for trigger in self.triggers]
if None in fire_times:
return None
elif min(fire_times) == max(fire_times):
return self._apply_jitter(fire_times[0], self.jitter, now)
else:
now = max(fire_times)
def __str__(self):
return 'and[{}]'.format(', '.join(str(trigger) for trigger in self.triggers))
class OrTrigger(BaseCombiningTrigger):
"""
Always returns the earliest next fire time produced by any of the given triggers.
The trigger is considered finished when all the given triggers have finished their schedules.
Trigger alias: ``or``
:param list triggers: triggers to combine
:param int|None jitter: delay the job execution by ``jitter`` seconds at most
    .. note:: Triggers that depend on the previous fire time, such as the interval trigger, may
seem to behave strangely since they are always passed the previous fire time produced by
any of the given triggers.
"""
__slots__ = ()
def get_next_fire_time(self, previous_fire_time, now):
fire_times = [trigger.get_next_fire_time(previous_fire_time, now)
for trigger in self.triggers]
fire_times = [fire_time for fire_time in fire_times if fire_time is not None]
if fire_times:
return self._apply_jitter(min(fire_times), self.jitter, now)
else:
return None
def __str__(self):
return 'or[{}]'.format(', '.join(str(trigger) for trigger in self.triggers))
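# --- Illustrative sketch (editorial addition, not part of the upstream module): OrTrigger
# returns the earliest fire time produced by any child trigger. DateTrigger is imported just
# for the sketch and the dates are made up.
def _demo_or_trigger():
    from datetime import datetime, timezone
    from apscheduler.triggers.date import DateTrigger

    early = DateTrigger(run_date=datetime(2024, 6, 1, 9, 0, tzinfo=timezone.utc))
    late = DateTrigger(run_date=datetime(2024, 6, 1, 18, 0, tzinfo=timezone.utc))
    now = datetime(2024, 6, 1, 8, 0, tzinfo=timezone.utc)
    return OrTrigger([early, late]).get_next_fire_time(None, now)   # -> 09:00 UTC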
| 3,449
|
Python
|
.py
| 73
| 37.616438
| 97
| 0.611807
|
rembo10/headphones
| 3,370
| 601
| 527
|
GPL-3.0
|
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
|
8,399
|
base.py
|
rembo10_headphones/lib/apscheduler/triggers/base.py
|
from abc import ABCMeta, abstractmethod
from datetime import timedelta
import random
import six
class BaseTrigger(six.with_metaclass(ABCMeta)):
"""Abstract base class that defines the interface that every trigger must implement."""
__slots__ = ()
@abstractmethod
def get_next_fire_time(self, previous_fire_time, now):
"""
        Returns the next datetime to fire on. If no such datetime can be calculated, returns
``None``.
:param datetime.datetime previous_fire_time: the previous time the trigger was fired
:param datetime.datetime now: current datetime
"""
def _apply_jitter(self, next_fire_time, jitter, now):
"""
Randomize ``next_fire_time`` by adding a random value (the jitter).
:param datetime.datetime|None next_fire_time: next fire time without jitter applied. If
``None``, returns ``None``.
:param int|None jitter: maximum number of seconds to add to ``next_fire_time``
(if ``None`` or ``0``, returns ``next_fire_time``)
:param datetime.datetime now: current datetime
        :return datetime.datetime|None: next fire time with jitter applied
"""
if next_fire_time is None or not jitter:
return next_fire_time
return next_fire_time + timedelta(seconds=random.uniform(0, jitter))
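# --- Illustrative sketch (editorial addition, not part of the upstream module): jitter only
# ever pushes the fire time forward, by at most ``jitter`` seconds. The tiny concrete trigger
# below is hypothetical.
def _demo_jitter_bounds():
    from datetime import datetime, timezone

    class _FixedTrigger(BaseTrigger):
        def get_next_fire_time(self, previous_fire_time, now):
            return now

    now = datetime(2024, 1, 1, tzinfo=timezone.utc)
    jittered = _FixedTrigger()._apply_jitter(now, 120, now)
    assert now <= jittered <= now + timedelta(seconds=120)
    return jittered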
| 1,355
|
Python
|
.py
| 28
| 40.785714
| 95
| 0.670713
|
rembo10/headphones
| 3,370
| 601
| 527
|
GPL-3.0
|
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
|