| id (int64, 0-458k) | file_name (string, 4-119 chars) | file_path (string, 14-227 chars) | content (string, 24-9.96M chars) | size (int64, 24-9.96M) | language (string, 1 value) | extension (string, 14 values) | total_lines (int64, 1-219k) | avg_line_length (float64, 2.52-4.63M) | max_line_length (int64, 5-9.91M) | alphanum_fraction (float64, 0-1) | repo_name (string, 7-101 chars) | repo_stars (int64, 100-139k) | repo_forks (int64, 0-26.4k) | repo_open_issues (int64, 0-2.27k) | repo_license (string, 12 values) | repo_extraction_date (string, 433 values) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
8,200
|
dstat_nfsd3_ops.py
|
dstat-real_dstat/plugins/dstat_nfsd3_ops.py
|
### Author: Dag Wieers <dag@wieers.com>
class dstat_plugin(dstat):
def __init__(self):
self.name = 'extended nfs3 server operations'
self.nick = ('null', 'gatr', 'satr', 'look', 'aces', 'rdln', 'read', 'writ', 'crea', 'mkdr', 'syml', 'mknd', 'rm', 'rmdr', 'ren', 'link', 'rdir', 'rdr+', 'fstt', 'fsnf', 'path', 'cmmt')
self.vars = ('null', 'getattr', 'setattr', 'lookup', 'access', 'readlink', 'read', 'write', 'create', 'mkdir', 'symlink', 'mknod', 'remove', 'rmdir', 'rename', 'link', 'readdir', 'readdirplus', 'fsstat', 'fsinfo', 'pathconf', 'commit')
self.type = 'd'
self.width = 5
self.scale = 1000
self.open('/proc/net/rpc/nfsd')
def check(self):
info(1, 'Module %s is still experimental.' % self.filename)
def extract(self):
for l in self.splitlines():
if not l or l[0] != 'proc3': continue
for i, name in enumerate(self.vars):
self.set2[name] = int(l[i+2])
for name in self.vars:
self.val[name] = (self.set2[name] - self.set1[name]) * 1.0 / elapsed
if step == op.delay:
self.set1.update(self.set2)
# vim:ts=4:sw=4:et
| 1,195
|
Python
|
.py
| 22
| 45.590909
| 243
| 0.55527
|
dstat-real/dstat
| 1,340
| 330
| 1
|
GPL-2.0
|
9/5/2024, 5:10:30 PM (Europe/Amsterdam)
|
8,201
|
dstat_mysql5_innodb_basic.py
|
dstat-real_dstat/plugins/dstat_mysql5_innodb_basic.py
|
### Author: HIROSE Masaaki <hirose31 _at_ gmail.com>
global mysql_options
mysql_options = os.getenv('DSTAT_MYSQL') or ''
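### EXAMPLE (assumed invocation, mirroring the EXAMPLES blocks in other plugins;
### DSTAT_MYSQL holds extra options passed straight to the mysql client command line):
# DSTAT_MYSQL='-u stats -psecret -h 127.0.0.1' dstat --mysql5-innodb-basic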
global target_status
global _basic_status
global _extra_status
_basic_status = (
('Queries' , 'qps'),
('Com_select' , 'sel/s'),
('Com_insert' , 'ins/s'),
('Com_update' , 'upd/s'),
('Com_delete' , 'del/s'),
('Connections' , 'con/s'),
('Threads_connected' , 'thcon'),
('Threads_running' , 'thrun'),
('Slow_queries' , 'slow'),
)
_extra_status = (
('Innodb_rows_read' , 'r#read'),
('Innodb_rows_inserted' , 'r#ins'),
('Innodb_rows_updated' , 'r#upd'),
('Innodb_rows_deleted' , 'r#del'),
('Innodb_data_reads' , 'rdphy'),
('Innodb_buffer_pool_read_requests', 'rdlgc'),
('Innodb_data_writes' , 'wrdat'),
('Innodb_log_writes' , 'wrlog'),
('innodb_buffer_pool_pages_dirty_pct', '%dirty'),
)
global calculating_status
calculating_status = (
'Innodb_buffer_pool_pages_total',
'Innodb_buffer_pool_pages_dirty',
)
global gauge
gauge = {
'Slow_queries' : 1,
'Threads_connected' : 1,
'Threads_running' : 1,
}
class dstat_plugin(dstat):
"""
mysql5-innodb, mysql5-innodb-basic, mysql5-innodb-extra
display various metrics on MySQL5 and InnoDB.
"""
def __init__(self):
self.name = 'MySQL5 InnoDB '
self.type = 'd'
self.width = 5
self.scale = 1000
def check(self):
if self.filename.find("basic") >= 0:
target_status = _basic_status
self.name += 'basic'
elif self.filename.find("extra") >= 0:
target_status = _extra_status
self.name += 'extra'
elif self.filename.find("full") >= 0:
target_status = _basic_status + _extra_status
self.name += 'full'
else:
target_status = _basic_status + _extra_status
self.name += 'full'
self.vars = tuple( map((lambda e: e[0]), target_status) )
self.nick = tuple( map((lambda e: e[1]), target_status) )
mysql_candidate = ('/usr/bin/mysql', '/usr/local/bin/mysql')
mysql_cmd = ''
for mc in mysql_candidate:
if os.access(mc, os.X_OK):
mysql_cmd = mc
break
if mysql_cmd:
try:
self.stdin, self.stdout, self.stderr = dpopen('%s -n %s' % (mysql_cmd, mysql_options))
except IOError:
raise Exception('Cannot interface with MySQL binary')
return True
raise Exception('Needs MySQL binary')
def extract(self):
try:
self.stdin.write('show global status;\n')
for line in readpipe(self.stdout):
if line == '':
break
s = line.split()
if s[0] in self.vars:
self.set2[s[0]] = float(s[1])
elif s[0] in calculating_status:
self.set2[s[0]] = float(s[1])
for k in self.vars:
if k in gauge:
self.val[k] = self.set2[k]
elif k == 'innodb_buffer_pool_pages_dirty_pct':
self.val[k] = self.set2['Innodb_buffer_pool_pages_dirty'] / self.set2['Innodb_buffer_pool_pages_total'] * 100
else:
self.val[k] = (self.set2[k] - self.set1[k]) * 1.0 / elapsed
if step == op.delay:
self.set1.update(self.set2)
except IOError as e:
if op.debug > 1: print('%s: lost pipe to mysql, %s' % (self.filename, e))
for name in self.vars: self.val[name] = -1
except Exception as e:
if op.debug > 1: print('%s: exception: %s' % (self.filename, e))
for name in self.vars: self.val[name] = -1
| 4,110
|
Python
|
.py
| 103
| 30.640777
| 129
| 0.500251
|
dstat-real/dstat
| 1,340
| 330
| 1
|
GPL-2.0
|
9/5/2024, 5:10:30 PM (Europe/Amsterdam)
|
8,202
|
dstat_qmail.py
|
dstat-real_dstat/plugins/dstat_qmail.py
|
### Author: Tom Van Looy <tom$ctors,net>
class dstat_plugin(dstat):
"""
port of qmail_qstat to dstat
"""
def __init__(self):
self.name = 'qmail'
self.nick = ('in_queue', 'not_prep')
self.vars = ('mess', 'todo')
self.type = 'd'
self.width = 4
self.scale = 100
def check(self):
for item in self.vars:
if not os.access('/var/qmail/queue/'+item, os.R_OK):
raise Exception('Cannot access qmail queues')
def extract(self):
for item in self.vars:
self.val[item] = len(glob.glob('/var/qmail/queue/'+item+'/*/*'))
# vim:ts=4:sw=4:et
| 656
|
Python
|
.py
| 20
| 25.2
| 76
| 0.547468
|
dstat-real/dstat
| 1,340
| 330
| 1
|
GPL-2.0
|
9/5/2024, 5:10:30 PM (Europe/Amsterdam)
|
8,203
|
dstat_mysql5_keys.py
|
dstat-real_dstat/plugins/dstat_mysql5_keys.py
|
### Author: <lefred$inuits,be>
global mysql_user
mysql_user = os.getenv('DSTAT_MYSQL_USER') or os.getenv('USER')
global mysql_pwd
mysql_pwd = os.getenv('DSTAT_MYSQL_PWD')
global mysql_host
mysql_host = os.getenv('DSTAT_MYSQL_HOST')
global mysql_port
mysql_port = os.getenv('DSTAT_MYSQL_PORT')
global mysql_socket
mysql_socket = os.getenv('DSTAT_MYSQL_SOCKET')
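### EXAMPLE (assumed invocation; connection settings come from the
### DSTAT_MYSQL_* environment variables read above):
# DSTAT_MYSQL_USER=stats DSTAT_MYSQL_PWD=secret DSTAT_MYSQL_HOST=127.0.0.1 dstat --mysql5-keys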
class dstat_plugin(dstat):
"""
Plugin for MySQL 5 Keys.
"""
def __init__(self):
self.name = 'mysql5 key status'
self.nick = ('used', 'read', 'writ', 'rreq', 'wreq')
self.vars = ('Key_blocks_used', 'Key_reads', 'Key_writes', 'Key_read_requests', 'Key_write_requests')
self.type = 'f'
self.width = 4
self.scale = 1000
def check(self):
global MySQLdb
import MySQLdb
try:
args = {}
if mysql_user:
args['user'] = mysql_user
if mysql_pwd:
args['passwd'] = mysql_pwd
if mysql_host:
args['host'] = mysql_host
if mysql_port:
args['port'] = int(mysql_port)
if mysql_socket:
args['unix_socket'] = mysql_socket
self.db = MySQLdb.connect(**args)
except:
raise Exception('Cannot interface with MySQL server')
def extract(self):
try:
c = self.db.cursor()
c.execute("""show global status like 'Key_%';""")
lines = c.fetchall()
for line in lines:
if len(line[1]) < 2: continue
if line[0] in self.vars:
self.set2[line[0]] = float(line[1])
for name in self.vars:
self.val[name] = self.set2[name] * 1.0 / elapsed
if step == op.delay:
self.set1.update(self.set2)
except Exception as e:
for name in self.vars:
self.val[name] = -1
# vim:ts=4:sw=4:et
| 1,978
|
Python
|
.py
| 57
| 25
| 109
| 0.539591
|
dstat-real/dstat
| 1,340
| 330
| 1
|
GPL-2.0
|
9/5/2024, 5:10:30 PM (Europe/Amsterdam)
|
8,204
|
dstat_top_io_adv.py
|
dstat-real_dstat/plugins/dstat_top_io_adv.py
|
### Dstat most expensive I/O process plugin
### Displays the I/O read/write stats and CPU usage of the most expensive process
###
### Author: Guillermo Cantu Luna
class dstat_plugin(dstat):
def __init__(self):
self.name = 'most expensive i/o process'
self.vars = ('process pid read write cpu',)
self.type = 's'
self.width = 40
self.scale = 0
self.pidset1 = {}
def check(self):
if not os.access('/proc/self/io', os.R_OK):
raise Exception('Kernel has no per-process I/O accounting [CONFIG_TASK_IO_ACCOUNTING], use at least 2.6.20')
return True
def extract(self):
self.output = ''
self.pidset2 = {}
self.val['usage'] = 0.0
for pid in proc_pidlist():
try:
### Reset values
if pid not in self.pidset2:
self.pidset2[pid] = {'rchar:': 0, 'wchar:': 0, 'cputime:': 0, 'cpuper:': 0}
if pid not in self.pidset1:
self.pidset1[pid] = {'rchar:': 0, 'wchar:': 0, 'cputime:': 0, 'cpuper:': 0}
### Extract name
name = proc_splitline('/proc/%s/stat' % pid)[1][1:-1]
### Extract counters
for l in proc_splitlines('/proc/%s/io' % pid):
if len(l) != 2: continue
self.pidset2[pid][l[0]] = int(l[1])
### Get CPU usage
l = proc_splitline('/proc/%s/stat' % pid)
if len(l) < 15:
cpu_usage = 0
else:
self.pidset2[pid]['cputime:'] = int(l[13]) + int(l[14])
cpu_usage = (self.pidset2[pid]['cputime:'] - self.pidset1[pid]['cputime:']) * 1.0 / elapsed / cpunr
except ValueError:
continue
except IOError:
continue
except IndexError:
continue
read_usage = (self.pidset2[pid]['rchar:'] - self.pidset1[pid]['rchar:']) * 1.0 / elapsed
write_usage = (self.pidset2[pid]['wchar:'] - self.pidset1[pid]['wchar:']) * 1.0 / elapsed
usage = read_usage + write_usage
### Get the process that spends the most jiffies
if usage > self.val['usage']:
self.val['usage'] = usage
self.val['read_usage'] = read_usage
self.val['write_usage'] = write_usage
self.val['pid'] = pid
self.val['name'] = getnamebypid(pid, name)
self.val['cpu_usage'] = cpu_usage
if step == op.delay:
self.pidset1 = self.pidset2
if self.val['usage'] != 0.0:
self.output = '%-*s%s%-5s%s%s%s%s%%' % (self.width-14-len(pid), self.val['name'][0:self.width-14-len(pid)], color['darkblue'], self.val['pid'], cprint(self.val['read_usage'], 'd', 5, 1024), cprint(self.val['write_usage'], 'd', 5, 1024), cprint(self.val['cpu_usage'], 'f', 3, 34), color['darkgray'])
def showcsv(self):
return 'Top: %s\t%s\t%s\t%s' % (self.val['name'][0:self.width-20], self.val['read_usage'], self.val['write_usage'], self.val['cpu_usage'])
| 3,249
|
Python
|
.py
| 63
| 37.444444
| 311
| 0.499367
|
dstat-real/dstat
| 1,340
| 330
| 1
|
GPL-2.0
|
9/5/2024, 5:10:30 PM (Europe/Amsterdam)
|
8,205
|
dstat_battery_remain.py
|
dstat-real_dstat/plugins/dstat_battery_remain.py
|
### Author: Dag Wieers <dag$wieers,com>
class dstat_plugin(dstat):
"""
Remaining battery time.
Calculated from power drain and remaining battery power. Information is
retrieved from ACPI.
"""
def __init__(self):
self.name = 'remain'
self.type = 't'
self.width = 5
self.scale = 0
def vars(self):
ret = []
for battery in os.listdir('/proc/acpi/battery/'):
for line in dopen('/proc/acpi/battery/'+battery+'/state').readlines():
l = line.split()
if len(l) < 2: continue
if l[0] == 'present:' and l[1] == 'yes':
ret.append(battery)
ret.sort()
return ret
def nick(self):
return [name.lower() for name in self.vars]
def extract(self):
for battery in self.vars:
### guard against state files missing rate/capacity lines
rate = remaining = 0
for line in dopen('/proc/acpi/battery/'+battery+'/state').readlines():
l = line.split()
if len(l) < 3: continue
if l[0:2] == ['remaining', 'capacity:']:
remaining = int(l[2])
continue
elif l[0:2] == ['present', 'rate:']:
rate = int(l[2])
continue
if rate and remaining:
self.val[battery] = remaining * 60 / rate
else:
self.val[battery] = -1
# vim:ts=4:sw=4:et
| 1,430
|
Python
|
.py
| 40
| 24.35
| 82
| 0.494935
|
dstat-real/dstat
| 1,340
| 330
| 1
|
GPL-2.0
|
9/5/2024, 5:10:30 PM (Europe/Amsterdam)
|
8,206
|
dstat_mysql_io.py
|
dstat-real_dstat/plugins/dstat_mysql_io.py
|
global mysql_options
mysql_options = os.getenv('DSTAT_MYSQL')
class dstat_plugin(dstat):
def __init__(self):
self.name = 'mysql io'
self.nick = ('recv', 'sent')
self.vars = ('Bytes_received', 'Bytes_sent')
def check(self):
if not os.access('/usr/bin/mysql', os.X_OK):
raise Exception('Needs MySQL binary')
try:
self.stdin, self.stdout, self.stderr = dpopen('/usr/bin/mysql -n %s' % mysql_options)
except IOError:
raise Exception('Cannot interface with MySQL binary')
def extract(self):
try:
self.stdin.write("show status like 'Bytes_%';\n")
for line in readpipe(self.stdout):
l = line.split()
if len(l) < 2: continue
if l[0] in self.vars:
self.set2[l[0]] = float(l[1])
for name in self.vars:
self.val[name] = (self.set2[name] - self.set1[name]) * 1.0 / elapsed
if step == op.delay:
self.set1.update(self.set2)
except IOError as e:
if op.debug > 1: print('%s: lost pipe to mysql, %s' % (self.filename, e))
for name in self.vars: self.val[name] = -1
except Exception as e:
if op.debug > 1: print('%s: exception: %s' % (self.filename, e))
for name in self.vars: self.val[name] = -1
# vim:ts=4:sw=4:et
| 1,418
|
Python
|
.py
| 33
| 32.121212
| 97
| 0.546115
|
dstat-real/dstat
| 1,340
| 330
| 1
|
GPL-2.0
|
9/5/2024, 5:10:30 PM (Europe/Amsterdam)
|
8,207
|
dstat_dstat_cpu.py
|
dstat-real_dstat/plugins/dstat_dstat_cpu.py
|
### Author: Dag Wieers <dag$wieers,com>
class dstat_plugin(dstat):
"""
Provide CPU information related to the dstat process.
This plugin shows the CPU utilization for the dstat process itself,
including the user-space and system-space (kernel) utilization and
a total of both. On a system with one cpu and one core, the total
cputime is 1000ms. On a system with 2 cores the total is 2000ms.
It may help to visualise the performance of Dstat and its selection
of plugins.
"""
def __init__(self):
self.name = 'dstat cpu'
self.vars = ('user', 'system', 'total')
self.nick = ('usr', 'sys', 'tot')
self.type = 'p'
self.width = 3
self.scale = 100
def extract(self):
res = resource.getrusage(resource.RUSAGE_SELF)
self.set2['user'] = float(res.ru_utime)
self.set2['system'] = float(res.ru_stime)
self.set2['total'] = float(res.ru_utime) + float(res.ru_stime)
for name in self.vars:
self.val[name] = (self.set2[name] - self.set1[name]) * 100.0 / elapsed / cpunr
if step == op.delay:
self.set1.update(self.set2)
# vim:ts=4:sw=4:et
| 1,193
|
Python
|
.py
| 28
| 35.5
| 90
| 0.631261
|
dstat-real/dstat
| 1,340
| 330
| 1
|
GPL-2.0
|
9/5/2024, 5:10:30 PM (Europe/Amsterdam)
|
8,208
|
dstat_vmk_int.py
|
dstat-real_dstat/plugins/dstat_vmk_int.py
|
### Author: Bert de Bruijn <bert+dstat$debruijn,be>
### VMware ESX kernel interrupt stats
### Displays kernel interrupt statistics on VMware ESX servers
# NOTE TO USERS: command-line plugin configuration is not yet possible, so I've
# "borrowed" the -I argument.
# EXAMPLES:
# # dstat --vmkint -I 0x46,0x5a
# You can even combine the Linux and VMkernel interrupt stats
# # dstat --vmkint -i -I 14,0x5a
# Look at /proc/vmware/interrupts to see which interrupt is linked to which function
class dstat_plugin(dstat):
def __init__(self):
self.name = 'vmkint'
self.type = 'd'
self.width = 4
self.scale = 1000
self.open('/proc/vmware/interrupts')
# self.intmap = self.intmap()
# def intmap(self):
# ret = {}
# for line in dopen('/proc/vmware/interrupts').readlines():
# l = line.split()
# if len(l) <= self.vmkcpunr: continue
# l1 = l[0].split(':')[0]
# l2 = ' '.join(l[vmkcpunr()+1:]).split(',')
# ret[l1] = l1
# for name in l2:
# ret[name.strip().lower()] = l1
# return ret
def vmkcpunr(self):
#the service console sees only one CPU, so cpunr == 1, only the vmkernel sees all CPUs
# default cpu number is 2
ret = 2
for l in self.fd[0].splitlines():
if l[0] == 'Vector':
ret = int(l[-1]) + 1
return ret
def discover(self):
#interrupt names are not decimal numbers, but rather hexadecimal numbers like 0x7e
ret = []
self.fd[0].seek(0)
for line in self.fd[0].readlines():
l = line.split()
if l[0] == 'Vector': continue
if len(l) < self.vmkcpunr()+1: continue
name = l[0].split(':')[0]
amount = 0
for i in l[1:1+self.vmkcpunr()]:
amount = amount + int(i)
if amount > 20: ret.append(str(name))
return ret
def vars(self):
ret = []
if op.intlist:
list = op.intlist
else:
list = self.discover
# len(list) > 5: list = list[-5:]
for name in list:
if name in self.discover:
ret.append(name)
# elif name.lower() in self.intmap:
# ret.append(self.intmap[name.lower()])
return ret
def check(self):
try:
os.listdir('/proc/vmware')
except:
raise Exception('Needs VMware ESX')
info(1, 'The vmkint module is an EXPERIMENTAL module.')
def extract(self):
self.fd[0].seek(0)
for line in self.fd[0].readlines():
l = line.split()
if len(l) < self.vmkcpunr()+1: continue
name = l[0].split(':')[0]
if name in self.vars:
self.set2[name] = 0
for i in l[1:1+self.vmkcpunr()]:
self.set2[name] = self.set2[name] + int(i)
for name in self.set2:
self.val[name] = (self.set2[name] - self.set1[name]) * 1.0 / elapsed
if step == op.delay:
self.set1.update(self.set2)
# vim:ts=4:sw=4
| 3,191
|
Python
|
.py
| 86
| 29.232558
| 94
| 0.539282
|
dstat-real/dstat
| 1,340
| 330
| 1
|
GPL-2.0
|
9/5/2024, 5:10:30 PM (Europe/Amsterdam)
|
8,209
|
dstat_innodb_buffer.py
|
dstat-real_dstat/plugins/dstat_innodb_buffer.py
|
### Author: Dag Wieers <dag$wieers,com>
global mysql_options
mysql_options = os.getenv('DSTAT_MYSQL')
class dstat_plugin(dstat):
def __init__(self):
self.name = 'innodb pool'
self.nick = ('crt', 'rea', 'wri')
self.vars = ('created', 'read', 'written')
self.type = 'f'
self.width = 3
self.scale = 1000
def check(self):
if not os.access('/usr/bin/mysql', os.X_OK):
raise Exception('Needs MySQL binary')
try:
self.stdin, self.stdout, self.stderr = dpopen('/usr/bin/mysql -n %s' % mysql_options)
except IOError as e:
raise Exception('Cannot interface with MySQL binary (%s)' % e)
def extract(self):
try:
self.stdin.write('show engine innodb status\G\n')
line = greppipe(self.stdout, 'Pages read ')
if line:
l = line.split()
self.set2['read'] = int(l[2].rstrip(','))
self.set2['created'] = int(l[4].rstrip(','))
self.set2['written'] = int(l[6])
for name in self.vars:
self.val[name] = (self.set2[name] - self.set1[name]) * 1.0 / elapsed
if step == op.delay:
self.set1.update(self.set2)
except IOError as e:
if op.debug > 1: print('%s: lost pipe to mysql, %s' % (self.filename, e))
for name in self.vars: self.val[name] = -1
except Exception as e:
if op.debug > 1: print('%s: exception: %s' % (self.filename, e))
for name in self.vars: self.val[name] = -1
# vim:ts=4:sw=4:et
| 1,628
|
Python
|
.py
| 38
| 32.394737
| 97
| 0.537975
|
dstat-real/dstat
| 1,340
| 330
| 1
|
GPL-2.0
|
9/5/2024, 5:10:30 PM (Europe/Amsterdam)
|
8,210
|
dstat_disk_avgqu.py
|
dstat-real_dstat/plugins/dstat_disk_avgqu.py
|
### Author: Dag Wieers <dag$wieers,com>
class dstat_plugin(dstat):
"""
The average queue length of the requests that were issued to the device.
"""
def __init__(self):
self.version = 2
self.nick = ('avgqu',)
self.type = 'f'
self.width = 4
self.scale = 10
self.diskfilter = re.compile(r'^([hsv]d[a-z]+\d+|cciss/c\d+d\d+p\d+|dm-\d+|md\d+|mmcblk\d+p\d+|VxVM\d+)$')
self.open('/proc/diskstats')
self.cols = 1
self.struct = dict( rq_ticks=0 )
def discover(self, *objlist):
ret = []
for l in self.splitlines():
if len(l) < 13: continue
if l[3:] == ['0',] * 11: continue
name = l[2]
ret.append(name)
for item in objlist: ret.append(item)
if not ret:
raise Exception('No suitable block devices found to monitor')
return ret
def vars(self):
ret = []
if op.disklist:
varlist = op.disklist
else:
varlist = []
blockdevices = [os.path.basename(filename) for filename in glob.glob('/sys/block/*')]
for name in self.discover:
if self.diskfilter.match(name): continue
if name not in blockdevices: continue
varlist.append(name)
varlist.sort()
for name in varlist:
if name in self.discover:
ret.append(name)
return ret
def name(self):
return self.vars
def extract(self):
for l in self.splitlines():
if len(l) < 13: continue
if l[3:] == ['0',] * 11: continue
if l[3] == '0' and l[7] == '0': continue
name = l[2]
if name not in self.vars or name == 'total': continue
self.set2[name] = dict(
rq_ticks = int(l[13]),
)
for name in self.vars:
self.val[name] = ( ( self.set2[name]['rq_ticks'] - self.set1[name]['rq_ticks'] ) * 1.0 / elapsed / 1000, )
if step == op.delay:
self.set1.update(self.set2)
| 2,118
|
Python
|
.py
| 58
| 26.137931
| 118
| 0.517544
|
dstat-real/dstat
| 1,340
| 330
| 1
|
GPL-2.0
|
9/5/2024, 5:10:30 PM (Europe/Amsterdam)
|
8,211
|
dstat_vmk_nic.py
|
dstat-real_dstat/plugins/dstat_vmk_nic.py
|
### Author: Bert de Bruijn <bert+dstat$debruijn,be>
### VMware ESX kernel vmknic stats
### Displays VMkernel port statistics on VMware ESX servers
# NOTE TO USERS: command-line plugin configuration is not yet possible, so I've
# "borrowed" the -N argument.
# EXAMPLES:
# # dstat --vmknic -N vmk1
# You can even combine the Linux and VMkernel network stats (just don't use "total").
# # dstat --vmknic -n -N vmk0,vswif0
# NB Data comes from /proc/vmware/net/tcpip/ifconfig
class dstat_plugin(dstat):
def __init__(self):
self.name = 'vmknic'
self.nick = ('recv', 'send')
self.open('/proc/vmware/net/tcpip/ifconfig')
self.cols = 2
def check(self):
try:
os.listdir('/proc/vmware')
except:
raise Exception('Needs VMware ESX')
info(1, 'The vmknic module is an EXPERIMENTAL module.')
def discover(self, *list):
ret = []
for l in self.fd[0].splitlines(replace=' /', delim='/'):
if len(l) != 12: continue
if l[2][:5] == '<Link': continue
if ','.join(l) == 'Name,Mtu/TSO,Network,Address,Ipkts,Ierrs,Ibytes,Opkts,Oerrs,Obytes,Coll,Time': continue
if l[0] == 'lo0': continue
if l[0] == 'Usage:': continue
ret.append(l[0])
ret.sort()
for item in list: ret.append(item)
return ret
def vars(self):
ret = []
if op.netlist:
list = op.netlist
else:
list = self.discover
list.sort()
for name in list:
if name in self.discover + ['total']:
ret.append(name)
return ret
def name(self):
return ['net/'+name for name in self.vars]
def extract(self):
self.set2['total'] = [0, 0]
for line in self.fd[0].readlines():
l = line.replace(' /','/').split()
if len(l) != 12: continue
if l[2][:5] == '<Link': continue
if ','.join(l) == 'Name,Mtu/TSO,Network,Address,Ipkts,Ierrs,Ibytes,Opkts,Oerrs,Obytes,Coll,Time': continue
if l[0] == 'Usage:': continue
name = l[0]
if name in self.vars:
self.set2[name] = ( int(l[6]), int(l[9]) )
if name != 'lo0':
self.set2['total'] = ( self.set2['total'][0] + int(l[6]), self.set2['total'][1] + int(l[9]) )
if update:
for name in self.set2:
self.val[name] = list(map(lambda x, y: (y - x) * 1.0 / elapsed, self.set1[name], self.set2[name]))
if step == op.delay:
self.set1.update(self.set2)
# vim:ts=4:sw=4
| 2,648
|
Python
|
.py
| 66
| 30.954545
| 118
| 0.550758
|
dstat-real/dstat
| 1,340
| 330
| 1
|
GPL-2.0
|
9/5/2024, 5:10:30 PM (Europe/Amsterdam)
|
8,212
|
dstat_test.py
|
dstat-real_dstat/plugins/dstat_test.py
|
### Author: Dag Wieers <dag@wieers.com>
class dstat_plugin(dstat):
'''
Provides a test playground to test syntax and structure.
'''
def __init__(self):
self.name = 'test'
self.nick = ( 'f1', 'f2' )
self.vars = ( 'f1', 'f2' )
# self.type = 'd'
# self.width = 4
# self.scale = 20
self.type = 's'
self.width = 4
self.scale = 0
def extract(self):
# Self.val = { 'f1': -1, 'f2': -1 }
self.val = { 'f1': 'test', 'f2': 'test' }
# vim:ts=4:sw=4:et
| 547
|
Python
|
.py
| 19
| 23.631579
| 60
| 0.491429
|
dstat-real/dstat
| 1,340
| 330
| 1
|
GPL-2.0
|
9/5/2024, 5:10:30 PM (Europe/Amsterdam)
|
8,213
|
dstat_zfs_zil.py
|
dstat-real_dstat/plugins/dstat_zfs_zil.py
|
class dstat_plugin(dstat):
"""
ZFS on Linux ZIL (ZFS Intent Log)
Data is extracted from /proc/spl/kstat/zfs/zil
"""
def __init__(self):
self.name = 'ZFS ZIL'
self.nick = ('count', 'bytes')
self.vars = ('zil_itx_metaslab_slog_count', 'zil_itx_metaslab_slog_bytes')
self.types = ('d', 'b')
self.scales = (1000, 1024)
self.counter = (True, True)
self.open('/proc/spl/kstat/zfs/zil')
def extract(self):
for l in self.splitlines():
if len(l) < 2: continue
name = l[0]
if name in self.vars:
self.set2[name] = int(l[2])
for i, name in enumerate (self.vars):
if self.counter[i]:
self.val[name] = (self.set2[name] - self.set1[name]) * 1.0 / elapsed
else:
self.val[name] = self.set2[name]
if step == op.delay:
self.set1.update(self.set2)
# vim:ts=4:sw=4:et
| 1,001
|
Python
|
.py
| 28
| 26.142857
| 84
| 0.52376
|
dstat-real/dstat
| 1,340
| 330
| 1
|
GPL-2.0
|
9/5/2024, 5:10:30 PM (Europe/Amsterdam)
|
8,214
|
dstat_top_oom.py
|
dstat-real_dstat/plugins/dstat_top_oom.py
|
### Author: Dag Wieers <dag@wieers.com>
### Dstat most expensive process plugin
### Displays the name of the most expensive process
### More information:
### http://lwn.net/Articles/317814/
class dstat_plugin(dstat):
def __init__(self):
self.name = 'out of memory'
self.vars = ('kill score',)
self.type = 's'
self.width = 18
self.scale = 0
def check(self):
if not os.access('/proc/self/oom_score', os.R_OK):
raise Exception('Kernel does not support /proc/pid/oom_score, use at least 2.6.11.')
def extract(self):
self.output = ''
self.val['max'] = 0.0
for pid in proc_pidlist():
try:
### Extract name
name = proc_splitline('/proc/%s/stat' % pid)[1][1:-1]
### Using dopen() will cause too many open files
l = proc_splitline('/proc/%s/oom_score' % pid)
except IOError:
continue
except IndexError:
continue
if len(l) < 1: continue
oom_score = int(l[0])
### Is it a new topper ?
if oom_score <= self.val['max']: continue
self.val['max'] = oom_score
self.val['name'] = getnamebypid(pid, name)
self.val['pid'] = pid
if self.val['max'] != 0.0:
self.output = '%-*s%s' % (self.width-4, self.val['name'][0:self.width-4], cprint(self.val['max'], 'f', 4, 1000))
### Debug (show PID)
# self.output = '%*s %-*s' % (5, self.val['pid'], self.width-6, self.val['name'])
def showcsv(self):
return '%s / %d%%' % (self.val['name'], self.val['max'])
# vim:ts=4:sw=4:et
| 1,721
|
Python
|
.py
| 42
| 31.285714
| 124
| 0.52581
|
dstat-real/dstat
| 1,340
| 330
| 1
|
GPL-2.0
|
9/5/2024, 5:10:30 PM (Europe/Amsterdam)
|
8,215
|
dstat_thermal.py
|
dstat-real_dstat/plugins/dstat_thermal.py
|
### Author: Dag Wieers <dag@wieers.com>
class dstat_plugin(dstat):
def __init__(self):
self.name = 'thermal'
self.type = 'd'
self.width = 3
self.scale = 20
if os.path.exists('/sys/devices/virtual/thermal/'):
self.nick = []
self.vars = []
for zone in os.listdir('/sys/devices/virtual/thermal/'):
zone_split=zone.split("thermal_zone")
if len(zone_split) == 2:
self.vars.append(zone)
name="".join(["tz",zone_split[1]])
self.nick.append(name)
elif os.path.exists('/sys/bus/acpi/devices/LNXTHERM:01/thermal_zone/'):
self.vars = os.listdir('/sys/bus/acpi/devices/LNXTHERM:01/thermal_zone/')
self.nick = []
for name in self.vars:
self.nick.append(name.lower())
elif os.path.exists('/proc/acpi/ibm/thermal'):
self.namelist = ['cpu', 'pci', 'hdd', 'cpu', 'ba0', 'unk', 'ba1', 'unk']
self.nick = []
for line in dopen('/proc/acpi/ibm/thermal'):
l = line.split()
for i, name in enumerate(self.namelist):
if int(l[i+1]) > 0:
self.nick.append(name)
self.vars = self.nick
elif os.path.exists('/proc/acpi/thermal_zone/'):
self.vars = os.listdir('/proc/acpi/thermal_zone/')
# self.nick = [name.lower() for name in self.vars]
self.nick = []
for name in self.vars:
self.nick.append(name.lower())
else:
raise Exception('Needs kernel thermal, ACPI or IBM-ACPI support')
def check(self):
if not os.path.exists('/proc/acpi/ibm/thermal') and \
not os.path.exists('/proc/acpi/thermal_zone/') and \
not os.path.exists('/sys/devices/virtual/thermal/') and \
not os.path.exists('/sys/bus/acpi/devices/LNXTHERM:01/thermal_zone/'):
raise Exception('Needs kernel thermal, ACPI or IBM-ACPI support')
def extract(self):
if os.path.exists('/sys/devices/virtual/thermal/'):
for zone in self.vars:
for line in dopen('/sys/devices/virtual/thermal/'+zone+'/temp').readlines():
l = line.split()
self.val[zone] = int(l[0])
elif os.path.exists('/sys/bus/acpi/devices/LNXTHERM:01/thermal_zone/'):
for zone in self.vars:
if not os.path.isdir('/sys/bus/acpi/devices/LNXTHERM:01/thermal_zone/'+zone):
for line in dopen('/sys/bus/acpi/devices/LNXTHERM:01/thermal_zone/'+zone).readlines():
l = line.split()
if l[0].isdigit():
self.val[zone] = int(l[0])
else:
self.val[zone] = 0
elif os.path.exists('/proc/acpi/ibm/thermal'):
for line in dopen('/proc/acpi/ibm/thermal'):
l = line.split()
for i, name in enumerate(self.namelist):
if int(l[i+1]) > 0:
self.val[name] = int(l[i+1])
elif os.path.exists('/proc/acpi/thermal_zone/'):
for zone in self.vars:
for line in dopen('/proc/acpi/thermal_zone/'+zone+'/temperature').readlines():
l = line.split()
self.val[zone] = int(l[1])
# vim:ts=4:sw=4:et
| 3,513
|
Python
|
.py
| 71
| 35.15493
| 106
| 0.517332
|
dstat-real/dstat
| 1,340
| 330
| 1
|
GPL-2.0
|
9/5/2024, 5:10:30 PM (Europe/Amsterdam)
|
8,216
|
dstat_nfsd4_ops.py
|
dstat-real_dstat/plugins/dstat_nfsd4_ops.py
|
### Author: Adam Michel <elfurbe@furbism.com>
### Based on work by: Dag Wieers <dag@wieers.com>
class dstat_plugin(dstat):
def __init__(self):
self.name = 'nfs4 server'
# this vars/nick pair covers the ops I considered relevant. Any subset of the full list would work.
self.vars = ('read','write','readdir','getattr','setattr','commit','getfh','putfh',
'savefh','restorefh','open','open_conf','close','access','lookup','remove')
self.nick = ('read', 'writ', 'rdir', 'gatr','satr','cmmt','gfh','pfh','sfh','rfh',
'open','opnc','clse','accs','lkup','rem')
# this is every possible variable for NFSv4 server if you're into that
#self.vars4 = ('op0-unused', 'op1-unused', 'op2-future' , 'access',
# 'close', 'commit', 'create', 'delegpurge', 'delegreturn', 'getattr', 'getfh',
# 'link', 'lock', 'lockt', 'locku', 'lookup', 'lookup_root', 'nverify', 'open',
# 'openattr', 'open_conf', 'open_dgrd','putfh', 'putpubfh', 'putrootfh',
# 'read', 'readdir', 'readlink', 'remove', 'rename','renew', 'restorefh',
# 'savefh', 'secinfo', 'setattr', 'setcltid', 'setcltidconf', 'verify', 'write',
# 'rellockowner')
# I separated the NFSv41 ops cause you know, completeness.
#self.vars41 = ('bc_ctl', 'bind_conn', 'exchange_id', 'create_ses',
# 'destroy_ses', 'free_stateid', 'getdirdeleg', 'getdevinfo', 'getdevlist',
# 'layoutcommit', 'layoutget', 'layoutreturn', 'secinfononam', 'sequence',
# 'set_ssv', 'test_stateid', 'want_deleg', 'destroy_clid', 'reclaim_comp')
# Just catin' the tuples together to make the full list.
#self.vars = self.vars4 + self.vars41
# these are terrible shortnames for every possible variable
#self.nick4 = ('unsd','unsd','unsd','accs','clse','comm','crt','delp','delr','gatr','gfh',
# 'link','lock','lckt','lcku','lkup','lkpr','nver','open','opna','opnc','opnd',
# 'pfh','ppfh','prfh','read','rdir','rlnk','rmv','ren','rnw','rfh','sfh','snfo',
# 'satr','scid','scic','ver','wrt','rlko')
#self.nick41 = ('bctl','bcon','eid','cses','dses','fsid',
# 'gdd','gdi','gdl','lcmt','lget','lrtn','sinn','seq','sets','tsts','wdel','dcid',
# 'rcmp')
#self.nick = self.nick4 + self.nick41
self.type = 'd'
self.width = 5
self.scale = 1000
self.open("/proc/net/rpc/nfsd")
def check(self):
# other NFS modules had this, so I left it. It seems to work.
info(1, 'Module %s is still experimental.' % self.filename)
def extract(self):
# list of fields from /proc/net/rpc/nfsd, in order of output
# taken from include/linux/nfs4.h in kernel source
nfsd4_names = ('label', 'fieldcount', 'op0-unused', 'op1-unused', 'op2-future' , 'access',
'close', 'commit', 'create', 'delegpurge', 'delegreturn', 'getattr', 'getfh',
'link', 'lock', 'lockt', 'locku', 'lookup', 'lookup_root', 'nverify', 'open',
'openattr', 'open_conf', 'open_dgrd','putfh', 'putpubfh', 'putrootfh',
'read', 'readdir', 'readlink', 'remove', 'rename','renew', 'restorefh',
'savefh', 'secinfo', 'setattr', 'setcltid', 'setcltidconf', 'verify', 'write',
'rellockowner', 'bc_ctl', 'bind_conn', 'exchange_id', 'create_ses',
'destroy_ses', 'free_stateid', 'getdirdeleg', 'getdevinfo', 'getdevlist',
'layoutcommit', 'layoutget', 'layoutreturn', 'secinfononam', 'sequence',
'set_ssv', 'test_stateid', 'want_deleg', 'destroy_clid', 'reclaim_comp'
)
for line in self.splitlines():
fields = line.split()
if fields[0] == "proc4ops": # just grab NFSv4 stats
assert int(fields[1]) == len(fields[2:]), ("reported field count (%d) does not match actual field count (%d)" % (int(fields[1]), len(fields[2:])))
for var in self.vars:
self.set2[var] = fields[nfsd4_names.index(var)]
for name in self.vars:
self.val[name] = (int(self.set2[name]) - int(self.set1[name])) * 1.0 / elapsed
if step == op.delay:
self.set1.update(self.set2)
# vim:ts=4:sw=4:et
| 4,417
|
Python
|
.py
| 66
| 56.333333
| 162
| 0.554916
|
dstat-real/dstat
| 1,340
| 330
| 1
|
GPL-2.0
|
9/5/2024, 5:10:30 PM (Europe/Amsterdam)
|
8,217
|
dstat_top_cpu_adv.py
|
dstat-real_dstat/plugins/dstat_top_cpu_adv.py
|
### Dstat most expensive CPU process plugin
### Displays the CPU usage and I/O read/write stats of the most expensive process
###
### Author: Guillermo Cantu Luna
class dstat_plugin(dstat):
def __init__(self):
self.name = 'most expensive cpu process'
self.vars = ('process pid cpu read write',)
self.type = 's'
self.width = 40
self.scale = 0
self.pidset1 = {}
def check(self):
if not os.access('/proc/self/io', os.R_OK):
raise Exception('Kernel has no per-process I/O accounting [CONFIG_TASK_IO_ACCOUNTING], use at least 2.6.20')
return True
def extract(self):
self.output = ''
self.pidset2 = {}
self.val['cpu_usage'] = 0
for pid in proc_pidlist():
try:
### Reset values
if pid not in self.pidset2:
self.pidset2[pid] = {'rchar:': 0, 'wchar:': 0, 'cputime:': 0, 'cpuper:': 0}
if pid not in self.pidset1:
self.pidset1[pid] = {'rchar:': 0, 'wchar:': 0, 'cputime:': 0, 'cpuper:': 0}
### Extract name
name = proc_splitline('/proc/%s/stat' % pid)[1][1:-1]
### Extract counters
for l in proc_splitlines('/proc/%s/io' % pid):
if len(l) != 2: continue
self.pidset2[pid][l[0]] = int(l[1])
### Get CPU usage
l = proc_splitline('/proc/%s/stat' % pid)
if len(l) < 15:
cpu_usage = 0.0
else:
self.pidset2[pid]['cputime:'] = int(l[13]) + int(l[14])
cpu_usage = (self.pidset2[pid]['cputime:'] - self.pidset1[pid]['cputime:']) * 1.0 / elapsed / cpunr
except ValueError:
continue
except IOError:
continue
except IndexError:
continue
read_usage = (self.pidset2[pid]['rchar:'] - self.pidset1[pid]['rchar:']) * 1.0 / elapsed
write_usage = (self.pidset2[pid]['wchar:'] - self.pidset1[pid]['wchar:']) * 1.0 / elapsed
### Get the process that spends the most jiffies
if cpu_usage > self.val['cpu_usage']:
self.val['read_usage'] = read_usage
self.val['write_usage'] = write_usage
self.val['pid'] = pid
self.val['name'] = getnamebypid(pid, name)
self.val['cpu_usage'] = cpu_usage
if step == op.delay:
self.pidset1 = self.pidset2
if self.val['cpu_usage'] != 0.0:
self.output = '%-*s%s%-5s%s%s%%%s%s' % (self.width-14-len(pid), self.val['name'][0:self.width-14-len(pid)], color['darkblue'], self.val['pid'], cprint(self.val['cpu_usage'], 'f', 3, 34), color['darkgray'],cprint(self.val['read_usage'], 'd', 5, 1024), cprint(self.val['write_usage'], 'd', 5, 1024))
def showcsv(self):
return 'Top: %s\t%s\t%s\t%s' % (self.val['name'][0:self.width-20], self.val['cpu_usage'], self.val['read_usage'], self.val['write_usage'])
| 3,177
|
Python
|
.py
| 61
| 37.983607
| 310
| 0.501942
|
dstat-real/dstat
| 1,340
| 330
| 1
|
GPL-2.0
|
9/5/2024, 5:10:30 PM (Europe/Amsterdam)
|
8,218
|
dstat_power.py
|
dstat-real_dstat/plugins/dstat_power.py
|
### Author: Dag Wieers <dag@wieers.com>
class dstat_plugin(dstat):
"""
Power usage information from ACPI.
Displays the power usage in watts of your system's battery using
ACPI information. This information is only available when the battery is
being used (or being charged).
"""
def __init__(self):
self.name = 'power'
self.nick = ( 'usage', )
self.vars = ( 'rate', )
self.type = 'f'
self.width = 5
self.scale = 1
self.rate = 0
self.batteries = []
for battery in os.listdir('/proc/acpi/battery/'):
for line in dopen('/proc/acpi/battery/'+battery+'/state').readlines():
l = line.split()
if len(l) < 2: continue
self.batteries.append(battery)
break
def check(self):
if not self.batteries:
raise Exception('No battery information found, no power usage statistics')
def extract(self):
amperes_drawn = 0
voltage = 0
watts_drawn = 0
for battery in self.batteries:
for line in dopen('/proc/acpi/battery/'+battery+'/state').readlines():
l = line.split()
if len(l) < 3: continue
if l[0] == 'present:' and l[1] != 'yes': continue
if l[0:2] == ['charging','state:'] and l[2] != 'discharging':
voltage = 0
break
if l[0:2] == ['present','voltage:']:
voltage = int(l[2]) / 1000.0
elif l[0:2] == ['present','rate:'] and l[3] == 'mW':
watts_drawn = int(l[2]) / 1000.0
elif l[0:2] == ['present','rate:'] and l[3] == 'mA':
amperes_drawn = int(l[2]) / 1000.0
self.rate = self.rate + watts_drawn + voltage * amperes_drawn
### Return error if we found no information
if self.rate == 0:
self.rate = -1
if op.update:
self.val['rate'] = self.rate / elapsed
else:
self.val['rate'] = self.rate
if step == op.delay:
self.rate = 0
# vim:ts=4:sw=4:et
| 2,200
|
Python
|
.py
| 55
| 28.636364
| 86
| 0.511007
|
dstat-real/dstat
| 1,340
| 330
| 1
|
GPL-2.0
|
9/5/2024, 5:10:30 PM (Europe/Amsterdam)
|
8,219
|
dstat_mysql5_conn.py
|
dstat-real_dstat/plugins/dstat_mysql5_conn.py
|
### Author: <lefred$inuits,be>
global mysql_user
mysql_user = os.getenv('DSTAT_MYSQL_USER') or os.getenv('USER')
global mysql_pwd
mysql_pwd = os.getenv('DSTAT_MYSQL_PWD')
global mysql_host
mysql_host = os.getenv('DSTAT_MYSQL_HOST')
global mysql_port
mysql_port = os.getenv('DSTAT_MYSQL_PORT')
global mysql_socket
mysql_socket = os.getenv('DSTAT_MYSQL_SOCKET')
class dstat_plugin(dstat):
"""
Plugin for MySQL 5 connections.
"""
def __init__(self):
self.name = 'mysql5 conn'
self.nick = ('ThCon', '%Con')
self.vars = ('Threads_connected', 'Threads')
self.type = 'f'
self.width = 4
self.scale = 1
def check(self):
global MySQLdb
import MySQLdb
try:
args = {}
if mysql_user:
args['user'] = mysql_user
if mysql_pwd:
args['passwd'] = mysql_pwd
if mysql_host:
args['host'] = mysql_host
if mysql_port:
args['port'] = int(mysql_port)
if mysql_socket:
args['unix_socket'] = mysql_socket
self.db = MySQLdb.connect(**args)
except Exception as e:
raise Exception('Cannot interface with MySQL server, %s' % e)
def extract(self):
try:
c = self.db.cursor()
c.execute("""show global variables like 'max_connections';""")
max = c.fetchone()
c.execute("""show global status like 'Threads_connected';""")
thread = c.fetchone()
if thread[0] in self.vars:
self.set2[thread[0]] = float(thread[1])
self.set2['Threads'] = float(thread[1]) / float(max[1]) * 100.0
for name in self.vars:
self.val[name] = self.set2[name] * 1.0 / elapsed
if step == op.delay:
self.set1.update(self.set2)
except Exception as e:
for name in self.vars:
self.val[name] = -1
# vim:ts=4:sw=4:et
| 2,038
|
Python
|
.py
| 58
| 25.534483
| 79
| 0.547304
|
dstat-real/dstat
| 1,340
| 330
| 1
|
GPL-2.0
|
9/5/2024, 5:10:30 PM (Europe/Amsterdam)
|
8,220
|
dstat_mongodb_queue.py
|
dstat-real_dstat/plugins/dstat_mongodb_queue.py
|
### Author: <gianfranco@mongodb.com>
global mongodb_user
mongodb_user = os.getenv('DSTAT_MONGODB_USER') or os.getenv('USER')
global mongodb_pwd
mongodb_pwd = os.getenv('DSTAT_MONGODB_PWD')
global mongodb_host
mongodb_host = os.getenv('DSTAT_MONGODB_HOST') or '127.0.0.1:27017'
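### EXAMPLE (assumed invocation; credentials and host come from the
### DSTAT_MONGODB_* environment variables read above):
# DSTAT_MONGODB_USER=admin DSTAT_MONGODB_PWD=secret DSTAT_MONGODB_HOST=127.0.0.1:27017 dstat --mongodb-queue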
class dstat_plugin(dstat):
"""
Plugin for MongoDB.
"""
def __init__(self):
global pymongo
import pymongo
try:
self.m = pymongo.MongoClient(mongodb_host)
if mongodb_pwd:
self.m.admin.authenticate(mongodb_user, mongodb_pwd)
self.db = self.m.admin
except Exception as e:
raise Exception('Cannot interface with MongoDB server: %s' % e)
self.name = 'mongodb queues'
self.nick = ('ar', 'aw', 'qr', 'qw')
self.vars = ('ar', 'aw', 'qr', 'qw')
self.type = 'd'
self.width = 5
self.scale = 2
self.lastVal = {}
def extract(self):
status = self.db.command("serverStatus")
glock = status['globalLock']
alock = glock['activeClients']
qlock = glock['currentQueue']
self.val['ar'] = int(alock['readers'])
self.val['aw'] = int(alock['writers'])
self.val['qr'] = int(qlock['readers'])
self.val['qw'] = int(qlock['writers'])
| 1,224
|
Python
|
.py
| 37
| 28.675676
| 69
| 0.639525
|
dstat-real/dstat
| 1,340
| 330
| 1
|
GPL-2.0
|
9/5/2024, 5:10:30 PM (Europe/Amsterdam)
|
8,221
|
dstat_snmp_net.py
|
dstat-real_dstat/plugins/dstat_snmp_net.py
|
### Author: Dag Wieers <dag$wieers,com>
class dstat_plugin(dstat):
def __init__(self):
self.nick = ('recv', 'send')
self.type = 'b'
self.cols = 2
self.server = os.getenv('DSTAT_SNMPSERVER') or '192.168.1.1'
self.community = os.getenv('DSTAT_SNMPCOMMUNITY') or 'public'
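### EXAMPLE (assumed invocation; the SNMP agent to poll comes from the
### environment variables read above):
# DSTAT_SNMPSERVER=192.168.1.1 DSTAT_SNMPCOMMUNITY=public dstat --snmp-net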
def check(self):
try:
global cmdgen
from pysnmp.entity.rfc3413.oneliner import cmdgen
except:
raise Exception('Needs pysnmp and pyasn1 modules')
def name(self):
return self.vars
def vars(self):
return [ str(x) for x in snmpwalk(self.server, self.community, (1,3,6,1,2,1,2,2,1,2)) ]
def extract(self):
list(map(lambda x, y, z: self.set2.update({x: (int(y), int(z))}), self.vars, snmpwalk(self.server, self.community, (1,3,6,1,2,1,2,2,1,10)), snmpwalk(self.server, self.community, (1,3,6,1,2,1,2,2,1,16))))
if update:
for name in self.set2:
self.val[name] = list(map(lambda x, y: (y - x) * 1.0 / elapsed, self.set1[name], self.set2[name]))
if step == op.delay:
self.set1.update(self.set2)
# vim:ts=4:sw=4:et
| 1,175
|
Python
|
.py
| 26
| 36.5
| 211
| 0.587204
|
dstat-real/dstat
| 1,340
| 330
| 1
|
GPL-2.0
|
9/5/2024, 5:10:30 PM (Europe/Amsterdam)
|
8,222
|
dstat_nfsstat4.py
|
dstat-real_dstat/plugins/dstat_nfsstat4.py
|
### Author: Adam Michel <elfurbe@furbism.com>
### Based on work by: Dag Wieers <dag@wieers.com>
class dstat_plugin(dstat):
def __init__(self):
self.name = 'nfs4 client'
# this vars/nick pair covers the ops I considered relevant. Any subset of the full list would work.
self.vars = ('read', 'write', 'readdir', 'commit', 'getattr', 'create', 'link','remove')
self.nick = ('read', 'writ', 'rdir', 'cmmt', 'gatr','crt','link','rmv')
# this is every possible variable if you're into that
#self.vars = ("read", "write", "commit", "open", "open_conf", "open_noat", "open_dgrd", "close",
# "setattr", "fsinfo", "renew", "setclntid", "confirm", "lock", "lockt", "locku",
# "access", "getattr", "lookup", "lookup_root", "remove", "rename", "link", "symlink",
# "create", "pathconf", "statfs", "readlink", "readdir", "server_caps", "delegreturn",
# "getacl", "setacl", "fs_locations", "rel_lkowner", "secinfo")
# these are terrible shortnames for every possible variable
#self.nick = ("read", "writ", "comt", "open", "opnc", "opnn", "opnd", "clse", "seta", "fnfo",
# "renw", "stcd", "cnfm", "lock", "lckt", "lcku", "accs", "gatr", "lkup", "lkp_r",
# "rem", "ren", "lnk", "slnk", "crte", "pthc", "stfs", "rdlk", "rdir", "scps", "delr",
# "gacl", "sacl", "fslo", "relo", "seco")
self.type = 'd'
self.width = 5
self.scale = 1000
self.open('/proc/net/rpc/nfs')
def check(self):
# other NFS modules had this, so I left it. It seems to work.
info(1, 'Module %s is still experimental.' % self.filename)
def extract(self):
# list of fields from nfsstat, in order of output from cat /proc/net/rpc/nfs
nfs4_names = ("version", "fieldcount", "null", "read", "write", "commit", "open", "open_conf",
"open_noat", "open_dgrd", "close", "setattr", "fsinfo", "renew", "setclntid",
"confirm", "lock", "lockt", "locku", "access", "getattr", "lookup", "lookup_root",
"remove", "rename", "link", "symlink", "create", "pathconf", "statfs", "readlink",
"readdir", "server_caps", "delegreturn", "getacl", "setacl", "fs_locations",
"rel_lkowner", "secinfo")
for line in self.splitlines():
fields = line.split()
if fields[0] == "proc4": # just grab NFSv4 stats
assert int(fields[1]) == len(fields[2:]), ("reported field count (%d) does not match actual field count (%d)" % (int(fields[1]), len(fields[2:])))
for var in self.vars:
self.set2[var] = fields[nfs4_names.index(var)]
for name in self.vars:
self.val[name] = (int(self.set2[name]) - int(self.set1[name])) * 1.0 / elapsed
if step == op.delay:
self.set1.update(self.set2)
# vim:ts=4:sw=4:et
| 2,970
|
Python
|
.py
| 45
| 55.777778
| 162
| 0.547635
|
dstat-real/dstat
| 1,340
| 330
| 1
|
GPL-2.0
|
9/5/2024, 5:10:30 PM (Europe/Amsterdam)
|
8,223
|
dstat_mysql_keys.py
|
dstat-real_dstat/plugins/dstat_mysql_keys.py
|
global mysql_options
mysql_options = os.getenv('DSTAT_MYSQL')
class dstat_plugin(dstat):
def __init__(self):
self.name = 'mysql key status'
self.nick = ('used', 'read', 'writ', 'rreq', 'wreq')
self.vars = ('Key_blocks_used', 'Key_reads', 'Key_writes', 'Key_read_requests', 'Key_write_requests')
self.type = 'f'
self.width = 4
self.scale = 1000
def check(self):
if not os.access('/usr/bin/mysql', os.X_OK):
raise Exception('Needs MySQL binary')
try:
self.stdin, self.stdout, self.stderr = dpopen('/usr/bin/mysql -n %s' % mysql_options)
except IOError:
raise Exception('Cannot interface with MySQL binary')
def extract(self):
try:
self.stdin.write("show status like 'Key_%';\n")
for line in readpipe(self.stdout):
l = line.split()
if len(l) < 2: continue
if l[0] in self.vars:
self.set2[l[0]] = float(l[1])
for name in self.vars:
self.val[name] = (self.set2[name] - self.set1[name]) * 1.0 / elapsed
if step == op.delay:
self.set1.update(self.set2)
except IOError as e:
if op.debug > 1: print('%s: lost pipe to mysql, %s' % (self.filename, e))
for name in self.vars: self.val[name] = -1
except Exception as e:
if op.debug > 1: print('%s: exception: %s' % (self.filename, e))
for name in self.vars: self.val[name] = -1
# vim:ts=4:sw=4:et
| 1,577
|
Python
|
.py
| 36
| 33.111111
| 109
| 0.545336
|
dstat-real/dstat
| 1,340
| 330
| 1
|
GPL-2.0
|
9/5/2024, 5:10:30 PM (Europe/Amsterdam)
|
8,224
|
dstat_helloworld.py
|
dstat-real_dstat/plugins/dstat_helloworld.py
|
### Author: Dag Wieers <dag$wieers,com>
class dstat_plugin(dstat):
"""
Example "Hello world!" output plugin for aspiring Dstat developers.
"""
def __init__(self):
self.name = 'plugin title'
self.nick = ('counter',)
self.vars = ('text',)
self.type = 's'
self.width = 12
self.scale = 0
def extract(self):
self.val['text'] = 'Hello world!'
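### A minimal counter-style sketch (not part of the original plugin, kept
### commented out): the same skeleton with type 'd' and the set1/set2/elapsed
### rate pattern used by the other plugins in this repo. 'elapsed', 'step' and
### 'op' are globals the dstat framework provides at runtime.
# class dstat_plugin(dstat):
#     def __init__(self):
#         self.name = 'counter title'
#         self.nick = ('rate',)
#         self.vars = ('rate',)
#         self.type = 'd'
#         self.width = 5
#         self.scale = 1000
#     def extract(self):
#         self.set2['rate'] = self.set1['rate'] + 1   # stand-in for any monotonic counter
#         self.val['rate'] = (self.set2['rate'] - self.set1['rate']) * 1.0 / elapsed
#         if step == op.delay:
#             self.set1.update(self.set2)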
# vim:ts=4:sw=4:et
| 436
|
Python
|
.py
| 15
| 22.733333
| 71
| 0.565947
|
dstat-real/dstat
| 1,340
| 330
| 1
|
GPL-2.0
|
9/5/2024, 5:10:30 PM (Europe/Amsterdam)
|
8,225
|
dstat_zfs_arc.py
|
dstat-real_dstat/plugins/dstat_zfs_arc.py
|
class dstat_plugin(dstat):
"""
ZFS on Linux ARC (Adjustable Replacement Cache)
Data is extracted from /proc/spl/kstat/zfs/arcstats
"""
def __init__(self):
self.name = 'ZFS ARC'
self.nick = ('mem', 'hit', 'miss', 'reads', 'hit%')
self.vars = ('size', 'hits', 'misses', 'total', 'hit_rate')
self.types = ('b', 'd', 'd', 'd', 'p')
self.scales = (1024, 1000, 1000, 1000, 1000)
self.counter = (False, True, True, False, False)
self.open('/proc/spl/kstat/zfs/arcstats')
def extract(self):
for l in self.splitlines():
if len(l) < 2: continue
name = l[0]
if name in self.vars:
self.set2[name] = int(l[2])
for i, name in enumerate (self.vars):
if self.counter[i]:
self.val[name] = (self.set2[name] - self.set1[name]) * 1.0 / elapsed
else:
self.val[name] = self.set2[name]
self.val['total'] = self.val['hits'] + self.val['misses']
if self.val['total'] > 0 :
self.val['hit_rate'] = self.val['hits'] / self.val['total'] * 100.0
else:
self.val['hit_rate'] = 0
if step == op.delay:
self.set1.update(self.set2)
# vim:ts=4:sw=4:et
| 1,319
|
Python
|
.py
| 33
| 30.151515
| 84
| 0.516028
|
dstat-real/dstat
| 1,340
| 330
| 1
|
GPL-2.0
|
9/5/2024, 5:10:30 PM (Europe/Amsterdam)
|
8,226
|
dstat_gpfs.py
|
dstat-real_dstat/plugins/dstat_gpfs.py
|
### Author: Dag Wieers <dag$wieers,com>
class dstat_plugin(dstat):
"""
Total amount of read and write throughput (in bytes) on a GPFS filesystem.
"""
def __init__(self):
self.name = 'gpfs i/o'
self.nick = ('read', 'write')
self.vars = ('_br_', '_bw_')
def check(self):
if os.access('/usr/lpp/mmfs/bin/mmpmon', os.X_OK):
try:
self.stdin, self.stdout, self.stderr = dpopen('/usr/lpp/mmfs/bin/mmpmon -p -s')
self.stdin.write('reset\n')
readpipe(self.stdout)
except IOError:
raise Exception('Cannot interface with gpfs mmpmon binary')
return True
raise Exception('Needs GPFS mmpmon binary')
def extract(self):
try:
self.stdin.write('io_s\n')
# readpipe(self.stderr)
for line in readpipe(self.stdout):
if not line: continue
l = line.split()
for name in self.vars:
self.set2[name] = int(l[l.index(name)+1])
for name in self.vars:
self.val[name] = (self.set2[name] - self.set1[name]) * 1.0 / elapsed
except IOError as e:
if op.debug > 1: print('%s: lost pipe to mmpmon, %s' % (self.filename, e))
for name in self.vars: self.val[name] = -1
except Exception as e:
if op.debug > 1: print('%s: exception %s' % (self.filename, e))
for name in self.vars: self.val[name] = -1
if step == op.delay:
self.set1.update(self.set2)
# vim:ts=4:sw=4:et
| 1,622
|
Python
|
.py
| 39
| 30.794872
| 95
| 0.536462
|
dstat-real/dstat
| 1,340
| 330
| 1
|
GPL-2.0
|
9/5/2024, 5:10:30 PM (Europe/Amsterdam)
|
8,227
|
dstat_battery.py
|
dstat-real_dstat/plugins/dstat_battery.py
|
### Author: Dag Wieers <dag$wieers,com>
### Author: Sven-Hendrik Haase <sh@lutzhaase.com>
class dstat_plugin(dstat):
"""
Percentage of remaining battery power as reported by ACPI.
"""
def __init__(self):
self.name = 'battery'
self.type = 'p'
self.width = 4
self.scale = 34
self.battery_type = "none"
def check(self):
if os.path.exists('/proc/acpi/battery/'):
self.battery_type = "procfs"
elif glob.glob('/sys/class/power_supply/BAT*'):
self.battery_type = "sysfs"
else:
raise Exception('No ACPI battery information found.')
def vars(self):
ret = []
if self.battery_type == "procfs":
for battery in os.listdir('/proc/acpi/battery/'):
for line in dopen('/proc/acpi/battery/'+battery+'/state').readlines():
l = line.split()
if len(l) < 2: continue
if l[0] == 'present:' and l[1] == 'yes':
ret.append(battery)
elif self.battery_type == "sysfs":
for battery in glob.glob('/sys/class/power_supply/BAT*'):
for line in dopen(battery+'/present').readlines():
if int(line[0]) == 1:
ret.append(os.path.basename(battery))
ret.sort()
return ret
def nick(self):
return [name.lower() for name in self.vars]
def extract(self):
for battery in self.vars:
### guard against info/state files missing the expected lines
full = current = 0
if self.battery_type == "procfs":
for line in dopen('/proc/acpi/battery/'+battery+'/info').readlines():
l = line.split()
if len(l) < 4: continue
if l[0] == 'last':
full = int(l[3])
break
for line in dopen('/proc/acpi/battery/'+battery+'/state').readlines():
l = line.split()
if len(l) < 3: continue
if l[0] == 'remaining':
current = int(l[2])
break
if current and full:
self.val[battery] = current * 100.0 / full
else:
self.val[battery] = -1
elif self.battery_type == "sysfs":
for line in dopen('/sys/class/power_supply/'+battery+'/capacity').readlines():
current = int(line)
break
if current:
self.val[battery] = current
else:
self.val[battery] = -1
# vim:ts=4:sw=4:et
| 2,648
|
Python
|
.py
| 65
| 26.661538
| 94
| 0.478075
|
dstat-real/dstat
| 1,340
| 330
| 1
|
GPL-2.0
|
9/5/2024, 5:10:30 PM (Europe/Amsterdam)
|
8,228
|
dstat_mongodb_stats.py
|
dstat-real_dstat/plugins/dstat_mongodb_stats.py
|
### Author: <gianfranco@mongodb.com>
global mongodb_user
mongodb_user = os.getenv('DSTAT_MONGODB_USER') or os.getenv('USER')
global mongodb_pwd
mongodb_pwd = os.getenv('DSTAT_MONGODB_PWD')
global mongodb_host
mongodb_host = os.getenv('DSTAT_MONGODB_HOST') or '127.0.0.1:27017'
class dstat_plugin(dstat):
"""
Plugin for MongoDB.
"""
def __init__(self):
global pymongo
import pymongo
try:
self.m = pymongo.MongoClient(mongodb_host)
if mongodb_pwd:
self.m.admin.authenticate(mongodb_user, mongodb_pwd)
self.db = self.m.admin
except Exception as e:
raise Exception('Cannot interface with MongoDB server: %s' % e)
stats = self.db.command("listDatabases")
self.dbList = []
for db in stats.get('databases'):
self.dbList.append(db.get('name'))
line = self.db.command("serverStatus")
if 'storageEngine' in line:
self.storageEngine = line.get('storageEngine').get('name')
else:
self.storageEngine = 'mmapv1'
self.name = 'mongodb stats'
self.nick = ('dsize', 'isize', 'ssize')
self.vars = ('dataSize', 'indexSize', 'storageSize')
self.type = 'b'
self.width = 5
self.scale = 2
self.count = 1
if self.storageEngine == 'mmapv1':
self.nick = self.nick + ('fsize',)
self.vars = self.vars + ('fileSize',)
def extract(self):
self.set = {}
# refresh the database list every 10 iterations
if (self.count % 10) == 0:
stats = self.m.admin.command("listDatabases")
self.dbList = []
for db in stats.get('databases'):
self.dbList.append(db.get('name'))
self.count += 1
for name in self.vars:
self.set[name] = 0
for db in self.dbList:
self.db = self.m.get_database(db)
stats = self.db.command("dbStats")
for name in self.vars:
self.set[name] += int(stats.get(name)) / (1024 * 1024)
self.val = self.set
| 1,942
|
Python
|
.py
| 57
| 28.789474
| 69
| 0.636558
|
dstat-real/dstat
| 1,340
| 330
| 1
|
GPL-2.0
|
9/5/2024, 5:10:30 PM (Europe/Amsterdam)
|
8,229
|
dstat_dstat.py
|
dstat-real_dstat/plugins/dstat_dstat.py
|
### Author: Dag Wieers <dag$wieers,com>
class dstat_plugin(dstat):
"""
Provide more information related to the dstat process.
The dstat cputime is the total cputime dstat requires per second. On a
system with one cpu and one core, the total cputime is 1000ms. On a system
with 2 cores the total is 2000ms. It may help to visualise the performance
of Dstat and its selection of plugins.
"""
def __init__(self):
self.name = 'dstat'
self.vars = ('cputime', 'latency')
self.type = 'd'
self.width = 5
self.scale = 1000
self.open('/proc/%s/schedstat' % ownpid)
def extract(self):
l = self.splitline()
# l = linecache.getline('/proc/%s/schedstat' % self.pid, 1).split()
self.set2['cputime'] = int(l[0])
self.set2['latency'] = int(l[1])
for name in self.vars:
self.val[name] = (self.set2[name] - self.set1[name]) * 1.0 / elapsed
if step == op.delay:
self.set1.update(self.set2)
# vim:ts=4:sw=4:et
| 1,050
|
Python
|
.py
| 26
| 33.461538
| 80
| 0.613949
|
dstat-real/dstat
| 1,340
| 330
| 1
|
GPL-2.0
|
9/5/2024, 5:10:30 PM (Europe/Amsterdam)
|
8,230
|
dstat_snmp_cpu.py
|
dstat-real_dstat/plugins/dstat_snmp_cpu.py
|
### Author: Dag Wieers <dag$wieers,com>
class dstat_plugin(dstat):
def __init__(self):
self.name = 'total cpu'
self.vars = ( 'usr', 'sys', 'idl' )
self.type = 'p'
self.width = 3
self.scale = 34
self.server = os.getenv('DSTAT_SNMPSERVER') or '192.168.1.1'
self.community = os.getenv('DSTAT_SNMPCOMMUNITY') or 'public'
def check(self):
try:
global cmdgen
from pysnmp.entity.rfc3413.oneliner import cmdgen
except:
raise Exception('Needs pysnmp and pyasn1 modules')
def extract(self):
self.set2['usr'] = int(snmpget(self.server, self.community, (1,3,6,1,4,1,2021,11,50,0)))
self.set2['sys'] = int(snmpget(self.server, self.community, (1,3,6,1,4,1,2021,11,52,0)))
self.set2['idl'] = int(snmpget(self.server, self.community, (1,3,6,1,4,1,2021,11,53,0)))
# self.set2['usr'] = int(snmpget(self.server, self.community, (('UCD-SNMP-MIB', 'ssCpuRawUser'), 0)))
# self.set2['sys'] = int(snmpget(self.server, self.community, (('UCD-SNMP-MIB', 'ssCpuRawSystem'), 0)))
# self.set2['idl'] = int(snmpget(self.server, self.community, (('UCD-SNMP-MIB', 'ssCpuRawIdle'), 0)))
if update:
for name in self.vars:
if sum(self.set2.values()) > sum(self.set1.values()):
self.val[name] = 100.0 * (self.set2[name] - self.set1[name]) / (sum(self.set2.values()) - sum(self.set1.values()))
else:
self.val[name] = 0
if step == op.delay:
self.set1.update(self.set2)
# vim:ts=4:sw=4:et
| 1,635
|
Python
|
.py
| 32
| 41.90625
| 134
| 0.575454
|
dstat-real/dstat
| 1,340
| 330
| 1
|
GPL-2.0
|
9/5/2024, 5:10:30 PM (Europe/Amsterdam)
|
8,231
|
dstat_vz_ubc.py
|
dstat-real_dstat/plugins/dstat_vz_ubc.py
|
### Author: Dag Wieers <dag@wieers.com>
class dstat_plugin(dstat):
def __init__(self):
self.nick = ('fcnt', )
self.type = 'd'
self.width = 5
self.scale = 1000
self.open('/proc/user_beancounters')
self.cols = 1 ### Is this correct ?
def check(self):
info(1, 'Module %s is still experimental.' % self.filename)
def discover(self, *list):
ret = []
for l in self.splitlines():
if len(l) < 7 or l[0] in ('uid', '0:'): continue
ret.append(l[0][0:-1])
ret.sort()
for item in list: ret.append(item)
return ret
def name(self):
ret = []
for name in self.vars:
if name == 'total':
ret.append('total failcnt')
else:
ret.append(name)
return ret
def vars(self):
ret = []
if not op.full:
list = ('total', )
else:
list = self.discover
for name in list:
if name in self.discover + ['total']:
ret.append(name)
return ret
def extract(self):
for name in self.vars + ['total']:
self.set2[name] = 0
for l in self.splitlines():
if len(l) < 6 or l[0] == 'uid':
continue
elif len(l) == 7:
name = l[0][0:-1]
if name in self.vars:
self.set2[name] = self.set2[name] + int(l[6])
self.set2['total'] = self.set2['total'] + int(l[6])
elif name == '0':
continue
else:
if name in self.vars:
self.set2[name] = self.set2[name] + int(l[5])
self.set2['total'] = self.set2['total'] + int(l[5])
for name in self.vars:
self.val[name] = (self.set2[name] - self.set1[name]) * 1.0 / elapsed
if step == op.delay:
self.set1.update(self.set2)
# vim:ts=4:sw=4:et
| 2,010
|
Python
|
.py
| 59
| 22.983051
| 80
| 0.474253
|
dstat-real/dstat
| 1,340
| 330
| 1
|
GPL-2.0
|
9/5/2024, 5:10:30 PM (Europe/Amsterdam)
|
8,232
|
dstat_cpufreq.py
|
dstat-real_dstat/plugins/dstat_cpufreq.py
|
### Author: dag@wieers.com
class dstat_plugin(dstat):
"""
CPU frequency in percentage as reported by ACPI.
"""
def __init__(self):
self.name = 'frequency'
self.type = 'p'
self.width = 4
self.scale = 34
def check(self):
for cpu in glob.glob('/sys/devices/system/cpu/cpu[0-9]*'):
if not os.access(cpu+'/cpufreq/scaling_cur_freq', os.R_OK):
raise Exception('Cannot access acpi %s frequency information' % os.path.basename(cpu))
def vars(self):
ret = []
for name in glob.glob('/sys/devices/system/cpu/cpu[0-9]*'):
ret.append(os.path.basename(name))
ret.sort()
return ret
# return os.listdir('/sys/devices/system/cpu/')
def nick(self):
return [name.lower() for name in self.vars]
def extract(self):
for cpu in self.vars:
for line in dopen('/sys/devices/system/cpu/'+cpu+'/cpufreq/scaling_max_freq').readlines():
l = line.split()
max = int(l[0])
for line in dopen('/sys/devices/system/cpu/'+cpu+'/cpufreq/scaling_cur_freq').readlines():
l = line.split()
cur = int(l[0])
### Need to close because of bug in sysfs (?)
dclose('/sys/devices/system/cpu/'+cpu+'/cpufreq/scaling_cur_freq')
self.set1[cpu] = self.set1[cpu] + cur * 100.0 / max
if op.update:
self.val[cpu] = self.set1[cpu] / elapsed
else:
self.val[cpu] = self.set1[cpu]
if step == op.delay:
self.set1[cpu] = 0
# vim:ts=4:sw=4:et
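### Illustrative sketch (not part of the plugin): one CPU's current frequency
### as a percentage of its maximum, read from the same sysfs files as above.
def _freq_pct(cpu='cpu0'):
    base = '/sys/devices/system/cpu/%s/cpufreq/' % cpu
    cur = int(open(base + 'scaling_cur_freq').read().split()[0])
    maxfreq = int(open(base + 'scaling_max_freq').read().split()[0])
    return cur * 100.0 / maxfreq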
| 1,664
|
Python
|
.py
| 41
| 30.365854
| 102
| 0.546468
|
dstat-real/dstat
| 1,340
| 330
| 1
|
GPL-2.0
|
9/5/2024, 5:10:30 PM (Europe/Amsterdam)
|
8,233
|
dstat_innodb_io.py
|
dstat-real_dstat/plugins/dstat_innodb_io.py
|
### Author: Dag Wieers <dag$wieers,com>
global mysql_options
mysql_options = os.getenv('DSTAT_MYSQL') or ''
class dstat_plugin(dstat):
def __init__(self):
self.name = 'innodb io ops '
self.nick = ('rea', 'wri', 'syn')
self.vars = ('read', 'write', 'sync')
self.type = 'f'
self.width = 3
self.scale = 1000
def check(self):
if os.access('/usr/bin/mysql', os.X_OK):
try:
self.stdin, self.stdout, self.stderr = dpopen('/usr/bin/mysql -n %s' % mysql_options)
except IOError:
raise Exception('Cannot interface with MySQL binary')
return True
raise Exception('Needs MySQL binary')
def extract(self):
try:
self.stdin.write('show engine innodb status\G\n')
line = matchpipe(self.stdout, '.*OS file reads,.*')
if line:
l = line.split()
self.set2['read'] = int(l[0])
self.set2['write'] = int(l[4])
self.set2['sync'] = int(l[8])
for name in self.vars:
self.val[name] = (self.set2[name] - self.set1[name]) * 1.0 / elapsed
if step == op.delay:
self.set1.update(self.set2)
except IOError as e:
if op.debug > 1: print('%s: lost pipe to mysql, %s' % (self.filename, e))
for name in self.vars: self.val[name] = -1
except Exception as e:
            if op.debug > 1: print('%s: exception %s' % (self.filename, e))
for name in self.vars: self.val[name] = -1
# vim:ts=4:sw=4:et
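### Illustrative sketch (not part of the plugin): parsing the status line that
### matchpipe() above looks for, e.g.
### "123 OS file reads, 456 OS file writes, 78 OS fsyncs"
def _parse_innodb_io(line):
    l = line.split()
    return {'read': int(l[0]), 'write': int(l[4]), 'sync': int(l[8])}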
| 1,618
|
Python
|
.py
| 39
| 30.692308
| 101
| 0.530911
|
dstat-real/dstat
| 1,340
| 330
| 1
|
GPL-2.0
|
9/5/2024, 5:10:30 PM (Europe/Amsterdam)
|
8,234
|
dstat_mysql5_innodb_extra.py
|
dstat-real_dstat/plugins/dstat_mysql5_innodb_extra.py
|
### Author: HIROSE Masaaki <hirose31 _at_ gmail.com>
global mysql_options
mysql_options = os.getenv('DSTAT_MYSQL') or ''
global target_status
global _basic_status
global _extra_status
_basic_status = (
('Queries' , 'qps'),
('Com_select' , 'sel/s'),
('Com_insert' , 'ins/s'),
('Com_update' , 'upd/s'),
('Com_delete' , 'del/s'),
('Connections' , 'con/s'),
('Threads_connected' , 'thcon'),
('Threads_running' , 'thrun'),
('Slow_queries' , 'slow'),
)
_extra_status = (
('Innodb_rows_read' , 'r#read'),
('Innodb_rows_inserted' , 'r#ins'),
('Innodb_rows_updated' , 'r#upd'),
('Innodb_rows_deleted' , 'r#del'),
('Innodb_data_reads' , 'rdphy'),
('Innodb_buffer_pool_read_requests', 'rdlgc'),
('Innodb_data_writes' , 'wrdat'),
('Innodb_log_writes' , 'wrlog'),
('innodb_buffer_pool_pages_dirty_pct', '%dirty'),
)
global calculating_status
calculating_status = (
'Innodb_buffer_pool_pages_total',
'Innodb_buffer_pool_pages_dirty',
)
global gauge
gauge = {
'Slow_queries' : 1,
'Threads_connected' : 1,
'Threads_running' : 1,
}
class dstat_plugin(dstat):
"""
mysql5-innodb, mysql5-innodb-basic, mysql5-innodb-extra
    display various metrics on MySQL5 and InnoDB.
"""
def __init__(self):
self.name = 'MySQL5 InnoDB '
self.type = 'd'
self.width = 5
self.scale = 1000
def check(self):
if self.filename.find("basic") >= 0:
target_status = _basic_status
self.name += 'basic'
elif self.filename.find("extra") >= 0:
target_status = _extra_status
self.name += 'extra'
elif self.filename.find("full") >= 0:
target_status = _basic_status + _extra_status
self.name += 'full'
else:
target_status = _basic_status + _extra_status
self.name += 'full'
self.vars = tuple( map((lambda e: e[0]), target_status) )
self.nick = tuple( map((lambda e: e[1]), target_status) )
mysql_candidate = ('/usr/bin/mysql', '/usr/local/bin/mysql')
mysql_cmd = ''
for mc in mysql_candidate:
if os.access(mc, os.X_OK):
mysql_cmd = mc
break
if mysql_cmd:
try:
self.stdin, self.stdout, self.stderr = dpopen('%s -n %s' % (mysql_cmd, mysql_options))
except IOError:
raise Exception('Cannot interface with MySQL binary')
return True
raise Exception('Needs MySQL binary')
def extract(self):
try:
self.stdin.write('show global status;\n')
for line in readpipe(self.stdout):
if line == '':
break
s = line.split()
if s[0] in self.vars:
self.set2[s[0]] = float(s[1])
elif s[0] in calculating_status:
self.set2[s[0]] = float(s[1])
for k in self.vars:
if k in gauge:
self.val[k] = self.set2[k]
elif k == 'innodb_buffer_pool_pages_dirty_pct':
self.val[k] = self.set2['Innodb_buffer_pool_pages_dirty'] / self.set2['Innodb_buffer_pool_pages_total'] * 100
else:
self.val[k] = (self.set2[k] - self.set1[k]) * 1.0 / elapsed
if step == op.delay:
self.set1.update(self.set2)
except IOError as e:
if op.debug > 1: print('%s: lost pipe to mysql, %s' % (self.filename, e))
for name in self.vars: self.val[name] = -1
except Exception as e:
            if op.debug > 1: print('%s: exception %s' % (self.filename, e))
for name in self.vars: self.val[name] = -1
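### Illustrative sketch (not part of the plugin): the %dirty gauge above,
### derived from the two raw counters listed in calculating_status.
def _dirty_pct(status):
    return (status['Innodb_buffer_pool_pages_dirty'] * 100.0
            / status['Innodb_buffer_pool_pages_total'])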
| 4,110
|
Python
|
.py
| 103
| 30.640777
| 129
| 0.500251
|
dstat-real/dstat
| 1,340
| 330
| 1
|
GPL-2.0
|
9/5/2024, 5:10:30 PM (Europe/Amsterdam)
|
8,235
|
dstat_mongodb_opcount.py
|
dstat-real_dstat/plugins/dstat_mongodb_opcount.py
|
### Author: <gianfranco@mongodb.com>
global mongodb_user
mongodb_user = os.getenv('DSTAT_MONGODB_USER') or os.getenv('USER')
global mongodb_pwd
mongodb_pwd = os.getenv('DSTAT_MONGODB_PWD')
global mongodb_host
mongodb_host = os.getenv('DSTAT_MONGODB_HOST') or '127.0.0.1:27017'
class dstat_plugin(dstat):
"""
Plugin for MongoDB.
"""
def __init__(self):
global pymongo
import pymongo
try:
self.m = pymongo.MongoClient(mongodb_host)
if mongodb_pwd:
self.m.admin.authenticate(mongodb_user, mongodb_pwd)
self.db = self.m.admin
except Exception as e:
raise Exception('Cannot interface with MongoDB server: %s' % e)
self.name = 'mongodb counts'
self.nick = ('qry', 'ins', 'upd', 'del', 'gtm', 'cmd')
self.vars = ('query', 'insert','update','delete','getmore','command')
self.type = 'd'
self.width = 5
self.scale = 2
self.lastVal = {}
def extract(self):
status = self.db.command("serverStatus")
opct = status['opcounters']
for name in self.vars:
            if name in opct:
if not name in self.lastVal:
self.lastVal[name] = opct.get(name)
self.val[name] = (int(opct.get(name)) - self.lastVal[name]) / elapsed
self.lastVal[name] = opct.get(name)
| 1,304
|
Python
|
.py
| 37
| 30.27027
| 81
| 0.643084
|
dstat-real/dstat
| 1,340
| 330
| 1
|
GPL-2.0
|
9/5/2024, 5:10:30 PM (Europe/Amsterdam)
|
8,236
|
dstat_wifi.py
|
dstat-real_dstat/plugins/dstat_wifi.py
|
### Author: Dag Wieers <dag@wieers.com>
class dstat_plugin(dstat):
def __init__(self):
self.name = 'wifi'
self.nick = ('lnk', 's/n')
self.type = 'd'
self.width = 3
self.scale = 34
self.cols = 2
def check(self):
global iwlibs
from pythonwifi import iwlibs
def vars(self):
return iwlibs.getNICnames()
def extract(self):
for name in self.vars:
wifi = iwlibs.Wireless(name)
stat, qual, discard, missed_beacon = wifi.getStatistics()
# print(qual.quality, qual.signallevel, qual.noiselevel)
if qual.quality == 0 or qual.signallevel == -101 or qual.noiselevel == -101 or qual.signallevel == -256 or qual.noiselevel == -256:
self.val[name] = ( -1, -1 )
else:
self.val[name] = ( qual.quality, qual.signallevel * 100 / qual.noiselevel )
# vim:ts=4:sw=4:et
| 937
|
Python
|
.py
| 24
| 30.5
| 143
| 0.575991
|
dstat-real/dstat
| 1,340
| 330
| 1
|
GPL-2.0
|
9/5/2024, 5:10:30 PM (Europe/Amsterdam)
|
8,237
|
dstat_jvm_full.py
|
dstat-real_dstat/plugins/dstat_jvm_full.py
|
# Author: Roberto Polli <rpolli@redhat.com>
#
# NOTE: Edit the jcmd location according to your path or use update-alternatives.
global BIN_JCMD
BIN_JCMD = '/usr/bin/jcmd'
class dstat_plugin(dstat):
"""
This plugin gathers jvm stats via jcmd.
Usage:
JVM_PID=15123 dstat --jvm-full
Minimize the impacts of jcmd and consider using:
dstat --noupdate
    For full information on jcmd, see:
- http://docs.oracle.com/javase/7/docs/technotes/tools/solaris/jcmd.html
- https://docs.oracle.com/javase/8/docs/technotes/guides/troubleshoot/tooldescr006.html
This requires the presence of /tmp/hsperfdata_* directory, so
it WON'T WORK if you add -XX:-UsePerfData or -XX:+PerfDisableSharedMem.
"""
def __init__(self):
self.name = 'jvm_full'
self.vars = ('clsL', 'clsU', 'fgc', 'heap', 'heap%',
'heapmax', 'perm', 'perm%', 'permmax')
self.type = 'f'
self.width = 5
self.scale = 1000
def check(self):
"""Preliminar checks. If no pid is passed, defaults to 0.
"""
if not os.access(BIN_JCMD, os.X_OK):
            raise Exception('Needs jcmd binary')
try:
self.jvm_pid = int(os.environ.get('JVM_PID',0))
except Exception as e:
self.jvm_pid = 0
return True
@staticmethod
def _to_stat(k, v):
try:
return k, int(v)
except (KeyError, ValueError, AttributeError):
return k, v
@staticmethod
def _cmd_splitlines(cmd):
"""Splits a txt output of lines like key=value.
"""
for l in os.popen(cmd):
yield l.strip().split("=", 1)
def extract(self):
try:
lines = self._cmd_splitlines(
'%s %s PerfCounter.print ' % (BIN_JCMD, self.jvm_pid))
table = dict(self._to_stat(*l) for l in lines
if len(l) > 1)
if table:
# Number of loaded classes.
self.set2['clsL'] = table['java.cls.loadedClasses']
self.set2['clsU'] = table['java.cls.unloadedClasses']
# Number of Full Garbage Collection events.
self.set2['fgc'] = table['sun.gc.collector.1.invocations']
# The heap space is made up of Old Generation and Young
# Generation (which is divided in Eden, Survivor0 and
# Survivor1)
self.set2['heap'] = table['sun.gc.generation.1.capacity'] + table[
'sun.gc.generation.0.capacity']
# Usage is hidden in the nested spaces.
self.set2['heapu'] = sum(table[k] for k in table
if 'sun.gc.generation.' in k
and 'used' in k)
self.set2['heapmax'] = table['sun.gc.generation.1.maxCapacity'] + table[
'sun.gc.generation.0.maxCapacity']
# Use PermGen on jdk7 and the new metaspace on jdk8
try:
self.set2['perm'] = table['sun.gc.generation.2.capacity']
self.set2['permu'] = sum(table[k] for k in table
if 'sun.gc.generation.2.' in k
and 'used' in k)
self.set2['permmax'] = table[
'sun.gc.generation.2.maxCapacity']
except KeyError:
self.set2['perm'] = table['sun.gc.metaspace.capacity']
self.set2['permu'] = table['sun.gc.metaspace.used']
self.set2['permmax'] = table[
'sun.gc.metaspace.maxCapacity']
# Evaluate statistics on memory usage.
for name in ('heap', 'perm'):
self.set2[name + '%'] = 100 * self.set2[
name + 'u'] / self.set2[name]
for name in self.vars:
self.val[name] = self.set2[name]
if step == op.delay:
self.set1.update(self.set2)
except IOError as e:
if op.debug > 1:
                print('%s: lost pipe to jcmd, %s' % (self.filename, e))
for name in self.vars:
self.val[name] = -1
except Exception as e:
if op.debug > 1:
print('%s: exception' % e)
for name in self.vars:
self.val[name] = -1
# vim:ts=4:sw=4:et
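### Illustrative sketch (not part of the plugin): the key=value parsing done
### by _cmd_splitlines() and _to_stat() above, shown as one function.
def _parse_perfcounter(line):
    k, _, v = line.strip().partition('=')
    try:
        return k, int(v)
    except ValueError:
        return k, v
### e.g. _parse_perfcounter('sun.gc.collector.1.invocations=7')
### -> ('sun.gc.collector.1.invocations', 7)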
| 4,523
|
Python
|
.py
| 102
| 30.77451
| 93
| 0.520691
|
dstat-real/dstat
| 1,340
| 330
| 1
|
GPL-2.0
|
9/5/2024, 5:10:30 PM (Europe/Amsterdam)
|
8,238
|
dstat_snooze.py
|
dstat-real_dstat/plugins/dstat_snooze.py
|
class dstat_plugin(dstat):
def __init__(self):
self.name = 'snooze'
self.vars = ('snooze',)
self.type = 's'
self.width = 6
self.scale = 0
self.before = time.time()
def extract(self):
now = time.time()
if loop != 0:
self.val['snooze'] = now - self.before
else:
self.val['snooze'] = self.before
if step == op.delay:
self.before = now
def show(self):
if self.val['snooze'] > step + 1:
return ansi['default'] + ' -'
if op.blackonwhite:
textcolor = 'black'
if step != op.delay:
textcolor = 'darkgray'
else:
textcolor = 'white'
if step != op.delay:
textcolor = 'gray'
        snooze, c = fchg(self.val['snooze'], 6, 1000)
        return color[textcolor] + snooze
| 908
|
Python
|
.py
| 29
| 21.172414
| 52
| 0.483982
|
dstat-real/dstat
| 1,340
| 330
| 1
|
GPL-2.0
|
9/5/2024, 5:10:30 PM (Europe/Amsterdam)
|
8,239
|
dstat_zfs_l2arc.py
|
dstat-real_dstat/plugins/dstat_zfs_l2arc.py
|
class dstat_plugin(dstat):
"""
ZFS on Linux L2ARC (Level 2 Adjustable Replacement Cache)
Data is extracted from /proc/spl/kstat/zfs/arcstats
"""
def __init__(self):
self.name = 'ZFS L2ARC'
self.nick = ('size', 'hit', 'miss', 'hit%', 'read', 'write')
self.vars = ('l2_size', 'l2_hits', 'l2_misses', 'hit_rate', 'l2_read_bytes', 'l2_write_bytes')
self.types = ('b', 'd', 'd', 'p', 'b', 'b')
self.scales = (1024, 1000, 1000, 1000, 1024, 1024)
self.counter = (False, True, True, False, True, True)
self.open('/proc/spl/kstat/zfs/arcstats')
def extract(self):
for l in self.splitlines():
if len(l) < 2: continue
            name = l[0]
if name in self.vars:
self.set2[name] = int(l[2])
        for i, name in enumerate(self.vars):
if self.counter[i]:
self.val[name] = (self.set2[name] - self.set1[name]) * 1.0 / elapsed
else:
self.val[name] = self.set2[name]
probes = self.val['l2_hits'] + self.val['l2_misses']
        if probes > 0:
self.val['hit_rate'] = self.val['l2_hits'] / probes * 100.0
else:
self.val['hit_rate'] = 0
if step == op.delay:
self.set1.update(self.set2)
# vim:ts=4:sw=4:et
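### Illustrative sketch (not part of the plugin): the L2ARC hit-rate
### computation used in extract() above.
def _hit_rate(hits, misses):
    probes = hits + misses
    return hits * 100.0 / probes if probes > 0 else 0.0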
| 1,367
|
Python
|
.py
| 33
| 31.606061
| 102
| 0.523738
|
dstat-real/dstat
| 1,340
| 330
| 1
|
GPL-2.0
|
9/5/2024, 5:10:30 PM (Europe/Amsterdam)
|
8,240
|
dstat_disk_svctm.py
|
dstat-real_dstat/plugins/dstat_disk_svctm.py
|
### Author: David Nicklay <david-d$nicklay,com>
### Modified from disk-util: Dag Wieers <dag$wieers,com>
class dstat_plugin(dstat):
"""
The average service time (in milliseconds) for I/O requests that were
issued to the device.
Warning! Do not trust this field any more.
"""
def __init__(self):
self.version = 2
self.nick = ('svctm',)
self.type = 'f'
self.width = 4
self.scale = 1
self.diskfilter = re.compile('^([hsv]d[a-z]+\d+|cciss/c\d+d\d+p\d+|dm-\d+|md\d+|mmcblk\d+p\d0|VxVM\d+)$')
self.open('/proc/diskstats')
self.cols = 1
self.struct = dict( nr_ios=0, tot_ticks=0 )
def discover(self, *objlist):
ret = []
for l in self.splitlines():
if len(l) < 13: continue
if l[3:] == ['0',] * 11: continue
name = l[2]
ret.append(name)
for item in objlist: ret.append(item)
if not ret:
raise Exception('No suitable block devices found to monitor')
return ret
def vars(self):
ret = []
if op.disklist:
varlist = op.disklist
else:
varlist = []
blockdevices = [os.path.basename(filename) for filename in glob.glob('/sys/block/*')]
for name in self.discover:
if self.diskfilter.match(name): continue
if name not in blockdevices: continue
varlist.append(name)
varlist.sort()
for name in varlist:
if name in self.discover:
ret.append(name)
return ret
def name(self):
return self.vars
def extract(self):
for l in self.splitlines():
if len(l) < 13: continue
if l[3:] == ['0',] * 11: continue
if l[3] == '0' and l[7] == '0': continue
name = l[2]
if name not in self.vars or name == 'total': continue
self.set2[name] = dict(
nr_ios = int(l[3])+int(l[7]),
tot_ticks = int(l[12]),
)
for name in self.vars:
tput = ( self.set2[name]['nr_ios'] - self.set1[name]['nr_ios'] )
if tput:
util = ( self.set2[name]['tot_ticks'] - self.set1[name]['tot_ticks'] )
self.val[name] = ( util * 1.0 / tput, )
else:
self.val[name] = ( 0.0, )
if step == op.delay:
self.set1.update(self.set2)
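### Illustrative sketch (not part of the plugin): average service time per
### completed request over one interval, from two /proc/diskstats samples.
def _svctm(nr_ios1, tot_ticks1, nr_ios2, tot_ticks2):
    tput = nr_ios2 - nr_ios1
    return (tot_ticks2 - tot_ticks1) * 1.0 / tput if tput else 0.0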
| 2,493
|
Python
|
.py
| 67
| 26.641791
| 113
| 0.514274
|
dstat-real/dstat
| 1,340
| 330
| 1
|
GPL-2.0
|
9/5/2024, 5:10:30 PM (Europe/Amsterdam)
|
8,241
|
dstat_vz_io.py
|
dstat-real_dstat/plugins/dstat_vz_io.py
|
### Author: Dag Wieers <dag@wieers.com>
### Example content for /proc/bc/<veid>/ioacct
# read 2773011640320
# write 2095707136000
# dirty 4500342390784
# cancel 4080624041984
# missed 0
# syncs_total 2
# fsyncs_total 1730732
# fdatasyncs_total 3266
# range_syncs_total 0
# syncs_active 0
# fsyncs_active 0
# fdatasyncs_active 0
# range_syncs_active 0
# vfs_reads 3717331387
# vfs_read_chars 3559144863185798078
# vfs_writes 901216138
# vfs_write_chars 23864660931174682
# io_pbs 16
class dstat_plugin(dstat):
def __init__(self):
self.nick = ['read', 'write', 'dirty', 'cancel', 'missed']
self.cols = len(self.nick)
def check(self):
if not os.path.exists('/proc/vz'):
raise Exception('System does not have OpenVZ support')
elif not os.path.exists('/proc/bc'):
raise Exception('System does not have (new) OpenVZ beancounter support')
elif not glob.glob('/proc/bc/*/ioacct'):
raise Exception('System does not have any OpenVZ containers')
info(1, 'Module %s is still experimental.' % self.filename)
def name(self):
return ['ve/'+name for name in self.vars]
def vars(self):
ret = []
if not op.full:
varlist = ['total',]
else:
            varlist = [os.path.basename(os.path.dirname(path)) for path in glob.glob('/proc/bc/*/ioacct')]
ret = varlist
return ret
    def extract(self):
        for name in self.vars:
            self.set2[name] = [0] * len(self.nick)
        for path in glob.glob('/proc/bc/*/ioacct'):
            name = os.path.basename(os.path.dirname(path))
            counters = [0] * len(self.nick)
            for line in dopen(path).readlines():
                l = line.split()
                if len(l) != 2: continue
                if l[0] not in self.nick: continue
                counters[self.nick.index(l[0])] = int(l[1])
            if name in self.vars:
                self.set2[name] = counters
            if 'total' in self.vars:
                self.set2['total'] = list(map(lambda x, y: x + y, self.set2['total'], counters))
        for name in self.vars:
            self.val[name] = list(map(lambda x, y: (y - x) * 1.0 / elapsed, self.set1[name], self.set2[name]))
if step == op.delay:
self.set1.update(self.set2)
# vim:ts=4:sw=4:et
| 2,742
|
Python
|
.py
| 58
| 40.327586
| 104
| 0.484112
|
dstat-real/dstat
| 1,340
| 330
| 1
|
GPL-2.0
|
9/5/2024, 5:10:30 PM (Europe/Amsterdam)
|
8,242
|
dstat_fan.py
|
dstat-real_dstat/plugins/dstat_fan.py
|
### Author: Dag Wieers <dag$wieers,com>
class dstat_plugin(dstat):
"""
Fan speed in RPM (rotations per minute) as reported by ACPI.
"""
def __init__(self):
self.name = 'fan'
self.type = 'd'
self.width = 4
self.scale = 500
self.open('/proc/acpi/ibm/fan')
def vars(self):
ret = None
for l in self.splitlines():
if l[0] == 'speed:':
ret = ('speed',)
return ret
def check(self):
if not os.path.exists('/proc/acpi/ibm/fan'):
raise Exception('Needs kernel IBM-ACPI support')
def extract(self):
if os.path.exists('/proc/acpi/ibm/fan'):
for l in self.splitlines():
if l[0] == 'speed:':
self.val['speed'] = int(l[1])
# vim:ts=4:sw=4:et
| 829
|
Python
|
.py
| 26
| 23.115385
| 64
| 0.520703
|
dstat-real/dstat
| 1,340
| 330
| 1
|
GPL-2.0
|
9/5/2024, 5:10:30 PM (Europe/Amsterdam)
|
8,243
|
dstat_vm_cpu.py
|
dstat-real_dstat/plugins/dstat_vm_cpu.py
|
### Author: Bert de Bruijn <bert+dstat$debruijn,be>
### VMware cpu stats
### Displays CPU stats coming from the hypervisor inside VMware VMs.
### The vmGuestLib API from VMware Tools needs to be installed
class dstat_plugin(dstat):
def __init__(self):
self.name = 'vm cpu'
self.vars = ('used', 'stolen', 'elapsed')
self.nick = ('usd', 'stl')
self.type = 'p'
self.width = 3
self.scale = 100
self.cpunr = getcpunr()
def check(self):
try:
global vmguestlib
import vmguestlib
self.gl = vmguestlib.VMGuestLib()
except:
raise Exception('Needs python-vmguestlib module')
def extract(self):
self.gl.UpdateInfo()
self.set2['elapsed'] = self.gl.GetElapsedMs()
self.set2['stolen'] = self.gl.GetCpuStolenMs()
self.set2['used'] = self.gl.GetCpuUsedMs()
for name in ('stolen', 'used'):
self.val[name] = (self.set2[name] - self.set1[name]) * 100 / (self.set2['elapsed'] - self.set1['elapsed']) / self.cpunr
if step == op.delay:
self.set1.update(self.set2)
# vim:ts=4:sw=4
| 1,169
|
Python
|
.py
| 30
| 30.9
| 131
| 0.595049
|
dstat-real/dstat
| 1,340
| 330
| 1
|
GPL-2.0
|
9/5/2024, 5:10:30 PM (Europe/Amsterdam)
|
8,244
|
dstat_lustre.py
|
dstat-real_dstat/plugins/dstat_lustre.py
|
# Author: Brock Palen <brockp@mlds-networks.com>, Kilian Vavalotti <kilian@stanford.edu>
class dstat_plugin(dstat):
def __init__(self):
self.nick = ('read', 'write')
self.cols = 2
def check(self):
if not os.path.exists('/proc/fs/lustre/llite'):
raise Exception('Lustre filesystem not found')
info(1, 'Module %s is still experimental.' % self.filename)
def name(self):
return [mount for mount in os.listdir('/proc/fs/lustre/llite')]
def vars(self):
return [mount for mount in os.listdir('/proc/fs/lustre/llite')]
def extract(self):
for name in self.vars:
            read = write = 0
            for line in dopen(os.path.join('/proc/fs/lustre/llite', name, 'stats')).readlines():
                l = line.split()
                if len(l) < 7: continue
                if l[0] == 'read_bytes':
                    read = int(l[6])
                elif l[0] == 'write_bytes':
                    write = int(l[6])
self.set2[name] = (read, write)
self.val[name] = list(map(lambda x, y: (y - x) * 1.0 / elapsed, self.set1[name], self.set2[name]))
if step == op.delay:
self.set1.update(self.set2)
# vim:ts=4:sw=4
| 1,221
|
Python
|
.py
| 27
| 34.703704
| 110
| 0.555649
|
dstat-real/dstat
| 1,340
| 330
| 1
|
GPL-2.0
|
9/5/2024, 5:10:30 PM (Europe/Amsterdam)
|
8,245
|
dstat_top_cpu.py
|
dstat-real_dstat/plugins/dstat_top_cpu.py
|
### Authority: Dag Wieers <dag@wieers.com>
class dstat_plugin(dstat):
"""
Most expensive CPU process.
Displays the process that uses the CPU the most during the monitored
interval. The value displayed is the percentage of CPU time for the total
amount of CPU processing power. Based on per process CPU information.
"""
def __init__(self):
self.name = 'most expensive'
self.vars = ('cpu process',)
self.type = 's'
self.width = 16
self.scale = 0
self.pidset1 = {}
def extract(self):
self.output = ''
self.pidset2 = {}
self.val['max'] = 0.0
for pid in proc_pidlist():
try:
### Using dopen() will cause too many open files
l = proc_splitline('/proc/%s/stat' % pid)
except IOError:
continue
if len(l) < 15: continue
### Reset previous value if it doesn't exist
if pid not in self.pidset1:
self.pidset1[pid] = 0
self.pidset2[pid] = int(l[13]) + int(l[14])
usage = (self.pidset2[pid] - self.pidset1[pid]) * 1.0 / elapsed / cpunr
### Is it a new topper ?
if usage < self.val['max']: continue
name = l[1][1:-1]
self.val['max'] = usage
self.val['pid'] = pid
self.val['name'] = getnamebypid(pid, name)
# self.val['name'] = name
if self.val['max'] != 0.0:
self.output = '%-*s%s' % (self.width-3, self.val['name'][0:self.width-3], cprint(self.val['max'], 'f', 3, 34))
### Debug (show PID)
# self.output = '%*s %-*s' % (5, self.val['pid'], self.width-6, self.val['name'])
if step == op.delay:
self.pidset1 = self.pidset2
def showcsv(self):
return '%s / %d%%' % (self.val['name'], self.val['max'])
# vim:ts=4:sw=4:et
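### Illustrative sketch (not part of the plugin): per-process CPU share from
### two utime+stime samples (jiffies), assuming the usual USER_HZ of 100 so
### that jiffies per second map directly onto a percentage.
def _proc_cpu_pct(jiffies1, jiffies2, elapsed, ncpu):
    return (jiffies2 - jiffies1) * 1.0 / elapsed / ncpu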
| 1,926
|
Python
|
.py
| 47
| 31.340426
| 122
| 0.531367
|
dstat-real/dstat
| 1,340
| 330
| 1
|
GPL-2.0
|
9/5/2024, 5:10:30 PM (Europe/Amsterdam)
|
8,246
|
dstat_vz_cpu.py
|
dstat-real_dstat/plugins/dstat_vz_cpu.py
|
### Author: Dag Wieers <dag@wieers.com>
#Version: 2.2
#VEID user nice system uptime idle strv uptime used maxlat totlat numsched
#302 142926 0 10252 152896388 852779112954062 0 427034187248480 1048603937010 0 0 0
#301 27188 0 7896 152899846 853267000490282 0 427043845492614 701812592320 0 0 0
class dstat_plugin(dstat):
def __init__(self):
self.nick = ('usr', 'sys', 'idl', 'nic')
self.type = 'p'
self.width = 3
self.scale = 34
self.open('/proc/vz/vestat')
self.cols = 4
def check(self):
info(1, 'Module %s is still experimental.' % self.filename)
def discover(self, *list):
ret = []
for l in self.splitlines():
if len(l) < 6 or l[0] == 'VEID': continue
ret.append(l[0])
ret.sort()
for item in list: ret.append(item)
return ret
def name(self):
ret = []
for name in self.vars:
if name == 'total':
ret.append('total ve usage')
else:
ret.append('ve ' + name + ' usage')
return ret
def vars(self):
ret = []
if not op.full:
list = ('total', )
else:
list = self.discover
for name in list:
if name in self.discover + ['total']:
ret.append(name)
return ret
def extract(self):
self.set2['total'] = [0, 0, 0, 0]
for l in self.splitlines():
if len(l) < 6 or l[0] == 'VEID': continue
name = l[0]
self.set2[name] = ( int(l[1]), int(l[3]), int(l[4]) - int(l[1]) - int(l[2]) - int(l[3]), int(l[2]) )
self.set2['total'] = ( self.set2['total'][0] + int(l[1]), self.set2['total'][1] + int(l[3]), self.set2['total'][2] + int(l[4]) - int(l[1]) - int(l[2]) - int(l[3]), self.set2['total'][3] + int(l[2]) )
for name in self.vars:
for i in range(self.cols):
self.val[name][i] = 100.0 * (self.set2[name][i] - self.set1[name][i]) / (sum(self.set2[name]) - sum(self.set1[name]))
if step == op.delay:
self.set1.update(self.set2)
# vim:ts=4:sw=4:et
| 2,288
|
Python
|
.py
| 54
| 33.074074
| 211
| 0.501799
|
dstat-real/dstat
| 1,340
| 330
| 1
|
GPL-2.0
|
9/5/2024, 5:10:30 PM (Europe/Amsterdam)
|
8,247
|
dstat_fuse.py
|
dstat-real_dstat/plugins/dstat_fuse.py
|
### Author: Vikas Gorur (http://github.com/vikasgorur)
class dstat_plugin(dstat):
"""
Waiting calls on mounted FUSE filesystems
Displays the number of waiting calls on all mounted FUSE filesystems.
"""
def __init__(self):
self.name = 'fuse'
self.type = 'd'
self.fusectl_path = "/sys/fs/fuse/connections/"
self.dirs = []
def check(self):
info(1, "Module %s is still experimental." % self.filename)
if not os.path.exists(self.fusectl_path):
raise Exception('%s not mounted' % self.fusectl_path)
if len(os.listdir(self.fusectl_path)) == 0:
raise Exception('No fuse filesystems mounted')
def vars(self):
self.dirs = os.listdir(self.fusectl_path)
atleast_one_ok = False
for d in self.dirs:
if os.access(self.fusectl_path + d + "/waiting", os.R_OK):
atleast_one_ok = True
if not atleast_one_ok:
raise Exception('User is not root or no fuse filesystems mounted')
return self.dirs
def extract(self):
for d in self.dirs:
path = self.fusectl_path + d + "/waiting"
if os.path.exists(path):
line = dopen(path).readline()
self.val[d] = int(line)
else:
self.val[d] = 0
# vim:ts=4:sw=4:et
| 1,369
|
Python
|
.py
| 35
| 29.685714
| 78
| 0.581255
|
dstat-real/dstat
| 1,340
| 330
| 1
|
GPL-2.0
|
9/5/2024, 5:10:30 PM (Europe/Amsterdam)
|
8,248
|
dstat_jvm_vm.py
|
dstat-real_dstat/plugins/dstat_jvm_vm.py
|
# Author: Roberto Polli <rpolli@redhat.com>
#
# This plugin shows jvm stats using the JVM_PID environment variable.
# Requires the presence of the /tmp/hsperfdata_* directory and
# files created when running java with the profiler enabled.
#
class dstat_plugin(dstat):
def __init__(self):
self.name = 'jvm mem ops '
self.vars = ('fgc', 'heap', 'heap%', 'perm', 'perm%')
self.type = 'f'
self.width = 5
self.scale = 1000
def check(self):
if not os.access('/usr/bin/jstat', os.X_OK):
raise Exception('Needs jstat binary')
try:
self.jvm_pid = int(os.environ.get('JVM_PID', 0))
except Exception:
self.jvm_pid = 0
return True
@staticmethod
def _to_float(s):
return float(s.replace(",", "."))
@staticmethod
def _cmd_splitlines(cmd):
for l in os.popen(cmd):
yield l.strip().split()
def extract(self):
from collections import namedtuple
try:
lines = self._cmd_splitlines(
'/usr/bin/jstat -gc %s' % self.jvm_pid)
headers = next(lines)
DStatParser = namedtuple('DStatParser', headers)
line = next(lines)
if line:
stats = DStatParser(*[self._to_float(x) for x in line])
# print(stats)
self.set2['cls'] = 0
self.set2['fgc'] = int(stats.FGC)
self.set2['heap'] = (
stats.S0C + stats.S1C + stats.EC + stats.OC)
self.set2['heapu'] = (
stats.S0U + stats.S1U + stats.EU + stats.OU)
# Use MetaSpace on jdk8
try:
self.set2['perm'] = stats.PC
self.set2['permu'] = stats.PU
except AttributeError:
self.set2['perm'] = stats.MC
self.set2['permu'] = stats.MU
# Evaluate statistics on memory usage.
for name in ('heap', 'perm'):
self.set2[name + '%'] = 100 * self.set2[
name + 'u'] / self.set2[name]
self.set2[name] /= 1024
for name in self.vars:
self.val[name] = self.set2[name]
if step == op.delay:
self.set1.update(self.set2)
except IOError as e:
if op.debug > 1:
print('%s: lost pipe to jstat, %s' % (self.filename, e))
for name in self.vars:
self.val[name] = -1
except Exception as e:
if op.debug > 1:
print('%s: exception' % e)
for name in self.vars:
self.val[name] = -1
# vim:ts=4:sw=4:et
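### Illustrative sketch (not part of the plugin): heap usage as a percentage,
### from the jstat -gc survivor/eden/old capacity and usage columns used above.
def _heap_pct(s0u, s1u, eu, ou, s0c, s1c, ec, oc):
    return 100.0 * (s0u + s1u + eu + ou) / (s0c + s1c + ec + oc)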
| 2,765
|
Python
|
.py
| 72
| 26.361111
| 72
| 0.503734
|
dstat-real/dstat
| 1,340
| 330
| 1
|
GPL-2.0
|
9/5/2024, 5:10:30 PM (Europe/Amsterdam)
|
8,249
|
dstat_disk_wait.py
|
dstat-real_dstat/plugins/dstat_disk_wait.py
|
### Author: David Nicklay <david-d$nicklay,com>
### Modified from disk-util: Dag Wieers <dag$wieers,com>
class dstat_plugin(dstat):
"""
Read and Write average wait times of block devices.
Displays the average read and write wait times of block devices
"""
def __init__(self):
self.nick = ('rawait', 'wawait')
self.type = 'f'
self.width = 4
self.scale = 1
self.diskfilter = re.compile('^([hsv]d[a-z]+\d+|cciss/c\d+d\d+p\d+|dm-\d+|md\d+|mmcblk\d+p\d0|VxVM\d+)$')
self.open('/proc/diskstats')
self.cols = 1
self.struct = dict( rd_ios=0, wr_ios=0, rd_ticks=0, wr_ticks=0 )
def discover(self, *objlist):
ret = []
for l in self.splitlines():
if len(l) < 13: continue
if l[3:] == ['0',] * 11: continue
name = l[2]
ret.append(name)
for item in objlist: ret.append(item)
if not ret:
raise Exception('No suitable block devices found to monitor')
return ret
def vars(self):
ret = []
if op.disklist:
varlist = op.disklist
else:
varlist = []
blockdevices = [os.path.basename(filename) for filename in glob.glob('/sys/block/*')]
for name in self.discover:
if self.diskfilter.match(name): continue
if name not in blockdevices: continue
varlist.append(name)
varlist.sort()
for name in varlist:
if name in self.discover:
ret.append(name)
return ret
def name(self):
return self.vars
def extract(self):
for l in self.splitlines():
if len(l) < 13: continue
if l[5] == '0' and l[9] == '0': continue
if l[3:] == ['0',] * 11: continue
name = l[2]
if name not in self.vars: continue
self.set2[name] = dict(
rd_ios = int(l[3]),
wr_ios = int(l[7]),
rd_ticks = int(l[6]),
wr_ticks = int(l[10]),
)
for name in self.vars:
rd_tput = self.set2[name]['rd_ios'] - self.set1[name]['rd_ios']
wr_tput = self.set2[name]['wr_ios'] - self.set1[name]['wr_ios']
if rd_tput:
rd_wait = ( self.set2[name]['rd_ticks'] - self.set1[name]['rd_ticks'] ) * 1.0 / rd_tput
else:
rd_wait = 0
if wr_tput:
wr_wait = ( self.set2[name]['wr_ticks'] - self.set1[name]['wr_ticks'] ) * 1.0 / wr_tput
else:
wr_wait = 0
self.val[name] = ( rd_wait, wr_wait )
if step == op.delay:
self.set1.update(self.set2)
| 2,769
|
Python
|
.py
| 72
| 27.388889
| 113
| 0.50744
|
dstat-real/dstat
| 1,340
| 330
| 1
|
GPL-2.0
|
9/5/2024, 5:10:30 PM (Europe/Amsterdam)
|
8,250
|
dstat_disk_avgrq.py
|
dstat-real_dstat/plugins/dstat_disk_avgrq.py
|
### Author: Dag Wieers <dag$wieers,com>
class dstat_plugin(dstat):
"""
The average size (in sectors) of the requests that were issued
to the device.
"""
def __init__(self):
self.version = 2
self.nick = ('avgrq',)
self.type = 'f'
self.width = 4
self.scale = 10
self.diskfilter = re.compile('^([hsv]d[a-z]+\d+|cciss/c\d+d\d+p\d+|dm-\d+|md\d+|mmcblk\d+p\d0|VxVM\d+)$')
self.open('/proc/diskstats')
self.cols = 1
self.struct = dict( nr_ios=0, rd_sect=0, wr_sect=0 )
def discover(self, *objlist):
ret = []
for l in self.splitlines():
if len(l) < 13: continue
if l[3:] == ['0',] * 11: continue
name = l[2]
ret.append(name)
for item in objlist: ret.append(item)
if not ret:
raise Exception('No suitable block devices found to monitor')
return ret
def vars(self):
ret = []
if op.disklist:
varlist = op.disklist
else:
varlist = []
blockdevices = [os.path.basename(filename) for filename in glob.glob('/sys/block/*')]
for name in self.discover:
if self.diskfilter.match(name): continue
if name not in blockdevices: continue
varlist.append(name)
varlist.sort()
for name in varlist:
if name in self.discover:
ret.append(name)
return ret
def name(self):
return self.vars
def extract(self):
for l in self.splitlines():
if len(l) < 13: continue
if l[3:] == ['0',] * 11: continue
if l[3] == '0' and l[7] == '0': continue
name = l[2]
if name not in self.vars or name == 'total': continue
self.set2[name] = dict(
nr_ios = int(l[3])+int(l[7]),
rd_sect = int(l[9]),
wr_sect = int(l[11]),
)
for name in self.vars:
tput = ( self.set2[name]['nr_ios'] - self.set1[name]['nr_ios'] )
if tput:
ticks = self.set2[name]['rd_sect'] - self.set1[name]['rd_sect'] + \
self.set2[name]['wr_sect'] - self.set1[name]['wr_sect']
self.val[name] = ( ticks * 1.0 / tput, )
else:
self.val[name] = ( 0.0, )
if step == op.delay:
self.set1.update(self.set2)
| 2,489
|
Python
|
.py
| 67
| 26.059701
| 113
| 0.496686
|
dstat-real/dstat
| 1,340
| 330
| 1
|
GPL-2.0
|
9/5/2024, 5:10:30 PM (Europe/Amsterdam)
|
8,251
|
dstat_top_int.py
|
dstat-real_dstat/plugins/dstat_top_int.py
|
### Author: Dag Wieers <dag@wieers.com>
class dstat_plugin(dstat):
"""
Top interrupt
Displays the name of the most frequent interrupt
"""
def __init__(self):
self.name = 'most frequent'
self.vars = ('interrupt',)
self.type = 's'
self.width = 20
self.scale = 0
self.intset1 = [ ]
self.open('/proc/stat')
self.names = self.names()
def names(self):
ret = {}
for line in dopen('/proc/interrupts'):
l = line.split()
if len(l) <= cpunr: continue
l1 = l[0].split(':')[0]
### Cleanup possible names from /proc/interrupts
l2 = ' '.join(l[cpunr+3:])
l2 = l2.replace('_hcd:', '/')
l2 = re.sub('@pci[:\d+\.]+', '', l2)
l2 = re.sub('ahci\[[:\da-z\.]+\]', 'ahci', l2)
ret[l1] = l2
return ret
def extract(self):
self.output = ''
self.val['total'] = 0.0
for line in self.splitlines():
if line[0] == 'intr':
self.intset2 = [ int(i) for i in line[3:] ]
if not self.intset1:
self.intset1 = [ 0 for i in self.intset2 ]
for i in range(len(self.intset2)):
total = (self.intset2[i] - self.intset1[i]) * 1.0 / elapsed
### Put the highest value in self.val
if total > self.val['total']:
if str(i+1) in self.names:
self.val['name'] = self.names[str(i+1)]
else:
self.val['name'] = 'int ' + str(i+1)
self.val['total'] = total
if step == op.delay:
self.intset1 = self.intset2
if self.val['total'] != 0.0:
self.output = '%-15s%s' % (self.val['name'], cprint(self.val['total'], 'd', 5, 1000))
def showcsv(self):
return '%s / %f' % (self.val['name'], self.val['total'])
# vim:ts=4:sw=4:et
| 1,948
|
Python
|
.py
| 52
| 27.019231
| 97
| 0.483289
|
dstat-real/dstat
| 1,340
| 330
| 1
|
GPL-2.0
|
9/5/2024, 5:10:30 PM (Europe/Amsterdam)
|
8,252
|
dstat_sendmail.py
|
dstat-real_dstat/plugins/dstat_sendmail.py
|
### Author: Dag Wieers <dag@wieers.com>
### FIXME: Should read /var/log/mail/statistics or /etc/mail/statistics (format ?)
class dstat_plugin(dstat):
def __init__(self):
self.name = 'sendmail'
self.vars = ('queue',)
self.type = 'd'
self.width = 4
self.scale = 100
def check(self):
if not os.access('/var/spool/mqueue', os.R_OK):
raise Exception('Cannot access sendmail queue')
def extract(self):
self.val['queue'] = len(glob.glob('/var/spool/mqueue/qf*'))
# vim:ts=4:sw=4:et
| 560
|
Python
|
.py
| 15
| 30.666667
| 82
| 0.603704
|
dstat-real/dstat
| 1,340
| 330
| 1
|
GPL-2.0
|
9/5/2024, 5:10:30 PM (Europe/Amsterdam)
|
8,253
|
dstat_vmk_hba.py
|
dstat-real_dstat/plugins/dstat_vmk_hba.py
|
### Author: Bert de Bruijn <bert+dstat$debruijn,be>
### VMware ESX kernel vmhba stats
### Displays kernel vmhba statistics on VMware ESX servers
# NOTE TO USERS: command-line plugin configuration is not yet possible, so I've
# "borrowed" the -D argument.
# EXAMPLES:
# # dstat --vmkhba -D vmhba1,vmhba2,total
# # dstat --vmkhba -D vmhba0
# You can even combine the Linux and VMkernel diskstats (but the "total" argument
# will be used by both).
# # dstat --vmkhba -d -D sda,vmhba1
class dstat_plugin(dstat):
def __init__(self):
self.name = 'vmkhba'
self.nick = ('read', 'writ')
self.cols = 2
def discover(self, *list):
# discover will list all vmhba's found.
# we might want to filter out the unused vmhba's (read stats, compare with ['0', ] * 13)
ret = []
try:
list = os.listdir('/proc/vmware/scsi/')
except:
raise Exception('Needs VMware ESX')
for name in list:
for line in dopen('/proc/vmware/scsi/%s/stats' % name).readlines():
l = line.split()
if len(l) < 13: continue
if l[0] == 'cmds': continue
if l == ['0', ] * 13: continue
ret.append(name)
return ret
def vars(self):
# vars will take the argument list - when implemented - , use total, or will use discover + total
ret = []
if op.disklist:
list = op.disklist
#elif not op.full:
# list = ('total', )
else:
list = self.discover
list.sort()
for name in list:
if name in self.discover + ['total']:
ret.append(name)
return ret
def check(self):
try:
os.listdir('/proc/vmware')
except:
raise Exception('Needs VMware ESX')
info(1, 'The vmkhba module is an EXPERIMENTAL module.')
def extract(self):
self.set2['total'] = (0, 0)
for name in self.vars:
self.set2[name] = (0, 0)
for name in os.listdir('/proc/vmware/scsi/'):
for line in dopen('/proc/vmware/scsi/%s/stats' % name).readlines():
l = line.split()
if len(l) < 13: continue
if l[0] == 'cmds': continue
if l[2] == '0' and l[4] == '0': continue
if l == ['0', ] * 13: continue
self.set2['total'] = ( self.set2['total'][0] + int(l[2]), self.set2['total'][1] + int(l[4]) )
if name in self.vars and name != 'total':
self.set2[name] = ( int(l[2]), int(l[4]) )
for name in self.set2:
self.val[name] = list(map(lambda x, y: (y - x) * 1024.0 / elapsed, self.set1[name], self.set2[name]))
if step == op.delay:
self.set1.update(self.set2)
| 2,856
|
Python
|
.py
| 70
| 30.957143
| 117
| 0.538711
|
dstat-real/dstat
| 1,340
| 330
| 1
|
GPL-2.0
|
9/5/2024, 5:10:30 PM (Europe/Amsterdam)
|
8,254
|
dstat_top_latency_avg.py
|
dstat-real_dstat/plugins/dstat_top_latency_avg.py
|
### Dstat highest average latency plugin
### Displays the process with the highest average scheduling latency
###
### Authority: dag@wieers.com
### For more information, see:
### http://eaglet.rain.com/rick/linux/schedstat/
class dstat_plugin(dstat):
def __init__(self):
self.name = 'highest average'
self.vars = ('latency process',)
self.type = 's'
self.width = 17
self.scale = 0
self.pidset1 = {}
def check(self):
if not os.access('/proc/self/schedstat', os.R_OK):
raise Exception('Kernel has no scheduler statistics [CONFIG_SCHEDSTATS], use at least 2.6.12')
def extract(self):
self.output = ''
self.pidset2 = {}
self.val['result'] = 0
for pid in proc_pidlist():
try:
### Reset values
if pid not in self.pidset1:
self.pidset1[pid] = {'wait_ticks': 0, 'ran': 0}
### Extract name
name = proc_splitline('/proc/%s/stat' % pid)[1][1:-1]
### Extract counters
l = proc_splitline('/proc/%s/schedstat' % pid)
except IOError:
continue
except IndexError:
continue
if len(l) != 3: continue
self.pidset2[pid] = {'wait_ticks': int(l[1]), 'ran': int(l[2])}
if self.pidset2[pid]['ran'] - self.pidset1[pid]['ran'] > 0:
avgwait = (self.pidset2[pid]['wait_ticks'] - self.pidset1[pid]['wait_ticks']) * 1.0 / (self.pidset2[pid]['ran'] - self.pidset1[pid]['ran']) / elapsed
else:
avgwait = 0
### Get the process that spends the most jiffies
if avgwait > self.val['result']:
self.val['result'] = avgwait
self.val['pid'] = pid
self.val['name'] = getnamebypid(pid, name)
if step == op.delay:
self.pidset1 = self.pidset2
if self.val['result'] != 0.0:
self.output = '%-*s%s' % (self.width-4, self.val['name'][0:self.width-4], cprint(self.val['result'], 'f', 4, 100))
### Debug (show PID)
# self.output = '%*s %-*s' % (5, self.val['pid'], self.width-6, self.val['name'])
def showcsv(self):
return '%s / %.4f' % (self.val['name'], self.val['result'])
# vim:ts=4:sw=4:et
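### Illustrative sketch (not part of the plugin): average wait per scheduling
### slot from two /proc/<pid>/schedstat samples (wait_ticks and ran counters).
def _avg_wait(wait1, ran1, wait2, ran2, elapsed):
    runs = ran2 - ran1
    return (wait2 - wait1) * 1.0 / runs / elapsed if runs > 0 else 0.0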
| 2,367
|
Python
|
.py
| 54
| 33.148148
| 165
| 0.525239
|
dstat-real/dstat
| 1,340
| 330
| 1
|
GPL-2.0
|
9/5/2024, 5:10:30 PM (Europe/Amsterdam)
|
8,255
|
dstat_nfs3_ops.py
|
dstat-real_dstat/plugins/dstat_nfs3_ops.py
|
### Author: Dag Wieers <dag@wieers.com>
class dstat_plugin(dstat):
def __init__(self):
self.name = 'extended nfs3 client operations'
self.nick = ('null', 'gatr', 'satr', 'look', 'aces', 'rdln', 'read', 'writ', 'crea', 'mkdr', 'syml', 'mknd', 'rm', 'rmdr', 'ren', 'link', 'rdir', 'rdr+', 'fstt', 'fsnf', 'path', 'cmmt')
self.vars = ('null', 'getattr', 'setattr', 'lookup', 'access', 'readlink', 'read', 'write', 'create', 'mkdir', 'symlink', 'mknod', 'remove', 'rmdir', 'rename', 'link', 'readdir', 'readdirplus', 'fsstat', 'fsinfo', 'pathconf', 'commit')
self.type = 'd'
self.width = 5
self.scale = 1000
self.open('/proc/net/rpc/nfs')
def check(self):
info(1, 'Module %s is still experimental.' % self.filename)
def extract(self):
for l in self.splitlines():
if not l or l[0] != 'proc3': continue
for i, name in enumerate(self.vars):
self.set2[name] = int(l[i+2])
for name in self.vars:
self.val[name] = (self.set2[name] - self.set1[name]) * 1.0 / elapsed
if step == op.delay:
self.set1.update(self.set2)
# vim:ts=4:sw=4:et
| 1,194
|
Python
|
.py
| 22
| 45.545455
| 243
| 0.554889
|
dstat-real/dstat
| 1,340
| 330
| 1
|
GPL-2.0
|
9/5/2024, 5:10:30 PM (Europe/Amsterdam)
|
8,256
|
dstat_top_childwait.py
|
dstat-real_dstat/plugins/dstat_top_childwait.py
|
### Dstat most expensive process plugin
### Displays the name of the most expensive process
###
### Authority: dag@wieers.com
global cpunr
class dstat_plugin(dstat):
def __init__(self):
self.name = 'most waiting for'
self.vars = ('child process',)
self.type = 's'
self.width = 16
self.scale = 0
def extract(self):
self.set2 = {}
self.val['max'] = 0.0
for pid in proc_pidlist():
try:
### Using dopen() will cause too many open files
l = proc_splitline('/proc/%s/stat' % pid)
except IOError:
continue
            if len(l) < 17: continue
### Reset previous value if it doesn't exist
if pid not in self.set1:
self.set1[pid] = 0
self.set2[pid] = int(l[15]) + int(l[16])
usage = (self.set2[pid] - self.set1[pid]) * 1.0 / elapsed / cpunr
### Is it a new topper ?
if usage <= self.val['max']: continue
self.val['max'] = usage
self.val['name'] = getnamebypid(pid, l[1][1:-1])
self.val['pid'] = pid
### Debug (show PID)
# self.val['process'] = '%*s %-*s' % (5, self.val['pid'], self.width-6, self.val['name'])
if step == op.delay:
self.set1 = self.set2
def show(self):
if self.val['max'] == 0.0:
return '%-*s' % (self.width, '')
else:
return '%s%-*s%s' % (theme['default'], self.width-3, self.val['name'][0:self.width-3], cprint(self.val['max'], 'p', 3, 34))
def showcsv(self):
return '%s / %d%%' % (self.val['name'], self.val['max'])
# vim:ts=4:sw=4:et
| 1,716
|
Python
|
.py
| 44
| 29.431818
| 135
| 0.509946
|
dstat-real/dstat
| 1,340
| 330
| 1
|
GPL-2.0
|
9/5/2024, 5:10:30 PM (Europe/Amsterdam)
|
8,257
|
dstat_mongodb_mem.py
|
dstat-real_dstat/plugins/dstat_mongodb_mem.py
|
### Author: <gianfranco@mongodb.com>
global mongodb_user
mongodb_user = os.getenv('DSTAT_MONGODB_USER') or os.getenv('USER')
global mongodb_pwd
mongodb_pwd = os.getenv('DSTAT_MONGODB_PWD')
global mongodb_host
mongodb_host = os.getenv('DSTAT_MONGODB_HOST') or '127.0.0.1:27017'
class dstat_plugin(dstat):
"""
Plugin for MongoDB.
"""
def __init__(self):
global pymongo
import pymongo
try:
self.m = pymongo.MongoClient(mongodb_host)
if mongodb_pwd:
self.m.admin.authenticate(mongodb_user, mongodb_pwd)
self.db = self.m.admin
except Exception as e:
raise Exception('Cannot interface with MongoDB server: %s' % e)
line = self.db.command("serverStatus")
if 'storageEngine' in line:
self.storageEngine = line.get('storageEngine').get('name')
else:
self.storageEngine = 'mmapv1'
self.name = 'mongodb mem'
self.nick = ('res', 'virt')
self.vars = ('mem.resident', 'mem.virtual')
self.type = 'd'
self.width = 5
self.scale = 2
self.lastVal = {}
if self.storageEngine == 'mmapv1':
self.nick = self.nick + ('map', 'mapj', 'flt')
self.vars = self.vars + ('mem.mapped', 'mem.mappedWithJournal', 'extra_info.page_faults')
def extract(self):
status = self.db.command("serverStatus")
for name in self.vars:
            if name == 'extra_info.page_faults':
if not name in self.lastVal:
self.lastVal[name] = int(self.getDoc(status, name))
self.val[name] = (int(self.getDoc(status, name)) - self.lastVal[name])
self.lastVal[name] = self.getDoc(status, name)
else:
self.val[name] = (int(self.getDoc(status, name)))
def getDoc(self, dic, doc):
par = doc.split('.')
sdic = dic
for p in par:
sdic = sdic.get(p)
return sdic
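### Illustrative sketch (not part of the plugin): the dotted-path lookup that
### getDoc() above performs on the serverStatus document.
def _get_doc(dic, path):
    for part in path.split('.'):
        dic = dic.get(part)
    return dic
### e.g. _get_doc({'mem': {'resident': 42}}, 'mem.resident') -> 42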
| 1,830
|
Python
|
.py
| 52
| 29.826923
| 95
| 0.64152
|
dstat-real/dstat
| 1,340
| 330
| 1
|
GPL-2.0
|
9/5/2024, 5:10:30 PM (Europe/Amsterdam)
|
8,258
|
dstat_nfs3.py
|
dstat-real_dstat/plugins/dstat_nfs3.py
|
### Author: Dag Wieers <dag@wieers.com>
class dstat_plugin(dstat):
def __init__(self):
self.name = 'nfs3 client'
self.nick = ('read', 'writ', 'rdir', 'othr', 'fs', 'cmmt')
self.vars = ('read', 'write', 'readdir', 'other', 'filesystem', 'commit')
self.type = 'd'
self.width = 5
self.scale = 1000
self.open('/proc/net/rpc/nfs')
def extract(self):
for l in self.splitlines():
if not l or l[0] != 'proc3': continue
self.set2['read'] = int(l[8])
self.set2['write'] = int(l[9])
self.set2['readdir'] = int(l[18]) + int(l[19])
self.set2['other'] = int(l[3]) + int(l[4]) + int(l[5]) + int(l[6]) + int(l[7]) + int(l[10]) + int(l[11]) + int(l[12]) + int(l[13]) + int(l[14]) + int(l[15]) + int(l[16]) + int(l[17])
self.set2['filesystem'] = int(l[20]) + int(l[21]) + int(l[22])
self.set2['commit'] = int(l[23])
for name in self.vars:
self.val[name] = (self.set2[name] - self.set1[name]) * 1.0 / elapsed
if step == op.delay:
self.set1.update(self.set2)
# vim:ts=4:sw=4:et
| 1,159
|
Python
|
.py
| 24
| 38.916667
| 194
| 0.50708
|
dstat-real/dstat
| 1,340
| 330
| 1
|
GPL-2.0
|
9/5/2024, 5:10:30 PM (Europe/Amsterdam)
|
8,259
|
dstat_postfix.py
|
dstat-real_dstat/plugins/dstat_postfix.py
|
### Author: Dag Wieers <dag@wieers.com>
class dstat_plugin(dstat):
def __init__(self):
self.name = 'postfix'
self.nick = ('inco', 'actv', 'dfrd', 'bnce', 'defr')
self.vars = ('incoming', 'active', 'deferred', 'bounce', 'defer')
self.type = 'd'
self.width = 4
self.scale = 100
def check(self):
if not os.access('/var/spool/postfix/active', os.R_OK):
raise Exception('Cannot access postfix queues')
def extract(self):
for item in self.vars:
self.val[item] = len(glob.glob('/var/spool/postfix/'+item+'/*/*'))
# vim:ts=4:sw=4:et
| 629
|
Python
|
.py
| 16
| 31.8125
| 78
| 0.569787
|
dstat-real/dstat
| 1,340
| 330
| 1
|
GPL-2.0
|
9/5/2024, 5:10:30 PM (Europe/Amsterdam)
|
8,260
|
dstat_snmp_load.py
|
dstat-real_dstat/plugins/dstat_snmp_load.py
|
### Author: Dag Wieers <dag$wieers,com>
class dstat_plugin(dstat):
def __init__(self):
self.name = 'load avg'
self.nick = ('1m', '5m', '15m')
self.vars = ('load1', 'load5', 'load15')
self.type = 'f'
self.width = 4
self.scale = 0.5
self.server = os.getenv('DSTAT_SNMPSERVER') or '192.168.1.1'
self.community = os.getenv('DSTAT_SNMPCOMMUNITY') or 'public'
def check(self):
try:
global cmdgen
from pysnmp.entity.rfc3413.oneliner import cmdgen
except:
raise Exception('Needs pysnmp and pyasn1 modules')
def extract(self):
list(map(lambda x, y: self.val.update({x: float(y)}), self.vars, snmpwalk(self.server, self.community, (1,3,6,1,4,1,2021,10,1,3))))
# vim:ts=4:sw=4:et
| 809
|
Python
|
.py
| 20
| 32.45
| 139
| 0.589809
|
dstat-real/dstat
| 1,340
| 330
| 1
|
GPL-2.0
|
9/5/2024, 5:10:30 PM (Europe/Amsterdam)
|
8,261
|
dstat_squid.py
|
dstat-real_dstat/plugins/dstat_squid.py
|
### Authority: Jason Friedland <thesuperjason@gmail.com>
# This plugin has been tested with:
# - Dstat 0.6.7
# - CentOS release 5.4 (Final)
# - Python 2.4.3
# - Squid 2.6 and 2.7
global squidclient_options
squidclient_options = os.getenv('DSTAT_SQUID_OPTS') or '' # -p 8080
class dstat_plugin(dstat):
'''
Provides various Squid statistics.
'''
def __init__(self):
self.name = 'squid status'
self.type = 's'
self.width = 5
self.scale = 1000
self.vars = ('Number of file desc currently in use',
'CPU Usage, 5 minute avg',
'Total accounted',
'Number of clients accessing cache',
'Mean Object Size')
self.nick = ('fdesc',
'cpu5',
'mem',
'clnts',
'objsz')
def check(self):
if not os.access('/usr/sbin/squidclient', os.X_OK):
raise Exception('Needs squidclient binary')
cmd_test('/usr/sbin/squidclient %s mgr:info' % squidclient_options)
return True
def extract(self):
try:
            for l in cmd_splitlines('/usr/sbin/squidclient %s mgr:info' % squidclient_options, ':'):
                if l[0].strip() in self.vars:
                    self.val[l[0].strip()] = l[1].strip()
except IOError as e:
if op.debug > 1: print('%s: lost pipe to squidclient, %s' % (self.filename, e))
for name in self.vars: self.val[name] = -1
except Exception as e:
            if op.debug > 1: print('%s: exception %s' % (self.filename, e))
for name in self.vars: self.val[name] = -1
# vim:ts=4:sw=4:et
| 1,666
|
Python
|
.py
| 45
| 28.177778
| 100
| 0.562655
|
dstat-real/dstat
| 1,340
| 330
| 1
|
GPL-2.0
|
9/5/2024, 5:10:30 PM (Europe/Amsterdam)
|
8,262
|
dstat_mysql5_innodb.py
|
dstat-real_dstat/plugins/dstat_mysql5_innodb.py
|
### Author: HIROSE Masaaki <hirose31 _at_ gmail.com>
global mysql_options
mysql_options = os.getenv('DSTAT_MYSQL') or ''
global target_status
global _basic_status
global _extra_status
_basic_status = (
('Queries' , 'qps'),
('Com_select' , 'sel/s'),
('Com_insert' , 'ins/s'),
('Com_update' , 'upd/s'),
('Com_delete' , 'del/s'),
('Connections' , 'con/s'),
('Threads_connected' , 'thcon'),
('Threads_running' , 'thrun'),
('Slow_queries' , 'slow'),
)
_extra_status = (
('Innodb_rows_read' , 'r#read'),
('Innodb_rows_inserted' , 'r#ins'),
('Innodb_rows_updated' , 'r#upd'),
('Innodb_rows_deleted' , 'r#del'),
('Innodb_data_reads' , 'rdphy'),
('Innodb_buffer_pool_read_requests', 'rdlgc'),
('Innodb_data_writes' , 'wrdat'),
('Innodb_log_writes' , 'wrlog'),
('innodb_buffer_pool_pages_dirty_pct', '%dirty'),
)
global calculating_status
calculating_status = (
'Innodb_buffer_pool_pages_total',
'Innodb_buffer_pool_pages_dirty',
)
global gauge
gauge = {
'Slow_queries' : 1,
'Threads_connected' : 1,
'Threads_running' : 1,
}
class dstat_plugin(dstat):
"""
mysql5-innodb, mysql5-innodb-basic, mysql5-innodb-extra
    display various metrics on MySQL5 and InnoDB.
"""
def __init__(self):
self.name = 'MySQL5 InnoDB '
self.type = 'd'
self.width = 5
self.scale = 1000
def check(self):
if self.filename.find("basic") >= 0:
target_status = _basic_status
self.name += 'basic'
elif self.filename.find("extra") >= 0:
target_status = _extra_status
self.name += 'extra'
elif self.filename.find("full") >= 0:
target_status = _basic_status + _extra_status
self.name += 'full'
else:
target_status = _basic_status + _extra_status
self.name += 'full'
self.vars = tuple( map((lambda e: e[0]), target_status) )
self.nick = tuple( map((lambda e: e[1]), target_status) )
mysql_candidate = ('/usr/bin/mysql', '/usr/local/bin/mysql')
mysql_cmd = ''
for mc in mysql_candidate:
if os.access(mc, os.X_OK):
mysql_cmd = mc
break
if mysql_cmd:
try:
self.stdin, self.stdout, self.stderr = dpopen('%s -n %s' % (mysql_cmd, mysql_options))
except IOError:
raise Exception('Cannot interface with MySQL binary')
return True
raise Exception('Needs MySQL binary')
def extract(self):
try:
self.stdin.write('show global status;\n')
for line in readpipe(self.stdout):
if line == '':
break
s = line.split()
if s[0] in self.vars:
self.set2[s[0]] = float(s[1])
elif s[0] in calculating_status:
self.set2[s[0]] = float(s[1])
for k in self.vars:
if k in gauge:
self.val[k] = self.set2[k]
elif k == 'innodb_buffer_pool_pages_dirty_pct':
self.val[k] = self.set2['Innodb_buffer_pool_pages_dirty'] / self.set2['Innodb_buffer_pool_pages_total'] * 100
else:
self.val[k] = (self.set2[k] - self.set1[k]) * 1.0 / elapsed
if step == op.delay:
self.set1.update(self.set2)
except IOError as e:
if op.debug > 1: print('%s: lost pipe to mysql, %s' % (self.filename, e))
for name in self.vars: self.val[name] = -1
except Exception as e:
            if op.debug > 1: print('%s: exception %s' % (self.filename, e))
for name in self.vars: self.val[name] = -1
| 4,110
|
Python
|
.py
| 103
| 30.640777
| 129
| 0.500251
|
dstat-real/dstat
| 1,340
| 330
| 1
|
GPL-2.0
|
9/5/2024, 5:10:30 PM (Europe/Amsterdam)
|
8,263
|
dstat_ib.py
|
dstat-real_dstat/plugins/dstat_ib.py
|
### Author: Dmitry Fedin <dmitry.fedin@gmail.com>
class dstat_plugin(dstat):
ibdirname = '/sys/class/infiniband'
"""
Bytes received or sent through infiniband/RoCE interfaces
Usage:
dstat --ib -N <adapter name>:<port>,total
default dstat --ib is the same as
dstat --ib -N total
example for Mellanox adapter, transfering data via port 2
dstat --ib -Nmlx4_0:2
"""
def __init__(self):
self.nick = ('recv', 'send')
self.type = 'd'
self.cols = 2
self.width = 6
def discover(self, *objlist):
ret = []
for subdirname in os.listdir(self.ibdirname):
if not os.path.isdir(os.path.join(self.ibdirname,subdirname)) : continue
device_dir = os.path.join(self.ibdirname, subdirname, 'ports')
for subdirname2 in os.listdir(device_dir) :
if not os.path.isdir(os.path.join(device_dir,subdirname2)): continue
name = subdirname + ":" + subdirname2
ret.append(name)
ret.sort()
for item in objlist: ret.append(item)
return ret
def vars(self):
ret = []
if op.netlist:
varlist = op.netlist
elif not op.full:
varlist = ('total',)
else:
varlist = self.discover
varlist.sort()
for name in varlist:
if name in self.discover + ['total']:
ret.append(name)
if not ret:
raise Exception('No suitable network interfaces found to monitor')
return ret
def name(self):
return ['ib/'+name for name in self.vars]
def extract(self):
self.set2['total'] = [0, 0]
ifaces = self.discover
for name in self.vars: self.set2[name] = [0, 0]
for name in ifaces:
            l = name.split(':')
if len(l) < 2:
continue
rcv_counter_name=os.path.join('/sys/class/infiniband', l[0], 'ports', l[1], 'counters_ext/port_rcv_data_64')
xmit_counter_name=os.path.join('/sys/class/infiniband', l[0], 'ports', l[1], 'counters_ext/port_xmit_data_64')
rcv_lines = dopen(rcv_counter_name).readlines()
xmit_lines = dopen(xmit_counter_name).readlines()
if len(rcv_lines) < 1 or len(xmit_lines) < 1:
continue
rcv_value = int(rcv_lines[0])
xmit_value = int(xmit_lines[0])
if name in self.vars :
self.set2[name] = (rcv_value, xmit_value)
self.set2['total'] = ( self.set2['total'][0] + rcv_value, self.set2['total'][1] + xmit_value)
if update:
for name in self.set2:
self.val[name] = [
(self.set2[name][0] - self.set1[name][0]) * 4.0 / elapsed,
(self.set2[name][1] - self.set1[name][1]) * 4.0/ elapsed,
]
if self.val[name][0] < 0: self.val[name][0] += maxint + 1
if self.val[name][1] < 0: self.val[name][1] += maxint + 1
if step == op.delay:
self.set1.update(self.set2)
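### Illustrative sketch (not part of the plugin): the port data counters count
### 4-byte words, hence the * 4.0 above when converting to bytes per second.
def _ib_bytes_per_sec(words1, words2, elapsed):
    return (words2 - words1) * 4.0 / elapsed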
| 3,140
|
Python
|
.py
| 75
| 30.826667
| 122
| 0.543016
|
dstat-real/dstat
| 1,340
| 330
| 1
|
GPL-2.0
|
9/5/2024, 5:10:30 PM (Europe/Amsterdam)
|
8,264
|
dstat_md_status.py
|
dstat-real_dstat/plugins/dstat_md_status.py
|
### Author: Bert de Bruijn <bert+dstat@debruijn.be>
class dstat_plugin(dstat):
"""
Recovery state of software RAID rebuild.
Prints completed recovery percentage and rebuild speed of the md device
that is actively being recovered or resynced.
If no devices are being rebuilt, it displays 100%, 0B. If instead
multiple devices are being rebuilt, it displays the total progress
and total throughput.
"""
def __init__(self):
self.name = 'sw raid'
self.type = 's'
self.scale = 0
self.nick = ('pct speed', )
self.width = 9
self.vars = ('text', )
self.open('/proc/mdstat')
def check(self):
if not os.path.exists('/proc/mdstat'):
raise Exception('Needs kernel md support')
def extract(self):
pct = 0
speed = 0
nr = 0
for l in self.splitlines():
if len(l) < 2: continue
if l[1] in ('recovery', 'reshape', 'resync'):
nr += 1
pct += int(l[3][0:2].strip('.%'))
speed += int(l[6].strip('sped=K/sc')) * 1024
if nr:
pct = pct / nr
else:
pct = 100
self.val['text'] = '%s %s' % (cprint(pct, 'p', 3, 34), cprint(speed, 'd', 5, 1024))
# vim:ts=4:sw=4:et
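### Illustrative sketch (not part of the plugin): pulling percentage and speed
### out of a /proc/mdstat progress line such as
### "[====>....] recovery = 31.4% (...) finish=2.3min speed=102400K/sec"
def _md_progress(pct_token, speed_token):
    pct = int(pct_token[0:2].strip('.%'))
    speed = int(speed_token.strip('sped=K/sc')) * 1024
    return pct, speed
### e.g. _md_progress('31.4%', 'speed=102400K/sec') -> (31, 104857600)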
| 1,314
|
Python
|
.py
| 37
| 26.972973
| 91
| 0.548819
|
dstat-real/dstat
| 1,340
| 330
| 1
|
GPL-2.0
|
9/5/2024, 5:10:30 PM (Europe/Amsterdam)
|
8,265
|
dstat_rpcd.py
|
dstat-real_dstat/plugins/dstat_rpcd.py
|
### Author: Dag Wieers <dag@wieers.com>
class dstat_plugin(dstat):
def __init__(self):
self.name = 'rpc server'
self.nick = ('call', 'erca', 'erau', 'ercl', 'xdrc')
self.vars = ('calls', 'badcalls', 'badauth', 'badclnt', 'xdrcall')
self.type = 'd'
self.width = 5
self.scale = 1000
self.open('/proc/net/rpc/nfsd')
def extract(self):
for l in self.splitlines():
if not l or l[0] != 'rpc': continue
for i, name in enumerate(self.vars):
self.set2[name] = int(l[i+1])
for name in self.vars:
self.val[name] = (self.set2[name] - self.set1[name]) * 1.0 / elapsed
if step == op.delay:
self.set1.update(self.set2)
# vim:ts=4:sw=4:et
| 780
|
Python
|
.py
| 20
| 30.15
| 80
| 0.539073
|
dstat-real/dstat
| 1,340
| 330
| 1
|
GPL-2.0
|
9/5/2024, 5:10:30 PM (Europe/Amsterdam)
|
8,266
|
dstat_top_io.py
|
dstat-real_dstat/plugins/dstat_top_io.py
|
### Author: Dag Wieers <dag@wieers.com>
class dstat_plugin(dstat):
"""
Top most expensive I/O process
Displays the name of the most expensive I/O process
"""
def __init__(self):
self.name = 'most expensive'
self.vars = ('i/o process',)
self.type = 's'
self.width = 22
self.scale = 0
self.pidset1 = {}
def check(self):
if not os.access('/proc/self/io', os.R_OK):
raise Exception('Kernel has no per-process I/O accounting [CONFIG_TASK_IO_ACCOUNTING], use at least 2.6.20')
def extract(self):
self.output = ''
self.pidset2 = {}
self.val['usage'] = 0.0
for pid in proc_pidlist():
try:
### Reset values
if pid not in self.pidset2:
self.pidset2[pid] = {'rchar:': 0, 'wchar:': 0}
if pid not in self.pidset1:
self.pidset1[pid] = {'rchar:': 0, 'wchar:': 0}
### Extract name
name = proc_splitline('/proc/%s/stat' % pid)[1][1:-1]
### Extract counters
for l in proc_splitlines('/proc/%s/io' % pid):
if len(l) != 2: continue
self.pidset2[pid][l[0]] = int(l[1])
except IOError:
continue
except IndexError:
continue
read_usage = (self.pidset2[pid]['rchar:'] - self.pidset1[pid]['rchar:']) * 1.0 / elapsed
write_usage = (self.pidset2[pid]['wchar:'] - self.pidset1[pid]['wchar:']) * 1.0 / elapsed
usage = read_usage + write_usage
# if usage > 0.0:
# print('%s %s:%s' % (pid, read_usage, write_usage))
### Get the process that spends the most jiffies
if usage > self.val['usage']:
self.val['usage'] = usage
self.val['read_usage'] = read_usage
self.val['write_usage'] = write_usage
self.val['pid'] = pid
self.val['name'] = getnamebypid(pid, name)
if step == op.delay:
self.pidset1 = self.pidset2
if self.val['usage'] != 0.0:
self.output = '%-*s%s %s' % (self.width-11, self.val['name'][0:self.width-11], cprint(self.val['read_usage'], 'd', 5, 1024), cprint(self.val['write_usage'], 'd', 5, 1024))
### Debug (show PID)
# self.output = '%*s %-*s%s %s' % (5, self.val['pid'], self.width-17, self.val['name'][0:self.width-17], cprint(self.val['read_usage'], 'd', 5, 1024), cprint(self.val['write_usage'], 'd', 5, 1024))
def showcsv(self):
return '%s / %d:%d' % (self.val['name'], self.val['read_usage'], self.val['write_usage'])
# vim:ts=4:sw=4:et
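### The counters parsed above come from /proc/<pid>/io, which starts with
### lines such as (values hypothetical):
### rchar: 323934931
### wchar: 323929600
### The trailing colon is part of the parsed token, hence the 'rchar:' and
### 'wchar:' dictionary keys.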
| 2,759 | Python | .py | 58 | 36.206897 | 204 | 0.512649 | dstat-real/dstat | 1,340 | 330 | 1 | GPL-2.0 | 9/5/2024, 5:10:30 PM (Europe/Amsterdam) |
| 8,267 | dstat_nfsd3.py | dstat-real_dstat/plugins/dstat_nfsd3.py |
### Author: Dag Wieers <dag@wieers.com>
class dstat_plugin(dstat):
def __init__(self):
self.name = 'nfs3 server'
self.nick = ('read', 'writ', 'rdir', 'inod', 'fs', 'cmmt')
self.vars = ('read', 'write', 'readdir', 'inode', 'filesystem', 'commit')
self.type = 'd'
self.width = 5
self.scale = 1000
self.open('/proc/net/rpc/nfsd')
def check(self):
info(1, 'Module %s is still experimental.' % self.filename)
def extract(self):
for l in self.splitlines():
if not l or l[0] != 'proc3': continue
self.set2['read'] = int(l[8])
self.set2['write'] = int(l[9])
self.set2['readdir'] = int(l[18]) + int(l[19])
self.set2['inode'] = int(l[3]) + int(l[4]) + int(l[5]) + int(l[6]) + int(l[7]) + int(l[10]) + int(l[11]) + int(l[12]) + int(l[13]) + int(l[14]) + int(l[15]) + int(l[16]) + int(l[17])
self.set2['filesystem'] = int(l[20]) + int(l[21]) + int(l[22])
self.set2['commit'] = int(l[23])
for name in self.vars:
self.val[name] = (self.set2[name] - self.set1[name]) * 1.0 / elapsed
if step == op.delay:
self.set1.update(self.set2)
# vim:ts=4:sw=4:et
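### The 'proc3' line holds the procedure count in l[1] followed by one
### counter per NFSv3 procedure in RFC 1813 order starting at l[2] (null),
### which is why read sits at l[8], write at l[9] and commit at l[23] above.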
| 1,250 | Python | .py | 26 | 38.846154 | 194 | 0.51642 | dstat-real/dstat | 1,340 | 330 | 1 | GPL-2.0 | 9/5/2024, 5:10:30 PM (Europe/Amsterdam) |
| 8,268 | setup.py | gpodder_gpodder/setup.py |
#!/usr/bin/env python3
#
# gPodder - A media aggregator and podcast client
# Copyright (c) 2005-2018 The gPodder Team
#
# gPodder is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# gPodder is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import os
import re
import sys
from setuptools import setup
installing = ('install' in sys.argv and '--help' not in sys.argv)
# setuptools depends on setup.py being executed from the same dir.
# Most of our custom commands work either way, but this makes
# it work in all cases.
os.chdir(os.path.dirname(os.path.realpath(__file__)))
class MissingFile(BaseException):
pass
def info(message, item=None):
print('=>', message, item if item is not None else '')
def find_data_files(uis, scripts):
# Support for installing only a subset of translations
linguas = os.environ.get('LINGUAS', None)
if linguas is not None:
linguas = linguas.split()
info('Selected languages (from $LINGUAS):', linguas)
for dirpath, dirnames, filenames in os.walk('share'):
if not filenames:
continue
# Skip data folders if we don't want the corresponding UI
share_gpodder_ui = os.path.join('share', 'gpodder', 'ui')
if uis is not None and dirpath.startswith(share_gpodder_ui):
dirparts = dirpath.split(os.sep)
if not any(part in uis for part in dirparts):
info('Skipping folder:', dirpath)
continue
# Skip translations if $LINGUAS is set
share_locale = os.path.join('share', 'locale')
if linguas is not None and dirpath.startswith(share_locale):
_, _, language, _ = dirpath.split(os.sep, 3)
if language not in linguas:
info('Skipping translation:', language)
continue
# Skip desktop stuff if we don't have any UIs requiring it
skip_folder = False
uis_requiring_freedesktop = ('gtk',)
freedesktop_folders = ('applications', 'dbus-1', 'icons', 'metainfo')
for folder in freedesktop_folders:
share_folder = os.path.join('share', folder)
if dirpath.startswith(share_folder) and uis is not None:
if not any(ui in uis_requiring_freedesktop for ui in uis):
info('Skipping freedesktop.org folder:', dirpath)
skip_folder = True
break
if skip_folder:
continue
# Skip manpages if their scripts are not going to be installed
share_man = os.path.join('share', 'man')
if dirpath.startswith(share_man):
def have_script(filename):
if not filename.endswith('.1'):
return True
basename, _ = os.path.splitext(filename)
result = any(os.path.basename(s) == basename for s in scripts)
if not result:
info('Skipping manpage without script:', filename)
return result
filenames = list(filter(have_script, filenames))
def convert_filename(filename):
filename = os.path.join(dirpath, filename)
# Skip header files generated by "make messages"
if filename.endswith('.h'):
return None
# Skip .in files, but check if their target exist
if filename.endswith('.in'):
filename = filename[:-3]
if installing and not os.path.exists(filename):
raise MissingFile(filename)
return None
return filename
filenames = [_f for _f in map(convert_filename, filenames) if _f]
if filenames:
# Some distros/ports install manpages into $PREFIX/man instead
# of $PREFIX/share/man (e.g. FreeBSD). To allow this, we strip
# the "share/" part if the variable GPODDER_MANPATH_NO_SHARE is
# set to any value in the environment.
if dirpath.startswith(share_man):
if 'GPODDER_MANPATH_NO_SHARE' in os.environ:
dirpath = dirpath.replace(share_man, 'man')
yield (dirpath, filenames)
def find_packages(uis):
src_gpodder = os.path.join('src', 'gpodder')
for dirpath, dirnames, filenames in os.walk(src_gpodder):
if '__init__.py' not in filenames:
continue
skip = False
dirparts = dirpath.split(os.sep)
dirparts.pop(0)
package = '.'.join(dirparts)
# Extract all parts of the package name ending in "ui"
ui_parts = [p for p in dirparts if p.endswith('ui')]
if uis is not None and ui_parts:
# Strip the trailing "ui", e.g. "gtkui" -> "gtk"
folder_uis = [p[:-2] for p in ui_parts]
for folder_ui in folder_uis:
if folder_ui not in uis:
info('Skipping package:', package)
skip = True
break
if not skip:
yield package
def find_scripts(uis):
# Functions for scripts to check if they should be installed
file_checks = {
'gpo': lambda uis: 'cli' in uis,
'gpodder': lambda uis: any(ui in uis for ui in ('gtk',)),
}
for dirpath, dirnames, filenames in os.walk('bin'):
for filename in filenames:
# If we have a set of uis, check if we can skip this file
if uis is not None and filename in file_checks:
if not file_checks[filename](uis):
info('Skipping script:', filename)
continue
yield os.path.join(dirpath, filename)
# Recognized UIs: cli, gtk (default: install all UIs)
uis = os.environ.get('GPODDER_INSTALL_UIS', None)
if uis is not None:
uis = uis.split()
info('Selected UIs (from $GPODDER_INSTALL_UIS):', uis)
try:
packages = sorted(find_packages(uis))
scripts = sorted(find_scripts(uis))
data_files = sorted(find_data_files(uis, scripts))
except MissingFile as mf:
print("""
Missing file: %s
If you want to install, use "make install" instead of using
setup.py directly. See the README file for more information.
""" % mf, file=sys.stderr)
sys.exit(1)
setup(
package_dir={'': 'src'},
packages=packages,
scripts=scripts,
data_files=data_files,
)
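# A hedged usage sketch of the environment variables honoured above
# (the language codes and UI names are examples):
#
#   LINGUAS="de fr" GPODDER_INSTALL_UIS="cli" python3 setup.py install
#
# would install only the command-line UI together with the German and
# French translations, skipping the gtk packages, scripts and data files.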
| 6,881 | Python | .py | 158 | 34.417722 | 78 | 0.620473 | gpodder/gpodder | 1,268 | 204 | 307 | GPL-3.0 | 9/5/2024, 5:10:30 PM (Europe/Amsterdam) |
| 8,269 | test-auth-server.py | gpodder_gpodder/tools/test-auth-server.py |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Simple HTTP web server for testing HTTP Authentication (see bug 1539)
# from our crappy-but-does-the-job department
# Thomas Perl <thp.io/about>; 2012-01-20
import base64
import datetime
import hashlib
import http.server
import re
import sys
import threading
import time
USERNAME = 'user@example.com' # Username used for HTTP Authentication
PASSWORD = 'secret' # Password used for HTTP Authentication
HOST, PORT, RPORT = 'localhost', 8000, 8001 # Hostname and ports for the two HTTP servers
# When the script contents change, the feed's episodes each get a new GUID
GUID = hashlib.sha1(open(__file__, mode='rb').read()).hexdigest()
URL = 'http://%(HOST)s:%(PORT)s' % locals()
FEEDNAME = sys.argv[0] # The title of the RSS feed
REDIRECT = 'redirect.rss' # The path for a redirection
REDIRECT_TO_BAD_HOST = 'redirect_bad' # The path for a redirection to a nonexistent host
FEEDFILE = 'feed.rss' # The "filename" of the feed on the server
EPISODES = 'episode' # Base name for the episode files
TIMEOUT = 'timeout' # The path to never return
EPISODES_EXT = '.mp3' # Extension for the episode files
EPISODES_MIME = 'audio/mpeg' # Mime type for the episode files
EP_COUNT = 7 # Number of episodes in the feed
SIZE = 500000 # Size (in bytes) of the episode downloads
def mkpubdates(items):
"""Generate fake pubDates (one each day, recently)"""
current = datetime.datetime.now() - datetime.timedelta(days=items + 3)
for i in range(items):
yield current.ctime()
current += datetime.timedelta(days=1)
def mkrss(items=EP_COUNT):
"""Generate a dumm RSS feed with a given number of items"""
ITEMS = '\n'.join("""
<item>
<title>Episode %(INDEX)s</title>
<guid>tag:test.gpodder.org,2012:%(GUID)s,%(URL)s,%(INDEX)s</guid>
<pubDate>%(PUBDATE)s</pubDate>
<enclosure
url="%(URL)s/%(EPISODES)s%(INDEX)s%(EPISODES_EXT)s"
type="%(EPISODES_MIME)s"
length="%(SIZE)s"/>
</item>
""" % dict(list(locals().items()) + list(globals().items()))
for INDEX, PUBDATE in enumerate(mkpubdates(items)))
ITEMS += """
<item>
<title>Missing Episode</title>
<guid>tag:test.gpodder.org,2012:missing</guid>
<pubDate>Sun, 25 Nov 2018 17:28:03 +0000</pubDate>
<enclosure
url="%(URL)s/not_there%(EPISODES_EXT)s"
type="%(EPISODES_MIME)s"
length="%(SIZE)s"/>
</item>""" % dict(list(locals().items()) + list(globals().items()))
ITEMS += """
<item>
<title>Server Timeout Episode</title>
<guid>tag:test.gpodder.org,2012:timeout</guid>
<pubDate>Sun, 25 Nov 2018 17:28:03 +0000</pubDate>
<enclosure
url="%(URL)s/%(TIMEOUT)s"
type="%(EPISODES_MIME)s"
length="%(SIZE)s"/>
</item>""" % dict(list(locals().items()) + list(globals().items()))
ITEMS += """
<item>
<title>Bad Host Episode</title>
<guid>tag:test.gpodder.org,2012:timeout</guid>
<pubDate>Sun, 25 Nov 2018 17:28:03 +0000</pubDate>
<enclosure
url="%(URL)s/%(REDIRECT_TO_BAD_HOST)s"
type="%(EPISODES_MIME)s"
length="%(SIZE)s"/>
</item>""" % dict(list(locals().items()) + list(globals().items()))
ITEMS += """
<item>
<title>Space in url Episode</title>
<guid>tag:test.gpodder.org,2012:timeout</guid>
<pubDate>Sun, 25 Nov 2018 17:28:03 +0000</pubDate>
<enclosure
url="%(URL)s/%(EPISODES)s with space%(EPISODES_EXT)s"
type="%(EPISODES_MIME)s"
length="%(SIZE)s"/>
</item>""" % dict(list(locals().items()) + list(globals().items()))
return """
<rss>
<channel><title>%(FEEDNAME)s</title><link>%(URL)s</link>
%(ITEMS)s
</channel>
</rss>
""" % dict(list(locals().items()) + list(globals().items()))
def mkdata(size=SIZE):
"""Generate dummy data of a given size (in bytes)"""
return bytes([32 + (i % (127 - 32)) for i in range(size)])
class AuthRequestHandler(http.server.BaseHTTPRequestHandler):
FEEDFILE_PATH = '/%s' % FEEDFILE
EPISODES_PATH = '/%s' % EPISODES
REDIRECT_PATH = '/%s' % REDIRECT
REDIRECT_TO_BAD_HOST_PATH = '/%s' % REDIRECT_TO_BAD_HOST
TIMEOUT_PATH = '/%s' % TIMEOUT
def do_GET(self):
authorized = False
is_feed = False
is_episode = False
auth_header = self.headers.get('authorization', '')
m = re.match(r'^Basic (.*)$', auth_header)
if m is not None:
auth_data = base64.b64decode(m.group(1)).decode().split(':', 1)
if len(auth_data) == 2:
username, password = auth_data
print('Got username:', username)
print('Got password:', password)
if (username, password) == (USERNAME, PASSWORD):
print('Valid credentials provided.')
authorized = True
if self.path == self.FEEDFILE_PATH:
print('Feed request.')
is_feed = True
elif self.path.startswith(self.EPISODES_PATH):
print('Episode request.')
is_episode = True
elif self.path == self.REDIRECT_PATH:
print('Redirect request.')
self.send_response(302)
self.send_header('Location', '%s/%s' % (URL, FEEDFILE))
self.end_headers()
return
elif self.path.startswith(self.REDIRECT_TO_BAD_HOST_PATH):
print('Redirect request => bad host.')
self.send_response(302)
self.send_header('Location', '//notthere.gpodder.io/%s' % (FEEDFILE))
self.end_headers()
return
elif self.path == self.TIMEOUT_PATH:
# will need to restart the server or wait 80s before next request
time.sleep(80)
return
if not authorized:
print('Not authorized - sending WWW-Authenticate header.')
self.send_response(401)
self.send_header('WWW-Authenticate',
'Basic realm="%s"' % sys.argv[0])
self.end_headers()
return
if not is_feed and not is_episode:
print('Not there episode - sending 404.')
self.send_response(404)
self.end_headers()
return
self.send_response(200)
self.send_header('Content-type',
'application/xml' if is_feed else 'audio/mpeg')
self.end_headers()
self.wfile.write(mkrss().encode('utf-8') if is_feed else mkdata())
def run(httpd):
while True:
httpd.handle_request()
if __name__ == '__main__':
httpd = http.server.HTTPServer((HOST, PORT), AuthRequestHandler)
print("""
Feed URL: %(URL)s/%(FEEDFILE)s
Redirect URL: http://%(HOST)s:%(RPORT)d/%(REDIRECT)s
Timeout URL: %(URL)s/%(TIMEOUT)s
Username: %(USERNAME)s
Password: %(PASSWORD)s
""" % locals())
httpdr = http.server.HTTPServer((HOST, RPORT), AuthRequestHandler)
t1 = threading.Thread(name='http', target=run, args=(httpd,), daemon=True)
t1.start()
t2 = threading.Thread(name='http redirect', target=run, args=(httpdr,), daemon=True)
t2.start()
try:
t1.join()
t2.join()
except KeyboardInterrupt:
pass
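# Example session against the constants defined above (requires curl):
#
#   $ python3 test-auth-server.py
#   $ curl -u user@example.com:secret http://localhost:8000/feed.rss
#
# The same URL without credentials yields a 401 with a
# 'WWW-Authenticate: Basic' challenge; the second server on port 8001
# serves the redirect paths back to the feed.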
| 7,432 | Python | .py | 181 | 33.298343 | 88 | 0.593054 | gpodder/gpodder | 1,268 | 204 | 307 | GPL-3.0 | 9/5/2024, 5:10:30 PM (Europe/Amsterdam) |
| 8,270 | github_release.py | gpodder_gpodder/tools/github_release.py |
#!/usr/bin/env python3
"""
Prepare release and upload Windows and macOS artifacts
"""
import argparse
import hashlib
import os
import re
import sys
import zipfile
import magic # use python-magic (not compatible with filemagic)
import requests
from github3 import login
from jinja2 import Template
def debug_requests():
""" turn requests debug on """
# These two lines enable debugging at httplib level (requests->urllib3->http.client)
# You will see the REQUEST, including HEADERS and DATA, and RESPONSE with HEADERS but without DATA.
# The only thing missing will be the response.body which is not logged.
import http.client as http_client
import logging
http_client.HTTPConnection.debuglevel = 1
# You must initialize logging, otherwise you'll not see debug output.
logging.basicConfig()
logging.getLogger().setLevel(logging.DEBUG)
requests_log = logging.getLogger("requests.packages.urllib3")
requests_log.setLevel(logging.DEBUG)
requests_log.propagate = True
def error_exit(msg, code=1):
""" print msg and exit with code """
print(msg, file=sys.stderr)
sys.exit(code)
def download_items(urls, prefix):
print("D: downloading %s" % urls)
for url in urls:
print("I: downloading %s" % url)
filename = url.split('/')[-1]
output = os.path.join("_build", "{}-{}".format(prefix, filename))
with requests.get(url, stream=True) as r:
with open(output, "wb") as f:
for chunk in r.iter_content(chunk_size=1000000):
f.write(chunk)
def download_mac_github(github_workflow, prefix, version):
""" download mac workflow artifacts from github and exit """
headers = {'Accept': 'application/vnd.github+json', 'Authorization': 'token %s' % github_token}
print("I: downloading release artifacts for workflow %d" % github_workflow)
r = requests.get("https://api.github.com/repos/gpodder/gpodder/actions/artifacts", headers=headers)
if not r.ok:
error_exit('ERROR: API fetch failed %d %s' % (r.status_code, r.reason))
artifacts = r.json()
artifact = [(a['id'], a['archive_download_url']) for a in artifacts['artifacts'] if a['workflow_run']['id'] == github_workflow]
if len(artifact) != 1:
error_exit("Nothing found to download")
id, url = artifact[0]
print("I: found artifact %d" % id)
print("I: downloading %s" % url)
output = os.path.join("_build", "{}-artifact.zip".format(prefix))
with requests.get(url, stream=True, headers=headers) as r:
if not r.ok:
error_exit('ERROR: artifact fetch failed %d %s' % (r.status_code, r.reason))
with open(output, "wb") as f:
for chunk in r.iter_content(chunk_size=1000000):
f.write(chunk)
print("I: unzipping %s" % output)
with zipfile.ZipFile(output, 'r') as z:
z.extractall('_build')
os.remove(output)
os.remove(os.path.join("_build", "{}-gPodder-{}.zip.md5".format(prefix, version)))
os.remove(os.path.join("_build", "{}-gPodder-{}.zip.sha256".format(prefix, version)))
    checksums()
def download_appveyor(appveyor_build, prefix):
""" download build artifacts from appveyor and exit """
print("I: downloading release artifacts from appveyor")
build = requests.get("https://ci.appveyor.com/api/projects/elelay/gpodder/build/%s" % appveyor_build).json()
job_id = build.get("build", {}).get("jobs", [{}])[0].get("jobId")
if job_id:
job_url = "https://ci.appveyor.com/api/buildjobs/{}/artifacts".format(job_id)
artifacts = requests.get(job_url).json()
items = ["{}/{}".format(job_url, f["fileName"]) for f in artifacts if f["type"] == "File"]
if len(items) == 0:
error_exit("Nothing found to download")
download_items(items, prefix)
else:
error_exit("no jobId in {}".format(build))
def checksums():
""" compute artifact checksums """
ret = {}
for f in os.listdir("_build"):
archive = os.path.join("_build", f)
m = hashlib.md5()
s = hashlib.sha256()
with open(archive, "rb") as f:
block = f.read(4096)
while block:
m.update(block)
s.update(block)
block = f.read(4096)
ret[os.path.basename(archive)] = {'md5': m.hexdigest(), 'sha256': s.hexdigest()}
return ret
def get_contributors(tag, previous_tag):
"""
list contributor logins '@...' for every commit in range
"""
cmp = repo.compare_commits(previous_tag, tag)
logins = [c.author.login for c in cmp.commits() if c.author] + [c.committer.login for c in cmp.commits()]
return sorted({"@{}".format(n) for n in logins})
def get_previous_tag():
latest_release = repo.latest_release()
return latest_release.tag_name
def release_text(tag, previous_tag, mac_github=None, appveyor=None):
t = Template("""
Linux, macOS and Windows are supported.
Thanks to {{contributors[0]}}{% for c in contributors[1:-1] %}, {{c}}{% endfor %} and {{contributors[-1]}} for contributing to this release!
[Changes](https://github.com/gpodder/gpodder/compare/{{previous_tag}}...{{tag}}) since **{{previous_tag}}**:
## New features
- ...
## Improvements
- ...
## Bug fixes
- ...
## Translations
- ...
## CI references
- macOS GitHub build [{{mac_github}}](https://github.com/gpodder/gpodder/actions/runs/{{mac_github}})
- Windows Appveyor build [{{appveyor}}](https://ci.appveyor.com/project/elelay/gpodder/build/{{appveyor}})
## Checksums
{% for f, c in checksums.items() %}
- {{f}} md5:<i>{{c.md5}}</i> sha256:<i>{{c.sha256}}</i>
{% endfor %}
""")
args = {
'contributors': get_contributors(tag, previous_tag),
'tag': tag,
'previous_tag': previous_tag,
'mac_github': mac_github,
'appveyor': appveyor,
'checksums': checksums()
}
return t.render(args)
def upload(repo, tag, previous_tag, mac_github, appveyor):
""" create github release (draft) and upload assets """
print("I: creating release %s" % tag)
items = os.listdir('_build')
if len(items) == 0:
error_exit("Nothing found to upload")
try:
release = repo.create_release(tag, name=tag, draft=True)
except Exception as e:
error_exit("Error creating release '%s' (%r)" % (tag, e))
print("I: updating release description from template")
text = release_text(tag, previous_tag, mac_github=mac_github, appveyor=appveyor)
print(text)
if release.edit(body=text):
print("I: updated release description")
else:
error_exit("E: updating release description")
print("D: uploading items\n - %s" % "\n - ".join(items))
m = magic.Magic(mime=True)
for itm in items:
filename = os.path.join("_build", itm)
content_type = m.from_file(filename)
print("I: uploading %s..." % itm)
with open(filename, "rb") as f:
try:
_ = release.upload_asset(content_type, itm, f)
except Exception as e:
error_exit("Error uploading asset '%s' (%r)" % (itm, e))
print("I: upload success")
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='upload gpodder-osx-bundle artifacts to a github release\n'
'Example usage: \n'
' GITHUB_TOKEN=xxx python github_release.py --download --mac-github-workflow 1234567890 --appveyor 1.0.104 3.10.4\n'
' GITHUB_TOKEN=xxx python github_release.py --mac-github-workflow 1234567890 --appveyor 1.0.104 3.10.4\n',
formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument('tag', type=str, help='gPodder git tag to create a release from')
parser.add_argument('--download', action='store_true', help='download artifacts')
    parser.add_argument('--mac-github-workflow', type=int, help='mac github workflow number')
parser.add_argument('--appveyor', type=str, help='appveyor build number')
parser.add_argument('--debug', '-d', action='store_true', help='debug requests')
args = parser.parse_args()
if args.debug:
debug_requests()
github_token = os.environ.get("GITHUB_TOKEN")
if not github_token:
error_exit("E: set GITHUB_TOKEN environment", -1)
gh = login(token=github_token)
repo = gh.repository('gpodder', 'gpodder')
if args.download:
        if not args.mac_github_workflow:
error_exit("E: --download requires --mac-github-workflow number")
elif not args.appveyor:
error_exit("E: --download requires --appveyor number")
if os.path.isdir("_build"):
error_exit("E: _build directory exists", -1)
os.mkdir("_build")
        download_mac_github(args.mac_github_workflow, "macOS", args.tag)
download_appveyor(args.appveyor, "windows")
print("I: download success.")
else:
if not os.path.exists("_build"):
error_exit("E: _build directory doesn't exist. You need to download build artifacts (see Usage)", -1)
previous_tag = get_previous_tag()
        upload(repo, args.tag, previous_tag, args.mac_github_workflow, args.appveyor)
| 9,173 | Python | .py | 202 | 38.965347 | 140 | 0.643673 | gpodder/gpodder | 1,268 | 204 | 307 | GPL-3.0 | 9/5/2024, 5:10:30 PM (Europe/Amsterdam) |
| 8,271 | getversion.py | gpodder_gpodder/tools/getversion.py |
import os
import re
here = os.path.dirname(__file__) or '.'
main_module = open(os.path.join(here, '../src/gpodder/__init__.py')).read()
metadata = dict(re.findall(r"__([a-z_]+)__\s*=\s*'([^']+)'", main_module))
print(metadata['version'])
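# A hedged illustration: given a line such as
#   __version__ = '3.11.4'
# in src/gpodder/__init__.py, the regex captures ('version', '3.11.4')
# and the script prints 3.11.4 (the version string here is an example).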
| 239 | Python | .py | 6 | 38.666667 | 75 | 0.616379 | gpodder/gpodder | 1,268 | 204 | 307 | GPL-3.0 | 9/5/2024, 5:10:30 PM (Europe/Amsterdam) |
| 8,272 | create-desktop-icon.py | gpodder_gpodder/tools/create-desktop-icon.py |
#!/usr/bin/env python3
# create-desktop-icon.py: Create a Desktop icon
# 2016-12-22 Thomas Perl <m@thp.io>
import os
import sys
from gi.repository import GLib
BASE = os.path.normpath(os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
TEMPLATE = """# Created by %(__file__)s
[Desktop Entry]
Name=gPodder (Git)
Exec=%(BASE)s/bin/gpodder
Icon=%(BASE)s/share/icons/hicolor/scalable/apps/gpodder.svg
Terminal=false
Type=Application
""" % locals()
DESKTOP = GLib.get_user_special_dir(GLib.UserDirectory.DIRECTORY_DESKTOP)
if not os.path.exists(DESKTOP):
print("{} desktop folder doesn't exists, exiting".format(DESKTOP))
sys.exit(1)
DESTINATION = os.path.join(DESKTOP, 'gpodder-git.desktop')
if os.path.exists(DESTINATION):
print('{} already exists, not overwriting'.format(DESTINATION))
sys.exit(1)
with open(DESTINATION, 'w') as fp:
fp.write(TEMPLATE)
os.chmod(DESTINATION, 0o755)
print('Wrote {}'.format(DESTINATION))
| 958 | Python | .py | 27 | 33.37037 | 87 | 0.744843 | gpodder/gpodder | 1,268 | 204 | 307 | GPL-3.0 | 9/5/2024, 5:10:30 PM (Europe/Amsterdam) |
| 8,273 | progressbar_icon_tester.py | gpodder_gpodder/tools/progressbar_icon_tester.py |
#!/usr/bin/env python3
# Progressbar icon tester
# Thomas Perl <thp.io/about>; 2012-02-05
#
# based on: Simple script to test gPodder's "pill" pixbuf implementation
# Thomas Perl <thp.io/about>; 2009-09-13
import sys
sys.path.insert(0, 'src')  # make the in-tree gpodder package importable before importing it
from gi.repository import Gtk
from gpodder.gtkui.draw import draw_cake_pixbuf
def gen(percentage):
pixbuf = draw_cake_pixbuf(percentage)
return Gtk.Image.new_from_pixbuf(pixbuf)
w = Gtk.Window()
w.connect('destroy', Gtk.main_quit)
v = Gtk.VBox()
w.add(v)
for y in range(1):
h = Gtk.HBox()
h.set_homogeneous(True)
v.add(h)
PARTS = 20
for x in range(PARTS + 1):
h.add(gen(x / PARTS))
w.set_default_size(400, 100)
w.show_all()
Gtk.main()
| 732 | Python | .py | 27 | 24.481481 | 72 | 0.697274 | gpodder/gpodder | 1,268 | 204 | 307 | GPL-3.0 | 9/5/2024, 5:10:30 PM (Europe/Amsterdam) |
| 8,274 | make_cert_pem.py | gpodder_gpodder/tools/mac-osx/make_cert_pem.py |
# -*- coding: utf-8 -*-
""" A script to initialize our bundled openssl CA trust store
based on your System's keychain
Released under the same licence as gPodder (GPL3 or later)
Copyright (c) 2016 Eric Le Lay
"""
import argparse
import re
import subprocess
import sys
import traceback
from subprocess import PIPE, CalledProcessError, Popen
def is_valid_cert(openssl, cert):
""" check if cert is valid according to openssl"""
cmd = [openssl, "x509", "-inform", "pem", "-checkend", "0", "-noout"]
# print("D: is_valid_cert %r" % cmd)
proc = Popen(cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE)
stdout, stderr = proc.communicate(cert)
# print("out: %s; err:%s; ret:%i" % (stdout, stderr, proc.returncode))
return proc.returncode == 0
def get_certs(openssl):
""" extract System's certificates then filter them by validity
and return a list of text of valid certs
"""
cmd = ["security", "find-certificate", "-a", "-p",
"/System/Library/Keychains/SystemRootCertificates.keychain"]
cert_re = re.compile(b"^-----BEGIN CERTIFICATE-----$"
+ b".+?"
+ b"^-----END CERTIFICATE-----$", re.M | re.S)
try:
certs_str = subprocess.check_output(cmd)
all_certs = cert_re.findall(certs_str)
print("I: extracted %i certificates" % len(all_certs))
valid_certs = [cert for cert in all_certs
if is_valid_cert(openssl, cert)]
print("I: of which %i are valid certificates" % len(valid_certs))
return valid_certs
except OSError:
print("E: extracting certificates using %r" % cmd)
traceback.print_exc()
except CalledProcessError as err:
print(("E: extracting certificates using %r, exit=%i" %
(cmd, err.returncode)))
def write_certs(certs, dest):
""" write concatenated certs to dest """
with open(dest, "wb") as output:
output.write(b"\n".join(certs))
def main(openssl, dest):
""" main program """
print("I: make_cert_pem.py %s %s" % (openssl, dest))
certs = get_certs(openssl)
if certs is None:
print("E: no certificate extracted")
return -1
else:
write_certs(certs, dest)
print("I: updated %s with %i certificates" % (dest, len(certs)))
return 0
PARSER = argparse.ArgumentParser(
description='Extract system certificates for openssl')
PARSER.add_argument("openssl",
metavar="OPENSSL_EXE",
help="absolute path to the openssl executable")
PARSER.add_argument("dest",
metavar="DEST_FILE",
help="absolute path to the certs.pem file to write to")
if __name__ == "__main__":
ARGS = PARSER.parse_args()
sys.exit(main(ARGS.openssl, ARGS.dest))
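# Hypothetical invocation (both paths are examples):
#
#   python3 make_cert_pem.py /usr/bin/openssl /tmp/certs.pem
#
# extracts the system root certificates, drops those that fail
# 'openssl x509 -checkend 0', and concatenates the rest into the
# destination file.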
| 2,837 | Python | .py | 69 | 33.855072 | 75 | 0.621818 | gpodder/gpodder | 1,268 | 204 | 307 | GPL-3.0 | 9/5/2024, 5:10:30 PM (Europe/Amsterdam) |
| 8,275 | launcher.py | gpodder_gpodder/tools/mac-osx/launcher.py |
import os
import os.path
import platform
import re
import runpy
import subprocess
import sys
import time
import traceback
from os.path import join
from subprocess import PIPE, CalledProcessError, Popen
class MakeCertPem:
""" create openssl cert bundle from system certificates """
def __init__(self, openssl):
self.openssl = openssl
def is_valid_cert(self, cert):
""" check if cert is valid according to openssl"""
cmd = [self.openssl, "x509", "-inform", "pem", "-checkend", "0", "-noout"]
# print("D: is_valid_cert %r" % cmd)
proc = Popen(cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE)
stdout, stderr = proc.communicate(cert)
# print("out: %s; err:%s; ret:%i" % (stdout, stderr, proc.returncode))
return proc.returncode == 0
def get_certs(self):
""" extract System's certificates then filter them by validity
and return a list of text of valid certs
"""
cmd = ["security", "find-certificate", "-a", "-p",
"/System/Library/Keychains/SystemRootCertificates.keychain"]
cert_re = re.compile(b"^-----BEGIN CERTIFICATE-----$"
+ b".+?"
+ b"^-----END CERTIFICATE-----$", re.M | re.S)
try:
certs_str = subprocess.check_output(cmd)
all_certs = cert_re.findall(certs_str)
print("I: extracted %i certificates" % len(all_certs))
valid_certs = [cert for cert in all_certs
if self.is_valid_cert(cert)]
print("I: of which %i are valid certificates" % len(valid_certs))
return valid_certs
except OSError:
print("E: extracting certificates using %r" % cmd)
traceback.print_exc()
except CalledProcessError as err:
print(("E: extracting certificates using %r, exit=%i" %
(cmd, err.returncode)))
@staticmethod
def write_certs(certs, dest):
""" write concatenated certs to dest """
with open(dest, "wb") as output:
output.write(b"\n".join(certs))
def regen(self, dest):
""" main program """
print("I: make_cert_pem %s %s" % (self.openssl, dest))
certs = self.get_certs()
if certs is None:
print("E: no certificate extracted")
return -1
else:
self.write_certs(certs, dest)
print("I: updated %s with %i certificates" % (dest, len(certs)))
return 0
# print("launcher.py sys.argv=", sys.argv)
bundlepath = sys.argv.pop(0)
app = os.path.basename(sys.argv[0])
bundle_contents = join(bundlepath, 'Contents')
bundle_res = join(bundle_contents, 'Resources')
bundle_lib = join(bundle_res, 'lib')
bundle_bin = join(bundle_res, 'bin')
bundle_data = join(bundle_res, 'share')
bundle_etc = join(bundle_res, 'etc')
os.environ['CHARSETALIASDIR'] = bundle_lib
os.environ['DYLD_LIBRARY_PATH'] = bundle_lib
os.environ['GTK_DATA_PREFIX'] = bundle_res
os.environ['GTK_EXE_PREFIX'] = bundle_res
os.environ['GTK_PATH'] = bundle_res
os.environ['LD_LIBRARY_PATH'] = bundle_lib
os.environ['XDG_CONFIG_DIRS'] = bundle_etc
os.environ['XDG_DATA_DIRS'] = bundle_data
os.environ['PANGO_LIBDIR'] = bundle_lib
os.environ['PANGO_RC_FILE'] = join(bundle_etc, 'pango', 'pangorc')
os.environ['PANGO_SYSCONFDIR'] = bundle_etc
os.environ['GDK_PIXBUF_MODULE_FILE'] = join(bundle_lib, 'gdk-pixbuf-2.0',
'2.10.0', 'loaders.cache')
if int(platform.release().split('.')[0]) > 10:
os.environ['GTK_IM_MODULE_FILE'] = join(bundle_etc, 'gtk-3.0',
'gtk.immodules')
os.environ['GI_TYPELIB_PATH'] = join(bundle_lib, 'girepository-1.0')
# for forked python
os.environ['PYTHONHOME'] = bundle_res
# Set $PYTHON to point inside the bundle
PYVER = 'python3.11'
sys.path.append(bundle_res)
print('System Path:\n', '\n'.join(sys.path))
# see https://gpodder.github.io/docs/user-manual.html#gpodder-home-folder-and-download-location
# To override gPodder home and/or download directory:
# 1. uncomment (remove the pound sign and space) at the beginning of the relevant line
# 2. replace ~/gPodderData or ~/gPodderDownloads with the path you want for your gPodder home
# (you can move the original folder in the Finder first,
# then drag and drop to the launcher.py in TextEdit to ensure the correct path is set)
# uncomment the following line to override gPodder home
# os.environ['GPODDER_HOME'] = os.path.expanduser('~/gPodderData')
# uncomment the following line to override gPodder download directory
# os.environ['GPODDER_DOWNLOAD_DIR'] = os.path.expanduser('~/gPodderDownloads')
for k, v in os.environ.items():
print("%s=%s" % (k, v))
def gpodder_home():
# don't inadvertently create the new gPodder home,
# it would be preferred to the old one
default_path = join(os.environ['HOME'], 'Library', 'Application Support', 'gPodder')
cands = [
os.path.expanduser(os.environ.get('GPODDER_HOME')) if 'GPODDER_HOME' in os.environ else None,
default_path,
join(os.environ['HOME'], 'gPodder'),
]
for cand in cands:
if cand and os.path.exists(cand):
return cand
return default_path
gphome = gpodder_home()
os.makedirs(join(gphome, 'openssl'), exist_ok=True)
# generate cert.extracted.pem
cert_gen = join(gphome, 'openssl', 'cert.extracted.pem')
cert_pem = join(gphome, 'openssl', 'cert.pem')
regen = False
if not os.path.isfile(cert_gen):
regen = True
else:
last_modified = os.stat(cert_gen).st_mtime
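    # regenerate when the extracted bundle is more than 7 hours old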
regen = last_modified < time.time() - 3600 * 7
if regen:
print('(Re)generating', cert_pem)
openssl = join(bundle_bin, 'openssl')
MakeCertPem(openssl).regen(cert_gen)
else:
print("No regenerating", cert_gen, "it's fresh enough")
# and link to it by default. Users may want to point cert.pem to MacPorts
# /opt/local/etc/openssl/cert.pem, for instance.
if not os.path.exists(cert_pem):
os.symlink(os.path.basename(cert_gen), cert_pem)
# Set path to CA files
os.environ['SSL_CERT_FILE'] = cert_pem
if app == 'run-python':
python_exe = join(bundle_contents, 'MacOS', 'python3')
# executable is repeated as argv[0].
# Old sys.argv[0] points to Contents/MacOS so must be removed
args = [python_exe] + sys.argv[1:]
# print("running", args)
os.execv(python_exe, args)
elif app == 'run-pip':
python_exe = join(bundle_contents, 'MacOS', 'python3')
pip = join(bundle_contents, 'Resources', 'bin', 'pip3')
# executable is repeated as argv[0].
# Old sys.argv[0] points to Contents/MacOS so must be removed
args = [python_exe, pip] + sys.argv[1:]
# print("running", args)
os.execv(python_exe, args)
else:
runpy.run_path(join(bundle_bin, app), run_name='__main__')
| 6,864 | Python | .py | 159 | 37.176101 | 101 | 0.649304 | gpodder/gpodder | 1,268 | 204 | 307 | GPL-3.0 | 9/5/2024, 5:10:30 PM (Europe/Amsterdam) |
| 8,276 | depcheck.py | gpodder_gpodder/tools/win_installer/misc/depcheck.py |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2016,2017 Christoph Reiter
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
"""
Deletes unneeded DLLs and checks DLL dependencies.
Execute with the build python, will figure out the rest.
"""
import os
import subprocess
import sys
from functools import cache
from multiprocessing import Process, Queue
import gi # isort:skip
gi.require_version("GIRepository", "2.0") # isort:skip
from gi.repository import GIRepository # isort:skip
def _get_shared_libraries(q, namespace, version):
repo = GIRepository.Repository()
repo.require(namespace, version, 0)
lib = repo.get_shared_library(namespace)
q.put(lib)
@cache
def get_shared_libraries(namespace, version):
# we have to start a new process because multiple versions can't be loaded
# in the same process
q = Queue()
p = Process(target=_get_shared_libraries, args=(q, namespace, version))
p.start()
result = q.get()
p.join()
return result
def get_required_by_typelibs():
deps = set()
repo = GIRepository.Repository()
for tl in os.listdir(repo.get_search_path()[0]):
namespace, version = os.path.splitext(tl)[0].split("-", 1)
lib = get_shared_libraries(namespace, version)
if lib:
libs = lib.lower().split(",")
else:
libs = []
for lib in libs:
deps.add((namespace, version, lib))
return deps
@cache
def get_dependencies(filename):
deps = []
try:
data = subprocess.check_output(["objdump", "-p", filename],
stderr=subprocess.STDOUT)
except subprocess.CalledProcessError:
# can happen with wrong arch binaries
return []
data = data.decode("utf-8")
for line in data.splitlines():
line = line.strip()
if line.startswith("DLL Name:"):
deps.append(line.split(":", 1)[-1].strip().lower())
return deps
def find_lib(root, name):
system_search_path = os.path.join("C:", os.sep, "Windows", "System32")
if get_lib_path(root, name):
return True
elif os.path.exists(os.path.join(system_search_path, name)):
return True
elif name in ["gdiplus.dll"]:
return True
elif name.startswith("msvcr"):
return True
elif name.startswith("api-ms-win-"):
return True
return False
def get_lib_path(root, name):
search_path = os.path.join(root, "bin")
if os.path.exists(os.path.join(search_path, name)):
return os.path.join(search_path, name)
def get_things_to_delete(root):
extensions = [".exe", ".pyd", ".dll"]
all_libs = set()
needed = set()
for base, dirs, files in os.walk(root):
for f in files:
lib = f.lower()
path = os.path.join(base, f)
ext_lower = os.path.splitext(f)[-1].lower()
if ext_lower in extensions:
if ext_lower == ".exe":
# we use .exe as dependency root
needed.add(lib)
all_libs.add(f.lower())
for lib in get_dependencies(path):
all_libs.add(lib)
needed.add(lib)
if not find_lib(root, lib):
print("MISSING:", path, lib)
for namespace, version, lib in get_required_by_typelibs():
all_libs.add(lib)
needed.add(lib)
if not find_lib(root, lib):
print("MISSING:", namespace, version, lib)
to_delete = []
for not_depended_on in (all_libs - needed):
path = get_lib_path(root, not_depended_on)
if path:
to_delete.append(path)
return to_delete
def main(argv):
libs = get_things_to_delete(sys.prefix)
if "--delete" in argv[1:]:
while libs:
for lib in libs:
print("DELETE:", lib)
os.unlink(lib)
libs = get_things_to_delete(sys.prefix)
if __name__ == "__main__":
main(sys.argv)
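# Usage sketch (run with the bundle's Python so sys.prefix points at it):
#
#   python3 depcheck.py           # report missing dependencies only
#   python3 depcheck.py --delete  # additionally delete DLLs nothing depends
#                                 # on, repeating until the set is stable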
| 4,232 | Python | .py | 120 | 27.891667 | 78 | 0.611356 | gpodder/gpodder | 1,268 | 204 | 307 | GPL-3.0 | 9/5/2024, 5:10:30 PM (Europe/Amsterdam) |
| 8,277 | create-launcher.py | gpodder_gpodder/tools/win_installer/misc/create-launcher.py |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2016 Christoph Reiter
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
"""Creates simple Python .exe launchers for gui and cli apps
./create-launcher.py "3.8.0" <target-dir>
"""
import os
import shlex
import shutil
import struct
import subprocess
import sys
import tempfile
def build_resource(rc_path, out_path):
"""Raises subprocess.CalledProcessError"""
def is_64bit():
return struct.calcsize("P") == 8
subprocess.check_call(
["windres", "-O", "coff", "-F",
"pe-x86-64" if is_64bit() else "pe-i386", rc_path,
"-o", out_path])
def get_build_args():
python_name = os.path.splitext(os.path.basename(sys.executable))[0]
python_config = os.path.join(
os.path.dirname(sys.executable), python_name + "-config")
cflags = subprocess.check_output(
["sh", python_config, "--cflags"]).strip()
libs = subprocess.check_output(
["sh", python_config, "--libs"]).strip()
cflags = os.fsdecode(cflags)
libs = os.fsdecode(libs)
return shlex.split(cflags) + shlex.split(libs)
def build_exe(source_path, resource_path, is_gui, out_path):
args = ["gcc", "-s"]
if is_gui:
args.append("-mwindows")
args.append("-municode")
args.extend(["-o", out_path, source_path, resource_path])
args.extend(get_build_args())
print("Compiling launcher: %r", args)
subprocess.check_call(args)
def get_launcher_code(entry_point):
module, func = entry_point.split(":", 1)
template = """\
#include "Python.h"
#define WIN32_LEAN_AND_MEAN
#include <windows.h>
int WINAPI wWinMain(HINSTANCE hInstance, HINSTANCE hPrevInstance,
PWSTR lpCmdLine, int nCmdShow)
{
int result;
Py_NoUserSiteDirectory = 1;
Py_IgnoreEnvironmentFlag = 1;
Py_DontWriteBytecodeFlag = 1;
Py_Initialize();
PySys_SetArgvEx(__argc, __wargv, 0);
result = PyRun_SimpleString("%s");
Py_Finalize();
return result;
}
"""
launch_code = "import sys; from %s import %s; sys.exit(%s())" % (
module, func, func)
return template % launch_code
def get_resource_code(filename, file_version, file_desc, icon_path,
product_name, product_version, company_name):
template = """\
#include <winuser.h>
1 ICON "%(icon_path)s"
1 VERSIONINFO
FILEVERSION %(file_version_list)s
PRODUCTVERSION %(product_version_list)s
FILEOS 0x4
FILETYPE 0x1
BEGIN
BLOCK "StringFileInfo"
BEGIN
BLOCK "040904E4"
BEGIN
VALUE "CompanyName", "%(company_name)s"
VALUE "FileDescription", "%(file_desc)s"
VALUE "FileVersion", "%(file_version)s"
VALUE "InternalName", "%(internal_name)s"
VALUE "OriginalFilename", "%(filename)s"
VALUE "ProductName", "%(product_name)s"
VALUE "ProductVersion", "%(product_version)s"
END
END
BLOCK "VarFileInfo"
BEGIN
VALUE "Translation", 0x409, 1252
END
END
"""
def to_ver_list(v):
return ",".join(map(str, (list(map(int, v.split("."))) + [0] * 4)[:4]))
file_version_list = to_ver_list(file_version)
product_version_list = to_ver_list(product_version)
return template % {
"icon_path": icon_path,
"file_version_list": file_version_list,
"product_version_list": product_version_list,
"file_version": file_version, "product_version": product_version,
"company_name": company_name, "filename": filename,
"internal_name": os.path.splitext(filename)[0],
"product_name": product_name, "file_desc": file_desc,
}
def build_launcher(out_path, icon_path, file_desc, product_name,
product_version, company_name, entry_point, is_gui):
src_ico = os.path.abspath(icon_path)
target = os.path.abspath(out_path)
file_version = product_version
dir_ = os.getcwd()
temp = tempfile.mkdtemp()
try:
os.chdir(temp)
with open("launcher.c", "w") as h:
h.write(get_launcher_code(entry_point))
shutil.copyfile(src_ico, "launcher.ico")
with open("launcher.rc", "w") as h:
h.write(get_resource_code(
os.path.basename(target), file_version, file_desc,
"launcher.ico",
product_name, product_version, company_name))
build_resource("launcher.rc", "launcher.res")
build_exe("launcher.c", "launcher.res", is_gui, target)
finally:
os.chdir(dir_)
shutil.rmtree(temp)
def main():
argv = sys.argv
version = argv[1]
target = argv[2]
company_name = "The gPodder Team"
misc = os.path.dirname(os.path.realpath(__file__))
build_launcher(
os.path.join(target, "gpodder.exe"),
os.path.join(misc, "gpodder.ico"),
"gPodder", "gPodder",
version, company_name, "gpodder_launch.gpodder:main", True)
build_launcher(
os.path.join(target, "gpodder-cmd.exe"),
os.path.join(misc, "gpodder.ico"),
"gPodder", "gPodder",
version, company_name, "gpodder_launch.gpodder:main", False)
build_launcher(
os.path.join(target, "gpo.exe"),
os.path.join(misc, "gpo.ico"),
"gPodder CLI", "gpo",
version, company_name, "gpodder_launch.gpo:main", False)
if __name__ == "__main__":
main()
| 5,631 | Python | .py | 157 | 29.624204 | 79 | 0.632131 | gpodder/gpodder | 1,268 | 204 | 307 | GPL-3.0 | 9/5/2024, 5:10:30 PM (Europe/Amsterdam) |
| 8,278 | __init__.py | gpodder_gpodder/tools/fake-dbus-module/dbus/__init__.py |
import dbus.exceptions # noqa: F401
class SessionBus(object):
def __init__(self, *args, **kwargs):
self.fake = True
def add_signal_receiver(self, *args, **kwargs):
pass
def name_has_owner(self, *args, **kwargs):
return False
SystemBus = SessionBus
| 291 | Python | .py | 9 | 26.666667 | 51 | 0.648551 | gpodder/gpodder | 1,268 | 204 | 307 | GPL-3.0 | 9/5/2024, 5:10:30 PM (Europe/Amsterdam) |
| 8,279 | service.py | gpodder_gpodder/tools/fake-dbus-module/dbus/service.py |
def method(*args, **kwargs):
return lambda x: x
class BusName(object):
def __init__(self, *args, **kwargs):
pass
class Object:
def __init__(self, *args, **kwargs):
pass
| 202 | Python | .py | 8 | 20.125 | 40 | 0.592593 | gpodder/gpodder | 1,268 | 204 | 307 | GPL-3.0 | 9/5/2024, 5:10:30 PM (Europe/Amsterdam) |
| 8,280 | my.py | gpodder_gpodder/src/gpodder/my.py |
# -*- coding: utf-8 -*-
#
# gPodder - A media aggregator and podcast client
# Copyright (c) 2005-2018 The gPodder Team
#
# gPodder is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# gPodder is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#
# my.py -- mygpo Client Abstraction for gPodder
# Thomas Perl <thp@gpodder.org>; 2010-01-19
#
import atexit
import calendar
import datetime
import logging
import os
import sys
import time
# Append gPodder's user agent to mygpoclient's user agent
import mygpoclient
from mygpoclient import api, public
from mygpoclient import util as mygpoutil
import gpodder
from gpodder import minidb, util
_ = gpodder.gettext
logger = logging.getLogger(__name__)
mygpoclient.user_agent += ' ' + gpodder.user_agent
# 2013-02-08: We should update this to 1.7 once we use the new features
MYGPOCLIENT_REQUIRED = '1.4'
if (not hasattr(mygpoclient, 'require_version')
or not mygpoclient.require_version(MYGPOCLIENT_REQUIRED)):
print("""
Please upgrade your mygpoclient library.
See http://thp.io/2010/mygpoclient/
Required version: %s
Installed version: %s
""" % (MYGPOCLIENT_REQUIRED, mygpoclient.__version__), file=sys.stderr)
sys.exit(1)
try:
from mygpoclient.simple import MissingCredentials
except ImportError:
# if MissingCredentials does not yet exist in the installed version of
# mygpoclient, we use an object that can never be raised/caught
MissingCredentials = object()
EPISODE_ACTIONS_BATCH_SIZE = 100
# Database model classes
class SinceValue(object):
__slots__ = {'host': str, 'device_id': str, 'category': int, 'since': int}
# Possible values for the "category" field
PODCASTS, EPISODES = list(range(2))
def __init__(self, host, device_id, category, since=0):
self.host = host
self.device_id = device_id
self.category = category
self.since = since
class SubscribeAction(object):
__slots__ = {'action_type': int, 'url': str}
# Possible values for the "action_type" field
ADD, REMOVE = list(range(2))
def __init__(self, action_type, url):
self.action_type = action_type
self.url = url
@property
def is_add(self):
return self.action_type == self.ADD
@property
def is_remove(self):
return self.action_type == self.REMOVE
@classmethod
def add(cls, url):
return cls(cls.ADD, url)
@classmethod
def remove(cls, url):
return cls(cls.REMOVE, url)
@classmethod
def undo(cls, action):
if action.is_add:
return cls(cls.REMOVE, action.url)
elif action.is_remove:
return cls(cls.ADD, action.url)
raise ValueError('Cannot undo action: %r' % action)
# New entity name for "received" actions
class ReceivedSubscribeAction(SubscribeAction):
pass
class UpdateDeviceAction(object):
__slots__ = {'device_id': str, 'caption': str, 'device_type': str}
def __init__(self, device_id, caption, device_type):
self.device_id = device_id
self.caption = caption
self.device_type = device_type
class EpisodeAction(object):
__slots__ = {'podcast_url': str, 'episode_url': str, 'device_id': str,
'action': str, 'timestamp': int,
'started': int, 'position': int, 'total': int}
def __init__(self, podcast_url, episode_url, device_id,
action, timestamp, started, position, total):
self.podcast_url = podcast_url
self.episode_url = episode_url
self.device_id = device_id
self.action = action
self.timestamp = timestamp
self.started = started
self.position = position
self.total = total
# New entity name for "received" actions
class ReceivedEpisodeAction(EpisodeAction):
pass
class RewrittenUrl(object):
__slots__ = {'old_url': str, 'new_url': str}
def __init__(self, old_url, new_url):
self.old_url = old_url
self.new_url = new_url
# End Database model classes
# Helper class for displaying changes in the UI
class Change(object):
def __init__(self, action, podcast=None):
self.action = action
self.podcast = podcast
@property
def description(self):
if self.action.is_add:
return _('Add %s') % self.action.url
else:
return _('Remove %s') % self.podcast.title
class MygPoClient(object):
STORE_FILE = 'gpodder.net'
FLUSH_TIMEOUT = 60
FLUSH_RETRIES = 3
def __init__(self, config):
self._store = minidb.Store(os.path.join(gpodder.home, self.STORE_FILE))
self._config = config
self._client = None
# Initialize the _client attribute and register with config
self.on_config_changed()
assert self._client is not None
self._config.add_observer(self.on_config_changed)
self._worker_thread = None
atexit.register(self._at_exit)
def create_device(self):
"""Uploads the device changes to the server
This should be called when device settings change
or when the mygpo client functionality is enabled.
"""
# Remove all previous device update actions
self._store.remove(self._store.load(UpdateDeviceAction))
# Insert our new update action
action = UpdateDeviceAction(self.device_id,
self._config.mygpo.device.caption,
self._config.mygpo.device.type)
self._store.save(action)
def get_rewritten_urls(self):
"""Returns a list of rewritten URLs for uploads
This should be called regularly. Every object returned
should be merged into the database, and the old_url
        should be updated to new_url in every podcast.
"""
rewritten_urls = self._store.load(RewrittenUrl)
self._store.remove(rewritten_urls)
return rewritten_urls
def process_episode_actions(self, find_episode, on_updated=None):
"""Process received episode actions
The parameter "find_episode" should be a function accepting
two parameters (podcast_url and episode_url). It will be used
to get an episode object that needs to be updated. It should
return None if the requested episode does not exist.
The optional callback "on_updated" should accept a single
parameter (the episode object) and will be called whenever
the episode data is changed in some way.
"""
logger.debug('Processing received episode actions...')
for action in self._store.load(ReceivedEpisodeAction):
if action.action not in ('play', 'delete'):
# Ignore all other action types for now
continue
episode = find_episode(action.podcast_url, action.episode_url)
if episode is None:
# The episode does not exist on this client
continue
if action.action == 'play':
logger.debug('Play action for %s', episode.url)
episode.mark(is_played=True)
if (action.timestamp > episode.current_position_updated
and action.position is not None):
logger.debug('Updating position for %s', episode.url)
episode.current_position = action.position
episode.current_position_updated = action.timestamp
if action.total:
logger.debug('Updating total time for %s', episode.url)
episode.total_time = action.total
episode.save()
if on_updated is not None:
on_updated(episode)
elif action.action == 'delete':
if not episode.was_downloaded(and_exists=True):
# Set the episode to a "deleted" state
logger.debug('Marking as deleted: %s', episode.url)
episode.delete_from_disk()
episode.save()
if on_updated is not None:
on_updated(episode)
# Remove all received episode actions
self._store.delete(ReceivedEpisodeAction)
self._store.commit()
logger.debug('Received episode actions processed.')
def get_received_actions(self):
"""Returns a list of ReceivedSubscribeAction objects
The list might be empty. All these actions have to
be processed. The user should confirm which of these
actions should be taken, the rest should be rejected.
Use confirm_received_actions and reject_received_actions
to return and finalize the actions received by this
method in order to not receive duplicate actions.
"""
return self._store.load(ReceivedSubscribeAction)
def confirm_received_actions(self, actions):
"""Confirm that a list of actions has been processed
The UI should call this with a list of actions that
have been accepted by the user and processed by the
podcast backend.
"""
# Simply remove the received actions from the queue
self._store.remove(actions)
def reject_received_actions(self, actions):
"""Reject (undo) a list of ReceivedSubscribeAction objects
The UI should call this with a list of actions that
have been rejected by the user. A reversed set of
actions will be uploaded to the server so that the
state on the server matches the state on the client.
"""
# Create "undo" actions for received subscriptions
self._store.save(SubscribeAction.undo(a) for a in actions)
self.flush()
# After we've handled the reverse-actions, clean up
self._store.remove(actions)
@property
def host(self):
return self._config.mygpo.server
@property
def device_id(self):
return self._config.mygpo.device.uid
def can_access_webservice(self):
return self._config.mygpo.enabled and \
self._config.mygpo.username and \
self._config.mygpo.device.uid
def set_subscriptions(self, urls):
if self.can_access_webservice():
logger.debug('Uploading (overwriting) subscriptions...')
self._client.put_subscriptions(self.device_id, urls)
logger.debug('Subscription upload done.')
else:
raise Exception('Webservice access not enabled')
def _convert_played_episode(self, episode, start, end, total):
return EpisodeAction(episode.channel.url,
episode.url, self.device_id, 'play',
int(time.time()), start, end, total)
def _convert_episode(self, episode, action):
return EpisodeAction(episode.channel.url,
episode.url, self.device_id, action,
int(time.time()), None, None, None)
def on_delete(self, episodes):
logger.debug('Storing %d episode delete actions', len(episodes))
self._store.save(self._convert_episode(e, 'delete') for e in episodes)
def on_download(self, episodes):
logger.debug('Storing %d episode download actions', len(episodes))
self._store.save(self._convert_episode(e, 'download') for e in episodes)
def on_playback_full(self, episode, start, end, total):
logger.debug('Storing full episode playback action')
self._store.save(self._convert_played_episode(episode, start, end, total))
def on_playback(self, episodes):
logger.debug('Storing %d episode playback actions', len(episodes))
self._store.save(self._convert_episode(e, 'play') for e in episodes)
def on_subscribe(self, urls):
# Cancel previously-inserted "remove" actions
self._store.remove(SubscribeAction.remove(url) for url in urls)
# Insert new "add" actions
self._store.save(SubscribeAction.add(url) for url in urls)
self.flush()
def on_unsubscribe(self, urls):
# Cancel previously-inserted "add" actions
self._store.remove(SubscribeAction.add(url) for url in urls)
# Insert new "remove" actions
self._store.save(SubscribeAction.remove(url) for url in urls)
self.flush()
def _at_exit(self):
self._worker_proc(forced=True)
self._store.commit()
self._store.close()
def _worker_proc(self, forced=False):
if not forced:
# Store the current contents of the queue database
self._store.commit()
logger.debug('Worker thread waiting for timeout')
time.sleep(self.FLUSH_TIMEOUT)
# Only work when enabled, UID set and allowed to work
if self.can_access_webservice() and \
(self._worker_thread is not None or forced):
self._worker_thread = None
logger.debug('Worker thread starting to work...')
for retry in range(self.FLUSH_RETRIES):
must_retry = False
if retry:
logger.debug('Retrying flush queue...')
# Update the device first, so it can be created if new
for action in self._store.load(UpdateDeviceAction):
if self.update_device(action):
self._store.remove(action)
else:
must_retry = True
# Upload podcast subscription actions
actions = self._store.load(SubscribeAction)
if self.synchronize_subscriptions(actions):
self._store.remove(actions)
else:
must_retry = True
# Upload episode actions
actions = self._store.load(EpisodeAction)
if self.synchronize_episodes(actions):
self._store.remove(actions)
else:
must_retry = True
if not must_retry or not self.can_access_webservice():
# No more pending actions, or no longer enabled.
# Ready to quit.
break
logger.debug('Worker thread finished.')
else:
logger.info('Worker thread may not execute (disabled).')
# Store the current contents of the queue database
self._store.commit()
def flush(self, now=False):
if not self.can_access_webservice():
logger.debug('Flush requested, but sync disabled.')
return
if self._worker_thread is None or now:
if now:
logger.debug('Flushing NOW.')
else:
logger.debug('Flush requested.')
self._worker_thread = util.run_in_background(lambda: self._worker_proc(now), True)
else:
logger.debug('Flush requested, already waiting.')
def on_config_changed(self, name=None, old_value=None, new_value=None):
if name in ('mygpo.username', 'mygpo.password', 'mygpo.server') \
or self._client is None:
self._client = api.MygPodderClient(self._config.mygpo.username,
self._config.mygpo.password, self._config.mygpo.server)
logger.info('Reloading settings.')
elif name.startswith('mygpo.device.'):
# Update or create the device
self.create_device()
def synchronize_episodes(self, actions):
logger.debug('Starting episode status sync.')
def convert_to_api(action):
dt = datetime.datetime.utcfromtimestamp(action.timestamp)
action_ts = mygpoutil.datetime_to_iso8601(dt)
return api.EpisodeAction(action.podcast_url,
action.episode_url, action.action,
action.device_id, action_ts,
action.started, action.position, action.total)
def convert_from_api(action):
dt = mygpoutil.iso8601_to_datetime(action.timestamp)
action_ts = calendar.timegm(dt.timetuple())
return ReceivedEpisodeAction(action.podcast,
action.episode, action.device,
action.action, action_ts,
action.started, action.position, action.total)
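        # Illustrative note (not part of the original sources): both helpers
        # above agree on UTC - an integer Unix timestamp is serialized via
        # utcfromtimestamp() to an ISO-8601 string for upload, and an ISO-8601
        # string is turned back into a Unix timestamp with calendar.timegm()
        # on download.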
try:
# Load the "since" value from the database
since_o = self._store.get(SinceValue, host=self.host,
device_id=self.device_id,
category=SinceValue.EPISODES)
# Use a default since object for the first-time case
if since_o is None:
since_o = SinceValue(self.host, self.device_id, SinceValue.EPISODES)
# Step 1: Download Episode actions
try:
changes = self._client.download_episode_actions(since_o.since)
received_actions = [convert_from_api(a) for a in changes.actions]
logger.debug('Received %d episode actions', len(received_actions))
self._store.save(received_actions)
# Save the "since" value for later use
self._store.update(since_o, since=changes.since)
except (MissingCredentials, mygpoclient.http.Unauthorized):
# handle outside
raise
except Exception:
logger.warning('Exception while polling for episodes.', exc_info=True)
# Step 2: Upload Episode actions
            # Uploads are done in batches; uploading can resume if only some
            # batches were uploaded; this also avoids empty uploads
for lower in range(0, len(actions), EPISODE_ACTIONS_BATCH_SIZE):
batch = actions[lower:(lower + EPISODE_ACTIONS_BATCH_SIZE)]
# Convert actions to the mygpoclient format for uploading
episode_actions = [convert_to_api(a) for a in batch]
# Upload the episode actions
self._client.upload_episode_actions(episode_actions)
# Actions have been uploaded to the server - remove them
self._store.remove(batch)
logger.debug('Episode actions have been uploaded to the server.')
return True
except (MissingCredentials, mygpoclient.http.Unauthorized):
logger.warning('Invalid credentials. Disabling gpodder.net.')
self._config.mygpo.enabled = False
return False
except Exception as e:
logger.error('Cannot upload episode actions: %s', str(e), exc_info=True)
return False
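    # Illustrative sketch (not part of the original sources): the upload loop
    # above slices pending actions into EPISODE_ACTIONS_BATCH_SIZE chunks, so
    # with a hypothetical batch size of 30 and 70 queued actions:
    #
    #     range(0, 70, 30)    # -> batches starting at 0, 30, 60
    #     actions[0:30], actions[30:60], actions[60:70]
    #
    # and each batch is removed from the store only after its upload succeeds,
    # so an interrupted sync resumes with the remaining batches.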
def synchronize_subscriptions(self, actions):
logger.debug('Starting subscription sync.')
try:
# Load the "since" value from the database
since_o = self._store.get(SinceValue, host=self.host,
device_id=self.device_id,
category=SinceValue.PODCASTS)
# Use a default since object for the first-time case
if since_o is None:
since_o = SinceValue(self.host, self.device_id, SinceValue.PODCASTS)
# Step 1: Pull updates from the server and notify the frontend
result = self._client.pull_subscriptions(self.device_id, since_o.since)
# Update the "since" value in the database
self._store.update(since_o, since=result.since)
# Store received actions for later retrieval (and in case we
# have outdated actions in the database, simply remove them)
for url in result.add:
logger.debug('Received add action: %s', url)
self._store.remove(ReceivedSubscribeAction.remove(url))
self._store.remove(ReceivedSubscribeAction.add(url))
self._store.save(ReceivedSubscribeAction.add(url))
for url in result.remove:
logger.debug('Received remove action: %s', url)
self._store.remove(ReceivedSubscribeAction.add(url))
self._store.remove(ReceivedSubscribeAction.remove(url))
self._store.save(ReceivedSubscribeAction.remove(url))
# Step 2: Push updates to the server and rewrite URLs (if any)
actions = self._store.load(SubscribeAction)
add = [a.url for a in actions if a.is_add]
remove = [a.url for a in actions if a.is_remove]
if add or remove:
logger.debug('Uploading: +%d / -%d', len(add), len(remove))
# Only do a push request if something has changed
result = self._client.update_subscriptions(self.device_id, add, remove)
# Update the "since" value in the database
self._store.update(since_o, since=result.since)
# Store URL rewrites for later retrieval by GUI
for old_url, new_url in result.update_urls:
if new_url:
logger.debug('Rewritten URL: %s', new_url)
self._store.save(RewrittenUrl(old_url, new_url))
# Actions have been uploaded to the server - remove them
self._store.remove(actions)
logger.debug('All actions have been uploaded to the server.')
return True
except (MissingCredentials, mygpoclient.http.Unauthorized):
logger.warning('Invalid credentials. Disabling gpodder.net.')
self._config.mygpo.enabled = False
return False
except Exception as e:
logger.error('Cannot upload subscriptions: %s', str(e), exc_info=True)
return False
def update_device(self, action):
try:
logger.debug('Uploading device settings...')
self._client.update_device_settings(action.device_id,
action.caption, action.device_type)
logger.debug('Device settings uploaded.')
return True
except (MissingCredentials, mygpoclient.http.Unauthorized):
logger.warning('Invalid credentials. Disabling gpodder.net.')
self._config.mygpo.enabled = False
return False
except Exception as e:
logger.error('Cannot update device %s: %s', self.device_id,
str(e), exc_info=True)
return False
def get_devices(self):
result = []
try:
devices = self._client.get_devices()
except (MissingCredentials, mygpoclient.http.Unauthorized):
logger.warning('Invalid credentials. Disabling gpodder.net.')
self._config.mygpo.enabled = False
raise
for d in devices:
result.append((d.device_id, d.caption, d.type))
return result
def open_website(self):
util.open_website('http://' + self._config.mygpo.server)
def get_download_user_subscriptions_url(self):
OPML_URL = self._client.locator.subscriptions_uri()
url = util.url_add_authentication(OPML_URL,
self._config.mygpo.username,
self._config.mygpo.password)
return url
class Directory(object):
def __init__(self):
self.client = public.PublicClient()
def toplist(self):
return [(p.title or p.url, p.url)
for p in self.client.get_toplist()
if p.url]
def search(self, query):
return [(p.title or p.url, p.url)
for p in self.client.search_podcasts(query)
if p.url]
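# Illustrative sketch (not part of the original sources): hypothetical use of
# the Directory wrapper above.
#
#     d = Directory()
#     for title, url in d.search('linux'):
#         print(title, url)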
| 24,149 | Python | .py | 515 | 36.019417 | 94 | 0.622136 | gpodder/gpodder | 1,268 | 204 | 307 | GPL-3.0 | 9/5/2024, 5:10:30 PM (Europe/Amsterdam) |
| 8,281 | vimeo.py | gpodder_gpodder/src/gpodder/vimeo.py |
# -*- coding: utf-8 -*-
#
# gPodder - A media aggregator and podcast client
# Copyright (c) 2005-2018 The gPodder Team
#
# gPodder is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# gPodder is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#
# gpodder.vimeo - Vimeo download magic
# Thomas Perl <thp@gpodder.org>; 2012-01-03
#
import logging
import re
import gpodder
from gpodder import registry, util
_ = gpodder.gettext
logger = logging.getLogger(__name__)
VIMEOCOM_RE = re.compile(r'http[s]?://vimeo\.com/(channels/[^/]+|\d+)$', re.IGNORECASE)
VIMEOCOM_VIDEO_RE = re.compile(r'http[s]?://vimeo.com/channels/(?:[^/])+/(\d+)$', re.IGNORECASE)
MOOGALOOP_RE = re.compile(r'http[s]?://vimeo\.com/moogaloop\.swf\?clip_id=(\d+)$', re.IGNORECASE)
SIGNATURE_RE = re.compile(r'"timestamp":(\d+),"signature":"([^"]+)"')
# List of qualities, from lowest to highest
FILEFORMAT_RANKING = ['270p', '360p', '720p', '1080p']
FORMATS = tuple((x, x) for x in FILEFORMAT_RANKING)
@registry.download_url.register
def vimeo_real_download_url(config, episode, allow_partial):
fmt = config.vimeo.fileformat if config else None
res = get_real_download_url(episode.url, preferred_fileformat=fmt)
return None if res == episode.url else res
def get_real_download_url(url, preferred_fileformat=None):
video_id = get_vimeo_id(url)
if video_id is None:
return url
data_config_url = 'https://player.vimeo.com/video/%s/config' % (video_id,)
def get_urls(data_config_url):
data_config = util.urlopen(data_config_url).json()
for fileinfo in list(data_config['request']['files'].values()):
if not isinstance(fileinfo, list):
continue
for item in fileinfo:
yield (item['quality'], item['url'])
fileformat_to_url = dict(get_urls(data_config_url))
if preferred_fileformat is not None and preferred_fileformat in fileformat_to_url:
logger.debug('Picking preferred format: %s', preferred_fileformat)
return fileformat_to_url[preferred_fileformat]
def fileformat_sort_key_func(fileformat):
if fileformat in FILEFORMAT_RANKING:
return FILEFORMAT_RANKING.index(fileformat)
return 0
for fileformat in sorted(fileformat_to_url, key=fileformat_sort_key_func, reverse=True):
logger.debug('Picking best format: %s', fileformat)
return fileformat_to_url[fileformat]
return url
def get_vimeo_id(url):
result = MOOGALOOP_RE.match(url)
if result is not None:
return result.group(1)
result = VIMEOCOM_RE.match(url)
if result is not None:
return result.group(1)
result = VIMEOCOM_VIDEO_RE.match(url)
if result is not None:
return result.group(1)
return None
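# Illustrative sketch (not part of the original module): URL shapes the
# regular expressions above recognize (the URLs are made up).
#
#     get_vimeo_id('http://vimeo.com/moogaloop.swf?clip_id=123')    # -> '123'
#     get_vimeo_id('https://vimeo.com/channels/staffpicks/456')     # -> '456'
#     get_vimeo_id('https://example.com/video')                     # -> None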
def is_video_link(url):
return (get_vimeo_id(url) is not None)
def get_real_channel_url(url):
result = VIMEOCOM_RE.match(url)
if result is not None:
return 'http://vimeo.com/%s/videos/rss' % result.group(1)
return url
def get_real_cover(url):
return None
| 3,574 | Python | .py | 84 | 37.928571 | 97 | 0.702085 | gpodder/gpodder | 1,268 | 204 | 307 | GPL-3.0 | 9/5/2024, 5:10:30 PM (Europe/Amsterdam) |
| 8,282 | download.py | gpodder_gpodder/src/gpodder/download.py |
# -*- coding: utf-8 -*-
#
# gPodder - A media aggregator and podcast client
# Copyright (c) 2005-2018 The gPodder Team
#
# gPodder is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# gPodder is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#
# download.py -- Download queue management
# Thomas Perl <thp@perli.net> 2007-09-15
#
# Based on libwget.py (2005-10-29)
#
import glob
import logging
import mimetypes
import os
import os.path
import shutil
import threading
import time
import urllib.error
from abc import ABC, abstractmethod
import requests
from requests.adapters import HTTPAdapter
from requests.exceptions import ConnectionError, HTTPError, RequestException
from requests.packages.urllib3.exceptions import MaxRetryError
from requests.packages.urllib3.util.retry import Retry
import gpodder
from gpodder import config, registry, util
logger = logging.getLogger(__name__)
_ = gpodder.gettext
REDIRECT_RETRIES = 3
class CustomDownload(ABC):
""" abstract class for custom downloads. DownloadTask call retrieve_resume() on it """
@property
@abstractmethod
def partial_filename(self):
"""
Full path to the temporary file actually being downloaded (downloaders
may not support setting a tempname).
"""
...
@partial_filename.setter
@abstractmethod
def partial_filename(self, val):
...
@abstractmethod
def retrieve_resume(self, tempname, reporthook):
"""
:param str tempname: temporary filename for the download
:param func(number, number, number) reporthook: callback for download progress (count, blockSize, totalSize)
:return dict(str, str), str: (headers, real_url)
"""
return {}, None
class CustomDownloader(ABC):
"""
abstract class for custom downloaders.
DownloadTask calls custom_downloader to get a CustomDownload
"""
@abstractmethod
def custom_downloader(self, config, episode):
"""
if this custom downloader has a custom download method (e.g. youtube-dl),
return a CustomDownload. Else return None
:param config: gpodder config (e.g. to get preferred video format)
:param model.PodcastEpisode episode: episode to download
:return CustomDownload: object used to download the episode
"""
return None
class ContentRange(object):
# Based on:
# http://svn.pythonpaste.org/Paste/WebOb/trunk/webob/byterange.py
#
# Copyright (c) 2007 Ian Bicking and Contributors
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
Represents the Content-Range header
This header is ``start-stop/length``, where stop and length can be
``*`` (represented as None in the attributes).
"""
def __init__(self, start, stop, length):
assert start >= 0, "Bad start: %r" % start
assert stop is None or (stop >= 0 and stop >= start), (
"Bad stop: %r" % stop)
self.start = start
self.stop = stop
self.length = length
def __repr__(self):
return '<%s %s>' % (
self.__class__.__name__,
self)
def __str__(self):
if self.stop is None:
stop = '*'
else:
stop = self.stop + 1
if self.length is None:
length = '*'
else:
length = self.length
return 'bytes %s-%s/%s' % (self.start, stop, length)
def __iter__(self):
"""
Mostly so you can unpack this, like:
start, stop, length = res.content_range
"""
return iter([self.start, self.stop, self.length])
@classmethod
def parse(cls, value):
"""
Parse the header. May return None if it cannot parse.
"""
if value is None:
return None
value = value.strip()
if not value.startswith('bytes '):
# Unparsable
return None
value = value[len('bytes '):].strip()
if '/' not in value:
# Invalid, no length given
return None
startstop, length = value.split('/', 1)
if '-' not in startstop:
# Invalid, no range
return None
start, end = startstop.split('-', 1)
try:
start = int(start)
if end == '*':
end = None
else:
end = int(end)
if length == '*':
length = None
else:
length = int(length)
except ValueError:
# Parse problem
return None
if end is None:
return cls(start, None, length)
else:
return cls(start, end - 1, length)
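# Illustrative sketch (not part of the original sources): how the parser
# above is used when resuming - only the start offset matters to gPodder.
#
#     cr = ContentRange.parse('bytes 1024-2047/4096')
#     cr.start    # -> 1024, compared against the size of the partial file
#     ContentRange.parse('1024-2047/4096')    # -> None (missing "bytes " prefix)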
class DownloadCancelledException(Exception):
pass
class DownloadNoURLException(Exception):
pass
class gPodderDownloadHTTPError(Exception):
def __init__(self, url, error_code, error_message):
self.url = url
self.error_code = error_code
self.error_message = error_message
class DownloadURLOpener:
# Sometimes URLs are not escaped correctly - try to fix them
# (see RFC2396; Section 2.4.3. Excluded US-ASCII Characters)
# FYI: The omission of "%" in the list is to avoid double escaping!
ESCAPE_CHARS = {ord(c): '%%%x' % ord(c) for c in ' <>#"{}|\\^[]`'}
def __init__(self, channel, max_retries=3):
super().__init__()
self.channel = channel
self.max_retries = max_retries
def init_session(self):
""" init a session with our own retry codes + retry count """
        # Allow a few extra retries for redirects, so up to
        # max_retries + REDIRECT_RETRIES attempts are possible in total
        # (e.g. max_retries connect errors plus REDIRECT_RETRIES read errors)
retry_strategy = Retry(
total=self.max_retries + REDIRECT_RETRIES,
connect=self.max_retries,
read=self.max_retries,
redirect=max(REDIRECT_RETRIES, self.max_retries),
status=self.max_retries,
status_forcelist=Retry.RETRY_AFTER_STATUS_CODES.union((408, 418, 504, 598, 599,)))
adapter = HTTPAdapter(max_retries=retry_strategy)
http = requests.Session()
http.mount("https://", adapter)
http.mount("http://", adapter)
return http
# The following is based on Python's urllib.py "URLopener.retrieve"
# Also based on http://mail.python.org/pipermail/python-list/2001-October/110069.html
def retrieve_resume(self, url, filename, reporthook=None, data=None, disable_auth=False):
"""Download files from an URL; return (headers, real_url)
Resumes a download if the local filename exists and
the server supports download resuming.
"""
current_size = 0
tfp = None
headers = {
'User-agent': gpodder.user_agent
}
if (self.channel.auth_username or self.channel.auth_password) and not disable_auth:
logger.debug('Authenticating as "%s"', self.channel.auth_username)
auth = (self.channel.auth_username, self.channel.auth_password)
else:
auth = None
if os.path.exists(filename):
try:
current_size = os.path.getsize(filename)
tfp = open(filename, 'ab')
# If the file exists, then only download the remainder
if current_size > 0:
headers['Range'] = 'bytes=%s-' % (current_size)
except:
logger.warning('Cannot resume download: %s', filename, exc_info=True)
tfp = None
current_size = 0
if tfp is None:
tfp = open(filename, 'wb')
# Fix a problem with bad URLs that are not encoded correctly (bug 549)
url = url.translate(self.ESCAPE_CHARS)
proxies = config._proxies
session = self.init_session()
logger.debug(f"DownloadURLOpener.retrieve_resume(): url: {url}, proxies: {proxies}")
with session.get(url,
headers=headers,
stream=True,
auth=auth,
proxies=proxies,
timeout=gpodder.SOCKET_TIMEOUT) as resp:
try:
resp.raise_for_status()
except HTTPError as e:
if auth is not None:
# Try again without authentication (bug 1296)
return self.retrieve_resume(url, filename, reporthook, data, True)
else:
raise gPodderDownloadHTTPError(url, resp.status_code, str(e))
headers = resp.headers
if current_size > 0:
# We told the server to resume - see if she agrees
# See RFC2616 (206 Partial Content + Section 14.16)
# XXX check status code here, too...
conrange = ContentRange.parse(headers.get('content-range', ''))
if conrange is None or conrange.start != current_size:
# Ok, that did not work. Reset the download
# TODO: seek and truncate if content-range differs from request
tfp.close()
tfp = open(filename, 'wb')
current_size = 0
logger.warning('Cannot resume: Invalid Content-Range (RFC2616).')
result = headers, resp.url
bs = 1024 * 8
size = -1
read = current_size
blocknum = current_size // bs
if reporthook:
if "content-length" in headers:
size = int(headers['content-length']) + current_size
reporthook(blocknum, bs, size)
for block in resp.iter_content(bs):
read += len(block)
tfp.write(block)
blocknum += 1
if reporthook:
reporthook(blocknum, bs, size)
tfp.close()
del tfp
# raise exception if actual size does not match content-length header
if size >= 0 and read < size:
raise urllib.error.ContentTooShortError("retrieval incomplete: got only %i out "
"of %i bytes" % (read, size), result)
return result
# end code based on urllib.py
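# Illustrative sketch (not part of the original sources): hypothetical use of
# the opener above; "channel" stands for a podcast model object providing
# auth_username/auth_password.
#
#     opener = DownloadURLOpener(channel)
#     headers, real_url = opener.retrieve_resume(
#         'http://example.com/episode.mp3', '/tmp/episode.mp3.partial',
#         reporthook=lambda count, bs, total: None)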
class DefaultDownload(CustomDownload):
def __init__(self, config, episode, url):
self._config = config
self.__episode = episode
self._url = url
self.__partial_filename = None
@property
def partial_filename(self):
return self.__partial_filename
@partial_filename.setter
def partial_filename(self, val):
self.__partial_filename = val
def retrieve_resume(self, tempname, reporthook):
url = self._url
logger.info("Downloading %s", url)
max_retries = max(0, self._config.auto.retries)
downloader = DownloadURLOpener(self.__episode.channel, max_retries=max_retries)
self.partial_filename = tempname
# Retry the download on incomplete download (other retries are done by the Retry strategy)
for retry in range(max_retries + 1):
if retry > 0:
logger.info('Retrying download of %s (%d)', url, retry)
time.sleep(1)
try:
headers, real_url = downloader.retrieve_resume(url,
tempname, reporthook=reporthook)
# If we arrive here, the download was successful
break
except urllib.error.ContentTooShortError:
if retry < max_retries:
logger.info('Content too short: %s - will retry.',
url)
continue
raise
return (headers, real_url)
class DefaultDownloader(CustomDownloader):
@staticmethod
def custom_downloader(config, episode):
url = episode.url
# Resolve URL and start downloading the episode
res = registry.download_url.resolve(config, None, episode, False)
if res:
url = res
if url == episode.url:
# don't modify custom urls (#635 - vimeo breaks if * is unescaped)
url = url.strip()
url = util.iri_to_url(url)
return DefaultDownload(config, episode, url)
class DownloadQueueWorker(object):
def __init__(self, queue, exit_callback, continue_check_callback):
self.queue = queue
self.exit_callback = exit_callback
self.continue_check_callback = continue_check_callback
def __repr__(self):
return threading.current_thread().getName()
def run(self):
logger.info('Starting new thread: %s', self)
while True:
if not self.continue_check_callback(self):
return
task = self.queue.get_next() if self.queue.enabled else None
if not task:
logger.info('No more tasks for %s to carry out.', self)
break
logger.info('%s is processing: %s', self, task)
task.run()
task.recycle()
self.exit_callback(self)
class ForceDownloadWorker(object):
def __init__(self, task):
self.task = task
def __repr__(self):
return threading.current_thread().getName()
def run(self):
logger.info('Starting new thread: %s', self)
logger.info('%s is processing: %s', self, self.task)
self.task.run()
self.task.recycle()
class DownloadQueueManager(object):
def __init__(self, config, queue):
self._config = config
self.tasks = queue
self.worker_threads_access = threading.RLock()
self.worker_threads = []
def disable(self):
self.tasks.enabled = False
def enable(self):
self.tasks.enabled = True
self.__spawn_threads()
def __exit_callback(self, worker_thread):
with self.worker_threads_access:
self.worker_threads.remove(worker_thread)
def __continue_check_callback(self, worker_thread):
with self.worker_threads_access:
if len(self.worker_threads) > self._config.limit.downloads.concurrent and \
self._config.limit.downloads.enabled:
self.worker_threads.remove(worker_thread)
return False
else:
return True
def __spawn_threads(self):
"""Spawn new worker threads if necessary
"""
if not self.tasks.enabled:
return
with self.worker_threads_access:
work_count = self.tasks.available_work_count()
if self._config.limit.downloads.enabled:
# always allow at least 1 download
spawn_limit = max(int(self._config.limit.downloads.concurrent), 1)
else:
spawn_limit = self._config.limit.downloads.concurrent_max
running = len(self.worker_threads)
logger.info('%r tasks to do, can start at most %r threads, %r threads currently running', work_count, spawn_limit, running)
for i in range(0, min(work_count, spawn_limit - running)):
# We have to create a new thread here, there's work to do
logger.info('Starting new worker thread.')
worker = DownloadQueueWorker(self.tasks, self.__exit_callback,
self.__continue_check_callback)
self.worker_threads.append(worker)
util.run_in_background(worker.run)
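    # Illustrative worked example (not part of the original sources): with
    # 5 queued tasks, a concurrency limit of 3 and 1 worker already running,
    # min(5, 3 - 1) = 2 new worker threads are spawned above.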
def update_max_downloads(self):
self.__spawn_threads()
def force_start_task(self, task):
with task:
if task.status in (task.QUEUED, task.PAUSED, task.CANCELLED, task.FAILED):
task.status = task.DOWNLOADING
worker = ForceDownloadWorker(task)
util.run_in_background(worker.run)
def queue_task(self, task):
"""Marks a task as queued
"""
self.tasks.queue_task(task)
self.__spawn_threads()
def has_workers(self):
return len(self.worker_threads) > 0
class DownloadTask(object):
"""An object representing the download task of an episode
You can create a new download task like this:
task = DownloadTask(episode, gpodder.config.Config(CONFIGFILE))
task.status = DownloadTask.QUEUED
task.run()
While the download is in progress, you can access its properties:
task.total_size # in bytes
task.progress # from 0.0 to 1.0
task.speed # in bytes per second
str(task) # name of the episode
task.status # current status
task.status_changed # True if the status has been changed (see below)
task.url # URL of the episode being downloaded
task.podcast_url # URL of the podcast this download belongs to
task.episode # Episode object of this task
You can cancel a running download task by setting its status:
with task:
task.status = DownloadTask.CANCELLING
The task will then abort as soon as possible (due to the nature
of downloading data, this can take a while when the Internet is
busy).
The "status_changed" attribute gets set to True every time the
"status" attribute changes its value. After you get the value of
the "status_changed" attribute, it is always reset to False:
if task.status_changed:
new_status = task.status
# .. update the UI accordingly ..
Obviously, this also means that you must have at most *one*
place in your UI code where you check for status changes and
broadcast the status updates from there.
While the download is taking place and after the .run() method
has finished, you can get the final status to check if the download
was successful:
if task.status == DownloadTask.DONE:
# .. everything ok ..
elif task.status == DownloadTask.FAILED:
# .. an error happened, and the
# error_message attribute is set ..
        print(task.error_message)
elif task.status == DownloadTask.PAUSED:
# .. user paused the download ..
elif task.status == DownloadTask.CANCELLED:
# .. user cancelled the download ..
The difference between cancelling and pausing a DownloadTask is
that the temporary file gets deleted when cancelling, but does
not get deleted when pausing.
Be sure to call .removed_from_list() on this task when removing
it from the UI, so that it can carry out any pending clean-up
actions (e.g. removing the temporary file when the task has not
finished successfully; i.e. task.status != DownloadTask.DONE).
The UI can call the method "notify_as_finished()" to determine if
    this episode still has to be shown as a "finished" download
in a notification window. This will return True only the first time
it is called when the status is DONE. After returning True once,
it will always return False afterwards.
The same thing works for failed downloads ("notify_as_failed()").
"""
# Possible states this download task can be in
STATUS_MESSAGE = (_('Queued'), _('Queued'), _('Downloading'),
_('Finished'), _('Failed'), _('Cancelling'), _('Cancelled'), _('Pausing'), _('Paused'))
(NEW, QUEUED, DOWNLOADING, DONE, FAILED, CANCELLING, CANCELLED, PAUSING, PAUSED) = list(range(9))
# Whether this task represents a file download or a device sync operation
ACTIVITY_DOWNLOAD, ACTIVITY_SYNCHRONIZE = list(range(2))
# Minimum time between progress updates (in seconds)
MIN_TIME_BETWEEN_UPDATES = 1.
def __str__(self):
return self.__episode.title
def __enter__(self):
return self.__lock.acquire()
def __exit__(self, exception_type, value, traceback):
self.__lock.release()
def __get_status(self):
return self.__status
def __set_status(self, status):
if status != self.__status:
self.__status_changed = True
self.__status = status
status = property(fget=__get_status, fset=__set_status)
def __get_status_changed(self):
if self.__status_changed:
self.__status_changed = False
return True
else:
return False
status_changed = property(fget=__get_status_changed)
def __get_activity(self):
return self.__activity
def __set_activity(self, activity):
self.__activity = activity
activity = property(fget=__get_activity, fset=__set_activity)
def __get_url(self):
return self.__episode.url
url = property(fget=__get_url)
def __get_podcast_url(self):
return self.__episode.channel.url
podcast_url = property(fget=__get_podcast_url)
def __get_episode(self):
return self.__episode
episode = property(fget=__get_episode)
def __get_downloader(self):
return self.__downloader
def __set_downloader(self, downloader):
# modifying the downloader will only have effect before the download is started
self.__downloader = downloader
downloader = property(fget=__get_downloader, fset=__set_downloader)
def can_queue(self):
return self.status in (self.CANCELLED, self.PAUSED, self.FAILED)
def unpause(self):
with self:
# Resume a downloading task that was transitioning to paused
if self.status == self.PAUSING:
self.status = self.DOWNLOADING
def can_pause(self):
return self.status in (self.DOWNLOADING, self.QUEUED)
def pause(self):
with self:
# Pause a queued download
if self.status == self.QUEUED:
self.status = self.PAUSED
# Request pause of a running download
elif self.status == self.DOWNLOADING:
self.status = self.PAUSING
# download rate limited tasks sleep and take longer to transition from the PAUSING state to the PAUSED state
def can_cancel(self):
return self.status in (self.DOWNLOADING, self.QUEUED, self.PAUSED, self.FAILED)
def cancel(self):
with self:
# Cancelling directly is allowed if the task isn't currently downloading
if self.status in (self.QUEUED, self.PAUSED, self.FAILED):
self.status = self.CANCELLING
# Call run, so the partial file gets deleted, and task recycled
self.run()
# Otherwise request cancellation
elif self.status == self.DOWNLOADING:
self.status = self.CANCELLING
def can_remove(self):
return self.status in (self.CANCELLED, self.FAILED, self.DONE)
def delete_partial_files(self):
temporary_files = [self.tempname]
# youtube-dl creates .partial.* files for adaptive formats
temporary_files += glob.glob('%s.*' % self.tempname)
for tempfile in temporary_files:
util.delete_file(tempfile)
def removed_from_list(self):
if self.status != self.DONE:
self.delete_partial_files()
def __init__(self, episode, config, downloader=None):
assert episode.download_task is None
self.__lock = threading.RLock()
self.__status = DownloadTask.NEW
self.__activity = DownloadTask.ACTIVITY_DOWNLOAD
self.__status_changed = True
self.__episode = episode
self._config = config
# specify a custom downloader to be used for this download
self.__downloader = downloader
# Create the target filename and save it in the database
self.filename = self.__episode.local_filename(create=True)
self.tempname = self.filename + '.partial'
self.total_size = self.__episode.file_size
self.speed = 0.0
self.progress = 0.0
self.error_message = None
self.custom_downloader = None
# Have we already shown this task in a notification?
self._notification_shown = False
# Variables for speed limit and speed calculation
self.__start_time = 0
self.__start_blocks = 0
self.__limit_rate_value = self._config.limit.bandwidth.kbps
self.__limit_rate = self._config.limit.bandwidth.enabled
# Progress update functions
self._progress_updated = None
self._last_progress_updated = 0.
# If the tempname already exists, set progress accordingly
if os.path.exists(self.tempname):
try:
already_downloaded = os.path.getsize(self.tempname)
if self.total_size > 0:
self.progress = max(0.0, min(1.0, already_downloaded / self.total_size))
except OSError as os_error:
                logger.error('Cannot get size of %s: %s', self.tempname, os_error)
else:
# "touch self.tempname", so we also get partial
# files for resuming when the file is queued
open(self.tempname, 'w').close()
# Store a reference to this task in the episode
episode.download_task = self
def reuse(self):
if not os.path.exists(self.tempname):
# partial file was deleted when cancelled, recreate it
open(self.tempname, 'w').close()
def notify_as_finished(self):
if self.status == DownloadTask.DONE:
if self._notification_shown:
return False
else:
self._notification_shown = True
return True
return False
def notify_as_failed(self):
if self.status == DownloadTask.FAILED:
if self._notification_shown:
return False
else:
self._notification_shown = True
return True
return False
def add_progress_callback(self, callback):
self._progress_updated = callback
def status_updated(self, count, blockSize, totalSize):
# We see a different "total size" while downloading,
# so correct the total size variable in the thread
if totalSize != self.total_size and totalSize > 0:
self.total_size = float(totalSize)
if self.__episode.file_size != self.total_size:
logger.debug('Updating file size of %s to %s',
self.filename, self.total_size)
self.__episode.file_size = self.total_size
self.__episode.save()
if self.total_size > 0:
self.progress = max(0.0, min(1.0, count * blockSize / self.total_size))
if self._progress_updated is not None:
diff = time.time() - self._last_progress_updated
if diff > self.MIN_TIME_BETWEEN_UPDATES or self.progress == 1.:
self._progress_updated(self.progress)
self._last_progress_updated = time.time()
self.calculate_speed(count, blockSize)
if self.status == DownloadTask.CANCELLING:
raise DownloadCancelledException()
if self.status == DownloadTask.PAUSING:
raise DownloadCancelledException()
def calculate_speed(self, count, blockSize):
if count % 5 == 0:
now = time.time()
if self.__start_time > 0:
# Has rate limiting been enabled or disabled?
if self.__limit_rate != self._config.limit.bandwidth.enabled:
# If it has been enabled then reset base time and block count
if self._config.limit.bandwidth.enabled:
self.__start_time = now
self.__start_blocks = count
self.__limit_rate = self._config.limit.bandwidth.enabled
# Has the rate been changed and are we currently limiting?
if self.__limit_rate_value != self._config.limit.bandwidth.kbps and self.__limit_rate:
self.__start_time = now
self.__start_blocks = count
self.__limit_rate_value = self._config.limit.bandwidth.kbps
passed = now - self.__start_time
if passed > 0:
speed = ((count - self.__start_blocks) * blockSize) / passed
else:
speed = 0
else:
self.__start_time = now
self.__start_blocks = count
passed = now - self.__start_time
speed = count * blockSize
self.speed = float(speed)
if self._config.limit.bandwidth.enabled and speed > self._config.limit.bandwidth.kbps:
# calculate the time that should have passed to reach
# the desired download rate and wait if necessary
should_have_passed = (count - self.__start_blocks) * blockSize / (self._config.limit.bandwidth.kbps * 1024.0)
if should_have_passed > passed:
# sleep a maximum of 10 seconds to not cause time-outs
delay = min(10.0, float(should_have_passed - passed))
time.sleep(delay)
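    # Illustrative worked example (not part of the original sources): with a
    # hypothetical 100 kB/s limit (102,400 B/s) and 8 kB blocks, 200 blocks
    # since the rate window started (1,638,400 bytes) "should" take 16 s; if
    # only 10 s have passed, the code above sleeps min(10.0, 16 - 10) = 6 s.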
def recycle(self):
if self.status not in (self.FAILED, self.PAUSED):
self.episode.download_task = None
def set_episode_download_task(self):
if not self.episode.download_task:
self.episode.download_task = self
def run(self):
# Speed calculation (re-)starts here
self.__start_time = 0
self.__start_blocks = 0
# If the download has already been cancelled/paused, skip it
with self:
if self.status == DownloadTask.CANCELLING:
self.status = DownloadTask.CANCELLED
self.__episode._download_error = None
self.delete_partial_files()
self.progress = 0.0
self.speed = 0.0
self.recycle()
return False
if self.status == DownloadTask.PAUSING:
self.status = DownloadTask.PAUSED
return False
# We only start this download if its status is downloading
if self.status != DownloadTask.DOWNLOADING:
return False
# We are downloading this file right now
self._notification_shown = False
# Restore a reference to this task in the episode
# when running a recycled task following a pause or failed
# see #649
self.set_episode_download_task()
url = self.__episode.url
result = DownloadTask.DOWNLOADING
try:
if url == '':
raise DownloadNoURLException()
if self.downloader:
downloader = self.downloader.custom_downloader(self._config, self.episode)
else:
downloader = registry.custom_downloader.resolve(self._config, None, self.episode)
if downloader:
logger.info('Downloading %s with %s', url, downloader)
else:
downloader = DefaultDownloader.custom_downloader(self._config, self.episode)
self.custom_downloader = downloader
headers, real_url = downloader.retrieve_resume(self.tempname, self.status_updated)
new_mimetype = headers.get('content-type', self.__episode.mime_type)
old_mimetype = self.__episode.mime_type
_basename, ext = os.path.splitext(self.filename)
if new_mimetype != old_mimetype or util.wrong_extension(ext):
logger.info('Updating mime type: %s => %s', old_mimetype, new_mimetype)
old_extension = self.__episode.extension()
self.__episode.mime_type = new_mimetype
# don't call local_filename because we'll get the old download name
new_extension = self.__episode.extension(may_call_local_filename=False)
# If the desired filename extension changed due to the new
# mimetype, we force an update of the local filename to fix the
# extension.
if old_extension != new_extension or util.wrong_extension(ext):
self.filename = self.__episode.local_filename(create=True, force_update=True)
# In some cases, the redirect of a URL causes the real filename to
# be revealed in the final URL (e.g. http://gpodder.org/bug/1423)
if real_url != url and not util.is_known_redirecter(real_url):
realname, realext = util.filename_from_url(real_url)
# Only update from redirect if the redirected-to filename has
# a proper extension (this is needed for e.g. YouTube)
if not util.wrong_extension(realext):
real_filename = ''.join((realname, realext))
self.filename = self.__episode.local_filename(create=True,
force_update=True, template=real_filename)
logger.info('Download was redirected (%s). New filename: %s',
real_url, os.path.basename(self.filename))
            # Look at the Content-Disposition header; use it if available
disposition_filename = util.get_header_param(headers, 'filename', 'content-disposition')
# Some servers do send the content-disposition header, but provide
# an empty filename, resulting in an empty string here (bug 1440)
if disposition_filename is not None and disposition_filename != '':
# The server specifies a download filename - try to use it
# filename_from_url to remove query string; see #591
fn, ext = util.filename_from_url(disposition_filename)
logger.debug("converting disposition filename '%s' to local filename '%s%s'", disposition_filename, fn, ext)
disposition_filename = fn + ext
self.filename = self.__episode.local_filename(create=True,
force_update=True, template=disposition_filename)
new_mimetype, encoding = mimetypes.guess_type(self.filename)
if new_mimetype is not None:
logger.info('Using content-disposition mimetype: %s',
new_mimetype)
self.__episode.mime_type = new_mimetype
# Re-evaluate filename and tempname to take care of podcast renames
# while downloads are running (which will change both file names)
self.filename = self.__episode.local_filename(create=False)
self.tempname = os.path.join(os.path.dirname(self.filename),
os.path.basename(self.tempname))
shutil.move(self.tempname, self.filename)
# Model- and database-related updates after a download has finished
self.__episode.on_downloaded(self.filename)
except DownloadCancelledException:
logger.info('Download has been cancelled/paused: %s', self)
if self.status == DownloadTask.CANCELLING:
self.__episode._download_error = None
self.delete_partial_files()
self.progress = 0.0
self.speed = 0.0
result = DownloadTask.CANCELLED
except DownloadNoURLException:
result = DownloadTask.FAILED
self.error_message = _('Episode has no URL to download')
except urllib.error.ContentTooShortError:
result = DownloadTask.FAILED
self.error_message = _('Missing content from server')
except ConnectionError as ce:
# special case request exception
result = DownloadTask.FAILED
logger.error('Download failed: %s', str(ce))
d = {'host': ce.args[0].pool.host, 'port': ce.args[0].pool.port}
self.error_message = _("Couldn't connect to server %(host)s:%(port)s" % d)
except RequestException as re:
# extract MaxRetryError to shorten the exception message
if isinstance(re.args[0], MaxRetryError):
re = re.args[0]
logger.error('%s while downloading "%s"', str(re),
self.__episode.title)
result = DownloadTask.FAILED
d = {'error': str(re)}
self.error_message = _('Request Error: %(error)s') % d
except IOError as ioe:
logger.error('%s while downloading "%s": %s', ioe.strerror,
self.__episode.title, ioe.filename)
result = DownloadTask.FAILED
d = {'error': ioe.strerror, 'filename': ioe.filename}
self.error_message = _('I/O Error: %(error)s: %(filename)s') % d
except gPodderDownloadHTTPError as gdhe:
logger.error('HTTP %s while downloading "%s": %s',
gdhe.error_code, self.__episode.title, gdhe.error_message)
result = DownloadTask.FAILED
d = {'code': gdhe.error_code, 'message': gdhe.error_message}
self.error_message = _('HTTP Error %(code)s: %(message)s') % d
except Exception as e:
result = DownloadTask.FAILED
logger.error('Download failed: %s', str(e), exc_info=True)
self.error_message = _('Error: %s') % (str(e),)
with self:
if result == DownloadTask.DOWNLOADING:
# Everything went well - we're done (even if the task was cancelled/paused,
# since it's finished we might as well mark it done)
self.status = DownloadTask.DONE
if self.total_size <= 0:
self.total_size = util.calculate_size(self.filename)
logger.info('Total size updated to %d', self.total_size)
self.progress = 1.0
gpodder.user_extensions.on_episode_downloaded(self.__episode)
return True
self.speed = 0.0
if result == DownloadTask.FAILED:
self.status = DownloadTask.FAILED
self.__episode._download_error = self.error_message
# cancelled/paused -- update state to mark it as safe to manipulate this task again
elif self.status == DownloadTask.PAUSING:
self.status = DownloadTask.PAUSED
elif self.status == DownloadTask.CANCELLING:
self.status = DownloadTask.CANCELLED
# We finished, but not successfully (at least not really)
return False
| 40,424 | Python | .py | 862 | 35.87703 | 135 | 0.607268 | gpodder/gpodder | 1,268 | 204 | 307 | GPL-3.0 | 9/5/2024, 5:10:30 PM (Europe/Amsterdam) |
| 8,283 | util.py | gpodder_gpodder/src/gpodder/util.py |
# -*- coding: utf-8 -*-
#
# gPodder - A media aggregator and podcast client
# Copyright (c) 2005-2018 The gPodder Team
# Copyright (c) 2011 Neal H. Walfield
#
# gPodder is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# gPodder is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#
# util.py -- Misc utility functions
# Thomas Perl <thp@perli.net> 2007-08-04
#
"""Miscellaneous helper functions for gPodder
This module provides helper and utility functions for gPodder that
are not tied to any specific part of gPodder.
"""
import collections
import datetime
import email
import glob
import http.client
import itertools
import locale
import logging
import mimetypes
import os
import os.path
import platform
import re
import shlex
import shutil
import socket
import stat
import string
import subprocess
import sys
import threading
import time
import urllib.error
import urllib.parse
import webbrowser
from html.entities import entitydefs, name2codepoint
from html.parser import HTMLParser
import requests
import requests.exceptions
from requests.packages.urllib3.util.retry import Retry
import gpodder
logger = logging.getLogger(__name__)
try:
import html5lib
except ImportError:
logger.warning("html5lib was not found, fall-back to HTMLParser")
html5lib = None
if gpodder.ui.win32:
try:
import gpodder.utilwin32ctypes as win32file
except ImportError:
logger.warning('Running on Win32: utilwin32ctypes cannot be loaded')
win32file = None
_ = gpodder.gettext
N_ = gpodder.ngettext
try:
locale.setlocale(locale.LC_ALL, '')
except Exception as e:
logger.warning('Cannot set locale (%s)', e, exc_info=True)
# Native filesystem encoding detection
encoding = sys.getfilesystemencoding()
if encoding is None:
if 'LANG' in os.environ and '.' in os.environ['LANG']:
lang = os.environ['LANG']
(language, encoding) = lang.rsplit('.', 1)
logger.info('Detected encoding: %s', encoding)
elif gpodder.ui.win32:
# To quote http://docs.python.org/howto/unicode.html:
# ,,on Windows, Python uses the name "mbcs" to refer
# to whatever the currently configured encoding is``
encoding = 'mbcs'
else:
encoding = 'iso-8859-15'
logger.info('Assuming encoding: ISO-8859-15 ($LANG not set).')
# Filename / folder name sanitization
def _sanitize_char(c):
if c in string.whitespace:
return b' '
elif c in ',-.()':
return c.encode('utf-8')
elif c in string.punctuation or ord(c) <= 31 or ord(c) >= 127:
return b'_'
return c.encode('utf-8')
SANITIZATION_TABLE = b''.join(map(_sanitize_char, list(map(chr, list(range(256))))))
del _sanitize_char
_MIME_TYPE_LIST = [
('.aac', 'audio/aac'),
('.axa', 'audio/annodex'),
('.flac', 'audio/flac'),
('.m4b', 'audio/m4b'),
('.m4a', 'audio/mp4'),
('.mp3', 'audio/mpeg'),
('.spx', 'audio/ogg'),
('.oga', 'audio/ogg'),
('.ogg', 'audio/ogg'),
('.wma', 'audio/x-ms-wma'),
('.3gp', 'video/3gpp'),
('.axv', 'video/annodex'),
('.divx', 'video/divx'),
('.m4v', 'video/m4v'),
('.mp4', 'video/mp4'),
('.ogv', 'video/ogg'),
('.mov', 'video/quicktime'),
('.flv', 'video/x-flv'),
('.mkv', 'video/x-matroska'),
('.wmv', 'video/x-ms-wmv'),
('.opus', 'audio/opus'),
('.webm', 'video/webm'),
('.webm', 'audio/webm'),
]
_MIME_TYPES = {k: v for v, k in _MIME_TYPE_LIST}
_MIME_TYPES_EXT = dict(_MIME_TYPE_LIST)
def is_absolute_url(url):
"""
Check if url is an absolute url (i.e. has a scheme)
"""
try:
parsed = urllib.parse.urlparse(url)
# fix #1190: when parsing a windows path, scheme=drive_letter, path=\rest_of_path
return parsed.scheme and not parsed.path.startswith("\\")
except ValueError:
return False
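# Illustrative sketch (not part of the original module):
#
#     is_absolute_url('http://example.com/feed.xml')    # -> truthy ('http')
#     is_absolute_url('C:\\Users\\me\\cast.mp3')        # -> False (see #1190)
#     is_absolute_url('relative/path.mp3')              # -> falsy ('')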
def new_gio_file(path):
"""
Create a new Gio.File given a path or uri
"""
from gi.repository import Gio
if is_absolute_url(path):
return Gio.File.new_for_uri(path)
else:
return Gio.File.new_for_path(path)
def make_directory(path):
"""
Tries to create a directory if it does not exist already.
Returns True if the directory exists after the function
call, False otherwise.
"""
from gi.repository import Gio, GLib
if not isinstance(path, Gio.File):
path = new_gio_file(path)
if path.query_exists():
return True
try:
path.make_directory_with_parents()
except GLib.Error as err:
# The sync might be multithreaded, so directories can be created by other threads
if not err.matches(Gio.io_error_quark(), Gio.IOErrorEnum.EXISTS):
logger.warning('Could not create directory %s: %s', path.get_uri(), err.message)
return False
return True
def normalize_feed_url(url):
"""
Converts any URL to http:// or ftp:// so that it can be
used with "wget". If the URL cannot be converted (invalid
or unknown scheme), "None" is returned.
This will also normalize feed:// and itpc:// to http://.
>>> normalize_feed_url('itpc://example.org/podcast.rss')
'http://example.org/podcast.rss'
If no URL scheme is defined (e.g. "curry.com"), we will
simply assume the user intends to add a http:// feed.
>>> normalize_feed_url('curry.com')
'http://curry.com/'
There are even some more shortcuts for advanced users
and lazy typists (see the source for details).
>>> normalize_feed_url('fb:43FPodcast')
'http://feeds.feedburner.com/43FPodcast'
It will also take care of converting the domain name to
all-lowercase (because domains are not case sensitive):
>>> normalize_feed_url('http://Example.COM/')
'http://example.com/'
Some other minimalistic changes are also taken care of,
e.g. a ? with an empty query is removed:
>>> normalize_feed_url('http://example.org/test?')
'http://example.org/test'
Username and password in the URL must not be affected
by URL normalization (see gPodder bug 1942):
>>> normalize_feed_url('http://UserName:PassWord@Example.com/')
'http://UserName:PassWord@example.com/'
"""
if not url or len(url) < 8:
return None
    # Remove leading and trailing whitespace; if the URL still contains
    # whitespace after str.strip(), consider it invalid and return None
url = url.strip()
if ' ' in url:
return None
# This is a list of prefixes that you can use to minimize the amount of
# keystrokes that you have to use.
# Feel free to suggest other useful prefixes, and I'll add them here.
PREFIXES = {
'fb:': 'http://feeds.feedburner.com/%s',
'yt:': 'http://www.youtube.com/rss/user/%s/videos.rss',
'sc:': 'https://soundcloud.com/%s',
# YouTube playlists. To get a list of playlists per-user, use:
# https://gdata.youtube.com/feeds/api/users/<username>/playlists
'ytpl:': 'http://gdata.youtube.com/feeds/api/playlists/%s',
}
for prefix, expansion in PREFIXES.items():
if url.startswith(prefix):
url = expansion % (url[len(prefix):],)
break
# Assume HTTP for URLs without scheme
if '://' not in url:
url = 'http://' + url
scheme, netloc, path, query, fragment = urllib.parse.urlsplit(url)
# Domain name is case insensitive, but username/password is not (bug 1942)
if '@' in netloc:
authentication, netloc = netloc.rsplit('@', 1)
netloc = '@'.join((authentication, netloc.lower()))
else:
netloc = netloc.lower()
# Schemes and domain names are case insensitive
scheme = scheme.lower()
# Normalize empty paths to "/"
if path == '':
path = '/'
# feed://, itpc:// and itms:// are really http://
if scheme in ('feed', 'itpc', 'itms'):
scheme = 'http'
if scheme not in ('http', 'https', 'ftp', 'file'):
return None
# urlunsplit might return "a slightly different, but equivalent URL"
return urllib.parse.urlunsplit((scheme, netloc, path, query, fragment))
def username_password_from_url(url):
r"""
Returns a tuple (username,password) containing authentication
data from the specified URL or (None,None) if no authentication
data can be found in the URL.
See Section 3.1 of RFC 1738 (http://www.ietf.org/rfc/rfc1738.txt)
>>> username_password_from_url('https://@host.com/')
('', None)
>>> username_password_from_url('telnet://host.com/')
(None, None)
>>> username_password_from_url('ftp://foo:@host.com/')
('foo', '')
>>> username_password_from_url('http://a:b@host.com/')
('a', 'b')
>>> username_password_from_url(1)
Traceback (most recent call last):
...
ValueError: URL has to be a string.
>>> username_password_from_url(None)
Traceback (most recent call last):
...
ValueError: URL has to be a string.
>>> username_password_from_url('http://a@b:c@host.com/')
('a@b', 'c')
>>> username_password_from_url('ftp://a:b:c@host.com/')
('a', 'b:c')
>>> username_password_from_url('http://i%2Fo:P%40ss%3A@host.com/')
('i/o', 'P@ss:')
>>> username_password_from_url('ftp://%C3%B6sterreich@host.com/')
('österreich', None)
>>> username_password_from_url('http://w%20x:y%20z@example.org/')
('w x', 'y z')
>>> username_password_from_url('http://example.com/x@y:z@test.com/')
(None, None)
"""
if not isinstance(url, str):
raise ValueError('URL has to be a string.')
(username, password) = (None, None)
(scheme, netloc, path, params, query, fragment) = urllib.parse.urlparse(url)
if '@' in netloc:
(authentication, netloc) = netloc.rsplit('@', 1)
if ':' in authentication:
(username, password) = authentication.split(':', 1)
# RFC1738 dictates that we should not allow ['/', '@', ':']
# characters in the username and password field (Section 3.1):
#
# 1. The "/" can't be in there at this point because of the way
# urlparse (which we use above) works.
# 2. Due to gPodder bug 1521, we allow "@" in the username and
# password field. We use netloc.rsplit('@', 1), which will
# make sure that we split it at the last '@' in netloc.
# 3. The colon must be excluded (RFC2617, Section 2) in the
# username, but is apparently allowed in the password. This
# is handled by the authentication.split(':', 1) above, and
# will cause any extraneous ':'s to be part of the password.
username = urllib.parse.unquote(username)
password = urllib.parse.unquote(password)
else:
username = urllib.parse.unquote(authentication)
return (username, password)
def directory_is_writable(path):
"""
Returns True if the specified directory exists and is writable
by the current user.
"""
return os.path.isdir(path) and os.access(path, os.W_OK)
def calculate_size(path):
"""
Tries to calculate the size of a directory, including any
subdirectories found. The returned value might not be
correct if the user doesn't have appropriate permissions
to list all subdirectories of the given path.
"""
if path is None:
return 0
if os.path.dirname(path) == '/':
return 0
if os.path.isfile(path):
return os.path.getsize(path)
if os.path.isdir(path) and not os.path.islink(path):
size = os.path.getsize(path)
try:
for item in os.listdir(path):
try:
size += calculate_size(os.path.join(path, item))
except:
logger.warning('Cannot get size for %s', path, exc_info=True)
except:
logger.warning('Cannot access %s', path, exc_info=True)
return size
return 0
def file_modification_datetime(filename):
"""
Returns the modification date of the specified file
as a datetime.datetime object or None if the modification
date cannot be determined.
"""
if filename is None:
return None
if not os.access(filename, os.R_OK):
return None
try:
s = os.stat(filename)
timestamp = s[stat.ST_MTIME]
return datetime.datetime.fromtimestamp(timestamp)
except:
logger.warning('Cannot get mtime for %s', filename, exc_info=True)
return None
def file_age_in_days(filename):
"""
Returns the age of the specified filename in days or
zero if the modification date cannot be determined.
"""
dt = file_modification_datetime(filename)
if dt is None:
return 0
else:
return (datetime.datetime.now() - dt).days
def file_modification_timestamp(filename):
"""
Returns the modification date of the specified file as a number
or -1 if the modification date cannot be determined.
"""
if filename is None:
return -1
try:
s = os.stat(filename)
return s[stat.ST_MTIME]
except:
logger.warning('Cannot get modification timestamp for %s', filename)
return -1
def file_age_to_string(days):
"""
Converts a "number of days" value to a string that
can be used in the UI to display the file age.
>>> file_age_to_string(0)
''
>>> file_age_to_string(1)
'1 day ago'
>>> file_age_to_string(2)
'2 days ago'
"""
if days < 1:
return ''
else:
return N_('%(count)d day ago', '%(count)d days ago', days) % {'count': days}
def is_system_file(filename):
"""
Checks to see if the given file is a system file.
"""
if gpodder.ui.win32 and win32file is not None:
result = win32file.GetFileAttributes(filename)
# -1 / 0xffffffff is returned by GetFileAttributes when an error occurs
# 0x4 is the FILE_ATTRIBUTE_SYSTEM constant
return result != -1 and result != 0xffffffff and result & 0x4 != 0
else:
return False
def get_free_disk_space_win32(path):
"""
Win32-specific code to determine the free disk space remaining
for a given path. Uses code from:
http://mail.python.org/pipermail/python-list/2003-May/203223.html
"""
if win32file is None:
# Cannot determine free disk space
return -1
drive, tail = os.path.splitdrive(path)
userFree, userTotal, freeOnDisk = win32file.GetDiskFreeSpaceEx(drive)
return userFree
def get_free_disk_space(path):
"""
Calculates the free disk space available to the current user
on the file system that contains the given path.
If the path (or its parent folder) does not yet exist, this
function returns zero.
"""
if not os.path.exists(path):
return -1
if gpodder.ui.win32:
return get_free_disk_space_win32(path)
s = os.statvfs(path)
return s.f_bavail * s.f_bsize
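# Illustrative sketch (not part of the original module):
#
#     free = get_free_disk_space(os.path.expanduser('~'))
#     if free >= 0:
#         print(format_filesize(free))    # format_filesize is defined below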
def format_date(timestamp):
"""
Converts a UNIX timestamp to a date representation. This
function returns "Today", "Yesterday", a weekday name or
the date in %x format, which (according to the Python docs)
is the "Locale's appropriate date representation".
Returns None if there has been an error converting the
timestamp to a string representation.
"""
if timestamp is None:
return None
seconds_in_a_day = 60 * 60 * 24
today = time.localtime()[:3]
yesterday = time.localtime(time.time() - seconds_in_a_day)[:3]
try:
timestamp_date = time.localtime(timestamp)[:3]
except ValueError:
logger.warning('Cannot convert timestamp', exc_info=True)
return None
except TypeError:
logger.warning('Cannot convert timestamp', exc_info=True)
return None
if timestamp_date == today:
return _('Today')
elif timestamp_date == yesterday:
return _('Yesterday')
try:
diff = int((time.time() - timestamp) / seconds_in_a_day)
except:
logger.warning('Cannot convert "%s" to date.', timestamp, exc_info=True)
return None
try:
timestamp = datetime.datetime.fromtimestamp(timestamp)
except:
return None
if diff < 7:
# Weekday name
return timestamp.strftime('%A')
else:
# Locale's appropriate date representation
return timestamp.strftime('%x')
def format_filesize(bytesize, use_si_units=False, digits=2):
"""
Formats the given size in bytes to be human-readable,
Returns a localized "(unknown)" string when the bytesize
has a negative value.
"""
si_units = (
(_('kB'), 10**3),
(_('MB'), 10**6),
(_('GB'), 10**9),
)
binary_units = (
(_('KiB'), 2**10),
(_('MiB'), 2**20),
(_('GiB'), 2**30),
)
try:
bytesize = float(bytesize)
except:
return _('(unknown)')
if bytesize < 0:
return _('(unknown)')
if use_si_units:
units = si_units
else:
units = binary_units
(used_unit, used_value) = (_('B'), bytesize)
for (unit, value) in units:
if bytesize >= value:
used_value = bytesize / float(value)
used_unit = unit
return locale.format_string('%.' + str(digits) + 'f\u00a0%s', (used_value, used_unit))
def delete_file(filename):
"""Delete a file from the filesystem
Errors (permissions errors or file not found)
are silently ignored.
"""
try:
os.remove(filename)
except:
pass
def is_html(text):
"""Heuristically tell if text is HTML
By looking for an open tag (more or less:)
>>> is_html('<h1>HELLO</h1>')
True
>>> is_html('a < b < c')
False
"""
html_test = re.compile(r'<[a-z][a-z0-9]*(?:\s.*?>|\/?>)', re.IGNORECASE | re.DOTALL)
return bool(html_test.search(text))
def remove_html_tags(html):
"""
Remove HTML tags from a string and replace numeric and
named entities with the corresponding character, so the
HTML text can be displayed in a simple text view.
"""
if html is None:
return None
# If we wanted more speed, we could make these patterns global
re_strip_tags = re.compile(r'<[^>]*>')
re_unicode_entities = re.compile(r'&#(\d{2,4});')
re_html_entities = re.compile(r'&(.{2,8});')
re_newline_tags = re.compile(r'(<br[^>]*>|<[/]?ul[^>]*>|</li>)', re.I)
re_listing_tags = re.compile(r'<li[^>]*>', re.I)
result = html
# Convert common HTML elements to their text equivalent
result = re_newline_tags.sub('\n', result)
result = re_listing_tags.sub('\n * ', result)
result = re.sub(r'<[Pp]>', '\n\n', result)
# Remove all HTML/XML tags from the string
result = re_strip_tags.sub('', result)
# Convert numeric XML entities to their unicode character
result = re_unicode_entities.sub(lambda x: chr(int(x.group(1))), result)
# Convert named HTML entities to their unicode character
result = re_html_entities.sub(lambda x: entitydefs.get(x.group(1), ''), result)
# Convert more than two newlines to two newlines
result = re.sub(r'([\r\n]{2})([\r\n])+', '\\1', result)
return result.strip()
class HyperlinkExtractor(object):
def __init__(self):
self.parts = []
self.target_stack = [None]
self.ignore_data = False
def get_result(self):
# Group together multiple consecutive parts with same link target,
# and remove excessive newlines.
group_it = itertools.groupby(self.parts, key=lambda x: x[0])
result = []
for target, parts in group_it:
t = ''.join(text for _, text in parts if text is not None)
# Remove trailing spaces
t = re.sub(r' +\n', '\n', t)
# Convert more than two newlines to two newlines
t = t.replace('\r', '')
t = re.sub(r'\n\n\n+', '\n\n', t)
result.append((target, t))
# Strip leading and trailing whitespace
result[0] = (result[0][0], result[0][1].lstrip())
result[-1] = (result[-1][0], result[-1][1].rstrip())
return result
def htmlws(self, s):
# Replace whitespaces with a single space per HTML spec.
if s is not None:
return re.sub(r'[ \t\n\r]+', ' ', s)
def handle_starttag(self, tag_name, attrs):
try:
handler = getattr(self, 'handle_start_' + tag_name)
except AttributeError:
pass
else:
handler(collections.OrderedDict(attrs))
def handle_endtag(self, tag_name):
try:
handler = getattr(self, 'handle_end_' + tag_name)
except AttributeError:
pass
else:
handler()
def handle_start_a(self, attrs):
self.target_stack.append(attrs.get('href'))
def handle_end_a(self):
if len(self.target_stack) > 1:
self.target_stack.pop()
def handle_start_style(self, attrs):
self.ignore_data = True
def handle_end_style(self):
self.ignore_data = False
def output(self, text):
self.parts.append((self.target_stack[-1], text))
def handle_data(self, data):
if not self.ignore_data:
self.output(self.htmlws(data))
def handle_entityref(self, name):
c = chr(name2codepoint[name])
self.output(c)
def handle_charref(self, name):
if name.startswith('x'):
c = chr(int(name[1:], 16))
else:
c = chr(int(name))
self.output(c)
def output_newline(self, attrs=None):
self.output('\n')
def output_double_newline(self, attrs=None):
self.output('\n\n')
def handle_start_img(self, attrs):
self.output(self.htmlws(attrs.get('alt', '')))
def handle_start_li(self, attrs):
self.output('\n * ')
handle_end_li = handle_end_ul = handle_start_br = output_newline
handle_start_p = handle_end_p = output_double_newline
class ExtractHyperlinkedText(object):
def __call__(self, document):
self.extractor = HyperlinkExtractor()
self.visit(document)
return self.extractor.get_result()
def visit(self, element):
# skip functions generated by html5lib for comments in the HTML
if callable(element.tag):
return
NS = '{http://www.w3.org/1999/xhtml}'
tag_name = (element.tag[len(NS):] if element.tag.startswith(NS) else element.tag).lower()
self.extractor.handle_starttag(tag_name, list(element.items()))
if element.text is not None:
self.extractor.handle_data(element.text)
for child in element:
self.visit(child)
if child.tail is not None:
self.extractor.handle_data(child.tail)
self.extractor.handle_endtag(tag_name)
class ExtractHyperlinkedTextHTMLParser(HTMLParser):
def __call__(self, html):
self.extractor = HyperlinkExtractor()
self.target_stack = [None]
self.feed(html)
self.close()
return self.extractor.get_result()
def handle_starttag(self, tag, attrs):
self.extractor.handle_starttag(tag, attrs)
def handle_endtag(self, tag):
self.extractor.handle_endtag(tag)
def handle_data(self, data):
self.extractor.handle_data(data)
def handle_entityref(self, name):
self.extractor.handle_entityref(name)
def handle_charref(self, name):
self.extractor.handle_charref(name)
def extract_hyperlinked_text(html):
"""
Convert HTML to hyperlinked text.
The output is a list of (target, text) tuples, where target is either a URL
or None, and text is a piece of plain text for rendering in a TextView.
"""
if '<' not in html:
# Probably plain text. We would remove all the newlines
# if we treated it as HTML, so just pass it back as-is.
return [(None, html)]
if html5lib is not None:
return ExtractHyperlinkedText()(html5lib.parseFragment(html))
else:
return ExtractHyperlinkedTextHTMLParser()(html)
def nice_html_description(img, description):
"""
basic html formatting + hyperlink highlighting + video thumbnail
"""
description = re.sub(r'https?://[^\s]+', r'<a href="\g<0>">\g<0></a>', description)
description = description.replace('\n', '<br>')
html = """<style type="text/css">
body > img { float: left; max-width: 30vw; margin: 0 1em 1em 0; }
</style>
"""
if img:
html += '<img src="{}">'.format(img)
html += '<p>{}</p>'.format(description)
return html
def wrong_extension(extension):
"""
Determine if a given extension looks like it's
wrong (e.g. empty, extremely long or spaces)
Returns True if the extension most likely is a
wrong one and should be replaced.
>>> wrong_extension('.mp3')
False
>>> wrong_extension('.divx')
False
>>> wrong_extension('mp3')
True
>>> wrong_extension('')
True
>>> wrong_extension('.12 - Everybody')
True
>>> wrong_extension('.mp3 ')
True
>>> wrong_extension('.')
True
>>> wrong_extension('.42')
True
"""
if not extension:
return True
elif len(extension) > 5:
return True
elif ' ' in extension:
return True
elif extension == '.':
return True
elif not extension.startswith('.'):
return True
else:
try:
# ".<number>" is an invalid extension
float(extension)
return True
except:
pass
return False
def extension_from_mimetype(mimetype):
"""
Simply guesses what the file extension should be from the mimetype
>>> extension_from_mimetype('audio/mp4')
'.m4a'
>>> extension_from_mimetype('audio/ogg')
'.ogg'
>>> extension_from_mimetype('audio/mpeg')
'.mp3'
>>> extension_from_mimetype('video/x-matroska')
'.mkv'
>>> extension_from_mimetype('wrong-mimetype')
''
"""
if mimetype in _MIME_TYPES:
return _MIME_TYPES[mimetype]
return mimetypes.guess_extension(mimetype) or ''
def mimetype_from_extension(extension):
"""
Simply guesses what the mimetype should be from the file extension
>>> mimetype_from_extension('.m4a')
'audio/mp4'
>>> mimetype_from_extension('.ogg')
'audio/ogg'
>>> mimetype_from_extension('.mp3')
'audio/mpeg'
>>> mimetype_from_extension('.mkv')
'video/x-matroska'
>>> mimetype_from_extension('._invalid_file_extension_')
''
"""
if extension in _MIME_TYPES_EXT:
return _MIME_TYPES_EXT[extension]
# Need to prepend something to the extension, so guess_type works
mimetype, encoding = mimetypes.guess_type('file' + extension)
return mimetype or ''
def extension_correct_for_mimetype(extension, mimetype):
"""
Check if the given filename extension (e.g. ".ogg") is a possible
extension for a given mimetype (e.g. "application/ogg") and return
a boolean value (True if it's possible, False if not). Also do
basic sanity checking of the arguments: a ValueError is raised for
an extension without a leading dot or a mimetype without a slash.
>>> extension_correct_for_mimetype('.ogg', 'application/ogg')
True
>>> extension_correct_for_mimetype('.ogv', 'video/ogg')
True
>>> extension_correct_for_mimetype('.ogg', 'audio/mpeg')
False
>>> extension_correct_for_mimetype('.m4a', 'audio/mp4')
True
>>> extension_correct_for_mimetype('mp3', 'audio/mpeg')
Traceback (most recent call last):
...
ValueError: "mp3" is not an extension (missing .)
>>> extension_correct_for_mimetype('.mp3', 'audio mpeg')
Traceback (most recent call last):
...
ValueError: "audio mpeg" is not a mimetype (missing /)
"""
if '/' not in mimetype:
raise ValueError('"%s" is not a mimetype (missing /)' % mimetype)
if not extension.startswith('.'):
raise ValueError('"%s" is not an extension (missing .)' % extension)
if (extension, mimetype) in _MIME_TYPE_LIST:
return True
# Create a "default" extension from the mimetype, e.g. "application/ogg"
# becomes ".ogg", "audio/mpeg" becomes ".mpeg", etc...
default = ['.' + mimetype.split('/')[-1]]
return extension in default + mimetypes.guess_all_extensions(mimetype)
def filename_from_url(url):
"""
Extracts the filename and (lowercase) extension (with dot)
from a URL, e.g. http://server.com/file.MP3?download=yes
will result in the string ("file", ".mp3") being returned.
This function will also try to best-guess the "real"
extension for a media file (audio, video) by
trying to match an extension to these types and recurse
into the query string to find better matches, if the
original extension does not resolve to a known type.
http://my.net/redirect.php?my.net/file.ogg => ("file", ".ogg")
http://server/get.jsp?file=/episode0815.MOV => ("episode0815", ".mov")
http://s/redirect.mp4?http://serv2/test.mp4 => ("test", ".mp4")
"""
(scheme, netloc, path, params, query, fragment) = urllib.parse.urlparse(url)
(filename, extension) = os.path.splitext(
os.path.basename(urllib.parse.unquote(path)))
if file_type_by_extension(extension) is not None and not \
query.startswith(scheme + '://'):
# We have found a valid extension (audio, video)
# and the query string doesn't look like a URL
return (filename, extension.lower())
# If the query string looks like a possible URL, try that first
if len(query.strip()) > 0 and query.find('/') != -1:
query_url = '://'.join((scheme, urllib.parse.unquote(query)))
(query_filename, query_extension) = filename_from_url(query_url)
if file_type_by_extension(query_extension) is not None:
return os.path.splitext(os.path.basename(query_url))
# No exact match found, simply return the original filename & extension
return (filename, extension.lower())
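# Illustrative usage (not in the original module): the results below
# mirror the examples given in the docstring above.
#
#   filename_from_url('http://server.com/file.MP3?download=yes')
#   # => ('file', '.mp3')
#   filename_from_url('http://server/get.jsp?file=/episode0815.MOV')
#   # => ('episode0815', '.mov')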
def file_type_by_extension(extension):
"""
Tries to guess the file type by looking up the filename
extension from a table of known file types. Will return
"audio", "video" or None.
>>> file_type_by_extension('.aif')
'audio'
>>> file_type_by_extension('.3GP')
'video'
>>> file_type_by_extension('.m4a')
'audio'
>>> file_type_by_extension('.txt') is None
True
>>> file_type_by_extension(None) is None
True
>>> file_type_by_extension('ogg')
Traceback (most recent call last):
...
ValueError: Extension does not start with a dot: ogg
"""
if not extension:
return None
if not extension.startswith('.'):
raise ValueError('Extension does not start with a dot: %s' % extension)
extension = extension.lower()
if extension in _MIME_TYPES_EXT:
return _MIME_TYPES_EXT[extension].split('/')[0]
# Need to prepend something to the extension, so guess_type works
mimetype, encoding = mimetypes.guess_type('file' + extension)
if mimetype is not None and '/' in mimetype:
filetype, rest = mimetype.split('/', 1)
if filetype in ('audio', 'video', 'image'):
return filetype
return None
def get_first_line(s):
"""
Returns only the first line of a string, stripped so
that it doesn't have whitespace before or after.
"""
if s:
return s.strip().split('\n')[0].strip()
return ''
def object_string_formatter(s, **kwargs):
"""
Makes attributes of object passed in as keyword
arguments available as {OBJECTNAME.ATTRNAME} in
the passed-in string and returns a string with
the above arguments replaced with the attribute
values of the corresponding object.
>>> class x: pass
>>> a = x()
>>> a.title = 'Hello world'
>>> object_string_formatter('{episode.title}', episode=a)
'Hello world'
>>> class x: pass
>>> a = x()
>>> a.published = 123
>>> object_string_formatter('Hi {episode.published} 456', episode=a)
'Hi 123 456'
"""
result = s
for key, o in kwargs.items():
matches = re.findall(r'\{%s\.([^\}]+)\}' % key, s)
for attr in matches:
if hasattr(o, attr):
try:
from_s = '{%s.%s}' % (key, attr)
to_s = str(getattr(o, attr))
result = result.replace(from_s, to_s)
except:
logger.warning('Replace of "%s" failed for "%s".', attr, s)
return result
def format_desktop_command(command, filenames, start_position=None):
"""
Formats a command template from the "Exec=" line of a .desktop
file to a string that can be invoked in a shell.
Handled format strings: %U, %u, %F, %f and a fallback that
appends the filename as first parameter of the command.
Also handles non-standard %p which is replaced with the start_position
(probably only makes sense if starting a single file). (see bug 1140)
See http://standards.freedesktop.org/desktop-entry-spec/1.0/ar01s06.html
Returns a list of commands to execute, either one for
each filename if the application does not support multiple
file names or one for all filenames (%U, %F or unknown).
"""
# Replace backslashes with slashes to fix win32 issues
# (even on win32, "/" works, but "\" does not)
command = command.replace('\\', '/')
if start_position is not None:
command = command.replace('%p', str(start_position))
command = shlex.split(command)
command_before = command
command_after = []
multiple_arguments = True
for fieldcode in ('%U', '%F', '%u', '%f'):
if fieldcode in command:
command_before = command[:command.index(fieldcode)]
command_after = command[command.index(fieldcode) + 1:]
multiple_arguments = fieldcode in ('%U', '%F')
break
if multiple_arguments:
return [command_before + filenames + command_after]
commands = []
for filename in filenames:
commands.append(command_before + [filename] + command_after)
return commands
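# Illustrative sketch (assumption, not original code): expanding an
# "Exec=" template that accepts multiple files via %U yields a single
# command; single-file field codes (%f/%u) yield one command per file.
#
#   format_desktop_command('vlc %U', ['/tmp/a.mp3', '/tmp/b.mp3'])
#   # => [['vlc', '/tmp/a.mp3', '/tmp/b.mp3']]
#   format_desktop_command('vlc %f', ['/tmp/a.mp3', '/tmp/b.mp3'])
#   # => [['vlc', '/tmp/a.mp3'], ['vlc', '/tmp/b.mp3']]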
def url_strip_authentication(url):
"""
Strips authentication data from a URL. Returns the URL with
the authentication data removed from it.
>>> url_strip_authentication('https://host.com/')
'https://host.com/'
>>> url_strip_authentication('telnet://foo:bar@host.com/')
'telnet://host.com/'
>>> url_strip_authentication('ftp://billy@example.org')
'ftp://example.org'
>>> url_strip_authentication('ftp://billy:@example.org')
'ftp://example.org'
>>> url_strip_authentication('http://aa:bc@localhost/x')
'http://localhost/x'
>>> url_strip_authentication('http://i%2Fo:P%40ss%3A@blubb.lan/u.html')
'http://blubb.lan/u.html'
>>> url_strip_authentication('http://c:d@x.org/')
'http://x.org/'
>>> url_strip_authentication('http://P%40%3A:i%2F@cx.lan')
'http://cx.lan'
>>> url_strip_authentication('http://x@x.com:s3cret@example.com/')
'http://example.com/'
"""
url_parts = list(urllib.parse.urlsplit(url))
# url_parts[1] is the HOST part of the URL
# Remove existing authentication data
if '@' in url_parts[1]:
url_parts[1] = url_parts[1].rsplit('@', 1)[1]
return urllib.parse.urlunsplit(url_parts)
def url_add_authentication(url, username, password):
"""
Adds authentication data (username, password) to a given
URL in order to construct an authenticated URL.
>>> url_add_authentication('https://host.com/', '', None)
'https://host.com/'
>>> url_add_authentication('http://example.org/', None, None)
'http://example.org/'
>>> url_add_authentication('telnet://host.com/', 'foo', 'bar')
'telnet://foo:bar@host.com/'
>>> url_add_authentication('ftp://example.org', 'billy', None)
'ftp://billy@example.org'
>>> url_add_authentication('ftp://example.org', 'billy', '')
'ftp://billy:@example.org'
>>> url_add_authentication('http://localhost/x', 'aa', 'bc')
'http://aa:bc@localhost/x'
>>> url_add_authentication('http://blubb.lan/u.html', 'i/o', 'P@ss:')
'http://i%2Fo:P@ss:@blubb.lan/u.html'
>>> url_add_authentication('http://a:b@x.org/', 'c', 'd')
'http://c:d@x.org/'
>>> url_add_authentication('http://i%2F:P%40%3A@cx.lan', 'P@x', 'i/')
'http://P@x:i%2F@cx.lan'
>>> url_add_authentication('http://x.org/', 'a b', 'c d')
'http://a%20b:c%20d@x.org/'
"""
if username is None or username == '':
return url
# Relaxations of the strict quoting rules (bug 1521):
# 1. Accept '@' in username and password
# 2. Accept ':' in password only
username = urllib.parse.quote(username, safe='@')
if password is not None:
password = urllib.parse.quote(password, safe='@:')
auth_string = ':'.join((username, password))
else:
auth_string = username
url = url_strip_authentication(url)
url_parts = list(urllib.parse.urlsplit(url))
# url_parts[1] is the HOST part of the URL
url_parts[1] = '@'.join((auth_string, url_parts[1]))
return urllib.parse.urlunsplit(url_parts)
def urlopen(url, headers=None, data=None, timeout=None, **kwargs):
"""
A URL opener with the User-agent set to gPodder (with version)
"""
from gpodder import config
if headers is None:
headers = {}
else:
headers = dict(headers)
if not timeout:
timeout = gpodder.SOCKET_TIMEOUT
retry_strategy = Retry(
total=3,
status_forcelist=Retry.RETRY_AFTER_STATUS_CODES.union((408, 418, 504, 598, 599,)))
s = requests.Session()
a = requests.adapters.HTTPAdapter(max_retries=retry_strategy)
s.mount('http://', a)
s.mount('https://', a)
headers.update({'User-agent': gpodder.user_agent})
proxies = config._proxies
logger.debug(f"urlopen: url: {url}, proxies: {proxies}")
return s.get(url, headers=headers, data=data, proxies=proxies, timeout=timeout, **kwargs)
def get_real_url(url):
"""
Gets the real URL of a file and resolves all redirects.
"""
try:
return urlopen(url).url
except:
logger.error('Getting real url for %s', url, exc_info=True)
return url
def find_command(command):
"""
Searches the system's PATH for a specific command that is
executable by the user. Returns the first occurrence of an
executable binary in the PATH, or None if the command is
not available.
On Windows, this also looks for "<command>.bat" and
"<command>.exe" files if "<command>" itself doesn't exist.
"""
if 'PATH' not in os.environ:
return None
for path in os.environ['PATH'].split(os.pathsep):
command_file = os.path.join(path, command)
if gpodder.ui.win32 and not os.path.exists(command_file):
for extension in ('.bat', '.exe'):
cmd = command_file + extension
if os.path.isfile(cmd):
command_file = cmd
break
if os.path.isfile(command_file) and os.access(command_file, os.X_OK):
return command_file
return None
def idle_add(func, *args):
"""Run a function in the main GUI thread
This is a wrapper function that does the Right Thing depending on if we are
running on Gtk+, Qt or CLI.
You should use this function if you are calling from a Python thread and
modify UI data, so that you make sure that the function is called as soon
as possible from the main UI thread.
"""
if gpodder.ui.gtk:
from gi.repository import GLib
GLib.idle_add(func, *args)
else:
func(*args)
def idle_timeout_add(milliseconds, func, *args):
"""Run a function in the main GUI thread at regular intervals, at idle priority
PRIORITY_HIGH -100
PRIORITY_DEFAULT 0 timeout_add()
PRIORITY_HIGH_IDLE 100
resizing 110
redraw 120
PRIORITY_DEFAULT_IDLE 200 idle_add()
PRIORITY_LOW 300
"""
if not gpodder.ui.gtk:
raise Exception('util.idle_timeout_add() is only supported by Gtk+')
from gi.repository import GLib
return GLib.timeout_add(milliseconds, func, *args, priority=GLib.PRIORITY_DEFAULT_IDLE)
class IdleTimeout(object):
"""Run a function in the main GUI thread at regular intervals since the last run, at idle priority
A simple timeout_add() continuously calls the function if it exceeds the interval,
which lags the UI and prevents idle_add() calls from happening. This class restarts
the timer after the function finishes, allowing other callbacks to run.
"""
def __init__(self, milliseconds, func, *args):
if not gpodder.ui.gtk:
raise Exception('util.IdleTimeout() is only supported by Gtk+')
self.milliseconds = milliseconds
self.max_milliseconds = 0
self.func = func
from gi.repository import GLib
self.id = GLib.timeout_add(milliseconds, self._callback, *args, priority=GLib.PRIORITY_DEFAULT_IDLE)
def set_max_milliseconds(self, max_milliseconds):
self.max_milliseconds = max_milliseconds
return self
def _callback(self, *args):
self.cancel()
start_time = time.time()
if self.func(*args):
if self.max_milliseconds > self.milliseconds:
duration = round((time.time() - start_time) * 1000)
if duration > self.max_milliseconds:
duration = self.max_milliseconds
milliseconds = round(lerp(self.milliseconds, self.max_milliseconds, duration / self.max_milliseconds))
else:
milliseconds = self.milliseconds
from gi.repository import GLib
self.id = GLib.timeout_add(milliseconds, self._callback, *args, priority=GLib.PRIORITY_DEFAULT_IDLE)
def cancel(self):
if self.id:
from gi.repository import GLib
GLib.source_remove(self.id)
self.id = 0
def lerp(a, b, f):
"""Linear interpolation between 'a' and 'b', where 'f' is between 0.0 and 1.0
"""
return ((1.0 - f) * a) + (f * b)
def bluetooth_available():
"""
Returns True or False depending on the availability
of bluetooth functionality on the system.
"""
if find_command('bluetooth-sendto') or \
find_command('gnome-obex-send'):
return True
else:
return False
def bluetooth_send_file(filename):
"""
Sends a file via bluetooth.
This function tries to use "bluetooth-sendto", and if
it is not available, it also tries "gnome-obex-send".
"""
command_line = None
if find_command('bluetooth-sendto'):
command_line = ['bluetooth-sendto']
elif find_command('gnome-obex-send'):
command_line = ['gnome-obex-send']
if command_line is not None:
command_line.append(filename)
return (Popen(command_line, close_fds=True).wait() == 0)
else:
logger.error('Cannot send file. Please install "bluetooth-sendto" or "gnome-obex-send".')
return False
def format_time(seconds):
"""Format a seconds value to a string
>>> format_time(0)
'00:00'
>>> format_time(20)
'00:20'
>>> format_time(3600)
'01:00:00'
>>> format_time(10921)
'03:02:01'
>>> format_time(86401)
'24:00:01'
"""
hours = 0
minutes = 0
if seconds >= 3600:
hours = seconds // 3600
seconds -= hours * 3600
if seconds >= 60:
minutes = seconds // 60
seconds -= minutes * 60
if hours == 0:
return '%02d:%02d' % (minutes, seconds)
else:
return '%02d:%02d:%02d' % (hours, minutes, seconds)
def parse_time(value):
"""Parse a time string into seconds
>>> parse_time('00:00')
0
>>> parse_time('00:00:00')
0
>>> parse_time('00:20')
20
>>> parse_time('00:00:20')
20
>>> parse_time('01:00:00')
3600
>>> parse_time('03:02:01')
10921
>>> parse_time('61:08')
3668
>>> parse_time('25:03:30')
90210
>>> parse_time('25:3:30')
90210
>>> parse_time('61.08')
3668
"""
if value == '':
return 0
if not value:
raise ValueError('Invalid value: %s' % (str(value),))
m = re.match(r'(\d+)[:.](\d\d?)[:.](\d\d?)', value)
if m:
hours, minutes, seconds = m.groups()
return (int(hours) * 60 + int(minutes)) * 60 + int(seconds)
m = re.match(r'(\d+)[:.](\d\d?)', value)
if m:
minutes, seconds = m.groups()
return int(minutes) * 60 + int(seconds)
return int(value)
def format_seconds_to_hour_min_sec(seconds):
"""
Take the number of seconds and format it into a
human-readable string (duration).
>>> format_seconds_to_hour_min_sec(3834)
'1 hour, 3 minutes and 54 seconds'
>>> format_seconds_to_hour_min_sec(3600)
'1 hour'
>>> format_seconds_to_hour_min_sec(62)
'1 minute and 2 seconds'
"""
if seconds < 1:
return N_('%(count)d second', '%(count)d seconds',
seconds) % {'count': seconds}
result = []
seconds = int(seconds)
hours = seconds // 3600
seconds = seconds % 3600
minutes = seconds // 60
seconds = seconds % 60
if hours:
result.append(N_('%(count)d hour', '%(count)d hours',
hours) % {'count': hours})
if minutes:
result.append(N_('%(count)d minute', '%(count)d minutes',
minutes) % {'count': minutes})
if seconds:
result.append(N_('%(count)d second', '%(count)d seconds',
seconds) % {'count': seconds})
if len(result) > 1:
return (' ' + _('and') + ' ').join((', '.join(result[:-1]), result[-1]))
else:
return result[0]
def http_request(url, method='HEAD'):
(scheme, netloc, path, params, query, fragment) = urllib.parse.urlparse(url)
if scheme == 'https':
conn = http.client.HTTPSConnection(netloc)
else:
conn = http.client.HTTPConnection(netloc)
start = len(scheme) + len('://') + len(netloc)
conn.request(method, url[start:])
return conn.getresponse()
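# Illustrative usage (assumption): issue a HEAD request and inspect the
# returned http.client.HTTPResponse without downloading the body.
#
#   response = http_request('https://example.com/episode.mp3')
#   print(response.status, response.getheader('content-length'))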
def gui_open(filename, gui=None):
"""
Open a file or folder with the default application set
by the Desktop environment. This uses "xdg-open" on all
systems with a few exceptions:
on Win32, os.startfile() is used
"""
try:
if gpodder.ui.win32:
os.startfile(filename)
opener = None
elif gpodder.ui.osx:
opener = 'open'
else:
opener = 'xdg-open'
if opener:
opener_fullpath = shutil.which(opener)
if opener_fullpath is None:
raise Exception((_("System default program '%(opener)s' not found"))
% {'opener': opener}
)
Popen([opener_fullpath, filename], close_fds=True)
return True
except:
logger.error('Cannot open file/folder: "%s"', filename, exc_info=True)
if gui is not None:
if opener is None:
message = _("Cannot open file/folder '%(filename)s' using default program") % {'filename': filename}
else:
message = _("Cannot open '%(filename)s' using '%(opener)s'") \
% {'filename': filename, 'opener': opener}
gui.show_message_details(_('Cannot open file/folder'),
str(sys.exc_info()[1]), message)
return False
def open_website(url):
"""
Opens the specified URL using the default system web
browser. This uses Python's "webbrowser" module, so
make sure your system is set up correctly.
"""
run_in_background(lambda: webbrowser.open(url))
return True
def copy_text_to_clipboard(text):
"""
Copies the specified text to both clipboards.
"""
import gi
gi.require_version('Gtk', '3.0')
from gi.repository import Gdk, Gtk
clipboard = Gtk.Clipboard.get(Gdk.SELECTION_PRIMARY)
clipboard.set_text(text, -1)
clipboard = Gtk.Clipboard.get(Gdk.SELECTION_CLIPBOARD)
clipboard.set_text(text, -1)
def convert_bytes(d):
"""
Convert byte-string values for safe handling
Plain bytes objects and basic types (int, bool, float, str) are
returned unchanged (see the doctests below); any other type is
decoded from UTF-8, ignoring invalid sequences.
>>> convert_bytes(None)
>>> convert_bytes(4711)
4711
>>> convert_bytes(True)
True
>>> convert_bytes(3.1415)
3.1415
>>> convert_bytes('Hello')
'Hello'
>>> type(convert_bytes(b'hoho'))
<class 'bytes'>
"""
if d is None:
return d
elif isinstance(d, bytes):
return d
elif any(isinstance(d, t) for t in (int, bool, float)):
return d
elif not isinstance(d, str):
return d.decode('utf-8', 'ignore')
return d
def sanitize_filename(filename, max_length):
"""
Generate a sanitized version of a filename; trim filename
if greater than max_length (0 = no limit).
>>> sanitize_filename('https://www.host.name/feed', 0)
'https___www.host.name_feed'
>>> sanitize_filename('Binärgewitter', 0)
'Binärgewitter'
>>> sanitize_filename('Cool feed (ogg)', 0)
'Cool feed (ogg)'
>>> sanitize_filename('Cool feed (ogg)', 1)
'C'
"""
if max_length > 0 and len(filename.encode('utf-8')) > max_length:
logger.info('Limiting file/folder name "%s" to %d bytes.', filename, max_length)
filename = filename.encode('utf-8')
length = len(filename)
while length > max_length:
# strip continuation bytes
while (filename[-1] & 0xC0) == 0x80:
filename = filename[:-1]
length -= 1
# strip leader byte
filename = filename[:-1]
length -= 1
filename = filename.decode('utf-8')
# see #361 - at least slash must be removed
filename = re.sub(r"[\"*/:<>?\\|]", "_", filename)
return filename.strip('.' + string.whitespace)
def sanitize_filename_ext(filename, ext, max_length, max_length_with_ext):
"""
Generate a sanitized version of a filename and extension.
Truncate filename if greater than max_length.
Truncate extension if filename.extension is greater than max_length_with_ext.
:param str filename: filename without extension
:param str ext: extension
:return (str, str): (sanitized_truncated_filename, sanitized_extension)
>>> sanitize_filename_ext('podcast_4987_faz_essay_der_podcast_fur_die_geschichte' \
'_hinter_den_nachrichten_episode_4_04_die_macht_der_tater_brechen', \
".mp3", 120, 131)
('podcast_4987_faz_essay_der_podcast_fur_die_geschichte_hinter_den_nachrichten_episode_4_04_die_macht_der_tater_brechen', '.mp3')
"""
sanitized_fn = sanitize_filename(filename, max_length)
sanitized_ext = sanitize_filename(ext, max_length_with_ext - len(sanitized_fn))
return (sanitized_fn, ('.' + sanitized_ext) if sanitized_ext else '')
def find_mount_point(directory):
"""
Try to find the mount point for a given directory.
If the directory is itself a mount point, return
it. If not, remove the last part of the path and
re-check if it's a mount point. If the directory
resides on your root filesystem, "/" is returned.
>>> find_mount_point('/')
'/'
>>> find_mount_point(b'/something')
Traceback (most recent call last):
...
ValueError: Convert bytes objects to str first.
>>> find_mount_point(None)
Traceback (most recent call last):
...
ValueError: Directory names should be of type str.
>>> find_mount_point(42)
Traceback (most recent call last):
...
ValueError: Directory names should be of type str.
>>> from minimock import mock, restore
>>> mocked_mntpoints = ('/', '/home', '/media/usbdisk', '/media/cdrom')
>>> mock('os.path.ismount', returns_func=lambda x: x in mocked_mntpoints)
>>>
>>> # For mocking os.getcwd(), we simply use a lambda to avoid the
>>> # massive output of "Called os.getcwd()" lines in this doctest
>>> os.getcwd = lambda: '/home/thp'
>>>
>>> find_mount_point('.')
Called os.path.ismount('/home/thp')
Called os.path.ismount('/home')
'/home'
>>> find_mount_point('relativity')
Called os.path.ismount('/home/thp/relativity')
Called os.path.ismount('/home/thp')
Called os.path.ismount('/home')
'/home'
>>> find_mount_point('/media/usbdisk/')
Called os.path.ismount('/media/usbdisk')
'/media/usbdisk'
>>> find_mount_point('/home/thp/Desktop')
Called os.path.ismount('/home/thp/Desktop')
Called os.path.ismount('/home/thp')
Called os.path.ismount('/home')
'/home'
>>> find_mount_point('/media/usbdisk/Podcasts/With Spaces')
Called os.path.ismount('/media/usbdisk/Podcasts/With Spaces')
Called os.path.ismount('/media/usbdisk/Podcasts')
Called os.path.ismount('/media/usbdisk')
'/media/usbdisk'
>>> find_mount_point('/home/')
Called os.path.ismount('/home')
'/home'
>>> find_mount_point('/media/cdrom/../usbdisk/blubb//')
Called os.path.ismount('/media/usbdisk/blubb')
Called os.path.ismount('/media/usbdisk')
'/media/usbdisk'
>>> restore()
"""
if isinstance(directory, bytes):
# We do not accept byte strings, because they could fail when
# trying to be converted to some native encoding, so fail loudly
# and leave it up to the callee to decode from the proper encoding.
raise ValueError('Convert bytes objects to str first.')
if not isinstance(directory, str):
# In Python 2, we assumed it's a byte str; in Python 3, we assume
# that it's a unicode str. The abspath/ismount/split functions of
# os.path work with unicode str in Python 3, but not in Python 2.
raise ValueError('Directory names should be of type str.')
directory = os.path.abspath(directory)
while directory != '/':
if os.path.ismount(directory):
return directory
else:
(directory, tail_data) = os.path.split(directory)
return '/'
# matches scheme prefixes such as http:// and ftp://
protocolPattern = re.compile(r'^\w+://')
def isabs(string):
"""
@return True if the string is an absolute path or a protocol address.
Addresses beginning with a scheme such as http://, ftp:// or ldap://
are considered "absolute" paths.
Source: http://code.activestate.com/recipes/208993/
"""
if protocolPattern.match(string):
return True
return os.path.isabs(string)
def relpath(p1, p2):
"""
Finds relative path from p2 to p1, like os.path.relpath but handles
uris. Returns None if no such path exists due to the paths being on
different devices.
"""
u1 = urllib.parse.urlparse(p1)
u2 = urllib.parse.urlparse(p2)
if u1.scheme and u2.scheme and (u1.scheme != u2.scheme or u1.netloc != u2.netloc):
return None
return os.path.relpath(u1.path, u2.path)
def get_hostname():
"""Return the hostname of this computer
This can be implemented in a different way on each
platform and should yield a unique-per-user device ID.
"""
nodename = platform.node()
if nodename:
return nodename
# Fallback - but can this give us "localhost"?
return socket.gethostname()
def detect_device_type():
"""Device type detection for gpodder.net
This function tries to detect on which
kind of device gPodder is running on.
Possible return values:
desktop, laptop, mobile, server, other
"""
if glob.glob('/proc/acpi/battery/*'):
# Linux: If we have a battery, assume Laptop
return 'laptop'
return 'desktop'
def write_m3u_playlist(m3u_filename, episodes, extm3u=True):
"""Create an M3U playlist from a episode list
If the parameter "extm3u" is False, the list of
episodes should be a list of filenames, and no
extended information will be written into the
M3U files (#EXTM3U / #EXTINF).
If the parameter "extm3u" is True (default), then the
list of episodes should be PodcastEpisode objects,
as the extended metadata will be taken from them.
"""
f = open(m3u_filename, 'w')
if extm3u:
# Mandatory header for extended playlists
f.write('#EXTM3U\n')
for episode in episodes:
if not extm3u:
# Episode objects are strings that contain file names
f.write(episode + '\n')
continue
if episode.was_downloaded(and_exists=True):
filename = episode.local_filename(create=False)
assert filename is not None
if os.path.dirname(filename).startswith(os.path.dirname(m3u_filename)):
filename = filename[len(os.path.dirname(m3u_filename) + os.sep):]
f.write('#EXTINF:0,' + episode.playlist_title() + '\n')
f.write(filename + '\n')
f.close()
def generate_names(filename):
basename, ext = os.path.splitext(filename)
for i in itertools.count():
if i:
yield '%s (%d)%s' % (basename, i + 1, ext)
else:
yield filename
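# Illustrative usage (assumption): the generator yields the original
# name first, then numbered alternatives for collision handling.
#
#   g = generate_names('episode.mp3')
#   next(g)  # 'episode.mp3'
#   next(g)  # 'episode (2).mp3'
#   next(g)  # 'episode (3).mp3'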
def is_known_redirecter(url):
"""Check if a URL redirect is expected, and no filenames should be updated
We usually honor URL redirects, and update filenames accordingly.
In some cases (e.g. Soundcloud) this results in a worse filename,
so we hardcode and detect these cases here to avoid renaming files
for which we know that a "known good default" exists.
The problem here is that by comparing the currently-assigned filename
with the new filename determined by the URL, we cannot really determine
which one is the "better" URL (e.g. "n5rMSpXrqmR9.128.mp3" for Soundcloud).
"""
# Soundcloud-hosted media downloads (we take the track name as filename)
if url.startswith('http://ak-media.soundcloud.com/'):
return True
return False
def atomic_rename(old_name, new_name):
"""Atomically rename/move a (temporary) file
This is usually used when updating a file safely by writing
the new contents into a temporary file and then moving the
temporary file over the original file to replace it.
"""
if gpodder.ui.win32:
# Win32 does not support atomic rename with os.rename
shutil.move(old_name, new_name)
else:
os.rename(old_name, new_name)
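# Illustrative sketch (assumption, not original code): the safe-update
# pattern described above -- write into a temporary file in the target
# directory, then atomically replace the original. "target" and
# "new_contents" are placeholder names.
#
#   import tempfile
#   fd, tmp = tempfile.mkstemp(dir=os.path.dirname(target))
#   with os.fdopen(fd, 'w') as f:
#       f.write(new_contents)
#   atomic_rename(tmp, target)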
def check_command(self, cmd):
"""Check if a command line command/program exists
Note: the unused "self" parameter is kept so existing call sites
that pass an object do not break.
"""
program = shlex.split(cmd)[0]
return find_command(program) is not None
def rename_episode_file(episode, filename):
"""Helper method to update a PodcastEpisode object
Useful after renaming/converting its download file.
"""
if not os.path.exists(filename):
raise ValueError('Target filename does not exist.')
basename, extension = os.path.splitext(filename)
episode.download_filename = os.path.basename(filename)
episode.file_size = os.path.getsize(filename)
episode.mime_type = mimetype_from_extension(extension)
episode.save()
episode.db.commit()
def get_update_info():
"""
Get up to date release information from gpodder.org.
Returns a tuple: (up_to_date, latest_version, release_date, days_since)
Example result (up to date version, 20 days after release):
(True, '3.0.4', '2012-01-24', 20)
Example result (outdated version, 10 days after release):
(False, '3.0.5', '2012-02-29', 10)
"""
url = 'https://api.github.com/repos/gpodder/gpodder/releases/latest'
info = urlopen(url).json()
latest_version = info.get('tag_name', '').replace('gpodder-', '')
release_date = info['published_at']
release_parsed = datetime.datetime.strptime(release_date, '%Y-%m-%dT%H:%M:%SZ')
days_since_release = (datetime.datetime.today() - release_parsed).days
def convert(s):
# Use both public and local version label, see PEP 440
pubv, locv = next(
(v[0], v[1] if len(v) > 1 else '') for v in (s.split('+'),))
return tuple(int(x) if x.isdigit() else x.lower()
for x in pubv.split('.') + (locv.split('.') if locv else []))
up_to_date = (convert(gpodder.__version__) >= convert(latest_version))
return up_to_date, latest_version, release_date, days_since_release
def run_in_background(function, daemon=False):
logger.debug('run_in_background: %s (%s)', function, str(daemon))
thread = threading.Thread(target=function)
thread.daemon = daemon
thread.start()
return thread
def linux_get_active_interfaces():
"""Get active network interfaces using 'ip addr'
A generator function yielding network interface
names with an inet (or inet6) and a broadcast
address, indicating an active network connection
"""
process = Popen(
['ip', 'addr', 'show', 'scope', 'global', 'up'],
close_fds=True, stdout=subprocess.PIPE)
data, x = process.communicate()
for record in re.split(r'^\d+: ',
data.decode(locale.getpreferredencoding()),
flags=re.MULTILINE):
mo = re.match(r'^([^:]*):.*inet.*scope', record, flags=re.DOTALL)
if mo:
yield mo.group(1)
def osx_get_active_interfaces():
"""Get active network interfaces using 'ifconfig'
A generator yielding the names of active network interfaces,
or nothing if the device is offline. The loopback
interface is not included.
"""
process = Popen(['ifconfig'], close_fds=True, stdout=subprocess.PIPE)
stdout, _ = process.communicate()
for i in re.split(r'\n(?!\t)', stdout.decode('utf-8'), flags=re.MULTILINE):
b = re.match(r'(\w+):.*status: (active|associated)$', i, re.MULTILINE | re.DOTALL)
if b:
yield b.group(1)
def unix_get_active_interfaces():
"""Get active network interfaces using 'ifconfig'
A generator yielding the names of active network interfaces,
or nothing if the device is offline. The loopback
interface is not included.
"""
process = Popen(['ifconfig'], close_fds=True, stdout=subprocess.PIPE)
stdout, _ = process.communicate()
for i in re.split(r'\n(?!\t)', stdout.decode(locale.getpreferredencoding()), flags=re.MULTILINE):
b = re.match(r'(\w+):.*status: (active|associated)$', i, re.MULTILINE | re.DOTALL)
if b:
yield b.group(1)
def connection_available():
"""Check if an Internet connection is available
Returns True if a connection is available (or if there
is no way to determine the connection). Returns False
if no network interfaces are up (i.e. no connectivity).
"""
try:
if gpodder.ui.win32:
# FIXME: Implement for Windows
return True
elif gpodder.ui.osx:
return len(list(osx_get_active_interfaces())) > 0
else:
# By default, we assume we're online (bug 1730)
online = True
if find_command('ip') is not None:
online = bool(list(linux_get_active_interfaces()))
elif find_command('ifconfig') is not None:
# If ifconfig is available, and it says we don't have
# any active interfaces, assume we're offline
online = bool(list(unix_get_active_interfaces()))
return online
except Exception as e:
logger.warning('Cannot get connection status: %s', e, exc_info=True)
# When we can't determine the connection status, act as if we're online (bug 1730)
return True
def website_reachable(url):
"""
Check if a specific website is available.
"""
if not connection_available():
# No network interfaces up - assume website not reachable
return (False, None)
try:
response = requests.get(url, timeout=1)
return (True, response)
except requests.exceptions.RequestException:
pass
return (False, None)
def delete_empty_folders(top):
for root, dirs, files in os.walk(top, topdown=False):
for name in dirs:
dirname = os.path.join(root, name)
if not os.listdir(dirname):
os.rmdir(dirname)
def guess_encoding(filename):
"""
read filename encoding as defined in PEP 263
- BOM marker => utf-8
- coding: xxx comment in first 2 lines
- else return None
>>> guess_encoding("not.there")
>>> guess_encoding("setup.py")
>>> guess_encoding("share/gpodder/extensions/mpris-listener.py")
'utf-8'
"""
def re_encoding(line):
m = re.match(b"""^[ \t\v]*#.*?coding[:=][ \t]*([-_.a-zA-Z0-9]+)""", line)
if m:
return m.group(1).decode()
else:
return None
if not filename or not os.path.exists(filename):
return None
with open(filename, "rb") as f:
fst = f.readline()
if fst[:3] == b"\xef\xbb\xbf":
return "utf-8"
encoding = re_encoding(fst)
if not encoding:
snd = f.readline()
encoding = re_encoding(snd)
return encoding
def iri_to_url(url):
"""
Properly escapes Unicode characters in the URL path section
TODO: Explore if this should also handle the domain
Based on: http://stackoverflow.com/a/18269491/1072626
In response to issue: https://github.com/gpodder/gpodder/issues/232
>>> iri_to_url('http://www.valpskott.se/Valpcast/MP3/Valpcast%20-%20Existentiella%20frågor.mp3')
'http://www.valpskott.se/Valpcast/MP3/Valpcast%20-%20Existentiella%20fr%C3%83%C2%A5gor.mp3'
See https://github.com/gpodder/gpodder/issues/399
>>> iri_to_url('//dts.podtrac.com/redirect.mp3/http://myhost/myepisode.mp3')
'//dts.podtrac.com/redirect.mp3/http://myhost/myepisode.mp3'
"""
url = urllib.parse.urlsplit(url)
url = list(url)
# First unquote to avoid escaping quoted content
url[2] = urllib.parse.unquote(url[2])
# extend safe with all allowed chars in path segment of URL, cf pchar rule
# in https://tools.ietf.org/html/rfc3986#appendix-A
url[2] = urllib.parse.quote(url[2], safe="/-._~!$&'()*+,;=:@")
url = urllib.parse.urlunsplit(url)
return url
class Popen(subprocess.Popen):
"""A Popen process that tries not to leak file descriptors.
This is a drop-in replacement for subprocess.Popen(), which takes the same
arguments.
'close_fds' will default to True, if omitted. This stops the process from
inheriting ALL of gPodder's file descriptors, which would keep them
'in-use'. That is of particular concern whenever the download queue is
active and interacting with the filesystem in the background.
On Windows however, redirection cannot coexist with 'close_fds=True'.
Specifying both will raise a ValueError. A message will appear in the log.
For communication with short-lived Windows commands, setting 'close_fds'
to False may be a tolerable risk. Otherwise as a last resort, sending
output to temp files to read afterward might work (probably involving
'shell=True').
See https://github.com/gpodder/gpodder/issues/420
"""
def __init__(self, *args, **kwargs):
self.__logged_returncode = False
if 'close_fds' not in kwargs:
kwargs['close_fds'] = True
try:
super().__init__(*args, **kwargs)
except (ValueError) as e:
if gpodder.ui.win32 and kwargs['close_fds']:
if [(k, v) for (k, v) in kwargs.items() if k in ('stdin', 'stdout', 'stderr') and v]:
logger = logging.getLogger(__name__)
logger.error('util.Popen(close_fds=True) is incompatible with'
' stream redirection on Windows.')
logger.error('With close_fds=False, the process keeps all '
'currently open files locked. It might be tolerable '
'for short-lived commands. Or use temp files.')
raise e
@classmethod
def testPopen(cls):
# Problematic commands (write to stderr or read from stdin).
if gpodder.ui.win32:
cmd = ['findstr.exe', '/!']
cmd_pipe = ['findstr', 'hello']
else:
cmd = ['cat', '--helpp']
cmd_pipe = ['grep', 'hello']
logger.info('Test #1: Implicit close_fds=True, with no redirection')
logger.info('No race condition.')
logger.info('Streams left in the console.')
logger.info('Typical spawn and forget. Might as well wait().')
p = Popen(cmd)
out, err = p.communicate()
print("- - stderr - -\n{}\n- - - - - -\n".format(err))
logger.info('Test #2: Explicit close_fds=False, with redirection.')
logger.info('This has a race condition, but communicate() always returns streams.')
p = Popen(cmd, close_fds=False, stderr=subprocess.PIPE, universal_newlines=True)
out, err = p.communicate()
print("- - stderr - -\n{}\n- - - - - -\n".format(err))
try:
logger.info('Test #3: Implicit close_fds=True, with attempted redirection.')
logger.info('No race condition.')
logger.info('On Windows, this will raise ValueError.')
logger.info('Other platforms will have readable streams returned.')
p = Popen(cmd, stderr=subprocess.PIPE, universal_newlines=True)
out, err = p.communicate()
print("- - stderr - -\n{}\n- - - - - -\n".format(err))
except (ValueError) as e:
print("- - Caught - -\n{}: {}\n- - - - - -\n".format(e.__class__.__name__, e))
try:
logger.info('Test #4: Implicit close_fds=True, given input.')
p = Popen(cmd_pipe, stdin=subprocess.PIPE)
out, err = p.communicate(input=b'hello world')
print("NEVER REACHED ON WINDOWS")
print("- - stderr - -\n{}\n- - - - - -\n".format(err))
except (ValueError) as e:
print("- - Caught - -\n{}: {}\n- - - - - -\n".format(e.__class__.__name__, e))
logger.info('Log spam only occurs if returncode is non-zero or if explaining the Windows redirection error.')
def _parse_mimetype_sorted_dictitems(mimetype):
""" python 3.5 unordered dict compat for doctest. don't use! """
r = parse_mimetype(mimetype)
return r[0], r[1], sorted(r[2].items())
def parse_mimetype(mimetype):
"""
parse mimetype into (type, subtype, parameters)
see RFC 2045 §5.1
TODO: unhandled comments and continuations
>>> _parse_mimetype_sorted_dictitems('application/atom+xml;profile=opds-catalog;type=feed;kind=acquisition')
('application', 'atom+xml', [('kind', 'acquisition'), ('profile', 'opds-catalog'), ('type', 'feed')])
>>> _parse_mimetype_sorted_dictitems('application/atom+xml; profile=opds-catalog ; type=feed ; kind=acquisition')
('application', 'atom+xml', [('kind', 'acquisition'), ('profile', 'opds-catalog'), ('type', 'feed')])
>>> _parse_mimetype_sorted_dictitems(None)
(None, None, [])
>>> _parse_mimetype_sorted_dictitems('')
(None, None, [])
>>> _parse_mimetype_sorted_dictitems('application/x-myapp;quoted="a quoted string with ; etc.";a=b')
('application', 'x-myapp', [('a', 'b'), ('quoted', 'a quoted string with ; etc.')])
"""
class MIMETypeException(Exception):
""" when an exception is encountered parsing mime type """
if not mimetype or '/' not in mimetype:
return (None, None, {})
main, sub = mimetype.split('/', 1)
try:
sub, rawparams = sub.split(';', 1)
params = {}
key = ''
value = ''
invalue = False
inquotes = False
quotedvalue = False
nomore = False
offset = len(main) + 1 + len(sub) + 1
for i, c in enumerate(rawparams):
if inquotes:
if c == '"':
inquotes = False
quotedvalue = True
nomore = True
else:
value += c
continue
if c == ';':
if invalue:
params[key] = value
key = ''
invalue = False
inquotes = False
nomore = False
else:
raise MIMETypeException("Unable to parse mimetype '%s': unexpected ; at %i" % (mimetype, offset + i))
elif c == '"':
if invalue:
if value:
raise MIMETypeException("Unable to parse mimetype '%s': unexpected \" at %i" % (mimetype, offset + i))
inquotes = True
elif c == '=':
if invalue:
raise MIMETypeException("Unable to parse mimetype '%s': unexpected = at %i" % (mimetype, offset + i))
invalue = True
quotedvalue = False
value = ''
elif c in (' ', '\t'):
if invalue and value:
nomore = True
if not invalue and key:
nomore = True
else:
if nomore:
raise MIMETypeException("Unable to parse mimetype '%s': unexpected %s after space at %i" % (mimetype, c, offset + i))
if invalue:
value += c
else:
key += c
# after loop
if invalue:
if value or quotedvalue:
params[key] = value
else:
raise MIMETypeException("Unable to parse mimetype '%s': empty value for %s" % (mimetype, key))
elif key:
raise MIMETypeException("Unable to parse mimetype '%s': missing value for %s" % (mimetype, key))
elif inquotes:
raise MIMETypeException("Unable to parse mimetype '%s': unclosed \"" % mimetype)
return (main, sub, params)
except MIMETypeException as e:
print(e)
return (None, None, {})
def get_header_param(headers, param, header_name):
"""Extract a HTTP header parameter from a dict
Uses the "email" module to retrieve parameters
from HTTP headers. This can be used to get the
"filename" parameter of the "content-disposition"
header for downloads to pick a good filename.
Returns None if the filename cannot be retrieved.
"""
value = None
try:
headers_string = ['%s:%s' % (k, v) for k, v in list(headers.items())]
msg = email.message_from_string('\n'.join(headers_string))
if header_name in msg:
raw_value = msg.get_param(param, header=header_name)
if raw_value is not None:
value = email.utils.collapse_rfc2231_value(raw_value)
except Exception:
logger.error('Cannot get %s from %s', param, header_name, exc_info=True)
return value
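# Illustrative usage (assumption): extract the suggested filename from a
# Content-Disposition header.
#
#   headers = {'content-disposition': 'attachment; filename="episode.mp3"'}
#   get_header_param(headers, 'filename', 'content-disposition')
#   # => 'episode.mp3'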
def response_text(response, default_encoding='utf-8'):
"""
Utility method to return urlopen response's text.
Requests uses only the charset info in content-type, then defaults to ISO-8859-1
when content-type=text/*.
We could use chardet (via response.apparent_encoding) but it's slow so often it's
simpler to just use the known encoding.
:return: textual body of the response
"""
if 'charset=' in response.headers.get('content-type', ''):
return response.text
else:
return response.content.decode(default_encoding)
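# Illustrative usage (assumption): decode a feed fetched with urlopen()
# above, falling back to UTF-8 when the server omits the charset.
#
#   response = urlopen('http://example.com/feed.xml')
#   body = response_text(response)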
def mount_volume_for_file(file, op=None):
"""
Utility method to mount the enclosing volume for the given file in a blocking
fashion
"""
import gi
gi.require_version('Gio', '2.0')
from gi.repository import Gio, GLib
if gpodder.ui.gtk:
gi.require_version('Gtk', '3.0')
from gi.repository import Gtk
else:
loop = GLib.MainLoop()
result = True
message = None
def callback(file, res):
nonlocal result, message
try:
file.mount_enclosing_volume_finish(res)
result = True
except GLib.Error as err:
if (not err.matches(Gio.io_error_quark(), Gio.IOErrorEnum.NOT_SUPPORTED)
and not err.matches(Gio.io_error_quark(), Gio.IOErrorEnum.ALREADY_MOUNTED)):
message = err.message
result = False
finally:
if gpodder.ui.gtk:
Gtk.main_quit()
else:
loop.quit()
file.mount_enclosing_volume(Gio.MountMountFlags.NONE, op, None, callback)
if gpodder.ui.gtk:
Gtk.main()
else:
loop.run()
return result, message
def scale_pixbuf(pixbuf, max_size):
from gi.repository import GdkPixbuf
w_cur = pixbuf.get_width()
h_cur = pixbuf.get_height()
if w_cur <= max_size and h_cur <= max_size:
return pixbuf
f = max_size / (w_cur if w_cur >= h_cur else h_cur)
w_new = int(w_cur * f)
h_new = int(h_cur * f)
return pixbuf.scale_simple(w_new, h_new, GdkPixbuf.InterpType.BILINEAR)
| 78,253
|
Python
|
.py
| 1,967
| 32.684291
| 137
| 0.626828
|
gpodder/gpodder
| 1,268
| 204
| 307
|
GPL-3.0
|
9/5/2024, 5:10:30 PM (Europe/Amsterdam)
|
8,284
|
extensions.py
|
gpodder_gpodder/src/gpodder/extensions.py
|
# -*- coding: utf-8 -*-
#
# gPodder - A media aggregator and podcast client
# Copyright (c) 2005-2009 Thomas Perl and the gPodder Team
#
# gPodder is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# gPodder is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Loads and executes user extensions
Extensions are Python scripts in "$GPODDER_HOME/Extensions". Each script must
define a class named "gPodderExtension", otherwise it will be ignored.
The extension class defines several callbacks that will be called by gPodder
at certain points. See the methods defined below for a list of callbacks and
their parameters.
For an example extension see share/gpodder/examples/extensions.py
"""
import functools
import glob
import importlib
import logging
import os
import re
import gpodder
from gpodder import util
_ = gpodder.gettext
logger = logging.getLogger(__name__)
CATEGORY_DICT = {
'desktop-integration': _('Desktop Integration'),
'interface': _('Interface'),
'post-download': _('Post download'),
}
DEFAULT_CATEGORY = _('Other')
def call_extensions(func):
"""Decorator to create handler functions in ExtensionManager
Calls the specified function in all user extensions that define it.
"""
method_name = func.__name__
@functools.wraps(func)
def handler(self, *args, **kwargs):
result = None
for container in self.containers:
if not container.enabled or container.module is None:
continue
try:
callback = getattr(container.module, method_name, None)
if callback is None:
continue
# If the results are lists, concatenate them to show all
# possible items that are generated by all extensions together
cb_res = callback(*args, **kwargs)
if isinstance(result, list) and isinstance(cb_res, list):
result.extend(cb_res)
elif cb_res is not None:
result = cb_res
except Exception as exception:
logger.error('Error in %s in %s: %s', container.filename,
method_name, exception, exc_info=True)
func(self, *args, **kwargs)
return result
return handler
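# Illustrative sketch (assumption, not original code): a method decorated
# with @call_extensions is only a hook point; the decorator dispatches to
# the function of the same name in every enabled extension module. The
# method name below is hypothetical.
#
#   class SomeManager:
#       containers = []
#
#       @call_extensions
#       def on_episode_downloaded(self, episode):
#           pass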
class ExtensionMetadata(object):
# Default fallback metadata in case metadata fields are missing
DEFAULTS = {
'description': _('No description for this extension.'),
'doc': None,
'payment': None,
}
SORTKEYS = {
'title': 1,
'description': 2,
'category': 3,
'authors': 4,
'only_for': 5,
'mandatory_in': 6,
'disable_in': 7,
}
def __init__(self, container, metadata):
if 'title' not in metadata:
metadata['title'] = container.name
category = metadata.get('category', 'other')
metadata['category'] = CATEGORY_DICT.get(category, DEFAULT_CATEGORY)
self.__dict__.update(metadata)
def __getattr__(self, name):
try:
return self.DEFAULTS[name]
except KeyError as e:
raise AttributeError(name, e)
def get_sorted(self):
def kf(x):
return self.SORTKEYS.get(x[0], 99)
return sorted([(k, v) for k, v in list(self.__dict__.items())], key=kf)
def check_ui(self, target, default):
"""Checks metadata information like
__only_for__ = 'gtk'
__mandatory_in__ = 'gtk'
__disable_in__ = 'gtk'
The metadata fields in an extension can be a string with
comma-separated values for UIs. This will be checked against
boolean variables in the "gpodder.ui" object.
Example metadata field in an extension:
__only_for__ = 'gtk'
__only_for__ = 'unity'
In this case, this function will return the value of the default
if any of the following expressions will evaluate to True:
gpodder.ui.gtk
gpodder.ui.unity
gpodder.ui.cli
gpodder.ui.osx
gpodder.ui.win32
New, unknown UIs are silently ignored and will evaluate to False.
"""
if not hasattr(self, target):
return default
uis = [_f for _f in [x.strip() for x in getattr(self, target).split(',')] if _f]
return any(getattr(gpodder.ui, ui.lower(), False) for ui in uis)
@property
def available_for_current_ui(self):
return self.check_ui('only_for', True)
@property
def mandatory_in_current_ui(self):
return self.check_ui('mandatory_in', False)
@property
def disable_in_current_ui(self):
return self.check_ui('disable_in', False)
class MissingDependency(Exception):
def __init__(self, message, dependency, cause=None):
Exception.__init__(self, message)
self.dependency = dependency
self.cause = cause
class MissingModule(MissingDependency):
pass
class MissingCommand(MissingDependency):
pass
class ExtensionContainer(object):
"""An extension container wraps one extension module"""
def __init__(self, manager, name, config, filename=None, module=None):
self.manager = manager
self.name = name
self.config = config
self.filename = filename
self.module = module
self.enabled = False
self.error = None
self.default_config = None
self.parameters = None
self.metadata = ExtensionMetadata(self, self._load_metadata(filename))
def require_command(self, command):
"""Checks if the given command is installed on the system
Returns the complete path of the command
@param command: String with the command name
"""
result = util.find_command(command)
if result is None:
msg = _('Command not found: %(command)s') % {'command': command}
raise MissingCommand(msg, command)
return result
def require_any_command(self, command_list):
"""Checks if any of the given commands is installed on the system
Returns the complete path of first found command in the list
        @param command_list: List of command names
"""
for command in command_list:
result = util.find_command(command)
if result is not None:
return result
msg = _('Need at least one of the following commands: %(list_of_commands)s') % \
{'list_of_commands': ', '.join(command_list)}
raise MissingCommand(msg, ', '.join(command_list))
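    # Typical use from inside an extension module (sketch; 'ffmpeg' and
    # 'avconv' are just example command names). Dependency checks are usually
    # done in the extension's __init__, so a MissingCommand error surfaces
    # before the extension is marked as enabled:
    #
    #     class gPodderExtension:
    #         def __init__(self, container):
    #             self.command = container.require_any_command(['ffmpeg', 'avconv'])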
def _load_metadata(self, filename):
if not filename or not os.path.exists(filename):
return {}
encoding = util.guess_encoding(filename)
with open(filename, "r", encoding=encoding) as f:
extension_py = f.read()
metadata = dict(re.findall(r"__([a-z_]+)__ = '([^']+)'", extension_py))
# Support for using gpodder.gettext() as _ to localize text
localized_metadata = dict(re.findall(r"__([a-z_]+)__ = _\('([^']+)'\)",
extension_py))
for key in localized_metadata:
metadata[key] = gpodder.gettext(localized_metadata[key])
return metadata
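    # The regular expressions above only match simple, single-quoted
    # module-level assignments, so metadata in an extension file is expected
    # to look like this (illustrative example):
    #
    #     __title__ = 'Rename downloads'
    #     __description__ = _('Rename episode files after download')
    #     __only_for__ = 'gtk, cli'
    #
    # Multi-line strings or computed values will simply not be picked up.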
def set_enabled(self, enabled):
if enabled and not self.enabled:
try:
self.load_extension()
self.error = None
self.enabled = True
if hasattr(self.module, 'on_load'):
self.module.on_load()
except Exception as exception:
logger.error('Cannot load %s from %s: %s', self.name,
self.filename, exception, exc_info=True)
if isinstance(exception, ImportError):
# Wrap ImportError in MissingCommand for user-friendly
# message (might be displayed in the GUI)
if exception.name:
module = exception.name
msg = _('Python module not found: %(module)s') % {
'module': module
}
exception = MissingCommand(msg, module, exception)
self.error = exception
self.enabled = False
elif not enabled and self.enabled:
try:
if hasattr(self.module, 'on_unload'):
self.module.on_unload()
except Exception as exception:
logger.error('Failed to on_unload %s: %s', self.name,
exception, exc_info=True)
self.enabled = False
def load_extension(self):
"""Load and initialize the gPodder extension module"""
if self.module is not None:
logger.info('Module already loaded.')
return
if not self.metadata.available_for_current_ui:
logger.info('Not loading "%s" (only_for = "%s")',
self.name, self.metadata.only_for)
return
basename, _ = os.path.splitext(os.path.basename(self.filename))
try:
# from load_source() on https://docs.python.org/dev/whatsnew/3.12.html
loader = importlib.machinery.SourceFileLoader(basename, self.filename)
spec = importlib.util.spec_from_file_location(basename, self.filename, loader=loader)
module_file = importlib.util.module_from_spec(spec)
loader.exec_module(module_file)
finally:
# Remove the .pyc file if it was created during import
util.delete_file(self.filename + 'c')
self.default_config = getattr(module_file, 'DefaultConfig', {})
if self.default_config:
self.manager.core.config.register_defaults({
'extensions': {
self.name: self.default_config,
}
})
self.config = getattr(self.manager.core.config.extensions, self.name)
self.module = module_file.gPodderExtension(self)
logger.info('Module loaded: %s', self.filename)
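# For reference, a minimal extension module that load_extension() can load
# looks roughly like this (a sketch; the file would live in the user's
# Extensions folder and its config key name is derived from the basename):
#
#     __title__ = 'Hello extension'
#
#     DefaultConfig = {
#         'greeting': 'Hello',   # exposed as config.extensions.<name>.greeting
#     }
#
#     class gPodderExtension:
#         def __init__(self, container):
#             self.container = container
#
#         def on_load(self):
#             pass
#
#         def on_unload(self):
#             pass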
class ExtensionManager(object):
"""Loads extensions and manages self-registering plugins"""
def __init__(self, core):
self.core = core
self.filenames = os.environ.get('GPODDER_EXTENSIONS', '').split()
self.containers = []
core.config.add_observer(self._config_value_changed)
enabled_extensions = core.config.extensions.enabled
if os.environ.get('GPODDER_DISABLE_EXTENSIONS', '') != '':
logger.info('Disabling all extensions (from environment)')
return
for name, filename in self._find_extensions():
logger.debug('Found extension "%s" in %s', name, filename)
config = getattr(core.config.extensions, name)
container = ExtensionContainer(self, name, config, filename)
if (name in enabled_extensions
or container.metadata.mandatory_in_current_ui):
container.set_enabled(True)
if (name in enabled_extensions
and container.metadata.disable_in_current_ui):
container.set_enabled(False)
self.containers.append(container)
def shutdown(self):
for container in self.containers:
container.set_enabled(False)
def _config_value_changed(self, name, old_value, new_value):
if name != 'extensions.enabled':
return
for container in self.containers:
new_enabled = (container.name in new_value)
if new_enabled == container.enabled:
continue
if not new_enabled and container.metadata.mandatory_in_current_ui:
# forced extensions are never listed in extensions.enabled
continue
logger.info('Extension "%s" is now %s', container.name,
'enabled' if new_enabled else 'disabled')
container.set_enabled(new_enabled)
if new_enabled and not container.enabled:
logger.warning('Could not enable extension: %s',
container.error)
self.core.config.extensions.enabled = [x
for x in self.core.config.extensions.enabled
if x != container.name]
def _find_extensions(self):
extensions = {}
if not self.filenames:
builtins = os.path.join(gpodder.prefix, 'share', 'gpodder',
'extensions', '*.py')
user_extensions = os.path.join(gpodder.home, 'Extensions', '*.py')
self.filenames = glob.glob(builtins) + glob.glob(user_extensions)
# Let user extensions override built-in extensions of the same name
for filename in self.filenames:
if not filename or not os.path.exists(filename):
logger.info('Skipping non-existing file: %s', filename)
continue
name, _ = os.path.splitext(os.path.basename(filename))
extensions[name] = filename
return sorted(extensions.items())
def get_extensions(self):
"""Get a list of all loaded extensions and their enabled flag"""
return [c for c in self.containers
if c.metadata.available_for_current_ui
and not c.metadata.mandatory_in_current_ui
and not c.metadata.disable_in_current_ui]
# Define all known handler functions here, decorate them with the
# "call_extension" decorator to forward all calls to extension scripts that have
# the same function defined in them. If the handler functions here contain
# any code, it will be called after all the extensions have been called.
@call_extensions
def on_ui_initialized(self, model, update_podcast_callback,
download_episode_callback):
"""Called when the user interface is initialized.
@param model: A gpodder.model.Model instance
@param update_podcast_callback: Function to update a podcast feed
@param download_episode_callback: Function to download an episode
"""
@call_extensions
def on_podcast_subscribe(self, podcast):
"""Called when the user subscribes to a new podcast feed.
@param podcast: A gpodder.model.PodcastChannel instance
"""
@call_extensions
def on_podcast_updated(self, podcast):
"""Called when a podcast feed was updated
This extension will be called even if there were no new episodes.
@param podcast: A gpodder.model.PodcastChannel instance
"""
@call_extensions
def on_podcast_update_failed(self, podcast, exception):
"""Called when a podcast update failed.
@param podcast: A gpodder.model.PodcastChannel instance
@param exception: The reason.
"""
@call_extensions
def on_podcast_save(self, podcast):
"""Called when a podcast is saved to the database
        This extension will be called when the user edits the metadata of
        the podcast or when the feed is updated.
@param podcast: A gpodder.model.PodcastChannel instance
"""
@call_extensions
def on_podcast_delete(self, podcast):
"""Called when a podcast is deleted from the database
@param podcast: A gpodder.model.PodcastChannel instance
"""
@call_extensions
def on_episode_playback(self, episode):
"""Called when an episode is played back
This function will be called when the user clicks on "Play" or
"Open" in the GUI to open an episode with the media player.
@param episode: A gpodder.model.PodcastEpisode instance
"""
@call_extensions
def on_episode_save(self, episode):
"""Called when an episode is saved to the database
This extension will be called when a new episode is added to the
database or when the state of an existing episode is changed.
@param episode: A gpodder.model.PodcastEpisode instance
"""
@call_extensions
def on_episode_downloaded(self, episode):
"""Called when an episode has been downloaded
You can retrieve the filename via episode.local_filename(False)
@param episode: A gpodder.model.PodcastEpisode instance
"""
@call_extensions
def on_all_episodes_downloaded(self):
"""Called when all episodes has been downloaded
"""
@call_extensions
def on_episode_synced(self, device, episode):
"""Called when an episode has been synced to device
You can retrieve the filename via episode.local_filename(False)
For MP3PlayerDevice:
You can retrieve the filename on device via
device.get_episode_file_on_device(episode)
You can retrieve the folder name on device via
device.get_episode_folder_on_device(episode)
@param device: A gpodder.sync.Device instance
@param episode: A gpodder.model.PodcastEpisode instance
"""
@call_extensions
def on_create_menu(self):
"""Called when the Extras menu is created
You can add additional Extras menu entries here. You have to return a
list of tuples, where the first item is a label and the second item is a
callable that will get no parameter.
Example return value:
[('Sync to Smartphone', lambda : ...)]
"""
@call_extensions
def on_episodes_context_menu(self, episodes):
"""Called when the episode list context menu is opened
You can add additional context menu entries here. You have to
return a list of tuples, where the first item is a label and
the second item is a callable that will get the episode as its
first and only parameter.
Example return value:
[('Mark as new', lambda episodes: ...)]
@param episodes: A list of gpodder.model.PodcastEpisode instances
"""
@call_extensions
def on_channel_context_menu(self, channel):
"""Called when the channel list context menu is opened
You can add additional context menu entries here. You have to return a
list of tuples, where the first item is a label and the second item is a
callable that will get the channel as its first and only parameter.
Example return value:
[('Update channel', lambda channel: ...)]
@param channel: A gpodder.model.PodcastChannel instance
"""
@call_extensions
def on_episode_delete(self, episode, filename):
"""Called just before the episode's disk file is about to be
deleted."""
@call_extensions
def on_episode_removed_from_podcast(self, episode):
"""Called just before the episode is about to be removed from
the podcast channel, e.g., when the episode has not been
downloaded and it disappears from the feed.
        @param episode: A gpodder.model.PodcastEpisode instance
"""
@call_extensions
def on_notification_show(self, title, message):
"""Called when a notification should be shown
@param title: title of the notification
@param message: message of the notification
"""
@call_extensions
def on_download_progress(self, progress):
"""Called when the overall download progress changes
@param progress: The current progress value (0..1)
"""
@call_extensions
def on_ui_object_available(self, name, ui_object):
"""Called when an UI-specific object becomes available
XXX: Experimental. This hook might go away without notice (and be
replaced with something better). Only use for in-tree extensions.
@param name: The name/ID of the object
@param ui_object: The object itself
"""
@call_extensions
def on_application_started(self):
"""Called when the application started.
This is for extensions doing stuff at startup that they don't
want to do if they have just been enabled.
e.g. minimize at startup should not minimize the application when
enabled but only on following startups.
It is called after on_ui_object_available and on_ui_initialized.
"""
@call_extensions
def on_find_partial_downloads_done(self):
"""Called when the application started and the lookout for resume is done
This is mainly for extensions scheduling refresh or downloads at startup,
to prevent race conditions with the find_partial_downloads method.
It is called after on_application_started.
"""
@call_extensions
def on_preferences(self):
"""Called when the preferences dialog is opened
You can add additional tabs to the preferences dialog here. You have to
return a list of tuples, where the first item is a label and the second
item is a callable with no parameters and returns a Gtk widget.
Example return value:
[('Tab name', lambda: ...)]
"""
@call_extensions
def on_channel_settings(self, channel):
"""Called when a channel settings dialog is opened
You can add additional tabs to the channel settings dialog here. You
have to return a list of tuples, where the first item is a label and the
second item is a callable that will get the channel as its first and
only parameter and returns a Gtk widget.
Example return value:
[('Tab name', lambda channel: ...)]
@param channel: A gpodder.model.PodcastChannel instance
"""
| 22,237 | Python | .py | 488 | 35.915984 | 97 | 0.636835 | gpodder/gpodder | 1,268 | 204 | 307 | GPL-3.0 | 9/5/2024, 5:10:30 PM (Europe/Amsterdam) | 8,285 | sync.py | gpodder_gpodder/src/gpodder/sync.py |
# -*- coding: utf-8 -*-
#
# gPodder - A media aggregator and podcast client
# Copyright (c) 2005-2018 The gPodder Team
#
# gPodder is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# gPodder is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# sync.py -- Device synchronization
# Thomas Perl <thp@perli.net> 2007-12-06
# based on libipodsync.py (2006-04-05 Thomas Perl)
# Ported to gPodder 3 by Joseph Wickremasinghe in June 2012
import logging
import os.path
import threading
import time
import gpodder
from gpodder import download, services, util
import gi # isort:skip
gi.require_version('Gio', '2.0') # isort:skip
from gi.repository import GLib, Gio # isort:skip
logger = logging.getLogger(__name__)
_ = gpodder.gettext
gpod_available = True
try:
from gpodder import libgpod_ctypes
except ImportError:
logger.info('iPod sync not available')
gpod_available = False
mplayer_available = util.find_command('mplayer') is not None
eyed3mp3_available = True
try:
import eyed3.mp3
except ImportError:
logger.info('eyeD3 MP3 not available')
eyed3mp3_available = False
def open_device(gui):
config = gui._config
device_type = gui._config.device_sync.device_type
if device_type == 'ipod':
return iPodDevice(config,
gui.download_status_model,
gui.download_queue_manager)
elif device_type == 'filesystem':
return MP3PlayerDevice(config,
gui.download_status_model,
gui.download_queue_manager,
gui.mount_volume_for_file)
return None
def get_track_length(filename):
attempted = False
if mplayer_available:
try:
mplayer_output = os.popen('mplayer -msglevel all=-1 -identify -vo null -ao null -frames 0 "%s" 2>/dev/null' % filename).read()
return int(float(mplayer_output[mplayer_output.index('ID_LENGTH'):].splitlines()[0][10:]) * 1000)
except Exception:
logger.error('MPlayer could not determine length: %s', filename, exc_info=True)
attempted = True
if eyed3mp3_available:
try:
length = int(eyed3.mp3.Mp3AudioFile(filename).info.time_secs * 1000)
# Notify user on eyed3 success if mplayer failed.
# A warning is used to make it visible in gpo or on console.
if attempted:
logger.warning('eyed3.mp3 successfully determined length: %s', filename)
return length
except Exception:
logger.error('eyed3.mp3 could not determine length: %s', filename, exc_info=True)
attempted = True
if not attempted:
logger.warning('Could not determine length: %s', filename)
logger.warning('Please install MPlayer or the eyed3.mp3 module for track length detection.')
    # Default is three hours (to be on the safe side)
    return int(60 * 60 * 1000 * 3)
def episode_filename_on_device(config, episode):
"""
:param gpodder.config.Config config: configuration (for sync options)
:param gpodder.model.PodcastEpisode episode: episode to get filename for
:return str: basename minus extension to use to save episode on device
"""
# get the local file
from_file = episode.local_filename(create=False)
# get the formatted base name
filename_base = util.sanitize_filename(episode.sync_filename(
config.device_sync.custom_sync_name_enabled,
config.device_sync.custom_sync_name),
config.device_sync.max_filename_length)
# add the file extension
to_file = filename_base + os.path.splitext(from_file)[1].lower()
# dirty workaround: on bad (empty) episode titles,
# we simply use the from_file basename
# (please, podcast authors, FIX YOUR RSS FEEDS!)
if os.path.splitext(to_file)[0] == '':
to_file = os.path.basename(from_file)
return to_file
def episode_foldername_on_device(config, episode):
"""
:param gpodder.config.Config config: configuration (for sync options)
:param gpodder.model.PodcastEpisode episode: episode to get folder name for
:return str: folder name to save episode to on device
"""
if config.device_sync.one_folder_per_podcast:
# Add channel title as subfolder
folder = episode.channel.title
# Clean up the folder name for use on limited devices
folder = util.sanitize_filename(folder, config.device_sync.max_filename_length)
else:
folder = None
return folder
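# Taken together, the two helpers above determine where an episode ends up on
# the device. A sketch with assumed values (channel title 'My Podcast',
# episode file 'Episode 1.mp3', one_folder_per_podcast enabled):
#
#     folder = episode_foldername_on_device(config, episode)  # 'My Podcast'
#     name = episode_filename_on_device(config, episode)      # 'Episode 1.mp3'
#     # final location: <device_folder>/My Podcast/Episode 1.mp3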
class SyncTrack(object):
"""
This represents a track that is on a device. You need
to specify at least the following keyword arguments,
because these will be used to display the track in the
GUI. All other keyword arguments are optional and can
be used to reference internal objects, etc... See the
iPod synchronization code for examples.
Keyword arguments needed:
playcount (How often has the track been played?)
podcast (Which podcast is this track from? Or: Folder name)
    If any of these fields is unknown, it should not be
    passed to the function (playcount then defaults to 0
    and podcast to None, see __init__ below).
"""
def __init__(self, title, length, modified, **kwargs):
self.title = title
self.length = length
self.filesize = util.format_filesize(length)
self.modified = modified
# Set some (possible) keyword arguments to default values
self.playcount = 0
self.podcast = None
# Convert keyword arguments to object attributes
self.__dict__.update(kwargs)
def __repr__(self):
return 'SyncTrack(title={}, podcast={})'.format(self.title, self.podcast)
@property
def playcount_str(self):
return str(self.playcount)
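# Constructing a SyncTrack (sketch with made-up values); any extra keyword
# arguments, such as 'filename' or 'ipod_track', become attributes that the
# device backends use internally:
#
#     t = SyncTrack('Episode 1', 12345678, '2024-01-01',
#                   playcount=1, podcast='My Podcast',
#                   filename='file:///media/player/My%20Podcast/Episode%201.mp3')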
class Device(services.ObservableService):
def __init__(self, config):
self._config = config
self.cancelled = False
self.allowed_types = ['audio', 'video']
self.errors = []
self.tracks_list = []
signals = ['progress', 'sub-progress', 'status', 'done', 'post-done']
services.ObservableService.__init__(self, signals)
def open(self):
pass
def cancel(self):
self.cancelled = True
self.notify('status', _('Cancelled by user'))
def close(self):
self.notify('status', _('Writing data to disk'))
if self._config.device_sync.after_sync.sync_disks and not gpodder.ui.win32:
os.system('sync')
else:
logger.warning('Not syncing disks. Unmount your device before unplugging.')
return True
def create_task(self, track):
return SyncTask(track)
def cancel_task(self, task):
pass
def cleanup_task(self, task):
pass
def add_sync_tasks(self, tracklist, force_played=False, done_callback=None):
for track in list(tracklist):
# Filter tracks that are not meant to be synchronized
does_not_exist = not track.was_downloaded(and_exists=True)
exclude_played = (not track.is_new
and self._config.device_sync.skip_played_episodes)
wrong_type = track.file_type() not in self.allowed_types
if does_not_exist:
tracklist.remove(track)
elif exclude_played or wrong_type:
logger.info('Excluding %s from sync', track.title)
tracklist.remove(track)
if tracklist:
for track in sorted(tracklist, key=lambda e: e.pubdate_prop):
if self.cancelled:
break
# XXX: need to check if track is added properly?
sync_task = self.create_task(track)
sync_task.status = sync_task.NEW
sync_task.device = self
# New Task, we must wait on the GTK Loop
self.download_status_model.register_task(sync_task)
# Executes after task has been registered
util.idle_add(self.download_queue_manager.queue_task, sync_task)
else:
logger.warning("No episodes to sync")
if done_callback:
done_callback()
def get_all_tracks(self):
pass
def add_track(self, track, reporthook=None):
pass
def remove_track(self, track):
pass
def get_free_space(self):
pass
def episode_on_device(self, episode):
return self._track_on_device(episode.title)
def _track_on_device(self, track_name):
for t in self.tracks_list:
title = t.title
if track_name == title:
return t
return None
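# Rough lifecycle of a Device subclass as driven by the sync code (sketch;
# 'gui' stands for the main window object that open_device() expects):
#
#     device = open_device(gui)          # choose backend from config
#     if device is not None and device.open():
#         device.add_sync_tasks(episodes, done_callback=on_done)
#         ...                            # queued SyncTasks call device.add_track()
#         device.close()                 # flush database / sync disks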
class iPodDevice(Device):
def __init__(self, config,
download_status_model,
download_queue_manager):
Device.__init__(self, config)
self.mountpoint = self._config.device_sync.device_folder
self.download_status_model = download_status_model
self.download_queue_manager = download_queue_manager
self.ipod = None
def get_free_space(self):
# Reserve 10 MiB for iTunesDB writing (to be on the safe side)
RESERVED_FOR_ITDB = 1024 * 1024 * 10
result = util.get_free_disk_space(self.mountpoint)
if result == -1:
# Can't get free disk space
return -1
return result - RESERVED_FOR_ITDB
def open(self):
Device.open(self)
if not gpod_available:
logger.error('Please install libgpod 0.8.3 to sync with an iPod device.')
return False
if not os.path.isdir(self.mountpoint):
return False
self.notify('status', _('Opening iPod database'))
self.ipod = libgpod_ctypes.iPodDatabase(self.mountpoint)
if not self.ipod.itdb or not self.ipod.podcasts_playlist or not self.ipod.master_playlist:
return False
self.notify('status', _('iPod opened'))
# build the initial tracks_list
self.tracks_list = self.get_all_tracks()
return True
def close(self):
if self.ipod is not None:
self.notify('status', _('Saving iPod database'))
self.ipod.close()
self.ipod = None
Device.close(self)
return True
def get_all_tracks(self):
tracks = []
for track in self.ipod.get_podcast_tracks():
filename = track.filename_on_ipod
if filename is None:
length = 0
modified = ''
else:
length = util.calculate_size(filename)
timestamp = util.file_modification_timestamp(filename)
modified = util.format_date(timestamp)
t = SyncTrack(track.episode_title, length, modified,
ipod_track=track,
playcount=track.playcount,
podcast=track.podcast_title)
tracks.append(t)
return tracks
def episode_on_device(self, episode):
return next((track for track in self.tracks_list
if track.ipod_track.podcast_rss == episode.channel.url
and track.ipod_track.podcast_url == episode.url), None)
def remove_track(self, track):
self.notify('status', _('Removing %s') % track.title)
logger.info('Removing track from iPod: %r', track.title)
track.ipod_track.remove_from_device()
try:
self.tracks_list.remove(next((sync_track for sync_track in self.tracks_list
if sync_track.ipod_track == track), None))
except ValueError:
...
def add_track(self, task, reporthook=None):
episode = task.episode
self.notify('status', _('Adding %s') % episode.title)
tracklist = self.ipod.get_podcast_tracks()
episode_urls = [track.podcast_url for track in tracklist]
if episode.url in episode_urls:
# Mark as played on iPod if played locally (and set podcast flags)
self.update_from_episode(tracklist[episode_urls.index(episode.url)], episode)
return True
local_filename = episode.local_filename(create=False)
        # The file has to exist if we are to transfer it, so
        # local_filename(create=False) must never return None here
assert local_filename is not None
if util.calculate_size(local_filename) > self.get_free_space():
logger.error('Not enough space on %s, sync aborted...', self.mountpoint)
d = {'episode': episode.title, 'mountpoint': self.mountpoint}
message = _('Error copying %(episode)s: Not enough free space on %(mountpoint)s')
self.errors.append(message % d)
self.cancelled = True
return False
(fn, extension) = os.path.splitext(local_filename)
if extension.lower().endswith('ogg'):
# XXX: Proper file extension/format support check for iPod
logger.error('Cannot copy .ogg files to iPod.')
return False
track = self.ipod.add_track(local_filename, episode.title, episode.channel.title,
episode._text_description, episode.url, episode.channel.url,
episode.published, get_track_length(local_filename), episode.file_type() == 'audio')
self.update_from_episode(track, episode, initial=True)
reporthook(episode.file_size, 1, episode.file_size)
return True
def update_from_episode(self, track, episode, *, initial=False):
if initial:
# Set the initial bookmark on the device based on what we have locally
track.initialize_bookmark(episode.is_new, episode.current_position * 1000)
else:
# Copy updated status from iPod
if track.playcount > 0:
episode.is_new = False
if track.bookmark_time > 0:
logger.info('Playback position from iPod: %s', util.format_time(track.bookmark_time / 1000))
episode.is_new = False
episode.current_position = int(track.bookmark_time / 1000)
episode.current_position_updated = time.time()
episode.save()
class MP3PlayerDevice(Device):
def __init__(self, config,
download_status_model,
download_queue_manager,
mount_volume_for_file):
Device.__init__(self, config)
folder = self._config.device_sync.device_folder
self.destination = util.new_gio_file(folder)
self.mount_volume_for_file = mount_volume_for_file
self.download_status_model = download_status_model
self.download_queue_manager = download_queue_manager
def get_free_space(self):
info = self.destination.query_filesystem_info(Gio.FILE_ATTRIBUTE_FILESYSTEM_FREE, None)
return info.get_attribute_uint64(Gio.FILE_ATTRIBUTE_FILESYSTEM_FREE)
def open(self):
Device.open(self)
self.notify('status', _('Opening MP3 player'))
if not self.mount_volume_for_file(self.destination):
return False
try:
info = self.destination.query_info(
Gio.FILE_ATTRIBUTE_ACCESS_CAN_WRITE + ","
+ Gio.FILE_ATTRIBUTE_STANDARD_TYPE,
Gio.FileQueryInfoFlags.NONE,
None)
except GLib.Error as err:
logger.error('querying destination info for %s failed with %s',
self.destination.get_uri(), err.message)
return False
if info.get_file_type() != Gio.FileType.DIRECTORY:
logger.error('destination %s is not a directory', self.destination.get_uri())
return False
# open is ok if the target is a directory, and it can be written to
# for smb, query_info doesn't return FILE_ATTRIBUTE_ACCESS_CAN_WRITE,
# -- if that's the case, just assume that it's writable
if (not info.has_attribute(Gio.FILE_ATTRIBUTE_ACCESS_CAN_WRITE)
or info.get_attribute_boolean(Gio.FILE_ATTRIBUTE_ACCESS_CAN_WRITE)):
self.notify('status', _('MP3 player opened'))
self.tracks_list = self.get_all_tracks()
return True
logger.error('destination %s is not writable', self.destination.get_uri())
return False
def get_episode_folder_on_device(self, episode):
folder = episode_foldername_on_device(self._config, episode)
if folder:
folder = self.destination.get_child(folder)
else:
folder = self.destination
return folder
def get_episode_file_on_device(self, episode):
return episode_filename_on_device(self._config, episode)
def create_task(self, track):
return GioSyncTask(track)
def cancel_task(self, task):
task.cancellable.cancel()
    # Called by the sync task when it is removed and its partial file needs cleaning up
def cleanup_task(self, task):
episode = task.episode
folder = self.get_episode_folder_on_device(episode)
file = self.get_episode_file_on_device(episode)
file = folder.get_child(file)
self.remove_track_file(file)
def add_track(self, task, reporthook=None):
episode = task.episode
self.notify('status', _('Adding %s') % episode.title)
# get the folder on the device
folder = self.get_episode_folder_on_device(episode)
filename = episode.local_filename(create=False)
        # The file has to exist if we are to transfer it, so
        # local_filename(create=False) must never return None here
assert filename is not None
from_file = filename
# verify free space
needed = util.calculate_size(from_file)
free = self.get_free_space()
if free == -1:
logger.warning('Cannot determine free disk space on device')
elif needed > free:
d = {'path': self.destination, 'free': util.format_filesize(free), 'need': util.format_filesize(needed)}
message = _('Not enough space in %(path)s: %(free)s available, but need at least %(need)s')
raise SyncFailedException(message % d)
# get the filename that will be used on the device
to_file = self.get_episode_file_on_device(episode)
to_file = folder.get_child(to_file)
util.make_directory(folder)
to_file_exists = to_file.query_exists()
from_size = episode.file_size
to_size = episode.file_size
        # An interrupted sync leaves a partial file on the device, and comparing
        # file sizes would detect such files so the upload can be finished.
        # However, some devices add metadata to files, increasing their size and
        # forcing a re-upload on every sync, so neither file size nor checksum is
        # reliable on all devices -- hence the compare_episode_filesize option.
if to_file_exists and self._config.device_sync.compare_episode_filesize:
try:
info = to_file.query_info(Gio.FILE_ATTRIBUTE_STANDARD_SIZE, Gio.FileQueryInfoFlags.NONE)
to_size = info.get_attribute_uint64(Gio.FILE_ATTRIBUTE_STANDARD_SIZE)
except GLib.Error:
# Assume same size and don't sync again
pass
if not to_file_exists or from_size != to_size:
logger.info('Copying %s (%d bytes) => %s (%d bytes)',
os.path.basename(from_file), from_size,
to_file.get_uri(), to_size)
from_file = Gio.File.new_for_path(from_file)
try:
def hookconvert(current_bytes, total_bytes, user_data):
return reporthook(current_bytes, 1, total_bytes)
from_file.copy(to_file, Gio.FileCopyFlags.OVERWRITE, task.cancellable, hookconvert, None)
except GLib.Error as err:
if err.matches(Gio.io_error_quark(), Gio.IOErrorEnum.CANCELLED):
raise SyncCancelledException()
logger.error('Error copying %s to %s: %s', from_file.get_uri(), to_file.get_uri(), err.message)
d = {'from_file': from_file.get_uri(), 'to_file': to_file.get_uri(), 'message': err.message}
self.errors.append(_('Error copying %(from_file)s to %(to_file)s: %(message)s') % d)
return False
return True
def add_sync_track(self, tracks, file, info, podcast_name):
(title, extension) = os.path.splitext(info.get_name())
timestamp = info.get_modification_time()
modified = util.format_date(timestamp.tv_sec)
t = SyncTrack(title, info.get_size(), modified,
filename=file.get_uri(),
podcast=podcast_name)
tracks.append(t)
def get_all_tracks(self):
tracks = []
attributes = (
Gio.FILE_ATTRIBUTE_STANDARD_NAME + ","
+ Gio.FILE_ATTRIBUTE_STANDARD_TYPE + ","
+ Gio.FILE_ATTRIBUTE_STANDARD_SIZE + ","
+ Gio.FILE_ATTRIBUTE_TIME_MODIFIED)
root_path = self.destination
for path_info in root_path.enumerate_children(attributes, Gio.FileQueryInfoFlags.NONE, None):
            if self._config.device_sync.one_folder_per_podcast:
if path_info.get_file_type() == Gio.FileType.DIRECTORY:
path_file = root_path.get_child(path_info.get_name())
try:
for child_info in path_file.enumerate_children(attributes, Gio.FileQueryInfoFlags.NONE, None):
if child_info.get_file_type() == Gio.FileType.REGULAR:
child_file = path_file.get_child(child_info.get_name())
self.add_sync_track(tracks, child_file, child_info, path_info.get_name())
except GLib.Error as err:
logger.error('get all tracks for %s failed: %s', path_file.get_uri(), err.message)
else:
                if path_info.get_file_type() == Gio.FileType.REGULAR:
path_file = root_path.get_child(path_info.get_name())
self.add_sync_track(tracks, path_file, path_info, None)
return tracks
def episode_on_device(self, episode):
e = util.sanitize_filename(episode.sync_filename(
self._config.device_sync.custom_sync_name_enabled,
self._config.device_sync.custom_sync_name),
self._config.device_sync.max_filename_length)
return self._track_on_device(e)
def remove_track_file(self, file):
folder = file.get_parent()
if file.query_exists():
try:
file.delete()
except GLib.Error as err:
# if the file went away don't worry about it
if not err.matches(Gio.io_error_quark(), Gio.IOErrorEnum.NOT_FOUND):
logger.error('deleting file %s failed: %s', file.get_uri(), err.message)
return
        if self._config.device_sync.one_folder_per_podcast:
try:
if self.directory_is_empty(folder):
folder.delete()
except GLib.Error as err:
# if the folder went away don't worry about it (multiple threads could
# make this happen if they both notice the folder is empty simultaneously)
if not err.matches(Gio.io_error_quark(), Gio.IOErrorEnum.NOT_FOUND):
logger.error('deleting folder %s failed: %s', folder.get_uri(), err.message)
def remove_track(self, track):
self.notify('status', _('Removing %s') % track.title)
# get the folder on the device
file = Gio.File.new_for_uri(track.filename)
self.remove_track_file(file)
def directory_is_empty(self, directory):
for child in directory.enumerate_children(Gio.FILE_ATTRIBUTE_STANDARD_NAME, Gio.FileQueryInfoFlags.NONE, None):
return False
return True
class SyncCancelledException(Exception):
pass
class SyncFailedException(Exception):
pass
class SyncTask(download.DownloadTask):
# An object representing the synchronization task of an episode
# Possible states this sync task can be in
STATUS_MESSAGE = (_('Queued'), _('Queued'), _('Syncing'),
_('Finished'), _('Failed'), _('Cancelling'), _('Cancelled'), _('Pausing'), _('Paused'))
(NEW, QUEUED, DOWNLOADING, DONE, FAILED, CANCELLING, CANCELLED, PAUSING, PAUSED) = list(range(9))
def __str__(self):
return self.__episode.title
def __get_status(self):
return self.__status
def __set_status(self, status):
if status != self.__status:
self.__status_changed = True
self.__status = status
status = property(fget=__get_status, fset=__set_status)
def __get_device(self):
return self.__device
def __set_device(self, device):
self.__device = device
device = property(fget=__get_device, fset=__set_device)
def __get_status_changed(self):
if self.__status_changed:
self.__status_changed = False
return True
else:
return False
status_changed = property(fget=__get_status_changed)
def __get_activity(self):
return self.__activity
def __set_activity(self, activity):
self.__activity = activity
activity = property(fget=__get_activity, fset=__set_activity)
def __get_empty_string(self):
return ''
url = property(fget=__get_empty_string)
podcast_url = property(fget=__get_empty_string)
def __get_episode(self):
return self.__episode
episode = property(fget=__get_episode)
def can_queue(self):
return self.status in (self.CANCELLED, self.PAUSED, self.FAILED)
def can_pause(self):
return self.status in (self.DOWNLOADING, self.QUEUED)
def pause(self):
with self:
# Pause a queued download
if self.status == self.QUEUED:
self.status = self.PAUSED
# Request pause of a running download
elif self.status == self.DOWNLOADING:
self.status = self.PAUSING
def can_cancel(self):
return self.status in (self.DOWNLOADING, self.QUEUED, self.PAUSED, self.FAILED)
def cancel(self):
with self:
# Cancelling directly is allowed if the task isn't currently downloading
if self.status in (self.QUEUED, self.PAUSED, self.FAILED):
self.status = self.CANCELLED
# Call run, so the partial file gets deleted
self.run()
self.recycle()
# Otherwise request cancellation
elif self.status == self.DOWNLOADING:
self.status = self.CANCELLING
self.device.cancel()
def can_remove(self):
return self.status in (self.CANCELLED, self.FAILED, self.DONE)
def removed_from_list(self):
if self.status != self.DONE:
self.device.cleanup_task(self)
def __init__(self, episode):
self.__lock = threading.RLock()
self.__status = SyncTask.NEW
self.__activity = SyncTask.ACTIVITY_SYNCHRONIZE
self.__status_changed = True
self.__episode = episode
# Create the target filename and save it in the database
self.filename = self.__episode.local_filename(create=False)
self.total_size = self.__episode.file_size
self.speed = 0.0
self.progress = 0.0
self.error_message = None
self.custom_downloader = None
# Have we already shown this task in a notification?
self._notification_shown = False
# Variables for speed limit and speed calculation
self.__start_time = 0
self.__start_blocks = 0
self.__limit_rate_value = 999
self.__limit_rate = 999
# Callbacks
self._progress_updated = lambda x: None
def __enter__(self):
return self.__lock.acquire()
def __exit__(self, exception_type, value, traceback):
self.__lock.release()
def notify_as_finished(self):
if self.status == SyncTask.DONE:
if self._notification_shown:
return False
else:
self._notification_shown = True
return True
return False
def notify_as_failed(self):
if self.status == SyncTask.FAILED:
if self._notification_shown:
return False
else:
self._notification_shown = True
return True
return False
def add_progress_callback(self, callback):
self._progress_updated = callback
def status_updated(self, count, blockSize, totalSize):
# We see a different "total size" while downloading,
# so correct the total size variable in the thread
if totalSize != self.total_size and totalSize > 0:
self.total_size = float(totalSize)
if self.total_size > 0:
self.progress = max(0.0, min(1.0, (count * blockSize) / self.total_size))
self._progress_updated(self.progress)
if self.status in (SyncTask.CANCELLING, SyncTask.PAUSING):
self._signal_cancel_from_status()
# default implementation
def _signal_cancel_from_status(self):
raise SyncCancelledException()
def recycle(self):
self.episode.download_task = None
def run(self):
# Speed calculation (re-)starts here
self.__start_time = 0
self.__start_blocks = 0
# If the download has already been cancelled/paused, skip it
with self:
if self.status in (SyncTask.CANCELLING, SyncTask.CANCELLED):
self.progress = 0.0
self.speed = 0.0
self.status = SyncTask.CANCELLED
return False
if self.status == SyncTask.PAUSING:
self.status = SyncTask.PAUSED
return False
# We only start this download if its status is downloading
if self.status != SyncTask.DOWNLOADING:
return False
# We are syncing this file right now
self._notification_shown = False
sync_result = SyncTask.DOWNLOADING
try:
logger.info('Starting SyncTask')
self.device.add_track(self, reporthook=self.status_updated)
except SyncCancelledException:
sync_result = SyncTask.CANCELLED
except Exception as e:
sync_result = SyncTask.FAILED
logger.error('Sync failed: %s', str(e), exc_info=True)
self.error_message = _('Error: %s') % (str(e),)
with self:
if sync_result == SyncTask.DOWNLOADING:
# Everything went well - we're done
self.status = SyncTask.DONE
if self.total_size <= 0:
self.total_size = util.calculate_size(self.filename)
logger.info('Total size updated to %d', self.total_size)
self.progress = 1.0
gpodder.user_extensions.on_episode_synced(self.device, self.__episode)
return True
self.speed = 0.0
if sync_result == SyncTask.FAILED:
self.status = SyncTask.FAILED
# cancelled/paused -- update state to mark it as safe to manipulate this task again
elif self.status == SyncTask.PAUSING:
self.status = SyncTask.PAUSED
elif self.status == SyncTask.CANCELLING:
self.status = SyncTask.CANCELLED
# We finished, but not successfully (at least not really)
return False
class GioSyncTask(SyncTask):
def __init__(self, episode):
super().__init__(episode)
# For cancelling the copy
self.cancellable = Gio.Cancellable()
def _signal_cancel_from_status(self):
self.cancellable.cancel()
| 32,594 | Python | .py | 705 | 35.991489 | 138 | 0.623226 | gpodder/gpodder | 1,268 | 204 | 307 | GPL-3.0 | 9/5/2024, 5:10:30 PM (Europe/Amsterdam) | 8,286 | config.py | gpodder_gpodder/src/gpodder/config.py |
# -*- coding: utf-8 -*-
#
# gPodder - A media aggregator and podcast client
# Copyright (c) 2005-2018 The gPodder Team
#
# gPodder is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# gPodder is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#
# config.py -- gPodder Configuration Manager
# Thomas Perl <thp@perli.net> 2007-11-02
#
import atexit
import logging
import os
import time
import gpodder
from gpodder import jsonconfig, util
_ = gpodder.gettext
defaults = {
# External applications used for playback
'player': {
'audio': 'default',
'video': 'default',
},
# gpodder.net settings
'mygpo': {
'enabled': False,
'server': 'gpodder.net',
'username': '',
'password': '',
'device': {
'uid': util.get_hostname(),
'type': 'desktop',
'caption': _('gPodder on %s') % util.get_hostname(),
},
},
# Various limits (downloading, updating, etc..)
'limit': {
'bandwidth': {
'enabled': False,
'kbps': 500.0, # maximum kB/s per download
},
'downloads': {
'enabled': True,
'concurrent': 1,
'concurrent_max': 16,
},
'episodes': 200, # max episodes per feed
},
# Behavior of downloads
'downloads': {
'chronological_order': True, # download older episodes first
},
# Automatic feed updates, download removal and retry on download timeout
'auto': {
'update': {
'enabled': False,
'frequency': 20, # minutes
},
'cleanup': {
'days': 7,
'played': False,
'unplayed': False,
'unfinished': True,
},
'retries': 3, # number of retries when downloads time out
},
'check_connection': True,
# Software updates from gpodder.org
'software_update': {
'check_on_startup': True, # check for updates on start
'last_check': 0, # unix timestamp of last update check
'interval': 5, # interval (in days) to check for updates
},
'ui': {
# Settings for the Command-Line Interface
'cli': {
'colors': True,
},
# Settings for the Gtk UI
'gtk': {
'state': {
'main_window': {
'width': 700,
'height': 500,
'x': -1, 'y': -1, 'maximized': False,
'paned_position': 200,
'episode_list_size': 200,
'episode_column_sort_id': 0,
'episode_column_sort_order': False,
'episode_column_order': [],
},
'podcastdirectory': {
'width': -1,
'height': -1,
'x': -1, 'y': -1, 'maximized': False,
},
'preferences': {
'width': -1,
'height': -1,
'x': -1, 'y': -1, 'maximized': False,
},
'config_editor': {
'width': -1,
'height': -1,
'x': -1, 'y': -1, 'maximized': False,
},
'channel_editor': {
'width': -1,
'height': -1,
'x': -1, 'y': -1, 'maximized': False,
},
'episode_selector': {
'width': 600,
'height': 400,
'x': -1, 'y': -1, 'maximized': False,
},
'episode_window': {
'width': 500,
'height': 400,
'x': -1, 'y': -1, 'maximized': False,
},
'export_to_local_folder': {
'width': 500,
'height': 400,
'x': -1, 'y': -1, 'maximized': False,
}
},
'toolbar': False,
'new_episodes': 'show', # ignore, show, queue, download
'only_added_are_new': False, # Only just added episodes are considered new after an update
'live_search_delay': 200,
'search_always_visible': False,
'find_as_you_type': True,
'podcast_list': {
'view_mode': 1,
'hide_empty': False,
'all_episodes': True,
'sections': True,
},
'episode_list': {
'view_mode': 1,
'always_show_new': True,
'trim_title_prefix': True,
'descriptions': True,
'show_released_time': False,
'right_align_released_column': False,
'ctrl_click_to_sort': False,
'columns': int('110', 2), # bitfield of visible columns
},
'download_list': {
'remove_finished': True,
},
'html_shownotes': True, # enable webkit renderer
'color_scheme': None, # system, light or dark. Initialized in app.py
},
},
# Synchronization with portable devices (MP3 players, etc..)
'device_sync': {
'device_type': 'none', # Possible values: 'none', 'filesystem', 'ipod'
'device_folder': '/media',
'one_folder_per_podcast': True,
'skip_played_episodes': True,
'delete_played_episodes': False,
'delete_deleted_episodes': False,
'max_filename_length': 120,
'compare_episode_filesize': True,
'custom_sync_name': '{episode.sortdate}_{episode.title}',
'custom_sync_name_enabled': False,
'after_sync': {
'mark_episodes_played': False,
'delete_episodes': False,
'sync_disks': False,
},
'playlists': {
'create': True,
'two_way_sync': False,
'use_absolute_path': True,
'folder': 'Playlists',
'extension': 'm3u',
}
},
'youtube': {
'preferred_fmt_id': 18, # default fmt_id (see fallbacks in youtube.py)
'preferred_fmt_ids': [], # for advanced uses (custom fallback sequence)
'preferred_hls_fmt_id': 93, # default fmt_id (see fallbacks in youtube.py)
'preferred_hls_fmt_ids': [], # for advanced uses (custom fallback sequence)
},
'vimeo': {
'fileformat': '720p', # preferred file format (see vimeo.py)
},
'network': {
'use_proxy': False,
'proxy_type': 'socks5h', # Possible values: socks5h (routes dns through the proxy), socks5, http
'proxy_hostname': '127.0.0.1',
'proxy_port': '8123',
'proxy_use_username_password': False,
'proxy_username': '',
'proxy_password': '',
},
'extensions': {
'enabled': [],
},
'sendto': {
'custom_file_format': '{episode.title}',
'custom_file_format_enabled': False,
},
'path': {
'alternate': '',
},
}
logger = logging.getLogger(__name__)
# Global variable for network proxies. Updated when the network proxy in the config changes
_proxies = None
def get_network_proxy_observer(config):
"""Return an observer function inside a closure containing given config instance."""
def get_proxies_from_config(config):
proxies = None
if config.network.use_proxy:
protocol = config.network.proxy_type
user_pass = ""
if config.network.proxy_use_username_password:
user_pass = f"{config.network.proxy_username}:{config.network.proxy_password}@"
proxy_url = f"{protocol}://{user_pass}{config.network.proxy_hostname}:{config.network.proxy_port}"
proxies = {"http": proxy_url, "https": proxy_url}
logger.debug(f"config observer returning proxies: {proxies}")
return proxies
def network_proxy_observer(name, old_value, new_value):
global _proxies
if name.startswith("network."):
_proxies = get_proxies_from_config(config)
return network_proxy_observer
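# Wiring the observer up (sketch): once registered, any change to a
# "network.*" key recomputes the module-level _proxies dict.
#
#     config = Config()
#     config.add_observer(get_network_proxy_observer(config))
#     config.network.use_proxy = True    # observer fires, _proxies is updated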
def config_value_to_string(config_value):
config_type = type(config_value)
if config_type == list:
return ','.join(map(config_value_to_string, config_value))
    elif config_type == str:
return config_value
else:
return str(config_value)
def string_to_config_value(new_value, old_value):
config_type = type(old_value)
if config_type == list:
return [_f for _f in [x.strip() for x in new_value.split(',')] if _f]
elif config_type == bool:
return (new_value.strip().lower() in ('1', 'true'))
else:
return config_type(new_value)
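# The two helpers above are approximate inverses, used to display and parse
# config values as text (e.g. in the config editor). Illustrative round trip:
#
#     config_value_to_string([1, 2, 3])       # -> '1,2,3'
#     string_to_config_value('1,2,3', [])     # -> ['1', '2', '3'] (always str)
#     string_to_config_value('true', False)   # -> True
#     string_to_config_value('8', 0)          # -> 8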
class Config(object):
# Number of seconds after which settings are auto-saved
WRITE_TO_DISK_TIMEOUT = 60
def __init__(self, filename='gpodder.json'):
self.__json_config = jsonconfig.JsonConfig(default=defaults,
on_key_changed=self._on_key_changed)
self.__save_thread = None
self.__filename = filename
self.__observers = []
self.load()
self.migrate_defaults()
# If there is no configuration file, we create one here (bug 1511)
if not os.path.exists(self.__filename):
self.save()
atexit.register(self.__atexit)
if self.path.alternate != '':
os.environ['PATH'] += os.pathsep + self.path.alternate
            logger.info('Appending alternate PATH: %s', self.path.alternate)
def register_defaults(self, defaults):
"""
Register default configuration options (e.g. for extensions)
This function takes a dictionary that will be merged into the
current configuration if the keys don't yet exist. This can
be used to add a default configuration for extension modules.
"""
self.__json_config._merge_keys(defaults)
def add_observer(self, callback):
"""
Add a callback function as observer. This callback
will be called when a setting changes. It should
have this signature:
observer(name, old_value, new_value)
The "name" is the setting name, the "old_value" is
the value that has been overwritten with "new_value".
"""
if callback not in self.__observers:
self.__observers.append(callback)
else:
logger.warning('Observer already added: %s', repr(callback))
def remove_observer(self, callback):
"""
Remove an observer previously added to this object.
"""
if callback in self.__observers:
self.__observers.remove(callback)
else:
logger.warning('Observer not added: %s', repr(callback))
def all_keys(self):
return self.__json_config._keys_iter()
def schedule_save(self):
if self.__save_thread is None:
self.__save_thread = util.run_in_background(self.save_thread_proc, True)
def save_thread_proc(self):
time.sleep(self.WRITE_TO_DISK_TIMEOUT)
if self.__save_thread is not None:
self.save()
def __atexit(self):
if self.__save_thread is not None:
self.save()
def save(self, filename=None):
if filename is None:
filename = self.__filename
logger.info('Flushing settings to disk')
try:
# revoke unix group/world permissions (this has no effect under windows)
umask = os.umask(0o077)
with open(filename + '.tmp', 'wt') as fp:
fp.write(repr(self.__json_config))
util.atomic_rename(filename + '.tmp', filename)
        except Exception:
logger.error('Cannot write settings to %s', filename)
util.delete_file(filename + '.tmp')
raise
finally:
os.umask(umask)
self.__save_thread = None
def load(self, filename=None):
if filename is not None:
self.__filename = filename
if os.path.exists(self.__filename):
try:
with open(self.__filename, 'rt') as f:
data = f.read()
new_keys_added = self.__json_config._restore(data)
            except Exception:
logger.warning('Cannot parse config file: %s',
self.__filename, exc_info=True)
new_keys_added = False
if new_keys_added:
logger.info('New default keys added - saving config.')
self.save()
def toggle_flag(self, name):
setattr(self, name, not getattr(self, name))
def update_field(self, name, new_value):
"""Update a config field, converting strings to the right types"""
old_value = self._lookup(name)
new_value = string_to_config_value(new_value, old_value)
setattr(self, name, new_value)
return True
def _on_key_changed(self, name, old_value, value):
if 'ui.gtk.state' not in name:
# Only log non-UI state changes
logger.debug('%s: %s -> %s', name, old_value, value)
for observer in self.__observers:
try:
observer(name, old_value, value)
except Exception as exception:
logger.error('Error while calling observer %r: %s',
observer, exception, exc_info=True)
self.schedule_save()
def __getattr__(self, name):
return getattr(self.__json_config, name)
def __setattr__(self, name, value):
if name.startswith('_'):
object.__setattr__(self, name, value)
return
setattr(self.__json_config, name, value)
def migrate_defaults(self):
""" change default values in config """
if self.device_sync.max_filename_length == 999:
logger.debug("setting config.device_sync.max_filename_length=120"
" (999 is bad for NTFS and ext{2-4})")
self.device_sync.max_filename_length = 120
def clamp_range(self, name, minval, maxval):
value = getattr(self, name)
if value < minval:
setattr(self, name, minval)
return True
if value > maxval:
setattr(self, name, maxval)
return True
return False
| 14,973 | Python | .py | 391 | 27.856777 | 110 | 0.547135 | gpodder/gpodder | 1,268 | 204 | 307 | GPL-3.0 | 9/5/2024, 5:10:30 PM (Europe/Amsterdam) | 8,287 | dbsqlite.py | gpodder_gpodder/src/gpodder/dbsqlite.py |
# -*- coding: utf-8 -*-
#
# gPodder - A media aggregator and podcast client
# Copyright (c) 2005-2018 The gPodder Team
#
# gPodder is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# gPodder is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#
# dbsqlite.py -- SQLite persistence layer for gPodder
#
# 2008-06-13 Justin Forest <justin.forest@gmail.com>
# 2010-04-24 Thomas Perl <thp@gpodder.org>
#
import logging
import threading
from sqlite3 import dbapi2 as sqlite
import gpodder
from gpodder import schema, util
_ = gpodder.gettext
logger = logging.getLogger(__name__)
class Database(object):
TABLE_PODCAST = 'podcast'
TABLE_EPISODE = 'episode'
def __init__(self, filename):
self.database_file = filename
self._db = None
self.lock = threading.RLock()
def close(self):
self.commit()
with self.lock:
self.db.isolation_level = None
self.db.execute('VACUUM')
self.db.isolation_level = ''
self._db.close()
self._db = None
def purge(self, max_episodes, podcast_id):
"""
Deletes old episodes. Should be called
before adding new episodes to a podcast.
"""
if max_episodes == 0:
return
with self.lock:
cur = self.cursor()
logger.debug('Purge requested for podcast %d', podcast_id)
sql = """
DELETE FROM %s
WHERE podcast_id = ?
AND state <> ?
AND id NOT IN
(SELECT id FROM %s WHERE podcast_id = ?
ORDER BY published DESC LIMIT ?)""" % (self.TABLE_EPISODE, self.TABLE_EPISODE)
cur.execute(sql, (podcast_id, gpodder.STATE_DOWNLOADED, podcast_id, max_episodes))
cur.close()
@property
def db(self):
if self._db is None:
self._db = sqlite.connect(self.database_file, check_same_thread=False)
# Check schema version, upgrade if necessary
schema.upgrade(self._db, self.database_file)
# Sanity checks for the data in the database
schema.check_data(self)
logger.debug('Database opened.')
return self._db
def cursor(self):
return self.db.cursor()
def commit(self):
with self.lock:
try:
logger.debug('Commit.')
self.db.commit()
except Exception as e:
logger.error('Cannot commit: %s', e, exc_info=True)
def get_content_types(self, pid):
"""Given a podcast ID, returns the content types"""
with self.lock:
cur = self.cursor()
cur.execute('SELECT mime_type FROM %s WHERE podcast_id = ?' % self.TABLE_EPISODE, (pid,))
for (mime_type,) in cur:
yield mime_type
cur.close()
def get_podcast_statistics(self, podcast_id=None):
"""Given a podcast ID, returns the statistics for it
If the podcast_id is omitted (using the default value), the
statistics will be calculated over all podcasts.
Returns a tuple (total, deleted, new, downloaded, unplayed)
"""
total, deleted, new, downloaded, unplayed = 0, 0, 0, 0, 0
with self.lock:
cur = self.cursor()
if podcast_id is not None:
cur.execute('SELECT COUNT(*), state, is_new FROM %s '
'WHERE podcast_id = ? GROUP BY state, is_new'
% self.TABLE_EPISODE, (podcast_id,))
else:
cur.execute('SELECT COUNT(*), state, is_new FROM %s '
'GROUP BY state, is_new' % self.TABLE_EPISODE)
for count, state, is_new in cur:
total += count
if state == gpodder.STATE_DELETED:
deleted += count
elif state == gpodder.STATE_NORMAL and is_new:
new += count
elif state == gpodder.STATE_DOWNLOADED:
downloaded += count
if is_new:
unplayed += count
cur.close()
return (total, deleted, new, downloaded, unplayed)
def load_podcasts(self, factory):
logger.info('Loading podcasts')
sql = 'SELECT * FROM %s' % self.TABLE_PODCAST
with self.lock:
cur = self.cursor()
cur.execute(sql)
keys = [desc[0] for desc in cur.description]
result = [factory(dict(list(zip(keys, row))), self) for row in cur]
cur.close()
return result
def load_episodes(self, podcast, factory):
assert podcast.id
logger.info('Loading episodes for podcast %d', podcast.id)
sql = 'SELECT * FROM %s WHERE podcast_id = ? ORDER BY published DESC' % self.TABLE_EPISODE
args = (podcast.id,)
with self.lock:
cur = self.cursor()
cur.execute(sql, args)
keys = [desc[0] for desc in cur.description]
result = [factory(dict(list(zip(keys, row)))) for row in cur]
cur.close()
return result
def delete_podcast(self, podcast):
assert podcast.id
with self.lock:
cur = self.cursor()
logger.debug('delete_podcast: %d (%s)', podcast.id, podcast.url)
cur.execute("DELETE FROM %s WHERE id = ?" % self.TABLE_PODCAST, (podcast.id, ))
cur.execute("DELETE FROM %s WHERE podcast_id = ?" % self.TABLE_EPISODE, (podcast.id, ))
cur.close()
self.db.commit()
def save_podcast(self, podcast):
self._save_object(podcast, self.TABLE_PODCAST, schema.PodcastColumns)
def save_episode(self, episode):
self._save_object(episode, self.TABLE_EPISODE, schema.EpisodeColumns)
def _save_object(self, o, table, columns):
with self.lock:
try:
cur = self.cursor()
values = [util.convert_bytes(getattr(o, name))
for name in columns]
if o.id is None:
qmarks = ', '.join('?' * len(columns))
sql = 'INSERT INTO %s (%s) VALUES (%s)' % (table, ', '.join(columns), qmarks)
cur.execute(sql, values)
o.id = cur.lastrowid
else:
qmarks = ', '.join('%s = ?' % name for name in columns)
values.append(o.id)
sql = 'UPDATE %s SET %s WHERE id = ?' % (table, qmarks)
cur.execute(sql, values)
except Exception as e:
logger.error('Cannot save %s: %s', o, e, exc_info=True)
cur.close()
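    # _save_object() implements a simple insert-or-update: a missing id means
    # INSERT (the new rowid is written back to o.id), otherwise UPDATE. For
    # the episode table this generates SQL along these lines (sketch):
    #
    #     INSERT INTO episode (podcast_id, title, ...) VALUES (?, ?, ...)
    #     UPDATE episode SET podcast_id = ?, title = ?, ... WHERE id = ?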
def get(self, sql, params=None):
"""
Returns the first cell of a query result, useful for COUNT()s.
"""
with self.lock:
cur = self.cursor()
if params is None:
cur.execute(sql)
else:
cur.execute(sql, params)
row = cur.fetchone()
cur.close()
if row is None:
return None
else:
return row[0]
def podcast_download_folder_exists(self, foldername):
"""
Returns True if a foldername for a channel exists.
False otherwise.
"""
foldername = util.convert_bytes(foldername)
return self.get("SELECT id FROM %s WHERE download_folder = ?" %
self.TABLE_PODCAST, (foldername,)) is not None
def episode_filename_exists(self, podcast_id, filename):
"""
Returns True if a filename for an episode exists.
False otherwise.
"""
filename = util.convert_bytes(filename)
return self.get("SELECT id FROM %s WHERE podcast_id = ? AND download_filename = ?" %
self.TABLE_EPISODE, (podcast_id, filename,)) is not None
def get_last_published(self, podcast):
"""
Look up the most recent publish date of a podcast.
"""
return self.get('SELECT MAX(published) FROM %s WHERE podcast_id = ?' % self.TABLE_EPISODE, (podcast.id,))
def delete_episode_by_guid(self, guid, podcast_id):
"""
Deletes episodes that have a specific GUID for
a given channel. Used after feed updates for
episodes that have disappeared from the feed.
"""
guid = util.convert_bytes(guid)
with self.lock:
cur = self.cursor()
cur.execute('DELETE FROM %s WHERE podcast_id = ? AND guid = ?' %
self.TABLE_EPISODE, (podcast_id, guid))
| 9,198
|
Python
|
.py
| 222
| 30.725225
| 113
| 0.571156
|
gpodder/gpodder
| 1,268
| 204
| 307
|
GPL-3.0
|
9/5/2024, 5:10:30 PM (Europe/Amsterdam)
|
8,288
|
coverart.py
|
gpodder_gpodder/src/gpodder/coverart.py
|
# -*- coding: utf-8 -*-
#
# gPodder - A media aggregator and podcast client
# Copyright (c) 2005-2018 The gPodder Team
#
# gPodder is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# gPodder is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#
# gpodder.coverart - Unified cover art downloading module (2012-03-04)
#
import logging
import os
import shutil
import urllib.parse
import gpodder
from gpodder import util, youtube
_ = gpodder.gettext
logger = logging.getLogger(__name__)
class CoverDownloader(object):
# File name extension dict, lists supported cover art extensions
# Values: functions that check if some data is of that file type
SUPPORTED_EXTENSIONS = {
'.png': lambda d: d.startswith(b'\x89PNG\r\n\x1a\n\x00'),
'.jpg': lambda d: d.startswith(b'\xff\xd8'),
'.gif': lambda d: d.startswith(b'GIF89a') or d.startswith(b'GIF87a'),
'.ico': lambda d: d.startswith(b'\0\0\1\0'),
'.svg': lambda d: d.startswith(b'<svg '),
}
EXTENSIONS = list(SUPPORTED_EXTENSIONS.keys())
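    # Sketch of how these checks are meant to be used (hypothetical data):
    #   data = open(path, 'rb').read(512)
    #   ext = next((e for e, check in CoverDownloader.SUPPORTED_EXTENSIONS.items()
    #               if check(data)), None)   # e.g. '.jpg' for b'\xff\xd8...'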
ALL_EPISODES_ID = ':gpodder:all-episodes:'
# Low timeout to avoid unnecessary hangs of GUIs
TIMEOUT = 5
def __init__(self):
pass
def get_cover_all_episodes(self):
return self._default_filename('podcast-all.png')
def get_cover(self, filename, cover_url, feed_url, title,
username=None, password=None, download=False):
# Detection of "all episodes" podcast
if filename == self.ALL_EPISODES_ID:
return self.get_cover_all_episodes()
# Return already existing files
for extension in self.EXTENSIONS:
if os.path.exists(filename + extension):
return filename + extension
# Handle local files
if cover_url is not None and cover_url.startswith('file://'):
try:
path = urllib.parse.unquote(cover_url).replace('file://', '')
if not os.path.exists(path):
raise ValueError('Cover file not found: %s' % (path))
extension = None
with open(path, 'rb') as fp:
data = fp.read(512)
for filetype, check in list(self.SUPPORTED_EXTENSIONS.items()):
if check(data):
extension = filetype
break
if extension is None:
raise ValueError(
'Unknown file type: %s (%r)' % (cover_url, data[:6]))
# File is ok, copy it
shutil.copyfile(path, filename + extension)
return filename + extension
except Exception as e:
logger.warning('Setting cover art from file failed: %s', e)
return self._fallback_filename(title)
# If allowed to download files, do so here
if download:
# YouTube-specific cover art image resolver
youtube_cover_url = youtube.get_cover(feed_url)
if youtube_cover_url is not None:
cover_url = youtube_cover_url
if not cover_url:
return self._fallback_filename(title)
# We have to add username/password, because password-protected
# feeds might keep their cover art also protected (bug 1521)
if username is not None and password is not None:
cover_url = util.url_add_authentication(cover_url,
username, password)
try:
logger.info('Downloading cover art: %s', cover_url)
response = util.urlopen(cover_url, timeout=self.TIMEOUT)
if response.status_code != 200:
msg = '%s returned status code %d' % (cover_url, response.status_code)
raise ValueError(msg)
data = response.content
except Exception as e:
logger.warning('Cover art download failed: %s', e)
return self._fallback_filename(title)
try:
extension = None
for filetype, check in list(self.SUPPORTED_EXTENSIONS.items()):
if check(data):
extension = filetype
break
if extension is None:
msg = 'Unknown file type: %s (%r)' % (cover_url, data[:6])
raise ValueError(msg)
# Successfully downloaded the cover art - save it!
                with open(filename + extension, 'wb') as fp:
                    fp.write(data)
return filename + extension
except Exception:
logger.warning('Cannot save cover art', exc_info=True)
# Fallback to cover art based on the podcast title
return self._fallback_filename(title)
def _default_filename(self, basename):
return os.path.join(gpodder.images_folder, basename)
def _fallback_filename(self, title):
return self._default_filename('podcast-%d.png' % (hash(title) % 5))
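    # Note: str hashes are salted per process (PYTHONHASHSEED), so the chosen
    # placeholder (podcast-0.png .. podcast-4.png) is stable within one run
    # but may differ between runs.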
| 5,630
|
Python
|
.py
| 123
| 34.642276
| 90
| 0.59686
|
gpodder/gpodder
| 1,268
| 204
| 307
|
GPL-3.0
|
9/5/2024, 5:10:30 PM (Europe/Amsterdam)
|
8,289
|
utilwin32ctypes.py
|
gpodder_gpodder/src/gpodder/utilwin32ctypes.py
|
# -*- coding: utf-8 -*-
#
# gPodder - A media aggregator and podcast client
# Copyright (c) 2005-2018 The gPodder Team
# Copyright (c) 2018 Eric Le Lay
#
# gPodder is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# gPodder is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import ctypes
from ctypes import HRESULT, Structure, byref, c_ulonglong
from ctypes.wintypes import (BOOL, BYTE, DWORD, HANDLE, LPCWSTR,
PULARGE_INTEGER, WORD)
from uuid import UUID
from win32ctypes.core.ctypes._util import check_zero, function_factory
# Use a local copy of dlls.
kernel32 = ctypes.WinDLL('kernel32')
shell32 = ctypes.WinDLL('shell32')
ole32 = ctypes.WinDLL('ole32')
# https://msdn.microsoft.com/en-us/library/windows/desktop/aa373931%28v=vs.85%29.aspx
class GUID(ctypes.Structure):
_fields_ = [
("Data1", DWORD),
("Data2", WORD),
("Data3", WORD),
("Data4", BYTE * 8),
]
def __init__(self, uuidstr=None):
uuid = UUID(uuidstr)
Structure.__init__(self)
self.Data1, self.Data2, self.Data3, self.Data4[0], self.Data4[1], rest = uuid.fields
for i in range(2, 8):
self.Data4[i] = rest >> (8 - i - 1) * 8 & 0xff
REFKNOWNFOLDERID = ctypes.POINTER(GUID)
S_OK = HRESULT(0).value
CoTaskMemFree = function_factory(
ole32.CoTaskMemFree,
[ctypes.c_void_p],
None)
_BaseGetDiskFreeSpaceEx = function_factory(
kernel32.GetDiskFreeSpaceExW,
[LPCWSTR, PULARGE_INTEGER, PULARGE_INTEGER, PULARGE_INTEGER],
BOOL, check_zero)
_BaseGetFileAttributes = function_factory(
kernel32.GetFileAttributesW,
[LPCWSTR],
DWORD)
_BaseSHGetKnownFolderPath = function_factory(
shell32.SHGetKnownFolderPath,
[REFKNOWNFOLDERID, DWORD, HANDLE, ctypes.POINTER(ctypes.c_wchar_p)],
HRESULT)
def GetDiskFreeSpaceEx(lpDirectoryName):
lp_dirname = LPCWSTR(lpDirectoryName)
lpFreeBytesAvailable = c_ulonglong(0)
lpTotalNumberOfBytes = c_ulonglong(0)
lpTotalNumberOfFreeBytes = c_ulonglong(0)
_BaseGetDiskFreeSpaceEx(lp_dirname, byref(lpFreeBytesAvailable), byref(lpTotalNumberOfBytes), byref(lpTotalNumberOfFreeBytes))
freeBytesAvailable = lpFreeBytesAvailable.value
totalNumberOfBytes = lpTotalNumberOfBytes.value
totalNumberOfFreeBytes = lpTotalNumberOfFreeBytes.value
return (freeBytesAvailable, totalNumberOfBytes, totalNumberOfFreeBytes)
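# Illustrative call (sketch): free, total, total_free = GetDiskFreeSpaceEx('C:\\')
# All three values are in bytes; "free" is the space available to the caller.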
def GetFileAttributes(lpFileName):
lp_filename = LPCWSTR(lpFileName)
return _BaseGetFileAttributes(lp_filename)
def SHGetKnownFolderPath(rfid, dwFlags):
out_buf = ctypes.c_wchar_p()
try:
ret = _BaseSHGetKnownFolderPath(byref(rfid), dwFlags, None, byref(out_buf))
except WindowsError:
return None
if ret != S_OK:
return None
res = out_buf.value
CoTaskMemFree(out_buf)
return res
# https://msdn.microsoft.com/en-us/library/dd378447(v=vs.85).aspx
class KNOWN_FOLDER_FLAG:
KF_FLAG_DEFAULT = 0x00000000
KF_FLAG_SIMPLE_IDLIST = 0x00000100
KF_FLAG_NOT_PARENT_RELATIVE = 0x00000200
KF_FLAG_DEFAULT_PATH = 0x00000400
KF_FLAG_INIT = 0x00000800
KF_FLAG_NO_ALIAS = 0x00001000
KF_FLAG_DONT_UNEXPAND = 0x00002000
KF_FLAG_DONT_VERIFY = 0x00004000
KF_FLAG_CREATE = 0x00008000
KF_FLAG_NO_PACKAGE_REDIRECTION = 0x00010000
KF_FLAG_NO_APPCONTAINER_REDIRECTION = 0x00010000
KF_FLAG_FORCE_PACKAGE_REDIRECTION = 0x00020000
KF_FLAG_FORCE_APPCONTAINER_REDIRECTION = 0x00020000
KF_FLAG_RETURN_FILTER_REDIRECTION_TARGET = 0x00040000
KF_FLAG_FORCE_APP_DATA_REDIRECTION = 0x00080000
KF_FLAG_ALIAS_ONLY = 0x80000000
# https://msdn.microsoft.com/en-us/library/dd378457(v=vs.85).aspx
class KNOWNFOLDERID:
FOLDERID_Documents = GUID("{FDD39AD0-238F-46AF-ADB4-6C85480369C7}")
def get_documents_folder():
flags = KNOWN_FOLDER_FLAG.KF_FLAG_DEFAULT | \
KNOWN_FOLDER_FLAG.KF_FLAG_DONT_UNEXPAND | \
KNOWN_FOLDER_FLAG.KF_FLAG_CREATE | \
KNOWN_FOLDER_FLAG.KF_FLAG_DONT_VERIFY
return SHGetKnownFolderPath(KNOWNFOLDERID.FOLDERID_Documents, flags)
def get_reg_current_user_string_value(subkey, value_name):
import winreg
try:
my_key = winreg.OpenKeyEx(winreg.HKEY_CURRENT_USER, subkey)
except FileNotFoundError:
return None
try:
value, type_ = winreg.QueryValueEx(my_key, value_name)
if type_ == winreg.REG_SZ:
return value
else:
raise WindowsError("Unexpected type for value %s in registry: %i" % (value_name, type_))
except FileNotFoundError:
return None
| 5,113
|
Python
|
.py
| 126
| 35.539683
| 130
| 0.722906
|
gpodder/gpodder
| 1,268
| 204
| 307
|
GPL-3.0
|
9/5/2024, 5:10:30 PM (Europe/Amsterdam)
|
8,290
|
jsonconfig.py
|
gpodder_gpodder/src/gpodder/jsonconfig.py
|
# -*- coding: utf-8 -*-
#
# gPodder - A media aggregator and podcast client
# Copyright (c) 2005-2018 The gPodder Team
#
# gPodder is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# gPodder is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#
# jsonconfig.py -- JSON Config Backend
# Thomas Perl <thp@gpodder.org> 2012-01-18
#
import copy
import json
from functools import reduce
class JsonConfigSubtree(object):
def __init__(self, parent, name):
self._parent = parent
self._name = name
def __repr__(self):
return '<Subtree %r of JsonConfig>' % (self._name,)
def _attr(self, name):
return '.'.join((self._name, name))
def __getitem__(self, name):
return self._parent._lookup(self._name).__getitem__(name)
def __delitem__(self, name):
self._parent._lookup(self._name).__delitem__(name)
def __setitem__(self, name, value):
self._parent._lookup(self._name).__setitem__(name, value)
def __getattr__(self, name):
if name == 'keys':
# Kludge for using dict() on a JsonConfigSubtree
return getattr(self._parent._lookup(self._name), name)
return getattr(self._parent, self._attr(name))
def __setattr__(self, name, value):
if name.startswith('_'):
object.__setattr__(self, name, value)
else:
self._parent.__setattr__(self._attr(name), value)
class JsonConfig(object):
_INDENT = 2
def __init__(self, data=None, default=None, on_key_changed=None):
"""
Create a new JsonConfig object
data: A JSON string that contains the data to load (optional)
default: A dict that contains default config values (optional)
on_key_changed: Callback when a value changes (optional)
The signature of on_key_changed looks like this:
func(name, old_value, new_value)
name: The key name, e.g. "ui.gtk.toolbar"
old_value: The old value, e.g. False
new_value: The new value, e.g. True
For newly-set keys, on_key_changed is also called. In this case,
None will be the old_value:
>>> def callback(*args): print('callback:', args)
>>> c = JsonConfig(on_key_changed=callback)
>>> c.a.b = 10
callback: ('a.b', None, 10)
>>> c.a.b = 11
callback: ('a.b', 10, 11)
>>> c.x.y.z = [1,2,3]
callback: ('x.y.z', None, [1, 2, 3])
>>> c.x.y.z = 42
callback: ('x.y.z', [1, 2, 3], 42)
Please note that dict-style access will not call on_key_changed:
>>> def callback(*args): print('callback:', args)
>>> c = JsonConfig(on_key_changed=callback)
>>> c.a.b = 1 # This works as expected
callback: ('a.b', None, 1)
>>> c.a['c'] = 10 # This doesn't call on_key_changed!
>>> del c.a['c'] # This also doesn't call on_key_changed!
"""
self._default = default
self._data = copy.deepcopy(self._default) or {}
self._on_key_changed = on_key_changed
if data is not None:
self._restore(data)
def _restore(self, backup):
"""
Restore a previous state saved with repr()
This function allows you to "snapshot" the current values of
the configuration and reload them later on. Any missing
default values will be added on top of the restored config.
Returns True if new keys from the default config have been added,
False if no keys have been added (backup contains all default keys)
>>> c = JsonConfig()
>>> c.a.b = 10
>>> backup = repr(c)
>>> print(c.a.b)
10
>>> c.a.b = 11
>>> print(c.a.b)
11
>>> c._restore(backup)
False
>>> print(c.a.b)
10
"""
self._data = json.loads(backup)
# Add newly-added default configuration options
if self._default is not None:
return self._merge_keys(self._default)
return False
def _merge_keys(self, merge_source):
"""Merge keys from merge_source into this config object
Return True if new keys were merged, False otherwise
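        A small doctest (consistent with the class examples above):
        >>> c = JsonConfig('{"a": 1}')
        >>> c._merge_keys({'a': 0, 'b': 2})
        True
        >>> (c.a, c.b)
        (1, 2)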
"""
added_new_key = False
# Recurse into the data and add missing items
work_queue = [(self._data, merge_source)]
while work_queue:
data, default = work_queue.pop()
for key, value in default.items():
if key not in data:
# Copy defaults for missing key
data[key] = copy.deepcopy(value)
added_new_key = True
elif isinstance(value, dict):
# Recurse into sub-dictionaries
work_queue.append((data[key], value))
elif type(value) != type(data[key]): # noqa
# Type mismatch of current value and default
if isinstance(value, int) and isinstance(data[key], float):
# Convert float to int if default value is int
data[key] = int(data[key])
return added_new_key
def __repr__(self):
"""
>>> c = JsonConfig('{"a": 1}')
>>> print(c)
{
"a": 1
}
"""
return json.dumps(self._data, indent=self._INDENT, sort_keys=True)
def _lookup(self, name):
return reduce(lambda d, k: d[k], name.split('.'), self._data)
def _keys_iter(self):
work_queue = []
work_queue.append(([], self._data))
while work_queue:
path, data = work_queue.pop(0)
if isinstance(data, dict):
for key in sorted(data.keys()):
work_queue.append((path + [key], data[key]))
else:
yield '.'.join(path)
def __getattr__(self, name):
try:
value = self._lookup(name)
if not isinstance(value, dict):
return value
except KeyError:
pass
return JsonConfigSubtree(self, name)
def __setattr__(self, name, value):
if name.startswith('_'):
object.__setattr__(self, name, value)
return
attrs = name.split('.')
target_dict = self._data
while attrs:
attr = attrs.pop(0)
if not attrs:
old_value = target_dict.get(attr, None)
if old_value != value or attr not in target_dict:
target_dict[attr] = value
if self._on_key_changed is not None:
self._on_key_changed(name, old_value, value)
break
target = target_dict.get(attr, None)
if target is None or not isinstance(target, dict):
target_dict[attr] = target = {}
target_dict = target
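    # e.g. c.ui.gtk.toolbar = True builds {'ui': {'gtk': {'toolbar': True}}}
    #      and fires on_key_changed('ui.gtk.toolbar', None, True)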
| 7,442
|
Python
|
.py
| 184
| 30.858696
| 79
| 0.567905
|
gpodder/gpodder
| 1,268
| 204
| 307
|
GPL-3.0
|
9/5/2024, 5:10:30 PM (Europe/Amsterdam)
|
8,291
|
minidb.py
|
gpodder_gpodder/src/gpodder/minidb.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# gPodder - A media aggregator and podcast client
# Copyright (c) 2005-2018 The gPodder Team
#
# gPodder is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# gPodder is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# gpodder.minidb - A simple SQLite store for Python objects
# Thomas Perl, 2010-01-28
# based on: "ORM wie eine Kirchenmaus - a very poor ORM implementation
# by thp, 2009-11-29 (thp.io/about)"
# This module is also available separately at:
# http://thp.io/2010/minidb/
# Import the SQLite bindings (fall back to pysqlite2 on very old systems)
try:
import sqlite3.dbapi2 as sqlite
except ImportError:
try:
from pysqlite2 import dbapi2 as sqlite
except ImportError:
raise Exception('Please install SQLite3 support.')
import threading
class Store(object):
def __init__(self, filename=':memory:'):
self.db = sqlite.connect(filename, check_same_thread=False)
self.lock = threading.RLock()
def _schema(self, class_):
return class_.__name__, sorted(class_.__slots__)
def _set(self, o, slot, value):
# Set a slot on the given object to value, doing a cast if
# necessary. The value None is special-cased and never cast.
cls = o.__class__.__slots__[slot]
if value is not None:
if isinstance(value, bytes):
value = value.decode('utf-8')
value = cls(value)
setattr(o, slot, value)
def commit(self):
with self.lock:
self.db.commit()
def close(self):
with self.lock:
self.db.isolation_level = None
self.db.execute('VACUUM')
self.db.isolation_level = ''
self.db.close()
def _register(self, class_):
with self.lock:
table, slots = self._schema(class_)
cur = self.db.execute('PRAGMA table_info(%s)' % table)
available = cur.fetchall()
if available:
available = [row[1] for row in available]
missing_slots = (s for s in slots if s not in available)
for slot in missing_slots:
self.db.execute('ALTER TABLE %s ADD COLUMN %s TEXT' % (table,
slot))
else:
self.db.execute('CREATE TABLE %s (%s)' % (table,
', '.join('%s TEXT' % s for s in slots)))
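    # Schema handling sketch (hypothetical class): given
    #   class Person: __slots__ = {'username': str, 'id': int}
    # the first use runs CREATE TABLE Person (id TEXT, username TEXT), and any
    # slot added later is picked up via ALTER TABLE ... ADD COLUMN.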
    def convert(self, v):
        if isinstance(v, str):
            return v
        elif isinstance(v, bytes):
            return v.decode('utf-8')
        else:
            return str(v)
def update(self, o, **kwargs):
self.remove(o)
for k, v in list(kwargs.items()):
setattr(o, k, v)
self.save(o)
def save(self, o):
if hasattr(o, '__iter__'):
klass = None
for child in o:
if klass is None:
klass = child.__class__
self._register(klass)
table, slots = self._schema(klass)
if not isinstance(child, klass):
raise ValueError('Only one type of object allowed')
used = [s for s in slots if getattr(child, s, None) is not None]
values = [self.convert(getattr(child, slot)) for slot in used]
self.db.execute('INSERT INTO %s (%s) VALUES (%s)' % (table,
', '.join(used), ', '.join('?' * len(used))), values)
return
with self.lock:
self._register(o.__class__)
table, slots = self._schema(o.__class__)
values = [self.convert(getattr(o, slot)) for slot in slots]
self.db.execute('INSERT INTO %s (%s) VALUES (%s)' % (table,
', '.join(slots), ', '.join('?' * len(slots))), values)
def delete(self, class_, **kwargs):
with self.lock:
self._register(class_)
table, slots = self._schema(class_)
sql = 'DELETE FROM %s' % (table,)
if kwargs:
sql += ' WHERE %s' % (' AND '.join('%s=?' % k for k in kwargs))
try:
self.db.execute(sql, list(kwargs.values()))
return True
except Exception:
return False
def remove(self, o):
if hasattr(o, '__iter__'):
for child in o:
self.remove(child)
return
with self.lock:
self._register(o.__class__)
table, slots = self._schema(o.__class__)
# Use "None" as wildcard selector in remove actions
slots = [s for s in slots if getattr(o, s, None) is not None]
values = [self.convert(getattr(o, slot)) for slot in slots]
self.db.execute('DELETE FROM %s WHERE %s' % (table,
' AND '.join('%s=?' % s for s in slots)), values)
def load(self, class_, **kwargs):
with self.lock:
self._register(class_)
table, slots = self._schema(class_)
sql = 'SELECT %s FROM %s' % (', '.join(slots), table)
if kwargs:
sql += ' WHERE %s' % (' AND '.join('%s=?' % k for k in kwargs))
            cur = self.db.execute(sql, list(kwargs.values()))
def apply(row):
o = class_.__new__(class_)
for attr, value in zip(slots, row):
try:
self._set(o, attr, value)
except ValueError:
return None
return o
return [x for x in [apply(row) for row in cur] if x is not None]
def get(self, class_, **kwargs):
result = self.load(class_, **kwargs)
if result:
return result[0]
else:
return None
if __name__ == '__main__':
class Person(object):
__slots__ = {'username': str, 'id': int}
def __init__(self, username, uid):
self.username = username
self.id = uid
def __repr__(self):
return '<Person "%s" (%d)>' % (self.username, self.id)
m = Store()
m.save(Person('User %d' % x, x * 20) for x in range(50))
p = m.get(Person, id=200)
print(p)
m.remove(p)
p = m.get(Person, id=200)
# Remove some persons again (deletion by value!)
m.remove(Person('User %d' % x, x * 20) for x in range(40))
class Person(object):
__slots__ = {'username': str, 'id': int, 'mail': str}
def __init__(self, username, uid, mail):
self.username = username
self.id = uid
self.mail = mail
def __repr__(self):
return '<Person "%s" (%s)>' % (self.username, self.mail)
# A schema update takes place here
m.save(Person('User %d' % x, x * 20, 'user@home.com') for x in range(50))
print(m.load(Person))
| 7,550
|
Python
|
.py
| 184
| 30.711957
| 81
| 0.543357
|
gpodder/gpodder
| 1,268
| 204
| 307
|
GPL-3.0
|
9/5/2024, 5:10:30 PM (Europe/Amsterdam)
|
8,292
|
deviceplaylist.py
|
gpodder_gpodder/src/gpodder/deviceplaylist.py
|
# -*- coding: utf-8 -*-
#
# gPodder - A media aggregator and podcast client
# Copyright (c) 2005-2011 Thomas Perl and the gPodder Team
#
# gPodder is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# gPodder is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import logging
import os
import gpodder
from gpodder import util
from gpodder.sync import (episode_filename_on_device,
episode_foldername_on_device)
import gi # isort:skip
gi.require_version('Gio', '2.0') # isort:skip
from gi.repository import Gio, GLib # isort:skip
_ = gpodder.gettext
logger = logging.getLogger(__name__)
class gPodderDevicePlaylist(object):
def __init__(self, config, playlist_name):
self._config = config
self.linebreak = '\r\n'
self.playlist_file = (
util.sanitize_filename(playlist_name, self._config.device_sync.max_filename_length)
+ '.' + self._config.device_sync.playlists.extension)
device_folder = util.new_gio_file(self._config.device_sync.device_folder)
self.playlist_folder = device_folder.resolve_relative_path(self._config.device_sync.playlists.folder)
self.mountpoint = None
try:
self.mountpoint = self.playlist_folder.find_enclosing_mount().get_root()
except GLib.Error as err:
logger.error('find_enclosing_mount folder %s failed: %s', self.playlist_folder.get_uri(), err.message)
if not self.mountpoint:
self.mountpoint = self.playlist_folder
logger.warning('could not find mount point for MP3 player - using %s as MP3 player root', self.mountpoint.get_uri())
self.playlist_absolute_filename = self.playlist_folder.resolve_relative_path(self.playlist_file)
def build_extinf(self, filename):
# TODO: Windows playlists
# if self._config.mp3_player_playlist_win_path:
# filename = filename.replace('\\', os.sep)
# # rebuild the whole filename including the mountpoint
# if self._config.device_sync.playlist_absolute_path:
# absfile = os.path.join(self.mountpoint,filename)
# else: #TODO: Test rel filenames
# absfile = util.rel2abs(filename, os.path.dirname(self.playlist_file))
# fallback: use the basename of the file
(title, extension) = os.path.splitext(os.path.basename(filename))
return "#EXTINF:0,%s%s" % (title.strip(), self.linebreak)
def read_m3u(self):
"""
read all files from the existing playlist
"""
tracks = []
logger.info("Read data from the playlistfile %s" % self.playlist_absolute_filename.get_uri())
if self.playlist_absolute_filename.query_exists():
stream = Gio.DataInputStream.new(self.playlist_absolute_filename.read())
while True:
line = stream.read_line_utf8()[0]
if not line:
break
if not line.startswith('#EXT'):
tracks.append(line.rstrip('\r\n'))
stream.close()
return tracks
def get_filename_for_playlist(self, episode):
"""
get the filename for the given episode for the playlist
"""
return episode_filename_on_device(self._config, episode)
def get_absolute_filename_for_playlist(self, episode):
"""
get the filename including full path for the given episode for the playlist
"""
filename = self.get_filename_for_playlist(episode)
foldername = episode_foldername_on_device(self._config, episode)
if foldername:
filename = os.path.join(foldername, filename)
if self._config.device_sync.playlists.use_absolute_path:
filename = os.path.join(util.relpath(self._config.device_sync.device_folder, self.mountpoint.get_uri()), filename)
return filename
def write_m3u(self, episodes):
"""
write the list into the playlist on the device
"""
logger.info('Writing playlist file: %s', self.playlist_file)
if not util.make_directory(self.playlist_folder):
raise IOError(_('Folder %s could not be created.') % self.playlist_folder, _('Error writing playlist'))
else:
# work around libmtp devices potentially having limited capabilities for partial writes
is_mtp = self.playlist_folder.get_uri().startswith("mtp://")
tempfile = None
if is_mtp:
tempfile = Gio.File.new_tmp()
fs = tempfile[1].get_output_stream()
else:
fs = self.playlist_absolute_filename.replace(None, False, Gio.FileCreateFlags.NONE)
            # named "stream" rather than "os" to avoid shadowing the os module
            stream = Gio.DataOutputStream.new(fs)
            stream.put_string('#EXTM3U%s' % self.linebreak)
            for current_episode in episodes:
                filename = self.get_filename_for_playlist(current_episode)
                stream.put_string(self.build_extinf(filename))
                filename = self.get_absolute_filename_for_playlist(current_episode)
                stream.put_string(filename)
                stream.put_string(self.linebreak)
            stream.close()
if is_mtp:
try:
tempfile[0].copy(self.playlist_absolute_filename, Gio.FileCopyFlags.OVERWRITE)
except GLib.Error as err:
logger.error('copying playlist to mtp device file %s failed: %s',
self.playlist_absolute_filename.get_uri(), err.message)
tempfile[0].delete()
| 6,117
|
Python
|
.py
| 123
| 40.357724
| 128
| 0.641111
|
gpodder/gpodder
| 1,268
| 204
| 307
|
GPL-3.0
|
9/5/2024, 5:10:30 PM (Europe/Amsterdam)
|
8,293
|
libgpod_ctypes.py
|
gpodder_gpodder/src/gpodder/libgpod_ctypes.py
|
#
# -*- coding: utf-8 -*-
#
# gPodder - A media aggregator and podcast client
# Copyright (c) 2005-2022 The gPodder Team
#
# gPodder is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# gPodder is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#
# libgpod_ctypes: Minimalistic ctypes-based bindings for libgpod
# (Just enough coverage to get podcast syncing working again...)
# Thomas Perl <m@thp.io>, May 2022
#
import ctypes
import logging
import os
logger = logging.getLogger(__name__)
# libgpod, for iTunesDB access
libgpod = ctypes.CDLL('libgpod.so.4')
# glib, for g_strdup() and g_free()
libglib = ctypes.CDLL('libglib-2.0.so.0')
# glib/gtypes.h: typedef gint gboolean;
gboolean = ctypes.c_int
# glib/gstrfuncs.h: gchar *g_strdup(const gchar *str);
libglib.g_strdup.argtypes = (ctypes.c_char_p,)
# Note: This MUST be c_void_p, so that the glib-allocated buffer will
# be preserved when assigning to track member variables. The reason
# for this is that Python ctypes tries to be helpful and converts a
# c_char_p restype to a Python bytes object, which will be different
# from the memory returned by g_strdup(). For track properties, the
# values will be free'd indirectly by itdb_free() later.
libglib.g_strdup.restype = ctypes.c_void_p
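# Resulting calling pattern (sketch; mirrors itdb_filename_on_ipod usage below):
#   ptr = libglib.g_strdup(b'example')       # raw address (int), not bytes
#   text = ctypes.string_at(ptr).decode()    # take an explicit copy if needed
#   libglib.g_free(ptr)                      # caller owns and frees the copy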
# glib/gmem.h: void g_free(gpointer mem);
libglib.g_free.argtypes = (ctypes.c_void_p,)
libglib.g_free.restype = None
# ctypes.c_time_t will be available in Python 3.12 onwards
# See also: https://github.com/python/cpython/pull/92870
if hasattr(ctypes, 'c_time_t'):
time_t = ctypes.c_time_t
else:
# See also: https://github.com/python/cpython/issues/92869
if ctypes.sizeof(ctypes.c_void_p) == ctypes.sizeof(ctypes.c_int64):
time_t = ctypes.c_int64
else:
# On 32-bit systems, time_t is historically 32-bit, but due to Y2K38
# there have been efforts to establish 64-bit time_t on 32-bit Linux:
# https://linux.slashdot.org/story/20/02/15/0247201/linux-is-ready-for-the-end-of-time
# https://www.gnu.org/software/libc/manual/html_node/64_002dbit-time-symbol-handling.html
logger.info('libgpod may cause issues if time_t is 64-bit on your 32-bit system.')
time_t = ctypes.c_int32
# glib/glist.h: struct _GList
class GList(ctypes.Structure):
...
GList._fields_ = [
('data', ctypes.c_void_p),
('next', ctypes.POINTER(GList)),
('prev', ctypes.POINTER(GList)),
]
# gpod/itdb.h
class Itdb_iTunesDB(ctypes.Structure):
_fields_ = [
('tracks', ctypes.POINTER(GList)),
# ...
]
# gpod/itdb.h: struct _Itdb_Playlist
class Itdb_Playlist(ctypes.Structure):
_fields_ = [
('itdb', ctypes.POINTER(Itdb_iTunesDB)),
('name', ctypes.c_char_p),
('type', ctypes.c_uint8),
('flag1', ctypes.c_uint8),
('flag2', ctypes.c_uint8),
('flag3', ctypes.c_uint8),
('num', ctypes.c_int),
('members', ctypes.POINTER(GList)),
# ...
]
# gpod/itdb.h
class Itdb_Chapterdata(ctypes.Structure):
...
# gpod/itdb.h
class Itdb_Track(ctypes.Structure):
_fields_ = [
('itdb', ctypes.POINTER(Itdb_iTunesDB)),
('title', ctypes.c_char_p),
('ipod_path', ctypes.c_char_p),
('album', ctypes.c_char_p),
('artist', ctypes.c_char_p),
('genre', ctypes.c_char_p),
('filetype', ctypes.c_char_p),
('comment', ctypes.c_char_p),
('category', ctypes.c_char_p),
('composer', ctypes.c_char_p),
('grouping', ctypes.c_char_p),
('description', ctypes.c_char_p),
('podcasturl', ctypes.c_char_p),
('podcastrss', ctypes.c_char_p),
('chapterdata', ctypes.POINTER(Itdb_Chapterdata)),
('subtitle', ctypes.c_char_p),
('tvshow', ctypes.c_char_p),
('tvepisode', ctypes.c_char_p),
('tvnetwork', ctypes.c_char_p),
('albumartist', ctypes.c_char_p),
('keywords', ctypes.c_char_p),
('sort_artist', ctypes.c_char_p),
('sort_title', ctypes.c_char_p),
('sort_album', ctypes.c_char_p),
('sort_albumartist', ctypes.c_char_p),
('sort_composer', ctypes.c_char_p),
('sort_tvshow', ctypes.c_char_p),
('id', ctypes.c_uint32),
('size', ctypes.c_uint32),
('tracklen', ctypes.c_int32),
('cd_nr', ctypes.c_int32),
('cds', ctypes.c_int32),
('track_nr', ctypes.c_int32),
('bitrate', ctypes.c_int32),
('samplerate', ctypes.c_uint16),
('samplerate_low', ctypes.c_uint16),
('year', ctypes.c_int32),
('volume', ctypes.c_int32),
        ('soundcheck', ctypes.c_uint32),
('time_added', time_t),
('time_modified', time_t),
('time_played', time_t),
('bookmark_time', ctypes.c_uint32),
('rating', ctypes.c_uint32),
('playcount', ctypes.c_uint32),
('playcount2', ctypes.c_uint32),
('recent_playcount', ctypes.c_uint32),
('transferred', gboolean),
('BPM', ctypes.c_int16),
('app_rating', ctypes.c_uint8),
('type1', ctypes.c_uint8),
('type2', ctypes.c_uint8),
('compilation', ctypes.c_uint8),
('starttime', ctypes.c_uint32),
('stoptime', ctypes.c_uint32),
('checked', ctypes.c_uint8),
('dbid', ctypes.c_uint64),
('drm_userid', ctypes.c_uint32),
('visible', ctypes.c_uint32),
('filetype_marker', ctypes.c_uint32),
('artwork_count', ctypes.c_uint16),
('artwork_size', ctypes.c_uint32),
('samplerate2', ctypes.c_float),
('unk126', ctypes.c_uint16),
('unk132', ctypes.c_uint32),
('time_released', time_t),
('unk144', ctypes.c_uint16),
('explicit_flag', ctypes.c_uint16),
('unk148', ctypes.c_uint32),
('unk152', ctypes.c_uint32),
('skipcount', ctypes.c_uint32),
('recent_skipcount', ctypes.c_uint32),
('last_skipped', ctypes.c_uint32),
('has_artwork', ctypes.c_uint8),
('skip_when_shuffling', ctypes.c_uint8),
('remember_playback_position', ctypes.c_uint8),
('flag4', ctypes.c_uint8),
('dbid2', ctypes.c_uint64),
('lyrics_flag', ctypes.c_uint8),
('movie_flag', ctypes.c_uint8),
('mark_unplayed', ctypes.c_uint8),
('unk179', ctypes.c_uint8),
('unk180', ctypes.c_uint32),
('pregap', ctypes.c_uint32),
('samplecount', ctypes.c_uint64),
('unk196', ctypes.c_uint32),
('postgap', ctypes.c_uint32),
('unk204', ctypes.c_uint32),
('mediatype', ctypes.c_uint32),
# ...
]
# gpod/itdb.h: Itdb_iTunesDB *itdb_parse (const gchar *mp, GError **error);
libgpod.itdb_parse.argtypes = (ctypes.c_char_p, ctypes.c_void_p)
libgpod.itdb_parse.restype = ctypes.POINTER(Itdb_iTunesDB)
# gpod/itdb.h: Itdb_Playlist *itdb_playlist_podcasts (Itdb_iTunesDB *itdb);
libgpod.itdb_playlist_podcasts.argtypes = (ctypes.POINTER(Itdb_iTunesDB),)
libgpod.itdb_playlist_podcasts.restype = ctypes.POINTER(Itdb_Playlist)
# gpod/itdb.h: Itdb_Playlist *itdb_playlist_mpl (Itdb_iTunesDB *itdb);
libgpod.itdb_playlist_mpl.argtypes = (ctypes.POINTER(Itdb_iTunesDB),)
libgpod.itdb_playlist_mpl.restype = ctypes.POINTER(Itdb_Playlist)
# gpod/itdb.h: gboolean itdb_write (Itdb_iTunesDB *itdb, GError **error);
libgpod.itdb_write.argtypes = (ctypes.POINTER(Itdb_iTunesDB), ctypes.c_void_p)
libgpod.itdb_write.restype = gboolean
# gpod/itdb.h: guint32 itdb_playlist_tracks_number (Itdb_Playlist *pl);
libgpod.itdb_playlist_tracks_number.argtypes = (ctypes.POINTER(Itdb_Playlist),)
libgpod.itdb_playlist_tracks_number.restype = ctypes.c_uint32
# gpod/itdb.h: gchar *itdb_filename_on_ipod (Itdb_Track *track);
libgpod.itdb_filename_on_ipod.argtypes = (ctypes.POINTER(Itdb_Track),)
# Needs to be c_void_p, because the returned pointer-to-memory must be free'd with g_free() after use.
libgpod.itdb_filename_on_ipod.restype = ctypes.c_void_p
# gpod/itdb.h: Itdb_Track *itdb_track_new (void);
libgpod.itdb_track_new.argtypes = ()
libgpod.itdb_track_new.restype = ctypes.POINTER(Itdb_Track)
# gpod/itdb.h: void itdb_track_add (Itdb_iTunesDB *itdb, Itdb_Track *track, gint32 pos);
libgpod.itdb_track_add.argtypes = (ctypes.POINTER(Itdb_iTunesDB), ctypes.POINTER(Itdb_Track), ctypes.c_int32)
libgpod.itdb_track_add.restype = None
# gpod/itdb.h: void itdb_playlist_add_track (Itdb_Playlist *pl, Itdb_Track *track, gint32 pos);
libgpod.itdb_playlist_add_track.argtypes = (ctypes.POINTER(Itdb_Playlist), ctypes.POINTER(Itdb_Track), ctypes.c_int32)
libgpod.itdb_playlist_add_track.restype = None
# gpod/itdb.h: gboolean itdb_cp_track_to_ipod (Itdb_Track *track, const gchar *filename, GError **error);
libgpod.itdb_cp_track_to_ipod.argtypes = (ctypes.POINTER(Itdb_Track), ctypes.c_char_p, ctypes.c_void_p)
libgpod.itdb_cp_track_to_ipod.restype = gboolean
# gpod/itdb.h: time_t itdb_time_host_to_mac (time_t time);
libgpod.itdb_time_host_to_mac.argtypes = (time_t,)
libgpod.itdb_time_host_to_mac.restype = time_t
# gpod/itdb.h: void itdb_playlist_remove_track (Itdb_Playlist *pl, Itdb_Track *track);
libgpod.itdb_playlist_remove_track.argtypes = (ctypes.POINTER(Itdb_Playlist), ctypes.POINTER(Itdb_Track))
libgpod.itdb_playlist_remove_track.restype = None
# gpod/itdb.h: void itdb_track_remove (Itdb_Track *track);
libgpod.itdb_track_remove.argtypes = (ctypes.POINTER(Itdb_Track),)
libgpod.itdb_track_remove.restype = None
# gpod/itdb.h: void itdb_free (Itdb_iTunesDB *itdb);
libgpod.itdb_free.argtypes = (ctypes.POINTER(Itdb_iTunesDB),)
libgpod.itdb_free.restype = None
# gpod/itdb.h
ITDB_MEDIATYPE_AUDIO = (1 << 0)
ITDB_MEDIATYPE_MOVIE = (1 << 1)
ITDB_MEDIATYPE_PODCAST = (1 << 2)
ITDB_MEDIATYPE_VIDEO_PODCAST = (ITDB_MEDIATYPE_MOVIE | ITDB_MEDIATYPE_PODCAST)
def glist_foreach(ptr_to_glist, item_type):
cur = ptr_to_glist
while cur:
yield ctypes.cast(cur[0].data, item_type)
if not cur[0].next:
break
cur = cur[0].next
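# Iteration sketch, as used for the playlist members below:
#   for track in glist_foreach(playlist[0].members, ctypes.POINTER(Itdb_Track)):
#       print(track[0].title.decode())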
class iPodTrack(object):
def __init__(self, db, track):
self.db = db
self.track = track
self.episode_title = track[0].title.decode()
self.podcast_title = track[0].album.decode()
self.podcast_url = track[0].podcasturl.decode()
self.podcast_rss = track[0].podcastrss.decode()
self.playcount = track[0].playcount
self.bookmark_time = track[0].bookmark_time
# This returns a newly-allocated string, so we have to juggle the memory
# around a bit and take a copy of the string before free'ing it again.
filename_ptr = libgpod.itdb_filename_on_ipod(track)
if filename_ptr:
self.filename_on_ipod = ctypes.string_at(filename_ptr).decode()
libglib.g_free(filename_ptr)
else:
self.filename_on_ipod = None
def __repr__(self):
return 'iPodTrack(episode={}, podcast={})'.format(self.episode_title, self.podcast_title)
def initialize_bookmark(self, is_new, bookmark_time):
self.track[0].mark_unplayed = 0x02 if is_new else 0x01
self.track[0].bookmark_time = int(bookmark_time)
def remove_from_device(self):
libgpod.itdb_playlist_remove_track(self.db.podcasts_playlist, self.track)
libgpod.itdb_playlist_remove_track(self.db.master_playlist, self.track)
# This frees the memory pointed-to by the track object
libgpod.itdb_track_remove(self.track)
self.track = None
# Don't forget to write the database on close
self.db.modified = True
if self.filename_on_ipod is not None:
try:
os.unlink(self.filename_on_ipod)
except Exception:
logger.info('Could not delete podcast file from iPod', exc_info=True)
class iPodDatabase(object):
def __init__(self, mountpoint):
self.mountpoint = mountpoint
self.itdb = libgpod.itdb_parse(mountpoint.encode(), None)
if not self.itdb:
raise ValueError('iTunesDB not found at {}'.format(self.mountpoint))
logger.info('iTunesDB: %s', self.itdb)
self.modified = False
self.podcasts_playlist = libgpod.itdb_playlist_podcasts(self.itdb)
self.master_playlist = libgpod.itdb_playlist_mpl(self.itdb)
self.tracks = [iPodTrack(self, track)
for track in glist_foreach(self.podcasts_playlist[0].members, ctypes.POINTER(Itdb_Track))]
def get_podcast_tracks(self):
return self.tracks
def add_track(self, filename, episode_title, podcast_title, description, podcast_url, podcast_rss,
published_timestamp, track_length, is_audio):
track = libgpod.itdb_track_new()
track[0].title = libglib.g_strdup(episode_title.encode())
track[0].album = libglib.g_strdup(podcast_title.encode())
track[0].artist = libglib.g_strdup(podcast_title.encode())
track[0].description = libglib.g_strdup(description.encode())
track[0].podcasturl = libglib.g_strdup(podcast_url.encode())
track[0].podcastrss = libglib.g_strdup(podcast_rss.encode())
track[0].tracklen = track_length
track[0].size = os.path.getsize(filename)
track[0].time_released = libgpod.itdb_time_host_to_mac(published_timestamp)
if is_audio:
track[0].filetype = libglib.g_strdup(b'mp3')
track[0].mediatype = ITDB_MEDIATYPE_PODCAST
else:
track[0].filetype = libglib.g_strdup(b'm4v')
track[0].mediatype = ITDB_MEDIATYPE_VIDEO_PODCAST
# Start at the beginning, and add "unplayed" bullet
track[0].bookmark_time = 0
track[0].mark_unplayed = 0x02
# from set_podcast_flags()
track[0].remember_playback_position = 0x01
track[0].skip_when_shuffling = 0x01
track[0].flag1 = 0x02
track[0].flag2 = 0x01
track[0].flag3 = 0x01
track[0].flag4 = 0x01
libgpod.itdb_track_add(self.itdb, track, -1)
libgpod.itdb_playlist_add_track(self.podcasts_playlist, track, -1)
libgpod.itdb_playlist_add_track(self.master_playlist, track, -1)
copied = libgpod.itdb_cp_track_to_ipod(track, filename.encode(), None)
logger.info('Copy result: %r', copied)
self.modified = True
self.tracks.append(iPodTrack(self, track))
return self.tracks[-1]
def __del__(self):
# If we hit the finalizer without closing the iTunesDB properly,
# just free the memory, but don't write out any modifications.
self.close(write=False)
def close(self, write=True):
if self.itdb:
if self.modified and write:
result = libgpod.itdb_write(self.itdb, None)
logger.info('Close result: %r', result)
self.modified = False
libgpod.itdb_free(self.itdb)
self.itdb = None
if __name__ == '__main__':
import argparse
import textwrap
parser = argparse.ArgumentParser(description='Dump podcasts in iTunesDB via libgpod')
parser.add_argument('mountpoint', type=str, help='Path to mounted iPod storage')
args = parser.parse_args()
ipod = iPodDatabase(args.mountpoint)
for track in ipod.get_podcast_tracks():
print(textwrap.dedent(f"""
Episode: {track.episode_title}
Podcast: {track.podcast_title}
Episode URL: {track.podcast_url}
Podcast URL: {track.podcast_rss}
Play count: {track.playcount}
Bookmark: {track.bookmark_time / 1000:.0f} seconds
Filename: {track.filename_on_ipod}
""").rstrip())
ipod.close()
| 16,218
|
Python
|
.py
| 357
| 38.809524
| 118
| 0.657308
|
gpodder/gpodder
| 1,268
| 204
| 307
|
GPL-3.0
|
9/5/2024, 5:10:30 PM (Europe/Amsterdam)
|
8,294
|
__init__.py
|
gpodder_gpodder/src/gpodder/__init__.py
|
# -*- coding: utf-8 -*-
#
# gPodder - A media aggregator and podcast client
# Copyright (c) 2005-2018 The gPodder Team
#
# gPodder is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# gPodder is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# This metadata block gets parsed by setup.py and pyproject.toml - use single quotes only
__tagline__ = 'Media aggregator and podcast client'
__author__ = 'Thomas Perl <thp@gpodder.org>'
__version__ = '3.11.4'
__date__ = '2023-10-11'
__copyright__ = '© 2005-2023 The gPodder Team'
__license__ = 'GNU General Public License, version 3 or later'
__url__ = 'http://gpodder.org/'
# Use public version part for __version_info__, see PEP 440
__public_version__, __local_version__ = next(
(v[0], v[1] if len(v) > 1 else '') for v in (__version__.split('+'),))
__version_info__ = tuple(int(x) for x in __public_version__.split('.'))
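# e.g. '3.11.4' -> ('3.11.4', '') and __version_info__ == (3, 11, 4);
#      '3.11.4+local1' -> ('3.11.4', 'local1') per PEP 440 local version labels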
import gettext
import locale
import os
import platform
import socket
import sys
from gpodder.build_info import BUILD_TYPE
# Check if real hard dependencies are available
try:
import podcastparser
except ImportError:
print("""
Error: Module "podcastparser" (python-podcastparser) not found.
The podcastparser module can be downloaded from
http://gpodder.org/podcastparser/
From a source checkout, see https://gpodder.github.io/docs/run-from-git.html
""")
sys.exit(1)
del podcastparser
try:
import mygpoclient
except ImportError:
print("""
Error: Module "mygpoclient" (python-mygpoclient) not found.
The mygpoclient module can be downloaded from
http://gpodder.org/mygpoclient/
From a source checkout, see https://gpodder.github.io/docs/run-from-git.html
""")
sys.exit(1)
del mygpoclient
try:
import sqlite3
except ImportError:
print("""
Error: Module "sqlite3" not found.
Build Python with SQLite 3 support or get it from
http://code.google.com/p/pysqlite/
""")
sys.exit(1)
del sqlite3
# Is gpodder running in verbose mode?
verbose = False
# Is gpodder running in quiet mode?
quiet = False
# The User-Agent string for downloads
user_agent = 'gPodder/%s (+%s) %s' % (__version__, __url__, platform.system())
# Are we running in GUI or console mode?
class UI(object):
def __init__(self):
self.gtk = False
self.cli = False
ui = UI()
# D-Bus specific interface names
dbus_bus_name = 'org.gpodder'
dbus_gui_object_path = '/gui'
dbus_podcasts_object_path = '/podcasts'
dbus_interface = 'org.gpodder.interface'
dbus_podcasts = 'org.gpodder.podcasts'
dbus_session_bus = None
# Set "win32" to True if we are on Windows
ui.win32 = (platform.system() == 'Windows')
# Set "osx" to True if we are on Mac OS X
ui.osx = (platform.system() == 'Darwin')
# We assume it's a freedesktop.org system if it's not Windows or OS X
ui.freedesktop = not ui.win32 and not ui.osx
# i18n setup (will result in "gettext" to be available)
# Use _ = gpodder.gettext in modules to enable string translations
textdomain = 'gpodder'
locale_dir = gettext.bindtextdomain(textdomain)
if ui.win32:
# this must be done prior to gettext.translation to set the locale (see #484)
from gpodder.utilwin32locale import install
install(textdomain, locale_dir)
t = gettext.translation(textdomain, locale_dir, fallback=True)
gettext = t.gettext
ngettext = t.ngettext
del t
# Set up textdomain for Gtk.Builder (this accesses the C library functions)
if hasattr(locale, 'bindtextdomain'):
locale.bindtextdomain(textdomain, locale_dir)
del locale_dir
# Set up socket timeouts to fix bug 174
SOCKET_TIMEOUT = 60
socket.setdefaulttimeout(SOCKET_TIMEOUT)
del socket
del SOCKET_TIMEOUT
# Variables reserved for GUI-specific use (will be set accordingly)
ui_folders = []
icon_file = None
images_folder = None
user_extensions = None
# Episode states used in the database
STATE_NORMAL, STATE_DOWNLOADED, STATE_DELETED = list(range(3))
# Paths (gPodder's home folder, config, db, download and data prefix)
home = None
config_file = None
database_file = None
downloads = None
prefix = None
ENV_HOME, ENV_DOWNLOADS = 'GPODDER_HOME', 'GPODDER_DOWNLOAD_DIR'
no_update_check_file = None
# Function to set a new gPodder home folder
def set_home(new_home):
global home, config_file, database_file, downloads
home = os.path.abspath(new_home)
config_file = os.path.join(home, 'Settings.json')
database_file = os.path.join(home, 'Database')
if ENV_DOWNLOADS not in os.environ:
downloads = os.path.join(home, 'Downloads')
def fixup_home(old_home):
if ui.osx or ui.win32:
if ui.osx:
new_home = os.path.expanduser(os.path.join('~', 'Library',
'Application Support', 'gPodder'))
elif BUILD_TYPE == 'windows-portable':
new_home = os.path.normpath(os.path.join(os.path.dirname(sys.executable), "..", "..", "config"))
old_home = new_home # force to config directory
print("D: windows-portable build; forcing home to config directory %s" % new_home, file=sys.stderr)
else: # ui.win32, not portable build
from gpodder.utilwin32ctypes import (
get_documents_folder, get_reg_current_user_string_value)
try:
# from old launcher, see
# https://github.com/gpodder/gpodder/blob/old/gtk2/tools/win32-launcher/folderselector.c
new_home = get_reg_current_user_string_value("Software\\gpodder.org\\gPodder", "GPODDER_HOME")
print("D: windows build; registry home = %s" % new_home, file=sys.stderr)
except Exception as e:
print("E: can't get GPODDER_HOME from registry: %s" % e, file=sys.stderr)
new_home = None
if new_home is None:
try:
new_home = os.path.join(get_documents_folder(), "gPodder")
print("D: windows build; documents home = %s" % new_home, file=sys.stderr)
except Exception as e:
print("E: can't get user's Documents folder: %s" % e, file=sys.stderr)
new_home = old_home
# Users who do not have the old home directory, or who have it but also
# have the new home directory (to cater to situations where the user
# might for some reason or the other have a ~/gPodder/ directory) get
# to use the new, more OS X-ish home.
if not os.path.exists(old_home) or os.path.exists(new_home):
return new_home
return old_home
# Default locations for configuration and data files
default_home = os.path.expanduser(os.path.join('~', 'gPodder'))
default_home = fixup_home(default_home)
set_home(os.path.expanduser(os.environ.get(ENV_HOME, default_home)))
if home != default_home:
print('Storing data in', home, '(GPODDER_HOME is set)', file=sys.stderr)
if ENV_DOWNLOADS in os.environ:
# Allow to relocate the downloads folder (pull request 4, bug 466)
downloads = os.path.expanduser(os.environ[ENV_DOWNLOADS])
print('Storing downloads in %s (%s is set)' % (downloads,
ENV_DOWNLOADS), file=sys.stderr)
# Plugins to load by default
DEFAULT_PLUGINS = [
'gpodder.plugins.soundcloud',
]
def load_plugins():
"""Load (non-essential) plugin modules
This loads a default set of plugins, but you can use
the environment variable "GPODDER_PLUGINS" to modify
the list of plugins."""
PLUGINS = os.environ.get('GPODDER_PLUGINS', None)
if PLUGINS is None:
PLUGINS = DEFAULT_PLUGINS
else:
PLUGINS = PLUGINS.split()
for plugin in PLUGINS:
try:
__import__(plugin)
except Exception as e:
print('Cannot load plugin: %s (%s)' % (plugin, e), file=sys.stderr)
| 8,275
|
Python
|
.py
| 203
| 36.044335
| 111
| 0.690966
|
gpodder/gpodder
| 1,268
| 204
| 307
|
GPL-3.0
|
9/5/2024, 5:10:30 PM (Europe/Amsterdam)
|
8,295
|
log.py
|
gpodder_gpodder/src/gpodder/log.py
|
# -*- coding: utf-8 -*-
#
# gPodder - A media aggregator and podcast client
# Copyright (c) 2005-2018 The gPodder Team
#
# gPodder is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# gPodder is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# gpodder.log - Logging setup
# Thomas Perl <thp@gpodder.org>; 2012-03-02
# Based on an initial draft by Neal Walfield
import glob
import logging
import os
import sys
import time
import traceback
import gpodder
logger = logging.getLogger(__name__)
def setup(verbose=True, quiet=False):
# mark verbose mode
gpodder.verbose = verbose
gpodder.quiet = quiet and not verbose
# Configure basic stdout logging
STDOUT_FMT = '%(created)f [%(name)s] %(levelname)s: %(message)s'
logging.basicConfig(format=STDOUT_FMT,
level=logging.DEBUG if verbose else logging.ERROR if quiet else logging.WARNING)
# Replace except hook with a custom one that logs it as an error
original_excepthook = sys.excepthook
def on_uncaught_exception(exctype, value, tb):
message = ''.join(traceback.format_exception(exctype, value, tb))
logger.error('Uncaught exception: %s', message)
original_excepthook(exctype, value, tb)
sys.excepthook = on_uncaught_exception
if os.environ.get('GPODDER_WRITE_LOGS', 'yes') != 'no':
# Configure file based logging
logging_basename = time.strftime('%Y-%m-%d.log')
logging_directory = os.path.join(gpodder.home, 'Logs')
if not os.path.isdir(logging_directory):
try:
os.makedirs(logging_directory)
            except OSError:
logger.warning('Cannot create output directory: %s',
logging_directory)
return False
# Keep logs around for 5 days
LOG_KEEP_DAYS = 5
# Purge old logfiles if they are older than LOG_KEEP_DAYS days
old_logfiles = glob.glob(os.path.join(logging_directory, '*-*-*.log'))
for old_logfile in old_logfiles:
st = os.stat(old_logfile)
if time.time() - st.st_mtime > 60 * 60 * 24 * LOG_KEEP_DAYS:
logger.info('Purging old logfile: %s', old_logfile)
try:
os.remove(old_logfile)
                except OSError:
                    logger.warning('Cannot purge logfile: %s', old_logfile, exc_info=True)
root = logging.getLogger()
logfile = os.path.join(logging_directory, logging_basename)
file_handler = logging.FileHandler(logfile, 'a', 'utf-8')
FILE_FMT = '%(asctime)s [%(name)s] %(levelname)s: %(message)s'
file_handler.setFormatter(logging.Formatter(FILE_FMT))
root.addHandler(file_handler)
logger.debug('==== gPodder starts up (ui=%s) ===', ', '.join(name
for name in ('cli', 'gtk') if getattr(gpodder.ui, name, False)))
return True
| 3,362
|
Python
|
.py
| 76
| 37.447368
| 92
| 0.664832
|
gpodder/gpodder
| 1,268
| 204
| 307
|
GPL-3.0
|
9/5/2024, 5:10:30 PM (Europe/Amsterdam)
|
8,296
|
core.py
|
gpodder_gpodder/src/gpodder/core.py
|
# -*- coding: utf-8 -*-
#
# gPodder - A media aggregator and podcast client
# Copyright (c) 2005-2018 The gPodder Team
#
# gPodder is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# gPodder is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# gpodder.core - Common functionality used by all UIs
# Thomas Perl <thp@gpodder.org>; 2011-02-06
import gpodder
from gpodder import config, dbsqlite, extensions, model, util
class Core(object):
def __init__(self,
config_class=config.Config,
database_class=dbsqlite.Database,
model_class=model.Model):
# Initialize the gPodder home directory
util.make_directory(gpodder.home)
# Open the database and configuration file
self.db = database_class(gpodder.database_file)
self.model = model_class(self.db)
self.config = config_class(gpodder.config_file)
# Load extension modules and install the extension manager
gpodder.user_extensions = extensions.ExtensionManager(self)
# Load installed/configured plugins
gpodder.load_plugins()
# Update the current device in the configuration
self.config.mygpo.device.type = util.detect_device_type()
def shutdown(self):
# Notify all extensions that we are being shut down
gpodder.user_extensions.shutdown()
# Close the database and store outstanding changes
self.db.close()
| 1,953
|
Python
|
.py
| 44
| 38.886364
| 71
| 0.717071
|
gpodder/gpodder
| 1,268
| 204
| 307
|
GPL-3.0
|
9/5/2024, 5:10:30 PM (Europe/Amsterdam)
|
8,297
|
query.py
|
gpodder_gpodder/src/gpodder/query.py
|
# -*- coding: utf-8 -*-
#
# gPodder - A media aggregator and podcast client
# Copyright (c) 2005-2018 The gPodder Team
#
# gPodder is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# gPodder is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#
# gpodder.query - Episode Query Language (EQL) implementation (2010-11-29)
#
import datetime
import re
import gpodder
class Matcher(object):
"""Match implementation for EQL
This class implements the low-level matching of
EQL statements against episode objects.
"""
def __init__(self, episode):
self._episode = episode
def match(self, term):
try:
# case-sensitive search in haystack, or both title and description if no haystack
def S(needle, haystack=None):
if haystack is not None:
return (needle in haystack)
if needle in self._episode.title:
return True
return (needle in self._episode._text_description)
# case-insensitive search in haystack, or both title and description if no haystack
def s(needle, haystack=None):
needle = needle.casefold()
if haystack is not None:
return (needle in haystack.casefold())
if needle in self._episode.title.casefold():
return True
return (needle in self._episode._text_description.casefold())
# case-sensitive regular expression search in haystack, or both title and description if no haystack
def R(needle, haystack=None):
regexp = re.compile(needle)
if haystack is not None:
return regexp.search(haystack)
if regexp.search(self._episode.title):
return True
return regexp.search(self._episode._text_description)
# case-insensitive regular expression search in haystack, or both title and description if no haystack
def r(needle, haystack=None):
regexp = re.compile(needle, re.IGNORECASE)
if haystack is not None:
return regexp.search(haystack)
if regexp.search(self._episode.title):
return True
return regexp.search(self._episode._text_description)
return bool(eval(term, {'__builtins__': None, 'S': S, 's': s, 'R': R, 'r': r}, self))
except Exception:
return False
def __getitem__(self, k):
episode = self._episode
# Adjectives (for direct usage)
if k == 'new':
return (episode.state == gpodder.STATE_NORMAL and episode.is_new)
elif k in ('downloaded', 'dl'):
return episode.was_downloaded(and_exists=True)
elif k in ('deleted', 'rm'):
return episode.state == gpodder.STATE_DELETED
elif k == 'played':
return not episode.is_new
elif k == 'downloading':
return episode.downloading
elif k == 'archive':
return episode.archive
elif k in ('finished', 'fin'):
return episode.is_finished()
elif k in ('video', 'audio'):
return episode.file_type() == k
elif k == 'torrent':
return episode.url.endswith('.torrent') or 'torrent' in episode.mime_type
elif k == 'paused':
return (episode.download_task is not None
and episode.download_task.status in (episode.download_task.PAUSED, episode.download_task.PAUSING))
elif k == 'failed':
return (episode.download_task is not None and episode.download_task.status == episode.download_task.FAILED)
# Nouns (for comparisons)
if k in ('megabytes', 'mb'):
return episode.file_size / (1024 * 1024)
elif k == 'title':
return episode.title
elif k == 'description':
return episode._text_description
elif k == 'since':
return (datetime.datetime.now() - datetime.datetime.fromtimestamp(episode.published)).days
elif k == 'age':
return episode.age_in_days()
elif k in ('minutes', 'min'):
return episode.total_time / 60
elif k in ('remaining', 'rem'):
return (episode.total_time - episode.current_position) / 60
elif k == 'podcast':
return episode.channel.title
elif k == 'section':
return episode.channel.section
elif k == 'url':
return episode.url
elif k == 'link':
return episode.link
elif k == 'filename':
return episode.download_filename
raise KeyError(k)
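# Usage sketch: Matcher evaluates one EQL term against one episode, using
# the restricted eval() namespace set up above (no builtins, only the
# S/s/R/r helpers plus the adjective/noun keys resolved by __getitem__):
#
#     >>> # Matcher(episode).match('downloaded and megabytes > 10')
#     >>> # Matcher(episode).match("s('linux', title)")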
class EQL(object):
"""A Query in EQL
Objects of this class represent a query on episodes
using EQL. Example usage:
>>> q = EQL('downloaded and megabytes > 10')
>>> # q.filter(channel.get_all_episodes())
>>> # EQL('new and video').match(episode)
Regular expression queries are also supported:
>>> q = EQL('/^The.*/')
>>> q = EQL('/community/i')
Normal string matches are also supported:
>>> q = EQL('"S04"')
>>> q = EQL("'linux'")
The lowercase s() and r() functions perform
case-insensitive string and regular expression
matches:
>>> q = EQL("s('linux')")
>>> q = EQL("r('^the.*')")
The uppercase S() and R() functions perform
case-sensitive string and regular expression
matches:
>>> q = EQL("S('Linux')")
>>> q = EQL("R('^The.*')")
The S, s, R, and r functions search both
title and description by default. Passing
    'title' or 'description' as the second
    parameter restricts the search:
>>> q = EQL("s('in title', title)")
>>> q = EQL("s('in description', description)")
Normal EQL queries can be mixed with RegEx
or string matching using the S, s, R and r
functions:
>>> # EQL('downloaded and r("The.*")')
"""
def __init__(self, query):
self._query = query
self._flags = 0
self._regex = False
self._string = False
# Regular expression based query
match = re.match(r'^/(.*)/(i?)$', query)
if match is not None:
self._regex = True
self._query, flags = match.groups()
if flags == 'i':
self._flags |= re.I
# String based query
match = re.match("^([\"'])(.*)(\\1)$", query)
if match is not None:
self._string = True
a, query, b = match.groups()
self._query = query.lower()
# For everything else, compile the expression
if not self._regex and not self._string:
try:
self._query = compile(query, '<eql-string>', 'eval')
except Exception:
self._query = None
def match(self, episode):
if self._query is None:
return False
if self._regex:
return re.search(self._query, episode.title, self._flags) is not None
elif self._string:
return self._query in episode.title.lower() or self._query in episode._text_description.lower()
return Matcher(episode).match(self._query)
def filter(self, episodes):
return list(filter(self.match, episodes))
def UserEQL(query):
"""EQL wrapper for user input
Automatically adds missing quotes around a
non-EQL string for user-based input. In this
case, EQL queries need to be enclosed in ().
"""
if query is None:
return None
if query == '' or (query and query[0] not in "(/'\""):
return EQL("'%s'" % query)
else:
return EQL(query)
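# Examples (sketch): plain text is wrapped in quotes, while input that
# already starts with '(', '/', "'" or '"' is handed to EQL unchanged:
#
#     >>> # UserEQL('linux') behaves like EQL("'linux'")
#     >>> # UserEQL('(downloaded and megabytes > 10)') is passed through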
| 8,267 | Python | .py | 199 | 32.261307 | 119 | 0.596183 | gpodder/gpodder | 1,268 | 204 | 307 | GPL-3.0 | 9/5/2024, 5:10:30 PM (Europe/Amsterdam) |
| 8,298 | player.py | gpodder_gpodder/src/gpodder/player.py |
# -*- coding: utf-8 -*-
#
# gPodder - A media aggregator and podcast client
# Copyright (c) 2005-2018 The gPodder Team
#
# gPodder is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# gPodder is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#
# gpodder.player - Podcatcher implementation of the Media Player D-Bus API
# Thomas Perl <thp@gpodder.org>; 2010-04-25
#
#
# This API specification aims at providing a documented, easy-to-use API for
# getting and setting the media player position via D-Bus. This should allow
# media players (such as Panucci) and podcast aggregators (such as gPodder) to
# work together and synchronize the playback position of media files.
#
# == Interface: org.gpodder.player ==
#
# - PlaybackStarted(uint32 position, string file_uri)
#
# Emitted when the media player starts playback of the file file_uri,
# starting at offset position.
#
#
# - PlaybackStopped(uint32 start_position, uint32 end_position,
# uint32 total_time, string file_uri)
#
# Emitted when the user stops/pauses playback, when the playback ends or the
# player is closed. The file URI is in file_uri, the start time of the
# segment that has just been played is in start_position, the stop time in
# end_position and the (detected) total time of the file is in total_time.
#
# Seeking in the file should also emit a PlaybackStopped signal (at the
# position where the seek is initialized) and a PlaybackStarted signal (at
# the position to which the seek jumps).
#
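# A hedged sketch of the emitting (player) side, assuming the dbus-python
# bindings; signal names and argument order follow the specification above:
#
#     import dbus.service
#
#     class PlayerSignals(dbus.service.Object):
#         @dbus.service.signal('org.gpodder.player', signature='us')
#         def PlaybackStarted(self, position, file_uri):
#             pass
#
#         @dbus.service.signal('org.gpodder.player', signature='uuus')
#         def PlaybackStopped(self, start_position, end_position,
#                             total_time, file_uri):
#             pass
#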
import urllib.error
import urllib.parse
import urllib.request
import gpodder
class MediaPlayerDBusReceiver(object):
INTERFACE = 'org.gpodder.player'
SIGNAL_STARTED = 'PlaybackStarted'
SIGNAL_STOPPED = 'PlaybackStopped'
def __init__(self, on_play_event):
self.on_play_event = on_play_event
self.bus = gpodder.dbus_session_bus
self.bus.add_signal_receiver(self.on_playback_started,
self.SIGNAL_STARTED,
self.INTERFACE,
None,
None)
self.bus.add_signal_receiver(self.on_playback_stopped,
self.SIGNAL_STOPPED,
self.INTERFACE,
None,
None)
def on_playback_started(self, position, file_uri):
pass
def on_playback_stopped(self, start, end, total, file_uri):
if file_uri.startswith('/'):
file_uri = 'file://' + urllib.parse.quote(file_uri)
self.on_play_event(start, end, total, file_uri)
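# Usage sketch (hypothetical callback; assumes gpodder.dbus_session_bus has
# already been initialized by the UI):
#
#     def on_play_event(start, end, total, file_uri):
#         pass  # e.g. update the playback position of the episode at file_uri
#
#     # receiver = MediaPlayerDBusReceiver(on_play_event)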
| 3,188 | Python | .py | 75 | 36.186667 | 78 | 0.673436 | gpodder/gpodder | 1,268 | 204 | 307 | GPL-3.0 | 9/5/2024, 5:10:30 PM (Europe/Amsterdam) |
| 8,299 | youtube.py | gpodder_gpodder/src/gpodder/youtube.py |
# -*- coding: utf-8 -*-
#
# gPodder - A media aggregator and podcast client
# Copyright (c) 2005-2018 The gPodder Team
#
# gPodder is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# gPodder is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# gpodder.youtube - YouTube and related magic
# Justin Forest <justin.forest@gmail.com> 2008-10-13
#
import io
import json
import logging
import re
import urllib
import xml.etree.ElementTree
from functools import lru_cache
from html.parser import HTMLParser
from urllib.parse import parse_qs
import gpodder
from gpodder import registry, util
logger = logging.getLogger(__name__)
_ = gpodder.gettext
# http://en.wikipedia.org/wiki/YouTube#Quality_and_formats
# https://github.com/ytdl-org/youtube-dl/blob/master/youtube_dl/extractor/youtube.py#L447
# adaptive audio formats
# 140 MP4 128k
# 251 WebM 160k
# 250 WebM 70k
# 249 WebM 50k
# formats and fallbacks of same quality: WebM -> MP4 -> FLV
flv_240 = [5]
flv_270 = [6]
flv_360 = [34]
flv_480 = [35]
mp4_144 = ['160+140']
mp4_240 = ['133+140'] + flv_240
mp4_360 = [18, '134+140'] + flv_360
mp4_480 = ['135+140'] + flv_480
mp4_720 = [22, '136+140']
mp4_1080 = [37, '137+140']
mp4_1440 = ['264+140']
mp4_2160 = ['266+140']
mp4_3072 = [38]
mp4_4320 = ['138+140']
webm_144 = ['278+250'] + mp4_144
webm_240 = ['242+250'] + mp4_240
webm_360 = [43, '243+251'] + mp4_360
webm_480 = [44, '244+251'] + mp4_480
webm_720 = [45, '247+251'] + mp4_720
webm_1080 = [46, '248+251'] + mp4_1080
webm_1440 = ['271+251'] + mp4_1440
webm_2160 = ['313+251'] + mp4_2160
webm_4320 = ['272+251'] + mp4_4320
# fallbacks to lower quality
webm_240 += webm_144
webm_360 += flv_270 + webm_240
webm_480 += webm_360
webm_720 += webm_480
webm_1080 += webm_720
webm_1440 += webm_1080
webm_2160 += webm_1440
webm_4320 += mp4_3072 + webm_2160
mp4_240 += mp4_144
mp4_360 += flv_270 + mp4_240
mp4_480 += mp4_360
mp4_720 += mp4_480
mp4_1080 += mp4_720
mp4_1440 += mp4_1080
mp4_2160 += mp4_1440
mp4_3072 += mp4_2160
mp4_4320 += mp4_3072
flv_270 += flv_240
flv_360 += flv_270
flv_480 += flv_360
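# For example, after the additions above a request for webm_720 tries
# '247+251' first, then the MP4 720p ids (22, '136+140'), and then walks
# down through the 480p/360p/... WebM, MP4 and FLV ids in that order.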
# format id, (preferred ids, path(?), description) # video bitrate, audio bitrate
formats = [
# WebM VP8, VP9 or VP9 HFR video, Vorbis or Opus audio
# Fallback to MP4 or FLV
(272, (webm_4320, '272/7680x4320/99/0/0', 'WebM 4320p 8K (7680x4320) youtube-dl')), # N/A, 160 kbps
(313, (webm_2160, '313/3840x2160/99/0/0', 'WebM 2160p 4K (3840x2160) youtube-dl')), # N/A, 160 kbps
(271, (webm_1440, '271/2560x1440/99/0/0', 'WebM 1440p (2560x1440) youtube-dl')), # N/A, 160 kbps
(46, (webm_1080, '46/1920x1080/99/0/0', 'WebM 1080p (1920x1080) youtube-dl')), # N/A, 192 kbps
(45, (webm_720, '45/1280x720/99/0/0', 'WebM 720p (1280x720) youtube-dl')), # 2.0 Mbps, 192 kbps
(44, (webm_480, '44/854x480/99/0/0', 'WebM 480p (854x480) youtube-dl')), # 1.0 Mbps, 128 kbps
(43, (webm_360, '43/640x360/99/0/0', 'WebM 360p (640x360)')), # 0.5 Mbps, 128 kbps
(242, (webm_240, '242/426x240/99/0/0', 'WebM 240p (426x240) youtube-dl')), # N/A, 70 kbps
(278, (webm_144, '278/256x144/99/0/0', 'WebM 144p (256x144) youtube-dl')), # N/A, 70 kbps
# MP4 H.264 video, AAC audio
# Fallback to FLV
(138, (mp4_4320, '138/7680x4320/9/0/115', 'MP4 4320p 8K (7680x4320) youtube-dl')), # N/A, 128 kbps
(38, (mp4_3072, '38/4096x3072/9/0/115', 'MP4 3072p 4K (4096x3072)')), # 5.0 - 3.5 Mbps, 192 kbps
(266, (mp4_2160, '266/3840x2160/9/0/115', 'MP4 2160p 4K (3840x2160) youtube-dl')), # N/A, 128 kbps
(264, (mp4_1440, '264/2560x1440/9/0/115', 'MP4 1440p (2560x1440) youtube-dl')), # N/A, 128 kbps
(37, (mp4_1080, '37/1920x1080/9/0/115', 'MP4 1080p (1920x1080) youtube-dl')), # 4.3 - 3.0 Mbps, 192 kbps
(22, (mp4_720, '22/1280x720/9/0/115', 'MP4 720p (1280x720)')), # 2.9 - 2.0 Mbps, 192 kbps
(135, (mp4_480, '135/854x480/9/0/115', 'MP4 480p (854x480) youtube-dl')), # N/A, 128 kbps
(18, (mp4_360, '18/640x360/9/0/115', 'MP4 360p (640x360)')), # 0.5 Mbps, 96 kbps
(133, (mp4_240, '133/426x240/9/0/115', 'MP4 240p (426x240) youtube-dl')), # N/A, 128 kbps
(160, (mp4_144, '160/256x144/9/0/115', 'MP4 144p (256x144) youtube-dl')), # N/A, 128 kbps
# FLV H.264 video, AAC audio
# Fallback to FLV 6 or 5
(35, (flv_480, '35/854x480/9/0/115', 'FLV 480p (854x480)')), # 1 - 0.80 Mbps, 128 kbps
(34, (flv_360, '34/640x360/9/0/115', 'FLV 360p (640x360)')), # 0.50 Mbps, 128 kbps
# FLV Sorenson H.263 video, MP3 audio
(6, (flv_270, '6/480x270/7/0/0', 'FLV 270p (480x270)')), # 0.80 Mbps, 64 kbps
(5, (flv_240, '5/320x240/7/0/0', 'FLV 240p (320x240)')), # 0.25 Mbps, 64 kbps
]
formats_dict = dict(formats)
# streaming formats and fallbacks to lower quality
hls_144 = [91]
hls_240 = [92] + hls_144
hls_360 = [93] + hls_240
hls_480 = [94] + hls_360
hls_720 = [95] + hls_480
hls_1080 = [96] + hls_720
hls_formats = [
(96, (hls_1080, '9/1920x1080/9/0/115', 'MP4 1080p (1920x1080)')), # N/A, 256 kbps
(95, (hls_720, '9/1280x720/9/0/115', 'MP4 720p (1280x720)')), # N/A, 256 kbps
(94, (hls_480, '9/854x480/9/0/115', 'MP4 480p (854x480)')), # N/A, 128 kbps
(93, (hls_360, '9/640x360/9/0/115', 'MP4 360p (640x360)')), # N/A, 128 kbps
(92, (hls_240, '9/426x240/9/0/115', 'MP4 240p (426x240)')), # N/A, 48 kbps
(91, (hls_144, '9/256x144/9/0/115', 'MP4 144p (256x144)')), # N/A, 48 kbps
]
hls_formats_dict = dict(hls_formats)
CHANNEL_VIDEOS_XML = 'https://www.youtube.com/feeds/videos.xml'
WATCH_ENDPOINT = 'https://www.youtube.com/watch?bpctr=9999999999&has_verified=1&v='
# The page may contain "};" sequences inside the initial player response.
# Use a greedy match with the script end tag, and fall back to a non-greedy match without it.
INITIAL_PLAYER_RESPONSE_RE1 = r'ytInitialPlayerResponse\s*=\s*({.+})\s*;\s*</script'
INITIAL_PLAYER_RESPONSE_RE2 = r'ytInitialPlayerResponse\s*=\s*({.+?})\s*;'
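# Sketch: on a page like 'ytInitialPlayerResponse = {"a": "};"};</script',
# the greedy RE1 captures the whole object, while RE2 alone would stop at
# the first '};' inside the string value.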
def get_ipr(page):
for regex in (INITIAL_PLAYER_RESPONSE_RE1, INITIAL_PLAYER_RESPONSE_RE2):
ipr = re.search(regex, page)
if ipr is not None:
return ipr
return None
class YouTubeError(Exception):
pass
def get_fmt_ids(youtube_config, allow_partial):
if allow_partial:
if youtube_config.preferred_hls_fmt_id == 0:
hls_fmt_ids = (youtube_config.preferred_hls_fmt_ids if youtube_config.preferred_hls_fmt_ids else [])
else:
fmt = hls_formats_dict.get(youtube_config.preferred_hls_fmt_id)
if fmt is None:
hls_fmt_ids = []
else:
hls_fmt_ids, path, description = fmt
else:
hls_fmt_ids = []
if youtube_config.preferred_fmt_id == 0:
return (youtube_config.preferred_fmt_ids + hls_fmt_ids if youtube_config.preferred_fmt_ids else hls_fmt_ids)
fmt = formats_dict.get(youtube_config.preferred_fmt_id)
if fmt is None:
return hls_fmt_ids
fmt_ids, path, description = fmt
return fmt_ids + hls_fmt_ids
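# Sketch: with preferred_fmt_id == 22 (MP4 720p) and no HLS preference set,
# this returns the mp4_720 fallback chain, so callers try itag 22, then
# '136+140', then progressively lower-quality ids.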
@registry.download_url.register
def youtube_real_download_url(config, episode, allow_partial):
fmt_ids = get_fmt_ids(config.youtube, allow_partial) if config else None
res, duration = get_real_download_url(episode.url, allow_partial, fmt_ids)
if duration is not None:
episode.total_time = int(int(duration) / 1000)
return None if res == episode.url else res
def youtube_get_old_endpoint(vid):
# TODO: changing 'detailpage' to 'embedded' allows age-restricted content
url = 'https://www.youtube.com/get_video_info?html5=1&c=TVHTML5&cver=6.20180913&el=detailpage&video_id=' + vid
r = util.urlopen(url)
if not r.ok:
raise YouTubeError('Youtube "%s": %d %s' % (url, r.status_code, r.reason))
else:
return r.text, None
def youtube_get_new_endpoint(vid):
url = WATCH_ENDPOINT + vid
r = util.urlopen(url)
if not r.ok:
raise YouTubeError('Youtube "%s": %d %s' % (url, r.status_code, r.reason))
ipr = get_ipr(r.text)
if ipr is None:
try:
url = get_gdpr_consent_url(r.text)
except YouTubeError as e:
raise YouTubeError('Youtube "%s": No ytInitialPlayerResponse found and %s' % (url, str(e)))
r = util.urlopen(url)
if not r.ok:
raise YouTubeError('Youtube "%s": %d %s' % (url, r.status_code, r.reason))
ipr = get_ipr(r.text)
if ipr is None:
raise YouTubeError('Youtube "%s": No ytInitialPlayerResponse found' % url)
return None, ipr.group(1)
def get_total_time(episode):
try:
vid = get_youtube_id(episode.url)
if vid is None:
return 0
url = WATCH_ENDPOINT + vid
r = util.urlopen(url)
if not r.ok:
return 0
ipr = get_ipr(r.text)
if ipr is None:
url = get_gdpr_consent_url(r.text)
r = util.urlopen(url)
if not r.ok:
return 0
ipr = get_ipr(r.text)
if ipr is None:
return 0
player_response = json.loads(ipr.group(1))
return int(player_response['videoDetails']['lengthSeconds']) # 0 if live
    except Exception:
return 0
def get_real_download_url(url, allow_partial, preferred_fmt_ids=None):
if not preferred_fmt_ids:
preferred_fmt_ids, _, _ = formats_dict[22] # MP4 720p
duration = None
vid = get_youtube_id(url)
if vid is not None:
try:
old_page, new_page = youtube_get_new_endpoint(vid)
except YouTubeError as e:
logger.info(str(e))
old_page, new_page = youtube_get_old_endpoint(vid)
def find_urls(old_page, new_page):
# streamingData is preferable to url_encoded_fmt_stream_map
# streamingData.formats are the same as url_encoded_fmt_stream_map
# streamingData.adaptiveFormats are audio-only and video-only formats
x = parse_qs(old_page) if old_page else json.loads(new_page)
player_response = json.loads(x['player_response'][0]) if old_page and 'player_response' in x else x
error_message = None
if 'reason' in x:
# TODO: unknown if this is valid for new_page
error_message = util.remove_html_tags(x['reason'][0])
elif 'playabilityStatus' in player_response:
playabilityStatus = player_response['playabilityStatus']
if 'reason' in playabilityStatus:
error_message = util.remove_html_tags(playabilityStatus['reason'])
elif 'liveStreamability' in playabilityStatus \
and not playabilityStatus['liveStreamability'].get('liveStreamabilityRenderer', {}).get('displayEndscreen', False):
# playabilityStatus.liveStreamability -- video is or was a live stream
# playabilityStatus.liveStreamability.liveStreamabilityRenderer.displayEndscreen -- video has ended if present
if allow_partial and 'streamingData' in player_response and 'hlsManifestUrl' in player_response['streamingData']:
r = util.urlopen(player_response['streamingData']['hlsManifestUrl'])
if not r.ok:
raise YouTubeError('HLS Manifest: %d %s' % (r.status_code, r.reason))
manifest = r.text.splitlines()
                        urls = [line for line in manifest if line and not line.startswith('#')]
itag_re = re.compile(r'/itag/([0-9]+)/')
for url in urls:
itag = itag_re.search(url).group(1)
yield int(itag), [url, None]
return
error_message = 'live stream'
elif 'streamingData' in player_response:
if 'formats' in player_response['streamingData']:
for f in player_response['streamingData']['formats']:
if 'url' in f: # DRM videos store url inside a signatureCipher key
yield int(f['itag']), [f['url'], f.get('approxDurationMs')]
if 'adaptiveFormats' in player_response['streamingData']:
for f in player_response['streamingData']['adaptiveFormats']:
if 'url' in f: # DRM videos store url inside a signatureCipher key
yield int(f['itag']), [f['url'], f.get('approxDurationMs')]
return
if error_message is not None:
raise YouTubeError(('Cannot stream video: %s' if allow_partial else 'Cannot download video: %s') % error_message)
if old_page:
r4 = re.search(r'url_encoded_fmt_stream_map=([^&]+)', old_page)
if r4 is not None:
fmt_url_map = urllib.parse.unquote(r4.group(1))
for fmt_url_encoded in fmt_url_map.split(','):
video_info = parse_qs(fmt_url_encoded)
yield int(video_info['itag'][0]), [video_info['url'][0], None]
fmt_id_url_map = sorted(find_urls(old_page, new_page), reverse=True)
if not fmt_id_url_map:
drm = re.search(r'(%22(cipher|signatureCipher)%22%3A|"signatureCipher":)', old_page or new_page)
if drm is not None:
raise YouTubeError('Unsupported DRM content')
raise YouTubeError('No formats found')
formats_available = {fmt_id for fmt_id, url in fmt_id_url_map}
fmt_id_url_map = dict(fmt_id_url_map)
for fmt_id in preferred_fmt_ids:
if not re.search(r'^[0-9]+$', str(fmt_id)):
# skip non-integer formats 'best', '136+140' or twitch '720p'
continue
fmt_id = int(fmt_id)
if fmt_id in formats_available:
fmt = formats_dict.get(fmt_id) or hls_formats_dict.get(fmt_id)
if fmt is not None:
_, _, description = fmt
else:
description = 'Unknown'
logger.info('Found YouTube format: %s (fmt_id=%d)',
description, fmt_id)
url, duration = fmt_id_url_map[fmt_id]
break
else:
raise YouTubeError('No preferred formats found')
return url, duration
@lru_cache(1)
def get_youtube_id(url):
r = re.compile(r'http[s]?://(?:[a-z]+\.)?youtube\.com/watch\?v=([^&]*)', re.IGNORECASE).match(url)
if r is not None:
return r.group(1)
r = re.compile(r'http[s]?://(?:[a-z]+\.)?youtube\.com/v/(.*)[?]', re.IGNORECASE).match(url)
if r is not None:
return r.group(1)
r = re.compile(r'http[s]?://(?:[a-z]+\.)?youtube\.com/v/(.*)\.swf', re.IGNORECASE).match(url)
if r is not None:
return r.group(1)
return for_each_feed_pattern(lambda url, channel: channel, url, None)
def is_video_link(url):
return (get_youtube_id(url) is not None)
def is_youtube_guid(guid):
return guid.startswith('tag:youtube.com,2008:video:')
def for_each_feed_pattern(func, url, fallback_result):
"""
Try to find the username for all possible YouTube feed/webpage URLs
Will call func(url, channel) for each match, and if func() returns
a result other than None, returns this. If no match is found or
func() returns None, return fallback_result.
"""
CHANNEL_MATCH_PATTERNS = [
r'http[s]?://(?:[a-z]+\.)?youtube\.com/user/([a-z0-9]+)',
        r'http[s]?://(?:[a-z]+\.)?youtube\.com/profile\?user=([a-z0-9]+)',
r'http[s]?://(?:[a-z]+\.)?youtube\.com/rss/user/([a-z0-9]+)/videos\.rss',
r'http[s]?://(?:[a-z]+\.)?youtube\.com/channel/([-_a-z0-9]+)',
r'http[s]?://(?:[a-z]+\.)?youtube\.com/feeds/videos.xml\?user=([a-z0-9]+)',
r'http[s]?://(?:[a-z]+\.)?youtube\.com/feeds/videos.xml\?channel_id=([-_a-z0-9]+)',
r'http[s]?://gdata.youtube.com/feeds/users/([^/]+)/uploads',
r'http[s]?://gdata.youtube.com/feeds/base/users/([^/]+)/uploads',
]
for pattern in CHANNEL_MATCH_PATTERNS:
m = re.match(pattern, url, re.IGNORECASE)
if m is not None:
result = func(url, m.group(1))
if result is not None:
return result
return fallback_result
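# Usage sketch: extract the username/channel id from a subscription URL,
# falling back to None when no pattern matches:
#
#     >>> # for_each_feed_pattern(lambda url, name: name,
#     >>> #                       'https://www.youtube.com/user/example', None)
#     >>> # -> 'example'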
def get_real_channel_url(url):
def return_user_feed(url, channel):
result = 'https://gdata.youtube.com/feeds/users/{0}/uploads'.format(channel)
logger.debug('YouTube link resolved: %s => %s', url, result)
return result
return for_each_feed_pattern(return_user_feed, url, url)
@lru_cache(1)
def get_channel_id_url(url, feed_data=None):
if 'youtube.com' in url:
# URL may contain channel ID, avoid a network request
m = re.search(r'channel_id=([^"]+)', url)
if m:
# old versions of gpodder allowed newlines and whitespace in feed URLs, strip here to avoid a 404
channel_id = m.group(1).strip()
channel_url = 'https://www.youtube.com/channel/{}'.format(channel_id)
return channel_url
try:
if feed_data is None:
r = util.urlopen(url, cookies={'SOCS': 'CAI'})
if not r.ok:
raise YouTubeError('Youtube "%s": %d %s' % (url, r.status_code, r.reason))
else:
r = feed_data
# video page may contain corrupt HTML/XML, search for tag to avoid exception
m = re.search(r'(channel_id=([^"]+)">|"channelId":"([^"]+)")', r.text)
if m:
channel_id = m.group(2) or m.group(3)
else:
raw_xml_data = io.BytesIO(r.content)
xml_data = xml.etree.ElementTree.parse(raw_xml_data)
channel_id = xml_data.find("{http://www.youtube.com/xml/schemas/2015}channelId").text
if channel_id is None:
# check entries if feed has an empty channelId
m = re.search(r'<yt:channelId>([^<]+)</yt:channelId>', r.text)
if m:
channel_id = m.group(1)
if channel_id is None:
raise Exception('Could not retrieve YouTube channel ID for URL %s.' % url)
# feeds no longer contain the required "UC" prefix on channel ID
if len(channel_id) == 22:
channel_id = "UC" + channel_id
channel_url = 'https://www.youtube.com/channel/{}'.format(channel_id)
return channel_url
except Exception:
logger.warning('Could not retrieve YouTube channel ID for URL %s.' % url, exc_info=True)
raise Exception('Could not retrieve YouTube channel ID for URL %s.' % url)
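# Sketch: feed URLs that already carry a channel id resolve without a
# network request, e.g.
#
#     >>> # get_channel_id_url('https://www.youtube.com/feeds/videos.xml?channel_id=UCxyz')
#     >>> # -> 'https://www.youtube.com/channel/UCxyz'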
def get_cover(url, feed_data=None):
if 'youtube.com' in url:
class YouTubeHTMLCoverParser(HTMLParser):
"""This custom html parser searches for the youtube channel thumbnail/avatar"""
def __init__(self):
super().__init__()
self.url = []
def handle_starttag(self, tag, attributes):
attribute_dict = {attribute[0]: attribute[1] for attribute in attributes}
# Look for 900x900px image first.
if tag == 'link' \
and 'rel' in attribute_dict \
and attribute_dict['rel'] == 'image_src':
self.url.append(attribute_dict['href'])
# Fallback to image that may only be 100x100px.
elif tag == 'img' \
and 'class' in attribute_dict \
and attribute_dict['class'] == "channel-header-profile-image":
self.url.append(attribute_dict['src'])
try:
channel_url = get_channel_id_url(url, feed_data)
r = util.urlopen(channel_url)
if not r.ok:
raise YouTubeError('Youtube "%s": %d %s' % (url, r.status_code, r.reason))
html_data = util.response_text(r)
parser = YouTubeHTMLCoverParser()
parser.feed(html_data)
if parser.url:
logger.debug('Youtube cover art for {} is: {}'.format(url, parser.url))
return parser.url[0]
except Exception:
logger.warning('Could not retrieve cover art', exc_info=True)
def get_gdpr_consent_url(html_data):
"""
    Create the URL that automatically accepts the GDPR consent form
    In the EU, YouTube redirects to a consent form that must be POSTed; the
    response then redirects to the requested video URL with the form data as
    GET parameters. This extracts the hidden fields from the GDPR form and
    builds the URL that submitting the form would result in.
"""
class ConsentHTML(HTMLParser):
def __init__(self):
super().__init__()
self.url = ''
self.consentForm = False
def handle_starttag(self, tag, attributes):
attribute_dict = {attribute[0]: attribute[1] for attribute in attributes}
if tag == 'form' and attribute_dict['action'] == 'https://consent.youtube.com/s':
self.consentForm = True
self.url = 'https://consent.google.com/s?'
# Get GDPR form elements
if self.consentForm and tag == 'input' and attribute_dict['type'] == 'hidden':
self.url += '&' + attribute_dict['name'] + '=' + urllib.parse.quote_plus(attribute_dict['value'])
def handle_endtag(self, tag):
if tag == 'form':
self.consentForm = False
try:
parser = ConsentHTML()
parser.feed(html_data)
except Exception:
raise YouTubeError('Could not retrieve GDPR accepted consent URL')
if parser.url:
logger.debug('YouTube GDPR accept consent URL is: %s', parser.url)
return parser.url
else:
logger.debug('YouTube GDPR accepted consent URL could not be resolved.')
raise YouTubeError('No acceptable GDPR consent URL')
def get_channel_desc(url, feed_data=None):
if 'youtube.com' in url:
class YouTubeHTMLDesc(HTMLParser):
"""This custom html parser searches for the YouTube channel description."""
def __init__(self):
super().__init__()
self.description = ''
def handle_starttag(self, tag, attributes):
attribute_dict = {attribute[0]: attribute[1] for attribute in attributes}
# Get YouTube channel description.
if tag == 'meta' \
and 'name' in attribute_dict \
and attribute_dict['name'] == "description":
self.description = attribute_dict['content']
try:
channel_url = get_channel_id_url(url, feed_data)
r = util.urlopen(channel_url)
if not r.ok:
raise YouTubeError('Youtube "%s": %d %s' % (url, r.status_code, r.reason))
html_data = util.response_text(r)
parser = YouTubeHTMLDesc()
parser.feed(html_data)
if parser.description:
logger.debug('YouTube description for %s is: %s', url, parser.description)
return parser.description
else:
logger.debug('YouTube description for %s is not provided.', url)
return _('No description available')
except Exception:
logger.warning('Could not retrieve YouTube channel description for %s.' % url, exc_info=True)
def parse_youtube_url(url):
"""
Youtube Channel Links are parsed into youtube feed links
>>> parse_youtube_url("https://www.youtube.com/channel/CHANNEL_ID")
'https://www.youtube.com/feeds/videos.xml?channel_id=CHANNEL_ID'
Youtube User Links are parsed into youtube feed links
>>> parse_youtube_url("https://www.youtube.com/user/USERNAME")
'https://www.youtube.com/feeds/videos.xml?user=USERNAME'
Youtube Playlist Links are parsed into youtube feed links
>>> parse_youtube_url("https://www.youtube.com/playlist?list=PLAYLIST_ID")
'https://www.youtube.com/feeds/videos.xml?playlist_id=PLAYLIST_ID'
>>> parse_youtube_url(None)
None
@param url: the path to the channel, user or playlist
@return: the feed url if successful or the given url if not
"""
if url is None:
return url
scheme, netloc, path, query, fragment = urllib.parse.urlsplit(url)
logger.debug("Analyzing URL: {}".format(" ".join([scheme, netloc, path, query, fragment])))
if 'youtube.com' in netloc:
if path == '/feeds/videos.xml' and re.search(r'^(user|channel|playlist)_id=.*', query):
return url
if '/user/' in path or '/channel/' in path or 'list=' in query:
logger.debug("Valid Youtube URL detected. Parsing...")
if path.startswith('/user/'):
user_id = path.split('/')[2]
query = 'user={user_id}'.format(user_id=user_id)
if path.startswith('/channel/'):
channel_id = path.split('/')[2]
query = 'channel_id={channel_id}'.format(channel_id=channel_id)
if 'list=' in query:
playlist_query = [query_value for query_value in query.split("&") if 'list=' in query_value][0]
playlist_id = playlist_query[5:]
query = 'playlist_id={playlist_id}'.format(playlist_id=playlist_id)
path = '/feeds/videos.xml'
new_url = urllib.parse.urlunsplit((scheme, netloc, path, query, fragment))
logger.debug("New Youtube URL: {}".format(new_url))
return new_url
# look for channel URL in page
logger.debug("Unknown Youtube URL, trying to extract channel ID...")
new_url = get_channel_id_url(url)
if new_url:
logger.debug("New Youtube URL: {}".format(new_url))
return parse_youtube_url(new_url)
logger.debug("Not a valid Youtube URL: {}".format(url))
return url
| 26,966 | Python | .py | 539 | 40.623377 | 139 | 0.591175 | gpodder/gpodder | 1,268 | 204 | 307 | GPL-3.0 | 9/5/2024, 5:10:30 PM (Europe/Amsterdam) |